diff -Nru s390-tools-2.31.0/CHANGELOG.md s390-tools-2.33.1/CHANGELOG.md --- s390-tools-2.31.0/CHANGELOG.md 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/CHANGELOG.md 2024-05-28 08:26:36.000000000 +0200 @@ -1,6 +1,53 @@ Release history for s390-tools (MIT version) -------------------------------------------- +* __v2.33.1 (2024-05-28)__ + + For Linux kernel version: 6.9 + + Bug Fixes: + - s390-tools: Fix formatting and typos in README.md + - s390-tools: Fix release string + +* __v2.33.0 (2024-05-27)__ + + For Linux kernel version: 6.9 + + Add new tools / libraries: + - chpstat: New tool for displaying channel path statistics + - libutil: Add output format helpers(util_fmt: JSON, JSON-SEQ, CSV, text pairs) + + Changes of existing tools / libraries: + - chzdev: Add --is-owner to identify files created by zdev + - dasdfmt: Change default mode to always use full-format (Note: affects ESE DASD) + - libap: Significantly reduce delay time between file lock retries + - pvattest: Rewrite from C to Rust + - pvattest: Support additional data & user-data + - rust/pv: Support for Attestation + + Bug Fixes: + - chreipl: Improve disk type detection when running under QEMU + - dbginfo.sh: Use POSIX option with uname + - s390-tools: Fix missing hyphen escapes in the man page for many tools + - zipl/src: Fix bugs in disk_get_info() reproducible in corner cases + +* __v2.32.0 (2024-04-03)__ + + For Linux kernel version: 6.8 + + Changes of existing tools: + - cpumf/lscpumf: add support for machine type 3932 + - genprotimg, pvattest, and pvsecret accept IBM signing key with Armonk as + subject locality + - zdump/zipl: Support for List-Directed dump from ECKD DASD + - zkey: Detect FIPS mode and generate PBKDF for luksFormat according to it + + Bug Fixes: + - dbginfo.sh: dash compatible copy sequence + - rust/pv_core: Fix UvDeviceInfo::get() method + - zipl/src: Fix leak of files if run with a broken configuration + - zkey: Fix convert command to accept only keys of type 
CCA-AESDATA + * __v2.31.0 (2024-02-02)__ For Linux kernel version: 6.7 diff -Nru s390-tools-2.31.0/common.mak s390-tools-2.33.1/common.mak --- s390-tools-2.31.0/common.mak 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/common.mak 2024-05-28 08:26:36.000000000 +0200 @@ -31,11 +31,11 @@ # Global definitions # The variable "DISTRELEASE" should be overwritten in rpm spec files with: # "make DISTRELEASE=%{release}" and "make install DISTRELEASE=%{release}" -VERSION = 2 -RELEASE = 31 -PATCHLEVEL = 0 -DISTRELEASE = build-$(shell date +%Y%m%d) -S390_TOOLS_RELEASE = $(VERSION).$(RELEASE).$(PATCHLEVEL)-$(DISTRELEASE) +VERSION := 2 +RELEASE := 33 +PATCHLEVEL := 1 +DISTRELEASE := build-$(shell date +%Y%m%d) +S390_TOOLS_RELEASE := $(VERSION).$(RELEASE).$(PATCHLEVEL)-$(DISTRELEASE) export S390_TOOLS_RELEASE reldir = $(subst $(realpath $(dir $(filter %common.mak,$(MAKEFILE_LIST))))/,,$(CURDIR)) @@ -293,8 +293,8 @@ $(ZFCPDUMP_DIR) $(SYSTEMDSYSTEMUNITDIR) \ $(USRLIB64DIR) $(USRINCLUDEDIR) $(ZKEYKMSPLUGINDIR) \ $(SOINSTALLDIR) $(USRLIBDIR) -OWNER = $(shell id -un) -GROUP = $(shell id -gn) +OWNER := $(shell id -un) +GROUP := $(shell id -gn) export INSTALLDIR BINDIR LIBDIR USRLIBDIR USRLIB64DIR MANDIR OWNER GROUP # Special defines for zfcpdump diff -Nru s390-tools-2.31.0/cpacfstats/cpacfstats.c s390-tools-2.33.1/cpacfstats/cpacfstats.c --- s390-tools-2.31.0/cpacfstats/cpacfstats.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/cpacfstats/cpacfstats.c 2024-05-28 08:26:36.000000000 +0200 @@ -55,168 +55,6 @@ [PAI_KERNEL] = "pai_kernel" }; -/* Strings for the pai counter details. Note that this is 0-based - * while PoP is 1-based. 
- */ -static const char *const pai_str[] = { - [ 0] = "KM DES", - [ 1] = "KM 2key TDES", - [ 2] = "KM TDES", - [ 3] = "KM DES protected key", - [ 4] = "KM 2key TDES protected key", - [ 5] = "KM TDES protected key", - [ 6] = "KM AES 128bit", - [ 7] = "KM AES 192bit", - [ 8] = "KM AES 256bit", - [ 9] = "KM AES 128bit protected key", - [ 10] = "KM AES 192bit protected key", - [ 11] = "KM AES 256bit protected key", - [ 12] = "KM AES-XTS 128bit", - [ 13] = "KM AES-XTS 256bit", - [ 14] = "KM AES-XTS 128bit protected key", - [ 15] = "KM AES-XTS 256bit protected key", - [ 16] = "KMC DES", - [ 17] = "KMC 2key TDES", - [ 18] = "KMC TDES", - [ 19] = "KMC DES protected key", - [ 20] = "KMC 2key TDES protected key", - [ 21] = "KMC TDES protected key", - [ 22] = "KMC AES 128bit", - [ 23] = "KMC AES 192bit", - [ 24] = "KMC AES 256bit", - [ 25] = "KMC AES 128bit protected key", - [ 26] = "KMC AES 192bit protected key", - [ 27] = "KMC AES 256bit protected key", - [ 28] = "KMC PRNG", - [ 29] = "KMA AES 128bit", - [ 30] = "KMA AES 192bit", - [ 31] = "KMA AES 256bit", - [ 32] = "KMA AES 128bit protected key", - [ 33] = "KMA AES 192bit protected key", - [ 34] = "KMA AES 256bit protected key", - [ 35] = "KMF DES", - [ 36] = "KMF 2key TDES", - [ 37] = "KMF TDES", - [ 38] = "KMF DES protected key", - [ 39] = "KMF 2key TDES protected key", - [ 40] = "KMF TDES protected key", - [ 41] = "KMF AES 128bit", - [ 42] = "KMF AES 192bit", - [ 43] = "KMF AES 256bit", - [ 44] = "KMF AES 128bit protected key", - [ 45] = "KMF AES 192bit protected key", - [ 46] = "KMF AES 256bit protected key", - [ 47] = "KMCTR DES", - [ 48] = "KMCTR 2key TDES", - [ 49] = "KMCTR TDES", - [ 50] = "KMCTR DES protected key", - [ 51] = "KMCTR 2key TDES protected key", - [ 52] = "KMCTR TDES protected key", - [ 53] = "KMCTR AES 128bit", - [ 54] = "KMCTR AES 192bit", - [ 55] = "KMCTR AES 256bit", - [ 56] = "KMCTR AES 128bit protected key", - [ 57] = "KMCTR AES 192bit protected key", - [ 58] = "KMCTR AES 256bit protected key", 
- [ 59] = "KMO DES", - [ 60] = "KMO 2key TDES", - [ 61] = "KMO TDES", - [ 62] = "KMO DES protected key", - [ 63] = "KMO 2key TDES protected key", - [ 64] = "KMO TDES protected key", - [ 65] = "KMO AES 128bit", - [ 66] = "KMO AES 192bit", - [ 67] = "KMO AES 256bit", - [ 68] = "KMO AES 128bit protected key", - [ 69] = "KMO AES 192bit protected key", - [ 70] = "KMO AES 256bit protected key", - [ 71] = "KIMD SHA1", - [ 72] = "KIMD SHA256", - [ 73] = "KIMD SHA512", - [ 74] = "KIMD SHA3-224", - [ 75] = "KIMD SHA3-256", - [ 76] = "KIMD SHA3-384", - [ 77] = "KIMD SHA3-512", - [ 78] = "KIMD SHAKE 128", - [ 79] = "KIMD SHAKE 256", - [ 80] = "KIMD GHASH", - [ 81] = "KLMD SHA1", - [ 82] = "KLMD SHA256", - [ 83] = "KLMD SHA512", - [ 84] = "KLMD SHA3-224", - [ 85] = "KLMD SHA3-256", - [ 86] = "KLMD SHA3-384", - [ 87] = "KLMD SHA3-512", - [ 88] = "KLMD SHAKE 128", - [ 89] = "KLMD SHAKE 256", - [ 90] = "KMAC DES", - [ 91] = "KMAC 2key TDES", - [ 92] = "KMAC TDES", - [ 93] = "KMAC DES protected key", - [ 94] = "KMAC 2key TDES protected key", - [ 95] = "KMAC TDES protected key", - [ 96] = "KMAC AES 128bit", - [ 97] = "KMAC AES 192bit", - [ 98] = "KMAC AES 256bit", - [ 99] = "KMAC AES 128bit protected key", - [100] = "KMAC AES 192bit protected key", - [101] = "KMAC AES 256bit protected key", - [102] = "PCC Last Block CMAC DES", - [103] = "PCC Last Block CMAC 2key TDES", - [104] = "PCC Last Block CMAC TDES", - [105] = "PCC Last Block CMAC DES protected key", - [106] = "PCC Last Block CMAC 2key TDES protected key", - [107] = "PCC Last Block CMAC TDES protected key", - [108] = "PCC Last Block CMAC AES 128bit", - [109] = "PCC Last Block CMAC AES 192bit", - [110] = "PCC Last Block CMAC AES 256bit", - [111] = "PCC Last Block CMAC AES 128bit protected key", - [112] = "PCC Last Block CMAC AES 192bit protected key", - [113] = "PCC Last Block CMAC AES 256bit protected key", - [114] = "PCC XTS Parameter AES 128bit", - [115] = "PCC XTS Parameter AES 256bit", - [116] = "PCC XTS Parameter AES 
128bit protected key", - [117] = "PCC XTS Parameter AES 256bit protected key", - [118] = "PCC Scalar Mult P256", - [119] = "PCC Scalar Mult P384", - [120] = "PCC Scalar Mult P521", - [121] = "PCC Scalar Mult Ed25519", - [122] = "PCC Scalar Mult Ed448", - [123] = "PCC Scalar Mult X25519", - [124] = "PCC Scalar Mult X448", - [125] = "PRNO SHA512 DRNG", - [126] = "PRNO TRNG Query Ratio", - [127] = "PRNO TRNG", - [128] = "KDSA ECDSA Verify P256", - [129] = "KDSA ECDSA Verify P384", - [130] = "KDSA ECDSA Verify P521", - [131] = "KDSA ECDSA Sign P256", - [132] = "KDSA ECDSA Sign P384", - [133] = "KDSA ECDSA Sign P521", - [134] = "KDSA ECDSA Sign P256 protected key", - [135] = "KDSA ECDSA Sign P384 protected key", - [136] = "KDSA ECDSA Sign P521 protected key", - [137] = "KDSA EdDSA Verify Ed25519", - [138] = "KDSA EdDSA Verify Ed448", - [139] = "KDSA EdDSA Sign Ed25519", - [140] = "KDSA EdDSA Sign Ed448", - [141] = "KDSA EdDSA Sign Ed25519 protected key", - [142] = "KDSA EdDSA Sign Ed448 protected key", - [143] = "PCKMO DES", - [144] = "PCKMO 2key TDES", - [145] = "PCKMO TDES", - [146] = "PCKMO AES 128bit", - [147] = "PCKMO AES 192bit", - [148] = "PCKMO AES 256bit", - [149] = "PCKMO ECC P256", - [150] = "PCKMO ECC P384", - [151] = "PCKMO ECC P521", - [152] = "PCKMO ECC Ed25519", - [153] = "PCKMO ECC Ed448", - [154] = "Reserved 1", - [155] = "Reserved 2" -}; - static int paiprintnonzero; @@ -275,7 +113,8 @@ int state, uint64_t value) { int paictr = 0, paistate = 0, ec; - uint64_t i, paivalue = 0, maxnum; + uint64_t i, paivalue = 0; + unsigned int maxnum; const char *space; switch (ctr) { @@ -288,11 +127,11 @@ printf("\"value\":%d}", !!value); return; case PAI_USER: - maxnum = NUM_PAI_USER; + maxnum = get_num_user_space_ctrs(); space = "user"; break; case PAI_KERNEL: - maxnum = NUM_PAI_KERNEL; + maxnum = MAX_NUM_PAI; space = "kernel"; break; default: @@ -304,7 +143,7 @@ return; if (value > maxnum) { eprint("Incompatible versions detected!\n"); - eprint("Expected %"PRIu64" 
counter space for %s, but got %"PRIu64"\n", + eprint("Expected %lu counter space for %s, but got %lu\n", maxnum, space, value); exit(EXIT_FAILURE); } @@ -315,18 +154,18 @@ /* No more data for this virtual event after error. */ return; } - if (paictr > NUM_PAI_KERNEL) { + if (paictr > MAX_NUM_PAI) { eprint("Pai counter number too big: %d\n", paictr); } else { printjsonsep(); printf("{\"counter\":\"%s\",\"space\":\"%s\",\"counterid\":%d,", - pai_str[paictr], space, paictr + 1); + get_ctr_name(paictr), space, paictr + 1); if (paistate < 0) { printf("\"error\":%d}", paistate); /* Protocol does not send further counters. */ return; } - printf("\"value\":%"PRIu64"}", paivalue); + printf("\"value\":%lu}", paivalue); } } } @@ -341,7 +180,8 @@ [UNSUPPORTED] = "unsupported" }; int paictr = 0, paistate = 0, ec; - uint64_t i, paivalue = 0, maxnum; + uint64_t i, paivalue = 0; + unsigned int maxnum; const char *ctrstr; switch (ctr) { @@ -350,11 +190,11 @@ printf(" hotplug detected\n"); return; case PAI_USER: - maxnum = NUM_PAI_USER; + maxnum = get_num_user_space_ctrs(); ctrstr = "pai_user"; break; case PAI_KERNEL: - maxnum = NUM_PAI_KERNEL; + maxnum = MAX_NUM_PAI; ctrstr = "pai_kernel"; break; default: @@ -372,7 +212,7 @@ return; if (value > maxnum) { eprint("Incompatible versions detected!\n"); - eprint("Expected %"PRIu64" counters for %s, but got %"PRIu64"\n", + eprint("Expected %lu counters for %s, but got %lu\n", maxnum, ctrstr, value); exit(EXIT_FAILURE); } @@ -383,10 +223,11 @@ /* No more data for this virtual event after error. 
*/ return; } - if (paictr > NUM_PAI_KERNEL) + if (paictr > MAX_NUM_PAI) eprint("Pai counter number too big: %d\n", paictr); else if (!paiprintnonzero || paivalue > 0) - printf(" %-45s: %"PRIu64"\n", pai_str[paictr], paivalue); + printf(" (%3d) %-45s: %lu\n", paictr + 1, + get_ctr_name(paictr), paivalue); } } @@ -402,7 +243,7 @@ else if (state == UNSUPPORTED) printf(" %s counter: unsupported\n", counter_str[ctr]); else - printf(" %s counter: %"PRIu64"\n", counter_str[ctr], value); + printf(" %s counter: %lu\n", counter_str[ctr], value); } @@ -417,7 +258,7 @@ } else if (state == ENABLED) { printjsonsep(); printf("{\"counter\":\"%s\",", counter_str[ctr]); - printf("\"value\":%"PRIu64"}", value); + printf("\"value\":%lu}", value); } } diff -Nru s390-tools-2.31.0/cpacfstats/cpacfstats_common.c s390-tools-2.33.1/cpacfstats/cpacfstats_common.c --- s390-tools-2.31.0/cpacfstats/cpacfstats_common.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/cpacfstats/cpacfstats_common.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: MIT */ +/* + * cpacfstats_common.c - shared code by daemon and client + * + * Copyright IBM Corp. 2024 + */ + +#include +#include +#include "cpacfstats.h" + +struct pai_counter { + const char *str; + const unsigned int counter_type; +}; + +/* + * Strings for the pai counter details. + * Integer indicating if kernel space is needed (0 for user, KERNEL_ONLY_COUNTER for kernel) + * Note that this is 0-based while PoP is 1-based. + * + * When adding new items to this list add the counter number in the pai_idx + * list in cpacfstatsd.c and increase the number of total counters in + * cpacfstats.h. 
+ */ +const struct pai_counter pai[] = { + [ 0] = {"KM DES", KERNEL_AND_USER_COUNTER}, + [ 1] = {"KM 2key TDES", KERNEL_AND_USER_COUNTER}, + [ 2] = {"KM TDES", KERNEL_AND_USER_COUNTER}, + [ 3] = {"KM DES protected key", KERNEL_AND_USER_COUNTER}, + [ 4] = {"KM 2key TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 5] = {"KM TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 6] = {"KM AES 128bit", KERNEL_AND_USER_COUNTER}, + [ 7] = {"KM AES 192bit", KERNEL_AND_USER_COUNTER}, + [ 8] = {"KM AES 256bit", KERNEL_AND_USER_COUNTER}, + [ 9] = {"KM AES 128bit protected key", KERNEL_AND_USER_COUNTER}, + [ 10] = {"KM AES 192bit protected key", KERNEL_AND_USER_COUNTER}, + [ 11] = {"KM AES 256bit protected key", KERNEL_AND_USER_COUNTER}, + [ 12] = {"KM AES-XTS 128bit", KERNEL_AND_USER_COUNTER}, + [ 13] = {"KM AES-XTS 256bit", KERNEL_AND_USER_COUNTER}, + [ 14] = {"KM AES-XTS 128bit protected key", KERNEL_AND_USER_COUNTER}, + [ 15] = {"KM AES-XTS 256bit protected key", KERNEL_AND_USER_COUNTER}, + [ 16] = {"KMC DES", KERNEL_AND_USER_COUNTER}, + [ 17] = {"KMC 2key TDES", KERNEL_AND_USER_COUNTER}, + [ 18] = {"KMC TDES", KERNEL_AND_USER_COUNTER}, + [ 19] = {"KMC DES protected key", KERNEL_AND_USER_COUNTER}, + [ 20] = {"KMC 2key TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 21] = {"KMC TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 22] = {"KMC AES 128bit", KERNEL_AND_USER_COUNTER}, + [ 23] = {"KMC AES 192bit", KERNEL_AND_USER_COUNTER}, + [ 24] = {"KMC AES 256bit", KERNEL_AND_USER_COUNTER}, + [ 25] = {"KMC AES 128bit protected key", KERNEL_AND_USER_COUNTER}, + [ 26] = {"KMC AES 192bit protected key", KERNEL_AND_USER_COUNTER}, + [ 27] = {"KMC AES 256bit protected key", KERNEL_AND_USER_COUNTER}, + [ 28] = {"KMC PRNG", KERNEL_AND_USER_COUNTER}, + [ 29] = {"KMA AES 128bit", KERNEL_AND_USER_COUNTER}, + [ 30] = {"KMA AES 192bit", KERNEL_AND_USER_COUNTER}, + [ 31] = {"KMA AES 256bit", KERNEL_AND_USER_COUNTER}, + [ 32] = {"KMA AES 128bit protected key", KERNEL_AND_USER_COUNTER}, + [ 
33] = {"KMA AES 192bit protected key", KERNEL_AND_USER_COUNTER}, + [ 34] = {"KMA AES 256bit protected key", KERNEL_AND_USER_COUNTER}, + [ 35] = {"KMF DES", KERNEL_AND_USER_COUNTER}, + [ 36] = {"KMF 2key TDES", KERNEL_AND_USER_COUNTER}, + [ 37] = {"KMF TDES", KERNEL_AND_USER_COUNTER}, + [ 38] = {"KMF DES protected key", KERNEL_AND_USER_COUNTER}, + [ 39] = {"KMF 2key TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 40] = {"KMF TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 41] = {"KMF AES 128bit", KERNEL_AND_USER_COUNTER}, + [ 42] = {"KMF AES 192bit", KERNEL_AND_USER_COUNTER}, + [ 43] = {"KMF AES 256bit", KERNEL_AND_USER_COUNTER}, + [ 44] = {"KMF AES 128bit protected key", KERNEL_AND_USER_COUNTER}, + [ 45] = {"KMF AES 192bit protected key", KERNEL_AND_USER_COUNTER}, + [ 46] = {"KMF AES 256bit protected key", KERNEL_AND_USER_COUNTER}, + [ 47] = {"KMCTR DES", KERNEL_AND_USER_COUNTER}, + [ 48] = {"KMCTR 2key TDES", KERNEL_AND_USER_COUNTER}, + [ 49] = {"KMCTR TDES", KERNEL_AND_USER_COUNTER}, + [ 50] = {"KMCTR DES protected key", KERNEL_AND_USER_COUNTER}, + [ 51] = {"KMCTR 2key TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 52] = {"KMCTR TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 53] = {"KMCTR AES 128bit", KERNEL_AND_USER_COUNTER}, + [ 54] = {"KMCTR AES 192bit", KERNEL_AND_USER_COUNTER}, + [ 55] = {"KMCTR AES 256bit", KERNEL_AND_USER_COUNTER}, + [ 56] = {"KMCTR AES 128bit protected key", KERNEL_AND_USER_COUNTER}, + [ 57] = {"KMCTR AES 192bit protected key", KERNEL_AND_USER_COUNTER}, + [ 58] = {"KMCTR AES 256bit protected key", KERNEL_AND_USER_COUNTER}, + [ 59] = {"KMO DES", KERNEL_AND_USER_COUNTER}, + [ 60] = {"KMO 2key TDES", KERNEL_AND_USER_COUNTER}, + [ 61] = {"KMO TDES", KERNEL_AND_USER_COUNTER}, + [ 62] = {"KMO DES protected key", KERNEL_AND_USER_COUNTER}, + [ 63] = {"KMO 2key TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 64] = {"KMO TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 65] = {"KMO AES 128bit", KERNEL_AND_USER_COUNTER}, + [ 66] = 
{"KMO AES 192bit", KERNEL_AND_USER_COUNTER}, + [ 67] = {"KMO AES 256bit", KERNEL_AND_USER_COUNTER}, + [ 68] = {"KMO AES 128bit protected key", KERNEL_AND_USER_COUNTER}, + [ 69] = {"KMO AES 192bit protected key", KERNEL_AND_USER_COUNTER}, + [ 70] = {"KMO AES 256bit protected key", KERNEL_AND_USER_COUNTER}, + [ 71] = {"KIMD SHA1", KERNEL_AND_USER_COUNTER}, + [ 72] = {"KIMD SHA256", KERNEL_AND_USER_COUNTER}, + [ 73] = {"KIMD SHA512", KERNEL_AND_USER_COUNTER}, + [ 74] = {"KIMD SHA3-224", KERNEL_AND_USER_COUNTER}, + [ 75] = {"KIMD SHA3-256", KERNEL_AND_USER_COUNTER}, + [ 76] = {"KIMD SHA3-384", KERNEL_AND_USER_COUNTER}, + [ 77] = {"KIMD SHA3-512", KERNEL_AND_USER_COUNTER}, + [ 78] = {"KIMD SHAKE 128", KERNEL_AND_USER_COUNTER}, + [ 79] = {"KIMD SHAKE 256", KERNEL_AND_USER_COUNTER}, + [ 80] = {"KIMD GHASH", KERNEL_AND_USER_COUNTER}, + [ 81] = {"KLMD SHA1", KERNEL_AND_USER_COUNTER}, + [ 82] = {"KLMD SHA256", KERNEL_AND_USER_COUNTER}, + [ 83] = {"KLMD SHA512", KERNEL_AND_USER_COUNTER}, + [ 84] = {"KLMD SHA3-224", KERNEL_AND_USER_COUNTER}, + [ 85] = {"KLMD SHA3-256", KERNEL_AND_USER_COUNTER}, + [ 86] = {"KLMD SHA3-384", KERNEL_AND_USER_COUNTER}, + [ 87] = {"KLMD SHA3-512", KERNEL_AND_USER_COUNTER}, + [ 88] = {"KLMD SHAKE 128", KERNEL_AND_USER_COUNTER}, + [ 89] = {"KLMD SHAKE 256", KERNEL_AND_USER_COUNTER}, + [ 90] = {"KMAC DES", KERNEL_AND_USER_COUNTER}, + [ 91] = {"KMAC 2key TDES", KERNEL_AND_USER_COUNTER}, + [ 92] = {"KMAC TDES", KERNEL_AND_USER_COUNTER}, + [ 93] = {"KMAC DES protected key", KERNEL_AND_USER_COUNTER}, + [ 94] = {"KMAC 2key TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 95] = {"KMAC TDES protected key", KERNEL_AND_USER_COUNTER}, + [ 96] = {"KMAC AES 128bit", KERNEL_AND_USER_COUNTER}, + [ 97] = {"KMAC AES 192bit", KERNEL_AND_USER_COUNTER}, + [ 98] = {"KMAC AES 256bit", KERNEL_AND_USER_COUNTER}, + [ 99] = {"KMAC AES 128bit protected key", KERNEL_AND_USER_COUNTER}, + [100] = {"KMAC AES 192bit protected key", KERNEL_AND_USER_COUNTER}, + [101] = {"KMAC AES 
256bit protected key", KERNEL_AND_USER_COUNTER}, + [102] = {"PCC Last Block CMAC DES", KERNEL_AND_USER_COUNTER}, + [103] = {"PCC Last Block CMAC 2key TDES", KERNEL_AND_USER_COUNTER}, + [104] = {"PCC Last Block CMAC TDES", KERNEL_AND_USER_COUNTER}, + [105] = {"PCC Last Block CMAC DES protected key", + KERNEL_AND_USER_COUNTER}, + [106] = {"PCC Last Block CMAC 2key TDES protected key", + KERNEL_AND_USER_COUNTER}, + [107] = {"PCC Last Block CMAC TDES protected key", + KERNEL_AND_USER_COUNTER}, + [108] = {"PCC Last Block CMAC AES 128bit", KERNEL_AND_USER_COUNTER}, + [109] = {"PCC Last Block CMAC AES 192bit", KERNEL_AND_USER_COUNTER}, + [110] = {"PCC Last Block CMAC AES 256bit", KERNEL_AND_USER_COUNTER}, + [111] = {"PCC Last Block CMAC AES 128bit protected key", + KERNEL_AND_USER_COUNTER}, + [112] = {"PCC Last Block CMAC AES 192bit protected key", + KERNEL_AND_USER_COUNTER}, + [113] = {"PCC Last Block CMAC AES 256bit protected key", + KERNEL_AND_USER_COUNTER}, + [114] = {"PCC XTS Parameter AES 128bit", KERNEL_AND_USER_COUNTER}, + [115] = {"PCC XTS Parameter AES 256bit", KERNEL_AND_USER_COUNTER}, + [116] = {"PCC XTS Parameter AES 128bit protected key", + KERNEL_AND_USER_COUNTER}, + [117] = {"PCC XTS Parameter AES 256bit protected key", + KERNEL_AND_USER_COUNTER}, + [118] = {"PCC Scalar Mult P256", KERNEL_AND_USER_COUNTER}, + [119] = {"PCC Scalar Mult P384", KERNEL_AND_USER_COUNTER}, + [120] = {"PCC Scalar Mult P521", KERNEL_AND_USER_COUNTER}, + [121] = {"PCC Scalar Mult Ed25519", KERNEL_AND_USER_COUNTER}, + [122] = {"PCC Scalar Mult Ed448", KERNEL_AND_USER_COUNTER}, + [123] = {"PCC Scalar Mult X25519", KERNEL_AND_USER_COUNTER}, + [124] = {"PCC Scalar Mult X448", KERNEL_AND_USER_COUNTER}, + [125] = {"PRNO SHA512 DRNG", KERNEL_AND_USER_COUNTER}, + [126] = {"PRNO TRNG Query Ratio", KERNEL_AND_USER_COUNTER}, + [127] = {"PRNO TRNG", KERNEL_AND_USER_COUNTER}, + [128] = {"KDSA ECDSA Verify P256", KERNEL_AND_USER_COUNTER}, + [129] = {"KDSA ECDSA Verify P384", 
KERNEL_AND_USER_COUNTER}, + [130] = {"KDSA ECDSA Verify P521", KERNEL_AND_USER_COUNTER}, + [131] = {"KDSA ECDSA Sign P256", KERNEL_AND_USER_COUNTER}, + [132] = {"KDSA ECDSA Sign P384", KERNEL_AND_USER_COUNTER}, + [133] = {"KDSA ECDSA Sign P521", KERNEL_AND_USER_COUNTER}, + [134] = {"KDSA ECDSA Sign P256 protected key", + KERNEL_AND_USER_COUNTER}, + [135] = {"KDSA ECDSA Sign P384 protected key", + KERNEL_AND_USER_COUNTER}, + [136] = {"KDSA ECDSA Sign P521 protected key", + KERNEL_AND_USER_COUNTER}, + [137] = {"KDSA EdDSA Verify Ed25519", KERNEL_AND_USER_COUNTER}, + [138] = {"KDSA EdDSA Verify Ed448", KERNEL_AND_USER_COUNTER}, + [139] = {"KDSA EdDSA Sign Ed25519", KERNEL_AND_USER_COUNTER}, + [140] = {"KDSA EdDSA Sign Ed448", KERNEL_AND_USER_COUNTER}, + [141] = {"KDSA EdDSA Sign Ed25519 protected key", + KERNEL_AND_USER_COUNTER}, + [142] = {"KDSA EdDSA Sign Ed448 protected key", + KERNEL_AND_USER_COUNTER}, + [143] = {"PCKMO DES", KERNEL_ONLY_COUNTER}, + [144] = {"PCKMO 2key TDES", KERNEL_ONLY_COUNTER}, + [145] = {"PCKMO TDES", KERNEL_ONLY_COUNTER}, + [146] = {"PCKMO AES 128bit", KERNEL_ONLY_COUNTER}, + [147] = {"PCKMO AES 192bit", KERNEL_ONLY_COUNTER}, + [148] = {"PCKMO AES 256bit", KERNEL_ONLY_COUNTER}, + [149] = {"PCKMO ECC P256", KERNEL_ONLY_COUNTER}, + [150] = {"PCKMO ECC P384", KERNEL_ONLY_COUNTER}, + [151] = {"PCKMO ECC P521", KERNEL_ONLY_COUNTER}, + [152] = {"PCKMO ECC Ed25519", KERNEL_ONLY_COUNTER}, + [153] = {"PCKMO ECC Ed448", KERNEL_ONLY_COUNTER}, + [154] = {"Reserved 1", KERNEL_ONLY_COUNTER}, + [155] = {"Reserved 2", KERNEL_ONLY_COUNTER} +}; + +/* + * Returns counter_type of pai_counter struct + * + * SUPPRESS_COUNTER + * KERNEL_AND_USER_COUNTER + * KERNEL_ONLY_COUNTER + */ +enum counter_type is_user_space(unsigned int ctr) +{ + if (ctr >= MAX_NUM_PAI) + return SUPPRESS_COUNTER; + return pai[ctr].counter_type; +} + +const char *get_ctr_name(unsigned int ctr) +{ + if (ctr >= MAX_NUM_PAI) + return NULL; + return pai[ctr].str; +} + +/* + * Returns number of 
PAI counters for which no kernel space is needed + */ +unsigned int get_num_user_space_ctrs(void) +{ + unsigned int counter = 0; + unsigned int i; + + for (i = 0; i < MAX_NUM_PAI; i++) { + if (is_user_space(i) == KERNEL_AND_USER_COUNTER) + counter++; + } + + return counter; +} diff -Nru s390-tools-2.31.0/cpacfstats/cpacfstatsd.c s390-tools-2.33.1/cpacfstats/cpacfstatsd.c --- s390-tools-2.31.0/cpacfstats/cpacfstatsd.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/cpacfstats/cpacfstatsd.c 2024-05-28 08:26:36.000000000 +0200 @@ -24,12 +24,52 @@ #include #include #include +#include +#include +#include #include "lib/zt_common.h" +#include "lib/util_file.h" #include "cpacfstats.h" static volatile int stopsig; +/* + * This list contains the counter numbers sorted by instruction + */ +static const unsigned int pai_idx[] = { + // KM + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + // KMC + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + // KMA + 29, 30, 31, 32, 33, 34, + // KMF + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + // KMCTR + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + // KMO + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + // KIMD + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + // KLMD + 81, 82, 83, 84, 85, 86, 87, 88, 89, + // KMAC + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + // PCC + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + // PRNO + 125, 126, 127, + // KDSA + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, + // PCKMO + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + // Reserved + 154, 155 +}; + static const char *const name = "cpacfstatsd"; static const char *const usage = @@ -93,34 +133,35 @@ * Note that the PAI counters are 0-based, not 1 based as in PoP! * Sending ends with the first error. 
*/ -static int do_send_pai(int s, int user) +static int do_send_pai(int s, int user, unsigned int *counter) { - int ctr, state, i, maxctr, rc = 0; + int ctr, state, i, rc = 0; + unsigned int current_ctr; uint64_t value; - if (user) { - ctr = PAI_USER; - maxctr = NUM_PAI_USER; - } else { - ctr = PAI_KERNEL; - maxctr = NUM_PAI_KERNEL; - } + ctr = user ? PAI_USER : PAI_KERNEL; + state = perf_ctr_state(ctr); if (state != ENABLED) return rc; - for (i = 0; i < maxctr; ++i) { - rc = perf_read_pai_ctr(i, user, &value); + for (i = 0; i < MAX_NUM_PAI; ++i) { + current_ctr = pai_idx[i]; + if ((user && is_user_space(current_ctr) != KERNEL_AND_USER_COUNTER) || + (!user && is_user_space(current_ctr) == SUPPRESS_COUNTER) || + counter[current_ctr] != 1) + continue; + rc = perf_read_pai_ctr(current_ctr, user, &value); if (rc != 0) { - send_answer(s, i, rc, 0); + send_answer(s, current_ctr, rc, 0); break; } - send_answer(s, i, state, value); + send_answer(s, current_ctr, state, value); } return rc; } -static int do_enable(int s, enum ctr_e ctr) +static int do_enable(int s, enum ctr_e ctr, unsigned int *supported_counters) { uint64_t value = 0; int i, rc = 0; @@ -132,7 +173,7 @@ if (i == (int) ctr || ctr == ALL_COUNTER) { state = perf_ctr_state(i); if (state == DISABLED) { - rc = perf_enable_ctr(i); + rc = perf_enable_ctr(i, supported_counters); if (rc != 0) { send_answer(s, i, rc, 0); break; @@ -140,7 +181,7 @@ state = ENABLED; } if (state != UNSUPPORTED) { - rc = perf_read_ctr(i, &value); + rc = perf_read_ctr(i, &value, supported_counters); if (rc != 0) { send_answer(s, i, rc, 0); break; @@ -148,20 +189,20 @@ } send_answer(s, i, state, value); if (i == PAI_USER) - rc = do_send_pai(s, 1); + rc = do_send_pai(s, 1, supported_counters); if (i == PAI_KERNEL) - rc = do_send_pai(s, 0); + rc = do_send_pai(s, 0, supported_counters); } } if (rc == 0) { - rc = perf_read_ctr(HOTPLUG_DETECTED, &value); + rc = perf_read_ctr(HOTPLUG_DETECTED, &value, NULL); send_answer(s, HOTPLUG_DETECTED, rc, 
value); } return rc; } -static int do_disable(int s, enum ctr_e ctr) +static int do_disable(int s, enum ctr_e ctr, unsigned int *supported_counters) { int i, rc = 0; uint64_t value; @@ -171,7 +212,7 @@ continue; if (i == (int) ctr || ctr == ALL_COUNTER) { if (perf_ctr_state(i) == ENABLED) { - rc = perf_disable_ctr(i); + rc = perf_disable_ctr(i, supported_counters); if (rc != 0) { send_answer(s, i, rc, 0); break; @@ -181,14 +222,14 @@ } } if (rc == 0) { - rc = perf_read_ctr(HOTPLUG_DETECTED, &value); + rc = perf_read_ctr(HOTPLUG_DETECTED, &value, NULL); send_answer(s, HOTPLUG_DETECTED, rc, value); } return rc; } -static int do_reset(int s, enum ctr_e ctr) +static int do_reset(int s, enum ctr_e ctr, unsigned int *supported_counters) { int i, rc = 0, state; uint64_t value; @@ -199,7 +240,7 @@ if (i == (int) ctr || ctr == ALL_COUNTER) { state = perf_ctr_state(i); if (state == ENABLED) { - rc = perf_reset_ctr(i, &value); + rc = perf_reset_ctr(i, &value, supported_counters); if (rc != 0) { send_answer(s, i, rc, 0); break; @@ -207,20 +248,20 @@ } send_answer(s, i, state, value); if (i == PAI_USER) - rc = do_send_pai(s, 1); + rc = do_send_pai(s, 1, supported_counters); if (i == PAI_KERNEL) - rc = do_send_pai(s, 0); + rc = do_send_pai(s, 0, supported_counters); } } if (rc == 0) { - rc = perf_read_ctr(HOTPLUG_DETECTED, &value); + rc = perf_read_ctr(HOTPLUG_DETECTED, &value, NULL); send_answer(s, HOTPLUG_DETECTED, rc, value); } return rc; } -static int do_print(int s, enum ctr_e ctr) +static int do_print(int s, enum ctr_e ctr, unsigned int *supported_counters) { int i, rc = 0, state; uint64_t value = 0; @@ -231,7 +272,7 @@ if (i == (int) ctr || ctr == ALL_COUNTER) { state = perf_ctr_state(i); if (state == ENABLED) { - rc = perf_read_ctr(i, &value); + rc = perf_read_ctr(i, &value, supported_counters); if (rc != 0) { send_answer(s, i, rc, 0); break; @@ -239,13 +280,13 @@ } send_answer(s, i, state, value); if (i == PAI_USER) - rc = do_send_pai(s, 1); + rc = do_send_pai(s, 1, 
supported_counters); if (i == PAI_KERNEL) - rc = do_send_pai(s, 0); + rc = do_send_pai(s, 0, supported_counters); } } if (rc == 0) { - rc = perf_read_ctr(HOTPLUG_DETECTED, &value); + rc = perf_read_ctr(HOTPLUG_DETECTED, &value, NULL); send_answer(s, HOTPLUG_DETECTED, rc, value); } return rc; @@ -436,9 +477,44 @@ } +/* + * returns -1 on error + * returns X where X is the found counters in dir + * + * the supplied array supported_counters[] is filled in this function with the + * available PAI counters found in SYSFS_PAI_COUNTER + */ +static void supported_functions(unsigned int supported_counters[]) +{ + const char *dir = SYSFS_PAI_COUNTER; + struct dirent *dp = NULL; + char filepath[PATH_MAX]; + unsigned int num; + DIR *dfd = NULL; + + dfd = opendir(dir); + if (dfd == NULL) + return; + + while ((dp = readdir(dfd)) != NULL) { + if ((strcmp(dp->d_name, ".") != 0) && + (strcmp(dp->d_name, "..") != 0)) { + snprintf(filepath, sizeof(filepath), "%s%s", dir, dp->d_name); + if (util_file_read_va(filepath, "event=0x10%x", &num) != 1) + continue; + if (num > 0 && num <= MAX_NUM_PAI) + supported_counters[num - 1] = 1; + } + } + + closedir(dfd); + return; +} + int main(int argc, char *argv[]) { int rc, sfd, foreground = 0, startup_pipe = -1, initialized = 0; + unsigned int supported_counters[MAX_NUM_PAI] = { 0 }; struct sigaction act; if (argc > 1) { @@ -485,7 +561,9 @@ } } - if (perf_init() != 0) { + supported_functions(supported_counters); + + if (perf_init(supported_counters) != 0) { eprint("Couldn't initialize perf lib\n"); goto error; } @@ -548,13 +626,13 @@ } if (cmd == ENABLE) - rc = do_enable(s, ctr); + rc = do_enable(s, ctr, supported_counters); else if (cmd == DISABLE) - rc = do_disable(s, ctr); + rc = do_disable(s, ctr, supported_counters); else if (cmd == RESET) - rc = do_reset(s, ctr); + rc = do_reset(s, ctr, supported_counters); else if (cmd == PRINT) - rc = do_print(s, ctr); + rc = do_print(s, ctr, supported_counters); else { eprint("Received unknown command %d, 
ignoring\n", (int) cmd); diff -Nru s390-tools-2.31.0/cpacfstats/cpacfstats.h s390-tools-2.33.1/cpacfstats/cpacfstats.h --- s390-tools-2.31.0/cpacfstats/cpacfstats.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/cpacfstats/cpacfstats.h 2024-05-28 08:26:36.000000000 +0200 @@ -20,12 +20,22 @@ #define DEFAULT_RECV_TIMEOUT (30 * 1000) /* - * Number of PAI counters for user space. This excludes PCKMO since - * this instruction is privileged. + * Number of PAI counters. Contains all counters regardless of kernel or user + * space */ -#define NUM_PAI_USER 143 -/* Number of PAI counters for kernel space. Contains all counters. */ -#define NUM_PAI_KERNEL 156 +#define MAX_NUM_PAI 156 + +/* + * This is the sysfs directory from which cpacfstatsd daemon application loads + * the available PAI counters + */ +#define SYSFS_PAI_COUNTER "/sys/bus/event_source/devices/pai_crypto/events/" + +/* + * Note that this is the first kernel only counter in the 1-based list of the + * architecture and NOT from the 0-based list in the cpacfstats code! 
+ */ +#define FIRST_KERNEL_ONLY_COUNTER 144 int eprint(const char *format, ...); @@ -67,6 +77,12 @@ UNSUPPORTED }; +enum counter_type { + SUPPRESS_COUNTER = 0, + KERNEL_AND_USER_COUNTER, + KERNEL_ONLY_COUNTER, +}; + /* * query send from client to daemon * Consist of: @@ -122,15 +138,23 @@ /* perf_crypto.c */ -int perf_init(void); +int perf_init(unsigned int *supported_counters); void perf_stop(void); void perf_close(void); -int perf_enable_ctr(enum ctr_e ctr); -int perf_disable_ctr(enum ctr_e ctr); -int perf_reset_ctr(enum ctr_e ctr, uint64_t *value); -int perf_read_ctr(enum ctr_e ctr, uint64_t *value); +int perf_enable_ctr(enum ctr_e ctr, unsigned int *supported_counters); +int perf_disable_ctr(enum ctr_e ctr, unsigned int *supported_counters); +int perf_reset_ctr(enum ctr_e ctr, uint64_t *value, unsigned int + *supported_counters); +int perf_read_ctr(enum ctr_e ctr, uint64_t *value, unsigned int + *supported_counters); int perf_ecc_supported(void); int perf_ctr_state(enum ctr_e ctr); int perf_read_pai_ctr(unsigned int ctrnum, int user, uint64_t *value); +/* cpacfstats_common.c */ + +enum counter_type is_user_space(unsigned int ctr); +const char *get_ctr_name(unsigned int ctr); +unsigned int get_num_user_space_ctrs(void); + #endif diff -Nru s390-tools-2.31.0/cpacfstats/Makefile s390-tools-2.33.1/cpacfstats/Makefile --- s390-tools-2.31.0/cpacfstats/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/cpacfstats/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -21,10 +21,11 @@ all: check_dep cpacfstats cpacfstatsd -cpacfstatsd: cpacfstatsd.o stats_sock.o perf_crypto.o +cpacfstatsd: cpacfstatsd.o stats_sock.o perf_crypto.o cpacfstats_common.o \ + $(rootdir)/libutil/libutil.a $(LINK) $(ALL_LDFLAGS) $^ $(LDLIBS) -ludev -lpthread -o $@ -cpacfstats: cpacfstats.o stats_sock.o +cpacfstats: cpacfstats.o stats_sock.o cpacfstats_common.o $(LINK) $(ALL_LDFLAGS) $^ $(LDLIBS) -o $@ install: all diff -Nru s390-tools-2.31.0/cpacfstats/perf_crypto.c 
s390-tools-2.33.1/cpacfstats/perf_crypto.c --- s390-tools-2.31.0/cpacfstats/perf_crypto.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/cpacfstats/perf_crypto.c 2024-05-28 08:26:36.000000000 +0200 @@ -52,8 +52,8 @@ struct percpucounter { int ctr_fds[ALL_COUNTER]; - int pai_user[NUM_PAI_USER]; - int pai_kernel[NUM_PAI_KERNEL]; + int pai_user[MAX_NUM_PAI]; + int pai_kernel[MAX_NUM_PAI]; unsigned int cpunum; struct percpucounter *next; }; @@ -84,10 +84,10 @@ for (i = 0; i < ALL_COUNTER; ++i) ppc->ctr_fds[i] = -1; - for (i = 0; i < NUM_PAI_USER; ++i) + for (i = 0; i < MAX_NUM_PAI; ++i) { ppc->pai_user[i] = -1; - for (i = 0; i < NUM_PAI_KERNEL; ++i) ppc->pai_kernel[i] = -1; + } ppc->cpunum = cpunum; ppc->next = NULL; } @@ -100,10 +100,10 @@ for (i = 0; i < ALL_COUNTER; ++i) (void)close(pcpu->ctr_fds[i]); - for (i = 0; i < NUM_PAI_USER; ++i) + for (i = 0; i < MAX_NUM_PAI; ++i) { (void)close(pcpu->pai_user[i]); - for (i = 0; i < NUM_PAI_KERNEL; ++i) (void)close(pcpu->pai_kernel[i]); + } free(pcpu); } @@ -225,7 +225,7 @@ return 0; } -static int activatecpu(unsigned int cpu) +static int activatecpu(unsigned int cpu, unsigned int *supported_counters) { struct perf_event_attr pfm_event; struct percpucounter *ppc; @@ -273,7 +273,10 @@ (ctr_state[PAI_USER] == UNSUPPORTED) == (ctr_state[PAI_KERNEL] == UNSUPPORTED) */ if (ctr_state[PAI_USER] != UNSUPPORTED) { - for (i = 1; i <= NUM_PAI_USER; ++i) { + for (i = 1; i <= MAX_NUM_PAI; ++i) { + if (is_user_space(i - 1) != KERNEL_AND_USER_COUNTER || + supported_counters[i - 1] != 1) + continue; memset(&pfm_event, 0, sizeof(pfm_event)); pfm_event.size = sizeof(pfm_event); pfm_event.type = paipmutype; @@ -307,7 +310,14 @@ ppc->pai_kernel[i - 1] = fd; } } - for (; i <= NUM_PAI_KERNEL; ++i) { + /* + * i can start at the index of the first PAI counter + * for which kernel space is needed + */ + for (i = FIRST_KERNEL_ONLY_COUNTER; i <= MAX_NUM_PAI; ++i) { + if (is_user_space(i - 1) == SUPPRESS_COUNTER || + supported_counters[i - 
1] != 1) + continue; memset(&pfm_event, 0, sizeof(pfm_event)); pfm_event.size = sizeof(pfm_event); pfm_event.type = paipmutype; @@ -348,10 +358,10 @@ if (pcpu != NULL) { for (i = 0; i < ALL_COUNTER; ++i) (void)close(pcpu->ctr_fds[i]); - for (i = 0; i < NUM_PAI_USER; ++i) + for (i = 0; i < MAX_NUM_PAI; ++i) { (void)close(pcpu->pai_user[i]); - for (i = 0; i < NUM_PAI_KERNEL; ++i) (void)close(pcpu->pai_kernel[i]); + } free(pcpu); if (enabledcounter) hotplugdetected = 1; @@ -359,7 +369,7 @@ pthread_mutex_unlock(&rootmux); } -static int addallcpus(void) +static int addallcpus(unsigned int *supported_counters) { unsigned int start, end; int scanned, rc = 0; @@ -379,7 +389,7 @@ if (scanned == 1) end = start; for (; start <= end; ++start) { - if (activatecpu(start)) { + if (activatecpu(start, supported_counters)) { rc = -1; goto out; } @@ -409,7 +419,7 @@ return res; } -static void *hotplughandler(void *UNUSED(unused)) +static void *hotplughandler(void *supported_counters) { struct udev *hotplug; struct udev_monitor *monitor; @@ -452,7 +462,7 @@ path = udev_device_get_devpath(dev); if (sscanf(path, "/devices/system/cpu/cpu%u", &cpunum) != 1) continue; - if (on && activatecpu(cpunum)) + if (on && activatecpu(cpunum, (unsigned int *) supported_counters)) eprint("Failed to attach to hotplugged CPU %u\n", cpunum); if (off) deactivatecpu(cpunum); @@ -462,11 +472,18 @@ return NULL; } -int perf_init(void) +int perf_init(unsigned int *supported_counters) { - int ecc_supported, i, num; + static const char *cpum_cf[] = { + "DEA_FUNCTIONS", + "AES_FUNCTIONS", + "SHA_FUNCTIONS", + "PRNG_FUNCTIONS", + "ECC_FUNCTION_COUNT" + }; unsigned long maxfd; struct rlimit rlim; + int i, num; FILE *f; /* initialize performance monitoring library */ @@ -478,16 +495,18 @@ /* We currently support all cpumf counters plus two virtual * counters for PAI. 
*/ num = ALL_COUNTER + 2; - /* Check if ECC is supported on current hardware */ - ecc_supported = perf_counter_supported("cpum_cf", "ECC_FUNCTION_COUNT"); if (!cpumf_authorized()) { for (i = 0; i < ALL_COUNTER; ++i) ctr_state[i] = UNSUPPORTED; num -= ALL_COUNTER; - } else if (!ecc_supported) { - ctr_state[ECC_FUNCTIONS] = UNSUPPORTED; - --num; + } else { + for (i = 0; i < ALL_COUNTER; i++) { + if (!perf_counter_supported("cpum_cf", cpum_cf[i])) { + ctr_state[i] = UNSUPPORTED; + num--; + } + } } if (!perf_counter_supported("pai_crypto", "CRYPTO_ALL")) { @@ -523,11 +542,12 @@ return -1; } - if (pthread_create(&hotplugthread, NULL, hotplughandler, NULL)) { + if (pthread_create(&hotplugthread, NULL, hotplughandler, + supported_counters)) { eprint("Failed to start hotplug handler thread\n"); return -1; } - return addallcpus(); + return addallcpus(supported_counters); } @@ -552,11 +572,14 @@ } -static int enable_array(int *arr, int size) +static int enable_array(int *arr, int user, unsigned int *supported_counters) { int i, ec, rc = 0; - for (i = 0; i < size; ++i) { + for (i = 0; i < MAX_NUM_PAI; ++i) { + if ((user && is_user_space(i) != KERNEL_AND_USER_COUNTER) || + supported_counters[i] != 1 || is_user_space(i) == SUPPRESS_COUNTER) + continue; ec = ioctl(arr[i], PERF_EVENT_IOC_ENABLE, 0); if (ec < 0) { eprint("Ioctl(PERF_EVENT_IOC_ENABLE) failed with errno=%d [%s]\n", @@ -568,14 +591,14 @@ } -int perf_enable_ctr(enum ctr_e ctr) +int perf_enable_ctr(enum ctr_e ctr, unsigned int *supported_counters) { struct percpucounter *pcpu; int ec, rc = 0; if (ctr == ALL_COUNTER) { for (ctr = 0; ctr < ALL_COUNTER; ctr++) { - rc = perf_enable_ctr(ctr); + rc = perf_enable_ctr(ctr, supported_counters); if (rc != 0) return rc; } @@ -593,7 +616,7 @@ endforeachcpu(); } else if (ctr == PAI_USER) { foreachcpu(pcpu) { - ec = enable_array(pcpu->pai_user, NUM_PAI_USER); + ec = enable_array(pcpu->pai_user, 1, supported_counters); if (ec < 0) rc = -1; } @@ -602,7 +625,7 @@ endforeachcpu(); } 
else if (ctr == PAI_KERNEL) { foreachcpu(pcpu) { - ec = enable_array(pcpu->pai_kernel, NUM_PAI_KERNEL); + ec = enable_array(pcpu->pai_kernel, 0, supported_counters); if (ec < 0) rc = -1; } @@ -617,11 +640,14 @@ } -static int disable_array(int *arr, int size) +static int disable_array(int *arr, int user, unsigned int *supported_counters) { int i, ec, rc = 0; - for (i = 0; i < size; ++i) { + for (i = 0; i < MAX_NUM_PAI; ++i) { + if ((user && is_user_space(i) != KERNEL_AND_USER_COUNTER) || + supported_counters[i] != 1 || is_user_space(i) == SUPPRESS_COUNTER) + continue; ec = ioctl(arr[i], PERF_EVENT_IOC_DISABLE, 0); if (ec < 0) { eprint("Ioctl(PERF_EVENT_IOC_DISABLE) failed with errno=%d [%s]\n", @@ -633,14 +659,14 @@ } -int perf_disable_ctr(enum ctr_e ctr) +int perf_disable_ctr(enum ctr_e ctr, unsigned int *supported_counters) { struct percpucounter *pcpu; int ec, rc = 0; if (ctr == ALL_COUNTER) { for (ctr = 0; ctr < ALL_COUNTER; ctr++) { - rc = perf_disable_ctr(ctr); + rc = perf_disable_ctr(ctr, supported_counters); if (rc != 0) return rc; } @@ -660,7 +686,7 @@ endforeachcpu(); } else if (ctr == PAI_USER) { foreachcpu(pcpu) { - ec = disable_array(pcpu->pai_user, NUM_PAI_USER); + ec = disable_array(pcpu->pai_user, 1, supported_counters); if (ec < 0) rc = -1; } @@ -671,7 +697,7 @@ endforeachcpu(); } else if (ctr == PAI_KERNEL) { foreachcpu(pcpu) { - ec = disable_array(pcpu->pai_kernel, NUM_PAI_KERNEL); + ec = disable_array(pcpu->pai_kernel, 0, supported_counters); if (ec < 0) rc = -1; } @@ -688,11 +714,14 @@ } -static int reset_array(int *arr, int size) +static int reset_array(int *arr, int user, unsigned int *supported_counters) { int ec, rc = 0, i; - for (i = 0; i < size; ++i) { + for (i = 0; i < MAX_NUM_PAI; ++i) { + if ((user && is_user_space(i) != KERNEL_AND_USER_COUNTER) || + supported_counters[i] != 1 || is_user_space(i) == SUPPRESS_COUNTER) + continue; ec = ioctl(arr[i], PERF_EVENT_IOC_RESET, 0); if (ec < 0) { eprint("Ioctl(PERF_EVENT_IOC_RESET) failed with 
errno=%d [%s]\n", @@ -704,14 +733,15 @@ } -int perf_reset_ctr(enum ctr_e ctr, uint64_t *value) +int perf_reset_ctr(enum ctr_e ctr, uint64_t *value, unsigned int + *supported_counters) { struct percpucounter *pcpu; int ec, rc = 0; if (ctr == ALL_COUNTER) { for (ctr = 0; ctr < ALL_COUNTER; ctr++) { - rc = perf_reset_ctr(ctr, value); + rc = perf_reset_ctr(ctr, value, supported_counters); if (rc != 0) return rc; } @@ -727,14 +757,14 @@ endforeachcpu(); } else if (ctr == PAI_USER) { foreachcpu(pcpu) { - ec = reset_array(pcpu->pai_user, NUM_PAI_USER); + ec = reset_array(pcpu->pai_user, 1, supported_counters); if (ec < 0) rc = -1; } endforeachcpu(); } else if (ctr == PAI_KERNEL) { foreachcpu(pcpu) { - ec = reset_array(pcpu->pai_kernel, NUM_PAI_KERNEL); + ec = reset_array(pcpu->pai_kernel, 0, supported_counters); if (ec < 0) rc = -1; } @@ -743,12 +773,13 @@ rc = -1; } if (rc == 0) - rc = perf_read_ctr(ctr, value); + rc = perf_read_ctr(ctr, value, supported_counters); return rc; } -int perf_read_ctr(enum ctr_e ctr, uint64_t *value) +int perf_read_ctr(enum ctr_e ctr, uint64_t *value, unsigned int + *supported_counters) { struct percpucounter *pcpu; int ec, rc = 0; @@ -761,11 +792,26 @@ return 0; } if (ctr == PAI_USER) { - *value = NUM_PAI_USER; + int c = 0; + + for (int i = 0; i < MAX_NUM_PAI; i++) { + if (is_user_space(i) == KERNEL_AND_USER_COUNTER && + supported_counters[i] == 1) + c++; + } + + *value = c; return 0; } if (ctr == PAI_KERNEL) { - *value = NUM_PAI_KERNEL; + int c = 0; + + for (int i = 0; i < MAX_NUM_PAI; i++) { + if (supported_counters[i] == 1) + c++; + } + + *value = c; return 0; } if (ctr >= ALL_COUNTER) @@ -802,14 +848,15 @@ int perf_read_pai_ctr(unsigned int ctrnum, int user, uint64_t *value) { struct percpucounter *pcpu; - unsigned int maxctr; int *arr, ec, rc = 0; uint64_t val; *value = 0; - maxctr = user ? 
NUM_PAI_USER : NUM_PAI_KERNEL; - if (ctrnum >= maxctr) + + if (is_user_space(ctrnum) == SUPPRESS_COUNTER || + (user && is_user_space(ctrnum) == KERNEL_ONLY_COUNTER)) return -1; + foreachcpu(pcpu) { arr = user ? pcpu->pai_user : pcpu->pai_kernel; ec = read(arr[ctrnum], &val, sizeof(val)); diff -Nru s390-tools-2.31.0/cpumf/lscpumf.c s390-tools-2.33.1/cpumf/lscpumf.c --- s390-tools-2.31.0/cpumf/lscpumf.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/cpumf/lscpumf.c 2024-05-28 08:26:36.000000000 +0200 @@ -3473,6 +3473,7 @@ read_ccerror(cp, *len); break; case UTIL_ARCH_MACHINE_TYPE_Z16: + case UTIL_ARCH_MACHINE_TYPE_Z16_A02: cp = cpumcf_z16_counters; *len = ARRAY_SIZE(cpumcf_z16_counters); } diff -Nru s390-tools-2.31.0/dasdfmt/dasdfmt.8 s390-tools-2.33.1/dasdfmt/dasdfmt.8 --- s390-tools-2.31.0/dasdfmt/dasdfmt.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/dasdfmt/dasdfmt.8 2024-05-28 08:26:36.000000000 +0200 @@ -7,11 +7,11 @@ dasdfmt \- formatting of DASD (ECKD) disk drives. .SH SYNOPSIS -\fBdasdfmt\fR [-h] [-t] [-v] [-y] [-p] [-P] [-m \fIstep\fR] +\fBdasdfmt\fR [\-h] [\-t] [\-v] [\-y] [\-p] [\-P] [\-m \fIstep\fR] .br - [-r \fIcylinder\fR] [-b \fIblksize\fR] [-l \fIvolser\fR] [-d \fIlayout\fR] + [\-r \fIcylinder\fR] [\-b \fIblksize\fR] [\-l \fIvolser\fR] [\-d \fIlayout\fR] .br - [-L] [-V] [-F] [-k] [-C] [-M \fImode\fR] \fIdevice\fR + [\-L] [\-V] [\-F] [\-k] [\-C] [\-M \fImode\fR] \fIdevice\fR .SH DESCRIPTION \fBdasdfmt\fR formats a DASD (ECKD) disk drive to prepare it @@ -26,26 +26,26 @@ .SH OPTIONS .TP -\fB-h\fR or \fB--help\fR +\fB\-h\fR or \fB\-\-help\fR Print usage and exit. .TP -\fB-t\fR or \fB--test\fR +\fB\-t\fR or \fB\-\-test\fR Disables any modification of the disk drive. .br \fBdasdfmt\fR just prints out, what it \fBwould\fR do. .TP -\fB-v\fR +\fB\-v\fR Increases verbosity. .TP -\fB-y\fR +\fB\-y\fR Start formatting without further user-confirmation. 
.TP -\fB--norecordzero\fR +\fB\-\-norecordzero\fR Remove permission for subsystem to format write record zero. .br This is an expert option: Per default in recent dasd drivers, subsystems are @@ -54,32 +54,32 @@ .br .TP -\fB-L\fR or \fB--no_label\fR +\fB\-L\fR or \fB\-\-no_label\fR Omit the writing of a disk label after formatting. .br This makes only sense for the 'ldl' disk layout. .br -The '-L' option has to be specified after the '-d ldl' option. +The '\-L' option has to be specified after the '\-d ldl' option. .br -e.g. dasdfmt -d ldl -L /dev/... +e.g. dasdfmt \-d ldl \-L /dev/... .TP -\fB-V\fR or \fB--version\fR +\fB\-V\fR or \fB\-\-version\fR Print version number and exit. .TP -\fB-F\fR or \fB--force\fR +\fB\-F\fR or \fB\-\-force\fR Formats the device without performing sanity checking. .TP -\fB-C\fR or \fB--check_host_count\fR +\fB\-C\fR or \fB\-\-check_host_count\fR Force dasdfmt to check the host access open count to ensure the device is not online on another operating system instance .TP -\fB-d\fR \fIlayout\fR or \fB--disk_layout\fR=\fIlayout\fR +\fB\-d\fR \fIlayout\fR or \fB\-\-disk_layout\fR=\fIlayout\fR Formats the device with compatible disk layout or linux disk layout. \fIlayout\fR is either \fIcdl\fR for the compatible disk layout (default) or \fIldl\fR for the linux disk layout. @@ -95,39 +95,39 @@ running in background or redirecting the output to a file. .TP -\fB-P\fR or \fB--percentage\fR +\fB\-P\fR or \fB\-\-percentage\fR Print one line for each formatted cylinder showing the number of the cylinder and percentage of formatting process. Intended to be used by higher level interfaces. .TP -\fB-m\fR \fIstep\fR or \fB--hashmarks\fR=\fIstep\fR +\fB\-m\fR \fIstep\fR or \fB\-\-hashmarks\fR=\fIstep\fR Print a hashmark every \fIstep\fR cylinders. The value \fIstep\fR has to be within range [1,1000], otherwise it will be set to the default, which is 10. 
.br You can use this option to see the progress of formatting in case you -are not able to use the progress bar option -p, e.g. with a 3270 +are not able to use the progress bar option \-p, e.g. with a 3270 terminal. .br -The value will be at least as big as the -r or --requestsize value. +The value will be at least as big as the \-r or \-\-requestsize value. .br .TP -\fB-M\fR \fImode\fR or \fB--mode\fR=\fImode\fR +\fB\-M\fR \fImode\fR or \fB\-\-mode\fR=\fImode\fR Specify the \fImode\fR to be used to format the device. Valid modes are: .RS .IP full Format the entire disk with the specified blocksize. (default) .IP quick -Format the first two tracks and write label and partition information. Use this -option only if you are sure that the target DASD already contains a regular -format with the specified blocksize. A blocksize can optionally be specified -using \fB-b\fR (\fB--blocksize\fR). -.br -For thin-provisioned DASD ESE volumes, quick is the default mode. A full space -release then precedes the formatting step. If this space release fails, dasdfmt -falls back to a full-format mode. Formatting stops if the space release fails -and quick mode was specified explicitly using \fB-M\fR. Specify the -\fB--no-discard\fR option to omit the space release. +Format the first two tracks and write label and partition information. +.br +Use this option for DASD ESE volumes to take the benefits of thin provisioning. +In this case, a full space release precedes the formatting step. If this space +release fails, then the formatting also fails. Specify the \fB\-\-no\-discard\fR +option to omit the space release. +.br +For non-ESE volumes use this option only if you are sure that the target DASD +already contains a regular format with the specified blocksize. A blocksize can +optionally be specified using \fB\-b\fR (\fB\-\-blocksize\fR). .IP expand Format all unformatted tracks at the end of the target DASD. 
This mode assumes @@ -135,20 +135,20 @@ formatted, while a consecutive set of tracks at the end are unformatted. You can use this mode to make added space available for Linux use after dynamically increasing the size of a DASD volume. A blocksize can optionally be specified -using \fB-b\fR (\fB--blocksize\fR). +using \fB\-b\fR (\fB\-\-blocksize\fR). .RE .TP -\fB--check\fR +\fB\-\-check\fR Perform a complete format check on a DASD volume. A blocksize can be specified -with \fB-b\fR (\fB--blocksize\fR). +with \fB\-b\fR (\fB\-\-blocksize\fR). .TP -\fB--no-discard\fR +\fB\-\-no\-discard\fR Omit a full space release when formatting a thin-provisioned DASD ESE volume. .TP -\fB-r\fR \fIcylindercount\fR or \fB--requestsize\fR=\fIcylindercount\fR +\fB\-r\fR \fIcylindercount\fR or \fB\-\-requestsize\fR=\fIcylindercount\fR Number of cylinders to be processed in one formatting step. The value must be an integer in the range 1 - 255. .br @@ -158,19 +158,19 @@ .br .TP -\fB-b\fR \fIblksize\fR or \fB--blocksize\fR=\fIblksize\fR +\fB\-b\fR \fIblksize\fR or \fB\-\-blocksize\fR=\fIblksize\fR Specify blocksize to be used. \fIblksize\fR must be a positive integer and always be a power of two. The recommended blocksize is 4096 bytes. .TP -\fB-l\fR \fIvolser\fR or \fB--label\fR=\fIvolser\fR +\fB\-l\fR \fIvolser\fR or \fB\-\-label\fR=\fIvolser\fR Specify the volume serial number or volume identifier to be written to disk after formatting. If no label is specified, a sensible default is used. \fIvolser\fR is interpreted as ASCII string and is automatically converted to uppercase and then to EBCDIC. .br -e.g. -l LNX001 or --label=DASD01 +e.g. \-l LNX001 or \-\-label=DASD01 .br The \fIvolser\fR identifies by serial number the volume. A volume serial @@ -192,11 +192,11 @@ In case you really have to use special characters, make sure you are using quotes. In addition there is a special handling for the '$' sign. Please specify it using '\\$' if necessary. .br -e.g. 
-l 'a@b\\$c#' to get A@B$C# +e.g. \-l 'a@b\\$c#' to get A@B$C# .br .TP -\fB-k\fR or \fB--keep_volser\fR +\fB\-k\fR or \fB\-\-keep_volser\fR Keeps the Volume Serial Number when writing the Volume Label. This is useful if the volume already has a Serial Number that should not be overwritten. .br diff -Nru s390-tools-2.31.0/dasdfmt/dasdfmt.c s390-tools-2.33.1/dasdfmt/dasdfmt.c --- s390-tools-2.31.0/dasdfmt/dasdfmt.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/dasdfmt/dasdfmt.c 2024-05-28 08:26:36.000000000 +0200 @@ -1230,7 +1230,7 @@ format_params->start_unit = first; } -static int dasdfmt_release_space(void) +static void dasdfmt_release_space(void) { format_data_t r = { .start_unit = 0, @@ -1240,21 +1240,12 @@ int err = 0; if (!g.ese || g.no_discard) - return 0; + return; printf("Releasing space for the entire device...\n"); err = dasd_release_space(g.dev_node, &r); - /* - * Warn or Error on failing RAS depending on QUICK mode set explicitly or automatically - */ - if (err && !g.mode_specified) { - warnx("Could not release space. Falling back to full format."); - return 1; - } else if (err && g.mode_specified) { + if (err) error("Could not release space: %s", strerror(err)); - } - - return 0; } static void dasdfmt_prepare_and_format(unsigned int cylinders, unsigned int heads, @@ -1454,12 +1445,8 @@ dasdfmt_prepare_and_format(cylinders, heads, p); break; case QUICK: - if (dasdfmt_release_space()) { - p->stop_unit = (cylinders * heads) - 1; - dasdfmt_prepare_and_format(cylinders, heads, p); - } else { - dasdfmt_quick_format(cylinders, heads, p); - } + dasdfmt_release_space(); + dasdfmt_quick_format(cylinders, heads, p); break; case EXPAND: dasdfmt_expand_format(cylinders, heads, p); @@ -1491,7 +1478,7 @@ } if (!g.mode_specified) - mode = g.ese ? 
QUICK : FULL; + mode = FULL; } /* diff -Nru s390-tools-2.31.0/dasdinfo/dasdinfo.8 s390-tools-2.33.1/dasdinfo/dasdinfo.8 --- s390-tools-2.31.0/dasdinfo/dasdinfo.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/dasdinfo/dasdinfo.8 2024-05-28 08:26:36.000000000 +0200 @@ -7,28 +7,28 @@ .B "dasdinfo " \- tool to read unique id from s390 DASD device .SH SYNOPSIS -.BI "dasdinfo [-a] [-l] [-u] [-x] [-e] {-i " -.BI "| -b " -.BI " | -d " +.BI "dasdinfo [\-a] [\-l] [\-u] [\-x] [\-e] {\-i " +.BI "| \-b " +.BI " | \-d " .BI "}" .sp -.BI "dasdinfo [-h] [-v]" +.BI "dasdinfo [\-h] [\-v]" .SH DESCRIPTION -.B dasdinfo +.B dasdinfo displays specific information about a specified DASD device. It is normally called from a udev rule, to provide udev with a unique id string and additional information (type, serial) for an S390 DASD drive. Udev can use this -information to create symlinks in /dev/disk/by-id and /dev/disk/by-label +information to create symlinks in /dev/disk/by\-id and /dev/disk/by\-label to the real device node. .SH OPTIONS .TP -.BI "-a|--all" +.BI "\-a|\-\-all" Same as -u -x -l .TP -.BI "-x|--extended-uid" +.BI "\-x|\-\-extended\-uid" Print DASD uid This option prints the full uid of the DASD. When z/VM provides two @@ -42,13 +42,13 @@ Qualifier (SNEQ) (or hypervisor injected self-description data) is available by applying the PTFs for VM APAR VM64273 on z/VM 5.2.0 and higher. .TP -.BI "-u|--uid" +.BI "\-u|\-\-uid" Print DASD uid without z/VM minidisk token z/VM may provide an additional token that can be used to distinguish -between different minidisks (see --extended-uid option). To remain +between different minidisks (see \-\-extended\-uid option). To remain compatible with systems that were installed on older Linux or z/VM -levels, the -u option will print the uid excluding any z/VM-provided +levels, the \-u option will print the uid excluding any z/VM-provided minidisk token. For example, if the extended uid is @@ -57,35 +57,35 @@ minidisk token, e.g. 
in an LPAR environment, then both uids are the same. .TP -.BI "-l|--label" +.BI "\-l|\-\-label" Print DASD volume label (volser). .TP -.BI "-i|--busid " +.BI "\-i|\-\-busid " Use the bus ID as input parameter, e.g. 0.0.e910. .TP -.BI "-b|--block " +.BI "\-b|\-\-block " Use the block device name as input parameter, e.g. dasdb. .TP -.BI "-d|--devnode " +.BI "\-d|\-\-devnode " Use a device node as input parameter, e.g. /dev/dasdb. .TP -.BI "-e|--export" +.BI "\-e|\-\-export" Print all values (ID_BUS, ID_TYPE, ID_SERIAL). .TP -.BI "-h|--help" +.BI "\-h|\-\-help" Print usage text. .TP -.BI "-v|--version" +.BI "\-v|\-\-version" Print version number. .SH EXAMPLES -dasdinfo -u -i 0.0.e910 +dasdinfo \-u \-i 0.0.e910 -dasdinfo -u -b dasdb +dasdinfo \-u \-b dasdb -dasdinfo -u -d /dev/dasdb +dasdinfo \-u \-d /dev/dasdb -All three examples should return the same unique ID for +All three examples should return the same unique ID for the same DASD device, e.g. IBM.75000000092461.e900.10. In case this uid is not available, dasdinfo will return diff -Nru s390-tools-2.31.0/dasdview/dasdview.8 s390-tools-2.33.1/dasdview/dasdview.8 --- s390-tools-2.31.0/dasdview/dasdview.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/dasdview/dasdview.8 2024-05-28 08:26:36.000000000 +0200 @@ -7,21 +7,21 @@ dasdview \- Display DASD and VTOC information and dump the content of a DASD to the console. .SH SYNOPSIS -\fBdasdview\fR [-h] [-v] +\fBdasdview\fR [\-h] [\-v] .br - [-b \fIbegin\fR] [-s \fIsize\fR] [-1|-2] + [\-b \fIbegin\fR] [\-s \fIsize\fR] [\-1|\-2] .br - [-i] [-x] [-j] [-c] + [\-i] [\-x] [\-j] [\-c] .br - [-l] [-t {\fIinfo\fR|\fIf1\fR|\fIf3\fR|\fIf4\fR|\fIf5\fR|\fIf7\fR|\fIf8\fR|\fIf9\fR}] + [\-l] [\-t {\fIinfo\fR|\fIf1\fR|\fIf3\fR|\fIf4\fR|\fIf5\fR|\fIf7\fR|\fIf8\fR|\fIf9\fR}] .br \fIdevice\fR .SH DESCRIPTION \fBdasdview\fR prints you some useful information of your disks to the console. 
-You can display a disk dump by specifying start point and offset and you can +You can display a disk dump by specifying start point and offset and you can print the volume label and VTOC entries. The \fIdevice\fR is the node of the device (e.g. '/dev/dasda'). -Any device node created by udev for kernel 2.6 can be used +Any device node created by udev for kernel 2.6 can be used (e.g. '/dev/dasd/0.0.b100/disc'). DASD devices in raw_track_access mode are supported and detected @@ -29,30 +29,30 @@ functions are available as in the regular mode, but the output may have a slightly different layout: .IP \(bu 2 -The disk dump functions (\fB-b\fR and \fB-s\fR) print the count, +The disk dump functions (\fB\-b\fR and \fB\-s\fR) print the count, key and data information for the whole track, and not just the contents of the data areas. .IP \(bu 2 -The VTOC listing (\fB-t\fR) print all specified DSCBs in the same +The VTOC listing (\fB\-t\fR) print all specified DSCBs in the same format as in the regular mode, but in the sequence as they appear in -the VTOC. The \fB-t info\fR overview contains more details for each +the VTOC. The \fB\-t info\fR overview contains more details for each data set than in the regular mode, to support the larger variety of data set layouts. .SH OPTIONS .TP -\fB-h\fR or \fB--help\fR +\fB\-h\fR or \fB\-\-help\fR Print usage and exit. .TP -\fB-v\fR or \fB--version\fR +\fB\-v\fR or \fB\-\-version\fR Print version number and exit. .TP -\fB-b\fR \fIbegin\fR or \fB--begin=\fR\fIbegin\fR -Print a disk dump to the console, starting with \fIbegin\fR. The content of -the disk will be displayed in hexadecimal numbers, ASCII text and EBCDIC text. -If no size is specified dasdview will take the default size. The variable +\fB\-b\fR \fIbegin\fR or \fB\-\-begin=\fR\fIbegin\fR +Print a disk dump to the console, starting with \fIbegin\fR. The content of +the disk will be displayed in hexadecimal numbers, ASCII text and EBCDIC text. 
+If no size is specified dasdview will take the default size. The variable \fIbegin\fR can be specified in one of the following ways: .br @@ -63,10 +63,10 @@ .br \fBNote 1:\fR dasdview will show you the content of your disk using the DASD -driver. If this driver decides to hide or add some parts of the disk, you have -to live with it. This happens for example with the first two tracks of a -cdl-formatted disk. In this case the DASD driver fills up shorter blocks with -zeros to have a constant blocksize. And all applications, including dasdview, +driver. If this driver decides to hide or add some parts of the disk, you have +to live with it. This happens for example with the first two tracks of a +cdl-formatted disk. In this case the DASD driver fills up shorter blocks with +zeros to have a constant blocksize. And all applications, including dasdview, believe it. .br \fBNote 2:\fR In raw_track_access mode \fIbegin\fR must be aligned to @@ -76,24 +76,24 @@ examples: .br - -b 32 --> start printing at Byte 32 + \-b 32 --> start printing at Byte 32 .br - -b 32k --> start printing at kByte 32 + \-b 32k --> start printing at kByte 32 .br - -b 32m --> start printing at MByte 32 + \-b 32m --> start printing at MByte 32 .br - -b 32b --> start printing at block 32 + \-b 32b --> start printing at block 32 .br - -b 32t --> start printing at track 32 + \-b 32t --> start printing at track 32 .br - -b 32c --> start printing at cylinder 32 + \-b 32c --> start printing at cylinder 32 .TP -\fB-s\fR \fIsize\fR or \fB--size=\fR\fIsize\fR -Print a disk dump to the console, starting with \fIbegin\fR, specified with -the \fB-b\fR option and size \fIsize\fR. The content of the disk will be -displayed in hexadecimal numbers, ASCII text and EBCDIC text. If no start -value is specified dasdview will take the default start value. 
The variable +\fB\-s\fR \fIsize\fR or \fB\-\-size=\fR\fIsize\fR +Print a disk dump to the console, starting with \fIbegin\fR, specified with +the \fB\-b\fR option and size \fIsize\fR. The content of the disk will be +displayed in hexadecimal numbers, ASCII text and EBCDIC text. If no start +value is specified dasdview will take the default start value. The variable \fIsize\fR can be specified in one of the following ways: .br @@ -111,76 +111,76 @@ examples: .br - -s 16 --> use a 16 Byte size + \-s 16 --> use a 16 Byte size .br - -s 16k --> use a 16 kByte size + \-s 16k --> use a 16 kByte size .br - -s 16m --> use a 16 MByte size + \-s 16m --> use a 16 MByte size .br - -s 16b --> use a 16 block size + \-s 16b --> use a 16 block size .br - -s 16t --> use a 16 track size + \-s 16t --> use a 16 track size .br - -s 16c --> use a 16 cylinder size + \-s 16c --> use a 16 cylinder size .TP -\fB-1\fR -This option tells dasdview to print the disk dump using format 1. This means -you will get 16 Bytes per line in hex, ascii and ebcdic. There is no line +\fB\-1\fR +This option tells dasdview to print the disk dump using format 1. This means +you will get 16 Bytes per line in hex, ascii and ebcdic. There is no line number. .br -The \fB-1\fR option makes only sense with the \fB-b\fR and/or the \fB-s\fR -options. +The \fB\-1\fR option makes only sense with the \fB\-b\fR and/or the \fB\-s\fR +options. .br This is the default. .TP -\fB-2\fR -This option tells dasdview to print the disk dump using format 2. This means -you will get 8 Bytes per line in hex, ascii and ebcdic. And in addition a line +\fB\-2\fR +This option tells dasdview to print the disk dump using format 2. This means +you will get 8 Bytes per line in hex, ascii and ebcdic. And in addition a line number and a decimal and hexadecimal byte count will be printed. 
.br -The \fB-2\fR option makes only sense with the \fB-b\fR and/or the \fB-s\fR +The \fB\-2\fR option makes only sense with the \fB\-b\fR and/or the \fB\-s\fR options. In raw_track_access mode this format is not supported and the option will be ignored. .TP -\fB-i\fR or \fB--info\fR +\fB\-i\fR or \fB\-\-info\fR Print some useful information (e.g. device node/number/type or geometry data). -When running dasdview on a kernel 2.6 based distribution the busid +When running dasdview on a kernel 2.6 based distribution the busid is printed instead of the device number. .TP -\fB-x\fR or \fB--extended\fR +\fB\-x\fR or \fB\-\-extended\fR Print some more DASD information (e.g. open count, subchannel identifier). .TP -\fB-j\fR or \fB--volser\fR +\fB\-j\fR or \fB\-\-volser\fR Print volume serial number (volume identifier). .TP -\fB-l\fR or \fB--label\fR +\fB\-l\fR or \fB\-\-label\fR Print the volume label. .TP -\fB-c\fR or \fB--characteristic\fR +\fB\-c\fR or \fB\-\-characteristic\fR Print some information about the device e.g. if it is encrypted. .TP -\fB-t\fR \fIspec\fR or \fB--vtoc=\fR\fIspec\fR +\fB\-t\fR \fIspec\fR or \fB\-\-vtoc=\fR\fIspec\fR Print the VTOC (table of content) or single VTOC entries to the console. \fIspec\fR can be one of the following strings: .br -\fIinfo\fR: +\fIinfo\fR: .br -Gives you a VTOC overview. You will see what other S/390 or zSeries operating +Gives you a VTOC overview. You will see what other S/390 or zSeries operating systems would see (e.g. data set names and sizes). .br \fIf1\fR: .br -Print the content of all format 1 DSCBs. +Print the content of all format 1 DSCBs. .br \fIf3\fR: @@ -188,17 +188,17 @@ Print the content of all format 3 DSCBs. .br -\fIf4\fR: +\fIf4\fR: .br Print the content of the format 4 DSCB. .br -\fIf5\fR: +\fIf5\fR: .br Print the content of the format 5 DSCB. .br -\fIf7\fR: +\fIf7\fR: .br Print the content of the format 7 DSCB. .br @@ -213,6 +213,6 @@ Print the content of all format 9 DSCBs. 
.br -\fIall\fR: +\fIall\fR: .br -Print the content of all DSCBs. +Print the content of all DSCBs. diff -Nru s390-tools-2.31.0/debian/changelog s390-tools-2.33.1/debian/changelog --- s390-tools-2.31.0/debian/changelog 2024-04-02 12:45:30.000000000 +0200 +++ s390-tools-2.33.1/debian/changelog 2024-05-28 10:32:08.000000000 +0200 @@ -1,3 +1,28 @@ +s390-tools (2.33.1-0ubuntu1) oracular; urgency=medium + + * New upstream release, requested in LP: #2067355 + - Refresh d/p/rust_paths.patch to remove fuzz and to avoid build failure + - Run d/rules vendor-tarball to create new 'rust-vendor' folder + - Adjust d/p/vendor-remove-unused-deps.patch due to new rust-vendor folder + - Remove the following patches, since they're incl. in the new version: + - d/p/lp-2058944-dbginfo.sh-dash-compatible-copy-sequence.patch + - d/p/lp-2059303-rust-pv-test-Code-Certificate-refactoring.patch + - d/p/lp-2059303-rust-pv-Support-Armonk-in-IBM-signing-key-subject.patch + - d/p/lp-2059303-genprotimg-support-Armonk-in-IBM-signing-key-....patch + - d/p/lp-2059303-libpv-Support-Armonk-in-IBM-signing-key-subject.patch + - d/p/lp-2059303-pvattest-Fix-root-ca-parsing.patch + - Update XS-Vendored-Sources-Rust field in d/control. + - Adjust d/s390-tools.install* to cope with changed folders + and to move doc files (readmes) from usr/share/s390-tools/* to + usr/share/doc/s390-tools/* to solve lintian msgs. + - Replace obsolete Build-Depends pkg-config pkgconf to solve lintian msg. + - Redefine Section in d/control for libraries to solve lintian msgs. + - Update DEP-3 header (fix typo and expand description) of + d/p/debian/patches/gzip-files-without-timestamps-or-names.patch. + - Add Build-Depends-Package line to *.symbol files to solve lintian msgs. 
+ + -- Frank Heimes Tue, 28 May 2024 10:32:08 +0200 + s390-tools (2.31.0-0ubuntu5) noble; urgency=medium * Add the following commits as patches: diff -Nru s390-tools-2.31.0/debian/control s390-tools-2.33.1/debian/control --- s390-tools-2.31.0/debian/control 2024-02-26 15:41:56.000000000 +0100 +++ s390-tools-2.33.1/debian/control 2024-05-28 10:32:08.000000000 +0200 @@ -23,12 +23,12 @@ libssl-dev, libxml2-dev, libz-dev, - pkg-config, + pkgconf, quilt Standards-Version: 4.6.2 Homepage: https://github.com/ibm-s390-linux/s390-tools XS-Build-Indep-Architecture: s390x -XS-Vendored-Sources-Rust: addr2line@0.21.0, adler@1.0.2, aho-corasick@1.1.2, anstream@0.3.2, anstyle-parse@0.2.0, anstyle-query@1.0.0, anstyle-wincon@1.0.1, anstyle@1.0.0, anyhow@1.0.71, assert-json-diff@2.0.2, autocfg@1.1.0, backtrace@0.3.69, bitflags@1.3.2, bitflags@2.4.1, byteorder@1.4.3, bytes@1.5.0, cc@1.0.79, cfg-if@1.0.0, clap@4.3.4, clap_builder@4.3.4, clap_derive@4.3.2, clap_lex@0.5.0, colorchoice@1.0.0, curl-sys@0.4.68+curl-8.4.0, curl@0.4.44, equivalent@1.0.1, errno-dragonfly@0.1.2, errno@0.3.1, fnv@1.0.7, foreign-types-shared@0.1.1, foreign-types@0.3.2, form_urlencoded@1.2.0, futures-channel@0.3.29, futures-core@0.3.29, futures-executor@0.3.29, futures-io@0.3.29, futures-macro@0.3.29, futures-sink@0.3.29, futures-task@0.3.29, futures-util@0.3.29, futures@0.3.29, getrandom@0.2.10, gimli@0.28.1, h2@0.3.22, hashbrown@0.12.3, hashbrown@0.14.3, heck@0.4.1, hermit-abi@0.3.1, http-body@0.4.5, http@0.2.11, httparse@1.8.0, httpdate@1.0.3, hyper@0.14.27, indexmap@1.9.3, indexmap@2.1.0, io-lifetimes@1.0.11, is-terminal@0.4.7, itoa@1.0.6, lazy_static@1.4.0, libc@0.2.146, libz-sys@1.1.9, linux-raw-sys@0.3.8, lock_api@0.4.11, log@0.4.19, memchr@2.6.4, miniz_oxide@0.7.1, mio@0.8.8, mockito@1.2.0, num_cpus@1.16.0, object@0.32.1, once_cell@1.19.0, openssl-macros@0.1.1, openssl-probe@0.1.5, openssl-sys@0.9.96, openssl@0.10.60, parking_lot@0.12.1, parking_lot_core@0.9.9, percent-encoding@2.3.0, 
pin-project-lite@0.2.13, pin-utils@0.1.0, pkg-config@0.3.27, ppv-lite86@0.2.17, proc-macro2@1.0.75, quote@1.0.35, rand@0.8.5, rand_chacha@0.3.1, rand_core@0.6.4, redox_syscall@0.4.1, regex-automata@0.4.3, regex-syntax@0.8.2, regex@1.10.2, rustc-demangle@0.1.23, rustix@0.37.27, ryu@1.0.13, schannel@0.1.21, scopeguard@1.2.0, serde@1.0.164, serde_derive@1.0.164, serde_json@1.0.99, serde_test@1.0.176, serde_urlencoded@0.7.1, serde_yaml@0.9.21, signal-hook-registry@1.4.1, similar@2.3.0, slab@0.4.9, smallvec@1.11.2, socket2@0.4.9, socket2@0.5.4, strsim@0.10.0, syn@2.0.47, terminal_size@0.2.6, thiserror-impl@1.0.40, thiserror@1.0.40, tokio-macros@2.1.0, tokio-util@0.7.10, tokio@1.33.0, tower-service@0.3.2, tracing-core@0.1.32, tracing@0.1.40, try-lock@0.2.4, unicode-ident@1.0.9, unsafe-libyaml@0.2.8, utf8parse@0.2.1, vcpkg@0.2.15, want@0.3.1, wasi@0.11.0+wasi-snapshot-preview1, zerocopy-derive@0.7.32, zerocopy@0.7.32 +XS-Vendored-Sources-Rust: aho-corasick@1.1.2, anstream@0.3.2, anstyle-parse@0.2.0, anstyle-query@1.0.0, anstyle-wincon@1.0.1, anstyle@1.0.0, anyhow@1.0.71, autocfg@1.1.0, bitflags@1.3.2, bitflags@2.4.1, byteorder@1.4.3, cc@1.0.79, cfg-if@1.0.0, clap@4.3.4, clap_builder@4.3.4, clap_complete@4.3.1, clap_derive@4.3.2, clap_lex@0.5.0, colorchoice@1.0.0, curl-sys@0.4.72+curl-8.6.0, curl@0.4.44, errno-dragonfly@0.1.2, errno@0.3.1, foreign-types-shared@0.1.1, foreign-types@0.3.2, getrandom@0.2.10, hashbrown@0.12.3, heck@0.4.1, hermit-abi@0.3.1, indexmap@1.9.3, io-lifetimes@1.0.11, is-terminal@0.4.7, itoa@1.0.6, lazy_static@1.4.0, libc@0.2.146, libz-sys@1.1.9, linux-raw-sys@0.3.8, log@0.4.19, memchr@2.6.4, once_cell@1.19.0, openssl-macros@0.1.1, openssl-probe@0.1.5, openssl-sys@0.9.96, openssl@0.10.60, pkg-config@0.3.27, ppv-lite86@0.2.17, proc-macro2@1.0.75, quote@1.0.35, rand@0.8.5, rand_chacha@0.3.1, rand_core@0.6.4, regex-automata@0.4.3, regex-syntax@0.8.2, regex@1.10.2, rustix@0.37.27, ryu@1.0.13, schannel@0.1.21, serde@1.0.164, serde_derive@1.0.164, 
serde_test@1.0.176, serde_yaml@0.9.21, socket2@0.4.9, strsim@0.10.0, syn@2.0.47, terminal_size@0.2.6, thiserror-impl@1.0.40, thiserror@1.0.40, unicode-ident@1.0.9, unsafe-libyaml@0.2.8, utf8parse@0.2.1, vcpkg@0.2.15, wasi@0.11.0+wasi-snapshot-preview1, zerocopy-derive@0.7.32, zerocopy@0.7.32 Package: s390-tools Architecture: s390 s390x amd64 ppc64el arm64 @@ -100,6 +100,7 @@ collection of utilities. Package: libekmfweb1 +Section: libs Architecture: s390 s390x Depends: ${misc:Depends}, ${shlibs:Depends} Description: integration with IBM Enterprise Key Management Foundation @@ -110,6 +111,7 @@ This package contains the runtime library. Package: libekmfweb-dev +Section: libdevel Architecture: s390 s390x Depends: ${misc:Depends} Description: integration with IBM Enterprise Key Management Foundation (development) @@ -120,6 +122,7 @@ This package contains the development library and headers. Package: libkmipclient1 +Section: libs Architecture: s390 s390x Depends: ${misc:Depends}, ${shlibs:Depends} Description: KMIP client library @@ -131,6 +134,7 @@ This package contains the runtime library. 
Package: libkmipclient-dev +Section: libdevel Architecture: s390 s390x Depends: ${misc:Depends} Description: KMIP client library (development) diff -Nru s390-tools-2.31.0/debian/libekmfweb1.symbols s390-tools-2.33.1/debian/libekmfweb1.symbols --- s390-tools-2.31.0/debian/libekmfweb1.symbols 2024-02-06 11:33:18.000000000 +0100 +++ s390-tools-2.33.1/debian/libekmfweb1.symbols 2024-05-28 10:32:08.000000000 +0200 @@ -1,4 +1,5 @@ libekmfweb.so.1 libekmfweb1 #MINVER# +* Build-Depends-Package: libekmfweb-dev LIBEKMFWEB_1.0@LIBEKMFWEB_1.0 2.15.1 ekmf_check_feature@LIBEKMFWEB_1.0 2.15.1 ekmf_check_login_token@LIBEKMFWEB_1.0 2.15.1 diff -Nru s390-tools-2.31.0/debian/libkmipclient1.symbols s390-tools-2.33.1/debian/libkmipclient1.symbols --- s390-tools-2.31.0/debian/libkmipclient1.symbols 2024-02-06 11:33:18.000000000 +0100 +++ s390-tools-2.33.1/debian/libkmipclient1.symbols 2024-05-28 10:32:08.000000000 +0200 @@ -1,4 +1,5 @@ libkmipclient.so.1 libkmipclient1 #MINVER# +* Build-Depends-Package: libkmipclient-dev LIBKMIPCLIENT_1.0@LIBKMIPCLIENT_1.0 2.17 kmip_connection_free@LIBKMIPCLIENT_1.0 2.17 kmip_connection_get_server_cert@LIBKMIPCLIENT_1.0 2.17 @@ -219,4 +220,4 @@ kmip_node_new_structure_va@LIBKMIPCLIENT_1.0 2.17 kmip_node_new_text_string@LIBKMIPCLIENT_1.0 2.17 kmip_node_upref@LIBKMIPCLIENT_1.0 2.17 - kmip_set_default_protocol_version@LIBKMIPCLIENT_1.0 2.17 \ No newline at end of file + kmip_set_default_protocol_version@LIBKMIPCLIENT_1.0 2.17 diff -Nru s390-tools-2.31.0/debian/patches/gzip-files-without-timestamps-or-names.patch s390-tools-2.33.1/debian/patches/gzip-files-without-timestamps-or-names.patch --- s390-tools-2.31.0/debian/patches/gzip-files-without-timestamps-or-names.patch 2024-02-06 11:33:18.000000000 +0100 +++ s390-tools-2.33.1/debian/patches/gzip-files-without-timestamps-or-names.patch 2024-05-28 10:32:08.000000000 +0200 @@ -1,6 +1,7 @@ -Description: Do gzip compressions without timestamps o -names +Description: Do gzip compression without timestamps or 
names Having timestamps in gzip leads to the lintian warning 'package-contains-timestamped-gzip' and will harm reproducible builds. + Gzip option '-n' avoids saving original file name and timestamp by default. Author: Frank Heimes Origin: other Bug: https://bugs.launchpad.net/bugs/2049612 diff -Nru s390-tools-2.31.0/debian/patches/lp-2058944-dbginfo.sh-dash-compatible-copy-sequence.patch s390-tools-2.33.1/debian/patches/lp-2058944-dbginfo.sh-dash-compatible-copy-sequence.patch --- s390-tools-2.31.0/debian/patches/lp-2058944-dbginfo.sh-dash-compatible-copy-sequence.patch 2024-04-02 12:45:30.000000000 +0200 +++ s390-tools-2.33.1/debian/patches/lp-2058944-dbginfo.sh-dash-compatible-copy-sequence.patch 1970-01-01 01:00:00.000000000 +0100 @@ -1,61 +0,0 @@ -From 1c128c0d11f23f9cf4bb9f4cf89a48b3011c4a99 Mon Sep 17 00:00:00 2001 -From: Joern Siglen -Date: Tue, 12 Mar 2024 11:34:37 +0100 -Subject: [PATCH] dbginfo.sh: dash compatible copy sequence - -rewrite the copy of dbginfo.sh for dash compatibility - -Reviewed-by: Mike Storzer -Reviewed-by: Mario Held -Signed-off-by: Joern Siglen -Signed-off-by: Steffen Eiden - -Origin: upstream, https://github.com/ibm-s390-linux/s390-tools/commit/1c128c0d11f23f9cf4bb9f4cf89a48b3011c4a99 -Bug-Ubuntu: https://bugs.launchpad.net/bugs/2058944 -Last-Update: 2024-03-25 - ---- - CHANGELOG.md | 1 + - scripts/dbginfo.sh | 7 +++---- - 2 files changed, 4 insertions(+), 4 deletions(-) - -diff --git a/CHANGELOG.md b/CHANGELOG.md -index 0424611a..b8792daa 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -1,6 +1,7 @@ Release history for s390-tools (MIT version) - Release history for s390-tools (MIT version) - -------------------------------------------- - -+ - dbginfo.sh: dash compatible copy sequence - * __v2.31.0 (2024-02-02)__ - - For Linux kernel version: 6.7 -diff --git a/scripts/dbginfo.sh b/scripts/dbginfo.sh -index 7763067a..e363c128 100755 ---- a/scripts/dbginfo.sh -+++ b/scripts/dbginfo.sh -@@ -15,7 +15,7 @@ export LC_ALL - 
######################################## - # Global used variables - readonly SCRIPTNAME="${0##*/}" # general name of this script --readonly STARTDIR="$(pwd)" # save calling directory -+readonly FULLPATHSCRIPT="$(readlink -f "${0}")" - # - readonly DATETIME="$(date +%Y-%m-%d-%H-%M-%S 2>/dev/null)" - readonly DOCKER=$(if type docker >/dev/null 2>&1; then echo "YES"; else echo "NO"; fi) -@@ -1420,9 +1420,8 @@ environment_setup() { - create_package() { - local rc_tar - pr_syslog_stdout ${step_num} "Finalizing: Creating archive with collected data" -- # get a copy of the script used - enabled for relative path calls -- cd "${STARTDIR}" -- cp -p "${BASH_SOURCE[0]}" "${WORKPATH}" -+ # get a copy of the script used -+ cp -p "${FULLPATHSCRIPT}" "${WORKPATH}" - # create the archive - cd "${WORKDIR_BASE}" - touch "${WORKARCHIVE}" --- -2.25.1 - diff -Nru s390-tools-2.31.0/debian/patches/lp-2059303-genprotimg-support-Armonk-in-IBM-signing-key-subject.patch s390-tools-2.33.1/debian/patches/lp-2059303-genprotimg-support-Armonk-in-IBM-signing-key-subject.patch --- s390-tools-2.31.0/debian/patches/lp-2059303-genprotimg-support-Armonk-in-IBM-signing-key-subject.patch 2024-04-02 12:45:30.000000000 +0200 +++ s390-tools-2.33.1/debian/patches/lp-2059303-genprotimg-support-Armonk-in-IBM-signing-key-subject.patch 1970-01-01 01:00:00.000000000 +0100 @@ -1,334 +0,0 @@ -From d14e7593cc6380911ca42b09e11c53477ae13d5c Mon Sep 17 00:00:00 2001 -From: Marc Hartmayer -Date: Thu, 14 Mar 2024 16:05:09 +0000 -Subject: [PATCH] genprotimg: support `Armonk` in IBM signing key subject - -New IBM signing certificates will have 'Armonk' as locality in the -subject. Make sure that certificate revocations lists (CRL) with -'Poughkeepsie' as issuer locality are still considered as valid as long -as they are signed with the IBM signing keys private key. In addition, -drop the check for 'issuer(HKD) == subject(HKSK)' as it doesn't improve -security. 
While at it, remove now unused functions and fix a memory leak -of @akid in `check_crl_issuer`. - -Reviewed-by: Christoph Schlameuss -Signed-off-by: Marc Hartmayer -Signed-off-by: Steffen Eiden - -Origin: upstream, https://github.com/ibm-s390-linux/s390-toolsd14e7593cc6380911ca42b09e11c53477ae13d5c -Bug-Ubuntu: https://bugs.launchpad.net/bugs/2059303 -Last-Update: 2024-04-02 - ---- - genprotimg/src/include/pv_crypto_def.h | 3 +- - genprotimg/src/utils/crypto.c | 210 ++++++++++++------------- - genprotimg/src/utils/crypto.h | 1 + - 3 files changed, 104 insertions(+), 110 deletions(-) - -diff --git a/genprotimg/src/include/pv_crypto_def.h b/genprotimg/src/include/pv_crypto_def.h -index 3635433..49710dc 100644 ---- a/genprotimg/src/include/pv_crypto_def.h -+++ b/genprotimg/src/include/pv_crypto_def.h -@@ -17,7 +17,8 @@ - /* IBM signing key subject */ - #define PV_IBM_Z_SUBJECT_COMMON_NAME "International Business Machines Corporation" - #define PV_IBM_Z_SUBJECT_COUNTRY_NAME "US" --#define PV_IBM_Z_SUBJECT_LOCALITY_NAME "Poughkeepsie" -+#define PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE "Poughkeepsie" -+#define PV_IBM_Z_SUBJECT_LOCALITY_NAME_ARMONK "Armonk" - #define PV_IBM_Z_SUBJECT_ORGANIZATIONONAL_UNIT_NAME_SUFFIX "Key Signing Service" - #define PV_IBM_Z_SUBJECT_ORGANIZATION_NAME "International Business Machines Corporation" - #define PV_IBM_Z_SUBJECT_STATE "New York" -diff --git a/genprotimg/src/utils/crypto.c b/genprotimg/src/utils/crypto.c -index e3bbf1b..86565b9 100644 ---- a/genprotimg/src/utils/crypto.c -+++ b/genprotimg/src/utils/crypto.c -@@ -664,62 +664,9 @@ static gboolean x509_name_data_by_nid_equal(X509_NAME *name, gint nid, - return memcmp(data, y, data_len) == 0; - } - --static gboolean own_X509_NAME_ENTRY_equal(const X509_NAME_ENTRY *x, -- const X509_NAME_ENTRY *y) --{ -- const ASN1_OBJECT *x_obj = X509_NAME_ENTRY_get_object(x); -- const ASN1_STRING *x_data = X509_NAME_ENTRY_get_data(x); -- const ASN1_OBJECT *y_obj = X509_NAME_ENTRY_get_object(y); -- 
const ASN1_STRING *y_data = X509_NAME_ENTRY_get_data(y); -- gint x_len = ASN1_STRING_length(x_data); -- gint y_len = ASN1_STRING_length(y_data); -- -- if (x_len < 0 || x_len != y_len) -- return FALSE; -- -- /* ASN1_STRING_cmp(x_data, y_data) == 0 doesn't work because it also -- * compares the type, which is sometimes different. -- */ -- return OBJ_cmp(x_obj, y_obj) == 0 && -- memcmp(ASN1_STRING_get0_data(x_data), -- ASN1_STRING_get0_data(y_data), -- (unsigned long)x_len) == 0; --} -- --static gboolean own_X509_NAME_equal(const X509_NAME *x, const X509_NAME *y) --{ -- gint x_count = X509_NAME_entry_count(x); -- gint y_count = X509_NAME_entry_count(y); -- -- if (x != y && (!x || !y)) -- return FALSE; -- -- if (x_count != y_count) -- return FALSE; -- -- for (gint i = 0; i < x_count; i++) { -- const X509_NAME_ENTRY *entry_i = X509_NAME_get_entry(x, i); -- gboolean entry_found = FALSE; -- -- for (gint j = 0; j < y_count; j++) { -- const X509_NAME_ENTRY *entry_j = -- X509_NAME_get_entry(y, j); -- -- if (own_X509_NAME_ENTRY_equal(entry_i, entry_j)) { -- entry_found = TRUE; -- break; -- } -- } -- -- if (!entry_found) -- return FALSE; -- } -- return TRUE; --} -- - /* Checks whether the subject of @cert is a IBM signing key subject. For this we - * must check that the subject is equal to: 'C = US, ST = New York, L = -- * Poughkeepsie, O = International Business Machines Corporation, CN = -+ * Poughkeepsie or Armonk, O = International Business Machines Corporation, CN = - * International Business Machines Corporation' and the organization unit (OUT) - * must end with the suffix ' Key Signing Service'. 
- */ -@@ -743,8 +690,10 @@ static gboolean has_ibm_signing_subject(X509 *cert) - PV_IBM_Z_SUBJECT_STATE)) - return FALSE; - -- if (!x509_name_data_by_nid_equal(subject, NID_localityName, -- PV_IBM_Z_SUBJECT_LOCALITY_NAME)) -+ if (!(x509_name_data_by_nid_equal(subject, NID_localityName, -+ PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE) || -+ x509_name_data_by_nid_equal(subject, NID_localityName, -+ PV_IBM_Z_SUBJECT_LOCALITY_NAME_ARMONK))) - return FALSE; - - if (!x509_name_data_by_nid_equal(subject, NID_organizationName, -@@ -806,6 +755,39 @@ static X509_NAME *x509_name_reorder_attributes(const X509_NAME *name, const gint - return g_steal_pointer(&ret); - } - -+/** Replace locality 'Armonk' with 'Pougkeepsie'. If Armonk was not set return -+ * `NULL`. -+ */ -+static X509_NAME *x509_armonk_locality_fixup(const X509_NAME *name) -+{ -+ g_autoptr(X509_NAME) ret = NULL; -+ int pos; -+ -+ /* Check if ``L=Armonk`` */ -+ if (!x509_name_data_by_nid_equal((X509_NAME *)name, NID_localityName, -+ PV_IBM_Z_SUBJECT_LOCALITY_NAME_ARMONK)) -+ return NULL; -+ -+ ret = X509_NAME_dup(name); -+ if (!ret) -+ g_abort(); -+ -+ pos = X509_NAME_get_index_by_NID(ret, NID_localityName, -1); -+ if (pos == -1) -+ return NULL; -+ -+ X509_NAME_ENTRY_free(X509_NAME_delete_entry(ret, pos)); -+ -+ /* Create a new name entry at the same position as before */ -+ if (X509_NAME_add_entry_by_NID( -+ ret, NID_localityName, MBSTRING_UTF8, -+ (const unsigned char *)&PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE, -+ sizeof(PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE) - 1, pos, 0) != 1) -+ return NULL; -+ -+ return g_steal_pointer(&ret); -+} -+ - /* In RFC 5280 the attributes of a (subject/issuer) name is not mandatory - * ordered. The problem is that our certificates are not consistent in the order - * (see https://tools.ietf.org/html/rfc5280#section-4.1.2.4 for details). 
-@@ -828,24 +810,10 @@ X509_NAME *c2b_name(const X509_NAME *name) - return X509_NAME_dup((X509_NAME *)name); - } - --/* Verify that: subject(issuer) == issuer(crl) and SKID(issuer) == AKID(crl) */ -+/* Verify that SKID(issuer) == AKID(crl) if available */ - static gint check_crl_issuer(X509_CRL *crl, X509 *issuer, GError **err) - { -- const X509_NAME *crl_issuer = X509_CRL_get_issuer(crl); -- const X509_NAME *issuer_subject = X509_get_subject_name(issuer); -- AUTHORITY_KEYID *akid = NULL; -- -- if (!own_X509_NAME_equal(issuer_subject, crl_issuer)) { -- g_autofree char *issuer_subject_str = X509_NAME_oneline(issuer_subject, -- NULL, 0); -- g_autofree char *crl_issuer_str = X509_NAME_oneline(crl_issuer, NULL, 0); -- -- g_set_error(err, PV_CRYPTO_ERROR, -- PV_CRYPTO_ERROR_CRL_SUBJECT_ISSUER_MISMATCH, -- _("issuer mismatch:\n%s\n%s"), -- issuer_subject_str, crl_issuer_str); -- return -1; -- } -+ g_autoptr(AUTHORITY_KEYID) akid = NULL; - - /* If AKID(@crl) is specified it must match with SKID(@issuer) */ - akid = X509_CRL_get_ext_d2i(crl, NID_authority_key_identifier, NULL, NULL); -@@ -881,7 +849,6 @@ gint check_crl_valid_for_cert(X509_CRL *crl, X509 *cert, - return -1; - } - -- /* check that the @crl issuer matches with the subject name of @cert*/ - if (check_crl_issuer(crl, cert, err) < 0) - return -1; - -@@ -910,6 +877,60 @@ gint check_crl_valid_for_cert(X509_CRL *crl, X509 *cert, - return 0; - } - -+/* This function contains work-arounds for some known subject(CRT)<->issuer(CRL) -+ * issues. -+ */ -+static STACK_OF_X509_CRL *quirk_X509_STORE_ctx_get1_crls(X509_STORE_CTX *ctx, -+ const X509_NAME *subject, GError **err) -+{ -+ g_autoptr(X509_NAME) fixed_subject = NULL; -+ g_autoptr(STACK_OF_X509_CRL) ret = NULL; -+ -+ ret = Pv_X509_STORE_CTX_get1_crls(ctx, subject); -+ if (ret && sk_X509_CRL_num(ret) > 0) -+ return g_steal_pointer(&ret); -+ -+ /* Workaround to fix the mismatch between issuer name of the * IBM -+ * signing CRLs and the IBM signing key subject name. 
Locality name has -+ * changed from Poughkeepsie to Armonk. -+ */ -+ fixed_subject = x509_armonk_locality_fixup(subject); -+ /* Was the locality replaced? */ -+ if (fixed_subject) { -+ X509_NAME *tmp; -+ -+ sk_X509_CRL_free(ret); -+ ret = Pv_X509_STORE_CTX_get1_crls(ctx, fixed_subject); -+ if (ret && sk_X509_CRL_num(ret) > 0) -+ return g_steal_pointer(&ret); -+ -+ /* Workaround to fix the ordering mismatch between issuer name -+ * of the IBM signing CRLs and the IBM signing key subject name. -+ */ -+ tmp = fixed_subject; -+ fixed_subject = c2b_name(fixed_subject); -+ X509_NAME_free(tmp); -+ sk_X509_CRL_free(ret); -+ ret = Pv_X509_STORE_CTX_get1_crls(ctx, fixed_subject); -+ if (ret && sk_X509_CRL_num(ret) > 0) -+ return g_steal_pointer(&ret); -+ X509_NAME_free(fixed_subject); -+ fixed_subject = NULL; -+ } -+ -+ /* Workaround to fix the ordering mismatch between issuer name of the -+ * IBM signing CRLs and the IBM signing key subject name. -+ */ -+ fixed_subject = c2b_name(subject); -+ sk_X509_CRL_free(ret); -+ ret = Pv_X509_STORE_CTX_get1_crls(ctx, fixed_subject); -+ if (ret && sk_X509_CRL_num(ret) > 0) -+ return g_steal_pointer(&ret); -+ -+ g_set_error(err, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_NO_CRL, _("no CRL found")); -+ return NULL; -+} -+ - /* Given a certificate @cert try to find valid revocation lists in @ctx. If no - * valid CRL was found NULL is returned. - */ -@@ -927,20 +948,9 @@ STACK_OF_X509_CRL *store_ctx_find_valid_crls(X509_STORE_CTX *ctx, X509 *cert, - return NULL; - } - -- ret = X509_STORE_CTX_get1_crls(ctx, subject); -- if (!ret) { -- /* Workaround to fix the mismatch between issuer name of the -- * IBM Z signing CRLs and the IBM Z signing key subject name. 
-- */ -- g_autoptr(X509_NAME) broken_subject = c2b_name(subject); -- -- ret = X509_STORE_CTX_get1_crls(ctx, broken_subject); -- if (!ret) { -- g_set_error(err, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_NO_CRL, -- _("no CRL found")); -- return NULL; -- } -- } -+ ret = quirk_X509_STORE_ctx_get1_crls(ctx, subject, err); -+ if (!ret) -+ return NULL; - - /* Filter out non-valid CRLs for @cert */ - for (gint i = 0; i < sk_X509_CRL_num(ret); i++) { -@@ -1328,32 +1338,14 @@ gint check_chain_parameters(const STACK_OF_X509 *chain, - - /* It's almost the same as X509_check_issed from OpenSSL does except that we - * don't check the key usage of the potential issuer. This means we check: -- * 1. issuer_name(cert) == subject_name(issuer) -- * 2. Check whether the akid(cert) (if available) matches the issuer skid -- * 3. Check that the cert algrithm matches the subject algorithm -- * 4. Verify the signature of certificate @cert is using the public key of -+ * 1. Check whether the akid(cert) (if available) matches the issuer skid -+ * 2. Check that the cert algrithm matches the subject algorithm -+ * 3. Verify the signature of certificate @cert is using the public key of - * @issuer. - */ - static gint check_host_key_issued(X509 *cert, X509 *issuer, GError **err) - { -- const X509_NAME *issuer_subject = X509_get_subject_name(issuer); -- const X509_NAME *cert_issuer = X509_get_issuer_name(cert); -- AUTHORITY_KEYID *akid = NULL; -- -- /* We cannot use X509_NAME_cmp() because it considers the order of the -- * X509_NAME_Entries. 
-- */ -- if (!own_X509_NAME_equal(issuer_subject, cert_issuer)) { -- g_autofree char *issuer_subject_str = -- X509_NAME_oneline(issuer_subject, NULL, 0); -- g_autofree char *cert_issuer_str = -- X509_NAME_oneline(cert_issuer, NULL, 0); -- g_set_error(err, PV_CRYPTO_ERROR, -- PV_CRYPTO_ERROR_CERT_SUBJECT_ISSUER_MISMATCH, -- _("Subject issuer mismatch:\n'%s'\n'%s'"), -- issuer_subject_str, cert_issuer_str); -- return -1; -- } -+ g_autoptr(AUTHORITY_KEYID) akid = NULL; - - akid = X509_get_ext_d2i(cert, NID_authority_key_identifier, NULL, NULL); - if (akid && X509_check_akid(issuer, akid) != X509_V_OK) { -diff --git a/genprotimg/src/utils/crypto.h b/genprotimg/src/utils/crypto.h -index fdf66de..e45e57d 100644 ---- a/genprotimg/src/utils/crypto.h -+++ b/genprotimg/src/utils/crypto.h -@@ -75,6 +75,7 @@ void x509_pair_free(x509_pair *pair); - /* Register auto cleanup functions */ - WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(ASN1_INTEGER, ASN1_INTEGER_free) - WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(ASN1_OCTET_STRING, ASN1_OCTET_STRING_free) -+WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(AUTHORITY_KEYID, AUTHORITY_KEYID_free) - WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(BIGNUM, BN_free) - WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(BIO, BIO_free_all) - WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(BN_CTX, BN_CTX_free) --- -2.25.1 - diff -Nru s390-tools-2.31.0/debian/patches/lp-2059303-libpv-Support-Armonk-in-IBM-signing-key-subject.patch s390-tools-2.33.1/debian/patches/lp-2059303-libpv-Support-Armonk-in-IBM-signing-key-subject.patch --- s390-tools-2.31.0/debian/patches/lp-2059303-libpv-Support-Armonk-in-IBM-signing-key-subject.patch 2024-04-02 12:45:30.000000000 +0200 +++ s390-tools-2.33.1/debian/patches/lp-2059303-libpv-Support-Armonk-in-IBM-signing-key-subject.patch 1970-01-01 01:00:00.000000000 +0100 @@ -1,251 +0,0 @@ -From d7c95265cdb6217b0203efa5893c3a27838af63c Mon Sep 17 00:00:00 2001 -From: Steffen Eiden -Date: Tue, 12 Mar 2024 10:14:43 +0100 -Subject: [PATCH] libpv: Support `Armonk` in IBM 
signing key subject - -New IBM signing keys will have Armonk as locality in the subject. -Ensure that CRLs with Poughkeepsie as issuer locality are still -discovered if they are signed with the signing keys private key. -Also, drop the check for issuer/subject comparison and only rely on -validity period and cryptographic signatures. - -Reviewed-by: Marc Hartmayer -Reviewed-by: Christoph Schlameuss -Signed-off-by: Steffen Eiden - -Origin: upstream, https://github.com/ibm-s390-linux/s390-toolsd7c95265cdb6217b0203efa5893c3a27838af63c -Bug-Ubuntu: https://bugs.launchpad.net/bugs/2059303 -Last-Update: 2024-04-02 - ---- - include/libpv/cert.h | 3 +- - libpv/cert.c | 148 +++++++++++++++++++++++++++++-------------- - 2 files changed, 102 insertions(+), 49 deletions(-) - -diff --git a/include/libpv/cert.h b/include/libpv/cert.h -index bceb3c6..aebe33b 100644 ---- a/include/libpv/cert.h -+++ b/include/libpv/cert.h -@@ -16,7 +16,8 @@ - - #define PV_IBM_Z_SUBJECT_COMMON_NAME "International Business Machines Corporation" - #define PV_IBM_Z_SUBJECT_COUNTRY_NAME "US" --#define PV_IBM_Z_SUBJECT_LOCALITY_NAME "Poughkeepsie" -+#define PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE "Poughkeepsie" -+#define PV_IBM_Z_SUBJECT_LOCALITY_NAME_ARMONK "Armonk" - #define PV_IBM_Z_SUBJECT_ORGANIZATIONAL_UNIT_NAME_SUFFIX "Key Signing Service" - #define PV_IBM_Z_SUBJECT_ORGANIZATION_NAME "International Business Machines Corporation" - #define PV_IBM_Z_SUBJECT_STATE "New York" -diff --git a/libpv/cert.c b/libpv/cert.c -index c8bb8cc..f4774fc 100644 ---- a/libpv/cert.c -+++ b/libpv/cert.c -@@ -857,7 +857,7 @@ static gboolean x509_name_data_by_nid_equal(X509_NAME *name, int nid, const char - - /* Checks whether the subject of @cert is a IBM signing key subject. 
For this we - * must check that the subject is equal to: 'C = US, ST = New York, L = -- * Poughkeepsie, O = International Business Machines Corporation, CN = -+ * Poughkeepsie or Armonk, O = International Business Machines Corporation, CN = - * International Business Machines Corporation' and the organization unit (OUT) - * must end with the suffix ' Key Signing Service'. - */ -@@ -879,7 +879,10 @@ static gboolean has_ibm_signing_subject(X509 *cert) - if (!x509_name_data_by_nid_equal(subject, NID_stateOrProvinceName, PV_IBM_Z_SUBJECT_STATE)) - return FALSE; - -- if (!x509_name_data_by_nid_equal(subject, NID_localityName, PV_IBM_Z_SUBJECT_LOCALITY_NAME)) -+ if (!(x509_name_data_by_nid_equal(subject, NID_localityName, -+ PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE) || -+ x509_name_data_by_nid_equal(subject, NID_localityName, -+ PV_IBM_Z_SUBJECT_LOCALITY_NAME_ARMONK))) - return FALSE; - - if (!x509_name_data_by_nid_equal(subject, NID_organizationName, -@@ -1085,10 +1088,9 @@ static int check_signature_algo_match(const EVP_PKEY *pkey, const X509 *subject, - - /* It's almost the same as X509_check_issed from OpenSSL does except that we - * don't check the key usage of the potential issuer. This means we check: -- * 1. issuer_name(cert) == subject_name(issuer) -- * 2. Check whether the akid(cert) (if available) matches the issuer skid -- * 3. Check that the cert algrithm matches the subject algorithm -- * 4. Verify the signature of certificate @cert is using the public key of -+ * 1. Check whether the akid(cert) (if available) matches the issuer skid -+ * 2. Check that the cert algrithm matches the subject algorithm -+ * 3. Verify the signature of certificate @cert is using the public key of - * @issuer. 
- */ - static int check_host_key_issued(X509 *cert, X509 *issuer, GError **error) -@@ -1097,19 +1099,6 @@ static int check_host_key_issued(X509 *cert, X509 *issuer, GError **error) - const X509_NAME *cert_issuer = X509_get_issuer_name(cert); - g_autoptr(AUTHORITY_KEYID) akid = NULL; - -- /* We cannot use X509_NAME_cmp() because it considers the order of the -- * X509_NAME_Entries. -- */ -- if (!own_X509_NAME_equal(issuer_subject, cert_issuer)) { -- g_autofree char *issuer_subject_str = pv_X509_NAME_oneline(issuer_subject); -- g_autofree char *cert_issuer_str = pv_X509_NAME_oneline(cert_issuer); -- -- g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_CERT_SUBJECT_ISSUER_MISMATCH, -- _("Subject issuer mismatch:\n'%s'\n'%s'"), issuer_subject_str, -- cert_issuer_str); -- return -1; -- } -- - akid = X509_get_ext_d2i(cert, NID_authority_key_identifier, NULL, NULL); - if (akid && X509_check_akid(issuer, akid) != X509_V_OK) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_SKID_AKID_MISMATCH, -@@ -1286,21 +1275,10 @@ int pv_verify_cert(X509_STORE_CTX *ctx, X509 *cert, GError **error) - return 0; - } - --/* Verify that: subject(issuer) == issuer(crl) and SKID(issuer) == AKID(crl) */ -+/* Verify that SKID(issuer) == AKID(crl) */ - static int check_crl_issuer(X509_CRL *crl, X509 *issuer, GError **error) - { -- const X509_NAME *crl_issuer = X509_CRL_get_issuer(crl); -- const X509_NAME *issuer_subject = X509_get_subject_name(issuer); -- AUTHORITY_KEYID *akid = NULL; -- -- if (!own_X509_NAME_equal(issuer_subject, crl_issuer)) { -- g_autofree char *issuer_subject_str = pv_X509_NAME_oneline(issuer_subject); -- g_autofree char *crl_issuer_str = pv_X509_NAME_oneline(crl_issuer); -- -- g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_CRL_SUBJECT_ISSUER_MISMATCH, -- _("issuer mismatch:\n%s\n%s"), issuer_subject_str, crl_issuer_str); -- return -1; -- } -+ g_autoptr(AUTHORITY_KEYID) akid = NULL; - - /* If AKID(@crl) is specified it must match with SKID(@issuer) */ - akid = 
X509_CRL_get_ext_d2i(crl, NID_authority_key_identifier, NULL, NULL); -@@ -1325,7 +1303,6 @@ int pv_verify_crl(X509_CRL *crl, X509 *cert, int verify_flags, GError **error) - return -1; - } - -- /* check that the @crl issuer matches with the subject name of @cert*/ - if (check_crl_issuer(crl, cert, error) < 0) - return -1; - -@@ -1393,6 +1370,93 @@ int pv_check_chain_parameters(const STACK_OF_X509 *chain, GError **error) - return 0; - } - -+/** Replace locality 'Armonk' with 'Pougkeepsie'. If Armonk was not set return -+ * `NULL`. -+ */ -+static X509_NAME *x509_armonk_locality_fixup(const X509_NAME *name) -+{ -+ g_autoptr(X509_NAME) ret = NULL; -+ int pos; -+ -+ /* Check if ``L=Armonk`` */ -+ if (!x509_name_data_by_nid_equal((X509_NAME *)name, NID_localityName, -+ PV_IBM_Z_SUBJECT_LOCALITY_NAME_ARMONK)) -+ return NULL; -+ -+ ret = X509_NAME_dup(name); -+ if (!ret) -+ g_abort(); -+ -+ pos = X509_NAME_get_index_by_NID(ret, NID_localityName, -1); -+ if (pos == -1) -+ return NULL; -+ -+ X509_NAME_ENTRY_free(X509_NAME_delete_entry(ret, pos)); -+ -+ /* Create a new name entry at the same position as before */ -+ if (X509_NAME_add_entry_by_NID( -+ ret, NID_localityName, MBSTRING_UTF8, -+ (const unsigned char *)&PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE, -+ sizeof(PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE) - 1, pos, 0) != 1) -+ return NULL; -+ -+ return g_steal_pointer(&ret); -+} -+ -+/* This function contains work-arounds for some known subject(CRT)<->issuer(CRL) -+ * issues. -+ */ -+static STACK_OF_X509_CRL *quirk_X509_STORE_ctx_get1_crls(X509_STORE_CTX *ctx, -+ const X509_NAME *subject, GError **err) -+{ -+ g_autoptr(X509_NAME) fixed_subject = NULL; -+ g_autoptr(STACK_OF_X509_CRL) ret = NULL; -+ -+ ret = pv_X509_STORE_CTX_get1_crls(ctx, subject); -+ if (ret && sk_X509_CRL_num(ret) > 0) -+ return g_steal_pointer(&ret); -+ -+ /* Workaround to fix the mismatch between issuer name of the * IBM -+ * signing CRLs and the IBM signing key subject name. 
Locality name has -+ * changed from Poughkeepsie to Armonk. -+ */ -+ fixed_subject = x509_armonk_locality_fixup(subject); -+ /* Was the locality replaced? */ -+ if (fixed_subject) { -+ X509_NAME *tmp; -+ -+ sk_X509_CRL_free(ret); -+ ret = pv_X509_STORE_CTX_get1_crls(ctx, fixed_subject); -+ if (ret && sk_X509_CRL_num(ret) > 0) -+ return g_steal_pointer(&ret); -+ -+ /* Workaround to fix the ordering mismatch between issuer name -+ * of the IBM signing CRLs and the IBM signing key subject name. -+ */ -+ tmp = fixed_subject; -+ fixed_subject = pv_c2b_name(fixed_subject); -+ X509_NAME_free(tmp); -+ sk_X509_CRL_free(ret); -+ ret = pv_X509_STORE_CTX_get1_crls(ctx, fixed_subject); -+ if (ret && sk_X509_CRL_num(ret) > 0) -+ return g_steal_pointer(&ret); -+ X509_NAME_free(fixed_subject); -+ fixed_subject = NULL; -+ } -+ -+ /* Workaround to fix the ordering mismatch between issuer name of the -+ * IBM signing CRLs and the IBM signing key subject name. -+ */ -+ fixed_subject = pv_c2b_name(subject); -+ sk_X509_CRL_free(ret); -+ ret = pv_X509_STORE_CTX_get1_crls(ctx, fixed_subject); -+ if (ret && sk_X509_CRL_num(ret) > 0) -+ return g_steal_pointer(&ret); -+ -+ g_set_error(err, PV_CERT_ERROR, PV_CERT_ERROR_NO_CRL, _("no CRL found")); -+ return NULL; -+} -+ - /* Given a certificate @cert try to find valid revocation lists in @ctx. If no - * valid CRL was found NULL is returned. - */ -@@ -1412,21 +1476,9 @@ STACK_OF_X509_CRL *pv_store_ctx_find_valid_crls(X509_STORE_CTX *ctx, X509 *cert, - return NULL; - } - -- ret = pv_X509_STORE_CTX_get1_crls(ctx, subject); -- if (!ret) { -- /* Workaround to fix the mismatch between issuer name of the -- * IBM Z signing CRLs and the IBM Z signing key subject name. 
-- */ -- g_autoptr(X509_NAME) broken_subject = pv_c2b_name(subject); -- -- ret = pv_X509_STORE_CTX_get1_crls(ctx, broken_subject); -- if (!ret) { -- g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_NO_CRL, _("no CRL found")); -- g_info("ERROR: %s", (*error)->message); -- return NULL; -- } -- } -- -+ ret = quirk_X509_STORE_ctx_get1_crls(ctx, subject, error); -+ if (!ret) -+ return NULL; - /* Filter out non-valid CRLs for @cert */ - for (int i = 0; i < sk_X509_CRL_num(ret); i++) { - X509_CRL *crl = sk_X509_CRL_value(ret, i); --- -2.25.1 - diff -Nru s390-tools-2.31.0/debian/patches/lp-2059303-pvattest-Fix-root-ca-parsing.patch s390-tools-2.33.1/debian/patches/lp-2059303-pvattest-Fix-root-ca-parsing.patch --- s390-tools-2.31.0/debian/patches/lp-2059303-pvattest-Fix-root-ca-parsing.patch 2024-04-02 12:45:30.000000000 +0200 +++ s390-tools-2.33.1/debian/patches/lp-2059303-pvattest-Fix-root-ca-parsing.patch 1970-01-01 01:00:00.000000000 +0100 @@ -1,49 +0,0 @@ -From 2b5e7b049123aff094c7de79ba57a5df09471b2e Mon Sep 17 00:00:00 2001 -From: Steffen Eiden -Date: Wed, 20 Mar 2024 15:36:52 +0100 -Subject: [PATCH] pvattest: Fix root-ca parsing - -The parser setup falsely set the argument type as filename array, but -code expected a single filename. Fixed by setting up the parser -correctly to expect a single file name. 
- -Fixes: 3ab06d77fb1b ("pvattest: Create, perform, and verify attestation measurements") -Reviewed-by: Marc Hartmayer -Signed-off-by: Steffen Eiden - -Origin: upstream, https://github.com/ibm-s390-linux/s390-tools2b5e7b049123aff094c7de79ba57a5df09471b2e -Bug-Ubuntu: https://bugs.launchpad.net/bugs/2059303 -Last-Update: 2024-04-02 - ---- - pvattest/src/argparse.c | 14 +++++++------- - 1 file changed, 7 insertions(+), 7 deletions(-) - -diff --git a/pvattest/src/argparse.c b/pvattest/src/argparse.c -index fe5662f..5924ddc 100644 ---- a/pvattest/src/argparse.c -+++ b/pvattest/src/argparse.c -@@ -192,13 +192,13 @@ static gboolean hex_str_toull(const char *nptr, uint64_t *dst, GError **error) - } - - /* NOTE REQUIRED */ --#define _entry_root_ca(__arg_data, __indent) \ -- { \ -- .long_name = "root-ca", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ -- .arg = G_OPTION_ARG_FILENAME_ARRAY, .arg_data = __arg_data, \ -- .description = "Use FILE as the trusted root CA instead the\n" __indent \ -- "root CAs that are installed on the system (optional).\n", \ -- .arg_description = "FILE", \ -+#define _entry_root_ca(__arg_data, __indent) \ -+ { \ -+ .long_name = "root-ca", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ -+ .arg = G_OPTION_ARG_FILENAME, .arg_data = __arg_data, \ -+ .description = "Use FILE as the trusted root CA instead the\n" __indent \ -+ "root CAs that are installed on the system (optional).\n", \ -+ .arg_description = "FILE", \ - } - - /* NOTE REQUIRED */ --- -2.25.1 - diff -Nru s390-tools-2.31.0/debian/patches/lp-2059303-rust-pv-Support-Armonk-in-IBM-signing-key-subject.patch s390-tools-2.33.1/debian/patches/lp-2059303-rust-pv-Support-Armonk-in-IBM-signing-key-subject.patch --- s390-tools-2.31.0/debian/patches/lp-2059303-rust-pv-Support-Armonk-in-IBM-signing-key-subject.patch 2024-04-02 12:45:30.000000000 +0200 +++ s390-tools-2.33.1/debian/patches/lp-2059303-rust-pv-Support-Armonk-in-IBM-signing-key-subject.patch 1970-01-01 01:00:00.000000000 +0100 @@ -1,348 
+0,0 @@ -From 1a3d0b74f7819f5e087e6ecbf3ec879a05a88bbc Mon Sep 17 00:00:00 2001 -From: Steffen Eiden -Date: Tue, 12 Mar 2024 10:17:31 +0100 -Subject: [PATCH] rust/pv: Support `Armonk` in IBM signing key subject - -New IBM signing keys will have Armonk as locality in the subject. -Ensure that CRLs with Poughkeepsie as issuer locality are still -discovered if they are signed with the signing keys private key. -Also, drop the check for issuer/subject comparison and only rely on -validity period and cryptographic signatures. - -Reviewed-by: Christoph Schlameuss -Reviewed-by: Marc Hartmayer -Signed-off-by: Steffen Eiden - -Origin: upstream, https://github.com/ibm-s390-linux/s390-tools1a3d0b74f7819f5e087e6ecbf3ec879a05a88bbc -Bug-Ubuntu: https://bugs.launchpad.net/bugs/2059303 -Last-Update: 2024-04-02 - ---- - rust/pv/src/verify.rs | 58 ++++++++++++++++++++++------ - rust/pv/src/verify/helper.rs | 75 +++++++++++++----------------------- - rust/pv/src/verify/test.rs | 44 ++++++++++++++++----- - 3 files changed, 107 insertions(+), 70 deletions(-) - -diff --git a/rust/pv/src/verify.rs b/rust/pv/src/verify.rs -index 619494b..ffa6e69 100644 ---- a/rust/pv/src/verify.rs -+++ b/rust/pv/src/verify.rs -@@ -3,10 +3,11 @@ - // Copyright IBM Corp. 
2023 - - use core::slice; --use log::debug; -+use log::{debug, trace}; -+use openssl::error::ErrorStack; - use openssl::stack::Stack; - use openssl::x509::store::X509Store; --use openssl::x509::{CrlStatus, X509Ref, X509StoreContext, X509}; -+use openssl::x509::{CrlStatus, X509NameRef, X509Ref, X509StoreContext, X509StoreContextRef, X509}; - use openssl_extensions::crl::{StackableX509Crl, X509StoreContextExtension, X509StoreExtension}; - - #[cfg(not(test))] -@@ -86,8 +87,8 @@ impl HkdVerifier for CertVerifier { - if verified_crls.is_empty() { - bail_hkd_verify!(NoCrl); - } -- for crl in &verified_crls { -- match crl.get_by_cert(&hkd.to_owned()) { -+ for crl in verified_crls { -+ match crl.get_by_serial(hkd.serial_number()) { - CrlStatus::NotRevoked => (), - _ => bail_hkd_verify!(HdkRevoked), - } -@@ -98,21 +99,54 @@ impl HkdVerifier for CertVerifier { - } - - impl CertVerifier { -+ fn quirk_crls( -+ ctx: &mut X509StoreContextRef, -+ subject: &X509NameRef, -+ ) -> Result, ErrorStack> { -+ match ctx.crls(subject) { -+ Ok(ret) if !ret.is_empty() => return Ok(ret), -+ _ => (), -+ } -+ -+ // Armonk/Poughkeepsie fixup -+ trace!("quirk_crls: Try Locality"); -+ if let Some(locality_subject) = helper::armonk_locality_fixup(subject) { -+ match ctx.crls(&locality_subject) { -+ Ok(ret) if !ret.is_empty() => return Ok(ret), -+ _ => (), -+ } -+ -+ // reorder -+ trace!("quirk_crls: Try Locality+Reorder"); -+ if let Ok(locality_ordered_subject) = helper::reorder_x509_names(&locality_subject) { -+ match ctx.crls(&locality_ordered_subject) { -+ Ok(ret) if !ret.is_empty() => return Ok(ret), -+ _ => (), -+ } -+ } -+ } -+ -+ // reorder unchanged loaciliy subject -+ trace!("quirk_crls: Try Reorder"); -+ if let Ok(ordered_subject) = helper::reorder_x509_names(subject) { -+ match ctx.crls(&ordered_subject) { -+ Ok(ret) if !ret.is_empty() => return Ok(ret), -+ _ => (), -+ } -+ } -+ // nothing found, return empty stack -+ Stack::new() -+ } -+ - ///Download the CLRs that a HKD refers to. 
- pub fn hkd_crls(&self, hkd: &X509Ref) -> Result> { - let mut ctx = X509StoreContext::new()?; - // Unfortunately we cannot use a dedicated function here and have to use a closure (E0434) - // Otherwise, we cannot refer to self -+ // Search for local CRLs - let mut crls = ctx.init_opt(&self.store, None, None, |ctx| { - let subject = self.ibm_z_sign_key.subject_name(); -- match ctx.crls(subject) { -- Ok(crls) => Ok(crls), -- _ => { -- // reorder the name and try again -- let broken_subj = helper::reorder_x509_names(subject)?; -- ctx.crls(&broken_subj).or_else(helper::stack_err_hlp) -- } -- } -+ Self::quirk_crls(ctx, subject) - })?; - - if !self.offline { -diff --git a/rust/pv/src/verify/helper.rs b/rust/pv/src/verify/helper.rs -index 6ac540b..4dd0844 100644 ---- a/rust/pv/src/verify/helper.rs -+++ b/rust/pv/src/verify/helper.rs -@@ -11,7 +11,7 @@ use openssl::{ - error::ErrorStack, - nid::Nid, - ssl::SslFiletype, -- stack::{Stack, Stackable}, -+ stack::Stack, - x509::{ - store::{File, X509Lookup, X509StoreBuilder, X509StoreRef}, - verify::{X509VerifyFlags, X509VerifyParam}, -@@ -20,6 +20,7 @@ use openssl::{ - }, - }; - use openssl_extensions::akid::{AkidCheckResult, AkidExtension}; -+use std::str::from_utf8; - use std::{cmp::Ordering, ffi::c_int, usize}; - - /// Minimum security level for the keys/certificates used to establish a chain of -@@ -34,7 +35,6 @@ const SECURITY_CHAIN_MAX_LEN: c_int = 2; - /// verifies that the HKD - /// * has enough security bits - /// * is inside its validity period --/// * issuer name is the subject name of the [`sign_key`] - /// * the Authority Key ID matches the Signing Key ID of the [`sign_key`] - pub fn verify_hkd_options(hkd: &X509Ref, sign_key: &X509Ref) -> Result<()> { - let hk_pkey = hkd.public_key()?; -@@ -48,9 +48,6 @@ pub fn verify_hkd_options(hkd: &X509Ref, sign_key: &X509Ref) -> Result<()> { - // verify that the hkd is still valid - check_validity_period(hkd.not_before(), hkd.not_after())?; - -- // check if hkd.issuer_name 
== issuer.subject -- check_x509_name_equal(sign_key.subject_name(), hkd.issuer_name())?; -- - // verify that the AKID of the hkd matches the SKID of the issuer - if let Some(akid) = hkd.akid() { - if akid.check(sign_key) != AkidCheckResult::OK { -@@ -70,9 +67,6 @@ pub fn verify_crl(crl: &X509CrlRef, issuer: &X509Ref) -> Option<()> { - return None; - } - } -- -- check_x509_name_equal(crl.issuer_name(), issuer.subject_name()).ok()?; -- - match crl.verify(issuer.public_key().ok()?.as_ref()).ok()? { - true => Some(()), - false => None, -@@ -191,7 +185,8 @@ pub fn extract_ibm_sign_key(certs: Vec) -> Result<(X509, Stack)> { - //Asn1StringRef::as_slice aka ASN1_STRING_get0_data gives a string without \0 delimiter - const IBM_Z_COMMON_NAME: &[u8; 43usize] = b"International Business Machines Corporation"; - const IBM_Z_COUNTRY_NAME: &[u8; 2usize] = b"US"; --const IBM_Z_LOCALITY_NAME: &[u8; 12usize] = b"Poughkeepsie"; -+const IBM_Z_LOCALITY_NAME_POUGHKEEPSIE: &[u8; 12usize] = b"Poughkeepsie"; -+const IBM_Z_LOCALITY_NAME_ARMONK: &[u8; 6usize] = b"Armonk"; - const IBM_Z_ORGANIZATIONAL_UNIT_NAME_SUFFIX: &str = "Key Signing Service"; - const IBM_Z_ORGANIZATION_NAME: &[u8; 43usize] = b"International Business Machines Corporation"; - const IBM_Z_STATE: &[u8; 8usize] = b"New York"; -@@ -210,7 +205,8 @@ fn is_ibm_signing_cert(cert: &X509) -> bool { - if subj.entries().count() != IMB_Z_ENTRY_COUNT - || !name_data_eq(subj, Nid::COUNTRYNAME, IBM_Z_COUNTRY_NAME) - || !name_data_eq(subj, Nid::STATEORPROVINCENAME, IBM_Z_STATE) -- || !name_data_eq(subj, Nid::LOCALITYNAME, IBM_Z_LOCALITY_NAME) -+ || !(name_data_eq(subj, Nid::LOCALITYNAME, IBM_Z_LOCALITY_NAME_POUGHKEEPSIE) -+ || name_data_eq(subj, Nid::LOCALITYNAME, IBM_Z_LOCALITY_NAME_ARMONK)) - || !name_data_eq(subj, Nid::ORGANIZATIONNAME, IBM_Z_ORGANIZATION_NAME) - || !name_data_eq(subj, Nid::COMMONNAME, IBM_Z_COMMON_NAME) - { -@@ -354,24 +350,6 @@ fn check_validity_period(not_before: &Asn1TimeRef, not_after: &Asn1TimeRef) -> R - } - } - 
--fn check_x509_name_equal(lhs: &X509NameRef, rhs: &X509NameRef) -> Result<()> { -- if lhs.entries().count() != rhs.entries().count() { -- bail_hkd_verify!(IssuerMismatch); -- } -- -- for l in lhs.entries() { -- // search for the matching value in the rhs names -- // found none? -> names are not equal -- if !rhs -- .entries() -- .any(|r| l.data().as_slice() == r.data().as_slice()) -- { -- bail_hkd_verify!(IssuerMismatch); -- } -- } -- Ok(()) --} -- - const NIDS_CORRECT_ORDER: [Nid; 6] = [ - Nid::COUNTRYNAME, - Nid::ORGANIZATIONNAME, -@@ -394,13 +372,28 @@ pub fn reorder_x509_names(subject: &X509NameRef) -> std::result::Result( -- e: ErrorStack, --) -> std::result::Result, openssl::error::ErrorStack> { -- match e.errors().len() { -- 0 => Stack::::new(), -- _ => Err(e), -+/** -+* Workaround for potential locality mismatches between CRLs and Certs -+* # Return -+* fixed subject or none if locality was not Armonk or any OpenSSL error -+*/ -+pub fn armonk_locality_fixup(subject: &X509NameRef) -> Option { -+ if !name_data_eq(subject, Nid::LOCALITYNAME, IBM_Z_LOCALITY_NAME_ARMONK) { -+ return None; - } -+ -+ let mut ret = X509Name::builder().ok()?; -+ for entry in subject.entries() { -+ match entry.object().nid() { -+ nid @ Nid::LOCALITYNAME => ret -+ .append_entry_by_nid(nid, from_utf8(IBM_Z_LOCALITY_NAME_POUGHKEEPSIE).ok()?) 
-+ .ok()?, -+ _ => { -+ ret.append_entry(entry).ok()?; -+ } -+ } -+ } -+ Some(ret.build()) - } - - #[cfg(test)] -@@ -436,20 +429,6 @@ mod test { - )); - } - -- #[test] -- fn x509_name_equal() { -- let sign_crt = load_gen_cert("ibm.crt"); -- let hkd = load_gen_cert("host.crt"); -- let other = load_gen_cert("inter_ca.crt"); -- -- assert!(super::check_x509_name_equal(sign_crt.subject_name(), hkd.issuer_name()).is_ok(),); -- -- assert!(matches!( -- super::check_x509_name_equal(other.subject_name(), hkd.subject_name()), -- Err(Error::HkdVerify(IssuerMismatch)) -- )); -- } -- - #[test] - fn is_ibm_z_sign_key() { - let ibm_crt = load_gen_cert("ibm.crt"); -diff --git a/rust/pv/src/verify/test.rs b/rust/pv/src/verify/test.rs -index 15806b2..2fd6bc3 100644 ---- a/rust/pv/src/verify/test.rs -+++ b/rust/pv/src/verify/test.rs -@@ -74,16 +74,15 @@ fn dist_points() { - assert_eq!(res, exp); - } - --fn verify(offline: bool, ibm_crt: &'static str, ibm_crl: &'static str) { -+fn verify(offline: bool, ibm_crt: &'static str, ibm_crl: &'static str, hkd: &'static str) { - let root_crt = get_cert_asset_path_string("root_ca.chained.crt"); - let inter_crt = get_cert_asset_path_string("inter_ca.crt"); - let inter_crl = get_cert_asset_path_string("inter_ca.crl"); - let ibm_crt = get_cert_asset_path_string(ibm_crt); - let ibm_crl = get_cert_asset_path_string(ibm_crl); - let hkd_revoked = load_gen_cert("host_rev.crt"); -- let hkd_inv = load_gen_cert("host_invalid_signing_key.crt"); - let hkd_exp = load_gen_cert("host_crt_expired.crt"); -- let hkd = load_gen_cert("host.crt"); -+ let hkd = load_gen_cert(hkd); - - let crls = &[ibm_crl, inter_crl]; - let verifier = CertVerifier::new( -@@ -102,11 +101,6 @@ fn verify(offline: bool, ibm_crt: &'static str, ibm_crl: &'static str) { - Err(Error::HkdVerify(HdkRevoked)) - )); - -- assert!(matches!( -- verifier.verify(&hkd_inv), -- Err(Error::HkdVerify(IssuerMismatch)) -- )); -- - assert!(matches!( - verifier.verify(&hkd_exp), - 
Err(Error::HkdVerify(AfterValidity)) -@@ -115,10 +109,40 @@ fn verify(offline: bool, ibm_crt: &'static str, ibm_crl: &'static str) { - - #[test] - fn verify_online() { -- verify(false, "ibm.crt", "ibm.crl") -+ verify(false, "ibm.crt", "ibm.crl", "host.crt") - } - - #[test] - fn verify_offline() { -- verify(true, "ibm.crt", "ibm.crl") -+ verify(true, "ibm.crt", "ibm.crl", "host.crt") -+} -+ -+#[test] -+fn verify_armonk_crt_online() { -+ verify(false, "ibm_armonk.crt", "ibm.crl", "host.crt") -+} -+ -+#[test] -+fn verify_armonk_crt_offline() { -+ verify(true, "ibm_armonk.crt", "ibm.crl", "host.crt") -+} -+ -+#[test] -+fn verify_armonk_crl_online() { -+ verify(false, "ibm_armonk.crt", "ibm_armonk.crl", "host.crt") -+} -+ -+#[test] -+fn verify_armonk_crl_offline() { -+ verify(true, "ibm_armonk.crt", "ibm_armonk.crl", "host.crt") -+} -+ -+#[test] -+fn verify_armonk_hkd_online() { -+ verify(false, "ibm_armonk.crt", "ibm_armonk.crl", "host_armonk.crt") -+} -+ -+#[test] -+fn verify_armonk_hkd_offline() { -+ verify(true, "ibm_armonk.crt", "ibm_armonk.crl", "host_armonk.crt") - } --- -2.25.1 - diff -Nru s390-tools-2.31.0/debian/patches/lp-2059303-rust-pv-test-Code-Certificate-refactoring.patch s390-tools-2.33.1/debian/patches/lp-2059303-rust-pv-test-Code-Certificate-refactoring.patch --- s390-tools-2.31.0/debian/patches/lp-2059303-rust-pv-test-Code-Certificate-refactoring.patch 2024-04-02 12:45:30.000000000 +0200 +++ s390-tools-2.33.1/debian/patches/lp-2059303-rust-pv-test-Code-Certificate-refactoring.patch 1970-01-01 01:00:00.000000000 +0100 @@ -1,3020 +0,0 @@ -From f6c6f0cc712433221fb0588c754e0d09884453dd Mon Sep 17 00:00:00 2001 -From: Marc Hartmayer -Date: Thu, 21 Mar 2024 09:56:54 +0000 -Subject: [PATCH] rust/pv/test: Code + Certificate refactoring - -* Get rid of Mockito -* create certs with AKID -* simplify things in the `create_certs.py` script - -Signed-off-by: Marc Hartmayer -Signed-off-by: Steffen Eiden - -Origin: upstream, 
https://github.com/ibm-s390-linux/s390-toolsf6c6f0cc712433221fb0588c754e0d09884453dd -Bug-Ubuntu: https://bugs.launchpad.net/bugs/2059303 -Last-Update: 2024-04-02 - ---- - rust/Cargo.lock | 2 - - rust/pv/Cargo.toml | 2 - - rust/pv/src/verify.rs | 16 +- - rust/pv/src/verify/helper.rs | 51 +---- - rust/pv/src/verify/test.rs | 134 ++++------- - rust/pv/tests/assets/cert/fake_host.crt | 29 +++ - rust/pv/tests/assets/cert/fake_host.key | 8 + - rust/pv/tests/assets/cert/fake_ibm.crl | 20 ++ - rust/pv/tests/assets/cert/fake_ibm.crt | 38 ++++ - rust/pv/tests/assets/cert/fake_ibm.key | 52 +++++ - rust/pv/tests/assets/cert/fake_inter_ca.crl | 29 +-- - rust/pv/tests/assets/cert/fake_inter_ca.crt | 57 +++-- - rust/pv/tests/assets/cert/fake_inter_ca.key | 52 +++++ - rust/pv/tests/assets/cert/fake_root_ca.crl | 20 ++ - rust/pv/tests/assets/cert/fake_root_ca.crt | 37 ++++ - rust/pv/tests/assets/cert/fake_root_ca.key | 52 +++++ - .../tests/assets/cert/fake_root_ca_valid.crl | 20 ++ - rust/pv/tests/assets/cert/gen/create_certs.py | 208 +++++++++++------- - rust/pv/tests/assets/cert/host.crt | 41 ++-- - rust/pv/tests/assets/cert/host.key | 8 + - rust/pv/tests/assets/cert/host2.crt | 29 +++ - rust/pv/tests/assets/cert/host2.key | 8 + - rust/pv/tests/assets/cert/host_armonk.crt | 29 +++ - .../pv/tests/assets/cert/host_crt_expired.crt | 39 ++-- - .../assets/cert/host_invalid_signing_key.crt | 39 ++-- - .../assets/cert/host_invalid_signing_key.key | 8 + - rust/pv/tests/assets/cert/host_rev.crt | 41 ++-- - rust/pv/tests/assets/cert/host_rev.key | 8 + - rust/pv/tests/assets/cert/host_uri_na.crt | 29 +++ - rust/pv/tests/assets/cert/ibm.chained.crt | 59 +++++ - rust/pv/tests/assets/cert/ibm.crl | 31 +-- - rust/pv/tests/assets/cert/ibm.crt | 57 +++-- - rust/pv/tests/assets/cert/ibm.key | 52 +++++ - rust/pv/tests/assets/cert/ibm_armonk.crl | 21 ++ - rust/pv/tests/assets/cert/ibm_armonk.crt | 38 ++++ - rust/pv/tests/assets/cert/ibm_expired.crt | 38 ++++ - 
.../pv/tests/assets/cert/ibm_invalid_hash.crl | 20 ++ - .../tests/assets/cert/ibm_outdated_early.crl | 29 +-- - .../tests/assets/cert/ibm_outdated_late.crl | 35 +-- - rust/pv/tests/assets/cert/ibm_rev.crt | 57 +++-- - .../pv/tests/assets/cert/ibm_wrong_issuer.crl | 19 ++ - .../tests/assets/cert/ibm_wrong_subject.crl | 20 ++ - .../tests/assets/cert/ibm_wrong_subject.crt | 57 +++-- - .../tests/assets/cert/ibm_wrong_subject.key | 52 +++++ - .../pv/tests/assets/cert/inter_ca.chained.crt | 58 +++++ - rust/pv/tests/assets/cert/inter_ca.crl | 31 +-- - rust/pv/tests/assets/cert/inter_ca.crt | 57 +++-- - .../assets/cert/inter_ca.invalid_date.crl | 20 ++ - .../assets/cert/inter_ca.invalid_signer.crl | 20 ++ - rust/pv/tests/assets/cert/inter_ca.key | 52 +++++ - rust/pv/tests/assets/cert/root_ca.chained.crt | 85 +++---- - rust/pv/tests/assets/cert/root_ca.crl | 20 ++ - rust/pv/tests/assets/cert/root_ca.crt | 56 ++--- - rust/pv/tests/assets/cert/root_ca.key | 52 +++++ - 56 files changed, 1548 insertions(+), 594 deletions(-) - create mode 100644 rust/pv/tests/assets/cert/fake_host.crt - create mode 100644 rust/pv/tests/assets/cert/fake_host.key - create mode 100644 rust/pv/tests/assets/cert/fake_ibm.crl - create mode 100644 rust/pv/tests/assets/cert/fake_ibm.crt - create mode 100644 rust/pv/tests/assets/cert/fake_ibm.key - create mode 100644 rust/pv/tests/assets/cert/fake_inter_ca.key - create mode 100644 rust/pv/tests/assets/cert/fake_root_ca.crl - create mode 100644 rust/pv/tests/assets/cert/fake_root_ca.crt - create mode 100644 rust/pv/tests/assets/cert/fake_root_ca.key - create mode 100644 rust/pv/tests/assets/cert/fake_root_ca_valid.crl - create mode 100644 rust/pv/tests/assets/cert/host.key - create mode 100644 rust/pv/tests/assets/cert/host2.crt - create mode 100644 rust/pv/tests/assets/cert/host2.key - create mode 100644 rust/pv/tests/assets/cert/host_armonk.crt - create mode 100644 rust/pv/tests/assets/cert/host_invalid_signing_key.key - create mode 100644 
rust/pv/tests/assets/cert/host_rev.key - create mode 100644 rust/pv/tests/assets/cert/host_uri_na.crt - create mode 100644 rust/pv/tests/assets/cert/ibm.chained.crt - create mode 100644 rust/pv/tests/assets/cert/ibm.key - create mode 100644 rust/pv/tests/assets/cert/ibm_armonk.crl - create mode 100644 rust/pv/tests/assets/cert/ibm_armonk.crt - create mode 100644 rust/pv/tests/assets/cert/ibm_expired.crt - create mode 100644 rust/pv/tests/assets/cert/ibm_invalid_hash.crl - create mode 100644 rust/pv/tests/assets/cert/ibm_wrong_issuer.crl - create mode 100644 rust/pv/tests/assets/cert/ibm_wrong_subject.crl - create mode 100644 rust/pv/tests/assets/cert/ibm_wrong_subject.key - create mode 100644 rust/pv/tests/assets/cert/inter_ca.chained.crt - create mode 100644 rust/pv/tests/assets/cert/inter_ca.invalid_date.crl - create mode 100644 rust/pv/tests/assets/cert/inter_ca.invalid_signer.crl - create mode 100644 rust/pv/tests/assets/cert/inter_ca.key - create mode 100644 rust/pv/tests/assets/cert/root_ca.crl - create mode 100644 rust/pv/tests/assets/cert/root_ca.key - -diff --git a/rust/Cargo.lock b/rust/Cargo.lock -index 339cb9f..17d11ab 100644 ---- a/rust/Cargo.lock -+++ b/rust/Cargo.lock -@@ -780,8 +780,6 @@ dependencies = [ - "clap", - "curl", - "log", -- "mockito", -- "once_cell", - "openssl", - "openssl_extensions", - "pv_core", -diff --git a/rust/pv/Cargo.toml b/rust/pv/Cargo.toml -index 240f04d..e202121 100644 ---- a/rust/pv/Cargo.toml -+++ b/rust/pv/Cargo.toml -@@ -19,6 +19,4 @@ openssl_extensions = { path = "openssl_extensions" } - pv_core = { path = "../pv_core" } - - [dev-dependencies] --mockito = {version = "1", default-features = false } --once_cell = "1.19" - serde_test = "1" -diff --git a/rust/pv/src/verify.rs b/rust/pv/src/verify.rs -index b000f0a..619494b 100644 ---- a/rust/pv/src/verify.rs -+++ b/rust/pv/src/verify.rs -@@ -7,8 +7,12 @@ use log::debug; - use openssl::stack::Stack; - use openssl::x509::store::X509Store; - use openssl::x509::{CrlStatus, 
X509Ref, X509StoreContext, X509}; --use openssl_extensions::crl::StackableX509Crl; --use openssl_extensions::crl::X509StoreContextExtension; -+use openssl_extensions::crl::{StackableX509Crl, X509StoreContextExtension, X509StoreExtension}; -+ -+#[cfg(not(test))] -+use helper::download_first_crl_from_x509; -+#[cfg(test)] -+use test::download_first_crl_from_x509; - - use crate::error::bail_hkd_verify; - use crate::misc::{read_certs, read_file}; -@@ -113,7 +117,7 @@ impl CertVerifier { - - if !self.offline { - // Try to download a CRL if defined in the HKD -- if let Some(crl) = helper::download_first_crl_from_x509(hkd)? { -+ if let Some(crl) = download_first_crl_from_x509(hkd)? { - crl.into_iter().try_for_each(|c| crls.push(c.into()))?; - } - } -@@ -143,7 +147,11 @@ impl CertVerifier { - for path in cert_paths { - let mut crt = read_certs(&read_file(path, "certificate")?)?; - if !offline { -- helper::download_crls_into_store(&mut store, &crt)?; -+ for c in &crt { -+ if let Some(crl) = download_first_crl_from_x509(c)? { -+ crl.iter().try_for_each(|c| store.add_crl(c))?; -+ } -+ } - } - untr_certs.append(&mut crt); - } -diff --git a/rust/pv/src/verify/helper.rs b/rust/pv/src/verify/helper.rs -index 0c7aa8f..6ac540b 100644 ---- a/rust/pv/src/verify/helper.rs -+++ b/rust/pv/src/verify/helper.rs -@@ -3,10 +3,8 @@ - // Copyright IBM Corp. 
2023 - - use crate::error::bail_hkd_verify; --use crate::misc::read_crls; - use crate::HkdVerifyErrorType::*; - use crate::{Error, Result}; --use curl::easy::{Easy2, Handler, WriteError}; - use log::debug; - use openssl::{ - asn1::{Asn1Time, Asn1TimeRef}, -@@ -15,17 +13,14 @@ use openssl::{ - ssl::SslFiletype, - stack::{Stack, Stackable}, - x509::{ -- store::{File, X509Lookup, X509StoreBuilder, X509StoreBuilderRef, X509StoreRef}, -+ store::{File, X509Lookup, X509StoreBuilder, X509StoreRef}, - verify::{X509VerifyFlags, X509VerifyParam}, -- X509Crl, X509CrlRef, X509Name, X509NameRef, X509PurposeId, X509Ref, X509StoreContext, -+ X509CrlRef, X509Name, X509NameRef, X509PurposeId, X509Ref, X509StoreContext, - X509StoreContextRef, X509VerifyResult, X509, - }, - }; --use openssl_extensions::{ -- akid::{AkidCheckResult, AkidExtension}, -- crl::X509StoreExtension, --}; --use std::{cmp::Ordering, ffi::c_int, time::Duration, usize}; -+use openssl_extensions::akid::{AkidCheckResult, AkidExtension}; -+use std::{cmp::Ordering, ffi::c_int, usize}; - - /// Minimum security level for the keys/certificates used to establish a chain of - /// trust (see https://www.openssl.org/docs/man1.1.1/man3/X509_VERIFY_PARAM_set_auth_level.html -@@ -192,17 +187,6 @@ pub fn extract_ibm_sign_key(certs: Vec) -> Result<(X509, Stack)> { - Ok((ibm_z_sign_key, chain)) - } - --/// for all certs load the first CRL specified into our store --pub fn download_crls_into_store(store: &mut X509StoreBuilderRef, crts: &[X509]) -> Result<()> { -- for crt in crts { -- debug!("Download crls for {crt:?}"); -- if let Some(crl) = download_first_crl_from_x509(crt)? 
{ -- crl.iter().try_for_each(|c| store.add_crl(c))?; -- } -- } -- Ok(()) --} -- - // Name Entry values of an IBM Z key signing cert - //Asn1StringRef::as_slice aka ASN1_STRING_get0_data gives a string without \0 delimiter - const IBM_Z_COMMON_NAME: &[u8; 43usize] = b"International Business Machines Corporation"; -@@ -319,14 +303,17 @@ pub fn x509_dist_points(cert: &X509Ref) -> Vec { - res - } - --const CRL_TIMEOUT_MAX: Duration = Duration::from_secs(3); -- - /// Searches for CRL Distribution points and downloads the CRL. Stops after the first successful - /// download. - /// - /// Error if sth bad(=unexpected) happens (not bad: crl not available at link, unexpected format) - /// Other issues are mapped to Ok(None) --pub fn download_first_crl_from_x509(cert: &X509Ref) -> Result>> { -+#[cfg(not(test))] -+pub fn download_first_crl_from_x509(cert: &X509Ref) -> Result>> { -+ use crate::misc::read_crls; -+ use curl::easy::{Easy2, Handler, WriteError}; -+ use std::time::Duration; -+ const CRL_TIMEOUT_MAX: Duration = Duration::from_secs(3); - struct Buf(Vec); - - impl Handler for Buf { -@@ -420,8 +407,6 @@ pub fn stack_err_hlp( - /// tests for some private functions - mod test { - -- use openssl_extensions::x509_crl_eq; -- - use super::*; - use crate::test_utils::*; - use std::time::{Duration, SystemTime}; -@@ -497,20 +482,4 @@ mod test { - )); - assert!(super::get_ibm_z_sign_key(&[ibm_crt, no_sign_crt]).is_ok(),); - } -- -- #[test] -- fn download_first_crl_from_x509() { -- let ibm_crt = load_gen_cert("ibm.crt"); -- let inter_crl = load_gen_crl("inter_ca.crl"); -- let _m_inter = super::super::test::mock_endpt("inter_ca.crl"); -- -- let crl_d = super::download_first_crl_from_x509(&ibm_crt) -- .unwrap() -- .unwrap(); -- assert_eq!(crl_d.len(), 1); -- assert!(x509_crl_eq( -- crl_d.first().unwrap().as_ref(), -- inter_crl.as_ref() -- )); -- } - } -diff --git a/rust/pv/src/verify/test.rs b/rust/pv/src/verify/test.rs -index da7ac1a..15806b2 100644 ---- 
a/rust/pv/src/verify/test.rs -+++ b/rust/pv/src/verify/test.rs -@@ -5,32 +5,30 @@ - #![cfg(test)] - - use super::{helper, helper::*, *}; --use crate::{Error, HkdVerifyErrorType::*}; --use core::slice; --use once_cell::sync::OnceCell; --use openssl::stack::Stack; --use std::sync::Mutex; -+use crate::{misc::read_crls, Error, HkdVerifyErrorType::*}; -+use openssl::{stack::Stack, x509::X509Crl}; -+use std::path::Path; - - use crate::test_utils::*; - --pub fn mock_endpt(res: &str) -> mockito::Mock { -- static MOCK_SERVER: OnceCell> = OnceCell::new(); -- -- let res_path = get_cert_asset_path(res); -- -- MOCK_SERVER -- .get_or_init(|| mockito::Server::new_with_port(1234).into()) -- .lock() -- .expect("COULD NOT GET THE MOCK_SERVER LOCK") -- .mock("GET", format!("/crl/{res}").as_str()) -- .with_header("content-type", "application/pkix-crl") -- .with_body_from_file(res_path) -- .create() --} -- --#[test] --fn mockito_server_available() { -- let _mock = mock_endpt("ibm.crt"); -+//mock function -+pub fn download_first_crl_from_x509(cert: &X509Ref) -> Result>> { -+ fn mock_download>(path: P) -> Result> { -+ read_crls(&std::fs::read(path)?) 
-+ } -+ -+ for dist_point in x509_dist_points(cert) { -+ { -+ let path = get_cert_asset_path(&dist_point); -+ let crls = if let Ok(buf) = mock_download(&path) { -+ buf -+ } else { -+ continue; -+ }; -+ return Ok(Some(crls)); -+ } -+ } -+ Ok(None) - } - - #[test] -@@ -44,22 +42,12 @@ fn store_setup() { - - #[test] - fn verify_chain_online() { -- let ibm_crt = load_gen_cert("ibm.crt"); -- let inter_crt = load_gen_cert("inter_ca.crt"); -+ let ibm_crt = get_cert_asset_path_string("ibm.crt"); -+ let inter_crt = get_cert_asset_path_string("inter_ca.crt"); - let root_crt = get_cert_asset_path_string("root_ca.chained.crt"); - -- let mock_inter = mock_endpt("inter_ca.crl"); -- -- let mut store = helper::store_setup(&Some(root_crt), &[], &[]).unwrap(); -- download_crls_into_store(&mut store, slice::from_ref(&ibm_crt)).unwrap(); -- let store = store.build(); -- -- mock_inter.assert(); -- -- let mut sk = Stack::::new().unwrap(); -- sk.push(inter_crt).unwrap(); -- verify_chain(&store, &sk, &[ibm_crt.clone()]).unwrap(); -- assert!(verify_chain(&store, &sk, &[ibm_crt]).is_ok()); -+ let ret = CertVerifier::new(&[ibm_crt, inter_crt], &[], &root_crt.into(), false); -+ assert!(ret.is_ok(), "CertVerifier::new failed: {ret:?}"); - } - - #[test] -@@ -79,33 +67,36 @@ fn verify_chain_offline() { - } - - #[test] --fn verify_online() { -+fn dist_points() { -+ let crt = load_gen_cert("ibm.crt"); -+ let res = x509_dist_points(&crt); -+ let exp = vec!["inter_ca.crl"]; -+ assert_eq!(res, exp); -+} -+ -+fn verify(offline: bool, ibm_crt: &'static str, ibm_crl: &'static str) { - let root_crt = get_cert_asset_path_string("root_ca.chained.crt"); - let inter_crt = get_cert_asset_path_string("inter_ca.crt"); -- let ibm_crt = get_cert_asset_path_string("ibm.crt"); -+ let inter_crl = get_cert_asset_path_string("inter_ca.crl"); -+ let ibm_crt = get_cert_asset_path_string(ibm_crt); -+ let ibm_crl = get_cert_asset_path_string(ibm_crl); - let hkd_revoked = load_gen_cert("host_rev.crt"); - let hkd_inv = 
load_gen_cert("host_invalid_signing_key.crt"); - let hkd_exp = load_gen_cert("host_crt_expired.crt"); - let hkd = load_gen_cert("host.crt"); - -- let mock_inter = mock_endpt("inter_ca.crl"); -- let mock_ibm = mock_endpt("ibm.crl"); -- -- let inter_crl = get_cert_asset_path_string("inter_ca.crl"); -- let ibm_crl = get_cert_asset_path_string("ibm.crl"); -+ let crls = &[ibm_crl, inter_crl]; - let verifier = CertVerifier::new( - &[ibm_crt, inter_crt], -- &[ibm_crl, inter_crl], -+ if offline { crls } else { &[] }, - &Some(root_crt), -- false, -+ offline, - ) - .unwrap(); - -- mock_inter.assert(); -+ let res = verifier.verify(&hkd); -+ assert!(res.is_ok(), "Verify failed: res: {res:?}"); - -- verifier.verify(&hkd).unwrap(); -- -- mock_ibm.assert(); - assert!(matches!( - verifier.verify(&hkd_revoked), - Err(Error::HkdVerify(HdkRevoked)) -@@ -123,46 +114,11 @@ fn verify_online() { - } - - #[test] --fn verify_offline() { -- let root_crt = get_cert_asset_path_string("root_ca.chained.crt"); -- let inter_crt = get_cert_asset_path_string("inter_ca.crt"); -- let inter_crl = get_cert_asset_path_string("inter_ca.crl"); -- let ibm_crt = get_cert_asset_path_string("ibm.crt"); -- let ibm_crl = get_cert_asset_path_string("ibm.crl"); -- let hkd_revoked = load_gen_cert("host_rev.crt"); -- let hkd_inv = load_gen_cert("host_invalid_signing_key.crt"); -- let hkd_exp = load_gen_cert("host_crt_expired.crt"); -- let hkd = load_gen_cert("host.crt"); -- -- let verifier = CertVerifier::new( -- &[ibm_crt, inter_crt], -- &[ibm_crl, inter_crl], -- &Some(root_crt), -- true, -- ) -- .unwrap(); -- -- verifier.verify(&hkd).unwrap(); -- assert!(matches!( -- verifier.verify(&hkd_revoked), -- Err(Error::HkdVerify(HdkRevoked)) -- )); -- -- assert!(matches!( -- verifier.verify(&hkd_inv), -- Err(Error::HkdVerify(IssuerMismatch)) -- )); -- -- assert!(matches!( -- verifier.verify(&hkd_exp), -- Err(Error::HkdVerify(AfterValidity)) -- )); -+fn verify_online() { -+ verify(false, "ibm.crt", "ibm.crl") - } - - 
#[test] --fn dist_points() { -- let crt = load_gen_cert("ibm.crt"); -- let res = x509_dist_points(&crt); -- let exp = vec!["http://127.0.0.1:1234/crl/inter_ca.crl"]; -- assert_eq!(res, exp); -+fn verify_offline() { -+ verify(true, "ibm.crt", "ibm.crl") - } -diff --git a/rust/pv/tests/assets/cert/fake_host.crt b/rust/pv/tests/assets/cert/fake_host.crt -new file mode 100644 -index 0000000..bf212a8 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_host.crt -@@ -0,0 +1,29 @@ -+-----BEGIN CERTIFICATE----- -+MIIE/TCCAuWgAwIBAgIUWT/F3gP9fOTTq3yOMVskcXM8vOAwDQYJKoZIhvcNAQEN -+BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl -+eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy -+MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp -+bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h -+bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv -+cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw -+EAYHKoZIzj0CAQYFK4EEACMDgYYABAAT1zpBNemPCT0tEx2S9qnlPxyBgxKl6BSX -+ghqf3MfOqQKIUdKEbxP0nr2QtIR/MwvBp4YhjxZ9AZtzVtXbQrULTAGlKK4qcf1I -+W1rYZ5S0b4dmvh2HbIv9wZLWKaZ/ovnvAJk949WDCbBzC3Dy1E3zCaYPefLHHNve -+dWX8RtFUhnTqm6NxMG8wHQYDVR0fBBYwFDASoBCgDoYMZmFrZV9pYm0uY3J0MA4G -+A1UdDwEB/wQEAwIDCDAfBgNVHSMEGDAWgBSQCOxEmGwiupA2ER7srMBydGO1aTAd -+BgNVHQ4EFgQUUl5SPIEseFNeneyuOidsw06L7gQwDQYJKoZIhvcNAQENBQADggIB -+AFAg/hFlaBT+WNlUmVJlAd6FYr6vvRjJiKxcNBZ5wElzxA5OjuGX6pLYiNkzXSy2 -+N/4nQ3a8fr04IB9Uhx5ncMzSKVkG+4mbN3xmR7f6zZuFWV/T9Aom3LIbQ5KYR2wW -+EvX8b2xbvd74rKAgavq/iuFRn8skQJGgQk9J2YEApOW9wkoFQRgziuu55Cw5GT+f -+w9rKgAFTN33ZwfWs86ELJlDOY0aX5373WGccuEKm8y+l2UVLlly125eezz84RSh7 -+5j0VUTjK4ZqpSD8yiPxN+vocV+nY52cgWxrVf7g7wlPunxXWX4rxR0z/mEVDDBAC -+y7cAxsa4HtmJexuZbjEh8TEgDZXDA24BuFXqtkwqnUpv8KghyKb0KgTQecP9rRhL 
-+b2iHdQrALnddMzJzp7Tn+jsR2A2G0lLLsIuIRFR/1eAxecgTMoxXV8N0SJHn6emH -+Uwif1Qr5JVw4UOFSLW3MT3f70hY5hMzrxowOapcYhyR0vbhGFrF1YI8YmAuk2m4P -+Dfd5Za5cnfVJiAdNBnNPtV/5wiViX1VJGCrIxgDN6B2VE0VQLp2gKwpsSmhqN4CA -+qHzAi3Yj3F8vQ2mWQeuABWLAJrIoxXpVYQvTf4uij0ARZRbajQXx+LZnAUsCi5eZ -+LaYrTVn8NzHlGQocUWljG06GaLY8GNqKPQrZD/+6hIYK -+-----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/fake_host.key b/rust/pv/tests/assets/cert/fake_host.key -new file mode 100644 -index 0000000..1e00411 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_host.key -@@ -0,0 +1,8 @@ -+-----BEGIN PRIVATE KEY----- -+MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIAXR6FCIgd+fjhO/WJ -+KwLTP01mBDtYkWbVE71jshjLLpZHunRpsYKbQKCwaDmMYLRrQnTxPgZH1PVKzguI -+7M7n9zyhgYkDgYYABAAT1zpBNemPCT0tEx2S9qnlPxyBgxKl6BSXghqf3MfOqQKI -+UdKEbxP0nr2QtIR/MwvBp4YhjxZ9AZtzVtXbQrULTAGlKK4qcf1IW1rYZ5S0b4dm -+vh2HbIv9wZLWKaZ/ovnvAJk949WDCbBzC3Dy1E3zCaYPefLHHNvedWX8RtFUhnTq -+mw== -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/fake_ibm.crl b/rust/pv/tests/assets/cert/fake_ibm.crl -new file mode 100644 -index 0000000..21a76e7 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_ibm.crl -@@ -0,0 +1,20 @@ -+-----BEGIN X509 CRL----- -+MIIDVTCCAT0CAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll -+MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMx -+MTE1NTIzOVoYDzIzODgxMjIzMTU1MjM5WjAVMBMCAgIrFw0yNDAzMjAxNTUyMzla -+oCMwITAfBgNVHSMEGDAWgBSQCOxEmGwiupA2ER7srMBydGO1aTANBgkqhkiG9w0B -+AQ0FAAOCAgEAsRE3oW/VAx2JPESuWDZVbKIX9n26BZUZ2mdmUocRmn7KQ6CAi0Ac -+L5YRUvYt7kmGo6BSA6rUa0TMoMBtFzfIJ6HQZzEoA/LOkIKMfNHsFFzpIJxghPYJ -+PfPkXoLpAOBcLDrYWr1bJ3mkvrd6Tuyx02wJEhVmTcF8W/18AFRPuEVg/u3vJmeQ -+yMdwQZG42kEslvhCTO688vozYeX+dXO1/AXamzYQZyEWk2cBQ28DIc5eg39Tq77N 
-+89xqWNK/FWKkF4USn9psiBJQpKEjq+P1jTgdpuN3IGv618hlxS455eGyJrbiTxjD -+osWGShI+ZVznnqw98aX558hnNWHic+5JVvdJAwNaMMuNNkApFdz328z9dPtcUAZT -+7mNcKAuY1NcqdxKTPwkGWfmZm+WndGwKLwpshBhF/ImiAb/UiDrLh0jGbQ/FDE3j -+DtWs4k8eXIibtzGhgjfewdXO66jw5Z55FajcOPgja3uw2g2KEhJ+/VP1YtCvCpkm -+NX0liKgKLzUGDCbzyrH7QD16T633ebLbak3CtNMMlxZRQO8DQDbDsEKybCG62f/w -+OmM+QmL7lRFcXDbtVNxBSQKaDBDlNaEGEjM8phz166g+UMD7M6xJjmwJx/G/QvUd -+98YQfMEIt8IlrIbIfHXnAlj4SJwWNfOw+SN0dCqD5CDEYzYVtZgrNXY= -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/fake_ibm.crt b/rust/pv/tests/assets/cert/fake_ibm.crt -new file mode 100644 -index 0000000..9f89ff9 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_ibm.crt -@@ -0,0 +1,38 @@ -+-----BEGIN CERTIFICATE----- -+MIIGrTCCBJWgAwIBAgIUMCRfNPXX7mCzJea1P3mCUDqZdB0wDQYJKoZIhvcNAQEL -+BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 -+MDMyMTE0NTIzOVoYDzIzODgxMjIzMTQ1MjM5WjCBzDELMAkGA1UEBhMCVVMxNDAy -+BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp -+b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y -+cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMRUwEwYDVQQHDAxQb3VnaGtlZXBz -+aWUxJzAlBgNVBAsMHklCTSBaIEhvc3QgS2V5IFNpZ25pbmcgU2VydmljZTCCAiIw -+DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALIvcLAKt2DM5rTc1nOhyVT2EbNy -+CA1Sy+l1mSloBRVqjyk464mvlqq9p/CDO1e6tWiFhdEFChuTYzzDOxHazLjPc2Y5 -+U+r0QYYY8KK6jLQDj4crChPMJoJ0HZbmlWe/3uMl7lmrVMyHgqqOftQ52etBCGDn -+d/RV7U4OC+MJ01ePYUcABxPh3APA+DDV5eZ2b1k74JzLvXa+SGA82MPDQYc70Waf -+CcsfCw04Adnc968CEUbNQYxdfXZfL0Uvi44bohuaB5b07KY4cCetdMaBIuowgVsa -+d3Zo3pI/vaj8nlAIVmYNbG4aVmItG2q+3K8Zt11A1WXMCfVBfzg4aHAmEPJzVD+s -+u0maCTrRwRfKudHU+FFI36x4aqauNj0jAoQtuACYLS+69z6G0MKHd50jQXYiGwKP -+LaTI+mWJ2+GBPMZpRFKyFlAXTFBFCIpZopAdfhgCMkbb212cARGD9N55xjx0F5u5 
-+kuXWhyF/0Zq7IdXdBlR9/0uc2I0z2P2RpQ1x1TExjqZoEt6WrpYuWPHtKqeLV1zu -+PonIPZFsKXJDtxoJhvhk/Aivv0329faKxqtIlH1W7b2BrrkC1UYh4+w9f/CVG2tD -+FRkXDEdgyknFsGarHSIGtrIr4vRGAh2p6a+7lNnb9GgpGshFIaOnXf1SXFLgdBwb -+mEPx8pjT7Fnu01HxAgMBAAGjgZkwgZYwIQYDVR0fBBowGDAWoBSgEoYQZmFrZV9y -+b290X2NhLmNybDAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUE -+DDAKBggrBgEFBQcDAzAfBgNVHSMEGDAWgBQsIzqwsaKXaQyBeov7u8c7q8IEdzAd -+BgNVHQ4EFgQUkAjsRJhsIrqQNhEe7KzAcnRjtWkwDQYJKoZIhvcNAQELBQADggIB -+AL+JmoykUGjMygjv23qdStop7VI2ekieYBKJANlq1eEoICLW0yIw9B57pxR6iFpS -+ClulV5vU4GvXlOAP7mTJJQWeuU5Z+sn9s+nLMkwZQj6QBpYwVru3SWx7H4XojRkk -+bVhN7hTrfXfQAkTLuol9PHNrGiTNGKRultBg6votpMSyd2wWgli7nt0QUydhquEL -+2kROaUlHRLo9cKttyy6MIypWlneGx9KC3X/UMoZgxCygpCDQycIp1oU53ZlHO4AZ -+7WeJ/FjEM2PIy6EiBnhrfmRimDjghPM8c8OIMx7fVjc5yS7KnryZ8HuzIeO6CPL+ -+/9+bJASuSAHmAZN3qKmiVUVfYhcbU+hKuCS2HRNkRhE/fZidp3K02OqSiRegiPRC -+TIcIXJ/SZ8M345yjGekAJ/M7RGIoirbdLYmpOpvHDg0qMlKFsCdpqasivPFjtLpX -+M1hvu9Ahz2HteKhm7WV9tOQ2lzMdqFDNTbxhhGzy7NJf+20zooTZCDwlNlo5JmKl -+Bg/Tns0rE5ZN2j5iVvv32Bp5CrY8oy8Xq8NJitaEb4JQKSLIya++rot+OTnX7inc -+LzsAwru4YgT+jGnHxEQslPcLMjuLdgZZGyXlKit+8KyslwaAb5Yv5e2rP90gYFFG -+5ZxgKxI1NfJZXgPXjrnDv/FPJZe+agqlTJ0RYCJeSmCu -+-----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/fake_ibm.key b/rust/pv/tests/assets/cert/fake_ibm.key -new file mode 100644 -index 0000000..92ddf79 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_ibm.key -@@ -0,0 +1,52 @@ -+-----BEGIN PRIVATE KEY----- -+MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCyL3CwCrdgzOa0 -+3NZzoclU9hGzcggNUsvpdZkpaAUVao8pOOuJr5aqvafwgztXurVohYXRBQobk2M8 -+wzsR2sy4z3NmOVPq9EGGGPCiuoy0A4+HKwoTzCaCdB2W5pVnv97jJe5Zq1TMh4Kq -+jn7UOdnrQQhg53f0Ve1ODgvjCdNXj2FHAAcT4dwDwPgw1eXmdm9ZO+Ccy712vkhg -+PNjDw0GHO9FmnwnLHwsNOAHZ3PevAhFGzUGMXX12Xy9FL4uOG6IbmgeW9OymOHAn -+rXTGgSLqMIFbGnd2aN6SP72o/J5QCFZmDWxuGlZiLRtqvtyvGbddQNVlzAn1QX84 -+OGhwJhDyc1Q/rLtJmgk60cEXyrnR1PhRSN+seGqmrjY9IwKELbgAmC0vuvc+htDC -+h3edI0F2IhsCjy2kyPplidvhgTzGaURSshZQF0xQRQiKWaKQHX4YAjJG29tdnAER 
-+g/TeecY8dBebuZLl1ochf9GauyHV3QZUff9LnNiNM9j9kaUNcdUxMY6maBLelq6W -+Lljx7Sqni1dc7j6JyD2RbClyQ7caCYb4ZPwIr79N9vX2isarSJR9Vu29ga65AtVG -+IePsPX/wlRtrQxUZFwxHYMpJxbBmqx0iBrayK+L0RgIdqemvu5TZ2/RoKRrIRSGj -+p139UlxS4HQcG5hD8fKY0+xZ7tNR8QIDAQABAoICAA3bS5edFbqk5JIPFGxEmhwN -+0L2UMhUbKblbiFAkgzSxpzVs9EAuU/iuLAezpONDJUVeENR64FjZot+ATTErwzzi -+ARbjgWelnAzaFqiI+lUra7d2vN9iQmJltkFcaCT70lD2y6zUepE3Po2V7D4Dy7MU -+SRsEFRt+rSgjRoBe7L0D7OwxD9vXdh0ingpqL+k2r50QX3zCCVdZH9bAFJlQr2Fd -+YQDqgD/4B4t9HgO7v9Uvw3kukBji0lVovIvUUTV5Z6de2JVAMcERZYNoZUrkkvsE -+T6LukXi3WgukglLIZmVR6KoiBBsh2DLlUBsCgbXqOYy6dH3omqOkdsi/9js2cp3O -+6waEBmtrH4qmOzXOnmdchg5zNs5eKVwpsrMc04sS74Xr/CWlUPlMsbL4lV3sdAsW -+jL/iJO/3VSUwPHPfIbQgt7AJrkP31ESDSGNN5ac4er/ltcmB1r2MI5kuPgWL2psh -+ILNFViwarwzPFHLJyiAYSW44p7kzvltoPyOfNyO5ekgeJKuYUfZiNyKYRwrStO4X -+FsukIbYLnPa3ZIb3C0RVBx+lTUU+eyGd/rt/rWFA3Lnvbx4r3FdcYj2DnRM1ukgK -+1QnmqUbWpRQ2wFZsdOPtMva2AUOurzv7Fze83ZyaijAmLnphnpUjqPbj2RYVWzwv -+R7FUHThkc+uzcWhcOnVbAoIBAQDXRKxscm8eurBSQwPx64Td7tmPqvkzANFG9Flv -+GC1FnwP/UpKXkEQe2cMu4hvVAnS3awVAfPkRvyPZDJbL5jjvDdLtXbCtbnZEEriI -+TqT2UU0cnRxUybVesU+cT95PAWiEEvxFTHxOlv2rtKx0fZrd/ft530V3U7UeH7fk -+lZbpEGqmbZn3hGt2UNAXItwi6FT1OjGmOXfBVZUOUSOTDeEX8Dtx+H81ReXTRslA -+QXRruHDZX7x8Tw8YWL5Cq0eCPv5+Bm2DxLkzk616C7icv3qrJQS7u/nijg+qGec4 -+aKqFGy+Gvc12cGUt6FoWG+6zS+NAihawxind+V/JYnIt1ulLAoIBAQDT5oPaWE1b -+Vkm2RqVscq5dnKT+ImmIyUfM7lC6Ff6tf7CyxPumHwrMRnqfas8L5KnvgVQi33rn -+OJ5SXoTSJDvU42eqFXD4fAKpsIwbxYi+pY2RPH+PxYNtMvWTMmKZnrqVowFAUHyG -+f6yTIHqIZcy4Ll8wrq+Z2ZNQZAMtllXHLgdlFxDcReGmWvhtEtF2yc36DalQPWuS -+XvC4Zf1ja37Mslst42MdYNBi9QxE4CKVIaDWnF1Yh5xq8i0DIbJt37xpweXtkT9R -+Z6Fefc+hx7A/A5i8bSK6QQsrdL3CokZHY2dbYqDSNlD+21QgEI7wK6ivkS7XkUD/ -+fSVIRD7SWYgzAoIBAAoI4ny/rNxi2XtMMm/hibUKwEuJLcqp3BeRpmWeW+Xl7rrF -+L54vFG3XutiBo8h/L8+pBnqmGLoyDcq9Yn4owjiqjU2RU34SKyMrODzqZZgx3AVc -+cYimSnUakNp5gqRuLWASvn3Aff7v3O1XI77eaAy3HTTmKofQeB3qXpkiPAGrST1u -+2IGIQ7YlOD4L2vUpnWQ9DTlxblqt0Z/0OlNNj1OdWDgM3GkwU/FQWGtNYc7vrxsC 
-+8ndc/Bgnct8Kuu+gXh6j0BEXZ4a2+Jw61aVA68f0ls7liVV9R8+nG3cusdw4kzOV -+v3Eo9h54uVJUhQEIpZRJm5sr8aGuUT/C/g2S+hcCggEAW91HtFUr1DkoY7lk5gsh -+xLuwW+yXTBHW6uU2YjY+3wDInfgAERjMGZtEdfBcKo/LjGXJEAVKxwsouBT6CBBl -+T/n2ayo4e8FndiFv3GpayiwDn79WngHG5IR/Kn1hea/yvASa+kLqeXTIYFBoTtGz -+WvXflr9kqZJF50g0iILwVRWDZzQEvzochX5SzRancJQ0k/9wM7Us+ZvnSEoO/BcS -+NbPtC4vU4FukfAI4e3OgCn81t1S6szK3gTXUhdMKA9BHYqIJCGE7zhLbRpfMeBqW -+MfthL+8wawbfzMsjqUmopjJWEKxFhFy/6H01j2EeVsjWrKaIZDQ4tYqPqzDK+26N -+OQKCAQAV5336ahvQ504soqkNpOiVplH0Ksl3r3/WuO1xr/Hq4A8YXbPFeSFkztxg -+kjZABH6c7rV3/8NHJx86MAxXCAlLTIkR5aXmPrBI+EYm/fle9q/AbK+ppq3IUHVF -+KXpost8YysCRlel2QrhkTln5PJd4oN2xoMWKPHEuc+mhxBMQdq30JtmlVRfm9JaN -+ba3PoW9ecZG5eLP7pZi+AW+KfsaSg9U54jZToYVWeTnTzd7LD70hstun9E+FZAyh -+lrTkZuq578ltOuUWwDgKOly0bCAO+q6KMdb7pbPxbtqtdNGbFpXaChLcKB8lwa+8 -+vpiaRjhZtk5MLPJs9mTHq1STrzL9 -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/fake_inter_ca.crl b/rust/pv/tests/assets/cert/fake_inter_ca.crl -index 7f907f4..146dbb4 100644 ---- a/rust/pv/tests/assets/cert/fake_inter_ca.crl -+++ b/rust/pv/tests/assets/cert/fake_inter_ca.crl -@@ -1,19 +1,20 @@ - -----BEGIN X509 CRL----- --MIIDITCCAQkCAQEwDQYJKoZIhvcNAQELBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD -+MIIDRjCCAS4CAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD - VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u - MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv - cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD --VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTIzMDMxOTExMDQ0N1oYDzIzODcx --MjMxMTEwNDQ3WjAVMBMCAgG8Fw0yMzAzMjgxMTA0NDdaMA0GCSqGSIb3DQEBCwUA --A4ICAQA+KQrjx/6nKzPggDpKEAzH6XxhUU4CZmyUKFirUdenQBjYLag2Nono75o8 --9DVK3vuK7aeg4tIkUYOBcOEgYx0wEPU6PEE+yOO6KErn7qilN9gRnDHOTrfT0iNY --Rabyeat256gyfrsS84ZB7MnbhecWwC6sP2NiH18VCprH865X6sm5SxAfez1zITV3 --YVtudX4UczqbfpDgP4BU5ERMI71tqj4gKjaHFkC0TGizSphiINDKPmUMbd1w7FHg --Ilj+7pNZS377GCX9JzoTaKLuMBiblkwSTUJic7Z2BJZlTgm18hhfT3AZQDVvkq0A 
--AEPxQzm3be3ZUb+zJvueTeizVHkd3Eufnk69p7w4wNRQMwfj/icm27RZa3bBpF2o --esr2Ptik9ZBN81oMAakONZ4Wxuf0n/KBd6VBjy6WkbalKGVoZn70Nke3+9HGWSIh --bgbuHt7XAlvsgVChtZWsGsyxYw4p2ku4T2ajUfpxqQY1DDCAThweuHl2FND87Kr1 --5sblLLhA3QdjQ0EavsCV1646xorvoyw7YdHkqCPjRb1FsPWm/IePbtu+w9/VjDRF --KcHgBZBWmmQHj/9ykSI9pA5J7R7Nij6sX6Iu1g2yKiPnXeRQFiwhgsxslNk8eJfq --cK4c4HhnNtXa/c8jHcbymwqkF8Qltz0cbEW1usxZ2u6153pyPQ== -+VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxMTE1NTIzOFoYDzIzODgx -+MjIzMTU1MjM4WjAVMBMCAgG8Fw0yNDAzMjAxNTUyMzhaoCMwITAfBgNVHSMEGDAW -+gBSMe9JFoed6PvsUVY3z4MU6KhnKCjANBgkqhkiG9w0BAQ0FAAOCAgEATjy3crOb -+zDvF2CiBxmDMvOK8E/fhQ1BSUOB2qc/OiqK6zb1Q2MFvni945+Q+TwbHJqWBfU2z -+QHuhOQHUQQNnIvMPXgjJuT32eF2Y7J4weaQOHNjrt1dwE6TPC1DLNUzibbjKr5x1 -+/scmCiXWYxjeLvS7aTACNaD1UmUxyroK6h4oc968ofdPURK54sAiddY+VBA4KW4B -+WlAjxGSErZk0DIg5SXakSQbTkz8+pLnrqqtmvwQXk92lyfrwsSiZhHlcfBrJ6+mp -+1Mho9H5r97mO/LRRV+2CRShqTElxVORbW3Q2ku6RGJHEUB7AssIeKBtEma8yfUff -+S2bX0P82ETUCT3CPZK29QJ8eaxxH+PaRnyiT/W8vWuo+BRw8XqQ2AsM22vVCPP61 -+mg6VNsPJkJ8UaUF/S41aGMS21HNGk4Ik0iv0wiuu3q2rcFwGuuWB/gRUfSuPfFSO -+K5sEf47dBPB5kMZCSEkX1RvHPWAYwd6g8GgVdlQEt/IIX4ChoASES5JNhDFar7tg -+6h72VhfTtwEkGqg/z8vI1zP1Qzl2bZ773B/k/TAJ3j6N1JXiHUKioL1hAnA9glrj -+3hnZQEQSewPf8opPe/l7UEQNXGlamYic5WRsm8xbN1KL6HARLqxkiC7K8SS6U99s -+T7L2jh4egE5kyM4lLOFYfEugj4s6Soq6XuU= - -----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/fake_inter_ca.crt b/rust/pv/tests/assets/cert/fake_inter_ca.crt -index a7eb145..f30df89 100644 ---- a/rust/pv/tests/assets/cert/fake_inter_ca.crt -+++ b/rust/pv/tests/assets/cert/fake_inter_ca.crt -@@ -1,38 +1,37 @@ - -----BEGIN CERTIFICATE----- --MIIGpjCCBI6gAwIBAgIUMp+RLATMshrQnbOfPkwKoDNyYhcwDQYJKoZIhvcNAQEL -+MIIGjDCCBHSgAwIBAgIUKp3mjstxJ9gXt3S7lpKjri2dEUwwDQYJKoZIhvcNAQEL - BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y 
--azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz --MDMyOTA5MDQ0N1oYDzIzODcxMjMxMDkwNDQ3WjCBvTELMAkGA1UEBhMCVVMxNDAy -+azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 -+MDMyMTE0NTIzOFoYDzIzODgxMjIzMTQ1MjM4WjCBvTELMAkGA1UEBhMCVVMxNDAy - BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp - b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y - cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAc - BgNVBAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD --ggIPADCCAgoCggIBAK75zkJO8mpqPrD9vlSsfJgW7hbioQpuphSo1+3q9cAAWKFg --TYUcGBUNR/lUVdYqgzo3AglUwldWfeO9mBCIGNSN/heLFt1KzNutBsnE3YEeGKpM --7nHhMzh41otFdpZEZfrsGXGok07dy2mEV0mx72e9ALWbXFhxYsdWdSSVTlBH8xcd --38rAzfAiTbjgAUnTIdCPjAJKbXSDBXGXZ3+iuhFxNtSWyJr1AsxPzESErCPzUQjr --m8TM24lKq69zimTEkN4uwP5U8s2JPzbKosg2k24RbpDgkjO8iNK7RL9SMRUE8daP --+eru5EwN4BlZfsNpZDFbILxbt/2sxqmdsx/Nupa5ZAfcHRs88p4l1D3QIiZzaSEc --nCotM/kmnHWbgeJbkGbC9fD23dNJ29uqZU0fbRnG4HpSutrYD6lPg7PXnMt5tT+f --0+wQds38woXT9qW/kN/2WtkVYDhyVjxCgD8iHOZpz2LUmvJfi7Gz9B/DeW1dzgbo --cGxz9ee+R+T5KcKg+XvHD6slk82GrSM21b7zJeK92bJtjkqxBtQf+YgcKtOO7QX7 --37C1XvSHFnKvyyRJrldJFGEKfK2C66hdASHRdbUhWHFo1AA7VqzKB1fU9M2+ltUZ --zRpYD7X36OtRY1KsHHn+SVvsn404hWwgPblZ04nsMPanj+jsN//6M9r5lMezAgMB --AAGjgaEwgZ4wOwYDVR0fBDQwMjAwoC6gLIYqaHR0cDovLzEyNy4wLjAuMToxMjM0 --L2NybC9mYWtlX3Jvb3RfY2EuY3JsMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ --BAQDAgEGMB8GA1UdIwQYMBaAFNnX872GsEyiVMz7up1p/Nq/K+0VMB0GA1UdDgQW --BBT2zEtzeetYaOXAVw0Z9Pda1V9n1DANBgkqhkiG9w0BAQsFAAOCAgEAb7xPI93b --gqpXi4E/hAl7Vh354kKV+1smd1Omf7mQZbTrKBo6o1s0ZpXCbC9+WzK47R21VPNe --EgZ3uzTkURu4AX+5OnAMiWDFAGRSDt8ZXk6ZukWP7OmHsnLCu2strdhrvC4EhMF5 --G9VPIQsTx16CpprrVjzVzJg4i/X+U9dypvnAQeneyz4Ul/kPr6di8bOB3FiBeEDu --dOkVTbnlDa+wMQlCvqlFroFjEHZBKK/+PVIx9cJYj1grmzgqzm2FGUs5Wvcixpgb --uSHCQY9JP8Hy0xl3wx58VaymUK4EMfs612CfzOMClaiooDYuZgzVfhalU6g268nc --PQ9RCRJtJuda4mJ2H3Rag79sIiCVV31tE6tLXjOGebuO0vEB8wOjJc4YW1gtrZFy 
--GltT+HMdFgjO2c6HynpkmtqS8axQAw2hVaOpdJbDW+R0jHihO98FAgXR27TqTFp9 --sjBacfITeYRXGjQTDU1qxuEfoLnTZRIct1TjRTI1HBT8fl1exxgKUyEawH3MUCo7 --LsTPSNASKkJH3Rp29be9xTejUx2HUUwOE/DIF0HKaN+aAc8TR31/4HvC3bf5VPyD --wIwazpjVDZlQ2w+Wry1zNezNCPKiWtkfkj+TkT32h4ZfQEX8t0MUpKnL1LOSV/8h --PV357EenQb+f9DeC4BIbmipovLaUWmml10c= -+ggIPADCCAgoCggIBAK+kNJHAUL4dcMnjElivyuzkO2UwTUlJcKQCBmWRHYQjRbP5 -+akJx8SZ+wI0Spo00hG4sV2BxA83J+Yrked53DEzlR+RxojA16vXzlUO8c9KH2dJQ -+E17PgZnu7/hECmQcT69ZJAOh78ILEXRYk+2ixSTaZRTtchJdzfXIQ0633O2Mi/7z -+z5idQmQeXbitC3QIZeNzwitl2FLXPIw6MUnktOKNjQSNxMr9AU8q1cfOoICJQs0J -+wYVpby0dv1z0f2N21JJywaeAImHa2h58sSX6uqwOXtzdwcPlr2+iJ09YwHM8uO0D -+rtPx3bLHfkvbpMd5cCKjSeFozP1nVnlKJUmAZ8UXk4MAFKCtuLv2/InA8MkclZmX -+1IzBbvYDOw7AnsAN2VQYZSgM8vCmnRCzpTmtvb3ysSo32sTUjGW44giuRtGqh5Ct -+LeRpZZVm6zDFY6cjpr34+3Vc4pys81d3Dq+Sos4YVPXhTKW3I1VtFIdCeNyey7hv -+epjna6/JvOzQuwK90+t9VmZk7jTY2WOUNXJhzDTeDku/aTMIeXUZrAxg3pOvl/wf -+SN5i4Gauhl7URDx3nI0jc4Y2u9NGFi0TYJMCRVVLknAcre5cDKFyf9ts1gDyxTDc -++orszCE3ZQzXZeEY6aiQPihhL1YDk5SkkTI6XHmUqLW3HFlfg2aFH/oI+VGBAgMB -+AAGjgYcwgYQwIQYDVR0fBBowGDAWoBSgEoYQZmFrZV9yb290X2NhLmNybDAPBgNV -+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQsIzqwsaKX -+aQyBeov7u8c7q8IEdzAdBgNVHQ4EFgQUjHvSRaHnej77FFWN8+DFOioZygowDQYJ -+KoZIhvcNAQELBQADggIBAD4GmfmI5R6cy/Sp37buyO53azgw4RvVclIy/2qSPHVX -+Os0pPPIIXLsbJMy7M6rDvKx/thZ27BDwms5dNuDynZ494XjqTmwzbBr+qEIzCNpa -+QiX0MHf9JqFq5hkcZihfJ8PZL9JWIjiRfMI6AERd1pU1QJI/G8ha1vAkPfcA7GvH -+NBBwKBm63iHYhp+zmnEEh85lpj4pEq+hOLK+mJxu55BodbNiBlBGu4EbalZlwXFp -+7lGHLPkxuXZndrlrm8Lk+hi558NgNFxqz6qYtUc7txajViU2xFjEkTcQ0FKNiFpV -+eHn5TNjwh4QucnWc7wI9hcDoADrekTEr8mUKrJXgxaBLIXEShC0ZDBnJsEwW4xbg -+GQD9qXyo0h8cYx2NngvL+9Ee3rzdYAaSfAnZOU0xxrqZt+2dstNqi3pLKdS6RpA/ -+3Dt3cAnf0mQQSUP6oZQpfdSEJ9uggT9h+kgJz5RGOQfEELKA5RxHtTe7249J3vDv -+hoy0Uy6+w3Ji5AQdn3G5uUGxLsBh/uS1dl8hq4gihrpPbKVJHVxqPL0HCx7DyhRw -+hx53GXhGIgZN7QSGTrB5iz9YyzejsnysS0Um+figkxtC1atqhVkqzMDZBHUwRPfy -+wq8PREXoRITJHfSkg01bInRFMMNWDQuPwSHDX9OYNqpkXzlV8/ao9Rx6JOtiG0a8 - -----END 
CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/fake_inter_ca.key b/rust/pv/tests/assets/cert/fake_inter_ca.key -new file mode 100644 -index 0000000..18f31da ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_inter_ca.key -@@ -0,0 +1,52 @@ -+-----BEGIN PRIVATE KEY----- -+MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCvpDSRwFC+HXDJ -+4xJYr8rs5DtlME1JSXCkAgZlkR2EI0Wz+WpCcfEmfsCNEqaNNIRuLFdgcQPNyfmK -+5HnedwxM5UfkcaIwNer185VDvHPSh9nSUBNez4GZ7u/4RApkHE+vWSQDoe/CCxF0 -+WJPtosUk2mUU7XISXc31yENOt9ztjIv+88+YnUJkHl24rQt0CGXjc8IrZdhS1zyM -+OjFJ5LTijY0EjcTK/QFPKtXHzqCAiULNCcGFaW8tHb9c9H9jdtSScsGngCJh2toe -+fLEl+rqsDl7c3cHD5a9voidPWMBzPLjtA67T8d2yx35L26THeXAio0nhaMz9Z1Z5 -+SiVJgGfFF5ODABSgrbi79vyJwPDJHJWZl9SMwW72AzsOwJ7ADdlUGGUoDPLwpp0Q -+s6U5rb298rEqN9rE1IxluOIIrkbRqoeQrS3kaWWVZuswxWOnI6a9+Pt1XOKcrPNX -+dw6vkqLOGFT14UyltyNVbRSHQnjcnsu4b3qY52uvybzs0LsCvdPrfVZmZO402Nlj -+lDVyYcw03g5Lv2kzCHl1GawMYN6Tr5f8H0jeYuBmroZe1EQ8d5yNI3OGNrvTRhYt -+E2CTAkVVS5JwHK3uXAyhcn/bbNYA8sUw3PqK7MwhN2UM12XhGOmokD4oYS9WA5OU -+pJEyOlx5lKi1txxZX4NmhR/6CPlRgQIDAQABAoICAAzvUnVE5NVZaPHfPH3GRXMc -+bEDblad+5nIXmZW/gf1WFScnyDLoPf6MIV1KSTR3MIUGFDG8pO9x70QSpyRyXzgm -+/vxEf5GeGOV/yKduQJfZrBmMTt7huH1H/slpZlwAx0AGOCwhz/y58LFBKDS9Ethv -+0nef9HGEkbtG4ikRCo6+dd1i4nAObcIeh9+ms8QZ7bn6T4t/YrbYFcQMcY6L7qPM -+EZ1rHNnnl8H3KTr/nSuWIDKmJCexb+/yQ8JiWaLPMKCv3ZHasn2DHuLVozylKjvr -+S4JnvOIyvzChEBTMeBDMgSBoT+bZJi7pOOR4gpozl+XfIVzdqVIEmxZP1u7iq1GV -+0jS/NwIAiZyp0e4YC+joS81AaskqoiTnk59inipWgtXaqz9peSd5hDA2u4mfSCVY -+aKHEDv3dUYyc++JTr0qjAlxuu0SJjPZMTeIbMoVX+7RTwEHyQoiHzNLzBXo5yGm8 -+b29oOqA4DePMt9+J/h3xuiUH9SmYxTkSX6oGZAJC2qEELGFbpN61zLSjJOlv+wfC -+Nmpd8O88q6dSG9Tk2o2xeLRXAEulEtmIwjr0x8S10pieJ+ZhoaHAyIRWnbVfwj78 -+pKLdpd8MPToTS4f98nkkVfbmc7ymVdz4JhnYTco4aqmtai/8yVUzzIjOLfJ6bp1s -+N9okI+1fKgTVlyNj40opAoIBAQDeZqJLT9DTAhxZqa+ZUYc/ztoKiT1JLXDZDWhO -+J2MVqFdTs7sAw/NhVx7Buf2VBmRBZ8jW/izrING228bCkpIvF8Kf5Fz5busJcJB5 -+E2xhnIKRVrnw30JCxF0dGCqpfA79GyhfSsAdcx8ou0TUAgznvYHz6BvdYL8VuGuB 
-+YQ5uJNSykrtHsZBwsk143VrwmUyL/HEiq/btVJ5vRLKtbR59+KOzqgZKTgjDZDm9 -+8HwA8Nw45GqFdXUIHJwCwbK0+YE9qW/QCLDtx3wct+E1fs0C6TuMfLrh5khHUa6P -+EZKLzkTRm7VHYrFFSGQBQBQ9mMalEttkrHmnHwfd5PAgCGH5AoIBAQDKLSVff0GB -+5uadXXcRMRrhA+crUud8RUi9BEXdcUJR4fLosJl6dEBkhrC9pxVpfczAL5zXaqFg -+Z4R3AinWB7trIGC1MaVc1CZenBtiVsPHNzOkqeZdcpK/WZcqJ/6AiDrXWYDu5N2n -+hsff3Wtp2QqqhrrsP9pZnyrikAYeBvtuys28wpe4L8fZhwyRNry2jziIlmomtHyX -+vr1xjfwQyVNno0RpppgAdul3RqBUV1HCpL7pndhfwWRCnW7fVDawYqSpIXUHjW8E -+3+RgpHh0/YtLNinRKe2cDYorgxF02B1YCcSignh1msIbuJcH1MI7N2C0C2atiGYE -+eSK+R12HcM3JAoIBAHaHXJemwjSzO0jOFrgvq1VmeO6ElhUaErqbWqvMchJo1aHW -+eCPAS0XlmI7HAU8bSPNSzMdIT8hAhYRfPV8VnrNahm+Q1bxaUQmG5Hii0XB0aWHs -+Rs0JL9dFsBqBdrs9Uv+yKaIfxKPtZv3eUKBtN1OKvGexnKgvl6eL0j/x6i7pkjJZ -+4VYkXEazwHZaAs2X5iP1Npaz77YtEwNaKaAkN8wLZ7OpOhD/5cu87sk8Edqug7AO -+jHb0UpswJDWT5hptn2OtmdnVx/XyC0OC/JP2MG0MwJ/vGeqrQHpCHjZBt0irdiIA -+SmzxPGkgW6wO6rqpYbU4h5TwFyXqc9be3Ns1nIECggEAYZUIxN8XLyvTg4DpR1L8 -+Nj88BJ0vPbvzL8gwMIHKENuN8uHKMmCJ8/tOsztCCni9qsVQXmkJGw7b0NUqfDOf -+MkWeZ9Zeij+bhW6ziPN361+pfYDDv7bdPZ5wZ9iF7mPSgr6gjK3KjmFvd43Xmm83 -+xrbg4cawDTHV8SSyzytvkDoyszj5Id1uCIA2gKB61WKrVsHC2oui11so9PYjA7co -+mo0jKBtQomjCpt9f41WCEQCTZQ7asN0XF1AFg6WR//CcqUWMxuhs/V1TTZIU0eLO -+qK6r4FjsZrXiSr4oXs0w3J1aW1W97oWTyu19eDooxYwlEMGv/XIoS2BsdIrdidHY -+gQKCAQBrA0X+AwT9xgEwYaXnA9RINBPSnzruv1giHHAmY9iS0V6BNJUzlJDyASdT -+7drV0YRY7bKCguZYNVZh3o259OXNsAOStd9yiyFzZr6bAVvTVXIkrB040Rd1xptC -+yl+kxYEfDeENkCR6h8JkFDupG4xF0PT70sE1sUGgFg3585nWzhN/s6xC9vxtBNHc -+R+a1UmoH8pYSONttldtW7g2kB5uuQTU0lZVSHDj9B8z0Twj3b6+kQlXt0ielrVV2 -+TSVZcmbIyyEYBDcAxOCvHN+rzKbP7H+7JqHVgnbe12pnVoN7WVMbR2v3F4pK+0aA -+M9Wd2js5nPPh0AN3Px1643vFYcAQ -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/fake_root_ca.crl b/rust/pv/tests/assets/cert/fake_root_ca.crl -new file mode 100644 -index 0000000..f78b649 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_root_ca.crl -@@ -0,0 +1,20 @@ -+-----BEGIN X509 CRL----- -+MIIDPjCCASYCAQEwDQYJKoZIhvcNAQENBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD 
-+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMRYwFAYD -+VQQLDA1JQk0gWiBSb290IENBFw0yNDAzMTExNTUyMzhaGA8yMzg4MTIyMzE1NTIz -+OFowFTATAgIBTRcNMjQwMzIwMTU1MjM4WqAjMCEwHwYDVR0jBBgwFoAULCM6sLGi -+l2kMgXqL+7vHO6vCBHcwDQYJKoZIhvcNAQENBQADggIBAJB5ERfMQEZ5Pdc3A8+d -+gDiY53VEdA0Zd8MDv+j+Mgu1qn7IW2rpE313yVHieclZ65ReEtb825St8UBJtjiZ -+9Vd4lK9FUQKKCblCYNrEbu9cvqNeHDGxldQPUkEoz+z5kielcEwD6RUsK5fSYe3Y -+6E3jc9mMHw7xWCSiolvNr5y6AUymfu22v41qgfaB0yNe5uz6vRgRoiy+OTP3Z7oc -+ls7o73F9O4k6QYdba8us4v0TSwZAAPO16TsHhmyHcBe3w23UIusTU/c+6A++hGdU -+TNUW3OZl+hv+4BcnClrVBDfWiExOMNzIhH9hzlXQ2qNPNqz2ymlH8Wgpf1TBYJ8m -+xe8EuGGrDFuewa45kA8uxuHCiSLCsBowDEXXkswaSF4E4yzYZqcNDcLOedK7vG9G -++zXYHOpLsZMyfgauxjuWtwR/ma+ub85CHy+eUC2waI+Mk0Uk0Lr5y0Jdp8ztxakp -+UlVHYNU/Q+kwHfmKztkOsedomOf1/8IvzE930ZB8rv0G8ok2HXYvA3+r3lcDSG/V -+7+yq3HcJezw0XCDtc5mv8dWI6pUv8siWFKok+hlkZpeSXgtqRhF2rzI8yz+lk5Cr -+5yCWroPI0T44o2RzjfGZAViRH0nnYF1GoBORSZORppubcjmiTy46OFG/fiWKGKV1 -+sngAoQ4TCDQzId0zwGLtUCHu -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/fake_root_ca.crt b/rust/pv/tests/assets/cert/fake_root_ca.crt -new file mode 100644 -index 0000000..93cc5ec ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_root_ca.crt -@@ -0,0 +1,37 @@ -+-----BEGIN CERTIFICATE----- -+MIIGXzCCBEegAwIBAgIUC2OH79tloDOfMBeyDBA84epTMKYwDQYJKoZIhvcNAQEL -+BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 -+MDMyMTE0NTIzOFoYDzIzODgxMjIzMTQ1MjM4WjCBtTELMAkGA1UEBhMCVVMxNDAy -+BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp -+b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y -+cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxFjAU 
-+BgNVBAsMDUlCTSBaIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -+AoICAQDK+jIoIZVYyEq7ZrxfKv/TQQWCgI5C8BewYkB+/rt8NY0fn/aAZbvJdnDX -+cQCLpCeETDi7C2hGujnZQN68otr2GpJVQ/kgKnM8yU/p9yBSUan9Zo/195a9YytF -+6Ys/Yc0HApoPtB3t85NHNjAvhzdcBKWMb+/2FiJ61gdnrO/zORnxp/BejxTMyB6r -+0mhhVX4wwdl5vfkX/qS3TL0rajEz0V+SwneDiIomoVJCnco40Km1M20qU5L7EpNH -+IMEP00hcj40zO0jJ8cGDNoKgTHsZPovYfFmWITVOFvaPcKaloUU67dYEg0fv3ypX -+aYHRszcd794VOWQ85r/mlFSSELwoUrVH7G0t6wnzOawW2kk7ZYndbo+Z1ZFb1Ul+ -+iVwUAHzx/ylmOORJNM4JGiYyC5+1481MLeB+37+V3TJaBe4IoKqVo/OCBsaPt7x1 -+rMpZLw4gXj3A0Hh8gi0z5HDkKolMSawuU8dpFeI0GN+4hJN9DNQh0OTGLEfzeo+1 -+lNzsF+jGT9B0tApWhBq/QTyHXvJREVx32hPd/1X6bVbd9mik0bexg6bVpZVNG1sn -+7GsVO2wq5OWKe3UtSh8wJcjGdW38fPAB46oIBQ7x0HGdp/KFiBTWxrexLThMFvYL -+wPtWedj8ntgOBoH5MJgL1XkVYP+1EQYaBJNfswvF4CrvPRCtCwIDAQABo2MwYTAP -+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQsIzqw -+saKXaQyBeov7u8c7q8IEdzAdBgNVHQ4EFgQULCM6sLGil2kMgXqL+7vHO6vCBHcw -+DQYJKoZIhvcNAQELBQADggIBALZNLMgm7RqIKY++Blc2qeIDrwlx1U/PPN/PsZsN -+cQR48Bq+vQ4cV9hOcZC3jfRbkkDho8jxVTXdEOcpDyU4zNSLG1+b1FNRvqjq2L+d -+ow190dYqvfgbdNFo8V6kLdRFe5aCLhHTWGLTbl0vvkc99h1Lt9ZXr6LzhFwbXSGr -+Q/qJoLZFVBwJZmfZY3VmL661sBcTmzDDGJxg00fg+1jCIk2Ot3dn+7jQ8g6HYMQv -+8GrqGKcFia8fXcTTmcH+Swr8rps1MYypjkpX/zVHsFuENdqZNkqU+OLTaifkrBuO -+6evTTjEOcDzMFs5Uipjvq7XOBg5rz9BKRXrjDMdI4CmUzMR272lToNw7lHqqNDKi -+nxAzPjPWVwXQTu2LFY3NwubWkPAVsd6FsHKzQHlq97N4sj+vp6vctaEUgFS4pUlL -+tbjw6AtQA72z4uIpcAzE2ctTj831QeQSZGKBUpeNPjuK/NjytXQm8MvW7LI2OgaI -+H5B9+KE9KjqijpNiiOoidYVwFRrZniIQii06qXwQUgz2gGIzYPER9+3PijrtOY+t -+K/vS6NohUYb16PhbbvzFktSGBtvp1kUF4fB2NJh0z6OiZXVEHlCYEDTv78RS36iU -+PXijR+rALi20P4Sg6picuvmt2epfSMQ4ynNxPhbSG7wMp+Zp5tOrZcmg4jN0Yt56 -+tACH -+-----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/fake_root_ca.key b/rust/pv/tests/assets/cert/fake_root_ca.key -new file mode 100644 -index 0000000..ad924fe ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_root_ca.key -@@ -0,0 +1,52 @@ -+-----BEGIN PRIVATE KEY----- 
-+MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDK+jIoIZVYyEq7 -+ZrxfKv/TQQWCgI5C8BewYkB+/rt8NY0fn/aAZbvJdnDXcQCLpCeETDi7C2hGujnZ -+QN68otr2GpJVQ/kgKnM8yU/p9yBSUan9Zo/195a9YytF6Ys/Yc0HApoPtB3t85NH -+NjAvhzdcBKWMb+/2FiJ61gdnrO/zORnxp/BejxTMyB6r0mhhVX4wwdl5vfkX/qS3 -+TL0rajEz0V+SwneDiIomoVJCnco40Km1M20qU5L7EpNHIMEP00hcj40zO0jJ8cGD -+NoKgTHsZPovYfFmWITVOFvaPcKaloUU67dYEg0fv3ypXaYHRszcd794VOWQ85r/m -+lFSSELwoUrVH7G0t6wnzOawW2kk7ZYndbo+Z1ZFb1Ul+iVwUAHzx/ylmOORJNM4J -+GiYyC5+1481MLeB+37+V3TJaBe4IoKqVo/OCBsaPt7x1rMpZLw4gXj3A0Hh8gi0z -+5HDkKolMSawuU8dpFeI0GN+4hJN9DNQh0OTGLEfzeo+1lNzsF+jGT9B0tApWhBq/ -+QTyHXvJREVx32hPd/1X6bVbd9mik0bexg6bVpZVNG1sn7GsVO2wq5OWKe3UtSh8w -+JcjGdW38fPAB46oIBQ7x0HGdp/KFiBTWxrexLThMFvYLwPtWedj8ntgOBoH5MJgL -+1XkVYP+1EQYaBJNfswvF4CrvPRCtCwIDAQABAoICABhe0JAvnWYwK4OIouwCQP5f -+pjEJt7WjF2pewZj92LY3GnSbmNXIYTL1J43rzBb4D06RIEFE7gY1QdDV5b2xsqct -+1Y1V60lzZKv9t7yYUVHQ6vS5JbvF4wa7gMHqIoU/ptrv8wjDpXxLwc52W8ljwL/U -+Rv7aScNlhkpH/FJJr0hSqMzHpH1AofdcZ7eq5vbDdF1CSRbsOkYpQCRJ8G86rCGp -+QoJfxZ24p8ME8i18EyrzKA0mLuCQHED3n/tARtiG6P9ECmiw9jJY9n+x7HPL+v5d -+m3N9//ud4m/ieJlhpdXpIQ4hlDqtJdalGB5aqp2U7kndN5rwwxo/fPb1ru8YAfSB -+TK+5ikIn1lfzkiRoTVAmZubXqSSi1xiudqwL28euIDn0f93aiEInHO7Vx2o4vpgQ -+MJ3Hg5XaGygUAR+ZSnQ7lcv4kKMpSeqYuZSRRLs/RZ2Kw3jCYpyQyr1Ii8qH3H9S -+hUqZqUZK4splagRureExDkFrGvo5cBsmMGZqNfG8Ix9imEC4kRhhraIcmziwY2Ca -+mKqGH+sGpcvHX6vmDe7wZb4fo+Mkkacext42t8dtkPT3nEJhsR0xqmcgvyt6yP5t -+y4mv2hGeDEsclpWNfuug0DeOwXjClATZVrP4baL4rGrP6AGFDiRz0PbWmaJwMLDZ -+D+06rj6nO9Ibu/t5tkWRAoIBAQDw2JSTzLEdrkX4cTaHE7pLWY2BQuiIXdYOdXbW -+jkPytSgqHfpTckCDl8UJWMBzo87xDb5Q9Hmsa37Mj5HIz+lGxLPukuhiRUnu/XJQ -+AxzLXBBmdIsb0Rha1kChNASPHXo2Ozi9OsWB9UC65P+U1cGmE882A/Y4p5ak7C+q -+iQk+IRgSqKrLIIHFG0h6Zi0f+DlXMtsNQ7mjDk62WjWzneS/NPwN99VwGacGqI99 -+IOhJIKfhpbIXMg2Wo0Xj3HwaHWccK10Va/x8jvjunX9V0px19yCPnMsMWTZ0WN9F -+CRbeD8zCuwasjQkBkqiqMyShk/gs4K6J9TMc5tprB2xB7+f3AoIBAQDXv6WWdsRa -+NuuMrzJ94ZLESUIHBg/u1uGJgvtckpKAm25SnEB2C4d8u09zey0BsXsxBKLJ/HAp 
-+Nci07/dKkDlmG6+SkVhcIsEAokw2EFWG/+EQJ1Bd2gbX9RXQv3O2sda/2h3QB8Vu -+BUmvK8nwpFaD5eqrIeFxBhao4vHon80Bv1L1F+QVfbjOgV5eS146VU7Gl7b/LuMk -+yx3I4HwWqNbQUGTnVfC6wprae9G1Tamui+PnCPMdVSP972gxMNfhqkt04BN2j2A5 -+/Lat2VdL100W2XFzLEQmfD4sAlsJQ/jTcGhDb9Brxlf9kQYt1r+a8igcJQqoz8yr -+1wLabN6FKuaNAoIBAHALwit0ad2uCt1HBiAXPG77jYpaL0XpqcD2QsAUVWYfgzz8 -+z01s9LiDreXoRThHN+oLA8QzyDs/kzDlheQPXa5PqonODJBTPc8SV9EDcazl+rc/ -+dswNHbB8xnp4cbqG8ykxqfbW1bXc/C02hfSe8UUrKBYwB6dZyAqX2qESuZsO2F0t -+3K89Q0IIrFJNIKcj2sFHZoMoQ7+o01OgaiVSym1t3+k7qC9Lr9m4J3EGEyqaJ6Ah -+btW9snanJMeZ9p5LmYGQZvClWUQ1W3ffC7NIlQOIYbyOLCRliKDeC5jZXqsWXKMn -+UTaLMmpp6U+tFViaNzXhnTGPQiUq/OgX/vQ6AQcCggEBALGJyY+ZyFacnxU0Do68 -+E3Rm/GifOnlGZm4sVQCGtPwT66MbZYg+UI357ZWQJSchj8h0kik5DGs9ER6j4ZGA -+QVufKMmpxVZ8WupUo/ZRVrAy3FfYoi+4/Ky1x+/xvBz5F0jlCmdoHbU9sLbvh8v4 -+0CDWEFgnF0dUxUzRBFRzO9ZWRMPflxeAT5XPq4JY7v99t8eAjVxMjyp14tqssDBN -+XAgsP/yGIgTto1RrU9SoRhuWjkJOgREAQQ/z9H+WO/A9nL5ermV/8qWFKibBlS6Z -+y5wj71HheWtaDidVAOkNADOuan4kAxTNMRc+QiFyeFXfM9aFPNJRvZEi6/wmXq6i -+8PkCggEBAMK0qSiZ0kBgTqgYhpKqCwEUgEueTC3EloZQ6CHV1vTEfsyM3gEQ2A5r -+8+viQf1RD4UloCWfOF1CA9SqgtXAAIQyGVAATjG7sbUNAK7jac9UdZOS8QSkcJMS -+UIHXndlct/vijnNe/htGCcu+C2KCJz1lfW07DB8WOoyP/5e7es7e0RGjGgGdRdu2 -+MyCXJ3lG0heiVUp2d+5R80UAZQdlHuzo9sItxSUbsO639Q1c+Cew/cM6YPxdcTGo -+LWnAsG3yJZgR3pipS9/GqISY725c8QJijYMG3GFUBD6pMCdD46Kl/rfjUsYK3Aoe -+2lXPecP5/ilaLLUhs7SijqHzcKiNL80= -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/fake_root_ca_valid.crl b/rust/pv/tests/assets/cert/fake_root_ca_valid.crl -new file mode 100644 -index 0000000..e80dc5d ---- /dev/null -+++ b/rust/pv/tests/assets/cert/fake_root_ca_valid.crl -@@ -0,0 +1,20 @@ -+-----BEGIN X509 CRL----- -+MIIDPjCCASYCAQEwDQYJKoZIhvcNAQENBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMRYwFAYD 
-+VQQLDA1JQk0gWiBSb290IENBFw0yNDAzMTExNTUyMzhaGA8yMzg4MTIyMzE1NTIz -+OFowFTATAgIBTRcNMjQwMzIwMTU1MjM4WqAjMCEwHwYDVR0jBBgwFoAU1p7a8iaQ -+sGamSdR9585yWIW36CowDQYJKoZIhvcNAQENBQADggIBAMcBN8PztJELdCo6fQvw -+C9ZU41AmBZsMdIoacgacupt94gHf8SAO1Ric46j3KERpA0mWKAx6RH1s3OX2qN/C -+YSrVPIZ076Cn9EMVdK9w9hqt6SYHoot9CwPutF6BBG5O2uN6eAsEM21B5Tr6xTZ/ -+SUj3fO3+QsePDL84BRfU5vtZcGj1BkULopWwibG82ayEUlbY3J8OPjK5YKuHtxJd -+lI4yfkmrEPwHEm8bwI6lhaIxrtTjoVgoolw+FqtVdVdU6O254AUKwxN88bHoA/sk -+BOr13AoHXiKEBaEJhocTLJRfvUZVRYGll43ELsy60XEH51W27/uULnC92vBkp4Q1 -+5hPwSD0RHKfMwsLKqoJaWX1CxfdttapCfriehMXJlv46GPmq6cLIEbbHzJKsyJhN -+4mW8Uiwn1aM5EhMauSLuuwNp8QRo4rmQzUstxwn4O/3HzTcjVWbn0XvBXa2S5HtD -+2kyWwN/qck1N9aXYk/sf2A2py2ECDuXc/Kvh1kDi6ZSaKA0VSS1FX62HvdqZKQb5 -+ZmyoQUpg8yv0hholaZd9Jye0pwww/K8CFddsyUMRYDSj0+qiw4pgx6KK3ElKnHeH -+uq+iLxDLWTqrWQw6k0I92B9gSlNKR3Z6Zq2UTxo6hB7IYgZv5nxx3BRxnTm6aH6a -+pIoxgnFnbNnyUIRAkOCHbFqy -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/gen/create_certs.py b/rust/pv/tests/assets/cert/gen/create_certs.py -index 0d972cf..21c423f 100755 ---- a/rust/pv/tests/assets/cert/gen/create_certs.py -+++ b/rust/pv/tests/assets/cert/gen/create_certs.py -@@ -4,13 +4,10 @@ import os.path - from enum import Enum - - from cryptography import x509 --from cryptography.x509.oid import NameOID --from cryptography.hazmat.primitives import hashes - from cryptography.hazmat.backends import default_backend --from cryptography.hazmat.primitives import serialization --from cryptography.hazmat.primitives.asymmetric import ec --from cryptography.hazmat.primitives.asymmetric import rsa -- -+from cryptography.hazmat.primitives import hashes, serialization -+from cryptography.hazmat.primitives.asymmetric import ec, rsa -+from cryptography.x509.oid import NameOID - - ONE_DAY = datetime.timedelta(1, 0, 0) - -@@ -25,7 +22,9 @@ def createRSAKeyPair(size=4096): - ) - - --def createCRL(pkey, issuer, serial_numbers=None, last_update=None, next_update=None): -+def createCRL( -+ pkey, issuer, 
serial_numbers=None, last_update=None, next_update=None, authid=True -+): - serial_numbers = [333] if serial_numbers is None else serial_numbers - builder = x509.CertificateRevocationListBuilder() - builder = builder.issuer_name(issuer) -@@ -43,50 +42,15 @@ def createCRL(pkey, issuer, serial_numbers=None, last_update=None, next_update=N - .build(default_backend()) - ) - builder = builder.add_revoked_certificate(revoked_cert) -- crl = builder.sign( -- private_key=pkey, algorithm=hashes.SHA256(), backend=default_backend() -- ) -- return crl -- -- --def createRootCA(pkey, subject): -- issuer = subject -- ca = ( -- x509.CertificateBuilder() -- .subject_name(subject) -- .issuer_name(issuer) -- .public_key(pkey.public_key()) -- .serial_number(x509.random_serial_number()) -- .not_valid_before(datetime.datetime.utcnow()) -- .not_valid_after( -- datetime.datetime.utcnow() + datetime.timedelta(days=365 * 365) -- ) -- .add_extension( -- x509.BasicConstraints(ca=True, path_length=None), -- critical=True, -- # Sign our certificate with our private key -- ) -- .add_extension( -- x509.KeyUsage( -- digital_signature=False, -- key_encipherment=False, -- content_commitment=False, -- data_encipherment=False, -- key_agreement=False, -- encipher_only=False, -- decipher_only=False, -- key_cert_sign=True, -- crl_sign=True, -- ), -- critical=True, -- ) -- .add_extension( -- x509.SubjectKeyIdentifier.from_public_key(pkey.public_key()), -+ if authid: -+ builder = builder.add_extension( -+ x509.AuthorityKeyIdentifier.from_issuer_public_key(pkey.public_key()), - critical=False, - ) -- .sign(pkey, hashes.SHA512(), default_backend()) -+ crl = builder.sign( -+ private_key=pkey, algorithm=hashes.SHA512(), backend=default_backend() - ) -- return ca -+ return crl - - - class CertType(Enum): -@@ -200,9 +164,7 @@ def createCert( - critical=True, - ) - .add_extension( -- x509.ExtendedKeyUsage( -- [x509.oid.ExtendedKeyUsageOID.CODE_SIGNING] -- ), -+ 
x509.ExtendedKeyUsage([x509.oid.ExtendedKeyUsageOID.CODE_SIGNING]), - critical=False, - ) - ) -@@ -254,8 +216,7 @@ def getPrivKey(path, create_priv_key): - - - if __name__ == "__main__": -- MOCKUP_CRL_DIST = "http://127.0.0.1:1234/crl/" -- -+ MOCKUP_CRL_DIST = "" - - # create root CA - root_ca_subject = x509.Name( -@@ -310,7 +271,13 @@ if __name__ == "__main__": - t=CertType.ROOT_CA, - ) - -- fake_root_ca_crt = createRootCA(fake_root_ca_pkey, fake_root_ca_subject) -+ fake_root_ca_crt = createCert( -+ pkey=fake_root_ca_pkey, -+ subject=fake_root_ca_subject, -+ issuer_pkey=fake_root_ca_pkey, -+ crl_uri=None, -+ t=CertType.ROOT_CA, -+ ) - - # create intermediate CA - inter_ca_pkey = getPrivKey("inter_ca.key", createRSAKeyPair) -@@ -354,7 +321,7 @@ if __name__ == "__main__": - - # create ibm certificate - ibm_pkey = getPrivKey("ibm.key", createRSAKeyPair) -- ibm_subject = x509.Name( -+ ibm_subject_poughkeepsie = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( -@@ -371,9 +338,35 @@ if __name__ == "__main__": - ), - ] - ) -- ibm_crt = createCert( -+ ibm_pougkeepsie_crt = createCert( - pkey=ibm_pkey, -- subject=ibm_subject, -+ subject=ibm_subject_poughkeepsie, -+ issuer_crt=inter_ca_crt, -+ issuer_pkey=inter_ca_pkey, -+ crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", -+ t=CertType.SIGNING_CERT, -+ ) -+ -+ ibm_subject_armonk = x509.Name( -+ [ -+ x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), -+ x509.NameAttribute( -+ NameOID.ORGANIZATION_NAME, -+ u"International Business Machines Corporation", -+ ), -+ x509.NameAttribute( -+ NameOID.COMMON_NAME, u"International Business Machines Corporation" -+ ), -+ x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), -+ x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), -+ x509.NameAttribute( -+ NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key Signing Service" -+ ), -+ ] -+ ) -+ ibm_armonk_crt = createCert( -+ pkey=ibm_pkey, -+ subject=ibm_subject_armonk, - issuer_crt=inter_ca_crt, - 
issuer_pkey=inter_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", -@@ -381,7 +374,7 @@ if __name__ == "__main__": - ) - ibm_expired_crt = createCert( - pkey=ibm_pkey, -- subject=ibm_subject, -+ subject=ibm_subject_poughkeepsie, - issuer_crt=inter_ca_crt, - issuer_pkey=inter_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", -@@ -390,11 +383,11 @@ if __name__ == "__main__": - not_after=datetime.datetime.today() - 1 * 365 * ONE_DAY, - ) - -- #create revoked ibm certificate -+ # create revoked ibm certificate - ibm_rev_pkey = getPrivKey("ibm.key", createRSAKeyPair) - ibm_rev_crt = createCert( - pkey=ibm_rev_pkey, -- subject=ibm_subject, -+ subject=ibm_subject_poughkeepsie, - issuer_crt=inter_ca_crt, - issuer_pkey=inter_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", -@@ -402,7 +395,9 @@ if __name__ == "__main__": - ) - - # create inter CLRs -- inter_ca_crl = createCRL(inter_ca_pkey, inter_ca_subject, [444, ibm_rev_crt.serial_number]) -+ inter_ca_crl = createCRL( -+ inter_ca_pkey, inter_ca_subject, [444, ibm_rev_crt.serial_number] -+ ) - inter_ca_invalid_signer_crl = createCRL(root_ca_pkey, inter_ca_subject, [444]) - inter_ca_invalid_date_crl = createCRL( - inter_ca_pkey, -@@ -431,7 +426,9 @@ if __name__ == "__main__": - ), - ] - ) -- ibm_wrong_subject_crl = createCRL(ibm_wrong_subject_pkey, ibm_wrong_subject_subject, [555]) -+ ibm_wrong_subject_crl = createCRL( -+ ibm_wrong_subject_pkey, ibm_wrong_subject_subject, [555] -+ ) - ibm_wrong_subject_crt = createCert( - pkey=ibm_wrong_subject_pkey, - subject=ibm_wrong_subject_subject, -@@ -469,7 +466,6 @@ if __name__ == "__main__": - t=CertType.SIGNING_CERT, - ) - -- - def host_subj(): - return x509.Name( - [ -@@ -487,14 +483,13 @@ if __name__ == "__main__": - ] - ) - -- - # create host certificate - host_pkey = getPrivKey("host.key", createEcKeyPair) - host_subject = host_subj() - host_crt = createCert( - pkey=host_pkey, - subject=host_subject, -- issuer_crt=ibm_crt, -+ issuer_crt=ibm_pougkeepsie_crt, - 
issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, -@@ -502,7 +497,7 @@ if __name__ == "__main__": - host_crt_expired = createCert( - pkey=host_pkey, - subject=host_subject, -- issuer_crt=ibm_crt, -+ issuer_crt=ibm_pougkeepsie_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, -@@ -512,7 +507,7 @@ if __name__ == "__main__": - host_uri_na_crt = createCert( - pkey=host_pkey, - subject=host_subject, -- issuer_crt=ibm_crt, -+ issuer_crt=ibm_pougkeepsie_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "notavailable", - t=CertType.HOST_CERT, -@@ -523,7 +518,7 @@ if __name__ == "__main__": - host_crt = createCert( - pkey=host_pkey, - subject=host_subject, -- issuer_crt=ibm_crt, -+ issuer_crt=ibm_pougkeepsie_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, -@@ -534,35 +529,48 @@ if __name__ == "__main__": - host_rev_crt = createCert( - pkey=host_rev_pkey, - subject=host_rev_subject, -- issuer_crt=ibm_crt, -+ issuer_crt=ibm_pougkeepsie_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, - ) - - # some IBM revocation lists -- ibm_crl = createCRL(ibm_pkey, ibm_subject, [555, host_rev_crt.serial_number]) -+ ibm_poughkeepsie_crl = createCRL( -+ ibm_pkey, ibm_subject_poughkeepsie, [555, host_rev_crt.serial_number] -+ ) -+ ibm_armonk_crl = createCRL( -+ ibm_pkey, ibm_subject_armonk, [555, host_rev_crt.serial_number] -+ ) -+ - ibm_outdated_early_crl = createCRL( - ibm_pkey, -- ibm_subject, -+ ibm_subject_poughkeepsie, - [], - last_update=datetime.datetime.today() + 1000 * 365 * ONE_DAY, - next_update=datetime.datetime.today() + 1001 * 365 * ONE_DAY, - ) - ibm_outdated_late_crl = createCRL( - ibm_pkey, -- ibm_subject, -+ ibm_subject_poughkeepsie, - [], - last_update=datetime.datetime.today() - 2 * 365 * ONE_DAY, - next_update=datetime.datetime.today() - 1 * 365 * ONE_DAY, - ) -- ibm_wrong_issuer_crl = 
createCRL(ibm_pkey, inter_ca_subject, []) -+ ibm_wrong_issuer_priv_key_crl = createCRL( -+ ibm_pkey, inter_ca_subject, [], authid=False -+ ) - ibm_invalid_hash_crl = createCRL( -- inter_ca_pkey, ibm_subject, [555, host_crt.serial_number] -+ inter_ca_pkey, -+ ibm_subject_poughkeepsie, -+ [555, host_crt.serial_number], -+ authid=False, - ) - - # create host certificate issued by a non-valid signing key -- host_invalid_signing_key_pkey = getPrivKey("host_invalid_signing_key.key", createEcKeyPair) -+ host_invalid_signing_key_pkey = getPrivKey( -+ "host_invalid_signing_key.key", createEcKeyPair -+ ) - host_invalid_signing_key_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), -@@ -606,12 +614,36 @@ if __name__ == "__main__": - host2_crt = createCert( - pkey=host2_pkey, - subject=host2_subject, -- issuer_crt=ibm_crt, -+ issuer_crt=ibm_pougkeepsie_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, - ) - -+ host_armonk_pkey = getPrivKey("host.key", createEcKeyPair) -+ host_armonk_subject = x509.Name( -+ [ -+ x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), -+ x509.NameAttribute( -+ NameOID.ORGANIZATION_NAME, -+ u"International Business Machines Corporation", -+ ), -+ x509.NameAttribute( -+ NameOID.COMMON_NAME, u"International Business Machines Corporation" -+ ), -+ x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), -+ x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), -+ x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key"), -+ ] -+ ) -+ host_armonk_crt = createCert( -+ pkey=host_armonk_pkey, -+ subject=host_armonk_subject, -+ issuer_crt=ibm_armonk_crt, -+ issuer_pkey=ibm_pkey, -+ crl_uri=MOCKUP_CRL_DIST + "ibm_armonk.crl", -+ t=CertType.HOST_CERT, -+ ) - - fake_host_pkey = getPrivKey("fake_host.key", createEcKeyPair) - fake_host_subject = x509.Name( -@@ -637,7 +669,7 @@ if __name__ == "__main__": - crl_uri=MOCKUP_CRL_DIST + "fake_ibm.crt", - t=CertType.HOST_CERT, - ) -- 
#TODO DER chain -+ # TODO DER chain - - # store CA - with open("root_ca.crt", "wb") as f: -@@ -673,22 +705,26 @@ if __name__ == "__main__": - - # store IBM - with open("ibm.crt", "wb") as f: -- f.write(ibm_crt.public_bytes(serialization.Encoding.PEM)) -+ f.write(ibm_pougkeepsie_crt.public_bytes(serialization.Encoding.PEM)) -+ with open("ibm_armonk.crt", "wb") as f: -+ f.write(ibm_armonk_crt.public_bytes(serialization.Encoding.PEM)) - with open("ibm_rev.crt", "wb") as f: - f.write(ibm_rev_crt.public_bytes(serialization.Encoding.PEM)) - with open("ibm_expired.crt", "wb") as f: - f.write(ibm_expired_crt.public_bytes(serialization.Encoding.PEM)) - with open("ibm.crl", "wb") as f: -- f.write(ibm_crl.public_bytes(serialization.Encoding.PEM)) -+ f.write(ibm_poughkeepsie_crl.public_bytes(serialization.Encoding.PEM)) -+ with open("ibm_armonk.crl", "wb") as f: -+ f.write(ibm_armonk_crl.public_bytes(serialization.Encoding.PEM)) - with open("ibm.chained.crt", "wb") as f: -- f.write(ibm_crl.public_bytes(serialization.Encoding.PEM)) -- f.write(ibm_crt.public_bytes(serialization.Encoding.PEM)) -+ f.write(ibm_poughkeepsie_crl.public_bytes(serialization.Encoding.PEM)) -+ f.write(ibm_pougkeepsie_crt.public_bytes(serialization.Encoding.PEM)) - with open("ibm_outdated_early.crl", "wb") as f: - f.write(ibm_outdated_early_crl.public_bytes(serialization.Encoding.PEM)) - with open("ibm_outdated_late.crl", "wb") as f: - f.write(ibm_outdated_late_crl.public_bytes(serialization.Encoding.PEM)) - with open("ibm_wrong_issuer.crl", "wb") as f: -- f.write(ibm_wrong_issuer_crl.public_bytes(serialization.Encoding.PEM)) -+ f.write(ibm_wrong_issuer_priv_key_crl.public_bytes(serialization.Encoding.PEM)) - with open("ibm_invalid_hash.crl", "wb") as f: - f.write(ibm_invalid_hash_crl.public_bytes(serialization.Encoding.PEM)) - with open("ibm_wrong_subject.crt", "wb") as f: -@@ -719,6 +755,10 @@ if __name__ == "__main__": - with open("host2.crt", "wb") as f: - 
f.write(host2_crt.public_bytes(serialization.Encoding.PEM)) - -+ # store host_armonk -+ with open("host_armonk.crt", "wb") as f: -+ f.write(host_armonk_crt.public_bytes(serialization.Encoding.PEM)) -+ - # store fake host - with open("fake_host.crt", "wb") as f: - f.write(fake_host_crt.public_bytes(serialization.Encoding.PEM)) -@@ -728,6 +768,6 @@ if __name__ == "__main__": - - # store a DER cert and crl - with open("der.crt", "wb") as f: -- f.write(ibm_crt.public_bytes(serialization.Encoding.DER)) -+ f.write(ibm_pougkeepsie_crt.public_bytes(serialization.Encoding.DER)) - with open("der.crl", "wb") as f: -- f.write(ibm_crl.public_bytes(serialization.Encoding.DER)) -+ f.write(ibm_poughkeepsie_crl.public_bytes(serialization.Encoding.DER)) -diff --git a/rust/pv/tests/assets/cert/host.crt b/rust/pv/tests/assets/cert/host.crt -index 3334a81..1c10f4e 100644 ---- a/rust/pv/tests/assets/cert/host.crt -+++ b/rust/pv/tests/assets/cert/host.crt -@@ -1,30 +1,29 @@ - -----BEGIN CERTIFICATE----- --MIIFFDCCAvygAwIBAgIUO+SNGBgRvkmR/lKiZhiN4l4zS70wDQYJKoZIhvcNAQEN -+MIIE+DCCAuCgAwIBAgIUBSLhuGTvxPbggG70ISL2R6DDGZcwDQYJKoZIhvcNAQEN - BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y - azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl --eSBTaWduaW5nIFNlcnZpY2UwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0 --NDhaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp -+eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy -+MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp - bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h - bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv - cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw --EAYHKoZIzj0CAQYFK4EEACMDgYYABAF2iBzYS4tt5xI8fpAO33jn97aEmZFGsSY7 
--qLhshEfwirbUacKxKO2eHDUBWAWs09MCM9ORIvfi+KocKxR7eIO0BgDDRVbCXPkv --Kc5mvUQRTWt7PHjhOj+QvbkAQPaHblJi93imGpN9GzSALrJX404Gct1fKjR63aoo --IDUJKnDDXC55c6OBhzCBhDAyBgNVHR8EKzApMCegJaAjhiFodHRwOi8vMTI3LjAu --MC4xOjEyMzQvY3JsL2libS5jcmwwDgYDVR0PAQH/BAQDAgMIMB8GA1UdIwQYMBaA --FN6M1/Do0NgBB3F9b9loWSA+sGd8MB0GA1UdDgQWBBR6ESQ+J+iXQe0a/KZ2+N+Z --8a9SZjANBgkqhkiG9w0BAQ0FAAOCAgEAVWJHoCxxkqh7b+y93PY4KPy4jJC4gN1S --dAPqttD/yteJ/4mbVel1/KNSoQBk5EpJmRqeBwHCgGJaT/TxYXNw/b8mRPxe/xbb --wieZMqlSmH028UjYDku1eM0IgHISgoCesIR95D5iAOWbMMVUwIHIHTfmhK7DmZVe --SPf7RIkctrpYxZh0Gw8KLZO6Mfy/9tq3dps0A7KS6jjdrF+M9LavPGwFvtfvRMTi --rdteByO2saGAKDvrjtievwlWCNBJlKV1arW9krN7eqJY5YO6eRbX6UjuhbPRgjte --eZ4jL121TBJaKZU7Q/lvYHIWfzstwQdiem2Ua1GyiiEvPZrQlmqQ3gDBtwJQGB4Q --2myP7MY7THiKObjaB8qRsVxKM78ktwAtAYSZv7gZlmSJ/uTMzDV5D2TQxqs7zwCj --sV+psUn4nvh58xP+DW+MYbF/Cpmzvul9FjMKBs270vE1q+gMot27rbQHRRJ4lVN5 --khiG6Oi6blVkPKExIVIiaZ9diXK6NhtWp15PWljNiDxZO+zpkeuw7cKLn/idzmvP --Gcj6m7DqcdsSIHNKbR5iM2VuDhg/j8uBD3uF2Wlymp31TBQgdYWSihJpwZKHNqJD --uq9SmegwI5gYg64KwABZM9hGbl/krXt/0CeCR5HRc+fthanKx/tO2tbCuVT3FR5J --XpKNUy1D78o= -+EAYHKoZIzj0CAQYFK4EEACMDgYYABAHGO0MnpQa6Q2IxgqV7AGwd3OwBnYOJjYJF -+hzrwY+wQacmJjWeNyHahBCxu4bM8vDr70SF5vZFrWpcWpc9JTY5AagFCFDqIfSvL -+J6lKJuCog5RfMsWJpG2j/MnK7MxG+Ph0R+ItmLFbWFxCV5YOT43olhwYZr/pd9qH -+PAD96UEDM8JanKNsMGowGAYDVR0fBBEwDzANoAugCYYHaWJtLmNybDAOBgNVHQ8B -+Af8EBAMCAwgwHwYDVR0jBBgwFoAUw4weXbTWAZisD86gZSugZ6V1FNkwHQYDVR0O -+BBYEFL4m5UxVbUdEl9yg4sjiWKO/EAxyMA0GCSqGSIb3DQEBDQUAA4ICAQBwaPG5 -+Mg3iKtxR6ncteH+YmtMHW4/wB/341pTKFsKBYsMNWuCC5AKbNrshCNbFbctqhLrB -+LmEpmza3/Pk6izO4AozHNl0tRec/HnQr2gonfI48HBDRiV2f40x0gJG9gGCiJy7o -+6iKZDYUnjfnhXVC67RwLMEiIKbeOAWQ9hHqegUjYdaaIlhyiHLMuWMceidvG76nN -+2eyJUNEouT4+UvquD2oqSitB3ZLhWRqPOQn57ME1b0QYF240PN8r21YtzPmSI+s/ -+ej04EcQZrlJId6GtU7YwD1767hVw84v/QjPbMqnYQbxX8n3IvOf541rQ0UdjBFc9 -+UhbnSn32IGFrRlL1y3MPBF6hLPcpW4P0QrUijc6gZ+x6SNFho8n+dk53F7RvMi1l -+SLgJl7x8pUeqBn5QKMcYYsZG39oZmQj4xHjAABx2hRWayDscvROiQpvLHRtLVmk7 
-++hq4Q/jalc2cNHZSwLX6Tv5P+8waTnXg8YNEHeAAgcw1lD+uw5HgusjGD4USE7Hq -+Q6EDGzC+Ny3u2+35XWbNaKWVthtKAIcZ9B4LjdJXeQFGcOMr6yV5rKfOFQwXo8bS -+rNy57tiva8KM2weSfRil4f146Rsb3TJzUdlkaN+NVIY0YImiC+rR0qa6Iv6JCio1 -+F2lu8m/aRHQQF5J5fD7ge6v7F2D6K3qT9tTlrw== - -----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/host.key b/rust/pv/tests/assets/cert/host.key -new file mode 100644 -index 0000000..f7f9888 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/host.key -@@ -0,0 +1,8 @@ -+-----BEGIN PRIVATE KEY----- -+MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIA8rPNC6rxZR+GxSxb -+qWRreFGnWRQGd22nHWKAvQmrA5GlXTtWQMoL8il9Jb1OnrQPPo620nQpzA1GXo4U -+BUqgYoShgYkDgYYABAHGO0MnpQa6Q2IxgqV7AGwd3OwBnYOJjYJFhzrwY+wQacmJ -+jWeNyHahBCxu4bM8vDr70SF5vZFrWpcWpc9JTY5AagFCFDqIfSvLJ6lKJuCog5Rf -+MsWJpG2j/MnK7MxG+Ph0R+ItmLFbWFxCV5YOT43olhwYZr/pd9qHPAD96UEDM8Ja -+nA== -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/host2.crt b/rust/pv/tests/assets/cert/host2.crt -new file mode 100644 -index 0000000..ce418d7 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/host2.crt -@@ -0,0 +1,29 @@ -+-----BEGIN CERTIFICATE----- -+MIIE+DCCAuCgAwIBAgIUC3KzCH9KUb8ZaY6J/97gqebLoIYwDQYJKoZIhvcNAQEN -+BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl -+eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy -+MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp -+bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h -+bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv -+cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw -+EAYHKoZIzj0CAQYFK4EEACMDgYYABAE3KDrdCdVeBV34NicA8AoP7hItcwxgXHOQ -+F+V02N6b5AR7w3YDHE/JSzSoZZIYiqdy8SmeD5GtwHLV8tLQ8xnadgHT0BesGS02 -+w0H5foGzzZOHZW3AfbdH4MpssR4Bf1jqL0jw6eV+oiMStDuZ44zri7PFjvVByt6M 
-+oeSvh5qAXCQG/6NsMGowGAYDVR0fBBEwDzANoAugCYYHaWJtLmNybDAOBgNVHQ8B -+Af8EBAMCAwgwHwYDVR0jBBgwFoAUw4weXbTWAZisD86gZSugZ6V1FNkwHQYDVR0O -+BBYEFL8FyjCu1iRcjwx4pzfo+VIQ5NdbMA0GCSqGSIb3DQEBDQUAA4ICAQASxSXu -+RxNw/kSwqedNq9jOTHb5FATNykBIVSuXS9BB2qkjcTVDXNnlsSBrPL/CumRp/TFD -+5VsB2rLhESmUrpghQrODeFvyFE52yVhxvcNCyjz7yIQZvc4qofMQMsg3o0rSqp2s -+lu1PUbcrL2aCG1yxB3isObVqiWaiRdnPxL8aX3Qt6BszlwWUgaFoaH0uZxlgVGKV -+C+dXrn5WkNRVd2ouHSLQE6fIUYIf/TrV+AKu804IEoFRIvMUCqQRUHsj5toKhfDb -+6tl/Xd+EiPCYbnhR2J01I08yxMExvYXfXapY7JJjlWTHKFKaxLoqv++NZRM1bW6s -+uyLWP735Qb+0AmhZ6TfeJM7H77LpQK0WCylaNVJWWjXt9UsnNdirCbp/jpKF8bnG -+2PkjBHKruvCakqw1bDq8eDv9In1Ki+Um4gp7OfjYvcN8zxvGQofgj++UaCy982iX -+WSq14iUyrRDVu8zWghL/F1lUx7ab8UV+OrmZuCALVHZ76YVdwmJXGYll1OBbJbgL -+5xze6p77vKzbgNyABWmR6TlHq/nFDhj9kKirpQaI7WHyOtsGpc7sqd0tT+CeOhNf -+l3xXyFPb6N58aSC2cY0W0Nq6X/mWIgMqHY9lzYLmoLBFFZlIjqWzwyVcajmjrtaK -+rlfs0e9f9DvVV8bMTXFMUBlWrmYDrROKpYLqhg== -+-----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/host2.key b/rust/pv/tests/assets/cert/host2.key -new file mode 100644 -index 0000000..ba27ed1 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/host2.key -@@ -0,0 +1,8 @@ -+-----BEGIN PRIVATE KEY----- -+MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIAQNfN5BLLLAdhlwGk -+Ve8w2t+9x4LWR9st6CLCYHnTgc2gr4+HqgcMwmNuj3cA8ENBvIShUnHkX2E+9CyP -+3W1ZN8OhgYkDgYYABAE3KDrdCdVeBV34NicA8AoP7hItcwxgXHOQF+V02N6b5AR7 -+w3YDHE/JSzSoZZIYiqdy8SmeD5GtwHLV8tLQ8xnadgHT0BesGS02w0H5foGzzZOH -+ZW3AfbdH4MpssR4Bf1jqL0jw6eV+oiMStDuZ44zri7PFjvVByt6MoeSvh5qAXCQG -+/w== -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/host_armonk.crt b/rust/pv/tests/assets/cert/host_armonk.crt -new file mode 100644 -index 0000000..d4f547f ---- /dev/null -+++ b/rust/pv/tests/assets/cert/host_armonk.crt -@@ -0,0 +1,29 @@ -+-----BEGIN CERTIFICATE----- -+MIIE+TCCAuGgAwIBAgIUWlKUd1HKU8R7cjv5NJdWhHzhT5cwDQYJKoZIhvcNAQEN -+BQAwgcYxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu 
-+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEPMA0GA1UEBwwGQXJtb25rMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWdu -+aW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUyMzlaMIG2 -+MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBN -+YWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNp -+bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxDzAN -+BgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZswEAYHKoZI -+zj0CAQYFK4EEACMDgYYABAHGO0MnpQa6Q2IxgqV7AGwd3OwBnYOJjYJFhzrwY+wQ -+acmJjWeNyHahBCxu4bM8vDr70SF5vZFrWpcWpc9JTY5AagFCFDqIfSvLJ6lKJuCo -+g5RfMsWJpG2j/MnK7MxG+Ph0R+ItmLFbWFxCV5YOT43olhwYZr/pd9qHPAD96UED -+M8JanKNzMHEwHwYDVR0fBBgwFjAUoBKgEIYOaWJtX2FybW9uay5jcmwwDgYDVR0P -+AQH/BAQDAgMIMB8GA1UdIwQYMBaAFMOMHl201gGYrA/OoGUroGeldRTZMB0GA1Ud -+DgQWBBS+JuVMVW1HRJfcoOLI4lijvxAMcjANBgkqhkiG9w0BAQ0FAAOCAgEACPbD -+hFuibNamlcpnVG7mgnO56RB/K5otOCGS0hbR3OFUGY1ZDQ++M44OF/b+eNYHgjsr -+ER4r2cne3qBjOArFj/toEDGM2l/DFWDnpOvh1ZItJjpQe07OGn+KpTLf/ZB0Q5D7 -+jV/ddjJ0GGyxessjfUvmCT4BVWn1bXKJFSgujic7lgMf8WBGwW+WW3eZecYsh/Cn -+rHWfQYqtvzB1uM89bwZwN+lvHz/QHTreAShDAiCY9M31cemvUvC58z8jPKngFhuf -+C7ZMUXbzu2jYWo3EowzvcRpO1KqlLfNHjGLbaBaZWP2ocK2IUTrsAbr7PTdcAqjX -+TRIds4JlbidA8OziEXFTcM2xShS6WISsO+9JXk5Xxc3+xscjSamHTMwRm2LaKjrr -+d8mOxMs4/5547CBsBFMqu98tiD18K0yqdJiJJRDDOvqVeCvDE6y/pzKaSHIIyA0P -+ATnjQzvgqbcwLrdoJ4WNdTpZieoisUR0oMAKUViPwd0xStzNX6K89PSd5c0Bqbhj -+TwCpaT+DfcvlWk4wqEa9rLn+Su1SUfMSD2dITynLW0UexiKPHZ5epOPu5ho6cOk/ -+fe/N1cX7SyOjhErWLwqr974YM0EPiR6f5lNooYXMPbatq8lcG0gpEg5pggHhsLiY -+LGfvu2jQJKGnrBYM3IcRGUsc+VZ6CNTJjaIIbGg= -+-----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/host_crt_expired.crt b/rust/pv/tests/assets/cert/host_crt_expired.crt -index e439ec7..8742fa5 100644 ---- a/rust/pv/tests/assets/cert/host_crt_expired.crt -+++ b/rust/pv/tests/assets/cert/host_crt_expired.crt -@@ -1,30 +1,29 @@ - -----BEGIN CERTIFICATE----- 
--MIIFEjCCAvqgAwIBAgIUMXh4o6xcPRTKpYDr+YgZnmWeatMwDQYJKoZIhvcNAQEN -+MIIE9jCCAt6gAwIBAgIUSpwJMAovpO3Z9SuY7Zw+/pv3bLEwDQYJKoZIhvcNAQEN - BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y - azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl --eSBTaWduaW5nIFNlcnZpY2UwHhcNMjEwMzI5MTEwNDQ4WhcNMjIwMzI5MTEwNDQ4 -+eSBTaWduaW5nIFNlcnZpY2UwHhcNMjIwMzIyMTU1MjM5WhcNMjMwMzIyMTU1MjM5 - WjCBtjELMAkGA1UEBhMCVVMxNDAyBgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5l - c3MgTWFjaGluZXMgQ29ycG9yYXRpb24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwg - QnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3Jr - MQ8wDQYDVQQHDAZBcm1vbmsxFzAVBgNVBAsMDklCTSBaIEhvc3QgS2V5MIGbMBAG --ByqGSM49AgEGBSuBBAAjA4GGAAQBdogc2EuLbecSPH6QDt945/e2hJmRRrEmO6i4 --bIRH8Iq21GnCsSjtnhw1AVgFrNPTAjPTkSL34viqHCsUe3iDtAYAw0VWwlz5LynO --Zr1EEU1rezx44To/kL25AED2h25SYvd4phqTfRs0gC6yV+NOBnLdXyo0et2qKCA1 --CSpww1wueXOjgYcwgYQwMgYDVR0fBCswKTAnoCWgI4YhaHR0cDovLzEyNy4wLjAu --MToxMjM0L2NybC9pYm0uY3JsMA4GA1UdDwEB/wQEAwIDCDAfBgNVHSMEGDAWgBTe --jNfw6NDYAQdxfW/ZaFkgPrBnfDAdBgNVHQ4EFgQUehEkPifol0HtGvymdvjfmfGv --UmYwDQYJKoZIhvcNAQENBQADggIBAEsSd5vd7Vk1y1YsE4eWkrBrMElYa3/O6G2Q --oMZFo2mzzDH50NBEwYG4K+SjEmqJbAErtNHsAcJLWlvORiNoBmPcB6FEMifgCvuZ --zbSEiL/tt8XLI1M04DdKjVZ6AIrdhMKPz/AaRycnlHjbq0R0fEJP/SnWxtGnHewB --QGM8TDGCzXrwXsOr50soxQ+cbXFJ6eQyGrtNP0eyJ7kkIrz6+SJ0dQPXxoZpdtfY --XEv1OagX0tAuDUG26do6MjwC1qiDKoLdkxSFRkCvyRHqFapKlLhzBMrLhQ+Hl6E/ --kD5ORD2nMvTHcHWbjb7Mr6tcxKG+7CcJO0hYJbdfNCcKYc3EmE49wazSTBKvWfJp --XObVEGeM/11cdcg6Li1jw/JrrexEeQpjgoNuAgGKRmxzJBOCPNkU8jGs5QEqFCyw --fpl7BA+ydW2/zAvcr7mZZgyK4KiRTdK5VTGfXuwTv+Q3hsE0CZ6L+byNCZajyGzs --xq9ydh2G4kIlWFzs+2gSxQWYRiOGt6W7FVdiPYOnAVgzmRJdfR1qVrWZTGbPxJbX --1O3qYBPQE2tU8xsyl/HuikGProda2xwfTjmRhr7DPYyF75nGPvtGm6vwBBcWm+xl --jI0a/dHqE5MR6acOrXYNSFflPcfd04vJ87Ajx/wFr0Glo/8LWtzMp0nFpvif9LDX --3sGEtRA1 
-+ByqGSM49AgEGBSuBBAAjA4GGAAQBxjtDJ6UGukNiMYKlewBsHdzsAZ2DiY2CRYc6 -+8GPsEGnJiY1njch2oQQsbuGzPLw6+9Eheb2Ra1qXFqXPSU2OQGoBQhQ6iH0ryyep -+SibgqIOUXzLFiaRto/zJyuzMRvj4dEfiLZixW1hcQleWDk+N6JYcGGa/6XfahzwA -+/elBAzPCWpyjbDBqMBgGA1UdHwQRMA8wDaALoAmGB2libS5jcmwwDgYDVR0PAQH/ -+BAQDAgMIMB8GA1UdIwQYMBaAFMOMHl201gGYrA/OoGUroGeldRTZMB0GA1UdDgQW -+BBS+JuVMVW1HRJfcoOLI4lijvxAMcjANBgkqhkiG9w0BAQ0FAAOCAgEAd9NdTLDK -+fWPdOg78c+CaG2XVnIzBy6niur4vM1DPJYbwabTYbpJM2jtevMo3h96jCd6AfHzv -+cFZ1BzpYsWtjbDFhiqowmKnlFlOIoIDXCaG7vBvDWc4iWGG9PHMWEouSQWxg77bZ -+A8l7+K0VdRWYCqwxxClK8oK3PqGyGqELRDWwlBb0kRi9XRmB+2BjOdZOQvmGka/R -+l8hZWHTs+SPHZ+ySiJn55/w7gN/LCJs1XX70jPrzu01lDPTqVIrGKBtWBMKFKR7g -+A4r3EThfj85Xq1T45rvD0ozlkyVmdMtSk3z+fKCt5+qEHAu52kq2Ps7xQviAYkIK -+f+yNf1a/Ly79QoqEibRXuKMk/oWoqv7f5vT5HqoWEWZFIYbdbYTBIwkw3mHwZRZx -+CcyiukHw2W/ZwM6p60gXWZwrQxwVkdhxWnAHrAToL6O8M9Fk4aVWUkFNjqVUjTWS -+HLCxv8mCPujXIneqXyiJOpAPKu2kjmoFwfisjevNUmO5EqUHUba98Ne7rdiWweP3 -+nwcBBKIQ2MVZ/lPqfPtqFOHqBzCbqozVw1HzE2/ImnYsK8SGqwBnSJyTt+WjM/8S -+4ulkjVDIwWFwUkOWjM2yorp3u2tFJF2sMv+3AR3mp6aDjF7sCzYpLaZag6oDmwOF -+nR+VwEbTC8gnl21swWSjtUC8I0EidfxzCpc= - -----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/host_invalid_signing_key.crt b/rust/pv/tests/assets/cert/host_invalid_signing_key.crt -index d31905c..945164e 100644 ---- a/rust/pv/tests/assets/cert/host_invalid_signing_key.crt -+++ b/rust/pv/tests/assets/cert/host_invalid_signing_key.crt -@@ -1,30 +1,29 @@ - -----BEGIN CERTIFICATE----- --MIIFHzCCAwegAwIBAgIUBphLhhHJL2K96bK/cfgBk0d+7gkwDQYJKoZIhvcNAQEN -+MIIFAzCCAuugAwIBAgIUELm+ubD3lNPrFxtCj86wMh7lYEcwDQYJKoZIhvcNAQEN - BQAwgckxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y - azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMSQwIgYDVQQLDBtLZXkgU2lnbmluZyBT --ZXJ2aWNlIEludmFsaWQwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0NDha 
-+ZXJ2aWNlIEludmFsaWQwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUyMzla - MIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVz - cyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBC - dXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsx - DzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZswEAYH --KoZIzj0CAQYFK4EEACMDgYYABAF709fZwb3pGZWfHLcCG2fLNcvNh6IqPHwfjRYp --brM5BEE8XoXxcCAXwxo4EAGOcuZBRKP6Ofahek7ppizV8bkPMQDCIgNq2N8mwPFO --99CbyrZs4ZMeAWWWPJiHXUHbWQq4ko8w4UUT7nUIgdHQwWM6TdZml8ke+LE9Jxtx --h9KkfCPi/aOBlTCBkjBABgNVHR8EOTA3MDWgM6Axhi9odHRwOi8vMTI3LjAuMC4x --OjEyMzQvY3JsL2libV93cm9uZ19zdWJqZWN0LmNybDAOBgNVHQ8BAf8EBAMCAwgw --HwYDVR0jBBgwFoAU+jIyiVonTYe6GuSPiAkxA65qou0wHQYDVR0OBBYEFJdEECIf --7UdEf08saThHZDzSfxjhMA0GCSqGSIb3DQEBDQUAA4ICAQBgvREPqqKvAZM3q9pG --5S6wtUspz1Y1sBD4duPEnMZ7Vf9a1HRPrR4vc5ncFIcyS/U/UusvWgFMYoa6WIZR --l4OqRplKF1pwCaQ2F/8OdGMV37iUqZuN6V/GggbFXgMFK1dH29T6h4VtoKC9yScQ --ToHQLuz4ymkd2BwxYix19M6QwdrqomjJb2/zrc7pvMZ0k8KKYi/wt6tlz7FDvsxF --VSDf29gm98kfDJfzPfAC5D93YruAohsP8SakVdA2/YbTkDfImT8ggSnsE83upSD6 --ssjKPPNRunLeCKLb55/Ikcok1iyGhfdmkJvdIHSEvyNp0p7mrohz6l748xdKkKNt --9hOzsfNjThq3zp97ND7M+knqNuzsZIkcV/OUdxNBootIrJXvfeqpaw++5SfWvf+6 --1dHJQpDU3cXKAQ0/RvvqLC+aPvklk2efuvBKIKP9X4WqcP+l2P19GMaM1SZtr5S5 --OWMxqT6sW5lSX5Smm4rMB3UmLDS0SXxHIIvFEQSABiWb6Y0ibDj8a+YrWVyQNH1K --fCSNvsW1r03D06q6gp9fxL1hFBLUw9ooi7ewPfNmm3doe2R0TQpE0pWkiRhhjDjJ --RcIPp+gfYDtt0LcoYVpNdKtLoRPVtO2K3zeU0ezW4PL9Q0tjxh2K/+1+8UEv7nJD --sFg4nQMeygdSK4w0ZlT8xqXVJw== -+KoZIzj0CAQYFK4EEACMDgYYABAB/2mDXj/QP7fcpxLTNfFfuh/X1Y/RS8XK+Y4v+ -+0nisTQhe3MK8BZ7D0bcB872s8EJCYZ1OYr5O2epQgv2hbk4T1wCGKe+Pjf2QHTts -+wXMpPf87R3etF+zWR+CYH+5N5uQhjW9Ueeq7wve3A61e+Atz7IzTKVlM29kT0XvI -+sYtazn6usqN6MHgwJgYDVR0fBB8wHTAboBmgF4YVaWJtX3dyb25nX3N1YmplY3Qu -+Y3JsMA4GA1UdDwEB/wQEAwIDCDAfBgNVHSMEGDAWgBTXVfLQH2D0KccIHhK6M5MR -+TITvtDAdBgNVHQ4EFgQUWdGEEpvjFJ4SNUillC09iBfcTEUwDQYJKoZIhvcNAQEN -+BQADggIBACfZ7+KtT0Hl4Y/JBwCtUqCRRt4kCM5rXqORswkJ6RNmx1OtBHPlkUEI 
-+E5HrE0dZlaECfqMkOBxhbBqOSpoL/xjz1gDHILWXEbWchuFnU4EAgYvJS7nLAHHO -+swJCEP8VDM3y5lUWni8sXs6ro/H5tAkm260+xbn0BFF99l+M124ZUawB+tDg7E9b -+MNG0eWtfeiZzWBBhoxhQoD03gFADWxTU1EK4JgCl7frsj4PjK7TMKTV/Gd/Bd5l0 -+KreFW5orupd3YBqDeUYI6p1l9+uThottwrA4z/POygxvhqgoqNA/+evMWX3wxNza -+ComAmWffB5EJQFEKcpdASwEhyjBMap15GdJVBFGYzXAlksQz7k1fr4h0ebuYtcpb -+dvcDyeIxjsg5XiP32494wogLUjHioi5mwIrGn8/9V6JLRGfKBHnmejHBR/cbMpuv -+rpl0ZZks3bicmBVTKFyj1ORRGX1cXsbtCbPBJEmBeR5IOFYBeaQHrqTcaEBz4lWe -+K2OT6ebcqtsCzDSmjsoeAX/CWRVaIzLzMc9SqE8lZ68ajXjbt/59I7EBeOenttO5 -+mg9mokwDqoeM0bXyT3qdT4s9fhy0BPgnBqJHKIc9afzV79Iz1uJ/E+TZJ+tzhbAU -+n4fFxIHOdbyaOLCLT0+O74FZCQq8FUUnL9QVY5dXTH2458IUiD20 - -----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/host_invalid_signing_key.key b/rust/pv/tests/assets/cert/host_invalid_signing_key.key -new file mode 100644 -index 0000000..41ef9e4 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/host_invalid_signing_key.key -@@ -0,0 +1,8 @@ -+-----BEGIN PRIVATE KEY----- -+MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIB48Wl8pvlD/o6F7YG -+sFfCXNQRbKbppIrywEBkfNG0VzfXazBOLD41bcdPEBh1111kSYgn46uUVxjabyLp -+BgQthf2hgYkDgYYABAB/2mDXj/QP7fcpxLTNfFfuh/X1Y/RS8XK+Y4v+0nisTQhe -+3MK8BZ7D0bcB872s8EJCYZ1OYr5O2epQgv2hbk4T1wCGKe+Pjf2QHTtswXMpPf87 -+R3etF+zWR+CYH+5N5uQhjW9Ueeq7wve3A61e+Atz7IzTKVlM29kT0XvIsYtazn6u -+sg== -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/host_rev.crt b/rust/pv/tests/assets/cert/host_rev.crt -index b6c584b..6516e93 100644 ---- a/rust/pv/tests/assets/cert/host_rev.crt -+++ b/rust/pv/tests/assets/cert/host_rev.crt -@@ -1,30 +1,29 @@ - -----BEGIN CERTIFICATE----- --MIIFFDCCAvygAwIBAgIUGuqbEx5X1pFQfF4T6kIfdcrUwbMwDQYJKoZIhvcNAQEN -+MIIE+DCCAuCgAwIBAgIUMi8utJhRyr5bbvABnCn/Q9/vJ0kwDQYJKoZIhvcNAQEN - BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y - 
azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl --eSBTaWduaW5nIFNlcnZpY2UwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0 --NDhaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp -+eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy -+MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp - bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h - bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv - cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw --EAYHKoZIzj0CAQYFK4EEACMDgYYABABphgYAOqZ4uJPUtVIqaU7UsJgz9+xMmGDq --V7nFimGmkbmqPLT96jIyN4CWdLzfbxP0xvIklkEoOm1xV07YR2LHHQATNEPISTDH --7fySZ47QVc5tyECpZVW7JvSvWKA/KiApAbB1ixErfDrqW1nG5IUtEbDYJgtGwPI/ --+I7e9cU2wkA5mKOBhzCBhDAyBgNVHR8EKzApMCegJaAjhiFodHRwOi8vMTI3LjAu --MC4xOjEyMzQvY3JsL2libS5jcmwwDgYDVR0PAQH/BAQDAgMIMB8GA1UdIwQYMBaA --FN6M1/Do0NgBB3F9b9loWSA+sGd8MB0GA1UdDgQWBBTECeFXnyfTnFazbR0K9cYk --lS3O0jANBgkqhkiG9w0BAQ0FAAOCAgEAOjEE4/KdvcJZbloSGLue27FSrhExvUJ3 --tYS3rs2xg3Ua2daCioI00VrwIN2Fjisqvi10Nv6+NWz5w1220AJyjlmPxvWFcPco --sXLAOWDhi217JaoJ+RzavpOwzhTffEpvPwR6RU4A36vvonc4jm3mWs6F1i5T6YPi --ZaYuk3CRme6WX012rBhIc+heTGh5ZDwwPmGDMLXdpsu+2+3sCPxUW6eQcOWoXkhJ --jn+n6mU18JdN5+wU6Lig5uxXnoP1VN8Xog/mmKV4ThVAS8k9iS5wFK4jl27n6XZy --wfd65WWlm4MMEvKNruj025aSPJp/bcnchBNfnXXPuI5GnYS2cC3TXHd4XT3r3pYn --qgLoNi+AkxnTnxEv5lg+oE+yTMNxDh4iiYkX96ljamGbUPBvbD7bi3Oc/7EOVra1 --BmCGmcjToEnm0e0it9yyuYwKQ6nTz906W3XzaFB+awnXZcGEMgwwdvdal8+eki/r --ofT0nO6cuXbbPYc0rSs/2f3WxjDVmQtiVoJ1dPYIrTX/4lbrsYWVS016Y0UcZ/D/ --/qWWWZIyYpzPasTEgGqtwb5WhBvz3RAFTePefFlTzBHhSPk0Tsu4+W7zHpapfZ0M --0LXT1lGR5WBmur002vbTm4yt7tzdypMbL2i70WGEp4mpRohmBG1m9hYGcelip3iL --Im6vFBNWpVE= -+EAYHKoZIzj0CAQYFK4EEACMDgYYABAHkrwf4hZ7M5ahoYHT0u1Xgl28qxURcdNmW -+kYDA5u1Y2mGXQq85BakAOyfxl/FF/cBrLm0eLVFpePumqgPna3El1gAs9p2SFwN7 -+4MmcLhbwPmGnWmzVrOh9cNS988XYf01E74966r4MKpAGxSNPKDwu4doXFVAYRH/w -+tcgbjlk+Qi3c96NsMGowGAYDVR0fBBEwDzANoAugCYYHaWJtLmNybDAOBgNVHQ8B 
-+Af8EBAMCAwgwHwYDVR0jBBgwFoAUw4weXbTWAZisD86gZSugZ6V1FNkwHQYDVR0O -+BBYEFG+41urs6GhYzOrJD/MJDWMSrUlvMA0GCSqGSIb3DQEBDQUAA4ICAQAnowYb -+R1qgnpH5FLgkj3LndAAfFmJMlTrNcmtafjaLZHUmxxO1tFjb7vvGgu9LggwwSRoN -+y0Qsu0mywXd/ntEuWDyD735aGtUE9xnmluoj4ILBnln195VuvUoWYjCy0XsGZg6G -+UL8bOaZgbvXQWDcdXfpYnM9C5Uj4mB3LFDdasd7nDv2F9vZlHNkd0uGmZvBco3SX -+GNFETOaY5HjU3jLlCmD74p7G9e//+153R2aXEww3SVVJyRowO7SpwH88DNjl2a6A -+OLWoA3oRrX4rDQI66LYv0RZMDCTvzY+HxOQHXeswosA8D96GhpClb1X0wLZUp6gR -+pLzVzaE8RFnSg52jzjI3ehFIoSK0x9R1KPzKbYDh7Q8Sc5sOSe6r9Q01BdpBOUjp -+iTKhgbUEbcIwcaKLIGeSBe0+Yh1uQpqBtOPectUp9bz+hEUDzG6QMS5VFxAGuZWY -+2iA97jxHsUBEgMBZha7z/hQ68Pst+KaIV/24By8lKm0rWpLSKOXVAW1lWZrlpIcx -+Le8+dmy9/JFJXmmN/pmAEVVsK5dYtVXt6ikVi+LPr/7fCfvt2zk88wiUp8iz5jgQ -+pNYT8MPOMf+yzzoqPXjGmrT5znuHnCg0OSh0u8NILWQPQl8M6KNiI8lzcVBy6Djq -+fmnQlaxP1Ey+wrznWmBu5zesU8uMyRTxIfnEzg== - -----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/host_rev.key b/rust/pv/tests/assets/cert/host_rev.key -new file mode 100644 -index 0000000..94e97f1 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/host_rev.key -@@ -0,0 +1,8 @@ -+-----BEGIN PRIVATE KEY----- -+MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIBLz0ExeBJrxd5gA7C -+YFJLVuNd4CPNc/jsOLIMnIbLHSegdOtQn9tief97k5rZdz/MQzWp7rCmsLUYw/fM -+ktUQjJKhgYkDgYYABAHkrwf4hZ7M5ahoYHT0u1Xgl28qxURcdNmWkYDA5u1Y2mGX -+Qq85BakAOyfxl/FF/cBrLm0eLVFpePumqgPna3El1gAs9p2SFwN74MmcLhbwPmGn -+WmzVrOh9cNS988XYf01E74966r4MKpAGxSNPKDwu4doXFVAYRH/wtcgbjlk+Qi3c -+9w== -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/host_uri_na.crt b/rust/pv/tests/assets/cert/host_uri_na.crt -new file mode 100644 -index 0000000..8471663 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/host_uri_na.crt -@@ -0,0 +1,29 @@ -+-----BEGIN CERTIFICATE----- -+MIIE/TCCAuWgAwIBAgIUPRltRSFZSbucmw20u9TN7WHxG8UwDQYJKoZIhvcNAQEN -+BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs 
-+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl -+eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy -+MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp -+bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h -+bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv -+cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw -+EAYHKoZIzj0CAQYFK4EEACMDgYYABAHGO0MnpQa6Q2IxgqV7AGwd3OwBnYOJjYJF -+hzrwY+wQacmJjWeNyHahBCxu4bM8vDr70SF5vZFrWpcWpc9JTY5AagFCFDqIfSvL -+J6lKJuCog5RfMsWJpG2j/MnK7MxG+Ph0R+ItmLFbWFxCV5YOT43olhwYZr/pd9qH -+PAD96UEDM8JanKNxMG8wHQYDVR0fBBYwFDASoBCgDoYMbm90YXZhaWxhYmxlMA4G -+A1UdDwEB/wQEAwIDCDAfBgNVHSMEGDAWgBTDjB5dtNYBmKwPzqBlK6BnpXUU2TAd -+BgNVHQ4EFgQUviblTFVtR0SX3KDiyOJYo78QDHIwDQYJKoZIhvcNAQENBQADggIB -+AGlM/E6RfiQV8si38/IieHw2TsZDNKluXA/edGN4d9i5ztIB+G57IgvAR9GBeRTB -+AxANycBGOWy+VXsUij3UgpoK3NiZJ+XvgFYqx0ZVUX3uPmU6R0Ko5cHGmSEVV6KS -+5ToGLqSZdftybGt9NYKg+LGxzIhT15tgUENkOrFVRSlKVn1zAP+sa2vOEtH5aX6S -+ins3hHQBuhGKSR/z2I5IzEVhnx0FJxpFO32QBdmXUdgjlHcpiIy1BaQRTca/U4iJ -+Q+fnzn5UP5v29ftr/PRh0W9rPVeS8ge+vuPC1eriRL4WWnkMFm0QljERqhg48NWk -+zT8mnVn0E3vQ2Y71FKg85ov0Sza8/OpBgAgnoyCDUIp8YD7BrU1viLEoqLtlvsoH -+l/+mU/CnCe5749C6OBppKhdrIRSG8bh6mIeco1ALDNIaqGoUtdhib0f+YKeCc0zC -+MFjvoSranndvwO+z/qdL7Feay8uAjgq1GPNuj/xaEbzrZz0yF2viMXfK62kgwq+W -+ebg2D1FHVuKXQ2q3IXmwcyLOEUScUcEYDsM5eDenoScgIE2FfHvoGOdJVLsQ9U+8 -+2HTb7ntvbc6ANCTcnt51s5/PfD7UaHeJiSxRpU6rjxnqKZ2mEjbF/Ha8AjImQrrd -+jpjNgH6H5p7ZQqc/4DGa/Q2B/rfvmn5bAgOhicEKMzm1 -+-----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/ibm.chained.crt b/rust/pv/tests/assets/cert/ibm.chained.crt -new file mode 100644 -index 0000000..e96e13f ---- /dev/null -+++ b/rust/pv/tests/assets/cert/ibm.chained.crt -@@ -0,0 +1,59 @@ -+-----BEGIN X509 CRL----- -+MIIDfDCCAWQCAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD 
-+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll -+MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMx -+MTE1NTIzOVoYDzIzODgxMjIzMTU1MjM5WjA8MBMCAgIrFw0yNDAzMjAxNTUyMzla -+MCUCFDIvLrSYUcq+W27wAZwp/0Pf7ydJFw0yNDAzMjAxNTUyMzlaoCMwITAfBgNV -+HSMEGDAWgBTDjB5dtNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQ0FAAOCAgEA -+hMmvyXLOtDG7faP93ZqMh061Qms/at5uYz/eAA9CGfS4E+hq2uQaic6gEMWf5NkH -+9xRWB1UM9sLR5Ai/Fn+MqsbeXK+1ommOfYx0KL6J8JihHAlQpGYJXu683dxIB3LL -+ijZ+od5pes5ZIRKtMNaO4+meDG6e5guXDeFNyJaNYqFXOJcy8OIL4O3T840qY1aY -+YcHZs/sNwC/hSAloTeUQ6S+ga/EplzSDumz5VrRsEhVd3wScM1FNeqwMvA4VPUIa -+NOtZJNLJBLhfmetTeO/wb/y3Ekp8sXAf8UJqABczJODSaGc2RwULmxJd2Q5lESpX -+RkfvObqTWgpzpyV/1OhZYDUlXJqiADYnyAsQL8WV2uFQ5fRG7baVDiNYn4uE92Of -+FN7jXmT+7UcHSO5L0VqHS6Ia6j2NammKlbhsAw7dU3uxEgCJvBYlWSY+z9Fs+O1B -+hNU42fL5W7JnoGfqG+LqfDdbG0MG+GMo1rDDcvrEK73F41w7mLmZhfYAZY26xU6Q -+XyLcrG4YfR6FCyOs53zwrKKG9x3ETqY3S0DSSOKTnuvIGZBCSOxGpqdb7pQax6IJ -+PqsM+IzFC1C0Ol0eX1ee734rCHfNP6XEM7ucjBweNUJRrU0Q5CrGGJzEQz0ym/JN -+81f/kQJJosSSc1+j8iDiaKkUgcQ1DWWZLYZgvCkwZcY= -+-----END X509 CRL----- -+-----BEGIN CERTIFICATE----- -+MIIGsTCCBJmgAwIBAgIUMwTHYM3peBjwVRi6iICr6FVn0cMwDQYJKoZIhvcNAQEL -+BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg -+Q0EwIBcNMjQwMzIxMTQ1MjM4WhgPMjM4ODEyMjMxNDUyMzhaMIHMMQswCQYDVQQG -+EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD -+b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo -+aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv -+dWdoa2VlcHNpZTEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2 -+aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxNqcFB9GSj+nC1PV 
-+OTIAvzTuo3X7bTbohEPrIDDsEcGyVzQtfagI9vkxhhGLhXxIXrkwEI4u75DeLBdq -+ZR3KFJ3QD+jHW12EWNpmSYu272CEAJeb/SBJaFbh40WbY1BsTP+3zI5QMwVTmCH5 -+QqODD+GU0fDSo9Gti0yX4rxOY+54jDC77AhSlU55rYruBoIXVe4CcPUpqzZLgqB2 -+cTJ5g7lokGqc6w9pveCznVYGPfcdDt+ePGssOvrfBOJnn6N44tTRAG623BrpctXi -+t3IAhG60y6CqF29BjV0RKOv09nOBiph2a3lAiKYrpAt92BETFB7KSQManUrrvWOD -+mYEcoVkzvrVrB3WnCcHs6lsjQg1FcNrGcDzV9SmX0BIhM+fasxZyHWc08pr3NrRn -+Eks7lRqY+TS8DAeZYJt/2M0Jr32d6Bh+WdyGFFUj6sBPtCaC2VnSkxOgKXeWf/c5 -+EUXIQT2YXEkNK9CP8Kqs8IerEIpfceelQReA5QcvNruaJNktWLKJgYTrdLdfOP0u -+s+9JJHWcFH3tv5906CA+Tlm53Dk6SaRc/DB7lu94yTkcSOpxXZnximZi0GcPZYmZ -+TaswE7d8HCPLhks/RkZSV2764Kl08xle65APiWZ4dM24uSyu75Izb4hrwgIA4qyM -+dDVQnI3cu6utmKUPyHoJtH26dr0CAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAO -+hgxpbnRlcl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYD -+VR0lBAwwCgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU0 -+9GYwHQYDVR0OBBYEFMOMHl201gGYrA/OoGUroGeldRTZMA0GCSqGSIb3DQEBCwUA -+A4ICAQBfBZZ7ZPkRjRgk+0/7CGam2AAv/GSuma8lOnJ6IpBEUL0D01reRe9NO1Y5 -+iXasGZ80e95oC7WtePt1LS2I5rOOb3No0iHcUTm5lZV2M/ObAOwraQLXePCRVgU/ -+OQzNhtRoCpqgL80/1Ne+MTHj1yZp2GRKXOdMd39KZoG92JU/h556GQ6reN6WKNs+ -+wbFG+JKiRnfvPsul6J5lG4QuObCAZXa0fhcQNUHyWjCGIhMa8AuhkDDlIEOBV1Vx -+T/ixdoFhDIoz0xExFidR5uqYXKZlgzcMZ4JT7x0Vs/YsOIg9z5PNkyjt8Kjrxvqs -+NZ1eYuNNxdXxdJZ2x04y8AzLkLRYntOCks5nEamQtzv0ice1Jg52qNJnMm0Y0U9o -+7BjlWy/pp8dBfIA+FyxsIs62hAE6Z/vuJa1VuWJblrT1MFM48Bh64p1m5uDXAZ9w -+Oy7n6YDpzWKWqn6XNV6FoDSkkwaOwB6bPaqAP1ZvP3BmVhednyYRYY21M2aOmxos -+7rpaW1FHgYV96Dm60rL7XRtOEYZNPSIPDO/Ro1oWs3EFwjLLAWtlK1HdMswlnVDX -+6M+LNCRZA3foNi6xvf7CvPkshtQe3FOfiw/OiPBsYsYREC+90Ml/oeTIsyCaOF6n -+5+dpjCBeOUaxzfeQI4WCrLG6+iQZNXWgBkW/PHaPZTldRWzvdQ== -+-----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/ibm.crl b/rust/pv/tests/assets/cert/ibm.crl -index e87cd59..8db2900 100644 ---- a/rust/pv/tests/assets/cert/ibm.crl -+++ b/rust/pv/tests/assets/cert/ibm.crl -@@ -1,20 +1,21 @@ - -----BEGIN X509 CRL----- 
--MIIDVzCCAT8CAQEwDQYJKoZIhvcNAQELBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD -+MIIDfDCCAWQCAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD - VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u - MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv - cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll --MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTIzMDMx --OTExMDQ0OFoYDzIzODcxMjMxMTEwNDQ4WjA8MBMCAgIrFw0yMzAzMjgxMTA0NDha --MCUCFBrqmxMeV9aRUHxeE+pCH3XK1MGzFw0yMzAzMjgxMTA0NDhaMA0GCSqGSIb3 --DQEBCwUAA4ICAQCGdm0ls5MXM6MUI0wR7qOitKh3TIfRnCvhSibVPskjlBZaBT01 --F6xaQGyWVR19IzQNn9GxOGMqvRy/oSihznBeA0+e9497IOPXKop/JsypZR101539 --ntVt691ncmctxKnb2nT4dw7AuiLTxMVzdJ/ouXovnPcgSv/r8lwBo1fXxOgQlQLE --Pi126WFkkgBK7EANnAXiXVWvdM6p67jl/AQGOVHp8MeXowejDdVqKzoU6yyMRDeE --uEU4QibvH/J8VPLC/A2oh4XTZbJ5rB6u3rz2fFGI03XqSrJJHbNenGVQ2ar5qJeI --6kHNDIuuwXN+7JPFf8JXdk8L0G88rQsnjrcm0GzQPW/nZ5bN3FA1V139rdOhSBLR --QgaKzju8Le/Zem317ykOJbC6nDBORmpBVzXYdXA9RMg4PIs3kRVqp/RMiiClz42z --w8c1khmcH6FO2Q5Z40vq8tmSLhbu6PgGIPIya/OQacgDjDiDGcWGvqzVCWv/6AoL --em7b5Piu4yznVkEUA2h3LvoigYTJCgHFrQnoIcuM8vx8QkDjXxSHeuy3wTd2l67S --pZp+jSJPdqWe2PWALJrYuq736E2rZ013eLybHKYOkoJP6ZLewh4gsomO0bpxTL1U --TjPsJncaAP/gLqHi0QD4+irMlo6Q9YpEIkbp9ScEoVMHRL9A/vBQfUJfZw== -+MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMx -+MTE1NTIzOVoYDzIzODgxMjIzMTU1MjM5WjA8MBMCAgIrFw0yNDAzMjAxNTUyMzla -+MCUCFDIvLrSYUcq+W27wAZwp/0Pf7ydJFw0yNDAzMjAxNTUyMzlaoCMwITAfBgNV -+HSMEGDAWgBTDjB5dtNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQ0FAAOCAgEA -+hMmvyXLOtDG7faP93ZqMh061Qms/at5uYz/eAA9CGfS4E+hq2uQaic6gEMWf5NkH -+9xRWB1UM9sLR5Ai/Fn+MqsbeXK+1ommOfYx0KL6J8JihHAlQpGYJXu683dxIB3LL -+ijZ+od5pes5ZIRKtMNaO4+meDG6e5guXDeFNyJaNYqFXOJcy8OIL4O3T840qY1aY -+YcHZs/sNwC/hSAloTeUQ6S+ga/EplzSDumz5VrRsEhVd3wScM1FNeqwMvA4VPUIa -+NOtZJNLJBLhfmetTeO/wb/y3Ekp8sXAf8UJqABczJODSaGc2RwULmxJd2Q5lESpX -+RkfvObqTWgpzpyV/1OhZYDUlXJqiADYnyAsQL8WV2uFQ5fRG7baVDiNYn4uE92Of 
-+FN7jXmT+7UcHSO5L0VqHS6Ia6j2NammKlbhsAw7dU3uxEgCJvBYlWSY+z9Fs+O1B -+hNU42fL5W7JnoGfqG+LqfDdbG0MG+GMo1rDDcvrEK73F41w7mLmZhfYAZY26xU6Q -+XyLcrG4YfR6FCyOs53zwrKKG9x3ETqY3S0DSSOKTnuvIGZBCSOxGpqdb7pQax6IJ -+PqsM+IzFC1C0Ol0eX1ee734rCHfNP6XEM7ucjBweNUJRrU0Q5CrGGJzEQz0ym/JN -+81f/kQJJosSSc1+j8iDiaKkUgcQ1DWWZLYZgvCkwZcY= - -----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/ibm.crt b/rust/pv/tests/assets/cert/ibm.crt -index 94818c6..ffe786b 100644 ---- a/rust/pv/tests/assets/cert/ibm.crt -+++ b/rust/pv/tests/assets/cert/ibm.crt -@@ -1,39 +1,38 @@ - -----BEGIN CERTIFICATE----- --MIIGyzCCBLOgAwIBAgIUeGuWhNwpt9CPzFJ5UJAKfkIlLDcwDQYJKoZIhvcNAQEL -+MIIGsTCCBJmgAwIBAgIUMwTHYM3peBjwVRi6iICr6FVn0cMwDQYJKoZIhvcNAQEL - BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y - azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg --Q0EwIBcNMjMwMzI5MDkwNDQ3WhgPMjM4NzEyMzEwOTA0NDdaMIHMMQswCQYDVQQG -+Q0EwIBcNMjQwMzIxMTQ1MjM4WhgPMjM4ODEyMjMxNDUyMzhaMIHMMQswCQYDVQQG - EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD - b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo - aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv - dWdoa2VlcHNpZTEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2 --aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsXm4EgSxVmuNsKgo --sfJNxNQFkdbpCxxyMOuyBFyGKxSwiGl20VWB6itsG+b9NOmAI6D6FPvLPuB/gSq0 --HR5FuSIkmp9AUx6Xi6lVA2L7qdINo1OA4nJKe5hLSHfjWn36DZFrDyoODlDzLJrS --O4h4QSP1nDSFhk1ilsR1czlvKreXkJw5FsUHRhZGG6idLoK2Ibrne6WsVhPG6WNA --abuvQFv40cQWcyRoePktG8ImnNF5GDewXwtpzARSQj9jTL/gE77DH78a77J8+H+h --d5guiBbPZK8gNkPe0CjD0B7tUx/+sByoaSeQffFG4Cnu5JJDqFq9LXPjtlG2nXSB --Qh6Sc2gzP3jKV+ZcSwQw9AxEQ0ZB/VIPsR8KZKReDDztEpcKxwxlh7bEix/0Y1sv --zT7/I+m5kufdDG6j3hHHzniKXL4b/WyedSfdnVqIJw82FgPFgyIY6F/0ccLdkhAm --fLtQJBHc3UyK13l0qVJxhAFdz1Q0zfScBS6qM/Gnbcdc6MY9/bZdIK7E+4op5iAM 
--kvQHap7qnArhI8VQ1bcXYlQ6asPj4e10lmzroiBHM4N/Yuxv38tmtUCudSB+EcXi --EgJIOLmLq2ZACRzug9KPyMXOD/Yxz2wPgs6I3gvrB22w/MfC77wMfEMuQQk4cZel --TFKosHgLjvcLG+zx5yh5ZVFcNV0CAwEAAaOBrzCBrDA3BgNVHR8EMDAuMCygKqAo --hiZodHRwOi8vMTI3LjAuMC4xOjEyMzQvY3JsL2ludGVyX2NhLmNybDAMBgNVHRMB --Af8EAjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAfBgNV --HSMEGDAWgBQRqYdWstn6ntusRZ8LjyPFEicL5TAdBgNVHQ4EFgQU3ozX8OjQ2AEH --cX1v2WhZID6wZ3wwDQYJKoZIhvcNAQELBQADggIBAEqkZawT89MngrmMTREjYGKZ --+qrm7uQf9wFiM7H7Xs11OEJ5PkNh4jNnnIsXZxc8rr76x+zLr4F6aI08AQn5QOy9 --JXGIbrMHLebtn198aIOYbxZisbXnBlVO3Xz+k8JLdzsu5zxjjaDY3/a63X2ccStJ --U53pSqvgJi6/AvMPA1CPazSjxu6na8rYz6d7c/god7OF0qwQ/ePqd4uJOaImm7HH --CCkwMPYO7UyOWU5CSPMcJ86SGYhvYkoM7wZeJoukK6HlKDI1SRubiTFAx+Hbyk1R --dyVY9vmIOeUlsGEMgsW836g++dg8efRIbIYbSBLQhUL64lLA6wZJ6/oCtC29aX+o --UfxcGUROrpZ5Xi4b4sn0vW4rYq65BzlU17x45XsZMh11hX9aPNE4B62Jl2XLjX3P --Sedu7b/QB6jWpwTAdH96LeLxVepAWiVcFBApBqpu7wxRhCs6M1t3Gh9nvlPE5NRz --zsmx+HVZIgWoP3CgHmiHqajphL0xp6R9qJOyzAVChsmbQYvr+rfaXMv24KBvJYgc --xq5iCP7IccgC6WlhpWyAoTSuhiStTZJtlCKPZqc+HRcuf2fLWXip8YKNHgEtTxNz --7citBXZNoRDFULWwiYDnwhGcZ53p5zPLABYKZdfNHdI+tV92AbzYyQaV0ZwcxL9K --ObAAlDzZKE8vJwT94E3O -+aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxNqcFB9GSj+nC1PV -+OTIAvzTuo3X7bTbohEPrIDDsEcGyVzQtfagI9vkxhhGLhXxIXrkwEI4u75DeLBdq -+ZR3KFJ3QD+jHW12EWNpmSYu272CEAJeb/SBJaFbh40WbY1BsTP+3zI5QMwVTmCH5 -+QqODD+GU0fDSo9Gti0yX4rxOY+54jDC77AhSlU55rYruBoIXVe4CcPUpqzZLgqB2 -+cTJ5g7lokGqc6w9pveCznVYGPfcdDt+ePGssOvrfBOJnn6N44tTRAG623BrpctXi -+t3IAhG60y6CqF29BjV0RKOv09nOBiph2a3lAiKYrpAt92BETFB7KSQManUrrvWOD -+mYEcoVkzvrVrB3WnCcHs6lsjQg1FcNrGcDzV9SmX0BIhM+fasxZyHWc08pr3NrRn -+Eks7lRqY+TS8DAeZYJt/2M0Jr32d6Bh+WdyGFFUj6sBPtCaC2VnSkxOgKXeWf/c5 -+EUXIQT2YXEkNK9CP8Kqs8IerEIpfceelQReA5QcvNruaJNktWLKJgYTrdLdfOP0u -+s+9JJHWcFH3tv5906CA+Tlm53Dk6SaRc/DB7lu94yTkcSOpxXZnximZi0GcPZYmZ -+TaswE7d8HCPLhks/RkZSV2764Kl08xle65APiWZ4dM24uSyu75Izb4hrwgIA4qyM -+dDVQnI3cu6utmKUPyHoJtH26dr0CAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAO 
-+hgxpbnRlcl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYD -+VR0lBAwwCgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU0 -+9GYwHQYDVR0OBBYEFMOMHl201gGYrA/OoGUroGeldRTZMA0GCSqGSIb3DQEBCwUA -+A4ICAQBfBZZ7ZPkRjRgk+0/7CGam2AAv/GSuma8lOnJ6IpBEUL0D01reRe9NO1Y5 -+iXasGZ80e95oC7WtePt1LS2I5rOOb3No0iHcUTm5lZV2M/ObAOwraQLXePCRVgU/ -+OQzNhtRoCpqgL80/1Ne+MTHj1yZp2GRKXOdMd39KZoG92JU/h556GQ6reN6WKNs+ -+wbFG+JKiRnfvPsul6J5lG4QuObCAZXa0fhcQNUHyWjCGIhMa8AuhkDDlIEOBV1Vx -+T/ixdoFhDIoz0xExFidR5uqYXKZlgzcMZ4JT7x0Vs/YsOIg9z5PNkyjt8Kjrxvqs -+NZ1eYuNNxdXxdJZ2x04y8AzLkLRYntOCks5nEamQtzv0ice1Jg52qNJnMm0Y0U9o -+7BjlWy/pp8dBfIA+FyxsIs62hAE6Z/vuJa1VuWJblrT1MFM48Bh64p1m5uDXAZ9w -+Oy7n6YDpzWKWqn6XNV6FoDSkkwaOwB6bPaqAP1ZvP3BmVhednyYRYY21M2aOmxos -+7rpaW1FHgYV96Dm60rL7XRtOEYZNPSIPDO/Ro1oWs3EFwjLLAWtlK1HdMswlnVDX -+6M+LNCRZA3foNi6xvf7CvPkshtQe3FOfiw/OiPBsYsYREC+90Ml/oeTIsyCaOF6n -+5+dpjCBeOUaxzfeQI4WCrLG6+iQZNXWgBkW/PHaPZTldRWzvdQ== - -----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/ibm.key b/rust/pv/tests/assets/cert/ibm.key -new file mode 100644 -index 0000000..847325e ---- /dev/null -+++ b/rust/pv/tests/assets/cert/ibm.key -@@ -0,0 +1,52 @@ -+-----BEGIN PRIVATE KEY----- -+MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDE2pwUH0ZKP6cL -+U9U5MgC/NO6jdfttNuiEQ+sgMOwRwbJXNC19qAj2+TGGEYuFfEheuTAQji7vkN4s -+F2plHcoUndAP6MdbXYRY2mZJi7bvYIQAl5v9IEloVuHjRZtjUGxM/7fMjlAzBVOY -+IflCo4MP4ZTR8NKj0a2LTJfivE5j7niMMLvsCFKVTnmtiu4GghdV7gJw9SmrNkuC -+oHZxMnmDuWiQapzrD2m94LOdVgY99x0O3548ayw6+t8E4mefo3ji1NEAbrbcGuly -+1eK3cgCEbrTLoKoXb0GNXREo6/T2c4GKmHZreUCIpiukC33YERMUHspJAxqdSuu9 -+Y4OZgRyhWTO+tWsHdacJwezqWyNCDUVw2sZwPNX1KZfQEiEz59qzFnIdZzTymvc2 -+tGcSSzuVGpj5NLwMB5lgm3/YzQmvfZ3oGH5Z3IYUVSPqwE+0JoLZWdKTE6Apd5Z/ -+9zkRRchBPZhcSQ0r0I/wqqzwh6sQil9x56VBF4DlBy82u5ok2S1YsomBhOt0t184 -+/S6z70kkdZwUfe2/n3ToID5OWbncOTpJpFz8MHuW73jJORxI6nFdmfGKZmLQZw9l -+iZlNqzATt3wcI8uGSz9GRlJXbvrgqXTzGV7rkA+JZnh0zbi5LK7vkjNviGvCAgDi -+rIx0NVCcjdy7q62YpQ/Iegm0fbp2vQIDAQABAoICACjHosq2WeDaw9egY80H80ip 
-+BgC92bqbw9pWmZhfGiBpkHDpQb1EuKq1H9HGw7EA+JsK4Q6k7cs6JxRaO3O8Epdn -+w+jvz5C5VEf14ne85kY7p8zZm3p6HErtqLjkvBZ8XBstZF2PY8TWByW+b/0ik22Q -+TTyal4BRWRLA6LwTR2gDSFy+Yce5R6ZfjexqGfomLABkoIPW/e1Wf5kNdcvoaWzk -+1WkJdP1rAXxQ1tSys/NVRGNG5uhcSVrkipvf7RiQjGPtM2jUBrSdLJxZ+7AGAV5t -+8q02WNYm+Zz32SyxiNsfPYhBIlMMWNaWGfdPXCANFXc3E6/BYVuRYDmpKmN78DqN -+XrSCsJOahg7A9f2JIVKmreC/WmHi0+eL0fCcMHK95CN415mYLagg1bLhOwu0cdjq -+peSx3+xlEWENDm0B4mYQaPDylHht++Giswg9VpZeIPstgvNARoiCgRCOiR3mXVBg -+ZX+BpH45RSZ0hqdhkssiwP3iOQH0xYe1kEvzcaCeNrGc8cZy4dOcZjYso1XNZEQY -+vnUzAfo0TasiMES/RiNuy7Zf0oTP1UV7QootsqPQrZCFf3eixC91pq4Lh2URUseo -+4yOOI1URBWU5U2RuykzYWsqs/xB+CfJy6dhn6jCjm5lZFt1/wEKj5+7JZbZWkH8A -+G2HHxKIQWz5d2KjbQBkxAoIBAQDwUtb6mrJ/JBvMubdR2Tkyog9lT7qeR848ZZqS -+ybGEuT1Ohhd7vtjiIQzMLX7s6XIUoSE3AWFJJzqFrr0f1HkwShBNlvF77We3jski -+p7u4bsJ91mjHBWl/bAKW9s3YA6PRakk8k215mwORf+trhFkcOJ63DHIPxBnPDQEI -+VBBMvp5ogHHIh5wTql+c4OpdEb5s0PuesKFzjdzKtvPashaP3ebt2nM8Nai86UKQ -+WTcZxMLk36XSLFcW2vZlmlkQ7q8gMM3BIliR2ofmB0AlPBwOY/gUHG+tIBbuQWoX -+tw4DJPY3Y9uWRqAcWOkSbQpAEOUVzyK5nrwXwnH7vOA9HOiZAoIBAQDRsd7x3NGL -+Gp8oY8XULwIrZfyQmsk+nozBNuo2Q40qoXBoReIUF212UMMYc2iX8bMo1jBCzmnl -+3m2BA6aVbpxwlp14Ybe78F8ucQN6Dx5wmT7QyDYxdKyMAYOnOHWdW1ic1y2HjKY3 -+vinswhuI93C4Dkf4mxFtP+aWT3RpQxS7DsztBTKux7fACoU+OKQxrmZTkPSVhuEE -+DXIov09q3awEvB/R+dDzGj6NXwxQ+Kr3yLl137RvVWcvrObyysKLfnVx+Of0Kol/ -+6VD82BBWDcgveu62JPSZ8ckY4eQbi3yl5mOAbL3vre+AQYuDiuOo2NiPOsAimGaz -+kv9gXZQ1leHFAoIBAQDsGHYavN+fCFpHRixSvJT0qUF2xl0QInr52teAXaWIPnN8 -+MT/g0h1ACjgIXqnTFYR9v85hu3lX5LIZoxEptBNa3Wgm0aNrnE/IhP4UjbRd/HIW -+Lg3BeA+snu/sX4raLLlDgqdwW2WxkhhvWLxvZBYnI3jJW/CyjHTOdHgPNobM3nfB -+Mm6WEqPCrh5AgLW9uTDatnR78gqq+zNt806eC1ce/2FfSrzq6cxbys0aAoufRS4y -+q4S8ddMZIQPvzTKy78ocVdXNZ3Cb2ZSo53adHfByMsQE/eq4qk3cw2b25V9et3Er -++W5AtCGXt8FB2N25Et/8DQKQWOFwdhaEuYmSgFQBAoIBAEgmEBRdqsdWyI4oDggc -+iH2QIJ9McpOWD83m7Bzxjx+s3jUyXkAVc3czAH1oMAOfiMozL/W8eZk6t6idLfLa -+VP80A0hJLuN+J/GdttmHXCzXvVIuoN7RSxD88GRXu7gBlvKX4rVxwjsJtfkdLEYr 
-+BOB/IWo7SHqzcs4i1mXlS7u0svOWR0L1upZbyE6JRI1HeTOle1H7T26Khc3ZTSTy -+0l3qsHQZpTgPvpf7rQwrEwAgUxdoefeYheFUdz3wX5GJWDV3s69B939IMrJcUPqa -+0Vbs0DdhbbuOAmgKSOblTmTyaSflwlA2I2KYqrz+y6frvE9DopoEn8mHeVCZwgXR -+5RUCggEBAMv2kXszsi3rgnlpFObRRnjyE5g5U3As1ii6FzUHm/aurUYqAZyMMnE+ -+RdKX1zBUfTUXqijSpl7avZrf7DVTWR42kiCJM7PM0lrcFUcYFCIs0x7Qk2j3YIpm -+17vOFMpkZyYdWIniw3ihmWQTM3s3qTfdAbZPztnSInnjp+aXzU/tbfsmOmROcLY7 -+V0xYNWeMs+P1axoEC7zGmhCAaN/A+zDY34VLHGDrR4dVKCnFUQFoRwqfE6PtICFL -+4fvdbbhwmU3sxY48y8ws/JyMfWachcFg/zbHVRR/K4j0sGfJB2NKu49jCcwPFaFT -+jLfytXEfbNWBGcgHySC4HUwnpUVxN44= -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/ibm_armonk.crl b/rust/pv/tests/assets/cert/ibm_armonk.crl -new file mode 100644 -index 0000000..b1f3b77 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/ibm_armonk.crl -@@ -0,0 +1,21 @@ -+-----BEGIN X509 CRL----- -+MIIDdjCCAV4CAQEwDQYJKoZIhvcNAQENBQAwgcYxCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMScwJQYD -+VQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMxMTE1NTIz -+OVoYDzIzODgxMjIzMTU1MjM5WjA8MBMCAgIrFw0yNDAzMjAxNTUyMzlaMCUCFDIv -+LrSYUcq+W27wAZwp/0Pf7ydJFw0yNDAzMjAxNTUyMzlaoCMwITAfBgNVHSMEGDAW -+gBTDjB5dtNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQ0FAAOCAgEAgvfpzZbh -+/utz4zq6/KQdFW25wK9tqPkwFut9vdsOAGMkRRcX699Rk3Km1TpgBVGRKAlsQKfS -+IMq1FtNwbKMv+ycZHXDewk/i7dd2A0eUNzbDxD/WJ7mBdUHJtzkvIKwBmU7SkwdM -+yBRA20H8xKioveDrms+MM3a4oLNRzkZI/FWkSqjCDh9lI6igZNt3J29vkcdIlu+P -+Maa7Cpwh1ELrQbwE3NVgbr172D9qzQ+/NiRlM/Hla/lqGELcUgwVgkNMBFEMo7iK -+XsWy9WQ+/0RExo7n09jk9D4tCyAizGae72Xai8IJQD4tSVNpv6ZtNHZBBKQhkjGy -+PXGM8tHC91Tr8E9CSVVOrtxVit5htxjnTkJLg/9XnoAkxGNbUFbvCm+zIOzkmlJ4 -+Drvvt9W4tVfkcfILHKmy4PG/0JU/tAygpr2Hk39e/Lcym64O9gz2g0pU4On/qD6e -+91Q4822RIYLDK/IO3H30CbO25nBChZ4z6KBMWXHG8mTJ5m1IbwZTPxBlL8t47P9v -+2ce2XDkcPzYpk/wqJ4xxQYhuhV9RaJreEnnqCZ9ER7L+heoJcCZcnr56wT4mNd/a 
-+Kfdm0Oxm8CFCMj1djVJQ7+ghsOY3RAehQ75m2Od5BRM+Vv3gYtkzPWTkWjDUn3us -+2KwZvR+lNaIVXPlQqBlIWJvehWZexLzTkA0= -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/ibm_armonk.crt b/rust/pv/tests/assets/cert/ibm_armonk.crt -new file mode 100644 -index 0000000..7f95dd6 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/ibm_armonk.crt -@@ -0,0 +1,38 @@ -+-----BEGIN CERTIFICATE----- -+MIIGqzCCBJOgAwIBAgIUWYLchAxfdv8uD28Im9DK+Lo8pokwDQYJKoZIhvcNAQEL -+BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg -+Q0EwIBcNMjQwMzIxMTQ1MjM4WhgPMjM4ODEyMjMxNDUyMzhaMIHGMQswCQYDVQQG -+EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD -+b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo -+aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxDzANBgNVBAcMBkFy -+bW9uazEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2aWNlMIIC -+IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxNqcFB9GSj+nC1PVOTIAvzTu -+o3X7bTbohEPrIDDsEcGyVzQtfagI9vkxhhGLhXxIXrkwEI4u75DeLBdqZR3KFJ3Q -+D+jHW12EWNpmSYu272CEAJeb/SBJaFbh40WbY1BsTP+3zI5QMwVTmCH5QqODD+GU -+0fDSo9Gti0yX4rxOY+54jDC77AhSlU55rYruBoIXVe4CcPUpqzZLgqB2cTJ5g7lo -+kGqc6w9pveCznVYGPfcdDt+ePGssOvrfBOJnn6N44tTRAG623BrpctXit3IAhG60 -+y6CqF29BjV0RKOv09nOBiph2a3lAiKYrpAt92BETFB7KSQManUrrvWODmYEcoVkz -+vrVrB3WnCcHs6lsjQg1FcNrGcDzV9SmX0BIhM+fasxZyHWc08pr3NrRnEks7lRqY -++TS8DAeZYJt/2M0Jr32d6Bh+WdyGFFUj6sBPtCaC2VnSkxOgKXeWf/c5EUXIQT2Y -+XEkNK9CP8Kqs8IerEIpfceelQReA5QcvNruaJNktWLKJgYTrdLdfOP0us+9JJHWc -+FH3tv5906CA+Tlm53Dk6SaRc/DB7lu94yTkcSOpxXZnximZi0GcPZYmZTaswE7d8 -+HCPLhks/RkZSV2764Kl08xle65APiWZ4dM24uSyu75Izb4hrwgIA4qyMdDVQnI3c -+u6utmKUPyHoJtH26dr0CAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAOhgxpbnRl -+cl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAww -+CgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU09GYwHQYD 
-+VR0OBBYEFMOMHl201gGYrA/OoGUroGeldRTZMA0GCSqGSIb3DQEBCwUAA4ICAQC5 -+dH5QKZHynK7vGHOtol3brWgAWKkUVj+uqok/AUyGF5hkcXI2AVmu5nWV16Z6/c0b -+rD37YMPCCnZorQQg5g3c3H9In3NzTYWj1q2YT13yQ5PaD56vkwfPQKlmY2kMb/v8 -+Y5Ho2LFjhKKOpzP77CsYMs9ZdXs6VKGZ5dSmOAOvJ4AcGaRPs3jXVz3EZFgc+ytK -+705mWrAgTYI3xcemxBTwILWAVCoqqirWrDNd3jicQo4Ks/H07RtLuVNY8kXUJEN9 -+UMG7Ggzc/rTlvV/PUsJaQl8lunPDdbUBLsXE1iWaaAxmRTTQaDX8Ygq8NFZgGSrk -+E/dnJXcnJyV/5GH22Ho4JVVtADkP1wh3TKcojiDfM2WlzatSOMPdeISsUQ/D+VSm -+GuSOxPkS0wj5XUpoJz2bKvXNMH1Mdp9sMfOlkMe47iTmU5gK4PJMoj2NZ7zW9u6p -+pYXQz9LhdwoTJZBJXVYSA1q+sIIy8u3vFXEeN88FuoY8yS8t8qjwGEcKP1oojFxV -+ibj9cv87Vcd+tgXAqwkjxGQVbDZPBlL6OWCYoEluqWGnbUD1mUp2y8K99ZlzoT44 -+i2ipEFyZmVPrmwgKRlz1UYDIsQvtvVw91Oi+DRNP1u+9D++Mnz2itGixHh1Hob25 -+nQl1/SS4PfgMlYscbHYOfLAzOnW02FvrJZgQAQUpgg== -+-----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/ibm_expired.crt b/rust/pv/tests/assets/cert/ibm_expired.crt -new file mode 100644 -index 0000000..ba49ba8 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/ibm_expired.crt -@@ -0,0 +1,38 @@ -+-----BEGIN CERTIFICATE----- -+MIIGrzCCBJegAwIBAgIUU5pxAXMrXmWzioXpyoQoipFwsH4wDQYJKoZIhvcNAQEL -+BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg -+Q0EwHhcNMjIwMzIyMTU1MjM4WhcNMjMwMzIyMTU1MjM4WjCBzDELMAkGA1UEBhMC -+VVMxNDAyBgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y -+cG9yYXRpb24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGlu -+ZXMgQ29ycG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMRUwEwYDVQQHDAxQb3Vn -+aGtlZXBzaWUxJzAlBgNVBAsMHklCTSBaIEhvc3QgS2V5IFNpZ25pbmcgU2Vydmlj -+ZTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMTanBQfRko/pwtT1Tky -+AL807qN1+2026IRD6yAw7BHBslc0LX2oCPb5MYYRi4V8SF65MBCOLu+Q3iwXamUd -+yhSd0A/ox1tdhFjaZkmLtu9ghACXm/0gSWhW4eNFm2NQbEz/t8yOUDMFU5gh+UKj 
-+gw/hlNHw0qPRrYtMl+K8TmPueIwwu+wIUpVOea2K7gaCF1XuAnD1Kas2S4KgdnEy -+eYO5aJBqnOsPab3gs51WBj33HQ7fnjxrLDr63wTiZ5+jeOLU0QButtwa6XLV4rdy -+AIRutMugqhdvQY1dESjr9PZzgYqYdmt5QIimK6QLfdgRExQeykkDGp1K671jg5mB -+HKFZM761awd1pwnB7OpbI0INRXDaxnA81fUpl9ASITPn2rMWch1nNPKa9za0ZxJL -+O5UamPk0vAwHmWCbf9jNCa99negYflnchhRVI+rAT7QmgtlZ0pMToCl3ln/3ORFF -+yEE9mFxJDSvQj/CqrPCHqxCKX3HnpUEXgOUHLza7miTZLViyiYGE63S3Xzj9LrPv -+SSR1nBR97b+fdOggPk5Zudw5OkmkXPwwe5bveMk5HEjqcV2Z8YpmYtBnD2WJmU2r -+MBO3fBwjy4ZLP0ZGUldu+uCpdPMZXuuQD4lmeHTNuLksru+SM2+Ia8ICAOKsjHQ1 -+UJyN3LurrZilD8h6CbR9una9AgMBAAGjgZUwgZIwHQYDVR0fBBYwFDASoBCgDoYM -+aW50ZXJfY2EuY3JsMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgeAMBMGA1Ud -+JQQMMAoGCCsGAQUFBwMDMB8GA1UdIwQYMBaAFPn+4fDtrbQR7cCODmZ6Q/cFNPRm -+MB0GA1UdDgQWBBTDjB5dtNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQsFAAOC -+AgEAo+WGpxIz8s0UY8gOlCYeGr6SRYbgH7bcFVaV2fJf2eacv/YSaryc4PNwFnO4 -++e/LvE8908xq57Kl0xpCzp4bO941/aJkJuyr59TOe1nhcH0z0Yik9Dt1zRxxy0XO -+uVUSYuYEmKlWNgsWAGCmcHAz+wQxn2qjpD8k/GAe8Rr7O7r/EnSt6HvVKt3dl+zx -+rv88gKKJKWrbn7zn4/c/E3XG9RxunCsYGtOfUfk8tGeWtemoGNmXlCLCmKR4JKjk -+mEEM16veHHdD5k9MPj6YQH/jh1pDOD11o8GlOd/rEQCpFWeg23Z0uEq5j+qr1FI7 -+Fjk4HvTtwGU7QXn+QV9bSjhE6svdWVbmADwb3Z/UU//UzgEAZDPb/n4cC95vk/Lt -+Wwq5MzHBIyeHladxzLE7LuVA9dmkv7dnST7L5ZAqQYcT5c7vxkrTHD53ZTojPMD2 -+Lmd3ZIcRIdFbPChY/+ajuBaNpi+gBwQaQ0J4b70fKW7oUxOpgSjh12ACNOYmVRWb -+sCzSiIaxkD0pvIYLgNXcY2CekuWxO+4DLGNGPPAHeovue0yRgNKTC14AHsLWh04c -+aWdomQToTHFxJcoVk2LiE5nwg5Xu8BoNuMag1jl/ZQVsl4LrL4bItwplWdH/bSyi -+9VFeSwPGTiNverBtv/aF4q7O6JWB7/zUh2GqYQcILcaIy4E= -+-----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/ibm_invalid_hash.crl b/rust/pv/tests/assets/cert/ibm_invalid_hash.crl -new file mode 100644 -index 0000000..9a1f834 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/ibm_invalid_hash.crl -@@ -0,0 +1,20 @@ -+-----BEGIN X509 CRL----- -+MIIDVzCCAT8CAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u 
-+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll -+MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMx -+MTE1NTIzOVoYDzIzODgxMjIzMTU1MjM5WjA8MBMCAgIrFw0yNDAzMjAxNTUyMzla -+MCUCFAUi4bhk78T24IBu9CEi9kegwxmXFw0yNDAzMjAxNTUyMzlaMA0GCSqGSIb3 -+DQEBDQUAA4ICAQCH2paQrJTqVzDjBOixREnhV0Ned2SHHwuIxZPeA1LrA1MR8KeY -+RCNiBxaOg2fkIe7WbPbiFVmosRq1kMjOJrgajvI6CG4is5F37KhW+Q/8siNcc8y3 -+oiu+b8O5wp7vfUq9f4bxh78ytS+cHJpxHLoYzlp1f6aha2x2kUqDQzEf3ghIXEAN -+y0Jlmn2OqeZ/cCnWG+QF8Yb9OMlnR6rTpwk4ml0TvMa1/LhBUMj7vva0BmXuTunW -+eEr/wxmTMyJsRKNXgqRDLil+hnNzIP7HOmofyad9dyHv73gOkmu1GizwSB/AEVoB -+J5N/LUeCh4cG3YJYYpw7HaAHUnka2SDiuL3MD7VD5ONXiwcZIplT7I9vJHaBimoz -+kUTr3BkzFHas+KiPuGLm25Pxc0G8mVZl0jzoIJe0JS4bi+NCqeQ3C5s4WOZ476Bs -+5PSWbkJ8GwUxtXciOxHVKvgnBkc55bng2VEJlRLDfERdcTa1yobvJBRNZ8J/gDu5 -+aWvRJs+cQ+1Bj4oq3ejV+Be5UFEVEFfQd4wFixvZcr781aDhYg744Ig0n3oZE0pg -+ye9V55q5yra6K1eertb63y1WvIrpfvNBM5p7mFbYBc5vnoH44mAhwbsCL5Mkjcxp -+0XjPSUsE9OyORtN92YLrZztEXIZmjw7otzQCQmcj/0ri3X+M1RLUTh+BVQ== -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/ibm_outdated_early.crl b/rust/pv/tests/assets/cert/ibm_outdated_early.crl -index 2505dec..8ebc5c0 100644 ---- a/rust/pv/tests/assets/cert/ibm_outdated_early.crl -+++ b/rust/pv/tests/assets/cert/ibm_outdated_early.crl -@@ -1,19 +1,20 @@ - -----BEGIN X509 CRL----- --MIIDGzCCAQMCAQEwDQYJKoZIhvcNAQELBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD -+MIIDQDCCASgCAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD - VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u - MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv - cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll --MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UYDzMwMjIw --NzMwMTEwNDQ4WhgPMzAyMzA3MzAxMTA0NDhaMA0GCSqGSIb3DQEBCwUAA4ICAQAa --bg7llgCL+OdmZQEeBKey5Dm/NxJGJljoT/sxFbQ+86lwACh1mbdxVkaPyUC/oE9T --4ppC/eHoaRcdmvN4FlIYrUhqnrTGD4s8VSoYvJ7+f5ZFGjUyflnMwyaal21hDaG4 
--2SZjPVOQ0ksEA3mrHE1MTRVFqFl4ZFxGhh7NYMoOEkffM1UooChWHTTBMz67nmbh --Ih0MDHhS5J7677K2N05402Z3v3S+Y8QEjIQjDsTC1S9V607eEfG9YEND2KicQKPH --r+CK9/fVaiTz9wgUEyybps4MFoWBuUqqRebQoargFZW8w329LuS6VokbM6BSduOT --qaYFtzp3DGZbvKwUGjiGVgB/PzzB1rv+2+i/EI3D4RJt+k8xvlBIIONxwK/hcjI3 --/i6hJueQpeCuasfX8ck/uKzSf0PhCmyLwWxQux66FJq4sXqWoqwf5P/U+tbB8zna --0cX5/f8+rS7ansbxjeiCHUkbdUEoY7k7KMSNUrtqbgQ4VyjTziysTbSEG7jkb4ri --Jaa9mDfWCkdwfB3TqDofWRkOdNpPTkj9TVZJ5FdV1h39D9O7B+VvedIiVod/KhB1 --DyOa44YpkEcS51PuNAC/exUd6nOv9Mz+WOUP+RrxHqndRYE0RGaFP9vENyks0Kga --4CLB/IbT2rmpLivK2i6i3NOqzcykHOab3LtwOLDDmg== -+MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UYDzMwMjMw -+NzIzMTU1MjM5WhgPMzAyNDA3MjIxNTUyMzlaoCMwITAfBgNVHSMEGDAWgBTDjB5d -+tNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQ0FAAOCAgEATxMm63G11oaOvRm9 -+nb20MXyXaqejudryJLcKSAUNNcvVpfj4zO4iG7Eiipq5svrEHmb8aNELKb7eHVY0 -+uT5V1hd3xELkQqVS8rxcD2rQfoE8B3kkeuKtgI0yxC4rkrt8recajh8au/FSoknq -+Ts1CKsc0ghFo01gIv4fgWC7eWFrfw51T/bZW4AN1QsAT0wEqcYjuDTZpwXU+8JsB -+czQ1wWoKJPCAbh6ZOd8R/DEywffWSVjS+8hrmIc6TAwfu7SOBSvDLh7kKZeEgZ6Y -+lwp1R9EcRBYLlyclg4E7w/WL2DY426Lo+0aSIWcFwGbU3kWwFNXCKIpYvVQZ4QPT -+2lZfuRVMe1ZgRdPRz5wkiEYYIGFoe3cOC16jHGi+Vs9JQX/EgoRiy/YYxZdckuxA -+961S5h4bbYyB9aNH1vNn9Kp2r8oDGD6xucXM9cTXDsXvLLSFUxcFqoOdixYqFzX+ -+EsZM5A33pMq5zKLkRD8JI2lP7nT0wdyaDjuH4ION7y90tJiq1Q1GPfmJ6+mp/3/I -+QF1Mh8axL59Rgz/KrzszP4+kn3NuhvKQOjL3QDo5TSXEmsdjERFHsmOuTwB9E4iH -+EIBu7bz9GfVMDUL8LSOK8Tq9Gj9Zzy8kYe2BuwNOTTDzQAUnPZK/m6p1wm/K3vjL -+2pjdFJmfRBtPNSRXL2hY2MYdZDk= - -----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/ibm_outdated_late.crl b/rust/pv/tests/assets/cert/ibm_outdated_late.crl -index b2db750..e37d2f2 100644 ---- a/rust/pv/tests/assets/cert/ibm_outdated_late.crl -+++ b/rust/pv/tests/assets/cert/ibm_outdated_late.crl -@@ -1,19 +1,20 @@ - -----BEGIN X509 CRL----- --MIIDFjCB/wIBATANBgkqhkiG9w0BAQsFADCBzDELMAkGA1UEBhMCVVMxNDAyBgNV --BAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRpb24x 
--NDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9y --YXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMRUwEwYDVQQHDAxQb3VnaGtlZXBzaWUx --JzAlBgNVBAsMHklCTSBaIEhvc3QgS2V5IFNpZ25pbmcgU2VydmljZRcNMjEwMzI5 --MTEwNDQ4WhcNMjIwMzI5MTEwNDQ4WjANBgkqhkiG9w0BAQsFAAOCAgEAh+81L4ws --rvOC1j7nwxthaIE4m8alnuaq9h9uxYfDZooimvGwhCjfcoWmfSSA8lt3vbMJ2vRe --ZwnXXCyGWmetZ6ObnT0jwL0QuOYEuIdA8QHleBxLYBeFr4k0O9i8i3VUG0YcgZuN --H+lcpGdoIx2WV0cZ4rbzZ1cNx3bieaivqNoLQAy7g9jTizmTfY5ZvlvuG3iqSG+P --08APxmtt1qFEm1LVu4SyUSyoGB1NaxeoziMITQdfFqHoPRsu7Wdyuqi9f5irIPwM --VQNKs/Y+3Q3S8YkTW3yqxhj4HdSKJE4qVBLMYm7muirDFWo25u2sDX1LJHBsQLvV --fi7cGY0YnOJL2Y7A3XKDuqtZ34zpXg3Hhqpa9RF55K2u5dYUaPq8MEQUHK67II1r --YZAwfarhijQQ6t03E0vrzPVYpK8VjNUunYKQdOBS3OkKgXCwEMuqQDrps98BgDQ4 --qfbVfxwm9XEHJZaX/qFR0sp8OQd/SD5dnS3DBl0Pp5w+w2xIaSA7QmBpDqWY66Hb --cJq4CLOKTasHTddHKz7O7zIu8QhwJGLabtnx18iaHTNNHaTF6k/51pwvA3HkJgds --HVcUNljsDNSPE258JwR2XoQAUu6VuFwRzgD7lGwdI70CpIBeAP1TRSius5RsZB+u --cw+872CILlIdNJ72lzMPWQNH+IB1RU8U/eA= -+MIIDPDCCASQCAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll -+MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTIyMDMy -+MjE1NTIzOVoXDTIzMDMyMjE1NTIzOVqgIzAhMB8GA1UdIwQYMBaAFMOMHl201gGY -+rA/OoGUroGeldRTZMA0GCSqGSIb3DQEBDQUAA4ICAQAGD8ryV/GUC+s4qfqchMZA -+QYYOBcV7lS9i8zFrdB7UJuhCL/gXzrmldFsi6hW95PBNBtADT75UQ4JzNNVKYXWe -+wTbEOG54N7Ff4LdbbCRNjqpyOkqQgRWZOgiTeHSeiNLU+P98HZVKFIfiOS6Rs6zB -+5UNdYwCGz9kkeiR7xSpp0z86jI8WSxHR6e7CH78Ax/9dpGAyKYYj8LY0l7igxlIL -+yRgu0S81VsprcHKuGp5wcnabOWO7lEUCTFbYa7Cgc1+avUl1vaCmMqUsvydurCU+ -+BCN8Zhc7noOXm0AE+58r9yy30aW9n+NUmP7uX6Eibb5NXtVZRVe83Ltk2Rbi9HRM -+hkcO0X37mXu+jcqw714i/r0mUsfGtC54IRMCgyIgWJ3TDdJ9ORo0rkrYRXVzKHdb -+0nqXEsrYecaCNcnx8iygC2YEwAN9WH8cvvLTJQcL+j57xcJVqOSbhXTEE5LfiHpa -+DO+dc9A+THZNO+o8GEZ3teTxylOVctiO9CkUkICmVZ9VDA73qOzHI8DJxpCAwT0I 
-+whm1QfCROAXYFW9enTuCGDZGPoyTf+tVrAGLmCWLewBV6W3z+7OBfNomktuI4Z0+ -+opvwIJwYX3emeZ/5gDuYmwnvNYkC0GRQ/994430b/4Rssv32SCjToJ+Ko5pz4x18 -+YUtbQcKxpJzRDSkccck83A== - -----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/ibm_rev.crt b/rust/pv/tests/assets/cert/ibm_rev.crt -index 6dc4544..0e4dab6 100644 ---- a/rust/pv/tests/assets/cert/ibm_rev.crt -+++ b/rust/pv/tests/assets/cert/ibm_rev.crt -@@ -1,39 +1,38 @@ - -----BEGIN CERTIFICATE----- --MIIGyzCCBLOgAwIBAgIUTPiBWJn8k37onZ/aYgLxudbD3kgwDQYJKoZIhvcNAQEL -+MIIGsTCCBJmgAwIBAgIUXvZ6XWXXrTAFn66B61xZNya2iYMwDQYJKoZIhvcNAQEL - BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y - azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg --Q0EwIBcNMjMwMzI5MDkwNDQ3WhgPMjM4NzEyMzEwOTA0NDdaMIHMMQswCQYDVQQG -+Q0EwIBcNMjQwMzIxMTQ1MjM4WhgPMjM4ODEyMjMxNDUyMzhaMIHMMQswCQYDVQQG - EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD - b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo - aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv - dWdoa2VlcHNpZTEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2 --aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsXm4EgSxVmuNsKgo --sfJNxNQFkdbpCxxyMOuyBFyGKxSwiGl20VWB6itsG+b9NOmAI6D6FPvLPuB/gSq0 --HR5FuSIkmp9AUx6Xi6lVA2L7qdINo1OA4nJKe5hLSHfjWn36DZFrDyoODlDzLJrS --O4h4QSP1nDSFhk1ilsR1czlvKreXkJw5FsUHRhZGG6idLoK2Ibrne6WsVhPG6WNA --abuvQFv40cQWcyRoePktG8ImnNF5GDewXwtpzARSQj9jTL/gE77DH78a77J8+H+h --d5guiBbPZK8gNkPe0CjD0B7tUx/+sByoaSeQffFG4Cnu5JJDqFq9LXPjtlG2nXSB --Qh6Sc2gzP3jKV+ZcSwQw9AxEQ0ZB/VIPsR8KZKReDDztEpcKxwxlh7bEix/0Y1sv --zT7/I+m5kufdDG6j3hHHzniKXL4b/WyedSfdnVqIJw82FgPFgyIY6F/0ccLdkhAm --fLtQJBHc3UyK13l0qVJxhAFdz1Q0zfScBS6qM/Gnbcdc6MY9/bZdIK7E+4op5iAM --kvQHap7qnArhI8VQ1bcXYlQ6asPj4e10lmzroiBHM4N/Yuxv38tmtUCudSB+EcXi --EgJIOLmLq2ZACRzug9KPyMXOD/Yxz2wPgs6I3gvrB22w/MfC77wMfEMuQQk4cZel 
--TFKosHgLjvcLG+zx5yh5ZVFcNV0CAwEAAaOBrzCBrDA3BgNVHR8EMDAuMCygKqAo --hiZodHRwOi8vMTI3LjAuMC4xOjEyMzQvY3JsL2ludGVyX2NhLmNybDAMBgNVHRMB --Af8EAjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAfBgNV --HSMEGDAWgBQRqYdWstn6ntusRZ8LjyPFEicL5TAdBgNVHQ4EFgQU3ozX8OjQ2AEH --cX1v2WhZID6wZ3wwDQYJKoZIhvcNAQELBQADggIBAA1ipq2MIRdRIiUZ6M6AjY91 --L5iUpgKGrmhna/gg2b6AlugDOtVDsfeFm389aSplwY+zcJZ2AbUXhXe8RVHOEUwf --O7iEDWsMZ/wxit5ZotJ5kzr49n9RTtomgbQSqxWadLq6Q9hYOWg/cr1FvXwL5tyO --rHyuRXhkUMmuk4aQ1sfybfPp0PJKbzWu001Q4rxbJTlaib9b+CcsyLWHs06JXVKi --755Lg9/ND1bQjW2CMJbZ2rm8V7oh4J0tJuF3DntOjyOk+yosckTF3bFffGPR67WC --RkZebIKx2Rh6OXMrQTLz9ldqWo0cW0O353gSmrMxExWKhgoDrZKc4UeOanweDTqO --4lU0RP/4naDuQl6/FE0rUzUkfAJmsKIuI4G1lQNZhaqUH/BdN1du094RON0T5agK --etoBcPpNpxOn4N86TJaYoDjRSDpKwxXVKZs9lk5GRLxRhqtY3iQYVZrz2gY36Ri4 --lnuKZCeFmfjHvvktmb08EmrvGiQAhXzI8yfeVhlwP8lhtumIO877VW++tedK0z0D --6aBz1LsVI3IbinDZPRsWl0EEi+JFmpIktmMTdSn+0vTs7XJjbBZ+VKaPPWOG/Afg --Qav6+1AnnMtieGfrj3tyyfKo0vPSZKbdGzEr79Ukl+cxLdG5O5qZTy4ASm3EOw36 --p70HafhN8Vioa+uhQObP -+aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxNqcFB9GSj+nC1PV -+OTIAvzTuo3X7bTbohEPrIDDsEcGyVzQtfagI9vkxhhGLhXxIXrkwEI4u75DeLBdq -+ZR3KFJ3QD+jHW12EWNpmSYu272CEAJeb/SBJaFbh40WbY1BsTP+3zI5QMwVTmCH5 -+QqODD+GU0fDSo9Gti0yX4rxOY+54jDC77AhSlU55rYruBoIXVe4CcPUpqzZLgqB2 -+cTJ5g7lokGqc6w9pveCznVYGPfcdDt+ePGssOvrfBOJnn6N44tTRAG623BrpctXi -+t3IAhG60y6CqF29BjV0RKOv09nOBiph2a3lAiKYrpAt92BETFB7KSQManUrrvWOD -+mYEcoVkzvrVrB3WnCcHs6lsjQg1FcNrGcDzV9SmX0BIhM+fasxZyHWc08pr3NrRn -+Eks7lRqY+TS8DAeZYJt/2M0Jr32d6Bh+WdyGFFUj6sBPtCaC2VnSkxOgKXeWf/c5 -+EUXIQT2YXEkNK9CP8Kqs8IerEIpfceelQReA5QcvNruaJNktWLKJgYTrdLdfOP0u -+s+9JJHWcFH3tv5906CA+Tlm53Dk6SaRc/DB7lu94yTkcSOpxXZnximZi0GcPZYmZ -+TaswE7d8HCPLhks/RkZSV2764Kl08xle65APiWZ4dM24uSyu75Izb4hrwgIA4qyM -+dDVQnI3cu6utmKUPyHoJtH26dr0CAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAO -+hgxpbnRlcl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYD -+VR0lBAwwCgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU0 
-+9GYwHQYDVR0OBBYEFMOMHl201gGYrA/OoGUroGeldRTZMA0GCSqGSIb3DQEBCwUA -+A4ICAQBlx15n00tEAP8moDUehCtLLJx7CCG/V7gcldTZACXpOYkcuRcKW9GlXqQv -+N30rTZpIZv3XIQEhhWhMJ3dqJRC0l2CbNlrIcM9t5p7GrYS/I/HGamKIU09jfud2 -+1/FimjvBOQKr583vTLL0kr3Aosd3S8jHGA7Clal/85SntQN0kDxnceo01aCUhnxg -+Lkrd2+N0wPYGW5DZR6jk4Y/GiOO+q/ANO+tm4szT8RNwC5sNectpaI+ZRNlUHCdM -+DtZO5HkBADQ7PdZ2x51gliS5l9w7obFY62TG6LgtKFJgqEhqsHMIp+/OwmKbpP/p -+urUDJFCZGWD+lBkaxyy+VsPlvddU7gnXSm0wXoCxpXerRwWFb9/Kc5Q+kQB4nCkv -+bHm/zAkhjoRSjWYPcLL3F/9P858W9QlkhNdcoq73EIuc3FHUMdQUm+rjxGxfO76h -+fXoR2uBGlESO+gKL2iC9E7KKB+o07hdmafEnE8mGTRcGmVPp8e5SS7LfN8lfHez+ -+3gBo2/lW1+2wBmBNIeBkkBpUI69DmNZvMliGNl/3LAIR/slQ6ZYXR/2dLeyhqwRt -+QSj5e6Jw2fg3ekG11R98OGoP3ZC/N1dTTh82O571ZBGxIWA6zPDrWj7tX0oW+JK0 -+Of23BiH+4sex36HshqiyZ/faq4AtO62uII5mRQzlAwE2QdXI1g== - -----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/ibm_wrong_issuer.crl b/rust/pv/tests/assets/cert/ibm_wrong_issuer.crl -new file mode 100644 -index 0000000..6f912c0 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/ibm_wrong_issuer.crl -@@ -0,0 +1,19 @@ -+-----BEGIN X509 CRL----- -+MIIDCTCB8gIBATANBgkqhkiG9w0BAQ0FADCBvTELMAkGA1UEBhMCVVMxNDAyBgNV -+BAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRpb24x -+NDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9y -+YXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAcBgNV -+BAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQRcNMjQwMzExMTU1MjM5WhgPMjM4ODEy -+MjMxNTUyMzlaMA0GCSqGSIb3DQEBDQUAA4ICAQC1oMFXyJ2mAz0el6drt4KtH38d -+FfDvsjc6hTgiPbmQW4NmLuSKnNLOyOulsRaV7Kl7hxQpd+Gyimqbtb1ObeqLvnyb -+W5kX90k94l8M/laZT1mZm39AGR8xW8HDtuJgwliI7RujX6iWuyrJCxM4qpAHe0Jw -+GGQd7ZlzEZ/4xMsZV0lPSsA9CPUn4HyZ2FWj3DJjfXloEOu+krcpmvmL3EROUi/h -+F6bsViKRmoQoe5GJFLdG4eVSgc9+ejaM23n8C0tmAhz7FKOYOqU7pS9+hvCyJ2Ul -+c8S3V7hzTlz1G4AJAeSJtCnV/hkpt+yj0eQa0Q92I0NZY3UWT8dOMZUxpHO91Th5 -+4nfaF6FVSO/AQcpr4FPElYObp+RigqWf44v3vrriHTYi9QntKaNZq5XLgx8EkgBx -+1dTM7mDJ9W21ZWGEbRtLRaKt71RVsVKVLh5CPQrFEzENOTuy66QD26LNC2BfE3TJ 
-+tsMgjm14Fm1mC91roj7sOsO3MUl4YKh6cdKFtfryWr5Du+7x9/6vjabuay/BopC9 -+BF/q8ITHh5Shs0h9OpOeunGFbcTnYOSgOpDLwC4++hur8p0BzClJkt2Z4yZGuKe+ -+lh66PA/UkHtC3E8NDxVRkd+NsejSJUzRR0+1ruEoDEGP4jZKL83UpOFjIWGlIBsM -+2gwwqV/LJsywfW1kFg== -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/ibm_wrong_subject.crl b/rust/pv/tests/assets/cert/ibm_wrong_subject.crl -new file mode 100644 -index 0000000..94debff ---- /dev/null -+++ b/rust/pv/tests/assets/cert/ibm_wrong_subject.crl -@@ -0,0 +1,20 @@ -+-----BEGIN X509 CRL----- -+MIIDUjCCAToCAQEwDQYJKoZIhvcNAQENBQAwgckxCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll -+MSQwIgYDVQQLDBtLZXkgU2lnbmluZyBTZXJ2aWNlIEludmFsaWQXDTI0MDMxMTE1 -+NTIzOVoYDzIzODgxMjIzMTU1MjM5WjAVMBMCAgIrFw0yNDAzMjAxNTUyMzlaoCMw -+ITAfBgNVHSMEGDAWgBTXVfLQH2D0KccIHhK6M5MRTITvtDANBgkqhkiG9w0BAQ0F -+AAOCAgEAkHC9VxzSFGrY4NWoMLkunjaRQ5qmc4+3PFDiZLaGQua5QMJixMo+QIp4 -+RfCLAkvI4VzMdoJXhl4+sV0mocn1HWDBxFUwfsvV4h84o8bEUK+bizrFsEoN58R0 -+J3yfbikgj4P7WFQdU7p7bbAkmoKejt5+gu5etV2royIh2mjckCN7WnYzvhfRUBYz -+v+93R1usMMQIVId6l6k4DuBqaFyip8AKXVoXj4KbmwXZ8n+ILd5gWV3WKYo/ffCh -+h2g+jaS8JS8CYqtVGEb7F0zQwdPe1tHA/SBFBCNVqzHos+yET1m0Cn1zoYPvkEu9 -+U2OVK5tqffEyrkN8hRm7LT2NIYji/z4VOIY/sGA1SPO7HNzoheAyHR7u9ortGMYv -+2Q69SEqtA7N6kMX3y/dL95sI/E3EJp5Z27jMMP7+aJ53F/5xmIqkZarA0ETuBic/ -+KlNjG0WJilUMH7D7emTxksTdZOC8OKjg6gPY5GhVmlQ/LRvJQtoXqC84UQV2ul8R -+chvU55EUB67247g/WAusCjutISXnKQ4RGPaH9eOu1yN7StiXtJyiZ8R14ZihhC82 -+ZzxjfDDk6ATR87bkAeDYJvubLJkVtUyUK0vgklNsyA+hwbdr5E5Q7IaRW2Bk/tnt -+K9ANhMjaerwcR20aOmA7pVuMGXU2MhFIleQzl+Yfv8N1CDECw3Y= -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/ibm_wrong_subject.crt b/rust/pv/tests/assets/cert/ibm_wrong_subject.crt -index 077e0a8..8290c7c 100644 ---- a/rust/pv/tests/assets/cert/ibm_wrong_subject.crt -+++ b/rust/pv/tests/assets/cert/ibm_wrong_subject.crt -@@ 
-1,39 +1,38 @@ - -----BEGIN CERTIFICATE----- --MIIGyDCCBLCgAwIBAgIUOgU+VHYEK4Q4dZokfM01Ok2XBtUwDQYJKoZIhvcNAQEL -+MIIGrjCCBJagAwIBAgIUOJ2eXoc9SJ1pRCC4x452LX35eNowDQYJKoZIhvcNAQEL - BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y - azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg --Q0EwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0NDhaMIHJMQswCQYDVQQG -+Q0EwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUyMzlaMIHJMQswCQYDVQQG - EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD - b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo - aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv - dWdoa2VlcHNpZTEkMCIGA1UECwwbS2V5IFNpZ25pbmcgU2VydmljZSBJbnZhbGlk --MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA61P3Lfow3E6wGpMJmUWp --wsfFZwcuKSf64JXDn1pVJLUcjTwhApshnGaSBb+knlpwvsO1evrR7re9ZRh51730 --IintOP5IA3CSGd7fqmTpchx3kdFOndrXS7BwAWuB/eZ1qzKeOYpyAS3VSE4FphYi --LSxGfwSUl89pwYyWyqGl21hv/sBL6cc+Lm55vXbeRwWKW9K/w7BkhtK1zx1xm4i9 --4x1aXJ6DGWQpIk1sVDNPtzQVZYvmR1Y10/r75sNgA/WMiZx3/2VyCREnV+UXfvsX --fyMLcbwMWWt6psdhtoGFZ2sLJka5ZNvttQKfbde4TA3I6fpsrMi+oTT9YO3it5zG --ORCUC+j5B+zrzbSv+RgL+SnnAPkHqufb1a/4mFs/uTbjUYHN2/rhObnkLK4Xtfly --FBlivxx5haT9o49YkCv7l57+We4nafBPMw96ac5AGzA0gVwdMTeRZ3joT2Pc/zSf --H5E9wg3MZfg3TN2THB4S//r1/XOaA5F4BGjorbpPhp1/YaeF0rRMlAbZVKXHZJBR --n5qN8hD/V2tXviEkrZRL+iW6ltkslsjIkzrYSS+6goymUjWrkGjmcsTo0SStHE0p --7pOChLwpUtpaElemp1NDzVJqvrglWPkM1ZIIjxpk23zxKj7V2FazqP6PVuyeWdkj --VYN86ULDRG5j1hfn/n0HEC0CAwEAAaOBrzCBrDA3BgNVHR8EMDAuMCygKqAohiZo --dHRwOi8vMTI3LjAuMC4xOjEyMzQvY3JsL2ludGVyX2NhLmNybDAMBgNVHRMBAf8E --AjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAfBgNVHSME --GDAWgBQRqYdWstn6ntusRZ8LjyPFEicL5TAdBgNVHQ4EFgQU+jIyiVonTYe6GuSP --iAkxA65qou0wDQYJKoZIhvcNAQELBQADggIBABcvU42Z/T7hT8ke5viG2x7FJmwA --gkrphOYiooB77sxY+cjxaYsct4yvFXvwuNAcstnlBK0jJRaWzLwswR1t2bXbRwQF 
--kjDO3br4ALRMHkDPa8iNioogyap8X6r40p7rvfnudKX0+MruLHXN3ZM2ltucYAYU --oR/Wa04KxDuZQHeKrDosAsJCv5MwgF69H3oPbhspFQsP2V5fFsxupnWFzVlwPfcQ --0lgHVC3nZ2Rj7ZariT/px3nfZ6Eg3pRyK32r2SQWVN/oVBEd5cCTONvD7Hr2SrtB --9D58f+vDyVNWM5OED7NqlNDaQw2x9BMjdEVYTGGRW4IXPbXWH08NUcEkT1Tx/vUE --EPlTgwt88Fca03yvAn/8Daw7ezsJNAFwDpPDcQhPi3vg2l32nuRkuQ5641hJiTGw --TEtpJc3dg3FJymG999rOCLLIheNLMehEDMPZHqG7XeEg/42F0580MdkOenMpjhwg --ZhrommB85sZcGBOwc63VMb5PPInYDQi5PXz9Tpann/VliVd4Dpnyn0XVy73VccXu --WWgDt8gJKWUpRiJ6MZzEKkBrXYjPLmrKB64usEJNQ1e2NIKV3bwvH5K3PmibyVBu --9fT5t0VXQpNxxlwngCjvjtt0D/frMCJQXpXpnz25aQDog9bnD1yl02SzxdZaVK05 --LZP4wR2beOGlz828 -+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAw7LcEUPdhr0FKp+muH7w -+Bj5Mbktl4e4kpIMqc2OaTxtwM4RDUDpLwy9X4PuEpGYEgOwOQYyd6udfInpjVVTy -+JBnwxdfpgHfsdqamkrZDqjGWHeSw4332D8gGMNFLmbCuHEzVmr1XLYQFVYMTuWex -+VFvL9ctA4gv5xO9f6oL7fVE7X420qb7IzntrxJHKsQA/IheJkZHYvPpBLs9Xlwje -+tb5OwyhdbTIsfR4V3vHdQ3shwD9TFbc+6MTDryWZThlDDUA8f+iO+euAe6QjLDnL -+JgLwnXd4EmGyePDhBM8oSex5o3/vzJVmJkpb40pvloOG11qlXGaqJxOLH2jjsNKx -+iC4l7oAHP0tXIw5f8hn+pk5vQNIcIHguXIT65ZR0IFRE9tjciLOtxndEn976jmqN -+Qu6Ajx5DiImwzp4wHjJOoVWmKCM9BklFRhwJvZ2xTmBGvC20jX0OgEaCeWV+E7UK -+2pzfGwwkWdXC7ihIszhGNP/lLlE3IflS0MHSxtie5fUrwuoZ+bVkKYClkLYeKrBT -+MLfszTuWg+rJfGmCwvYWGG9gcIRc1T3EuOiNpN4YJ5HZyD4rSI82fSzbrAtAPWPk -+6iVdmo9Ban0pQhMzyH2Gu07OnvBpmH2ADLzFpugjWafW157//GCNVgjrKVN/JbuC -+0nne/OTbQgCO1mFOy6RI1RcCAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAOhgxp -+bnRlcl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0l -+BAwwCgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU09GYw -+HQYDVR0OBBYEFNdV8tAfYPQpxwgeErozkxFMhO+0MA0GCSqGSIb3DQEBCwUAA4IC -+AQATY32D+AMy1/x1WLGbQcvEkcT5+UEGp82FEVamXy4eM+zctzemEufjSszEpRod -+N5UgiNGDjuD5ttZSc37XHwtOwqZv2V0tu2jclxQQCswPjLhkecpIwSFJW9Y7IQPo -++4FKry6q+vyo0sLA+pIhTT+OHK8ijy9PQEx9YOHgr0HTTTG8UZ/6oqx70lvx9KcA -+x9PhFb5TWaOhOds+v7NEPxslwrMRQ4jOz92bGeBjSXBzxwohoKKWcLWIy5ktJsFS -+PhVztHa+on4AXdDJVifU4lYIPTRZBX491EnIpEaZtyEED319SNlj691+5SgDNHRw 
-+Ysk+xcVYy8RmL84qfTNce+t2eEr0vjnkUqVNt6ps3UvkK2jvlehBlfPW/Dy/r/Er -+q1KNvc1Uiza3ryf2dAMDWL2VBIKMZb+d88cEuLHIgmCXrxtcyxCBTWCK6t4cnKsQ -+J82B4XQ9IRCCJDTjGEQLxsjYt9jwHeUIK0l/sseBeohxMfDptfRQ8hWt7c5SvvNy -+xRGJ1adli7LiJ3+gnPcyO+D+3DPtKtgY4y8Aed5P3oDOQGNdUznSP2qexUpT9GO0 -+fJaRI8Bc3Kj1zLkS+mtYHeOBu5eyLhR0fSBpKr3/3RoKj2/NI3SvlBbJ6dD5jQBs -+4qEi8T70wjhDqITPyqZV4auy/0/h89JXDB/qAg162p8EJA== - -----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/ibm_wrong_subject.key b/rust/pv/tests/assets/cert/ibm_wrong_subject.key -new file mode 100644 -index 0000000..bfd50bf ---- /dev/null -+++ b/rust/pv/tests/assets/cert/ibm_wrong_subject.key -@@ -0,0 +1,52 @@ -+-----BEGIN PRIVATE KEY----- -+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDDstwRQ92GvQUq -+n6a4fvAGPkxuS2Xh7iSkgypzY5pPG3AzhENQOkvDL1fg+4SkZgSA7A5BjJ3q518i -+emNVVPIkGfDF1+mAd+x2pqaStkOqMZYd5LDjffYPyAYw0UuZsK4cTNWavVcthAVV -+gxO5Z7FUW8v1y0DiC/nE71/qgvt9UTtfjbSpvsjOe2vEkcqxAD8iF4mRkdi8+kEu -+z1eXCN61vk7DKF1tMix9HhXe8d1DeyHAP1MVtz7oxMOvJZlOGUMNQDx/6I7564B7 -+pCMsOcsmAvCdd3gSYbJ48OEEzyhJ7Hmjf+/MlWYmSlvjSm+Wg4bXWqVcZqonE4sf -+aOOw0rGILiXugAc/S1cjDl/yGf6mTm9A0hwgeC5chPrllHQgVET22NyIs63Gd0Sf -+3vqOao1C7oCPHkOIibDOnjAeMk6hVaYoIz0GSUVGHAm9nbFOYEa8LbSNfQ6ARoJ5 -+ZX4TtQranN8bDCRZ1cLuKEizOEY0/+UuUTch+VLQwdLG2J7l9SvC6hn5tWQpgKWQ -+th4qsFMwt+zNO5aD6sl8aYLC9hYYb2BwhFzVPcS46I2k3hgnkdnIPitIjzZ9LNus -+C0A9Y+TqJV2aj0FqfSlCEzPIfYa7Ts6e8GmYfYAMvMWm6CNZp9bXnv/8YI1WCOsp -+U38lu4LSed785NtCAI7WYU7LpEjVFwIDAQABAoICAAqhxnv3pGrkDQpORygB2Xd1 -+XgCl/wCByCLZ7236bNE8a+GYn3GV4TTW9x7Fe2TVGAyLBpFAGvo+nLgKTyg9J7SX -+ZjHRc6Gjokil8CnLVizCaeXw3T1WxA4Cb3eqf0F3zFXERNyVyc0yvXlyWBl8DTHI -+lPGjG8DMJsMwwBTFDfW2epPL5pNMRquVH/s7cnggS83F2pb6hfxqWi05XYhaloLo -+Nm463KyIi8s2XbjRihRW9bP0nMZywKuzuO/kioooLDDlmwPV9iKUzVOqTLDj6OoF -+Qd5ENdVF0oTojUkOGiG+A7PCyRvjx/tvkcNs8VgLiEFd7trwxvC9ipLnx2r9X2Cu -+zi8iQJ901lz6cj5tR5VBttM2MaLFtELTa4HDmVMf6cNuMNZO8eULNmV2EX5K/L5b -+JD9Y1fhctQr2+pEvhgela+hi4XC/jZ/HBxDg2gg1lzt7MNjODi/fISz/zGexGQW0 
-+R9KElISntcpsyG8bqH2LQFQ7XzgAF1qrUC8Z9KFONGRGGZViH/K/aeH00PqCEwG/ -+kv5MH+JgS+Rb2AM6HT6SK4rchAkpINPDIsD//dh4vdEfPQ/ysICf3yqACSYgu0cl -+kaTyNQE/Oga1tDOAbC8YlxPRmFHCybz8gXG4iyfFaXLGaZB+UXwZCMKOBsWbid4v -+l+1wRY/EcEuxgU6itVv9AoIBAQDo38kPbhvcT7I56++EKVuEfCzwAnXV/kttXRQk -+QQP9b7C6ZVsEgHz9/sNWzwrp0+28JXMZIggAt0kE783hitDlh7mRIUqjYoU6FcKs -+J3qaHR/Dn5TeDFlMeXmbDOsXPLRDTFdvX9+We6gbAUY1NjbK/CS+fOIigzw/jplS -+DIPxpsPLQ79/lUvr842Fn9HAIoPWq6bhMW4qibh8clJInMO9VTz4TF0FnmkPgl3Y -+R63h5zngQiOAtWCIleN/9F4G7VvrDqYHAHX54vL5Vz+dgtsynJIcpQlIJ5Z6w4hU -+q04aHuxDBkNPNRfTUIo/Jb1ghxK0m8JEZb9FE5TmZ2PDH0l9AoIBAQDXIfwjSYez -+fahZtDCis9skiaRBz2Vw+EUruXyEShER2mPaW2Of/cxAjE+cRPC4aGuseHs+4TWR -+CMnLU8lsrrw80HhdRjd11Gvr/o5qlqgxnw3zNSWQzwld+QcyxpzOgIxIR4kFV+1z -+VoG5PO+vARhcZH2tXrrWdjnCTchaMV74Sffb8bHD0p0PfcAg3CdSzMhynnMGp3wN -+PBZXqk0T+C9isaXKxL8HcDJxgcJHEyEZp9ygQtzQo1BPluVTWPJRQQvP6VLNyNl2 -+NISvmYJSwRQ3yuqEvWE4eSge9kaNp2eUrUz5O3t1NyOa4qioVPnlCH7Glaj6nyNS -+Ler2dszcqz0jAoIBAQCnY9SeZsNYBWFTCSjNkvzZqniSvPH+tB97qSBFPwajMZmT -+Ii/eeI1f9bRrvb9WfKOyTikBs9iUyyqNheIzcRjfJERa8dc4wiSJsAKSxH34MV8X -+uqnDQpUdx2OF9C84MMZSaZmf0QZioNghMTVKIoIYPk4bLqFNtY+lD9ddhumA1iJ/ -+BV+tUZ+VJyhfGJYoyaaCtDfsPx+1K/GUYoiK8UQx4AdHY3yqAUf7gfX69OskKpUL -+gf8LEVUmWLeCziCYUh10RL9K4SXsTRnh6Lkte1Ycdzb/qBGX7/zWmZ5xXgFx/TBO -+rT3MvZk2p/n3kiUiMXVcpWlqyMhH2t95DnBDXUXxAoIBABcS0Q0j1MceghDk75Zz -+vdxEWvan/NRJ/Hk9EqrJmt0UVENWK/A78000/1DeYAcXQ/0iiu1qkCk3DRWererX -+Lt9C6LXwUwBzQQP1sGakM/PmgEOGfrnySqnmjKwYezb2uJUD/yEwlgoZzB4G+BoH -+/wnhyUzv7RAVbAp39zYdN3dfz5KqcIt1Kl0/+nxLwHfz+Me6UNH04qw0tpy+ajfr -+FYH7VbHSuj5c/TwJU8H1vkYXJ+WUZkSGT5XJtFzlHFA0rsSk3LmvggtYhKaky0J+ -+7OEzBUYpXaUF8ZSoi1akDsr2b1wH0iz3Nf1ls/sh6g9zgs1fvdjreolU3W+DvGMq -+hosCggEAWvrHup9KFfS7QznGOLjOdil+HT8TqbVjFa51S7gd5A2B3XpC2mDk9XPO -+uxvUjXxuH18hRlLHP2uERlIsOl37iPJ2jY4C1SUQb9xNmSarfUKwsgDhJKBfPNIC -+LXmXQgs/gcL+Sm/l6Az4slsV/9xg1Sp4EG/3bBM8i4VRRcpKZqwttZ/pArzYNrYp -+LbZeRCm34iPDAelganc4cX/wzlXa36j0Eh0PHd1SvQhKb9lcxtbV4EFhxD9qC1VF 
-+URy6m7FJrA0r5Hrh4SuSagOpbPCJTeXP0Ysvx/tsl7DR1kIpGkMGhYHMScbKjGwe -+ZAbU3ynucdvh2dgqN4agS//dmuRo0A== -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/inter_ca.chained.crt b/rust/pv/tests/assets/cert/inter_ca.chained.crt -new file mode 100644 -index 0000000..45fb8be ---- /dev/null -+++ b/rust/pv/tests/assets/cert/inter_ca.chained.crt -@@ -0,0 +1,58 @@ -+-----BEGIN CERTIFICATE----- -+MIIGhjCCBG6gAwIBAgIUOkSdvHg4/HXtknNMhIFkuPv/ghMwDQYJKoZIhvcNAQEL -+BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -+ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -+IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -+azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 -+MDMyMTE0NTIzOFoYDzIzODgxMjIzMTQ1MjM4WjCBvTELMAkGA1UEBhMCVVMxNDAy -+BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp -+b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y -+cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAc -+BgNVBAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD -+ggIPADCCAgoCggIBAMheyYWl/STLJ0iwlrqNRyURatdeC8oDpKFdpglYHAs/jo3s -+fWNySCnaw6NCe0vxFLpqcK8VMNFRGu/XhR/kZ1YR3V4mLwF1Wa5v7a7J9swq50Fk -+CsLtaU5vq/h6rIpy0NLnmN5KgqChrMh9IwZ+Mc8sqc/0BFFJsuCCGu0TNlGVOhmN -+AbdS3s7wEUwT023CKn47G3pVqeaErEB9honz1I71g5/jNKGe5CLCV35ExzsrzU43 -+atyJ0jgh15PYCXDTdsRccSmEs2S6Xh2o4ZhlqioWB+tKxGsdxq8Ri4soy6yyooOz -+T/3X5CHpKxiI2P9z38Pr9egPcNPPVMGDhzwHz7p3iBPg0RcWd5VP2nimLJsdGWK0 -+bkU7zlQ3R2NelSIW9Hr8MVASihmELvX+AcC6KhTpHHhf3CTPgcAfV2fE9U84Xl2i -+shmoEsUQTUx97qKUOKRfY6o+WMBnVkzlqWj+s52ndiT+0KNLTDtvlejEFf1VSF43 -+IkS9UJK+XxxEnvIBzNKI5EbWlG5Z38/nKv6pjTXFi3aZR4cdmI/0XfAjLTkrTBaW -+lkguEjt+/cxPYJOqt50ldI9kle8XTu/HibmcbU2wYIF21CBjE/hLk/KY3FQlnTFL -+y7x3bM0CuTfJ8Noy59f3l56fwPOpaQWqhqO+UoFkbFlbROAiVxfb8KlyZPdNAgMB -+AAGjgYEwfzAcBgNVHR8EFTATMBGgD6ANhgtyb290X2NhLmNybDAPBgNVHRMBAf8E -+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBTWntryJpCwZqZJ1H3n 
-+znJYhbfoKjAdBgNVHQ4EFgQU+f7h8O2ttBHtwI4OZnpD9wU09GYwDQYJKoZIhvcN -+AQELBQADggIBAE8gulfly5+EC8DX3K02qEYPoQwVbVhD0wGrlAhgJiakDvPlX6/K -+vSe/1nNRG87jXvXdDiuJ6F4iKZpeJndzvx/8ZEmllyyxDwb3UOmylwW/o3/Uh6fY -+kiVBfW6uNNB0BfDKcXDDZgKjTg3kLT5z8m4u8rPoIPFkLFl9AuAq82Ll6NQ+xZFP -+lZ3K6HN+ntVnIGP4XkOgEYPxjJO3yTGle8VBqLfo/JKwbZtKfNXxSAMRXiP02SQg -+D9yshxkonQYWog2hHz8oDuQQNbzaAFlxnY914av/XwxP8TwEfGNchNtAtrlGRlx8 -+PjMfp3Mnbz71yp+L2We2/A7njIPbEcn0FIBedpNyyBON5Cd6Xqx18otmMTtUILZ3 -+SUKeYmLp8soVMEmmnWz6y1a4bCKwo6hA8oSoq5ydIeWy/jI9v7DF0S/qZTz3c2Q1 -+a3aniuug2FRAxuU/8fSlMrE4672d3505SbHUblhy9XzQ4+sWjkDtYnY89kyY4BTu -+W5n6JlVoewZGOjlJ9/6mV6BVLQ74IeiytWtdH5uOQ1wroi5Kq+EroGgFmPSQCIvN -+XNDvBCNFN/O1+/eVZd6JNx6NMO7DrWql3GFMtJE8u/SWXA9vit3pfmeDrosZ4SDg -+ZPt7+JfzITUh2UMaPsQsPF8G8/tYmxDbELE5LjFcQ24ps362rG+q2rfA -+-----END CERTIFICATE----- -+-----BEGIN X509 CRL----- -+MIIDbTCCAVUCAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD -+VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxMTE1NTIzOFoYDzIzODgx -+MjIzMTU1MjM4WjA8MBMCAgG8Fw0yNDAzMjAxNTUyMzhaMCUCFF72el1l160wBZ+u -+getcWTcmtomDFw0yNDAzMjAxNTUyMzhaoCMwITAfBgNVHSMEGDAWgBT5/uHw7a20 -+Ee3Ajg5mekP3BTT0ZjANBgkqhkiG9w0BAQ0FAAOCAgEAbGRxfJj3wsZ9iUsYTO3W -+7+hNbZ+nRaokZT1UgprzDTMmQKWp5HRyvAsTtzxeJZ4NDEqP2mg8imvmSUSnLSmR -+pdq7vUdk7lKvdV++fZo4XIRF/pqv7+8Nz8iZvxINGhFaJDUUPPQSFcLm00JIUMzn -+9nh5JkCkKFuk34DgHDR3Zn+nM6R+gAuaDsBgv3xnU6PKVW796JPbz3yrN9fma9Pw -+P27ICXVyOH2oH7p/E7oNB/J0YxKcD5bjaFkzVHsMExCzeyGTA56qtdN2O1Oxiw2z -+L1Yitj1c+2/P29vhCw0IuxKjduL15Qu5Px5BT+B6V3cVUPbn9fYlDjSFAHxyrGno -+X3QnVzCChVoHuS+Og/QwEx6AcTSEbl4E47XQK0gr1cG7ayOZoDO3rqGQ+eO6kREM -+LpX2lHPofzMBk9lGPfAZX41pXUlshT0irrwFbIt3OTGfvU5x2wAjCap1InzvFS9J -+4vEFHcLeHAi5ztlnYNIkB9/kja3ogpSCbcO6WoveJeHCTsXk5K4qIOSvoLYEdRE1 -+Pn2EJStyULZW9Sv1JH2puyZ2d2Y7cl6DqCZ5D4tFsyFFsMUNlBJQSxKoPDYnGsi8 
-+DOTxrwhdxG/mSwn/NoYjZdC0Y+NJyBs1RvLvBZLdgzWS8I+uvyuwTfn27tP7GT6Z -+8hmLPBMvUOyczXdMD6b1mfQ= -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/inter_ca.crl b/rust/pv/tests/assets/cert/inter_ca.crl -index 311eace..e7d0faa 100644 ---- a/rust/pv/tests/assets/cert/inter_ca.crl -+++ b/rust/pv/tests/assets/cert/inter_ca.crl -@@ -1,20 +1,21 @@ - -----BEGIN X509 CRL----- --MIIDSDCCATACAQEwDQYJKoZIhvcNAQELBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD -+MIIDbTCCAVUCAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD - VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u - MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv - cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD --VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTIzMDMxOTExMDQ0N1oYDzIzODcx --MjMxMTEwNDQ3WjA8MBMCAgG8Fw0yMzAzMjgxMTA0NDdaMCUCFEz4gViZ/JN+6J2f --2mIC8bnWw95IFw0yMzAzMjgxMTA0NDdaMA0GCSqGSIb3DQEBCwUAA4ICAQA0FKRq --yESt1SYxVW+BlFjeDWCf1GL471q+603JiRek5iEVt+bZXrII9y9lXsYhZ1d7BxCt --Wyo/497tPMwRKSiPJwrXPODQn3DTl6EM6VB+w9Kipmm3Fq97TSRBuiDkYaS/nUHh --nDfj40qb1tc18SgBXVLSSiu97U0JMAq8AHIfMzlnhIe4fJ7TJU2TFSrkFAOUVqZs --p0/J3aDccYJBnUnEeGD44i80wd3xmuOoBDqRgKcasYsv8QmSFhbD4BTvicxKueDD --kiWTFbgNTDQU9Prp8gYmuSOaQoK6S+8DlO80IRTDwpDq1nQaf5MvwfOqfwQVgAjt --RgrC9BI1RvQu0OyihvcqOh9EEj5O9D/nrgTdsWYJGF/otb8lL6JdXDOAjqWSkZVA --gKDq4NPUskgKzoccD6HY5wgIvSZTV8bXjz2ST2oddfg0/7akNBEmq4TQV9NHb/G0 --AihNJgd3HtESn5Fhm51aJZPyuwqzmkmNHTuHZ5qeDB1dN/UVSqfnLXKeKirOtJCq --VdWGZTFEKJSDPgmLOMy0GhrOeM/y5N5MJZBrwBxPJ3No3TOGr13Ir8E1cxCnchZX --PgpyXNU183gMX4k5NVEWYpCzoTxzY7PNvaMft61IkC9DIdnRxRbEqfdLiVy/k4jP --MUjX4ThGwtUVzqVOiH5uRRE1J7Msk/W3EYzIWg== -+VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxMTE1NTIzOFoYDzIzODgx -+MjIzMTU1MjM4WjA8MBMCAgG8Fw0yNDAzMjAxNTUyMzhaMCUCFF72el1l160wBZ+u -+getcWTcmtomDFw0yNDAzMjAxNTUyMzhaoCMwITAfBgNVHSMEGDAWgBT5/uHw7a20 -+Ee3Ajg5mekP3BTT0ZjANBgkqhkiG9w0BAQ0FAAOCAgEAbGRxfJj3wsZ9iUsYTO3W -+7+hNbZ+nRaokZT1UgprzDTMmQKWp5HRyvAsTtzxeJZ4NDEqP2mg8imvmSUSnLSmR 
-+pdq7vUdk7lKvdV++fZo4XIRF/pqv7+8Nz8iZvxINGhFaJDUUPPQSFcLm00JIUMzn -+9nh5JkCkKFuk34DgHDR3Zn+nM6R+gAuaDsBgv3xnU6PKVW796JPbz3yrN9fma9Pw -+P27ICXVyOH2oH7p/E7oNB/J0YxKcD5bjaFkzVHsMExCzeyGTA56qtdN2O1Oxiw2z -+L1Yitj1c+2/P29vhCw0IuxKjduL15Qu5Px5BT+B6V3cVUPbn9fYlDjSFAHxyrGno -+X3QnVzCChVoHuS+Og/QwEx6AcTSEbl4E47XQK0gr1cG7ayOZoDO3rqGQ+eO6kREM -+LpX2lHPofzMBk9lGPfAZX41pXUlshT0irrwFbIt3OTGfvU5x2wAjCap1InzvFS9J -+4vEFHcLeHAi5ztlnYNIkB9/kja3ogpSCbcO6WoveJeHCTsXk5K4qIOSvoLYEdRE1 -+Pn2EJStyULZW9Sv1JH2puyZ2d2Y7cl6DqCZ5D4tFsyFFsMUNlBJQSxKoPDYnGsi8 -+DOTxrwhdxG/mSwn/NoYjZdC0Y+NJyBs1RvLvBZLdgzWS8I+uvyuwTfn27tP7GT6Z -+8hmLPBMvUOyczXdMD6b1mfQ= - -----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/inter_ca.crt b/rust/pv/tests/assets/cert/inter_ca.crt -index 8cf1cc4..0a6e1a0 100644 ---- a/rust/pv/tests/assets/cert/inter_ca.crt -+++ b/rust/pv/tests/assets/cert/inter_ca.crt -@@ -1,38 +1,37 @@ - -----BEGIN CERTIFICATE----- --MIIGoTCCBImgAwIBAgIUXCG9Tf1Ea3mKUicsQMd1lldTtIgwDQYJKoZIhvcNAQEL -+MIIGhjCCBG6gAwIBAgIUOkSdvHg4/HXtknNMhIFkuPv/ghMwDQYJKoZIhvcNAQEL - BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y --azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz --MDMyOTA5MDQ0NloYDzIzODcxMjMxMDkwNDQ2WjCBvTELMAkGA1UEBhMCVVMxNDAy -+azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 -+MDMyMTE0NTIzOFoYDzIzODgxMjIzMTQ1MjM4WjCBvTELMAkGA1UEBhMCVVMxNDAy - BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp - b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y - cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAc - BgNVBAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD --ggIPADCCAgoCggIBAJYQSR4xUs7bFbMY4r4v8l/9cuRxOxnZ8/cFpIDKzCA4c7/U --omjJCaPOFJrldYthRnyjN6MdqKyIED0XjW/MIlbqjMJ4E4AvWKZC1+r3+YoFCYmg --pLq7kfBkw8Kd8BFhfRsGg60VeAzZ79Y6BvJZSyXXQeH1RYiH/bW4o6PdFaO/tchx 
--KFlYgOW5MoGTemx+muifZ9iKav4/feFZsh73+OFf+KyruSkGnM18YNqnoMiNL3M9 --H5T86OBjcPYHhhwCp/v5cjfD4Yaa1WAM0Bsy+o6b/VwSNhrk8U8JF2rjuK3wZm3L --hyMa3QOn/kgoonl7sVCKes6GpwOmiS/+qKf14JBK+bjTpDk/CsRYpnonBJKNGOzy --tr6CTqLWcJtoUz3kr1ZZnGXUmBjqYc9vYI2EnlzHnBAf+gplJVqtbEZckLx7rKBD --QXyXp5pqDZmnnxQ9qlk1ZMeqw/mjLackdi1CRg8SfA0GlRcQYmcWxc2U5iCcs+ym --q+V0ciK4YFg/z2wEMFEsarGclW8YrZ1RtY+IcmtCXf3rRa0CEbHCiclHoVtuX20c --LZuYsQ5y6TdeWkDTcAwm3ZCYa54LeySuYny8F8A7by42KRg+Z/JjaOlA6hBPGwvC --p1frJqQod5uGd0Zg0DrNKjjWIVc4Z38dRy48b1Ija081WBtKwyJiX8jto6yfAgMB --AAGjgZwwgZkwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovLzEyNy4wLjAuMToxMjM0 --L2NybC9yb290X2NhLmNybDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB --BjAfBgNVHSMEGDAWgBRJTc+PTs7kYFuicyYmOttP7ItAazAdBgNVHQ4EFgQUEamH --VrLZ+p7brEWfC48jxRInC+UwDQYJKoZIhvcNAQELBQADggIBAD3ZOiRT6ESzxFIK --76FY7qNM1PWcNrgTmMDy8iHfBWEBmkAUQbHvY/U5fPnPj3vPLiHXkLDbUliXyEnL --4myo966j8dettrvB7pxibCy7J2FxwoKwMUOY+4IgGBuxVVoWVBwzu5me35RnDZ6i --9+dRJXiZnO2cEVvfFmEfq0w5SQLsqmR6EeIhoUOepmEJDpjE3cSz6QnQ6KWdw2wf --e+dviPlDwS0sNg006lqSy3rVzsnlqLAsoDkeOEZyZmPbc6sAx4RJS9nBH4WERWb5 --XxVOYIn0QmlJKwlULB3x8dhxUv+a7alBjDt6v2MW1zXH8v1ZcMcJnFy8i6m0VzkV --edrO/ONmqfi/EUr/FothDLQnCoykWjcfL1JGLADjzyRE86Wg4L/DRil8k5wH8Fir --ZZE/kLeOkQN5FhvQK+m3YzGtxkehO7Io3YWmzbv05ZI2d6zroyP6DXS/zJY9wuNd --I/6zp6eUYb/mtT3NF3h1C3SjQpELT2IDoXXYQsbvcVk7pgMB2mP9sYnoDlQsXZvC --oEzD/ollmkHsgD3Zr3p6ANSiNpW6iRYBiWsRoXmVJw+nTSYvWLiMI/vuABXYPPn1 --Tc6yypXgtezMNtUI4fxJ5pU5aHMKL4+XGtCcACyazoVZaUam1DulWUcDhuH/4Om9 --FhcPOT6WqhWjn/zZLW6Rr5OvXwoF -+ggIPADCCAgoCggIBAMheyYWl/STLJ0iwlrqNRyURatdeC8oDpKFdpglYHAs/jo3s -+fWNySCnaw6NCe0vxFLpqcK8VMNFRGu/XhR/kZ1YR3V4mLwF1Wa5v7a7J9swq50Fk -+CsLtaU5vq/h6rIpy0NLnmN5KgqChrMh9IwZ+Mc8sqc/0BFFJsuCCGu0TNlGVOhmN -+AbdS3s7wEUwT023CKn47G3pVqeaErEB9honz1I71g5/jNKGe5CLCV35ExzsrzU43 -+atyJ0jgh15PYCXDTdsRccSmEs2S6Xh2o4ZhlqioWB+tKxGsdxq8Ri4soy6yyooOz -+T/3X5CHpKxiI2P9z38Pr9egPcNPPVMGDhzwHz7p3iBPg0RcWd5VP2nimLJsdGWK0 -+bkU7zlQ3R2NelSIW9Hr8MVASihmELvX+AcC6KhTpHHhf3CTPgcAfV2fE9U84Xl2i 
-+shmoEsUQTUx97qKUOKRfY6o+WMBnVkzlqWj+s52ndiT+0KNLTDtvlejEFf1VSF43 -+IkS9UJK+XxxEnvIBzNKI5EbWlG5Z38/nKv6pjTXFi3aZR4cdmI/0XfAjLTkrTBaW -+lkguEjt+/cxPYJOqt50ldI9kle8XTu/HibmcbU2wYIF21CBjE/hLk/KY3FQlnTFL -+y7x3bM0CuTfJ8Noy59f3l56fwPOpaQWqhqO+UoFkbFlbROAiVxfb8KlyZPdNAgMB -+AAGjgYEwfzAcBgNVHR8EFTATMBGgD6ANhgtyb290X2NhLmNybDAPBgNVHRMBAf8E -+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBTWntryJpCwZqZJ1H3n -+znJYhbfoKjAdBgNVHQ4EFgQU+f7h8O2ttBHtwI4OZnpD9wU09GYwDQYJKoZIhvcN -+AQELBQADggIBAE8gulfly5+EC8DX3K02qEYPoQwVbVhD0wGrlAhgJiakDvPlX6/K -+vSe/1nNRG87jXvXdDiuJ6F4iKZpeJndzvx/8ZEmllyyxDwb3UOmylwW/o3/Uh6fY -+kiVBfW6uNNB0BfDKcXDDZgKjTg3kLT5z8m4u8rPoIPFkLFl9AuAq82Ll6NQ+xZFP -+lZ3K6HN+ntVnIGP4XkOgEYPxjJO3yTGle8VBqLfo/JKwbZtKfNXxSAMRXiP02SQg -+D9yshxkonQYWog2hHz8oDuQQNbzaAFlxnY914av/XwxP8TwEfGNchNtAtrlGRlx8 -+PjMfp3Mnbz71yp+L2We2/A7njIPbEcn0FIBedpNyyBON5Cd6Xqx18otmMTtUILZ3 -+SUKeYmLp8soVMEmmnWz6y1a4bCKwo6hA8oSoq5ydIeWy/jI9v7DF0S/qZTz3c2Q1 -+a3aniuug2FRAxuU/8fSlMrE4672d3505SbHUblhy9XzQ4+sWjkDtYnY89kyY4BTu -+W5n6JlVoewZGOjlJ9/6mV6BVLQ74IeiytWtdH5uOQ1wroi5Kq+EroGgFmPSQCIvN -+XNDvBCNFN/O1+/eVZd6JNx6NMO7DrWql3GFMtJE8u/SWXA9vit3pfmeDrosZ4SDg -+ZPt7+JfzITUh2UMaPsQsPF8G8/tYmxDbELE5LjFcQ24ps362rG+q2rfA - -----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/inter_ca.invalid_date.crl b/rust/pv/tests/assets/cert/inter_ca.invalid_date.crl -new file mode 100644 -index 0000000..cba6246 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/inter_ca.invalid_date.crl -@@ -0,0 +1,20 @@ -+-----BEGIN X509 CRL----- -+MIIDRDCCASwCAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD -+VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxOTE1NTIzOFoXDTI0MDMy -+MDE1NTIzOFowFTATAgIBvBcNMjQwMzIwMTU1MjM4WqAjMCEwHwYDVR0jBBgwFoAU -++f7h8O2ttBHtwI4OZnpD9wU09GYwDQYJKoZIhvcNAQENBQADggIBACgakUDSN7qq 
-+ATkeMC0PpwymGBu6i3Jd641BKDhwe0yG9rDa1ppm1W9z/z64LrzKSSeXJ8Hc07wP -+E94ODXqouo0WlGK4HnxGJ2QwHPJG69bYfcJREQw0Df/dTjlatyIc95QSsmChKixu -+3goFe1laa421bUDIFr111Zv+4DV/LDoPZSQvRe0xHvouZNAQ0rQ8T4gVJkU6w/dW -+636DvyXpW6/LyBTH4Nf4PHWjP6PTgzOW8+yX0RwCpHegwAA0ehEJZ8HAvW7VQWA3 -+uDdfB8j9uxyKQeeIkYLkK/Ds5P0nN8Kd/6OKGlF+9mRCpEQ0n3tgQa1q7tn9blVL -+9yu3mmTbRB3YTRUCy/B7Wc8AwYE+k56wZoYpa7GI7mRYe7Vwhs6PeFmJLcZ3ByOl -+1eeR6yIRzSbS50rVNOeaez4o0hjGEj82OJpK1THtB8pnCCIE5eZ8NI37WZGcPvMU -+KbNC+JjX+rlcnK7NTdS7eQZjcKOYVuDZ0lW5n+TaXKTLL24tuA+oV1SbRZOZN7QE -+5/31A/1dCg2NDLKLNsSlq3A2DAJE5SOMSNCq4V+WGp36LAlKlNvYfHrcRxBEM6bp -+Qa4xCrcUINJ9x51wd0h5+5DilGdpoukSNwnSbMTBG7degdh2DGT4MqJ2Y1+puL9n -+7EmMLPMyTYK/tB4ZsbgV/z05Dyngi3Ut -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/inter_ca.invalid_signer.crl b/rust/pv/tests/assets/cert/inter_ca.invalid_signer.crl -new file mode 100644 -index 0000000..a3f07fb ---- /dev/null -+++ b/rust/pv/tests/assets/cert/inter_ca.invalid_signer.crl -@@ -0,0 +1,20 @@ -+-----BEGIN X509 CRL----- -+MIIDRjCCAS4CAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD -+VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxMTE1NTIzOFoYDzIzODgx -+MjIzMTU1MjM4WjAVMBMCAgG8Fw0yNDAzMjAxNTUyMzhaoCMwITAfBgNVHSMEGDAW -+gBTWntryJpCwZqZJ1H3nznJYhbfoKjANBgkqhkiG9w0BAQ0FAAOCAgEAFSRriQnY -+3LIc9AHCGYZM+BFfaDnQCiwUx2PuYownMGOyDOu8xjBBIgNgLsNsqVLBQqP/tOdz -+9WpIQd2VXbj8o2UaHZullRbJNJoNHzuC0KsdYuvZVooJ46nJONCo6RwsapiLlXnf -+Y/c//Ynqmk8xBZZpB4pAZKKq0D3eoeNFPJGWSwRa5AfQx5qpbikntK6khIBb8c+4 -+NfK6VMpEsHZQUG25fdLIpDOV2nnhRd2Xbcu/THFeCV2VnI8yoormOeiJznRJq1bv -+C4+jmW4NUAvFUFbcBRDpuq8LTIQUYuQYs018aCOqTTpMTTV5Q1iWuTZtAbO91UJp -+APEtEJH+/Wd/xB5ABk19bMiyxbCQeoLci7US0YEzASeGLH3Z59swwMvkGUhc2UvE -+Jtpx0unupgdA3lCpa/nDYwuFYCqmZ1kKHYtELC7WUldzd8K/J8XkNF4UIreTTD/0 
-+kLC3UZEKuoIj3Lv4GrnWKcIdQ51jUMdbTUeIdWFzjqdxFq4i6oqgWA5Hjcr3yDcM -+ATjxVDLxeLC+fJf6/SQu+7dOWKPKzIS8BcufmZpkRoxrt5qmgoJHuceps5pdfa0B -+dy8xEb9FnGNSuLec1rcUPPFgCioSgDFoQiU0aoToW/kDVTA6/PHFh6TD6Qynmc4K -+PtMu/kCMWULgMi68svcnA7NxpoKAsCnQH6c= -+-----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/inter_ca.key b/rust/pv/tests/assets/cert/inter_ca.key -new file mode 100644 -index 0000000..124d6d2 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/inter_ca.key -@@ -0,0 +1,52 @@ -+-----BEGIN PRIVATE KEY----- -+MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDIXsmFpf0kyydI -+sJa6jUclEWrXXgvKA6ShXaYJWBwLP46N7H1jckgp2sOjQntL8RS6anCvFTDRURrv -+14Uf5GdWEd1eJi8BdVmub+2uyfbMKudBZArC7WlOb6v4eqyKctDS55jeSoKgoazI -+fSMGfjHPLKnP9ARRSbLgghrtEzZRlToZjQG3Ut7O8BFME9Ntwip+Oxt6VanmhKxA -+fYaJ89SO9YOf4zShnuQiwld+RMc7K81ON2rcidI4IdeT2Alw03bEXHEphLNkul4d -+qOGYZaoqFgfrSsRrHcavEYuLKMussqKDs0/91+Qh6SsYiNj/c9/D6/XoD3DTz1TB -+g4c8B8+6d4gT4NEXFneVT9p4piybHRlitG5FO85UN0djXpUiFvR6/DFQEooZhC71 -+/gHAuioU6Rx4X9wkz4HAH1dnxPVPOF5dorIZqBLFEE1Mfe6ilDikX2OqPljAZ1ZM -+5alo/rOdp3Yk/tCjS0w7b5XoxBX9VUheNyJEvVCSvl8cRJ7yAczSiORG1pRuWd/P -+5yr+qY01xYt2mUeHHZiP9F3wIy05K0wWlpZILhI7fv3MT2CTqredJXSPZJXvF07v -+x4m5nG1NsGCBdtQgYxP4S5PymNxUJZ0xS8u8d2zNArk3yfDaMufX95een8DzqWkF -+qoajvlKBZGxZW0TgIlcX2/CpcmT3TQIDAQABAoICAB9LwM2tbLHdwkvF+zD1ppZZ -+V1b+szgxI/ppSmj8uFqgaXds5/PLso5JA1QhaukkZVCtld6e4HJdKOgrwTkHP4Wv -+wiP1slNXvTNz+4uYs4HVtKufwNeL5e4Qnqtvm7n/L3M1pciYmjkVL4vcEceul8CM -+cRSQQEljCburzqFXZh1NgdbiUZGM8cygLg90LEqhMGppeIP908zzzYTAJm6vJTTU -+D6Q+RGb3DpqIQMqx2u14zLcqDDiYlTtbu9R1gpn7CXqnlqw9tBhoTJF04pGfVXn/ -+2WpMkgvKCZGoG6PXciKN/zizXevf19vdE7RgEYaq1lk/ZngBy2Vl5Y7ZKJr9fFoU -+hdZq+w8UroAAzRIiLrbnGCt0+tK3rBC/DvtffNjvwjvphGaxKiz1pZdegZ4TsO+y -+zd+Aa6Gb/XCf5TIakFaYXPTpKj0UMi/TVKshblC9JWr3cuL3EoGJe4XtqxS0cGiT -+llLDTbiS4eSAM6S6Mek55jsD8KCGBrROp+A0Wc7nUinWfKXThJYvzOm4E0gSlvbN -+Fg9LqlbK47vbtSyAmfNcdl+C5oHVwE5p5AkZV8hNQOVusfs0aLufvWC+NCGLyJ/d -+wuMmFmx2XkrkhD+BsuhP1grkSjJLpKfufCzP9NBKXRzDMReiOI40AjL5Rm9ZVOc4 
-+99B1CuyCiWG4BPUVlj3ZAoIBAQDs0nMc6lOY2VVkAYe6B5SmF94nMUzmegcIC4Hi -+oP0+4X8M2GfAiOKAb9QaMgrNnjOOeomYMUtfUhueRkXhvt5ssmalYCTLAuskApJ0 -+UM34UfpY3P3V3i69hOSdpPje6bsorTMOWheNiMz5XKTWaVnk27Ou2fAuChHuLbmC -+ApeMEYd/VgbwgdtOpLaJGPt0cQ1FhbSsoac4tHMLwN9GpVegts/WZZb0bzjXDLEY -+RcfxcYCO0FJvfaIyfr/JEnw0CsLrkDdIVcsG+QiebygxV0CiDzCeHUgd0uLNq+X6 -+fXqqQh8KUMHVHuFwGpEljpxvHqXQFq5SaEYqq7m9tGTcrHS5AoIBAQDYmKgHWG9a -+lz+usrwoxuHkzPJ+lpqslEVNs60nPf3aI6+bYjsOI7MNT7QkGQXdQ/aErWqzAali -+566f3sUsDz2z/jaMGZYElqgF5sQdIAFTihPS5fkX7mcF5Nqst2tQz1rogEwWgJ14 -+XSQFCogASkxTEjD+/1kmuaSsc0NuOSkh4Wcxohla/7GQaiWnTBLnZ9axobLmmT76 -+vf4mGqAWmNNenHRkUY+ZoYF1xLpR/60OKzQTdUrL5NZB3bL4n3+ViOI0JE3VZYRQ -+PP3/tTr/NwwhTbkxpw/pNhbFIItPM1fzmfieGfgZlqmj5EXgwtGG2qlFPgR+9tWw -+uJwf4bHUerU1AoIBAQDDXRj5UZWFEkUPKvP80yQdoLLzQgheWWxThqqven9v0DYv -+MpbkjgfMTHR856th1JBTiWLY2lGDYQjHmNEtWXfD06g55QZ1MwMnwbdvnPlNUNNl -+W2lLdO66CVdPdTiZK9fpxnfH7ype5+uwCm0xM9ekpFmmdMNeN5BzG+VdCyZZtlxA -+/4baAUYZqmeq0aEefeSk5ZgWkYSRkssVdxa6AMw5GJZ8F3JgUyBgx2eQzoAS/b9A -+ETrwHoQfg9BS35z2kaobCe2RDrVeGzKxAKH3kjMPfdhtl1pWwBG5+YTPD5SRv9o7 -+eENuvPrcsA3tHaiPQoknEI7eDIdVzDR9+sL6CIqBAoIBAQCcY90vMJN2fa7lnPhv -+GOsSIUkWTffwlD5WFF3577DTSOEK+KpbUzt9aQdQ6SBq2x+sPOrFxXVgjJhxppAH -+eBm9VNhd4DuJpJ49ZJpFzU4n25LkvFhXBzQr9UpPW0CJYK7rIXfO557LwbS4TxpT -+21GwyXnHJOhiJbjZK4CMnYkthWrVU42rPuQeugXl/e/IVhmWuIJMLmpi8bwIF0Fw -+D3jO33jK1nkxHIQ1XI6LlmiFynwXcKFJBzoM5N7M1Z1xgxyROYVZh6s5pFBhyaGH -+lV+UzGHjkBTU9tEKFK2Byji/E3WH8ohJMZfbVn9+Aiz3ifqenGDyq5cvupxACN41 -+5UPhAoIBAC51vwgPKuFNXU/pipkRMr7S+Ew7N5rVij3dJhcaq0RMNm24zneKZoC2 -+7b7zKd92zYV4RgviUZEiSmiGXbEvVkNrSZ0BESRt8C7dfqDZx8E3+LMH2Fz2Q3CJ -+EJjEGiQuDQuyx2ICs5ETvIFzxy0A+uRbsrjJHbikizvPbX6qY1F9q4m3GAxU1mlv -+Nj548F/eL13gCUVPfmcbkDXKXDjeMydogPqcH20/d+88Cl5puFV1KwBptmJ/2WVP -+fcVKpeLL1DvWsd9UikzYuLXP3tGs47aPPKqX20lQoZWW+su6uWXi62WuOSPckHF0 -+/y+kPmf1RQh8JbiNv7ddFdj4eUReLOo= -+-----END PRIVATE KEY----- -diff --git a/rust/pv/tests/assets/cert/root_ca.chained.crt b/rust/pv/tests/assets/cert/root_ca.chained.crt -index 
9a03bc5..48b549c 100644 ---- a/rust/pv/tests/assets/cert/root_ca.chained.crt -+++ b/rust/pv/tests/assets/cert/root_ca.chained.crt -@@ -1,56 +1,57 @@ - -----BEGIN CERTIFICATE----- --MIIGXzCCBEegAwIBAgIUWAen+5+bRZaL8kVlq51CXPQUdZUwDQYJKoZIhvcNAQEL -+MIIGXzCCBEegAwIBAgIUd6BiIEGe+cX502NGGPVuMHrV0ucwDQYJKoZIhvcNAQEL - BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y --azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz --MDMyOTA5MDQ0NFoYDzIzODcxMjMxMDkwNDQ0WjCBtTELMAkGA1UEBhMCVVMxNDAy -+azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 -+MDMyMTE0NTIzN1oYDzIzODgxMjIzMTQ1MjM3WjCBtTELMAkGA1UEBhMCVVMxNDAy - BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp - b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y - cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxFjAU - BgNVBAsMDUlCTSBaIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK --AoICAQD0gnTWPpI/FahyE+PizunOHzNBi88KObopjL6+6p9J10RB82pDxaLoMxPo --6xkm0ZKiFea6ll6sAfcKPzCei+3691sCOiGN/JYdSZR+SynRGeD1o8KJWAwfmZ4w --DNHWHRqUE6u3oqAPDQUqaOnzyGdh2vGLP3HU5F5rIDhYzilm7WuXykYdiF/P6Rjs --xzzQZOxHTnvV8byEDN0Sw0ONsQSFee0uM22e20Kme34A4p+p7QBiRFSJAQyLtp/o --YBY8zeIgekr+BppRbCItEFAgHBS8DLk04jhNfGlU5RteaOJe7D6tg7z9CC8X0dAI --/p1vJkz5zYMPGuoKKaowDAJ+ncwN+UDYhbd4g0ATKC7M5DFmn4Wy60G+3op2BNp4 --PadcsOcXwzKsOSQix0+n4Tfw1QQ07+ilBHBe8fCRpk2Nf/byYuEaBjPBTXUcpTsp --Ydv0s5HjSyRfhEWKMQBOIn6mSORayTJ1/xQECrOfuIopLiZyMCjhixT4PImiGucM --ruCmsHZvfA/8VQxyEcPK5K+XqFaSr5r423C4CzcafHhvUFOYj2WmJyZzWc2X2fjo --l+ZspNpQTlvYgVi2cTeBnbNqCNMcTiGoyie5roHGa4CnigHuBIVSuUVGq/rqFKnT --KgKboI8C3XpQID6rFcYY444wOpfea8LodHTXFF6o8A5m+6QlGwIDAQABo2MwYTAP --BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBRJTc+P --Ts7kYFuicyYmOttP7ItAazAdBgNVHQ4EFgQUSU3Pj07O5GBbonMmJjrbT+yLQGsw --DQYJKoZIhvcNAQELBQADggIBAFCGlLEdAsicVPPuekhdfOyoK/P6r5JHnPIpC894 
--9TVUp/7LBvDOwcNAvs+34dsbfRtMmwXUCGofrS4S0+zugnpgx6D1ScWC+FNdBJf9 --C6XKBiU4Mxn/mKWTDUMBCxI+RRbpOvOox5h6pQcfo0IYz5okazol1nT40IdvNkZ7 --WDAz6Xqmw8k/+8Y+l1fDSfUqcgPnTBfgX3kkna8VHBx74nSqeFgOVvtd8BJq/xIG --6rAkGsv/PzmQqKpyoD70N/nGALomLlVScX5qIbNrXWW0DWxY+kDwhRlskPdYS7RJ --Qol3NX0t7GZ+l+0W2Kpp7k04n5xrJ7pzTDyltJJEeaLfqrMlF5MCK/UyJqxa3LQr --IkP2wTxM4HR1gsIvr8ei3QFSBtxEGiQl2VWfBs8bwy+KkL1ao1UIcB+1dj4SLy8x --RqNFNfogkyLL9cELiKxViRQgNYowv3OsSw30i4eSHMor8m0qfVf38t7gi+m0Ba+T --+RIV5MlZcjG6C3jS8AMpUwXO4hOxGEueuXmZuCDmyypqRpMJc0hu2JA1Q9fztvfn --hnOAv6K++5hB4mIyqUp8/QJjO9X2kKa4+MmbuNTnRuC13s2f7pzHjZR3cFf6QN5b --duxTQZ18xvqXIR6EV4PVwShjCMS6NHmkmumIxMzaDK1mGwL7l7B0ZS+r3i7qwGPw --2iYv -+AoICAQDMw/UE5XNiZpjuCRtyq4A9PDBuJJgyUl14xvN8lcsLHGK9b/WkQdDxUXsN -+QbvS2HVkg2X6AGHwixk/3U0mzgzImZhmh3i0IbFn7L2eqpuvgP82BJpveq58Sjsk -+OHYuG2JqSv63reKtK8pLXtKZnSV9vFUAwxWSuqA68FLwDgjGMK/Iy3Hh+cwnpffn -+IMqJ70Nlxc7z4KKO4nrO/+rIQSqe0UpjEEaK4Eb3YXWWontaah3/sw+uAbVncJMV -++AacHQzi130sJJeLe4p+8DD9EYX112DEj6WFrHpyEqhsGXvLO7uJRzMqJ1WVz47x -+j8wK+GPm66KCeOM+wm65u49vNHDotlQzoclIzLDiegQo/3ob5YRTFNDJzbNN6AAE -+zF8K7f8t2TNYeWgPOmXLEVmFalOSNTZ5sLrJipsYs7uT7CT1orW3PvM16v+knigJ -+NuryIdr/+hFgRh7Sr4U/6H6fDuZGJXgZeUSNM7S1UI0iFTZyKAP/3EQrhRWlIROs -+ql9rFnH0LWaPcmk66Yyhq6Va0dmLBSjGPWAHLguKyHkMvhJfMd/1miGfz9Rz9LbE -+6CqOK8sf6EQNWTgmpTdXgjmR9dvMk3ki2aar0u+IamzfIx3y0OPSlpaO8mFMucZe -+3iaeG/l84sUR93NgVI2dCdtEinCQviudV+QwRdLzZQQNM5zLzwIDAQABo2MwYTAP -+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBTWntry -+JpCwZqZJ1H3nznJYhbfoKjAdBgNVHQ4EFgQU1p7a8iaQsGamSdR9585yWIW36Cow -+DQYJKoZIhvcNAQELBQADggIBAHw/+RKyDRBsvWksX4ji95W+4uo292psPzpeusjw -+Ztl25D4jssgjNbEiNwyYgV9e9BCse9hOkFwAE6ogwBAel7POX8XjizaJcwSs/GaC -+2ORQ/KYRpMsypENZ4HZQbc8j9ROqTDD45B+9/5nAA6le/7wd0yS8hbum/b78vAN1 -+98Ja8wfQIq1PE0heFELSRR2hAmcGkxIo1tBqP/CnAzOpBZ2ovPJ75oeGlnpk/ATY -+rUJPTY0UPGp9ZSq/l+t88onDewjCblMMMBeJSEklP2CFlE1jGx3QJtnThiJ9WQKG -+TMakgaAERrcvXs6Lx2GFuk7kCOJsWJtvT6CJVCUtEBHNGY3GqyQwNrE3OEVQ6CFb 
-+CNNarrRKlArBISExLY3xn0CcuWr8GewsIvwzmJBX+u2Xs0s7RYFc4S4s/SbRLqi4 -+Gn77E7dG711nKHXwDxYOuDAL0MNBT4aDephQK+rF7GlHMLTUPWKYCwmjh2vzmRWJ -+fZkojybvX6g8+7fKmwTUn5LXlcnmjv9KYN8LgfHSQhvRaGtGph0X8mVH3eCs4MGG -+OUU6p9wdPS3MDBHRzn0MXEhq25r+xT829lcHdoxkMVorVDTDD7hOm3HBsF/LOh6c -+/sA1LZnnfy+YZt+Gn5cE5CSK3ddmzRbrB+ZHWbJahegJktWq2rrnw9ipfRln7DJF -+fsHf - -----END CERTIFICATE----- - -----BEGIN X509 CRL----- --MIIDGTCCAQECAQEwDQYJKoZIhvcNAQELBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD -+MIIDPjCCASYCAQEwDQYJKoZIhvcNAQENBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD - VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u - MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv - cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMRYwFAYD --VQQLDA1JQk0gWiBSb290IENBFw0yMzAzMTkxMTA0NDRaGA8yMzg3MTIzMTExMDQ0 --NFowFTATAgIBTRcNMjMwMzI4MTEwNDQ0WjANBgkqhkiG9w0BAQsFAAOCAgEAzXCT --KYc+J3XC49cCzdJBRzXdVk8AqylNFCBC/Z4a9AaPnFgUHIZqLcucvVTBMlYkzQST --7lu47hksCrlePGeY58goa8rOUuTjH0Gk/809oMNMxyJ3UjuEq/Q45gDeKys8UZqu --qrgHZ1dnBB5ARdPEhkMLzBgizrknhPXcAyg0f4dy8wFPCNJ0T+DiNdqKoQCZNsxD --3p1N3vTMIO20oWbX9cDZoY2Xb0rT9Cbt7ES3JY1DB4Z92zPB5ZxFuCIsxT3Jtszd --fM6YktxJbUvds/mqwmYCbQNZ4veS5YcrFPVVSADjnwP88GMbIQddAvXLOhjrUj4B --QEMndtREs0MvkDZdc/YkTEI/c1QF1xNT+UrMOxC0sEHSvcOQXNtw579QJ0gucA2J --HWWr6p2wTDrIefhppBQS0GSY2n7L1loKAZZNWt56TQoXRFWI4CtLJJTsPKIHWTQz --KLBgv5UlOdbcuh1+foY/XS8prlZvMS22oiDMLIknBR7ywYuYEq9YPKzDXWAL4mHk --DInbBQEYC2ar7wiLLBOM/2c2BmkzDdygChj7/1xvNYMGXEnak0Y/V75uAuWQ1h0T --3e52xW2RzwjYsoM04WBsSJFNd7VYSuSX7SneJywrSBneB+XvB7tcVaycsA1f3BMT --ptsIMqT9/N3++8MGCb/SRWoWFlLjITR9l5y3BUU= -+VQQLDA1JQk0gWiBSb290IENBFw0yNDAzMTExNTUyMzdaGA8yMzg4MTIyMzE1NTIz -+N1owFTATAgIBTRcNMjQwMzIwMTU1MjM3WqAjMCEwHwYDVR0jBBgwFoAU1p7a8iaQ -+sGamSdR9585yWIW36CowDQYJKoZIhvcNAQENBQADggIBAIs4Y7Qsk2TAJQqdNQeM -+3Gjx2fYFHbliDRVQPU5OP19ppeUzFGRL/SRplCH4rvEj4bBwQZcsNdmzxYyFbWcz -+lU38l5/mgkLkPdHzRnbPmVraWqKzuvNrTQ3Lj5VcgOJ95UAOwOIg2jWZlkIAUgO/ -+cayWJK7l0gOIDCKSyuI6haM5D6wNAwfyKZ4WkBgYulCeqRmsMePvsj/WYL1tcR6Y 
-+1WSN6FtvLn9u2vqrJ879BEH5RDQSkCXsxBqKFnOpLf/gpRYhRp+3JpSACXOzy1wr -+8KxlvZ/14UOhz4x8VZ9GJTzLydzLPIhNTwcl1PJyD3pOhASxfdSXuR8f3jJvAGv5 -+kaj9uO23sKU4PzLfr/DPFkhKsp+vShoSGtnZjj7ewYW9Y/PY0D9TX8CWIcwKuJ8U -+XTWRj9s/FLTlqfrbJXvSaRd0rihGQI+mSwar5cnBuxULgw4V35YmTiY8wCf3+k9c -+tGRRUj5A2fVs9NWWTReBatoou8Rnuraz6ctqwFsxJQSGS1JpfECznGq2wKqLstKn -+WCMEAKehcGqLlSco7Hg2qRNgDw5AAC058drRf9L70geV4Tf00iXjK19aHSJ1wH+p -+7hUcTCiQ7413ztC7HawqsRW3Yzszucj+6vqF8ZYMh+9oEzzWJEt/piKakIwbxwKk -+cf5JE3KE7X+LQS3p4+sw/wP4 - -----END X509 CRL----- -diff --git a/rust/pv/tests/assets/cert/root_ca.crl b/rust/pv/tests/assets/cert/root_ca.crl -new file mode 100644 -index 0000000..39ad714 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/root_ca.crl -@@ -0,0 +1,20 @@ -+-----BEGIN X509 CRL----- -+MIIDPjCCASYCAQEwDQYJKoZIhvcNAQENBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD -+VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -+cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMRYwFAYD -+VQQLDA1JQk0gWiBSb290IENBFw0yNDAzMTExNTUyMzdaGA8yMzg4MTIyMzE1NTIz -+N1owFTATAgIBTRcNMjQwMzIwMTU1MjM3WqAjMCEwHwYDVR0jBBgwFoAU1p7a8iaQ -+sGamSdR9585yWIW36CowDQYJKoZIhvcNAQENBQADggIBAIs4Y7Qsk2TAJQqdNQeM -+3Gjx2fYFHbliDRVQPU5OP19ppeUzFGRL/SRplCH4rvEj4bBwQZcsNdmzxYyFbWcz -+lU38l5/mgkLkPdHzRnbPmVraWqKzuvNrTQ3Lj5VcgOJ95UAOwOIg2jWZlkIAUgO/ -+cayWJK7l0gOIDCKSyuI6haM5D6wNAwfyKZ4WkBgYulCeqRmsMePvsj/WYL1tcR6Y -+1WSN6FtvLn9u2vqrJ879BEH5RDQSkCXsxBqKFnOpLf/gpRYhRp+3JpSACXOzy1wr -+8KxlvZ/14UOhz4x8VZ9GJTzLydzLPIhNTwcl1PJyD3pOhASxfdSXuR8f3jJvAGv5 -+kaj9uO23sKU4PzLfr/DPFkhKsp+vShoSGtnZjj7ewYW9Y/PY0D9TX8CWIcwKuJ8U -+XTWRj9s/FLTlqfrbJXvSaRd0rihGQI+mSwar5cnBuxULgw4V35YmTiY8wCf3+k9c -+tGRRUj5A2fVs9NWWTReBatoou8Rnuraz6ctqwFsxJQSGS1JpfECznGq2wKqLstKn -+WCMEAKehcGqLlSco7Hg2qRNgDw5AAC058drRf9L70geV4Tf00iXjK19aHSJ1wH+p -+7hUcTCiQ7413ztC7HawqsRW3Yzszucj+6vqF8ZYMh+9oEzzWJEt/piKakIwbxwKk -+cf5JE3KE7X+LQS3p4+sw/wP4 -+-----END X509 CRL----- -diff --git 
a/rust/pv/tests/assets/cert/root_ca.crt b/rust/pv/tests/assets/cert/root_ca.crt -index 24cbb0c..8d9b5af 100644 ---- a/rust/pv/tests/assets/cert/root_ca.crt -+++ b/rust/pv/tests/assets/cert/root_ca.crt -@@ -1,37 +1,37 @@ - -----BEGIN CERTIFICATE----- --MIIGXzCCBEegAwIBAgIUWAen+5+bRZaL8kVlq51CXPQUdZUwDQYJKoZIhvcNAQEL -+MIIGXzCCBEegAwIBAgIUd6BiIEGe+cX502NGGPVuMHrV0ucwDQYJKoZIhvcNAQEL - BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu - ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs - IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y --azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz --MDMyOTA5MDQ0NFoYDzIzODcxMjMxMDkwNDQ0WjCBtTELMAkGA1UEBhMCVVMxNDAy -+azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 -+MDMyMTE0NTIzN1oYDzIzODgxMjIzMTQ1MjM3WjCBtTELMAkGA1UEBhMCVVMxNDAy - BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp - b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y - cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxFjAU - BgNVBAsMDUlCTSBaIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK --AoICAQD0gnTWPpI/FahyE+PizunOHzNBi88KObopjL6+6p9J10RB82pDxaLoMxPo --6xkm0ZKiFea6ll6sAfcKPzCei+3691sCOiGN/JYdSZR+SynRGeD1o8KJWAwfmZ4w --DNHWHRqUE6u3oqAPDQUqaOnzyGdh2vGLP3HU5F5rIDhYzilm7WuXykYdiF/P6Rjs --xzzQZOxHTnvV8byEDN0Sw0ONsQSFee0uM22e20Kme34A4p+p7QBiRFSJAQyLtp/o --YBY8zeIgekr+BppRbCItEFAgHBS8DLk04jhNfGlU5RteaOJe7D6tg7z9CC8X0dAI --/p1vJkz5zYMPGuoKKaowDAJ+ncwN+UDYhbd4g0ATKC7M5DFmn4Wy60G+3op2BNp4 --PadcsOcXwzKsOSQix0+n4Tfw1QQ07+ilBHBe8fCRpk2Nf/byYuEaBjPBTXUcpTsp --Ydv0s5HjSyRfhEWKMQBOIn6mSORayTJ1/xQECrOfuIopLiZyMCjhixT4PImiGucM --ruCmsHZvfA/8VQxyEcPK5K+XqFaSr5r423C4CzcafHhvUFOYj2WmJyZzWc2X2fjo --l+ZspNpQTlvYgVi2cTeBnbNqCNMcTiGoyie5roHGa4CnigHuBIVSuUVGq/rqFKnT --KgKboI8C3XpQID6rFcYY444wOpfea8LodHTXFF6o8A5m+6QlGwIDAQABo2MwYTAP --BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBRJTc+P --Ts7kYFuicyYmOttP7ItAazAdBgNVHQ4EFgQUSU3Pj07O5GBbonMmJjrbT+yLQGsw 
--DQYJKoZIhvcNAQELBQADggIBAFCGlLEdAsicVPPuekhdfOyoK/P6r5JHnPIpC894 --9TVUp/7LBvDOwcNAvs+34dsbfRtMmwXUCGofrS4S0+zugnpgx6D1ScWC+FNdBJf9 --C6XKBiU4Mxn/mKWTDUMBCxI+RRbpOvOox5h6pQcfo0IYz5okazol1nT40IdvNkZ7 --WDAz6Xqmw8k/+8Y+l1fDSfUqcgPnTBfgX3kkna8VHBx74nSqeFgOVvtd8BJq/xIG --6rAkGsv/PzmQqKpyoD70N/nGALomLlVScX5qIbNrXWW0DWxY+kDwhRlskPdYS7RJ --Qol3NX0t7GZ+l+0W2Kpp7k04n5xrJ7pzTDyltJJEeaLfqrMlF5MCK/UyJqxa3LQr --IkP2wTxM4HR1gsIvr8ei3QFSBtxEGiQl2VWfBs8bwy+KkL1ao1UIcB+1dj4SLy8x --RqNFNfogkyLL9cELiKxViRQgNYowv3OsSw30i4eSHMor8m0qfVf38t7gi+m0Ba+T --+RIV5MlZcjG6C3jS8AMpUwXO4hOxGEueuXmZuCDmyypqRpMJc0hu2JA1Q9fztvfn --hnOAv6K++5hB4mIyqUp8/QJjO9X2kKa4+MmbuNTnRuC13s2f7pzHjZR3cFf6QN5b --duxTQZ18xvqXIR6EV4PVwShjCMS6NHmkmumIxMzaDK1mGwL7l7B0ZS+r3i7qwGPw --2iYv -+AoICAQDMw/UE5XNiZpjuCRtyq4A9PDBuJJgyUl14xvN8lcsLHGK9b/WkQdDxUXsN -+QbvS2HVkg2X6AGHwixk/3U0mzgzImZhmh3i0IbFn7L2eqpuvgP82BJpveq58Sjsk -+OHYuG2JqSv63reKtK8pLXtKZnSV9vFUAwxWSuqA68FLwDgjGMK/Iy3Hh+cwnpffn -+IMqJ70Nlxc7z4KKO4nrO/+rIQSqe0UpjEEaK4Eb3YXWWontaah3/sw+uAbVncJMV -++AacHQzi130sJJeLe4p+8DD9EYX112DEj6WFrHpyEqhsGXvLO7uJRzMqJ1WVz47x -+j8wK+GPm66KCeOM+wm65u49vNHDotlQzoclIzLDiegQo/3ob5YRTFNDJzbNN6AAE -+zF8K7f8t2TNYeWgPOmXLEVmFalOSNTZ5sLrJipsYs7uT7CT1orW3PvM16v+knigJ -+NuryIdr/+hFgRh7Sr4U/6H6fDuZGJXgZeUSNM7S1UI0iFTZyKAP/3EQrhRWlIROs -+ql9rFnH0LWaPcmk66Yyhq6Va0dmLBSjGPWAHLguKyHkMvhJfMd/1miGfz9Rz9LbE -+6CqOK8sf6EQNWTgmpTdXgjmR9dvMk3ki2aar0u+IamzfIx3y0OPSlpaO8mFMucZe -+3iaeG/l84sUR93NgVI2dCdtEinCQviudV+QwRdLzZQQNM5zLzwIDAQABo2MwYTAP -+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBTWntry -+JpCwZqZJ1H3nznJYhbfoKjAdBgNVHQ4EFgQU1p7a8iaQsGamSdR9585yWIW36Cow -+DQYJKoZIhvcNAQELBQADggIBAHw/+RKyDRBsvWksX4ji95W+4uo292psPzpeusjw -+Ztl25D4jssgjNbEiNwyYgV9e9BCse9hOkFwAE6ogwBAel7POX8XjizaJcwSs/GaC -+2ORQ/KYRpMsypENZ4HZQbc8j9ROqTDD45B+9/5nAA6le/7wd0yS8hbum/b78vAN1 -+98Ja8wfQIq1PE0heFELSRR2hAmcGkxIo1tBqP/CnAzOpBZ2ovPJ75oeGlnpk/ATY -+rUJPTY0UPGp9ZSq/l+t88onDewjCblMMMBeJSEklP2CFlE1jGx3QJtnThiJ9WQKG 
-+TMakgaAERrcvXs6Lx2GFuk7kCOJsWJtvT6CJVCUtEBHNGY3GqyQwNrE3OEVQ6CFb -+CNNarrRKlArBISExLY3xn0CcuWr8GewsIvwzmJBX+u2Xs0s7RYFc4S4s/SbRLqi4 -+Gn77E7dG711nKHXwDxYOuDAL0MNBT4aDephQK+rF7GlHMLTUPWKYCwmjh2vzmRWJ -+fZkojybvX6g8+7fKmwTUn5LXlcnmjv9KYN8LgfHSQhvRaGtGph0X8mVH3eCs4MGG -+OUU6p9wdPS3MDBHRzn0MXEhq25r+xT829lcHdoxkMVorVDTDD7hOm3HBsF/LOh6c -+/sA1LZnnfy+YZt+Gn5cE5CSK3ddmzRbrB+ZHWbJahegJktWq2rrnw9ipfRln7DJF -+fsHf - -----END CERTIFICATE----- -diff --git a/rust/pv/tests/assets/cert/root_ca.key b/rust/pv/tests/assets/cert/root_ca.key -new file mode 100644 -index 0000000..abbe186 ---- /dev/null -+++ b/rust/pv/tests/assets/cert/root_ca.key -@@ -0,0 +1,52 @@ -+-----BEGIN PRIVATE KEY----- -+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDMw/UE5XNiZpju -+CRtyq4A9PDBuJJgyUl14xvN8lcsLHGK9b/WkQdDxUXsNQbvS2HVkg2X6AGHwixk/ -+3U0mzgzImZhmh3i0IbFn7L2eqpuvgP82BJpveq58SjskOHYuG2JqSv63reKtK8pL -+XtKZnSV9vFUAwxWSuqA68FLwDgjGMK/Iy3Hh+cwnpffnIMqJ70Nlxc7z4KKO4nrO -+/+rIQSqe0UpjEEaK4Eb3YXWWontaah3/sw+uAbVncJMV+AacHQzi130sJJeLe4p+ -+8DD9EYX112DEj6WFrHpyEqhsGXvLO7uJRzMqJ1WVz47xj8wK+GPm66KCeOM+wm65 -+u49vNHDotlQzoclIzLDiegQo/3ob5YRTFNDJzbNN6AAEzF8K7f8t2TNYeWgPOmXL -+EVmFalOSNTZ5sLrJipsYs7uT7CT1orW3PvM16v+knigJNuryIdr/+hFgRh7Sr4U/ -+6H6fDuZGJXgZeUSNM7S1UI0iFTZyKAP/3EQrhRWlIROsql9rFnH0LWaPcmk66Yyh -+q6Va0dmLBSjGPWAHLguKyHkMvhJfMd/1miGfz9Rz9LbE6CqOK8sf6EQNWTgmpTdX -+gjmR9dvMk3ki2aar0u+IamzfIx3y0OPSlpaO8mFMucZe3iaeG/l84sUR93NgVI2d -+CdtEinCQviudV+QwRdLzZQQNM5zLzwIDAQABAoICAADiwh8UzQR6dCPndy5uTn41 -+UfJQBzaEg7H/jlMWJMw2AblXECV3QWuh0hnzFFhrpkpahSjpMoNDXscXcnt9/bEq -+dO3QnTWORcGw1PsoOscuFCyMJYzg53tTKOFVuzEk3i6eh26M+oOMQnJEBT4z31Ml -+auq3bVL2qrXGj11JekE9Oa1xL9tt5LOxNJrT2fxxxAVfLy9/48Qhd62Ijt/x5DjO -+p/c4vU1hff0Y91TA/C6eceXGxQUYLcw2QPSh34lyWLlsfDaiK+OnE6jL6jJWDpHL -+Ljh7dJhY8CipKwBYtd/hsMR9wdtnUyf0P2aNHDFZ9LitgUT2N+lwFuGHza2J+QkO -+sJndo0bNZCRbcfyaATpqQK50fLHpSOkIaA0uNf8IPsgRHnC9oHimIM70/VB4xN5K -+8UJb02rVvAWsV2ZJEzhBXt3ivMwH6d+HKVerkjdhuKxUSO0x4ZfArZI0md6M2Etf 
-+b2UbMOyyTknWiJcYD65151rThGeyKRSW1GalvrwWwcAAWyabf61a681brpY0t/u4 -+aZXBdg41DiZ8bnheK706+l7hpCt/Zg3d+HPVtG8xWreFZzWMPNAmuVcdqtEOqdhe -+aW6xDR/7ekdVjtzFboTVuq12JvMgucfXoYjqNA1TxvXIdl1eSWZzEnfXhDxDezpI -+ZUXfR1jdmyEucFXAlkjVAoIBAQD094bU92/iTkV3ySqDqd1a9gdfMw9ewi18TzWM -+fCtE+dWgvXE1RmzRmHaeKjElJaeAAc/ggXen1ecJmCRsD1wjItyP1/j/IzY/7yzK -+KtLV3w3oArENFwZKa3HeaHsapwladJBVC0CpaRnfO52VKDa/1gvDQsojOkDhngCH -+MKfRd5x4c4+WOuvTpfyQASLXJcurlIhabCa/MGIvhMRXcspGkkttZqxhLGoioAmS -+VcKhje8UuDLnBxCFrmIOVl5Hf8wT4TfOYWq6J5F/v/32dq8Rdf/W8KRIq0N5JDnC -+xFKGkm8QEFxNuSzzfykMcEhKGYXAYFKGIBRhM+nNGuFyqMB7AoIBAQDV/Oguqp74 -+4inMMQTP8i3aYlASfU/l2OkNQE7JbxIClf52TzOCPy8v86FjIyx2iYPv2osI3RGi -+yVdDIYPXOBEQZQV7eHS3kuO5/lFw6psQVjMf0qdE9hGuZx3MnXQgol7mUEDS7Rjc -+e1YSkYPpdSd2y1Gsb84iZcbBgBPZsdjy81ZBrfP8bo3BVBtqQIngWCXvM3gWmBH/ -+duNxtpvNBFruX2ZwaKcRJjfZ7V6iy2fzEYH84FPyiwj/COCtSfDgkFjCmEa2jepQ -++5r1VXFjeWwonfYeiwZ/WlIGDKhHNZG4XCni05CA2ZejDLmI3IKaMjukWNI49hhS -+ypLJj5n9a8O9AoIBAHrPfbFXO6hhRUKAf+fySR9JKPj5SENhZhxkOZxsw+SYvBkR -+4Kes3vFVVmKFFI5jCk6KmgUb4zkpa+LLe7cRHEghOiKDTDndFV2IMxRH09uVMAuZ -+DdpaKLU2mKfACbea9n164sFAGvLT1jysTpxwxMH4NX13BpASrGRwKiOcqQXCyZc5 -+mErgz0oUdUJcz3K9aBZlKNbsUaCYSSE5lpWg3vfycA4w40r0UWF6ilHq3ODRn4Lr -+tdlNGWNrwY0ej7WYxF9TEf9Np4wcOj2pq1Dcv3gpiFHh4vrrobAiETMr6ZO95iBP -+k3cD1x1cKuApipRbp0qC/9xuSMlSlWxWhaBOKfECggEAb0NdBka4+fe+e+lQ8z6d -+ENvlfnehv4UVSEqTrLEP0EBlWua7hZGM24X1+DIlwEyoSWJ8wFMSBG5j7QfUIWeJ -+l9ivDRAIwBqkReUyO2AA2HG5i2Zgir7XWrNLD0UfSIikh2RbEFEviBSpIGaBDDZa -+Gq6E/P/1UnVQ7vPFXn/WqhxUUTo9jpd9JXSx/IEqL3gl4UYFvtm7IfWTNWEZiXQm -+Q6NfBDumAoi4qZt+hW710bDcwbtyar5YIyNejzvO/zSOsj+zJOCNYSYx4DZZCrvr -+vQLFIgRvkHBKDdMu/DeiWRWywbn3fMemzKSlI8BkOAC+eimkxPFQnFuwDxWXn+kU -+kQKCAQEA0EkmMxpI8eC47YeHXKBiACMsRiyiItXal7TAwy/ZQMg/nxrDB85f5L8B -+Zucw2UsA40/5EN/L6n10iaWBQzz7q3TcI15MmIzoBA7Bh9IJI9Oz1+oeOn01b9u0 -+7cb+5Shh9Tmu2G4Z/9LE7wB8q9sU/BtJns6owFbGHL9q6ndtl7YVQ7uVzUQjNQph -+qp2DvA8qZYBkNRNZwYAMOJaAO8uhDBSR2wm9XiZ3Q9kar0Tx6sR9WK2pjZTE3okJ 
-+RUqzQS0EXQ2XLepP3rhYtjAjtlnx9K3jniIHciyFV2rW0SaGH4VvE/nRvCM12q/8 -+p+5MT0G3vVdRQM0Poco3B8DElAoMTQ== -+-----END PRIVATE KEY----- --- -2.25.1 - diff -Nru s390-tools-2.31.0/debian/patches/rust_paths.patch s390-tools-2.33.1/debian/patches/rust_paths.patch --- s390-tools-2.31.0/debian/patches/rust_paths.patch 2024-02-06 11:33:18.000000000 +0100 +++ s390-tools-2.33.1/debian/patches/rust_paths.patch 2024-05-28 10:32:08.000000000 +0200 @@ -7,7 +7,7 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ --- a/rust/Makefile +++ b/rust/Makefile -@@ -71,9 +71,9 @@ +@@ -73,9 +73,9 @@ install-rust-tools: $(BUILD_TARGETS) $(INSTALL) -d -m 755 $(DESTDIR)$(USRBINDIR) $(foreach target,$(CARGO_TARGETS),\ @@ -18,4 +18,4 @@ + $(INSTALL) target/$(DEB_HOST_RUST_TYPE)/release/$(target) $(DESTDIR)$(USRBINDIR);) install-man: - $(foreach target,$(CARGO_TARGETS),\ + $(INSTALL) -d -m 755 $(DESTDIR)$(MANDIR)/man1 diff -Nru s390-tools-2.31.0/debian/patches/series s390-tools-2.33.1/debian/patches/series --- s390-tools-2.31.0/debian/patches/series 2024-04-02 12:45:30.000000000 +0200 +++ s390-tools-2.33.1/debian/patches/series 2024-05-28 10:32:08.000000000 +0200 @@ -3,9 +3,3 @@ rust_paths.patch vendor-remove-unused-deps.patch gzip-files-without-timestamps-or-names.patch -lp-2058944-dbginfo.sh-dash-compatible-copy-sequence.patch -lp-2059303-rust-pv-test-Code-Certificate-refactoring.patch -lp-2059303-rust-pv-Support-Armonk-in-IBM-signing-key-subject.patch -lp-2059303-genprotimg-support-Armonk-in-IBM-signing-key-subject.patch -lp-2059303-libpv-Support-Armonk-in-IBM-signing-key-subject.patch -lp-2059303-pvattest-Fix-root-ca-parsing.patch diff -Nru s390-tools-2.31.0/debian/patches/vendor-remove-unused-deps.patch s390-tools-2.33.1/debian/patches/vendor-remove-unused-deps.patch --- s390-tools-2.31.0/debian/patches/vendor-remove-unused-deps.patch 2024-02-06 11:33:18.000000000 +0100 +++ s390-tools-2.33.1/debian/patches/vendor-remove-unused-deps.patch 2024-05-28 10:32:08.000000000 +0200 @@ 
-2,7 +2,7 @@ For some reason cargo insists on having them available at build time Author: Simon Chopin Forwarded: not-needed -Last-Update: 2023-08-16 +Last-Update: 2024-05-28 --- This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ --- a/rust-vendor/curl/Cargo.toml @@ -26,19 +26,17 @@ --- a/rust-vendor/socket2/Cargo.toml +++ b/rust-vendor/socket2/Cargo.toml -@@ -75,13 +75,3 @@ +@@ -58,11 +58,3 @@ [target."cfg(unix)".dependencies.libc] - version = "0.2.141" + version = "0.2.139" - --[target."cfg(windows)".dependencies.windows-sys] --version = "0.48" +-[target."cfg(windows)".dependencies.winapi] +-version = "0.3.9" -features = [ -- "Win32_Foundation", -- "Win32_Networking_WinSock", -- "Win32_System_IO", -- "Win32_System_Threading", -- "Win32_System_WindowsProgramming", +- "handleapi", +- "ws2ipdef", +- "ws2tcpip", -] --- a/rust-vendor/anstyle-query/Cargo.toml +++ b/rust-vendor/anstyle-query/Cargo.toml @@ -178,7 +176,7 @@ -version = "0.2" - -[target."cfg(windows)".dependencies.windows-sys] --version = "0.48" +-version = "0.52" -features = ["Win32_Networking_WinSock"] - [badges.appveyor] @@ -206,55 +204,3 @@ - "Win32_Foundation", - "Win32_System_Diagnostics_Debug", -] ---- a/rust-vendor/socket2-0.4.9/Cargo.toml -+++ b/rust-vendor/socket2-0.4.9/Cargo.toml -@@ -58,11 +58,3 @@ - - [target."cfg(unix)".dependencies.libc] - version = "0.2.139" -- --[target."cfg(windows)".dependencies.winapi] --version = "0.3.9" --features = [ -- "handleapi", -- "ws2ipdef", -- "ws2tcpip", --] ---- a/rust-vendor/backtrace/Cargo.toml -+++ b/rust-vendor/backtrace/Cargo.toml -@@ -103,18 +103,6 @@ - serialize-serde = ["serde"] - std = [] - unix-backtrace = [] --verify-winapi = [ -- "winapi/dbghelp", -- "winapi/handleapi", -- "winapi/libloaderapi", -- "winapi/memoryapi", -- "winapi/minwindef", -- "winapi/processthreadsapi", -- "winapi/synchapi", -- "winapi/tlhelp32", -- "winapi/winbase", -- "winapi/winnt", --] - - [target."cfg(not(all(windows, target_env = \"msvc\", not(target_vendor = 
\"uwp\"))))".dependencies.addr2line] - version = "0.21.0" -@@ -139,7 +127,3 @@ - "archive", - ] - default-features = false -- --[target."cfg(windows)".dependencies.winapi] --version = "0.3.9" --optional = true ---- a/rust-vendor/parking_lot_core/Cargo.toml -+++ b/rust-vendor/parking_lot_core/Cargo.toml -@@ -61,6 +61,3 @@ - - [target."cfg(unix)".dependencies.libc] - version = "0.2.95" -- --[target."cfg(windows)".dependencies.windows-targets] --version = "0.48.0" diff -Nru s390-tools-2.31.0/debian/s390-tools.install s390-tools-2.33.1/debian/s390-tools.install --- s390-tools-2.31.0/debian/s390-tools.install 2024-02-06 11:33:18.000000000 +0100 +++ s390-tools-2.33.1/debian/s390-tools.install 2024-05-28 10:32:08.000000000 +0200 @@ -1,17 +1,17 @@ # pick selected tools and files for non-s390x platforms # pvattest -usr/bin/pvattest +usr/bin/pvattest* usr/bin/pvextract-hdr -pvattest/tools/pvattest-info usr/share/s390-tools/pvattest/ +rust/pvattest/tools/pvattest-* usr/share/s390-tools/pvattest/ usr/share/man/man1/pvattest*.1 -pvattest/README.md usr/share/s390-tools/pvattest/ +rust/pvattest/README.md usr/share/doc/s390-tools/pvattest/ # genprotimg usr/bin/genprotimg usr/share/s390-tools/genprotimg/check_hostkeydoc usr/share/man/man1/genprotimg.1 -genprotimg/README.md usr/share/s390-tools/genprotimg/ +genprotimg/README.md usr/share/doc/s390-tools/genprotimg/ # pvsecret usr/bin/pvsecret diff -Nru s390-tools-2.31.0/debian/s390-tools.install.s390x s390-tools-2.33.1/debian/s390-tools.install.s390x --- s390-tools-2.31.0/debian/s390-tools.install.s390x 2024-02-06 11:33:18.000000000 +0100 +++ s390-tools-2.33.1/debian/s390-tools.install.s390x 2024-05-28 10:32:08.000000000 +0200 @@ -5,6 +5,7 @@ usr/share/man/ lib/ usr/share/s390-tools +rust/pvattest/tools/pvattest-* usr/share/s390-tools/pvattest/ # kernel stuff debian/kernel/zz-zipl /etc/initramfs/post-update.d @@ -46,16 +47,14 @@ /sbin/lsstp /usr/share/man/man8/lsstp.8 -# pvattest -pvattest/tools/pvattest-info 
/usr/share/s390-tools/pvattest/ - # ap_tools debian/tmp/usr/lib/mdevctl/scripts.d/callouts/ap-check /usr/lib/mdevctl/scripts.d/callouts/ etc/mdevctl.d/scripts.d/callouts/ap-check.sh # readmes -genprotimg/README.md /usr/share/s390-tools/genprotimg/ -pvattest/README.md /usr/share/s390-tools/pvattest/ -zfcpdump/README.part /usr/share/s390-tools/zfcpdump/ -hsavmcore/initramfs/ubuntu/README.md /usr/share/s390-tools/hsavmcore/initramfs/ -iucvterm/doc/ts-shell/README.ts-shell /usr/share/s390-tools/iucvterm/ts-shell/ +genprotimg/README.md /usr/share/doc/s390-tools/genprotimg/ +rust/pvattest/README.md /usr/share/doc/s390-tools/pvattest/ +zfcpdump/README.part /usr/share/doc/s390-tools/zfcpdump/ +hsavmcore/initramfs/ubuntu/README.md /usr/share/doc/s390-tools/hsavmcore/initramfs/ +iucvterm/doc/ts-shell/README.ts-shell /usr/share/doc/s390-tools/iucvterm/ts-shell/ +netboot/README.md /usr/share/doc/s390-tools/netboot/ diff -Nru s390-tools-2.31.0/etc/sysconfig/dumpconf s390-tools-2.33.1/etc/sysconfig/dumpconf --- s390-tools-2.31.0/etc/sysconfig/dumpconf 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/etc/sysconfig/dumpconf 2024-05-28 08:26:36.000000000 +0200 @@ -29,6 +29,15 @@ # DELAY_MINUTES=5 # +# Dump on ECKD device (DASD) +# +#ON_PANIC=dump +#DUMP_TYPE=eckd +#DEVICE=0.0.1004 +#BOOTPROG=0 +#BR_CHR=auto + +# # Dump on fcp device (SCSI Disk) # # ON_PANIC=dump diff -Nru s390-tools-2.31.0/fdasd/fdasd.8 s390-tools-2.33.1/fdasd/fdasd.8 --- s390-tools-2.31.0/fdasd/fdasd.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/fdasd/fdasd.8 2024-05-28 08:26:36.000000000 +0200 @@ -8,16 +8,16 @@ .SH SYNOPSIS interactive mode: .br - \fBfdasd\fR [-s] [-r] [-C] \fIdevice\fR + \fBfdasd\fR [\-s] [\-r] [\-C] \fIdevice\fR .br command line mode: .br - \fBfdasd\fR [-s] [-r] [-C] {-a[-k|-l \fIvolser\fR]|-i|-p|-c \fIconf_file\fR} + \fBfdasd\fR [\-s] [\-r] [\-C] {\-a[\-k|\-l \fIvolser\fR]|\-i|\-p|\-c \fIconf_file\fR} [-f \fI[type,blocksize]\fR] \fIdevice\fR .br help: .br - \fBfdasd\fR 
{-h|-v} + \fBfdasd\fR {\-h|\-v} .SH DESCRIPTION \fBfdasd\fR writes a partition table to a cdl (compatible disk layout) formatted DASD, in the form of @@ -30,35 +30,35 @@ \fBfdasd\fR can result in loss of data. .SH OPTIONS .TP -\fB-h\fR or \fB--help\fR +\fB\-h\fR or \fB\-\-help\fR Print usage information, then exit. .TP -\fB-v\fR or \fB--version\fR +\fB\-v\fR or \fB\-\-version\fR Print version information, then exit. .TP -\fB-s\fR or \fB--silent\fR +\fB\-s\fR or \fB\-\-silent\fR Suppress messages in non-interactive mode. .TP -\fB-r\fR or \fB--verbose\fR +\fB\-r\fR or \fB\-\-verbose\fR Provide more verbose output. .TP -\fB-a\fR or \fB--auto\fR +\fB\-a\fR or \fB\-\-auto\fR Automatically create a partition using the entire disk in non-interactive mode. .TP -\fB-k\fR or \fB--keep_volser\fR +\fB\-k\fR or \fB\-\-keep_volser\fR Keeps the Volume Serial Number when writing the Volume Label. .br This is useful if the volume already has a Serial Number that should not be overwritten. This option is only applicable in non-interactive mode. .TP -\fB-l\fR \fIvolser\fR or \fB--label\fR \fIvolser\fR +\fB\-l\fR \fIvolser\fR or \fB\-\-label\fR \fIvolser\fR Specify the volume serial. .br \fIvolser\fR is interpreted as ASCII string and is automatically converted to @@ -82,14 +82,14 @@ Please specify it using '\\$' if necessary. .br -e.g. -l 'a@b\\$c#' to get A@B$C# +e.g. \-l 'a@b\\$c#' to get A@B$C# .br Omitting this parameter causes fdasd to ask for it in case it is needed. .br .TP -\fB-c\fR \fIconf_file\fR or \fB--config\fR \fIconf_file\fR +\fB\-c\fR \fIconf_file\fR or \fB\-\-config\fR \fIconf_file\fR Use this option to create multiple partitions according to specifications in a configuration file, \fIconf_file\fR. .br @@ -124,37 +124,37 @@ .br .TP -\fB-i\fR or \fB--volser\fR +\fB\-i\fR or \fB\-\-volser\fR Print the volume serial, then exit. .TP -\fB-p\fR or \fB--table\fR +\fB\-p\fR or \fB\-\-table\fR Print partition table, then exit. 
.br -In combination with the -s option fdasd will display a short version of the +In combination with the \-s option fdasd will display a short version of the partition table. .TP -\fB-C\fR or \fB--check_host_count\fR +\fB\-C\fR or \fB\-\-check_host_count\fR Force fdasd to check the host access open count to ensure the device is not online on another operating system instance .TP -\fB-f\fR \fI[type,blocksize]\fR or \fB--force\fR \fI[type,blocksize]\fR +\fB\-f\fR \fI[type,blocksize]\fR or \fB\-\-force\fR \fI[type,blocksize]\fR Force fdasd to work on non DASD devices. .br If fdasd is to be used on a block device that is neither a native DASD nor exposes the proper disk geometry of a DASD of type 3390, -then the --force option can be used to assume the geometry of a +then the \-\-force option can be used to assume the geometry of a given device type. The default device type is 3390 and the default block size is 4096. An optional argument of , can be used to specify type and blocksize explicitly. 
For example: --f +\-f has the same effect as --f3390,4096 or --force=3390,4096 +\-f3390,4096 or \-\-force=3390,4096 Valid device types are: 3390, 3380, 9345 .br diff -Nru s390-tools-2.31.0/genprotimg/boot/Makefile s390-tools-2.33.1/genprotimg/boot/Makefile --- s390-tools-2.31.0/genprotimg/boot/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/genprotimg/boot/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -7,7 +7,7 @@ ifeq ($(HOST_ARCH),s390x) ZIPL_DIR := $(rootdir)/zipl ZIPL_BOOT_DIR := $(ZIPL_DIR)/boot -PKGDATADIR := $(DESTDIR)$(TOOLS_DATADIR)/genprotimg +PKGDATADIR := $(TOOLS_DATADIR)/genprotimg INCLUDE_PATHS := $(ZIPL_BOOT_DIR) $(ZIPL_DIR)/include $(rootdir)/include INCLUDE_PARMS := $(addprefix -I,$(INCLUDE_PATHS)) @@ -86,9 +86,9 @@ @chmod a-x $@ install: stage3a.bin stage3b_reloc.bin - $(INSTALL) -d -m 755 "$(PKGDATADIR)" - $(INSTALL) -g $(GROUP) -o $(OWNER) -m 644 stage3a.bin "$(PKGDATADIR)" - $(INSTALL) -g $(GROUP) -o $(OWNER) -m 644 stage3b_reloc.bin "$(PKGDATADIR)" + $(INSTALL) -d -m 755 "$(DESTDIR)$(PKGDATADIR)" + $(INSTALL) -g $(GROUP) -o $(OWNER) -m 644 stage3a.bin "$(DESTDIR)$(PKGDATADIR)" + $(INSTALL) -g $(GROUP) -o $(OWNER) -m 644 stage3b_reloc.bin "$(DESTDIR)$(PKGDATADIR)" else # Don't generate the dependency files (see `common.mak` for the diff -Nru s390-tools-2.31.0/genprotimg/Makefile s390-tools-2.33.1/genprotimg/Makefile --- s390-tools-2.31.0/genprotimg/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/genprotimg/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -3,7 +3,7 @@ .DEFAULT_GOAL := all -PKGDATADIR := "$(DESTDIR)$(TOOLS_DATADIR)/genprotimg" +PKGDATADIR := "$(TOOLS_DATADIR)/genprotimg" TESTS := SUBDIRS := boot src man RECURSIVE_TARGETS := all-recursive install-recursive clean-recursive @@ -11,8 +11,8 @@ all: all-recursive install: install-recursive - $(INSTALL) -d -m 755 "$(PKGDATADIR)" - $(INSTALL) -g $(GROUP) -o $(OWNER) -m 755 samples/check_hostkeydoc "$(PKGDATADIR)" + $(INSTALL) -d -m 755 
"$(DESTDIR)$(PKGDATADIR)" + $(INSTALL) -g $(GROUP) -o $(OWNER) -m 755 samples/check_hostkeydoc "$(DESTDIR)$(PKGDATADIR)" clean: clean-recursive diff -Nru s390-tools-2.31.0/genprotimg/samples/check_hostkeydoc s390-tools-2.33.1/genprotimg/samples/check_hostkeydoc --- s390-tools-2.31.0/genprotimg/samples/check_hostkeydoc 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/genprotimg/samples/check_hostkeydoc 2024-05-28 08:26:36.000000000 +0200 @@ -4,7 +4,7 @@ # # Sample script to verify that a host key document is genuine by # verifying the issuer, the validity date and the signature. -# Optionally verify the full trust chain using a CA certficate. +# Optionally verify the full trust chain using a CA certificate. # # Sample invocation: # @@ -15,31 +15,33 @@ # s390-tools is free software; you can redistribute it and/or modify # it under the terms of the MIT license. See LICENSE for details. - # Allocate temporary files ISSUER_PUBKEY_FILE=$(mktemp) SIGNATURE_FILE=$(mktemp) BODY_FILE=$(mktemp) ISSUER_DN_FILE=$(mktemp) SUBJECT_DN_FILE=$(mktemp) -DEF_ISSUER_DN_FILE=$(mktemp) +DEF_ISSUER_ARMONK_DN_FILE=$(mktemp) +DEF_ISSUER_POUGHKEEPSIE_DN_FILE=$(mktemp) CANONICAL_ISSUER_DN_FILE=$(mktemp) CRL_SERIAL_FILE=$(mktemp) # Cleanup on exit cleanup() { - rm -f $ISSUER_PUBKEY_FILE $SIGNATURE_FILE $BODY_FILE \ - $ISSUER_DN_FILE $SUBJECT_DN_FILE $DEF_ISSUER_DN_FILE \ - $CANONICAL_ISSUER_DN_FILE $CRL_SERIAL_FILE + rm -f "$ISSUER_PUBKEY_FILE" "$SIGNATURE_FILE" "$BODY_FILE" \ + "$ISSUER_DN_FILE" "$SUBJECT_DN_FILE" "$DEF_ISSUER_ARMONK_DN_FILE" "$DEF_ISSUER_POUGHKEEPSIE_DN_FILE" \ + "$CANONICAL_ISSUER_DN_FILE" "$CRL_SERIAL_FILE" } trap cleanup EXIT # Enhanced error checking for bash -if [ -n "${BASH}" ] -then +if [ -n "${BASH}" ]; then + # shellcheck disable=SC3040 set -o posix + # shellcheck disable=SC3040 set -o pipefail + # shellcheck disable=SC3040 set -o nounset fi set -e @@ -47,8 +49,8 @@ # Usage usage() { -cat <<-EOF -Usage: `basename $1` [-d] [-c CA-cert] [-r CRL] host-key-doc 
signing-key-cert + cat <<-EOF +Usage: $(basename "$1") [-d] [-c CA-cert] [-r CRL] host-key-doc signing-key-cert Verify an IBM Secure Execution host key document against a signing key. @@ -71,8 +73,7 @@ { # Verify certificate chain in case a CA certificate file/bundle # was specified on the command line. - if [ $# = 1 ] - then + if [ -z "$2" ]; then cat >&2 <<-EOF !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! No CA certificate specified! Skipping trust chain verification. @@ -80,37 +81,37 @@ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! EOF else - openssl verify -crl_download -crl_check $2 && - openssl verify -crl_download -crl_check -untrusted $2 $1 || - exit 1 + openssl verify -crl_download -crl_check "$2" && + openssl verify -crl_download -crl_check -untrusted "$2" "$1" || + exit 1 fi } extract_pubkey() { - openssl x509 -in $1 -pubkey -noout > $2 + openssl x509 -in "$1" -pubkey -noout >"$2" } extract_signature() { # Assuming that the last field is the signature - SIGOFFSET=$(openssl asn1parse -in $1 | tail -1 | cut -d : -f 1) + SIGOFFSET=$(openssl asn1parse -in "$1" | tail -1 | cut -d : -f 1) - openssl asn1parse -in $1 -out $2 -strparse $SIGOFFSET -noout + openssl asn1parse -in "$1" -out "$2" -strparse "$SIGOFFSET" -noout } extract_body() { # Assuming that the first field is the full cert body - SIGOFFSET=$(openssl asn1parse -in $1 | head -2 | tail -1 | cut -d : -f 1) + SIGOFFSET=$(openssl asn1parse -in "$1" | head -2 | tail -1 | cut -d : -f 1) - openssl asn1parse -in $1 -out $2 -strparse $SIGOFFSET -noout + openssl asn1parse -in "$1" -out "$2" -strparse "$SIGOFFSET" -noout } verify_signature() { # Assuming that the signature algorithm is SHA512 with RSA - openssl sha512 -verify $1 -signature $2 $3 + openssl sha512 -verify "$1" -signature "$2" "$3" } canonical_dn() @@ -120,18 +121,30 @@ DNTYPE=$3 OUTPUT=$4 - openssl $OBJTYPE -in $OBJ -$DNTYPE -noout -nameopt multiline \ - | sort | grep -v $DNTYPE= > $OUTPUT + openssl "$OBJTYPE" 
-in "$OBJ" -"$DNTYPE" -noout -nameopt multiline | + LC_ALL=C sort | grep -v "$DNTYPE"= >"$OUTPUT" } -default_issuer() +default_issuer_armonk() { cat <<-EOF commonName = International Business Machines Corporation countryName = US - localityName = Poughkeepsie + localityName = Armonk + organizationName = International Business Machines Corporation organizationalUnitName = Key Signing Service + stateOrProvinceName = New York +EOF +} + +default_issuer_pougkeepsie() +{ + cat <<-EOF + commonName = International Business Machines Corporation + countryName = US + localityName = Poughkeepsie organizationName = International Business Machines Corporation + organizationalUnitName = Key Signing Service stateOrProvinceName = New York EOF } @@ -141,42 +154,37 @@ # stripping off the prefix verify_default_issuer() { - default_issuer > $DEF_ISSUER_DN_FILE + default_issuer_pougkeepsie >"$DEF_ISSUER_POUGHKEEPSIE_DN_FILE" + default_issuer_armonk >"$DEF_ISSUER_ARMONK_DN_FILE" sed "s/\(^[ ]*organizationalUnitName[ ]*=[ ]*\).*\(Key Signing Service$\)/\1\2/" \ - $ISSUER_DN_FILE > $CANONICAL_ISSUER_DN_FILE + "$ISSUER_DN_FILE" >"$CANONICAL_ISSUER_DN_FILE" - if ! diff $CANONICAL_ISSUER_DN_FILE $DEF_ISSUER_DN_FILE - then + if ! 
{ + diff "$CANONICAL_ISSUER_DN_FILE" "$DEF_ISSUER_POUGHKEEPSIE_DN_FILE" || + diff "$CANONICAL_ISSUER_DN_FILE" "$DEF_ISSUER_ARMONK_DN_FILE" + } >/dev/null 2>&1; then echo Incorrect default issuer >&2 && exit 1 fi } verify_issuer_files() { - if [ $1 -eq 1 ] - then - verify_default_issuer - fi - - if diff $ISSUER_DN_FILE $SUBJECT_DN_FILE - then - echo Issuer verification OK - else - echo Issuer verification failed >&2 && exit 1 + if [ "$1" -eq 1 ]; then + verify_default_issuer fi } cert_time() { - DATE=$(openssl x509 -in $1 -$2 -noout | sed "s/^.*=//") + DATE=$(openssl x509 -in "$1" -"$2" -noout | sed "s/^.*=//") date -d "$DATE" +%s } crl_time() { - DATE=$(openssl crl -in $1 -$2 -noout | sed "s/^.*=//") + DATE=$(openssl crl -in "$1" -"$2" -noout | sed "s/^.*=//") date -d "$DATE" +%s } @@ -188,8 +196,7 @@ MSG="${3:-Certificate}" NOW=$(date +%s) - if [ $START -le $NOW -a $NOW -le $END ] - then + if [ "$START" -le "$NOW" ] && [ "$NOW" -le "$END" ]; then echo "${MSG} dates are OK" else echo "${MSG} date verification failed" >&2 && exit 1 @@ -198,22 +205,21 @@ crl_serials() { - openssl crl -in $1 -text -noout | \ - grep "Serial Number" > $CRL_SERIAL_FILE + openssl crl -in "$1" -text -noout | + grep "Serial Number" >"$CRL_SERIAL_FILE" } check_serial() { - CERT_SERIAL=$(openssl x509 -in $1 -noout -serial | cut -d = -f 2) + CERT_SERIAL=$(openssl x509 -in "$1" -noout -serial | cut -d = -f 2) - grep -q $CERT_SERIAL $CRL_SERIAL_FILE + grep -q "$CERT_SERIAL" "$CRL_SERIAL_FILE" } check_file() { - [ $# = 0 ] || [ -e "$1" ] || - (echo "File '$1' not found" >&2 && exit 1) + (echo "File '$1' not found" >&2 && exit 1) } # check args @@ -221,28 +227,25 @@ CA_FILE= CHECK_DEFAULT_ISSUER=1 -args=$(getopt -qu "dr:c:h" $*) -if [ $? 
= 0 ] -then - set -- $args - while [ $1 != "" ] - do - case $1 in - -d) CHECK_DEFAULT_ISSUER=0; shift;; - -r) CRL_FILE=$2; shift 2;; - -c) CA_FILE=$2; shift 2;; - -h) usage $0; exit 0;; - --) shift; break;; - esac - done -else - usage $0 >&2 - exit 1 -fi +while getopts 'dr:c:h' opt; do + case $opt in + d) CHECK_DEFAULT_ISSUER=0 ;; + r) CRL_FILE=$OPTARG ;; + c) CA_FILE=$OPTARG ;; + h) + usage "$0" + exit 0 + ;; + ?) + usage "$0" + exit 1 + ;; + esac +done +shift "$((OPTIND - 1))" -if [ $# -ne 2 ] -then - usage $0 >&2 +if [ $# -ne 2 ]; then + usage "$0" >&2 exit 1 fi @@ -250,51 +253,51 @@ HKSK_FILE=$2 # Check whether all specified files exist -check_file $HKD_FILE -check_file $HKSK_FILE -check_file $CA_FILE -check_file $CRL_FILE +check_file "$HKD_FILE" +check_file "$HKSK_FILE" +# CA and CRL are optional arguments +[ -n "$CA_FILE" ] && check_file "$CA_FILE" +[ -n "$CRL_FILE" ] && check_file "$CRL_FILE" # Check trust chain -check_verify_chain $HKSK_FILE $CA_FILE +check_verify_chain "$HKSK_FILE" "$CA_FILE" # Verify host key document signature -echo -n "Checking host key document signature: " -extract_pubkey $HKSK_FILE $ISSUER_PUBKEY_FILE && -extract_signature $HKD_FILE $SIGNATURE_FILE && -extract_body $HKD_FILE $BODY_FILE && -verify_signature $ISSUER_PUBKEY_FILE $SIGNATURE_FILE $BODY_FILE || -exit 1 +printf "Checking host key document signature: " +extract_pubkey "$HKSK_FILE" "$ISSUER_PUBKEY_FILE" && + extract_signature "$HKD_FILE" "$SIGNATURE_FILE" && + extract_body "$HKD_FILE" "$BODY_FILE" && + verify_signature "$ISSUER_PUBKEY_FILE" "$SIGNATURE_FILE" "$BODY_FILE" || + exit 1 # Verify the issuer -canonical_dn x509 $HKD_FILE issuer $ISSUER_DN_FILE -canonical_dn x509 $HKSK_FILE subject $SUBJECT_DN_FILE +canonical_dn x509 "$HKD_FILE" issuer "$ISSUER_DN_FILE" +canonical_dn x509 "$HKSK_FILE" subject "$SUBJECT_DN_FILE" verify_issuer_files $CHECK_DEFAULT_ISSUER # Verify dates -verify_dates $(cert_time $HKD_FILE startdate) $(cert_time $HKD_FILE enddate) +verify_dates 
"$(cert_time "$HKD_FILE" startdate)" "$(cert_time "$HKD_FILE" enddate)" # Check CRL if specified -if [ -n "$CRL_FILE" ] -then - echo -n "Checking CRL signature: " - extract_signature $CRL_FILE $SIGNATURE_FILE && - extract_body $CRL_FILE $BODY_FILE && - verify_signature $ISSUER_PUBKEY_FILE $SIGNATURE_FILE $BODY_FILE || - exit 1 +if [ -n "$CRL_FILE" ]; then + printf "Checking CRL signature: " + extract_signature "$CRL_FILE" "$SIGNATURE_FILE" && + extract_body "$CRL_FILE" "$BODY_FILE" && + verify_signature "$ISSUER_PUBKEY_FILE" "$SIGNATURE_FILE" "$BODY_FILE" || + exit 1 - echo -n "CRL " - canonical_dn crl $CRL_FILE issuer $ISSUER_DN_FILE - canonical_dn x509 $HKSK_FILE subject $SUBJECT_DN_FILE + printf "CRL " + canonical_dn crl "$CRL_FILE" issuer "$ISSUER_DN_FILE" + canonical_dn x509 "$HKSK_FILE" subject "$SUBJECT_DN_FILE" verify_issuer_files $CHECK_DEFAULT_ISSUER - verify_dates $(crl_time $CRL_FILE lastupdate) $(crl_time $CRL_FILE nextupdate) 'CRL' + verify_dates "$(crl_time "$CRL_FILE" lastupdate)" "$(crl_time "$CRL_FILE" nextupdate)" 'CRL' - crl_serials $CRL_FILE - check_serial $HKD_FILE && - echo "Certificate is revoked, do not use it anymore!" >&2 && - exit 1 + crl_serials "$CRL_FILE" + check_serial "$HKD_FILE" && + echo "Certificate is revoked, do not use it anymore!" 
>&2 && + exit 1 fi # We made it -echo All checks reqested for \'$HKD_FILE\' were successful +echo All checks requested for \'"$HKD_FILE"\' were successful diff -Nru s390-tools-2.31.0/genprotimg/src/include/pv_crypto_def.h s390-tools-2.33.1/genprotimg/src/include/pv_crypto_def.h --- s390-tools-2.31.0/genprotimg/src/include/pv_crypto_def.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/genprotimg/src/include/pv_crypto_def.h 2024-05-28 08:26:36.000000000 +0200 @@ -17,7 +17,8 @@ /* IBM signing key subject */ #define PV_IBM_Z_SUBJECT_COMMON_NAME "International Business Machines Corporation" #define PV_IBM_Z_SUBJECT_COUNTRY_NAME "US" -#define PV_IBM_Z_SUBJECT_LOCALITY_NAME "Poughkeepsie" +#define PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE "Poughkeepsie" +#define PV_IBM_Z_SUBJECT_LOCALITY_NAME_ARMONK "Armonk" #define PV_IBM_Z_SUBJECT_ORGANIZATIONONAL_UNIT_NAME_SUFFIX "Key Signing Service" #define PV_IBM_Z_SUBJECT_ORGANIZATION_NAME "International Business Machines Corporation" #define PV_IBM_Z_SUBJECT_STATE "New York" diff -Nru s390-tools-2.31.0/genprotimg/src/Makefile s390-tools-2.33.1/genprotimg/src/Makefile --- s390-tools-2.31.0/genprotimg/src/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/genprotimg/src/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -3,7 +3,7 @@ bin_PROGRAM = genprotimg -PKGDATADIR ?= "$(DESTDIR)$(TOOLS_DATADIR)/genprotimg" +PKGDATADIR ?= "$(TOOLS_DATADIR)/genprotimg" SRC_DIR := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) TOP_SRCDIR := $(SRC_DIR)/../ ROOT_DIR = $(TOP_SRC_DIR)/../../ diff -Nru s390-tools-2.31.0/genprotimg/src/utils/crypto.c s390-tools-2.33.1/genprotimg/src/utils/crypto.c --- s390-tools-2.31.0/genprotimg/src/utils/crypto.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/genprotimg/src/utils/crypto.c 2024-05-28 08:26:36.000000000 +0200 @@ -664,62 +664,9 @@ return memcmp(data, y, data_len) == 0; } -static gboolean own_X509_NAME_ENTRY_equal(const X509_NAME_ENTRY *x, - const X509_NAME_ENTRY *y) 
-{ - const ASN1_OBJECT *x_obj = X509_NAME_ENTRY_get_object(x); - const ASN1_STRING *x_data = X509_NAME_ENTRY_get_data(x); - const ASN1_OBJECT *y_obj = X509_NAME_ENTRY_get_object(y); - const ASN1_STRING *y_data = X509_NAME_ENTRY_get_data(y); - gint x_len = ASN1_STRING_length(x_data); - gint y_len = ASN1_STRING_length(y_data); - - if (x_len < 0 || x_len != y_len) - return FALSE; - - /* ASN1_STRING_cmp(x_data, y_data) == 0 doesn't work because it also - * compares the type, which is sometimes different. - */ - return OBJ_cmp(x_obj, y_obj) == 0 && - memcmp(ASN1_STRING_get0_data(x_data), - ASN1_STRING_get0_data(y_data), - (unsigned long)x_len) == 0; -} - -static gboolean own_X509_NAME_equal(const X509_NAME *x, const X509_NAME *y) -{ - gint x_count = X509_NAME_entry_count(x); - gint y_count = X509_NAME_entry_count(y); - - if (x != y && (!x || !y)) - return FALSE; - - if (x_count != y_count) - return FALSE; - - for (gint i = 0; i < x_count; i++) { - const X509_NAME_ENTRY *entry_i = X509_NAME_get_entry(x, i); - gboolean entry_found = FALSE; - - for (gint j = 0; j < y_count; j++) { - const X509_NAME_ENTRY *entry_j = - X509_NAME_get_entry(y, j); - - if (own_X509_NAME_ENTRY_equal(entry_i, entry_j)) { - entry_found = TRUE; - break; - } - } - - if (!entry_found) - return FALSE; - } - return TRUE; -} - /* Checks whether the subject of @cert is a IBM signing key subject. For this we * must check that the subject is equal to: 'C = US, ST = New York, L = - * Poughkeepsie, O = International Business Machines Corporation, CN = + * Poughkeepsie or Armonk, O = International Business Machines Corporation, CN = * International Business Machines Corporation' and the organization unit (OUT) * must end with the suffix ' Key Signing Service'. 
*/ @@ -743,8 +690,10 @@ PV_IBM_Z_SUBJECT_STATE)) return FALSE; - if (!x509_name_data_by_nid_equal(subject, NID_localityName, - PV_IBM_Z_SUBJECT_LOCALITY_NAME)) + if (!(x509_name_data_by_nid_equal(subject, NID_localityName, + PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE) || + x509_name_data_by_nid_equal(subject, NID_localityName, + PV_IBM_Z_SUBJECT_LOCALITY_NAME_ARMONK))) return FALSE; if (!x509_name_data_by_nid_equal(subject, NID_organizationName, @@ -806,6 +755,39 @@ return g_steal_pointer(&ret); } +/** Replace locality 'Armonk' with 'Poughkeepsie'. If Armonk was not set return + * `NULL`. + */ +static X509_NAME *x509_armonk_locality_fixup(const X509_NAME *name) +{ + g_autoptr(X509_NAME) ret = NULL; + int pos; + + /* Check if ``L=Armonk`` */ + if (!x509_name_data_by_nid_equal((X509_NAME *)name, NID_localityName, + PV_IBM_Z_SUBJECT_LOCALITY_NAME_ARMONK)) + return NULL; + + ret = X509_NAME_dup((X509_NAME *)name); + if (!ret) + g_abort(); + + pos = X509_NAME_get_index_by_NID(ret, NID_localityName, -1); + if (pos == -1) + return NULL; + + X509_NAME_ENTRY_free(X509_NAME_delete_entry(ret, pos)); + + /* Create a new name entry at the same position as before */ + if (X509_NAME_add_entry_by_NID( + ret, NID_localityName, MBSTRING_UTF8, + (const unsigned char *)&PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE, + sizeof(PV_IBM_Z_SUBJECT_LOCALITY_NAME_POUGHKEEPSIE) - 1, pos, 0) != 1) + return NULL; + + return g_steal_pointer(&ret); +} + /* In RFC 5280 the attributes of a (subject/issuer) name is not mandatory * ordered. The problem is that our certificates are not consistent in the order * (see https://tools.ietf.org/html/rfc5280#section-4.1.2.4 for details).
@@ -828,24 +810,10 @@ return X509_NAME_dup((X509_NAME *)name); } -/* Verify that: subject(issuer) == issuer(crl) and SKID(issuer) == AKID(crl) */ +/* Verify that SKID(issuer) == AKID(crl) if available */ static gint check_crl_issuer(X509_CRL *crl, X509 *issuer, GError **err) { - const X509_NAME *crl_issuer = X509_CRL_get_issuer(crl); - const X509_NAME *issuer_subject = X509_get_subject_name(issuer); - AUTHORITY_KEYID *akid = NULL; - - if (!own_X509_NAME_equal(issuer_subject, crl_issuer)) { - g_autofree char *issuer_subject_str = X509_NAME_oneline(issuer_subject, - NULL, 0); - g_autofree char *crl_issuer_str = X509_NAME_oneline(crl_issuer, NULL, 0); - - g_set_error(err, PV_CRYPTO_ERROR, - PV_CRYPTO_ERROR_CRL_SUBJECT_ISSUER_MISMATCH, - _("issuer mismatch:\n%s\n%s"), - issuer_subject_str, crl_issuer_str); - return -1; - } + g_autoptr(AUTHORITY_KEYID) akid = NULL; /* If AKID(@crl) is specified it must match with SKID(@issuer) */ akid = X509_CRL_get_ext_d2i(crl, NID_authority_key_identifier, NULL, NULL); @@ -881,7 +849,6 @@ return -1; } - /* check that the @crl issuer matches with the subject name of @cert*/ if (check_crl_issuer(crl, cert, err) < 0) return -1; @@ -910,6 +877,60 @@ return 0; } +/* This function contains work-arounds for some known subject(CRT)<->issuer(CRL) + * issues. + */ +static STACK_OF_X509_CRL *quirk_X509_STORE_ctx_get1_crls(X509_STORE_CTX *ctx, + const X509_NAME *subject, GError **err) +{ + g_autoptr(X509_NAME) fixed_subject = NULL; + g_autoptr(STACK_OF_X509_CRL) ret = NULL; + + ret = Pv_X509_STORE_CTX_get1_crls(ctx, subject); + if (ret && sk_X509_CRL_num(ret) > 0) + return g_steal_pointer(&ret); + + /* Workaround to fix the mismatch between issuer name of the * IBM + * signing CRLs and the IBM signing key subject name. Locality name has + * changed from Poughkeepsie to Armonk. + */ + fixed_subject = x509_armonk_locality_fixup(subject); + /* Was the locality replaced? 
*/ + if (fixed_subject) { + X509_NAME *tmp; + + sk_X509_CRL_free(ret); + ret = Pv_X509_STORE_CTX_get1_crls(ctx, fixed_subject); + if (ret && sk_X509_CRL_num(ret) > 0) + return g_steal_pointer(&ret); + + /* Workaround to fix the ordering mismatch between issuer name + * of the IBM signing CRLs and the IBM signing key subject name. + */ + tmp = fixed_subject; + fixed_subject = c2b_name(fixed_subject); + X509_NAME_free(tmp); + sk_X509_CRL_free(ret); + ret = Pv_X509_STORE_CTX_get1_crls(ctx, fixed_subject); + if (ret && sk_X509_CRL_num(ret) > 0) + return g_steal_pointer(&ret); + X509_NAME_free(fixed_subject); + fixed_subject = NULL; + } + + /* Workaround to fix the ordering mismatch between issuer name of the + * IBM signing CRLs and the IBM signing key subject name. + */ + fixed_subject = c2b_name(subject); + sk_X509_CRL_free(ret); + ret = Pv_X509_STORE_CTX_get1_crls(ctx, fixed_subject); + if (ret && sk_X509_CRL_num(ret) > 0) + return g_steal_pointer(&ret); + + g_set_error(err, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_NO_CRL, _("no CRL found")); + return NULL; +} + /* Given a certificate @cert try to find valid revocation lists in @ctx. If no * valid CRL was found NULL is returned. */ @@ -927,20 +948,9 @@ return NULL; } - ret = X509_STORE_CTX_get1_crls(ctx, subject); - if (!ret) { - /* Workaround to fix the mismatch between issuer name of the - * IBM Z signing CRLs and the IBM Z signing key subject name. - */ - g_autoptr(X509_NAME) broken_subject = c2b_name(subject); - - ret = X509_STORE_CTX_get1_crls(ctx, broken_subject); - if (!ret) { - g_set_error(err, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_NO_CRL, - _("no CRL found")); - return NULL; - } - } + ret = quirk_X509_STORE_ctx_get1_crls(ctx, subject, err); + if (!ret) + return NULL; /* Filter out non-valid CRLs for @cert */ for (gint i = 0; i < sk_X509_CRL_num(ret); i++) { @@ -1328,32 +1338,14 @@ /* It's almost the same as X509_check_issed from OpenSSL does except that we * don't check the key usage of the potential issuer. 
This means we check - * 1. issuer_name(cert) == subject_name(issuer) - * 2. Check whether the akid(cert) (if available) matches the issuer skid - * 3. Check that the cert algrithm matches the subject algorithm - * 4. Verify the signature of certificate @cert is using the public key of + * 1. Check whether the akid(cert) (if available) matches the issuer skid + * 2. Check that the cert algorithm matches the subject algorithm + * 3. Verify the signature of certificate @cert is using the public key of * @issuer. */ static gint check_host_key_issued(X509 *cert, X509 *issuer, GError **err) { - const X509_NAME *issuer_subject = X509_get_subject_name(issuer); - const X509_NAME *cert_issuer = X509_get_issuer_name(cert); - AUTHORITY_KEYID *akid = NULL; - - /* We cannot use X509_NAME_cmp() because it considers the order of the - * X509_NAME_Entries. - */ - if (!own_X509_NAME_equal(issuer_subject, cert_issuer)) { - g_autofree char *issuer_subject_str = - X509_NAME_oneline(issuer_subject, NULL, 0); - g_autofree char *cert_issuer_str = - X509_NAME_oneline(cert_issuer, NULL, 0); - g_set_error(err, PV_CRYPTO_ERROR, - PV_CRYPTO_ERROR_CERT_SUBJECT_ISSUER_MISMATCH, - _("Subject issuer mismatch:\n'%s'\n'%s'"), - issuer_subject_str, cert_issuer_str); - return -1; - } + g_autoptr(AUTHORITY_KEYID) akid = NULL; akid = X509_get_ext_d2i(cert, NID_authority_key_identifier, NULL, NULL); if (akid && X509_check_akid(issuer, akid) != X509_V_OK) { @@ -1834,14 +1826,12 @@ g_assert(out_len >= 0); num_bytes_written = BIO_write(b_out, out_buf, out_len); - if (num_bytes_written < 0) { + if (num_bytes_written != out_len) { g_set_error(err, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_INTERNAL, _("Failed to write")); return -1; } - g_assert(num_bytes_written == out_len); - tmp_size_out += (guint)num_bytes_written; /* Set new tweak value.
Please keep in mind that the diff -Nru s390-tools-2.31.0/genprotimg/src/utils/crypto.h s390-tools-2.33.1/genprotimg/src/utils/crypto.h --- s390-tools-2.31.0/genprotimg/src/utils/crypto.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/genprotimg/src/utils/crypto.h 2024-05-28 08:26:36.000000000 +0200 @@ -75,6 +75,7 @@ /* Register auto cleanup functions */ WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(ASN1_INTEGER, ASN1_INTEGER_free) WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(ASN1_OCTET_STRING, ASN1_OCTET_STRING_free) +WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(AUTHORITY_KEYID, AUTHORITY_KEYID_free) WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(BIGNUM, BN_free) WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(BIO, BIO_free_all) WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(BN_CTX, BN_CTX_free) diff -Nru s390-tools-2.31.0/.gitignore s390-tools-2.33.1/.gitignore --- s390-tools-2.31.0/.gitignore 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/.gitignore 2024-05-28 08:26:36.000000000 +0200 @@ -92,6 +92,7 @@ vmur/vmur zconf/chp/chchp zconf/chp/lschp +zconf/chp/chpstat/chpstat zconf/css/lscss zconf/qeth/lsqeth zconf/scm/lsscm diff -Nru s390-tools-2.31.0/include/boot/os_info.h s390-tools-2.33.1/include/boot/os_info.h --- s390-tools-2.31.0/include/boot/os_info.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/boot/os_info.h 2024-05-28 08:26:36.000000000 +0200 @@ -24,11 +24,25 @@ #define OS_INFO_VMCOREINFO 0 #define OS_INFO_REIPL_BLOCK 1 #define OS_INFO_FLAGS_ENTRY 2 +#define OS_INFO_RESERVED 3 +#define OS_INFO_IDENTITY_BASE 4 +#define OS_INFO_KASLR_OFFSET 5 +#define OS_INFO_KASLR_OFF_PHYS 6 +#define OS_INFO_VMEMMAP 7 +#define OS_INFO_AMODE31_START 8 +#define OS_INFO_AMODE31_END 9 +#define OS_INFO_IMAGE_START 10 +#define OS_INFO_IMAGE_END 11 +#define OS_INFO_IMAGE_PHYS 12 +#define OS_INFO_MAX 13 #define OS_INFO_FLAG_REIPL_CLEAR (1UL << 0) struct os_info_entry { - uint64_t addr; + union { + uint64_t addr; + uint64_t val; + }; uint64_t size; uint32_t csum; } __packed; @@ -40,8 +54,8 
@@ uint16_t version_minor; uint64_t crashkernel_addr; uint64_t crashkernel_size; - struct os_info_entry entry[3]; - uint8_t reserved[4004]; + struct os_info_entry entry[OS_INFO_MAX]; + uint8_t reserved[3804]; } __packed; /* diff -Nru s390-tools-2.31.0/include/dump/s390_dump.h s390-tools-2.33.1/include/dump/s390_dump.h --- s390-tools-2.31.0/include/dump/s390_dump.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/dump/s390_dump.h 2024-05-28 08:26:36.000000000 +0200 @@ -26,6 +26,17 @@ #define DF_S390_EM_STR "DUMP_END" #define DF_S390_CPU_MAX 512 #define DF_S390_MAGIC_BLK_ECKD 3 +#define DF_S390_DUMPER_MAGIC_SIZE 7 +#define DF_S390_DUMPER_MAGIC32 "ZECKD31" +#define DF_S390_DUMPER_MAGIC64 "ZECKD64" +#define DF_S390_DUMPER_MAGIC_EXT "XECKD64" +#define DF_S390_DUMPER_MAGIC32_FBA "ZDFBA31" +#define DF_S390_DUMPER_MAGIC64_FBA "ZDFBA64" +#define DF_S390_DUMPER_MAGIC_FBA_EXT "XDFBA64" +#define DF_S390_DUMPER_MAGIC_MV "ZMULT64" +#define DF_S390_DUMPER_MAGIC_MV_EXT "XMULT64" +#define OLD_DUMPER_HEX_INSTR1 "\x0d\x10\x47\xf0" /* BASR + 1st halfword of BC */ +#define OLD_DUMPER_HEX_INSTR2 "\x0d\xd0" /* BASR 13,0 */ /* * Architecture of dumped system diff -Nru s390-tools-2.31.0/include/lib/ap.h s390-tools-2.33.1/include/lib/ap.h --- s390-tools-2.31.0/include/lib/ap.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/lib/ap.h 2024-05-28 08:26:36.000000000 +0200 @@ -21,7 +21,9 @@ #define AP_UDEV_FILE "/etc/udev/rules.d/41-ap.rules" #define AP_LOCKFILE "/run/lock/s390apconfig.lock" -#define AP_LOCK_RETRIES 15 +#define AP_LOCK_RETRIES 3000 +#define AP_LOCK_DELAY_US 30000 /* wait at least 30ms between lock retries */ +#define AP_LOCK_VARIANCE_US 3000 /* or as much as 33ms */ /* apmask and aqmask are each represented as 67 character strings with: * '0x' leading characters diff -Nru s390-tools-2.31.0/include/lib/util_arch.h s390-tools-2.33.1/include/lib/util_arch.h --- s390-tools-2.31.0/include/lib/util_arch.h 2024-02-02 17:49:44.000000000 +0100 +++ 
s390-tools-2.33.1/include/lib/util_arch.h 2024-05-28 08:26:36.000000000 +0200 @@ -27,6 +27,7 @@ UTIL_ARCH_MACHINE_TYPE_Z15 = 8561, UTIL_ARCH_MACHINE_TYPE_Z15_T02 = 8562, UTIL_ARCH_MACHINE_TYPE_Z16 = 3931, + UTIL_ARCH_MACHINE_TYPE_Z16_A02 = 3932, }; int util_arch_machine_type(void); diff -Nru s390-tools-2.31.0/include/lib/util_base.h s390-tools-2.33.1/include/lib/util_base.h --- s390-tools-2.31.0/include/lib/util_base.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/lib/util_base.h 2024-05-28 08:26:36.000000000 +0200 @@ -14,7 +14,10 @@ #include #include +#include + #include "zt_common.h" +#include "lib/util_libc.h" void util_hexdump(FILE *fh, const char *tag, const void *data, int cnt); void util_hexdump_grp(FILE *fh, const char *tag, const void *data, int group, @@ -37,4 +40,30 @@ free(ptr_vec); } +/* + * Expand size of dynamic array (element_t *) by one element + * + * @param[in,out] array Pointer to array (element_t **) + * @param[in,out] num Pointer to integer containing number of elements + */ +#define util_expand_array(array, num) \ + do { \ + unsigned int __size = sizeof(*(*(array))); \ + *(array) = util_realloc(*(array), ++(*(num)) * __size); \ + memset(&((*(array))[*(num) - 1]), 0, __size); \ + } while (0) + +/* + * Append one element to dynamic array (element_t *) + * + * @param[in,out] array Pointer to array (element_t **) + * @param[in,out] num Pointer to integer containing number of elements + * @param[in] element Element to add (element_t) + */ +#define util_add_array(array, num, element) \ + do { \ + util_expand_array(array, num); \ + (*(array))[*(num) - 1] = (element) ; \ + } while (0) + #endif /* LIB_UTIL_BASE_H */ diff -Nru s390-tools-2.31.0/include/lib/util_fmt.h s390-tools-2.33.1/include/lib/util_fmt.h --- s390-tools-2.31.0/include/lib/util_fmt.h 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/include/lib/util_fmt.h 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,235 @@ +/* + * util_fmt - Format structured key-value 
data as JSON, text pairs, or CSV + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + * + * This module provides helper functions for converting structured key-value + * data into different output formats. + * + * Benefits: + * - Output format can be dynamically configured at run-time + * - Callers do not need to add extra code for each output format + * - Some format-specific requirements such as quoting, indentation, and + * comma-placement are automated + * + * Basic API calling sequence: + * + * util_fmt_init() => Select output format + * util_fmt_obj_start() => Start a new object or list + * util_fmt_pair() => Emit a key-value pair + * util_fmt_obj_end() => End the latest object or list + * util_fmt_exit() => Cleanup + * + * Note: + * - Supported data elements are objects, lists and key-value pairs (mappings) + * - Scalars are only supported as part of a mapping + * - For CSV output and key filtering, mapping keys must be unique - this can + * be achieved either by choosing unique key names or by including object + * names via the FMT_PREFIX flag + * - For CSV output, at least one object or list with the FMT_ROW flag must be + * emitted + * - Common tool-specific meta-information such as API-level, tool version, + * etc. is automatically added to the output + */ + +#ifndef LIB_UTIL_FMT_H +#define LIB_UTIL_FMT_H + +#include +#include + +/* Flag value for default behavior (all flag types). */ +#define FMT_DEFAULT 0 + +/* Names of supported output format types. */ +#define FMT_TYPE_NAMES "json json-seq pairs csv" + +/** + * enum util_fmt_t - Output format types. 
+ * @FMT_JSON: JavaScript Object Notation output data structure + * @FMT_JSONSEQ: Sequence of JSON data structures according to RFC7464 + * @FMT_PAIRS: Textual key=value pairs + * @FMT_CSV: Comma-separated-values output + * + * Use these types with util_fmt_init() to control the output format. + */ +enum util_fmt_t { + FMT_JSON, + FMT_JSONSEQ, + FMT_PAIRS, + FMT_CSV, +}; + +/** + * enum util_fmt_flags_t - Format control flags. + * @FMT_NOPREFIX: (pairs) Remove object hierarchy prefix from keys + * @FMT_KEEPINVAL: (all) Print mappings even if value is marked as invalid + * Values will be replaced with null (JSON) or an empty + * string + * @FMT_QUOTEALL: (all) Add quotes to all mapping values + * @FMT_FILTER: (all) Ignore keys not announced via util_fmt_add_key() + * @FMT_HANDLEINT: (json) Ensure correct JSON closure when interrupted + * @FMT_NOMETA: (all) Do not emit tool meta-data + * @FMT_WARN: (all) Warn about incorrect API usage + * + * Use these flags with util_fmt_init() to control generic aspects. + */ +enum util_fmt_flags_t { + FMT_NOPREFIX = (1 << 0), + FMT_KEEPINVAL = (1 << 1), + FMT_QUOTEALL = (1 << 2), + FMT_FILTER = (1 << 3), + FMT_HANDLEINT = (1 << 4), + FMT_NOMETA = (1 << 5), + FMT_WARN = (1 << 6), +}; + +/** + * enum util_fmt_oflags_t - Object flags. + * @FMT_LIST: (all) Object is a list + * @FMT_ROW: (csv) Start a new CSV row with this object + * @FMT_PREFIX: (all) Include object name in key prefix for CSV headings + * and filter keys + * + * Use these flags with util_fmt_obj_start() to control object related + * aspects. + */ +enum util_fmt_oflags_t { + FMT_LIST = (1 << 0), + FMT_ROW = (1 << 1), + FMT_PREFIX = (1 << 2), +}; + +/** + * enum util_fmt_mflags_t - Mapping flags. + * @FMT_QUOTE: (all) Quote value + * @FMT_INVAL: (all) Mark value as invalid + * @FMT_PERSIST: (csv) Keep value across CSV rows until overwritten + * + * Use these flags with util_fmt_pair() to control mapping related aspects. 
+ */ +enum util_fmt_mflags_t { + FMT_QUOTE = (1 << 0), + FMT_INVAL = (1 << 1), + FMT_PERSIST = (1 << 2), +}; + +/** + * util_fmt_init() - Initialize output formatter. + * @fd : Output file descriptor + * @type : Output format type + * @flags: Formatting parameters + * @api_level: Output format level indicator + * + * Prepare for writing formatted output with the given @type to @fd. Additional + * @flags can be specified to control certain output aspects (see &enum + * util_fmt_flags_t). + * + * @api_level represents an application-specific output format version number: + * this number starts at 1 and must be increased whenever an incompatible format + * change is introduced, e.g. when a non-optional object or mapping is removed + * or used for different data. + */ +void util_fmt_init(FILE *fd, enum util_fmt_t type, unsigned int flags, + int api_level); + +/** + * util_fmt_exit() - Release resources used by output formatter. + * + * Release all resources currently in use by the output formatter. + */ +void util_fmt_exit(void); + +/** + * util_fmt_name_to_type() - Convert format name to type identifier. + * @name: Format name + * @type: Pointer to resulting format type identifier + * + * Search supported output format types for a type with associated @name. If + * found, store resulting type identifier in @type. + * + * Return: %true if type is found, %false otherwise. + */ +bool util_fmt_name_to_type(const char *name, enum util_fmt_t *type); + +/** + * util_fmt_set_indent() - Set indentation parameters. + * @base : Base indentation level to apply to all output lines (default 0) + * @width : Number of indentation characters per indentation level (default 2) + * @ind_char: Indentation characters to use (default space). + */ +void util_fmt_set_indent(unsigned int base, unsigned int width, char ind_char); + +/** + * util_fmt_add_key() - Register expected mapping keys.
+ * @fmt: Format string to generate key + * + * Register a mapping key before the associated key-value pair is emitted. + * + * Use this function together with format control flag @FMT_FILTER to ignore all + * key-value pairs for which the key has not been registered. This can be + * useful to allow for dynamically configured filtering of output based on + * a static list of emitted mappings. + * + * When creating CSV output, use this function to register all column keys + * in advance to enable a stable column list in case of rows that do not + * provide data for all columns. + */ +void util_fmt_add_key(const char *fmt, ...); + +/** + * util_fmt_obj_start() - Start a new data object. + * @oflags: Flags controlling aspects of this object. + * @fmt : Format string for generating an object name or %NULL. + * + * Use this function to start a new object in output data. Depending on + * @oflags, the new object represents either a normal object or a list. @oflags + * can also be used to indicate that an object corresponds to a new row of + * CSV data. If @fmt is non-%NULL, the resulting name is used in a format + * type specified way: + * + * Pairs: + * - Object names are reflected as dot-separated component in the mapping + * prefix, e.g. 'a.b.key=value' + * - An index is generated for mappings and objects that are part of list, + * e.g. 'a.b[1].key=value' + * JSON: + * - Object names are reflected as key-object mappings, e.g. + * : { } + * - Required commas between objects and mappings are automatically generated + * CSV: + * - Object names and the list type flag have no effect + * - When flag @FMT_ROW is specified, a CSV row will be emitted when + * util_fmt_obj_end() is called for the associated object + */ +void util_fmt_obj_start(unsigned int oflags, const char *fmt, ...); + +/** + * util_fmt_obj_end() - Announce the end of the latest data object started.
+ * + * Each object started with util_fmt_obj_start() must be ended with an + * associated util_fmt_obj_end() call. + */ +void util_fmt_obj_end(void); + +/** + * util_fmt_pair() - Emit a key-value pair. + * @mflags: Flags controlling this pair. + * @key : Key for this pair, excluding prefix. + * @fmt : Format string used to generate the pair value. + * + * Emit a key-value pair with the specified @key and the value that results + * from format string @fmt. + * + * Notes: + * - For JSON, a mapping can only occur after util_fmt_obj_start() + * - For CSV, each @key must be unique, either by choosing unique key names + * or by including object names as prefix via the use of FMT_PREFIX in + * parent objects + */ +void util_fmt_pair(unsigned int mflags, const char *key, const char *fmt, ...); + +#endif /* LIB_UTIL_FMT_H */ diff -Nru s390-tools-2.31.0/include/lib/util_libc.h s390-tools-2.33.1/include/lib/util_libc.h --- s390-tools-2.31.0/include/lib/util_libc.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/lib/util_libc.h 2024-05-28 08:26:36.000000000 +0200 @@ -127,6 +127,7 @@ int __util_vsprintf(const char *func, const char *file, int line, char *str, const char *fmt, va_list ap); char *util_strcat_realloc(char *str1, const char *str2); +void util_concatf(char **str1, const char *fmt, ...); void util_str_toupper(char *str); char *util_strstrip(char *s); diff -Nru s390-tools-2.31.0/include/lib/util_lockfile.h s390-tools-2.33.1/include/lib/util_lockfile.h --- s390-tools-2.31.0/include/lib/util_lockfile.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/lib/util_lockfile.h 2024-05-28 08:26:36.000000000 +0200 @@ -18,7 +18,11 @@ #define UTIL_LOCKFILE_ERR 4 /* Other, unexpected error conditions */ int util_lockfile_lock(char *lockfile, int retries); +int util_lockfile_lock_cw(char *lockfile, int retries, unsigned int waitinc, + unsigned int maxwait); int util_lockfile_parent_lock(char *lockfile, int retries); +int
util_lockfile_parent_lock_cw(char *lockfile, int retries, + unsigned int waitinc, unsigned int maxwait); int util_lockfile_release(char *lockfile); int util_lockfile_parent_release(char *lockfile); diff -Nru s390-tools-2.31.0/include/lib/vtoc.h s390-tools-2.33.1/include/lib/vtoc.h --- s390-tools-2.31.0/include/lib/vtoc.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/lib/vtoc.h 2024-05-28 08:26:36.000000000 +0200 @@ -40,6 +40,7 @@ #define LV_COMPAT_CYL 0xFFFE #define VTOC_ERROR "VTOC error:" +#define MAX_VTOC_ENTRIES 9 /* max number of VTOC labels for cdl formatted DASD */ typedef struct ttr { diff -Nru s390-tools-2.31.0/include/libpv/cert.h s390-tools-2.33.1/include/libpv/cert.h --- s390-tools-2.31.0/include/libpv/cert.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/libpv/cert.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,439 +0,0 @@ -/* - * Certificate functions and definitions. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -#ifndef LIBPV_CERT_H -#define LIBPV_CERT_H - -#include -#include - -#include "libpv/common.h" - -#define PV_IBM_Z_SUBJECT_COMMON_NAME "International Business Machines Corporation" -#define PV_IBM_Z_SUBJECT_COUNTRY_NAME "US" -#define PV_IBM_Z_SUBJECT_LOCALITY_NAME "Poughkeepsie" -#define PV_IBM_Z_SUBJECT_ORGANIZATIONAL_UNIT_NAME_SUFFIX "Key Signing Service" -#define PV_IBM_Z_SUBJECT_ORGANIZATION_NAME "International Business Machines Corporation" -#define PV_IBM_Z_SUBJECT_STATE "New York" -#define PV_IMB_Z_SUBJECT_ENTRY_COUNT 6 - -/* Minimum security level for the keys/certificates used to establish a chain of - * trust (see https://www.openssl.org/docs/man1.1.1/man3/X509_VERIFY_PARAM_set_auth_level.html - * for details). - */ -#define PV_CERTS_SECURITY_LEVEL 2 - -/** pv_cert_init: - * - * Should not be called by user. 
- * Use pv_init() instead which - * calls this function during creation. - * - * Sets up data structures for caching CRLs. - */ -void pv_cert_init(void); - -/** pv_cert_cleanup: - * - * Should not be called by user. - * Use pv_cleanup() instead which - * calls this function during creation. - * - * Cleans up data structures for caching CRLs. - */ -void pv_cert_cleanup(void); - -#define PV_CERT_ERROR g_quark_from_static_string("pv-cert-error-quark") -typedef enum { - PV_CERT_ERROR_CERT_REVOKED, - PV_CERT_ERROR_CERT_SIGNATURE_INVALID, - PV_CERT_ERROR_CERT_SUBJECT_ISSUER_MISMATCH, - PV_CERT_ERROR_CRL_DOWNLOAD_FAILED, - PV_CERT_ERROR_CRL_SIGNATURE_INVALID, - PV_CERT_ERROR_CRL_SUBJECT_ISSUER_MISMATCH, - PV_CERT_ERROR_FAILED_DOWNLOAD_CRL, - PV_CERT_ERROR_INTERNAL, - PV_CERT_ERROR_INVALID_PARM, - PV_CERT_ERROR_INVALID_SIGNATURE_ALGORITHM, - PV_CERT_ERROR_INVALID_VALIDITY_PERIOD, - PV_CERT_ERROR_LOAD_CRL, - PV_CERT_ERROR_LOAD_DEFAULT_CA, - PV_CERT_ERROR_LOAD_ROOT_CA, - PV_CERT_ERROR_MALFORMED_CERTIFICATE, - PV_CERT_ERROR_MALFORMED_ROOT_CA, - PV_CERT_ERROR_NO_CRL, - PV_CERT_ERROR_NO_CRLDP, - PV_CERT_ERROR_NO_IBM_Z_SIGNING_KEY, - PV_CERT_ERROR_NO_ISSUER_IBM_Z_FOUND, - PV_CERT_ERROR_NO_PUBLIC_KEY, - PV_CERT_ERROR_READ_CERTIFICATE, - PV_CERT_ERROR_READ_CRL, - PV_CERT_ERROR_SIGNATURE_ALGORITHM_MISMATCH, - PV_CERT_ERROR_SKID_AKID_MISMATCH, - PV_CERT_ERROR_VERIFICATION_FAILED, - PV_CERT_ERROR_WRONG_CA_USED, -} PvCertErrors; - -/** PvX509WithPath - X509 certificate associated with a path - */ -typedef struct { - X509 *cert; - char *path; -} PvX509WithPath; - -/** pv_x509_with_path_new: - * - * @cert: X509 certificate - * @path: Path of that X509 certificate - * - * Returns: (nullable) (transfer full): new X509 with path - */ -PvX509WithPath *pv_x509_with_path_new(X509 *cert, const char *path); - -/** pv_x509_with_path_free: - * - * Frees the path and the PvX509WithPath; Decreases the refcount of the X509 - */ -void pv_x509_with_path_free(PvX509WithPath *cert); - -typedef 
STACK_OF(DIST_POINT) STACK_OF_DIST_POINT; -typedef STACK_OF(X509) STACK_OF_X509; -typedef STACK_OF(X509_CRL) STACK_OF_X509_CRL; -typedef GSList PvCertWithPathList; - -typedef struct { - X509 *cert; - STACK_OF_X509_CRL *crls; -} PvX509Pair; - -/** pv_x509_pair_new_take: - * @cert: ptr to X509 - * @crls: ptr to CRLs - * - * Takes a X509 and the associated CRLs and builds a pair. - * Both, *cert and *crls will be NULL afterwards, and owned by the pair. - * - * Returns: (nullable) (transfer full): New PvX509Pair - */ -PvX509Pair *pv_x509_pair_new_take(X509 **cert, STACK_OF_X509_CRL **crls); - -/** pv_x509_pair_free: - * - * Decreases the refcount of the X509 and crls. - * Frees the PvX509Pair. - */ -void pv_x509_pair_free(PvX509Pair *pair); - -void STACK_OF_DIST_POINT_free(STACK_OF_DIST_POINT *stack); -void STACK_OF_X509_free(STACK_OF_X509 *stack); -void STACK_OF_X509_CRL_free(STACK_OF_X509_CRL *stack); - -/** pv_x509_from_pem_der_data: - * - * @data: GBytes containing the cert in PEM format - * @error: return location for a #GError - * - * Returns: (nullable) (transfer full): X509 cert - */ -X509 *pv_x509_from_pem_der_data(GBytes *data, GError **error); - -/** pv_x509_get_ec_pubkey: - * - * @cert: X509 to extract elliptic curve pubkey from - * @nid: numerical identifier of the expected curve - * @error: return location for a #GError - * - * Returns: (nullable) (transfer full): corresponding pupkey for the given certificate - */ -EVP_PKEY *pv_x509_get_ec_pubkey(X509 *cert, int nid, GError **error); - -/** pv_get_ec_pubkeys: - * - * @certs_with_path: List of PvX509WithPath - * @nid: numerical identifier of the expected curve - * @error: return location for a #GError - * - * Returns: (nullable) (transfer full): List of corresponding public keys for the given certificate - */ -GSList *pv_get_ec_pubkeys(PvCertWithPathList *certs_with_path, int nid, GError **error); - -/* pv_load_certificates: - * - * @cert_paths: list of cert paths. 
- * @error: return location for a #GError - * - * @cert_paths must contain at least one element, otherwise an error is - * reported. - * - * Returns: (nullable) (transfer full): List of PvX509WithPath corresponding to the given paths - */ -PvCertWithPathList *pv_load_certificates(char **cert_paths, GError **error); - -/* pv_load_first_cert_from_file: - * - * @path: location of the x509 - * @error: return location for a #GError - * - * This function reads in only the first certificate and ignores all other. This - * is only relevant for the PEM file format. For the host-key document and the - * root CA this behavior is expected. - * - * Returns: (nullable) (transfer full): PvX509WithPath corresponding to the given path - */ -X509 *pv_load_first_cert_from_file(const char *path, GError **error); - -/* pv_load_first_crl_from_file: - * - * @path: location of the x509 CRL - * @error: return location for a #GError - * - * This function reads in only the first CRL and ignores all other. This - * is only relevant for the PEM file format. - * - * Returns: (nullable) (transfer full): X509_CRL corresponding to the given path - */ -X509_CRL *pv_load_first_crl_from_file(const char *path, GError **error); - -/** pv_store_setup_crl_download: - * - * @st: X509_STORE - */ -void pv_store_setup_crl_download(X509_STORE *st); - -/** pv_load_first_crl_by_cert: - * @cert: X509 to specify the download location. - * @error: return location for a #GError - * - * This function returns the first X509_CRL found from the CRL distribution - * points specified in @cert. 
- * - * Returns: (nullable) (transfer full): x509 CRL corresponding to the given X509 - */ -X509_CRL *pv_load_first_crl_by_cert(X509 *cert, GError **error); - -/** pv_try_load_crls_by_certs: - * - * @certs_with_path: List of PvX509WithPath - * - * Returns: (nullable) (transfer full): Stack of CRLs corresponding to the given X509 - */ -STACK_OF_X509_CRL *pv_try_load_crls_by_certs(PvCertWithPathList *certs_with_path); - -/** pv_store_setup: - * - * @root_ca_path: Location of the rootCA or NULL if SystemRoot CA shall be used - * @crl_paths: List of CRL paths or NULL - * @cert_with_crl_paths: List of (untrusted) X509 paths - * @error: return location for a #GError - * - * The untrusted certs need to be verified before actually verifying a Host Key Document. - * - * Returns: (nullable) (transfer full): X509_store with given input data. - * - */ -X509_STORE *pv_store_setup(char *root_ca_path, char **crl_paths, char **cert_with_crl_paths, - GError **error); - -/** pv_get_x509_stack: - * - * x509_with_path_list: list of PvX509WithPath - * - * Returns: (nullable) (transfer full): Stack of X509 corresponding to the given x509 with path - */ -STACK_OF_X509 *pv_get_x509_stack(const GSList *x509_with_path_list); - -/** pv_init_store_ctx: - * - * @ctx: a uninitialized Store CTX - * @trusted: X509_STORE with a trusted rootCA - * @chain: untrusted X509s - * @error: return location for a #GError - * - * Can be called multiple times on the same context if X509_STORE_CTX_cleanup(ctx) - * was called before. 
- * - * Returns: - * 0 on success - * -1 in failure - */ -int pv_init_store_ctx(X509_STORE_CTX *ctx, X509_STORE *trusted, STACK_OF_X509 *chain, - GError **error) PV_NONNULL(1, 2, 3); - -/** pv_init_store_ctx: - * - * @trusted: X509_STORE with a trusted rootCA - * @chain: untrusted X509s - * @error: return location for a #GError - * - * Returns: (nullable) (transfer full): X509_STORE_CTX setup with the input data - */ -X509_STORE_CTX *pv_create_store_ctx(X509_STORE *trusted, STACK_OF_X509 *chain, GError **error) - PV_NONNULL(1, 2); -/** pv_remove_ibm_signing_certs: - * - * @certs: Stack of X509s - * - * Returns: (transfer full): - * List of all IBM Z signing key certificates in @certs and remove them - * from the chain. - * Empty stack if no IBM Z signing key is found. - */ -STACK_OF_X509 *pv_remove_ibm_signing_certs(STACK_OF_X509 *certs); - -/** pv_c2b_name: - * - * Workaround to fix the mismatch between issuer name of the - * IBM Z signing CRLs and the IBM Z signing key subject name. - * - * In RFC 5280 the attributes of a (subject/issuer) name is not mandatory - * ordered. The problem is that our certificates are not consistent in the order - * (see https://tools.ietf.org/html/rfc5280#section-4.1.2.4 for details). - * - * This function tries to reorder the name attributes such that - * further OpenSSL calls can work with it. The caller is - * responsible to free the returned value. - */ -X509_NAME *pv_c2b_name(const X509_NAME *name); - -/** pv_verify_host_key: - * - * @host_key: X509 to be verified - * @issuer_pairs: IBM signing key X509+CRLs Pairs used for verification - * @level: Security level. 
see PV_CERTS_SECURITY_LEVEL - * @error: return location for a #GError - * - * Returns: - * 0 if Host key could be verified with one of the IBM signing keys - * -1 if no IBM signing key could verify the authenticity of the given host key - * - */ -int pv_verify_host_key(X509 *host_key, GSList *issuer_pairs, int verify_flags, int level, - GError **error); - -/** pv_verify_cert: - * - * @ctx: trusted store ctx used for verification - * @cert: X509 to be verified - * @error: return location for a #GError - * - * Cannot be used to verify host keys with IBM signing keys, as IBM signing - * keys are no intermediate CAs. Use pv_verify_host_key() instead. - * - * Returns: - * 0 if @cert could be verified - * -1 if @cert could not be verified - */ -int pv_verify_cert(X509_STORE_CTX *ctx, X509 *cert, GError **error) PV_NONNULL(1, 2); - -/** pv_check_crl_valid_for_cert: - * - * @crl: CRL to be verified - * @cert: Cert that probably issued the given CRL - * @verify_flags: X509 Verification flags (X509_V_FLAG_) - * @error: return location for a #GError - * - * Verify whether a revocation list @crl is valid and is issued by @cert. For - * this multiple steps must be done: - * - * 1. verify issuer of the CRL matches with the suject name of @cert - * 2. verify the validity period of the CRL - * 3. verify the signature of the CRL - * - * Important: This function does not verify whether @cert is allowed to issue a - * CRL. 
- * - * Returns: - * 0 if @crl is valid and issued by @cert - * -1 otherwise - */ -int pv_verify_crl(X509_CRL *crl, X509 *cert, int verify_flags, GError **error); - -/** pv_check_chain_parameters: - * - * @chain: chain of trust to be validated - * @error: return location for a #GError - * - * Verifies that chain has at least a RootCA ans intermediate CA - * and logs the used ROD CA subject - * - * Returns: - * 0 @chain is valid - * -1 otherwise - */ -int pv_check_chain_parameters(const STACK_OF_X509 *chain, GError **error); - -/** pv_store_set_verify_param: - * - * @store: X509_STORE to set parameters - * @error: return location for a #GError - * - * Returns: - * 0 on success - * -1 on failure - */ -int pv_store_set_verify_param(X509_STORE *store, GError **error); - -/** pv_store_ctx_find_valid_crls: - * - * @ctx: STORE_CTX for searching CRLs - * @cert: X509 to match CRLs aggainst - * @error: return location for a #GError - * - * Returns: (nullable) (transfer full): STACK of CRLs related to given @crl fin @ctx - */ -STACK_OF_X509_CRL *pv_store_ctx_find_valid_crls(X509_STORE_CTX *ctx, X509 *cert, GError **error) - PV_NONNULL(1, 2); - -/** pv_verify_host_key_doc: - * - * @host_key_certs_with_path: X509s to be verified - * @trusted. X509_STORE with a rusted RootCA - * @untrusted_certs: STACK OF untrusted X509s - * @online: true if CRLs shall be downloaded - * @error: return location for a #GError - * - * Returns: - * 0 if all given HKDs could be verified using the chain of trust. 
- * -1 otherwise - */ -int pv_verify_host_key_doc(PvCertWithPathList *host_key_certs_with_path, X509_STORE *trusted, - STACK_OF_X509 *untrusted_certs, gboolean online, GError **error) - PV_NONNULL(1, 2, 3); - -/** pv_verify_host_key_docs_by_path: - * - * @host_key_paths: locations of X509 to be verified - * @optional_root_ca_path: rootCA location or NULL if Default shall be used - * @optional_crl_paths: locations of CRLs or NULL - * @untrusted_cert_paths: locations of IntermediateCAs including the IBM signing key - * @online: true if CRLs shall be downloaded - * @error: return location for a #GError - * - * Returns: - * 0 if all given HKDs could be verfied using the chain of trust. - * -1 otherwise - */ -int pv_verify_host_key_docs_by_path(char **host_key_paths, char *optional_root_ca_path, - char **optional_crl_paths, char **untrusted_cert_paths, - gboolean online, GError **error) PV_NONNULL(1, 4); - -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(AUTHORITY_KEYID, AUTHORITY_KEYID_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(PvX509WithPath, pv_x509_with_path_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(STACK_OF_DIST_POINT, STACK_OF_DIST_POINT_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(STACK_OF_X509, STACK_OF_X509_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(STACK_OF_X509_CRL, STACK_OF_X509_CRL_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(X509, X509_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(X509_CRL, X509_CRL_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(X509_LOOKUP, X509_LOOKUP_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(X509_NAME, X509_NAME_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(X509_VERIFY_PARAM, X509_VERIFY_PARAM_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(PvX509Pair, pv_x509_pair_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(X509_STORE, X509_STORE_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(X509_STORE_CTX, X509_STORE_CTX_free) - -#endif /* LIBPV_CERT_H */ diff -Nru s390-tools-2.31.0/include/libpv/common.h s390-tools-2.33.1/include/libpv/common.h --- 
s390-tools-2.31.0/include/libpv/common.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/libpv/common.h 2024-05-28 08:26:36.000000000 +0200 @@ -14,22 +14,6 @@ * the glib version is supported */ #include "libpv/glib-helper.h" - #include -#include "libpv/openssl-compat.h" -#include "libpv/macros.h" - -/** pv_init: - * - * Must be called before any libpv call. - */ -int pv_init(void); - -/** pv_cleanup: - * - * Must be called when done with using libpv. - */ -void pv_cleanup(void); - #endif /* LIBPV_COMMON_H */ diff -Nru s390-tools-2.31.0/include/libpv/crypto.h s390-tools-2.33.1/include/libpv/crypto.h --- s390-tools-2.31.0/include/libpv/crypto.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/libpv/crypto.h 2024-05-28 08:26:36.000000000 +0200 @@ -15,6 +15,7 @@ #include #include "libpv/common.h" +#define PV_NONNULL(...) typedef struct pv_cipher_parms { const EVP_CIPHER *cipher; @@ -26,17 +27,6 @@ }; } PvCipherParms; -typedef union { - struct { - uint8_t x[80]; - uint8_t y[80]; - }; - uint8_t data[160]; -} PvEcdhPubKey; -G_STATIC_ASSERT(sizeof(PvEcdhPubKey) == 160); - -typedef GSList PvEvpKeyList; - enum PvCryptoMode { PV_ENCRYPT, PV_DECRYPT, @@ -61,42 +51,6 @@ */ int pv_BIO_reset(BIO *b); -/** - * pv_generate_rand_data: - * @size: number of generated random bytes using a crypographically secure pseudo random generator - * @error: return location for a #GError - * - * Creates a new #GBytes with @size random bytes using a cryptographically - * secure pseudo random generator. - * - * Returns: (nullable) (transfer full): a new #GBytes, or %NULL in case of an error - */ -GBytes *pv_generate_rand_data(size_t size, GError **error); - -/** - * pv_generate_key: - * @cipher: specifies the OpenSSL cipher for which a cryptographically secure key should be generated - * @error: return location for a #GError - * - * Creates a random key for @cipher using a cryptographically secure pseudo - * random generator. 
- * - * Returns: (nullable) (transfer full): a new #GBytes, or %NULL in case of an error - */ -GBytes *pv_generate_key(const EVP_CIPHER *cipher, GError **error) PV_NONNULL(1); - -/** - * pv_generate_iv: - * @cipher: specifies the OpenSSL cipher for which a cryptographically secure IV should be generated - * @error: return location for a #GError - * - * Creates a random IV for @cipher using a cryptographically secure pseudo - * random generator. - * - * Returns: (nullable) (transfer full): a new #GBytes, or %NULL in case of an error - */ -GBytes *pv_generate_iv(const EVP_CIPHER *cipher, GError **error) PV_NONNULL(1); - /* Symmetric en/decryption functions */ /** @@ -135,7 +89,7 @@ * @derived_key_len: size of the output key * @key: input key * @salt: salt for the extraction - * @info: infor for the expansion + * @info: info for the expansion * @md: EVP mode of operation * @error: return location for a #GError * @@ -147,54 +101,15 @@ GBytes *pv_hkdf_extract_and_expand(size_t derived_key_len, GBytes *key, GBytes *salt, GBytes *info, const EVP_MD *md, GError **error) PV_NONNULL(2, 3, 4, 5); -/** pv_generate_ec_key: - * - * @nid: Numerical identifier of the curve - * @error: return location for a #GError - * - * Returns: (nullable) (transfer full): new random key based on the given curve - */ -EVP_PKEY *pv_generate_ec_key(int nid, GError **error); - -/** pv_evp_pkey_to_ecdh_pub_key: - * - * @key: input key in EVP_PKEY format - * @error: return location for a #GError - * - * Returns: the public part of the input @key in ECDH format. 
- */ -PvEcdhPubKey *pv_evp_pkey_to_ecdh_pub_key(EVP_PKEY *key, GError **error) PV_NONNULL(1); - -/** pv_derive_exchange_key: - * @cust: Customer Key - * @host: Host key - * @error: return location for a #GError - * - * Returns: (nullable) (transfer full): Shared Secret of @cust and @host - */ -GBytes *pv_derive_exchange_key(EVP_PKEY *cust, EVP_PKEY *host, GError **error) PV_NONNULL(1, 2); - GQuark pv_crypto_error_quark(void); #define PV_CRYPTO_ERROR pv_crypto_error_quark() typedef enum { - PV_CRYPTO_ERROR_DERIVE, PV_CRYPTO_ERROR_HKDF_FAIL, PV_CRYPTO_ERROR_INTERNAL, - PV_CRYPTO_ERROR_INVALID_KEY_SIZE, - PV_CRYPTO_ERROR_KEYGENERATION, - PV_CRYPTO_ERROR_RANDOMIZATION, - PV_CRYPTO_ERROR_READ_FILE, PV_CRYPTO_ERROR_NO_MATCH_TAG, } PvCryptoErrors; -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(ASN1_INTEGER, ASN1_INTEGER_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(ASN1_OCTET_STRING, ASN1_OCTET_STRING_free) WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(BIO, BIO_free_all) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(BIGNUM, BN_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(BN_CTX, BN_CTX_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(EC_GROUP, EC_GROUP_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(EC_KEY, EC_KEY_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(EC_POINT, EC_POINT_free) WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(EVP_CIPHER_CTX, EVP_CIPHER_CTX_free) WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(EVP_PKEY, EVP_PKEY_free) WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(EVP_PKEY_CTX, EVP_PKEY_CTX_free) diff -Nru s390-tools-2.31.0/include/libpv/curl.h s390-tools-2.33.1/include/libpv/curl.h --- s390-tools-2.31.0/include/libpv/curl.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/libpv/curl.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,53 +0,0 @@ -/* - * Libcurl utils - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ -#ifndef LIBPV_CURL_H -#define LIBPV_CURL_H - -#include - -#include "libpv/common.h" - -#define CRL_DOWNLOAD_TIMEOUT_MS 3000 -#define CRL_DOWNLOAD_MAX_SIZE 0x100000 - -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(CURL, curl_easy_cleanup) - -/** curl_download: - * @url: URL to specify location of data - * @timeout_ms: time to wait until fail - * @max_size: Maximum size of the downloaded data - * @error: return location for a GError - * - * Returns: (nullable) (transfer full): Downloaded data as #GByteArray - */ -GByteArray *curl_download(const char *url, long timeout_ms, uint max_size, GError **err); - -/** pv_curl_init: - * - * Should not be called by user. - * Use pv_init() instead which - * calls this function during creation. - */ -int pv_curl_init(void); - -/** pv_curl_cleanup: - * - * Should not be called by user. - * Use pv_cleanup() instead which - * calls this function during creation. - */ -void pv_curl_cleanup(void); - -#define PV_CURL_ERROR g_quark_from_static_string("pv-curl-error-quark") -typedef enum { - PV_CURL_ERROR_CURL_INIT_FAILED, - PV_CURL_ERROR_DOWNLOAD_FAILED, -} PvCurlErrors; - -#endif /* LIBPV_CURL_H */ diff -Nru s390-tools-2.31.0/include/libpv/glib-helper.h s390-tools-2.33.1/include/libpv/glib-helper.h --- s390-tools-2.31.0/include/libpv/glib-helper.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/libpv/glib-helper.h 2024-05-28 08:26:36.000000000 +0200 @@ -28,8 +28,6 @@ #include #include -#include "libpv/macros.h" - #ifdef __clang__ #define WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(...) \ DO_PRAGMA(clang diagnostic push) \ @@ -40,6 +38,8 @@ #define WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(...) 
G_DEFINE_AUTOPTR_CLEANUP_FUNC(__VA_ARGS__) #endif +#define DO_PRAGMA(x) _Pragma(#x) + #define pv_wrapped_g_assert(__expr) g_assert(__expr) /** pv_sec_gbytes_new_take: diff -Nru s390-tools-2.31.0/include/libpv/hash.h s390-tools-2.33.1/include/libpv/hash.h --- s390-tools-2.31.0/include/libpv/hash.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/libpv/hash.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,119 +0,0 @@ -/* - * Hashing definitions. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -#ifndef LIBPV_HASH_H -#define LIBPV_HASH_H - -#include - -#include "libpv/common.h" - -/** pv_digest_ctx_new: - * @md: mode of digest, e.g. #EVP_sha256() - * @error: return location for a #GError - * - * Returns: (nullable) (transfer full): a new #EVP_MD_CTX, or %NULL in case of an error - */ -EVP_MD_CTX *pv_digest_ctx_new(const EVP_MD *md, GError **error); - -/** pv_digest_ctx_update: - * @ctx: EVP_MD_CTX to add data - * @data: #GBytes to add to the context - * @error: return location for a #GError - * - * Adds @data to the digest context. Can be called multiple times. - * - * Returns: 0 in case of success, -1 otherwise. - */ -int pv_digest_ctx_update(EVP_MD_CTX *ctx, GBytes *data, GError **error); - -/** pv_digest_ctx_update_raw: - * @ctx: #EVP_MD_CTX to add data - * @buf: data to add to the context - * @size: size of @buf - * @error: return location for a #GError - * - * Adds @buf to the digest context. Can be called multiple times. - * - * Returns: 0 in case of success, -1 otherwise. - */ -int pv_digest_ctx_update_raw(EVP_MD_CTX *ctx, const uint8_t *buf, size_t size, GError **error); - -/** pv_digest_ctx_finalize: - * @ctx: #EVP_MD_CTX with data to digest - * @error: return location for a #GError - * - * Calculates the digest of all previously added data. Do not use @ctx afterwards. 
- * - * Returns: (nullable) (transfer full): Digest of all data added before as #GBytes, or NULL in case of error. - */ -GBytes *pv_digest_ctx_finalize(EVP_MD_CTX *ctx, GError **error); - -/** pv_sha256_hash: - * @buf: data for which a sha256 hash sould be calculated - * @size: size of @buf - * @error: return location for a #GError - * - * Shorthand for initializing a sha256-digest ctx, updating, and finalizing. - * - * Returns: (nullable) (transfer full): SHA256 of @buf as #GBytes, or NULL in case of error. - */ -GBytes *pv_sha256_hash(uint8_t *buf, size_t size, GError **error); - -/** pv_hmac_ctx_new: - * @key: key used for the HMAC - * @md: mode of digest, e.g. #EVP_sha512() - * @error: return location for a #GError - * - * Returns: (nullable) (transfer full): New #HMAC_CTX or NULL in case of error - */ -HMAC_CTX *pv_hmac_ctx_new(GBytes *key, const EVP_MD *md, GError **error); - -/** pv_hmac_ctx_update_raw: - * @ctx: #HMAC_CTX to add data - * @buf: data to add to the context - * @size: size of @buf - * @error: return location for a #GError - * - * Adds @buf to the HMAC context. Can be called multiple times. - * - * Returns: 0 in case of success, -1 otherwise. - */ -int pv_hmac_ctx_update_raw(HMAC_CTX *ctx, const void *data, size_t size, GError **error); - -/** pv_hmac_ctx_update: - * @ctx: #HMAC_CTX to add data - * @data: #GBytes to add to the context - * @error: return location for a #GError - * - * Adds @data to the HMAC context. Can be called multiple times. - * - * Returns: 0 in case of success, -1 otherwise. - */ - -int pv_hmac_ctx_update(HMAC_CTX *ctx, GBytes *data, GError **error); - -/** pv_hmac_ctx_finalize: - * @ctx: #HMAC_CTX with data to digest - * @error: return location for a #GError - * - * Calculates the HMAC of all previously added data. Do not use @ctx afterwards. - * - * Returns: (nullable) (transfer full): HMAC of all data added before as #GBytes, or NULL in case of error. 
- */ -GBytes *pv_hamc_ctx_finalize(HMAC_CTX *ctx, GError **error); - -#define PV_HASH_ERROR g_quark_from_static_string("pv-crypro-error-quark") -typedef enum { - PV_HASH_ERROR_INTERNAL, -} PvHashErrors; - -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(EVP_MD_CTX, EVP_MD_CTX_free) -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(HMAC_CTX, HMAC_CTX_free) - -#endif /* LIBPV_HASH_H */ diff -Nru s390-tools-2.31.0/include/libpv/macros.h s390-tools-2.33.1/include/libpv/macros.h --- s390-tools-2.31.0/include/libpv/macros.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/libpv/macros.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -/* - * Libpv common macro definitions. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - * - */ -#ifndef LIBPV_MACROS_H -#define LIBPV_MACROS_H - -#include - -#define PV_NONNULL(...) -#define DO_PRAGMA(x) _Pragma(#x) - -/* Most significant bit */ -#define PV_MSB(idx) ((uint64_t)1 << (63 - (idx))) - -#endif /* LIBPV_MACROS_H */ diff -Nru s390-tools-2.31.0/include/libpv/openssl-compat.h s390-tools-2.33.1/include/libpv/openssl-compat.h --- s390-tools-2.31.0/include/libpv/openssl-compat.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/libpv/openssl-compat.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,29 +0,0 @@ -/* - * OpenSSL compatibility utils - * - * Copyright IBM Corp. 2021 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ -#ifndef LIBPV_OPENSSL_COMPAT_H -#define LIBPV_OPENSSL_COMPAT_H - -#include -#include -#include - -#if OPENSSL_VERSION_NUMBER >= 0x30000000L -#define pv_X509_STORE_CTX_get_current_cert(ctx) X509_STORE_CTX_get_current_cert(ctx) -#define pv_X509_STORE_CTX_get1_crls(ctx, nm) X509_STORE_CTX_get1_crls((ctx), (nm)) -#define pv_X509_STORE_set_lookup_crls(st, cb) X509_STORE_set_lookup_crls(st, cb) -#elif OPENSSL_VERSION_NUMBER >= 0x10100000L -#define pv_X509_STORE_CTX_get_current_cert(ctx) \ - X509_STORE_CTX_get_current_cert((X509_STORE_CTX *)(ctx)) -#define pv_X509_STORE_CTX_get1_crls(ctx, nm) \ - X509_STORE_CTX_get1_crls((X509_STORE_CTX *)(ctx), (X509_NAME *)(nm)) -#define pv_X509_STORE_set_lookup_crls(st, cb) \ - X509_STORE_set_lookup_crls(st, (X509_STORE_CTX_lookup_crls_fn)(cb)) -#endif - -#endif /* LIBPV_OPENSSL_COMPAT_H */ diff -Nru s390-tools-2.31.0/include/libpv/se-hdr.h s390-tools-2.33.1/include/libpv/se-hdr.h --- s390-tools-2.31.0/include/libpv/se-hdr.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/include/libpv/se-hdr.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,96 +0,0 @@ -/* - * PV/SE header definitions - * - * Copyright IBM Corp. 2020 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ -#ifndef LIBPV_SE_HDR_H -#define LIBPV_SE_HDR_H - -#include "libpv/common.h" - -#include - -#include "boot/psw.h" -#include "libpv/crypto.h" -#include "libpv/macros.h" - -/* Magic number which is used to identify the file containing the PV - * header - */ -#define PV_MAGIC_NUMBER 0x49424d5365634578ULL -#define PV_VERSION_1 0x00000100U - -/* Plaintext control flags */ -/* dumping of the configuration is allowed */ -#define PV_PCF_ALLOW_DUMPING PV_MSB(34) -/* prevent Ultravisor decryption during unpack operation */ -#define PV_PCF_NO_DECRYPTION PV_MSB(35) -/* PCKMO encrypt-DEA/TDEA-key functions allowed */ -#define PV_PCF_PCKMO_DEA_TDEA PV_MSB(56) -/* PCKMO encrypt-AES-key functions allowed */ -#define PV_PCF_PCKMO_AES PV_MSB(57) -/* PCKMO encrypt-ECC-key functions allowed */ -#define PV_PCF_PCKM_ECC PV_MSB(58) - -/* maxima for the PV version 1 */ -#define PV_V1_IPIB_MAX_SIZE PAGE_SIZE -#define PV_V1_PV_HDR_MIN_SIZE \ - (sizeof(struct pv_hdr_head) + sizeof(struct pv_hdr_encrypted) + \ - sizeof(((struct pv_hdr *)0)->tag) + 1 * sizeof(struct pv_hdr_key_slot)) -#define PV_V1_PV_HDR_MAX_SIZE (2 * PAGE_SIZE) - -#define PV_IMAGE_ENCR_KEY_SIZE 64 - -typedef struct pv_hdr_key_slot { - uint8_t digest_key[SHA256_DIGEST_LENGTH]; - uint8_t wrapped_key[32]; - uint8_t tag[16]; -} __packed PvHdrKeySlot; - -typedef struct pv_hdr_opt_item { - uint32_t otype; - uint8_t ibk[32]; - uint8_t data[]; -} __packed PvHdrOptItem; - -/* integrity protected data (by GCM tag), but non-encrypted */ -struct pv_hdr_head { - uint64_t magic; - uint32_t version; - uint32_t phs; - uint8_t iv[12]; - uint32_t res1; - uint64_t nks; - uint64_t sea; - uint64_t nep; - uint64_t pcf; - PvEcdhPubKey cust_pub_key; - uint8_t pld[SHA512_DIGEST_LENGTH]; - uint8_t ald[SHA512_DIGEST_LENGTH]; - uint8_t tld[SHA512_DIGEST_LENGTH]; -} __packed; - -/* Must not have any padding */ -struct pv_hdr_encrypted { - uint8_t cust_comm_key[32]; - uint8_t img_enc_key_1[PV_IMAGE_ENCR_KEY_SIZE / 2]; - uint8_t 
img_enc_key_2[PV_IMAGE_ENCR_KEY_SIZE / 2]; - struct psw_t psw; - uint64_t scf; - uint32_t noi; - uint32_t res2; -}; -G_STATIC_ASSERT(sizeof(struct pv_hdr_encrypted) == 32 + 32 + 32 + sizeof(struct psw_t) + 8 + 4 + 4); - -typedef struct pv_hdr { - struct pv_hdr_head head; - struct pv_hdr_key_slot *slots; - struct pv_hdr_encrypted *encrypted; - struct pv_hdr_opt_item **optional_items; - uint8_t tag[16]; -} PvHdr; - -#endif /* LIBPV_SE_HDR_H */ diff -Nru s390-tools-2.31.0/ipl_tools/ccw.c s390-tools-2.33.1/ipl_tools/ccw.c --- s390-tools-2.31.0/ipl_tools/ccw.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/ipl_tools/ccw.c 2024-05-28 08:26:36.000000000 +0200 @@ -15,9 +15,32 @@ #include #include +#include "lib/util_path.h" +#include "lib/util_panic.h" #include "ipl_tools.h" /* + * Look up for the device in /sys/devices/ hierarchy. + * + * path must be PATH_MAX large and the value will be replaced in place + */ +static int device_sysfs_path(const char *device, char *path, const size_t path_size) +{ + util_assert(device != NULL, "Internal error: device is NULL"); + util_assert(path != NULL, "Internal error: path is NULL"); + util_assert(path_size == PATH_MAX, "Internal error: path_size is '%zu', but must be '%zu'", + path_size, PATH_MAX); + char *buf = util_path_sysfs("block/%s/device", device); + + if (!realpath(buf, path)) { + free(buf); + return -1; + } + free(buf); + return 0; +} + +/* * Check if the specified device number is a valid device number * which can be found in the /sys/bus/ccw/drivers/dasd-eckd/ * structure. 
@@ -43,6 +66,26 @@ } /* + * Check if the specified device is a valid virtio subchannel device + */ +int ccw_is_virtio_device(const char *device) +{ + char path[PATH_MAX] = { '\0' }; + unsigned virtio = 0; + + if (device_sysfs_path(device, path, sizeof(path)) != 0) + return -1; + + /* + * The output has the following format: + * /sys/devices/css0/0.0.0000/0.0.0000/virtio0/block/vda + */ + if (sscanf(path, "/sys/devices/css0/%*[0-9a-f.]/%*[0-9a-f.]/virtio%u", &virtio) != 1) + return -1; + return 0; +} + +/* * Return CCW Bus ID (old sysfs) */ static int ccw_busid_get_sysfs_old(const char *device, char *busid) @@ -77,11 +120,9 @@ */ static int ccw_busid_get_sysfs_new(const char *device, char *busid) { - char path[PATH_MAX], buf[4096]; + char path[PATH_MAX] = { '\0' }; - memset(buf, 0, sizeof(buf)); - snprintf(path, sizeof(path), "/sys/block/%s/device", device); - if (realpath(path, buf) == NULL) + if (device_sysfs_path(device, path, sizeof(path)) != 0) return -1; /* @@ -89,7 +130,7 @@ * /sys/devices/css0/0.0.0119/0.0.3f19/block/dasda * /sys/devices/css0/0.0.0000/0.0.0000/virtio0/block/vda */ - if (sscanf(buf, "/sys/devices/css0/%*[0-9a-f.]/%[0-9a-f.]", busid) != 1) + if (sscanf(path, "/sys/devices/css0/%*[0-9a-f.]/%[0-9a-f.]", busid) != 1) return -1; return 0; } diff -Nru s390-tools-2.31.0/ipl_tools/cmd_chreipl.c s390-tools-2.33.1/ipl_tools/cmd_chreipl.c --- s390-tools-2.31.0/ipl_tools/cmd_chreipl.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/ipl_tools/cmd_chreipl.c 2024-05-28 08:26:36.000000000 +0200 @@ -391,7 +391,8 @@ static int set_reipl_type(const char *dev_name) { if (strncmp(dev_name, "dasd", strlen("dasd")) == 0 || - strncmp(dev_name, "vd", strlen("vd")) == 0) + strncmp(dev_name, "vd", strlen("vd")) == 0 || + ccw_is_virtio_device(dev_name) == 0) l.reipl_type = REIPL_CCW; else if (strncmp(dev_name, "sd", strlen("sd")) == 0) l.reipl_type = REIPL_FCP; diff -Nru s390-tools-2.31.0/ipl_tools/ipl_tools.h s390-tools-2.33.1/ipl_tools/ipl_tools.h --- 
s390-tools-2.31.0/ipl_tools/ipl_tools.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/ipl_tools/ipl_tools.h 2024-05-28 08:26:36.000000000 +0200 @@ -87,6 +87,7 @@ * CCW */ extern int ccw_is_device(const char *devno); +extern int ccw_is_virtio_device(const char *device); extern void ccw_busid_get(const char *device, char *devno); /* diff -Nru s390-tools-2.31.0/libap/ap.c s390-tools-2.33.1/libap/ap.c --- s390-tools-2.31.0/libap/ap.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libap/ap.c 2024-05-28 08:26:36.000000000 +0200 @@ -11,12 +11,15 @@ #include #include #include +#include #include +#include #include #include #include #include #include +#include #ifdef HAVE_JSONC #include @@ -698,6 +701,20 @@ } } +static unsigned int random_delay(void) +{ + static bool libap_seed = true; + struct timeval t; + + if (libap_seed) { + gettimeofday(&t, NULL); + srand((unsigned int)((t.tv_sec + t.tv_usec) % UINT_MAX)); + libap_seed = false; + } + + return AP_LOCK_DELAY_US + (rand() % AP_LOCK_VARIANCE_US); +} + /** * Acquire the ap config lock using this Process ID * @@ -707,7 +724,9 @@ */ int ap_get_lock(void) { - return util_lockfile_lock(AP_LOCKFILE, AP_LOCK_RETRIES); + unsigned int delay = random_delay(); + + return util_lockfile_lock_cw(AP_LOCKFILE, AP_LOCK_RETRIES, delay, delay); } /** @@ -719,7 +738,10 @@ */ int ap_get_lock_callout(void) { - return util_lockfile_parent_lock(AP_LOCKFILE, AP_LOCK_RETRIES); + unsigned int delay = random_delay(); + + return util_lockfile_parent_lock_cw(AP_LOCKFILE, AP_LOCK_RETRIES, delay, + delay); } /** diff -Nru s390-tools-2.31.0/libpv/cert.c s390-tools-2.33.1/libpv/cert.c --- s390-tools-2.31.0/libpv/cert.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libpv/cert.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,1654 +0,0 @@ -/* - * Certificate functions and definitions. - * - * Copyright IBM Corp. 
2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ - -/* Must be included before any other header */ -#include "config.h" - -#include - -#include "libpv/cert.h" -#include "libpv/crypto.h" -#include "libpv/curl.h" - -/* Used for the caching of the downloaded CRLs */ -static GHashTable *cached_crls; - -void pv_cert_init(void) -{ - if (!cached_crls) - cached_crls = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, - (GDestroyNotify)X509_CRL_free); -} - -void pv_cert_cleanup(void) -{ - g_clear_pointer(&cached_crls, g_hash_table_destroy); -} - -PvX509WithPath *pv_x509_with_path_new(X509 *cert, const char *path) -{ - g_autoptr(PvX509WithPath) ret = g_new(PvX509WithPath, 1); - - g_assert(cert && path); - - if (X509_up_ref(cert) != 1) - g_abort(); - ret->cert = cert; - ret->path = g_strdup(path); - return g_steal_pointer(&ret); -} - -void pv_x509_with_path_free(PvX509WithPath *cert) -{ - if (!cert) - return; - - X509_free(cert->cert); - g_free(cert->path); - g_free(cert); -} - -PvX509Pair *pv_x509_pair_new_take(X509 **cert, STACK_OF_X509_CRL **crls) -{ - g_autoptr(PvX509Pair) ret = g_new0(PvX509Pair, 1); - - g_assert(cert); - g_assert(crls); - - ret->cert = g_steal_pointer(cert); - ret->crls = g_steal_pointer(crls); - return g_steal_pointer(&ret); -} - -void pv_x509_pair_free(PvX509Pair *pair) -{ - if (!pair) - return; - - sk_X509_CRL_pop_free(pair->crls, X509_CRL_free); - X509_free(pair->cert); - g_free(pair); -} - -void STACK_OF_DIST_POINT_free(STACK_OF_DIST_POINT *stack) -{ - if (!stack) - return; - - sk_DIST_POINT_pop_free(stack, DIST_POINT_free); -} - -void STACK_OF_X509_free(STACK_OF_X509 *stack) -{ - if (!stack) - return; - - sk_X509_pop_free(stack, X509_free); -} - -void STACK_OF_X509_CRL_free(STACK_OF_X509_CRL *stack) -{ - if (!stack) - return; - - sk_X509_CRL_pop_free(stack, X509_CRL_free); -} - -static gboolean certificate_uses_elliptic_curve(EVP_PKEY *key, 
int nid, GError **error) -{ - g_autoptr(EC_KEY) ec = NULL; - int rc; - - g_assert(key); - - if (EVP_PKEY_id(key) != EVP_PKEY_EC) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INVALID_PARM, _("No EC key found")); - return FALSE; - } - - ec = EVP_PKEY_get1_EC_KEY(key); - if (!ec) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INVALID_PARM, _("No EC key found")); - return FALSE; - } - - if (EC_KEY_check_key(ec) != 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INVALID_PARM, _("Invalid EC key")); - return FALSE; - } - - rc = EC_GROUP_get_curve_name(EC_KEY_get0_group(ec)); - if (rc != nid) { - /* maybe the NID is unset */ - if (rc == 0) { - g_autoptr(EC_GROUP) grp = EC_GROUP_new_by_curve_name(nid); - const EC_POINT *pub = EC_KEY_get0_public_key(ec); - g_autoptr(BN_CTX) ctx = BN_CTX_new(); - - if (EC_POINT_is_on_curve(grp, pub, ctx) != 1) { - g_set_error_literal(error, PV_CERT_ERROR, - PV_CERT_ERROR_INVALID_PARM, - _("Invalid EC curve")); - return FALSE; - } - } else { - /* NID was set but doesn't match with the expected NID - */ - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INVALID_PARM, - _("Wrong NID used: '%d'"), - EC_GROUP_get_curve_name(EC_KEY_get0_group(ec))); - return FALSE; - } - } - return TRUE; -} - -EVP_PKEY *pv_x509_get_ec_pubkey(X509 *cert, int nid, GError **error) -{ - g_autoptr(EVP_PKEY) ret = NULL; - - ret = X509_get_pubkey(cert); - if (!ret) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INVALID_PARM, - _("Failed to get public key from host-key document")); - return NULL; - } - - if (!certificate_uses_elliptic_curve(ret, nid, error)) { - g_prefix_error(error, _("Host-key document does not use an elliptic EC curve")); - return NULL; - } - - return g_steal_pointer(&ret); -} - -GSList *pv_get_ec_pubkeys(PvCertWithPathList *certs_with_path, int nid, GError **error) -{ - g_autoslist(EVP_PKEY) ret = NULL; - - for (GSList *iterator = certs_with_path; iterator; iterator = iterator->next) { - const PvX509WithPath *cert_with_path = 
iterator->data; - g_autoptr(EVP_PKEY) host_key = NULL; - X509 *cert = cert_with_path->cert; - - host_key = pv_x509_get_ec_pubkey(cert, nid, error); - if (!host_key) - return NULL; - - ret = g_slist_append(ret, g_steal_pointer(&host_key)); - } - - return g_steal_pointer(&ret); -} - -PvCertWithPathList *pv_load_certificates(char **cert_paths, GError **error) -{ - g_autoslist(PvX509WithPath) ret = NULL; - - for (char **iterator = cert_paths; iterator != NULL && *iterator != NULL; iterator++) { - const char *cert_path = *iterator; - g_autoptr(X509) cert = NULL; - - g_assert(cert_path); - - cert = pv_load_first_cert_from_file(cert_path, error); - if (!cert) - return NULL; - - ret = g_slist_append(ret, pv_x509_with_path_new(cert, cert_path)); - } - if (!ret) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_READ_CERTIFICATE, - _("no certificates specified")); - return NULL; - } - - return g_steal_pointer(&ret); -} - -X509 *pv_load_first_cert_from_file(const char *path, GError **error) -{ - g_autoptr(BIO) bio = BIO_new_file(path, "r"); - g_autoptr(X509) cert = NULL; - - if (!bio) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_READ_CERTIFICATE, - _("unable to read certificate: '%s'"), path); - return NULL; - } - - cert = PEM_read_bio_X509(bio, NULL, NULL, NULL); - if (cert) - return g_steal_pointer(&cert); - ERR_clear_error(); - if (pv_BIO_reset(bio) < 0) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_READ_CERTIFICATE, - _("unable to load certificate: '%s'"), path); - return NULL; - } - - /* maybe the certificate is stored in DER format */ - cert = d2i_X509_bio(bio, NULL); - if (cert) - return g_steal_pointer(&cert); - - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_READ_CERTIFICATE, - _("unable to load certificate: '%s'"), path); - return NULL; -} - -static X509_CRL *load_crl_from_bio(BIO *bio) -{ - g_autoptr(X509_CRL) crl = PEM_read_bio_X509_CRL(bio, NULL, NULL, NULL); - if (crl) - return g_steal_pointer(&crl); - ERR_clear_error(); - if (pv_BIO_reset(bio) < 
0) - return NULL; - - /* maybe the CRL is stored in DER format */ - crl = d2i_X509_CRL_bio(bio, NULL); - if (crl) - return g_steal_pointer(&crl); - return NULL; -} - -/* This function reads in only the first CRL and ignores all other. This is only - * relevant for the PEM file format. - */ -X509_CRL *pv_load_first_crl_from_file(const char *path, GError **error) -{ - g_autoptr(BIO) bio = BIO_new_file(path, "r"); - g_autoptr(X509_CRL) crl = NULL; - - if (!bio) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_READ_CRL, - _("unable to read CRL: '%s'"), path); - return NULL; - } - - crl = load_crl_from_bio(bio); - if (!crl) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_READ_CRL, - _("unable to load CRL: '%s'"), path); - return NULL; - } - return g_steal_pointer(&crl); -} - -static char *pv_X509_NAME_oneline(const X509_NAME *name) -{ - g_autoptr(BIO) key_bio = BIO_new(BIO_s_mem()); - g_autofree char *ret = NULL; - char *key = NULL; - long len; - - if (X509_NAME_print_ex(key_bio, name, 0, XN_FLAG_RFC2253) == -1) { - g_autofree char *openssl_err_msg = pv_get_openssl_errors(); - - g_warning(_("Cannot receive X509-NAME from CRL: %s"), openssl_err_msg); - return NULL; - } - - len = BIO_get_mem_data(key_bio, &key); - if (len < 0) { - g_warning(_("Cannot receive X509-NAME from CRL")); - return NULL; - } - - ret = g_malloc0((size_t)len + 1); - memcpy(ret, key, (size_t)len); - return g_steal_pointer(&ret); -} - -static gboolean cache_crl(const X509_NAME *name, X509_CRL *crl) -{ - g_autofree char *key = NULL; - - g_assert(name); - - key = pv_X509_NAME_oneline(name); - if (!key) { - g_warning(_("Cannot receive X509-NAME from CRL")); - return FALSE; - } - if (X509_CRL_up_ref(crl) != 1) - g_abort(); - return g_hash_table_insert(cached_crls, g_steal_pointer(&key), crl); -} - -/* Caller is responsible for free'ing */ -static X509_CRL *lookup_crl(const X509_NAME *name) -{ - g_autoptr(X509_CRL) crl = NULL; - g_autofree char *key = NULL; - - g_assert(name); - - key = 
pv_X509_NAME_oneline(name); - if (!key) - return NULL; - crl = g_hash_table_lookup(cached_crls, key); - if (crl) { - if (X509_CRL_up_ref(crl) != 1) - g_abort(); - return g_steal_pointer(&crl); - } - return NULL; -} - -/* Returns empty stack if no CRL downloaded. */ -static STACK_OF_X509_CRL *crls_download_cb(const X509_STORE_CTX *ctx, const X509_NAME *nm) -{ - g_autoptr(STACK_OF_X509_CRL) crls = NULL; - g_autoptr(X509_CRL) crl = NULL; - /* must not be free'd */ - X509 *cert = NULL; - - crls = sk_X509_CRL_new_null(); - if (!crls) - g_abort(); - cert = pv_X509_STORE_CTX_get_current_cert(ctx); - if (!cert) - return g_steal_pointer(&crls); - g_assert(X509_NAME_cmp(X509_get_issuer_name(cert), nm) == 0); - crl = lookup_crl(nm); - if (!crl) { - /* ignore error */ - crl = pv_load_first_crl_by_cert(cert, NULL); - if (!crl) - return g_steal_pointer(&crls); - g_assert_true(cache_crl(nm, crl)); - } - if (sk_X509_CRL_push(crls, g_steal_pointer(&crl)) == 0) - g_abort(); - return g_steal_pointer(&crls); -} - -/* Downloaded CRLs have a higher precedence than the CRLs specified on the - * command line. 
- */ -static STACK_OF_X509_CRL *crls_cb(const X509_STORE_CTX *ctx, const X509_NAME *nm) -{ - g_autoptr(STACK_OF_X509_CRL) crls = crls_download_cb(ctx, nm); - - if (sk_X509_CRL_num(crls) > 0) - return g_steal_pointer(&crls); - return pv_X509_STORE_CTX_get1_crls(ctx, nm); -} - -/* Set up CRL lookup with download support */ -void pv_store_setup_crl_download(X509_STORE *st) -{ - pv_X509_STORE_set_lookup_crls(st, crls_cb); -} - -static X509_CRL *GByteArray_to_X509_CRL(const GByteArray *data) -{ - g_autoptr(X509_CRL) ret = NULL; - g_autoptr(BIO) bio = NULL; - - g_assert(data); - - if (data->len > INT_MAX) - return NULL; - - bio = BIO_new_mem_buf(data->data, (int)data->len); - if (!bio) - g_abort(); - - ret = load_crl_from_bio(bio); - if (!ret) - return NULL; - - return g_steal_pointer(&ret); -} - -static int load_crl_from_web(const char *url, X509_CRL **crl, GError **error) -{ - g_autoptr(X509_CRL) tmp_crl = NULL; - g_autoptr(GByteArray) data = NULL; - g_assert(crl); - - data = curl_download(url, CRL_DOWNLOAD_TIMEOUT_MS, CRL_DOWNLOAD_MAX_SIZE, error); - if (!data) { - g_prefix_error(error, _("unable to download CRL: ")); - return -1; - } - tmp_crl = GByteArray_to_X509_CRL(data); - if (!tmp_crl) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_CRL_DOWNLOAD_FAILED, - _("unable to load CRL from '%s'"), url); - return -1; - } - *crl = g_steal_pointer(&tmp_crl); - return 0; -} - -/* Get the first http[s] URL from a DIST_POINT */ -static const char *get_first_dp_url(DIST_POINT *dp) -{ - GENERAL_NAMES *general_names; - - g_assert(dp); - - if (!dp->distpoint || dp->distpoint->type != 0) - return NULL; - - general_names = dp->distpoint->name.fullname; - for (int i = 0; i < sk_GENERAL_NAME_num(general_names); i++) { - GENERAL_NAME *name = sk_GENERAL_NAME_value(general_names, i); - g_autofree const char *uri_str = NULL; - ASN1_STRING *uri_asn1; - const char *uri_data; - int uri_data_len; - int type; - - uri_asn1 = GENERAL_NAME_get0_value(name, &type); - if (type != GEN_URI) - 
continue; - uri_data_len = ASN1_STRING_length(uri_asn1); - if (uri_data_len < 0) - continue; - uri_data = (const char *)ASN1_STRING_get0_data(uri_asn1); - /* Make sure that uri_str is null-terminated as in general it - * cannot be assumed that @uri_data is null-terminated. - */ - uri_str = g_strndup(uri_data, (size_t)uri_data_len); - if (g_str_has_prefix(uri_str, "http://")) - return uri_data; - if (g_str_has_prefix(uri_str, "https://")) - return uri_data; - } - return NULL; -} - -/* Download a CRL using the URI specified in the distribution @crldp */ -static X509_CRL *load_crl_by_dist_point(DIST_POINT *crldp, GError **error) -{ - const char *uri = get_first_dp_url(crldp); - g_autoptr(X509_CRL) crl = NULL; - - if (!uri) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("no valid URL specified in distribution point")); - return NULL; - } - - if (load_crl_from_web(uri, &crl, error) < 0) - return NULL; - - return g_steal_pointer(&crl); -} - -/* This function returns the first X509_CRL found from the CRL distribution - * points specified in @cert. This function could be optimized by filtering - * duplicate certificates and/or filtering duplicated URIs. 
- */ -X509_CRL *pv_load_first_crl_by_cert(X509 *cert, GError **error) -{ - g_autoptr(STACK_OF_DIST_POINT) crldps = NULL; - g_autoptr(GError) last_error = NULL; - g_autoptr(X509_CRL) ret = NULL; - int dist_points_cnt; - - g_assert(cert); - - crldps = X509_get_ext_d2i(cert, NID_crl_distribution_points, NULL, NULL); - if (!crldps || sk_DIST_POINT_num(crldps) == 0) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_NO_CRLDP, - _("no distribution point found")); - return NULL; - } - - dist_points_cnt = sk_DIST_POINT_num(crldps); - for (int i = 0; i < dist_points_cnt; i++) { - DIST_POINT *crldp = sk_DIST_POINT_value(crldps, i); - g_assert(crldp); - - g_clear_error(&last_error); - ret = load_crl_by_dist_point(crldp, &last_error); - if (ret) - return g_steal_pointer(&ret); - } - - /* relabel error */ - if (last_error) - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_FAILED_DOWNLOAD_CRL, - "%s", last_error->message); - else - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_FAILED_DOWNLOAD_CRL, - _("failed to download CRL")); - return NULL; -} - -STACK_OF_X509_CRL *pv_try_load_crls_by_certs(GSList *certs_with_path) -{ - g_autoptr(STACK_OF_X509_CRL) ret = sk_X509_CRL_new_null(); - if (!ret) - g_abort(); - - for (GSList *iterator = certs_with_path; iterator; iterator = iterator->next) { - PvX509WithPath *cert_with_path = iterator->data; - X509 *cert = cert_with_path->cert; - g_autoptr(X509_CRL) crl = NULL; - g_assert(cert); - /* ignore error */ - crl = pv_load_first_crl_by_cert(cert, NULL); - if (!crl) - continue; - if (sk_X509_CRL_push(ret, g_steal_pointer(&crl)) == 0) - g_abort(); - } - return g_steal_pointer(&ret); -} - -#define DEFINE_GSLIST_MAP(t2, t1) \ - typedef t1 *(*g_slist_map_func_##t2##_##t1)(const t2 *x, GError **error); \ - G_GNUC_UNUSED static GSList *g_slist_map_##t2##_##t1( \ - const GSList *list, g_slist_map_func_##t2##_##t1 func, GError **error) \ - { \ - g_autoslist(t1) ret = NULL; \ - for (const GSList *iterator = list; iterator; iterator = 
iterator->next) { \ - const t2 *value = iterator->data; \ - t1 *new_value = NULL; \ - g_assert(value); \ - new_value = func(value, error); \ - if (!new_value) \ - return NULL; \ - ret = g_slist_append(ret, g_steal_pointer(&new_value)); \ - } \ - return g_steal_pointer(&ret); \ - } - -#define DEFINE_GSLIST_TO_STACK(t1) \ - G_GNUC_UNUSED static STACK_OF(t1) * g_slist_to_stack_of_##t1(GSList **list) \ - { \ - g_assert(list); \ - g_autoptr(STACK_OF_##t1) ret = sk_##t1##_new_null(); \ - if (!ret) \ - g_abort(); \ - for (GSList *iterator = *list; iterator; iterator = iterator->next) { \ - if (sk_##t1##_push(ret, g_steal_pointer(&iterator->data)) == 0) \ - g_abort(); \ - } \ - g_clear_pointer(list, g_slist_free); \ - return g_steal_pointer(&ret); \ - } - -DEFINE_GSLIST_MAP(PvX509WithPath, X509) -DEFINE_GSLIST_TO_STACK(X509) - -static X509 *pv_x509_with_path_get_cert(const PvX509WithPath *cert_with_path, - G_GNUC_UNUSED GError **error) -{ - g_autoptr(X509) cert = NULL; - - g_assert(cert_with_path && cert_with_path->cert); - - cert = cert_with_path->cert; - if (X509_up_ref(cert) != 1) - g_abort(); - return g_steal_pointer(&cert); -} - -/* @crl_paths is allowed to be NULL */ -static int load_crls_to_store(X509_STORE *store, char **crl_paths, gboolean err_out_empty_crls, - GError **error) -{ - for (char **iterator = crl_paths; iterator != NULL && *iterator != NULL; iterator++) { - X509_LOOKUP *lookup = X509_STORE_add_lookup(store, X509_LOOKUP_file()); - const char *crl_path = *iterator; - int count; - - g_assert(crl_path); - - if (!lookup) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("X509 store initialization failed")); - return -1; - } - - /* support *.pem files containing multiple CRLs */ - count = X509_load_crl_file(lookup, crl_path, X509_FILETYPE_PEM); - if (count > 0) - continue; - - count = X509_load_crl_file(lookup, crl_path, X509_FILETYPE_ASN1); - if (count == 1) - continue; - - if (err_out_empty_crls) { - g_set_error(error, PV_CERT_ERROR, 
PV_CERT_ERROR_LOAD_CRL, - _("unable to load CRL from: '%s'"), crl_path); - return -1; - } - } - - return 0; -} - -X509_STORE *pv_store_setup(char *root_ca_path, char **crl_paths, char **cert_with_crl_paths, - GError **error) -{ - g_autoptr(X509_STORE) store = X509_STORE_new(); - if (!store) - g_abort(); - - /* if @root_ca_path != NULL use the specified root CA only, otherwise use the - * default root CAs found on the system - */ - if (root_ca_path) { - X509_LOOKUP *lookup = X509_STORE_add_lookup(store, X509_LOOKUP_file()); - int count; - - if (!lookup) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("X509 store initialization failed")); - return NULL; - } - - /* only the PEM format allows embedded CRLs so we've to - * check for it only here and not in case of ASN1 - */ - count = X509_load_cert_file(lookup, root_ca_path, X509_FILETYPE_PEM); - if (count > 0) { - /* Out of security reasons that it can be easily - * overseen that there are multiple certificates located - * in a PEM-file we raise an error - */ - if (count > 1) { - g_set_error( - error, PV_CERT_ERROR, PV_CERT_ERROR_LOAD_ROOT_CA, - _("multiple certificates in one PEM file is not supported: '%s'"), - root_ca_path); - return NULL; - } - - /* PEM format so it's possible there are CRLs embedded - */ - (void)X509_load_crl_file(lookup, root_ca_path, X509_FILETYPE_PEM); - } else { - /* Maybe the root CA is stored in ASN1 format */ - count = X509_load_cert_file(lookup, root_ca_path, X509_FILETYPE_ASN1); - if (count != 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_LOAD_ROOT_CA, - _("failed to load root certificate from '%s'"), - root_ca_path); - return NULL; - } - } - } else { - /* Load certificates into @store from the hardcoded OpenSSL - * default paths - */ - if (X509_STORE_set_default_paths(store) != 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_LOAD_DEFAULT_CA, - _("failed to load system root certificates")); - return NULL; - } - } - - /* Error out if a CRL file was 
provided that has not at least one CRL*/ - if (load_crls_to_store(store, crl_paths, TRUE, error) < 0) - return NULL; - - /* Try to load CRLs from the provided untrusted certificates */ - if (load_crls_to_store(store, cert_with_crl_paths, FALSE, error) < 0) - return NULL; - - return g_steal_pointer(&store); -} - -STACK_OF_X509 *pv_get_x509_stack(const GSList *x509_with_path_list) -{ - g_autoslist(X509) certs = NULL; - g_autoptr(GError) error = NULL; - - certs = g_slist_map_PvX509WithPath_X509(x509_with_path_list, pv_x509_with_path_get_cert, - &error); - g_assert_no_error(error); - return g_slist_to_stack_of_X509(&certs); -} - -int pv_init_store_ctx(X509_STORE_CTX *ctx, X509_STORE *trusted, STACK_OF_X509 *chain, - GError **error) -{ - pv_wrapped_g_assert(ctx); - pv_wrapped_g_assert(trusted); - pv_wrapped_g_assert(chain); - - if (X509_STORE_CTX_init(ctx, trusted, NULL, chain) != 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("X509 store initialization failed: %s"), - X509_verify_cert_error_string(X509_STORE_CTX_get_error(ctx))); - return -1; - } - return 0; -} - -X509_STORE_CTX *pv_create_store_ctx(X509_STORE *trusted, STACK_OF_X509 *chain, GError **error) -{ - g_autoptr(X509_STORE_CTX) ctx = X509_STORE_CTX_new(); - - pv_wrapped_g_assert(trusted); - pv_wrapped_g_assert(chain); - - if (!ctx) - return NULL; - - if (pv_init_store_ctx(ctx, trusted, chain, error) < 0) - return NULL; - - return g_steal_pointer(&ctx); -} - -int pv_store_set_verify_param(X509_STORE *store, GError **error) -{ - g_autoptr(X509_VERIFY_PARAM) param = NULL; - unsigned long flags = X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL | - X509_V_FLAG_TRUSTED_FIRST | X509_V_FLAG_CHECK_SS_SIGNATURE | - X509_V_FLAG_X509_STRICT | X509_V_FLAG_POLICY_CHECK; - - /* Create a X509_VERIFY_PARAM structure, which specifies which checks - * should be done by the certificate verification operation - */ - param = X509_VERIFY_PARAM_new(); - if (!param) - g_abort(); - - /* The maximum depth level 
of the chain of trust for the verification of - * the IBM Z signing key is 2, i.e. IBM Z signing key -> intermediate CA - * -> root CA - */ - X509_VERIFY_PARAM_set_depth(param, 2); - - /* Set minimum allowed security level to at least 112 bits. */ - X509_VERIFY_PARAM_set_auth_level(param, PV_CERTS_SECURITY_LEVEL); - - /* Set verification purpose to 'Any Purpose' and specify that the - * associated trust setting of the default purpose should be used. - */ - if (X509_VERIFY_PARAM_set_purpose(param, X509_PURPOSE_ANY | X509_TRUST_DEFAULT) != 1) - goto error; - - /* Each certificate from the chain of trust must be checked against a - * CRL to see if it has been revoked. In addition, use trusted - * certificates first mode, check signature of the last certificate, - * strict mode, and verify the policies. - */ - if (X509_VERIFY_PARAM_set_flags(param, flags) != 1) - goto error; - - if (X509_STORE_set1_param(store, param) != 1) - goto error; - - return 0; - -error: - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("X509 store initialization failed")); - return -1; -} - -static int x509_name_entry_get0_data(X509_NAME_ENTRY *entry, const uint8_t **data, size_t *data_len) -{ - const ASN1_STRING *asn1_str; - int tmp_data_len; - - g_assert(data); - g_assert(data_len); - - asn1_str = X509_NAME_ENTRY_get_data(entry); - if (!asn1_str) - return -1; - - tmp_data_len = ASN1_STRING_length(asn1_str); - if (tmp_data_len < 0) - return -1; - - *data = ASN1_STRING_get0_data(asn1_str); - *data_len = (size_t)tmp_data_len; - return 0; -} - -/* The caller must not free *data! 
*/ -static int x509_name_get0_data_by_NID(X509_NAME *name, int nid, const uint8_t **data, - size_t *data_len) -{ - X509_NAME_ENTRY *entry = NULL; - int lastpos = -1; - - lastpos = X509_NAME_get_index_by_NID(name, nid, lastpos); - if (lastpos == -1) - return -1; - - entry = X509_NAME_get_entry(name, lastpos); - if (!entry) - return -1; - - if (x509_name_entry_get0_data(entry, data, data_len) < 0) - return -1; - - return 0; -} - -/* @y must be a NULL-terminated string */ -static gboolean x509_name_data_by_nid_equal(X509_NAME *name, int nid, const char *y) -{ - const uint8_t *data = NULL; - size_t y_len = strlen(y); - size_t data_len; - - if (x509_name_get0_data_by_NID(name, nid, &data, &data_len) < 0) - return FALSE; - - if (data_len != y_len) - return FALSE; - - return memcmp(data, y, data_len) == 0; -} - -/* Checks whether the subject of @cert is a IBM signing key subject. For this we - * must check that the subject is equal to: 'C = US, ST = New York, L = - * Poughkeepsie, O = International Business Machines Corporation, CN = - * International Business Machines Corporation' and the organization unit (OUT) - * must end with the suffix ' Key Signing Service'. 
- */ -static gboolean has_ibm_signing_subject(X509 *cert) -{ - X509_NAME *subject = X509_get_subject_name(cert); - /* X509_NAME_entry_count is safe to be used with NULL */ - int entry_count = X509_NAME_entry_count(subject); - g_autofree char *data_str = NULL; - const uint8_t *data; - size_t data_len; - - if (entry_count != PV_IMB_Z_SUBJECT_ENTRY_COUNT) - return FALSE; - - if (!x509_name_data_by_nid_equal(subject, NID_countryName, PV_IBM_Z_SUBJECT_COUNTRY_NAME)) - return FALSE; - - if (!x509_name_data_by_nid_equal(subject, NID_stateOrProvinceName, PV_IBM_Z_SUBJECT_STATE)) - return FALSE; - - if (!x509_name_data_by_nid_equal(subject, NID_localityName, PV_IBM_Z_SUBJECT_LOCALITY_NAME)) - return FALSE; - - if (!x509_name_data_by_nid_equal(subject, NID_organizationName, - PV_IBM_Z_SUBJECT_ORGANIZATION_NAME)) - return FALSE; - - if (!x509_name_data_by_nid_equal(subject, NID_commonName, PV_IBM_Z_SUBJECT_COMMON_NAME)) - return FALSE; - - if (x509_name_get0_data_by_NID(subject, NID_organizationalUnitName, &data, &data_len) < 0) - return FALSE; - - /* Make sure that data_str is null-terminated as in general it cannot be - * assumed that @data is null-terminated. - */ - data_str = g_strndup((const char *)data, data_len); - if (!g_str_has_suffix(data_str, PV_IBM_Z_SUBJECT_ORGANIZATIONAL_UNIT_NAME_SUFFIX)) - return FALSE; - - return TRUE; -} - -/* Return a list of all IBM Z signing key certificates in @certs and remove them - * from the chain. Return empty stack if no IBM Z signing key is found. - */ -STACK_OF_X509 *pv_remove_ibm_signing_certs(STACK_OF_X509 *certs) -{ - g_autoptr(STACK_OF_X509) ret = sk_X509_new_null(); - - for (int i = 0; i < sk_X509_num(certs); i++) { - X509 *cert = sk_X509_value(certs, i); - - g_assert(cert); - - if (!has_ibm_signing_subject(cert)) - continue; - - /* Remove this certificate from the list and change i-- as the - * array has changed - this is not beautiful, but right now the - * easiest solution I came up with. 
- */ - if (sk_X509_delete(certs, i--) != cert) - g_abort(); - - if (sk_X509_push(ret, g_steal_pointer(&cert)) == 0) - g_abort(); - } - - return g_steal_pointer(&ret); -} - -static X509_NAME *x509_name_reorder_attributes(const X509_NAME *name, const int nids[], - size_t nids_len) -{ - int entry_count = X509_NAME_entry_count(name); - g_autoptr(X509_NAME) ret = NULL; - - if (entry_count < 0) - return NULL; - - if (nids_len != (size_t)entry_count) - return NULL; - - ret = X509_NAME_new(); - if (!ret) - g_abort(); - - for (size_t i = 0; i < nids_len; i++) { - const X509_NAME_ENTRY *entry = NULL; - int nid = nids[i]; - int lastpos = -1; - - lastpos = X509_NAME_get_index_by_NID((X509_NAME *)name, nid, lastpos); - if (lastpos == -1) - return NULL; - - entry = X509_NAME_get_entry(name, lastpos); - if (!entry) - return NULL; - - if (X509_NAME_add_entry(ret, entry, -1, 0) != 1) - return NULL; - } - return g_steal_pointer(&ret); -} - -X509_NAME *pv_c2b_name(const X509_NAME *name) -{ - int nids[] = { NID_countryName, NID_organizationName, NID_organizationalUnitName, - NID_localityName, NID_stateOrProvinceName, NID_commonName }; - g_autoptr(X509_NAME) broken_name = NULL; - - g_assert(name); - - /* Try to reorder the attributes */ - broken_name = x509_name_reorder_attributes(name, nids, G_N_ELEMENTS(nids)); - if (broken_name) - return g_steal_pointer(&broken_name); - return X509_NAME_dup((X509_NAME *)name); -} - -static int security_level_to_bits(int level) -{ - static int security_bits[] = { 0, 80, 112, 128, 192, 256 }; - - g_assert(level > 0 && level < (int)G_N_ELEMENTS(security_bits)); - - return security_bits[level]; -} - -/* returns - * 0 when the certificate is valid, - * -1 when not yet valid, - * 1 when expired - */ -static int check_validity_period(const ASN1_TIME *not_before, const ASN1_TIME *not_after) -{ - if (X509_cmp_current_time(not_before) != -1) - return -1; - - if (X509_cmp_current_time(not_after) != 1) - return 1; - - return 0; -} - -static gboolean 
own_X509_NAME_ENTRY_equal(const X509_NAME_ENTRY *x, const X509_NAME_ENTRY *y) -{ - const ASN1_OBJECT *x_obj = X509_NAME_ENTRY_get_object(x); - const ASN1_STRING *x_data = X509_NAME_ENTRY_get_data(x); - const ASN1_OBJECT *y_obj = X509_NAME_ENTRY_get_object(y); - const ASN1_STRING *y_data = X509_NAME_ENTRY_get_data(y); - int x_len = ASN1_STRING_length(x_data); - int y_len = ASN1_STRING_length(y_data); - - if (x_len < 0 || x_len != y_len) - return FALSE; - - /* ASN1_STRING_cmp(x_data, y_data) == 0 doesn't work because it also - * compares the type, which is sometimes different. - */ - return OBJ_cmp(x_obj, y_obj) == 0 && - memcmp(ASN1_STRING_get0_data(x_data), ASN1_STRING_get0_data(y_data), - (unsigned long)x_len) == 0; -} - -static gboolean own_X509_NAME_equal(const X509_NAME *x, const X509_NAME *y) -{ - int x_count = X509_NAME_entry_count(x); - int y_count = X509_NAME_entry_count(y); - - if (x != y && (!x || !y)) - return FALSE; - - if (x_count != y_count) - return FALSE; - - for (int i = 0; i < x_count; i++) { - const X509_NAME_ENTRY *entry_i = X509_NAME_get_entry(x, i); - gboolean entry_found = FALSE; - - for (int j = 0; j < y_count; j++) { - const X509_NAME_ENTRY *entry_j = X509_NAME_get_entry(y, j); - - if (own_X509_NAME_ENTRY_equal(entry_i, entry_j)) { - entry_found = TRUE; - break; - } - } - - if (!entry_found) - return FALSE; - } - return TRUE; -} - -/* Verify that the used public key algorithm matches the subject signature - * algorithm - */ -static int check_signature_algo_match(const EVP_PKEY *pkey, const X509 *subject, GError **error) -{ - int pkey_nid; - - if (!pkey) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_NO_PUBLIC_KEY, _("no public key")); - return -1; - } - - if (OBJ_find_sigid_algs(X509_get_signature_nid(subject), NULL, &pkey_nid) != 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INVALID_SIGNATURE_ALGORITHM, - _("unsupported signature algorithm")); - return -1; - } - - if (EVP_PKEY_type(pkey_nid) != EVP_PKEY_base_id(pkey)) { - 
g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_SIGNATURE_ALGORITHM_MISMATCH, - _("signature algorithm mismatch")); - return -1; - } - - return 0; -} - -/* It's almost the same as X509_check_issed from OpenSSL does except that we - * don't check the key usage of the potential issuer. This means we check: - * 1. issuer_name(cert) == subject_name(issuer) - * 2. Check whether the akid(cert) (if available) matches the issuer skid - * 3. Check that the cert algrithm matches the subject algorithm - * 4. Verify the signature of certificate @cert is using the public key of - * @issuer. - */ -static int check_host_key_issued(X509 *cert, X509 *issuer, GError **error) -{ - const X509_NAME *issuer_subject = X509_get_subject_name(issuer); - const X509_NAME *cert_issuer = X509_get_issuer_name(cert); - g_autoptr(AUTHORITY_KEYID) akid = NULL; - - /* We cannot use X509_NAME_cmp() because it considers the order of the - * X509_NAME_Entries. - */ - if (!own_X509_NAME_equal(issuer_subject, cert_issuer)) { - g_autofree char *issuer_subject_str = pv_X509_NAME_oneline(issuer_subject); - g_autofree char *cert_issuer_str = pv_X509_NAME_oneline(cert_issuer); - - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_CERT_SUBJECT_ISSUER_MISMATCH, - _("Subject issuer mismatch:\n'%s'\n'%s'"), issuer_subject_str, - cert_issuer_str); - return -1; - } - - akid = X509_get_ext_d2i(cert, NID_authority_key_identifier, NULL, NULL); - if (akid && X509_check_akid(issuer, akid) != X509_V_OK) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_SKID_AKID_MISMATCH, - _("AKID mismatch")); - return -1; - } - - if (check_signature_algo_match(X509_get0_pubkey(issuer), cert, error) < 0) - return -1; - - if (X509_verify(cert, X509_get0_pubkey(issuer)) != 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_CERT_SIGNATURE_INVALID, - _("Signature verification failed")); - return -1; - } - - return 0; -} - -static gboolean is_cert_revoked(X509 *cert, X509_CRL *crl) -{ - X509_REVOKED *revoked = NULL; - int rc; - - if 
(!cert || !crl) - g_abort(); - - rc = X509_CRL_get0_by_serial(crl, &revoked, (ASN1_INTEGER *)X509_get0_serialNumber(cert)); - if (rc == 0) - return FALSE; - - if (revoked) - return TRUE; - - return FALSE; -} - -/* Assumptions are that the issuer_crt and issuer_crl is a trusted IBM Z - * signing certificate/revocation list. This function verifies a host-key - * document. To do so multiple steps are required: - * - * 1. issuer(host_key) == subject(issuer_crt) - * 2. Signature verification - * 3. @host_key must not be expired - * 4. @host_key must not be revoked - */ -int pv_verify_host_key(X509 *host_key, GSList *issuer_pairs, int verify_flags, int level, - GError **error) -{ - const int exp_security_bits = security_level_to_bits(level); - EVP_PKEY *pkey; - gboolean successfully_checked = FALSE; - int pkey_security_bits; - - g_assert(host_key); - pkey = X509_get0_pubkey(host_key); - - if (!pkey) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("failed to retrieve public key")); - return -1; - } - - /* check key level, if necessary */ - pkey_security_bits = EVP_PKEY_security_bits(pkey); - if (exp_security_bits > 0 && pkey_security_bits < exp_security_bits) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_VERIFICATION_FAILED, - _("not enough bits of security (%d, %d expected)"), pkey_security_bits, - exp_security_bits); - return -1; - } - - if (!(verify_flags & X509_V_FLAG_NO_CHECK_TIME)) { - const ASN1_TIME *last = X509_get0_notBefore(host_key); - const ASN1_TIME *next = X509_get0_notAfter(host_key); - - if (!last || !next || check_validity_period(last, next)) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INVALID_VALIDITY_PERIOD, - _("validity period is not valid")); - return -1; - } - } else { - verify_flags &= ~X509_V_FLAG_NO_CHECK_TIME; - } - - /* Verify that the host_key was issued by a certificate and that it - * wasn't revoked. 
- */ - for (GSList *iterator = issuer_pairs; iterator; iterator = iterator->next) { - const PvX509Pair *pair = iterator->data; - STACK_OF_X509_CRL *issuer_crls = NULL; - X509 *issuer_cert = NULL; - - g_assert(pair); - - issuer_cert = pair->cert; - issuer_crls = pair->crls; - - g_assert(issuer_cert); - - /* Verify that the issuer(host_key) == subject(issuer_cert) and - * that the signature is valid - */ - if (check_host_key_issued(host_key, issuer_cert, NULL) < 0) - continue; - - /* Check against CRL */ - if (verify_flags & X509_V_FLAG_CRL_CHECK) { - gboolean crl_checked = FALSE; - - verify_flags &= ~X509_V_FLAG_CRL_CHECK; - for (int i = 0; i < sk_X509_CRL_num(issuer_crls); i++) { - X509_CRL *issuer_crl = sk_X509_CRL_value(issuer_crls, i); - - g_assert(issuer_crl); - - if (is_cert_revoked(host_key, issuer_crl)) { - g_set_error(error, PV_CERT_ERROR, - PV_CERT_ERROR_CERT_REVOKED, - _("certificate revoked")); - return -1; - } - - crl_checked = TRUE; - } - - if (!crl_checked) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_NO_CRL, - _("no valid CRL found")); - return -1; - } - successfully_checked = TRUE; - break; - } - } - - if (!successfully_checked) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_NO_ISSUER_IBM_Z_FOUND, - _("no IBM Z signing key that issued this host-key document found")); - return -1; - } - - /* were some unsupported flags specified? 
*/ - g_assert(verify_flags == 0); - return 0; -} - -int pv_verify_cert(X509_STORE_CTX *ctx, X509 *cert, GError **error) -{ - int rc; - - pv_wrapped_g_assert(cert); - pv_wrapped_g_assert(ctx); - - X509_STORE_CTX_set_cert(ctx, cert); - rc = X509_verify_cert(ctx); - if (rc != 1) { - X509 *tmp_cert = NULL; - - tmp_cert = pv_X509_STORE_CTX_get_current_cert(ctx); - if (tmp_cert) { - g_autofree char *subj_name = - pv_X509_NAME_oneline(X509_get_subject_name(tmp_cert)); - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_VERIFICATION_FAILED, - _("failed to verify certificate '%s': %s"), subj_name, - X509_verify_cert_error_string(X509_STORE_CTX_get_error(ctx))); - } else { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_VERIFICATION_FAILED, - _("failed to verify certificate: %s"), - X509_verify_cert_error_string(X509_STORE_CTX_get_error(ctx))); - } - return -1; - } - return 0; -} - -/* Verify that: subject(issuer) == issuer(crl) and SKID(issuer) == AKID(crl) */ -static int check_crl_issuer(X509_CRL *crl, X509 *issuer, GError **error) -{ - const X509_NAME *crl_issuer = X509_CRL_get_issuer(crl); - const X509_NAME *issuer_subject = X509_get_subject_name(issuer); - AUTHORITY_KEYID *akid = NULL; - - if (!own_X509_NAME_equal(issuer_subject, crl_issuer)) { - g_autofree char *issuer_subject_str = pv_X509_NAME_oneline(issuer_subject); - g_autofree char *crl_issuer_str = pv_X509_NAME_oneline(crl_issuer); - - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_CRL_SUBJECT_ISSUER_MISMATCH, - _("issuer mismatch:\n%s\n%s"), issuer_subject_str, crl_issuer_str); - return -1; - } - - /* If AKID(@crl) is specified it must match with SKID(@issuer) */ - akid = X509_CRL_get_ext_d2i(crl, NID_authority_key_identifier, NULL, NULL); - if (akid && X509_check_akid(issuer, akid) != X509_V_OK) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_SKID_AKID_MISMATCH, - _("AKID mismatch")); - return -1; - } - - return 0; -} - -int pv_verify_crl(X509_CRL *crl, X509 *cert, int verify_flags, GError **error) -{ - 
EVP_PKEY *pkey = X509_get0_pubkey(cert); - - g_assert(crl); - - if (!pkey) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("failed to retrieve public key from the certificate")); - return -1; - } - - /* check that the @crl issuer matches with the subject name of @cert*/ - if (check_crl_issuer(crl, cert, error) < 0) - return -1; - - /* verify the validity period of the CRL */ - if (!(verify_flags & X509_V_FLAG_NO_CHECK_TIME)) { - const ASN1_TIME *last = X509_CRL_get0_lastUpdate(crl); - const ASN1_TIME *next = X509_CRL_get0_nextUpdate(crl); - - if (!last || !next || check_validity_period(last, next)) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INVALID_VALIDITY_PERIOD, - _("validity period is not valid")); - return -1; - } - } else { - verify_flags &= ~X509_V_FLAG_NO_CHECK_TIME; - } - - /* verify the signature */ - if (X509_CRL_verify(crl, pkey) != 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_CRL_SIGNATURE_INVALID, - _("signature is not valid")); - return -1; - } - g_assert(verify_flags == 0); - return 0; -} - -int pv_check_chain_parameters(const STACK_OF_X509 *chain, GError **error) -{ - const X509_NAME *ca_x509_subject = NULL; - g_autofree char *ca_subject = NULL; - int len = sk_X509_num(chain); - X509 *ca = NULL; - - if (len < 2) { - g_set_error( - error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("there must be at least one root and one leaf certificate in the chain of trust")); - return -1; - } - - /* get the root certificate of the chain of trust */ - ca = sk_X509_value(chain, len - 1); - if (!ca) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("no root certificate found")); - return -1; - } - - ca_x509_subject = X509_get_subject_name(ca); - if (!ca_x509_subject) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("subject of the root CA cannot be retrieved")); - return -1; - } - - ca_subject = pv_X509_NAME_oneline(ca_x509_subject); - if (!ca_subject) { - g_set_error(error, PV_CERT_ERROR, 
PV_CERT_ERROR_INTERNAL, - _("subject name of the root CA cannot be retrieved")); - return -1; - } - g_info(_("Root CA used: '%s'"), ca_subject); - - return 0; -} - -/* Given a certificate @cert try to find valid revocation lists in @ctx. If no - * valid CRL was found NULL is returned. - */ -STACK_OF_X509_CRL *pv_store_ctx_find_valid_crls(X509_STORE_CTX *ctx, X509 *cert, GError **error) -{ - g_autoptr(STACK_OF_X509_CRL) ret = NULL; - const int verify_flags = 0; - X509_NAME *subject = NULL; - - pv_wrapped_g_assert(ctx); - pv_wrapped_g_assert(cert); - - subject = X509_get_subject_name(cert); - if (!subject) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_MALFORMED_CERTIFICATE, - _("certificate is malformed")); - return NULL; - } - - ret = pv_X509_STORE_CTX_get1_crls(ctx, subject); - if (!ret) { - /* Workaround to fix the mismatch between issuer name of the - * IBM Z signing CRLs and the IBM Z signing key subject name. - */ - g_autoptr(X509_NAME) broken_subject = pv_c2b_name(subject); - - ret = pv_X509_STORE_CTX_get1_crls(ctx, broken_subject); - if (!ret) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_NO_CRL, _("no CRL found")); - g_info("ERROR: %s", (*error)->message); - return NULL; - } - } - - /* Filter out non-valid CRLs for @cert */ - for (int i = 0; i < sk_X509_CRL_num(ret); i++) { - X509_CRL *crl = sk_X509_CRL_value(ret, i); - - g_assert(crl); - - /* If @crl is not valid remove it from the array and log a - * warning. 
- */ - if (pv_verify_crl(crl, cert, verify_flags, error) < 0) { - g_assert(error); - g_warning(_("CRL is not valid: %s"), (*error)->message); - g_clear_error(error); - - /* Remove this certificate from the list and change i-- as the - * array has changed - this is not beautfiul, but right now the - * easiest solution I came up with - */ - if (sk_X509_CRL_delete(ret, i--) != crl) - g_abort(); - - g_clear_pointer(&crl, X509_CRL_free); - } - } - - if (sk_X509_CRL_num(ret) < 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_NO_CRL, _("no valid CRL found")); - return NULL; - } - return g_steal_pointer(&ret); -} - -/* - * Finds the IBM signing key in the stack. - * Error out, if there is not exactly one IBM signing key. - */ -static STACK_OF_X509 *get_ibm_signing_certs(STACK_OF_X509 *certs, GError **error) -{ - g_autoptr(STACK_OF_X509) ibm_signing_certs = NULL; - int ibm_signing_certs_count; - - /* Find all IBM Z signing keys and remove them from the chain as we - * have to verify that they're valid. The last step of the chain of - * trust verification must be done manually, as the IBM Z signing keys - * are not marked as (intermediate) CA and therefore the standard - * `X509_verify_cert` function of OpenSSL cannot be used to verify the - * actual host-key documents. 
- */ - ibm_signing_certs = pv_remove_ibm_signing_certs(certs); - ibm_signing_certs_count = sk_X509_num(ibm_signing_certs); - if (ibm_signing_certs_count < 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_NO_IBM_Z_SIGNING_KEY, - _("Specify at least one IBM Z signing key")); - return NULL; - } else if (ibm_signing_certs_count > 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_NO_IBM_Z_SIGNING_KEY, - _("Specify only one IBM Z signing key")); - return NULL; - } - g_assert(ibm_signing_certs_count == 1); - - return g_steal_pointer(&ibm_signing_certs); -} - -static gboolean download_crls(X509_STORE *trusted, PvCertWithPathList *host_key_certs_with_path, - GError **error) -{ - g_autoptr(STACK_OF_X509_CRL) downloaded_ibm_signing_crls = NULL; - - /* Set up the download routine for the lookup of CRLs. */ - pv_store_setup_crl_download(trusted); - - /* Try to download the CRLs of the IBM Z signing certificates - * specified in the host-key documents. Ignore download errors - * as it's still possible that a CRL is specified via command - * line. - */ - downloaded_ibm_signing_crls = pv_try_load_crls_by_certs(host_key_certs_with_path); - - /* Add the downloaded CRLs to the store so they can be used for - * the verification later. 
- */ - for (int i = 0; i < sk_X509_CRL_num(downloaded_ibm_signing_crls); i++) { - X509_CRL *crl = sk_X509_CRL_value(downloaded_ibm_signing_crls, i); - - if (X509_STORE_add_crl(trusted, crl) != 1) { - g_set_error(error, PV_CERT_ERROR, PV_CERT_ERROR_INTERNAL, - _("failed to load CRL")); - return FALSE; - } - } - return TRUE; -} - -gboolean pv_verify_host_key_doc(PvCertWithPathList *host_key_certs_with_path, X509_STORE *trusted, - STACK_OF_X509 *untrusted_certs, gboolean online, GError **error) -{ - g_autoslist(PvX509Pair) ibm_z_pairs = NULL; - g_autoptr(STACK_OF_X509) ibm_signing_certs = NULL; - g_autoptr(X509_STORE_CTX) ctx = NULL; - - pv_wrapped_g_assert(host_key_certs_with_path); - pv_wrapped_g_assert(trusted); - pv_wrapped_g_assert(untrusted_certs); - - if (online && !download_crls(trusted, host_key_certs_with_path, error)) - return -1; - - /* Find all IBM Z signing keys and remove them from the chain as we - * have to verify that they're valid. The last step of the chain of - * trust verification must be done manually, as the IBM Z signing keys - * are not marked as (intermediate) CA and therefore the standard - * `X509_verify_cert` function of OpenSSL cannot be used to verify the - * actual host-key documents. - */ - ibm_signing_certs = get_ibm_signing_certs(untrusted_certs, error); - if (!ibm_signing_certs) - return -1; - - if (pv_store_set_verify_param(trusted, error) < 0) - return -1; - - ctx = pv_create_store_ctx(trusted, untrusted_certs, error); - if (!ctx) - return -1; - /* - * Get all IBM-signing-[key,crls] pairs. 
- * NOTE: Currently there is only one signing-key allowed - */ - for (int i = 0; i < sk_X509_num(ibm_signing_certs); i++) { - g_autoptr(X509) ibm_signing_cert = sk_X509_pop(ibm_signing_certs); - g_autoptr(STACK_OF_X509_CRL) ibm_signing_crls = NULL; - PvX509Pair *ibm_z_pair = NULL; - - /* - * Get CRLs for the IBM signing cert - */ - ibm_signing_crls = pv_store_ctx_find_valid_crls(ctx, ibm_signing_cert, error); - if (!ibm_signing_crls) { - g_prefix_error(error, _("IBM Z signing key: ")); - return -1; - } - - /* build the pair and add it to the list */ - ibm_z_pair = pv_x509_pair_new_take(&ibm_signing_cert, &ibm_signing_crls); - g_assert(!ibm_signing_cert); - g_assert(!ibm_signing_crls); - ibm_z_pairs = g_slist_append(ibm_z_pairs, ibm_z_pair); - } - - /* Verify host-key documents by using the IBM Z signing - * certificates and the corresponding certificate revocation - * lists. - */ - for (GSList *iterator = host_key_certs_with_path; iterator; iterator = iterator->next) { - PvX509WithPath *host_key_with_path = iterator->data; - const char *host_key_path = host_key_with_path->path; - X509 *host_key = host_key_with_path->cert; - int flags = X509_V_FLAG_CRL_CHECK; - - if (pv_verify_host_key(host_key, ibm_z_pairs, flags, PV_CERTS_SECURITY_LEVEL, - error) < 0) { - g_prefix_error(error, "'%s': ", host_key_path); - return -1; - } - } - - /* Verify that all IBM Z signing keys are trustable. - * For this we must check: - * - * 1. Can a chain of trust be established ending in a root CA - * 2. Is the correct root CA used? It has either to be the - * System CA or the root CA specified via command line. 
- */ - for (GSList *iterator = ibm_z_pairs; iterator; iterator = iterator->next) { - const PvX509Pair *ibm_z_pair = iterator->data; - - if (pv_verify_cert(ctx, ibm_z_pair->cert, error) < 0) - return -1; - if (pv_check_chain_parameters(X509_STORE_CTX_get0_chain(ctx), error) < 0) - return -1; - /* re-init ctx for the next verification */ - X509_STORE_CTX_cleanup(ctx); - if (pv_init_store_ctx(ctx, trusted, untrusted_certs, error) != 0) - return -1; - } - return 0; -} - -int pv_verify_host_key_docs_by_path(char **host_key_paths, char *optional_root_ca_path, - char **crl_paths, char **untrusted_cert_paths, gboolean online, - GError **error) -{ - g_autoslist(PvX509WithPath) untrusted_certs_with_path = NULL, host_key_certs = NULL; - g_autoptr(STACK_OF_X509) untrusted_certs = NULL; - g_autoptr(X509_STORE) trusted = NULL; - - pv_wrapped_g_assert(host_key_paths); - pv_wrapped_g_assert(untrusted_cert_paths); - - /* Load trusted root CAs of the system if and only if @root_ca_path is - * NULL, otherwise use the root CA specified by @root_ca_path. - */ - trusted = pv_store_setup(optional_root_ca_path, crl_paths, untrusted_cert_paths, error); - if (!trusted) - return -1; - - /* Load all untrusted certificates (e.g. IBM Z signing key and - * intermediate CA) that are required to establish a chain of - * trust starting from the host-key document up to the root CA (if not - * otherwise specified that can be one of the system wide installed - * root CAs, e.g. DigiCert). 
- */ - untrusted_certs_with_path = pv_load_certificates(untrusted_cert_paths, error); - if (!untrusted_certs_with_path) - return -1; - /* Convert to STACK_OF(X509) */ - untrusted_certs = pv_get_x509_stack(untrusted_certs_with_path); - - host_key_certs = pv_load_certificates(host_key_paths, error); - if (!host_key_certs) - return -1; - - return pv_verify_host_key_doc(host_key_certs, trusted, untrusted_certs, online, error); -} diff -Nru s390-tools-2.31.0/libpv/common.c s390-tools-2.33.1/libpv/common.c --- s390-tools-2.31.0/libpv/common.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libpv/common.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,45 +0,0 @@ -/* - * Libpv common functions. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - * - */ -/* Must be included before any other header */ -#include "config.h" - -#include "libpv/common.h" -#include "libpv/cert.h" -#include "libpv/curl.h" - -/* setup and tear down */ -int pv_init(void) -{ - static size_t openssl_initalized; - - if (g_once_init_enter(&openssl_initalized)) { - if (OPENSSL_VERSION_NUMBER < 0x1000100fL) - g_assert_not_reached(); -#if OPENSSL_VERSION_NUMBER < 0x10100000L - SSL_library_init(); - SSL_load_error_strings(); -#else - OPENSSL_init_crypto(0, NULL); -#endif - - if (pv_curl_init() != 0) - return -1; - - pv_cert_init(); - g_once_init_leave(&openssl_initalized, 1); - } - return 0; -} - -void pv_cleanup(void) -{ - pv_cert_cleanup(); - pv_curl_cleanup(); -} diff -Nru s390-tools-2.31.0/libpv/crypto.c s390-tools-2.33.1/libpv/crypto.c --- s390-tools-2.31.0/libpv/crypto.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libpv/crypto.c 2024-05-28 08:26:36.000000000 +0200 @@ -17,7 +17,6 @@ #include "lib/zt_common.h" #include "libpv/crypto.h" #include "libpv/glib-helper.h" -#include "libpv/hash.h" char *pv_get_openssl_errors(void) { @@ -46,59 +45,6 @@ return 1; } 
-GBytes *pv_generate_rand_data(size_t size, GError **error) -{ - g_autofree uint8_t *data = NULL; - - if (size > INT_MAX) { - g_set_error_literal(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_RANDOMIZATION, - "Too many random data requested. Split it up"); - OPENSSL_clear_free(data, size); - return NULL; - } - - data = g_malloc(size); - if (RAND_bytes(data, (int)size) != 1) { - g_set_error_literal(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_RANDOMIZATION, - "The required amount of random data is not available"); - return NULL; - } - - return pv_sec_gbytes_new_take(g_steal_pointer(&data), size); -} - -GBytes *pv_generate_key(const EVP_CIPHER *cipher, GError **error) -{ - int size; - - pv_wrapped_g_assert(cipher); - - size = EVP_CIPHER_key_length(cipher); - if (size <= 0) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_KEYGENERATION, - "Unknown cipher"); - return NULL; - } - - return pv_generate_rand_data((guint)size, error); -} - -GBytes *pv_generate_iv(const EVP_CIPHER *cipher, GError **error) -{ - int size; - - pv_wrapped_g_assert(cipher); - - size = EVP_CIPHER_iv_length(cipher); - if (size <= 0) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_KEYGENERATION, - "Unknown cipher"); - return NULL; - } - - return pv_generate_rand_data((guint)size, error); -} - static int64_t pv_gcm_encrypt_decrypt(GBytes *input, GBytes *aad, const PvCipherParms *parms, GBytes **output, GBytes **tagp, enum PvCryptoMode mode, GError **error) @@ -360,168 +306,6 @@ return pv_sec_gbytes_new_take(g_steal_pointer(&derived_key), derived_key_len); } -EVP_PKEY *pv_generate_ec_key(int nid, GError **error) -{ - g_autoptr(EVP_PKEY_CTX) ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL); - g_autoptr(EVP_PKEY) ret = NULL; - - g_assert(ctx); - - if (EVP_PKEY_keygen_init(ctx) != 1) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_KEYGENERATION, - _("EC key could not be auto-generated")); - return NULL; - } - - if (EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, nid) != 1) { - g_set_error(error, 
PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_KEYGENERATION, - _("EC key could not be auto-generated")); - return NULL; - } - - if (EVP_PKEY_keygen(ctx, &ret) != 1) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_KEYGENERATION, - _("EC key could not be auto-generated")); - return NULL; - } - - return g_steal_pointer(&ret); -} - -/* Convert a EVP_PKEY to the key format used in the PV header */ -PvEcdhPubKey *pv_evp_pkey_to_ecdh_pub_key(EVP_PKEY *key, GError **error) -{ - g_autofree PvEcdhPubKey *ret = g_new0(PvEcdhPubKey, 1); - g_autoptr(BIGNUM) pub_x_big = NULL, pub_y_big = NULL; - g_autoptr(EC_KEY) ec_key = NULL; - const EC_POINT *pub_key; - const EC_GROUP *grp; - - pv_wrapped_g_assert(key); - - ec_key = EVP_PKEY_get1_EC_KEY(key); - if (!ec_key) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_INTERNAL, - _("Key has the wrong type")); - return NULL; - } - - pub_key = EC_KEY_get0_public_key(ec_key); - if (!pub_key) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_INTERNAL, - _("Failed to get public key")); - return NULL; - } - - grp = EC_KEY_get0_group(ec_key); - if (!grp) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_INTERNAL, - _("Failed to get EC group")); - return NULL; - } - - pub_x_big = BN_new(); - if (!pub_x_big) - g_abort(); - - pub_y_big = BN_new(); - if (!pub_y_big) - g_abort(); - - if (EC_POINT_get_affine_coordinates_GFp(grp, pub_key, pub_x_big, pub_y_big, NULL) != 1) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_INTERNAL, - _("Cannot convert key to internal format")); - return NULL; - } - - if (BN_bn2binpad(pub_x_big, ret->x, sizeof(ret->x)) < 0) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_INTERNAL, - _("Cannot convert key to internal format")); - return NULL; - } - - if (BN_bn2binpad(pub_y_big, ret->y, sizeof(ret->y)) < 0) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_INTERNAL, - _("Cannot convert key to internal format")); - return NULL; - } - - return g_steal_pointer(&ret); -} - -static GBytes 
*derive_key(EVP_PKEY *key1, EVP_PKEY *key2, GError **error) -{ - g_autoptr(EVP_PKEY_CTX) ctx = NULL; - uint8_t *data = NULL; - size_t data_size, key_size; - - ctx = EVP_PKEY_CTX_new(key1, NULL); - if (!ctx) - g_abort(); - - if (EVP_PKEY_derive_init(ctx) != 1) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_INTERNAL, - _("Key derivation failed")); - return NULL; - } - - if (EVP_PKEY_derive_set_peer(ctx, key2) != 1) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_INTERNAL, - _("Key derivation failed")); - return NULL; - } - - /* Determine buffer length */ - if (EVP_PKEY_derive(ctx, NULL, &key_size) != 1) { - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_DERIVE, - _("Key derivation failed")); - return NULL; - } - - data_size = key_size; - data = OPENSSL_malloc(data_size); - if (!data) - g_abort(); - if (EVP_PKEY_derive(ctx, data, &data_size) != 1) { - OPENSSL_clear_free(data, data_size); - g_set_error(error, PV_CRYPTO_ERROR, PV_CRYPTO_ERROR_DERIVE, - _("Key derivation failed")); - return NULL; - } - - g_assert(data_size == key_size); - return pv_sec_gbytes_new_take(g_steal_pointer(&data), data_size); -} - -GBytes *pv_derive_exchange_key(EVP_PKEY *cust, EVP_PKEY *host, GError **error) -{ - const guint8 append[] = { 0x00, 0x00, 0x00, 0x01 }; - g_autoptr(GBytes) derived_key = NULL, ret = NULL; - g_autoptr(GByteArray) der_key_ga = NULL; - g_autofree uint8_t *raw = NULL; - size_t raw_len; - - pv_wrapped_g_assert(cust); - pv_wrapped_g_assert(host); - - derived_key = derive_key(cust, host, error); - if (!derived_key) - return NULL; - - der_key_ga = g_bytes_unref_to_array(g_steal_pointer(&derived_key)); - /* ANSI X.9.63-2011: 66 bytes x with leading 7 bits and - * concatenate 32 bit int '1' - */ - der_key_ga = g_byte_array_append(der_key_ga, append, sizeof(append)); - /* free GBytesArray and get underlying data */ - raw_len = der_key_ga->len; - raw = g_byte_array_free(g_steal_pointer(&der_key_ga), FALSE); - - ret = pv_sha256_hash(raw, raw_len, error); - 
OPENSSL_cleanse(raw, raw_len); - return g_steal_pointer(&ret); -} - GQuark pv_crypto_error_quark(void) { return g_quark_from_static_string("pv-crypto-error-quark"); diff -Nru s390-tools-2.31.0/libpv/curl.c s390-tools-2.33.1/libpv/curl.c --- s390-tools-2.31.0/libpv/curl.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libpv/curl.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,116 +0,0 @@ -/* - * Libcurl utils - * - * Copyright IBM Corp. 2020 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -/* Must be included before any other header */ -#include "config.h" - -#include -#include - -#include "lib/zt_common.h" -#include "libpv/curl.h" - -struct UserData { - GByteArray *buffer; - uint max_size; -}; - -static size_t write_callback(char *ptr, size_t size, size_t nmemb, void *userdata) -{ - g_assert(userdata); - struct UserData *data = (struct UserData *)userdata; - GByteArray *buffer = data->buffer; - uint64_t actual_size; - size_t err; - - g_assert(buffer); - - if (!g_uint64_checked_mul(&actual_size, size, nmemb)) - g_abort(); - - /* Signal an error condition by returning a amount that differs - * from the amount passed to the callback. This results in a - * CURLE_WRITE_ERROR. 
- */ - err = actual_size + 1; - - if (actual_size > G_MAXUINT) - return err; - - data->buffer = g_byte_array_append(buffer, (uint8_t *)ptr, (uint)actual_size); - if (data->buffer->len > data->max_size) - return err; - - return actual_size; -} - -int pv_curl_init(void) -{ - if (curl_global_init(CURL_GLOBAL_ALL) != 0) - return -1; - return 0; -} - -void pv_curl_cleanup(void) -{ - curl_global_cleanup(); -} - -GByteArray *curl_download(const char *url, long timeout_ms, uint max_size, GError **err) -{ - g_autoptr(GByteArray) ret = NULL; - g_autoptr(CURL) handle = NULL; - g_autofree char *agent = NULL; - struct UserData userdata; - CURLcode rc; - - /* set up curl session */ - handle = curl_easy_init(); - if (!handle) - g_abort(); - - /* follow redirection */ - rc = curl_easy_setopt(handle, CURLOPT_FOLLOWLOCATION, 1L); - if (rc != CURLE_OK) - goto curl_err; - rc = curl_easy_setopt(handle, CURLOPT_TIMEOUT_MS, timeout_ms); - if (rc != CURLE_OK) - goto curl_err; - rc = curl_easy_setopt(handle, CURLOPT_NOSIGNAL, 1L); - if (rc != CURLE_OK) - goto curl_err; - agent = g_strdup_printf("%s/%s", GETTEXT_PACKAGE, RELEASE_STRING); - rc = curl_easy_setopt(handle, CURLOPT_USERAGENT, agent); - if (rc != CURLE_OK) - goto curl_err; - rc = curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, write_callback); - if (rc != CURLE_OK) - goto curl_err; - ret = g_byte_array_new(); - userdata.buffer = ret; - userdata.max_size = max_size; - rc = curl_easy_setopt(handle, CURLOPT_WRITEDATA, (void *)&userdata); - if (rc != CURLE_OK) - goto curl_err; - rc = curl_easy_setopt(handle, CURLOPT_URL, url); - if (rc != CURLE_OK) - goto curl_err; - - rc = curl_easy_perform(handle); - if (rc != CURLE_OK) { - g_set_error(err, PV_CURL_ERROR, PV_CURL_ERROR_DOWNLOAD_FAILED, - _("download failed: %s"), curl_easy_strerror(rc)); - return NULL; - } - - return g_steal_pointer(&ret); -curl_err: - g_set_error(err, PV_CURL_ERROR, PV_CURL_ERROR_CURL_INIT_FAILED, - _("cURL initialization failed: %s"), curl_easy_strerror(rc)); - 
return NULL; -} diff -Nru s390-tools-2.31.0/libpv/hash.c s390-tools-2.33.1/libpv/hash.c --- s390-tools-2.31.0/libpv/hash.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libpv/hash.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,153 +0,0 @@ -/* - * Hashing functions. - - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -/* Must be included before any other header */ -#include "config.h" - -#include "libpv/crypto.h" -#include "libpv/hash.h" - -GBytes *pv_sha256_hash(uint8_t *buf, size_t size, GError **error) -{ - g_autoptr(EVP_MD_CTX) ctx = NULL; - - ctx = pv_digest_ctx_new(EVP_sha256(), error); - if (!ctx) - return NULL; - - if (pv_digest_ctx_update_raw(ctx, buf, size, error) != 0) - return NULL; - - return pv_digest_ctx_finalize(ctx, error); -} - -EVP_MD_CTX *pv_digest_ctx_new(const EVP_MD *md, GError **error) -{ - g_autoptr(EVP_MD_CTX) ctx = EVP_MD_CTX_new(); - - if (!ctx) { - g_set_error(error, PV_HASH_ERROR, PV_HASH_ERROR_INTERNAL, - _("Hash context generation failed")); - return NULL; - } - - if (EVP_DigestInit_ex(ctx, md, NULL) != 1) { - g_set_error(error, PV_HASH_ERROR, PV_HASH_ERROR_INTERNAL, - _("EVP_DigestInit_ex failed")); - return NULL; - } - - return g_steal_pointer(&ctx); -} - -int pv_digest_ctx_update_raw(EVP_MD_CTX *ctx, const uint8_t *buf, size_t size, GError **error) -{ - if (!buf || size == 0) - return 0; - - if (EVP_DigestUpdate(ctx, buf, size) != 1) { - g_set_error(error, PV_HASH_ERROR, PV_HASH_ERROR_INTERNAL, - _("EVP_DigestUpdate failed")); - return -1; - } - return 0; -} - -int pv_digest_ctx_update(EVP_MD_CTX *ctx, GBytes *data, GError **error) -{ - const uint8_t *buf; - size_t buf_size; - - if (!data) - return 0; - buf = g_bytes_get_data((GBytes *)data, &buf_size); - return pv_digest_ctx_update_raw(ctx, buf, buf_size, error); -} - -GBytes *pv_digest_ctx_finalize(EVP_MD_CTX *ctx, GError **error) -{ - int 
md_size = EVP_MD_size(EVP_MD_CTX_md(ctx)); - g_autofree uint8_t *digest = NULL; - unsigned int digest_size; - - g_assert(md_size > 0); - - digest = g_malloc0((uint)md_size); - if (EVP_DigestFinal_ex(ctx, digest, &digest_size) != 1) { - g_set_error(error, PV_HASH_ERROR, PV_HASH_ERROR_INTERNAL, - _("EVP_DigestFinal_ex failed")); - return NULL; - } - - g_assert(digest_size == (uint)md_size); - return g_bytes_new_take(g_steal_pointer(&digest), digest_size); -} - -HMAC_CTX *pv_hmac_ctx_new(GBytes *key, const EVP_MD *md, GError **error) -{ - g_autoptr(HMAC_CTX) ctx = HMAC_CTX_new(); - const uint8_t *key_data; - size_t key_size; - - key_data = g_bytes_get_data(key, &key_size); - - if (HMAC_Init_ex(ctx, key_data, (int)key_size, md, NULL) != 1) { - g_autofree char *openssl_err_msg = pv_get_openssl_errors(); - - g_set_error(error, PV_HASH_ERROR, PV_HASH_ERROR_INTERNAL, - "unable to create HMAC context: %s", openssl_err_msg); - return NULL; - } - return g_steal_pointer(&ctx); -} - -int pv_hmac_ctx_update_raw(HMAC_CTX *ctx, const void *buf, size_t size, GError **error) -{ - if (!buf || size == 0) - return 0; - - if (HMAC_Update(ctx, buf, size) != 1) { - g_autofree char *openssl_err_msg = pv_get_openssl_errors(); - - g_set_error(error, PV_HASH_ERROR, PV_HASH_ERROR_INTERNAL, - "unable to add data to HMAC context: %s", openssl_err_msg); - return -1; - } - return 0; -} - -int pv_hmac_ctx_update(HMAC_CTX *ctx, GBytes *data, GError **error) -{ - const uint8_t *buf; - size_t buf_size; - - if (!data) - return 0; - buf = g_bytes_get_data((GBytes *)data, &buf_size); - return pv_hmac_ctx_update_raw(ctx, buf, buf_size, error); -} - -GBytes *pv_hamc_ctx_finalize(HMAC_CTX *ctx, GError **error) -{ - int md_size = EVP_MD_size(HMAC_CTX_get_md(ctx)); - g_autofree uint8_t *hmac = NULL; - unsigned int hmac_size = 0; - - g_assert(md_size > 0); - - hmac = g_malloc0((unsigned int)md_size); - - if (HMAC_Final(ctx, hmac, &hmac_size) != 1) { - g_autofree char *openssl_err_msg = pv_get_openssl_errors(); 
- - g_set_error(error, PV_HASH_ERROR, PV_HASH_ERROR_INTERNAL, - "unable to calculate HMAC: %s", openssl_err_msg); - return NULL; - } - return g_bytes_new_take(g_steal_pointer(&hmac), hmac_size); -} diff -Nru s390-tools-2.31.0/libpv/Makefile s390-tools-2.33.1/libpv/Makefile --- s390-tools-2.31.0/libpv/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libpv/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -9,10 +9,7 @@ GLIB2_LIBS := $(shell $(PKG_CONFIG) --silence-errors --libs glib-2.0) LIBCRYPTO_CFLAGS := $(shell $(PKG_CONFIG) --silence-errors --cflags libcrypto) LIBCRYPTO_LIBS := $(shell $(PKG_CONFIG) --silence-errors --libs libcrypto) -LIBCURL_CFLAGS := $(shell $(PKG_CONFIG) --silence-errors --cflags libcurl) -LIBCURL_LIBS := $(shell $(PKG_CONFIG) --silence-errors --libs libcurl) -LDLIBS += $(GLIB2_LIBS) $(LIBCRYPTO_LIBS) $(LIBCURL_LIBS) - +LDLIBS += $(GLIB2_LIBS) $(LIBCRYPTO_LIBS) WARNINGS := -Wall -Wextra -Wshadow \ -Wcast-align -Wwrite-strings -Wmissing-prototypes \ -Wmissing-declarations -Wredundant-decls -Wnested-externs \ @@ -24,18 +21,15 @@ ALL_CFLAGS += -DOPENSSL_API_COMPAT=0x10101000L \ $(GLIB2_CFLAGS) \ $(LIBCRYPTO_CFLAGS) \ - $(LIBCURL_CFLAGS) \ $(WARNINGS) \ $(NULL) BUILD_TARGETS := skip-$(LIB) ifneq (${HAVE_OPENSSL},0) ifneq (${HAVE_GLIB2},0) -ifneq (${HAVE_LIBCURL},0) BUILD_TARGETS := $(LIB) endif endif -endif sources := $(wildcard *.c) objects := $(patsubst %.c,%.o,$(sources)) @@ -82,9 +76,4 @@ "openssl-devel / libssl-dev version >= 1.1.1", \ "HAVE_OPENSSL=0", \ "-I.") - $(call check_dep, \ - "$(LIB)", \ - "curl/curl.h", \ - "libcurl-devel", \ - "HAVE_LIBCURL=0") touch $@ diff -Nru s390-tools-2.31.0/libutil/util_arch.c s390-tools-2.33.1/libutil/util_arch.c --- s390-tools-2.31.0/libutil/util_arch.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libutil/util_arch.c 2024-05-28 08:26:36.000000000 +0200 @@ -90,10 +90,10 @@ case UTIL_ARCH_MACHINE_TYPE_Z14_ZR1: return "IBM z14 ZR1"; case UTIL_ARCH_MACHINE_TYPE_Z15: - return "IBM 
z15"; case UTIL_ARCH_MACHINE_TYPE_Z15_T02: - return "IBM z15 Model T02"; + return "IBM z15"; case UTIL_ARCH_MACHINE_TYPE_Z16: + case UTIL_ARCH_MACHINE_TYPE_Z16_A02: return "IBM z16"; default: return "Unknown machine type"; @@ -111,6 +111,7 @@ case UTIL_ARCH_MACHINE_TYPE_Z15: case UTIL_ARCH_MACHINE_TYPE_Z15_T02: case UTIL_ARCH_MACHINE_TYPE_Z16: + case UTIL_ARCH_MACHINE_TYPE_Z16_A02: return HSA_SIZE_512M; default: return HSA_SIZE_32M; diff -Nru s390-tools-2.31.0/libutil/util_base.c s390-tools-2.33.1/libutil/util_base.c --- s390-tools-2.31.0/libutil/util_base.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libutil/util_base.c 2024-05-28 08:26:36.000000000 +0200 @@ -27,7 +27,7 @@ for (i = 0; i < count; i++) { if (first) { - fprintf(fh, "%*s", indent, " "); + fprintf(fh, "%*s", indent, ""); if (tag) fprintf(fh, "%s: ", tag); fprintf(fh, "%08x: ", i); diff -Nru s390-tools-2.31.0/libutil/util_file.c s390-tools-2.33.1/libutil/util_file.c --- s390-tools-2.31.0/libutil/util_file.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libutil/util_file.c 2024-05-28 08:26:36.000000000 +0200 @@ -110,7 +110,8 @@ } rc = 0; out_fclose: - fclose(fp); + if (fclose(fp)) + return -1; return rc; } diff -Nru s390-tools-2.31.0/libutil/util_fmt.c s390-tools-2.33.1/libutil/util_fmt.c --- s390-tools-2.31.0/libutil/util_fmt.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/libutil/util_fmt.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,763 @@ +/* + * util - Utility function library + * + * Format structured data as key-value pairs, JSON, or CSV + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "lib/util_base.h" +#include "lib/util_fmt.h" +#include "lib/util_libc.h" +#include "lib/util_rec.h" +#include "lib/zt_common.h" + +struct obj_t { + char *name; + bool is_list; + bool is_row; + bool is_prefix; + unsigned int index; +}; + +struct key_t { + char *name; + bool persist; +}; + +static struct { + enum util_fmt_t type; + FILE *fd; + int fileno; + /* Format control. */ + bool hide_prefix; + bool hide_inval; + bool quote_all; + bool do_filter; + bool do_warn; + bool hide_meta; + bool handle_int; + int api_level; + const char *nl; + /* JSON specifics. */ + unsigned int ind_base; + unsigned int ind_width; + char ind_char; + bool meta_done; + /* CSV specifics. */ + struct util_rec *csv_rec; + bool csv_hdr; + bool csv_data; + /* State. */ + unsigned int lvl; + struct obj_t *objs; + unsigned int num_objs; + struct key_t *keys; + unsigned int num_keys; + struct sigaction old_int; + struct sigaction old_term; + /* Methods. */ + void (*obj_start)(struct obj_t *parent, struct obj_t *obj); + void (*obj_end)(struct obj_t *parent, struct obj_t *obj); + void (*map)(struct obj_t *parent, unsigned int mflags, const char *key, + const char *val); + void (*term)(void); +} f; + +#define fwarn(fmt, ...) \ + do { if (f.do_warn) warnx(fmt, ##__VA_ARGS__); } while (0) + +/* Map format name to format ID. */ +static const struct { + const char *name; + enum util_fmt_t fmt; +} formats[] = { + { "json", FMT_JSON }, + { "json-seq", FMT_JSONSEQ }, + { "pairs", FMT_PAIRS }, + { "csv", FMT_CSV }, +}; + +/* Signal mask for blocking INT and TERM signals. 
*/ +static sigset_t no_int_mask; + +bool util_fmt_name_to_type(const char *name, enum util_fmt_t *type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(formats); i++) { + if (strcasecmp(name, formats[i].name) == 0) { + *type = formats[i].fmt; + return true; + } + } + return false; +} + +static void safe_write(const char *str) +{ + size_t done, todo; + ssize_t rc; + + if (f.fileno < 0) + return; + for (done = 0; (todo = strlen(&str[done])) > 0; done += (size_t)rc) { + rc = write(f.fileno, &str[done], todo); + if (rc <= 0) + return; + } +} + +static void _indent(unsigned int off, bool safe) +{ + unsigned int num, i; + + if (f.type == FMT_JSONSEQ) + return; + num = f.ind_base + off; + if (f.type == FMT_JSON && f.lvl > 0) + num += f.lvl - 1; + for (i = 0; i < num * f.ind_width; i++) { + if (!safe) { + fputc(f.ind_char, f.fd); + } else if (f.fileno >= 0) { + if (write(f.fileno, &f.ind_char, 1) <= 0) + return; + } + } +} + +#define indent(x) _indent(x, false) + +static void obj_free(struct obj_t *obj) +{ + free(obj->name); + memset(obj, 0, sizeof(*obj)); +} + +static void disable_int(sigset_t *saved) +{ + if (f.handle_int) + sigprocmask(SIG_BLOCK, &no_int_mask, saved); +} + +static void enable_int(sigset_t *saved) +{ + if (f.handle_int) { + /* Ensure latest updates are flushed to file descriptor. */ + fflush(f.fd); + sigprocmask(SIG_SETMASK, saved, NULL); + } +} + +static void int_handler(int signum) +{ + struct sigaction *old; + + if (f.term) + f.term(); + /* Re-install and call original handler. */ + old = (signum == SIGINT) ? 
&f.old_int : &f.old_term; + sigaction(signum, old, NULL); + raise(signum); +} + +static void setup_int_handler(void) +{ + struct sigaction act; + + memset(&act, 0, sizeof(act)); + act.sa_handler = &int_handler; + sigaction(SIGINT, &act, &f.old_int); + sigaction(SIGTERM, &act, &f.old_term); +} + +static void remove_int_handler(void) +{ + sigaction(SIGINT, &f.old_int, NULL); + sigaction(SIGTERM, &f.old_term, NULL); +} + +void util_fmt_exit(void) +{ + unsigned int i; + + if (f.handle_int) + remove_int_handler(); + if (f.lvl > 0) + fwarn("%s before remaining %d util_obj_end()", __func__, f.lvl); + for (i = 0; i < f.num_keys; i++) + free(f.keys[i].name); + free(f.keys); + for (i = 0; i < f.num_objs; i++) + obj_free(&f.objs[i]); + free(f.objs); + if (f.type == FMT_CSV) + util_rec_free(f.csv_rec); +} + +void util_fmt_set_indent(unsigned int base, unsigned int width, char ind_char) +{ + f.ind_base = base; + f.ind_width = width; + f.ind_char = ind_char; +} + +static unsigned int to_hex(char *str, int val, unsigned int num_digits) +{ + int digit; + char *c; + + for (c = str + num_digits - 1; c >= str; c--) { + digit = (val & 0xf); + val >>= 4; + *c = (char)((digit >= 10) ? digit - 10 + 'a' : digit + '0'); + } + + return num_digits; +} + +static char get_escape(const char *map, char c) +{ + int i; + + for (i = 0; map[i] && map[i + 1]; i += 2) { + if (map[i] == c) + return map[i + 1]; + } + return 0; +} + +struct quote_params { + const char *double_chars; + const char *esc_map; + char hex_char; + unsigned int hex_digits; + unsigned int max_width_per_char; +}; + +static char *do_quote(const char *str, const struct quote_params *p) +{ + unsigned int from, to; + char *q, esc, c; + + /* Start with worst-case length assuming every char is replaced. 
*/ + q = util_zalloc(strlen(str) * p->max_width_per_char + /* "" nul */ 3); + to = 0; + q[to++] = '"'; + for (from = 0; (c = str[from]); from++) { + if (p->double_chars && strchr(p->double_chars, c)) { + /* Escape characters by doubling them ("" in CSV). */ + q[to++] = c; + q[to++] = c; + } else if (p->esc_map && (esc = get_escape(p->esc_map, c))) { + /* Escape characters with backslash + letter. */ + q[to++] = '\\'; + q[to++] = esc; + } else if (p->hex_char && !isprint(c)) { + /* Escape characters with backslash + hex code. */ + q[to++] = '\\'; + q[to++] = p->hex_char; + to += to_hex(&q[to], c, p->hex_digits); + } else { + q[to++] = c; + } + } + q[to++] = '"'; + + return util_realloc(q, (size_t)to + 1); +} + +static char *csv_quote(const char *str) +{ + static const struct quote_params csv_quote_params = { + .double_chars = "\"", + .esc_map = NULL, + .hex_char = 0, + .hex_digits = 0, + .max_width_per_char = 2 /* " => "" */, + }; + + return do_quote(str, &csv_quote_params); +} + +static void add_key(const char *name, bool persist) +{ + struct key_t key; + char *hdr; + + key.name = util_strdup(name); + key.persist = persist; + util_add_array(&f.keys, &f.num_keys, key); + if (f.type == FMT_CSV) { + hdr = csv_quote(name); + util_rec_def(f.csv_rec, name, UTIL_REC_ALIGN_LEFT, 0, hdr); + free(hdr); + util_rec_set(f.csv_rec, name, "\"\""); + f.csv_hdr = true; + } +} + +static struct key_t *get_key(const char *name) +{ + unsigned int i; + + for (i = 0; i < f.num_keys; i++) { + if (strcmp(name, f.keys[i].name) == 0) + return &f.keys[i]; + } + return NULL; +} + +void util_fmt_add_key(const char *fmt, ...) +{ + va_list args; + char *key; + + va_start(args, fmt); + util_vasprintf(&key, fmt, args); + va_end(args); + + /* Only add unique keys. 
*/ + if (!get_key(key)) + add_key(key, true); + free(key); +} + +static bool update_key(const char *name, bool persist) +{ + struct key_t *key; + bool rc = true; + + key = get_key(name); + if (key) { + key->persist = persist; + } else if (!f.do_filter) { + add_key(name, persist); + } else { + fwarn("util_fmt_pair for key '%s' without util_fmt_add_key()", + name); + rc = false; + } + return rc; +} + +static struct obj_t *curr_obj(int off) +{ + int lvl = (int)f.lvl - 1 + off; + + return lvl < 0 ? NULL : &f.objs[lvl]; +} + +static void _util_fmt_obj_end(void); + +/* + * By s390-tools convention, all tool output must be contained in an extra + * top-level object that includes tool-invocation meta-data. + */ +static void emit_meta_object(void) +{ + unsigned int quoted = FMT_PERSIST | FMT_QUOTE, unquoted = FMT_PERSIST; + char hostname[HOST_NAME_MAX + 1] = { 0 }, date[30]; + struct timeval tv; + struct tm *tm; + + f.meta_done = true; + util_fmt_obj_start(FMT_DEFAULT, NULL); + util_fmt_obj_start(FMT_PREFIX, "meta"); + + /* + * "meta": { + * "api_level": 1, + * "version": "2.32.0", + * "host": "localhost", + * "time_epoch": 1714392976, + * "time": "2024-04-29 14:16:16+0200", + * } + */ + util_fmt_pair(unquoted, "api_level", "%d", f.api_level); + util_fmt_pair(quoted, "version", "%s", RELEASE_STRING); + gethostname(hostname, sizeof(hostname) - 1); + util_fmt_pair(quoted, "host", "%s", hostname); + gettimeofday(&tv, NULL); + util_fmt_pair(unquoted, "time_epoch", "%llu", tv.tv_sec); + tm = localtime(&tv.tv_sec); + if (!strftime(date, sizeof(date), "%F %T%z", tm)) + date[0] = 0; + util_fmt_pair(quoted, "time", "%s", date); + _util_fmt_obj_end(); + + if (f.type == FMT_JSONSEQ) { + /* Tool meta-data is a separate object for JSONSEQ. */ + util_fmt_obj_end(); + } +} + +void util_fmt_obj_start(unsigned int oflags, const char *fmt, ...) 
+{ + struct obj_t *parent, *obj; + char *name = NULL; + sigset_t set; + va_list args; + + if (!f.hide_meta && !f.meta_done && f.lvl == 0) { + emit_meta_object(); + /* + * Allow override of top-level key name for supplementary + * output formats. + */ + if (!fmt) + name = util_strdup(program_invocation_short_name); + } + if (fmt) { + va_start(args, fmt); + util_vasprintf(&name, fmt, args); + va_end(args); + } + f.lvl++; + if (f.lvl > f.num_objs) + util_expand_array(&f.objs, &f.num_objs); + parent = curr_obj(-1); + obj = curr_obj(0); + obj->name = name; + obj->is_list = (oflags & FMT_LIST); + obj->is_row = (oflags & FMT_ROW); + obj->is_prefix = (oflags & FMT_PREFIX); + obj->index = 0; + if (f.obj_start) { + disable_int(&set); + f.obj_start(parent, obj); + enable_int(&set); + } + if (parent) + parent->index++; +} + +static void _util_fmt_obj_end(void) +{ + struct obj_t *obj, *parent; + sigset_t set; + + if (f.lvl == 0) { + fwarn("%s without util_fmt_obj_start", __func__); + return; + } + parent = curr_obj(-1); + obj = curr_obj(0); + if (f.obj_end) { + disable_int(&set); + f.obj_end(parent, obj); + enable_int(&set); + } + f.lvl--; + obj_free(obj); +} + +void util_fmt_obj_end(void) +{ + _util_fmt_obj_end(); + + if (f.lvl == 1 && f.meta_done && f.type != FMT_JSONSEQ) { + /* Emit closure for top-level meta-container object. */ + util_fmt_obj_end(); + } +} + +static char *add_prefix(const char *str, bool full) +{ + struct obj_t *obj; + unsigned int i; + char *prefix; + + prefix = util_strdup(""); + for (i = 0; i < f.lvl; i++) { + obj = &f.objs[i]; + if (!full && !obj->is_prefix) + continue; + if (obj->name) { + if (*prefix) + util_concatf(&prefix, "."); + util_concatf(&prefix, "%s", obj->name); + } + if (obj->is_list && full) + util_concatf(&prefix, "[%d]", obj->index - 1); + } + if (*prefix) + util_concatf(&prefix, "."); + util_concatf(&prefix, "%s", str); + + return prefix; +} + +void util_fmt_pair(unsigned int mflags, const char *key, const char *fmt, ...) 
+{ + char *val, *prefixed_key; + struct obj_t *obj; + bool is_filtered; + sigset_t set; + va_list args; + + obj = curr_obj(0); + if (!obj) { + fwarn("%s before util_fmt_obj_start", __func__); + return; + } + + /* Filter by key. */ + if (f.do_filter) { + prefixed_key = add_prefix(key, false); + is_filtered = !get_key(prefixed_key); + free(prefixed_key); + if (is_filtered) + return; + } + + /* Filter by validity. */ + if (f.hide_inval && (mflags & FMT_INVAL)) + return; + + va_start(args, fmt); + util_vasprintf(&val, fmt, args); + va_end(args); + + if (f.map) { + disable_int(&set); + f.map(obj, mflags, key, val); + enable_int(&set); + } + obj->index++; + + free(val); +} + +static char *pairs_quote(const char *str) +{ + static const struct quote_params pairs_quote_params = { + .double_chars = NULL, + .esc_map = "\"\"$$``\\\\\aa\bb\ee\ff\nn\rr\tt\vv", + .hex_char = 'x', + .hex_digits = 2, + .max_width_per_char = 4 /* '\x' + 2 hex_digits */, + }; + + return do_quote(str, &pairs_quote_params); +} + +static void pairs_map(struct obj_t *UNUSED(obj), unsigned int mflags, + const char *key, const char *val) +{ + char *full_key, *qval = NULL; + + if (mflags & FMT_INVAL) + val = ""; + indent(0); + if (f.quote_all || (mflags & FMT_QUOTE)) + qval = pairs_quote(val); + if (f.hide_prefix) { + fprintf(f.fd, "%s=%s\n", key, qval ?: val); + } else { + full_key = add_prefix(key, true); + fprintf(f.fd, "%s=%s\n", full_key, qval ?: val); + free(full_key); + } + free(qval); +} + +static char *json_quote(const char *str) +{ + static const struct quote_params json_quote_params = { + .double_chars = NULL, + .esc_map = "\"\"\\\\\bb\ff\nn\rr\tt", + .hex_char = 'u', + .hex_digits = 4, + .max_width_per_char = 6 /* '\u' + 4 hex_digits */, + }; + + return do_quote(str, &json_quote_params); +} + +static void json_obj_start(struct obj_t *parent, struct obj_t *obj) +{ + char *key; + + if (!parent && f.type == FMT_JSONSEQ) { + /* Emit leading record separator according to RFC 7464. 
*/ + fprintf(f.fd, "\x1e"); + } + if (parent && parent->index > 0) + fprintf(f.fd, ",%s", f.nl); + indent(0); + if (parent && !parent->is_list && obj->name) { + key = json_quote(obj->name); + fprintf(f.fd, "%s: ", key); + free(key); + } + fprintf(f.fd, obj->is_list ? "[%s" : "{%s", f.nl); +} + +static void json_obj_end(struct obj_t *parent, struct obj_t *obj) +{ + if (obj->index > 0) + fprintf(f.fd, "%s", f.nl); + indent(0); + fprintf(f.fd, obj->is_list ? "]" : "}"); + if (!parent) + fprintf(f.fd, "\n"); +} + +/* + * Ensure syntactically correct JSON by emitting all pending closure elements. + * Called in signal context - only use signal-safe functions. + */ +static void json_term(void) +{ + struct obj_t *obj, *parent; + + for (; f.lvl > 0; f.lvl--) { + obj = curr_obj(0); + parent = curr_obj(-1); + if (obj->index > 0) + safe_write(f.nl); + _indent(0, true); + safe_write(obj->is_list ? "]" : "}"); + if (!parent) + safe_write(f.nl); + } +} + +static void json_map(struct obj_t *parent, unsigned int mflags, + const char *key, const char *val) +{ + char *qkey, *qval = NULL; + + qkey = json_quote(key); + if (mflags & FMT_INVAL) + qval = util_strdup("null"); + else if (f.quote_all || (mflags & FMT_QUOTE)) + qval = json_quote(val); + if (parent->index > 0) + fprintf(f.fd, ",%s", f.nl); + indent(1); + fprintf(f.fd, "%s: %s", qkey, qval ?: val); + free(qval); + free(qkey); +} + +static void csv_obj_start(struct obj_t *UNUSED(parent), struct obj_t *obj) +{ + if (!obj->is_row) + return; +} + +static void csv_obj_end(struct obj_t *UNUSED(parent), struct obj_t *obj) +{ + unsigned int i; + + if (!(obj->is_row || (f.lvl == 1 && f.csv_data))) + return; + if (f.csv_hdr) { + /* Print row with CSV header. */ + indent(0); + util_rec_print_hdr(f.csv_rec); + f.csv_hdr = false; + } + /* Print row with CSV data. */ + indent(0); + util_rec_print(f.csv_rec); + f.csv_data = false; + /* Reset non-persistent fields. 
*/ + for (i = 0; i < f.num_keys; i++) { + if (!f.keys[i].persist) + util_rec_set(f.csv_rec, f.keys[i].name, "\"\""); + } +} + +static void csv_map(struct obj_t *UNUSED(obj), unsigned int mflags, + const char *key, const char *val) +{ + char *qval = NULL, *prefixed_key; + + /* Use empty string for invalid values. */ + if (mflags & FMT_INVAL) + val = ""; + /* Quote value if requested. */ + if (f.quote_all || (mflags & FMT_QUOTE)) + qval = csv_quote(val); + /* Process key and value. */ + prefixed_key = add_prefix(key, false); + if (update_key(prefixed_key, mflags & FMT_PERSIST)) { + util_rec_set(f.csv_rec, prefixed_key, "%s", qval ?: val); + f.csv_data = true; + } + free(prefixed_key); + free(qval); +} + +void util_fmt_init(FILE *fd, enum util_fmt_t type, unsigned int flags, + int api_level) +{ + memset(&f, 0, sizeof(f)); + f.type = type; + f.fd = fd; + f.fileno = fileno(fd); + f.hide_prefix = (flags & FMT_NOPREFIX); + f.hide_inval = !(flags & FMT_KEEPINVAL); + f.hide_meta = (flags & FMT_NOMETA); + f.quote_all = (flags & FMT_QUOTEALL); + f.do_filter = (flags & FMT_FILTER); + f.do_warn = (flags & FMT_WARN); + f.handle_int = (flags & FMT_HANDLEINT); + f.api_level = api_level; + if (type == FMT_JSONSEQ) + f.nl = ""; + else + f.nl = "\n"; + f.ind_width = 2; + f.ind_char = ' '; + f.meta_done = false; + switch (type) { + case FMT_PAIRS: + f.map = &pairs_map; + break; + case FMT_JSON: + case FMT_JSONSEQ: + f.obj_start = &json_obj_start; + f.obj_end = &json_obj_end; + f.map = &json_map; + f.term = &json_term; + break; + case FMT_CSV: + f.obj_start = &csv_obj_start; + f.obj_end = &csv_obj_end; + f.map = &csv_map; + f.csv_rec = util_rec_new_csv(","); + f.csv_hdr = true; + f.csv_data = false; + break; + } + /* Ensure consistent number format for callers that use setlocale(). 
*/ + setlocale(LC_NUMERIC, "C"); + if (f.handle_int) { + setup_int_handler(); + sigemptyset(&no_int_mask); + sigaddset(&no_int_mask, SIGINT); + sigaddset(&no_int_mask, SIGTERM); + } +} diff -Nru s390-tools-2.31.0/libutil/util_fmt_example.c s390-tools-2.33.1/libutil/util_fmt_example.c --- s390-tools-2.31.0/libutil/util_fmt_example.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/libutil/util_fmt_example.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,251 @@ +/* + * util_fmt_example - Example program for util_fmt + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include +#include + +#include "lib/util_base.h" +#include "lib/util_fmt.h" + +#define API_LEVEL 1 + +static void meta_example(enum util_fmt_t format) +{ + util_fmt_init(stdout, format, FMT_DEFAULT, API_LEVEL); + + /* + * First call to util_fmt_obj_start() automatically adds meta-data + * object as required by s390-tools convention. + */ + util_fmt_obj_start(FMT_DEFAULT, NULL); + util_fmt_pair(FMT_QUOTE, "key", "value"); + util_fmt_obj_end(); + + util_fmt_exit(); +} + +static void simple_example(enum util_fmt_t format, int fmt_flags) +{ + /* + * Note: Meta-data is excluded in this example for readability but + * must be included in actual tool output. 
+ */ + util_fmt_init(stdout, format, fmt_flags | FMT_NOMETA, API_LEVEL); + + /* + * { + * "child": { + * "key": "value", + * "invalid":"invalidvalue" <== Marked as invalid + * } + * } + */ + util_fmt_obj_start(FMT_DEFAULT, NULL); + util_fmt_obj_start(FMT_DEFAULT, "child"); + util_fmt_pair(FMT_QUOTE, "key", "value"); + util_fmt_pair(FMT_QUOTE | FMT_INVAL, "invalid", "invalidvalue"); + util_fmt_obj_end(); + util_fmt_obj_end(); + + util_fmt_exit(); +} + +static void list_example(enum util_fmt_t format, int flags) +{ + int i; + + /* + * Note: Meta-data is excluded in this example for readability but + * must be included in actual tool output. + */ + util_fmt_init(stdout, format, flags | FMT_NOMETA, API_LEVEL); + + /* + * "cond","key" + * condvalue0,value0 + * "",value1 + * "",value2 + * "",value3 + */ + util_fmt_obj_start(FMT_DEFAULT, NULL); + util_fmt_obj_start(FMT_LIST, "list"); + + for (i = 0; i < 4; i++) { + util_fmt_obj_start(FMT_ROW, NULL); + if (i == 0) + util_fmt_pair(flags, "cond", "condvalue%d", i); + util_fmt_pair(FMT_DEFAULT, "key", "value%d", i); + util_fmt_obj_end(); + } + + util_fmt_obj_end(); + util_fmt_obj_end(); + + util_fmt_exit(); +} + +#define NUM_KEYS 4 + +static void vary_example(enum util_fmt_t format, bool add) +{ + const char *keys[NUM_KEYS] = { "key_a", "key_b", "key_c", "key_d" }; + int i; + + /* + * Note: Meta-data is excluded in this example for readability but + * must be included in actual tool output. + */ + util_fmt_init(stdout, format, FMT_NOMETA, API_LEVEL); + + if (add) { + /* Make keys known before starting output. 
*/ + for (i = 0; i < NUM_KEYS; i++) + util_fmt_add_key(keys[i]); + } + + util_fmt_obj_start(FMT_LIST, "list"); + for (i = 0; i < 4; i++) { + util_fmt_obj_start(FMT_ROW, NULL); + util_fmt_pair(FMT_DEFAULT, keys[i], "value%d", i); + util_fmt_obj_end(); + } + util_fmt_obj_end(); + + util_fmt_exit(); +} + +static void filter_example(enum util_fmt_t format) +{ + /* + * Note: Meta-data is excluded in this example for readability but + * must be included in actual tool output. + */ + util_fmt_init(stdout, format, FMT_FILTER | FMT_NOMETA, API_LEVEL); + util_fmt_add_key("key_a"); + /* + * { + * "key_a": "value_a", + * "key_b": "value_b" <== Not announced via util_fmt_add_key() + * } + */ + util_fmt_obj_start(FMT_DEFAULT, NULL); + util_fmt_pair(FMT_QUOTE, "key_a", "value_a"); + util_fmt_pair(FMT_QUOTE, "key_b", "value_b"); + util_fmt_obj_end(); + + util_fmt_exit(); +} + +static void prefix_example(enum util_fmt_t format, bool do_prefix) +{ + /* + * Note: Meta-data is excluded in this example for readability but + * must be included in actual tool output. + */ + util_fmt_init(stdout, format, FMT_NOMETA, API_LEVEL); + + /* + * { + * "key": "value0", + * "obj1": { // Marked as prefix object + * "key": "value1" + * } + * } + */ + util_fmt_obj_start(FMT_DEFAULT, "obj0"); + util_fmt_pair(FMT_QUOTE, "key", "value0"); + util_fmt_obj_start(do_prefix ? FMT_PREFIX : FMT_DEFAULT, "obj1"); + util_fmt_pair(FMT_QUOTE, "key", "value1"); + util_fmt_obj_end(); + util_fmt_obj_end(); + + util_fmt_exit(); +} + +static void announce(const char *example_name) +{ + static int example_number; + int i; + + if (example_number++ > 0) + printf("\n"); + + printf("%d. 
%s\n====", example_number, example_name); + for (i = strlen(example_name); i > 0; i--) + printf("="); + printf("\n"); +} + +int main(int UNUSED(argc), char *UNUSED(argv[])) +{ + announce("JSON output"); + simple_example(FMT_JSON, FMT_KEEPINVAL); + + announce("JSON without invalid pairs"); + simple_example(FMT_JSON, FMT_DEFAULT); + + announce("JSON formatted as sequence"); + simple_example(FMT_JSONSEQ, FMT_DEFAULT); + + announce("Pairs output"); + simple_example(FMT_PAIRS, FMT_KEEPINVAL); + + announce("Pairs output without invalid pairs"); + simple_example(FMT_PAIRS, FMT_DEFAULT); + + announce("Pairs without prefix"); + simple_example(FMT_PAIRS, FMT_NOPREFIX); + + announce("CSV output"); + simple_example(FMT_CSV, FMT_KEEPINVAL); + + announce("CSV list output"); + list_example(FMT_CSV, FMT_DEFAULT); + + announce("CSV list with persistent cond value"); + list_example(FMT_CSV, FMT_PERSIST); + + announce("JSON with filtered key"); + filter_example(FMT_JSON); + + announce("Pairs with filtered key"); + filter_example(FMT_PAIRS); + + announce("CSV with filtered key"); + filter_example(FMT_CSV); + + announce("CSV list with varying keys"); + vary_example(FMT_CSV, false); + + announce("CSV list with pre-announced varying keys"); + vary_example(FMT_CSV, true); + + announce("JSON output with meta-data"); + meta_example(FMT_JSON); + + announce("JSON sequence output with meta-data"); + meta_example(FMT_JSONSEQ); + + announce("Pairs output with meta-data"); + meta_example(FMT_PAIRS); + + announce("CSV output with meta-data"); + meta_example(FMT_CSV); + + announce("JSON output with duplicate keys"); + prefix_example(FMT_JSON, false); + + announce("CSV output with duplicate keys"); + prefix_example(FMT_CSV, false); + + announce("CSV output with duplicate keys distinguished by prefix"); + prefix_example(FMT_CSV, true); + + return 0; +} diff -Nru s390-tools-2.31.0/libutil/util_libc.c s390-tools-2.33.1/libutil/util_libc.c --- s390-tools-2.31.0/libutil/util_libc.c 2024-02-02 
17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libutil/util_libc.c 2024-05-28 08:26:36.000000000 +0200 @@ -140,6 +140,26 @@ } /** + * Concatenate a string with the result of a format string expansion + * + * @param[in, out] str1 Pointer to pointer to first string + * @param[in] fmt Format string for generation of the second string + * @param[in] ... Parameters for format string + */ +void util_concatf(char **str1, const char *fmt, ...) +{ + va_list args; + char *str2; + + va_start(args, fmt); + util_vasprintf(&str2, fmt, args); + va_end(args); + + *str1 = util_strcat_realloc(*str1, str2); + free(str2); +} + +/** * Convert string to uppercase * * String \a str is converted to uppercase diff -Nru s390-tools-2.31.0/libutil/util_libc_example.c s390-tools-2.33.1/libutil/util_libc_example.c --- s390-tools-2.31.0/libutil/util_libc_example.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libutil/util_libc_example.c 2024-05-28 08:26:36.000000000 +0200 @@ -40,6 +40,14 @@ fprintf(stderr, "result = \"%s\"\n", str); free(str); + /* Use util_concatf() for string concatenation */ + fprintf(stderr, "Try to concatenate \"list\" plus comma-separated list of numbers 1 to 3: "); + str = NULL; + util_concatf(&str, "list:"); + for (int i = 1; i <= 3; i++) + util_concatf(&str, "%s%d", (i > 1 ? 
"," : ""), i); + fprintf(stderr, "result = %s\n", str); /* list:part1,part2,part3 */ + /* One byte allocation should work */ fprintf(stderr, "Try to allocate 1 byte: "); ptr = util_malloc(1); diff -Nru s390-tools-2.31.0/libutil/util_lockfile.c s390-tools-2.33.1/libutil/util_lockfile.c --- s390-tools-2.31.0/libutil/util_lockfile.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libutil/util_lockfile.c 2024-05-28 08:26:36.000000000 +0200 @@ -25,8 +25,8 @@ #include "lib/util_panic.h" #define WAITPID 120 /* Time to wait for pid to be written */ -#define WAITINC 5 /* Additional time to wait each retry */ -#define MAXWAIT 60 /* Maximum wait between retries */ +#define DEF_WAITINC_US 5000000 /* Additional time to wait each retry */ +#define DEF_MAXWAIT_US 60000000 /* Maximum wait between retries */ #define BUFSIZE 40 /* Buffer must be large enough to fit pid string */ /** @@ -133,15 +133,20 @@ * * @param[in] lockfile Path to the lock file * @param[in] retries Number of times to retry if lock fails initially + * @param[in] waitinc How many micro-seconds to extend wait time before + * additional retry + * @param[in] maxwait Maximum wait time before retry * @param[in] pid PID to use for lock ownership * * @retval 0 Lock created with PID as owner * @retval !=0 Lock was not created */ -static int do_lockfile_lock(char *lockfile, unsigned int retries, int pid) +static int do_lockfile_lock(char *lockfile, unsigned int retries, int pid, + unsigned int waitinc, unsigned int maxwait) { - int fd, plen, len, rc = 0, snooze = 0; unsigned int tries = retries + 1; + int fd, plen, len, rc = 0; + unsigned int snooze = 0; char buf[BUFSIZE]; char *tpath; @@ -190,9 +195,9 @@ if (rc != 0) { tries--; if (tries > 0) { - snooze += WAITINC; - snooze = (snooze > MAXWAIT) ? MAXWAIT : snooze; - sleep(snooze); + snooze += waitinc; + snooze = (snooze > maxwait) ? 
maxwait : snooze; + usleep(snooze); } } } while (tries > 0); @@ -255,7 +260,27 @@ */ int util_lockfile_lock(char *lockfile, int retries) { - return do_lockfile_lock(lockfile, retries, getpid()); + return do_lockfile_lock(lockfile, retries, getpid(), DEF_WAITINC_US, + DEF_MAXWAIT_US); +} + +/** + * Attempt to create a lockfile owned by this process at the specified path + * using a custom wait/retry time. + * + * @param[in] lockfile Path to the lock file + * @param[in] retries Number of times to retry if lock fails initially + * @param[in] waitinc How many micro-seconds to extend wait time before + * additional retry + * @param[in] maxwait Maximum wait time before retry + * + * @retval 0 Lock created + * @retval !=0 Lock was not created + */ +int util_lockfile_lock_cw(char *lockfile, int retries, unsigned int waitinc, + unsigned int maxwait) +{ + return do_lockfile_lock(lockfile, retries, getpid(), waitinc, maxwait); } /** @@ -270,7 +295,27 @@ */ int util_lockfile_parent_lock(char *lockfile, int retries) { - return do_lockfile_lock(lockfile, retries, getppid()); + return do_lockfile_lock(lockfile, retries, getppid(), DEF_WAITINC_US, + DEF_MAXWAIT_US); +} + +/** + * Attempt to create a lockfile owned by the parent of this process at the + * specified path using a custom wait/retry time. 
+ * + * @param[in] lockfile Path to the lock file + * @param[in] retries Number of times to retry if lock fails initially + * @param[in] waitinc How many micro-seconds to extend wait time before + * additional retry + * @param[in] maxwait Maximum wait time before retry + * + * @retval 0 Lock created + * @retval !=0 Lock was not created + */ +int util_lockfile_parent_lock_cw(char *lockfile, int retries, + unsigned int waitinc, unsigned int maxwait) +{ + return do_lockfile_lock(lockfile, retries, getppid(), waitinc, maxwait); } /** diff -Nru s390-tools-2.31.0/libutil/util_rec.c s390-tools-2.33.1/libutil/util_rec.c --- s390-tools-2.31.0/libutil/util_rec.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/libutil/util_rec.c 2024-05-28 08:26:36.000000000 +0200 @@ -111,7 +111,7 @@ /** * Create a new record with "wide" output format * - * @param[in] hdr_sep Header separator + * @param[in] hdr_sep Header separator or %NULL for no separator * * @returns Pointer to the created record */ @@ -121,7 +121,7 @@ rec->list = util_list_new(struct util_rec_fld, node); rec->fmt.type = REC_FMT_WIDE; - rec->fmt.d.wide_p.hdr_sep = util_strdup(hdr_sep); + rec->fmt.d.wide_p.hdr_sep = hdr_sep ? util_strdup(hdr_sep) : NULL; rec->fmt.d.wide_p.argz_sep = ','; rec->fmt.indent = 0; return rec; @@ -249,7 +249,7 @@ /** * Create a new record with "long" output format * - * @param[in] hdr_sep Header separator + * @param[in] hdr_sep Header separator or %NULL for no separator * @param[in] col_sep Column separator * @param[in] key Primary key of record * @param[in] key_size Width of left column i.e. keys @@ -264,7 +264,7 @@ rec->list = util_list_new(struct util_rec_fld, node); rec->fmt.type = REC_FMT_LONG; - rec->fmt.d.long_p.hdr_sep = util_strdup(hdr_sep); + rec->fmt.d.long_p.hdr_sep = hdr_sep ? 
util_strdup(hdr_sep) : NULL; rec->fmt.d.long_p.col_sep = util_strdup(col_sep); rec->fmt.d.long_p.key = util_strdup(key); rec->fmt.d.long_p.key_size = key_size; diff -Nru s390-tools-2.31.0/Makefile s390-tools-2.33.1/Makefile --- s390-tools-2.31.0/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -15,12 +15,12 @@ vmcp man mon_tools dasdinfo vmur cpuplugd ipl_tools \ ziomon iucvterm hyptop cmsfs-fuse qethqoat zfcpdump zdsfs cpumf \ systemd hmcdrvfs cpacfstats zdev dump2tar zkey netboot etc zpcictl \ - genprotimg lsstp hsci hsavmcore chreipl-fcp-mpath ap_tools pvattest \ - rust + genprotimg lsstp hsci hsavmcore chreipl-fcp-mpath ap_tools rust + else BASELIB_DIRS = LIB_DIRS = libpv -TOOL_DIRS = genprotimg pvattest rust +TOOL_DIRS = genprotimg rust endif SUB_DIRS = $(BASELIB_DIRS) $(LIB_DIRS) $(TOOL_DIRS) diff -Nru s390-tools-2.31.0/man/dumpconf.8 s390-tools-2.33.1/man/dumpconf.8 --- s390-tools-2.31.0/man/dumpconf.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/man/dumpconf.8 2024-05-28 08:26:36.000000000 +0200 @@ -45,7 +45,7 @@ .TP \fB - DUMP_TYPE:\fR -Type of dump device. Possible values are 'ccw', 'fcp' and 'nvme'. +Type of dump device. Possible values are 'ccw', 'eckd', 'fcp' and 'nvme'. .TP \fB - DEVICE:\fR @@ -72,6 +72,11 @@ Boot program selector. .TP +\fB - BR_CHR:\fR +Boot record location in "C,H,R" format (comma separated values for +Cylinder, Head and Record) or "auto". + +.TP \fB - BR_LBA:\fR Boot record logical block address. 
@@ -147,6 +152,23 @@ .br # +.br +# Example configuration for an ECKD dump device (DASD) +.br +# +.br +ON_PANIC=dump +.br +DUMP_TYPE=eckd +.br +DEVICE=0.0.1004 +.br +BOOTPROG=0 +.br +BR_CHR=auto +.br + +# .br # Example configuration for an FCP dump device (SCSI Disk) .br diff -Nru s390-tools-2.31.0/.pre-commit-config.yaml s390-tools-2.33.1/.pre-commit-config.yaml --- s390-tools-2.31.0/.pre-commit-config.yaml 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/.pre-commit-config.yaml 2024-05-28 08:26:36.000000000 +0200 @@ -1,5 +1,5 @@ --- -exclude: \.(crt|crl)$ +exclude: \.(bin|crl|crt|key)$ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.1.0 diff -Nru s390-tools-2.31.0/pvattest/Makefile s390-tools-2.33.1/pvattest/Makefile --- s390-tools-2.31.0/pvattest/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ -# Common definitions -include ../common.mak - -.DEFAULT_GOAL := all - -PKGDATADIR := "$(DESTDIR)$(TOOLS_DATADIR)/pvattest" -SUBDIRS := src man tools -RECURSIVE_TARGETS := all-recursive clean-recursive install-recursive - -all: all-recursive - -install: install-recursive - -clean: clean-recursive - -$(RECURSIVE_TARGETS): - @target=`echo $@ |sed s/-recursive//`; \ - for d in $(SUBDIRS); do \ - $(MAKE) -C $$d $$target || exit 1; \ - done - -.PHONY: all install clean $(RECURSIVE_TARGETS) diff -Nru s390-tools-2.31.0/pvattest/man/Makefile s390-tools-2.33.1/pvattest/man/Makefile --- s390-tools-2.31.0/pvattest/man/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/man/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,9 +0,0 @@ -include ../../common.mak - -all: - -install: - $(INSTALL) -d -m 755 $(DESTDIR)$(MANDIR)/man1 - $(INSTALL) -m 644 -c *.1 -t $(DESTDIR)$(MANDIR)/man1 - -.PHONY: all install clean diff -Nru s390-tools-2.31.0/pvattest/man/pvattest.1 s390-tools-2.33.1/pvattest/man/pvattest.1 --- 
s390-tools-2.31.0/pvattest/man/pvattest.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/man/pvattest.1 1970-01-01 01:00:00.000000000 +0100 @@ -1,104 +0,0 @@ -.\" Copyright 2022 IBM Corp. -.\" s390-tools is free software; you can redistribute it and/or modify -.\" it under the terms of the MIT license. See LICENSE for details. -.\" -.TH pvattest 1 "07 June 2022" "s390-tools" "Attestation Manual" -.nh -.ad l -.SH NAME -\fBpvattest [OPTION?] COMMAND [OPTIONS] \fP- create, perform, and verify attestation measurements -\fB -.RE -\fB -.SH SYNOPSIS -.nf -.fam C -\fBpvattest\fP \fIcreate\fP [\fIOPTIONS\fP] -\fBpvattest\fP \fIperform\fP [\fIOPTIONS\fP] -\fBpvattest\fP \fIverify\fP [\fIOPTIONS\fP] - -.fam T -.fi -.fam T -.fi -.SH DESCRIPTION -Use \fBpvattest\fP to attest that an IBM Secure Execution guest is the correct guest, and that it was started in a secure manner. -Run '\fBpvattest\fP \fIcreate\fP' and '\fBpvattest\fP \fIverify\fP' in a trusted environment only. -.PP -.nf -.fam C - create On a trusted system, creates an attestation request. - - perform On the SE-guest to be attested, sends the attestation request to the Ultravisor and receives the answer. - - verify On a trusted system, compares the answer from the Ultravisor to the one from your trusted environment. If they differ, the Secure Execution guest might be compromised. - -.fam T -.fi -For meaningful results, run '\fIcreate\fP' and '\fIverify\fP' in a trusted environment, like your workstation or a previously attested IBM Secure Execution guest. Otherwise, the attestation might be tampered with. For all certificates, revocation lists, and host-key documents, both the PEM and DER input formats are supported. If you run \fBpvattest\fP on a machine architecture other than z/Architecture, 'measure' is not available. 
-.PP -Use '\fBpvattest\fP [COMMAND] \fB-h\fP' to get detailed help -.RE -.PP - -.SH OPTIONS -.TP -.B -\fB-h\fP, \fB--help\fP -Show help options -.TP -.B -\fB-v\fP, \fB--version\fP -Print the version and exit. -.TP -.B -\fB-V\fP, \fB--verbose\fP -Provide more detailed output (optional) -.RE -.PP - -.SH EXAMPLE -For details refer to the man page of the command. -.PP -Create the request on a trusted system. -.PP -.nf -.fam C - trusted:~$ pvattest create -k hkd.crt --cert CA.crt --cert ibmsk.crt --arpk arp.key -o attreq.bin - -.fam T -.fi -On the SE-guest, \fIperform\fP the attestation. -.PP -.nf -.fam C - seguest:~$ pvattest perform -i attreq.bin -o attresp.bin - -.fam T -.fi -On a trusted system, \fIverify\fP that the response is correct. Here, the protection key from the creation and the SE-guest’s header is used to \fIverify\fP the measurement. -.PP -.nf -.fam C - trusted:~$ pvattest verify -i attresp.bin --arpk arp.key --hdr se_guest.hdr - trusted:~$ echo $? - 0 - -.fam T -.fi - -If the measurements do not match \fBpvattest\fP exits with code 2 and emits an error message. The SE-guest attestation failed. -.PP -.nf -.fam C - trusted:~$ pvattest verify -i wrongresp.bin --arpk arp.key --hdr se_guest.hdr - ERROR: Attestation measurement verification failed: - Calculated and received attestation measurement are not the same. - trusted:~$ echo $? - 2 - -.fam T -.fi - -.SH SEE ALSO -\fBpvattest\fP-\fIcreate\fP(1), \fBpvattest-\fIverify\fP\fP(1), \fBpvattest\fP-\fIperform\fP(1) diff -Nru s390-tools-2.31.0/pvattest/man/pvattest-create.1 s390-tools-2.33.1/pvattest/man/pvattest-create.1 --- s390-tools-2.31.0/pvattest/man/pvattest-create.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/man/pvattest-create.1 1970-01-01 01:00:00.000000000 +0100 @@ -1,83 +0,0 @@ -.\" Copyright 2022 IBM Corp. -.\" s390-tools is free software; you can redistribute it and/or modify -.\" it under the terms of the MIT license. See LICENSE for details. 
-.\" -.TH pvattest-create 1 "07 June 2022" "s390-tools" "Attestation Manual" -.nh -.ad l -.SH NAME -\fBpvattest [OPTION?] create [OPTIONS] \fP- create an attestation measurement request -\fB -.SH DESCRIPTION -Prepare attestation measurement requests for an IBM Secure Execution guest. -Only prepare attestation requests in a trusted environment, such as your workstation. -The 'pvattest create' command creates a randomly generated key to protect the attestation request. -This key is only valid for this specific request. In order to avoid compromising the attestation, -do not publish the protection key and delete it after verification. -Every 'create' command generates a new, random protection key. -.SH OPTIONS -.TP -.B -\fB-h\fP, \fB--help\fP -Prints usage information, then exits. -.TP -.B -\fB-k\fP, \fB--host-key-document\fP=\fBFILE\fP -Specify one or more host key documents. At least one is required. -Specify this option multiple times to create an attestation request control block that is usable on multiple hosts. -.TP -.B -\fB-C\fP, \fB--cert\fP=\fBFILE\fP -Specifies the certificate that is used to establish a chain of trust for the verification of the host-key documents. Specify this option twice to specify the IBM Z signing key and the intermediate CA certificate (signed by the root CA). Required. Ignored when \fB--no-verify\fP is specified. -.TP -.B -\fB--crl\fP=\fBFILE\fP -Specifies the revocation list that is used to check whether a certificate of the chain of trust is -revoked. Specify this option multiple times to use multiple CRLs (optional). -.TP -.B -\fB--root-ca\fP=\fBFILE\fP -Specifies the root CA certificate for the verification. If omitted, -the system wide root CAs installed on the system are used. Use -this only if you trust the specified certificate. Optional. -.TP -.B -\fB-o\fP, \fB--output\fP=\fBFILE\fP -\fBFILE\fP specifies the output for the attestation request control block. 
-.TP -.B -\fB-a\fP, \fB--arpk\fP=\fBFILE\fP -Save the protection key as GCM-AES256 key in \fBFILE\fP Do not publish this key, otherwise your attestation is compromised. -.TP -.B -\fB--no-verify\fP -Disable the host-key document verification. Does not require the host-key documents to be valid. Do -not use for a production request unless you verified the host-key document before (optional). -.TP -.B -\fB--offline\fP -Specifies offline mode, in which no attempt is made to download CRLs. (optional). -.TP -.B -\fB-V\fP, \fB--verbose\fP -Provide more detailed output (optional). -.SH EXAMPLE -Create an attestation request with the protection key 'arp.key', write the request to 'arcb.bin', and verify the host-key document using the CA-signed key 'DigiCertCA.crt' and the intermediate key 'IbmSigningKey.crt'. -.PP -.nf -.fam C - pvattest create -k hkd.crt --arpk arp.key -o attreq.bin --cert DigiCertCA.crt --cert IbmSigningKey.crt - -.fam T -.fi -Create an attestation request with the protection key 'arp.key', write the request to 'arcb.bin', verify the host-key document using the CA-signed key 'DigiCertCA.crt' and the intermediate key 'IbmSigningKey.crt', and instead of downloading the certificate revocation list use certificate revocation lists 'DigiCertCA.crl', 'IbmSigningKey.crl', and 'rootCA.crl'. -.PP -.nf -.fam C - pvattest create -k hkd.crt --arpk arp.key -o attreq.bin --cert DigiCertCA.crt --cert IbmSigningKey.crt --offline --crl DigiCertCA.crl --crl IbmSigningKey.crl --crl rootCA.crl - - -.fam T -.fi -.SH SEE ALSO -\fBpvattest\fP(1), \fBpvattest-verify\fP(1), \fBpvattest-perform\fP(1) diff -Nru s390-tools-2.31.0/pvattest/man/pvattest-perform.1 s390-tools-2.33.1/pvattest/man/pvattest-perform.1 --- s390-tools-2.31.0/pvattest/man/pvattest-perform.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/man/pvattest-perform.1 1970-01-01 01:00:00.000000000 +0100 @@ -1,50 +0,0 @@ -.\" Copyright 2022 IBM Corp. 
-.\" s390-tools is free software; you can redistribute it and/or modify -.\" it under the terms of the MIT license. See LICENSE for details. -.\" -.TH pvattest-perform 1 "07 June 2022" "s390-tools" "Attestation Manual" -.nh -.ad l -.SH NAME -\fBpvattest [OPTION?] perform [OPTIONS] \fP- execute an attestation measurement request -\fB -.SH DESCRIPTION -Run a measurement of this system using '/dev/uv'. Works only if this device is -available and the attestation Ultravisor facility is present. -The input must be an attestation request created with 'pvattest create'. -Output will contain the original request and the response from the Ultravisor. -.RE -.PP - -.SH OPTIONS -.TP -.B -\fB-h\fP, \fB--help\fP -Show help options -.TP -.B -\fB-i\fP, \fB--input\fP=\fBFILE\fP -\fBFILE\fP specifies the attestation request as input. -.TP -.B -\fB-o\fP, \fB--output\fP=\fBFILE\fP -\fBFILE\fP specifies the output for the attestation result. -.TP -.B -\fB-V\fP, \fB--verbose\fP -Provide more detailed output (optional) -.RE -.PP - -.SH EXAMPLE -Perform an attestation measurement with the attestation request 'arcb.bin' and write the output to 'measurement.bin'. -.PP -.nf -.fam C - pvattest perform --input attreq.bin --output attresp.bin - - -.fam T -.fi -.SH SEE ALSO -\fBpvattest\fP(1), \fBpvattest-create\fP(1), \fBpvattest-verify\fP(1) diff -Nru s390-tools-2.31.0/pvattest/man/pvattest-verify.1 s390-tools-2.33.1/pvattest/man/pvattest-verify.1 --- s390-tools-2.31.0/pvattest/man/pvattest-verify.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/man/pvattest-verify.1 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -.\" Copyright 2022 IBM Corp. -.\" s390-tools is free software; you can redistribute it and/or modify -.\" it under the terms of the MIT license. See LICENSE for details. -.\" -.TH pvattest-verify 1 "07 June 2022" "s390-tools" "Attestation Manual" -.nh -.ad l -.SH NAME -\fBpvattest [OPTION?] 
verify [OPTIONS] \fP- verify an attestation measurement -\fB -.SH DESCRIPTION -Verify that a previously generated attestation measurement of an IBM Secure Execution guest is as expected. Only verify attestation requests in a trusted environment, such as your workstation. Input must contain the response as produced by 'pvattest perform'. The protection key must be the one that was used to create the request by 'pvattest create'. Please delete it after verification. The header must be the IBM Secure Execution header of the image that was attested during 'pvattest perform' -.RE -.PP - -.SH OPTIONS -.TP -.B -\fB-h\fP, \fB--help\fP -Show help options -.TP -.B -\fB-i\fP, \fB--input\fP=\fBFILE\fP -\fBFILE\fP specifies the attestation result as input. -.TP -.B -\fB-o\fP, \fB--ouput\fP=\fBFILE\fP -\fBFILE\fP specifies the output for the verification result. -.TP -.B -\fB--hdr\fP=\fBFILE\fP -Specify the header of the guest image. Exactly one is required. -.TP -.B -\fB-a\fP, \fB--arpk\fP=\fBFILE\fP -Use \fBFILE\fP to specify the GCM-AES256 key to decrypt the attestation request. Delete this key after verification. -.TP -.B -\fB--format\fP=\fByaml\fP -Define the output format. -Default value: 'yaml' - -Possible values: -.RS 4 -- \fByaml\fP: Use YAML format -.RE - -.TP -.B -\fB-V\fP, \fB--verbose\fP -Provide more detailed output (optional) -.RE -.PP - -.SH EXAMPLE -To verify a measurement in 'measurement.bin' with the protection key 'arp.kep' and SE-guest header 'se_guest.hdr'. -.PP -.nf -.fam C - pvattest verify --input attresp.bin --arpk arp.key --hdr se_guest.hdr - -.fam T -.fi -If the verification was successful the program exists with zero. -If the verification failed it exists with 2 and prints the following to stderr: -.PP -.nf -.fam C - ERROR: Attestation measurement verification failed: - Calculated and received attestation measurement are not the same. 
- -.fam T -.fi -.SH SEE ALSO -\fBpvattest\fP(1), \fBpvattest-create\fP(1), \fBpvattest-perform\fP(1) diff -Nru s390-tools-2.31.0/pvattest/README.md s390-tools-2.33.1/pvattest/README.md --- s390-tools-2.31.0/pvattest/README.md 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,100 +0,0 @@ -# pvattest - -Use `pvattest` to attest an IBM Secure Execution guest running on z16 and later. - -With `pvattest` you can create attestation requests in a trusted environment and attest -an IBM Secure Execution for Linux guest to verify that a provider is running the correct image. -To achieve this, use the following commands: - * `create` On a trusted system, creates an attestation request. - * `perform` Performs an attestation measurement on the SE-guest to be attested. For this a - attestation request is sent to the Ultravisor (UV) and the answer received. The `perform` - command requires IBM z16 or later z/Architecture hardware. - * `verify` On a trusted system, compares the answer from the Ultravisor to the - expected answer. If they differ, the Secure Execution guest might be a different guest - than expected, or not secure at all. - -For meaningful results, run `create` and `verify` only in a trusted environment, -like your workstation or a previously attested IBM Secure Execution guest. -Otherwise, the attestation can be compromised. -For all certificates, revocation lists, and host-key documents, both the PEM and DER input -formats are supported. If you run this program on a non S390 System, 'perform' is not be available. - -## Getting started - -If all dependencies are met (see the s390-tools README) issue `make` in the source tree to build `pvattest`. - -## Details -### create -`pvattest create` needs the host-key-document, a location to store the -attestation request protection key, and a location to store the request data. 
-Unless the `--no-verify` flag is set it additionally requires the IBM signing key -and the intermediate CA. The output contains the request in binary form which serves as input -to `pvattest perform`. Must be run in a trusted environment. Especially, do not create the request -on a system you want to attest. The attestation request protection key is valid for this request only, -must be kept until the verification is completed and must be destroyed afterwards -Keep the key secret. - -### perform -`pvattest perform` needs a request in binary form generated by `pvattest create`and -a location to store the output. It will send the request to the device at `/dev/uv` -which passes the request to the Ultravisor. -Kernel will then send the request to the Ultravisor which will calculate the answer. -The Answer is then passed back to userspace and handled by `pvattest` -The output includes the original request and the answer from the Ultravisor. - -### verify -`pvattest verify` needs the SE-guest header, the attestation request protection key, -and the attestation request and the response to the `pvattest perform` command from the Ultravisor. -It calculates the measurement in the trusted environment and compares it to the response from -the Ultravisor in the previous step. -The following return codes are possible: - -0. successful verification: The calculated measurement matches the response from the Ultravisor - -1. failed verification: The command ended with an error, for example, because of incorrect input or an invalid SE header - -2. failed verification: The calculated measurement does not match the response from the Ultravisor - -Run `pvattest verify` in a trusted environment. Especially, do not verify on the system you want to attest. - -## Measurement -The measurement is a cryptographic measurement of the following block. -Only HMAC-SHA512 is supported. 
- -| Start | Size | Content | -|---------|------------|---------------------------------------------------------------| -| 0x0 | 0x40 | Page List Digest (from SE header) | -| 0x40 | 0x40 | Address List Digest (from SE header) | -| 0x80 | 0x40 | Tweak List Digest (from SE header) | -| 0xc0 | 0x10 | SE Header Tag (from SE header) | -| 0xd0 | 0x10 | Configuration UID (generated by UV, included in the answer) | -| 0xe0 | 0x02 | User Data Length (defined during measurement on the SE-guest) | -| 0xe2 | 0x02 | Zeros | -| 0xe4 | 0x04 | Additional Data Length (set by UV, included in the answer) | -| 0xe8 | 0 - 0x100 | User Data (generated during measurement on the SE-guest) | -| ... | 0 or 0x10 | Optional Nonce (generated during request creation) | -| ... | 0 - 0x8000 | Additional Data (generated by UV, included in the answer) | - -### User Data -By default `pvattest` does not include any User Data, therefore the length is zero. -`User Data` is data generated by the SE guest and passed to UV during the measurement. -The `User Data` must be known to or be replicable by the verifier to verify the correctness of the User Data. -The addition of user data is currently an experimental setting. - -### Additional Data -`Additional data` is data known to the Ultravisor. By default UV will not include any `Additional Data`. -Adding `Additional Data` is currently an experimental setting. 
- -## Example - -Create an attestation request in a trusted environment: - -`pvattest create -k hkd.crt --arpk arp.key -o arcb.bin --cert IntermediateCA.crt --cert IbmSigningKey.crt` - -Perform an attestation measurement on an IBM Secure Execution guest: - -`pvattest perform --input arcb.bin --output measurement.bin` - -Verify the response from the Ultravisor against the attestation request in a trusted environment: - -`pvattest verify --input measurement.bin --arpk arp.key --hdr se_guest.hdr` diff -Nru s390-tools-2.31.0/pvattest/src/arcb.c s390-tools-2.33.1/pvattest/src/arcb.c --- s390-tools-2.31.0/pvattest/src/arcb.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/arcb.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,423 +0,0 @@ -/* - * Attestation Request Control Block related functions - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -/* Must be included before any other header */ -#include "config.h" - -#include -#include - -#include "libpv/crypto.h" -#include "libpv/hash.h" - -#include "arcb.h" -#include "common.h" -#include "log.h" - -#define ARVN_VERSION_1 0x0100 -#define MAX_ARL 0x2000 - -typedef struct arcb_v1_hdr { - uint64_t reserved0; /* 0x0000 */ - be32_t arvn; /* 0x0008 */ - be32_t arl; /* 0x000c */ - uint8_t iv[ARCB_V1_IV_SIZE]; /* 0x0010 */ - uint32_t reserved1c; /* 0x001c */ - uint8_t reserved20[7]; /* 0x0020 */ - uint8_t nks; /* 0x0027 */ - uint32_t reserved28; /* 0x0028 */ - be32_t sea; /* 0x002c */ - be64_t paf; /* 0x0030 */ - be32_t mai; /* 0x0038 */ - uint32_t reserved3c; /* 0x003c */ - PvEcdhPubKey cpk; /* 0x0040 */ -} __packed arcb_v1_hdr_t; -G_STATIC_ASSERT(sizeof(arcb_v1_hdr_t) == 0xe0); - -typedef struct arcb_v1_key_slot { - uint8_t phkh[ARCB_V1_PHKH_SIZE]; - uint8_t warpk[ARCB_V1_ATTEST_PROT_KEY_SIZE]; - uint8_t kst[ARCB_V1_TAG_SIZE]; -} __packed arcb_v1_key_slot_t; 
-G_STATIC_ASSERT(sizeof(arcb_v1_key_slot_t) == 0x50); - -struct arcb_v1 { - /* authenticated data */ - uint32_t arvn; - uint32_t mai; - uint64_t paf; - GBytes *iv; - EVP_PKEY *evp_cust_pub_key; - GSList *host_key_slots; - - /* confidential data */ - GBytes *confidential_measurement_key; - GBytes *confidential_optional_nonce; - GBytes *confidential_att_req_prot_key; -}; - -void arcb_v1_clear_free(arcb_v1_t *arcb) -{ - if (!arcb) - return; - - g_slist_free_full(arcb->host_key_slots, g_free); - g_bytes_unref(arcb->confidential_measurement_key); - g_bytes_unref(arcb->confidential_optional_nonce); - g_bytes_unref(arcb->confidential_att_req_prot_key); - g_bytes_unref(arcb->iv); - EVP_PKEY_free(arcb->evp_cust_pub_key); - g_free(arcb); -} - -static void arcb_v1_set_paf(arcb_v1_t *arcb, const uint64_t paf, GError **error) -{ - const uint64_t known_flags = ARCB_V1_PAF_ALL & ~ARCB_V1_PAF_NONCE; - - if ((paf & ARCB_V1_PAF_NONCE) != 0) { - g_set_error(error, ARCB_ERROR, ARCB_ERR_INVALID_PAF, - _("The given paf (%#.16lx) specifies the NONCE flag (%#.16lx)."), paf, - ARCB_V1_PAF_NONCE); - return; - } - if ((paf & ~known_flags) != 0) - pvattest_log_warning( - _("The given paf (%#.16lx) specifies unknown flags. 
Use at your own risk!"), - paf, known_flags); - arcb->paf = paf; -} - -arcb_v1_t *arcb_v1_new(GBytes *arpk, GBytes *iv, uint32_t mai, EVP_PKEY *evp_cpk, GBytes *mkey, - uint64_t paf, GError **error) -{ - g_autoptr(arcb_v1_t) arcb = g_new0(arcb_v1_t, 1); - - g_assert(g_bytes_get_size(iv) == ARCB_V1_IV_SIZE); - g_assert(g_bytes_get_size(arpk) == ARCB_V1_ATTEST_PROT_KEY_SIZE); - g_assert(g_bytes_get_size(mkey) == HMAC_SHA512_KEY_SIZE); - - pv_wrapped_g_assert(arpk); - pv_wrapped_g_assert(iv); - pv_wrapped_g_assert(evp_cpk); - pv_wrapped_g_assert(mkey); - - arcb->arvn = ARVN_VERSION_1; - arcb->mai = mai; - arcb_v1_set_paf(arcb, paf, error); - if (*error) - return NULL; - arcb->iv = g_bytes_ref(iv); - - if (EVP_PKEY_up_ref(evp_cpk) != 1) - g_abort(); - arcb->evp_cust_pub_key = evp_cpk; - - arcb->confidential_att_req_prot_key = g_bytes_ref(arpk); - arcb->confidential_measurement_key = g_bytes_ref(mkey); - - return g_steal_pointer(&arcb); -} - -int arcb_v1_add_key_slot(arcb_v1_t *arcb, EVP_PKEY *evp_host, GError **error) -{ - g_autoptr(GBytes) warpk = NULL, tag = NULL, phkh = NULL; - g_autoptr(GBytes) exchange_key = NULL, iv = NULL; - g_autofree arcb_v1_key_slot_t *key_slot = NULL; - g_autofree PvEcdhPubKey *ecdh_host = NULL; - g_autofree uint8_t *iv_raw = NULL; - PvCipherParms parms; - int64_t gcm_rc; - - g_assert(arcb->confidential_att_req_prot_key); - - pv_wrapped_g_assert(arcb); - pv_wrapped_g_assert(evp_host); - - /* encrypt (=wrap) attestation request protection key, store warpk + tag */ - exchange_key = pv_derive_exchange_key(arcb->evp_cust_pub_key, evp_host, error); - if (!exchange_key) - return -1; - - iv_raw = g_malloc0(ARCB_V1_IV_SIZE); - iv = g_bytes_new_take(g_steal_pointer(&iv_raw), ARCB_V1_IV_SIZE); - if (!iv) - g_abort(); - - parms.key = exchange_key; - parms.iv = iv; - parms.cipher = EVP_aes_256_gcm(); - parms.tag_size = ARCB_V1_TAG_SIZE; - gcm_rc = pv_gcm_encrypt(arcb->confidential_att_req_prot_key, NULL, &parms, &warpk, &tag, - error); - if (gcm_rc != 
ARCB_V1_ATTEST_PROT_KEY_SIZE) - return -1; - - /* calculate public host key hash */ - ecdh_host = pv_evp_pkey_to_ecdh_pub_key(evp_host, error); - if (!ecdh_host) - return -1; - phkh = pv_sha256_hash(ecdh_host->data, sizeof(ecdh_host->data), error); - if (!phkh) - return -1; - - /* copy to list */ - g_assert(g_bytes_get_size(warpk) == sizeof(key_slot->warpk)); - g_assert(g_bytes_get_size(tag) == sizeof(key_slot->kst)); - g_assert(g_bytes_get_size(phkh) == sizeof(key_slot->phkh)); - - key_slot = g_malloc0(sizeof(*key_slot)); - pv_gbytes_memcpy(key_slot->warpk, sizeof(key_slot->warpk), warpk, NULL); - pv_gbytes_memcpy(key_slot->kst, sizeof(key_slot->warpk), tag, NULL); - pv_gbytes_memcpy(key_slot->phkh, sizeof(key_slot->warpk), phkh, NULL); - - arcb->host_key_slots = g_slist_prepend(arcb->host_key_slots, g_steal_pointer(&key_slot)); - return 0; -} - -void arcb_v1_set_nonce(arcb_v1_t *arcb, GBytes *nonce) -{ - pv_wrapped_g_assert(arcb); - pv_wrapped_g_assert(nonce); - arcb_v1_rm_nonce(arcb); - g_assert(!arcb->confidential_optional_nonce); - - g_assert(g_bytes_get_size(nonce) == ARCB_V1_NONCE_SIZE); - arcb->confidential_optional_nonce = g_bytes_ref(nonce); - - arcb->paf |= ARCB_V1_PAF_NONCE; -} - -void arcb_v1_rm_nonce(arcb_v1_t *arcb) -{ - pv_wrapped_g_assert(arcb); - if (!arcb->confidential_optional_nonce) - return; - g_bytes_unref(arcb->confidential_optional_nonce); - arcb->confidential_optional_nonce = NULL; - arcb->paf &= ~ARCB_V1_PAF_NONCE; -} - -GBytes *arcb_v1_serialize(const arcb_v1_t *arcb, GError **error) -{ - pv_wrapped_g_assert(arcb); - g_autoptr(GByteArray) arcb_gba = NULL; - g_autoptr(GBytes) confidential_area = NULL; - g_autoptr(GBytes) aad = NULL; - g_autoptr(GBytes) art = NULL; - g_autoptr(GBytes) encrypted_area = NULL; - g_autoptr(GBytes) result = NULL; - g_autofree PvEcdhPubKey *ecdh_cpk = NULL; - PvCipherParms parms = { - .cipher = EVP_aes_256_gcm(), - .tag_size = AES_256_GCM_TAG_SIZE, - }; - size_t att_req_len = 0, nks = 0, sea = 0; - - 
arcb_v1_hdr_t hdr = { - .arvn = GUINT32_TO_BE(arcb->arvn), - .paf = GUINT64_TO_BE(arcb->paf), - .mai = GUINT32_TO_BE(arcb->mai), - }; - - g_assert(arcb->host_key_slots); - - /* calculate sizes */ - nks = g_slist_length(arcb->host_key_slots); - g_assert(nks < 0xFF); - - sea = g_bytes_get_size(arcb->confidential_measurement_key); - if (arcb->confidential_optional_nonce) - sea += g_bytes_get_size(arcb->confidential_optional_nonce); - - g_assert(sea == HMAC_SHA512_KEY_SIZE || sea == HMAC_SHA512_KEY_SIZE + ARCB_V1_NONCE_SIZE); - - att_req_len = sizeof(hdr) + nks * sizeof(arcb_v1_key_slot_t) + HMAC_SHA512_KEY_SIZE + - ARCB_V1_TAG_SIZE; - if (arcb->confidential_optional_nonce) - att_req_len += ARCB_V1_NONCE_SIZE; - - g_assert(att_req_len <= MAX_ARL); - - /* copy plain data to contiguous memory */ - hdr.arl = GUINT32_TO_BE((uint32_t)att_req_len); - - pv_gbytes_memcpy(hdr.iv, ARCB_V1_IV_SIZE, arcb->iv, NULL); - hdr.nks = (uint8_t)nks; - hdr.sea = GUINT32_TO_BE((uint32_t)sea); - ecdh_cpk = pv_evp_pkey_to_ecdh_pub_key(arcb->evp_cust_pub_key, error); - memcpy(&hdr.cpk, ecdh_cpk, sizeof(*ecdh_cpk)); - arcb_gba = g_byte_array_sized_new((guint)att_req_len); - g_byte_array_append(arcb_gba, (const uint8_t *)&hdr, sizeof(hdr)); - - for (GSList *elem = arcb->host_key_slots; elem; elem = elem->next) - g_byte_array_append(arcb_gba, elem->data, sizeof(arcb_v1_key_slot_t)); - - /* encrypt the confidential data */ - confidential_area = secure_gbytes_concat(arcb->confidential_measurement_key, - arcb->confidential_optional_nonce); - parms.key = arcb->confidential_att_req_prot_key; - parms.iv = arcb->iv; - aad = g_bytes_new(arcb_gba->data, arcb_gba->len); - pv_gcm_encrypt(confidential_area, aad, &parms, &encrypted_area, &art, error); - if (*error) - return NULL; - - g_byte_array_append(arcb_gba, g_bytes_get_data(encrypted_area, NULL), (guint)sea); - g_byte_array_append(arcb_gba, g_bytes_get_data(art, NULL), ARCB_V1_TAG_SIZE); - - result = g_byte_array_free_to_bytes(arcb_gba); - arcb_gba = 
NULL; - return g_steal_pointer(&result); -} - -uint32_t arcb_v1_get_required_measurement_size(const arcb_v1_t *arcb, GError **error) -{ - pv_wrapped_g_assert(arcb); - switch (arcb->mai) { - case MAI_HMAC_SHA512: - return HMAC_SHA512_KEY_SIZE; - default: - g_set_error(error, ARCB_ERROR, ARCB_ERR_INVALID_MAI, - _("Unknown measurement algorithm ID specified (%#x)."), arcb->mai); - return 0; - } -} - -uint32_t arcb_v1_get_required_additional_size(const arcb_v1_t *arcb) -{ - uint32_t size = 0; - - pv_wrapped_g_assert(arcb); - - if (arcb_v1_additional_has_phkh_image(arcb)) - size += ARCB_V1_PHKH_SIZE; - if (arcb_v1_additional_has_phkh_attest(arcb)) - size += ARCB_V1_PHKH_SIZE; - return size; -} - -gboolean arcb_v1_use_nonce(const arcb_v1_t *arcb) -{ - pv_wrapped_g_assert(arcb); - return arcb->confidential_optional_nonce != NULL; -} - -gboolean arcb_v1_additional_has_phkh_image(const arcb_v1_t *arcb) -{ - pv_wrapped_g_assert(arcb); - return (arcb->paf & ARCB_V1_PAF_AAD_PHKH_HEADER) != 0; -} - -gboolean arcb_v1_additional_has_phkh_attest(const arcb_v1_t *arcb) -{ - pv_wrapped_g_assert(arcb); - return (arcb->paf & ARCB_V1_PAF_AAD_PHKH_ATTEST) != 0; -} - -GBytes *arcb_v1_get_measurement_key(const arcb_v1_t *arcb) -{ - pv_wrapped_g_assert(arcb); - return g_bytes_ref(arcb->confidential_measurement_key); -} - -GBytes *arcb_v1_get_nonce(const arcb_v1_t *arcb) -{ - pv_wrapped_g_assert(arcb); - if (arcb->confidential_optional_nonce) - return g_bytes_ref(arcb->confidential_optional_nonce); - return NULL; -} - -GBytes *arcb_v1_get_arp_key(const arcb_v1_t *arcb) -{ - pv_wrapped_g_assert(arcb); - return g_bytes_ref(arcb->confidential_att_req_prot_key); -} - -static gboolean is_v1_arcb(size_t aad_size, size_t sea, size_t arl, size_t serialized_arcb_size, - uint32_t arcb_version, gboolean has_nonce) -{ - gboolean result = aad_size + sea + ARCB_V1_TAG_SIZE == arl; - - result &= arl <= serialized_arcb_size; - result &= arcb_version == ARVN_VERSION_1; - result &= has_nonce ? 
sea == HMAC_SHA512_KEY_SIZE + ARCB_V1_NONCE_SIZE : - sea == HMAC_SHA512_KEY_SIZE; - return result; -} - -gboolean arcb_v1_verify_serialized_arcb(GBytes *serialized_arcb, GBytes *arpk, - GBytes **measurement_key, GBytes **optional_nonce, - GError **error) -{ - g_autoptr(GBytes) encr = NULL, decr = NULL, aad = NULL, tag = NULL, iv = NULL; - const struct arcb_v1_hdr *serialized_arcb_hdr; - const uint8_t *encr_u8, *aad_u8, *tag_u8; - const uint8_t *serialized_arcb_u8; - size_t serialized_arcb_size; - uint32_t arcb_version, mai; - size_t aad_size, arl, sea; - PvCipherParms parms; - gboolean has_nonce; - uint64_t paf; - - pv_wrapped_g_assert(serialized_arcb); - pv_wrapped_g_assert(arpk); - serialized_arcb_u8 = g_bytes_get_data(serialized_arcb, &serialized_arcb_size); - serialized_arcb_hdr = (const arcb_v1_hdr_t *)serialized_arcb_u8; - arl = GUINT32_FROM_BE(serialized_arcb_hdr->arl); - arcb_version = GUINT32_FROM_BE(serialized_arcb_hdr->arvn); - mai = GUINT32_FROM_BE(serialized_arcb_hdr->mai); - - aad_u8 = serialized_arcb_u8; - aad_size = sizeof(*serialized_arcb_hdr) + - serialized_arcb_hdr->nks * sizeof(arcb_v1_key_slot_t); - encr_u8 = aad_u8 + aad_size; - sea = GUINT32_FROM_BE(serialized_arcb_hdr->sea); - tag_u8 = encr_u8 + sea; - paf = GUINT64_FROM_BE(serialized_arcb_hdr->paf); - has_nonce = (paf & ARCB_V1_PAF_NONCE) != 0; - - if (!is_v1_arcb(aad_size, sea, arl, serialized_arcb_size, arcb_version, has_nonce)) { - g_set_error(error, ARCB_ERROR, ARCB_ERR_INVALID_ARCB, - _("The provided attestation request is not valid")); - return FALSE; - } - if (mai != MAI_HMAC_SHA512) { - g_set_error(error, ARCB_ERROR, ARCB_ERR_INVALID_MAI, - _("Unsupported measurement argument ID (%#x)"), mai); - return FALSE; - } - - aad = g_bytes_new(aad_u8, aad_size); - encr = g_bytes_new(encr_u8, sea); - tag = g_bytes_new(tag_u8, ARCB_V1_TAG_SIZE); - iv = g_bytes_new(serialized_arcb_hdr->iv, sizeof(serialized_arcb_hdr->iv)); - - parms.cipher = EVP_aes_256_gcm(); - parms.tag_size = 
AES_256_GCM_TAG_SIZE; - parms.key = arpk; - parms.iv = iv; - pv_gcm_decrypt(encr, aad, tag, &parms, &decr, error); - if (*error) { - GError *tmp_error = NULL; - - g_set_error(&tmp_error, ARCB_ERROR, ARCB_ERR_INVALID_ARCB, - _("Cannot verify the attestation request: %s"), (*error)->message); - g_clear_error(error); - g_propagate_error(error, tmp_error); - return FALSE; - } - if (measurement_key) - *measurement_key = g_bytes_new(g_bytes_get_data(decr, NULL), HMAC_SHA512_KEY_SIZE); - if (optional_nonce && has_nonce) - *optional_nonce = - g_bytes_new((uint8_t *)g_bytes_get_data(decr, NULL) + HMAC_SHA512_KEY_SIZE, - ARCB_V1_NONCE_SIZE); - return TRUE; -} diff -Nru s390-tools-2.31.0/pvattest/src/arcb.h s390-tools-2.33.1/pvattest/src/arcb.h --- s390-tools-2.31.0/pvattest/src/arcb.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/arcb.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,152 +0,0 @@ -/* - * Attestation Request Control Block related functions - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ -#ifndef PVATTEST_ARCB_H -#define PVATTEST_ARCB_H -/* Must be included before any other header */ -#include "config.h" - -#include "libpv/glib-helper.h" - -#include "lib/zt_common.h" -#include "libpv/crypto.h" -#include "libpv/macros.h" - -#include "types.h" - -#define MAI_HMAC_RESERVED_INVALID 0 -#define MAI_HMAC_SHA512 0x1 - -#define HMAC_SHA512_KEY_SIZE 64 -#define ARCB_V1_ATTEST_PROT_KEY_SIZE 32 -#define ARCB_V1_NONCE_SIZE 16 -#define ARCB_V1_TAG_SIZE 16 -#define ARCB_V1_IV_SIZE 12 -#define ARCB_V1_PHKH_SIZE 32 - -/* Optional nonce in ARCB */ -#define ARCB_V1_PAF_NONCE PV_MSB(1) -/* Public host key hash used to unseal SE header added to additional data to be measured */ -#define ARCB_V1_PAF_AAD_PHKH_HEADER PV_MSB(2) -/* Public host key hash used to unseal this attestation added to additional data to be measured */ -#define ARCB_V1_PAF_AAD_PHKH_ATTEST PV_MSB(3) -/* Temporary backup-host-key use allowed */ -#define ARCB_V1_PAF_TMP_BACKUP_ALLOWED PV_MSB(62) - -/* Global not-host-specific key allowed */ -#define ARCB_V1_PAF_GLOBAL_NHS_KEY_ALLOWED PV_MSB(63) - -#define ARCB_V1_PAF_ALL \ - (ARCB_V1_PAF_NONCE | ARCB_V1_PAF_AAD_PHKH_HEADER | ARCB_V1_PAF_AAD_PHKH_ATTEST | \ - ARCB_V1_PAF_TMP_BACKUP_ALLOWED | ARCB_V1_PAF_GLOBAL_NHS_KEY_ALLOWED) - -typedef struct arcb_v1 arcb_v1_t; - -/** arcb_v1_new: - * - * @arpk: Attestation Request Protection key. AES-GCM-256 key to - * protect Measurement Key and Nonce. - * Must be ´ARCB_V1_ATTEST_PROT_KEY_SIZE´ bytes long. - * @iv: IV for protecting Measuremt Key and Nonce. - * Should be random for each new ARPK. - * Must be ´ARCB_V1_IV_SIZE´ bytes long. - * @mai: Measurement Algorithm Identifier for the attestation measurement. - * See ´enum mai´ - * @evp_cpk: Customer key in EVP_PKEY format. Must contain private and public key pair. - * @mkey: Measurement key to calculate the Measurement. - * Must be ´HMAC_SHA512_KEY_SIZE´ bytes long. - * @paf: Plain text Attestation Flags. See ´enum plaintext_attestattion_flags´. 
- * ´ARCB_V1_PAF_NONCE´ must not be set. - * @error: GError. *error will != NULL if error occours. - * - * arpk, mkey, and iv must me correct size - * If not this is considered as a programming error (No warning; - * Results in Assertion or undefined behavior). - * - * GBytes will be ref'ed. - * - * All numbers must be in system byte order and will be converted to big endian - * if needed. - * - * Returns: (nullable) (transfer full): new ARCB context. - */ -arcb_v1_t *arcb_v1_new(GBytes *arpk, GBytes *iv, uint32_t mai, EVP_PKEY *evp_cpk, GBytes *mkey, - uint64_t paf, GError **error) PV_NONNULL(1, 2, 4, 5); -void arcb_v1_clear_free(arcb_v1_t *arcb); -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(arcb_v1_t, arcb_v1_clear_free) - -/** arcb_v1_add_key_slot: - * - * @arcb: ARCB context. - * @evp_host: Host public key. - * @error: GError. *error will != NULL if error occours. - * - * Builds a key slot. Calculates exchange key, wraps ARPK with the exchange key. - * Calculates the public host key hash. Calculates the key slot tag. - * Adds it to the ARCB. - * - * Returns: 0 in case of success, -1 otherwise - */ -int arcb_v1_add_key_slot(arcb_v1_t *arcb, EVP_PKEY *evp_host, GError **error) PV_NONNULL(1, 2); -void arcb_v1_set_nonce(arcb_v1_t *arcb, GBytes *nonce) PV_NONNULL(1, 2); -void arcb_v1_rm_nonce(arcb_v1_t *arcb) PV_NONNULL(1); - -/** arcb_v1_serialize: - * - * @arcb: ARCB context. - * @error: GError. *error will != NULL if error occurs. - * - * Will create a valid ARCB for the UV. Including encrypting confidential data. - * At least one key_slot must be added beforehand. - * - * Returns: (nullable) (transfer full): The serialized ARCB which can be added to the - * Retrieve Attestation Measurement UVC as GBytes. 
- */ -GBytes *arcb_v1_serialize(const arcb_v1_t *arcb, GError **error) PV_NONNULL(1, 2); - -uint32_t arcb_v1_get_required_measurement_size(const arcb_v1_t *arcb, GError **error) - PV_NONNULL(1, 2); -uint32_t arcb_v1_get_required_additional_size(const arcb_v1_t *arcb) PV_NONNULL(1); -gboolean arcb_v1_use_nonce(const arcb_v1_t *arcb) PV_NONNULL(1); -gboolean arcb_v1_additional_has_phkh_image(const arcb_v1_t *arcb) PV_NONNULL(1); -gboolean arcb_v1_additional_has_phkh_attest(const arcb_v1_t *arcb) PV_NONNULL(1); - -GBytes *arcb_v1_get_measurement_key(const arcb_v1_t *arcb) PV_NONNULL(1); -GBytes *arcb_v1_get_nonce(const arcb_v1_t *arcb) PV_NONNULL(1); -GBytes *arcb_v1_get_arp_key(const arcb_v1_t *arcb) PV_NONNULL(1); - -/** arcb_v1_verify_serialized_arcb: - * - * @serialized_arcb: binary ARCB in UV readable format. - * @arpk: Attestation Request Protection key that was used to create serialized_arpk - * @measurement_key: Output parameter: decrypted measurement key if no error. - * May be NULL if not interested for this output. - * @optional_nonce: Output parameter: decrypted nonce if no error. - * May be NULL if not interested for this output. - * @error: GError. *error will != NULL if error occurs. - * - * - * Checks if sizes are sound and flags are known by this implementation. - * Decrypts Measurement key and nonce (if given) and verifies ARCB tag. - * - * Returns: TRUE if ARCB is valid, including matching ARCB tag. Otherwise FALSE. 
- * - */ -gboolean arcb_v1_verify_serialized_arcb(GBytes *serialized_arcb, GBytes *arpk, - GBytes **measurement_key, GBytes **optional_nonce, - GError **error) PV_NONNULL(1, 2); - -#define ARCB_ERROR g_quark_from_static_string("pv-arcb_error-quark") -typedef enum arcb_error { - ARCB_ERR_INVALID_ARCB, - ARCB_ERR_INVALID_PAF, - ARCB_ERR_INVALID_MAI, - ARCB_ERR_UNABLE_ENCR_ARPK, -} arcb_error_e; - -#endif /* PVATTEST_ARCB_H */ diff -Nru s390-tools-2.31.0/pvattest/src/argparse.c s390-tools-2.33.1/pvattest/src/argparse.c --- s390-tools-2.31.0/pvattest/src/argparse.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/argparse.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,674 +0,0 @@ -/* - * Definitions used for parsing arguments. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -/* Must be included before any other header */ -#include "config.h" - -#include -#include -#include -#include - -#include "argparse.h" -#include "log.h" -#include "common.h" - -#define DEFAULT_OUTPUT_FILE_NAME "attest.bin" -#define DEFAULT_OPTION_PHKH_IMG FALSE -#define DEFAULT_OPTION_PHKH_ATT FALSE -#define DEFAULT_OPTION_NO_VERIFY FALSE -#define DEFAULT_OPTION_ONLINE TRUE -#define DEFAULT_OPTION_NONCE TRUE - -static pvattest_config_t pvattest_config = { - .general = { - .log_level = PVATTEST_LOG_LVL_DEFAULT, - }, - .create = { - .output_path = NULL, - .host_key_document_paths = NULL, - .crl_paths = NULL, - .root_ca_path = NULL, - .certificate_paths = NULL, - .arp_key_out_path = NULL, - .phkh_img = DEFAULT_OPTION_PHKH_IMG, - .phkh_att = DEFAULT_OPTION_PHKH_ATT, - .online = DEFAULT_OPTION_ONLINE, - .use_nonce = DEFAULT_OPTION_NONCE, - .paf = 0, - .x_aad_size = -1, - }, - .perform = { - .output_path = NULL, - .input_path = NULL, - }, - .verify = { - .input_path = NULL, - .output_path = NULL, - .hdr_path = NULL, - .arp_key_in_path = NULL, - 
.output_fmt = VERIFY_FMT_YAML, - }, -}; -typedef gboolean (*verify_options_fn_t)(GError **); - -static gboolean check_for_non_null(const void *ptr, const char *msg, GError **error) -{ - if (!ptr) { - g_set_error(error, PVATTEST_ERROR, PVATTEST_ERR_INV_ARG, "%s", msg); - return FALSE; - } - return TRUE; -} - -static gboolean _check_for_invalid_path(const char *path, gboolean must_exist, GError **error) -{ - int cached_errno = 0; - - g_assert(path); - - if (must_exist) { - if (access(path, F_OK | R_OK) != 0) - cached_errno = errno; - } - if (cached_errno) { - g_set_error(error, PVATTEST_ERROR, PVATTEST_ERR_INV_ARG, "Cannot access '%s': %s", - path, g_strerror(cached_errno)); - return FALSE; - } - return TRUE; -} - -static gboolean check_for_optional_invalid_path(const char *path, gboolean must_exist, - GError **error) -{ - if (!path) - return TRUE; - return _check_for_invalid_path(path, must_exist, error); -} - -static gboolean check_for_invalid_path(const char *path, gboolean must_exist, const char *null_msg, - GError **error) -{ - if (!check_for_non_null(path, null_msg, error)) - return FALSE; - return _check_for_invalid_path(path, must_exist, error); -} - -static gboolean _check_file_list(char **path_list, gboolean must_exist, GError **error) -{ - char *path = NULL; - for (char **path_it = path_list; path_it != NULL && *path_it != NULL; path_it++) { - path = *path_it; - if (!_check_for_invalid_path(path, must_exist, error)) - return FALSE; - } - return TRUE; -} - -static gboolean check_optional_file_list(char **path_list, gboolean must_exist, GError **error) -{ - if (!path_list) - return TRUE; - return _check_file_list(path_list, must_exist, error); -} - -static gboolean check_file_list(char **path_list, gboolean must_exist, const char *null_msg, - GError **error) -{ - if (!check_for_non_null(path_list, null_msg, error)) - return FALSE; - return _check_file_list(path_list, must_exist, error); -} - -static gboolean hex_str_toull(const char *nptr, uint64_t *dst, 
GError **error) -{ - uint64_t value; - gchar *end; - - g_assert(dst); - - if (!g_str_is_ascii(nptr)) { - g_set_error( - error, PVATTEST_ERROR, PVATTEST_ERR_INV_ARG, - _("Invalid value: '%s'. A hexadecimal value is required, for example '0xcfe'"), - nptr); - return FALSE; - } - - value = g_ascii_strtoull(nptr, &end, 16); - if ((value == G_MAXUINT64 && errno == ERANGE) || (end && *end != '\0')) { - g_set_error( - error, PVATTEST_ERROR, PVATTEST_ERR_INV_ARG, - _("Invalid value: '%s'. A hexadecimal value is required, for example '0xcfe'"), - nptr); - return FALSE; - } - *dst = value; - return TRUE; -} - -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wmissing-field-initializers" - -/************************* SHARED OPTIONS *************************************/ -/* NOTE REQUIRED */ -#define _entry_host_key_document(__arg_data, __indent) \ - { \ - .long_name = "host-key-document", .short_name = 'k', .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME_ARRAY, .arg_data = __arg_data, \ - .description = \ - "FILE specifies a host-key document. At least one is required.\n" __indent \ - "Specify this option multiple times to enable the request for\n" __indent \ - "more than one host.\n", \ - .arg_description = "FILE", \ - } - -/* NOTE REQUIRED */ -#define _entry_certs(__arg_data, __indent) \ - { \ - .long_name = "cert", .short_name = 'C', .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME_ARRAY, .arg_data = __arg_data, \ - .description = "FILE contains a certificate that is used to\n" __indent \ - "establish a chain of trust for the verification\n" __indent \ - "of the host-key documents. 
The IBM Z signing\n" __indent \ - "key and intermediate CA certificate (signed\n" __indent \ - "by the root CA) are required.\n", \ - .arg_description = "FILE", \ - } - -/* NOTE REQUIRED */ -#define _entry_crls(__arg_data, __indent) \ - { \ - .long_name = "crl", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME_ARRAY, .arg_data = __arg_data, \ - .description = "FILE contains a certificate revocation list (optional).\n", \ - .arg_description = "FILE", \ - } - -/* NOTE REQUIRED */ -#define _entry_root_ca(__arg_data, __indent) \ - { \ - .long_name = "root-ca", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME_ARRAY, .arg_data = __arg_data, \ - .description = "Use FILE as the trusted root CA instead the\n" __indent \ - "root CAs that are installed on the system (optional).\n", \ - .arg_description = "FILE", \ - } - -/* NOTE REQUIRED */ -#define _entry_guest_hdr(__arg_data, __indent) \ - { \ - .long_name = "hdr", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME, .arg_data = __arg_data, \ - .description = "FILE specifies the header of the guest image.\n" __indent \ - "Exactly one is required.\n", \ - .arg_description = "FILE", \ - } - -/* NOTE REQUIRED */ -#define _entry_input(__arg_data, __additional_text, __indent) \ - { \ - .long_name = "input", .short_name = 'i', .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME, .arg_data = __arg_data, \ - .description = "FILE specifies the " __additional_text " as input.\n", \ - .arg_description = "FILE", \ - } - -/* NOTE REQUIRED */ -#define _entry_output(__arg_data, __additional_text, __indent) \ - { \ - .long_name = "output", .short_name = 'o', .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME, .arg_data = __arg_data, \ - .description = "FILE specifies the output for the " __additional_text "\n", \ - .arg_description = "FILE", \ - } - -/* NOTE REQUIRED */ -#define _entry_att_prot_key_save(__arg_data, __indent) \ - { \ 
- .long_name = "arpk", .short_name = 'a', .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME, .arg_data = __arg_data, \ - .description = \ - "Save the protection key as GCM-AES256 key in FILE\n" __indent \ - "Do not publish this key, otherwise your attestation is compromised.\n", \ - .arg_description = "FILE", \ - } - -/* NOTE REQUIRED */ -#define _entry_att_prot_key_load(__arg_data, __indent) \ - { \ - .long_name = "arpk", .short_name = 'a', .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME, .arg_data = __arg_data, \ - .description = "Use FILE to specify the GCM-AES256 key to decrypt\n" __indent \ - "the attestation request.\n" __indent \ - "Delete this key after verification.\n", \ - .arg_description = "FILE", \ - } - -#define _entry_phkh_img(__arg_data, __indent) \ - { \ - .long_name = "x-phkh-img", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_NONE, .arg_data = __arg_data, \ - .description = "Add the public host key hash of the\n" __indent \ - "image header used to decrypt\n" __indent \ - "the secure guest to the measurement. (optional)\n" \ - } - -#define _entry_phkh_att(__arg_data, __indent) \ - { \ - .long_name = "x-phkh-att", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_NONE, .arg_data = __arg_data, \ - .description = "Add the public host key hash of the\n" __indent \ - "attestation header used to decrypt\n" __indent \ - "the attestation request to the measurement. 
(optional)\n" \ - } - -#define _entry_no_verify(__arg_data, __indent) \ - { \ - .long_name = "no-verify", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_NONE, .arg_data = __arg_data, \ - .description = "Disable the host-key document verification.\n" __indent \ - "(optional)\n", \ - } - -#define _entry_offline_maps_to_online(__arg_data, __indent) \ - { \ - .long_name = "offline", .short_name = 0, .flags = G_OPTION_FLAG_REVERSE, \ - .arg = G_OPTION_ARG_NONE, .arg_data = __arg_data, \ - .description = "Don't download CRLs. (optional)\n", \ - } - -#define _entry_verbose(__indent) \ - { \ - .long_name = "verbose", .short_name = 'V', .flags = G_OPTION_FLAG_NO_ARG, \ - .arg = G_OPTION_ARG_CALLBACK, .arg_data = &increase_log_lvl, \ - .description = "Provide more detailed output. (optional)\n", \ - .arg_description = NULL, \ - } - -#define _entry_x_paf(__arg_data, __indent) \ - { \ - .long_name = "x-paf", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_CALLBACK, .arg_data = __arg_data, \ - .description = "Specify the Plain text Attestation Flags\n" __indent \ - "as a hexadecimal value. 
Flags that change\n" __indent \ - "the paf (--phkh-*) take precedence over\n" __indent \ - "this flag.\n" __indent \ - "Setting the nonce paf is not allowed here.\n" __indent \ - "(optional, default 0x0)\n", \ - .arg_description = "HEX", \ - } - -#define _entry_x_no_nonce(__arg_data, __indent) \ - { \ - .long_name = "x-no-nonce", .short_name = 0, .flags = G_OPTION_FLAG_REVERSE, \ - .arg = G_OPTION_ARG_NONE, .arg_data = __arg_data, \ - .description = "Do not use a nonce in the request.\n" __indent \ - "(optional, not recommended)\n" \ - } - -#define _entry_x_aad_size(__arg_data, __indent) \ - { \ - .long_name = "x-add-size", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_INT, .arg_data = __arg_data, \ - .description = "Specify the size of the additional area\n" __indent \ - "Overwrite every flag that changes\n" __indent \ - "this size implicitly. No verification is performed!\n" __indent \ - "Ignored if negative.\n" __indent "(optional, default ignored)\n", \ - .arg_description = "INT" \ - } - -#define _entry_x_user_data(__arg_data, __indent) \ - { \ - .long_name = "x-user-data", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_FILENAME, .arg_data = __arg_data, \ - .description = "Use FILE to specify the user data.\n", .arg_description = "FILE", \ - } - -#define _entry__verify_format(__indent) \ - { \ - .long_name = "format", .short_name = 0, .flags = G_OPTION_FLAG_NONE, \ - .arg = G_OPTION_ARG_CALLBACK, .arg_data = &set_verify_output_format, \ - .description = "Define the output format.\n" __indent \ - "Defaults to 'yaml'. 
(possible values: 'yaml')\n", \ - .arg_description = "FORMAT", \ - } - -static gboolean increase_log_lvl(G_GNUC_UNUSED const char *option_name, - G_GNUC_UNUSED const char *value, G_GNUC_UNUSED void *data, - G_GNUC_UNUSED GError **error) -{ - pvattest_log_increase_log_lvl(&pvattest_config.general.log_level); - return TRUE; -} - -static gboolean set_verify_output_format(const char *option_name, const char *value, - G_GNUC_UNUSED void *data, GError **error) -{ - if (!g_strcmp0(value, "yaml")) { - pvattest_config.verify.output_fmt = VERIFY_FMT_YAML; - } else { - g_set_error(error, G_OPTION_ERROR, G_OPTION_ERROR_FAILED, - _("Found value '%s' for option '%s', but only 'yaml' is allowed."), - value, option_name); - return FALSE; - } - return TRUE; -} - -static gboolean create_set_paf(G_GNUC_UNUSED const char *option_name, const char *value, - G_GNUC_UNUSED void *data, GError **error) -{ - return hex_str_toull(value, &pvattest_config.create.paf, error); -} - -/***************************** GENERAL OPTIONS ********************************/ -static gboolean print_version = FALSE; - -static GOptionEntry general_options[] = { - { - .long_name = "version", - .short_name = 'v', - .flags = G_OPTION_FLAG_NONE, - .arg = G_OPTION_ARG_NONE, - .arg_data = &print_version, - .description = "Print the version and exit.\n", - .arg_description = NULL, - }, - _entry_verbose(""), - { NULL }, -}; - -/************************* CREATE ATTESTATION OPTIONS *************************/ -#define create_indent " " - -static GOptionEntry create_options[] = { - _entry_host_key_document(&pvattest_config.create.host_key_document_paths, create_indent), - _entry_certs(&pvattest_config.create.certificate_paths, create_indent), - _entry_crls(&pvattest_config.create.crl_paths, create_indent), - _entry_root_ca(&pvattest_config.create.root_ca_path, create_indent), - _entry_output(&pvattest_config.create.output_path, "attestation request", create_indent), - 
_entry_att_prot_key_save(&pvattest_config.create.arp_key_out_path, create_indent), - - _entry_no_verify(&pvattest_config.create.no_verify, create_indent), - _entry_offline_maps_to_online(&pvattest_config.create.online, create_indent), - _entry_verbose(create_indent), - { NULL } -}; - -static GOptionEntry experimental_create_options[] = { - _entry_x_no_nonce(&pvattest_config.create.use_nonce, create_indent), - _entry_x_paf(&create_set_paf, create_indent), - _entry_x_aad_size(&pvattest_config.create.x_aad_size, create_indent), - _entry_phkh_img(&pvattest_config.create.phkh_img, create_indent), - _entry_phkh_att(&pvattest_config.create.phkh_att, create_indent), - { NULL } -}; - -static gboolean verify_create(GError **error) -{ - if (!check_file_list(pvattest_config.create.host_key_document_paths, TRUE, - _("Specify --host-key-document at least once."), error)) - return FALSE; - if (!pvattest_config.create.no_verify) { - if (!check_file_list( - pvattest_config.create.certificate_paths, TRUE, - _("Either specify the IBM Z signing key and" - " intermediate CA certificate\nby using the '--cert' option, or" - " use the '--no-verify' flag to disable the\nhost-key document" - " verification completely (at your own risk).\n" - "Only use this option in test environments or if" - " you trust the unverified document."), - error)) - return FALSE; - } - if (!check_for_invalid_path(pvattest_config.create.arp_key_out_path, FALSE, - _("Missing argument for --arpk."), error)) - return FALSE; - if (!check_for_invalid_path(pvattest_config.create.output_path, FALSE, - _("Missing argument for --output."), error)) - return FALSE; - if (!check_optional_file_list(pvattest_config.create.crl_paths, TRUE, error)) - return FALSE; - if (!check_for_optional_invalid_path(pvattest_config.create.root_ca_path, TRUE, error)) - return FALSE; - return TRUE; -}; - -/************************* MEASUREMENT OPTIONS ********************************/ -#define perform_indent " " - -static GOptionEntry 
perform_options[] = { - _entry_input(&pvattest_config.perform.input_path, "attestation request", perform_indent), - _entry_output(&pvattest_config.perform.output_path, "attestation result", perform_indent), - _entry_verbose(perform_indent), - { NULL }, -}; - -static GOptionEntry experimental_perform_options[] = { - _entry_x_user_data(&pvattest_config.perform.user_data_path, perform_indent), - { NULL }, -}; - -static gboolean verify_perform(GError **error) -{ - if (!check_for_invalid_path(pvattest_config.perform.input_path, TRUE, - _("Missing argument for --input."), error)) - return FALSE; - if (!check_for_invalid_path(pvattest_config.perform.output_path, FALSE, - _("Missing argument for --output."), error)) - return FALSE; - if (!check_for_optional_invalid_path(pvattest_config.perform.user_data_path, TRUE, error)) - return FALSE; - return TRUE; -} - -/************************* VERIFY OPTIONS ************************************/ -#define verify_indent " " - -static GOptionEntry verify_options[] = { - _entry_input(&pvattest_config.verify.input_path, "attestation result", verify_indent), - _entry_output(&pvattest_config.verify.output_path, - "verification result.\n" verify_indent "(optional)", verify_indent), - _entry_guest_hdr(&pvattest_config.verify.hdr_path, verify_indent), - _entry_att_prot_key_load(&pvattest_config.verify.arp_key_in_path, verify_indent), - _entry_verbose(verify_indent), - _entry__verify_format(verify_indent), - { NULL }, -}; - -static gboolean verify_verify(GError **error) -{ - if (!check_for_invalid_path(pvattest_config.verify.input_path, TRUE, - _("Missing argument for --input."), error)) - return FALSE; - if (!check_for_invalid_path(pvattest_config.verify.hdr_path, TRUE, - _("Missing argument for --hdr."), error)) - return FALSE; - if (!check_for_invalid_path(pvattest_config.verify.arp_key_in_path, TRUE, - _("Missing argument for --arpk."), error)) - return FALSE; - return TRUE; -} - -/************************** OPTIONS END 
***************************************/ - -#pragma GCC diagnostic pop - -static char summary[] = - "\n" - "Create, perform, and verify attestation measurements for IBM Secure Execution guest" - " systems.\n" - "\n" - "COMMANDS\n" - " create On a trusted system, creates an attestation request.\n" - " perform On the SE-guest to be attested, sends the attestation request\n" - " to the Ultravisor and receives the answer.\n" -#ifndef PVATTEST_COMPILE_PERFORM - " (not supported on this platform)\n" -#endif /* PVATTEST_COMPILE_PERFORM */ - - " verify On a trusted system, compares the one from your trusted system.\n" - " If they differ, the Secure Execution guest might not be compromised\n" - "\n" - "Use '" GETTEXT_PACKAGE " [COMMAND] -h' to get detailed help\n"; -static char create_summary[] = - "Create attestation measurement requests to attest an\n" - "IBM Secure Execution guest. Only build attestation requests in a trusted\n" - "environment such as your Workstation.\n" - "To avoid compromising the attestation do not publish the\n" - "protection key and delete it after verification.\n" - "Every 'create' will generate a new, random protection key.\n"; -static char perform_summary[] = -#ifndef PVATTEST_COMPILE_PERFORM - "This system does NOT support 'perform'.\n" -#endif /* PVATTEST_COMPILE_PERFORM */ - "Perform a measurement of this IBM Secure Execution guest using '/dev/uv'.\n"; -static char verify_summary[] = - "Verify that a previously generated attestation measurement of an\n" - "IBM Secure Execution guest yielded the expected results.\n" - "Verify attestation requests only in a trusted environment, such as your workstation."; - -static void print_version_and_exit(void) -{ - printf("%s version %s\n", GETTEXT_PACKAGE, RELEASE_STRING); - printf("%s\n", COPYRIGHT_NOTICE); - exit(EXIT_SUCCESS); -} - -static GOptionContext *create_ctx(GOptionEntry *options, GOptionEntry *experimental_options, - const char *param_name, const char *opt_summary) -{ - GOptionContext *ret = 
g_option_context_new(param_name); - GOptionGroup *x_group = NULL; - g_option_context_add_main_entries(ret, options, NULL); - g_option_context_set_summary(ret, opt_summary); - if (experimental_options) { - x_group = g_option_group_new( - "experimental", - "Experimental Options; Do not use in a production environment", - "Show experimental options", NULL, NULL); - g_option_group_add_entries(x_group, experimental_options); - g_option_context_add_group(ret, x_group); - } - return ret; -} - -enum pvattest_command pvattest_parse(int *argc, char **argvp[], pvattest_config_t **config, - GError **error) -{ - g_autoptr(GOptionContext) main_context = NULL, subc_context = NULL; - char **argv = *argvp; - enum pvattest_command subc = PVATTEST_SUBC_INVALID; - verify_options_fn_t verify_options_fn = NULL; - - pv_wrapped_g_assert(argc); - pv_wrapped_g_assert(argvp); - pv_wrapped_g_assert(config); - - /* - * First parse until the first non dash argument. This must be one of the commands. - * (strict POSIX parsing) - */ - main_context = g_option_context_new( - "COMMAND [OPTIONS] - create, perform, and verify attestation measurements"); - g_option_context_set_strict_posix(main_context, TRUE); - g_option_context_add_main_entries(main_context, general_options, NULL); - g_option_context_set_summary(main_context, summary); - - if (!g_option_context_parse(main_context, argc, argvp, error)) - return PVATTEST_SUBC_INVALID; - if (print_version) - print_version_and_exit(); - - /* - * Parse depending on the specified command - */ - else if (g_strcmp0(argv[1], PVATTEST_SUBC_STR_CREATE) == 0) { - subc_context = - create_ctx(create_options, experimental_create_options, - "create [OPTIONS] - create an attestation measurement request", - create_summary); - subc = PVATTEST_SUBC_CREATE; - verify_options_fn = &verify_create; - } else if (g_strcmp0(argv[1], PVATTEST_SUBC_STR_PERFORM) == 0) { - subc_context = - create_ctx(perform_options, experimental_perform_options, - "perform [OPTIONS] - perform an 
attestation measurement request", - perform_summary); - subc = PVATTEST_SUBC_PERFORM; - verify_options_fn = &verify_perform; -#ifndef PVATTEST_COMPILE_PERFORM - g_set_error(error, PVATTEST_ERROR, PVATTEST_ERR_INV_ARG, - _("This system does not support the 'perform' command.")); - return PVATTEST_SUBC_INVALID; -#endif /* PVATTEST_COMPILE_PERFORM */ - } else if (g_strcmp0(argv[1], PVATTEST_SUBC_STR_VERIFY) == 0) { - subc_context = create_ctx(verify_options, NULL, - "verify [OPTIONS] - verify an attestation measurement", - verify_summary); - subc = PVATTEST_SUBC_VERIFY; - verify_options_fn = &verify_verify; - } else { - if (argv[1]) - g_set_error(error, PVATTEST_ERROR, PVATTEST_ERR_INV_ARGV, - _("Invalid command specified: %s."), argv[1]); - else - g_set_error(error, PVATTEST_ERROR, PVATTEST_ERR_INV_ARGV, - _("No command specified.")); - return PVATTEST_SUBC_INVALID; - } - g_assert(verify_options_fn); - - if (!g_option_context_parse(subc_context, argc, argvp, error)) - return PVATTEST_SUBC_INVALID; - - if (!verify_options_fn(error)) - return PVATTEST_SUBC_INVALID; - - *config = &pvattest_config; - return subc; -} - -static void pvattest_parse_clear_create_config(pvattest_create_config_t *config) -{ - if (!config) - return; - g_strfreev(config->host_key_document_paths); - g_strfreev(config->certificate_paths); - g_free(config->arp_key_out_path); - g_free(config->output_path); -} - -static void pvattest_parse_clear_perform_config(pvattest_perform_config_t *config) -{ - if (!config) - return; - g_free(config->input_path); - g_free(config->output_path); -} - -static void pvattest_parse_clear_verify_config(pvattest_verify_config_t *config) -{ - if (!config) - return; - g_free(config->input_path); - g_free(config->output_path); - g_free(config->hdr_path); - g_free(config->arp_key_in_path); -} - -void pvattest_parse_clear_config(pvattest_config_t *config) -{ - if (!config) - return; - pvattest_parse_clear_create_config(&config->create); - 
pvattest_parse_clear_perform_config(&config->perform); - pvattest_parse_clear_verify_config(&config->verify); -} diff -Nru s390-tools-2.31.0/pvattest/src/argparse.h s390-tools-2.33.1/pvattest/src/argparse.h --- s390-tools-2.31.0/pvattest/src/argparse.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/argparse.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,113 +0,0 @@ -/* - * Definitions used for parsing arguments. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -#ifndef PVATTEST_ARGPARSE_H -#define PVATTEST_ARGPARSE_H -/* Must be included before any other header */ -#include "config.h" - -#include - -#include "libpv/glib-helper.h" -#include "libpv/macros.h" - -#define PVATTEST_SUBC_STR_CREATE "create" -#define PVATTEST_SUBC_STR_PERFORM "perform" -#define PVATTEST_SUBC_STR_VERIFY "verify" - -enum pvattest_command { - PVATTEST_SUBC_INVALID, - PVATTEST_SUBC_CREATE, - PVATTEST_SUBC_PERFORM, - PVATTEST_SUBC_VERIFY, -}; - -typedef struct { - int log_level; -} pvattest_general_config_t; - -typedef struct { - char **host_key_document_paths; - char **certificate_paths; - char **crl_paths; - char *root_ca_path; - - char *arp_key_out_path; - char *output_path; - - gboolean phkh_img; - gboolean phkh_att; - gboolean no_verify; - gboolean online; - - /* experimental flags */ - gboolean use_nonce; /* default TRUE */ - uint64_t paf; /* default 0 */ - int x_aad_size; /* default -1 -> ignore */ -} pvattest_create_config_t; - -typedef struct { - char *output_path; - char *input_path; - /* experimental flags */ - char *user_data_path; /* default NULL */ -} pvattest_perform_config_t; - -enum verify_output_format { - VERIFY_FMT_HUMAN, - VERIFY_FMT_YAML, -}; - -typedef struct { - char *input_path; - char *output_path; - enum verify_output_format output_fmt; - char *hdr_path; - char *arp_key_in_path; -} pvattest_verify_config_t; - -typedef 
struct { - pvattest_general_config_t general; - pvattest_create_config_t create; - pvattest_perform_config_t perform; - pvattest_verify_config_t verify; -} pvattest_config_t; - -/** - * pvattest_parse_clear_config: - * - * @config: struct to be cleared - * - * clears but not frees all config. - * all non config members such like char* will be freed. - */ -void pvattest_parse_clear_config(pvattest_config_t *config); - -/** - * pvattest_parse: - * - * @argc: ptr to argument count - * @argv: ptr to argument vector - * @config: output: ptr to parsed config. Target is statically allocated. - * You are responsible for freeing all non config ptrs. - * use #pvattest_parse_clear_config for that. - * - * Will not return if verbose or help parsed. - * - * Returns: selected command as enum - */ -enum pvattest_command pvattest_parse(int *argc, char **argvp[], pvattest_config_t **config, - GError **error) PV_NONNULL(1, 2, 3); - -#define PVATTEST_ERROR g_quark_from_static_string("pv-pvattest_error-quark") -typedef enum { - PVATTEST_ERR_INV_ARGV, - PVATTEST_ERR_INV_ARG, -} pv_pvattest_error_e; - -#endif /* PVATTEST_ARGPARSE_H */ diff -Nru s390-tools-2.31.0/pvattest/src/attestation.c s390-tools-2.33.1/pvattest/src/attestation.c --- s390-tools-2.31.0/pvattest/src/attestation.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/attestation.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,148 +0,0 @@ -/* - * Attestation related functions - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ -/* Must be included before any other header */ -#include "config.h" - -#include "libpv/cert.h" -#include "libpv/hash.h" -#include "libpv/se-hdr.h" - -#include "exchange_format.h" -#include "attestation.h" - -G_STATIC_ASSERT(sizeof(((att_meas_ctx_t *)0)->pld) == sizeof(((struct pv_hdr_head *)0)->pld)); -G_STATIC_ASSERT(sizeof(((att_meas_ctx_t *)0)->ald) == sizeof(((struct pv_hdr_head *)0)->ald)); -G_STATIC_ASSERT(sizeof(((att_meas_ctx_t *)0)->tld) == sizeof(((struct pv_hdr_head *)0)->tld)); -G_STATIC_ASSERT(sizeof(((att_meas_ctx_t *)0)->tag) == sizeof(((struct pv_hdr *)0)->tag)); - -struct att_meas_sizes { - uint16_t user_data_len; - uint16_t zeros; - uint32_t additional_data_len; -} __packed; -G_STATIC_ASSERT(sizeof(struct att_meas_sizes) == 8); - -/* - * All optional arguments may be NULL - * user_data is up to 256 bytes long, or NULL. - * nonce is 16 bytes long or NULL. - * additional_data is up to 32768 bytes long or NULL. - */ -GBytes *att_gen_measurement_hmac_sha512(const att_meas_ctx_t *meas_ctx, GBytes *measurement_key, - GBytes *optional_user_data, GBytes *optional_nonce, - GBytes *optional_additional_data, GError **error) -{ - struct att_meas_sizes meas_sizes = {}; - g_autoptr(HMAC_CTX) hmac_ctx = NULL; - size_t additional_data_size = 0; - size_t user_data_size = 0; - size_t nonce_size = 0; - - pv_wrapped_g_assert(meas_ctx); - pv_wrapped_g_assert(measurement_key); - - if (optional_user_data) - user_data_size = g_bytes_get_size(optional_user_data); - if (optional_additional_data) - additional_data_size = g_bytes_get_size(optional_additional_data); - if (optional_nonce) - nonce_size = g_bytes_get_size(optional_nonce); - - /* checks for these sizes resulting in GErrors are done before */ - g_assert(user_data_size <= PVATTEST_USER_DATA_MAX_SIZE); - g_assert(additional_data_size <= PVATTEST_ADDITIONAL_MAX_SIZE); - g_assert(nonce_size == 0 || nonce_size == ARCB_V1_NONCE_SIZE); - - pv_wrapped_g_assert(meas_ctx); - pv_wrapped_g_assert(measurement_key); - - 
hmac_ctx = pv_hmac_ctx_new(measurement_key, EVP_sha512(), error); - if (!hmac_ctx) - return NULL; - - meas_sizes.user_data_len = GUINT16_TO_BE((uint16_t)user_data_size); - meas_sizes.zeros = 0; - meas_sizes.additional_data_len = GUINT32_TO_BE((uint32_t)additional_data_size); - - if (pv_hmac_ctx_update_raw(hmac_ctx, meas_ctx, sizeof(*meas_ctx), error) != 0) - return NULL; - - /* add the sizes of user and additional data. */ - if (pv_hmac_ctx_update_raw(hmac_ctx, &meas_sizes, sizeof(meas_sizes), error)) - return NULL; - - /* update optional data. if NULL passed (or size = 0) nothing will happen to the HMAC_CTX */ - if (pv_hmac_ctx_update(hmac_ctx, optional_user_data, error) != 0) - return NULL; - if (pv_hmac_ctx_update(hmac_ctx, optional_nonce, error) != 0) - return NULL; - if (pv_hmac_ctx_update(hmac_ctx, optional_additional_data, error) != 0) - return NULL; - return pv_hamc_ctx_finalize(hmac_ctx, error); -} - -att_meas_ctx_t *att_extract_from_hdr(GBytes *se_hdr, GError **error) -{ - g_autofree att_meas_ctx_t *meas = NULL; - const struct pv_hdr *hdr = NULL; - size_t se_hdr_tag_offset; - size_t se_hdr_size; - uint8_t *hdr_u8; - - pv_wrapped_g_assert(se_hdr); - - hdr = g_bytes_get_data(se_hdr, &se_hdr_size); - hdr_u8 = (uint8_t *)hdr; - - if (se_hdr_size < PV_V1_PV_HDR_MIN_SIZE) { - g_set_error(error, ATT_ERROR, ATT_ERR_INVALID_HDR, - _("Invalid SE header provided.")); - return NULL; - } - - if (GUINT32_FROM_BE(hdr->head.phs) != se_hdr_size || - GUINT64_FROM_BE(hdr->head.magic) != PV_MAGIC_NUMBER) { - g_set_error(error, ATT_ERROR, ATT_ERR_INVALID_HDR, - _("Invalid SE header provided.")); - return NULL; - } - - se_hdr_tag_offset = GUINT32_FROM_BE(hdr->head.phs) - sizeof(hdr->tag); - meas = g_new0(att_meas_ctx_t, 1); - - memcpy(meas->pld, hdr->head.pld, sizeof(meas->pld)); - memcpy(meas->ald, hdr->head.ald, sizeof(meas->ald)); - memcpy(meas->tld, hdr->head.tld, sizeof(meas->tld)); - memcpy(meas->tag, hdr_u8 + se_hdr_tag_offset, sizeof(meas->tag)); - - return 
g_steal_pointer(&meas); -} - -void att_add_uid(att_meas_ctx_t *meas_ctx, GBytes *config_uid) -{ - pv_wrapped_g_assert(meas_ctx); - pv_wrapped_g_assert(config_uid); - - g_assert(g_bytes_get_size(config_uid) == ATT_CONFIG_UID_SIZE); - pv_gbytes_memcpy(meas_ctx->config_uid, ATT_CONFIG_UID_SIZE, config_uid, NULL); -} - -gboolean att_verify_measurement(const GBytes *calculated_measurement, - const GBytes *uvio_measurement, GError **error) -{ - pv_wrapped_g_assert(calculated_measurement); - pv_wrapped_g_assert(uvio_measurement); - - if (g_bytes_compare(calculated_measurement, uvio_measurement) != 0) { - g_set_error(error, ATT_ERROR, ATT_ERR_MEASUREMENT_VERIFICATION_FAILED, - _("Calculated and received attestation measurement are not equal.")); - return FALSE; - } - return TRUE; -} diff -Nru s390-tools-2.31.0/pvattest/src/attestation.h s390-tools-2.33.1/pvattest/src/attestation.h --- s390-tools-2.31.0/pvattest/src/attestation.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/attestation.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,99 +0,0 @@ -/* - * Attestation related functions and definitions. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ -#ifndef PVATTEST_ATTESTATION_H -#define PVATTEST_ATTESTATION_H -/* Must be included before any other header */ -#include "config.h" - -#include - -#include "libpv/glib-helper.h" -#include "libpv/crypto.h" -#include "libpv/cert.h" - -#include "common.h" -#include "types.h" -#include "arcb.h" - -#define ATT_CONFIG_UID_SIZE 16 - -typedef struct { - uint8_t pld[SHA512_DIGEST_LENGTH]; - uint8_t ald[SHA512_DIGEST_LENGTH]; - uint8_t tld[SHA512_DIGEST_LENGTH]; - uint8_t tag[AES_256_GCM_TAG_SIZE]; - uint8_t config_uid[ATT_CONFIG_UID_SIZE]; -} __packed att_meas_ctx_t; -G_STATIC_ASSERT(sizeof(att_meas_ctx_t) == 224); - -/** - * att_gen_measurement_hmac_sha512: - * - * @meas_ctx: measurement context. - * @measurement_key: AES-256-GCM key for generating the measurement calculation. - * @optional_user_data: NULL or up to 256 bytes GBytes. - * @optional_nonce: NULL or a nonce of exactly `ARCB_V1_NONCE_SIZE` bytes - * @optional_additional_data: NULL or up to 0x8000 bytes of GBytes. - * @error: GError. *error will != NULL if error occurs. - * - * Calculates the measurement value. - * If the input data is the same which the UV used in the Retrieve Attestation Measurement - * the result should be identical to the data in the ´Measurement Data Address´ UVC. - * - * Returns: (nullable) (transfer full): a hmac_sha512 of the given data. - */ -GBytes *att_gen_measurement_hmac_sha512(const att_meas_ctx_t *meas_ctx, GBytes *measurement_key, - GBytes *optional_user_data, GBytes *optional_nonce, - GBytes *optional_additional_data, GError **error) - PV_NONNULL(1, 2); - -/** - * att_extract_from_hdr: - * - * @se_hdr: binary SE guest header. - * @error: GError. *error will != NULL if error occurs. - * - * Verifies that SE header size and magic ,but no cryptographical verification. - * Then, find and extracts pld, ald, tld, and SE tagi and adds it to the context. - * - * Returns: new attestation measurement context. 
- */ -att_meas_ctx_t *att_extract_from_hdr(GBytes *se_hdr, GError **error) PV_NONNULL(1); - -/** att_add_uid: - * - * @meas_ctx: measurement context. - * @config_uid: pointer to config UID. Must be `ATT_CONFIG_UID_SIZE` bytes long. - * - * Copies the config UID to the measurement context. - * Wrong size is considered as a Programming error. - */ -void att_add_uid(att_meas_ctx_t *meas_ctx, GBytes *config_uid) PV_NONNULL(1, 2); - -/** att_verify_measurement: - * - * @calculated_measurement: measurement calculated by a trusted system - * @uvio_measurement: measurement generated by an UV - * @error: GError. *error will != NULL if error occurs. - * - * Returns: TRUE if measurements are identical, otherwise FALSE - */ -gboolean att_verify_measurement(const GBytes *calculated_measurement, - const GBytes *uvio_measurement, GError **error) PV_NONNULL(1, 2); - -#define ATT_ERROR g_quark_from_static_string("pv-att_error-quark") -typedef enum att_error { - ATT_ERR_INVALID_HDR, - ATT_ERR_INVALID_USER_DATA, - ATT_ERR_MEASUREMENT_VERIFICATION_FAILED, - ATT_ERR_PHKH_NO_FIT_IN_USER_DATA, - ATT_ERR_PHKH_NO_MATCH, -} att_error_e; - -#endif /* PVATTEST_ATTESTATION_H */ diff -Nru s390-tools-2.31.0/pvattest/src/common.c s390-tools-2.33.1/pvattest/src/common.c --- s390-tools-2.31.0/pvattest/src/common.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/common.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,47 +0,0 @@ -/* - * Common functions for pvattest. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ -/* Must be included before any other header */ -#include "config.h" - -#include -#include - -#include "libpv/glib-helper.h" - -#include "types.h" -#include "common.h" - -gboolean wrapped_g_file_set_content(const char *filename, GBytes *bytes, mode_t mode, - GError **error) -{ - const void *data; - size_t size; - gboolean rc; - - data = g_bytes_get_data(bytes, &size); - rc = g_file_set_contents(filename, data, (ssize_t)size, error); - if (rc && mode != 0666) - chmod(filename, mode); - return rc; -} - -GBytes *secure_gbytes_concat(GBytes *lh, GBytes *rh) -{ - g_autoptr(GByteArray) lha = NULL; - - if (!lh && !rh) - return NULL; - if (!lh) - return g_bytes_ref(rh); - if (!rh) - return g_bytes_ref(lh); - lha = g_bytes_unref_to_array(g_bytes_ref(lh)); - g_byte_array_append(lha, g_bytes_get_data(rh, NULL), (guint)g_bytes_get_size(rh)); - return pv_sec_gbytes_new(lha->data, lha->len); -} diff -Nru s390-tools-2.31.0/pvattest/src/common.h s390-tools-2.33.1/pvattest/src/common.h --- s390-tools-2.31.0/pvattest/src/common.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/common.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -/* - * Common functions for pvattest. - * - * IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -#ifndef PVATTEST_COMMON_H -#define PVATTEST_COMMON_H -/* Must be included before any other header */ -#include "config.h" - -#include - -#include "libpv/glib-helper.h" -#include -#include "libpv/macros.h" -#include "lib/zt_common.h" - -#include "types.h" - -#define COPYRIGHT_NOTICE "Copyright IBM Corp. 2022" - -#define AES_256_GCM_TAG_SIZE 16 - -gboolean wrapped_g_file_set_content(const char *filename, GBytes *bytes, mode_t mode, - GError **error); - -/** - * just ref's up if one of them is NULL. - * If both NULL returns NULL. 
- * Otherwise returns lh ++ rh - */ -GBytes *secure_gbytes_concat(GBytes *lh, GBytes *rh); - -#endif /* PVATTEST_COMMON_H */ diff -Nru s390-tools-2.31.0/pvattest/src/config.h s390-tools-2.33.1/pvattest/src/config.h --- s390-tools-2.31.0/pvattest/src/config.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/config.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -/* - * Config file. - * Must be include before any other header. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - * - */ -#ifndef PVATTEST_CONFIG_H -#define PVATTEST_CONFIG_H -#define GETTEXT_PACKAGE "pvattest" - -#ifdef __GNUC__ -#ifdef __s390x__ -#ifndef PVATTEST_NO_PERFORM -#define PVATTEST_COMPILE_PERFORM -#endif -#endif -#endif - -#ifdef __clang__ -#ifdef __zarch__ -#ifndef PVATTEST_NO_PERFORM -#define PVATTEST_COMPILE_PERFORM -#endif -#endif -#endif - -#endif /* PVATTEST_CONFIG_H */ diff -Nru s390-tools-2.31.0/pvattest/src/exchange_format.c s390-tools-2.33.1/pvattest/src/exchange_format.c --- s390-tools-2.31.0/pvattest/src/exchange_format.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/exchange_format.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,479 +0,0 @@ -/* - * Functions for the pvattest exchange format to send attestation requests and responses between - * machines . - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -/* Must be included before any other header */ -#include "config.h" - -#include -#include -#include -#include -#include -#include - -#include "common.h" - -#include "exchange_format.h" -#include "log.h" - -struct exchange_shared_hdr { - be64_t magic; - be32_t version; - be32_t size; -} __packed; - -/* - * If size == 0 - * offset ignored. 
- * (part does not exist) - * if offset >0 and <0x50 -> invalid format - * if offset == 0 and size > 0 no data saved, however the request will need this amount of memory to - * succeed. - * Only makes sense for measurement and additional data. This however, is not enforced. - */ -struct entry { - be32_t size; - be32_t offset; -} __packed; -G_STATIC_ASSERT(sizeof(struct entry) == 8); - -struct _exchange_format_v1_hdr { - be64_t magic; - be32_t version; - be32_t size; - uint64_t reserved; - struct entry serialized_arcb; - struct entry measurement; - struct entry additional_data; - struct entry user_data; - struct entry config_uid; -} __packed; -G_STATIC_ASSERT(sizeof(exchange_format_v1_hdr_t) == 0x40); - -struct _exchange_format_ctx { - uint32_t version; - uint32_t req_meas_size; - uint32_t req_add_size; - GBytes *serialized_arcb; - GBytes *measurement; - GBytes *additional_data; - GBytes *user_data; - GBytes *config_uid; -}; - -/* Use a byte array to avoid any byteorder issues while checking */ - -#define PVATTEST_EXCHANGE_MAGIC 0x7076617474657374 /* pvattest */ -static const uint8_t exchange_magic[] = { 0x70, 0x76, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74 }; - -exchange_format_ctx_t *exchange_ctx_new(uint32_t version, GBytes *serialized_arcb, - uint32_t req_measurement_size, uint32_t req_additional_size, - GError **error) -{ - g_autoptr(exchange_format_ctx_t) ctx = NULL; - - pv_wrapped_g_assert(serialized_arcb); - - if (version != PVATTEST_EXCHANGE_VERSION_1_00) { - g_set_error(error, EXCHANGE_FORMAT_ERROR, EXCHANGE_FORMAT_ERROR_UNSUPPORTED_VERSION, - _("'%d' unsupported version."), version); - return NULL; - } - - ctx = g_malloc0(sizeof(*ctx)); - ctx->version = version; - - exchange_set_serialized_arcb(ctx, serialized_arcb); - ctx->req_meas_size = req_measurement_size; - ctx->req_add_size = req_additional_size; - - return g_steal_pointer(&ctx); -} - -static GBytes *get_content(GBytes *file_content, const struct entry *entry, const size_t max_size, - GError **error) -{ - 
uint64_t size = GUINT32_FROM_BE(entry->size); - uint64_t offset = GUINT32_FROM_BE(entry->offset); - size_t file_size = 0; - const uint8_t *file_content_u8 = g_bytes_get_data(file_content, &file_size); - - if (size == 0 || offset == 0) - return NULL; - - if (offset < sizeof(exchange_format_v1_hdr_t) || offset + size > file_size || - size > max_size) { - g_set_error(error, EXCHANGE_FORMAT_ERROR, EXCHANGE_FORMAT_ERROR_INVALID_FORMAT, - _("Input file is not in a valid format.")); - return NULL; - } - return g_bytes_new(file_content_u8 + offset, size); -} - -static gboolean check_format(const struct exchange_shared_hdr *hdr) -{ - if (memcmp(exchange_magic, &hdr->magic, sizeof(exchange_magic)) == 0) - return TRUE; - return FALSE; -} - -exchange_format_ctx_t *exchange_ctx_from_file(const char *filename, GError **error) -{ - g_autoptr(exchange_format_ctx_t) ctx = g_malloc0(sizeof(*ctx)); - const struct exchange_shared_hdr *hdr = NULL; - const exchange_format_v1_hdr_t *hdr_v1 = NULL; - g_autoptr(GBytes) file_content = NULL; - size_t config_uid_size = 0; - size_t file_size; - - pv_wrapped_g_assert(filename); - - file_content = pv_file_get_content_as_g_bytes(filename, error); - if (!file_content) - return NULL; - hdr = (const struct exchange_shared_hdr *)g_bytes_get_data(file_content, &file_size); - - if (file_size < sizeof(*hdr) || !check_format(hdr)) { - g_set_error(error, EXCHANGE_FORMAT_ERROR, EXCHANGE_FORMAT_ERROR_INVALID_FORMAT, - _("'%s' is not in a valid format."), filename); - return NULL; - } - - if (GUINT32_FROM_BE(hdr->version) != PVATTEST_EXCHANGE_VERSION_1_00) { - g_set_error(error, EXCHANGE_FORMAT_ERROR, EXCHANGE_FORMAT_ERROR_INVALID_FORMAT, - _("The version (%#x) of '%s' is not supported"), - GUINT32_FROM_BE(hdr->version), filename); - return NULL; - } - - /* get the header */ - if (file_size < sizeof(exchange_format_v1_hdr_t)) { - g_set_error(error, EXCHANGE_FORMAT_ERROR, EXCHANGE_FORMAT_ERROR_INVALID_FORMAT, - _("'%s' is not in a valid format."), filename); 
- return NULL; - } - hdr_v1 = (const exchange_format_v1_hdr_t *)hdr; - - /* get entries if present */ - ctx->serialized_arcb = - get_content(file_content, &hdr_v1->serialized_arcb, PVATTEST_ARCB_MAX_SIZE, error); - if (*error) - return NULL; - ctx->measurement = get_content(file_content, &hdr_v1->measurement, - PVATTEST_MEASUREMENT_MAX_SIZE, error); - if (*error) - return NULL; - ctx->additional_data = get_content(file_content, &hdr_v1->additional_data, - PVATTEST_ADDITIONAL_MAX_SIZE, error); - if (*error) - return NULL; - ctx->user_data = - get_content(file_content, &hdr_v1->user_data, PVATTEST_USER_DATA_MAX_SIZE, error); - if (*error) - return NULL; - ctx->config_uid = get_content(file_content, &hdr_v1->config_uid, PVATTEST_UID_SIZE, error); - if (*error) - return NULL; - - if (ctx->config_uid) - config_uid_size = g_bytes_get_size(ctx->config_uid); - - if (config_uid_size != PVATTEST_UID_SIZE && config_uid_size != 0) { - g_set_error(error, EXCHANGE_FORMAT_ERROR, EXCHANGE_FORMAT_ERROR_INVALID_FORMAT, - _("'%s' is not in a valid format."), filename); - return NULL; - } - ctx->req_meas_size = GUINT32_FROM_BE(hdr_v1->measurement.size); - ctx->req_add_size = GUINT32_FROM_BE(hdr_v1->additional_data.size); - ctx->version = GUINT32_FROM_BE(hdr->version); - - return g_steal_pointer(&ctx); -} - -void clear_free_exchange_ctx(exchange_format_ctx_t *ctx) -{ - if (!ctx) - return; - - if (ctx->serialized_arcb) - g_bytes_unref(ctx->serialized_arcb); - if (ctx->measurement) - g_bytes_unref(ctx->measurement); - if (ctx->additional_data) - g_bytes_unref(ctx->additional_data); - if (ctx->user_data) - g_bytes_unref(ctx->user_data); - if (ctx->config_uid) - g_bytes_unref(ctx->config_uid); - - g_free(ctx); -} - -void exchange_set_serialized_arcb(exchange_format_ctx_t *ctx, GBytes *serialized_arcb) -{ - pv_wrapped_g_assert(ctx); - pv_wrapped_g_assert(serialized_arcb); - - g_bytes_ref(serialized_arcb); - g_bytes_unref(ctx->serialized_arcb); - ctx->serialized_arcb = serialized_arcb; -} - 
-void exchange_set_measurement(exchange_format_ctx_t *ctx, GBytes *measurement) -{ - pv_wrapped_g_assert(ctx); - pv_wrapped_g_assert(measurement); - - g_bytes_ref(measurement); - g_bytes_unref(ctx->measurement); - ctx->measurement = measurement; -} - -void exchange_set_additional_data(exchange_format_ctx_t *ctx, GBytes *additional_data) -{ - pv_wrapped_g_assert(ctx); - pv_wrapped_g_assert(additional_data); - - g_bytes_ref(additional_data); - g_bytes_unref(ctx->additional_data); - ctx->additional_data = additional_data; -} - -void exchange_set_user_data(exchange_format_ctx_t *ctx, GBytes *user_data) -{ - pv_wrapped_g_assert(ctx); - pv_wrapped_g_assert(user_data); - - g_bytes_ref(user_data); - g_bytes_unref(ctx->user_data); - ctx->user_data = user_data; -} - -void exchange_set_config_uid(exchange_format_ctx_t *ctx, GBytes *config_uid) -{ - pv_wrapped_g_assert(ctx); - pv_wrapped_g_assert(config_uid); - - g_bytes_ref(config_uid); - g_bytes_unref(ctx->config_uid); - ctx->config_uid = config_uid; -} - -static GBytes *gbytes_ref0(GBytes *bytes) -{ - if (!bytes) - return NULL; - return g_bytes_ref(bytes); -} - -GBytes *exchange_get_serialized_arcb(const exchange_format_ctx_t *ctx) -{ - pv_wrapped_g_assert(ctx); - - return gbytes_ref0(ctx->serialized_arcb); -} - -GBytes *exchange_get_measurement(const exchange_format_ctx_t *ctx) -{ - pv_wrapped_g_assert(ctx); - - return gbytes_ref0(ctx->measurement); -} - -GBytes *exchange_get_additional_data(const exchange_format_ctx_t *ctx) -{ - pv_wrapped_g_assert(ctx); - - return gbytes_ref0(ctx->additional_data); -} - -GBytes *exchange_get_user_data(const exchange_format_ctx_t *ctx) -{ - pv_wrapped_g_assert(ctx); - - return gbytes_ref0(ctx->user_data); -} - -GBytes *exchange_get_config_uid(const exchange_format_ctx_t *ctx) -{ - pv_wrapped_g_assert(ctx); - - return gbytes_ref0(ctx->config_uid); -} - -uint32_t exchange_get_requested_measurement_size(const exchange_format_ctx_t *ctx) -{ - pv_wrapped_g_assert(ctx); - - return 
ctx->req_meas_size; -} - -uint32_t exchange_get_requested_additional_data_size(const exchange_format_ctx_t *ctx) -{ - pv_wrapped_g_assert(ctx); - - return ctx->req_add_size; -} - -static struct entry add_g_bytes(GBytes *bytes, FILE *file, GError **error) -{ - struct entry result = {}; - long offset; - size_t size; - const void *data = g_bytes_get_data(bytes, &size); - - g_assert(size <= G_MAXUINT32); - - offset = pv_file_tell(file, error); - g_assert(offset <= G_MAXUINT32); - if (offset < 0) - return result; - - result.offset = GUINT32_TO_BE((uint32_t)offset); - result.size = GUINT32_TO_BE((uint32_t)size); - pv_file_write(file, data, size, error); - return result; -} - -int exchange_write_to_file(const exchange_format_ctx_t *ctx, const char *filename, GError **error) -{ - exchange_format_v1_hdr_t hdr = { - .magic = GUINT64_TO_BE(PVATTEST_EXCHANGE_MAGIC), - .version = GUINT32_TO_BE(ctx->version), - }; - size_t file_size = sizeof(hdr); - g_autoptr(FILE) file = NULL; - struct stat file_stat; - long actual_file_size; - size_t tmp_size; - - pv_wrapped_g_assert(ctx); - pv_wrapped_g_assert(filename); - - file = pv_file_open(filename, "w", error); - if (!file) - return -1; - - if (fstat(fileno(file), &file_stat) != 0 || !S_ISREG(file_stat.st_mode)) { - g_set_error(error, EXCHANGE_FORMAT_ERROR, - EXCHANGE_FORMAT_ERROR_UNSUPPORTED_FILE_TYPE, - "Only regular files are supported: '%s'", filename); - return -1; - } - - if (pv_file_seek(file, sizeof(exchange_format_v1_hdr_t), SEEK_SET, error)) - return -1; - - if (ctx->serialized_arcb) { - hdr.serialized_arcb = add_g_bytes(ctx->serialized_arcb, file, error); - if (*error) - return -1; - file_size += g_bytes_get_size(ctx->serialized_arcb); - } - if (ctx->measurement) { - hdr.measurement = add_g_bytes(ctx->measurement, file, error); - if (*error) - return -1; - file_size += g_bytes_get_size(ctx->measurement); - } else { - hdr.measurement.size = GUINT32_TO_BE(ctx->req_meas_size); - } - - if (ctx->additional_data) { - 
hdr.additional_data = add_g_bytes(ctx->additional_data, file, error); - if (*error) - return -1; - file_size += g_bytes_get_size(ctx->additional_data); - } else { - hdr.additional_data.size = GUINT32_TO_BE(ctx->req_add_size); - } - - if (ctx->user_data) { - tmp_size = g_bytes_get_size(ctx->user_data); - g_assert(tmp_size <= PVATTEST_USER_DATA_MAX_SIZE); - tmp_size = MIN(tmp_size, PVATTEST_USER_DATA_MAX_SIZE); /* should be a noop */ - hdr.user_data = add_g_bytes(ctx->user_data, file, error); - if (*error) - return -1; - file_size += g_bytes_get_size(ctx->user_data); - } - if (ctx->config_uid) { - tmp_size = g_bytes_get_size(ctx->config_uid); - g_assert(tmp_size == PVATTEST_UID_SIZE); - tmp_size = MIN(tmp_size, PVATTEST_UID_SIZE); /* should be a noop */ - hdr.config_uid = add_g_bytes(ctx->config_uid, file, error); - if (*error) - return -1; - file_size += g_bytes_get_size(ctx->config_uid); - } - - /* - * This case should never happen. It could be seen as a programming error as: - * ARCB is restricted by kernel (and this tool) to be max 1M, Additional+meas to max 8pages - * userdata to 256B and config uid to 16b this is way less than 4G. - * - * However, lets be conservative and trow an error instead of an assertion. - */ - if (file_size > UINT32_MAX) { - g_set_error( - error, EXCHANGE_FORMAT_ERROR, EXCHANGE_FORMAT_ERROR_INVALID_FORMAT, - "The exchange file format cannot handle this much data in one blob. 
(%#lx bytes)", - file_size); - return -1; - } - hdr.size = GUINT32_TO_BE((uint32_t)file_size); - if (pv_file_seek(file, 0, SEEK_SET, error) != 0) - return -1; - if (sizeof(hdr) != pv_file_write(file, &hdr, sizeof(hdr), error)) - return -1; - if (pv_file_seek(file, 0, SEEK_END, error) != 0) - return -1; - actual_file_size = pv_file_tell(file, error); - if (actual_file_size < 0) - return -1; - if (actual_file_size != (uint32_t)file_size) { - g_set_error( - error, EXCHANGE_FORMAT_ERROR, EXCHANGE_FORMAT_ERROR_WRITE, - "The exchange file size doesn't match the expectations: %ld bytes != %lu bytes", - actual_file_size, file_size); - return -1; - } - - return 0; -} - -static void print_entry(const char *name, GBytes *data, const gboolean print_data, FILE *stream) -{ - if (!data) - return; - fprintf(stream, _("%s (%#lx bytes)"), name, g_bytes_get_size(data)); - if (print_data) { - fprintf(stream, ":\n"); - pvattest_hexdump(stream, data, 16, " ", TRUE); - } - fprintf(stream, "\n"); -} - -void exchange_info_print(const exchange_format_ctx_t *ctx, const gboolean print_data, FILE *stream) -{ - pv_wrapped_g_assert(ctx); - pv_wrapped_g_assert(stream); - - fprintf(stream, _("Version: %#x\n"), ctx->version); - fprintf(stream, _("Sections:\n")); - print_entry(_(" ARCB"), ctx->serialized_arcb, print_data, stream); - print_entry(_(" Measurement"), ctx->measurement, print_data, stream); - print_entry(_(" Additional Data"), ctx->additional_data, print_data, stream); - print_entry(_(" User Data"), ctx->user_data, print_data, stream); - print_entry(_(" Config UID"), ctx->config_uid, print_data, stream); - if (!ctx->measurement) - fprintf(stream, _("Required measurement size: %#x\n"), ctx->req_meas_size); - if (!ctx->additional_data) - fprintf(stream, _("Required additional data size: %#x\n"), ctx->req_add_size); -} diff -Nru s390-tools-2.31.0/pvattest/src/exchange_format.h s390-tools-2.33.1/pvattest/src/exchange_format.h --- s390-tools-2.31.0/pvattest/src/exchange_format.h 2024-02-02 
17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/exchange_format.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,166 +0,0 @@ -/* - * Definitions for the pvattest exchange format to send attestation requests and responses between - * machines. The "exchange format" is a simple file format to send labeled binary blobs between - * pvattest instances on different machines. All sizes, etc are in big endian. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -#ifndef PVATTEST_EXCHANGE_FORMAT_H -#define PVATTEST_EXCHANGE_FORMAT_H -/* Must be included before any other header */ -#include "config.h" - -#include "libpv/glib-helper.h" - -#include "types.h" -#include "common.h" - -/* Similar to linux/arch/s390x/include/uapi/uvdevice.h as this part needs to be - * architecture independent. - */ -#define PVATTEST_UID_SIZE 0x10UL -#define PVATTEST_USER_DATA_MAX_SIZE 0x100UL -#define PVATTEST_ARCB_MAX_SIZE 0x100000 -#define PVATTEST_MEASUREMENT_MAX_SIZE 0x8000 -#define PVATTEST_ADDITIONAL_MAX_SIZE 0x8000 - -#define PVATTEST_EXCHANGE_V_INVALID 0 -#define PVATTEST_EXCHANGE_VERSION_1_00 0x0100 - -typedef struct _exchange_format_v1_hdr exchange_format_v1_hdr_t; -typedef struct _exchange_format_ctx exchange_format_ctx_t; - -/** - * exchange_ctx_new: - * - * @version: Format version. Currently, only version 1 supported. - * @serialized_arcb: ARCB as #GBytes - * @req_measurement_size: Measurement size the given ARCB needs. - * @req_measurement_size: Additional Data size the given ARCB needs. - * @error: GError. *error will != NULL if error occurs. 
- * - * Returns: (nullable) (transfer full): new, empty exchange format context - * - */ -exchange_format_ctx_t *exchange_ctx_new(const uint32_t version, GBytes *serialized_arcb, - const uint32_t req_measurement_size, - const uint32_t req_additional_size, GError **error) - PV_NONNULL(2); - -/** - * exchange_ctx_from_file: - * - * @filename: name of the file to be loaded - * @error: GError. *error will != NULL if error occurs. - * - * Loads all blobs from file and caches them in the context structure. - * - * Returns: (nullable) (transfer full): exchange format context filled with data from file - * - */ -exchange_format_ctx_t *exchange_ctx_from_file(const char *filename, GError **error) PV_NONNULL(1); -void clear_free_exchange_ctx(exchange_format_ctx_t *ctx); -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(exchange_format_ctx_t, clear_free_exchange_ctx) - -/** - * exchange_set_serialized_arcb: - * - * @ctx: exchange format context - * @serialized_arcb: blob to add. - * - * Adds blob to the exchange format. Unreferences old data if already set. - */ -void exchange_set_serialized_arcb(exchange_format_ctx_t *ctx, GBytes *serialized_arcb) - PV_NONNULL(1, 2); - -/** - * exchange_set_measurement: - * - * @ctx: exchange format context - * @measurement: blob to add. - * - * Adds blob to the exchange format. Unreferences old data if already set. - */ -void exchange_set_measurement(exchange_format_ctx_t *ctx, GBytes *measurement) PV_NONNULL(1, 2); - -/** - * exchange_set_additional_data: - * - * @ctx: exchange format context - * @additional_data: blob to add. - * - * Adds blob to the exchange format. Unreferences old data if already set. - */ -void exchange_set_additional_data(exchange_format_ctx_t *ctx, GBytes *additional_data) - PV_NONNULL(1, 2); - -/** - * exchange_set_user_data: - * - * @ctx: exchange format context - * @user_data: blob to add. - * - * Adds blob to the exchange format. Unreferences old data if already set. 
- */ -void exchange_set_user_data(exchange_format_ctx_t *ctx, GBytes *user_data) PV_NONNULL(1, 2); - -/** - * exchange_set_config_uid: - * - * @ctx: exchange format context - * @config_uid: blob to add. - * - * Adds blob to the exchange format. Unreferences old data if already set. - */ -void exchange_set_config_uid(exchange_format_ctx_t *ctx, GBytes *config_uid) PV_NONNULL(1, 2); - -GBytes *exchange_get_serialized_arcb(const exchange_format_ctx_t *ctx) PV_NONNULL(1); -GBytes *exchange_get_measurement(const exchange_format_ctx_t *ctx) PV_NONNULL(1); -GBytes *exchange_get_additional_data(const exchange_format_ctx_t *ctx) PV_NONNULL(1); -GBytes *exchange_get_user_data(const exchange_format_ctx_t *ctx) PV_NONNULL(1); -GBytes *exchange_get_config_uid(const exchange_format_ctx_t *ctx) PV_NONNULL(1); - -uint32_t exchange_get_requested_measurement_size(const exchange_format_ctx_t *ctx) PV_NONNULL(1); -uint32_t exchange_get_requested_additional_data_size(const exchange_format_ctx_t *ctx) - PV_NONNULL(1); - -/** - * exchange_write_to_file: - * - * @ctx: exchange format context - * @filename: name of the file to be loaded - * @error: GError. *error will != NULL if error occours. - * - * Takes all Data in the context and writes them into a file. - * Places the exchange format header before the data. - * - * Returns: 0 in case of success, -1 otherwise. - */ -int exchange_write_to_file(const exchange_format_ctx_t *ctx, const char *filename, GError **error) - PV_NONNULL(1, 2); - -/** - * exchange_info_print: - * - * @ctx: exchange format context - * @print_data: TRUE: print present data + label names - * FALSE: just print label names of present data - * @stream: FILE* stream to print data - * - * Prints the content of @ctx to @stream. 
- */ -void exchange_info_print(const exchange_format_ctx_t *ctx, const gboolean print_data, FILE *stream) - PV_NONNULL(1, 3); - -#define EXCHANGE_FORMAT_ERROR g_quark_from_static_string("pv-exchange-format_error-quark") -typedef enum { - EXCHANGE_FORMAT_ERROR_INVALID_FORMAT, - EXCHANGE_FORMAT_ERROR_UNSUPPORTED_VERSION, - EXCHANGE_FORMAT_ERROR_WRITE, - EXCHANGE_FORMAT_ERROR_UNSUPPORTED_FILE_TYPE, -} exchange_error_e; - -#endif /* PVATTEST_EXCHANGE_FORMAT_H */ diff -Nru s390-tools-2.31.0/pvattest/src/.gitignore s390-tools-2.33.1/pvattest/src/.gitignore --- s390-tools-2.31.0/pvattest/src/.gitignore 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/.gitignore 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -.check-dep-pvattest -.detect-openssl.dep.c diff -Nru s390-tools-2.31.0/pvattest/src/log.c s390-tools-2.33.1/pvattest/src/log.c --- s390-tools-2.31.0/pvattest/src/log.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/log.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,213 +0,0 @@ -/* - * Functions used for logging. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -/* Must be included before any other header */ -#include "config.h" - -#include -#include - -#include "types.h" -#include "common.h" -#include "log.h" - -void pvattest_log_increase_log_lvl(int *log_lvl) -{ - if (*log_lvl >= PVATTEST_LOG_LVL_MAX) - return; - *log_lvl = *log_lvl << 1; -} - -void pvattest_log_error(const char *format, ...) -{ - va_list argp; - - va_start(argp, format); - g_logv(NULL, PVATTEST_LOG_LVL_ERROR, format, argp); - va_end(argp); -} - -void pvattest_log_warning(const char *format, ...) -{ - va_list argp; - - va_start(argp, format); - g_logv(NULL, PVATTEST_LOG_LVL_WARNING, format, argp); - va_end(argp); -} - -void pvattest_log_info(const char *format, ...) 
-{ - va_list argp; - - va_start(argp, format); - g_logv(NULL, PVATTEST_LOG_LVL_INFO, format, argp); - va_end(argp); -} - -void pvattest_log_debug(const char *format, ...) -{ - va_list argp; - - va_start(argp, format); - g_logv(NULL, PVATTEST_LOG_LVL_DEBUG, format, argp); - va_end(argp); -} - -static void _log_print(FILE *stream, const char *prefix, const char *message, const char *postfix) -{ - g_autofree char *prefix_empty = NULL, *new_msg = NULL; - size_t prefix_len = strlen(prefix); - char **message_v; - - if (!prefix || prefix_len == 0) { - printf("%s%s", message, postfix); - return; - } - - message_v = g_strsplit(message, "\n", 0); - prefix_empty = g_malloc0(prefix_len + 2); - - snprintf(prefix_empty, prefix_len + 2, "\n%*c\b", (int)prefix_len, ' '); - new_msg = g_strjoinv(prefix_empty, message_v); - - fprintf(stream, "%s%s%s", prefix, new_msg, postfix); - - g_strfreev(message_v); -} - -static void _log_logger(GLogLevelFlags level, const char *message, int log_level, - gboolean use_prefix, const char *postfix) -{ - const char *prefix = ""; - - /* filter out messages depending on debugging level */ - if ((level & PVATTEST_LOG_LVL_DEBUG) && log_level < PVATTEST_LOG_LVL_DEBUG) - return; - - if ((level & PVATTEST_LOG_LVL_INFO) && log_level < PVATTEST_LOG_LVL_INFO) - return; - - if (use_prefix && level & (G_LOG_LEVEL_WARNING | PVATTEST_LOG_LVL_WARNING)) - prefix = _("WARNING: "); - - if (use_prefix && level & (G_LOG_LEVEL_ERROR | PVATTEST_LOG_LVL_ERROR)) - prefix = _("ERROR: "); - - if (use_prefix && level & (G_LOG_LEVEL_DEBUG | PVATTEST_LOG_LVL_DEBUG)) - prefix = _("DEBUG: "); - - if (level & (G_LOG_LEVEL_WARNING | G_LOG_LEVEL_ERROR | PVATTEST_LOG_LVL_WARNING | - PVATTEST_LOG_LVL_ERROR)) - _log_print(stderr, prefix, message, postfix); - else - _log_print(stdout, prefix, message, postfix); -} - -/** - * prefixes type. and adds a "\n" ad the end. 
- */ -void pvattest_log_default_logger(const char *log_domain G_GNUC_UNUSED, GLogLevelFlags level, - const char *message, void *user_data) -{ - int log_level = *(int *)user_data; - - _log_logger(level, message, log_level, TRUE, "\n"); -} - -/* - * writes message as it is if log level is high enough. - */ -void pvattest_log_plain_logger(const char *log_domain G_GNUC_UNUSED, GLogLevelFlags level, - const char *message, void *user_data) -{ - int log_level = *(int *)user_data; - - _log_logger(level, message, log_level, FALSE, ""); -} - -void pvattest_log_bytes(const void *data, size_t size, size_t width, const char *prefix, - gboolean beautify, GLogLevelFlags log_lvl) -{ - const uint8_t *data_b = data; - - pv_wrapped_g_assert(data); - - if (beautify) - g_log(PVATTEST_BYTES_LOG_DOMAIN, log_lvl, "%s0x0000 ", prefix); - else - g_log(PVATTEST_BYTES_LOG_DOMAIN, log_lvl, "%s", prefix); - for (size_t i = 0; i < size; i++) { - g_log(PVATTEST_BYTES_LOG_DOMAIN, log_lvl, "%02x", data_b[i]); - if (i % 2 == 1 && beautify) - g_log(PVATTEST_BYTES_LOG_DOMAIN, log_lvl, " "); - if (i == size - 1) - break; - if (i % width == width - 1) { - if (beautify) - g_log(PVATTEST_BYTES_LOG_DOMAIN, log_lvl, "\n%s0x%04lx ", prefix, - i + 1); - else - g_log(PVATTEST_BYTES_LOG_DOMAIN, log_lvl, "\n%s", prefix); - } - } - g_log(PVATTEST_BYTES_LOG_DOMAIN, log_lvl, "\n"); -} - -int pvattest_hexdump(FILE *stream, GBytes *bytes, const size_t width, const char *prefix, - const gboolean beautify) -{ - const uint8_t *data; - size_t size; - - pv_wrapped_g_assert(bytes); - pv_wrapped_g_assert(stream); - - data = g_bytes_get_data(bytes, &size); - pv_wrapped_g_assert(data); - - if (beautify) { - if (fprintf(stream, "%s0x0000 ", prefix) < 0) - return -1; - } else { - if (fprintf(stream, "%s", prefix) < 0) - return -1; - } - for (size_t i = 0; i < size; i++) { - if (fprintf(stream, "%02x", data[i]) < 0) - return -1; - if (i % 2 == 1 && beautify) { - if (fprintf(stream, " ") < 0) - return -1; - } - if (i == size - 1) 
- break; - if (width == 0) - continue; - if (i % width == width - 1) { - if (beautify) { - if (fprintf(stream, "\n%s0x%04lx ", prefix, i + 1) < 0) - return -1; - } else { - if (fprintf(stream, "\n%s", prefix) < 0) - return -1; - } - } - } - return 0; -} - -void pvattest_log_GError(const char *info, GError *error) -{ - pv_wrapped_g_assert(info); - - if (!error) - return; - - pvattest_log_error("%s:\n%s", info, error->message); -} diff -Nru s390-tools-2.31.0/pvattest/src/log.h s390-tools-2.33.1/pvattest/src/log.h --- s390-tools-2.31.0/pvattest/src/log.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/log.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,67 +0,0 @@ -/* - * Definitions used for logging. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -#ifndef PVATTEST_LOG_H -#define PVATTEST_LOG_H -/* Must be included before any other header */ -#include "config.h" - -#include "libpv/glib-helper.h" -#include "libpv/macros.h" - -#define PVATTEST_LOG_LVL_TOOL_ALL (1 << (G_LOG_LEVEL_USER_SHIFT)) -#define PVATTEST_LOG_LVL_ERROR (1 << (G_LOG_LEVEL_USER_SHIFT)) -#define PVATTEST_LOG_LVL_WARNING (1 << (G_LOG_LEVEL_USER_SHIFT + 1)) -#define PVATTEST_LOG_LVL_INFO (1 << (G_LOG_LEVEL_USER_SHIFT + 2)) -#define PVATTEST_LOG_LVL_DEBUG (1 << (G_LOG_LEVEL_USER_SHIFT + 3)) - -#define PVATTEST_LOG_LVL_DEFAULT PVATTEST_LOG_LVL_WARNING -#define PVATTEST_LOG_LVL_MAX PVATTEST_LOG_LVL_DEBUG - -#define PVATTEST_BYTES_LOG_DOMAIN "pvattest_bytes" - -void pvattest_log_increase_log_lvl(int *log_lvl); -void pvattest_log_error(const char *format, ...); -void pvattest_log_warning(const char *format, ...); -void pvattest_log_info(const char *format, ...); -void pvattest_log_debug(const char *format, ...); - -/** pvattest_log_default_logger: - * - * A #GLogFunc implementation. - * Prefixes log level and adds a "\n" ad the end. 
- */ -void pvattest_log_default_logger(const char *log_domain, GLogLevelFlags level, const char *message, - void *user_data); -/* pvattest_log_plain_logger: - * - * A #GLogFunc implementation. - * Writes message as it is if log level is high enough. - */ -void pvattest_log_plain_logger(const char *log_domain, GLogLevelFlags level, const char *message, - void *user_data); -#define dhexdump(v, s) \ - { \ - pvattest_log_debug("%s (%li byte):", #v, s); \ - pvattest_log_bytes(v, s, 16L, " ", TRUE, PVATTEST_LOG_LVL_DEBUG); \ - g_log(PVATTEST_BYTES_LOG_DOMAIN, PVATTEST_LOG_LVL_DEBUG, "\n"); \ - } -#define gbhexdump(v) \ - { \ - pvattest_log_debug("%s:(%li byte):", #v, g_bytes_get_size(v)); \ - pvattest_log_bytes(g_bytes_get_data(v, NULL), g_bytes_get_size(v), 16L, " ", \ - TRUE, PVATTEST_LOG_LVL_DEBUG); \ - g_log(PVATTEST_BYTES_LOG_DOMAIN, PVATTEST_LOG_LVL_DEBUG, "\n"); \ - } -void pvattest_log_bytes(const void *data, size_t size, size_t width, const char *prefix, - gboolean beautify, GLogLevelFlags log_lvl) PV_NONNULL(1); -int pvattest_hexdump(FILE *stream, GBytes *bytes, const size_t width, const char *prefix, - const gboolean beautify) PV_NONNULL(1, 2); -void pvattest_log_GError(const char *info, GError *error) PV_NONNULL(1); - -#endif /* PVATTEST_LOG_H */ diff -Nru s390-tools-2.31.0/pvattest/src/Makefile s390-tools-2.33.1/pvattest/src/Makefile --- s390-tools-2.31.0/pvattest/src/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,112 +0,0 @@ -include ../../common.mak - -BIN_PROGRAM = pvattest -PKGDATADIR ?= "$(DESTDIR)$(TOOLS_DATADIR)/$(BIN_PROGRAM)" - -SRC_DIR := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) -PVATTESTDIR := $(rootdir)/pvattest -INCLUDE_PATHS = "$(SRC_DIR)" "$(rootdir)/include" -INCLUDE_PARMS = $(addprefix -I,$(INCLUDE_PATHS)) - -LIBPV_DIR = $(rootdir)/libpv -LIBPV = $(LIBPV_DIR)/libpv.a - -WARNINGS := -Wall -Wextra -Wshadow \ - -Wcast-align -Wwrite-strings 
-Wmissing-prototypes \ - -Wmissing-declarations -Wredundant-decls -Wnested-externs \ - -Wno-long-long -Wuninitialized -Wconversion -Wstrict-prototypes \ - -Wpointer-arith -Wno-error=inline \ - -Wno-unused-function -Wno-unused-parameter -Wno-unused-variable \ - $(NULL) - -PVATTEST_SRCS := $(wildcard *.c) \ - $(NULL) - -$(BIN_PROGRAM)_SRCS := \ - $(PVATTEST_SRCS) \ - $(NULL) - -$(BIN_PROGRAM)_OBJS := $($(BIN_PROGRAM)_SRCS:.c=.o) - -GLIB2_CFLAGS := $(shell $(PKG_CONFIG) --silence-errors --cflags glib-2.0) -GLIB2_LIBS := $(shell $(PKG_CONFIG) --silence-errors --libs glib-2.0) -LIBCRYPTO_CFLAGS := $(shell $(PKG_CONFIG) --silence-errors --cflags libcrypto) -LIBCRYPTO_LIBS := $(shell $(PKG_CONFIG) --silence-errors --libs libcrypto) -LIBCURL_CFLAGS := $(shell $(PKG_CONFIG) --silence-errors --cflags libcurl) -LIBCURL_LIBS := $(shell $(PKG_CONFIG) --silence-errors --libs libcurl) - -ALL_CFLAGS += -DPKGDATADIR=$(PKGDATADIR) \ - -DOPENSSL_API_COMPAT=0x10101000L \ - $(GLIB2_CFLAGS) \ - $(LIBCRYPTO_CFLAGS) \ - $(LIBCURL_CFLAGS) \ - $(WARNINGS) \ - $(NULL) - -ifneq ($(call check_header_prereq,"asm/uvdevice.h"),yes) - ALL_CFLAGS += -DPVATTEST_NO_PERFORM -endif - -ALL_CPPFLAGS += $(INCLUDE_PARMS) -LDLIBS += $(GLIB2_LIBS) $(LIBCRYPTO_LIBS) $(LIBCURL_LIBS) - -BUILD_TARGETS := skip-$(BIN_PROGRAM) -INSTALL_TARGETS := skip-$(BIN_PROGRAM) -ifneq (${HAVE_OPENSSL},0) -ifneq (${HAVE_GLIB2},0) -ifneq (${HAVE_LIBCURL}, 0) - BUILD_TARGETS := $(BIN_PROGRAM) - INSTALL_TARGETS := install-$(BIN_PROGRAM) -endif -endif -endif - -all: $(BUILD_TARGETS) - -install: $(INSTALL_TARGETS) - -$(BIN_PROGRAM): $($(BIN_PROGRAM)_OBJS) $(LIBPV) - -skip-$(BIN_PROGRAM): - echo " SKIP $(BIN_PROGRAM) due to unresolved dependencies" - -clean: - $(RM) -f -- $($(BIN_PROGRAM)_OBJS) $(BIN_PROGRAM) .check-dep-$(BIN_PROGRAM) .detect-openssl.dep.c - -install-$(BIN_PROGRAM): $(BIN_PROGRAM) - $(INSTALL) -d -m 755 $(DESTDIR)$(USRBINDIR) - $(INSTALL) -c $^ $(DESTDIR)$(USRBINDIR) - - -.PHONY: all install clean skip-$(BIN_PROGRAM) 
install-$(BIN_PROGRAM) - -$($(BIN_PROGRAM)_OBJS): .check-dep-$(BIN_PROGRAM) - -.detect-openssl.dep.c: - echo "#include " > $@ - echo "#if OPENSSL_VERSION_NUMBER < 0x10101000L" >> $@ - echo " #error openssl version 1.1.1 is required" >> $@ - echo "#endif" >> $@ - echo "static void __attribute__((unused)) test(void) {" >> $@ - echo " EVP_MD_CTX *ctx = EVP_MD_CTX_new();" >> $@ - echo " EVP_MD_CTX_free(ctx);" >> $@ - echo "}" >> $@ - -.check-dep-$(BIN_PROGRAM): .detect-openssl.dep.c - $(call check_dep, \ - "$(BIN_PROGRAM)", \ - "glib.h", \ - "glib2-devel / libglib2.0-dev", \ - "HAVE_GLIB2=0") - $(call check_dep, \ - "$(BIN_PROGRAM)", \ - $^, \ - "openssl-devel / libssl-dev version >= 1.1.1", \ - "HAVE_OPENSSL=0", \ - "-I.") - $(call check_dep, \ - "$(BIN_PROGRAM)", \ - "curl/curl.h", \ - "libcurl-devel", \ - "HAVE_LIBCURL=0") - touch $@ diff -Nru s390-tools-2.31.0/pvattest/src/pvattest.c s390-tools-2.33.1/pvattest/src/pvattest.c --- s390-tools-2.31.0/pvattest/src/pvattest.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/pvattest.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,466 +0,0 @@ -/* - * Entry point for the pvattest tool. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ -/* Must be included before any other header */ -#include "config.h" - -#include -#include - -#include - -#include "libpv/crypto.h" -#include "libpv/cert.h" - -#include "uvio.h" -#include "common.h" -#include "attestation.h" -#include "arcb.h" -#include "argparse.h" -#include "exchange_format.h" -#include "log.h" - -#define PVATTEST_NID NID_secp521r1 -#define PVATTEST_UV_PATH "/dev/uv" -#define PVATTEST_EXIT_MEASURE_NOT_VERIFIED 2 - -enum pvattest_error { - PVATTEST_ERROR_INVAL_ATT_RESULT, -}; - -static arcb_v1_t *create_arcb(char **host_key_paths, const gboolean use_nonce, - const gboolean phkh_img, const gboolean phkh_att, - const uint64_t user_paf, GError **error) -{ - g_autoptr(GBytes) arpk = NULL, meas_key = NULL, nonce = NULL, iv = NULL; - g_autoslist(PvX509WithPath) host_keys_with_path = NULL; - g_autoslist(EVP_PKEY) evp_host_keys = NULL; - const uint32_t mai = MAI_HMAC_SHA512; - g_autoptr(EVP_PKEY) evp_cpk = NULL; - g_autoptr(arcb_v1_t) arcb = NULL; - uint64_t paf = user_paf; - - g_assert(host_key_paths); - - arpk = pv_generate_key(EVP_aes_256_gcm(), error); - if (!arpk) - return NULL; - iv = pv_generate_iv(EVP_aes_256_gcm(), error); - if (!iv) - return NULL; - evp_cpk = pv_generate_ec_key(PVATTEST_NID, error); - if (!evp_cpk) - return NULL; - meas_key = pv_generate_rand_data(HMAC_SHA512_KEY_SIZE, error); - if (!meas_key) - return NULL; - - if (phkh_img) - paf |= ARCB_V1_PAF_AAD_PHKH_HEADER; - if (phkh_att) - paf |= ARCB_V1_PAF_AAD_PHKH_ATTEST; - - arcb = arcb_v1_new(arpk, iv, mai, evp_cpk, meas_key, paf, error); - if (!arcb) - return NULL; - if (use_nonce) { - nonce = pv_generate_rand_data(ARCB_V1_NONCE_SIZE, error); - if (!nonce) - return NULL; - arcb_v1_set_nonce(arcb, nonce); - } - - host_keys_with_path = pv_load_certificates(host_key_paths, error); - if (!host_keys_with_path) - return NULL; - - /* Extract EVP_PKEY structures and verify that the correct elliptic - * curve is used. 
- */ - evp_host_keys = pv_get_ec_pubkeys(host_keys_with_path, PVATTEST_NID, error); - if (!evp_host_keys) - return NULL; - for (GSList *iter = evp_host_keys; iter; iter = iter->next) { - EVP_PKEY *host_key = iter->data; - - if (arcb_v1_add_key_slot(arcb, host_key, error) < 0) - return NULL; - } - return g_steal_pointer(&arcb); -} - -#define __PVATTEST_CREATE_ERROR_MSG _("Creating the attestation request failed") -static int do_create(const pvattest_create_config_t *create_config) -{ - g_autoptr(exchange_format_ctx_t) output_ctx = NULL; - uint32_t measurement_size, additional_data_size; - g_autoptr(GBytes) serialized_arcb = NULL; - g_autoptr(arcb_v1_t) arcb = NULL; - g_autoptr(GError) error = NULL; - g_autoptr(GBytes) arpk = NULL; - - if (!create_config->use_nonce) - pvattest_log_warning(_("No nonce used. (Experimental setting)")); - - if (create_config->no_verify) { - pvattest_log_warning(_("Host-key document verification is disabled.\n" - "The attestation result could be compromised!")); - pvattest_log_debug(_("Verification skipped.")); - } else { - if (pv_verify_host_key_docs_by_path( - create_config->host_key_document_paths, create_config->root_ca_path, - create_config->crl_paths, create_config->certificate_paths, - create_config->online, &error) < 0) - goto err_exit; - pvattest_log_debug(_("Verification passed.")); - } - - /* build attestation request */ - arcb = create_arcb(create_config->host_key_document_paths, create_config->use_nonce, - create_config->phkh_img, create_config->phkh_att, create_config->paf, - &error); - if (!arcb) - goto err_exit; - - additional_data_size = arcb_v1_get_required_additional_size(arcb); - if (create_config->x_aad_size >= 0) { - g_assert_cmpint(create_config->x_aad_size, <=, UINT32_MAX); - additional_data_size = (uint32_t)create_config->x_aad_size; - } - measurement_size = arcb_v1_get_required_measurement_size(arcb, &error); - if (error) - goto err_exit; - - serialized_arcb = arcb_v1_serialize(arcb, &error); - if 
(!serialized_arcb) - goto err_exit; - - /* write attestation request data to file */ - output_ctx = exchange_ctx_new(PVATTEST_EXCHANGE_VERSION_1_00, serialized_arcb, - measurement_size, additional_data_size, &error); - if (!output_ctx) - goto err_exit; - if (exchange_write_to_file(output_ctx, create_config->output_path, &error) < 0) - goto err_exit; - pvattest_log_debug(_("ARCB written to file.")); - - /* write attestation request protection key to file */ - arpk = arcb_v1_get_arp_key(arcb); - wrapped_g_file_set_content(create_config->arp_key_out_path, arpk, 0600, &error); - if (error) - goto err_exit; - pvattest_log_debug(_("ARPK written to file.")); - - return EXIT_SUCCESS; - -err_exit: - pvattest_log_GError(__PVATTEST_CREATE_ERROR_MSG, error); - return EXIT_FAILURE; -} - -#ifdef PVATTEST_COMPILE_PERFORM -#define __PVATTEST_MEASURE_ERROR_MSG _("Performing the attestation measurement failed") -static int do_perform(pvattest_perform_config_t *perform_config) -{ - g_autoptr(GBytes) serialized_arcb = NULL, user_data = NULL, measurement = NULL, - additional_data = NULL, config_uid = NULL; - size_t uv_measurement_data_size, uv_addidtional_data_size; - g_autoptr(exchange_format_ctx_t) exchange_ctx = NULL; - uint32_t measurement_size, additional_data_size; - g_autoptr(uvio_attest_t) uvio_attest = NULL; - g_autoptr(GError) error = NULL; - be16_t uv_rc; - int uv_fd; - - exchange_ctx = exchange_ctx_from_file(perform_config->input_path, &error); - if (!exchange_ctx) - goto err_exit; - - serialized_arcb = exchange_get_serialized_arcb(exchange_ctx); - if (!serialized_arcb) { - g_set_error(&error, PVATTEST_ERROR, ARCB_ERR_INVALID_ARCB, - _("The input does not provide an attestation request.")); - - goto err_exit; - } - - measurement_size = exchange_get_requested_measurement_size(exchange_ctx); - additional_data_size = exchange_get_requested_additional_data_size(exchange_ctx); - - pvattest_log_debug(_("Input data loaded.")); - - if (perform_config->user_data_path) { - user_data 
= pv_file_get_content_as_g_bytes(perform_config->user_data_path, &error); - if (!user_data) - goto err_exit; - pvattest_log_debug(_("Added user data from '%s'"), perform_config->user_data_path); - } - uvio_attest = build_attestation_v1_ioctl(serialized_arcb, user_data, measurement_size, - additional_data_size, &error); - if (!uvio_attest) - goto err_exit; - - pvattest_log_debug(_("attestation context generated.")); - - /* execute attestation */ - uv_fd = uvio_open(PVATTEST_UV_PATH, &error); - if (uv_fd < 0) - goto err_exit; - - uv_rc = uvio_ioctl_attest(uv_fd, uvio_attest, &error); - close(uv_fd); - if (uv_rc != UVC_EXECUTED) - goto err_exit; - pvattest_log_debug(_("attestation measurement successful. rc = %#x"), uv_rc); - - /* write to file */ - measurement = uvio_get_measurement(uvio_attest); - additional_data = uvio_get_additional_data(uvio_attest); - config_uid = uvio_get_config_uid(uvio_attest); - - uv_measurement_data_size = measurement == NULL ? 0 : g_bytes_get_size(measurement); - if (uv_measurement_data_size != measurement_size) { - g_set_error(&error, PVATTEST_ERROR, PVATTEST_ERROR_INVAL_ATT_RESULT, - "The measurement size returned by Ultravisor is not as expected."); - goto err_exit; - } - - uv_addidtional_data_size = additional_data == NULL ? 
0 : g_bytes_get_size(additional_data); - if (uv_addidtional_data_size != additional_data_size) { - g_set_error(&error, PVATTEST_ERROR, PVATTEST_ERROR_INVAL_ATT_RESULT, - "The additional data size returned by Ultravisor is not as expected."); - goto err_exit; - } - - exchange_set_measurement(exchange_ctx, measurement); - if (additional_data) - exchange_set_additional_data(exchange_ctx, additional_data); - exchange_set_config_uid(exchange_ctx, config_uid); - if (user_data) - exchange_set_user_data(exchange_ctx, user_data); - - if (exchange_write_to_file(exchange_ctx, perform_config->output_path, &error) < 0) - goto err_exit; - - pvattest_log_debug(_("Output written to file.")); - - return EXIT_SUCCESS; - -err_exit: - pvattest_log_GError(__PVATTEST_MEASURE_ERROR_MSG, error); - return EXIT_FAILURE; -} -#endif /* PVATTEST_COMPILE_PERFORM */ - -static int fprint_verify_result(FILE *stream, const enum verify_output_format fmt, - GBytes *config_uid, GBytes *additional_data) -{ - switch (fmt) { - case VERIFY_FMT_HUMAN: - if (fprintf(stream, _("Attestation measurement verified\n")) < 0) - return -1; - if (fprintf(stream, _("Config UID:\n")) < 0) - return -1; - if (pvattest_hexdump(stream, config_uid, 0x10L, "0x", FALSE) < 0) - return -1; - if (fprintf(stream, _("\n")) < 0) - return -1; - - if (additional_data) { - if (fprintf(stream, _("Additional Data:\n")) < 0) - return -1; - if (pvattest_hexdump(stream, additional_data, 0x60L, "0x", FALSE) < 0) - return -1; - if (fprintf(stream, _("\n")) < 0) - return -1; - } - break; - case VERIFY_FMT_YAML: - if (fprintf(stream, "cuid: ") < 0) - return -1; - if (pvattest_hexdump(stream, config_uid, 0L, "'0x", FALSE) < 0) - return -1; - if (fprintf(stream, _("'\n")) < 0) - return -1; - - if (additional_data) { - if (fprintf(stream, "add: ") < 0) - return -1; - - if (pvattest_hexdump(stream, additional_data, 0x0L, "'0x", FALSE) < 0) - return -1; - if (fprintf(stream, _("'\n")) < 0) - return -1; - } - break; - default: - 
g_assert_not_reached(); - break; - } - return 0; -} - -#define __PVATTEST_VERIFY_ERROR_MSG _("Attestation measurement verification failed") -static int do_verify(const pvattest_verify_config_t *verify_config, const int appl_log_lvl) -{ - g_autoptr(GBytes) user_data = NULL, uv_measurement = NULL, additional_data = NULL, - image_hdr = NULL, calc_measurement = NULL, config_uid = NULL, - meas_key = NULL, arp_key = NULL, nonce = NULL, serialized_arcb = NULL; - g_autofree att_meas_ctx_t *measurement_hdr = NULL; - g_autoptr(exchange_format_ctx_t) input_ctx = NULL; - const char *err_prefix = __PVATTEST_VERIFY_ERROR_MSG; - g_autoptr(GError) error = NULL; - gboolean rc; - - image_hdr = pv_file_get_content_as_g_bytes(verify_config->hdr_path, &error); - if (!image_hdr) - goto err_exit; - - measurement_hdr = att_extract_from_hdr(image_hdr, &error); - if (!measurement_hdr) - goto err_exit; - - pvattest_log_debug(_("Image header loaded.")); - - input_ctx = exchange_ctx_from_file(verify_config->input_path, &error); - if (!input_ctx) - goto err_exit; - - config_uid = exchange_get_config_uid(input_ctx); - uv_measurement = exchange_get_measurement(input_ctx); - user_data = exchange_get_user_data(input_ctx); - additional_data = exchange_get_additional_data(input_ctx); - serialized_arcb = exchange_get_serialized_arcb(input_ctx); - - if (!uv_measurement || !serialized_arcb) { - g_set_error(&error, PVATTEST_ERROR, PVATTEST_SUBC_INVALID, - _("Input data has no measurement")); - goto err_exit; - } - pvattest_log_debug(_("Input data loaded.")); - - att_add_uid(measurement_hdr, config_uid); - - arp_key = pv_file_get_content_as_g_bytes(verify_config->arp_key_in_path, &error); - if (!arp_key) - goto err_exit; - pvattest_log_debug(_("ARPK loaded.")); - - rc = arcb_v1_verify_serialized_arcb(serialized_arcb, arp_key, &meas_key, &nonce, &error); - if (!rc) - goto err_exit; - - pvattest_log_debug(_("Input ARCB verified.")); - - calc_measurement = att_gen_measurement_hmac_sha512(measurement_hdr, 
meas_key, user_data, - nonce, additional_data, &error); - if (!calc_measurement) - goto err_exit; - pvattest_log_debug(_("Measurement calculated.")); - - if (!att_verify_measurement(calc_measurement, uv_measurement, &error)) { - pvattest_log_GError(__PVATTEST_VERIFY_ERROR_MSG, error); - pvattest_log_debug(_("Measurement values:")); - gbhexdump(uv_measurement); - gbhexdump(calc_measurement); - return PVATTEST_EXIT_MEASURE_NOT_VERIFIED; - } - - /* Write human-readable output to stdout */ - if (appl_log_lvl >= PVATTEST_LOG_LVL_INFO) { - if (fprint_verify_result(stdout, VERIFY_FMT_HUMAN, config_uid, additional_data) < - 0) { - g_set_error(&error, PV_GLIB_HELPER_ERROR, PV_GLIB_HELPER_FILE_ERROR, - "stdout: %s", g_strerror(errno)); - err_prefix = "Failed to write output"; - goto err_exit; - } - } - - /* Write to file */ - if (verify_config->output_path) { - g_autoptr(FILE) output = pv_file_open(verify_config->output_path, "wx", &error); - - if (!output) { - err_prefix = "Failed to write output"; - goto err_exit; - } - if (fprint_verify_result(output, verify_config->output_fmt, config_uid, - additional_data) < 0) { - g_set_error(&error, PV_GLIB_HELPER_ERROR, PV_GLIB_HELPER_FILE_ERROR, - "'%s': %s", verify_config->output_path, g_strerror(errno)); - err_prefix = "Failed to write output"; - goto err_exit; - } - } - return EXIT_SUCCESS; - -err_exit: - pvattest_log_GError(err_prefix, error); - return EXIT_FAILURE; -} - -/* - * Will not free the config structs, but the nested char* etc. - * that's what we need to do as we will receive a statically allocated config_t - * Not defined in the parse header as someone might incorrectly assume - * that the config pointers will be freed. 
- */ -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(pvattest_config_t, pvattest_parse_clear_config) -int main(int argc, char *argv[]) -{ - int appl_log_lvl = PVATTEST_LOG_LVL_DEFAULT; - g_autoptr(pvattest_config_t) config = NULL; - g_autoptr(GError) error = NULL; - enum pvattest_command command; - int rc; - - /* setting up the default log handler to filter messages based on the - * log level specified by the user. - */ - g_log_set_handler(NULL, G_LOG_LEVEL_MASK | G_LOG_FLAG_FATAL | G_LOG_FLAG_RECURSION, - &pvattest_log_default_logger, &appl_log_lvl); - /* setting up the log handler for hexdumps (no prefix and '\n' at end of - * message)to filter messages based on the log level specified by the - * user. - */ - g_log_set_handler(PVATTEST_BYTES_LOG_DOMAIN, - G_LOG_LEVEL_MASK | G_LOG_FLAG_FATAL | G_LOG_FLAG_RECURSION, - &pvattest_log_plain_logger, &appl_log_lvl); - - command = pvattest_parse(&argc, &argv, &config, &error); - if (command == PVATTEST_SUBC_INVALID) { - pvattest_log_error(_("%s\nTry '%s --help' for more information"), error->message, - GETTEXT_PACKAGE); - exit(EXIT_FAILURE); - } - g_assert(config); - appl_log_lvl = config->general.log_level; - - pv_init(); - - switch (command) { - case PVATTEST_SUBC_CREATE: - rc = do_create(&config->create); - break; -#ifdef PVATTEST_COMPILE_PERFORM - case PVATTEST_SUBC_PERFORM: - rc = do_perform(&config->perform); - break; -#endif /* PVATTEST_COMPILE_PERFORM */ - case PVATTEST_SUBC_VERIFY: - rc = do_verify(&config->verify, appl_log_lvl); - break; - default: - g_return_val_if_reached(EXIT_FAILURE); - } - - pv_cleanup(); - - return rc; -} diff -Nru s390-tools-2.31.0/pvattest/src/types.h s390-tools-2.33.1/pvattest/src/types.h --- s390-tools-2.31.0/pvattest/src/types.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/types.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,18 +0,0 @@ -/* - * Common data type definitions and functions - * - * Copyright IBM Corp. 
2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -#ifndef PVATTEST_TYPES_H -#define PVATTEST_TYPES_H -#include - -/* Types to mark values as big endian. */ -typedef uint16_t be16_t; -typedef uint32_t be32_t; -typedef uint64_t be64_t; - -#endif /* PVATTEST_TYPES_H */ diff -Nru s390-tools-2.31.0/pvattest/src/uvio.c s390-tools-2.33.1/pvattest/src/uvio.c --- s390-tools-2.31.0/pvattest/src/uvio.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/uvio.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,182 +0,0 @@ -/* - * UV device (uvio) related functions and definitions. - * uses s390 only (kernel) features. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -/* Must be included before any other header */ -#include "config.h" -#include "lib/zt_common.h" - -#ifdef PVATTEST_COMPILE_PERFORM -#include -#include -#include -#include -#include - -#include "attestation.h" -#include "uvio.h" -#include "common.h" -#include "log.h" - -/* some helper macros */ -#define U64_TO_PTR(v) ((void *)(v)) -#define PTR_TO_U64(ptr) ((uint64_t)(ptr)) - -uvio_attest_t *build_attestation_v1_ioctl(GBytes *serialized_arcb, GBytes *user_data, - const uint32_t measurement_size, - const uint32_t add_data_size, GError **error) -{ - g_autoptr(uvio_attest_t) uvio_attest = NULL; - size_t arcb_size; - void *arcb; - - pv_wrapped_g_assert(serialized_arcb); - - g_bytes_ref(serialized_arcb); - arcb = g_bytes_unref_to_data(serialized_arcb, &arcb_size); - - uvio_attest = g_malloc0(sizeof(*uvio_attest)); - uvio_attest->arcb_addr = PTR_TO_U64(g_steal_pointer(&arcb)); - g_assert_cmpuint(arcb_size, <, UINT32_MAX); - uvio_attest->arcb_len = (uint32_t)arcb_size; - /* transferred the local ownership of the arcb from this function to uvio_attest; nullify pointer */ - 
g_steal_pointer(&serialized_arcb); - - if (user_data) { - size_t copied_data_size; - - if (pv_gbytes_memcpy(uvio_attest->user_data, sizeof(uvio_attest->user_data), - user_data, &copied_data_size) == NULL) { - g_set_error(error, ATT_ERROR, ATT_ERR_INVALID_USER_DATA, - _("User data %li bytes is larger than %li bytes"), - g_bytes_get_size(user_data), sizeof(uvio_attest->user_data)); - return NULL; - } - STATIC_ASSERT(sizeof(uvio_attest->user_data) <= UINT16_MAX); - uvio_attest->user_data_len = (uint16_t)copied_data_size; - } - - uvio_attest->meas_addr = PTR_TO_U64(g_malloc0(measurement_size)); - uvio_attest->meas_len = measurement_size; - - uvio_attest->add_data_addr = PTR_TO_U64(g_malloc0(add_data_size)); - uvio_attest->add_data_len = add_data_size; - - return g_steal_pointer(&uvio_attest); -} - -void uvio_attest_free(uvio_attest_t *attest) -{ - if (!attest) - return; - - g_free(U64_TO_PTR(attest->arcb_addr)); - g_free(U64_TO_PTR(attest->meas_addr)); - g_free(U64_TO_PTR(attest->add_data_addr)); - g_free(attest); -} - -GBytes *uvio_get_measurement(const uvio_attest_t *attest) -{ - pv_wrapped_g_assert(attest); - - if (attest->meas_addr == (__u64)0) - return NULL; - return g_bytes_new(U64_TO_PTR(attest->meas_addr), attest->meas_len); -} - -GBytes *uvio_get_additional_data(const uvio_attest_t *attest) -{ - pv_wrapped_g_assert(attest); - - if (attest->add_data_addr == (__u64)0) - return NULL; - return g_bytes_new(U64_TO_PTR(attest->add_data_addr), attest->add_data_len); -} - -GBytes *uvio_get_config_uid(const uvio_attest_t *attest) -{ - pv_wrapped_g_assert(attest); - - return g_bytes_new(attest->config_uid, sizeof(attest->config_uid)); -} - -uint16_t uvio_ioctl(const int uv_fd, const unsigned int cmd, const uint32_t flags, - const void *argument, const uint32_t argument_size, GError **error) -{ - g_autofree struct uvio_ioctl_cb *uv_ioctl = g_malloc0(sizeof(*uv_ioctl)); - int rc, cached_errno; - - pv_wrapped_g_assert(argument); - - uv_ioctl->flags = flags; - 
uv_ioctl->argument_addr = PTR_TO_U64(argument); - uv_ioctl->argument_len = argument_size; - rc = ioctl(uv_fd, cmd, uv_ioctl); - cached_errno = errno; - - if (rc < 0) { - g_set_error(error, UVIO_ERROR, UVIO_ERR_UV_IOCTL, _("ioctl failed: %s "), - g_strerror(cached_errno)); - return 0; - } - - if (uv_ioctl->uv_rc != UVC_EXECUTED) - g_set_error(error, UVIO_ERROR, UVIO_ERR_UV_NOT_OK, - _("Ultravisor call returned '%#x' (%s)"), uv_ioctl->uv_rc, - uvio_uv_rc_to_str(uv_ioctl->uv_rc)); - return uv_ioctl->uv_rc; -} - -uint16_t uvio_ioctl_attest(const int uv_fd, uvio_attest_t *attest, GError **error) -{ - pv_wrapped_g_assert(attest); - - return uvio_ioctl(uv_fd, UVIO_IOCTL_ATT, 0, attest, sizeof(*attest), error); -} - -int uvio_open(const char *uv_path, GError **error) -{ - pv_wrapped_g_assert(uv_path); - - int uv_fd; - int cached_errno; - - uv_fd = open(uv_path, O_RDWR); - cached_errno = errno; - if (uv_fd < 0) - g_set_error(error, UVIO_ERROR, UVIO_ERR_UV_OPEN, - _("Cannot open uv driver at %s: %s"), uv_path, - g_strerror(cached_errno)); - return uv_fd; -} - -const char *uvio_uv_rc_to_str(const int rc) -{ - switch (rc) { - case 0x02: - return _("Invalid UV command"); - case 0x106: - return _("Unsupported attestation request version"); - case 0x108: - return _("Number of key slots is greater than the maximum number supported"); - case 0x10a: - return _("Unsupported plaintext attestation flags"); - case 0x10c: - return _( - "Unable to decrypt attestation request control block. 
No valid host-key was provided"); - case 0x10d: - return _("Measurement data length is too small to store measurement"); - case 0x10e: - return _("Additional data length is too small to store measurement"); - default: - return _("Unknown code"); - } -} - -#endif /* PVATTEST_COMPILE_PERFORM */ diff -Nru s390-tools-2.31.0/pvattest/src/uvio.h s390-tools-2.33.1/pvattest/src/uvio.h --- s390-tools-2.31.0/pvattest/src/uvio.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/src/uvio.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,110 +0,0 @@ -/* - * UV device (uvio) related functions and definitions. - * - * Copyright IBM Corp. 2022 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ -#ifndef PVATTEST_UVIO_H -#define PVATTEST_UVIO_H -#include "config.h" - -#ifdef PVATTEST_COMPILE_PERFORM - -#include -#include - -#include "libpv/glib-helper.h" - -#include "arcb.h" -#include "common.h" - -#define UVC_EXECUTED 0x0001 - -typedef struct uvio_attest uvio_attest_t; -G_STATIC_ASSERT(sizeof(uvio_attest_t) == 0x138); -G_STATIC_ASSERT(sizeof(struct uvio_ioctl_cb) == 0x40); - -/** - * build_attestation_v1_ioctl: - * @serialized_arcb: A ARCB in binary format - * @user_data (optional): up to 256 bytes of user data to be added to the measurement - * @measurement_size: Size of the measurement result to be allocated - * @add_data_size: Size of the additional data to be allocated - * @error: return location for a #GError - * - * Builds the structure to be passed to `/dev/uv` for attestation IOCTLs and - * allocates any required memory. 
- * - * Returns: (nullable) (transfer full): Pointer to a uvio_attest_t to be passed to `/dev/uv` - */ -uvio_attest_t *build_attestation_v1_ioctl(GBytes *serialized_arcb, GBytes *user_data, - const uint32_t measurement_size, - const uint32_t add_data_size, GError **error) - PV_NONNULL(1); -GBytes *uvio_get_measurement(const uvio_attest_t *attest) PV_NONNULL(1); -GBytes *uvio_get_additional_data(const uvio_attest_t *attest) PV_NONNULL(1); -GBytes *uvio_get_config_uid(const uvio_attest_t *attest) PV_NONNULL(1); -void uvio_attest_free(uvio_attest_t *attest); -WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(uvio_attest_t, uvio_attest_free) - -/** - * uvio_ioctl: - * @uv_fd: file descriptor to the UV-device - * @cmd: IOCTL cmd - * @flags: flags for the uv IOCTL - * @argument: pointer to the payload - * @argument_size: size of #argument - * @error: return location for a #GError - * - * Builds the IOCTL structure using, flags and argument, performs the IOCTL, and returns the UV rc in big endian. - * If the device driver emits an error code, a corresponding #GError will be created. - * Use the specialized calls (uvio_ioctl_*). - * - * Returns: UV rc if no device error occurred (>0) - * 0 on #GError - */ -uint16_t uvio_ioctl(const int uv_fd, const unsigned int cmd, const uint32_t flags, - const void *argument, const uint32_t argument_size, GError **error) - PV_NONNULL(4); -/** - * uvio_ioctl_attest: - * @uv_fd: file descriptor to the UV-device - * @attest: pointer to the attestation request - * @error: return location for a #GError - * - * Wraps 'uvio_ioctl' for attestation. 
- * - * Returns: UV rc if no device error occurred (>0) - * 0 on #GError - */ -uint16_t uvio_ioctl_attest(const int uv_fd, uvio_attest_t *attest, GError **error) PV_NONNULL(2); - -/** - * uvio_open: - * @uv_path: path of the UV-device usually at /dev/uv - * @error: return location for a #GError - * - * Returns: File descriptor for the UV-device - * 0 on #GError - */ -int uvio_open(const char *uv_path, GError **error) PV_NONNULL(1); - -/** - * uvio_uv_rc_to_str: - * @rc: UV return code - * - * Returns: Pointer to an error string corresponding to the given UV-rc. - */ -const char *uvio_uv_rc_to_str(const int rc); - -#define UVIO_ERROR g_quark_from_static_string("pv-uvio_error-quark") -typedef enum { - UVIO_ERR_UV_IOCTL, - UVIO_ERR_UV_OPEN, - UVIO_ERR_UV_NOT_OK, -} uvio_error_e; - -#endif /* PVATTEST_COMPILE_PERFORM */ -#endif /* PVATTEST_UVIO_H */ diff -Nru s390-tools-2.31.0/pvattest/tools/Makefile s390-tools-2.33.1/pvattest/tools/Makefile --- s390-tools-2.31.0/pvattest/tools/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/tools/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,5 +0,0 @@ -include ../../common.mak - -install: - $(INSTALL) -d -m 755 $(DESTDIR)$(USRBINDIR) - $(INSTALL) -g $(GROUP) -o $(OWNER) -m 755 pvextract-hdr "$(DESTDIR)$(USRBINDIR)" diff -Nru s390-tools-2.31.0/pvattest/tools/pvattest-info s390-tools-2.33.1/pvattest/tools/pvattest-info --- s390-tools-2.31.0/pvattest/tools/pvattest-info 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/tools/pvattest-info 1970-01-01 01:00:00.000000000 +0100 @@ -1,94 +0,0 @@ -#!/bin/bash -# -# pvattest-info - get additional information from an attestation measurement -# -# Sample: -# ./pvattest-info attestresp.bin -# -# Copyright IBM Corp. 2022 -# -# s390-tools is free software; you can redistribute it and/or modify -# it under the terms of the MIT license. See LICENSE for details. 
- -set -o pipefail -set -o nounset -set -e - -XDUMP='od -A x -t x2z -v --endian=big' - -usage() { - cat <<-EOF - Usage: $(basename "$0") FILE - - Prints config UID and additional data if available. - EOF -} - -function check_is_pvattest_binary() { - local input="$1" - local size - local version - - size=$(wc -c <"$input") - if [ "$size" -lt 64 ]; then - echo "ERROR: Input file is too small." >&2 - exit 1 - fi - - ${XDUMP} --read-bytes 16 -- "${input}" 2>/dev/null | grep -q pvattest || - { echo "ERROR: ${input} does not contain a pvattest binary output." >&2 && exit 1; } - - size=$(${XDUMP} --skip-bytes 12 --read-bytes 4 -- "${input}" 2>/dev/null | awk 'NR==1 {print "0x" $2 $3}') - if [ $((size)) -lt 64 ]; then - echo "ERROR: ${input} does not contain a pvattest binary output." >&2 - exit 1 - fi - - version=$(${XDUMP} --skip-bytes 8 --read-bytes 4 -- "$input" 2>/dev/null) - echo "$version" | grep -q "0000 0100" || - { echo -n "WARNING: unknown hdr version " >&2 && - echo "$version" | awk '{print "0x" $2 $3}'>&2 ; } -} - -function print_entry() { - local file_off="$1" - local text="$2" - local input="$3" - local size - local off - - size=$(${XDUMP} --skip-bytes $((file_off)) --read-bytes 4 -- "${input}" 2>/dev/null | - awk 'NR==1 {print "0x" $2 $3}') - off=$(${XDUMP} --skip-bytes $((file_off + 4)) --read-bytes 4 -- "${input}" 2>/dev/null | - awk 'NR==1 {print "0x" $2 $3}') - - if [[ $size != "0x00000000" ]] || [[ $off != "0x00000000" ]]; then - echo "${text}:" - od -A n -w$((size)) -t x8 --skip-bytes $((off)) --read-bytes $((size)) -- "${input}" 2>/dev/null |\ - sed -e 's/\s//g' - fi -} - -function require_command() { - local cmd="$1" - - command -v "$cmd" >/dev/null 2>&1 || \ - { echo >&2 "ERROR: $cmd required but not installed."; exit 1; } -} - -require_command awk -require_command wc -require_command od - -if [ $# -eq 0 ]; then - echo "ERROR: Input not set. 
Use '$(basename "$0") [FILE]' to specify the Input file" >&2 - exit 1 -fi - -input="$1" - -[ -e "$input" ] || { echo "ERROR: File '$1' not found" >&2 && exit 1; } -check_is_pvattest_binary "$input" - -print_entry 0x38 "Config UID" "$input" -print_entry 0x28 "Additional Data" "$input" diff -Nru s390-tools-2.31.0/pvattest/tools/pvextract-hdr s390-tools-2.33.1/pvattest/tools/pvextract-hdr --- s390-tools-2.31.0/pvattest/tools/pvextract-hdr 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/pvattest/tools/pvextract-hdr 1970-01-01 01:00:00.000000000 +0100 @@ -1,104 +0,0 @@ -#!/bin/bash -# -# pvextract_hdr - extract an IBM Secure Execution header from the Image -# -# Sample: -# ./pvextract-hdr -o sehdr.bin se-image.bin -# -# Copyright IBM Corp. 2022 -# -# s390-tools is free software; you can redistribute it and/or modify -# it under the terms of the MIT license. See LICENSE for details. - -set -o pipefail -set -o nounset -set -e - -XDUMP='od -A x -t x2z -v --endian=big' - -def_output='sehdr.bin' -def_skip=0x14 -def_len=0x4 - -usage() { - cat <<-EOF - Usage: $(basename "$0") [-o ${def_output}] [-s ${def_skip}] [-l ${def_len}] FILE - - Extract the header of the SE-image located in FILE. - By default ${def_skip} pages will be skipped until starting to search - for the header. By default the search will be stopped after ${def_len} pages. - '${def_output}' is the default output file name. 
- EOF -} - -function check_file() { - [ -e "$1" ] || - { echo "ERROR: File '$1' not found" >&2 && exit 1; } -} - -function check_hdr_ver() { - local hdr_start="$1" - local input="$2" - ${XDUMP} --skip-bytes $((hdr_start + 8)) --read-bytes 4 -- "$input" 2>/dev/null | grep -q "000 0100" || - { echo -n "WARNING: unknown hdr version " && - ${XDUMP} --skip-bytes $((hdr_start + 8)) --read_bytes 4 -- "$input" 2>/dev/null | awk '{print "0x" $2 $3}'; } -} - -function require_command() { - local cmd="$1" - - command -v "$cmd" >/dev/null 2>&1 || \ - { echo >&2 "ERROR: $cmd required but not installed."; exit 1; } -} - -require_command od -require_command awk -require_command grep - -output=${def_output} -parsed_skip=${def_skip} -parsed_len=${def_len} -# the last argument must be the input file -input="${*: -1}" -while getopts 'o:s:l:h' OPTION; do - case "$OPTION" in - o) output="$OPTARG" ;; - s) parsed_skip="$OPTARG" ;; - l) parsed_len="$OPTARG" ;; - h) - usage - exit 0 - ;; - :) - echo "ERROR: Must supply an argument to -$OPTARG." >&2 - exit 1 - ;; - *) - usage - exit 1 - ;; - esac -done - -#argument specify pages; convert to bytes -skip=$((parsed_skip * 0x1000)) -len=$((parsed_len * 0x1000)) - -if [ $# -eq 0 ]; then - echo "ERROR: Input not set. Use '$(basename "$0") [FILE]' to specify the Input file" >&2 - exit 1 -fi - -check_file "$input" -hdr_start=$(${XDUMP} --skip-bytes $((skip)) --read-bytes $((len)) -- "${input}" 2>/dev/null | grep IBMSecEx || - { echo ERROR: "${input} does not contain an SE header." 
>&2 && exit 1; }) -hdr_start=$(echo "${hdr_start}" | awk '{print "0x" $1}' | cut -c 1-10) -echo "SE header found at offset ${hdr_start}" - -check_hdr_ver "$hdr_start" "$input" - -size=$(${XDUMP} --skip-bytes $((hdr_start + 12)) --read-bytes 4 -- "${input}" 2>/dev/null | - awk 'NR==1 {print "0x" $2 $3}') - -dd if="${input}" of="${output}" bs=1 count=$((size)) skip=$((hdr_start)) status=none -echo "SE header written to '${output}' ($((size)) bytes)" diff -Nru s390-tools-2.31.0/README.md s390-tools-2.33.1/README.md --- s390-tools-2.31.0/README.md 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/README.md 2024-05-28 08:26:36.000000000 +0200 @@ -19,6 +19,12 @@ all s390-tools that are written in rust and require external crates. Disable the compilation of all tools in `rust/` using HAVE_CARGO=0 See the `rust/README.md` for Details + - pvattest: + Create, perform, and verify IBM Secure Execution attestation measurements. + - pvapconfig: + Automatic configure APQNs within an SE KVM guest + - pvsecret: + Manage secrets for IBM Secure Execution guests * dasdfmt: Low-level format ECKD DASDs with the classical Linux disk layout or the new @@ -38,9 +44,6 @@ * genprotimg: Create a protected virtualization image. - * pvattest: - Create, perform, and verify protected virtualization attestation measurements. - * udev rules: - 59-dasd.rules: rules for unique DASD device nodes created in /dev/disk/. - 57-osasnmpd.rules: udev rules for osasnmpd. @@ -360,12 +363,11 @@ The runtime requirements are: openssl-libs (>= 1.1.0) and glib2. -* pvattest: +* rust/pvattest: For building pvattest you need OpenSSL version 1.1.1 or newer - installed (openssl-devel.rpm). Also required is glib2.56 or newer - (glib2-devel.rpm) and libcurl. + installed (openssl-devel.rpm). Also required is cargo and libcurl. Tip: you may skip the pvattest build by adding - `HAVE_OPENSSL=0`, `HAVE_LIBCURL=0`, or `HAVE_GLIB2=0`. + `HAVE_OPENSSL=0`, `HAVE_LIBCURL=0`, or `HAVE_CARGO=0`. 
The runtime requirements are: openssl-libs (>= 1.1.1) and glib2.56 or newer. diff -Nru s390-tools-2.31.0/rust/Cargo.lock s390-tools-2.33.1/rust/Cargo.lock --- s390-tools-2.31.0/rust/Cargo.lock 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/Cargo.lock 2024-05-28 08:26:36.000000000 +0200 @@ -3,21 +3,6 @@ version = 3 [[package]] -name = "addr2line" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] name = "aho-corasick" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -82,37 +67,12 @@ checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] -name = "assert-json-diff" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] -name = "backtrace" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -131,12 +91,6 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] -name = "bytes" -version = "1.5.0" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" - -[[package]] name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -174,6 +128,15 @@ ] [[package]] +name = "clap_complete" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6b5c519bab3ea61843a7923d074b04245624bb84a64a8c150f5deb014e388b" +dependencies = [ + "clap", +] + +[[package]] name = "clap_derive" version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -208,15 +171,15 @@ "openssl-probe", "openssl-sys", "schannel", - "socket2 0.4.9", + "socket2", "winapi", ] [[package]] name = "curl-sys" -version = "0.4.68+curl-8.4.0" +version = "0.4.72+curl-8.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a0d18d88360e374b16b2273c832b5e57258ffc1d4aa4f96b108e0738d5752f" +checksum = "29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea" dependencies = [ "cc", "libc", @@ -224,16 +187,10 @@ "openssl-sys", "pkg-config", "vcpkg", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] name = "errno" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -255,12 +212,6 @@ ] [[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] name = "foreign-types" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -276,104 +227,6 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] -name = "form_urlencoded" -version = "1.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "futures" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" - -[[package]] -name = "futures-executor" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" - -[[package]] -name = "futures-macro" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" - -[[package]] -name = "futures-task" -version = "0.3.29" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" - -[[package]] -name = "futures-util" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] name = "getrandom" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -385,43 +238,12 @@ ] [[package]] -name = "gimli" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" - -[[package]] -name = "h2" -version = "0.3.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap 2.1.0", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] -name = "hashbrown" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" - -[[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -434,81 +256,13 @@ checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" [[package]] -name = "http" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "hyper" -version = "0.14.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2 0.4.9", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] name = "indexmap" version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", - "hashbrown 0.12.3", -] - -[[package]] -name = "indexmap" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" -dependencies = [ - "equivalent", - "hashbrown 0.14.3", + "hashbrown", ] [[package]] @@ -571,16 +325,6 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] -name = "lock_api" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] name = "log" version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -593,63 +337,6 @@ checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] -name = "miniz_oxide" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" -dependencies = [ - "libc", - "wasi", - "windows-sys 0.48.0", -] - -[[package]] -name = "mockito" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8d3038e23466858569c2d30a537f691fa0d53b51626630ae08262943e3bbb8b" -dependencies = [ - "assert-json-diff", - "futures", - "hyper", - "log", - "rand", - "regex", - "serde_json", - "serde_urlencoded", - "similar", - "tokio", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "object" -version = "0.32.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" -dependencies = [ - "memchr", -] - -[[package]] name = "once_cell" version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -700,58 +387,6 @@ ] [[package]] -name = "openssl_extensions" -version = "0.1.0" -dependencies = [ - "foreign-types", - "libc", - "log", - "openssl", - "openssl-sys", -] - -[[package]] -name = "parking_lot" -version = "0.12.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - -[[package]] -name = "percent-encoding" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" - -[[package]] -name = "pin-project-lite" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] name = "pkg-config" version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -773,65 +408,46 @@ ] [[package]] -name = "pv" -version = "1.0.0" +name = "pvapconfig" +version = "0.10.0" dependencies = [ - "byteorder", "clap", - "curl", - "log", - "mockito", - "once_cell", + "clap_complete", + "lazy_static", "openssl", - "openssl_extensions", - "pv_core", + "rand", + "regex", + "s390_pv_core", "serde", - "serde_test", - "thiserror", + "serde_yaml", "utils", - "zerocopy", ] [[package]] -name = "pv_core" -version = "1.0.0" +name = "pvattest" +version = "0.10.0" dependencies = [ + "anyhow", "byteorder", - "lazy_static", - "libc", - "log", - "mockito", - "serde", - "serde_test", - "thiserror", - "utils", - "zerocopy", -] - -[[package]] -name = "pvapconfig" -version = "0.9.0" -dependencies = [ "clap", - "lazy_static", - "openssl", - 
"openssl-sys", - "pv_core", - "rand", - "regex", + "clap_complete", + "log", + "s390_pv", "serde", "serde_yaml", "utils", + "zerocopy", ] [[package]] name = "pvsecret" -version = "0.9.0" +version = "0.10.0" dependencies = [ "anyhow", "clap", + "clap_complete", "log", - "pv", + "s390_pv", "serde_yaml", "utils", ] @@ -876,15 +492,6 @@ ] [[package]] -name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] name = "regex" version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -914,12 +521,6 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] name = "rustix" version = "0.37.27" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -940,6 +541,37 @@ checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] +name = "s390_pv" +version = "0.10.0" +dependencies = [ + "byteorder", + "curl", + "foreign-types", + "log", + "openssl", + "openssl-sys", + "s390_pv_core", + "serde", + "serde_test", + "thiserror", + "zerocopy", +] + +[[package]] +name = "s390_pv_core" +version = "0.10.0" +dependencies = [ + "byteorder", + "lazy_static", + "libc", + "log", + "serde", + "serde_test", + "thiserror", + "zerocopy", +] + +[[package]] name = "schannel" version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -949,12 +581,6 @@ ] [[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] name = "serde" version = 
"1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -975,17 +601,6 @@ ] [[package]] -name = "serde_json" -version = "1.0.99" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] name = "serde_test" version = "1.0.176" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -995,24 +610,12 @@ ] [[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] name = "serde_yaml" version = "0.9.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" dependencies = [ - "indexmap 1.9.3", + "indexmap", "itoa", "ryu", "serde", @@ -1020,36 +623,6 @@ ] [[package]] -name = "signal-hook-registry" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" -dependencies = [ - "libc", -] - -[[package]] -name = "similar" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "smallvec" -version = "1.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" - -[[package]] name = "socket2" version = "0.4.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" @@ -1060,16 +633,6 @@ ] [[package]] -name = "socket2" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" -dependencies = [ - "libc", - "windows-sys 0.48.0", -] - -[[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1117,81 +680,6 @@ ] [[package]] -name = "tokio" -version = "1.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" -dependencies = [ - "backtrace", - "bytes", - "libc", - "mio", - "num_cpus", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "socket2 0.5.4", - "tokio-macros", - "windows-sys 0.48.0", -] - -[[package]] -name = "tokio-macros" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tokio-util" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", - "tracing", -] - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - -[[package]] -name = "tracing" -version = "0.1.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" -dependencies = [ - "pin-project-lite", - "tracing-core", -] - -[[package]] -name = "tracing-core" -version = "0.1.32" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" -dependencies = [ - "once_cell", -] - -[[package]] -name = "try-lock" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" - -[[package]] name = "unicode-ident" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1211,7 +699,14 @@ [[package]] name = "utils" -version = "0.1.0" +version = "0.10.0" +dependencies = [ + "clap", + "libc", + "log", + "s390_pv", + "serde", +] [[package]] name = "vcpkg" @@ -1220,15 +715,6 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1277,7 +763,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.4", ] [[package]] @@ -1296,6 +791,21 @@ ] [[package]] +name = "windows-targets" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +dependencies = [ + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + 
"windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", +] + +[[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1308,6 +818,12 @@ checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" + +[[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1320,6 +836,12 @@ checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] +name = "windows_aarch64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" + +[[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1332,6 +854,12 @@ checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] +name = "windows_i686_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" + +[[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1344,6 +872,12 @@ checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] +name = "windows_i686_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" + +[[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ 
-1356,6 +890,12 @@ checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] +name = "windows_x86_64_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" + +[[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1368,6 +908,12 @@ checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" + +[[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1380,6 +926,12 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] +name = "windows_x86_64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" + +[[package]] name = "zerocopy" version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" diff -Nru s390-tools-2.31.0/rust/Cargo.toml s390-tools-2.33.1/rust/Cargo.toml --- s390-tools-2.31.0/rust/Cargo.toml 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/Cargo.toml 2024-05-28 08:26:36.000000000 +0200 @@ -3,6 +3,7 @@ "pv", "pv_core", "pvapconfig", + "pvattest", "pvsecret", "utils", ] diff -Nru s390-tools-2.31.0/rust/Makefile s390-tools-2.33.1/rust/Makefile --- s390-tools-2.31.0/rust/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -19,7 +19,7 @@ ifneq (${HAVE_OPENSSL},0) ifneq (${HAVE_LIBCURL},0) - PV_TARGETS := pvsecret pvapconfig + PV_TARGETS := pvsecret pvapconfig pvattest PV_BUILD_TARGETS := 
$(PV_TARGETS) CARGO_TEST_TARGETS += $(addsuffix .test,pv $(PV_TARGETS)) @@ -58,6 +58,8 @@ all: $(BUILD_TARGETS) install: $(INSTALL_TARGETS) + $(INSTALL) -d -m 755 $(DESTDIR)$(USRBINDIR) + $(INSTALL) -g $(GROUP) -o $(OWNER) -m 755 pvattest/tools/pvextract-hdr "$(DESTDIR)$(USRBINDIR)" print-rust-targets: echo $(BUILD_TARGETS) @@ -76,6 +78,7 @@ $(INSTALL) target/release/$(target) $(DESTDIR)$(USRBINDIR);) install-man: + $(INSTALL) -d -m 755 $(DESTDIR)$(MANDIR)/man1 $(foreach target,$(CARGO_TARGETS),\ $(INSTALL) -m 644 $(target)/man/*.1 -t $(DESTDIR)$(MANDIR)/man1;) $(foreach target,$(PV_TARGETS),\ diff -Nru s390-tools-2.31.0/rust/pv/Cargo.toml s390-tools-2.33.1/rust/pv/Cargo.toml --- s390-tools-2.31.0/rust/pv/Cargo.toml 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/Cargo.toml 2024-05-28 08:26:36.000000000 +0200 @@ -1,24 +1,26 @@ [package] -name = "pv" -version = "1.0.0" +name = "s390_pv" +version = "0.10.0" edition.workspace = true license.workspace = true +description = "s390-tools IBM Secure Execution utilities" +keywords = ["s390", "s390x", "IBM_Secure_Execution"] +repository = "https://github.com/ibm-s390-linux/s390-tools/tree/master/rust" +categories = ["hardware-support"] +readme = "README.md" [dependencies] byteorder = "1.3" -clap = { version ="4", features = ["derive", "wrap_help"] } -curl = "0.4.7" +curl = "0.4.44" +foreign-types = "0.3.1" log = { version = "0.4.6", features = ["std", "release_max_level_debug"] } -openssl = "0.10.49" +openssl = "0.10.57" +openssl-sys = "0.9.92" serde = { version = "1.0.139", features = ["derive"] } thiserror = "1.0.33" -utils = {path = "../utils"} zerocopy = { version="0.7", features = ["derive"] } -openssl_extensions = { path = "openssl_extensions" } -pv_core = { path = "../pv_core" } +pv_core = { path = "../pv_core", package = "s390_pv_core", version = "0.10.0" } [dev-dependencies] -mockito = {version = "1", default-features = false } -once_cell = "1.19" -serde_test = "1" +serde_test = "1.0.139" diff -Nru 
s390-tools-2.31.0/rust/pv/openssl_extensions/build.rs s390-tools-2.33.1/rust/pv/openssl_extensions/build.rs --- s390-tools-2.31.0/rust/pv/openssl_extensions/build.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright IBM Corp. 2023 - -#![allow( - clippy::inconsistent_digit_grouping, - clippy::uninlined_format_args, - clippy::unusual_byte_groupings -)] - -use std::env; - -fn main() { - if let Ok(vars) = env::var("DEP_OPENSSL_CONF") { - for var in vars.split(',') { - println!("cargo:rustc-cfg=osslconf=\"{}\"", var); - } - } - - if let Ok(version) = env::var("DEP_OPENSSL_VERSION_NUMBER") { - let version = u64::from_str_radix(&version, 16).unwrap(); - - if version >= 0x1_00_01_00_0 { - println!("cargo:rustc-cfg=ossl101"); - } - if version >= 0x1_00_02_00_0 { - println!("cargo:rustc-cfg=ossl102"); - } - if version >= 0x1_01_00_00_0 { - println!("cargo:rustc-cfg=ossl110"); - } - if version >= 0x1_01_00_07_0 { - println!("cargo:rustc-cfg=ossl110g"); - } - if version >= 0x1_01_00_08_0 { - println!("cargo:rustc-cfg=ossl110h"); - } - if version >= 0x1_01_01_00_0 { - println!("cargo:rustc-cfg=ossl111"); - } - if version >= 0x3_00_00_00_0 { - println!("cargo:rustc-cfg=ossl300"); - } - } -} diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/Cargo.toml s390-tools-2.33.1/rust/pv/openssl_extensions/Cargo.toml --- s390-tools-2.31.0/rust/pv/openssl_extensions/Cargo.toml 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -[package] -name = "openssl_extensions" -version = "0.1.0" -edition.workspace = true -license.workspace = true - -[dependencies] -foreign-types = "0.3.1" -libc = {version = "0.2.49", features = [ "extra_traits"] } -log = { version = "0.4.6", features = ["std", "release_max_level_debug"] } -openssl = "0.10.49" -openssl-sys 
= "0.9.85" diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/src/akid.rs s390-tools-2.33.1/rust/pv/openssl_extensions/src/akid.rs --- s390-tools-2.31.0/rust/pv/openssl_extensions/src/akid.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/src/akid.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,120 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright IBM Corp. 2023 - -use std::fmt; - -use foreign_types::{foreign_type, ForeignType, ForeignTypeRef}; -use openssl::x509::{X509CrlRef, X509Ref}; -use std::ffi::c_int; - -mod ffi { - extern "C" { - pub fn X509_check_akid( - issuer: *const openssl_sys::X509, - akid: *const openssl_sys::AUTHORITY_KEYID, - ) -> super::c_int; - } -} - -foreign_type! { - type CType = openssl_sys::AUTHORITY_KEYID; - fn drop = openssl_sys::AUTHORITY_KEYID_free; - - /// An `Authority Key Identifier`. - pub struct Akid; - /// Reference to `Akid` - pub struct AkidRef; -} - -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct AkidCheckResult(c_int); - -impl fmt::Debug for AkidCheckResult { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("AkidCheckResult") - .field("code", &self.0) - .finish() - } -} - -impl AkidCheckResult { - /// Creates an `AkidCheckResult` from a raw error number. 
- unsafe fn from_raw(err: c_int) -> AkidCheckResult { - AkidCheckResult(err) - } - - pub const OK: AkidCheckResult = AkidCheckResult(openssl_sys::X509_V_OK); - pub const ERR_AKID_ISSUER_SERIAL_MISMATCH: AkidCheckResult = - AkidCheckResult(openssl_sys::X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH); - pub const ERR_AKID_SKID_MISMATCH: AkidCheckResult = - AkidCheckResult(openssl_sys::X509_V_ERR_AKID_SKID_MISMATCH); -} - -impl AkidRef { - ///Check if the `Akid` matches the issuer - /// - pub fn check(&self, issuer: &X509Ref) -> AkidCheckResult { - unsafe { - let res = ffi::X509_check_akid(issuer.as_ptr(), self.as_ptr()); - AkidCheckResult::from_raw(res) - } - } -} - -pub trait AkidExtension { - fn akid(&self) -> Option; -} - -impl AkidExtension for X509Ref { - fn akid(&self) -> Option { - unsafe { - let ptr = openssl_sys::X509_get_ext_d2i( - self.as_ptr(), - openssl_sys::NID_authority_key_identifier, - std::ptr::null_mut(), - std::ptr::null_mut(), - ); - if ptr.is_null() { - None - } else { - Some(Akid::from_ptr(ptr as *mut _)) - } - } - } -} - -impl AkidExtension for X509CrlRef { - fn akid(&self) -> Option { - unsafe { - let ptr = openssl_sys::X509_CRL_get_ext_d2i( - self.as_ptr(), - openssl_sys::NID_authority_key_identifier, - std::ptr::null_mut(), - std::ptr::null_mut(), - ); - if ptr.is_null() { - None - } else { - Some(Akid::from_ptr(ptr as *mut _)) - } - } - } -} - -#[cfg(test)] -mod test { - use crate::test_utils::load_gen_cert; - - use super::*; - - #[test] - fn akid() { - let cert = load_gen_cert("ibm.crt"); - let ca = load_gen_cert("root_ca.crt"); - - let akid = cert.akid().unwrap(); - let res = akid.check(&ca); - assert_eq!(res, AkidCheckResult::OK); - } -} diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/src/crl.rs s390-tools-2.33.1/rust/pv/openssl_extensions/src/crl.rs --- s390-tools-2.31.0/rust/pv/openssl_extensions/src/crl.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/src/crl.rs 1970-01-01 01:00:00.000000000 
+0100 @@ -1,128 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright IBM Corp. 2023 - -pub use crate::stackable_crl::*; -use foreign_types::{ForeignType, ForeignTypeRef}; -use openssl::{ - error::ErrorStack, - stack::{Stack, StackRef}, - x509::{ - store::{X509StoreBuilderRef, X509StoreRef}, - X509CrlRef, X509NameRef, X509Ref, X509StoreContextRef, X509, - }, -}; - -pub fn opt_to_ptr(o: Option<&T>) -> *mut T::CType { - match o { - None => std::ptr::null_mut(), - Some(p) => p.as_ptr(), - } -} - -mod ffi { - extern "C" { - #[cfg(ossl110)] - pub fn X509_STORE_CTX_get1_crls( - ctx: *mut openssl_sys::X509_STORE_CTX, - nm: *mut openssl_sys::X509_NAME, - ) -> *mut openssl_sys::stack_st_X509_CRL; - pub fn X509_STORE_add_crl( - xs: *mut openssl_sys::X509_STORE, - x: *mut openssl_sys::X509_CRL, - ) -> std::ffi::c_int; - } -} - -pub trait X509StoreExtension { - fn add_crl(&mut self, crl: &X509CrlRef) -> Result<(), ErrorStack>; -} - -impl X509StoreExtension for X509StoreBuilderRef { - fn add_crl(&mut self, crl: &X509CrlRef) -> Result<(), ErrorStack> { - unsafe { - { - let r = ffi::X509_STORE_add_crl(self.as_ptr(), crl.as_ptr()); - if r <= 0 { - Err(ErrorStack::get()) - } else { - Ok(()) - } - } - } - } -} - -pub trait X509StoreContextExtension { - fn init_opt( - &mut self, - trust: &X509StoreRef, - cert: Option<&X509Ref>, - cert_chain: Option<&StackRef>, - with_context: F, - ) -> Result - where - F: FnOnce(&mut X509StoreContextRef) -> std::result::Result; - fn crls( - &mut self, - subj: &X509NameRef, - ) -> std::result::Result, ErrorStack>; -} - -impl X509StoreContextExtension for X509StoreContextRef { - fn init_opt( - &mut self, - trust: &X509StoreRef, - cert: Option<&X509Ref>, - cert_chain: Option<&StackRef>, - with_context: F, - ) -> Result - where - F: FnOnce(&mut X509StoreContextRef) -> std::result::Result, - { - struct Cleanup<'a>(&'a mut X509StoreContextRef); - - impl<'a> Drop for Cleanup<'a> { - fn drop(&mut self) { - unsafe { - 
openssl_sys::X509_STORE_CTX_cleanup(self.0.as_ptr()); - } - } - } - - unsafe { - { - let r = openssl_sys::X509_STORE_CTX_init( - self.as_ptr(), - trust.as_ptr(), - opt_to_ptr(cert), - opt_to_ptr(cert_chain), - ); - if r <= 0 { - Err(ErrorStack::get()) - } else { - Ok(r) - } - }?; - } - let cleanup = Cleanup(self); - with_context(cleanup.0) - } - /// Get all Certificate Revocation Lists with the subject currently stored - #[cfg(ossl110)] - fn crls( - &mut self, - subj: &X509NameRef, - ) -> std::result::Result, ErrorStack> { - unsafe { - { - let r = ffi::X509_STORE_CTX_get1_crls(self.as_ptr(), subj.as_ptr()); - if r.is_null() { - Err(ErrorStack::get()) - } else { - Ok(Stack::from_ptr(r)) - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/src/lib.rs s390-tools-2.33.1/rust/pv/openssl_extensions/src/lib.rs --- s390-tools-2.31.0/rust/pv/openssl_extensions/src/lib.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright IBM Corp. 2023 - -#![doc(hidden)] - -/// Extensions to the rust-openssl crate, that are not upstream yet -/// Upstreaming mostly work in progress -pub mod akid; -pub mod crl; -mod stackable_crl; - -/// Test if two CRLs are equal. 
-/// -/// relates to X509_CRL_match -/// (Upstream is missing that functionality) -pub fn x509_crl_eq(a: &openssl::x509::X509CrlRef, b: &openssl::x509::X509CrlRef) -> bool { - use foreign_types::ForeignTypeRef; - let cmp = unsafe { openssl_sys::X509_CRL_match(a.as_ptr(), b.as_ptr()) }; - cmp == 0 -} - -#[allow(dead_code)] -mod test_utils { - include!("../../src/test_utils.rs"); -} diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/src/stackable_crl.rs s390-tools-2.33.1/rust/pv/openssl_extensions/src/stackable_crl.rs --- s390-tools-2.31.0/rust/pv/openssl_extensions/src/stackable_crl.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/src/stackable_crl.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,142 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright IBM Corp. 2023 - -use std::{marker::PhantomData, ptr}; - -use foreign_types::{ForeignType, ForeignTypeRef}; -use openssl::{ - error::ErrorStack, - stack::Stackable, - x509::{X509Crl, X509CrlRef}, -}; -use openssl_sys::BIO_new_mem_buf; -use std::ffi::c_int; - -pub struct StackableX509Crl(*mut openssl_sys::X509_CRL); - -impl ForeignType for StackableX509Crl { - type CType = openssl_sys::X509_CRL; - type Ref = X509CrlRef; - unsafe fn from_ptr(ptr: *mut openssl_sys::X509_CRL) -> StackableX509Crl { - StackableX509Crl(ptr) - } - fn as_ptr(&self) -> *mut openssl_sys::X509_CRL { - self.0 - } -} -impl Drop for StackableX509Crl { - fn drop(&mut self) { - unsafe { (openssl_sys::X509_CRL_free)(self.0) } - } -} -impl ::std::ops::Deref for StackableX509Crl { - type Target = X509CrlRef; - fn deref(&self) -> &X509CrlRef { - unsafe { ForeignTypeRef::from_ptr(self.0) } - } -} -impl ::std::ops::DerefMut for StackableX509Crl { - fn deref_mut(&mut self) -> &mut X509CrlRef { - unsafe { ForeignTypeRef::from_ptr_mut(self.0) } - } -} -#[allow(clippy::explicit_auto_deref)] -impl ::std::borrow::Borrow for StackableX509Crl { - fn borrow(&self) -> &X509CrlRef { - &**self - } -} 
-#[allow(clippy::explicit_auto_deref)] -impl ::std::convert::AsRef for StackableX509Crl { - fn as_ref(&self) -> &X509CrlRef { - &**self - } -} - -impl Stackable for StackableX509Crl { - type StackType = openssl_sys::stack_st_X509_CRL; -} - -pub struct MemBioSlice<'a>(*mut openssl_sys::BIO, PhantomData<&'a [u8]>); -impl<'a> Drop for MemBioSlice<'a> { - fn drop(&mut self) { - unsafe { - openssl_sys::BIO_free_all(self.0); - } - } -} - -impl<'a> MemBioSlice<'a> { - pub fn new(buf: &'a [u8]) -> Result, ErrorStack> { - openssl_sys::init(); - - assert!(buf.len() <= c_int::max_value() as usize); - let bio = unsafe { - { - let r = BIO_new_mem_buf(buf.as_ptr() as *const _, buf.len() as c_int); - if r.is_null() { - Err(ErrorStack::get()) - } else { - Ok(r) - } - }? - }; - - Ok(MemBioSlice(bio, PhantomData)) - } - - pub fn as_ptr(&self) -> *mut openssl_sys::BIO { - self.0 - } -} - -impl StackableX509Crl { - pub fn stack_from_pem(pem: &[u8]) -> Result, ErrorStack> { - unsafe { - openssl_sys::init(); - let bio = MemBioSlice::new(pem)?; - - let mut crls = vec![]; - loop { - let r = openssl_sys::PEM_read_bio_X509_CRL( - bio.as_ptr(), - ptr::null_mut(), - None, - ptr::null_mut(), - ); - if r.is_null() { - let err = openssl_sys::ERR_peek_last_error(); - if openssl_sys::ERR_GET_LIB(err) as c_int == openssl_sys::ERR_LIB_PEM - && openssl_sys::ERR_GET_REASON(err) == openssl_sys::PEM_R_NO_START_LINE - { - openssl_sys::ERR_clear_error(); - break; - } - - return Err(ErrorStack::get()); - } else { - crls.push(X509Crl::from_ptr(r)); - } - } - - Ok(crls) - } - } -} -impl From for StackableX509Crl { - fn from(value: X509Crl) -> Self { - unsafe { - openssl_sys::X509_CRL_up_ref(value.as_ptr()); - StackableX509Crl::from_ptr(value.as_ptr()) - } - } -} -impl From for X509Crl { - fn from(value: StackableX509Crl) -> Self { - unsafe { - openssl_sys::X509_CRL_up_ref(value.as_ptr()); - X509Crl::from_ptr(value.as_ptr()) - } - } -} Binary files 
/tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/der.crl and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/der.crl differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/der.crt and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/der.crt differ diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/fake_inter_ca.crl s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/fake_inter_ca.crl --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/fake_inter_ca.crl 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/fake_inter_ca.crl 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ ------BEGIN X509 CRL----- -MIIDITCCAQkCAQEwDQYJKoZIhvcNAQELBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD -VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD -VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTIzMDMxOTExMDQ0N1oYDzIzODcx -MjMxMTEwNDQ3WjAVMBMCAgG8Fw0yMzAzMjgxMTA0NDdaMA0GCSqGSIb3DQEBCwUA -A4ICAQA+KQrjx/6nKzPggDpKEAzH6XxhUU4CZmyUKFirUdenQBjYLag2Nono75o8 -9DVK3vuK7aeg4tIkUYOBcOEgYx0wEPU6PEE+yOO6KErn7qilN9gRnDHOTrfT0iNY -Rabyeat256gyfrsS84ZB7MnbhecWwC6sP2NiH18VCprH865X6sm5SxAfez1zITV3 -YVtudX4UczqbfpDgP4BU5ERMI71tqj4gKjaHFkC0TGizSphiINDKPmUMbd1w7FHg -Ilj+7pNZS377GCX9JzoTaKLuMBiblkwSTUJic7Z2BJZlTgm18hhfT3AZQDVvkq0A -AEPxQzm3be3ZUb+zJvueTeizVHkd3Eufnk69p7w4wNRQMwfj/icm27RZa3bBpF2o -esr2Ptik9ZBN81oMAakONZ4Wxuf0n/KBd6VBjy6WkbalKGVoZn70Nke3+9HGWSIh -bgbuHt7XAlvsgVChtZWsGsyxYw4p2ku4T2ajUfpxqQY1DDCAThweuHl2FND87Kr1 -5sblLLhA3QdjQ0EavsCV1646xorvoyw7YdHkqCPjRb1FsPWm/IePbtu+w9/VjDRF -KcHgBZBWmmQHj/9ykSI9pA5J7R7Nij6sX6Iu1g2yKiPnXeRQFiwhgsxslNk8eJfq -cK4c4HhnNtXa/c8jHcbymwqkF8Qltz0cbEW1usxZ2u6153pyPQ== ------END X509 CRL----- diff -Nru 
s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/fake_inter_ca.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/fake_inter_ca.crt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/fake_inter_ca.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/fake_inter_ca.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,38 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGpjCCBI6gAwIBAgIUMp+RLATMshrQnbOfPkwKoDNyYhcwDQYJKoZIhvcNAQEL -BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz -MDMyOTA5MDQ0N1oYDzIzODcxMjMxMDkwNDQ3WjCBvTELMAkGA1UEBhMCVVMxNDAy -BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp -b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y -cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAc -BgNVBAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD -ggIPADCCAgoCggIBAK75zkJO8mpqPrD9vlSsfJgW7hbioQpuphSo1+3q9cAAWKFg -TYUcGBUNR/lUVdYqgzo3AglUwldWfeO9mBCIGNSN/heLFt1KzNutBsnE3YEeGKpM -7nHhMzh41otFdpZEZfrsGXGok07dy2mEV0mx72e9ALWbXFhxYsdWdSSVTlBH8xcd -38rAzfAiTbjgAUnTIdCPjAJKbXSDBXGXZ3+iuhFxNtSWyJr1AsxPzESErCPzUQjr -m8TM24lKq69zimTEkN4uwP5U8s2JPzbKosg2k24RbpDgkjO8iNK7RL9SMRUE8daP -+eru5EwN4BlZfsNpZDFbILxbt/2sxqmdsx/Nupa5ZAfcHRs88p4l1D3QIiZzaSEc -nCotM/kmnHWbgeJbkGbC9fD23dNJ29uqZU0fbRnG4HpSutrYD6lPg7PXnMt5tT+f -0+wQds38woXT9qW/kN/2WtkVYDhyVjxCgD8iHOZpz2LUmvJfi7Gz9B/DeW1dzgbo -cGxz9ee+R+T5KcKg+XvHD6slk82GrSM21b7zJeK92bJtjkqxBtQf+YgcKtOO7QX7 -37C1XvSHFnKvyyRJrldJFGEKfK2C66hdASHRdbUhWHFo1AA7VqzKB1fU9M2+ltUZ -zRpYD7X36OtRY1KsHHn+SVvsn404hWwgPblZ04nsMPanj+jsN//6M9r5lMezAgMB -AAGjgaEwgZ4wOwYDVR0fBDQwMjAwoC6gLIYqaHR0cDovLzEyNy4wLjAuMToxMjM0 -L2NybC9mYWtlX3Jvb3RfY2EuY3JsMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ 
-BAQDAgEGMB8GA1UdIwQYMBaAFNnX872GsEyiVMz7up1p/Nq/K+0VMB0GA1UdDgQW -BBT2zEtzeetYaOXAVw0Z9Pda1V9n1DANBgkqhkiG9w0BAQsFAAOCAgEAb7xPI93b -gqpXi4E/hAl7Vh354kKV+1smd1Omf7mQZbTrKBo6o1s0ZpXCbC9+WzK47R21VPNe -EgZ3uzTkURu4AX+5OnAMiWDFAGRSDt8ZXk6ZukWP7OmHsnLCu2strdhrvC4EhMF5 -G9VPIQsTx16CpprrVjzVzJg4i/X+U9dypvnAQeneyz4Ul/kPr6di8bOB3FiBeEDu -dOkVTbnlDa+wMQlCvqlFroFjEHZBKK/+PVIx9cJYj1grmzgqzm2FGUs5Wvcixpgb -uSHCQY9JP8Hy0xl3wx58VaymUK4EMfs612CfzOMClaiooDYuZgzVfhalU6g268nc -PQ9RCRJtJuda4mJ2H3Rag79sIiCVV31tE6tLXjOGebuO0vEB8wOjJc4YW1gtrZFy -GltT+HMdFgjO2c6HynpkmtqS8axQAw2hVaOpdJbDW+R0jHihO98FAgXR27TqTFp9 -sjBacfITeYRXGjQTDU1qxuEfoLnTZRIct1TjRTI1HBT8fl1exxgKUyEawH3MUCo7 -LsTPSNASKkJH3Rp29be9xTejUx2HUUwOE/DIF0HKaN+aAc8TR31/4HvC3bf5VPyD -wIwazpjVDZlQ2w+Wry1zNezNCPKiWtkfkj+TkT32h4ZfQEX8t0MUpKnL1LOSV/8h -PV357EenQb+f9DeC4BIbmipovLaUWmml10c= ------END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/clean.sh s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/clean.sh --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/clean.sh 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/clean.sh 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -#!/bin/bash -rm -f -- *.key *.crt *.crl diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/create_certs.py s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/create_certs.py --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/create_certs.py 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/create_certs.py 1970-01-01 01:00:00.000000000 +0100 @@ -1,733 +0,0 @@ -#!/bin/env python3 -import datetime -import os.path -from enum import Enum - -from cryptography import x509 -from cryptography.x509.oid import NameOID -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.backends import 
default_backend -from cryptography.hazmat.primitives import serialization -from cryptography.hazmat.primitives.asymmetric import ec -from cryptography.hazmat.primitives.asymmetric import rsa - - -ONE_DAY = datetime.timedelta(1, 0, 0) - - -def createEcKeyPair(curve=ec.SECP521R1): - return ec.generate_private_key(curve=curve, backend=default_backend()) - - -def createRSAKeyPair(size=4096): - return rsa.generate_private_key( - public_exponent=65537, key_size=size, backend=default_backend() - ) - - -def createCRL(pkey, issuer, serial_numbers=None, last_update=None, next_update=None): - serial_numbers = [333] if serial_numbers is None else serial_numbers - builder = x509.CertificateRevocationListBuilder() - builder = builder.issuer_name(issuer) - last_update = last_update or datetime.datetime.today() - 10 * ONE_DAY - next_update = next_update or datetime.datetime.today() + 365 * 365 * ONE_DAY - builder = builder.last_update(last_update) - builder = builder.next_update(next_update) - for sn in serial_numbers: - revoked_cert = ( - x509.RevokedCertificateBuilder() - .serial_number(sn) - .revocation_date( - datetime.datetime.today() - ONE_DAY, - ) - .build(default_backend()) - ) - builder = builder.add_revoked_certificate(revoked_cert) - crl = builder.sign( - private_key=pkey, algorithm=hashes.SHA256(), backend=default_backend() - ) - return crl - - -def createRootCA(pkey, subject): - issuer = subject - ca = ( - x509.CertificateBuilder() - .subject_name(subject) - .issuer_name(issuer) - .public_key(pkey.public_key()) - .serial_number(x509.random_serial_number()) - .not_valid_before(datetime.datetime.utcnow()) - .not_valid_after( - datetime.datetime.utcnow() + datetime.timedelta(days=365 * 365) - ) - .add_extension( - x509.BasicConstraints(ca=True, path_length=None), - critical=True, - # Sign our certificate with our private key - ) - .add_extension( - x509.KeyUsage( - digital_signature=False, - key_encipherment=False, - content_commitment=False, - data_encipherment=False, - 
key_agreement=False, - encipher_only=False, - decipher_only=False, - key_cert_sign=True, - crl_sign=True, - ), - critical=True, - ) - .add_extension( - x509.SubjectKeyIdentifier.from_public_key(pkey.public_key()), - critical=False, - ) - .sign(pkey, hashes.SHA512(), default_backend()) - ) - return ca - - -class CertType(Enum): - ROOT_CA = 1 - INTER_CA = 2 - SIGNING_CERT = 3 - HOST_CERT = 4 - - -def createCert( - pkey, - subject, - crl_uri, - issuer_crt=None, - issuer_pkey=None, - t=CertType.ROOT_CA, - not_before=None, - not_after=None, - pub_key=None, -): - sha = hashes.SHA256 - not_before = not_before or datetime.datetime.utcnow() - not_after = not_after or datetime.datetime.utcnow() + datetime.timedelta( - days=365 * 365 - ) - crl_dp = None - if crl_uri is not None: - crl_dp = x509.DistributionPoint( - [x509.UniformResourceIdentifier(crl_uri)], - relative_name=None, - reasons=None, - crl_issuer=None, - ) - cert_builder = x509.CertificateBuilder().subject_name(subject) - if t == CertType.ROOT_CA: - cert_builder = cert_builder.issuer_name(subject) - issuer_pub_key = pkey.public_key() - else: - cert_builder = cert_builder.issuer_name(issuer_crt.subject) - issuer_pub_key = issuer_crt.public_key() - if pub_key is None: - pub_key = pkey.public_key() - - cert_builder = ( - cert_builder.public_key(pub_key) - .serial_number(x509.random_serial_number()) - .not_valid_before(not_before) - .not_valid_after(not_after) - ) - - if crl_dp is not None: - cert_builder = cert_builder.add_extension( - x509.CRLDistributionPoints([crl_dp]), - critical=False, - ) - - if t == CertType.ROOT_CA: - cert_builder = cert_builder.add_extension( - x509.BasicConstraints(ca=True, path_length=None), - critical=True, - ).add_extension( - x509.KeyUsage( - digital_signature=False, - key_encipherment=False, - content_commitment=False, - data_encipherment=False, - key_agreement=False, - encipher_only=False, - decipher_only=False, - key_cert_sign=True, - crl_sign=True, - ), - critical=True, - ) - elif t 
== CertType.INTER_CA: - cert_builder = cert_builder.add_extension( - x509.BasicConstraints(ca=True, path_length=None), - critical=True, - ).add_extension( - x509.KeyUsage( - digital_signature=False, - key_encipherment=False, - content_commitment=False, - data_encipherment=False, - key_agreement=False, - encipher_only=False, - decipher_only=False, - key_cert_sign=True, - crl_sign=True, - ), - critical=True, - ) - elif t == CertType.SIGNING_CERT: - cert_builder = ( - cert_builder.add_extension( - x509.BasicConstraints(ca=False, path_length=None), - critical=True, - ) - .add_extension( - x509.KeyUsage( - digital_signature=True, - key_encipherment=False, - content_commitment=False, - data_encipherment=False, - key_agreement=False, - encipher_only=False, - decipher_only=False, - key_cert_sign=False, - crl_sign=False, - ), - critical=True, - ) - .add_extension( - x509.ExtendedKeyUsage( - [x509.oid.ExtendedKeyUsageOID.CODE_SIGNING] - ), - critical=False, - ) - ) - else: - sha = hashes.SHA512 - cert_builder = cert_builder.add_extension( - x509.KeyUsage( - digital_signature=False, - key_encipherment=False, - content_commitment=False, - data_encipherment=False, - key_agreement=True, - encipher_only=False, - decipher_only=False, - key_cert_sign=False, - crl_sign=False, - ), - critical=True, - ) - - cert_builder = cert_builder.add_extension( - x509.AuthorityKeyIdentifier.from_issuer_public_key(issuer_pub_key), - critical=False, - ).add_extension( - x509.SubjectKeyIdentifier.from_public_key(pkey.public_key()), - critical=False, - ) - return cert_builder.sign(issuer_pkey, sha(), default_backend()) - - -def getPrivKey(path, create_priv_key): - pkey = None - if os.path.isfile(path): - with open(path, "rb") as f: - pkey = serialization.load_pem_private_key( - f.read(), password=None, backend=default_backend() - ) - if not pkey: - pkey = create_priv_key() - with open(path, "wb") as f: - f.write( - pkey.private_bytes( - serialization.Encoding.PEM, - 
format=serialization.PrivateFormat.PKCS8, - encryption_algorithm=serialization.NoEncryption(), - ) - ) - return pkey - - -if __name__ == "__main__": - MOCKUP_CRL_DIST = "http://127.0.0.1:1234/crl/" - - - # create root CA - root_ca_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), - x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Root CA"), - ] - ) - root_ca_pkey = getPrivKey("root_ca.key", createRSAKeyPair) - root_ca_crl = createCRL(root_ca_pkey, root_ca_subject, [333]) - - root_ca_crt = createCert( - pkey=root_ca_pkey, - subject=root_ca_subject, - issuer_pkey=root_ca_pkey, - crl_uri=None, - t=CertType.ROOT_CA, - ) - - fake_root_ca_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), - x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Root CA"), - ] - ) - fake_root_ca_pkey = getPrivKey("fake_root_ca.key", createRSAKeyPair) - fake_root_ca_crl = createCRL(fake_root_ca_pkey, fake_root_ca_subject, [333]) - fake_root_ca_valid_crl = createCRL(root_ca_pkey, fake_root_ca_subject, [333]) - fake_root_ca_crt = createCert( - pkey=fake_root_ca_pkey, - subject=fake_root_ca_subject, - issuer_pkey=fake_root_ca_pkey, - crl_uri=None, - t=CertType.ROOT_CA, - ) - - fake_root_ca_crt = createRootCA(fake_root_ca_pkey, fake_root_ca_subject) - - # create intermediate CA - 
inter_ca_pkey = getPrivKey("inter_ca.key", createRSAKeyPair) - inter_ca_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), - x509.NameAttribute( - NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Intermediate CA" - ), - ] - ) - - inter_ca_crt = createCert( - pkey=inter_ca_pkey, - subject=inter_ca_subject, - issuer_crt=root_ca_crt, - issuer_pkey=root_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "root_ca.crl", - t=CertType.INTER_CA, - ) - - fake_inter_ca_pkey = getPrivKey("fake_inter_ca.key", createRSAKeyPair) - fake_inter_ca_crt = createCert( - pkey=fake_inter_ca_pkey, - subject=inter_ca_subject, - issuer_crt=fake_root_ca_crt, - issuer_pkey=fake_root_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "fake_root_ca.crl", - t=CertType.INTER_CA, - ) - fake_inter_ca_crl = createCRL(fake_inter_ca_pkey, inter_ca_subject, [444]) - - # create ibm certificate - ibm_pkey = getPrivKey("ibm.key", createRSAKeyPair) - ibm_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Poughkeepsie"), - x509.NameAttribute( - NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key Signing Service" - ), - ] - ) - ibm_crt = createCert( - pkey=ibm_pkey, - subject=ibm_subject, - issuer_crt=inter_ca_crt, - issuer_pkey=inter_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", - t=CertType.SIGNING_CERT, - ) - ibm_expired_crt = createCert( - pkey=ibm_pkey, 
- subject=ibm_subject, - issuer_crt=inter_ca_crt, - issuer_pkey=inter_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", - t=CertType.SIGNING_CERT, - not_before=datetime.datetime.today() - 2 * 365 * ONE_DAY, - not_after=datetime.datetime.today() - 1 * 365 * ONE_DAY, - ) - - #create revoked ibm certificate - ibm_rev_pkey = getPrivKey("ibm.key", createRSAKeyPair) - ibm_rev_crt = createCert( - pkey=ibm_rev_pkey, - subject=ibm_subject, - issuer_crt=inter_ca_crt, - issuer_pkey=inter_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", - t=CertType.SIGNING_CERT, - ) - - # create inter CLRs - inter_ca_crl = createCRL(inter_ca_pkey, inter_ca_subject, [444, ibm_rev_crt.serial_number]) - inter_ca_invalid_signer_crl = createCRL(root_ca_pkey, inter_ca_subject, [444]) - inter_ca_invalid_date_crl = createCRL( - inter_ca_pkey, - inter_ca_subject, - [444], - last_update=datetime.datetime.today() - 2 * ONE_DAY, - next_update=datetime.datetime.today() - 1 * ONE_DAY, - ) - - # create signing key using wrong OU in subject - ibm_wrong_subject_pkey = getPrivKey("ibm_wrong_subject.key", createRSAKeyPair) - ibm_wrong_subject_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Poughkeepsie"), - x509.NameAttribute( - NameOID.ORGANIZATIONAL_UNIT_NAME, u"Key Signing Service Invalid" - ), - ] - ) - ibm_wrong_subject_crl = createCRL(ibm_wrong_subject_pkey, ibm_wrong_subject_subject, [555]) - ibm_wrong_subject_crt = createCert( - pkey=ibm_wrong_subject_pkey, - subject=ibm_wrong_subject_subject, - issuer_crt=inter_ca_crt, - issuer_pkey=inter_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", - t=CertType.SIGNING_CERT, - ) - - fake_ibm_pkey = 
getPrivKey("fake_ibm.key", createRSAKeyPair) - fake_ibm_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Poughkeepsie"), - x509.NameAttribute( - NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key Signing Service" - ), - ] - ) - fake_ibm_crl = createCRL(fake_ibm_pkey, fake_ibm_subject, [555]) - fake_ibm_crt = createCert( - pkey=fake_ibm_pkey, - subject=fake_ibm_subject, - issuer_crt=fake_root_ca_crt, - issuer_pkey=fake_root_ca_pkey, - crl_uri=MOCKUP_CRL_DIST + "fake_root_ca.crl", - t=CertType.SIGNING_CERT, - ) - - - def host_subj(): - return x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), - x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key"), - ] - ) - - - # create host certificate - host_pkey = getPrivKey("host.key", createEcKeyPair) - host_subject = host_subj() - host_crt = createCert( - pkey=host_pkey, - subject=host_subject, - issuer_crt=ibm_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, - ) - host_crt_expired = createCert( - pkey=host_pkey, - subject=host_subject, - issuer_crt=ibm_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, - not_before=datetime.datetime.today() - 2 * 365 * ONE_DAY, - not_after=datetime.datetime.today() - 1 * 365 * ONE_DAY, - ) - host_uri_na_crt = createCert( 
- pkey=host_pkey, - subject=host_subject, - issuer_crt=ibm_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "notavailable", - t=CertType.HOST_CERT, - ) - - host_pkey = getPrivKey("host.key", createEcKeyPair) - host_subject = host_subj() - host_crt = createCert( - pkey=host_pkey, - subject=host_subject, - issuer_crt=ibm_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, - ) - - host_rev_pkey = getPrivKey("host_rev.key", createEcKeyPair) - host_rev_subject = host_subj() - host_rev_crt = createCert( - pkey=host_rev_pkey, - subject=host_rev_subject, - issuer_crt=ibm_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, - ) - - # some IBM revocation lists - ibm_crl = createCRL(ibm_pkey, ibm_subject, [555, host_rev_crt.serial_number]) - ibm_outdated_early_crl = createCRL( - ibm_pkey, - ibm_subject, - [], - last_update=datetime.datetime.today() + 1000 * 365 * ONE_DAY, - next_update=datetime.datetime.today() + 1001 * 365 * ONE_DAY, - ) - ibm_outdated_late_crl = createCRL( - ibm_pkey, - ibm_subject, - [], - last_update=datetime.datetime.today() - 2 * 365 * ONE_DAY, - next_update=datetime.datetime.today() - 1 * 365 * ONE_DAY, - ) - ibm_wrong_issuer_crl = createCRL(ibm_pkey, inter_ca_subject, []) - ibm_invalid_hash_crl = createCRL( - inter_ca_pkey, ibm_subject, [555, host_crt.serial_number] - ) - - # create host certificate issued by a non-valid signing key - host_invalid_signing_key_pkey = getPrivKey("host_invalid_signing_key.key", createEcKeyPair) - host_invalid_signing_key_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), - 
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key"), - ] - ) - host_invalid_signing_key_crt = createCert( - pkey=host_invalid_signing_key_pkey, - subject=host_invalid_signing_key_subject, - issuer_crt=ibm_wrong_subject_crt, - issuer_pkey=ibm_wrong_subject_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm_wrong_subject.crl", - t=CertType.HOST_CERT, - ) - - host2_pkey = getPrivKey("host2.key", createEcKeyPair) - host2_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), - x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key"), - ] - ) - host2_crt = createCert( - pkey=host2_pkey, - subject=host2_subject, - issuer_crt=ibm_crt, - issuer_pkey=ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "ibm.crl", - t=CertType.HOST_CERT, - ) - - - fake_host_pkey = getPrivKey("fake_host.key", createEcKeyPair) - fake_host_subject = x509.Name( - [ - x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), - x509.NameAttribute( - NameOID.ORGANIZATION_NAME, - u"International Business Machines Corporation", - ), - x509.NameAttribute( - NameOID.COMMON_NAME, u"International Business Machines Corporation" - ), - x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), - x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), - x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key"), - ] - ) - fake_host_crt = createCert( - pkey=fake_host_pkey, - subject=fake_host_subject, - issuer_crt=fake_ibm_crt, - issuer_pkey=fake_ibm_pkey, - crl_uri=MOCKUP_CRL_DIST + "fake_ibm.crt", - t=CertType.HOST_CERT, - ) - #TODO DER chain - - # store CA - with open("root_ca.crt", "wb") as f: - 
f.write(root_ca_crt.public_bytes(serialization.Encoding.PEM)) - with open("root_ca.crl", "wb") as f: - f.write(root_ca_crl.public_bytes(serialization.Encoding.PEM)) - with open("root_ca.chained.crt", "wb") as f: - f.write(root_ca_crt.public_bytes(serialization.Encoding.PEM)) - f.write(root_ca_crl.public_bytes(serialization.Encoding.PEM)) - - with open("fake_root_ca.crt", "wb") as f: - f.write(fake_root_ca_crt.public_bytes(serialization.Encoding.PEM)) - with open("fake_root_ca.crl", "wb") as f: - f.write(fake_root_ca_crl.public_bytes(serialization.Encoding.PEM)) - with open("fake_root_ca_valid.crl", "wb") as f: - f.write(fake_root_ca_valid_crl.public_bytes(serialization.Encoding.PEM)) - - with open("inter_ca.crt", "wb") as f: - f.write(inter_ca_crt.public_bytes(serialization.Encoding.PEM)) - with open("inter_ca.crl", "wb") as f: - f.write(inter_ca_crl.public_bytes(serialization.Encoding.PEM)) - with open("inter_ca.invalid_date.crl", "wb") as f: - f.write(inter_ca_invalid_date_crl.public_bytes(serialization.Encoding.PEM)) - with open("inter_ca.invalid_signer.crl", "wb") as f: - f.write(inter_ca_invalid_signer_crl.public_bytes(serialization.Encoding.PEM)) - with open("inter_ca.chained.crt", "wb") as f: - f.write(inter_ca_crt.public_bytes(serialization.Encoding.PEM)) - f.write(inter_ca_crl.public_bytes(serialization.Encoding.PEM)) - with open("fake_inter_ca.crt", "wb") as f: - f.write(fake_inter_ca_crt.public_bytes(serialization.Encoding.PEM)) - with open("fake_inter_ca.crl", "wb") as f: - f.write(fake_inter_ca_crl.public_bytes(serialization.Encoding.PEM)) - - # store IBM - with open("ibm.crt", "wb") as f: - f.write(ibm_crt.public_bytes(serialization.Encoding.PEM)) - with open("ibm_rev.crt", "wb") as f: - f.write(ibm_rev_crt.public_bytes(serialization.Encoding.PEM)) - with open("ibm_expired.crt", "wb") as f: - f.write(ibm_expired_crt.public_bytes(serialization.Encoding.PEM)) - with open("ibm.crl", "wb") as f: - f.write(ibm_crl.public_bytes(serialization.Encoding.PEM)) 
- with open("ibm.chained.crt", "wb") as f: - f.write(ibm_crl.public_bytes(serialization.Encoding.PEM)) - f.write(ibm_crt.public_bytes(serialization.Encoding.PEM)) - with open("ibm_outdated_early.crl", "wb") as f: - f.write(ibm_outdated_early_crl.public_bytes(serialization.Encoding.PEM)) - with open("ibm_outdated_late.crl", "wb") as f: - f.write(ibm_outdated_late_crl.public_bytes(serialization.Encoding.PEM)) - with open("ibm_wrong_issuer.crl", "wb") as f: - f.write(ibm_wrong_issuer_crl.public_bytes(serialization.Encoding.PEM)) - with open("ibm_invalid_hash.crl", "wb") as f: - f.write(ibm_invalid_hash_crl.public_bytes(serialization.Encoding.PEM)) - with open("ibm_wrong_subject.crt", "wb") as f: - f.write(ibm_wrong_subject_crt.public_bytes(serialization.Encoding.PEM)) - with open("ibm_wrong_subject.crl", "wb") as f: - f.write(ibm_wrong_subject_crl.public_bytes(serialization.Encoding.PEM)) - - with open("fake_ibm.crt", "wb") as f: - f.write(fake_ibm_crt.public_bytes(serialization.Encoding.PEM)) - with open("fake_ibm.crl", "wb") as f: - f.write(fake_ibm_crl.public_bytes(serialization.Encoding.PEM)) - - # store host - with open("host.crt", "wb") as f: - f.write(host_crt.public_bytes(serialization.Encoding.PEM)) - with open("host_uri_na.crt", "wb") as f: - f.write(host_uri_na_crt.public_bytes(serialization.Encoding.PEM)) - - # store host key issued by a signing key using the wrong subject OU - with open("host_invalid_signing_key.crt", "wb") as f: - f.write(host_invalid_signing_key_crt.public_bytes(serialization.Encoding.PEM)) - - # store revoked host - with open("host_rev.crt", "wb") as f: - f.write(host_rev_crt.public_bytes(serialization.Encoding.PEM)) - - # store host2 - with open("host2.crt", "wb") as f: - f.write(host2_crt.public_bytes(serialization.Encoding.PEM)) - - # store fake host - with open("fake_host.crt", "wb") as f: - f.write(fake_host_crt.public_bytes(serialization.Encoding.PEM)) - - with open("host_crt_expired.crt", "wb") as f: - 
f.write(host_crt_expired.public_bytes(serialization.Encoding.PEM)) - - # store a DER cert and crl - with open("der.crt", "wb") as f: - f.write(ibm_crt.public_bytes(serialization.Encoding.DER)) - with open("der.crl", "wb") as f: - f.write(ibm_crl.public_bytes(serialization.Encoding.DER)) diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/create_certs.sh s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/create_certs.sh --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/create_certs.sh 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/create_certs.sh 1970-01-01 01:00:00.000000000 +0100 @@ -1,17 +0,0 @@ -#!/bin/bash -if [ $# -eq 0 ]; then - path="." -else - path="$1" -fi - -if test -f "${path}"/host.crt; then - exit 0 -fi - -python -m venv "${path}"/gen_venv -source "${path}"/gen_venv/bin/activate -pip3 install -r "${path}"/requirements.txt -cd "${path}" || exit 2 -python3 ./create_certs.py -deactivate diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/.gitignore s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/.gitignore --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/.gitignore 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/.gitignore 1970-01-01 01:00:00.000000000 +0100 @@ -1,4 +0,0 @@ -*.key -*.crt -*.crl -gen_venv/* diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/Makefile s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/Makefile --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,7 +0,0 @@ -cert: clean - bash create_certs.sh - -clean: - bash clean.sh - -.PHONY: clean cert diff -Nru 
s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/requirements.txt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/requirements.txt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/gen/requirements.txt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/gen/requirements.txt 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -cryptography>=39.0.0 diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/host.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/host.crt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/host.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/host.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFFDCCAvygAwIBAgIUO+SNGBgRvkmR/lKiZhiN4l4zS70wDQYJKoZIhvcNAQEN -BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl -eSBTaWduaW5nIFNlcnZpY2UwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0 -NDhaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp -bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h -bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv -cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw -EAYHKoZIzj0CAQYFK4EEACMDgYYABAF2iBzYS4tt5xI8fpAO33jn97aEmZFGsSY7 -qLhshEfwirbUacKxKO2eHDUBWAWs09MCM9ORIvfi+KocKxR7eIO0BgDDRVbCXPkv -Kc5mvUQRTWt7PHjhOj+QvbkAQPaHblJi93imGpN9GzSALrJX404Gct1fKjR63aoo -IDUJKnDDXC55c6OBhzCBhDAyBgNVHR8EKzApMCegJaAjhiFodHRwOi8vMTI3LjAu -MC4xOjEyMzQvY3JsL2libS5jcmwwDgYDVR0PAQH/BAQDAgMIMB8GA1UdIwQYMBaA -FN6M1/Do0NgBB3F9b9loWSA+sGd8MB0GA1UdDgQWBBR6ESQ+J+iXQe0a/KZ2+N+Z -8a9SZjANBgkqhkiG9w0BAQ0FAAOCAgEAVWJHoCxxkqh7b+y93PY4KPy4jJC4gN1S 
-dAPqttD/yteJ/4mbVel1/KNSoQBk5EpJmRqeBwHCgGJaT/TxYXNw/b8mRPxe/xbb -wieZMqlSmH028UjYDku1eM0IgHISgoCesIR95D5iAOWbMMVUwIHIHTfmhK7DmZVe -SPf7RIkctrpYxZh0Gw8KLZO6Mfy/9tq3dps0A7KS6jjdrF+M9LavPGwFvtfvRMTi -rdteByO2saGAKDvrjtievwlWCNBJlKV1arW9krN7eqJY5YO6eRbX6UjuhbPRgjte -eZ4jL121TBJaKZU7Q/lvYHIWfzstwQdiem2Ua1GyiiEvPZrQlmqQ3gDBtwJQGB4Q -2myP7MY7THiKObjaB8qRsVxKM78ktwAtAYSZv7gZlmSJ/uTMzDV5D2TQxqs7zwCj -sV+psUn4nvh58xP+DW+MYbF/Cpmzvul9FjMKBs270vE1q+gMot27rbQHRRJ4lVN5 -khiG6Oi6blVkPKExIVIiaZ9diXK6NhtWp15PWljNiDxZO+zpkeuw7cKLn/idzmvP -Gcj6m7DqcdsSIHNKbR5iM2VuDhg/j8uBD3uF2Wlymp31TBQgdYWSihJpwZKHNqJD -uq9SmegwI5gYg64KwABZM9hGbl/krXt/0CeCR5HRc+fthanKx/tO2tbCuVT3FR5J -XpKNUy1D78o= ------END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/host_crt_expired.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/host_crt_expired.crt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/host_crt_expired.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/host_crt_expired.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFEjCCAvqgAwIBAgIUMXh4o6xcPRTKpYDr+YgZnmWeatMwDQYJKoZIhvcNAQEN -BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl -eSBTaWduaW5nIFNlcnZpY2UwHhcNMjEwMzI5MTEwNDQ4WhcNMjIwMzI5MTEwNDQ4 -WjCBtjELMAkGA1UEBhMCVVMxNDAyBgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5l -c3MgTWFjaGluZXMgQ29ycG9yYXRpb24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwg -QnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3Jr -MQ8wDQYDVQQHDAZBcm1vbmsxFzAVBgNVBAsMDklCTSBaIEhvc3QgS2V5MIGbMBAG -ByqGSM49AgEGBSuBBAAjA4GGAAQBdogc2EuLbecSPH6QDt945/e2hJmRRrEmO6i4 -bIRH8Iq21GnCsSjtnhw1AVgFrNPTAjPTkSL34viqHCsUe3iDtAYAw0VWwlz5LynO 
-Zr1EEU1rezx44To/kL25AED2h25SYvd4phqTfRs0gC6yV+NOBnLdXyo0et2qKCA1 -CSpww1wueXOjgYcwgYQwMgYDVR0fBCswKTAnoCWgI4YhaHR0cDovLzEyNy4wLjAu -MToxMjM0L2NybC9pYm0uY3JsMA4GA1UdDwEB/wQEAwIDCDAfBgNVHSMEGDAWgBTe -jNfw6NDYAQdxfW/ZaFkgPrBnfDAdBgNVHQ4EFgQUehEkPifol0HtGvymdvjfmfGv -UmYwDQYJKoZIhvcNAQENBQADggIBAEsSd5vd7Vk1y1YsE4eWkrBrMElYa3/O6G2Q -oMZFo2mzzDH50NBEwYG4K+SjEmqJbAErtNHsAcJLWlvORiNoBmPcB6FEMifgCvuZ -zbSEiL/tt8XLI1M04DdKjVZ6AIrdhMKPz/AaRycnlHjbq0R0fEJP/SnWxtGnHewB -QGM8TDGCzXrwXsOr50soxQ+cbXFJ6eQyGrtNP0eyJ7kkIrz6+SJ0dQPXxoZpdtfY -XEv1OagX0tAuDUG26do6MjwC1qiDKoLdkxSFRkCvyRHqFapKlLhzBMrLhQ+Hl6E/ -kD5ORD2nMvTHcHWbjb7Mr6tcxKG+7CcJO0hYJbdfNCcKYc3EmE49wazSTBKvWfJp -XObVEGeM/11cdcg6Li1jw/JrrexEeQpjgoNuAgGKRmxzJBOCPNkU8jGs5QEqFCyw -fpl7BA+ydW2/zAvcr7mZZgyK4KiRTdK5VTGfXuwTv+Q3hsE0CZ6L+byNCZajyGzs -xq9ydh2G4kIlWFzs+2gSxQWYRiOGt6W7FVdiPYOnAVgzmRJdfR1qVrWZTGbPxJbX -1O3qYBPQE2tU8xsyl/HuikGProda2xwfTjmRhr7DPYyF75nGPvtGm6vwBBcWm+xl -jI0a/dHqE5MR6acOrXYNSFflPcfd04vJ87Ajx/wFr0Glo/8LWtzMp0nFpvif9LDX -3sGEtRA1 ------END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/host_invalid_signing_key.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/host_invalid_signing_key.crt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/host_invalid_signing_key.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/host_invalid_signing_key.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFHzCCAwegAwIBAgIUBphLhhHJL2K96bK/cfgBk0d+7gkwDQYJKoZIhvcNAQEN -BQAwgckxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMSQwIgYDVQQLDBtLZXkgU2lnbmluZyBT -ZXJ2aWNlIEludmFsaWQwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0NDha -MIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVz 
-cyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBC -dXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsx -DzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZswEAYH -KoZIzj0CAQYFK4EEACMDgYYABAF709fZwb3pGZWfHLcCG2fLNcvNh6IqPHwfjRYp -brM5BEE8XoXxcCAXwxo4EAGOcuZBRKP6Ofahek7ppizV8bkPMQDCIgNq2N8mwPFO -99CbyrZs4ZMeAWWWPJiHXUHbWQq4ko8w4UUT7nUIgdHQwWM6TdZml8ke+LE9Jxtx -h9KkfCPi/aOBlTCBkjBABgNVHR8EOTA3MDWgM6Axhi9odHRwOi8vMTI3LjAuMC4x -OjEyMzQvY3JsL2libV93cm9uZ19zdWJqZWN0LmNybDAOBgNVHQ8BAf8EBAMCAwgw -HwYDVR0jBBgwFoAU+jIyiVonTYe6GuSPiAkxA65qou0wHQYDVR0OBBYEFJdEECIf -7UdEf08saThHZDzSfxjhMA0GCSqGSIb3DQEBDQUAA4ICAQBgvREPqqKvAZM3q9pG -5S6wtUspz1Y1sBD4duPEnMZ7Vf9a1HRPrR4vc5ncFIcyS/U/UusvWgFMYoa6WIZR -l4OqRplKF1pwCaQ2F/8OdGMV37iUqZuN6V/GggbFXgMFK1dH29T6h4VtoKC9yScQ -ToHQLuz4ymkd2BwxYix19M6QwdrqomjJb2/zrc7pvMZ0k8KKYi/wt6tlz7FDvsxF -VSDf29gm98kfDJfzPfAC5D93YruAohsP8SakVdA2/YbTkDfImT8ggSnsE83upSD6 -ssjKPPNRunLeCKLb55/Ikcok1iyGhfdmkJvdIHSEvyNp0p7mrohz6l748xdKkKNt -9hOzsfNjThq3zp97ND7M+knqNuzsZIkcV/OUdxNBootIrJXvfeqpaw++5SfWvf+6 -1dHJQpDU3cXKAQ0/RvvqLC+aPvklk2efuvBKIKP9X4WqcP+l2P19GMaM1SZtr5S5 -OWMxqT6sW5lSX5Smm4rMB3UmLDS0SXxHIIvFEQSABiWb6Y0ibDj8a+YrWVyQNH1K -fCSNvsW1r03D06q6gp9fxL1hFBLUw9ooi7ewPfNmm3doe2R0TQpE0pWkiRhhjDjJ -RcIPp+gfYDtt0LcoYVpNdKtLoRPVtO2K3zeU0ezW4PL9Q0tjxh2K/+1+8UEv7nJD -sFg4nQMeygdSK4w0ZlT8xqXVJw== ------END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/host_rev.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/host_rev.crt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/host_rev.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/host_rev.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFFDCCAvygAwIBAgIUGuqbEx5X1pFQfF4T6kIfdcrUwbMwDQYJKoZIhvcNAQEN -BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu 
-ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl -eSBTaWduaW5nIFNlcnZpY2UwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0 -NDhaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp -bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h -bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv -cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw -EAYHKoZIzj0CAQYFK4EEACMDgYYABABphgYAOqZ4uJPUtVIqaU7UsJgz9+xMmGDq -V7nFimGmkbmqPLT96jIyN4CWdLzfbxP0xvIklkEoOm1xV07YR2LHHQATNEPISTDH -7fySZ47QVc5tyECpZVW7JvSvWKA/KiApAbB1ixErfDrqW1nG5IUtEbDYJgtGwPI/ -+I7e9cU2wkA5mKOBhzCBhDAyBgNVHR8EKzApMCegJaAjhiFodHRwOi8vMTI3LjAu -MC4xOjEyMzQvY3JsL2libS5jcmwwDgYDVR0PAQH/BAQDAgMIMB8GA1UdIwQYMBaA -FN6M1/Do0NgBB3F9b9loWSA+sGd8MB0GA1UdDgQWBBTECeFXnyfTnFazbR0K9cYk -lS3O0jANBgkqhkiG9w0BAQ0FAAOCAgEAOjEE4/KdvcJZbloSGLue27FSrhExvUJ3 -tYS3rs2xg3Ua2daCioI00VrwIN2Fjisqvi10Nv6+NWz5w1220AJyjlmPxvWFcPco -sXLAOWDhi217JaoJ+RzavpOwzhTffEpvPwR6RU4A36vvonc4jm3mWs6F1i5T6YPi -ZaYuk3CRme6WX012rBhIc+heTGh5ZDwwPmGDMLXdpsu+2+3sCPxUW6eQcOWoXkhJ -jn+n6mU18JdN5+wU6Lig5uxXnoP1VN8Xog/mmKV4ThVAS8k9iS5wFK4jl27n6XZy -wfd65WWlm4MMEvKNruj025aSPJp/bcnchBNfnXXPuI5GnYS2cC3TXHd4XT3r3pYn -qgLoNi+AkxnTnxEv5lg+oE+yTMNxDh4iiYkX96ljamGbUPBvbD7bi3Oc/7EOVra1 -BmCGmcjToEnm0e0it9yyuYwKQ6nTz906W3XzaFB+awnXZcGEMgwwdvdal8+eki/r -ofT0nO6cuXbbPYc0rSs/2f3WxjDVmQtiVoJ1dPYIrTX/4lbrsYWVS016Y0UcZ/D/ -/qWWWZIyYpzPasTEgGqtwb5WhBvz3RAFTePefFlTzBHhSPk0Tsu4+W7zHpapfZ0M -0LXT1lGR5WBmur002vbTm4yt7tzdypMbL2i70WGEp4mpRohmBG1m9hYGcelip3iL -Im6vFBNWpVE= ------END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm.crl s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm.crl --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm.crl 2024-02-02 17:49:44.000000000 +0100 +++ 
s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm.crl 1970-01-01 01:00:00.000000000 +0100 @@ -1,20 +0,0 @@ ------BEGIN X509 CRL----- -MIIDVzCCAT8CAQEwDQYJKoZIhvcNAQELBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD -VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll -MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTIzMDMx -OTExMDQ0OFoYDzIzODcxMjMxMTEwNDQ4WjA8MBMCAgIrFw0yMzAzMjgxMTA0NDha -MCUCFBrqmxMeV9aRUHxeE+pCH3XK1MGzFw0yMzAzMjgxMTA0NDhaMA0GCSqGSIb3 -DQEBCwUAA4ICAQCGdm0ls5MXM6MUI0wR7qOitKh3TIfRnCvhSibVPskjlBZaBT01 -F6xaQGyWVR19IzQNn9GxOGMqvRy/oSihznBeA0+e9497IOPXKop/JsypZR101539 -ntVt691ncmctxKnb2nT4dw7AuiLTxMVzdJ/ouXovnPcgSv/r8lwBo1fXxOgQlQLE -Pi126WFkkgBK7EANnAXiXVWvdM6p67jl/AQGOVHp8MeXowejDdVqKzoU6yyMRDeE -uEU4QibvH/J8VPLC/A2oh4XTZbJ5rB6u3rz2fFGI03XqSrJJHbNenGVQ2ar5qJeI -6kHNDIuuwXN+7JPFf8JXdk8L0G88rQsnjrcm0GzQPW/nZ5bN3FA1V139rdOhSBLR -QgaKzju8Le/Zem317ykOJbC6nDBORmpBVzXYdXA9RMg4PIs3kRVqp/RMiiClz42z -w8c1khmcH6FO2Q5Z40vq8tmSLhbu6PgGIPIya/OQacgDjDiDGcWGvqzVCWv/6AoL -em7b5Piu4yznVkEUA2h3LvoigYTJCgHFrQnoIcuM8vx8QkDjXxSHeuy3wTd2l67S -pZp+jSJPdqWe2PWALJrYuq736E2rZ013eLybHKYOkoJP6ZLewh4gsomO0bpxTL1U -TjPsJncaAP/gLqHi0QD4+irMlo6Q9YpEIkbp9ScEoVMHRL9A/vBQfUJfZw== ------END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm.crt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGyzCCBLOgAwIBAgIUeGuWhNwpt9CPzFJ5UJAKfkIlLDcwDQYJKoZIhvcNAQEL -BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs 
-IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg -Q0EwIBcNMjMwMzI5MDkwNDQ3WhgPMjM4NzEyMzEwOTA0NDdaMIHMMQswCQYDVQQG -EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD -b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo -aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv -dWdoa2VlcHNpZTEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2 -aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsXm4EgSxVmuNsKgo -sfJNxNQFkdbpCxxyMOuyBFyGKxSwiGl20VWB6itsG+b9NOmAI6D6FPvLPuB/gSq0 -HR5FuSIkmp9AUx6Xi6lVA2L7qdINo1OA4nJKe5hLSHfjWn36DZFrDyoODlDzLJrS -O4h4QSP1nDSFhk1ilsR1czlvKreXkJw5FsUHRhZGG6idLoK2Ibrne6WsVhPG6WNA -abuvQFv40cQWcyRoePktG8ImnNF5GDewXwtpzARSQj9jTL/gE77DH78a77J8+H+h -d5guiBbPZK8gNkPe0CjD0B7tUx/+sByoaSeQffFG4Cnu5JJDqFq9LXPjtlG2nXSB -Qh6Sc2gzP3jKV+ZcSwQw9AxEQ0ZB/VIPsR8KZKReDDztEpcKxwxlh7bEix/0Y1sv -zT7/I+m5kufdDG6j3hHHzniKXL4b/WyedSfdnVqIJw82FgPFgyIY6F/0ccLdkhAm -fLtQJBHc3UyK13l0qVJxhAFdz1Q0zfScBS6qM/Gnbcdc6MY9/bZdIK7E+4op5iAM -kvQHap7qnArhI8VQ1bcXYlQ6asPj4e10lmzroiBHM4N/Yuxv38tmtUCudSB+EcXi -EgJIOLmLq2ZACRzug9KPyMXOD/Yxz2wPgs6I3gvrB22w/MfC77wMfEMuQQk4cZel -TFKosHgLjvcLG+zx5yh5ZVFcNV0CAwEAAaOBrzCBrDA3BgNVHR8EMDAuMCygKqAo -hiZodHRwOi8vMTI3LjAuMC4xOjEyMzQvY3JsL2ludGVyX2NhLmNybDAMBgNVHRMB -Af8EAjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAfBgNV -HSMEGDAWgBQRqYdWstn6ntusRZ8LjyPFEicL5TAdBgNVHQ4EFgQU3ozX8OjQ2AEH -cX1v2WhZID6wZ3wwDQYJKoZIhvcNAQELBQADggIBAEqkZawT89MngrmMTREjYGKZ -+qrm7uQf9wFiM7H7Xs11OEJ5PkNh4jNnnIsXZxc8rr76x+zLr4F6aI08AQn5QOy9 -JXGIbrMHLebtn198aIOYbxZisbXnBlVO3Xz+k8JLdzsu5zxjjaDY3/a63X2ccStJ -U53pSqvgJi6/AvMPA1CPazSjxu6na8rYz6d7c/god7OF0qwQ/ePqd4uJOaImm7HH -CCkwMPYO7UyOWU5CSPMcJ86SGYhvYkoM7wZeJoukK6HlKDI1SRubiTFAx+Hbyk1R -dyVY9vmIOeUlsGEMgsW836g++dg8efRIbIYbSBLQhUL64lLA6wZJ6/oCtC29aX+o -UfxcGUROrpZ5Xi4b4sn0vW4rYq65BzlU17x45XsZMh11hX9aPNE4B62Jl2XLjX3P -Sedu7b/QB6jWpwTAdH96LeLxVepAWiVcFBApBqpu7wxRhCs6M1t3Gh9nvlPE5NRz 
-zsmx+HVZIgWoP3CgHmiHqajphL0xp6R9qJOyzAVChsmbQYvr+rfaXMv24KBvJYgc -xq5iCP7IccgC6WlhpWyAoTSuhiStTZJtlCKPZqc+HRcuf2fLWXip8YKNHgEtTxNz -7citBXZNoRDFULWwiYDnwhGcZ53p5zPLABYKZdfNHdI+tV92AbzYyQaV0ZwcxL9K -ObAAlDzZKE8vJwT94E3O ------END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm_outdated_early.crl s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm_outdated_early.crl --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm_outdated_early.crl 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm_outdated_early.crl 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ ------BEGIN X509 CRL----- -MIIDGzCCAQMCAQEwDQYJKoZIhvcNAQELBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD -VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll -MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UYDzMwMjIw -NzMwMTEwNDQ4WhgPMzAyMzA3MzAxMTA0NDhaMA0GCSqGSIb3DQEBCwUAA4ICAQAa -bg7llgCL+OdmZQEeBKey5Dm/NxJGJljoT/sxFbQ+86lwACh1mbdxVkaPyUC/oE9T -4ppC/eHoaRcdmvN4FlIYrUhqnrTGD4s8VSoYvJ7+f5ZFGjUyflnMwyaal21hDaG4 -2SZjPVOQ0ksEA3mrHE1MTRVFqFl4ZFxGhh7NYMoOEkffM1UooChWHTTBMz67nmbh -Ih0MDHhS5J7677K2N05402Z3v3S+Y8QEjIQjDsTC1S9V607eEfG9YEND2KicQKPH -r+CK9/fVaiTz9wgUEyybps4MFoWBuUqqRebQoargFZW8w329LuS6VokbM6BSduOT -qaYFtzp3DGZbvKwUGjiGVgB/PzzB1rv+2+i/EI3D4RJt+k8xvlBIIONxwK/hcjI3 -/i6hJueQpeCuasfX8ck/uKzSf0PhCmyLwWxQux66FJq4sXqWoqwf5P/U+tbB8zna -0cX5/f8+rS7ansbxjeiCHUkbdUEoY7k7KMSNUrtqbgQ4VyjTziysTbSEG7jkb4ri -Jaa9mDfWCkdwfB3TqDofWRkOdNpPTkj9TVZJ5FdV1h39D9O7B+VvedIiVod/KhB1 -DyOa44YpkEcS51PuNAC/exUd6nOv9Mz+WOUP+RrxHqndRYE0RGaFP9vENyks0Kga -4CLB/IbT2rmpLivK2i6i3NOqzcykHOab3LtwOLDDmg== ------END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm_outdated_late.crl 
s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm_outdated_late.crl --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm_outdated_late.crl 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm_outdated_late.crl 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ ------BEGIN X509 CRL----- -MIIDFjCB/wIBATANBgkqhkiG9w0BAQsFADCBzDELMAkGA1UEBhMCVVMxNDAyBgNV -BAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRpb24x -NDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9y -YXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMRUwEwYDVQQHDAxQb3VnaGtlZXBzaWUx -JzAlBgNVBAsMHklCTSBaIEhvc3QgS2V5IFNpZ25pbmcgU2VydmljZRcNMjEwMzI5 -MTEwNDQ4WhcNMjIwMzI5MTEwNDQ4WjANBgkqhkiG9w0BAQsFAAOCAgEAh+81L4ws -rvOC1j7nwxthaIE4m8alnuaq9h9uxYfDZooimvGwhCjfcoWmfSSA8lt3vbMJ2vRe -ZwnXXCyGWmetZ6ObnT0jwL0QuOYEuIdA8QHleBxLYBeFr4k0O9i8i3VUG0YcgZuN -H+lcpGdoIx2WV0cZ4rbzZ1cNx3bieaivqNoLQAy7g9jTizmTfY5ZvlvuG3iqSG+P -08APxmtt1qFEm1LVu4SyUSyoGB1NaxeoziMITQdfFqHoPRsu7Wdyuqi9f5irIPwM -VQNKs/Y+3Q3S8YkTW3yqxhj4HdSKJE4qVBLMYm7muirDFWo25u2sDX1LJHBsQLvV -fi7cGY0YnOJL2Y7A3XKDuqtZ34zpXg3Hhqpa9RF55K2u5dYUaPq8MEQUHK67II1r -YZAwfarhijQQ6t03E0vrzPVYpK8VjNUunYKQdOBS3OkKgXCwEMuqQDrps98BgDQ4 -qfbVfxwm9XEHJZaX/qFR0sp8OQd/SD5dnS3DBl0Pp5w+w2xIaSA7QmBpDqWY66Hb -cJq4CLOKTasHTddHKz7O7zIu8QhwJGLabtnx18iaHTNNHaTF6k/51pwvA3HkJgds -HVcUNljsDNSPE258JwR2XoQAUu6VuFwRzgD7lGwdI70CpIBeAP1TRSius5RsZB+u -cw+872CILlIdNJ72lzMPWQNH+IB1RU8U/eA= ------END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm_rev.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm_rev.crt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm_rev.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm_rev.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGyzCCBLOgAwIBAgIUTPiBWJn8k37onZ/aYgLxudbD3kgwDQYJKoZIhvcNAQEL 
-BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg -Q0EwIBcNMjMwMzI5MDkwNDQ3WhgPMjM4NzEyMzEwOTA0NDdaMIHMMQswCQYDVQQG -EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD -b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo -aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv -dWdoa2VlcHNpZTEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2 -aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsXm4EgSxVmuNsKgo -sfJNxNQFkdbpCxxyMOuyBFyGKxSwiGl20VWB6itsG+b9NOmAI6D6FPvLPuB/gSq0 -HR5FuSIkmp9AUx6Xi6lVA2L7qdINo1OA4nJKe5hLSHfjWn36DZFrDyoODlDzLJrS -O4h4QSP1nDSFhk1ilsR1czlvKreXkJw5FsUHRhZGG6idLoK2Ibrne6WsVhPG6WNA -abuvQFv40cQWcyRoePktG8ImnNF5GDewXwtpzARSQj9jTL/gE77DH78a77J8+H+h -d5guiBbPZK8gNkPe0CjD0B7tUx/+sByoaSeQffFG4Cnu5JJDqFq9LXPjtlG2nXSB -Qh6Sc2gzP3jKV+ZcSwQw9AxEQ0ZB/VIPsR8KZKReDDztEpcKxwxlh7bEix/0Y1sv -zT7/I+m5kufdDG6j3hHHzniKXL4b/WyedSfdnVqIJw82FgPFgyIY6F/0ccLdkhAm -fLtQJBHc3UyK13l0qVJxhAFdz1Q0zfScBS6qM/Gnbcdc6MY9/bZdIK7E+4op5iAM -kvQHap7qnArhI8VQ1bcXYlQ6asPj4e10lmzroiBHM4N/Yuxv38tmtUCudSB+EcXi -EgJIOLmLq2ZACRzug9KPyMXOD/Yxz2wPgs6I3gvrB22w/MfC77wMfEMuQQk4cZel -TFKosHgLjvcLG+zx5yh5ZVFcNV0CAwEAAaOBrzCBrDA3BgNVHR8EMDAuMCygKqAo -hiZodHRwOi8vMTI3LjAuMC4xOjEyMzQvY3JsL2ludGVyX2NhLmNybDAMBgNVHRMB -Af8EAjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAfBgNV -HSMEGDAWgBQRqYdWstn6ntusRZ8LjyPFEicL5TAdBgNVHQ4EFgQU3ozX8OjQ2AEH -cX1v2WhZID6wZ3wwDQYJKoZIhvcNAQELBQADggIBAA1ipq2MIRdRIiUZ6M6AjY91 -L5iUpgKGrmhna/gg2b6AlugDOtVDsfeFm389aSplwY+zcJZ2AbUXhXe8RVHOEUwf -O7iEDWsMZ/wxit5ZotJ5kzr49n9RTtomgbQSqxWadLq6Q9hYOWg/cr1FvXwL5tyO -rHyuRXhkUMmuk4aQ1sfybfPp0PJKbzWu001Q4rxbJTlaib9b+CcsyLWHs06JXVKi -755Lg9/ND1bQjW2CMJbZ2rm8V7oh4J0tJuF3DntOjyOk+yosckTF3bFffGPR67WC -RkZebIKx2Rh6OXMrQTLz9ldqWo0cW0O353gSmrMxExWKhgoDrZKc4UeOanweDTqO 
-4lU0RP/4naDuQl6/FE0rUzUkfAJmsKIuI4G1lQNZhaqUH/BdN1du094RON0T5agK -etoBcPpNpxOn4N86TJaYoDjRSDpKwxXVKZs9lk5GRLxRhqtY3iQYVZrz2gY36Ri4 -lnuKZCeFmfjHvvktmb08EmrvGiQAhXzI8yfeVhlwP8lhtumIO877VW++tedK0z0D -6aBz1LsVI3IbinDZPRsWl0EEi+JFmpIktmMTdSn+0vTs7XJjbBZ+VKaPPWOG/Afg -Qav6+1AnnMtieGfrj3tyyfKo0vPSZKbdGzEr79Ukl+cxLdG5O5qZTy4ASm3EOw36 -p70HafhN8Vioa+uhQObP ------END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm_wrong_subject.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm_wrong_subject.crt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/ibm_wrong_subject.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/ibm_wrong_subject.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGyDCCBLCgAwIBAgIUOgU+VHYEK4Q4dZokfM01Ok2XBtUwDQYJKoZIhvcNAQEL -BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg -Q0EwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0NDhaMIHJMQswCQYDVQQG -EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD -b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo -aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv -dWdoa2VlcHNpZTEkMCIGA1UECwwbS2V5IFNpZ25pbmcgU2VydmljZSBJbnZhbGlk -MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA61P3Lfow3E6wGpMJmUWp -wsfFZwcuKSf64JXDn1pVJLUcjTwhApshnGaSBb+knlpwvsO1evrR7re9ZRh51730 -IintOP5IA3CSGd7fqmTpchx3kdFOndrXS7BwAWuB/eZ1qzKeOYpyAS3VSE4FphYi -LSxGfwSUl89pwYyWyqGl21hv/sBL6cc+Lm55vXbeRwWKW9K/w7BkhtK1zx1xm4i9 -4x1aXJ6DGWQpIk1sVDNPtzQVZYvmR1Y10/r75sNgA/WMiZx3/2VyCREnV+UXfvsX -fyMLcbwMWWt6psdhtoGFZ2sLJka5ZNvttQKfbde4TA3I6fpsrMi+oTT9YO3it5zG -ORCUC+j5B+zrzbSv+RgL+SnnAPkHqufb1a/4mFs/uTbjUYHN2/rhObnkLK4Xtfly 
-FBlivxx5haT9o49YkCv7l57+We4nafBPMw96ac5AGzA0gVwdMTeRZ3joT2Pc/zSf -H5E9wg3MZfg3TN2THB4S//r1/XOaA5F4BGjorbpPhp1/YaeF0rRMlAbZVKXHZJBR -n5qN8hD/V2tXviEkrZRL+iW6ltkslsjIkzrYSS+6goymUjWrkGjmcsTo0SStHE0p -7pOChLwpUtpaElemp1NDzVJqvrglWPkM1ZIIjxpk23zxKj7V2FazqP6PVuyeWdkj -VYN86ULDRG5j1hfn/n0HEC0CAwEAAaOBrzCBrDA3BgNVHR8EMDAuMCygKqAohiZo -dHRwOi8vMTI3LjAuMC4xOjEyMzQvY3JsL2ludGVyX2NhLmNybDAMBgNVHRMBAf8E -AjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAfBgNVHSME -GDAWgBQRqYdWstn6ntusRZ8LjyPFEicL5TAdBgNVHQ4EFgQU+jIyiVonTYe6GuSP -iAkxA65qou0wDQYJKoZIhvcNAQELBQADggIBABcvU42Z/T7hT8ke5viG2x7FJmwA -gkrphOYiooB77sxY+cjxaYsct4yvFXvwuNAcstnlBK0jJRaWzLwswR1t2bXbRwQF -kjDO3br4ALRMHkDPa8iNioogyap8X6r40p7rvfnudKX0+MruLHXN3ZM2ltucYAYU -oR/Wa04KxDuZQHeKrDosAsJCv5MwgF69H3oPbhspFQsP2V5fFsxupnWFzVlwPfcQ -0lgHVC3nZ2Rj7ZariT/px3nfZ6Eg3pRyK32r2SQWVN/oVBEd5cCTONvD7Hr2SrtB -9D58f+vDyVNWM5OED7NqlNDaQw2x9BMjdEVYTGGRW4IXPbXWH08NUcEkT1Tx/vUE -EPlTgwt88Fca03yvAn/8Daw7ezsJNAFwDpPDcQhPi3vg2l32nuRkuQ5641hJiTGw -TEtpJc3dg3FJymG999rOCLLIheNLMehEDMPZHqG7XeEg/42F0580MdkOenMpjhwg -ZhrommB85sZcGBOwc63VMb5PPInYDQi5PXz9Tpann/VliVd4Dpnyn0XVy73VccXu -WWgDt8gJKWUpRiJ6MZzEKkBrXYjPLmrKB64usEJNQ1e2NIKV3bwvH5K3PmibyVBu -9fT5t0VXQpNxxlwngCjvjtt0D/frMCJQXpXpnz25aQDog9bnD1yl02SzxdZaVK05 -LZP4wR2beOGlz828 ------END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/inter_ca.crl s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/inter_ca.crl --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/inter_ca.crl 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/inter_ca.crl 1970-01-01 01:00:00.000000000 +0100 @@ -1,20 +0,0 @@ ------BEGIN X509 CRL----- -MIIDSDCCATACAQEwDQYJKoZIhvcNAQELBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD -VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD 
-VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTIzMDMxOTExMDQ0N1oYDzIzODcx -MjMxMTEwNDQ3WjA8MBMCAgG8Fw0yMzAzMjgxMTA0NDdaMCUCFEz4gViZ/JN+6J2f -2mIC8bnWw95IFw0yMzAzMjgxMTA0NDdaMA0GCSqGSIb3DQEBCwUAA4ICAQA0FKRq -yESt1SYxVW+BlFjeDWCf1GL471q+603JiRek5iEVt+bZXrII9y9lXsYhZ1d7BxCt -Wyo/497tPMwRKSiPJwrXPODQn3DTl6EM6VB+w9Kipmm3Fq97TSRBuiDkYaS/nUHh -nDfj40qb1tc18SgBXVLSSiu97U0JMAq8AHIfMzlnhIe4fJ7TJU2TFSrkFAOUVqZs -p0/J3aDccYJBnUnEeGD44i80wd3xmuOoBDqRgKcasYsv8QmSFhbD4BTvicxKueDD -kiWTFbgNTDQU9Prp8gYmuSOaQoK6S+8DlO80IRTDwpDq1nQaf5MvwfOqfwQVgAjt -RgrC9BI1RvQu0OyihvcqOh9EEj5O9D/nrgTdsWYJGF/otb8lL6JdXDOAjqWSkZVA -gKDq4NPUskgKzoccD6HY5wgIvSZTV8bXjz2ST2oddfg0/7akNBEmq4TQV9NHb/G0 -AihNJgd3HtESn5Fhm51aJZPyuwqzmkmNHTuHZ5qeDB1dN/UVSqfnLXKeKirOtJCq -VdWGZTFEKJSDPgmLOMy0GhrOeM/y5N5MJZBrwBxPJ3No3TOGr13Ir8E1cxCnchZX -PgpyXNU183gMX4k5NVEWYpCzoTxzY7PNvaMft61IkC9DIdnRxRbEqfdLiVy/k4jP -MUjX4ThGwtUVzqVOiH5uRRE1J7Msk/W3EYzIWg== ------END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/inter_ca.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/inter_ca.crt --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/inter_ca.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/inter_ca.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,38 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGoTCCBImgAwIBAgIUXCG9Tf1Ea3mKUicsQMd1lldTtIgwDQYJKoZIhvcNAQEL -BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz -MDMyOTA5MDQ0NloYDzIzODcxMjMxMDkwNDQ2WjCBvTELMAkGA1UEBhMCVVMxNDAy -BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp -b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y -cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAc 
-BgNVBAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD -ggIPADCCAgoCggIBAJYQSR4xUs7bFbMY4r4v8l/9cuRxOxnZ8/cFpIDKzCA4c7/U -omjJCaPOFJrldYthRnyjN6MdqKyIED0XjW/MIlbqjMJ4E4AvWKZC1+r3+YoFCYmg -pLq7kfBkw8Kd8BFhfRsGg60VeAzZ79Y6BvJZSyXXQeH1RYiH/bW4o6PdFaO/tchx -KFlYgOW5MoGTemx+muifZ9iKav4/feFZsh73+OFf+KyruSkGnM18YNqnoMiNL3M9 -H5T86OBjcPYHhhwCp/v5cjfD4Yaa1WAM0Bsy+o6b/VwSNhrk8U8JF2rjuK3wZm3L -hyMa3QOn/kgoonl7sVCKes6GpwOmiS/+qKf14JBK+bjTpDk/CsRYpnonBJKNGOzy -tr6CTqLWcJtoUz3kr1ZZnGXUmBjqYc9vYI2EnlzHnBAf+gplJVqtbEZckLx7rKBD -QXyXp5pqDZmnnxQ9qlk1ZMeqw/mjLackdi1CRg8SfA0GlRcQYmcWxc2U5iCcs+ym -q+V0ciK4YFg/z2wEMFEsarGclW8YrZ1RtY+IcmtCXf3rRa0CEbHCiclHoVtuX20c -LZuYsQ5y6TdeWkDTcAwm3ZCYa54LeySuYny8F8A7by42KRg+Z/JjaOlA6hBPGwvC -p1frJqQod5uGd0Zg0DrNKjjWIVc4Z38dRy48b1Ija081WBtKwyJiX8jto6yfAgMB -AAGjgZwwgZkwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovLzEyNy4wLjAuMToxMjM0 -L2NybC9yb290X2NhLmNybDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB -BjAfBgNVHSMEGDAWgBRJTc+PTs7kYFuicyYmOttP7ItAazAdBgNVHQ4EFgQUEamH -VrLZ+p7brEWfC48jxRInC+UwDQYJKoZIhvcNAQELBQADggIBAD3ZOiRT6ESzxFIK -76FY7qNM1PWcNrgTmMDy8iHfBWEBmkAUQbHvY/U5fPnPj3vPLiHXkLDbUliXyEnL -4myo966j8dettrvB7pxibCy7J2FxwoKwMUOY+4IgGBuxVVoWVBwzu5me35RnDZ6i -9+dRJXiZnO2cEVvfFmEfq0w5SQLsqmR6EeIhoUOepmEJDpjE3cSz6QnQ6KWdw2wf -e+dviPlDwS0sNg006lqSy3rVzsnlqLAsoDkeOEZyZmPbc6sAx4RJS9nBH4WERWb5 -XxVOYIn0QmlJKwlULB3x8dhxUv+a7alBjDt6v2MW1zXH8v1ZcMcJnFy8i6m0VzkV -edrO/ONmqfi/EUr/FothDLQnCoykWjcfL1JGLADjzyRE86Wg4L/DRil8k5wH8Fir -ZZE/kLeOkQN5FhvQK+m3YzGtxkehO7Io3YWmzbv05ZI2d6zroyP6DXS/zJY9wuNd -I/6zp6eUYb/mtT3NF3h1C3SjQpELT2IDoXXYQsbvcVk7pgMB2mP9sYnoDlQsXZvC -oEzD/ollmkHsgD3Zr3p6ANSiNpW6iRYBiWsRoXmVJw+nTSYvWLiMI/vuABXYPPn1 -Tc6yypXgtezMNtUI4fxJ5pU5aHMKL4+XGtCcACyazoVZaUam1DulWUcDhuH/4Om9 -FhcPOT6WqhWjn/zZLW6Rr5OvXwoF ------END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/root_ca.chained.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/root_ca.chained.crt --- 
s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/root_ca.chained.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/root_ca.chained.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGXzCCBEegAwIBAgIUWAen+5+bRZaL8kVlq51CXPQUdZUwDQYJKoZIhvcNAQEL -BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz -MDMyOTA5MDQ0NFoYDzIzODcxMjMxMDkwNDQ0WjCBtTELMAkGA1UEBhMCVVMxNDAy -BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp -b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y -cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxFjAU -BgNVBAsMDUlCTSBaIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQD0gnTWPpI/FahyE+PizunOHzNBi88KObopjL6+6p9J10RB82pDxaLoMxPo -6xkm0ZKiFea6ll6sAfcKPzCei+3691sCOiGN/JYdSZR+SynRGeD1o8KJWAwfmZ4w -DNHWHRqUE6u3oqAPDQUqaOnzyGdh2vGLP3HU5F5rIDhYzilm7WuXykYdiF/P6Rjs -xzzQZOxHTnvV8byEDN0Sw0ONsQSFee0uM22e20Kme34A4p+p7QBiRFSJAQyLtp/o -YBY8zeIgekr+BppRbCItEFAgHBS8DLk04jhNfGlU5RteaOJe7D6tg7z9CC8X0dAI -/p1vJkz5zYMPGuoKKaowDAJ+ncwN+UDYhbd4g0ATKC7M5DFmn4Wy60G+3op2BNp4 -PadcsOcXwzKsOSQix0+n4Tfw1QQ07+ilBHBe8fCRpk2Nf/byYuEaBjPBTXUcpTsp -Ydv0s5HjSyRfhEWKMQBOIn6mSORayTJ1/xQECrOfuIopLiZyMCjhixT4PImiGucM -ruCmsHZvfA/8VQxyEcPK5K+XqFaSr5r423C4CzcafHhvUFOYj2WmJyZzWc2X2fjo -l+ZspNpQTlvYgVi2cTeBnbNqCNMcTiGoyie5roHGa4CnigHuBIVSuUVGq/rqFKnT -KgKboI8C3XpQID6rFcYY444wOpfea8LodHTXFF6o8A5m+6QlGwIDAQABo2MwYTAP -BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBRJTc+P -Ts7kYFuicyYmOttP7ItAazAdBgNVHQ4EFgQUSU3Pj07O5GBbonMmJjrbT+yLQGsw -DQYJKoZIhvcNAQELBQADggIBAFCGlLEdAsicVPPuekhdfOyoK/P6r5JHnPIpC894 -9TVUp/7LBvDOwcNAvs+34dsbfRtMmwXUCGofrS4S0+zugnpgx6D1ScWC+FNdBJf9 
-C6XKBiU4Mxn/mKWTDUMBCxI+RRbpOvOox5h6pQcfo0IYz5okazol1nT40IdvNkZ7 -WDAz6Xqmw8k/+8Y+l1fDSfUqcgPnTBfgX3kkna8VHBx74nSqeFgOVvtd8BJq/xIG -6rAkGsv/PzmQqKpyoD70N/nGALomLlVScX5qIbNrXWW0DWxY+kDwhRlskPdYS7RJ -Qol3NX0t7GZ+l+0W2Kpp7k04n5xrJ7pzTDyltJJEeaLfqrMlF5MCK/UyJqxa3LQr -IkP2wTxM4HR1gsIvr8ei3QFSBtxEGiQl2VWfBs8bwy+KkL1ao1UIcB+1dj4SLy8x -RqNFNfogkyLL9cELiKxViRQgNYowv3OsSw30i4eSHMor8m0qfVf38t7gi+m0Ba+T -+RIV5MlZcjG6C3jS8AMpUwXO4hOxGEueuXmZuCDmyypqRpMJc0hu2JA1Q9fztvfn -hnOAv6K++5hB4mIyqUp8/QJjO9X2kKa4+MmbuNTnRuC13s2f7pzHjZR3cFf6QN5b -duxTQZ18xvqXIR6EV4PVwShjCMS6NHmkmumIxMzaDK1mGwL7l7B0ZS+r3i7qwGPw -2iYv ------END CERTIFICATE----- ------BEGIN X509 CRL----- -MIIDGTCCAQECAQEwDQYJKoZIhvcNAQELBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD -VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u -MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv -cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMRYwFAYD -VQQLDA1JQk0gWiBSb290IENBFw0yMzAzMTkxMTA0NDRaGA8yMzg3MTIzMTExMDQ0 -NFowFTATAgIBTRcNMjMwMzI4MTEwNDQ0WjANBgkqhkiG9w0BAQsFAAOCAgEAzXCT -KYc+J3XC49cCzdJBRzXdVk8AqylNFCBC/Z4a9AaPnFgUHIZqLcucvVTBMlYkzQST -7lu47hksCrlePGeY58goa8rOUuTjH0Gk/809oMNMxyJ3UjuEq/Q45gDeKys8UZqu -qrgHZ1dnBB5ARdPEhkMLzBgizrknhPXcAyg0f4dy8wFPCNJ0T+DiNdqKoQCZNsxD -3p1N3vTMIO20oWbX9cDZoY2Xb0rT9Cbt7ES3JY1DB4Z92zPB5ZxFuCIsxT3Jtszd -fM6YktxJbUvds/mqwmYCbQNZ4veS5YcrFPVVSADjnwP88GMbIQddAvXLOhjrUj4B -QEMndtREs0MvkDZdc/YkTEI/c1QF1xNT+UrMOxC0sEHSvcOQXNtw579QJ0gucA2J -HWWr6p2wTDrIefhppBQS0GSY2n7L1loKAZZNWt56TQoXRFWI4CtLJJTsPKIHWTQz -KLBgv5UlOdbcuh1+foY/XS8prlZvMS22oiDMLIknBR7ywYuYEq9YPKzDXWAL4mHk -DInbBQEYC2ar7wiLLBOM/2c2BmkzDdygChj7/1xvNYMGXEnak0Y/V75uAuWQ1h0T -3e52xW2RzwjYsoM04WBsSJFNd7VYSuSX7SneJywrSBneB+XvB7tcVaycsA1f3BMT -ptsIMqT9/N3++8MGCb/SRWoWFlLjITR9l5y3BUU= ------END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/root_ca.crt s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/root_ca.crt --- 
s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/cert/root_ca.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/cert/root_ca.crt 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGXzCCBEegAwIBAgIUWAen+5+bRZaL8kVlq51CXPQUdZUwDQYJKoZIhvcNAQEL -BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu -ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs -IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz -MDMyOTA5MDQ0NFoYDzIzODcxMjMxMDkwNDQ0WjCBtTELMAkGA1UEBhMCVVMxNDAy -BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp -b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y -cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxFjAU -BgNVBAsMDUlCTSBaIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQD0gnTWPpI/FahyE+PizunOHzNBi88KObopjL6+6p9J10RB82pDxaLoMxPo -6xkm0ZKiFea6ll6sAfcKPzCei+3691sCOiGN/JYdSZR+SynRGeD1o8KJWAwfmZ4w -DNHWHRqUE6u3oqAPDQUqaOnzyGdh2vGLP3HU5F5rIDhYzilm7WuXykYdiF/P6Rjs -xzzQZOxHTnvV8byEDN0Sw0ONsQSFee0uM22e20Kme34A4p+p7QBiRFSJAQyLtp/o -YBY8zeIgekr+BppRbCItEFAgHBS8DLk04jhNfGlU5RteaOJe7D6tg7z9CC8X0dAI -/p1vJkz5zYMPGuoKKaowDAJ+ncwN+UDYhbd4g0ATKC7M5DFmn4Wy60G+3op2BNp4 -PadcsOcXwzKsOSQix0+n4Tfw1QQ07+ilBHBe8fCRpk2Nf/byYuEaBjPBTXUcpTsp -Ydv0s5HjSyRfhEWKMQBOIn6mSORayTJ1/xQECrOfuIopLiZyMCjhixT4PImiGucM -ruCmsHZvfA/8VQxyEcPK5K+XqFaSr5r423C4CzcafHhvUFOYj2WmJyZzWc2X2fjo -l+ZspNpQTlvYgVi2cTeBnbNqCNMcTiGoyie5roHGa4CnigHuBIVSuUVGq/rqFKnT -KgKboI8C3XpQID6rFcYY444wOpfea8LodHTXFF6o8A5m+6QlGwIDAQABo2MwYTAP -BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBRJTc+P -Ts7kYFuicyYmOttP7ItAazAdBgNVHQ4EFgQUSU3Pj07O5GBbonMmJjrbT+yLQGsw -DQYJKoZIhvcNAQELBQADggIBAFCGlLEdAsicVPPuekhdfOyoK/P6r5JHnPIpC894 -9TVUp/7LBvDOwcNAvs+34dsbfRtMmwXUCGofrS4S0+zugnpgx6D1ScWC+FNdBJf9 -C6XKBiU4Mxn/mKWTDUMBCxI+RRbpOvOox5h6pQcfo0IYz5okazol1nT40IdvNkZ7 
-WDAz6Xqmw8k/+8Y+l1fDSfUqcgPnTBfgX3kkna8VHBx74nSqeFgOVvtd8BJq/xIG -6rAkGsv/PzmQqKpyoD70N/nGALomLlVScX5qIbNrXWW0DWxY+kDwhRlskPdYS7RJ -Qol3NX0t7GZ+l+0W2Kpp7k04n5xrJ7pzTDyltJJEeaLfqrMlF5MCK/UyJqxa3LQr -IkP2wTxM4HR1gsIvr8ei3QFSBtxEGiQl2VWfBs8bwy+KkL1ao1UIcB+1dj4SLy8x -RqNFNfogkyLL9cELiKxViRQgNYowv3OsSw30i4eSHMor8m0qfVf38t7gi+m0Ba+T -+RIV5MlZcjG6C3jS8AMpUwXO4hOxGEueuXmZuCDmyypqRpMJc0hu2JA1Q9fztvfn -hnOAv6K++5hB4mIyqUp8/QJjO9X2kKa4+MmbuNTnRuC13s2f7pzHjZR3cFf6QN5b -duxTQZ18xvqXIR6EV4PVwShjCMS6NHmkmumIxMzaDK1mGwL7l7B0ZS+r3i7qwGPw -2iYv ------END CERTIFICATE----- Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/asrcb/assoc_derived_default_cuid_one and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/asrcb/assoc_derived_default_cuid_one differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/asrcb/assoc_none_default_cuid_one and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/asrcb/assoc_none_default_cuid_one differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/asrcb/assoc_simple_default_cuid_one and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/asrcb/assoc_simple_default_cuid_one differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_derived_default_cuid_one and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_derived_default_cuid_one differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_none_default_cuid_one and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_none_default_cuid_one differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_none_default_cuid_seven and 
/tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_none_default_cuid_seven differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_none_default_ncuid_one and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_none_default_ncuid_one differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_none_dump_cuid_one and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_none_dump_cuid_one differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_simple_default_cuid_one and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/asrcb/null_simple_default_cuid_one differ diff -Nru s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/keyslot.bin s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/keyslot.bin --- s390-tools-2.31.0/rust/pv/openssl_extensions/tests/assets/exp/keyslot.bin 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/openssl_extensions/tests/assets/exp/keyslot.bin 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -A\%¥Ñœ8a—3áÁ·ûØ‹Ý1؂խàK— +# s390_pv - library for pv-tools + +This library is intended to be used by tools and libraries that +are used for creating and managing [IBM Secure Execution](https://www.ibm.com/docs/en/linux-on-systems?topic=virtualization-secure-execution) guests. +`pv` provides abstraction layers for encryption, secure memory management, +and accessing the uvdevice. + +If your project is not targeted to provide tooling for and/or managing of IBM Secure execution +guests, do **not** use this crate. + +## OpenSSL 1.1.0+ is required + +If you do not need any OpenSSL features use [s390_pv_core](https://crates.io/crates/s390_pv_core). +This crate reexports all symbols from `s390_pv_core`. 
If your project uses this crate do **not** include `s390_pv_core` as well. + +## Import crate +The recommended way of importing this crate is: +```bash +cargo add s390_pv --rename pv +``` diff -Nru s390-tools-2.31.0/rust/pv/src/brcb.rs s390-tools-2.33.1/rust/pv/src/brcb.rs --- s390-tools-2.31.0/rust/pv/src/brcb.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/brcb.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,6 +1,6 @@ // SPDX-License-Identifier: MIT // -// Copyright IBM Corp. 2023 +// Copyright IBM Corp. 2023, 2024 use std::{ io::{Read, Seek, SeekFrom::Current}, @@ -8,9 +8,8 @@ }; // (SE) boot request control block aka SE header -use crate::{assert_size, static_assert, Error, Result, PAGESIZE}; +use crate::{assert_size, request::MagicValue, static_assert, Error, Result, PAGESIZE}; use log::debug; -use pv_core::request::MagicValue; use zerocopy::{AsBytes, BigEndian, FromBytes, FromZeroes, U32, U64}; /// Struct containing all SE-header tags. @@ -19,43 +18,55 @@ /// Page List Digest (pld) /// Address List Digest (ald) /// Tweak List Digest (tld) -/// SE Header Tag (seht) -/// +/// SE-Header Tag (tag) #[repr(C)] -#[derive(Debug, Clone, Copy, AsBytes, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, AsBytes, PartialEq, Eq, FromBytes, FromZeroes)] pub struct BootHdrTags { pld: [u8; BootHdrHead::DIGEST_SIZE], ald: [u8; BootHdrHead::DIGEST_SIZE], tld: [u8; BootHdrHead::DIGEST_SIZE], - seht: [u8; BootHdrHead::SEHT_SIZE], + tag: [u8; BootHdrHead::TAG_SIZE], +} +assert_size!(BootHdrTags, 0xd0); + +impl AsRef<[u8]> for BootHdrTags { + fn as_ref(&self) -> &[u8] { + self.as_bytes() + } +} + +impl TryFrom> for BootHdrTags { + type Error = Error; + + fn try_from(value: Vec) -> Result { + Self::ref_from(&value) + .ok_or_else(|| Error::InvBootHdrSize(value.len())) + .copied() + } } -/// Magiv value for a SE-(boot)header +/// Magic value for a SE-(boot)header +#[derive(Debug)] pub struct BootHdrMagic; impl MagicValue<8> for BootHdrMagic { const MAGIC: [u8; 8] = 
[0x49, 0x42, 0x4d, 0x53, 0x65, 0x63, 0x45, 0x78]; } impl BootHdrTags { - /// Returns a reference to the SE-hdr tag of this [`BootHdrTags`]. - pub fn seht(&self) -> &[u8; 16] { - &self.seht + /// Returns a reference to the SE-header tag of this [`BootHdrTags`]. + pub fn tag(&self) -> &[u8; 16] { + &self.tag } /// Creates a new [`BootHdrTags`]. Useful for writing tests. #[doc(hidden)] - pub const fn new(pld: [u8; 64], ald: [u8; 64], tld: [u8; 64], seht: [u8; 16]) -> Self { - Self { - ald, - tld, - pld, - seht, - } + pub const fn new(pld: [u8; 64], ald: [u8; 64], tld: [u8; 64], tag: [u8; 16]) -> Self { + Self { ald, tld, pld, tag } } - /// returns false if no hdr found, true otherwise - /// in the very unlikel case an IO error can appear - /// when seeking to the beginning of the header + /// Returns `false` if no SE-header found, `true` otherwise. + /// In the very unlikely case an IO error can appear + /// when seeking to the beginning of the header. fn seek_se_hdr_start(img: &mut R) -> Result where R: Read + Seek, @@ -93,8 +104,8 @@ /// /// # Errors /// - /// This function will return an error if `hdr` is not at least as long as the header specifies - /// in bytes 12-15 or the first 8 bytes do not contain the magic value. + /// This function will return an error if the header could not be found in + /// `img` or is invalid. 
pub fn from_se_image(img: &mut R) -> Result where R: Read + Seek, @@ -107,36 +118,42 @@ let mut hdr = vec![0u8; size_of::()]; img.read_exact(&mut hdr)?; + // Very unlikely - seek_se_hdr_start should point to a header or error-out + if !BootHdrMagic::starts_with_magic(&hdr) { + debug!("Inv magic"); + return Err(Error::InvBootHdr); + } + let hdr_head = match BootHdrHead::read_from_prefix(hdr.as_mut_slice()) { Some(hdr) => hdr, None => { - debug!("Boot hdr is to small"); + debug!("Boot hdr is too small"); return Err(Error::InvBootHdr); } }; - //Some sanity checks - if !BootHdrMagic::starts_with_magic(&hdr) || hdr_head.version.get() != 0x100 { - debug!("Inv magic or size"); + // Some sanity checks + if hdr_head.version.get() != 0x100 { + debug!("Unsupported hdr-version: {:0>4x}", hdr_head.version.get()); return Err(Error::InvBootHdr); } - //go to the Bot header tag + // go to the Boot header tag img.seek(Current( hdr_head.size.get() as i64 - size_of::() as i64 - - BootHdrHead::SEHT_SIZE as i64, + - BootHdrHead::TAG_SIZE as i64, ))?; // read in the tag - let mut seht = [0u8; BootHdrHead::SEHT_SIZE]; - img.read_exact(seht.as_mut_slice())?; + let mut tag = [0u8; BootHdrHead::TAG_SIZE]; + img.read_exact(tag.as_mut_slice())?; Ok(BootHdrTags { pld: hdr_head.pld, ald: hdr_head.ald, tld: hdr_head.tld, - seht, + tag, }) } } @@ -161,7 +178,7 @@ assert_size!(BootHdrHead, 0x1A0); impl BootHdrHead { const DIGEST_SIZE: usize = 0x40; - const SEHT_SIZE: usize = 0x10; + const TAG_SIZE: usize = 0x10; } #[cfg(test)] @@ -194,7 +211,7 @@ 0x8f, 0x9b, 0xe0, 0xa5, 0x49, 0xd8, 0xd7, 0xa9, 0x4a, 0xe7, 0x20, 0xe5, 0xc0, 0x76, 0x0a, 0x82, 0x5d, 0x47, 0x9f, 0xe6, 0x7a, 0xf5, ], - seht: [ + tag: [ 0x92, 0x30, 0x9d, 0x45, 0x89, 0xb9, 0xa8, 0x5b, 0x42, 0x7f, 0x87, 0x53, 0x17, 0x1d, 0x15, 0x20, ], @@ -225,7 +242,7 @@ Err(Error::InvBootHdr) )); - //header is at a non expected position + // header is at a non expected position let mut img = vec![0u8; PAGESIZE]; 
img[0x008..0x288].copy_from_slice(bin_hdr); assert!(matches!( @@ -242,4 +259,24 @@ let hdr_tags = BootHdrTags::from_se_image(&mut Cursor::new(img)).unwrap(); assert_eq!(hdr_tags, EXP_HDR); } + + #[test] + fn tags_convert_u8() { + let bin_hdr = get_test_asset!("exp/secure_guest.hdr"); + let hdr_tags = BootHdrTags::from_se_image(&mut Cursor::new(*bin_hdr)).unwrap(); + let ser: &[u8] = hdr_tags.as_ref(); + let mut ser = ser.to_vec(); + + let der: BootHdrTags = ser.clone().try_into().unwrap(); + assert_eq!(hdr_tags, der); + + ser.pop(); + let der: Result = ser.clone().try_into(); + assert!(matches!(der, Err(Error::InvBootHdrSize(_)))); + + ser.push(17); + ser.push(17); + let der: Result = ser.clone().try_into(); + assert!(matches!(der, Err(Error::InvBootHdrSize(_)))); + } } diff -Nru s390-tools-2.31.0/rust/pv/src/cli.rs s390-tools-2.33.1/rust/pv/src/cli.rs --- s390-tools-2.31.0/rust/pv/src/cli.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/cli.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,166 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright IBM Corp. 2023 - -use crate::misc::{create_file, open_file}; -use crate::Result; -use clap::{ArgGroup, Args, ValueHint}; -use std::io::{Read, Write}; - -/// CLI Argument collection for handling certificates. -#[derive(Args, Debug, PartialEq, Eq, Default)] -#[command( - group(ArgGroup::new("pv_verify").required(true).args(["no_verify", "certs"])), - )] -pub struct CertificateOptions { - /// Use FILE as a host-key document. - /// - /// Can be specified multiple times and must be used at least once. - #[arg( - short = 'k', - long = "host-key-document", - value_name = "FILE", - required = true, - value_hint = ValueHint::FilePath, - use_value_delimiter = true, - value_delimiter = ',', - )] - pub host_key_documents: Vec, - - /// Disable the host-key document verification. - /// - /// Does not require the host-key documents to be valid. 
- /// Do not use for a production request unless you verified the host-key document beforehand. - #[arg(long)] - pub no_verify: bool, - - /// Use FILE as a certificate to verify the host key or keys. - /// - /// The certificates are used to establish a chain of trust for the verification - /// of the host-key documents. Specify this option twice to specify the IBM Z signing key and - /// the intermediate CA certificate (signed by the root CA). - #[arg( - short= 'C', - long = "cert", - value_name = "FILE", - alias("crt"), - value_hint = ValueHint::FilePath, - use_value_delimiter = true, - value_delimiter = ',', - )] - pub certs: Vec, - - /// Use FILE as a certificate revocation list. - /// - /// The list is used to check whether a certificate of the chain of - /// trust is revoked. Specify this option multiple times to use multiple CRLs. - #[arg( - long = "crl", - requires("certs"), - value_name = "FILE", - value_hint = ValueHint::FilePath, - use_value_delimiter = true, - value_delimiter = ',', - )] - pub crls: Vec, - - /// Make no attempt to download CRLs. - #[arg(long, requires("certs"))] - pub offline: bool, - - /// Use FILE as the root-CA certificate for the verification. - /// - /// If omitted, the system wide-root CAs installed on the system are used. - /// Use this only if you trust the specified certificate. - #[arg(long, requires("certs"))] - pub root_ca: Option, -} - -impl CertificateOptions { - /// Returns the verifier of this [`CertificateOptions`] based on the given CLI options. - /// - /// # Errors - /// - /// This function will return an error if [`crate::request::HkdVerifier`] cannot be created. - pub fn verifier(&self) -> Result> { - use crate::verify::{CertVerifier, NoVerifyHkd}; - match self.no_verify { - true => { - log::warn!( - "Host-key document verification is disabled. The secret may not be protected." 
- ); - Ok(Box::new(NoVerifyHkd)) - } - false => Ok(Box::new(CertVerifier::new( - &self.certs, - &self.crls, - &self.root_ca, - self.offline, - )?)), - } - } -} - -/// stdout -pub const STDOUT: &str = "-"; -/// stdin -pub const STDIN: &str = "-"; - -/// Converts an argument value into a Writer. -pub fn get_writer_from_cli_file_arg(path: &str) -> Result> { - if path == STDOUT { - Ok(Box::new(std::io::stdout())) - } else { - Ok(Box::new(create_file(path)?)) - } -} - -/// Converts an argument value into a Reader. -pub fn get_reader_from_cli_file_arg(path: &str) -> Result> { - if path == STDIN { - Ok(Box::new(std::io::stdin())) - } else { - Ok(Box::new(open_file(path)?)) - } -} - -#[cfg(test)] -mod test { - use clap::Parser; - - use super::*; - - #[test] - #[rustfmt::skip] - fn cli_args() { - //Verify only that some arguments are optional, we do not want to test clap, only the - //configuration - let valid_args = [vec!["pgr", "-k", "hkd.crt", "--no-verify"], vec!["pgr", "-k", "hkd.crt", "--crt", "abc.crt"]]; - // Test for the minimal amount of flags to yield an invalid combination - let invalid_args = [ - vec!["pgr", "-k", "hkd.crt"], - vec!["pgr", "--no-verify", "--crt", "abc.crt"], - vec!["pgr", "--no-verify", "--crt", "abc.crt", "--offline"], - vec!["pgr", "--no-verify", "--crt", "abc.crt", "--crl", "abc.crl"], - vec!["pgr", "--no-verify", "--crt", "abc.crt", "--root-ca", "root.crt"], - vec!["pgr", "--offline"], - vec!["pgr", "--crl", "abc.crl"], - vec!["pgr", "--root-ca", "root.crt"], - ]; - #[derive(Parser, Debug)] - struct TestParser { - #[command(flatten)] - pub verify_args: CertificateOptions, - } - - for arg in valid_args { - let res = TestParser::try_parse_from(&arg); - assert!(res.is_ok()); - } - - for arg in invalid_args { - let res = TestParser::try_parse_from(&arg); - assert!(res.is_err()); - } - } -} diff -Nru s390-tools-2.31.0/rust/pv/src/confidential.rs s390-tools-2.33.1/rust/pv/src/confidential.rs --- s390-tools-2.31.0/rust/pv/src/confidential.rs 
1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/confidential.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2023, 2024 + +use std::fmt::Debug; + +/// Trait for securely zeroizing memory. +/// +/// To be used with [`Confidential`] +pub trait Zeroize { + /// Reliably overwrites the given buffer with zeros, + fn zeroize(&mut self); +} + +// Automatically impl Zeroize for u8 arrays +impl Zeroize for [u8; COUNT] { + /// Reliably overwrites the given buffer with zeros, + /// by performing a volatile write followed by a memory barrier + fn zeroize(&mut self) { + // SAFETY: given buffer(self) has the correct (compile time) size + unsafe { std::ptr::write_volatile(self, [0u8; COUNT]) }; + std::sync::atomic::compiler_fence(std::sync::atomic::Ordering::SeqCst); + } +} + +impl Zeroize for Vec { + /// Reliably overwrites the given buffer with zeros, + /// by overwriting the whole vector's capacity with zeros. + fn zeroize(&mut self) { + // TODO use `volatile_set_memory` when stabilized + let mut dst = self.as_mut_ptr(); + for _ in 0..self.capacity() { + // SAFETY: + // * Vec allocated at least capacity elements continuously + // * dst points always to a valid location + unsafe { + std::ptr::write_volatile(dst, 0); + dst = dst.add(1); + } + } + std::sync::atomic::compiler_fence(std::sync::atomic::Ordering::SeqCst); + } +} + +/// Thin wrapper around an type implementing Zeroize. +/// +/// A `Confidential` represents a confidential value that must be securely overwritten during drop. 
+/// Will never leak its wrapped value during [`Debug`] +/// +/// ```rust +/// use s390_pv::request::Confidential; +/// fn foo(value: Confidential<[u8; 2]>) { +/// println!("value: {value:?}"); +/// } +/// # fn main() { +/// foo([1, 2].into()); +/// // prints: +/// // in debug builds: +/// // value: Confidential([1, 2]) +/// // in release builds: +/// // value: Confidential(***) +/// # } +/// ``` +#[derive(Clone, PartialEq, Eq, Default)] +pub struct Confidential(C); +impl Confidential { + /// Convert a type into a self overwriting one. + /// + /// Prefer using [`Into`] + pub fn new(v: C) -> Self { + Confidential(v) + } + + /// Get a reference to the contained value + pub fn value(&self) -> &C { + &self.0 + } + + /// Get a mutable reference to the contained value + /// + /// NOTE that modifications to a mutable reference can trigger reallocation. + /// e.g. a [`Vec`] might expand if more space needed. -> preallocate enough space + /// or operate on slices. The old locations can and will **NOT** be zeroized. 
+ pub fn value_mut(&mut self) -> &mut C { + &mut self.0 + } +} + +impl Debug for Confidential { + #[allow(unreachable_code)] + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // do NOT leak secrets in production builds + #[cfg(not(debug_assertions))] + return write!(f, "Confidential(***)"); + + let mut b = f.debug_tuple("Confidential"); + b.field(&self.0); + b.finish() + } +} + +impl From for Confidential { + fn from(v: C) -> Confidential { + Confidential(v) + } +} + +impl Zeroize for Confidential { + fn zeroize(&mut self) { + self.0.zeroize(); + } +} + +impl Drop for Confidential { + fn drop(&mut self) { + self.0.zeroize(); + } +} diff -Nru s390-tools-2.31.0/rust/pv/src/crypto.rs s390-tools-2.33.1/rust/pv/src/crypto.rs --- s390-tools-2.31.0/rust/pv/src/crypto.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/crypto.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT // -// Copyright IBM Corp. 2023 +// Copyright IBM Corp. 2023, 2024 -use crate::{error::Result, secret::Secret, Error}; +use crate::{confidential::Confidential, error::Result, Error}; use openssl::{ derive::Deriver, ec::{EcGroup, EcKey}, @@ -14,17 +14,20 @@ rand::rand_bytes, rsa::Padding, sign::{Signer, Verifier}, - symm::{encrypt_aead, Cipher}, + symm::{decrypt_aead, encrypt_aead, Cipher}, }; use std::{convert::TryInto, ops::Range}; /// An AES256-key that will purge itself out of the memory when going out of scope -/// -pub type Aes256Key = Secret<[u8; 32]>; +pub type Aes256Key = Confidential<[u8; 32]>; pub(crate) const AES_256_GCM_TAG_SIZE: usize = 16; +#[allow(dead_code)] +pub(crate) const SHA_256_HASH_SIZE: u32 = 32; +#[allow(dead_code)] +pub(crate) type Sha256Hash = [u8; SHA_256_HASH_SIZE as usize]; + /// Types of symmetric keys, to specify during construction. 
-/// #[non_exhaustive] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum SymKeyType { @@ -33,7 +36,6 @@ } /// Types of symmetric keys -/// #[non_exhaustive] #[derive(Debug, Clone, PartialEq, Eq)] pub enum SymKey { @@ -63,21 +65,6 @@ } } -impl Aes256Key { - /// Generates an AES256 key from an digest (hash). - /// - /// # Panics - /// - /// Panics if `digset` is not 32 bytes long. - fn from_digest(digest: DigestBytes) -> Self { - let key: [u8; 32] = digest - .as_ref() - .try_into() - .expect("Unexpected OpenSSl Error. Sha256 hash not 32 bytes long"); - key.into() - } -} - impl From for SymKey { fn from(value: Aes256Key) -> Self { Self::Aes256(value) @@ -90,7 +77,7 @@ /// # Errors /// /// This function will return an OpenSSL error if the key could not be generated. -pub fn hkdf_rfc_5869( +pub(crate) fn hkdf_rfc_5869( md: &MdRef, ikm: &[u8], salt: &[u8], @@ -114,17 +101,20 @@ /// # Errors /// /// This function will return an error if something went bad in OpenSSL. -pub fn derive_key(k1: &PKey, k2: &PKey) -> Result { +pub(crate) fn derive_key(k1: &PKeyRef, k2: &PKeyRef) -> Result { let mut der = Deriver::new(k1)?; der.set_peer(k2)?; let mut key = der.derive_to_vec()?; key.extend([0, 0, 0, 1]); - let secr = Secret::new(key); + let secr = Confidential::new(key); - Ok(Aes256Key::from_digest(hash( - MessageDigest::sha256(), - secr.value(), - )?)) + // Panic: does not panic as SHA256 digest is 32 bytes long + Ok(Aes256Key::new( + hash(MessageDigest::sha256(), secr.value())? + .as_ref() + .try_into() + .unwrap(), + )) } /// Generate a random array. @@ -132,7 +122,7 @@ /// # Errors /// /// This function will return an error if the entropy source fails or is not available. -pub fn random_array() -> Result<[u8; COUNT]> { +pub(crate) fn random_array() -> Result<[u8; COUNT]> { let mut rand = [0; COUNT]; rand_bytes(&mut rand)?; Ok(rand) @@ -143,12 +133,40 @@ /// # Errors /// /// This function will return an error if the key could not be generated by OpenSSL. 
-pub fn gen_ec_key() -> Result> { +pub(crate) fn gen_ec_key() -> Result> { let group = EcGroup::from_curve_name(Nid::SECP521R1)?; let key: EcKey = EcKey::generate(&group)?; PKey::from_ec_key(key).map_err(Error::Crypto) } +/// Result type for an AES encryption in GCM mode.. +#[derive(Debug)] +pub(crate) struct AesGcmResult { + /// The result. + /// + /// [`Vec`] with the following content: + /// 1. `aad` + /// 2. `encr(conf)` + /// 3. `aes gcm tag` + pub buf: Vec, + /// The position of the authenticated data in [`Self::buf`] + pub aad_range: Range, + /// The position of the encrypted data in [`Self::buf`] + pub encr_range: Range, + /// The position of the tag in [`Self::buf`] + #[allow(unused)] + // here for completeness + pub tag_range: Range, +} + +impl AesGcmResult { + /// Deconstruct the result to just the resulting data w/o ranges. + pub(crate) fn data(self) -> Vec { + let Self { buf, .. } = self; + buf + } +} + /// Encrypt confidential Data with a symmetric key and provida a gcm tag. /// /// * `key` - symmetric key used for encryption @@ -156,21 +174,15 @@ /// * `aad` - additional authentic data /// * `conf` - data to be encrypted /// -/// # Returns -/// [`Vec`] with the following content: -/// 1. `aad` -/// 2. `encr(conf)` -/// 3. `aes gcm tag` -/// /// # Errors /// /// This function will return an error if the data could not be encrypted by OpenSSL. 
-pub fn encrypt_aes_gcm( +pub(crate) fn encrypt_aes_gcm( key: &SymKey, iv: &[u8], aad: &[u8], conf: &[u8], -) -> Result<(Vec, Range, Range, Range)> { +) -> Result { let mut tag = vec![0xff; AES_256_GCM_TAG_SIZE]; let encr = match key { SymKey::Aes256(key) => encrypt_aead( @@ -183,7 +195,7 @@ )?, }; - let mut res = vec![0; aad.len() + encr.len() + tag.len()]; + let mut buf = vec![0; aad.len() + encr.len() + tag.len()]; let aad_range = Range { start: 0, end: aad.len(), @@ -197,10 +209,53 @@ end: aad.len() + encr.len() + tag.len(), }; - res[aad_range.clone()].copy_from_slice(aad); - res[encr_range.clone()].copy_from_slice(&encr); - res[tag_range.clone()].copy_from_slice(&tag); - Ok((res, aad_range, encr_range, tag_range)) + buf[aad_range.clone()].copy_from_slice(aad); + buf[encr_range.clone()].copy_from_slice(&encr); + buf[tag_range.clone()].copy_from_slice(&tag); + Ok(AesGcmResult { + buf, + aad_range, + encr_range, + tag_range, + }) +} + +/// Decrypt encrypted data with a symmetric key and compare the GCM-tag. +/// +/// * `key` - symmetric key used for encryption +/// * `iv` - initialisation vector +/// * `aad` - additional authenticated data +/// * `encr` - encrypted data +/// * `tag` - GCM-tag to compare with +/// +/// # Returns +/// [`Vec`] with the decrypted data +/// +/// # Errors +/// +/// This function will return an error if the data could not be decrypted by OpenSSL. +pub(crate) fn decrypt_aes_gcm( + key: &SymKey, + iv: &[u8], + aad: &[u8], + encr: &[u8], + tag: &[u8], +) -> Result>> { + let decr = match key { + SymKey::Aes256(key) => { + decrypt_aead(Cipher::aes_256_gcm(), key.value(), Some(iv), aad, encr, tag) + } + } + .map_err(|ssl_err| { + // Empty error-stack -> no internal ssl error but decryption failed. + // Very likely due to a tag mismatch. + if ssl_err.errors().is_empty() { + Error::GcmTagMismatch + } else { + Error::Crypto(ssl_err) + } + })?; + Ok(decr.into()) } /// Calculate the hash of a slice. 
@@ -208,10 +263,23 @@ /// # Errors /// /// This function will return an error if OpenSSL could not compute the hash. -pub fn hash(t: MessageDigest, data: &[u8]) -> Result { +pub(crate) fn hash(t: MessageDigest, data: &[u8]) -> Result { openssl::hash::hash(t, data).map_err(Error::Crypto) } +/// Calculate the HMAC of the given message. +pub(crate) fn calculate_hmac( + hmac_key: &PKeyRef, + dgst: MessageDigest, + msg: &[u8], +) -> Result> { + match hmac_key.id() { + Id::HMAC => Signer::new(dgst, hmac_key)? + .sign_oneshot_to_vec(msg) + .map_err(Error::Crypto), + _ => Err(Error::UnsupportedSigningKey), + } +} /// Calculate a digital signature scheme. /// /// Calculates the digital signature of the provided message using the signing key. [`Id::EC`], @@ -220,7 +288,11 @@ /// # Errors /// /// This function will return an error if OpenSSL could not compute the signature. -pub fn sign_msg(skey: &PKeyRef, dgst: MessageDigest, msg: &[u8]) -> Result> { +pub(crate) fn sign_msg( + skey: &PKeyRef, + dgst: MessageDigest, + msg: &[u8], +) -> Result> { match skey.id() { Id::EC => { let mut sgn = Signer::new(dgst, skey)?; @@ -246,7 +318,7 @@ /// # Errors /// /// This function will return an error if OpenSSL could not compute the signature. 
-pub fn verify_signature( +pub(crate) fn verify_signature( skey: &PKeyRef, dgst: MessageDigest, msg: &[u8], @@ -343,7 +415,7 @@ } #[test] - fn encrypt_aes_256_gcm() { + fn encrypt_decrypt_aes_256_gcm() { let aes_gcm_key = [ 0xee, 0xbc, 0x1f, 0x57, 0x48, 0x7f, 0x51, 0x92, 0x1c, 0x04, 0x65, 0x66, 0x5f, 0x8a, 0xe6, 0xd1, 0x65, 0x8b, 0xb2, 0x6d, 0xe6, 0xf8, 0xa0, 0x69, 0xa3, 0x52, 0x02, 0x93, @@ -352,10 +424,10 @@ let aes_gcm_iv = [ 0x99, 0xaa, 0x3e, 0x68, 0xed, 0x81, 0x73, 0xa0, 0xee, 0xd0, 0x66, 0x84, ]; - let aes_gcm_plain = [ + let aes_gcm_plain = Confidential::new(vec![ 0xf5, 0x6e, 0x87, 0x05, 0x5b, 0xc3, 0x2d, 0x0e, 0xeb, 0x31, 0xb2, 0xea, 0xcc, 0x2b, 0xf2, 0xa5, - ]; + ]); let aes_gcm_aad = [ 0x4d, 0x23, 0xc3, 0xce, 0xc3, 0x34, 0xb4, 0x9b, 0xdb, 0x37, 0x0c, 0x43, 0x7f, 0xec, 0x78, 0xde, @@ -366,14 +438,45 @@ 0xb9, 0xf2, 0x17, 0x36, 0x67, 0xba, 0x05, 0x10, 0x26, 0x2a, 0xe4, 0x87, 0xd7, 0x37, 0xee, 0x62, 0x98, 0xf7, 0x7e, 0x0c, ]; + let key = SymKey::Aes256(aes_gcm_key.into()); + + let AesGcmResult { + buf, + aad_range, + encr_range, + tag_range, + } = encrypt_aes_gcm(&key, &aes_gcm_iv, &aes_gcm_aad, aes_gcm_plain.value()).unwrap(); + assert_eq!(buf, aes_gcm_res); - let (res, ..) 
= encrypt_aes_gcm( - &SymKey::Aes256(aes_gcm_key.into()), + let conf = decrypt_aes_gcm( + &key, &aes_gcm_iv, - &aes_gcm_aad, - &aes_gcm_plain, + &buf[aad_range], + &buf[encr_range], + &buf[tag_range], ) .unwrap(); - assert_eq!(res, aes_gcm_res); + assert_eq!(conf, aes_gcm_plain); + } + + #[test] + fn hmac_sha512_rfc_4868() { + // use a test vector with key=64bytes of RFC 4868: + // https://www.rfc-editor.org/rfc/rfc4868.html#section-2.7.2.3 + let key = [0xb; 64]; + let data = [0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65]; + + let exp = vec![ + 0x63, 0x7e, 0xdc, 0x6e, 0x01, 0xdc, 0xe7, 0xe6, 0x74, 0x2a, 0x99, 0x45, 0x1a, 0xae, + 0x82, 0xdf, 0x23, 0xda, 0x3e, 0x92, 0x43, 0x9e, 0x59, 0x0e, 0x43, 0xe7, 0x61, 0xb3, + 0x3e, 0x91, 0x0f, 0xb8, 0xac, 0x28, 0x78, 0xeb, 0xd5, 0x80, 0x3f, 0x6f, 0x0b, 0x61, + 0xdb, 0xce, 0x5e, 0x25, 0x1f, 0xf8, 0x78, 0x9a, 0x47, 0x22, 0xc1, 0xbe, 0x65, 0xae, + 0xa4, 0x5f, 0xd4, 0x64, 0xe8, 0x9f, 0x8f, 0x5b, + ]; + let pkey = PKey::hmac(&key).unwrap(); + + let hmac = calculate_hmac(&pkey, MessageDigest::sha512(), &data).unwrap(); + + assert_eq!(hmac, exp); } } diff -Nru s390-tools-2.31.0/rust/pv/src/error.rs s390-tools-2.33.1/rust/pv/src/error.rs --- s390-tools-2.31.0/rust/pv/src/error.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/error.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,6 +1,10 @@ // SPDX-License-Identifier: MIT // -// Copyright IBM Corp. 2023 +// Copyright IBM Corp. 
2023, 2024 + +use std::path::PathBuf; + +use crate::secret::UserDataType; /// Result type for this crate pub type Result = std::result::Result; @@ -24,7 +28,7 @@ #[error("Cannot load {ty} from {path}")] X509Load { - path: String, + path: PathBuf, ty: &'static str, source: openssl::error::ErrorStack, }, @@ -41,13 +45,16 @@ #[error("Provided binary request is too small")] BinRequestSmall, - #[error("No Config UID found: {0}")] + #[error("No Configuration UID found: {0}")] NoCuid(String), // errors from request types #[error("Customer Communication Key must be 32 bytes long")] CckSize, + #[error("Decryption failed. Probably due to a GCM tag mismatch.")] + GcmTagMismatch, + #[error("Invalid {0} user-data for signing provided. Max {} bytes allowed", .0.max())] AsrcbInvSgnUserData(UserDataType), @@ -68,6 +75,37 @@ )] AsrcbUserDataSgnFail, + #[error("The provided Host Key Document in '{hkd}' is not in PEM or DER format")] + HkdNotPemOrDer { + hkd: String, + source: openssl::error::ErrorStack, + }, + + #[error("The provided host key document in {0} contains no certificate!")] + NoHkdInFile(String), + + #[error("Invalid input size ({0}) for boot hdr")] + InvBootHdrSize(usize), + + #[error("Input does not contain an attestation request")] + NoArcb, + + #[error("The attestation request has an unknown version ({0})")] + BinArcbInvVersion(u32), + + #[error( + "The attestation request encrypted size is too small ({0}). Request probably tampered with." + )] + BinArcbSeaSmall(u32), + + #[error("The input is missing the Configuration UID entry. It is probably not an attestation response")] + AttExCuidMissing, + + #[error( + "Attestation flags indicating that the additional data contains {0}, but no data was provided." 
+ )] + AddDataMissing(&'static str), + // errors from other crates #[error(transparent)] PvCore(#[from] pv_core::Error), @@ -82,12 +120,11 @@ // used in macros #[doc(hidden)] impl Error { - pub const CRL: &'static str = "CRL"; pub const CERT: &'static str = "certificate"; + pub const CRL: &'static str = "CRL"; } /// Error cases for verifying host-key documents -/// #[allow(missing_docs)] #[derive(thiserror::Error, Debug, PartialEq, Eq)] #[non_exhaustive] @@ -97,7 +134,7 @@ #[error("No valid CRL found")] NoCrl, #[error("Host-key document is revoked.")] - HdkRevoked, + HkdRevoked, #[error("Not enough bits of security. ({0}, {1} expected)")] SecurityBits(u32, u32), #[error("Authority Key Id mismatch")] @@ -126,5 +163,3 @@ }; } pub(crate) use bail_hkd_verify; - -use crate::request::uvsecret::UserDataType; diff -Nru s390-tools-2.31.0/rust/pv/src/lib.rs s390-tools-2.33.1/rust/pv/src/lib.rs --- s390-tools-2.31.0/rust/pv/src/lib.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/lib.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,99 +1,120 @@ // SPDX-License-Identifier: MIT // -// Copyright IBM Corp. 2023 +// Copyright IBM Corp. 2023, 2024 -#![deny(missing_docs)] -//! pv - library for pv-tools +#![deny( + missing_docs, + missing_debug_implementations, + trivial_numeric_casts, + unstable_features, + unused_import_braces, + unused_qualifications +)] +#![doc = include_str!("../README.md")] +//! # Manage guest secret store //! -//! This library is intened to be used by tools and libraries that -//! are used for creating and managing IBM Secure Execution guests. -//! `pv` provides abstraction layers for encryption, secure memory management, -//! logging, and accessing the uvdevice. +//! This crate provides functionalities for creating add-secret requests. Also provides support for +//! sending those requests, list all stored secrets, and lock the secret store. //! -//! If you do not need any OpenSSL features use `pv_core`. -//! 
This crate reexports all symbols from `pv_core` +//! ## Create +//! [`secret::AddSecretRequest`] +//! +//! ## Add +//! [`uv::UvDevice`] and [`uv::AddCmd`] +//! +//! ## List +//! [`uv::UvDevice`] and [`uv::ListCmd`] +//! +//! ## Lock +//! [`uv::UvDevice`] and [`uv::LockCmd`] +//! +//! # Attestation +//! +//! This crate provides functionalities for creating, performing, and verifying Attestation +//! measurements for _IBM Secure Execution for Linux_. See: +//! +//! ## Create +//! [`attest::AttestationRequest`] +//! +//! ## Perform +//! [`uv::UvDevice`] and [`uv::AttestationCmd`] +//! +//! # Verify +//! [`attest::AttestationItems`], [`attest::AttestationMeasurement`] mod brcb; -mod cli; +mod confidential; mod crypto; mod error; +mod openssl_extensions; mod req; -mod secret; mod utils; +mod uvattest; mod uvsecret; mod verify; /// utility functions for writing TESTS!!! -//hide any test helpers on docs! +// hide any test helpers on docs! #[doc(hidden)] #[allow(dead_code)] pub mod test_utils; -pub use ::utils::assert_size; -pub use ::utils::static_assert; +pub use pv_core::assert_size; +pub use pv_core::static_assert; const PAGESIZE: usize = 0x1000; /// Definitions and functions for interacting with the Ultravisor pub mod uv { - pub use pv_core::uv::{ - uv_ioctl, ConfigUid, UvCmd, UvDevice, UvDeviceInfo, UvFlags, UvcSuccess, + pub use pv_core::uv::*; +} + +/// Functionalities for creating attestation requests +pub mod attest { + pub use crate::uvattest::{ + additional::AdditionalData, + arcb::{AttestationAuthenticated, AttestationRequest}, + arcb::{AttestationFlags, AttestationVersion}, + attest::{AttestationItems, AttestationMeasurement}, }; - pub use pv_core::uv::{AddCmd, ListCmd, LockCmd}; - pub use pv_core::uv::{ListableSecretType, SecretEntry, SecretList}; + pub use pv_core::attest::*; } /// Miscellaneous functions and definitions pub mod misc { - pub use crate::cli::{ - get_reader_from_cli_file_arg, get_writer_from_cli_file_arg, CertificateOptions, STDIN, - STDOUT, 
- }; - pub use crate::utils::{read_certs, read_crls, read_private_key}; + pub use crate::utils::read_certs; pub use pv_core::misc::*; - pub use pv_core::PvLogger; } pub use crate::error::HkdVerifyErrorType; pub use error::{Error, Result}; +pub use pv_core::Error as PvCoreError; +pub use pv_core::{FileAccessErrorType, FileIoErrorType}; /// Functionalities to build UV requests pub mod request { - pub use crate::brcb::{BootHdrMagic, BootHdrTags}; - pub use crate::crypto::derive_key; - pub use crate::crypto::random_array; - pub use crate::crypto::{encrypt_aes_gcm, gen_ec_key}; - pub use crate::crypto::{hash, hkdf_rfc_5869}; - pub use crate::crypto::{sign_msg, verify_signature}; - pub use crate::crypto::{Aes256Key, SymKey, SymKeyType}; - pub use crate::req::{Aad, BinReqValues, Encrypt, Keyslot, ReqEncrCtx, Request}; - pub use crate::secret::{Secret, Zeroize}; + pub use crate::brcb::BootHdrTags; + pub use crate::confidential::{Confidential, Zeroize}; + pub use crate::crypto::{SymKey, SymKeyType}; + pub use crate::req::{Keyslot, ReqEncrCtx, Request}; pub use crate::verify::{CertVerifier, HkdVerifier, NoVerifyHkd}; /// Reexports some useful OpenSSL symbols pub mod openssl { pub use openssl::error::ErrorStack; - pub use openssl::hash::MessageDigest; - pub use openssl::md::Md; pub use openssl::pkey; + pub use openssl::x509; } - /// Functionalities for creating add-secret requests - pub mod uvsecret { - pub use crate::uvsecret::{ - asrcb::{AddSecretFlags, AddSecretRequest, AddSecretVersion}, - ext_secret::ExtSecret, - guest_secret::GuestSecret, - user_data::verify_asrcb_and_get_user_data, - }; - pub use pv_core::request::uvsecret::AddSecretMagic; - pub use pv_core::request::uvsecret::UserDataType; - } - pub use pv_core::request::RequestMagic; + pub use pv_core::request::*; } -/// Provides cargo version Info about this crate. 
-/// -/// Produces `pv-crate ` -pub const fn crate_info() -> &'static str { - concat!(env!("CARGO_PKG_NAME"), "-crate ", env!("CARGO_PKG_VERSION")) +/// Functionalities for creating add-secret requests +pub mod secret { + pub use crate::uvsecret::{ + asrcb::{AddSecretFlags, AddSecretRequest, AddSecretVersion}, + ext_secret::ExtSecret, + guest_secret::GuestSecret, + user_data::verify_asrcb_and_get_user_data, + }; + pub use pv_core::secret::*; } diff -Nru s390-tools-2.31.0/rust/pv/src/openssl_extensions/akid.rs s390-tools-2.33.1/rust/pv/src/openssl_extensions/akid.rs --- s390-tools-2.31.0/rust/pv/src/openssl_extensions/akid.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/openssl_extensions/akid.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2023 + +use std::fmt; + +use foreign_types::{foreign_type, ForeignType, ForeignTypeRef}; +use openssl::x509::{X509CrlRef, X509Ref}; +use std::ffi::c_int; + +mod ffi { + extern "C" { + pub fn X509_check_akid( + issuer: *const openssl_sys::X509, + akid: *const openssl_sys::AUTHORITY_KEYID, + ) -> super::c_int; + } +} + +foreign_type! { + type CType = openssl_sys::AUTHORITY_KEYID; + fn drop = openssl_sys::AUTHORITY_KEYID_free; + + /// An `Authority Key Identifier`. + pub struct Akid; + /// Reference to `Akid` + pub struct AkidRef; +} + +#[derive(Copy, Clone, PartialEq, Eq)] +pub struct AkidCheckResult(c_int); + +impl fmt::Debug for AkidCheckResult { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("AkidCheckResult") + .field("code", &self.0) + .finish() + } +} + +impl AkidCheckResult { + pub const OK: AkidCheckResult = AkidCheckResult(openssl_sys::X509_V_OK); + + /// Creates an `AkidCheckResult` from a raw error number. 
+ unsafe fn from_raw(err: c_int) -> AkidCheckResult { + AkidCheckResult(err) + } +} + +impl AkidRef { + /// Check if the `Akid` matches the issuer + pub fn check(&self, issuer: &X509Ref) -> AkidCheckResult { + unsafe { + let res = ffi::X509_check_akid(issuer.as_ptr(), self.as_ptr()); + AkidCheckResult::from_raw(res) + } + } +} + +pub trait AkidExtension { + fn akid(&self) -> Option; +} + +impl AkidExtension for X509Ref { + fn akid(&self) -> Option { + unsafe { + let ptr = openssl_sys::X509_get_ext_d2i( + self.as_ptr(), + openssl_sys::NID_authority_key_identifier, + std::ptr::null_mut(), + std::ptr::null_mut(), + ); + if ptr.is_null() { + None + } else { + Some(Akid::from_ptr(ptr as *mut _)) + } + } + } +} + +impl AkidExtension for X509CrlRef { + fn akid(&self) -> Option { + unsafe { + let ptr = openssl_sys::X509_CRL_get_ext_d2i( + self.as_ptr(), + openssl_sys::NID_authority_key_identifier, + std::ptr::null_mut(), + std::ptr::null_mut(), + ); + if ptr.is_null() { + None + } else { + Some(Akid::from_ptr(ptr as *mut _)) + } + } + } +} + +#[cfg(test)] +mod test { + use crate::test_utils::load_gen_cert; + + use super::*; + + #[test] + fn akid() { + let cert = load_gen_cert("ibm.crt"); + let ca = load_gen_cert("root_ca.crt"); + + let akid = cert.akid().unwrap(); + let res = akid.check(&ca); + assert_eq!(res, AkidCheckResult::OK); + } +} diff -Nru s390-tools-2.31.0/rust/pv/src/openssl_extensions/crl.rs s390-tools-2.33.1/rust/pv/src/openssl_extensions/crl.rs --- s390-tools-2.31.0/rust/pv/src/openssl_extensions/crl.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/openssl_extensions/crl.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 
2023 +pub use crate::openssl_extensions::stackable_crl::*; +use foreign_types::{ForeignType, ForeignTypeRef}; +use openssl::{ + error::ErrorStack, + stack::{Stack, StackRef}, + x509::{ + store::{X509StoreBuilderRef, X509StoreRef}, + X509CrlRef, X509NameRef, X509Ref, X509StoreContextRef, X509, + }, +}; + +pub fn opt_to_ptr(o: Option<&T>) -> *mut T::CType { + match o { + None => std::ptr::null_mut(), + Some(p) => p.as_ptr(), + } +} + +mod ffi { + extern "C" { + pub fn X509_STORE_CTX_get1_crls( + ctx: *mut openssl_sys::X509_STORE_CTX, + nm: *mut openssl_sys::X509_NAME, + ) -> *mut openssl_sys::stack_st_X509_CRL; + pub fn X509_STORE_add_crl( + xs: *mut openssl_sys::X509_STORE, + x: *mut openssl_sys::X509_CRL, + ) -> std::ffi::c_int; + } +} + +pub trait X509StoreExtension { + fn add_crl(&mut self, crl: &X509CrlRef) -> Result<(), ErrorStack>; +} + +impl X509StoreExtension for X509StoreBuilderRef { + fn add_crl(&mut self, crl: &X509CrlRef) -> Result<(), ErrorStack> { + unsafe { + { + let r = ffi::X509_STORE_add_crl(self.as_ptr(), crl.as_ptr()); + if r <= 0 { + Err(ErrorStack::get()) + } else { + Ok(()) + } + } + } + } +} + +pub trait X509StoreContextExtension { + fn init_opt( + &mut self, + trust: &X509StoreRef, + cert: Option<&X509Ref>, + cert_chain: Option<&StackRef>, + with_context: F, + ) -> Result + where + F: FnOnce(&mut X509StoreContextRef) -> Result; + fn crls(&mut self, subj: &X509NameRef) -> Result, ErrorStack>; +} + +impl X509StoreContextExtension for X509StoreContextRef { + fn init_opt( + &mut self, + trust: &X509StoreRef, + cert: Option<&X509Ref>, + cert_chain: Option<&StackRef>, + with_context: F, + ) -> Result + where + F: FnOnce(&mut X509StoreContextRef) -> Result, + { + struct Cleanup<'a>(&'a mut X509StoreContextRef); + + impl<'a> Drop for Cleanup<'a> { + fn drop(&mut self) { + unsafe { + openssl_sys::X509_STORE_CTX_cleanup(self.0.as_ptr()); + } + } + } + + unsafe { + { + let r = openssl_sys::X509_STORE_CTX_init( + self.as_ptr(), + trust.as_ptr(), + 
opt_to_ptr(cert), + opt_to_ptr(cert_chain), + ); + if r <= 0 { + Err(ErrorStack::get()) + } else { + Ok(r) + } + }?; + } + let cleanup = Cleanup(self); + with_context(cleanup.0) + } + + /// Get all Certificate Revocation Lists with the subject currently stored + fn crls(&mut self, subj: &X509NameRef) -> Result, ErrorStack> { + unsafe { + { + let r = ffi::X509_STORE_CTX_get1_crls(self.as_ptr(), subj.as_ptr()); + if r.is_null() { + Err(ErrorStack::get()) + } else { + Ok(Stack::from_ptr(r)) + } + } + } + } +} diff -Nru s390-tools-2.31.0/rust/pv/src/openssl_extensions/mod.rs s390-tools-2.33.1/rust/pv/src/openssl_extensions/mod.rs --- s390-tools-2.31.0/rust/pv/src/openssl_extensions/mod.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/openssl_extensions/mod.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 + +#![doc(hidden)] + +/// Extensions to the rust-openssl crate +mod akid; +mod crl; +mod stackable_crl; + +pub use akid::*; +pub use crl::*; diff -Nru s390-tools-2.31.0/rust/pv/src/openssl_extensions/stackable_crl.rs s390-tools-2.33.1/rust/pv/src/openssl_extensions/stackable_crl.rs --- s390-tools-2.31.0/rust/pv/src/openssl_extensions/stackable_crl.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/openssl_extensions/stackable_crl.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 
2023 + +use std::{marker::PhantomData, ptr}; + +use foreign_types::{ForeignType, ForeignTypeRef}; +use openssl::{ + error::ErrorStack, + stack::Stackable, + x509::{X509Crl, X509CrlRef}, +}; +use openssl_sys::BIO_new_mem_buf; +use std::ffi::c_int; + +#[derive(Debug)] +pub struct StackableX509Crl(*mut openssl_sys::X509_CRL); + +impl ForeignType for StackableX509Crl { + type CType = openssl_sys::X509_CRL; + type Ref = X509CrlRef; + + unsafe fn from_ptr(ptr: *mut openssl_sys::X509_CRL) -> StackableX509Crl { + StackableX509Crl(ptr) + } + + fn as_ptr(&self) -> *mut openssl_sys::X509_CRL { + self.0 + } +} +impl Drop for StackableX509Crl { + fn drop(&mut self) { + unsafe { (openssl_sys::X509_CRL_free)(self.0) } + } +} +impl ::std::ops::Deref for StackableX509Crl { + type Target = X509CrlRef; + + fn deref(&self) -> &X509CrlRef { + unsafe { ForeignTypeRef::from_ptr(self.0) } + } +} +impl ::std::ops::DerefMut for StackableX509Crl { + fn deref_mut(&mut self) -> &mut X509CrlRef { + unsafe { ForeignTypeRef::from_ptr_mut(self.0) } + } +} +#[allow(clippy::explicit_auto_deref)] +impl ::std::borrow::Borrow for StackableX509Crl { + fn borrow(&self) -> &X509CrlRef { + &**self + } +} +#[allow(clippy::explicit_auto_deref)] +impl ::std::convert::AsRef for StackableX509Crl { + fn as_ref(&self) -> &X509CrlRef { + &**self + } +} + +impl Stackable for StackableX509Crl { + type StackType = openssl_sys::stack_st_X509_CRL; +} + +pub struct MemBioSlice<'a>(*mut openssl_sys::BIO, PhantomData<&'a [u8]>); +impl<'a> Drop for MemBioSlice<'a> { + fn drop(&mut self) { + unsafe { + openssl_sys::BIO_free_all(self.0); + } + } +} + +impl<'a> MemBioSlice<'a> { + pub fn new(buf: &'a [u8]) -> Result, ErrorStack> { + openssl_sys::init(); + + assert!(buf.len() <= c_int::MAX as usize); + let bio = unsafe { + { + let r = BIO_new_mem_buf(buf.as_ptr() as *const _, buf.len() as c_int); + if r.is_null() { + Err(ErrorStack::get()) + } else { + Ok(r) + } + }? 
+ }; + + Ok(MemBioSlice(bio, PhantomData)) + } + + pub fn as_ptr(&self) -> *mut openssl_sys::BIO { + self.0 + } +} + +impl StackableX509Crl { + pub fn stack_from_pem(pem: &[u8]) -> Result, ErrorStack> { + unsafe { + openssl_sys::init(); + let bio = MemBioSlice::new(pem)?; + + let mut crls = vec![]; + loop { + let r = openssl_sys::PEM_read_bio_X509_CRL( + bio.as_ptr(), + ptr::null_mut(), + None, + ptr::null_mut(), + ); + if r.is_null() { + let err = openssl_sys::ERR_peek_last_error(); + if openssl_sys::ERR_GET_LIB(err) == openssl_sys::ERR_LIB_PEM + && openssl_sys::ERR_GET_REASON(err) == openssl_sys::PEM_R_NO_START_LINE + { + openssl_sys::ERR_clear_error(); + break; + } + + return Err(ErrorStack::get()); + } else { + crls.push(X509Crl::from_ptr(r)); + } + } + + Ok(crls) + } + } +} +impl From for StackableX509Crl { + fn from(value: X509Crl) -> Self { + unsafe { + openssl_sys::X509_CRL_up_ref(value.as_ptr()); + StackableX509Crl::from_ptr(value.as_ptr()) + } + } +} +impl From for X509Crl { + fn from(value: StackableX509Crl) -> Self { + unsafe { + openssl_sys::X509_CRL_up_ref(value.as_ptr()); + X509Crl::from_ptr(value.as_ptr()) + } + } +} diff -Nru s390-tools-2.31.0/rust/pv/src/req.rs s390-tools-2.33.1/rust/pv/src/req.rs --- s390-tools-2.31.0/rust/pv/src/req.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/req.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,9 +2,13 @@ // // Copyright IBM Corp. 
2023 -use crate::crypto::AES_256_GCM_TAG_SIZE; +use crate::assert_size; +use crate::crypto::{ + decrypt_aes_gcm, derive_key, encrypt_aes_gcm, gen_ec_key, random_array, AesGcmResult, SymKey, + SymKeyType, AES_256_GCM_TAG_SIZE, +}; use crate::misc::to_u32; -use crate::request::{derive_key, encrypt_aes_gcm, gen_ec_key, random_array, SymKey, SymKeyType}; +use crate::request::Confidential; use crate::{Error, Result}; use openssl::bn::{BigNum, BigNumContext}; use openssl::ec::{EcGroupRef, EcPointRef}; @@ -14,8 +18,6 @@ use pv_core::request::{RequestMagic, RequestVersion}; use std::convert::TryInto; use std::mem::size_of; -use std::ops::Range; -use utils::assert_size; use zerocopy::{AsBytes, BigEndian, FromBytes, FromZeroes, U32}; /// Encrypt a _secret_ using self and a given private key. @@ -28,7 +30,7 @@ /// # Errors /// /// This function will return an error if OpenSSL could not encrypt the secret. - fn encrypt(&self, secret: &[u8], priv_key: &PKey) -> Result> { + fn encrypt(&self, secret: &[u8], priv_key: &PKeyRef) -> Result> { let mut res = Vec::with_capacity(80); self.encrypt_to(secret, priv_key, &mut res)?; Ok(res) @@ -43,10 +45,16 @@ /// # Errors /// /// This function will return an error if OpenSSL could not encrypt the secret. - fn encrypt_to(&self, secret: &[u8], priv_key: &PKey, to: &mut Vec) -> Result<()>; + fn encrypt_to( + &self, + secret: &[u8], + priv_key: &PKeyRef, + to: &mut Vec, + ) -> Result<()>; } /// Types of Authenticated Data +#[allow(missing_debug_implementations)] pub enum Aad<'a> { /// Authenticated Keyslot Ks(&'a Keyslot), @@ -82,7 +90,9 @@ impl Encrypt for Keyslot { /// Encrypts the given request protection key `prot_key`. /// - /// The AES256 encryption key is derived from `self` as public key, and `priv_key` as private key. + /// The AES256 encryption key is derived from `self` as public key, and `priv_key` as private + /// key. + /// /// # Returns /// The encrypted Keyslot. 
/// @@ -92,11 +102,12 @@ fn encrypt_to( &self, prot_key: &[u8], - priv_key: &PKey, + priv_key: &PKeyRef, to: &mut Vec, ) -> Result<()> { let derived_key = derive_key(priv_key, &self.0)?; - let (mut wrpk_and_kst, ..) = encrypt_aes_gcm(&derived_key.into(), &[0; 12], &[], prot_key)?; + let mut wrpk_and_kst = + encrypt_aes_gcm(&derived_key.into(), &[0; 12], &[], prot_key)?.data(); let phk: EcdhPubkeyCoord = self.0.as_ref().try_into()?; to.reserve(80); @@ -106,7 +117,7 @@ } } -/// Context used to mange the encryption of requests. +/// Context used to manage the encryption of requests. /// Intended to be used by [`Request`] implementations #[derive(Debug)] pub struct ReqEncrCtx { @@ -143,7 +154,7 @@ prot_key, }) } - /// + /// Create a new encryption context with random input values. /// /// # Errors @@ -155,12 +166,6 @@ } } - ///Panics if data does not fit into bin_aad+offs - // #[track_caller] - // pub fn copy_to_bin_aad(_bin_aad: &mut [u8], _aad_offs: usize, _data: &[u8]) { - // todo!(); - // } - /// Build the authenticated data for a request. 
/// # Returns /// ```none @@ -172,7 +177,6 @@ /// | Request type dependent AAD data | /// |-------------------------------------------------------------| /// ``` - /// pub fn build_aad( &self, version: RequestVersion, @@ -202,8 +206,8 @@ }?; let mut auth_data: Vec = Vec::with_capacity(2048); - //reserve space for the request header - auth_data.resize(std::mem::size_of::(), 0); + // reserve space for the request header + auth_data.resize(size_of::(), 0); for a in aad { match a { @@ -225,11 +229,11 @@ let req_hdr = RequestHdr::new(version, rql, self.iv, nks, sea, magic); // copy request header to the start of the request - auth_data[..std::mem::size_of::()].copy_from_slice(req_hdr.as_bytes()); + auth_data[..size_of::()].copy_from_slice(req_hdr.as_bytes()); Ok(auth_data) } - /// get the public coordinates from the private key (Customer private key) + /// Get the public coordinates from the private key (Customer private key) /// # Errors /// /// This function will return an error if the public key could not be extracted by OpenSSL. @@ -238,7 +242,7 @@ self.priv_key.as_ref().try_into().map_err(Error::Crypto) } - /// Encrypt confidential Data with this encryption context and provide a gcm tag. + /// Encrypt confidential Data with this encryption context and provide a GCM tag. /// /// * `aad` - additional authentic data /// * `conf` - data to be encrypted @@ -252,13 +256,14 @@ /// # Errors /// /// This function will return an error if the data could not be encrypted by OpenSSL. - pub fn encrypt_aead( - &self, - aad: &[u8], - conf: &[u8], - ) -> Result<(Vec, Range, Range, Range)> { + pub(crate) fn encrypt_aead(&self, aad: &[u8], conf: &[u8]) -> Result { encrypt_aes_gcm(&self.prot_key, &self.iv, aad, conf) } + + /// Returns a reference to the request protection key of this [`ReqEncrCtx`]. 
+ pub fn prot_key(&self) -> &SymKey { + &self.prot_key + } } #[repr(C)] @@ -270,8 +275,8 @@ } } -/// Get the pub ecdh coordinates in the format the Ultravisor expects it: -/// The two coordinates are pdadded to 80 bytes each. +/// Get the pub ECDH coordinates in the format the Ultravisor expects it: +/// The two coordinates are padded to 80 bytes each. fn get_pub_ecdh_points(pkey: &EcPointRef, grp: &EcGroupRef) -> Result<[u8; 160], ErrorStack> { const ECDH_PUB_KEY_COORD_POINT_SIZE: i32 = 0x50; let mut x = BigNum::new()?; @@ -287,6 +292,7 @@ ($type: ty) => { impl TryFrom<&PKeyRef<$type>> for EcdhPubkeyCoord { type Error = ErrorStack; + fn try_from(key: &PKeyRef<$type>) -> Result { let k = key.ec_key()?; k.check_key()?; @@ -345,9 +351,9 @@ /// `ReqEncrCtx` when implementing `encrypt`. A hostkey should be represented by [`Keyslot`] during /// encryption. /// -/// An UV request consists of an authenticated area (AAD), an encrypted area (Encr) and a 16 byte tag. -/// The AAD contains a general header and Request type defined data (including Keyslots). -/// It is encrypted with an Request protection key (symmetric). This key is encrypted with a +/// An UV request consists of an authenticated area (AAD), an encrypted area (Encr) and a 16 byte +/// tag. The AAD contains a general header and Request type defined data (including Keyslots). It +/// is encrypted with an Request protection key (symmetric). This key is encrypted with a /// (generated) private key and the public key of the host system (Host key) /// ```none /// _______________________________________________________________ @@ -361,7 +367,7 @@ /// | ---------------------------------------------------- | /// | AES GCM Tag (16) | /// |_____________________________________________________________| -///``` +/// ``` pub trait Request { /// Encrypt the request into its binary format /// @@ -378,7 +384,6 @@ /// A struct to represent some parts of a binary/encrypted request. 
#[derive(Debug)] -#[allow(unused)] #[allow(clippy::len_without_is_empty)] pub struct BinReqValues<'a> { iv: &'a [u8], @@ -435,6 +440,28 @@ pub fn len(&self) -> usize { self.len } + + /// Returns the size of the encrypted area + pub fn sea(&self) -> u32 { + self.encr.len() as u32 + } + + /// Decrypts the encrypted area with the provided key + pub fn decrypt(&self, key: &SymKey) -> Result>> { + decrypt_aes_gcm(key, self.iv, self.aad, self.encr, self.tag) + } + + /// Returns a reference to the request dependent authenticated area of this [`BinReqValues`] + /// already interpreted. + /// + /// If target struct is larger than the request depended-AAD None is returned. See + /// [`FromBytes::ref_from_prefix`] + pub fn req_dep_aad(&self) -> Option<&T> + where + T: FromBytes + Sized, + { + T::ref_from_prefix(self.req_dep_aad) + } } #[cfg(test)] @@ -465,15 +492,15 @@ .unwrap(); let mut aad_exp = vec![ - 0x12, 0x34, 0x56, 0x89, 0xab, 0xcd, 0xef, 0, //progr + 0x12, 0x34, 0x56, 0x89, 0xab, 0xcd, 0xef, 0, // progr 0, 0, 2, 0, // vers - 0, 0, 0, 168, //size + 0, 0, 0, 168, // size 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // iv - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //res - 1, //nks + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // res + 1, // nks 0, 0, 0, 0, // res 0, 0, 0, 16, // sea - 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, //aad + 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, // aad ]; aad_exp.extend_from_slice(get_test_asset!("exp/keyslot.bin")); assert_eq!(&aad, &aad_exp); @@ -506,7 +533,7 @@ let (_, host_key) = get_test_keys(); let ctx = ReqEncrCtx::new_aes_256(Some([0x11; 12]), None, None).unwrap(); - let ks = vec![ + let ks = [ Keyslot::new(host_key.clone()), Keyslot::new(host_key.clone()), Keyslot::new(host_key), @@ -524,12 +551,12 @@ let hdr = RequestHdr::new(0x200, 22, [0x11; 12], 15, 44, None); let hdr_bin = hdr.as_bytes(); let hdr_bin_exp = [ - 0u8, 0, 0, 0, 0, 0, 0, 0, //magic + 0u8, 0, 0, 0, 0, 0, 0, 0, // magic 0, 0, 2, 0, // vers - 0, 0, 
0, 22, //size + 0, 0, 0, 22, // size 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // iv - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //res - 15, //nks + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // res + 15, // nks 0, 0, 0, 0, // res 0, 0, 0, 44, // sea ]; @@ -541,12 +568,12 @@ let mut hdr = RequestHdr::new(0x200, 0x1234, [0x11; 12], 15, 44, Some(TEST_MAGIC)); let hdr_bin = hdr.as_bytes_mut(); let hdr_bin_exp = [ - 0x12, 0x34, 0x56, 0x89, 0xab, 0xcd, 0xef, 0, //magic + 0x12, 0x34, 0x56, 0x89, 0xab, 0xcd, 0xef, 0, // magic 0, 0, 2, 0, // vers - 0, 0, 0x12, 0x34, //size + 0, 0, 0x12, 0x34, // size 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // iv - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //res - 15, //nks + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // res + 15, // nks 0, 0, 0, 0, // res 0, 0, 0, 44, // sea ]; diff -Nru s390-tools-2.31.0/rust/pv/src/secret.rs s390-tools-2.33.1/rust/pv/src/secret.rs --- s390-tools-2.31.0/rust/pv/src/secret.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/secret.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,117 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright IBM Corp. 2023 - -use std::fmt::Debug; - -/// Trait for securely zeroizing memory. -/// -/// To be used with [`Secret`] -pub trait Zeroize { - /// Reliably overwrites the given buffer with zeros, - fn zeroize(&mut self); -} - -/* Automatically impl Zeroize for u8 arrays */ -impl Zeroize for [u8; COUNT] { - /// Reliably overwrites the given buffer with zeros, - /// by performing a volatile write followed by a memory barrier - fn zeroize(&mut self) { - // SAFETY: given buffer(self) has the correct (compile time) size - unsafe { std::ptr::write_volatile(self, [0u8; COUNT]) }; - std::sync::atomic::compiler_fence(std::sync::atomic::Ordering::SeqCst); - } -} - -impl Zeroize for Vec { - /// Reliably overwrites the given buffer with zeros, - /// by overwriting the whole vector's capacity with zeros. 
- fn zeroize(&mut self) { - //TODO use `volatile_set_memory` when stabilized - let mut dst = self.as_mut_ptr(); - for _ in 0..self.capacity() { - // SAFETY: - // * Vec allocated at least capacity elements continuously - // * dst points always to a valid location - unsafe { - std::ptr::write_volatile(dst, 0); - dst = dst.add(1); - } - } - std::sync::atomic::compiler_fence(std::sync::atomic::Ordering::SeqCst); - } -} - -/// Thin wrapper around an type implementing Zeroize. -/// -/// A `Secret` represents a confidential value that must be securely overwritten during drop. -/// Will never leak its wrapped value during [`Debug`] -/// -/// ```rust -/// use pv::request::Secret; -/// fn foo(value: Secret<[u8; 2]>) { -/// println!("value: {value:?}"); -/// } -/// # fn main() { -/// foo([1,2].into()); -/// // prints: -/// // in debug builds: -/// // value: Secret([1, 2]) -/// // in release builds: -/// // value: Secret(***) -/// # } -/// ``` -#[derive(Clone, PartialEq, Eq, Default)] -pub struct Secret(C); -impl Secret { - /// Convert a type into a self overwriting one. - /// - /// Prefer using [`Into`] - pub fn new(v: C) -> Self { - Secret(v) - } - - /// Get a reference to the contained value - pub fn value(&self) -> &C { - &self.0 - } - /// Get a imutable reference to the contained value - /// - /// NOTE that modifications to a mutable reference can trigger reallocation. - /// e.g. a [`Vec`] might expand if more space needed. -> preallocate enough space - /// or operate on slices. The old locations can and will **NOT** be zeroized. 
- pub fn value_mut(&mut self) -> &mut C { - &mut self.0 - } -} - -impl Debug for Secret { - #[allow(unreachable_code)] - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - // do NOT leak secrets in production builds - #[cfg(not(debug_assertions))] - return write!(f, "Secret(***)"); - - let mut b = f.debug_tuple("Secret"); - b.field(&self.0); - b.finish() - } -} - -impl From for Secret { - fn from(v: C) -> Secret { - Secret(v) - } -} - -impl Zeroize for Secret { - fn zeroize(&mut self) { - self.0.zeroize(); - } -} - -impl Drop for Secret { - fn drop(&mut self) { - self.0.zeroize(); - } -} diff -Nru s390-tools-2.31.0/rust/pv/src/test_utils.rs s390-tools-2.33.1/rust/pv/src/test_utils.rs --- s390-tools-2.31.0/rust/pv/src/test_utils.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/test_utils.rs 2024-05-28 08:26:36.000000000 +0200 @@ -30,7 +30,7 @@ } pub fn get_cert_asset_path>(path: P) -> PathBuf { - let mut p = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let mut p = PathBuf::from(env!("CARGO_MANIFEST_DIR")); p.push("tests"); p.push("assets"); p.push("cert"); @@ -39,16 +39,10 @@ p } -pub fn get_cert_asset_path_string(path: &'static str) -> String { - get_cert_asset_path(path) - .into_os_string() - .into_string() - .unwrap() -} /// TEST ONLY! Load an cert /// /// panic on errors -pub fn get_cert_asset(path: &'static str) -> Vec { +pub fn get_cert_asset>(path: P) -> Vec { let p = get_cert_asset_path(path); fs::read(p).unwrap() } @@ -56,7 +50,7 @@ /// TEST ONLY! Load cert found in the asset path /// /// panic on errors -pub fn load_gen_cert(asset_path: &'static str) -> X509 { +pub fn load_gen_cert>(asset_path: P) -> X509 { let buf = get_cert_asset(asset_path); let mut cert = X509::from_der(&buf) .map(|crt| vec![crt]) @@ -66,10 +60,10 @@ cert.pop().unwrap() } -/// TEST ONLY! Load the crl found in the asset path +/// TEST ONLY! 
Load the CRL found in the asset path /// /// panic on errors -pub fn load_gen_crl(asset_path: &'static str) -> X509Crl { +pub fn load_gen_crl>(asset_path: P) -> X509Crl { let buf = get_cert_asset(asset_path); X509Crl::from_der(&buf) @@ -79,7 +73,7 @@ /// TEST ONLY! Get a fixed private/public pair and a fixed public key /// -/// Intened for TESTING only. All parts of the key including the private key are checked in git and +/// Intended for TESTING only. All parts of the key including the private key are checked in git and /// visible for the public pub fn get_test_keys() -> (PKey, PKey) { let pub_key = get_test_asset!("keys/public_cust.bin"); diff -Nru s390-tools-2.31.0/rust/pv/src/utils.rs s390-tools-2.33.1/rust/pv/src/utils.rs --- s390-tools-2.31.0/rust/pv/src/utils.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/utils.rs 2024-05-28 08:26:36.000000000 +0200 @@ -3,7 +3,7 @@ // Copyright IBM Corp. 2023 use crate::{Error, Result}; use openssl::{ - pkey::{PKey, Private}, + error::ErrorStack, x509::{X509Crl, X509}, }; @@ -13,11 +13,11 @@ /// /// This function will return an error if the underlying OpenSSL implementation cannot parse `buf` /// as `DER` or `PEM`. 
-pub fn read_crls(buf: &[u8]) -> Result> { - use openssl_extensions::crl::StackableX509Crl; - X509Crl::from_der(buf) +pub fn read_crls>(buf: T) -> Result> { + use crate::openssl_extensions::StackableX509Crl; + X509Crl::from_der(buf.as_ref()) .map(|crl| vec![crl]) - .or_else(|_| StackableX509Crl::stack_from_pem(buf)) + .or_else(|_| StackableX509Crl::stack_from_pem(buf.as_ref())) .map_err(Error::Crypto) } @@ -26,38 +26,24 @@ /// # Errors /// /// This function will return an error if the underlying OpenSSL implementation cannot parse `buf` - -pub fn read_certs(buf: &[u8]) -> Result> { - X509::from_der(buf) +pub fn read_certs>(buf: T) -> Result, ErrorStack> { + X509::from_der(buf.as_ref()) .map(|crt| vec![crt]) - .or_else(|_| X509::stack_from_pem(buf)) - .map_err(Error::Crypto) -} - -/// Read+parse the first key from the buffer. -/// -/// # Errors -/// -/// This function will return an error if the underlying OpenSSL implementation cannot parse `buf` -/// as `DER` or `PEM`. -pub fn read_private_key(buf: &[u8]) -> Result> { - PKey::private_key_from_der(buf) - .or_else(|_| PKey::private_key_from_pem(buf)) - .map_err(Error::Crypto) + .or_else(|_| X509::stack_from_pem(buf.as_ref())) } #[cfg(test)] mod tests { - use crate::{get_test_asset, test_utils::*}; + use crate::test_utils::*; #[test] fn read_crls() { let crl = get_cert_asset("ibm.crl"); let crl_der = get_cert_asset("der.crl"); let fail = get_cert_asset("ibm.crt"); - assert_eq!(super::read_crls(&crl).unwrap().len(), 1); - assert_eq!(super::read_crls(&crl_der).unwrap().len(), 1); - assert_eq!(super::read_crls(&fail).unwrap().len(), 0); + assert_eq!(super::read_crls(crl).unwrap().len(), 1); + assert_eq!(super::read_crls(crl_der).unwrap().len(), 1); + assert_eq!(super::read_crls(fail).unwrap().len(), 0); } #[test] @@ -65,22 +51,8 @@ let crt = get_cert_asset("ibm.crt"); let crt_der = get_cert_asset("der.crt"); let fail = get_cert_asset("ibm.crl"); - assert_eq!(super::read_certs(&crt).unwrap().len(), 1); - 
assert_eq!(super::read_certs(&crt_der).unwrap().len(), 1); - assert_eq!(super::read_certs(&fail).unwrap().len(), 0); - } - - #[test] - fn read_private_key() { - let key = get_test_asset!("keys/rsa3072key.pem"); - let key = super::read_private_key(key).unwrap(); - assert_eq!(key.rsa().unwrap().size(), 384); - } - - #[test] - fn read_private_key_fail() { - let key = get_test_asset!("exp/secure_guest.hdr"); - let key = super::read_private_key(key); - assert!(key.is_err()); + assert_eq!(super::read_certs(crt).unwrap().len(), 1); + assert_eq!(super::read_certs(crt_der).unwrap().len(), 1); + assert_eq!(super::read_certs(fail).unwrap().len(), 0); } } diff -Nru s390-tools-2.31.0/rust/pv/src/uvattest/additional.rs s390-tools-2.33.1/rust/pv/src/uvattest/additional.rs --- s390-tools-2.31.0/rust/pv/src/uvattest/additional.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/uvattest/additional.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 
2024 +use super::arcb::AttestationFlags; +use crate::req::Keyslot; +use crate::static_assert; +use crate::{Error, Result}; +use serde::Serialize; +use std::fmt::Display; +use zerocopy::FromBytes; + +/// Hash for additional-data stuff used for parsing [`AdditionalData`] +pub(crate) type AttAddHash = [u8; ATT_ADD_HASH_SIZE as usize]; +pub(crate) const ATT_ADD_HASH_SIZE: u32 = 0x20; +static_assert!(Keyslot::PHKH_SIZE == ATT_ADD_HASH_SIZE); + +/// Struct describing the additional-data of an Attestation Request +#[derive(Serialize, Debug)] +#[serde(default)] +pub struct AdditionalData +where + T: Serialize, +{ + #[serde(skip_serializing_if = "Option::is_none")] + image_phkh: Option, + #[serde(skip_serializing_if = "Option::is_none")] + attestation_phkh: Option, +} + +impl Display for AdditionalData +where + T: Display + Serialize, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn write_field( + f: &mut std::fmt::Formatter<'_>, + name: &'static str, + s: &Option, + ) -> std::fmt::Result { + if let Some(hash) = s { + writeln!(f, "{name}")?; + match f.alternate() { + true => writeln!(f, "{hash:#}")?, + false => writeln!(f, "{hash}")?, + }; + } + Ok(()) + } + write_field(f, "Image PHKH", &self.image_phkh)?; + write_field(f, "Attestation PHKH", &self.attestation_phkh) + } +} + +fn read_hash<'a>( + data: &'a [u8], + read: bool, + name: &'static str, +) -> Result<(Option<&'a AttAddHash>, &'a [u8])> { + match read { + true => { + let (v, data) = + AttAddHash::slice_from_prefix(data, 1).ok_or(Error::AddDataMissing(name))?; + // slice from prefix ensures that there is 1 element. + Ok((Some(&v[0]), data)) + } + false => Ok((None, data)), + } +} + +impl AdditionalData { + /// Provides a reference to the image public host key hash. + /// + /// This is the hash of the public host key of the corresponding private machine key that + /// decrypted the Secure Execution guest. + /// Contains a value if that value was requested by the attestation request. 
+ pub fn image_public_host_key_hash(&self) -> Option<&T> { + self.image_phkh.as_ref() + } + + /// Provides a reference to the attestation public host key hash. + /// + /// This is the hash of the public host key of the corresponding private machine key that + /// decrypted the Attestation request. + /// Contains a value if that value was requested by the attestation request. + pub fn attestation_public_host_key_hash(&self) -> Option<&T> { + self.attestation_phkh.as_ref() + } +} + +impl<'a, T: Serialize + From<&'a [u8]> + Sized> AdditionalData { + /// Create Additional data from the u8-slice variant + pub fn from_other(other: AdditionalData<&'a [u8]>) -> Self { + let AdditionalData { + image_phkh, + attestation_phkh, + } = other; + Self { + image_phkh: image_phkh.map(|i| i.into()), + attestation_phkh: attestation_phkh.map(|i| i.into()), + } + } +} + +impl<'a> AdditionalData<&'a [u8]> { + /// Create from a slice of additional-data + /// + /// `flags`: Flags indicating which additional-data field is present. + /// + /// # Error + /// + /// Fails if there is a mismatch between the data and the flags. Should not happen after a + /// successful attestation verification. + pub fn from_slice(data: &'a [u8], flags: &AttestationFlags) -> Result { + let _data = data; + let (image_phkh, _data) = read_hash(data, flags.image_phkh(), "Image PHKH")?; + let (attestation_phkh, _data) = read_hash(data, flags.attest_phkh(), "Attestation PHKH")?; + + Ok(Self { + image_phkh: image_phkh.map(|v| v.as_slice()), + attestation_phkh: attestation_phkh.map(|v| v.as_slice()), + }) + } +} diff -Nru s390-tools-2.31.0/rust/pv/src/uvattest/arcb.rs s390-tools-2.33.1/rust/pv/src/uvattest/arcb.rs --- s390-tools-2.31.0/rust/pv/src/uvattest/arcb.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/uvattest/arcb.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 
2024 +use super::{additional::ATT_ADD_HASH_SIZE, AttNonce}; +use crate::{ + assert_size, + attest::{AttestationMagic, AttestationMeasAlg}, + crypto::random_array, + misc::Flags, + req::{Aad, BinReqValues, Keyslot, ReqEncrCtx}, + request::{Confidential, MagicValue, Request, RequestVersion, SymKey, Zeroize}, + static_assert, + uv::UvFlags, + Error, Result, +}; +use openssl::pkey::{PKey, Public}; +use std::mem::size_of; +use zerocopy::{AsBytes, BigEndian, FromBytes, FromZeroes, U32}; + +#[cfg(doc)] +use crate::{ + request::SymKeyType, + uv::AttestationCmd, + verify::{CertVerifier, HkdVerifier}, +}; + +/// Retrieve Attestation Request Control Block +/// +/// An ARCB holds an Attestation Measurement key to attest a SE-guest. +/// The (architectural optional) nonce is always used and freshly generated for a new +/// [`AttestationRequest`]. +/// +/// Layout: +/// ```none +/// _______________________________________________________________ +/// | generic header (48) +/// | --------------------------------------------------- | +/// | Plaintext Attestation flags (8) | +/// | Measurement Algorithm Identifier (4) | +/// | Reserved(4) | +/// | Customer Public Key (160) generated for each request | +/// | N Keyslots(80 each) | +/// | --------------------------------------------------- | +/// | Measurement key (64) | Encrypted +/// | Optional Nonce (0 or 16) | Encrypted +/// | --------------------------------------------------- | +/// | AES GCM Tag (16) | +/// |_____________________________________________________________| +/// ``` +/// +/// # Example +/// Create an Attestation request with default flags (= use a nonce) +/// +/// ```rust,no_run +/// # use s390_pv::attest::{AttestationFlags, AttestationMeasAlg, AttestationRequest, AttestationVersion}; +/// # use s390_pv::request::{SymKeyType, Request, ReqEncrCtx}; +/// # fn main() -> s390_pv::Result<()> { +/// let att_version = AttestationVersion::One; +/// let meas_alg = AttestationMeasAlg::HmacSha512; +/// let mut arcb = 
AttestationRequest::new(att_version, meas_alg, AttestationFlags::default())?; +/// // read-in hostkey document(s). Not verified for brevity. +/// let hkd = s390_pv::misc::read_certs(&std::fs::read("host-key-document.crt")?)?; +/// // IBM issued HKD certificates typically have one X509 +/// let hkd = hkd.first().unwrap().public_key()?; +/// arcb.add_hostkey(hkd); +/// // you can add multiple hostkeys +/// // arcb.add_hostkey(another_hkd); +/// // encrypt it +/// let ctx = ReqEncrCtx::random(SymKeyType::Aes256)?; +/// let arcb = arcb.encrypt(&ctx)?; +/// # Ok(()) +/// # } +/// ``` +/// # See Also +/// +/// * [`AttestationFlags`] +/// * [`AttestationMeasAlg`] +/// * [`AttestationVersion`] +/// * [`SymKeyType`] +/// * [`Request`] +/// * [`ReqEncrCtx`] +/// * [`AttestationCmd`] +/// * [`HkdVerifier`], [`CertVerifier`] +#[derive(Debug)] +pub struct AttestationRequest { + version: AttestationVersion, + aad: AttestationAuthenticated, + keyslots: Vec, + conf: Confidential, +} + +impl AttestationRequest { + /// Create a new retrieve attestation measurement request + pub fn new( + version: AttestationVersion, + mai: AttestationMeasAlg, + mut flags: AttestationFlags, + ) -> Result { + // This implementation enforces using a nonce + flags.set_nonce(); + Ok(Self { + version, + aad: AttestationAuthenticated::new(flags, mai), + keyslots: vec![], + conf: ReqConfData::random()?, + }) + } + + /// Returns a reference to the flags of this [`AttestationRequest`]. + pub fn flags(&self) -> &AttestationFlags { + &self.aad.flags + } + + /// Returns a copy of the confidential data of this [`AttestationRequest`]. + /// + /// Gives a copy of the confidential data of this request for further + /// processing. This data should be never exposed in cleartext to anyone but + /// the creator and the verifier of this request. 
+ pub fn confidential_data(&self) -> AttestationConfidential { + let conf = self.conf.value(); + AttestationConfidential::new(conf.meas_key.to_vec(), conf.nonce.into()) + } + + fn aad(&self, ctx: &ReqEncrCtx) -> Result> { + let cust_pub_key = ctx.key_coords()?; + let mut aad: Vec = Vec::with_capacity(self.keyslots.len() + 2); + aad.push(Aad::Plain(self.aad.as_bytes())); + aad.push(Aad::Plain(cust_pub_key.as_ref())); + self.keyslots.iter().for_each(|k| aad.push(Aad::Ks(k))); + ctx.build_aad( + self.version.into(), + &aad, + size_of::(), + AttestationMagic::MAGIC, + ) + } + + /// Decrypts the request and extracts the authenticated and confidential data + /// + /// Deconstructs the `arcb` and decrypts it using `arpk` + /// + /// # Error + /// + /// Returns an error if the request is malformed or the decryption failed + pub fn decrypt_bin( + arcb: &[u8], + arpk: &SymKey, + ) -> Result<(AttestationAuthenticated, AttestationConfidential)> { + if !AttestationMagic::starts_with_magic(arcb) { + return Err(Error::NoArcb); + } + + let values = BinReqValues::get(arcb)?; + + match values.version().try_into()? 
{ + AttestationVersion::One => (), + }; + let auth: &AttestationAuthenticated = values.req_dep_aad().ok_or(Error::BinRequestSmall)?; + + let mai = auth.mai.try_into()?; + let keysize = match mai { + v @ AttestationMeasAlg::HmacSha512 => v.exp_size(), + } as usize; + + if keysize > values.sea() as usize { + return Err(Error::BinArcbSeaSmall(values.sea())); + } + + let decr = values.decrypt(arpk)?; + + // size sanitized by fence before + let meas_key = &decr.value()[..keysize]; + let nonce = if decr.value().len() == size_of::() { + Some( + (&decr.value()[keysize..decr.value().len()]) + .try_into() + .unwrap(), + ) + } else { + None + }; + let conf = AttestationConfidential::new(meas_key.to_vec(), nonce); + + Ok((auth.to_owned(), conf)) + } +} + +/// Confidential Data of an attestation request +/// +/// contains a measurement key and an optional nonce +#[derive(Debug)] +pub struct AttestationConfidential { + measurement_key: Confidential>, + nonce: Option>, +} + +impl AttestationConfidential { + /// Returns a reference to the measurement key of this [`AttestationConfidential`]. + pub fn measurement_key(&self) -> &[u8] { + self.measurement_key.value() + } + + /// Returns a reference to the nonce of this [`AttestationConfidential`]. 
+ pub fn nonce(&self) -> &Option> { + &self.nonce + } + + fn new(measurement_key: Vec, nonce: Option) -> Self { + Self { + measurement_key: measurement_key.into(), + nonce: nonce.map(Confidential::new), + } + } +} + +impl Request for AttestationRequest { + fn encrypt(&self, ctx: &ReqEncrCtx) -> Result> { + let conf = self.conf.value().as_bytes(); + let aad = self.aad(ctx)?; + ctx.encrypt_aead(&aad, conf).map(|res| res.data()) + } + + fn add_hostkey(&mut self, hostkey: PKey) { + self.keyslots.push(Keyslot::new(hostkey)) + } +} + +/// Versions for [`AttestationRequest`] +#[repr(u32)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum AttestationVersion { + /// Version 1 (= 0x0100) + One = 0x0100, +} + +impl TryFrom for AttestationVersion { + type Error = Error; + + fn try_from(value: u32) -> Result { + if value == Self::One as u32 { + Ok(Self::One) + } else { + Err(Error::BinArcbInvVersion(value)) + } + } +} + +impl From for RequestVersion { + fn from(val: AttestationVersion) -> Self { + val as RequestVersion + } +} + +/// Authenticated additional Data of an [`AttestationRequest`] +#[repr(C)] +#[derive(Debug, AsBytes, FromZeroes, FromBytes, Clone, Copy)] +pub struct AttestationAuthenticated { + flags: AttestationFlags, + mai: U32, + res: u32, +} +assert_size!(AttestationAuthenticated, 0x10); + +impl AttestationAuthenticated { + fn new(flags: AttestationFlags, mai: AttestationMeasAlg) -> Self { + Self { + flags, + mai: mai.into(), + res: 0, + } + } + + /// Returns a reference to the flags of this [`AttestationAuthenticated`]. + pub fn flags(&self) -> &AttestationFlags { + &self.flags + } + + /// Returns the [`AttestationMeasAlg`] of this [`AttestationAuthenticated`]. + /// + /// # Panics + /// + /// Panics if the library failed to set up the MAI correctly. + pub fn mai(&self) -> AttestationMeasAlg { + AttestationMeasAlg::try_from(self.mai).expect("ReqAuthData invariant hurt. 
Invalid MAI") + } +} + +/// Attestation flags +#[repr(C)] +#[derive(Default, Debug, AsBytes, FromZeroes, FromBytes, Clone, Copy)] +pub struct AttestationFlags(UvFlags); +static_assert!(AttestationFlags::FLAG_TO_ADD_SIZE.len() < 64); + +impl AttestationFlags { + /// Maps the flag to the (maximum) required size for the additional data + pub(crate) const FLAG_TO_ADD_SIZE: [u32; 4] = [0, 0, ATT_ADD_HASH_SIZE, ATT_ADD_HASH_SIZE]; + + /// Returns the maximum size this flag requires for additional data + pub fn expected_additional_size(&self) -> u32 { + Self::FLAG_TO_ADD_SIZE + .iter() + .enumerate() + .fold(0, |size, (b, s)| size + self.0.is_set(b as u8) as u32 * s) + } + + /// Flag 1 - use a nonce + /// + /// This attestation implementation forces the use of a nonce, so this will always be on and + /// the function is non-public + fn set_nonce(&mut self) { + self.0.set_bit(1); + } + + /// Flag 2 - request the image public host-key hash + /// + /// Asks the Ultravisor to provide the host-key hash that unpacked the SE-image to be added in + /// additional data. Requires 32 bytes. + pub fn set_image_phkh(&mut self) { + self.0.set_bit(2); + } + + /// Check weather the image public host key hash flag is on + pub fn image_phkh(&self) -> bool { + self.0.is_set(2) + } + + /// Flag 3 - request the attestation public host-key hash + /// + /// Asks the Ultravisor to provide the host-key hash that unpacked the attestation request to + /// be added in additional data. Requires 32 bytes. 
+ pub fn set_attest_phkh(&mut self) { + self.0.set_bit(3); + } + + /// Check weather the attestation public host key hash flag is on + pub fn attest_phkh(&self) -> bool { + self.0.is_set(3) + } +} + +#[repr(C)] +#[derive(Debug, AsBytes)] +struct ReqConfData { + meas_key: [u8; 64], + nonce: AttNonce, +} +assert_size!(ReqConfData, 80); + +impl ReqConfData { + fn random() -> Result> { + Ok(Confidential::new(Self { + meas_key: random_array()?, + nonce: random_array()?, + })) + } +} + +impl Zeroize for ReqConfData { + fn zeroize(&mut self) { + self.meas_key.zeroize(); + self.nonce.zeroize(); + } +} + +#[cfg(test)] +mod test { + use super::*; + + use crate::{get_test_asset, request::SymKey, test_utils::get_test_keys}; + + const ARPK: [u8; 32] = [0x17; 32]; + const NONCE: [u8; 16] = [0xab; 16]; + const MEAS: [u8; 64] = [0x77; 64]; + + fn mk_arcb() -> Vec { + let (cust_key, host_key) = get_test_keys(); + let ctx = ReqEncrCtx::new_aes_256( + Some([0x55; 12]), + Some(cust_key), + Some(SymKey::Aes256(ARPK.into())), + ) + .unwrap(); + + let mut flags = AttestationFlags::default(); + flags.set_image_phkh(); + flags.set_attest_phkh(); + + let mut arcb = AttestationRequest::new( + AttestationVersion::One, + AttestationMeasAlg::HmacSha512, + flags, + ) + .unwrap(); + + // manually set confidential data (API does not allow this) + arcb.conf.value_mut().nonce = NONCE; + arcb.conf.value_mut().meas_key = MEAS; + + arcb.add_hostkey(host_key); + arcb.encrypt(&ctx).unwrap() + } + + #[test] + fn arcb() { + let request = mk_arcb(); + let exp = get_test_asset!("exp/arcb.bin"); + + assert_eq!(request, exp); + } + + #[test] + fn decrypt_bin() { + let request = mk_arcb(); + let arpk = SymKey::Aes256(ARPK.into()); + let (_, conf) = AttestationRequest::decrypt_bin(&request, &arpk).unwrap(); + assert_eq!(conf.measurement_key(), &MEAS); + assert_eq!(conf.nonce().as_ref().unwrap().value(), &NONCE); + } + + #[test] + fn decrypt_bin_fail_magic() { + let arpk = SymKey::Aes256(ARPK.into()); + let mut 
tamp_arcb = mk_arcb(); + + // tamper magic + tamp_arcb[0] = 17; + let ret = AttestationRequest::decrypt_bin(&tamp_arcb, &arpk); + assert!(matches!(ret, Err(Error::NoArcb))); + } + + #[test] + fn decrypt_bin_fail_mai() { + let arpk = SymKey::Aes256(ARPK.into()); + let mut tamp_arcb = mk_arcb(); + + // tamper MAI + tamp_arcb[0x3b] = 17; + let ret = AttestationRequest::decrypt_bin(&tamp_arcb, &arpk); + println!("{ret:?}"); + assert!(matches!( + ret, + Err(Error::PvCore(pv_core::Error::BinArcbInvAlgorithm(17))) + )); + } + + #[test] + fn decrypt_bin_fail_aad() { + let arpk = SymKey::Aes256(ARPK.into()); + let mut tamp_arcb = mk_arcb(); + + // tamper AAD + tamp_arcb[0x3c] = 17; + let ret = AttestationRequest::decrypt_bin(&tamp_arcb, &arpk); + assert!(matches!(ret, Err(Error::GcmTagMismatch))); + } +} diff -Nru s390-tools-2.31.0/rust/pv/src/uvattest/attest.rs s390-tools-2.33.1/rust/pv/src/uvattest/attest.rs --- s390-tools-2.31.0/rust/pv/src/uvattest/attest.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/uvattest/attest.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 + +use super::AttNonce; +use crate::{ + attest::AttestationMeasAlg, brcb::BootHdrTags, crypto::calculate_hmac, request::Confidential, + uv::ConfigUid, Result, +}; +use openssl::{ + hash::MessageDigest, + pkey::{PKeyRef, Private}, +}; +use std::mem::size_of; +use zerocopy::{AsBytes, BigEndian, U16, U32}; + +#[cfg(doc)] +use crate::attest::AttestationRequest; + +/// Holds the data to be measured. 
+/// +/// The Attestation measurement is an authentication code of the following data: +/// +/// ```none +/// |-------------------------------| +/// | From SE-header: | +/// | Page List Digest (64) | +/// | Address List Digest (64) | +/// | Tweak List Digest (64) | +/// | SE Header Tag (16) | +/// | Configuration Unique Id (16) | +/// | user-data length (2) | +/// | zeros (2) | +/// | additional data length (4) | +/// | user-data (0-256) | +/// | optional nonce (0 or 16) | +/// | additional data (0+) | +/// |-------------------------------| +/// ``` +#[derive(Debug)] +pub struct AttestationItems(Confidential>); + +// tags: BootHdrTags, +// cuid: ConfigUid, +// user_data_len: U16, +// res: u16, +// additional_len: U32, +// user_data: Vec, +// nonce: Option<[u8; 16]>, +// additional: Vec, +impl AttestationItems { + /// Create a new attestation item struct. + /// + /// * `tags`: The tags from the SE header + /// * `cuid`: The Configuration Unique Id from the SE guest for which the Measurement was + /// calculated + /// * `user`: up to 256 bytes of arbitrary data generated on the SE-guest before measuring + /// * `nonce`: technically optional nonce, but [`AttestationRequest`] enforces it + /// * `additional`: additional data generated by the Firmware depending on the Attestation flags + /// + /// If size values of `user` or `additional` are longer than 16/32 bit they are silently + /// truncated. `user-data` is limited to 256 bytes architecture wise, and additional data is + /// limited to 8 pages by the uvdevice. Larger sizes will produce invalid measurements + /// anyhow. 
+ pub fn new( + tags: &BootHdrTags, + cuid: &ConfigUid, + user: Option<&[u8]>, + nonce: Option<&AttNonce>, + additional: Option<&[u8]>, + ) -> Self { + // expectations are ensured by ExchangeCtx invariants + let user = user.unwrap_or(&[]); + let user_data_len: U16 = (user.len() as u16).into(); + + let additional = additional.unwrap_or(&[]); + let additional_len: U32 = (additional.len() as u32).into(); + + let size = size_of::() // PLD ALD TLD TAG + + size_of::() + + size_of::() // user_len + + size_of::() // reserved + + size_of::() // additional_len + + user.len() + + match nonce { + Some(_) => size_of::(), + None => 0, + } + + additional.len(); + + let mut items = Vec::with_capacity(size); + items.extend_from_slice(tags.as_bytes()); + items.extend_from_slice(cuid.as_bytes()); + items.extend_from_slice(user_data_len.as_bytes()); + items.extend_from_slice(&[0, 0]); + items.extend_from_slice(additional_len.as_bytes()); + items.extend_from_slice(user); + if let Some(nonce) = nonce { + items.extend_from_slice(nonce); + } + items.extend_from_slice(additional); + assert!(items.len() == size); + Self(items.into()) + } +} + +/// Holds an attestation measurement +#[derive(Debug)] +#[allow(clippy::len_without_is_empty)] +pub struct AttestationMeasurement(Vec); +impl AttestationMeasurement { + /// Calculate an attestation measurement + pub fn calculate( + items: AttestationItems, + mai: AttestationMeasAlg, + meas_key: &PKeyRef, + ) -> Result { + match mai { + AttestationMeasAlg::HmacSha512 => { + calculate_hmac(meas_key, MessageDigest::sha512(), items.0.value()).map(Self) + } + } + } + + /// Returns the length of the [`AttestationMeasurement`]. 
+ pub fn len(&self) -> usize { + self.0.len() + } + + /// Securely compares the calculated measurement with a given one + /// + /// Exists early when sizes do not match + pub fn eq_secure(&self, other: &[u8]) -> bool { + if self.len() != other.len() { + return false; + } + openssl::memcmp::eq(&self.0, other) + } +} + +impl AsRef<[u8]> for AttestationMeasurement { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl From> for AttestationMeasurement { + fn from(value: Vec) -> Self { + Self(value) + } +} + +#[cfg(test)] +mod test { + use super::*; + use openssl::pkey::PKey; + + const M_KEY: [u8; 64] = [0x41; 64]; + const BOOT_HDR_TAGS: BootHdrTags = BootHdrTags::new([1; 64], [2; 64], [3; 64], [4; 16]); + const CUID: [u8; 16] = [5; 16]; + const USER: [u8; 256] = [7; 256]; + const NONCE: [u8; 16] = [8; 16]; + const ADDITIONAL: [u8; 128] = [9; 128]; + + // just for better output in case of a test failure + impl PartialEq<[u8]> for AttestationMeasurement { + fn eq(&self, other: &[u8]) -> bool { + self.eq_secure(other) + } + } + + #[test] + fn measurement_all() { + const EXP_HMAC: [u8; 64] = [ + 0x88, 0x79, 0x4c, 0x62, 0xcc, 0xe7, 0xbc, 0xf2, 0x62, 0x16, 0xde, 0xb3, 0xf4, 0x8f, + 0x13, 0xfe, 0xa6, 0x37, 0x4b, 0x6d, 0x7e, 0x35, 0xbc, 0xc5, 0xc2, 0xce, 0x68, 0x12, + 0x1d, 0xb6, 0xf4, 0x5d, 0xfc, 0x8c, 0x17, 0x18, 0x56, 0x46, 0x35, 0x49, 0x40, 0x8b, + 0xf8, 0xe7, 0xd1, 0xac, 0xa1, 0x1e, 0xfa, 0xd0, 0xa8, 0x78, 0xaf, 0x97, 0xdc, 0x9e, + 0x21, 0xa1, 0xfc, 0x2a, 0x32, 0xf3, 0xa6, 0x75, + ]; + let items = AttestationItems::new( + &BOOT_HDR_TAGS, + &CUID, + Some(&USER), + Some(&NONCE), + Some(&ADDITIONAL), + ); + let key = PKey::hmac(&M_KEY).unwrap(); + let meas = + AttestationMeasurement::calculate(items, AttestationMeasAlg::HmacSha512, &key).unwrap(); + assert_eq!(meas, EXP_HMAC[..]); + assert!(meas.eq_secure(&EXP_HMAC[..])); + } + + #[test] + fn measurement_user_add() { + const EXP_HMAC: [u8; 64] = [ + 0xfb, 0xd4, 0xf7, 0x38, 0xa3, 0x90, 0xed, 0xd9, 0x47, 0xcd, 0x4f, 
0x11, 0xaf, 0x3a, + 0x2f, 0x3b, 0xab, 0x2f, 0xdf, 0x8b, 0xf8, 0x9b, 0xf8, 0x1b, 0xeb, 0x49, 0x51, 0x17, + 0xf4, 0x38, 0x2c, 0xf4, 0x2f, 0x07, 0x30, 0xc8, 0xc7, 0xd9, 0xe3, 0xca, 0x27, 0xfb, + 0x25, 0xad, 0xfc, 0xeb, 0x21, 0x22, 0x4f, 0x57, 0xfd, 0xb3, 0x98, 0xdc, 0xf4, 0x1a, + 0x83, 0xc1, 0x46, 0xe6, 0xa2, 0x3d, 0xb7, 0x60, + ]; + let items = + AttestationItems::new(&BOOT_HDR_TAGS, &CUID, Some(&USER), None, Some(&ADDITIONAL)); + let key = PKey::hmac(&M_KEY).unwrap(); + let meas = + AttestationMeasurement::calculate(items, AttestationMeasAlg::HmacSha512, &key).unwrap(); + assert_eq!(meas, EXP_HMAC[..]); + assert!(meas.eq_secure(&EXP_HMAC[..])); + } + + #[test] + fn measurement_add() { + const EXP_HMAC: [u8; 64] = [ + 0x63, 0x67, 0x1f, 0xbf, 0x29, 0x50, 0x36, 0xeb, 0x10, 0x23, 0xea, 0x71, 0xf7, 0x18, + 0x2e, 0x7d, 0x63, 0x43, 0xdc, 0x7b, 0x2d, 0xa5, 0x84, 0xe8, 0x24, 0xd0, 0xa7, 0xd1, + 0x98, 0xab, 0x9c, 0xde, 0xd7, 0x56, 0xc9, 0x3b, 0x39, 0x05, 0x0f, 0xfb, 0x76, 0x45, + 0x55, 0xb0, 0x1f, 0x88, 0xcb, 0x82, 0x01, 0x7a, 0x6a, 0x15, 0xc7, 0xe0, 0xba, 0xfc, + 0x60, 0x05, 0xf1, 0xe4, 0xf7, 0x8a, 0xa1, 0x24, + ]; + let items = AttestationItems::new(&BOOT_HDR_TAGS, &CUID, None, None, Some(&ADDITIONAL)); + let key = PKey::hmac(&M_KEY).unwrap(); + let meas = + AttestationMeasurement::calculate(items, AttestationMeasAlg::HmacSha512, &key).unwrap(); + assert_eq!(meas, EXP_HMAC[..]); + assert!(meas.eq_secure(&EXP_HMAC[..])); + } + + #[test] + fn measurement_minimal() { + const EXP_HMAC: [u8; 64] = [ + 0xc5, 0xc3, 0x4c, 0x93, 0x83, 0x5d, 0x1e, 0xc2, 0x3f, 0x5c, 0x2d, 0x77, 0x8d, 0xfa, + 0x20, 0x12, 0x9b, 0x11, 0xb3, 0x05, 0x60, 0x17, 0x42, 0xcb, 0x2f, 0x38, 0xe0, 0xed, + 0x98, 0x94, 0xdc, 0xdb, 0x73, 0xfc, 0x86, 0x95, 0xab, 0x6a, 0x8d, 0xba, 0xd0, 0x74, + 0x40, 0x73, 0xdd, 0xc8, 0x1a, 0x5e, 0xaa, 0xfa, 0x52, 0xe4, 0xa1, 0x5a, 0xf8, 0xde, + 0xb8, 0xd7, 0x61, 0x09, 0x19, 0x22, 0x84, 0x7f, + ]; + let items = AttestationItems::new(&BOOT_HDR_TAGS, &CUID, None, None, None); + let key = 
PKey::hmac(&M_KEY).unwrap(); + let meas = + AttestationMeasurement::calculate(items, AttestationMeasAlg::HmacSha512, &key).unwrap(); + assert_eq!(meas, EXP_HMAC[..]); + assert!(meas.eq_secure(&EXP_HMAC[..])); + } +} diff -Nru s390-tools-2.31.0/rust/pv/src/uvattest.rs s390-tools-2.33.1/rust/pv/src/uvattest.rs --- s390-tools-2.31.0/rust/pv/src/uvattest.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/uvattest.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,5 @@ +pub mod additional; +pub mod arcb; +pub mod attest; + +type AttNonce = [u8; 16]; diff -Nru s390-tools-2.31.0/rust/pv/src/uvsecret/asrcb.rs s390-tools-2.33.1/rust/pv/src/uvsecret/asrcb.rs --- s390-tools-2.31.0/rust/pv/src/uvsecret/asrcb.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/uvsecret/asrcb.rs 2024-05-28 08:26:36.000000000 +0200 @@ -5,53 +5,21 @@ use super::user_data::UserData; use crate::{ assert_size, + crypto::{hkdf_rfc_5869, AesGcmResult}, misc::Flags, - request::{ - hkdf_rfc_5869, - openssl::{ - pkey::{PKey, Private, Public}, - Md, - }, - uvsecret::{ExtSecret, GuestSecret}, - Aad, BootHdrTags, Keyslot, ReqEncrCtx, Request, Secret, - }, + req::{Aad, Keyslot, ReqEncrCtx}, + request::{BootHdrTags, Confidential, Request}, + secret::{ExtSecret, GuestSecret}, uv::{ConfigUid, UvFlags}, Result, }; +use openssl::{ + md::Md, + pkey::{PKey, Private, Public}, +}; use pv_core::request::RequestVersion; use zerocopy::AsBytes; -/// Internal wrapper for Guest Secret, so that we can dump it in the form the UV wants it to be -#[derive(Debug, Clone)] -struct BinGuestSecret(GuestSecret); -impl BinGuestSecret { - /// Reference to the confidential data - fn confidential(&self) -> &[u8] { - match &self.0 { - GuestSecret::Null => &[], - GuestSecret::Association { secret, .. } => secret.value().as_slice(), - } - } - fn dump_auth(&self) -> Vec { - match &self.0 { - GuestSecret::Null => vec![0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - GuestSecret::Association { id, .. 
} => { - let mut buf = vec![0; 48]; - buf[3] = 2; - buf[7] = 0x20; - buf[16..48].copy_from_slice(id.as_slice()); - buf - } - } - } -} - -impl From for BinGuestSecret { - fn from(secret: GuestSecret) -> Self { - BinGuestSecret(secret) - } -} - /// Authenticated data w/o user data #[repr(C)] #[derive(Debug, Clone, Copy, AsBytes)] @@ -74,14 +42,14 @@ } } -#[derive(Debug, Clone)] +#[derive(Debug)] struct ReqConfData { - secret: BinGuestSecret, - extension_secret: Secret<[u8; 32]>, + secret: GuestSecret, + extension_secret: Confidential<[u8; 32]>, } impl ReqConfData { - fn to_bytes(&self) -> Secret> { + fn to_bytes(&self) -> Confidential> { let secret = self.secret.confidential(); let mut v = vec![0; secret.len() + 32]; @@ -140,10 +108,10 @@ /// Add-secret request Control Block /// -/// An ASRCB wraps a secret to transport it securely to the Ultravisor. +/// An ASRCB wraps a secret to securely transport it to the Ultravisor. /// /// Layout: -///```none +/// ```none /// _______________________________________________________________ /// | generic header (48) /// | --------------------------------------------------- | @@ -162,8 +130,8 @@ /// | --------------------------------------------------- | /// | AES GCM Tag (16) | /// |_____________________________________________________________| -///``` -#[derive(Clone, Debug)] +/// ``` +#[derive(Debug)] pub struct AddSecretRequest { version: AddSecretVersion, aad: ReqAuthData, @@ -180,7 +148,6 @@ /// /// The request has no extension secret, no configuration UID, no host-keys, /// and no user data - /// pub fn new( version: AddSecretVersion, secret: GuestSecret, @@ -189,8 +156,8 @@ ) -> Self { AddSecretRequest { conf: ReqConfData { - extension_secret: Secret::new([0; 32]), - secret: secret.into(), + extension_secret: Confidential::new([0; 32]), + secret, }, aad: ReqAuthData::new(boot_tags, flags), keyslots: vec![], @@ -216,7 +183,7 @@ ExtSecret::Derived(cck) => hkdf_rfc_5869( Md::sha512(), cck.value(), - 
self.aad.boot_tags.seht(), + self.aad.boot_tags.tag(), DER_EXT_SECRET_INFO, )? .into(), @@ -226,7 +193,7 @@ /// Returns a reference to the guest secret of this [`AddSecretRequest`]. pub fn guest_secret(&self) -> &GuestSecret { - &self.conf.secret.0 + &self.conf.secret } /// Add user-data to the Add-Secret request @@ -239,16 +206,21 @@ /// - RSA 2048 bit (up to 256 byte message) /// - RSA 3072 bit (up to 128 byte message) /// - /// The signature can be verified during the verification of the secret-request on the target machine. - pub fn set_user_data(&mut self, msg: Vec, skey: Option>) -> Result<()> { - self.user_data = UserData::new(skey, msg)?; + /// The signature can be verified during the verification of the secret-request on the target + /// machine. + pub fn set_user_data>>( + &mut self, + msg: T, + skey: Option>, + ) -> Result<()> { + self.user_data = UserData::new(skey, msg.into())?; Ok(()) } - /// compiles the authenticated area of this request + /// Compiles the authenticated area of this request fn aad(&self, ctx: &ReqEncrCtx, conf_len: usize) -> Result> { let cust_pub_key = ctx.key_coords()?; - let secr_auth = self.conf.secret.dump_auth(); + let secr_auth = self.conf.secret.auth(); let user_data = self.user_data.data(); let mut aad: Vec = Vec::with_capacity(5 + self.keyslots.len()); @@ -261,7 +233,7 @@ } aad.push(Aad::Plain(cust_pub_key.as_ref())); self.keyslots.iter().for_each(|k| aad.push(Aad::Ks(k))); - aad.push(Aad::Plain(&secr_auth)); + aad.push(Aad::Plain(secr_auth.get())); ctx.build_aad(self.version.into(), &aad, conf_len, self.user_data.magic()) } @@ -283,13 +255,18 @@ Ok(res) } - /// encrypt data, sign request with user-provided signing key, insert signature into aad, + /// Encrypts data, sign request with user-provided signing key, insert signature into aad, /// calculate request tag fn encrypt_with_signed_user_data(&self, ctx: &ReqEncrCtx) -> Result> { - //encrypt data w/o aead + // encrypt data w/o aead let conf = self.conf.to_bytes(); let 
aad = self.aad(ctx, conf.value().len())?; - let (mut buf, aad_range, encr_range, _) = ctx.encrypt_aead(&aad, conf.value())?; + let AesGcmResult { + mut buf, + aad_range, + encr_range, + .. + } = ctx.encrypt_aead(&aad, conf.value())?; drop(aad); @@ -303,7 +280,7 @@ // encrypt again with signed data buf[encr_range.clone()].copy_from_slice(conf.value()); ctx.encrypt_aead(&buf[aad_range], &buf[encr_range]) - .map(|(buf, ..)| buf) + .map(|res| res.data()) } } @@ -313,7 +290,7 @@ UserData::Null | UserData::Unsigned(_) => { let conf = self.conf.to_bytes(); let aad = self.aad(ctx, conf.value().len())?; - ctx.encrypt_aead(&aad, conf.value()).map(|(buf, ..)| buf) + ctx.encrypt_aead(&aad, conf.value()).map(|res| res.data()) } _ => self.encrypt_with_signed_user_data(ctx), } @@ -323,34 +300,3 @@ self.keyslots.push(Keyslot::new(hostkey)) } } - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn guest_secret_bin_null() { - let gs: BinGuestSecret = GuestSecret::Null.into(); - let gs_bytes = gs.dump_auth(); - let exp = vec![0u8, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - - assert_eq!(exp, gs_bytes); - assert_eq!(&Vec::::new(), gs.confidential()) - } - - #[test] - fn guest_secret_bin_ap() { - let gs: BinGuestSecret = GuestSecret::Association { - name: "test".to_string(), - id: [1; 32], - secret: [2; 32].into(), - } - .into(); - let gs_bytes_auth = gs.dump_auth(); - let mut exp = vec![0u8, 0, 0, 2, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0]; - exp.extend([1; 32]); - - assert_eq!(exp, gs_bytes_auth); - assert_eq!(&[2; 32], gs.confidential()); - } -} diff -Nru s390-tools-2.31.0/rust/pv/src/uvsecret/ext_secret.rs s390-tools-2.33.1/rust/pv/src/uvsecret/ext_secret.rs --- s390-tools-2.31.0/rust/pv/src/uvsecret/ext_secret.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/uvsecret/ext_secret.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,13 +2,13 @@ // // Copyright IBM Corp. 
2023 -use crate::request::Secret; +use crate::request::Confidential; -/// Extension Secret for [`crate::request::uvsecret::AddSecretRequest`] +/// Extension Secret for [`crate::secret::AddSecretRequest`] #[derive(Debug, Clone)] pub enum ExtSecret { /// A bytepattern that must be equal for each request targeting the same SE-guest instance - Simple(Secret<[u8; 32]>), // contains the secret + Simple(Confidential<[u8; 32]>), // contains the secret /// A secret that is derived from the Customer communication key from the SE-header - Derived(Secret<[u8; 32]>), // contains the cck + Derived(Confidential<[u8; 32]>), // contains the cck } diff -Nru s390-tools-2.31.0/rust/pv/src/uvsecret/guest_secret.rs s390-tools-2.33.1/rust/pv/src/uvsecret/guest_secret.rs --- s390-tools-2.31.0/rust/pv/src/uvsecret/guest_secret.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/uvsecret/guest_secret.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,19 +2,25 @@ // // Copyright IBM Corp. 2023 -#[allow(unused_imports)] //used for more convenient docstring +#[allow(unused_imports)] // used for more convenient docstring use super::asrcb::AddSecretRequest; +use crate::assert_size; use crate::{ - request::{hash, openssl::MessageDigest, random_array, Secret}, + crypto::{hash, random_array}, + request::Confidential, Result, }; -use pv_core::for_pv::{ser_gsid, SECRET_ID_SIZE}; +use byteorder::BigEndian; +use openssl::hash::MessageDigest; +use pv_core::uv::{ListableSecretType, SecretId}; use serde::{Deserialize, Serialize}; -use std::convert::TryInto; +use std::{convert::TryInto, fmt::Display}; +use zerocopy::{AsBytes, U16, U32}; + +const ASSOC_SECRET_SIZE: usize = 32; -const SECRET_SIZE: usize = 32; /// A Secret to be added in [`AddSecretRequest`] -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] pub enum GuestSecret { /// No guest secret Null, @@ -24,12 +30,11 @@ Association { /// Name of the secret name: String, 
- #[serde(serialize_with = "ser_gsid", deserialize_with = "de_gsid")] /// SHA256 hash of [`GuestSecret::Association::name`] - id: [u8; SECRET_ID_SIZE], - /// Confidential actual assocuiation secret (32 bytes) + id: SecretId, + /// Confidential actual association secret (32 bytes) #[serde(skip)] - secret: Secret<[u8; SECRET_SIZE]>, + secret: Confidential<[u8; ASSOC_SECRET_SIZE]>, }, } @@ -44,9 +49,12 @@ /// This function will return an error if OpenSSL cannot create a hash. pub fn association(name: &str, secret: O) -> Result where - O: Into>, + O: Into>, { - let id = hash(MessageDigest::sha256(), name.as_bytes())?.to_vec(); + let id: [u8; SecretId::ID_SIZE] = hash(MessageDigest::sha256(), name.as_bytes())? + .to_vec() + .try_into() + .unwrap(); let secret = match secret.into() { Some(s) => s, None => random_array()?, @@ -54,38 +62,106 @@ Ok(GuestSecret::Association { name: name.to_string(), - id: id.try_into().unwrap(), + id: id.into(), secret: secret.into(), }) } + + /// Reference to the confidential data + pub(crate) fn confidential(&self) -> &[u8] { + match &self { + GuestSecret::Null => &[], + GuestSecret::Association { secret, .. } => secret.value().as_slice(), + } + } + + /// Creates the non-confidential part of the secret ad-hoc + pub(crate) fn auth(&self) -> SecretAuth { + match &self { + GuestSecret::Null => SecretAuth::Null, + // Panic: every non null secret type is listable -> no panic + listable => { + SecretAuth::Listable(ListableSecretHdr::from_guest_secret(listable).unwrap()) + } + } + } + + /// Returns the UV type ID + fn kind(&self) -> u16 { + match self { + // Null is not listable, but the ListableSecretType provides the type constant (1) + GuestSecret::Null => ListableSecretType::NULL, + GuestSecret::Association { .. } => ListableSecretType::ASSOCIATION, + } + } + + /// Size of the secret value + fn secret_len(&self) -> u32 { + match self { + GuestSecret::Null => 0, + GuestSecret::Association { secret, .. 
} => secret.value().len() as u32, + } + } + + /// Returns the ID of the secret type (if any) + fn id(&self) -> Option { + match self { + GuestSecret::Null => None, + GuestSecret::Association { id, .. } => Some(id.to_owned()), + } + } } -fn de_gsid<'de, D>(de: D) -> Result<[u8; 32], D::Error> -where - D: serde::Deserializer<'de>, -{ - struct FieldVisitor; - - impl<'de> serde::de::Visitor<'de> for FieldVisitor { - type Value = [u8; SECRET_ID_SIZE]; - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - formatter.write_str("a `32 bytes long hexstring` prepended with 0x") - } - fn visit_str(self, s: &str) -> Result - where - E: serde::de::Error, - { - if s.len() != SECRET_ID_SIZE * 2 + 2 { - return Err(serde::de::Error::invalid_length(s.len(), &self)); + +impl Display for GuestSecret { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GuestSecret::Null => write!(f, "Meta"), + gs => { + let kind: U16 = gs.kind().into(); + let st: ListableSecretType = kind.into(); + write!(f, "{st}") } - let nb = s.strip_prefix("0x").ok_or_else(|| { - serde::de::Error::invalid_value(serde::de::Unexpected::Str(s), &self) - })?; - crate::misc::parse_hex(nb) - .try_into() - .map_err(|_| serde::de::Error::invalid_value(serde::de::Unexpected::Str(s), &self)) } } - de.deserialize_identifier(FieldVisitor) +} + +#[derive(Debug)] +pub(crate) enum SecretAuth { + Null, + Listable(ListableSecretHdr), +} + +impl SecretAuth { + pub fn get(&self) -> &[u8] { + match self { + SecretAuth::Null => &[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + SecretAuth::Listable(h) => h.as_bytes(), + } + } +} + +#[repr(C)] +#[derive(Debug, AsBytes)] +pub(crate) struct ListableSecretHdr { + res0: u16, + kind: U16, + secret_len: U32, + res8: u64, + id: SecretId, +} +assert_size!(ListableSecretHdr, 0x30); + +impl ListableSecretHdr { + fn from_guest_secret(gs: &GuestSecret) -> Option { + let id = gs.id()?; + Some(Self { + res0: 0, + kind: 
gs.kind().into(), + secret_len: gs.secret_len().into(), + res8: 0, + id, + }) + } } #[cfg(test)] @@ -93,7 +169,6 @@ use super::*; use serde_test::{assert_tokens, Token}; - //todo test GuestSecret::association #[test] fn association() { let secret_value = [0x11; 32]; @@ -106,7 +181,7 @@ let secret = GuestSecret::association("association secret", secret_value).unwrap(); let exp = GuestSecret::Association { name, - id: exp_id, + id: exp_id.into(), secret: secret_value.into(), }; assert_eq!(secret, exp); @@ -121,7 +196,7 @@ ]; let asc = GuestSecret::Association { name: "test123".to_string(), - id, + id: id.into(), secret: [0; 32].into(), }; @@ -141,4 +216,29 @@ ], ); } + + #[test] + fn guest_secret_bin_null() { + let gs = GuestSecret::Null; + let gs_bytes = gs.auth(); + let exp = vec![0u8, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + + assert_eq!(exp, gs_bytes.get()); + assert_eq!(&Vec::::new(), gs.confidential()) + } + + #[test] + fn guest_secret_bin_ap() { + let gs = GuestSecret::Association { + name: "test".to_string(), + id: [1; 32].into(), + secret: [2; 32].into(), + }; + let gs_bytes_auth = gs.auth(); + let mut exp = vec![0u8, 0, 0, 2, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0]; + exp.extend([1; 32]); + + assert_eq!(exp, gs_bytes_auth.get()); + assert_eq!(&[2; 32], gs.confidential()); + } } diff -Nru s390-tools-2.31.0/rust/pv/src/uvsecret/user_data.rs s390-tools-2.33.1/rust/pv/src/uvsecret/user_data.rs --- s390-tools-2.31.0/rust/pv/src/uvsecret/user_data.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/uvsecret/user_data.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,23 +1,16 @@ +use crate::assert_size; use crate::{ crypto::{sign_msg, verify_signature}, req::BinReqValues, request::{ - openssl::{ - pkey::{PKey, Private}, - MessageDigest, - }, - uvsecret::{AddSecretRequest, AddSecretVersion}, + openssl::pkey::{HasParams, HasPublic, Id, PKey, PKeyRef, Private, Public}, RequestMagic, }, + secret::{AddSecretMagic, AddSecretRequest, 
AddSecretVersion, UserDataType}, Error, Result, }; -use openssl::{ - nid::Nid, - pkey::{HasParams, HasPublic, Id, PKeyRef, Public}, -}; -use pv_core::request::uvsecret::AddSecretMagic; -use pv_core::request::uvsecret::UserDataType; -use utils::assert_size; +use openssl::hash::MessageDigest; +use openssl::nid::Nid; use zerocopy::{AsBytes, BigEndian, FromBytes, FromZeroes, U16}; /// User data. @@ -119,11 +112,12 @@ let magic: AddSecretMagic = self.data_type().into(); magic.get() } + /// Creates new user data /// /// Verifies that the provided data + signature fits into 512 bytes /// - /// #Error + /// # Error /// An error is reported if the provided data and the signature would not fit into 512 bytes /// An error is reported if the key is not of type RSA (2048|3072) or EC(specp521r1) pub(super) fn new(sign_key: Option>, data: Vec) -> Result { @@ -252,7 +246,7 @@ /// /// Extracrted user-data if available /// -/// #Errors +/// # Errors /// /// returns an error if /// - No sound add-secret request presented @@ -271,7 +265,7 @@ return Err(Error::BinAsrcbInvVersion); } - //preventing the two lines after the truncate from panicking + // preventing the two lines after the truncate from panicking let req_len = req.len(); if asrcb.len() < req_len || req_len < AddSecretRequest::V1_USER_DATA_OFFS + UserData::USER_DATA_SIZE @@ -323,6 +317,7 @@ /// Overwrites the signature in the buf with zeros. 
/// /// #Panics + /// /// Panics it provided buffer is smaller that 512 bytes or kind is Null or Unsigned fn new(buf: &mut [u8], kind: UserDataType) -> Self { assert!(buf.len() >= 0x200); @@ -359,7 +354,7 @@ UserDataType::Unsigned => unreachable!(), }; - //overwrite signature field with zeros + // overwrite signature field with zeros sgn.fill(0); ret } diff -Nru s390-tools-2.31.0/rust/pv/src/uvsecret/uvc.rs s390-tools-2.33.1/rust/pv/src/uvsecret/uvc.rs --- s390-tools-2.31.0/rust/pv/src/uvsecret/uvc.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/uvsecret/uvc.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,123 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright IBM Corp. 2023 - -use crate::{ - requires_feat, - uv::{uv_ioctl, UvCmd, UvDevice}, - Error, Result, PAGESIZE, -}; -use pv_core::request::{uvsecret::AddSecretMagic, MagicValue}; -use std::io::Read; -use std::usize; - -/// _List Secrets_ Ultravisor command. -/// -/// The List Secrets Ultravisor call is used to list the -/// secrets that are in the secret store for the current SE-guest. -/// -#[doc = requires_feat!(uvsecret)] -pub struct ListCmd(Vec); -impl ListCmd { - fn with_size(size: usize) -> Self { - Self(vec![0; size]) - } -} - -impl Default for ListCmd { - fn default() -> Self { - Self::with_size(PAGESIZE) - } -} - -impl UvCmd for ListCmd { - fn data(&mut self) -> Option<&mut [u8]> { - Some(self.0.as_mut_slice()) - } - - fn cmd(&self) -> u64 { - uv_ioctl(UvDevice::LIST_SECRET_NR) - } - - fn rc_fmt(&self, _rc: u16, _rrc: u16) -> Option<&'static str> { - None - } -} - -/// _Add Secret_ Ultravisor command. -/// -/// The Add Secret Ultravisor-call is used to add a secret -/// to the secret store for the current SE-guest. -/// -#[doc = requires_feat!(uvsecret)] -pub struct AddCmd(Vec); - -impl AddCmd { - /// Create a new Add Secret command using the provided data. 
- /// - /// # Errors - /// - /// This function will return an error if the provided data does not start with the - /// ['crate::AddSecretRequest'] magic Value. - pub fn new(bin_add_secret_req: &mut R) -> Result { - let mut data = Vec::with_capacity(PAGESIZE); - bin_add_secret_req.read_to_end(&mut data)?; - - if !AddSecretMagic::starts_with_magic(&data) { - return Err(Error::NoAsrcb); - } - Ok(Self(data)) - } -} - -impl UvCmd for AddCmd { - fn data(&mut self) -> Option<&mut [u8]> { - Some(&mut self.0) - } - - fn cmd(&self) -> u64 { - uv_ioctl(UvDevice::ADD_SECRET_NR) - } - - fn rc_fmt(&self, rc: u16, _rrc: u16) -> Option<&'static str> { - match rc { - 0x0101 => Some("not allowed to modify the secret store"), - 0x0102 => Some("secret store locked"), - 0x0103 => Some("access exception when accessing request control block"), - 0x0104 => Some("unsupported add secret version"), - 0x0105 => Some("invalid request size"), - 0x0106 => Some("invalid number of host-keys"), - 0x0107 => Some("unsupported flags specified"), - 0x0108 => Some("unable to decrypt the request"), - 0x0109 => Some("unsupported secret provided"), - 0x010a => Some("invalid length for the specified secret"), - 0x010b => Some("secret store full"), - 0x010c => Some("unable to add secret"), - 0x010d => Some("dump in progress, try again later"), - _ => None, - } - } -} - -/// _Lock Secret Store_ Ultravisor command. -/// -/// The Lock Secret Store Ultravisor-call is used to block -/// all changes to the secret store. Upon successful -/// completion of a Lock Secret Store Ultravisor-call, any -/// request to modify the secret store will fail. 
-/// -#[doc = requires_feat!(uvsecret)] -pub struct LockCmd; -impl UvCmd for LockCmd { - fn cmd(&self) -> u64 { - uv_ioctl(UvDevice::LOCK_SECRET_NR) - } - - fn rc_fmt(&self, rc: u16, _rrc: u16) -> Option<&'static str> { - match rc { - 0x0101 => Some("not allowed to modify the secret store"), - 0x0102 => Some("secret store already locked"), - _ => None, - } - } -} diff -Nru s390-tools-2.31.0/rust/pv/src/verify/helper.rs s390-tools-2.33.1/rust/pv/src/verify/helper.rs --- s390-tools-2.31.0/rust/pv/src/verify/helper.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/verify/helper.rs 2024-05-28 08:26:36.000000000 +0200 @@ -3,43 +3,38 @@ // Copyright IBM Corp. 2023 use crate::error::bail_hkd_verify; -use crate::misc::read_crls; +use crate::openssl_extensions::{AkidCheckResult, AkidExtension}; use crate::HkdVerifyErrorType::*; use crate::{Error, Result}; -use curl::easy::{Easy2, Handler, WriteError}; use log::debug; use openssl::{ asn1::{Asn1Time, Asn1TimeRef}, error::ErrorStack, nid::Nid, ssl::SslFiletype, - stack::{Stack, Stackable}, + stack::Stack, x509::{ - store::{File, X509Lookup, X509StoreBuilder, X509StoreBuilderRef, X509StoreRef}, + store::{File, X509Lookup, X509StoreBuilder, X509StoreRef}, verify::{X509VerifyFlags, X509VerifyParam}, - X509Crl, X509CrlRef, X509Name, X509NameRef, X509PurposeId, X509Ref, X509StoreContext, + X509CrlRef, X509Name, X509NameRef, X509PurposeId, X509Ref, X509StoreContext, X509StoreContextRef, X509VerifyResult, X509, }, }; -use openssl_extensions::{ - akid::{AkidCheckResult, AkidExtension}, - crl::X509StoreExtension, -}; -use std::{cmp::Ordering, ffi::c_int, time::Duration, usize}; +use std::path::Path; +use std::str::from_utf8; +use std::{cmp::Ordering, ffi::c_int}; /// Minimum security level for the keys/certificates used to establish a chain of /// trust (see https://www.openssl.org/docs/man1.1.1/man3/X509_VERIFY_PARAM_set_auth_level.html /// for details). 
-/// const SECURITY_LEVEL: usize = 2; const SECURITY_BITS_ARRAY: [u32; 6] = [0, 80, 112, 128, 192, 256]; const SECURITY_BITS: u32 = SECURITY_BITS_ARRAY[SECURITY_LEVEL]; const SECURITY_CHAIN_MAX_LEN: c_int = 2; -/// verifies that the HKD +/// Verifies that the HKD /// * has enough security bits /// * is inside its validity period -/// * issuer name is the subject name of the [`sign_key`] /// * the Authority Key ID matches the Signing Key ID of the [`sign_key`] pub fn verify_hkd_options(hkd: &X509Ref, sign_key: &X509Ref) -> Result<()> { let hk_pkey = hkd.public_key()?; @@ -48,14 +43,11 @@ if SECURITY_BITS > 0 && SECURITY_BITS > security_bits { return Err(Error::HkdVerify(SecurityBits(security_bits, SECURITY_BITS))); } - // TODO rust-openssl fix X509::not.after/before() impl to return Option& not panic on nullptr from C? - //try_... rust-openssl - // verify that the hkd is still valid + // TODO rust-openssl fix X509::not.after/before() impl to return Option& not panic on nullptr + // from C? try_... rust-openssl + // verify that the HKD is still valid check_validity_period(hkd.not_before(), hkd.not_after())?; - // check if hkd.issuer_name == issuer.subject - check_x509_name_equal(sign_key.subject_name(), hkd.issuer_name())?; - // verify that the AKID of the hkd matches the SKID of the issuer if let Some(akid) = hkd.akid() { if akid.check(sign_key) != AkidCheckResult::OK { @@ -75,9 +67,6 @@ return None; } } - - check_x509_name_equal(crl.issuer_name(), issuer.subject_name()).ok()?; - match crl.verify(issuer.public_key().ok()?.as_ref()).ok()? { true => Some(()), false => None, @@ -85,10 +74,10 @@ } /// Setup the x509Store such that it can be used it for verifying certificates -pub fn store_setup( - root_ca_path: &Option, - crl_paths: &[String], - cert_w_crl_paths: &[String], +pub fn store_setup, Q: AsRef, R: AsRef>( + root_ca_path: Option

, + crl_paths: &[Q], + cert_w_crl_paths: &[R], ) -> Result { let mut x509store = X509StoreBuilder::new()?; @@ -99,7 +88,7 @@ for crl in crl_paths { load_crl_to_store(&mut x509store, crl, true).map_err(|source| Error::X509Load { - path: crl.to_owned(), + path: crl.as_ref().into(), ty: Error::CRL, source, })?; @@ -107,7 +96,7 @@ for crl in cert_w_crl_paths { load_crl_to_store(&mut x509store, crl, false).map_err(|source| Error::X509Load { - path: crl.to_owned(), + path: crl.as_ref().into(), ty: Error::CRL, source, })?; @@ -192,22 +181,12 @@ Ok((ibm_z_sign_key, chain)) } -/// for all certs load the first CRL specified into our store -pub fn download_crls_into_store(store: &mut X509StoreBuilderRef, crts: &[X509]) -> Result<()> { - for crt in crts { - debug!("Download crls for {crt:?}"); - if let Some(crl) = download_first_crl_from_x509(crt)? { - crl.iter().try_for_each(|c| store.add_crl(c))?; - } - } - Ok(()) -} - // Name Entry values of an IBM Z key signing cert -//Asn1StringRef::as_slice aka ASN1_STRING_get0_data gives a string without \0 delimiter +// Asn1StringRef::as_slice aka ASN1_STRING_get0_data gives a string without \0 delimiter const IBM_Z_COMMON_NAME: &[u8; 43usize] = b"International Business Machines Corporation"; const IBM_Z_COUNTRY_NAME: &[u8; 2usize] = b"US"; -const IBM_Z_LOCALITY_NAME: &[u8; 12usize] = b"Poughkeepsie"; +const IBM_Z_LOCALITY_NAME_POUGHKEEPSIE: &[u8; 12usize] = b"Poughkeepsie"; +const IBM_Z_LOCALITY_NAME_ARMONK: &[u8; 6usize] = b"Armonk"; const IBM_Z_ORGANIZATIONAL_UNIT_NAME_SUFFIX: &str = "Key Signing Service"; const IBM_Z_ORGANIZATION_NAME: &[u8; 43usize] = b"International Business Machines Corporation"; const IBM_Z_STATE: &[u8; 8usize] = b"New York"; @@ -226,7 +205,8 @@ if subj.entries().count() != IMB_Z_ENTRY_COUNT || !name_data_eq(subj, Nid::COUNTRYNAME, IBM_Z_COUNTRY_NAME) || !name_data_eq(subj, Nid::STATEORPROVINCENAME, IBM_Z_STATE) - || !name_data_eq(subj, Nid::LOCALITYNAME, IBM_Z_LOCALITY_NAME) + || !(name_data_eq(subj, 
Nid::LOCALITYNAME, IBM_Z_LOCALITY_NAME_POUGHKEEPSIE) + || name_data_eq(subj, Nid::LOCALITYNAME, IBM_Z_LOCALITY_NAME_ARMONK)) || !name_data_eq(subj, Nid::ORGANIZATIONNAME, IBM_Z_ORGANIZATION_NAME) || !name_data_eq(subj, Nid::COMMONNAME, IBM_Z_COMMON_NAME) { @@ -255,35 +235,35 @@ } } -fn load_root_ca(path: &str, x509_store: &mut X509StoreBuilder) -> Result<()> { +fn load_root_ca>(path: P, x509_store: &mut X509StoreBuilder) -> Result<()> { let lu = x509_store.add_lookup(X509Lookup::::file())?; // Try to load cert as PEM file - match lu.load_cert_file(path, SslFiletype::PEM) { + match lu.load_cert_file(&path, SslFiletype::PEM) { Ok(_) => lu - .load_crl_file(path, SslFiletype::PEM) + .load_crl_file(&path, SslFiletype::PEM) .map(|_| ()) .or(Ok(())), // Not a PEM file? try ASN1 Err(_) => lu - .load_cert_file(path, SslFiletype::ASN1) + .load_cert_file(&path, SslFiletype::ASN1) .map(|_| ()) .map_err(|source| Error::X509Load { - path: path.to_string(), + path: path.as_ref().into(), ty: Error::CERT, source, }), } } -fn load_crl_to_store( +fn load_crl_to_store>( x509_store: &mut X509StoreBuilder, - path: &str, + path: P, err_out_empty_crl: bool, -) -> std::result::Result<(), openssl::error::ErrorStack> { +) -> std::result::Result<(), ErrorStack> { let lu = x509_store.add_lookup(X509Lookup::::file())?; // Try to load cert as PEM file - if lu.load_crl_file(path, SslFiletype::PEM).is_err() { + if lu.load_crl_file(&path, SslFiletype::PEM).is_err() { // Not a PEM file? 
try read as ASN1 let res = lu.load_crl_file(path, SslFiletype::ASN1); if err_out_empty_crl { @@ -293,7 +273,7 @@ Ok(()) } -///Run through the forest of the distribution points and find them +/// Run through the forest of the distribution points and find them pub fn x509_dist_points(cert: &X509Ref) -> Vec { let mut res = Vec::::with_capacity(1); let dps = match cert.crl_distribution_points() { @@ -319,14 +299,17 @@ res } -const CRL_TIMEOUT_MAX: Duration = Duration::from_secs(3); - /// Searches for CRL Distribution points and downloads the CRL. Stops after the first successful /// download. /// -/// Error if sth bad(=unexpected) happens (not bad: crl not available at link, unexpected format) -/// Other issues are mapped to Ok(None) -pub fn download_first_crl_from_x509(cert: &X509Ref) -> Result>> { +/// Error if something bad(=unexpected) happens (not bad: CRL not available at link, unexpected +/// format) Other issues are mapped to Ok(None) +#[cfg(not(test))] +pub fn download_first_crl_from_x509(cert: &X509Ref) -> Result>> { + use crate::utils::read_crls; + use curl::easy::{Easy2, Handler, WriteError}; + use std::time::Duration; + const CRL_TIMEOUT_MAX: Duration = Duration::from_secs(3); struct Buf(Vec); impl Handler for Buf { @@ -367,24 +350,6 @@ } } -fn check_x509_name_equal(lhs: &X509NameRef, rhs: &X509NameRef) -> Result<()> { - if lhs.entries().count() != rhs.entries().count() { - bail_hkd_verify!(IssuerMismatch); - } - - for l in lhs.entries() { - // search for the matching value in the rhs names - // found none? -> names are not equal - if !rhs - .entries() - .any(|r| l.data().as_slice() == r.data().as_slice()) - { - bail_hkd_verify!(IssuerMismatch); - } - } - Ok(()) -} - const NIDS_CORRECT_ORDER: [Nid; 6] = [ Nid::COUNTRYNAME, Nid::ORGANIZATIONNAME, @@ -393,10 +358,8 @@ Nid::STATEORPROVINCENAME, Nid::COMMONNAME, ]; -/** - * Workaround to fix the mismatch between issuer name of the - * IBM Z signing CRLs and the IBM Z signing key subject name. 
- */ +/// Workaround to fix the mismatch between issuer name of the +/// IBM Z signing CRLs and the IBM Z signing key subject name. pub fn reorder_x509_names(subject: &X509NameRef) -> std::result::Result { let mut correct_subj = X509Name::builder()?; for nid in NIDS_CORRECT_ORDER { @@ -407,21 +370,32 @@ Ok(correct_subj.build()) } -pub fn stack_err_hlp( - e: ErrorStack, -) -> std::result::Result, openssl::error::ErrorStack> { - match e.errors().len() { - 0 => Stack::::new(), - _ => Err(e), +/// Workaround for potential locality mismatches between CRLs and Certs +/// # Return +/// fixed subject or none if locality was not Armonk or any OpenSSL error +pub fn armonk_locality_fixup(subject: &X509NameRef) -> Option { + if !name_data_eq(subject, Nid::LOCALITYNAME, IBM_Z_LOCALITY_NAME_ARMONK) { + return None; + } + + let mut ret = X509Name::builder().ok()?; + for entry in subject.entries() { + match entry.object().nid() { + nid @ Nid::LOCALITYNAME => ret + .append_entry_by_nid(nid, from_utf8(IBM_Z_LOCALITY_NAME_POUGHKEEPSIE).ok()?) 
+ .ok()?, + _ => { + ret.append_entry(entry).ok()?; + } + } } + Some(ret.build()) } #[cfg(test)] /// tests for some private functions mod test { - use openssl_extensions::x509_crl_eq; - use super::*; use crate::test_utils::*; use std::time::{Duration, SystemTime}; @@ -452,20 +426,6 @@ } #[test] - fn x509_name_equal() { - let sign_crt = load_gen_cert("ibm.crt"); - let hkd = load_gen_cert("host.crt"); - let other = load_gen_cert("inter_ca.crt"); - - assert!(super::check_x509_name_equal(sign_crt.subject_name(), hkd.issuer_name()).is_ok(),); - - assert!(matches!( - super::check_x509_name_equal(other.subject_name(), hkd.subject_name()), - Err(Error::HkdVerify(IssuerMismatch)) - )); - } - - #[test] fn is_ibm_z_sign_key() { let ibm_crt = load_gen_cert("ibm.crt"); let no_ibm_crt = load_gen_cert("inter_ca.crt"); @@ -497,20 +457,4 @@ )); assert!(super::get_ibm_z_sign_key(&[ibm_crt, no_sign_crt]).is_ok(),); } - - #[test] - fn download_first_crl_from_x509() { - let ibm_crt = load_gen_cert("ibm.crt"); - let inter_crl = load_gen_crl("inter_ca.crl"); - let _m_inter = super::super::test::mock_endpt("inter_ca.crl"); - - let crl_d = super::download_first_crl_from_x509(&ibm_crt) - .unwrap() - .unwrap(); - assert_eq!(crl_d.len(), 1); - assert!(x509_crl_eq( - crl_d.first().unwrap().as_ref(), - inter_crl.as_ref() - )); - } } diff -Nru s390-tools-2.31.0/rust/pv/src/verify/test.rs s390-tools-2.33.1/rust/pv/src/verify/test.rs --- s390-tools-2.31.0/rust/pv/src/verify/test.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/verify/test.rs 2024-05-28 08:26:36.000000000 +0200 @@ -5,71 +5,62 @@ #![cfg(test)] use super::{helper, helper::*, *}; -use crate::{Error, HkdVerifyErrorType::*}; -use core::slice; -use once_cell::sync::OnceCell; -use openssl::stack::Stack; -use std::sync::Mutex; +use crate::{utils::read_crls, Error, HkdVerifyErrorType::*}; +use openssl::{stack::Stack, x509::X509Crl}; +use std::path::Path; use crate::test_utils::*; -pub fn mock_endpt(res: &str) -> 
mockito::Mock { - static MOCK_SERVER: OnceCell> = OnceCell::new(); - - let res_path = get_cert_asset_path(res); - - MOCK_SERVER - .get_or_init(|| mockito::Server::new_with_port(1234).into()) - .lock() - .expect("COULD NOT GET THE MOCK_SERVER LOCK") - .mock("GET", format!("/crl/{res}").as_str()) - .with_header("content-type", "application/pkix-crl") - .with_body_from_file(res_path) - .create() -} - -#[test] -fn mockito_server_available() { - let _mock = mock_endpt("ibm.crt"); +// Mock function +pub fn download_first_crl_from_x509(cert: &X509Ref) -> Result>> { + fn mock_download>(path: P) -> Result> { + read_crls(std::fs::read(path)?) + } + + for dist_point in x509_dist_points(cert) { + { + let path = get_cert_asset_path(&dist_point); + let crls = if let Ok(buf) = mock_download(&path) { + buf + } else { + continue; + }; + return Ok(Some(crls)); + } + } + Ok(None) } #[test] fn store_setup() { - let ibm_str = get_cert_asset_path_string("ibm.crt"); - let inter_str = get_cert_asset_path_string("inter.crt"); + let ibm_path = get_cert_asset_path("ibm.crt"); + let inter_path = get_cert_asset_path("inter.crt"); + let crls: [String; 0] = []; - let store = helper::store_setup(&None, &[], &[ibm_str, inter_str]); + let store = helper::store_setup(None::, &crls, &[&ibm_path, &inter_path]); assert!(store.is_ok()); } #[test] fn verify_chain_online() { - let ibm_crt = load_gen_cert("ibm.crt"); - let inter_crt = load_gen_cert("inter_ca.crt"); - let root_crt = get_cert_asset_path_string("root_ca.chained.crt"); - - let mock_inter = mock_endpt("inter_ca.crl"); + let ibm_crt = get_cert_asset_path("ibm.crt"); + let inter_crt = get_cert_asset_path("inter_ca.crt"); + let root_crt = get_cert_asset_path("root_ca.chained.crt"); + let crls: [String; 0] = []; - let mut store = helper::store_setup(&Some(root_crt), &[], &[]).unwrap(); - download_crls_into_store(&mut store, slice::from_ref(&ibm_crt)).unwrap(); - let store = store.build(); - - mock_inter.assert(); - - let mut sk = 
Stack::::new().unwrap(); - sk.push(inter_crt).unwrap(); - verify_chain(&store, &sk, &[ibm_crt.clone()]).unwrap(); - assert!(verify_chain(&store, &sk, &[ibm_crt]).is_ok()); + let ret = CertVerifier::new(&[&ibm_crt, &inter_crt], &crls, Some(&root_crt), false); + assert!(ret.is_ok(), "CertVerifier::new failed: {ret:?}"); } #[test] fn verify_chain_offline() { let ibm_crt = load_gen_cert("ibm.crt"); - let inter_crl = get_cert_asset_path_string("inter_ca.crl"); + let inter_crl = get_cert_asset_path("inter_ca.crl"); let inter_crt = load_gen_cert("inter_ca.crt"); - let root_crt = get_cert_asset_path_string("root_ca.chained.crt"); + let root_crt = get_cert_asset_path("root_ca.chained.crt"); + let certs: [String; 0] = []; - let store = helper::store_setup(&Some(root_crt), &[inter_crl], &[]) + let store = helper::store_setup(Some(&root_crt), &[&inter_crl], &certs) .unwrap() .build(); @@ -79,41 +70,38 @@ } #[test] -fn verify_online() { - let root_crt = get_cert_asset_path_string("root_ca.chained.crt"); - let inter_crt = get_cert_asset_path_string("inter_ca.crt"); - let ibm_crt = get_cert_asset_path_string("ibm.crt"); +fn dist_points() { + let crt = load_gen_cert("ibm.crt"); + let res = x509_dist_points(&crt); + let exp = vec!["inter_ca.crl"]; + assert_eq!(res, exp); +} + +fn verify(offline: bool, ibm_crt: &'static str, ibm_crl: &'static str, hkd: &'static str) { + let root_crt = get_cert_asset_path("root_ca.chained.crt"); + let inter_crt = get_cert_asset_path("inter_ca.crt"); + let inter_crl = get_cert_asset_path("inter_ca.crl"); + let ibm_crt = get_cert_asset_path(ibm_crt); + let ibm_crl = get_cert_asset_path(ibm_crl); let hkd_revoked = load_gen_cert("host_rev.crt"); - let hkd_inv = load_gen_cert("host_invalid_signing_key.crt"); let hkd_exp = load_gen_cert("host_crt_expired.crt"); - let hkd = load_gen_cert("host.crt"); - - let mock_inter = mock_endpt("inter_ca.crl"); - let mock_ibm = mock_endpt("ibm.crl"); + let hkd = load_gen_cert(hkd); - let inter_crl = 
get_cert_asset_path_string("inter_ca.crl"); - let ibm_crl = get_cert_asset_path_string("ibm.crl"); + let crls = [&ibm_crl, &inter_crl]; let verifier = CertVerifier::new( - &[ibm_crt, inter_crt], - &[ibm_crl, inter_crl], - &Some(root_crt), - false, + &[&ibm_crt, &inter_crt], + if offline { &crls } else { &[] }, + Some(&root_crt), + offline, ) .unwrap(); - mock_inter.assert(); - - verifier.verify(&hkd).unwrap(); + let res = verifier.verify(&hkd); + assert!(res.is_ok(), "Verify failed: res: {res:?}"); - mock_ibm.assert(); assert!(matches!( verifier.verify(&hkd_revoked), - Err(Error::HkdVerify(HdkRevoked)) - )); - - assert!(matches!( - verifier.verify(&hkd_inv), - Err(Error::HkdVerify(IssuerMismatch)) + Err(Error::HkdVerify(HkdRevoked)) )); assert!(matches!( @@ -123,46 +111,41 @@ } #[test] +fn verify_online() { + verify(false, "ibm.crt", "ibm.crl", "host.crt") +} + +#[test] fn verify_offline() { - let root_crt = get_cert_asset_path_string("root_ca.chained.crt"); - let inter_crt = get_cert_asset_path_string("inter_ca.crt"); - let inter_crl = get_cert_asset_path_string("inter_ca.crl"); - let ibm_crt = get_cert_asset_path_string("ibm.crt"); - let ibm_crl = get_cert_asset_path_string("ibm.crl"); - let hkd_revoked = load_gen_cert("host_rev.crt"); - let hkd_inv = load_gen_cert("host_invalid_signing_key.crt"); - let hkd_exp = load_gen_cert("host_crt_expired.crt"); - let hkd = load_gen_cert("host.crt"); + verify(true, "ibm.crt", "ibm.crl", "host.crt") +} - let verifier = CertVerifier::new( - &[ibm_crt, inter_crt], - &[ibm_crl, inter_crl], - &Some(root_crt), - true, - ) - .unwrap(); +#[test] +fn verify_armonk_crt_online() { + verify(false, "ibm_armonk.crt", "ibm.crl", "host.crt") +} - verifier.verify(&hkd).unwrap(); - assert!(matches!( - verifier.verify(&hkd_revoked), - Err(Error::HkdVerify(HdkRevoked)) - )); +#[test] +fn verify_armonk_crt_offline() { + verify(true, "ibm_armonk.crt", "ibm.crl", "host.crt") +} - assert!(matches!( - verifier.verify(&hkd_inv), - 
Err(Error::HkdVerify(IssuerMismatch)) - )); +#[test] +fn verify_armonk_crl_online() { + verify(false, "ibm_armonk.crt", "ibm_armonk.crl", "host.crt") +} - assert!(matches!( - verifier.verify(&hkd_exp), - Err(Error::HkdVerify(AfterValidity)) - )); +#[test] +fn verify_armonk_crl_offline() { + verify(true, "ibm_armonk.crt", "ibm_armonk.crl", "host.crt") } #[test] -fn dist_points() { - let crt = load_gen_cert("ibm.crt"); - let res = x509_dist_points(&crt); - let exp = vec!["http://127.0.0.1:1234/crl/inter_ca.crl"]; - assert_eq!(res, exp); +fn verify_armonk_hkd_online() { + verify(false, "ibm_armonk.crt", "ibm_armonk.crl", "host_armonk.crt") +} + +#[test] +fn verify_armonk_hkd_offline() { + verify(true, "ibm_armonk.crt", "ibm_armonk.crl", "host_armonk.crt") } diff -Nru s390-tools-2.31.0/rust/pv/src/verify.rs s390-tools-2.33.1/rust/pv/src/verify.rs --- s390-tools-2.31.0/rust/pv/src/verify.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/src/verify.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,13 +2,19 @@ // // Copyright IBM Corp. 2023 +use crate::openssl_extensions::{StackableX509Crl, X509StoreContextExtension, X509StoreExtension}; use core::slice; -use log::debug; +use log::{debug, trace}; +use openssl::error::ErrorStack; use openssl::stack::Stack; use openssl::x509::store::X509Store; -use openssl::x509::{CrlStatus, X509Ref, X509StoreContext, X509}; -use openssl_extensions::crl::StackableX509Crl; -use openssl_extensions::crl::X509StoreContextExtension; +use openssl::x509::{CrlStatus, X509NameRef, X509Ref, X509StoreContext, X509StoreContextRef, X509}; +use std::path::Path; + +#[cfg(not(test))] +use helper::download_first_crl_from_x509; +#[cfg(test)] +use test::download_first_crl_from_x509; use crate::error::bail_hkd_verify; use crate::misc::{read_certs, read_file}; @@ -23,14 +29,15 @@ pub trait HkdVerifier { /// Checks if the given host-key document can be trusted. 
/// - /// #Errors + /// # Errors /// - /// This function will return an error if the Hostkey cannot be trusted. - /// Refer to the concrete Error type for the specific reason. + /// This function will return an error if the host-key document cannot be + /// trusted. Refer to the concrete Error type for the specific reason. fn verify(&self, hkd: &X509Ref) -> Result<()>; } -/// A "verifier" that does not verify and accepts all given host-keys as valid. +/// A verifier that does not verify and accepts all given host-keys as valid. +#[derive(Debug)] pub struct NoVerifyHkd; impl HkdVerifier for NoVerifyHkd { fn verify(&self, _hkd: &X509Ref) -> Result<()> { @@ -38,7 +45,7 @@ } } -/// A Verifier that checks the host-key document against a chain of trust. +/// A verifier that checks the host-key document against a chain of trust. pub struct CertVerifier { store: X509Store, ibm_z_sign_key: X509, @@ -62,15 +69,15 @@ fn verify(&self, hkd: &X509Ref) -> Result<()> { helper::verify_hkd_options(hkd, &self.ibm_z_sign_key)?; - // verify that the hkd was signed with the key of the IBM signing key + // verify that the HKD was signed with the key of the IBM signing key if !hkd.verify(self.ibm_z_sign_key.public_key()?.as_ref())? 
{ bail_hkd_verify!(Signature); } - // Find matching crl for sign key in the store or download them + // Find matching CRL for sign key in the store or download them let crls = self.hkd_crls(hkd)?; - // Verify that the CLRs are still valid + // Verify that the CRLs are still valid let mut verified_crls = Vec::with_capacity(crls.len()); for crl in &crls { if helper::verify_crl(crl, &self.ibm_z_sign_key).is_some() { @@ -78,14 +85,14 @@ } } - // Test if hkd was revoked (min1 required) + // Test if HKD was revoked (min1 required) if verified_crls.is_empty() { bail_hkd_verify!(NoCrl); } - for crl in &verified_crls { - match crl.get_by_cert(&hkd.to_owned()) { + for crl in verified_crls { + match crl.get_by_serial(hkd.serial_number()) { CrlStatus::NotRevoked => (), - _ => bail_hkd_verify!(HdkRevoked), + _ => bail_hkd_verify!(HkdRevoked), } } debug!("HKD: verified"); @@ -94,26 +101,59 @@ } impl CertVerifier { - ///Download the CLRs that a HKD refers to. + fn quirk_crls( + ctx: &mut X509StoreContextRef, + subject: &X509NameRef, + ) -> Result, ErrorStack> { + match ctx.crls(subject) { + Ok(ret) if !ret.is_empty() => return Ok(ret), + _ => (), + } + + // Armonk/Poughkeepsie fixup + trace!("quirk_crls: Try Locality"); + if let Some(locality_subject) = helper::armonk_locality_fixup(subject) { + match ctx.crls(&locality_subject) { + Ok(ret) if !ret.is_empty() => return Ok(ret), + _ => (), + } + + // reorder + trace!("quirk_crls: Try Locality+Reorder"); + if let Ok(locality_ordered_subject) = helper::reorder_x509_names(&locality_subject) { + match ctx.crls(&locality_ordered_subject) { + Ok(ret) if !ret.is_empty() => return Ok(ret), + _ => (), + } + } + } + + // reorder unchanged locality subject + trace!("quirk_crls: Try Reorder"); + if let Ok(ordered_subject) = helper::reorder_x509_names(subject) { + match ctx.crls(&ordered_subject) { + Ok(ret) if !ret.is_empty() => return Ok(ret), + _ => (), + } + } + // nothing found, return empty stack + Stack::new() + } + + /// Download the 
CRLs that a HKD refers to. pub fn hkd_crls(&self, hkd: &X509Ref) -> Result> { let mut ctx = X509StoreContext::new()?; // Unfortunately we cannot use a dedicated function here and have to use a closure (E0434) // Otherwise, we cannot refer to self + // Search for local CRLs let mut crls = ctx.init_opt(&self.store, None, None, |ctx| { let subject = self.ibm_z_sign_key.subject_name(); - match ctx.crls(subject) { - Ok(crls) => Ok(crls), - _ => { - // reorder the name and try again - let broken_subj = helper::reorder_x509_names(subject)?; - ctx.crls(&broken_subj).or_else(helper::stack_err_hlp) - } - } + Self::quirk_crls(ctx, subject) })?; if !self.offline { // Try to download a CRL if defined in the HKD - if let Some(crl) = helper::download_first_crl_from_x509(hkd)? { + if let Some(crl) = download_first_crl_from_x509(hkd)? { crl.into_iter().try_for_each(|c| crls.push(c.into()))?; } } @@ -124,26 +164,36 @@ impl CertVerifier { /// Create a `CertVerifier`. /// - /// * `cert_paths` - Paths to Cerificates for the chain of trust + /// * `cert_paths` - Paths to certificates for the chain of trust /// * `crl_paths` - Paths to certificate revocation lists for the chain of trust /// * `root_ca_path` - Path to the root of trust /// * `offline` - if set to true the verification process will not try to download CRLs from the /// internet. + /// /// # Errors /// /// This function will return an error if the chain of trust could not be established. 
- pub fn new( - cert_paths: &[String], - crl_paths: &[String], - root_ca_path: &Option, + pub fn new( + cert_paths: &[P], + crl_paths: &[Q], + root_ca_path: Option, offline: bool, - ) -> Result { + ) -> Result + where + P: AsRef, + Q: AsRef, + R: AsRef, + { let mut store = helper::store_setup(root_ca_path, crl_paths, cert_paths)?; let mut untr_certs = Vec::with_capacity(cert_paths.len()); for path in cert_paths { let mut crt = read_certs(&read_file(path, "certificate")?)?; if !offline { - helper::download_crls_into_store(&mut store, &crt)?; + for c in &crt { + if let Some(crl) = download_first_crl_from_x509(c)? { + crl.iter().try_for_each(|c| store.add_crl(c))?; + } + } } untr_certs.append(&mut crt); } diff -Nru s390-tools-2.31.0/rust/pv/tests/add_secret_request.rs s390-tools-2.33.1/rust/pv/tests/add_secret_request.rs --- s390-tools-2.31.0/rust/pv/tests/add_secret_request.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/add_secret_request.rs 2024-05-28 08:26:36.000000000 +0200 @@ -6,16 +6,16 @@ nid::Nid, pkey::Private, }; -use pv::{ +use s390_pv::{ get_test_asset, request::{ openssl::pkey::{PKey, Public}, - uvsecret::{ - verify_asrcb_and_get_user_data, AddSecretFlags, AddSecretRequest, AddSecretVersion, - ExtSecret, GuestSecret, - }, BootHdrTags, ReqEncrCtx, Request, SymKey, }, + secret::{ + verify_asrcb_and_get_user_data, AddSecretFlags, AddSecretRequest, AddSecretVersion, + ExtSecret, GuestSecret, + }, test_utils::get_test_keys, uv::ConfigUid, Result, Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/tests/assets/cert/der.crl and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/tests/assets/cert/der.crl differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/tests/assets/cert/der.crt and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/tests/assets/cert/der.crt differ diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_host.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_host.crt --- 
s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_host.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_host.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE/TCCAuWgAwIBAgIUWT/F3gP9fOTTq3yOMVskcXM8vOAwDQYJKoZIhvcNAQEN +BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y +azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl +eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy +MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp +bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h +bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv +cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw +EAYHKoZIzj0CAQYFK4EEACMDgYYABAAT1zpBNemPCT0tEx2S9qnlPxyBgxKl6BSX +ghqf3MfOqQKIUdKEbxP0nr2QtIR/MwvBp4YhjxZ9AZtzVtXbQrULTAGlKK4qcf1I +W1rYZ5S0b4dmvh2HbIv9wZLWKaZ/ovnvAJk949WDCbBzC3Dy1E3zCaYPefLHHNve +dWX8RtFUhnTqm6NxMG8wHQYDVR0fBBYwFDASoBCgDoYMZmFrZV9pYm0uY3J0MA4G +A1UdDwEB/wQEAwIDCDAfBgNVHSMEGDAWgBSQCOxEmGwiupA2ER7srMBydGO1aTAd +BgNVHQ4EFgQUUl5SPIEseFNeneyuOidsw06L7gQwDQYJKoZIhvcNAQENBQADggIB +AFAg/hFlaBT+WNlUmVJlAd6FYr6vvRjJiKxcNBZ5wElzxA5OjuGX6pLYiNkzXSy2 +N/4nQ3a8fr04IB9Uhx5ncMzSKVkG+4mbN3xmR7f6zZuFWV/T9Aom3LIbQ5KYR2wW +EvX8b2xbvd74rKAgavq/iuFRn8skQJGgQk9J2YEApOW9wkoFQRgziuu55Cw5GT+f +w9rKgAFTN33ZwfWs86ELJlDOY0aX5373WGccuEKm8y+l2UVLlly125eezz84RSh7 +5j0VUTjK4ZqpSD8yiPxN+vocV+nY52cgWxrVf7g7wlPunxXWX4rxR0z/mEVDDBAC +y7cAxsa4HtmJexuZbjEh8TEgDZXDA24BuFXqtkwqnUpv8KghyKb0KgTQecP9rRhL +b2iHdQrALnddMzJzp7Tn+jsR2A2G0lLLsIuIRFR/1eAxecgTMoxXV8N0SJHn6emH +Uwif1Qr5JVw4UOFSLW3MT3f70hY5hMzrxowOapcYhyR0vbhGFrF1YI8YmAuk2m4P +Dfd5Za5cnfVJiAdNBnNPtV/5wiViX1VJGCrIxgDN6B2VE0VQLp2gKwpsSmhqN4CA +qHzAi3Yj3F8vQ2mWQeuABWLAJrIoxXpVYQvTf4uij0ARZRbajQXx+LZnAUsCi5eZ +LaYrTVn8NzHlGQocUWljG06GaLY8GNqKPQrZD/+6hIYK 
+-----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_host.key s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_host.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_host.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_host.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,8 @@ +-----BEGIN PRIVATE KEY----- +MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIAXR6FCIgd+fjhO/WJ +KwLTP01mBDtYkWbVE71jshjLLpZHunRpsYKbQKCwaDmMYLRrQnTxPgZH1PVKzguI +7M7n9zyhgYkDgYYABAAT1zpBNemPCT0tEx2S9qnlPxyBgxKl6BSXghqf3MfOqQKI +UdKEbxP0nr2QtIR/MwvBp4YhjxZ9AZtzVtXbQrULTAGlKK4qcf1IW1rYZ5S0b4dm +vh2HbIv9wZLWKaZ/ovnvAJk949WDCbBzC3Dy1E3zCaYPefLHHNvedWX8RtFUhnTq +mw== +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_ibm.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_ibm.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_ibm.crl 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_ibm.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDVTCCAT0CAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll +MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMx +MTE1NTIzOVoYDzIzODgxMjIzMTU1MjM5WjAVMBMCAgIrFw0yNDAzMjAxNTUyMzla +oCMwITAfBgNVHSMEGDAWgBSQCOxEmGwiupA2ER7srMBydGO1aTANBgkqhkiG9w0B +AQ0FAAOCAgEAsRE3oW/VAx2JPESuWDZVbKIX9n26BZUZ2mdmUocRmn7KQ6CAi0Ac +L5YRUvYt7kmGo6BSA6rUa0TMoMBtFzfIJ6HQZzEoA/LOkIKMfNHsFFzpIJxghPYJ +PfPkXoLpAOBcLDrYWr1bJ3mkvrd6Tuyx02wJEhVmTcF8W/18AFRPuEVg/u3vJmeQ +yMdwQZG42kEslvhCTO688vozYeX+dXO1/AXamzYQZyEWk2cBQ28DIc5eg39Tq77N +89xqWNK/FWKkF4USn9psiBJQpKEjq+P1jTgdpuN3IGv618hlxS455eGyJrbiTxjD +osWGShI+ZVznnqw98aX558hnNWHic+5JVvdJAwNaMMuNNkApFdz328z9dPtcUAZT 
+7mNcKAuY1NcqdxKTPwkGWfmZm+WndGwKLwpshBhF/ImiAb/UiDrLh0jGbQ/FDE3j +DtWs4k8eXIibtzGhgjfewdXO66jw5Z55FajcOPgja3uw2g2KEhJ+/VP1YtCvCpkm +NX0liKgKLzUGDCbzyrH7QD16T633ebLbak3CtNMMlxZRQO8DQDbDsEKybCG62f/w +OmM+QmL7lRFcXDbtVNxBSQKaDBDlNaEGEjM8phz166g+UMD7M6xJjmwJx/G/QvUd +98YQfMEIt8IlrIbIfHXnAlj4SJwWNfOw+SN0dCqD5CDEYzYVtZgrNXY= +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_ibm.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_ibm.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_ibm.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_ibm.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,38 @@ +-----BEGIN CERTIFICATE----- +MIIGrTCCBJWgAwIBAgIUMCRfNPXX7mCzJea1P3mCUDqZdB0wDQYJKoZIhvcNAQEL +BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y +azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 +MDMyMTE0NTIzOVoYDzIzODgxMjIzMTQ1MjM5WjCBzDELMAkGA1UEBhMCVVMxNDAy +BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp +b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y +cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMRUwEwYDVQQHDAxQb3VnaGtlZXBz +aWUxJzAlBgNVBAsMHklCTSBaIEhvc3QgS2V5IFNpZ25pbmcgU2VydmljZTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALIvcLAKt2DM5rTc1nOhyVT2EbNy +CA1Sy+l1mSloBRVqjyk464mvlqq9p/CDO1e6tWiFhdEFChuTYzzDOxHazLjPc2Y5 +U+r0QYYY8KK6jLQDj4crChPMJoJ0HZbmlWe/3uMl7lmrVMyHgqqOftQ52etBCGDn +d/RV7U4OC+MJ01ePYUcABxPh3APA+DDV5eZ2b1k74JzLvXa+SGA82MPDQYc70Waf +CcsfCw04Adnc968CEUbNQYxdfXZfL0Uvi44bohuaB5b07KY4cCetdMaBIuowgVsa +d3Zo3pI/vaj8nlAIVmYNbG4aVmItG2q+3K8Zt11A1WXMCfVBfzg4aHAmEPJzVD+s +u0maCTrRwRfKudHU+FFI36x4aqauNj0jAoQtuACYLS+69z6G0MKHd50jQXYiGwKP +LaTI+mWJ2+GBPMZpRFKyFlAXTFBFCIpZopAdfhgCMkbb212cARGD9N55xjx0F5u5 +kuXWhyF/0Zq7IdXdBlR9/0uc2I0z2P2RpQ1x1TExjqZoEt6WrpYuWPHtKqeLV1zu 
+PonIPZFsKXJDtxoJhvhk/Aivv0329faKxqtIlH1W7b2BrrkC1UYh4+w9f/CVG2tD +FRkXDEdgyknFsGarHSIGtrIr4vRGAh2p6a+7lNnb9GgpGshFIaOnXf1SXFLgdBwb +mEPx8pjT7Fnu01HxAgMBAAGjgZkwgZYwIQYDVR0fBBowGDAWoBSgEoYQZmFrZV9y +b290X2NhLmNybDAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUE +DDAKBggrBgEFBQcDAzAfBgNVHSMEGDAWgBQsIzqwsaKXaQyBeov7u8c7q8IEdzAd +BgNVHQ4EFgQUkAjsRJhsIrqQNhEe7KzAcnRjtWkwDQYJKoZIhvcNAQELBQADggIB +AL+JmoykUGjMygjv23qdStop7VI2ekieYBKJANlq1eEoICLW0yIw9B57pxR6iFpS +ClulV5vU4GvXlOAP7mTJJQWeuU5Z+sn9s+nLMkwZQj6QBpYwVru3SWx7H4XojRkk +bVhN7hTrfXfQAkTLuol9PHNrGiTNGKRultBg6votpMSyd2wWgli7nt0QUydhquEL +2kROaUlHRLo9cKttyy6MIypWlneGx9KC3X/UMoZgxCygpCDQycIp1oU53ZlHO4AZ +7WeJ/FjEM2PIy6EiBnhrfmRimDjghPM8c8OIMx7fVjc5yS7KnryZ8HuzIeO6CPL+ +/9+bJASuSAHmAZN3qKmiVUVfYhcbU+hKuCS2HRNkRhE/fZidp3K02OqSiRegiPRC +TIcIXJ/SZ8M345yjGekAJ/M7RGIoirbdLYmpOpvHDg0qMlKFsCdpqasivPFjtLpX +M1hvu9Ahz2HteKhm7WV9tOQ2lzMdqFDNTbxhhGzy7NJf+20zooTZCDwlNlo5JmKl +Bg/Tns0rE5ZN2j5iVvv32Bp5CrY8oy8Xq8NJitaEb4JQKSLIya++rot+OTnX7inc +LzsAwru4YgT+jGnHxEQslPcLMjuLdgZZGyXlKit+8KyslwaAb5Yv5e2rP90gYFFG +5ZxgKxI1NfJZXgPXjrnDv/FPJZe+agqlTJ0RYCJeSmCu +-----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_ibm.key s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_ibm.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_ibm.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_ibm.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCyL3CwCrdgzOa0 +3NZzoclU9hGzcggNUsvpdZkpaAUVao8pOOuJr5aqvafwgztXurVohYXRBQobk2M8 +wzsR2sy4z3NmOVPq9EGGGPCiuoy0A4+HKwoTzCaCdB2W5pVnv97jJe5Zq1TMh4Kq +jn7UOdnrQQhg53f0Ve1ODgvjCdNXj2FHAAcT4dwDwPgw1eXmdm9ZO+Ccy712vkhg +PNjDw0GHO9FmnwnLHwsNOAHZ3PevAhFGzUGMXX12Xy9FL4uOG6IbmgeW9OymOHAn +rXTGgSLqMIFbGnd2aN6SP72o/J5QCFZmDWxuGlZiLRtqvtyvGbddQNVlzAn1QX84 +OGhwJhDyc1Q/rLtJmgk60cEXyrnR1PhRSN+seGqmrjY9IwKELbgAmC0vuvc+htDC 
+h3edI0F2IhsCjy2kyPplidvhgTzGaURSshZQF0xQRQiKWaKQHX4YAjJG29tdnAER +g/TeecY8dBebuZLl1ochf9GauyHV3QZUff9LnNiNM9j9kaUNcdUxMY6maBLelq6W +Lljx7Sqni1dc7j6JyD2RbClyQ7caCYb4ZPwIr79N9vX2isarSJR9Vu29ga65AtVG +IePsPX/wlRtrQxUZFwxHYMpJxbBmqx0iBrayK+L0RgIdqemvu5TZ2/RoKRrIRSGj +p139UlxS4HQcG5hD8fKY0+xZ7tNR8QIDAQABAoICAA3bS5edFbqk5JIPFGxEmhwN +0L2UMhUbKblbiFAkgzSxpzVs9EAuU/iuLAezpONDJUVeENR64FjZot+ATTErwzzi +ARbjgWelnAzaFqiI+lUra7d2vN9iQmJltkFcaCT70lD2y6zUepE3Po2V7D4Dy7MU +SRsEFRt+rSgjRoBe7L0D7OwxD9vXdh0ingpqL+k2r50QX3zCCVdZH9bAFJlQr2Fd +YQDqgD/4B4t9HgO7v9Uvw3kukBji0lVovIvUUTV5Z6de2JVAMcERZYNoZUrkkvsE +T6LukXi3WgukglLIZmVR6KoiBBsh2DLlUBsCgbXqOYy6dH3omqOkdsi/9js2cp3O +6waEBmtrH4qmOzXOnmdchg5zNs5eKVwpsrMc04sS74Xr/CWlUPlMsbL4lV3sdAsW +jL/iJO/3VSUwPHPfIbQgt7AJrkP31ESDSGNN5ac4er/ltcmB1r2MI5kuPgWL2psh +ILNFViwarwzPFHLJyiAYSW44p7kzvltoPyOfNyO5ekgeJKuYUfZiNyKYRwrStO4X +FsukIbYLnPa3ZIb3C0RVBx+lTUU+eyGd/rt/rWFA3Lnvbx4r3FdcYj2DnRM1ukgK +1QnmqUbWpRQ2wFZsdOPtMva2AUOurzv7Fze83ZyaijAmLnphnpUjqPbj2RYVWzwv +R7FUHThkc+uzcWhcOnVbAoIBAQDXRKxscm8eurBSQwPx64Td7tmPqvkzANFG9Flv +GC1FnwP/UpKXkEQe2cMu4hvVAnS3awVAfPkRvyPZDJbL5jjvDdLtXbCtbnZEEriI +TqT2UU0cnRxUybVesU+cT95PAWiEEvxFTHxOlv2rtKx0fZrd/ft530V3U7UeH7fk +lZbpEGqmbZn3hGt2UNAXItwi6FT1OjGmOXfBVZUOUSOTDeEX8Dtx+H81ReXTRslA +QXRruHDZX7x8Tw8YWL5Cq0eCPv5+Bm2DxLkzk616C7icv3qrJQS7u/nijg+qGec4 +aKqFGy+Gvc12cGUt6FoWG+6zS+NAihawxind+V/JYnIt1ulLAoIBAQDT5oPaWE1b +Vkm2RqVscq5dnKT+ImmIyUfM7lC6Ff6tf7CyxPumHwrMRnqfas8L5KnvgVQi33rn +OJ5SXoTSJDvU42eqFXD4fAKpsIwbxYi+pY2RPH+PxYNtMvWTMmKZnrqVowFAUHyG +f6yTIHqIZcy4Ll8wrq+Z2ZNQZAMtllXHLgdlFxDcReGmWvhtEtF2yc36DalQPWuS +XvC4Zf1ja37Mslst42MdYNBi9QxE4CKVIaDWnF1Yh5xq8i0DIbJt37xpweXtkT9R +Z6Fefc+hx7A/A5i8bSK6QQsrdL3CokZHY2dbYqDSNlD+21QgEI7wK6ivkS7XkUD/ +fSVIRD7SWYgzAoIBAAoI4ny/rNxi2XtMMm/hibUKwEuJLcqp3BeRpmWeW+Xl7rrF +L54vFG3XutiBo8h/L8+pBnqmGLoyDcq9Yn4owjiqjU2RU34SKyMrODzqZZgx3AVc +cYimSnUakNp5gqRuLWASvn3Aff7v3O1XI77eaAy3HTTmKofQeB3qXpkiPAGrST1u +2IGIQ7YlOD4L2vUpnWQ9DTlxblqt0Z/0OlNNj1OdWDgM3GkwU/FQWGtNYc7vrxsC 
+8ndc/Bgnct8Kuu+gXh6j0BEXZ4a2+Jw61aVA68f0ls7liVV9R8+nG3cusdw4kzOV +v3Eo9h54uVJUhQEIpZRJm5sr8aGuUT/C/g2S+hcCggEAW91HtFUr1DkoY7lk5gsh +xLuwW+yXTBHW6uU2YjY+3wDInfgAERjMGZtEdfBcKo/LjGXJEAVKxwsouBT6CBBl +T/n2ayo4e8FndiFv3GpayiwDn79WngHG5IR/Kn1hea/yvASa+kLqeXTIYFBoTtGz +WvXflr9kqZJF50g0iILwVRWDZzQEvzochX5SzRancJQ0k/9wM7Us+ZvnSEoO/BcS +NbPtC4vU4FukfAI4e3OgCn81t1S6szK3gTXUhdMKA9BHYqIJCGE7zhLbRpfMeBqW +MfthL+8wawbfzMsjqUmopjJWEKxFhFy/6H01j2EeVsjWrKaIZDQ4tYqPqzDK+26N +OQKCAQAV5336ahvQ504soqkNpOiVplH0Ksl3r3/WuO1xr/Hq4A8YXbPFeSFkztxg +kjZABH6c7rV3/8NHJx86MAxXCAlLTIkR5aXmPrBI+EYm/fle9q/AbK+ppq3IUHVF +KXpost8YysCRlel2QrhkTln5PJd4oN2xoMWKPHEuc+mhxBMQdq30JtmlVRfm9JaN +ba3PoW9ecZG5eLP7pZi+AW+KfsaSg9U54jZToYVWeTnTzd7LD70hstun9E+FZAyh +lrTkZuq578ltOuUWwDgKOly0bCAO+q6KMdb7pbPxbtqtdNGbFpXaChLcKB8lwa+8 +vpiaRjhZtk5MLPJs9mTHq1STrzL9 +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_inter_ca.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_inter_ca.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_inter_ca.crl 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_inter_ca.crl 2024-05-28 08:26:36.000000000 +0200 @@ -1,19 +1,20 @@ -----BEGIN X509 CRL----- -MIIDITCCAQkCAQEwDQYJKoZIhvcNAQELBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD +MIIDRjCCAS4CAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD -VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTIzMDMxOTExMDQ0N1oYDzIzODcx -MjMxMTEwNDQ3WjAVMBMCAgG8Fw0yMzAzMjgxMTA0NDdaMA0GCSqGSIb3DQEBCwUA -A4ICAQA+KQrjx/6nKzPggDpKEAzH6XxhUU4CZmyUKFirUdenQBjYLag2Nono75o8 -9DVK3vuK7aeg4tIkUYOBcOEgYx0wEPU6PEE+yOO6KErn7qilN9gRnDHOTrfT0iNY -Rabyeat256gyfrsS84ZB7MnbhecWwC6sP2NiH18VCprH865X6sm5SxAfez1zITV3 -YVtudX4UczqbfpDgP4BU5ERMI71tqj4gKjaHFkC0TGizSphiINDKPmUMbd1w7FHg 
-Ilj+7pNZS377GCX9JzoTaKLuMBiblkwSTUJic7Z2BJZlTgm18hhfT3AZQDVvkq0A -AEPxQzm3be3ZUb+zJvueTeizVHkd3Eufnk69p7w4wNRQMwfj/icm27RZa3bBpF2o -esr2Ptik9ZBN81oMAakONZ4Wxuf0n/KBd6VBjy6WkbalKGVoZn70Nke3+9HGWSIh -bgbuHt7XAlvsgVChtZWsGsyxYw4p2ku4T2ajUfpxqQY1DDCAThweuHl2FND87Kr1 -5sblLLhA3QdjQ0EavsCV1646xorvoyw7YdHkqCPjRb1FsPWm/IePbtu+w9/VjDRF -KcHgBZBWmmQHj/9ykSI9pA5J7R7Nij6sX6Iu1g2yKiPnXeRQFiwhgsxslNk8eJfq -cK4c4HhnNtXa/c8jHcbymwqkF8Qltz0cbEW1usxZ2u6153pyPQ== +VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxMTE1NTIzOFoYDzIzODgx +MjIzMTU1MjM4WjAVMBMCAgG8Fw0yNDAzMjAxNTUyMzhaoCMwITAfBgNVHSMEGDAW +gBSMe9JFoed6PvsUVY3z4MU6KhnKCjANBgkqhkiG9w0BAQ0FAAOCAgEATjy3crOb +zDvF2CiBxmDMvOK8E/fhQ1BSUOB2qc/OiqK6zb1Q2MFvni945+Q+TwbHJqWBfU2z +QHuhOQHUQQNnIvMPXgjJuT32eF2Y7J4weaQOHNjrt1dwE6TPC1DLNUzibbjKr5x1 +/scmCiXWYxjeLvS7aTACNaD1UmUxyroK6h4oc968ofdPURK54sAiddY+VBA4KW4B +WlAjxGSErZk0DIg5SXakSQbTkz8+pLnrqqtmvwQXk92lyfrwsSiZhHlcfBrJ6+mp +1Mho9H5r97mO/LRRV+2CRShqTElxVORbW3Q2ku6RGJHEUB7AssIeKBtEma8yfUff +S2bX0P82ETUCT3CPZK29QJ8eaxxH+PaRnyiT/W8vWuo+BRw8XqQ2AsM22vVCPP61 +mg6VNsPJkJ8UaUF/S41aGMS21HNGk4Ik0iv0wiuu3q2rcFwGuuWB/gRUfSuPfFSO +K5sEf47dBPB5kMZCSEkX1RvHPWAYwd6g8GgVdlQEt/IIX4ChoASES5JNhDFar7tg +6h72VhfTtwEkGqg/z8vI1zP1Qzl2bZ773B/k/TAJ3j6N1JXiHUKioL1hAnA9glrj +3hnZQEQSewPf8opPe/l7UEQNXGlamYic5WRsm8xbN1KL6HARLqxkiC7K8SS6U99s +T7L2jh4egE5kyM4lLOFYfEugj4s6Soq6XuU= -----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_inter_ca.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_inter_ca.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_inter_ca.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_inter_ca.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,38 +1,37 @@ -----BEGIN CERTIFICATE----- -MIIGpjCCBI6gAwIBAgIUMp+RLATMshrQnbOfPkwKoDNyYhcwDQYJKoZIhvcNAQEL +MIIGjDCCBHSgAwIBAgIUKp3mjstxJ9gXt3S7lpKjri2dEUwwDQYJKoZIhvcNAQEL BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu 
ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz -MDMyOTA5MDQ0N1oYDzIzODcxMjMxMDkwNDQ3WjCBvTELMAkGA1UEBhMCVVMxNDAy +azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 +MDMyMTE0NTIzOFoYDzIzODgxMjIzMTQ1MjM4WjCBvTELMAkGA1UEBhMCVVMxNDAy BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAc BgNVBAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD -ggIPADCCAgoCggIBAK75zkJO8mpqPrD9vlSsfJgW7hbioQpuphSo1+3q9cAAWKFg -TYUcGBUNR/lUVdYqgzo3AglUwldWfeO9mBCIGNSN/heLFt1KzNutBsnE3YEeGKpM -7nHhMzh41otFdpZEZfrsGXGok07dy2mEV0mx72e9ALWbXFhxYsdWdSSVTlBH8xcd -38rAzfAiTbjgAUnTIdCPjAJKbXSDBXGXZ3+iuhFxNtSWyJr1AsxPzESErCPzUQjr -m8TM24lKq69zimTEkN4uwP5U8s2JPzbKosg2k24RbpDgkjO8iNK7RL9SMRUE8daP -+eru5EwN4BlZfsNpZDFbILxbt/2sxqmdsx/Nupa5ZAfcHRs88p4l1D3QIiZzaSEc -nCotM/kmnHWbgeJbkGbC9fD23dNJ29uqZU0fbRnG4HpSutrYD6lPg7PXnMt5tT+f -0+wQds38woXT9qW/kN/2WtkVYDhyVjxCgD8iHOZpz2LUmvJfi7Gz9B/DeW1dzgbo -cGxz9ee+R+T5KcKg+XvHD6slk82GrSM21b7zJeK92bJtjkqxBtQf+YgcKtOO7QX7 -37C1XvSHFnKvyyRJrldJFGEKfK2C66hdASHRdbUhWHFo1AA7VqzKB1fU9M2+ltUZ -zRpYD7X36OtRY1KsHHn+SVvsn404hWwgPblZ04nsMPanj+jsN//6M9r5lMezAgMB -AAGjgaEwgZ4wOwYDVR0fBDQwMjAwoC6gLIYqaHR0cDovLzEyNy4wLjAuMToxMjM0 -L2NybC9mYWtlX3Jvb3RfY2EuY3JsMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ -BAQDAgEGMB8GA1UdIwQYMBaAFNnX872GsEyiVMz7up1p/Nq/K+0VMB0GA1UdDgQW -BBT2zEtzeetYaOXAVw0Z9Pda1V9n1DANBgkqhkiG9w0BAQsFAAOCAgEAb7xPI93b -gqpXi4E/hAl7Vh354kKV+1smd1Omf7mQZbTrKBo6o1s0ZpXCbC9+WzK47R21VPNe -EgZ3uzTkURu4AX+5OnAMiWDFAGRSDt8ZXk6ZukWP7OmHsnLCu2strdhrvC4EhMF5 -G9VPIQsTx16CpprrVjzVzJg4i/X+U9dypvnAQeneyz4Ul/kPr6di8bOB3FiBeEDu -dOkVTbnlDa+wMQlCvqlFroFjEHZBKK/+PVIx9cJYj1grmzgqzm2FGUs5Wvcixpgb -uSHCQY9JP8Hy0xl3wx58VaymUK4EMfs612CfzOMClaiooDYuZgzVfhalU6g268nc 
-PQ9RCRJtJuda4mJ2H3Rag79sIiCVV31tE6tLXjOGebuO0vEB8wOjJc4YW1gtrZFy -GltT+HMdFgjO2c6HynpkmtqS8axQAw2hVaOpdJbDW+R0jHihO98FAgXR27TqTFp9 -sjBacfITeYRXGjQTDU1qxuEfoLnTZRIct1TjRTI1HBT8fl1exxgKUyEawH3MUCo7 -LsTPSNASKkJH3Rp29be9xTejUx2HUUwOE/DIF0HKaN+aAc8TR31/4HvC3bf5VPyD -wIwazpjVDZlQ2w+Wry1zNezNCPKiWtkfkj+TkT32h4ZfQEX8t0MUpKnL1LOSV/8h -PV357EenQb+f9DeC4BIbmipovLaUWmml10c= +ggIPADCCAgoCggIBAK+kNJHAUL4dcMnjElivyuzkO2UwTUlJcKQCBmWRHYQjRbP5 +akJx8SZ+wI0Spo00hG4sV2BxA83J+Yrked53DEzlR+RxojA16vXzlUO8c9KH2dJQ +E17PgZnu7/hECmQcT69ZJAOh78ILEXRYk+2ixSTaZRTtchJdzfXIQ0633O2Mi/7z +z5idQmQeXbitC3QIZeNzwitl2FLXPIw6MUnktOKNjQSNxMr9AU8q1cfOoICJQs0J +wYVpby0dv1z0f2N21JJywaeAImHa2h58sSX6uqwOXtzdwcPlr2+iJ09YwHM8uO0D +rtPx3bLHfkvbpMd5cCKjSeFozP1nVnlKJUmAZ8UXk4MAFKCtuLv2/InA8MkclZmX +1IzBbvYDOw7AnsAN2VQYZSgM8vCmnRCzpTmtvb3ysSo32sTUjGW44giuRtGqh5Ct +LeRpZZVm6zDFY6cjpr34+3Vc4pys81d3Dq+Sos4YVPXhTKW3I1VtFIdCeNyey7hv +epjna6/JvOzQuwK90+t9VmZk7jTY2WOUNXJhzDTeDku/aTMIeXUZrAxg3pOvl/wf +SN5i4Gauhl7URDx3nI0jc4Y2u9NGFi0TYJMCRVVLknAcre5cDKFyf9ts1gDyxTDc ++orszCE3ZQzXZeEY6aiQPihhL1YDk5SkkTI6XHmUqLW3HFlfg2aFH/oI+VGBAgMB +AAGjgYcwgYQwIQYDVR0fBBowGDAWoBSgEoYQZmFrZV9yb290X2NhLmNybDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQsIzqwsaKX +aQyBeov7u8c7q8IEdzAdBgNVHQ4EFgQUjHvSRaHnej77FFWN8+DFOioZygowDQYJ +KoZIhvcNAQELBQADggIBAD4GmfmI5R6cy/Sp37buyO53azgw4RvVclIy/2qSPHVX +Os0pPPIIXLsbJMy7M6rDvKx/thZ27BDwms5dNuDynZ494XjqTmwzbBr+qEIzCNpa +QiX0MHf9JqFq5hkcZihfJ8PZL9JWIjiRfMI6AERd1pU1QJI/G8ha1vAkPfcA7GvH +NBBwKBm63iHYhp+zmnEEh85lpj4pEq+hOLK+mJxu55BodbNiBlBGu4EbalZlwXFp +7lGHLPkxuXZndrlrm8Lk+hi558NgNFxqz6qYtUc7txajViU2xFjEkTcQ0FKNiFpV +eHn5TNjwh4QucnWc7wI9hcDoADrekTEr8mUKrJXgxaBLIXEShC0ZDBnJsEwW4xbg +GQD9qXyo0h8cYx2NngvL+9Ee3rzdYAaSfAnZOU0xxrqZt+2dstNqi3pLKdS6RpA/ +3Dt3cAnf0mQQSUP6oZQpfdSEJ9uggT9h+kgJz5RGOQfEELKA5RxHtTe7249J3vDv +hoy0Uy6+w3Ji5AQdn3G5uUGxLsBh/uS1dl8hq4gihrpPbKVJHVxqPL0HCx7DyhRw +hx53GXhGIgZN7QSGTrB5iz9YyzejsnysS0Um+figkxtC1atqhVkqzMDZBHUwRPfy 
+wq8PREXoRITJHfSkg01bInRFMMNWDQuPwSHDX9OYNqpkXzlV8/ao9Rx6JOtiG0a8 -----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_inter_ca.key s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_inter_ca.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_inter_ca.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_inter_ca.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCvpDSRwFC+HXDJ +4xJYr8rs5DtlME1JSXCkAgZlkR2EI0Wz+WpCcfEmfsCNEqaNNIRuLFdgcQPNyfmK +5HnedwxM5UfkcaIwNer185VDvHPSh9nSUBNez4GZ7u/4RApkHE+vWSQDoe/CCxF0 +WJPtosUk2mUU7XISXc31yENOt9ztjIv+88+YnUJkHl24rQt0CGXjc8IrZdhS1zyM +OjFJ5LTijY0EjcTK/QFPKtXHzqCAiULNCcGFaW8tHb9c9H9jdtSScsGngCJh2toe +fLEl+rqsDl7c3cHD5a9voidPWMBzPLjtA67T8d2yx35L26THeXAio0nhaMz9Z1Z5 +SiVJgGfFF5ODABSgrbi79vyJwPDJHJWZl9SMwW72AzsOwJ7ADdlUGGUoDPLwpp0Q +s6U5rb298rEqN9rE1IxluOIIrkbRqoeQrS3kaWWVZuswxWOnI6a9+Pt1XOKcrPNX +dw6vkqLOGFT14UyltyNVbRSHQnjcnsu4b3qY52uvybzs0LsCvdPrfVZmZO402Nlj +lDVyYcw03g5Lv2kzCHl1GawMYN6Tr5f8H0jeYuBmroZe1EQ8d5yNI3OGNrvTRhYt +E2CTAkVVS5JwHK3uXAyhcn/bbNYA8sUw3PqK7MwhN2UM12XhGOmokD4oYS9WA5OU +pJEyOlx5lKi1txxZX4NmhR/6CPlRgQIDAQABAoICAAzvUnVE5NVZaPHfPH3GRXMc +bEDblad+5nIXmZW/gf1WFScnyDLoPf6MIV1KSTR3MIUGFDG8pO9x70QSpyRyXzgm +/vxEf5GeGOV/yKduQJfZrBmMTt7huH1H/slpZlwAx0AGOCwhz/y58LFBKDS9Ethv +0nef9HGEkbtG4ikRCo6+dd1i4nAObcIeh9+ms8QZ7bn6T4t/YrbYFcQMcY6L7qPM +EZ1rHNnnl8H3KTr/nSuWIDKmJCexb+/yQ8JiWaLPMKCv3ZHasn2DHuLVozylKjvr +S4JnvOIyvzChEBTMeBDMgSBoT+bZJi7pOOR4gpozl+XfIVzdqVIEmxZP1u7iq1GV +0jS/NwIAiZyp0e4YC+joS81AaskqoiTnk59inipWgtXaqz9peSd5hDA2u4mfSCVY +aKHEDv3dUYyc++JTr0qjAlxuu0SJjPZMTeIbMoVX+7RTwEHyQoiHzNLzBXo5yGm8 +b29oOqA4DePMt9+J/h3xuiUH9SmYxTkSX6oGZAJC2qEELGFbpN61zLSjJOlv+wfC +Nmpd8O88q6dSG9Tk2o2xeLRXAEulEtmIwjr0x8S10pieJ+ZhoaHAyIRWnbVfwj78 +pKLdpd8MPToTS4f98nkkVfbmc7ymVdz4JhnYTco4aqmtai/8yVUzzIjOLfJ6bp1s +N9okI+1fKgTVlyNj40opAoIBAQDeZqJLT9DTAhxZqa+ZUYc/ztoKiT1JLXDZDWhO 
+J2MVqFdTs7sAw/NhVx7Buf2VBmRBZ8jW/izrING228bCkpIvF8Kf5Fz5busJcJB5 +E2xhnIKRVrnw30JCxF0dGCqpfA79GyhfSsAdcx8ou0TUAgznvYHz6BvdYL8VuGuB +YQ5uJNSykrtHsZBwsk143VrwmUyL/HEiq/btVJ5vRLKtbR59+KOzqgZKTgjDZDm9 +8HwA8Nw45GqFdXUIHJwCwbK0+YE9qW/QCLDtx3wct+E1fs0C6TuMfLrh5khHUa6P +EZKLzkTRm7VHYrFFSGQBQBQ9mMalEttkrHmnHwfd5PAgCGH5AoIBAQDKLSVff0GB +5uadXXcRMRrhA+crUud8RUi9BEXdcUJR4fLosJl6dEBkhrC9pxVpfczAL5zXaqFg +Z4R3AinWB7trIGC1MaVc1CZenBtiVsPHNzOkqeZdcpK/WZcqJ/6AiDrXWYDu5N2n +hsff3Wtp2QqqhrrsP9pZnyrikAYeBvtuys28wpe4L8fZhwyRNry2jziIlmomtHyX +vr1xjfwQyVNno0RpppgAdul3RqBUV1HCpL7pndhfwWRCnW7fVDawYqSpIXUHjW8E +3+RgpHh0/YtLNinRKe2cDYorgxF02B1YCcSignh1msIbuJcH1MI7N2C0C2atiGYE +eSK+R12HcM3JAoIBAHaHXJemwjSzO0jOFrgvq1VmeO6ElhUaErqbWqvMchJo1aHW +eCPAS0XlmI7HAU8bSPNSzMdIT8hAhYRfPV8VnrNahm+Q1bxaUQmG5Hii0XB0aWHs +Rs0JL9dFsBqBdrs9Uv+yKaIfxKPtZv3eUKBtN1OKvGexnKgvl6eL0j/x6i7pkjJZ +4VYkXEazwHZaAs2X5iP1Npaz77YtEwNaKaAkN8wLZ7OpOhD/5cu87sk8Edqug7AO +jHb0UpswJDWT5hptn2OtmdnVx/XyC0OC/JP2MG0MwJ/vGeqrQHpCHjZBt0irdiIA +SmzxPGkgW6wO6rqpYbU4h5TwFyXqc9be3Ns1nIECggEAYZUIxN8XLyvTg4DpR1L8 +Nj88BJ0vPbvzL8gwMIHKENuN8uHKMmCJ8/tOsztCCni9qsVQXmkJGw7b0NUqfDOf +MkWeZ9Zeij+bhW6ziPN361+pfYDDv7bdPZ5wZ9iF7mPSgr6gjK3KjmFvd43Xmm83 +xrbg4cawDTHV8SSyzytvkDoyszj5Id1uCIA2gKB61WKrVsHC2oui11so9PYjA7co +mo0jKBtQomjCpt9f41WCEQCTZQ7asN0XF1AFg6WR//CcqUWMxuhs/V1TTZIU0eLO +qK6r4FjsZrXiSr4oXs0w3J1aW1W97oWTyu19eDooxYwlEMGv/XIoS2BsdIrdidHY +gQKCAQBrA0X+AwT9xgEwYaXnA9RINBPSnzruv1giHHAmY9iS0V6BNJUzlJDyASdT +7drV0YRY7bKCguZYNVZh3o259OXNsAOStd9yiyFzZr6bAVvTVXIkrB040Rd1xptC +yl+kxYEfDeENkCR6h8JkFDupG4xF0PT70sE1sUGgFg3585nWzhN/s6xC9vxtBNHc +R+a1UmoH8pYSONttldtW7g2kB5uuQTU0lZVSHDj9B8z0Twj3b6+kQlXt0ielrVV2 +TSVZcmbIyyEYBDcAxOCvHN+rzKbP7H+7JqHVgnbe12pnVoN7WVMbR2v3F4pK+0aA +M9Wd2js5nPPh0AN3Px1643vFYcAQ +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_root_ca.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_root_ca.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_root_ca.crl 1970-01-01 01:00:00.000000000 
+0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_root_ca.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDPjCCASYCAQEwDQYJKoZIhvcNAQENBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMRYwFAYD +VQQLDA1JQk0gWiBSb290IENBFw0yNDAzMTExNTUyMzhaGA8yMzg4MTIyMzE1NTIz +OFowFTATAgIBTRcNMjQwMzIwMTU1MjM4WqAjMCEwHwYDVR0jBBgwFoAULCM6sLGi +l2kMgXqL+7vHO6vCBHcwDQYJKoZIhvcNAQENBQADggIBAJB5ERfMQEZ5Pdc3A8+d +gDiY53VEdA0Zd8MDv+j+Mgu1qn7IW2rpE313yVHieclZ65ReEtb825St8UBJtjiZ +9Vd4lK9FUQKKCblCYNrEbu9cvqNeHDGxldQPUkEoz+z5kielcEwD6RUsK5fSYe3Y +6E3jc9mMHw7xWCSiolvNr5y6AUymfu22v41qgfaB0yNe5uz6vRgRoiy+OTP3Z7oc +ls7o73F9O4k6QYdba8us4v0TSwZAAPO16TsHhmyHcBe3w23UIusTU/c+6A++hGdU +TNUW3OZl+hv+4BcnClrVBDfWiExOMNzIhH9hzlXQ2qNPNqz2ymlH8Wgpf1TBYJ8m +xe8EuGGrDFuewa45kA8uxuHCiSLCsBowDEXXkswaSF4E4yzYZqcNDcLOedK7vG9G ++zXYHOpLsZMyfgauxjuWtwR/ma+ub85CHy+eUC2waI+Mk0Uk0Lr5y0Jdp8ztxakp +UlVHYNU/Q+kwHfmKztkOsedomOf1/8IvzE930ZB8rv0G8ok2HXYvA3+r3lcDSG/V +7+yq3HcJezw0XCDtc5mv8dWI6pUv8siWFKok+hlkZpeSXgtqRhF2rzI8yz+lk5Cr +5yCWroPI0T44o2RzjfGZAViRH0nnYF1GoBORSZORppubcjmiTy46OFG/fiWKGKV1 +sngAoQ4TCDQzId0zwGLtUCHu +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_root_ca.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_root_ca.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_root_ca.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_root_ca.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,37 @@ +-----BEGIN CERTIFICATE----- +MIIGXzCCBEegAwIBAgIUC2OH79tloDOfMBeyDBA84epTMKYwDQYJKoZIhvcNAQEL +BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y 
+azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 +MDMyMTE0NTIzOFoYDzIzODgxMjIzMTQ1MjM4WjCBtTELMAkGA1UEBhMCVVMxNDAy +BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp +b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y +cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxFjAU +BgNVBAsMDUlCTSBaIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDK+jIoIZVYyEq7ZrxfKv/TQQWCgI5C8BewYkB+/rt8NY0fn/aAZbvJdnDX +cQCLpCeETDi7C2hGujnZQN68otr2GpJVQ/kgKnM8yU/p9yBSUan9Zo/195a9YytF +6Ys/Yc0HApoPtB3t85NHNjAvhzdcBKWMb+/2FiJ61gdnrO/zORnxp/BejxTMyB6r +0mhhVX4wwdl5vfkX/qS3TL0rajEz0V+SwneDiIomoVJCnco40Km1M20qU5L7EpNH +IMEP00hcj40zO0jJ8cGDNoKgTHsZPovYfFmWITVOFvaPcKaloUU67dYEg0fv3ypX +aYHRszcd794VOWQ85r/mlFSSELwoUrVH7G0t6wnzOawW2kk7ZYndbo+Z1ZFb1Ul+ +iVwUAHzx/ylmOORJNM4JGiYyC5+1481MLeB+37+V3TJaBe4IoKqVo/OCBsaPt7x1 +rMpZLw4gXj3A0Hh8gi0z5HDkKolMSawuU8dpFeI0GN+4hJN9DNQh0OTGLEfzeo+1 +lNzsF+jGT9B0tApWhBq/QTyHXvJREVx32hPd/1X6bVbd9mik0bexg6bVpZVNG1sn +7GsVO2wq5OWKe3UtSh8wJcjGdW38fPAB46oIBQ7x0HGdp/KFiBTWxrexLThMFvYL +wPtWedj8ntgOBoH5MJgL1XkVYP+1EQYaBJNfswvF4CrvPRCtCwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQsIzqw +saKXaQyBeov7u8c7q8IEdzAdBgNVHQ4EFgQULCM6sLGil2kMgXqL+7vHO6vCBHcw +DQYJKoZIhvcNAQELBQADggIBALZNLMgm7RqIKY++Blc2qeIDrwlx1U/PPN/PsZsN +cQR48Bq+vQ4cV9hOcZC3jfRbkkDho8jxVTXdEOcpDyU4zNSLG1+b1FNRvqjq2L+d +ow190dYqvfgbdNFo8V6kLdRFe5aCLhHTWGLTbl0vvkc99h1Lt9ZXr6LzhFwbXSGr +Q/qJoLZFVBwJZmfZY3VmL661sBcTmzDDGJxg00fg+1jCIk2Ot3dn+7jQ8g6HYMQv +8GrqGKcFia8fXcTTmcH+Swr8rps1MYypjkpX/zVHsFuENdqZNkqU+OLTaifkrBuO +6evTTjEOcDzMFs5Uipjvq7XOBg5rz9BKRXrjDMdI4CmUzMR272lToNw7lHqqNDKi +nxAzPjPWVwXQTu2LFY3NwubWkPAVsd6FsHKzQHlq97N4sj+vp6vctaEUgFS4pUlL +tbjw6AtQA72z4uIpcAzE2ctTj831QeQSZGKBUpeNPjuK/NjytXQm8MvW7LI2OgaI +H5B9+KE9KjqijpNiiOoidYVwFRrZniIQii06qXwQUgz2gGIzYPER9+3PijrtOY+t +K/vS6NohUYb16PhbbvzFktSGBtvp1kUF4fB2NJh0z6OiZXVEHlCYEDTv78RS36iU +PXijR+rALi20P4Sg6picuvmt2epfSMQ4ynNxPhbSG7wMp+Zp5tOrZcmg4jN0Yt56 +tACH +-----END 
CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_root_ca.key s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_root_ca.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_root_ca.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_root_ca.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDK+jIoIZVYyEq7 +ZrxfKv/TQQWCgI5C8BewYkB+/rt8NY0fn/aAZbvJdnDXcQCLpCeETDi7C2hGujnZ +QN68otr2GpJVQ/kgKnM8yU/p9yBSUan9Zo/195a9YytF6Ys/Yc0HApoPtB3t85NH +NjAvhzdcBKWMb+/2FiJ61gdnrO/zORnxp/BejxTMyB6r0mhhVX4wwdl5vfkX/qS3 +TL0rajEz0V+SwneDiIomoVJCnco40Km1M20qU5L7EpNHIMEP00hcj40zO0jJ8cGD +NoKgTHsZPovYfFmWITVOFvaPcKaloUU67dYEg0fv3ypXaYHRszcd794VOWQ85r/m +lFSSELwoUrVH7G0t6wnzOawW2kk7ZYndbo+Z1ZFb1Ul+iVwUAHzx/ylmOORJNM4J +GiYyC5+1481MLeB+37+V3TJaBe4IoKqVo/OCBsaPt7x1rMpZLw4gXj3A0Hh8gi0z +5HDkKolMSawuU8dpFeI0GN+4hJN9DNQh0OTGLEfzeo+1lNzsF+jGT9B0tApWhBq/ +QTyHXvJREVx32hPd/1X6bVbd9mik0bexg6bVpZVNG1sn7GsVO2wq5OWKe3UtSh8w +JcjGdW38fPAB46oIBQ7x0HGdp/KFiBTWxrexLThMFvYLwPtWedj8ntgOBoH5MJgL +1XkVYP+1EQYaBJNfswvF4CrvPRCtCwIDAQABAoICABhe0JAvnWYwK4OIouwCQP5f +pjEJt7WjF2pewZj92LY3GnSbmNXIYTL1J43rzBb4D06RIEFE7gY1QdDV5b2xsqct +1Y1V60lzZKv9t7yYUVHQ6vS5JbvF4wa7gMHqIoU/ptrv8wjDpXxLwc52W8ljwL/U +Rv7aScNlhkpH/FJJr0hSqMzHpH1AofdcZ7eq5vbDdF1CSRbsOkYpQCRJ8G86rCGp +QoJfxZ24p8ME8i18EyrzKA0mLuCQHED3n/tARtiG6P9ECmiw9jJY9n+x7HPL+v5d +m3N9//ud4m/ieJlhpdXpIQ4hlDqtJdalGB5aqp2U7kndN5rwwxo/fPb1ru8YAfSB +TK+5ikIn1lfzkiRoTVAmZubXqSSi1xiudqwL28euIDn0f93aiEInHO7Vx2o4vpgQ +MJ3Hg5XaGygUAR+ZSnQ7lcv4kKMpSeqYuZSRRLs/RZ2Kw3jCYpyQyr1Ii8qH3H9S +hUqZqUZK4splagRureExDkFrGvo5cBsmMGZqNfG8Ix9imEC4kRhhraIcmziwY2Ca +mKqGH+sGpcvHX6vmDe7wZb4fo+Mkkacext42t8dtkPT3nEJhsR0xqmcgvyt6yP5t +y4mv2hGeDEsclpWNfuug0DeOwXjClATZVrP4baL4rGrP6AGFDiRz0PbWmaJwMLDZ +D+06rj6nO9Ibu/t5tkWRAoIBAQDw2JSTzLEdrkX4cTaHE7pLWY2BQuiIXdYOdXbW +jkPytSgqHfpTckCDl8UJWMBzo87xDb5Q9Hmsa37Mj5HIz+lGxLPukuhiRUnu/XJQ 
+AxzLXBBmdIsb0Rha1kChNASPHXo2Ozi9OsWB9UC65P+U1cGmE882A/Y4p5ak7C+q +iQk+IRgSqKrLIIHFG0h6Zi0f+DlXMtsNQ7mjDk62WjWzneS/NPwN99VwGacGqI99 +IOhJIKfhpbIXMg2Wo0Xj3HwaHWccK10Va/x8jvjunX9V0px19yCPnMsMWTZ0WN9F +CRbeD8zCuwasjQkBkqiqMyShk/gs4K6J9TMc5tprB2xB7+f3AoIBAQDXv6WWdsRa +NuuMrzJ94ZLESUIHBg/u1uGJgvtckpKAm25SnEB2C4d8u09zey0BsXsxBKLJ/HAp +Nci07/dKkDlmG6+SkVhcIsEAokw2EFWG/+EQJ1Bd2gbX9RXQv3O2sda/2h3QB8Vu +BUmvK8nwpFaD5eqrIeFxBhao4vHon80Bv1L1F+QVfbjOgV5eS146VU7Gl7b/LuMk +yx3I4HwWqNbQUGTnVfC6wprae9G1Tamui+PnCPMdVSP972gxMNfhqkt04BN2j2A5 +/Lat2VdL100W2XFzLEQmfD4sAlsJQ/jTcGhDb9Brxlf9kQYt1r+a8igcJQqoz8yr +1wLabN6FKuaNAoIBAHALwit0ad2uCt1HBiAXPG77jYpaL0XpqcD2QsAUVWYfgzz8 +z01s9LiDreXoRThHN+oLA8QzyDs/kzDlheQPXa5PqonODJBTPc8SV9EDcazl+rc/ +dswNHbB8xnp4cbqG8ykxqfbW1bXc/C02hfSe8UUrKBYwB6dZyAqX2qESuZsO2F0t +3K89Q0IIrFJNIKcj2sFHZoMoQ7+o01OgaiVSym1t3+k7qC9Lr9m4J3EGEyqaJ6Ah +btW9snanJMeZ9p5LmYGQZvClWUQ1W3ffC7NIlQOIYbyOLCRliKDeC5jZXqsWXKMn +UTaLMmpp6U+tFViaNzXhnTGPQiUq/OgX/vQ6AQcCggEBALGJyY+ZyFacnxU0Do68 +E3Rm/GifOnlGZm4sVQCGtPwT66MbZYg+UI357ZWQJSchj8h0kik5DGs9ER6j4ZGA +QVufKMmpxVZ8WupUo/ZRVrAy3FfYoi+4/Ky1x+/xvBz5F0jlCmdoHbU9sLbvh8v4 +0CDWEFgnF0dUxUzRBFRzO9ZWRMPflxeAT5XPq4JY7v99t8eAjVxMjyp14tqssDBN +XAgsP/yGIgTto1RrU9SoRhuWjkJOgREAQQ/z9H+WO/A9nL5ermV/8qWFKibBlS6Z +y5wj71HheWtaDidVAOkNADOuan4kAxTNMRc+QiFyeFXfM9aFPNJRvZEi6/wmXq6i +8PkCggEBAMK0qSiZ0kBgTqgYhpKqCwEUgEueTC3EloZQ6CHV1vTEfsyM3gEQ2A5r +8+viQf1RD4UloCWfOF1CA9SqgtXAAIQyGVAATjG7sbUNAK7jac9UdZOS8QSkcJMS +UIHXndlct/vijnNe/htGCcu+C2KCJz1lfW07DB8WOoyP/5e7es7e0RGjGgGdRdu2 +MyCXJ3lG0heiVUp2d+5R80UAZQdlHuzo9sItxSUbsO639Q1c+Cew/cM6YPxdcTGo +LWnAsG3yJZgR3pipS9/GqISY725c8QJijYMG3GFUBD6pMCdD46Kl/rfjUsYK3Aoe +2lXPecP5/ilaLLUhs7SijqHzcKiNL80= +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_root_ca_valid.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_root_ca_valid.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/fake_root_ca_valid.crl 1970-01-01 01:00:00.000000000 +0100 +++ 
s390-tools-2.33.1/rust/pv/tests/assets/cert/fake_root_ca_valid.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDPjCCASYCAQEwDQYJKoZIhvcNAQENBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMRYwFAYD +VQQLDA1JQk0gWiBSb290IENBFw0yNDAzMTExNTUyMzhaGA8yMzg4MTIyMzE1NTIz +OFowFTATAgIBTRcNMjQwMzIwMTU1MjM4WqAjMCEwHwYDVR0jBBgwFoAU1p7a8iaQ +sGamSdR9585yWIW36CowDQYJKoZIhvcNAQENBQADggIBAMcBN8PztJELdCo6fQvw +C9ZU41AmBZsMdIoacgacupt94gHf8SAO1Ric46j3KERpA0mWKAx6RH1s3OX2qN/C +YSrVPIZ076Cn9EMVdK9w9hqt6SYHoot9CwPutF6BBG5O2uN6eAsEM21B5Tr6xTZ/ +SUj3fO3+QsePDL84BRfU5vtZcGj1BkULopWwibG82ayEUlbY3J8OPjK5YKuHtxJd +lI4yfkmrEPwHEm8bwI6lhaIxrtTjoVgoolw+FqtVdVdU6O254AUKwxN88bHoA/sk +BOr13AoHXiKEBaEJhocTLJRfvUZVRYGll43ELsy60XEH51W27/uULnC92vBkp4Q1 +5hPwSD0RHKfMwsLKqoJaWX1CxfdttapCfriehMXJlv46GPmq6cLIEbbHzJKsyJhN +4mW8Uiwn1aM5EhMauSLuuwNp8QRo4rmQzUstxwn4O/3HzTcjVWbn0XvBXa2S5HtD +2kyWwN/qck1N9aXYk/sf2A2py2ECDuXc/Kvh1kDi6ZSaKA0VSS1FX62HvdqZKQb5 +ZmyoQUpg8yv0hholaZd9Jye0pwww/K8CFddsyUMRYDSj0+qiw4pgx6KK3ElKnHeH +uq+iLxDLWTqrWQw6k0I92B9gSlNKR3Z6Zq2UTxo6hB7IYgZv5nxx3BRxnTm6aH6a +pIoxgnFnbNnyUIRAkOCHbFqy +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/gen/create_certs.py s390-tools-2.33.1/rust/pv/tests/assets/cert/gen/create_certs.py --- s390-tools-2.31.0/rust/pv/tests/assets/cert/gen/create_certs.py 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/gen/create_certs.py 2024-05-28 08:26:36.000000000 +0200 @@ -4,13 +4,10 @@ from enum import Enum from cryptography import x509 -from cryptography.x509.oid import NameOID -from cryptography.hazmat.primitives import hashes from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import serialization -from cryptography.hazmat.primitives.asymmetric import 
ec -from cryptography.hazmat.primitives.asymmetric import rsa - +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ec, rsa +from cryptography.x509.oid import NameOID ONE_DAY = datetime.timedelta(1, 0, 0) @@ -25,7 +22,9 @@ ) -def createCRL(pkey, issuer, serial_numbers=None, last_update=None, next_update=None): +def createCRL( + pkey, issuer, serial_numbers=None, last_update=None, next_update=None, authid=True +): serial_numbers = [333] if serial_numbers is None else serial_numbers builder = x509.CertificateRevocationListBuilder() builder = builder.issuer_name(issuer) @@ -43,50 +42,15 @@ .build(default_backend()) ) builder = builder.add_revoked_certificate(revoked_cert) - crl = builder.sign( - private_key=pkey, algorithm=hashes.SHA256(), backend=default_backend() - ) - return crl - - -def createRootCA(pkey, subject): - issuer = subject - ca = ( - x509.CertificateBuilder() - .subject_name(subject) - .issuer_name(issuer) - .public_key(pkey.public_key()) - .serial_number(x509.random_serial_number()) - .not_valid_before(datetime.datetime.utcnow()) - .not_valid_after( - datetime.datetime.utcnow() + datetime.timedelta(days=365 * 365) - ) - .add_extension( - x509.BasicConstraints(ca=True, path_length=None), - critical=True, - # Sign our certificate with our private key - ) - .add_extension( - x509.KeyUsage( - digital_signature=False, - key_encipherment=False, - content_commitment=False, - data_encipherment=False, - key_agreement=False, - encipher_only=False, - decipher_only=False, - key_cert_sign=True, - crl_sign=True, - ), - critical=True, - ) - .add_extension( - x509.SubjectKeyIdentifier.from_public_key(pkey.public_key()), + if authid: + builder = builder.add_extension( + x509.AuthorityKeyIdentifier.from_issuer_public_key(pkey.public_key()), critical=False, ) - .sign(pkey, hashes.SHA512(), default_backend()) + crl = builder.sign( + private_key=pkey, algorithm=hashes.SHA512(), backend=default_backend() ) 
- return ca + return crl class CertType(Enum): @@ -200,9 +164,7 @@ critical=True, ) .add_extension( - x509.ExtendedKeyUsage( - [x509.oid.ExtendedKeyUsageOID.CODE_SIGNING] - ), + x509.ExtendedKeyUsage([x509.oid.ExtendedKeyUsageOID.CODE_SIGNING]), critical=False, ) ) @@ -254,8 +216,7 @@ if __name__ == "__main__": - MOCKUP_CRL_DIST = "http://127.0.0.1:1234/crl/" - + MOCKUP_CRL_DIST = "" # create root CA root_ca_subject = x509.Name( @@ -310,7 +271,13 @@ t=CertType.ROOT_CA, ) - fake_root_ca_crt = createRootCA(fake_root_ca_pkey, fake_root_ca_subject) + fake_root_ca_crt = createCert( + pkey=fake_root_ca_pkey, + subject=fake_root_ca_subject, + issuer_pkey=fake_root_ca_pkey, + crl_uri=None, + t=CertType.ROOT_CA, + ) # create intermediate CA inter_ca_pkey = getPrivKey("inter_ca.key", createRSAKeyPair) @@ -354,7 +321,7 @@ # create ibm certificate ibm_pkey = getPrivKey("ibm.key", createRSAKeyPair) - ibm_subject = x509.Name( + ibm_subject_poughkeepsie = x509.Name( [ x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), x509.NameAttribute( @@ -371,9 +338,35 @@ ), ] ) - ibm_crt = createCert( + ibm_pougkeepsie_crt = createCert( pkey=ibm_pkey, - subject=ibm_subject, + subject=ibm_subject_poughkeepsie, + issuer_crt=inter_ca_crt, + issuer_pkey=inter_ca_pkey, + crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", + t=CertType.SIGNING_CERT, + ) + + ibm_subject_armonk = x509.Name( + [ + x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), + x509.NameAttribute( + NameOID.ORGANIZATION_NAME, + u"International Business Machines Corporation", + ), + x509.NameAttribute( + NameOID.COMMON_NAME, u"International Business Machines Corporation" + ), + x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), + x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), + x509.NameAttribute( + NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key Signing Service" + ), + ] + ) + ibm_armonk_crt = createCert( + pkey=ibm_pkey, + subject=ibm_subject_armonk, issuer_crt=inter_ca_crt, issuer_pkey=inter_ca_pkey, 
crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", @@ -381,7 +374,7 @@ ) ibm_expired_crt = createCert( pkey=ibm_pkey, - subject=ibm_subject, + subject=ibm_subject_poughkeepsie, issuer_crt=inter_ca_crt, issuer_pkey=inter_ca_pkey, crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", @@ -390,11 +383,11 @@ not_after=datetime.datetime.today() - 1 * 365 * ONE_DAY, ) - #create revoked ibm certificate + # create revoked ibm certificate ibm_rev_pkey = getPrivKey("ibm.key", createRSAKeyPair) ibm_rev_crt = createCert( pkey=ibm_rev_pkey, - subject=ibm_subject, + subject=ibm_subject_poughkeepsie, issuer_crt=inter_ca_crt, issuer_pkey=inter_ca_pkey, crl_uri=MOCKUP_CRL_DIST + "inter_ca.crl", @@ -402,7 +395,9 @@ ) # create inter CLRs - inter_ca_crl = createCRL(inter_ca_pkey, inter_ca_subject, [444, ibm_rev_crt.serial_number]) + inter_ca_crl = createCRL( + inter_ca_pkey, inter_ca_subject, [444, ibm_rev_crt.serial_number] + ) inter_ca_invalid_signer_crl = createCRL(root_ca_pkey, inter_ca_subject, [444]) inter_ca_invalid_date_crl = createCRL( inter_ca_pkey, @@ -431,7 +426,9 @@ ), ] ) - ibm_wrong_subject_crl = createCRL(ibm_wrong_subject_pkey, ibm_wrong_subject_subject, [555]) + ibm_wrong_subject_crl = createCRL( + ibm_wrong_subject_pkey, ibm_wrong_subject_subject, [555] + ) ibm_wrong_subject_crt = createCert( pkey=ibm_wrong_subject_pkey, subject=ibm_wrong_subject_subject, @@ -469,7 +466,6 @@ t=CertType.SIGNING_CERT, ) - def host_subj(): return x509.Name( [ @@ -487,14 +483,13 @@ ] ) - # create host certificate host_pkey = getPrivKey("host.key", createEcKeyPair) host_subject = host_subj() host_crt = createCert( pkey=host_pkey, subject=host_subject, - issuer_crt=ibm_crt, + issuer_crt=ibm_pougkeepsie_crt, issuer_pkey=ibm_pkey, crl_uri=MOCKUP_CRL_DIST + "ibm.crl", t=CertType.HOST_CERT, @@ -502,7 +497,7 @@ host_crt_expired = createCert( pkey=host_pkey, subject=host_subject, - issuer_crt=ibm_crt, + issuer_crt=ibm_pougkeepsie_crt, issuer_pkey=ibm_pkey, crl_uri=MOCKUP_CRL_DIST + "ibm.crl", t=CertType.HOST_CERT, 
@@ -512,7 +507,7 @@ host_uri_na_crt = createCert( pkey=host_pkey, subject=host_subject, - issuer_crt=ibm_crt, + issuer_crt=ibm_pougkeepsie_crt, issuer_pkey=ibm_pkey, crl_uri=MOCKUP_CRL_DIST + "notavailable", t=CertType.HOST_CERT, @@ -523,7 +518,7 @@ host_crt = createCert( pkey=host_pkey, subject=host_subject, - issuer_crt=ibm_crt, + issuer_crt=ibm_pougkeepsie_crt, issuer_pkey=ibm_pkey, crl_uri=MOCKUP_CRL_DIST + "ibm.crl", t=CertType.HOST_CERT, @@ -534,35 +529,48 @@ host_rev_crt = createCert( pkey=host_rev_pkey, subject=host_rev_subject, - issuer_crt=ibm_crt, + issuer_crt=ibm_pougkeepsie_crt, issuer_pkey=ibm_pkey, crl_uri=MOCKUP_CRL_DIST + "ibm.crl", t=CertType.HOST_CERT, ) # some IBM revocation lists - ibm_crl = createCRL(ibm_pkey, ibm_subject, [555, host_rev_crt.serial_number]) + ibm_poughkeepsie_crl = createCRL( + ibm_pkey, ibm_subject_poughkeepsie, [555, host_rev_crt.serial_number] + ) + ibm_armonk_crl = createCRL( + ibm_pkey, ibm_subject_armonk, [555, host_rev_crt.serial_number] + ) + ibm_outdated_early_crl = createCRL( ibm_pkey, - ibm_subject, + ibm_subject_poughkeepsie, [], last_update=datetime.datetime.today() + 1000 * 365 * ONE_DAY, next_update=datetime.datetime.today() + 1001 * 365 * ONE_DAY, ) ibm_outdated_late_crl = createCRL( ibm_pkey, - ibm_subject, + ibm_subject_poughkeepsie, [], last_update=datetime.datetime.today() - 2 * 365 * ONE_DAY, next_update=datetime.datetime.today() - 1 * 365 * ONE_DAY, ) - ibm_wrong_issuer_crl = createCRL(ibm_pkey, inter_ca_subject, []) + ibm_wrong_issuer_priv_key_crl = createCRL( + ibm_pkey, inter_ca_subject, [], authid=False + ) ibm_invalid_hash_crl = createCRL( - inter_ca_pkey, ibm_subject, [555, host_crt.serial_number] + inter_ca_pkey, + ibm_subject_poughkeepsie, + [555, host_crt.serial_number], + authid=False, ) # create host certificate issued by a non-valid signing key - host_invalid_signing_key_pkey = getPrivKey("host_invalid_signing_key.key", createEcKeyPair) + host_invalid_signing_key_pkey = getPrivKey( + 
"host_invalid_signing_key.key", createEcKeyPair + ) host_invalid_signing_key_subject = x509.Name( [ x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), @@ -606,12 +614,36 @@ host2_crt = createCert( pkey=host2_pkey, subject=host2_subject, - issuer_crt=ibm_crt, + issuer_crt=ibm_pougkeepsie_crt, issuer_pkey=ibm_pkey, crl_uri=MOCKUP_CRL_DIST + "ibm.crl", t=CertType.HOST_CERT, ) + host_armonk_pkey = getPrivKey("host.key", createEcKeyPair) + host_armonk_subject = x509.Name( + [ + x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"), + x509.NameAttribute( + NameOID.ORGANIZATION_NAME, + u"International Business Machines Corporation", + ), + x509.NameAttribute( + NameOID.COMMON_NAME, u"International Business Machines Corporation" + ), + x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"New York"), + x509.NameAttribute(NameOID.LOCALITY_NAME, u"Armonk"), + x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u"IBM Z Host Key"), + ] + ) + host_armonk_crt = createCert( + pkey=host_armonk_pkey, + subject=host_armonk_subject, + issuer_crt=ibm_armonk_crt, + issuer_pkey=ibm_pkey, + crl_uri=MOCKUP_CRL_DIST + "ibm_armonk.crl", + t=CertType.HOST_CERT, + ) fake_host_pkey = getPrivKey("fake_host.key", createEcKeyPair) fake_host_subject = x509.Name( @@ -637,7 +669,7 @@ crl_uri=MOCKUP_CRL_DIST + "fake_ibm.crt", t=CertType.HOST_CERT, ) - #TODO DER chain + # TODO DER chain # store CA with open("root_ca.crt", "wb") as f: @@ -673,22 +705,26 @@ # store IBM with open("ibm.crt", "wb") as f: - f.write(ibm_crt.public_bytes(serialization.Encoding.PEM)) + f.write(ibm_pougkeepsie_crt.public_bytes(serialization.Encoding.PEM)) + with open("ibm_armonk.crt", "wb") as f: + f.write(ibm_armonk_crt.public_bytes(serialization.Encoding.PEM)) with open("ibm_rev.crt", "wb") as f: f.write(ibm_rev_crt.public_bytes(serialization.Encoding.PEM)) with open("ibm_expired.crt", "wb") as f: f.write(ibm_expired_crt.public_bytes(serialization.Encoding.PEM)) with open("ibm.crl", "wb") as f: - 
f.write(ibm_crl.public_bytes(serialization.Encoding.PEM)) + f.write(ibm_poughkeepsie_crl.public_bytes(serialization.Encoding.PEM)) + with open("ibm_armonk.crl", "wb") as f: + f.write(ibm_armonk_crl.public_bytes(serialization.Encoding.PEM)) with open("ibm.chained.crt", "wb") as f: - f.write(ibm_crl.public_bytes(serialization.Encoding.PEM)) - f.write(ibm_crt.public_bytes(serialization.Encoding.PEM)) + f.write(ibm_poughkeepsie_crl.public_bytes(serialization.Encoding.PEM)) + f.write(ibm_pougkeepsie_crt.public_bytes(serialization.Encoding.PEM)) with open("ibm_outdated_early.crl", "wb") as f: f.write(ibm_outdated_early_crl.public_bytes(serialization.Encoding.PEM)) with open("ibm_outdated_late.crl", "wb") as f: f.write(ibm_outdated_late_crl.public_bytes(serialization.Encoding.PEM)) with open("ibm_wrong_issuer.crl", "wb") as f: - f.write(ibm_wrong_issuer_crl.public_bytes(serialization.Encoding.PEM)) + f.write(ibm_wrong_issuer_priv_key_crl.public_bytes(serialization.Encoding.PEM)) with open("ibm_invalid_hash.crl", "wb") as f: f.write(ibm_invalid_hash_crl.public_bytes(serialization.Encoding.PEM)) with open("ibm_wrong_subject.crt", "wb") as f: @@ -719,6 +755,10 @@ with open("host2.crt", "wb") as f: f.write(host2_crt.public_bytes(serialization.Encoding.PEM)) + # store host_armonk + with open("host_armonk.crt", "wb") as f: + f.write(host_armonk_crt.public_bytes(serialization.Encoding.PEM)) + # store fake host with open("fake_host.crt", "wb") as f: f.write(fake_host_crt.public_bytes(serialization.Encoding.PEM)) @@ -728,6 +768,6 @@ # store a DER cert and crl with open("der.crt", "wb") as f: - f.write(ibm_crt.public_bytes(serialization.Encoding.DER)) + f.write(ibm_pougkeepsie_crt.public_bytes(serialization.Encoding.DER)) with open("der.crl", "wb") as f: - f.write(ibm_crl.public_bytes(serialization.Encoding.DER)) + f.write(ibm_poughkeepsie_crl.public_bytes(serialization.Encoding.DER)) diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host2.crt 
s390-tools-2.33.1/rust/pv/tests/assets/cert/host2.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host2.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host2.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE+DCCAuCgAwIBAgIUC3KzCH9KUb8ZaY6J/97gqebLoIYwDQYJKoZIhvcNAQEN +BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y +azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl +eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy +MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp +bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h +bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv +cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw +EAYHKoZIzj0CAQYFK4EEACMDgYYABAE3KDrdCdVeBV34NicA8AoP7hItcwxgXHOQ +F+V02N6b5AR7w3YDHE/JSzSoZZIYiqdy8SmeD5GtwHLV8tLQ8xnadgHT0BesGS02 +w0H5foGzzZOHZW3AfbdH4MpssR4Bf1jqL0jw6eV+oiMStDuZ44zri7PFjvVByt6M +oeSvh5qAXCQG/6NsMGowGAYDVR0fBBEwDzANoAugCYYHaWJtLmNybDAOBgNVHQ8B +Af8EBAMCAwgwHwYDVR0jBBgwFoAUw4weXbTWAZisD86gZSugZ6V1FNkwHQYDVR0O +BBYEFL8FyjCu1iRcjwx4pzfo+VIQ5NdbMA0GCSqGSIb3DQEBDQUAA4ICAQASxSXu +RxNw/kSwqedNq9jOTHb5FATNykBIVSuXS9BB2qkjcTVDXNnlsSBrPL/CumRp/TFD +5VsB2rLhESmUrpghQrODeFvyFE52yVhxvcNCyjz7yIQZvc4qofMQMsg3o0rSqp2s +lu1PUbcrL2aCG1yxB3isObVqiWaiRdnPxL8aX3Qt6BszlwWUgaFoaH0uZxlgVGKV +C+dXrn5WkNRVd2ouHSLQE6fIUYIf/TrV+AKu804IEoFRIvMUCqQRUHsj5toKhfDb +6tl/Xd+EiPCYbnhR2J01I08yxMExvYXfXapY7JJjlWTHKFKaxLoqv++NZRM1bW6s +uyLWP735Qb+0AmhZ6TfeJM7H77LpQK0WCylaNVJWWjXt9UsnNdirCbp/jpKF8bnG +2PkjBHKruvCakqw1bDq8eDv9In1Ki+Um4gp7OfjYvcN8zxvGQofgj++UaCy982iX +WSq14iUyrRDVu8zWghL/F1lUx7ab8UV+OrmZuCALVHZ76YVdwmJXGYll1OBbJbgL +5xze6p77vKzbgNyABWmR6TlHq/nFDhj9kKirpQaI7WHyOtsGpc7sqd0tT+CeOhNf 
+l3xXyFPb6N58aSC2cY0W0Nq6X/mWIgMqHY9lzYLmoLBFFZlIjqWzwyVcajmjrtaK +rlfs0e9f9DvVV8bMTXFMUBlWrmYDrROKpYLqhg== +-----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host2.key s390-tools-2.33.1/rust/pv/tests/assets/cert/host2.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host2.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host2.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,8 @@ +-----BEGIN PRIVATE KEY----- +MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIAQNfN5BLLLAdhlwGk +Ve8w2t+9x4LWR9st6CLCYHnTgc2gr4+HqgcMwmNuj3cA8ENBvIShUnHkX2E+9CyP +3W1ZN8OhgYkDgYYABAE3KDrdCdVeBV34NicA8AoP7hItcwxgXHOQF+V02N6b5AR7 +w3YDHE/JSzSoZZIYiqdy8SmeD5GtwHLV8tLQ8xnadgHT0BesGS02w0H5foGzzZOH +ZW3AfbdH4MpssR4Bf1jqL0jw6eV+oiMStDuZ44zri7PFjvVByt6MoeSvh5qAXCQG +/w== +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host_armonk.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/host_armonk.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host_armonk.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host_armonk.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE+TCCAuGgAwIBAgIUWlKUd1HKU8R7cjv5NJdWhHzhT5cwDQYJKoZIhvcNAQEN +BQAwgcYxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y +azEPMA0GA1UEBwwGQXJtb25rMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWdu +aW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUyMzlaMIG2 +MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBN +YWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNp +bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxDzAN +BgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZswEAYHKoZI +zj0CAQYFK4EEACMDgYYABAHGO0MnpQa6Q2IxgqV7AGwd3OwBnYOJjYJFhzrwY+wQ 
+acmJjWeNyHahBCxu4bM8vDr70SF5vZFrWpcWpc9JTY5AagFCFDqIfSvLJ6lKJuCo +g5RfMsWJpG2j/MnK7MxG+Ph0R+ItmLFbWFxCV5YOT43olhwYZr/pd9qHPAD96UED +M8JanKNzMHEwHwYDVR0fBBgwFjAUoBKgEIYOaWJtX2FybW9uay5jcmwwDgYDVR0P +AQH/BAQDAgMIMB8GA1UdIwQYMBaAFMOMHl201gGYrA/OoGUroGeldRTZMB0GA1Ud +DgQWBBS+JuVMVW1HRJfcoOLI4lijvxAMcjANBgkqhkiG9w0BAQ0FAAOCAgEACPbD +hFuibNamlcpnVG7mgnO56RB/K5otOCGS0hbR3OFUGY1ZDQ++M44OF/b+eNYHgjsr +ER4r2cne3qBjOArFj/toEDGM2l/DFWDnpOvh1ZItJjpQe07OGn+KpTLf/ZB0Q5D7 +jV/ddjJ0GGyxessjfUvmCT4BVWn1bXKJFSgujic7lgMf8WBGwW+WW3eZecYsh/Cn +rHWfQYqtvzB1uM89bwZwN+lvHz/QHTreAShDAiCY9M31cemvUvC58z8jPKngFhuf +C7ZMUXbzu2jYWo3EowzvcRpO1KqlLfNHjGLbaBaZWP2ocK2IUTrsAbr7PTdcAqjX +TRIds4JlbidA8OziEXFTcM2xShS6WISsO+9JXk5Xxc3+xscjSamHTMwRm2LaKjrr +d8mOxMs4/5547CBsBFMqu98tiD18K0yqdJiJJRDDOvqVeCvDE6y/pzKaSHIIyA0P +ATnjQzvgqbcwLrdoJ4WNdTpZieoisUR0oMAKUViPwd0xStzNX6K89PSd5c0Bqbhj +TwCpaT+DfcvlWk4wqEa9rLn+Su1SUfMSD2dITynLW0UexiKPHZ5epOPu5ho6cOk/ +fe/N1cX7SyOjhErWLwqr974YM0EPiR6f5lNooYXMPbatq8lcG0gpEg5pggHhsLiY +LGfvu2jQJKGnrBYM3IcRGUsc+VZ6CNTJjaIIbGg= +-----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/host.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,30 +1,29 @@ -----BEGIN CERTIFICATE----- -MIIFFDCCAvygAwIBAgIUO+SNGBgRvkmR/lKiZhiN4l4zS70wDQYJKoZIhvcNAQEN +MIIE+DCCAuCgAwIBAgIUBSLhuGTvxPbggG70ISL2R6DDGZcwDQYJKoZIhvcNAQEN BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl -eSBTaWduaW5nIFNlcnZpY2UwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0 -NDhaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp +eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy 
+MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw -EAYHKoZIzj0CAQYFK4EEACMDgYYABAF2iBzYS4tt5xI8fpAO33jn97aEmZFGsSY7 -qLhshEfwirbUacKxKO2eHDUBWAWs09MCM9ORIvfi+KocKxR7eIO0BgDDRVbCXPkv -Kc5mvUQRTWt7PHjhOj+QvbkAQPaHblJi93imGpN9GzSALrJX404Gct1fKjR63aoo -IDUJKnDDXC55c6OBhzCBhDAyBgNVHR8EKzApMCegJaAjhiFodHRwOi8vMTI3LjAu -MC4xOjEyMzQvY3JsL2libS5jcmwwDgYDVR0PAQH/BAQDAgMIMB8GA1UdIwQYMBaA -FN6M1/Do0NgBB3F9b9loWSA+sGd8MB0GA1UdDgQWBBR6ESQ+J+iXQe0a/KZ2+N+Z -8a9SZjANBgkqhkiG9w0BAQ0FAAOCAgEAVWJHoCxxkqh7b+y93PY4KPy4jJC4gN1S -dAPqttD/yteJ/4mbVel1/KNSoQBk5EpJmRqeBwHCgGJaT/TxYXNw/b8mRPxe/xbb -wieZMqlSmH028UjYDku1eM0IgHISgoCesIR95D5iAOWbMMVUwIHIHTfmhK7DmZVe -SPf7RIkctrpYxZh0Gw8KLZO6Mfy/9tq3dps0A7KS6jjdrF+M9LavPGwFvtfvRMTi -rdteByO2saGAKDvrjtievwlWCNBJlKV1arW9krN7eqJY5YO6eRbX6UjuhbPRgjte -eZ4jL121TBJaKZU7Q/lvYHIWfzstwQdiem2Ua1GyiiEvPZrQlmqQ3gDBtwJQGB4Q -2myP7MY7THiKObjaB8qRsVxKM78ktwAtAYSZv7gZlmSJ/uTMzDV5D2TQxqs7zwCj -sV+psUn4nvh58xP+DW+MYbF/Cpmzvul9FjMKBs270vE1q+gMot27rbQHRRJ4lVN5 -khiG6Oi6blVkPKExIVIiaZ9diXK6NhtWp15PWljNiDxZO+zpkeuw7cKLn/idzmvP -Gcj6m7DqcdsSIHNKbR5iM2VuDhg/j8uBD3uF2Wlymp31TBQgdYWSihJpwZKHNqJD -uq9SmegwI5gYg64KwABZM9hGbl/krXt/0CeCR5HRc+fthanKx/tO2tbCuVT3FR5J -XpKNUy1D78o= +EAYHKoZIzj0CAQYFK4EEACMDgYYABAHGO0MnpQa6Q2IxgqV7AGwd3OwBnYOJjYJF +hzrwY+wQacmJjWeNyHahBCxu4bM8vDr70SF5vZFrWpcWpc9JTY5AagFCFDqIfSvL +J6lKJuCog5RfMsWJpG2j/MnK7MxG+Ph0R+ItmLFbWFxCV5YOT43olhwYZr/pd9qH +PAD96UEDM8JanKNsMGowGAYDVR0fBBEwDzANoAugCYYHaWJtLmNybDAOBgNVHQ8B +Af8EBAMCAwgwHwYDVR0jBBgwFoAUw4weXbTWAZisD86gZSugZ6V1FNkwHQYDVR0O +BBYEFL4m5UxVbUdEl9yg4sjiWKO/EAxyMA0GCSqGSIb3DQEBDQUAA4ICAQBwaPG5 +Mg3iKtxR6ncteH+YmtMHW4/wB/341pTKFsKBYsMNWuCC5AKbNrshCNbFbctqhLrB +LmEpmza3/Pk6izO4AozHNl0tRec/HnQr2gonfI48HBDRiV2f40x0gJG9gGCiJy7o +6iKZDYUnjfnhXVC67RwLMEiIKbeOAWQ9hHqegUjYdaaIlhyiHLMuWMceidvG76nN 
+2eyJUNEouT4+UvquD2oqSitB3ZLhWRqPOQn57ME1b0QYF240PN8r21YtzPmSI+s/ +ej04EcQZrlJId6GtU7YwD1767hVw84v/QjPbMqnYQbxX8n3IvOf541rQ0UdjBFc9 +UhbnSn32IGFrRlL1y3MPBF6hLPcpW4P0QrUijc6gZ+x6SNFho8n+dk53F7RvMi1l +SLgJl7x8pUeqBn5QKMcYYsZG39oZmQj4xHjAABx2hRWayDscvROiQpvLHRtLVmk7 ++hq4Q/jalc2cNHZSwLX6Tv5P+8waTnXg8YNEHeAAgcw1lD+uw5HgusjGD4USE7Hq +Q6EDGzC+Ny3u2+35XWbNaKWVthtKAIcZ9B4LjdJXeQFGcOMr6yV5rKfOFQwXo8bS +rNy57tiva8KM2weSfRil4f146Rsb3TJzUdlkaN+NVIY0YImiC+rR0qa6Iv6JCio1 +F2lu8m/aRHQQF5J5fD7ge6v7F2D6K3qT9tTlrw== -----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host_crt_expired.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/host_crt_expired.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host_crt_expired.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host_crt_expired.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,30 +1,29 @@ -----BEGIN CERTIFICATE----- -MIIFEjCCAvqgAwIBAgIUMXh4o6xcPRTKpYDr+YgZnmWeatMwDQYJKoZIhvcNAQEN +MIIE9jCCAt6gAwIBAgIUSpwJMAovpO3Z9SuY7Zw+/pv3bLEwDQYJKoZIhvcNAQEN BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl -eSBTaWduaW5nIFNlcnZpY2UwHhcNMjEwMzI5MTEwNDQ4WhcNMjIwMzI5MTEwNDQ4 +eSBTaWduaW5nIFNlcnZpY2UwHhcNMjIwMzIyMTU1MjM5WhcNMjMwMzIyMTU1MjM5 WjCBtjELMAkGA1UEBhMCVVMxNDAyBgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5l c3MgTWFjaGluZXMgQ29ycG9yYXRpb24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwg QnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3Jr MQ8wDQYDVQQHDAZBcm1vbmsxFzAVBgNVBAsMDklCTSBaIEhvc3QgS2V5MIGbMBAG -ByqGSM49AgEGBSuBBAAjA4GGAAQBdogc2EuLbecSPH6QDt945/e2hJmRRrEmO6i4 -bIRH8Iq21GnCsSjtnhw1AVgFrNPTAjPTkSL34viqHCsUe3iDtAYAw0VWwlz5LynO -Zr1EEU1rezx44To/kL25AED2h25SYvd4phqTfRs0gC6yV+NOBnLdXyo0et2qKCA1 -CSpww1wueXOjgYcwgYQwMgYDVR0fBCswKTAnoCWgI4YhaHR0cDovLzEyNy4wLjAu 
-MToxMjM0L2NybC9pYm0uY3JsMA4GA1UdDwEB/wQEAwIDCDAfBgNVHSMEGDAWgBTe -jNfw6NDYAQdxfW/ZaFkgPrBnfDAdBgNVHQ4EFgQUehEkPifol0HtGvymdvjfmfGv -UmYwDQYJKoZIhvcNAQENBQADggIBAEsSd5vd7Vk1y1YsE4eWkrBrMElYa3/O6G2Q -oMZFo2mzzDH50NBEwYG4K+SjEmqJbAErtNHsAcJLWlvORiNoBmPcB6FEMifgCvuZ -zbSEiL/tt8XLI1M04DdKjVZ6AIrdhMKPz/AaRycnlHjbq0R0fEJP/SnWxtGnHewB -QGM8TDGCzXrwXsOr50soxQ+cbXFJ6eQyGrtNP0eyJ7kkIrz6+SJ0dQPXxoZpdtfY -XEv1OagX0tAuDUG26do6MjwC1qiDKoLdkxSFRkCvyRHqFapKlLhzBMrLhQ+Hl6E/ -kD5ORD2nMvTHcHWbjb7Mr6tcxKG+7CcJO0hYJbdfNCcKYc3EmE49wazSTBKvWfJp -XObVEGeM/11cdcg6Li1jw/JrrexEeQpjgoNuAgGKRmxzJBOCPNkU8jGs5QEqFCyw -fpl7BA+ydW2/zAvcr7mZZgyK4KiRTdK5VTGfXuwTv+Q3hsE0CZ6L+byNCZajyGzs -xq9ydh2G4kIlWFzs+2gSxQWYRiOGt6W7FVdiPYOnAVgzmRJdfR1qVrWZTGbPxJbX -1O3qYBPQE2tU8xsyl/HuikGProda2xwfTjmRhr7DPYyF75nGPvtGm6vwBBcWm+xl -jI0a/dHqE5MR6acOrXYNSFflPcfd04vJ87Ajx/wFr0Glo/8LWtzMp0nFpvif9LDX -3sGEtRA1 +ByqGSM49AgEGBSuBBAAjA4GGAAQBxjtDJ6UGukNiMYKlewBsHdzsAZ2DiY2CRYc6 +8GPsEGnJiY1njch2oQQsbuGzPLw6+9Eheb2Ra1qXFqXPSU2OQGoBQhQ6iH0ryyep +SibgqIOUXzLFiaRto/zJyuzMRvj4dEfiLZixW1hcQleWDk+N6JYcGGa/6XfahzwA +/elBAzPCWpyjbDBqMBgGA1UdHwQRMA8wDaALoAmGB2libS5jcmwwDgYDVR0PAQH/ +BAQDAgMIMB8GA1UdIwQYMBaAFMOMHl201gGYrA/OoGUroGeldRTZMB0GA1UdDgQW +BBS+JuVMVW1HRJfcoOLI4lijvxAMcjANBgkqhkiG9w0BAQ0FAAOCAgEAd9NdTLDK +fWPdOg78c+CaG2XVnIzBy6niur4vM1DPJYbwabTYbpJM2jtevMo3h96jCd6AfHzv +cFZ1BzpYsWtjbDFhiqowmKnlFlOIoIDXCaG7vBvDWc4iWGG9PHMWEouSQWxg77bZ +A8l7+K0VdRWYCqwxxClK8oK3PqGyGqELRDWwlBb0kRi9XRmB+2BjOdZOQvmGka/R +l8hZWHTs+SPHZ+ySiJn55/w7gN/LCJs1XX70jPrzu01lDPTqVIrGKBtWBMKFKR7g +A4r3EThfj85Xq1T45rvD0ozlkyVmdMtSk3z+fKCt5+qEHAu52kq2Ps7xQviAYkIK +f+yNf1a/Ly79QoqEibRXuKMk/oWoqv7f5vT5HqoWEWZFIYbdbYTBIwkw3mHwZRZx +CcyiukHw2W/ZwM6p60gXWZwrQxwVkdhxWnAHrAToL6O8M9Fk4aVWUkFNjqVUjTWS +HLCxv8mCPujXIneqXyiJOpAPKu2kjmoFwfisjevNUmO5EqUHUba98Ne7rdiWweP3 +nwcBBKIQ2MVZ/lPqfPtqFOHqBzCbqozVw1HzE2/ImnYsK8SGqwBnSJyTt+WjM/8S +4ulkjVDIwWFwUkOWjM2yorp3u2tFJF2sMv+3AR3mp6aDjF7sCzYpLaZag6oDmwOF +nR+VwEbTC8gnl21swWSjtUC8I0EidfxzCpc= -----END CERTIFICATE----- diff -Nru 
s390-tools-2.31.0/rust/pv/tests/assets/cert/host_invalid_signing_key.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/host_invalid_signing_key.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host_invalid_signing_key.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host_invalid_signing_key.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,30 +1,29 @@ -----BEGIN CERTIFICATE----- -MIIFHzCCAwegAwIBAgIUBphLhhHJL2K96bK/cfgBk0d+7gkwDQYJKoZIhvcNAQEN +MIIFAzCCAuugAwIBAgIUELm+ubD3lNPrFxtCj86wMh7lYEcwDQYJKoZIhvcNAQEN BQAwgckxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMSQwIgYDVQQLDBtLZXkgU2lnbmluZyBT -ZXJ2aWNlIEludmFsaWQwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0NDha +ZXJ2aWNlIEludmFsaWQwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUyMzla MIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVz cyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBC dXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsx DzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZswEAYH -KoZIzj0CAQYFK4EEACMDgYYABAF709fZwb3pGZWfHLcCG2fLNcvNh6IqPHwfjRYp -brM5BEE8XoXxcCAXwxo4EAGOcuZBRKP6Ofahek7ppizV8bkPMQDCIgNq2N8mwPFO -99CbyrZs4ZMeAWWWPJiHXUHbWQq4ko8w4UUT7nUIgdHQwWM6TdZml8ke+LE9Jxtx -h9KkfCPi/aOBlTCBkjBABgNVHR8EOTA3MDWgM6Axhi9odHRwOi8vMTI3LjAuMC4x -OjEyMzQvY3JsL2libV93cm9uZ19zdWJqZWN0LmNybDAOBgNVHQ8BAf8EBAMCAwgw -HwYDVR0jBBgwFoAU+jIyiVonTYe6GuSPiAkxA65qou0wHQYDVR0OBBYEFJdEECIf -7UdEf08saThHZDzSfxjhMA0GCSqGSIb3DQEBDQUAA4ICAQBgvREPqqKvAZM3q9pG -5S6wtUspz1Y1sBD4duPEnMZ7Vf9a1HRPrR4vc5ncFIcyS/U/UusvWgFMYoa6WIZR -l4OqRplKF1pwCaQ2F/8OdGMV37iUqZuN6V/GggbFXgMFK1dH29T6h4VtoKC9yScQ -ToHQLuz4ymkd2BwxYix19M6QwdrqomjJb2/zrc7pvMZ0k8KKYi/wt6tlz7FDvsxF -VSDf29gm98kfDJfzPfAC5D93YruAohsP8SakVdA2/YbTkDfImT8ggSnsE83upSD6 -ssjKPPNRunLeCKLb55/Ikcok1iyGhfdmkJvdIHSEvyNp0p7mrohz6l748xdKkKNt 
-9hOzsfNjThq3zp97ND7M+knqNuzsZIkcV/OUdxNBootIrJXvfeqpaw++5SfWvf+6 -1dHJQpDU3cXKAQ0/RvvqLC+aPvklk2efuvBKIKP9X4WqcP+l2P19GMaM1SZtr5S5 -OWMxqT6sW5lSX5Smm4rMB3UmLDS0SXxHIIvFEQSABiWb6Y0ibDj8a+YrWVyQNH1K -fCSNvsW1r03D06q6gp9fxL1hFBLUw9ooi7ewPfNmm3doe2R0TQpE0pWkiRhhjDjJ -RcIPp+gfYDtt0LcoYVpNdKtLoRPVtO2K3zeU0ezW4PL9Q0tjxh2K/+1+8UEv7nJD -sFg4nQMeygdSK4w0ZlT8xqXVJw== +KoZIzj0CAQYFK4EEACMDgYYABAB/2mDXj/QP7fcpxLTNfFfuh/X1Y/RS8XK+Y4v+ +0nisTQhe3MK8BZ7D0bcB872s8EJCYZ1OYr5O2epQgv2hbk4T1wCGKe+Pjf2QHTts +wXMpPf87R3etF+zWR+CYH+5N5uQhjW9Ueeq7wve3A61e+Atz7IzTKVlM29kT0XvI +sYtazn6usqN6MHgwJgYDVR0fBB8wHTAboBmgF4YVaWJtX3dyb25nX3N1YmplY3Qu +Y3JsMA4GA1UdDwEB/wQEAwIDCDAfBgNVHSMEGDAWgBTXVfLQH2D0KccIHhK6M5MR +TITvtDAdBgNVHQ4EFgQUWdGEEpvjFJ4SNUillC09iBfcTEUwDQYJKoZIhvcNAQEN +BQADggIBACfZ7+KtT0Hl4Y/JBwCtUqCRRt4kCM5rXqORswkJ6RNmx1OtBHPlkUEI +E5HrE0dZlaECfqMkOBxhbBqOSpoL/xjz1gDHILWXEbWchuFnU4EAgYvJS7nLAHHO +swJCEP8VDM3y5lUWni8sXs6ro/H5tAkm260+xbn0BFF99l+M124ZUawB+tDg7E9b +MNG0eWtfeiZzWBBhoxhQoD03gFADWxTU1EK4JgCl7frsj4PjK7TMKTV/Gd/Bd5l0 +KreFW5orupd3YBqDeUYI6p1l9+uThottwrA4z/POygxvhqgoqNA/+evMWX3wxNza +ComAmWffB5EJQFEKcpdASwEhyjBMap15GdJVBFGYzXAlksQz7k1fr4h0ebuYtcpb +dvcDyeIxjsg5XiP32494wogLUjHioi5mwIrGn8/9V6JLRGfKBHnmejHBR/cbMpuv +rpl0ZZks3bicmBVTKFyj1ORRGX1cXsbtCbPBJEmBeR5IOFYBeaQHrqTcaEBz4lWe +K2OT6ebcqtsCzDSmjsoeAX/CWRVaIzLzMc9SqE8lZ68ajXjbt/59I7EBeOenttO5 +mg9mokwDqoeM0bXyT3qdT4s9fhy0BPgnBqJHKIc9afzV79Iz1uJ/E+TZJ+tzhbAU +n4fFxIHOdbyaOLCLT0+O74FZCQq8FUUnL9QVY5dXTH2458IUiD20 -----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host_invalid_signing_key.key s390-tools-2.33.1/rust/pv/tests/assets/cert/host_invalid_signing_key.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host_invalid_signing_key.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host_invalid_signing_key.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,8 @@ +-----BEGIN PRIVATE KEY----- +MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIB48Wl8pvlD/o6F7YG 
+sFfCXNQRbKbppIrywEBkfNG0VzfXazBOLD41bcdPEBh1111kSYgn46uUVxjabyLp +BgQthf2hgYkDgYYABAB/2mDXj/QP7fcpxLTNfFfuh/X1Y/RS8XK+Y4v+0nisTQhe +3MK8BZ7D0bcB872s8EJCYZ1OYr5O2epQgv2hbk4T1wCGKe+Pjf2QHTtswXMpPf87 +R3etF+zWR+CYH+5N5uQhjW9Ueeq7wve3A61e+Atz7IzTKVlM29kT0XvIsYtazn6u +sg== +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host.key s390-tools-2.33.1/rust/pv/tests/assets/cert/host.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,8 @@ +-----BEGIN PRIVATE KEY----- +MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIA8rPNC6rxZR+GxSxb +qWRreFGnWRQGd22nHWKAvQmrA5GlXTtWQMoL8il9Jb1OnrQPPo620nQpzA1GXo4U +BUqgYoShgYkDgYYABAHGO0MnpQa6Q2IxgqV7AGwd3OwBnYOJjYJFhzrwY+wQacmJ +jWeNyHahBCxu4bM8vDr70SF5vZFrWpcWpc9JTY5AagFCFDqIfSvLJ6lKJuCog5Rf +MsWJpG2j/MnK7MxG+Ph0R+ItmLFbWFxCV5YOT43olhwYZr/pd9qHPAD96UEDM8Ja +nA== +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host_rev.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/host_rev.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host_rev.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host_rev.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,30 +1,29 @@ -----BEGIN CERTIFICATE----- -MIIFFDCCAvygAwIBAgIUGuqbEx5X1pFQfF4T6kIfdcrUwbMwDQYJKoZIhvcNAQEN +MIIE+DCCAuCgAwIBAgIUMi8utJhRyr5bbvABnCn/Q9/vJ0kwDQYJKoZIhvcNAQEN BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl -eSBTaWduaW5nIFNlcnZpY2UwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0 -NDhaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp +eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy 
+MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw -EAYHKoZIzj0CAQYFK4EEACMDgYYABABphgYAOqZ4uJPUtVIqaU7UsJgz9+xMmGDq -V7nFimGmkbmqPLT96jIyN4CWdLzfbxP0xvIklkEoOm1xV07YR2LHHQATNEPISTDH -7fySZ47QVc5tyECpZVW7JvSvWKA/KiApAbB1ixErfDrqW1nG5IUtEbDYJgtGwPI/ -+I7e9cU2wkA5mKOBhzCBhDAyBgNVHR8EKzApMCegJaAjhiFodHRwOi8vMTI3LjAu -MC4xOjEyMzQvY3JsL2libS5jcmwwDgYDVR0PAQH/BAQDAgMIMB8GA1UdIwQYMBaA -FN6M1/Do0NgBB3F9b9loWSA+sGd8MB0GA1UdDgQWBBTECeFXnyfTnFazbR0K9cYk -lS3O0jANBgkqhkiG9w0BAQ0FAAOCAgEAOjEE4/KdvcJZbloSGLue27FSrhExvUJ3 -tYS3rs2xg3Ua2daCioI00VrwIN2Fjisqvi10Nv6+NWz5w1220AJyjlmPxvWFcPco -sXLAOWDhi217JaoJ+RzavpOwzhTffEpvPwR6RU4A36vvonc4jm3mWs6F1i5T6YPi -ZaYuk3CRme6WX012rBhIc+heTGh5ZDwwPmGDMLXdpsu+2+3sCPxUW6eQcOWoXkhJ -jn+n6mU18JdN5+wU6Lig5uxXnoP1VN8Xog/mmKV4ThVAS8k9iS5wFK4jl27n6XZy -wfd65WWlm4MMEvKNruj025aSPJp/bcnchBNfnXXPuI5GnYS2cC3TXHd4XT3r3pYn -qgLoNi+AkxnTnxEv5lg+oE+yTMNxDh4iiYkX96ljamGbUPBvbD7bi3Oc/7EOVra1 -BmCGmcjToEnm0e0it9yyuYwKQ6nTz906W3XzaFB+awnXZcGEMgwwdvdal8+eki/r -ofT0nO6cuXbbPYc0rSs/2f3WxjDVmQtiVoJ1dPYIrTX/4lbrsYWVS016Y0UcZ/D/ -/qWWWZIyYpzPasTEgGqtwb5WhBvz3RAFTePefFlTzBHhSPk0Tsu4+W7zHpapfZ0M -0LXT1lGR5WBmur002vbTm4yt7tzdypMbL2i70WGEp4mpRohmBG1m9hYGcelip3iL -Im6vFBNWpVE= +EAYHKoZIzj0CAQYFK4EEACMDgYYABAHkrwf4hZ7M5ahoYHT0u1Xgl28qxURcdNmW +kYDA5u1Y2mGXQq85BakAOyfxl/FF/cBrLm0eLVFpePumqgPna3El1gAs9p2SFwN7 +4MmcLhbwPmGnWmzVrOh9cNS988XYf01E74966r4MKpAGxSNPKDwu4doXFVAYRH/w +tcgbjlk+Qi3c96NsMGowGAYDVR0fBBEwDzANoAugCYYHaWJtLmNybDAOBgNVHQ8B +Af8EBAMCAwgwHwYDVR0jBBgwFoAUw4weXbTWAZisD86gZSugZ6V1FNkwHQYDVR0O +BBYEFG+41urs6GhYzOrJD/MJDWMSrUlvMA0GCSqGSIb3DQEBDQUAA4ICAQAnowYb +R1qgnpH5FLgkj3LndAAfFmJMlTrNcmtafjaLZHUmxxO1tFjb7vvGgu9LggwwSRoN +y0Qsu0mywXd/ntEuWDyD735aGtUE9xnmluoj4ILBnln195VuvUoWYjCy0XsGZg6G +UL8bOaZgbvXQWDcdXfpYnM9C5Uj4mB3LFDdasd7nDv2F9vZlHNkd0uGmZvBco3SX 
+GNFETOaY5HjU3jLlCmD74p7G9e//+153R2aXEww3SVVJyRowO7SpwH88DNjl2a6A +OLWoA3oRrX4rDQI66LYv0RZMDCTvzY+HxOQHXeswosA8D96GhpClb1X0wLZUp6gR +pLzVzaE8RFnSg52jzjI3ehFIoSK0x9R1KPzKbYDh7Q8Sc5sOSe6r9Q01BdpBOUjp +iTKhgbUEbcIwcaKLIGeSBe0+Yh1uQpqBtOPectUp9bz+hEUDzG6QMS5VFxAGuZWY +2iA97jxHsUBEgMBZha7z/hQ68Pst+KaIV/24By8lKm0rWpLSKOXVAW1lWZrlpIcx +Le8+dmy9/JFJXmmN/pmAEVVsK5dYtVXt6ikVi+LPr/7fCfvt2zk88wiUp8iz5jgQ +pNYT8MPOMf+yzzoqPXjGmrT5znuHnCg0OSh0u8NILWQPQl8M6KNiI8lzcVBy6Djq +fmnQlaxP1Ey+wrznWmBu5zesU8uMyRTxIfnEzg== -----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host_rev.key s390-tools-2.33.1/rust/pv/tests/assets/cert/host_rev.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host_rev.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host_rev.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,8 @@ +-----BEGIN PRIVATE KEY----- +MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIBLz0ExeBJrxd5gA7C +YFJLVuNd4CPNc/jsOLIMnIbLHSegdOtQn9tief97k5rZdz/MQzWp7rCmsLUYw/fM +ktUQjJKhgYkDgYYABAHkrwf4hZ7M5ahoYHT0u1Xgl28qxURcdNmWkYDA5u1Y2mGX +Qq85BakAOyfxl/FF/cBrLm0eLVFpePumqgPna3El1gAs9p2SFwN74MmcLhbwPmGn +WmzVrOh9cNS988XYf01E74966r4MKpAGxSNPKDwu4doXFVAYRH/wtcgbjlk+Qi3c +9w== +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/host_uri_na.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/host_uri_na.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/host_uri_na.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/host_uri_na.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIE/TCCAuWgAwIBAgIUPRltRSFZSbucmw20u9TN7WHxG8UwDQYJKoZIhvcNAQEN +BQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y +azEVMBMGA1UEBwwMUG91Z2hrZWVwc2llMScwJQYDVQQLDB5JQk0gWiBIb3N0IEtl 
+eSBTaWduaW5nIFNlcnZpY2UwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUy +MzlaMIG2MQswCQYDVQQGEwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNp +bmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25h +bCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlv +cmsxDzANBgNVBAcMBkFybW9uazEXMBUGA1UECwwOSUJNIFogSG9zdCBLZXkwgZsw +EAYHKoZIzj0CAQYFK4EEACMDgYYABAHGO0MnpQa6Q2IxgqV7AGwd3OwBnYOJjYJF +hzrwY+wQacmJjWeNyHahBCxu4bM8vDr70SF5vZFrWpcWpc9JTY5AagFCFDqIfSvL +J6lKJuCog5RfMsWJpG2j/MnK7MxG+Ph0R+ItmLFbWFxCV5YOT43olhwYZr/pd9qH +PAD96UEDM8JanKNxMG8wHQYDVR0fBBYwFDASoBCgDoYMbm90YXZhaWxhYmxlMA4G +A1UdDwEB/wQEAwIDCDAfBgNVHSMEGDAWgBTDjB5dtNYBmKwPzqBlK6BnpXUU2TAd +BgNVHQ4EFgQUviblTFVtR0SX3KDiyOJYo78QDHIwDQYJKoZIhvcNAQENBQADggIB +AGlM/E6RfiQV8si38/IieHw2TsZDNKluXA/edGN4d9i5ztIB+G57IgvAR9GBeRTB +AxANycBGOWy+VXsUij3UgpoK3NiZJ+XvgFYqx0ZVUX3uPmU6R0Ko5cHGmSEVV6KS +5ToGLqSZdftybGt9NYKg+LGxzIhT15tgUENkOrFVRSlKVn1zAP+sa2vOEtH5aX6S +ins3hHQBuhGKSR/z2I5IzEVhnx0FJxpFO32QBdmXUdgjlHcpiIy1BaQRTca/U4iJ +Q+fnzn5UP5v29ftr/PRh0W9rPVeS8ge+vuPC1eriRL4WWnkMFm0QljERqhg48NWk +zT8mnVn0E3vQ2Y71FKg85ov0Sza8/OpBgAgnoyCDUIp8YD7BrU1viLEoqLtlvsoH +l/+mU/CnCe5749C6OBppKhdrIRSG8bh6mIeco1ALDNIaqGoUtdhib0f+YKeCc0zC +MFjvoSranndvwO+z/qdL7Feay8uAjgq1GPNuj/xaEbzrZz0yF2viMXfK62kgwq+W +ebg2D1FHVuKXQ2q3IXmwcyLOEUScUcEYDsM5eDenoScgIE2FfHvoGOdJVLsQ9U+8 +2HTb7ntvbc6ANCTcnt51s5/PfD7UaHeJiSxRpU6rjxnqKZ2mEjbF/Ha8AjImQrrd +jpjNgH6H5p7ZQqc/4DGa/Q2B/rfvmn5bAgOhicEKMzm1 +-----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_armonk.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_armonk.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_armonk.crl 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_armonk.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,21 @@ +-----BEGIN X509 CRL----- +MIIDdjCCAV4CAQEwDQYJKoZIhvcNAQENBQAwgcYxCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u 
+MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMScwJQYD +VQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMxMTE1NTIz +OVoYDzIzODgxMjIzMTU1MjM5WjA8MBMCAgIrFw0yNDAzMjAxNTUyMzlaMCUCFDIv +LrSYUcq+W27wAZwp/0Pf7ydJFw0yNDAzMjAxNTUyMzlaoCMwITAfBgNVHSMEGDAW +gBTDjB5dtNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQ0FAAOCAgEAgvfpzZbh +/utz4zq6/KQdFW25wK9tqPkwFut9vdsOAGMkRRcX699Rk3Km1TpgBVGRKAlsQKfS +IMq1FtNwbKMv+ycZHXDewk/i7dd2A0eUNzbDxD/WJ7mBdUHJtzkvIKwBmU7SkwdM +yBRA20H8xKioveDrms+MM3a4oLNRzkZI/FWkSqjCDh9lI6igZNt3J29vkcdIlu+P +Maa7Cpwh1ELrQbwE3NVgbr172D9qzQ+/NiRlM/Hla/lqGELcUgwVgkNMBFEMo7iK +XsWy9WQ+/0RExo7n09jk9D4tCyAizGae72Xai8IJQD4tSVNpv6ZtNHZBBKQhkjGy +PXGM8tHC91Tr8E9CSVVOrtxVit5htxjnTkJLg/9XnoAkxGNbUFbvCm+zIOzkmlJ4 +Drvvt9W4tVfkcfILHKmy4PG/0JU/tAygpr2Hk39e/Lcym64O9gz2g0pU4On/qD6e +91Q4822RIYLDK/IO3H30CbO25nBChZ4z6KBMWXHG8mTJ5m1IbwZTPxBlL8t47P9v +2ce2XDkcPzYpk/wqJ4xxQYhuhV9RaJreEnnqCZ9ER7L+heoJcCZcnr56wT4mNd/a +Kfdm0Oxm8CFCMj1djVJQ7+ghsOY3RAehQ75m2Od5BRM+Vv3gYtkzPWTkWjDUn3us +2KwZvR+lNaIVXPlQqBlIWJvehWZexLzTkA0= +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_armonk.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_armonk.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_armonk.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_armonk.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,38 @@ +-----BEGIN CERTIFICATE----- +MIIGqzCCBJOgAwIBAgIUWYLchAxfdv8uD28Im9DK+Lo8pokwDQYJKoZIhvcNAQEL +BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y +azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg +Q0EwIBcNMjQwMzIxMTQ1MjM4WhgPMjM4ODEyMjMxNDUyMzhaMIHGMQswCQYDVQQG +EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD 
+b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo +aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxDzANBgNVBAcMBkFy +bW9uazEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2aWNlMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxNqcFB9GSj+nC1PVOTIAvzTu +o3X7bTbohEPrIDDsEcGyVzQtfagI9vkxhhGLhXxIXrkwEI4u75DeLBdqZR3KFJ3Q +D+jHW12EWNpmSYu272CEAJeb/SBJaFbh40WbY1BsTP+3zI5QMwVTmCH5QqODD+GU +0fDSo9Gti0yX4rxOY+54jDC77AhSlU55rYruBoIXVe4CcPUpqzZLgqB2cTJ5g7lo +kGqc6w9pveCznVYGPfcdDt+ePGssOvrfBOJnn6N44tTRAG623BrpctXit3IAhG60 +y6CqF29BjV0RKOv09nOBiph2a3lAiKYrpAt92BETFB7KSQManUrrvWODmYEcoVkz +vrVrB3WnCcHs6lsjQg1FcNrGcDzV9SmX0BIhM+fasxZyHWc08pr3NrRnEks7lRqY ++TS8DAeZYJt/2M0Jr32d6Bh+WdyGFFUj6sBPtCaC2VnSkxOgKXeWf/c5EUXIQT2Y +XEkNK9CP8Kqs8IerEIpfceelQReA5QcvNruaJNktWLKJgYTrdLdfOP0us+9JJHWc +FH3tv5906CA+Tlm53Dk6SaRc/DB7lu94yTkcSOpxXZnximZi0GcPZYmZTaswE7d8 +HCPLhks/RkZSV2764Kl08xle65APiWZ4dM24uSyu75Izb4hrwgIA4qyMdDVQnI3c +u6utmKUPyHoJtH26dr0CAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAOhgxpbnRl +cl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAww +CgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU09GYwHQYD +VR0OBBYEFMOMHl201gGYrA/OoGUroGeldRTZMA0GCSqGSIb3DQEBCwUAA4ICAQC5 +dH5QKZHynK7vGHOtol3brWgAWKkUVj+uqok/AUyGF5hkcXI2AVmu5nWV16Z6/c0b +rD37YMPCCnZorQQg5g3c3H9In3NzTYWj1q2YT13yQ5PaD56vkwfPQKlmY2kMb/v8 +Y5Ho2LFjhKKOpzP77CsYMs9ZdXs6VKGZ5dSmOAOvJ4AcGaRPs3jXVz3EZFgc+ytK +705mWrAgTYI3xcemxBTwILWAVCoqqirWrDNd3jicQo4Ks/H07RtLuVNY8kXUJEN9 +UMG7Ggzc/rTlvV/PUsJaQl8lunPDdbUBLsXE1iWaaAxmRTTQaDX8Ygq8NFZgGSrk +E/dnJXcnJyV/5GH22Ho4JVVtADkP1wh3TKcojiDfM2WlzatSOMPdeISsUQ/D+VSm +GuSOxPkS0wj5XUpoJz2bKvXNMH1Mdp9sMfOlkMe47iTmU5gK4PJMoj2NZ7zW9u6p +pYXQz9LhdwoTJZBJXVYSA1q+sIIy8u3vFXEeN88FuoY8yS8t8qjwGEcKP1oojFxV +ibj9cv87Vcd+tgXAqwkjxGQVbDZPBlL6OWCYoEluqWGnbUD1mUp2y8K99ZlzoT44 +i2ipEFyZmVPrmwgKRlz1UYDIsQvtvVw91Oi+DRNP1u+9D++Mnz2itGixHh1Hob25 +nQl1/SS4PfgMlYscbHYOfLAzOnW02FvrJZgQAQUpgg== +-----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm.chained.crt 
s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm.chained.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm.chained.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm.chained.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,59 @@ +-----BEGIN X509 CRL----- +MIIDfDCCAWQCAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll +MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMx +MTE1NTIzOVoYDzIzODgxMjIzMTU1MjM5WjA8MBMCAgIrFw0yNDAzMjAxNTUyMzla +MCUCFDIvLrSYUcq+W27wAZwp/0Pf7ydJFw0yNDAzMjAxNTUyMzlaoCMwITAfBgNV +HSMEGDAWgBTDjB5dtNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQ0FAAOCAgEA +hMmvyXLOtDG7faP93ZqMh061Qms/at5uYz/eAA9CGfS4E+hq2uQaic6gEMWf5NkH +9xRWB1UM9sLR5Ai/Fn+MqsbeXK+1ommOfYx0KL6J8JihHAlQpGYJXu683dxIB3LL +ijZ+od5pes5ZIRKtMNaO4+meDG6e5guXDeFNyJaNYqFXOJcy8OIL4O3T840qY1aY +YcHZs/sNwC/hSAloTeUQ6S+ga/EplzSDumz5VrRsEhVd3wScM1FNeqwMvA4VPUIa +NOtZJNLJBLhfmetTeO/wb/y3Ekp8sXAf8UJqABczJODSaGc2RwULmxJd2Q5lESpX +RkfvObqTWgpzpyV/1OhZYDUlXJqiADYnyAsQL8WV2uFQ5fRG7baVDiNYn4uE92Of +FN7jXmT+7UcHSO5L0VqHS6Ia6j2NammKlbhsAw7dU3uxEgCJvBYlWSY+z9Fs+O1B +hNU42fL5W7JnoGfqG+LqfDdbG0MG+GMo1rDDcvrEK73F41w7mLmZhfYAZY26xU6Q +XyLcrG4YfR6FCyOs53zwrKKG9x3ETqY3S0DSSOKTnuvIGZBCSOxGpqdb7pQax6IJ +PqsM+IzFC1C0Ol0eX1ee734rCHfNP6XEM7ucjBweNUJRrU0Q5CrGGJzEQz0ym/JN +81f/kQJJosSSc1+j8iDiaKkUgcQ1DWWZLYZgvCkwZcY= +-----END X509 CRL----- +-----BEGIN CERTIFICATE----- +MIIGsTCCBJmgAwIBAgIUMwTHYM3peBjwVRi6iICr6FVn0cMwDQYJKoZIhvcNAQEL +BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y +azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg +Q0EwIBcNMjQwMzIxMTQ1MjM4WhgPMjM4ODEyMjMxNDUyMzhaMIHMMQswCQYDVQQG 
+EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD +b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo +aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv +dWdoa2VlcHNpZTEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2 +aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxNqcFB9GSj+nC1PV +OTIAvzTuo3X7bTbohEPrIDDsEcGyVzQtfagI9vkxhhGLhXxIXrkwEI4u75DeLBdq +ZR3KFJ3QD+jHW12EWNpmSYu272CEAJeb/SBJaFbh40WbY1BsTP+3zI5QMwVTmCH5 +QqODD+GU0fDSo9Gti0yX4rxOY+54jDC77AhSlU55rYruBoIXVe4CcPUpqzZLgqB2 +cTJ5g7lokGqc6w9pveCznVYGPfcdDt+ePGssOvrfBOJnn6N44tTRAG623BrpctXi +t3IAhG60y6CqF29BjV0RKOv09nOBiph2a3lAiKYrpAt92BETFB7KSQManUrrvWOD +mYEcoVkzvrVrB3WnCcHs6lsjQg1FcNrGcDzV9SmX0BIhM+fasxZyHWc08pr3NrRn +Eks7lRqY+TS8DAeZYJt/2M0Jr32d6Bh+WdyGFFUj6sBPtCaC2VnSkxOgKXeWf/c5 +EUXIQT2YXEkNK9CP8Kqs8IerEIpfceelQReA5QcvNruaJNktWLKJgYTrdLdfOP0u +s+9JJHWcFH3tv5906CA+Tlm53Dk6SaRc/DB7lu94yTkcSOpxXZnximZi0GcPZYmZ +TaswE7d8HCPLhks/RkZSV2764Kl08xle65APiWZ4dM24uSyu75Izb4hrwgIA4qyM +dDVQnI3cu6utmKUPyHoJtH26dr0CAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAO +hgxpbnRlcl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYD +VR0lBAwwCgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU0 +9GYwHQYDVR0OBBYEFMOMHl201gGYrA/OoGUroGeldRTZMA0GCSqGSIb3DQEBCwUA +A4ICAQBfBZZ7ZPkRjRgk+0/7CGam2AAv/GSuma8lOnJ6IpBEUL0D01reRe9NO1Y5 +iXasGZ80e95oC7WtePt1LS2I5rOOb3No0iHcUTm5lZV2M/ObAOwraQLXePCRVgU/ +OQzNhtRoCpqgL80/1Ne+MTHj1yZp2GRKXOdMd39KZoG92JU/h556GQ6reN6WKNs+ +wbFG+JKiRnfvPsul6J5lG4QuObCAZXa0fhcQNUHyWjCGIhMa8AuhkDDlIEOBV1Vx +T/ixdoFhDIoz0xExFidR5uqYXKZlgzcMZ4JT7x0Vs/YsOIg9z5PNkyjt8Kjrxvqs +NZ1eYuNNxdXxdJZ2x04y8AzLkLRYntOCks5nEamQtzv0ice1Jg52qNJnMm0Y0U9o +7BjlWy/pp8dBfIA+FyxsIs62hAE6Z/vuJa1VuWJblrT1MFM48Bh64p1m5uDXAZ9w +Oy7n6YDpzWKWqn6XNV6FoDSkkwaOwB6bPaqAP1ZvP3BmVhednyYRYY21M2aOmxos +7rpaW1FHgYV96Dm60rL7XRtOEYZNPSIPDO/Ro1oWs3EFwjLLAWtlK1HdMswlnVDX +6M+LNCRZA3foNi6xvf7CvPkshtQe3FOfiw/OiPBsYsYREC+90Ml/oeTIsyCaOF6n +5+dpjCBeOUaxzfeQI4WCrLG6+iQZNXWgBkW/PHaPZTldRWzvdQ== +-----END CERTIFICATE----- diff 
-Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm.crl 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm.crl 2024-05-28 08:26:36.000000000 +0200 @@ -1,20 +1,21 @@ -----BEGIN X509 CRL----- -MIIDVzCCAT8CAQEwDQYJKoZIhvcNAQELBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD +MIIDfDCCAWQCAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll -MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTIzMDMx -OTExMDQ0OFoYDzIzODcxMjMxMTEwNDQ4WjA8MBMCAgIrFw0yMzAzMjgxMTA0NDha -MCUCFBrqmxMeV9aRUHxeE+pCH3XK1MGzFw0yMzAzMjgxMTA0NDhaMA0GCSqGSIb3 -DQEBCwUAA4ICAQCGdm0ls5MXM6MUI0wR7qOitKh3TIfRnCvhSibVPskjlBZaBT01 -F6xaQGyWVR19IzQNn9GxOGMqvRy/oSihznBeA0+e9497IOPXKop/JsypZR101539 -ntVt691ncmctxKnb2nT4dw7AuiLTxMVzdJ/ouXovnPcgSv/r8lwBo1fXxOgQlQLE -Pi126WFkkgBK7EANnAXiXVWvdM6p67jl/AQGOVHp8MeXowejDdVqKzoU6yyMRDeE -uEU4QibvH/J8VPLC/A2oh4XTZbJ5rB6u3rz2fFGI03XqSrJJHbNenGVQ2ar5qJeI -6kHNDIuuwXN+7JPFf8JXdk8L0G88rQsnjrcm0GzQPW/nZ5bN3FA1V139rdOhSBLR -QgaKzju8Le/Zem317ykOJbC6nDBORmpBVzXYdXA9RMg4PIs3kRVqp/RMiiClz42z -w8c1khmcH6FO2Q5Z40vq8tmSLhbu6PgGIPIya/OQacgDjDiDGcWGvqzVCWv/6AoL -em7b5Piu4yznVkEUA2h3LvoigYTJCgHFrQnoIcuM8vx8QkDjXxSHeuy3wTd2l67S -pZp+jSJPdqWe2PWALJrYuq736E2rZ013eLybHKYOkoJP6ZLewh4gsomO0bpxTL1U -TjPsJncaAP/gLqHi0QD4+irMlo6Q9YpEIkbp9ScEoVMHRL9A/vBQfUJfZw== +MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMx +MTE1NTIzOVoYDzIzODgxMjIzMTU1MjM5WjA8MBMCAgIrFw0yNDAzMjAxNTUyMzla +MCUCFDIvLrSYUcq+W27wAZwp/0Pf7ydJFw0yNDAzMjAxNTUyMzlaoCMwITAfBgNV +HSMEGDAWgBTDjB5dtNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQ0FAAOCAgEA +hMmvyXLOtDG7faP93ZqMh061Qms/at5uYz/eAA9CGfS4E+hq2uQaic6gEMWf5NkH +9xRWB1UM9sLR5Ai/Fn+MqsbeXK+1ommOfYx0KL6J8JihHAlQpGYJXu683dxIB3LL 
+ijZ+od5pes5ZIRKtMNaO4+meDG6e5guXDeFNyJaNYqFXOJcy8OIL4O3T840qY1aY +YcHZs/sNwC/hSAloTeUQ6S+ga/EplzSDumz5VrRsEhVd3wScM1FNeqwMvA4VPUIa +NOtZJNLJBLhfmetTeO/wb/y3Ekp8sXAf8UJqABczJODSaGc2RwULmxJd2Q5lESpX +RkfvObqTWgpzpyV/1OhZYDUlXJqiADYnyAsQL8WV2uFQ5fRG7baVDiNYn4uE92Of +FN7jXmT+7UcHSO5L0VqHS6Ia6j2NammKlbhsAw7dU3uxEgCJvBYlWSY+z9Fs+O1B +hNU42fL5W7JnoGfqG+LqfDdbG0MG+GMo1rDDcvrEK73F41w7mLmZhfYAZY26xU6Q +XyLcrG4YfR6FCyOs53zwrKKG9x3ETqY3S0DSSOKTnuvIGZBCSOxGpqdb7pQax6IJ +PqsM+IzFC1C0Ol0eX1ee734rCHfNP6XEM7ucjBweNUJRrU0Q5CrGGJzEQz0ym/JN +81f/kQJJosSSc1+j8iDiaKkUgcQ1DWWZLYZgvCkwZcY= -----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,39 +1,38 @@ -----BEGIN CERTIFICATE----- -MIIGyzCCBLOgAwIBAgIUeGuWhNwpt9CPzFJ5UJAKfkIlLDcwDQYJKoZIhvcNAQEL +MIIGsTCCBJmgAwIBAgIUMwTHYM3peBjwVRi6iICr6FVn0cMwDQYJKoZIhvcNAQEL BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg -Q0EwIBcNMjMwMzI5MDkwNDQ3WhgPMjM4NzEyMzEwOTA0NDdaMIHMMQswCQYDVQQG +Q0EwIBcNMjQwMzIxMTQ1MjM4WhgPMjM4ODEyMjMxNDUyMzhaMIHMMQswCQYDVQQG EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv dWdoa2VlcHNpZTEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2 -aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsXm4EgSxVmuNsKgo -sfJNxNQFkdbpCxxyMOuyBFyGKxSwiGl20VWB6itsG+b9NOmAI6D6FPvLPuB/gSq0 -HR5FuSIkmp9AUx6Xi6lVA2L7qdINo1OA4nJKe5hLSHfjWn36DZFrDyoODlDzLJrS -O4h4QSP1nDSFhk1ilsR1czlvKreXkJw5FsUHRhZGG6idLoK2Ibrne6WsVhPG6WNA 
-abuvQFv40cQWcyRoePktG8ImnNF5GDewXwtpzARSQj9jTL/gE77DH78a77J8+H+h -d5guiBbPZK8gNkPe0CjD0B7tUx/+sByoaSeQffFG4Cnu5JJDqFq9LXPjtlG2nXSB -Qh6Sc2gzP3jKV+ZcSwQw9AxEQ0ZB/VIPsR8KZKReDDztEpcKxwxlh7bEix/0Y1sv -zT7/I+m5kufdDG6j3hHHzniKXL4b/WyedSfdnVqIJw82FgPFgyIY6F/0ccLdkhAm -fLtQJBHc3UyK13l0qVJxhAFdz1Q0zfScBS6qM/Gnbcdc6MY9/bZdIK7E+4op5iAM -kvQHap7qnArhI8VQ1bcXYlQ6asPj4e10lmzroiBHM4N/Yuxv38tmtUCudSB+EcXi -EgJIOLmLq2ZACRzug9KPyMXOD/Yxz2wPgs6I3gvrB22w/MfC77wMfEMuQQk4cZel -TFKosHgLjvcLG+zx5yh5ZVFcNV0CAwEAAaOBrzCBrDA3BgNVHR8EMDAuMCygKqAo -hiZodHRwOi8vMTI3LjAuMC4xOjEyMzQvY3JsL2ludGVyX2NhLmNybDAMBgNVHRMB -Af8EAjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAfBgNV -HSMEGDAWgBQRqYdWstn6ntusRZ8LjyPFEicL5TAdBgNVHQ4EFgQU3ozX8OjQ2AEH -cX1v2WhZID6wZ3wwDQYJKoZIhvcNAQELBQADggIBAEqkZawT89MngrmMTREjYGKZ -+qrm7uQf9wFiM7H7Xs11OEJ5PkNh4jNnnIsXZxc8rr76x+zLr4F6aI08AQn5QOy9 -JXGIbrMHLebtn198aIOYbxZisbXnBlVO3Xz+k8JLdzsu5zxjjaDY3/a63X2ccStJ -U53pSqvgJi6/AvMPA1CPazSjxu6na8rYz6d7c/god7OF0qwQ/ePqd4uJOaImm7HH -CCkwMPYO7UyOWU5CSPMcJ86SGYhvYkoM7wZeJoukK6HlKDI1SRubiTFAx+Hbyk1R -dyVY9vmIOeUlsGEMgsW836g++dg8efRIbIYbSBLQhUL64lLA6wZJ6/oCtC29aX+o -UfxcGUROrpZ5Xi4b4sn0vW4rYq65BzlU17x45XsZMh11hX9aPNE4B62Jl2XLjX3P -Sedu7b/QB6jWpwTAdH96LeLxVepAWiVcFBApBqpu7wxRhCs6M1t3Gh9nvlPE5NRz -zsmx+HVZIgWoP3CgHmiHqajphL0xp6R9qJOyzAVChsmbQYvr+rfaXMv24KBvJYgc -xq5iCP7IccgC6WlhpWyAoTSuhiStTZJtlCKPZqc+HRcuf2fLWXip8YKNHgEtTxNz -7citBXZNoRDFULWwiYDnwhGcZ53p5zPLABYKZdfNHdI+tV92AbzYyQaV0ZwcxL9K -ObAAlDzZKE8vJwT94E3O +aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxNqcFB9GSj+nC1PV +OTIAvzTuo3X7bTbohEPrIDDsEcGyVzQtfagI9vkxhhGLhXxIXrkwEI4u75DeLBdq +ZR3KFJ3QD+jHW12EWNpmSYu272CEAJeb/SBJaFbh40WbY1BsTP+3zI5QMwVTmCH5 +QqODD+GU0fDSo9Gti0yX4rxOY+54jDC77AhSlU55rYruBoIXVe4CcPUpqzZLgqB2 +cTJ5g7lokGqc6w9pveCznVYGPfcdDt+ePGssOvrfBOJnn6N44tTRAG623BrpctXi +t3IAhG60y6CqF29BjV0RKOv09nOBiph2a3lAiKYrpAt92BETFB7KSQManUrrvWOD +mYEcoVkzvrVrB3WnCcHs6lsjQg1FcNrGcDzV9SmX0BIhM+fasxZyHWc08pr3NrRn 
+Eks7lRqY+TS8DAeZYJt/2M0Jr32d6Bh+WdyGFFUj6sBPtCaC2VnSkxOgKXeWf/c5 +EUXIQT2YXEkNK9CP8Kqs8IerEIpfceelQReA5QcvNruaJNktWLKJgYTrdLdfOP0u +s+9JJHWcFH3tv5906CA+Tlm53Dk6SaRc/DB7lu94yTkcSOpxXZnximZi0GcPZYmZ +TaswE7d8HCPLhks/RkZSV2764Kl08xle65APiWZ4dM24uSyu75Izb4hrwgIA4qyM +dDVQnI3cu6utmKUPyHoJtH26dr0CAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAO +hgxpbnRlcl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYD +VR0lBAwwCgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU0 +9GYwHQYDVR0OBBYEFMOMHl201gGYrA/OoGUroGeldRTZMA0GCSqGSIb3DQEBCwUA +A4ICAQBfBZZ7ZPkRjRgk+0/7CGam2AAv/GSuma8lOnJ6IpBEUL0D01reRe9NO1Y5 +iXasGZ80e95oC7WtePt1LS2I5rOOb3No0iHcUTm5lZV2M/ObAOwraQLXePCRVgU/ +OQzNhtRoCpqgL80/1Ne+MTHj1yZp2GRKXOdMd39KZoG92JU/h556GQ6reN6WKNs+ +wbFG+JKiRnfvPsul6J5lG4QuObCAZXa0fhcQNUHyWjCGIhMa8AuhkDDlIEOBV1Vx +T/ixdoFhDIoz0xExFidR5uqYXKZlgzcMZ4JT7x0Vs/YsOIg9z5PNkyjt8Kjrxvqs +NZ1eYuNNxdXxdJZ2x04y8AzLkLRYntOCks5nEamQtzv0ice1Jg52qNJnMm0Y0U9o +7BjlWy/pp8dBfIA+FyxsIs62hAE6Z/vuJa1VuWJblrT1MFM48Bh64p1m5uDXAZ9w +Oy7n6YDpzWKWqn6XNV6FoDSkkwaOwB6bPaqAP1ZvP3BmVhednyYRYY21M2aOmxos +7rpaW1FHgYV96Dm60rL7XRtOEYZNPSIPDO/Ro1oWs3EFwjLLAWtlK1HdMswlnVDX +6M+LNCRZA3foNi6xvf7CvPkshtQe3FOfiw/OiPBsYsYREC+90Ml/oeTIsyCaOF6n +5+dpjCBeOUaxzfeQI4WCrLG6+iQZNXWgBkW/PHaPZTldRWzvdQ== -----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_expired.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_expired.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_expired.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_expired.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,38 @@ +-----BEGIN CERTIFICATE----- +MIIGrzCCBJegAwIBAgIUU5pxAXMrXmWzioXpyoQoipFwsH4wDQYJKoZIhvcNAQEL +BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y +azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg 
+Q0EwHhcNMjIwMzIyMTU1MjM4WhcNMjMwMzIyMTU1MjM4WjCBzDELMAkGA1UEBhMC +VVMxNDAyBgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y +cG9yYXRpb24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGlu +ZXMgQ29ycG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMRUwEwYDVQQHDAxQb3Vn +aGtlZXBzaWUxJzAlBgNVBAsMHklCTSBaIEhvc3QgS2V5IFNpZ25pbmcgU2Vydmlj +ZTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMTanBQfRko/pwtT1Tky +AL807qN1+2026IRD6yAw7BHBslc0LX2oCPb5MYYRi4V8SF65MBCOLu+Q3iwXamUd +yhSd0A/ox1tdhFjaZkmLtu9ghACXm/0gSWhW4eNFm2NQbEz/t8yOUDMFU5gh+UKj +gw/hlNHw0qPRrYtMl+K8TmPueIwwu+wIUpVOea2K7gaCF1XuAnD1Kas2S4KgdnEy +eYO5aJBqnOsPab3gs51WBj33HQ7fnjxrLDr63wTiZ5+jeOLU0QButtwa6XLV4rdy +AIRutMugqhdvQY1dESjr9PZzgYqYdmt5QIimK6QLfdgRExQeykkDGp1K671jg5mB +HKFZM761awd1pwnB7OpbI0INRXDaxnA81fUpl9ASITPn2rMWch1nNPKa9za0ZxJL +O5UamPk0vAwHmWCbf9jNCa99negYflnchhRVI+rAT7QmgtlZ0pMToCl3ln/3ORFF +yEE9mFxJDSvQj/CqrPCHqxCKX3HnpUEXgOUHLza7miTZLViyiYGE63S3Xzj9LrPv +SSR1nBR97b+fdOggPk5Zudw5OkmkXPwwe5bveMk5HEjqcV2Z8YpmYtBnD2WJmU2r +MBO3fBwjy4ZLP0ZGUldu+uCpdPMZXuuQD4lmeHTNuLksru+SM2+Ia8ICAOKsjHQ1 +UJyN3LurrZilD8h6CbR9una9AgMBAAGjgZUwgZIwHQYDVR0fBBYwFDASoBCgDoYM +aW50ZXJfY2EuY3JsMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgeAMBMGA1Ud +JQQMMAoGCCsGAQUFBwMDMB8GA1UdIwQYMBaAFPn+4fDtrbQR7cCODmZ6Q/cFNPRm +MB0GA1UdDgQWBBTDjB5dtNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQsFAAOC +AgEAo+WGpxIz8s0UY8gOlCYeGr6SRYbgH7bcFVaV2fJf2eacv/YSaryc4PNwFnO4 ++e/LvE8908xq57Kl0xpCzp4bO941/aJkJuyr59TOe1nhcH0z0Yik9Dt1zRxxy0XO +uVUSYuYEmKlWNgsWAGCmcHAz+wQxn2qjpD8k/GAe8Rr7O7r/EnSt6HvVKt3dl+zx +rv88gKKJKWrbn7zn4/c/E3XG9RxunCsYGtOfUfk8tGeWtemoGNmXlCLCmKR4JKjk +mEEM16veHHdD5k9MPj6YQH/jh1pDOD11o8GlOd/rEQCpFWeg23Z0uEq5j+qr1FI7 +Fjk4HvTtwGU7QXn+QV9bSjhE6svdWVbmADwb3Z/UU//UzgEAZDPb/n4cC95vk/Lt +Wwq5MzHBIyeHladxzLE7LuVA9dmkv7dnST7L5ZAqQYcT5c7vxkrTHD53ZTojPMD2 +Lmd3ZIcRIdFbPChY/+ajuBaNpi+gBwQaQ0J4b70fKW7oUxOpgSjh12ACNOYmVRWb +sCzSiIaxkD0pvIYLgNXcY2CekuWxO+4DLGNGPPAHeovue0yRgNKTC14AHsLWh04c +aWdomQToTHFxJcoVk2LiE5nwg5Xu8BoNuMag1jl/ZQVsl4LrL4bItwplWdH/bSyi 
+9VFeSwPGTiNverBtv/aF4q7O6JWB7/zUh2GqYQcILcaIy4E= +-----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_invalid_hash.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_invalid_hash.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_invalid_hash.crl 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_invalid_hash.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDVzCCAT8CAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll +MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTI0MDMx +MTE1NTIzOVoYDzIzODgxMjIzMTU1MjM5WjA8MBMCAgIrFw0yNDAzMjAxNTUyMzla +MCUCFAUi4bhk78T24IBu9CEi9kegwxmXFw0yNDAzMjAxNTUyMzlaMA0GCSqGSIb3 +DQEBDQUAA4ICAQCH2paQrJTqVzDjBOixREnhV0Ned2SHHwuIxZPeA1LrA1MR8KeY +RCNiBxaOg2fkIe7WbPbiFVmosRq1kMjOJrgajvI6CG4is5F37KhW+Q/8siNcc8y3 +oiu+b8O5wp7vfUq9f4bxh78ytS+cHJpxHLoYzlp1f6aha2x2kUqDQzEf3ghIXEAN +y0Jlmn2OqeZ/cCnWG+QF8Yb9OMlnR6rTpwk4ml0TvMa1/LhBUMj7vva0BmXuTunW +eEr/wxmTMyJsRKNXgqRDLil+hnNzIP7HOmofyad9dyHv73gOkmu1GizwSB/AEVoB +J5N/LUeCh4cG3YJYYpw7HaAHUnka2SDiuL3MD7VD5ONXiwcZIplT7I9vJHaBimoz +kUTr3BkzFHas+KiPuGLm25Pxc0G8mVZl0jzoIJe0JS4bi+NCqeQ3C5s4WOZ476Bs +5PSWbkJ8GwUxtXciOxHVKvgnBkc55bng2VEJlRLDfERdcTa1yobvJBRNZ8J/gDu5 +aWvRJs+cQ+1Bj4oq3ejV+Be5UFEVEFfQd4wFixvZcr781aDhYg744Ig0n3oZE0pg +ye9V55q5yra6K1eertb63y1WvIrpfvNBM5p7mFbYBc5vnoH44mAhwbsCL5Mkjcxp +0XjPSUsE9OyORtN92YLrZztEXIZmjw7otzQCQmcj/0ri3X+M1RLUTh+BVQ== +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm.key s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,52 @@ 
+-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDE2pwUH0ZKP6cL +U9U5MgC/NO6jdfttNuiEQ+sgMOwRwbJXNC19qAj2+TGGEYuFfEheuTAQji7vkN4s +F2plHcoUndAP6MdbXYRY2mZJi7bvYIQAl5v9IEloVuHjRZtjUGxM/7fMjlAzBVOY +IflCo4MP4ZTR8NKj0a2LTJfivE5j7niMMLvsCFKVTnmtiu4GghdV7gJw9SmrNkuC +oHZxMnmDuWiQapzrD2m94LOdVgY99x0O3548ayw6+t8E4mefo3ji1NEAbrbcGuly +1eK3cgCEbrTLoKoXb0GNXREo6/T2c4GKmHZreUCIpiukC33YERMUHspJAxqdSuu9 +Y4OZgRyhWTO+tWsHdacJwezqWyNCDUVw2sZwPNX1KZfQEiEz59qzFnIdZzTymvc2 +tGcSSzuVGpj5NLwMB5lgm3/YzQmvfZ3oGH5Z3IYUVSPqwE+0JoLZWdKTE6Apd5Z/ +9zkRRchBPZhcSQ0r0I/wqqzwh6sQil9x56VBF4DlBy82u5ok2S1YsomBhOt0t184 +/S6z70kkdZwUfe2/n3ToID5OWbncOTpJpFz8MHuW73jJORxI6nFdmfGKZmLQZw9l +iZlNqzATt3wcI8uGSz9GRlJXbvrgqXTzGV7rkA+JZnh0zbi5LK7vkjNviGvCAgDi +rIx0NVCcjdy7q62YpQ/Iegm0fbp2vQIDAQABAoICACjHosq2WeDaw9egY80H80ip +BgC92bqbw9pWmZhfGiBpkHDpQb1EuKq1H9HGw7EA+JsK4Q6k7cs6JxRaO3O8Epdn +w+jvz5C5VEf14ne85kY7p8zZm3p6HErtqLjkvBZ8XBstZF2PY8TWByW+b/0ik22Q +TTyal4BRWRLA6LwTR2gDSFy+Yce5R6ZfjexqGfomLABkoIPW/e1Wf5kNdcvoaWzk +1WkJdP1rAXxQ1tSys/NVRGNG5uhcSVrkipvf7RiQjGPtM2jUBrSdLJxZ+7AGAV5t +8q02WNYm+Zz32SyxiNsfPYhBIlMMWNaWGfdPXCANFXc3E6/BYVuRYDmpKmN78DqN +XrSCsJOahg7A9f2JIVKmreC/WmHi0+eL0fCcMHK95CN415mYLagg1bLhOwu0cdjq +peSx3+xlEWENDm0B4mYQaPDylHht++Giswg9VpZeIPstgvNARoiCgRCOiR3mXVBg +ZX+BpH45RSZ0hqdhkssiwP3iOQH0xYe1kEvzcaCeNrGc8cZy4dOcZjYso1XNZEQY +vnUzAfo0TasiMES/RiNuy7Zf0oTP1UV7QootsqPQrZCFf3eixC91pq4Lh2URUseo +4yOOI1URBWU5U2RuykzYWsqs/xB+CfJy6dhn6jCjm5lZFt1/wEKj5+7JZbZWkH8A +G2HHxKIQWz5d2KjbQBkxAoIBAQDwUtb6mrJ/JBvMubdR2Tkyog9lT7qeR848ZZqS +ybGEuT1Ohhd7vtjiIQzMLX7s6XIUoSE3AWFJJzqFrr0f1HkwShBNlvF77We3jski +p7u4bsJ91mjHBWl/bAKW9s3YA6PRakk8k215mwORf+trhFkcOJ63DHIPxBnPDQEI +VBBMvp5ogHHIh5wTql+c4OpdEb5s0PuesKFzjdzKtvPashaP3ebt2nM8Nai86UKQ +WTcZxMLk36XSLFcW2vZlmlkQ7q8gMM3BIliR2ofmB0AlPBwOY/gUHG+tIBbuQWoX +tw4DJPY3Y9uWRqAcWOkSbQpAEOUVzyK5nrwXwnH7vOA9HOiZAoIBAQDRsd7x3NGL +Gp8oY8XULwIrZfyQmsk+nozBNuo2Q40qoXBoReIUF212UMMYc2iX8bMo1jBCzmnl 
+3m2BA6aVbpxwlp14Ybe78F8ucQN6Dx5wmT7QyDYxdKyMAYOnOHWdW1ic1y2HjKY3 +vinswhuI93C4Dkf4mxFtP+aWT3RpQxS7DsztBTKux7fACoU+OKQxrmZTkPSVhuEE +DXIov09q3awEvB/R+dDzGj6NXwxQ+Kr3yLl137RvVWcvrObyysKLfnVx+Of0Kol/ +6VD82BBWDcgveu62JPSZ8ckY4eQbi3yl5mOAbL3vre+AQYuDiuOo2NiPOsAimGaz +kv9gXZQ1leHFAoIBAQDsGHYavN+fCFpHRixSvJT0qUF2xl0QInr52teAXaWIPnN8 +MT/g0h1ACjgIXqnTFYR9v85hu3lX5LIZoxEptBNa3Wgm0aNrnE/IhP4UjbRd/HIW +Lg3BeA+snu/sX4raLLlDgqdwW2WxkhhvWLxvZBYnI3jJW/CyjHTOdHgPNobM3nfB +Mm6WEqPCrh5AgLW9uTDatnR78gqq+zNt806eC1ce/2FfSrzq6cxbys0aAoufRS4y +q4S8ddMZIQPvzTKy78ocVdXNZ3Cb2ZSo53adHfByMsQE/eq4qk3cw2b25V9et3Er ++W5AtCGXt8FB2N25Et/8DQKQWOFwdhaEuYmSgFQBAoIBAEgmEBRdqsdWyI4oDggc +iH2QIJ9McpOWD83m7Bzxjx+s3jUyXkAVc3czAH1oMAOfiMozL/W8eZk6t6idLfLa +VP80A0hJLuN+J/GdttmHXCzXvVIuoN7RSxD88GRXu7gBlvKX4rVxwjsJtfkdLEYr +BOB/IWo7SHqzcs4i1mXlS7u0svOWR0L1upZbyE6JRI1HeTOle1H7T26Khc3ZTSTy +0l3qsHQZpTgPvpf7rQwrEwAgUxdoefeYheFUdz3wX5GJWDV3s69B939IMrJcUPqa +0Vbs0DdhbbuOAmgKSOblTmTyaSflwlA2I2KYqrz+y6frvE9DopoEn8mHeVCZwgXR +5RUCggEBAMv2kXszsi3rgnlpFObRRnjyE5g5U3As1ii6FzUHm/aurUYqAZyMMnE+ +RdKX1zBUfTUXqijSpl7avZrf7DVTWR42kiCJM7PM0lrcFUcYFCIs0x7Qk2j3YIpm +17vOFMpkZyYdWIniw3ihmWQTM3s3qTfdAbZPztnSInnjp+aXzU/tbfsmOmROcLY7 +V0xYNWeMs+P1axoEC7zGmhCAaN/A+zDY34VLHGDrR4dVKCnFUQFoRwqfE6PtICFL +4fvdbbhwmU3sxY48y8ws/JyMfWachcFg/zbHVRR/K4j0sGfJB2NKu49jCcwPFaFT +jLfytXEfbNWBGcgHySC4HUwnpUVxN44= +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_outdated_early.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_outdated_early.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_outdated_early.crl 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_outdated_early.crl 2024-05-28 08:26:36.000000000 +0200 @@ -1,19 +1,20 @@ -----BEGIN X509 CRL----- -MIIDGzCCAQMCAQEwDQYJKoZIhvcNAQELBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD +MIIDQDCCASgCAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u 
MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll -MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UYDzMwMjIw -NzMwMTEwNDQ4WhgPMzAyMzA3MzAxMTA0NDhaMA0GCSqGSIb3DQEBCwUAA4ICAQAa -bg7llgCL+OdmZQEeBKey5Dm/NxJGJljoT/sxFbQ+86lwACh1mbdxVkaPyUC/oE9T -4ppC/eHoaRcdmvN4FlIYrUhqnrTGD4s8VSoYvJ7+f5ZFGjUyflnMwyaal21hDaG4 -2SZjPVOQ0ksEA3mrHE1MTRVFqFl4ZFxGhh7NYMoOEkffM1UooChWHTTBMz67nmbh -Ih0MDHhS5J7677K2N05402Z3v3S+Y8QEjIQjDsTC1S9V607eEfG9YEND2KicQKPH -r+CK9/fVaiTz9wgUEyybps4MFoWBuUqqRebQoargFZW8w329LuS6VokbM6BSduOT -qaYFtzp3DGZbvKwUGjiGVgB/PzzB1rv+2+i/EI3D4RJt+k8xvlBIIONxwK/hcjI3 -/i6hJueQpeCuasfX8ck/uKzSf0PhCmyLwWxQux66FJq4sXqWoqwf5P/U+tbB8zna -0cX5/f8+rS7ansbxjeiCHUkbdUEoY7k7KMSNUrtqbgQ4VyjTziysTbSEG7jkb4ri -Jaa9mDfWCkdwfB3TqDofWRkOdNpPTkj9TVZJ5FdV1h39D9O7B+VvedIiVod/KhB1 -DyOa44YpkEcS51PuNAC/exUd6nOv9Mz+WOUP+RrxHqndRYE0RGaFP9vENyks0Kga -4CLB/IbT2rmpLivK2i6i3NOqzcykHOab3LtwOLDDmg== +MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UYDzMwMjMw +NzIzMTU1MjM5WhgPMzAyNDA3MjIxNTUyMzlaoCMwITAfBgNVHSMEGDAWgBTDjB5d +tNYBmKwPzqBlK6BnpXUU2TANBgkqhkiG9w0BAQ0FAAOCAgEATxMm63G11oaOvRm9 +nb20MXyXaqejudryJLcKSAUNNcvVpfj4zO4iG7Eiipq5svrEHmb8aNELKb7eHVY0 +uT5V1hd3xELkQqVS8rxcD2rQfoE8B3kkeuKtgI0yxC4rkrt8recajh8au/FSoknq +Ts1CKsc0ghFo01gIv4fgWC7eWFrfw51T/bZW4AN1QsAT0wEqcYjuDTZpwXU+8JsB +czQ1wWoKJPCAbh6ZOd8R/DEywffWSVjS+8hrmIc6TAwfu7SOBSvDLh7kKZeEgZ6Y +lwp1R9EcRBYLlyclg4E7w/WL2DY426Lo+0aSIWcFwGbU3kWwFNXCKIpYvVQZ4QPT +2lZfuRVMe1ZgRdPRz5wkiEYYIGFoe3cOC16jHGi+Vs9JQX/EgoRiy/YYxZdckuxA +961S5h4bbYyB9aNH1vNn9Kp2r8oDGD6xucXM9cTXDsXvLLSFUxcFqoOdixYqFzX+ +EsZM5A33pMq5zKLkRD8JI2lP7nT0wdyaDjuH4ION7y90tJiq1Q1GPfmJ6+mp/3/I +QF1Mh8axL59Rgz/KrzszP4+kn3NuhvKQOjL3QDo5TSXEmsdjERFHsmOuTwB9E4iH +EIBu7bz9GfVMDUL8LSOK8Tq9Gj9Zzy8kYe2BuwNOTTDzQAUnPZK/m6p1wm/K3vjL +2pjdFJmfRBtPNSRXL2hY2MYdZDk= -----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_outdated_late.crl 
s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_outdated_late.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_outdated_late.crl 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_outdated_late.crl 2024-05-28 08:26:36.000000000 +0200 @@ -1,19 +1,20 @@ -----BEGIN X509 CRL----- -MIIDFjCB/wIBATANBgkqhkiG9w0BAQsFADCBzDELMAkGA1UEBhMCVVMxNDAyBgNV -BAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRpb24x -NDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9y -YXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMRUwEwYDVQQHDAxQb3VnaGtlZXBzaWUx -JzAlBgNVBAsMHklCTSBaIEhvc3QgS2V5IFNpZ25pbmcgU2VydmljZRcNMjEwMzI5 -MTEwNDQ4WhcNMjIwMzI5MTEwNDQ4WjANBgkqhkiG9w0BAQsFAAOCAgEAh+81L4ws -rvOC1j7nwxthaIE4m8alnuaq9h9uxYfDZooimvGwhCjfcoWmfSSA8lt3vbMJ2vRe -ZwnXXCyGWmetZ6ObnT0jwL0QuOYEuIdA8QHleBxLYBeFr4k0O9i8i3VUG0YcgZuN -H+lcpGdoIx2WV0cZ4rbzZ1cNx3bieaivqNoLQAy7g9jTizmTfY5ZvlvuG3iqSG+P -08APxmtt1qFEm1LVu4SyUSyoGB1NaxeoziMITQdfFqHoPRsu7Wdyuqi9f5irIPwM -VQNKs/Y+3Q3S8YkTW3yqxhj4HdSKJE4qVBLMYm7muirDFWo25u2sDX1LJHBsQLvV -fi7cGY0YnOJL2Y7A3XKDuqtZ34zpXg3Hhqpa9RF55K2u5dYUaPq8MEQUHK67II1r -YZAwfarhijQQ6t03E0vrzPVYpK8VjNUunYKQdOBS3OkKgXCwEMuqQDrps98BgDQ4 -qfbVfxwm9XEHJZaX/qFR0sp8OQd/SD5dnS3DBl0Pp5w+w2xIaSA7QmBpDqWY66Hb -cJq4CLOKTasHTddHKz7O7zIu8QhwJGLabtnx18iaHTNNHaTF6k/51pwvA3HkJgds -HVcUNljsDNSPE258JwR2XoQAUu6VuFwRzgD7lGwdI70CpIBeAP1TRSius5RsZB+u -cw+872CILlIdNJ72lzMPWQNH+IB1RU8U/eA= +MIIDPDCCASQCAQEwDQYJKoZIhvcNAQENBQAwgcwxCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll +MScwJQYDVQQLDB5JQk0gWiBIb3N0IEtleSBTaWduaW5nIFNlcnZpY2UXDTIyMDMy +MjE1NTIzOVoXDTIzMDMyMjE1NTIzOVqgIzAhMB8GA1UdIwQYMBaAFMOMHl201gGY +rA/OoGUroGeldRTZMA0GCSqGSIb3DQEBDQUAA4ICAQAGD8ryV/GUC+s4qfqchMZA +QYYOBcV7lS9i8zFrdB7UJuhCL/gXzrmldFsi6hW95PBNBtADT75UQ4JzNNVKYXWe 
+wTbEOG54N7Ff4LdbbCRNjqpyOkqQgRWZOgiTeHSeiNLU+P98HZVKFIfiOS6Rs6zB +5UNdYwCGz9kkeiR7xSpp0z86jI8WSxHR6e7CH78Ax/9dpGAyKYYj8LY0l7igxlIL +yRgu0S81VsprcHKuGp5wcnabOWO7lEUCTFbYa7Cgc1+avUl1vaCmMqUsvydurCU+ +BCN8Zhc7noOXm0AE+58r9yy30aW9n+NUmP7uX6Eibb5NXtVZRVe83Ltk2Rbi9HRM +hkcO0X37mXu+jcqw714i/r0mUsfGtC54IRMCgyIgWJ3TDdJ9ORo0rkrYRXVzKHdb +0nqXEsrYecaCNcnx8iygC2YEwAN9WH8cvvLTJQcL+j57xcJVqOSbhXTEE5LfiHpa +DO+dc9A+THZNO+o8GEZ3teTxylOVctiO9CkUkICmVZ9VDA73qOzHI8DJxpCAwT0I +whm1QfCROAXYFW9enTuCGDZGPoyTf+tVrAGLmCWLewBV6W3z+7OBfNomktuI4Z0+ +opvwIJwYX3emeZ/5gDuYmwnvNYkC0GRQ/994430b/4Rssv32SCjToJ+Ko5pz4x18 +YUtbQcKxpJzRDSkccck83A== -----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_rev.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_rev.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_rev.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_rev.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,39 +1,38 @@ -----BEGIN CERTIFICATE----- -MIIGyzCCBLOgAwIBAgIUTPiBWJn8k37onZ/aYgLxudbD3kgwDQYJKoZIhvcNAQEL +MIIGsTCCBJmgAwIBAgIUXvZ6XWXXrTAFn66B61xZNya2iYMwDQYJKoZIhvcNAQEL BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg -Q0EwIBcNMjMwMzI5MDkwNDQ3WhgPMjM4NzEyMzEwOTA0NDdaMIHMMQswCQYDVQQG +Q0EwIBcNMjQwMzIxMTQ1MjM4WhgPMjM4ODEyMjMxNDUyMzhaMIHMMQswCQYDVQQG EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv dWdoa2VlcHNpZTEnMCUGA1UECwweSUJNIFogSG9zdCBLZXkgU2lnbmluZyBTZXJ2 -aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsXm4EgSxVmuNsKgo -sfJNxNQFkdbpCxxyMOuyBFyGKxSwiGl20VWB6itsG+b9NOmAI6D6FPvLPuB/gSq0 -HR5FuSIkmp9AUx6Xi6lVA2L7qdINo1OA4nJKe5hLSHfjWn36DZFrDyoODlDzLJrS 
-O4h4QSP1nDSFhk1ilsR1czlvKreXkJw5FsUHRhZGG6idLoK2Ibrne6WsVhPG6WNA -abuvQFv40cQWcyRoePktG8ImnNF5GDewXwtpzARSQj9jTL/gE77DH78a77J8+H+h -d5guiBbPZK8gNkPe0CjD0B7tUx/+sByoaSeQffFG4Cnu5JJDqFq9LXPjtlG2nXSB -Qh6Sc2gzP3jKV+ZcSwQw9AxEQ0ZB/VIPsR8KZKReDDztEpcKxwxlh7bEix/0Y1sv -zT7/I+m5kufdDG6j3hHHzniKXL4b/WyedSfdnVqIJw82FgPFgyIY6F/0ccLdkhAm -fLtQJBHc3UyK13l0qVJxhAFdz1Q0zfScBS6qM/Gnbcdc6MY9/bZdIK7E+4op5iAM -kvQHap7qnArhI8VQ1bcXYlQ6asPj4e10lmzroiBHM4N/Yuxv38tmtUCudSB+EcXi -EgJIOLmLq2ZACRzug9KPyMXOD/Yxz2wPgs6I3gvrB22w/MfC77wMfEMuQQk4cZel -TFKosHgLjvcLG+zx5yh5ZVFcNV0CAwEAAaOBrzCBrDA3BgNVHR8EMDAuMCygKqAo -hiZodHRwOi8vMTI3LjAuMC4xOjEyMzQvY3JsL2ludGVyX2NhLmNybDAMBgNVHRMB -Af8EAjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAfBgNV -HSMEGDAWgBQRqYdWstn6ntusRZ8LjyPFEicL5TAdBgNVHQ4EFgQU3ozX8OjQ2AEH -cX1v2WhZID6wZ3wwDQYJKoZIhvcNAQELBQADggIBAA1ipq2MIRdRIiUZ6M6AjY91 -L5iUpgKGrmhna/gg2b6AlugDOtVDsfeFm389aSplwY+zcJZ2AbUXhXe8RVHOEUwf -O7iEDWsMZ/wxit5ZotJ5kzr49n9RTtomgbQSqxWadLq6Q9hYOWg/cr1FvXwL5tyO -rHyuRXhkUMmuk4aQ1sfybfPp0PJKbzWu001Q4rxbJTlaib9b+CcsyLWHs06JXVKi -755Lg9/ND1bQjW2CMJbZ2rm8V7oh4J0tJuF3DntOjyOk+yosckTF3bFffGPR67WC -RkZebIKx2Rh6OXMrQTLz9ldqWo0cW0O353gSmrMxExWKhgoDrZKc4UeOanweDTqO -4lU0RP/4naDuQl6/FE0rUzUkfAJmsKIuI4G1lQNZhaqUH/BdN1du094RON0T5agK -etoBcPpNpxOn4N86TJaYoDjRSDpKwxXVKZs9lk5GRLxRhqtY3iQYVZrz2gY36Ri4 -lnuKZCeFmfjHvvktmb08EmrvGiQAhXzI8yfeVhlwP8lhtumIO877VW++tedK0z0D -6aBz1LsVI3IbinDZPRsWl0EEi+JFmpIktmMTdSn+0vTs7XJjbBZ+VKaPPWOG/Afg -Qav6+1AnnMtieGfrj3tyyfKo0vPSZKbdGzEr79Ukl+cxLdG5O5qZTy4ASm3EOw36 -p70HafhN8Vioa+uhQObP +aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxNqcFB9GSj+nC1PV +OTIAvzTuo3X7bTbohEPrIDDsEcGyVzQtfagI9vkxhhGLhXxIXrkwEI4u75DeLBdq +ZR3KFJ3QD+jHW12EWNpmSYu272CEAJeb/SBJaFbh40WbY1BsTP+3zI5QMwVTmCH5 +QqODD+GU0fDSo9Gti0yX4rxOY+54jDC77AhSlU55rYruBoIXVe4CcPUpqzZLgqB2 +cTJ5g7lokGqc6w9pveCznVYGPfcdDt+ePGssOvrfBOJnn6N44tTRAG623BrpctXi +t3IAhG60y6CqF29BjV0RKOv09nOBiph2a3lAiKYrpAt92BETFB7KSQManUrrvWOD 
+mYEcoVkzvrVrB3WnCcHs6lsjQg1FcNrGcDzV9SmX0BIhM+fasxZyHWc08pr3NrRn +Eks7lRqY+TS8DAeZYJt/2M0Jr32d6Bh+WdyGFFUj6sBPtCaC2VnSkxOgKXeWf/c5 +EUXIQT2YXEkNK9CP8Kqs8IerEIpfceelQReA5QcvNruaJNktWLKJgYTrdLdfOP0u +s+9JJHWcFH3tv5906CA+Tlm53Dk6SaRc/DB7lu94yTkcSOpxXZnximZi0GcPZYmZ +TaswE7d8HCPLhks/RkZSV2764Kl08xle65APiWZ4dM24uSyu75Izb4hrwgIA4qyM +dDVQnI3cu6utmKUPyHoJtH26dr0CAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAO +hgxpbnRlcl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYD +VR0lBAwwCgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU0 +9GYwHQYDVR0OBBYEFMOMHl201gGYrA/OoGUroGeldRTZMA0GCSqGSIb3DQEBCwUA +A4ICAQBlx15n00tEAP8moDUehCtLLJx7CCG/V7gcldTZACXpOYkcuRcKW9GlXqQv +N30rTZpIZv3XIQEhhWhMJ3dqJRC0l2CbNlrIcM9t5p7GrYS/I/HGamKIU09jfud2 +1/FimjvBOQKr583vTLL0kr3Aosd3S8jHGA7Clal/85SntQN0kDxnceo01aCUhnxg +Lkrd2+N0wPYGW5DZR6jk4Y/GiOO+q/ANO+tm4szT8RNwC5sNectpaI+ZRNlUHCdM +DtZO5HkBADQ7PdZ2x51gliS5l9w7obFY62TG6LgtKFJgqEhqsHMIp+/OwmKbpP/p +urUDJFCZGWD+lBkaxyy+VsPlvddU7gnXSm0wXoCxpXerRwWFb9/Kc5Q+kQB4nCkv +bHm/zAkhjoRSjWYPcLL3F/9P858W9QlkhNdcoq73EIuc3FHUMdQUm+rjxGxfO76h +fXoR2uBGlESO+gKL2iC9E7KKB+o07hdmafEnE8mGTRcGmVPp8e5SS7LfN8lfHez+ +3gBo2/lW1+2wBmBNIeBkkBpUI69DmNZvMliGNl/3LAIR/slQ6ZYXR/2dLeyhqwRt +QSj5e6Jw2fg3ekG11R98OGoP3ZC/N1dTTh82O571ZBGxIWA6zPDrWj7tX0oW+JK0 +Of23BiH+4sex36HshqiyZ/faq4AtO62uII5mRQzlAwE2QdXI1g== -----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_wrong_issuer.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_wrong_issuer.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_wrong_issuer.crl 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_wrong_issuer.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,19 @@ +-----BEGIN X509 CRL----- +MIIDCTCB8gIBATANBgkqhkiG9w0BAQ0FADCBvTELMAkGA1UEBhMCVVMxNDAyBgNV +BAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRpb24x +NDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9y +YXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAcBgNV 
+BAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQRcNMjQwMzExMTU1MjM5WhgPMjM4ODEy +MjMxNTUyMzlaMA0GCSqGSIb3DQEBDQUAA4ICAQC1oMFXyJ2mAz0el6drt4KtH38d +FfDvsjc6hTgiPbmQW4NmLuSKnNLOyOulsRaV7Kl7hxQpd+Gyimqbtb1ObeqLvnyb +W5kX90k94l8M/laZT1mZm39AGR8xW8HDtuJgwliI7RujX6iWuyrJCxM4qpAHe0Jw +GGQd7ZlzEZ/4xMsZV0lPSsA9CPUn4HyZ2FWj3DJjfXloEOu+krcpmvmL3EROUi/h +F6bsViKRmoQoe5GJFLdG4eVSgc9+ejaM23n8C0tmAhz7FKOYOqU7pS9+hvCyJ2Ul +c8S3V7hzTlz1G4AJAeSJtCnV/hkpt+yj0eQa0Q92I0NZY3UWT8dOMZUxpHO91Th5 +4nfaF6FVSO/AQcpr4FPElYObp+RigqWf44v3vrriHTYi9QntKaNZq5XLgx8EkgBx +1dTM7mDJ9W21ZWGEbRtLRaKt71RVsVKVLh5CPQrFEzENOTuy66QD26LNC2BfE3TJ +tsMgjm14Fm1mC91roj7sOsO3MUl4YKh6cdKFtfryWr5Du+7x9/6vjabuay/BopC9 +BF/q8ITHh5Shs0h9OpOeunGFbcTnYOSgOpDLwC4++hur8p0BzClJkt2Z4yZGuKe+ +lh66PA/UkHtC3E8NDxVRkd+NsejSJUzRR0+1ruEoDEGP4jZKL83UpOFjIWGlIBsM +2gwwqV/LJsywfW1kFg== +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_wrong_subject.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_wrong_subject.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_wrong_subject.crl 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_wrong_subject.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDUjCCAToCAQEwDQYJKoZIhvcNAQENBQAwgckxCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEVMBMGA1UEBwwMUG91Z2hrZWVwc2ll +MSQwIgYDVQQLDBtLZXkgU2lnbmluZyBTZXJ2aWNlIEludmFsaWQXDTI0MDMxMTE1 +NTIzOVoYDzIzODgxMjIzMTU1MjM5WjAVMBMCAgIrFw0yNDAzMjAxNTUyMzlaoCMw +ITAfBgNVHSMEGDAWgBTXVfLQH2D0KccIHhK6M5MRTITvtDANBgkqhkiG9w0BAQ0F +AAOCAgEAkHC9VxzSFGrY4NWoMLkunjaRQ5qmc4+3PFDiZLaGQua5QMJixMo+QIp4 +RfCLAkvI4VzMdoJXhl4+sV0mocn1HWDBxFUwfsvV4h84o8bEUK+bizrFsEoN58R0 +J3yfbikgj4P7WFQdU7p7bbAkmoKejt5+gu5etV2royIh2mjckCN7WnYzvhfRUBYz +v+93R1usMMQIVId6l6k4DuBqaFyip8AKXVoXj4KbmwXZ8n+ILd5gWV3WKYo/ffCh 
+h2g+jaS8JS8CYqtVGEb7F0zQwdPe1tHA/SBFBCNVqzHos+yET1m0Cn1zoYPvkEu9 +U2OVK5tqffEyrkN8hRm7LT2NIYji/z4VOIY/sGA1SPO7HNzoheAyHR7u9ortGMYv +2Q69SEqtA7N6kMX3y/dL95sI/E3EJp5Z27jMMP7+aJ53F/5xmIqkZarA0ETuBic/ +KlNjG0WJilUMH7D7emTxksTdZOC8OKjg6gPY5GhVmlQ/LRvJQtoXqC84UQV2ul8R +chvU55EUB67247g/WAusCjutISXnKQ4RGPaH9eOu1yN7StiXtJyiZ8R14ZihhC82 +ZzxjfDDk6ATR87bkAeDYJvubLJkVtUyUK0vgklNsyA+hwbdr5E5Q7IaRW2Bk/tnt +K9ANhMjaerwcR20aOmA7pVuMGXU2MhFIleQzl+Yfv8N1CDECw3Y= +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_wrong_subject.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_wrong_subject.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_wrong_subject.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_wrong_subject.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,39 +1,38 @@ -----BEGIN CERTIFICATE----- -MIIGyDCCBLCgAwIBAgIUOgU+VHYEK4Q4dZokfM01Ok2XBtUwDQYJKoZIhvcNAQEL +MIIGrjCCBJagAwIBAgIUOJ2eXoc9SJ1pRCC4x452LX35eNowDQYJKoZIhvcNAQEL BQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y azEPMA0GA1UEBwwGQXJtb25rMR4wHAYDVQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUg -Q0EwIBcNMjMwMzI5MDkwNDQ4WhgPMjM4NzEyMzEwOTA0NDhaMIHJMQswCQYDVQQG +Q0EwIBcNMjQwMzIxMTQ1MjM5WhgPMjM4ODEyMjMxNDUyMzlaMIHJMQswCQYDVQQG EwJVUzE0MDIGA1UECgwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBD b3Jwb3JhdGlvbjE0MDIGA1UEAwwrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNo aW5lcyBDb3Jwb3JhdGlvbjERMA8GA1UECAwITmV3IFlvcmsxFTATBgNVBAcMDFBv dWdoa2VlcHNpZTEkMCIGA1UECwwbS2V5IFNpZ25pbmcgU2VydmljZSBJbnZhbGlk -MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA61P3Lfow3E6wGpMJmUWp -wsfFZwcuKSf64JXDn1pVJLUcjTwhApshnGaSBb+knlpwvsO1evrR7re9ZRh51730 -IintOP5IA3CSGd7fqmTpchx3kdFOndrXS7BwAWuB/eZ1qzKeOYpyAS3VSE4FphYi -LSxGfwSUl89pwYyWyqGl21hv/sBL6cc+Lm55vXbeRwWKW9K/w7BkhtK1zx1xm4i9 -4x1aXJ6DGWQpIk1sVDNPtzQVZYvmR1Y10/r75sNgA/WMiZx3/2VyCREnV+UXfvsX 
-fyMLcbwMWWt6psdhtoGFZ2sLJka5ZNvttQKfbde4TA3I6fpsrMi+oTT9YO3it5zG -ORCUC+j5B+zrzbSv+RgL+SnnAPkHqufb1a/4mFs/uTbjUYHN2/rhObnkLK4Xtfly -FBlivxx5haT9o49YkCv7l57+We4nafBPMw96ac5AGzA0gVwdMTeRZ3joT2Pc/zSf -H5E9wg3MZfg3TN2THB4S//r1/XOaA5F4BGjorbpPhp1/YaeF0rRMlAbZVKXHZJBR -n5qN8hD/V2tXviEkrZRL+iW6ltkslsjIkzrYSS+6goymUjWrkGjmcsTo0SStHE0p -7pOChLwpUtpaElemp1NDzVJqvrglWPkM1ZIIjxpk23zxKj7V2FazqP6PVuyeWdkj -VYN86ULDRG5j1hfn/n0HEC0CAwEAAaOBrzCBrDA3BgNVHR8EMDAuMCygKqAohiZo -dHRwOi8vMTI3LjAuMC4xOjEyMzQvY3JsL2ludGVyX2NhLmNybDAMBgNVHRMBAf8E -AjAAMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAfBgNVHSME -GDAWgBQRqYdWstn6ntusRZ8LjyPFEicL5TAdBgNVHQ4EFgQU+jIyiVonTYe6GuSP -iAkxA65qou0wDQYJKoZIhvcNAQELBQADggIBABcvU42Z/T7hT8ke5viG2x7FJmwA -gkrphOYiooB77sxY+cjxaYsct4yvFXvwuNAcstnlBK0jJRaWzLwswR1t2bXbRwQF -kjDO3br4ALRMHkDPa8iNioogyap8X6r40p7rvfnudKX0+MruLHXN3ZM2ltucYAYU -oR/Wa04KxDuZQHeKrDosAsJCv5MwgF69H3oPbhspFQsP2V5fFsxupnWFzVlwPfcQ -0lgHVC3nZ2Rj7ZariT/px3nfZ6Eg3pRyK32r2SQWVN/oVBEd5cCTONvD7Hr2SrtB -9D58f+vDyVNWM5OED7NqlNDaQw2x9BMjdEVYTGGRW4IXPbXWH08NUcEkT1Tx/vUE -EPlTgwt88Fca03yvAn/8Daw7ezsJNAFwDpPDcQhPi3vg2l32nuRkuQ5641hJiTGw -TEtpJc3dg3FJymG999rOCLLIheNLMehEDMPZHqG7XeEg/42F0580MdkOenMpjhwg -ZhrommB85sZcGBOwc63VMb5PPInYDQi5PXz9Tpann/VliVd4Dpnyn0XVy73VccXu -WWgDt8gJKWUpRiJ6MZzEKkBrXYjPLmrKB64usEJNQ1e2NIKV3bwvH5K3PmibyVBu -9fT5t0VXQpNxxlwngCjvjtt0D/frMCJQXpXpnz25aQDog9bnD1yl02SzxdZaVK05 -LZP4wR2beOGlz828 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAw7LcEUPdhr0FKp+muH7w +Bj5Mbktl4e4kpIMqc2OaTxtwM4RDUDpLwy9X4PuEpGYEgOwOQYyd6udfInpjVVTy +JBnwxdfpgHfsdqamkrZDqjGWHeSw4332D8gGMNFLmbCuHEzVmr1XLYQFVYMTuWex +VFvL9ctA4gv5xO9f6oL7fVE7X420qb7IzntrxJHKsQA/IheJkZHYvPpBLs9Xlwje +tb5OwyhdbTIsfR4V3vHdQ3shwD9TFbc+6MTDryWZThlDDUA8f+iO+euAe6QjLDnL +JgLwnXd4EmGyePDhBM8oSex5o3/vzJVmJkpb40pvloOG11qlXGaqJxOLH2jjsNKx +iC4l7oAHP0tXIw5f8hn+pk5vQNIcIHguXIT65ZR0IFRE9tjciLOtxndEn976jmqN +Qu6Ajx5DiImwzp4wHjJOoVWmKCM9BklFRhwJvZ2xTmBGvC20jX0OgEaCeWV+E7UK +2pzfGwwkWdXC7ihIszhGNP/lLlE3IflS0MHSxtie5fUrwuoZ+bVkKYClkLYeKrBT 
+MLfszTuWg+rJfGmCwvYWGG9gcIRc1T3EuOiNpN4YJ5HZyD4rSI82fSzbrAtAPWPk +6iVdmo9Ban0pQhMzyH2Gu07OnvBpmH2ADLzFpugjWafW157//GCNVgjrKVN/JbuC +0nne/OTbQgCO1mFOy6RI1RcCAwEAAaOBlTCBkjAdBgNVHR8EFjAUMBKgEKAOhgxp +bnRlcl9jYS5jcmwwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0l +BAwwCgYIKwYBBQUHAwMwHwYDVR0jBBgwFoAU+f7h8O2ttBHtwI4OZnpD9wU09GYw +HQYDVR0OBBYEFNdV8tAfYPQpxwgeErozkxFMhO+0MA0GCSqGSIb3DQEBCwUAA4IC +AQATY32D+AMy1/x1WLGbQcvEkcT5+UEGp82FEVamXy4eM+zctzemEufjSszEpRod +N5UgiNGDjuD5ttZSc37XHwtOwqZv2V0tu2jclxQQCswPjLhkecpIwSFJW9Y7IQPo ++4FKry6q+vyo0sLA+pIhTT+OHK8ijy9PQEx9YOHgr0HTTTG8UZ/6oqx70lvx9KcA +x9PhFb5TWaOhOds+v7NEPxslwrMRQ4jOz92bGeBjSXBzxwohoKKWcLWIy5ktJsFS +PhVztHa+on4AXdDJVifU4lYIPTRZBX491EnIpEaZtyEED319SNlj691+5SgDNHRw +Ysk+xcVYy8RmL84qfTNce+t2eEr0vjnkUqVNt6ps3UvkK2jvlehBlfPW/Dy/r/Er +q1KNvc1Uiza3ryf2dAMDWL2VBIKMZb+d88cEuLHIgmCXrxtcyxCBTWCK6t4cnKsQ +J82B4XQ9IRCCJDTjGEQLxsjYt9jwHeUIK0l/sseBeohxMfDptfRQ8hWt7c5SvvNy +xRGJ1adli7LiJ3+gnPcyO+D+3DPtKtgY4y8Aed5P3oDOQGNdUznSP2qexUpT9GO0 +fJaRI8Bc3Kj1zLkS+mtYHeOBu5eyLhR0fSBpKr3/3RoKj2/NI3SvlBbJ6dD5jQBs +4qEi8T70wjhDqITPyqZV4auy/0/h89JXDB/qAg162p8EJA== -----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_wrong_subject.key s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_wrong_subject.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/ibm_wrong_subject.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/ibm_wrong_subject.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDDstwRQ92GvQUq +n6a4fvAGPkxuS2Xh7iSkgypzY5pPG3AzhENQOkvDL1fg+4SkZgSA7A5BjJ3q518i +emNVVPIkGfDF1+mAd+x2pqaStkOqMZYd5LDjffYPyAYw0UuZsK4cTNWavVcthAVV +gxO5Z7FUW8v1y0DiC/nE71/qgvt9UTtfjbSpvsjOe2vEkcqxAD8iF4mRkdi8+kEu +z1eXCN61vk7DKF1tMix9HhXe8d1DeyHAP1MVtz7oxMOvJZlOGUMNQDx/6I7564B7 +pCMsOcsmAvCdd3gSYbJ48OEEzyhJ7Hmjf+/MlWYmSlvjSm+Wg4bXWqVcZqonE4sf +aOOw0rGILiXugAc/S1cjDl/yGf6mTm9A0hwgeC5chPrllHQgVET22NyIs63Gd0Sf 
+3vqOao1C7oCPHkOIibDOnjAeMk6hVaYoIz0GSUVGHAm9nbFOYEa8LbSNfQ6ARoJ5 +ZX4TtQranN8bDCRZ1cLuKEizOEY0/+UuUTch+VLQwdLG2J7l9SvC6hn5tWQpgKWQ +th4qsFMwt+zNO5aD6sl8aYLC9hYYb2BwhFzVPcS46I2k3hgnkdnIPitIjzZ9LNus +C0A9Y+TqJV2aj0FqfSlCEzPIfYa7Ts6e8GmYfYAMvMWm6CNZp9bXnv/8YI1WCOsp +U38lu4LSed785NtCAI7WYU7LpEjVFwIDAQABAoICAAqhxnv3pGrkDQpORygB2Xd1 +XgCl/wCByCLZ7236bNE8a+GYn3GV4TTW9x7Fe2TVGAyLBpFAGvo+nLgKTyg9J7SX +ZjHRc6Gjokil8CnLVizCaeXw3T1WxA4Cb3eqf0F3zFXERNyVyc0yvXlyWBl8DTHI +lPGjG8DMJsMwwBTFDfW2epPL5pNMRquVH/s7cnggS83F2pb6hfxqWi05XYhaloLo +Nm463KyIi8s2XbjRihRW9bP0nMZywKuzuO/kioooLDDlmwPV9iKUzVOqTLDj6OoF +Qd5ENdVF0oTojUkOGiG+A7PCyRvjx/tvkcNs8VgLiEFd7trwxvC9ipLnx2r9X2Cu +zi8iQJ901lz6cj5tR5VBttM2MaLFtELTa4HDmVMf6cNuMNZO8eULNmV2EX5K/L5b +JD9Y1fhctQr2+pEvhgela+hi4XC/jZ/HBxDg2gg1lzt7MNjODi/fISz/zGexGQW0 +R9KElISntcpsyG8bqH2LQFQ7XzgAF1qrUC8Z9KFONGRGGZViH/K/aeH00PqCEwG/ +kv5MH+JgS+Rb2AM6HT6SK4rchAkpINPDIsD//dh4vdEfPQ/ysICf3yqACSYgu0cl +kaTyNQE/Oga1tDOAbC8YlxPRmFHCybz8gXG4iyfFaXLGaZB+UXwZCMKOBsWbid4v +l+1wRY/EcEuxgU6itVv9AoIBAQDo38kPbhvcT7I56++EKVuEfCzwAnXV/kttXRQk +QQP9b7C6ZVsEgHz9/sNWzwrp0+28JXMZIggAt0kE783hitDlh7mRIUqjYoU6FcKs +J3qaHR/Dn5TeDFlMeXmbDOsXPLRDTFdvX9+We6gbAUY1NjbK/CS+fOIigzw/jplS +DIPxpsPLQ79/lUvr842Fn9HAIoPWq6bhMW4qibh8clJInMO9VTz4TF0FnmkPgl3Y +R63h5zngQiOAtWCIleN/9F4G7VvrDqYHAHX54vL5Vz+dgtsynJIcpQlIJ5Z6w4hU +q04aHuxDBkNPNRfTUIo/Jb1ghxK0m8JEZb9FE5TmZ2PDH0l9AoIBAQDXIfwjSYez +fahZtDCis9skiaRBz2Vw+EUruXyEShER2mPaW2Of/cxAjE+cRPC4aGuseHs+4TWR +CMnLU8lsrrw80HhdRjd11Gvr/o5qlqgxnw3zNSWQzwld+QcyxpzOgIxIR4kFV+1z +VoG5PO+vARhcZH2tXrrWdjnCTchaMV74Sffb8bHD0p0PfcAg3CdSzMhynnMGp3wN +PBZXqk0T+C9isaXKxL8HcDJxgcJHEyEZp9ygQtzQo1BPluVTWPJRQQvP6VLNyNl2 +NISvmYJSwRQ3yuqEvWE4eSge9kaNp2eUrUz5O3t1NyOa4qioVPnlCH7Glaj6nyNS +Ler2dszcqz0jAoIBAQCnY9SeZsNYBWFTCSjNkvzZqniSvPH+tB97qSBFPwajMZmT +Ii/eeI1f9bRrvb9WfKOyTikBs9iUyyqNheIzcRjfJERa8dc4wiSJsAKSxH34MV8X +uqnDQpUdx2OF9C84MMZSaZmf0QZioNghMTVKIoIYPk4bLqFNtY+lD9ddhumA1iJ/ +BV+tUZ+VJyhfGJYoyaaCtDfsPx+1K/GUYoiK8UQx4AdHY3yqAUf7gfX69OskKpUL 
+gf8LEVUmWLeCziCYUh10RL9K4SXsTRnh6Lkte1Ycdzb/qBGX7/zWmZ5xXgFx/TBO +rT3MvZk2p/n3kiUiMXVcpWlqyMhH2t95DnBDXUXxAoIBABcS0Q0j1MceghDk75Zz +vdxEWvan/NRJ/Hk9EqrJmt0UVENWK/A78000/1DeYAcXQ/0iiu1qkCk3DRWererX +Lt9C6LXwUwBzQQP1sGakM/PmgEOGfrnySqnmjKwYezb2uJUD/yEwlgoZzB4G+BoH +/wnhyUzv7RAVbAp39zYdN3dfz5KqcIt1Kl0/+nxLwHfz+Me6UNH04qw0tpy+ajfr +FYH7VbHSuj5c/TwJU8H1vkYXJ+WUZkSGT5XJtFzlHFA0rsSk3LmvggtYhKaky0J+ +7OEzBUYpXaUF8ZSoi1akDsr2b1wH0iz3Nf1ls/sh6g9zgs1fvdjreolU3W+DvGMq +hosCggEAWvrHup9KFfS7QznGOLjOdil+HT8TqbVjFa51S7gd5A2B3XpC2mDk9XPO +uxvUjXxuH18hRlLHP2uERlIsOl37iPJ2jY4C1SUQb9xNmSarfUKwsgDhJKBfPNIC +LXmXQgs/gcL+Sm/l6Az4slsV/9xg1Sp4EG/3bBM8i4VRRcpKZqwttZ/pArzYNrYp +LbZeRCm34iPDAelganc4cX/wzlXa36j0Eh0PHd1SvQhKb9lcxtbV4EFhxD9qC1VF +URy6m7FJrA0r5Hrh4SuSagOpbPCJTeXP0Ysvx/tsl7DR1kIpGkMGhYHMScbKjGwe +ZAbU3ynucdvh2dgqN4agS//dmuRo0A== +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.chained.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.chained.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.chained.crt 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.chained.crt 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,58 @@ +-----BEGIN CERTIFICATE----- +MIIGhjCCBG6gAwIBAgIUOkSdvHg4/HXtknNMhIFkuPv/ghMwDQYJKoZIhvcNAQEL +BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu +ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs +IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y +azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 +MDMyMTE0NTIzOFoYDzIzODgxMjIzMTQ1MjM4WjCBvTELMAkGA1UEBhMCVVMxNDAy +BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp +b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y +cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAc +BgNVBAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMheyYWl/STLJ0iwlrqNRyURatdeC8oDpKFdpglYHAs/jo3s 
+fWNySCnaw6NCe0vxFLpqcK8VMNFRGu/XhR/kZ1YR3V4mLwF1Wa5v7a7J9swq50Fk +CsLtaU5vq/h6rIpy0NLnmN5KgqChrMh9IwZ+Mc8sqc/0BFFJsuCCGu0TNlGVOhmN +AbdS3s7wEUwT023CKn47G3pVqeaErEB9honz1I71g5/jNKGe5CLCV35ExzsrzU43 +atyJ0jgh15PYCXDTdsRccSmEs2S6Xh2o4ZhlqioWB+tKxGsdxq8Ri4soy6yyooOz +T/3X5CHpKxiI2P9z38Pr9egPcNPPVMGDhzwHz7p3iBPg0RcWd5VP2nimLJsdGWK0 +bkU7zlQ3R2NelSIW9Hr8MVASihmELvX+AcC6KhTpHHhf3CTPgcAfV2fE9U84Xl2i +shmoEsUQTUx97qKUOKRfY6o+WMBnVkzlqWj+s52ndiT+0KNLTDtvlejEFf1VSF43 +IkS9UJK+XxxEnvIBzNKI5EbWlG5Z38/nKv6pjTXFi3aZR4cdmI/0XfAjLTkrTBaW +lkguEjt+/cxPYJOqt50ldI9kle8XTu/HibmcbU2wYIF21CBjE/hLk/KY3FQlnTFL +y7x3bM0CuTfJ8Noy59f3l56fwPOpaQWqhqO+UoFkbFlbROAiVxfb8KlyZPdNAgMB +AAGjgYEwfzAcBgNVHR8EFTATMBGgD6ANhgtyb290X2NhLmNybDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBTWntryJpCwZqZJ1H3n +znJYhbfoKjAdBgNVHQ4EFgQU+f7h8O2ttBHtwI4OZnpD9wU09GYwDQYJKoZIhvcN +AQELBQADggIBAE8gulfly5+EC8DX3K02qEYPoQwVbVhD0wGrlAhgJiakDvPlX6/K +vSe/1nNRG87jXvXdDiuJ6F4iKZpeJndzvx/8ZEmllyyxDwb3UOmylwW/o3/Uh6fY +kiVBfW6uNNB0BfDKcXDDZgKjTg3kLT5z8m4u8rPoIPFkLFl9AuAq82Ll6NQ+xZFP +lZ3K6HN+ntVnIGP4XkOgEYPxjJO3yTGle8VBqLfo/JKwbZtKfNXxSAMRXiP02SQg +D9yshxkonQYWog2hHz8oDuQQNbzaAFlxnY914av/XwxP8TwEfGNchNtAtrlGRlx8 +PjMfp3Mnbz71yp+L2We2/A7njIPbEcn0FIBedpNyyBON5Cd6Xqx18otmMTtUILZ3 +SUKeYmLp8soVMEmmnWz6y1a4bCKwo6hA8oSoq5ydIeWy/jI9v7DF0S/qZTz3c2Q1 +a3aniuug2FRAxuU/8fSlMrE4672d3505SbHUblhy9XzQ4+sWjkDtYnY89kyY4BTu +W5n6JlVoewZGOjlJ9/6mV6BVLQ74IeiytWtdH5uOQ1wroi5Kq+EroGgFmPSQCIvN +XNDvBCNFN/O1+/eVZd6JNx6NMO7DrWql3GFMtJE8u/SWXA9vit3pfmeDrosZ4SDg +ZPt7+JfzITUh2UMaPsQsPF8G8/tYmxDbELE5LjFcQ24ps362rG+q2rfA +-----END CERTIFICATE----- +-----BEGIN X509 CRL----- +MIIDbTCCAVUCAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD +VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxMTE1NTIzOFoYDzIzODgx 
+MjIzMTU1MjM4WjA8MBMCAgG8Fw0yNDAzMjAxNTUyMzhaMCUCFF72el1l160wBZ+u +getcWTcmtomDFw0yNDAzMjAxNTUyMzhaoCMwITAfBgNVHSMEGDAWgBT5/uHw7a20 +Ee3Ajg5mekP3BTT0ZjANBgkqhkiG9w0BAQ0FAAOCAgEAbGRxfJj3wsZ9iUsYTO3W +7+hNbZ+nRaokZT1UgprzDTMmQKWp5HRyvAsTtzxeJZ4NDEqP2mg8imvmSUSnLSmR +pdq7vUdk7lKvdV++fZo4XIRF/pqv7+8Nz8iZvxINGhFaJDUUPPQSFcLm00JIUMzn +9nh5JkCkKFuk34DgHDR3Zn+nM6R+gAuaDsBgv3xnU6PKVW796JPbz3yrN9fma9Pw +P27ICXVyOH2oH7p/E7oNB/J0YxKcD5bjaFkzVHsMExCzeyGTA56qtdN2O1Oxiw2z +L1Yitj1c+2/P29vhCw0IuxKjduL15Qu5Px5BT+B6V3cVUPbn9fYlDjSFAHxyrGno +X3QnVzCChVoHuS+Og/QwEx6AcTSEbl4E47XQK0gr1cG7ayOZoDO3rqGQ+eO6kREM +LpX2lHPofzMBk9lGPfAZX41pXUlshT0irrwFbIt3OTGfvU5x2wAjCap1InzvFS9J +4vEFHcLeHAi5ztlnYNIkB9/kja3ogpSCbcO6WoveJeHCTsXk5K4qIOSvoLYEdRE1 +Pn2EJStyULZW9Sv1JH2puyZ2d2Y7cl6DqCZ5D4tFsyFFsMUNlBJQSxKoPDYnGsi8 +DOTxrwhdxG/mSwn/NoYjZdC0Y+NJyBs1RvLvBZLdgzWS8I+uvyuwTfn27tP7GT6Z +8hmLPBMvUOyczXdMD6b1mfQ= +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.crl 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.crl 2024-05-28 08:26:36.000000000 +0200 @@ -1,20 +1,21 @@ -----BEGIN X509 CRL----- -MIIDSDCCATACAQEwDQYJKoZIhvcNAQELBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD +MIIDbTCCAVUCAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD -VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTIzMDMxOTExMDQ0N1oYDzIzODcx -MjMxMTEwNDQ3WjA8MBMCAgG8Fw0yMzAzMjgxMTA0NDdaMCUCFEz4gViZ/JN+6J2f -2mIC8bnWw95IFw0yMzAzMjgxMTA0NDdaMA0GCSqGSIb3DQEBCwUAA4ICAQA0FKRq -yESt1SYxVW+BlFjeDWCf1GL471q+603JiRek5iEVt+bZXrII9y9lXsYhZ1d7BxCt -Wyo/497tPMwRKSiPJwrXPODQn3DTl6EM6VB+w9Kipmm3Fq97TSRBuiDkYaS/nUHh -nDfj40qb1tc18SgBXVLSSiu97U0JMAq8AHIfMzlnhIe4fJ7TJU2TFSrkFAOUVqZs 
-p0/J3aDccYJBnUnEeGD44i80wd3xmuOoBDqRgKcasYsv8QmSFhbD4BTvicxKueDD -kiWTFbgNTDQU9Prp8gYmuSOaQoK6S+8DlO80IRTDwpDq1nQaf5MvwfOqfwQVgAjt -RgrC9BI1RvQu0OyihvcqOh9EEj5O9D/nrgTdsWYJGF/otb8lL6JdXDOAjqWSkZVA -gKDq4NPUskgKzoccD6HY5wgIvSZTV8bXjz2ST2oddfg0/7akNBEmq4TQV9NHb/G0 -AihNJgd3HtESn5Fhm51aJZPyuwqzmkmNHTuHZ5qeDB1dN/UVSqfnLXKeKirOtJCq -VdWGZTFEKJSDPgmLOMy0GhrOeM/y5N5MJZBrwBxPJ3No3TOGr13Ir8E1cxCnchZX -PgpyXNU183gMX4k5NVEWYpCzoTxzY7PNvaMft61IkC9DIdnRxRbEqfdLiVy/k4jP -MUjX4ThGwtUVzqVOiH5uRRE1J7Msk/W3EYzIWg== +VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxMTE1NTIzOFoYDzIzODgx +MjIzMTU1MjM4WjA8MBMCAgG8Fw0yNDAzMjAxNTUyMzhaMCUCFF72el1l160wBZ+u +getcWTcmtomDFw0yNDAzMjAxNTUyMzhaoCMwITAfBgNVHSMEGDAWgBT5/uHw7a20 +Ee3Ajg5mekP3BTT0ZjANBgkqhkiG9w0BAQ0FAAOCAgEAbGRxfJj3wsZ9iUsYTO3W +7+hNbZ+nRaokZT1UgprzDTMmQKWp5HRyvAsTtzxeJZ4NDEqP2mg8imvmSUSnLSmR +pdq7vUdk7lKvdV++fZo4XIRF/pqv7+8Nz8iZvxINGhFaJDUUPPQSFcLm00JIUMzn +9nh5JkCkKFuk34DgHDR3Zn+nM6R+gAuaDsBgv3xnU6PKVW796JPbz3yrN9fma9Pw +P27ICXVyOH2oH7p/E7oNB/J0YxKcD5bjaFkzVHsMExCzeyGTA56qtdN2O1Oxiw2z +L1Yitj1c+2/P29vhCw0IuxKjduL15Qu5Px5BT+B6V3cVUPbn9fYlDjSFAHxyrGno +X3QnVzCChVoHuS+Og/QwEx6AcTSEbl4E47XQK0gr1cG7ayOZoDO3rqGQ+eO6kREM +LpX2lHPofzMBk9lGPfAZX41pXUlshT0irrwFbIt3OTGfvU5x2wAjCap1InzvFS9J +4vEFHcLeHAi5ztlnYNIkB9/kja3ogpSCbcO6WoveJeHCTsXk5K4qIOSvoLYEdRE1 +Pn2EJStyULZW9Sv1JH2puyZ2d2Y7cl6DqCZ5D4tFsyFFsMUNlBJQSxKoPDYnGsi8 +DOTxrwhdxG/mSwn/NoYjZdC0Y+NJyBs1RvLvBZLdgzWS8I+uvyuwTfn27tP7GT6Z +8hmLPBMvUOyczXdMD6b1mfQ= -----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,38 +1,37 @@ -----BEGIN CERTIFICATE----- -MIIGoTCCBImgAwIBAgIUXCG9Tf1Ea3mKUicsQMd1lldTtIgwDQYJKoZIhvcNAQEL +MIIGhjCCBG6gAwIBAgIUOkSdvHg4/HXtknNMhIFkuPv/ghMwDQYJKoZIhvcNAQEL 
BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz -MDMyOTA5MDQ0NloYDzIzODcxMjMxMDkwNDQ2WjCBvTELMAkGA1UEBhMCVVMxNDAy +azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 +MDMyMTE0NTIzOFoYDzIzODgxMjIzMTQ1MjM4WjCBvTELMAkGA1UEBhMCVVMxNDAy BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxHjAc BgNVBAsMFUlCTSBaIEludGVybWVkaWF0ZSBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD -ggIPADCCAgoCggIBAJYQSR4xUs7bFbMY4r4v8l/9cuRxOxnZ8/cFpIDKzCA4c7/U -omjJCaPOFJrldYthRnyjN6MdqKyIED0XjW/MIlbqjMJ4E4AvWKZC1+r3+YoFCYmg -pLq7kfBkw8Kd8BFhfRsGg60VeAzZ79Y6BvJZSyXXQeH1RYiH/bW4o6PdFaO/tchx -KFlYgOW5MoGTemx+muifZ9iKav4/feFZsh73+OFf+KyruSkGnM18YNqnoMiNL3M9 -H5T86OBjcPYHhhwCp/v5cjfD4Yaa1WAM0Bsy+o6b/VwSNhrk8U8JF2rjuK3wZm3L -hyMa3QOn/kgoonl7sVCKes6GpwOmiS/+qKf14JBK+bjTpDk/CsRYpnonBJKNGOzy -tr6CTqLWcJtoUz3kr1ZZnGXUmBjqYc9vYI2EnlzHnBAf+gplJVqtbEZckLx7rKBD -QXyXp5pqDZmnnxQ9qlk1ZMeqw/mjLackdi1CRg8SfA0GlRcQYmcWxc2U5iCcs+ym -q+V0ciK4YFg/z2wEMFEsarGclW8YrZ1RtY+IcmtCXf3rRa0CEbHCiclHoVtuX20c -LZuYsQ5y6TdeWkDTcAwm3ZCYa54LeySuYny8F8A7by42KRg+Z/JjaOlA6hBPGwvC -p1frJqQod5uGd0Zg0DrNKjjWIVc4Z38dRy48b1Ija081WBtKwyJiX8jto6yfAgMB -AAGjgZwwgZkwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovLzEyNy4wLjAuMToxMjM0 -L2NybC9yb290X2NhLmNybDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB -BjAfBgNVHSMEGDAWgBRJTc+PTs7kYFuicyYmOttP7ItAazAdBgNVHQ4EFgQUEamH -VrLZ+p7brEWfC48jxRInC+UwDQYJKoZIhvcNAQELBQADggIBAD3ZOiRT6ESzxFIK -76FY7qNM1PWcNrgTmMDy8iHfBWEBmkAUQbHvY/U5fPnPj3vPLiHXkLDbUliXyEnL -4myo966j8dettrvB7pxibCy7J2FxwoKwMUOY+4IgGBuxVVoWVBwzu5me35RnDZ6i -9+dRJXiZnO2cEVvfFmEfq0w5SQLsqmR6EeIhoUOepmEJDpjE3cSz6QnQ6KWdw2wf -e+dviPlDwS0sNg006lqSy3rVzsnlqLAsoDkeOEZyZmPbc6sAx4RJS9nBH4WERWb5 
-XxVOYIn0QmlJKwlULB3x8dhxUv+a7alBjDt6v2MW1zXH8v1ZcMcJnFy8i6m0VzkV -edrO/ONmqfi/EUr/FothDLQnCoykWjcfL1JGLADjzyRE86Wg4L/DRil8k5wH8Fir -ZZE/kLeOkQN5FhvQK+m3YzGtxkehO7Io3YWmzbv05ZI2d6zroyP6DXS/zJY9wuNd -I/6zp6eUYb/mtT3NF3h1C3SjQpELT2IDoXXYQsbvcVk7pgMB2mP9sYnoDlQsXZvC -oEzD/ollmkHsgD3Zr3p6ANSiNpW6iRYBiWsRoXmVJw+nTSYvWLiMI/vuABXYPPn1 -Tc6yypXgtezMNtUI4fxJ5pU5aHMKL4+XGtCcACyazoVZaUam1DulWUcDhuH/4Om9 -FhcPOT6WqhWjn/zZLW6Rr5OvXwoF +ggIPADCCAgoCggIBAMheyYWl/STLJ0iwlrqNRyURatdeC8oDpKFdpglYHAs/jo3s +fWNySCnaw6NCe0vxFLpqcK8VMNFRGu/XhR/kZ1YR3V4mLwF1Wa5v7a7J9swq50Fk +CsLtaU5vq/h6rIpy0NLnmN5KgqChrMh9IwZ+Mc8sqc/0BFFJsuCCGu0TNlGVOhmN +AbdS3s7wEUwT023CKn47G3pVqeaErEB9honz1I71g5/jNKGe5CLCV35ExzsrzU43 +atyJ0jgh15PYCXDTdsRccSmEs2S6Xh2o4ZhlqioWB+tKxGsdxq8Ri4soy6yyooOz +T/3X5CHpKxiI2P9z38Pr9egPcNPPVMGDhzwHz7p3iBPg0RcWd5VP2nimLJsdGWK0 +bkU7zlQ3R2NelSIW9Hr8MVASihmELvX+AcC6KhTpHHhf3CTPgcAfV2fE9U84Xl2i +shmoEsUQTUx97qKUOKRfY6o+WMBnVkzlqWj+s52ndiT+0KNLTDtvlejEFf1VSF43 +IkS9UJK+XxxEnvIBzNKI5EbWlG5Z38/nKv6pjTXFi3aZR4cdmI/0XfAjLTkrTBaW +lkguEjt+/cxPYJOqt50ldI9kle8XTu/HibmcbU2wYIF21CBjE/hLk/KY3FQlnTFL +y7x3bM0CuTfJ8Noy59f3l56fwPOpaQWqhqO+UoFkbFlbROAiVxfb8KlyZPdNAgMB +AAGjgYEwfzAcBgNVHR8EFTATMBGgD6ANhgtyb290X2NhLmNybDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBTWntryJpCwZqZJ1H3n +znJYhbfoKjAdBgNVHQ4EFgQU+f7h8O2ttBHtwI4OZnpD9wU09GYwDQYJKoZIhvcN +AQELBQADggIBAE8gulfly5+EC8DX3K02qEYPoQwVbVhD0wGrlAhgJiakDvPlX6/K +vSe/1nNRG87jXvXdDiuJ6F4iKZpeJndzvx/8ZEmllyyxDwb3UOmylwW/o3/Uh6fY +kiVBfW6uNNB0BfDKcXDDZgKjTg3kLT5z8m4u8rPoIPFkLFl9AuAq82Ll6NQ+xZFP +lZ3K6HN+ntVnIGP4XkOgEYPxjJO3yTGle8VBqLfo/JKwbZtKfNXxSAMRXiP02SQg +D9yshxkonQYWog2hHz8oDuQQNbzaAFlxnY914av/XwxP8TwEfGNchNtAtrlGRlx8 +PjMfp3Mnbz71yp+L2We2/A7njIPbEcn0FIBedpNyyBON5Cd6Xqx18otmMTtUILZ3 +SUKeYmLp8soVMEmmnWz6y1a4bCKwo6hA8oSoq5ydIeWy/jI9v7DF0S/qZTz3c2Q1 +a3aniuug2FRAxuU/8fSlMrE4672d3505SbHUblhy9XzQ4+sWjkDtYnY89kyY4BTu +W5n6JlVoewZGOjlJ9/6mV6BVLQ74IeiytWtdH5uOQ1wroi5Kq+EroGgFmPSQCIvN 
+XNDvBCNFN/O1+/eVZd6JNx6NMO7DrWql3GFMtJE8u/SWXA9vit3pfmeDrosZ4SDg +ZPt7+JfzITUh2UMaPsQsPF8G8/tYmxDbELE5LjFcQ24ps362rG+q2rfA -----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.invalid_date.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.invalid_date.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.invalid_date.crl 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.invalid_date.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDRDCCASwCAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD +VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxOTE1NTIzOFoXDTI0MDMy +MDE1NTIzOFowFTATAgIBvBcNMjQwMzIwMTU1MjM4WqAjMCEwHwYDVR0jBBgwFoAU ++f7h8O2ttBHtwI4OZnpD9wU09GYwDQYJKoZIhvcNAQENBQADggIBACgakUDSN7qq +ATkeMC0PpwymGBu6i3Jd641BKDhwe0yG9rDa1ppm1W9z/z64LrzKSSeXJ8Hc07wP +E94ODXqouo0WlGK4HnxGJ2QwHPJG69bYfcJREQw0Df/dTjlatyIc95QSsmChKixu +3goFe1laa421bUDIFr111Zv+4DV/LDoPZSQvRe0xHvouZNAQ0rQ8T4gVJkU6w/dW +636DvyXpW6/LyBTH4Nf4PHWjP6PTgzOW8+yX0RwCpHegwAA0ehEJZ8HAvW7VQWA3 +uDdfB8j9uxyKQeeIkYLkK/Ds5P0nN8Kd/6OKGlF+9mRCpEQ0n3tgQa1q7tn9blVL +9yu3mmTbRB3YTRUCy/B7Wc8AwYE+k56wZoYpa7GI7mRYe7Vwhs6PeFmJLcZ3ByOl +1eeR6yIRzSbS50rVNOeaez4o0hjGEj82OJpK1THtB8pnCCIE5eZ8NI37WZGcPvMU +KbNC+JjX+rlcnK7NTdS7eQZjcKOYVuDZ0lW5n+TaXKTLL24tuA+oV1SbRZOZN7QE +5/31A/1dCg2NDLKLNsSlq3A2DAJE5SOMSNCq4V+WGp36LAlKlNvYfHrcRxBEM6bp +Qa4xCrcUINJ9x51wd0h5+5DilGdpoukSNwnSbMTBG7degdh2DGT4MqJ2Y1+puL9n +7EmMLPMyTYK/tB4ZsbgV/z05Dyngi3Ut +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.invalid_signer.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.invalid_signer.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.invalid_signer.crl 1970-01-01 
01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.invalid_signer.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,20 @@ +-----BEGIN X509 CRL----- +MIIDRjCCAS4CAQEwDQYJKoZIhvcNAQENBQAwgb0xCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMR4wHAYD +VQQLDBVJQk0gWiBJbnRlcm1lZGlhdGUgQ0EXDTI0MDMxMTE1NTIzOFoYDzIzODgx +MjIzMTU1MjM4WjAVMBMCAgG8Fw0yNDAzMjAxNTUyMzhaoCMwITAfBgNVHSMEGDAW +gBTWntryJpCwZqZJ1H3nznJYhbfoKjANBgkqhkiG9w0BAQ0FAAOCAgEAFSRriQnY +3LIc9AHCGYZM+BFfaDnQCiwUx2PuYownMGOyDOu8xjBBIgNgLsNsqVLBQqP/tOdz +9WpIQd2VXbj8o2UaHZullRbJNJoNHzuC0KsdYuvZVooJ46nJONCo6RwsapiLlXnf +Y/c//Ynqmk8xBZZpB4pAZKKq0D3eoeNFPJGWSwRa5AfQx5qpbikntK6khIBb8c+4 +NfK6VMpEsHZQUG25fdLIpDOV2nnhRd2Xbcu/THFeCV2VnI8yoormOeiJznRJq1bv +C4+jmW4NUAvFUFbcBRDpuq8LTIQUYuQYs018aCOqTTpMTTV5Q1iWuTZtAbO91UJp +APEtEJH+/Wd/xB5ABk19bMiyxbCQeoLci7US0YEzASeGLH3Z59swwMvkGUhc2UvE +Jtpx0unupgdA3lCpa/nDYwuFYCqmZ1kKHYtELC7WUldzd8K/J8XkNF4UIreTTD/0 +kLC3UZEKuoIj3Lv4GrnWKcIdQ51jUMdbTUeIdWFzjqdxFq4i6oqgWA5Hjcr3yDcM +ATjxVDLxeLC+fJf6/SQu+7dOWKPKzIS8BcufmZpkRoxrt5qmgoJHuceps5pdfa0B +dy8xEb9FnGNSuLec1rcUPPFgCioSgDFoQiU0aoToW/kDVTA6/PHFh6TD6Qynmc4K +PtMu/kCMWULgMi68svcnA7NxpoKAsCnQH6c= +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.key s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/inter_ca.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/inter_ca.key 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDIXsmFpf0kyydI +sJa6jUclEWrXXgvKA6ShXaYJWBwLP46N7H1jckgp2sOjQntL8RS6anCvFTDRURrv +14Uf5GdWEd1eJi8BdVmub+2uyfbMKudBZArC7WlOb6v4eqyKctDS55jeSoKgoazI +fSMGfjHPLKnP9ARRSbLgghrtEzZRlToZjQG3Ut7O8BFME9Ntwip+Oxt6VanmhKxA 
+fYaJ89SO9YOf4zShnuQiwld+RMc7K81ON2rcidI4IdeT2Alw03bEXHEphLNkul4d +qOGYZaoqFgfrSsRrHcavEYuLKMussqKDs0/91+Qh6SsYiNj/c9/D6/XoD3DTz1TB +g4c8B8+6d4gT4NEXFneVT9p4piybHRlitG5FO85UN0djXpUiFvR6/DFQEooZhC71 +/gHAuioU6Rx4X9wkz4HAH1dnxPVPOF5dorIZqBLFEE1Mfe6ilDikX2OqPljAZ1ZM +5alo/rOdp3Yk/tCjS0w7b5XoxBX9VUheNyJEvVCSvl8cRJ7yAczSiORG1pRuWd/P +5yr+qY01xYt2mUeHHZiP9F3wIy05K0wWlpZILhI7fv3MT2CTqredJXSPZJXvF07v +x4m5nG1NsGCBdtQgYxP4S5PymNxUJZ0xS8u8d2zNArk3yfDaMufX95een8DzqWkF +qoajvlKBZGxZW0TgIlcX2/CpcmT3TQIDAQABAoICAB9LwM2tbLHdwkvF+zD1ppZZ +V1b+szgxI/ppSmj8uFqgaXds5/PLso5JA1QhaukkZVCtld6e4HJdKOgrwTkHP4Wv +wiP1slNXvTNz+4uYs4HVtKufwNeL5e4Qnqtvm7n/L3M1pciYmjkVL4vcEceul8CM +cRSQQEljCburzqFXZh1NgdbiUZGM8cygLg90LEqhMGppeIP908zzzYTAJm6vJTTU +D6Q+RGb3DpqIQMqx2u14zLcqDDiYlTtbu9R1gpn7CXqnlqw9tBhoTJF04pGfVXn/ +2WpMkgvKCZGoG6PXciKN/zizXevf19vdE7RgEYaq1lk/ZngBy2Vl5Y7ZKJr9fFoU +hdZq+w8UroAAzRIiLrbnGCt0+tK3rBC/DvtffNjvwjvphGaxKiz1pZdegZ4TsO+y +zd+Aa6Gb/XCf5TIakFaYXPTpKj0UMi/TVKshblC9JWr3cuL3EoGJe4XtqxS0cGiT +llLDTbiS4eSAM6S6Mek55jsD8KCGBrROp+A0Wc7nUinWfKXThJYvzOm4E0gSlvbN +Fg9LqlbK47vbtSyAmfNcdl+C5oHVwE5p5AkZV8hNQOVusfs0aLufvWC+NCGLyJ/d +wuMmFmx2XkrkhD+BsuhP1grkSjJLpKfufCzP9NBKXRzDMReiOI40AjL5Rm9ZVOc4 +99B1CuyCiWG4BPUVlj3ZAoIBAQDs0nMc6lOY2VVkAYe6B5SmF94nMUzmegcIC4Hi +oP0+4X8M2GfAiOKAb9QaMgrNnjOOeomYMUtfUhueRkXhvt5ssmalYCTLAuskApJ0 +UM34UfpY3P3V3i69hOSdpPje6bsorTMOWheNiMz5XKTWaVnk27Ou2fAuChHuLbmC +ApeMEYd/VgbwgdtOpLaJGPt0cQ1FhbSsoac4tHMLwN9GpVegts/WZZb0bzjXDLEY +RcfxcYCO0FJvfaIyfr/JEnw0CsLrkDdIVcsG+QiebygxV0CiDzCeHUgd0uLNq+X6 +fXqqQh8KUMHVHuFwGpEljpxvHqXQFq5SaEYqq7m9tGTcrHS5AoIBAQDYmKgHWG9a +lz+usrwoxuHkzPJ+lpqslEVNs60nPf3aI6+bYjsOI7MNT7QkGQXdQ/aErWqzAali +566f3sUsDz2z/jaMGZYElqgF5sQdIAFTihPS5fkX7mcF5Nqst2tQz1rogEwWgJ14 +XSQFCogASkxTEjD+/1kmuaSsc0NuOSkh4Wcxohla/7GQaiWnTBLnZ9axobLmmT76 +vf4mGqAWmNNenHRkUY+ZoYF1xLpR/60OKzQTdUrL5NZB3bL4n3+ViOI0JE3VZYRQ +PP3/tTr/NwwhTbkxpw/pNhbFIItPM1fzmfieGfgZlqmj5EXgwtGG2qlFPgR+9tWw +uJwf4bHUerU1AoIBAQDDXRj5UZWFEkUPKvP80yQdoLLzQgheWWxThqqven9v0DYv 
+MpbkjgfMTHR856th1JBTiWLY2lGDYQjHmNEtWXfD06g55QZ1MwMnwbdvnPlNUNNl +W2lLdO66CVdPdTiZK9fpxnfH7ype5+uwCm0xM9ekpFmmdMNeN5BzG+VdCyZZtlxA +/4baAUYZqmeq0aEefeSk5ZgWkYSRkssVdxa6AMw5GJZ8F3JgUyBgx2eQzoAS/b9A +ETrwHoQfg9BS35z2kaobCe2RDrVeGzKxAKH3kjMPfdhtl1pWwBG5+YTPD5SRv9o7 +eENuvPrcsA3tHaiPQoknEI7eDIdVzDR9+sL6CIqBAoIBAQCcY90vMJN2fa7lnPhv +GOsSIUkWTffwlD5WFF3577DTSOEK+KpbUzt9aQdQ6SBq2x+sPOrFxXVgjJhxppAH +eBm9VNhd4DuJpJ49ZJpFzU4n25LkvFhXBzQr9UpPW0CJYK7rIXfO557LwbS4TxpT +21GwyXnHJOhiJbjZK4CMnYkthWrVU42rPuQeugXl/e/IVhmWuIJMLmpi8bwIF0Fw +D3jO33jK1nkxHIQ1XI6LlmiFynwXcKFJBzoM5N7M1Z1xgxyROYVZh6s5pFBhyaGH +lV+UzGHjkBTU9tEKFK2Byji/E3WH8ohJMZfbVn9+Aiz3ifqenGDyq5cvupxACN41 +5UPhAoIBAC51vwgPKuFNXU/pipkRMr7S+Ew7N5rVij3dJhcaq0RMNm24zneKZoC2 +7b7zKd92zYV4RgviUZEiSmiGXbEvVkNrSZ0BESRt8C7dfqDZx8E3+LMH2Fz2Q3CJ +EJjEGiQuDQuyx2ICs5ETvIFzxy0A+uRbsrjJHbikizvPbX6qY1F9q4m3GAxU1mlv +Nj548F/eL13gCUVPfmcbkDXKXDjeMydogPqcH20/d+88Cl5puFV1KwBptmJ/2WVP +fcVKpeLL1DvWsd9UikzYuLXP3tGs47aPPKqX20lQoZWW+su6uWXi62WuOSPckHF0 +/y+kPmf1RQh8JbiNv7ddFdj4eUReLOo= +-----END PRIVATE KEY----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/root_ca.chained.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/root_ca.chained.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/root_ca.chained.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/root_ca.chained.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,56 +1,57 @@ -----BEGIN CERTIFICATE----- -MIIGXzCCBEegAwIBAgIUWAen+5+bRZaL8kVlq51CXPQUdZUwDQYJKoZIhvcNAQEL +MIIGXzCCBEegAwIBAgIUd6BiIEGe+cX502NGGPVuMHrV0ucwDQYJKoZIhvcNAQEL BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz -MDMyOTA5MDQ0NFoYDzIzODcxMjMxMDkwNDQ0WjCBtTELMAkGA1UEBhMCVVMxNDAy +azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 
+MDMyMTE0NTIzN1oYDzIzODgxMjIzMTQ1MjM3WjCBtTELMAkGA1UEBhMCVVMxNDAy BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxFjAU BgNVBAsMDUlCTSBaIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQD0gnTWPpI/FahyE+PizunOHzNBi88KObopjL6+6p9J10RB82pDxaLoMxPo -6xkm0ZKiFea6ll6sAfcKPzCei+3691sCOiGN/JYdSZR+SynRGeD1o8KJWAwfmZ4w -DNHWHRqUE6u3oqAPDQUqaOnzyGdh2vGLP3HU5F5rIDhYzilm7WuXykYdiF/P6Rjs -xzzQZOxHTnvV8byEDN0Sw0ONsQSFee0uM22e20Kme34A4p+p7QBiRFSJAQyLtp/o -YBY8zeIgekr+BppRbCItEFAgHBS8DLk04jhNfGlU5RteaOJe7D6tg7z9CC8X0dAI -/p1vJkz5zYMPGuoKKaowDAJ+ncwN+UDYhbd4g0ATKC7M5DFmn4Wy60G+3op2BNp4 -PadcsOcXwzKsOSQix0+n4Tfw1QQ07+ilBHBe8fCRpk2Nf/byYuEaBjPBTXUcpTsp -Ydv0s5HjSyRfhEWKMQBOIn6mSORayTJ1/xQECrOfuIopLiZyMCjhixT4PImiGucM -ruCmsHZvfA/8VQxyEcPK5K+XqFaSr5r423C4CzcafHhvUFOYj2WmJyZzWc2X2fjo -l+ZspNpQTlvYgVi2cTeBnbNqCNMcTiGoyie5roHGa4CnigHuBIVSuUVGq/rqFKnT -KgKboI8C3XpQID6rFcYY444wOpfea8LodHTXFF6o8A5m+6QlGwIDAQABo2MwYTAP -BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBRJTc+P -Ts7kYFuicyYmOttP7ItAazAdBgNVHQ4EFgQUSU3Pj07O5GBbonMmJjrbT+yLQGsw -DQYJKoZIhvcNAQELBQADggIBAFCGlLEdAsicVPPuekhdfOyoK/P6r5JHnPIpC894 -9TVUp/7LBvDOwcNAvs+34dsbfRtMmwXUCGofrS4S0+zugnpgx6D1ScWC+FNdBJf9 -C6XKBiU4Mxn/mKWTDUMBCxI+RRbpOvOox5h6pQcfo0IYz5okazol1nT40IdvNkZ7 -WDAz6Xqmw8k/+8Y+l1fDSfUqcgPnTBfgX3kkna8VHBx74nSqeFgOVvtd8BJq/xIG -6rAkGsv/PzmQqKpyoD70N/nGALomLlVScX5qIbNrXWW0DWxY+kDwhRlskPdYS7RJ -Qol3NX0t7GZ+l+0W2Kpp7k04n5xrJ7pzTDyltJJEeaLfqrMlF5MCK/UyJqxa3LQr -IkP2wTxM4HR1gsIvr8ei3QFSBtxEGiQl2VWfBs8bwy+KkL1ao1UIcB+1dj4SLy8x -RqNFNfogkyLL9cELiKxViRQgNYowv3OsSw30i4eSHMor8m0qfVf38t7gi+m0Ba+T -+RIV5MlZcjG6C3jS8AMpUwXO4hOxGEueuXmZuCDmyypqRpMJc0hu2JA1Q9fztvfn -hnOAv6K++5hB4mIyqUp8/QJjO9X2kKa4+MmbuNTnRuC13s2f7pzHjZR3cFf6QN5b -duxTQZ18xvqXIR6EV4PVwShjCMS6NHmkmumIxMzaDK1mGwL7l7B0ZS+r3i7qwGPw -2iYv +AoICAQDMw/UE5XNiZpjuCRtyq4A9PDBuJJgyUl14xvN8lcsLHGK9b/WkQdDxUXsN 
+QbvS2HVkg2X6AGHwixk/3U0mzgzImZhmh3i0IbFn7L2eqpuvgP82BJpveq58Sjsk +OHYuG2JqSv63reKtK8pLXtKZnSV9vFUAwxWSuqA68FLwDgjGMK/Iy3Hh+cwnpffn +IMqJ70Nlxc7z4KKO4nrO/+rIQSqe0UpjEEaK4Eb3YXWWontaah3/sw+uAbVncJMV ++AacHQzi130sJJeLe4p+8DD9EYX112DEj6WFrHpyEqhsGXvLO7uJRzMqJ1WVz47x +j8wK+GPm66KCeOM+wm65u49vNHDotlQzoclIzLDiegQo/3ob5YRTFNDJzbNN6AAE +zF8K7f8t2TNYeWgPOmXLEVmFalOSNTZ5sLrJipsYs7uT7CT1orW3PvM16v+knigJ +NuryIdr/+hFgRh7Sr4U/6H6fDuZGJXgZeUSNM7S1UI0iFTZyKAP/3EQrhRWlIROs +ql9rFnH0LWaPcmk66Yyhq6Va0dmLBSjGPWAHLguKyHkMvhJfMd/1miGfz9Rz9LbE +6CqOK8sf6EQNWTgmpTdXgjmR9dvMk3ki2aar0u+IamzfIx3y0OPSlpaO8mFMucZe +3iaeG/l84sUR93NgVI2dCdtEinCQviudV+QwRdLzZQQNM5zLzwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBTWntry +JpCwZqZJ1H3nznJYhbfoKjAdBgNVHQ4EFgQU1p7a8iaQsGamSdR9585yWIW36Cow +DQYJKoZIhvcNAQELBQADggIBAHw/+RKyDRBsvWksX4ji95W+4uo292psPzpeusjw +Ztl25D4jssgjNbEiNwyYgV9e9BCse9hOkFwAE6ogwBAel7POX8XjizaJcwSs/GaC +2ORQ/KYRpMsypENZ4HZQbc8j9ROqTDD45B+9/5nAA6le/7wd0yS8hbum/b78vAN1 +98Ja8wfQIq1PE0heFELSRR2hAmcGkxIo1tBqP/CnAzOpBZ2ovPJ75oeGlnpk/ATY +rUJPTY0UPGp9ZSq/l+t88onDewjCblMMMBeJSEklP2CFlE1jGx3QJtnThiJ9WQKG +TMakgaAERrcvXs6Lx2GFuk7kCOJsWJtvT6CJVCUtEBHNGY3GqyQwNrE3OEVQ6CFb +CNNarrRKlArBISExLY3xn0CcuWr8GewsIvwzmJBX+u2Xs0s7RYFc4S4s/SbRLqi4 +Gn77E7dG711nKHXwDxYOuDAL0MNBT4aDephQK+rF7GlHMLTUPWKYCwmjh2vzmRWJ +fZkojybvX6g8+7fKmwTUn5LXlcnmjv9KYN8LgfHSQhvRaGtGph0X8mVH3eCs4MGG +OUU6p9wdPS3MDBHRzn0MXEhq25r+xT829lcHdoxkMVorVDTDD7hOm3HBsF/LOh6c +/sA1LZnnfy+YZt+Gn5cE5CSK3ddmzRbrB+ZHWbJahegJktWq2rrnw9ipfRln7DJF +fsHf -----END CERTIFICATE----- -----BEGIN X509 CRL----- -MIIDGTCCAQECAQEwDQYJKoZIhvcNAQELBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD +MIIDPjCCASYCAQEwDQYJKoZIhvcNAQENBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMRYwFAYD -VQQLDA1JQk0gWiBSb290IENBFw0yMzAzMTkxMTA0NDRaGA8yMzg3MTIzMTExMDQ0 
-NFowFTATAgIBTRcNMjMwMzI4MTEwNDQ0WjANBgkqhkiG9w0BAQsFAAOCAgEAzXCT -KYc+J3XC49cCzdJBRzXdVk8AqylNFCBC/Z4a9AaPnFgUHIZqLcucvVTBMlYkzQST -7lu47hksCrlePGeY58goa8rOUuTjH0Gk/809oMNMxyJ3UjuEq/Q45gDeKys8UZqu -qrgHZ1dnBB5ARdPEhkMLzBgizrknhPXcAyg0f4dy8wFPCNJ0T+DiNdqKoQCZNsxD -3p1N3vTMIO20oWbX9cDZoY2Xb0rT9Cbt7ES3JY1DB4Z92zPB5ZxFuCIsxT3Jtszd -fM6YktxJbUvds/mqwmYCbQNZ4veS5YcrFPVVSADjnwP88GMbIQddAvXLOhjrUj4B -QEMndtREs0MvkDZdc/YkTEI/c1QF1xNT+UrMOxC0sEHSvcOQXNtw579QJ0gucA2J -HWWr6p2wTDrIefhppBQS0GSY2n7L1loKAZZNWt56TQoXRFWI4CtLJJTsPKIHWTQz -KLBgv5UlOdbcuh1+foY/XS8prlZvMS22oiDMLIknBR7ywYuYEq9YPKzDXWAL4mHk -DInbBQEYC2ar7wiLLBOM/2c2BmkzDdygChj7/1xvNYMGXEnak0Y/V75uAuWQ1h0T -3e52xW2RzwjYsoM04WBsSJFNd7VYSuSX7SneJywrSBneB+XvB7tcVaycsA1f3BMT -ptsIMqT9/N3++8MGCb/SRWoWFlLjITR9l5y3BUU= +VQQLDA1JQk0gWiBSb290IENBFw0yNDAzMTExNTUyMzdaGA8yMzg4MTIyMzE1NTIz +N1owFTATAgIBTRcNMjQwMzIwMTU1MjM3WqAjMCEwHwYDVR0jBBgwFoAU1p7a8iaQ +sGamSdR9585yWIW36CowDQYJKoZIhvcNAQENBQADggIBAIs4Y7Qsk2TAJQqdNQeM +3Gjx2fYFHbliDRVQPU5OP19ppeUzFGRL/SRplCH4rvEj4bBwQZcsNdmzxYyFbWcz +lU38l5/mgkLkPdHzRnbPmVraWqKzuvNrTQ3Lj5VcgOJ95UAOwOIg2jWZlkIAUgO/ +cayWJK7l0gOIDCKSyuI6haM5D6wNAwfyKZ4WkBgYulCeqRmsMePvsj/WYL1tcR6Y +1WSN6FtvLn9u2vqrJ879BEH5RDQSkCXsxBqKFnOpLf/gpRYhRp+3JpSACXOzy1wr +8KxlvZ/14UOhz4x8VZ9GJTzLydzLPIhNTwcl1PJyD3pOhASxfdSXuR8f3jJvAGv5 +kaj9uO23sKU4PzLfr/DPFkhKsp+vShoSGtnZjj7ewYW9Y/PY0D9TX8CWIcwKuJ8U +XTWRj9s/FLTlqfrbJXvSaRd0rihGQI+mSwar5cnBuxULgw4V35YmTiY8wCf3+k9c +tGRRUj5A2fVs9NWWTReBatoou8Rnuraz6ctqwFsxJQSGS1JpfECznGq2wKqLstKn +WCMEAKehcGqLlSco7Hg2qRNgDw5AAC058drRf9L70geV4Tf00iXjK19aHSJ1wH+p +7hUcTCiQ7413ztC7HawqsRW3Yzszucj+6vqF8ZYMh+9oEzzWJEt/piKakIwbxwKk +cf5JE3KE7X+LQS3p4+sw/wP4 -----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/root_ca.crl s390-tools-2.33.1/rust/pv/tests/assets/cert/root_ca.crl --- s390-tools-2.31.0/rust/pv/tests/assets/cert/root_ca.crl 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/root_ca.crl 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 
+1,20 @@ +-----BEGIN X509 CRL----- +MIIDPjCCASYCAQEwDQYJKoZIhvcNAQENBQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYD +VQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9u +MTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMREwDwYDVQQIDAhOZXcgWW9yazEPMA0GA1UEBwwGQXJtb25rMRYwFAYD +VQQLDA1JQk0gWiBSb290IENBFw0yNDAzMTExNTUyMzdaGA8yMzg4MTIyMzE1NTIz +N1owFTATAgIBTRcNMjQwMzIwMTU1MjM3WqAjMCEwHwYDVR0jBBgwFoAU1p7a8iaQ +sGamSdR9585yWIW36CowDQYJKoZIhvcNAQENBQADggIBAIs4Y7Qsk2TAJQqdNQeM +3Gjx2fYFHbliDRVQPU5OP19ppeUzFGRL/SRplCH4rvEj4bBwQZcsNdmzxYyFbWcz +lU38l5/mgkLkPdHzRnbPmVraWqKzuvNrTQ3Lj5VcgOJ95UAOwOIg2jWZlkIAUgO/ +cayWJK7l0gOIDCKSyuI6haM5D6wNAwfyKZ4WkBgYulCeqRmsMePvsj/WYL1tcR6Y +1WSN6FtvLn9u2vqrJ879BEH5RDQSkCXsxBqKFnOpLf/gpRYhRp+3JpSACXOzy1wr +8KxlvZ/14UOhz4x8VZ9GJTzLydzLPIhNTwcl1PJyD3pOhASxfdSXuR8f3jJvAGv5 +kaj9uO23sKU4PzLfr/DPFkhKsp+vShoSGtnZjj7ewYW9Y/PY0D9TX8CWIcwKuJ8U +XTWRj9s/FLTlqfrbJXvSaRd0rihGQI+mSwar5cnBuxULgw4V35YmTiY8wCf3+k9c +tGRRUj5A2fVs9NWWTReBatoou8Rnuraz6ctqwFsxJQSGS1JpfECznGq2wKqLstKn +WCMEAKehcGqLlSco7Hg2qRNgDw5AAC058drRf9L70geV4Tf00iXjK19aHSJ1wH+p +7hUcTCiQ7413ztC7HawqsRW3Yzszucj+6vqF8ZYMh+9oEzzWJEt/piKakIwbxwKk +cf5JE3KE7X+LQS3p4+sw/wP4 +-----END X509 CRL----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/root_ca.crt s390-tools-2.33.1/rust/pv/tests/assets/cert/root_ca.crt --- s390-tools-2.31.0/rust/pv/tests/assets/cert/root_ca.crt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/root_ca.crt 2024-05-28 08:26:36.000000000 +0200 @@ -1,37 +1,37 @@ -----BEGIN CERTIFICATE----- -MIIGXzCCBEegAwIBAgIUWAen+5+bRZaL8kVlq51CXPQUdZUwDQYJKoZIhvcNAQEL +MIIGXzCCBEegAwIBAgIUd6BiIEGe+cX502NGGPVuMHrV0ucwDQYJKoZIhvcNAQEL BQAwgbUxCzAJBgNVBAYTAlVTMTQwMgYDVQQKDCtJbnRlcm5hdGlvbmFsIEJ1c2lu ZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMTQwMgYDVQQDDCtJbnRlcm5hdGlvbmFs IEJ1c2luZXNzIE1hY2hpbmVzIENvcnBvcmF0aW9uMREwDwYDVQQIDAhOZXcgWW9y -azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTIz 
-MDMyOTA5MDQ0NFoYDzIzODcxMjMxMDkwNDQ0WjCBtTELMAkGA1UEBhMCVVMxNDAy +azEPMA0GA1UEBwwGQXJtb25rMRYwFAYDVQQLDA1JQk0gWiBSb290IENBMCAXDTI0 +MDMyMTE0NTIzN1oYDzIzODgxMjIzMTQ1MjM3WjCBtTELMAkGA1UEBhMCVVMxNDAy BgNVBAoMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29ycG9yYXRp b24xNDAyBgNVBAMMK0ludGVybmF0aW9uYWwgQnVzaW5lc3MgTWFjaGluZXMgQ29y cG9yYXRpb24xETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1vbmsxFjAU BgNVBAsMDUlCTSBaIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQD0gnTWPpI/FahyE+PizunOHzNBi88KObopjL6+6p9J10RB82pDxaLoMxPo -6xkm0ZKiFea6ll6sAfcKPzCei+3691sCOiGN/JYdSZR+SynRGeD1o8KJWAwfmZ4w -DNHWHRqUE6u3oqAPDQUqaOnzyGdh2vGLP3HU5F5rIDhYzilm7WuXykYdiF/P6Rjs -xzzQZOxHTnvV8byEDN0Sw0ONsQSFee0uM22e20Kme34A4p+p7QBiRFSJAQyLtp/o -YBY8zeIgekr+BppRbCItEFAgHBS8DLk04jhNfGlU5RteaOJe7D6tg7z9CC8X0dAI -/p1vJkz5zYMPGuoKKaowDAJ+ncwN+UDYhbd4g0ATKC7M5DFmn4Wy60G+3op2BNp4 -PadcsOcXwzKsOSQix0+n4Tfw1QQ07+ilBHBe8fCRpk2Nf/byYuEaBjPBTXUcpTsp -Ydv0s5HjSyRfhEWKMQBOIn6mSORayTJ1/xQECrOfuIopLiZyMCjhixT4PImiGucM -ruCmsHZvfA/8VQxyEcPK5K+XqFaSr5r423C4CzcafHhvUFOYj2WmJyZzWc2X2fjo -l+ZspNpQTlvYgVi2cTeBnbNqCNMcTiGoyie5roHGa4CnigHuBIVSuUVGq/rqFKnT -KgKboI8C3XpQID6rFcYY444wOpfea8LodHTXFF6o8A5m+6QlGwIDAQABo2MwYTAP -BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBRJTc+P -Ts7kYFuicyYmOttP7ItAazAdBgNVHQ4EFgQUSU3Pj07O5GBbonMmJjrbT+yLQGsw -DQYJKoZIhvcNAQELBQADggIBAFCGlLEdAsicVPPuekhdfOyoK/P6r5JHnPIpC894 -9TVUp/7LBvDOwcNAvs+34dsbfRtMmwXUCGofrS4S0+zugnpgx6D1ScWC+FNdBJf9 -C6XKBiU4Mxn/mKWTDUMBCxI+RRbpOvOox5h6pQcfo0IYz5okazol1nT40IdvNkZ7 -WDAz6Xqmw8k/+8Y+l1fDSfUqcgPnTBfgX3kkna8VHBx74nSqeFgOVvtd8BJq/xIG -6rAkGsv/PzmQqKpyoD70N/nGALomLlVScX5qIbNrXWW0DWxY+kDwhRlskPdYS7RJ -Qol3NX0t7GZ+l+0W2Kpp7k04n5xrJ7pzTDyltJJEeaLfqrMlF5MCK/UyJqxa3LQr -IkP2wTxM4HR1gsIvr8ei3QFSBtxEGiQl2VWfBs8bwy+KkL1ao1UIcB+1dj4SLy8x -RqNFNfogkyLL9cELiKxViRQgNYowv3OsSw30i4eSHMor8m0qfVf38t7gi+m0Ba+T -+RIV5MlZcjG6C3jS8AMpUwXO4hOxGEueuXmZuCDmyypqRpMJc0hu2JA1Q9fztvfn -hnOAv6K++5hB4mIyqUp8/QJjO9X2kKa4+MmbuNTnRuC13s2f7pzHjZR3cFf6QN5b 
-duxTQZ18xvqXIR6EV4PVwShjCMS6NHmkmumIxMzaDK1mGwL7l7B0ZS+r3i7qwGPw -2iYv +AoICAQDMw/UE5XNiZpjuCRtyq4A9PDBuJJgyUl14xvN8lcsLHGK9b/WkQdDxUXsN +QbvS2HVkg2X6AGHwixk/3U0mzgzImZhmh3i0IbFn7L2eqpuvgP82BJpveq58Sjsk +OHYuG2JqSv63reKtK8pLXtKZnSV9vFUAwxWSuqA68FLwDgjGMK/Iy3Hh+cwnpffn +IMqJ70Nlxc7z4KKO4nrO/+rIQSqe0UpjEEaK4Eb3YXWWontaah3/sw+uAbVncJMV ++AacHQzi130sJJeLe4p+8DD9EYX112DEj6WFrHpyEqhsGXvLO7uJRzMqJ1WVz47x +j8wK+GPm66KCeOM+wm65u49vNHDotlQzoclIzLDiegQo/3ob5YRTFNDJzbNN6AAE +zF8K7f8t2TNYeWgPOmXLEVmFalOSNTZ5sLrJipsYs7uT7CT1orW3PvM16v+knigJ +NuryIdr/+hFgRh7Sr4U/6H6fDuZGJXgZeUSNM7S1UI0iFTZyKAP/3EQrhRWlIROs +ql9rFnH0LWaPcmk66Yyhq6Va0dmLBSjGPWAHLguKyHkMvhJfMd/1miGfz9Rz9LbE +6CqOK8sf6EQNWTgmpTdXgjmR9dvMk3ki2aar0u+IamzfIx3y0OPSlpaO8mFMucZe +3iaeG/l84sUR93NgVI2dCdtEinCQviudV+QwRdLzZQQNM5zLzwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBTWntry +JpCwZqZJ1H3nznJYhbfoKjAdBgNVHQ4EFgQU1p7a8iaQsGamSdR9585yWIW36Cow +DQYJKoZIhvcNAQELBQADggIBAHw/+RKyDRBsvWksX4ji95W+4uo292psPzpeusjw +Ztl25D4jssgjNbEiNwyYgV9e9BCse9hOkFwAE6ogwBAel7POX8XjizaJcwSs/GaC +2ORQ/KYRpMsypENZ4HZQbc8j9ROqTDD45B+9/5nAA6le/7wd0yS8hbum/b78vAN1 +98Ja8wfQIq1PE0heFELSRR2hAmcGkxIo1tBqP/CnAzOpBZ2ovPJ75oeGlnpk/ATY +rUJPTY0UPGp9ZSq/l+t88onDewjCblMMMBeJSEklP2CFlE1jGx3QJtnThiJ9WQKG +TMakgaAERrcvXs6Lx2GFuk7kCOJsWJtvT6CJVCUtEBHNGY3GqyQwNrE3OEVQ6CFb +CNNarrRKlArBISExLY3xn0CcuWr8GewsIvwzmJBX+u2Xs0s7RYFc4S4s/SbRLqi4 +Gn77E7dG711nKHXwDxYOuDAL0MNBT4aDephQK+rF7GlHMLTUPWKYCwmjh2vzmRWJ +fZkojybvX6g8+7fKmwTUn5LXlcnmjv9KYN8LgfHSQhvRaGtGph0X8mVH3eCs4MGG +OUU6p9wdPS3MDBHRzn0MXEhq25r+xT829lcHdoxkMVorVDTDD7hOm3HBsF/LOh6c +/sA1LZnnfy+YZt+Gn5cE5CSK3ddmzRbrB+ZHWbJahegJktWq2rrnw9ipfRln7DJF +fsHf -----END CERTIFICATE----- diff -Nru s390-tools-2.31.0/rust/pv/tests/assets/cert/root_ca.key s390-tools-2.33.1/rust/pv/tests/assets/cert/root_ca.key --- s390-tools-2.31.0/rust/pv/tests/assets/cert/root_ca.key 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/assets/cert/root_ca.key 2024-05-28 08:26:36.000000000 
+0200 @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDMw/UE5XNiZpju +CRtyq4A9PDBuJJgyUl14xvN8lcsLHGK9b/WkQdDxUXsNQbvS2HVkg2X6AGHwixk/ +3U0mzgzImZhmh3i0IbFn7L2eqpuvgP82BJpveq58SjskOHYuG2JqSv63reKtK8pL +XtKZnSV9vFUAwxWSuqA68FLwDgjGMK/Iy3Hh+cwnpffnIMqJ70Nlxc7z4KKO4nrO +/+rIQSqe0UpjEEaK4Eb3YXWWontaah3/sw+uAbVncJMV+AacHQzi130sJJeLe4p+ +8DD9EYX112DEj6WFrHpyEqhsGXvLO7uJRzMqJ1WVz47xj8wK+GPm66KCeOM+wm65 +u49vNHDotlQzoclIzLDiegQo/3ob5YRTFNDJzbNN6AAEzF8K7f8t2TNYeWgPOmXL +EVmFalOSNTZ5sLrJipsYs7uT7CT1orW3PvM16v+knigJNuryIdr/+hFgRh7Sr4U/ +6H6fDuZGJXgZeUSNM7S1UI0iFTZyKAP/3EQrhRWlIROsql9rFnH0LWaPcmk66Yyh +q6Va0dmLBSjGPWAHLguKyHkMvhJfMd/1miGfz9Rz9LbE6CqOK8sf6EQNWTgmpTdX +gjmR9dvMk3ki2aar0u+IamzfIx3y0OPSlpaO8mFMucZe3iaeG/l84sUR93NgVI2d +CdtEinCQviudV+QwRdLzZQQNM5zLzwIDAQABAoICAADiwh8UzQR6dCPndy5uTn41 +UfJQBzaEg7H/jlMWJMw2AblXECV3QWuh0hnzFFhrpkpahSjpMoNDXscXcnt9/bEq +dO3QnTWORcGw1PsoOscuFCyMJYzg53tTKOFVuzEk3i6eh26M+oOMQnJEBT4z31Ml +auq3bVL2qrXGj11JekE9Oa1xL9tt5LOxNJrT2fxxxAVfLy9/48Qhd62Ijt/x5DjO +p/c4vU1hff0Y91TA/C6eceXGxQUYLcw2QPSh34lyWLlsfDaiK+OnE6jL6jJWDpHL +Ljh7dJhY8CipKwBYtd/hsMR9wdtnUyf0P2aNHDFZ9LitgUT2N+lwFuGHza2J+QkO +sJndo0bNZCRbcfyaATpqQK50fLHpSOkIaA0uNf8IPsgRHnC9oHimIM70/VB4xN5K +8UJb02rVvAWsV2ZJEzhBXt3ivMwH6d+HKVerkjdhuKxUSO0x4ZfArZI0md6M2Etf +b2UbMOyyTknWiJcYD65151rThGeyKRSW1GalvrwWwcAAWyabf61a681brpY0t/u4 +aZXBdg41DiZ8bnheK706+l7hpCt/Zg3d+HPVtG8xWreFZzWMPNAmuVcdqtEOqdhe +aW6xDR/7ekdVjtzFboTVuq12JvMgucfXoYjqNA1TxvXIdl1eSWZzEnfXhDxDezpI +ZUXfR1jdmyEucFXAlkjVAoIBAQD094bU92/iTkV3ySqDqd1a9gdfMw9ewi18TzWM +fCtE+dWgvXE1RmzRmHaeKjElJaeAAc/ggXen1ecJmCRsD1wjItyP1/j/IzY/7yzK +KtLV3w3oArENFwZKa3HeaHsapwladJBVC0CpaRnfO52VKDa/1gvDQsojOkDhngCH +MKfRd5x4c4+WOuvTpfyQASLXJcurlIhabCa/MGIvhMRXcspGkkttZqxhLGoioAmS +VcKhje8UuDLnBxCFrmIOVl5Hf8wT4TfOYWq6J5F/v/32dq8Rdf/W8KRIq0N5JDnC +xFKGkm8QEFxNuSzzfykMcEhKGYXAYFKGIBRhM+nNGuFyqMB7AoIBAQDV/Oguqp74 +4inMMQTP8i3aYlASfU/l2OkNQE7JbxIClf52TzOCPy8v86FjIyx2iYPv2osI3RGi 
+yVdDIYPXOBEQZQV7eHS3kuO5/lFw6psQVjMf0qdE9hGuZx3MnXQgol7mUEDS7Rjc +e1YSkYPpdSd2y1Gsb84iZcbBgBPZsdjy81ZBrfP8bo3BVBtqQIngWCXvM3gWmBH/ +duNxtpvNBFruX2ZwaKcRJjfZ7V6iy2fzEYH84FPyiwj/COCtSfDgkFjCmEa2jepQ ++5r1VXFjeWwonfYeiwZ/WlIGDKhHNZG4XCni05CA2ZejDLmI3IKaMjukWNI49hhS +ypLJj5n9a8O9AoIBAHrPfbFXO6hhRUKAf+fySR9JKPj5SENhZhxkOZxsw+SYvBkR +4Kes3vFVVmKFFI5jCk6KmgUb4zkpa+LLe7cRHEghOiKDTDndFV2IMxRH09uVMAuZ +DdpaKLU2mKfACbea9n164sFAGvLT1jysTpxwxMH4NX13BpASrGRwKiOcqQXCyZc5 +mErgz0oUdUJcz3K9aBZlKNbsUaCYSSE5lpWg3vfycA4w40r0UWF6ilHq3ODRn4Lr +tdlNGWNrwY0ej7WYxF9TEf9Np4wcOj2pq1Dcv3gpiFHh4vrrobAiETMr6ZO95iBP +k3cD1x1cKuApipRbp0qC/9xuSMlSlWxWhaBOKfECggEAb0NdBka4+fe+e+lQ8z6d +ENvlfnehv4UVSEqTrLEP0EBlWua7hZGM24X1+DIlwEyoSWJ8wFMSBG5j7QfUIWeJ +l9ivDRAIwBqkReUyO2AA2HG5i2Zgir7XWrNLD0UfSIikh2RbEFEviBSpIGaBDDZa +Gq6E/P/1UnVQ7vPFXn/WqhxUUTo9jpd9JXSx/IEqL3gl4UYFvtm7IfWTNWEZiXQm +Q6NfBDumAoi4qZt+hW710bDcwbtyar5YIyNejzvO/zSOsj+zJOCNYSYx4DZZCrvr +vQLFIgRvkHBKDdMu/DeiWRWywbn3fMemzKSlI8BkOAC+eimkxPFQnFuwDxWXn+kU +kQKCAQEA0EkmMxpI8eC47YeHXKBiACMsRiyiItXal7TAwy/ZQMg/nxrDB85f5L8B +Zucw2UsA40/5EN/L6n10iaWBQzz7q3TcI15MmIzoBA7Bh9IJI9Oz1+oeOn01b9u0 +7cb+5Shh9Tmu2G4Z/9LE7wB8q9sU/BtJns6owFbGHL9q6ndtl7YVQ7uVzUQjNQph +qp2DvA8qZYBkNRNZwYAMOJaAO8uhDBSR2wm9XiZ3Q9kar0Tx6sR9WK2pjZTE3okJ +RUqzQS0EXQ2XLepP3rhYtjAjtlnx9K3jniIHciyFV2rW0SaGH4VvE/nRvCM12q/8 +p+5MT0G3vVdRQM0Poco3B8DElAoMTQ== +-----END PRIVATE KEY----- Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/tests/assets/exp/arcb.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/tests/assets/exp/arcb.bin differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pv/tests/assets/exp/exchange.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pv/tests/assets/exp/exchange.bin differ diff -Nru s390-tools-2.31.0/rust/pv/tests/cert_verifier.rs s390-tools-2.33.1/rust/pv/tests/cert_verifier.rs --- s390-tools-2.31.0/rust/pv/tests/cert_verifier.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv/tests/cert_verifier.rs 2024-05-28 08:26:36.000000000 +0200 @@ 
-2,9 +2,9 @@ // // Copyright IBM Corp. 2023 -use pv::request::CertVerifier; -use pv::test_utils::*; -use pv::{Error, HkdVerifyErrorType::*}; +use s390_pv::request::CertVerifier; +use s390_pv::test_utils::*; +use s390_pv::{Error, HkdVerifyErrorType::*}; use std::ffi::c_int; #[track_caller] @@ -30,90 +30,81 @@ #[test] fn verifier_new() { - let root_chn_crt = get_cert_asset_path_string("root_ca.chained.crt"); - let root_crt = get_cert_asset_path_string("root_ca.crt"); - let inter_crt = get_cert_asset_path_string("inter_ca.crt"); - let inter_fake_crt = get_cert_asset_path_string("fake_inter_ca.crt"); - let inter_fake_crl = get_cert_asset_path_string("fake_inter_ca.crl"); - let inter_crl = get_cert_asset_path_string("inter_ca.crl"); - let ibm_crt = get_cert_asset_path_string("ibm.crt"); - let ibm_early_crt = get_cert_asset_path_string("ibm_outdated_early.crl"); - let ibm_late_crt = get_cert_asset_path_string("ibm_outdated_late.crl"); - let ibm_rev_crt = get_cert_asset_path_string("ibm_rev.crt"); + let root_chn_crt = get_cert_asset_path("root_ca.chained.crt"); + let root_crt = get_cert_asset_path("root_ca.crt"); + let inter_crt = get_cert_asset_path("inter_ca.crt"); + let inter_fake_crt = get_cert_asset_path("fake_inter_ca.crt"); + let inter_fake_crl = get_cert_asset_path("fake_inter_ca.crl"); + let inter_crl = get_cert_asset_path("inter_ca.crl"); + let ibm_crt = get_cert_asset_path("ibm.crt"); + let ibm_early_crt = get_cert_asset_path("ibm_outdated_early.crl"); + let ibm_late_crt = get_cert_asset_path("ibm_outdated_late.crl"); + let ibm_rev_crt = get_cert_asset_path("ibm_rev.crt"); + let empty: [String; 0] = []; // Too many signing keys - let verifier = CertVerifier::new(&[ibm_crt.clone(), ibm_rev_crt.clone()], &[], &None, true); + let verifier = CertVerifier::new(&[&ibm_crt, &ibm_rev_crt], &empty, None::, true); assert!(matches!(verifier, Err(Error::HkdVerify(ManyIbmSignKeys)))); // No CRL for each X509 let verifier = CertVerifier::new( - &[inter_crt.clone(), 
ibm_crt.clone()], - &[inter_crl.clone()], - &Some(root_crt), + &[&inter_crt, &ibm_crt], + &[&inter_crl], + Some(&root_crt), false, ); verify_sign_error(3, verifier.unwrap_err()); - let verifier = CertVerifier::new( - &[inter_crt.clone(), ibm_crt.clone()], - &[], - &Some(root_chn_crt.clone()), - false, - ); + let verifier = CertVerifier::new(&[&inter_crt, &ibm_crt], &empty, Some(&root_chn_crt), false); verify_sign_error(3, verifier.unwrap_err()); // Wrong intermediate (or ibm key) let verifier = CertVerifier::new( - &[inter_fake_crt, ibm_crt.clone()], - &[inter_fake_crl], - &Some(root_chn_crt.clone()), + &[&inter_fake_crt, &ibm_crt], + &[&inter_fake_crl], + Some(&root_chn_crt), true, ); // Depending on the OpenSSL version different error codes can appear verify_sign_error_slice(&[20, 30], verifier.unwrap_err()); // Wrong root ca - let verifier = CertVerifier::new( - &[inter_crt.clone(), ibm_crt.clone()], - &[inter_crl.clone()], - &None, - true, - ); + let verifier = CertVerifier::new(&[&inter_crt, &ibm_crt], &[&inter_crl], None::, true); verify_sign_error(20, verifier.unwrap_err()); // Correct signing key + intermediate cert let _verifier = CertVerifier::new( - &[inter_crt.clone(), ibm_crt.clone()], - &[inter_crl.clone()], - &Some(root_chn_crt.clone()), + &[&inter_crt, &ibm_crt], + &[&inter_crl], + Some(&root_chn_crt), false, ) .unwrap(); // No intermediate key - let verifier = CertVerifier::new(&[ibm_crt], &[], &Some(root_chn_crt.clone()), false); + let verifier = CertVerifier::new(&[&ibm_crt], &empty, Some(&root_chn_crt), false); verify_sign_error(20, verifier.unwrap_err()); // IBM Sign outdated let verifier = CertVerifier::new( - &[inter_crt.clone(), ibm_early_crt], - &[inter_crl.clone()], - &Some(root_chn_crt.clone()), + &[&inter_crt, &ibm_early_crt], + &[&inter_crl], + Some(&root_chn_crt), false, ); assert!(matches!(verifier, Err(Error::HkdVerify(NoIbmSignKey)))); let verifier = CertVerifier::new( - &[inter_crt.clone(), ibm_late_crt], - &[inter_crl.clone()], - 
&Some(root_chn_crt.clone()), + &[&inter_crt, &ibm_late_crt], + &[&inter_crl], + Some(&root_chn_crt), false, ); assert!(matches!(verifier, Err(Error::HkdVerify(NoIbmSignKey)))); // Revoked let verifier = CertVerifier::new( - &[inter_crt, ibm_rev_crt], - &[inter_crl], - &Some(root_chn_crt), + &[&inter_crt, &ibm_rev_crt], + &[&inter_crl], + Some(&root_chn_crt), false, ); verify_sign_error(23, verifier.unwrap_err()); diff -Nru s390-tools-2.31.0/rust/pvapconfig/build.rs s390-tools-2.33.1/rust/pvapconfig/build.rs --- s390-tools-2.31.0/rust/pvapconfig/build.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvapconfig/build.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 +// it under the terms of the MIT license. See LICENSE for details. + +use clap::{CommandFactory, ValueEnum}; +use clap_complete::{generate_to, Shell}; +use std::env; +use std::io::Error; + +include!("src/cli.rs"); + +fn main() -> Result<(), Error> { + let outdir = env::var_os("OUT_DIR").unwrap(); + let crate_name = env!("CARGO_PKG_NAME"); + let mut cmd = Cli::command(); + for &shell in Shell::value_variants() { + generate_to(shell, &mut cmd, crate_name, &outdir)?; + } + + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-changed=src/cli.rs"); + Ok(()) +} diff -Nru s390-tools-2.31.0/rust/pvapconfig/Cargo.toml s390-tools-2.33.1/rust/pvapconfig/Cargo.toml --- s390-tools-2.31.0/rust/pvapconfig/Cargo.toml 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvapconfig/Cargo.toml 2024-05-28 08:26:36.000000000 +0200 @@ -2,7 +2,7 @@ name = "pvapconfig" description = "A tool to configure the AP resources inside a SE guest based on UV secrets and an AP config file." 
authors = ["Harald Freudenberger "] -version = "0.9.0" +version = "0.10.0" edition.workspace = true license.workspace = true @@ -10,10 +10,14 @@ clap = { version ="4.1", features = ["derive", "wrap_help"]} lazy_static = "1.1" openssl = { version = "0.10" } -openssl-sys = { version = "0.9" } -pv_core = { path = "../pv_core"} +pv_core = { path = "../pv_core", package = "s390_pv_core"} rand = "0.8" -regex = "1" -serde = { version = "1.0", features = ["derive"] } +regex = "1.7" +serde = { version = "1.0.139", features = ["derive"] } serde_yaml = "0.9" utils = { path = "../utils" } + +[build-dependencies] +clap = { version ="4.1", features = ["derive", "wrap_help"]} +clap_complete = "4.1" +lazy_static = "1.1" diff -Nru s390-tools-2.31.0/rust/pvapconfig/man/pvapconfig.1 s390-tools-2.33.1/rust/pvapconfig/man/pvapconfig.1 --- s390-tools-2.31.0/rust/pvapconfig/man/pvapconfig.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvapconfig/man/pvapconfig.1 2024-05-28 08:26:36.000000000 +0200 @@ -34,7 +34,7 @@ feature. .TP 3 3. Read in and validate the AP configuration file. By default if not -overwritten by the --config option the AP configuration is read from +overwritten by the \-\-config option the AP configuration is read from .I /etc/pvapconfig.yaml and syntactically verified. See section CONFIGFILE for details about the syntax and semantic of the configuration file. @@ -71,31 +71,31 @@ successfully. .SH OPTIONS .TP 8 -.B -c, --config +.B \-c, \-\-config Use as the AP config file for pvapconfig. If pvapconfig is run without this option the default configuration file /etc/pvapconfig.yaml is used. .TP 8 -.B -h, --help +.B \-h, \-\-help Print pvapconfig usage information and exit. .TP 8 -.B -n, --dry-run +.B \-n, \-\-dry\-run Do not bind, unbind or associate APQNs but only process the configuration and the available APQNs and secrets and simulate the bind, unbind or associate action on the chosen APQN. 
Use it together with the verbose option to see which actions pvapconfig would do if unleashed. .TP 8 -.B -s, --strict +.B \-s, \-\-strict All AP config entries need to be satisfied to have pvapconfig terminate with success. Without this option one applied AP config entry is enough to meet the expectations. .TP 8 -.B -v, --verbose +.B \-v, \-\-verbose Print out informational messages about what pvapconfig is actually doing. .TP 8 -.B -V, --version +.B \-V, \-\-version Print version information and exit. .SH CONFIGFILE The pvapconfig yaml configuration file consists of a list of AP config diff -Nru s390-tools-2.31.0/rust/pvapconfig/src/cli.rs s390-tools-2.33.1/rust/pvapconfig/src/cli.rs --- s390-tools-2.31.0/rust/pvapconfig/src/cli.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvapconfig/src/cli.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,7 +2,6 @@ // // Copyright IBM Corp. 2023 // -//! Command line interface for pvapconfig // use clap::Parser; @@ -11,6 +10,7 @@ /// The default pvapconfig config file pub const PATH_DEFAULT_CONFIG_FILE: &str = "/etc/pvapconfig.yaml"; +/// Command line interface for pvapconfig #[derive(Parser, Clone)] pub struct Cli { /// Provide a custom config file (overwrites default /etc/pvapconfig.yaml). 
diff -Nru s390-tools-2.31.0/rust/pvapconfig/src/config.rs s390-tools-2.33.1/rust/pvapconfig/src/config.rs --- s390-tools-2.31.0/rust/pvapconfig/src/config.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvapconfig/src/config.rs 2024-05-28 08:26:36.000000000 +0200 @@ -42,7 +42,8 @@ } // if name is given, calculate sha256 digest for this name // test for the hash calculated here can be done with openssl: - // echo -n "Hello" >in.bin; openssl dgst -sha256 -binary -out out.bin in.bin; hexdump -C out.bin + // echo -n "Hello" >in.bin; openssl dgst -sha256 -binary -out out.bin in.bin; hexdump -C + // out.bin if self.name.is_empty() { return Ok(()); } diff -Nru s390-tools-2.31.0/rust/pvapconfig/src/helper.rs s390-tools-2.33.1/rust/pvapconfig/src/helper.rs --- s390-tools-2.31.0/rust/pvapconfig/src/helper.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvapconfig/src/helper.rs 2024-05-28 08:26:36.000000000 +0200 @@ -199,7 +199,7 @@ #[cfg(test)] mod tests { use super::*; - use pv_core::misc::TemporaryDirectory; + use utils::TemporaryDirectory; // Only very simple tests diff -Nru s390-tools-2.31.0/rust/pvapconfig/src/main.rs s390-tools-2.33.1/rust/pvapconfig/src/main.rs --- s390-tools-2.31.0/rust/pvapconfig/src/main.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvapconfig/src/main.rs 2024-05-28 08:26:36.000000000 +0200 @@ -18,7 +18,7 @@ use helper::{LockFile, PATH_PVAPCONFIG_LOCK}; use pv_core::uv::{ListableSecretType, SecretList}; use std::process::ExitCode; -use utils::release_string; +use utils::print_version; /// Simple macro for /// if Cli::verbose() { @@ -58,11 +58,7 @@ fn main() -> ExitCode { // handle version option if cli::ARGS.version { - println!( - "{} version {}\nCopyright IBM Corp. 
2023", - env!("CARGO_PKG_NAME"), - release_string!() - ); + print_version!(0, "2023"); return ExitCode::SUCCESS; } @@ -528,8 +524,8 @@ fn make_assoc_secretentry(idx: u16, hexidstr: &str) -> SecretEntry { let id = hexstring_to_u8(hexidstr); let idlen: u32 = id.len().try_into().unwrap(); - let idarray = <&[u8; 32]>::try_from(id.as_slice()).unwrap(); - SecretEntry::new(idx, ListableSecretType::Association, *idarray, idlen) + let idarray: [u8; 32] = id.try_into().unwrap(); + SecretEntry::new(idx, ListableSecretType::Association, idarray.into(), idlen) } fn make_test_secrets() -> Vec { @@ -607,13 +603,11 @@ #[test] fn test_do_ap_config_invocation_1() { let test_apqns = make_test_apqns(); - let mut apqns: Vec = Vec::new(); - apqns.push(test_apqns[0].clone()); + let apqns: Vec = vec![test_apqns[0].clone()]; let secrets: Vec = Vec::new(); let secretlist = SecretList::new(secrets.len() as u16, secrets); let test_apconfigs = make_test_apconfigs(); - let mut apconfig: Vec = Vec::new(); - apconfig.push(test_apconfigs[0].clone()); + let apconfig: Vec = vec![test_apconfigs[0].clone()]; let apcfglist = ApConfigList::from_apconfigentry_vec(apconfig); let mut apqnlist = ApqnList::from_apqn_vec(apqns); let r = do_ap_config(&mut apqnlist, &secretlist, &apcfglist, true); @@ -625,16 +619,12 @@ #[test] fn test_do_ap_config_invocation_2() { let test_apqns = make_test_apqns(); - let mut apqns: Vec = Vec::new(); - apqns.push(test_apqns[1].clone()); + let apqns: Vec = vec![test_apqns[1].clone()]; let mut secrets = make_test_secrets(); - while secrets.len() > 2 { - secrets.pop(); - } + secrets.truncate(2); let secretlist = SecretList::new(secrets.len() as u16, secrets); let test_apconfigs = make_test_apconfigs(); - let mut apconfig: Vec = Vec::new(); - apconfig.push(test_apconfigs[1].clone()); + let apconfig: Vec = vec![test_apconfigs[1].clone()]; let apcfglist = ApConfigList::from_apconfigentry_vec(apconfig); let mut apqnlist = ApqnList::from_apqn_vec(apqns); let r = do_ap_config(&mut 
apqnlist, &secretlist, &apcfglist, true); diff -Nru s390-tools-2.31.0/rust/pvattest/build.rs s390-tools-2.33.1/rust/pvattest/build.rs --- s390-tools-2.31.0/rust/pvattest/build.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/build.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 +// it under the terms of the MIT license. See LICENSE for details. + +use clap::CommandFactory; +use clap_complete::{generate_to, Shell}; +use std::env; +use std::io::Error; + +include!("src/cli.rs"); + +fn main() -> Result<(), Error> { + let outdir = env::var_os("OUT_DIR").unwrap(); + let crate_name = env!("CARGO_PKG_NAME"); + let mut cmd = CliOptions::command(); + for &shell in Shell::value_variants() { + generate_to(shell, &mut cmd, crate_name, &outdir)?; + } + + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-changed=src/cli.rs"); + println!("cargo:rerun-if-changed=../utils/src/cli.rs"); + Ok(()) +} diff -Nru s390-tools-2.31.0/rust/pvattest/Cargo.toml s390-tools-2.33.1/rust/pvattest/Cargo.toml --- s390-tools-2.31.0/rust/pvattest/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/Cargo.toml 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,24 @@ +[package] +name = "pvattest" +version = "0.10.0" +edition.workspace = true +license.workspace = true + +[dependencies] +anyhow = { version = "1.0.70", features = ["std"] } +byteorder = "1.3" +clap = { version ="4.1", features = ["derive", "wrap_help"]} +log = { version = "0.4.6", features = ["std", "release_max_level_debug"] } +serde_yaml = "0.9" +serde = { version = "1.0.139", features = ["derive"]} +zerocopy = { version="0.7", features = ["derive"] } + +pv = { path = "../pv", package = "s390_pv" } +utils = { path = "../utils" } + +[build-dependencies] +clap = { version ="4.1", features = ["derive", "wrap_help"]} +clap_complete = "4.1" +log = { version = "0.4", features = ["std", 
"release_max_level_debug"] } + +utils = { path = "../utils" } diff -Nru s390-tools-2.31.0/rust/pvattest/man/pvattest.1 s390-tools-2.33.1/rust/pvattest/man/pvattest.1 --- s390-tools-2.31.0/rust/pvattest/man/pvattest.1 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/man/pvattest.1 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,114 @@ +.\" Copyright 2024 IBM Corp. +.\" s390-tools is free software; you can redistribute it and/or modify +.\" it under the terms of the MIT license. See LICENSE for details. +.\" + +.TH pvattest 1 "2024-05-15" "s390-tools" "Attestation Manual" +.nh +.ad l +.SH NAME +\fBpvattest\fP - create, perform, and verify attestation measurements +\fB +.SH SYNOPSIS +.nf +.fam C +pvattest [OPTIONS] +.fam C +.fi +.SH DESCRIPTION +Create, perform, and verify attestation measurements for IBM Secure Execution +guest systems. +.SH "PVATTEST COMMANDS" +.PP + +\fBcreate\fR +.RS 4 +Create an attestation measurement request +.RE + +.PP + +\fBperform\fR +.RS 4 +Send the attestation request to the Ultravisor +.RE + +.PP + +\fBverify\fR +.RS 4 +Verify an attestation response +.RE + +.PP + +\fBversion\fR +.RS 4 +Print version information and exit +.RE + +.SH OPTIONS +.PP +\-v, \-\-verbose +.RS 4 +Provide more detailed output. +.RE +.RE +.PP +\-\-version +.RS 4 +Print version information and exit. +.RE +.RE +.PP +\-h, \-\-help +.RS 4 +Print help (see a summary with '-h'). +.RE +.RE + +.SH EXAMPLES +For details refer to the man page of the command. +.PP +Create the request on a trusted system. +.PP +.nf +.fam C + trusted:~$ pvattest create \-k hkd.crt \-\-cert CA.crt \-\-cert ibmsk.crt \-\-arpk arp.key \-o attreq.bin + +.fam T +.fi +On the SE-guest, \fIperform\fP the attestation. +.PP +.nf +.fam C + seguest:~$ pvattest perform attreq.bin attresp.bin + +.fam T +.fi +On a trusted system, \fIverify\fP that the response is correct. Here, the protection key from the creation and the SE-guest’s header is used to \fIverify\fP the measurement. 
+.PP +.nf +.fam C + trusted:~$ pvattest verify \-i attresp.bin \-\-arpk arp.key \-\-hdr se_guest.hdr + trusted:~$ echo $? + 0 + +.fam T +.fi + +If the measurements do not match \fBpvattest\fP exits with code 2 and emits an error message. The SE-guest attestation failed. +.PP +.nf +.fam C + trusted:~$ pvattest verify \-i wrongresp.bin \-\-arpk arp.key \-\-hdr se_guest.hdr + ERROR: Attestation measurement verification failed: + Calculated and received attestation measurement are not the same. + trusted:~$ echo $? + 2 + +.fam T +.fi +.SH "SEE ALSO" +.sp +\fBpvattest-create\fR(1) \fBpvattest-perform\fR(1) \fBpvattest-verify\fR(1) diff -Nru s390-tools-2.31.0/rust/pvattest/man/pvattest-create.1 s390-tools-2.33.1/rust/pvattest/man/pvattest-create.1 --- s390-tools-2.31.0/rust/pvattest/man/pvattest-create.1 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/man/pvattest-create.1 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,132 @@ +.\" Copyright 2024 IBM Corp. +.\" s390-tools is free software; you can redistribute it and/or modify +.\" it under the terms of the MIT license. See LICENSE for details. +.\" + +.TH pvattest-create 1 "2024-05-15" "s390-tools" "Attestation Manual" +.nh +.ad l +.SH NAME +\fBpvattest create\fP - Create an attestation measurement request +\fB +.SH SYNOPSIS +.nf +.fam C +pvattest create [OPTIONS] --host-key-document --output --arpk <--no-verify|--cert > +.fam C +.fi +.SH DESCRIPTION +Create attestation measurement requests to attest an IBM Secure Execution guest. +Only build attestation requests in a trusted environment such as your +Workstation. To avoid compromising the attestation do not publish the +attestation request protection key and shred it after verification. Every +'create' will generate a new, random protection key. +.SH OPTIONS +.PP +\-k, \-\-host-key-document +.RS 4 +Use FILE as a host-key document. Can be specified multiple times and must be +used at least once. 
+.RE +.RE +.PP +\-\-no-verify +.RS 4 +Disable the host-key document verification. Does not require the host-key +documents to be valid. Do not use for a production request unless you verified +the host-key document beforehand. +.RE +.RE +.PP +\-C, \-\-cert +.RS 4 +Use FILE as a certificate to verify the host key or keys. The certificates are +used to establish a chain of trust for the verification of the host-key +documents. Specify this option twice to specify the IBM Z signing key and the +intermediate CA certificate (signed by the root CA). +.RE +.RE +.PP +\-\-crl +.RS 4 +Use FILE as a certificate revocation list. The list is used to check whether a +certificate of the chain of trust is revoked. Specify this option multiple times +to use multiple CRLs. +.RE +.RE +.PP +\-\-offline +.RS 4 +Make no attempt to download CRLs. +.RE +.RE +.PP +\-\-root-ca +.RS 4 +Use FILE as the root-CA certificate for the verification. If omitted, the system +wide-root CAs installed on the system are used. Use this only if you trust the +specified certificate. +.RE +.RE +.PP +\-o, \-\-output +.RS 4 +Write the generated request to FILE. +.RE +.RE +.PP +\-\-arpk +.RS 4 +Save the protection key as unencrypted GCM-AES256 key in FILE Do not publish +this key, otherwise your attestation is compromised. +.RE +.RE +.PP +\-\-add-data +.RS 4 +Specify-additional data for the request. Additional data is provided by the +Ultravisor and returned during the attestation request and is covered by the +attestation measurement. Can be specified multiple times. Optional. + +Possible values: +.RS 4 +- \fBphkh-img\fP: Request the public host-key-hash of the key that decrypted the SE-image as additional-data. + +- \fBphkh-att\fP: Request the public host-key-hash of the key that decrypted the attestation request as additional-data. + +.RE +.RE +.PP +\-v, \-\-verbose +.RS 4 +Provide more detailed output. +.RE +.RE +.PP +\-h, \-\-help +.RS 4 +Print help (see a summary with '-h'). 
+.RE +.RE + +.SH EXAMPLES +Create an attestation request with the protection key 'arp.key', write the request to 'arcb.bin', and verify the host-key document using the CA-signed key 'DigiCertCA.crt' and the intermediate key 'IbmSigningKey.crt'. +.PP +.nf +.fam C + $ pvattest create \-k hkd.crt -\-\arpk arp.key \-o attreq.bin \-\-cert DigiCertCA.crt \-\-cert IbmSigningKey.crt + +.fam T +.fi +Create an attestation request with the protection key 'arp.key', write the request to 'arcb.bin', verify the host-key document using the CA-signed key 'DigiCertCA.crt' and the intermediate key 'IbmSigningKey.crt', and instead of downloading the certificate revocation list use certificate revocation lists 'DigiCertCA.crl', 'IbmSigningKey.crl', and 'rootCA.crl'. +.PP +.nf +.fam C + $ pvattest create \-k hkd.crt \-\-arpk arp.key \-o attreq.bin \-\-cert DigiCertCA.crt \-\-cert IbmSigningKey.crt \-\-offline \-\-crl DigiCertCA.crl \-\-crl IbmSigningKey.crl \-\-crl rootCA.crl + + +.fam T +.fi +.SH "SEE ALSO" +.sp +\fBpvattest\fR(1) diff -Nru s390-tools-2.31.0/rust/pvattest/man/pvattest-perform.1 s390-tools-2.33.1/rust/pvattest/man/pvattest-perform.1 --- s390-tools-2.31.0/rust/pvattest/man/pvattest-perform.1 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/man/pvattest-perform.1 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,71 @@ +.\" Copyright 2024 IBM Corp. +.\" s390-tools is free software; you can redistribute it and/or modify +.\" it under the terms of the MIT license. See LICENSE for details. +.\" + +.TH pvattest-perform 1 "2024-05-15" "s390-tools" "Attestation Manual" +.nh +.ad l +.SH NAME +\fBpvattest perform\fP - Send the attestation request to the Ultravisor +\fB +.SH SYNOPSIS +.nf +.fam C +pvattest perform [OPTIONS] [INPUT] [OUTPUT] +.fam C +.fi +.SH DESCRIPTION +Run a measurement of this system through ’/dev/uv’. This device must be +accessible and the attestation Ultravisor facility must be present. 
The input +must be an attestation request created with ’pvattest create’. Output will +contain the original request and the response from the Ultravisor. +.SH OPTIONS +.PP + +.RS 4 +Specify the request to be sent. +.RE +.RE +.PP + +.RS 4 +Write the result to FILE. +.RE +.RE + +.PP +\-u, \-\-user-data +.RS 4 +Provide up to 256 bytes of user input User-data is arbitrary user-defined data +appended to the Attestation measurement. It is verified during the Attestation +measurement verification. May be any arbitrary data, as long as it is less or +equal to 256 bytes +.RE +.RE +.PP +\-v, \-\-verbose +.RS 4 +Provide more detailed output. +.RE +.RE +.PP +\-h, \-\-help +.RS 4 +Print help (see a summary with '-h'). +.RE +.RE + +.SH EXAMPLES +Perform an attestation measurement with the attestation request 'attreq.bin' and write the output to 'attresp.bin'. +.PP +.nf +.fam C + $ pvattest perform attreq.bin attresp.bin + + +.fam T +.fi +.SH "SEE ALSO" +.sp +\fBpvattest\fR(1) diff -Nru s390-tools-2.31.0/rust/pvattest/man/pvattest-verify.1 s390-tools-2.33.1/rust/pvattest/man/pvattest-verify.1 --- s390-tools-2.31.0/rust/pvattest/man/pvattest-verify.1 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/man/pvattest-verify.1 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,124 @@ +.\" Copyright 2024 IBM Corp. +.\" s390-tools is free software; you can redistribute it and/or modify +.\" it under the terms of the MIT license. See LICENSE for details. +.\" + +.TH pvattest-verify 1 "2024-05-15" "s390-tools" "Attestation Manual" +.nh +.ad l +.SH NAME +\fBpvattest verify\fP - Verify an attestation response +\fB +.SH SYNOPSIS +.nf +.fam C +pvattest verify [OPTIONS] --input --hdr --arpk +.fam C +.fi +.SH DESCRIPTION +Verify that a previously generated attestation measurement of an IBM Secure +Execution guest is as expected. Only verify attestation requests in a trusted +environment, such as your workstation. 
Input must contain the response as +produced by ’pvattest perform’. The protection key must be the one that was +used to create the request by ’pvattest create’. Shred the protection key +after the verification. The header must be the IBM Secure Execution header of +the image that was attested during ’pvattest perform’ +.SH OPTIONS +.PP +\-i, \-\-input +.RS 4 +Specify the attestation request to be verified. +.RE +.RE +.PP +\-o, \-\-output +.RS 4 +Specify the output for the verification result. +.RE +.RE +.PP +\-\-hdr +.RS 4 +Specifies the header of the guest image. Can be an IBM Secure Execution image +created by genprotimg or an extracted IBM Secure Execution header. The header +must start at a page boundary. +.RE +.RE +.PP +\-\-arpk +.RS 4 +Use FILE as the protection key to decrypt the request Do not publish this key, +otherwise your attestation is compromised. Delete this key after verification. +.RE +.RE +.PP +\-\-format +.RS 4 +Define the output format. +[default: 'yaml'] + +Possible values: +.RS 4 +- \fByaml\fP: Use yaml format. + +.RE +.RE +.PP +\-u, \-\-user-data +.RS 4 +Write the user data to the FILE if any. Writes the user data, if the response +contains any, to FILE The user-data is part of the attestation measurement. If +the user-data is written to FILE the user-data was part of the measurement and +verified. Emits a warning if the response contains no user-data +.RE +.RE +.PP +\-v, \-\-verbose +.RS 4 +Provide more detailed output. +.RE +.RE +.PP +\-h, \-\-help +.RS 4 +Print help (see a summary with '-h'). +.RE +.RE + +.SH EXIT STATUS +.TP 8 +.B 0 - Attestation Verified +Attesatation measurement verified successfully. Measured guest is in Secure Execution mode. +.RE + +.TP 8 +.B 1 - Program Error +Something went wrong during the local calculation or receiving of the measurement value. Refer to the error message. +.RE + +.TP 8 +.B 2 - Attestation NOT Verified +Attesation measurement calculation does not match the received value. 
Measured guest is very likely not in Secure Execution mode. +.RE +.SH EXAMPLES +To verify a measurement in 'measurement.bin' with the protection key 'arp.kep' and SE-guest header 'se_guest.hdr'. +.PP +.nf +.fam C + $ pvattest verify --input attresp.bin --arpk arp.key --hdr se_guest.hdr + +.fam T +.fi +If the verification was successful the program exists with zero. +If the verification failed it exists with 2 and prints the following to stderr: +.PP +.nf +.fam C + ERROR: Attestation measurement verification failed: + Calculated and received attestation measurement are not the same. + +.fam T +.fi +.SH "SEE ALSO" +.sp +\fBpvattest\fR(1) diff -Nru s390-tools-2.31.0/rust/pvattest/README.md s390-tools-2.33.1/rust/pvattest/README.md --- s390-tools-2.31.0/rust/pvattest/README.md 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/README.md 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,261 @@ + +# pvattest +## Synopsis +`pvattest [OPTIONS] ` +## Description +create, perform, and verify attestation measurements Create, perform, and verify +attestation measurements for IBM Secure Execution guest systems. +## Commands Overview +- **create** +

    +Create an attestation measurement request +
+ +- **perform** +
    +Send the attestation request to the Ultravisor +
+ +- **verify** +
    +Verify an attestation response +
+ +- **version** +
    +Print version information and exit +
+ +## Options + +`-v`, `--verbose` +
    +Provide more detailed output +
+ + +`--version` +
    +Print version information and exit +
+ + +`-h`, `--help` +
    +Print help (see a summary with '-h') +
+ + +## pvattest create +### Synopsis +`pvattest create [OPTIONS] --host-key-document --output --arpk <--no-verify|--cert >` +### Description +Create an attestation measurement request Create attestation measurement +requests to attest an IBM Secure Execution guest. Only build attestation +requests in a trusted environment such as your Workstation. To avoid +compromising the attestation do not publish the attestation request protection +key and shred it after verification. Every 'create' will generate a new, random +protection key. +### Options + +`-k`, `--host-key-document ` +
    +Use FILE as a host-key document. Can be specified multiple times and must be +used at least once. +
+ + +`--no-verify` +
    +Disable the host-key document verification. Does not require the host-key +documents to be valid. Do not use for a production request unless you verified +the host-key document beforehand. +
+ + +`-C`, `--cert ` +
    +Use FILE as a certificate to verify the host key or keys. The certificates are +used to establish a chain of trust for the verification of the host-key +documents. Specify this option twice to specify the IBM Z signing key and the +intermediate CA certificate (signed by the root CA). +
+ + +`--crl ` +
    +Use FILE as a certificate revocation list. The list is used to check whether a +certificate of the chain of trust is revoked. Specify this option multiple times +to use multiple CRLs. +
+ + +`--offline` +
    +Make no attempt to download CRLs +
+ + +`--root-ca ` +
    +Use FILE as the root-CA certificate for the verification. If omitted, the system +wide-root CAs installed on the system are used. Use this only if you trust the +specified certificate. +
+ + +`-o`, `--output ` +
    +Write the generated request to FILE +
+ + +`--arpk ` +
    +Save the protection key as unencrypted GCM-AES256 key in FILE Do not publish +this key, otherwise your attestation is compromised. +
+ + +`--add-data ` +
    +Specify-additional data for the request. Additional data is provided by the +Ultravisor and returned during the attestation request and is covered by the +attestation measurement. Can be specified multiple times. Optional. + Possible values: + - **phkh-img**: Request the public host-key-hash of the key that decrypted the SE-image as additional-data + - **phkh-att**: Request the public host-key-hash of the key that decrypted the attestation request as additional-data +
+ + +`-v`, `--verbose` +
    +Provide more detailed output +
+ + +`-h`, `--help` +
    +Print help (see a summary with '-h') +
+ + +## pvattest perform +### Synopsis +`pvattest perform [OPTIONS] [INPUT] [OUTPUT]` +### Description +Send the attestation request to the Ultravisor Run a measurement of this system +through ’/dev/uv’. This device must be accessible and the attestation +Ultravisor facility must be present. The input must be an attestation request +created with ’pvattest create’. Output will contain the original request and +the response from the Ultravisor. +### Arguments + +`` +
    +Specify the request to be sent +
+ + +`` +
    +Write the result to FILE +
+ + +### Options + +`-u`, `--user-data ` +
    +Provide up to 256 bytes of user input User-data is arbitrary user-defined data +appended to the Attestation measurement. It is verified during the Attestation +measurement verification. May be any arbitrary data, as long as it is less or +equal to 256 bytes +
+ + +`-v`, `--verbose` +
    +Provide more detailed output +
+ + +`-h`, `--help` +
    +Print help (see a summary with '-h') +
+ + +## pvattest verify +### Synopsis +`pvattest verify [OPTIONS] --input --hdr --arpk ` +### Description +Verify an attestation response Verify that a previously generated attestation +measurement of an IBM Secure Execution guest is as expected. Only verify +attestation requests in a trusted environment, such as your workstation. Input +must contain the response as produced by ’pvattest perform’. The protection +key must be the one that was used to create the request by ’pvattest create’. +Shred the protection key after the verification. The header must be the IBM +Secure Execution header of the image that was attested during ’pvattest +perform’ +### Options + +`-i`, `--input ` +
    +Specify the attestation request to be verified +
+ + +`-o`, `--output ` +
    +Specify the output for the verification result +
+ + +`--hdr ` +
    +Specifies the header of the guest image. Can be an IBM Secure Execution image +created by genprotimg or an extracted IBM Secure Execution header. The header +must start at a page boundary. +
+ + +`--arpk ` +
    +Use FILE as the protection key to decrypt the request. Do not publish this key, +otherwise your attestation is compromised. Delete this key after verification. +
+ + +`--format ` +
    +Define the output format + Default value: 'yaml' + Possible values: + - **yaml**: Use yaml format +
+ + +`-u`, `--user-data ` +
    +Write the user data to the FILE if any. Writes the user data, if the response +contains any, to FILE. The user-data is part of the attestation measurement. If +the user-data is written to FILE the user-data was part of the measurement and +verified. Emits a warning if the response contains no user-data +
+ + +`-v`, `--verbose` +
    +Provide more detailed output +
+ + +`-h`, `--help` +
    +Print help (see a summary with '-h') +
diff -Nru s390-tools-2.31.0/rust/pvattest/src/cli.rs s390-tools-2.33.1/rust/pvattest/src/cli.rs --- s390-tools-2.31.0/rust/pvattest/src/cli.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/src/cli.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 + +use clap::{Args, Parser, Subcommand, ValueEnum, ValueHint}; +use log::warn; +use utils::CertificateOptions; + +/// create, perform, and verify attestation measurements +/// +/// Create, perform, and verify attestation measurements for IBM Secure Execution guest systems. +#[derive(Parser, Debug)] +pub struct CliOptions { + /// Provide more detailed output + #[arg(short='v', long, action = clap::ArgAction::Count)] + verbose: u8, + + /// Deprecated short verbose flag (-V) form the C implementation. + /// + /// If specified a deprecation warning is emitted, + #[arg(short = 'V', hide = true, action = clap::ArgAction::Count)] + verbose_deprecated: u8, + + /// Print version information and exit + #[arg(long)] + pub version: bool, + + #[command(subcommand)] + pub cmd: Command, +} + +impl CliOptions { + pub fn verbosity(&self) -> u8 { + let verbose_deprecated = self.verbose_deprecated + + match &self.cmd { + Command::Create(cmd) => cmd.verbose_deprecated, + Command::Perform(cmd) => cmd.verbose_deprecated, + Command::Verify(cmd) => cmd.verbose_deprecated, + Command::Version => 0, + }; + if verbose_deprecated > 0 { + warn!("WARNING: Use of deprecated flag '-V'. Use '-v' or '--verbose' instead.") + } + verbose_deprecated + + self.verbose + + match &self.cmd { + Command::Create(cmd) => cmd.verbose, + Command::Perform(cmd) => cmd.verbose, + Command::Verify(cmd) => cmd.verbose, + Command::Version => 0, + } + } +} + +#[derive(Subcommand, Debug)] +pub enum Command { + /// Create an attestation measurement request + /// + /// Create attestation measurement requests to attest an IBM Secure Execution guest. 
Only build + /// attestation requests in a trusted environment such as your Workstation. To avoid + /// compromising the attestation do not publish the attestation request protection key and + /// shred it after verification. Every 'create' will generate a new, random protection key. + Create(Box), + + /// Send the attestation request to the Ultravisor + /// + /// Run a measurement of this system through ’/dev/uv’. This device must be accessible and the + /// attestation Ultravisor facility must be present. The input must be an attestation request + /// created with ’pvattest create’. Output will contain the original request and the response + /// from the Ultravisor. + Perform(PerformAttOpt), + + /// Verify an attestation response + /// + /// Verify that a previously generated attestation measurement of an IBM Secure Execution guest + /// is as expected. Only verify attestation requests in a trusted environment, such as your + /// workstation. Input must contain the response as produced by ’pvattest perform’. The + /// protection key must be the one that was used to create the request by ’pvattest create’. + /// Shred the protection key after the verification. The header must be the IBM Secure + /// Execution header of the image that was attested during ’pvattest perform’ + Verify(VerifyOpt), + + /// Print version information and exit. + #[command(aliases(["--version"]), hide(true))] + Version, +} + +#[derive(Args, Debug)] +pub struct CreateAttOpt { + #[command(flatten)] + pub certificate_args: CertificateOptions, + + /// Write the generated request to FILE. + #[arg(short, long, value_name = "FILE", value_hint = ValueHint::FilePath,)] + pub output: String, + + /// Save the protection key as unencrypted GCM-AES256 key in FILE + /// + /// Do not publish this key, otherwise your attestation is compromised. + #[arg(long, value_name = "FILE", value_hint = ValueHint::FilePath,)] + pub arpk: String, + + /// Specify-additional data for the request. 
+ /// + /// Additional data is provided by the Ultravisor and returned during the attestation request + /// and is covered by the attestation measurement. Can be specified multiple times. + /// Optional. + #[arg(long, value_name = "FLAGS")] + pub add_data: Vec, + + /// Provide more detailed output. + #[arg(short='v', long, action = clap::ArgAction::Count)] + verbose: u8, + + /// Deprecated short verbose flag (-V) form the C implementation. + /// + /// If specified a deprecation warning is emitted, + #[arg(short = 'V', hide = true, action = clap::ArgAction::Count)] + verbose_deprecated: u8, +} + +#[derive(Debug, ValueEnum, Clone, Copy)] +pub enum AttAddFlags { + /// Request the public host-key-hash of the key that decrypted the SE-image as additional-data + PhkhImg, + /// Request the public host-key-hash of the key that decrypted the attestation request as + /// additional-data + PhkhAtt, +} + +// all members s390x only +#[derive(Args, Debug)] +pub struct PerformAttOpt { + /// Specify the request to be sent. + #[cfg(target_arch = "s390x")] + #[arg(hide=true, short, long, value_name = "FILE", value_hint = ValueHint::FilePath,)] + pub input: Option, + + /// Specify the request to be sent. + #[cfg(target_arch = "s390x")] + #[arg(value_name = "INPUT", value_hint = ValueHint::FilePath,required_unless_present("input"), conflicts_with("input"))] + pub input_pos: Option, + + /// Write the result to FILE. + #[cfg(target_arch = "s390x")] + #[arg(hide=true, short, long, value_name = "FILE", value_hint = ValueHint::FilePath,)] + pub output: Option, + + /// Write the result to FILE. + #[arg( value_name = "OUTPUT", value_hint = ValueHint::FilePath,required_unless_present("output"), conflicts_with("output"))] + #[cfg(target_arch = "s390x")] + pub output_pos: Option, + + /// Provide up to 256 bytes of user input + /// + /// User-data is arbitrary user-defined data appended to the Attestation measurement. + /// It is verified during the Attestation measurement verification. 
+ /// May be any arbitrary data, as long as it is less or equal to 256 bytes + #[arg(short, long, value_name = "File", value_hint = ValueHint::FilePath,)] + pub user_data: Option, + + /// Provide more detailed output. + #[arg(short='v', long, action = clap::ArgAction::Count)] + verbose: u8, + + /// Deprecated short verbose flag (-V) form the C implementation. + /// + /// If specified a deprecation warning is emitted, + #[arg(short = 'V', hide = true, action = clap::ArgAction::Count)] + verbose_deprecated: u8, +} + +#[cfg(target_arch = "s390x")] +pub struct PerformAttOptComb<'a> { + pub input: &'a str, + pub output: &'a str, + pub user_data: Option<&'a str>, +} + +#[cfg(target_arch = "s390x")] +impl<'a> From<&'a PerformAttOpt> for PerformAttOptComb<'a> { + fn from(value: &'a PerformAttOpt) -> Self { + let input = match (&value.input, &value.input_pos) { + (None, Some(i)) => i, + (Some(i), None) => i, + (Some(_), Some(_)) => unreachable!(), + (None, None) => unreachable!(), + }; + let output = match (&value.output, &value.output_pos) { + (None, Some(o)) => o, + (Some(o), None) => o, + (Some(_), Some(_)) => unreachable!(), + (None, None) => unreachable!(), + }; + let user_data = value.user_data.as_deref(); + Self { + input, + output, + user_data, + } + } +} + +#[derive(Args, Debug)] +pub struct VerifyOpt { + /// Specify the attestation request to be verified. + #[arg(short, long, value_name = "FILE", value_hint = ValueHint::FilePath,)] + pub input: String, + + /// Specify the output for the verification result + #[arg(short, long, value_name = "FILE", value_hint = ValueHint::FilePath,)] + pub output: Option, + + /// Specifies the header of the guest image. + /// + /// Can be an IBM Secure Execution image created by genprotimg or an extracted IBM Secure + /// Execution header. The header must start at a page boundary. 
+ #[arg(long, value_name = "FILE", value_hint = ValueHint::FilePath)] + pub hdr: String, + + /// Use FILE as the protection key to decrypt the request + /// + /// Do not publish this key, otherwise your attestation is compromised. + /// Delete this key after verification. + #[arg(long, value_name = "FILE", value_hint = ValueHint::FilePath,)] + pub arpk: String, + + /// Define the output format. + #[arg(long, value_enum, default_value_t)] + pub format: VerifyOutputType, + + /// Write the user data to the FILE if any. + /// + /// Writes the user data, if the response contains any, to FILE + /// The user-data is part of the attestation measurement. If the user-data is written to FILE + /// the user-data was part of the measurement and verified. + /// Emits a warning if the response contains no user-data + #[arg(long, short ,value_name = "FILE", value_hint = ValueHint::FilePath,)] + pub user_data: Option, + + /// Provide more detailed output. + #[arg(short='v', long, action = clap::ArgAction::Count)] + verbose: u8, + + /// Deprecated short verbose flag (-V) form the C implementation. + /// + /// If specified a deprecation warning is emitted, + #[arg(short = 'V', hide = true, action = clap::ArgAction::Count)] + verbose_deprecated: u8, +} + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum, Debug, Default)] +pub enum VerifyOutputType { + /// Use yaml format. + #[default] + Yaml, +} diff -Nru s390-tools-2.31.0/rust/pvattest/src/cmd/create.rs s390-tools-2.33.1/rust/pvattest/src/cmd/create.rs --- s390-tools-2.31.0/rust/pvattest/src/cmd/create.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/src/cmd/create.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 
2024 + +use crate::{ + cli::{AttAddFlags, CreateAttOpt}, + exchange::{ExchangeFormatRequest, ExchangeFormatVersion}, +}; +use anyhow::{bail, Context, Result}; +use log::{debug, warn}; +use pv::{ + attest::{AttestationFlags, AttestationMeasAlg, AttestationRequest, AttestationVersion}, + misc::{create_file, write_file}, + request::{ReqEncrCtx, Request, SymKey, SymKeyType}, +}; +use std::process::ExitCode; + +fn flags(cli_flags: &[AttAddFlags]) -> AttestationFlags { + let mut att_flags = AttestationFlags::default(); + for flag in cli_flags { + match flag { + AttAddFlags::PhkhImg => att_flags.set_image_phkh(), + AttAddFlags::PhkhAtt => att_flags.set_attest_phkh(), + } + } + att_flags +} + +pub fn create(opt: &CreateAttOpt) -> Result { + let att_version = AttestationVersion::One; + let meas_alg = AttestationMeasAlg::HmacSha512; + + let mut arcb = AttestationRequest::new(att_version, meas_alg, flags(&opt.add_data))?; + debug!("Generated Attestation request"); + + // Add host-key documents + opt.certificate_args + .get_verified_hkds("attestation request")? 
+ .into_iter() + .for_each(|k| arcb.add_hostkey(k)); + debug!("Added all host-keys"); + + let encr_ctx = + ReqEncrCtx::random(SymKeyType::Aes256).context("Failed to generate random input")?; + let ser_arcb = arcb.encrypt(&encr_ctx)?; + warn!("Successfully generated the request"); + + let mut output = create_file(&opt.output)?; + let exch_ctx = ExchangeFormatRequest::new( + ser_arcb, + meas_alg.exp_size(), + arcb.flags().expected_additional_size(), + )?; + exch_ctx.write(&mut output, ExchangeFormatVersion::One)?; + + let arpk = match encr_ctx.prot_key() { + SymKey::Aes256(k) => k, + _ => bail!("Unexpected key type"), + }; + write_file( + &opt.arpk, + arpk.value(), + "Attestation request Protection Key", + )?; + + Ok(ExitCode::SUCCESS) +} diff -Nru s390-tools-2.31.0/rust/pvattest/src/cmd/perform.rs s390-tools-2.33.1/rust/pvattest/src/cmd/perform.rs --- s390-tools-2.31.0/rust/pvattest/src/cmd/perform.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/src/cmd/perform.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 
2024 + +use crate::{ + cli::PerformAttOptComb, + exchange::{ExchangeFormatRequest, ExchangeFormatResponse, ExchangeFormatVersion}, +}; +use anyhow::Result; +use pv::{ + misc::{create_file, open_file, read_file}, + uv::{AttestationCmd, UvDevice}, +}; +use std::process::ExitCode; + +pub fn perform<'a, P>(opt: P) -> Result +where + P: Into>, +{ + let opt = opt.into(); + let mut input = open_file(opt.input)?; + let mut output = create_file(opt.output)?; + let uvdevice = UvDevice::open()?; + + let ex_in = ExchangeFormatRequest::read(&mut input)?; + let user_data = opt + .user_data + .map(|u| read_file(u, "user-data")) + .transpose()?; + + let mut cmd = AttestationCmd::new_request( + ex_in.arcb.clone().into(), + user_data.clone(), + ex_in.exp_measurement, + ex_in.exp_additional, + )?; + + uvdevice.send_cmd(&mut cmd)?; + + let measurement = cmd.measurement(); + let additional = cmd.additional_owned(); + let cuid = cmd.cuid(); + + let ex_out = ExchangeFormatResponse::new( + ex_in.arcb, + measurement.to_owned(), + additional, + user_data, + cuid.to_owned(), + )?; + ex_out.write(&mut output, ExchangeFormatVersion::One)?; + + Ok(ExitCode::SUCCESS) +} diff -Nru s390-tools-2.31.0/rust/pvattest/src/cmd/verify.rs s390-tools-2.33.1/rust/pvattest/src/cmd/verify.rs --- s390-tools-2.31.0/rust/pvattest/src/cmd/verify.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/src/cmd/verify.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 
2024 + +use anyhow::Result; +use log::{debug, warn}; +use pv::{ + attest::{ + AdditionalData, AttestationFlags, AttestationItems, AttestationMeasurement, + AttestationRequest, + }, + misc::{create_file, open_file, read_exact_file, write_file}, + request::{openssl::pkey::PKey, BootHdrTags, Confidential, SymKey}, +}; +use serde::Serialize; +use std::{fmt::Display, process::ExitCode}; +use utils::HexSlice; + +use crate::{ + cli::{VerifyOpt, VerifyOutputType}, + exchange::ExchangeFormatResponse, + EXIT_CODE_ATTESTATION_FAIL, +}; + +#[derive(Serialize)] +struct VerifyOutput<'a> { + cuid: HexSlice<'a>, + #[serde(skip_serializing_if = "Option::is_none")] + add: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + add_fields: Option>>, + #[serde(skip_serializing_if = "Option::is_none")] + user_data: Option>, +} + +impl<'a> VerifyOutput<'a> { + fn from_exchange(resp: &'a ExchangeFormatResponse, flags: &AttestationFlags) -> Result { + let additional_data_fields = resp + .additional() + .map(|a| AdditionalData::from_slice(a, flags)) + .transpose()?; + let user_data = resp.user().map(|u| u.into()); + + Ok(Self { + cuid: resp.config_uid().into(), + add: resp.additional().map(|a| a.into()), + add_fields: additional_data_fields.map(AdditionalData::from_other), + user_data, + }) + } +} + +impl<'a> Display for VerifyOutput<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Config UID:")?; + writeln!(f, "{:#}", self.cuid)?; + if let Some(data) = &self.add { + writeln!(f, "Additional-data:")?; + writeln!(f, "{:#}", data)?; + } + if let Some(data) = &self.add_fields { + writeln!(f, "Additional-data content:")?; + writeln!(f, "{:#}", data)?; + } + if let Some(data) = &self.user_data { + writeln!(f, "user-data:")?; + writeln!(f, "{:#}", data)?; + } + Ok(()) + } +} + +pub fn verify(opt: &VerifyOpt) -> Result { + let mut input = open_file(&opt.input)?; + let mut img = open_file(&opt.hdr)?; + let output = 
opt.output.as_ref().map(create_file).transpose()?; + let arpk = SymKey::Aes256( + read_exact_file(&opt.arpk, "Attestation request protection key").map(Confidential::new)?, + ); + let tags = BootHdrTags::from_se_image(&mut img)?; + let exchange = ExchangeFormatResponse::read(&mut input)?; + + let (auth, conf) = AttestationRequest::decrypt_bin(exchange.arcb(), &arpk)?; + let meas_key = PKey::hmac(conf.measurement_key())?; + let items = AttestationItems::new( + &tags, + exchange.config_uid(), + exchange.user(), + conf.nonce().as_ref().map(|v| v.value()), + exchange.additional(), + ); + + let measurement = AttestationMeasurement::calculate(items, auth.mai(), &meas_key)?; + + let uv_meas = exchange.measurement(); + if !measurement.eq_secure(uv_meas) { + debug!("Measurement values:"); + debug!("Recieved: {}", HexSlice::from(uv_meas)); + debug!("Calculated: {}", HexSlice::from(measurement.as_ref())); + warn!("Attestation measurement verification failed. Calculated and received attestation measurement are not equal."); + return Ok(ExitCode::from(EXIT_CODE_ATTESTATION_FAIL)); + } + warn!("Attestation measurement verified"); + // Error impossible CUID is present Attestation verified + let pr_data = VerifyOutput::from_exchange(&exchange, auth.flags())?; + + warn!("{pr_data}"); + if let Some(mut output) = output { + match opt.format { + VerifyOutputType::Yaml => serde_yaml::to_writer(&mut output, &pr_data)?, + }; + } + + if let Some(user_data) = &opt.user_data { + match exchange.user() { + Some(data) => write_file(user_data, data, "user-data")?, + None => { + warn!("Location for `user-data` specified, but respose does not contain any user-data") + } + } + }; + + Ok(ExitCode::SUCCESS) +} diff -Nru s390-tools-2.31.0/rust/pvattest/src/cmd.rs s390-tools-2.33.1/rust/pvattest/src/cmd.rs --- s390-tools-2.31.0/rust/pvattest/src/cmd.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/src/cmd.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,33 @@ +// 
SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 +// +pub mod create; +#[cfg(target_arch = "s390x")] +pub mod perform; +pub mod verify; + +pub use create::create; +pub use verify::verify; + +pub const CMD_FN: &[&str] = &["+create", "+verify"]; +// s390 branch +#[cfg(target_arch = "s390x")] +mod uv_cmd { + pub use super::perform::perform; + pub const UV_CMD_FN: &[&str] = &["+perform"]; +} + +// non s390-branch +#[cfg(not(target_arch = "s390x"))] +mod uv_cmd { + use std::process::ExitCode; + + use anyhow::{bail, Result}; + + pub fn perform(_: &crate::cli::PerformAttOpt) -> Result { + bail!("Command only available on s390x") + } + pub const UV_CMD_FN: &[&str] = &[]; +} +pub use uv_cmd::*; diff -Nru s390-tools-2.31.0/rust/pvattest/src/exchange.rs s390-tools-2.33.1/rust/pvattest/src/exchange.rs --- s390-tools-2.31.0/rust/pvattest/src/exchange.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/src/exchange.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,884 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 +use anyhow::{anyhow, bail, Error, Result}; +use byteorder::ByteOrder; +use pv::{assert_size, request::MagicValue, uv::AttestationCmd, uv::ConfigUid}; +use std::{ + io::{ErrorKind, Read, Seek, SeekFrom, Write}, + mem::size_of, +}; +use zerocopy::{AsBytes, BigEndian, FromBytes, FromZeroes, U32, U64}; + +const INV_EXCHANGE_FMT_ERROR_TEXT: &str = "The input has not the correct format:"; + +#[repr(C)] +#[derive(Debug, AsBytes, PartialEq, Eq, Default, FromZeroes, FromBytes)] +struct Entry { + size: U32, + offset: U32, +} +assert_size!(Entry, 8); + +/// If size == 0 the offset is ignored. (entry does not exist) +/// If offset >0 and <0x40 -> invalid format +/// If offset == 0 and size > 0 no data saved, however the request will need this amount of memory +/// to succeed. Only makes sense for measurement and additional data. This however, is not +/// enforced. 
+impl Entry { + fn new(size: u32, offset: u32) -> Self { + Self { + size: size.into(), + offset: offset.into(), + } + } + + /// # Panic + /// + /// panics if `val` is larger than `max_size` bytes + fn from_slice(val: Option<&[u8]>, max_size: u32, offset: &mut u32) -> Self { + match val { + Some(val) => { + assert!(val.len() <= max_size as usize); + let size = val.len() as u32; + let res = Self::new(size, *offset); + *offset += size; + res + } + None => Self::default(), + } + } + + /// # Panic + /// + /// panics if `val` is larger than `max_size` bytes + fn from_exp(val: Option) -> Self { + if let Some(val) = val { + Self::new(val, 0) + } else { + Self::default() + } + } + + fn from_none() -> Self { + Self::default() + } + + /// Reads data from stream if required + fn read(&self, reader: &mut R) -> Result + where + R: Read + Seek, + { + match self { + Entry { size, .. } if size.get() == 0 => Ok(ExpOrData::None), + Entry { size, offset } if offset.get() == 0 => Ok(ExpOrData::Exp(size.get())), + Entry { size, offset } => { + reader.seek(SeekFrom::Start(offset.get() as u64))?; + let mut buf = vec![0; size.get() as usize]; + reader.read_exact(&mut buf)?; + Ok(ExpOrData::Data(buf)) + } + } + } +} + +#[repr(C)] +#[derive(Debug, AsBytes, FromZeroes, FromBytes)] +struct ExchangeFormatV1Hdr { + magic: U64, + version: U32, + size: U32, + reserved: U64, + /// v1 specific + arcb: Entry, + measurement: Entry, + additional: Entry, + user: Entry, + config_uid: Entry, +} +assert_size!(ExchangeFormatV1Hdr, 0x40); + +impl ExchangeFormatV1Hdr { + fn new_request(arcb: &[u8], measurement: u32, additional: u32) -> Result { + let mut offset: u32 = size_of::() as u32; + let arcb_entry = Entry::from_slice(Some(arcb), AttestationCmd::ARCB_MAX_SIZE, &mut offset); + let measurement_entry = Entry::from_exp(Some(measurement)); + let exp_add = match additional { + 0 => None, + size => Some(size), + }; + // TODO min and max size check? 
+ let additional_entry = Entry::from_exp(exp_add); //, AttestationCmd::ADDITIONAL_MAX_SIZE, &mut offset); + let user_entry = Entry::from_none(); + let cuid_entry = Entry::from_none(); + + Ok(Self { + magic: U64::from_bytes(ExchangeMagic::MAGIC), + version: ExchangeFormatVersion::One.into(), + size: offset.into(), + reserved: 0.into(), + arcb: arcb_entry, + measurement: measurement_entry, + additional: additional_entry, + user: user_entry, + config_uid: cuid_entry, + }) + } + + fn new_response( + arcb: &[u8], + measurement: &[u8], + additional: Option<&[u8]>, + user: Option<&[u8]>, + config_uid: &[u8], + ) -> Result { + let mut offset: u32 = size_of::() as u32; + let arcb_entry = Entry::from_slice(Some(arcb), AttestationCmd::ARCB_MAX_SIZE, &mut offset); + let measurement_entry = Entry::from_slice( + Some(measurement), + AttestationCmd::MEASUREMENT_MAX_SIZE, + &mut offset, + ); + let additional_entry = + Entry::from_slice(additional, AttestationCmd::ADDITIONAL_MAX_SIZE, &mut offset); + let user_entry = Entry::from_slice(user, AttestationCmd::USER_MAX_SIZE, &mut offset); + let cuid_entry = Entry::from_slice(Some(config_uid), 0x10, &mut offset); + + Ok(Self { + magic: U64::from_bytes(ExchangeMagic::MAGIC), + version: ExchangeFormatVersion::One.into(), + size: offset.into(), + reserved: 0.into(), + arcb: arcb_entry, + measurement: measurement_entry, + additional: additional_entry, + user: user_entry, + config_uid: cuid_entry, + }) + } +} + +/// The magic value used to identify an [`ExchangeFormatRequest`] +/// +/// The magic value is ASCII: +/// ```rust +/// # use s390_pv_core::attest::ExchangeMagic; +/// # use s390_pv_core::request::MagicValue; +/// # fn main() { +/// # let magic = +/// b"pvattest" +/// # ; +/// # assert!(ExchangeMagic::starts_with_magic(magic)); +/// # } +/// ``` +pub struct ExchangeMagic; +impl MagicValue<8> for ExchangeMagic { + const MAGIC: [u8; 8] = [0x70, 0x76, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74]; +} + +/// Version identifier for an 
[`ExchangeFormatRequest`] +#[repr(u32)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ExchangeFormatVersion { + /// Version 1 (= 0x0100) + One = 0x0100, +} + +impl TryFrom> for ExchangeFormatVersion { + type Error = Error; + + fn try_from(value: U32) -> Result { + if value.get() == Self::One as u32 { + Ok(Self::One) + } else { + bail!( + "{INV_EXCHANGE_FMT_ERROR_TEXT} Unsupported version: ({})", + value.get() + ); + } + } +} + +impl From for U32 { + fn from(value: ExchangeFormatVersion) -> Self { + (value as u32).into() + } +} + +/// A parsed exchange entry value +/// +/// An entry can be all zero(None), just a size (Exp) or a offset+size to some data (Data) +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum ExpOrData { + Exp(u32), + Data(Vec), + None, +} + +impl ExpOrData { + /// calculates the (expected or real) size + fn size(&self) -> u32 { + match self { + ExpOrData::Exp(s) => *s, + // size is max u32 large as read in before + ExpOrData::Data(v) => v.len() as u32, + ExpOrData::None => 0, + } + } + + /// Returns data if self is [`ExpOrData::Data`] + /// + /// Consumes itself + fn data(self) -> Option> { + match self { + ExpOrData::Data(v) => Some(v), + _ => None, + } + } +} + +impl From> for ExpOrData { + fn from(value: Option) -> Self { + match value { + Some(v) => ExpOrData::Exp(v), + None => ExpOrData::None, + } + } +} + +impl From<&ExpOrData> for Option { + fn from(value: &ExpOrData) -> Self { + match value { + ExpOrData::Exp(v) => Some(*v), + _ => None, + } + } +} + +impl From for Option> { + fn from(value: ExpOrData) -> Self { + match value { + ExpOrData::Exp(s) => Some(vec![0; s as usize]), + ExpOrData::Data(d) => Some(d), + ExpOrData::None => None, + } + } +} + +/// The _exchange format_ is a simple file format to send labeled binary blobs between +/// pvattest instances on different machines. 
+#[derive(Debug, PartialEq, Eq)] +pub struct ExchangeFormatRequest { + // all sizes are guaranteed to fit in the exchange format/UV Call at any time + // pub to allow deconstruction of this struct + pub arcb: Vec, + pub exp_measurement: u32, + pub exp_additional: u32, +} + +/// The _exchange format_ is a simple file format to send labeled binary blobs between +/// pvattest instances on different machines. +#[derive(Debug, PartialEq, Eq)] +pub struct ExchangeFormatResponse { + // all sizes are guaranteed to fit in the exchange format/UV Call at any time + // pub to allow deconstruction of this struct + pub arcb: Vec, + pub measurement: Vec, + pub additional: Option>, + pub user: Option>, + pub config_uid: ConfigUid, +} + +impl ExchangeFormatRequest { + /// Creates a new exchange context, with an attestation request, expected measurement and + /// optional an additional data size. Useful for creating a attestation request. + pub fn new(arcb: Vec, exp_measurement: u32, exp_additional: u32) -> Result { + verify_size( + exp_measurement, + 1, + AttestationCmd::MEASUREMENT_MAX_SIZE, + "Expected measurement size", + )?; + verify_size( + exp_additional, + 0, + AttestationCmd::ADDITIONAL_MAX_SIZE, + "Expected additional data size", + )?; + verify_slice(&arcb, AttestationCmd::ARCB_MAX_SIZE, "Attestation request")?; + + Ok(Self { + arcb, + exp_measurement, + exp_additional, + }) + } + + fn write_v1(&self, writer: &mut W) -> Result<()> + where + W: Write, + { + let hdr = ExchangeFormatV1Hdr::new_request( + self.arcb.as_slice(), + self.exp_measurement, + self.exp_additional, + )?; + writer.write_all(hdr.as_bytes())?; + writer.write_all(&self.arcb)?; + Ok(()) + } + + /// Serializes the encapsulated data into the provides stream in the provided format + pub fn write(&self, writer: &mut W, version: ExchangeFormatVersion) -> Result<()> + where + W: Write, + { + match version { + ExchangeFormatVersion::One => self.write_v1(writer), + } + } + + /// Reads and deserializes the exchange 
file in the provided stream + /// + /// # Errors + /// + /// Returns an error if the stream does not contain data in exchange format, CUID or user data + /// do not fit, or any IO error that can appear during reading streams. + pub fn read(reader: &mut R) -> Result + where + R: Read + Seek, + { + let mut buf = vec![0; size_of::()]; + match reader.read_exact(&mut buf) { + Ok(it) => it, + // report hdr file to small for header + Err(err) if err.kind() == ErrorKind::UnexpectedEof => { + bail!("{INV_EXCHANGE_FMT_ERROR_TEXT} Invalid Header."); + } + Err(err) => return Err(err.into()), + }; + + if !ExchangeMagic::starts_with_magic(&buf) { + bail!("{INV_EXCHANGE_FMT_ERROR_TEXT} Does not start with the magic value.",); + } + + let hdr = ExchangeFormatV1Hdr::ref_from(buf.as_slice()) + .ok_or(anyhow!("{INV_EXCHANGE_FMT_ERROR_TEXT} Invalid Header."))?; + + match TryInto::::try_into(hdr.version)? { + ExchangeFormatVersion::One => (), + } + + if stream_len(reader)? < hdr.size.get() as u64 { + bail!("{INV_EXCHANGE_FMT_ERROR_TEXT} File size too small"); + } + let arcb = hdr.arcb.read(reader)?.data().ok_or(anyhow!( + "{INV_EXCHANGE_FMT_ERROR_TEXT} Contains no attestation request.", + ))?; + + let measurement = hdr.measurement.read(reader)?.size(); + let additional = hdr.additional.read(reader)?.size(); + Self::new(arcb, measurement, additional) + } +} + +// Seek::stream_is unstable +// not expose to API users +// taken from rust std::io::seek; +fn stream_len(seek: &mut S) -> Result +where + S: Seek, +{ + let old_pos = seek.stream_position()?; + let len = seek.seek(SeekFrom::End(0))?; + + // Avoid seeking a third time when we were already at the end of the + // stream. The branch is usually way cheaper than a seek operation. 
+ if old_pos != len { + seek.seek(SeekFrom::Start(old_pos))?; + } + + Ok(len) +} + +fn verify_size(size: u32, min_size: u32, max_size: u32, field: &'static str) -> Result<()> { + if size < min_size { + bail!("{INV_EXCHANGE_FMT_ERROR_TEXT} The {field} field is too small ({size})"); + } + + if size > max_size { + bail!("{INV_EXCHANGE_FMT_ERROR_TEXT} The {field} field is too large ({size})"); + } + + Ok(()) +} + +/// check that a slice has at max `max_size` amount of bytes +fn verify_slice(val: &[u8], max_size: u32, field: &'static str) -> Result<()> { + if val.len() > max_size as usize { + bail!( + "{INV_EXCHANGE_FMT_ERROR_TEXT} The {field} field is too large ({})", + val.len() + ); + } + Ok(()) +} + +impl ExchangeFormatResponse { + /// Creates a new exchange context, with an attestation request, measurement and + /// cuid. + pub fn new( + arcb: Vec, + measurement: Vec, + additional: Option>, + user: Option>, + config_uid: ConfigUid, + ) -> Result { + // should not fail; Already checked during import. 
+ verify_slice( + &arcb, + AttestationCmd::ARCB_MAX_SIZE, + "Attestation request data", + )?; + verify_slice( + &measurement, + AttestationCmd::MEASUREMENT_MAX_SIZE, + "Attestation Measurement", + )?; + + if let Some(additional) = &additional { + verify_slice( + additional, + AttestationCmd::ADDITIONAL_MAX_SIZE, + "Additional data", + )?; + } + + if let Some(user) = &user { + verify_slice(user, AttestationCmd::USER_MAX_SIZE, "User data")?; + } + + Ok(Self { + arcb, + measurement, + additional, + user, + config_uid, + }) + } + + fn write_v1(&self, writer: &mut W) -> Result<()> + where + W: Write, + { + let hdr = ExchangeFormatV1Hdr::new_response( + self.arcb.as_slice(), + &self.measurement, + self.additional.as_deref(), + self.user.as_deref(), + &self.config_uid, + )?; + writer.write_all(hdr.as_bytes())?; + writer.write_all(&self.arcb)?; + writer.write_all(&self.measurement)?; + if let Some(data) = &self.additional { + writer.write_all(data)?; + } + if let Some(data) = &self.user { + writer.write_all(data)?; + } + writer.write_all(&self.config_uid)?; + Ok(()) + } + + /// Serializes the encapsulated data into the provides stream in the provided format + pub fn write(&self, writer: &mut W, version: ExchangeFormatVersion) -> Result<()> + where + W: Write, + { + match version { + ExchangeFormatVersion::One => self.write_v1(writer), + } + } + + /// Reads and deserializes the exchange file in the provided stream + /// + /// # Errors + /// + /// Returns an error if the stream does not contain data in exchange format, CUID or user data + /// do not fit, or any IO error that can appear during reading streams. 
+ pub fn read(reader: &mut R) -> Result + where + R: Read + Seek, + { + let mut buf = vec![0; size_of::()]; + match reader.read_exact(&mut buf) { + Ok(it) => it, + // report hdr file to small for header + Err(err) if err.kind() == ErrorKind::UnexpectedEof => { + bail!("{INV_EXCHANGE_FMT_ERROR_TEXT} Invalid Header."); + } + Err(err) => return Err(err.into()), + }; + + if !ExchangeMagic::starts_with_magic(&buf) { + bail!("{INV_EXCHANGE_FMT_ERROR_TEXT} Does not start with the magic value."); + } + + let hdr = ExchangeFormatV1Hdr::ref_from(buf.as_slice()) + .ok_or(anyhow!("{INV_EXCHANGE_FMT_ERROR_TEXT} Invalid Header."))?; + + match TryInto::::try_into(hdr.version)? { + ExchangeFormatVersion::One => (), + } + + if stream_len(reader)? < hdr.size.get() as u64 { + bail!("{INV_EXCHANGE_FMT_ERROR_TEXT} File size too small"); + } + let arcb = hdr.arcb.read(reader)?.data().ok_or(anyhow!( + "{INV_EXCHANGE_FMT_ERROR_TEXT} Contains no attestation request.", + ))?; + + // TODO remove unwrap + let measurement = hdr.measurement.read(reader)?.data().ok_or(anyhow!( + "{INV_EXCHANGE_FMT_ERROR_TEXT} Contains no attestation response (Measurement missing).", + ))?; + let additional = hdr.additional.read(reader)?.data(); + let user = hdr.user.read(reader)?.data(); + let config_uid: ConfigUid = match hdr.config_uid.read(reader)?.data() { + Some(v) => v.try_into().map_err(|_| { +anyhow!( + "{INV_EXCHANGE_FMT_ERROR_TEXT} Configuration UID has an invalid size. Expected size 16, is {}",hdr.config_uid.size.get() + ) + })?, + None => + bail!("{INV_EXCHANGE_FMT_ERROR_TEXT} Contains no attestation response (CUID missing).") +, + }; + Self::new(arcb, measurement, additional, user, config_uid) + } + + /// Returns the measurement of this [`ExchangeFormatRequest`]. + pub fn measurement(&self) -> &[u8] { + &self.measurement + } + + /// Returns the additional data of this [`ExchangeFormatRequest`]. 
+ pub fn additional(&self) -> Option<&[u8]> { + self.additional.as_deref() + } + + /// Returns the user data of this [`ExchangeFormatRequest`]. + pub fn user(&self) -> Option<&[u8]> { + self.user.as_deref() + } + + /// Returns the config UID of this [`ExchangeFormatRequest`]. + /// + /// # Error + /// Returns an error if the [`ExchangeFormatRequest`] contains no CUID, + pub fn config_uid(&self) -> &ConfigUid { + &self.config_uid + } + + /// Returns a reference to the attestation request of this [`ExchangeFormatRequest`]. + pub fn arcb(&self) -> &[u8] { + self.arcb.as_ref() + } +} + +#[cfg(test)] +mod test { + + use std::io::Cursor; + + use super::*; + use pv::misc::read_file; + + #[test] + fn exchange_from_slice() { + let val = &[0; 17]; + let mut offset = 18; + + let entry = Entry::from_slice(Some(val), 20, &mut offset); + assert_eq!( + entry, + Entry { + size: 17.into(), + offset: 18.into(), + } + ); + assert_eq!(offset, 18 + 17); + } + static ARCB: [u8; 16] = [0x11; 16]; + static MEASUREMENT: [u8; 64] = [0x12; 64]; + static ADDITIONAL: [u8; 32] = [0x13; 32]; + static CUID: [u8; 16] = [0x14; 16]; + static USER: [u8; 256] = [0x15; 256]; + + fn test_read_write_request( + path: &'static str, + arcb: Vec, + measurement: usize, + additional: usize, + ) { + // TODO as 32 checks + let ctx_write = ExchangeFormatRequest::new(arcb, measurement as u32, additional as u32) + .expect("exchange fmt creation"); + + // let mut out = create_file(path).unwrap(); + let mut out = vec![]; + ctx_write + .write(&mut out, ExchangeFormatVersion::One) + .unwrap(); + + let buf = read_file(path, "test read exchange").unwrap(); + + assert_eq!(out, buf); + + let ctx_read = ExchangeFormatRequest::read(&mut Cursor::new(&mut &buf)).unwrap(); + + assert_eq!(ctx_read, ctx_write); + } + + fn test_read_write_response( + path: &'static str, + arcb: Vec, + measurement: Vec, + additional: Option>, + user: Option>, + cuid: ConfigUid, + ) { + let ctx_write = ExchangeFormatResponse::new(arcb, measurement, 
additional, user, cuid) + .expect("exchange fmt creation"); + + // let mut out = create_file(path).unwrap(); + + let mut out = vec![]; + ctx_write + .write(&mut out, ExchangeFormatVersion::One) + .unwrap(); + + let buf = read_file(path, "test read exchange").unwrap(); + + assert_eq!(out, buf); + + let ctx_read = ExchangeFormatResponse::read(&mut Cursor::new(&mut &buf)).unwrap(); + + assert_eq!(ctx_read, ctx_write); + } + + #[test] + fn full_req() { + test_read_write_request( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/full_req.bin" + ), + ARCB.to_vec(), + MEASUREMENT.len(), + ADDITIONAL.len(), + ); + } + + #[test] + fn add_req() { + test_read_write_request( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/add_req.bin" + ), + ARCB.to_vec(), + MEASUREMENT.len(), + ADDITIONAL.len(), + ); + } + + #[test] + fn invalid_req() { + ExchangeFormatRequest::new(ARCB.to_vec(), 0, ADDITIONAL.len() as u32).unwrap_err(); + } + + #[test] + fn min_req() { + test_read_write_request( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/min_req.bin" + ), + ARCB.to_vec(), + MEASUREMENT.len(), + 0, + ); + } + + #[test] + fn full_resp() { + test_read_write_response( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/full_resp.bin" + ), + ARCB.to_vec(), + MEASUREMENT.to_vec(), + ADDITIONAL.to_vec().into(), + USER.to_vec().into(), + CUID, + ); + } + + #[test] + fn add_resp() { + test_read_write_response( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/add_resp.bin" + ), + ARCB.to_vec(), + MEASUREMENT.to_vec(), + ADDITIONAL.to_vec().into(), + None, + CUID, + ); + } + + #[test] + fn user_resp() { + test_read_write_response( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/user_resp.bin" + ), + ARCB.to_vec(), + MEASUREMENT.to_vec(), + None, + USER.to_vec().into(), + CUID, + ); + } + #[test] + fn min_resp() { + 
test_read_write_response( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/min_resp.bin" + ), + ARCB.to_vec(), + MEASUREMENT.to_vec(), + None, + None, + CUID, + ) + } + + #[test] + fn resp_no_cuid() { + let buf = include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/min_req.bin" + )); + let _ctx_read = ExchangeFormatResponse::read(&mut Cursor::new(&mut &buf)).unwrap_err(); + } + + #[test] + fn resp_inv_magic() { + let mut buf = read_file( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/min_req.bin" + ), + "test resp inv magic", + ) + .unwrap(); + // tamper with the magic + buf[0] = !buf[0]; + + let _ctx_read = ExchangeFormatResponse::read(&mut Cursor::new(&mut &buf)).unwrap_err(); + } + + #[test] + fn no_arcb() { + let mut buf = read_file( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/min_req.bin" + ), + "test resp inv magic", + ) + .unwrap(); + // delete the arcb entry + buf[0x18..0x20].copy_from_slice(&[0; 8]); + + let _ctx_read = ExchangeFormatRequest::read(&mut Cursor::new(&mut &buf)).unwrap_err(); + } + + #[test] + fn small() { + let mut buf = read_file( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/min_req.bin" + ), + "test resp inv magic", + ) + .unwrap(); + buf.pop(); + + let _ctx_read = ExchangeFormatRequest::read(&mut Cursor::new(&mut &buf)).unwrap_err(); + } + + #[test] + fn hdr() { + // buffer smaller than the header but containing the magic + let buf = [ + 0x70, 0x76, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x1, 0x2, 0x3, 0x4, + ]; + + let _ctx_read = ExchangeFormatRequest::read(&mut Cursor::new(&mut &buf)).unwrap_err(); + } + + #[test] + fn version() { + let mut buf = read_file( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/min_req.bin" + ), + "test resp inv magic", + ) + .unwrap(); + // tamper with the version + buf[0x8] = 0xff; + + let _ctx_read = 
ExchangeFormatRequest::read(&mut Cursor::new(&mut &buf)).unwrap_err(); + } + + #[test] + fn cuid_size() { + let mut buf = read_file( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/assets/", + "exp/exchange/min_resp.bin" + ), + "test resp inv magic", + ) + .unwrap(); + // tamper with the cuid size + buf[0x3b] = 0xf; + + let _ctx_read = ExchangeFormatResponse::read(&mut Cursor::new(&mut &buf)).unwrap_err(); + } +} diff -Nru s390-tools-2.31.0/rust/pvattest/src/main.rs s390-tools-2.33.1/rust/pvattest/src/main.rs --- s390-tools-2.31.0/rust/pvattest/src/main.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/src/main.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 + +mod cli; +mod cmd; +mod exchange; + +use clap::{CommandFactory, Parser}; +use cli::CliOptions; +use log::trace; +use std::process::ExitCode; +use utils::{print_cli_error, print_error, print_version, PvLogger}; + +use crate::cli::Command; +use crate::cmd::*; + +static LOGGER: PvLogger = PvLogger; +const FEATURES: &[&[&str]] = &[cmd::CMD_FN, cmd::UV_CMD_FN]; +const EXIT_CODE_ATTESTATION_FAIL: u8 = 2; +const EXIT_CODE_LOGGER_FAIL: u8 = 3; + +fn print_version(verbosity: u8) -> anyhow::Result { + print_version!(verbosity, "2024", FEATURES.concat()); + Ok(ExitCode::SUCCESS) +} + +fn main() -> ExitCode { + let cli: CliOptions = match CliOptions::try_parse() { + Ok(cli) => cli, + Err(e) => return print_cli_error(e, CliOptions::command()), + }; + + // set up logger/stderr + if let Err(e) = LOGGER.start(cli.verbosity()) { + // should(TM) never happen + eprintln!("Logger error: {e:?}"); + return EXIT_CODE_LOGGER_FAIL.into(); + } + + trace!("Trace verbosity, may leak secrets to command-line"); + trace!("Options {cli:?}"); + + let res = match &cli.cmd { + Command::Create(opt) => create(opt), + Command::Perform(opt) => perform(opt), + Command::Verify(opt) => verify(opt), + Command::Version => 
print_version(cli.verbosity()), + }; + match res { + Ok(c) => c, + Err(e) => print_error(&e, cli.verbosity()), + } +} Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pvattest/tests/assets/exp/exchange/add_req.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pvattest/tests/assets/exp/exchange/add_req.bin differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pvattest/tests/assets/exp/exchange/add_resp.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pvattest/tests/assets/exp/exchange/add_resp.bin differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pvattest/tests/assets/exp/exchange/full.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pvattest/tests/assets/exp/exchange/full.bin differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pvattest/tests/assets/exp/exchange/full_req.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pvattest/tests/assets/exp/exchange/full_req.bin differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pvattest/tests/assets/exp/exchange/full_resp.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pvattest/tests/assets/exp/exchange/full_resp.bin differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pvattest/tests/assets/exp/exchange/min_req.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pvattest/tests/assets/exp/exchange/min_req.bin differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pvattest/tests/assets/exp/exchange/min_resp.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pvattest/tests/assets/exp/exchange/min_resp.bin differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pvattest/tests/assets/exp/exchange/user_req.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pvattest/tests/assets/exp/exchange/user_req.bin differ Binary files /tmp/L7oDrAtTTe/s390-tools-2.31.0/rust/pvattest/tests/assets/exp/exchange/user_resp.bin and /tmp/7G_tv5hQHR/s390-tools-2.33.1/rust/pvattest/tests/assets/exp/exchange/user_resp.bin differ diff -Nru s390-tools-2.31.0/rust/pvattest/tools/Makefile s390-tools-2.33.1/rust/pvattest/tools/Makefile --- 
s390-tools-2.31.0/rust/pvattest/tools/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/tools/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,5 @@ +include ../../common.mak + +install: + $(INSTALL) -d -m 755 $(DESTDIR)$(USRBINDIR) + $(INSTALL) -g $(GROUP) -o $(OWNER) -m 755 pvextract-hdr "$(DESTDIR)$(USRBINDIR)" diff -Nru s390-tools-2.31.0/rust/pvattest/tools/pvattest-info s390-tools-2.33.1/rust/pvattest/tools/pvattest-info --- s390-tools-2.31.0/rust/pvattest/tools/pvattest-info 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/tools/pvattest-info 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,94 @@ +#!/bin/bash +# +# pvattest-info - get additional information from an attestation measurement +# +# Sample: +# ./pvattest-info attestresp.bin +# +# Copyright IBM Corp. 2022 +# +# s390-tools is free software; you can redistribute it and/or modify +# it under the terms of the MIT license. See LICENSE for details. + +set -o pipefail +set -o nounset +set -e + +XDUMP='od -A x -t x2z -v --endian=big' + +usage() { + cat <<-EOF + Usage: $(basename "$0") FILE + + Prints config UID and additional data if available. + EOF +} + +function check_is_pvattest_binary() { + local input="$1" + local size + local version + + size=$(wc -c <"$input") + if [ "$size" -lt 64 ]; then + echo "ERROR: Input file is too small." >&2 + exit 1 + fi + + ${XDUMP} --read-bytes 16 -- "${input}" 2>/dev/null | grep -q pvattest || + { echo "ERROR: ${input} does not contain a pvattest binary output." >&2 && exit 1; } + + size=$(${XDUMP} --skip-bytes 12 --read-bytes 4 -- "${input}" 2>/dev/null | awk 'NR==1 {print "0x" $2 $3}') + if [ $((size)) -lt 64 ]; then + echo "ERROR: ${input} does not contain a pvattest binary output." 
>&2 + exit 1 + fi + + version=$(${XDUMP} --skip-bytes 8 --read-bytes 4 -- "$input" 2>/dev/null) + echo "$version" | grep -q "0000 0100" || + { echo -n "WARNING: unknown hdr version " >&2 && + echo "$version" | awk '{print "0x" $2 $3}'>&2 ; } +} + +function print_entry() { + local file_off="$1" + local text="$2" + local input="$3" + local size + local off + + size=$(${XDUMP} --skip-bytes $((file_off)) --read-bytes 4 -- "${input}" 2>/dev/null | + awk 'NR==1 {print "0x" $2 $3}') + off=$(${XDUMP} --skip-bytes $((file_off + 4)) --read-bytes 4 -- "${input}" 2>/dev/null | + awk 'NR==1 {print "0x" $2 $3}') + + if [[ $size != "0x00000000" ]] || [[ $off != "0x00000000" ]]; then + echo "${text}:" + od -A n -w$((size)) -t x8 --skip-bytes $((off)) --read-bytes $((size)) -- "${input}" 2>/dev/null |\ + sed -e 's/\s//g' + fi +} + +function require_command() { + local cmd="$1" + + command -v "$cmd" >/dev/null 2>&1 || \ + { echo >&2 "ERROR: $cmd required but not installed."; exit 1; } +} + +require_command awk +require_command wc +require_command od + +if [ $# -eq 0 ]; then + echo "ERROR: Input not set. Use '$(basename "$0") [FILE]' to specify the Input file" >&2 + exit 1 +fi + +input="$1" + +[ -e "$input" ] || { echo "ERROR: File '$1' not found" >&2 && exit 1; } +check_is_pvattest_binary "$input" + +print_entry 0x38 "Config UID" "$input" +print_entry 0x28 "Additional Data" "$input" diff -Nru s390-tools-2.31.0/rust/pvattest/tools/pvextract-hdr s390-tools-2.33.1/rust/pvattest/tools/pvextract-hdr --- s390-tools-2.31.0/rust/pvattest/tools/pvextract-hdr 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvattest/tools/pvextract-hdr 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,104 @@ +#!/bin/bash +# +# pvextract_hdr - extract an IBM Secure Execution header from the Image +# +# Sample: +# ./pvextract-hdr -o sehdr.bin se-image.bin +# +# Copyright IBM Corp. 2022 +# +# s390-tools is free software; you can redistribute it and/or modify +# it under the terms of the MIT license. 
See LICENSE for details. + +set -o pipefail +set -o nounset +set -e + +XDUMP='od -A x -t x2z -v --endian=big' + +def_output='sehdr.bin' +def_skip=0x14 +def_len=0x4 + +usage() { + cat <<-EOF + Usage: $(basename "$0") [-o ${def_output}] [-s ${def_skip}] [-l ${def_len}] FILE + + Extract the header of the SE-image located in FILE. + By default ${def_skip} pages will be skipped until starting to search + for the header. By default the search will be stopped after ${def_len} pages. + '${def_output}' is the default output file name. + EOF +} + +function check_file() { + [ -e "$1" ] || + { echo "ERROR: File '$1' not found" >&2 && exit 1; } +} + +function check_hdr_ver() { + local hdr_start="$1" + local input="$2" + ${XDUMP} --skip-bytes $((hdr_start + 8)) --read-bytes 4 -- "$input" 2>/dev/null | grep -q "000 0100" || + { echo -n "WARNING: unknown hdr version " && + ${XDUMP} --skip-bytes $((hdr_start + 8)) --read_bytes 4 -- "$input" 2>/dev/null | awk '{print "0x" $2 $3}'; } +} + +function require_command() { + local cmd="$1" + + command -v "$cmd" >/dev/null 2>&1 || \ + { echo >&2 "ERROR: $cmd required but not installed."; exit 1; } +} + +require_command od +require_command awk +require_command grep + +output=${def_output} +parsed_skip=${def_skip} +parsed_len=${def_len} +# the last argument must be the input file +input="${*: -1}" +while getopts 'o:s:l:h' OPTION; do + case "$OPTION" in + o) output="$OPTARG" ;; + s) parsed_skip="$OPTARG" ;; + l) parsed_len="$OPTARG" ;; + h) + usage + exit 0 + ;; + :) + echo "ERROR: Must supply an argument to -$OPTARG." >&2 + exit 1 + ;; + *) + usage + exit 1 + ;; + esac +done + +#argument specify pages; convert to bytes +skip=$((parsed_skip * 0x1000)) +len=$((parsed_len * 0x1000)) + +if [ $# -eq 0 ]; then + echo "ERROR: Input not set. 
Use '$(basename "$0") [FILE]' to specify the Input file" >&2 + exit 1 +fi + +check_file "$input" +hdr_start=$(${XDUMP} --skip-bytes $((skip)) --read-bytes $((len)) -- "${input}" 2>/dev/null | grep IBMSecEx || + { echo ERROR: "${input} does not contain an SE header." >&2 && exit 1; }) +hdr_start=$(echo "${hdr_start}" | awk '{print "0x" $1}' | cut -c 1-10) +echo "SE header found at offset ${hdr_start}" + +check_hdr_ver "$hdr_start" "$input" + +size=$(${XDUMP} --skip-bytes $((hdr_start + 12)) --read-bytes 4 -- "${input}" 2>/dev/null | + awk 'NR==1 {print "0x" $2 $3}') + +dd if="${input}" of="${output}" bs=1 count=$((size)) skip=$((hdr_start)) status=none +echo "SE header written to '${output}' ($((size)) bytes)" diff -Nru s390-tools-2.31.0/rust/pv_core/Cargo.toml s390-tools-2.33.1/rust/pv_core/Cargo.toml --- s390-tools-2.31.0/rust/pv_core/Cargo.toml 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/Cargo.toml 2024-05-28 08:26:36.000000000 +0200 @@ -1,8 +1,13 @@ [package] -name = "pv_core" -version = "1.0.0" +name = "s390_pv_core" +version = "0.10.0" edition.workspace = true license.workspace = true +description = "s390-tools IBM Secure Execution core utilities" +keywords = ["s390", "s390x", "IBM_Secure_Execution"] +repository = "https://github.com/ibm-s390-linux/s390-tools/tree/master/rust" +categories = ["hardware-support"] +readme = "README.md" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -10,12 +15,10 @@ libc = "0.2.49" log = { version = "0.4.6", features = ["std", "release_max_level_debug"] } thiserror = "1.0.33" -utils = {path = "../utils"} zerocopy = {version = "0.7", features = ["derive"]} serde = { version = "1.0.139", features = ["derive"]} byteorder = "1.3" [dev-dependencies] -serde_test = "1" -mockito = {version = "1", default-features = false } +serde_test = "1.0.139" lazy_static = "1.1" diff -Nru s390-tools-2.31.0/rust/pv_core/README.md s390-tools-2.33.1/rust/pv_core/README.md --- 
s390-tools-2.31.0/rust/pv_core/README.md 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/README.md 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,24 @@ + +# s390_pv_core - basic library for pv-tools + +This library is intended to be used by tools and libraries that +are used for creating and managing [IBM Secure Execution](https://www.ibm.com/docs/en/linux-on-systems?topic=virtualization-secure-execution) guests. +`s390_pv_core` provides abstraction layers for secure memory management, +logging, and accessing the uvdevice. + +If your project is not targeted to provide tooling for and/or managing of IBM Secure execution +guests, do **not** use this crate. + +It does not provide any cryptographic operations through OpenSSL. +For this use [s390_pv](https://crates.io/crates/s390_pv_core) which reexports all symbols from this crate. +If your project uses `s390_pv` crate do **not** include `s390_pv_core` as well. + +## Import crate +The recommended way of importing this crate is: +```bash +cargo add s390_pv_core --rename pv_core +``` diff -Nru s390-tools-2.31.0/rust/pv_core/src/error.rs s390-tools-2.33.1/rust/pv_core/src/error.rs --- s390-tools-2.31.0/rust/pv_core/src/error.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/error.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,6 +1,8 @@ // SPDX-License-Identifier: MIT // -// Copyright IBM Corp. 2023 +// Copyright IBM Corp. 
2023, 2024 + +use std::path::PathBuf; /// Result type for this crate pub type Result = std::result::Result; @@ -10,8 +12,7 @@ #[derive(thiserror::Error, Debug)] #[non_exhaustive] pub enum Error { - #[cfg_attr(debug_assertions, error("Ultravisor: '{msg}' ({rc:#06x},{rrc:#06x})"))] - #[cfg_attr(not(debug_assertions), error("Ultravisor: '{msg}' ({rc:#06x})"))] + #[error("Ultravisor: '{msg}' ({rc:#06x},{rrc:#06x})")] Uv { rc: u16, rrc: u16, @@ -25,13 +26,13 @@ FileIo { ty: FileIoErrorType, ctx: String, - path: String, + path: PathBuf, source: std::io::Error, }, #[error("Cannot {ty} `{path}`")] FileAccess { ty: FileAccessErrorType, - path: String, + path: PathBuf, source: std::io::Error, }, @@ -44,9 +45,24 @@ #[error("Input does not contain an add-secret request")] NoAsrcb, + #[error("Input add-secret request is larger than 8k")] + AscrbLarge, + #[error("Input contains unsupported user-data type: {0:#06x}")] UnsupportedUserData(u16), + #[error("The input has not the correct format: {field} is too large. Maximal size {max_size}")] + AttDataSizeLarge { field: &'static str, max_size: u32 }, + + #[error("The input has not the correct format: {field} is too small. Minimal size {min_size}")] + AttDataSizeSmall { field: &'static str, min_size: u32 }, + + #[error("The attestation request has an unknown algorithm type (.0)")] + BinArcbInvAlgorithm(u32), + + #[error("The attestation request does not specify a measurement size or measurement data.")] + BinArcbNoMeasurement, + // errors from other crates #[error(transparent)] Io(#[from] std::io::Error), diff -Nru s390-tools-2.31.0/rust/pv_core/src/lib.rs s390-tools-2.33.1/rust/pv_core/src/lib.rs --- s390-tools-2.31.0/rust/pv_core/src/lib.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/lib.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,28 +1,29 @@ // SPDX-License-Identifier: MIT // -// Copyright IBM Corp. 2023 -#![deny(missing_docs)] -#![allow(unused)] -//! pv_core - basic library for pv-tools -//! -//! 
This library is intened to be used by tools and libraries that -//! are used for creating and managing IBM Secure Execution guests. -//! `pv_core` provides abstraction layers for secure memory management, -//! logging, and accessing the uvdevice. -//! -//! It does not provide any cryptographic operations through OpenSSL. -//! For this use `pv` which reexports all symbos from this crate. +// Copyright IBM Corp. 2023, 2024 +#![deny( + missing_docs, + missing_debug_implementations, + trivial_numeric_casts, + unstable_features, + unused_import_braces, + unused_qualifications +)] +#![doc = include_str!("../README.md")] mod error; -mod log; mod macros; -mod tmpfile; mod utils; +mod uvattest; mod uvdevice; mod uvsecret; -pub use crate::log::PvLogger; pub use error::{Error, FileAccessErrorType, FileIoErrorType, Result}; +/// Functionalities for reading attestation requests +pub mod attest { + pub use crate::uvattest::{AttestationMagic, AttestationMeasAlg}; +} + /// Miscellaneous functions and definitions pub mod misc { pub use crate::utils::pv_guest_bit_set; @@ -30,35 +31,29 @@ pub use crate::utils::{parse_hex, to_u16, to_u32, try_parse_u128, try_parse_u64}; pub use crate::utils::{read, write}; pub use crate::utils::{Flags, Lsb0Flags64, Msb0Flags64}; - - pub use crate::tmpfile::TemporaryDirectory; } /// Definitions and functions for interacting with the Ultravisor +/// +/// For detailed Information on how to send Ultravisor Commands see [`crate::uv::UvDevice`] and +/// [`crate::uv::UvCmd`] pub mod uv { + pub use crate::uvdevice::attest::AttestationCmd; pub use crate::uvdevice::secret::{AddCmd, ListCmd, LockCmd}; - pub use crate::uvdevice::secret::{ListableSecretType, SecretEntry, SecretList}; - pub use crate::uvdevice::{ - uv_ioctl, ConfigUid, UvCmd, UvDevice, UvDeviceInfo, UvFlags, UvcSuccess, - }; + pub use crate::uvdevice::secret_list::{ListableSecretType, SecretEntry, SecretId, SecretList}; + pub use crate::uvdevice::{ConfigUid, UvCmd, UvDevice, UvDeviceInfo, UvFlags, 
UvcSuccess}; } /// Functionalities to verify UV requests pub mod request { - /// Functionalities for reading add-secret requests - pub mod uvsecret { - pub use crate::uvsecret::AddSecretMagic; - pub use crate::uvsecret::UserDataType; - } - - /// Version number of the request in system-endian + /// Version number of the request in system endianness pub type RequestVersion = u32; /// Request magic value /// /// The first 8 byte of a request providing an identifier of the request type /// for programs pub type RequestMagic = [u8; 8]; - /// A `MagicValue` is a bytepattern, that indicates if a byte slice contains the specified + /// A `MagicValue` is a byte pattern, that indicates if a byte slice contains the specified /// (binary) data. pub trait MagicValue { /// Magic value as byte array @@ -73,21 +68,11 @@ } } -/// Provides cargo version Info about this crate. -/// -/// Produces `pv_core-crate ` -pub const fn crate_info() -> &'static str { - concat!(env!("CARGO_PKG_NAME"), "-crate ", env!("CARGO_PKG_VERSION")) +/// Functionalities for reading add-secret requests +pub mod secret { + pub use crate::uvsecret::AddSecretMagic; + pub use crate::uvsecret::UserDataType; } // Internal definitions/ imports const PAGESIZE: usize = 0x1000; -use ::utils::assert_size; -use ::utils::static_assert; - -#[doc(hidden)] -/// stuff pv_core and pv share. Not intended for other users -pub mod for_pv { - pub use crate::uvdevice::secret::ser_gsid; - pub use crate::uvdevice::secret::SECRET_ID_SIZE; -} diff -Nru s390-tools-2.31.0/rust/pv_core/src/log.rs s390-tools-2.33.1/rust/pv_core/src/log.rs --- s390-tools-2.31.0/rust/pv_core/src/log.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/log.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright IBM Corp. 2023 - -use log::{self, Level, LevelFilter, Log, Metadata, Record}; - -/// A simple Logger that prints to stderr if the verbosity level is high enough. 
-/// Prints log-level for Debug+Trace -#[derive(Clone, Default, Debug)] -pub struct PvLogger; - -fn to_level(verbosity: u8) -> LevelFilter { - match verbosity { - // Error and Warn on by default - 0 => LevelFilter::Warn, - 1 => LevelFilter::Info, - 2 => LevelFilter::Debug, - _ => LevelFilter::Trace, - } -} - -impl PvLogger { - /// Set self as the logger for this application. - /// - /// # Errors - /// - /// An error is returned if a logger has already been set. - pub fn start(&'static self, verbosity: u8) -> Result<(), log::SetLoggerError> { - log::set_logger(self).map(|()| log::set_max_level(to_level(verbosity))) - } -} - -impl Log for PvLogger { - fn enabled(&self, _metadata: &Metadata) -> bool { - true - } - - fn log(&self, record: &Record) { - if self.enabled(record.metadata()) { - if record.level() > Level::Info { - eprintln!("{}: {}", record.level(), record.args()); - } else { - eprintln!("{}", record.args()); - } - } - } - fn flush(&self) {} -} diff -Nru s390-tools-2.31.0/rust/pv_core/src/macros.rs s390-tools-2.33.1/rust/pv_core/src/macros.rs --- s390-tools-2.31.0/rust/pv_core/src/macros.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/macros.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,20 +1,13 @@ // SPDX-License-Identifier: MIT // -// Copyright IBM Corp. 2023 - -macro_rules! path_to_str { - ($path: expr) => { - $path.as_ref().to_str().unwrap_or("no UTF-8 path") - }; -} -pub(crate) use path_to_str; +// Copyright IBM Corp. 2023, 2024 macro_rules! file_error { ($ty: tt, $ctx: expr, $path:expr, $src: expr) => { $crate::Error::FileIo { ty: $crate::FileIoErrorType::$ty, ctx: $ctx.to_string(), - path: $path.to_string(), + path: $path.as_ref().to_path_buf(), source: $src, } }; @@ -28,14 +21,35 @@ } pub(crate) use bail_spec; -#[doc(hidden)] +/// Asserts a constant expression evaluates to `true`. +/// +/// If the expression is not evaluated to `true` the compilation will fail. #[macro_export] -macro_rules! 
file_acc_error { - ($ty: tt, $path:expr, $src: expr) => { - $crate::Error::FileAccess { - ty: $crate::FileAccessErrorType::$ty, - path: $path.to_string(), - source: $src, - } +macro_rules! static_assert { + ($condition:expr) => { + const _: () = core::assert!($condition); + }; +} + +/// Asserts that a type has a specific size. +/// +/// Useful to validate structs that are passed to C code. +/// If the size has not the expected value the compilation will fail. +/// +/// # Example +/// ```rust +/// # use s390_pv_core::assert_size; +/// # fn main() {} +/// #[repr(C)] +/// struct c_struct { +/// v: u64, +/// } +/// assert_size!(c_struct, 8); +/// // assert_size!(c_struct, 7);//won't compile +/// ``` +#[macro_export] +macro_rules! assert_size { + ($t:ty, $sz:expr ) => { + $crate::static_assert!(::std::mem::size_of::<$t>() == $sz); }; } diff -Nru s390-tools-2.31.0/rust/pv_core/src/tmpfile.rs s390-tools-2.33.1/rust/pv_core/src/tmpfile.rs --- s390-tools-2.31.0/rust/pv_core/src/tmpfile.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/tmpfile.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,152 +0,0 @@ -use std::{ - ffi::{CString, OsStr}, - os::unix::prelude::OsStrExt, - path::{Path, PathBuf}, -}; - -/// Rust wrapper for `libc::mkdtemp` -fn mkdtemp>(template: P) -> Result { - let template_cstr = CString::new(template.as_ref().as_os_str().as_bytes())?; - let template_raw = template_cstr.into_raw(); - unsafe { - // SAFETY: template_raw is a valid CString because it was generated by - // the `CString::new`. - let ret = libc::mkdtemp(template_raw); - - if ret.is_null() { - Err(std::io::Error::last_os_error()) - } else { - // SAFETY: `template_raw` is still a valid CString because it was - // generated by `CString::new` and modified by `libc::mkdtemp`. 
- let path_cstr = std::ffi::CString::from_raw(template_raw); - let path = OsStr::from_bytes(path_cstr.as_bytes()); - let path = std::path::PathBuf::from(path); - - Ok(path) - } - } -} - -/// This type creates a temporary directory that is automatically removed when -/// it goes out of scope. It utilizes the `mkdtemp` function and its semantics, -/// with the addition of automatically including the template characters -/// `XXXXXX`. -#[derive(PartialEq, Eq, Debug)] -pub struct TemporaryDirectory { - path: Box, -} - -impl TemporaryDirectory { - /// Creates a temporary directory using `prefix` as directory prefix. - /// - /// # Errors - /// - /// An error is returned if the temporary directory could not be created. - pub fn new>(prefix: P) -> Result { - let mut template = prefix.as_ref().to_owned(); - let mut template_os_string = template.as_mut_os_string(); - template_os_string.push("XXXXXX"); - - let temp_dir = mkdtemp(template_os_string)?; - Ok(Self { - path: temp_dir.into_boxed_path(), - }) - } - - /// Returns the path of the created temporary directory. - pub fn path(&self) -> &Path { - self.path.as_ref() - } - - fn forget(mut self) { - self.path = PathBuf::new().into_boxed_path(); - std::mem::forget(self); - } - - /// Removes the created temporary directory and it's contents. 
- pub fn close(mut self) -> std::io::Result<()> { - let ret = std::fs::remove_dir_all(&self.path); - self.forget(); - ret - } -} - -impl AsRef for TemporaryDirectory { - fn as_ref(&self) -> &Path { - self.path() - } -} - -impl Drop for TemporaryDirectory { - fn drop(&mut self) { - let _ = std::fs::remove_dir_all(&self.path); - } -} - -#[cfg(test)] -mod tests { - use std::path::PathBuf; - - use super::{mkdtemp, TemporaryDirectory}; - - #[test] - fn mkdtemp_test() { - let template_inv_not_last_characters = "XXXXXXyay"; - let template_inv_too_less_x = "yayXXXXX"; - let template_inv_path_does_not_exist = "../NA-yay/XXXXXX"; - - let template = "yayXXXXXX"; - - let err = mkdtemp(template_inv_not_last_characters).expect_err("invalid template"); - let err = mkdtemp(template_inv_too_less_x).expect_err("invalid template"); - let err = - mkdtemp(template_inv_path_does_not_exist).expect_err("path does not exist template"); - - let path = mkdtemp(template).expect("mkdtemp should work"); - assert!(path.exists()); - assert!(path.as_os_str().to_str().expect("works").starts_with("yay")); - std::fs::remove_dir(path); - } - - #[test] - fn temporary_directory_empty_name_test() { - let temp_dir = TemporaryDirectory::new("").expect("should work"); - let path = temp_dir.path().to_owned(); - assert!(path.exists()); - - // Test that close removes the directory - temp_dir.close(); - assert!(!path.exists()); - } - - #[test] - fn temporary_directory_drop_test() { - let temp_dir = TemporaryDirectory::new("").expect("should work"); - let path = temp_dir.path().to_owned(); - assert!(path.exists()); - - // Test that the destructor removes the directory - drop(temp_dir); - assert!(!path.exists()); - } - - #[test] - fn temporary_directory_close_test() { - let temp_dir = TemporaryDirectory::new("yay").expect("should work"); - - let path = temp_dir.path().to_owned(); - assert!(path.exists()); - assert!(path.as_os_str().to_str().expect("works").starts_with("yay")); - - // Test that close() removes the 
directory - temp_dir.close(); - assert!(!path.exists()); - } - - #[test] - fn temporary_directory_as_ref_test() { - let temp_dir = TemporaryDirectory::new("").expect("should work"); - - assert_eq!(temp_dir.path(), temp_dir.as_ref()); - } -} diff -Nru s390-tools-2.31.0/rust/pv_core/src/utils.rs s390-tools-2.33.1/rust/pv_core/src/utils.rs --- s390-tools-2.31.0/rust/pv_core/src/utils.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/utils.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,7 +2,7 @@ // // Copyright IBM Corp. 2023 use crate::{ - macros::{bail_spec, file_error, path_to_str}, + macros::{bail_spec, file_error}, Error, FileAccessErrorType, FileIoErrorType, Result, }; use std::{ @@ -16,15 +16,15 @@ pub trait Flags: From + for<'a> From<&'a T> { /// Set the specified bit to one. /// # Panics - ///Panics if bit is >= 64 + /// Panics if bit is >= 64 fn set_bit(&mut self, bit: u8); /// Set the specified bit to zero. /// # Panics - ///Panics if bit is >= 64 + /// Panics if bit is >= 64 fn unset_bit(&mut self, bit: u8); /// Test if the specified bit is set. 
/// # Panics - ///Panics if bit is >= 64 + /// Panics if bit is >= 64 fn is_set(&self, bit: u8) -> bool; } @@ -119,7 +119,7 @@ /// * `ctx` - Error context string in case of an error /// ```rust /// # use std::error::Error; -/// # use pv_core::misc::try_parse_u128; +/// # use s390_pv_core::misc::try_parse_u128; /// # fn main() -> Result<(), Box> { /// let hex = "11223344556677889900aabbccddeeff"; /// try_parse_u128(&hex, "The test")?; @@ -155,7 +155,7 @@ /// * `ctx` - Error context string in case of an error /// ```rust /// # use std::error::Error; -/// # use pv_core::misc::try_parse_u64; +/// # use s390_pv_core::misc::try_parse_u64; /// # fn main() -> Result<(), Box> { /// let hex = "1234567890abcdef"; /// try_parse_u64(&hex, "The test")?; @@ -187,7 +187,7 @@ pub fn open_file>(path: P) -> Result { File::open(&path).map_err(|e| Error::FileAccess { ty: FileAccessErrorType::Open, - path: path_to_str!(path).to_string(), + path: path.as_ref().to_path_buf(), source: e, }) } @@ -200,7 +200,7 @@ pub fn create_file>(path: P) -> Result { File::create(&path).map_err(|e| Error::FileAccess { ty: FileAccessErrorType::Create, - path: path_to_str!(path).to_string(), + path: path.as_ref().to_path_buf(), source: e, }) } @@ -224,9 +224,9 @@ path: P, ctx: &str, ) -> Result<[u8; COUNT]> { - let mut f = std::fs::File::open(&path).map_err(|e| Error::FileAccess { - ty: crate::FileAccessErrorType::Open, - path: path_to_str!(path).to_string(), + let mut f = File::open(&path).map_err(|e| Error::FileAccess { + ty: FileAccessErrorType::Open, + path: path.as_ref().to_path_buf(), source: e, })?; @@ -236,7 +236,7 @@ let mut buf = [0; COUNT]; f.read_exact(&mut buf) - .map_err(|e| file_error!(Read, ctx, path_to_str!(path).to_string(), e))?; + .map_err(|e| file_error!(Read, ctx, path, e))?; Ok(buf) } @@ -249,14 +249,7 @@ /// # Errors /// Passes through any kind of error `std::fs::read` produces pub fn read_file>(path: P, ctx: &str) -> Result> { - std::fs::read(&path).map_err(|e| { - file_error!( - 
Read, - ctx, - path.as_ref().to_str().unwrap_or("no UTF-8 path"), - e - ) - }) + std::fs::read(&path).map_err(|e| file_error!(Read, ctx, path, e)) } /// Reads all content from a [`std::io::Read`] and add context in case of an error @@ -267,12 +260,12 @@ /// /// # Errors /// Passes through any kind of error `std::fs::read` produces -pub fn read(rd: &mut R, path: &str, ctx: &str) -> Result> { +pub fn read>(rd: &mut R, path: P, ctx: &str) -> Result> { let mut buf = vec![]; rd.read_to_end(&mut buf).map_err(|e| Error::FileIo { ty: FileIoErrorType::Write, ctx: ctx.to_string(), - path: path.to_string(), + path: path.as_ref().to_path_buf(), source: e, })?; Ok(buf) @@ -286,11 +279,11 @@ /// /// # Errors /// Passes through any kind of error `std::fs::write` produces -pub fn write_file>(path: &str, data: D, ctx: &str) -> Result<()> { - std::fs::write(path, data.as_ref()).map_err(|e| Error::FileIo { +pub fn write_file, P: AsRef>(path: P, data: D, ctx: &str) -> Result<()> { + std::fs::write(path.as_ref(), data.as_ref()).map_err(|e| Error::FileIo { ty: FileIoErrorType::Write, ctx: ctx.to_string(), - path: path.to_string(), + path: path.as_ref().to_path_buf(), source: e, }) } @@ -303,11 +296,16 @@ /// /// # Errors /// Passes through any kind of error `std::fs::write` produces -pub fn write, W: Write>(wr: &mut W, data: D, path: &str, ctx: &str) -> Result<()> { +pub fn write, P: AsRef, W: Write>( + wr: &mut W, + data: D, + path: P, + ctx: &str, +) -> Result<()> { wr.write_all(data.as_ref()).map_err(|e| Error::FileIo { ty: FileIoErrorType::Write, ctx: ctx.to_string(), - path: path.to_string(), + path: path.as_ref().to_path_buf(), source: e, }) } @@ -363,7 +361,7 @@ pub fn pv_guest_bit_set() -> bool { #[cfg(not(target_arch = "s390x"))] return false; - //s390 branch + // s390 branch let v = std::fs::read("/sys/firmware/uv/prot_virt_guest").unwrap_or_else(|_| vec![0]); let v: u8 = String::from_utf8_lossy(&v[..1]).parse().unwrap_or(0); v == 1 diff -Nru 
s390-tools-2.31.0/rust/pv_core/src/uvattest.rs s390-tools-2.33.1/rust/pv_core/src/uvattest.rs --- s390-tools-2.31.0/rust/pv_core/src/uvattest.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/uvattest.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 + +use crate::{request::MagicValue, Error}; +use byteorder::{BigEndian, ByteOrder}; +use zerocopy::U32; + +/// The magic value used to identify an attestation request +/// +/// The magic value is ASCII: +/// ```rust +/// # use s390_pv_core::attest::AttestationMagic; +/// # use s390_pv_core::request::MagicValue; +/// # fn main() { +/// # let magic = & +/// [0u8; 8] +/// # ; +/// # assert!(AttestationMagic::starts_with_magic(magic)); +/// # } +/// ``` +#[derive(Debug)] +pub struct AttestationMagic; +impl MagicValue<8> for AttestationMagic { + const MAGIC: [u8; 8] = [0; 8]; +} + +/// Identifier for the used measurement algorithm +#[repr(u32)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum AttestationMeasAlg { + /// Use HMAC with SHA512 as measurement algorithm + HmacSha512 = 1, +} + +impl AttestationMeasAlg { + /// Report the expected size for a given measurement algorithm + pub const fn exp_size(&self) -> u32 { + match self { + AttestationMeasAlg::HmacSha512 => 64, + } + } +} + +impl TryFrom> for AttestationMeasAlg { + type Error = Error; + + fn try_from(value: U32) -> Result { + if value.get() == AttestationMeasAlg::HmacSha512 as u32 { + Ok(Self::HmacSha512) + } else { + Err(Error::BinArcbInvAlgorithm(value.get())) + } + } +} + +impl From for U32 { + fn from(value: AttestationMeasAlg) -> Self { + (value as u32).into() + } +} diff -Nru s390-tools-2.31.0/rust/pv_core/src/uvdevice/attest.rs s390-tools-2.33.1/rust/pv_core/src/uvdevice/attest.rs --- s390-tools-2.31.0/rust/pv_core/src/uvdevice/attest.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/uvdevice/attest.rs 2024-05-28 
08:26:36.000000000 +0200 @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 +use super::{ffi, AttestationUserData, ConfigUid, UvCmd}; +use crate::{Error, Result}; +use std::ptr; +use zerocopy::{AsBytes, FromZeroes}; + +/// _Retrieve Attestation Measurement_ UVC +/// +/// The Attestation Request has two input and three outputs. +/// ARCB and user-data are inputs for the UV. +/// Measurement, additional data, and the Configuration Unique ID are outputs generated by UV. +/// +/// The Attestation Request Control Block (ARCB) is a cryptographically verified +/// and secured request to UV and user-Data is some plaintext data which is +/// going to be included in the Attestation Measurement calculation. +/// +/// Measurement is a cryptographic measurement of the callers properties, +/// optional data configured by the ARCB and the user-data. If specified by the +/// ARCB, UV will add some additional Data to the measurement calculation. +/// This additional data is then returned as well. +/// +/// If the Retrieve Attestation Measurement UV facility is not present, +/// UV will return invalid command rc. +/// +/// # Example +/// +/// ```rust,no_run +/// # use s390_pv_core::uv::UvDevice; +/// # use s390_pv_core::uv::AttestationCmd; +/// # fn main() -> s390_pv_core::Result<()> { +/// let arcb = std::fs::read("arcb")?.into(); +/// let user_data = vec![0, 1, 2, 3]; +/// // Hard-coded example +/// let mut cmd = AttestationCmd::new_request(arcb, Some(user_data), 64, 0)?; +/// let uv = UvDevice::open()?; +/// # uv.send_cmd(&mut cmd)?; +/// # Ok(()) +/// # } +/// ``` +#[derive(Debug)] +pub struct AttestationCmd { + // all sizes are guaranteed to fit in the exchange format/UV-Call at any time + // attestation data, these must not changed by this tooling, this is an invariant of this + // struct, so that the raw pointer stay valid through the lifetime of this struct. 
+ // no mutable references are ever passed from this struct + arcb: Box<[u8]>, + measurement: Vec, + additional: Option>, + // raw IOCTL struct + uvio_attest: ffi::uvio_attest, +} + +impl AttestationCmd { + /// Maximum size for Additional-data + pub const ADDITIONAL_MAX_SIZE: u32 = ffi::UVIO_ATT_ADDITIONAL_MAX_LEN as u32; + /// Maximum size for Attestation Request Control Block + pub const ARCB_MAX_SIZE: u32 = ffi::UVIO_ATT_ARCB_MAX_LEN as u32; + /// Maximum size for Configuration UID + pub const CUID_SIZE: u32 = ffi::UVIO_ATT_UID_LEN as u32; + /// Maximum size for Measurement-data + pub const MEASUREMENT_MAX_SIZE: u32 = ffi::UVIO_ATT_MEASUREMENT_MAX_LEN as u32; + /// Maximum size for user-data + pub const USER_MAX_SIZE: u32 = ffi::UVIO_ATT_USER_DATA_LEN as u32; + + fn verify_size(size: u32, min_size: u32, max_size: u32, field: &'static str) -> Result<()> { + if size < min_size { + return Err(Error::AttDataSizeSmall { field, min_size }); + } + if size > max_size { + return Err(Error::AttDataSizeLarge { field, max_size }); + } + + Ok(()) + } + + fn verify_slice(val: &[u8], max_size: u32, field: &'static str) -> Result<()> { + if val.len() > max_size as usize { + Err(Error::AttDataSizeLarge { field, max_size }) + } else { + Ok(()) + } + } + + /// Creates a new [`AttestationCmd`] + /// + /// * `arcb` - binary attestation request + /// * `user_data` - optional, up to 256 bytes of arbitrary data + /// * `exp_measurement` - expected size the Attestation measurement requires + /// * `exp_additional` - expected size of the additional data. + /// + /// Creates a new Retrieve Attestation Measurement UVC. 
+ pub fn new_request( + arcb: Box<[u8]>, + user_data: Option>, + exp_measurement: u32, + exp_additional: u32, + ) -> Result { + Self::verify_size( + exp_measurement, + 1, + AttestationCmd::MEASUREMENT_MAX_SIZE, + "Expected measurement size", + )?; + Self::verify_size( + exp_additional, + 0, + AttestationCmd::ADDITIONAL_MAX_SIZE, + "Expected additional data size", + )?; + Self::verify_slice(&arcb, AttestationCmd::ARCB_MAX_SIZE, "Attestation request")?; + if let Some(ref data) = user_data { + Self::verify_slice(data, AttestationCmd::USER_MAX_SIZE, "User data")?; + } + + let (user_len, user_data) = match user_data { + // enforced by tests before invariants + Some(user) => (Some(user.len() as u16), { + let mut user_data = AttestationUserData::new_zeroed(); + user_data[0..user.len()].clone_from_slice(&user); + Some(user_data) + }), + None => (None, None), + }; + + let mut additional = match exp_additional { + 0 => None, + size => Some(vec![0u8; size as usize]), + }; + let mut measurement = vec![0u8; exp_measurement as usize]; + let uvio_attest = unsafe { + ffi::uvio_attest::new( + &arcb, + &mut measurement, + additional.as_deref_mut(), + user_data, + user_len, + ) + }; + Ok(Self { + arcb, + measurement, + additional, + uvio_attest, + }) + } + + /// Provides the additional data calculated by UV after a successful UVC + /// + /// Truncates the additional data to the correct length in place. + /// If called before a successful attestation the data in this buffer is undefined. + pub fn additional(&mut self) -> Option<&[u8]> { + // truncate the add size to the UV reported size + match &mut self.additional { + Some(ref mut a) => a.truncate(self.uvio_attest.add_data_len as usize), + None => (), + } + self.additional.as_deref() + } + + /// Copies the additional data calculated by UV after a successful UVC into a Vec + /// + /// Truncates the additional data to the correct length. + /// If called before a successful attestation the data in this buffer is undefined. 
+ pub fn additional_owned(&self) -> Option> { + let mut additional = self.additional.clone()?; + additional.truncate(self.uvio_attest.add_data_len as usize); + Some(additional) + } + + /// Provides the Configuration Unique Identifier received from UV after a successful UVC + /// + /// If called before a successful attestation the data in this buffer is undefined. + pub fn cuid(&self) -> &ConfigUid { + &self.uvio_attest.config_uid + } + + /// Provides the attestation measurement calculated by UV after a successful UVC + /// + /// If called before a successful attestation the data in this buffer is undefined. + pub fn measurement(&self) -> &[u8] { + &self.measurement + } + + /// Returns a reference to the request of this [`AttestationCmd`]. + pub fn arcb(&self) -> &[u8] { + self.arcb.as_ref() + } +} + +impl UvCmd for AttestationCmd { + const UV_IOCTL_NR: u8 = ffi::UVIO_IOCTL_ATT_NR; + + fn rc_fmt(&self, rc: u16, _rrc: u16) -> Option<&'static str> { + match rc { + // should not happen, uvdevice local value + 0x0101 => Some("Invalid continuation token specified"), + 0x010a => Some("Unsupported plaintext attestation flag set"), + 0x010b => Some("Unsupported measurement algorithm specified."), + 0x010c => Some("Unable to decrypt attestation request control block. 
Probably no valid host-key was provided"), + 0x0106 => Some("Unsupported attestation request version"), + // should not happen, protected by AttestationCmd constructors + 0x0102 => Some("User data length is greater than 256"), + // should not happen, uvdevice ensures this + 0x0103 => Some("Access exception recognized when accessing the attestation request control block"), + // should not happen, uvdevice ensures this + 0x0104 => Some("Access exception recognized when accessing the measurement data area"), + // should not happen, uvdevice ensures this + 0x0105 => Some("Access exception recognized when accessing the additional data area"), + // should not happen, ensured by Attestation Request builder + 0x0107 => Some("Invalid attestation request length for the specified attestation request version"), + // 0 case should not happen, ensured by Attestation Request builder + 0x0108 => Some("Number of key slots is either equal to 0 or greater than the maximum number supported by the specified attestation request version"), + // should not happen, ensured by Attestation Request builder + 0x0109 => Some("Size of encrypted area does not match measurement length plus any optional items"), + // should not happen, ensured by Attestation Request builder + 0x010d => Some("Measurement data length is not large enough to store measurement"), + // should not happen, ensured by Attestation Request builder + 0x010e => Some("Additional data length not large enough to hold all requested additional data"), + _ => None, + } + } + + fn data(&mut self) -> Option<&mut [u8]> { + Some(self.uvio_attest.as_bytes_mut()) + } +} + +fn opt_to_mut_ptr_u64(opt: &mut Option<&mut [u8]>) -> u64 { + (match opt { + Some(v) => v.as_mut_ptr(), + None => ptr::null_mut(), + }) as u64 +} + +impl ffi::uvio_attest { + /// Create a new attestation IOCTL control block + /// + /// Happily converts slice lengths into u32/u16 without verifying. + /// Therefore marked as unsafe. 
+ /// + /// # SAFETY + /// It is safe to call this function iff: + /// - arcb.len() < u32::MAX + /// - additional.len() < u32::MAX + /// - user_len() <= 256 + /// - pointyer fit into an u64 + unsafe fn new( + arcb: &[u8], + measurement: &mut [u8], + mut additional: Option<&mut [u8]>, + user: Option, + user_len: Option, + ) -> Self { + Self { + arcb_addr: arcb.as_ptr() as u64, + meas_addr: measurement.as_ptr() as u64, + add_data_addr: opt_to_mut_ptr_u64(&mut additional), + user_data: user.unwrap_or([0; 256]), + config_uid: [0; 16], + arcb_len: arcb.len() as u32, + meas_len: measurement.len() as u32, + add_data_len: additional.unwrap_or_default().len() as u32, + user_data_len: user_len.unwrap_or_default(), + reserved136: 0, + } + } +} diff -Nru s390-tools-2.31.0/rust/pv_core/src/uvdevice/ffi.rs s390-tools-2.33.1/rust/pv_core/src/uvdevice/ffi.rs --- s390-tools-2.31.0/rust/pv_core/src/uvdevice/ffi.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/uvdevice/ffi.rs 2024-05-28 08:26:36.000000000 +0200 @@ -9,6 +9,8 @@ pub const UVIO_ATT_MEASUREMENT_MAX_LEN: usize = 0x8000; pub const UVIO_ATT_ADDITIONAL_MAX_LEN: usize = 0x8000; pub const UVIO_ADD_SECRET_MAX_LEN: usize = 0x100000; +#[allow(unused)] +// here for completeness pub const UVIO_LIST_SECRETS_LEN: usize = 0x1000; // equal to ascii 'u' @@ -28,7 +30,6 @@ /// Ultravisor. /// /// `flags` is currently unused and to be set zero -/// #[repr(C)] #[derive(Debug)] pub struct uvio_ioctl_cb { @@ -68,29 +69,30 @@ /// Request Attestation Measurement control block /// /// The Attestation Request has two input and two outputs. -/// ARCB and User Data are inputs for the UV. -/// Measurement and Additional Data are outputs generated by UV. +/// ARCB and user-data are inputs for the UV. +/// Measurement and additional-data are outputs generated by UV. 
/// /// The Attestation Request Control Block (ARCB) is a cryptographically verified -/// and secured request to UV and User Data is some plaintext data which is +/// and secured request to UV and user-data is some plaintext data which is /// going to be included in the Attestation Measurement calculation. /// /// Measurement is a cryptographic measurement of the callers properties, /// optional data configured by the ARCB and the user data. If specified by the -/// ARCB, UV will add some Additional Data to the measurement calculation. -/// This Additional Data is then returned as well. +/// ARCB, UV will add some additional-data to the measurement calculation. +/// This additional-data is then returned as well. /// /// If the Retrieve Attestation Measurement UV facility is not present, /// UV will return invalid command rc. -/// Obviously all numbers are in BIG-endian! +/// +/// All numbers are in big-endian! #[repr(C)] #[derive(Debug, AsBytes, FromZeroes, FromBytes)] pub struct uvio_attest { - pub arcb_addr: u64, //in - pub meas_addr: u64, //out - pub add_data_addr: u64, //out - pub user_data: [u8; UVIO_ATT_USER_DATA_LEN], //in - pub config_uid: [u8; UVIO_ATT_UID_LEN], //out + pub arcb_addr: u64, // in + pub meas_addr: u64, // out + pub add_data_addr: u64, // out + pub user_data: [u8; UVIO_ATT_USER_DATA_LEN], // in + pub config_uid: [u8; UVIO_ATT_UID_LEN], // out pub arcb_len: u32, pub meas_len: u32, pub add_data_len: u32, @@ -99,15 +101,8 @@ } assert_size!(uvio_attest, 0x138); -#[allow(dead_code)] //TODO rm when pv learns attestation -impl uvio_attest { - pub const ARCB_MAX_LEN: usize = UVIO_ATT_ARCB_MAX_LEN; - pub const MEASUREMENT_MAX_LEN: usize = UVIO_ATT_MEASUREMENT_MAX_LEN; - pub const ADDITIONAL_MAX_LEN: usize = UVIO_ATT_ADDITIONAL_MAX_LEN; -} - /// corresponds to the UV_IOCTL macro -pub const fn uv_ioctl(nr: u8) -> u64 { +pub(crate) const fn uv_ioctl(nr: u8) -> u64 { iowr(UVIO_TYPE_UVC, nr, std::mem::size_of::()) } 
static_assert!(uv_ioctl(UVIO_IOCTL_ATT_NR) == 0xc0407501); diff -Nru s390-tools-2.31.0/rust/pv_core/src/uvdevice/info.rs s390-tools-2.33.1/rust/pv_core/src/uvdevice/info.rs --- s390-tools-2.31.0/rust/pv_core/src/uvdevice/info.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/uvdevice/info.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,10 +2,10 @@ // // Copyright IBM Corp. 2023 -use super::ffi::uvio_uvdev_info; +use super::ffi::{self, uvio_uvdev_info}; use crate::{ misc::{Flags, Lsb0Flags64}, - uv::{uv_ioctl, UvCmd, UvDevice}, + uv::{UvCmd, UvDevice}, Result, }; use std::fmt::Display; @@ -22,9 +22,8 @@ /// If the bit is set in both, `supp_uvio_cmds` and `supp_uv_cmds`, /// the uvdevice and the Ultravisor support that call. /// -/// Note that bit 0 ([`UvDevice::INFO_NR`]) is always zero for `supp_uv_cmds` -/// as there is no corresponding UV-call. -/// +/// Note that bit 0 is always zero for `supp_uv_cmds` +/// as there is no corresponding Info UV-call. #[derive(Debug)] pub struct UvDeviceInfo { supp_uvio_cmds: Lsb0Flags64, @@ -38,7 +37,7 @@ /// /// This function will return an error if the ioctl fails and the error code is not /// [`libc::ENOTTY`]. - /// `ENOTTY` is most likely because the uvdevice does not support the info IOCTL. + /// `ENOTTY` is most likely because older uvdevices does not support the info IOCTL. /// In that case one can safely assume that the device only supports the Attestation IOCTL. /// Therefore this is what this function returns IOCTL support for Attestation and _Data not /// available_ for the UV Attestation facility. 
@@ -48,10 +47,15 @@ let mut cmd = uvio_uvdev_info::new_zeroed(); match uv.send_cmd(&mut cmd) { Ok(_) => Ok(cmd.into()), - Err(crate::Error::Io(e)) if e.raw_os_error() == Some(libc::ENOTTY) => Ok(Self { - supp_uvio_cmds: (UvDevice::ATTESTATION_NR as u64).into(), - supp_uv_cmds: None, - }), + Err(crate::Error::Io(e)) if e.raw_os_error() == Some(libc::ENOTTY) => { + let mut supp_uvio_cmds = Lsb0Flags64::default(); + supp_uvio_cmds.set_bit(ffi::UVIO_IOCTL_ATT_NR); + + Ok(Self { + supp_uvio_cmds, + supp_uv_cmds: None, + }) + } Err(e) => Err(e), } } @@ -67,9 +71,7 @@ } impl UvCmd for uvio_uvdev_info { - fn cmd(&self) -> u64 { - uv_ioctl(UvDevice::INFO_NR) - } + const UV_IOCTL_NR: u8 = ffi::UVIO_IOCTL_UVDEV_INFO_NR; fn data(&mut self) -> Option<&mut [u8]> { Some(self.as_bytes_mut()) @@ -82,11 +84,11 @@ fn nr_as_string(nr: u8) -> Option<&'static str> { match nr { - UvDevice::INFO_NR => Some("Info"), - UvDevice::ATTESTATION_NR => Some("Attestation"), - UvDevice::ADD_SECRET_NR => Some("Add Secret"), - UvDevice::LIST_SECRET_NR => Some("List Secrets"), - UvDevice::LOCK_SECRET_NR => Some("Lock Secret Store"), + ffi::UVIO_IOCTL_UVDEV_INFO_NR => Some("Info"), + ffi::UVIO_IOCTL_ATT_NR => Some("Attestation"), + ffi::UVIO_IOCTL_ADD_SECRET_NR => Some("Add Secret"), + ffi::UVIO_IOCTL_LIST_SECRETS_NR => Some("List Secrets"), + ffi::UVIO_IOCTL_LOCK_SECRETS_NR => Some("Lock Secret Store"), _ => None, } } diff -Nru s390-tools-2.31.0/rust/pv_core/src/uvdevice/secret_list.rs s390-tools-2.33.1/rust/pv_core/src/uvdevice/secret_list.rs --- s390-tools-2.31.0/rust/pv_core/src/uvdevice/secret_list.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/uvdevice/secret_list.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 
2024 + +use crate::assert_size; +use crate::{misc::to_u16, uv::ListCmd, uvdevice::UvCmd, Error, Result}; +use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; +use serde::{Deserialize, Serialize, Serializer}; +use std::{ + fmt::Display, + io::{Cursor, Read, Seek, Write}, + slice::Iter, + vec::IntoIter, +}; +use zerocopy::{AsBytes, FromBytes, FromZeroes, U16, U32}; + +/// The 32 byte long ID of an UV secret +/// +/// (de)serializes itself in/from a hex-string +#[repr(C)] +#[derive(PartialEq, Eq, AsBytes, FromZeroes, FromBytes, Debug, Clone)] +pub struct SecretId([u8; Self::ID_SIZE]); +assert_size!(SecretId, SecretId::ID_SIZE); + +impl SecretId { + /// Size in bytes of the [`SecretId`] + pub const ID_SIZE: usize = 32; + + /// Create a [`SecretId`] forom a buffer. + pub fn from(buf: [u8; Self::ID_SIZE]) -> Self { + buf.into() + } +} + +impl Serialize for SecretId { + fn serialize(&self, ser: S) -> std::result::Result + where + S: Serializer, + { + // calls Display at one point + ser.serialize_str(&self.to_string()) + } +} + +impl<'de> Deserialize<'de> for SecretId { + fn deserialize(de: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + de_gsid(de).map(|id| id.into()) + } +} + +impl Display for SecretId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut s = String::with_capacity(32 * 2 + 2); + s.push_str("0x"); + let s = self.0.iter().fold(s, |acc, e| acc + &format!("{e:02x}")); + write!(f, "{s}") + } +} + +impl From<[u8; SecretId::ID_SIZE]> for SecretId { + fn from(value: [u8; SecretId::ID_SIZE]) -> Self { + Self(value) + } +} + +impl AsRef<[u8]> for SecretId { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// A secret in a [`SecretList`] +#[repr(C)] +#[derive(Debug, PartialEq, Eq, AsBytes, FromZeroes, FromBytes, Serialize)] +pub struct SecretEntry { + #[serde(serialize_with = "ser_u16")] + index: U16, + #[serde(serialize_with = "ser_u16")] + stype: U16, + #[serde(serialize_with = "ser_u32")] + len: U32, + 
#[serde(skip)] + res_8: u64, + id: SecretId, +} +assert_size!(SecretEntry, SecretEntry::STRUCT_SIZE); + +impl SecretEntry { + const STRUCT_SIZE: usize = 0x30; + + /// Create a new entry for a [`SecretList`]. + /// + /// The content of this entry will very likely not represent the status of the guest in the + /// Ultravisor. Use of [`SecretList::decode`] in any non-test environments is encuraged. + pub fn new(index: u16, stype: ListableSecretType, id: SecretId, secret_len: u32) -> Self { + Self { + index: index.into(), + stype: stype.into(), + len: secret_len.into(), + res_8: 0, + id, + } + } + + /// Returns the index of this [`SecretEntry`]. + pub fn index(&self) -> u16 { + self.index.get() + } + + /// Returns the secret type of this [`SecretEntry`]. + pub fn stype(&self) -> ListableSecretType { + self.stype.into() + } + + /// Returns a reference to the id of this [`SecretEntry`]. + /// + /// The slice is guaranteed to be 32 bytes long. + /// ```rust + /// # use s390_pv_core::uv::SecretEntry; + /// # use zerocopy::FromZeroes; + /// # let secr = SecretEntry::new_zeroed(); + /// # assert_eq!(secr.id().len(), 32); + /// ``` + pub fn id(&self) -> &[u8] { + self.id.as_ref() + } +} + +impl Display for SecretEntry { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let stype: ListableSecretType = self.stype.into(); + writeln!(f, "{} {}:", self.index, stype)?; + write!(f, " ")?; + for b in self.id.as_ref() { + write!(f, "{b:02x}")?; + } + Ok(()) + } +} + +/// List of secrets used to parse the [`crate::uv::ListCmd`] result. 
+/// +/// The list should not hold more than 0xffffffff elements +#[derive(Debug, PartialEq, Eq, Serialize)] +pub struct SecretList { + total_num_secrets: usize, + secrets: Vec, +} + +impl<'a> IntoIterator for &'a SecretList { + type IntoIter = Iter<'a, SecretEntry>; + type Item = &'a SecretEntry; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for SecretList { + type IntoIter = IntoIter; + type Item = SecretEntry; + + fn into_iter(self) -> Self::IntoIter { + self.secrets.into_iter() + } +} + +impl FromIterator for SecretList { + fn from_iter>(iter: T) -> Self { + let secrets: Vec<_> = iter.into_iter().collect(); + let total_num_secrets = secrets.len() as u16; + Self::new(total_num_secrets, secrets) + } +} + +impl SecretList { + /// Creates a new SecretList. + /// + /// The content of this list will very likely not represent the status of the guest in the + /// Ultravisor. Use of [`SecretList::decode`] in any non-test environments is encuraged. + pub fn new(total_num_secrets: u16, secrets: Vec) -> Self { + Self { + total_num_secrets: total_num_secrets as usize, + secrets, + } + } + + /// Returns an iterator over the slice. + /// + /// The iterator yields all secret entries from start to end. + pub fn iter(&self) -> Iter<'_, SecretEntry> { + self.secrets.iter() + } + + /// Returns the length of this [`SecretList`]. + pub fn len(&self) -> usize { + self.secrets.len() + } + + /// Returns `true` if the [`SecretList`] contains no [`SecretEntry`]. + pub fn is_empty(&self) -> bool { + self.secrets.is_empty() + } + + /// Reports the number of secrets stored in UV. 
+ /// + /// This number may be not equal to the provided number of [`SecretEntry`] + pub fn total_num_secrets(&self) -> usize { + self.total_num_secrets + } + + /// Encodes the list in the same binary format the UV would do + pub fn encode(&self, w: &mut T) -> Result<()> { + let num_s = to_u16(self.secrets.len()).ok_or(Error::ManySecrets)?; + w.write_u16::(num_s)?; + w.write_u16::( + self.total_num_secrets + .try_into() + .map_err(|_| Error::ManySecrets)?, + )?; + w.write_all(&[0u8; 12])?; + for secret in &self.secrets { + w.write_all(secret.as_bytes())?; + } + w.flush().map_err(Error::Io) + } + + /// Decodes the list from the binary format of the UV into this internal representation + pub fn decode(r: &mut R) -> std::io::Result { + let num_s = r.read_u16::()?; + let total_num_secrets = r.read_u16::()? as usize; + let mut v: Vec = Vec::with_capacity(num_s as usize); + r.seek(std::io::SeekFrom::Current(12))?; // skip reserved bytes + let mut buf = [0u8; SecretEntry::STRUCT_SIZE]; + for _ in 0..num_s { + r.read_exact(&mut buf)?; + // cannot fail. 
buffer has the same size as the secret entry + let secr = SecretEntry::read_from(buf.as_slice()).unwrap(); + v.push(secr); + } + Ok(Self { + total_num_secrets, + secrets: v, + }) + } +} + +impl TryFrom for SecretList { + type Error = Error; + + fn try_from(mut list: ListCmd) -> Result { + SecretList::decode(&mut Cursor::new(list.data().unwrap())).map_err(Error::InvSecretList) + } +} + +impl Display for SecretList { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Total number of secrets: {}", self.total_num_secrets)?; + if !self.secrets.is_empty() { + writeln!(f)?; + } + for s in &self.secrets { + writeln!(f, "{s}")?; + } + Ok(()) + } +} + +fn ser_u32(v: &U32, ser: S) -> Result { + ser.serialize_u32(v.get()) +} + +fn ser_u16(v: &U16, ser: S) -> Result { + ser.serialize_u16(v.get()) +} + +/// Secret types that can appear in a [`SecretList`] +#[non_exhaustive] +#[derive(PartialEq, Eq, Debug)] +pub enum ListableSecretType { + /// Association Secret + Association, + /// Invalid secret type, that should never appear in a list + /// + /// 0 is reserved + /// 1 is Null secret, with no id and not listable + Invalid(u16), + /// Unknown secret type + Unknown(u16), +} + +impl ListableSecretType { + /// UV type id for an association secret + pub const ASSOCIATION: u16 = 0x0002; + /// UV type id for a null secret + pub const NULL: u16 = 0x0001; + const RESERVED_0: u16 = 0x0000; +} + +impl Display for ListableSecretType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Association => write!(f, "Association"), + Self::Invalid(n) => write!(f, "Invalid({n})"), + Self::Unknown(n) => write!(f, "Unknown({n})"), + } + } +} + +impl From> for ListableSecretType { + fn from(value: U16) -> Self { + match value.get() { + Self::RESERVED_0 => Self::Invalid(Self::RESERVED_0), + Self::NULL => Self::Invalid(Self::NULL), + Self::ASSOCIATION => ListableSecretType::Association, + n => Self::Unknown(n), + } + } +} + 
+impl From for U16 { + fn from(value: ListableSecretType) -> Self { + match value { + ListableSecretType::Association => ListableSecretType::ASSOCIATION, + ListableSecretType::Invalid(n) | ListableSecretType::Unknown(n) => n, + } + .into() + } +} + +fn de_gsid<'de, D>(de: D) -> Result<[u8; 32], D::Error> +where + D: serde::Deserializer<'de>, +{ + struct FieldVisitor; + + impl<'de> serde::de::Visitor<'de> for FieldVisitor { + type Value = [u8; SecretId::ID_SIZE]; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a `32 bytes long hexstring` prepended with 0x") + } + + fn visit_str(self, s: &str) -> Result + where + E: serde::de::Error, + { + if s.len() != SecretId::ID_SIZE * 2 + 2 { + return Err(serde::de::Error::invalid_length(s.len(), &self)); + } + let nb = s.strip_prefix("0x").ok_or_else(|| { + serde::de::Error::invalid_value(serde::de::Unexpected::Str(s), &self) + })?; + crate::misc::parse_hex(nb) + .try_into() + .map_err(|_| serde::de::Error::invalid_value(serde::de::Unexpected::Str(s), &self)) + } + } + de.deserialize_identifier(FieldVisitor) +} + +#[cfg(test)] +mod test { + + use serde_test::{assert_ser_tokens, assert_tokens, Token}; + + use super::*; + use std::io::{BufReader, BufWriter, Cursor}; + + #[test] + fn dump_secret_entry() { + const EXP: &[u8] = &[ + 0x00, 0x01, 0x00, 0x02, // idx + type + 0x00, 0x00, 0x00, 0x20, // len + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved + // id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]; + let s = SecretEntry { + index: 1.into(), + stype: 2.into(), + len: 32.into(), + res_8: 0, + id: SecretId::from([0; 32]), + }; + + assert_eq!(s.as_bytes(), EXP); + } + + #[test] + fn secret_list_dec() { + let buf = [ + 0x00u8, 0x01, // num secr stored + 0x01, 0x12, // total num secrets + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, // reserved + // secret + 0x00, 0x01, 0x00, 0x02, // idx + type + 0x00, 0x00, 0x00, 0x20, // len + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved + // id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]; + + let exp = SecretList { + total_num_secrets: 0x112, + secrets: vec![SecretEntry { + index: 1.into(), + stype: 2.into(), + len: 32.into(), + res_8: 0, + id: SecretId::from([0; 32]), + }], + }; + + let mut br = BufReader::new(Cursor::new(buf)); + let sl = SecretList::decode(&mut br).unwrap(); + assert_eq!(sl, exp); + } + + #[test] + fn secret_list_enc() { + const EXP: &[u8] = &[ + 0x00, 0x01, // num secr stored + 0x01, 0x12, // total num secrets + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, // reserved + // secret + 0x00, 0x01, 0x00, 0x02, // idx + type + 0x00, 0x00, 0x00, 0x20, // len + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved + // id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]; + + let sl = SecretList { + total_num_secrets: 0x112, + secrets: vec![SecretEntry { + index: 1.into(), + stype: 2.into(), + len: 32.into(), + res_8: 0, + id: SecretId::from([0; 32]), + }], + }; + + let mut buf = [0u8; 0x40]; + { + let mut bw = BufWriter::new(&mut buf[..]); + sl.encode(&mut bw).unwrap(); + } + println!("list: {sl:?}"); + assert_eq!(buf, EXP); + } + + #[test] + fn secret_entry_ser() { + let entry = SecretEntry::new_zeroed(); + + assert_ser_tokens( + &entry, + &[ + Token::Struct { + name: "SecretEntry", + len: (4), + }, + Token::String("index"), + Token::U16(0), + Token::String("stype"), + Token::U16(0), + Token::String("len"), + Token::U32(0), + 
Token::String("id"), + Token::String("0x0000000000000000000000000000000000000000000000000000000000000000"), + Token::StructEnd, + ], + ) + } + + #[test] + fn secret_id_serde() { + let id = SecretId::from([ + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, + 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, + 0x89, 0xab, 0xcd, 0xef, + ]); + assert_tokens( + &id, + &[Token::String( + "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + )], + ) + } +} diff -Nru s390-tools-2.31.0/rust/pv_core/src/uvdevice/secret.rs s390-tools-2.33.1/rust/pv_core/src/uvdevice/secret.rs --- s390-tools-2.31.0/rust/pv_core/src/uvdevice/secret.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/uvdevice/secret.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,27 +2,15 @@ // // Copyright IBM Corp. 2023 -use crate::{ - assert_size, - misc::to_u16, - request::{uvsecret::AddSecretMagic, MagicValue}, - uv::{uv_ioctl, UvCmd, UvDevice}, - Error, Result, PAGESIZE, -}; -use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; -use serde::{Serialize, Serializer}; -use std::{ - fmt::Display, - io::{Cursor, Read, Seek, Write}, - slice::Iter, - vec::IntoIter, -}; -use zerocopy::{AsBytes, FromBytes, FromZeroes, U16, U32}; +use super::ffi; +use crate::{request::MagicValue, uv::UvCmd, uvsecret::AddSecretMagic, Error, Result, PAGESIZE}; +use std::io::Read; /// _List Secrets_ Ultravisor command. /// /// The List Secrets Ultravisor call is used to list the /// secrets that are in the secret store for the current SE-guest. 
+#[derive(Debug)] pub struct ListCmd(Vec); impl ListCmd { fn with_size(size: usize) -> Self { @@ -42,14 +30,12 @@ } impl UvCmd for ListCmd { + const UV_IOCTL_NR: u8 = ffi::UVIO_IOCTL_LIST_SECRETS_NR; + fn data(&mut self) -> Option<&mut [u8]> { Some(self.0.as_mut_slice()) } - fn cmd(&self) -> u64 { - uv_ioctl(UvDevice::LIST_SECRET_NR) - } - fn rc_fmt(&self, _rc: u16, _rrc: u16) -> Option<&'static str> { None } @@ -59,6 +45,7 @@ /// /// The Add Secret Ultravisor-call is used to add a secret /// to the secret store for the current SE-guest. +#[derive(Debug)] pub struct AddCmd(Vec); impl AddCmd { @@ -72,6 +59,10 @@ let mut data = Vec::with_capacity(PAGESIZE); bin_add_secret_req.read_to_end(&mut data)?; + if data.len() > ffi::UVIO_ADD_SECRET_MAX_LEN { + return Err(Error::AscrbLarge); + } + if !AddSecretMagic::starts_with_magic(&data[..6]) { return Err(Error::NoAsrcb); } @@ -80,14 +71,12 @@ } impl UvCmd for AddCmd { + const UV_IOCTL_NR: u8 = ffi::UVIO_IOCTL_ADD_SECRET_NR; + fn data(&mut self) -> Option<&mut [u8]> { Some(&mut self.0) } - fn cmd(&self) -> u64 { - uv_ioctl(UvDevice::ADD_SECRET_NR) - } - fn rc_fmt(&self, rc: u16, _rrc: u16) -> Option<&'static str> { match rc { 0x0101 => Some("not allowed to modify the secret store"), @@ -114,11 +103,10 @@ /// all changes to the secret store. Upon successful /// completion of a Lock Secret Store Ultravisor-call, any /// request to modify the secret store will fail. +#[derive(Debug)] pub struct LockCmd; impl UvCmd for LockCmd { - fn cmd(&self) -> u64 { - uv_ioctl(UvDevice::LOCK_SECRET_NR) - } + const UV_IOCTL_NR: u8 = ffi::UVIO_IOCTL_LOCK_SECRETS_NR; fn rc_fmt(&self, rc: u16, _rrc: u16) -> Option<&'static str> { match rc { @@ -128,362 +116,3 @@ } } } - -/// List of secrets used to parse the [`crate::uv::ListCmd`] result. 
-/// -/// The list should not hold more than 0xffffffff elements -#[derive(Debug, PartialEq, Eq, Serialize)] -pub struct SecretList { - total_num_secrets: usize, - secrets: Vec, -} - -impl<'a> IntoIterator for &'a SecretList { - type Item = &'a SecretEntry; - type IntoIter = Iter<'a, SecretEntry>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for SecretList { - type Item = SecretEntry; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.secrets.into_iter() - } -} - -impl FromIterator for SecretList { - fn from_iter>(iter: T) -> Self { - let secrets: Vec<_> = iter.into_iter().collect(); - let total_num_secrets = secrets.len() as u16; - Self::new(total_num_secrets, secrets) - } -} - -impl SecretList { - /// Creates a new SecretList. - /// - /// The content of this list will very liekly not represent the status of the guest in the - /// Ultravisor. Use of [`SecretList::decode`] in any non-test environments is encuraged. - pub fn new(total_num_secrets: u16, secrets: Vec) -> Self { - Self { - total_num_secrets: total_num_secrets as usize, - secrets, - } - } - - /// Returns an iterator over the slice. - /// - /// The iterator yields all secret entries from start to end. - pub fn iter(&self) -> Iter<'_, SecretEntry> { - self.secrets.iter() - } - - /// Returns the length of this [`SecretList`]. - pub fn len(&self) -> usize { - self.secrets.len() - } - - /// Returns `true` if the [`SecretList`] contains no [`SecretEntry`]. - pub fn is_empty(&self) -> bool { - self.secrets.is_empty() - } - - /// Reports the number of secrets stored in UV. 
- /// - /// This number may be not equal to the provided number of [`SecretEntry`] - pub fn total_num_secrets(&self) -> usize { - self.total_num_secrets - } - - /// Encodes the list in the same binary format the UV would do - pub fn encode(&self, w: &mut T) -> Result<()> { - let num_s = to_u16(self.secrets.len()).ok_or(Error::ManySecrets)?; - w.write_u16::(num_s)?; - w.write_u16::( - self.total_num_secrets - .try_into() - .map_err(|_| Error::ManySecrets)?, - )?; - w.write_all(&[0u8; 12])?; - for secret in &self.secrets { - w.write_all(secret.as_bytes())?; - } - w.flush().map_err(Error::Io) - } - - /// Decodes the list from the binary format of the UV into this internal representation - pub fn decode(r: &mut R) -> std::io::Result { - let num_s = r.read_u16::()?; - let total_num_secrets = r.read_u16::()? as usize; - let mut v: Vec = Vec::with_capacity(num_s as usize); - r.seek(std::io::SeekFrom::Current(12))?; //skip reserved bytes - let mut buf = [0u8; SecretEntry::STRUCT_SIZE]; - for _ in 0..num_s { - r.read_exact(&mut buf)?; - //cannot fail. 
buffer has the same size as the secret entry - let secr = SecretEntry::read_from(buf.as_slice()).unwrap(); - v.push(secr); - } - Ok(Self { - total_num_secrets, - secrets: v, - }) - } -} - -impl TryFrom for SecretList { - type Error = Error; - fn try_from(mut list: ListCmd) -> Result { - SecretList::decode(&mut Cursor::new(list.data().unwrap())).map_err(Error::InvSecretList) - } -} - -impl Display for SecretList { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Total number of secrets: {}", self.total_num_secrets)?; - if !self.secrets.is_empty() { - writeln!(f)?; - } - for s in &self.secrets { - writeln!(f, "{s}")?; - } - Ok(()) - } -} - -fn ser_u32(v: &U32, ser: S) -> Result { - ser.serialize_u32(v.get()) -} - -fn ser_u16(v: &U16, ser: S) -> Result { - ser.serialize_u16(v.get()) -} - -/// Secret types that can appear in a [`SecretList`] -#[non_exhaustive] -#[derive(PartialEq, Eq)] -pub enum ListableSecretType { - /// Association Secret - Association, - /// Invalid secret type, that should never appear in a list - /// - /// 0 is reserved - /// 1 is Null secret, with no id and not listable - Invalid(u16), - /// Unknown secret type - Unknown(u16), -} -impl ListableSecretType { - const RESERVED_0: u16 = 0x0000; - const NULL: u16 = 0x0001; - const ASSOCIATION: u16 = 0x0002; -} - -impl Display for ListableSecretType { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Association => write!(f, "Association"), - Self::Invalid(n) => write!(f, "Invalid({n})"), - Self::Unknown(n) => write!(f, "Unknown({n})"), - } - } -} - -impl From> for ListableSecretType { - fn from(value: U16) -> Self { - match value.get() { - Self::RESERVED_0 => Self::Invalid(Self::RESERVED_0), - Self::NULL => Self::Invalid(Self::NULL), - Self::ASSOCIATION => ListableSecretType::Association, - n => Self::Unknown(n), - } - } -} - -impl From for U16 { - fn from(value: ListableSecretType) -> Self { - match value { - 
ListableSecretType::Association => ListableSecretType::ASSOCIATION, - ListableSecretType::Invalid(n) | ListableSecretType::Unknown(n) => n, - } - .into() - } -} - -#[doc(hidden)] -pub const SECRET_ID_SIZE: usize = 32; - -#[doc(hidden)] -pub fn ser_gsid(id: &[u8; SECRET_ID_SIZE], ser: S) -> Result -where - S: serde::Serializer, -{ - let mut s = String::with_capacity(32 * 2 + 2); - s.push_str("0x"); - let s = id.iter().fold(s, |acc, e| acc + &format!("{e:02x}")); - ser.serialize_str(&s) -} - -/// A secret in a [`SecretList`] -#[repr(C)] -#[derive(Debug, PartialEq, Eq, AsBytes, FromZeroes, FromBytes, Serialize)] -pub struct SecretEntry { - #[serde(serialize_with = "ser_u16")] - index: U16, - #[serde(serialize_with = "ser_u16")] - stype: U16, - #[serde(serialize_with = "ser_u32")] - len: U32, - #[serde(skip)] - res_8: u64, - #[serde(serialize_with = "ser_gsid")] - id: [u8; SECRET_ID_SIZE], -} -assert_size!(SecretEntry, SecretEntry::STRUCT_SIZE); - -impl SecretEntry { - const STRUCT_SIZE: usize = 0x30; - - /// Create a new entry for a [`SecretList`]. - /// - /// The content of this entry will very liekly not represent the status of the guest in the - /// Ultravisor. Use of [`SecretList::decode`] in any non-test environments is encuraged. - pub fn new(index: u16, stype: ListableSecretType, id: [u8; 32], secret_len: u32) -> Self { - Self { - index: index.into(), - stype: stype.into(), - len: secret_len.into(), - res_8: 0, - id, - } - } - - /// Returns the index of this [`SecretEntry`]. - pub fn index(&self) -> u16 { - self.index.get() - } - - /// Returns the secret type of this [`SecretEntry`]. - pub fn stype(&self) -> ListableSecretType { - self.stype.into() - } - - /// Returns a reference to the id of this [`SecretEntry`]. 
- pub fn id(&self) -> &[u8] { - &self.id - } -} - -impl Display for SecretEntry { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let stype: ListableSecretType = self.stype.into(); - writeln!(f, "{} {}:", self.index, stype)?; - write!(f, " ")?; - for b in self.id { - write!(f, "{b:02x}")?; - } - Ok(()) - } -} - -#[cfg(test)] -mod test { - - use super::*; - use std::io::{BufReader, BufWriter, Cursor}; - - #[test] - fn dump_secret_entry() { - const EXP: &[u8] = &[ - 0x00, 0x01, 0x00, 0x02, //idx + type - 0x00, 0x00, 0x00, 0x20, //len - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved - // id - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - ]; - let s = SecretEntry { - index: 1.into(), - stype: 2.into(), - len: 32.into(), - res_8: 0, - id: [0; 32], - }; - - assert_eq!(s.as_bytes(), EXP); - } - - #[test] - fn secret_list_dec() { - let buf = [ - 0x00u8, 0x01, // num secr stored - 0x01, 0x12, // total num secrets - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //reserved - // secret - 0x00, 0x01, 0x00, 0x02, //idx + type - 0x00, 0x00, 0x00, 0x20, //len - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved - // id - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - ]; - - let exp = SecretList { - total_num_secrets: 0x112, - secrets: vec![SecretEntry { - index: 1.into(), - stype: 2.into(), - len: 32.into(), - res_8: 0, - id: [0; 32], - }], - }; - - let mut br = BufReader::new(Cursor::new(buf)); - let sl = SecretList::decode(&mut br).unwrap(); - assert_eq!(sl, exp); - } - - #[test] - fn secret_list_enc() { - const EXP: &[u8] = &[ - 0x00, 0x01, // num secr stored - 0x01, 0x12, // total num secrets - 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //reserved - // secret - 0x00, 0x01, 0x00, 0x02, //idx + type - 0x00, 0x00, 0x00, 0x20, //len - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved - // id - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - ]; - - let sl = SecretList { - total_num_secrets: 0x112, - secrets: vec![SecretEntry { - index: 1.into(), - stype: 2.into(), - len: 32.into(), - res_8: 0, - id: [0; 32], - }], - }; - - let mut buf = [0u8; 0x40]; - { - let mut bw = BufWriter::new(&mut buf[..]); - sl.encode(&mut bw).unwrap(); - } - println!("list: {sl:?}"); - assert_eq!(buf, EXP); - } -} diff -Nru s390-tools-2.31.0/rust/pv_core/src/uvdevice/test.rs s390-tools-2.33.1/rust/pv_core/src/uvdevice/test.rs --- s390-tools-2.31.0/rust/pv_core/src/uvdevice/test.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/uvdevice/test.rs 2024-05-28 08:26:36.000000000 +0200 @@ -38,6 +38,7 @@ self.exp_cmd = cmd; self } + pub fn set_mdfy(&mut self, mdfy: F) -> &mut Self where F: FnMut(&mut ffi::uvio_ioctl_cb) -> c_int + 'static + Send + Sync, @@ -45,6 +46,7 @@ self.modify = Box::new(mdfy); self } + pub fn reset(&mut self) -> bool { let old = self.called; self.called = false; @@ -86,6 +88,7 @@ ); self } + fn size_eq(&self, exp: u32) -> &Self { assert_eq!( self.argument_len, exp, @@ -94,10 +97,12 @@ ); self } + fn set_rc(&mut self, rc: u16) -> &mut Self { self.uv_rc = rc; self } + fn set_rrc(&mut self, rrc: u16) -> &mut Self { self.uv_rrc = rrc; self @@ -107,12 +112,16 @@ const TEST_CMD: u64 = 17; struct TestCmd(Option>); impl UvCmd for TestCmd { + const UV_IOCTL_NR: u8 = 42; + fn cmd(&self) -> u64 { TEST_CMD } + fn rc_fmt(&self, _rc: u16, _rrc: u16) -> Option<&'static str> { None } + fn data(&mut self) -> Option<&mut [u8]> { match &mut self.0 { None => None, @@ -122,9 +131,10 @@ } 
impl UvDevice { - ///use some random fd for `uvdevice` its OK, as the ioctl is mocked and never touches the passed file + /// use some random fd for `uvdevice` its OK, as the ioctl is mocked and never touches the + /// passed file fn test_dev() -> Self { - UvDevice(unsafe { std::fs::File::from_raw_fd(17) }) + UvDevice(unsafe { File::from_raw_fd(17) }) } } diff -Nru s390-tools-2.31.0/rust/pv_core/src/uvdevice.rs s390-tools-2.33.1/rust/pv_core/src/uvdevice.rs --- s390-tools-2.31.0/rust/pv_core/src/uvdevice.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/uvdevice.rs 2024-05-28 08:26:36.000000000 +0200 @@ -23,14 +23,17 @@ mod ffi; mod info; mod test; -pub use ffi::uv_ioctl; +pub(crate) use ffi::uv_ioctl; +pub mod attest; pub mod secret; +pub mod secret_list; pub use info::UvDeviceInfo; -#[allow(dead_code)] //TODO rm when pv learns attestation + +/// User data for the attestation UVC pub type AttestationUserData = [u8; ffi::UVIO_ATT_USER_DATA_LEN]; -///Configuration Unique Id of the Secure Execution guest +/// Configuration Unique Id of the Secure Execution guest pub type ConfigUid = [u8; ffi::UVIO_ATT_UID_LEN]; /// Bitflags as used by the Ultravisor in MSB0 ordering @@ -58,7 +61,7 @@ debug!("ioctl resulted with {cb:?}"); match rc { 0 => Ok(()), - //NOTE io::Error handles all errnos ioctl uses + // NOTE io::Error handles all errnos ioctl uses _ => Err(std::io::Error::last_os_error().into()), } } @@ -81,16 +84,28 @@ } /// Ultravisor Command. +/// +/// Implementers provide information on the specific Ultravisor command metadata and content. +/// API users do not need to interact directly with any functions provided by this trait and refer +/// to the specialized access and tweaking functionalities of the specific command. pub trait UvCmd { + /// The UV IOCTL number of the UV call + const UV_IOCTL_NR: u8; /// Returns the uvdevice IOCTL command that his command uses. 
/// /// # Returns - /// The IOCTL cmd for this UvCmd usually sth like `uv_ioctl!(CMD_NR)` - fn cmd(&self) -> u64; + /// + /// The IOCTL cmd for this UvCmd usually something like `uv_ioctl!(CMD_NR)` + fn cmd(&self) -> u64 { + uv_ioctl(Self::UV_IOCTL_NR) + } /// Converts UV return codes into human readable error messages /// - /// no need to handle `0x0000, 0x0001, 0x0002, 0x0005, 0x0030, 0x0031, 0x0032, 0x0100` + /// # Note for implementations + /// + /// No need to handle `0x0000, 0x0001, 0x0002, 0x0005, 0x0030, 0x0031, 0x0032, 0x0100` fn rc_fmt(&self, rc: u16, rrc: u16) -> Option<&'static str>; + /// Returns data used by this command if available. fn data(&mut self) -> Option<&mut [u8]> { None @@ -147,28 +162,37 @@ RC_MORE_DATA = UvDevice::RC_MORE_DATA, } -/// The UvDevice is a (virtual) device on s390 machines to send Ultravisor commands from userspace. +/// The UvDevice is a (virtual) device on s390 machines to send Ultravisor commands(UVCs) from +/// userspace. +/// +/// On s390 machines with Ultravisor enabled (Secure Execution guest & hosts) the device at +/// `/dev/uv` will accept ioctls. +/// +/// # Example +/// +/// Use a implementation of [`UvCmd`] to send a specific Ultravisor command to the uvdevice to +/// forward to Firmware. 
+/// +/// ```rust,no_run +/// # use s390_pv_core::uv::UvDevice; +/// # use s390_pv_core::uv::AddCmd; +/// # use std::fs::File; +/// # fn main() -> s390_pv_core::Result<()> { +/// let mut file = File::open("request")?; +/// let uv = UvDevice::open()?; +/// let mut cmd = AddCmd::new(&mut file)?; +/// uv.send_cmd(&mut cmd)?; +/// # Ok(()) +/// # } +/// // do something with the result +/// ``` +#[derive(Debug)] pub struct UvDevice(File); impl UvDevice { - const RC_SUCCESS: u16 = 0x0001; - const RC_MORE_DATA: u16 = 0x0100; const PATH: &'static str = "/dev/uv"; - - /// IOCTL number for the info UVC - pub const INFO_NR: u8 = ffi::UVIO_IOCTL_UVDEV_INFO_NR; - /// IOCTL number for the attestation UVC - pub const ATTESTATION_NR: u8 = ffi::UVIO_IOCTL_ATT_NR; - /// IOCTL number for the add secret UVC - pub const ADD_SECRET_NR: u8 = ffi::UVIO_IOCTL_ADD_SECRET_NR; - /// IOCTL number for the list secret UVC - pub const LIST_SECRET_NR: u8 = ffi::UVIO_IOCTL_LIST_SECRETS_NR; - /// IOCTL number for the lock ksecret UVC - pub const LOCK_SECRET_NR: u8 = ffi::UVIO_IOCTL_LOCK_SECRETS_NR; - /// Maximum length for add-secret requests - pub const ADD_SECRET_MAX_LEN: usize = ffi::UVIO_ADD_SECRET_MAX_LEN; - /// Size of the buffer for list secret requests - pub const LIST_SECRETS_LEN: usize = ffi::UVIO_LIST_SECRETS_LEN; + const RC_MORE_DATA: u16 = 0x0100; + const RC_SUCCESS: u16 = 0x0001; /// Open the uvdevice located at `/dev/uv` /// @@ -183,7 +207,7 @@ .open(UvDevice::PATH) .map_err(|e| Error::FileAccess { ty: FileAccessErrorType::Open, - path: (UvDevice::PATH).to_string(), + path: (UvDevice::PATH).into(), source: e, })?, )) @@ -192,12 +216,15 @@ /// Send an Ultravisor Command via this uvdevice. /// /// This works by sending an IOCTL to the uvdevice. + /// /// # Errors /// /// This function will return an error if the IOCTL fails or the Ultravisor does not report /// a success. 
+ /// /// # Returns - /// [`UvcSuccess`] if the UVC ececuted successfully + /// + /// [`UvcSuccess`] if the UVC executed successfully pub fn send_cmd(&self, cmd: &mut C) -> Result { let mut cb = IoctlCb::new(cmd.data())?; ioctl_raw(self.0.as_raw_fd(), cmd.cmd(), &mut cb)?; diff -Nru s390-tools-2.31.0/rust/pv_core/src/uvsecret.rs s390-tools-2.33.1/rust/pv_core/src/uvsecret.rs --- s390-tools-2.31.0/rust/pv_core/src/uvsecret.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pv_core/src/uvsecret.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,35 +2,28 @@ // // Copyright IBM Corp. 2023 +use crate::assert_size; use crate::{ - misc::to_u16, request::{MagicValue, RequestMagic}, - uv::{ListCmd, UvCmd}, Error, Result, }; use byteorder::{BigEndian, ByteOrder}; -use std::{ - fmt::Display, - io::{Cursor, Read, Seek, Write}, - mem::size_of, -}; -use utils::{assert_size, static_assert}; -use zerocopy::{AsBytes, FromBytes, U16, U32}; +use std::{fmt::Display, mem::size_of}; +use zerocopy::{AsBytes, U16}; -/// The magic value used to identify an add-secret request`] +/// The magic value used to identify an ['crate:AddSecretRequest'] /// /// The magic value is ASCII: /// ```rust -/// # use pv_core::request::uvsecret::AddSecretMagic; -/// # use pv_core::request::MagicValue; +/// # use s390_pv_core::secret::AddSecretMagic; +/// # use s390_pv_core::request::MagicValue; /// # fn main() { /// # let magic = /// b"asrcbM" /// # ; /// # assert!(AddSecretMagic::starts_with_magic(magic)); /// # } -///``` -/// +/// ``` #[repr(C)] #[derive(Debug, Clone, Copy, PartialEq, Eq, AsBytes)] pub struct AddSecretMagic { @@ -56,7 +49,7 @@ /// Try to convert from a byte slice. /// - /// Retuns [`None`] if the byte slice does not contain a valid magic value variant. + /// Returns [`None`] if the byte slice does not contain a valid magic value variant. 
pub fn try_from_bytes(bytes: &[u8]) -> Result { if !Self::starts_with_magic(bytes) || bytes.len() < size_of::() { return Err(Error::NoAsrcb); @@ -70,8 +63,8 @@ /// Returns the [`UserDataType`] of this [`AddSecretMagic`]. pub fn kind(&self) -> UserDataType { - // Panic: Will never panic. The value is cheched during construcion of the object for - // beeing one of the enum values. + // Panic: Will never panic. The value is checked during construction of + // the object for being one of the enum values. self.kind.get().try_into().unwrap() } } @@ -84,11 +77,11 @@ Null = 0x0000, /// Arbitrary user data (max 512 bytes) Unsigned = 0x0001, - /// User data message signed with an Ec key, (max 256 byte) + /// User data message signed with an EC key, (max 256 byte) SgnEcSECP521R1 = 0x0002, - /// User data message signature with a Rsa key of 2048 bit size, (max 256 byte) + /// User data message signature with a RSA key of 2048 bit size, (max 256 byte) SgnRsa2048 = 0x0003, - /// User data message signature with a Rsa key of 3072 bit size, (max 128 byte) + /// User data message signature with a RSA key of 3072 bit size, (max 128 byte) SgnRsa3072 = 0x0004, } @@ -153,10 +146,8 @@ #[cfg(test)] mod test { use crate::{ - request::{ - uvsecret::{AddSecretMagic, UserDataType}, - MagicValue, - }, + request::MagicValue, + secret::{AddSecretMagic, UserDataType}, Error, }; diff -Nru s390-tools-2.31.0/rust/pvsecret/build.rs s390-tools-2.33.1/rust/pvsecret/build.rs --- s390-tools-2.31.0/rust/pvsecret/build.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/build.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 +// it under the terms of the MIT license. See LICENSE for details. 
+ +use clap_complete::{generate_to, Shell}; +use std::env; +use std::io::Error; + +include!("src/cli.rs"); + +fn main() -> Result<(), Error> { + let outdir = env::var_os("OUT_DIR").unwrap(); + let crate_name = env!("CARGO_PKG_NAME"); + let mut cmd = CliOptions::command(); + for &shell in Shell::value_variants() { + generate_to(shell, &mut cmd, crate_name, &outdir)?; + } + + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-changed=src/cli.rs"); + println!("cargo:rerun-if-changed=../utils/src/cli.rs"); + Ok(()) +} diff -Nru s390-tools-2.31.0/rust/pvsecret/Cargo.toml s390-tools-2.33.1/rust/pvsecret/Cargo.toml --- s390-tools-2.31.0/rust/pvsecret/Cargo.toml 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/Cargo.toml 2024-05-28 08:26:36.000000000 +0200 @@ -1,14 +1,21 @@ [package] name = "pvsecret" -version = "0.9.0" +version = "0.10.0" edition.workspace = true license.workspace = true [dependencies] anyhow = { version = "1.0.70", features = ["std"] } -clap = { version ="4", features = ["derive", "wrap_help"]} +clap = { version ="4.1", features = ["derive", "wrap_help"]} log = { version = "0.4.6", features = ["std", "release_max_level_debug"] } serde_yaml = "0.9" -pv = { path = "../pv" } +pv = { path = "../pv" , package = "s390_pv" } +utils = { path = "../utils"} + +[build-dependencies] +clap = { version ="4.1", features = ["derive", "wrap_help"]} +clap_complete = "4.1" +log = { version = "0.4", features = ["std", "release_max_level_debug"] } + utils = { path = "../utils" } diff -Nru s390-tools-2.31.0/rust/pvsecret/man/pvsecret.1 s390-tools-2.33.1/rust/pvsecret/man/pvsecret.1 --- s390-tools-2.31.0/rust/pvsecret/man/pvsecret.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/man/pvsecret.1 2024-05-28 08:26:36.000000000 +0200 @@ -1,9 +1,9 @@ -.\" Copyright 2023 IBM Corp. +.\" Copyright 2023, 2024 IBM Corp. 
.\" s390-tools is free software; you can redistribute it and/or modify .\" it under the terms of the MIT license. See LICENSE for details. .\" -.TH pvsecret 1 "2024-01-30" "s390-tools" "UV-Secret Manual" +.TH pvsecret 1 "2024-05-21" "s390-tools" "UV-Secret Manual" .nh .ad l .SH NAME @@ -33,6 +33,42 @@ information. For all certificates, revocation lists, and host-key documents, both the PEM and DER input formats are supported. +.SH "PVSECRET COMMANDS" +.PP + +\fBcreate\fR +.RS 4 +Create a new add-secret request +.RE + +.PP + +\fBadd\fR +.RS 4 +Perform an add-secret request (s390x only) +.RE + +.PP + +\fBlock\fR +.RS 4 +Lock the secret-store (s390x only) +.RE + +.PP + +\fBlist\fR +.RS 4 +List all ultravisor secrets (s390x only) +.RE + +.PP + +\fBverify\fR +.RS 4 +Verify that an add-secret request is sane +.RE + .SH OPTIONS .PP \-v, \-\-verbose @@ -46,6 +82,12 @@ Print version information and exit. .RE .RE +.PP +\-h, \-\-help +.RS 4 +Print help. +.RE +.RE .SH EXAMPLES .PP @@ -56,7 +98,7 @@ .PP .nf .fam C - trusted:~$ pvsecret create -k hkd.crt --cert CA.crt --cert ibmsk.crt --hdr pvimage -o addsecreq.bin association EXAMPLE + trusted:~$ pvsecret create \-k hkd.crt \-\-cert CA.crt \-\-cert ibmsk.crt \-\-hdr pvimage \-o addsecreq.bin association EXAMPLE Successfully generated the request Successfully wrote association info to 'EXAMPLE.yaml' .fam T diff -Nru s390-tools-2.31.0/rust/pvsecret/man/pvsecret-add.1 s390-tools-2.33.1/rust/pvsecret/man/pvsecret-add.1 --- s390-tools-2.31.0/rust/pvsecret/man/pvsecret-add.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/man/pvsecret-add.1 2024-05-28 08:26:36.000000000 +0200 @@ -1,9 +1,9 @@ -.\" Copyright 2023 IBM Corp. +.\" Copyright 2023, 2024 IBM Corp. .\" s390-tools is free software; you can redistribute it and/or modify .\" it under the terms of the MIT license. See LICENSE for details. 
.\" -.TH pvsecret-add 1 "2024-01-30" "s390-tools" "UV-Secret Manual" +.TH pvsecret-add 1 "2024-05-21" "s390-tools" "UV-Secret Manual" .nh .ad l .SH NAME @@ -26,6 +26,12 @@ .RE .RE +.PP +\-h, \-\-help +.RS 4 +Print help. +.RE +.RE .SH "SEE ALSO" .sp diff -Nru s390-tools-2.31.0/rust/pvsecret/man/pvsecret-create.1 s390-tools-2.33.1/rust/pvsecret/man/pvsecret-create.1 --- s390-tools-2.31.0/rust/pvsecret/man/pvsecret-create.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/man/pvsecret-create.1 2024-05-28 08:26:36.000000000 +0200 @@ -1,9 +1,9 @@ -.\" Copyright 2023 IBM Corp. +.\" Copyright 2023, 2024 IBM Corp. .\" s390-tools is free software; you can redistribute it and/or modify .\" it under the terms of the MIT license. See LICENSE for details. .\" -.TH pvsecret-create 1 "2024-01-30" "s390-tools" "UV-Secret Manual" +.TH pvsecret-create 1 "2024-05-21" "s390-tools" "UV-Secret Manual" .nh .ad l .SH NAME @@ -26,6 +26,21 @@ \fB--hdr\fR. Optionally, the request can be bound to a specific instance when bound to the Configuration Unique ID from \fBpvattest\fR using \fB--cuid\fR +.SH "PVSECRET CREATE COMMANDS" +.PP + +\fBmeta\fR +.RS 4 +Create a meta secret +.RE + +.PP + +\fBassociation\fR +.RS 4 +Create an association secret +.RE + .SH OPTIONS .PP \-k, \-\-host-key-document @@ -133,8 +148,8 @@ Use the content of FILE as the Configuration Unique ID. The file must contain exactly 128 bit of data or a yaml with a `cuid` entry. If specified, the value must match the Config-UID from the attestation result of that guest. If not -specified, the CUID will be ignored by the Ultravisor during the verification -of the request. +specified, the CUID will be ignored by the Ultravisor during the verification of +the request. .RE .RE .PP @@ -169,8 +184,8 @@ .PP \-\-user-sign-key .RS 4 -Use the content of FILE as user signing key. Adds a signature defined calculated -from the key in to the add-secret request. 
The file must be in DER or PEM +Use the content of FILE as user signing key. Adds a signature calculated from +the key in to the add-secret request. The file must be in DER or PEM format containing a private key. Supported are RSA 2048 & 3072-bit and EC(secp521r1) keys. The firmware ignores the content, but the request tag protects the signature. The user-signing key signs the request. The location of @@ -179,6 +194,12 @@ Optional. No signature by default. .RE .RE +.PP +\-h, \-\-help +.RS 4 +Print help. +.RE +.RE .SH "SEE ALSO" .sp diff -Nru s390-tools-2.31.0/rust/pvsecret/man/pvsecret-create-association.1 s390-tools-2.33.1/rust/pvsecret/man/pvsecret-create-association.1 --- s390-tools-2.31.0/rust/pvsecret/man/pvsecret-create-association.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/man/pvsecret-create-association.1 2024-05-28 08:26:36.000000000 +0200 @@ -1,9 +1,9 @@ -.\" Copyright 2023 IBM Corp. +.\" Copyright 2023, 2024 IBM Corp. .\" s390-tools is free software; you can redistribute it and/or modify .\" it under the terms of the MIT license. See LICENSE for details. .\" -.TH pvsecret-create-association 1 "2024-01-30" "s390-tools" "UV-Secret Manual" +.TH pvsecret-create-association 1 "2024-05-21" "s390-tools" "UV-Secret Manual" .nh .ad l .SH NAME @@ -49,6 +49,12 @@ \fB--input-secret\fR. Destroy the secret when it is not used anymore. .RE .RE +.PP +\-h, \-\-help +.RS 4 +Print help. +.RE +.RE .SH "SEE ALSO" .sp diff -Nru s390-tools-2.31.0/rust/pvsecret/man/pvsecret-list.1 s390-tools-2.33.1/rust/pvsecret/man/pvsecret-list.1 --- s390-tools-2.31.0/rust/pvsecret/man/pvsecret-list.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/man/pvsecret-list.1 2024-05-28 08:26:36.000000000 +0200 @@ -1,9 +1,9 @@ -.\" Copyright 2023 IBM Corp. +.\" Copyright 2023, 2024 IBM Corp. .\" s390-tools is free software; you can redistribute it and/or modify .\" it under the terms of the MIT license. See LICENSE for details. 
.\" -.TH pvsecret-list 1 "2024-01-30" "s390-tools" "UV-Secret Manual" +.TH pvsecret-list 1 "2024-05-21" "s390-tools" "UV-Secret Manual" .nh .ad l .SH NAME @@ -43,6 +43,12 @@ .RE .RE +.PP +\-h, \-\-help +.RS 4 +Print help. +.RE +.RE .SH "SEE ALSO" .sp diff -Nru s390-tools-2.31.0/rust/pvsecret/man/pvsecret-lock.1 s390-tools-2.33.1/rust/pvsecret/man/pvsecret-lock.1 --- s390-tools-2.31.0/rust/pvsecret/man/pvsecret-lock.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/man/pvsecret-lock.1 2024-05-28 08:26:36.000000000 +0200 @@ -1,9 +1,9 @@ -.\" Copyright 2023 IBM Corp. +.\" Copyright 2024 IBM Corp. .\" s390-tools is free software; you can redistribute it and/or modify .\" it under the terms of the MIT license. See LICENSE for details. .\" -.TH pvsecret-lock 1 "2024-01-30" "s390-tools" "UV-Secret Manual" +.TH pvsecret-lock 1 "2024-05-15" "s390-tools" "UV-Secret Manual" .nh .ad l .SH NAME diff -Nru s390-tools-2.31.0/rust/pvsecret/man/pvsecret-verify.1 s390-tools-2.33.1/rust/pvsecret/man/pvsecret-verify.1 --- s390-tools-2.31.0/rust/pvsecret/man/pvsecret-verify.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/man/pvsecret-verify.1 2024-05-28 08:26:36.000000000 +0200 @@ -1,9 +1,9 @@ -.\" Copyright 2023 IBM Corp. +.\" Copyright 2023, 2024 IBM Corp. .\" s390-tools is free software; you can redistribute it and/or modify .\" it under the terms of the MIT license. See LICENSE for details. .\" -.TH pvsecret-verify 1 "2024-01-30" "s390-tools" "UV-Secret Manual" +.TH pvsecret-verify 1 "2024-05-21" "s390-tools" "UV-Secret Manual" .nh .ad l .SH NAME @@ -108,6 +108,12 @@ [default: '-'] .RE .RE +.PP +\-h, \-\-help +.RS 4 +Print help. 
+.RE +.RE .SH EXAMPLES .PP @@ -115,7 +121,7 @@ .PP .RS .IP trusted:~$ 12 -pvsecret create -k hkd.crt --cert CA.crt --cert ibmsk.crt --hdr pvimage -o addsecreq.bin --user-data user_data --user-sign-key usr_sgn_key.priv.pem association EXAMPLE +pvsecret create \-k hkd.crt \-\-cert CA.crt \-\-cert ibmsk.crt \-\-hdr pvimage \-o addsecreq.bin \-\-user\-data user_data \-\-user\-sign\-key usr_sgn_key.priv.pem association EXAMPLE .RE .RS Successfully generated the request @@ -123,11 +129,11 @@ Successfully wrote association info to 'EXAMPLE.yaml' .RE -For example, on the SE-guest, perform \fIverify\fP on the request to verify the user-signature and the saneness of the request. On success, The user-data is printed to stdout (if \fI--output\fP was not specified) and \fFSuccesfully verified the request.\fP is printed to stderr. +For example, on the SE-guest, perform \fIverify\fP on the request to verify the user-signature and the saneness of the request. On success, The user-data is printed to stdout (if \fI\-\-output\fP was not specified) and \fFSuccesfully verified the request.\fP is printed to stderr. .PP .RS .IP seguest:~$ 12 -pvsecret verify --user-cert user_cert.pem -o addsecreq.bin +pvsecret verify \-\-user\-cert user_cert.pem \-o addsecreq.bin .RE .RS some example user-data diff -Nru s390-tools-2.31.0/rust/pvsecret/README.md s390-tools-2.33.1/rust/pvsecret/README.md --- s390-tools-2.31.0/rust/pvsecret/README.md 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/README.md 2024-05-28 08:26:36.000000000 +0200 @@ -1,5 +1,5 @@ @@ -64,6 +64,12 @@ +`-h`, `--help` +
    +Print help +
+ + ## pvsecret create ### Synopsis `pvsecret create [OPTIONS] --host-key-document --hdr --output <--no-verify|--cert > ` @@ -196,8 +202,8 @@ Use the content of FILE as the Configuration Unique ID. The file must contain exactly 128 bit of data or a yaml with a `cuid` entry. If specified, the value must match the Config-UID from the attestation result of that guest. If not -specified, the CUID will be ignored by the Ultravisor during the verification -of the request. +specified, the CUID will be ignored by the Ultravisor during the verification of +the request. @@ -229,8 +235,8 @@ `--user-sign-key `
    -Use the content of FILE as user signing key. Adds a signature defined calculated -from the key in to the add-secret request. The file must be in DER or PEM +Use the content of FILE as user signing key. Adds a signature calculated from +the key in to the add-secret request. The file must be in DER or PEM format containing a private key. Supported are RSA 2048 & 3072-bit and EC(secp521r1) keys. The firmware ignores the content, but the request tag protects the signature. The user-signing key signs the request. The location of @@ -240,6 +246,12 @@
+`-h`, `--help` +
    +Print help +
+ + ### pvsecret create meta #### Synopsis `pvsecret create meta` @@ -287,6 +299,12 @@ +`-h`, `--help` +
    +Print help +
+ + ## pvsecret add ### Synopsis `pvsecret add ` @@ -339,6 +357,12 @@ +`-h`, `--help` +
    +Print help +
+ + ## pvsecret verify ### Synopsis `pvsecret verify [OPTIONS] ` @@ -407,3 +431,9 @@ contains this user-data with padded zeros if available. Default value: '-' + + +`-h`, `--help` +
    +Print help +
diff -Nru s390-tools-2.31.0/rust/pvsecret/src/cli.rs s390-tools-2.33.1/rust/pvsecret/src/cli.rs --- s390-tools-2.31.0/rust/pvsecret/src/cli.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/src/cli.rs 2024-05-28 08:26:36.000000000 +0200 @@ -3,8 +3,7 @@ // Copyright IBM Corp. 2023 use clap::{ArgGroup, Args, CommandFactory, Parser, Subcommand, ValueEnum, ValueHint}; -use pv::misc::CertificateOptions; -use pv::misc::STDOUT; +use utils::{CertificateOptions, STDOUT}; /// Manage secrets for IBM Secure Execution guests. /// @@ -119,7 +118,7 @@ /// user data can be up to 512 bytes of arbitrary data, and the maximum size depends on the /// size of the user-signing key: /// - No key: user data can be 512 bytes. - /// - EC or RSA 2048 keys: user data can be 256 bytes. + /// - EC(secp521r1) or RSA 2048 keys: user data can be 256 bytes. /// - RSA 3072 key: user data can be 128 bytes. /// /// The firmware ignores this data, but the request tag protects the user-data. Optional. No @@ -131,10 +130,11 @@ /// /// Adds a signature calculated from the key in to the add-secret request. The /// file must be in DER or PEM format containing a private key. Supported are RSA 2048 & - /// 3072-bit and EC(secp521r1) keys. The firmware ignores the content, but the request tag protects the - /// signature. The user-signing key signs the request. The location of the signature is filled - /// with zeros during the signature calculation. The request tag also secures the signature. - /// See man pvsecret verify for more details. Optional. No signature by default. + /// 3072-bit and EC(secp521r1) keys. The firmware ignores the content, but the request tag + /// protects the signature. The user-signing key signs the request. The location of the + /// signature is filled with zeros during the signature calculation. The request tag also + /// secures the signature. See man pvsecret verify for more details. Optional. No signature + /// by default. 
#[arg(long, value_name = "FILE", value_hint = ValueHint::FilePath,)] pub user_sign_key: Option, } @@ -158,9 +158,9 @@ /// white-spaces mapped to `_`. name: String, - ///Print the hashed name to stdout. + /// Print the hashed name to stdout. /// - ///The hashed name is not written to `NAME.yaml` + /// The hashed name is not written to `NAME.yaml` #[arg(long)] stdout: bool, diff -Nru s390-tools-2.31.0/rust/pvsecret/src/cmd/add.rs s390-tools-2.33.1/rust/pvsecret/src/cmd/add.rs --- s390-tools-2.31.0/rust/pvsecret/src/cmd/add.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/src/cmd/add.rs 2024-05-28 08:26:36.000000000 +0200 @@ -5,8 +5,8 @@ use crate::cli::AddSecretOpt; use anyhow::{Context, Result}; use log::warn; -use pv::misc::get_reader_from_cli_file_arg; use pv::uv::{AddCmd, UvDevice}; +use utils::get_reader_from_cli_file_arg; /// Do an Add Secret UVC pub fn add(opt: &AddSecretOpt) -> Result<()> { diff -Nru s390-tools-2.31.0/rust/pvsecret/src/cmd/create.rs s390-tools-2.33.1/rust/pvsecret/src/cmd/create.rs --- s390-tools-2.31.0/rust/pvsecret/src/cmd/create.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/src/cmd/create.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,23 +1,24 @@ // SPDX-License-Identifier: MIT // -// Copyright IBM Corp. 2023 +// Copyright IBM Corp. 
2023, 2024 use crate::cli::{AddSecretType, CreateSecretFlags, CreateSecretOpt}; -use anyhow::{anyhow, bail, Context, Result}; +use anyhow::{anyhow, bail, Context, Error, Result}; use log::{debug, info, trace, warn}; use pv::{ misc::{ - get_writer_from_cli_file_arg, open_file, parse_hex, pv_guest_bit_set, read_certs, - read_exact_file, read_file, read_private_key, try_parse_u128, try_parse_u64, write, + open_file, parse_hex, pv_guest_bit_set, read_exact_file, read_file, try_parse_u128, + try_parse_u64, write, }, request::{ - openssl::pkey::{PKey, Public}, - uvsecret::{AddSecretFlags, AddSecretRequest, AddSecretVersion, ExtSecret, GuestSecret}, - BootHdrTags, HkdVerifier, ReqEncrCtx, Request, SymKeyType, + openssl::pkey::{PKey, Private}, + BootHdrTags, ReqEncrCtx, Request, SymKeyType, }, + secret::{AddSecretFlags, AddSecretRequest, AddSecretVersion, ExtSecret, GuestSecret}, uv::ConfigUid, }; use serde_yaml::Value; +use utils::get_writer_from_cli_file_arg; fn write_out>(path: &str, data: D, ctx: &str) -> pv::Result<()> { let mut wr = get_writer_from_cli_file_arg(path)?; @@ -40,8 +41,8 @@ debug!("Generated Add-secret request"); // Add host-key documents - let verifier = opt.certificate_args.verifier()?; - read_and_verify_hkds(&opt.certificate_args.host_key_documents, verifier)? + opt.certificate_args + .get_verified_hkds("secret")? .into_iter() .for_each(|k| asrcb.add_hostkey(k)); @@ -57,6 +58,13 @@ write_secret(&opt.secret, &asrcb) } +/// Read+parse the first key from the buffer. +fn read_private_key(buf: &[u8]) -> Result> { + PKey::private_key_from_der(buf) + .or_else(|_| PKey::private_key_from_pem(buf)) + .map_err(Error::new) +} + /// Set-up the `add-secret request` from command-line arguments fn build_asrcb(opt: &CreateSecretOpt) -> Result { debug!("Build add-secret request"); @@ -191,38 +199,6 @@ Ok(()) } -/// reads HKDs into memory, verifies them with the provided HKD verifier. 
-/// returns list of public keys or Err -/// Aborts on first error -fn read_and_verify_hkds( - hkds: &Vec, - verifier: Box, -) -> Result>> { - let mut res = Vec::with_capacity(hkds.len()); - for hkd in hkds { - let hk = read_file(hkd, "host-key document")?; - let certs = read_certs(&hk).with_context(|| { - format!("The provided Host Key Document in '{hkd}' is not in PEM or DER format") - })?; - if certs.is_empty() { - let msg = format!( - "The provided host key document in {} contains no certificate!", - hkd - ); - return Err(anyhow!(msg)); - } - if certs.len() > 1 { - warn!("The host key document in '{hkd}' contains more than one certificate! Only the first certificate will be used.") - } - - // len is >= 1 -> unwrap will succeed - let c = certs.first().unwrap(); - verifier.verify(c)?; - res.push(c.public_key()?); - info!("Use host-key document at '{hkd}'"); - } - Ok(res) -} /// Write the generated secret (if any) to the specified output stream fn write_secret(secret: &AddSecretType, asrcb: &AddSecretRequest) -> Result<()> { if let AddSecretType::Association { @@ -237,7 +213,7 @@ .map(|c| if c.is_whitespace() { '_' } else { c }) .collect(); - //write non confidential data (=name+id) to a yaml + // write non confidential data (=name+id) to a yaml let secret_info = serde_yaml::to_string(asrcb.guest_secret())?; if stdout.to_owned() { println!("{secret_info}"); @@ -262,3 +238,21 @@ }; Ok(()) } + +#[cfg(test)] +mod test { + + #[test] + fn read_private_key() { + let key = include_bytes!("../../../pv/tests/assets/keys/rsa3072key.pem"); + let key = super::read_private_key(key).unwrap(); + assert_eq!(key.rsa().unwrap().size(), 384); + } + + #[test] + fn read_private_key_fail() { + let key = include_bytes!("create.rs"); + let key = super::read_private_key(key); + assert!(key.is_err()); + } +} diff -Nru s390-tools-2.31.0/rust/pvsecret/src/cmd/list.rs s390-tools-2.33.1/rust/pvsecret/src/cmd/list.rs --- s390-tools-2.31.0/rust/pvsecret/src/cmd/list.rs 2024-02-02 
17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/src/cmd/list.rs 2024-05-28 08:26:36.000000000 +0200 @@ -5,10 +5,8 @@ use crate::cli::{ListSecretOpt, ListSecretOutputType}; use anyhow::{Context, Result}; use log::warn; -use pv::{ - misc::{get_writer_from_cli_file_arg, STDOUT}, - uv::{ListCmd, SecretList, UvDevice, UvcSuccess}, -}; +use pv::uv::{ListCmd, SecretList, UvDevice, UvcSuccess}; +use utils::{get_writer_from_cli_file_arg, STDOUT}; /// Do a List Secrets UVC pub fn list(opt: &ListSecretOpt) -> Result<()> { diff -Nru s390-tools-2.31.0/rust/pvsecret/src/cmd/verify.rs s390-tools-2.33.1/rust/pvsecret/src/cmd/verify.rs --- s390-tools-2.31.0/rust/pvsecret/src/cmd/verify.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/src/cmd/verify.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,19 +1,17 @@ +use crate::cli::VerifyOpt; use anyhow::{anyhow, Context, Result}; use log::warn; +use pv::misc::{read_certs, read_file}; use pv::{ - misc::{get_reader_from_cli_file_arg, get_writer_from_cli_file_arg, read_certs, read_file}, - request::{ - openssl::pkey::{PKey, Public}, - uvsecret::verify_asrcb_and_get_user_data, - }, + request::openssl::pkey::{PKey, Public}, + secret::verify_asrcb_and_get_user_data, }; - -use crate::cli::VerifyOpt; +use utils::{get_reader_from_cli_file_arg, get_writer_from_cli_file_arg}; /// read the content of a DER or PEM x509 and return the public key fn read_sgn_key(path: &str) -> Result> { - read_certs(&read_file(path, "user-signing key")?)? - .get(0) + read_certs(read_file(path, "user-signing key")?)? + .first() .ok_or(anyhow!("File does not contain a X509 certificate"))? 
.public_key() .map_err(anyhow::Error::new) diff -Nru s390-tools-2.31.0/rust/pvsecret/src/cmd.rs s390-tools-2.33.1/rust/pvsecret/src/cmd.rs --- s390-tools-2.31.0/rust/pvsecret/src/cmd.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/src/cmd.rs 2024-05-28 08:26:36.000000000 +0200 @@ -8,18 +8,39 @@ mod verify; pub use verify::verify; -// Commands (directly) related to UVCs are only available on s389x +pub const CMD_FN: &[&str] = &["+create", "+verify"]; + #[cfg(target_arch = "s390x")] mod add; #[cfg(target_arch = "s390x")] -pub use add::add; - -#[cfg(target_arch = "s390x")] mod list; #[cfg(target_arch = "s390x")] -pub use list::list; - -#[cfg(target_arch = "s390x")] mod lock; + +// Commands (directly) related to UVCs are only available on s389x #[cfg(target_arch = "s390x")] -pub use lock::lock; +mod uv_cmd { + pub use super::*; + pub use add::add; + pub use list::list; + pub use lock::lock; + pub const UV_CMD_FN: &[&str] = &["+add", "+lock", "+list"]; +} + +#[cfg(not(target_arch = "s390x"))] +mod uv_cmd { + use crate::cli::{AddSecretOpt, ListSecretOpt}; + use anyhow::{bail, Result}; + macro_rules! not_supp { + ($name: ident $( ,$opt: ty )?) => { + pub fn $name($(_: &$opt)?) -> Result<()> { + bail!("Command only available on s390x") + } + }; + } + not_supp!(add, AddSecretOpt); + not_supp!(list, ListSecretOpt); + not_supp!(lock); + pub const UV_CMD_FN: &[&str] = &[]; +} +pub use uv_cmd::*; diff -Nru s390-tools-2.31.0/rust/pvsecret/src/main.rs s390-tools-2.33.1/rust/pvsecret/src/main.rs --- s390-tools-2.31.0/rust/pvsecret/src/main.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/pvsecret/src/main.rs 2024-05-28 08:26:36.000000000 +0200 @@ -1,92 +1,39 @@ // SPDX-License-Identifier: MIT // -// Copyright IBM Corp. 2023 +// Copyright IBM Corp. 
2023, 2024 mod cli; mod cmd; -use clap::CommandFactory; -use clap::Parser; +use clap::{CommandFactory, Parser}; use cli::{CliOptions, Command}; use log::trace; -use pv::misc::PvLogger; use std::process::ExitCode; -use utils::release_string; +use utils::{print_cli_error, print_error, print_version, PvLogger}; use crate::cli::validate_cli; static LOGGER: PvLogger = PvLogger; static EXIT_LOGGER: u8 = 3; -const FEATURES: &[&str] = &[ - "+create", - #[cfg(target_arch = "s390x")] - "+add", - #[cfg(target_arch = "s390x")] - "+lock", - #[cfg(target_arch = "s390x")] - "+list", - "+verify", -]; - -fn print_error(e: anyhow::Error, verbosity: u8) -> ExitCode { - if verbosity > 0 { - // Debug formatter also prints the whole error stack - // So only print it when on verbose - eprintln!("error: {e:?}") - } else { - eprintln!("error: {e}") - }; - ExitCode::FAILURE -} - -fn print_cli_error(e: clap::Error) -> ExitCode { - let ret = if e.use_stderr() { - ExitCode::FAILURE - } else { - ExitCode::SUCCESS - }; - //Ignore any errors during printing of the error - let _ = e.format(&mut CliOptions::command()).print(); - ret -} +const FEATURES: &[&[&str]] = &[cmd::CMD_FN, cmd::UV_CMD_FN]; fn print_version(verbosity: u8) -> anyhow::Result<()> { - println!( - "{} version {}\nCopyright IBM Corp. 
2023", - env!("CARGO_PKG_NAME"), - release_string!() - ); - if verbosity > 0 { - FEATURES.iter().for_each(|f| print!("{f} ")); - println!("(compiled)"); - println!( - "\n{}-crate {}", - env!("CARGO_PKG_NAME"), - env!("CARGO_PKG_VERSION") - ); - println!("{}", pv::crate_info()); - } + print_version!(verbosity, "2024", FEATURES.concat()); Ok(()) } -#[cfg(not(target_arch = "s390x"))] -fn not_supported() -> anyhow::Result<()> { - use anyhow::bail; - bail!("Command only available on s390x") -} - fn main() -> ExitCode { let cli: CliOptions = match CliOptions::try_parse() { Ok(cli) => match validate_cli(&cli) { Ok(_) => cli, - Err(e) => return print_cli_error(e), + Err(e) => return print_cli_error(e, CliOptions::command()), }, - Err(e) => return print_cli_error(e), + Err(e) => return print_cli_error(e, CliOptions::command()), }; // set up logger/std(out,err) if let Err(e) = LOGGER.start(cli.verbose) { - //should(TM) never happen + // should(TM) never happen eprintln!("Logger error: {e:?}"); return EXIT_LOGGER.into(); } @@ -102,19 +49,9 @@ // perform the command selected by the user let res = match &cli.cmd { - #[cfg(target_arch = "s390x")] Command::Add(opt) => cmd::add(opt), - #[cfg(target_arch = "s390x")] Command::List(opt) => cmd::list(opt), - #[cfg(target_arch = "s390x")] Command::Lock => cmd::lock(), - - #[cfg(not(target_arch = "s390x"))] - Command::Add(_) => not_supported(), - #[cfg(not(target_arch = "s390x"))] - Command::List(_) => not_supported(), - #[cfg(not(target_arch = "s390x"))] - Command::Lock => not_supported(), Command::Create(opt) => cmd::create(opt), Command::Version => print_version(cli.verbose), Command::Verify(opt) => cmd::verify(opt), @@ -122,6 +59,6 @@ match res { Ok(_) => ExitCode::SUCCESS, - Err(e) => print_error(e, cli.verbose), + Err(e) => print_error(&e, cli.verbose), } } diff -Nru s390-tools-2.31.0/rust/README.md s390-tools-2.33.1/rust/README.md --- s390-tools-2.31.0/rust/README.md 2024-02-02 17:49:44.000000000 +0100 +++ 
s390-tools-2.33.1/rust/README.md 2024-05-28 08:26:36.000000000 +0200 @@ -37,6 +37,7 @@ ## Tools * __pvsecret__ _Manage secrets for IBM Secure Execution guests_ +* __pvapconfig__ _automatic configure APQNs within an SE KVM guest_ ## Writing new tools We encourage to use Rust for new tools. However, for some use cases it makes diff -Nru s390-tools-2.31.0/rust/utils/Cargo.toml s390-tools-2.33.1/rust/utils/Cargo.toml --- s390-tools-2.31.0/rust/utils/Cargo.toml 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/utils/Cargo.toml 2024-05-28 08:26:36.000000000 +0200 @@ -1,5 +1,12 @@ [package] name = "utils" -version = "0.1.0" +version = "0.10.0" edition.workspace = true license.workspace = true + +[dependencies] +clap = { version ="4.1", features = ["derive", "wrap_help"] } +libc = "0.2.49" +log = { version = "0.4.6", features = ["std", "release_max_level_debug"] } +pv = { path = "../pv", package = "s390_pv" } +serde = { version = "1.0.139"} diff -Nru s390-tools-2.31.0/rust/utils/src/cli.rs s390-tools-2.33.1/rust/utils/src/cli.rs --- s390-tools-2.31.0/rust/utils/src/cli.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/utils/src/cli.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2023, 2024 + +use clap::{ArgGroup, Args, Command, ValueHint}; +use log::{info, warn}; +use pv::misc::read_file; +use pv::{ + misc::{create_file, open_file, read_certs}, + request::{ + openssl::pkey::{PKey, Public}, + HkdVerifier, + }, + Error, Result, +}; +use std::io::{Read, Write}; +use std::path::Path; +use std::process::ExitCode; + +/// CLI Argument collection for handling host-keys, IBM signing keys, and certificates. +#[derive(Args, Debug, PartialEq, Eq, Default)] +#[command( + group(ArgGroup::new("pv_verify").required(true).args(["no_verify", "certs"])), + )] +pub struct CertificateOptions { + /// Use FILE as a host-key document. 
+ /// + /// Can be specified multiple times and must be used at least once. + #[arg( + short = 'k', + long = "host-key-document", + value_name = "FILE", + required = true, + value_hint = ValueHint::FilePath, + use_value_delimiter = true, + value_delimiter = ',', + )] + pub host_key_documents: Vec, + + /// Disable the host-key document verification. + /// + /// Does not require the host-key documents to be valid. + /// Do not use for a production request unless you verified the host-key document beforehand. + #[arg(long)] + pub no_verify: bool, + + /// Use FILE as a certificate to verify the host-key or keys. + /// + /// The certificates are used to establish a chain of trust for the verification + /// of the host-key documents. Specify this option twice to specify the IBM Z signing key and + /// the intermediate CA certificate (signed by the root CA). + #[arg( + short= 'C', + long = "cert", + value_name = "FILE", + alias("crt"), + value_hint = ValueHint::FilePath, + use_value_delimiter = true, + value_delimiter = ',', + )] + pub certs: Vec, + + /// Use FILE as a certificate revocation list. + /// + /// The list is used to check whether a certificate of the chain of + /// trust is revoked. Specify this option multiple times to use multiple CRLs. + #[arg( + long = "crl", + requires("certs"), + value_name = "FILE", + value_hint = ValueHint::FilePath, + use_value_delimiter = true, + value_delimiter = ',', + )] + pub crls: Vec, + + /// Make no attempt to download CRLs. + #[arg(long, requires("certs"))] + pub offline: bool, + + /// Use FILE as the root-CA certificate for the verification. + /// + /// If omitted, the system wide-root CAs installed on the system are used. + /// Use this only if you trust the specified certificate. + #[arg(long, requires("certs"))] + pub root_ca: Option, +} + +impl CertificateOptions { + /// Returns the verifier of this [`CertificateOptions`] based on the given CLI options. + /// + /// - `protectee`: what you want to create. e.g. 
add-secret request or SE-image + /// + /// # Errors + /// + /// This function will return an error if [`crate::request::HkdVerifier`] cannot be created. + fn verifier(&self, protectee: &'static str) -> Result> { + use pv::request::{CertVerifier, NoVerifyHkd}; + match self.no_verify { + true => { + log::warn!( + "Host-key document verification is disabled. The {protectee} may not be protected." + ); + Ok(Box::new(NoVerifyHkd)) + } + false => Ok(Box::new(CertVerifier::new( + &self.certs, + &self.crls, + self.root_ca.as_ref(), + self.offline, + )?)), + } + } + + /// Read the host-keys specified and verifies them if required + /// + /// - `protectee`: what you want to create. e.g. add-secret request or SE-image + /// + /// # Error + /// Returns an error if something went wrong during parsing the HKDs, the verification chain + /// could not built, or when the verification + /// failed. + pub fn get_verified_hkds(&self, protectee: &'static str) -> Result>> { + let hkds = &self.host_key_documents; + let verifier = self.verifier(protectee)?; + + let mut res = Vec::with_capacity(hkds.len()); + for hkd in hkds { + let hk = read_file(hkd, "host-key document")?; + let certs = read_certs(&hk).map_err(|source| Error::HkdNotPemOrDer { + hkd: hkd.to_string(), + source, + })?; + if certs.is_empty() { + return Err(Error::NoHkdInFile(hkd.to_string())); + } + if certs.len() != 1 { + warn!("The host-key document in '{hkd}' contains more than one certificate!") + } + + // Panic: len is == 1 -> unwrap will succeed/not panic + let c = certs.first().unwrap(); + verifier.verify(c)?; + res.push(c.public_key()?); + info!("Use host-key document at '{hkd}'"); + } + Ok(res) + } +} + +/// stdout +pub const STDOUT: &str = "-"; +/// stdin +pub const STDIN: &str = "-"; + +/// Converts an argument value into a Writer. 
+pub fn get_writer_from_cli_file_arg>(path: P) -> Result> { + if path.as_ref() == Path::new(STDOUT) { + Ok(Box::new(std::io::stdout())) + } else { + Ok(Box::new(create_file(path)?)) + } +} + +/// Converts an argument value into a Reader. +pub fn get_reader_from_cli_file_arg>(path: P) -> Result> { + if path.as_ref() == Path::new(STDIN) { + Ok(Box::new(std::io::stdin())) + } else { + Ok(Box::new(open_file(path)?)) + } +} + +/// Print an error that occured during CLI parsing +pub fn print_cli_error(e: clap::Error, mut cmd: Command) -> ExitCode { + let ret = if e.use_stderr() { + ExitCode::FAILURE + } else { + ExitCode::SUCCESS + }; + // Ignore any errors during printing of the error + let _ = e.format(&mut cmd).print(); + ret +} + +/// Print an error to stderr +pub fn print_error(e: &E, verbosity: u8) -> ExitCode +where + // Error trait is not required, but here to limit the usage to errors + E: AsRef + std::fmt::Debug + std::fmt::Display, +{ + if verbosity > 0 { + // Debug formatter also prints the whole error stack + // So only print it when on verbose + eprintln!("error: {e:?}") + } else { + eprintln!("error: {e}") + }; + ExitCode::FAILURE +} + +#[cfg(test)] +mod test { + use clap::Parser; + + use super::*; + + #[test] + #[rustfmt::skip] + fn cli_args() { + //Verify only that some arguments are optional, we do not want to test clap, only the + //configuration + let valid_args = [vec!["pgr", "-k", "hkd.crt", "--no-verify"], vec!["pgr", "-k", "hkd.crt", "--crt", "abc.crt"]]; + // Test for the minimal amount of flags to yield an invalid combination + let invalid_args = [ + vec!["pgr", "-k", "hkd.crt"], + vec!["pgr", "--no-verify", "--crt", "abc.crt"], + vec!["pgr", "--no-verify", "--crt", "abc.crt", "--offline"], + vec!["pgr", "--no-verify", "--crt", "abc.crt", "--crl", "abc.crl"], + vec!["pgr", "--no-verify", "--crt", "abc.crt", "--root-ca", "root.crt"], + vec!["pgr", "--offline"], + vec!["pgr", "--crl", "abc.crl"], + vec!["pgr", "--root-ca", "root.crt"], + ]; + 
#[derive(Parser, Debug)] + struct TestParser { + #[command(flatten)] + pub verify_args: CertificateOptions, + } + + for arg in valid_args { + let res = TestParser::try_parse_from(&arg); + assert!(res.is_ok()); + } + + for arg in invalid_args { + let res = TestParser::try_parse_from(&arg); + assert!(res.is_err()); + } + } +} diff -Nru s390-tools-2.31.0/rust/utils/src/hexslice.rs s390-tools-2.33.1/rust/utils/src/hexslice.rs --- s390-tools-2.31.0/rust/utils/src/hexslice.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/utils/src/hexslice.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2024 +use serde::Serialize; + +/// Displays/Serializes an u8-slice into a Hex-string +/// +/// Thin wrapper around an u8-slice. +#[derive(Debug)] +pub struct HexSlice<'a>(&'a [u8]); + +impl<'a> HexSlice<'a> { + /// Creates a [`HexSlice`] from the given value. + pub fn from(s: &'a T) -> Self + where + T: ?Sized + AsRef<[u8]> + 'a, + { + s.into() + } +} + +impl<'a, T> From<&'a T> for HexSlice<'a> +where + T: ?Sized + AsRef<[u8]> + 'a, +{ + fn from(value: &'a T) -> Self { + Self(value.as_ref()) + } +} +impl<'a> Serialize for HexSlice<'a> { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + serializer.serialize_str(&format!("{self:#}")) + } +} + +impl std::fmt::Display for HexSlice<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if f.alternate() { + write!(f, "0x")?; + } + for byte in self.0 { + write!(f, "{:0>2x}", byte)?; + } + Ok(()) + } +} + +impl AsRef<[u8]> for HexSlice<'_> { + fn as_ref(&self) -> &[u8] { + self.0 + } +} diff -Nru s390-tools-2.31.0/rust/utils/src/lib.rs s390-tools-2.33.1/rust/utils/src/lib.rs --- s390-tools-2.31.0/rust/utils/src/lib.rs 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/rust/utils/src/lib.rs 2024-05-28 08:26:36.000000000 +0200 @@ -2,7 +2,19 @@ //! Utils for s390-tools written in rust. 
//! Not intened to be used outside of s390-tools. //! -//! Copyright IBM Corp. 2023 +//! Copyright IBM Corp. 2023, 2024 +mod cli; +mod hexslice; +mod log; +mod tmpfile; + +pub use crate::cli::CertificateOptions; +pub use crate::cli::{get_reader_from_cli_file_arg, get_writer_from_cli_file_arg}; +pub use crate::cli::{print_cli_error, print_error}; +pub use crate::cli::{STDIN, STDOUT}; +pub use crate::hexslice::HexSlice; +pub use crate::log::PvLogger; +pub use crate::tmpfile::TemporaryDirectory; /// Get the s390-tools release string /// @@ -28,6 +40,33 @@ }}; } +#[macro_export] +/// Print the version to stdout +/// +/// verbosity: integer if >0 more and more details printed +/// feat: (optional) list of features +/// rel_str: a string containig the release name +macro_rules! print_version { + ($verbosity: expr, $year: expr $( ,$feat: expr)?) => {{ + println!( + "{} version {}\nCopyright IBM Corp. {}", + env!("CARGO_PKG_NAME"), + $crate::release_string!(), + $year, + ); + if $verbosity > 0 { + $($feat.iter().for_each(|f| print!("{f} ")); println!("(compiled)");)? + } + if $verbosity > 1 { + println!( + "\n{}-crate {}", + env!("CARGO_PKG_NAME"), + env!("CARGO_PKG_VERSION"), + ); + } + }}; +} + /// Asserts a constant expression evaluates to `true`. /// /// If the expression is not evaluated to `true` the compilation will fail. diff -Nru s390-tools-2.31.0/rust/utils/src/log.rs s390-tools-2.33.1/rust/utils/src/log.rs --- s390-tools-2.31.0/rust/utils/src/log.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/utils/src/log.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: MIT +// +// Copyright IBM Corp. 2023 + +use log::{self, Level, LevelFilter, Log, Metadata, Record}; + +/// A simple Logger that prints to stderr if the verbosity level is high enough. 
+/// Prints log-level for Debug+Trace +#[derive(Clone, Default, Debug)] +pub struct PvLogger; + +fn to_level(verbosity: u8) -> LevelFilter { + match verbosity { + // Error and Warn on by default + 0 => LevelFilter::Warn, + 1 => LevelFilter::Info, + 2 => LevelFilter::Debug, + _ => LevelFilter::Trace, + } +} + +impl PvLogger { + /// Set self as the logger for this application. + /// + /// # Errors + /// + /// An error is returned if a logger has already been set. + pub fn start(&'static self, verbosity: u8) -> Result<(), log::SetLoggerError> { + log::set_logger(self).map(|()| log::set_max_level(to_level(verbosity))) + } +} + +impl Log for PvLogger { + fn enabled(&self, _metadata: &Metadata) -> bool { + true + } + + fn log(&self, record: &Record) { + if self.enabled(record.metadata()) { + if record.level() > Level::Info { + eprintln!("{}: {}", record.level(), record.args()); + } else { + eprintln!("{}", record.args()); + } + } + } + + fn flush(&self) {} +} diff -Nru s390-tools-2.31.0/rust/utils/src/tmpfile.rs s390-tools-2.33.1/rust/utils/src/tmpfile.rs --- s390-tools-2.31.0/rust/utils/src/tmpfile.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust/utils/src/tmpfile.rs 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,150 @@ +use std::{ + ffi::{CString, OsStr}, + os::unix::prelude::OsStrExt, + path::{Path, PathBuf}, +}; + +/// Rust wrapper for `libc::mkdtemp` +fn mkdtemp>(template: P) -> Result { + let template_cstr = CString::new(template.as_ref().as_os_str().as_bytes())?; + let template_raw = template_cstr.into_raw(); + unsafe { + // SAFETY: template_raw is a valid CString because it was generated by + // the `CString::new`. + let ret = libc::mkdtemp(template_raw); + + if ret.is_null() { + Err(std::io::Error::last_os_error()) + } else { + // SAFETY: `template_raw` is still a valid CString because it was + // generated by `CString::new` and modified by `libc::mkdtemp`. 
+ let path_cstr = std::ffi::CString::from_raw(template_raw); + let path = OsStr::from_bytes(path_cstr.as_bytes()); + let path = std::path::PathBuf::from(path); + + Ok(path) + } + } +} + +/// This type creates a temporary directory that is automatically removed when +/// it goes out of scope. It utilizes the `mkdtemp` function and its semantics, +/// with the addition of automatically including the template characters +/// `XXXXXX`. +#[derive(PartialEq, Eq, Debug)] +pub struct TemporaryDirectory { + path: Box, +} + +impl TemporaryDirectory { + /// Creates a temporary directory using `prefix` as directory prefix. + /// + /// # Errors + /// + /// An error is returned if the temporary directory could not be created. + pub fn new>(prefix: P) -> Result { + let mut template = prefix.as_ref().to_owned(); + let template_os_string = template.as_mut_os_string(); + template_os_string.push("XXXXXX"); + + let temp_dir = mkdtemp(template_os_string)?; + Ok(Self { + path: temp_dir.into_boxed_path(), + }) + } + + /// Returns the path of the created temporary directory. + pub fn path(&self) -> &Path { + self.path.as_ref() + } + + fn forget(mut self) { + self.path = PathBuf::new().into_boxed_path(); + std::mem::forget(self); + } + + /// Removes the created temporary directory and it's contents. 
+ pub fn close(self) -> std::io::Result<()> { + let ret = std::fs::remove_dir_all(&self.path); + self.forget(); + ret + } +} + +impl AsRef for TemporaryDirectory { + fn as_ref(&self) -> &Path { + self.path() + } +} + +impl Drop for TemporaryDirectory { + fn drop(&mut self) { + let _ = std::fs::remove_dir_all(&self.path); + } +} + +#[cfg(test)] +mod tests { + use super::{mkdtemp, TemporaryDirectory}; + + #[test] + fn mkdtemp_test() { + let template_inv_not_last_characters = "XXXXXXyay"; + let template_inv_too_less_x = "yayXXXXX"; + let template_inv_path_does_not_exist = "../NA-yay/XXXXXX"; + + let template = "yayXXXXXX"; + + let _err = mkdtemp(template_inv_not_last_characters).expect_err("invalid template"); + let _err = mkdtemp(template_inv_too_less_x).expect_err("invalid template"); + let _err = + mkdtemp(template_inv_path_does_not_exist).expect_err("path does not exist template"); + + let path = mkdtemp(template).expect("mkdtemp should work"); + assert!(path.exists()); + assert!(path.as_os_str().to_str().expect("works").starts_with("yay")); + std::fs::remove_dir(path).unwrap(); + } + + #[test] + fn temporary_directory_empty_name_test() { + let temp_dir = TemporaryDirectory::new("").expect("should work"); + let path = temp_dir.path().to_owned(); + assert!(path.exists()); + + // Test that close removes the directory + temp_dir.close().unwrap(); + assert!(!path.exists()); + } + + #[test] + fn temporary_directory_drop_test() { + let temp_dir = TemporaryDirectory::new("").expect("should work"); + let path = temp_dir.path().to_owned(); + assert!(path.exists()); + + // Test that the destructor removes the directory + drop(temp_dir); + assert!(!path.exists()); + } + + #[test] + fn temporary_directory_close_test() { + let temp_dir = TemporaryDirectory::new("yay").expect("should work"); + + let path = temp_dir.path().to_owned(); + assert!(path.exists()); + assert!(path.as_os_str().to_str().expect("works").starts_with("yay")); + + // Test that close() removes the directory 
+ temp_dir.close().unwrap(); + assert!(!path.exists()); + } + + #[test] + fn temporary_directory_as_ref_test() { + let temp_dir = TemporaryDirectory::new("").expect("should work"); + + assert_eq!(temp_dir.path(), temp_dir.as_ref()); + } +} diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/benchmark.sh s390-tools-2.33.1/rust-vendor/addr2line/benchmark.sh --- s390-tools-2.31.0/rust-vendor/addr2line/benchmark.sh 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/benchmark.sh 1970-01-01 01:00:00.000000000 +0100 @@ -1,112 +0,0 @@ -#!/bin/bash -if [[ $# -le 1 ]]; then - echo "Usage: $0 [] REFS..." - exit 1 -fi -target="$1" -shift - -addresses="" -if [[ -e "$1" ]]; then - addresses="$1" - shift -fi - -# path to "us" -# readlink -f, but more portable: -dirname=$(perl -e 'use Cwd "abs_path";print abs_path(shift)' "$(dirname "$0")") - -# https://stackoverflow.com/a/2358432/472927 -{ - # compile all refs - pushd "$dirname" > /dev/null - # if the user has some local changes, preserve them - nstashed=$(git stash list | wc -l) - echo "==> Stashing any local modifications" - git stash --keep-index > /dev/null - popstash() { - # https://stackoverflow.com/q/24520791/472927 - if [[ "$(git stash list | wc -l)" -ne "$nstashed" ]]; then - echo "==> Restoring stashed state" - git stash pop > /dev/null - fi - } - # if the user has added stuff to the index, abort - if ! git diff-index --quiet HEAD --; then - echo "Refusing to overwrite outstanding git changes" - popstash - exit 2 - fi - current=$(git symbolic-ref --short HEAD) - for ref in "$@"; do - echo "==> Compiling $ref" - git checkout -q "$ref" - commit=$(git rev-parse HEAD) - fn="target/release/addr2line-$commit" - if [[ ! 
-e "$fn" ]]; then - cargo build --release --example addr2line - cp target/release/examples/addr2line "$fn" - fi - if [[ "$ref" != "$commit" ]]; then - ln -sfn "addr2line-$commit" target/release/addr2line-"$ref" - fi - done - git checkout -q "$current" - popstash - popd > /dev/null - - # get us some addresses to look up - if [[ -z "$addresses" ]]; then - echo "==> Looking for benchmarking addresses (this may take a while)" - addresses=$(mktemp tmp.XXXXXXXXXX) - objdump -C -x --disassemble -l "$target" \ - | grep -P '0[048]:' \ - | awk '{print $1}' \ - | sed 's/:$//' \ - > "$addresses" - echo " -> Addresses stored in $addresses; you should re-use it next time" - fi - - run() { - func="$1" - name="$2" - cmd="$3" - args="$4" - printf "%s\t%s\t" "$name" "$func" - if [[ "$cmd" =~ llvm-symbolizer ]]; then - /usr/bin/time -f '%e\t%M' "$cmd" $args -obj="$target" < "$addresses" 2>&1 >/dev/null - else - /usr/bin/time -f '%e\t%M' "$cmd" $args -e "$target" < "$addresses" 2>&1 >/dev/null - fi - } - - # run without functions - log1=$(mktemp tmp.XXXXXXXXXX) - echo "==> Benchmarking" - run nofunc binutils addr2line >> "$log1" - #run nofunc elfutils eu-addr2line >> "$log1" - run nofunc llvm-sym llvm-symbolizer -functions=none >> "$log1" - for ref in "$@"; do - run nofunc "$ref" "$dirname/target/release/addr2line-$ref" >> "$log1" - done - cat "$log1" | column -t - - # run with functions - log2=$(mktemp tmp.XXXXXXXXXX) - echo "==> Benchmarking with -f" - run func binutils addr2line "-f -i" >> "$log2" - #run func elfutils eu-addr2line "-f -i" >> "$log2" - run func llvm-sym llvm-symbolizer "-functions=linkage -demangle=0" >> "$log2" - for ref in "$@"; do - run func "$ref" "$dirname/target/release/addr2line-$ref" "-f -i" >> "$log2" - done - cat "$log2" | column -t - cat "$log2" >> "$log1"; rm "$log2" - - echo "==> Plotting" - Rscript --no-readline --no-restore --no-save "$dirname/bench.plot.r" < "$log1" - - echo "==> Cleaning up" - rm "$log1" - exit 0 -} diff -Nru 
s390-tools-2.31.0/rust-vendor/addr2line/bench.plot.r s390-tools-2.33.1/rust-vendor/addr2line/bench.plot.r --- s390-tools-2.31.0/rust-vendor/addr2line/bench.plot.r 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/bench.plot.r 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -v <- read.table(file("stdin")) -t <- data.frame(prog=v[,1], funcs=(v[,2]=="func"), time=v[,3], mem=v[,4], stringsAsFactors=FALSE) - -t$prog <- as.character(t$prog) -t$prog[t$prog == "master"] <- "gimli-rs/addr2line" -t$funcs[t$funcs == TRUE] <- "With functions" -t$funcs[t$funcs == FALSE] <- "File/line only" -t$mem = t$mem / 1024.0 - -library(ggplot2) -p <- ggplot(data=t, aes(x=prog, y=time, fill=prog)) -p <- p + geom_bar(stat = "identity") -p <- p + facet_wrap(~ funcs) -p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) -p <- p + ylab("time (s)") + ggtitle("addr2line runtime") -ggsave('time.png',plot=p,width=10,height=6) - -p <- ggplot(data=t, aes(x=prog, y=mem, fill=prog)) -p <- p + geom_bar(stat = "identity") -p <- p + facet_wrap(~ funcs) -p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) -p <- p + ylab("memory (kB)") + ggtitle("addr2line memory usage") -ggsave('memory.png',plot=p,width=10,height=6) diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/addr2line/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/addr2line/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/Cargo.lock s390-tools-2.33.1/rust-vendor/addr2line/Cargo.lock --- s390-tools-2.31.0/rust-vendor/addr2line/Cargo.lock 2024-02-06 12:28:08.000000000 
+0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 @@ -1,704 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "addr2line" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" -dependencies = [ - "gimli 0.27.2", -] - -[[package]] -name = "addr2line" -version = "0.21.0" -dependencies = [ - "backtrace", - "clap", - "compiler_builtins", - "cpp_demangle", - "fallible-iterator", - "findshlibs", - "gimli 0.28.0", - "libtest-mimic", - "memmap2", - "object 0.32.0", - "rustc-demangle", - "rustc-std-workspace-alloc", - "rustc-std-workspace-core", - "smallvec", - "typed-arena", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "anstream" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is-terminal", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" - -[[package]] -name = "anstyle-parse" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" 
-dependencies = [ - "windows-sys", -] - -[[package]] -name = "anstyle-wincon" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" -dependencies = [ - "anstyle", - "windows-sys", -] - -[[package]] -name = "backtrace" -version = "0.3.67" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" -dependencies = [ - "addr2line 0.19.0", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object 0.30.3", - "rustc-demangle", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "cc" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "clap" -version = "4.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" -dependencies = [ - "clap_builder", - "clap_derive", - "once_cell", -] - -[[package]] -name = "clap_builder" -version = "4.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", - "terminal_size", -] - -[[package]] -name = "clap_derive" -version = "4.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.15", -] - -[[package]] -name = "clap_lex" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" - -[[package]] -name = "colorchoice" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" - -[[package]] -name = "compiler_builtins" -version = "0.1.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571298a3cce7e2afbd3d61abb91a18667d5ab25993ec577a88ee8ac45f00cc3a" - -[[package]] -name = "cpp_demangle" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c76f98bdfc7f66172e6c7065f981ebb576ffc903fe4c0561d9f0c2509226dc6" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crc32fast" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "errno" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" -dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" 
-dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "fallible-iterator" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" - -[[package]] -name = "findshlibs" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" -dependencies = [ - "cc", - "lazy_static", - "libc", - "winapi", -] - -[[package]] -name = "flate2" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - -[[package]] -name = "gimli" -version = "0.27.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" - -[[package]] -name = "gimli" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" -dependencies = [ - "compiler_builtins", - "fallible-iterator", - "rustc-std-workspace-alloc", - "rustc-std-workspace-core", - "stable_deref_trait", -] - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" - -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.2", - "libc", - "windows-sys", -] - -[[package]] -name = "is-terminal" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" -dependencies = [ - "hermit-abi 0.3.2", - "rustix 0.38.8", - "windows-sys", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.147" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" - -[[package]] -name = "libtest-mimic" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7" -dependencies = [ - "clap", - "termcolor", - "threadpool", -] - -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "memmap2" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" -dependencies = [ - "libc", -] - -[[package]] 
-name = "miniz_oxide" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" -dependencies = [ - "adler", -] - -[[package]] -name = "num_cpus" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" -dependencies = [ - "hermit-abi 0.2.6", - "libc", -] - -[[package]] -name = "object" -version = "0.30.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" -dependencies = [ - "memchr", -] - -[[package]] -name = "object" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" -dependencies = [ - "flate2", - "memchr", - "ruzstd", -] - -[[package]] -name = "once_cell" -version = "1.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" - -[[package]] -name = "proc-macro2" -version = "1.0.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" - -[[package]] -name = "rustc-std-workspace-alloc" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ff66d57013a5686e1917ed6a025d54dd591fcda71a41fe07edf4d16726aefa86" - -[[package]] -name = "rustc-std-workspace-core" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1956f5517128a2b6f23ab2dadf1a976f4f5b27962e7724c2bf3d45e539ec098c" - -[[package]] -name = "rustix" -version = "0.37.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys", -] - -[[package]] -name = "rustix" -version = "0.38.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" -dependencies = [ - "bitflags 2.4.0", - "errno", - "libc", - "linux-raw-sys 0.4.5", - "windows-sys", -] - -[[package]] -name = "ruzstd" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3ffab8f9715a0d455df4bbb9d21e91135aab3cd3ca187af0cd0c3c3f868fdc" -dependencies = [ - "byteorder", - "thiserror-core", - "twox-hash", -] - -[[package]] -name = "smallvec" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" - -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] 
-name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "terminal_size" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237" -dependencies = [ - "rustix 0.37.23", - "windows-sys", -] - -[[package]] -name = "thiserror-core" -version = "1.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d97345f6437bb2004cd58819d8a9ef8e36cdd7661c2abc4bbde0a7c40d9f497" -dependencies = [ - "thiserror-core-impl", -] - -[[package]] -name = "thiserror-core-impl" -version = "1.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] -name = "twox-hash" -version = "1.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" -dependencies = [ - "cfg-if", 
- "static_assertions", -] - -[[package]] -name = "typed-arena" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" - -[[package]] -name = "unicode-ident" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" - -[[package]] -name = "utf8parse" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/Cargo.toml s390-tools-2.33.1/rust-vendor/addr2line/Cargo.toml --- s390-tools-2.31.0/rust-vendor/addr2line/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/Cargo.toml 
1970-01-01 01:00:00.000000000 +0100 @@ -1,147 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.65" -name = "addr2line" -version = "0.21.0" -exclude = [ - "/benches/*", - "/fixtures/*", - ".github", -] -description = "A cross-platform symbolication library written in Rust, using `gimli`" -documentation = "https://docs.rs/addr2line" -readme = "./README.md" -keywords = [ - "DWARF", - "debug", - "elf", - "symbolicate", - "atos", -] -categories = ["development-tools::debugging"] -license = "Apache-2.0 OR MIT" -repository = "https://github.com/gimli-rs/addr2line" - -[profile.bench] -codegen-units = 1 -debug = true - -[profile.release] -debug = true - -[[example]] -name = "addr2line" -required-features = ["default"] - -[[test]] -name = "output_equivalence" -harness = false -required-features = ["default"] - -[[test]] -name = "correctness" -required-features = ["default"] - -[[test]] -name = "parse" -required-features = ["std-object"] - -[dependencies.alloc] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-alloc" - -[dependencies.compiler_builtins] -version = "0.1.2" -optional = true - -[dependencies.core] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-core" - -[dependencies.cpp_demangle] -version = "0.4" -features = ["alloc"] -optional = true -default-features = false - -[dependencies.fallible-iterator] -version = "0.3.0" -optional = true -default-features = false - -[dependencies.gimli] -version = "0.28.0" -features = ["read"] -default-features = false - 
-[dependencies.memmap2] -version = "0.5.5" -optional = true - -[dependencies.object] -version = "0.32.0" -features = ["read"] -optional = true -default-features = false - -[dependencies.rustc-demangle] -version = "0.1" -optional = true - -[dependencies.smallvec] -version = "1" -optional = true -default-features = false - -[dev-dependencies.backtrace] -version = "0.3.13" - -[dev-dependencies.clap] -version = "4.3.21" -features = ["wrap_help"] - -[dev-dependencies.findshlibs] -version = "0.10" - -[dev-dependencies.libtest-mimic] -version = "0.6.1" - -[dev-dependencies.typed-arena] -version = "2" - -[features] -default = [ - "rustc-demangle", - "cpp_demangle", - "std-object", - "fallible-iterator", - "smallvec", - "memmap2", -] -rustc-dep-of-std = [ - "core", - "alloc", - "compiler_builtins", - "gimli/rustc-dep-of-std", -] -std = ["gimli/std"] -std-object = [ - "std", - "object", - "object/std", - "object/compression", - "gimli/endian-reader", -] diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/CHANGELOG.md s390-tools-2.33.1/rust-vendor/addr2line/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/addr2line/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,336 +0,0 @@ -# `addr2line` Change Log - --------------------------------------------------------------------------------- - -## 0.21.0 (2023/08/12) - -### Breaking changes - -* Updated `gimli`, `object`, and `fallible-iterator` dependencies. - -### Changed - -* The minimum supported rust version is 1.65.0. - -* Store boxed slices instead of `Vec` objects in `Context`. - [#278](https://github.com/gimli-rs/addr2line/pull/278) - --------------------------------------------------------------------------------- - -## 0.20.0 (2023/04/15) - -### Breaking changes - -* The minimum supported rust version is 1.58.0. - -* Changed `Context::find_frames` to return `LookupResult`. 
- Use `LookupResult::skip_all_loads` to obtain the result without loading split DWARF. - [#260](https://github.com/gimli-rs/addr2line/pull/260) - -* Replaced `Context::find_dwarf_unit` with `Context::find_dwarf_and_unit`. - [#260](https://github.com/gimli-rs/addr2line/pull/260) - -* Updated `object` dependency. - -### Changed - -* Fix handling of file index 0 for DWARF 5. - [#264](https://github.com/gimli-rs/addr2line/pull/264) - -### Added - -* Added types and methods to support loading split DWARF: - `LookupResult`, `SplitDwarfLoad`, `SplitDwarfLoader`, `Context::preload_units`. - [#260](https://github.com/gimli-rs/addr2line/pull/260) - [#262](https://github.com/gimli-rs/addr2line/pull/262) - [#263](https://github.com/gimli-rs/addr2line/pull/263) - --------------------------------------------------------------------------------- - -## 0.19.0 (2022/11/24) - -### Breaking changes - -* Updated `gimli` and `object` dependencies. - --------------------------------------------------------------------------------- - -## 0.18.0 (2022/07/16) - -### Breaking changes - -* Updated `object` dependency. - -### Changed - -* Fixed handling of relative path for `DW_AT_comp_dir`. - [#239](https://github.com/gimli-rs/addr2line/pull/239) - -* Fixed handling of `DW_FORM_addrx` for DWARF 5 support. - [#243](https://github.com/gimli-rs/addr2line/pull/243) - -* Fixed handling of units that are missing range information. - [#249](https://github.com/gimli-rs/addr2line/pull/249) - --------------------------------------------------------------------------------- - -## 0.17.0 (2021/10/24) - -### Breaking changes - -* Updated `gimli` and `object` dependencies. - -### Changed - -* Use `skip_attributes` to improve performance. - [#236](https://github.com/gimli-rs/addr2line/pull/236) - --------------------------------------------------------------------------------- - -## 0.16.0 (2021/07/26) - -### Breaking changes - -* Updated `gimli` and `object` dependencies. 
- --------------------------------------------------------------------------------- - -## 0.15.2 (2021/06/04) - -### Fixed - -* Allow `Context` to be `Send`. - [#219](https://github.com/gimli-rs/addr2line/pull/219) - --------------------------------------------------------------------------------- - -## 0.15.1 (2021/05/02) - -### Fixed - -* Don't ignore aranges with address 0. - [#217](https://github.com/gimli-rs/addr2line/pull/217) - --------------------------------------------------------------------------------- - -## 0.15.0 (2021/05/02) - -### Breaking changes - -* Updated `gimli` and `object` dependencies. - [#215](https://github.com/gimli-rs/addr2line/pull/215) - -* Added `debug_aranges` parameter to `Context::from_sections`. - [#200](https://github.com/gimli-rs/addr2line/pull/200) - -### Added - -* Added `.debug_aranges` support. - [#200](https://github.com/gimli-rs/addr2line/pull/200) - -* Added supplementary object file support. - [#208](https://github.com/gimli-rs/addr2line/pull/208) - -### Fixed - -* Fixed handling of Windows paths in locations. - [#209](https://github.com/gimli-rs/addr2line/pull/209) - -* examples/addr2line: Flush stdout after each response. - [#210](https://github.com/gimli-rs/addr2line/pull/210) - -* examples/addr2line: Avoid copying every section. - [#213](https://github.com/gimli-rs/addr2line/pull/213) - --------------------------------------------------------------------------------- - -## 0.14.1 (2020/12/31) - -### Fixed - -* Fix location lookup for skeleton units. - [#201](https://github.com/gimli-rs/addr2line/pull/201) - -### Added - -* Added `Context::find_location_range`. - [#196](https://github.com/gimli-rs/addr2line/pull/196) - [#199](https://github.com/gimli-rs/addr2line/pull/199) - --------------------------------------------------------------------------------- - -## 0.14.0 (2020/10/27) - -### Breaking changes - -* Updated `gimli` and `object` dependencies. - -### Fixed - -* Handle units that only have line information. 
- [#188](https://github.com/gimli-rs/addr2line/pull/188) - -* Handle DWARF units with version <= 4 and no `DW_AT_name`. - [#191](https://github.com/gimli-rs/addr2line/pull/191) - -* Fix handling of `DW_FORM_ref_addr`. - [#193](https://github.com/gimli-rs/addr2line/pull/193) - --------------------------------------------------------------------------------- - -## 0.13.0 (2020/07/07) - -### Breaking changes - -* Updated `gimli` and `object` dependencies. - -* Added `rustc-dep-of-std` feature. - [#166](https://github.com/gimli-rs/addr2line/pull/166) - -### Changed - -* Improve performance by parsing function contents lazily. - [#178](https://github.com/gimli-rs/addr2line/pull/178) - -* Don't skip `.debug_info` and `.debug_line` entries with a zero address. - [#182](https://github.com/gimli-rs/addr2line/pull/182) - --------------------------------------------------------------------------------- - -## 0.12.2 (2020/06/21) - -### Fixed - -* Avoid linear search for `DW_FORM_ref_addr`. - [#175](https://github.com/gimli-rs/addr2line/pull/175) - --------------------------------------------------------------------------------- - -## 0.12.1 (2020/05/19) - -### Fixed - -* Handle units with overlapping address ranges. - [#163](https://github.com/gimli-rs/addr2line/pull/163) - -* Don't assert for functions with overlapping address ranges. - [#168](https://github.com/gimli-rs/addr2line/pull/168) - --------------------------------------------------------------------------------- - -## 0.12.0 (2020/05/12) - -### Breaking changes - -* Updated `gimli` and `object` dependencies. - -* Added more optional features: `smallvec` and `fallible-iterator`. - [#160](https://github.com/gimli-rs/addr2line/pull/160) - -### Added - -* Added `Context::dwarf` and `Context::find_dwarf_unit`. - [#159](https://github.com/gimli-rs/addr2line/pull/159) - -### Changed - -* Removed `lazycell` dependency. 
- [#160](https://github.com/gimli-rs/addr2line/pull/160) - --------------------------------------------------------------------------------- - -## 0.11.0 (2020/01/11) - -### Breaking changes - -* Updated `gimli` and `object` dependencies. - -* [#130](https://github.com/gimli-rs/addr2line/pull/130) - Changed `Location::file` from `Option` to `Option<&str>`. - This required adding lifetime parameters to `Location` and other structs that - contain it. - -* [#152](https://github.com/gimli-rs/addr2line/pull/152) - Changed `Location::line` and `Location::column` from `Option`to `Option`. - -* [#156](https://github.com/gimli-rs/addr2line/pull/156) - Deleted `alloc` feature, and fixed `no-std` builds with stable rust. - Removed default `Reader` parameter for `Context`, and added `ObjectContext` instead. - -### Added - -* [#134](https://github.com/gimli-rs/addr2line/pull/134) - Added `Context::from_dwarf`. - -### Changed - -* [#133](https://github.com/gimli-rs/addr2line/pull/133) - Fixed handling of units that can't be parsed. - -* [#155](https://github.com/gimli-rs/addr2line/pull/155) - Fixed `addr2line` output to match binutils. - -* [#130](https://github.com/gimli-rs/addr2line/pull/130) - Improved `.debug_line` parsing performance. - -* [#148](https://github.com/gimli-rs/addr2line/pull/148) - [#150](https://github.com/gimli-rs/addr2line/pull/150) - [#151](https://github.com/gimli-rs/addr2line/pull/151) - [#152](https://github.com/gimli-rs/addr2line/pull/152) - Improved `.debug_info` parsing performance. - -* [#137](https://github.com/gimli-rs/addr2line/pull/137) - [#138](https://github.com/gimli-rs/addr2line/pull/138) - [#139](https://github.com/gimli-rs/addr2line/pull/139) - [#140](https://github.com/gimli-rs/addr2line/pull/140) - [#146](https://github.com/gimli-rs/addr2line/pull/146) - Improved benchmarks. 
- --------------------------------------------------------------------------------- - -## 0.10.0 (2019/07/07) - -### Breaking changes - -* [#127](https://github.com/gimli-rs/addr2line/pull/127) - Update `gimli`. - --------------------------------------------------------------------------------- - -## 0.9.0 (2019/05/02) - -### Breaking changes - -* [#121](https://github.com/gimli-rs/addr2line/pull/121) - Update `gimli`, `object`, and `fallible-iterator` dependencies. - -### Added - -* [#121](https://github.com/gimli-rs/addr2line/pull/121) - Reexport `gimli`, `object`, and `fallible-iterator`. - --------------------------------------------------------------------------------- - -## 0.8.0 (2019/02/06) - -### Breaking changes - -* [#107](https://github.com/gimli-rs/addr2line/pull/107) - Update `object` dependency to 0.11. This is part of the public API. - -### Added - -* [#101](https://github.com/gimli-rs/addr2line/pull/101) - Add `object` feature (enabled by default). Disable this feature to remove - the `object` dependency and `Context::new` API. - -* [#102](https://github.com/gimli-rs/addr2line/pull/102) - Add `std` (enabled by default) and `alloc` features. - -### Changed - -* [#108](https://github.com/gimli-rs/addr2line/issues/108) - `demangle` no longer outputs the hash for rust symbols. - -* [#109](https://github.com/gimli-rs/addr2line/issues/109) - Set default `R` for `Context`. diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/coverage.sh s390-tools-2.33.1/rust-vendor/addr2line/coverage.sh --- s390-tools-2.31.0/rust-vendor/addr2line/coverage.sh 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/coverage.sh 1970-01-01 01:00:00.000000000 +0100 @@ -1,5 +0,0 @@ -#!/bin/sh -# Run tarpaulin and pycobertura to generate coverage.html. 
- -cargo tarpaulin --skip-clean --out Xml -pycobertura show --format html --output coverage.html cobertura.xml diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/examples/addr2line.rs s390-tools-2.33.1/rust-vendor/addr2line/examples/addr2line.rs --- s390-tools-2.31.0/rust-vendor/addr2line/examples/addr2line.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/examples/addr2line.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,317 +0,0 @@ -use std::borrow::Cow; -use std::fs::File; -use std::io::{BufRead, Lines, StdinLock, Write}; -use std::path::{Path, PathBuf}; - -use clap::{Arg, ArgAction, Command}; -use fallible_iterator::FallibleIterator; -use object::{Object, ObjectSection, SymbolMap, SymbolMapName}; -use typed_arena::Arena; - -use addr2line::{Context, Location}; - -fn parse_uint_from_hex_string(string: &str) -> Option { - if string.len() > 2 && string.starts_with("0x") { - u64::from_str_radix(&string[2..], 16).ok() - } else { - u64::from_str_radix(string, 16).ok() - } -} - -enum Addrs<'a> { - Args(clap::parser::ValuesRef<'a, String>), - Stdin(Lines>), -} - -impl<'a> Iterator for Addrs<'a> { - type Item = Option; - - fn next(&mut self) -> Option> { - let text = match *self { - Addrs::Args(ref mut vals) => vals.next().map(Cow::from), - Addrs::Stdin(ref mut lines) => lines.next().map(Result::unwrap).map(Cow::from), - }; - text.as_ref() - .map(Cow::as_ref) - .map(parse_uint_from_hex_string) - } -} - -fn print_loc(loc: Option<&Location<'_>>, basenames: bool, llvm: bool) { - if let Some(loc) = loc { - if let Some(ref file) = loc.file.as_ref() { - let path = if basenames { - Path::new(Path::new(file).file_name().unwrap()) - } else { - Path::new(file) - }; - print!("{}:", path.display()); - } else { - print!("??:"); - } - if llvm { - print!("{}:{}", loc.line.unwrap_or(0), loc.column.unwrap_or(0)); - } else if let Some(line) = loc.line { - print!("{}", line); - } else { - print!("?"); - } - println!(); - } else if llvm { - println!("??:0:0"); - 
} else { - println!("??:0"); - } -} - -fn print_function(name: Option<&str>, language: Option, demangle: bool) { - if let Some(name) = name { - if demangle { - print!("{}", addr2line::demangle_auto(Cow::from(name), language)); - } else { - print!("{}", name); - } - } else { - print!("??"); - } -} - -fn load_file_section<'input, 'arena, Endian: gimli::Endianity>( - id: gimli::SectionId, - file: &object::File<'input>, - endian: Endian, - arena_data: &'arena Arena>, -) -> Result, ()> { - // TODO: Unify with dwarfdump.rs in gimli. - let name = id.name(); - match file.section_by_name(name) { - Some(section) => match section.uncompressed_data().unwrap() { - Cow::Borrowed(b) => Ok(gimli::EndianSlice::new(b, endian)), - Cow::Owned(b) => Ok(gimli::EndianSlice::new(arena_data.alloc(b.into()), endian)), - }, - None => Ok(gimli::EndianSlice::new(&[][..], endian)), - } -} - -fn find_name_from_symbols<'a>( - symbols: &'a SymbolMap>, - probe: u64, -) -> Option<&'a str> { - symbols.get(probe).map(|x| x.name()) -} - -struct Options<'a> { - do_functions: bool, - do_inlines: bool, - pretty: bool, - print_addrs: bool, - basenames: bool, - demangle: bool, - llvm: bool, - exe: &'a PathBuf, - sup: Option<&'a PathBuf>, -} - -fn main() { - let matches = Command::new("addr2line") - .version(env!("CARGO_PKG_VERSION")) - .about("A fast addr2line Rust port") - .max_term_width(100) - .args(&[ - Arg::new("exe") - .short('e') - .long("exe") - .value_name("filename") - .value_parser(clap::value_parser!(PathBuf)) - .help( - "Specify the name of the executable for which addresses should be translated.", - ) - .required(true), - Arg::new("sup") - .long("sup") - .value_name("filename") - .value_parser(clap::value_parser!(PathBuf)) - .help("Path to supplementary object file."), - Arg::new("functions") - .short('f') - .long("functions") - .action(ArgAction::SetTrue) - .help("Display function names as well as file and line number information."), - Arg::new("pretty").short('p').long("pretty-print") - 
.action(ArgAction::SetTrue) - .help( - "Make the output more human friendly: each location are printed on one line.", - ), - Arg::new("inlines").short('i').long("inlines") - .action(ArgAction::SetTrue) - .help( - "If the address belongs to a function that was inlined, the source information for \ - all enclosing scopes back to the first non-inlined function will also be printed.", - ), - Arg::new("addresses").short('a').long("addresses") - .action(ArgAction::SetTrue) - .help( - "Display the address before the function name, file and line number information.", - ), - Arg::new("basenames") - .short('s') - .long("basenames") - .action(ArgAction::SetTrue) - .help("Display only the base of each file name."), - Arg::new("demangle").short('C').long("demangle") - .action(ArgAction::SetTrue) - .help( - "Demangle function names. \ - Specifying a specific demangling style (like GNU addr2line) is not supported. \ - (TODO)" - ), - Arg::new("llvm") - .long("llvm") - .action(ArgAction::SetTrue) - .help("Display output in the same format as llvm-symbolizer."), - Arg::new("addrs") - .action(ArgAction::Append) - .help("Addresses to use instead of reading from stdin."), - ]) - .get_matches(); - - let arena_data = Arena::new(); - - let opts = Options { - do_functions: matches.get_flag("functions"), - do_inlines: matches.get_flag("inlines"), - pretty: matches.get_flag("pretty"), - print_addrs: matches.get_flag("addresses"), - basenames: matches.get_flag("basenames"), - demangle: matches.get_flag("demangle"), - llvm: matches.get_flag("llvm"), - exe: matches.get_one::("exe").unwrap(), - sup: matches.get_one::("sup"), - }; - - let file = File::open(opts.exe).unwrap(); - let map = unsafe { memmap2::Mmap::map(&file).unwrap() }; - let object = &object::File::parse(&*map).unwrap(); - - let endian = if object.is_little_endian() { - gimli::RunTimeEndian::Little - } else { - gimli::RunTimeEndian::Big - }; - - let mut load_section = |id: gimli::SectionId| -> Result<_, _> { - load_file_section(id, 
object, endian, &arena_data) - }; - - let sup_map; - let sup_object = if let Some(sup_path) = opts.sup { - let sup_file = File::open(sup_path).unwrap(); - sup_map = unsafe { memmap2::Mmap::map(&sup_file).unwrap() }; - Some(object::File::parse(&*sup_map).unwrap()) - } else { - None - }; - - let symbols = object.symbol_map(); - let mut dwarf = gimli::Dwarf::load(&mut load_section).unwrap(); - if let Some(ref sup_object) = sup_object { - let mut load_sup_section = |id: gimli::SectionId| -> Result<_, _> { - load_file_section(id, sup_object, endian, &arena_data) - }; - dwarf.load_sup(&mut load_sup_section).unwrap(); - } - - let mut split_dwarf_loader = addr2line::builtin_split_dwarf_loader::SplitDwarfLoader::new( - |data, endian| { - gimli::EndianSlice::new(arena_data.alloc(Cow::Owned(data.into_owned())), endian) - }, - Some(opts.exe.clone()), - ); - let ctx = Context::from_dwarf(dwarf).unwrap(); - - let stdin = std::io::stdin(); - let addrs = matches - .get_many::("addrs") - .map(Addrs::Args) - .unwrap_or_else(|| Addrs::Stdin(stdin.lock().lines())); - - for probe in addrs { - if opts.print_addrs { - let addr = probe.unwrap_or(0); - if opts.llvm { - print!("0x{:x}", addr); - } else { - print!("0x{:016x}", addr); - } - if opts.pretty { - print!(": "); - } else { - println!(); - } - } - - if opts.do_functions || opts.do_inlines { - let mut printed_anything = false; - if let Some(probe) = probe { - let frames = ctx.find_frames(probe); - let frames = split_dwarf_loader.run(frames).unwrap(); - let mut frames = frames.enumerate(); - while let Some((i, frame)) = frames.next().unwrap() { - if opts.pretty && i != 0 { - print!(" (inlined by) "); - } - - if opts.do_functions { - if let Some(func) = frame.function { - print_function( - func.raw_name().ok().as_ref().map(AsRef::as_ref), - func.language, - opts.demangle, - ); - } else { - let name = find_name_from_symbols(&symbols, probe); - print_function(name, None, opts.demangle); - } - - if opts.pretty { - print!(" at "); - } else 
{ - println!(); - } - } - - print_loc(frame.location.as_ref(), opts.basenames, opts.llvm); - - printed_anything = true; - - if !opts.do_inlines { - break; - } - } - } - - if !printed_anything { - if opts.do_functions { - let name = probe.and_then(|probe| find_name_from_symbols(&symbols, probe)); - print_function(name, None, opts.demangle); - - if opts.pretty { - print!(" at "); - } else { - println!(); - } - } - - print_loc(None, opts.basenames, opts.llvm); - } - } else { - let loc = probe.and_then(|probe| ctx.find_location(probe).unwrap()); - print_loc(loc.as_ref(), opts.basenames, opts.llvm); - } - - if opts.llvm { - println!(); - } - std::io::stdout().flush().unwrap(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/addr2line/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/addr2line/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/LICENSE-MIT s390-tools-2.33.1/rust-vendor/addr2line/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/addr2line/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2016-2018 The gimli Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/README.md s390-tools-2.33.1/rust-vendor/addr2line/README.md --- s390-tools-2.31.0/rust-vendor/addr2line/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,48 +0,0 @@ -# addr2line - -[![](https://img.shields.io/crates/v/addr2line.svg)](https://crates.io/crates/addr2line) -[![](https://img.shields.io/docsrs/addr2line.svg)](https://docs.rs/addr2line) -[![Coverage Status](https://coveralls.io/repos/github/gimli-rs/addr2line/badge.svg?branch=master)](https://coveralls.io/github/gimli-rs/addr2line?branch=master) - -A cross-platform library for retrieving per-address debug information -from files with DWARF debug information. - -`addr2line` uses [`gimli`](https://github.com/gimli-rs/gimli) to parse -the debug information, and exposes an interface for finding -the source file, line number, and wrapping function for instruction -addresses within the target program. These lookups can either be -performed programmatically through `Context::find_location` and -`Context::find_frames`, or via the included example binary, -`addr2line` (named and modelled after the equivalent utility from -[GNU binutils](https://sourceware.org/binutils/docs/binutils/addr2line.html)). 
- -# Quickstart - - Add the [`addr2line` crate](https://crates.io/crates/addr2line) to your `Cargo.toml` - - Load the file and parse it with [`addr2line::object::read::File::parse`](https://docs.rs/object/*/object/read/struct.File.html#method.parse) - - Pass the parsed file to [`addr2line::Context::new` ](https://docs.rs/addr2line/*/addr2line/struct.Context.html#method.new) - - Use [`addr2line::Context::find_location`](https://docs.rs/addr2line/*/addr2line/struct.Context.html#method.find_location) - or [`addr2line::Context::find_frames`](https://docs.rs/addr2line/*/addr2line/struct.Context.html#method.find_frames) - to look up debug information for an address - -# Performance - -`addr2line` optimizes for speed over memory by caching parsed information. -The DWARF information is parsed lazily where possible. - -The library aims to perform similarly to equivalent existing tools such -as `addr2line` from binutils, `eu-addr2line` from elfutils, and -`llvm-symbolize` from the llvm project, and in the past some benchmarking -was done that indicates a comparable performance. - -## License - -Licensed under either of - - * Apache License, Version 2.0 ([`LICENSE-APACHE`](./LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([`LICENSE-MIT`](./LICENSE-MIT) or https://opensource.org/licenses/MIT) - -at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. 
diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/rustfmt.toml s390-tools-2.33.1/rust-vendor/addr2line/rustfmt.toml --- s390-tools-2.31.0/rust-vendor/addr2line/rustfmt.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/rustfmt.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ - diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/src/builtin_split_dwarf_loader.rs s390-tools-2.33.1/rust-vendor/addr2line/src/builtin_split_dwarf_loader.rs --- s390-tools-2.31.0/rust-vendor/addr2line/src/builtin_split_dwarf_loader.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/src/builtin_split_dwarf_loader.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,164 +0,0 @@ -use alloc::borrow::Cow; -use alloc::sync::Arc; -use std::fs::File; -use std::path::PathBuf; - -use object::Object; - -use crate::{LookupContinuation, LookupResult}; - -#[cfg(unix)] -fn convert_path>( - r: &R, -) -> Result { - use std::ffi::OsStr; - use std::os::unix::ffi::OsStrExt; - let bytes = r.to_slice()?; - let s = OsStr::from_bytes(&bytes); - Ok(PathBuf::from(s)) -} - -#[cfg(not(unix))] -fn convert_path>( - r: &R, -) -> Result { - let bytes = r.to_slice()?; - let s = std::str::from_utf8(&bytes).map_err(|_| gimli::Error::BadUtf8)?; - Ok(PathBuf::from(s)) -} - -fn load_section<'data: 'file, 'file, O, R, F>( - id: gimli::SectionId, - file: &'file O, - endian: R::Endian, - loader: &mut F, -) -> Result -where - O: object::Object<'data, 'file>, - R: gimli::Reader, - F: FnMut(Cow<'data, [u8]>, R::Endian) -> R, -{ - use object::ObjectSection; - - let data = id - .dwo_name() - .and_then(|dwo_name| { - file.section_by_name(dwo_name) - .and_then(|section| section.uncompressed_data().ok()) - }) - .unwrap_or(Cow::Borrowed(&[])); - Ok(loader(data, endian)) -} - -/// A simple builtin split DWARF loader. 
-pub struct SplitDwarfLoader -where - R: gimli::Reader, - F: FnMut(Cow<'_, [u8]>, R::Endian) -> R, -{ - loader: F, - dwarf_package: Option>, -} - -impl SplitDwarfLoader -where - R: gimli::Reader, - F: FnMut(Cow<'_, [u8]>, R::Endian) -> R, -{ - fn load_dwarf_package(loader: &mut F, path: Option) -> Option> { - let mut path = path.map(Ok).unwrap_or_else(std::env::current_exe).ok()?; - let dwp_extension = path - .extension() - .map(|previous_extension| { - let mut previous_extension = previous_extension.to_os_string(); - previous_extension.push(".dwp"); - previous_extension - }) - .unwrap_or_else(|| "dwp".into()); - path.set_extension(dwp_extension); - let file = File::open(&path).ok()?; - let map = unsafe { memmap2::Mmap::map(&file).ok()? }; - let dwp = object::File::parse(&*map).ok()?; - - let endian = if dwp.is_little_endian() { - gimli::RunTimeEndian::Little - } else { - gimli::RunTimeEndian::Big - }; - - let empty = loader(Cow::Borrowed(&[]), endian); - gimli::DwarfPackage::load( - |section_id| load_section(section_id, &dwp, endian, loader), - empty, - ) - .ok() - } - - /// Create a new split DWARF loader. - pub fn new(mut loader: F, path: Option) -> SplitDwarfLoader { - let dwarf_package = SplitDwarfLoader::load_dwarf_package(&mut loader, path); - SplitDwarfLoader { - loader, - dwarf_package, - } - } - - /// Run the provided `LookupResult` to completion, loading any necessary - /// split DWARF along the way. 
- pub fn run(&mut self, mut l: LookupResult) -> L::Output - where - L: LookupContinuation, - { - loop { - let (load, continuation) = match l { - LookupResult::Output(output) => break output, - LookupResult::Load { load, continuation } => (load, continuation), - }; - - let mut r: Option>> = None; - if let Some(dwp) = self.dwarf_package.as_ref() { - if let Ok(Some(cu)) = dwp.find_cu(load.dwo_id, &load.parent) { - r = Some(Arc::new(cu)); - } - } - - if r.is_none() { - let mut path = PathBuf::new(); - if let Some(p) = load.comp_dir.as_ref() { - if let Ok(p) = convert_path(p) { - path.push(p); - } - } - - if let Some(p) = load.path.as_ref() { - if let Ok(p) = convert_path(p) { - path.push(p); - } - } - - if let Ok(file) = File::open(&path) { - if let Ok(map) = unsafe { memmap2::Mmap::map(&file) } { - if let Ok(file) = object::File::parse(&*map) { - let endian = if file.is_little_endian() { - gimli::RunTimeEndian::Little - } else { - gimli::RunTimeEndian::Big - }; - - r = gimli::Dwarf::load(|id| { - load_section(id, &file, endian, &mut self.loader) - }) - .ok() - .map(|mut dwo_dwarf| { - dwo_dwarf.make_dwo(&load.parent); - Arc::new(dwo_dwarf) - }); - } - } - } - } - - l = continuation.resume(r); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/src/function.rs s390-tools-2.33.1/rust-vendor/addr2line/src/function.rs --- s390-tools-2.31.0/rust-vendor/addr2line/src/function.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/src/function.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,555 +0,0 @@ -use alloc::boxed::Box; -use alloc::vec::Vec; -use core::cmp::Ordering; -use core::iter; - -use crate::lazy::LazyCell; -use crate::maybe_small; -use crate::{Context, DebugFile, Error, RangeAttributes}; - -pub(crate) struct Functions { - /// List of all `DW_TAG_subprogram` details in the unit. - pub(crate) functions: Box< - [( - gimli::UnitOffset, - LazyCell, Error>>, - )], - >, - /// List of `DW_TAG_subprogram` address ranges in the unit. 
- pub(crate) addresses: Box<[FunctionAddress]>, -} - -/// A single address range for a function. -/// -/// It is possible for a function to have multiple address ranges; this -/// is handled by having multiple `FunctionAddress` entries with the same -/// `function` field. -pub(crate) struct FunctionAddress { - range: gimli::Range, - /// An index into `Functions::functions`. - pub(crate) function: usize, -} - -pub(crate) struct Function { - pub(crate) dw_die_offset: gimli::UnitOffset, - pub(crate) name: Option, - /// List of all `DW_TAG_inlined_subroutine` details in this function. - inlined_functions: Box<[InlinedFunction]>, - /// List of `DW_TAG_inlined_subroutine` address ranges in this function. - inlined_addresses: Box<[InlinedFunctionAddress]>, -} - -pub(crate) struct InlinedFunctionAddress { - range: gimli::Range, - call_depth: usize, - /// An index into `Function::inlined_functions`. - function: usize, -} - -pub(crate) struct InlinedFunction { - pub(crate) dw_die_offset: gimli::UnitOffset, - pub(crate) name: Option, - pub(crate) call_file: Option, - pub(crate) call_line: u32, - pub(crate) call_column: u32, -} - -impl Functions { - pub(crate) fn parse( - unit: &gimli::Unit, - sections: &gimli::Dwarf, - ) -> Result, Error> { - let mut functions = Vec::new(); - let mut addresses = Vec::new(); - let mut entries = unit.entries_raw(None)?; - while !entries.is_empty() { - let dw_die_offset = entries.next_offset(); - if let Some(abbrev) = entries.read_abbreviation()? 
{ - if abbrev.tag() == gimli::DW_TAG_subprogram { - let mut ranges = RangeAttributes::default(); - for spec in abbrev.attributes() { - match entries.read_attribute(*spec) { - Ok(ref attr) => { - match attr.name() { - gimli::DW_AT_low_pc => match attr.value() { - gimli::AttributeValue::Addr(val) => { - ranges.low_pc = Some(val) - } - gimli::AttributeValue::DebugAddrIndex(index) => { - ranges.low_pc = Some(sections.address(unit, index)?); - } - _ => {} - }, - gimli::DW_AT_high_pc => match attr.value() { - gimli::AttributeValue::Addr(val) => { - ranges.high_pc = Some(val) - } - gimli::AttributeValue::DebugAddrIndex(index) => { - ranges.high_pc = Some(sections.address(unit, index)?); - } - gimli::AttributeValue::Udata(val) => { - ranges.size = Some(val) - } - _ => {} - }, - gimli::DW_AT_ranges => { - ranges.ranges_offset = - sections.attr_ranges_offset(unit, attr.value())?; - } - _ => {} - }; - } - Err(e) => return Err(e), - } - } - - let function_index = functions.len(); - if ranges.for_each_range(sections, unit, |range| { - addresses.push(FunctionAddress { - range, - function: function_index, - }); - })? { - functions.push((dw_die_offset, LazyCell::new())); - } - } else { - entries.skip_attributes(abbrev.attributes())?; - } - } - } - - // The binary search requires the addresses to be sorted. - // - // It also requires them to be non-overlapping. In practice, overlapping - // function ranges are unlikely, so we don't try to handle that yet. - // - // It's possible for multiple functions to have the same address range if the - // compiler can detect and remove functions with identical code. In that case - // we'll nondeterministically return one of them. 
- addresses.sort_by_key(|x| x.range.begin); - - Ok(Functions { - functions: functions.into_boxed_slice(), - addresses: addresses.into_boxed_slice(), - }) - } - - pub(crate) fn find_address(&self, probe: u64) -> Option { - self.addresses - .binary_search_by(|address| { - if probe < address.range.begin { - Ordering::Greater - } else if probe >= address.range.end { - Ordering::Less - } else { - Ordering::Equal - } - }) - .ok() - } - - pub(crate) fn parse_inlined_functions( - &self, - file: DebugFile, - unit: &gimli::Unit, - ctx: &Context, - sections: &gimli::Dwarf, - ) -> Result<(), Error> { - for function in &*self.functions { - function - .1 - .borrow_with(|| Function::parse(function.0, file, unit, ctx, sections)) - .as_ref() - .map_err(Error::clone)?; - } - Ok(()) - } -} - -impl Function { - pub(crate) fn parse( - dw_die_offset: gimli::UnitOffset, - file: DebugFile, - unit: &gimli::Unit, - ctx: &Context, - sections: &gimli::Dwarf, - ) -> Result { - let mut entries = unit.entries_raw(Some(dw_die_offset))?; - let depth = entries.next_depth(); - let abbrev = entries.read_abbreviation()?.unwrap(); - debug_assert_eq!(abbrev.tag(), gimli::DW_TAG_subprogram); - - let mut name = None; - for spec in abbrev.attributes() { - match entries.read_attribute(*spec) { - Ok(ref attr) => { - match attr.name() { - gimli::DW_AT_linkage_name | gimli::DW_AT_MIPS_linkage_name => { - if let Ok(val) = sections.attr_string(unit, attr.value()) { - name = Some(val); - } - } - gimli::DW_AT_name => { - if name.is_none() { - name = sections.attr_string(unit, attr.value()).ok(); - } - } - gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => { - if name.is_none() { - name = name_attr(attr.value(), file, unit, ctx, sections, 16)?; - } - } - _ => {} - }; - } - Err(e) => return Err(e), - } - } - - let mut inlined_functions = Vec::new(); - let mut inlined_addresses = Vec::new(); - Function::parse_children( - &mut entries, - depth, - file, - unit, - ctx, - sections, - &mut inlined_functions, - 
&mut inlined_addresses, - 0, - )?; - - // Sort ranges in "breadth-first traversal order", i.e. first by call_depth - // and then by range.begin. This allows finding the range containing an - // address at a certain depth using binary search. - // Note: Using DFS order, i.e. ordering by range.begin first and then by - // call_depth, would not work! Consider the two examples - // "[0..10 at depth 0], [0..2 at depth 1], [6..8 at depth 1]" and - // "[0..5 at depth 0], [0..2 at depth 1], [5..10 at depth 0], [6..8 at depth 1]". - // In this example, if you want to look up address 7 at depth 0, and you - // encounter [0..2 at depth 1], are you before or after the target range? - // You don't know. - inlined_addresses.sort_by(|r1, r2| { - if r1.call_depth < r2.call_depth { - Ordering::Less - } else if r1.call_depth > r2.call_depth { - Ordering::Greater - } else if r1.range.begin < r2.range.begin { - Ordering::Less - } else if r1.range.begin > r2.range.begin { - Ordering::Greater - } else { - Ordering::Equal - } - }); - - Ok(Function { - dw_die_offset, - name, - inlined_functions: inlined_functions.into_boxed_slice(), - inlined_addresses: inlined_addresses.into_boxed_slice(), - }) - } - - fn parse_children( - entries: &mut gimli::EntriesRaw<'_, '_, R>, - depth: isize, - file: DebugFile, - unit: &gimli::Unit, - ctx: &Context, - sections: &gimli::Dwarf, - inlined_functions: &mut Vec>, - inlined_addresses: &mut Vec, - inlined_depth: usize, - ) -> Result<(), Error> { - loop { - let dw_die_offset = entries.next_offset(); - let next_depth = entries.next_depth(); - if next_depth <= depth { - return Ok(()); - } - if let Some(abbrev) = entries.read_abbreviation()? 
{ - match abbrev.tag() { - gimli::DW_TAG_subprogram => { - Function::skip(entries, abbrev, next_depth)?; - } - gimli::DW_TAG_inlined_subroutine => { - InlinedFunction::parse( - dw_die_offset, - entries, - abbrev, - next_depth, - file, - unit, - ctx, - sections, - inlined_functions, - inlined_addresses, - inlined_depth, - )?; - } - _ => { - entries.skip_attributes(abbrev.attributes())?; - } - } - } - } - } - - fn skip( - entries: &mut gimli::EntriesRaw<'_, '_, R>, - abbrev: &gimli::Abbreviation, - depth: isize, - ) -> Result<(), Error> { - // TODO: use DW_AT_sibling - entries.skip_attributes(abbrev.attributes())?; - while entries.next_depth() > depth { - if let Some(abbrev) = entries.read_abbreviation()? { - entries.skip_attributes(abbrev.attributes())?; - } - } - Ok(()) - } - - /// Build the list of inlined functions that contain `probe`. - pub(crate) fn find_inlined_functions( - &self, - probe: u64, - ) -> iter::Rev>> { - // `inlined_functions` is ordered from outside to inside. - let mut inlined_functions = maybe_small::Vec::new(); - let mut inlined_addresses = &self.inlined_addresses[..]; - loop { - let current_depth = inlined_functions.len(); - // Look up (probe, current_depth) in inline_ranges. - // `inlined_addresses` is sorted in "breadth-first traversal order", i.e. - // by `call_depth` first, and then by `range.begin`. See the comment at - // the sort call for more information about why. 
- let search = inlined_addresses.binary_search_by(|range| { - if range.call_depth > current_depth { - Ordering::Greater - } else if range.call_depth < current_depth { - Ordering::Less - } else if range.range.begin > probe { - Ordering::Greater - } else if range.range.end <= probe { - Ordering::Less - } else { - Ordering::Equal - } - }); - if let Ok(index) = search { - let function_index = inlined_addresses[index].function; - inlined_functions.push(&self.inlined_functions[function_index]); - inlined_addresses = &inlined_addresses[index + 1..]; - } else { - break; - } - } - inlined_functions.into_iter().rev() - } -} - -impl InlinedFunction { - fn parse( - dw_die_offset: gimli::UnitOffset, - entries: &mut gimli::EntriesRaw<'_, '_, R>, - abbrev: &gimli::Abbreviation, - depth: isize, - file: DebugFile, - unit: &gimli::Unit, - ctx: &Context, - sections: &gimli::Dwarf, - inlined_functions: &mut Vec>, - inlined_addresses: &mut Vec, - inlined_depth: usize, - ) -> Result<(), Error> { - let mut ranges = RangeAttributes::default(); - let mut name = None; - let mut call_file = None; - let mut call_line = 0; - let mut call_column = 0; - for spec in abbrev.attributes() { - match entries.read_attribute(*spec) { - Ok(ref attr) => match attr.name() { - gimli::DW_AT_low_pc => match attr.value() { - gimli::AttributeValue::Addr(val) => ranges.low_pc = Some(val), - gimli::AttributeValue::DebugAddrIndex(index) => { - ranges.low_pc = Some(sections.address(unit, index)?); - } - _ => {} - }, - gimli::DW_AT_high_pc => match attr.value() { - gimli::AttributeValue::Addr(val) => ranges.high_pc = Some(val), - gimli::AttributeValue::DebugAddrIndex(index) => { - ranges.high_pc = Some(sections.address(unit, index)?); - } - gimli::AttributeValue::Udata(val) => ranges.size = Some(val), - _ => {} - }, - gimli::DW_AT_ranges => { - ranges.ranges_offset = sections.attr_ranges_offset(unit, attr.value())?; - } - gimli::DW_AT_linkage_name | gimli::DW_AT_MIPS_linkage_name => { - if let Ok(val) = 
sections.attr_string(unit, attr.value()) { - name = Some(val); - } - } - gimli::DW_AT_name => { - if name.is_none() { - name = sections.attr_string(unit, attr.value()).ok(); - } - } - gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => { - if name.is_none() { - name = name_attr(attr.value(), file, unit, ctx, sections, 16)?; - } - } - gimli::DW_AT_call_file => { - // There is a spec issue [1] with how DW_AT_call_file is specified in DWARF 5. - // Before, a file index of 0 would indicate no source file, however in - // DWARF 5 this could be a valid index into the file table. - // - // Implementations such as LLVM generates a file index of 0 when DWARF 5 is - // used. - // - // Thus, if we see a version of 5 or later, treat a file index of 0 as such. - // [1]: http://wiki.dwarfstd.org/index.php?title=DWARF5_Line_Table_File_Numbers - if let gimli::AttributeValue::FileIndex(fi) = attr.value() { - if fi > 0 || unit.header.version() >= 5 { - call_file = Some(fi); - } - } - } - gimli::DW_AT_call_line => { - call_line = attr.udata_value().unwrap_or(0) as u32; - } - gimli::DW_AT_call_column => { - call_column = attr.udata_value().unwrap_or(0) as u32; - } - _ => {} - }, - Err(e) => return Err(e), - } - } - - let function_index = inlined_functions.len(); - inlined_functions.push(InlinedFunction { - dw_die_offset, - name, - call_file, - call_line, - call_column, - }); - - ranges.for_each_range(sections, unit, |range| { - inlined_addresses.push(InlinedFunctionAddress { - range, - call_depth: inlined_depth, - function: function_index, - }); - })?; - - Function::parse_children( - entries, - depth, - file, - unit, - ctx, - sections, - inlined_functions, - inlined_addresses, - inlined_depth + 1, - ) - } -} - -fn name_attr( - attr: gimli::AttributeValue, - mut file: DebugFile, - unit: &gimli::Unit, - ctx: &Context, - sections: &gimli::Dwarf, - recursion_limit: usize, -) -> Result, Error> -where - R: gimli::Reader, -{ - if recursion_limit == 0 { - return Ok(None); - } - - 
match attr { - gimli::AttributeValue::UnitRef(offset) => { - name_entry(file, unit, offset, ctx, sections, recursion_limit) - } - gimli::AttributeValue::DebugInfoRef(dr) => { - let (unit, offset) = ctx.find_unit(dr, file)?; - name_entry(file, unit, offset, ctx, sections, recursion_limit) - } - gimli::AttributeValue::DebugInfoRefSup(dr) => { - if let Some(sup_sections) = sections.sup.as_ref() { - file = DebugFile::Supplementary; - let (unit, offset) = ctx.find_unit(dr, file)?; - name_entry(file, unit, offset, ctx, sup_sections, recursion_limit) - } else { - Ok(None) - } - } - _ => Ok(None), - } -} - -fn name_entry( - file: DebugFile, - unit: &gimli::Unit, - offset: gimli::UnitOffset, - ctx: &Context, - sections: &gimli::Dwarf, - recursion_limit: usize, -) -> Result, Error> -where - R: gimli::Reader, -{ - let mut entries = unit.entries_raw(Some(offset))?; - let abbrev = if let Some(abbrev) = entries.read_abbreviation()? { - abbrev - } else { - return Err(gimli::Error::NoEntryAtGivenOffset); - }; - - let mut name = None; - let mut next = None; - for spec in abbrev.attributes() { - match entries.read_attribute(*spec) { - Ok(ref attr) => match attr.name() { - gimli::DW_AT_linkage_name | gimli::DW_AT_MIPS_linkage_name => { - if let Ok(val) = sections.attr_string(unit, attr.value()) { - return Ok(Some(val)); - } - } - gimli::DW_AT_name => { - if let Ok(val) = sections.attr_string(unit, attr.value()) { - name = Some(val); - } - } - gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => { - next = Some(attr.value()); - } - _ => {} - }, - Err(e) => return Err(e), - } - } - - if name.is_some() { - return Ok(name); - } - - if let Some(next) = next { - return name_attr(next, file, unit, ctx, sections, recursion_limit - 1); - } - - Ok(None) -} diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/src/lazy.rs s390-tools-2.33.1/rust-vendor/addr2line/src/lazy.rs --- s390-tools-2.31.0/rust-vendor/addr2line/src/lazy.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/addr2line/src/lazy.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use core::cell::UnsafeCell; - -pub struct LazyCell { - contents: UnsafeCell>, -} -impl LazyCell { - pub fn new() -> LazyCell { - LazyCell { - contents: UnsafeCell::new(None), - } - } - - pub fn borrow(&self) -> Option<&T> { - unsafe { &*self.contents.get() }.as_ref() - } - - pub fn borrow_with(&self, closure: impl FnOnce() -> T) -> &T { - // First check if we're already initialized... - let ptr = self.contents.get(); - if let Some(val) = unsafe { &*ptr } { - return val; - } - // Note that while we're executing `closure` our `borrow_with` may - // be called recursively. This means we need to check again after - // the closure has executed. For that we use the `get_or_insert` - // method which will only perform mutation if we aren't already - // `Some`. - let val = closure(); - unsafe { (*ptr).get_or_insert(val) } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/addr2line/src/lib.rs s390-tools-2.33.1/rust-vendor/addr2line/src/lib.rs --- s390-tools-2.31.0/rust-vendor/addr2line/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/addr2line/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1729 +0,0 @@ -//! This crate provides a cross-platform library and binary for translating addresses into -//! function names, file names and line numbers. Given an address in an executable or an -//! offset in a section of a relocatable object, it uses the debugging information to -//! figure out which file name and line number are associated with it. -//! -//! When used as a library, files must first be loaded using the -//! [`object`](https://github.com/gimli-rs/object) crate. -//! A context can then be created with [`Context::new`](./struct.Context.html#method.new). -//! The context caches some of the parsed information so that multiple lookups are -//! efficient. -//! Location information is obtained with -//! 
[`Context::find_location`](./struct.Context.html#method.find_location) or -//! [`Context::find_location_range`](./struct.Context.html#method.find_location_range). -//! Function information is obtained with -//! [`Context::find_frames`](./struct.Context.html#method.find_frames), which returns -//! a frame for each inline function. Each frame contains both name and location. -//! -//! The crate has an example CLI wrapper around the library which provides some of -//! the functionality of the `addr2line` command line tool distributed with [GNU -//! binutils](https://www.gnu.org/software/binutils/). -//! -//! Currently this library only provides information from the DWARF debugging information, -//! which is parsed using [`gimli`](https://github.com/gimli-rs/gimli). The example CLI -//! wrapper also uses symbol table information provided by the `object` crate. -#![deny(missing_docs)] -#![no_std] - -#[cfg(feature = "std")] -extern crate std; - -#[allow(unused_imports)] -#[macro_use] -extern crate alloc; - -#[cfg(feature = "fallible-iterator")] -pub extern crate fallible_iterator; -pub extern crate gimli; -#[cfg(feature = "object")] -pub extern crate object; - -use alloc::borrow::Cow; -use alloc::boxed::Box; -#[cfg(feature = "object")] -use alloc::rc::Rc; -use alloc::string::{String, ToString}; -use alloc::sync::Arc; -use alloc::vec::Vec; - -use core::cmp::{self, Ordering}; -use core::iter; -use core::marker::PhantomData; -use core::mem; -use core::num::NonZeroU64; -use core::ops::ControlFlow; -use core::u64; - -use crate::function::{Function, Functions, InlinedFunction}; -use crate::lazy::LazyCell; - -#[cfg(feature = "smallvec")] -mod maybe_small { - pub type Vec = smallvec::SmallVec<[T; 16]>; - pub type IntoIter = smallvec::IntoIter<[T; 16]>; -} -#[cfg(not(feature = "smallvec"))] -mod maybe_small { - pub type Vec = alloc::vec::Vec; - pub type IntoIter = alloc::vec::IntoIter; -} - -#[cfg(all(feature = "std", feature = "object", feature = "memmap2"))] -/// A simple 
builtin split DWARF loader. -pub mod builtin_split_dwarf_loader; -mod function; -mod lazy; - -type Error = gimli::Error; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum DebugFile { - Primary, - Supplementary, - Dwo, -} - -/// Operations that consult debug information may require additional files -/// to be loaded if split DWARF is being used. This enum returns the result -/// of the operation in the `Break` variant, or information about the split -/// DWARF that is required and a continuation to invoke once it is available -/// in the `Continue` variant. -/// -/// This enum is intended to be used in a loop like so: -/// ```no_run -/// # use addr2line::*; -/// # use std::sync::Arc; -/// # let ctx: Context> = todo!(); -/// # let do_split_dwarf_load = |load: SplitDwarfLoad>| -> Option>>> { None }; -/// const ADDRESS: u64 = 0xdeadbeef; -/// let mut r = ctx.find_frames(ADDRESS); -/// let result = loop { -/// match r { -/// LookupResult::Output(result) => break result, -/// LookupResult::Load { load, continuation } => { -/// let dwo = do_split_dwarf_load(load); -/// r = continuation.resume(dwo); -/// } -/// } -/// }; -/// ``` -pub enum LookupResult { - /// The lookup requires split DWARF data to be loaded. - Load { - /// The information needed to find the split DWARF data. - load: SplitDwarfLoad<::Buf>, - /// The continuation to resume with the loaded split DWARF data. - continuation: L, - }, - /// The lookup has completed and produced an output. - Output(::Output), -} - -/// This trait represents a partially complete operation that can be resumed -/// once a load of needed split DWARF data is completed or abandoned by the -/// API consumer. -pub trait LookupContinuation: Sized { - /// The final output of this operation. - type Output; - /// The type of reader used. - type Buf: gimli::Reader; - - /// Resumes the operation with the provided data. - /// - /// After the caller loads the split DWARF data required, call this - /// method to resume the operation. 
The return value of this method - /// indicates if the computation has completed or if further data is - /// required. - /// - /// If the additional data cannot be located, or the caller does not - /// support split DWARF, `resume(None)` can be used to continue the - /// operation with the data that is available. - fn resume(self, input: Option>>) -> LookupResult; -} - -impl LookupResult { - /// Callers that do not handle split DWARF can call `skip_all_loads` - /// to fast-forward to the end result. This result is produced with - /// the data that is available and may be less accurate than the - /// the results that would be produced if the caller did properly - /// support split DWARF. - pub fn skip_all_loads(mut self) -> L::Output { - loop { - self = match self { - LookupResult::Output(t) => return t, - LookupResult::Load { continuation, .. } => continuation.resume(None), - }; - } - } - - fn map T>(self, f: F) -> LookupResult> { - match self { - LookupResult::Output(t) => LookupResult::Output(f(t)), - LookupResult::Load { load, continuation } => LookupResult::Load { - load, - continuation: MappedLookup { - original: continuation, - mutator: f, - }, - }, - } - } - - fn unwrap(self) -> L::Output { - match self { - LookupResult::Output(t) => t, - LookupResult::Load { .. } => unreachable!("Internal API misuse"), - } - } -} - -/// The state necessary to perform address to line translation. -/// -/// Constructing a `Context` is somewhat costly, so users should aim to reuse `Context`s -/// when performing lookups for many addresses in the same executable. -pub struct Context { - sections: Arc>, - unit_ranges: Box<[UnitRange]>, - units: Box<[ResUnit]>, - sup_units: Box<[SupUnit]>, -} - -/// The type of `Context` that supports the `new` method. -#[cfg(feature = "std-object")] -pub type ObjectContext = Context>; - -#[cfg(feature = "std-object")] -impl Context> { - /// Construct a new `Context`. - /// - /// The resulting `Context` uses `gimli::EndianRcSlice`. 
- /// This means it is not thread safe, has no lifetime constraints (since it copies - /// the input data), and works for any endianity. - /// - /// Performance sensitive applications may want to use `Context::from_dwarf` - /// with a more specialised `gimli::Reader` implementation. - #[inline] - pub fn new<'data: 'file, 'file, O: object::Object<'data, 'file>>( - file: &'file O, - ) -> Result { - Self::new_with_sup(file, None) - } - - /// Construct a new `Context`. - /// - /// Optionally also use a supplementary object file. - /// - /// The resulting `Context` uses `gimli::EndianRcSlice`. - /// This means it is not thread safe, has no lifetime constraints (since it copies - /// the input data), and works for any endianity. - /// - /// Performance sensitive applications may want to use `Context::from_dwarf` - /// with a more specialised `gimli::Reader` implementation. - pub fn new_with_sup<'data: 'file, 'file, O: object::Object<'data, 'file>>( - file: &'file O, - sup_file: Option<&'file O>, - ) -> Result { - let endian = if file.is_little_endian() { - gimli::RunTimeEndian::Little - } else { - gimli::RunTimeEndian::Big - }; - - fn load_section<'data: 'file, 'file, O, Endian>( - id: gimli::SectionId, - file: &'file O, - endian: Endian, - ) -> Result, Error> - where - O: object::Object<'data, 'file>, - Endian: gimli::Endianity, - { - use object::ObjectSection; - - let data = file - .section_by_name(id.name()) - .and_then(|section| section.uncompressed_data().ok()) - .unwrap_or(Cow::Borrowed(&[])); - Ok(gimli::EndianRcSlice::new(Rc::from(&*data), endian)) - } - - let mut dwarf = gimli::Dwarf::load(|id| load_section(id, file, endian))?; - if let Some(sup_file) = sup_file { - dwarf.load_sup(|id| load_section(id, sup_file, endian))?; - } - Context::from_dwarf(dwarf) - } -} - -impl Context { - /// Construct a new `Context` from DWARF sections. - /// - /// This method does not support using a supplementary object file. 
- pub fn from_sections( - debug_abbrev: gimli::DebugAbbrev, - debug_addr: gimli::DebugAddr, - debug_aranges: gimli::DebugAranges, - debug_info: gimli::DebugInfo, - debug_line: gimli::DebugLine, - debug_line_str: gimli::DebugLineStr, - debug_ranges: gimli::DebugRanges, - debug_rnglists: gimli::DebugRngLists, - debug_str: gimli::DebugStr, - debug_str_offsets: gimli::DebugStrOffsets, - default_section: R, - ) -> Result { - Self::from_dwarf(gimli::Dwarf { - debug_abbrev, - debug_addr, - debug_aranges, - debug_info, - debug_line, - debug_line_str, - debug_str, - debug_str_offsets, - debug_types: default_section.clone().into(), - locations: gimli::LocationLists::new( - default_section.clone().into(), - default_section.into(), - ), - ranges: gimli::RangeLists::new(debug_ranges, debug_rnglists), - file_type: gimli::DwarfFileType::Main, - sup: None, - abbreviations_cache: gimli::AbbreviationsCache::new(), - }) - } - - /// Construct a new `Context` from an existing [`gimli::Dwarf`] object. - #[inline] - pub fn from_dwarf(sections: gimli::Dwarf) -> Result, Error> { - let sections = Arc::new(sections); - let (unit_ranges, units) = Context::parse_units(§ions)?; - let sup_units = if let Some(sup) = sections.sup.as_ref() { - Context::parse_sup(sup)? - } else { - Vec::new() - }; - Ok(Context { - sections, - unit_ranges: unit_ranges.into_boxed_slice(), - units: units.into_boxed_slice(), - sup_units: sup_units.into_boxed_slice(), - }) - } - - /// Finds the CUs for the function address given. - /// - /// There might be multiple CUs whose range contains this address. - /// Weak symbols have shown up in the wild which cause this to happen - /// but otherwise this can happen if the CU has non-contiguous functions - /// but only reports a single range. - /// - /// Consequently we return an iterator for all CUs which may contain the - /// address, and the caller must check if there is actually a function or - /// location in the CU for that address. 
- fn find_units(&self, probe: u64) -> impl Iterator> { - self.find_units_range(probe, probe + 1) - .map(|(unit, _range)| unit) - } - - /// Finds the CUs covering the range of addresses given. - /// - /// The range is [low, high) (ie, the upper bound is exclusive). This can return multiple - /// ranges for the same unit. - #[inline] - fn find_units_range( - &self, - probe_low: u64, - probe_high: u64, - ) -> impl Iterator, &gimli::Range)> { - // First up find the position in the array which could have our function - // address. - let pos = match self - .unit_ranges - .binary_search_by_key(&probe_high, |i| i.range.begin) - { - // Although unlikely, we could find an exact match. - Ok(i) => i + 1, - // No exact match was found, but this probe would fit at slot `i`. - // This means that slot `i` is bigger than `probe`, along with all - // indices greater than `i`, so we need to search all previous - // entries. - Err(i) => i, - }; - - // Once we have our index we iterate backwards from that position - // looking for a matching CU. - self.unit_ranges[..pos] - .iter() - .rev() - .take_while(move |i| { - // We know that this CU's start is beneath the probe already because - // of our sorted array. - debug_assert!(i.range.begin <= probe_high); - - // Each entry keeps track of the maximum end address seen so far, - // starting from the beginning of the array of unit ranges. We're - // iterating in reverse so if our probe is beyond the maximum range - // of this entry, then it's guaranteed to not fit in any prior - // entries, so we break out. - probe_low < i.max_end - }) - .filter_map(move |i| { - // If this CU doesn't actually contain this address, move to the - // next CU. - if probe_low >= i.range.end || probe_high <= i.range.begin { - return None; - } - Some((&self.units[i.unit_id], &i.range)) - }) - } - - /// Find the DWARF unit corresponding to the given virtual memory address. 
- pub fn find_dwarf_and_unit( - &self, - probe: u64, - ) -> LookupResult< - impl LookupContinuation, &gimli::Unit)>, Buf = R>, - > { - let mut units_iter = self.find_units(probe); - if let Some(unit) = units_iter.next() { - return LoopingLookup::new_lookup( - unit.find_function_or_location(probe, self), - move |r| { - ControlFlow::Break(match r { - Ok((Some(_), _)) | Ok((_, Some(_))) => { - let (_file, sections, unit) = unit - .dwarf_and_unit_dwo(self) - // We've already been through both error cases here to get to this point. - .unwrap() - .unwrap(); - Some((sections, unit)) - } - _ => match units_iter.next() { - Some(next_unit) => { - return ControlFlow::Continue( - next_unit.find_function_or_location(probe, self), - ); - } - None => None, - }, - }) - }, - ); - } - - LoopingLookup::new_complete(None) - } - - /// Find the source file and line corresponding to the given virtual memory address. - pub fn find_location(&self, probe: u64) -> Result>, Error> { - for unit in self.find_units(probe) { - if let Some(location) = unit.find_location(probe, &self.sections)? { - return Ok(Some(location)); - } - } - Ok(None) - } - - /// Return source file and lines for a range of addresses. For each location it also - /// returns the address and size of the range of the underlying instructions. - pub fn find_location_range( - &self, - probe_low: u64, - probe_high: u64, - ) -> Result, Error> { - LocationRangeIter::new(self, probe_low, probe_high) - } - - /// Return an iterator for the function frames corresponding to the given virtual - /// memory address. - /// - /// If the probe address is not for an inline function then only one frame is - /// returned. - /// - /// If the probe address is for an inline function then the first frame corresponds - /// to the innermost inline function. Subsequent frames contain the caller and call - /// location, until an non-inline caller is reached. 
- pub fn find_frames( - &self, - probe: u64, - ) -> LookupResult, Error>, Buf = R>> - { - let mut units_iter = self.find_units(probe); - if let Some(unit) = units_iter.next() { - LoopingLookup::new_lookup(unit.find_function_or_location(probe, self), move |r| { - ControlFlow::Break(match r { - Err(e) => Err(e), - Ok((Some(function), location)) => { - let inlined_functions = function.find_inlined_functions(probe); - Ok(FrameIter(FrameIterState::Frames(FrameIterFrames { - unit, - sections: &self.sections, - function, - inlined_functions, - next: location, - }))) - } - Ok((None, Some(location))) => { - Ok(FrameIter(FrameIterState::Location(Some(location)))) - } - Ok((None, None)) => match units_iter.next() { - Some(next_unit) => { - return ControlFlow::Continue( - next_unit.find_function_or_location(probe, self), - ); - } - None => Ok(FrameIter(FrameIterState::Empty)), - }, - }) - }) - } else { - LoopingLookup::new_complete(Ok(FrameIter(FrameIterState::Empty))) - } - } - - /// Preload units for `probe`. - /// - /// The iterator returns pairs of `SplitDwarfLoad`s containing the - /// information needed to locate and load split DWARF for `probe` and - /// a matching callback to invoke once that data is available. - /// - /// If this method is called, and all of the returned closures are invoked, - /// addr2line guarantees that any future API call for the address `probe` - /// will not require the loading of any split DWARF. - /// - /// ```no_run - /// # use addr2line::*; - /// # use std::sync::Arc; - /// # let ctx: Context> = todo!(); - /// # let do_split_dwarf_load = |load: SplitDwarfLoad>| -> Option>>> { None }; - /// const ADDRESS: u64 = 0xdeadbeef; - /// ctx.preload_units(ADDRESS).for_each(|(load, callback)| { - /// let dwo = do_split_dwarf_load(load); - /// callback(dwo); - /// }); - /// - /// let frames_iter = match ctx.find_frames(ADDRESS) { - /// LookupResult::Output(result) => result, - /// LookupResult::Load { .. 
} => unreachable!("addr2line promised we wouldn't get here"), - /// }; - /// - /// // ... - /// ``` - pub fn preload_units( - &'_ self, - probe: u64, - ) -> impl Iterator< - Item = ( - SplitDwarfLoad, - impl FnOnce(Option>>) -> Result<(), gimli::Error> + '_, - ), - > { - self.find_units(probe) - .filter_map(move |unit| match unit.dwarf_and_unit_dwo(self) { - LookupResult::Output(_) => None, - LookupResult::Load { load, continuation } => Some((load, |result| { - continuation.resume(result).unwrap().map(|_| ()) - })), - }) - } - - /// Initialize all line data structures. This is used for benchmarks. - #[doc(hidden)] - pub fn parse_lines(&self) -> Result<(), Error> { - for unit in self.units.iter() { - unit.parse_lines(&self.sections)?; - } - Ok(()) - } - - /// Initialize all function data structures. This is used for benchmarks. - #[doc(hidden)] - pub fn parse_functions(&self) -> Result<(), Error> { - for unit in self.units.iter() { - unit.parse_functions(self).skip_all_loads()?; - } - Ok(()) - } - - /// Initialize all inlined function data structures. This is used for benchmarks. - #[doc(hidden)] - pub fn parse_inlined_functions(&self) -> Result<(), Error> { - for unit in self.units.iter() { - unit.parse_inlined_functions(self).skip_all_loads()?; - } - Ok(()) - } -} - -struct UnitRange { - unit_id: usize, - max_end: u64, - range: gimli::Range, -} - -struct ResUnit { - offset: gimli::DebugInfoOffset, - dw_unit: gimli::Unit, - lang: Option, - lines: LazyCell>, - funcs: LazyCell, Error>>, - dwo: LazyCell>, gimli::Unit)>>, Error>>, -} - -struct SupUnit { - offset: gimli::DebugInfoOffset, - dw_unit: gimli::Unit, -} - -impl Context { - fn parse_units(sections: &gimli::Dwarf) -> Result<(Vec, Vec>), Error> { - // Find all the references to compilation units in .debug_aranges. - // Note that we always also iterate through all of .debug_info to - // find compilation units, because .debug_aranges may be missing some. 
- let mut aranges = Vec::new(); - let mut headers = sections.debug_aranges.headers(); - while let Some(header) = headers.next()? { - aranges.push((header.debug_info_offset(), header.offset())); - } - aranges.sort_by_key(|i| i.0); - - let mut unit_ranges = Vec::new(); - let mut res_units = Vec::new(); - let mut units = sections.units(); - while let Some(header) = units.next()? { - let unit_id = res_units.len(); - let offset = match header.offset().as_debug_info_offset() { - Some(offset) => offset, - None => continue, - }; - // We mainly want compile units, but we may need to follow references to entries - // within other units for function names. We don't need anything from type units. - match header.type_() { - gimli::UnitType::Type { .. } | gimli::UnitType::SplitType { .. } => continue, - _ => {} - } - let dw_unit = match sections.unit(header) { - Ok(dw_unit) => dw_unit, - Err(_) => continue, - }; - - let mut lang = None; - let mut have_unit_range = false; - { - let mut entries = dw_unit.entries_raw(None)?; - - let abbrev = match entries.read_abbreviation()? 
{ - Some(abbrev) => abbrev, - None => continue, - }; - - let mut ranges = RangeAttributes::default(); - for spec in abbrev.attributes() { - let attr = entries.read_attribute(*spec)?; - match attr.name() { - gimli::DW_AT_low_pc => match attr.value() { - gimli::AttributeValue::Addr(val) => ranges.low_pc = Some(val), - gimli::AttributeValue::DebugAddrIndex(index) => { - ranges.low_pc = Some(sections.address(&dw_unit, index)?); - } - _ => {} - }, - gimli::DW_AT_high_pc => match attr.value() { - gimli::AttributeValue::Addr(val) => ranges.high_pc = Some(val), - gimli::AttributeValue::DebugAddrIndex(index) => { - ranges.high_pc = Some(sections.address(&dw_unit, index)?); - } - gimli::AttributeValue::Udata(val) => ranges.size = Some(val), - _ => {} - }, - gimli::DW_AT_ranges => { - ranges.ranges_offset = - sections.attr_ranges_offset(&dw_unit, attr.value())?; - } - gimli::DW_AT_language => { - if let gimli::AttributeValue::Language(val) = attr.value() { - lang = Some(val); - } - } - _ => {} - } - } - - // Find the address ranges for the CU, using in order of preference: - // - DW_AT_ranges - // - .debug_aranges - // - DW_AT_low_pc/DW_AT_high_pc - // - // Using DW_AT_ranges before .debug_aranges is possibly an arbitrary choice, - // but the feeling is that DW_AT_ranges is more likely to be reliable or complete - // if it is present. - // - // .debug_aranges must be used before DW_AT_low_pc/DW_AT_high_pc because - // it has been observed on macOS that DW_AT_ranges was not emitted even for - // discontiguous CUs. - let i = match ranges.ranges_offset { - Some(_) => None, - None => aranges.binary_search_by_key(&offset, |x| x.0).ok(), - }; - if let Some(mut i) = i { - // There should be only one set per CU, but in practice multiple - // sets have been observed. This is probably a compiler bug, but - // either way we need to handle it. 
- while i > 0 && aranges[i - 1].0 == offset { - i -= 1; - } - for (_, aranges_offset) in aranges[i..].iter().take_while(|x| x.0 == offset) { - let aranges_header = sections.debug_aranges.header(*aranges_offset)?; - let mut aranges = aranges_header.entries(); - while let Some(arange) = aranges.next()? { - if arange.length() != 0 { - unit_ranges.push(UnitRange { - range: arange.range(), - unit_id, - max_end: 0, - }); - have_unit_range = true; - } - } - } - } else { - have_unit_range |= ranges.for_each_range(sections, &dw_unit, |range| { - unit_ranges.push(UnitRange { - range, - unit_id, - max_end: 0, - }); - })?; - } - } - - let lines = LazyCell::new(); - if !have_unit_range { - // The unit did not declare any ranges. - // Try to get some ranges from the line program sequences. - if let Some(ref ilnp) = dw_unit.line_program { - if let Ok(lines) = lines - .borrow_with(|| Lines::parse(&dw_unit, ilnp.clone(), sections)) - .as_ref() - { - for sequence in lines.sequences.iter() { - unit_ranges.push(UnitRange { - range: gimli::Range { - begin: sequence.start, - end: sequence.end, - }, - unit_id, - max_end: 0, - }) - } - } - } - } - - res_units.push(ResUnit { - offset, - dw_unit, - lang, - lines, - funcs: LazyCell::new(), - dwo: LazyCell::new(), - }); - } - - // Sort this for faster lookup in `find_unit_and_address` below. - unit_ranges.sort_by_key(|i| i.range.begin); - - // Calculate the `max_end` field now that we've determined the order of - // CUs. - let mut max = 0; - for i in unit_ranges.iter_mut() { - max = max.max(i.range.end); - i.max_end = max; - } - - Ok((unit_ranges, res_units)) - } - - fn parse_sup(sections: &gimli::Dwarf) -> Result>, Error> { - let mut sup_units = Vec::new(); - let mut units = sections.units(); - while let Some(header) = units.next()? 
{ - let offset = match header.offset().as_debug_info_offset() { - Some(offset) => offset, - None => continue, - }; - let dw_unit = match sections.unit(header) { - Ok(dw_unit) => dw_unit, - Err(_) => continue, - }; - sup_units.push(SupUnit { dw_unit, offset }); - } - Ok(sup_units) - } - - // Find the unit containing the given offset, and convert the offset into a unit offset. - fn find_unit( - &self, - offset: gimli::DebugInfoOffset, - file: DebugFile, - ) -> Result<(&gimli::Unit, gimli::UnitOffset), Error> { - let unit = match file { - DebugFile::Primary => { - match self - .units - .binary_search_by_key(&offset.0, |unit| unit.offset.0) - { - // There is never a DIE at the unit offset or before the first unit. - Ok(_) | Err(0) => return Err(gimli::Error::NoEntryAtGivenOffset), - Err(i) => &self.units[i - 1].dw_unit, - } - } - DebugFile::Supplementary => { - match self - .sup_units - .binary_search_by_key(&offset.0, |unit| unit.offset.0) - { - // There is never a DIE at the unit offset or before the first unit. - Ok(_) | Err(0) => return Err(gimli::Error::NoEntryAtGivenOffset), - Err(i) => &self.sup_units[i - 1].dw_unit, - } - } - DebugFile::Dwo => return Err(gimli::Error::NoEntryAtGivenOffset), - }; - - let unit_offset = offset - .to_unit_offset(&unit.header) - .ok_or(gimli::Error::NoEntryAtGivenOffset)?; - Ok((unit, unit_offset)) - } -} - -struct Lines { - files: Box<[String]>, - sequences: Box<[LineSequence]>, -} - -impl Lines { - fn parse( - dw_unit: &gimli::Unit, - ilnp: gimli::IncompleteLineProgram, - sections: &gimli::Dwarf, - ) -> Result { - let mut sequences = Vec::new(); - let mut sequence_rows = Vec::::new(); - let mut rows = ilnp.rows(); - while let Some((_, row)) = rows.next_row()? 
{ - if row.end_sequence() { - if let Some(start) = sequence_rows.first().map(|x| x.address) { - let end = row.address(); - let mut rows = Vec::new(); - mem::swap(&mut rows, &mut sequence_rows); - sequences.push(LineSequence { - start, - end, - rows: rows.into_boxed_slice(), - }); - } - continue; - } - - let address = row.address(); - let file_index = row.file_index(); - let line = row.line().map(NonZeroU64::get).unwrap_or(0) as u32; - let column = match row.column() { - gimli::ColumnType::LeftEdge => 0, - gimli::ColumnType::Column(x) => x.get() as u32, - }; - - if let Some(last_row) = sequence_rows.last_mut() { - if last_row.address == address { - last_row.file_index = file_index; - last_row.line = line; - last_row.column = column; - continue; - } - } - - sequence_rows.push(LineRow { - address, - file_index, - line, - column, - }); - } - sequences.sort_by_key(|x| x.start); - - let mut files = Vec::new(); - let header = rows.header(); - match header.file(0) { - Some(file) => files.push(render_file(dw_unit, file, header, sections)?), - None => files.push(String::from("")), // DWARF version <= 4 may not have 0th index - } - let mut index = 1; - while let Some(file) = header.file(index) { - files.push(render_file(dw_unit, file, header, sections)?); - index += 1; - } - - Ok(Self { - files: files.into_boxed_slice(), - sequences: sequences.into_boxed_slice(), - }) - } -} - -fn render_file( - dw_unit: &gimli::Unit, - file: &gimli::FileEntry, - header: &gimli::LineProgramHeader, - sections: &gimli::Dwarf, -) -> Result { - let mut path = if let Some(ref comp_dir) = dw_unit.comp_dir { - comp_dir.to_string_lossy()?.into_owned() - } else { - String::new() - }; - - // The directory index 0 is defined to correspond to the compilation unit directory. - if file.directory_index() != 0 { - if let Some(directory) = file.directory(header) { - path_push( - &mut path, - sections - .attr_string(dw_unit, directory)? - .to_string_lossy()? 
- .as_ref(), - ); - } - } - - path_push( - &mut path, - sections - .attr_string(dw_unit, file.path_name())? - .to_string_lossy()? - .as_ref(), - ); - - Ok(path) -} - -struct LineSequence { - start: u64, - end: u64, - rows: Box<[LineRow]>, -} - -struct LineRow { - address: u64, - file_index: u64, - line: u32, - column: u32, -} - -/// This struct contains the information needed to find split DWARF data -/// and to produce a `gimli::Dwarf` for it. -pub struct SplitDwarfLoad { - /// The dwo id, for looking up in a DWARF package, or for - /// verifying an unpacked dwo found on the file system - pub dwo_id: gimli::DwoId, - /// The compilation directory `path` is relative to. - pub comp_dir: Option, - /// A path on the filesystem, relative to `comp_dir` to find this dwo. - pub path: Option, - /// Once the split DWARF data is loaded, the loader is expected - /// to call [make_dwo(parent)](gimli::read::Dwarf::make_dwo) before - /// returning the data. - pub parent: Arc>, -} - -struct SimpleLookup -where - F: FnOnce(Option>>) -> T, - R: gimli::Reader, -{ - f: F, - phantom: PhantomData<(T, R)>, -} - -impl SimpleLookup -where - F: FnOnce(Option>>) -> T, - R: gimli::Reader, -{ - fn new_complete(t: F::Output) -> LookupResult> { - LookupResult::Output(t) - } - - fn new_needs_load(load: SplitDwarfLoad, f: F) -> LookupResult> { - LookupResult::Load { - load, - continuation: SimpleLookup { - f, - phantom: PhantomData, - }, - } - } -} - -impl LookupContinuation for SimpleLookup -where - F: FnOnce(Option>>) -> T, - R: gimli::Reader, -{ - type Output = T; - type Buf = R; - - fn resume(self, v: Option>>) -> LookupResult { - LookupResult::Output((self.f)(v)) - } -} - -struct MappedLookup -where - L: LookupContinuation, - F: FnOnce(L::Output) -> T, -{ - original: L, - mutator: F, -} - -impl LookupContinuation for MappedLookup -where - L: LookupContinuation, - F: FnOnce(L::Output) -> T, -{ - type Output = T; - type Buf = L::Buf; - - fn resume(self, v: Option>>) -> LookupResult { - match 
self.original.resume(v) { - LookupResult::Output(t) => LookupResult::Output((self.mutator)(t)), - LookupResult::Load { load, continuation } => LookupResult::Load { - load, - continuation: MappedLookup { - original: continuation, - mutator: self.mutator, - }, - }, - } - } -} - -/// Some functions (e.g. `find_frames`) require considering multiple -/// compilation units, each of which might require their own split DWARF -/// lookup (and thus produce a continuation). -/// -/// We store the underlying continuation here as well as a mutator function -/// that will either a) decide that the result of this continuation is -/// what is needed and mutate it to the final result or b) produce another -/// `LookupResult`. `new_lookup` will in turn eagerly drive any non-continuation -/// `LookupResult` with successive invocations of the mutator, until a new -/// continuation or a final result is produced. And finally, the impl of -/// `LookupContinuation::resume` will call `new_lookup` each time the -/// computation is resumed. -struct LoopingLookup -where - L: LookupContinuation, - F: FnMut(L::Output) -> ControlFlow>, -{ - continuation: L, - mutator: F, -} - -impl LoopingLookup -where - L: LookupContinuation, - F: FnMut(L::Output) -> ControlFlow>, -{ - fn new_complete(t: T) -> LookupResult { - LookupResult::Output(t) - } - - fn new_lookup(mut r: LookupResult, mut mutator: F) -> LookupResult { - // Drive the loop eagerly so that we only ever have to represent one state - // (the r == ControlFlow::Continue state) in LoopingLookup. 
- loop { - match r { - LookupResult::Output(l) => match mutator(l) { - ControlFlow::Break(t) => return LookupResult::Output(t), - ControlFlow::Continue(r2) => { - r = r2; - } - }, - LookupResult::Load { load, continuation } => { - return LookupResult::Load { - load, - continuation: LoopingLookup { - continuation, - mutator, - }, - }; - } - } - } - } -} - -impl LookupContinuation for LoopingLookup -where - L: LookupContinuation, - F: FnMut(L::Output) -> ControlFlow>, -{ - type Output = T; - type Buf = L::Buf; - - fn resume(self, v: Option>>) -> LookupResult { - let r = self.continuation.resume(v); - LoopingLookup::new_lookup(r, self.mutator) - } -} - -impl ResUnit { - fn dwarf_and_unit_dwo<'unit, 'ctx: 'unit>( - &'unit self, - ctx: &'ctx Context, - ) -> LookupResult< - SimpleLookup< - Result<(DebugFile, &'unit gimli::Dwarf, &'unit gimli::Unit), Error>, - R, - impl FnOnce( - Option>>, - ) - -> Result<(DebugFile, &'unit gimli::Dwarf, &'unit gimli::Unit), Error>, - >, - > { - loop { - break SimpleLookup::new_complete(match self.dwo.borrow() { - Some(Ok(Some(v))) => Ok((DebugFile::Dwo, &*v.0, &v.1)), - Some(Ok(None)) => Ok((DebugFile::Primary, &*ctx.sections, &self.dw_unit)), - Some(Err(e)) => Err(*e), - None => { - let dwo_id = match self.dw_unit.dwo_id { - None => { - self.dwo.borrow_with(|| Ok(None)); - continue; - } - Some(dwo_id) => dwo_id, - }; - - let comp_dir = self.dw_unit.comp_dir.clone(); - - let dwo_name = self.dw_unit.dwo_name().and_then(|s| { - if let Some(s) = s { - Ok(Some(ctx.sections.attr_string(&self.dw_unit, s)?)) - } else { - Ok(None) - } - }); - - let path = match dwo_name { - Ok(v) => v, - Err(e) => { - self.dwo.borrow_with(|| Err(e)); - continue; - } - }; - - let process_dwo = move |dwo_dwarf: Option>>| { - let dwo_dwarf = match dwo_dwarf { - None => return Ok(None), - Some(dwo_dwarf) => dwo_dwarf, - }; - let mut dwo_units = dwo_dwarf.units(); - let dwo_header = match dwo_units.next()? 
{ - Some(dwo_header) => dwo_header, - None => return Ok(None), - }; - - let mut dwo_unit = dwo_dwarf.unit(dwo_header)?; - dwo_unit.copy_relocated_attributes(&self.dw_unit); - Ok(Some(Box::new((dwo_dwarf, dwo_unit)))) - }; - - return SimpleLookup::new_needs_load( - SplitDwarfLoad { - dwo_id, - comp_dir, - path, - parent: ctx.sections.clone(), - }, - move |dwo_dwarf| match self.dwo.borrow_with(|| process_dwo(dwo_dwarf)) { - Ok(Some(v)) => Ok((DebugFile::Dwo, &*v.0, &v.1)), - Ok(None) => Ok((DebugFile::Primary, &*ctx.sections, &self.dw_unit)), - Err(e) => Err(*e), - }, - ); - } - }); - } - } - - fn parse_lines(&self, sections: &gimli::Dwarf) -> Result, Error> { - // NB: line information is always stored in the main debug file so this does not need - // to handle DWOs. - let ilnp = match self.dw_unit.line_program { - Some(ref ilnp) => ilnp, - None => return Ok(None), - }; - self.lines - .borrow_with(|| Lines::parse(&self.dw_unit, ilnp.clone(), sections)) - .as_ref() - .map(Some) - .map_err(Error::clone) - } - - fn parse_functions_dwarf_and_unit( - &self, - unit: &gimli::Unit, - sections: &gimli::Dwarf, - ) -> Result<&Functions, Error> { - self.funcs - .borrow_with(|| Functions::parse(unit, sections)) - .as_ref() - .map_err(Error::clone) - } - - fn parse_functions<'unit, 'ctx: 'unit>( - &'unit self, - ctx: &'ctx Context, - ) -> LookupResult, Error>, Buf = R>> - { - self.dwarf_and_unit_dwo(ctx).map(move |r| { - let (_file, sections, unit) = r?; - self.parse_functions_dwarf_and_unit(unit, sections) - }) - } - fn parse_inlined_functions<'unit, 'ctx: 'unit>( - &'unit self, - ctx: &'ctx Context, - ) -> LookupResult, Buf = R> + 'unit> { - self.dwarf_and_unit_dwo(ctx).map(move |r| { - let (file, sections, unit) = r?; - self.funcs - .borrow_with(|| Functions::parse(unit, sections)) - .as_ref() - .map_err(Error::clone)? 
- .parse_inlined_functions(file, unit, ctx, sections) - }) - } - - fn find_location( - &self, - probe: u64, - sections: &gimli::Dwarf, - ) -> Result>, Error> { - if let Some(mut iter) = LocationRangeUnitIter::new(self, sections, probe, probe + 1)? { - match iter.next() { - None => Ok(None), - Some((_addr, _len, loc)) => Ok(Some(loc)), - } - } else { - Ok(None) - } - } - - #[inline] - fn find_location_range( - &self, - probe_low: u64, - probe_high: u64, - sections: &gimli::Dwarf, - ) -> Result>, Error> { - LocationRangeUnitIter::new(self, sections, probe_low, probe_high) - } - - fn find_function_or_location<'unit, 'ctx: 'unit>( - &'unit self, - probe: u64, - ctx: &'ctx Context, - ) -> LookupResult< - impl LookupContinuation< - Output = Result<(Option<&'unit Function>, Option>), Error>, - Buf = R, - >, - > { - self.dwarf_and_unit_dwo(ctx).map(move |r| { - let (file, sections, unit) = r?; - let functions = self.parse_functions_dwarf_and_unit(unit, sections)?; - let function = match functions.find_address(probe) { - Some(address) => { - let function_index = functions.addresses[address].function; - let (offset, ref function) = functions.functions[function_index]; - Some( - function - .borrow_with(|| Function::parse(offset, file, unit, ctx, sections)) - .as_ref() - .map_err(Error::clone)?, - ) - } - None => None, - }; - let location = self.find_location(probe, sections)?; - Ok((function, location)) - }) - } -} - -/// Iterator over `Location`s in a range of addresses, returned by `Context::find_location_range`. 
-pub struct LocationRangeIter<'ctx, R: gimli::Reader> { - unit_iter: Box, &'ctx gimli::Range)> + 'ctx>, - iter: Option>, - - probe_low: u64, - probe_high: u64, - sections: &'ctx gimli::Dwarf, -} - -impl<'ctx, R: gimli::Reader> LocationRangeIter<'ctx, R> { - #[inline] - fn new(ctx: &'ctx Context, probe_low: u64, probe_high: u64) -> Result { - let sections = &ctx.sections; - let unit_iter = ctx.find_units_range(probe_low, probe_high); - Ok(Self { - unit_iter: Box::new(unit_iter), - iter: None, - probe_low, - probe_high, - sections, - }) - } - - fn next_loc(&mut self) -> Result)>, Error> { - loop { - let iter = self.iter.take(); - match iter { - None => match self.unit_iter.next() { - Some((unit, range)) => { - self.iter = unit.find_location_range( - cmp::max(self.probe_low, range.begin), - cmp::min(self.probe_high, range.end), - self.sections, - )?; - } - None => return Ok(None), - }, - Some(mut iter) => { - if let item @ Some(_) = iter.next() { - self.iter = Some(iter); - return Ok(item); - } - } - } - } - } -} - -impl<'ctx, R> Iterator for LocationRangeIter<'ctx, R> -where - R: gimli::Reader + 'ctx, -{ - type Item = (u64, u64, Location<'ctx>); - - #[inline] - fn next(&mut self) -> Option { - match self.next_loc() { - Err(_) => None, - Ok(loc) => loc, - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl<'ctx, R> fallible_iterator::FallibleIterator for LocationRangeIter<'ctx, R> -where - R: gimli::Reader + 'ctx, -{ - type Item = (u64, u64, Location<'ctx>); - type Error = Error; - - #[inline] - fn next(&mut self) -> Result, Self::Error> { - self.next_loc() - } -} - -struct LocationRangeUnitIter<'ctx> { - lines: &'ctx Lines, - seqs: &'ctx [LineSequence], - seq_idx: usize, - row_idx: usize, - probe_high: u64, -} - -impl<'ctx> LocationRangeUnitIter<'ctx> { - fn new( - resunit: &'ctx ResUnit, - sections: &gimli::Dwarf, - probe_low: u64, - probe_high: u64, - ) -> Result, Error> { - let lines = resunit.parse_lines(sections)?; - - if let Some(lines) = lines { - // Find 
index for probe_low. - let seq_idx = lines.sequences.binary_search_by(|sequence| { - if probe_low < sequence.start { - Ordering::Greater - } else if probe_low >= sequence.end { - Ordering::Less - } else { - Ordering::Equal - } - }); - let seq_idx = match seq_idx { - Ok(x) => x, - Err(0) => 0, // probe below sequence, but range could overlap - Err(_) => lines.sequences.len(), - }; - - let row_idx = if let Some(seq) = lines.sequences.get(seq_idx) { - let idx = seq.rows.binary_search_by(|row| row.address.cmp(&probe_low)); - match idx { - Ok(x) => x, - Err(0) => 0, // probe below sequence, but range could overlap - Err(x) => x - 1, - } - } else { - 0 - }; - - Ok(Some(Self { - lines, - seqs: &*lines.sequences, - seq_idx, - row_idx, - probe_high, - })) - } else { - Ok(None) - } - } -} - -impl<'ctx> Iterator for LocationRangeUnitIter<'ctx> { - type Item = (u64, u64, Location<'ctx>); - - fn next(&mut self) -> Option<(u64, u64, Location<'ctx>)> { - while let Some(seq) = self.seqs.get(self.seq_idx) { - if seq.start >= self.probe_high { - break; - } - - match seq.rows.get(self.row_idx) { - Some(row) => { - if row.address >= self.probe_high { - break; - } - - let file = self - .lines - .files - .get(row.file_index as usize) - .map(String::as_str); - let nextaddr = seq - .rows - .get(self.row_idx + 1) - .map(|row| row.address) - .unwrap_or(seq.end); - - let item = ( - row.address, - nextaddr - row.address, - Location { - file, - line: if row.line != 0 { Some(row.line) } else { None }, - column: if row.column != 0 { - Some(row.column) - } else { - None - }, - }, - ); - self.row_idx += 1; - - return Some(item); - } - None => { - self.seq_idx += 1; - self.row_idx = 0; - } - } - } - None - } -} - -fn path_push(path: &mut String, p: &str) { - if has_unix_root(p) || has_windows_root(p) { - *path = p.to_string(); - } else { - let dir_separator = if has_windows_root(path.as_str()) { - '\\' - } else { - '/' - }; - - if !path.is_empty() && !path.ends_with(dir_separator) { - 
path.push(dir_separator); - } - *path += p; - } -} - -/// Check if the path in the given string has a unix style root -fn has_unix_root(p: &str) -> bool { - p.starts_with('/') -} - -/// Check if the path in the given string has a windows style root -fn has_windows_root(p: &str) -> bool { - p.starts_with('\\') || p.get(1..3) == Some(":\\") -} -struct RangeAttributes { - low_pc: Option, - high_pc: Option, - size: Option, - ranges_offset: Option::Offset>>, -} - -impl Default for RangeAttributes { - fn default() -> Self { - RangeAttributes { - low_pc: None, - high_pc: None, - size: None, - ranges_offset: None, - } - } -} - -impl RangeAttributes { - fn for_each_range( - &self, - sections: &gimli::Dwarf, - unit: &gimli::Unit, - mut f: F, - ) -> Result { - let mut added_any = false; - let mut add_range = |range: gimli::Range| { - if range.begin < range.end { - f(range); - added_any = true - } - }; - if let Some(ranges_offset) = self.ranges_offset { - let mut range_list = sections.ranges(unit, ranges_offset)?; - while let Some(range) = range_list.next()? { - add_range(range); - } - } else if let (Some(begin), Some(end)) = (self.low_pc, self.high_pc) { - add_range(gimli::Range { begin, end }); - } else if let (Some(begin), Some(size)) = (self.low_pc, self.size) { - add_range(gimli::Range { - begin, - end: begin + size, - }); - } - Ok(added_any) - } -} - -/// An iterator over function frames. -pub struct FrameIter<'ctx, R>(FrameIterState<'ctx, R>) -where - R: gimli::Reader; - -enum FrameIterState<'ctx, R> -where - R: gimli::Reader, -{ - Empty, - Location(Option>), - Frames(FrameIterFrames<'ctx, R>), -} - -struct FrameIterFrames<'ctx, R> -where - R: gimli::Reader, -{ - unit: &'ctx ResUnit, - sections: &'ctx gimli::Dwarf, - function: &'ctx Function, - inlined_functions: iter::Rev>>, - next: Option>, -} - -impl<'ctx, R> FrameIter<'ctx, R> -where - R: gimli::Reader + 'ctx, -{ - /// Advances the iterator and returns the next frame. 
- pub fn next(&mut self) -> Result>, Error> { - let frames = match &mut self.0 { - FrameIterState::Empty => return Ok(None), - FrameIterState::Location(location) => { - // We can't move out of a mutable reference, so use `take` instead. - let location = location.take(); - self.0 = FrameIterState::Empty; - return Ok(Some(Frame { - dw_die_offset: None, - function: None, - location, - })); - } - FrameIterState::Frames(frames) => frames, - }; - - let loc = frames.next.take(); - let func = match frames.inlined_functions.next() { - Some(func) => func, - None => { - let frame = Frame { - dw_die_offset: Some(frames.function.dw_die_offset), - function: frames.function.name.clone().map(|name| FunctionName { - name, - language: frames.unit.lang, - }), - location: loc, - }; - self.0 = FrameIterState::Empty; - return Ok(Some(frame)); - } - }; - - let mut next = Location { - file: None, - line: if func.call_line != 0 { - Some(func.call_line) - } else { - None - }, - column: if func.call_column != 0 { - Some(func.call_column) - } else { - None - }, - }; - if let Some(call_file) = func.call_file { - if let Some(lines) = frames.unit.parse_lines(frames.sections)? { - next.file = lines.files.get(call_file as usize).map(String::as_str); - } - } - frames.next = Some(next); - - Ok(Some(Frame { - dw_die_offset: Some(func.dw_die_offset), - function: func.name.clone().map(|name| FunctionName { - name, - language: frames.unit.lang, - }), - location: loc, - })) - } -} - -#[cfg(feature = "fallible-iterator")] -impl<'ctx, R> fallible_iterator::FallibleIterator for FrameIter<'ctx, R> -where - R: gimli::Reader + 'ctx, -{ - type Item = Frame<'ctx, R>; - type Error = Error; - - #[inline] - fn next(&mut self) -> Result>, Error> { - self.next() - } -} - -/// A function frame. -pub struct Frame<'ctx, R: gimli::Reader> { - /// The DWARF unit offset corresponding to the DIE of the function. - pub dw_die_offset: Option>, - /// The name of the function. 
- pub function: Option>, - /// The source location corresponding to this frame. - pub location: Option>, -} - -/// A function name. -pub struct FunctionName { - /// The name of the function. - pub name: R, - /// The language of the compilation unit containing this function. - pub language: Option, -} - -impl FunctionName { - /// The raw name of this function before demangling. - pub fn raw_name(&self) -> Result, Error> { - self.name.to_string_lossy() - } - - /// The name of this function after demangling (if applicable). - pub fn demangle(&self) -> Result, Error> { - self.raw_name().map(|x| demangle_auto(x, self.language)) - } -} - -/// Demangle a symbol name using the demangling scheme for the given language. -/// -/// Returns `None` if demangling failed or is not required. -#[allow(unused_variables)] -pub fn demangle(name: &str, language: gimli::DwLang) -> Option { - match language { - #[cfg(feature = "rustc-demangle")] - gimli::DW_LANG_Rust => rustc_demangle::try_demangle(name) - .ok() - .as_ref() - .map(|x| format!("{:#}", x)), - #[cfg(feature = "cpp_demangle")] - gimli::DW_LANG_C_plus_plus - | gimli::DW_LANG_C_plus_plus_03 - | gimli::DW_LANG_C_plus_plus_11 - | gimli::DW_LANG_C_plus_plus_14 => cpp_demangle::Symbol::new(name) - .ok() - .and_then(|x| x.demangle(&Default::default()).ok()), - _ => None, - } -} - -/// Apply 'best effort' demangling of a symbol name. -/// -/// If `language` is given, then only the demangling scheme for that language -/// is used. -/// -/// If `language` is `None`, then heuristics are used to determine how to -/// demangle the name. Currently, these heuristics are very basic. -/// -/// If demangling fails or is not required, then `name` is returned unchanged. 
-pub fn demangle_auto(name: Cow<'_, str>, language: Option) -> Cow<'_, str> { - match language { - Some(language) => demangle(name.as_ref(), language), - None => demangle(name.as_ref(), gimli::DW_LANG_Rust) - .or_else(|| demangle(name.as_ref(), gimli::DW_LANG_C_plus_plus)), - } - .map(Cow::from) - .unwrap_or(name) -} - -/// A source location. -pub struct Location<'a> { - /// The file name. - pub file: Option<&'a str>, - /// The line number. - pub line: Option, - /// The column number. - pub column: Option, -} - -#[cfg(test)] -mod tests { - #[test] - fn context_is_send() { - fn assert_is_send() {} - assert_is_send::>>(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/adler/benches/bench.rs s390-tools-2.33.1/rust-vendor/adler/benches/bench.rs --- s390-tools-2.31.0/rust-vendor/adler/benches/bench.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/benches/bench.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -extern crate adler; -extern crate criterion; - -use adler::{adler32_slice, Adler32}; -use criterion::{criterion_group, criterion_main, Criterion, Throughput}; - -fn simple(c: &mut Criterion) { - { - const SIZE: usize = 100; - - let mut group = c.benchmark_group("simple-100b"); - group.throughput(Throughput::Bytes(SIZE as u64)); - group.bench_function("zeroes-100", |bencher| { - bencher.iter(|| { - adler32_slice(&[0; SIZE]); - }); - }); - group.bench_function("ones-100", |bencher| { - bencher.iter(|| { - adler32_slice(&[0xff; SIZE]); - }); - }); - } - - { - const SIZE: usize = 1024; - - let mut group = c.benchmark_group("simple-1k"); - group.throughput(Throughput::Bytes(SIZE as u64)); - - group.bench_function("zeroes-1k", |bencher| { - bencher.iter(|| { - adler32_slice(&[0; SIZE]); - }); - }); - - group.bench_function("ones-1k", |bencher| { - bencher.iter(|| { - adler32_slice(&[0xff; SIZE]); - }); - }); - } - - { - const SIZE: usize = 1024 * 1024; - - let mut group = c.benchmark_group("simple-1m"); - 
group.throughput(Throughput::Bytes(SIZE as u64)); - group.bench_function("zeroes-1m", |bencher| { - bencher.iter(|| { - adler32_slice(&[0; SIZE]); - }); - }); - - group.bench_function("ones-1m", |bencher| { - bencher.iter(|| { - adler32_slice(&[0xff; SIZE]); - }); - }); - } -} - -fn chunked(c: &mut Criterion) { - const SIZE: usize = 16 * 1024 * 1024; - - let data = vec![0xAB; SIZE]; - - let mut group = c.benchmark_group("chunked-16m"); - group.throughput(Throughput::Bytes(SIZE as u64)); - group.bench_function("5552", |bencher| { - bencher.iter(|| { - let mut h = Adler32::new(); - for chunk in data.chunks(5552) { - h.write_slice(chunk); - } - h.checksum() - }); - }); - group.bench_function("8k", |bencher| { - bencher.iter(|| { - let mut h = Adler32::new(); - for chunk in data.chunks(8 * 1024) { - h.write_slice(chunk); - } - h.checksum() - }); - }); - group.bench_function("64k", |bencher| { - bencher.iter(|| { - let mut h = Adler32::new(); - for chunk in data.chunks(64 * 1024) { - h.write_slice(chunk); - } - h.checksum() - }); - }); - group.bench_function("1m", |bencher| { - bencher.iter(|| { - let mut h = Adler32::new(); - for chunk in data.chunks(1024 * 1024) { - h.write_slice(chunk); - } - h.checksum() - }); - }); -} - -criterion_group!(benches, simple, chunked); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/adler/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/adler/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/adler/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/adler/Cargo.toml s390-tools-2.33.1/rust-vendor/adler/Cargo.toml --- s390-tools-2.31.0/rust-vendor/adler/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/adler/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,64 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "adler" -version = "1.0.2" -authors = ["Jonas Schievink "] -description = "A simple clean-room implementation of the Adler-32 checksum" -documentation = "https://docs.rs/adler/" -readme = "README.md" -keywords = ["checksum", "integrity", "hash", "adler32", "zlib"] -categories = ["algorithms"] -license = "0BSD OR MIT OR Apache-2.0" -repository = "https://github.com/jonas-schievink/adler.git" -[package.metadata.docs.rs] -rustdoc-args = ["--cfg=docsrs"] - -[package.metadata.release] -no-dev-version = true -pre-release-commit-message = "Release {{version}}" -tag-message = "{{version}}" - -[[package.metadata.release.pre-release-replacements]] -file = "CHANGELOG.md" -replace = "## Unreleased\n\nNo changes.\n\n## [{{version}} - {{date}}](https://github.com/jonas-schievink/adler/releases/tag/v{{version}})\n" -search = "## Unreleased\n" - -[[package.metadata.release.pre-release-replacements]] -file = "README.md" -replace = "adler = \"{{version}}\"" -search = "adler = \"[a-z0-9\\\\.-]+\"" - -[[package.metadata.release.pre-release-replacements]] -file = "src/lib.rs" -replace = "https://docs.rs/adler/{{version}}" -search = "https://docs.rs/adler/[a-z0-9\\.-]+" - -[[bench]] -name = "bench" -harness = false -[dependencies.compiler_builtins] -version = "0.1.2" -optional = true - -[dependencies.core] -version = "1.0.0" -optional = true 
-package = "rustc-std-workspace-core" -[dev-dependencies.criterion] -version = "0.3.2" - -[features] -default = ["std"] -rustc-dep-of-std = ["core", "compiler_builtins"] -std = [] diff -Nru s390-tools-2.31.0/rust-vendor/adler/CHANGELOG.md s390-tools-2.33.1/rust-vendor/adler/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/adler/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,63 +0,0 @@ -# Changelog - -## Unreleased - -No changes. - -## [1.0.2 - 2021-02-26](https://github.com/jonas-schievink/adler/releases/tag/v1.0.2) - -- Fix doctest on big-endian systems ([#9]). - -[#9]: https://github.com/jonas-schievink/adler/pull/9 - -## [1.0.1 - 2020-11-08](https://github.com/jonas-schievink/adler/releases/tag/v1.0.1) - -### Fixes - -- Fix documentation on docs.rs. - -## [1.0.0 - 2020-11-08](https://github.com/jonas-schievink/adler/releases/tag/v1.0.0) - -### Fixes - -- Fix `cargo test --no-default-features` ([#5]). - -### Improvements - -- Extended and clarified documentation. -- Added more rustdoc examples. -- Extended CI to test the crate with `--no-default-features`. - -### Breaking Changes - -- `adler32_reader` now takes its generic argument by value instead of as a `&mut`. -- Renamed `adler32_reader` to `adler32`. - -## [0.2.3 - 2020-07-11](https://github.com/jonas-schievink/adler/releases/tag/v0.2.3) - -- Process 4 Bytes at a time, improving performance by up to 50% ([#2]). - -## [0.2.2 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.2) - -- Bump MSRV to 1.31.0. - -## [0.2.1 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.1) - -- Add a few `#[inline]` annotations to small functions. -- Fix CI badge. -- Allow integration into libstd. - -## [0.2.0 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.0) - -- Support `#![no_std]` when using `default-features = false`. -- Improve performance by around 7x. 
-- Support Rust 1.8.0. -- Improve API naming. - -## [0.1.0 - 2020-06-26](https://github.com/jonas-schievink/adler/releases/tag/v0.1.0) - -Initial release. - - -[#2]: https://github.com/jonas-schievink/adler/pull/2 -[#5]: https://github.com/jonas-schievink/adler/pull/5 diff -Nru s390-tools-2.31.0/rust-vendor/adler/LICENSE-0BSD s390-tools-2.33.1/rust-vendor/adler/LICENSE-0BSD --- s390-tools-2.31.0/rust-vendor/adler/LICENSE-0BSD 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/LICENSE-0BSD 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -Copyright (C) Jonas Schievink - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN -AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/adler/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/adler/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/adler/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/LICENSE-2.0 - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru s390-tools-2.31.0/rust-vendor/adler/LICENSE-MIT s390-tools-2.33.1/rust-vendor/adler/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/adler/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/adler/README.md s390-tools-2.33.1/rust-vendor/adler/README.md --- s390-tools-2.31.0/rust-vendor/adler/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ -# Adler-32 checksums for Rust - -[![crates.io](https://img.shields.io/crates/v/adler.svg)](https://crates.io/crates/adler) -[![docs.rs](https://docs.rs/adler/badge.svg)](https://docs.rs/adler/) -![CI](https://github.com/jonas-schievink/adler/workflows/CI/badge.svg) - -This crate provides a simple implementation of the Adler-32 checksum, used in -the zlib compression format. - -Please refer to the [changelog](CHANGELOG.md) to see what changed in the last -releases. - -## Features - -- Permissively licensed (0BSD) clean-room implementation. -- Zero dependencies. -- Zero `unsafe`. -- Decent performance (3-4 GB/s). -- Supports `#![no_std]` (with `default-features = false`). - -## Usage - -Add an entry to your `Cargo.toml`: - -```toml -[dependencies] -adler = "1.0.2" -``` - -Check the [API Documentation](https://docs.rs/adler/) for how to use the -crate's functionality. - -## Rust version support - -Currently, this crate supports all Rust versions starting at Rust 1.31.0. - -Bumping the Minimum Supported Rust Version (MSRV) is *not* considered a breaking -change, but will not be done without good reasons. The latest 3 stable Rust -versions will always be supported no matter what. diff -Nru s390-tools-2.31.0/rust-vendor/adler/RELEASE_PROCESS.md s390-tools-2.33.1/rust-vendor/adler/RELEASE_PROCESS.md --- s390-tools-2.31.0/rust-vendor/adler/RELEASE_PROCESS.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/RELEASE_PROCESS.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -# What to do to publish a new release - -1. Ensure all notable changes are in the changelog under "Unreleased". - -2. 
Execute `cargo release ` to bump version(s), tag and publish - everything. External subcommand, must be installed with `cargo install - cargo-release`. - - `` can be one of `major|minor|patch`. If this is the first release - (`0.1.0`), use `minor`, since the version starts out as `0.0.0`. - -3. Go to the GitHub releases, edit the just-pushed tag. Copy the release notes - from the changelog. diff -Nru s390-tools-2.31.0/rust-vendor/adler/src/algo.rs s390-tools-2.33.1/rust-vendor/adler/src/algo.rs --- s390-tools-2.31.0/rust-vendor/adler/src/algo.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/src/algo.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,146 +0,0 @@ -use crate::Adler32; -use std::ops::{AddAssign, MulAssign, RemAssign}; - -impl Adler32 { - pub(crate) fn compute(&mut self, bytes: &[u8]) { - // The basic algorithm is, for every byte: - // a = (a + byte) % MOD - // b = (b + a) % MOD - // where MOD = 65521. - // - // For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows: - // - Between calls to `write`, we ensure that a and b are always in range 0..MOD. - // - We use 32-bit arithmetic in this function. - // - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD` - // operation. - // - // According to Wikipedia, b is calculated as follows for non-incremental checksumming: - // b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521) - // Where n is the number of bytes and Di is the i-th Byte. We need to change this to account - // for the previous values of a and b, as well as treat every input Byte as being 255: - // b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520 - // Or in other words: - // b_inc = n*65520 + n(n+1)/2*255 - // The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521. - // 2^32-65521 = n*65520 + n(n+1)/2*255 - // Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552. 
- // - // On top of the optimization outlined above, the algorithm can also be parallelized with a - // bit more work: - // - // Note that b is a linear combination of a vector of input bytes (D1, ..., Dn). - // - // If we fix some value k Self { - U32X4([ - u32::from(bytes[0]), - u32::from(bytes[1]), - u32::from(bytes[2]), - u32::from(bytes[3]), - ]) - } -} - -impl AddAssign for U32X4 { - fn add_assign(&mut self, other: Self) { - for (s, o) in self.0.iter_mut().zip(other.0.iter()) { - *s += o; - } - } -} - -impl RemAssign for U32X4 { - fn rem_assign(&mut self, quotient: u32) { - for s in self.0.iter_mut() { - *s %= quotient; - } - } -} - -impl MulAssign for U32X4 { - fn mul_assign(&mut self, rhs: u32) { - for s in self.0.iter_mut() { - *s *= rhs; - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/adler/src/lib.rs s390-tools-2.33.1/rust-vendor/adler/src/lib.rs --- s390-tools-2.31.0/rust-vendor/adler/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/adler/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,287 +0,0 @@ -//! Adler-32 checksum implementation. -//! -//! This implementation features: -//! -//! - Permissively licensed (0BSD) clean-room implementation. -//! - Zero dependencies. -//! - Zero `unsafe`. -//! - Decent performance (3-4 GB/s). -//! - `#![no_std]` support (with `default-features = false`). - -#![doc(html_root_url = "https://docs.rs/adler/1.0.2")] -// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default -#![doc(test(attr(deny(unused_imports, unused_must_use))))] -#![cfg_attr(docsrs, feature(doc_cfg))] -#![warn(missing_debug_implementations)] -#![forbid(unsafe_code)] -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(not(feature = "std"))] -extern crate core as std; - -mod algo; - -use std::hash::Hasher; - -#[cfg(feature = "std")] -use std::io::{self, BufRead}; - -/// Adler-32 checksum calculator. 
-/// -/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default -/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via -/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`]. -/// -/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any -/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although -/// that is not recommended (while every checksum is a hash function, they are not necessarily a -/// good one). -/// -/// # Examples -/// -/// Basic, piecewise checksum calculation: -/// -/// ``` -/// use adler::Adler32; -/// -/// let mut adler = Adler32::new(); -/// -/// adler.write_slice(&[0, 1, 2]); -/// adler.write_slice(&[3, 4, 5]); -/// -/// assert_eq!(adler.checksum(), 0x00290010); -/// ``` -/// -/// Using `Hash` to process structures: -/// -/// ``` -/// use std::hash::Hash; -/// use adler::Adler32; -/// -/// #[derive(Hash)] -/// struct Data { -/// byte: u8, -/// word: u16, -/// big: u64, -/// } -/// -/// let mut adler = Adler32::new(); -/// -/// let data = Data { byte: 0x1F, word: 0xABCD, big: !0 }; -/// data.hash(&mut adler); -/// -/// // hash value depends on architecture endianness -/// if cfg!(target_endian = "little") { -/// assert_eq!(adler.checksum(), 0x33410990); -/// } -/// if cfg!(target_endian = "big") { -/// assert_eq!(adler.checksum(), 0x331F0990); -/// } -/// -/// ``` -/// -/// [`new`]: #method.new -/// [`from_checksum`]: #method.from_checksum -/// [`checksum`]: #method.checksum -#[derive(Debug, Copy, Clone)] -pub struct Adler32 { - a: u16, - b: u16, -} - -impl Adler32 { - /// Creates a new Adler-32 instance with default state. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Creates an `Adler32` instance from a precomputed Adler-32 checksum. 
- /// - /// This allows resuming checksum calculation without having to keep the `Adler32` instance - /// around. - /// - /// # Example - /// - /// ``` - /// # use adler::Adler32; - /// let parts = [ - /// "rust", - /// "acean", - /// ]; - /// let whole = adler::adler32_slice(b"rustacean"); - /// - /// let mut sum = Adler32::new(); - /// sum.write_slice(parts[0].as_bytes()); - /// let partial = sum.checksum(); - /// - /// // ...later - /// - /// let mut sum = Adler32::from_checksum(partial); - /// sum.write_slice(parts[1].as_bytes()); - /// assert_eq!(sum.checksum(), whole); - /// ``` - #[inline] - pub fn from_checksum(sum: u32) -> Self { - Adler32 { - a: sum as u16, - b: (sum >> 16) as u16, - } - } - - /// Returns the calculated checksum at this point in time. - #[inline] - pub fn checksum(&self) -> u32 { - (u32::from(self.b) << 16) | u32::from(self.a) - } - - /// Adds `bytes` to the checksum calculation. - /// - /// If efficiency matters, this should be called with Byte slices that contain at least a few - /// thousand Bytes. - pub fn write_slice(&mut self, bytes: &[u8]) { - self.compute(bytes); - } -} - -impl Default for Adler32 { - #[inline] - fn default() -> Self { - Adler32 { a: 1, b: 0 } - } -} - -impl Hasher for Adler32 { - #[inline] - fn finish(&self) -> u64 { - u64::from(self.checksum()) - } - - fn write(&mut self, bytes: &[u8]) { - self.write_slice(bytes); - } -} - -/// Calculates the Adler-32 checksum of a byte slice. -/// -/// This is a convenience function around the [`Adler32`] type. -/// -/// [`Adler32`]: struct.Adler32.html -pub fn adler32_slice(data: &[u8]) -> u32 { - let mut h = Adler32::new(); - h.write_slice(data); - h.checksum() -} - -/// Calculates the Adler-32 checksum of a `BufRead`'s contents. -/// -/// The passed `BufRead` implementor will be read until it reaches EOF (or until it reports an -/// error). -/// -/// If you only have a `Read` implementor, you can wrap it in `std::io::BufReader` before calling -/// this function. 
-/// -/// # Errors -/// -/// Any error returned by the reader are bubbled up by this function. -/// -/// # Examples -/// -/// ```no_run -/// # fn run() -> Result<(), Box> { -/// use adler::adler32; -/// -/// use std::fs::File; -/// use std::io::BufReader; -/// -/// let file = File::open("input.txt")?; -/// let mut file = BufReader::new(file); -/// -/// adler32(&mut file)?; -/// # Ok(()) } -/// # fn main() { run().unwrap() } -/// ``` -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub fn adler32(mut reader: R) -> io::Result { - let mut h = Adler32::new(); - loop { - let len = { - let buf = reader.fill_buf()?; - if buf.is_empty() { - return Ok(h.checksum()); - } - - h.write_slice(buf); - buf.len() - }; - reader.consume(len); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn zeroes() { - assert_eq!(adler32_slice(&[]), 1); - assert_eq!(adler32_slice(&[0]), 1 | 1 << 16); - assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16); - assert_eq!(adler32_slice(&[0; 100]), 0x00640001); - assert_eq!(adler32_slice(&[0; 1024]), 0x04000001); - assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001); - } - - #[test] - fn ones() { - assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e); - assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11); - } - - #[test] - fn mixed() { - assert_eq!(adler32_slice(&[1]), 2 | 2 << 16); - assert_eq!(adler32_slice(&[40]), 41 | 41 << 16); - - assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1); - } - - /// Example calculation from https://en.wikipedia.org/wiki/Adler-32. 
- #[test] - fn wiki() { - assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398); - } - - #[test] - fn resume() { - let mut adler = Adler32::new(); - adler.write_slice(&[0xff; 1024]); - let partial = adler.checksum(); - assert_eq!(partial, 0x79a6fc2e); // from above - adler.write_slice(&[0xff; 1024 * 1024 - 1024]); - assert_eq!(adler.checksum(), 0x8e88ef11); // from above - - // Make sure that we can resume computing from the partial checksum via `from_checksum`. - let mut adler = Adler32::from_checksum(partial); - adler.write_slice(&[0xff; 1024 * 1024 - 1024]); - assert_eq!(adler.checksum(), 0x8e88ef11); // from above - } - - #[cfg(feature = "std")] - #[test] - fn bufread() { - use std::io::BufReader; - fn test(data: &[u8], checksum: u32) { - // `BufReader` uses an 8 KB buffer, so this will test buffer refilling. - let mut buf = BufReader::new(data); - let real_sum = adler32(&mut buf).unwrap(); - assert_eq!(checksum, real_sum); - } - - test(&[], 1); - test(&[0; 1024], 0x04000001); - test(&[0; 1024 * 1024], 0x00f00001); - test(&[0xA5; 1024 * 1024], 0xd5009ab1); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/assert-json-diff/bin/release s390-tools-2.33.1/rust-vendor/assert-json-diff/bin/release --- s390-tools-2.31.0/rust-vendor/assert-json-diff/bin/release 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/assert-json-diff/bin/release 1970-01-01 01:00:00.000000000 +0100 @@ -1,54 +0,0 @@ -#!/bin/bash -set -e - -confirm() { - while true; do - read -p "$1? Please double check. y/n? " yn - case $yn in - [Yy]* ) break;; - [Nn]* ) exit 1;; - * ) echo "Please answer yes or no.";; - esac - done -} - -cargo fmt --all -- --check -echo "✔ code formatting looks good!" 
- -cargo check -echo "✔ types look good" - -cargo readme > README.md -echo "✔ README.md compiled" - -cargo test > /dev/null -echo "✔ tests are passing" - -confirm "Updated Cargo.toml" -confirm "Updated CHANGELOG.md" - -version="$1" -version_without_v="`sed \"s/v//g\" <(echo $version)`" - -if (echo $version | egrep "v\d+\.\d+\.\d+" > /dev/null) -then - confirm "Ready to release $version (as $version_without_v)?" -else - echo "Invalid version number: $1" - exit 1 -fi - -version_in_toml=$(cat Cargo.toml | egrep "^version = \"$version_without_v\"") - -if [[ "$version_in_toml" == "version = \"$version_without_v\"" ]] -then - true -else - echo "Cargo.toml isn't set to version $version_without_v" -fi - -GIT_COMMITTER_DATE=$(git log -n1 --pretty=%aD) git tag -a -m "Release $version" $version -git push --tags - -cargo publish --dry-run -cargo publish || true diff -Nru s390-tools-2.31.0/rust-vendor/assert-json-diff/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/assert-json-diff/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/assert-json-diff/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/assert-json-diff/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/assert-json-diff/Cargo.toml s390-tools-2.33.1/rust-vendor/assert-json-diff/Cargo.toml --- s390-tools-2.31.0/rust-vendor/assert-json-diff/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/assert-json-diff/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,44 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. 
-# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "assert-json-diff" -version = "2.0.2" -authors = ["David Pedersen "] -description = "Easily compare two JSON values and get great output" -homepage = "https://github.com/davidpdrsn/assert-json-diff" -documentation = "https://docs.rs/assert-json-diff" -readme = "README.md" -keywords = [ - "serde_json", - "json", - "testing", -] -categories = ["development-tools"] -license = "MIT" -repository = "https://github.com/davidpdrsn/assert-json-diff.git" - -[dependencies.serde] -version = "1" - -[dependencies.serde_json] -version = "1" - -[dev-dependencies.serde] -version = "1" -features = ["derive"] - -[dev-dependencies.version-sync] -version = "0.8" - -[badges.maintenance] -status = "passively-maintained" diff -Nru s390-tools-2.31.0/rust-vendor/assert-json-diff/CHANGELOG.md s390-tools-2.33.1/rust-vendor/assert-json-diff/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/assert-json-diff/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/assert-json-diff/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,77 +0,0 @@ -# Change Log - -All user visible changes to this project will be documented in this file. -This project adheres to [Semantic Versioning](http://semver.org/), as described -for Rust libraries in [RFC #1105](https://github.com/rust-lang/rfcs/blob/master/text/1105-api-evolution.md) - -## Unreleased - -- None. - -### Breaking changes - -None. - -## 2.0.2 - 2022-06-29 - -- Don't move the `Value`s being compared in `assert_json_matches` - -## 2.0.1 - 2021-02-14 - -- Add maintenance status to readme and `Cargo.toml`. - -## 2.0.0 - 2021-01-23 - -## Unreleased - -- A less strict numeric mode for comparisons is now supported. The `AssumeFloat` mode will make `1 == 1.0`. This mode can be set via `Config::numeric_mode`. 
-- A panicking `assert_json_matches` macro has been added which takes a `Config`. -- Remove dependency on "extend". - -### Breaking changes - -- Some breaking changes have been made to support customizing how the JSON values are compared: - - `assert_json_eq_no_panic` and `assert_json_include_no_panic` have been replaced by `assert_json_matches_no_panic` which takes a `Config` that describes how the comparison should work. - - This setup will support adding further customizations without more breaking changes. - -## 1.1.0 - 2020-07-12 - -- All methods now accept any `T: Serialize` rather than just `serde_json::Value`. - -## 1.0.3 - 2020-02-21 - -- Introduce non-panicking functions with `assert_json_include_no_panic` and `assert_json_eq_no_panic`. - -## 1.0.2 - 2020-02-19 - -- Internal diffing algorithm simplified. There should be no external changes. Some error messages might have changed, but everything that passed/failed before should still do the same. - -## 1.0.1 - 2019-10-24 - -- Update to 2018 edition - -## 1.0.0 - 2019-02-15 - -### Fixed - -- Make macros work with trailing comma - -## 0.2.1 - 2018-11-15 - -### Fixed - -- Fix wrong error message when a JSON atom was missing from actual. - -## 0.2.0 - 2018-11-16 - -### Added - -- Add `assert_json_include`. It does partial matching the same way the old `assert_json_eq` did. - -### Changed - -- Change `assert_json_eq` do exact matching. If the two values are not exactly the same, it'll panic. - -## 0.1.0 - 2018-10-17 - -Initial release. 
diff -Nru s390-tools-2.31.0/rust-vendor/assert-json-diff/LICENSE s390-tools-2.33.1/rust-vendor/assert-json-diff/LICENSE --- s390-tools-2.31.0/rust-vendor/assert-json-diff/LICENSE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/assert-json-diff/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,7 +0,0 @@ -Copyright (c) 2019 David Pedersen - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/assert-json-diff/README.md s390-tools-2.33.1/rust-vendor/assert-json-diff/README.md --- s390-tools-2.31.0/rust-vendor/assert-json-diff/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/assert-json-diff/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,149 +0,0 @@ -[![Crates.io](https://img.shields.io/crates/v/assert-json-diff.svg)](https://crates.io/crates/assert-json-diff) -[![Docs](https://docs.rs/assert-json-diff/badge.svg)](https://docs.rs/assert-json-diff) -[![dependency status](https://deps.rs/repo/github/davidpdrsn/assert-json-diff/status.svg)](https://deps.rs/repo/github/davidpdrsn/assert-json-diff) -[![Build status](https://github.com/davidpdrsn/assert-json-diff/workflows/CI/badge.svg)](https://github.com/davidpdrsn/assert-json-diff/actions) -![maintenance-status](https://img.shields.io/badge/maintenance-passively--maintained-yellowgreen.svg) - -# assert-json-diff - -This crate includes macros for comparing two serializable values by diffing their JSON -representations. It is designed to give much more helpful error messages than the standard -[`assert_eq!`]. It basically does a diff of the two objects and tells you the exact -differences. This is useful when asserting that two large JSON objects are the same. - -It uses the [serde] and [serde_json] to perform the serialization. 
- -[serde]: https://crates.io/crates/serde -[serde_json]: https://crates.io/crates/serde_json -[`assert_eq!`]: https://doc.rust-lang.org/std/macro.assert_eq.html - -### Partial matching - -If you want to assert that one JSON value is "included" in another use -[`assert_json_include`](macro.assert_json_include.html): - -```rust -use assert_json_diff::assert_json_include; -use serde_json::json; - -let a = json!({ - "data": { - "users": [ - { - "id": 1, - "country": { - "name": "Denmark" - } - }, - { - "id": 24, - "country": { - "name": "Denmark" - } - } - ] - } -}); - -let b = json!({ - "data": { - "users": [ - { - "id": 1, - "country": { - "name": "Sweden" - } - }, - { - "id": 2, - "country": { - "name": "Denmark" - } - } - ] - } -}); - -assert_json_include!(actual: a, expected: b) -``` - -This will panic with the error message: - -``` -json atoms at path ".data.users[0].country.name" are not equal: - expected: - "Sweden" - actual: - "Denmark" - -json atoms at path ".data.users[1].id" are not equal: - expected: - 2 - actual: - 24 -``` - -[`assert_json_include`](macro.assert_json_include.html) allows extra data in `actual` but not in `expected`. That is so you can verify just a part -of the JSON without having to specify the whole thing. For example this test passes: - -```rust -use assert_json_diff::assert_json_include; -use serde_json::json; - -assert_json_include!( - actual: json!({ - "a": { "b": 1 }, - }), - expected: json!({ - "a": {}, - }) -) -``` - -However `expected` cannot contain additional data so this test fails: - -```rust -use assert_json_diff::assert_json_include; -use serde_json::json; - -assert_json_include!( - actual: json!({ - "a": {}, - }), - expected: json!({ - "a": { "b": 1 }, - }) -) -``` - -That will print - -``` -json atom at path ".a.b" is missing from actual -``` - -### Exact matching - -If you want to ensure two JSON values are *exactly* the same, use [`assert_json_eq`](macro.assert_json_eq.html). 
- -```rust -use assert_json_diff::assert_json_eq; -use serde_json::json; - -assert_json_eq!( - json!({ "a": { "b": 1 } }), - json!({ "a": {} }) -) -``` - -This will panic with the error message: - -``` -json atom at path ".a.b" is missing from lhs -``` - -### Further customization - -You can use [`assert_json_matches`] to further customize the comparison. - -License: MIT diff -Nru s390-tools-2.31.0/rust-vendor/assert-json-diff/src/core_ext.rs s390-tools-2.33.1/rust-vendor/assert-json-diff/src/core_ext.rs --- s390-tools-2.31.0/rust-vendor/assert-json-diff/src/core_ext.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/assert-json-diff/src/core_ext.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -pub trait Indent { - fn indent(&self, level: u32) -> String; -} - -impl Indent for T -where - T: ToString, -{ - fn indent(&self, level: u32) -> String { - let mut indent = String::new(); - for _ in 0..level { - indent.push(' '); - } - - self.to_string() - .lines() - .map(|line| format!("{}{}", indent, line)) - .collect::>() - .join("\n") - } -} - -pub trait Indexes { - fn indexes(&self) -> Vec; -} - -impl Indexes for Vec { - fn indexes(&self) -> Vec { - if self.is_empty() { - vec![] - } else { - (0..=self.len() - 1).collect() - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_indent() { - assert_eq!(" foo", "foo".indent(2)); - assert_eq!(" foo\n bar", "foo\nbar".indent(2)); - } - - #[test] - fn test_indexes() { - let empty: Vec = vec![]; - let empty_indexes: Vec = vec![]; - assert_eq!(empty.indexes(), empty_indexes); - - assert_eq!(vec!['a', 'b'].indexes(), vec![0, 1]); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/assert-json-diff/src/diff.rs s390-tools-2.33.1/rust-vendor/assert-json-diff/src/diff.rs --- s390-tools-2.31.0/rust-vendor/assert-json-diff/src/diff.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/assert-json-diff/src/diff.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,532 +0,0 @@ -use 
crate::core_ext::{Indent, Indexes}; -use crate::{CompareMode, Config, NumericMode}; -use serde_json::Value; -use std::{collections::HashSet, fmt}; - -pub(crate) fn diff<'a>(lhs: &'a Value, rhs: &'a Value, config: Config) -> Vec> { - let mut acc = vec![]; - diff_with(lhs, rhs, config, Path::Root, &mut acc); - acc -} - -fn diff_with<'a>( - lhs: &'a Value, - rhs: &'a Value, - config: Config, - path: Path<'a>, - acc: &mut Vec>, -) { - let mut folder = DiffFolder { - rhs, - path, - acc, - config, - }; - - fold_json(lhs, &mut folder); -} - -#[derive(Debug)] -struct DiffFolder<'a, 'b> { - rhs: &'a Value, - path: Path<'a>, - acc: &'b mut Vec>, - config: Config, -} - -macro_rules! direct_compare { - ($name:ident) => { - fn $name(&mut self, lhs: &'a Value) { - if self.rhs != lhs { - self.acc.push(Difference { - lhs: Some(lhs), - rhs: Some(&self.rhs), - path: self.path.clone(), - config: self.config.clone(), - }); - } - } - }; -} - -impl<'a, 'b> DiffFolder<'a, 'b> { - direct_compare!(on_null); - direct_compare!(on_bool); - direct_compare!(on_string); - - fn on_number(&mut self, lhs: &'a Value) { - let is_equal = match self.config.numeric_mode { - NumericMode::Strict => self.rhs == lhs, - NumericMode::AssumeFloat => self.rhs.as_f64() == lhs.as_f64(), - }; - if !is_equal { - self.acc.push(Difference { - lhs: Some(lhs), - rhs: Some(&self.rhs), - path: self.path.clone(), - config: self.config.clone(), - }); - } - } - - fn on_array(&mut self, lhs: &'a Value) { - if let Some(rhs) = self.rhs.as_array() { - let lhs = lhs.as_array().unwrap(); - - match self.config.compare_mode { - CompareMode::Inclusive => { - for (idx, rhs) in rhs.iter().enumerate() { - let path = self.path.append(Key::Idx(idx)); - - if let Some(lhs) = lhs.get(idx) { - diff_with(lhs, rhs, self.config.clone(), path, self.acc) - } else { - self.acc.push(Difference { - lhs: None, - rhs: Some(&self.rhs), - path, - config: self.config.clone(), - }); - } - } - } - CompareMode::Strict => { - let all_keys = rhs - .indexes() 
- .into_iter() - .chain(lhs.indexes()) - .collect::>(); - for key in all_keys { - let path = self.path.append(Key::Idx(key)); - - match (lhs.get(key), rhs.get(key)) { - (Some(lhs), Some(rhs)) => { - diff_with(lhs, rhs, self.config.clone(), path, self.acc); - } - (None, Some(rhs)) => { - self.acc.push(Difference { - lhs: None, - rhs: Some(rhs), - path, - config: self.config.clone(), - }); - } - (Some(lhs), None) => { - self.acc.push(Difference { - lhs: Some(lhs), - rhs: None, - path, - config: self.config.clone(), - }); - } - (None, None) => { - unreachable!("at least one of the maps should have the key") - } - } - } - } - } - } else { - self.acc.push(Difference { - lhs: Some(lhs), - rhs: Some(&self.rhs), - path: self.path.clone(), - config: self.config.clone(), - }); - } - } - - fn on_object(&mut self, lhs: &'a Value) { - if let Some(rhs) = self.rhs.as_object() { - let lhs = lhs.as_object().unwrap(); - - match self.config.compare_mode { - CompareMode::Inclusive => { - for (key, rhs) in rhs.iter() { - let path = self.path.append(Key::Field(key)); - - if let Some(lhs) = lhs.get(key) { - diff_with(lhs, rhs, self.config.clone(), path, self.acc) - } else { - self.acc.push(Difference { - lhs: None, - rhs: Some(&self.rhs), - path, - config: self.config.clone(), - }); - } - } - } - CompareMode::Strict => { - let all_keys = rhs.keys().chain(lhs.keys()).collect::>(); - for key in all_keys { - let path = self.path.append(Key::Field(key)); - - match (lhs.get(key), rhs.get(key)) { - (Some(lhs), Some(rhs)) => { - diff_with(lhs, rhs, self.config.clone(), path, self.acc); - } - (None, Some(rhs)) => { - self.acc.push(Difference { - lhs: None, - rhs: Some(rhs), - path, - config: self.config.clone(), - }); - } - (Some(lhs), None) => { - self.acc.push(Difference { - lhs: Some(lhs), - rhs: None, - path, - config: self.config.clone(), - }); - } - (None, None) => { - unreachable!("at least one of the maps should have the key") - } - } - } - } - } - } else { - self.acc.push(Difference { - 
lhs: Some(lhs), - rhs: Some(&self.rhs), - path: self.path.clone(), - config: self.config.clone(), - }); - } - } -} - -#[derive(Debug, PartialEq)] -pub(crate) struct Difference<'a> { - path: Path<'a>, - lhs: Option<&'a Value>, - rhs: Option<&'a Value>, - config: Config, -} - -impl<'a> fmt::Display for Difference<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let json_to_string = |json: &Value| serde_json::to_string_pretty(json).unwrap(); - - match (&self.config.compare_mode, &self.lhs, &self.rhs) { - (CompareMode::Inclusive, Some(actual), Some(expected)) => { - writeln!(f, "json atoms at path \"{}\" are not equal:", self.path)?; - writeln!(f, " expected:")?; - writeln!(f, "{}", json_to_string(expected).indent(8))?; - writeln!(f, " actual:")?; - write!(f, "{}", json_to_string(actual).indent(8))?; - } - (CompareMode::Inclusive, None, Some(_expected)) => { - write!( - f, - "json atom at path \"{}\" is missing from actual", - self.path - )?; - } - (CompareMode::Inclusive, Some(_actual), None) => { - unreachable!("stuff missing actual wont produce an error") - } - (CompareMode::Inclusive, None, None) => unreachable!("can't both be missing"), - - (CompareMode::Strict, Some(lhs), Some(rhs)) => { - writeln!(f, "json atoms at path \"{}\" are not equal:", self.path)?; - writeln!(f, " lhs:")?; - writeln!(f, "{}", json_to_string(lhs).indent(8))?; - writeln!(f, " rhs:")?; - write!(f, "{}", json_to_string(rhs).indent(8))?; - } - (CompareMode::Strict, None, Some(_)) => { - write!(f, "json atom at path \"{}\" is missing from lhs", self.path)?; - } - (CompareMode::Strict, Some(_), None) => { - write!(f, "json atom at path \"{}\" is missing from rhs", self.path)?; - } - (CompareMode::Strict, None, None) => unreachable!("can't both be missing"), - } - - Ok(()) - } -} - -#[derive(Debug, Clone, PartialEq)] -enum Path<'a> { - Root, - Keys(Vec>), -} - -impl<'a> Path<'a> { - fn append(&self, next: Key<'a>) -> Path<'a> { - match self { - Path::Root => 
Path::Keys(vec![next]), - Path::Keys(list) => { - let mut copy = list.clone(); - copy.push(next); - Path::Keys(copy) - } - } - } -} - -impl<'a> fmt::Display for Path<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Path::Root => write!(f, "(root)"), - Path::Keys(keys) => { - for key in keys { - write!(f, "{}", key)?; - } - Ok(()) - } - } - } -} - -#[derive(Debug, Copy, Clone, PartialEq)] -enum Key<'a> { - Idx(usize), - Field(&'a str), -} - -impl<'a> fmt::Display for Key<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Key::Idx(idx) => write!(f, "[{}]", idx), - Key::Field(key) => write!(f, ".{}", key), - } - } -} - -fn fold_json<'a>(json: &'a Value, folder: &mut DiffFolder<'a, '_>) { - match json { - Value::Null => folder.on_null(json), - Value::Bool(_) => folder.on_bool(json), - Value::Number(_) => folder.on_number(json), - Value::String(_) => folder.on_string(json), - Value::Array(_) => folder.on_array(json), - Value::Object(_) => folder.on_object(json), - } -} - -#[cfg(test)] -mod test { - #[allow(unused_imports)] - use super::*; - use serde_json::json; - - #[test] - fn test_diffing_leaf_json() { - let diffs = diff( - &json!(null), - &json!(null), - Config::new(CompareMode::Inclusive), - ); - assert_eq!(diffs, vec![]); - - let diffs = diff( - &json!(false), - &json!(false), - Config::new(CompareMode::Inclusive), - ); - assert_eq!(diffs, vec![]); - - let diffs = diff( - &json!(true), - &json!(true), - Config::new(CompareMode::Inclusive), - ); - assert_eq!(diffs, vec![]); - - let diffs = diff( - &json!(false), - &json!(true), - Config::new(CompareMode::Inclusive), - ); - assert_eq!(diffs.len(), 1); - - let diffs = diff( - &json!(true), - &json!(false), - Config::new(CompareMode::Inclusive), - ); - assert_eq!(diffs.len(), 1); - - let actual = json!(1); - let expected = json!(1); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs, vec![]); - - let actual = 
json!(2); - let expected = json!(1); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - let actual = json!(1); - let expected = json!(2); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - let actual = json!(1.0); - let expected = json!(1.0); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs, vec![]); - - let actual = json!(1); - let expected = json!(1.0); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - let actual = json!(1.0); - let expected = json!(1); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - let actual = json!(1); - let expected = json!(1.0); - let diffs = diff( - &actual, - &expected, - Config::new(CompareMode::Inclusive).numeric_mode(NumericMode::AssumeFloat), - ); - assert_eq!(diffs, vec![]); - - let actual = json!(1.0); - let expected = json!(1); - let diffs = diff( - &actual, - &expected, - Config::new(CompareMode::Inclusive).numeric_mode(NumericMode::AssumeFloat), - ); - assert_eq!(diffs, vec![]); - } - - #[test] - fn test_diffing_array() { - // empty - let actual = json!([]); - let expected = json!([]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs, vec![]); - - let actual = json!([1]); - let expected = json!([]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 0); - - let actual = json!([]); - let expected = json!([1]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - // eq - let actual = json!([1]); - let expected = json!([1]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs, vec![]); - - // actual longer - let actual = json!([1, 2]); - let expected 
= json!([1]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs, vec![]); - - // expected longer - let actual = json!([1]); - let expected = json!([1, 2]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - // eq length but different - let actual = json!([1, 3]); - let expected = json!([1, 2]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - // different types - let actual = json!(1); - let expected = json!([1]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - let actual = json!([1]); - let expected = json!(1); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - } - - #[test] - fn test_array_strict() { - let actual = json!([]); - let expected = json!([]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Strict)); - assert_eq!(diffs.len(), 0); - - let actual = json!([1, 2]); - let expected = json!([1, 2]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Strict)); - assert_eq!(diffs.len(), 0); - - let actual = json!([1]); - let expected = json!([1, 2]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Strict)); - assert_eq!(diffs.len(), 1); - - let actual = json!([1, 2]); - let expected = json!([1]); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Strict)); - assert_eq!(diffs.len(), 1); - } - - #[test] - fn test_object() { - let actual = json!({}); - let expected = json!({}); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs, vec![]); - - let actual = json!({ "a": 1 }); - let expected = json!({ "a": 1 }); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs, vec![]); - - let actual = json!({ "a": 1, "b": 123 }); - let expected = 
json!({ "a": 1 }); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs, vec![]); - - let actual = json!({ "a": 1 }); - let expected = json!({ "b": 1 }); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - let actual = json!({ "a": 1 }); - let expected = json!({ "a": 2 }); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs.len(), 1); - - let actual = json!({ "a": { "b": true } }); - let expected = json!({ "a": {} }); - let diffs = diff(&actual, &expected, Config::new(CompareMode::Inclusive)); - assert_eq!(diffs, vec![]); - } - - #[test] - fn test_object_strict() { - let lhs = json!({}); - let rhs = json!({ "a": 1 }); - let diffs = diff(&lhs, &rhs, Config::new(CompareMode::Strict)); - assert_eq!(diffs.len(), 1); - - let lhs = json!({ "a": 1 }); - let rhs = json!({}); - let diffs = diff(&lhs, &rhs, Config::new(CompareMode::Strict)); - assert_eq!(diffs.len(), 1); - - let json = json!({ "a": 1 }); - let diffs = diff(&json, &json, Config::new(CompareMode::Strict)); - assert_eq!(diffs, vec![]); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/assert-json-diff/src/lib.rs s390-tools-2.33.1/rust-vendor/assert-json-diff/src/lib.rs --- s390-tools-2.31.0/rust-vendor/assert-json-diff/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/assert-json-diff/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,660 +0,0 @@ -//! This crate includes macros for comparing two serializable values by diffing their JSON -//! representations. It is designed to give much more helpful error messages than the standard -//! [`assert_eq!`]. It basically does a diff of the two objects and tells you the exact -//! differences. This is useful when asserting that two large JSON objects are the same. -//! -//! It uses the [serde] and [serde_json] to perform the serialization. -//! -//! [serde]: https://crates.io/crates/serde -//! 
[serde_json]: https://crates.io/crates/serde_json -//! [`assert_eq!`]: https://doc.rust-lang.org/std/macro.assert_eq.html -//! -//! ## Partial matching -//! -//! If you want to assert that one JSON value is "included" in another use -//! [`assert_json_include`](macro.assert_json_include.html): -//! -//! ```should_panic -//! use assert_json_diff::assert_json_include; -//! use serde_json::json; -//! -//! let a = json!({ -//! "data": { -//! "users": [ -//! { -//! "id": 1, -//! "country": { -//! "name": "Denmark" -//! } -//! }, -//! { -//! "id": 24, -//! "country": { -//! "name": "Denmark" -//! } -//! } -//! ] -//! } -//! }); -//! -//! let b = json!({ -//! "data": { -//! "users": [ -//! { -//! "id": 1, -//! "country": { -//! "name": "Sweden" -//! } -//! }, -//! { -//! "id": 2, -//! "country": { -//! "name": "Denmark" -//! } -//! } -//! ] -//! } -//! }); -//! -//! assert_json_include!(actual: a, expected: b) -//! ``` -//! -//! This will panic with the error message: -//! -//! ```text -//! json atoms at path ".data.users[0].country.name" are not equal: -//! expected: -//! "Sweden" -//! actual: -//! "Denmark" -//! -//! json atoms at path ".data.users[1].id" are not equal: -//! expected: -//! 2 -//! actual: -//! 24 -//! ``` -//! -//! [`assert_json_include`](macro.assert_json_include.html) allows extra data in `actual` but not in `expected`. That is so you can verify just a part -//! of the JSON without having to specify the whole thing. For example this test passes: -//! -//! ``` -//! use assert_json_diff::assert_json_include; -//! use serde_json::json; -//! -//! assert_json_include!( -//! actual: json!({ -//! "a": { "b": 1 }, -//! }), -//! expected: json!({ -//! "a": {}, -//! }) -//! ) -//! ``` -//! -//! However `expected` cannot contain additional data so this test fails: -//! -//! ```should_panic -//! use assert_json_diff::assert_json_include; -//! use serde_json::json; -//! -//! assert_json_include!( -//! actual: json!({ -//! "a": {}, -//! }), -//! 
expected: json!({ -//! "a": { "b": 1 }, -//! }) -//! ) -//! ``` -//! -//! That will print -//! -//! ```text -//! json atom at path ".a.b" is missing from actual -//! ``` -//! -//! ## Exact matching -//! -//! If you want to ensure two JSON values are *exactly* the same, use [`assert_json_eq`](macro.assert_json_eq.html). -//! -//! ```rust,should_panic -//! use assert_json_diff::assert_json_eq; -//! use serde_json::json; -//! -//! assert_json_eq!( -//! json!({ "a": { "b": 1 } }), -//! json!({ "a": {} }) -//! ) -//! ``` -//! -//! This will panic with the error message: -//! -//! ```text -//! json atom at path ".a.b" is missing from lhs -//! ``` -//! -//! ## Further customization -//! -//! You can use [`assert_json_matches`] to further customize the comparison. - -#![deny( - missing_docs, - unused_imports, - missing_debug_implementations, - missing_copy_implementations, - trivial_casts, - trivial_numeric_casts, - unsafe_code, - unstable_features, - unused_import_braces, - unused_qualifications, - unknown_lints -)] - -use diff::diff; -use serde::Serialize; - -mod core_ext; -mod diff; - -/// Compare two JSON values for an inclusive match. -/// -/// It allows `actual` to contain additional data. If you want an exact match use -/// [`assert_json_eq`](macro.assert_json_eq.html) instead. -/// -/// See [crate documentation](index.html) for examples. -#[macro_export] -macro_rules! assert_json_include { - (actual: $actual:expr, expected: $expected:expr $(,)?) => {{ - $crate::assert_json_matches!( - $actual, - $expected, - $crate::Config::new($crate::CompareMode::Inclusive) - ) - }}; - (expected: $expected:expr, actual: $actual:expr $(,)?) => {{ - $crate::assert_json_include!(actual: $actual, expected: $expected) - }}; -} - -/// Compare two JSON values for an exact match. -/// -/// If you want an inclusive match use [`assert_json_include`](macro.assert_json_include.html) instead. -/// -/// See [crate documentation](index.html) for examples. -#[macro_export] -macro_rules! 
assert_json_eq { - ($lhs:expr, $rhs:expr $(,)?) => {{ - $crate::assert_json_matches!($lhs, $rhs, $crate::Config::new($crate::CompareMode::Strict)) - }}; -} - -/// Compare two JSON values according to a configuration. -/// -/// ``` -/// use assert_json_diff::{ -/// CompareMode, -/// Config, -/// NumericMode, -/// assert_json_matches, -/// }; -/// use serde_json::json; -/// -/// let config = Config::new(CompareMode::Strict).numeric_mode(NumericMode::AssumeFloat); -/// -/// assert_json_matches!( -/// json!({ -/// "a": { "b": [1, 2, 3.0] }, -/// }), -/// json!({ -/// "a": { "b": [1, 2.0, 3] }, -/// }), -/// config, -/// ) -/// ``` -/// -/// When using `CompareMode::Inclusive` the first argument is `actual` and the second argument is -/// `expected`. Example: -/// -/// ``` -/// # use assert_json_diff::{ -/// # CompareMode, -/// # Config, -/// # NumericMode, -/// # assert_json_matches, -/// # assert_json_include, -/// # }; -/// # use serde_json::json; -/// # -/// // This -/// assert_json_matches!( -/// json!({ -/// "a": { "b": 1 }, -/// }), -/// json!({ -/// "a": {}, -/// }), -/// Config::new(CompareMode::Inclusive), -/// ); -/// -/// // Is the same as this -/// assert_json_include!( -/// actual: json!({ -/// "a": { "b": 1 }, -/// }), -/// expected: json!({ -/// "a": {}, -/// }), -/// ); -/// ``` -#[macro_export] -macro_rules! assert_json_matches { - ($lhs:expr, $rhs:expr, $config:expr $(,)?) => {{ - if let Err(error) = $crate::assert_json_matches_no_panic(&$lhs, &$rhs, $config) { - panic!("\n\n{}\n\n", error); - } - }}; -} - -/// Compares two JSON values without panicking. -/// -/// Instead it returns a `Result` where the error is the message that would be passed to `panic!`. -/// This is might be useful if you want to control how failures are reported and don't want to deal -/// with panics. 
-pub fn assert_json_matches_no_panic( - lhs: &Lhs, - rhs: &Rhs, - config: Config, -) -> Result<(), String> -where - Lhs: Serialize, - Rhs: Serialize, -{ - let lhs = serde_json::to_value(lhs).unwrap_or_else(|err| { - panic!( - "Couldn't convert left hand side value to JSON. Serde error: {}", - err - ) - }); - let rhs = serde_json::to_value(rhs).unwrap_or_else(|err| { - panic!( - "Couldn't convert right hand side value to JSON. Serde error: {}", - err - ) - }); - - let diffs = diff(&lhs, &rhs, config); - - if diffs.is_empty() { - Ok(()) - } else { - let msg = diffs - .into_iter() - .map(|d| d.to_string()) - .collect::>() - .join("\n\n"); - Err(msg) - } -} - -/// Configuration for how JSON values should be compared. -#[derive(Debug, Clone, PartialEq, Eq)] -#[allow(missing_copy_implementations)] -pub struct Config { - pub(crate) compare_mode: CompareMode, - pub(crate) numeric_mode: NumericMode, -} - -impl Config { - /// Create a new [`Config`] using the given [`CompareMode`]. - /// - /// The default `numeric_mode` is be [`NumericMode::Strict`]. - pub fn new(compare_mode: CompareMode) -> Self { - Self { - compare_mode, - numeric_mode: NumericMode::Strict, - } - } - - /// Change the config's numeric mode. - /// - /// The default `numeric_mode` is be [`NumericMode::Strict`]. - pub fn numeric_mode(mut self, numeric_mode: NumericMode) -> Self { - self.numeric_mode = numeric_mode; - self - } - - /// Change the config's compare mode. - pub fn compare_mode(mut self, compare_mode: CompareMode) -> Self { - self.compare_mode = compare_mode; - self - } -} - -/// Mode for how JSON values should be compared. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum CompareMode { - /// The two JSON values don't have to be exactly equal. The "actual" value is only required to - /// be "contained" inside "expected". See [crate documentation](index.html) for examples. - /// - /// The mode used with [`assert_json_include`]. - Inclusive, - /// The two JSON values must be exactly equal. 
- /// - /// The mode used with [`assert_json_eq`]. - Strict, -} - -/// How should numbers be compared. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum NumericMode { - /// Different numeric types aren't considered equal. - Strict, - /// All numeric types are converted to float before comparison. - AssumeFloat, -} - -#[cfg(test)] -mod tests { - use super::*; - use serde_json::{json, Value}; - use std::fmt::Write; - - #[test] - fn boolean_root() { - let result = test_partial_match(json!(true), json!(true)); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!(false), json!(false)); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!(false), json!(true)); - assert_output_eq( - result, - Err(r#"json atoms at path "(root)" are not equal: - expected: - true - actual: - false"#), - ); - - let result = test_partial_match(json!(true), json!(false)); - assert_output_eq( - result, - Err(r#"json atoms at path "(root)" are not equal: - expected: - false - actual: - true"#), - ); - } - - #[test] - fn string_root() { - let result = test_partial_match(json!("true"), json!("true")); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!("false"), json!("false")); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!("false"), json!("true")); - assert_output_eq( - result, - Err(r#"json atoms at path "(root)" are not equal: - expected: - "true" - actual: - "false""#), - ); - - let result = test_partial_match(json!("true"), json!("false")); - assert_output_eq( - result, - Err(r#"json atoms at path "(root)" are not equal: - expected: - "false" - actual: - "true""#), - ); - } - - #[test] - fn number_root() { - let result = test_partial_match(json!(1), json!(1)); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!(0), json!(0)); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!(0), json!(1)); - assert_output_eq( - result, - 
Err(r#"json atoms at path "(root)" are not equal: - expected: - 1 - actual: - 0"#), - ); - - let result = test_partial_match(json!(1), json!(0)); - assert_output_eq( - result, - Err(r#"json atoms at path "(root)" are not equal: - expected: - 0 - actual: - 1"#), - ); - } - - #[test] - fn null_root() { - let result = test_partial_match(json!(null), json!(null)); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!(null), json!(1)); - assert_output_eq( - result, - Err(r#"json atoms at path "(root)" are not equal: - expected: - 1 - actual: - null"#), - ); - - let result = test_partial_match(json!(1), json!(null)); - assert_output_eq( - result, - Err(r#"json atoms at path "(root)" are not equal: - expected: - null - actual: - 1"#), - ); - } - - #[test] - fn into_object() { - let result = test_partial_match(json!({ "a": true }), json!({ "a": true })); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!({ "a": false }), json!({ "a": true })); - assert_output_eq( - result, - Err(r#"json atoms at path ".a" are not equal: - expected: - true - actual: - false"#), - ); - - let result = - test_partial_match(json!({ "a": { "b": true } }), json!({ "a": { "b": true } })); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!({ "a": true }), json!({ "a": { "b": true } })); - assert_output_eq( - result, - Err(r#"json atoms at path ".a" are not equal: - expected: - { - "b": true - } - actual: - true"#), - ); - - let result = test_partial_match(json!({}), json!({ "a": true })); - assert_output_eq( - result, - Err(r#"json atom at path ".a" is missing from actual"#), - ); - - let result = test_partial_match(json!({ "a": { "b": true } }), json!({ "a": true })); - assert_output_eq( - result, - Err(r#"json atoms at path ".a" are not equal: - expected: - true - actual: - { - "b": true - }"#), - ); - } - - #[test] - fn into_array() { - let result = test_partial_match(json!([1]), json!([1])); - assert_output_eq(result, 
Ok(())); - - let result = test_partial_match(json!([2]), json!([1])); - assert_output_eq( - result, - Err(r#"json atoms at path "[0]" are not equal: - expected: - 1 - actual: - 2"#), - ); - - let result = test_partial_match(json!([1, 2, 4]), json!([1, 2, 3])); - assert_output_eq( - result, - Err(r#"json atoms at path "[2]" are not equal: - expected: - 3 - actual: - 4"#), - ); - - let result = test_partial_match(json!({ "a": [1, 2, 3]}), json!({ "a": [1, 2, 4]})); - assert_output_eq( - result, - Err(r#"json atoms at path ".a[2]" are not equal: - expected: - 4 - actual: - 3"#), - ); - - let result = test_partial_match(json!({ "a": [1, 2, 3]}), json!({ "a": [1, 2]})); - assert_output_eq(result, Ok(())); - - let result = test_partial_match(json!({ "a": [1, 2]}), json!({ "a": [1, 2, 3]})); - assert_output_eq( - result, - Err(r#"json atom at path ".a[2]" is missing from actual"#), - ); - } - - #[test] - fn exact_matching() { - let result = test_exact_match(json!(true), json!(true)); - assert_output_eq(result, Ok(())); - - let result = test_exact_match(json!("s"), json!("s")); - assert_output_eq(result, Ok(())); - - let result = test_exact_match(json!("a"), json!("b")); - assert_output_eq( - result, - Err(r#"json atoms at path "(root)" are not equal: - lhs: - "a" - rhs: - "b""#), - ); - - let result = test_exact_match( - json!({ "a": [1, { "b": 2 }] }), - json!({ "a": [1, { "b": 3 }] }), - ); - assert_output_eq( - result, - Err(r#"json atoms at path ".a[1].b" are not equal: - lhs: - 2 - rhs: - 3"#), - ); - } - - #[test] - fn exact_match_output_message() { - let result = test_exact_match(json!({ "a": { "b": 1 } }), json!({ "a": {} })); - assert_output_eq( - result, - Err(r#"json atom at path ".a.b" is missing from rhs"#), - ); - - let result = test_exact_match(json!({ "a": {} }), json!({ "a": { "b": 1 } })); - assert_output_eq( - result, - Err(r#"json atom at path ".a.b" is missing from lhs"#), - ); - } - - fn assert_output_eq(actual: Result<(), String>, expected: 
Result<(), &str>) { - match (actual, expected) { - (Ok(()), Ok(())) => {} - - (Err(actual_error), Ok(())) => { - let mut f = String::new(); - writeln!(f, "Did not expect error, but got").unwrap(); - writeln!(f, "{}", actual_error).unwrap(); - panic!("{}", f); - } - - (Ok(()), Err(expected_error)) => { - let expected_error = expected_error.to_string(); - let mut f = String::new(); - writeln!(f, "Expected error, but did not get one. Expected error:").unwrap(); - writeln!(f, "{}", expected_error).unwrap(); - panic!("{}", f); - } - - (Err(actual_error), Err(expected_error)) => { - let expected_error = expected_error.to_string(); - if actual_error != expected_error { - let mut f = String::new(); - writeln!(f, "Errors didn't match").unwrap(); - writeln!(f, "Expected:").unwrap(); - writeln!(f, "{}", expected_error).unwrap(); - writeln!(f, "Got:").unwrap(); - writeln!(f, "{}", actual_error).unwrap(); - panic!("{}", f); - } - } - } - } - - fn test_partial_match(lhs: Value, rhs: Value) -> Result<(), String> { - assert_json_matches_no_panic(&lhs, &rhs, Config::new(CompareMode::Inclusive)) - } - - fn test_exact_match(lhs: Value, rhs: Value) -> Result<(), String> { - assert_json_matches_no_panic(&lhs, &rhs, Config::new(CompareMode::Strict)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/benches/benchmarks.rs s390-tools-2.33.1/rust-vendor/backtrace/benches/benchmarks.rs --- s390-tools-2.31.0/rust-vendor/backtrace/benches/benchmarks.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/benches/benchmarks.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,92 +0,0 @@ -#![feature(test)] - -extern crate test; - -#[cfg(feature = "std")] -use backtrace::Backtrace; - -#[bench] -#[cfg(feature = "std")] -fn trace(b: &mut test::Bencher) { - #[inline(never)] - fn the_function() { - backtrace::trace(|frame| { - let ip = frame.ip(); - test::black_box(ip); - true - }); - } - b.iter(the_function); -} - -#[bench] -#[cfg(feature = "std")] -fn 
trace_and_resolve_callback(b: &mut test::Bencher) { - #[inline(never)] - fn the_function() { - backtrace::trace(|frame| { - backtrace::resolve(frame.ip(), |symbol| { - let addr = symbol.addr(); - test::black_box(addr); - }); - true - }); - } - b.iter(the_function); -} - -#[bench] -#[cfg(feature = "std")] -fn trace_and_resolve_separate(b: &mut test::Bencher) { - #[inline(never)] - fn the_function(frames: &mut Vec<*mut std::ffi::c_void>) { - backtrace::trace(|frame| { - frames.push(frame.ip()); - true - }); - frames.iter().for_each(|frame_ip| { - backtrace::resolve(*frame_ip, |symbol| { - test::black_box(symbol); - }); - }); - } - let mut frames = Vec::with_capacity(1024); - b.iter(|| { - the_function(&mut frames); - frames.clear(); - }); -} - -#[bench] -#[cfg(feature = "std")] -fn new_unresolved(b: &mut test::Bencher) { - #[inline(never)] - fn the_function() { - let bt = Backtrace::new_unresolved(); - test::black_box(bt); - } - b.iter(the_function); -} - -#[bench] -#[cfg(feature = "std")] -fn new(b: &mut test::Bencher) { - #[inline(never)] - fn the_function() { - let bt = Backtrace::new(); - test::black_box(bt); - } - b.iter(the_function); -} - -#[bench] -#[cfg(feature = "std")] -fn new_unresolved_and_resolve_separate(b: &mut test::Bencher) { - #[inline(never)] - fn the_function() { - let mut bt = Backtrace::new_unresolved(); - bt.resolve(); - test::black_box(bt); - } - b.iter(the_function); -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/build.rs s390-tools-2.33.1/rust-vendor/backtrace/build.rs --- s390-tools-2.31.0/rust-vendor/backtrace/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,49 +0,0 @@ -extern crate cc; - -use std::env; -use std::path::Path; - -// Must be public so the build script of `std` can call it. 
-pub fn main() { - match env::var("CARGO_CFG_TARGET_OS").unwrap_or_default().as_str() { - "android" => build_android(), - _ => {} - } -} - -fn build_android() { - // Resolve `src/android-api.c` relative to this file. - // Required to support calling this from the `std` build script. - let android_api_c = Path::new(file!()) - .parent() - .unwrap() - .join("src/android-api.c"); - let expansion = match cc::Build::new().file(android_api_c).try_expand() { - Ok(result) => result, - Err(e) => { - println!("failed to run C compiler: {}", e); - return; - } - }; - let expansion = match std::str::from_utf8(&expansion) { - Ok(s) => s, - Err(_) => return, - }; - println!("expanded android version detection:\n{}", expansion); - let marker = "APIVERSION"; - let i = match expansion.find(marker) { - Some(i) => i, - None => return, - }; - let version = match expansion[i + marker.len() + 1..].split_whitespace().next() { - Some(s) => s, - None => return, - }; - let version = match version.parse::() { - Ok(n) => n, - Err(_) => return, - }; - if version >= 21 { - println!("cargo:rustc-cfg=feature=\"dl_iterate_phdr\""); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/backtrace/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/backtrace/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/Cargo.lock s390-tools-2.33.1/rust-vendor/backtrace/Cargo.lock --- s390-tools-2.31.0/rust-vendor/backtrace/Cargo.lock 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 @@ -1,192 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addr2line" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "backtrace" -version = "0.3.69" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "cpp_demangle", - "libc", - "libloading", - "miniz_oxide", - "object", - "rustc-demangle", - "rustc-serialize", - "serde", - "winapi", -] - -[[package]] -name = "cc" -version = "1.0.72" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "cpp_demangle" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "gimli" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" - -[[package]] -name = "libc" -version = "0.2.147" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" - -[[package]] -name = "libloading" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" -dependencies = [ - "cfg-if", - "winapi", -] - -[[package]] -name = "memchr" -version = "2.4.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" - -[[package]] -name = "miniz_oxide" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" -dependencies = [ - "adler", -] - -[[package]] -name = "object" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" -dependencies = [ - "memchr", -] - -[[package]] -name = "proc-macro2" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "quote" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" - -[[package]] -name = "rustc-serialize" -version = "0.3.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" - -[[package]] -name = "serde" -version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - 
-[[package]] -name = "syn" -version = "1.0.86" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "unicode-xid" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/Cargo.toml s390-tools-2.33.1/rust-vendor/backtrace/Cargo.toml --- s390-tools-2.31.0/rust-vendor/backtrace/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,145 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -name = "backtrace" -version = "0.3.69" -authors = ["The Rust Project Developers"] -build = "build.rs" -exclude = ["/ci/"] -autoexamples = true -autotests = true -description = """ -A library to acquire a stack trace (backtrace) at runtime in a Rust program. -""" -homepage = "https://github.com/rust-lang/backtrace-rs" -documentation = "https://docs.rs/backtrace" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/backtrace-rs" - -[[example]] -name = "backtrace" -required-features = ["std"] - -[[example]] -name = "raw" -required-features = ["std"] - -[[test]] -name = "skip_inner_frames" -required-features = ["std"] - -[[test]] -name = "long_fn_name" -required-features = ["std"] - -[[test]] -name = "smoke" -required-features = ["std"] -edition = "2018" - -[[test]] -name = "accuracy" -required-features = ["std"] -edition = "2018" - -[[test]] -name = "concurrent-panics" -harness = false -required-features = ["std"] - -[[test]] -name = "current-exe-mismatch" -harness = false -required-features = ["std"] - -[dependencies.cfg-if] -version = "1.0" - -[dependencies.cpp_demangle] -version = "0.4.0" -features = ["alloc"] -optional = true -default-features = false - -[dependencies.rustc-demangle] -version = "0.1.4" - -[dependencies.rustc-serialize] -version = "0.3" -optional = true - -[dependencies.serde] -version = "1.0" -features = ["derive"] -optional = true - -[dev-dependencies.libloading] -version = "0.7" - -[build-dependencies.cc] -version = "1.0.67" - -[features] -coresymbolication = [] -dbghelp = [] -default = ["std"] -dladdr = [] -gimli-symbolize = [] -kernel32 = [] -libbacktrace = [] -libunwind = [] -serialize-rustc = ["rustc-serialize"] -serialize-serde = ["serde"] -std = [] -unix-backtrace = [] -verify-winapi = [ - "winapi/dbghelp", - "winapi/handleapi", - "winapi/libloaderapi", - "winapi/memoryapi", - "winapi/minwindef", - "winapi/processthreadsapi", - "winapi/synchapi", - 
"winapi/tlhelp32", - "winapi/winbase", - "winapi/winnt", -] - -[target."cfg(not(all(windows, target_env = \"msvc\", not(target_vendor = \"uwp\"))))".dependencies.addr2line] -version = "0.21.0" -default-features = false - -[target."cfg(not(all(windows, target_env = \"msvc\", not(target_vendor = \"uwp\"))))".dependencies.libc] -version = "0.2.146" -default-features = false - -[target."cfg(not(all(windows, target_env = \"msvc\", not(target_vendor = \"uwp\"))))".dependencies.miniz_oxide] -version = "0.7.0" -default-features = false - -[target."cfg(not(all(windows, target_env = \"msvc\", not(target_vendor = \"uwp\"))))".dependencies.object] -version = "0.32.0" -features = [ - "read_core", - "elf", - "macho", - "pe", - "unaligned", - "archive", -] -default-features = false - -[target."cfg(windows)".dependencies.winapi] -version = "0.3.9" -optional = true diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/examples/backtrace.rs s390-tools-2.33.1/rust-vendor/backtrace/examples/backtrace.rs --- s390-tools-2.31.0/rust-vendor/backtrace/examples/backtrace.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/examples/backtrace.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,5 +0,0 @@ -use backtrace::Backtrace; - -fn main() { - println!("{:?}", Backtrace::new()); -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/examples/raw.rs s390-tools-2.33.1/rust-vendor/backtrace/examples/raw.rs --- s390-tools-2.31.0/rust-vendor/backtrace/examples/raw.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/examples/raw.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,52 +0,0 @@ -fn main() { - foo(); -} - -fn foo() { - bar() -} -fn bar() { - baz() -} -fn baz() { - print() -} - -#[cfg(target_pointer_width = "32")] -const HEX_WIDTH: usize = 10; -#[cfg(target_pointer_width = "64")] -const HEX_WIDTH: usize = 20; - -fn print() { - let mut cnt = 0; - backtrace::trace(|frame| { - let ip = frame.ip(); - print!("frame #{:<2} - {:#02$x}", cnt, ip as 
usize, HEX_WIDTH); - cnt += 1; - - let mut resolved = false; - backtrace::resolve(frame.ip(), |symbol| { - if !resolved { - resolved = true; - } else { - print!("{}", vec![" "; 7 + 2 + 3 + HEX_WIDTH].join("")); - } - - if let Some(name) = symbol.name() { - print!(" - {}", name); - } else { - print!(" - "); - } - if let Some(file) = symbol.filename() { - if let Some(l) = symbol.lineno() { - print!("\n{:13}{:4$}@ {}:{}", "", "", file.display(), l, HEX_WIDTH); - } - } - println!(""); - }); - if !resolved { - println!(" - "); - } - true // keep going - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/backtrace/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/backtrace/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/LICENSE-MIT s390-tools-2.33.1/rust-vendor/backtrace/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/backtrace/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2014 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/README.md s390-tools-2.33.1/rust-vendor/backtrace/README.md --- s390-tools-2.31.0/rust-vendor/backtrace/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,73 +0,0 @@ -# backtrace-rs - -[Documentation](https://docs.rs/backtrace) - -A library for acquiring backtraces at runtime for Rust. This library aims to -enhance the support of the standard library by providing a programmatic -interface to work with, but it also supports simply easily printing the current -backtrace like libstd's panics. - -## Install - -```toml -[dependencies] -backtrace = "0.3" -``` - -## Usage - -To simply capture a backtrace and defer dealing with it until a later time, -you can use the top-level `Backtrace` type. - -```rust -use backtrace::Backtrace; - -fn main() { - let bt = Backtrace::new(); - - // do_some_work(); - - println!("{:?}", bt); -} -``` - -If, however, you'd like more raw access to the actual tracing functionality, you -can use the `trace` and `resolve` functions directly. - -```rust -fn main() { - backtrace::trace(|frame| { - let ip = frame.ip(); - let symbol_address = frame.symbol_address(); - - // Resolve this instruction pointer to a symbol name - backtrace::resolve_frame(frame, |symbol| { - if let Some(name) = symbol.name() { - // ... - } - if let Some(filename) = symbol.filename() { - // ... - } - }); - - true // keep going to the next frame - }); -} -``` - -# License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - https://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - https://opensource.org/licenses/MIT) - -at your option. 
- -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in backtrace-rs by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/android-api.c s390-tools-2.33.1/rust-vendor/backtrace/src/android-api.c --- s390-tools-2.31.0/rust-vendor/backtrace/src/android-api.c 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/android-api.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,4 +0,0 @@ -// Used from the build script to detect the value of the `__ANDROID_API__` -// builtin #define - -APIVERSION __ANDROID_API__ diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/dbghelp.rs s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/dbghelp.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/dbghelp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/dbghelp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,257 +0,0 @@ -//! Backtrace strategy for MSVC platforms. -//! -//! This module contains the ability to generate a backtrace on MSVC using one -//! of two possible methods. The `StackWalkEx` function is primarily used if -//! possible, but not all systems have that. Failing that the `StackWalk64` -//! function is used instead. Note that `StackWalkEx` is favored because it -//! handles debuginfo internally and returns inline frame information. -//! -//! Note that all dbghelp support is loaded dynamically, see `src/dbghelp.rs` -//! for more information about that. 
- -#![allow(bad_style)] - -use super::super::{dbghelp, windows::*}; -use core::ffi::c_void; -use core::mem; - -#[derive(Clone, Copy)] -pub enum StackFrame { - New(STACKFRAME_EX), - Old(STACKFRAME64), -} - -#[derive(Clone, Copy)] -pub struct Frame { - pub(crate) stack_frame: StackFrame, - base_address: *mut c_void, -} - -// we're just sending around raw pointers and reading them, never interpreting -// them so this should be safe to both send and share across threads. -unsafe impl Send for Frame {} -unsafe impl Sync for Frame {} - -impl Frame { - pub fn ip(&self) -> *mut c_void { - self.addr_pc().Offset as *mut _ - } - - pub fn sp(&self) -> *mut c_void { - self.addr_stack().Offset as *mut _ - } - - pub fn symbol_address(&self) -> *mut c_void { - self.ip() - } - - pub fn module_base_address(&self) -> Option<*mut c_void> { - Some(self.base_address) - } - - fn addr_pc(&self) -> &ADDRESS64 { - match self.stack_frame { - StackFrame::New(ref new) => &new.AddrPC, - StackFrame::Old(ref old) => &old.AddrPC, - } - } - - fn addr_pc_mut(&mut self) -> &mut ADDRESS64 { - match self.stack_frame { - StackFrame::New(ref mut new) => &mut new.AddrPC, - StackFrame::Old(ref mut old) => &mut old.AddrPC, - } - } - - fn addr_frame_mut(&mut self) -> &mut ADDRESS64 { - match self.stack_frame { - StackFrame::New(ref mut new) => &mut new.AddrFrame, - StackFrame::Old(ref mut old) => &mut old.AddrFrame, - } - } - - fn addr_stack(&self) -> &ADDRESS64 { - match self.stack_frame { - StackFrame::New(ref new) => &new.AddrStack, - StackFrame::Old(ref old) => &old.AddrStack, - } - } - - fn addr_stack_mut(&mut self) -> &mut ADDRESS64 { - match self.stack_frame { - StackFrame::New(ref mut new) => &mut new.AddrStack, - StackFrame::Old(ref mut old) => &mut old.AddrStack, - } - } -} - -#[repr(C, align(16))] // required by `CONTEXT`, is a FIXME in winapi right now -struct MyContext(CONTEXT); - -#[inline(always)] -pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) { - // Allocate necessary 
structures for doing the stack walk - let process = GetCurrentProcess(); - let thread = GetCurrentThread(); - - let mut context = mem::zeroed::(); - RtlCaptureContext(&mut context.0); - - // Ensure this process's symbols are initialized - let dbghelp = match dbghelp::init() { - Ok(dbghelp) => dbghelp, - Err(()) => return, // oh well... - }; - - // On x86_64 and ARM64 we opt to not use the default `Sym*` functions from - // dbghelp for getting the function table and module base. Instead we use - // the `RtlLookupFunctionEntry` function in kernel32 which will account for - // JIT compiler frames as well. These should be equivalent, but using - // `Rtl*` allows us to backtrace through JIT frames. - // - // Note that `RtlLookupFunctionEntry` only works for in-process backtraces, - // but that's all we support anyway, so it all lines up well. - cfg_if::cfg_if! { - if #[cfg(target_pointer_width = "64")] { - use core::ptr; - - unsafe extern "system" fn function_table_access(_process: HANDLE, addr: DWORD64) -> PVOID { - let mut base = 0; - RtlLookupFunctionEntry(addr, &mut base, ptr::null_mut()).cast() - } - - unsafe extern "system" fn get_module_base(_process: HANDLE, addr: DWORD64) -> DWORD64 { - let mut base = 0; - RtlLookupFunctionEntry(addr, &mut base, ptr::null_mut()); - base - } - } else { - let function_table_access = dbghelp.SymFunctionTableAccess64(); - let get_module_base = dbghelp.SymGetModuleBase64(); - } - } - - let process_handle = GetCurrentProcess(); - - // Attempt to use `StackWalkEx` if we can, but fall back to `StackWalk64` - // since it's in theory supported on more systems. 
- match (*dbghelp.dbghelp()).StackWalkEx() { - Some(StackWalkEx) => { - let mut inner: STACKFRAME_EX = mem::zeroed(); - inner.StackFrameSize = mem::size_of::() as DWORD; - let mut frame = super::Frame { - inner: Frame { - stack_frame: StackFrame::New(inner), - base_address: 0 as _, - }, - }; - let image = init_frame(&mut frame.inner, &context.0); - let frame_ptr = match &mut frame.inner.stack_frame { - StackFrame::New(ptr) => ptr as *mut STACKFRAME_EX, - _ => unreachable!(), - }; - - while StackWalkEx( - image as DWORD, - process, - thread, - frame_ptr, - &mut context.0 as *mut CONTEXT as *mut _, - None, - Some(function_table_access), - Some(get_module_base), - None, - 0, - ) == TRUE - { - frame.inner.base_address = get_module_base(process_handle, frame.ip() as _) as _; - - if !cb(&frame) { - break; - } - } - } - None => { - let mut frame = super::Frame { - inner: Frame { - stack_frame: StackFrame::Old(mem::zeroed()), - base_address: 0 as _, - }, - }; - let image = init_frame(&mut frame.inner, &context.0); - let frame_ptr = match &mut frame.inner.stack_frame { - StackFrame::Old(ptr) => ptr as *mut STACKFRAME64, - _ => unreachable!(), - }; - - while dbghelp.StackWalk64()( - image as DWORD, - process, - thread, - frame_ptr, - &mut context.0 as *mut CONTEXT as *mut _, - None, - Some(function_table_access), - Some(get_module_base), - None, - ) == TRUE - { - frame.inner.base_address = get_module_base(process_handle, frame.ip() as _) as _; - - if !cb(&frame) { - break; - } - } - } - } -} - -#[cfg(target_arch = "x86_64")] -fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD { - frame.addr_pc_mut().Offset = ctx.Rip as u64; - frame.addr_pc_mut().Mode = AddrModeFlat; - frame.addr_stack_mut().Offset = ctx.Rsp as u64; - frame.addr_stack_mut().Mode = AddrModeFlat; - frame.addr_frame_mut().Offset = ctx.Rbp as u64; - frame.addr_frame_mut().Mode = AddrModeFlat; - - IMAGE_FILE_MACHINE_AMD64 -} - -#[cfg(target_arch = "x86")] -fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> 
WORD { - frame.addr_pc_mut().Offset = ctx.Eip as u64; - frame.addr_pc_mut().Mode = AddrModeFlat; - frame.addr_stack_mut().Offset = ctx.Esp as u64; - frame.addr_stack_mut().Mode = AddrModeFlat; - frame.addr_frame_mut().Offset = ctx.Ebp as u64; - frame.addr_frame_mut().Mode = AddrModeFlat; - - IMAGE_FILE_MACHINE_I386 -} - -#[cfg(target_arch = "aarch64")] -fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD { - frame.addr_pc_mut().Offset = ctx.Pc as u64; - frame.addr_pc_mut().Mode = AddrModeFlat; - frame.addr_stack_mut().Offset = ctx.Sp as u64; - frame.addr_stack_mut().Mode = AddrModeFlat; - unsafe { - frame.addr_frame_mut().Offset = ctx.u.s().Fp as u64; - } - frame.addr_frame_mut().Mode = AddrModeFlat; - IMAGE_FILE_MACHINE_ARM64 -} - -#[cfg(target_arch = "arm")] -fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> WORD { - frame.addr_pc_mut().Offset = ctx.Pc as u64; - frame.addr_pc_mut().Mode = AddrModeFlat; - frame.addr_stack_mut().Offset = ctx.Sp as u64; - frame.addr_stack_mut().Mode = AddrModeFlat; - unsafe { - frame.addr_frame_mut().Offset = ctx.R11 as u64; - } - frame.addr_frame_mut().Mode = AddrModeFlat; - IMAGE_FILE_MACHINE_ARMNT -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/libunwind.rs s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/libunwind.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/libunwind.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/libunwind.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,268 +0,0 @@ -//! Backtrace support using libunwind/gcc_s/etc APIs. -//! -//! This module contains the ability to unwind the stack using libunwind-style -//! APIs. Note that there's a whole bunch of implementations of the -//! libunwind-like API, and this is just trying to be compatible with most of -//! them all at once instead of being picky. -//! -//! The libunwind API is powered by `_Unwind_Backtrace` and is in practice very -//! reliable at generating a backtrace. 
It's not entirely clear how it does it -//! (frame pointers? eh_frame info? both?) but it seems to work! -//! -//! Most of the complexity of this module is handling the various platform -//! differences across libunwind implementations. Otherwise this is a pretty -//! straightforward Rust binding to the libunwind APIs. -//! -//! This is the default unwinding API for all non-Windows platforms currently. - -use super::super::Bomb; -use core::ffi::c_void; - -pub enum Frame { - Raw(*mut uw::_Unwind_Context), - Cloned { - ip: *mut c_void, - sp: *mut c_void, - symbol_address: *mut c_void, - }, -} - -// With a raw libunwind pointer it should only ever be access in a readonly -// threadsafe fashion, so it's `Sync`. When sending to other threads via `Clone` -// we always switch to a version which doesn't retain interior pointers, so we -// should be `Send` as well. -unsafe impl Send for Frame {} -unsafe impl Sync for Frame {} - -impl Frame { - pub fn ip(&self) -> *mut c_void { - let ctx = match *self { - Frame::Raw(ctx) => ctx, - Frame::Cloned { ip, .. } => return ip, - }; - unsafe { uw::_Unwind_GetIP(ctx) as *mut c_void } - } - - pub fn sp(&self) -> *mut c_void { - match *self { - Frame::Raw(ctx) => unsafe { uw::get_sp(ctx) as *mut c_void }, - Frame::Cloned { sp, .. } => sp, - } - } - - pub fn symbol_address(&self) -> *mut c_void { - if let Frame::Cloned { symbol_address, .. } = *self { - return symbol_address; - } - - // The macOS linker emits a "compact" unwind table that only includes an - // entry for a function if that function either has an LSDA or its - // encoding differs from that of the previous entry. Consequently, on - // macOS, `_Unwind_FindEnclosingFunction` is unreliable (it can return a - // pointer to some totally unrelated function). Instead, we just always - // return the ip. 
- // - // https://github.com/rust-lang/rust/issues/74771#issuecomment-664056788 - // - // Note the `skip_inner_frames.rs` test is skipped on macOS due to this - // clause, and if this is fixed that test in theory can be run on macOS! - if cfg!(target_vendor = "apple") { - self.ip() - } else { - unsafe { uw::_Unwind_FindEnclosingFunction(self.ip()) } - } - } - - pub fn module_base_address(&self) -> Option<*mut c_void> { - None - } -} - -impl Clone for Frame { - fn clone(&self) -> Frame { - Frame::Cloned { - ip: self.ip(), - sp: self.sp(), - symbol_address: self.symbol_address(), - } - } -} - -#[inline(always)] -pub unsafe fn trace(mut cb: &mut dyn FnMut(&super::Frame) -> bool) { - uw::_Unwind_Backtrace(trace_fn, &mut cb as *mut _ as *mut _); - - extern "C" fn trace_fn( - ctx: *mut uw::_Unwind_Context, - arg: *mut c_void, - ) -> uw::_Unwind_Reason_Code { - let cb = unsafe { &mut *(arg as *mut &mut dyn FnMut(&super::Frame) -> bool) }; - let cx = super::Frame { - inner: Frame::Raw(ctx), - }; - - let mut bomb = Bomb { enabled: true }; - let keep_going = cb(&cx); - bomb.enabled = false; - - if keep_going { - uw::_URC_NO_REASON - } else { - uw::_URC_FAILURE - } - } -} - -/// Unwind library interface used for backtraces -/// -/// Note that dead code is allowed as here are just bindings -/// iOS doesn't use all of them it but adding more -/// platform-specific configs pollutes the code too much -#[allow(non_camel_case_types)] -#[allow(non_snake_case)] -#[allow(dead_code)] -mod uw { - pub use self::_Unwind_Reason_Code::*; - - use core::ffi::c_void; - - #[repr(C)] - pub enum _Unwind_Reason_Code { - _URC_NO_REASON = 0, - _URC_FOREIGN_EXCEPTION_CAUGHT = 1, - _URC_FATAL_PHASE2_ERROR = 2, - _URC_FATAL_PHASE1_ERROR = 3, - _URC_NORMAL_STOP = 4, - _URC_END_OF_STACK = 5, - _URC_HANDLER_FOUND = 6, - _URC_INSTALL_CONTEXT = 7, - _URC_CONTINUE_UNWIND = 8, - _URC_FAILURE = 9, // used only by ARM EABI - } - - pub enum _Unwind_Context {} - - pub type _Unwind_Trace_Fn = - extern "C" fn(ctx: 
*mut _Unwind_Context, arg: *mut c_void) -> _Unwind_Reason_Code; - - extern "C" { - pub fn _Unwind_Backtrace( - trace: _Unwind_Trace_Fn, - trace_argument: *mut c_void, - ) -> _Unwind_Reason_Code; - } - - cfg_if::cfg_if! { - // available since GCC 4.2.0, should be fine for our purpose - if #[cfg(all( - not(all(target_os = "android", target_arch = "arm")), - not(all(target_os = "freebsd", target_arch = "arm")), - not(all(target_os = "linux", target_arch = "arm")), - not(all(target_os = "horizon", target_arch = "arm")), - not(all(target_os = "vita", target_arch = "arm")), - ))] { - extern "C" { - pub fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t; - pub fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void; - - #[cfg(not(all(target_os = "linux", target_arch = "s390x")))] - // This function is a misnomer: rather than getting this frame's - // Canonical Frame Address (aka the caller frame's SP) it - // returns this frame's SP. - // - // https://github.com/libunwind/libunwind/blob/d32956507cf29d9b1a98a8bce53c78623908f4fe/src/unwind/GetCFA.c#L28-L35 - #[link_name = "_Unwind_GetCFA"] - pub fn get_sp(ctx: *mut _Unwind_Context) -> libc::uintptr_t; - - } - - // s390x uses a biased CFA value, therefore we need to use - // _Unwind_GetGR to get the stack pointer register (%r15) - // instead of relying on _Unwind_GetCFA. - #[cfg(all(target_os = "linux", target_arch = "s390x"))] - pub unsafe fn get_sp(ctx: *mut _Unwind_Context) -> libc::uintptr_t { - extern "C" { - pub fn _Unwind_GetGR(ctx: *mut _Unwind_Context, index: libc::c_int) -> libc::uintptr_t; - } - _Unwind_GetGR(ctx, 15) - } - } else { - // On android and arm, the function `_Unwind_GetIP` and a bunch of - // others are macros, so we define functions containing the - // expansion of the macros. - // - // TODO: link to the header file that defines these macros, if you - // can find it. (I, fitzgen, cannot find the header file that some - // of these macro expansions were originally borrowed from.) 
- #[repr(C)] - enum _Unwind_VRS_Result { - _UVRSR_OK = 0, - _UVRSR_NOT_IMPLEMENTED = 1, - _UVRSR_FAILED = 2, - } - #[repr(C)] - enum _Unwind_VRS_RegClass { - _UVRSC_CORE = 0, - _UVRSC_VFP = 1, - _UVRSC_FPA = 2, - _UVRSC_WMMXD = 3, - _UVRSC_WMMXC = 4, - } - #[repr(C)] - enum _Unwind_VRS_DataRepresentation { - _UVRSD_UINT32 = 0, - _UVRSD_VFPX = 1, - _UVRSD_FPAX = 2, - _UVRSD_UINT64 = 3, - _UVRSD_FLOAT = 4, - _UVRSD_DOUBLE = 5, - } - - type _Unwind_Word = libc::c_uint; - extern "C" { - fn _Unwind_VRS_Get( - ctx: *mut _Unwind_Context, - klass: _Unwind_VRS_RegClass, - word: _Unwind_Word, - repr: _Unwind_VRS_DataRepresentation, - data: *mut c_void, - ) -> _Unwind_VRS_Result; - } - - pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t { - let mut val: _Unwind_Word = 0; - let ptr = &mut val as *mut _Unwind_Word; - let _ = _Unwind_VRS_Get( - ctx, - _Unwind_VRS_RegClass::_UVRSC_CORE, - 15, - _Unwind_VRS_DataRepresentation::_UVRSD_UINT32, - ptr as *mut c_void, - ); - (val & !1) as libc::uintptr_t - } - - // R13 is the stack pointer on arm. - const SP: _Unwind_Word = 13; - - pub unsafe fn get_sp(ctx: *mut _Unwind_Context) -> libc::uintptr_t { - let mut val: _Unwind_Word = 0; - let ptr = &mut val as *mut _Unwind_Word; - let _ = _Unwind_VRS_Get( - ctx, - _Unwind_VRS_RegClass::_UVRSC_CORE, - SP, - _Unwind_VRS_DataRepresentation::_UVRSD_UINT32, - ptr as *mut c_void, - ); - val as libc::uintptr_t - } - - // This function also doesn't exist on Android or ARM/Linux, so make it - // a no-op. 
- pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void { - pc - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/miri.rs s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/miri.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/miri.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/miri.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -use alloc::boxed::Box; -use alloc::vec::Vec; -use core::ffi::c_void; - -extern "Rust" { - fn miri_backtrace_size(flags: u64) -> usize; - fn miri_get_backtrace(flags: u64, buf: *mut *mut ()); - fn miri_resolve_frame(ptr: *mut (), flags: u64) -> MiriFrame; - fn miri_resolve_frame_names(ptr: *mut (), flags: u64, name_buf: *mut u8, filename_buf: *mut u8); -} - -#[repr(C)] -pub struct MiriFrame { - pub name_len: usize, - pub filename_len: usize, - pub lineno: u32, - pub colno: u32, - pub fn_ptr: *mut c_void, -} - -#[derive(Clone, Debug)] -pub struct FullMiriFrame { - pub name: Box<[u8]>, - pub filename: Box<[u8]>, - pub lineno: u32, - pub colno: u32, - pub fn_ptr: *mut c_void, -} - -#[derive(Debug, Clone)] -pub struct Frame { - pub addr: *mut c_void, - pub inner: FullMiriFrame, -} - -// SAFETY: Miri guarantees that the returned pointer -// can be used from any thread. -unsafe impl Send for Frame {} -unsafe impl Sync for Frame {} - -impl Frame { - pub fn ip(&self) -> *mut c_void { - self.addr - } - - pub fn sp(&self) -> *mut c_void { - core::ptr::null_mut() - } - - pub fn symbol_address(&self) -> *mut c_void { - self.inner.fn_ptr - } - - pub fn module_base_address(&self) -> Option<*mut c_void> { - None - } -} - -pub fn trace bool>(cb: F) { - // SAFETY: Miri guarantees that the backtrace API functions - // can be called from any thread. - unsafe { trace_unsynchronized(cb) }; -} - -pub fn resolve_addr(ptr: *mut c_void) -> Frame { - // SAFETY: Miri will stop execution with an error if this pointer - // is invalid. 
- let frame = unsafe { miri_resolve_frame(ptr as *mut (), 1) }; - - let mut name = Vec::with_capacity(frame.name_len); - let mut filename = Vec::with_capacity(frame.filename_len); - - // SAFETY: name and filename have been allocated with the amount - // of memory miri has asked for, and miri guarantees it will initialize it - unsafe { - miri_resolve_frame_names(ptr as *mut (), 0, name.as_mut_ptr(), filename.as_mut_ptr()); - - name.set_len(frame.name_len); - filename.set_len(frame.filename_len); - } - - Frame { - addr: ptr, - inner: FullMiriFrame { - name: name.into(), - filename: filename.into(), - lineno: frame.lineno, - colno: frame.colno, - fn_ptr: frame.fn_ptr, - }, - } -} - -unsafe fn trace_unsynchronized bool>(mut cb: F) { - let len = miri_backtrace_size(0); - - let mut frames = Vec::with_capacity(len); - - miri_get_backtrace(1, frames.as_mut_ptr()); - - frames.set_len(len); - - for ptr in frames.iter() { - let frame = resolve_addr(*ptr as *mut c_void); - if !cb(&super::Frame { inner: frame }) { - return; - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/mod.rs s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/mod.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,163 +0,0 @@ -use core::ffi::c_void; -use core::fmt; - -/// Inspects the current call-stack, passing all active frames into the closure -/// provided to calculate a stack trace. -/// -/// This function is the workhorse of this library in calculating the stack -/// traces for a program. The given closure `cb` is yielded instances of a -/// `Frame` which represent information about that call frame on the stack. The -/// closure is yielded frames in a top-down fashion (most recently called -/// functions first). -/// -/// The closure's return value is an indication of whether the backtrace should -/// continue. 
A return value of `false` will terminate the backtrace and return -/// immediately. -/// -/// Once a `Frame` is acquired you will likely want to call `backtrace::resolve` -/// to convert the `ip` (instruction pointer) or symbol address to a `Symbol` -/// through which the name and/or filename/line number can be learned. -/// -/// Note that this is a relatively low-level function and if you'd like to, for -/// example, capture a backtrace to be inspected later, then the `Backtrace` -/// type may be more appropriate. -/// -/// # Required features -/// -/// This function requires the `std` feature of the `backtrace` crate to be -/// enabled, and the `std` feature is enabled by default. -/// -/// # Panics -/// -/// This function strives to never panic, but if the `cb` provided panics then -/// some platforms will force a double panic to abort the process. Some -/// platforms use a C library which internally uses callbacks which cannot be -/// unwound through, so panicking from `cb` may trigger a process abort. -/// -/// # Example -/// -/// ``` -/// extern crate backtrace; -/// -/// fn main() { -/// backtrace::trace(|frame| { -/// // ... -/// -/// true // continue the backtrace -/// }); -/// } -/// ``` -#[cfg(feature = "std")] -pub fn trace bool>(cb: F) { - let _guard = crate::lock::lock(); - unsafe { trace_unsynchronized(cb) } -} - -/// Same as `trace`, only unsafe as it's unsynchronized. -/// -/// This function does not have synchronization guarantees but is available -/// when the `std` feature of this crate isn't compiled in. See the `trace` -/// function for more documentation and examples. -/// -/// # Panics -/// -/// See information on `trace` for caveats on `cb` panicking. -pub unsafe fn trace_unsynchronized bool>(mut cb: F) { - trace_imp(&mut cb) -} - -/// A trait representing one frame of a backtrace, yielded to the `trace` -/// function of this crate. 
-/// -/// The tracing function's closure will be yielded frames, and the frame is -/// virtually dispatched as the underlying implementation is not always known -/// until runtime. -#[derive(Clone)] -pub struct Frame { - pub(crate) inner: FrameImp, -} - -impl Frame { - /// Returns the current instruction pointer of this frame. - /// - /// This is normally the next instruction to execute in the frame, but not - /// all implementations list this with 100% accuracy (but it's generally - /// pretty close). - /// - /// It is recommended to pass this value to `backtrace::resolve` to turn it - /// into a symbol name. - pub fn ip(&self) -> *mut c_void { - self.inner.ip() - } - - /// Returns the current stack pointer of this frame. - /// - /// In the case that a backend cannot recover the stack pointer for this - /// frame, a null pointer is returned. - pub fn sp(&self) -> *mut c_void { - self.inner.sp() - } - - /// Returns the starting symbol address of the frame of this function. - /// - /// This will attempt to rewind the instruction pointer returned by `ip` to - /// the start of the function, returning that value. In some cases, however, - /// backends will just return `ip` from this function. - /// - /// The returned value can sometimes be used if `backtrace::resolve` failed - /// on the `ip` given above. - pub fn symbol_address(&self) -> *mut c_void { - self.inner.symbol_address() - } - - /// Returns the base address of the module to which the frame belongs. - pub fn module_base_address(&self) -> Option<*mut c_void> { - self.inner.module_base_address() - } -} - -impl fmt::Debug for Frame { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Frame") - .field("ip", &self.ip()) - .field("symbol_address", &self.symbol_address()) - .finish() - } -} - -cfg_if::cfg_if! 
{ - // This needs to come first, to ensure that - // Miri takes priority over the host platform - if #[cfg(miri)] { - pub(crate) mod miri; - use self::miri::trace as trace_imp; - pub(crate) use self::miri::Frame as FrameImp; - } else if #[cfg( - any( - all( - unix, - not(target_os = "emscripten"), - not(all(target_os = "ios", target_arch = "arm")), - not(all(target_os = "nto", target_env = "nto70")), - ), - all( - target_env = "sgx", - target_vendor = "fortanix", - ), - ) - )] { - mod libunwind; - use self::libunwind::trace as trace_imp; - pub(crate) use self::libunwind::Frame as FrameImp; - } else if #[cfg(all(windows, not(target_vendor = "uwp")))] { - mod dbghelp; - use self::dbghelp::trace as trace_imp; - pub(crate) use self::dbghelp::Frame as FrameImp; - #[cfg(target_env = "msvc")] // only used in dbghelp symbolize - pub(crate) use self::dbghelp::StackFrame; - } else { - mod noop; - use self::noop::trace as trace_imp; - pub(crate) use self::noop::Frame as FrameImp; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/noop.rs s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/noop.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/backtrace/noop.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/backtrace/noop.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,28 +0,0 @@ -//! Empty implementation of unwinding used when no other implementation is -//! appropriate. 
- -use core::ffi::c_void; - -#[inline(always)] -pub fn trace(_cb: &mut dyn FnMut(&super::Frame) -> bool) {} - -#[derive(Clone)] -pub struct Frame; - -impl Frame { - pub fn ip(&self) -> *mut c_void { - 0 as *mut _ - } - - pub fn sp(&self) -> *mut c_void { - 0 as *mut _ - } - - pub fn symbol_address(&self) -> *mut c_void { - 0 as *mut _ - } - - pub fn module_base_address(&self) -> Option<*mut c_void> { - None - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/capture.rs s390-tools-2.33.1/rust-vendor/backtrace/src/capture.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/capture.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/capture.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,555 +0,0 @@ -use crate::PrintFmt; -use crate::{resolve, resolve_frame, trace, BacktraceFmt, Symbol, SymbolName}; -use std::ffi::c_void; -use std::fmt; -use std::path::{Path, PathBuf}; -use std::prelude::v1::*; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -/// Representation of an owned and self-contained backtrace. -/// -/// This structure can be used to capture a backtrace at various points in a -/// program and later used to inspect what the backtrace was at that time. -/// -/// `Backtrace` supports pretty-printing of backtraces through its `Debug` -/// implementation. -/// -/// # Required features -/// -/// This function requires the `std` feature of the `backtrace` crate to be -/// enabled, and the `std` feature is enabled by default. -#[derive(Clone)] -#[cfg_attr(feature = "serialize-rustc", derive(RustcDecodable, RustcEncodable))] -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub struct Backtrace { - // Frames here are listed from top-to-bottom of the stack - frames: Vec, - // The index we believe is the actual start of the backtrace, omitting - // frames like `Backtrace::new` and `backtrace::trace`. 
- actual_start_index: usize, -} - -fn _assert_send_sync() { - fn _assert() {} - _assert::(); -} - -/// Captured version of a frame in a backtrace. -/// -/// This type is returned as a list from `Backtrace::frames` and represents one -/// stack frame in a captured backtrace. -/// -/// # Required features -/// -/// This function requires the `std` feature of the `backtrace` crate to be -/// enabled, and the `std` feature is enabled by default. -#[derive(Clone)] -pub struct BacktraceFrame { - frame: Frame, - symbols: Option>, -} - -#[derive(Clone)] -enum Frame { - Raw(crate::Frame), - #[allow(dead_code)] - Deserialized { - ip: usize, - symbol_address: usize, - module_base_address: Option, - }, -} - -impl Frame { - fn ip(&self) -> *mut c_void { - match *self { - Frame::Raw(ref f) => f.ip(), - Frame::Deserialized { ip, .. } => ip as *mut c_void, - } - } - - fn symbol_address(&self) -> *mut c_void { - match *self { - Frame::Raw(ref f) => f.symbol_address(), - Frame::Deserialized { symbol_address, .. } => symbol_address as *mut c_void, - } - } - - fn module_base_address(&self) -> Option<*mut c_void> { - match *self { - Frame::Raw(ref f) => f.module_base_address(), - Frame::Deserialized { - module_base_address, - .. - } => module_base_address.map(|addr| addr as *mut c_void), - } - } -} - -/// Captured version of a symbol in a backtrace. -/// -/// This type is returned as a list from `BacktraceFrame::symbols` and -/// represents the metadata for a symbol in a backtrace. -/// -/// # Required features -/// -/// This function requires the `std` feature of the `backtrace` crate to be -/// enabled, and the `std` feature is enabled by default. 
-#[derive(Clone)] -#[cfg_attr(feature = "serialize-rustc", derive(RustcDecodable, RustcEncodable))] -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub struct BacktraceSymbol { - name: Option>, - addr: Option, - filename: Option, - lineno: Option, - colno: Option, -} - -impl Backtrace { - /// Captures a backtrace at the callsite of this function, returning an - /// owned representation. - /// - /// This function is useful for representing a backtrace as an object in - /// Rust. This returned value can be sent across threads and printed - /// elsewhere, and the purpose of this value is to be entirely self - /// contained. - /// - /// Note that on some platforms acquiring a full backtrace and resolving it - /// can be extremely expensive. If the cost is too much for your application - /// it's recommended to instead use `Backtrace::new_unresolved()` which - /// avoids the symbol resolution step (which typically takes the longest) - /// and allows deferring that to a later date. - /// - /// # Examples - /// - /// ``` - /// use backtrace::Backtrace; - /// - /// let current_backtrace = Backtrace::new(); - /// ``` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - #[inline(never)] // want to make sure there's a frame here to remove - pub fn new() -> Backtrace { - let mut bt = Self::create(Self::new as usize); - bt.resolve(); - bt - } - - /// Similar to `new` except that this does not resolve any symbols, this - /// simply captures the backtrace as a list of addresses. - /// - /// At a later time the `resolve` function can be called to resolve this - /// backtrace's symbols into readable names. This function exists because - /// the resolution process can sometimes take a significant amount of time - /// whereas any one backtrace may only be rarely printed. 
- /// - /// # Examples - /// - /// ``` - /// use backtrace::Backtrace; - /// - /// let mut current_backtrace = Backtrace::new_unresolved(); - /// println!("{:?}", current_backtrace); // no symbol names - /// current_backtrace.resolve(); - /// println!("{:?}", current_backtrace); // symbol names now present - /// ``` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - #[inline(never)] // want to make sure there's a frame here to remove - pub fn new_unresolved() -> Backtrace { - Self::create(Self::new_unresolved as usize) - } - - fn create(ip: usize) -> Backtrace { - let mut frames = Vec::new(); - let mut actual_start_index = None; - trace(|frame| { - frames.push(BacktraceFrame { - frame: Frame::Raw(frame.clone()), - symbols: None, - }); - - if frame.symbol_address() as usize == ip && actual_start_index.is_none() { - actual_start_index = Some(frames.len()); - } - true - }); - - Backtrace { - frames, - actual_start_index: actual_start_index.unwrap_or(0), - } - } - - /// Returns the frames from when this backtrace was captured. - /// - /// The first entry of this slice is likely the function `Backtrace::new`, - /// and the last frame is likely something about how this thread or the main - /// function started. - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn frames(&self) -> &[BacktraceFrame] { - &self.frames[self.actual_start_index..] - } - - /// If this backtrace was created from `new_unresolved` then this function - /// will resolve all addresses in the backtrace to their symbolic names. - /// - /// If this backtrace has been previously resolved or was created through - /// `new`, this function does nothing. 
- /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn resolve(&mut self) { - for frame in self.frames.iter_mut().filter(|f| f.symbols.is_none()) { - let mut symbols = Vec::new(); - { - let sym = |symbol: &Symbol| { - symbols.push(BacktraceSymbol { - name: symbol.name().map(|m| m.as_bytes().to_vec()), - addr: symbol.addr().map(|a| a as usize), - filename: symbol.filename().map(|m| m.to_owned()), - lineno: symbol.lineno(), - colno: symbol.colno(), - }); - }; - match frame.frame { - Frame::Raw(ref f) => resolve_frame(f, sym), - Frame::Deserialized { ip, .. } => { - resolve(ip as *mut c_void, sym); - } - } - } - frame.symbols = Some(symbols); - } - } -} - -impl From> for Backtrace { - fn from(frames: Vec) -> Self { - Backtrace { - frames, - actual_start_index: 0, - } - } -} - -impl From for BacktraceFrame { - fn from(frame: crate::Frame) -> BacktraceFrame { - BacktraceFrame { - frame: Frame::Raw(frame), - symbols: None, - } - } -} - -impl Into> for Backtrace { - fn into(self) -> Vec { - self.frames - } -} - -impl BacktraceFrame { - /// Same as `Frame::ip` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn ip(&self) -> *mut c_void { - self.frame.ip() as *mut c_void - } - - /// Same as `Frame::symbol_address` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn symbol_address(&self) -> *mut c_void { - self.frame.symbol_address() as *mut c_void - } - - /// Same as `Frame::module_base_address` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. 
- pub fn module_base_address(&self) -> Option<*mut c_void> { - self.frame - .module_base_address() - .map(|addr| addr as *mut c_void) - } - - /// Returns the list of symbols that this frame corresponds to. - /// - /// Normally there is only one symbol per frame, but sometimes if a number - /// of functions are inlined into one frame then multiple symbols will be - /// returned. The first symbol listed is the "innermost function", whereas - /// the last symbol is the outermost (last caller). - /// - /// Note that if this frame came from an unresolved backtrace then this will - /// return an empty list. - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn symbols(&self) -> &[BacktraceSymbol] { - self.symbols.as_ref().map(|s| &s[..]).unwrap_or(&[]) - } -} - -impl BacktraceSymbol { - /// Same as `Symbol::name` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn name(&self) -> Option> { - self.name.as_ref().map(|s| SymbolName::new(s)) - } - - /// Same as `Symbol::addr` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn addr(&self) -> Option<*mut c_void> { - self.addr.map(|s| s as *mut c_void) - } - - /// Same as `Symbol::filename` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. 
- pub fn filename(&self) -> Option<&Path> { - self.filename.as_ref().map(|p| &**p) - } - - /// Same as `Symbol::lineno` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn lineno(&self) -> Option { - self.lineno - } - - /// Same as `Symbol::colno` - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn colno(&self) -> Option { - self.colno - } -} - -impl fmt::Debug for Backtrace { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let full = fmt.alternate(); - let (frames, style) = if full { - (&self.frames[..], PrintFmt::Full) - } else { - (&self.frames[self.actual_start_index..], PrintFmt::Short) - }; - - // When printing paths we try to strip the cwd if it exists, otherwise - // we just print the path as-is. Note that we also only do this for the - // short format, because if it's full we presumably want to print - // everything. 
- let cwd = std::env::current_dir(); - let mut print_path = - move |fmt: &mut fmt::Formatter<'_>, path: crate::BytesOrWideString<'_>| { - let path = path.into_path_buf(); - if !full { - if let Ok(cwd) = &cwd { - if let Ok(suffix) = path.strip_prefix(cwd) { - return fmt::Display::fmt(&suffix.display(), fmt); - } - } - } - fmt::Display::fmt(&path.display(), fmt) - }; - - let mut f = BacktraceFmt::new(fmt, style, &mut print_path); - f.add_context()?; - for frame in frames { - f.frame().backtrace_frame(frame)?; - } - f.finish()?; - Ok(()) - } -} - -impl Default for Backtrace { - fn default() -> Backtrace { - Backtrace::new() - } -} - -impl fmt::Debug for BacktraceFrame { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("BacktraceFrame") - .field("ip", &self.ip()) - .field("symbol_address", &self.symbol_address()) - .finish() - } -} - -impl fmt::Debug for BacktraceSymbol { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("BacktraceSymbol") - .field("name", &self.name()) - .field("addr", &self.addr()) - .field("filename", &self.filename()) - .field("lineno", &self.lineno()) - .field("colno", &self.colno()) - .finish() - } -} - -#[cfg(feature = "serialize-rustc")] -mod rustc_serialize_impls { - use super::*; - use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; - - #[derive(RustcEncodable, RustcDecodable)] - struct SerializedFrame { - ip: usize, - symbol_address: usize, - module_base_address: Option, - symbols: Option>, - } - - impl Decodable for BacktraceFrame { - fn decode(d: &mut D) -> Result - where - D: Decoder, - { - let frame: SerializedFrame = SerializedFrame::decode(d)?; - Ok(BacktraceFrame { - frame: Frame::Deserialized { - ip: frame.ip, - symbol_address: frame.symbol_address, - module_base_address: frame.module_base_address, - }, - symbols: frame.symbols, - }) - } - } - - impl Encodable for BacktraceFrame { - fn encode(&self, e: &mut E) -> Result<(), E::Error> - where - E: Encoder, - { 
- let BacktraceFrame { frame, symbols } = self; - SerializedFrame { - ip: frame.ip() as usize, - symbol_address: frame.symbol_address() as usize, - module_base_address: frame.module_base_address().map(|addr| addr as usize), - symbols: symbols.clone(), - } - .encode(e) - } - } -} - -#[cfg(feature = "serde")] -mod serde_impls { - use super::*; - use serde::de::Deserializer; - use serde::ser::Serializer; - use serde::{Deserialize, Serialize}; - - #[derive(Serialize, Deserialize)] - struct SerializedFrame { - ip: usize, - symbol_address: usize, - module_base_address: Option, - symbols: Option>, - } - - impl Serialize for BacktraceFrame { - fn serialize(&self, s: S) -> Result - where - S: Serializer, - { - let BacktraceFrame { frame, symbols } = self; - SerializedFrame { - ip: frame.ip() as usize, - symbol_address: frame.symbol_address() as usize, - module_base_address: frame.module_base_address().map(|addr| addr as usize), - symbols: symbols.clone(), - } - .serialize(s) - } - } - - impl<'a> Deserialize<'a> for BacktraceFrame { - fn deserialize(d: D) -> Result - where - D: Deserializer<'a>, - { - let frame: SerializedFrame = SerializedFrame::deserialize(d)?; - Ok(BacktraceFrame { - frame: Frame::Deserialized { - ip: frame.ip, - symbol_address: frame.symbol_address, - module_base_address: frame.module_base_address, - }, - symbols: frame.symbols, - }) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_frame_conversion() { - let mut frames = vec![]; - crate::trace(|frame| { - let converted = BacktraceFrame::from(frame.clone()); - frames.push(converted); - true - }); - - let mut manual = Backtrace::from(frames); - manual.resolve(); - let frames = manual.frames(); - - for frame in frames { - println!("{:?}", frame.ip()); - println!("{:?}", frame.symbol_address()); - println!("{:?}", frame.module_base_address()); - println!("{:?}", frame.symbols()); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/dbghelp.rs 
s390-tools-2.33.1/rust-vendor/backtrace/src/dbghelp.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/dbghelp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/dbghelp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,365 +0,0 @@ -//! A module to assist in managing dbghelp bindings on Windows -//! -//! Backtraces on Windows (at least for MSVC) are largely powered through -//! `dbghelp.dll` and the various functions that it contains. These functions -//! are currently loaded *dynamically* rather than linking to `dbghelp.dll` -//! statically. This is currently done by the standard library (and is in theory -//! required there), but is an effort to help reduce the static dll dependencies -//! of a library since backtraces are typically pretty optional. That being -//! said, `dbghelp.dll` almost always successfully loads on Windows. -//! -//! Note though that since we're loading all this support dynamically we can't -//! actually use the raw definitions in `winapi`, but rather we need to define -//! the function pointer types ourselves and use that. We don't really want to -//! be in the business of duplicating winapi, so we have a Cargo feature -//! `verify-winapi` which asserts that all bindings match those in winapi and -//! this feature is enabled on CI. -//! -//! Finally, you'll note here that the dll for `dbghelp.dll` is never unloaded, -//! and that's currently intentional. The thinking is that we can globally cache -//! it and use it between calls to the API, avoiding expensive loads/unloads. If -//! this is a problem for leak detectors or something like that we can cross the -//! bridge when we get there. - -#![allow(non_snake_case)] - -use super::windows::*; -use core::mem; -use core::ptr; - -// Work around `SymGetOptions` and `SymSetOptions` not being present in winapi -// itself. Otherwise this is only used when we're double-checking types against -// winapi. 
-#[cfg(feature = "verify-winapi")] -mod dbghelp { - use crate::windows::*; - pub use winapi::um::dbghelp::{ - StackWalk64, StackWalkEx, SymCleanup, SymFromAddrW, SymFunctionTableAccess64, - SymGetLineFromAddrW64, SymGetModuleBase64, SymGetOptions, SymInitializeW, SymSetOptions, - }; - - extern "system" { - // Not defined in winapi yet - pub fn SymFromInlineContextW( - hProcess: HANDLE, - Address: DWORD64, - InlineContext: ULONG, - Displacement: PDWORD64, - Symbol: PSYMBOL_INFOW, - ) -> BOOL; - pub fn SymGetLineFromInlineContextW( - hProcess: HANDLE, - dwAddr: DWORD64, - InlineContext: ULONG, - qwModuleBaseAddress: DWORD64, - pdwDisplacement: PDWORD, - Line: PIMAGEHLP_LINEW64, - ) -> BOOL; - } - - pub fn assert_equal_types(a: T, _b: T) -> T { - a - } -} - -// This macro is used to define a `Dbghelp` structure which internally contains -// all the function pointers that we might load. -macro_rules! dbghelp { - (extern "system" { - $(fn $name:ident($($arg:ident: $argty:ty),*) -> $ret: ty;)* - }) => ( - pub struct Dbghelp { - /// The loaded DLL for `dbghelp.dll` - dll: HMODULE, - - // Each function pointer for each function we might use - $($name: usize,)* - } - - static mut DBGHELP: Dbghelp = Dbghelp { - // Initially we haven't loaded the DLL - dll: 0 as *mut _, - // Initially all functions are set to zero to say they need to be - // dynamically loaded. - $($name: 0,)* - }; - - // Convenience typedef for each function type. - $(pub type $name = unsafe extern "system" fn($($argty),*) -> $ret;)* - - impl Dbghelp { - /// Attempts to open `dbghelp.dll`. Returns success if it works or - /// error if `LoadLibraryW` fails. - /// - /// Panics if library is already loaded. - fn ensure_open(&mut self) -> Result<(), ()> { - if !self.dll.is_null() { - return Ok(()) - } - let lib = b"dbghelp.dll\0"; - unsafe { - self.dll = LoadLibraryA(lib.as_ptr() as *const i8); - if self.dll.is_null() { - Err(()) - } else { - Ok(()) - } - } - } - - // Function for each method we'd like to use. 
When called it will - // either read the cached function pointer or load it and return the - // loaded value. Loads are asserted to succeed. - $(pub fn $name(&mut self) -> Option<$name> { - unsafe { - if self.$name == 0 { - let name = concat!(stringify!($name), "\0"); - self.$name = self.symbol(name.as_bytes())?; - } - let ret = mem::transmute::(self.$name); - #[cfg(feature = "verify-winapi")] - dbghelp::assert_equal_types(ret, dbghelp::$name); - Some(ret) - } - })* - - fn symbol(&self, symbol: &[u8]) -> Option { - unsafe { - match GetProcAddress(self.dll, symbol.as_ptr() as *const _) as usize { - 0 => None, - n => Some(n), - } - } - } - } - - // Convenience proxy to use the cleanup locks to reference dbghelp - // functions. - #[allow(dead_code)] - impl Init { - $(pub fn $name(&self) -> $name { - unsafe { - DBGHELP.$name().unwrap() - } - })* - - pub fn dbghelp(&self) -> *mut Dbghelp { - unsafe { - &mut DBGHELP - } - } - } - ) - -} - -const SYMOPT_DEFERRED_LOADS: DWORD = 0x00000004; - -dbghelp! 
{ - extern "system" { - fn SymGetOptions() -> DWORD; - fn SymSetOptions(options: DWORD) -> DWORD; - fn SymInitializeW( - handle: HANDLE, - path: PCWSTR, - invade: BOOL - ) -> BOOL; - fn SymCleanup(handle: HANDLE) -> BOOL; - fn StackWalk64( - MachineType: DWORD, - hProcess: HANDLE, - hThread: HANDLE, - StackFrame: LPSTACKFRAME64, - ContextRecord: PVOID, - ReadMemoryRoutine: PREAD_PROCESS_MEMORY_ROUTINE64, - FunctionTableAccessRoutine: PFUNCTION_TABLE_ACCESS_ROUTINE64, - GetModuleBaseRoutine: PGET_MODULE_BASE_ROUTINE64, - TranslateAddress: PTRANSLATE_ADDRESS_ROUTINE64 - ) -> BOOL; - fn SymFunctionTableAccess64( - hProcess: HANDLE, - AddrBase: DWORD64 - ) -> PVOID; - fn SymGetModuleBase64( - hProcess: HANDLE, - AddrBase: DWORD64 - ) -> DWORD64; - fn SymFromAddrW( - hProcess: HANDLE, - Address: DWORD64, - Displacement: PDWORD64, - Symbol: PSYMBOL_INFOW - ) -> BOOL; - fn SymGetLineFromAddrW64( - hProcess: HANDLE, - dwAddr: DWORD64, - pdwDisplacement: PDWORD, - Line: PIMAGEHLP_LINEW64 - ) -> BOOL; - fn StackWalkEx( - MachineType: DWORD, - hProcess: HANDLE, - hThread: HANDLE, - StackFrame: LPSTACKFRAME_EX, - ContextRecord: PVOID, - ReadMemoryRoutine: PREAD_PROCESS_MEMORY_ROUTINE64, - FunctionTableAccessRoutine: PFUNCTION_TABLE_ACCESS_ROUTINE64, - GetModuleBaseRoutine: PGET_MODULE_BASE_ROUTINE64, - TranslateAddress: PTRANSLATE_ADDRESS_ROUTINE64, - Flags: DWORD - ) -> BOOL; - fn SymFromInlineContextW( - hProcess: HANDLE, - Address: DWORD64, - InlineContext: ULONG, - Displacement: PDWORD64, - Symbol: PSYMBOL_INFOW - ) -> BOOL; - fn SymGetLineFromInlineContextW( - hProcess: HANDLE, - dwAddr: DWORD64, - InlineContext: ULONG, - qwModuleBaseAddress: DWORD64, - pdwDisplacement: PDWORD, - Line: PIMAGEHLP_LINEW64 - ) -> BOOL; - } -} - -pub struct Init { - lock: HANDLE, -} - -/// Initialize all support necessary to access `dbghelp` API functions from this -/// crate. -/// -/// Note that this function is **safe**, it internally has its own -/// synchronization. 
Also note that it is safe to call this function multiple -/// times recursively. -pub fn init() -> Result { - use core::sync::atomic::{AtomicUsize, Ordering::SeqCst}; - - // Helper function for generating a name that's unique to the process. - fn mutex_name() -> [u8; 33] { - let mut name: [u8; 33] = *b"Local\\RustBacktraceMutex00000000\0"; - let mut id = unsafe { GetCurrentProcessId() }; - // Quick and dirty no alloc u32 to hex. - let mut index = name.len() - 1; - while id > 0 { - name[index - 1] = match (id & 0xF) as u8 { - h @ 0..=9 => b'0' + h, - h => b'A' + (h - 10), - }; - id >>= 4; - index -= 1; - } - name - } - - unsafe { - // First thing we need to do is to synchronize this function. This can - // be called concurrently from other threads or recursively within one - // thread. Note that it's trickier than that though because what we're - // using here, `dbghelp`, *also* needs to be synchronized with all other - // callers to `dbghelp` in this process. - // - // Typically there aren't really that many calls to `dbghelp` within the - // same process and we can probably safely assume that we're the only - // ones accessing it. There is, however, one primary other user we have - // to worry about which is ironically ourselves, but in the standard - // library. The Rust standard library depends on this crate for - // backtrace support, and this crate also exists on crates.io. This - // means that if the standard library is printing a panic backtrace it - // may race with this crate coming from crates.io, causing segfaults. - // - // To help solve this synchronization problem we employ a - // Windows-specific trick here (it is, after all, a Windows-specific - // restriction about synchronization). We create a *session-local* named - // mutex to protect this call. 
The intention here is that the standard - // library and this crate don't have to share Rust-level APIs to - // synchronize here but can instead work behind the scenes to make sure - // they're synchronizing with one another. That way when this function - // is called through the standard library or through crates.io we can be - // sure that the same mutex is being acquired. - // - // So all of that is to say that the first thing we do here is we - // atomically create a `HANDLE` which is a named mutex on Windows. We - // synchronize a bit with other threads sharing this function - // specifically and ensure that only one handle is created per instance - // of this function. Note that the handle is never closed once it's - // stored in the global. - // - // After we've actually go the lock we simply acquire it, and our `Init` - // handle we hand out will be responsible for dropping it eventually. - static LOCK: AtomicUsize = AtomicUsize::new(0); - let mut lock = LOCK.load(SeqCst); - if lock == 0 { - let name = mutex_name(); - lock = CreateMutexA(ptr::null_mut(), 0, name.as_ptr().cast::()) as usize; - if lock == 0 { - return Err(()); - } - if let Err(other) = LOCK.compare_exchange(0, lock, SeqCst, SeqCst) { - debug_assert!(other != 0); - CloseHandle(lock as HANDLE); - lock = other; - } - } - debug_assert!(lock != 0); - let lock = lock as HANDLE; - let r = WaitForSingleObjectEx(lock, INFINITE, FALSE); - debug_assert_eq!(r, 0); - let ret = Init { lock }; - - // Ok, phew! Now that we're all safely synchronized, let's actually - // start processing everything. First up we need to ensure that - // `dbghelp.dll` is actually loaded in this process. We do this - // dynamically to avoid a static dependency. This has historically been - // done to work around weird linking issues and is intended at making - // binaries a bit more portable since this is largely just a debugging - // utility. 
- // - // Once we've opened `dbghelp.dll` we need to call some initialization - // functions in it, and that's detailed more below. We only do this - // once, though, so we've got a global boolean indicating whether we're - // done yet or not. - DBGHELP.ensure_open()?; - - static mut INITIALIZED: bool = false; - if INITIALIZED { - return Ok(ret); - } - - let orig = DBGHELP.SymGetOptions().unwrap()(); - - // Ensure that the `SYMOPT_DEFERRED_LOADS` flag is set, because - // according to MSVC's own docs about this: "This is the fastest, most - // efficient way to use the symbol handler.", so let's do that! - DBGHELP.SymSetOptions().unwrap()(orig | SYMOPT_DEFERRED_LOADS); - - // Actually initialize symbols with MSVC. Note that this can fail, but we - // ignore it. There's not a ton of prior art for this per se, but LLVM - // internally seems to ignore the return value here and one of the - // sanitizer libraries in LLVM prints a scary warning if this fails but - // basically ignores it in the long run. - // - // One case this comes up a lot for Rust is that the standard library and - // this crate on crates.io both want to compete for `SymInitializeW`. The - // standard library historically wanted to initialize then cleanup most of - // the time, but now that it's using this crate it means that someone will - // get to initialization first and the other will pick up that - // initialization. - DBGHELP.SymInitializeW().unwrap()(GetCurrentProcess(), ptr::null_mut(), TRUE); - INITIALIZED = true; - Ok(ret) - } -} - -impl Drop for Init { - fn drop(&mut self) { - unsafe { - let r = ReleaseMutex(self.lock); - debug_assert!(r != 0); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/lib.rs s390-tools-2.33.1/rust-vendor/backtrace/src/lib.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,192 +0,0 @@ -//! 
A library for acquiring a backtrace at runtime -//! -//! This library is meant to supplement the `RUST_BACKTRACE=1` support of the -//! standard library by allowing an acquisition of a backtrace at runtime -//! programmatically. The backtraces generated by this library do not need to be -//! parsed, for example, and expose the functionality of multiple backend -//! implementations. -//! -//! # Usage -//! -//! First, add this to your Cargo.toml -//! -//! ```toml -//! [dependencies] -//! backtrace = "0.3" -//! ``` -//! -//! Next: -//! -//! ``` -//! fn main() { -//! # // Unsafe here so test passes on no_std. -//! # #[cfg(feature = "std")] { -//! backtrace::trace(|frame| { -//! let ip = frame.ip(); -//! let symbol_address = frame.symbol_address(); -//! -//! // Resolve this instruction pointer to a symbol name -//! backtrace::resolve_frame(frame, |symbol| { -//! if let Some(name) = symbol.name() { -//! // ... -//! } -//! if let Some(filename) = symbol.filename() { -//! // ... -//! } -//! }); -//! -//! true // keep going to the next frame -//! }); -//! } -//! # } -//! ``` -//! -//! # Backtrace accuracy -//! -//! This crate implements best-effort attempts to get the native backtrace. This -//! is not always guaranteed to work, and some platforms don't return any -//! backtrace at all. If your application requires accurate backtraces then it's -//! recommended to closely evaluate this crate to see whether it's suitable -//! for your use case on your target platforms. -//! -//! Even on supported platforms, there's a number of reasons that backtraces may -//! be less-than-accurate, including but not limited to: -//! -//! * Unwind information may not be available. This crate primarily implements -//! backtraces by unwinding the stack, but not all functions may have -//! unwinding information (e.g. DWARF unwinding information). -//! -//! * Rust code may be compiled without unwinding information for some -//! functions. This can also happen for Rust code compiled with -//! 
`-Cpanic=abort`. You can remedy this, however, with -//! `-Cforce-unwind-tables` as a compiler option. -//! -//! * Unwind information may be inaccurate or corrupt. In the worst case -//! inaccurate unwind information can lead this library to segfault. In the -//! best case inaccurate information will result in a truncated stack trace. -//! -//! * Backtraces may not report filenames/line numbers correctly due to missing -//! or corrupt debug information. This won't lead to segfaults unlike corrupt -//! unwinding information, but missing or malformed debug information will -//! mean that filenames and line numbers will not be available. This may be -//! because debug information wasn't generated by the compiler, or it's just -//! missing on the filesystem. -//! -//! * Not all platforms are supported. For example there's no way to get a -//! backtrace on WebAssembly at the moment. -//! -//! * Crate features may be disabled. Currently this crate supports using Gimli -//! libbacktrace on non-Windows platforms for reading debuginfo for -//! backtraces. If both crate features are disabled, however, then these -//! platforms will generate a backtrace but be unable to generate symbols for -//! it. -//! -//! In most standard workflows for most standard platforms you generally don't -//! need to worry about these caveats. We'll try to fix ones where we can over -//! time, but otherwise it's important to be aware of the limitations of -//! unwinding-based backtraces! - -#![deny(missing_docs)] -#![no_std] -#![cfg_attr( - all(feature = "std", target_env = "sgx", target_vendor = "fortanix"), - feature(sgx_platform) -)] -#![warn(rust_2018_idioms)] -// When we're building as part of libstd, silence all warnings since they're -// irrelevant as this crate is developed out-of-tree. -#![cfg_attr(backtrace_in_libstd, allow(warnings))] -#![cfg_attr(not(feature = "std"), allow(dead_code))] -// We know this is deprecated, it's only here for back-compat reasons. 
-#![cfg_attr(feature = "rustc-serialize", allow(deprecated))] - -#[cfg(feature = "std")] -#[macro_use] -extern crate std; - -// This is only used for gimli right now, which is only used on some platforms, and miri -// so don't worry if it's unused in other configurations. -#[allow(unused_extern_crates)] -extern crate alloc; - -pub use self::backtrace::{trace_unsynchronized, Frame}; -mod backtrace; - -pub use self::symbolize::resolve_frame_unsynchronized; -pub use self::symbolize::{resolve_unsynchronized, Symbol, SymbolName}; -mod symbolize; - -pub use self::types::BytesOrWideString; -mod types; - -#[cfg(feature = "std")] -pub use self::symbolize::clear_symbol_cache; - -mod print; -pub use print::{BacktraceFmt, BacktraceFrameFmt, PrintFmt}; - -cfg_if::cfg_if! { - if #[cfg(feature = "std")] { - pub use self::backtrace::trace; - pub use self::symbolize::{resolve, resolve_frame}; - pub use self::capture::{Backtrace, BacktraceFrame, BacktraceSymbol}; - mod capture; - } -} - -#[allow(dead_code)] -struct Bomb { - enabled: bool, -} - -#[allow(dead_code)] -impl Drop for Bomb { - fn drop(&mut self) { - if self.enabled { - panic!("cannot panic during the backtrace function"); - } - } -} - -#[allow(dead_code)] -#[cfg(feature = "std")] -mod lock { - use std::boxed::Box; - use std::cell::Cell; - use std::sync::{Mutex, MutexGuard, Once}; - - pub struct LockGuard(Option>); - - static mut LOCK: *mut Mutex<()> = 0 as *mut _; - static INIT: Once = Once::new(); - thread_local!(static LOCK_HELD: Cell = Cell::new(false)); - - impl Drop for LockGuard { - fn drop(&mut self) { - if self.0.is_some() { - LOCK_HELD.with(|slot| { - assert!(slot.get()); - slot.set(false); - }); - } - } - } - - pub fn lock() -> LockGuard { - if LOCK_HELD.with(|l| l.get()) { - return LockGuard(None); - } - LOCK_HELD.with(|s| s.set(true)); - unsafe { - INIT.call_once(|| { - LOCK = Box::into_raw(Box::new(Mutex::new(()))); - }); - LockGuard(Some((*LOCK).lock().unwrap())) - } - } -} - -#[cfg(all(windows, 
not(target_vendor = "uwp")))] -mod dbghelp; -#[cfg(windows)] -mod windows; diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/print/fuchsia.rs s390-tools-2.33.1/rust-vendor/backtrace/src/print/fuchsia.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/print/fuchsia.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/print/fuchsia.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,441 +0,0 @@ -use core::fmt::{self, Write}; -use core::mem::{size_of, transmute}; -use core::slice::from_raw_parts; -use libc::c_char; - -extern "C" { - // dl_iterate_phdr takes a callback that will receive a dl_phdr_info pointer - // for every DSO that has been linked into the process. dl_iterate_phdr also - // ensures that the dynamic linker is locked from start to finish of the - // iteration. If the callback returns a non-zero value the iteration is - // terminated early. 'data' will be passed as the third argument to the - // callback on each call. 'size' gives the size of the dl_phdr_info. - #[allow(improper_ctypes)] - fn dl_iterate_phdr( - f: extern "C" fn(info: &dl_phdr_info, size: usize, data: &mut DsoPrinter<'_, '_>) -> i32, - data: &mut DsoPrinter<'_, '_>, - ) -> i32; -} - -// We need to parse out the build ID and some basic program header data -// which means that we need a bit of stuff from the ELF spec as well. - -const PT_LOAD: u32 = 1; -const PT_NOTE: u32 = 4; - -// Now we have to replicate, bit for bit, the structure of the dl_phdr_info -// type used by fuchsia's current dynamic linker. Chromium also has this ABI -// boundary as well as crashpad. Eventually we'd like to move these cases to -// use elf-search but we'd need to provide that in the SDK and that has not -// yet been done. Thus we (and they) are stuck having to use this method -// which incurs a tight coupling with the fuchsia libc. 
- -#[allow(non_camel_case_types)] -#[repr(C)] -struct dl_phdr_info { - addr: *const u8, - name: *const c_char, - phdr: *const Elf_Phdr, - phnum: u16, - adds: u64, - subs: u64, - tls_modid: usize, - tls_data: *const u8, -} - -impl dl_phdr_info { - fn program_headers(&self) -> PhdrIter<'_> { - PhdrIter { - phdrs: self.phdr_slice(), - base: self.addr, - } - } - // We have no way of knowing of checking if e_phoff and e_phnum are valid. - // libc should ensure this for us however so it's safe to form a slice here. - fn phdr_slice(&self) -> &[Elf_Phdr] { - unsafe { from_raw_parts(self.phdr, self.phnum as usize) } - } -} - -struct PhdrIter<'a> { - phdrs: &'a [Elf_Phdr], - base: *const u8, -} - -impl<'a> Iterator for PhdrIter<'a> { - type Item = Phdr<'a>; - fn next(&mut self) -> Option { - self.phdrs.split_first().map(|(phdr, new_phdrs)| { - self.phdrs = new_phdrs; - Phdr { - phdr, - base: self.base, - } - }) - } -} - -// Elf_Phdr represents a 64-bit ELF program header in the endianness of the target -// architecture. -#[allow(non_camel_case_types)] -#[derive(Clone, Debug)] -#[repr(C)] -struct Elf_Phdr { - p_type: u32, - p_flags: u32, - p_offset: u64, - p_vaddr: u64, - p_paddr: u64, - p_filesz: u64, - p_memsz: u64, - p_align: u64, -} - -// Phdr represents a valid ELF program header and its contents. -struct Phdr<'a> { - phdr: &'a Elf_Phdr, - base: *const u8, -} - -impl<'a> Phdr<'a> { - // We have no way of checking if p_addr or p_memsz are valid. Fuchsia's libc - // parses the notes first however so by virtue of being here these headers - // must be valid. NoteIter does not require the underlying data to be valid - // but it does require the bounds to be valid. We trust that libc has ensured - // that this is the case for us here. - fn notes(&self) -> NoteIter<'a> { - unsafe { - NoteIter::new( - self.base.add(self.phdr.p_offset as usize), - self.phdr.p_memsz as usize, - ) - } - } -} - -// The note type for build IDs. 
-const NT_GNU_BUILD_ID: u32 = 3; - -// Elf_Nhdr represents an ELF note header in the endianness of the target. -#[allow(non_camel_case_types)] -#[repr(C)] -struct Elf_Nhdr { - n_namesz: u32, - n_descsz: u32, - n_type: u32, -} - -// Note represents an ELF note (header + contents). The name is left as a u8 -// slice because it is not always null terminated and rust makes it easy enough -// to check that the bytes match eitherway. -struct Note<'a> { - name: &'a [u8], - desc: &'a [u8], - tipe: u32, -} - -// NoteIter lets you safely iterate over a note segment. It terminates as soon -// as an error occurs or there are no more notes. If you iterate over invalid -// data it will function as though no notes were found. -struct NoteIter<'a> { - base: &'a [u8], - error: bool, -} - -impl<'a> NoteIter<'a> { - // It is an invariant of function that the pointer and size given denote a - // valid range of bytes that can all be read. The contents of these bytes - // can be anything but the range must be valid for this to be safe. - unsafe fn new(base: *const u8, size: usize) -> Self { - NoteIter { - base: from_raw_parts(base, size), - error: false, - } - } -} - -// align_to aligns 'x' to 'to'-byte alignment assuming 'to' is a power of 2. -// This follows a standard pattern in C/C++ ELF parsing code where -// (x + to - 1) & -to is used. Rust does not let you negate usize so I use -// 2's-complement conversion to recreate that. -fn align_to(x: usize, to: usize) -> usize { - (x + to - 1) & (!to + 1) -} - -// take_bytes_align4 consumes num bytes from the slice (if present) and -// additionally ensures that the final slice is properlly aligned. If an -// either the number of bytes requested is too large or the slice can't be -// realigned afterwards due to not enough remaining bytes existing, None is -// returned and the slice is not modified. 
-fn take_bytes_align4<'a>(num: usize, bytes: &mut &'a [u8]) -> Option<&'a [u8]> { - if bytes.len() < align_to(num, 4) { - return None; - } - let (out, bytes_new) = bytes.split_at(num); - *bytes = &bytes_new[align_to(num, 4) - num..]; - Some(out) -} - -// This function has no real invariants the caller must uphold other than -// perhaps that 'bytes' should be aligned for performance (and on some -// architectures correctness). The values in the Elf_Nhdr fields might -// be nonsense but this function ensures no such thing. -fn take_nhdr<'a>(bytes: &mut &'a [u8]) -> Option<&'a Elf_Nhdr> { - if size_of::() > bytes.len() { - return None; - } - // This is safe as long as there is enough space and we just confirmed that - // in the if statement above so this should not be unsafe. - let out = unsafe { transmute::<*const u8, &'a Elf_Nhdr>(bytes.as_ptr()) }; - // Note that sice_of::() is always 4-byte aligned. - *bytes = &bytes[size_of::()..]; - Some(out) -} - -impl<'a> Iterator for NoteIter<'a> { - type Item = Note<'a>; - fn next(&mut self) -> Option { - // Check if we've reached the end. - if self.base.len() == 0 || self.error { - return None; - } - // We transmute out an nhdr but we carefully consider the resulting - // struct. We don't trust the namesz or descsz and we make no unsafe - // decisions based on the type. So even if we get out complete garbage - // we should still be safe. - let nhdr = take_nhdr(&mut self.base)?; - let name = take_bytes_align4(nhdr.n_namesz as usize, &mut self.base)?; - let desc = take_bytes_align4(nhdr.n_descsz as usize, &mut self.base)?; - Some(Note { - name: name, - desc: desc, - tipe: nhdr.n_type, - }) - } -} - -struct Perm(u32); - -/// Indicates that a segment is executable. -const PERM_X: u32 = 0b00000001; -/// Indicates that a segment is writable. -const PERM_W: u32 = 0b00000010; -/// Indicates that a segment is readable. 
-const PERM_R: u32 = 0b00000100; - -impl core::fmt::Display for Perm { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let v = self.0; - if v & PERM_R != 0 { - f.write_char('r')? - } - if v & PERM_W != 0 { - f.write_char('w')? - } - if v & PERM_X != 0 { - f.write_char('x')? - } - Ok(()) - } -} - -/// Represents an ELF segment at runtime. -struct Segment { - /// Gives the runtime virtual address of this segment's contents. - addr: usize, - /// Gives the memory size of this segment's contents. - size: usize, - /// Gives the module virtual address of this segment with the ELF file. - mod_rel_addr: usize, - /// Gives the permissions found in the ELF file. These permissions are not - /// necessarily the permissions present at runtime however. - flags: Perm, -} - -/// Lets one iterate over Segments from a DSO. -struct SegmentIter<'a> { - phdrs: &'a [Elf_Phdr], - base: usize, -} - -impl Iterator for SegmentIter<'_> { - type Item = Segment; - - fn next(&mut self) -> Option { - self.phdrs.split_first().and_then(|(phdr, new_phdrs)| { - self.phdrs = new_phdrs; - if phdr.p_type != PT_LOAD { - self.next() - } else { - Some(Segment { - addr: phdr.p_vaddr as usize + self.base, - size: phdr.p_memsz as usize, - mod_rel_addr: phdr.p_vaddr as usize, - flags: Perm(phdr.p_flags), - }) - } - }) - } -} - -/// Represents an ELF DSO (Dynamic Shared Object). This type references -/// the data stored in the actual DSO rather than making its own copy. -struct Dso<'a> { - /// The dynamic linker always gives us a name, even if the name is empty. - /// In the case of the main executable this name will be empty. In the case - /// of a shared object it will be the soname (see DT_SONAME). - name: &'a str, - /// On Fuchsia virtually all binaries have build IDs but this is not a strict - /// requirement. There's no way to match up DSO information with a real ELF - /// file afterwards if there is no build_id so we require that every DSO - /// have one here. 
DSO's without a build_id are ignored. - build_id: &'a [u8], - - base: usize, - phdrs: &'a [Elf_Phdr], -} - -impl Dso<'_> { - /// Returns an iterator over Segments in this DSO. - fn segments(&self) -> SegmentIter<'_> { - SegmentIter { - phdrs: self.phdrs.as_ref(), - base: self.base, - } - } -} - -struct HexSlice<'a> { - bytes: &'a [u8], -} - -impl fmt::Display for HexSlice<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for byte in self.bytes { - write!(f, "{:02x}", byte)?; - } - Ok(()) - } -} - -fn get_build_id<'a>(info: &'a dl_phdr_info) -> Option<&'a [u8]> { - for phdr in info.program_headers() { - if phdr.phdr.p_type == PT_NOTE { - for note in phdr.notes() { - if note.tipe == NT_GNU_BUILD_ID && (note.name == b"GNU\0" || note.name == b"GNU") { - return Some(note.desc); - } - } - } - } - None -} - -/// These errors encode issues that arise while parsing information about -/// each DSO. -enum Error { - /// NameError means that an error occurred while converting a C style string - /// into a rust string. - NameError(core::str::Utf8Error), - /// BuildIDError means that we didn't find a build ID. This could either be - /// because the DSO had no build ID or because the segment containing the - /// build ID was malformed. - BuildIDError, -} - -/// Calls either 'dso' or 'error' for each DSO linked into the process by the -/// dynamic linker. -/// -/// # Arguments -/// -/// * `visitor` - A DsoPrinter that will have one of eats methods called foreach DSO. -fn for_each_dso(mut visitor: &mut DsoPrinter<'_, '_>) { - extern "C" fn callback( - info: &dl_phdr_info, - _size: usize, - visitor: &mut DsoPrinter<'_, '_>, - ) -> i32 { - // dl_iterate_phdr ensures that info.name will point to a valid - // location. 
- let name_len = unsafe { libc::strlen(info.name) }; - let name_slice: &[u8] = - unsafe { core::slice::from_raw_parts(info.name as *const u8, name_len) }; - let name = match core::str::from_utf8(name_slice) { - Ok(name) => name, - Err(err) => { - return visitor.error(Error::NameError(err)) as i32; - } - }; - let build_id = match get_build_id(info) { - Some(build_id) => build_id, - None => { - return visitor.error(Error::BuildIDError) as i32; - } - }; - visitor.dso(Dso { - name: name, - build_id: build_id, - phdrs: info.phdr_slice(), - base: info.addr as usize, - }) as i32 - } - unsafe { dl_iterate_phdr(callback, &mut visitor) }; -} - -struct DsoPrinter<'a, 'b> { - writer: &'a mut core::fmt::Formatter<'b>, - module_count: usize, - error: core::fmt::Result, -} - -impl DsoPrinter<'_, '_> { - fn dso(&mut self, dso: Dso<'_>) -> bool { - let mut write = || { - write!( - self.writer, - "{{{{{{module:{:#x}:{}:elf:{}}}}}}}\n", - self.module_count, - dso.name, - HexSlice { - bytes: dso.build_id.as_ref() - } - )?; - for seg in dso.segments() { - write!( - self.writer, - "{{{{{{mmap:{:#x}:{:#x}:load:{:#x}:{}:{:#x}}}}}}}\n", - seg.addr, seg.size, self.module_count, seg.flags, seg.mod_rel_addr - )?; - } - self.module_count += 1; - Ok(()) - }; - match write() { - Ok(()) => false, - Err(err) => { - self.error = Err(err); - true - } - } - } - fn error(&mut self, _error: Error) -> bool { - false - } -} - -/// This function prints the Fuchsia symbolizer markup for all information contained in a DSO. -pub fn print_dso_context(out: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - out.write_str("{{{reset:begin}}}\n")?; - let mut visitor = DsoPrinter { - writer: out, - module_count: 0, - error: Ok(()), - }; - for_each_dso(&mut visitor); - visitor.error -} - -/// This function prints the Fuchsia symbolizer markup to end the backtrace. 
-pub fn finish_context(out: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - out.write_str("{{{reset:end}}}\n") -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/print.rs s390-tools-2.33.1/rust-vendor/backtrace/src/print.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/print.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/print.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,320 +0,0 @@ -#[cfg(feature = "std")] -use super::{BacktraceFrame, BacktraceSymbol}; -use super::{BytesOrWideString, Frame, SymbolName}; -use core::ffi::c_void; -use core::fmt; - -const HEX_WIDTH: usize = 2 + 2 * core::mem::size_of::(); - -#[cfg(target_os = "fuchsia")] -mod fuchsia; - -/// A formatter for backtraces. -/// -/// This type can be used to print a backtrace regardless of where the backtrace -/// itself comes from. If you have a `Backtrace` type then its `Debug` -/// implementation already uses this printing format. -pub struct BacktraceFmt<'a, 'b> { - fmt: &'a mut fmt::Formatter<'b>, - frame_index: usize, - format: PrintFmt, - print_path: - &'a mut (dyn FnMut(&mut fmt::Formatter<'_>, BytesOrWideString<'_>) -> fmt::Result + 'b), -} - -/// The styles of printing that we can print -#[derive(Copy, Clone, Eq, PartialEq)] -pub enum PrintFmt { - /// Prints a terser backtrace which ideally only contains relevant information - Short, - /// Prints a backtrace that contains all possible information - Full, - #[doc(hidden)] - __Nonexhaustive, -} - -impl<'a, 'b> BacktraceFmt<'a, 'b> { - /// Create a new `BacktraceFmt` which will write output to the provided - /// `fmt`. - /// - /// The `format` argument will control the style in which the backtrace is - /// printed, and the `print_path` argument will be used to print the - /// `BytesOrWideString` instances of filenames. This type itself doesn't do - /// any printing of filenames, but this callback is required to do so. 
- pub fn new( - fmt: &'a mut fmt::Formatter<'b>, - format: PrintFmt, - print_path: &'a mut (dyn FnMut(&mut fmt::Formatter<'_>, BytesOrWideString<'_>) -> fmt::Result - + 'b), - ) -> Self { - BacktraceFmt { - fmt, - frame_index: 0, - format, - print_path, - } - } - - /// Prints a preamble for the backtrace about to be printed. - /// - /// This is required on some platforms for backtraces to be fully - /// symbolicated later, and otherwise this should just be the first method - /// you call after creating a `BacktraceFmt`. - pub fn add_context(&mut self) -> fmt::Result { - #[cfg(target_os = "fuchsia")] - fuchsia::print_dso_context(self.fmt)?; - Ok(()) - } - - /// Adds a frame to the backtrace output. - /// - /// This commit returns an RAII instance of a `BacktraceFrameFmt` which can be used - /// to actually print a frame, and on destruction it will increment the - /// frame counter. - pub fn frame(&mut self) -> BacktraceFrameFmt<'_, 'a, 'b> { - BacktraceFrameFmt { - fmt: self, - symbol_index: 0, - } - } - - /// Completes the backtrace output. - /// - /// This is currently a no-op but is added for future compatibility with - /// backtrace formats. - pub fn finish(&mut self) -> fmt::Result { - #[cfg(target_os = "fuchsia")] - fuchsia::finish_context(self.fmt)?; - Ok(()) - } - - /// Inserts a message in the backtrace output. - /// - /// This allows information to be inserted between frames, - /// and won't increment the `frame_index` unlike the `frame` - /// method. - pub fn message(&mut self, msg: &str) -> fmt::Result { - self.fmt.write_str(msg) - } - - /// Return the inner formatter. - /// - /// This is used for writing custom information between frames with `write!` and `writeln!`, - /// and won't increment the `frame_index` unlike the `frame` method. - pub fn formatter(&mut self) -> &mut fmt::Formatter<'b> { - self.fmt - } -} - -/// A formatter for just one frame of a backtrace. -/// -/// This type is created by the `BacktraceFmt::frame` function. 
-pub struct BacktraceFrameFmt<'fmt, 'a, 'b> { - fmt: &'fmt mut BacktraceFmt<'a, 'b>, - symbol_index: usize, -} - -impl BacktraceFrameFmt<'_, '_, '_> { - /// Prints a `BacktraceFrame` with this frame formatter. - /// - /// This will recursively print all `BacktraceSymbol` instances within the - /// `BacktraceFrame`. - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - #[cfg(feature = "std")] - pub fn backtrace_frame(&mut self, frame: &BacktraceFrame) -> fmt::Result { - let symbols = frame.symbols(); - for symbol in symbols { - self.backtrace_symbol(frame, symbol)?; - } - if symbols.is_empty() { - self.print_raw(frame.ip(), None, None, None)?; - } - Ok(()) - } - - /// Prints a `BacktraceSymbol` within a `BacktraceFrame`. - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - #[cfg(feature = "std")] - pub fn backtrace_symbol( - &mut self, - frame: &BacktraceFrame, - symbol: &BacktraceSymbol, - ) -> fmt::Result { - self.print_raw_with_column( - frame.ip(), - symbol.name(), - // TODO: this isn't great that we don't end up printing anything - // with non-utf8 filenames. Thankfully almost everything is utf8 so - // this shouldn't be too bad. - symbol - .filename() - .and_then(|p| Some(BytesOrWideString::Bytes(p.to_str()?.as_bytes()))), - symbol.lineno(), - symbol.colno(), - )?; - Ok(()) - } - - /// Prints a raw traced `Frame` and `Symbol`, typically from within the raw - /// callbacks of this crate. - pub fn symbol(&mut self, frame: &Frame, symbol: &super::Symbol) -> fmt::Result { - self.print_raw_with_column( - frame.ip(), - symbol.name(), - symbol.filename_raw(), - symbol.lineno(), - symbol.colno(), - )?; - Ok(()) - } - - /// Adds a raw frame to the backtrace output. 
- /// - /// This method, unlike the previous, takes the raw arguments in case - /// they're being source from different locations. Note that this may be - /// called multiple times for one frame. - pub fn print_raw( - &mut self, - frame_ip: *mut c_void, - symbol_name: Option>, - filename: Option>, - lineno: Option, - ) -> fmt::Result { - self.print_raw_with_column(frame_ip, symbol_name, filename, lineno, None) - } - - /// Adds a raw frame to the backtrace output, including column information. - /// - /// This method, like the previous, takes the raw arguments in case - /// they're being source from different locations. Note that this may be - /// called multiple times for one frame. - pub fn print_raw_with_column( - &mut self, - frame_ip: *mut c_void, - symbol_name: Option>, - filename: Option>, - lineno: Option, - colno: Option, - ) -> fmt::Result { - // Fuchsia is unable to symbolize within a process so it has a special - // format which can be used to symbolize later. Print that instead of - // printing addresses in our own format here. - if cfg!(target_os = "fuchsia") { - self.print_raw_fuchsia(frame_ip)?; - } else { - self.print_raw_generic(frame_ip, symbol_name, filename, lineno, colno)?; - } - self.symbol_index += 1; - Ok(()) - } - - #[allow(unused_mut)] - fn print_raw_generic( - &mut self, - mut frame_ip: *mut c_void, - symbol_name: Option>, - filename: Option>, - lineno: Option, - colno: Option, - ) -> fmt::Result { - // No need to print "null" frames, it basically just means that the - // system backtrace was a bit eager to trace back super far. - if let PrintFmt::Short = self.fmt.format { - if frame_ip.is_null() { - return Ok(()); - } - } - - // To reduce TCB size in Sgx enclave, we do not want to implement symbol - // resolution functionality. Rather, we can print the offset of the - // address here, which could be later mapped to correct function. 
- #[cfg(all(feature = "std", target_env = "sgx", target_vendor = "fortanix"))] - { - let image_base = std::os::fortanix_sgx::mem::image_base(); - frame_ip = usize::wrapping_sub(frame_ip as usize, image_base as _) as _; - } - - // Print the index of the frame as well as the optional instruction - // pointer of the frame. If we're beyond the first symbol of this frame - // though we just print appropriate whitespace. - if self.symbol_index == 0 { - write!(self.fmt.fmt, "{:4}: ", self.fmt.frame_index)?; - if let PrintFmt::Full = self.fmt.format { - write!(self.fmt.fmt, "{:1$?} - ", frame_ip, HEX_WIDTH)?; - } - } else { - write!(self.fmt.fmt, " ")?; - if let PrintFmt::Full = self.fmt.format { - write!(self.fmt.fmt, "{:1$}", "", HEX_WIDTH + 3)?; - } - } - - // Next up write out the symbol name, using the alternate formatting for - // more information if we're a full backtrace. Here we also handle - // symbols which don't have a name, - match (symbol_name, &self.fmt.format) { - (Some(name), PrintFmt::Short) => write!(self.fmt.fmt, "{:#}", name)?, - (Some(name), PrintFmt::Full) => write!(self.fmt.fmt, "{}", name)?, - (None, _) | (_, PrintFmt::__Nonexhaustive) => write!(self.fmt.fmt, "")?, - } - self.fmt.fmt.write_str("\n")?; - - // And last up, print out the filename/line number if they're available. - if let (Some(file), Some(line)) = (filename, lineno) { - self.print_fileline(file, line, colno)?; - } - - Ok(()) - } - - fn print_fileline( - &mut self, - file: BytesOrWideString<'_>, - line: u32, - colno: Option, - ) -> fmt::Result { - // Filename/line are printed on lines under the symbol name, so print - // some appropriate whitespace to sort of right-align ourselves. - if let PrintFmt::Full = self.fmt.format { - write!(self.fmt.fmt, "{:1$}", "", HEX_WIDTH)?; - } - write!(self.fmt.fmt, " at ")?; - - // Delegate to our internal callback to print the filename and then - // print out the line number. 
- (self.fmt.print_path)(self.fmt.fmt, file)?; - write!(self.fmt.fmt, ":{}", line)?; - - // Add column number, if available. - if let Some(colno) = colno { - write!(self.fmt.fmt, ":{}", colno)?; - } - - write!(self.fmt.fmt, "\n")?; - Ok(()) - } - - fn print_raw_fuchsia(&mut self, frame_ip: *mut c_void) -> fmt::Result { - // We only care about the first symbol of a frame - if self.symbol_index == 0 { - self.fmt.fmt.write_str("{{{bt:")?; - write!(self.fmt.fmt, "{}:{:?}", self.fmt.frame_index, frame_ip)?; - self.fmt.fmt.write_str("}}}\n")?; - } - Ok(()) - } -} - -impl Drop for BacktraceFrameFmt<'_, '_, '_> { - fn drop(&mut self) { - self.fmt.frame_index += 1; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/dbghelp.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/dbghelp.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/dbghelp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/dbghelp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,218 +0,0 @@ -//! Symbolication strategy using `dbghelp.dll` on Windows, only used for MSVC -//! -//! This symbolication strategy, like with backtraces, uses dynamically loaded -//! information from `dbghelp.dll`. (see `src/dbghelp.rs` for info about why -//! it's dynamically loaded). -//! -//! This API selects its resolution strategy based on the frame provided or the -//! information we have at hand. If a frame from `StackWalkEx` is given to us -//! then we use similar APIs to generate correct information about inlined -//! functions. Otherwise if all we have is an address or an older stack frame -//! from `StackWalk64` we use the older APIs for symbolication. -//! -//! There's a good deal of support in this module, but a good chunk of it is -//! converting back and forth between Windows types and Rust types. For example -//! symbols come to us as wide strings which we then convert to utf-8 strings if -//! we can. 
- -#![allow(bad_style)] - -use super::super::{backtrace::StackFrame, dbghelp, windows::*}; -use super::{BytesOrWideString, ResolveWhat, SymbolName}; -use core::char; -use core::ffi::c_void; -use core::marker; -use core::mem; -use core::slice; - -// Store an OsString on std so we can provide the symbol name and filename. -pub struct Symbol<'a> { - name: *const [u8], - addr: *mut c_void, - line: Option, - filename: Option<*const [u16]>, - #[cfg(feature = "std")] - _filename_cache: Option<::std::ffi::OsString>, - #[cfg(not(feature = "std"))] - _filename_cache: (), - _marker: marker::PhantomData<&'a i32>, -} - -impl Symbol<'_> { - pub fn name(&self) -> Option> { - Some(SymbolName::new(unsafe { &*self.name })) - } - - pub fn addr(&self) -> Option<*mut c_void> { - Some(self.addr as *mut _) - } - - pub fn filename_raw(&self) -> Option> { - self.filename - .map(|slice| unsafe { BytesOrWideString::Wide(&*slice) }) - } - - pub fn colno(&self) -> Option { - None - } - - pub fn lineno(&self) -> Option { - self.line - } - - #[cfg(feature = "std")] - pub fn filename(&self) -> Option<&::std::path::Path> { - use std::path::Path; - - self._filename_cache.as_ref().map(Path::new) - } -} - -#[repr(C, align(8))] -struct Aligned8(T); - -pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) { - // Ensure this process's symbols are initialized - let dbghelp = match dbghelp::init() { - Ok(dbghelp) => dbghelp, - Err(()) => return, // oh well... 
- }; - - match what { - ResolveWhat::Address(_) => resolve_without_inline(&dbghelp, what.address_or_ip(), cb), - ResolveWhat::Frame(frame) => match &frame.inner.stack_frame { - StackFrame::New(frame) => resolve_with_inline(&dbghelp, frame, cb), - StackFrame::Old(_) => resolve_without_inline(&dbghelp, frame.ip(), cb), - }, - } -} - -unsafe fn resolve_with_inline( - dbghelp: &dbghelp::Init, - frame: &STACKFRAME_EX, - cb: &mut dyn FnMut(&super::Symbol), -) { - do_resolve( - |info| { - dbghelp.SymFromInlineContextW()( - GetCurrentProcess(), - super::adjust_ip(frame.AddrPC.Offset as *mut _) as u64, - frame.InlineFrameContext, - &mut 0, - info, - ) - }, - |line| { - dbghelp.SymGetLineFromInlineContextW()( - GetCurrentProcess(), - super::adjust_ip(frame.AddrPC.Offset as *mut _) as u64, - frame.InlineFrameContext, - 0, - &mut 0, - line, - ) - }, - cb, - ) -} - -unsafe fn resolve_without_inline( - dbghelp: &dbghelp::Init, - addr: *mut c_void, - cb: &mut dyn FnMut(&super::Symbol), -) { - do_resolve( - |info| dbghelp.SymFromAddrW()(GetCurrentProcess(), addr as DWORD64, &mut 0, info), - |line| dbghelp.SymGetLineFromAddrW64()(GetCurrentProcess(), addr as DWORD64, &mut 0, line), - cb, - ) -} - -unsafe fn do_resolve( - sym_from_addr: impl FnOnce(*mut SYMBOL_INFOW) -> BOOL, - get_line_from_addr: impl FnOnce(&mut IMAGEHLP_LINEW64) -> BOOL, - cb: &mut dyn FnMut(&super::Symbol), -) { - const SIZE: usize = 2 * MAX_SYM_NAME + mem::size_of::(); - let mut data = Aligned8([0u8; SIZE]); - let data = &mut data.0; - let info = &mut *(data.as_mut_ptr() as *mut SYMBOL_INFOW); - info.MaxNameLen = MAX_SYM_NAME as ULONG; - // the struct size in C. the value is different to - // `size_of::() - MAX_SYM_NAME + 1` (== 81) - // due to struct alignment. - info.SizeOfStruct = 88; - - if sym_from_addr(info) != TRUE { - return; - } - - // If the symbol name is greater than MaxNameLen, SymFromAddrW will - // give a buffer of (MaxNameLen - 1) characters and set NameLen to - // the real value. 
- let name_len = ::core::cmp::min(info.NameLen as usize, info.MaxNameLen as usize - 1); - let name_ptr = info.Name.as_ptr() as *const u16; - let name = slice::from_raw_parts(name_ptr, name_len); - - // Reencode the utf-16 symbol to utf-8 so we can use `SymbolName::new` like - // all other platforms - let mut name_len = 0; - let mut name_buffer = [0; 256]; - { - let mut remaining = &mut name_buffer[..]; - for c in char::decode_utf16(name.iter().cloned()) { - let c = c.unwrap_or(char::REPLACEMENT_CHARACTER); - let len = c.len_utf8(); - if len < remaining.len() { - c.encode_utf8(remaining); - let tmp = remaining; - remaining = &mut tmp[len..]; - name_len += len; - } else { - break; - } - } - } - let name = &name_buffer[..name_len] as *const [u8]; - - let mut line = mem::zeroed::(); - line.SizeOfStruct = mem::size_of::() as DWORD; - - let mut filename = None; - let mut lineno = None; - if get_line_from_addr(&mut line) == TRUE { - lineno = Some(line.LineNumber as u32); - - let base = line.FileName; - let mut len = 0; - while *base.offset(len) != 0 { - len += 1; - } - - let len = len as usize; - - filename = Some(slice::from_raw_parts(base, len) as *const [u16]); - } - - cb(&super::Symbol { - inner: Symbol { - name, - addr: info.Address as *mut _, - line: lineno, - filename, - _filename_cache: cache(filename), - _marker: marker::PhantomData, - }, - }) -} - -#[cfg(feature = "std")] -unsafe fn cache(filename: Option<*const [u16]>) -> Option<::std::ffi::OsString> { - use std::os::windows::ffi::OsStringExt; - filename.map(|f| ::std::ffi::OsString::from_wide(&*f)) -} - -#[cfg(not(feature = "std"))] -unsafe fn cache(_filename: Option<*const [u16]>) {} - -pub unsafe fn clear_symbol_cache() {} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/coff.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/coff.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/coff.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/coff.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,118 +0,0 @@ -use super::{gimli, Context, Endian, EndianSlice, Mapping, Path, Stash, Vec}; -use alloc::sync::Arc; -use core::convert::TryFrom; -use object::pe::{ImageDosHeader, ImageSymbol}; -use object::read::coff::ImageSymbol as _; -use object::read::pe::{ImageNtHeaders, ImageOptionalHeader, SectionTable}; -use object::read::StringTable; -use object::LittleEndian as LE; - -#[cfg(target_pointer_width = "32")] -type Pe = object::pe::ImageNtHeaders32; -#[cfg(target_pointer_width = "64")] -type Pe = object::pe::ImageNtHeaders64; - -impl Mapping { - pub fn new(path: &Path) -> Option { - let map = super::mmap(path)?; - Mapping::mk(map, |data, stash| { - Context::new(stash, Object::parse(data)?, None, None) - }) - } -} - -pub struct Object<'a> { - data: &'a [u8], - sections: SectionTable<'a>, - symbols: Vec<(usize, &'a ImageSymbol)>, - strings: StringTable<'a>, -} - -pub fn get_image_base(data: &[u8]) -> Option { - let dos_header = ImageDosHeader::parse(data).ok()?; - let mut offset = dos_header.nt_headers_offset().into(); - let (nt_headers, _) = Pe::parse(data, &mut offset).ok()?; - usize::try_from(nt_headers.optional_header().image_base()).ok() -} - -impl<'a> Object<'a> { - fn parse(data: &'a [u8]) -> Option> { - let dos_header = ImageDosHeader::parse(data).ok()?; - let mut offset = dos_header.nt_headers_offset().into(); - let (nt_headers, _) = Pe::parse(data, &mut offset).ok()?; - let sections = nt_headers.sections(data, offset).ok()?; - let symtab = nt_headers.symbols(data).ok()?; - let strings = symtab.strings(); - let image_base = usize::try_from(nt_headers.optional_header().image_base()).ok()?; - - // Collect all the symbols into a local vector which is sorted - // by address and contains enough data to learn about the symbol - // name. 
Note that we only look at function symbols and also - // note that the sections are 1-indexed because the zero section - // is special (apparently). - let mut symbols = Vec::new(); - let mut i = 0; - let len = symtab.len(); - while i < len { - let sym = symtab.symbol(i).ok()?; - i += 1 + sym.number_of_aux_symbols as usize; - let section_number = sym.section_number.get(LE); - if sym.derived_type() != object::pe::IMAGE_SYM_DTYPE_FUNCTION || section_number == 0 { - continue; - } - let addr = usize::try_from(sym.value.get(LE)).ok()?; - let section = sections - .section(usize::try_from(section_number).ok()?) - .ok()?; - let va = usize::try_from(section.virtual_address.get(LE)).ok()?; - symbols.push((addr + va + image_base, sym)); - } - symbols.sort_unstable_by_key(|x| x.0); - Some(Object { - data, - sections, - strings, - symbols, - }) - } - - pub fn section(&self, _: &Stash, name: &str) -> Option<&'a [u8]> { - Some( - self.sections - .section_by_name(self.strings, name.as_bytes())? - .1 - .pe_data(self.data) - .ok()?, - ) - } - - pub fn search_symtab<'b>(&'b self, addr: u64) -> Option<&'b [u8]> { - // Note that unlike other formats COFF doesn't embed the size of - // each symbol. As a last ditch effort search for the *closest* - // symbol to a particular address and return that one. This gets - // really wonky once symbols start getting removed because the - // symbols returned here can be totally incorrect, but we have - // no idea of knowing how to detect that. 
- let addr = usize::try_from(addr).ok()?; - let i = match self.symbols.binary_search_by_key(&addr, |p| p.0) { - Ok(i) => i, - // typically `addr` isn't in the array, but `i` is where - // we'd insert it, so the previous position must be the - // greatest less than `addr` - Err(i) => i.checked_sub(1)?, - }; - self.symbols[i].1.name(self.strings).ok() - } - - pub(super) fn search_object_map(&self, _addr: u64) -> Option<(&Context<'_>, u64)> { - None - } -} - -pub(super) fn handle_split_dwarf<'data>( - _package: Option<&gimli::DwarfPackage>>, - _stash: &'data Stash, - _load: addr2line::SplitDwarfLoad>, -) -> Option>>> { - None -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/elf.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/elf.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/elf.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/elf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,495 +0,0 @@ -use super::mystd::ffi::{OsStr, OsString}; -use super::mystd::fs; -use super::mystd::os::unix::ffi::{OsStrExt, OsStringExt}; -use super::mystd::path::{Path, PathBuf}; -use super::Either; -use super::{gimli, Context, Endian, EndianSlice, Mapping, Stash, Vec}; -use alloc::sync::Arc; -use core::convert::{TryFrom, TryInto}; -use core::str; -use object::elf::{ELFCOMPRESS_ZLIB, ELF_NOTE_GNU, NT_GNU_BUILD_ID, SHF_COMPRESSED}; -use object::read::elf::{CompressionHeader, FileHeader, SectionHeader, SectionTable, Sym}; -use object::read::StringTable; -use object::{BigEndian, Bytes, NativeEndian}; - -#[cfg(target_pointer_width = "32")] -type Elf = object::elf::FileHeader32; -#[cfg(target_pointer_width = "64")] -type Elf = object::elf::FileHeader64; - -impl Mapping { - pub fn new(path: &Path) -> Option { - let map = super::mmap(path)?; - Mapping::mk_or_other(map, |map, stash| { - let object = Object::parse(&map)?; - - // Try to locate an external debug file using the build ID. 
- if let Some(path_debug) = object.build_id().and_then(locate_build_id) { - if let Some(mapping) = Mapping::new_debug(path, path_debug, None) { - return Some(Either::A(mapping)); - } - } - - // Try to locate an external debug file using the GNU debug link section. - if let Some((path_debug, crc)) = object.gnu_debuglink_path(path) { - if let Some(mapping) = Mapping::new_debug(path, path_debug, Some(crc)) { - return Some(Either::A(mapping)); - } - } - - let dwp = Mapping::load_dwarf_package(path, stash); - - Context::new(stash, object, None, dwp).map(Either::B) - }) - } - - /// Load debuginfo from an external debug file. - fn new_debug(original_path: &Path, path: PathBuf, crc: Option) -> Option { - let map = super::mmap(&path)?; - Mapping::mk(map, |map, stash| { - let object = Object::parse(&map)?; - - if let Some(_crc) = crc { - // TODO: check crc - } - - // Try to locate a supplementary object file. - let mut sup = None; - if let Some((path_sup, build_id_sup)) = object.gnu_debugaltlink_path(&path) { - if let Some(map_sup) = super::mmap(&path_sup) { - let map_sup = stash.cache_mmap(map_sup); - if let Some(sup_) = Object::parse(map_sup) { - if sup_.build_id() == Some(build_id_sup) { - sup = Some(sup_); - } - } - } - } - - let dwp = Mapping::load_dwarf_package(original_path, stash); - - Context::new(stash, object, sup, dwp) - }) - } - - /// Try to locate a DWARF package file. 
- fn load_dwarf_package<'data>(path: &Path, stash: &'data Stash) -> Option> { - let mut path_dwp = path.to_path_buf(); - let dwp_extension = path - .extension() - .map(|previous_extension| { - let mut previous_extension = previous_extension.to_os_string(); - previous_extension.push(".dwp"); - previous_extension - }) - .unwrap_or_else(|| "dwp".into()); - path_dwp.set_extension(dwp_extension); - if let Some(map_dwp) = super::mmap(&path_dwp) { - let map_dwp = stash.cache_mmap(map_dwp); - if let Some(dwp_) = Object::parse(map_dwp) { - return Some(dwp_); - } - } - - None - } -} - -struct ParsedSym { - address: u64, - size: u64, - name: u32, -} - -pub struct Object<'a> { - /// Zero-sized type representing the native endianness. - /// - /// We could use a literal instead, but this helps ensure correctness. - endian: NativeEndian, - /// The entire file data. - data: &'a [u8], - sections: SectionTable<'a, Elf>, - strings: StringTable<'a>, - /// List of pre-parsed and sorted symbols by base address. - syms: Vec, -} - -impl<'a> Object<'a> { - fn parse(data: &'a [u8]) -> Option> { - let elf = Elf::parse(data).ok()?; - let endian = elf.endian().ok()?; - let sections = elf.sections(endian, data).ok()?; - let mut syms = sections - .symbols(endian, data, object::elf::SHT_SYMTAB) - .ok()?; - if syms.is_empty() { - syms = sections - .symbols(endian, data, object::elf::SHT_DYNSYM) - .ok()?; - } - let strings = syms.strings(); - - let mut syms = syms - .iter() - // Only look at function/object symbols. This mirrors what - // libbacktrace does and in general we're only symbolicating - // function addresses in theory. Object symbols correspond - // to data, and maybe someone's crazy enough to have a - // function go into static data? 
- .filter(|sym| { - let st_type = sym.st_type(); - st_type == object::elf::STT_FUNC || st_type == object::elf::STT_OBJECT - }) - // skip anything that's in an undefined section header, - // since it means it's an imported function and we're only - // symbolicating with locally defined functions. - .filter(|sym| sym.st_shndx(endian) != object::elf::SHN_UNDEF) - .map(|sym| { - let address = sym.st_value(endian).into(); - let size = sym.st_size(endian).into(); - let name = sym.st_name(endian); - ParsedSym { - address, - size, - name, - } - }) - .collect::>(); - syms.sort_unstable_by_key(|s| s.address); - Some(Object { - endian, - data, - sections, - strings, - syms, - }) - } - - pub fn section(&self, stash: &'a Stash, name: &str) -> Option<&'a [u8]> { - if let Some(section) = self.section_header(name) { - let mut data = Bytes(section.data(self.endian, self.data).ok()?); - - // Check for DWARF-standard (gABI) compression, i.e., as generated - // by ld's `--compress-debug-sections=zlib-gabi` flag. - let flags: u64 = section.sh_flags(self.endian).into(); - if (flags & u64::from(SHF_COMPRESSED)) == 0 { - // Not compressed. - return Some(data.0); - } - - let header = data.read::<::CompressionHeader>().ok()?; - if header.ch_type(self.endian) != ELFCOMPRESS_ZLIB { - // Zlib compression is the only known type. - return None; - } - let size = usize::try_from(header.ch_size(self.endian)).ok()?; - let buf = stash.allocate(size); - decompress_zlib(data.0, buf)?; - return Some(buf); - } - - // Check for the nonstandard GNU compression format, i.e., as generated - // by ld's `--compress-debug-sections=zlib-gnu` flag. This means that if - // we're actually asking for `.debug_info` then we need to look up a - // section named `.zdebug_info`. 
- if !name.starts_with(".debug_") { - return None; - } - let debug_name = name[7..].as_bytes(); - let compressed_section = self - .sections - .iter() - .filter_map(|header| { - let name = self.sections.section_name(self.endian, header).ok()?; - if name.starts_with(b".zdebug_") && &name[8..] == debug_name { - Some(header) - } else { - None - } - }) - .next()?; - let mut data = Bytes(compressed_section.data(self.endian, self.data).ok()?); - if data.read_bytes(8).ok()?.0 != b"ZLIB\0\0\0\0" { - return None; - } - let size = usize::try_from(data.read::>().ok()?.get(BigEndian)).ok()?; - let buf = stash.allocate(size); - decompress_zlib(data.0, buf)?; - Some(buf) - } - - fn section_header(&self, name: &str) -> Option<&::SectionHeader> { - self.sections - .section_by_name(self.endian, name.as_bytes()) - .map(|(_index, section)| section) - } - - pub fn search_symtab<'b>(&'b self, addr: u64) -> Option<&'b [u8]> { - // Same sort of binary search as Windows above - let i = match self.syms.binary_search_by_key(&addr, |sym| sym.address) { - Ok(i) => i, - Err(i) => i.checked_sub(1)?, - }; - let sym = self.syms.get(i)?; - if sym.address <= addr && addr <= sym.address + sym.size { - self.strings.get(sym.name).ok() - } else { - None - } - } - - pub(super) fn search_object_map(&self, _addr: u64) -> Option<(&Context<'_>, u64)> { - None - } - - fn build_id(&self) -> Option<&'a [u8]> { - for section in self.sections.iter() { - if let Ok(Some(mut notes)) = section.notes(self.endian, self.data) { - while let Ok(Some(note)) = notes.next() { - if note.name() == ELF_NOTE_GNU && note.n_type(self.endian) == NT_GNU_BUILD_ID { - return Some(note.desc()); - } - } - } - } - None - } - - // The contents of the ".gnu_debuglink" section is documented at: - // https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html - fn gnu_debuglink_path(&self, path: &Path) -> Option<(PathBuf, u32)> { - let section = self.section_header(".gnu_debuglink")?; - let data = section.data(self.endian, 
self.data).ok()?; - let len = data.iter().position(|x| *x == 0)?; - let filename = &data[..len]; - let offset = (len + 1 + 3) & !3; - let crc_bytes = data - .get(offset..offset + 4) - .and_then(|bytes| bytes.try_into().ok())?; - let crc = u32::from_ne_bytes(crc_bytes); - let path_debug = locate_debuglink(path, filename)?; - Some((path_debug, crc)) - } - - // The format of the ".gnu_debugaltlink" section is based on gdb. - fn gnu_debugaltlink_path(&self, path: &Path) -> Option<(PathBuf, &'a [u8])> { - let section = self.section_header(".gnu_debugaltlink")?; - let data = section.data(self.endian, self.data).ok()?; - let len = data.iter().position(|x| *x == 0)?; - let filename = &data[..len]; - let build_id = &data[len + 1..]; - let path_sup = locate_debugaltlink(path, filename, build_id)?; - Some((path_sup, build_id)) - } -} - -fn decompress_zlib(input: &[u8], output: &mut [u8]) -> Option<()> { - use miniz_oxide::inflate::core::inflate_flags::{ - TINFL_FLAG_PARSE_ZLIB_HEADER, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF, - }; - use miniz_oxide::inflate::core::{decompress, DecompressorOxide}; - use miniz_oxide::inflate::TINFLStatus; - - let (status, in_read, out_read) = decompress( - &mut DecompressorOxide::new(), - input, - output, - 0, - TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | TINFL_FLAG_PARSE_ZLIB_HEADER, - ); - if status == TINFLStatus::Done && in_read == input.len() && out_read == output.len() { - Some(()) - } else { - None - } -} - -const DEBUG_PATH: &[u8] = b"/usr/lib/debug"; - -fn debug_path_exists() -> bool { - cfg_if::cfg_if! 
{ - if #[cfg(any(target_os = "freebsd", target_os = "linux"))] { - use core::sync::atomic::{AtomicU8, Ordering}; - static DEBUG_PATH_EXISTS: AtomicU8 = AtomicU8::new(0); - - let mut exists = DEBUG_PATH_EXISTS.load(Ordering::Relaxed); - if exists == 0 { - exists = if Path::new(OsStr::from_bytes(DEBUG_PATH)).is_dir() { - 1 - } else { - 2 - }; - DEBUG_PATH_EXISTS.store(exists, Ordering::Relaxed); - } - exists == 1 - } else { - false - } - } -} - -/// Locate a debug file based on its build ID. -/// -/// The format of build id paths is documented at: -/// https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html -fn locate_build_id(build_id: &[u8]) -> Option { - const BUILD_ID_PATH: &[u8] = b"/usr/lib/debug/.build-id/"; - const BUILD_ID_SUFFIX: &[u8] = b".debug"; - - if build_id.len() < 2 { - return None; - } - - if !debug_path_exists() { - return None; - } - - let mut path = - Vec::with_capacity(BUILD_ID_PATH.len() + BUILD_ID_SUFFIX.len() + build_id.len() * 2 + 1); - path.extend(BUILD_ID_PATH); - path.push(hex(build_id[0] >> 4)); - path.push(hex(build_id[0] & 0xf)); - path.push(b'/'); - for byte in &build_id[1..] { - path.push(hex(byte >> 4)); - path.push(hex(byte & 0xf)); - } - path.extend(BUILD_ID_SUFFIX); - Some(PathBuf::from(OsString::from_vec(path))) -} - -fn hex(byte: u8) -> u8 { - if byte < 10 { - b'0' + byte - } else { - b'a' + byte - 10 - } -} - -/// Locate a file specified in a `.gnu_debuglink` section. -/// -/// `path` is the file containing the section. -/// `filename` is from the contents of the section. -/// -/// Search order is based on gdb, documented at: -/// https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html -/// -/// gdb also allows the user to customize the debug search path, but we don't. -/// -/// gdb also supports debuginfod, but we don't yet. 
-fn locate_debuglink(path: &Path, filename: &[u8]) -> Option { - let path = fs::canonicalize(path).ok()?; - let parent = path.parent()?; - let mut f = PathBuf::from(OsString::with_capacity( - DEBUG_PATH.len() + parent.as_os_str().len() + filename.len() + 2, - )); - let filename = Path::new(OsStr::from_bytes(filename)); - - // Try "/parent/filename" if it differs from "path" - f.push(parent); - f.push(filename); - if f != path && f.is_file() { - return Some(f); - } - - // Try "/parent/.debug/filename" - let mut s = OsString::from(f); - s.clear(); - f = PathBuf::from(s); - f.push(parent); - f.push(".debug"); - f.push(filename); - if f.is_file() { - return Some(f); - } - - if debug_path_exists() { - // Try "/usr/lib/debug/parent/filename" - let mut s = OsString::from(f); - s.clear(); - f = PathBuf::from(s); - f.push(OsStr::from_bytes(DEBUG_PATH)); - f.push(parent.strip_prefix("/").unwrap()); - f.push(filename); - if f.is_file() { - return Some(f); - } - } - - None -} - -/// Locate a file specified in a `.gnu_debugaltlink` section. -/// -/// `path` is the file containing the section. -/// `filename` and `build_id` are the contents of the section. -/// -/// Search order is based on gdb: -/// - filename, which is either absolute or relative to `path` -/// - the build ID path under `BUILD_ID_PATH` -/// -/// gdb also allows the user to customize the debug search path, but we don't. -/// -/// gdb also supports debuginfod, but we don't yet. 
-fn locate_debugaltlink(path: &Path, filename: &[u8], build_id: &[u8]) -> Option { - let filename = Path::new(OsStr::from_bytes(filename)); - if filename.is_absolute() { - if filename.is_file() { - return Some(filename.into()); - } - } else { - let path = fs::canonicalize(path).ok()?; - let parent = path.parent()?; - let mut f = PathBuf::from(parent); - f.push(filename); - if f.is_file() { - return Some(f); - } - } - - locate_build_id(build_id) -} - -fn convert_path(r: &R) -> Result { - let bytes = r.to_slice()?; - Ok(PathBuf::from(OsStr::from_bytes(&bytes))) -} - -pub(super) fn handle_split_dwarf<'data>( - package: Option<&gimli::DwarfPackage>>, - stash: &'data Stash, - load: addr2line::SplitDwarfLoad>, -) -> Option>>> { - if let Some(dwp) = package.as_ref() { - if let Ok(Some(cu)) = dwp.find_cu(load.dwo_id, &load.parent) { - return Some(Arc::new(cu)); - } - } - - let mut path = PathBuf::new(); - if let Some(p) = load.comp_dir.as_ref() { - path.push(convert_path(p).ok()?); - } - - path.push(convert_path(load.path.as_ref()?).ok()?); - - if let Some(map_dwo) = super::mmap(&path) { - let map_dwo = stash.cache_mmap(map_dwo); - if let Some(dwo) = Object::parse(map_dwo) { - return gimli::Dwarf::load(|id| -> Result<_, ()> { - let data = id - .dwo_name() - .and_then(|name| dwo.section(stash, name)) - .unwrap_or(&[]); - Ok(EndianSlice::new(data, Endian)) - }) - .ok() - .map(|mut dwo_dwarf| { - dwo_dwarf.make_dwo(&load.parent); - Arc::new(dwo_dwarf) - }); - } - } - - None -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,71 +0,0 @@ -// Other Unix (e.g. 
Linux) platforms use ELF as an object file format -// and typically implement an API called `dl_iterate_phdr` to load -// native libraries. - -use super::mystd::borrow::ToOwned; -use super::mystd::env; -use super::mystd::ffi::{CStr, OsStr}; -use super::mystd::os::unix::prelude::*; -use super::{Library, LibrarySegment, OsString, Vec}; -use core::slice; - -pub(super) fn native_libraries() -> Vec { - let mut ret = Vec::new(); - unsafe { - libc::dl_iterate_phdr(Some(callback), &mut ret as *mut Vec<_> as *mut _); - } - return ret; -} - -fn infer_current_exe(base_addr: usize) -> OsString { - if let Ok(entries) = super::parse_running_mmaps::parse_maps() { - let opt_path = entries - .iter() - .find(|e| e.ip_matches(base_addr) && e.pathname().len() > 0) - .map(|e| e.pathname()) - .cloned(); - if let Some(path) = opt_path { - return path; - } - } - env::current_exe().map(|e| e.into()).unwrap_or_default() -} - -// `info` should be a valid pointers. -// `vec` should be a valid pointer to a `std::Vec`. -unsafe extern "C" fn callback( - info: *mut libc::dl_phdr_info, - _size: libc::size_t, - vec: *mut libc::c_void, -) -> libc::c_int { - let info = &*info; - let libs = &mut *(vec as *mut Vec); - let is_main_prog = info.dlpi_name.is_null() || *info.dlpi_name == 0; - let name = if is_main_prog { - // The man page for dl_iterate_phdr says that the first object visited by - // callback is the main program; so the first time we encounter a - // nameless entry, we can assume its the main program and try to infer its path. - // After that, we cannot continue that assumption, and we use an empty string. 
- if libs.is_empty() { - infer_current_exe(info.dlpi_addr as usize) - } else { - OsString::new() - } - } else { - let bytes = CStr::from_ptr(info.dlpi_name).to_bytes(); - OsStr::from_bytes(bytes).to_owned() - }; - let headers = slice::from_raw_parts(info.dlpi_phdr, info.dlpi_phnum as usize); - libs.push(Library { - name, - segments: headers - .iter() - .map(|header| LibrarySegment { - len: (*header).p_memsz as usize, - stated_virtual_memory_address: (*header).p_vaddr as usize, - }) - .collect(), - bias: info.dlpi_addr as usize, - }); - 0 -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_haiku.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_haiku.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_haiku.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_haiku.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,48 +0,0 @@ -// Haiku implements the image_info struct and the get_next_image_info() -// functions to iterate through the loaded executable images. The -// image_info struct contains a pointer to the start of the .text -// section within the virtual address space, as well as the size of -// that section. All the read-only segments of the ELF-binary are in -// that part of the address space. 
- -use super::mystd::borrow::ToOwned; -use super::mystd::ffi::{CStr, OsStr}; -use super::mystd::mem::MaybeUninit; -use super::mystd::os::unix::prelude::*; -use super::{Library, LibrarySegment, Vec}; - -pub(super) fn native_libraries() -> Vec { - let mut libraries: Vec = Vec::new(); - - unsafe { - let mut info = MaybeUninit::::zeroed(); - let mut cookie: i32 = 0; - // Load the first image to get a valid info struct - let mut status = - libc::get_next_image_info(libc::B_CURRENT_TEAM, &mut cookie, info.as_mut_ptr()); - if status != libc::B_OK { - return libraries; - } - let mut info = info.assume_init(); - - while status == libc::B_OK { - let mut segments = Vec::new(); - segments.push(LibrarySegment { - stated_virtual_memory_address: 0, - len: info.text_size as usize, - }); - - let bytes = CStr::from_ptr(info.name.as_ptr()).to_bytes(); - let name = OsStr::from_bytes(bytes).to_owned(); - libraries.push(Library { - name: name, - segments: segments, - bias: info.text as usize, - }); - - status = libc::get_next_image_info(libc::B_CURRENT_TEAM, &mut cookie, &mut info); - } - } - - libraries -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_illumos.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_illumos.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_illumos.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_illumos.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,99 +0,0 @@ -use super::mystd::borrow::ToOwned; -use super::mystd::ffi::{CStr, OsStr}; -use super::mystd::os::unix::prelude::*; -use super::{Library, LibrarySegment, Vec}; -use core::mem; -use object::NativeEndian; - -#[cfg(target_pointer_width = "64")] -use object::elf::{FileHeader64 as FileHeader, ProgramHeader64 as ProgramHeader}; - -type EHdr = FileHeader; -type PHdr = ProgramHeader; - -#[repr(C)] -struct LinkMap { - l_addr: libc::c_ulong, - l_name: *const libc::c_char, - l_ld: *const 
libc::c_void, - l_next: *const LinkMap, - l_prev: *const LinkMap, - l_refname: *const libc::c_char, -} - -const RTLD_SELF: *const libc::c_void = -3isize as *const libc::c_void; -const RTLD_DI_LINKMAP: libc::c_int = 2; - -extern "C" { - fn dlinfo( - handle: *const libc::c_void, - request: libc::c_int, - p: *mut libc::c_void, - ) -> libc::c_int; -} - -pub(super) fn native_libraries() -> Vec { - let mut libs = Vec::new(); - - // Request the current link map from the runtime linker: - let map = unsafe { - let mut map: *const LinkMap = mem::zeroed(); - if dlinfo( - RTLD_SELF, - RTLD_DI_LINKMAP, - (&mut map) as *mut *const LinkMap as *mut libc::c_void, - ) != 0 - { - return libs; - } - map - }; - - // Each entry in the link map represents a loaded object: - let mut l = map; - while !l.is_null() { - // Fetch the fully qualified path of the loaded object: - let bytes = unsafe { CStr::from_ptr((*l).l_name) }.to_bytes(); - let name = OsStr::from_bytes(bytes).to_owned(); - - // The base address of the object loaded into memory: - let addr = unsafe { (*l).l_addr }; - - // Use the ELF header for this object to locate the program - // header: - let e: *const EHdr = unsafe { (*l).l_addr as *const EHdr }; - let phoff = unsafe { (*e).e_phoff }.get(NativeEndian); - let phnum = unsafe { (*e).e_phnum }.get(NativeEndian); - let etype = unsafe { (*e).e_type }.get(NativeEndian); - - let phdr: *const PHdr = (addr + phoff) as *const PHdr; - let phdr = unsafe { core::slice::from_raw_parts(phdr, phnum as usize) }; - - libs.push(Library { - name, - segments: phdr - .iter() - .map(|p| { - let memsz = p.p_memsz.get(NativeEndian); - let vaddr = p.p_vaddr.get(NativeEndian); - LibrarySegment { - len: memsz as usize, - stated_virtual_memory_address: vaddr as usize, - } - }) - .collect(), - bias: if etype == object::elf::ET_EXEC { - // Program header addresses for the base executable are - // already absolute. - 0 - } else { - // Other addresses are relative to the object base. 
- addr as usize - }, - }); - - l = unsafe { (*l).l_next }; - } - - libs -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_libnx.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_libnx.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_libnx.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_libnx.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -use super::{Library, LibrarySegment, Vec}; - -// DevkitA64 doesn't natively support debug info, but the build system will -// place debug info at the path `romfs:/debug_info.elf`. -pub(super) fn native_libraries() -> Vec { - extern "C" { - static __start__: u8; - } - - let bias = unsafe { &__start__ } as *const u8 as usize; - - let mut ret = Vec::new(); - let mut segments = Vec::new(); - segments.push(LibrarySegment { - stated_virtual_memory_address: 0, - len: usize::max_value() - bias, - }); - - let path = "romfs:/debug_info.elf"; - ret.push(Library { - name: path.into(), - segments, - bias, - }); - - ret -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_macos.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_macos.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_macos.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_macos.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,146 +0,0 @@ -#![allow(deprecated)] - -use super::mystd::ffi::{CStr, OsStr}; -use super::mystd::os::unix::prelude::*; -use super::mystd::prelude::v1::*; -use super::{Library, LibrarySegment}; -use core::convert::TryInto; -use core::mem; - -pub(super) fn native_libraries() -> Vec { - let mut ret = Vec::new(); - let images = unsafe { libc::_dyld_image_count() }; - for i in 0..images { - ret.extend(native_library(i)); - } - return ret; -} - -fn native_library(i: u32) -> Option { - use object::macho; - use 
object::read::macho::{MachHeader, Segment}; - use object::NativeEndian; - - // Fetch the name of this library which corresponds to the path of - // where to load it as well. - let name = unsafe { - let name = libc::_dyld_get_image_name(i); - if name.is_null() { - return None; - } - CStr::from_ptr(name) - }; - - // Load the image header of this library and delegate to `object` to - // parse all the load commands so we can figure out all the segments - // involved here. - let (mut load_commands, endian) = unsafe { - let header = libc::_dyld_get_image_header(i); - if header.is_null() { - return None; - } - match (*header).magic { - macho::MH_MAGIC => { - let endian = NativeEndian; - let header = &*(header as *const macho::MachHeader32); - let data = core::slice::from_raw_parts( - header as *const _ as *const u8, - mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize, - ); - (header.load_commands(endian, data, 0).ok()?, endian) - } - macho::MH_MAGIC_64 => { - let endian = NativeEndian; - let header = &*(header as *const macho::MachHeader64); - let data = core::slice::from_raw_parts( - header as *const _ as *const u8, - mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize, - ); - (header.load_commands(endian, data, 0).ok()?, endian) - } - _ => return None, - } - }; - - // Iterate over the segments and register known regions for segments - // that we find. Additionally record information bout text segments - // for processing later, see comments below. - let mut segments = Vec::new(); - let mut first_text = 0; - let mut text_fileoff_zero = false; - while let Some(cmd) = load_commands.next().ok()? { - if let Some((seg, _)) = cmd.segment_32().ok()? 
{ - if seg.name() == b"__TEXT" { - first_text = segments.len(); - if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 { - text_fileoff_zero = true; - } - } - segments.push(LibrarySegment { - len: seg.vmsize(endian).try_into().ok()?, - stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?, - }); - } - if let Some((seg, _)) = cmd.segment_64().ok()? { - if seg.name() == b"__TEXT" { - first_text = segments.len(); - if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 { - text_fileoff_zero = true; - } - } - segments.push(LibrarySegment { - len: seg.vmsize(endian).try_into().ok()?, - stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?, - }); - } - } - - // Determine the "slide" for this library which ends up being the - // bias we use to figure out where in memory objects are loaded. - // This is a bit of a weird computation though and is the result of - // trying a few things in the wild and seeing what sticks. - // - // The general idea is that the `bias` plus a segment's - // `stated_virtual_memory_address` is going to be where in the - // actual address space the segment resides. The other thing we rely - // on though is that a real address minus the `bias` is the index to - // look up in the symbol table and debuginfo. - // - // It turns out, though, that for system loaded libraries these - // calculations are incorrect. For native executables, however, it - // appears correct. Lifting some logic from LLDB's source it has - // some special-casing for the first `__TEXT` section loaded from - // file offset 0 with a nonzero size. For whatever reason when this - // is present it appears to mean that the symbol table is relative - // to just the vmaddr slide for the library. If it's *not* present - // then the symbol table is relative to the vmaddr slide plus the - // segment's stated address. 
- // - // To handle this situation if we *don't* find a text section at - // file offset zero then we increase the bias by the first text - // sections's stated address and decrease all stated addresses by - // that amount as well. That way the symbol table is always appears - // relative to the library's bias amount. This appears to have the - // right results for symbolizing via the symbol table. - // - // Honestly I'm not entirely sure whether this is right or if - // there's something else that should indicate how to do this. For - // now though this seems to work well enough (?) and we should - // always be able to tweak this over time if necessary. - // - // For some more information see #318 - let mut slide = unsafe { libc::_dyld_get_image_vmaddr_slide(i) as usize }; - if !text_fileoff_zero { - let adjust = segments[first_text].stated_virtual_memory_address; - for segment in segments.iter_mut() { - segment.stated_virtual_memory_address -= adjust; - } - slide += adjust; - } - - Some(Library { - name: OsStr::from_bytes(name.to_bytes()).to_owned(), - segments, - bias: slide, - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_windows.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_windows.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/libs_windows.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/libs_windows.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,89 +0,0 @@ -use super::super::super::windows::*; -use super::mystd::os::windows::prelude::*; -use super::{coff, mmap, Library, LibrarySegment, OsString}; -use alloc::vec; -use alloc::vec::Vec; -use core::mem; -use core::mem::MaybeUninit; - -// For loading native libraries on Windows, see some discussion on -// rust-lang/rust#71060 for the various strategies here. 
-pub(super) fn native_libraries() -> Vec { - let mut ret = Vec::new(); - unsafe { - add_loaded_images(&mut ret); - } - return ret; -} - -unsafe fn add_loaded_images(ret: &mut Vec) { - let snap = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, 0); - if snap == INVALID_HANDLE_VALUE { - return; - } - - let mut me = MaybeUninit::::zeroed().assume_init(); - me.dwSize = mem::size_of_val(&me) as DWORD; - if Module32FirstW(snap, &mut me) == TRUE { - loop { - if let Some(lib) = load_library(&me) { - ret.push(lib); - } - - if Module32NextW(snap, &mut me) != TRUE { - break; - } - } - } - - CloseHandle(snap); -} - -unsafe fn load_library(me: &MODULEENTRY32W) -> Option { - let pos = me - .szExePath - .iter() - .position(|i| *i == 0) - .unwrap_or(me.szExePath.len()); - let name = OsString::from_wide(&me.szExePath[..pos]); - - // MinGW libraries currently don't support ASLR - // (rust-lang/rust#16514), but DLLs can still be relocated around in - // the address space. It appears that addresses in debug info are - // all as-if this library was loaded at its "image base", which is a - // field in its COFF file headers. Since this is what debuginfo - // seems to list we parse the symbol table and store addresses as if - // the library was loaded at "image base" as well. - // - // The library may not be loaded at "image base", however. - // (presumably something else may be loaded there?) This is where - // the `bias` field comes into play, and we need to figure out the - // value of `bias` here. Unfortunately though it's not clear how to - // acquire this from a loaded module. What we do have, however, is - // the actual load address (`modBaseAddr`). - // - // As a bit of a cop-out for now we mmap the file, read the file - // header information, then drop the mmap. This is wasteful because - // we'll probably reopen the mmap later, but this should work well - // enough for now. 
- // - // Once we have the `image_base` (desired load location) and the - // `base_addr` (actual load location) we can fill in the `bias` - // (difference between the actual and desired) and then the stated - // address of each segment is the `image_base` since that's what the - // file says. - // - // For now it appears that unlike ELF/MachO we can make do with one - // segment per library, using `modBaseSize` as the whole size. - let mmap = mmap(name.as_ref())?; - let image_base = coff::get_image_base(&mmap)?; - let base_addr = me.modBaseAddr as usize; - Some(Library { - name, - bias: base_addr.wrapping_sub(image_base), - segments: vec![LibrarySegment { - stated_virtual_memory_address: image_base, - len: me.modBaseSize as usize, - }], - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/macho.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/macho.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/macho.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/macho.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,333 +0,0 @@ -use super::{gimli, Box, Context, Endian, EndianSlice, Mapping, Path, Stash, Vec}; -use alloc::sync::Arc; -use core::convert::TryInto; -use object::macho; -use object::read::macho::{MachHeader, Nlist, Section, Segment as _}; -use object::{Bytes, NativeEndian}; - -#[cfg(target_pointer_width = "32")] -type Mach = object::macho::MachHeader32; -#[cfg(target_pointer_width = "64")] -type Mach = object::macho::MachHeader64; -type MachSegment = ::Segment; -type MachSection = ::Section; -type MachNlist = ::Nlist; - -impl Mapping { - // The loading path for macOS is so different we just have a completely - // different implementation of the function here. On macOS we need to go - // probing the filesystem for a bunch of files. 
- pub fn new(path: &Path) -> Option { - // First up we need to load the unique UUID which is stored in the macho - // header of the file we're reading, specified at `path`. - let map = super::mmap(path)?; - let (macho, data) = find_header(&map)?; - let endian = macho.endian().ok()?; - let uuid = macho.uuid(endian, data, 0).ok()?; - - // Next we need to look for a `*.dSYM` file. For now we just probe the - // containing directory and look around for something that matches - // `*.dSYM`. Once it's found we root through the dwarf resources that it - // contains and try to find a macho file which has a matching UUID as - // the one of our own file. If we find a match that's the dwarf file we - // want to return. - if let Some(uuid) = uuid { - if let Some(parent) = path.parent() { - if let Some(mapping) = Mapping::load_dsym(parent, uuid) { - return Some(mapping); - } - } - } - - // Looks like nothing matched our UUID, so let's at least return our own - // file. This should have the symbol table for at least some - // symbolication purposes. - Mapping::mk(map, |data, stash| { - let (macho, data) = find_header(data)?; - let endian = macho.endian().ok()?; - let obj = Object::parse(macho, endian, data)?; - Context::new(stash, obj, None, None) - }) - } - - fn load_dsym(dir: &Path, uuid: [u8; 16]) -> Option { - for entry in dir.read_dir().ok()? { - let entry = entry.ok()?; - let filename = match entry.file_name().into_string() { - Ok(name) => name, - Err(_) => continue, - }; - if !filename.ends_with(".dSYM") { - continue; - } - let candidates = entry.path().join("Contents/Resources/DWARF"); - if let Some(mapping) = Mapping::try_dsym_candidate(&candidates, uuid) { - return Some(mapping); - } - } - None - } - - fn try_dsym_candidate(dir: &Path, uuid: [u8; 16]) -> Option { - // Look for files in the `DWARF` directory which have a matching uuid to - // the original object file. If we find one then we found the debug - // information. - for entry in dir.read_dir().ok()? 
{ - let entry = entry.ok()?; - let map = super::mmap(&entry.path())?; - let candidate = Mapping::mk(map, |data, stash| { - let (macho, data) = find_header(data)?; - let endian = macho.endian().ok()?; - let entry_uuid = macho.uuid(endian, data, 0).ok()??; - if entry_uuid != uuid { - return None; - } - let obj = Object::parse(macho, endian, data)?; - Context::new(stash, obj, None, None) - }); - if let Some(candidate) = candidate { - return Some(candidate); - } - } - - None - } -} - -fn find_header(data: &'_ [u8]) -> Option<(&'_ Mach, &'_ [u8])> { - use object::endian::BigEndian; - - let desired_cpu = || { - if cfg!(target_arch = "x86") { - Some(macho::CPU_TYPE_X86) - } else if cfg!(target_arch = "x86_64") { - Some(macho::CPU_TYPE_X86_64) - } else if cfg!(target_arch = "arm") { - Some(macho::CPU_TYPE_ARM) - } else if cfg!(target_arch = "aarch64") { - Some(macho::CPU_TYPE_ARM64) - } else { - None - } - }; - - let mut data = Bytes(data); - match data - .clone() - .read::>() - .ok()? - .get(NativeEndian) - { - macho::MH_MAGIC_64 | macho::MH_CIGAM_64 | macho::MH_MAGIC | macho::MH_CIGAM => {} - - macho::FAT_MAGIC | macho::FAT_CIGAM => { - let mut header_data = data; - let endian = BigEndian; - let header = header_data.read::().ok()?; - let nfat = header.nfat_arch.get(endian); - let arch = (0..nfat) - .filter_map(|_| header_data.read::().ok()) - .find(|arch| desired_cpu() == Some(arch.cputype.get(endian)))?; - let offset = arch.offset.get(endian); - let size = arch.size.get(endian); - data = data - .read_bytes_at(offset.try_into().ok()?, size.try_into().ok()?) 
- .ok()?; - } - - macho::FAT_MAGIC_64 | macho::FAT_CIGAM_64 => { - let mut header_data = data; - let endian = BigEndian; - let header = header_data.read::().ok()?; - let nfat = header.nfat_arch.get(endian); - let arch = (0..nfat) - .filter_map(|_| header_data.read::().ok()) - .find(|arch| desired_cpu() == Some(arch.cputype.get(endian)))?; - let offset = arch.offset.get(endian); - let size = arch.size.get(endian); - data = data - .read_bytes_at(offset.try_into().ok()?, size.try_into().ok()?) - .ok()?; - } - - _ => return None, - } - - Mach::parse(data.0, 0).ok().map(|h| (h, data.0)) -} - -// This is used both for executables/libraries and source object files. -pub struct Object<'a> { - endian: NativeEndian, - data: &'a [u8], - dwarf: Option<&'a [MachSection]>, - syms: Vec<(&'a [u8], u64)>, - syms_sort_by_name: bool, - // Only set for executables/libraries, and not the source object files. - object_map: Option>, - // The outer Option is for lazy loading, and the inner Option allows load errors to be cached. - object_mappings: Box<[Option>]>, -} - -impl<'a> Object<'a> { - fn parse(mach: &'a Mach, endian: NativeEndian, data: &'a [u8]) -> Option> { - let is_object = mach.filetype(endian) == object::macho::MH_OBJECT; - let mut dwarf = None; - let mut syms = Vec::new(); - let mut syms_sort_by_name = false; - let mut commands = mach.load_commands(endian, data, 0).ok()?; - let mut object_map = None; - let mut object_mappings = Vec::new(); - while let Ok(Some(command)) = commands.next() { - if let Some((segment, section_data)) = MachSegment::from_command(command).ok()? { - // Object files should have all sections in a single unnamed segment load command. - if segment.name() == b"__DWARF" || (is_object && segment.name() == b"") { - dwarf = segment.sections(endian, section_data).ok(); - } - } else if let Some(symtab) = command.symtab().ok()? 
{ - let symbols = symtab.symbols::(endian, data).ok()?; - syms = symbols - .iter() - .filter_map(|nlist: &MachNlist| { - let name = nlist.name(endian, symbols.strings()).ok()?; - if name.len() > 0 && nlist.is_definition() { - Some((name, u64::from(nlist.n_value(endian)))) - } else { - None - } - }) - .collect(); - if is_object { - // We never search object file symbols by address. - // Instead, we already know the symbol name from the executable, and we - // need to search by name to find the matching symbol in the object file. - syms.sort_unstable_by_key(|(name, _)| *name); - syms_sort_by_name = true; - } else { - syms.sort_unstable_by_key(|(_, addr)| *addr); - let map = symbols.object_map(endian); - object_mappings.resize_with(map.objects().len(), || None); - object_map = Some(map); - } - } - } - - Some(Object { - endian, - data, - dwarf, - syms, - syms_sort_by_name, - object_map, - object_mappings: object_mappings.into_boxed_slice(), - }) - } - - pub fn section(&self, _: &Stash, name: &str) -> Option<&'a [u8]> { - let name = name.as_bytes(); - let dwarf = self.dwarf?; - let section = dwarf.into_iter().find(|section| { - let section_name = section.name(); - section_name == name || { - section_name.starts_with(b"__") - && name.starts_with(b".") - && §ion_name[2..] == &name[1..] - } - })?; - Some(section.data(self.endian, self.data).ok()?) - } - - pub fn search_symtab<'b>(&'b self, addr: u64) -> Option<&'b [u8]> { - debug_assert!(!self.syms_sort_by_name); - let i = match self.syms.binary_search_by_key(&addr, |(_, addr)| *addr) { - Ok(i) => i, - Err(i) => i.checked_sub(1)?, - }; - let (sym, _addr) = self.syms.get(i)?; - Some(sym) - } - - /// Try to load a context for an object file. - /// - /// If dsymutil was not run, then the DWARF may be found in the source object files. - pub(super) fn search_object_map<'b>(&'b mut self, addr: u64) -> Option<(&Context<'b>, u64)> { - // `object_map` contains a map from addresses to symbols and object paths. 
- // Look up the address and get a mapping for the object. - let object_map = self.object_map.as_ref()?; - let symbol = object_map.get(addr)?; - let object_index = symbol.object_index(); - let mapping = self.object_mappings.get_mut(object_index)?; - if mapping.is_none() { - // No cached mapping, so create it. - *mapping = Some(object_mapping(object_map.objects().get(object_index)?)); - } - let cx: &'b Context<'static> = &mapping.as_ref()?.as_ref()?.cx; - // Don't leak the `'static` lifetime, make sure it's scoped to just ourselves. - let cx = unsafe { core::mem::transmute::<&'b Context<'static>, &'b Context<'b>>(cx) }; - - // We must translate the address in order to be able to look it up - // in the DWARF in the object file. - debug_assert!(cx.object.syms.is_empty() || cx.object.syms_sort_by_name); - let i = cx - .object - .syms - .binary_search_by_key(&symbol.name(), |(name, _)| *name) - .ok()?; - let object_symbol = cx.object.syms.get(i)?; - let object_addr = addr - .wrapping_sub(symbol.address()) - .wrapping_add(object_symbol.1); - Some((cx, object_addr)) - } -} - -fn object_mapping(path: &[u8]) -> Option { - use super::mystd::ffi::OsStr; - use super::mystd::os::unix::prelude::*; - - let map; - - // `N_OSO` symbol names can be either `/path/to/object.o` or `/path/to/archive.a(object.o)`. - let member_name = if let Some((archive_path, member_name)) = split_archive_path(path) { - map = super::mmap(Path::new(OsStr::from_bytes(archive_path)))?; - Some(member_name) - } else { - map = super::mmap(Path::new(OsStr::from_bytes(path)))?; - None - }; - Mapping::mk(map, |data, stash| { - let data = match member_name { - Some(member_name) => { - let archive = object::read::archive::ArchiveFile::parse(data).ok()?; - let member = archive - .members() - .filter_map(Result::ok) - .find(|m| m.name() == member_name)?; - member.data(data).ok()? 
- } - None => data, - }; - let (macho, data) = find_header(data)?; - let endian = macho.endian().ok()?; - let obj = Object::parse(macho, endian, data)?; - Context::new(stash, obj, None, None) - }) -} - -fn split_archive_path(path: &[u8]) -> Option<(&[u8], &[u8])> { - let (last, path) = path.split_last()?; - if *last != b')' { - return None; - } - let index = path.iter().position(|&x| x == b'(')?; - let (archive, rest) = path.split_at(index); - Some((archive, &rest[1..])) -} - -pub(super) fn handle_split_dwarf<'data>( - _package: Option<&gimli::DwarfPackage>>, - _stash: &'data Stash, - _load: addr2line::SplitDwarfLoad>, -) -> Option>>> { - None -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/mmap_fake.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/mmap_fake.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/mmap_fake.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/mmap_fake.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -use super::{mystd::io::Read, File}; -use alloc::vec::Vec; -use core::ops::Deref; - -pub struct Mmap { - vec: Vec, -} - -impl Mmap { - pub unsafe fn map(mut file: &File, len: usize) -> Option { - let mut mmap = Mmap { - vec: Vec::with_capacity(len), - }; - file.read_to_end(&mut mmap.vec).ok()?; - Some(mmap) - } -} - -impl Deref for Mmap { - type Target = [u8]; - - fn deref(&self) -> &[u8] { - &self.vec[..] 
- } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/mmap_unix.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/mmap_unix.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/mmap_unix.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/mmap_unix.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,49 +0,0 @@ -use super::mystd::fs::File; -use super::mystd::os::unix::prelude::*; -use core::ops::Deref; -use core::ptr; -use core::slice; - -#[cfg(not(all(target_os = "linux", target_env = "gnu")))] -use libc::mmap as mmap64; -#[cfg(all(target_os = "linux", target_env = "gnu"))] -use libc::mmap64; - -pub struct Mmap { - ptr: *mut libc::c_void, - len: usize, -} - -impl Mmap { - pub unsafe fn map(file: &File, len: usize) -> Option { - let ptr = mmap64( - ptr::null_mut(), - len, - libc::PROT_READ, - libc::MAP_PRIVATE, - file.as_raw_fd(), - 0, - ); - if ptr == libc::MAP_FAILED { - return None; - } - Some(Mmap { ptr, len }) - } -} - -impl Deref for Mmap { - type Target = [u8]; - - fn deref(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self.ptr as *const u8, self.len) } - } -} - -impl Drop for Mmap { - fn drop(&mut self) { - unsafe { - let r = libc::munmap(self.ptr, self.len); - debug_assert_eq!(r, 0); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/mmap_windows.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/mmap_windows.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/mmap_windows.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/mmap_windows.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,57 +0,0 @@ -use super::super::super::windows::*; -use super::mystd::fs::File; -use super::mystd::os::windows::prelude::*; -use core::ops::Deref; -use core::ptr; -use core::slice; - -pub struct Mmap { - // keep the file alive to prevent it from being deleted which would cause - 
// us to read bad data. - _file: File, - ptr: *mut c_void, - len: usize, -} - -impl Mmap { - pub unsafe fn map(file: &File, len: usize) -> Option { - let file = file.try_clone().ok()?; - let mapping = CreateFileMappingA( - file.as_raw_handle() as *mut _, - ptr::null_mut(), - PAGE_READONLY, - 0, - 0, - ptr::null(), - ); - if mapping.is_null() { - return None; - } - let ptr = MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, len); - CloseHandle(mapping); - if ptr.is_null() { - return None; - } - Some(Mmap { - _file: file, - ptr, - len, - }) - } -} -impl Deref for Mmap { - type Target = [u8]; - - fn deref(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self.ptr as *const u8, self.len) } - } -} - -impl Drop for Mmap { - fn drop(&mut self) { - unsafe { - let r = UnmapViewOfFile(self.ptr); - debug_assert!(r != 0); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,295 +0,0 @@ -// Note: This file is only currently used on targets that call out to the code -// in `mod libs_dl_iterate_phdr` (e.g. linux, freebsd, ...); it may be more -// general purpose, but it hasn't been tested elsewhere. - -use super::mystd::fs::File; -use super::mystd::io::Read; -use super::mystd::str::FromStr; -use super::{OsString, String, Vec}; - -#[derive(PartialEq, Eq, Debug)] -pub(super) struct MapsEntry { - /// start (inclusive) and limit (exclusive) of address range. 
- address: (usize, usize), - /// The perms field are the permissions for the entry - /// - /// r = read - /// w = write - /// x = execute - /// s = shared - /// p = private (copy on write) - perms: [char; 4], - /// Offset into the file (or "whatever"). - offset: usize, - /// device (major, minor) - dev: (usize, usize), - /// inode on the device. 0 indicates that no inode is associated with the memory region (e.g. uninitalized data aka BSS). - inode: usize, - /// Usually the file backing the mapping. - /// - /// Note: The man page for proc includes a note about "coordination" by - /// using readelf to see the Offset field in ELF program headers. pnkfelix - /// is not yet sure if that is intended to be a comment on pathname, or what - /// form/purpose such coordination is meant to have. - /// - /// There are also some pseudo-paths: - /// "[stack]": The initial process's (aka main thread's) stack. - /// "[stack:]": a specific thread's stack. (This was only present for a limited range of Linux verisons; it was determined to be too expensive to provide.) - /// "[vdso]": Virtual dynamically linked shared object - /// "[heap]": The process's heap - /// - /// The pathname can be blank, which means it is an anonymous mapping - /// obtained via mmap. - /// - /// Newlines in pathname are replaced with an octal escape sequence. - /// - /// The pathname may have "(deleted)" appended onto it if the file-backed - /// path has been deleted. - /// - /// Note that modifications like the latter two indicated above imply that - /// in general the pathname may be ambiguous. (I.e. you cannot tell if the - /// denoted filename actually ended with the text "(deleted)", or if that - /// was added by the maps rendering. 
- pathname: OsString, -} - -pub(super) fn parse_maps() -> Result, &'static str> { - let mut v = Vec::new(); - let mut proc_self_maps = - File::open("/proc/self/maps").map_err(|_| "Couldn't open /proc/self/maps")?; - let mut buf = String::new(); - let _bytes_read = proc_self_maps - .read_to_string(&mut buf) - .map_err(|_| "Couldn't read /proc/self/maps")?; - for line in buf.lines() { - v.push(line.parse()?); - } - - Ok(v) -} - -impl MapsEntry { - pub(super) fn pathname(&self) -> &OsString { - &self.pathname - } - - pub(super) fn ip_matches(&self, ip: usize) -> bool { - self.address.0 <= ip && ip < self.address.1 - } -} - -impl FromStr for MapsEntry { - type Err = &'static str; - - // Format: address perms offset dev inode pathname - // e.g.: "ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]" - // e.g.: "7f5985f46000-7f5985f48000 rw-p 00039000 103:06 76021795 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2" - // e.g.: "35b1a21000-35b1a22000 rw-p 00000000 00:00 0" - // - // Note that paths may contain spaces, so we can't use `str::split` for parsing (until - // Split::remainder is stabilized #77998). 
- fn from_str(s: &str) -> Result { - let (range_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); - if range_str.is_empty() { - return Err("Couldn't find address"); - } - - let (perms_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); - if perms_str.is_empty() { - return Err("Couldn't find permissions"); - } - - let (offset_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); - if offset_str.is_empty() { - return Err("Couldn't find offset"); - } - - let (dev_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); - if dev_str.is_empty() { - return Err("Couldn't find dev"); - } - - let (inode_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); - if inode_str.is_empty() { - return Err("Couldn't find inode"); - } - - // Pathname may be omitted in which case it will be empty - let pathname_str = s.trim_start(); - - let hex = |s| usize::from_str_radix(s, 16).map_err(|_| "Couldn't parse hex number"); - let address = if let Some((start, limit)) = range_str.split_once('-') { - (hex(start)?, hex(limit)?) - } else { - return Err("Couldn't parse address range"); - }; - let perms: [char; 4] = { - let mut chars = perms_str.chars(); - let mut c = || chars.next().ok_or("insufficient perms"); - let perms = [c()?, c()?, c()?, c()?]; - if chars.next().is_some() { - return Err("too many perms"); - } - perms - }; - let offset = hex(offset_str)?; - let dev = if let Some((major, minor)) = dev_str.split_once(':') { - (hex(major)?, hex(minor)?) - } else { - return Err("Couldn't parse dev"); - }; - let inode = hex(inode_str)?; - let pathname = pathname_str.into(); - - Ok(MapsEntry { - address, - perms, - offset, - dev, - inode, - pathname, - }) - } -} - -// Make sure we can parse 64-bit sample output if we're on a 64-bit target. 
-#[cfg(target_pointer_width = "64")] -#[test] -fn check_maps_entry_parsing_64bit() { - assert_eq!( - "ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 \ - [vsyscall]" - .parse::() - .unwrap(), - MapsEntry { - address: (0xffffffffff600000, 0xffffffffff601000), - perms: ['-', '-', 'x', 'p'], - offset: 0x00000000, - dev: (0x00, 0x00), - inode: 0x0, - pathname: "[vsyscall]".into(), - } - ); - - assert_eq!( - "7f5985f46000-7f5985f48000 rw-p 00039000 103:06 76021795 \ - /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2" - .parse::() - .unwrap(), - MapsEntry { - address: (0x7f5985f46000, 0x7f5985f48000), - perms: ['r', 'w', '-', 'p'], - offset: 0x00039000, - dev: (0x103, 0x06), - inode: 0x76021795, - pathname: "/usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2".into(), - } - ); - assert_eq!( - "35b1a21000-35b1a22000 rw-p 00000000 00:00 0" - .parse::() - .unwrap(), - MapsEntry { - address: (0x35b1a21000, 0x35b1a22000), - perms: ['r', 'w', '-', 'p'], - offset: 0x00000000, - dev: (0x00, 0x00), - inode: 0x0, - pathname: Default::default(), - } - ); -} - -// (This output was taken from a 32-bit machine, but will work on any target) -#[test] -fn check_maps_entry_parsing_32bit() { - /* Example snippet of output: - 08056000-08077000 rw-p 00000000 00:00 0 [heap] - b7c79000-b7e02000 r--p 00000000 08:01 60662705 /usr/lib/locale/locale-archive - b7e02000-b7e03000 rw-p 00000000 00:00 0 - */ - assert_eq!( - "08056000-08077000 rw-p 00000000 00:00 0 \ - [heap]" - .parse::() - .unwrap(), - MapsEntry { - address: (0x08056000, 0x08077000), - perms: ['r', 'w', '-', 'p'], - offset: 0x00000000, - dev: (0x00, 0x00), - inode: 0x0, - pathname: "[heap]".into(), - } - ); - - assert_eq!( - "b7c79000-b7e02000 r--p 00000000 08:01 60662705 \ - /usr/lib/locale/locale-archive" - .parse::() - .unwrap(), - MapsEntry { - address: (0xb7c79000, 0xb7e02000), - perms: ['r', '-', '-', 'p'], - offset: 0x00000000, - dev: (0x08, 0x01), - inode: 0x60662705, - pathname: "/usr/lib/locale/locale-archive".into(), - } - 
); - assert_eq!( - "b7e02000-b7e03000 rw-p 00000000 00:00 0" - .parse::() - .unwrap(), - MapsEntry { - address: (0xb7e02000, 0xb7e03000), - perms: ['r', 'w', '-', 'p'], - offset: 0x00000000, - dev: (0x00, 0x00), - inode: 0x0, - pathname: Default::default(), - } - ); - assert_eq!( - "b7c79000-b7e02000 r--p 00000000 08:01 60662705 \ - /executable/path/with some spaces" - .parse::() - .unwrap(), - MapsEntry { - address: (0xb7c79000, 0xb7e02000), - perms: ['r', '-', '-', 'p'], - offset: 0x00000000, - dev: (0x08, 0x01), - inode: 0x60662705, - pathname: "/executable/path/with some spaces".into(), - } - ); - assert_eq!( - "b7c79000-b7e02000 r--p 00000000 08:01 60662705 \ - /executable/path/with multiple-continuous spaces " - .parse::() - .unwrap(), - MapsEntry { - address: (0xb7c79000, 0xb7e02000), - perms: ['r', '-', '-', 'p'], - offset: 0x00000000, - dev: (0x08, 0x01), - inode: 0x60662705, - pathname: "/executable/path/with multiple-continuous spaces ".into(), - } - ); - assert_eq!( - " b7c79000-b7e02000 r--p 00000000 08:01 60662705 \ - /executable/path/starts-with-spaces" - .parse::() - .unwrap(), - MapsEntry { - address: (0xb7c79000, 0xb7e02000), - perms: ['r', '-', '-', 'p'], - offset: 0x00000000, - dev: (0x08, 0x01), - inode: 0x60662705, - pathname: "/executable/path/starts-with-spaces".into(), - } - ); -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/stash.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/stash.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli/stash.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli/stash.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,50 +0,0 @@ -// only used on Linux right now, so allow dead code elsewhere -#![cfg_attr(not(target_os = "linux"), allow(dead_code))] - -use super::Mmap; -use alloc::vec; -use alloc::vec::Vec; -use core::cell::UnsafeCell; - -/// A simple arena allocator for byte buffers. 
-pub struct Stash { - buffers: UnsafeCell>>, - mmaps: UnsafeCell>, -} - -impl Stash { - pub fn new() -> Stash { - Stash { - buffers: UnsafeCell::new(Vec::new()), - mmaps: UnsafeCell::new(Vec::new()), - } - } - - /// Allocates a buffer of the specified size and returns a mutable reference - /// to it. - pub fn allocate(&self, size: usize) -> &mut [u8] { - // SAFETY: this is the only function that ever constructs a mutable - // reference to `self.buffers`. - let buffers = unsafe { &mut *self.buffers.get() }; - let i = buffers.len(); - buffers.push(vec![0; size]); - // SAFETY: we never remove elements from `self.buffers`, so a reference - // to the data inside any buffer will live as long as `self` does. - &mut buffers[i] - } - - /// Stores a `Mmap` for the lifetime of this `Stash`, returning a pointer - /// which is scoped to just this lifetime. - pub fn cache_mmap(&self, map: Mmap) -> &[u8] { - // SAFETY: this is the only location for a mutable pointer to - // `mmaps`, and this structure isn't threadsafe to shared across - // threads either. We also never remove elements from `self.mmaps`, - // so a reference to the data inside the map will live as long as - // `self` does. - unsafe { - let mmaps = &mut *self.mmaps.get(); - mmaps.push(map); - mmaps.last().unwrap() - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/gimli.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/gimli.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,511 +0,0 @@ -//! Support for symbolication using the `gimli` crate on crates.io -//! -//! This is the default symbolication implementation for Rust. 
- -use self::gimli::read::EndianSlice; -use self::gimli::NativeEndian as Endian; -use self::mmap::Mmap; -use self::stash::Stash; -use super::BytesOrWideString; -use super::ResolveWhat; -use super::SymbolName; -use addr2line::gimli; -use core::convert::TryInto; -use core::mem; -use core::u32; -use libc::c_void; -use mystd::ffi::OsString; -use mystd::fs::File; -use mystd::path::Path; -use mystd::prelude::v1::*; - -#[cfg(backtrace_in_libstd)] -mod mystd { - pub use crate::*; -} -#[cfg(not(backtrace_in_libstd))] -extern crate std as mystd; - -cfg_if::cfg_if! { - if #[cfg(windows)] { - #[path = "gimli/mmap_windows.rs"] - mod mmap; - } else if #[cfg(any( - target_os = "android", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "haiku", - target_os = "ios", - target_os = "linux", - target_os = "macos", - target_os = "openbsd", - target_os = "solaris", - target_os = "illumos", - ))] { - #[path = "gimli/mmap_unix.rs"] - mod mmap; - } else { - #[path = "gimli/mmap_fake.rs"] - mod mmap; - } -} - -mod stash; - -const MAPPINGS_CACHE_SIZE: usize = 4; - -struct Mapping { - // 'static lifetime is a lie to hack around lack of support for self-referential structs. - cx: Context<'static>, - _map: Mmap, - stash: Stash, -} - -enum Either { - #[allow(dead_code)] - A(A), - B(B), -} - -impl Mapping { - /// Creates a `Mapping` by ensuring that the `data` specified is used to - /// create a `Context` and it can only borrow from that or the `Stash` of - /// decompressed sections or auxiliary data. - fn mk(data: Mmap, mk: F) -> Option - where - F: for<'a> FnOnce(&'a [u8], &'a Stash) -> Option>, - { - Mapping::mk_or_other(data, move |data, stash| { - let cx = mk(data, stash)?; - Some(Either::B(cx)) - }) - } - - /// Creates a `Mapping` from `data`, or if the closure decides to, returns a - /// different mapping. 
- fn mk_or_other(data: Mmap, mk: F) -> Option - where - F: for<'a> FnOnce(&'a [u8], &'a Stash) -> Option>>, - { - let stash = Stash::new(); - let cx = match mk(&data, &stash)? { - Either::A(mapping) => return Some(mapping), - Either::B(cx) => cx, - }; - Some(Mapping { - // Convert to 'static lifetimes since the symbols should - // only borrow `map` and `stash` and we're preserving them below. - cx: unsafe { core::mem::transmute::, Context<'static>>(cx) }, - _map: data, - stash: stash, - }) - } -} - -struct Context<'a> { - dwarf: addr2line::Context>, - object: Object<'a>, - package: Option>>, -} - -impl<'data> Context<'data> { - fn new( - stash: &'data Stash, - object: Object<'data>, - sup: Option>, - dwp: Option>, - ) -> Option> { - let mut sections = gimli::Dwarf::load(|id| -> Result<_, ()> { - let data = object.section(stash, id.name()).unwrap_or(&[]); - Ok(EndianSlice::new(data, Endian)) - }) - .ok()?; - - if let Some(sup) = sup { - sections - .load_sup(|id| -> Result<_, ()> { - let data = sup.section(stash, id.name()).unwrap_or(&[]); - Ok(EndianSlice::new(data, Endian)) - }) - .ok()?; - } - let dwarf = addr2line::Context::from_dwarf(sections).ok()?; - - let mut package = None; - if let Some(dwp) = dwp { - package = Some( - gimli::DwarfPackage::load( - |id| -> Result<_, gimli::Error> { - let data = id - .dwo_name() - .and_then(|name| dwp.section(stash, name)) - .unwrap_or(&[]); - Ok(EndianSlice::new(data, Endian)) - }, - EndianSlice::new(&[], Endian), - ) - .ok()?, - ); - } - - Some(Context { - dwarf, - object, - package, - }) - } - - fn find_frames( - &'_ self, - stash: &'data Stash, - probe: u64, - ) -> gimli::Result>> { - use addr2line::{LookupContinuation, LookupResult}; - - let mut l = self.dwarf.find_frames(probe); - loop { - let (load, continuation) = match l { - LookupResult::Output(output) => break output, - LookupResult::Load { load, continuation } => (load, continuation), - }; - - l = continuation.resume(handle_split_dwarf(self.package.as_ref(), 
stash, load)); - } - } -} - -fn mmap(path: &Path) -> Option { - let file = File::open(path).ok()?; - let len = file.metadata().ok()?.len().try_into().ok()?; - unsafe { Mmap::map(&file, len) } -} - -cfg_if::cfg_if! { - if #[cfg(windows)] { - mod coff; - use self::coff::{handle_split_dwarf, Object}; - } else if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - ))] { - mod macho; - use self::macho::{handle_split_dwarf, Object}; - } else { - mod elf; - use self::elf::{handle_split_dwarf, Object}; - } -} - -cfg_if::cfg_if! { - if #[cfg(windows)] { - mod libs_windows; - use libs_windows::native_libraries; - } else if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - ))] { - mod libs_macos; - use libs_macos::native_libraries; - } else if #[cfg(target_os = "illumos")] { - mod libs_illumos; - use libs_illumos::native_libraries; - } else if #[cfg(all( - any( - target_os = "linux", - target_os = "fuchsia", - target_os = "freebsd", - target_os = "openbsd", - target_os = "netbsd", - all(target_os = "android", feature = "dl_iterate_phdr"), - ), - not(target_env = "uclibc"), - ))] { - mod libs_dl_iterate_phdr; - use libs_dl_iterate_phdr::native_libraries; - #[path = "gimli/parse_running_mmaps_unix.rs"] - mod parse_running_mmaps; - } else if #[cfg(target_env = "libnx")] { - mod libs_libnx; - use libs_libnx::native_libraries; - } else if #[cfg(target_os = "haiku")] { - mod libs_haiku; - use libs_haiku::native_libraries; - } else { - // Everything else should doesn't know how to load native libraries. - fn native_libraries() -> Vec { - Vec::new() - } - } -} - -#[derive(Default)] -struct Cache { - /// All known shared libraries that have been loaded. - libraries: Vec, - - /// Mappings cache where we retain parsed dwarf information. - /// - /// This list has a fixed capacity for its entire lifetime which never - /// increases. 
The `usize` element of each pair is an index into `libraries` - /// above where `usize::max_value()` represents the current executable. The - /// `Mapping` is corresponding parsed dwarf information. - /// - /// Note that this is basically an LRU cache and we'll be shifting things - /// around in here as we symbolize addresses. - mappings: Vec<(usize, Mapping)>, -} - -struct Library { - name: OsString, - /// Segments of this library loaded into memory, and where they're loaded. - segments: Vec, - /// The "bias" of this library, typically where it's loaded into memory. - /// This value is added to each segment's stated address to get the actual - /// virtual memory address that the segment is loaded into. Additionally - /// this bias is subtracted from real virtual memory addresses to index into - /// debuginfo and the symbol table. - bias: usize, -} - -struct LibrarySegment { - /// The stated address of this segment in the object file. This is not - /// actually where the segment is loaded, but rather this address plus the - /// containing library's `bias` is where to find it. - stated_virtual_memory_address: usize, - /// The size of this segment in memory. - len: usize, -} - -// unsafe because this is required to be externally synchronized -pub unsafe fn clear_symbol_cache() { - Cache::with_global(|cache| cache.mappings.clear()); -} - -impl Cache { - fn new() -> Cache { - Cache { - mappings: Vec::with_capacity(MAPPINGS_CACHE_SIZE), - libraries: native_libraries(), - } - } - - // unsafe because this is required to be externally synchronized - unsafe fn with_global(f: impl FnOnce(&mut Self)) { - // A very small, very simple LRU cache for debug info mappings. - // - // The hit rate should be very high, since the typical stack doesn't cross - // between many shared libraries. - // - // The `addr2line::Context` structures are pretty expensive to create. 
Its - // cost is expected to be amortized by subsequent `locate` queries, which - // leverage the structures built when constructing `addr2line::Context`s to - // get nice speedups. If we didn't have this cache, that amortization would - // never happen, and symbolicating backtraces would be ssssllllooooowwww. - static mut MAPPINGS_CACHE: Option = None; - - f(MAPPINGS_CACHE.get_or_insert_with(|| Cache::new())) - } - - fn avma_to_svma(&self, addr: *const u8) -> Option<(usize, *const u8)> { - self.libraries - .iter() - .enumerate() - .filter_map(|(i, lib)| { - // First up, test if this `lib` has any segment containing the - // `addr` (handling relocation). If this check passes then we - // can continue below and actually translate the address. - // - // Note that we're using `wrapping_add` here to avoid overflow - // checks. It's been seen in the wild that the SVMA + bias - // computation overflows. It seems a bit odd that would happen - // but there's not a huge amount we can do about it other than - // probably just ignore those segments since they're likely - // pointing off into space. This originally came up in - // rust-lang/backtrace-rs#329. - if !lib.segments.iter().any(|s| { - let svma = s.stated_virtual_memory_address; - let start = svma.wrapping_add(lib.bias); - let end = start.wrapping_add(s.len); - let address = addr as usize; - start <= address && address < end - }) { - return None; - } - - // Now that we know `lib` contains `addr`, we can offset with - // the bias to find the stated virtual memory address. - let svma = (addr as usize).wrapping_sub(lib.bias); - Some((i, svma as *const u8)) - }) - .next() - } - - fn mapping_for_lib<'a>(&'a mut self, lib: usize) -> Option<(&'a mut Context<'a>, &'a Stash)> { - let idx = self.mappings.iter().position(|(idx, _)| *idx == lib); - - // Invariant: after this conditional completes without early returning - // from an error, the cache entry for this path is at index 0. 
- - if let Some(idx) = idx { - // When the mapping is already in the cache, move it to the front. - if idx != 0 { - let entry = self.mappings.remove(idx); - self.mappings.insert(0, entry); - } - } else { - // When the mapping is not in the cache, create a new mapping, - // insert it into the front of the cache, and evict the oldest cache - // entry if necessary. - let name = &self.libraries[lib].name; - let mapping = Mapping::new(name.as_ref())?; - - if self.mappings.len() == MAPPINGS_CACHE_SIZE { - self.mappings.pop(); - } - - self.mappings.insert(0, (lib, mapping)); - } - - let mapping = &mut self.mappings[0].1; - let cx: &'a mut Context<'static> = &mut mapping.cx; - let stash: &'a Stash = &mapping.stash; - // don't leak the `'static` lifetime, make sure it's scoped to just - // ourselves - Some(( - unsafe { mem::transmute::<&'a mut Context<'static>, &'a mut Context<'a>>(cx) }, - stash, - )) - } -} - -pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) { - let addr = what.address_or_ip(); - let mut call = |sym: Symbol<'_>| { - // Extend the lifetime of `sym` to `'static` since we are unfortunately - // required to here, but it's only ever going out as a reference so no - // reference to it should be persisted beyond this frame anyway. - let sym = mem::transmute::, Symbol<'static>>(sym); - (cb)(&super::Symbol { inner: sym }); - }; - - Cache::with_global(|cache| { - let (lib, addr) = match cache.avma_to_svma(addr as *const u8) { - Some(pair) => pair, - None => return, - }; - - // Finally, get a cached mapping or create a new mapping for this file, and - // evaluate the DWARF info to find the file/line/name for this address. 
- let (cx, stash) = match cache.mapping_for_lib(lib) { - Some((cx, stash)) => (cx, stash), - None => return, - }; - let mut any_frames = false; - if let Ok(mut frames) = cx.find_frames(stash, addr as u64) { - while let Ok(Some(frame)) = frames.next() { - any_frames = true; - let name = match frame.function { - Some(f) => Some(f.name.slice()), - None => cx.object.search_symtab(addr as u64), - }; - call(Symbol::Frame { - addr: addr as *mut c_void, - location: frame.location, - name, - }); - } - } - if !any_frames { - if let Some((object_cx, object_addr)) = cx.object.search_object_map(addr as u64) { - if let Ok(mut frames) = object_cx.find_frames(stash, object_addr) { - while let Ok(Some(frame)) = frames.next() { - any_frames = true; - call(Symbol::Frame { - addr: addr as *mut c_void, - location: frame.location, - name: frame.function.map(|f| f.name.slice()), - }); - } - } - } - } - if !any_frames { - if let Some(name) = cx.object.search_symtab(addr as u64) { - call(Symbol::Symtab { - addr: addr as *mut c_void, - name, - }); - } - } - }); -} - -pub enum Symbol<'a> { - /// We were able to locate frame information for this symbol, and - /// `addr2line`'s frame internally has all the nitty gritty details. - Frame { - addr: *mut c_void, - location: Option>, - name: Option<&'a [u8]>, - }, - /// Couldn't find debug information, but we found it in the symbol table of - /// the elf executable. - Symtab { addr: *mut c_void, name: &'a [u8] }, -} - -impl Symbol<'_> { - pub fn name(&self) -> Option> { - match self { - Symbol::Frame { name, .. } => { - let name = name.as_ref()?; - Some(SymbolName::new(name)) - } - Symbol::Symtab { name, .. } => Some(SymbolName::new(name)), - } - } - - pub fn addr(&self) -> Option<*mut c_void> { - match self { - Symbol::Frame { addr, .. } => Some(*addr), - Symbol::Symtab { .. } => None, - } - } - - pub fn filename_raw(&self) -> Option> { - match self { - Symbol::Frame { location, .. 
} => { - let file = location.as_ref()?.file?; - Some(BytesOrWideString::Bytes(file.as_bytes())) - } - Symbol::Symtab { .. } => None, - } - } - - pub fn filename(&self) -> Option<&Path> { - match self { - Symbol::Frame { location, .. } => { - let file = location.as_ref()?.file?; - Some(Path::new(file)) - } - Symbol::Symtab { .. } => None, - } - } - - pub fn lineno(&self) -> Option { - match self { - Symbol::Frame { location, .. } => location.as_ref()?.line, - Symbol::Symtab { .. } => None, - } - } - - pub fn colno(&self) -> Option { - match self { - Symbol::Frame { location, .. } => location.as_ref()?.column, - Symbol::Symtab { .. } => None, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/miri.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/miri.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/miri.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/miri.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -use core::ffi::c_void; -use core::marker::PhantomData; - -use super::super::backtrace::miri::{resolve_addr, Frame}; -use super::BytesOrWideString; -use super::{ResolveWhat, SymbolName}; - -pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) { - let sym = match what { - ResolveWhat::Address(addr) => Symbol { - inner: resolve_addr(addr), - _unused: PhantomData, - }, - ResolveWhat::Frame(frame) => Symbol { - inner: frame.inner.clone(), - _unused: PhantomData, - }, - }; - cb(&super::Symbol { inner: sym }) -} - -pub struct Symbol<'a> { - inner: Frame, - _unused: PhantomData<&'a ()>, -} - -impl<'a> Symbol<'a> { - pub fn name(&self) -> Option> { - Some(SymbolName::new(&self.inner.inner.name)) - } - - pub fn addr(&self) -> Option<*mut c_void> { - Some(self.inner.addr) - } - - pub fn filename_raw(&self) -> Option> { - Some(BytesOrWideString::Bytes(&self.inner.inner.filename)) - } - - pub fn lineno(&self) -> Option { - Some(self.inner.inner.lineno) - 
} - - pub fn colno(&self) -> Option { - Some(self.inner.inner.colno) - } - - #[cfg(feature = "std")] - pub fn filename(&self) -> Option<&std::path::Path> { - Some(std::path::Path::new( - core::str::from_utf8(&self.inner.inner.filename).unwrap(), - )) - } -} - -pub unsafe fn clear_symbol_cache() {} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/mod.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/mod.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,485 +0,0 @@ -use core::{fmt, str}; - -cfg_if::cfg_if! { - if #[cfg(feature = "std")] { - use std::path::Path; - use std::prelude::v1::*; - } -} - -use super::backtrace::Frame; -use super::types::BytesOrWideString; -use core::ffi::c_void; -use rustc_demangle::{try_demangle, Demangle}; - -/// Resolve an address to a symbol, passing the symbol to the specified -/// closure. -/// -/// This function will look up the given address in areas such as the local -/// symbol table, dynamic symbol table, or DWARF debug info (depending on the -/// activated implementation) to find symbols to yield. -/// -/// The closure may not be called if resolution could not be performed, and it -/// also may be called more than once in the case of inlined functions. -/// -/// Symbols yielded represent the execution at the specified `addr`, returning -/// file/line pairs for that address (if available). -/// -/// Note that if you have a `Frame` then it's recommended to use the -/// `resolve_frame` function instead of this one. -/// -/// # Required features -/// -/// This function requires the `std` feature of the `backtrace` crate to be -/// enabled, and the `std` feature is enabled by default. -/// -/// # Panics -/// -/// This function strives to never panic, but if the `cb` provided panics then -/// some platforms will force a double panic to abort the process. 
Some -/// platforms use a C library which internally uses callbacks which cannot be -/// unwound through, so panicking from `cb` may trigger a process abort. -/// -/// # Example -/// -/// ``` -/// extern crate backtrace; -/// -/// fn main() { -/// backtrace::trace(|frame| { -/// let ip = frame.ip(); -/// -/// backtrace::resolve(ip, |symbol| { -/// // ... -/// }); -/// -/// false // only look at the top frame -/// }); -/// } -/// ``` -#[cfg(feature = "std")] -pub fn resolve(addr: *mut c_void, cb: F) { - let _guard = crate::lock::lock(); - unsafe { resolve_unsynchronized(addr, cb) } -} - -/// Resolve a previously capture frame to a symbol, passing the symbol to the -/// specified closure. -/// -/// This function performs the same function as `resolve` except that it takes a -/// `Frame` as an argument instead of an address. This can allow some platform -/// implementations of backtracing to provide more accurate symbol information -/// or information about inline frames for example. It's recommended to use this -/// if you can. -/// -/// # Required features -/// -/// This function requires the `std` feature of the `backtrace` crate to be -/// enabled, and the `std` feature is enabled by default. -/// -/// # Panics -/// -/// This function strives to never panic, but if the `cb` provided panics then -/// some platforms will force a double panic to abort the process. Some -/// platforms use a C library which internally uses callbacks which cannot be -/// unwound through, so panicking from `cb` may trigger a process abort. -/// -/// # Example -/// -/// ``` -/// extern crate backtrace; -/// -/// fn main() { -/// backtrace::trace(|frame| { -/// backtrace::resolve_frame(frame, |symbol| { -/// // ... 
-/// }); -/// -/// false // only look at the top frame -/// }); -/// } -/// ``` -#[cfg(feature = "std")] -pub fn resolve_frame(frame: &Frame, cb: F) { - let _guard = crate::lock::lock(); - unsafe { resolve_frame_unsynchronized(frame, cb) } -} - -pub enum ResolveWhat<'a> { - Address(*mut c_void), - Frame(&'a Frame), -} - -impl<'a> ResolveWhat<'a> { - #[allow(dead_code)] - fn address_or_ip(&self) -> *mut c_void { - match self { - ResolveWhat::Address(a) => adjust_ip(*a), - ResolveWhat::Frame(f) => adjust_ip(f.ip()), - } - } -} - -// IP values from stack frames are typically (always?) the instruction -// *after* the call that's the actual stack trace. Symbolizing this on -// causes the filename/line number to be one ahead and perhaps into -// the void if it's near the end of the function. -// -// This appears to basically always be the case on all platforms, so we always -// subtract one from a resolved ip to resolve it to the previous call -// instruction instead of the instruction being returned to. -// -// Ideally we would not do this. Ideally we would require callers of the -// `resolve` APIs here to manually do the -1 and account that they want location -// information for the *previous* instruction, not the current. Ideally we'd -// also expose on `Frame` if we are indeed the address of the next instruction -// or the current. -// -// For now though this is a pretty niche concern so we just internally always -// subtract one. Consumers should keep working and getting pretty good results, -// so we should be good enough. -fn adjust_ip(a: *mut c_void) -> *mut c_void { - if a.is_null() { - a - } else { - (a as usize - 1) as *mut c_void - } -} - -/// Same as `resolve`, only unsafe as it's unsynchronized. -/// -/// This function does not have synchronization guarantees but is available when -/// the `std` feature of this crate isn't compiled in. See the `resolve` -/// function for more documentation and examples. 
-/// -/// # Panics -/// -/// See information on `resolve` for caveats on `cb` panicking. -pub unsafe fn resolve_unsynchronized(addr: *mut c_void, mut cb: F) -where - F: FnMut(&Symbol), -{ - imp::resolve(ResolveWhat::Address(addr), &mut cb) -} - -/// Same as `resolve_frame`, only unsafe as it's unsynchronized. -/// -/// This function does not have synchronization guarantees but is available -/// when the `std` feature of this crate isn't compiled in. See the -/// `resolve_frame` function for more documentation and examples. -/// -/// # Panics -/// -/// See information on `resolve_frame` for caveats on `cb` panicking. -pub unsafe fn resolve_frame_unsynchronized(frame: &Frame, mut cb: F) -where - F: FnMut(&Symbol), -{ - imp::resolve(ResolveWhat::Frame(frame), &mut cb) -} - -/// A trait representing the resolution of a symbol in a file. -/// -/// This trait is yielded as a trait object to the closure given to the -/// `backtrace::resolve` function, and it is virtually dispatched as it's -/// unknown which implementation is behind it. -/// -/// A symbol can give contextual information about a function, for example the -/// name, filename, line number, precise address, etc. Not all information is -/// always available in a symbol, however, so all methods return an `Option`. -pub struct Symbol { - // TODO: this lifetime bound needs to be persisted eventually to `Symbol`, - // but that's currently a breaking change. For now this is safe since - // `Symbol` is only ever handed out by reference and can't be cloned. - inner: imp::Symbol<'static>, -} - -impl Symbol { - /// Returns the name of this function. - /// - /// The returned structure can be used to query various properties about the - /// symbol name: - /// - /// * The `Display` implementation will print out the demangled symbol. - /// * The raw `str` value of the symbol can be accessed (if it's valid - /// utf-8). - /// * The raw bytes for the symbol name can be accessed. 
- pub fn name(&self) -> Option> { - self.inner.name() - } - - /// Returns the starting address of this function. - pub fn addr(&self) -> Option<*mut c_void> { - self.inner.addr().map(|p| p as *mut _) - } - - /// Returns the raw filename as a slice. This is mainly useful for `no_std` - /// environments. - pub fn filename_raw(&self) -> Option> { - self.inner.filename_raw() - } - - /// Returns the column number for where this symbol is currently executing. - /// - /// Only gimli currently provides a value here and even then only if `filename` - /// returns `Some`, and so it is then consequently subject to similar caveats. - pub fn colno(&self) -> Option { - self.inner.colno() - } - - /// Returns the line number for where this symbol is currently executing. - /// - /// This return value is typically `Some` if `filename` returns `Some`, and - /// is consequently subject to similar caveats. - pub fn lineno(&self) -> Option { - self.inner.lineno() - } - - /// Returns the file name where this function was defined. - /// - /// This is currently only available when libbacktrace or gimli is being - /// used (e.g. unix platforms other) and when a binary is compiled with - /// debuginfo. If neither of these conditions is met then this will likely - /// return `None`. - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. 
- #[cfg(feature = "std")] - #[allow(unreachable_code)] - pub fn filename(&self) -> Option<&Path> { - self.inner.filename() - } -} - -impl fmt::Debug for Symbol { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut d = f.debug_struct("Symbol"); - if let Some(name) = self.name() { - d.field("name", &name); - } - if let Some(addr) = self.addr() { - d.field("addr", &addr); - } - - #[cfg(feature = "std")] - { - if let Some(filename) = self.filename() { - d.field("filename", &filename); - } - } - - if let Some(lineno) = self.lineno() { - d.field("lineno", &lineno); - } - d.finish() - } -} - -cfg_if::cfg_if! { - if #[cfg(feature = "cpp_demangle")] { - // Maybe a parsed C++ symbol, if parsing the mangled symbol as Rust - // failed. - struct OptionCppSymbol<'a>(Option<::cpp_demangle::BorrowedSymbol<'a>>); - - impl<'a> OptionCppSymbol<'a> { - fn parse(input: &'a [u8]) -> OptionCppSymbol<'a> { - OptionCppSymbol(::cpp_demangle::BorrowedSymbol::new(input).ok()) - } - - fn none() -> OptionCppSymbol<'a> { - OptionCppSymbol(None) - } - } - } else { - use core::marker::PhantomData; - - // Make sure to keep this zero-sized, so that the `cpp_demangle` feature - // has no cost when disabled. - struct OptionCppSymbol<'a>(PhantomData<&'a ()>); - - impl<'a> OptionCppSymbol<'a> { - fn parse(_: &'a [u8]) -> OptionCppSymbol<'a> { - OptionCppSymbol(PhantomData) - } - - fn none() -> OptionCppSymbol<'a> { - OptionCppSymbol(PhantomData) - } - } - } -} - -/// A wrapper around a symbol name to provide ergonomic accessors to the -/// demangled name, the raw bytes, the raw string, etc. -// Allow dead code for when the `cpp_demangle` feature is not enabled. -#[allow(dead_code)] -pub struct SymbolName<'a> { - bytes: &'a [u8], - demangled: Option>, - cpp_demangled: OptionCppSymbol<'a>, -} - -impl<'a> SymbolName<'a> { - /// Creates a new symbol name from the raw underlying bytes. 
- pub fn new(bytes: &'a [u8]) -> SymbolName<'a> { - let str_bytes = str::from_utf8(bytes).ok(); - let demangled = str_bytes.and_then(|s| try_demangle(s).ok()); - - let cpp = if demangled.is_none() { - OptionCppSymbol::parse(bytes) - } else { - OptionCppSymbol::none() - }; - - SymbolName { - bytes: bytes, - demangled: demangled, - cpp_demangled: cpp, - } - } - - /// Returns the raw (mangled) symbol name as a `str` if the symbol is valid utf-8. - /// - /// Use the `Display` implementation if you want the demangled version. - pub fn as_str(&self) -> Option<&'a str> { - self.demangled - .as_ref() - .map(|s| s.as_str()) - .or_else(|| str::from_utf8(self.bytes).ok()) - } - - /// Returns the raw symbol name as a list of bytes - pub fn as_bytes(&self) -> &'a [u8] { - self.bytes - } -} - -fn format_symbol_name( - fmt: fn(&str, &mut fmt::Formatter<'_>) -> fmt::Result, - mut bytes: &[u8], - f: &mut fmt::Formatter<'_>, -) -> fmt::Result { - while bytes.len() > 0 { - match str::from_utf8(bytes) { - Ok(name) => { - fmt(name, f)?; - break; - } - Err(err) => { - fmt("\u{FFFD}", f)?; - - match err.error_len() { - Some(len) => bytes = &bytes[err.valid_up_to() + len..], - None => break, - } - } - } - } - Ok(()) -} - -cfg_if::cfg_if! { - if #[cfg(feature = "cpp_demangle")] { - impl<'a> fmt::Display for SymbolName<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(ref s) = self.demangled { - s.fmt(f) - } else if let Some(ref cpp) = self.cpp_demangled.0 { - cpp.fmt(f) - } else { - format_symbol_name(fmt::Display::fmt, self.bytes, f) - } - } - } - } else { - impl<'a> fmt::Display for SymbolName<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(ref s) = self.demangled { - s.fmt(f) - } else { - format_symbol_name(fmt::Display::fmt, self.bytes, f) - } - } - } - } -} - -cfg_if::cfg_if! 
{ - if #[cfg(all(feature = "std", feature = "cpp_demangle"))] { - impl<'a> fmt::Debug for SymbolName<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use std::fmt::Write; - - if let Some(ref s) = self.demangled { - return s.fmt(f) - } - - // This may to print if the demangled symbol isn't actually - // valid, so handle the error here gracefully by not propagating - // it outwards. - if let Some(ref cpp) = self.cpp_demangled.0 { - let mut s = String::new(); - if write!(s, "{}", cpp).is_ok() { - return s.fmt(f) - } - } - - format_symbol_name(fmt::Debug::fmt, self.bytes, f) - } - } - } else { - impl<'a> fmt::Debug for SymbolName<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(ref s) = self.demangled { - s.fmt(f) - } else { - format_symbol_name(fmt::Debug::fmt, self.bytes, f) - } - } - } - } -} - -/// Attempt to reclaim that cached memory used to symbolicate addresses. -/// -/// This method will attempt to release any global data structures that have -/// otherwise been cached globally or in the thread which typically represent -/// parsed DWARF information or similar. -/// -/// # Caveats -/// -/// While this function is always available it doesn't actually do anything on -/// most implementations. Libraries like dbghelp or libbacktrace do not provide -/// facilities to deallocate state and manage the allocated memory. For now the -/// `gimli-symbolize` feature of this crate is the only feature where this -/// function has any effect. -#[cfg(feature = "std")] -pub fn clear_symbol_cache() { - let _guard = crate::lock::lock(); - unsafe { - imp::clear_symbol_cache(); - } -} - -cfg_if::cfg_if! 
{ - if #[cfg(miri)] { - mod miri; - use miri as imp; - } else if #[cfg(all(windows, target_env = "msvc", not(target_vendor = "uwp")))] { - mod dbghelp; - use dbghelp as imp; - } else if #[cfg(all( - any(unix, all(windows, target_env = "gnu")), - not(target_vendor = "uwp"), - not(target_os = "emscripten"), - any(not(backtrace_in_libstd), feature = "backtrace"), - ))] { - mod gimli; - use gimli as imp; - } else { - mod noop; - use noop as imp; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/noop.rs s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/noop.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/symbolize/noop.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/symbolize/noop.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -//! Empty symbolication strategy used to compile for platforms that have no -//! support. - -use super::{BytesOrWideString, ResolveWhat, SymbolName}; -use core::ffi::c_void; -use core::marker; - -pub unsafe fn resolve(_addr: ResolveWhat<'_>, _cb: &mut dyn FnMut(&super::Symbol)) {} - -pub struct Symbol<'a> { - _marker: marker::PhantomData<&'a i32>, -} - -impl Symbol<'_> { - pub fn name(&self) -> Option> { - None - } - - pub fn addr(&self) -> Option<*mut c_void> { - None - } - - pub fn filename_raw(&self) -> Option> { - None - } - - #[cfg(feature = "std")] - pub fn filename(&self) -> Option<&::std::path::Path> { - None - } - - pub fn lineno(&self) -> Option { - None - } - - pub fn colno(&self) -> Option { - None - } -} - -pub unsafe fn clear_symbol_cache() {} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/types.rs s390-tools-2.33.1/rust-vendor/backtrace/src/types.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/types.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/types.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,83 +0,0 @@ -//! Platform dependent types. - -cfg_if::cfg_if! 
{ - if #[cfg(feature = "std")] { - use std::borrow::Cow; - use std::fmt; - use std::path::PathBuf; - use std::prelude::v1::*; - use std::str; - } -} - -/// A platform independent representation of a string. When working with `std` -/// enabled it is recommended to the convenience methods for providing -/// conversions to `std` types. -#[derive(Debug)] -pub enum BytesOrWideString<'a> { - /// A slice, typically provided on Unix platforms. - Bytes(&'a [u8]), - /// Wide strings typically from Windows. - Wide(&'a [u16]), -} - -#[cfg(feature = "std")] -impl<'a> BytesOrWideString<'a> { - /// Lossy converts to a `Cow`, will allocate if `Bytes` is not valid - /// UTF-8 or if `BytesOrWideString` is `Wide`. - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. - pub fn to_str_lossy(&self) -> Cow<'a, str> { - use self::BytesOrWideString::*; - - match self { - &Bytes(slice) => String::from_utf8_lossy(slice), - &Wide(wide) => Cow::Owned(String::from_utf16_lossy(wide)), - } - } - - /// Provides a `Path` representation of `BytesOrWideString`. - /// - /// # Required features - /// - /// This function requires the `std` feature of the `backtrace` crate to be - /// enabled, and the `std` feature is enabled by default. 
- pub fn into_path_buf(self) -> PathBuf { - #[cfg(unix)] - { - use std::ffi::OsStr; - use std::os::unix::ffi::OsStrExt; - - if let BytesOrWideString::Bytes(slice) = self { - return PathBuf::from(OsStr::from_bytes(slice)); - } - } - - #[cfg(windows)] - { - use std::ffi::OsString; - use std::os::windows::ffi::OsStringExt; - - if let BytesOrWideString::Wide(slice) = self { - return PathBuf::from(OsString::from_wide(slice)); - } - } - - if let BytesOrWideString::Bytes(b) = self { - if let Ok(s) = str::from_utf8(b) { - return PathBuf::from(s); - } - } - unreachable!() - } -} - -#[cfg(feature = "std")] -impl<'a> fmt::Display for BytesOrWideString<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.to_str_lossy().fmt(f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/backtrace/src/windows.rs s390-tools-2.33.1/rust-vendor/backtrace/src/windows.rs --- s390-tools-2.31.0/rust-vendor/backtrace/src/windows.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/backtrace/src/windows.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,693 +0,0 @@ -//! A module to define the FFI definitions we use on Windows for `dbghelp.dll` -//! -//! This module uses a custom macro, `ffi!`, to wrap all definitions to -//! automatically generate tests to assert that our definitions here are the -//! same as `winapi`. -//! -//! This module largely exists to integrate into libstd itself where winapi is -//! not currently available. - -#![allow(bad_style, dead_code)] - -cfg_if::cfg_if! 
{ - if #[cfg(feature = "verify-winapi")] { - pub use self::winapi::c_void; - pub use self::winapi::HINSTANCE; - pub use self::winapi::FARPROC; - pub use self::winapi::LPSECURITY_ATTRIBUTES; - #[cfg(target_pointer_width = "64")] - pub use self::winapi::PUNWIND_HISTORY_TABLE; - #[cfg(target_pointer_width = "64")] - pub use self::winapi::PRUNTIME_FUNCTION; - - mod winapi { - pub use winapi::ctypes::*; - pub use winapi::shared::basetsd::*; - pub use winapi::shared::minwindef::*; - pub use winapi::um::dbghelp::*; - pub use winapi::um::fileapi::*; - pub use winapi::um::handleapi::*; - pub use winapi::um::libloaderapi::*; - pub use winapi::um::memoryapi::*; - pub use winapi::um::minwinbase::*; - pub use winapi::um::processthreadsapi::*; - pub use winapi::um::synchapi::*; - pub use winapi::um::tlhelp32::*; - pub use winapi::um::winbase::*; - pub use winapi::um::winnt::*; - } - } else { - pub use core::ffi::c_void; - pub type HINSTANCE = *mut c_void; - pub type FARPROC = *mut c_void; - pub type LPSECURITY_ATTRIBUTES = *mut c_void; - #[cfg(target_pointer_width = "64")] - pub type PRUNTIME_FUNCTION = *mut c_void; - #[cfg(target_pointer_width = "64")] - pub type PUNWIND_HISTORY_TABLE = *mut c_void; - } -} - -macro_rules! 
ffi { - () => (); - - (#[repr($($r:tt)*)] pub struct $name:ident { $(pub $field:ident: $ty:ty,)* } $($rest:tt)*) => ( - #[repr($($r)*)] - #[cfg(not(feature = "verify-winapi"))] - #[derive(Copy, Clone)] - pub struct $name { - $(pub $field: $ty,)* - } - - #[cfg(feature = "verify-winapi")] - pub use self::winapi::$name; - - #[test] - #[cfg(feature = "verify-winapi")] - fn $name() { - use core::mem; - - #[repr($($r)*)] - pub struct $name { - $(pub $field: $ty,)* - } - - assert_eq!( - mem::size_of::<$name>(), - mem::size_of::(), - concat!("size of ", stringify!($name), " is wrong"), - ); - assert_eq!( - mem::align_of::<$name>(), - mem::align_of::(), - concat!("align of ", stringify!($name), " is wrong"), - ); - - type Winapi = winapi::$name; - - fn assert_same(_: T, _: T) {} - - unsafe { - let a = &*(mem::align_of::<$name>() as *const $name); - let b = &*(mem::align_of::() as *const Winapi); - - $( - ffi!(@test_fields a b $field $ty); - )* - } - } - - ffi!($($rest)*); - ); - - // Handling verification against unions in winapi requires some special care - (@test_fields $a:ident $b:ident FltSave $ty:ty) => ( - // Skip this field on x86_64 `CONTEXT` since it's a union and a bit funny - ); - (@test_fields $a:ident $b:ident D $ty:ty) => ({ - let a = &$a.D; - let b = $b.D(); - assert_same(a, b); - assert_eq!(a as *const $ty, b as *const $ty, "misplaced field D"); - }); - (@test_fields $a:ident $b:ident s $ty:ty) => ({ - let a = &$a.s; - let b = $b.s(); - assert_same(a, b); - assert_eq!(a as *const $ty, b as *const $ty, "misplaced field s"); - }); - - // Otherwise test all fields normally. 
- (@test_fields $a:ident $b:ident $field:ident $ty:ty) => ({ - let a = &$a.$field; - let b = &$b.$field; - assert_same(a, b); - assert_eq!(a as *const $ty, b as *const $ty, - concat!("misplaced field ", stringify!($field))); - }); - - (pub type $name:ident = $ty:ty; $($rest:tt)*) => ( - pub type $name = $ty; - - #[cfg(feature = "verify-winapi")] - #[allow(dead_code)] - const $name: () = { - fn _foo() { - trait SameType {} - impl SameType for (T, T) {} - fn assert_same() {} - - assert_same::<($name, winapi::$name)>(); - } - }; - - ffi!($($rest)*); - ); - - (pub const $name:ident: $ty:ty = $val:expr; $($rest:tt)*) => ( - pub const $name: $ty = $val; - - #[cfg(feature = "verify-winapi")] - #[allow(unused_imports)] - mod $name { - use super::*; - #[test] - fn assert_valid() { - let x: $ty = winapi::$name; - assert_eq!(x, $val); - } - } - - - ffi!($($rest)*); - ); - - ($(#[$meta:meta])* extern "system" { $(pub fn $name:ident($($args:tt)*) -> $ret:ty;)* } $($rest:tt)*) => ( - $(#[$meta])* extern "system" { - $(pub fn $name($($args)*) -> $ret;)* - } - - $( - #[cfg(feature = "verify-winapi")] - mod $name { - #[test] - fn assert_same() { - use super::*; - - assert_eq!($name as usize, winapi::$name as usize); - let mut x: unsafe extern "system" fn($($args)*) -> $ret; - x = $name; - let _ = x; - x = winapi::$name; - let _ = x; - } - } - )* - - ffi!($($rest)*); - ); - - (impl $name:ident { $($i:tt)* } $($rest:tt)*) => ( - #[cfg(not(feature = "verify-winapi"))] - impl $name { - $($i)* - } - - ffi!($($rest)*); - ); -} - -ffi! 
{ - #[repr(C)] - pub struct STACKFRAME64 { - pub AddrPC: ADDRESS64, - pub AddrReturn: ADDRESS64, - pub AddrFrame: ADDRESS64, - pub AddrStack: ADDRESS64, - pub AddrBStore: ADDRESS64, - pub FuncTableEntry: PVOID, - pub Params: [DWORD64; 4], - pub Far: BOOL, - pub Virtual: BOOL, - pub Reserved: [DWORD64; 3], - pub KdHelp: KDHELP64, - } - - pub type LPSTACKFRAME64 = *mut STACKFRAME64; - - #[repr(C)] - pub struct STACKFRAME_EX { - pub AddrPC: ADDRESS64, - pub AddrReturn: ADDRESS64, - pub AddrFrame: ADDRESS64, - pub AddrStack: ADDRESS64, - pub AddrBStore: ADDRESS64, - pub FuncTableEntry: PVOID, - pub Params: [DWORD64; 4], - pub Far: BOOL, - pub Virtual: BOOL, - pub Reserved: [DWORD64; 3], - pub KdHelp: KDHELP64, - pub StackFrameSize: DWORD, - pub InlineFrameContext: DWORD, - } - - pub type LPSTACKFRAME_EX = *mut STACKFRAME_EX; - - #[repr(C)] - pub struct IMAGEHLP_LINEW64 { - pub SizeOfStruct: DWORD, - pub Key: PVOID, - pub LineNumber: DWORD, - pub FileName: PWSTR, - pub Address: DWORD64, - } - - pub type PIMAGEHLP_LINEW64 = *mut IMAGEHLP_LINEW64; - - #[repr(C)] - pub struct SYMBOL_INFOW { - pub SizeOfStruct: ULONG, - pub TypeIndex: ULONG, - pub Reserved: [ULONG64; 2], - pub Index: ULONG, - pub Size: ULONG, - pub ModBase: ULONG64, - pub Flags: ULONG, - pub Value: ULONG64, - pub Address: ULONG64, - pub Register: ULONG, - pub Scope: ULONG, - pub Tag: ULONG, - pub NameLen: ULONG, - pub MaxNameLen: ULONG, - pub Name: [WCHAR; 1], - } - - pub type PSYMBOL_INFOW = *mut SYMBOL_INFOW; - - pub type PTRANSLATE_ADDRESS_ROUTINE64 = Option< - unsafe extern "system" fn(hProcess: HANDLE, hThread: HANDLE, lpaddr: LPADDRESS64) -> DWORD64, - >; - pub type PGET_MODULE_BASE_ROUTINE64 = - Option DWORD64>; - pub type PFUNCTION_TABLE_ACCESS_ROUTINE64 = - Option PVOID>; - pub type PREAD_PROCESS_MEMORY_ROUTINE64 = Option< - unsafe extern "system" fn( - hProcess: HANDLE, - qwBaseAddress: DWORD64, - lpBuffer: PVOID, - nSize: DWORD, - lpNumberOfBytesRead: LPDWORD, - ) -> BOOL, - >; - - #[repr(C)] - 
pub struct ADDRESS64 { - pub Offset: DWORD64, - pub Segment: WORD, - pub Mode: ADDRESS_MODE, - } - - pub type LPADDRESS64 = *mut ADDRESS64; - - pub type ADDRESS_MODE = u32; - - #[repr(C)] - pub struct KDHELP64 { - pub Thread: DWORD64, - pub ThCallbackStack: DWORD, - pub ThCallbackBStore: DWORD, - pub NextCallback: DWORD, - pub FramePointer: DWORD, - pub KiCallUserMode: DWORD64, - pub KeUserCallbackDispatcher: DWORD64, - pub SystemRangeStart: DWORD64, - pub KiUserExceptionDispatcher: DWORD64, - pub StackBase: DWORD64, - pub StackLimit: DWORD64, - pub BuildVersion: DWORD, - pub Reserved0: DWORD, - pub Reserved1: [DWORD64; 4], - } - - #[repr(C)] - pub struct MODULEENTRY32W { - pub dwSize: DWORD, - pub th32ModuleID: DWORD, - pub th32ProcessID: DWORD, - pub GlblcntUsage: DWORD, - pub ProccntUsage: DWORD, - pub modBaseAddr: *mut u8, - pub modBaseSize: DWORD, - pub hModule: HMODULE, - pub szModule: [WCHAR; MAX_MODULE_NAME32 + 1], - pub szExePath: [WCHAR; MAX_PATH], - } - - pub const MAX_SYM_NAME: usize = 2000; - pub const AddrModeFlat: ADDRESS_MODE = 3; - pub const TRUE: BOOL = 1; - pub const FALSE: BOOL = 0; - pub const PROCESS_QUERY_INFORMATION: DWORD = 0x400; - pub const IMAGE_FILE_MACHINE_ARM64: u16 = 43620; - pub const IMAGE_FILE_MACHINE_AMD64: u16 = 34404; - pub const IMAGE_FILE_MACHINE_I386: u16 = 332; - pub const IMAGE_FILE_MACHINE_ARMNT: u16 = 452; - pub const FILE_SHARE_READ: DWORD = 0x1; - pub const FILE_SHARE_WRITE: DWORD = 0x2; - pub const OPEN_EXISTING: DWORD = 0x3; - pub const GENERIC_READ: DWORD = 0x80000000; - pub const INFINITE: DWORD = !0; - pub const PAGE_READONLY: DWORD = 2; - pub const FILE_MAP_READ: DWORD = 4; - pub const TH32CS_SNAPMODULE: DWORD = 0x00000008; - pub const INVALID_HANDLE_VALUE: HANDLE = -1isize as HANDLE; - pub const MAX_MODULE_NAME32: usize = 255; - pub const MAX_PATH: usize = 260; - - pub type DWORD = u32; - pub type PDWORD = *mut u32; - pub type BOOL = i32; - pub type DWORD64 = u64; - pub type PDWORD64 = *mut u64; - pub type 
HANDLE = *mut c_void; - pub type PVOID = HANDLE; - pub type PCWSTR = *const u16; - pub type LPSTR = *mut i8; - pub type LPCSTR = *const i8; - pub type PWSTR = *mut u16; - pub type WORD = u16; - pub type ULONG = u32; - pub type ULONG64 = u64; - pub type WCHAR = u16; - pub type PCONTEXT = *mut CONTEXT; - pub type LPDWORD = *mut DWORD; - pub type DWORDLONG = u64; - pub type HMODULE = HINSTANCE; - pub type SIZE_T = usize; - pub type LPVOID = *mut c_void; - pub type LPCVOID = *const c_void; - pub type LPMODULEENTRY32W = *mut MODULEENTRY32W; - - #[link(name = "kernel32")] - extern "system" { - pub fn GetCurrentProcess() -> HANDLE; - pub fn GetCurrentThread() -> HANDLE; - pub fn RtlCaptureContext(ContextRecord: PCONTEXT) -> (); - pub fn LoadLibraryA(a: *const i8) -> HMODULE; - pub fn GetProcAddress(h: HMODULE, name: *const i8) -> FARPROC; - pub fn GetModuleHandleA(name: *const i8) -> HMODULE; - pub fn OpenProcess( - dwDesiredAccess: DWORD, - bInheitHandle: BOOL, - dwProcessId: DWORD, - ) -> HANDLE; - pub fn GetCurrentProcessId() -> DWORD; - pub fn CloseHandle(h: HANDLE) -> BOOL; - pub fn CreateFileA( - lpFileName: LPCSTR, - dwDesiredAccess: DWORD, - dwShareMode: DWORD, - lpSecurityAttributes: LPSECURITY_ATTRIBUTES, - dwCreationDisposition: DWORD, - dwFlagsAndAttributes: DWORD, - hTemplateFile: HANDLE, - ) -> HANDLE; - pub fn CreateMutexA( - attrs: LPSECURITY_ATTRIBUTES, - initial: BOOL, - name: LPCSTR, - ) -> HANDLE; - pub fn ReleaseMutex(hMutex: HANDLE) -> BOOL; - pub fn WaitForSingleObjectEx( - hHandle: HANDLE, - dwMilliseconds: DWORD, - bAlertable: BOOL, - ) -> DWORD; - pub fn CreateFileMappingA( - hFile: HANDLE, - lpFileMappingAttributes: LPSECURITY_ATTRIBUTES, - flProtect: DWORD, - dwMaximumSizeHigh: DWORD, - dwMaximumSizeLow: DWORD, - lpName: LPCSTR, - ) -> HANDLE; - pub fn MapViewOfFile( - hFileMappingObject: HANDLE, - dwDesiredAccess: DWORD, - dwFileOffsetHigh: DWORD, - dwFileOffsetLow: DWORD, - dwNumberOfBytesToMap: SIZE_T, - ) -> LPVOID; - pub fn 
UnmapViewOfFile(lpBaseAddress: LPCVOID) -> BOOL; - pub fn CreateToolhelp32Snapshot( - dwFlags: DWORD, - th32ProcessID: DWORD, - ) -> HANDLE; - pub fn Module32FirstW( - hSnapshot: HANDLE, - lpme: LPMODULEENTRY32W, - ) -> BOOL; - pub fn Module32NextW( - hSnapshot: HANDLE, - lpme: LPMODULEENTRY32W, - ) -> BOOL; - } -} - -#[cfg(target_pointer_width = "64")] -ffi! { - #[link(name = "kernel32")] - extern "system" { - pub fn RtlLookupFunctionEntry( - ControlPc: DWORD64, - ImageBase: PDWORD64, - HistoryTable: PUNWIND_HISTORY_TABLE, - ) -> PRUNTIME_FUNCTION; - } -} - -#[cfg(target_arch = "aarch64")] -ffi! { - #[repr(C, align(16))] - pub struct CONTEXT { - pub ContextFlags: DWORD, - pub Cpsr: DWORD, - pub u: CONTEXT_u, - pub Sp: u64, - pub Pc: u64, - pub V: [ARM64_NT_NEON128; 32], - pub Fpcr: DWORD, - pub Fpsr: DWORD, - pub Bcr: [DWORD; ARM64_MAX_BREAKPOINTS], - pub Bvr: [DWORD64; ARM64_MAX_BREAKPOINTS], - pub Wcr: [DWORD; ARM64_MAX_WATCHPOINTS], - pub Wvr: [DWORD64; ARM64_MAX_WATCHPOINTS], - } - - #[repr(C)] - pub struct CONTEXT_u { - pub s: CONTEXT_u_s, - } - - impl CONTEXT_u { - pub unsafe fn s(&self) -> &CONTEXT_u_s { - &self.s - } - } - - #[repr(C)] - pub struct CONTEXT_u_s { - pub X0: u64, - pub X1: u64, - pub X2: u64, - pub X3: u64, - pub X4: u64, - pub X5: u64, - pub X6: u64, - pub X7: u64, - pub X8: u64, - pub X9: u64, - pub X10: u64, - pub X11: u64, - pub X12: u64, - pub X13: u64, - pub X14: u64, - pub X15: u64, - pub X16: u64, - pub X17: u64, - pub X18: u64, - pub X19: u64, - pub X20: u64, - pub X21: u64, - pub X22: u64, - pub X23: u64, - pub X24: u64, - pub X25: u64, - pub X26: u64, - pub X27: u64, - pub X28: u64, - pub Fp: u64, - pub Lr: u64, - } - - pub const ARM64_MAX_BREAKPOINTS: usize = 8; - pub const ARM64_MAX_WATCHPOINTS: usize = 2; - - #[repr(C)] - pub struct ARM64_NT_NEON128 { - pub D: [f64; 2], - } -} - -#[cfg(target_arch = "x86")] -ffi! 
{ - #[repr(C)] - pub struct CONTEXT { - pub ContextFlags: DWORD, - pub Dr0: DWORD, - pub Dr1: DWORD, - pub Dr2: DWORD, - pub Dr3: DWORD, - pub Dr6: DWORD, - pub Dr7: DWORD, - pub FloatSave: FLOATING_SAVE_AREA, - pub SegGs: DWORD, - pub SegFs: DWORD, - pub SegEs: DWORD, - pub SegDs: DWORD, - pub Edi: DWORD, - pub Esi: DWORD, - pub Ebx: DWORD, - pub Edx: DWORD, - pub Ecx: DWORD, - pub Eax: DWORD, - pub Ebp: DWORD, - pub Eip: DWORD, - pub SegCs: DWORD, - pub EFlags: DWORD, - pub Esp: DWORD, - pub SegSs: DWORD, - pub ExtendedRegisters: [u8; 512], - } - - #[repr(C)] - pub struct FLOATING_SAVE_AREA { - pub ControlWord: DWORD, - pub StatusWord: DWORD, - pub TagWord: DWORD, - pub ErrorOffset: DWORD, - pub ErrorSelector: DWORD, - pub DataOffset: DWORD, - pub DataSelector: DWORD, - pub RegisterArea: [u8; 80], - pub Spare0: DWORD, - } -} - -#[cfg(target_arch = "x86_64")] -ffi! { - #[repr(C, align(8))] - pub struct CONTEXT { - pub P1Home: DWORDLONG, - pub P2Home: DWORDLONG, - pub P3Home: DWORDLONG, - pub P4Home: DWORDLONG, - pub P5Home: DWORDLONG, - pub P6Home: DWORDLONG, - - pub ContextFlags: DWORD, - pub MxCsr: DWORD, - - pub SegCs: WORD, - pub SegDs: WORD, - pub SegEs: WORD, - pub SegFs: WORD, - pub SegGs: WORD, - pub SegSs: WORD, - pub EFlags: DWORD, - - pub Dr0: DWORDLONG, - pub Dr1: DWORDLONG, - pub Dr2: DWORDLONG, - pub Dr3: DWORDLONG, - pub Dr6: DWORDLONG, - pub Dr7: DWORDLONG, - - pub Rax: DWORDLONG, - pub Rcx: DWORDLONG, - pub Rdx: DWORDLONG, - pub Rbx: DWORDLONG, - pub Rsp: DWORDLONG, - pub Rbp: DWORDLONG, - pub Rsi: DWORDLONG, - pub Rdi: DWORDLONG, - pub R8: DWORDLONG, - pub R9: DWORDLONG, - pub R10: DWORDLONG, - pub R11: DWORDLONG, - pub R12: DWORDLONG, - pub R13: DWORDLONG, - pub R14: DWORDLONG, - pub R15: DWORDLONG, - - pub Rip: DWORDLONG, - - pub FltSave: FLOATING_SAVE_AREA, - - pub VectorRegister: [M128A; 26], - pub VectorControl: DWORDLONG, - - pub DebugControl: DWORDLONG, - pub LastBranchToRip: DWORDLONG, - pub LastBranchFromRip: DWORDLONG, - pub 
LastExceptionToRip: DWORDLONG, - pub LastExceptionFromRip: DWORDLONG, - } - - #[repr(C)] - pub struct M128A { - pub Low: u64, - pub High: i64, - } -} - -#[repr(C)] -#[cfg(target_arch = "x86_64")] -#[derive(Copy, Clone)] -pub struct FLOATING_SAVE_AREA { - _Dummy: [u8; 512], -} - -#[cfg(target_arch = "arm")] -ffi! { - // #[repr(C)] - // pub struct NEON128 { - // pub Low: ULONG64, - // pub High: LONG64, - // } - - // pub type PNEON128 = *mut NEON128; - - #[repr(C)] - pub struct CONTEXT_u { - // pub Q: [NEON128; 16], - pub D: [ULONG64; 32], - // pub S: [DWORD; 32], - } - - pub const ARM_MAX_BREAKPOINTS: usize = 8; - pub const ARM_MAX_WATCHPOINTS: usize = 1; - - #[repr(C)] - pub struct CONTEXT { - pub ContextFlags: DWORD, - pub R0: DWORD, - pub R1: DWORD, - pub R2: DWORD, - pub R3: DWORD, - pub R4: DWORD, - pub R5: DWORD, - pub R6: DWORD, - pub R7: DWORD, - pub R8: DWORD, - pub R9: DWORD, - pub R10: DWORD, - pub R11: DWORD, - pub R12: DWORD, - pub Sp: DWORD, - pub Lr: DWORD, - pub Pc: DWORD, - pub Cpsr: DWORD, - pub Fpsrc: DWORD, - pub Padding: DWORD, - pub u: CONTEXT_u, - pub Bvr: [DWORD; ARM_MAX_BREAKPOINTS], - pub Bcr: [DWORD; ARM_MAX_BREAKPOINTS], - pub Wvr: [DWORD; ARM_MAX_WATCHPOINTS], - pub Wcr: [DWORD; ARM_MAX_WATCHPOINTS], - pub Padding2: [DWORD; 2], - } -} // IFDEF(arm) diff -Nru s390-tools-2.31.0/rust-vendor/bytes/benches/buf.rs s390-tools-2.33.1/rust-vendor/bytes/benches/buf.rs --- s390-tools-2.31.0/rust-vendor/bytes/benches/buf.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/benches/buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,186 +0,0 @@ -#![feature(test)] -#![warn(rust_2018_idioms)] - -extern crate test; - -use bytes::Buf; -use test::Bencher; - -/// Dummy Buf implementation -struct TestBuf { - buf: &'static [u8], - readlens: &'static [usize], - init_pos: usize, - pos: usize, - readlen_pos: usize, - readlen: usize, -} -impl TestBuf { - fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBuf { - 
let mut buf = TestBuf { - buf, - readlens, - init_pos, - pos: 0, - readlen_pos: 0, - readlen: 0, - }; - buf.reset(); - buf - } - fn reset(&mut self) { - self.pos = self.init_pos; - self.readlen_pos = 0; - self.next_readlen(); - } - /// Compute the length of the next read : - /// - use the next value specified in readlens (capped by remaining) if any - /// - else the remaining - fn next_readlen(&mut self) { - self.readlen = self.buf.len() - self.pos; - if let Some(readlen) = self.readlens.get(self.readlen_pos) { - self.readlen = std::cmp::min(self.readlen, *readlen); - self.readlen_pos += 1; - } - } -} -impl Buf for TestBuf { - fn remaining(&self) -> usize { - self.buf.len() - self.pos - } - fn advance(&mut self, cnt: usize) { - self.pos += cnt; - assert!(self.pos <= self.buf.len()); - self.next_readlen(); - } - fn chunk(&self) -> &[u8] { - if self.readlen == 0 { - Default::default() - } else { - &self.buf[self.pos..self.pos + self.readlen] - } - } -} - -/// Dummy Buf implementation -/// version with methods forced to not be inlined (to simulate costly calls) -struct TestBufC { - inner: TestBuf, -} -impl TestBufC { - fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBufC { - TestBufC { - inner: TestBuf::new(buf, readlens, init_pos), - } - } - fn reset(&mut self) { - self.inner.reset() - } -} -impl Buf for TestBufC { - #[inline(never)] - fn remaining(&self) -> usize { - self.inner.remaining() - } - #[inline(never)] - fn advance(&mut self, cnt: usize) { - self.inner.advance(cnt) - } - #[inline(never)] - fn chunk(&self) -> &[u8] { - self.inner.chunk() - } -} - -macro_rules! 
bench { - ($fname:ident, testbuf $testbuf:ident $readlens:expr, $method:ident $(,$arg:expr)*) => ( - #[bench] - fn $fname(b: &mut Bencher) { - let mut bufs = [ - $testbuf::new(&[1u8; 8+0], $readlens, 0), - $testbuf::new(&[1u8; 8+1], $readlens, 1), - $testbuf::new(&[1u8; 8+2], $readlens, 2), - $testbuf::new(&[1u8; 8+3], $readlens, 3), - $testbuf::new(&[1u8; 8+4], $readlens, 4), - $testbuf::new(&[1u8; 8+5], $readlens, 5), - $testbuf::new(&[1u8; 8+6], $readlens, 6), - $testbuf::new(&[1u8; 8+7], $readlens, 7), - ]; - b.iter(|| { - for i in 0..8 { - bufs[i].reset(); - let buf: &mut dyn Buf = &mut bufs[i]; // type erasure - test::black_box(buf.$method($($arg,)*)); - } - }) - } - ); - ($fname:ident, slice, $method:ident $(,$arg:expr)*) => ( - #[bench] - fn $fname(b: &mut Bencher) { - // buf must be long enough for one read of 8 bytes starting at pos 7 - let arr = [1u8; 8+7]; - b.iter(|| { - for i in 0..8 { - let mut buf = &arr[i..]; - let buf = &mut buf as &mut dyn Buf; // type erasure - test::black_box(buf.$method($($arg,)*)); - } - }) - } - ); - ($fname:ident, option) => ( - #[bench] - fn $fname(b: &mut Bencher) { - let data = [1u8; 1]; - b.iter(|| { - for _ in 0..8 { - let mut buf = Some(data); - let buf = &mut buf as &mut dyn Buf; // type erasure - test::black_box(buf.get_u8()); - } - }) - } - ); -} - -macro_rules! 
bench_group { - ($method:ident $(,$arg:expr)*) => ( - bench!(slice, slice, $method $(,$arg)*); - bench!(tbuf_1, testbuf TestBuf &[], $method $(,$arg)*); - bench!(tbuf_1_costly, testbuf TestBufC &[], $method $(,$arg)*); - bench!(tbuf_2, testbuf TestBuf &[1], $method $(,$arg)*); - bench!(tbuf_2_costly, testbuf TestBufC &[1], $method $(,$arg)*); - // bench!(tbuf_onebyone, testbuf TestBuf &[1,1,1,1,1,1,1,1], $method $(,$arg)*); - // bench!(tbuf_onebyone_costly, testbuf TestBufC &[1,1,1,1,1,1,1,1], $method $(,$arg)*); - ); -} - -mod get_u8 { - use super::*; - bench_group!(get_u8); -} -mod get_u16 { - use super::*; - bench_group!(get_u16); -} -mod get_u32 { - use super::*; - bench_group!(get_u32); -} -mod get_u64 { - use super::*; - bench_group!(get_u64); -} -mod get_f32 { - use super::*; - bench_group!(get_f32); -} -mod get_f64 { - use super::*; - bench_group!(get_f64); -} -mod get_uint24 { - use super::*; - bench_group!(get_uint, 3); -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/benches/bytes_mut.rs s390-tools-2.33.1/rust-vendor/bytes/benches/bytes_mut.rs --- s390-tools-2.31.0/rust-vendor/bytes/benches/bytes_mut.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/benches/bytes_mut.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,266 +0,0 @@ -#![feature(test)] -#![warn(rust_2018_idioms)] - -extern crate test; - -use bytes::{BufMut, BytesMut}; -use test::Bencher; - -#[bench] -fn alloc_small(b: &mut Bencher) { - b.iter(|| { - for _ in 0..1024 { - test::black_box(BytesMut::with_capacity(12)); - } - }) -} - -#[bench] -fn alloc_mid(b: &mut Bencher) { - b.iter(|| { - test::black_box(BytesMut::with_capacity(128)); - }) -} - -#[bench] -fn alloc_big(b: &mut Bencher) { - b.iter(|| { - test::black_box(BytesMut::with_capacity(4096)); - }) -} - -#[bench] -fn deref_unique(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(4096); - buf.put(&[0u8; 1024][..]); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn 
deref_unique_unroll(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(4096); - buf.put(&[0u8; 1024][..]); - - b.iter(|| { - for _ in 0..128 { - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn deref_shared(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(4096); - buf.put(&[0u8; 1024][..]); - let _b2 = buf.split_off(1024); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn deref_two(b: &mut Bencher) { - let mut buf1 = BytesMut::with_capacity(8); - buf1.put(&[0u8; 8][..]); - - let mut buf2 = BytesMut::with_capacity(4096); - buf2.put(&[0u8; 1024][..]); - - b.iter(|| { - for _ in 0..512 { - test::black_box(&buf1[..]); - test::black_box(&buf2[..]); - } - }) -} - -#[bench] -fn clone_frozen(b: &mut Bencher) { - let bytes = BytesMut::from(&b"hello world 1234567890 and have a good byte 0987654321"[..]) - .split() - .freeze(); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&bytes.clone()); - } - }) -} - -#[bench] -fn alloc_write_split_to_mid(b: &mut Bencher) { - b.iter(|| { - let mut buf = BytesMut::with_capacity(128); - buf.put_slice(&[0u8; 64]); - test::black_box(buf.split_to(64)); - }) -} - -#[bench] -fn drain_write_drain(b: &mut Bencher) { - let data = [0u8; 128]; - - b.iter(|| { - let mut buf = BytesMut::with_capacity(1024); - let mut parts = Vec::with_capacity(8); - - for _ in 0..8 { - buf.put(&data[..]); - parts.push(buf.split_to(128)); - } - - test::black_box(parts); - }) -} - -#[bench] -fn fmt_write(b: &mut Bencher) { - use std::fmt::Write; - let mut buf = BytesMut::with_capacity(128); - let s = "foo bar baz quux lorem ipsum dolor et"; - - b.bytes = s.len() as u64; - b.iter(|| { - let _ = write!(buf, "{}", s); - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }) -} - -#[bench] -fn 
bytes_mut_extend(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(256); - let data = [33u8; 32]; - - b.bytes = data.len() as u64 * 4; - b.iter(|| { - for _ in 0..4 { - buf.extend(&data); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -// BufMut for BytesMut vs Vec - -#[bench] -fn put_slice_bytes_mut(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(256); - let data = [33u8; 32]; - - b.bytes = data.len() as u64 * 4; - b.iter(|| { - for _ in 0..4 { - buf.put_slice(&data); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_u8_bytes_mut(b: &mut Bencher) { - let mut buf = BytesMut::with_capacity(256); - let cnt = 128; - - b.bytes = cnt as u64; - b.iter(|| { - for _ in 0..cnt { - buf.put_u8(b'x'); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_slice_vec(b: &mut Bencher) { - let mut buf = Vec::::with_capacity(256); - let data = [33u8; 32]; - - b.bytes = data.len() as u64 * 4; - b.iter(|| { - for _ in 0..4 { - buf.put_slice(&data); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_u8_vec(b: &mut Bencher) { - let mut buf = Vec::::with_capacity(256); - let cnt = 128; - - b.bytes = cnt as u64; - b.iter(|| { - for _ in 0..cnt { - buf.put_u8(b'x'); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_slice_vec_extend(b: &mut Bencher) { - let mut buf = Vec::::with_capacity(256); - let data = [33u8; 32]; - - b.bytes = data.len() as u64 * 4; - b.iter(|| { - for _ in 0..4 { - buf.extend_from_slice(&data); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} - -#[bench] -fn put_u8_vec_push(b: &mut Bencher) { - let mut buf = Vec::::with_capacity(256); - let cnt = 128; - - b.bytes = cnt as u64; - b.iter(|| { - for _ in 0..cnt { - buf.push(b'x'); - } - test::black_box(&buf); - unsafe { - buf.set_len(0); - } - }); -} diff -Nru 
s390-tools-2.31.0/rust-vendor/bytes/benches/bytes.rs s390-tools-2.33.1/rust-vendor/bytes/benches/bytes.rs --- s390-tools-2.31.0/rust-vendor/bytes/benches/bytes.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/benches/bytes.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,120 +0,0 @@ -#![feature(test)] -#![warn(rust_2018_idioms)] - -extern crate test; - -use bytes::Bytes; -use test::Bencher; - -#[bench] -fn deref_unique(b: &mut Bencher) { - let buf = Bytes::from(vec![0; 1024]); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn deref_shared(b: &mut Bencher) { - let buf = Bytes::from(vec![0; 1024]); - let _b2 = buf.clone(); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn deref_static(b: &mut Bencher) { - let buf = Bytes::from_static(b"hello world"); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&buf[..]); - } - }) -} - -#[bench] -fn clone_static(b: &mut Bencher) { - let bytes = - Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes()); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&bytes.clone()); - } - }) -} - -#[bench] -fn clone_shared(b: &mut Bencher) { - let bytes = Bytes::from(b"hello world 1234567890 and have a good byte 0987654321".to_vec()); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&bytes.clone()); - } - }) -} - -#[bench] -fn clone_arc_vec(b: &mut Bencher) { - use std::sync::Arc; - let bytes = Arc::new(b"hello world 1234567890 and have a good byte 0987654321".to_vec()); - - b.iter(|| { - for _ in 0..1024 { - test::black_box(&bytes.clone()); - } - }) -} - -#[bench] -fn from_long_slice(b: &mut Bencher) { - let data = [0u8; 128]; - b.bytes = data.len() as u64; - b.iter(|| { - let buf = Bytes::copy_from_slice(&data[..]); - test::black_box(buf); - }) -} - -#[bench] -fn slice_empty(b: &mut Bencher) { - b.iter(|| { - // `clone` is to convert to ARC - let b = Bytes::from(vec![17; 
1024]).clone(); - for i in 0..1000 { - test::black_box(b.slice(i % 100..i % 100)); - } - }) -} - -#[bench] -fn slice_short_from_arc(b: &mut Bencher) { - b.iter(|| { - // `clone` is to convert to ARC - let b = Bytes::from(vec![17; 1024]).clone(); - for i in 0..1000 { - test::black_box(b.slice(1..2 + i % 10)); - } - }) -} - -#[bench] -fn split_off_and_drop(b: &mut Bencher) { - b.iter(|| { - for _ in 0..1024 { - let v = vec![10; 200]; - let mut b = Bytes::from(v); - test::black_box(b.split_off(100)); - test::black_box(b); - } - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/bytes/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/bytes/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/bytes/Cargo.toml s390-tools-2.33.1/rust-vendor/bytes/Cargo.toml --- s390-tools-2.31.0/rust-vendor/bytes/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,54 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -name = "bytes" -version = "1.5.0" -authors = [ - "Carl Lerche ", - "Sean McArthur ", -] -description = "Types and traits for working with bytes" -readme = "README.md" -keywords = [ - "buffers", - "zero-copy", - "io", -] -categories = [ - "network-programming", - "data-structures", -] -license = "MIT" -repository = "https://github.com/tokio-rs/bytes" - -[package.metadata.docs.rs] -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[dependencies.serde] -version = "1.0.60" -features = ["alloc"] -optional = true -default-features = false - -[dev-dependencies.serde_test] -version = "1.0" - -[features] -default = ["std"] -std = [] - -[target."cfg(loom)".dev-dependencies.loom] -version = "0.5" diff -Nru s390-tools-2.31.0/rust-vendor/bytes/CHANGELOG.md s390-tools-2.33.1/rust-vendor/bytes/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/bytes/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,294 +0,0 @@ -# 1.5.0 (September 7, 2023) - -### Added - -- Add `UninitSlice::{new,init}` (#598, #599) -- Implement `BufMut` for `&mut [MaybeUninit]` (#597) - -### Changed - -- Mark `BytesMut::extend_from_slice` as inline (#595) - -# 1.4.0 (January 31, 2023) - -### Added - -- Make `IntoIter` constructor public (#581) - -### Fixed - -- Avoid large reallocations when freezing `BytesMut` (#592) - -### Documented - -- Document which functions require `std` (#591) -- Fix duplicate "the the" typos (#585) - -# 1.3.0 (November 20, 2022) - -### Added - -- Rename and expose `BytesMut::spare_capacity_mut` (#572) -- Implement native-endian get and put functions for `Buf` and `BufMut` (#576) - -### Fixed - -- Don't have important data in unused capacity when calling reserve (#563) - -### Documented - -- `Bytes::new` etc should return `Self` not `Bytes` (#568) - -# 1.2.1 (July 30, 2022) - -### Fixed - -- Fix unbounded memory growth when using `reserve` (#560) - -# 1.2.0 (July 19, 
2022) - -### Added - -- Add `BytesMut::zeroed` (#517) -- Implement `Extend` for `BytesMut` (#527) -- Add conversion from `BytesMut` to `Vec` (#543, #554) -- Add conversion from `Bytes` to `Vec` (#547) -- Add `UninitSlice::as_uninit_slice_mut()` (#548) -- Add const to `Bytes::{len,is_empty}` (#514) - -### Changed - -- Reuse vector in `BytesMut::reserve` (#539, #544) - -### Fixed - -- Make miri happy (#515, #523, #542, #545, #553) -- Make tsan happy (#541) -- Fix `remaining_mut()` on chain (#488) -- Fix amortized asymptotics of `BytesMut` (#555) - -### Documented - -- Redraw layout diagram with box drawing characters (#539) -- Clarify `BytesMut::unsplit` docs (#535) - -# 1.1.0 (August 25, 2021) - -### Added - -- `BufMut::put_bytes(self, val, cnt)` (#487) -- Implement `From>` for `Bytes` (#504) - -### Changed - -- Override `put_slice` for `&mut [u8]` (#483) -- Panic on integer overflow in `Chain::remaining` (#482) -- Add inline tags to `UninitSlice` methods (#443) -- Override `copy_to_bytes` for Chain and Take (#481) -- Keep capacity when unsplit on empty other buf (#502) - -### Documented - -- Clarify `BufMut` allocation guarantees (#501) -- Clarify `BufMut::put_int` behavior (#486) -- Clarify actions of `clear` and `truncate`. (#508) - -# 1.0.1 (January 11, 2021) - -### Changed -- mark `Vec::put_slice` with `#[inline]` (#459) - -### Fixed -- Fix deprecation warning (#457) -- use `Box::into_raw` instead of `mem::forget`-in-disguise (#458) - -# 1.0.0 (December 22, 2020) - -### Changed -- Rename `Buf`/`BufMut` methods `bytes()` and `bytes_mut()` to `chunk()` and `chunk_mut()` (#450) - -### Removed -- remove unused Buf implementation. (#449) - -# 0.6.0 (October 21, 2020) - -API polish in preparation for a 1.0 release. - -### Changed -- `BufMut` is now an `unsafe` trait (#432). -- `BufMut::bytes_mut()` returns `&mut UninitSlice`, a type owned by `bytes` to - avoid undefined behavior (#433). -- `Buf::copy_to_bytes(len)` replaces `Buf::into_bytes()` (#439). 
-- `Buf`/`BufMut` utility methods are moved onto the trait and `*Ext` traits are - removed (#431). - -### Removed -- `BufMut::bytes_vectored_mut()` (#430). -- `new` methods on combinator types (#434). - -# 0.5.6 (July 13, 2020) - -- Improve `BytesMut` to reuse buffer when fully `advance`d. -- Mark `BytesMut::{as_mut, set_len}` with `#[inline]`. -- Relax synchronization when cloning in shared vtable of `Bytes`. -- Move `loom` to `dev-dependencies`. - -# 0.5.5 (June 18, 2020) - -### Added -- Allow using the `serde` feature in `no_std` environments (#385). - -### Fix -- Fix `BufMut::advance_mut` to panic if advanced passed the capacity (#354).. -- Fix `BytesMut::freeze` ignoring amount previously `advance`d (#352). - -# 0.5.4 (January 23, 2020) - -### Added -- Make `Bytes::new` a `const fn`. -- Add `From` for `Bytes`. - -### Fix -- Fix reversed arguments in `PartialOrd` for `Bytes`. -- Fix `Bytes::truncate` losing original capacity when repr is an unshared `Vec`. -- Fix `Bytes::from(Vec)` when allocator gave `Vec` a pointer with LSB set. -- Fix panic in `Bytes::slice_ref` if argument is an empty slice. - -# 0.5.3 (December 12, 2019) - -### Added -- `must_use` attributes to `split`, `split_off`, and `split_to` methods (#337). - -### Fix -- Potential freeing of a null pointer in `Bytes` when constructed with an empty `Vec` (#341, #342). -- Calling `Bytes::truncate` with a size large than the length will no longer clear the `Bytes` (#333). - -# 0.5.2 (November 27, 2019) - -### Added -- `Limit` methods `into_inner`, `get_ref`, `get_mut`, `limit`, and `set_limit` (#325). - -# 0.5.1 (November 25, 2019) - -### Fix -- Growth documentation for `BytesMut` (#321) - -# 0.5.0 (November 25, 2019) - -### Fix -- Potential overflow in `copy_to_slice` - -### Changed -- Increased minimum supported Rust version to 1.39. -- `Bytes` is now a "trait object", allowing for custom allocation strategies (#298) -- `BytesMut` implicitly grows internal storage. 
`remaining_mut()` returns - `usize::MAX` (#316). -- `BufMut::bytes_mut` returns `&mut [MaybeUninit]` to reflect the unknown - initialization state (#305). -- `Buf` / `BufMut` implementations for `&[u8]` and `&mut [u8]` - respectively (#261). -- Move `Buf` / `BufMut` "extra" functions to an extension trait (#306). -- `BufMutExt::limit` (#309). -- `Bytes::slice` takes a `RangeBounds` argument (#265). -- `Bytes::from_static` is now a `const fn` (#311). -- A multitude of smaller performance optimizations. - -### Added -- `no_std` support (#281). -- `get_*`, `put_*`, `get_*_le`, and `put_*le` accessors for handling byte order. -- `BorrowMut` implementation for `BytesMut` (#185). - -### Removed -- `IntoBuf` (#288). -- `Buf` implementation for `&str` (#301). -- `byteorder` dependency (#280). -- `iovec` dependency, use `std::IoSlice` instead (#263). -- optional `either` dependency (#315). -- optional `i128` feature -- now available on stable. (#276). - -# 0.4.12 (March 6, 2019) - -### Added -- Implement `FromIterator<&'a u8>` for `BytesMut`/`Bytes` (#244). -- Implement `Buf` for `VecDeque` (#249). - -# 0.4.11 (November 17, 2018) - -* Use raw pointers for potentially racy loads (#233). -* Implement `BufRead` for `buf::Reader` (#232). -* Documentation tweaks (#234). - -# 0.4.10 (September 4, 2018) - -* impl `Buf` and `BufMut` for `Either` (#225). -* Add `Bytes::slice_ref` (#208). - -# 0.4.9 (July 12, 2018) - -* Add 128 bit number support behind a feature flag (#209). -* Implement `IntoBuf` for `&mut [u8]` - -# 0.4.8 (May 25, 2018) - -* Fix panic in `BytesMut` `FromIterator` implementation. -* Bytes: Recycle space when reserving space in vec mode (#197). -* Bytes: Add resize fn (#203). - -# 0.4.7 (April 27, 2018) - -* Make `Buf` and `BufMut` usable as trait objects (#186). -* impl BorrowMut for BytesMut (#185). -* Improve accessor performance (#195). - -# 0.4.6 (Janary 8, 2018) - -* Implement FromIterator for Bytes/BytesMut (#148). 
-* Add `advance` fn to Bytes/BytesMut (#166). -* Add `unsplit` fn to `BytesMut` (#162, #173). -* Improvements to Bytes split fns (#92). - -# 0.4.5 (August 12, 2017) - -* Fix range bug in `Take::bytes` -* Misc performance improvements -* Add extra `PartialEq` implementations. -* Add `Bytes::with_capacity` -* Implement `AsMut[u8]` for `BytesMut` - -# 0.4.4 (May 26, 2017) - -* Add serde support behind feature flag -* Add `extend_from_slice` on `Bytes` and `BytesMut` -* Add `truncate` and `clear` on `Bytes` -* Misc additional std trait implementations -* Misc performance improvements - -# 0.4.3 (April 30, 2017) - -* Fix Vec::advance_mut bug -* Bump minimum Rust version to 1.15 -* Misc performance tweaks - -# 0.4.2 (April 5, 2017) - -* Misc performance tweaks -* Improved `Debug` implementation for `Bytes` -* Avoid some incorrect assert panics - -# 0.4.1 (March 15, 2017) - -* Expose `buf` module and have most types available from there vs. root. -* Implement `IntoBuf` for `T: Buf`. -* Add `FromBuf` and `Buf::collect`. -* Add iterator adapter for `Buf`. -* Add scatter/gather support to `Buf` and `BufMut`. -* Add `Buf::chain`. -* Reduce allocations on repeated calls to `BytesMut::reserve`. -* Implement `Debug` for more types. -* Remove `Source` in favor of `IntoBuf`. -* Implement `Extend` for `BytesMut`. 
- - -# 0.4.0 (February 24, 2017) - -* Initial release diff -Nru s390-tools-2.31.0/rust-vendor/bytes/ci/miri.sh s390-tools-2.33.1/rust-vendor/bytes/ci/miri.sh --- s390-tools-2.31.0/rust-vendor/bytes/ci/miri.sh 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/ci/miri.sh 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -#!/bin/bash -set -e - -rustup toolchain install nightly --component miri -rustup override set nightly -cargo miri setup - -export MIRIFLAGS="-Zmiri-strict-provenance" - -cargo miri test -cargo miri test --target mips64-unknown-linux-gnuabi64 diff -Nru s390-tools-2.31.0/rust-vendor/bytes/ci/test-stable.sh s390-tools-2.33.1/rust-vendor/bytes/ci/test-stable.sh --- s390-tools-2.31.0/rust-vendor/bytes/ci/test-stable.sh 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/ci/test-stable.sh 1970-01-01 01:00:00.000000000 +0100 @@ -1,28 +0,0 @@ -#!/bin/bash - -set -ex - -cmd="${1:-test}" - -# Install cargo-hack for feature flag test -host=$(rustc -Vv | grep host | sed 's/host: //') -curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-$host.tar.gz | tar xzf - -C ~/.cargo/bin - -# Run with each feature -# * --each-feature includes both default/no-default features -# * --optional-deps is needed for serde feature -cargo hack "${cmd}" --each-feature --optional-deps -# Run with all features -cargo "${cmd}" --all-features - -cargo doc --no-deps --all-features - -if [[ "${RUST_VERSION}" == "nightly"* ]]; then - # Check benchmarks - cargo check --benches - - # Check minimal versions - cargo clean - cargo update -Zminimal-versions - cargo check --all-features -fi diff -Nru s390-tools-2.31.0/rust-vendor/bytes/ci/tsan.sh s390-tools-2.33.1/rust-vendor/bytes/ci/tsan.sh --- s390-tools-2.31.0/rust-vendor/bytes/ci/tsan.sh 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/ci/tsan.sh 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -#!/bin/bash - -set -ex - -export 
ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" - -# Run address sanitizer -RUSTFLAGS="-Z sanitizer=address" \ -cargo test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut - -# Run thread sanitizer -RUSTFLAGS="-Z sanitizer=thread" \ -cargo -Zbuild-std test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut diff -Nru s390-tools-2.31.0/rust-vendor/bytes/clippy.toml s390-tools-2.33.1/rust-vendor/bytes/clippy.toml --- s390-tools-2.31.0/rust-vendor/bytes/clippy.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/clippy.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -msrv = "1.39" diff -Nru s390-tools-2.31.0/rust-vendor/bytes/LICENSE s390-tools-2.33.1/rust-vendor/bytes/LICENSE --- s390-tools-2.31.0/rust-vendor/bytes/LICENSE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2018 Carl Lerche - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/bytes/README.md s390-tools-2.33.1/rust-vendor/bytes/README.md --- s390-tools-2.31.0/rust-vendor/bytes/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -# Bytes - -A utility library for working with bytes. - -[![Crates.io][crates-badge]][crates-url] -[![Build Status][ci-badge]][ci-url] - -[crates-badge]: https://img.shields.io/crates/v/bytes.svg -[crates-url]: https://crates.io/crates/bytes -[ci-badge]: https://github.com/tokio-rs/bytes/workflows/CI/badge.svg -[ci-url]: https://github.com/tokio-rs/bytes/actions - -[Documentation](https://docs.rs/bytes) - -## Usage - -To use `bytes`, first add this to your `Cargo.toml`: - -```toml -[dependencies] -bytes = "1" -``` - -Next, add this to your crate: - -```rust -use bytes::{Bytes, BytesMut, Buf, BufMut}; -``` - -## Serde support - -Serde support is optional and disabled by default. To enable use the feature `serde`. - -```toml -[dependencies] -bytes = { version = "1", features = ["serde"] } -``` - -## Building documentation - -When building the `bytes` documentation the `docsrs` option should be used, otherwise -feature gates will not be shown. This requires a nightly toolchain: - -``` -RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc -``` - -## License - -This project is licensed under the [MIT license](LICENSE). - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in `bytes` by you, shall be licensed as MIT, without any additional -terms or conditions. 
diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/buf_impl.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/buf_impl.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/buf_impl.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/buf_impl.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1394 +0,0 @@ -#[cfg(feature = "std")] -use crate::buf::{reader, Reader}; -use crate::buf::{take, Chain, Take}; - -use core::{cmp, mem, ptr}; - -#[cfg(feature = "std")] -use std::io::IoSlice; - -use alloc::boxed::Box; - -macro_rules! buf_get_impl { - ($this:ident, $typ:tt::$conv:tt) => {{ - const SIZE: usize = mem::size_of::<$typ>(); - // try to convert directly from the bytes - // this Option trick is to avoid keeping a borrow on self - // when advance() is called (mut borrow) and to call bytes() only once - let ret = $this - .chunk() - .get(..SIZE) - .map(|src| unsafe { $typ::$conv(*(src as *const _ as *const [_; SIZE])) }); - - if let Some(ret) = ret { - // if the direct conversion was possible, advance and return - $this.advance(SIZE); - return ret; - } else { - // if not we copy the bytes in a temp buffer then convert - let mut buf = [0; SIZE]; - $this.copy_to_slice(&mut buf); // (do the advance) - return $typ::$conv(buf); - } - }}; - (le => $this:ident, $typ:tt, $len_to_read:expr) => {{ - debug_assert!(mem::size_of::<$typ>() >= $len_to_read); - - // The same trick as above does not improve the best case speed. - // It seems to be linked to the way the method is optimised by the compiler - let mut buf = [0; (mem::size_of::<$typ>())]; - $this.copy_to_slice(&mut buf[..($len_to_read)]); - return $typ::from_le_bytes(buf); - }}; - (be => $this:ident, $typ:tt, $len_to_read:expr) => {{ - debug_assert!(mem::size_of::<$typ>() >= $len_to_read); - - let mut buf = [0; (mem::size_of::<$typ>())]; - $this.copy_to_slice(&mut buf[mem::size_of::<$typ>() - ($len_to_read)..]); - return $typ::from_be_bytes(buf); - }}; -} - -/// Read bytes from a buffer. 
-/// -/// A buffer stores bytes in memory such that read operations are infallible. -/// The underlying storage may or may not be in contiguous memory. A `Buf` value -/// is a cursor into the buffer. Reading from `Buf` advances the cursor -/// position. It can be thought of as an efficient `Iterator` for collections of -/// bytes. -/// -/// The simplest `Buf` is a `&[u8]`. -/// -/// ``` -/// use bytes::Buf; -/// -/// let mut buf = &b"hello world"[..]; -/// -/// assert_eq!(b'h', buf.get_u8()); -/// assert_eq!(b'e', buf.get_u8()); -/// assert_eq!(b'l', buf.get_u8()); -/// -/// let mut rest = [0; 8]; -/// buf.copy_to_slice(&mut rest); -/// -/// assert_eq!(&rest[..], &b"lo world"[..]); -/// ``` -pub trait Buf { - /// Returns the number of bytes between the current position and the end of - /// the buffer. - /// - /// This value is greater than or equal to the length of the slice returned - /// by `chunk()`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"hello world"[..]; - /// - /// assert_eq!(buf.remaining(), 11); - /// - /// buf.get_u8(); - /// - /// assert_eq!(buf.remaining(), 10); - /// ``` - /// - /// # Implementer notes - /// - /// Implementations of `remaining` should ensure that the return value does - /// not change unless a call is made to `advance` or any other function that - /// is documented to change the `Buf`'s current position. - fn remaining(&self) -> usize; - - /// Returns a slice starting at the current position and of length between 0 - /// and `Buf::remaining()`. Note that this *can* return shorter slice (this allows - /// non-continuous internal representation). - /// - /// This is a lower level function. Most operations are done with other - /// functions. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"hello world"[..]; - /// - /// assert_eq!(buf.chunk(), &b"hello world"[..]); - /// - /// buf.advance(6); - /// - /// assert_eq!(buf.chunk(), &b"world"[..]); - /// ``` - /// - /// # Implementer notes - /// - /// This function should never panic. Once the end of the buffer is reached, - /// i.e., `Buf::remaining` returns 0, calls to `chunk()` should return an - /// empty slice. - // The `chunk` method was previously called `bytes`. This alias makes the rename - // more easily discoverable. - #[cfg_attr(docsrs, doc(alias = "bytes"))] - fn chunk(&self) -> &[u8]; - - /// Fills `dst` with potentially multiple slices starting at `self`'s - /// current position. - /// - /// If the `Buf` is backed by disjoint slices of bytes, `chunk_vectored` enables - /// fetching more than one slice at once. `dst` is a slice of `IoSlice` - /// references, enabling the slice to be directly used with [`writev`] - /// without any further conversion. The sum of the lengths of all the - /// buffers in `dst` will be less than or equal to `Buf::remaining()`. - /// - /// The entries in `dst` will be overwritten, but the data **contained** by - /// the slices **will not** be modified. If `chunk_vectored` does not fill every - /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices - /// in `self. - /// - /// This is a lower level function. Most operations are done with other - /// functions. - /// - /// # Implementer notes - /// - /// This function should never panic. Once the end of the buffer is reached, - /// i.e., `Buf::remaining` returns 0, calls to `chunk_vectored` must return 0 - /// without mutating `dst`. - /// - /// Implementations should also take care to properly handle being called - /// with `dst` being a zero length slice. 
- /// - /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html - #[cfg(feature = "std")] - #[cfg_attr(docsrs, doc(cfg(feature = "std")))] - fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { - if dst.is_empty() { - return 0; - } - - if self.has_remaining() { - dst[0] = IoSlice::new(self.chunk()); - 1 - } else { - 0 - } - } - - /// Advance the internal cursor of the Buf - /// - /// The next call to `chunk()` will return a slice starting `cnt` bytes - /// further into the underlying buffer. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"hello world"[..]; - /// - /// assert_eq!(buf.chunk(), &b"hello world"[..]); - /// - /// buf.advance(6); - /// - /// assert_eq!(buf.chunk(), &b"world"[..]); - /// ``` - /// - /// # Panics - /// - /// This function **may** panic if `cnt > self.remaining()`. - /// - /// # Implementer notes - /// - /// It is recommended for implementations of `advance` to panic if `cnt > - /// self.remaining()`. If the implementation does not panic, the call must - /// behave as if `cnt == self.remaining()`. - /// - /// A call with `cnt == 0` should never panic and be a no-op. - fn advance(&mut self, cnt: usize); - - /// Returns true if there are any more bytes to consume - /// - /// This is equivalent to `self.remaining() != 0`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"a"[..]; - /// - /// assert!(buf.has_remaining()); - /// - /// buf.get_u8(); - /// - /// assert!(!buf.has_remaining()); - /// ``` - fn has_remaining(&self) -> bool { - self.remaining() > 0 - } - - /// Copies bytes from `self` into `dst`. - /// - /// The cursor is advanced by the number of bytes copied. `self` must have - /// enough remaining bytes to fill `dst`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"hello world"[..]; - /// let mut dst = [0; 5]; - /// - /// buf.copy_to_slice(&mut dst); - /// assert_eq!(&b"hello"[..], &dst); - /// assert_eq!(6, buf.remaining()); - /// ``` - /// - /// # Panics - /// - /// This function panics if `self.remaining() < dst.len()` - fn copy_to_slice(&mut self, dst: &mut [u8]) { - let mut off = 0; - - assert!(self.remaining() >= dst.len()); - - while off < dst.len() { - let cnt; - - unsafe { - let src = self.chunk(); - cnt = cmp::min(src.len(), dst.len() - off); - - ptr::copy_nonoverlapping(src.as_ptr(), dst[off..].as_mut_ptr(), cnt); - - off += cnt; - } - - self.advance(cnt); - } - } - - /// Gets an unsigned 8 bit integer from `self`. - /// - /// The current position is advanced by 1. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08 hello"[..]; - /// assert_eq!(8, buf.get_u8()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is no more remaining data in `self`. - fn get_u8(&mut self) -> u8 { - assert!(self.remaining() >= 1); - let ret = self.chunk()[0]; - self.advance(1); - ret - } - - /// Gets a signed 8 bit integer from `self`. - /// - /// The current position is advanced by 1. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08 hello"[..]; - /// assert_eq!(8, buf.get_i8()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is no more remaining data in `self`. - fn get_i8(&mut self) -> i8 { - assert!(self.remaining() >= 1); - let ret = self.chunk()[0] as i8; - self.advance(1); - ret - } - - /// Gets an unsigned 16 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 2. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x09 hello"[..]; - /// assert_eq!(0x0809, buf.get_u16()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u16(&mut self) -> u16 { - buf_get_impl!(self, u16::from_be_bytes); - } - - /// Gets an unsigned 16 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x09\x08 hello"[..]; - /// assert_eq!(0x0809, buf.get_u16_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u16_le(&mut self) -> u16 { - buf_get_impl!(self, u16::from_le_bytes); - } - - /// Gets an unsigned 16 bit integer from `self` in native-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x08\x09 hello", - /// false => b"\x09\x08 hello", - /// }; - /// assert_eq!(0x0809, buf.get_u16_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u16_ne(&mut self) -> u16 { - buf_get_impl!(self, u16::from_ne_bytes); - } - - /// Gets a signed 16 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x09 hello"[..]; - /// assert_eq!(0x0809, buf.get_i16()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i16(&mut self) -> i16 { - buf_get_impl!(self, i16::from_be_bytes); - } - - /// Gets a signed 16 bit integer from `self` in little-endian byte order. 
- /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x09\x08 hello"[..]; - /// assert_eq!(0x0809, buf.get_i16_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i16_le(&mut self) -> i16 { - buf_get_impl!(self, i16::from_le_bytes); - } - - /// Gets a signed 16 bit integer from `self` in native-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x08\x09 hello", - /// false => b"\x09\x08 hello", - /// }; - /// assert_eq!(0x0809, buf.get_i16_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i16_ne(&mut self) -> i16 { - buf_get_impl!(self, i16::from_ne_bytes); - } - - /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..]; - /// assert_eq!(0x0809A0A1, buf.get_u32()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u32(&mut self) -> u32 { - buf_get_impl!(self, u32::from_be_bytes); - } - - /// Gets an unsigned 32 bit integer from `self` in the little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..]; - /// assert_eq!(0x0809A0A1, buf.get_u32_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. 
- fn get_u32_le(&mut self) -> u32 { - buf_get_impl!(self, u32::from_le_bytes); - } - - /// Gets an unsigned 32 bit integer from `self` in native-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x08\x09\xA0\xA1 hello", - /// false => b"\xA1\xA0\x09\x08 hello", - /// }; - /// assert_eq!(0x0809A0A1, buf.get_u32_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u32_ne(&mut self) -> u32 { - buf_get_impl!(self, u32::from_ne_bytes); - } - - /// Gets a signed 32 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..]; - /// assert_eq!(0x0809A0A1, buf.get_i32()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i32(&mut self) -> i32 { - buf_get_impl!(self, i32::from_be_bytes); - } - - /// Gets a signed 32 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..]; - /// assert_eq!(0x0809A0A1, buf.get_i32_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i32_le(&mut self) -> i32 { - buf_get_impl!(self, i32::from_le_bytes); - } - - /// Gets a signed 32 bit integer from `self` in native-endian byte order. - /// - /// The current position is advanced by 4. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x08\x09\xA0\xA1 hello", - /// false => b"\xA1\xA0\x09\x08 hello", - /// }; - /// assert_eq!(0x0809A0A1, buf.get_i32_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i32_ne(&mut self) -> i32 { - buf_get_impl!(self, i32::from_ne_bytes); - } - - /// Gets an unsigned 64 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..]; - /// assert_eq!(0x0102030405060708, buf.get_u64()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u64(&mut self) -> u64 { - buf_get_impl!(self, u64::from_be_bytes); - } - - /// Gets an unsigned 64 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; - /// assert_eq!(0x0102030405060708, buf.get_u64_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u64_le(&mut self) -> u64 { - buf_get_impl!(self, u64::from_le_bytes); - } - - /// Gets an unsigned 64 bit integer from `self` in native-endian byte order. - /// - /// The current position is advanced by 8. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello", - /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello", - /// }; - /// assert_eq!(0x0102030405060708, buf.get_u64_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u64_ne(&mut self) -> u64 { - buf_get_impl!(self, u64::from_ne_bytes); - } - - /// Gets a signed 64 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..]; - /// assert_eq!(0x0102030405060708, buf.get_i64()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i64(&mut self) -> i64 { - buf_get_impl!(self, i64::from_be_bytes); - } - - /// Gets a signed 64 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; - /// assert_eq!(0x0102030405060708, buf.get_i64_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i64_le(&mut self) -> i64 { - buf_get_impl!(self, i64::from_le_bytes); - } - - /// Gets a signed 64 bit integer from `self` in native-endian byte order. - /// - /// The current position is advanced by 8. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello", - /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello", - /// }; - /// assert_eq!(0x0102030405060708, buf.get_i64_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i64_ne(&mut self) -> i64 { - buf_get_impl!(self, i64::from_ne_bytes); - } - - /// Gets an unsigned 128 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..]; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u128(&mut self) -> u128 { - buf_get_impl!(self, u128::from_be_bytes); - } - - /// Gets an unsigned 128 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u128_le(&mut self) -> u128 { - buf_get_impl!(self, u128::from_le_bytes); - } - - /// Gets an unsigned 128 bit integer from `self` in native-endian byte order. - /// - /// The current position is advanced by 16. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello", - /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello", - /// }; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_u128_ne(&mut self) -> u128 { - buf_get_impl!(self, u128::from_ne_bytes); - } - - /// Gets a signed 128 bit integer from `self` in big-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..]; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i128(&mut self) -> i128 { - buf_get_impl!(self, i128::from_be_bytes); - } - - /// Gets a signed 128 bit integer from `self` in little-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i128_le(&mut self) -> i128 { - buf_get_impl!(self, i128::from_le_bytes); - } - - /// Gets a signed 128 bit integer from `self` in native-endian byte order. - /// - /// The current position is advanced by 16. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello", - /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello", - /// }; - /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_i128_ne(&mut self) -> i128 { - buf_get_impl!(self, i128::from_ne_bytes); - } - - /// Gets an unsigned n-byte integer from `self` in big-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03 hello"[..]; - /// assert_eq!(0x010203, buf.get_uint(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_uint(&mut self, nbytes: usize) -> u64 { - buf_get_impl!(be => self, u64, nbytes); - } - - /// Gets an unsigned n-byte integer from `self` in little-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x03\x02\x01 hello"[..]; - /// assert_eq!(0x010203, buf.get_uint_le(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_uint_le(&mut self, nbytes: usize) -> u64 { - buf_get_impl!(le => self, u64, nbytes); - } - - /// Gets an unsigned n-byte integer from `self` in native-endian byte order. - /// - /// The current position is advanced by `nbytes`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x01\x02\x03 hello", - /// false => b"\x03\x02\x01 hello", - /// }; - /// assert_eq!(0x010203, buf.get_uint_ne(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_uint_ne(&mut self, nbytes: usize) -> u64 { - if cfg!(target_endian = "big") { - self.get_uint(nbytes) - } else { - self.get_uint_le(nbytes) - } - } - - /// Gets a signed n-byte integer from `self` in big-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x01\x02\x03 hello"[..]; - /// assert_eq!(0x010203, buf.get_int(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_int(&mut self, nbytes: usize) -> i64 { - buf_get_impl!(be => self, i64, nbytes); - } - - /// Gets a signed n-byte integer from `self` in little-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x03\x02\x01 hello"[..]; - /// assert_eq!(0x010203, buf.get_int_le(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_int_le(&mut self, nbytes: usize) -> i64 { - buf_get_impl!(le => self, i64, nbytes); - } - - /// Gets a signed n-byte integer from `self` in native-endian byte order. - /// - /// The current position is advanced by `nbytes`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x01\x02\x03 hello", - /// false => b"\x03\x02\x01 hello", - /// }; - /// assert_eq!(0x010203, buf.get_int_ne(3)); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_int_ne(&mut self, nbytes: usize) -> i64 { - if cfg!(target_endian = "big") { - self.get_int(nbytes) - } else { - self.get_int_le(nbytes) - } - } - - /// Gets an IEEE754 single-precision (4 bytes) floating point number from - /// `self` in big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x3F\x99\x99\x9A hello"[..]; - /// assert_eq!(1.2f32, buf.get_f32()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_f32(&mut self) -> f32 { - f32::from_bits(Self::get_u32(self)) - } - - /// Gets an IEEE754 single-precision (4 bytes) floating point number from - /// `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x9A\x99\x99\x3F hello"[..]; - /// assert_eq!(1.2f32, buf.get_f32_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_f32_le(&mut self) -> f32 { - f32::from_bits(Self::get_u32_le(self)) - } - - /// Gets an IEEE754 single-precision (4 bytes) floating point number from - /// `self` in native-endian byte order. - /// - /// The current position is advanced by 4. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x3F\x99\x99\x9A hello", - /// false => b"\x9A\x99\x99\x3F hello", - /// }; - /// assert_eq!(1.2f32, buf.get_f32_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_f32_ne(&mut self) -> f32 { - f32::from_bits(Self::get_u32_ne(self)) - } - - /// Gets an IEEE754 double-precision (8 bytes) floating point number from - /// `self` in big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..]; - /// assert_eq!(1.2f64, buf.get_f64()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_f64(&mut self) -> f64 { - f64::from_bits(Self::get_u64(self)) - } - - /// Gets an IEEE754 double-precision (8 bytes) floating point number from - /// `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..]; - /// assert_eq!(1.2f64, buf.get_f64_le()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_f64_le(&mut self) -> f64 { - f64::from_bits(Self::get_u64_le(self)) - } - - /// Gets an IEEE754 double-precision (8 bytes) floating point number from - /// `self` in native-endian byte order. - /// - /// The current position is advanced by 8. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf: &[u8] = match cfg!(target_endian = "big") { - /// true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello", - /// false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello", - /// }; - /// assert_eq!(1.2f64, buf.get_f64_ne()); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining data in `self`. - fn get_f64_ne(&mut self) -> f64 { - f64::from_bits(Self::get_u64_ne(self)) - } - - /// Consumes `len` bytes inside self and returns new instance of `Bytes` - /// with this data. - /// - /// This function may be optimized by the underlying type to avoid actual - /// copies. For example, `Bytes` implementation will do a shallow copy - /// (ref-count increment). - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let bytes = (&b"hello world"[..]).copy_to_bytes(5); - /// assert_eq!(&bytes[..], &b"hello"[..]); - /// ``` - fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { - use super::BufMut; - - assert!(len <= self.remaining(), "`len` greater than remaining"); - - let mut ret = crate::BytesMut::with_capacity(len); - ret.put(self.take(len)); - ret.freeze() - } - - /// Creates an adaptor which will read at most `limit` bytes from `self`. - /// - /// This function returns a new instance of `Buf` which will read at most - /// `limit` bytes. - /// - /// # Examples - /// - /// ``` - /// use bytes::{Buf, BufMut}; - /// - /// let mut buf = b"hello world"[..].take(5); - /// let mut dst = vec![]; - /// - /// dst.put(&mut buf); - /// assert_eq!(dst, b"hello"); - /// - /// let mut buf = buf.into_inner(); - /// dst.clear(); - /// dst.put(&mut buf); - /// assert_eq!(dst, b" world"); - /// ``` - fn take(self, limit: usize) -> Take - where - Self: Sized, - { - take::new(self, limit) - } - - /// Creates an adaptor which will chain this buffer with another. - /// - /// The returned `Buf` instance will first consume all bytes from `self`. 
- /// Afterwards the output is equivalent to the output of next. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut chain = b"hello "[..].chain(&b"world"[..]); - /// - /// let full = chain.copy_to_bytes(11); - /// assert_eq!(full.chunk(), b"hello world"); - /// ``` - fn chain(self, next: U) -> Chain - where - Self: Sized, - { - Chain::new(self, next) - } - - /// Creates an adaptor which implements the `Read` trait for `self`. - /// - /// This function returns a new value which implements `Read` by adapting - /// the `Read` trait functions to the `Buf` trait functions. Given that - /// `Buf` operations are infallible, none of the `Read` functions will - /// return with `Err`. - /// - /// # Examples - /// - /// ``` - /// use bytes::{Bytes, Buf}; - /// use std::io::Read; - /// - /// let buf = Bytes::from("hello world"); - /// - /// let mut reader = buf.reader(); - /// let mut dst = [0; 1024]; - /// - /// let num = reader.read(&mut dst).unwrap(); - /// - /// assert_eq!(11, num); - /// assert_eq!(&dst[..11], &b"hello world"[..]); - /// ``` - #[cfg(feature = "std")] - #[cfg_attr(docsrs, doc(cfg(feature = "std")))] - fn reader(self) -> Reader - where - Self: Sized, - { - reader::new(self) - } -} - -macro_rules! 
deref_forward_buf { - () => { - fn remaining(&self) -> usize { - (**self).remaining() - } - - fn chunk(&self) -> &[u8] { - (**self).chunk() - } - - #[cfg(feature = "std")] - fn chunks_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize { - (**self).chunks_vectored(dst) - } - - fn advance(&mut self, cnt: usize) { - (**self).advance(cnt) - } - - fn has_remaining(&self) -> bool { - (**self).has_remaining() - } - - fn copy_to_slice(&mut self, dst: &mut [u8]) { - (**self).copy_to_slice(dst) - } - - fn get_u8(&mut self) -> u8 { - (**self).get_u8() - } - - fn get_i8(&mut self) -> i8 { - (**self).get_i8() - } - - fn get_u16(&mut self) -> u16 { - (**self).get_u16() - } - - fn get_u16_le(&mut self) -> u16 { - (**self).get_u16_le() - } - - fn get_u16_ne(&mut self) -> u16 { - (**self).get_u16_ne() - } - - fn get_i16(&mut self) -> i16 { - (**self).get_i16() - } - - fn get_i16_le(&mut self) -> i16 { - (**self).get_i16_le() - } - - fn get_i16_ne(&mut self) -> i16 { - (**self).get_i16_ne() - } - - fn get_u32(&mut self) -> u32 { - (**self).get_u32() - } - - fn get_u32_le(&mut self) -> u32 { - (**self).get_u32_le() - } - - fn get_u32_ne(&mut self) -> u32 { - (**self).get_u32_ne() - } - - fn get_i32(&mut self) -> i32 { - (**self).get_i32() - } - - fn get_i32_le(&mut self) -> i32 { - (**self).get_i32_le() - } - - fn get_i32_ne(&mut self) -> i32 { - (**self).get_i32_ne() - } - - fn get_u64(&mut self) -> u64 { - (**self).get_u64() - } - - fn get_u64_le(&mut self) -> u64 { - (**self).get_u64_le() - } - - fn get_u64_ne(&mut self) -> u64 { - (**self).get_u64_ne() - } - - fn get_i64(&mut self) -> i64 { - (**self).get_i64() - } - - fn get_i64_le(&mut self) -> i64 { - (**self).get_i64_le() - } - - fn get_i64_ne(&mut self) -> i64 { - (**self).get_i64_ne() - } - - fn get_uint(&mut self, nbytes: usize) -> u64 { - (**self).get_uint(nbytes) - } - - fn get_uint_le(&mut self, nbytes: usize) -> u64 { - (**self).get_uint_le(nbytes) - } - - fn get_uint_ne(&mut self, nbytes: usize) -> u64 { - 
(**self).get_uint_ne(nbytes) - } - - fn get_int(&mut self, nbytes: usize) -> i64 { - (**self).get_int(nbytes) - } - - fn get_int_le(&mut self, nbytes: usize) -> i64 { - (**self).get_int_le(nbytes) - } - - fn get_int_ne(&mut self, nbytes: usize) -> i64 { - (**self).get_int_ne(nbytes) - } - - fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { - (**self).copy_to_bytes(len) - } - }; -} - -impl Buf for &mut T { - deref_forward_buf!(); -} - -impl Buf for Box { - deref_forward_buf!(); -} - -impl Buf for &[u8] { - #[inline] - fn remaining(&self) -> usize { - self.len() - } - - #[inline] - fn chunk(&self) -> &[u8] { - self - } - - #[inline] - fn advance(&mut self, cnt: usize) { - *self = &self[cnt..]; - } -} - -#[cfg(feature = "std")] -impl> Buf for std::io::Cursor { - fn remaining(&self) -> usize { - let len = self.get_ref().as_ref().len(); - let pos = self.position(); - - if pos >= len as u64 { - return 0; - } - - len - pos as usize - } - - fn chunk(&self) -> &[u8] { - let len = self.get_ref().as_ref().len(); - let pos = self.position(); - - if pos >= len as u64 { - return &[]; - } - - &self.get_ref().as_ref()[pos as usize..] - } - - fn advance(&mut self, cnt: usize) { - let pos = (self.position() as usize) - .checked_add(cnt) - .expect("overflow"); - - assert!(pos <= self.get_ref().as_ref().len()); - self.set_position(pos as u64); - } -} - -// The existence of this function makes the compiler catch if the Buf -// trait is "object-safe" or not. 
-fn _assert_trait_object(_b: &dyn Buf) {} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/buf_mut.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/buf_mut.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/buf_mut.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/buf_mut.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1528 +0,0 @@ -use crate::buf::{limit, Chain, Limit, UninitSlice}; -#[cfg(feature = "std")] -use crate::buf::{writer, Writer}; - -use core::{cmp, mem, ptr, usize}; - -use alloc::{boxed::Box, vec::Vec}; - -/// A trait for values that provide sequential write access to bytes. -/// -/// Write bytes to a buffer -/// -/// A buffer stores bytes in memory such that write operations are infallible. -/// The underlying storage may or may not be in contiguous memory. A `BufMut` -/// value is a cursor into the buffer. Writing to `BufMut` advances the cursor -/// position. -/// -/// The simplest `BufMut` is a `Vec`. -/// -/// ``` -/// use bytes::BufMut; -/// -/// let mut buf = vec![]; -/// -/// buf.put(&b"hello world"[..]); -/// -/// assert_eq!(buf, b"hello world"); -/// ``` -pub unsafe trait BufMut { - /// Returns the number of bytes that can be written from the current - /// position until the end of the buffer is reached. - /// - /// This value is greater than or equal to the length of the slice returned - /// by `chunk_mut()`. - /// - /// Writing to a `BufMut` may involve allocating more memory on the fly. - /// Implementations may fail before reaching the number of bytes indicated - /// by this method if they encounter an allocation failure. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut dst = [0; 10]; - /// let mut buf = &mut dst[..]; - /// - /// let original_remaining = buf.remaining_mut(); - /// buf.put(&b"hello"[..]); - /// - /// assert_eq!(original_remaining - 5, buf.remaining_mut()); - /// ``` - /// - /// # Implementer notes - /// - /// Implementations of `remaining_mut` should ensure that the return value - /// does not change unless a call is made to `advance_mut` or any other - /// function that is documented to change the `BufMut`'s current position. - /// - /// # Note - /// - /// `remaining_mut` may return value smaller than actual available space. - fn remaining_mut(&self) -> usize; - - /// Advance the internal cursor of the BufMut - /// - /// The next call to `chunk_mut` will return a slice starting `cnt` bytes - /// further into the underlying buffer. - /// - /// This function is unsafe because there is no guarantee that the bytes - /// being advanced past have been initialized. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = Vec::with_capacity(16); - /// - /// // Write some data - /// buf.chunk_mut()[0..2].copy_from_slice(b"he"); - /// unsafe { buf.advance_mut(2) }; - /// - /// // write more bytes - /// buf.chunk_mut()[0..3].copy_from_slice(b"llo"); - /// - /// unsafe { buf.advance_mut(3); } - /// - /// assert_eq!(5, buf.len()); - /// assert_eq!(buf, b"hello"); - /// ``` - /// - /// # Panics - /// - /// This function **may** panic if `cnt > self.remaining_mut()`. - /// - /// # Implementer notes - /// - /// It is recommended for implementations of `advance_mut` to panic if - /// `cnt > self.remaining_mut()`. If the implementation does not panic, - /// the call must behave as if `cnt == self.remaining_mut()`. - /// - /// A call with `cnt == 0` should never panic and be a no-op. - unsafe fn advance_mut(&mut self, cnt: usize); - - /// Returns true if there is space in `self` for more bytes. 
- /// - /// This is equivalent to `self.remaining_mut() != 0`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut dst = [0; 5]; - /// let mut buf = &mut dst[..]; - /// - /// assert!(buf.has_remaining_mut()); - /// - /// buf.put(&b"hello"[..]); - /// - /// assert!(!buf.has_remaining_mut()); - /// ``` - fn has_remaining_mut(&self) -> bool { - self.remaining_mut() > 0 - } - - /// Returns a mutable slice starting at the current BufMut position and of - /// length between 0 and `BufMut::remaining_mut()`. Note that this *can* be shorter than the - /// whole remainder of the buffer (this allows non-continuous implementation). - /// - /// This is a lower level function. Most operations are done with other - /// functions. - /// - /// The returned byte slice may represent uninitialized memory. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = Vec::with_capacity(16); - /// - /// unsafe { - /// // MaybeUninit::as_mut_ptr - /// buf.chunk_mut()[0..].as_mut_ptr().write(b'h'); - /// buf.chunk_mut()[1..].as_mut_ptr().write(b'e'); - /// - /// buf.advance_mut(2); - /// - /// buf.chunk_mut()[0..].as_mut_ptr().write(b'l'); - /// buf.chunk_mut()[1..].as_mut_ptr().write(b'l'); - /// buf.chunk_mut()[2..].as_mut_ptr().write(b'o'); - /// - /// buf.advance_mut(3); - /// } - /// - /// assert_eq!(5, buf.len()); - /// assert_eq!(buf, b"hello"); - /// ``` - /// - /// # Implementer notes - /// - /// This function should never panic. `chunk_mut` should return an empty - /// slice **if and only if** `remaining_mut()` returns 0. In other words, - /// `chunk_mut()` returning an empty slice implies that `remaining_mut()` will - /// return 0 and `remaining_mut()` returning 0 implies that `chunk_mut()` will - /// return an empty slice. - /// - /// This function may trigger an out-of-memory abort if it tries to allocate - /// memory and fails to do so. - // The `chunk_mut` method was previously called `bytes_mut`. 
This alias makes the - // rename more easily discoverable. - #[cfg_attr(docsrs, doc(alias = "bytes_mut"))] - fn chunk_mut(&mut self) -> &mut UninitSlice; - - /// Transfer bytes into `self` from `src` and advance the cursor by the - /// number of bytes written. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// - /// buf.put_u8(b'h'); - /// buf.put(&b"ello"[..]); - /// buf.put(&b" world"[..]); - /// - /// assert_eq!(buf, b"hello world"); - /// ``` - /// - /// # Panics - /// - /// Panics if `self` does not have enough capacity to contain `src`. - fn put(&mut self, mut src: T) - where - Self: Sized, - { - assert!(self.remaining_mut() >= src.remaining()); - - while src.has_remaining() { - let l; - - unsafe { - let s = src.chunk(); - let d = self.chunk_mut(); - l = cmp::min(s.len(), d.len()); - - ptr::copy_nonoverlapping(s.as_ptr(), d.as_mut_ptr() as *mut u8, l); - } - - src.advance(l); - unsafe { - self.advance_mut(l); - } - } - } - - /// Transfer bytes into `self` from `src` and advance the cursor by the - /// number of bytes written. - /// - /// `self` must have enough remaining capacity to contain all of `src`. - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut dst = [0; 6]; - /// - /// { - /// let mut buf = &mut dst[..]; - /// buf.put_slice(b"hello"); - /// - /// assert_eq!(1, buf.remaining_mut()); - /// } - /// - /// assert_eq!(b"hello\0", &dst); - /// ``` - fn put_slice(&mut self, src: &[u8]) { - let mut off = 0; - - assert!( - self.remaining_mut() >= src.len(), - "buffer overflow; remaining = {}; src = {}", - self.remaining_mut(), - src.len() - ); - - while off < src.len() { - let cnt; - - unsafe { - let dst = self.chunk_mut(); - cnt = cmp::min(dst.len(), src.len() - off); - - ptr::copy_nonoverlapping(src[off..].as_ptr(), dst.as_mut_ptr() as *mut u8, cnt); - - off += cnt; - } - - unsafe { - self.advance_mut(cnt); - } - } - } - - /// Put `cnt` bytes `val` into `self`. 
- /// - /// Logically equivalent to calling `self.put_u8(val)` `cnt` times, but may work faster. - /// - /// `self` must have at least `cnt` remaining capacity. - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut dst = [0; 6]; - /// - /// { - /// let mut buf = &mut dst[..]; - /// buf.put_bytes(b'a', 4); - /// - /// assert_eq!(2, buf.remaining_mut()); - /// } - /// - /// assert_eq!(b"aaaa\0\0", &dst); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_bytes(&mut self, val: u8, cnt: usize) { - for _ in 0..cnt { - self.put_u8(val); - } - } - - /// Writes an unsigned 8 bit integer to `self`. - /// - /// The current position is advanced by 1. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u8(0x01); - /// assert_eq!(buf, b"\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u8(&mut self, n: u8) { - let src = [n]; - self.put_slice(&src); - } - - /// Writes a signed 8 bit integer to `self`. - /// - /// The current position is advanced by 1. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i8(0x01); - /// assert_eq!(buf, b"\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i8(&mut self, n: i8) { - let src = [n as u8]; - self.put_slice(&src) - } - - /// Writes an unsigned 16 bit integer to `self` in big-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u16(0x0809); - /// assert_eq!(buf, b"\x08\x09"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. 
- fn put_u16(&mut self, n: u16) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes an unsigned 16 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u16_le(0x0809); - /// assert_eq!(buf, b"\x09\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u16_le(&mut self, n: u16) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes an unsigned 16 bit integer to `self` in native-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u16_ne(0x0809); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x08\x09"); - /// } else { - /// assert_eq!(buf, b"\x09\x08"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u16_ne(&mut self, n: u16) { - self.put_slice(&n.to_ne_bytes()) - } - - /// Writes a signed 16 bit integer to `self` in big-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i16(0x0809); - /// assert_eq!(buf, b"\x08\x09"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i16(&mut self, n: i16) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes a signed 16 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 2. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i16_le(0x0809); - /// assert_eq!(buf, b"\x09\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i16_le(&mut self, n: i16) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes a signed 16 bit integer to `self` in native-endian byte order. - /// - /// The current position is advanced by 2. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i16_ne(0x0809); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x08\x09"); - /// } else { - /// assert_eq!(buf, b"\x09\x08"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i16_ne(&mut self, n: i16) { - self.put_slice(&n.to_ne_bytes()) - } - - /// Writes an unsigned 32 bit integer to `self` in big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u32(0x0809A0A1); - /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u32(&mut self, n: u32) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes an unsigned 32 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u32_le(0x0809A0A1); - /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. 
- fn put_u32_le(&mut self, n: u32) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes an unsigned 32 bit integer to `self` in native-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u32_ne(0x0809A0A1); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); - /// } else { - /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u32_ne(&mut self, n: u32) { - self.put_slice(&n.to_ne_bytes()) - } - - /// Writes a signed 32 bit integer to `self` in big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i32(0x0809A0A1); - /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i32(&mut self, n: i32) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes a signed 32 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i32_le(0x0809A0A1); - /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i32_le(&mut self, n: i32) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes a signed 32 bit integer to `self` in native-endian byte order. - /// - /// The current position is advanced by 4. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i32_ne(0x0809A0A1); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); - /// } else { - /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i32_ne(&mut self, n: i32) { - self.put_slice(&n.to_ne_bytes()) - } - - /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u64(0x0102030405060708); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u64(&mut self, n: u64) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes an unsigned 64 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u64_le(0x0102030405060708); - /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u64_le(&mut self, n: u64) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes an unsigned 64 bit integer to `self` in native-endian byte order. - /// - /// The current position is advanced by 8. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u64_ne(0x0102030405060708); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); - /// } else { - /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u64_ne(&mut self, n: u64) { - self.put_slice(&n.to_ne_bytes()) - } - - /// Writes a signed 64 bit integer to `self` in the big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i64(0x0102030405060708); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i64(&mut self, n: i64) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes a signed 64 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i64_le(0x0102030405060708); - /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i64_le(&mut self, n: i64) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes a signed 64 bit integer to `self` in native-endian byte order. - /// - /// The current position is advanced by 8. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i64_ne(0x0102030405060708); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); - /// } else { - /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i64_ne(&mut self, n: i64) { - self.put_slice(&n.to_ne_bytes()) - } - - /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u128(0x01020304050607080910111213141516); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u128(&mut self, n: u128) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes an unsigned 128 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u128_le(0x01020304050607080910111213141516); - /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u128_le(&mut self, n: u128) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes an unsigned 128 bit integer to `self` in native-endian byte order. - /// - /// The current position is advanced by 16. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_u128_ne(0x01020304050607080910111213141516); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); - /// } else { - /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_u128_ne(&mut self, n: u128) { - self.put_slice(&n.to_ne_bytes()) - } - - /// Writes a signed 128 bit integer to `self` in the big-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i128(0x01020304050607080910111213141516); - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i128(&mut self, n: i128) { - self.put_slice(&n.to_be_bytes()) - } - - /// Writes a signed 128 bit integer to `self` in little-endian byte order. - /// - /// The current position is advanced by 16. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i128_le(0x01020304050607080910111213141516); - /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i128_le(&mut self, n: i128) { - self.put_slice(&n.to_le_bytes()) - } - - /// Writes a signed 128 bit integer to `self` in native-endian byte order. - /// - /// The current position is advanced by 16. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_i128_ne(0x01020304050607080910111213141516); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); - /// } else { - /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_i128_ne(&mut self, n: i128) { - self.put_slice(&n.to_ne_bytes()) - } - - /// Writes an unsigned n-byte integer to `self` in big-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_uint(0x010203, 3); - /// assert_eq!(buf, b"\x01\x02\x03"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_uint(&mut self, n: u64, nbytes: usize) { - self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); - } - - /// Writes an unsigned n-byte integer to `self` in the little-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_uint_le(0x010203, 3); - /// assert_eq!(buf, b"\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_uint_le(&mut self, n: u64, nbytes: usize) { - self.put_slice(&n.to_le_bytes()[0..nbytes]); - } - - /// Writes an unsigned n-byte integer to `self` in the native-endian byte order. - /// - /// The current position is advanced by `nbytes`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_uint_ne(0x010203, 3); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x01\x02\x03"); - /// } else { - /// assert_eq!(buf, b"\x03\x02\x01"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_uint_ne(&mut self, n: u64, nbytes: usize) { - if cfg!(target_endian = "big") { - self.put_uint(n, nbytes) - } else { - self.put_uint_le(n, nbytes) - } - } - - /// Writes low `nbytes` of a signed integer to `self` in big-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_int(0x0504010203, 3); - /// assert_eq!(buf, b"\x01\x02\x03"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self` or if `nbytes` is greater than 8. - fn put_int(&mut self, n: i64, nbytes: usize) { - self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); - } - - /// Writes low `nbytes` of a signed integer to `self` in little-endian byte order. - /// - /// The current position is advanced by `nbytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_int_le(0x0504010203, 3); - /// assert_eq!(buf, b"\x03\x02\x01"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self` or if `nbytes` is greater than 8. - fn put_int_le(&mut self, n: i64, nbytes: usize) { - self.put_slice(&n.to_le_bytes()[0..nbytes]); - } - - /// Writes low `nbytes` of a signed integer to `self` in native-endian byte order. - /// - /// The current position is advanced by `nbytes`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_int_ne(0x010203, 3); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x01\x02\x03"); - /// } else { - /// assert_eq!(buf, b"\x03\x02\x01"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self` or if `nbytes` is greater than 8. - fn put_int_ne(&mut self, n: i64, nbytes: usize) { - if cfg!(target_endian = "big") { - self.put_int(n, nbytes) - } else { - self.put_int_le(n, nbytes) - } - } - - /// Writes an IEEE754 single-precision (4 bytes) floating point number to - /// `self` in big-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f32(1.2f32); - /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f32(&mut self, n: f32) { - self.put_u32(n.to_bits()); - } - - /// Writes an IEEE754 single-precision (4 bytes) floating point number to - /// `self` in little-endian byte order. - /// - /// The current position is advanced by 4. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f32_le(1.2f32); - /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f32_le(&mut self, n: f32) { - self.put_u32_le(n.to_bits()); - } - - /// Writes an IEEE754 single-precision (4 bytes) floating point number to - /// `self` in native-endian byte order. - /// - /// The current position is advanced by 4. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f32_ne(1.2f32); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); - /// } else { - /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f32_ne(&mut self, n: f32) { - self.put_u32_ne(n.to_bits()); - } - - /// Writes an IEEE754 double-precision (8 bytes) floating point number to - /// `self` in big-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f64(1.2f64); - /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f64(&mut self, n: f64) { - self.put_u64(n.to_bits()); - } - - /// Writes an IEEE754 double-precision (8 bytes) floating point number to - /// `self` in little-endian byte order. - /// - /// The current position is advanced by 8. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f64_le(1.2f64); - /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F"); - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f64_le(&mut self, n: f64) { - self.put_u64_le(n.to_bits()); - } - - /// Writes an IEEE754 double-precision (8 bytes) floating point number to - /// `self` in native-endian byte order. - /// - /// The current position is advanced by 8. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut buf = vec![]; - /// buf.put_f64_ne(1.2f64); - /// if cfg!(target_endian = "big") { - /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33"); - /// } else { - /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F"); - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if there is not enough remaining capacity in - /// `self`. - fn put_f64_ne(&mut self, n: f64) { - self.put_u64_ne(n.to_bits()); - } - - /// Creates an adaptor which can write at most `limit` bytes to `self`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let arr = &mut [0u8; 128][..]; - /// assert_eq!(arr.remaining_mut(), 128); - /// - /// let dst = arr.limit(10); - /// assert_eq!(dst.remaining_mut(), 10); - /// ``` - fn limit(self, limit: usize) -> Limit - where - Self: Sized, - { - limit::new(self, limit) - } - - /// Creates an adaptor which implements the `Write` trait for `self`. - /// - /// This function returns a new value which implements `Write` by adapting - /// the `Write` trait functions to the `BufMut` trait functions. Given that - /// `BufMut` operations are infallible, none of the `Write` functions will - /// return with `Err`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// use std::io::Write; - /// - /// let mut buf = vec![].writer(); - /// - /// let num = buf.write(&b"hello world"[..]).unwrap(); - /// assert_eq!(11, num); - /// - /// let buf = buf.into_inner(); - /// - /// assert_eq!(*buf, b"hello world"[..]); - /// ``` - #[cfg(feature = "std")] - #[cfg_attr(docsrs, doc(cfg(feature = "std")))] - fn writer(self) -> Writer - where - Self: Sized, - { - writer::new(self) - } - - /// Creates an adapter which will chain this buffer with another. - /// - /// The returned `BufMut` instance will first write to all bytes from - /// `self`. Afterwards, it will write to `next`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut a = [0u8; 5]; - /// let mut b = [0u8; 6]; - /// - /// let mut chain = (&mut a[..]).chain_mut(&mut b[..]); - /// - /// chain.put_slice(b"hello world"); - /// - /// assert_eq!(&a[..], b"hello"); - /// assert_eq!(&b[..], b" world"); - /// ``` - fn chain_mut(self, next: U) -> Chain - where - Self: Sized, - { - Chain::new(self, next) - } -} - -macro_rules! deref_forward_bufmut { - () => { - fn remaining_mut(&self) -> usize { - (**self).remaining_mut() - } - - fn chunk_mut(&mut self) -> &mut UninitSlice { - (**self).chunk_mut() - } - - unsafe fn advance_mut(&mut self, cnt: usize) { - (**self).advance_mut(cnt) - } - - fn put_slice(&mut self, src: &[u8]) { - (**self).put_slice(src) - } - - fn put_u8(&mut self, n: u8) { - (**self).put_u8(n) - } - - fn put_i8(&mut self, n: i8) { - (**self).put_i8(n) - } - - fn put_u16(&mut self, n: u16) { - (**self).put_u16(n) - } - - fn put_u16_le(&mut self, n: u16) { - (**self).put_u16_le(n) - } - - fn put_u16_ne(&mut self, n: u16) { - (**self).put_u16_ne(n) - } - - fn put_i16(&mut self, n: i16) { - (**self).put_i16(n) - } - - fn put_i16_le(&mut self, n: i16) { - (**self).put_i16_le(n) - } - - fn put_i16_ne(&mut self, n: i16) { - (**self).put_i16_ne(n) - } - - fn put_u32(&mut self, n: u32) { - (**self).put_u32(n) - } - - fn put_u32_le(&mut self, n: u32) { - (**self).put_u32_le(n) - } - - fn put_u32_ne(&mut self, n: u32) { - (**self).put_u32_ne(n) - } - - fn put_i32(&mut self, n: i32) { - (**self).put_i32(n) - } - - fn put_i32_le(&mut self, n: i32) { - (**self).put_i32_le(n) - } - - fn put_i32_ne(&mut self, n: i32) { - (**self).put_i32_ne(n) - } - - fn put_u64(&mut self, n: u64) { - (**self).put_u64(n) - } - - fn put_u64_le(&mut self, n: u64) { - (**self).put_u64_le(n) - } - - fn put_u64_ne(&mut self, n: u64) { - (**self).put_u64_ne(n) - } - - fn put_i64(&mut self, n: i64) { - (**self).put_i64(n) - } - - fn put_i64_le(&mut self, n: i64) { - 
(**self).put_i64_le(n) - } - - fn put_i64_ne(&mut self, n: i64) { - (**self).put_i64_ne(n) - } - }; -} - -unsafe impl BufMut for &mut T { - deref_forward_bufmut!(); -} - -unsafe impl BufMut for Box { - deref_forward_bufmut!(); -} - -unsafe impl BufMut for &mut [u8] { - #[inline] - fn remaining_mut(&self) -> usize { - self.len() - } - - #[inline] - fn chunk_mut(&mut self) -> &mut UninitSlice { - // UninitSlice is repr(transparent), so safe to transmute - unsafe { &mut *(*self as *mut [u8] as *mut _) } - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - // Lifetime dance taken from `impl Write for &mut [u8]`. - let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); - *self = b; - } - - #[inline] - fn put_slice(&mut self, src: &[u8]) { - self[..src.len()].copy_from_slice(src); - unsafe { - self.advance_mut(src.len()); - } - } - - fn put_bytes(&mut self, val: u8, cnt: usize) { - assert!(self.remaining_mut() >= cnt); - unsafe { - ptr::write_bytes(self.as_mut_ptr(), val, cnt); - self.advance_mut(cnt); - } - } -} - -unsafe impl BufMut for &mut [core::mem::MaybeUninit] { - #[inline] - fn remaining_mut(&self) -> usize { - self.len() - } - - #[inline] - fn chunk_mut(&mut self) -> &mut UninitSlice { - UninitSlice::uninit(self) - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - // Lifetime dance taken from `impl Write for &mut [u8]`. 
- let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); - *self = b; - } - - #[inline] - fn put_slice(&mut self, src: &[u8]) { - self.chunk_mut()[..src.len()].copy_from_slice(src); - unsafe { - self.advance_mut(src.len()); - } - } - - fn put_bytes(&mut self, val: u8, cnt: usize) { - assert!(self.remaining_mut() >= cnt); - unsafe { - ptr::write_bytes(self.as_mut_ptr() as *mut u8, val, cnt); - self.advance_mut(cnt); - } - } -} - -unsafe impl BufMut for Vec { - #[inline] - fn remaining_mut(&self) -> usize { - // A vector can never have more than isize::MAX bytes - core::isize::MAX as usize - self.len() - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - let len = self.len(); - let remaining = self.capacity() - len; - - assert!( - cnt <= remaining, - "cannot advance past `remaining_mut`: {:?} <= {:?}", - cnt, - remaining - ); - - self.set_len(len + cnt); - } - - #[inline] - fn chunk_mut(&mut self) -> &mut UninitSlice { - if self.capacity() == self.len() { - self.reserve(64); // Grow the vec - } - - let cap = self.capacity(); - let len = self.len(); - - let ptr = self.as_mut_ptr(); - unsafe { &mut UninitSlice::from_raw_parts_mut(ptr, cap)[len..] } - } - - // Specialize these methods so they can skip checking `remaining_mut` - // and `advance_mut`. - fn put(&mut self, mut src: T) - where - Self: Sized, - { - // In case the src isn't contiguous, reserve upfront - self.reserve(src.remaining()); - - while src.has_remaining() { - let l; - - // a block to contain the src.bytes() borrow - { - let s = src.chunk(); - l = s.len(); - self.extend_from_slice(s); - } - - src.advance(l); - } - } - - #[inline] - fn put_slice(&mut self, src: &[u8]) { - self.extend_from_slice(src); - } - - fn put_bytes(&mut self, val: u8, cnt: usize) { - let new_len = self.len().checked_add(cnt).unwrap(); - self.resize(new_len, val); - } -} - -// The existence of this function makes the compiler catch if the BufMut -// trait is "object-safe" or not. 
-fn _assert_trait_object(_b: &dyn BufMut) {} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/chain.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/chain.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/chain.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/chain.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,242 +0,0 @@ -use crate::buf::{IntoIter, UninitSlice}; -use crate::{Buf, BufMut, Bytes}; - -#[cfg(feature = "std")] -use std::io::IoSlice; - -/// A `Chain` sequences two buffers. -/// -/// `Chain` is an adapter that links two underlying buffers and provides a -/// continuous view across both buffers. It is able to sequence either immutable -/// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values). -/// -/// This struct is generally created by calling [`Buf::chain`]. Please see that -/// function's documentation for more detail. -/// -/// # Examples -/// -/// ``` -/// use bytes::{Bytes, Buf}; -/// -/// let mut buf = (&b"hello "[..]) -/// .chain(&b"world"[..]); -/// -/// let full: Bytes = buf.copy_to_bytes(11); -/// assert_eq!(full[..], b"hello world"[..]); -/// ``` -/// -/// [`Buf::chain`]: trait.Buf.html#method.chain -/// [`Buf`]: trait.Buf.html -/// [`BufMut`]: trait.BufMut.html -#[derive(Debug)] -pub struct Chain { - a: T, - b: U, -} - -impl Chain { - /// Creates a new `Chain` sequencing the provided values. - pub(crate) fn new(a: T, b: U) -> Chain { - Chain { a, b } - } - - /// Gets a reference to the first underlying `Buf`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let buf = (&b"hello"[..]) - /// .chain(&b"world"[..]); - /// - /// assert_eq!(buf.first_ref()[..], b"hello"[..]); - /// ``` - pub fn first_ref(&self) -> &T { - &self.a - } - - /// Gets a mutable reference to the first underlying `Buf`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = (&b"hello"[..]) - /// .chain(&b"world"[..]); - /// - /// buf.first_mut().advance(1); - /// - /// let full = buf.copy_to_bytes(9); - /// assert_eq!(full, b"elloworld"[..]); - /// ``` - pub fn first_mut(&mut self) -> &mut T { - &mut self.a - } - - /// Gets a reference to the last underlying `Buf`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let buf = (&b"hello"[..]) - /// .chain(&b"world"[..]); - /// - /// assert_eq!(buf.last_ref()[..], b"world"[..]); - /// ``` - pub fn last_ref(&self) -> &U { - &self.b - } - - /// Gets a mutable reference to the last underlying `Buf`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let mut buf = (&b"hello "[..]) - /// .chain(&b"world"[..]); - /// - /// buf.last_mut().advance(1); - /// - /// let full = buf.copy_to_bytes(10); - /// assert_eq!(full, b"hello orld"[..]); - /// ``` - pub fn last_mut(&mut self) -> &mut U { - &mut self.b - } - - /// Consumes this `Chain`, returning the underlying values. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Buf; - /// - /// let chain = (&b"hello"[..]) - /// .chain(&b"world"[..]); - /// - /// let (first, last) = chain.into_inner(); - /// assert_eq!(first[..], b"hello"[..]); - /// assert_eq!(last[..], b"world"[..]); - /// ``` - pub fn into_inner(self) -> (T, U) { - (self.a, self.b) - } -} - -impl Buf for Chain -where - T: Buf, - U: Buf, -{ - fn remaining(&self) -> usize { - self.a.remaining().checked_add(self.b.remaining()).unwrap() - } - - fn chunk(&self) -> &[u8] { - if self.a.has_remaining() { - self.a.chunk() - } else { - self.b.chunk() - } - } - - fn advance(&mut self, mut cnt: usize) { - let a_rem = self.a.remaining(); - - if a_rem != 0 { - if a_rem >= cnt { - self.a.advance(cnt); - return; - } - - // Consume what is left of a - self.a.advance(a_rem); - - cnt -= a_rem; - } - - self.b.advance(cnt); - } - - #[cfg(feature = "std")] - fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { - let mut n = self.a.chunks_vectored(dst); - n += self.b.chunks_vectored(&mut dst[n..]); - n - } - - fn copy_to_bytes(&mut self, len: usize) -> Bytes { - let a_rem = self.a.remaining(); - if a_rem >= len { - self.a.copy_to_bytes(len) - } else if a_rem == 0 { - self.b.copy_to_bytes(len) - } else { - assert!( - len - a_rem <= self.b.remaining(), - "`len` greater than remaining" - ); - let mut ret = crate::BytesMut::with_capacity(len); - ret.put(&mut self.a); - ret.put((&mut self.b).take(len - a_rem)); - ret.freeze() - } - } -} - -unsafe impl BufMut for Chain -where - T: BufMut, - U: BufMut, -{ - fn remaining_mut(&self) -> usize { - self.a - .remaining_mut() - .saturating_add(self.b.remaining_mut()) - } - - fn chunk_mut(&mut self) -> &mut UninitSlice { - if self.a.has_remaining_mut() { - self.a.chunk_mut() - } else { - self.b.chunk_mut() - } - } - - unsafe fn advance_mut(&mut self, mut cnt: usize) { - let a_rem = self.a.remaining_mut(); - - if a_rem != 0 { - if a_rem >= cnt { - self.a.advance_mut(cnt); - return; - } - 
- // Consume what is left of a - self.a.advance_mut(a_rem); - - cnt -= a_rem; - } - - self.b.advance_mut(cnt); - } -} - -impl IntoIterator for Chain -where - T: Buf, - U: Buf, -{ - type Item = u8; - type IntoIter = IntoIter>; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/iter.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/iter.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/iter.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/iter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,130 +0,0 @@ -use crate::Buf; - -/// Iterator over the bytes contained by the buffer. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use bytes::Bytes; -/// -/// let buf = Bytes::from(&b"abc"[..]); -/// let mut iter = buf.into_iter(); -/// -/// assert_eq!(iter.next(), Some(b'a')); -/// assert_eq!(iter.next(), Some(b'b')); -/// assert_eq!(iter.next(), Some(b'c')); -/// assert_eq!(iter.next(), None); -/// ``` -/// -/// [`iter`]: trait.Buf.html#method.iter -/// [`Buf`]: trait.Buf.html -#[derive(Debug)] -pub struct IntoIter { - inner: T, -} - -impl IntoIter { - /// Creates an iterator over the bytes contained by the buffer. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let buf = Bytes::from_static(b"abc"); - /// let mut iter = buf.into_iter(); - /// - /// assert_eq!(iter.next(), Some(b'a')); - /// assert_eq!(iter.next(), Some(b'b')); - /// assert_eq!(iter.next(), Some(b'c')); - /// assert_eq!(iter.next(), None); - /// ``` - pub fn new(inner: T) -> IntoIter { - IntoIter { inner } - } - - /// Consumes this `IntoIter`, returning the underlying value. 
- /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, Bytes}; - /// - /// let buf = Bytes::from(&b"abc"[..]); - /// let mut iter = buf.into_iter(); - /// - /// assert_eq!(iter.next(), Some(b'a')); - /// - /// let buf = iter.into_inner(); - /// assert_eq!(2, buf.remaining()); - /// ``` - pub fn into_inner(self) -> T { - self.inner - } - - /// Gets a reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, Bytes}; - /// - /// let buf = Bytes::from(&b"abc"[..]); - /// let mut iter = buf.into_iter(); - /// - /// assert_eq!(iter.next(), Some(b'a')); - /// - /// assert_eq!(2, iter.get_ref().remaining()); - /// ``` - pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Gets a mutable reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, BytesMut}; - /// - /// let buf = BytesMut::from(&b"abc"[..]); - /// let mut iter = buf.into_iter(); - /// - /// assert_eq!(iter.next(), Some(b'a')); - /// - /// iter.get_mut().advance(1); - /// - /// assert_eq!(iter.next(), Some(b'c')); - /// ``` - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } -} - -impl Iterator for IntoIter { - type Item = u8; - - fn next(&mut self) -> Option { - if !self.inner.has_remaining() { - return None; - } - - let b = self.inner.chunk()[0]; - self.inner.advance(1); - - Some(b) - } - - fn size_hint(&self) -> (usize, Option) { - let rem = self.inner.remaining(); - (rem, Some(rem)) - } -} - -impl ExactSizeIterator for IntoIter {} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/limit.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/limit.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/limit.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/limit.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -use 
crate::buf::UninitSlice; -use crate::BufMut; - -use core::cmp; - -/// A `BufMut` adapter which limits the amount of bytes that can be written -/// to an underlying buffer. -#[derive(Debug)] -pub struct Limit { - inner: T, - limit: usize, -} - -pub(super) fn new(inner: T, limit: usize) -> Limit { - Limit { inner, limit } -} - -impl Limit { - /// Consumes this `Limit`, returning the underlying value. - pub fn into_inner(self) -> T { - self.inner - } - - /// Gets a reference to the underlying `BufMut`. - /// - /// It is inadvisable to directly write to the underlying `BufMut`. - pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Gets a mutable reference to the underlying `BufMut`. - /// - /// It is inadvisable to directly write to the underlying `BufMut`. - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Returns the maximum number of bytes that can be written - /// - /// # Note - /// - /// If the inner `BufMut` has fewer bytes than indicated by this method then - /// that is the actual number of available bytes. - pub fn limit(&self) -> usize { - self.limit - } - - /// Sets the maximum number of bytes that can be written. - /// - /// # Note - /// - /// If the inner `BufMut` has fewer bytes than `lim` then that is the actual - /// number of available bytes. 
- pub fn set_limit(&mut self, lim: usize) { - self.limit = lim - } -} - -unsafe impl BufMut for Limit { - fn remaining_mut(&self) -> usize { - cmp::min(self.inner.remaining_mut(), self.limit) - } - - fn chunk_mut(&mut self) -> &mut UninitSlice { - let bytes = self.inner.chunk_mut(); - let end = cmp::min(bytes.len(), self.limit); - &mut bytes[..end] - } - - unsafe fn advance_mut(&mut self, cnt: usize) { - assert!(cnt <= self.limit); - self.inner.advance_mut(cnt); - self.limit -= cnt; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/mod.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/mod.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -//! Utilities for working with buffers. -//! -//! A buffer is any structure that contains a sequence of bytes. The bytes may -//! or may not be stored in contiguous memory. This module contains traits used -//! to abstract over buffers as well as utilities for working with buffer types. -//! -//! # `Buf`, `BufMut` -//! -//! These are the two foundational traits for abstractly working with buffers. -//! They can be thought as iterators for byte structures. They offer additional -//! performance over `Iterator` by providing an API optimized for byte slices. -//! -//! See [`Buf`] and [`BufMut`] for more details. -//! -//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) -//! [`Buf`]: trait.Buf.html -//! 
[`BufMut`]: trait.BufMut.html - -mod buf_impl; -mod buf_mut; -mod chain; -mod iter; -mod limit; -#[cfg(feature = "std")] -mod reader; -mod take; -mod uninit_slice; -mod vec_deque; -#[cfg(feature = "std")] -mod writer; - -pub use self::buf_impl::Buf; -pub use self::buf_mut::BufMut; -pub use self::chain::Chain; -pub use self::iter::IntoIter; -pub use self::limit::Limit; -pub use self::take::Take; -pub use self::uninit_slice::UninitSlice; - -#[cfg(feature = "std")] -pub use self::{reader::Reader, writer::Writer}; diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/reader.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/reader.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/reader.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/reader.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,81 +0,0 @@ -use crate::Buf; - -use std::{cmp, io}; - -/// A `Buf` adapter which implements `io::Read` for the inner value. -/// -/// This struct is generally created by calling `reader()` on `Buf`. See -/// documentation of [`reader()`](trait.Buf.html#method.reader) for more -/// details. -#[derive(Debug)] -pub struct Reader { - buf: B, -} - -pub fn new(buf: B) -> Reader { - Reader { buf } -} - -impl Reader { - /// Gets a reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::Buf; - /// - /// let buf = b"hello world".reader(); - /// - /// assert_eq!(b"hello world", buf.get_ref()); - /// ``` - pub fn get_ref(&self) -> &B { - &self.buf - } - - /// Gets a mutable reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - pub fn get_mut(&mut self) -> &mut B { - &mut self.buf - } - - /// Consumes this `Reader`, returning the underlying value. 
- /// - /// # Examples - /// - /// ```rust - /// use bytes::Buf; - /// use std::io; - /// - /// let mut buf = b"hello world".reader(); - /// let mut dst = vec![]; - /// - /// io::copy(&mut buf, &mut dst).unwrap(); - /// - /// let buf = buf.into_inner(); - /// assert_eq!(0, buf.remaining()); - /// ``` - pub fn into_inner(self) -> B { - self.buf - } -} - -impl io::Read for Reader { - fn read(&mut self, dst: &mut [u8]) -> io::Result { - let len = cmp::min(self.buf.remaining(), dst.len()); - - Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]); - Ok(len) - } -} - -impl io::BufRead for Reader { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - Ok(self.buf.chunk()) - } - fn consume(&mut self, amt: usize) { - self.buf.advance(amt) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/take.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/take.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/take.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/take.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,155 +0,0 @@ -use crate::{Buf, Bytes}; - -use core::cmp; - -/// A `Buf` adapter which limits the bytes read from an underlying buffer. -/// -/// This struct is generally created by calling `take()` on `Buf`. See -/// documentation of [`take()`](trait.Buf.html#method.take) for more details. -#[derive(Debug)] -pub struct Take { - inner: T, - limit: usize, -} - -pub fn new(inner: T, limit: usize) -> Take { - Take { inner, limit } -} - -impl Take { - /// Consumes this `Take`, returning the underlying value. 
- /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, BufMut}; - /// - /// let mut buf = b"hello world".take(2); - /// let mut dst = vec![]; - /// - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"he"[..]); - /// - /// let mut buf = buf.into_inner(); - /// - /// dst.clear(); - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"llo world"[..]); - /// ``` - pub fn into_inner(self) -> T { - self.inner - } - - /// Gets a reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::Buf; - /// - /// let buf = b"hello world".take(2); - /// - /// assert_eq!(11, buf.get_ref().remaining()); - /// ``` - pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Gets a mutable reference to the underlying `Buf`. - /// - /// It is inadvisable to directly read from the underlying `Buf`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, BufMut}; - /// - /// let mut buf = b"hello world".take(2); - /// let mut dst = vec![]; - /// - /// buf.get_mut().advance(2); - /// - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"ll"[..]); - /// ``` - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Returns the maximum number of bytes that can be read. - /// - /// # Note - /// - /// If the inner `Buf` has fewer bytes than indicated by this method then - /// that is the actual number of available bytes. - /// - /// # Examples - /// - /// ```rust - /// use bytes::Buf; - /// - /// let mut buf = b"hello world".take(2); - /// - /// assert_eq!(2, buf.limit()); - /// assert_eq!(b'h', buf.get_u8()); - /// assert_eq!(1, buf.limit()); - /// ``` - pub fn limit(&self) -> usize { - self.limit - } - - /// Sets the maximum number of bytes that can be read. - /// - /// # Note - /// - /// If the inner `Buf` has fewer bytes than `lim` then that is the actual - /// number of available bytes. 
- /// - /// # Examples - /// - /// ```rust - /// use bytes::{Buf, BufMut}; - /// - /// let mut buf = b"hello world".take(2); - /// let mut dst = vec![]; - /// - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"he"[..]); - /// - /// dst.clear(); - /// - /// buf.set_limit(3); - /// dst.put(&mut buf); - /// assert_eq!(*dst, b"llo"[..]); - /// ``` - pub fn set_limit(&mut self, lim: usize) { - self.limit = lim - } -} - -impl Buf for Take { - fn remaining(&self) -> usize { - cmp::min(self.inner.remaining(), self.limit) - } - - fn chunk(&self) -> &[u8] { - let bytes = self.inner.chunk(); - &bytes[..cmp::min(bytes.len(), self.limit)] - } - - fn advance(&mut self, cnt: usize) { - assert!(cnt <= self.limit); - self.inner.advance(cnt); - self.limit -= cnt; - } - - fn copy_to_bytes(&mut self, len: usize) -> Bytes { - assert!(len <= self.remaining(), "`len` greater than remaining"); - - let r = self.inner.copy_to_bytes(len); - self.limit -= len; - r - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/uninit_slice.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/uninit_slice.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/uninit_slice.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/uninit_slice.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,257 +0,0 @@ -use core::fmt; -use core::mem::MaybeUninit; -use core::ops::{ - Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, -}; - -/// Uninitialized byte slice. -/// -/// Returned by `BufMut::chunk_mut()`, the referenced byte slice may be -/// uninitialized. The wrapper provides safe access without introducing -/// undefined behavior. -/// -/// The safety invariants of this wrapper are: -/// -/// 1. Reading from an `UninitSlice` is undefined behavior. -/// 2. Writing uninitialized bytes to an `UninitSlice` is undefined behavior. 
-/// -/// The difference between `&mut UninitSlice` and `&mut [MaybeUninit]` is -/// that it is possible in safe code to write uninitialized bytes to an -/// `&mut [MaybeUninit]`, which this type prohibits. -#[repr(transparent)] -pub struct UninitSlice([MaybeUninit]); - -impl UninitSlice { - /// Creates a `&mut UninitSlice` wrapping a slice of initialised memory. - /// - /// # Examples - /// - /// ``` - /// use bytes::buf::UninitSlice; - /// - /// let mut buffer = [0u8; 64]; - /// let slice = UninitSlice::new(&mut buffer[..]); - /// ``` - #[inline] - pub fn new(slice: &mut [u8]) -> &mut UninitSlice { - unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit] as *mut UninitSlice) } - } - - /// Creates a `&mut UninitSlice` wrapping a slice of uninitialised memory. - /// - /// # Examples - /// - /// ``` - /// use bytes::buf::UninitSlice; - /// use core::mem::MaybeUninit; - /// - /// let mut buffer = [MaybeUninit::uninit(); 64]; - /// let slice = UninitSlice::uninit(&mut buffer[..]); - /// - /// let mut vec = Vec::with_capacity(1024); - /// let spare: &mut UninitSlice = vec.spare_capacity_mut().into(); - /// ``` - #[inline] - pub fn uninit(slice: &mut [MaybeUninit]) -> &mut UninitSlice { - unsafe { &mut *(slice as *mut [MaybeUninit] as *mut UninitSlice) } - } - - fn uninit_ref(slice: &[MaybeUninit]) -> &UninitSlice { - unsafe { &*(slice as *const [MaybeUninit] as *const UninitSlice) } - } - - /// Create a `&mut UninitSlice` from a pointer and a length. - /// - /// # Safety - /// - /// The caller must ensure that `ptr` references a valid memory region owned - /// by the caller representing a byte slice for the duration of `'a`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::buf::UninitSlice; - /// - /// let bytes = b"hello world".to_vec(); - /// let ptr = bytes.as_ptr() as *mut _; - /// let len = bytes.len(); - /// - /// let slice = unsafe { UninitSlice::from_raw_parts_mut(ptr, len) }; - /// ``` - #[inline] - pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice { - let maybe_init: &mut [MaybeUninit] = - core::slice::from_raw_parts_mut(ptr as *mut _, len); - Self::uninit(maybe_init) - } - - /// Write a single byte at the specified offset. - /// - /// # Panics - /// - /// The function panics if `index` is out of bounds. - /// - /// # Examples - /// - /// ``` - /// use bytes::buf::UninitSlice; - /// - /// let mut data = [b'f', b'o', b'o']; - /// let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) }; - /// - /// slice.write_byte(0, b'b'); - /// - /// assert_eq!(b"boo", &data[..]); - /// ``` - #[inline] - pub fn write_byte(&mut self, index: usize, byte: u8) { - assert!(index < self.len()); - - unsafe { self[index..].as_mut_ptr().write(byte) } - } - - /// Copies bytes from `src` into `self`. - /// - /// The length of `src` must be the same as `self`. - /// - /// # Panics - /// - /// The function panics if `src` has a different length than `self`. - /// - /// # Examples - /// - /// ``` - /// use bytes::buf::UninitSlice; - /// - /// let mut data = [b'f', b'o', b'o']; - /// let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) }; - /// - /// slice.copy_from_slice(b"bar"); - /// - /// assert_eq!(b"bar", &data[..]); - /// ``` - #[inline] - pub fn copy_from_slice(&mut self, src: &[u8]) { - use core::ptr; - - assert_eq!(self.len(), src.len()); - - unsafe { - ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len()); - } - } - - /// Return a raw pointer to the slice's buffer. 
- /// - /// # Safety - /// - /// The caller **must not** read from the referenced memory and **must not** - /// write **uninitialized** bytes to the slice either. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut data = [0, 1, 2]; - /// let mut slice = &mut data[..]; - /// let ptr = BufMut::chunk_mut(&mut slice).as_mut_ptr(); - /// ``` - #[inline] - pub fn as_mut_ptr(&mut self) -> *mut u8 { - self.0.as_mut_ptr() as *mut _ - } - - /// Return a `&mut [MaybeUninit]` to this slice's buffer. - /// - /// # Safety - /// - /// The caller **must not** read from the referenced memory and **must not** write - /// **uninitialized** bytes to the slice either. This is because `BufMut` implementation - /// that created the `UninitSlice` knows which parts are initialized. Writing uninitalized - /// bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined - /// behavior. - /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut data = [0, 1, 2]; - /// let mut slice = &mut data[..]; - /// unsafe { - /// let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut(); - /// }; - /// ``` - #[inline] - pub unsafe fn as_uninit_slice_mut<'a>(&'a mut self) -> &'a mut [MaybeUninit] { - &mut *(self as *mut _ as *mut [MaybeUninit]) - } - - /// Returns the number of bytes in the slice. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BufMut; - /// - /// let mut data = [0, 1, 2]; - /// let mut slice = &mut data[..]; - /// let len = BufMut::chunk_mut(&mut slice).len(); - /// - /// assert_eq!(len, 3); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.0.len() - } -} - -impl fmt::Debug for UninitSlice { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("UninitSlice[...]").finish() - } -} - -impl<'a> From<&'a mut [u8]> for &'a mut UninitSlice { - fn from(slice: &'a mut [u8]) -> Self { - UninitSlice::new(slice) - } -} - -impl<'a> From<&'a mut [MaybeUninit]> for &'a mut UninitSlice { - fn from(slice: &'a mut [MaybeUninit]) -> Self { - UninitSlice::uninit(slice) - } -} - -macro_rules! impl_index { - ($($t:ty),*) => { - $( - impl Index<$t> for UninitSlice { - type Output = UninitSlice; - - #[inline] - fn index(&self, index: $t) -> &UninitSlice { - UninitSlice::uninit_ref(&self.0[index]) - } - } - - impl IndexMut<$t> for UninitSlice { - #[inline] - fn index_mut(&mut self, index: $t) -> &mut UninitSlice { - UninitSlice::uninit(&mut self.0[index]) - } - } - )* - }; -} - -impl_index!( - Range, - RangeFrom, - RangeFull, - RangeInclusive, - RangeTo, - RangeToInclusive -); diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/vec_deque.rs s390-tools-2.33.1/rust-vendor/bytes/src/buf/vec_deque.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/vec_deque.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/vec_deque.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ -use alloc::collections::VecDeque; - -use super::Buf; - -impl Buf for VecDeque { - fn remaining(&self) -> usize { - self.len() - } - - fn chunk(&self) -> &[u8] { - let (s1, s2) = self.as_slices(); - if s1.is_empty() { - s2 - } else { - s1 - } - } - - fn advance(&mut self, cnt: usize) { - self.drain(..cnt); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/buf/writer.rs 
s390-tools-2.33.1/rust-vendor/bytes/src/buf/writer.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/buf/writer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/buf/writer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,88 +0,0 @@ -use crate::BufMut; - -use std::{cmp, io}; - -/// A `BufMut` adapter which implements `io::Write` for the inner value. -/// -/// This struct is generally created by calling `writer()` on `BufMut`. See -/// documentation of [`writer()`](trait.BufMut.html#method.writer) for more -/// details. -#[derive(Debug)] -pub struct Writer { - buf: B, -} - -pub fn new(buf: B) -> Writer { - Writer { buf } -} - -impl Writer { - /// Gets a reference to the underlying `BufMut`. - /// - /// It is inadvisable to directly write to the underlying `BufMut`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::BufMut; - /// - /// let buf = Vec::with_capacity(1024).writer(); - /// - /// assert_eq!(1024, buf.get_ref().capacity()); - /// ``` - pub fn get_ref(&self) -> &B { - &self.buf - } - - /// Gets a mutable reference to the underlying `BufMut`. - /// - /// It is inadvisable to directly write to the underlying `BufMut`. - /// - /// # Examples - /// - /// ```rust - /// use bytes::BufMut; - /// - /// let mut buf = vec![].writer(); - /// - /// buf.get_mut().reserve(1024); - /// - /// assert_eq!(1024, buf.get_ref().capacity()); - /// ``` - pub fn get_mut(&mut self) -> &mut B { - &mut self.buf - } - - /// Consumes this `Writer`, returning the underlying value. 
- /// - /// # Examples - /// - /// ```rust - /// use bytes::BufMut; - /// use std::io; - /// - /// let mut buf = vec![].writer(); - /// let mut src = &b"hello world"[..]; - /// - /// io::copy(&mut src, &mut buf).unwrap(); - /// - /// let buf = buf.into_inner(); - /// assert_eq!(*buf, b"hello world"[..]); - /// ``` - pub fn into_inner(self) -> B { - self.buf - } -} - -impl io::Write for Writer { - fn write(&mut self, src: &[u8]) -> io::Result { - let n = cmp::min(self.buf.remaining_mut(), src.len()); - - self.buf.put(&src[0..n]); - Ok(n) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/bytes_mut.rs s390-tools-2.33.1/rust-vendor/bytes/src/bytes_mut.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/bytes_mut.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/bytes_mut.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1813 +0,0 @@ -use core::iter::{FromIterator, Iterator}; -use core::mem::{self, ManuallyDrop, MaybeUninit}; -use core::ops::{Deref, DerefMut}; -use core::ptr::{self, NonNull}; -use core::{cmp, fmt, hash, isize, slice, usize}; - -use alloc::{ - borrow::{Borrow, BorrowMut}, - boxed::Box, - string::String, - vec, - vec::Vec, -}; - -use crate::buf::{IntoIter, UninitSlice}; -use crate::bytes::Vtable; -#[allow(unused)] -use crate::loom::sync::atomic::AtomicMut; -use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; -use crate::{Buf, BufMut, Bytes}; - -/// A unique reference to a contiguous slice of memory. -/// -/// `BytesMut` represents a unique view into a potentially shared memory region. -/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to -/// mutate the memory. -/// -/// `BytesMut` can be thought of as containing a `buf: Arc>`, an offset -/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the -/// same `buf` overlaps with its slice. That guarantee means that a write lock -/// is not required. 
-/// -/// # Growth -/// -/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as -/// necessary. However, explicitly reserving the required space up-front before -/// a series of inserts will be more efficient. -/// -/// # Examples -/// -/// ``` -/// use bytes::{BytesMut, BufMut}; -/// -/// let mut buf = BytesMut::with_capacity(64); -/// -/// buf.put_u8(b'h'); -/// buf.put_u8(b'e'); -/// buf.put(&b"llo"[..]); -/// -/// assert_eq!(&buf[..], b"hello"); -/// -/// // Freeze the buffer so that it can be shared -/// let a = buf.freeze(); -/// -/// // This does not allocate, instead `b` points to the same memory. -/// let b = a.clone(); -/// -/// assert_eq!(&a[..], b"hello"); -/// assert_eq!(&b[..], b"hello"); -/// ``` -pub struct BytesMut { - ptr: NonNull, - len: usize, - cap: usize, - data: *mut Shared, -} - -// Thread-safe reference-counted container for the shared storage. This mostly -// the same as `core::sync::Arc` but without the weak counter. The ref counting -// fns are based on the ones found in `std`. -// -// The main reason to use `Shared` instead of `core::sync::Arc` is that it ends -// up making the overall code simpler and easier to reason about. This is due to -// some of the logic around setting `Inner::arc` and other ways the `arc` field -// is used. Using `Arc` ended up requiring a number of funky transmutes and -// other shenanigans to make it work. -struct Shared { - vec: Vec, - original_capacity_repr: usize, - ref_count: AtomicUsize, -} - -// Buffer storage strategy flags. -const KIND_ARC: usize = 0b0; -const KIND_VEC: usize = 0b1; -const KIND_MASK: usize = 0b1; - -// The max original capacity value. Any `Bytes` allocated with a greater initial -// capacity will default to this. -const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17; -// The original capacity algorithm will not take effect unless the originally -// allocated capacity was at least 1kb in size. 
-const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; -// The original capacity is stored in powers of 2 starting at 1kb to a max of -// 64kb. Representing it as such requires only 3 bits of storage. -const ORIGINAL_CAPACITY_MASK: usize = 0b11100; -const ORIGINAL_CAPACITY_OFFSET: usize = 2; - -// When the storage is in the `Vec` representation, the pointer can be advanced -// at most this value. This is due to the amount of storage available to track -// the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY -// bits. -const VEC_POS_OFFSET: usize = 5; -const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET; -const NOT_VEC_POS_MASK: usize = 0b11111; - -#[cfg(target_pointer_width = "64")] -const PTR_WIDTH: usize = 64; -#[cfg(target_pointer_width = "32")] -const PTR_WIDTH: usize = 32; - -/* - * - * ===== BytesMut ===== - * - */ - -impl BytesMut { - /// Creates a new `BytesMut` with the specified capacity. - /// - /// The returned `BytesMut` will be able to hold at least `capacity` bytes - /// without reallocating. - /// - /// It is important to note that this function does not specify the length - /// of the returned `BytesMut`, but only the capacity. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// - /// let mut bytes = BytesMut::with_capacity(64); - /// - /// // `bytes` contains no data, even though there is capacity - /// assert_eq!(bytes.len(), 0); - /// - /// bytes.put(&b"hello world"[..]); - /// - /// assert_eq!(&bytes[..], b"hello world"); - /// ``` - #[inline] - pub fn with_capacity(capacity: usize) -> BytesMut { - BytesMut::from_vec(Vec::with_capacity(capacity)) - } - - /// Creates a new `BytesMut` with default capacity. - /// - /// Resulting object has length 0 and unspecified capacity. - /// This function does not allocate. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// - /// let mut bytes = BytesMut::new(); - /// - /// assert_eq!(0, bytes.len()); - /// - /// bytes.reserve(2); - /// bytes.put_slice(b"xy"); - /// - /// assert_eq!(&b"xy"[..], &bytes[..]); - /// ``` - #[inline] - pub fn new() -> BytesMut { - BytesMut::with_capacity(0) - } - - /// Returns the number of bytes contained in this `BytesMut`. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let b = BytesMut::from(&b"hello"[..]); - /// assert_eq!(b.len(), 5); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.len - } - - /// Returns true if the `BytesMut` has a length of 0. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let b = BytesMut::with_capacity(64); - /// assert!(b.is_empty()); - /// ``` - #[inline] - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Returns the number of bytes the `BytesMut` can hold without reallocating. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let b = BytesMut::with_capacity(64); - /// assert_eq!(b.capacity(), 64); - /// ``` - #[inline] - pub fn capacity(&self) -> usize { - self.cap - } - - /// Converts `self` into an immutable `Bytes`. - /// - /// The conversion is zero cost and is used to indicate that the slice - /// referenced by the handle will no longer be mutated. Once the conversion - /// is done, the handle can be cloned and shared across threads. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// use std::thread; - /// - /// let mut b = BytesMut::with_capacity(64); - /// b.put(&b"hello world"[..]); - /// let b1 = b.freeze(); - /// let b2 = b1.clone(); - /// - /// let th = thread::spawn(move || { - /// assert_eq!(&b1[..], b"hello world"); - /// }); - /// - /// assert_eq!(&b2[..], b"hello world"); - /// th.join().unwrap(); - /// ``` - #[inline] - pub fn freeze(mut self) -> Bytes { - if self.kind() == KIND_VEC { - // Just re-use `Bytes` internal Vec vtable - unsafe { - let (off, _) = self.get_vec_pos(); - let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); - mem::forget(self); - let mut b: Bytes = vec.into(); - b.advance(off); - b - } - } else { - debug_assert_eq!(self.kind(), KIND_ARC); - - let ptr = self.ptr.as_ptr(); - let len = self.len; - let data = AtomicPtr::new(self.data.cast()); - mem::forget(self); - unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } - } - } - - /// Creates a new `BytesMut`, which is initialized with zero. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let zeros = BytesMut::zeroed(42); - /// - /// assert_eq!(zeros.len(), 42); - /// zeros.into_iter().for_each(|x| assert_eq!(x, 0)); - /// ``` - pub fn zeroed(len: usize) -> BytesMut { - BytesMut::from_vec(vec![0; len]) - } - - /// Splits the bytes into two at the given index. - /// - /// Afterwards `self` contains elements `[0, at)`, and the returned - /// `BytesMut` contains elements `[at, capacity)`. - /// - /// This is an `O(1)` operation that just increases the reference count - /// and sets a few indices. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut a = BytesMut::from(&b"hello world"[..]); - /// let mut b = a.split_off(5); - /// - /// a[0] = b'j'; - /// b[0] = b'!'; - /// - /// assert_eq!(&a[..], b"jello"); - /// assert_eq!(&b[..], b"!world"); - /// ``` - /// - /// # Panics - /// - /// Panics if `at > capacity`. - #[must_use = "consider BytesMut::truncate if you don't need the other half"] - pub fn split_off(&mut self, at: usize) -> BytesMut { - assert!( - at <= self.capacity(), - "split_off out of bounds: {:?} <= {:?}", - at, - self.capacity(), - ); - unsafe { - let mut other = self.shallow_clone(); - other.set_start(at); - self.set_end(at); - other - } - } - - /// Removes the bytes from the current view, returning them in a new - /// `BytesMut` handle. - /// - /// Afterwards, `self` will be empty, but will retain any additional - /// capacity that it had before the operation. This is identical to - /// `self.split_to(self.len())`. - /// - /// This is an `O(1)` operation that just increases the reference count and - /// sets a few indices. - /// - /// # Examples - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// - /// let mut buf = BytesMut::with_capacity(1024); - /// buf.put(&b"hello world"[..]); - /// - /// let other = buf.split(); - /// - /// assert!(buf.is_empty()); - /// assert_eq!(1013, buf.capacity()); - /// - /// assert_eq!(other, b"hello world"[..]); - /// ``` - #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"] - pub fn split(&mut self) -> BytesMut { - let len = self.len(); - self.split_to(len) - } - - /// Splits the buffer into two at the given index. - /// - /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut` - /// contains elements `[0, at)`. - /// - /// This is an `O(1)` operation that just increases the reference count and - /// sets a few indices. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut a = BytesMut::from(&b"hello world"[..]); - /// let mut b = a.split_to(5); - /// - /// a[0] = b'!'; - /// b[0] = b'j'; - /// - /// assert_eq!(&a[..], b"!world"); - /// assert_eq!(&b[..], b"jello"); - /// ``` - /// - /// # Panics - /// - /// Panics if `at > len`. - #[must_use = "consider BytesMut::advance if you don't need the other half"] - pub fn split_to(&mut self, at: usize) -> BytesMut { - assert!( - at <= self.len(), - "split_to out of bounds: {:?} <= {:?}", - at, - self.len(), - ); - - unsafe { - let mut other = self.shallow_clone(); - other.set_end(at); - self.set_start(at); - other - } - } - - /// Shortens the buffer, keeping the first `len` bytes and dropping the - /// rest. - /// - /// If `len` is greater than the buffer's current length, this has no - /// effect. - /// - /// Existing underlying capacity is preserved. - /// - /// The [`split_off`] method can emulate `truncate`, but this causes the - /// excess bytes to be returned instead of dropped. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::from(&b"hello world"[..]); - /// buf.truncate(5); - /// assert_eq!(buf, b"hello"[..]); - /// ``` - /// - /// [`split_off`]: #method.split_off - pub fn truncate(&mut self, len: usize) { - if len <= self.len() { - unsafe { - self.set_len(len); - } - } - } - - /// Clears the buffer, removing all data. Existing capacity is preserved. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::from(&b"hello world"[..]); - /// buf.clear(); - /// assert!(buf.is_empty()); - /// ``` - pub fn clear(&mut self) { - self.truncate(0); - } - - /// Resizes the buffer so that `len` is equal to `new_len`. - /// - /// If `new_len` is greater than `len`, the buffer is extended by the - /// difference with each additional byte set to `value`. 
If `new_len` is - /// less than `len`, the buffer is simply truncated. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::new(); - /// - /// buf.resize(3, 0x1); - /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]); - /// - /// buf.resize(2, 0x2); - /// assert_eq!(&buf[..], &[0x1, 0x1]); - /// - /// buf.resize(4, 0x3); - /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]); - /// ``` - pub fn resize(&mut self, new_len: usize, value: u8) { - let len = self.len(); - if new_len > len { - let additional = new_len - len; - self.reserve(additional); - unsafe { - let dst = self.chunk_mut().as_mut_ptr(); - ptr::write_bytes(dst, value, additional); - self.set_len(new_len); - } - } else { - self.truncate(new_len); - } - } - - /// Sets the length of the buffer. - /// - /// This will explicitly set the size of the buffer without actually - /// modifying the data, so it is up to the caller to ensure that the data - /// has been initialized. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut b = BytesMut::from(&b"hello world"[..]); - /// - /// unsafe { - /// b.set_len(5); - /// } - /// - /// assert_eq!(&b[..], b"hello"); - /// - /// unsafe { - /// b.set_len(11); - /// } - /// - /// assert_eq!(&b[..], b"hello world"); - /// ``` - #[inline] - pub unsafe fn set_len(&mut self, len: usize) { - debug_assert!(len <= self.cap, "set_len out of bounds"); - self.len = len; - } - - /// Reserves capacity for at least `additional` more bytes to be inserted - /// into the given `BytesMut`. - /// - /// More than `additional` bytes may be reserved in order to avoid frequent - /// reallocations. A call to `reserve` may result in an allocation. - /// - /// Before allocating new buffer space, the function will attempt to reclaim - /// space in the existing buffer. 
If the current handle references a view - /// into a larger original buffer, and all other handles referencing part - /// of the same original buffer have been dropped, then the current view - /// can be copied/shifted to the front of the buffer and the handle can take - /// ownership of the full buffer, provided that the full buffer is large - /// enough to fit the requested additional capacity. - /// - /// This optimization will only happen if shifting the data from the current - /// view to the front of the buffer is not too expensive in terms of the - /// (amortized) time required. The precise condition is subject to change; - /// as of now, the length of the data being shifted needs to be at least as - /// large as the distance that it's shifted by. If the current view is empty - /// and the original buffer is large enough to fit the requested additional - /// capacity, then reallocations will never happen. - /// - /// # Examples - /// - /// In the following example, a new buffer is allocated. - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::from(&b"hello"[..]); - /// buf.reserve(64); - /// assert!(buf.capacity() >= 69); - /// ``` - /// - /// In the following example, the existing buffer is reclaimed. - /// - /// ``` - /// use bytes::{BytesMut, BufMut}; - /// - /// let mut buf = BytesMut::with_capacity(128); - /// buf.put(&[0; 64][..]); - /// - /// let ptr = buf.as_ptr(); - /// let other = buf.split(); - /// - /// assert!(buf.is_empty()); - /// assert_eq!(buf.capacity(), 64); - /// - /// drop(other); - /// buf.reserve(128); - /// - /// assert_eq!(buf.capacity(), 128); - /// assert_eq!(buf.as_ptr(), ptr); - /// ``` - /// - /// # Panics - /// - /// Panics if the new capacity overflows `usize`. 
- #[inline] - pub fn reserve(&mut self, additional: usize) { - let len = self.len(); - let rem = self.capacity() - len; - - if additional <= rem { - // The handle can already store at least `additional` more bytes, so - // there is no further work needed to be done. - return; - } - - self.reserve_inner(additional); - } - - // In separate function to allow the short-circuits in `reserve` to - // be inline-able. Significant helps performance. - fn reserve_inner(&mut self, additional: usize) { - let len = self.len(); - let kind = self.kind(); - - if kind == KIND_VEC { - // If there's enough free space before the start of the buffer, then - // just copy the data backwards and reuse the already-allocated - // space. - // - // Otherwise, since backed by a vector, use `Vec::reserve` - // - // We need to make sure that this optimization does not kill the - // amortized runtimes of BytesMut's operations. - unsafe { - let (off, prev) = self.get_vec_pos(); - - // Only reuse space if we can satisfy the requested additional space. - // - // Also check if the value of `off` suggests that enough bytes - // have been read to account for the overhead of shifting all - // the data (in an amortized analysis). - // Hence the condition `off >= self.len()`. - // - // This condition also already implies that the buffer is going - // to be (at least) half-empty in the end; so we do not break - // the (amortized) runtime with future resizes of the underlying - // `Vec`. - // - // [For more details check issue #524, and PR #525.] - if self.capacity() - self.len() + off >= additional && off >= self.len() { - // There's enough space, and it's not too much overhead: - // reuse the space! - // - // Just move the pointer back to the start after copying - // data back. - let base_ptr = self.ptr.as_ptr().offset(-(off as isize)); - // Since `off >= self.len()`, the two regions don't overlap. 
- ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len); - self.ptr = vptr(base_ptr); - self.set_vec_pos(0, prev); - - // Length stays constant, but since we moved backwards we - // can gain capacity back. - self.cap += off; - } else { - // Not enough space, or reusing might be too much overhead: - // allocate more space! - let mut v = - ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off)); - v.reserve(additional); - - // Update the info - self.ptr = vptr(v.as_mut_ptr().add(off)); - self.len = v.len() - off; - self.cap = v.capacity() - off; - } - - return; - } - } - - debug_assert_eq!(kind, KIND_ARC); - let shared: *mut Shared = self.data; - - // Reserving involves abandoning the currently shared buffer and - // allocating a new vector with the requested capacity. - // - // Compute the new capacity - let mut new_cap = len.checked_add(additional).expect("overflow"); - - let original_capacity; - let original_capacity_repr; - - unsafe { - original_capacity_repr = (*shared).original_capacity_repr; - original_capacity = original_capacity_from_repr(original_capacity_repr); - - // First, try to reclaim the buffer. This is possible if the current - // handle is the only outstanding handle pointing to the buffer. - if (*shared).is_unique() { - // This is the only handle to the buffer. It can be reclaimed. - // However, before doing the work of copying data, check to make - // sure that the vector has enough capacity. - let v = &mut (*shared).vec; - - let v_capacity = v.capacity(); - let ptr = v.as_mut_ptr(); - - let offset = offset_from(self.ptr.as_ptr(), ptr); - - // Compare the condition in the `kind == KIND_VEC` case above - // for more details. - if v_capacity >= new_cap + offset { - self.cap = new_cap; - // no copy is necessary - } else if v_capacity >= new_cap && offset >= len { - // The capacity is sufficient, and copying is not too much - // overhead: reclaim the buffer! 
- - // `offset >= len` means: no overlap - ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len); - - self.ptr = vptr(ptr); - self.cap = v.capacity(); - } else { - // calculate offset - let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize); - - // new_cap is calculated in terms of `BytesMut`, not the underlying - // `Vec`, so it does not take the offset into account. - // - // Thus we have to manually add it here. - new_cap = new_cap.checked_add(off).expect("overflow"); - - // The vector capacity is not sufficient. The reserve request is - // asking for more than the initial buffer capacity. Allocate more - // than requested if `new_cap` is not much bigger than the current - // capacity. - // - // There are some situations, using `reserve_exact` that the - // buffer capacity could be below `original_capacity`, so do a - // check. - let double = v.capacity().checked_shl(1).unwrap_or(new_cap); - - new_cap = cmp::max(double, new_cap); - - // No space - allocate more - // - // The length field of `Shared::vec` is not used by the `BytesMut`; - // instead we use the `len` field in the `BytesMut` itself. However, - // when calling `reserve`, it doesn't guarantee that data stored in - // the unused capacity of the vector is copied over to the new - // allocation, so we need to ensure that we don't have any data we - // care about in the unused capacity before calling `reserve`. - debug_assert!(off + len <= v.capacity()); - v.set_len(off + len); - v.reserve(new_cap - v.len()); - - // Update the info - self.ptr = vptr(v.as_mut_ptr().add(off)); - self.cap = v.capacity() - off; - } - - return; - } else { - new_cap = cmp::max(new_cap, original_capacity); - } - } - - // Create a new vector to store the data - let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap)); - - // Copy the bytes - v.extend_from_slice(self.as_ref()); - - // Release the shared handle. This must be done *after* the bytes are - // copied. 
- unsafe { release_shared(shared) }; - - // Update self - let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; - self.data = invalid_ptr(data); - self.ptr = vptr(v.as_mut_ptr()); - self.len = v.len(); - self.cap = v.capacity(); - } - - /// Appends given bytes to this `BytesMut`. - /// - /// If this `BytesMut` object does not have enough capacity, it is resized - /// first. - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::with_capacity(0); - /// buf.extend_from_slice(b"aaabbb"); - /// buf.extend_from_slice(b"cccddd"); - /// - /// assert_eq!(b"aaabbbcccddd", &buf[..]); - /// ``` - #[inline] - pub fn extend_from_slice(&mut self, extend: &[u8]) { - let cnt = extend.len(); - self.reserve(cnt); - - unsafe { - let dst = self.spare_capacity_mut(); - // Reserved above - debug_assert!(dst.len() >= cnt); - - ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt); - } - - unsafe { - self.advance_mut(cnt); - } - } - - /// Absorbs a `BytesMut` that was previously split off. - /// - /// If the two `BytesMut` objects were previously contiguous and not mutated - /// in a way that causes re-allocation i.e., if `other` was created by - /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation - /// that just decreases a reference count and sets a few indices. - /// Otherwise this method degenerates to - /// `self.extend_from_slice(other.as_ref())`. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// let mut buf = BytesMut::with_capacity(64); - /// buf.extend_from_slice(b"aaabbbcccddd"); - /// - /// let split = buf.split_off(6); - /// assert_eq!(b"aaabbb", &buf[..]); - /// assert_eq!(b"cccddd", &split[..]); - /// - /// buf.unsplit(split); - /// assert_eq!(b"aaabbbcccddd", &buf[..]); - /// ``` - pub fn unsplit(&mut self, other: BytesMut) { - if self.is_empty() { - *self = other; - return; - } - - if let Err(other) = self.try_unsplit(other) { - self.extend_from_slice(other.as_ref()); - } - } - - // private - - // For now, use a `Vec` to manage the memory for us, but we may want to - // change that in the future to some alternate allocator strategy. - // - // Thus, we don't expose an easy way to construct from a `Vec` since an - // internal change could make a simple pattern (`BytesMut::from(vec)`) - // suddenly a lot more expensive. - #[inline] - pub(crate) fn from_vec(mut vec: Vec) -> BytesMut { - let ptr = vptr(vec.as_mut_ptr()); - let len = vec.len(); - let cap = vec.capacity(); - mem::forget(vec); - - let original_capacity_repr = original_capacity_to_repr(cap); - let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; - - BytesMut { - ptr, - len, - cap, - data: invalid_ptr(data), - } - } - - #[inline] - fn as_slice(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } - } - - #[inline] - fn as_slice_mut(&mut self) -> &mut [u8] { - unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } - } - - unsafe fn set_start(&mut self, start: usize) { - // Setting the start to 0 is a no-op, so return early if this is the - // case. - if start == 0 { - return; - } - - debug_assert!(start <= self.cap, "internal: set_start out of bounds"); - - let kind = self.kind(); - - if kind == KIND_VEC { - // Setting the start when in vec representation is a little more - // complicated. 
First, we have to track how far ahead the - // "start" of the byte buffer from the beginning of the vec. We - // also have to ensure that we don't exceed the maximum shift. - let (mut pos, prev) = self.get_vec_pos(); - pos += start; - - if pos <= MAX_VEC_POS { - self.set_vec_pos(pos, prev); - } else { - // The repr must be upgraded to ARC. This will never happen - // on 64 bit systems and will only happen on 32 bit systems - // when shifting past 134,217,727 bytes. As such, we don't - // worry too much about performance here. - self.promote_to_shared(/*ref_count = */ 1); - } - } - - // Updating the start of the view is setting `ptr` to point to the - // new start and updating the `len` field to reflect the new length - // of the view. - self.ptr = vptr(self.ptr.as_ptr().add(start)); - - if self.len >= start { - self.len -= start; - } else { - self.len = 0; - } - - self.cap -= start; - } - - unsafe fn set_end(&mut self, end: usize) { - debug_assert_eq!(self.kind(), KIND_ARC); - assert!(end <= self.cap, "set_end out of bounds"); - - self.cap = end; - self.len = cmp::min(self.len, end); - } - - fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> { - if other.capacity() == 0 { - return Ok(()); - } - - let ptr = unsafe { self.ptr.as_ptr().add(self.len) }; - if ptr == other.ptr.as_ptr() - && self.kind() == KIND_ARC - && other.kind() == KIND_ARC - && self.data == other.data - { - // Contiguous blocks, just combine directly - self.len += other.len; - self.cap += other.cap; - Ok(()) - } else { - Err(other) - } - } - - #[inline] - fn kind(&self) -> usize { - self.data as usize & KIND_MASK - } - - unsafe fn promote_to_shared(&mut self, ref_cnt: usize) { - debug_assert_eq!(self.kind(), KIND_VEC); - debug_assert!(ref_cnt == 1 || ref_cnt == 2); - - let original_capacity_repr = - (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET; - - // The vec offset cannot be concurrently mutated, so there - // should be no danger reading it. 
- let off = (self.data as usize) >> VEC_POS_OFFSET; - - // First, allocate a new `Shared` instance containing the - // `Vec` fields. It's important to note that `ptr`, `len`, - // and `cap` cannot be mutated without having `&mut self`. - // This means that these fields will not be concurrently - // updated and since the buffer hasn't been promoted to an - // `Arc`, those three fields still are the components of the - // vector. - let shared = Box::new(Shared { - vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off), - original_capacity_repr, - ref_count: AtomicUsize::new(ref_cnt), - }); - - let shared = Box::into_raw(shared); - - // The pointer should be aligned, so this assert should - // always succeed. - debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC); - - self.data = shared; - } - - /// Makes an exact shallow clone of `self`. - /// - /// The kind of `self` doesn't matter, but this is unsafe - /// because the clone will have the same offsets. You must - /// be sure the returned value to the user doesn't allow - /// two views into the same range. - #[inline] - unsafe fn shallow_clone(&mut self) -> BytesMut { - if self.kind() == KIND_ARC { - increment_shared(self.data); - ptr::read(self) - } else { - self.promote_to_shared(/*ref_count = */ 2); - ptr::read(self) - } - } - - #[inline] - unsafe fn get_vec_pos(&mut self) -> (usize, usize) { - debug_assert_eq!(self.kind(), KIND_VEC); - - let prev = self.data as usize; - (prev >> VEC_POS_OFFSET, prev) - } - - #[inline] - unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) { - debug_assert_eq!(self.kind(), KIND_VEC); - debug_assert!(pos <= MAX_VEC_POS); - - self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)); - } - - /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit`. - /// - /// The returned slice can be used to fill the buffer with data (e.g. 
by - /// reading from a file) before marking the data as initialized using the - /// [`set_len`] method. - /// - /// [`set_len`]: BytesMut::set_len - /// - /// # Examples - /// - /// ``` - /// use bytes::BytesMut; - /// - /// // Allocate buffer big enough for 10 bytes. - /// let mut buf = BytesMut::with_capacity(10); - /// - /// // Fill in the first 3 elements. - /// let uninit = buf.spare_capacity_mut(); - /// uninit[0].write(0); - /// uninit[1].write(1); - /// uninit[2].write(2); - /// - /// // Mark the first 3 bytes of the buffer as being initialized. - /// unsafe { - /// buf.set_len(3); - /// } - /// - /// assert_eq!(&buf[..], &[0, 1, 2]); - /// ``` - #[inline] - pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit] { - unsafe { - let ptr = self.ptr.as_ptr().add(self.len); - let len = self.cap - self.len; - - slice::from_raw_parts_mut(ptr.cast(), len) - } - } -} - -impl Drop for BytesMut { - fn drop(&mut self) { - let kind = self.kind(); - - if kind == KIND_VEC { - unsafe { - let (off, _) = self.get_vec_pos(); - - // Vector storage, free the vector - let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); - } - } else if kind == KIND_ARC { - unsafe { release_shared(self.data) }; - } - } -} - -impl Buf for BytesMut { - #[inline] - fn remaining(&self) -> usize { - self.len() - } - - #[inline] - fn chunk(&self) -> &[u8] { - self.as_slice() - } - - #[inline] - fn advance(&mut self, cnt: usize) { - assert!( - cnt <= self.remaining(), - "cannot advance past `remaining`: {:?} <= {:?}", - cnt, - self.remaining(), - ); - unsafe { - self.set_start(cnt); - } - } - - fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { - self.split_to(len).freeze() - } -} - -unsafe impl BufMut for BytesMut { - #[inline] - fn remaining_mut(&self) -> usize { - usize::MAX - self.len() - } - - #[inline] - unsafe fn advance_mut(&mut self, cnt: usize) { - let new_len = self.len() + cnt; - assert!( - new_len <= self.cap, - "new_len = {}; capacity = {}", - new_len, - self.cap - 
); - self.len = new_len; - } - - #[inline] - fn chunk_mut(&mut self) -> &mut UninitSlice { - if self.capacity() == self.len() { - self.reserve(64); - } - self.spare_capacity_mut().into() - } - - // Specialize these methods so they can skip checking `remaining_mut` - // and `advance_mut`. - - fn put(&mut self, mut src: T) - where - Self: Sized, - { - while src.has_remaining() { - let s = src.chunk(); - let l = s.len(); - self.extend_from_slice(s); - src.advance(l); - } - } - - fn put_slice(&mut self, src: &[u8]) { - self.extend_from_slice(src); - } - - fn put_bytes(&mut self, val: u8, cnt: usize) { - self.reserve(cnt); - unsafe { - let dst = self.spare_capacity_mut(); - // Reserved above - debug_assert!(dst.len() >= cnt); - - ptr::write_bytes(dst.as_mut_ptr(), val, cnt); - - self.advance_mut(cnt); - } - } -} - -impl AsRef<[u8]> for BytesMut { - #[inline] - fn as_ref(&self) -> &[u8] { - self.as_slice() - } -} - -impl Deref for BytesMut { - type Target = [u8]; - - #[inline] - fn deref(&self) -> &[u8] { - self.as_ref() - } -} - -impl AsMut<[u8]> for BytesMut { - #[inline] - fn as_mut(&mut self) -> &mut [u8] { - self.as_slice_mut() - } -} - -impl DerefMut for BytesMut { - #[inline] - fn deref_mut(&mut self) -> &mut [u8] { - self.as_mut() - } -} - -impl<'a> From<&'a [u8]> for BytesMut { - fn from(src: &'a [u8]) -> BytesMut { - BytesMut::from_vec(src.to_vec()) - } -} - -impl<'a> From<&'a str> for BytesMut { - fn from(src: &'a str) -> BytesMut { - BytesMut::from(src.as_bytes()) - } -} - -impl From for Bytes { - fn from(src: BytesMut) -> Bytes { - src.freeze() - } -} - -impl PartialEq for BytesMut { - fn eq(&self, other: &BytesMut) -> bool { - self.as_slice() == other.as_slice() - } -} - -impl PartialOrd for BytesMut { - fn partial_cmp(&self, other: &BytesMut) -> Option { - self.as_slice().partial_cmp(other.as_slice()) - } -} - -impl Ord for BytesMut { - fn cmp(&self, other: &BytesMut) -> cmp::Ordering { - self.as_slice().cmp(other.as_slice()) - } -} - -impl Eq for BytesMut 
{} - -impl Default for BytesMut { - #[inline] - fn default() -> BytesMut { - BytesMut::new() - } -} - -impl hash::Hash for BytesMut { - fn hash(&self, state: &mut H) - where - H: hash::Hasher, - { - let s: &[u8] = self.as_ref(); - s.hash(state); - } -} - -impl Borrow<[u8]> for BytesMut { - fn borrow(&self) -> &[u8] { - self.as_ref() - } -} - -impl BorrowMut<[u8]> for BytesMut { - fn borrow_mut(&mut self) -> &mut [u8] { - self.as_mut() - } -} - -impl fmt::Write for BytesMut { - #[inline] - fn write_str(&mut self, s: &str) -> fmt::Result { - if self.remaining_mut() >= s.len() { - self.put_slice(s.as_bytes()); - Ok(()) - } else { - Err(fmt::Error) - } - } - - #[inline] - fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { - fmt::write(self, args) - } -} - -impl Clone for BytesMut { - fn clone(&self) -> BytesMut { - BytesMut::from(&self[..]) - } -} - -impl IntoIterator for BytesMut { - type Item = u8; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self) - } -} - -impl<'a> IntoIterator for &'a BytesMut { - type Item = &'a u8; - type IntoIter = core::slice::Iter<'a, u8>; - - fn into_iter(self) -> Self::IntoIter { - self.as_ref().iter() - } -} - -impl Extend for BytesMut { - fn extend(&mut self, iter: T) - where - T: IntoIterator, - { - let iter = iter.into_iter(); - - let (lower, _) = iter.size_hint(); - self.reserve(lower); - - // TODO: optimize - // 1. If self.kind() == KIND_VEC, use Vec::extend - // 2. 
Make `reserve` inline-able - for b in iter { - self.reserve(1); - self.put_u8(b); - } - } -} - -impl<'a> Extend<&'a u8> for BytesMut { - fn extend(&mut self, iter: T) - where - T: IntoIterator, - { - self.extend(iter.into_iter().copied()) - } -} - -impl Extend for BytesMut { - fn extend(&mut self, iter: T) - where - T: IntoIterator, - { - for bytes in iter { - self.extend_from_slice(&bytes) - } - } -} - -impl FromIterator for BytesMut { - fn from_iter>(into_iter: T) -> Self { - BytesMut::from_vec(Vec::from_iter(into_iter)) - } -} - -impl<'a> FromIterator<&'a u8> for BytesMut { - fn from_iter>(into_iter: T) -> Self { - BytesMut::from_iter(into_iter.into_iter().copied()) - } -} - -/* - * - * ===== Inner ===== - * - */ - -unsafe fn increment_shared(ptr: *mut Shared) { - let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed); - - if old_size > isize::MAX as usize { - crate::abort(); - } -} - -unsafe fn release_shared(ptr: *mut Shared) { - // `Shared` storage... follow the drop steps from Arc. - if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 { - return; - } - - // This fence is needed to prevent reordering of use of the data and - // deletion of the data. Because it is marked `Release`, the decreasing - // of the reference count synchronizes with this `Acquire` fence. This - // means that use of the data happens before decreasing the reference - // count, which happens before this fence, which happens before the - // deletion of the data. - // - // As explained in the [Boost documentation][1], - // - // > It is important to enforce any possible access to the object in one - // > thread (through an existing reference) to *happen before* deleting - // > the object in a different thread. This is achieved by a "release" - // > operation after dropping a reference (any access to the object - // > through this reference must obviously happened before), and an - // > "acquire" operation before deleting the object. 
- // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - // - // Thread sanitizer does not support atomic fences. Use an atomic load - // instead. - (*ptr).ref_count.load(Ordering::Acquire); - - // Drop the data - drop(Box::from_raw(ptr)); -} - -impl Shared { - fn is_unique(&self) -> bool { - // The goal is to check if the current handle is the only handle - // that currently has access to the buffer. This is done by - // checking if the `ref_count` is currently 1. - // - // The `Acquire` ordering synchronizes with the `Release` as - // part of the `fetch_sub` in `release_shared`. The `fetch_sub` - // operation guarantees that any mutations done in other threads - // are ordered before the `ref_count` is decremented. As such, - // this `Acquire` will guarantee that those mutations are - // visible to the current thread. - self.ref_count.load(Ordering::Acquire) == 1 - } -} - -#[inline] -fn original_capacity_to_repr(cap: usize) -> usize { - let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize); - cmp::min( - width, - MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH, - ) -} - -fn original_capacity_from_repr(repr: usize) -> usize { - if repr == 0 { - return 0; - } - - 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1)) -} - -/* -#[test] -fn test_original_capacity_to_repr() { - assert_eq!(original_capacity_to_repr(0), 0); - - let max_width = 32; - - for width in 1..(max_width + 1) { - let cap = 1 << width - 1; - - let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH { - 0 - } else if width < MAX_ORIGINAL_CAPACITY_WIDTH { - width - MIN_ORIGINAL_CAPACITY_WIDTH - } else { - MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH - }; - - assert_eq!(original_capacity_to_repr(cap), expected); - - if width > 1 { - assert_eq!(original_capacity_to_repr(cap + 1), expected); - } - - // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below - if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 { - 
assert_eq!(original_capacity_to_repr(cap - 24), expected - 1); - assert_eq!(original_capacity_to_repr(cap + 76), expected); - } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 { - assert_eq!(original_capacity_to_repr(cap - 1), expected - 1); - assert_eq!(original_capacity_to_repr(cap - 48), expected - 1); - } - } -} - -#[test] -fn test_original_capacity_from_repr() { - assert_eq!(0, original_capacity_from_repr(0)); - - let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH; - - assert_eq!(min_cap, original_capacity_from_repr(1)); - assert_eq!(min_cap * 2, original_capacity_from_repr(2)); - assert_eq!(min_cap * 4, original_capacity_from_repr(3)); - assert_eq!(min_cap * 8, original_capacity_from_repr(4)); - assert_eq!(min_cap * 16, original_capacity_from_repr(5)); - assert_eq!(min_cap * 32, original_capacity_from_repr(6)); - assert_eq!(min_cap * 64, original_capacity_from_repr(7)); -} -*/ - -unsafe impl Send for BytesMut {} -unsafe impl Sync for BytesMut {} - -/* - * - * ===== PartialEq / PartialOrd ===== - * - */ - -impl PartialEq<[u8]> for BytesMut { - fn eq(&self, other: &[u8]) -> bool { - &**self == other - } -} - -impl PartialOrd<[u8]> for BytesMut { - fn partial_cmp(&self, other: &[u8]) -> Option { - (**self).partial_cmp(other) - } -} - -impl PartialEq for [u8] { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for [u8] { - fn partial_cmp(&self, other: &BytesMut) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for BytesMut { - fn eq(&self, other: &str) -> bool { - &**self == other.as_bytes() - } -} - -impl PartialOrd for BytesMut { - fn partial_cmp(&self, other: &str) -> Option { - (**self).partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for str { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for str { - fn partial_cmp(&self, other: &BytesMut) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl 
PartialEq> for BytesMut { - fn eq(&self, other: &Vec) -> bool { - *self == other[..] - } -} - -impl PartialOrd> for BytesMut { - fn partial_cmp(&self, other: &Vec) -> Option { - (**self).partial_cmp(&other[..]) - } -} - -impl PartialEq for Vec { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for Vec { - fn partial_cmp(&self, other: &BytesMut) -> Option { - other.partial_cmp(self) - } -} - -impl PartialEq for BytesMut { - fn eq(&self, other: &String) -> bool { - *self == other[..] - } -} - -impl PartialOrd for BytesMut { - fn partial_cmp(&self, other: &String) -> Option { - (**self).partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for String { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for String { - fn partial_cmp(&self, other: &BytesMut) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut -where - BytesMut: PartialEq, -{ - fn eq(&self, other: &&'a T) -> bool { - *self == **other - } -} - -impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut -where - BytesMut: PartialOrd, -{ - fn partial_cmp(&self, other: &&'a T) -> Option { - self.partial_cmp(*other) - } -} - -impl PartialEq for &[u8] { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for &[u8] { - fn partial_cmp(&self, other: &BytesMut) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for &str { - fn eq(&self, other: &BytesMut) -> bool { - *other == *self - } -} - -impl PartialOrd for &str { - fn partial_cmp(&self, other: &BytesMut) -> Option { - other.partial_cmp(self) - } -} - -impl PartialEq for Bytes { - fn eq(&self, other: &BytesMut) -> bool { - other[..] == self[..] - } -} - -impl PartialEq for BytesMut { - fn eq(&self, other: &Bytes) -> bool { - other[..] == self[..] 
- } -} - -impl From for Vec { - fn from(mut bytes: BytesMut) -> Self { - let kind = bytes.kind(); - - let mut vec = if kind == KIND_VEC { - unsafe { - let (off, _) = bytes.get_vec_pos(); - rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off) - } - } else if kind == KIND_ARC { - let shared = bytes.data as *mut Shared; - - if unsafe { (*shared).is_unique() } { - let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new()); - - unsafe { release_shared(shared) }; - - vec - } else { - return bytes.deref().to_vec(); - } - } else { - return bytes.deref().to_vec(); - }; - - let len = bytes.len; - - unsafe { - ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len); - vec.set_len(len); - } - - mem::forget(bytes); - - vec - } -} - -#[inline] -fn vptr(ptr: *mut u8) -> NonNull { - if cfg!(debug_assertions) { - NonNull::new(ptr).expect("Vec pointer should be non-null") - } else { - unsafe { NonNull::new_unchecked(ptr) } - } -} - -/// Returns a dangling pointer with the given address. This is used to store -/// integer data in pointer fields. -/// -/// It is equivalent to `addr as *mut T`, but this fails on miri when strict -/// provenance checking is enabled. -#[inline] -fn invalid_ptr(addr: usize) -> *mut T { - let ptr = core::ptr::null_mut::().wrapping_add(addr); - debug_assert_eq!(ptr as usize, addr); - ptr.cast::() -} - -/// Precondition: dst >= original -/// -/// The following line is equivalent to: -/// -/// ```rust,ignore -/// self.ptr.as_ptr().offset_from(ptr) as usize; -/// ``` -/// -/// But due to min rust is 1.39 and it is only stablised -/// in 1.47, we cannot use it. 
-#[inline] -fn offset_from(dst: *mut u8, original: *mut u8) -> usize { - debug_assert!(dst >= original); - - dst as usize - original as usize -} - -unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec { - let ptr = ptr.offset(-(off as isize)); - len += off; - cap += off; - - Vec::from_raw_parts(ptr, len, cap) -} - -// ===== impl SharedVtable ===== - -static SHARED_VTABLE: Vtable = Vtable { - clone: shared_v_clone, - to_vec: shared_v_to_vec, - drop: shared_v_drop, -}; - -unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Relaxed) as *mut Shared; - increment_shared(shared); - - let data = AtomicPtr::new(shared as *mut ()); - Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) -} - -unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - let shared: *mut Shared = data.load(Ordering::Relaxed).cast(); - - if (*shared).is_unique() { - let shared = &mut *shared; - - // Drop shared - let mut vec = mem::replace(&mut shared.vec, Vec::new()); - release_shared(shared); - - // Copy back buffer - ptr::copy(ptr, vec.as_mut_ptr(), len); - vec.set_len(len); - - vec - } else { - let v = slice::from_raw_parts(ptr, len).to_vec(); - release_shared(shared); - v - } -} - -unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { - data.with_mut(|shared| { - release_shared(*shared as *mut Shared); - }); -} - -// compile-fails - -/// ```compile_fail -/// use bytes::BytesMut; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = BytesMut::from("hello world"); -/// b1.split_to(6); -/// } -/// ``` -fn _split_to_must_use() {} - -/// ```compile_fail -/// use bytes::BytesMut; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = BytesMut::from("hello world"); -/// b1.split_off(6); -/// } -/// ``` -fn _split_off_must_use() {} - -/// ```compile_fail -/// use bytes::BytesMut; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = 
BytesMut::from("hello world"); -/// b1.split(); -/// } -/// ``` -fn _split_must_use() {} - -// fuzz tests -#[cfg(all(test, loom))] -mod fuzz { - use loom::sync::Arc; - use loom::thread; - - use super::BytesMut; - use crate::Bytes; - - #[test] - fn bytes_mut_cloning_frozen() { - loom::model(|| { - let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze(); - let addr = a.as_ptr() as usize; - - // test the Bytes::clone is Sync by putting it in an Arc - let a1 = Arc::new(a); - let a2 = a1.clone(); - - let t1 = thread::spawn(move || { - let b: Bytes = (*a1).clone(); - assert_eq!(b.as_ptr() as usize, addr); - }); - - let t2 = thread::spawn(move || { - let b: Bytes = (*a2).clone(); - assert_eq!(b.as_ptr() as usize, addr); - }); - - t1.join().unwrap(); - t2.join().unwrap(); - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/bytes.rs s390-tools-2.33.1/rust-vendor/bytes/src/bytes.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/bytes.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/bytes.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1304 +0,0 @@ -use core::iter::FromIterator; -use core::ops::{Deref, RangeBounds}; -use core::{cmp, fmt, hash, mem, ptr, slice, usize}; - -use alloc::{ - alloc::{dealloc, Layout}, - borrow::Borrow, - boxed::Box, - string::String, - vec::Vec, -}; - -use crate::buf::IntoIter; -#[allow(unused)] -use crate::loom::sync::atomic::AtomicMut; -use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; -use crate::Buf; - -/// A cheaply cloneable and sliceable chunk of contiguous memory. -/// -/// `Bytes` is an efficient container for storing and operating on contiguous -/// slices of memory. It is intended for use primarily in networking code, but -/// could have applications elsewhere as well. -/// -/// `Bytes` values facilitate zero-copy network programming by allowing multiple -/// `Bytes` objects to point to the same underlying memory. -/// -/// `Bytes` does not have a single implementation. 
It is an interface, whose -/// exact behavior is implemented through dynamic dispatch in several underlying -/// implementations of `Bytes`. -/// -/// All `Bytes` implementations must fulfill the following requirements: -/// - They are cheaply cloneable and thereby shareable between an unlimited amount -/// of components, for example by modifying a reference count. -/// - Instances can be sliced to refer to a subset of the original buffer. -/// -/// ``` -/// use bytes::Bytes; -/// -/// let mut mem = Bytes::from("Hello world"); -/// let a = mem.slice(0..5); -/// -/// assert_eq!(a, "Hello"); -/// -/// let b = mem.split_to(6); -/// -/// assert_eq!(mem, "world"); -/// assert_eq!(b, "Hello "); -/// ``` -/// -/// # Memory layout -/// -/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used -/// to track information about which segment of the underlying memory the -/// `Bytes` handle has access to. -/// -/// `Bytes` keeps both a pointer to the shared state containing the full memory -/// slice and a pointer to the start of the region visible by the handle. -/// `Bytes` also tracks the length of its view into the memory. -/// -/// # Sharing -/// -/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define -/// how sharing/cloning is implemented in detail. -/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for -/// cloning the backing storage in order to share it behind between multiple -/// `Bytes` instances. -/// -/// For `Bytes` implementations which refer to constant memory (e.g. created -/// via `Bytes::from_static()`) the cloning implementation will be a no-op. -/// -/// For `Bytes` implementations which point to a reference counted shared storage -/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the -/// reference count. -/// -/// Due to this mechanism, multiple `Bytes` instances may point to the same -/// shared memory region. 
-/// Each `Bytes` instance can point to different sections within that -/// memory region, and `Bytes` instances may or may not have overlapping views -/// into the memory. -/// -/// The following diagram visualizes a scenario where 2 `Bytes` instances make -/// use of an `Arc`-based backing storage, and provide access to different views: -/// -/// ```text -/// -/// Arc ptrs ┌─────────┠-/// ________________________ / │ Bytes 2 │ -/// / └─────────┘ -/// / ┌───────────┠| | -/// |_________/ │ Bytes 1 │ | | -/// | └───────────┘ | | -/// | | | ___/ data | tail -/// | data | tail |/ | -/// v v v v -/// ┌─────┬─────┬───────────┬───────────────┬─────┠-/// │ Arc │ │ │ │ │ -/// └─────┴─────┴───────────┴───────────────┴─────┘ -/// ``` -pub struct Bytes { - ptr: *const u8, - len: usize, - // inlined "trait object" - data: AtomicPtr<()>, - vtable: &'static Vtable, -} - -pub(crate) struct Vtable { - /// fn(data, ptr, len) - pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, - /// fn(data, ptr, len) - /// - /// takes `Bytes` to value - pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec, - /// fn(data, ptr, len) - pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), -} - -impl Bytes { - /// Creates a new empty `Bytes`. - /// - /// This will not allocate and the returned `Bytes` handle will be empty. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let b = Bytes::new(); - /// assert_eq!(&b[..], b""); - /// ``` - #[inline] - #[cfg(not(all(loom, test)))] - pub const fn new() -> Self { - // Make it a named const to work around - // "unsizing casts are not allowed in const fn" - const EMPTY: &[u8] = &[]; - Bytes::from_static(EMPTY) - } - - #[cfg(all(loom, test))] - pub fn new() -> Self { - const EMPTY: &[u8] = &[]; - Bytes::from_static(EMPTY) - } - - /// Creates a new `Bytes` from a static slice. - /// - /// The returned `Bytes` will point directly to the static slice. There is - /// no allocating or copying. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let b = Bytes::from_static(b"hello"); - /// assert_eq!(&b[..], b"hello"); - /// ``` - #[inline] - #[cfg(not(all(loom, test)))] - pub const fn from_static(bytes: &'static [u8]) -> Self { - Bytes { - ptr: bytes.as_ptr(), - len: bytes.len(), - data: AtomicPtr::new(ptr::null_mut()), - vtable: &STATIC_VTABLE, - } - } - - #[cfg(all(loom, test))] - pub fn from_static(bytes: &'static [u8]) -> Self { - Bytes { - ptr: bytes.as_ptr(), - len: bytes.len(), - data: AtomicPtr::new(ptr::null_mut()), - vtable: &STATIC_VTABLE, - } - } - - /// Returns the number of bytes contained in this `Bytes`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let b = Bytes::from(&b"hello"[..]); - /// assert_eq!(b.len(), 5); - /// ``` - #[inline] - pub const fn len(&self) -> usize { - self.len - } - - /// Returns true if the `Bytes` has a length of 0. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let b = Bytes::new(); - /// assert!(b.is_empty()); - /// ``` - #[inline] - pub const fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Creates `Bytes` instance from slice, by copying it. - pub fn copy_from_slice(data: &[u8]) -> Self { - data.to_vec().into() - } - - /// Returns a slice of self for the provided range. - /// - /// This will increment the reference count for the underlying memory and - /// return a new `Bytes` handle set to the slice. - /// - /// This operation is `O(1)`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let a = Bytes::from(&b"hello world"[..]); - /// let b = a.slice(2..5); - /// - /// assert_eq!(&b[..], b"llo"); - /// ``` - /// - /// # Panics - /// - /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing - /// will panic. 
- pub fn slice(&self, range: impl RangeBounds) -> Self { - use core::ops::Bound; - - let len = self.len(); - - let begin = match range.start_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n + 1, - Bound::Unbounded => 0, - }; - - let end = match range.end_bound() { - Bound::Included(&n) => n.checked_add(1).expect("out of range"), - Bound::Excluded(&n) => n, - Bound::Unbounded => len, - }; - - assert!( - begin <= end, - "range start must not be greater than end: {:?} <= {:?}", - begin, - end, - ); - assert!( - end <= len, - "range end out of bounds: {:?} <= {:?}", - end, - len, - ); - - if end == begin { - return Bytes::new(); - } - - let mut ret = self.clone(); - - ret.len = end - begin; - ret.ptr = unsafe { ret.ptr.add(begin) }; - - ret - } - - /// Returns a slice of self that is equivalent to the given `subset`. - /// - /// When processing a `Bytes` buffer with other tools, one often gets a - /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it. - /// This function turns that `&[u8]` into another `Bytes`, as if one had - /// called `self.slice()` with the offsets that correspond to `subset`. - /// - /// This operation is `O(1)`. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let bytes = Bytes::from(&b"012345678"[..]); - /// let as_slice = bytes.as_ref(); - /// let subset = &as_slice[2..6]; - /// let subslice = bytes.slice_ref(&subset); - /// assert_eq!(&subslice[..], b"2345"); - /// ``` - /// - /// # Panics - /// - /// Requires that the given `sub` slice is in fact contained within the - /// `Bytes` buffer; otherwise this function will panic. - pub fn slice_ref(&self, subset: &[u8]) -> Self { - // Empty slice and empty Bytes may have their pointers reset - // so explicitly allow empty slice to be a subslice of any slice. 
- if subset.is_empty() { - return Bytes::new(); - } - - let bytes_p = self.as_ptr() as usize; - let bytes_len = self.len(); - - let sub_p = subset.as_ptr() as usize; - let sub_len = subset.len(); - - assert!( - sub_p >= bytes_p, - "subset pointer ({:p}) is smaller than self pointer ({:p})", - subset.as_ptr(), - self.as_ptr(), - ); - assert!( - sub_p + sub_len <= bytes_p + bytes_len, - "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})", - self.as_ptr(), - bytes_len, - subset.as_ptr(), - sub_len, - ); - - let sub_offset = sub_p - bytes_p; - - self.slice(sub_offset..(sub_offset + sub_len)) - } - - /// Splits the bytes into two at the given index. - /// - /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes` - /// contains elements `[at, len)`. - /// - /// This is an `O(1)` operation that just increases the reference count and - /// sets a few indices. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let mut a = Bytes::from(&b"hello world"[..]); - /// let b = a.split_off(5); - /// - /// assert_eq!(&a[..], b"hello"); - /// assert_eq!(&b[..], b" world"); - /// ``` - /// - /// # Panics - /// - /// Panics if `at > len`. - #[must_use = "consider Bytes::truncate if you don't need the other half"] - pub fn split_off(&mut self, at: usize) -> Self { - assert!( - at <= self.len(), - "split_off out of bounds: {:?} <= {:?}", - at, - self.len(), - ); - - if at == self.len() { - return Bytes::new(); - } - - if at == 0 { - return mem::replace(self, Bytes::new()); - } - - let mut ret = self.clone(); - - self.len = at; - - unsafe { ret.inc_start(at) }; - - ret - } - - /// Splits the bytes into two at the given index. - /// - /// Afterwards `self` contains elements `[at, len)`, and the returned - /// `Bytes` contains elements `[0, at)`. - /// - /// This is an `O(1)` operation that just increases the reference count and - /// sets a few indices. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let mut a = Bytes::from(&b"hello world"[..]); - /// let b = a.split_to(5); - /// - /// assert_eq!(&a[..], b" world"); - /// assert_eq!(&b[..], b"hello"); - /// ``` - /// - /// # Panics - /// - /// Panics if `at > len`. - #[must_use = "consider Bytes::advance if you don't need the other half"] - pub fn split_to(&mut self, at: usize) -> Self { - assert!( - at <= self.len(), - "split_to out of bounds: {:?} <= {:?}", - at, - self.len(), - ); - - if at == self.len() { - return mem::replace(self, Bytes::new()); - } - - if at == 0 { - return Bytes::new(); - } - - let mut ret = self.clone(); - - unsafe { self.inc_start(at) }; - - ret.len = at; - ret - } - - /// Shortens the buffer, keeping the first `len` bytes and dropping the - /// rest. - /// - /// If `len` is greater than the buffer's current length, this has no - /// effect. - /// - /// The [`split_off`] method can emulate `truncate`, but this causes the - /// excess bytes to be returned instead of dropped. - /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let mut buf = Bytes::from(&b"hello world"[..]); - /// buf.truncate(5); - /// assert_eq!(buf, b"hello"[..]); - /// ``` - /// - /// [`split_off`]: #method.split_off - #[inline] - pub fn truncate(&mut self, len: usize) { - if len < self.len { - // The Vec "promotable" vtables do not store the capacity, - // so we cannot truncate while using this repr. We *have* to - // promote using `split_off` so the capacity can be stored. - if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE - || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE - { - drop(self.split_off(len)); - } else { - self.len = len; - } - } - } - - /// Clears the buffer, removing all data. 
- /// - /// # Examples - /// - /// ``` - /// use bytes::Bytes; - /// - /// let mut buf = Bytes::from(&b"hello world"[..]); - /// buf.clear(); - /// assert!(buf.is_empty()); - /// ``` - #[inline] - pub fn clear(&mut self) { - self.truncate(0); - } - - #[inline] - pub(crate) unsafe fn with_vtable( - ptr: *const u8, - len: usize, - data: AtomicPtr<()>, - vtable: &'static Vtable, - ) -> Bytes { - Bytes { - ptr, - len, - data, - vtable, - } - } - - // private - - #[inline] - fn as_slice(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self.ptr, self.len) } - } - - #[inline] - unsafe fn inc_start(&mut self, by: usize) { - // should already be asserted, but debug assert for tests - debug_assert!(self.len >= by, "internal: inc_start out of bounds"); - self.len -= by; - self.ptr = self.ptr.add(by); - } -} - -// Vtable must enforce this behavior -unsafe impl Send for Bytes {} -unsafe impl Sync for Bytes {} - -impl Drop for Bytes { - #[inline] - fn drop(&mut self) { - unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) } - } -} - -impl Clone for Bytes { - #[inline] - fn clone(&self) -> Bytes { - unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) } - } -} - -impl Buf for Bytes { - #[inline] - fn remaining(&self) -> usize { - self.len() - } - - #[inline] - fn chunk(&self) -> &[u8] { - self.as_slice() - } - - #[inline] - fn advance(&mut self, cnt: usize) { - assert!( - cnt <= self.len(), - "cannot advance past `remaining`: {:?} <= {:?}", - cnt, - self.len(), - ); - - unsafe { - self.inc_start(cnt); - } - } - - fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { - if len == self.remaining() { - core::mem::replace(self, Bytes::new()) - } else { - let ret = self.slice(..len); - self.advance(len); - ret - } - } -} - -impl Deref for Bytes { - type Target = [u8]; - - #[inline] - fn deref(&self) -> &[u8] { - self.as_slice() - } -} - -impl AsRef<[u8]> for Bytes { - #[inline] - fn as_ref(&self) -> &[u8] { - self.as_slice() - } -} - -impl hash::Hash for Bytes 
{ - fn hash(&self, state: &mut H) - where - H: hash::Hasher, - { - self.as_slice().hash(state); - } -} - -impl Borrow<[u8]> for Bytes { - fn borrow(&self) -> &[u8] { - self.as_slice() - } -} - -impl IntoIterator for Bytes { - type Item = u8; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self) - } -} - -impl<'a> IntoIterator for &'a Bytes { - type Item = &'a u8; - type IntoIter = core::slice::Iter<'a, u8>; - - fn into_iter(self) -> Self::IntoIter { - self.as_slice().iter() - } -} - -impl FromIterator for Bytes { - fn from_iter>(into_iter: T) -> Self { - Vec::from_iter(into_iter).into() - } -} - -// impl Eq - -impl PartialEq for Bytes { - fn eq(&self, other: &Bytes) -> bool { - self.as_slice() == other.as_slice() - } -} - -impl PartialOrd for Bytes { - fn partial_cmp(&self, other: &Bytes) -> Option { - self.as_slice().partial_cmp(other.as_slice()) - } -} - -impl Ord for Bytes { - fn cmp(&self, other: &Bytes) -> cmp::Ordering { - self.as_slice().cmp(other.as_slice()) - } -} - -impl Eq for Bytes {} - -impl PartialEq<[u8]> for Bytes { - fn eq(&self, other: &[u8]) -> bool { - self.as_slice() == other - } -} - -impl PartialOrd<[u8]> for Bytes { - fn partial_cmp(&self, other: &[u8]) -> Option { - self.as_slice().partial_cmp(other) - } -} - -impl PartialEq for [u8] { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for [u8] { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for Bytes { - fn eq(&self, other: &str) -> bool { - self.as_slice() == other.as_bytes() - } -} - -impl PartialOrd for Bytes { - fn partial_cmp(&self, other: &str) -> Option { - self.as_slice().partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for str { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for str { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as 
PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl PartialEq> for Bytes { - fn eq(&self, other: &Vec) -> bool { - *self == other[..] - } -} - -impl PartialOrd> for Bytes { - fn partial_cmp(&self, other: &Vec) -> Option { - self.as_slice().partial_cmp(&other[..]) - } -} - -impl PartialEq for Vec { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for Vec { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for Bytes { - fn eq(&self, other: &String) -> bool { - *self == other[..] - } -} - -impl PartialOrd for Bytes { - fn partial_cmp(&self, other: &String) -> Option { - self.as_slice().partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for String { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for String { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl PartialEq for &[u8] { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for &[u8] { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) - } -} - -impl PartialEq for &str { - fn eq(&self, other: &Bytes) -> bool { - *other == *self - } -} - -impl PartialOrd for &str { - fn partial_cmp(&self, other: &Bytes) -> Option { - <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) - } -} - -impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes -where - Bytes: PartialEq, -{ - fn eq(&self, other: &&'a T) -> bool { - *self == **other - } -} - -impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes -where - Bytes: PartialOrd, -{ - fn partial_cmp(&self, other: &&'a T) -> Option { - self.partial_cmp(&**other) - } -} - -// impl From - -impl Default for Bytes { - #[inline] - fn default() -> Bytes { - Bytes::new() - } -} - -impl From<&'static [u8]> for Bytes { - fn from(slice: &'static [u8]) -> Bytes 
{ - Bytes::from_static(slice) - } -} - -impl From<&'static str> for Bytes { - fn from(slice: &'static str) -> Bytes { - Bytes::from_static(slice.as_bytes()) - } -} - -impl From> for Bytes { - fn from(vec: Vec) -> Bytes { - let mut vec = vec; - let ptr = vec.as_mut_ptr(); - let len = vec.len(); - let cap = vec.capacity(); - - // Avoid an extra allocation if possible. - if len == cap { - return Bytes::from(vec.into_boxed_slice()); - } - - let shared = Box::new(Shared { - buf: ptr, - cap, - ref_cnt: AtomicUsize::new(1), - }); - mem::forget(vec); - - let shared = Box::into_raw(shared); - // The pointer should be aligned, so this assert should - // always succeed. - debug_assert!( - 0 == (shared as usize & KIND_MASK), - "internal: Box should have an aligned pointer", - ); - Bytes { - ptr, - len, - data: AtomicPtr::new(shared as _), - vtable: &SHARED_VTABLE, - } - } -} - -impl From> for Bytes { - fn from(slice: Box<[u8]>) -> Bytes { - // Box<[u8]> doesn't contain a heap allocation for empty slices, - // so the pointer isn't aligned enough for the KIND_VEC stashing to - // work. 
- if slice.is_empty() { - return Bytes::new(); - } - - let len = slice.len(); - let ptr = Box::into_raw(slice) as *mut u8; - - if ptr as usize & 0x1 == 0 { - let data = ptr_map(ptr, |addr| addr | KIND_VEC); - Bytes { - ptr, - len, - data: AtomicPtr::new(data.cast()), - vtable: &PROMOTABLE_EVEN_VTABLE, - } - } else { - Bytes { - ptr, - len, - data: AtomicPtr::new(ptr.cast()), - vtable: &PROMOTABLE_ODD_VTABLE, - } - } - } -} - -impl From for Bytes { - fn from(s: String) -> Bytes { - Bytes::from(s.into_bytes()) - } -} - -impl From for Vec { - fn from(bytes: Bytes) -> Vec { - let bytes = mem::ManuallyDrop::new(bytes); - unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) } - } -} - -// ===== impl Vtable ===== - -impl fmt::Debug for Vtable { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Vtable") - .field("clone", &(self.clone as *const ())) - .field("drop", &(self.drop as *const ())) - .finish() - } -} - -// ===== impl StaticVtable ===== - -const STATIC_VTABLE: Vtable = Vtable { - clone: static_clone, - to_vec: static_to_vec, - drop: static_drop, -}; - -unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let slice = slice::from_raw_parts(ptr, len); - Bytes::from_static(slice) -} - -unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - let slice = slice::from_raw_parts(ptr, len); - slice.to_vec() -} - -unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { - // nothing to drop for &'static [u8] -} - -// ===== impl PromotableVtable ===== - -static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { - clone: promotable_even_clone, - to_vec: promotable_even_to_vec, - drop: promotable_even_drop, -}; - -static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { - clone: promotable_odd_clone, - to_vec: promotable_odd_to_vec, - drop: promotable_odd_drop, -}; - -unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = 
data.load(Ordering::Acquire); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - shallow_clone_arc(shared.cast(), ptr, len) - } else { - debug_assert_eq!(kind, KIND_VEC); - let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); - shallow_clone_vec(data, shared, buf, ptr, len) - } -} - -unsafe fn promotable_to_vec( - data: &AtomicPtr<()>, - ptr: *const u8, - len: usize, - f: fn(*mut ()) -> *mut u8, -) -> Vec { - let shared = data.load(Ordering::Acquire); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - shared_to_vec_impl(shared.cast(), ptr, len) - } else { - // If Bytes holds a Vec, then the offset must be 0. - debug_assert_eq!(kind, KIND_VEC); - - let buf = f(shared); - - let cap = (ptr as usize - buf as usize) + len; - - // Copy back buffer - ptr::copy(ptr, buf, len); - - Vec::from_raw_parts(buf, len, cap) - } -} - -unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - promotable_to_vec(data, ptr, len, |shared| { - ptr_map(shared.cast(), |addr| addr & !KIND_MASK) - }) -} - -unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - data.with_mut(|shared| { - let shared = *shared; - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - release_shared(shared.cast()); - } else { - debug_assert_eq!(kind, KIND_VEC); - let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); - free_boxed_slice(buf, ptr, len); - } - }); -} - -unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Acquire); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - shallow_clone_arc(shared as _, ptr, len) - } else { - debug_assert_eq!(kind, KIND_VEC); - shallow_clone_vec(data, shared, shared.cast(), ptr, len) - } -} - -unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - promotable_to_vec(data, ptr, len, |shared| shared.cast()) -} - 
-unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - data.with_mut(|shared| { - let shared = *shared; - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - release_shared(shared.cast()); - } else { - debug_assert_eq!(kind, KIND_VEC); - - free_boxed_slice(shared.cast(), ptr, len); - } - }); -} - -unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { - let cap = (offset as usize - buf as usize) + len; - dealloc(buf, Layout::from_size_align(cap, 1).unwrap()) -} - -// ===== impl SharedVtable ===== - -struct Shared { - // Holds arguments to dealloc upon Drop, but otherwise doesn't use them - buf: *mut u8, - cap: usize, - ref_cnt: AtomicUsize, -} - -impl Drop for Shared { - fn drop(&mut self) { - unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) } - } -} - -// Assert that the alignment of `Shared` is divisible by 2. -// This is a necessary invariant since we depend on allocating `Shared` a -// shared object to implicitly carry the `KIND_ARC` flag in its pointer. -// This flag is set when the LSB is 0. -const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2. - -static SHARED_VTABLE: Vtable = Vtable { - clone: shared_clone, - to_vec: shared_to_vec, - drop: shared_drop, -}; - -const KIND_ARC: usize = 0b0; -const KIND_VEC: usize = 0b1; -const KIND_MASK: usize = 0b1; - -unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Relaxed); - shallow_clone_arc(shared as _, ptr, len) -} - -unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec { - // Check that the ref_cnt is 1 (unique). - // - // If it is unique, then it is set to 0 with AcqRel fence for the same - // reason in release_shared. - // - // Otherwise, we take the other branch and call release_shared. 
- if (*shared) - .ref_cnt - .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed) - .is_ok() - { - let buf = (*shared).buf; - let cap = (*shared).cap; - - // Deallocate Shared - drop(Box::from_raw(shared as *mut mem::ManuallyDrop)); - - // Copy back buffer - ptr::copy(ptr, buf, len); - - Vec::from_raw_parts(buf, len, cap) - } else { - let v = slice::from_raw_parts(ptr, len).to_vec(); - release_shared(shared); - v - } -} - -unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len) -} - -unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { - data.with_mut(|shared| { - release_shared(shared.cast()); - }); -} - -unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes { - let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed); - - if old_size > usize::MAX >> 1 { - crate::abort(); - } - - Bytes { - ptr, - len, - data: AtomicPtr::new(shared as _), - vtable: &SHARED_VTABLE, - } -} - -#[cold] -unsafe fn shallow_clone_vec( - atom: &AtomicPtr<()>, - ptr: *const (), - buf: *mut u8, - offset: *const u8, - len: usize, -) -> Bytes { - // If the buffer is still tracked in a `Vec`. It is time to - // promote the vec to an `Arc`. This could potentially be called - // concurrently, so some care must be taken. - - // First, allocate a new `Shared` instance containing the - // `Vec` fields. It's important to note that `ptr`, `len`, - // and `cap` cannot be mutated without having `&mut self`. - // This means that these fields will not be concurrently - // updated and since the buffer hasn't been promoted to an - // `Arc`, those three fields still are the components of the - // vector. - let shared = Box::new(Shared { - buf, - cap: (offset as usize - buf as usize) + len, - // Initialize refcount to 2. One for this reference, and one - // for the new clone that will be returned from - // `shallow_clone`. 
- ref_cnt: AtomicUsize::new(2), - }); - - let shared = Box::into_raw(shared); - - // The pointer should be aligned, so this assert should - // always succeed. - debug_assert!( - 0 == (shared as usize & KIND_MASK), - "internal: Box should have an aligned pointer", - ); - - // Try compare & swapping the pointer into the `arc` field. - // `Release` is used synchronize with other threads that - // will load the `arc` field. - // - // If the `compare_exchange` fails, then the thread lost the - // race to promote the buffer to shared. The `Acquire` - // ordering will synchronize with the `compare_exchange` - // that happened in the other thread and the `Shared` - // pointed to by `actual` will be visible. - match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) { - Ok(actual) => { - debug_assert!(actual as usize == ptr as usize); - // The upgrade was successful, the new handle can be - // returned. - Bytes { - ptr: offset, - len, - data: AtomicPtr::new(shared as _), - vtable: &SHARED_VTABLE, - } - } - Err(actual) => { - // The upgrade failed, a concurrent clone happened. Release - // the allocation that was made in this thread, it will not - // be needed. - let shared = Box::from_raw(shared); - mem::forget(*shared); - - // Buffer already promoted to shared storage, so increment ref - // count. - shallow_clone_arc(actual as _, offset, len) - } - } -} - -unsafe fn release_shared(ptr: *mut Shared) { - // `Shared` storage... follow the drop steps from Arc. - if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 { - return; - } - - // This fence is needed to prevent reordering of use of the data and - // deletion of the data. Because it is marked `Release`, the decreasing - // of the reference count synchronizes with this `Acquire` fence. This - // means that use of the data happens before decreasing the reference - // count, which happens before this fence, which happens before the - // deletion of the data. 
- // - // As explained in the [Boost documentation][1], - // - // > It is important to enforce any possible access to the object in one - // > thread (through an existing reference) to *happen before* deleting - // > the object in a different thread. This is achieved by a "release" - // > operation after dropping a reference (any access to the object - // > through this reference must obviously happened before), and an - // > "acquire" operation before deleting the object. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - // - // Thread sanitizer does not support atomic fences. Use an atomic load - // instead. - (*ptr).ref_cnt.load(Ordering::Acquire); - - // Drop the data - drop(Box::from_raw(ptr)); -} - -// Ideally we would always use this version of `ptr_map` since it is strict -// provenance compatible, but it results in worse codegen. We will however still -// use it on miri because it gives better diagnostics for people who test bytes -// code with miri. -// -// See https://github.com/tokio-rs/bytes/pull/545 for more info. 
-#[cfg(miri)] -fn ptr_map(ptr: *mut u8, f: F) -> *mut u8 -where - F: FnOnce(usize) -> usize, -{ - let old_addr = ptr as usize; - let new_addr = f(old_addr); - let diff = new_addr.wrapping_sub(old_addr); - ptr.wrapping_add(diff) -} - -#[cfg(not(miri))] -fn ptr_map(ptr: *mut u8, f: F) -> *mut u8 -where - F: FnOnce(usize) -> usize, -{ - let old_addr = ptr as usize; - let new_addr = f(old_addr); - new_addr as *mut u8 -} - -// compile-fails - -/// ```compile_fail -/// use bytes::Bytes; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = Bytes::from("hello world"); -/// b1.split_to(6); -/// } -/// ``` -fn _split_to_must_use() {} - -/// ```compile_fail -/// use bytes::Bytes; -/// #[deny(unused_must_use)] -/// { -/// let mut b1 = Bytes::from("hello world"); -/// b1.split_off(6); -/// } -/// ``` -fn _split_off_must_use() {} - -// fuzz tests -#[cfg(all(test, loom))] -mod fuzz { - use loom::sync::Arc; - use loom::thread; - - use super::Bytes; - #[test] - fn bytes_cloning_vec() { - loom::model(|| { - let a = Bytes::from(b"abcdefgh".to_vec()); - let addr = a.as_ptr() as usize; - - // test the Bytes::clone is Sync by putting it in an Arc - let a1 = Arc::new(a); - let a2 = a1.clone(); - - let t1 = thread::spawn(move || { - let b: Bytes = (*a1).clone(); - assert_eq!(b.as_ptr() as usize, addr); - }); - - let t2 = thread::spawn(move || { - let b: Bytes = (*a2).clone(); - assert_eq!(b.as_ptr() as usize, addr); - }); - - t1.join().unwrap(); - t2.join().unwrap(); - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/fmt/debug.rs s390-tools-2.33.1/rust-vendor/bytes/src/fmt/debug.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/fmt/debug.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/fmt/debug.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,49 +0,0 @@ -use core::fmt::{Debug, Formatter, Result}; - -use super::BytesRef; -use crate::{Bytes, BytesMut}; - -/// Alternative implementation of `std::fmt::Debug` for byte slice. 
-/// -/// Standard `Debug` implementation for `[u8]` is comma separated -/// list of numbers. Since large amount of byte strings are in fact -/// ASCII strings or contain a lot of ASCII strings (e. g. HTTP), -/// it is convenient to print strings as ASCII when possible. -impl Debug for BytesRef<'_> { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - write!(f, "b\"")?; - for &b in self.0 { - // https://doc.rust-lang.org/reference/tokens.html#byte-escapes - if b == b'\n' { - write!(f, "\\n")?; - } else if b == b'\r' { - write!(f, "\\r")?; - } else if b == b'\t' { - write!(f, "\\t")?; - } else if b == b'\\' || b == b'"' { - write!(f, "\\{}", b as char)?; - } else if b == b'\0' { - write!(f, "\\0")?; - // ASCII printable - } else if (0x20..0x7f).contains(&b) { - write!(f, "{}", b as char)?; - } else { - write!(f, "\\x{:02x}", b)?; - } - } - write!(f, "\"")?; - Ok(()) - } -} - -impl Debug for Bytes { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - Debug::fmt(&BytesRef(self.as_ref()), f) - } -} - -impl Debug for BytesMut { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - Debug::fmt(&BytesRef(self.as_ref()), f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/fmt/hex.rs s390-tools-2.33.1/rust-vendor/bytes/src/fmt/hex.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/fmt/hex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/fmt/hex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -use core::fmt::{Formatter, LowerHex, Result, UpperHex}; - -use super::BytesRef; -use crate::{Bytes, BytesMut}; - -impl LowerHex for BytesRef<'_> { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - for &b in self.0 { - write!(f, "{:02x}", b)?; - } - Ok(()) - } -} - -impl UpperHex for BytesRef<'_> { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - for &b in self.0 { - write!(f, "{:02X}", b)?; - } - Ok(()) - } -} - -macro_rules! 
hex_impl { - ($tr:ident, $ty:ty) => { - impl $tr for $ty { - fn fmt(&self, f: &mut Formatter<'_>) -> Result { - $tr::fmt(&BytesRef(self.as_ref()), f) - } - } - }; -} - -hex_impl!(LowerHex, Bytes); -hex_impl!(LowerHex, BytesMut); -hex_impl!(UpperHex, Bytes); -hex_impl!(UpperHex, BytesMut); diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/fmt/mod.rs s390-tools-2.33.1/rust-vendor/bytes/src/fmt/mod.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/fmt/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/fmt/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,5 +0,0 @@ -mod debug; -mod hex; - -/// `BytesRef` is not a part of public API of bytes crate. -struct BytesRef<'a>(&'a [u8]); diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/lib.rs s390-tools-2.33.1/rust-vendor/bytes/src/lib.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,117 +0,0 @@ -#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] -#![doc(test( - no_crate_inject, - attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) -))] -#![no_std] -#![cfg_attr(docsrs, feature(doc_cfg))] - -//! Provides abstractions for working with bytes. -//! -//! The `bytes` crate provides an efficient byte buffer structure -//! ([`Bytes`](struct.Bytes.html)) and traits for working with buffer -//! implementations ([`Buf`], [`BufMut`]). -//! -//! [`Buf`]: trait.Buf.html -//! [`BufMut`]: trait.BufMut.html -//! -//! # `Bytes` -//! -//! `Bytes` is an efficient container for storing and operating on contiguous -//! slices of memory. It is intended for use primarily in networking code, but -//! could have applications elsewhere as well. -//! -//! `Bytes` values facilitate zero-copy network programming by allowing multiple -//! `Bytes` objects to point to the same underlying memory. This is managed by -//! 
using a reference count to track when the memory is no longer needed and can -//! be freed. -//! -//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]` -//! or `Vec`), but usually a `BytesMut` is used first and written to. For -//! example: -//! -//! ```rust -//! use bytes::{BytesMut, BufMut}; -//! -//! let mut buf = BytesMut::with_capacity(1024); -//! buf.put(&b"hello world"[..]); -//! buf.put_u16(1234); -//! -//! let a = buf.split(); -//! assert_eq!(a, b"hello world\x04\xD2"[..]); -//! -//! buf.put(&b"goodbye world"[..]); -//! -//! let b = buf.split(); -//! assert_eq!(b, b"goodbye world"[..]); -//! -//! assert_eq!(buf.capacity(), 998); -//! ``` -//! -//! In the above example, only a single buffer of 1024 is allocated. The handles -//! `a` and `b` will share the underlying buffer and maintain indices tracking -//! the view into the buffer represented by the handle. -//! -//! See the [struct docs] for more details. -//! -//! [struct docs]: struct.Bytes.html -//! -//! # `Buf`, `BufMut` -//! -//! These two traits provide read and write access to buffers. The underlying -//! storage may or may not be in contiguous memory. For example, `Bytes` is a -//! buffer that guarantees contiguous memory, but a [rope] stores the bytes in -//! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current -//! position in the underlying byte storage. When bytes are read or written, the -//! cursor is advanced. -//! -//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) -//! -//! ## Relation with `Read` and `Write` -//! -//! At first glance, it may seem that `Buf` and `BufMut` overlap in -//! functionality with `std::io::Read` and `std::io::Write`. However, they -//! serve different purposes. A buffer is the value that is provided as an -//! argument to `Read::read` and `Write::write`. `Read` and `Write` may then -//! perform a syscall, which has the potential of failing. Operations on `Buf` -//! and `BufMut` are infallible. 
- -extern crate alloc; - -#[cfg(feature = "std")] -extern crate std; - -pub mod buf; -pub use crate::buf::{Buf, BufMut}; - -mod bytes; -mod bytes_mut; -mod fmt; -mod loom; -pub use crate::bytes::Bytes; -pub use crate::bytes_mut::BytesMut; - -// Optional Serde support -#[cfg(feature = "serde")] -mod serde; - -#[inline(never)] -#[cold] -fn abort() -> ! { - #[cfg(feature = "std")] - { - std::process::abort(); - } - - #[cfg(not(feature = "std"))] - { - struct Abort; - impl Drop for Abort { - fn drop(&mut self) { - panic!(); - } - } - let _a = Abort; - panic!("abort"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/loom.rs s390-tools-2.33.1/rust-vendor/bytes/src/loom.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/loom.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/loom.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ -#[cfg(not(all(test, loom)))] -pub(crate) mod sync { - pub(crate) mod atomic { - pub(crate) use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; - - pub(crate) trait AtomicMut { - fn with_mut(&mut self, f: F) -> R - where - F: FnOnce(&mut *mut T) -> R; - } - - impl AtomicMut for AtomicPtr { - fn with_mut(&mut self, f: F) -> R - where - F: FnOnce(&mut *mut T) -> R, - { - f(self.get_mut()) - } - } - } -} - -#[cfg(all(test, loom))] -pub(crate) mod sync { - pub(crate) mod atomic { - pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; - - pub(crate) trait AtomicMut {} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/bytes/src/serde.rs s390-tools-2.33.1/rust-vendor/bytes/src/serde.rs --- s390-tools-2.31.0/rust-vendor/bytes/src/serde.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/bytes/src/serde.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,89 +0,0 @@ -use super::{Bytes, BytesMut}; -use alloc::string::String; -use alloc::vec::Vec; -use core::{cmp, fmt}; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; - -macro_rules! 
serde_impl { - ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => { - impl Serialize for $ty { - #[inline] - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_bytes(&self) - } - } - - struct $visitor_ty; - - impl<'de> de::Visitor<'de> for $visitor_ty { - type Value = $ty; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("byte array") - } - - #[inline] - fn visit_seq(self, mut seq: V) -> Result - where - V: de::SeqAccess<'de>, - { - let len = cmp::min(seq.size_hint().unwrap_or(0), 4096); - let mut values: Vec = Vec::with_capacity(len); - - while let Some(value) = seq.next_element()? { - values.push(value); - } - - Ok($ty::$from_vec(values)) - } - - #[inline] - fn visit_bytes(self, v: &[u8]) -> Result - where - E: de::Error, - { - Ok($ty::$from_slice(v)) - } - - #[inline] - fn visit_byte_buf(self, v: Vec) -> Result - where - E: de::Error, - { - Ok($ty::$from_vec(v)) - } - - #[inline] - fn visit_str(self, v: &str) -> Result - where - E: de::Error, - { - Ok($ty::$from_slice(v.as_bytes())) - } - - #[inline] - fn visit_string(self, v: String) -> Result - where - E: de::Error, - { - Ok($ty::$from_vec(v.into_bytes())) - } - } - - impl<'de> Deserialize<'de> for $ty { - #[inline] - fn deserialize(deserializer: D) -> Result<$ty, D::Error> - where - D: Deserializer<'de>, - { - deserializer.deserialize_byte_buf($visitor_ty) - } - } - }; -} - -serde_impl!(Bytes, BytesVisitor, copy_from_slice, from); -serde_impl!(BytesMut, BytesMutVisitor, from, from_vec); diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/clap_complete/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/clap_complete/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/.cargo-checksum.json 2024-05-28 11:57:39.000000000 +0200 @@ -0,0 +1 @@ 
+{"files":{},"package":"7f6b5c519bab3ea61843a7923d074b04245624bb84a64a8c150f5deb014e388b"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/Cargo.lock s390-tools-2.33.1/rust-vendor/clap_complete/Cargo.lock --- s390-tools-2.31.0/rust-vendor/clap_complete/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/Cargo.lock 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,848 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "addr2line" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "anstream" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e579a7752471abc2a8268df8b20005e3eadd975f585398f17efcfd8d4927371" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" + +[[package]] +name = "anstyle-parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", 
+] + +[[package]] +name = "anstyle-wincon" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcd8291a340dd8ac70e18878bc4501dd7b4ff970cfa21c207d36ece51ea88fd" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" +version = "0.3.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "cc" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc" +dependencies = [ + "clap_builder", + "clap_derive", + "once_cell", +] + +[[package]] +name = "clap_builder" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990" +dependencies = [ + "anstyle", + "backtrace", + "bitflags", + "clap_lex", +] + +[[package]] +name = "clap_complete" +version = "4.3.1" +dependencies = [ + "clap", + 
"clap_lex", + "is_executable", + "pathdiff", + "shlex", + "snapbox", + "trycmd", + "unicode-xid", +] + +[[package]] +name = "clap_derive" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" + +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + +[[package]] +name = "crossbeam-channel" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "either" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" + +[[package]] +name = "errno" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "escargot" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5584ba17d7ab26a8a7284f13e5bd196294dd2f2d79773cff29b9e9edef601a6" +dependencies = [ + "log", + "once_cell", + "serde", + "serde_json", +] + +[[package]] +name = "gimli" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" + +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + +[[package]] +name = "indexmap" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +dependencies = [ + "libc", + "windows-sys 0.42.0", +] + +[[package]] +name = "is-terminal" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" +dependencies = [ + "hermit-abi 0.3.1", + "io-lifetimes", + "rustix", + "windows-sys 0.48.0", +] + +[[package]] +name = "is_executable" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa9acdc6d67b75e626ad644734e8bc6df893d9cd2a834129065d3dd6158ea9c8" +dependencies = [ + "winapi", +] + +[[package]] +name = "itoa" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" + +[[package]] +name = "libc" +version = "0.2.144" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" + +[[package]] +name = "linux-raw-sys" +version = 
"0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + +[[package]] +name = "miniz_oxide" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + +[[package]] +name = "num_cpus" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" +dependencies = [ + "hermit-abi 0.1.19", + "libc", +] + +[[package]] +name = "object" +version = "0.30.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" + +[[package]] +name = "os_pipe" +version = "1.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae859aa07428ca9a929b936690f8b12dc5f11dd8c6992a18ca93919f28bc177" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + +[[package]] +name = "proc-macro2" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rayon" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils", + "num_cpus", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + +[[package]] +name = "rustix" +version = "0.37.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2aae838e49b3d63e9274e1c01833cc8139d3fec468c3b84688c628f44b1ae11d" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", +] + +[[package]] +name = "ryu" +version = "1.0.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "serde" +version = "1.0.158" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771d4d9c4163ee138805e12c710dd365e4f44be8be0503cb1bb9eb989425d9c9" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.158" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e801c1712f48475582b7696ac71e0ca34ebb30e09338425384269d9717c62cad" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" +dependencies = [ + "serde", +] + +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + +[[package]] +name = "similar" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ac7f900db32bf3fd12e0117dd3dc4da74bc52ebaac97f39668446d89694803" + +[[package]] +name = "snapbox" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6bccd62078347f89a914e3004d94582e13824d4e3d8a816317862884c423835" +dependencies = [ + "anstream", + "anstyle", + "escargot", + "libc", + 
"normalize-line-endings", + "os_pipe", + "similar", + "snapbox-macros", + "wait-timeout", + "windows-sys 0.45.0", +] + +[[package]] +name = "snapbox-macros" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaaf09df9f0eeae82be96290918520214530e738a7fe5a351b0f24cf77c0ca31" +dependencies = [ + "anstream", +] + +[[package]] +name = "syn" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "toml_datetime" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "trycmd" +version = "0.14.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2925e71868a12b173c1eb166018c2d2f9dfaedfcaec747bdb6ea2246785d258e" +dependencies = [ + "glob", + "humantime", + "humantime-serde", + "rayon", + "serde", + "shlex", + "snapbox", + "toml_edit", +] + +[[package]] +name = "unicode-ident" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" + +[[package]] +name = "unicode-xid" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" + +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] 
+name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version 
= "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "winnow" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +dependencies = [ + "memchr", +] diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/Cargo.toml s390-tools-2.33.1/rust-vendor/clap_complete/Cargo.toml --- s390-tools-2.31.0/rust-vendor/clap_complete/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/Cargo.toml 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,147 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.64.0" +name = "clap_complete" +version = "4.3.1" +include = [ + "build.rs", + "src/**/*", + "Cargo.toml", + "LICENSE*", + "README.md", + "benches/**/*", + "examples/**/*", +] +description = "Generate shell completion scripts for your clap::Command" +readme = "README.md" +keywords = [ + "clap", + "cli", + "completion", + "bash", +] +categories = ["command-line-interface"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/clap-rs/clap/tree/master/clap_complete" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[[package.metadata.release.pre-release-replacements]] +file = "CHANGELOG.md" +min = 1 +replace = "{{version}}" +search = "Unreleased" + +[[package.metadata.release.pre-release-replacements]] +exactly = 1 +file = "CHANGELOG.md" +replace = "...{{tag_name}}" +search = '\.\.\.HEAD' + +[[package.metadata.release.pre-release-replacements]] +file = "CHANGELOG.md" +min = 1 +replace = "{{date}}" +search = "ReleaseDate" + +[[package.metadata.release.pre-release-replacements]] +exactly = 1 +file = 
"CHANGELOG.md" +replace = """ + +## [Unreleased] - ReleaseDate +""" +search = "" + +[[package.metadata.release.pre-release-replacements]] +exactly = 1 +file = "CHANGELOG.md" +replace = """ + +[Unreleased]: https://github.com/clap-rs/clap/compare/{{tag_name}}...HEAD""" +search = "" + +[[package.metadata.release.pre-release-replacements]] +exactly = 4 +file = "README.md" +prerelease = true +replace = "github.com/clap-rs/clap/blob/{{tag_name}}/" +search = "github.com/clap-rs/clap/blob/[^/]+/" + +[lib] +bench = false + +[[example]] +name = "dynamic" +required-features = ["unstable-dynamic"] + +[dependencies.clap] +version = "4.1.0" +features = ["std"] +default-features = false + +[dependencies.clap_lex] +version = "0.5.0" +optional = true + +[dependencies.is_executable] +version = "1.0.1" +optional = true + +[dependencies.pathdiff] +version = "0.2.1" +optional = true + +[dependencies.shlex] +version = "1.1.0" +optional = true + +[dependencies.unicode-xid] +version = "0.2.2" +optional = true + +[dev-dependencies.clap] +version = "4.0.0" +features = [ + "std", + "derive", + "help", +] +default-features = false + +[dev-dependencies.snapbox] +version = "0.4.11" +features = ["diff"] + +[dev-dependencies.trycmd] +version = "0.14.16" +features = [ + "color-auto", + "diff", + "examples", +] +default-features = false + +[features] +debug = ["clap/debug"] +default = [] +unstable-dynamic = [ + "dep:clap_lex", + "dep:shlex", + "dep:unicode-xid", + "clap/derive", + "dep:is_executable", + "dep:pathdiff", +] diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/examples/completion-derive.rs s390-tools-2.33.1/rust-vendor/clap_complete/examples/completion-derive.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/examples/completion-derive.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/examples/completion-derive.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,83 @@ +//! How to use value hints and generate shell completions. +//! +//! 
Usage with zsh: +//! ```console +//! $ cargo run --example completion-derive -- --generate=zsh > /usr/local/share/zsh/site-functions/_completion_derive +//! $ compinit +//! $ ./target/debug/examples/completion_derive -- +//! ``` +//! fish: +//! ```console +//! $ cargo run --example completion-derive -- --generate=fish > completion_derive.fish +//! $ . ./completion_derive.fish +//! $ ./target/debug/examples/completion_derive -- +//! ``` +use clap::{Args, Command, CommandFactory, Parser, Subcommand, ValueHint}; +use clap_complete::{generate, Generator, Shell}; +use std::ffi::OsString; +use std::io; +use std::path::PathBuf; + +#[derive(Parser, Debug, PartialEq)] +#[command(name = "completion-derive")] +struct Opt { + // If provided, outputs the completion file for given shell + #[arg(long = "generate", value_enum)] + generator: Option, + #[command(subcommand)] + command: Option, +} + +#[derive(Subcommand, Debug, PartialEq)] +enum Commands { + #[command(visible_alias = "hint")] + ValueHint(ValueHintOpt), +} + +#[derive(Args, Debug, PartialEq)] +struct ValueHintOpt { + // Showcasing all possible ValueHints: + #[arg(long, value_hint = ValueHint::Unknown)] + unknown: Option, + #[arg(long, value_hint = ValueHint::Other)] + other: Option, + #[arg(short, long, value_hint = ValueHint::AnyPath)] + path: Option, + #[arg(short, long, value_hint = ValueHint::FilePath)] + file: Option, + #[arg(short, long, value_hint = ValueHint::DirPath)] + dir: Option, + #[arg(short, long, value_hint = ValueHint::ExecutablePath)] + exe: Option, + #[arg(long, value_hint = ValueHint::CommandName)] + cmd_name: Option, + #[arg(short, long, value_hint = ValueHint::CommandString)] + cmd: Option, + // Command::trailing_var_ar is required to use ValueHint::CommandWithArguments + #[arg(trailing_var_arg = true, value_hint = ValueHint::CommandWithArguments)] + command_with_args: Vec, + #[arg(short, long, value_hint = ValueHint::Username)] + user: Option, + #[arg(long, value_hint = ValueHint::Hostname)] + 
host: Option, + #[arg(long, value_hint = ValueHint::Url)] + url: Option, + #[arg(long, value_hint = ValueHint::EmailAddress)] + email: Option, +} + +fn print_completions(gen: G, cmd: &mut Command) { + generate(gen, cmd, cmd.get_name().to_string(), &mut io::stdout()); +} + +fn main() { + let opt = Opt::parse(); + + if let Some(generator) = opt.generator { + let mut cmd = Opt::command(); + eprintln!("Generating completion file for {generator:?}..."); + print_completions(generator, &mut cmd); + } else { + println!("{opt:#?}"); + } +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/examples/completion.rs s390-tools-2.33.1/rust-vendor/clap_complete/examples/completion.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/examples/completion.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/examples/completion.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,109 @@ +//! Example to test arguments with different ValueHint values. +//! +//! Usage with zsh: +//! ```console +//! $ cargo run --example completion -- --generate=zsh > /usr/local/share/zsh/site-functions/_completion$ +//! $ compinit +//! $ ./target/debug/examples/completion -- +//! ``` +//! fish: +//! ```console +//! $ cargo run --example completion -- --generate=fish > completion.fish +//! $ . ./completion.fish +//! $ ./target/debug/examples/completion -- +//! 
``` +use clap::{value_parser, Arg, Command, ValueHint}; +use clap_complete::{generate, Generator, Shell}; +use std::io; + +fn build_cli() -> Command { + let value_hint_command = Command::new("value-hint") + .visible_alias("hint") + .arg( + Arg::new("unknown") + .long("unknown") + .value_hint(ValueHint::Unknown), + ) + .arg(Arg::new("other").long("other").value_hint(ValueHint::Other)) + .arg( + Arg::new("path") + .long("path") + .short('p') + .value_hint(ValueHint::AnyPath), + ) + .arg( + Arg::new("file") + .long("file") + .short('f') + .value_hint(ValueHint::FilePath), + ) + .arg( + Arg::new("dir") + .long("dir") + .short('d') + .value_hint(ValueHint::DirPath), + ) + .arg( + Arg::new("exe") + .long("exe") + .short('e') + .value_hint(ValueHint::ExecutablePath), + ) + .arg( + Arg::new("cmd_name") + .long("cmd-name") + .value_hint(ValueHint::CommandName), + ) + .arg( + Arg::new("cmd") + .long("cmd") + .short('c') + .value_hint(ValueHint::CommandString), + ) + .arg( + Arg::new("command_with_args") + .num_args(1..) 
+ // AppSettings::TrailingVarArg is required to use ValueHint::CommandWithArguments + .trailing_var_arg(true) + .value_hint(ValueHint::CommandWithArguments), + ) + .arg( + Arg::new("user") + .short('u') + .long("user") + .value_hint(ValueHint::Username), + ) + .arg( + Arg::new("host") + .long("host") + .value_hint(ValueHint::Hostname), + ) + .arg(Arg::new("url").long("url").value_hint(ValueHint::Url)) + .arg( + Arg::new("email") + .long("email") + .value_hint(ValueHint::EmailAddress), + ); + + Command::new("completion") + .arg( + Arg::new("generator") + .long("generate") + .value_parser(value_parser!(Shell)), + ) + .subcommand(value_hint_command) +} + +fn print_completions(gen: G, cmd: &mut Command) { + generate(gen, cmd, cmd.get_name().to_string(), &mut io::stdout()); +} + +fn main() { + let matches = build_cli().get_matches(); + + if let Some(generator) = matches.get_one::("generator") { + let mut cmd = build_cli(); + eprintln!("Generating completion file for {generator}..."); + print_completions(*generator, &mut cmd); + } +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/examples/dynamic.rs s390-tools-2.33.1/rust-vendor/clap_complete/examples/dynamic.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/examples/dynamic.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/examples/dynamic.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,37 @@ +use clap::FromArgMatches; +use clap::Subcommand; + +fn command() -> clap::Command { + let cmd = clap::Command::new("dynamic") + .arg( + clap::Arg::new("input") + .long("input") + .short('i') + .value_hint(clap::ValueHint::FilePath), + ) + .arg( + clap::Arg::new("format") + .long("format") + .short('F') + .value_parser(["json", "yaml", "toml"]), + ) + .args_conflicts_with_subcommands(true); + clap_complete::dynamic::bash::CompleteCommand::augment_subcommands(cmd) +} + +fn main() { + let cmd = command(); + let matches = cmd.get_matches(); + if let Ok(completions) = + 
clap_complete::dynamic::bash::CompleteCommand::from_arg_matches(&matches) + { + completions.complete(&mut command()); + } else { + println!("{matches:#?}"); + } +} + +#[test] +fn verify_cli() { + command().debug_assert(); +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/clap_complete/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/clap_complete/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/LICENSE-APACHE 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/LICENSE-MIT s390-tools-2.33.1/rust-vendor/clap_complete/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/clap_complete/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/LICENSE-MIT 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015-2022 Kevin B. Knapp and Clap Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/README.md s390-tools-2.33.1/rust-vendor/clap_complete/README.md --- s390-tools-2.31.0/rust-vendor/clap_complete/README.md 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/README.md 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,23 @@ + +# clap_complete + +> **Shell completion generation for `clap`** + +[![Crates.io](https://img.shields.io/crates/v/clap_complete?style=flat-square)](https://crates.io/crates/clap_complete) +[![Crates.io](https://img.shields.io/crates/d/clap_complete?style=flat-square)](https://crates.io/crates/clap_complete) +[![License](https://img.shields.io/badge/license-Apache%202.0-blue?style=flat-square)](https://github.com/clap-rs/clap/blob/clap_complete-v4.3.1/LICENSE-APACHE) +[![License](https://img.shields.io/badge/license-MIT-blue?style=flat-square)](https://github.com/clap-rs/clap/blob/clap_complete-v4.3.1/LICENSE-MIT) + +Dual-licensed under [Apache 2.0](LICENSE-APACHE) or [MIT](LICENSE-MIT). + +1. [About](#about) +2. [API Reference](https://docs.rs/clap_complete) +3. [Questions & Discussions](https://github.com/clap-rs/clap/discussions) +4. [CONTRIBUTING](https://github.com/clap-rs/clap/blob/clap_complete-v4.3.1/clap_complete/CONTRIBUTING.md) +5. 
[Sponsors](https://github.com/clap-rs/clap/blob/clap_complete-v4.3.1/README.md#sponsors) + +## About + +### Related Projects + +- [clap_complete_fig](https://crates.io/crates/clap_complete_fig) for [fig](https://fig.io/) shell completion support diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/dynamic.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/dynamic.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/dynamic.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/dynamic.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,549 @@ +//! Complete commands within shells + +/// Complete commands within bash +pub mod bash { + use std::ffi::OsStr; + use std::ffi::OsString; + use std::io::Write; + + use clap_lex::OsStrExt as _; + use unicode_xid::UnicodeXID; + + #[derive(clap::Subcommand)] + #[command(hide = true)] + #[allow(missing_docs)] + #[derive(Clone, Debug)] + pub enum CompleteCommand { + /// Register shell completions for this program + Complete(CompleteArgs), + } + + #[derive(clap::Args)] + #[command(group = clap::ArgGroup::new("complete").multiple(true).conflicts_with("register"))] + #[allow(missing_docs)] + #[derive(Clone, Debug)] + pub struct CompleteArgs { + /// Path to write completion-registration to + #[arg(long, required = true)] + register: Option, + + #[arg( + long, + required = true, + value_name = "COMP_CWORD", + hide_short_help = true, + group = "complete" + )] + index: Option, + + #[arg(long, hide_short_help = true, group = "complete")] + ifs: Option, + + #[arg( + long = "type", + required = true, + hide_short_help = true, + group = "complete" + )] + comp_type: Option, + + #[arg(long, hide_short_help = true, group = "complete")] + space: bool, + + #[arg( + long, + conflicts_with = "space", + hide_short_help = true, + group = "complete" + )] + no_space: bool, + + #[arg(raw = true, hide_short_help = true, group = "complete")] + comp_words: Vec, + } + + impl CompleteCommand { + /// Process the 
completion request + pub fn complete(&self, cmd: &mut clap::Command) -> std::convert::Infallible { + self.try_complete(cmd).unwrap_or_else(|e| e.exit()); + std::process::exit(0) + } + + /// Process the completion request + pub fn try_complete(&self, cmd: &mut clap::Command) -> clap::error::Result<()> { + debug!("CompleteCommand::try_complete: {self:?}"); + let CompleteCommand::Complete(args) = self; + if let Some(out_path) = args.register.as_deref() { + let mut buf = Vec::new(); + let name = cmd.get_name(); + let bin = cmd.get_bin_name().unwrap_or_else(|| cmd.get_name()); + register(name, [bin], bin, &Behavior::default(), &mut buf)?; + if out_path == std::path::Path::new("-") { + std::io::stdout().write_all(&buf)?; + } else if out_path.is_dir() { + let out_path = out_path.join(file_name(name)); + std::fs::write(out_path, buf)?; + } else { + std::fs::write(out_path, buf)?; + } + } else { + let index = args.index.unwrap_or_default(); + let comp_type = args.comp_type.unwrap_or_default(); + let space = match (args.space, args.no_space) { + (true, false) => Some(true), + (false, true) => Some(false), + (true, true) => { + unreachable!("`--space` and `--no-space` set, clap should prevent this") + } + (false, false) => None, + } + .unwrap(); + let current_dir = std::env::current_dir().ok(); + let completions = complete( + cmd, + args.comp_words.clone(), + index, + comp_type, + space, + current_dir.as_deref(), + )?; + + let mut buf = Vec::new(); + for (i, completion) in completions.iter().enumerate() { + if i != 0 { + write!(&mut buf, "{}", args.ifs.as_deref().unwrap_or("\n"))?; + } + write!(&mut buf, "{}", completion.to_string_lossy())?; + } + std::io::stdout().write_all(&buf)?; + } + + Ok(()) + } + } + + /// The recommended file name for the registration code + pub fn file_name(name: &str) -> String { + format!("{name}.bash") + } + + /// Define the completion behavior + pub enum Behavior { + /// Bare bones behavior + Minimal, + /// Fallback to readline behavior when no 
matches are generated + Readline, + /// Customize bash's completion behavior + Custom(String), + } + + impl Default for Behavior { + fn default() -> Self { + Self::Readline + } + } + + /// Generate code to register the dynamic completion + pub fn register( + name: &str, + executables: impl IntoIterator>, + completer: &str, + behavior: &Behavior, + buf: &mut dyn Write, + ) -> Result<(), std::io::Error> { + let escaped_name = name.replace('-', "_"); + debug_assert!( + escaped_name.chars().all(|c| c.is_xid_continue()), + "`name` must be an identifier, got `{escaped_name}`" + ); + let mut upper_name = escaped_name.clone(); + upper_name.make_ascii_uppercase(); + + let executables = executables + .into_iter() + .map(|s| shlex::quote(s.as_ref()).into_owned()) + .collect::>() + .join(" "); + + let options = match behavior { + Behavior::Minimal => "-o nospace -o bashdefault", + Behavior::Readline => "-o nospace -o default -o bashdefault", + Behavior::Custom(c) => c.as_str(), + }; + + let completer = shlex::quote(completer); + + let script = r#" +_clap_complete_NAME() { + local IFS=$'\013' + local SUPPRESS_SPACE=0 + if compopt +o nospace 2> /dev/null; then + SUPPRESS_SPACE=1 + fi + if [[ ${SUPPRESS_SPACE} == 1 ]]; then + SPACE_ARG="--no-space" + else + SPACE_ARG="--space" + fi + COMPREPLY=( $("COMPLETER" complete --index ${COMP_CWORD} --type ${COMP_TYPE} ${SPACE_ARG} --ifs="$IFS" -- "${COMP_WORDS[@]}") ) + if [[ $? 
!= 0 ]]; then + unset COMPREPLY + elif [[ $SUPPRESS_SPACE == 1 ]] && [[ "${COMPREPLY-}" =~ [=/:]$ ]]; then + compopt -o nospace + fi +} +complete OPTIONS -F _clap_complete_NAME EXECUTABLES +"# + .replace("NAME", &escaped_name) + .replace("EXECUTABLES", &executables) + .replace("OPTIONS", options) + .replace("COMPLETER", &completer) + .replace("UPPER", &upper_name); + + writeln!(buf, "{script}")?; + Ok(()) + } + + /// Type of completion attempted that caused a completion function to be called + #[derive(Copy, Clone, Debug, PartialEq, Eq)] + #[non_exhaustive] + pub enum CompType { + /// Normal completion + Normal, + /// List completions after successive tabs + Successive, + /// List alternatives on partial word completion + Alternatives, + /// List completions if the word is not unmodified + Unmodified, + /// Menu completion + Menu, + } + + impl clap::ValueEnum for CompType { + fn value_variants<'a>() -> &'a [Self] { + &[ + Self::Normal, + Self::Successive, + Self::Alternatives, + Self::Unmodified, + Self::Menu, + ] + } + fn to_possible_value(&self) -> ::std::option::Option { + match self { + Self::Normal => { + let value = "9"; + debug_assert_eq!(b'\t'.to_string(), value); + Some( + clap::builder::PossibleValue::new(value) + .alias("normal") + .help("Normal completion"), + ) + } + Self::Successive => { + let value = "63"; + debug_assert_eq!(b'?'.to_string(), value); + Some( + clap::builder::PossibleValue::new(value) + .alias("successive") + .help("List completions after successive tabs"), + ) + } + Self::Alternatives => { + let value = "33"; + debug_assert_eq!(b'!'.to_string(), value); + Some( + clap::builder::PossibleValue::new(value) + .alias("alternatives") + .help("List alternatives on partial word completion"), + ) + } + Self::Unmodified => { + let value = "64"; + debug_assert_eq!(b'@'.to_string(), value); + Some( + clap::builder::PossibleValue::new(value) + .alias("unmodified") + .help("List completions if the word is not unmodified"), + ) + } + Self::Menu => 
{ + let value = "37"; + debug_assert_eq!(b'%'.to_string(), value); + Some( + clap::builder::PossibleValue::new(value) + .alias("menu") + .help("Menu completion"), + ) + } + } + } + } + + impl Default for CompType { + fn default() -> Self { + Self::Normal + } + } + + /// Complete the command specified + pub fn complete( + cmd: &mut clap::Command, + args: Vec, + arg_index: usize, + _comp_type: CompType, + _trailing_space: bool, + current_dir: Option<&std::path::Path>, + ) -> Result, std::io::Error> { + cmd.build(); + + let raw_args = clap_lex::RawArgs::new(args.into_iter()); + let mut cursor = raw_args.cursor(); + let mut target_cursor = raw_args.cursor(); + raw_args.seek( + &mut target_cursor, + clap_lex::SeekFrom::Start(arg_index as u64), + ); + // As we loop, `cursor` will always be pointing to the next item + raw_args.next_os(&mut target_cursor); + + // TODO: Multicall support + if !cmd.is_no_binary_name_set() { + raw_args.next_os(&mut cursor); + } + + let mut current_cmd = &*cmd; + let mut pos_index = 1; + let mut is_escaped = false; + while let Some(arg) = raw_args.next(&mut cursor) { + if cursor == target_cursor { + return complete_arg(&arg, current_cmd, current_dir, pos_index, is_escaped); + } + + debug!("complete::next: Begin parsing '{:?}'", arg.to_value_os(),); + + if let Ok(value) = arg.to_value() { + if let Some(next_cmd) = current_cmd.find_subcommand(value) { + current_cmd = next_cmd; + pos_index = 0; + continue; + } + } + + if is_escaped { + pos_index += 1; + } else if arg.is_escape() { + is_escaped = true; + } else if let Some(_long) = arg.to_long() { + } else if let Some(_short) = arg.to_short() { + } else { + pos_index += 1; + } + } + + Err(std::io::Error::new( + std::io::ErrorKind::Other, + "No completion generated", + )) + } + + fn complete_arg( + arg: &clap_lex::ParsedArg<'_>, + cmd: &clap::Command, + current_dir: Option<&std::path::Path>, + pos_index: usize, + is_escaped: bool, + ) -> Result, std::io::Error> { + debug!( + "complete_arg: 
arg={:?}, cmd={:?}, current_dir={:?}, pos_index={}, is_escaped={}", + arg, + cmd.get_name(), + current_dir, + pos_index, + is_escaped + ); + let mut completions = Vec::new(); + + if !is_escaped { + if let Some((flag, value)) = arg.to_long() { + if let Ok(flag) = flag { + if let Some(value) = value { + if let Some(arg) = cmd.get_arguments().find(|a| a.get_long() == Some(flag)) + { + completions.extend( + complete_arg_value(value.to_str().ok_or(value), arg, current_dir) + .into_iter() + .map(|os| { + // HACK: Need better `OsStr` manipulation + format!("--{}={}", flag, os.to_string_lossy()).into() + }), + ) + } + } else { + completions.extend( + crate::generator::utils::longs_and_visible_aliases(cmd) + .into_iter() + .filter_map(|f| { + f.starts_with(flag).then(|| format!("--{f}").into()) + }), + ); + } + } + } else if arg.is_escape() || arg.is_stdio() || arg.is_empty() { + // HACK: Assuming knowledge of is_escape / is_stdio + completions.extend( + crate::generator::utils::longs_and_visible_aliases(cmd) + .into_iter() + .map(|f| format!("--{f}").into()), + ); + } + + if arg.is_empty() || arg.is_stdio() || arg.is_short() { + // HACK: Assuming knowledge of is_stdio + completions.extend( + crate::generator::utils::shorts_and_visible_aliases(cmd) + .into_iter() + // HACK: Need better `OsStr` manipulation + .map(|f| format!("{}{}", arg.to_value_os().to_string_lossy(), f).into()), + ); + } + } + + if let Some(positional) = cmd + .get_positionals() + .find(|p| p.get_index() == Some(pos_index)) + { + completions.extend(complete_arg_value(arg.to_value(), positional, current_dir)); + } + + if let Ok(value) = arg.to_value() { + completions.extend(complete_subcommand(value, cmd)); + } + + Ok(completions) + } + + fn complete_arg_value( + value: Result<&str, &OsStr>, + arg: &clap::Arg, + current_dir: Option<&std::path::Path>, + ) -> Vec { + let mut values = Vec::new(); + debug!("complete_arg_value: arg={arg:?}, value={value:?}"); + + if let Some(possible_values) = 
crate::generator::utils::possible_values(arg) { + if let Ok(value) = value { + values.extend(possible_values.into_iter().filter_map(|p| { + let name = p.get_name(); + name.starts_with(value).then(|| name.into()) + })); + } + } else { + let value_os = match value { + Ok(value) => OsStr::new(value), + Err(value_os) => value_os, + }; + match arg.get_value_hint() { + clap::ValueHint::Other => { + // Should not complete + } + clap::ValueHint::Unknown | clap::ValueHint::AnyPath => { + values.extend(complete_path(value_os, current_dir, |_| true)); + } + clap::ValueHint::FilePath => { + values.extend(complete_path(value_os, current_dir, |p| p.is_file())); + } + clap::ValueHint::DirPath => { + values.extend(complete_path(value_os, current_dir, |p| p.is_dir())); + } + clap::ValueHint::ExecutablePath => { + use is_executable::IsExecutable; + values.extend(complete_path(value_os, current_dir, |p| p.is_executable())); + } + clap::ValueHint::CommandName + | clap::ValueHint::CommandString + | clap::ValueHint::CommandWithArguments + | clap::ValueHint::Username + | clap::ValueHint::Hostname + | clap::ValueHint::Url + | clap::ValueHint::EmailAddress => { + // No completion implementation + } + _ => { + // Safe-ish fallback + values.extend(complete_path(value_os, current_dir, |_| true)); + } + } + values.sort(); + } + + values + } + + fn complete_path( + value_os: &OsStr, + current_dir: Option<&std::path::Path>, + is_wanted: impl Fn(&std::path::Path) -> bool, + ) -> Vec { + let mut completions = Vec::new(); + + let current_dir = match current_dir { + Some(current_dir) => current_dir, + None => { + // Can't complete without a `current_dir` + return Vec::new(); + } + }; + let (existing, prefix) = value_os + .split_once("\\") + .unwrap_or((OsStr::new(""), value_os)); + let root = current_dir.join(existing); + debug!("complete_path: root={root:?}, prefix={prefix:?}"); + let prefix = prefix.to_string_lossy(); + + for entry in std::fs::read_dir(&root) + .ok() + .into_iter() + .flatten() + 
.filter_map(Result::ok) + { + let raw_file_name = OsString::from(entry.file_name()); + if !raw_file_name.starts_with(&prefix) { + continue; + } + + if entry.metadata().map(|m| m.is_dir()).unwrap_or(false) { + let path = entry.path(); + let mut suggestion = pathdiff::diff_paths(&path, current_dir).unwrap_or(path); + suggestion.push(""); // Ensure trailing `/` + completions.push(suggestion.as_os_str().to_owned()); + } else { + let path = entry.path(); + if is_wanted(&path) { + let suggestion = pathdiff::diff_paths(&path, current_dir).unwrap_or(path); + completions.push(suggestion.as_os_str().to_owned()); + } + } + } + + completions + } + + fn complete_subcommand(value: &str, cmd: &clap::Command) -> Vec { + debug!( + "complete_subcommand: cmd={:?}, value={:?}", + cmd.get_name(), + value + ); + + let mut scs = crate::generator::utils::all_subcommands(cmd) + .into_iter() + .filter(|x| x.0.starts_with(value)) + .map(|x| OsString::from(&x.0)) + .collect::>(); + scs.sort(); + scs.dedup(); + scs + } +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/generator/mod.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/generator/mod.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/generator/mod.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/generator/mod.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,261 @@ +//! Shell completion machinery + +pub mod utils; + +use std::ffi::OsString; +use std::fs::File; +use std::io::Error; +use std::io::Write; +use std::path::PathBuf; + +use clap::Command; + +/// Generator trait which can be used to write generators +pub trait Generator { + /// Returns the file name that is created when this generator is called during compile time. 
+ /// + /// # Panics + /// + /// May panic when called outside of the context of [`generate`] or [`generate_to`] + /// + /// # Examples + /// + /// ``` + /// # use std::io::Write; + /// # use clap::Command; + /// use clap_complete::Generator; + /// + /// pub struct Fish; + /// + /// impl Generator for Fish { + /// fn file_name(&self, name: &str) -> String { + /// format!("{name}.fish") + /// } + /// # fn generate(&self, cmd: &Command, buf: &mut dyn Write) {} + /// } + /// ``` + fn file_name(&self, name: &str) -> String; + + /// Generates output out of [`clap::Command`](Command). + /// + /// # Panics + /// + /// May panic when called outside of the context of [`generate`] or [`generate_to`] + /// + /// # Examples + /// + /// The following example generator displays the [`clap::Command`](Command) + /// as if it is printed using [`std::println`]. + /// + /// ``` + /// use std::{io::Write, fmt::write}; + /// use clap::Command; + /// use clap_complete::Generator; + /// + /// pub struct ClapDebug; + /// + /// impl Generator for ClapDebug { + /// # fn file_name(&self, name: &str) -> String { + /// # name.into() + /// # } + /// fn generate(&self, cmd: &Command, buf: &mut dyn Write) { + /// write!(buf, "{cmd}").unwrap(); + /// } + /// } + /// ``` + fn generate(&self, cmd: &Command, buf: &mut dyn Write); +} + +/// Generate a completions file for a specified shell at compile-time. +/// +/// **NOTE:** to generate the file at compile time you must use a `build.rs` "Build Script" or a +/// [`cargo-xtask`](https://github.com/matklad/cargo-xtask) +/// +/// # Examples +/// +/// The following example generates a bash completion script via a `build.rs` script. In this +/// simple example, we'll demo a very small application with only a single subcommand and two +/// args. Real applications could be many multiple levels deep in subcommands, and have tens or +/// potentially hundreds of arguments. 
+/// +/// First, it helps if we separate out our `Command` definition into a separate file. Whether you +/// do this as a function, or bare Command definition is a matter of personal preference. +/// +/// ``` +/// // src/cli.rs +/// # use clap::{Command, Arg, ArgAction}; +/// pub fn build_cli() -> Command { +/// Command::new("compl") +/// .about("Tests completions") +/// .arg(Arg::new("file") +/// .help("some input file")) +/// .subcommand(Command::new("test") +/// .about("tests things") +/// .arg(Arg::new("case") +/// .long("case") +/// .action(ArgAction::Set) +/// .help("the case to test"))) +/// } +/// ``` +/// +/// In our regular code, we can simply call this `build_cli()` function, then call +/// `get_matches()`, or any of the other normal methods directly after. For example: +/// +/// ```ignore +/// // src/main.rs +/// +/// mod cli; +/// +/// fn main() { +/// let _m = cli::build_cli().get_matches(); +/// +/// // normal logic continues... +/// } +/// ``` +/// +/// Next, we set up our `Cargo.toml` to use a `build.rs` build script. +/// +/// ```toml +/// # Cargo.toml +/// build = "build.rs" +/// +/// [dependencies] +/// clap = "*" +/// +/// [build-dependencies] +/// clap = "*" +/// clap_complete = "*" +/// ``` +/// +/// Next, we place a `build.rs` in our project root. 
+/// +/// ```ignore +/// use clap_complete::{generate_to, shells::Bash}; +/// use std::env; +/// use std::io::Error; +/// +/// include!("src/cli.rs"); +/// +/// fn main() -> Result<(), Error> { +/// let outdir = match env::var_os("OUT_DIR") { +/// None => return Ok(()), +/// Some(outdir) => outdir, +/// }; +/// +/// let mut cmd = build_cli(); +/// let path = generate_to( +/// Bash, +/// &mut cmd, // We need to specify what generator to use +/// "myapp", // We need to specify the bin name manually +/// outdir, // We need to specify where to write to +/// )?; +/// +/// println!("cargo:warning=completion file is generated: {path:?}"); +/// +/// Ok(()) +/// } +/// ``` +/// +/// Now, once we compile there will be a `{bin_name}.bash` file in the directory. +/// Assuming we compiled with debug mode, it would be somewhere similar to +/// `/target/debug/build/myapp-/out/myapp.bash`. +/// +/// **NOTE:** Please look at the individual [shells][crate::shells] +/// to see the name of the files generated. +/// +/// Using [`ValueEnum::value_variants()`][clap::ValueEnum::value_variants] you can easily loop over +/// all the supported shell variants to generate all the completions at once too. 
+/// +/// ```ignore +/// use clap::ValueEnum; +/// use clap_complete::{generate_to, Shell}; +/// use std::env; +/// use std::io::Error; +/// +/// include!("src/cli.rs"); +/// +/// fn main() -> Result<(), Error> { +/// let outdir = match env::var_os("OUT_DIR") { +/// None => return Ok(()), +/// Some(outdir) => outdir, +/// }; +/// +/// let mut cmd = build_cli(); +/// for &shell in Shell::value_variants() { +/// generate_to(shell, &mut cmd, "myapp", outdir)?; +/// } +/// +/// Ok(()) +/// } +/// ``` +pub fn generate_to( + gen: G, + cmd: &mut Command, + bin_name: S, + out_dir: T, +) -> Result +where + G: Generator, + S: Into, + T: Into, +{ + cmd.set_bin_name(bin_name); + + let out_dir = PathBuf::from(out_dir.into()); + let file_name = gen.file_name(cmd.get_bin_name().unwrap()); + + let path = out_dir.join(file_name); + let mut file = File::create(&path)?; + + _generate::(gen, cmd, &mut file); + Ok(path) +} + +/// Generate a completions file for a specified shell at runtime. +/// +/// Until `cargo install` can install extra files like a completion script, this may be +/// used e.g. in a command that outputs the contents of the completion script, to be +/// redirected into a file by the user. +/// +/// # Examples +/// +/// Assuming a separate `cli.rs` like the [`generate_to` example](generate_to()), +/// we can let users generate a completion script using a command: +/// +/// ```ignore +/// // src/main.rs +/// +/// mod cli; +/// use std::io; +/// use clap_complete::{generate, shells::Bash}; +/// +/// fn main() { +/// let matches = cli::build_cli().get_matches(); +/// +/// if matches.is_present("generate-bash-completions") { +/// generate(Bash, &mut cli::build_cli(), "myapp", &mut io::stdout()); +/// } +/// +/// // normal logic continues... 
+/// } +/// +/// ``` +/// +/// Usage: +/// +/// ```console +/// $ myapp generate-bash-completions > /usr/share/bash-completion/completions/myapp.bash +/// ``` +pub fn generate(gen: G, cmd: &mut Command, bin_name: S, buf: &mut dyn Write) +where + G: Generator, + S: Into, +{ + cmd.set_bin_name(bin_name); + _generate::(gen, cmd, buf) +} + +fn _generate(gen: G, cmd: &mut Command, buf: &mut dyn Write) { + cmd.build(); + gen.generate(cmd, buf) +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/generator/utils.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/generator/utils.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/generator/utils.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/generator/utils.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,278 @@ +//! Helpers for writing generators + +use clap::{Arg, Command}; + +/// Gets all subcommands including child subcommands in the form of `("name", "bin_name")`. +/// +/// Subcommand `rustup toolchain install` would be converted to +/// `("install", "rustup toolchain install")`. +pub fn all_subcommands(cmd: &Command) -> Vec<(String, String)> { + let mut subcmds: Vec<_> = subcommands(cmd); + + for sc_v in cmd.get_subcommands().map(all_subcommands) { + subcmds.extend(sc_v); + } + + subcmds +} + +/// Finds the subcommand [`clap::Command`] from the given [`clap::Command`] with the given path. +/// +/// **NOTE:** `path` should not contain the root `bin_name`. +pub fn find_subcommand_with_path<'cmd>(p: &'cmd Command, path: Vec<&str>) -> &'cmd Command { + let mut cmd = p; + + for sc in path { + cmd = cmd.find_subcommand(sc).unwrap(); + } + + cmd +} + +/// Gets subcommands of [`clap::Command`] in the form of `("name", "bin_name")`. +/// +/// Subcommand `rustup toolchain install` would be converted to +/// `("install", "rustup toolchain install")`. 
+pub fn subcommands(p: &Command) -> Vec<(String, String)> { + debug!("subcommands: name={}", p.get_name()); + debug!("subcommands: Has subcommands...{:?}", p.has_subcommands()); + + let mut subcmds = vec![]; + + for sc in p.get_subcommands() { + let sc_bin_name = sc.get_bin_name().unwrap(); + + debug!( + "subcommands:iter: name={}, bin_name={}", + sc.get_name(), + sc_bin_name + ); + + subcmds.push((sc.get_name().to_string(), sc_bin_name.to_string())); + } + + subcmds +} + +/// Gets all the short options, their visible aliases and flags of a [`clap::Command`]. +/// Includes `h` and `V` depending on the [`clap::Command`] settings. +pub fn shorts_and_visible_aliases(p: &Command) -> Vec { + debug!("shorts: name={}", p.get_name()); + + p.get_arguments() + .filter_map(|a| { + if !a.is_positional() { + if a.get_visible_short_aliases().is_some() && a.get_short().is_some() { + let mut shorts_and_visible_aliases = a.get_visible_short_aliases().unwrap(); + shorts_and_visible_aliases.push(a.get_short().unwrap()); + Some(shorts_and_visible_aliases) + } else if a.get_visible_short_aliases().is_none() && a.get_short().is_some() { + Some(vec![a.get_short().unwrap()]) + } else { + None + } + } else { + None + } + }) + .flatten() + .collect() +} + +/// Gets all the long options, their visible aliases and flags of a [`clap::Command`]. +/// Includes `help` and `version` depending on the [`clap::Command`] settings. 
+pub fn longs_and_visible_aliases(p: &Command) -> Vec { + debug!("longs: name={}", p.get_name()); + + p.get_arguments() + .filter_map(|a| { + if !a.is_positional() { + if a.get_visible_aliases().is_some() && a.get_long().is_some() { + let mut visible_aliases: Vec<_> = a + .get_visible_aliases() + .unwrap() + .into_iter() + .map(|s| s.to_string()) + .collect(); + visible_aliases.push(a.get_long().unwrap().to_string()); + Some(visible_aliases) + } else if a.get_visible_aliases().is_none() && a.get_long().is_some() { + Some(vec![a.get_long().unwrap().to_string()]) + } else { + None + } + } else { + None + } + }) + .flatten() + .collect() +} + +/// Gets all the flags of a [`clap::Command`](Command). +/// Includes `help` and `version` depending on the [`clap::Command`] settings. +pub fn flags(p: &Command) -> Vec { + debug!("flags: name={}", p.get_name()); + p.get_arguments() + .filter(|a| !a.get_num_args().expect("built").takes_values() && !a.is_positional()) + .cloned() + .collect() +} + +/// Get the possible values for completion +pub fn possible_values(a: &Arg) -> Option> { + if !a.get_num_args().expect("built").takes_values() { + None + } else { + a.get_value_parser() + .possible_values() + .map(|pvs| pvs.collect()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use clap::Arg; + use clap::ArgAction; + + fn common_app() -> Command { + Command::new("myapp") + .subcommand( + Command::new("test").subcommand(Command::new("config")).arg( + Arg::new("file") + .short('f') + .short_alias('c') + .visible_short_alias('p') + .long("file") + .action(ArgAction::SetTrue) + .visible_alias("path"), + ), + ) + .subcommand(Command::new("hello")) + .bin_name("my-cmd") + } + + fn built() -> Command { + let mut cmd = common_app(); + + cmd.build(); + cmd + } + + fn built_with_version() -> Command { + let mut cmd = common_app().version("3.0"); + + cmd.build(); + cmd + } + + #[test] + fn test_subcommands() { + let cmd = built_with_version(); + + assert_eq!( + subcommands(&cmd), + 
vec![ + ("test".to_string(), "my-cmd test".to_string()), + ("hello".to_string(), "my-cmd hello".to_string()), + ("help".to_string(), "my-cmd help".to_string()), + ] + ); + } + + #[test] + fn test_all_subcommands() { + let cmd = built_with_version(); + + assert_eq!( + all_subcommands(&cmd), + vec![ + ("test".to_string(), "my-cmd test".to_string()), + ("hello".to_string(), "my-cmd hello".to_string()), + ("help".to_string(), "my-cmd help".to_string()), + ("config".to_string(), "my-cmd test config".to_string()), + ("help".to_string(), "my-cmd test help".to_string()), + ("config".to_string(), "my-cmd test help config".to_string()), + ("help".to_string(), "my-cmd test help help".to_string()), + ("test".to_string(), "my-cmd help test".to_string()), + ("hello".to_string(), "my-cmd help hello".to_string()), + ("help".to_string(), "my-cmd help help".to_string()), + ("config".to_string(), "my-cmd help test config".to_string()), + ] + ); + } + + #[test] + fn test_find_subcommand_with_path() { + let cmd = built_with_version(); + let sc_app = find_subcommand_with_path(&cmd, "test config".split(' ').collect()); + + assert_eq!(sc_app.get_name(), "config"); + } + + #[test] + fn test_flags() { + let cmd = built_with_version(); + let actual_flags = flags(&cmd); + + assert_eq!(actual_flags.len(), 2); + assert_eq!(actual_flags[0].get_long(), Some("help")); + assert_eq!(actual_flags[1].get_long(), Some("version")); + + let sc_flags = flags(find_subcommand_with_path(&cmd, vec!["test"])); + + assert_eq!(sc_flags.len(), 2); + assert_eq!(sc_flags[0].get_long(), Some("file")); + assert_eq!(sc_flags[1].get_long(), Some("help")); + } + + #[test] + fn test_flag_subcommand() { + let cmd = built(); + let actual_flags = flags(&cmd); + + assert_eq!(actual_flags.len(), 1); + assert_eq!(actual_flags[0].get_long(), Some("help")); + + let sc_flags = flags(find_subcommand_with_path(&cmd, vec!["test"])); + + assert_eq!(sc_flags.len(), 2); + assert_eq!(sc_flags[0].get_long(), Some("file")); + 
assert_eq!(sc_flags[1].get_long(), Some("help")); + } + + #[test] + fn test_shorts() { + let cmd = built_with_version(); + let shorts = shorts_and_visible_aliases(&cmd); + + assert_eq!(shorts.len(), 2); + assert_eq!(shorts[0], 'h'); + assert_eq!(shorts[1], 'V'); + + let sc_shorts = shorts_and_visible_aliases(find_subcommand_with_path(&cmd, vec!["test"])); + + assert_eq!(sc_shorts.len(), 3); + assert_eq!(sc_shorts[0], 'p'); + assert_eq!(sc_shorts[1], 'f'); + assert_eq!(sc_shorts[2], 'h'); + } + + #[test] + fn test_longs() { + let cmd = built_with_version(); + let longs = longs_and_visible_aliases(&cmd); + + assert_eq!(longs.len(), 2); + assert_eq!(longs[0], "help"); + assert_eq!(longs[1], "version"); + + let sc_longs = longs_and_visible_aliases(find_subcommand_with_path(&cmd, vec!["test"])); + + assert_eq!(sc_longs.len(), 3); + assert_eq!(sc_longs[0], "path"); + assert_eq!(sc_longs[1], "file"); + assert_eq!(sc_longs[2], "help"); + } +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/lib.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/lib.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/lib.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,74 @@ +// Copyright â“’ 2015-2018 Kevin B. Knapp +// +// `clap_complete` is distributed under the terms of both the MIT license and the Apache License +// (Version 2.0). +// See the [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT) files in this repository +// for more information. + +#![doc(html_logo_url = "https://raw.githubusercontent.com/clap-rs/clap/master/assets/clap.png")] +#![doc = include_str!("../README.md")] +#![warn(missing_docs, trivial_casts, unused_allocation, trivial_numeric_casts)] +#![forbid(unsafe_code)] +#![allow(clippy::needless_doctest_main)] + +//! ## Quick Start +//! +//! - For generating at compile-time, see [`generate_to`] +//! - For generating at runtime, see [`generate`] +//! 
+//! [`Shell`] is a convenience `enum` for an argument value type that implements `Generator` +//! for each natively-supported shell type. +//! +//! ## Example +//! +//! ```rust,no_run +//! use clap::{Command, Arg, ValueHint, value_parser, ArgAction}; +//! use clap_complete::{generate, Generator, Shell}; +//! use std::io; +//! +//! fn build_cli() -> Command { +//! Command::new("example") +//! .arg(Arg::new("file") +//! .help("some input file") +//! .value_hint(ValueHint::AnyPath), +//! ) +//! .arg( +//! Arg::new("generator") +//! .long("generate") +//! .action(ArgAction::Set) +//! .value_parser(value_parser!(Shell)), +//! ) +//! } +//! +//! fn print_completions(gen: G, cmd: &mut Command) { +//! generate(gen, cmd, cmd.get_name().to_string(), &mut io::stdout()); +//! } +//! +//! fn main() { +//! let matches = build_cli().get_matches(); +//! +//! if let Some(generator) = matches.get_one::("generator").copied() { +//! let mut cmd = build_cli(); +//! eprintln!("Generating completion file for {generator}..."); +//! print_completions(generator, &mut cmd); +//! } +//! } +//! ``` + +const INTERNAL_ERROR_MSG: &str = "Fatal internal error. Please consider filing a bug \ + report at https://github.com/clap-rs/clap/issues"; + +#[macro_use] +#[allow(missing_docs)] +mod macros; + +pub mod generator; +pub mod shells; + +pub use generator::generate; +pub use generator::generate_to; +pub use generator::Generator; +pub use shells::Shell; + +#[cfg(feature = "unstable-dynamic")] +pub mod dynamic; diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/macros.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/macros.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/macros.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/macros.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,21 @@ +macro_rules! w { + ($buf:expr, $to_w:expr) => { + match $buf.write_all($to_w) { + Ok(..) => (), + Err(..) 
=> panic!("Failed to write to generated file"), + } + }; +} + +#[cfg(feature = "debug")] +macro_rules! debug { + ($($arg:tt)*) => { + eprint!("[{:>w$}] \t", module_path!(), w = 28); + eprintln!($($arg)*) + } +} + +#[cfg(not(feature = "debug"))] +macro_rules! debug { + ($($arg:tt)*) => {}; +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/bash.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/bash.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/bash.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/bash.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,243 @@ +use std::{fmt::Write as _, io::Write}; + +use clap::*; + +use crate::generator::{utils, Generator}; + +/// Generate bash completion file +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct Bash; + +impl Generator for Bash { + fn file_name(&self, name: &str) -> String { + format!("{name}.bash") + } + + fn generate(&self, cmd: &Command, buf: &mut dyn Write) { + let bin_name = cmd + .get_bin_name() + .expect("crate::generate should have set the bin_name"); + + w!( + buf, + format!( + "_{name}() {{ + local i cur prev opts cmd + COMPREPLY=() + cur=\"${{COMP_WORDS[COMP_CWORD]}}\" + prev=\"${{COMP_WORDS[COMP_CWORD-1]}}\" + cmd=\"\" + opts=\"\" + + for i in ${{COMP_WORDS[@]}} + do + case \"${{cmd}},${{i}}\" in + \",$1\") + cmd=\"{cmd}\" + ;;{subcmds} + *) + ;; + esac + done + + case \"${{cmd}}\" in + {cmd}) + opts=\"{name_opts}\" + if [[ ${{cur}} == -* || ${{COMP_CWORD}} -eq 1 ]] ; then + COMPREPLY=( $(compgen -W \"${{opts}}\" -- \"${{cur}}\") ) + return 0 + fi + case \"${{prev}}\" in{name_opts_details} + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W \"${{opts}}\" -- \"${{cur}}\") ) + return 0 + ;;{subcmd_details} + esac +}} + +complete -F _{name} -o bashdefault -o default {name} +", + name = bin_name, + cmd = bin_name.replace('-', "__"), + name_opts = all_options_for_path(cmd, bin_name), + name_opts_details = 
option_details_for_path(cmd, bin_name), + subcmds = all_subcommands(cmd), + subcmd_details = subcommand_details(cmd) + ) + .as_bytes() + ); + } +} + +fn all_subcommands(cmd: &Command) -> String { + debug!("all_subcommands"); + + fn add_command( + parent_fn_name: &str, + cmd: &Command, + subcmds: &mut Vec<(String, String, String)>, + ) { + let fn_name = format!( + "{parent_fn_name}__{cmd_name}", + parent_fn_name = parent_fn_name, + cmd_name = cmd.get_name().to_string().replace('-', "__") + ); + subcmds.push(( + parent_fn_name.to_string(), + cmd.get_name().to_string(), + fn_name.clone(), + )); + for alias in cmd.get_visible_aliases() { + subcmds.push(( + parent_fn_name.to_string(), + alias.to_string(), + fn_name.clone(), + )); + } + for subcmd in cmd.get_subcommands() { + add_command(&fn_name, subcmd, subcmds); + } + } + let mut subcmds = vec![]; + let fn_name = cmd.get_name().replace('-', "__"); + for subcmd in cmd.get_subcommands() { + add_command(&fn_name, subcmd, &mut subcmds); + } + subcmds.sort(); + + let mut cases = vec![String::new()]; + for (parent_fn_name, name, fn_name) in subcmds { + cases.push(format!( + "{parent_fn_name},{name}) + cmd=\"{fn_name}\" + ;;", + )); + } + + cases.join("\n ") +} + +fn subcommand_details(cmd: &Command) -> String { + debug!("subcommand_details"); + + let mut subcmd_dets = vec![String::new()]; + let mut scs = utils::all_subcommands(cmd) + .iter() + .map(|x| x.1.replace(' ', "__")) + .collect::>(); + + scs.sort(); + + subcmd_dets.extend(scs.iter().map(|sc| { + format!( + "{subcmd}) + opts=\"{sc_opts}\" + if [[ ${{cur}} == -* || ${{COMP_CWORD}} -eq {level} ]] ; then + COMPREPLY=( $(compgen -W \"${{opts}}\" -- \"${{cur}}\") ) + return 0 + fi + case \"${{prev}}\" in{opts_details} + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W \"${{opts}}\" -- \"${{cur}}\") ) + return 0 + ;;", + subcmd = sc.replace('-', "__"), + sc_opts = all_options_for_path(cmd, sc), + level = sc.split("__").map(|_| 1).sum::(), + opts_details = 
option_details_for_path(cmd, sc) + ) + })); + + subcmd_dets.join("\n ") +} + +fn option_details_for_path(cmd: &Command, path: &str) -> String { + debug!("option_details_for_path: path={path}"); + + let p = utils::find_subcommand_with_path(cmd, path.split("__").skip(1).collect()); + let mut opts = vec![String::new()]; + + for o in p.get_opts() { + if let Some(longs) = o.get_long_and_visible_aliases() { + opts.extend(longs.iter().map(|long| { + format!( + "--{}) + COMPREPLY=({}) + return 0 + ;;", + long, + vals_for(o) + ) + })); + } + + if let Some(shorts) = o.get_short_and_visible_aliases() { + opts.extend(shorts.iter().map(|short| { + format!( + "-{}) + COMPREPLY=({}) + return 0 + ;;", + short, + vals_for(o) + ) + })); + } + } + + opts.join("\n ") +} + +fn vals_for(o: &Arg) -> String { + debug!("vals_for: o={}", o.get_id()); + + if let Some(vals) = crate::generator::utils::possible_values(o) { + format!( + "$(compgen -W \"{}\" -- \"${{cur}}\")", + vals.iter() + .filter(|pv| !pv.is_hide_set()) + .map(|n| n.get_name()) + .collect::>() + .join(" ") + ) + } else if o.get_value_hint() == ValueHint::Other { + String::from("\"${cur}\"") + } else { + String::from("$(compgen -f \"${cur}\")") + } +} + +fn all_options_for_path(cmd: &Command, path: &str) -> String { + debug!("all_options_for_path: path={path}"); + + let p = utils::find_subcommand_with_path(cmd, path.split("__").skip(1).collect()); + + let mut opts = String::new(); + for short in utils::shorts_and_visible_aliases(p) { + write!(&mut opts, "-{short} ").unwrap(); + } + for long in utils::longs_and_visible_aliases(p) { + write!(&mut opts, "--{long} ").unwrap(); + } + for pos in p.get_positionals() { + if let Some(vals) = utils::possible_values(pos) { + for value in vals { + write!(&mut opts, "{} ", value.get_name()).unwrap(); + } + } else { + write!(&mut opts, "{pos} ").unwrap(); + } + } + for (sc, _) in utils::subcommands(p) { + write!(&mut opts, "{sc} ").unwrap(); + } + opts.pop(); + + opts +} diff -Nru 
s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/elvish.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/elvish.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/elvish.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/elvish.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,136 @@ +use std::io::Write; + +use clap::builder::StyledStr; +use clap::*; + +use crate::generator::{utils, Generator}; +use crate::INTERNAL_ERROR_MSG; + +/// Generate elvish completion file +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct Elvish; + +impl Generator for Elvish { + fn file_name(&self, name: &str) -> String { + format!("{name}.elv") + } + + fn generate(&self, cmd: &Command, buf: &mut dyn Write) { + let bin_name = cmd + .get_bin_name() + .expect("crate::generate should have set the bin_name"); + + let subcommands_cases = generate_inner(cmd, ""); + + let result = format!( + r#" +use builtin; +use str; + +set edit:completion:arg-completer[{bin_name}] = {{|@words| + fn spaces {{|n| + builtin:repeat $n ' ' | str:join '' + }} + fn cand {{|text desc| + edit:complex-candidate $text &display=$text' '(spaces (- 14 (wcswidth $text)))$desc + }} + var command = '{bin_name}' + for word $words[1..-1] {{ + if (str:has-prefix $word '-') {{ + break + }} + set command = $command';'$word + }} + var completions = [{subcommands_cases} + ] + $completions[$command] +}} +"#, + ); + + w!(buf, result.as_bytes()); + } +} + +// Escape string inside single quotes +fn escape_string(string: &str) -> String { + string.replace('\'', "''") +} + +fn get_tooltip(help: Option<&StyledStr>, data: T) -> String { + match help { + Some(help) => escape_string(&help.to_string()), + _ => data.to_string(), + } +} + +fn generate_inner(p: &Command, previous_command_name: &str) -> String { + debug!("generate_inner"); + + let command_name = if previous_command_name.is_empty() { + p.get_bin_name().expect(INTERNAL_ERROR_MSG).to_string() + } else { + 
format!("{};{}", previous_command_name, &p.get_name()) + }; + + let mut completions = String::new(); + let preamble = String::from("\n cand "); + + for option in p.get_opts() { + if let Some(shorts) = option.get_short_and_visible_aliases() { + let tooltip = get_tooltip(option.get_help(), shorts[0]); + for short in shorts { + completions.push_str(&preamble); + completions.push_str(format!("-{short} '{tooltip}'").as_str()); + } + } + + if let Some(longs) = option.get_long_and_visible_aliases() { + let tooltip = get_tooltip(option.get_help(), longs[0]); + for long in longs { + completions.push_str(&preamble); + completions.push_str(format!("--{long} '{tooltip}'").as_str()); + } + } + } + + for flag in utils::flags(p) { + if let Some(shorts) = flag.get_short_and_visible_aliases() { + let tooltip = get_tooltip(flag.get_help(), shorts[0]); + for short in shorts { + completions.push_str(&preamble); + completions.push_str(format!("-{short} '{tooltip}'").as_str()); + } + } + + if let Some(longs) = flag.get_long_and_visible_aliases() { + let tooltip = get_tooltip(flag.get_help(), longs[0]); + for long in longs { + completions.push_str(&preamble); + completions.push_str(format!("--{long} '{tooltip}'").as_str()); + } + } + } + + for subcommand in p.get_subcommands() { + let data = &subcommand.get_name(); + let tooltip = get_tooltip(subcommand.get_about(), data); + + completions.push_str(&preamble); + completions.push_str(format!("{data} '{tooltip}'").as_str()); + } + + let mut subcommands_cases = format!( + r" + &'{}'= {{{} + }}", + &command_name, completions + ); + + for subcommand in p.get_subcommands() { + let subcommand_subcommands_cases = generate_inner(subcommand, &command_name); + subcommands_cases.push_str(&subcommand_subcommands_cases); + } + + subcommands_cases +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/fish.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/fish.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/fish.rs 
1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/fish.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,199 @@ +use std::io::Write; + +use clap::*; + +use crate::generator::{utils, Generator}; + +/// Generate fish completion file +/// +/// Note: The fish generator currently only supports named options (-o/--option), not positional arguments. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct Fish; + +impl Generator for Fish { + fn file_name(&self, name: &str) -> String { + format!("{name}.fish") + } + + fn generate(&self, cmd: &Command, buf: &mut dyn Write) { + let bin_name = cmd + .get_bin_name() + .expect("crate::generate should have set the bin_name"); + + let mut buffer = String::new(); + gen_fish_inner(bin_name, &[], cmd, &mut buffer); + w!(buf, buffer.as_bytes()); + } +} + +// Escape string inside single quotes +fn escape_string(string: &str, escape_comma: bool) -> String { + let string = string.replace('\\', "\\\\").replace('\'', "\\'"); + if escape_comma { + string.replace(',', "\\,") + } else { + string + } +} + +fn gen_fish_inner( + root_command: &str, + parent_commands: &[&str], + cmd: &Command, + buffer: &mut String, +) { + debug!("gen_fish_inner"); + // example : + // + // complete + // -c {command} + // -d "{description}" + // -s {short} + // -l {long} + // -a "{possible_arguments}" + // -r # if require parameter + // -f # don't use file completion + // -n "__fish_use_subcommand" # complete for command "myprog" + // -n "__fish_seen_subcommand_from subcmd1" # complete for command "myprog subcmd1" + + let mut basic_template = format!("complete -c {root_command}"); + + if parent_commands.is_empty() { + if cmd.has_subcommands() { + basic_template.push_str(" -n \"__fish_use_subcommand\""); + } + } else { + basic_template.push_str( + format!( + " -n \"{}\"", + parent_commands + .iter() + .map(|command| format!("__fish_seen_subcommand_from {command}")) + .chain( + cmd.get_subcommands() + .map(|command| 
format!("not __fish_seen_subcommand_from {command}")) + ) + .collect::>() + .join("; and ") + ) + .as_str(), + ); + } + + debug!("gen_fish_inner: parent_commands={parent_commands:?}"); + + for option in cmd.get_opts() { + let mut template = basic_template.clone(); + + if let Some(shorts) = option.get_short_and_visible_aliases() { + for short in shorts { + template.push_str(format!(" -s {short}").as_str()); + } + } + + if let Some(longs) = option.get_long_and_visible_aliases() { + for long in longs { + template.push_str(format!(" -l {}", escape_string(long, false)).as_str()); + } + } + + if let Some(data) = option.get_help() { + template + .push_str(format!(" -d '{}'", escape_string(&data.to_string(), false)).as_str()); + } + + template.push_str(value_completion(option).as_str()); + + buffer.push_str(template.as_str()); + buffer.push('\n'); + } + + for flag in utils::flags(cmd) { + let mut template = basic_template.clone(); + + if let Some(shorts) = flag.get_short_and_visible_aliases() { + for short in shorts { + template.push_str(format!(" -s {short}").as_str()); + } + } + + if let Some(longs) = flag.get_long_and_visible_aliases() { + for long in longs { + template.push_str(format!(" -l {}", escape_string(long, false)).as_str()); + } + } + + if let Some(data) = flag.get_help() { + template + .push_str(format!(" -d '{}'", escape_string(&data.to_string(), false)).as_str()); + } + + buffer.push_str(template.as_str()); + buffer.push('\n'); + } + + for subcommand in cmd.get_subcommands() { + let mut template = basic_template.clone(); + + template.push_str(" -f"); + template.push_str(format!(" -a \"{}\"", &subcommand.get_name()).as_str()); + + if let Some(data) = subcommand.get_about() { + template.push_str(format!(" -d '{}'", escape_string(&data.to_string(), false)).as_str()) + } + + buffer.push_str(template.as_str()); + buffer.push('\n'); + } + + // generate options of subcommands + for subcommand in cmd.get_subcommands() { + let mut parent_commands: Vec<_> = 
parent_commands.into(); + parent_commands.push(subcommand.get_name()); + gen_fish_inner(root_command, &parent_commands, subcommand, buffer); + } +} + +fn value_completion(option: &Arg) -> String { + if !option.get_num_args().expect("built").takes_values() { + return "".to_string(); + } + + if let Some(data) = crate::generator::utils::possible_values(option) { + // We return the possible values with their own empty description e.g. {a\t,b\t} + // this makes sure that a and b don't get the description of the option or argument + format!( + " -r -f -a \"{{{}}}\"", + data.iter() + .filter_map(|value| if value.is_hide_set() { + None + } else { + Some(format!( + "{}\t{}", + escape_string(value.get_name(), true).as_str(), + escape_string(&value.get_help().unwrap_or_default().to_string(), true) + )) + }) + .collect::>() + .join(",") + ) + } else { + // NB! If you change this, please also update the table in `ValueHint` documentation. + match option.get_value_hint() { + ValueHint::Unknown => " -r", + // fish has no built-in support to distinguish these + ValueHint::AnyPath | ValueHint::FilePath | ValueHint::ExecutablePath => " -r -F", + ValueHint::DirPath => " -r -f -a \"(__fish_complete_directories)\"", + // It seems fish has no built-in support for completing command + arguments as + // single string (CommandString). Complete just the command name. 
+ ValueHint::CommandString | ValueHint::CommandName => { + " -r -f -a \"(__fish_complete_command)\"" + } + ValueHint::Username => " -r -f -a \"(__fish_complete_users)\"", + ValueHint::Hostname => " -r -f -a \"(__fish_print_hostnames)\"", + // Disable completion for others + _ => " -r -f", + } + .to_string() + } +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/mod.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/mod.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/mod.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/mod.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,15 @@ +//! Shell-specific generators + +mod bash; +mod elvish; +mod fish; +mod powershell; +mod shell; +mod zsh; + +pub use bash::Bash; +pub use elvish::Elvish; +pub use fish::Fish; +pub use powershell::PowerShell; +pub use shell::Shell; +pub use zsh::Zsh; diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/powershell.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/powershell.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/powershell.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/powershell.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,140 @@ +use std::io::Write; + +use clap::builder::StyledStr; +use clap::*; + +use crate::generator::{utils, Generator}; +use crate::INTERNAL_ERROR_MSG; + +/// Generate powershell completion file +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct PowerShell; + +impl Generator for PowerShell { + fn file_name(&self, name: &str) -> String { + format!("_{name}.ps1") + } + + fn generate(&self, cmd: &Command, buf: &mut dyn Write) { + let bin_name = cmd + .get_bin_name() + .expect("crate::generate should have set the bin_name"); + + let subcommands_cases = generate_inner(cmd, ""); + + let result = format!( + r#" +using namespace System.Management.Automation +using namespace 
System.Management.Automation.Language + +Register-ArgumentCompleter -Native -CommandName '{bin_name}' -ScriptBlock {{ + param($wordToComplete, $commandAst, $cursorPosition) + + $commandElements = $commandAst.CommandElements + $command = @( + '{bin_name}' + for ($i = 1; $i -lt $commandElements.Count; $i++) {{ + $element = $commandElements[$i] + if ($element -isnot [StringConstantExpressionAst] -or + $element.StringConstantType -ne [StringConstantType]::BareWord -or + $element.Value.StartsWith('-') -or + $element.Value -eq $wordToComplete) {{ + break + }} + $element.Value + }}) -join ';' + + $completions = @(switch ($command) {{{subcommands_cases} + }}) + + $completions.Where{{ $_.CompletionText -like "$wordToComplete*" }} | + Sort-Object -Property ListItemText +}} +"# + ); + + w!(buf, result.as_bytes()); + } +} + +// Escape string inside single quotes +fn escape_string(string: &str) -> String { + string.replace('\'', "''") +} + +fn get_tooltip(help: Option<&StyledStr>, data: T) -> String { + match help { + Some(help) => escape_string(&help.to_string()), + _ => data.to_string(), + } +} + +fn generate_inner(p: &Command, previous_command_name: &str) -> String { + debug!("generate_inner"); + + let command_name = if previous_command_name.is_empty() { + p.get_bin_name().expect(INTERNAL_ERROR_MSG).to_string() + } else { + format!("{};{}", previous_command_name, &p.get_name()) + }; + + let mut completions = String::new(); + let preamble = String::from("\n [CompletionResult]::new("); + + for option in p.get_opts() { + generate_aliases(&mut completions, &preamble, option); + } + + for flag in utils::flags(p) { + generate_aliases(&mut completions, &preamble, &flag); + } + + for subcommand in p.get_subcommands() { + let data = &subcommand.get_name(); + let tooltip = get_tooltip(subcommand.get_about(), data); + + completions.push_str(&preamble); + completions.push_str( + format!("'{data}', '{data}', [CompletionResultType]::ParameterValue, '{tooltip}')") + .as_str(), + ); + } + + 
let mut subcommands_cases = format!( + r" + '{}' {{{} + break + }}", + &command_name, completions + ); + + for subcommand in p.get_subcommands() { + let subcommand_subcommands_cases = generate_inner(subcommand, &command_name); + subcommands_cases.push_str(&subcommand_subcommands_cases); + } + + subcommands_cases +} + +fn generate_aliases(completions: &mut String, preamble: &String, arg: &Arg) { + use std::fmt::Write as _; + + if let Some(aliases) = arg.get_short_and_visible_aliases() { + let tooltip = get_tooltip(arg.get_help(), aliases[0]); + for alias in aliases { + let _ = write!( + completions, + "{preamble}'-{alias}', '{alias}', [CompletionResultType]::ParameterName, '{tooltip}')" + ); + } + } + if let Some(aliases) = arg.get_long_and_visible_aliases() { + let tooltip = get_tooltip(arg.get_help(), aliases[0]); + for alias in aliases { + let _ = write!( + completions, + "{preamble}'--{alias}', '{alias}', [CompletionResultType]::ParameterName, '{tooltip}')" + ); + } + } +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/shell.rs s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/shell.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/shell.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/shell.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,155 @@ +use std::fmt::Display; +use std::path::Path; +use std::str::FromStr; + +use clap::builder::PossibleValue; +use clap::ValueEnum; + +use crate::shells; +use crate::Generator; + +/// Shell with auto-generated completion script available. 
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +#[non_exhaustive] +pub enum Shell { + /// Bourne Again SHell (bash) + Bash, + /// Elvish shell + Elvish, + /// Friendly Interactive SHell (fish) + Fish, + /// PowerShell + PowerShell, + /// Z SHell (zsh) + Zsh, +} + +impl Display for Shell { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.to_possible_value() + .expect("no values are skipped") + .get_name() + .fmt(f) + } +} + +impl FromStr for Shell { + type Err = String; + + fn from_str(s: &str) -> Result { + for variant in Self::value_variants() { + if variant.to_possible_value().unwrap().matches(s, false) { + return Ok(*variant); + } + } + Err(format!("invalid variant: {s}")) + } +} + +// Hand-rolled so it can work even when `derive` feature is disabled +impl ValueEnum for Shell { + fn value_variants<'a>() -> &'a [Self] { + &[ + Shell::Bash, + Shell::Elvish, + Shell::Fish, + Shell::PowerShell, + Shell::Zsh, + ] + } + + fn to_possible_value<'a>(&self) -> Option { + Some(match self { + Shell::Bash => PossibleValue::new("bash"), + Shell::Elvish => PossibleValue::new("elvish"), + Shell::Fish => PossibleValue::new("fish"), + Shell::PowerShell => PossibleValue::new("powershell"), + Shell::Zsh => PossibleValue::new("zsh"), + }) + } +} + +impl Generator for Shell { + fn file_name(&self, name: &str) -> String { + match self { + Shell::Bash => shells::Bash.file_name(name), + Shell::Elvish => shells::Elvish.file_name(name), + Shell::Fish => shells::Fish.file_name(name), + Shell::PowerShell => shells::PowerShell.file_name(name), + Shell::Zsh => shells::Zsh.file_name(name), + } + } + + fn generate(&self, cmd: &clap::Command, buf: &mut dyn std::io::Write) { + match self { + Shell::Bash => shells::Bash.generate(cmd, buf), + Shell::Elvish => shells::Elvish.generate(cmd, buf), + Shell::Fish => shells::Fish.generate(cmd, buf), + Shell::PowerShell => shells::PowerShell.generate(cmd, buf), + Shell::Zsh => shells::Zsh.generate(cmd, buf), + } + } +} + 
+impl Shell { + /// Parse a shell from a path to the executable for the shell + /// + /// # Examples + /// + /// ``` + /// use clap_complete::shells::Shell; + /// + /// assert_eq!(Shell::from_shell_path("/bin/bash"), Some(Shell::Bash)); + /// assert_eq!(Shell::from_shell_path("/usr/bin/zsh"), Some(Shell::Zsh)); + /// assert_eq!(Shell::from_shell_path("/opt/my_custom_shell"), None); + /// ``` + pub fn from_shell_path>(path: P) -> Option { + parse_shell_from_path(path.as_ref()) + } + + /// Determine the user's current shell from the environment + /// + /// This will read the SHELL environment variable and try to determine which shell is in use + /// from that. + /// + /// If SHELL is not set, then on windows, it will default to powershell, and on + /// other OSes it will return `None`. + /// + /// If SHELL is set, but contains a value that doesn't correspond to one of the supported shell + /// types, then return `None`. + /// + /// # Example: + /// + /// ```no_run + /// # use clap::Command; + /// use clap_complete::{generate, shells::Shell}; + /// # fn build_cli() -> Command { + /// # Command::new("compl") + /// # } + /// let mut cmd = build_cli(); + /// generate(Shell::from_env().unwrap_or(Shell::Bash), &mut cmd, "myapp", &mut std::io::stdout()); + /// ``` + pub fn from_env() -> Option { + if let Some(env_shell) = std::env::var_os("SHELL") { + Shell::from_shell_path(env_shell) + } else if cfg!(windows) { + Some(Shell::PowerShell) + } else { + None + } + } +} + +// use a separate function to avoid having to monomorphize the entire function due +// to from_shell_path being generic +fn parse_shell_from_path(path: &Path) -> Option { + let name = path.file_stem()?.to_str()?; + match name { + "bash" => Some(Shell::Bash), + "zsh" => Some(Shell::Zsh), + "fish" => Some(Shell::Fish), + "elvish" => Some(Shell::Elvish), + "powershell" | "powershell_ise" => Some(Shell::PowerShell), + _ => None, + } +} diff -Nru s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/zsh.rs 
s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/zsh.rs --- s390-tools-2.31.0/rust-vendor/clap_complete/src/shells/zsh.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/clap_complete/src/shells/zsh.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,691 @@ +use std::io::Write; + +use clap::*; + +use crate::generator::{utils, Generator}; +use crate::INTERNAL_ERROR_MSG; + +/// Generate zsh completion file +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct Zsh; + +impl Generator for Zsh { + fn file_name(&self, name: &str) -> String { + format!("_{name}") + } + + fn generate(&self, cmd: &Command, buf: &mut dyn Write) { + let bin_name = cmd + .get_bin_name() + .expect("crate::generate should have set the bin_name"); + + w!( + buf, + format!( + "#compdef {name} + +autoload -U is-at-least + +_{name}() {{ + typeset -A opt_args + typeset -a _arguments_options + local ret=1 + + if is-at-least 5.2; then + _arguments_options=(-s -S -C) + else + _arguments_options=(-s -C) + fi + + local context curcontext=\"$curcontext\" state line + {initial_args}{subcommands} +}} + +{subcommand_details} + +if [ \"$funcstack[1]\" = \"_{name}\" ]; then + _{name} \"$@\" +else + compdef _{name} {name} +fi +", + name = bin_name, + initial_args = get_args_of(cmd, None), + subcommands = get_subcommands_of(cmd), + subcommand_details = subcommand_details(cmd) + ) + .as_bytes() + ); + } +} + +// Displays the commands of a subcommand +// (( $+functions[_[bin_name_underscore]_commands] )) || +// _[bin_name_underscore]_commands() { +// local commands; commands=( +// '[arg_name]:[arg_help]' +// ) +// _describe -t commands '[bin_name] commands' commands "$@" +// +// Where the following variables are present: +// [bin_name_underscore]: The full space delineated bin_name, where spaces have been replaced by +// underscore characters +// [arg_name]: The name of the subcommand +// [arg_help]: The help message of the subcommand +// [bin_name]: The full space delineated bin_name 
+// +// Here's a snippet from rustup: +// +// (( $+functions[_rustup_commands] )) || +// _rustup_commands() { +// local commands; commands=( +// 'show:Show the active and installed toolchains' +// 'update:Update Rust toolchains' +// # ... snip for brevity +// 'help:Print this message or the help of the given subcommand(s)' +// ) +// _describe -t commands 'rustup commands' commands "$@" +// +fn subcommand_details(p: &Command) -> String { + debug!("subcommand_details"); + + let bin_name = p + .get_bin_name() + .expect("crate::generate should have set the bin_name"); + + let mut ret = vec![]; + + // First we do ourself + let parent_text = format!( + "\ +(( $+functions[_{bin_name_underscore}_commands] )) || +_{bin_name_underscore}_commands() {{ + local commands; commands=({subcommands_and_args}) + _describe -t commands '{bin_name} commands' commands \"$@\" +}}", + bin_name_underscore = bin_name.replace(' ', "__"), + bin_name = bin_name, + subcommands_and_args = subcommands_of(p) + ); + ret.push(parent_text); + + // Next we start looping through all the children, grandchildren, etc. 
+ let mut all_subcommands = utils::all_subcommands(p); + + all_subcommands.sort(); + all_subcommands.dedup(); + + for (_, ref bin_name) in &all_subcommands { + debug!("subcommand_details:iter: bin_name={bin_name}"); + + ret.push(format!( + "\ +(( $+functions[_{bin_name_underscore}_commands] )) || +_{bin_name_underscore}_commands() {{ + local commands; commands=({subcommands_and_args}) + _describe -t commands '{bin_name} commands' commands \"$@\" +}}", + bin_name_underscore = bin_name.replace(' ', "__"), + bin_name = bin_name, + subcommands_and_args = + subcommands_of(parser_of(p, bin_name).expect(INTERNAL_ERROR_MSG)) + )); + } + + ret.join("\n") +} + +// Generates subcommand completions in form of +// +// '[arg_name]:[arg_help]' +// +// Where: +// [arg_name]: the subcommand's name +// [arg_help]: the help message of the subcommand +// +// A snippet from rustup: +// 'show:Show the active and installed toolchains' +// 'update:Update Rust toolchains' +fn subcommands_of(p: &Command) -> String { + debug!("subcommands_of"); + + let mut segments = vec![]; + + fn add_subcommands(subcommand: &Command, name: &str, ret: &mut Vec) { + debug!("add_subcommands"); + + let text = format!( + "'{name}:{help}' \\", + name = name, + help = escape_help(&subcommand.get_about().unwrap_or_default().to_string()) + ); + + ret.push(text); + } + + // The subcommands + for command in p.get_subcommands() { + debug!("subcommands_of:iter: subcommand={}", command.get_name()); + + add_subcommands(command, command.get_name(), &mut segments); + + for alias in command.get_visible_aliases() { + add_subcommands(command, alias, &mut segments); + } + } + + // Surround the text with newlines for proper formatting. + // We need this to prevent weirdly formatted `command=(\n \n)` sections. + // When there are no (sub-)commands. 
+ if !segments.is_empty() { + segments.insert(0, "".to_string()); + segments.push(" ".to_string()); + } + + segments.join("\n") +} + +// Get's the subcommand section of a completion file +// This looks roughly like: +// +// case $state in +// ([bin_name]_args) +// curcontext=\"${curcontext%:*:*}:[name_hyphen]-command-$words[1]:\" +// case $line[1] in +// +// ([name]) +// _arguments -C -s -S \ +// [subcommand_args] +// && ret=0 +// +// [RECURSIVE_CALLS] +// +// ;;", +// +// [repeat] +// +// esac +// ;; +// esac", +// +// Where the following variables are present: +// [name] = The subcommand name in the form of "install" for "rustup toolchain install" +// [bin_name] = The full space delineated bin_name such as "rustup toolchain install" +// [name_hyphen] = The full space delineated bin_name, but replace spaces with hyphens +// [repeat] = From the same recursive calls, but for all subcommands +// [subcommand_args] = The same as zsh::get_args_of +fn get_subcommands_of(parent: &Command) -> String { + debug!( + "get_subcommands_of: Has subcommands...{:?}", + parent.has_subcommands() + ); + + if !parent.has_subcommands() { + return String::new(); + } + + let subcommand_names = utils::subcommands(parent); + let mut all_subcommands = vec![]; + + for (ref name, ref bin_name) in &subcommand_names { + debug!( + "get_subcommands_of:iter: parent={}, name={name}, bin_name={bin_name}", + parent.get_name(), + ); + let mut segments = vec![format!("({name})")]; + let subcommand_args = get_args_of( + parser_of(parent, bin_name).expect(INTERNAL_ERROR_MSG), + Some(parent), + ); + + if !subcommand_args.is_empty() { + segments.push(subcommand_args); + } + + // Get the help text of all child subcommands. 
+ let children = get_subcommands_of(parser_of(parent, bin_name).expect(INTERNAL_ERROR_MSG)); + + if !children.is_empty() { + segments.push(children); + } + + segments.push(String::from(";;")); + all_subcommands.push(segments.join("\n")); + } + + let parent_bin_name = parent + .get_bin_name() + .expect("crate::generate should have set the bin_name"); + + format!( + " + case $state in + ({name}) + words=($line[{pos}] \"${{words[@]}}\") + (( CURRENT += 1 )) + curcontext=\"${{curcontext%:*:*}}:{name_hyphen}-command-$line[{pos}]:\" + case $line[{pos}] in + {subcommands} + esac + ;; +esac", + name = parent.get_name(), + name_hyphen = parent_bin_name.replace(' ', "-"), + subcommands = all_subcommands.join("\n"), + pos = parent.get_positionals().count() + 1 + ) +} + +// Get the Command for a given subcommand tree. +// +// Given the bin_name "a b c" and the Command for "a" this returns the "c" Command. +// Given the bin_name "a b c" and the Command for "b" this returns the "c" Command. +fn parser_of<'cmd>(parent: &'cmd Command, bin_name: &str) -> Option<&'cmd Command> { + debug!("parser_of: p={}, bin_name={}", parent.get_name(), bin_name); + + if bin_name == parent.get_bin_name().unwrap_or_default() { + return Some(parent); + } + + for subcommand in parent.get_subcommands() { + if let Some(ret) = parser_of(subcommand, bin_name) { + return Some(ret); + } + } + + None +} + +// Writes out the args section, which ends up being the flags, opts and positionals, and a jump to +// another ZSH function if there are subcommands. +// The structure works like this: +// ([conflicting_args]) [multiple] arg [takes_value] [[help]] [: :(possible_values)] +// ^-- list '-v -h' ^--'*' ^--'+' ^-- list 'one two three' +// +// An example from the rustup command: +// +// _arguments -C -s -S \ +// '(-h --help --verbose)-v[Enable verbose output]' \ +// '(-V -v --version --verbose --help)-h[Print help information]' \ +// # ... 
snip for brevity +// ':: :_rustup_commands' \ # <-- displays subcommands +// '*::: :->rustup' \ # <-- displays subcommand args and child subcommands +// && ret=0 +// +// The args used for _arguments are as follows: +// -C: modify the $context internal variable +// -s: Allow stacking of short args (i.e. -a -b -c => -abc) +// -S: Do not complete anything after '--' and treat those as argument values +fn get_args_of(parent: &Command, p_global: Option<&Command>) -> String { + debug!("get_args_of"); + + let mut segments = vec![String::from("_arguments \"${_arguments_options[@]}\" \\")]; + let opts = write_opts_of(parent, p_global); + let flags = write_flags_of(parent, p_global); + let positionals = write_positionals_of(parent); + + if !opts.is_empty() { + segments.push(opts); + } + + if !flags.is_empty() { + segments.push(flags); + } + + if !positionals.is_empty() { + segments.push(positionals); + } + + if parent.has_subcommands() { + let parent_bin_name = parent + .get_bin_name() + .expect("crate::generate should have set the bin_name"); + let subcommand_bin_name = format!( + "\":: :_{name}_commands\" \\", + name = parent_bin_name.replace(' ', "__") + ); + segments.push(subcommand_bin_name); + + let subcommand_text = format!("\"*::: :->{name}\" \\", name = parent.get_name()); + segments.push(subcommand_text); + }; + + segments.push(String::from("&& ret=0")); + segments.join("\n") +} + +// Uses either `possible_vals` or `value_hint` to give hints about possible argument values +fn value_completion(arg: &Arg) -> Option { + if let Some(values) = crate::generator::utils::possible_values(arg) { + if values + .iter() + .any(|value| !value.is_hide_set() && value.get_help().is_some()) + { + Some(format!( + "(({}))", + values + .iter() + .filter_map(|value| { + if value.is_hide_set() { + None + } else { + Some(format!( + r#"{name}\:"{tooltip}""#, + name = escape_value(value.get_name()), + tooltip = + escape_help(&value.get_help().unwrap_or_default().to_string()), + )) + } + }) 
+ .collect::>() + .join("\n") + )) + } else { + Some(format!( + "({})", + values + .iter() + .filter(|pv| !pv.is_hide_set()) + .map(|n| n.get_name()) + .collect::>() + .join(" ") + )) + } + } else { + // NB! If you change this, please also update the table in `ValueHint` documentation. + Some( + match arg.get_value_hint() { + ValueHint::Unknown => { + return None; + } + ValueHint::Other => "( )", + ValueHint::AnyPath => "_files", + ValueHint::FilePath => "_files", + ValueHint::DirPath => "_files -/", + ValueHint::ExecutablePath => "_absolute_command_paths", + ValueHint::CommandName => "_command_names -e", + ValueHint::CommandString => "_cmdstring", + ValueHint::CommandWithArguments => "_cmdambivalent", + ValueHint::Username => "_users", + ValueHint::Hostname => "_hosts", + ValueHint::Url => "_urls", + ValueHint::EmailAddress => "_email_addresses", + _ => { + return None; + } + } + .to_string(), + ) + } +} + +/// Escape help string inside single quotes and brackets +fn escape_help(string: &str) -> String { + string + .replace('\\', "\\\\") + .replace('\'', "'\\''") + .replace('[', "\\[") + .replace(']', "\\]") + .replace(':', "\\:") + .replace('$', "\\$") + .replace('`', "\\`") +} + +/// Escape value string inside single quotes and parentheses +fn escape_value(string: &str) -> String { + string + .replace('\\', "\\\\") + .replace('\'', "'\\''") + .replace('[', "\\[") + .replace(']', "\\]") + .replace(':', "\\:") + .replace('$', "\\$") + .replace('`', "\\`") + .replace('(', "\\(") + .replace(')', "\\)") + .replace(' ', "\\ ") +} + +fn write_opts_of(p: &Command, p_global: Option<&Command>) -> String { + debug!("write_opts_of"); + + let mut ret = vec![]; + + for o in p.get_opts() { + debug!("write_opts_of:iter: o={}", o.get_id()); + + let help = escape_help(&o.get_help().unwrap_or_default().to_string()); + let conflicts = arg_conflicts(p, o, p_global); + + let multiple = if let ArgAction::Count | ArgAction::Append = o.get_action() { + "*" + } else { + "" + }; + + let 
vn = match o.get_value_names() { + None => " ".to_string(), + Some(val) => val[0].to_string(), + }; + let vc = match value_completion(o) { + Some(val) => format!(":{vn}:{val}"), + None => format!(":{vn}: "), + }; + let vc = vc.repeat(o.get_num_args().expect("built").min_values()); + + if let Some(shorts) = o.get_short_and_visible_aliases() { + for short in shorts { + let s = format!("'{conflicts}{multiple}-{short}+[{help}]{vc}' \\"); + + debug!("write_opts_of:iter: Wrote...{}", &*s); + ret.push(s); + } + } + if let Some(longs) = o.get_long_and_visible_aliases() { + for long in longs { + let l = format!("'{conflicts}{multiple}--{long}=[{help}]{vc}' \\"); + + debug!("write_opts_of:iter: Wrote...{}", &*l); + ret.push(l); + } + } + } + + ret.join("\n") +} + +fn arg_conflicts(cmd: &Command, arg: &Arg, app_global: Option<&Command>) -> String { + fn push_conflicts(conflicts: &[&Arg], res: &mut Vec) { + for conflict in conflicts { + if let Some(s) = conflict.get_short() { + res.push(format!("-{s}")); + } + + if let Some(l) = conflict.get_long() { + res.push(format!("--{l}")); + } + } + } + + let mut res = vec![]; + match (app_global, arg.is_global_set()) { + (Some(x), true) => { + let conflicts = x.get_arg_conflicts_with(arg); + + if conflicts.is_empty() { + return String::new(); + } + + push_conflicts(&conflicts, &mut res); + } + (_, _) => { + let conflicts = cmd.get_arg_conflicts_with(arg); + + if conflicts.is_empty() { + return String::new(); + } + + push_conflicts(&conflicts, &mut res); + } + }; + + format!("({})", res.join(" ")) +} + +fn write_flags_of(p: &Command, p_global: Option<&Command>) -> String { + debug!("write_flags_of;"); + + let mut ret = vec![]; + + for f in utils::flags(p) { + debug!("write_flags_of:iter: f={}", f.get_id()); + + let help = escape_help(&f.get_help().unwrap_or_default().to_string()); + let conflicts = arg_conflicts(p, &f, p_global); + + let multiple = if let ArgAction::Count | ArgAction::Append = f.get_action() { + "*" + } else { + "" + }; 
+ + if let Some(short) = f.get_short() { + let s = format!("'{conflicts}{multiple}-{short}[{help}]' \\"); + + debug!("write_flags_of:iter: Wrote...{}", &*s); + + ret.push(s); + + if let Some(short_aliases) = f.get_visible_short_aliases() { + for alias in short_aliases { + let s = format!("'{conflicts}{multiple}-{alias}[{help}]' \\",); + + debug!("write_flags_of:iter: Wrote...{}", &*s); + + ret.push(s); + } + } + } + + if let Some(long) = f.get_long() { + let l = format!("'{conflicts}{multiple}--{long}[{help}]' \\"); + + debug!("write_flags_of:iter: Wrote...{}", &*l); + + ret.push(l); + + if let Some(aliases) = f.get_visible_aliases() { + for alias in aliases { + let l = format!("'{conflicts}{multiple}--{alias}[{help}]' \\"); + + debug!("write_flags_of:iter: Wrote...{}", &*l); + + ret.push(l); + } + } + } + } + + ret.join("\n") +} + +fn write_positionals_of(p: &Command) -> String { + debug!("write_positionals_of;"); + + let mut ret = vec![]; + + // Completions for commands that end with two Vec arguments require special care. + // - You can have two Vec args separated with a custom value terminator. + // - You can have two Vec args with the second one set to last (raw sets last) + // which will require a '--' separator to be used before the second argument + // on the command-line. + // + // We use the '-S' _arguments option to disable completion after '--'. Thus, the + // completion for the second argument in scenario (B) does not need to be emitted + // because it is implicitly handled by the '-S' option. + // We only need to emit the first catch-all. + // + // Have we already emitted a catch-all multi-valued positional argument + // without a custom value terminator? 
+ let mut catch_all_emitted = false; + + for arg in p.get_positionals() { + debug!("write_positionals_of:iter: arg={}", arg.get_id()); + + let num_args = arg.get_num_args().expect("built"); + let is_multi_valued = num_args.max_values() > 1; + + if catch_all_emitted && (arg.is_last_set() || is_multi_valued) { + // This is the final argument and it also takes multiple arguments. + // We've already emitted a catch-all positional argument so we don't need + // to emit anything for this argument because it is implicitly handled by + // the use of the '-S' _arguments option. + continue; + } + + let cardinality_value; + // If we have any subcommands, we'll emit a catch-all argument, so we shouldn't + // emit one here. + let cardinality = if is_multi_valued && !p.has_subcommands() { + match arg.get_value_terminator() { + Some(terminator) => { + cardinality_value = format!("*{}:", escape_value(terminator)); + cardinality_value.as_str() + } + None => { + catch_all_emitted = true; + "*:" + } + } + } else if !arg.is_required_set() { + ":" + } else { + "" + }; + + let a = format!( + "'{cardinality}:{name}{help}:{value_completion}' \\", + cardinality = cardinality, + name = arg.get_id(), + help = arg + .get_help() + .map(|s| s.to_string()) + .map(|v| " -- ".to_owned() + &v) + .unwrap_or_else(|| "".to_owned()) + .replace('[', "\\[") + .replace(']', "\\]") + .replace('\'', "'\\''") + .replace(':', "\\:"), + value_completion = value_completion(arg).unwrap_or_default() + ); + + debug!("write_positionals_of:iter: Wrote...{a}"); + + ret.push(a); + } + + ret.join("\n") +} + +#[cfg(test)] +mod tests { + use crate::shells::zsh::{escape_help, escape_value}; + + #[test] + fn test_escape_value() { + let raw_string = "\\ [foo]() `bar https://$PATH"; + assert_eq!( + escape_value(raw_string), + "\\\\\\ \\[foo\\]\\(\\)\\ \\`bar\\ https\\://\\$PATH" + ) + } + + #[test] + fn test_escape_help() { + let raw_string = "\\ [foo]() `bar https://$PATH"; + assert_eq!( + escape_help(raw_string), + "\\\\ 
\\[foo\\]() \\`bar https\\://\\$PATH" + ) + } +} diff -Nru s390-tools-2.31.0/rust-vendor/curl-sys/build.rs s390-tools-2.33.1/rust-vendor/curl-sys/build.rs --- s390-tools-2.31.0/rust-vendor/curl-sys/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/curl-sys/build.rs 2024-05-28 11:57:36.000000000 +0200 @@ -100,7 +100,7 @@ .replace("@LIBCURL_LIBS@", "") .replace("@SUPPORT_FEATURES@", "") .replace("@SUPPORT_PROTOCOLS@", "") - .replace("@CURLVERSION@", "8.4.0"), + .replace("@CURLVERSION@", "8.6.0"), ) .unwrap(); @@ -215,8 +215,10 @@ .file("curl/lib/vauth/vauth.c") .file("curl/lib/vquic/curl_msh3.c") .file("curl/lib/vquic/curl_ngtcp2.c") + .file("curl/lib/vquic/curl_osslq.c") .file("curl/lib/vquic/curl_quiche.c") .file("curl/lib/vquic/vquic.c") + .file("curl/lib/vquic/vquic-tls.c") .file("curl/lib/vtls/hostcheck.c") .file("curl/lib/vtls/keylog.c") .file("curl/lib/vtls/vtls.c") diff -Nru s390-tools-2.31.0/rust-vendor/curl-sys/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/curl-sys/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/curl-sys/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/curl-sys/.cargo-checksum.json 2024-05-28 11:57:39.000000000 +0200 @@ -1 +1 @@ -{"files":{},"package":"b4a0d18d88360e374b16b2273c832b5e57258ffc1d4aa4f96b108e0738d5752f"} \ No newline at end of file +{"files":{},"package":"29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/curl-sys/Cargo.toml s390-tools-2.33.1/rust-vendor/curl-sys/Cargo.toml --- s390-tools-2.31.0/rust-vendor/curl-sys/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/curl-sys/Cargo.toml 2024-05-28 11:57:36.000000000 +0200 @@ -12,7 +12,7 @@ [package] edition = "2018" name = "curl-sys" -version = "0.4.68+curl-8.4.0" +version = "0.4.72+curl-8.6.0" authors = ["Alex Crichton "] build = "build.rs" links = "curl" @@ -77,7 +77,7 @@ version = 
"0.2" [target."cfg(windows)".dependencies.windows-sys] -version = "0.48" +version = "0.52" features = ["Win32_Networking_WinSock"] [badges.appveyor] diff -Nru s390-tools-2.31.0/rust-vendor/equivalent/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/equivalent/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/equivalent/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/equivalent/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/equivalent/Cargo.toml s390-tools-2.33.1/rust-vendor/equivalent/Cargo.toml --- s390-tools-2.31.0/rust-vendor/equivalent/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/equivalent/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -rust-version = "1.6" -name = "equivalent" -version = "1.0.1" -description = "Traits for key comparison in maps." 
-readme = "README.md" -keywords = [ - "hashmap", - "no_std", -] -categories = [ - "data-structures", - "no-std", -] -license = "Apache-2.0 OR MIT" -repository = "https://github.com/cuviper/equivalent" diff -Nru s390-tools-2.31.0/rust-vendor/equivalent/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/equivalent/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/equivalent/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/equivalent/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/equivalent/LICENSE-MIT s390-tools-2.33.1/rust-vendor/equivalent/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/equivalent/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/equivalent/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2016--2023 - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/equivalent/README.md s390-tools-2.33.1/rust-vendor/equivalent/README.md --- s390-tools-2.31.0/rust-vendor/equivalent/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/equivalent/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -# Equivalent - -[![crates.io](https://img.shields.io/crates/v/equivalent.svg)](https://crates.io/crates/equivalent) -[![docs](https://docs.rs/equivalent/badge.svg)](https://docs.rs/equivalent) - -`Equivalent` and `Comparable` are Rust traits for key comparison in maps. - -These may be used in the implementation of maps where the lookup type `Q` -may be different than the stored key type `K`. - -* `Q: Equivalent` checks for equality, similar to the `HashMap` - constraint `K: Borrow, Q: Eq`. -* `Q: Comparable` checks the ordering, similar to the `BTreeMap` - constraint `K: Borrow, Q: Ord`. - -These traits are not used by the maps in the standard library, but they may -add more flexibility in third-party map implementations, especially in -situations where a strict `K: Borrow` relationship is not available. - -## License - -Equivalent is distributed under the terms of both the MIT license and the -Apache License (Version 2.0). See [LICENSE-APACHE](LICENSE-APACHE) and -[LICENSE-MIT](LICENSE-MIT) for details. Opening a pull request is -assumed to signal agreement with these licensing terms. diff -Nru s390-tools-2.31.0/rust-vendor/equivalent/src/lib.rs s390-tools-2.33.1/rust-vendor/equivalent/src/lib.rs --- s390-tools-2.31.0/rust-vendor/equivalent/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/equivalent/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,113 +0,0 @@ -//! [`Equivalent`] and [`Comparable`] are traits for key comparison in maps. -//! -//! These may be used in the implementation of maps where the lookup type `Q` -//! may be different than the stored key type `K`. -//! -//! 
* `Q: Equivalent` checks for equality, similar to the `HashMap` -//! constraint `K: Borrow, Q: Eq`. -//! * `Q: Comparable` checks the ordering, similar to the `BTreeMap` -//! constraint `K: Borrow, Q: Ord`. -//! -//! These traits are not used by the maps in the standard library, but they may -//! add more flexibility in third-party map implementations, especially in -//! situations where a strict `K: Borrow` relationship is not available. -//! -//! # Examples -//! -//! ``` -//! use equivalent::*; -//! use std::cmp::Ordering; -//! -//! pub struct Pair(pub A, pub B); -//! -//! impl<'a, A: ?Sized, B: ?Sized, C, D> Equivalent<(C, D)> for Pair<&'a A, &'a B> -//! where -//! A: Equivalent, -//! B: Equivalent, -//! { -//! fn equivalent(&self, key: &(C, D)) -> bool { -//! self.0.equivalent(&key.0) && self.1.equivalent(&key.1) -//! } -//! } -//! -//! impl<'a, A: ?Sized, B: ?Sized, C, D> Comparable<(C, D)> for Pair<&'a A, &'a B> -//! where -//! A: Comparable, -//! B: Comparable, -//! { -//! fn compare(&self, key: &(C, D)) -> Ordering { -//! match self.0.compare(&key.0) { -//! Ordering::Equal => self.1.compare(&key.1), -//! not_equal => not_equal, -//! } -//! } -//! } -//! -//! fn main() { -//! let key = (String::from("foo"), String::from("bar")); -//! let q1 = Pair("foo", "bar"); -//! let q2 = Pair("boo", "bar"); -//! let q3 = Pair("foo", "baz"); -//! -//! assert!(q1.equivalent(&key)); -//! assert!(!q2.equivalent(&key)); -//! assert!(!q3.equivalent(&key)); -//! -//! assert_eq!(q1.compare(&key), Ordering::Equal); -//! assert_eq!(q2.compare(&key), Ordering::Less); -//! assert_eq!(q3.compare(&key), Ordering::Greater); -//! } -//! ``` - -#![no_std] - -use core::borrow::Borrow; -use core::cmp::Ordering; - -/// Key equivalence trait. -/// -/// This trait allows hash table lookup to be customized. 
It has one blanket -/// implementation that uses the regular solution with `Borrow` and `Eq`, just -/// like `HashMap` does, so that you can pass `&str` to lookup into a map with -/// `String` keys and so on. -/// -/// # Contract -/// -/// The implementor **must** hash like `K`, if it is hashable. -pub trait Equivalent { - /// Compare self to `key` and return `true` if they are equal. - fn equivalent(&self, key: &K) -> bool; -} - -impl Equivalent for Q -where - Q: Eq, - K: Borrow, -{ - #[inline] - fn equivalent(&self, key: &K) -> bool { - PartialEq::eq(self, key.borrow()) - } -} - -/// Key ordering trait. -/// -/// This trait allows ordered map lookup to be customized. It has one blanket -/// implementation that uses the regular solution with `Borrow` and `Ord`, just -/// like `BTreeMap` does, so that you can pass `&str` to lookup into a map with -/// `String` keys and so on. -pub trait Comparable: Equivalent { - /// Compare self to `key` and return their ordering. - fn compare(&self, key: &K) -> Ordering; -} - -impl Comparable for Q -where - Q: Ord, - K: Borrow, -{ - #[inline] - fn compare(&self, key: &K) -> Ordering { - Ord::cmp(self, key.borrow()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/fnv/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/fnv/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/fnv/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/fnv/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/fnv/Cargo.toml s390-tools-2.33.1/rust-vendor/fnv/Cargo.toml --- s390-tools-2.31.0/rust-vendor/fnv/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/fnv/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,29 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry 
Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "fnv" -version = "1.0.7" -authors = ["Alex Crichton "] -description = "Fowler–Noll–Vo hash function" -documentation = "https://doc.servo.org/fnv/" -readme = "README.md" -license = "Apache-2.0 / MIT" -repository = "https://github.com/servo/rust-fnv" - -[lib] -name = "fnv" -path = "lib.rs" - -[features] -default = ["std"] -std = [] diff -Nru s390-tools-2.31.0/rust-vendor/fnv/lib.rs s390-tools-2.33.1/rust-vendor/fnv/lib.rs --- s390-tools-2.31.0/rust-vendor/fnv/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/fnv/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,367 +0,0 @@ -//! An implementation of the [Fowler–Noll–Vo hash function][chongo]. -//! -//! ## About -//! -//! The FNV hash function is a custom `Hasher` implementation that is more -//! efficient for smaller hash keys. -//! -//! [The Rust FAQ states that][faq] while the default `Hasher` implementation, -//! SipHash, is good in many cases, it is notably slower than other algorithms -//! with short keys, such as when you have a map of integers to other values. -//! In cases like these, [FNV is demonstrably faster][graphs]. -//! -//! Its disadvantages are that it performs badly on larger inputs, and -//! provides no protection against collision attacks, where a malicious user -//! can craft specific keys designed to slow a hasher down. Thus, it is -//! important to profile your program to ensure that you are using small hash -//! keys, and be certain that your program could not be exposed to malicious -//! 
inputs (including being a networked server). -//! -//! The Rust compiler itself uses FNV, as it is not worried about -//! denial-of-service attacks, and can assume that its inputs are going to be -//! small—a perfect use case for FNV. -//! -#![cfg_attr(feature = "std", doc = r#" - -## Using FNV in a `HashMap` - -The `FnvHashMap` type alias is the easiest way to use the standard library’s -`HashMap` with FNV. - -```rust -use fnv::FnvHashMap; - -let mut map = FnvHashMap::default(); -map.insert(1, "one"); -map.insert(2, "two"); - -map = FnvHashMap::with_capacity_and_hasher(10, Default::default()); -map.insert(1, "one"); -map.insert(2, "two"); -``` - -Note, the standard library’s `HashMap::new` and `HashMap::with_capacity` -are only implemented for the `RandomState` hasher, so using `Default` to -get the hasher is the next best option. - -## Using FNV in a `HashSet` - -Similarly, `FnvHashSet` is a type alias for the standard library’s `HashSet` -with FNV. - -```rust -use fnv::FnvHashSet; - -let mut set = FnvHashSet::default(); -set.insert(1); -set.insert(2); - -set = FnvHashSet::with_capacity_and_hasher(10, Default::default()); -set.insert(1); -set.insert(2); -``` -"#)] -//! -//! [chongo]: http://www.isthe.com/chongo/tech/comp/fnv/index.html -//! [faq]: https://www.rust-lang.org/en-US/faq.html#why-are-rusts-hashmaps-slow -//! [graphs]: https://cglab.ca/~abeinges/blah/hash-rs/ - -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(all(not(feature = "std"), test))] -extern crate alloc; - -#[cfg(feature = "std")] -use std::default::Default; -#[cfg(feature = "std")] -use std::hash::{Hasher, BuildHasherDefault}; -#[cfg(feature = "std")] -use std::collections::{HashMap, HashSet}; -#[cfg(not(feature = "std"))] -use core::default::Default; -#[cfg(not(feature = "std"))] -use core::hash::{Hasher, BuildHasherDefault}; - -/// An implementation of the Fowler–Noll–Vo hash function. -/// -/// See the [crate documentation](index.html) for more details. 
-#[allow(missing_copy_implementations)] -pub struct FnvHasher(u64); - -impl Default for FnvHasher { - - #[inline] - fn default() -> FnvHasher { - FnvHasher(0xcbf29ce484222325) - } -} - -impl FnvHasher { - /// Create an FNV hasher starting with a state corresponding - /// to the hash `key`. - #[inline] - pub fn with_key(key: u64) -> FnvHasher { - FnvHasher(key) - } -} - -impl Hasher for FnvHasher { - #[inline] - fn finish(&self) -> u64 { - self.0 - } - - #[inline] - fn write(&mut self, bytes: &[u8]) { - let FnvHasher(mut hash) = *self; - - for byte in bytes.iter() { - hash = hash ^ (*byte as u64); - hash = hash.wrapping_mul(0x100000001b3); - } - - *self = FnvHasher(hash); - } -} - -/// A builder for default FNV hashers. -pub type FnvBuildHasher = BuildHasherDefault; - -/// A `HashMap` using a default FNV hasher. -#[cfg(feature = "std")] -pub type FnvHashMap = HashMap; - -/// A `HashSet` using a default FNV hasher. -#[cfg(feature = "std")] -pub type FnvHashSet = HashSet; - - -#[cfg(test)] -mod test { - use super::*; - #[cfg(feature = "std")] - use std::hash::Hasher; - #[cfg(not(feature = "std"))] - use alloc::vec::Vec; - - fn fnv1a(bytes: &[u8]) -> u64 { - let mut hasher = FnvHasher::default(); - hasher.write(bytes); - hasher.finish() - } - - fn repeat_10(bytes: &[u8]) -> Vec { - (0..10).flat_map(|_| bytes.iter().cloned()).collect() - } - - fn repeat_500(bytes: &[u8]) -> Vec { - (0..500).flat_map(|_| bytes.iter().cloned()).collect() - } - - #[test] - fn basic_tests() { - assert_eq!(fnv1a(b""), 0xcbf29ce484222325); - assert_eq!(fnv1a(b"a"), 0xaf63dc4c8601ec8c); - assert_eq!(fnv1a(b"b"), 0xaf63df4c8601f1a5); - assert_eq!(fnv1a(b"c"), 0xaf63de4c8601eff2); - assert_eq!(fnv1a(b"d"), 0xaf63d94c8601e773); - assert_eq!(fnv1a(b"e"), 0xaf63d84c8601e5c0); - assert_eq!(fnv1a(b"f"), 0xaf63db4c8601ead9); - assert_eq!(fnv1a(b"fo"), 0x08985907b541d342); - assert_eq!(fnv1a(b"foo"), 0xdcb27518fed9d577); - assert_eq!(fnv1a(b"foob"), 0xdd120e790c2512af); - assert_eq!(fnv1a(b"fooba"), 
0xcac165afa2fef40a); - assert_eq!(fnv1a(b"foobar"), 0x85944171f73967e8); - assert_eq!(fnv1a(b"\0"), 0xaf63bd4c8601b7df); - assert_eq!(fnv1a(b"a\0"), 0x089be207b544f1e4); - assert_eq!(fnv1a(b"b\0"), 0x08a61407b54d9b5f); - assert_eq!(fnv1a(b"c\0"), 0x08a2ae07b54ab836); - assert_eq!(fnv1a(b"d\0"), 0x0891b007b53c4869); - assert_eq!(fnv1a(b"e\0"), 0x088e4a07b5396540); - assert_eq!(fnv1a(b"f\0"), 0x08987c07b5420ebb); - assert_eq!(fnv1a(b"fo\0"), 0xdcb28a18fed9f926); - assert_eq!(fnv1a(b"foo\0"), 0xdd1270790c25b935); - assert_eq!(fnv1a(b"foob\0"), 0xcac146afa2febf5d); - assert_eq!(fnv1a(b"fooba\0"), 0x8593d371f738acfe); - assert_eq!(fnv1a(b"foobar\0"), 0x34531ca7168b8f38); - assert_eq!(fnv1a(b"ch"), 0x08a25607b54a22ae); - assert_eq!(fnv1a(b"cho"), 0xf5faf0190cf90df3); - assert_eq!(fnv1a(b"chon"), 0xf27397910b3221c7); - assert_eq!(fnv1a(b"chong"), 0x2c8c2b76062f22e0); - assert_eq!(fnv1a(b"chongo"), 0xe150688c8217b8fd); - assert_eq!(fnv1a(b"chongo "), 0xf35a83c10e4f1f87); - assert_eq!(fnv1a(b"chongo w"), 0xd1edd10b507344d0); - assert_eq!(fnv1a(b"chongo wa"), 0x2a5ee739b3ddb8c3); - assert_eq!(fnv1a(b"chongo was"), 0xdcfb970ca1c0d310); - assert_eq!(fnv1a(b"chongo was "), 0x4054da76daa6da90); - assert_eq!(fnv1a(b"chongo was h"), 0xf70a2ff589861368); - assert_eq!(fnv1a(b"chongo was he"), 0x4c628b38aed25f17); - assert_eq!(fnv1a(b"chongo was her"), 0x9dd1f6510f78189f); - assert_eq!(fnv1a(b"chongo was here"), 0xa3de85bd491270ce); - assert_eq!(fnv1a(b"chongo was here!"), 0x858e2fa32a55e61d); - assert_eq!(fnv1a(b"chongo was here!\n"), 0x46810940eff5f915); - assert_eq!(fnv1a(b"ch\0"), 0xf5fadd190cf8edaa); - assert_eq!(fnv1a(b"cho\0"), 0xf273ed910b32b3e9); - assert_eq!(fnv1a(b"chon\0"), 0x2c8c5276062f6525); - assert_eq!(fnv1a(b"chong\0"), 0xe150b98c821842a0); - assert_eq!(fnv1a(b"chongo\0"), 0xf35aa3c10e4f55e7); - assert_eq!(fnv1a(b"chongo \0"), 0xd1ed680b50729265); - assert_eq!(fnv1a(b"chongo w\0"), 0x2a5f0639b3dded70); - assert_eq!(fnv1a(b"chongo wa\0"), 0xdcfbaa0ca1c0f359); - 
assert_eq!(fnv1a(b"chongo was\0"), 0x4054ba76daa6a430); - assert_eq!(fnv1a(b"chongo was \0"), 0xf709c7f5898562b0); - assert_eq!(fnv1a(b"chongo was h\0"), 0x4c62e638aed2f9b8); - assert_eq!(fnv1a(b"chongo was he\0"), 0x9dd1a8510f779415); - assert_eq!(fnv1a(b"chongo was her\0"), 0xa3de2abd4911d62d); - assert_eq!(fnv1a(b"chongo was here\0"), 0x858e0ea32a55ae0a); - assert_eq!(fnv1a(b"chongo was here!\0"), 0x46810f40eff60347); - assert_eq!(fnv1a(b"chongo was here!\n\0"), 0xc33bce57bef63eaf); - assert_eq!(fnv1a(b"cu"), 0x08a24307b54a0265); - assert_eq!(fnv1a(b"cur"), 0xf5b9fd190cc18d15); - assert_eq!(fnv1a(b"curd"), 0x4c968290ace35703); - assert_eq!(fnv1a(b"curds"), 0x07174bd5c64d9350); - assert_eq!(fnv1a(b"curds "), 0x5a294c3ff5d18750); - assert_eq!(fnv1a(b"curds a"), 0x05b3c1aeb308b843); - assert_eq!(fnv1a(b"curds an"), 0xb92a48da37d0f477); - assert_eq!(fnv1a(b"curds and"), 0x73cdddccd80ebc49); - assert_eq!(fnv1a(b"curds and "), 0xd58c4c13210a266b); - assert_eq!(fnv1a(b"curds and w"), 0xe78b6081243ec194); - assert_eq!(fnv1a(b"curds and wh"), 0xb096f77096a39f34); - assert_eq!(fnv1a(b"curds and whe"), 0xb425c54ff807b6a3); - assert_eq!(fnv1a(b"curds and whey"), 0x23e520e2751bb46e); - assert_eq!(fnv1a(b"curds and whey\n"), 0x1a0b44ccfe1385ec); - assert_eq!(fnv1a(b"cu\0"), 0xf5ba4b190cc2119f); - assert_eq!(fnv1a(b"cur\0"), 0x4c962690ace2baaf); - assert_eq!(fnv1a(b"curd\0"), 0x0716ded5c64cda19); - assert_eq!(fnv1a(b"curds\0"), 0x5a292c3ff5d150f0); - assert_eq!(fnv1a(b"curds \0"), 0x05b3e0aeb308ecf0); - assert_eq!(fnv1a(b"curds a\0"), 0xb92a5eda37d119d9); - assert_eq!(fnv1a(b"curds an\0"), 0x73ce41ccd80f6635); - assert_eq!(fnv1a(b"curds and\0"), 0xd58c2c132109f00b); - assert_eq!(fnv1a(b"curds and \0"), 0xe78baf81243f47d1); - assert_eq!(fnv1a(b"curds and w\0"), 0xb0968f7096a2ee7c); - assert_eq!(fnv1a(b"curds and wh\0"), 0xb425a84ff807855c); - assert_eq!(fnv1a(b"curds and whe\0"), 0x23e4e9e2751b56f9); - assert_eq!(fnv1a(b"curds and whey\0"), 0x1a0b4eccfe1396ea); - 
assert_eq!(fnv1a(b"curds and whey\n\0"), 0x54abd453bb2c9004); - assert_eq!(fnv1a(b"hi"), 0x08ba5f07b55ec3da); - assert_eq!(fnv1a(b"hi\0"), 0x337354193006cb6e); - assert_eq!(fnv1a(b"hello"), 0xa430d84680aabd0b); - assert_eq!(fnv1a(b"hello\0"), 0xa9bc8acca21f39b1); - assert_eq!(fnv1a(b"\xff\x00\x00\x01"), 0x6961196491cc682d); - assert_eq!(fnv1a(b"\x01\x00\x00\xff"), 0xad2bb1774799dfe9); - assert_eq!(fnv1a(b"\xff\x00\x00\x02"), 0x6961166491cc6314); - assert_eq!(fnv1a(b"\x02\x00\x00\xff"), 0x8d1bb3904a3b1236); - assert_eq!(fnv1a(b"\xff\x00\x00\x03"), 0x6961176491cc64c7); - assert_eq!(fnv1a(b"\x03\x00\x00\xff"), 0xed205d87f40434c7); - assert_eq!(fnv1a(b"\xff\x00\x00\x04"), 0x6961146491cc5fae); - assert_eq!(fnv1a(b"\x04\x00\x00\xff"), 0xcd3baf5e44f8ad9c); - assert_eq!(fnv1a(b"\x40\x51\x4e\x44"), 0xe3b36596127cd6d8); - assert_eq!(fnv1a(b"\x44\x4e\x51\x40"), 0xf77f1072c8e8a646); - assert_eq!(fnv1a(b"\x40\x51\x4e\x4a"), 0xe3b36396127cd372); - assert_eq!(fnv1a(b"\x4a\x4e\x51\x40"), 0x6067dce9932ad458); - assert_eq!(fnv1a(b"\x40\x51\x4e\x54"), 0xe3b37596127cf208); - assert_eq!(fnv1a(b"\x54\x4e\x51\x40"), 0x4b7b10fa9fe83936); - assert_eq!(fnv1a(b"127.0.0.1"), 0xaabafe7104d914be); - assert_eq!(fnv1a(b"127.0.0.1\0"), 0xf4d3180b3cde3eda); - assert_eq!(fnv1a(b"127.0.0.2"), 0xaabafd7104d9130b); - assert_eq!(fnv1a(b"127.0.0.2\0"), 0xf4cfb20b3cdb5bb1); - assert_eq!(fnv1a(b"127.0.0.3"), 0xaabafc7104d91158); - assert_eq!(fnv1a(b"127.0.0.3\0"), 0xf4cc4c0b3cd87888); - assert_eq!(fnv1a(b"64.81.78.68"), 0xe729bac5d2a8d3a7); - assert_eq!(fnv1a(b"64.81.78.68\0"), 0x74bc0524f4dfa4c5); - assert_eq!(fnv1a(b"64.81.78.74"), 0xe72630c5d2a5b352); - assert_eq!(fnv1a(b"64.81.78.74\0"), 0x6b983224ef8fb456); - assert_eq!(fnv1a(b"64.81.78.84"), 0xe73042c5d2ae266d); - assert_eq!(fnv1a(b"64.81.78.84\0"), 0x8527e324fdeb4b37); - assert_eq!(fnv1a(b"feedface"), 0x0a83c86fee952abc); - assert_eq!(fnv1a(b"feedface\0"), 0x7318523267779d74); - assert_eq!(fnv1a(b"feedfacedaffdeed"), 0x3e66d3d56b8caca1); - 
assert_eq!(fnv1a(b"feedfacedaffdeed\0"), 0x956694a5c0095593); - assert_eq!(fnv1a(b"feedfacedeadbeef"), 0xcac54572bb1a6fc8); - assert_eq!(fnv1a(b"feedfacedeadbeef\0"), 0xa7a4c9f3edebf0d8); - assert_eq!(fnv1a(b"line 1\nline 2\nline 3"), 0x7829851fac17b143); - assert_eq!(fnv1a(b"chongo /\\../\\"), 0x2c8f4c9af81bcf06); - assert_eq!(fnv1a(b"chongo /\\../\\\0"), 0xd34e31539740c732); - assert_eq!(fnv1a(b"chongo (Landon Curt Noll) /\\../\\"), 0x3605a2ac253d2db1); - assert_eq!(fnv1a(b"chongo (Landon Curt Noll) /\\../\\\0"), 0x08c11b8346f4a3c3); - assert_eq!(fnv1a(b"http://antwrp.gsfc.nasa.gov/apod/astropix.html"), 0x6be396289ce8a6da); - assert_eq!(fnv1a(b"http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash"), 0xd9b957fb7fe794c5); - assert_eq!(fnv1a(b"http://epod.usra.edu/"), 0x05be33da04560a93); - assert_eq!(fnv1a(b"http://exoplanet.eu/"), 0x0957f1577ba9747c); - assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/cam3/"), 0xda2cc3acc24fba57); - assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/cams/HMcam/"), 0x74136f185b29e7f0); - assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/kilauea/update/deformation.html"), 0xb2f2b4590edb93b2); - assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/kilauea/update/images.html"), 0xb3608fce8b86ae04); - assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/kilauea/update/maps.html"), 0x4a3a865079359063); - assert_eq!(fnv1a(b"http://hvo.wr.usgs.gov/volcanowatch/current_issue.html"), 0x5b3a7ef496880a50); - assert_eq!(fnv1a(b"http://neo.jpl.nasa.gov/risk/"), 0x48fae3163854c23b); - assert_eq!(fnv1a(b"http://norvig.com/21-days.html"), 0x07aaa640476e0b9a); - assert_eq!(fnv1a(b"http://primes.utm.edu/curios/home.php"), 0x2f653656383a687d); - assert_eq!(fnv1a(b"http://slashdot.org/"), 0xa1031f8e7599d79c); - assert_eq!(fnv1a(b"http://tux.wr.usgs.gov/Maps/155.25-19.5.html"), 0xa31908178ff92477); - assert_eq!(fnv1a(b"http://volcano.wr.usgs.gov/kilaueastatus.php"), 0x097edf3c14c3fb83); - assert_eq!(fnv1a(b"http://www.avo.alaska.edu/activity/Redoubt.php"), 0xb51ca83feaa0971b); - 
assert_eq!(fnv1a(b"http://www.dilbert.com/fast/"), 0xdd3c0d96d784f2e9); - assert_eq!(fnv1a(b"http://www.fourmilab.ch/gravitation/orbits/"), 0x86cd26a9ea767d78); - assert_eq!(fnv1a(b"http://www.fpoa.net/"), 0xe6b215ff54a30c18); - assert_eq!(fnv1a(b"http://www.ioccc.org/index.html"), 0xec5b06a1c5531093); - assert_eq!(fnv1a(b"http://www.isthe.com/cgi-bin/number.cgi"), 0x45665a929f9ec5e5); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/bio.html"), 0x8c7609b4a9f10907); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/index.html"), 0x89aac3a491f0d729); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/src/calc/lucas-calc"), 0x32ce6b26e0f4a403); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/astro/venus2004.html"), 0x614ab44e02b53e01); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/astro/vita.html"), 0xfa6472eb6eef3290); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/comp/c/expert.html"), 0x9e5d75eb1948eb6a); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/comp/calc/index.html"), 0xb6d12ad4a8671852); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/comp/fnv/index.html"), 0x88826f56eba07af1); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/number/howhigh.html"), 0x44535bf2645bc0fd); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/number/number.html"), 0x169388ffc21e3728); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/prime/mersenne.html"), 0xf68aac9e396d8224); - assert_eq!(fnv1a(b"http://www.isthe.com/chongo/tech/math/prime/mersenne.html#largest"), 0x8e87d7e7472b3883); - assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/corpspeak.cgi"), 0x295c26caa8b423de); - assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/haiku.cgi"), 0x322c814292e72176); - assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/rand-none.cgi"), 0x8a06550eb8af7268); - assert_eq!(fnv1a(b"http://www.lavarnd.org/cgi-bin/randdist.cgi"), 0xef86d60e661bcf71); - assert_eq!(fnv1a(b"http://www.lavarnd.org/index.html"), 0x9e5426c87f30ee54); - 
assert_eq!(fnv1a(b"http://www.lavarnd.org/what/nist-test.html"), 0xf1ea8aa826fd047e); - assert_eq!(fnv1a(b"http://www.macosxhints.com/"), 0x0babaf9a642cb769); - assert_eq!(fnv1a(b"http://www.mellis.com/"), 0x4b3341d4068d012e); - assert_eq!(fnv1a(b"http://www.nature.nps.gov/air/webcams/parks/havoso2alert/havoalert.cfm"), 0xd15605cbc30a335c); - assert_eq!(fnv1a(b"http://www.nature.nps.gov/air/webcams/parks/havoso2alert/timelines_24.cfm"), 0x5b21060aed8412e5); - assert_eq!(fnv1a(b"http://www.paulnoll.com/"), 0x45e2cda1ce6f4227); - assert_eq!(fnv1a(b"http://www.pepysdiary.com/"), 0x50ae3745033ad7d4); - assert_eq!(fnv1a(b"http://www.sciencenews.org/index/home/activity/view"), 0xaa4588ced46bf414); - assert_eq!(fnv1a(b"http://www.skyandtelescope.com/"), 0xc1b0056c4a95467e); - assert_eq!(fnv1a(b"http://www.sput.nl/~rob/sirius.html"), 0x56576a71de8b4089); - assert_eq!(fnv1a(b"http://www.systemexperts.com/"), 0xbf20965fa6dc927e); - assert_eq!(fnv1a(b"http://www.tq-international.com/phpBB3/index.php"), 0x569f8383c2040882); - assert_eq!(fnv1a(b"http://www.travelquesttours.com/index.htm"), 0xe1e772fba08feca0); - assert_eq!(fnv1a(b"http://www.wunderground.com/global/stations/89606.html"), 0x4ced94af97138ac4); - assert_eq!(fnv1a(&repeat_10(b"21701")), 0xc4112ffb337a82fb); - assert_eq!(fnv1a(&repeat_10(b"M21701")), 0xd64a4fd41de38b7d); - assert_eq!(fnv1a(&repeat_10(b"2^21701-1")), 0x4cfc32329edebcbb); - assert_eq!(fnv1a(&repeat_10(b"\x54\xc5")), 0x0803564445050395); - assert_eq!(fnv1a(&repeat_10(b"\xc5\x54")), 0xaa1574ecf4642ffd); - assert_eq!(fnv1a(&repeat_10(b"23209")), 0x694bc4e54cc315f9); - assert_eq!(fnv1a(&repeat_10(b"M23209")), 0xa3d7cb273b011721); - assert_eq!(fnv1a(&repeat_10(b"2^23209-1")), 0x577c2f8b6115bfa5); - assert_eq!(fnv1a(&repeat_10(b"\x5a\xa9")), 0xb7ec8c1a769fb4c1); - assert_eq!(fnv1a(&repeat_10(b"\xa9\x5a")), 0x5d5cfce63359ab19); - assert_eq!(fnv1a(&repeat_10(b"391581216093")), 0x33b96c3cd65b5f71); - assert_eq!(fnv1a(&repeat_10(b"391581*2^216093-1")), 
0xd845097780602bb9); - assert_eq!(fnv1a(&repeat_10(b"\x05\xf9\x9d\x03\x4c\x81")), 0x84d47645d02da3d5); - assert_eq!(fnv1a(&repeat_10(b"FEDCBA9876543210")), 0x83544f33b58773a5); - assert_eq!(fnv1a(&repeat_10(b"\xfe\xdc\xba\x98\x76\x54\x32\x10")), 0x9175cbb2160836c5); - assert_eq!(fnv1a(&repeat_10(b"EFCDAB8967452301")), 0xc71b3bc175e72bc5); - assert_eq!(fnv1a(&repeat_10(b"\xef\xcd\xab\x89\x67\x45\x23\x01")), 0x636806ac222ec985); - assert_eq!(fnv1a(&repeat_10(b"0123456789ABCDEF")), 0xb6ef0e6950f52ed5); - assert_eq!(fnv1a(&repeat_10(b"\x01\x23\x45\x67\x89\xab\xcd\xef")), 0xead3d8a0f3dfdaa5); - assert_eq!(fnv1a(&repeat_10(b"1032547698BADCFE")), 0x922908fe9a861ba5); - assert_eq!(fnv1a(&repeat_10(b"\x10\x32\x54\x76\x98\xba\xdc\xfe")), 0x6d4821de275fd5c5); - assert_eq!(fnv1a(&repeat_500(b"\x00")), 0x1fe3fce62bd816b5); - assert_eq!(fnv1a(&repeat_500(b"\x07")), 0xc23e9fccd6f70591); - assert_eq!(fnv1a(&repeat_500(b"~")), 0xc1af12bdfe16b5b5); - assert_eq!(fnv1a(&repeat_500(b"\x7f")), 0x39e9f18f2f85e221); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/fnv/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/fnv/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/fnv/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/fnv/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/fnv/LICENSE-MIT s390-tools-2.33.1/rust-vendor/fnv/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/fnv/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/fnv/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2017 Contributors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/fnv/README.md s390-tools-2.33.1/rust-vendor/fnv/README.md --- s390-tools-2.31.0/rust-vendor/fnv/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/fnv/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,81 +0,0 @@ -# rust-fnv - -An implementation of the [Fowler–Noll–Vo hash function][chongo]. - -### [Read the documentation](https://doc.servo.org/fnv/) - - -## About - -The FNV hash function is a custom `Hasher` implementation that is more -efficient for smaller hash keys. - -[The Rust FAQ states that][faq] while the default `Hasher` implementation, -SipHash, is good in many cases, it is notably slower than other algorithms -with short keys, such as when you have a map of integers to other values. -In cases like these, [FNV is demonstrably faster][graphs]. - -Its disadvantages are that it performs badly on larger inputs, and -provides no protection against collision attacks, where a malicious user -can craft specific keys designed to slow a hasher down. Thus, it is -important to profile your program to ensure that you are using small hash -keys, and be certain that your program could not be exposed to malicious -inputs (including being a networked server). - -The Rust compiler itself uses FNV, as it is not worried about -denial-of-service attacks, and can assume that its inputs are going to be -small—a perfect use case for FNV. - - -## Usage - -To include this crate in your program, add the following to your `Cargo.toml`: - -```toml -[dependencies] -fnv = "1.0.3" -``` - - -## Using FNV in a HashMap - -The `FnvHashMap` type alias is the easiest way to use the standard library’s -`HashMap` with FNV. 
- -```rust -use fnv::FnvHashMap; - -let mut map = FnvHashMap::default(); -map.insert(1, "one"); -map.insert(2, "two"); - -map = FnvHashMap::with_capacity_and_hasher(10, Default::default()); -map.insert(1, "one"); -map.insert(2, "two"); -``` - -Note, the standard library’s `HashMap::new` and `HashMap::with_capacity` -are only implemented for the `RandomState` hasher, so using `Default` to -get the hasher is the next best option. - - -## Using FNV in a HashSet - -Similarly, `FnvHashSet` is a type alias for the standard library’s `HashSet` -with FNV. - -```rust -use fnv::FnvHashSet; - -let mut set = FnvHashSet::default(); -set.insert(1); -set.insert(2); - -set = FnvHashSet::with_capacity_and_hasher(10, Default::default()); -set.insert(1); -set.insert(2); -``` - -[chongo]: http://www.isthe.com/chongo/tech/comp/fnv/index.html -[faq]: https://www.rust-lang.org/en-US/faq.html#why-are-rusts-hashmaps-slow -[graphs]: https://cglab.ca/~abeinges/blah/hash-rs/ diff -Nru s390-tools-2.31.0/rust-vendor/form_urlencoded/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/form_urlencoded/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/form_urlencoded/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/form_urlencoded/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/form_urlencoded/Cargo.toml s390-tools-2.33.1/rust-vendor/form_urlencoded/Cargo.toml --- s390-tools-2.31.0/rust-vendor/form_urlencoded/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/form_urlencoded/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,36 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and 
also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.51" -name = "form_urlencoded" -version = "1.2.0" -authors = ["The rust-url developers"] -description = "Parser and serializer for the application/x-www-form-urlencoded syntax, as used by HTML forms." -categories = ["no_std"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/servo/rust-url" - -[lib] -test = false - -[dependencies.percent-encoding] -version = "2.3.0" -default-features = false - -[features] -alloc = ["percent-encoding/alloc"] -default = ["std"] -std = [ - "alloc", - "percent-encoding/std", -] diff -Nru s390-tools-2.31.0/rust-vendor/form_urlencoded/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/form_urlencoded/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/form_urlencoded/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/form_urlencoded/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/form_urlencoded/LICENSE-MIT s390-tools-2.33.1/rust-vendor/form_urlencoded/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/form_urlencoded/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/form_urlencoded/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2013-2016 The rust-url developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/form_urlencoded/src/lib.rs s390-tools-2.33.1/rust-vendor/form_urlencoded/src/lib.rs --- s390-tools-2.31.0/rust-vendor/form_urlencoded/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/form_urlencoded/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,427 +0,0 @@ -// Copyright 2013-2016 The rust-url developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Parser and serializer for the [`application/x-www-form-urlencoded` syntax]( -//! http://url.spec.whatwg.org/#application/x-www-form-urlencoded), -//! as used by HTML forms. -//! -//! Converts between a string (such as an URL’s query string) -//! and a sequence of (name, value) pairs. -#![no_std] - -// For forwards compatibility -#[cfg(feature = "std")] -extern crate std as _; - -extern crate alloc; - -#[cfg(not(feature = "alloc"))] -compile_error!("the `alloc` feature must currently be enabled"); - -use alloc::borrow::{Borrow, Cow, ToOwned}; -use alloc::string::String; -use core::str; -use percent_encoding::{percent_decode, percent_encode_byte}; - -/// Convert a byte string in the `application/x-www-form-urlencoded` syntax -/// into a iterator of (name, value) pairs. -/// -/// Use `parse(input.as_bytes())` to parse a `&str` string. -/// -/// The names and values are percent-decoded. For instance, `%23first=%25try%25` will be -/// converted to `[("#first", "%try%")]`. -#[inline] -pub fn parse(input: &[u8]) -> Parse<'_> { - Parse { input } -} -/// The return type of `parse()`. 
-#[derive(Copy, Clone)] -pub struct Parse<'a> { - input: &'a [u8], -} - -impl<'a> Iterator for Parse<'a> { - type Item = (Cow<'a, str>, Cow<'a, str>); - - fn next(&mut self) -> Option { - loop { - if self.input.is_empty() { - return None; - } - let mut split2 = self.input.splitn(2, |&b| b == b'&'); - let sequence = split2.next().unwrap(); - self.input = split2.next().unwrap_or(&[][..]); - if sequence.is_empty() { - continue; - } - let mut split2 = sequence.splitn(2, |&b| b == b'='); - let name = split2.next().unwrap(); - let value = split2.next().unwrap_or(&[][..]); - return Some((decode(name), decode(value))); - } - } -} - -fn decode(input: &[u8]) -> Cow<'_, str> { - let replaced = replace_plus(input); - decode_utf8_lossy(match percent_decode(&replaced).into() { - Cow::Owned(vec) => Cow::Owned(vec), - Cow::Borrowed(_) => replaced, - }) -} - -/// Replace b'+' with b' ' -fn replace_plus(input: &[u8]) -> Cow<'_, [u8]> { - match input.iter().position(|&b| b == b'+') { - None => Cow::Borrowed(input), - Some(first_position) => { - let mut replaced = input.to_owned(); - replaced[first_position] = b' '; - for byte in &mut replaced[first_position + 1..] { - if *byte == b'+' { - *byte = b' '; - } - } - Cow::Owned(replaced) - } - } -} - -impl<'a> Parse<'a> { - /// Return a new iterator that yields pairs of `String` instead of pairs of `Cow`. - pub fn into_owned(self) -> ParseIntoOwned<'a> { - ParseIntoOwned { inner: self } - } -} - -/// Like `Parse`, but yields pairs of `String` instead of pairs of `Cow`. -pub struct ParseIntoOwned<'a> { - inner: Parse<'a>, -} - -impl<'a> Iterator for ParseIntoOwned<'a> { - type Item = (String, String); - - fn next(&mut self) -> Option { - self.inner - .next() - .map(|(k, v)| (k.into_owned(), v.into_owned())) - } -} - -/// The [`application/x-www-form-urlencoded` byte serializer]( -/// https://url.spec.whatwg.org/#concept-urlencoded-byte-serializer). -/// -/// Return an iterator of `&str` slices. 
-pub fn byte_serialize(input: &[u8]) -> ByteSerialize<'_> { - ByteSerialize { bytes: input } -} - -/// Return value of `byte_serialize()`. -#[derive(Debug)] -pub struct ByteSerialize<'a> { - bytes: &'a [u8], -} - -fn byte_serialized_unchanged(byte: u8) -> bool { - matches!(byte, b'*' | b'-' | b'.' | b'0' ..= b'9' | b'A' ..= b'Z' | b'_' | b'a' ..= b'z') -} - -impl<'a> Iterator for ByteSerialize<'a> { - type Item = &'a str; - - fn next(&mut self) -> Option<&'a str> { - if let Some((&first, tail)) = self.bytes.split_first() { - if !byte_serialized_unchanged(first) { - self.bytes = tail; - return Some(if first == b' ' { - "+" - } else { - percent_encode_byte(first) - }); - } - let position = tail.iter().position(|&b| !byte_serialized_unchanged(b)); - let (unchanged_slice, remaining) = match position { - // 1 for first_byte + i unchanged in tail - Some(i) => self.bytes.split_at(1 + i), - None => (self.bytes, &[][..]), - }; - self.bytes = remaining; - // This unsafe is appropriate because we have already checked these - // bytes in byte_serialized_unchanged, which checks for a subset - // of UTF-8. So we know these bytes are valid UTF-8, and doing - // another UTF-8 check would be wasteful. - Some(unsafe { str::from_utf8_unchecked(unchanged_slice) }) - } else { - None - } - } - - fn size_hint(&self) -> (usize, Option) { - if self.bytes.is_empty() { - (0, Some(0)) - } else { - (1, Some(self.bytes.len())) - } - } -} - -/// The [`application/x-www-form-urlencoded` serializer]( -/// https://url.spec.whatwg.org/#concept-urlencoded-serializer). 
-pub struct Serializer<'a, T: Target> { - target: Option, - start_position: usize, - encoding: EncodingOverride<'a>, -} - -pub trait Target { - fn as_mut_string(&mut self) -> &mut String; - fn finish(self) -> Self::Finished; - type Finished; -} - -impl Target for String { - fn as_mut_string(&mut self) -> &mut String { - self - } - fn finish(self) -> Self { - self - } - type Finished = Self; -} - -impl<'a> Target for &'a mut String { - fn as_mut_string(&mut self) -> &mut String { - self - } - fn finish(self) -> Self { - self - } - type Finished = Self; -} - -impl<'a, T: Target> Serializer<'a, T> { - /// Create a new `application/x-www-form-urlencoded` serializer for the given target. - /// - /// If the target is non-empty, - /// its content is assumed to already be in `application/x-www-form-urlencoded` syntax. - pub fn new(target: T) -> Self { - Self::for_suffix(target, 0) - } - - /// Create a new `application/x-www-form-urlencoded` serializer - /// for a suffix of the given target. - /// - /// If that suffix is non-empty, - /// its content is assumed to already be in `application/x-www-form-urlencoded` syntax. - pub fn for_suffix(mut target: T, start_position: usize) -> Self { - if target.as_mut_string().len() < start_position { - panic!( - "invalid length {} for target of length {}", - start_position, - target.as_mut_string().len() - ); - } - - Serializer { - target: Some(target), - start_position, - encoding: None, - } - } - - /// Remove any existing name/value pair. - /// - /// Panics if called after `.finish()`. - pub fn clear(&mut self) -> &mut Self { - string(&mut self.target).truncate(self.start_position); - self - } - - /// Set the character encoding to be used for names and values before percent-encoding. - pub fn encoding_override(&mut self, new: EncodingOverride<'a>) -> &mut Self { - self.encoding = new; - self - } - - /// Serialize and append a name/value pair. - /// - /// Panics if called after `.finish()`. 
- pub fn append_pair(&mut self, name: &str, value: &str) -> &mut Self { - append_pair( - string(&mut self.target), - self.start_position, - self.encoding, - name, - value, - ); - self - } - - /// Serialize and append a name of parameter without any value. - /// - /// Panics if called after `.finish()`. - pub fn append_key_only(&mut self, name: &str) -> &mut Self { - append_key_only( - string(&mut self.target), - self.start_position, - self.encoding, - name, - ); - self - } - - /// Serialize and append a number of name/value pairs. - /// - /// This simply calls `append_pair` repeatedly. - /// This can be more convenient, so the user doesn’t need to introduce a block - /// to limit the scope of `Serializer`’s borrow of its string. - /// - /// Panics if called after `.finish()`. - pub fn extend_pairs(&mut self, iter: I) -> &mut Self - where - I: IntoIterator, - I::Item: Borrow<(K, V)>, - K: AsRef, - V: AsRef, - { - { - let string = string(&mut self.target); - for pair in iter { - let (k, v) = pair.borrow(); - append_pair( - string, - self.start_position, - self.encoding, - k.as_ref(), - v.as_ref(), - ); - } - } - self - } - - /// Serialize and append a number of names without values. - /// - /// This simply calls `append_key_only` repeatedly. - /// This can be more convenient, so the user doesn’t need to introduce a block - /// to limit the scope of `Serializer`’s borrow of its string. - /// - /// Panics if called after `.finish()`. - pub fn extend_keys_only(&mut self, iter: I) -> &mut Self - where - I: IntoIterator, - I::Item: Borrow, - K: AsRef, - { - { - let string = string(&mut self.target); - for key in iter { - let k = key.borrow().as_ref(); - append_key_only(string, self.start_position, self.encoding, k); - } - } - self - } - - /// If this serializer was constructed with a string, take and return that string. 
- /// - /// ```rust - /// use form_urlencoded; - /// let encoded: String = form_urlencoded::Serializer::new(String::new()) - /// .append_pair("foo", "bar & baz") - /// .append_pair("saison", "Été+hiver") - /// .finish(); - /// assert_eq!(encoded, "foo=bar+%26+baz&saison=%C3%89t%C3%A9%2Bhiver"); - /// ``` - /// - /// Panics if called more than once. - pub fn finish(&mut self) -> T::Finished { - self.target - .take() - .expect("url::form_urlencoded::Serializer double finish") - .finish() - } -} - -fn append_separator_if_needed(string: &mut String, start_position: usize) { - if string.len() > start_position { - string.push('&') - } -} - -fn string(target: &mut Option) -> &mut String { - target - .as_mut() - .expect("url::form_urlencoded::Serializer finished") - .as_mut_string() -} - -fn append_pair( - string: &mut String, - start_position: usize, - encoding: EncodingOverride<'_>, - name: &str, - value: &str, -) { - append_separator_if_needed(string, start_position); - append_encoded(name, string, encoding); - string.push('='); - append_encoded(value, string, encoding); -} - -fn append_key_only( - string: &mut String, - start_position: usize, - encoding: EncodingOverride, - name: &str, -) { - append_separator_if_needed(string, start_position); - append_encoded(name, string, encoding); -} - -fn append_encoded(s: &str, string: &mut String, encoding: EncodingOverride<'_>) { - string.extend(byte_serialize(&encode(encoding, s))) -} - -pub(crate) fn encode<'a>(encoding_override: EncodingOverride<'_>, input: &'a str) -> Cow<'a, [u8]> { - if let Some(o) = encoding_override { - return o(input); - } - input.as_bytes().into() -} - -pub(crate) fn decode_utf8_lossy(input: Cow<'_, [u8]>) -> Cow<'_, str> { - // Note: This function is duplicated in `percent_encoding/lib.rs`. 
- match input { - Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes), - Cow::Owned(bytes) => { - match String::from_utf8_lossy(&bytes) { - Cow::Borrowed(utf8) => { - // If from_utf8_lossy returns a Cow::Borrowed, then we can - // be sure our original bytes were valid UTF-8. This is because - // if the bytes were invalid UTF-8 from_utf8_lossy would have - // to allocate a new owned string to back the Cow so it could - // replace invalid bytes with a placeholder. - - // First we do a debug_assert to confirm our description above. - let raw_utf8: *const [u8] = utf8.as_bytes(); - debug_assert!(raw_utf8 == &*bytes as *const [u8]); - - // Given we know the original input bytes are valid UTF-8, - // and we have ownership of those bytes, we re-use them and - // return a Cow::Owned here. - Cow::Owned(unsafe { String::from_utf8_unchecked(bytes) }) - } - Cow::Owned(s) => Cow::Owned(s), - } - } - } -} - -pub type EncodingOverride<'a> = Option<&'a dyn Fn(&str) -> Cow<'_, [u8]>>; diff -Nru s390-tools-2.31.0/rust-vendor/futures/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/futures/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/futures/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/futures/Cargo.toml s390-tools-2.33.1/rust-vendor/futures/Cargo.toml --- s390-tools-2.31.0/rust-vendor/futures/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,147 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to 
registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.56" -name = "futures" -version = "0.3.29" -description = """ -An implementation of futures and streams featuring zero allocations, -composability, and iterator-like interfaces. -""" -homepage = "https://rust-lang.github.io/futures-rs" -readme = "README.md" -keywords = [ - "futures", - "async", - "future", -] -categories = ["asynchronous"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/futures-rs" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[package.metadata.playground] -features = [ - "std", - "async-await", - "compat", - "io-compat", - "executor", - "thread-pool", -] - -[dependencies.futures-channel] -version = "0.3.29" -features = ["sink"] -default-features = false - -[dependencies.futures-core] -version = "0.3.29" -default-features = false - -[dependencies.futures-executor] -version = "0.3.29" -optional = true -default-features = false - -[dependencies.futures-io] -version = "0.3.29" -default-features = false - -[dependencies.futures-sink] -version = "0.3.29" -default-features = false - -[dependencies.futures-task] -version = "0.3.29" -default-features = false - -[dependencies.futures-util] -version = "0.3.29" -features = ["sink"] -default-features = false - -[dev-dependencies.assert_matches] -version = "1.3.0" - -[dev-dependencies.pin-project] -version = "1.0.11" - -[dev-dependencies.pin-utils] -version = "0.1.0" - -[dev-dependencies.static_assertions] -version = "1" - -[dev-dependencies.tokio] -version = "0.1.11" - -[features] -alloc = [ - "futures-core/alloc", - "futures-task/alloc", - "futures-sink/alloc", - "futures-channel/alloc", - "futures-util/alloc", -] -async-await = [ - "futures-util/async-await", 
- "futures-util/async-await-macro", -] -bilock = ["futures-util/bilock"] -cfg-target-has-atomic = [] -compat = [ - "std", - "futures-util/compat", -] -default = [ - "std", - "async-await", - "executor", -] -executor = [ - "std", - "futures-executor/std", -] -io-compat = [ - "compat", - "futures-util/io-compat", -] -std = [ - "alloc", - "futures-core/std", - "futures-task/std", - "futures-io/std", - "futures-sink/std", - "futures-util/std", - "futures-util/io", - "futures-util/channel", -] -thread-pool = [ - "executor", - "futures-executor/thread-pool", -] -unstable = [ - "futures-core/unstable", - "futures-task/unstable", - "futures-channel/unstable", - "futures-io/unstable", - "futures-util/unstable", -] -write-all-vectored = ["futures-util/write-all-vectored"] diff -Nru s390-tools-2.31.0/rust-vendor/futures/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/futures/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/futures/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/futures/LICENSE-MIT s390-tools-2.33.1/rust-vendor/futures/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/futures/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/futures/README.md s390-tools-2.33.1/rust-vendor/futures/README.md --- s390-tools-2.31.0/rust-vendor/futures/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,61 +0,0 @@ -

- futures-rs -

- -

- Zero-cost asynchronous programming in Rust -

- -

- - Build Status - - - - crates.io - -

- -

- - Documentation - | - Website - -

- -`futures-rs` is a library providing the foundations for asynchronous programming in Rust. -It includes key trait definitions like `Stream`, as well as utilities like `join!`, -`select!`, and various futures combinator methods which enable expressive asynchronous -control flow. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -futures = "0.3" -``` - -The current `futures` requires Rust 1.56 or later. - -### Feature `std` - -Futures-rs works without the standard library, such as in bare metal environments. -However, it has a significantly reduced API surface. To use futures-rs in -a `#[no_std]` environment, use: - -```toml -[dependencies] -futures = { version = "0.3", default-features = false } -``` - -## License - -Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or -[MIT license](LICENSE-MIT) at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/futures/src/lib.rs s390-tools-2.33.1/rust-vendor/futures/src/lib.rs --- s390-tools-2.31.0/rust-vendor/futures/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,260 +0,0 @@ -//! Abstractions for asynchronous programming. -//! -//! This crate provides a number of core abstractions for writing asynchronous -//! code: -//! -//! - [Futures](crate::future) are single eventual values produced by -//! asynchronous computations. Some programming languages (e.g. JavaScript) -//! call this concept "promise". -//! - [Streams](crate::stream) represent a series of values -//! produced asynchronously. -//! - [Sinks](crate::sink) provide support for asynchronous writing of -//! data. -//! - [Executors](crate::executor) are responsible for running asynchronous -//! 
tasks. -//! -//! The crate also contains abstractions for [asynchronous I/O](crate::io) and -//! [cross-task communication](crate::channel). -//! -//! Underlying all of this is the *task system*, which is a form of lightweight -//! threading. Large asynchronous computations are built up using futures, -//! streams and sinks, and then spawned as independent tasks that are run to -//! completion, but *do not block* the thread running them. -//! -//! The following example describes how the task system context is built and used -//! within macros and keywords such as async and await!. -//! -//! ```rust -//! # use futures::channel::mpsc; -//! # use futures::executor; ///standard executors to provide a context for futures and streams -//! # use futures::executor::ThreadPool; -//! # use futures::StreamExt; -//! # -//! fn main() { -//! # { -//! let pool = ThreadPool::new().expect("Failed to build pool"); -//! let (tx, rx) = mpsc::unbounded::(); -//! -//! // Create a future by an async block, where async is responsible for an -//! // implementation of Future. At this point no executor has been provided -//! // to this future, so it will not be running. -//! let fut_values = async { -//! // Create another async block, again where the Future implementation -//! // is generated by async. Since this is inside of a parent async block, -//! // it will be provided with the executor of the parent block when the parent -//! // block is executed. -//! // -//! // This executor chaining is done by Future::poll whose second argument -//! // is a std::task::Context. This represents our executor, and the Future -//! // implemented by this async block can be polled using the parent async -//! // block's executor. -//! let fut_tx_result = async move { -//! (0..100).for_each(|v| { -//! tx.unbounded_send(v).expect("Failed to send"); -//! }) -//! }; -//! -//! // Use the provided thread pool to spawn the generated future -//! // responsible for transmission -//! 
pool.spawn_ok(fut_tx_result); -//! -//! let fut_values = rx -//! .map(|v| v * 2) -//! .collect(); -//! -//! // Use the executor provided to this async block to wait for the -//! // future to complete. -//! fut_values.await -//! }; -//! -//! // Actually execute the above future, which will invoke Future::poll and -//! // subsequently chain appropriate Future::poll and methods needing executors -//! // to drive all futures. Eventually fut_values will be driven to completion. -//! let values: Vec = executor::block_on(fut_values); -//! -//! println!("Values={:?}", values); -//! # } -//! # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 -//! } -//! ``` -//! -//! The majority of examples and code snippets in this crate assume that they are -//! inside an async block as written above. - -#![cfg_attr(not(feature = "std"), no_std)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - single_use_lifetimes, - unreachable_pub -)] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![cfg_attr(docsrs, feature(doc_cfg))] - -#[cfg(all(feature = "bilock", not(feature = "unstable")))] -compile_error!("The `bilock` feature requires the `unstable` feature as an explicit opt-in to unstable features"); - -#[doc(no_inline)] -pub use futures_core::future::{Future, TryFuture}; -#[doc(no_inline)] -pub use futures_util::future::{FutureExt, TryFutureExt}; - -#[doc(no_inline)] -pub use futures_core::stream::{Stream, TryStream}; -#[doc(no_inline)] -pub use futures_util::stream::{StreamExt, TryStreamExt}; - -#[doc(no_inline)] -pub use futures_sink::Sink; -#[doc(no_inline)] -pub use futures_util::sink::SinkExt; - -#[cfg(feature = "std")] -#[doc(no_inline)] -pub use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite}; -#[cfg(feature = "std")] 
-#[doc(no_inline)] -pub use futures_util::{AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt}; - -// Macro reexports -pub use futures_core::ready; // Readiness propagation -pub use futures_util::pin_mut; -#[cfg(feature = "std")] -#[cfg(feature = "async-await")] -pub use futures_util::select; -#[cfg(feature = "async-await")] -pub use futures_util::{join, pending, poll, select_biased, try_join}; // Async-await - -// Module reexports -#[doc(inline)] -pub use futures_util::{future, never, sink, stream, task}; - -#[cfg(feature = "std")] -#[cfg(feature = "async-await")] -pub use futures_util::stream_select; - -#[cfg(feature = "alloc")] -#[doc(inline)] -pub use futures_channel as channel; -#[cfg(feature = "alloc")] -#[doc(inline)] -pub use futures_util::lock; - -#[cfg(feature = "std")] -#[doc(inline)] -pub use futures_util::io; - -#[cfg(feature = "executor")] -#[cfg_attr(docsrs, doc(cfg(feature = "executor")))] -pub mod executor { - //! Built-in executors and related tools. - //! - //! All asynchronous computation occurs within an executor, which is - //! capable of spawning futures as tasks. This module provides several - //! built-in executors, as well as tools for building your own. - //! - //! - //! This module is only available when the `executor` feature of this - //! library is activated. - //! - //! # Using a thread pool (M:N task scheduling) - //! - //! Most of the time tasks should be executed on a [thread pool](ThreadPool). - //! A small set of worker threads can handle a very large set of spawned tasks - //! (which are much lighter weight than threads). Tasks spawned onto the pool - //! with the [`spawn_ok`](ThreadPool::spawn_ok) function will run ambiently on - //! the created threads. - //! - //! # Spawning additional tasks - //! - //! Tasks can be spawned onto a spawner by calling its [`spawn_obj`] method - //! directly. In the case of `!Send` futures, [`spawn_local_obj`] can be used - //! instead. - //! - //! # Single-threaded execution - //! 
- //! In addition to thread pools, it's possible to run a task (and the tasks - //! it spawns) entirely within a single thread via the [`LocalPool`] executor. - //! Aside from cutting down on synchronization costs, this executor also makes - //! it possible to spawn non-`Send` tasks, via [`spawn_local_obj`]. The - //! [`LocalPool`] is best suited for running I/O-bound tasks that do relatively - //! little work between I/O operations. - //! - //! There is also a convenience function [`block_on`] for simply running a - //! future to completion on the current thread. - //! - //! [`spawn_obj`]: https://docs.rs/futures/0.3/futures/task/trait.Spawn.html#tymethod.spawn_obj - //! [`spawn_local_obj`]: https://docs.rs/futures/0.3/futures/task/trait.LocalSpawn.html#tymethod.spawn_local_obj - - pub use futures_executor::{ - block_on, block_on_stream, enter, BlockingStream, Enter, EnterError, LocalPool, - LocalSpawner, - }; - - #[cfg(feature = "thread-pool")] - #[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] - pub use futures_executor::{ThreadPool, ThreadPoolBuilder}; -} - -#[cfg(feature = "compat")] -#[cfg_attr(docsrs, doc(cfg(feature = "compat")))] -pub mod compat { - //! Interop between `futures` 0.1 and 0.3. - //! - //! This module is only available when the `compat` feature of this - //! library is activated. - - pub use futures_util::compat::{ - Compat, Compat01As03, Compat01As03Sink, CompatSink, Executor01As03, Executor01CompatExt, - Executor01Future, Future01CompatExt, Sink01CompatExt, Stream01CompatExt, - }; - - #[cfg(feature = "io-compat")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] - pub use futures_util::compat::{AsyncRead01CompatExt, AsyncWrite01CompatExt}; -} - -pub mod prelude { - //! A "prelude" for crates using the `futures` crate. - //! - //! This prelude is similar to the standard library's prelude in that you'll - //! almost always want to import its entire contents, but unlike the - //! 
standard library's prelude you'll have to do so manually: - //! - //! ``` - //! # #[allow(unused_imports)] - //! use futures::prelude::*; - //! ``` - //! - //! The prelude may grow over time as additional items see ubiquitous use. - - pub use crate::future::{self, Future, TryFuture}; - pub use crate::sink::{self, Sink}; - pub use crate::stream::{self, Stream, TryStream}; - - #[doc(no_inline)] - #[allow(unreachable_pub)] - pub use crate::future::{FutureExt as _, TryFutureExt as _}; - #[doc(no_inline)] - pub use crate::sink::SinkExt as _; - #[doc(no_inline)] - #[allow(unreachable_pub)] - pub use crate::stream::{StreamExt as _, TryStreamExt as _}; - - #[cfg(feature = "std")] - pub use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite}; - - #[cfg(feature = "std")] - #[doc(no_inline)] - #[allow(unreachable_pub)] - pub use crate::io::{ - AsyncBufReadExt as _, AsyncReadExt as _, AsyncSeekExt as _, AsyncWriteExt as _, - }; -} diff -Nru s390-tools-2.31.0/rust-vendor/futures/tests_disabled/all.rs s390-tools-2.33.1/rust-vendor/futures/tests_disabled/all.rs --- s390-tools-2.31.0/rust-vendor/futures/tests_disabled/all.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures/tests_disabled/all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,400 +0,0 @@ -use futures::channel::oneshot::{self, Canceled}; -use futures::executor::block_on; -use futures::future; -use std::sync::mpsc::{channel, TryRecvError}; - -// mod support; -// use support::*; - -fn unselect(r: Result, Either<(E, B), (E, A)>>) -> Result { - match r { - Ok(Either::Left((t, _))) | Ok(Either::Right((t, _))) => Ok(t), - Err(Either::Left((e, _))) | Err(Either::Right((e, _))) => Err(e), - } -} - -#[test] -fn result_smoke() { - fn is_future_v(_: C) - where - A: Send + 'static, - B: Send + 'static, - C: Future, - { - } - - is_future_v::(f_ok(1).map(|a| a + 1)); - is_future_v::(f_ok(1).map_err(|a| a + 1)); - is_future_v::(f_ok(1).and_then(Ok)); - is_future_v::(f_ok(1).or_else(Err)); - 
is_future_v::<(i32, i32), u32, _>(f_ok(1).join(Err(3))); - is_future_v::(f_ok(1).map(f_ok).flatten()); - - assert_done(|| f_ok(1), r_ok(1)); - assert_done(|| f_err(1), r_err(1)); - assert_done(|| result(Ok(1)), r_ok(1)); - assert_done(|| result(Err(1)), r_err(1)); - assert_done(|| ok(1), r_ok(1)); - assert_done(|| err(1), r_err(1)); - assert_done(|| f_ok(1).map(|a| a + 2), r_ok(3)); - assert_done(|| f_err(1).map(|a| a + 2), r_err(1)); - assert_done(|| f_ok(1).map_err(|a| a + 2), r_ok(1)); - assert_done(|| f_err(1).map_err(|a| a + 2), r_err(3)); - assert_done(|| f_ok(1).and_then(|a| Ok(a + 2)), r_ok(3)); - assert_done(|| f_err(1).and_then(|a| Ok(a + 2)), r_err(1)); - assert_done(|| f_ok(1).and_then(|a| Err(a as u32 + 3)), r_err(4)); - assert_done(|| f_err(1).and_then(|a| Err(a as u32 + 4)), r_err(1)); - assert_done(|| f_ok(1).or_else(|a| Ok(a as i32 + 2)), r_ok(1)); - assert_done(|| f_err(1).or_else(|a| Ok(a as i32 + 2)), r_ok(3)); - assert_done(|| f_ok(1).or_else(|a| Err(a + 3)), r_ok(1)); - assert_done(|| f_err(1).or_else(|a| Err(a + 4)), r_err(5)); - assert_done(|| f_ok(1).select(f_err(2)).then(unselect), r_ok(1)); - assert_done(|| f_ok(1).select(Ok(2)).then(unselect), r_ok(1)); - assert_done(|| f_err(1).select(f_ok(1)).then(unselect), r_err(1)); - assert_done(|| f_ok(1).select(empty()).then(unselect), Ok(1)); - assert_done(|| empty().select(f_ok(1)).then(unselect), Ok(1)); - assert_done(|| f_ok(1).join(f_err(1)), Err(1)); - assert_done(|| f_ok(1).join(Ok(2)), Ok((1, 2))); - assert_done(|| f_err(1).join(f_ok(1)), Err(1)); - assert_done(|| f_ok(1).then(|_| Ok(2)), r_ok(2)); - assert_done(|| f_ok(1).then(|_| Err(2)), r_err(2)); - assert_done(|| f_err(1).then(|_| Ok(2)), r_ok(2)); - assert_done(|| f_err(1).then(|_| Err(2)), r_err(2)); -} - -#[test] -fn test_empty() { - fn empty() -> Empty { - future::empty() - } - - assert_empty(|| empty()); - assert_empty(|| empty().select(empty())); - assert_empty(|| empty().join(empty())); - assert_empty(|| 
empty().join(f_ok(1))); - assert_empty(|| f_ok(1).join(empty())); - assert_empty(|| empty().or_else(move |_| empty())); - assert_empty(|| empty().and_then(move |_| empty())); - assert_empty(|| f_err(1).or_else(move |_| empty())); - assert_empty(|| f_ok(1).and_then(move |_| empty())); - assert_empty(|| empty().map(|a| a + 1)); - assert_empty(|| empty().map_err(|a| a + 1)); - assert_empty(|| empty().then(|a| a)); -} - -#[test] -fn test_ok() { - assert_done(|| ok(1), r_ok(1)); - assert_done(|| err(1), r_err(1)); -} - -#[test] -fn flatten() { - fn ok(a: T) -> FutureResult { - future::ok(a) - } - fn err(b: E) -> FutureResult { - future::err(b) - } - - assert_done(|| ok(ok(1)).flatten(), r_ok(1)); - assert_done(|| ok(err(1)).flatten(), r_err(1)); - assert_done(|| err(1u32).map(ok).flatten(), r_err(1)); - assert_done(|| future::ok(future::ok(1)).flatten(), r_ok(1)); - assert_empty(|| ok(empty::()).flatten()); - assert_empty(|| empty::().map(ok).flatten()); -} - -#[test] -fn smoke_oneshot() { - assert_done( - || { - let (c, p) = oneshot::channel(); - c.send(1).unwrap(); - p - }, - Ok(1), - ); - assert_done( - || { - let (c, p) = oneshot::channel::(); - drop(c); - p - }, - Err(Canceled), - ); - let mut completes = Vec::new(); - assert_empty(|| { - let (a, b) = oneshot::channel::(); - completes.push(a); - b - }); - - let (c, mut p) = oneshot::channel::(); - drop(c); - let res = panic_waker_lw(|lw| p.poll(lw)); - assert!(res.is_err()); - let (c, p) = oneshot::channel::(); - drop(c); - let (tx, rx) = channel(); - p.then(move |_| tx.send(())).forget(); - rx.recv().unwrap(); -} - -#[test] -fn select_cancels() { - let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); - let ((btx, brx), (dtx, drx)) = (channel(), channel()); - let b = b.map(move |b| { - btx.send(b).unwrap(); - b - }); - let d = d.map(move |d| { - dtx.send(d).unwrap(); - d - }); - - let mut f = b.select(d).then(unselect); - // assert!(f.poll(&mut Task::new()).is_pending()); - 
assert!(brx.try_recv().is_err()); - assert!(drx.try_recv().is_err()); - a.send(1).unwrap(); - noop_waker_lw(|lw| { - let res = f.poll(lw); - assert!(res.ok().unwrap().is_ready()); - assert_eq!(brx.recv().unwrap(), 1); - drop(c); - assert!(drx.recv().is_err()); - - let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); - let ((btx, _brx), (dtx, drx)) = (channel(), channel()); - let b = b.map(move |b| { - btx.send(b).unwrap(); - b - }); - let d = d.map(move |d| { - dtx.send(d).unwrap(); - d - }); - - let mut f = b.select(d).then(unselect); - assert!(f.poll(lw).ok().unwrap().is_pending()); - assert!(f.poll(lw).ok().unwrap().is_pending()); - a.send(1).unwrap(); - assert!(f.poll(lw).ok().unwrap().is_ready()); - drop((c, f)); - assert!(drx.recv().is_err()); - }) -} - -#[test] -fn join_cancels() { - let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); - let ((btx, _brx), (dtx, drx)) = (channel(), channel()); - let b = b.map(move |b| { - btx.send(b).unwrap(); - b - }); - let d = d.map(move |d| { - dtx.send(d).unwrap(); - d - }); - - let mut f = b.join(d); - drop(a); - let res = panic_waker_lw(|lw| f.poll(lw)); - assert!(res.is_err()); - drop(c); - assert!(drx.recv().is_err()); - - let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); - let ((btx, _brx), (dtx, drx)) = (channel(), channel()); - let b = b.map(move |b| { - btx.send(b).unwrap(); - b - }); - let d = d.map(move |d| { - dtx.send(d).unwrap(); - d - }); - - let (tx, rx) = channel(); - let f = b.join(d); - f.then(move |_| { - tx.send(()).unwrap(); - let res: Result<(), ()> = Ok(()); - res - }) - .forget(); - assert!(rx.try_recv().is_err()); - drop(a); - rx.recv().unwrap(); - drop(c); - assert!(drx.recv().is_err()); -} - -#[test] -fn join_incomplete() { - let (a, b) = oneshot::channel::(); - let (tx, rx) = channel(); - noop_waker_lw(|lw| { - let mut f = ok(1).join(b).map(move |r| tx.send(r).unwrap()); - assert!(f.poll(lw).ok().unwrap().is_pending()); - 
assert!(rx.try_recv().is_err()); - a.send(2).unwrap(); - assert!(f.poll(lw).ok().unwrap().is_ready()); - assert_eq!(rx.recv().unwrap(), (1, 2)); - - let (a, b) = oneshot::channel::(); - let (tx, rx) = channel(); - let mut f = b.join(Ok(2)).map(move |r| tx.send(r).unwrap()); - assert!(f.poll(lw).ok().unwrap().is_pending()); - assert!(rx.try_recv().is_err()); - a.send(1).unwrap(); - assert!(f.poll(lw).ok().unwrap().is_ready()); - assert_eq!(rx.recv().unwrap(), (1, 2)); - - let (a, b) = oneshot::channel::(); - let (tx, rx) = channel(); - let mut f = ok(1).join(b).map_err(move |_r| tx.send(2).unwrap()); - assert!(f.poll(lw).ok().unwrap().is_pending()); - assert!(rx.try_recv().is_err()); - drop(a); - assert!(f.poll(lw).is_err()); - assert_eq!(rx.recv().unwrap(), 2); - - let (a, b) = oneshot::channel::(); - let (tx, rx) = channel(); - let mut f = b.join(Ok(2)).map_err(move |_r| tx.send(1).unwrap()); - assert!(f.poll(lw).ok().unwrap().is_pending()); - assert!(rx.try_recv().is_err()); - drop(a); - assert!(f.poll(lw).is_err()); - assert_eq!(rx.recv().unwrap(), 1); - }) -} - -#[test] -fn select2() { - assert_done(|| f_ok(2).select(empty()).then(unselect), Ok(2)); - assert_done(|| empty().select(f_ok(2)).then(unselect), Ok(2)); - assert_done(|| f_err(2).select(empty()).then(unselect), Err(2)); - assert_done(|| empty().select(f_err(2)).then(unselect), Err(2)); - - assert_done( - || { - f_ok(1).select(f_ok(2)).map_err(|_| 0).and_then(|either_tup| { - let (a, b) = either_tup.into_inner(); - b.map(move |b| a + b) - }) - }, - Ok(3), - ); - - // Finish one half of a select and then fail the second, ensuring that we - // get the notification of the second one. 
- { - let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); - let f = b.select(d); - let (tx, rx) = channel(); - f.map(move |r| tx.send(r).unwrap()).forget(); - a.send(1).unwrap(); - let (val, next) = rx.recv().unwrap().into_inner(); - assert_eq!(val, 1); - let (tx, rx) = channel(); - next.map_err(move |_r| tx.send(2).unwrap()).forget(); - assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty); - drop(c); - assert_eq!(rx.recv().unwrap(), 2); - } - - // Fail the second half and ensure that we see the first one finish - { - let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); - let f = b.select(d); - let (tx, rx) = channel(); - f.map_err(move |r| tx.send((1, r.into_inner().1)).unwrap()).forget(); - drop(c); - let (val, next) = rx.recv().unwrap(); - assert_eq!(val, 1); - let (tx, rx) = channel(); - next.map(move |r| tx.send(r).unwrap()).forget(); - assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty); - a.send(2).unwrap(); - assert_eq!(rx.recv().unwrap(), 2); - } - - // Cancelling the first half should cancel the second - { - let ((_a, b), (_c, d)) = (oneshot::channel::(), oneshot::channel::()); - let ((btx, brx), (dtx, drx)) = (channel(), channel()); - let b = b.map(move |v| { - btx.send(v).unwrap(); - v - }); - let d = d.map(move |v| { - dtx.send(v).unwrap(); - v - }); - let f = b.select(d); - drop(f); - assert!(drx.recv().is_err()); - assert!(brx.recv().is_err()); - } - - // Cancel after a schedule - { - let ((_a, b), (_c, d)) = (oneshot::channel::(), oneshot::channel::()); - let ((btx, brx), (dtx, drx)) = (channel(), channel()); - let b = b.map(move |v| { - btx.send(v).unwrap(); - v - }); - let d = d.map(move |v| { - dtx.send(v).unwrap(); - v - }); - let mut f = b.select(d); - let _res = noop_waker_lw(|lw| f.poll(lw)); - drop(f); - assert!(drx.recv().is_err()); - assert!(brx.recv().is_err()); - } - - // Cancel propagates - { - let ((a, b), (_c, d)) = (oneshot::channel::(), oneshot::channel::()); - let ((btx, brx), 
(dtx, drx)) = (channel(), channel()); - let b = b.map(move |v| { - btx.send(v).unwrap(); - v - }); - let d = d.map(move |v| { - dtx.send(v).unwrap(); - v - }); - let (tx, rx) = channel(); - b.select(d).map(move |_| tx.send(()).unwrap()).forget(); - drop(a); - assert!(drx.recv().is_err()); - assert!(brx.recv().is_err()); - assert!(rx.recv().is_err()); - } - - // Cancel on early drop - { - let (tx, rx) = channel(); - let f = f_ok(1).select(empty::<_, ()>().map(move |()| { - tx.send(()).unwrap(); - 1 - })); - drop(f); - assert!(rx.recv().is_err()); - } -} - -#[test] -fn option() { - assert_eq!(Ok(Some(())), block_on(Some(ok::<(), ()>(())).into_future())); - assert_eq!(Ok::<_, ()>(None::<()>), block_on(None::>.into_future())); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures/tests_disabled/stream.rs s390-tools-2.33.1/rust-vendor/futures/tests_disabled/stream.rs --- s390-tools-2.31.0/rust-vendor/futures/tests_disabled/stream.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures/tests_disabled/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,368 +0,0 @@ -use futures::channel::mpsc; -use futures::channel::oneshot; -use futures::executor::{block_on, block_on_stream}; -use futures::future::{err, ok}; -use futures::stream::{empty, iter_ok, poll_fn, Peekable}; - -// mod support; -// use support::*; - -pub struct Iter { - iter: I, -} - -pub fn iter(i: J) -> Iter -where - J: IntoIterator>, -{ - Iter { iter: i.into_iter() } -} - -impl Stream for Iter -where - I: Iterator>, -{ - type Item = T; - type Error = E; - - fn poll_next(&mut self, _: &mut Context<'_>) -> Poll, E> { - match self.iter.next() { - Some(Ok(e)) => Ok(Poll::Ready(Some(e))), - Some(Err(e)) => Err(e), - None => Ok(Poll::Ready(None)), - } - } -} - -fn list() -> Box + Send> { - let (tx, rx) = mpsc::channel(1); - tx.send(Ok(1)).and_then(|tx| tx.send(Ok(2))).and_then(|tx| tx.send(Ok(3))).forget(); - Box::new(rx.then(|r| r.unwrap())) -} - -fn err_list() -> Box + Send> { - let (tx, rx) = 
mpsc::channel(1); - tx.send(Ok(1)).and_then(|tx| tx.send(Ok(2))).and_then(|tx| tx.send(Err(3))).forget(); - Box::new(rx.then(|r| r.unwrap())) -} - -#[test] -fn map() { - assert_done(|| list().map(|a| a + 1).collect(), Ok(vec![2, 3, 4])); -} - -#[test] -fn map_err() { - assert_done(|| err_list().map_err(|a| a + 1).collect::>(), Err(4)); -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -struct FromErrTest(u32); - -impl From for FromErrTest { - fn from(i: u32) -> Self { - Self(i) - } -} - -#[test] -fn from_err() { - assert_done(|| err_list().err_into().collect::>(), Err(FromErrTest(3))); -} - -#[test] -fn fold() { - assert_done(|| list().fold(0, |a, b| ok::(a + b)), Ok(6)); - assert_done(|| err_list().fold(0, |a, b| ok::(a + b)), Err(3)); -} - -#[test] -fn filter() { - assert_done(|| list().filter(|a| ok(*a % 2 == 0)).collect(), Ok(vec![2])); -} - -#[test] -fn filter_map() { - assert_done( - || list().filter_map(|x| ok(if x % 2 == 0 { Some(x + 10) } else { None })).collect(), - Ok(vec![12]), - ); -} - -#[test] -fn and_then() { - assert_done(|| list().and_then(|a| Ok(a + 1)).collect(), Ok(vec![2, 3, 4])); - assert_done(|| list().and_then(|a| err::(a as u32)).collect::>(), Err(1)); -} - -#[test] -fn then() { - assert_done(|| list().then(|a| a.map(|e| e + 1)).collect(), Ok(vec![2, 3, 4])); -} - -#[test] -fn or_else() { - assert_done(|| err_list().or_else(|a| ok::(a as i32)).collect(), Ok(vec![1, 2, 3])); -} - -#[test] -fn flatten() { - assert_done(|| list().map(|_| list()).flatten().collect(), Ok(vec![1, 2, 3, 1, 2, 3, 1, 2, 3])); -} - -#[test] -fn skip() { - assert_done(|| list().skip(2).collect(), Ok(vec![3])); -} - -#[test] -fn skip_passes_errors_through() { - let mut s = block_on_stream(iter(vec![Err(1), Err(2), Ok(3), Ok(4), Ok(5)]).skip(1)); - assert_eq!(s.next(), Some(Err(1))); - assert_eq!(s.next(), Some(Err(2))); - assert_eq!(s.next(), Some(Ok(4))); - assert_eq!(s.next(), Some(Ok(5))); - assert_eq!(s.next(), None); -} - -#[test] -fn skip_while() { - 
assert_done(|| list().skip_while(|e| Ok(*e % 2 == 1)).collect(), Ok(vec![2, 3])); -} -#[test] -fn take() { - assert_done(|| list().take(2).collect(), Ok(vec![1, 2])); -} - -#[test] -fn take_while() { - assert_done(|| list().take_while(|e| Ok(*e < 3)).collect(), Ok(vec![1, 2])); -} - -#[test] -fn take_passes_errors_through() { - let mut s = block_on_stream(iter(vec![Err(1), Err(2), Ok(3), Ok(4), Err(4)]).take(1)); - assert_eq!(s.next(), Some(Err(1))); - assert_eq!(s.next(), Some(Err(2))); - assert_eq!(s.next(), Some(Ok(3))); - assert_eq!(s.next(), None); - - let mut s = block_on_stream(iter(vec![Ok(1), Err(2)]).take(1)); - assert_eq!(s.next(), Some(Ok(1))); - assert_eq!(s.next(), None); -} - -#[test] -fn peekable() { - assert_done(|| list().peekable().collect(), Ok(vec![1, 2, 3])); -} - -#[test] -fn fuse() { - let mut stream = block_on_stream(list().fuse()); - assert_eq!(stream.next(), Some(Ok(1))); - assert_eq!(stream.next(), Some(Ok(2))); - assert_eq!(stream.next(), Some(Ok(3))); - assert_eq!(stream.next(), None); - assert_eq!(stream.next(), None); - assert_eq!(stream.next(), None); -} - -#[test] -fn buffered() { - let (tx, rx) = mpsc::channel(1); - let (a, b) = oneshot::channel::(); - let (c, d) = oneshot::channel::(); - - tx.send(Box::new(b.recover(|_| panic!())) as Box + Send>) - .and_then(|tx| tx.send(Box::new(d.map_err(|_| panic!())))) - .forget(); - - let mut rx = rx.buffered(2); - sassert_empty(&mut rx); - c.send(3).unwrap(); - sassert_empty(&mut rx); - a.send(5).unwrap(); - let mut rx = block_on_stream(rx); - assert_eq!(rx.next(), Some(Ok(5))); - assert_eq!(rx.next(), Some(Ok(3))); - assert_eq!(rx.next(), None); - - let (tx, rx) = mpsc::channel(1); - let (a, b) = oneshot::channel::(); - let (c, d) = oneshot::channel::(); - - tx.send(Box::new(b.recover(|_| panic!())) as Box + Send>) - .and_then(|tx| tx.send(Box::new(d.map_err(|_| panic!())))) - .forget(); - - let mut rx = rx.buffered(1); - sassert_empty(&mut rx); - c.send(3).unwrap(); - sassert_empty(&mut 
rx); - a.send(5).unwrap(); - let mut rx = block_on_stream(rx); - assert_eq!(rx.next(), Some(Ok(5))); - assert_eq!(rx.next(), Some(Ok(3))); - assert_eq!(rx.next(), None); -} - -#[test] -fn unordered() { - let (tx, rx) = mpsc::channel(1); - let (a, b) = oneshot::channel::(); - let (c, d) = oneshot::channel::(); - - tx.send(Box::new(b.recover(|_| panic!())) as Box + Send>) - .and_then(|tx| tx.send(Box::new(d.recover(|_| panic!())))) - .forget(); - - let mut rx = rx.buffer_unordered(2); - sassert_empty(&mut rx); - let mut rx = block_on_stream(rx); - c.send(3).unwrap(); - assert_eq!(rx.next(), Some(Ok(3))); - a.send(5).unwrap(); - assert_eq!(rx.next(), Some(Ok(5))); - assert_eq!(rx.next(), None); - - let (tx, rx) = mpsc::channel(1); - let (a, b) = oneshot::channel::(); - let (c, d) = oneshot::channel::(); - - tx.send(Box::new(b.recover(|_| panic!())) as Box + Send>) - .and_then(|tx| tx.send(Box::new(d.recover(|_| panic!())))) - .forget(); - - // We don't even get to see `c` until `a` completes. 
- let mut rx = rx.buffer_unordered(1); - sassert_empty(&mut rx); - c.send(3).unwrap(); - sassert_empty(&mut rx); - a.send(5).unwrap(); - let mut rx = block_on_stream(rx); - assert_eq!(rx.next(), Some(Ok(5))); - assert_eq!(rx.next(), Some(Ok(3))); - assert_eq!(rx.next(), None); -} - -#[test] -fn zip() { - assert_done(|| list().zip(list()).collect(), Ok(vec![(1, 1), (2, 2), (3, 3)])); - assert_done(|| list().zip(list().take(2)).collect(), Ok(vec![(1, 1), (2, 2)])); - assert_done(|| list().take(2).zip(list()).collect(), Ok(vec![(1, 1), (2, 2)])); - assert_done(|| err_list().zip(list()).collect::>(), Err(3)); - assert_done(|| list().zip(list().map(|x| x + 1)).collect(), Ok(vec![(1, 2), (2, 3), (3, 4)])); -} - -#[test] -fn peek() { - struct Peek { - inner: Peekable + Send>>, - } - - impl Future for Peek { - type Item = (); - type Error = u32; - - fn poll(&mut self, cx: &mut Context<'_>) -> Poll<(), u32> { - { - let res = ready!(self.inner.peek(cx))?; - assert_eq!(res, Some(&1)); - } - assert_eq!(self.inner.peek(cx).unwrap(), Some(&1).into()); - assert_eq!(self.inner.poll_next(cx).unwrap(), Some(1).into()); - Ok(Poll::Ready(())) - } - } - - block_on(Peek { inner: list().peekable() }).unwrap() -} - -#[test] -fn wait() { - assert_eq!(block_on_stream(list()).collect::, _>>(), Ok(vec![1, 2, 3])); -} - -#[test] -fn chunks() { - assert_done(|| list().chunks(3).collect(), Ok(vec![vec![1, 2, 3]])); - assert_done(|| list().chunks(1).collect(), Ok(vec![vec![1], vec![2], vec![3]])); - assert_done(|| list().chunks(2).collect(), Ok(vec![vec![1, 2], vec![3]])); - let mut list = block_on_stream(err_list().chunks(3)); - let i = list.next().unwrap().unwrap(); - assert_eq!(i, vec![1, 2]); - let i = list.next().unwrap().unwrap_err(); - assert_eq!(i, 3); -} - -#[test] -#[should_panic] -fn chunks_panic_on_cap_zero() { - let _ = list().chunks(0); -} - -#[test] -fn forward() { - let v = Vec::new(); - let v = block_on(iter_ok::<_, Never>(vec![0, 1]).forward(v)).unwrap().1; - assert_eq!(v, 
vec![0, 1]); - - let v = block_on(iter_ok::<_, Never>(vec![2, 3]).forward(v)).unwrap().1; - assert_eq!(v, vec![0, 1, 2, 3]); - - assert_done( - move || iter_ok::<_, Never>(vec![4, 5]).forward(v).map(|(_, s)| s), - Ok(vec![0, 1, 2, 3, 4, 5]), - ); -} - -#[test] -fn concat() { - let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]); - assert_done(move || a.concat(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9])); - - let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]); - assert_done(move || b.concat(), Err(())); -} - -#[test] -fn concat2() { - let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]); - assert_done(move || a.concat(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9])); - - let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]); - assert_done(move || b.concat(), Err(())); - - let c = empty::, ()>(); - assert_done(move || c.concat(), Ok(vec![])) -} - -#[test] -fn stream_poll_fn() { - let mut counter = 5usize; - - let read_stream = poll_fn(move |_| -> Poll, std::io::Error> { - if counter == 0 { - return Ok(Poll::Ready(None)); - } - counter -= 1; - Ok(Poll::Ready(Some(counter))) - }); - - assert_eq!(block_on_stream(read_stream).count(), 5); -} - -#[test] -fn inspect() { - let mut seen = vec![]; - assert_done(|| list().inspect(|&a| seen.push(a)).collect(), Ok(vec![1, 2, 3])); - assert_eq!(seen, [1, 2, 3]); -} - -#[test] -fn inspect_err() { - let mut seen = vec![]; - assert_done(|| err_list().inspect_err(|&a| seen.push(a)).collect::>(), Err(3)); - assert_eq!(seen, [3]); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/benches/sync_mpsc.rs s390-tools-2.33.1/rust-vendor/futures-channel/benches/sync_mpsc.rs --- s390-tools-2.31.0/rust-vendor/futures-channel/benches/sync_mpsc.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/benches/sync_mpsc.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,135 +0,0 @@ -#![feature(test)] - -extern crate test; -use 
crate::test::Bencher; - -use { - futures::{ - channel::mpsc::{self, Sender, UnboundedSender}, - ready, - sink::Sink, - stream::{Stream, StreamExt}, - task::{Context, Poll}, - }, - futures_test::task::noop_context, - std::pin::Pin, -}; - -/// Single producer, single consumer -#[bench] -fn unbounded_1_tx(b: &mut Bencher) { - let mut cx = noop_context(); - b.iter(|| { - let (tx, mut rx) = mpsc::unbounded(); - - // 1000 iterations to avoid measuring overhead of initialization - // Result should be divided by 1000 - for i in 0..1000 { - // Poll, not ready, park - assert_eq!(Poll::Pending, rx.poll_next_unpin(&mut cx)); - - UnboundedSender::unbounded_send(&tx, i).unwrap(); - - // Now poll ready - assert_eq!(Poll::Ready(Some(i)), rx.poll_next_unpin(&mut cx)); - } - }) -} - -/// 100 producers, single consumer -#[bench] -fn unbounded_100_tx(b: &mut Bencher) { - let mut cx = noop_context(); - b.iter(|| { - let (tx, mut rx) = mpsc::unbounded(); - - let tx: Vec<_> = (0..100).map(|_| tx.clone()).collect(); - - // 1000 send/recv operations total, result should be divided by 1000 - for _ in 0..10 { - for (i, x) in tx.iter().enumerate() { - assert_eq!(Poll::Pending, rx.poll_next_unpin(&mut cx)); - - UnboundedSender::unbounded_send(x, i).unwrap(); - - assert_eq!(Poll::Ready(Some(i)), rx.poll_next_unpin(&mut cx)); - } - } - }) -} - -#[bench] -fn unbounded_uncontended(b: &mut Bencher) { - let mut cx = noop_context(); - b.iter(|| { - let (tx, mut rx) = mpsc::unbounded(); - - for i in 0..1000 { - UnboundedSender::unbounded_send(&tx, i).expect("send"); - // No need to create a task, because poll is not going to park. 
- assert_eq!(Poll::Ready(Some(i)), rx.poll_next_unpin(&mut cx)); - } - }) -} - -/// A Stream that continuously sends incrementing number of the queue -struct TestSender { - tx: Sender, - last: u32, // Last number sent -} - -// Could be a Future, it doesn't matter -impl Stream for TestSender { - type Item = u32; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = &mut *self; - let mut tx = Pin::new(&mut this.tx); - - ready!(tx.as_mut().poll_ready(cx)).unwrap(); - tx.as_mut().start_send(this.last + 1).unwrap(); - this.last += 1; - assert_eq!(Poll::Pending, tx.as_mut().poll_flush(cx)); - Poll::Ready(Some(this.last)) - } -} - -/// Single producers, single consumer -#[bench] -fn bounded_1_tx(b: &mut Bencher) { - let mut cx = noop_context(); - b.iter(|| { - let (tx, mut rx) = mpsc::channel(0); - - let mut tx = TestSender { tx, last: 0 }; - - for i in 0..1000 { - assert_eq!(Poll::Ready(Some(i + 1)), tx.poll_next_unpin(&mut cx)); - assert_eq!(Poll::Pending, tx.poll_next_unpin(&mut cx)); - assert_eq!(Poll::Ready(Some(i + 1)), rx.poll_next_unpin(&mut cx)); - } - }) -} - -/// 100 producers, single consumer -#[bench] -fn bounded_100_tx(b: &mut Bencher) { - let mut cx = noop_context(); - b.iter(|| { - // Each sender can send one item after specified capacity - let (tx, mut rx) = mpsc::channel(0); - - let mut tx: Vec<_> = (0..100).map(|_| TestSender { tx: tx.clone(), last: 0 }).collect(); - - for i in 0..10 { - for x in &mut tx { - // Send an item - assert_eq!(Poll::Ready(Some(i + 1)), x.poll_next_unpin(&mut cx)); - // Then block - assert_eq!(Poll::Pending, x.poll_next_unpin(&mut cx)); - // Recv the item - assert_eq!(Poll::Ready(Some(i + 1)), rx.poll_next_unpin(&mut cx)); - } - } - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/build.rs s390-tools-2.33.1/rust-vendor/futures-channel/build.rs --- s390-tools-2.31.0/rust-vendor/futures-channel/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/futures-channel/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -// The rustc-cfg listed below are considered public API, but it is *unstable* -// and outside of the normal semver guarantees: -// -// - `futures_no_atomic_cas` -// Assume the target does *not* support atomic CAS operations. -// This is usually detected automatically by the build script, but you may -// need to enable it manually when building for custom targets or using -// non-cargo build systems that don't run the build script. -// -// With the exceptions mentioned above, the rustc-cfg emitted by the build -// script are *not* public API. - -#![warn(rust_2018_idioms, single_use_lifetimes)] - -use std::env; - -include!("no_atomic_cas.rs"); - -fn main() { - let target = match env::var("TARGET") { - Ok(target) => target, - Err(e) => { - println!( - "cargo:warning={}: unable to get TARGET environment variable: {}", - env!("CARGO_PKG_NAME"), - e - ); - return; - } - }; - - // Note that this is `no_*`, not `has_*`. This allows treating - // `cfg(target_has_atomic = "ptr")` as true when the build script doesn't - // run. This is needed for compatibility with non-cargo build systems that - // don't run the build script. 
- if NO_ATOMIC_CAS.contains(&&*target) { - println!("cargo:rustc-cfg=futures_no_atomic_cas"); - } - - println!("cargo:rerun-if-changed=no_atomic_cas.rs"); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/futures-channel/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/futures-channel/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/Cargo.toml s390-tools-2.33.1/rust-vendor/futures-channel/Cargo.toml --- s390-tools-2.31.0/rust-vendor/futures-channel/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,52 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.56" -name = "futures-channel" -version = "0.3.29" -description = """ -Channels for asynchronous communication using futures-rs. 
-""" -homepage = "https://rust-lang.github.io/futures-rs" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/futures-rs" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[dependencies.futures-core] -version = "0.3.29" -default-features = false - -[dependencies.futures-sink] -version = "0.3.29" -optional = true -default-features = false - -[dev-dependencies] - -[features] -alloc = ["futures-core/alloc"] -cfg-target-has-atomic = [] -default = ["std"] -sink = ["futures-sink"] -std = [ - "alloc", - "futures-core/std", -] -unstable = [] diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/futures-channel/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/futures-channel/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/LICENSE-MIT s390-tools-2.33.1/rust-vendor/futures-channel/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/futures-channel/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/no_atomic_cas.rs s390-tools-2.33.1/rust-vendor/futures-channel/no_atomic_cas.rs --- s390-tools-2.31.0/rust-vendor/futures-channel/no_atomic_cas.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/no_atomic_cas.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,17 +0,0 @@ -// This file is @generated by no_atomic_cas.sh. -// It is not intended for manual editing. - -const NO_ATOMIC_CAS: &[&str] = &[ - "armv4t-none-eabi", - "armv5te-none-eabi", - "avr-unknown-gnu-atmega328", - "bpfeb-unknown-none", - "bpfel-unknown-none", - "msp430-none-elf", - "riscv32i-unknown-none-elf", - "riscv32im-unknown-none-elf", - "riscv32imc-unknown-none-elf", - "thumbv4t-none-eabi", - "thumbv5te-none-eabi", - "thumbv6m-none-eabi", -]; diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/README.md s390-tools-2.33.1/rust-vendor/futures-channel/README.md --- s390-tools-2.31.0/rust-vendor/futures-channel/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -# futures-channel - -Channels for asynchronous communication using futures-rs. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -futures-channel = "0.3" -``` - -The current `futures-channel` requires Rust 1.56 or later. - -## License - -Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or -[MIT license](LICENSE-MIT) at your option. 
- -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/src/lib.rs s390-tools-2.33.1/rust-vendor/futures-channel/src/lib.rs --- s390-tools-2.31.0/rust-vendor/futures-channel/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,42 +0,0 @@ -//! Asynchronous channels. -//! -//! Like threads, concurrent tasks sometimes need to communicate with each -//! other. This module contains two basic abstractions for doing so: -//! -//! - [oneshot], a way of sending a single value from one task to another. -//! - [mpsc], a multi-producer, single-consumer channel for sending values -//! between tasks, analogous to the similarly-named structure in the standard -//! library. -//! -//! All items are only available when the `std` or `alloc` feature of this -//! library is activated, and it is activated by default. 
- -#![cfg_attr(not(feature = "std"), no_std)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - single_use_lifetimes, - unreachable_pub -)] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -extern crate alloc; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod lock; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "std")] -pub mod mpsc; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub mod oneshot; diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/src/lock.rs s390-tools-2.33.1/rust-vendor/futures-channel/src/lock.rs --- s390-tools-2.31.0/rust-vendor/futures-channel/src/lock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/src/lock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,102 +0,0 @@ -//! A "mutex" which only supports `try_lock` -//! -//! As a futures library the eventual call to an event loop should be the only -//! thing that ever blocks, so this is assisted with a fast user-space -//! implementation of a lock that can only have a `try_lock` operation. - -use core::cell::UnsafeCell; -use core::ops::{Deref, DerefMut}; -use core::sync::atomic::AtomicBool; -use core::sync::atomic::Ordering::SeqCst; - -/// A "mutex" around a value, similar to `std::sync::Mutex`. -/// -/// This lock only supports the `try_lock` operation, however, and does not -/// implement poisoning. -#[derive(Debug)] -pub(crate) struct Lock { - locked: AtomicBool, - data: UnsafeCell, -} - -/// Sentinel representing an acquired lock through which the data can be -/// accessed. -pub(crate) struct TryLock<'a, T> { - __ptr: &'a Lock, -} - -// The `Lock` structure is basically just a `Mutex`, and these two impls are -// intended to mirror the standard library's corresponding impls for `Mutex`. 
-// -// If a `T` is sendable across threads, so is the lock, and `T` must be sendable -// across threads to be `Sync` because it allows mutable access from multiple -// threads. -unsafe impl Send for Lock {} -unsafe impl Sync for Lock {} - -impl Lock { - /// Creates a new lock around the given value. - pub(crate) fn new(t: T) -> Self { - Self { locked: AtomicBool::new(false), data: UnsafeCell::new(t) } - } - - /// Attempts to acquire this lock, returning whether the lock was acquired or - /// not. - /// - /// If `Some` is returned then the data this lock protects can be accessed - /// through the sentinel. This sentinel allows both mutable and immutable - /// access. - /// - /// If `None` is returned then the lock is already locked, either elsewhere - /// on this thread or on another thread. - pub(crate) fn try_lock(&self) -> Option> { - if !self.locked.swap(true, SeqCst) { - Some(TryLock { __ptr: self }) - } else { - None - } - } -} - -impl Deref for TryLock<'_, T> { - type Target = T; - fn deref(&self) -> &T { - // The existence of `TryLock` represents that we own the lock, so we - // can safely access the data here. - unsafe { &*self.__ptr.data.get() } - } -} - -impl DerefMut for TryLock<'_, T> { - fn deref_mut(&mut self) -> &mut T { - // The existence of `TryLock` represents that we own the lock, so we - // can safely access the data here. - // - // Additionally, we're the *only* `TryLock` in existence so mutable - // access should be ok. 
- unsafe { &mut *self.__ptr.data.get() } - } -} - -impl Drop for TryLock<'_, T> { - fn drop(&mut self) { - self.__ptr.locked.store(false, SeqCst); - } -} - -#[cfg(test)] -mod tests { - use super::Lock; - - #[test] - fn smoke() { - let a = Lock::new(1); - let mut a1 = a.try_lock().unwrap(); - assert!(a.try_lock().is_none()); - assert_eq!(*a1, 1); - *a1 = 2; - drop(a1); - assert_eq!(*a.try_lock().unwrap(), 2); - assert_eq!(*a.try_lock().unwrap(), 2); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/src/mpsc/mod.rs s390-tools-2.33.1/rust-vendor/futures-channel/src/mpsc/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-channel/src/mpsc/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/src/mpsc/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1372 +0,0 @@ -//! A multi-producer, single-consumer queue for sending values across -//! asynchronous tasks. -//! -//! Similarly to the `std`, channel creation provides [`Receiver`] and -//! [`Sender`] handles. [`Receiver`] implements [`Stream`] and allows a task to -//! read values out of the channel. If there is no message to read from the -//! channel, the current task will be notified when a new value is sent. -//! [`Sender`] implements the `Sink` trait and allows a task to send messages into -//! the channel. If the channel is at capacity, the send will be rejected and -//! the task will be notified when additional capacity is available. In other -//! words, the channel provides backpressure. -//! -//! Unbounded channels are also available using the `unbounded` constructor. -//! -//! # Disconnection -//! -//! When all [`Sender`] handles have been dropped, it is no longer -//! possible to send values into the channel. This is considered the termination -//! event of the stream. As such, [`Receiver::poll_next`] -//! will return `Ok(Ready(None))`. -//! -//! If the [`Receiver`] handle is dropped, then messages can no longer -//! be read out of the channel. 
In this case, all further attempts to send will -//! result in an error. -//! -//! # Clean Shutdown -//! -//! If the [`Receiver`] is simply dropped, then it is possible for -//! there to be messages still in the channel that will not be processed. As -//! such, it is usually desirable to perform a "clean" shutdown. To do this, the -//! receiver will first call `close`, which will prevent any further messages to -//! be sent into the channel. Then, the receiver consumes the channel to -//! completion, at which point the receiver can be dropped. -//! -//! [`Sender`]: struct.Sender.html -//! [`Receiver`]: struct.Receiver.html -//! [`Stream`]: ../../futures_core/stream/trait.Stream.html -//! [`Receiver::poll_next`]: -//! ../../futures_core/stream/trait.Stream.html#tymethod.poll_next - -// At the core, the channel uses an atomic FIFO queue for message passing. This -// queue is used as the primary coordination primitive. In order to enforce -// capacity limits and handle back pressure, a secondary FIFO queue is used to -// send parked task handles. -// -// The general idea is that the channel is created with a `buffer` size of `n`. -// The channel capacity is `n + num-senders`. Each sender gets one "guaranteed" -// slot to hold a message. This allows `Sender` to know for a fact that a send -// will succeed *before* starting to do the actual work of sending the value. -// Since most of this work is lock-free, once the work starts, it is impossible -// to safely revert. -// -// If the sender is unable to process a send operation, then the current -// task is parked and the handle is sent on the parked task queue. -// -// Note that the implementation guarantees that the channel capacity will never -// exceed the configured limit, however there is no *strict* guarantee that the -// receiver will wake up a parked task *immediately* when a slot becomes -// available. 
However, it will almost always unpark a task when a slot becomes -// available and it is *guaranteed* that a sender will be unparked when the -// message that caused the sender to become parked is read out of the channel. -// -// The steps for sending a message are roughly: -// -// 1) Increment the channel message count -// 2) If the channel is at capacity, push the task handle onto the wait queue -// 3) Push the message onto the message queue. -// -// The steps for receiving a message are roughly: -// -// 1) Pop a message from the message queue -// 2) Pop a task handle from the wait queue -// 3) Decrement the channel message count. -// -// It's important for the order of operations on lock-free structures to happen -// in reverse order between the sender and receiver. This makes the message -// queue the primary coordination structure and establishes the necessary -// happens-before semantics required for the acquire / release semantics used -// by the queue structure. - -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::__internal::AtomicWaker; -use futures_core::task::{Context, Poll, Waker}; -use std::fmt; -use std::pin::Pin; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::SeqCst; -use std::sync::{Arc, Mutex}; -use std::thread; - -use crate::mpsc::queue::Queue; - -mod queue; -#[cfg(feature = "sink")] -mod sink_impl; - -struct UnboundedSenderInner { - // Channel state shared between the sender and receiver. - inner: Arc>, -} - -struct BoundedSenderInner { - // Channel state shared between the sender and receiver. - inner: Arc>, - - // Handle to the task that is blocked on this sender. This handle is sent - // to the receiver half in order to be notified when the sender becomes - // unblocked. - sender_task: Arc>, - - // `true` if the sender might be blocked. This is an optimization to avoid - // having to lock the mutex most of the time. 
- maybe_parked: bool, -} - -// We never project Pin<&mut SenderInner> to `Pin<&mut T>` -impl Unpin for UnboundedSenderInner {} -impl Unpin for BoundedSenderInner {} - -/// The transmission end of a bounded mpsc channel. -/// -/// This value is created by the [`channel`] function. -pub struct Sender(Option>); - -/// The transmission end of an unbounded mpsc channel. -/// -/// This value is created by the [`unbounded`] function. -pub struct UnboundedSender(Option>); - -trait AssertKinds: Send + Sync + Clone {} -impl AssertKinds for UnboundedSender {} - -/// The receiving end of a bounded mpsc channel. -/// -/// This value is created by the [`channel`] function. -pub struct Receiver { - inner: Option>>, -} - -/// The receiving end of an unbounded mpsc channel. -/// -/// This value is created by the [`unbounded`] function. -pub struct UnboundedReceiver { - inner: Option>>, -} - -// `Pin<&mut UnboundedReceiver>` is never projected to `Pin<&mut T>` -impl Unpin for UnboundedReceiver {} - -/// The error type for [`Sender`s](Sender) used as `Sink`s. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct SendError { - kind: SendErrorKind, -} - -/// The error type returned from [`try_send`](Sender::try_send). -#[derive(Clone, PartialEq, Eq)] -pub struct TrySendError { - err: SendError, - val: T, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -enum SendErrorKind { - Full, - Disconnected, -} - -/// The error type returned from [`try_next`](Receiver::try_next). -pub struct TryRecvError { - _priv: (), -} - -impl fmt::Display for SendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.is_full() { - write!(f, "send failed because channel is full") - } else { - write!(f, "send failed because receiver is gone") - } - } -} - -impl std::error::Error for SendError {} - -impl SendError { - /// Returns `true` if this error is a result of the channel being full. 
- pub fn is_full(&self) -> bool { - match self.kind { - SendErrorKind::Full => true, - _ => false, - } - } - - /// Returns `true` if this error is a result of the receiver being dropped. - pub fn is_disconnected(&self) -> bool { - match self.kind { - SendErrorKind::Disconnected => true, - _ => false, - } - } -} - -impl fmt::Debug for TrySendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TrySendError").field("kind", &self.err.kind).finish() - } -} - -impl fmt::Display for TrySendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.is_full() { - write!(f, "send failed because channel is full") - } else { - write!(f, "send failed because receiver is gone") - } - } -} - -impl std::error::Error for TrySendError {} - -impl TrySendError { - /// Returns `true` if this error is a result of the channel being full. - pub fn is_full(&self) -> bool { - self.err.is_full() - } - - /// Returns `true` if this error is a result of the receiver being dropped. - pub fn is_disconnected(&self) -> bool { - self.err.is_disconnected() - } - - /// Returns the message that was attempted to be sent but failed. - pub fn into_inner(self) -> T { - self.val - } - - /// Drops the message and converts into a `SendError`. - pub fn into_send_error(self) -> SendError { - self.err - } -} - -impl fmt::Debug for TryRecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("TryRecvError").finish() - } -} - -impl fmt::Display for TryRecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "receiver channel is empty") - } -} - -impl std::error::Error for TryRecvError {} - -struct UnboundedInner { - // Internal channel state. Consists of the number of messages stored in the - // channel as well as a flag signalling that the channel is closed. 
- state: AtomicUsize, - - // Atomic, FIFO queue used to send messages to the receiver - message_queue: Queue, - - // Number of senders in existence - num_senders: AtomicUsize, - - // Handle to the receiver's task. - recv_task: AtomicWaker, -} - -struct BoundedInner { - // Max buffer size of the channel. If `None` then the channel is unbounded. - buffer: usize, - - // Internal channel state. Consists of the number of messages stored in the - // channel as well as a flag signalling that the channel is closed. - state: AtomicUsize, - - // Atomic, FIFO queue used to send messages to the receiver - message_queue: Queue, - - // Atomic, FIFO queue used to send parked task handles to the receiver. - parked_queue: Queue>>, - - // Number of senders in existence - num_senders: AtomicUsize, - - // Handle to the receiver's task. - recv_task: AtomicWaker, -} - -// Struct representation of `Inner::state`. -#[derive(Clone, Copy)] -struct State { - // `true` when the channel is open - is_open: bool, - - // Number of messages in the channel - num_messages: usize, -} - -// The `is_open` flag is stored in the left-most bit of `Inner::state` -const OPEN_MASK: usize = usize::max_value() - (usize::max_value() >> 1); - -// When a new channel is created, it is created in the open state with no -// pending messages. -const INIT_STATE: usize = OPEN_MASK; - -// The maximum number of messages that a channel can track is `usize::max_value() >> 1` -const MAX_CAPACITY: usize = !(OPEN_MASK); - -// The maximum requested buffer size must be less than the maximum capacity of -// a channel. This is because each sender gets a guaranteed slot. 
-const MAX_BUFFER: usize = MAX_CAPACITY >> 1; - -// Sent to the consumer to wake up blocked producers -struct SenderTask { - task: Option, - is_parked: bool, -} - -impl SenderTask { - fn new() -> Self { - Self { task: None, is_parked: false } - } - - fn notify(&mut self) { - self.is_parked = false; - - if let Some(task) = self.task.take() { - task.wake(); - } - } -} - -/// Creates a bounded mpsc channel for communicating between asynchronous tasks. -/// -/// Being bounded, this channel provides backpressure to ensure that the sender -/// outpaces the receiver by only a limited amount. The channel's capacity is -/// equal to `buffer + num-senders`. In other words, each sender gets a -/// guaranteed slot in the channel capacity, and on top of that there are -/// `buffer` "first come, first serve" slots available to all senders. -/// -/// The [`Receiver`] returned implements the [`Stream`] trait, while [`Sender`] -/// implements `Sink`. -pub fn channel(buffer: usize) -> (Sender, Receiver) { - // Check that the requested buffer size does not exceed the maximum buffer - // size permitted by the system. - assert!(buffer < MAX_BUFFER, "requested buffer size too large"); - - let inner = Arc::new(BoundedInner { - buffer, - state: AtomicUsize::new(INIT_STATE), - message_queue: Queue::new(), - parked_queue: Queue::new(), - num_senders: AtomicUsize::new(1), - recv_task: AtomicWaker::new(), - }); - - let tx = BoundedSenderInner { - inner: inner.clone(), - sender_task: Arc::new(Mutex::new(SenderTask::new())), - maybe_parked: false, - }; - - let rx = Receiver { inner: Some(inner) }; - - (Sender(Some(tx)), rx) -} - -/// Creates an unbounded mpsc channel for communicating between asynchronous -/// tasks. -/// -/// A `send` on this channel will always succeed as long as the receive half has -/// not been closed. If the receiver falls behind, messages will be arbitrarily -/// buffered. 
-/// -/// **Note** that the amount of available system memory is an implicit bound to -/// the channel. Using an `unbounded` channel has the ability of causing the -/// process to run out of memory. In this case, the process will be aborted. -pub fn unbounded() -> (UnboundedSender, UnboundedReceiver) { - let inner = Arc::new(UnboundedInner { - state: AtomicUsize::new(INIT_STATE), - message_queue: Queue::new(), - num_senders: AtomicUsize::new(1), - recv_task: AtomicWaker::new(), - }); - - let tx = UnboundedSenderInner { inner: inner.clone() }; - - let rx = UnboundedReceiver { inner: Some(inner) }; - - (UnboundedSender(Some(tx)), rx) -} - -/* - * - * ===== impl Sender ===== - * - */ - -impl UnboundedSenderInner { - fn poll_ready_nb(&self) -> Poll> { - let state = decode_state(self.inner.state.load(SeqCst)); - if state.is_open { - Poll::Ready(Ok(())) - } else { - Poll::Ready(Err(SendError { kind: SendErrorKind::Disconnected })) - } - } - - // Push message to the queue and signal to the receiver - fn queue_push_and_signal(&self, msg: T) { - // Push the message onto the message queue - self.inner.message_queue.push(msg); - - // Signal to the receiver that a message has been enqueued. If the - // receiver is parked, this will unpark the task. - self.inner.recv_task.wake(); - } - - // Increment the number of queued messages. Returns the resulting number. - fn inc_num_messages(&self) -> Option { - let mut curr = self.inner.state.load(SeqCst); - - loop { - let mut state = decode_state(curr); - - // The receiver end closed the channel. - if !state.is_open { - return None; - } - - // This probably is never hit? Odds are the process will run out of - // memory first. It may be worth to return something else in this - // case? 
- assert!( - state.num_messages < MAX_CAPACITY, - "buffer space \ - exhausted; sending this messages would overflow the state" - ); - - state.num_messages += 1; - - let next = encode_state(&state); - match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) { - Ok(_) => return Some(state.num_messages), - Err(actual) => curr = actual, - } - } - } - - /// Returns whether the senders send to the same receiver. - fn same_receiver(&self, other: &Self) -> bool { - Arc::ptr_eq(&self.inner, &other.inner) - } - - /// Returns whether the sender send to this receiver. - fn is_connected_to(&self, inner: &Arc>) -> bool { - Arc::ptr_eq(&self.inner, inner) - } - - /// Returns pointer to the Arc containing sender - /// - /// The returned pointer is not referenced and should be only used for hashing! - fn ptr(&self) -> *const UnboundedInner { - &*self.inner - } - - /// Returns whether this channel is closed without needing a context. - fn is_closed(&self) -> bool { - !decode_state(self.inner.state.load(SeqCst)).is_open - } - - /// Closes this channel from the sender side, preventing any new messages. - fn close_channel(&self) { - // There's no need to park this sender, its dropping, - // and we don't want to check for capacity, so skip - // that stuff from `do_send`. - - self.inner.set_closed(); - self.inner.recv_task.wake(); - } -} - -impl BoundedSenderInner { - /// Attempts to send a message on this `Sender`, returning the message - /// if there was an error. - fn try_send(&mut self, msg: T) -> Result<(), TrySendError> { - // If the sender is currently blocked, reject the message - if !self.poll_unparked(None).is_ready() { - return Err(TrySendError { err: SendError { kind: SendErrorKind::Full }, val: msg }); - } - - // The channel has capacity to accept the message, so send it - self.do_send_b(msg) - } - - // Do the send without failing. - // Can be called only by bounded sender. 
- fn do_send_b(&mut self, msg: T) -> Result<(), TrySendError> { - // Anyone calling do_send *should* make sure there is room first, - // but assert here for tests as a sanity check. - debug_assert!(self.poll_unparked(None).is_ready()); - - // First, increment the number of messages contained by the channel. - // This operation will also atomically determine if the sender task - // should be parked. - // - // `None` is returned in the case that the channel has been closed by the - // receiver. This happens when `Receiver::close` is called or the - // receiver is dropped. - let park_self = match self.inc_num_messages() { - Some(num_messages) => { - // Block if the current number of pending messages has exceeded - // the configured buffer size - num_messages > self.inner.buffer - } - None => { - return Err(TrySendError { - err: SendError { kind: SendErrorKind::Disconnected }, - val: msg, - }) - } - }; - - // If the channel has reached capacity, then the sender task needs to - // be parked. This will send the task handle on the parked task queue. - // - // However, when `do_send` is called while dropping the `Sender`, - // `task::current()` can't be called safely. In this case, in order to - // maintain internal consistency, a blank message is pushed onto the - // parked task queue. - if park_self { - self.park(); - } - - self.queue_push_and_signal(msg); - - Ok(()) - } - - // Push message to the queue and signal to the receiver - fn queue_push_and_signal(&self, msg: T) { - // Push the message onto the message queue - self.inner.message_queue.push(msg); - - // Signal to the receiver that a message has been enqueued. If the - // receiver is parked, this will unpark the task. - self.inner.recv_task.wake(); - } - - // Increment the number of queued messages. Returns the resulting number. - fn inc_num_messages(&self) -> Option { - let mut curr = self.inner.state.load(SeqCst); - - loop { - let mut state = decode_state(curr); - - // The receiver end closed the channel. 
- if !state.is_open { - return None; - } - - // This probably is never hit? Odds are the process will run out of - // memory first. It may be worth to return something else in this - // case? - assert!( - state.num_messages < MAX_CAPACITY, - "buffer space \ - exhausted; sending this messages would overflow the state" - ); - - state.num_messages += 1; - - let next = encode_state(&state); - match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) { - Ok(_) => return Some(state.num_messages), - Err(actual) => curr = actual, - } - } - } - - fn park(&mut self) { - { - let mut sender = self.sender_task.lock().unwrap(); - sender.task = None; - sender.is_parked = true; - } - - // Send handle over queue - let t = self.sender_task.clone(); - self.inner.parked_queue.push(t); - - // Check to make sure we weren't closed after we sent our task on the - // queue - let state = decode_state(self.inner.state.load(SeqCst)); - self.maybe_parked = state.is_open; - } - - /// Polls the channel to determine if there is guaranteed capacity to send - /// at least one item without waiting. - /// - /// # Return value - /// - /// This method returns: - /// - /// - `Poll::Ready(Ok(_))` if there is sufficient capacity; - /// - `Poll::Pending` if the channel may not have - /// capacity, in which case the current task is queued to be notified once - /// capacity is available; - /// - `Poll::Ready(Err(SendError))` if the receiver has been dropped. - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - let state = decode_state(self.inner.state.load(SeqCst)); - if !state.is_open { - return Poll::Ready(Err(SendError { kind: SendErrorKind::Disconnected })); - } - - self.poll_unparked(Some(cx)).map(Ok) - } - - /// Returns whether the senders send to the same receiver. - fn same_receiver(&self, other: &Self) -> bool { - Arc::ptr_eq(&self.inner, &other.inner) - } - - /// Returns whether the sender send to this receiver. 
- fn is_connected_to(&self, receiver: &Arc>) -> bool { - Arc::ptr_eq(&self.inner, receiver) - } - - /// Returns pointer to the Arc containing sender - /// - /// The returned pointer is not referenced and should be only used for hashing! - fn ptr(&self) -> *const BoundedInner { - &*self.inner - } - - /// Returns whether this channel is closed without needing a context. - fn is_closed(&self) -> bool { - !decode_state(self.inner.state.load(SeqCst)).is_open - } - - /// Closes this channel from the sender side, preventing any new messages. - fn close_channel(&self) { - // There's no need to park this sender, its dropping, - // and we don't want to check for capacity, so skip - // that stuff from `do_send`. - - self.inner.set_closed(); - self.inner.recv_task.wake(); - } - - fn poll_unparked(&mut self, cx: Option<&mut Context<'_>>) -> Poll<()> { - // First check the `maybe_parked` variable. This avoids acquiring the - // lock in most cases - if self.maybe_parked { - // Get a lock on the task handle - let mut task = self.sender_task.lock().unwrap(); - - if !task.is_parked { - self.maybe_parked = false; - return Poll::Ready(()); - } - - // At this point, an unpark request is pending, so there will be an - // unpark sometime in the future. We just need to make sure that - // the correct task will be notified. - // - // Update the task in case the `Sender` has been moved to another - // task - task.task = cx.map(|cx| cx.waker().clone()); - - Poll::Pending - } else { - Poll::Ready(()) - } - } -} - -impl Sender { - /// Attempts to send a message on this `Sender`, returning the message - /// if there was an error. - pub fn try_send(&mut self, msg: T) -> Result<(), TrySendError> { - if let Some(inner) = &mut self.0 { - inner.try_send(msg) - } else { - Err(TrySendError { err: SendError { kind: SendErrorKind::Disconnected }, val: msg }) - } - } - - /// Send a message on the channel. 
- /// - /// This function should only be called after - /// [`poll_ready`](Sender::poll_ready) has reported that the channel is - /// ready to receive a message. - pub fn start_send(&mut self, msg: T) -> Result<(), SendError> { - self.try_send(msg).map_err(|e| e.err) - } - - /// Polls the channel to determine if there is guaranteed capacity to send - /// at least one item without waiting. - /// - /// # Return value - /// - /// This method returns: - /// - /// - `Poll::Ready(Ok(_))` if there is sufficient capacity; - /// - `Poll::Pending` if the channel may not have - /// capacity, in which case the current task is queued to be notified once - /// capacity is available; - /// - `Poll::Ready(Err(SendError))` if the receiver has been dropped. - pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - let inner = self.0.as_mut().ok_or(SendError { kind: SendErrorKind::Disconnected })?; - inner.poll_ready(cx) - } - - /// Returns whether this channel is closed without needing a context. - pub fn is_closed(&self) -> bool { - self.0.as_ref().map(BoundedSenderInner::is_closed).unwrap_or(true) - } - - /// Closes this channel from the sender side, preventing any new messages. - pub fn close_channel(&mut self) { - if let Some(inner) = &mut self.0 { - inner.close_channel(); - } - } - - /// Disconnects this sender from the channel, closing it if there are no more senders left. - pub fn disconnect(&mut self) { - self.0 = None; - } - - /// Returns whether the senders send to the same receiver. - pub fn same_receiver(&self, other: &Self) -> bool { - match (&self.0, &other.0) { - (Some(inner), Some(other)) => inner.same_receiver(other), - _ => false, - } - } - - /// Returns whether the sender send to this receiver. 
- pub fn is_connected_to(&self, receiver: &Receiver) -> bool { - match (&self.0, &receiver.inner) { - (Some(inner), Some(receiver)) => inner.is_connected_to(receiver), - _ => false, - } - } - - /// Hashes the receiver into the provided hasher - pub fn hash_receiver(&self, hasher: &mut H) - where - H: std::hash::Hasher, - { - use std::hash::Hash; - - let ptr = self.0.as_ref().map(|inner| inner.ptr()); - ptr.hash(hasher); - } -} - -impl UnboundedSender { - /// Check if the channel is ready to receive a message. - pub fn poll_ready(&self, _: &mut Context<'_>) -> Poll> { - let inner = self.0.as_ref().ok_or(SendError { kind: SendErrorKind::Disconnected })?; - inner.poll_ready_nb() - } - - /// Returns whether this channel is closed without needing a context. - pub fn is_closed(&self) -> bool { - self.0.as_ref().map(UnboundedSenderInner::is_closed).unwrap_or(true) - } - - /// Closes this channel from the sender side, preventing any new messages. - pub fn close_channel(&self) { - if let Some(inner) = &self.0 { - inner.close_channel(); - } - } - - /// Disconnects this sender from the channel, closing it if there are no more senders left. - pub fn disconnect(&mut self) { - self.0 = None; - } - - // Do the send without parking current task. - fn do_send_nb(&self, msg: T) -> Result<(), TrySendError> { - if let Some(inner) = &self.0 { - if inner.inc_num_messages().is_some() { - inner.queue_push_and_signal(msg); - return Ok(()); - } - } - - Err(TrySendError { err: SendError { kind: SendErrorKind::Disconnected }, val: msg }) - } - - /// Send a message on the channel. - /// - /// This method should only be called after `poll_ready` has been used to - /// verify that the channel is ready to receive a message. - pub fn start_send(&mut self, msg: T) -> Result<(), SendError> { - self.do_send_nb(msg).map_err(|e| e.err) - } - - /// Sends a message along this channel. 
- /// - /// This is an unbounded sender, so this function differs from `Sink::send` - /// by ensuring the return type reflects that the channel is always ready to - /// receive messages. - pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { - self.do_send_nb(msg) - } - - /// Returns whether the senders send to the same receiver. - pub fn same_receiver(&self, other: &Self) -> bool { - match (&self.0, &other.0) { - (Some(inner), Some(other)) => inner.same_receiver(other), - _ => false, - } - } - - /// Returns whether the sender send to this receiver. - pub fn is_connected_to(&self, receiver: &UnboundedReceiver) -> bool { - match (&self.0, &receiver.inner) { - (Some(inner), Some(receiver)) => inner.is_connected_to(receiver), - _ => false, - } - } - - /// Hashes the receiver into the provided hasher - pub fn hash_receiver(&self, hasher: &mut H) - where - H: std::hash::Hasher, - { - use std::hash::Hash; - - let ptr = self.0.as_ref().map(|inner| inner.ptr()); - ptr.hash(hasher); - } - - /// Return the number of messages in the queue or 0 if channel is disconnected. - pub fn len(&self) -> usize { - if let Some(sender) = &self.0 { - decode_state(sender.inner.state.load(SeqCst)).num_messages - } else { - 0 - } - } - - /// Return false is channel has no queued messages, true otherwise. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } -} - -impl Clone for Sender { - fn clone(&self) -> Self { - Self(self.0.clone()) - } -} - -impl Clone for UnboundedSender { - fn clone(&self) -> Self { - Self(self.0.clone()) - } -} - -impl Clone for UnboundedSenderInner { - fn clone(&self) -> Self { - // Since this atomic op isn't actually guarding any memory and we don't - // care about any orderings besides the ordering on the single atomic - // variable, a relaxed ordering is acceptable. 
- let mut curr = self.inner.num_senders.load(SeqCst); - - loop { - // If the maximum number of senders has been reached, then fail - if curr == MAX_BUFFER { - panic!("cannot clone `Sender` -- too many outstanding senders"); - } - - debug_assert!(curr < MAX_BUFFER); - - let next = curr + 1; - match self.inner.num_senders.compare_exchange(curr, next, SeqCst, SeqCst) { - Ok(_) => { - // The ABA problem doesn't matter here. We only care that the - // number of senders never exceeds the maximum. - return Self { inner: self.inner.clone() }; - } - Err(actual) => curr = actual, - } - } - } -} - -impl Clone for BoundedSenderInner { - fn clone(&self) -> Self { - // Since this atomic op isn't actually guarding any memory and we don't - // care about any orderings besides the ordering on the single atomic - // variable, a relaxed ordering is acceptable. - let mut curr = self.inner.num_senders.load(SeqCst); - - loop { - // If the maximum number of senders has been reached, then fail - if curr == self.inner.max_senders() { - panic!("cannot clone `Sender` -- too many outstanding senders"); - } - - debug_assert!(curr < self.inner.max_senders()); - - let next = curr + 1; - match self.inner.num_senders.compare_exchange(curr, next, SeqCst, SeqCst) { - Ok(_) => { - // The ABA problem doesn't matter here. We only care that the - // number of senders never exceeds the maximum. 
- return Self { - inner: self.inner.clone(), - sender_task: Arc::new(Mutex::new(SenderTask::new())), - maybe_parked: false, - }; - } - Err(actual) => curr = actual, - } - } - } -} - -impl Drop for UnboundedSenderInner { - fn drop(&mut self) { - // Ordering between variables don't matter here - let prev = self.inner.num_senders.fetch_sub(1, SeqCst); - - if prev == 1 { - self.close_channel(); - } - } -} - -impl Drop for BoundedSenderInner { - fn drop(&mut self) { - // Ordering between variables don't matter here - let prev = self.inner.num_senders.fetch_sub(1, SeqCst); - - if prev == 1 { - self.close_channel(); - } - } -} - -impl fmt::Debug for Sender { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Sender").field("closed", &self.is_closed()).finish() - } -} - -impl fmt::Debug for UnboundedSender { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("UnboundedSender").field("closed", &self.is_closed()).finish() - } -} - -/* - * - * ===== impl Receiver ===== - * - */ - -impl Receiver { - /// Closes the receiving half of a channel, without dropping it. - /// - /// This prevents any further messages from being sent on the channel while - /// still enabling the receiver to drain messages that are buffered. - pub fn close(&mut self) { - if let Some(inner) = &mut self.inner { - inner.set_closed(); - - // Wake up any threads waiting as they'll see that we've closed the - // channel and will continue on their merry way. - while let Some(task) = unsafe { inner.parked_queue.pop_spin() } { - task.lock().unwrap().notify(); - } - } - } - - /// Tries to receive the next message without notifying a context if empty. - /// - /// It is not recommended to call this function from inside of a future, - /// only when you've otherwise arranged to be notified when the channel is - /// no longer empty. 
- /// - /// This function returns: - /// * `Ok(Some(t))` when message is fetched - /// * `Ok(None)` when channel is closed and no messages left in the queue - /// * `Err(e)` when there are no messages available, but channel is not yet closed - pub fn try_next(&mut self) -> Result, TryRecvError> { - match self.next_message() { - Poll::Ready(msg) => Ok(msg), - Poll::Pending => Err(TryRecvError { _priv: () }), - } - } - - fn next_message(&mut self) -> Poll> { - let inner = match self.inner.as_mut() { - None => return Poll::Ready(None), - Some(inner) => inner, - }; - // Pop off a message - match unsafe { inner.message_queue.pop_spin() } { - Some(msg) => { - // If there are any parked task handles in the parked queue, - // pop one and unpark it. - self.unpark_one(); - - // Decrement number of messages - self.dec_num_messages(); - - Poll::Ready(Some(msg)) - } - None => { - let state = decode_state(inner.state.load(SeqCst)); - if state.is_closed() { - // If closed flag is set AND there are no pending messages - // it means end of stream - self.inner = None; - Poll::Ready(None) - } else { - // If queue is open, we need to return Pending - // to be woken up when new messages arrive. - // If queue is closed but num_messages is non-zero, - // it means that senders updated the state, - // but didn't put message to queue yet, - // so we need to park until sender unparks the task - // after queueing the message. - Poll::Pending - } - } - } - } - - // Unpark a single task handle if there is one pending in the parked queue - fn unpark_one(&mut self) { - if let Some(inner) = &mut self.inner { - if let Some(task) = unsafe { inner.parked_queue.pop_spin() } { - task.lock().unwrap().notify(); - } - } - } - - fn dec_num_messages(&self) { - if let Some(inner) = &self.inner { - // OPEN_MASK is highest bit, so it's unaffected by subtraction - // unless there's underflow, and we know there's no underflow - // because number of messages at this point is always > 0. 
- inner.state.fetch_sub(1, SeqCst); - } - } -} - -// The receiver does not ever take a Pin to the inner T -impl Unpin for Receiver {} - -impl FusedStream for Receiver { - fn is_terminated(&self) -> bool { - self.inner.is_none() - } -} - -impl Stream for Receiver { - type Item = T; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // Try to read a message off of the message queue. - match self.next_message() { - Poll::Ready(msg) => { - if msg.is_none() { - self.inner = None; - } - Poll::Ready(msg) - } - Poll::Pending => { - // There are no messages to read, in this case, park. - self.inner.as_ref().unwrap().recv_task.register(cx.waker()); - // Check queue again after parking to prevent race condition: - // a message could be added to the queue after previous `next_message` - // before `register` call. - self.next_message() - } - } - } - - fn size_hint(&self) -> (usize, Option) { - if let Some(inner) = &self.inner { - decode_state(inner.state.load(SeqCst)).size_hint() - } else { - (0, Some(0)) - } - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - // Drain the channel of all pending messages - self.close(); - if self.inner.is_some() { - loop { - match self.next_message() { - Poll::Ready(Some(_)) => {} - Poll::Ready(None) => break, - Poll::Pending => { - let state = decode_state(self.inner.as_ref().unwrap().state.load(SeqCst)); - - // If the channel is closed, then there is no need to park. - if state.is_closed() { - break; - } - - // TODO: Spinning isn't ideal, it might be worth - // investigating using a condvar or some other strategy - // here. That said, if this case is hit, then another thread - // is about to push the value into the queue and this isn't - // the only spinlock in the impl right now. 
- thread::yield_now(); - } - } - } - } - } -} - -impl fmt::Debug for Receiver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let closed = if let Some(ref inner) = self.inner { - decode_state(inner.state.load(SeqCst)).is_closed() - } else { - false - }; - - f.debug_struct("Receiver").field("closed", &closed).finish() - } -} - -impl UnboundedReceiver { - /// Closes the receiving half of a channel, without dropping it. - /// - /// This prevents any further messages from being sent on the channel while - /// still enabling the receiver to drain messages that are buffered. - pub fn close(&mut self) { - if let Some(inner) = &mut self.inner { - inner.set_closed(); - } - } - - /// Tries to receive the next message without notifying a context if empty. - /// - /// It is not recommended to call this function from inside of a future, - /// only when you've otherwise arranged to be notified when the channel is - /// no longer empty. - /// - /// This function returns: - /// * `Ok(Some(t))` when message is fetched - /// * `Ok(None)` when channel is closed and no messages left in the queue - /// * `Err(e)` when there are no messages available, but channel is not yet closed - pub fn try_next(&mut self) -> Result, TryRecvError> { - match self.next_message() { - Poll::Ready(msg) => Ok(msg), - Poll::Pending => Err(TryRecvError { _priv: () }), - } - } - - fn next_message(&mut self) -> Poll> { - let inner = match self.inner.as_mut() { - None => return Poll::Ready(None), - Some(inner) => inner, - }; - // Pop off a message - match unsafe { inner.message_queue.pop_spin() } { - Some(msg) => { - // Decrement number of messages - self.dec_num_messages(); - - Poll::Ready(Some(msg)) - } - None => { - let state = decode_state(inner.state.load(SeqCst)); - if state.is_closed() { - // If closed flag is set AND there are no pending messages - // it means end of stream - self.inner = None; - Poll::Ready(None) - } else { - // If queue is open, we need to return Pending - // to be 
woken up when new messages arrive. - // If queue is closed but num_messages is non-zero, - // it means that senders updated the state, - // but didn't put message to queue yet, - // so we need to park until sender unparks the task - // after queueing the message. - Poll::Pending - } - } - } - } - - fn dec_num_messages(&self) { - if let Some(inner) = &self.inner { - // OPEN_MASK is highest bit, so it's unaffected by subtraction - // unless there's underflow, and we know there's no underflow - // because number of messages at this point is always > 0. - inner.state.fetch_sub(1, SeqCst); - } - } -} - -impl FusedStream for UnboundedReceiver { - fn is_terminated(&self) -> bool { - self.inner.is_none() - } -} - -impl Stream for UnboundedReceiver { - type Item = T; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // Try to read a message off of the message queue. - match self.next_message() { - Poll::Ready(msg) => { - if msg.is_none() { - self.inner = None; - } - Poll::Ready(msg) - } - Poll::Pending => { - // There are no messages to read, in this case, park. - self.inner.as_ref().unwrap().recv_task.register(cx.waker()); - // Check queue again after parking to prevent race condition: - // a message could be added to the queue after previous `next_message` - // before `register` call. - self.next_message() - } - } - } - - fn size_hint(&self) -> (usize, Option) { - if let Some(inner) = &self.inner { - decode_state(inner.state.load(SeqCst)).size_hint() - } else { - (0, Some(0)) - } - } -} - -impl Drop for UnboundedReceiver { - fn drop(&mut self) { - // Drain the channel of all pending messages - self.close(); - if self.inner.is_some() { - loop { - match self.next_message() { - Poll::Ready(Some(_)) => {} - Poll::Ready(None) => break, - Poll::Pending => { - let state = decode_state(self.inner.as_ref().unwrap().state.load(SeqCst)); - - // If the channel is closed, then there is no need to park. 
- if state.is_closed() { - break; - } - - // TODO: Spinning isn't ideal, it might be worth - // investigating using a condvar or some other strategy - // here. That said, if this case is hit, then another thread - // is about to push the value into the queue and this isn't - // the only spinlock in the impl right now. - thread::yield_now(); - } - } - } - } - } -} - -impl fmt::Debug for UnboundedReceiver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let closed = if let Some(ref inner) = self.inner { - decode_state(inner.state.load(SeqCst)).is_closed() - } else { - false - }; - - f.debug_struct("Receiver").field("closed", &closed).finish() - } -} - -/* - * - * ===== impl Inner ===== - * - */ - -impl UnboundedInner { - // Clear `open` flag in the state, keep `num_messages` intact. - fn set_closed(&self) { - let curr = self.state.load(SeqCst); - if !decode_state(curr).is_open { - return; - } - - self.state.fetch_and(!OPEN_MASK, SeqCst); - } -} - -impl BoundedInner { - // The return value is such that the total number of messages that can be - // enqueued into the channel will never exceed MAX_CAPACITY - fn max_senders(&self) -> usize { - MAX_CAPACITY - self.buffer - } - - // Clear `open` flag in the state, keep `num_messages` intact. 
- fn set_closed(&self) { - let curr = self.state.load(SeqCst); - if !decode_state(curr).is_open { - return; - } - - self.state.fetch_and(!OPEN_MASK, SeqCst); - } -} - -unsafe impl Send for UnboundedInner {} -unsafe impl Sync for UnboundedInner {} - -unsafe impl Send for BoundedInner {} -unsafe impl Sync for BoundedInner {} - -impl State { - fn is_closed(&self) -> bool { - !self.is_open && self.num_messages == 0 - } - - fn size_hint(&self) -> (usize, Option) { - if self.is_open { - (self.num_messages, None) - } else { - (self.num_messages, Some(self.num_messages)) - } - } -} - -/* - * - * ===== Helpers ===== - * - */ - -fn decode_state(num: usize) -> State { - State { is_open: num & OPEN_MASK == OPEN_MASK, num_messages: num & MAX_CAPACITY } -} - -fn encode_state(state: &State) -> usize { - let mut num = state.num_messages; - - if state.is_open { - num |= OPEN_MASK; - } - - num -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/src/mpsc/queue.rs s390-tools-2.33.1/rust-vendor/futures-channel/src/mpsc/queue.rs --- s390-tools-2.31.0/rust-vendor/futures-channel/src/mpsc/queue.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/src/mpsc/queue.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,174 +0,0 @@ -/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are - * those of the authors and should not be interpreted as representing official - * policies, either expressed or implied, of Dmitry Vyukov. - */ - -//! A mostly lock-free multi-producer, single consumer queue for sending -//! messages between asynchronous tasks. -//! -//! The queue implementation is essentially the same one used for mpsc channels -//! in the standard library. -//! -//! Note that the current implementation of this queue has a caveat of the `pop` -//! method, and see the method for more information about it. Due to this -//! caveat, this queue may not be appropriate for all use-cases. - -// http://www.1024cores.net/home/lock-free-algorithms -// /queues/non-intrusive-mpsc-node-based-queue - -// NOTE: this implementation is lifted from the standard library and only -// slightly modified - -pub(super) use self::PopResult::*; - -use std::cell::UnsafeCell; -use std::ptr; -use std::sync::atomic::{AtomicPtr, Ordering}; -use std::thread; - -/// A result of the `pop` function. -pub(super) enum PopResult { - /// Some data has been popped - Data(T), - /// The queue is empty - Empty, - /// The queue is in an inconsistent state. 
Popping data should succeed, but - /// some pushers have yet to make enough progress in order allow a pop to - /// succeed. It is recommended that a pop() occur "in the near future" in - /// order to see if the sender has made progress or not - Inconsistent, -} - -struct Node { - next: AtomicPtr, - value: Option, -} - -/// The multi-producer single-consumer structure. This is not cloneable, but it -/// may be safely shared so long as it is guaranteed that there is only one -/// popper at a time (many pushers are allowed). -pub(super) struct Queue { - head: AtomicPtr>, - tail: UnsafeCell<*mut Node>, -} - -unsafe impl Send for Queue {} -unsafe impl Sync for Queue {} - -impl Node { - unsafe fn new(v: Option) -> *mut Self { - Box::into_raw(Box::new(Self { next: AtomicPtr::new(ptr::null_mut()), value: v })) - } -} - -impl Queue { - /// Creates a new queue that is safe to share among multiple producers and - /// one consumer. - pub(super) fn new() -> Self { - let stub = unsafe { Node::new(None) }; - Self { head: AtomicPtr::new(stub), tail: UnsafeCell::new(stub) } - } - - /// Pushes a new value onto this queue. - pub(super) fn push(&self, t: T) { - unsafe { - let n = Node::new(Some(t)); - let prev = self.head.swap(n, Ordering::AcqRel); - (*prev).next.store(n, Ordering::Release); - } - } - - /// Pops some data from this queue. - /// - /// Note that the current implementation means that this function cannot - /// return `Option`. It is possible for this queue to be in an - /// inconsistent state where many pushes have succeeded and completely - /// finished, but pops cannot return `Some(t)`. This inconsistent state - /// happens when a pusher is preempted at an inopportune moment. - /// - /// This inconsistent state means that this queue does indeed have data, but - /// it does not currently have access to it at this time. - /// - /// This function is unsafe because only one thread can call it at a time. 
- pub(super) unsafe fn pop(&self) -> PopResult { - let tail = *self.tail.get(); - let next = (*tail).next.load(Ordering::Acquire); - - if !next.is_null() { - *self.tail.get() = next; - assert!((*tail).value.is_none()); - assert!((*next).value.is_some()); - let ret = (*next).value.take().unwrap(); - drop(Box::from_raw(tail)); - return Data(ret); - } - - if self.head.load(Ordering::Acquire) == tail { - Empty - } else { - Inconsistent - } - } - - /// Pop an element similarly to `pop` function, but spin-wait on inconsistent - /// queue state instead of returning `Inconsistent`. - /// - /// This function is unsafe because only one thread can call it at a time. - pub(super) unsafe fn pop_spin(&self) -> Option { - loop { - match self.pop() { - Empty => return None, - Data(t) => return Some(t), - // Inconsistent means that there will be a message to pop - // in a short time. This branch can only be reached if - // values are being produced from another thread, so there - // are a few ways that we can deal with this: - // - // 1) Spin - // 2) thread::yield_now() - // 3) task::current().unwrap() & return Pending - // - // For now, thread::yield_now() is used, but it would - // probably be better to spin a few times then yield. 
- Inconsistent => { - thread::yield_now(); - } - } - } - } -} - -impl Drop for Queue { - fn drop(&mut self) { - unsafe { - let mut cur = *self.tail.get(); - while !cur.is_null() { - let next = (*cur).next.load(Ordering::Relaxed); - drop(Box::from_raw(cur)); - cur = next; - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/src/mpsc/sink_impl.rs s390-tools-2.33.1/rust-vendor/futures-channel/src/mpsc/sink_impl.rs --- s390-tools-2.31.0/rust-vendor/futures-channel/src/mpsc/sink_impl.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/src/mpsc/sink_impl.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,73 +0,0 @@ -use super::{SendError, Sender, TrySendError, UnboundedSender}; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; -use std::pin::Pin; - -impl Sink for Sender { - type Error = SendError; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - (*self).poll_ready(cx) - } - - fn start_send(mut self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { - (*self).start_send(msg) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match (*self).poll_ready(cx) { - Poll::Ready(Err(ref e)) if e.is_disconnected() => { - // If the receiver disconnected, we consider the sink to be flushed. 
- Poll::Ready(Ok(())) - } - x => x, - } - } - - fn poll_close(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.disconnect(); - Poll::Ready(Ok(())) - } -} - -impl Sink for UnboundedSender { - type Error = SendError; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Self::poll_ready(&*self, cx) - } - - fn start_send(mut self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { - Self::start_send(&mut *self, msg) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.disconnect(); - Poll::Ready(Ok(())) - } -} - -impl Sink for &UnboundedSender { - type Error = SendError; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - UnboundedSender::poll_ready(*self, cx) - } - - fn start_send(self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { - self.unbounded_send(msg).map_err(TrySendError::into_send_error) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.close_channel(); - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-channel/src/oneshot.rs s390-tools-2.33.1/rust-vendor/futures-channel/src/oneshot.rs --- s390-tools-2.31.0/rust-vendor/futures-channel/src/oneshot.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-channel/src/oneshot.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,488 +0,0 @@ -//! A channel for sending a single message between asynchronous tasks. -//! -//! This is a single-producer, single-consumer channel. 
- -use alloc::sync::Arc; -use core::fmt; -use core::pin::Pin; -use core::sync::atomic::AtomicBool; -use core::sync::atomic::Ordering::SeqCst; -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll, Waker}; - -use crate::lock::Lock; - -/// A future for a value that will be provided by another asynchronous task. -/// -/// This is created by the [`channel`] function. -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Receiver { - inner: Arc>, -} - -/// A means of transmitting a single value to another task. -/// -/// This is created by the [`channel`] function. -pub struct Sender { - inner: Arc>, -} - -// The channels do not ever project Pin to the inner T -impl Unpin for Receiver {} -impl Unpin for Sender {} - -/// Internal state of the `Receiver`/`Sender` pair above. This is all used as -/// the internal synchronization between the two for send/recv operations. -struct Inner { - /// Indicates whether this oneshot is complete yet. This is filled in both - /// by `Sender::drop` and by `Receiver::drop`, and both sides interpret it - /// appropriately. - /// - /// For `Receiver`, if this is `true`, then it's guaranteed that `data` is - /// unlocked and ready to be inspected. - /// - /// For `Sender` if this is `true` then the oneshot has gone away and it - /// can return ready from `poll_canceled`. - complete: AtomicBool, - - /// The actual data being transferred as part of this `Receiver`. This is - /// filled in by `Sender::complete` and read by `Receiver::poll`. - /// - /// Note that this is protected by `Lock`, but it is in theory safe to - /// replace with an `UnsafeCell` as it's actually protected by `complete` - /// above. I wouldn't recommend doing this, however, unless someone is - /// supremely confident in the various atomic orderings here and there. - data: Lock>, - - /// Field to store the task which is blocked in `Receiver::poll`. 
- /// - /// This is filled in when a oneshot is polled but not ready yet. Note that - /// the `Lock` here, unlike in `data` above, is important to resolve races. - /// Both the `Receiver` and the `Sender` halves understand that if they - /// can't acquire the lock then some important interference is happening. - rx_task: Lock>, - - /// Like `rx_task` above, except for the task blocked in - /// `Sender::poll_canceled`. Additionally, `Lock` cannot be `UnsafeCell`. - tx_task: Lock>, -} - -/// Creates a new one-shot channel for sending a single value across asynchronous tasks. -/// -/// The channel works for a spsc (single-producer, single-consumer) scheme. -/// -/// This function is similar to Rust's channel constructor found in the standard -/// library. Two halves are returned, the first of which is a `Sender` handle, -/// used to signal the end of a computation and provide its value. The second -/// half is a `Receiver` which implements the `Future` trait, resolving to the -/// value that was given to the `Sender` handle. -/// -/// Each half can be separately owned and sent across tasks. -/// -/// # Examples -/// -/// ``` -/// use futures::channel::oneshot; -/// use std::{thread, time::Duration}; -/// -/// let (sender, receiver) = oneshot::channel::(); -/// -/// thread::spawn(|| { -/// println!("THREAD: sleeping zzz..."); -/// thread::sleep(Duration::from_millis(1000)); -/// println!("THREAD: i'm awake! 
sending."); -/// sender.send(3).unwrap(); -/// }); -/// -/// println!("MAIN: doing some useful stuff"); -/// -/// futures::executor::block_on(async { -/// println!("MAIN: waiting for msg..."); -/// println!("MAIN: got: {:?}", receiver.await) -/// }); -/// ``` -pub fn channel() -> (Sender, Receiver) { - let inner = Arc::new(Inner::new()); - let receiver = Receiver { inner: inner.clone() }; - let sender = Sender { inner }; - (sender, receiver) -} - -impl Inner { - fn new() -> Self { - Self { - complete: AtomicBool::new(false), - data: Lock::new(None), - rx_task: Lock::new(None), - tx_task: Lock::new(None), - } - } - - fn send(&self, t: T) -> Result<(), T> { - if self.complete.load(SeqCst) { - return Err(t); - } - - // Note that this lock acquisition may fail if the receiver - // is closed and sets the `complete` flag to `true`, whereupon - // the receiver may call `poll()`. - if let Some(mut slot) = self.data.try_lock() { - assert!(slot.is_none()); - *slot = Some(t); - drop(slot); - - // If the receiver called `close()` between the check at the - // start of the function, and the lock being released, then - // the receiver may not be around to receive it, so try to - // pull it back out. - if self.complete.load(SeqCst) { - // If lock acquisition fails, then receiver is actually - // receiving it, so we're good. - if let Some(mut slot) = self.data.try_lock() { - if let Some(t) = slot.take() { - return Err(t); - } - } - } - Ok(()) - } else { - // Must have been closed - Err(t) - } - } - - fn poll_canceled(&self, cx: &mut Context<'_>) -> Poll<()> { - // Fast path up first, just read the flag and see if our other half is - // gone. This flag is set both in our destructor and the oneshot - // destructor, but our destructor hasn't run yet so if it's set then the - // oneshot is gone. 
- if self.complete.load(SeqCst) { - return Poll::Ready(()); - } - - // If our other half is not gone then we need to park our current task - // and move it into the `tx_task` slot to get notified when it's - // actually gone. - // - // If `try_lock` fails, then the `Receiver` is in the process of using - // it, so we can deduce that it's now in the process of going away and - // hence we're canceled. If it succeeds then we just store our handle. - // - // Crucially we then check `complete` *again* before we return. - // While we were storing our handle inside `tx_task` the - // `Receiver` may have been dropped. The first thing it does is set the - // flag, and if it fails to acquire the lock it assumes that we'll see - // the flag later on. So... we then try to see the flag later on! - let handle = cx.waker().clone(); - match self.tx_task.try_lock() { - Some(mut p) => *p = Some(handle), - None => return Poll::Ready(()), - } - if self.complete.load(SeqCst) { - Poll::Ready(()) - } else { - Poll::Pending - } - } - - fn is_canceled(&self) -> bool { - self.complete.load(SeqCst) - } - - fn drop_tx(&self) { - // Flag that we're a completed `Sender` and try to wake up a receiver. - // Whether or not we actually stored any data will get picked up and - // translated to either an item or cancellation. - // - // Note that if we fail to acquire the `rx_task` lock then that means - // we're in one of two situations: - // - // 1. The receiver is trying to block in `poll` - // 2. The receiver is being dropped - // - // In the first case it'll check the `complete` flag after it's done - // blocking to see if it succeeded. In the latter case we don't need to - // wake up anyone anyway. So in both cases it's ok to ignore the `None` - // case of `try_lock` and bail out. - // - // The first case crucially depends on `Lock` using `SeqCst` ordering - // under the hood. 
If it instead used `Release` / `Acquire` ordering, - // then it would not necessarily synchronize with `inner.complete` - // and deadlock might be possible, as was observed in - // https://github.com/rust-lang/futures-rs/pull/219. - self.complete.store(true, SeqCst); - - if let Some(mut slot) = self.rx_task.try_lock() { - if let Some(task) = slot.take() { - drop(slot); - task.wake(); - } - } - - // If we registered a task for cancel notification drop it to reduce - // spurious wakeups - if let Some(mut slot) = self.tx_task.try_lock() { - drop(slot.take()); - } - } - - fn close_rx(&self) { - // Flag our completion and then attempt to wake up the sender if it's - // blocked. See comments in `drop` below for more info - self.complete.store(true, SeqCst); - if let Some(mut handle) = self.tx_task.try_lock() { - if let Some(task) = handle.take() { - drop(handle); - task.wake() - } - } - } - - fn try_recv(&self) -> Result, Canceled> { - // If we're complete, either `::close_rx` or `::drop_tx` was called. - // We can assume a successful send if data is present. - if self.complete.load(SeqCst) { - if let Some(mut slot) = self.data.try_lock() { - if let Some(data) = slot.take() { - return Ok(Some(data)); - } - } - Err(Canceled) - } else { - Ok(None) - } - } - - fn recv(&self, cx: &mut Context<'_>) -> Poll> { - // Check to see if some data has arrived. If it hasn't then we need to - // block our task. - // - // Note that the acquisition of the `rx_task` lock might fail below, but - // the only situation where this can happen is during `Sender::drop` - // when we are indeed completed already. If that's happening then we - // know we're completed so keep going. 
- let done = if self.complete.load(SeqCst) { - true - } else { - let task = cx.waker().clone(); - match self.rx_task.try_lock() { - Some(mut slot) => { - *slot = Some(task); - false - } - None => true, - } - }; - - // If we're `done` via one of the paths above, then look at the data and - // figure out what the answer is. If, however, we stored `rx_task` - // successfully above we need to check again if we're completed in case - // a message was sent while `rx_task` was locked and couldn't notify us - // otherwise. - // - // If we're not done, and we're not complete, though, then we've - // successfully blocked our task and we return `Pending`. - if done || self.complete.load(SeqCst) { - // If taking the lock fails, the sender will realise that the we're - // `done` when it checks the `complete` flag on the way out, and - // will treat the send as a failure. - if let Some(mut slot) = self.data.try_lock() { - if let Some(data) = slot.take() { - return Poll::Ready(Ok(data)); - } - } - Poll::Ready(Err(Canceled)) - } else { - Poll::Pending - } - } - - fn drop_rx(&self) { - // Indicate to the `Sender` that we're done, so any future calls to - // `poll_canceled` are weeded out. - self.complete.store(true, SeqCst); - - // If we've blocked a task then there's no need for it to stick around, - // so we need to drop it. If this lock acquisition fails, though, then - // it's just because our `Sender` is trying to take the task, so we - // let them take care of that. - if let Some(mut slot) = self.rx_task.try_lock() { - let task = slot.take(); - drop(slot); - drop(task); - } - - // Finally, if our `Sender` wants to get notified of us going away, it - // would have stored something in `tx_task`. Here we try to peel that - // out and unpark it. - // - // Note that the `try_lock` here may fail, but only if the `Sender` is - // in the process of filling in the task. If that happens then we - // already flagged `complete` and they'll pick that up above. 
- if let Some(mut handle) = self.tx_task.try_lock() { - if let Some(task) = handle.take() { - drop(handle); - task.wake() - } - } - } -} - -impl Sender { - /// Completes this oneshot with a successful result. - /// - /// This function will consume `self` and indicate to the other end, the - /// [`Receiver`], that the value provided is the result of the computation - /// this represents. - /// - /// If the value is successfully enqueued for the remote end to receive, - /// then `Ok(())` is returned. If the receiving end was dropped before - /// this function was called, however, then `Err(t)` is returned. - pub fn send(self, t: T) -> Result<(), T> { - self.inner.send(t) - } - - /// Polls this `Sender` half to detect whether its associated - /// [`Receiver`] has been dropped. - /// - /// # Return values - /// - /// If `Ready(())` is returned then the associated `Receiver` has been - /// dropped, which means any work required for sending should be canceled. - /// - /// If `Pending` is returned then the associated `Receiver` is still - /// alive and may be able to receive a message if sent. The current task, - /// however, is scheduled to receive a notification if the corresponding - /// `Receiver` goes away. - pub fn poll_canceled(&mut self, cx: &mut Context<'_>) -> Poll<()> { - self.inner.poll_canceled(cx) - } - - /// Creates a future that resolves when this `Sender`'s corresponding - /// [`Receiver`] half has hung up. - /// - /// This is a utility wrapping [`poll_canceled`](Sender::poll_canceled) - /// to expose a [`Future`]. - pub fn cancellation(&mut self) -> Cancellation<'_, T> { - Cancellation { inner: self } - } - - /// Tests to see whether this `Sender`'s corresponding `Receiver` - /// has been dropped. - /// - /// Unlike [`poll_canceled`](Sender::poll_canceled), this function does not - /// enqueue a task for wakeup upon cancellation, but merely reports the - /// current state, which may be subject to concurrent modification. 
- pub fn is_canceled(&self) -> bool { - self.inner.is_canceled() - } - - /// Tests to see whether this `Sender` is connected to the given `Receiver`. That is, whether - /// they were created by the same call to `channel`. - pub fn is_connected_to(&self, receiver: &Receiver) -> bool { - Arc::ptr_eq(&self.inner, &receiver.inner) - } -} - -impl Drop for Sender { - fn drop(&mut self) { - self.inner.drop_tx() - } -} - -impl fmt::Debug for Sender { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Sender").field("complete", &self.inner.complete).finish() - } -} - -/// A future that resolves when the receiving end of a channel has hung up. -/// -/// This is an `.await`-friendly interface around [`poll_canceled`](Sender::poll_canceled). -#[must_use = "futures do nothing unless you `.await` or poll them"] -#[derive(Debug)] -pub struct Cancellation<'a, T> { - inner: &'a mut Sender, -} - -impl Future for Cancellation<'_, T> { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - self.inner.poll_canceled(cx) - } -} - -/// Error returned from a [`Receiver`] when the corresponding [`Sender`] is -/// dropped. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Canceled; - -impl fmt::Display for Canceled { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "oneshot canceled") - } -} - -#[cfg(feature = "std")] -impl std::error::Error for Canceled {} - -impl Receiver { - /// Gracefully close this receiver, preventing any subsequent attempts to - /// send to it. - /// - /// Any `send` operation which happens after this method returns is - /// guaranteed to fail. After calling this method, you can use - /// [`Receiver::poll`](core::future::Future::poll) to determine whether a - /// message had previously been sent. - pub fn close(&mut self) { - self.inner.close_rx() - } - - /// Attempts to receive a message outside of the context of a task. 
- /// - /// Does not schedule a task wakeup or have any other side effects. - /// - /// A return value of `None` must be considered immediately stale (out of - /// date) unless [`close`](Receiver::close) has been called first. - /// - /// Returns an error if the sender was dropped. - pub fn try_recv(&mut self) -> Result, Canceled> { - self.inner.try_recv() - } -} - -impl Future for Receiver { - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.inner.recv(cx) - } -} - -impl FusedFuture for Receiver { - fn is_terminated(&self) -> bool { - if self.inner.complete.load(SeqCst) { - if let Some(slot) = self.inner.data.try_lock() { - if slot.is_some() { - return false; - } - } - true - } else { - false - } - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - self.inner.drop_rx() - } -} - -impl fmt::Debug for Receiver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Receiver").field("complete", &self.inner.complete).finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/build.rs s390-tools-2.33.1/rust-vendor/futures-core/build.rs --- s390-tools-2.31.0/rust-vendor/futures-core/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -// The rustc-cfg listed below are considered public API, but it is *unstable* -// and outside of the normal semver guarantees: -// -// - `futures_no_atomic_cas` -// Assume the target does *not* support atomic CAS operations. -// This is usually detected automatically by the build script, but you may -// need to enable it manually when building for custom targets or using -// non-cargo build systems that don't run the build script. -// -// With the exceptions mentioned above, the rustc-cfg emitted by the build -// script are *not* public API. 
- -#![warn(rust_2018_idioms, single_use_lifetimes)] - -use std::env; - -include!("no_atomic_cas.rs"); - -fn main() { - let target = match env::var("TARGET") { - Ok(target) => target, - Err(e) => { - println!( - "cargo:warning={}: unable to get TARGET environment variable: {}", - env!("CARGO_PKG_NAME"), - e - ); - return; - } - }; - - // Note that this is `no_*`, not `has_*`. This allows treating - // `cfg(target_has_atomic = "ptr")` as true when the build script doesn't - // run. This is needed for compatibility with non-cargo build systems that - // don't run the build script. - if NO_ATOMIC_CAS.contains(&&*target) { - println!("cargo:rustc-cfg=futures_no_atomic_cas"); - } - - println!("cargo:rerun-if-changed=no_atomic_cas.rs"); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/futures-core/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/futures-core/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/Cargo.toml s390-tools-2.33.1/rust-vendor/futures-core/Cargo.toml --- s390-tools-2.31.0/rust-vendor/futures-core/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,45 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). 
-# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.36" -name = "futures-core" -version = "0.3.29" -description = """ -The core traits and types in for the `futures` library. -""" -homepage = "https://rust-lang.github.io/futures-rs" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/futures-rs" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[dependencies.portable-atomic] -version = "1.3" -features = ["require-cas"] -optional = true -default-features = false - -[dev-dependencies] - -[features] -alloc = [] -cfg-target-has-atomic = [] -default = ["std"] -std = ["alloc"] -unstable = [] diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/futures-core/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/futures-core/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/LICENSE-MIT s390-tools-2.33.1/rust-vendor/futures-core/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/futures-core/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/no_atomic_cas.rs s390-tools-2.33.1/rust-vendor/futures-core/no_atomic_cas.rs --- s390-tools-2.31.0/rust-vendor/futures-core/no_atomic_cas.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/no_atomic_cas.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,17 +0,0 @@ -// This file is @generated by no_atomic_cas.sh. -// It is not intended for manual editing. - -const NO_ATOMIC_CAS: &[&str] = &[ - "armv4t-none-eabi", - "armv5te-none-eabi", - "avr-unknown-gnu-atmega328", - "bpfeb-unknown-none", - "bpfel-unknown-none", - "msp430-none-elf", - "riscv32i-unknown-none-elf", - "riscv32im-unknown-none-elf", - "riscv32imc-unknown-none-elf", - "thumbv4t-none-eabi", - "thumbv5te-none-eabi", - "thumbv6m-none-eabi", -]; diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/README.md s390-tools-2.33.1/rust-vendor/futures-core/README.md --- s390-tools-2.31.0/rust-vendor/futures-core/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -# futures-core - -The core traits and types in for the `futures` library. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -futures-core = "0.3" -``` - -The current `futures-core` requires Rust 1.36 or later. - -## License - -Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or -[MIT license](LICENSE-MIT) at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. 
diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/src/future.rs s390-tools-2.33.1/rust-vendor/futures-core/src/future.rs --- s390-tools-2.31.0/rust-vendor/futures-core/src/future.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/src/future.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,103 +0,0 @@ -//! Futures. - -use core::ops::DerefMut; -use core::pin::Pin; -use core::task::{Context, Poll}; - -#[doc(no_inline)] -pub use core::future::Future; - -/// An owned dynamically typed [`Future`] for use in cases where you can't -/// statically type your result or need to add some indirection. -#[cfg(feature = "alloc")] -pub type BoxFuture<'a, T> = Pin + Send + 'a>>; - -/// `BoxFuture`, but without the `Send` requirement. -#[cfg(feature = "alloc")] -pub type LocalBoxFuture<'a, T> = Pin + 'a>>; - -/// A future which tracks whether or not the underlying future -/// should no longer be polled. -/// -/// `is_terminated` will return `true` if a future should no longer be polled. -/// Usually, this state occurs after `poll` (or `try_poll`) returned -/// `Poll::Ready`. However, `is_terminated` may also return `true` if a future -/// has become inactive and can no longer make progress and should be ignored -/// or dropped rather than being `poll`ed again. -pub trait FusedFuture: Future { - /// Returns `true` if the underlying future should no longer be polled. - fn is_terminated(&self) -> bool; -} - -impl FusedFuture for &mut F { - fn is_terminated(&self) -> bool { - ::is_terminated(&**self) - } -} - -impl

FusedFuture for Pin

-where - P: DerefMut + Unpin, - P::Target: FusedFuture, -{ - fn is_terminated(&self) -> bool { - ::is_terminated(&**self) - } -} - -mod private_try_future { - use super::Future; - - pub trait Sealed {} - - impl Sealed for F where F: ?Sized + Future> {} -} - -/// A convenience for futures that return `Result` values that includes -/// a variety of adapters tailored to such futures. -pub trait TryFuture: Future + private_try_future::Sealed { - /// The type of successful values yielded by this future - type Ok; - - /// The type of failures yielded by this future - type Error; - - /// Poll this `TryFuture` as if it were a `Future`. - /// - /// This method is a stopgap for a compiler limitation that prevents us from - /// directly inheriting from the `Future` trait; in the future it won't be - /// needed. - fn try_poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; -} - -impl TryFuture for F -where - F: ?Sized + Future>, -{ - type Ok = T; - type Error = E; - - #[inline] - fn try_poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.poll(cx) - } -} - -#[cfg(feature = "alloc")] -mod if_alloc { - use super::*; - use alloc::boxed::Box; - - impl FusedFuture for Box { - fn is_terminated(&self) -> bool { - ::is_terminated(&**self) - } - } - - #[cfg(feature = "std")] - impl FusedFuture for std::panic::AssertUnwindSafe { - fn is_terminated(&self) -> bool { - ::is_terminated(&**self) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/src/lib.rs s390-tools-2.33.1/rust-vendor/futures-core/src/lib.rs --- s390-tools-2.31.0/rust-vendor/futures-core/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -//! Core traits and types for asynchronous operations in Rust. 
- -#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub)] -// It cannot be included in the published code because this lints have false positives in the minimum required version. -#![cfg_attr(test, warn(single_use_lifetimes))] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] - -#[cfg(feature = "alloc")] -extern crate alloc; - -pub mod future; -#[doc(no_inline)] -pub use self::future::{FusedFuture, Future, TryFuture}; - -pub mod stream; -#[doc(no_inline)] -pub use self::stream::{FusedStream, Stream, TryStream}; - -#[macro_use] -pub mod task; diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/src/stream.rs s390-tools-2.33.1/rust-vendor/futures-core/src/stream.rs --- s390-tools-2.31.0/rust-vendor/futures-core/src/stream.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/src/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,235 +0,0 @@ -//! Asynchronous streams. - -use core::ops::DerefMut; -use core::pin::Pin; -use core::task::{Context, Poll}; - -/// An owned dynamically typed [`Stream`] for use in cases where you can't -/// statically type your result or need to add some indirection. -#[cfg(feature = "alloc")] -pub type BoxStream<'a, T> = Pin + Send + 'a>>; - -/// `BoxStream`, but without the `Send` requirement. -#[cfg(feature = "alloc")] -pub type LocalBoxStream<'a, T> = Pin + 'a>>; - -/// A stream of values produced asynchronously. -/// -/// If `Future` is an asynchronous version of `T`, then `Stream` is an asynchronous version of `Iterator`. A stream -/// represents a sequence of value-producing events that occur asynchronously to -/// the caller. -/// -/// The trait is modeled after `Future`, but allows `poll_next` to be called -/// even after a value has been produced, yielding `None` once the stream has -/// been fully exhausted. 
-#[must_use = "streams do nothing unless polled"] -pub trait Stream { - /// Values yielded by the stream. - type Item; - - /// Attempt to pull out the next value of this stream, registering the - /// current task for wakeup if the value is not yet available, and returning - /// `None` if the stream is exhausted. - /// - /// # Return value - /// - /// There are several possible return values, each indicating a distinct - /// stream state: - /// - /// - `Poll::Pending` means that this stream's next value is not ready - /// yet. Implementations will ensure that the current task will be notified - /// when the next value may be ready. - /// - /// - `Poll::Ready(Some(val))` means that the stream has successfully - /// produced a value, `val`, and may produce further values on subsequent - /// `poll_next` calls. - /// - /// - `Poll::Ready(None)` means that the stream has terminated, and - /// `poll_next` should not be invoked again. - /// - /// # Panics - /// - /// Once a stream has finished (returned `Ready(None)` from `poll_next`), calling its - /// `poll_next` method again may panic, block forever, or cause other kinds of - /// problems; the `Stream` trait places no requirements on the effects of - /// such a call. However, as the `poll_next` method is not marked `unsafe`, - /// Rust's usual rules apply: calls must never cause undefined behavior - /// (memory corruption, incorrect use of `unsafe` functions, or the like), - /// regardless of the stream's state. - /// - /// If this is difficult to guard against then the [`fuse`] adapter can be used - /// to ensure that `poll_next` always returns `Ready(None)` in subsequent - /// calls. - /// - /// [`fuse`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.fuse - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Returns the bounds on the remaining length of the stream. 
- /// - /// Specifically, `size_hint()` returns a tuple where the first element - /// is the lower bound, and the second element is the upper bound. - /// - /// The second half of the tuple that is returned is an [`Option`]`<`[`usize`]`>`. - /// A [`None`] here means that either there is no known upper bound, or the - /// upper bound is larger than [`usize`]. - /// - /// # Implementation notes - /// - /// It is not enforced that a stream implementation yields the declared - /// number of elements. A buggy stream may yield less than the lower bound - /// or more than the upper bound of elements. - /// - /// `size_hint()` is primarily intended to be used for optimizations such as - /// reserving space for the elements of the stream, but must not be - /// trusted to e.g., omit bounds checks in unsafe code. An incorrect - /// implementation of `size_hint()` should not lead to memory safety - /// violations. - /// - /// That said, the implementation should provide a correct estimation, - /// because otherwise it would be a violation of the trait's protocol. - /// - /// The default implementation returns `(0, `[`None`]`)` which is correct for any - /// stream. - #[inline] - fn size_hint(&self) -> (usize, Option) { - (0, None) - } -} - -impl Stream for &mut S { - type Item = S::Item; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - S::poll_next(Pin::new(&mut **self), cx) - } - - fn size_hint(&self) -> (usize, Option) { - (**self).size_hint() - } -} - -impl

Stream for Pin

-where - P: DerefMut + Unpin, - P::Target: Stream, -{ - type Item = ::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_next(cx) - } - - fn size_hint(&self) -> (usize, Option) { - (**self).size_hint() - } -} - -/// A stream which tracks whether or not the underlying stream -/// should no longer be polled. -/// -/// `is_terminated` will return `true` if a future should no longer be polled. -/// Usually, this state occurs after `poll_next` (or `try_poll_next`) returned -/// `Poll::Ready(None)`. However, `is_terminated` may also return `true` if a -/// stream has become inactive and can no longer make progress and should be -/// ignored or dropped rather than being polled again. -pub trait FusedStream: Stream { - /// Returns `true` if the stream should no longer be polled. - fn is_terminated(&self) -> bool; -} - -impl FusedStream for &mut F { - fn is_terminated(&self) -> bool { - ::is_terminated(&**self) - } -} - -impl

FusedStream for Pin

-where - P: DerefMut + Unpin, - P::Target: FusedStream, -{ - fn is_terminated(&self) -> bool { - ::is_terminated(&**self) - } -} - -mod private_try_stream { - use super::Stream; - - pub trait Sealed {} - - impl Sealed for S where S: ?Sized + Stream> {} -} - -/// A convenience for streams that return `Result` values that includes -/// a variety of adapters tailored to such futures. -pub trait TryStream: Stream + private_try_stream::Sealed { - /// The type of successful values yielded by this future - type Ok; - - /// The type of failures yielded by this future - type Error; - - /// Poll this `TryStream` as if it were a `Stream`. - /// - /// This method is a stopgap for a compiler limitation that prevents us from - /// directly inheriting from the `Stream` trait; in the future it won't be - /// needed. - fn try_poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>; -} - -impl TryStream for S -where - S: ?Sized + Stream>, -{ - type Ok = T; - type Error = E; - - fn try_poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.poll_next(cx) - } -} - -#[cfg(feature = "alloc")] -mod if_alloc { - use super::*; - use alloc::boxed::Box; - - impl Stream for Box { - type Item = S::Item; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_next(cx) - } - - fn size_hint(&self) -> (usize, Option) { - (**self).size_hint() - } - } - - #[cfg(feature = "std")] - impl Stream for std::panic::AssertUnwindSafe { - type Item = S::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - unsafe { self.map_unchecked_mut(|x| &mut x.0) }.poll_next(cx) - } - - fn size_hint(&self) -> (usize, Option) { - self.0.size_hint() - } - } - - impl FusedStream for Box { - fn is_terminated(&self) -> bool { - ::is_terminated(&**self) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/src/task/__internal/atomic_waker.rs 
s390-tools-2.33.1/rust-vendor/futures-core/src/task/__internal/atomic_waker.rs --- s390-tools-2.31.0/rust-vendor/futures-core/src/task/__internal/atomic_waker.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/src/task/__internal/atomic_waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,421 +0,0 @@ -use core::cell::UnsafeCell; -use core::fmt; -use core::task::Waker; - -use atomic::AtomicUsize; -use atomic::Ordering::{AcqRel, Acquire, Release}; - -#[cfg(feature = "portable-atomic")] -use portable_atomic as atomic; - -#[cfg(not(feature = "portable-atomic"))] -use core::sync::atomic; - -/// A synchronization primitive for task wakeup. -/// -/// Sometimes the task interested in a given event will change over time. -/// An `AtomicWaker` can coordinate concurrent notifications with the consumer -/// potentially "updating" the underlying task to wake up. This is useful in -/// scenarios where a computation completes in another thread and wants to -/// notify the consumer, but the consumer is in the process of being migrated to -/// a new logical task. -/// -/// Consumers should call `register` before checking the result of a computation -/// and producers should call `wake` after producing the computation (this -/// differs from the usual `thread::park` pattern). It is also permitted for -/// `wake` to be called **before** `register`. This results in a no-op. -/// -/// A single `AtomicWaker` may be reused for any number of calls to `register` or -/// `wake`. -/// -/// # Memory ordering -/// -/// Calling `register` "acquires" all memory "released" by calls to `wake` -/// before the call to `register`. Later calls to `wake` will wake the -/// registered waker (on contention this wake might be triggered in `register`). -/// -/// For concurrent calls to `register` (should be avoided) the ordering is only -/// guaranteed for the winning call. 
-/// -/// # Examples -/// -/// Here is a simple example providing a `Flag` that can be signalled manually -/// when it is ready. -/// -/// ``` -/// use futures::future::Future; -/// use futures::task::{Context, Poll, AtomicWaker}; -/// use std::sync::Arc; -/// use std::sync::atomic::AtomicBool; -/// use std::sync::atomic::Ordering::Relaxed; -/// use std::pin::Pin; -/// -/// struct Inner { -/// waker: AtomicWaker, -/// set: AtomicBool, -/// } -/// -/// #[derive(Clone)] -/// struct Flag(Arc); -/// -/// impl Flag { -/// pub fn new() -> Self { -/// Self(Arc::new(Inner { -/// waker: AtomicWaker::new(), -/// set: AtomicBool::new(false), -/// })) -/// } -/// -/// pub fn signal(&self) { -/// self.0.set.store(true, Relaxed); -/// self.0.waker.wake(); -/// } -/// } -/// -/// impl Future for Flag { -/// type Output = (); -/// -/// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { -/// // quick check to avoid registration if already done. -/// if self.0.set.load(Relaxed) { -/// return Poll::Ready(()); -/// } -/// -/// self.0.waker.register(cx.waker()); -/// -/// // Need to check condition **after** `register` to avoid a race -/// // condition that would result in lost notifications. -/// if self.0.set.load(Relaxed) { -/// Poll::Ready(()) -/// } else { -/// Poll::Pending -/// } -/// } -/// } -/// ``` -pub struct AtomicWaker { - state: AtomicUsize, - waker: UnsafeCell>, -} - -// `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell -// stores a `Waker` value produced by calls to `register` and many threads can -// race to take the waker (to wake it) by calling `wake`. -// -// If a new `Waker` instance is produced by calling `register` before an -// existing one is consumed, then the existing one is overwritten. -// -// While `AtomicWaker` is single-producer, the implementation ensures memory -// safety. In the event of concurrent calls to `register`, there will be a -// single winner whose waker will get stored in the cell. 
The losers will not -// have their tasks woken. As such, callers should ensure to add synchronization -// to calls to `register`. -// -// The implementation uses a single `AtomicUsize` value to coordinate access to -// the `Waker` cell. There are two bits that are operated on independently. -// These are represented by `REGISTERING` and `WAKING`. -// -// The `REGISTERING` bit is set when a producer enters the critical section. The -// `WAKING` bit is set when a consumer enters the critical section. Neither bit -// being set is represented by `WAITING`. -// -// A thread obtains an exclusive lock on the waker cell by transitioning the -// state from `WAITING` to `REGISTERING` or `WAKING`, depending on the operation -// the thread wishes to perform. When this transition is made, it is guaranteed -// that no other thread will access the waker cell. -// -// # Registering -// -// On a call to `register`, an attempt to transition the state from WAITING to -// REGISTERING is made. On success, the caller obtains a lock on the waker cell. -// -// If the lock is obtained, then the thread sets the waker cell to the waker -// provided as an argument. Then it attempts to transition the state back from -// `REGISTERING` -> `WAITING`. -// -// If this transition is successful, then the registering process is complete -// and the next call to `wake` will observe the waker. -// -// If the transition fails, then there was a concurrent call to `wake` that was -// unable to access the waker cell (due to the registering thread holding the -// lock). To handle this, the registering thread removes the waker it just set -// from the cell and calls `wake` on it. This call to wake represents the -// attempt to wake by the other thread (that set the `WAKING` bit). The state is -// then transitioned from `REGISTERING | WAKING` back to `WAITING`. This -// transition must succeed because, at this point, the state cannot be -// transitioned by another thread. 
-// -// # Waking -// -// On a call to `wake`, an attempt to transition the state from `WAITING` to -// `WAKING` is made. On success, the caller obtains a lock on the waker cell. -// -// If the lock is obtained, then the thread takes ownership of the current value -// in the waker cell, and calls `wake` on it. The state is then transitioned -// back to `WAITING`. This transition must succeed as, at this point, the state -// cannot be transitioned by another thread. -// -// If the thread is unable to obtain the lock, the `WAKING` bit is still. This -// is because it has either been set by the current thread but the previous -// value included the `REGISTERING` bit **or** a concurrent thread is in the -// `WAKING` critical section. Either way, no action must be taken. -// -// If the current thread is the only concurrent call to `wake` and another -// thread is in the `register` critical section, when the other thread **exits** -// the `register` critical section, it will observe the `WAKING` bit and handle -// the wake itself. -// -// If another thread is in the `wake` critical section, then it will handle -// waking the task. -// -// # A potential race (is safely handled). -// -// Imagine the following situation: -// -// * Thread A obtains the `wake` lock and wakes a task. -// -// * Before thread A releases the `wake` lock, the woken task is scheduled. -// -// * Thread B attempts to wake the task. In theory this should result in the -// task being woken, but it cannot because thread A still holds the wake lock. -// -// This case is handled by requiring users of `AtomicWaker` to call `register` -// **before** attempting to observe the application state change that resulted -// in the task being awoken. The wakers also change the application state before -// calling wake. -// -// Because of this, the waker will do one of two things. -// -// 1) Observe the application state change that Thread B is woken for. In this -// case, it is OK for Thread B's wake to be lost. 
-// -// 2) Call register before attempting to observe the application state. Since -// Thread A still holds the `wake` lock, the call to `register` will result -// in the task waking itself and get scheduled again. - -/// Idle state -const WAITING: usize = 0; - -/// A new waker value is being registered with the `AtomicWaker` cell. -const REGISTERING: usize = 0b01; - -/// The waker currently registered with the `AtomicWaker` cell is being woken. -const WAKING: usize = 0b10; - -impl AtomicWaker { - /// Create an `AtomicWaker`. - pub const fn new() -> Self { - // Make sure that task is Sync - trait AssertSync: Sync {} - impl AssertSync for Waker {} - - Self { state: AtomicUsize::new(WAITING), waker: UnsafeCell::new(None) } - } - - /// Registers the waker to be notified on calls to `wake`. - /// - /// The new task will take place of any previous tasks that were registered - /// by previous calls to `register`. Any calls to `wake` that happen after - /// a call to `register` (as defined by the memory ordering rules), will - /// notify the `register` caller's task and deregister the waker from future - /// notifications. Because of this, callers should ensure `register` gets - /// invoked with a new `Waker` **each** time they require a wakeup. - /// - /// It is safe to call `register` with multiple other threads concurrently - /// calling `wake`. This will result in the `register` caller's current - /// task being notified once. - /// - /// This function is safe to call concurrently, but this is generally a bad - /// idea. Concurrent calls to `register` will attempt to register different - /// tasks to be notified. One of the callers will win and have its task set, - /// but there is no guarantee as to which caller will succeed. - /// - /// # Examples - /// - /// Here is how `register` is used when implementing a flag. 
- /// - /// ``` - /// use futures::future::Future; - /// use futures::task::{Context, Poll, AtomicWaker}; - /// use std::sync::atomic::AtomicBool; - /// use std::sync::atomic::Ordering::Relaxed; - /// use std::pin::Pin; - /// - /// struct Flag { - /// waker: AtomicWaker, - /// set: AtomicBool, - /// } - /// - /// impl Future for Flag { - /// type Output = (); - /// - /// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - /// // Register **before** checking `set` to avoid a race condition - /// // that would result in lost notifications. - /// self.waker.register(cx.waker()); - /// - /// if self.set.load(Relaxed) { - /// Poll::Ready(()) - /// } else { - /// Poll::Pending - /// } - /// } - /// } - /// ``` - pub fn register(&self, waker: &Waker) { - match self - .state - .compare_exchange(WAITING, REGISTERING, Acquire, Acquire) - .unwrap_or_else(|x| x) - { - WAITING => { - unsafe { - // Locked acquired, update the waker cell - - // Avoid cloning the waker if the old waker will awaken the same task. - match &*self.waker.get() { - Some(old_waker) if old_waker.will_wake(waker) => (), - _ => *self.waker.get() = Some(waker.clone()), - } - - // Release the lock. If the state transitioned to include - // the `WAKING` bit, this means that at least one wake has - // been called concurrently. - // - // Start by assuming that the state is `REGISTERING` as this - // is what we just set it to. If this holds, we know that no - // other writes were performed in the meantime, so there is - // nothing to acquire, only release. In case of concurrent - // wakers, we need to acquire their releases, so success needs - // to do both. - let res = self.state.compare_exchange(REGISTERING, WAITING, AcqRel, Acquire); - - match res { - Ok(_) => { - // memory ordering: acquired self.state during CAS - // - if previous wakes went through it syncs with - // their final release (`fetch_and`) - // - if there was no previous wake the next wake - // will wake us, no sync needed. 
- } - Err(actual) => { - // This branch can only be reached if at least one - // concurrent thread called `wake`. In this - // case, `actual` **must** be `REGISTERING | - // `WAKING`. - debug_assert_eq!(actual, REGISTERING | WAKING); - - // Take the waker to wake once the atomic operation has - // completed. - let waker = (*self.waker.get()).take().unwrap(); - - // We need to return to WAITING state (clear our lock and - // concurrent WAKING flag). This needs to acquire all - // WAKING fetch_or releases and it needs to release our - // update to self.waker, so we need a `swap` operation. - self.state.swap(WAITING, AcqRel); - - // memory ordering: we acquired the state for all - // concurrent wakes, but future wakes might still - // need to wake us in case we can't make progress - // from the pending wakes. - // - // So we simply schedule to come back later (we could - // also simply leave the registration in place above). - waker.wake(); - } - } - } - } - WAKING => { - // Currently in the process of waking the task, i.e., - // `wake` is currently being called on the old task handle. - // - // memory ordering: we acquired the state for all - // concurrent wakes, but future wakes might still - // need to wake us in case we can't make progress - // from the pending wakes. - // - // So we simply schedule to come back later (we - // could also spin here trying to acquire the lock - // to register). - waker.wake_by_ref(); - } - state => { - // In this case, a concurrent thread is holding the - // "registering" lock. This probably indicates a bug in the - // caller's code as racing to call `register` doesn't make much - // sense. - // - // memory ordering: don't care. a concurrent register() is going - // to succeed and provide proper memory ordering. - // - // We just want to maintain memory safety. It is ok to drop the - // call to `register`. 
- debug_assert!(state == REGISTERING || state == REGISTERING | WAKING); - } - } - } - - /// Calls `wake` on the last `Waker` passed to `register`. - /// - /// If `register` has not been called yet, then this does nothing. - pub fn wake(&self) { - if let Some(waker) = self.take() { - waker.wake(); - } - } - - /// Returns the last `Waker` passed to `register`, so that the user can wake it. - /// - /// - /// Sometimes, just waking the AtomicWaker is not fine grained enough. This allows the user - /// to take the waker and then wake it separately, rather than performing both steps in one - /// atomic action. - /// - /// If a waker has not been registered, this returns `None`. - pub fn take(&self) -> Option { - // AcqRel ordering is used in order to acquire the value of the `task` - // cell as well as to establish a `release` ordering with whatever - // memory the `AtomicWaker` is associated with. - match self.state.fetch_or(WAKING, AcqRel) { - WAITING => { - // The waking lock has been acquired. - let waker = unsafe { (*self.waker.get()).take() }; - - // Release the lock - self.state.fetch_and(!WAKING, Release); - - waker - } - state => { - // There is a concurrent thread currently updating the - // associated task. - // - // Nothing more to do as the `WAKING` bit has been set. It - // doesn't matter if there are concurrent registering threads or - // not. 
- // - debug_assert!( - state == REGISTERING || state == REGISTERING | WAKING || state == WAKING - ); - None - } - } - } -} - -impl Default for AtomicWaker { - fn default() -> Self { - Self::new() - } -} - -impl fmt::Debug for AtomicWaker { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "AtomicWaker") - } -} - -unsafe impl Send for AtomicWaker {} -unsafe impl Sync for AtomicWaker {} diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/src/task/__internal/mod.rs s390-tools-2.33.1/rust-vendor/futures-core/src/task/__internal/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-core/src/task/__internal/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/src/task/__internal/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,4 +0,0 @@ -#[cfg(any(not(futures_no_atomic_cas), feature = "portable-atomic"))] -mod atomic_waker; -#[cfg(any(not(futures_no_atomic_cas), feature = "portable-atomic"))] -pub use self::atomic_waker::AtomicWaker; diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/src/task/mod.rs s390-tools-2.33.1/rust-vendor/futures-core/src/task/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-core/src/task/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/src/task/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,10 +0,0 @@ -//! Task notification. - -#[macro_use] -mod poll; - -#[doc(hidden)] -pub mod __internal; - -#[doc(no_inline)] -pub use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; diff -Nru s390-tools-2.31.0/rust-vendor/futures-core/src/task/poll.rs s390-tools-2.33.1/rust-vendor/futures-core/src/task/poll.rs --- s390-tools-2.31.0/rust-vendor/futures-core/src/task/poll.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-core/src/task/poll.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -/// Extracts the successful type of a `Poll`. 
-/// -/// This macro bakes in propagation of `Pending` signals by returning early. -#[macro_export] -macro_rules! ready { - ($e:expr $(,)?) => { - match $e { - $crate::task::Poll::Ready(t) => t, - $crate::task::Poll::Pending => return $crate::task::Poll::Pending, - } - }; -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/benches/thread_notify.rs s390-tools-2.33.1/rust-vendor/futures-executor/benches/thread_notify.rs --- s390-tools-2.31.0/rust-vendor/futures-executor/benches/thread_notify.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/benches/thread_notify.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -#![feature(test)] - -extern crate test; -use crate::test::Bencher; - -use futures::executor::block_on; -use futures::future::Future; -use futures::task::{Context, Poll, Waker}; -use std::pin::Pin; - -#[bench] -fn thread_yield_single_thread_one_wait(b: &mut Bencher) { - const NUM: usize = 10_000; - - struct Yield { - rem: usize, - } - - impl Future for Yield { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - if self.rem == 0 { - Poll::Ready(()) - } else { - self.rem -= 1; - cx.waker().wake_by_ref(); - Poll::Pending - } - } - } - - b.iter(|| { - let y = Yield { rem: NUM }; - block_on(y); - }); -} - -#[bench] -fn thread_yield_single_thread_many_wait(b: &mut Bencher) { - const NUM: usize = 10_000; - - struct Yield { - rem: usize, - } - - impl Future for Yield { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - if self.rem == 0 { - Poll::Ready(()) - } else { - self.rem -= 1; - cx.waker().wake_by_ref(); - Poll::Pending - } - } - } - - b.iter(|| { - for _ in 0..NUM { - let y = Yield { rem: 1 }; - block_on(y); - } - }); -} - -#[bench] -fn thread_yield_multi_thread(b: &mut Bencher) { - use std::sync::mpsc; - use std::thread; - - const NUM: usize = 1_000; - - let (tx, rx) = mpsc::sync_channel::(10_000); - - struct Yield { - rem: 
usize, - tx: mpsc::SyncSender, - } - impl Unpin for Yield {} - - impl Future for Yield { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - if self.rem == 0 { - Poll::Ready(()) - } else { - self.rem -= 1; - self.tx.send(cx.waker().clone()).unwrap(); - Poll::Pending - } - } - } - - thread::spawn(move || { - while let Ok(task) = rx.recv() { - task.wake(); - } - }); - - b.iter(move || { - let y = Yield { rem: NUM, tx: tx.clone() }; - - block_on(y); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/futures-executor/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/futures-executor/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/Cargo.toml s390-tools-2.33.1/rust-vendor/futures-executor/Cargo.toml --- s390-tools-2.31.0/rust-vendor/futures-executor/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.56" -name = "futures-executor" -version = "0.3.29" -description = """ -Executors for asynchronous tasks based on the futures-rs library. 
-""" -homepage = "https://rust-lang.github.io/futures-rs" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/futures-rs" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[dependencies.futures-core] -version = "0.3.29" -default-features = false - -[dependencies.futures-task] -version = "0.3.29" -default-features = false - -[dependencies.futures-util] -version = "0.3.29" -default-features = false - -[dependencies.num_cpus] -version = "1.8.0" -optional = true - -[dev-dependencies] - -[features] -default = ["std"] -std = [ - "futures-core/std", - "futures-task/std", - "futures-util/std", -] -thread-pool = [ - "std", - "num_cpus", -] diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/futures-executor/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/futures-executor/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/LICENSE-MIT s390-tools-2.33.1/rust-vendor/futures-executor/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/futures-executor/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/README.md s390-tools-2.33.1/rust-vendor/futures-executor/README.md --- s390-tools-2.31.0/rust-vendor/futures-executor/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -# futures-executor - -Executors for asynchronous tasks based on the futures-rs library. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -futures-executor = "0.3" -``` - -The current `futures-executor` requires Rust 1.56 or later. - -## License - -Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or -[MIT license](LICENSE-MIT) at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/src/enter.rs s390-tools-2.33.1/rust-vendor/futures-executor/src/enter.rs --- s390-tools-2.31.0/rust-vendor/futures-executor/src/enter.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/src/enter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,80 +0,0 @@ -use std::cell::Cell; -use std::fmt; - -thread_local!(static ENTERED: Cell = Cell::new(false)); - -/// Represents an executor context. -/// -/// For more details, see [`enter` documentation](enter()). -pub struct Enter { - _priv: (), -} - -/// An error returned by `enter` if an execution scope has already been -/// entered. 
-pub struct EnterError { - _priv: (), -} - -impl fmt::Debug for EnterError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("EnterError").finish() - } -} - -impl fmt::Display for EnterError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "an execution scope has already been entered") - } -} - -impl std::error::Error for EnterError {} - -/// Marks the current thread as being within the dynamic extent of an -/// executor. -/// -/// Executor implementations should call this function before beginning to -/// execute a task, and drop the returned [`Enter`](Enter) value after -/// completing task execution: -/// -/// ``` -/// use futures::executor::enter; -/// -/// let enter = enter().expect("..."); -/// /* run task */ -/// drop(enter); -/// ``` -/// -/// Doing so ensures that executors aren't -/// accidentally invoked in a nested fashion. -/// -/// # Error -/// -/// Returns an error if the current thread is already marked, in which case the -/// caller should panic with a tailored error message. -pub fn enter() -> Result { - ENTERED.with(|c| { - if c.get() { - Err(EnterError { _priv: () }) - } else { - c.set(true); - - Ok(Enter { _priv: () }) - } - }) -} - -impl fmt::Debug for Enter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Enter").finish() - } -} - -impl Drop for Enter { - fn drop(&mut self) { - ENTERED.with(|c| { - assert!(c.get()); - c.set(false); - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/src/lib.rs s390-tools-2.33.1/rust-vendor/futures-executor/src/lib.rs --- s390-tools-2.31.0/rust-vendor/futures-executor/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -//! Built-in executors and related tools. -//! -//! All asynchronous computation occurs within an executor, which is -//! capable of spawning futures as tasks. 
This module provides several -//! built-in executors, as well as tools for building your own. -//! -//! All items are only available when the `std` feature of this -//! library is activated, and it is activated by default. -//! -//! # Using a thread pool (M:N task scheduling) -//! -//! Most of the time tasks should be executed on a [thread pool](ThreadPool). -//! A small set of worker threads can handle a very large set of spawned tasks -//! (which are much lighter weight than threads). Tasks spawned onto the pool -//! with the [`spawn_ok`](ThreadPool::spawn_ok) function will run ambiently on -//! the created threads. -//! -//! # Spawning additional tasks -//! -//! Tasks can be spawned onto a spawner by calling its [`spawn_obj`] method -//! directly. In the case of `!Send` futures, [`spawn_local_obj`] can be used -//! instead. -//! -//! # Single-threaded execution -//! -//! In addition to thread pools, it's possible to run a task (and the tasks -//! it spawns) entirely within a single thread via the [`LocalPool`] executor. -//! Aside from cutting down on synchronization costs, this executor also makes -//! it possible to spawn non-`Send` tasks, via [`spawn_local_obj`]. The -//! [`LocalPool`] is best suited for running I/O-bound tasks that do relatively -//! little work between I/O operations. -//! -//! There is also a convenience function [`block_on`] for simply running a -//! future to completion on the current thread. -//! -//! [`spawn_obj`]: https://docs.rs/futures/0.3/futures/task/trait.Spawn.html#tymethod.spawn_obj -//! 
[`spawn_local_obj`]: https://docs.rs/futures/0.3/futures/task/trait.LocalSpawn.html#tymethod.spawn_local_obj - -#![cfg_attr(not(feature = "std"), no_std)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - single_use_lifetimes, - unreachable_pub -)] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![cfg_attr(docsrs, feature(doc_cfg))] - -#[cfg(feature = "std")] -mod local_pool; -#[cfg(feature = "std")] -pub use crate::local_pool::{block_on, block_on_stream, BlockingStream, LocalPool, LocalSpawner}; - -#[cfg(feature = "thread-pool")] -#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] -#[cfg(feature = "std")] -mod thread_pool; -#[cfg(feature = "thread-pool")] -#[cfg(feature = "std")] -mod unpark_mutex; -#[cfg(feature = "thread-pool")] -#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] -#[cfg(feature = "std")] -pub use crate::thread_pool::{ThreadPool, ThreadPoolBuilder}; - -#[cfg(feature = "std")] -mod enter; -#[cfg(feature = "std")] -pub use crate::enter::{enter, Enter, EnterError}; diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/src/local_pool.rs s390-tools-2.33.1/rust-vendor/futures-executor/src/local_pool.rs --- s390-tools-2.31.0/rust-vendor/futures-executor/src/local_pool.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/src/local_pool.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,402 +0,0 @@ -use crate::enter; -use futures_core::future::Future; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use futures_task::{waker_ref, ArcWake}; -use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError}; -use futures_util::pin_mut; -use futures_util::stream::FuturesUnordered; -use futures_util::stream::StreamExt; -use std::cell::RefCell; -use std::ops::{Deref, DerefMut}; -use std::rc::{Rc, Weak}; -use std::sync::{ - 
atomic::{AtomicBool, Ordering}, - Arc, -}; -use std::thread::{self, Thread}; - -/// A single-threaded task pool for polling futures to completion. -/// -/// This executor allows you to multiplex any number of tasks onto a single -/// thread. It's appropriate to poll strictly I/O-bound futures that do very -/// little work in between I/O actions. -/// -/// To get a handle to the pool that implements -/// [`Spawn`](futures_task::Spawn), use the -/// [`spawner()`](LocalPool::spawner) method. Because the executor is -/// single-threaded, it supports a special form of task spawning for non-`Send` -/// futures, via [`spawn_local_obj`](futures_task::LocalSpawn::spawn_local_obj). -#[derive(Debug)] -pub struct LocalPool { - pool: FuturesUnordered>, - incoming: Rc, -} - -/// A handle to a [`LocalPool`](LocalPool) that implements -/// [`Spawn`](futures_task::Spawn). -#[derive(Clone, Debug)] -pub struct LocalSpawner { - incoming: Weak, -} - -type Incoming = RefCell>>; - -pub(crate) struct ThreadNotify { - /// The (single) executor thread. - thread: Thread, - /// A flag to ensure a wakeup (i.e. `unpark()`) is not "forgotten" - /// before the next `park()`, which may otherwise happen if the code - /// being executed as part of the future(s) being polled makes use of - /// park / unpark calls of its own, i.e. we cannot assume that no other - /// code uses park / unpark on the executing `thread`. - unparked: AtomicBool, -} - -thread_local! { - static CURRENT_THREAD_NOTIFY: Arc = Arc::new(ThreadNotify { - thread: thread::current(), - unparked: AtomicBool::new(false), - }); -} - -impl ArcWake for ThreadNotify { - fn wake_by_ref(arc_self: &Arc) { - // Make sure the wakeup is remembered until the next `park()`. - let unparked = arc_self.unparked.swap(true, Ordering::Release); - if !unparked { - // If the thread has not been unparked yet, it must be done - // now. 
If it was actually parked, it will run again, - // otherwise the token made available by `unpark` - // may be consumed before reaching `park()`, but `unparked` - // ensures it is not forgotten. - arc_self.thread.unpark(); - } - } -} - -// Set up and run a basic single-threaded spawner loop, invoking `f` on each -// turn. -fn run_executor) -> Poll>(mut f: F) -> T { - let _enter = enter().expect( - "cannot execute `LocalPool` executor from within \ - another executor", - ); - - CURRENT_THREAD_NOTIFY.with(|thread_notify| { - let waker = waker_ref(thread_notify); - let mut cx = Context::from_waker(&waker); - loop { - if let Poll::Ready(t) = f(&mut cx) { - return t; - } - - // Wait for a wakeup. - while !thread_notify.unparked.swap(false, Ordering::Acquire) { - // No wakeup occurred. It may occur now, right before parking, - // but in that case the token made available by `unpark()` - // is guaranteed to still be available and `park()` is a no-op. - thread::park(); - } - } - }) -} - -/// Check for a wakeup, but don't consume it. -fn woken() -> bool { - CURRENT_THREAD_NOTIFY.with(|thread_notify| thread_notify.unparked.load(Ordering::Acquire)) -} - -impl LocalPool { - /// Create a new, empty pool of tasks. - pub fn new() -> Self { - Self { pool: FuturesUnordered::new(), incoming: Default::default() } - } - - /// Get a clonable handle to the pool as a [`Spawn`]. - pub fn spawner(&self) -> LocalSpawner { - LocalSpawner { incoming: Rc::downgrade(&self.incoming) } - } - - /// Run all tasks in the pool to completion. - /// - /// ``` - /// use futures::executor::LocalPool; - /// - /// let mut pool = LocalPool::new(); - /// - /// // ... spawn some initial tasks using `spawn.spawn()` or `spawn.spawn_local()` - /// - /// // run *all* tasks in the pool to completion, including any newly-spawned ones. - /// pool.run(); - /// ``` - /// - /// The function will block the calling thread until *all* tasks in the pool - /// are complete, including any spawned while running existing tasks. 
- pub fn run(&mut self) { - run_executor(|cx| self.poll_pool(cx)) - } - - /// Runs all the tasks in the pool until the given future completes. - /// - /// ``` - /// use futures::executor::LocalPool; - /// - /// let mut pool = LocalPool::new(); - /// # let my_app = async {}; - /// - /// // run tasks in the pool until `my_app` completes - /// pool.run_until(my_app); - /// ``` - /// - /// The function will block the calling thread *only* until the future `f` - /// completes; there may still be incomplete tasks in the pool, which will - /// be inert after the call completes, but can continue with further use of - /// one of the pool's run or poll methods. While the function is running, - /// however, all tasks in the pool will try to make progress. - pub fn run_until(&mut self, future: F) -> F::Output { - pin_mut!(future); - - run_executor(|cx| { - { - // if our main task is done, so are we - let result = future.as_mut().poll(cx); - if let Poll::Ready(output) = result { - return Poll::Ready(output); - } - } - - let _ = self.poll_pool(cx); - Poll::Pending - }) - } - - /// Runs all tasks and returns after completing one future or until no more progress - /// can be made. Returns `true` if one future was completed, `false` otherwise. - /// - /// ``` - /// use futures::executor::LocalPool; - /// use futures::task::LocalSpawnExt; - /// use futures::future::{ready, pending}; - /// - /// let mut pool = LocalPool::new(); - /// let spawner = pool.spawner(); - /// - /// spawner.spawn_local(ready(())).unwrap(); - /// spawner.spawn_local(ready(())).unwrap(); - /// spawner.spawn_local(pending()).unwrap(); - /// - /// // Run the two ready tasks and return true for them. 
- /// pool.try_run_one(); // returns true after completing one of the ready futures - /// pool.try_run_one(); // returns true after completing the other ready future - /// - /// // the remaining task can not be completed - /// assert!(!pool.try_run_one()); // returns false - /// ``` - /// - /// This function will not block the calling thread and will return the moment - /// that there are no tasks left for which progress can be made or after exactly one - /// task was completed; Remaining incomplete tasks in the pool can continue with - /// further use of one of the pool's run or poll methods. - /// Though only one task will be completed, progress may be made on multiple tasks. - pub fn try_run_one(&mut self) -> bool { - run_executor(|cx| { - loop { - self.drain_incoming(); - - match self.pool.poll_next_unpin(cx) { - // Success! - Poll::Ready(Some(())) => return Poll::Ready(true), - // The pool was empty. - Poll::Ready(None) => return Poll::Ready(false), - Poll::Pending => (), - } - - if !self.incoming.borrow().is_empty() { - // New tasks were spawned; try again. - continue; - } else if woken() { - // The pool yielded to us, but there's more progress to be made. - return Poll::Pending; - } else { - return Poll::Ready(false); - } - } - }) - } - - /// Runs all tasks in the pool and returns if no more progress can be made - /// on any task. - /// - /// ``` - /// use futures::executor::LocalPool; - /// use futures::task::LocalSpawnExt; - /// use futures::future::{ready, pending}; - /// - /// let mut pool = LocalPool::new(); - /// let spawner = pool.spawner(); - /// - /// spawner.spawn_local(ready(())).unwrap(); - /// spawner.spawn_local(ready(())).unwrap(); - /// spawner.spawn_local(pending()).unwrap(); - /// - /// // Runs the two ready task and returns. - /// // The empty task remains in the pool. 
- /// pool.run_until_stalled(); - /// ``` - /// - /// This function will not block the calling thread and will return the moment - /// that there are no tasks left for which progress can be made; - /// remaining incomplete tasks in the pool can continue with further use of one - /// of the pool's run or poll methods. While the function is running, all tasks - /// in the pool will try to make progress. - pub fn run_until_stalled(&mut self) { - run_executor(|cx| match self.poll_pool(cx) { - // The pool is empty. - Poll::Ready(()) => Poll::Ready(()), - Poll::Pending => { - if woken() { - Poll::Pending - } else { - // We're stalled for now. - Poll::Ready(()) - } - } - }); - } - - /// Poll `self.pool`, re-filling it with any newly-spawned tasks. - /// Repeat until either the pool is empty, or it returns `Pending`. - /// - /// Returns `Ready` if the pool was empty, and `Pending` otherwise. - /// - /// NOTE: the pool may call `wake`, so `Pending` doesn't necessarily - /// mean that the pool can't make progress. - fn poll_pool(&mut self, cx: &mut Context<'_>) -> Poll<()> { - loop { - self.drain_incoming(); - - let pool_ret = self.pool.poll_next_unpin(cx); - - // We queued up some new tasks; add them and poll again. - if !self.incoming.borrow().is_empty() { - continue; - } - - match pool_ret { - Poll::Ready(Some(())) => continue, - Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => return Poll::Pending, - } - } - } - - /// Empty the incoming queue of newly-spawned tasks. - fn drain_incoming(&mut self) { - let mut incoming = self.incoming.borrow_mut(); - for task in incoming.drain(..) { - self.pool.push(task) - } - } -} - -impl Default for LocalPool { - fn default() -> Self { - Self::new() - } -} - -/// Run a future to completion on the current thread. -/// -/// This function will block the caller until the given future has completed. -/// -/// Use a [`LocalPool`](LocalPool) if you need finer-grained control over -/// spawned tasks. 
-pub fn block_on(f: F) -> F::Output { - pin_mut!(f); - run_executor(|cx| f.as_mut().poll(cx)) -} - -/// Turn a stream into a blocking iterator. -/// -/// When `next` is called on the resulting `BlockingStream`, the caller -/// will be blocked until the next element of the `Stream` becomes available. -pub fn block_on_stream(stream: S) -> BlockingStream { - BlockingStream { stream } -} - -/// An iterator which blocks on values from a stream until they become available. -#[derive(Debug)] -pub struct BlockingStream { - stream: S, -} - -impl Deref for BlockingStream { - type Target = S; - fn deref(&self) -> &Self::Target { - &self.stream - } -} - -impl DerefMut for BlockingStream { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.stream - } -} - -impl BlockingStream { - /// Convert this `BlockingStream` into the inner `Stream` type. - pub fn into_inner(self) -> S { - self.stream - } -} - -impl Iterator for BlockingStream { - type Item = S::Item; - - fn next(&mut self) -> Option { - LocalPool::new().run_until(self.stream.next()) - } - - fn size_hint(&self) -> (usize, Option) { - self.stream.size_hint() - } -} - -impl Spawn for LocalSpawner { - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { - if let Some(incoming) = self.incoming.upgrade() { - incoming.borrow_mut().push(future.into()); - Ok(()) - } else { - Err(SpawnError::shutdown()) - } - } - - fn status(&self) -> Result<(), SpawnError> { - if self.incoming.upgrade().is_some() { - Ok(()) - } else { - Err(SpawnError::shutdown()) - } - } -} - -impl LocalSpawn for LocalSpawner { - fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { - if let Some(incoming) = self.incoming.upgrade() { - incoming.borrow_mut().push(future); - Ok(()) - } else { - Err(SpawnError::shutdown()) - } - } - - fn status_local(&self) -> Result<(), SpawnError> { - if self.incoming.upgrade().is_some() { - Ok(()) - } else { - Err(SpawnError::shutdown()) - } - } -} diff -Nru 
s390-tools-2.31.0/rust-vendor/futures-executor/src/thread_pool.rs s390-tools-2.33.1/rust-vendor/futures-executor/src/thread_pool.rs --- s390-tools-2.31.0/rust-vendor/futures-executor/src/thread_pool.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/src/thread_pool.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,380 +0,0 @@ -use crate::enter; -use crate::unpark_mutex::UnparkMutex; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use futures_task::{waker_ref, ArcWake}; -use futures_task::{FutureObj, Spawn, SpawnError}; -use futures_util::future::FutureExt; -use std::cmp; -use std::fmt; -use std::io; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::mpsc; -use std::sync::{Arc, Mutex}; -use std::thread; - -/// A general-purpose thread pool for scheduling tasks that poll futures to -/// completion. -/// -/// The thread pool multiplexes any number of tasks onto a fixed number of -/// worker threads. -/// -/// This type is a clonable handle to the threadpool itself. -/// Cloning it will only create a new reference, not a new threadpool. -/// -/// This type is only available when the `thread-pool` feature of this -/// library is activated. -#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] -pub struct ThreadPool { - state: Arc, -} - -/// Thread pool configuration object. -/// -/// This type is only available when the `thread-pool` feature of this -/// library is activated. 
-#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] -pub struct ThreadPoolBuilder { - pool_size: usize, - stack_size: usize, - name_prefix: Option, - after_start: Option>, - before_stop: Option>, -} - -trait AssertSendSync: Send + Sync {} -impl AssertSendSync for ThreadPool {} - -struct PoolState { - tx: Mutex>, - rx: Mutex>, - cnt: AtomicUsize, - size: usize, -} - -impl fmt::Debug for ThreadPool { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ThreadPool").field("size", &self.state.size).finish() - } -} - -impl fmt::Debug for ThreadPoolBuilder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ThreadPoolBuilder") - .field("pool_size", &self.pool_size) - .field("name_prefix", &self.name_prefix) - .finish() - } -} - -enum Message { - Run(Task), - Close, -} - -impl ThreadPool { - /// Creates a new thread pool with the default configuration. - /// - /// See documentation for the methods in - /// [`ThreadPoolBuilder`](ThreadPoolBuilder) for details on the default - /// configuration. - pub fn new() -> Result { - ThreadPoolBuilder::new().create() - } - - /// Create a default thread pool configuration, which can then be customized. - /// - /// See documentation for the methods in - /// [`ThreadPoolBuilder`](ThreadPoolBuilder) for details on the default - /// configuration. - pub fn builder() -> ThreadPoolBuilder { - ThreadPoolBuilder::new() - } - - /// Spawns a future that will be run to completion. - /// - /// > **Note**: This method is similar to `Spawn::spawn_obj`, except that - /// > it is guaranteed to always succeed. - pub fn spawn_obj_ok(&self, future: FutureObj<'static, ()>) { - let task = Task { - future, - wake_handle: Arc::new(WakeHandle { exec: self.clone(), mutex: UnparkMutex::new() }), - exec: self.clone(), - }; - self.state.send(Message::Run(task)); - } - - /// Spawns a task that polls the given future with output `()` to - /// completion. 
- /// - /// ``` - /// # { - /// use futures::executor::ThreadPool; - /// - /// let pool = ThreadPool::new().unwrap(); - /// - /// let future = async { /* ... */ }; - /// pool.spawn_ok(future); - /// # } - /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 - /// ``` - /// - /// > **Note**: This method is similar to `SpawnExt::spawn`, except that - /// > it is guaranteed to always succeed. - pub fn spawn_ok(&self, future: Fut) - where - Fut: Future + Send + 'static, - { - self.spawn_obj_ok(FutureObj::new(Box::new(future))) - } -} - -impl Spawn for ThreadPool { - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { - self.spawn_obj_ok(future); - Ok(()) - } -} - -impl PoolState { - fn send(&self, msg: Message) { - self.tx.lock().unwrap().send(msg).unwrap(); - } - - fn work( - &self, - idx: usize, - after_start: Option>, - before_stop: Option>, - ) { - let _scope = enter().unwrap(); - if let Some(after_start) = after_start { - after_start(idx); - } - loop { - let msg = self.rx.lock().unwrap().recv().unwrap(); - match msg { - Message::Run(task) => task.run(), - Message::Close => break, - } - } - if let Some(before_stop) = before_stop { - before_stop(idx); - } - } -} - -impl Clone for ThreadPool { - fn clone(&self) -> Self { - self.state.cnt.fetch_add(1, Ordering::Relaxed); - Self { state: self.state.clone() } - } -} - -impl Drop for ThreadPool { - fn drop(&mut self) { - if self.state.cnt.fetch_sub(1, Ordering::Relaxed) == 1 { - for _ in 0..self.state.size { - self.state.send(Message::Close); - } - } - } -} - -impl ThreadPoolBuilder { - /// Create a default thread pool configuration. - /// - /// See the other methods on this type for details on the defaults. 
- pub fn new() -> Self { - Self { - pool_size: cmp::max(1, num_cpus::get()), - stack_size: 0, - name_prefix: None, - after_start: None, - before_stop: None, - } - } - - /// Set size of a future ThreadPool - /// - /// The size of a thread pool is the number of worker threads spawned. By - /// default, this is equal to the number of CPU cores. - /// - /// # Panics - /// - /// Panics if `pool_size == 0`. - pub fn pool_size(&mut self, size: usize) -> &mut Self { - assert!(size > 0); - self.pool_size = size; - self - } - - /// Set stack size of threads in the pool, in bytes. - /// - /// By default, worker threads use Rust's standard stack size. - pub fn stack_size(&mut self, stack_size: usize) -> &mut Self { - self.stack_size = stack_size; - self - } - - /// Set thread name prefix of a future ThreadPool. - /// - /// Thread name prefix is used for generating thread names. For example, if prefix is - /// `my-pool-`, then threads in the pool will get names like `my-pool-1` etc. - /// - /// By default, worker threads are assigned Rust's standard thread name. - pub fn name_prefix>(&mut self, name_prefix: S) -> &mut Self { - self.name_prefix = Some(name_prefix.into()); - self - } - - /// Execute the closure `f` immediately after each worker thread is started, - /// but before running any tasks on it. - /// - /// This hook is intended for bookkeeping and monitoring. - /// The closure `f` will be dropped after the `builder` is dropped - /// and all worker threads in the pool have executed it. - /// - /// The closure provided will receive an index corresponding to the worker - /// thread it's running on. - pub fn after_start(&mut self, f: F) -> &mut Self - where - F: Fn(usize) + Send + Sync + 'static, - { - self.after_start = Some(Arc::new(f)); - self - } - - /// Execute closure `f` just prior to shutting down each worker thread. - /// - /// This hook is intended for bookkeeping and monitoring. 
- /// The closure `f` will be dropped after the `builder` is dropped - /// and all threads in the pool have executed it. - /// - /// The closure provided will receive an index corresponding to the worker - /// thread it's running on. - pub fn before_stop(&mut self, f: F) -> &mut Self - where - F: Fn(usize) + Send + Sync + 'static, - { - self.before_stop = Some(Arc::new(f)); - self - } - - /// Create a [`ThreadPool`](ThreadPool) with the given configuration. - pub fn create(&mut self) -> Result { - let (tx, rx) = mpsc::channel(); - let pool = ThreadPool { - state: Arc::new(PoolState { - tx: Mutex::new(tx), - rx: Mutex::new(rx), - cnt: AtomicUsize::new(1), - size: self.pool_size, - }), - }; - - for counter in 0..self.pool_size { - let state = pool.state.clone(); - let after_start = self.after_start.clone(); - let before_stop = self.before_stop.clone(); - let mut thread_builder = thread::Builder::new(); - if let Some(ref name_prefix) = self.name_prefix { - thread_builder = thread_builder.name(format!("{}{}", name_prefix, counter)); - } - if self.stack_size > 0 { - thread_builder = thread_builder.stack_size(self.stack_size); - } - thread_builder.spawn(move || state.work(counter, after_start, before_stop))?; - } - Ok(pool) - } -} - -impl Default for ThreadPoolBuilder { - fn default() -> Self { - Self::new() - } -} - -/// A task responsible for polling a future to completion. -struct Task { - future: FutureObj<'static, ()>, - exec: ThreadPool, - wake_handle: Arc, -} - -struct WakeHandle { - mutex: UnparkMutex, - exec: ThreadPool, -} - -impl Task { - /// Actually run the task (invoking `poll` on the future) on the current - /// thread. - fn run(self) { - let Self { mut future, wake_handle, mut exec } = self; - let waker = waker_ref(&wake_handle); - let mut cx = Context::from_waker(&waker); - - // Safety: The ownership of this `Task` object is evidence that - // we are in the `POLLING`/`REPOLL` state for the mutex. 
- unsafe { - wake_handle.mutex.start_poll(); - - loop { - let res = future.poll_unpin(&mut cx); - match res { - Poll::Pending => {} - Poll::Ready(()) => return wake_handle.mutex.complete(), - } - let task = Self { future, wake_handle: wake_handle.clone(), exec }; - match wake_handle.mutex.wait(task) { - Ok(()) => return, // we've waited - Err(task) => { - // someone's notified us - future = task.future; - exec = task.exec; - } - } - } - } - } -} - -impl fmt::Debug for Task { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Task").field("contents", &"...").finish() - } -} - -impl ArcWake for WakeHandle { - fn wake_by_ref(arc_self: &Arc) { - if let Ok(task) = arc_self.mutex.notify() { - arc_self.exec.state.send(Message::Run(task)) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::sync::mpsc; - - #[test] - fn test_drop_after_start() { - { - let (tx, rx) = mpsc::sync_channel(2); - let _cpu_pool = ThreadPoolBuilder::new() - .pool_size(2) - .after_start(move |_| tx.send(1).unwrap()) - .create() - .unwrap(); - - // After ThreadPoolBuilder is deconstructed, the tx should be dropped - // so that we can use rx as an iterator. - let count = rx.into_iter().count(); - assert_eq!(count, 2); - } - std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-executor/src/unpark_mutex.rs s390-tools-2.33.1/rust-vendor/futures-executor/src/unpark_mutex.rs --- s390-tools-2.31.0/rust-vendor/futures-executor/src/unpark_mutex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-executor/src/unpark_mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,137 +0,0 @@ -use std::cell::UnsafeCell; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::SeqCst; - -/// A "lock" around data `D`, which employs a *helping* strategy. 
-/// -/// Used to ensure that concurrent `unpark` invocations lead to (1) `poll` being -/// invoked on only a single thread at a time (2) `poll` being invoked at least -/// once after each `unpark` (unless the future has completed). -pub(crate) struct UnparkMutex { - // The state of task execution (state machine described below) - status: AtomicUsize, - - // The actual task data, accessible only in the POLLING state - inner: UnsafeCell>, -} - -// `UnparkMutex` functions in many ways like a `Mutex`, except that on -// acquisition failure, the current lock holder performs the desired work -- -// re-polling. -// -// As such, these impls mirror those for `Mutex`. In particular, a reference -// to `UnparkMutex` can be used to gain `&mut` access to the inner data, which -// must therefore be `Send`. -unsafe impl Send for UnparkMutex {} -unsafe impl Sync for UnparkMutex {} - -// There are four possible task states, listed below with their possible -// transitions: - -// The task is blocked, waiting on an event -const WAITING: usize = 0; // --> POLLING - -// The task is actively being polled by a thread; arrival of additional events -// of interest should move it to the REPOLL state -const POLLING: usize = 1; // --> WAITING, REPOLL, or COMPLETE - -// The task is actively being polled, but will need to be re-polled upon -// completion to ensure that all events were observed. -const REPOLL: usize = 2; // --> POLLING - -// The task has finished executing (either successfully or with an error/panic) -const COMPLETE: usize = 3; // No transitions out - -impl UnparkMutex { - pub(crate) fn new() -> Self { - Self { status: AtomicUsize::new(WAITING), inner: UnsafeCell::new(None) } - } - - /// Attempt to "notify" the mutex that a poll should occur. - /// - /// An `Ok` result indicates that the `POLLING` state has been entered, and - /// the caller can proceed to poll the future. 
An `Err` result indicates - /// that polling is not necessary (because the task is finished or the - /// polling has been delegated). - pub(crate) fn notify(&self) -> Result { - let mut status = self.status.load(SeqCst); - loop { - match status { - // The task is idle, so try to run it immediately. - WAITING => { - match self.status.compare_exchange(WAITING, POLLING, SeqCst, SeqCst) { - Ok(_) => { - let data = unsafe { - // SAFETY: we've ensured mutual exclusion via - // the status protocol; we are the only thread - // that has transitioned to the POLLING state, - // and we won't transition back to QUEUED until - // the lock is "released" by this thread. See - // the protocol diagram above. - (*self.inner.get()).take().unwrap() - }; - return Ok(data); - } - Err(cur) => status = cur, - } - } - - // The task is being polled, so we need to record that it should - // be *repolled* when complete. - POLLING => match self.status.compare_exchange(POLLING, REPOLL, SeqCst, SeqCst) { - Ok(_) => return Err(()), - Err(cur) => status = cur, - }, - - // The task is already scheduled for polling, or is complete, so - // we've got nothing to do. - _ => return Err(()), - } - } - } - - /// Alert the mutex that polling is about to begin, clearing any accumulated - /// re-poll requests. - /// - /// # Safety - /// - /// Callable only from the `POLLING`/`REPOLL` states, i.e. between - /// successful calls to `notify` and `wait`/`complete`. - pub(crate) unsafe fn start_poll(&self) { - self.status.store(POLLING, SeqCst); - } - - /// Alert the mutex that polling completed with `Pending`. - /// - /// # Safety - /// - /// Callable only from the `POLLING`/`REPOLL` states, i.e. between - /// successful calls to `notify` and `wait`/`complete`. 
- pub(crate) unsafe fn wait(&self, data: D) -> Result<(), D> { - *self.inner.get() = Some(data); - - match self.status.compare_exchange(POLLING, WAITING, SeqCst, SeqCst) { - // no unparks came in while we were running - Ok(_) => Ok(()), - - // guaranteed to be in REPOLL state; just clobber the - // state and run again. - Err(status) => { - assert_eq!(status, REPOLL); - self.status.store(POLLING, SeqCst); - Err((*self.inner.get()).take().unwrap()) - } - } - } - - /// Alert the mutex that the task has completed execution and should not be - /// notified again. - /// - /// # Safety - /// - /// Callable only from the `POLLING`/`REPOLL` states, i.e. between - /// successful calls to `notify` and `wait`/`complete`. - pub(crate) unsafe fn complete(&self) { - self.status.store(COMPLETE, SeqCst); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-io/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/futures-io/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/futures-io/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-io/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/futures-io/Cargo.toml s390-tools-2.33.1/rust-vendor/futures-io/Cargo.toml --- s390-tools-2.31.0/rust-vendor/futures-io/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-io/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. 
-# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.36" -name = "futures-io" -version = "0.3.29" -description = """ -The `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and `AsyncBufRead` traits for the futures-rs library. -""" -homepage = "https://rust-lang.github.io/futures-rs" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/futures-rs" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[dependencies] - -[features] -default = ["std"] -std = [] -unstable = [] diff -Nru s390-tools-2.31.0/rust-vendor/futures-io/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/futures-io/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/futures-io/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-io/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/futures-io/LICENSE-MIT s390-tools-2.33.1/rust-vendor/futures-io/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/futures-io/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-io/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/futures-io/README.md s390-tools-2.33.1/rust-vendor/futures-io/README.md --- s390-tools-2.31.0/rust-vendor/futures-io/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-io/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -# futures-io - -The `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and `AsyncBufRead` traits for the futures-rs library. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -futures-io = "0.3" -``` - -The current `futures-io` requires Rust 1.36 or later. - -## License - -Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or -[MIT license](LICENSE-MIT) at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/futures-io/src/lib.rs s390-tools-2.33.1/rust-vendor/futures-io/src/lib.rs --- s390-tools-2.31.0/rust-vendor/futures-io/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-io/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,558 +0,0 @@ -//! Asynchronous I/O -//! -//! This crate contains the `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and -//! `AsyncBufRead` traits, the asynchronous analogs to -//! `std::io::{Read, Write, Seek, BufRead}`. The primary difference is -//! that these traits integrate with the asynchronous task system. -//! -//! All items of this library are only available when the `std` feature of this -//! library is activated, and it is activated by default. 
- -#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub)] -// It cannot be included in the published code because this lints have false positives in the minimum required version. -#![cfg_attr(test, warn(single_use_lifetimes))] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![cfg_attr(docsrs, feature(doc_cfg))] - -#[cfg(feature = "std")] -mod if_std { - use std::io; - use std::ops::DerefMut; - use std::pin::Pin; - use std::task::{Context, Poll}; - - // Re-export some types from `std::io` so that users don't have to deal - // with conflicts when `use`ing `futures::io` and `std::io`. - #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 - #[doc(no_inline)] - pub use io::{Error, ErrorKind, IoSlice, IoSliceMut, Result, SeekFrom}; - - /// Read bytes asynchronously. - /// - /// This trait is analogous to the `std::io::Read` trait, but integrates - /// with the asynchronous task system. In particular, the `poll_read` - /// method, unlike `Read::read`, will automatically queue the current task - /// for wakeup and return if data is not yet available, rather than blocking - /// the calling thread. - pub trait AsyncRead { - /// Attempt to read from the `AsyncRead` into `buf`. - /// - /// On success, returns `Poll::Ready(Ok(num_bytes_read))`. - /// - /// If no data is available for reading, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes - /// readable or is closed. - /// - /// # Implementation - /// - /// This function may not return errors of kind `WouldBlock` or - /// `Interrupted`. Implementations must convert `WouldBlock` into - /// `Poll::Pending` and either internally retry or convert - /// `Interrupted` into another error kind. 
- fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll>; - - /// Attempt to read from the `AsyncRead` into `bufs` using vectored - /// IO operations. - /// - /// This method is similar to `poll_read`, but allows data to be read - /// into multiple buffers using a single operation. - /// - /// On success, returns `Poll::Ready(Ok(num_bytes_read))`. - /// - /// If no data is available for reading, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes - /// readable or is closed. - /// By default, this method delegates to using `poll_read` on the first - /// nonempty buffer in `bufs`, or an empty one if none exists. Objects which - /// support vectored IO should override this method. - /// - /// # Implementation - /// - /// This function may not return errors of kind `WouldBlock` or - /// `Interrupted`. Implementations must convert `WouldBlock` into - /// `Poll::Pending` and either internally retry or convert - /// `Interrupted` into another error kind. - fn poll_read_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - for b in bufs { - if !b.is_empty() { - return self.poll_read(cx, b); - } - } - - self.poll_read(cx, &mut []) - } - } - - /// Write bytes asynchronously. - /// - /// This trait is analogous to the `std::io::Write` trait, but integrates - /// with the asynchronous task system. In particular, the `poll_write` - /// method, unlike `Write::write`, will automatically queue the current task - /// for wakeup and return if the writer cannot take more data, rather than blocking - /// the calling thread. - pub trait AsyncWrite { - /// Attempt to write bytes from `buf` into the object. - /// - /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. 
- /// - /// If the object is not ready for writing, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes - /// writable or is closed. - /// - /// # Implementation - /// - /// This function may not return errors of kind `WouldBlock` or - /// `Interrupted`. Implementations must convert `WouldBlock` into - /// `Poll::Pending` and either internally retry or convert - /// `Interrupted` into another error kind. - /// - /// `poll_write` must try to make progress by flushing the underlying object if - /// that is the only way the underlying object can become writable again. - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll>; - - /// Attempt to write bytes from `bufs` into the object using vectored - /// IO operations. - /// - /// This method is similar to `poll_write`, but allows data from multiple buffers to be written - /// using a single operation. - /// - /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. - /// - /// If the object is not ready for writing, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes - /// writable or is closed. - /// - /// By default, this method delegates to using `poll_write` on the first - /// nonempty buffer in `bufs`, or an empty one if none exists. Objects which - /// support vectored IO should override this method. - /// - /// # Implementation - /// - /// This function may not return errors of kind `WouldBlock` or - /// `Interrupted`. Implementations must convert `WouldBlock` into - /// `Poll::Pending` and either internally retry or convert - /// `Interrupted` into another error kind. 
- fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - for b in bufs { - if !b.is_empty() { - return self.poll_write(cx, b); - } - } - - self.poll_write(cx, &[]) - } - - /// Attempt to flush the object, ensuring that any buffered data reach - /// their destination. - /// - /// On success, returns `Poll::Ready(Ok(()))`. - /// - /// If flushing cannot immediately complete, this method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker().wake_by_ref()`) to receive a notification when the object can make - /// progress towards flushing. - /// - /// # Implementation - /// - /// This function may not return errors of kind `WouldBlock` or - /// `Interrupted`. Implementations must convert `WouldBlock` into - /// `Poll::Pending` and either internally retry or convert - /// `Interrupted` into another error kind. - /// - /// It only makes sense to do anything here if you actually buffer data. - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Attempt to close the object. - /// - /// On success, returns `Poll::Ready(Ok(()))`. - /// - /// If closing cannot immediately complete, this function returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker().wake_by_ref()`) to receive a notification when the object can make - /// progress towards closing. - /// - /// # Implementation - /// - /// This function may not return errors of kind `WouldBlock` or - /// `Interrupted`. Implementations must convert `WouldBlock` into - /// `Poll::Pending` and either internally retry or convert - /// `Interrupted` into another error kind. - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - } - - /// Seek bytes asynchronously. - /// - /// This trait is analogous to the `std::io::Seek` trait, but integrates - /// with the asynchronous task system. 
In particular, the `poll_seek` - /// method, unlike `Seek::seek`, will automatically queue the current task - /// for wakeup and return if data is not yet available, rather than blocking - /// the calling thread. - pub trait AsyncSeek { - /// Attempt to seek to an offset, in bytes, in a stream. - /// - /// A seek beyond the end of a stream is allowed, but behavior is defined - /// by the implementation. - /// - /// If the seek operation completed successfully, - /// this method returns the new position from the start of the stream. - /// That position can be used later with [`SeekFrom::Start`]. - /// - /// # Errors - /// - /// Seeking to a negative offset is considered an error. - /// - /// # Implementation - /// - /// This function may not return errors of kind `WouldBlock` or - /// `Interrupted`. Implementations must convert `WouldBlock` into - /// `Poll::Pending` and either internally retry or convert - /// `Interrupted` into another error kind. - fn poll_seek( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll>; - } - - /// Read bytes asynchronously. - /// - /// This trait is analogous to the `std::io::BufRead` trait, but integrates - /// with the asynchronous task system. In particular, the `poll_fill_buf` - /// method, unlike `BufRead::fill_buf`, will automatically queue the current task - /// for wakeup and return if data is not yet available, rather than blocking - /// the calling thread. - pub trait AsyncBufRead: AsyncRead { - /// Attempt to return the contents of the internal buffer, filling it with more data - /// from the inner reader if it is empty. - /// - /// On success, returns `Poll::Ready(Ok(buf))`. - /// - /// If no data is available for reading, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes - /// readable or is closed. - /// - /// This function is a lower-level call. 
It needs to be paired with the - /// [`consume`] method to function properly. When calling this - /// method, none of the contents will be "read" in the sense that later - /// calling [`poll_read`] may return the same contents. As such, [`consume`] must - /// be called with the number of bytes that are consumed from this buffer to - /// ensure that the bytes are never returned twice. - /// - /// [`poll_read`]: AsyncRead::poll_read - /// [`consume`]: AsyncBufRead::consume - /// - /// An empty buffer returned indicates that the stream has reached EOF. - /// - /// # Implementation - /// - /// This function may not return errors of kind `WouldBlock` or - /// `Interrupted`. Implementations must convert `WouldBlock` into - /// `Poll::Pending` and either internally retry or convert - /// `Interrupted` into another error kind. - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Tells this buffer that `amt` bytes have been consumed from the buffer, - /// so they should no longer be returned in calls to [`poll_read`]. - /// - /// This function is a lower-level call. It needs to be paired with the - /// [`poll_fill_buf`] method to function properly. This function does - /// not perform any I/O, it simply informs this object that some amount of - /// its buffer, returned from [`poll_fill_buf`], has been consumed and should - /// no longer be returned. As such, this function may do odd things if - /// [`poll_fill_buf`] isn't called before calling it. - /// - /// The `amt` must be `<=` the number of bytes in the buffer returned by - /// [`poll_fill_buf`]. - /// - /// [`poll_read`]: AsyncRead::poll_read - /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf - fn consume(self: Pin<&mut Self>, amt: usize); - } - - macro_rules! 
deref_async_read { - () => { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut **self).poll_read(cx, buf) - } - - fn poll_read_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - Pin::new(&mut **self).poll_read_vectored(cx, bufs) - } - }; - } - - impl AsyncRead for Box { - deref_async_read!(); - } - - impl AsyncRead for &mut T { - deref_async_read!(); - } - - impl

AsyncRead for Pin

- where - P: DerefMut + Unpin, - P::Target: AsyncRead, - { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - self.get_mut().as_mut().poll_read(cx, buf) - } - - fn poll_read_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - self.get_mut().as_mut().poll_read_vectored(cx, bufs) - } - } - - macro_rules! delegate_async_read_to_stdio { - () => { - fn poll_read( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Poll::Ready(io::Read::read(&mut *self, buf)) - } - - fn poll_read_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - Poll::Ready(io::Read::read_vectored(&mut *self, bufs)) - } - }; - } - - impl AsyncRead for &[u8] { - delegate_async_read_to_stdio!(); - } - - macro_rules! deref_async_write { - () => { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut **self).poll_write(cx, buf) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Pin::new(&mut **self).poll_write_vectored(cx, bufs) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_flush(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_close(cx) - } - }; - } - - impl AsyncWrite for Box { - deref_async_write!(); - } - - impl AsyncWrite for &mut T { - deref_async_write!(); - } - - impl

AsyncWrite for Pin

- where - P: DerefMut + Unpin, - P::Target: AsyncWrite, - { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.get_mut().as_mut().poll_write(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - self.get_mut().as_mut().poll_write_vectored(cx, bufs) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_close(cx) - } - } - - macro_rules! delegate_async_write_to_stdio { - () => { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut *self, buf)) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut *self)) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } - }; - } - - impl AsyncWrite for Vec { - delegate_async_write_to_stdio!(); - } - - macro_rules! deref_async_seek { - () => { - fn poll_seek( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll> { - Pin::new(&mut **self).poll_seek(cx, pos) - } - }; - } - - impl AsyncSeek for Box { - deref_async_seek!(); - } - - impl AsyncSeek for &mut T { - deref_async_seek!(); - } - - impl

AsyncSeek for Pin

- where - P: DerefMut + Unpin, - P::Target: AsyncSeek, - { - fn poll_seek( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll> { - self.get_mut().as_mut().poll_seek(cx, pos) - } - } - - macro_rules! deref_async_buf_read { - () => { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self.get_mut()).poll_fill_buf(cx) - } - - fn consume(mut self: Pin<&mut Self>, amt: usize) { - Pin::new(&mut **self).consume(amt) - } - }; - } - - impl AsyncBufRead for Box { - deref_async_buf_read!(); - } - - impl AsyncBufRead for &mut T { - deref_async_buf_read!(); - } - - impl

AsyncBufRead for Pin

- where - P: DerefMut + Unpin, - P::Target: AsyncBufRead, - { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.get_mut().as_mut().consume(amt) - } - } - - macro_rules! delegate_async_buf_read_to_stdio { - () => { - fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::BufRead::fill_buf(self.get_mut())) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - io::BufRead::consume(self.get_mut(), amt) - } - }; - } - - impl AsyncBufRead for &[u8] { - delegate_async_buf_read_to_stdio!(); - } -} - -#[cfg(feature = "std")] -pub use self::if_std::*; diff -Nru s390-tools-2.31.0/rust-vendor/futures-macro/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/futures-macro/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/futures-macro/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-macro/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/futures-macro/Cargo.toml s390-tools-2.33.1/rust-vendor/futures-macro/Cargo.toml --- s390-tools-2.31.0/rust-vendor/futures-macro/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-macro/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). 
-# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.56" -name = "futures-macro" -version = "0.3.29" -description = """ -The futures-rs procedural macro implementations. -""" -homepage = "https://rust-lang.github.io/futures-rs" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/futures-rs" - -[lib] -proc-macro = true - -[dependencies.proc-macro2] -version = "1.0.60" - -[dependencies.quote] -version = "1.0" - -[dependencies.syn] -version = "2.0.8" -features = ["full"] - -[features] diff -Nru s390-tools-2.31.0/rust-vendor/futures-macro/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/futures-macro/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/futures-macro/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-macro/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/futures-macro/LICENSE-MIT s390-tools-2.33.1/rust-vendor/futures-macro/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/futures-macro/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-macro/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/futures-macro/src/executor.rs s390-tools-2.33.1/rust-vendor/futures-macro/src/executor.rs --- s390-tools-2.31.0/rust-vendor/futures-macro/src/executor.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-macro/src/executor.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -use proc_macro::TokenStream; -use proc_macro2::Span; -use quote::{quote, quote_spanned, ToTokens}; - -pub(crate) fn test(args: TokenStream, item: TokenStream) -> TokenStream { - if !args.is_empty() { - return syn::Error::new_spanned(proc_macro2::TokenStream::from(args), "invalid argument") - .to_compile_error() - .into(); - } - - let mut input = syn::parse_macro_input!(item as syn::ItemFn); - - if input.sig.asyncness.take().is_none() { - return syn::Error::new_spanned(input.sig.fn_token, "Only async functions are supported") - .to_compile_error() - .into(); - } - - // If type mismatch occurs, the current rustc points to the last statement. - let (last_stmt_start_span, last_stmt_end_span) = { - let mut last_stmt = input - .block - .stmts - .last() - .map(ToTokens::into_token_stream) - .unwrap_or_default() - .into_iter(); - // `Span` on stable Rust has a limitation that only points to the first - // token, not the whole tokens. We can work around this limitation by - // using the first/last span of the tokens like - // `syn::Error::new_spanned` does. - let start = last_stmt.next().map_or_else(Span::call_site, |t| t.span()); - let end = last_stmt.last().map_or(start, |t| t.span()); - (start, end) - }; - - let path = quote_spanned! 
{last_stmt_start_span=> - ::futures_test::__private - }; - let body = &input.block; - input.block.stmts = vec![syn::Stmt::Expr( - syn::parse2(quote_spanned! {last_stmt_end_span=> - #path::block_on(async #body) - }) - .unwrap(), - None, - )]; - - let gen = quote! { - #[::core::prelude::v1::test] - #input - }; - - gen.into() -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-macro/src/join.rs s390-tools-2.33.1/rust-vendor/futures-macro/src/join.rs --- s390-tools-2.31.0/rust-vendor/futures-macro/src/join.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-macro/src/join.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,144 +0,0 @@ -//! The futures-rs `join! macro implementation. - -use proc_macro::TokenStream; -use proc_macro2::{Span, TokenStream as TokenStream2}; -use quote::{format_ident, quote}; -use syn::parse::{Parse, ParseStream}; -use syn::{Expr, Ident, Token}; - -#[derive(Default)] -struct Join { - fut_exprs: Vec, -} - -impl Parse for Join { - fn parse(input: ParseStream<'_>) -> syn::Result { - let mut join = Self::default(); - - while !input.is_empty() { - join.fut_exprs.push(input.parse::()?); - - if !input.is_empty() { - input.parse::()?; - } - } - - Ok(join) - } -} - -fn bind_futures(fut_exprs: Vec, span: Span) -> (Vec, Vec) { - let mut future_let_bindings = Vec::with_capacity(fut_exprs.len()); - let future_names: Vec<_> = fut_exprs - .into_iter() - .enumerate() - .map(|(i, expr)| { - let name = format_ident!("_fut{}", i, span = span); - future_let_bindings.push(quote! { - // Move future into a local so that it is pinned in one place and - // is no longer accessible by the end user. - let mut #name = __futures_crate::future::maybe_done(#expr); - let mut #name = unsafe { __futures_crate::Pin::new_unchecked(&mut #name) }; - }); - name - }) - .collect(); - - (future_let_bindings, future_names) -} - -/// The `join!` macro. 
-pub(crate) fn join(input: TokenStream) -> TokenStream { - let parsed = syn::parse_macro_input!(input as Join); - - // should be def_site, but that's unstable - let span = Span::call_site(); - - let (future_let_bindings, future_names) = bind_futures(parsed.fut_exprs, span); - - let poll_futures = future_names.iter().map(|fut| { - quote! { - __all_done &= __futures_crate::future::Future::poll( - #fut.as_mut(), __cx).is_ready(); - } - }); - let take_outputs = future_names.iter().map(|fut| { - quote! { - #fut.as_mut().take_output().unwrap(), - } - }); - - TokenStream::from(quote! { { - #( #future_let_bindings )* - - __futures_crate::future::poll_fn(move |__cx: &mut __futures_crate::task::Context<'_>| { - let mut __all_done = true; - #( #poll_futures )* - if __all_done { - __futures_crate::task::Poll::Ready(( - #( #take_outputs )* - )) - } else { - __futures_crate::task::Poll::Pending - } - }).await - } }) -} - -/// The `try_join!` macro. -pub(crate) fn try_join(input: TokenStream) -> TokenStream { - let parsed = syn::parse_macro_input!(input as Join); - - // should be def_site, but that's unstable - let span = Span::call_site(); - - let (future_let_bindings, future_names) = bind_futures(parsed.fut_exprs, span); - - let poll_futures = future_names.iter().map(|fut| { - quote! { - if __futures_crate::future::Future::poll( - #fut.as_mut(), __cx).is_pending() - { - __all_done = false; - } else if #fut.as_mut().output_mut().unwrap().is_err() { - // `.err().unwrap()` rather than `.unwrap_err()` so that we don't introduce - // a `T: Debug` bound. - // Also, for an error type of ! any code after `err().unwrap()` is unreachable. - #[allow(unreachable_code)] - return __futures_crate::task::Poll::Ready( - __futures_crate::Err( - #fut.as_mut().take_output().unwrap().err().unwrap() - ) - ); - } - } - }); - let take_outputs = future_names.iter().map(|fut| { - quote! { - // `.ok().unwrap()` rather than `.unwrap()` so that we don't introduce - // an `E: Debug` bound. 
- // Also, for an ok type of ! any code after `ok().unwrap()` is unreachable. - #[allow(unreachable_code)] - #fut.as_mut().take_output().unwrap().ok().unwrap(), - } - }); - - TokenStream::from(quote! { { - #( #future_let_bindings )* - - #[allow(clippy::diverging_sub_expression)] - __futures_crate::future::poll_fn(move |__cx: &mut __futures_crate::task::Context<'_>| { - let mut __all_done = true; - #( #poll_futures )* - if __all_done { - __futures_crate::task::Poll::Ready( - __futures_crate::Ok(( - #( #take_outputs )* - )) - ) - } else { - __futures_crate::task::Poll::Pending - } - }).await - } }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-macro/src/lib.rs s390-tools-2.33.1/rust-vendor/futures-macro/src/lib.rs --- s390-tools-2.31.0/rust-vendor/futures-macro/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-macro/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,61 +0,0 @@ -//! The futures-rs procedural macro implementations. - -#![warn(rust_2018_idioms, single_use_lifetimes, unreachable_pub)] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] - -// Since https://github.com/rust-lang/cargo/pull/7700 `proc_macro` is part of the prelude for -// proc-macro crates, but to support older compilers we still need this explicit `extern crate`. -#[allow(unused_extern_crates)] -extern crate proc_macro; - -use proc_macro::TokenStream; - -mod executor; -mod join; -mod select; -mod stream_select; - -/// The `join!` macro. -#[proc_macro] -pub fn join_internal(input: TokenStream) -> TokenStream { - crate::join::join(input) -} - -/// The `try_join!` macro. -#[proc_macro] -pub fn try_join_internal(input: TokenStream) -> TokenStream { - crate::join::try_join(input) -} - -/// The `select!` macro. 
-#[proc_macro] -pub fn select_internal(input: TokenStream) -> TokenStream { - crate::select::select(input) -} - -/// The `select_biased!` macro. -#[proc_macro] -pub fn select_biased_internal(input: TokenStream) -> TokenStream { - crate::select::select_biased(input) -} - -// TODO: Change this to doc comment once rustdoc bug fixed: https://github.com/rust-lang/futures-rs/pull/2435 -// The `test` attribute. -#[proc_macro_attribute] -pub fn test_internal(input: TokenStream, item: TokenStream) -> TokenStream { - crate::executor::test(input, item) -} - -/// The `stream_select!` macro. -#[proc_macro] -pub fn stream_select_internal(input: TokenStream) -> TokenStream { - crate::stream_select::stream_select(input.into()) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-macro/src/select.rs s390-tools-2.33.1/rust-vendor/futures-macro/src/select.rs --- s390-tools-2.31.0/rust-vendor/futures-macro/src/select.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-macro/src/select.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,330 +0,0 @@ -//! The futures-rs `select! macro implementation. 
- -use proc_macro::TokenStream; -use proc_macro2::Span; -use quote::{format_ident, quote}; -use syn::parse::{Parse, ParseStream}; -use syn::{parse_quote, Expr, Ident, Pat, Token}; - -mod kw { - syn::custom_keyword!(complete); -} - -struct Select { - // span of `complete`, then expression after `=> ...` - complete: Option, - default: Option, - normal_fut_exprs: Vec, - normal_fut_handlers: Vec<(Pat, Expr)>, -} - -#[allow(clippy::large_enum_variant)] -enum CaseKind { - Complete, - Default, - Normal(Pat, Expr), -} - -impl Parse for Select { - fn parse(input: ParseStream<'_>) -> syn::Result { - let mut select = Self { - complete: None, - default: None, - normal_fut_exprs: vec![], - normal_fut_handlers: vec![], - }; - - while !input.is_empty() { - let case_kind = if input.peek(kw::complete) { - // `complete` - if select.complete.is_some() { - return Err(input.error("multiple `complete` cases found, only one allowed")); - } - input.parse::()?; - CaseKind::Complete - } else if input.peek(Token![default]) { - // `default` - if select.default.is_some() { - return Err(input.error("multiple `default` cases found, only one allowed")); - } - input.parse::()?; - CaseKind::Default - } else { - // ` = ` - let pat = Pat::parse_multi_with_leading_vert(input)?; - input.parse::()?; - let expr = input.parse()?; - CaseKind::Normal(pat, expr) - }; - - // `=> ` - input.parse::]>()?; - let expr = input.parse::()?; - - // Commas after the expression are only optional if it's a `Block` - // or it is the last branch in the `match`. 
- let is_block = match expr { - Expr::Block(_) => true, - _ => false, - }; - if is_block || input.is_empty() { - input.parse::>()?; - } else { - input.parse::()?; - } - - match case_kind { - CaseKind::Complete => select.complete = Some(expr), - CaseKind::Default => select.default = Some(expr), - CaseKind::Normal(pat, fut_expr) => { - select.normal_fut_exprs.push(fut_expr); - select.normal_fut_handlers.push((pat, expr)); - } - } - } - - Ok(select) - } -} - -// Enum over all the cases in which the `select!` waiting has completed and the result -// can be processed. -// -// `enum __PrivResult<_1, _2, ...> { _1(_1), _2(_2), ..., Complete }` -fn declare_result_enum( - result_ident: Ident, - variants: usize, - complete: bool, - span: Span, -) -> (Vec, syn::ItemEnum) { - // "_0", "_1", "_2" - let variant_names: Vec = - (0..variants).map(|num| format_ident!("_{}", num, span = span)).collect(); - - let type_parameters = &variant_names; - let variants = &variant_names; - - let complete_variant = if complete { Some(quote!(Complete)) } else { None }; - - let enum_item = parse_quote! { - enum #result_ident<#(#type_parameters,)*> { - #( - #variants(#type_parameters), - )* - #complete_variant - } - }; - - (variant_names, enum_item) -} - -/// The `select!` macro. -pub(crate) fn select(input: TokenStream) -> TokenStream { - select_inner(input, true) -} - -/// The `select_biased!` macro. 
-pub(crate) fn select_biased(input: TokenStream) -> TokenStream { - select_inner(input, false) -} - -fn select_inner(input: TokenStream, random: bool) -> TokenStream { - let parsed = syn::parse_macro_input!(input as Select); - - // should be def_site, but that's unstable - let span = Span::call_site(); - - let enum_ident = Ident::new("__PrivResult", span); - - let (variant_names, enum_item) = declare_result_enum( - enum_ident.clone(), - parsed.normal_fut_exprs.len(), - parsed.complete.is_some(), - span, - ); - - // bind non-`Ident` future exprs w/ `let` - let mut future_let_bindings = Vec::with_capacity(parsed.normal_fut_exprs.len()); - let bound_future_names: Vec<_> = parsed - .normal_fut_exprs - .into_iter() - .zip(variant_names.iter()) - .map(|(expr, variant_name)| { - match expr { - syn::Expr::Path(path) => { - // Don't bind futures that are already a path. - // This prevents creating redundant stack space - // for them. - // Passing Futures by path requires those Futures to implement Unpin. - // We check for this condition here in order to be able to - // safely use Pin::new_unchecked(&mut #path) later on. - future_let_bindings.push(quote! { - __futures_crate::async_await::assert_fused_future(&#path); - __futures_crate::async_await::assert_unpin(&#path); - }); - path - } - _ => { - // Bind and pin the resulting Future on the stack. This is - // necessary to support direct select! calls on !Unpin - // Futures. The Future is not explicitly pinned here with - // a Pin call, but assumed as pinned. The actual Pin is - // created inside the poll() function below to defer the - // creation of the temporary pointer, which would otherwise - // increase the size of the generated Future. - // Safety: This is safe since the lifetime of the Future - // is totally constraint to the lifetime of the select! - // expression, and the Future can't get moved inside it - // (it is shadowed). - future_let_bindings.push(quote! { - let mut #variant_name = #expr; - }); - parse_quote! 
{ #variant_name } - } - } - }) - .collect(); - - // For each future, make an `&mut dyn FnMut(&mut Context<'_>) -> Option>` - // to use for polling that individual future. These will then be put in an array. - let poll_functions = bound_future_names.iter().zip(variant_names.iter()).map( - |(bound_future_name, variant_name)| { - // Below we lazily create the Pin on the Future below. - // This is done in order to avoid allocating memory in the generator - // for the Pin variable. - // Safety: This is safe because one of the following condition applies: - // 1. The Future is passed by the caller by name, and we assert that - // it implements Unpin. - // 2. The Future is created in scope of the select! function and will - // not be moved for the duration of it. It is thereby stack-pinned - quote! { - let mut #variant_name = |__cx: &mut __futures_crate::task::Context<'_>| { - let mut #bound_future_name = unsafe { - __futures_crate::Pin::new_unchecked(&mut #bound_future_name) - }; - if __futures_crate::future::FusedFuture::is_terminated(&#bound_future_name) { - __futures_crate::None - } else { - __futures_crate::Some(__futures_crate::future::FutureExt::poll_unpin( - &mut #bound_future_name, - __cx, - ).map(#enum_ident::#variant_name)) - } - }; - let #variant_name: &mut dyn FnMut( - &mut __futures_crate::task::Context<'_> - ) -> __futures_crate::Option<__futures_crate::task::Poll<_>> = &mut #variant_name; - } - }, - ); - - let none_polled = if parsed.complete.is_some() { - quote! { - __futures_crate::task::Poll::Ready(#enum_ident::Complete) - } - } else { - quote! { - panic!("all futures in select! were completed,\ - but no `complete =>` handler was provided") - } - }; - - let branches = parsed.normal_fut_handlers.into_iter().zip(variant_names.iter()).map( - |((pat, expr), variant_name)| { - quote! { - #enum_ident::#variant_name(#pat) => { #expr }, - } - }, - ); - let branches = quote! 
{ #( #branches )* }; - - let complete_branch = parsed.complete.map(|complete_expr| { - quote! { - #enum_ident::Complete => { #complete_expr }, - } - }); - - let branches = quote! { - #branches - #complete_branch - }; - - let await_select_fut = if parsed.default.is_some() { - // For select! with default this returns the Poll result - quote! { - __poll_fn(&mut __futures_crate::task::Context::from_waker( - __futures_crate::task::noop_waker_ref() - )) - } - } else { - quote! { - __futures_crate::future::poll_fn(__poll_fn).await - } - }; - - let execute_result_expr = if let Some(default_expr) = &parsed.default { - // For select! with default __select_result is a Poll, otherwise not - quote! { - match __select_result { - __futures_crate::task::Poll::Ready(result) => match result { - #branches - }, - _ => #default_expr - } - } - } else { - quote! { - match __select_result { - #branches - } - } - }; - - let shuffle = if random { - quote! { - __futures_crate::async_await::shuffle(&mut __select_arr); - } - } else { - quote!() - }; - - TokenStream::from(quote! 
{ { - #enum_item - - let __select_result = { - #( #future_let_bindings )* - - let mut __poll_fn = |__cx: &mut __futures_crate::task::Context<'_>| { - let mut __any_polled = false; - - #( #poll_functions )* - - let mut __select_arr = [#( #variant_names ),*]; - #shuffle - for poller in &mut __select_arr { - let poller: &mut &mut dyn FnMut( - &mut __futures_crate::task::Context<'_> - ) -> __futures_crate::Option<__futures_crate::task::Poll<_>> = poller; - match poller(__cx) { - __futures_crate::Some(x @ __futures_crate::task::Poll::Ready(_)) => - return x, - __futures_crate::Some(__futures_crate::task::Poll::Pending) => { - __any_polled = true; - } - __futures_crate::None => {} - } - } - - if !__any_polled { - #none_polled - } else { - __futures_crate::task::Poll::Pending - } - }; - - #await_select_fut - }; - - #execute_result_expr - } }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-macro/src/stream_select.rs s390-tools-2.33.1/rust-vendor/futures-macro/src/stream_select.rs --- s390-tools-2.31.0/rust-vendor/futures-macro/src/stream_select.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-macro/src/stream_select.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,113 +0,0 @@ -use proc_macro2::TokenStream; -use quote::{format_ident, quote, ToTokens}; -use syn::{parse::Parser, punctuated::Punctuated, Expr, Index, Token}; - -/// The `stream_select!` macro. -pub(crate) fn stream_select(input: TokenStream) -> Result { - let args = Punctuated::::parse_terminated.parse2(input)?; - if args.len() < 2 { - return Ok(quote! 
{ - compile_error!("stream select macro needs at least two arguments.") - }); - } - let generic_idents = (0..args.len()).map(|i| format_ident!("_{}", i)).collect::>(); - let field_idents = (0..args.len()).map(|i| format_ident!("__{}", i)).collect::>(); - let field_idents_2 = (0..args.len()).map(|i| format_ident!("___{}", i)).collect::>(); - let field_indices = (0..args.len()).map(Index::from).collect::>(); - let args = args.iter().map(|e| e.to_token_stream()); - - Ok(quote! { - { - #[derive(Debug)] - struct StreamSelect<#(#generic_idents),*> (#(Option<#generic_idents>),*); - - enum StreamEnum<#(#generic_idents),*> { - #( - #generic_idents(#generic_idents) - ),*, - None, - } - - impl __futures_crate::stream::Stream for StreamEnum<#(#generic_idents),*> - where #(#generic_idents: __futures_crate::stream::Stream + ::std::marker::Unpin,)* - { - type Item = ITEM; - - fn poll_next(mut self: ::std::pin::Pin<&mut Self>, cx: &mut __futures_crate::task::Context<'_>) -> __futures_crate::task::Poll> { - match self.get_mut() { - #( - Self::#generic_idents(#generic_idents) => ::std::pin::Pin::new(#generic_idents).poll_next(cx) - ),*, - Self::None => panic!("StreamEnum::None should never be polled!"), - } - } - } - - impl __futures_crate::stream::Stream for StreamSelect<#(#generic_idents),*> - where #(#generic_idents: __futures_crate::stream::Stream + ::std::marker::Unpin,)* - { - type Item = ITEM; - - fn poll_next(mut self: ::std::pin::Pin<&mut Self>, cx: &mut __futures_crate::task::Context<'_>) -> __futures_crate::task::Poll> { - let Self(#(ref mut #field_idents),*) = self.get_mut(); - #( - let mut #field_idents_2 = false; - )* - let mut any_pending = false; - { - let mut stream_array = [#(#field_idents.as_mut().map(|f| StreamEnum::#generic_idents(f)).unwrap_or(StreamEnum::None)),*]; - __futures_crate::async_await::shuffle(&mut stream_array); - - for mut s in stream_array { - if let StreamEnum::None = s { - continue; - } else { - match 
__futures_crate::stream::Stream::poll_next(::std::pin::Pin::new(&mut s), cx) { - r @ __futures_crate::task::Poll::Ready(Some(_)) => { - return r; - }, - __futures_crate::task::Poll::Pending => { - any_pending = true; - }, - __futures_crate::task::Poll::Ready(None) => { - match s { - #( - StreamEnum::#generic_idents(_) => { #field_idents_2 = true; } - ),*, - StreamEnum::None => panic!("StreamEnum::None should never be polled!"), - } - }, - } - } - } - } - #( - if #field_idents_2 { - *#field_idents = None; - } - )* - if any_pending { - __futures_crate::task::Poll::Pending - } else { - __futures_crate::task::Poll::Ready(None) - } - } - - fn size_hint(&self) -> (usize, Option) { - let mut s = (0, Some(0)); - #( - if let Some(new_hint) = self.#field_indices.as_ref().map(|s| s.size_hint()) { - s.0 += new_hint.0; - // We can change this out for `.zip` when the MSRV is 1.46.0 or higher. - s.1 = s.1.and_then(|a| new_hint.1.map(|b| a + b)); - } - )* - s - } - } - - StreamSelect(#(Some(#args)),*) - - } - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-sink/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/futures-sink/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/futures-sink/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-sink/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/futures-sink/Cargo.toml s390-tools-2.33.1/rust-vendor/futures-sink/Cargo.toml --- s390-tools-2.31.0/rust-vendor/futures-sink/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-sink/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,33 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# 
with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.36" -name = "futures-sink" -version = "0.3.29" -description = """ -The asynchronous `Sink` trait for the futures-rs library. -""" -homepage = "https://rust-lang.github.io/futures-rs" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/futures-rs" - -[package.metadata.docs.rs] -all-features = true - -[dependencies] - -[features] -alloc = [] -default = ["std"] -std = ["alloc"] diff -Nru s390-tools-2.31.0/rust-vendor/futures-sink/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/futures-sink/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/futures-sink/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-sink/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/futures-sink/LICENSE-MIT s390-tools-2.33.1/rust-vendor/futures-sink/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/futures-sink/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-sink/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/futures-sink/README.md s390-tools-2.33.1/rust-vendor/futures-sink/README.md --- s390-tools-2.31.0/rust-vendor/futures-sink/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-sink/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -# futures-sink - -The asynchronous `Sink` trait for the futures-rs library. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -futures-sink = "0.3" -``` - -The current `futures-sink` requires Rust 1.36 or later. - -## License - -Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or -[MIT license](LICENSE-MIT) at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/futures-sink/src/lib.rs s390-tools-2.33.1/rust-vendor/futures-sink/src/lib.rs --- s390-tools-2.31.0/rust-vendor/futures-sink/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-sink/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,240 +0,0 @@ -//! Asynchronous sinks -//! -//! This crate contains the `Sink` trait which allows values to be sent -//! asynchronously. - -#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub)] -// It cannot be included in the published code because this lints have false positives in the minimum required version. 
-#![cfg_attr(test, warn(single_use_lifetimes))] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] - -#[cfg(feature = "alloc")] -extern crate alloc; - -use core::ops::DerefMut; -use core::pin::Pin; -use core::task::{Context, Poll}; - -/// A `Sink` is a value into which other values can be sent, asynchronously. -/// -/// Basic examples of sinks include the sending side of: -/// -/// - Channels -/// - Sockets -/// - Pipes -/// -/// In addition to such "primitive" sinks, it's typical to layer additional -/// functionality, such as buffering, on top of an existing sink. -/// -/// Sending to a sink is "asynchronous" in the sense that the value may not be -/// sent in its entirety immediately. Instead, values are sent in a two-phase -/// way: first by initiating a send, and then by polling for completion. This -/// two-phase setup is analogous to buffered writing in synchronous code, where -/// writes often succeed immediately, but internally are buffered and are -/// *actually* written only upon flushing. -/// -/// In addition, the `Sink` may be *full*, in which case it is not even possible -/// to start the sending process. -/// -/// As with `Future` and `Stream`, the `Sink` trait is built from a few core -/// required methods, and a host of default methods for working in a -/// higher-level way. The `Sink::send_all` combinator is of particular -/// importance: you can use it to send an entire stream to a sink, which is -/// the simplest way to ultimately consume a stream. -#[must_use = "sinks do nothing unless polled"] -pub trait Sink { - /// The type of value produced by the sink when an error occurs. - type Error; - - /// Attempts to prepare the `Sink` to receive a value. - /// - /// This method must be called and return `Poll::Ready(Ok(()))` prior to - /// each call to `start_send`. 
- /// - /// This method returns `Poll::Ready` once the underlying sink is ready to - /// receive data. If this method returns `Poll::Pending`, the current task - /// is registered to be notified (via `cx.waker().wake_by_ref()`) when `poll_ready` - /// should be called again. - /// - /// In most cases, if the sink encounters an error, the sink will - /// permanently be unable to receive items. - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Begin the process of sending a value to the sink. - /// Each call to this function must be preceded by a successful call to - /// `poll_ready` which returned `Poll::Ready(Ok(()))`. - /// - /// As the name suggests, this method only *begins* the process of sending - /// the item. If the sink employs buffering, the item isn't fully processed - /// until the buffer is fully flushed. Since sinks are designed to work with - /// asynchronous I/O, the process of actually writing out the data to an - /// underlying object takes place asynchronously. **You *must* use - /// `poll_flush` or `poll_close` in order to guarantee completion of a - /// send**. - /// - /// Implementations of `poll_ready` and `start_send` will usually involve - /// flushing behind the scenes in order to make room for new messages. - /// It is only necessary to call `poll_flush` if you need to guarantee that - /// *all* of the items placed into the `Sink` have been sent. - /// - /// In most cases, if the sink encounters an error, the sink will - /// permanently be unable to receive items. - fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error>; - - /// Flush any remaining output from this sink. - /// - /// Returns `Poll::Ready(Ok(()))` when no buffered items remain. If this - /// value is returned then it is guaranteed that all previous values sent - /// via `start_send` have been flushed. 
- /// - /// Returns `Poll::Pending` if there is more work left to do, in which - /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when - /// `poll_flush` should be called again. - /// - /// In most cases, if the sink encounters an error, the sink will - /// permanently be unable to receive items. - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Flush any remaining output and close this sink, if necessary. - /// - /// Returns `Poll::Ready(Ok(()))` when no buffered items remain and the sink - /// has been successfully closed. - /// - /// Returns `Poll::Pending` if there is more work left to do, in which - /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when - /// `poll_close` should be called again. - /// - /// If this function encounters an error, the sink should be considered to - /// have failed permanently, and no more `Sink` methods should be called. - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; -} - -impl + Unpin, Item> Sink for &mut S { - type Error = S::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_ready(cx) - } - - fn start_send(mut self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - Pin::new(&mut **self).start_send(item) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_flush(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_close(cx) - } -} - -impl Sink for Pin

-where - P: DerefMut + Unpin, - P::Target: Sink, -{ - type Error = >::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - self.get_mut().as_mut().start_send(item) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_close(cx) - } -} - -#[cfg(feature = "alloc")] -mod if_alloc { - use super::*; - use core::convert::Infallible as Never; - - impl Sink for alloc::vec::Vec { - type Error = Never; - - fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { - // TODO: impl Unpin for Vec {} - unsafe { self.get_unchecked_mut() }.push(item); - Ok(()) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - } - - impl Sink for alloc::collections::VecDeque { - type Error = Never; - - fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { - // TODO: impl Unpin for Vec {} - unsafe { self.get_unchecked_mut() }.push_back(item); - Ok(()) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - } - - impl + Unpin, Item> Sink for alloc::boxed::Box { - type Error = S::Error; - - fn poll_ready( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut **self).poll_ready(cx) - } - - fn start_send(mut self: Pin<&mut Self>, item: 
Item) -> Result<(), Self::Error> { - Pin::new(&mut **self).start_send(item) - } - - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut **self).poll_flush(cx) - } - - fn poll_close( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut **self).poll_close(cx) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/build.rs s390-tools-2.33.1/rust-vendor/futures-task/build.rs --- s390-tools-2.31.0/rust-vendor/futures-task/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -// The rustc-cfg listed below are considered public API, but it is *unstable* -// and outside of the normal semver guarantees: -// -// - `futures_no_atomic_cas` -// Assume the target does *not* support atomic CAS operations. -// This is usually detected automatically by the build script, but you may -// need to enable it manually when building for custom targets or using -// non-cargo build systems that don't run the build script. -// -// With the exceptions mentioned above, the rustc-cfg emitted by the build -// script are *not* public API. - -#![warn(rust_2018_idioms, single_use_lifetimes)] - -use std::env; - -include!("no_atomic_cas.rs"); - -fn main() { - let target = match env::var("TARGET") { - Ok(target) => target, - Err(e) => { - println!( - "cargo:warning={}: unable to get TARGET environment variable: {}", - env!("CARGO_PKG_NAME"), - e - ); - return; - } - }; - - // Note that this is `no_*`, not `has_*`. This allows treating - // `cfg(target_has_atomic = "ptr")` as true when the build script doesn't - // run. This is needed for compatibility with non-cargo build systems that - // don't run the build script. 
- if NO_ATOMIC_CAS.contains(&&*target) { - println!("cargo:rustc-cfg=futures_no_atomic_cas"); - } - - println!("cargo:rerun-if-changed=no_atomic_cas.rs"); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/futures-task/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/futures-task/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/Cargo.toml s390-tools-2.33.1/rust-vendor/futures-task/Cargo.toml --- s390-tools-2.31.0/rust-vendor/futures-task/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.56" -name = "futures-task" -version = "0.3.29" -description = """ -Tools for working with tasks. 
-""" -homepage = "https://rust-lang.github.io/futures-rs" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/futures-rs" - -[package.metadata.docs.rs] -all-features = true - -[dependencies] - -[dev-dependencies] - -[features] -alloc = [] -cfg-target-has-atomic = [] -default = ["std"] -std = ["alloc"] -unstable = [] diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/futures-task/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/futures-task/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/LICENSE-MIT s390-tools-2.33.1/rust-vendor/futures-task/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/futures-task/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/no_atomic_cas.rs s390-tools-2.33.1/rust-vendor/futures-task/no_atomic_cas.rs --- s390-tools-2.31.0/rust-vendor/futures-task/no_atomic_cas.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/no_atomic_cas.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,17 +0,0 @@ -// This file is @generated by no_atomic_cas.sh. -// It is not intended for manual editing. - -const NO_ATOMIC_CAS: &[&str] = &[ - "armv4t-none-eabi", - "armv5te-none-eabi", - "avr-unknown-gnu-atmega328", - "bpfeb-unknown-none", - "bpfel-unknown-none", - "msp430-none-elf", - "riscv32i-unknown-none-elf", - "riscv32im-unknown-none-elf", - "riscv32imc-unknown-none-elf", - "thumbv4t-none-eabi", - "thumbv5te-none-eabi", - "thumbv6m-none-eabi", -]; diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/README.md s390-tools-2.33.1/rust-vendor/futures-task/README.md --- s390-tools-2.31.0/rust-vendor/futures-task/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -# futures-task - -Tools for working with tasks. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -futures-task = "0.3" -``` - -The current `futures-task` requires Rust 1.56 or later. - -## License - -Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or -[MIT license](LICENSE-MIT) at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. 
diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/src/arc_wake.rs s390-tools-2.33.1/rust-vendor/futures-task/src/arc_wake.rs --- s390-tools-2.31.0/rust-vendor/futures-task/src/arc_wake.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/src/arc_wake.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,49 +0,0 @@ -use alloc::sync::Arc; - -/// A way of waking up a specific task. -/// -/// By implementing this trait, types that are expected to be wrapped in an `Arc` -/// can be converted into [`Waker`] objects. -/// Those Wakers can be used to signal executors that a task it owns -/// is ready to be `poll`ed again. -/// -/// Currently, there are two ways to convert `ArcWake` into [`Waker`]: -/// -/// * [`waker`](super::waker()) converts `Arc` into [`Waker`]. -/// * [`waker_ref`](super::waker_ref()) converts `&Arc` into [`WakerRef`] that -/// provides access to a [`&Waker`][`Waker`]. -/// -/// [`Waker`]: std::task::Waker -/// [`WakerRef`]: super::WakerRef -// Note: Send + Sync required because `Arc` doesn't automatically imply -// those bounds, but `Waker` implements them. -pub trait ArcWake: Send + Sync { - /// Indicates that the associated task is ready to make progress and should - /// be `poll`ed. - /// - /// This function can be called from an arbitrary thread, including threads which - /// did not create the `ArcWake` based [`Waker`]. - /// - /// Executors generally maintain a queue of "ready" tasks; `wake` should place - /// the associated task onto this queue. - /// - /// [`Waker`]: std::task::Waker - fn wake(self: Arc) { - Self::wake_by_ref(&self) - } - - /// Indicates that the associated task is ready to make progress and should - /// be `poll`ed. - /// - /// This function can be called from an arbitrary thread, including threads which - /// did not create the `ArcWake` based [`Waker`]. - /// - /// Executors generally maintain a queue of "ready" tasks; `wake_by_ref` should place - /// the associated task onto this queue. 
- /// - /// This function is similar to [`wake`](ArcWake::wake), but must not consume the provided data - /// pointer. - /// - /// [`Waker`]: std::task::Waker - fn wake_by_ref(arc_self: &Arc); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/src/future_obj.rs s390-tools-2.33.1/rust-vendor/futures-task/src/future_obj.rs --- s390-tools-2.31.0/rust-vendor/futures-task/src/future_obj.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/src/future_obj.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,335 +0,0 @@ -use core::{ - fmt, - future::Future, - marker::PhantomData, - mem, - pin::Pin, - task::{Context, Poll}, -}; - -/// A custom trait object for polling futures, roughly akin to -/// `Box + 'a>`. -/// -/// This custom trait object was introduced as currently it is not possible to -/// take `dyn Trait` by value and `Box` is not available in no_std -/// contexts. -pub struct LocalFutureObj<'a, T> { - future: *mut (dyn Future + 'static), - drop_fn: unsafe fn(*mut (dyn Future + 'static)), - _marker: PhantomData<&'a ()>, -} - -// As LocalFutureObj only holds pointers, even if we move it, the pointed to values won't move, -// so this is safe as long as we don't provide any way for a user to directly access the pointers -// and move their values. -impl Unpin for LocalFutureObj<'_, T> {} - -#[allow(single_use_lifetimes)] -#[allow(clippy::transmute_ptr_to_ptr)] -unsafe fn remove_future_lifetime<'a, T>( - ptr: *mut (dyn Future + 'a), -) -> *mut (dyn Future + 'static) { - mem::transmute(ptr) -} - -#[allow(single_use_lifetimes)] -unsafe fn remove_drop_lifetime<'a, T>( - ptr: unsafe fn(*mut (dyn Future + 'a)), -) -> unsafe fn(*mut (dyn Future + 'static)) { - mem::transmute(ptr) -} - -impl<'a, T> LocalFutureObj<'a, T> { - /// Create a `LocalFutureObj` from a custom trait object representation. 
- #[inline] - pub fn new + 'a>(f: F) -> Self { - Self { - future: unsafe { remove_future_lifetime(f.into_raw()) }, - drop_fn: unsafe { remove_drop_lifetime(F::drop) }, - _marker: PhantomData, - } - } - - /// Converts the `LocalFutureObj` into a `FutureObj`. - /// - /// # Safety - /// - /// To make this operation safe one has to ensure that the `UnsafeFutureObj` - /// instance from which this `LocalFutureObj` was created actually - /// implements `Send`. - #[inline] - pub unsafe fn into_future_obj(self) -> FutureObj<'a, T> { - FutureObj(self) - } -} - -impl fmt::Debug for LocalFutureObj<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LocalFutureObj").finish() - } -} - -impl<'a, T> From> for LocalFutureObj<'a, T> { - #[inline] - fn from(f: FutureObj<'a, T>) -> Self { - f.0 - } -} - -impl Future for LocalFutureObj<'_, T> { - type Output = T; - - #[inline] - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - unsafe { Pin::new_unchecked(&mut *self.future).poll(cx) } - } -} - -impl Drop for LocalFutureObj<'_, T> { - fn drop(&mut self) { - unsafe { (self.drop_fn)(self.future) } - } -} - -/// A custom trait object for polling futures, roughly akin to -/// `Box + Send + 'a>`. -/// -/// This custom trait object was introduced as currently it is not possible to -/// take `dyn Trait` by value and `Box` is not available in no_std -/// contexts. -/// -/// You should generally not need to use this type outside of `no_std` or when -/// implementing `Spawn`, consider using `BoxFuture` instead. -pub struct FutureObj<'a, T>(LocalFutureObj<'a, T>); - -impl Unpin for FutureObj<'_, T> {} -unsafe impl Send for FutureObj<'_, T> {} - -impl<'a, T> FutureObj<'a, T> { - /// Create a `FutureObj` from a custom trait object representation. 
- #[inline] - pub fn new + Send>(f: F) -> Self { - Self(LocalFutureObj::new(f)) - } -} - -impl fmt::Debug for FutureObj<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FutureObj").finish() - } -} - -impl Future for FutureObj<'_, T> { - type Output = T; - - #[inline] - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.0).poll(cx) - } -} - -/// A custom implementation of a future trait object for `FutureObj`, providing -/// a vtable with drop support. -/// -/// This custom representation is typically used only in `no_std` contexts, -/// where the default `Box`-based implementation is not available. -/// -/// # Safety -/// -/// See the safety notes on individual methods for what guarantees an -/// implementor must provide. -pub unsafe trait UnsafeFutureObj<'a, T>: 'a { - /// Convert an owned instance into a (conceptually owned) fat pointer. - /// - /// # Safety - /// - /// ## Implementor - /// - /// The trait implementor must guarantee that it is safe to convert the - /// provided `*mut (dyn Future + 'a)` into a `Pin<&mut (dyn - /// Future + 'a)>` and call methods on it, non-reentrantly, - /// until `UnsafeFutureObj::drop` is called with it. - #[allow(clippy::unnecessary_safety_doc)] - fn into_raw(self) -> *mut (dyn Future + 'a); - - /// Drops the future represented by the given fat pointer. - /// - /// # Safety - /// - /// ## Implementor - /// - /// The trait implementor must guarantee that it is safe to call this - /// function once per `into_raw` invocation. 
- /// - /// ## Caller - /// - /// The caller must ensure: - /// - /// * the pointer passed was obtained from an `into_raw` invocation from - /// this same trait object - /// * the pointer is not currently in use as a `Pin<&mut (dyn Future + 'a)>` - /// * the pointer must not be used again after this function is called - unsafe fn drop(ptr: *mut (dyn Future + 'a)); -} - -unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for &'a mut F -where - F: Future + Unpin + 'a, -{ - fn into_raw(self) -> *mut (dyn Future + 'a) { - self as *mut dyn Future - } - - unsafe fn drop(_ptr: *mut (dyn Future + 'a)) {} -} - -unsafe impl<'a, T> UnsafeFutureObj<'a, T> for &'a mut (dyn Future + Unpin + 'a) { - fn into_raw(self) -> *mut (dyn Future + 'a) { - self as *mut dyn Future - } - - unsafe fn drop(_ptr: *mut (dyn Future + 'a)) {} -} - -unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for Pin<&'a mut F> -where - F: Future + 'a, -{ - fn into_raw(self) -> *mut (dyn Future + 'a) { - unsafe { self.get_unchecked_mut() as *mut dyn Future } - } - - unsafe fn drop(_ptr: *mut (dyn Future + 'a)) {} -} - -unsafe impl<'a, T> UnsafeFutureObj<'a, T> for Pin<&'a mut (dyn Future + 'a)> { - fn into_raw(self) -> *mut (dyn Future + 'a) { - unsafe { self.get_unchecked_mut() as *mut dyn Future } - } - - unsafe fn drop(_ptr: *mut (dyn Future + 'a)) {} -} - -#[cfg(feature = "alloc")] -mod if_alloc { - use super::*; - use alloc::boxed::Box; - - unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for Box - where - F: Future + 'a, - { - fn into_raw(self) -> *mut (dyn Future + 'a) { - Box::into_raw(self) - } - - unsafe fn drop(ptr: *mut (dyn Future + 'a)) { - drop(Box::from_raw(ptr.cast::())) - } - } - - unsafe impl<'a, T: 'a> UnsafeFutureObj<'a, T> for Box + 'a> { - fn into_raw(self) -> *mut (dyn Future + 'a) { - Box::into_raw(self) - } - - unsafe fn drop(ptr: *mut (dyn Future + 'a)) { - drop(Box::from_raw(ptr)) - } - } - - unsafe impl<'a, T: 'a> UnsafeFutureObj<'a, T> for Box + Send + 'a> { - fn into_raw(self) -> *mut (dyn 
Future + 'a) { - Box::into_raw(self) - } - - unsafe fn drop(ptr: *mut (dyn Future + 'a)) { - drop(Box::from_raw(ptr)) - } - } - - unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for Pin> - where - F: Future + 'a, - { - fn into_raw(self) -> *mut (dyn Future + 'a) { - let mut this = mem::ManuallyDrop::new(self); - unsafe { this.as_mut().get_unchecked_mut() as *mut _ } - } - - unsafe fn drop(ptr: *mut (dyn Future + 'a)) { - drop(Pin::from(Box::from_raw(ptr))) - } - } - - unsafe impl<'a, T: 'a> UnsafeFutureObj<'a, T> for Pin + 'a>> { - fn into_raw(self) -> *mut (dyn Future + 'a) { - let mut this = mem::ManuallyDrop::new(self); - unsafe { this.as_mut().get_unchecked_mut() as *mut _ } - } - - unsafe fn drop(ptr: *mut (dyn Future + 'a)) { - drop(Pin::from(Box::from_raw(ptr))) - } - } - - unsafe impl<'a, T: 'a> UnsafeFutureObj<'a, T> for Pin + Send + 'a>> { - fn into_raw(self) -> *mut (dyn Future + 'a) { - let mut this = mem::ManuallyDrop::new(self); - unsafe { this.as_mut().get_unchecked_mut() as *mut _ } - } - - unsafe fn drop(ptr: *mut (dyn Future + 'a)) { - drop(Pin::from(Box::from_raw(ptr))) - } - } - - impl<'a, F: Future + Send + 'a> From> for FutureObj<'a, ()> { - fn from(boxed: Box) -> Self { - Self::new(boxed) - } - } - - impl<'a> From + Send + 'a>> for FutureObj<'a, ()> { - fn from(boxed: Box + Send + 'a>) -> Self { - Self::new(boxed) - } - } - - impl<'a, F: Future + Send + 'a> From>> for FutureObj<'a, ()> { - fn from(boxed: Pin>) -> Self { - Self::new(boxed) - } - } - - impl<'a> From + Send + 'a>>> for FutureObj<'a, ()> { - fn from(boxed: Pin + Send + 'a>>) -> Self { - Self::new(boxed) - } - } - - impl<'a, F: Future + 'a> From> for LocalFutureObj<'a, ()> { - fn from(boxed: Box) -> Self { - Self::new(boxed) - } - } - - impl<'a> From + 'a>> for LocalFutureObj<'a, ()> { - fn from(boxed: Box + 'a>) -> Self { - Self::new(boxed) - } - } - - impl<'a, F: Future + 'a> From>> for LocalFutureObj<'a, ()> { - fn from(boxed: Pin>) -> Self { - Self::new(boxed) - } - } - - 
impl<'a> From + 'a>>> for LocalFutureObj<'a, ()> { - fn from(boxed: Pin + 'a>>) -> Self { - Self::new(boxed) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/src/lib.rs s390-tools-2.33.1/rust-vendor/futures-task/src/lib.rs --- s390-tools-2.31.0/rust-vendor/futures-task/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,50 +0,0 @@ -//! Tools for working with tasks. - -#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub)] -// It cannot be included in the published code because this lints have false positives in the minimum required version. -#![cfg_attr(test, warn(single_use_lifetimes))] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] - -#[cfg(feature = "alloc")] -extern crate alloc; - -mod spawn; -pub use crate::spawn::{LocalSpawn, Spawn, SpawnError}; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod arc_wake; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use crate::arc_wake::ArcWake; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod waker; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use crate::waker::waker; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod waker_ref; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use crate::waker_ref::{waker_ref, WakerRef}; - -mod future_obj; -pub use crate::future_obj::{FutureObj, LocalFutureObj, UnsafeFutureObj}; - -mod noop_waker; -pub use crate::noop_waker::noop_waker; -pub use crate::noop_waker::noop_waker_ref; - -#[doc(no_inline)] -pub use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/src/noop_waker.rs 
s390-tools-2.33.1/rust-vendor/futures-task/src/noop_waker.rs --- s390-tools-2.31.0/rust-vendor/futures-task/src/noop_waker.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/src/noop_waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,63 +0,0 @@ -//! Utilities for creating zero-cost wakers that don't do anything. - -use core::ptr::null; -use core::task::{RawWaker, RawWakerVTable, Waker}; - -unsafe fn noop_clone(_data: *const ()) -> RawWaker { - noop_raw_waker() -} - -unsafe fn noop(_data: *const ()) {} - -const NOOP_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(noop_clone, noop, noop, noop); - -const fn noop_raw_waker() -> RawWaker { - RawWaker::new(null(), &NOOP_WAKER_VTABLE) -} - -/// Create a new [`Waker`] which does -/// nothing when `wake()` is called on it. -/// -/// # Examples -/// -/// ``` -/// use futures::task::noop_waker; -/// let waker = noop_waker(); -/// waker.wake(); -/// ``` -#[inline] -pub fn noop_waker() -> Waker { - // FIXME: Since 1.46.0 we can use transmute in consts, allowing this function to be const. - unsafe { Waker::from_raw(noop_raw_waker()) } -} - -/// Get a static reference to a [`Waker`] which -/// does nothing when `wake()` is called on it. -/// -/// # Examples -/// -/// ``` -/// use futures::task::noop_waker_ref; -/// let waker = noop_waker_ref(); -/// waker.wake_by_ref(); -/// ``` -#[inline] -pub fn noop_waker_ref() -> &'static Waker { - struct SyncRawWaker(RawWaker); - unsafe impl Sync for SyncRawWaker {} - - static NOOP_WAKER_INSTANCE: SyncRawWaker = SyncRawWaker(noop_raw_waker()); - - // SAFETY: `Waker` is #[repr(transparent)] over its `RawWaker`. 
- unsafe { &*(&NOOP_WAKER_INSTANCE.0 as *const RawWaker as *const Waker) } -} - -#[cfg(test)] -mod tests { - #[test] - #[cfg(feature = "std")] - fn issue_2091_cross_thread_segfault() { - let waker = std::thread::spawn(super::noop_waker_ref).join().unwrap(); - waker.wake_by_ref(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/src/spawn.rs s390-tools-2.33.1/rust-vendor/futures-task/src/spawn.rs --- s390-tools-2.31.0/rust-vendor/futures-task/src/spawn.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/src/spawn.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,192 +0,0 @@ -use crate::{FutureObj, LocalFutureObj}; -use core::fmt; - -/// The `Spawn` trait allows for pushing futures onto an executor that will -/// run them to completion. -pub trait Spawn { - /// Spawns a future that will be run to completion. - /// - /// # Errors - /// - /// The executor may be unable to spawn tasks. Spawn errors should - /// represent relatively rare scenarios, such as the executor - /// having been shut down so that it is no longer able to accept - /// tasks. - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError>; - - /// Determines whether the executor is able to spawn new tasks. - /// - /// This method will return `Ok` when the executor is *likely* - /// (but not guaranteed) to accept a subsequent spawn attempt. - /// Likewise, an `Err` return means that `spawn` is likely, but - /// not guaranteed, to yield an error. - #[inline] - fn status(&self) -> Result<(), SpawnError> { - Ok(()) - } -} - -/// The `LocalSpawn` is similar to [`Spawn`], but allows spawning futures -/// that don't implement `Send`. -pub trait LocalSpawn { - /// Spawns a future that will be run to completion. - /// - /// # Errors - /// - /// The executor may be unable to spawn tasks. Spawn errors should - /// represent relatively rare scenarios, such as the executor - /// having been shut down so that it is no longer able to accept - /// tasks. 
- fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError>; - - /// Determines whether the executor is able to spawn new tasks. - /// - /// This method will return `Ok` when the executor is *likely* - /// (but not guaranteed) to accept a subsequent spawn attempt. - /// Likewise, an `Err` return means that `spawn` is likely, but - /// not guaranteed, to yield an error. - #[inline] - fn status_local(&self) -> Result<(), SpawnError> { - Ok(()) - } -} - -/// An error that occurred during spawning. -pub struct SpawnError { - _priv: (), -} - -impl fmt::Debug for SpawnError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("SpawnError").field(&"shutdown").finish() - } -} - -impl fmt::Display for SpawnError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Executor is shutdown") - } -} - -#[cfg(feature = "std")] -impl std::error::Error for SpawnError {} - -impl SpawnError { - /// Spawning failed because the executor has been shut down. - pub fn shutdown() -> Self { - Self { _priv: () } - } - - /// Check whether spawning failed to the executor being shut down. 
- pub fn is_shutdown(&self) -> bool { - true - } -} - -impl Spawn for &Sp { - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { - Sp::spawn_obj(self, future) - } - - fn status(&self) -> Result<(), SpawnError> { - Sp::status(self) - } -} - -impl Spawn for &mut Sp { - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { - Sp::spawn_obj(self, future) - } - - fn status(&self) -> Result<(), SpawnError> { - Sp::status(self) - } -} - -impl LocalSpawn for &Sp { - fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { - Sp::spawn_local_obj(self, future) - } - - fn status_local(&self) -> Result<(), SpawnError> { - Sp::status_local(self) - } -} - -impl LocalSpawn for &mut Sp { - fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { - Sp::spawn_local_obj(self, future) - } - - fn status_local(&self) -> Result<(), SpawnError> { - Sp::status_local(self) - } -} - -#[cfg(feature = "alloc")] -mod if_alloc { - use super::*; - use alloc::{boxed::Box, rc::Rc}; - - impl Spawn for Box { - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { - (**self).spawn_obj(future) - } - - fn status(&self) -> Result<(), SpawnError> { - (**self).status() - } - } - - impl LocalSpawn for Box { - fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { - (**self).spawn_local_obj(future) - } - - fn status_local(&self) -> Result<(), SpawnError> { - (**self).status_local() - } - } - - impl Spawn for Rc { - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { - (**self).spawn_obj(future) - } - - fn status(&self) -> Result<(), SpawnError> { - (**self).status() - } - } - - impl LocalSpawn for Rc { - fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { - (**self).spawn_local_obj(future) - } - - fn status_local(&self) -> Result<(), SpawnError> { - 
(**self).status_local() - } - } - - #[cfg(not(futures_no_atomic_cas))] - impl Spawn for alloc::sync::Arc { - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { - (**self).spawn_obj(future) - } - - fn status(&self) -> Result<(), SpawnError> { - (**self).status() - } - } - - #[cfg(not(futures_no_atomic_cas))] - impl LocalSpawn for alloc::sync::Arc { - fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { - (**self).spawn_local_obj(future) - } - - fn status_local(&self) -> Result<(), SpawnError> { - (**self).status_local() - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/src/waker_ref.rs s390-tools-2.33.1/rust-vendor/futures-task/src/waker_ref.rs --- s390-tools-2.31.0/rust-vendor/futures-task/src/waker_ref.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/src/waker_ref.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,66 +0,0 @@ -use super::arc_wake::ArcWake; -use super::waker::waker_vtable; -use alloc::sync::Arc; -use core::marker::PhantomData; -use core::mem::ManuallyDrop; -use core::ops::Deref; -use core::task::{RawWaker, Waker}; - -/// A [`Waker`] that is only valid for a given lifetime. -/// -/// Note: this type implements [`Deref`](std::ops::Deref), -/// so it can be used to get a `&Waker`. -#[derive(Debug)] -pub struct WakerRef<'a> { - waker: ManuallyDrop, - _marker: PhantomData<&'a ()>, -} - -impl<'a> WakerRef<'a> { - /// Create a new [`WakerRef`] from a [`Waker`] reference. - #[inline] - pub fn new(waker: &'a Waker) -> Self { - // copy the underlying (raw) waker without calling a clone, - // as we won't call Waker::drop either. - let waker = ManuallyDrop::new(unsafe { core::ptr::read(waker) }); - Self { waker, _marker: PhantomData } - } - - /// Create a new [`WakerRef`] from a [`Waker`] that must not be dropped. 
- /// - /// Note: this if for rare cases where the caller created a [`Waker`] in - /// an unsafe way (that will be valid only for a lifetime to be determined - /// by the caller), and the [`Waker`] doesn't need to or must not be - /// destroyed. - #[inline] - pub fn new_unowned(waker: ManuallyDrop) -> Self { - Self { waker, _marker: PhantomData } - } -} - -impl Deref for WakerRef<'_> { - type Target = Waker; - - #[inline] - fn deref(&self) -> &Waker { - &self.waker - } -} - -/// Creates a reference to a [`Waker`] from a reference to `Arc`. -/// -/// The resulting [`Waker`] will call -/// [`ArcWake.wake()`](ArcWake::wake) if awoken. -#[inline] -pub fn waker_ref(wake: &Arc) -> WakerRef<'_> -where - W: ArcWake, -{ - // simply copy the pointer instead of using Arc::into_raw, - // as we don't actually keep a refcount by using ManuallyDrop.< - let ptr = Arc::as_ptr(wake).cast::<()>(); - - let waker = - ManuallyDrop::new(unsafe { Waker::from_raw(RawWaker::new(ptr, waker_vtable::())) }); - WakerRef::new_unowned(waker) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-task/src/waker.rs s390-tools-2.33.1/rust-vendor/futures-task/src/waker.rs --- s390-tools-2.31.0/rust-vendor/futures-task/src/waker.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-task/src/waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,59 +0,0 @@ -use super::arc_wake::ArcWake; -use alloc::sync::Arc; -use core::mem; -use core::task::{RawWaker, RawWakerVTable, Waker}; - -pub(super) fn waker_vtable() -> &'static RawWakerVTable { - &RawWakerVTable::new( - clone_arc_raw::, - wake_arc_raw::, - wake_by_ref_arc_raw::, - drop_arc_raw::, - ) -} - -/// Creates a [`Waker`] from an `Arc`. -/// -/// The returned [`Waker`] will call -/// [`ArcWake.wake()`](ArcWake::wake) if awoken. 
-pub fn waker(wake: Arc) -> Waker -where - W: ArcWake + 'static, -{ - let ptr = Arc::into_raw(wake).cast::<()>(); - - unsafe { Waker::from_raw(RawWaker::new(ptr, waker_vtable::())) } -} - -// FIXME: panics on Arc::clone / refcount changes could wreak havoc on the -// code here. We should guard against this by aborting. - -#[allow(clippy::redundant_clone)] // The clone here isn't actually redundant. -unsafe fn increase_refcount(data: *const ()) { - // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop - let arc = mem::ManuallyDrop::new(Arc::::from_raw(data.cast::())); - // Now increase refcount, but don't drop new refcount either - let _arc_clone: mem::ManuallyDrop<_> = arc.clone(); -} - -// used by `waker_ref` -unsafe fn clone_arc_raw(data: *const ()) -> RawWaker { - increase_refcount::(data); - RawWaker::new(data, waker_vtable::()) -} - -unsafe fn wake_arc_raw(data: *const ()) { - let arc: Arc = Arc::from_raw(data.cast::()); - ArcWake::wake(arc); -} - -// used by `waker_ref` -unsafe fn wake_by_ref_arc_raw(data: *const ()) { - // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop - let arc = mem::ManuallyDrop::new(Arc::::from_raw(data.cast::())); - ArcWake::wake_by_ref(&arc); -} - -unsafe fn drop_arc_raw(data: *const ()) { - drop(Arc::::from_raw(data.cast::())) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/benches/bilock.rs s390-tools-2.33.1/rust-vendor/futures-util/benches/bilock.rs --- s390-tools-2.31.0/rust-vendor/futures-util/benches/bilock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/benches/bilock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,68 +0,0 @@ -#![feature(test)] -#![cfg(feature = "bilock")] - -extern crate test; - -use futures::task::Poll; -use futures_test::task::noop_context; -use futures_util::lock::BiLock; - -use crate::test::Bencher; - -#[bench] -fn contended(b: &mut Bencher) { - let mut context = noop_context(); - - b.iter(|| { - let (x, y) = BiLock::new(1); - - for 
_ in 0..1000 { - let x_guard = match x.poll_lock(&mut context) { - Poll::Ready(guard) => guard, - _ => panic!(), - }; - - // Try poll second lock while first lock still holds the lock - match y.poll_lock(&mut context) { - Poll::Pending => (), - _ => panic!(), - }; - - drop(x_guard); - - let y_guard = match y.poll_lock(&mut context) { - Poll::Ready(guard) => guard, - _ => panic!(), - }; - - drop(y_guard); - } - (x, y) - }); -} - -#[bench] -fn lock_unlock(b: &mut Bencher) { - let mut context = noop_context(); - - b.iter(|| { - let (x, y) = BiLock::new(1); - - for _ in 0..1000 { - let x_guard = match x.poll_lock(&mut context) { - Poll::Ready(guard) => guard, - _ => panic!(), - }; - - drop(x_guard); - - let y_guard = match y.poll_lock(&mut context) { - Poll::Ready(guard) => guard, - _ => panic!(), - }; - - drop(y_guard); - } - (x, y) - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/benches/flatten_unordered.rs s390-tools-2.33.1/rust-vendor/futures-util/benches/flatten_unordered.rs --- s390-tools-2.31.0/rust-vendor/futures-util/benches/flatten_unordered.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/benches/flatten_unordered.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -#![feature(test)] - -extern crate test; -use crate::test::Bencher; - -use futures::channel::oneshot; -use futures::executor::block_on; -use futures::future; -use futures::stream::{self, StreamExt}; -use futures::task::Poll; -use futures_util::FutureExt; -use std::collections::VecDeque; -use std::thread; - -#[bench] -fn oneshot_streams(b: &mut Bencher) { - const STREAM_COUNT: usize = 10_000; - const STREAM_ITEM_COUNT: usize = 1; - - b.iter(|| { - let mut txs = VecDeque::with_capacity(STREAM_COUNT); - let mut rxs = Vec::new(); - - for _ in 0..STREAM_COUNT { - let (tx, rx) = oneshot::channel(); - txs.push_back(tx); - rxs.push(rx); - } - - thread::spawn(move || { - let mut last = 1; - while let Some(tx) = txs.pop_front() { - let _ = 
tx.send(stream::iter(last..last + STREAM_ITEM_COUNT)); - last += STREAM_ITEM_COUNT; - } - }); - - let mut flatten = stream::iter(rxs) - .map(|recv| recv.into_stream().map(|val| val.unwrap()).flatten()) - .flatten_unordered(None); - - block_on(future::poll_fn(move |cx| { - let mut count = 0; - loop { - match flatten.poll_next_unpin(cx) { - Poll::Ready(None) => break, - Poll::Ready(Some(_)) => { - count += 1; - } - _ => {} - } - } - assert_eq!(count, STREAM_COUNT * STREAM_ITEM_COUNT); - - Poll::Ready(()) - })) - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/benches/futures_unordered.rs s390-tools-2.33.1/rust-vendor/futures-util/benches/futures_unordered.rs --- s390-tools-2.31.0/rust-vendor/futures-util/benches/futures_unordered.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/benches/futures_unordered.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -#![feature(test)] - -extern crate test; -use crate::test::Bencher; - -use futures::channel::oneshot; -use futures::executor::block_on; -use futures::future; -use futures::stream::{FuturesUnordered, StreamExt}; -use futures::task::Poll; -use std::collections::VecDeque; -use std::thread; - -#[bench] -fn oneshots(b: &mut Bencher) { - const NUM: usize = 10_000; - - b.iter(|| { - let mut txs = VecDeque::with_capacity(NUM); - let mut rxs = FuturesUnordered::new(); - - for _ in 0..NUM { - let (tx, rx) = oneshot::channel(); - txs.push_back(tx); - rxs.push(rx); - } - - thread::spawn(move || { - while let Some(tx) = txs.pop_front() { - let _ = tx.send("hello"); - } - }); - - block_on(future::poll_fn(move |cx| { - loop { - if let Poll::Ready(None) = rxs.poll_next_unpin(cx) { - break; - } - } - Poll::Ready(()) - })) - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/benches/select.rs s390-tools-2.33.1/rust-vendor/futures-util/benches/select.rs --- s390-tools-2.31.0/rust-vendor/futures-util/benches/select.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/futures-util/benches/select.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,35 +0,0 @@ -#![feature(test)] - -extern crate test; -use crate::test::Bencher; - -use futures::executor::block_on; -use futures::stream::{repeat, select, StreamExt}; - -#[bench] -fn select_streams(b: &mut Bencher) { - const STREAM_COUNT: usize = 10_000; - - b.iter(|| { - let stream1 = repeat(1).take(STREAM_COUNT); - let stream2 = repeat(2).take(STREAM_COUNT); - let stream3 = repeat(3).take(STREAM_COUNT); - let stream4 = repeat(4).take(STREAM_COUNT); - let stream5 = repeat(5).take(STREAM_COUNT); - let stream6 = repeat(6).take(STREAM_COUNT); - let stream7 = repeat(7).take(STREAM_COUNT); - let count = block_on(async { - let count = select( - stream1, - select( - stream2, - select(stream3, select(stream4, select(stream5, select(stream6, stream7)))), - ), - ) - .count() - .await; - count - }); - assert_eq!(count, STREAM_COUNT * 7); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/build.rs s390-tools-2.33.1/rust-vendor/futures-util/build.rs --- s390-tools-2.31.0/rust-vendor/futures-util/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -// The rustc-cfg listed below are considered public API, but it is *unstable* -// and outside of the normal semver guarantees: -// -// - `futures_no_atomic_cas` -// Assume the target does *not* support atomic CAS operations. -// This is usually detected automatically by the build script, but you may -// need to enable it manually when building for custom targets or using -// non-cargo build systems that don't run the build script. -// -// With the exceptions mentioned above, the rustc-cfg emitted by the build -// script are *not* public API. 
- -#![warn(rust_2018_idioms, single_use_lifetimes)] - -use std::env; - -include!("no_atomic_cas.rs"); - -fn main() { - let target = match env::var("TARGET") { - Ok(target) => target, - Err(e) => { - println!( - "cargo:warning={}: unable to get TARGET environment variable: {}", - env!("CARGO_PKG_NAME"), - e - ); - return; - } - }; - - // Note that this is `no_*`, not `has_*`. This allows treating - // `cfg(target_has_atomic = "ptr")` as true when the build script doesn't - // run. This is needed for compatibility with non-cargo build systems that - // don't run the build script. - if NO_ATOMIC_CAS.contains(&&*target) { - println!("cargo:rustc-cfg=futures_no_atomic_cas"); - } - - println!("cargo:rerun-if-changed=no_atomic_cas.rs"); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/futures-util/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/futures-util/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/Cargo.toml s390-tools-2.33.1/rust-vendor/futures-util/Cargo.toml --- s390-tools-2.31.0/rust-vendor/futures-util/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,135 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). 
-# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.56" -name = "futures-util" -version = "0.3.29" -description = """ -Common utilities and extension traits for the futures-rs library. -""" -homepage = "https://rust-lang.github.io/futures-rs" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/futures-rs" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[dependencies.futures-channel] -version = "0.3.29" -features = ["std"] -optional = true -default-features = false - -[dependencies.futures-core] -version = "0.3.29" -default-features = false - -[dependencies.futures-io] -version = "0.3.29" -features = ["std"] -optional = true -default-features = false - -[dependencies.futures-macro] -version = "=0.3.29" -optional = true -default-features = false - -[dependencies.futures-sink] -version = "0.3.29" -optional = true -default-features = false - -[dependencies.futures-task] -version = "0.3.29" -default-features = false - -[dependencies.futures_01] -version = "0.1.25" -optional = true -package = "futures" - -[dependencies.memchr] -version = "2.2" -optional = true - -[dependencies.pin-project-lite] -version = "0.2.6" - -[dependencies.pin-utils] -version = "0.1.0" - -[dependencies.slab] -version = "0.4.2" -optional = true - -[dependencies.tokio-io] -version = "0.1.9" -optional = true - -[dev-dependencies.tokio] -version = "0.1.11" - -[features] -alloc = [ - "futures-core/alloc", - "futures-task/alloc", -] -async-await = [] -async-await-macro = [ - "async-await", - "futures-macro", -] -bilock = [] -cfg-target-has-atomic = [] -channel = [ - "std", - "futures-channel", -] -compat = [ - "std", - "futures_01", -] -default = [ - "std", - "async-await", - "async-await-macro", -] -io = [ - "std", - "futures-io", - "memchr", -] -io-compat = [ - "io", - "compat", - "tokio-io", -] -portable-atomic = ["futures-core/portable-atomic"] -sink = 
["futures-sink"] -std = [ - "alloc", - "futures-core/std", - "futures-task/std", - "slab", -] -unstable = [ - "futures-core/unstable", - "futures-task/unstable", -] -write-all-vectored = ["io"] diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/futures-util/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/futures-util/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/LICENSE-MIT s390-tools-2.33.1/rust-vendor/futures-util/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/futures-util/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/no_atomic_cas.rs s390-tools-2.33.1/rust-vendor/futures-util/no_atomic_cas.rs --- s390-tools-2.31.0/rust-vendor/futures-util/no_atomic_cas.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/no_atomic_cas.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,17 +0,0 @@ -// This file is @generated by no_atomic_cas.sh. -// It is not intended for manual editing. - -const NO_ATOMIC_CAS: &[&str] = &[ - "armv4t-none-eabi", - "armv5te-none-eabi", - "avr-unknown-gnu-atmega328", - "bpfeb-unknown-none", - "bpfel-unknown-none", - "msp430-none-elf", - "riscv32i-unknown-none-elf", - "riscv32im-unknown-none-elf", - "riscv32imc-unknown-none-elf", - "thumbv4t-none-eabi", - "thumbv5te-none-eabi", - "thumbv6m-none-eabi", -]; diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/README.md s390-tools-2.33.1/rust-vendor/futures-util/README.md --- s390-tools-2.31.0/rust-vendor/futures-util/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -# futures-util - -Common utilities and extension traits for the futures-rs library. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -futures-util = "0.3" -``` - -The current `futures-util` requires Rust 1.56 or later. - -## License - -Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or -[MIT license](LICENSE-MIT) at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. 
diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/abortable.rs s390-tools-2.33.1/rust-vendor/futures-util/src/abortable.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/abortable.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/abortable.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,209 +0,0 @@ -use crate::task::AtomicWaker; -use alloc::sync::Arc; -use core::fmt; -use core::pin::Pin; -use core::sync::atomic::{AtomicBool, Ordering}; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use futures_core::Stream; -use pin_project_lite::pin_project; - -pin_project! { - /// A future/stream which can be remotely short-circuited using an `AbortHandle`. - #[derive(Debug, Clone)] - #[must_use = "futures/streams do nothing unless you poll them"] - pub struct Abortable { - #[pin] - task: T, - inner: Arc, - } -} - -impl Abortable { - /// Creates a new `Abortable` future/stream using an existing `AbortRegistration`. - /// `AbortRegistration`s can be acquired through `AbortHandle::new`. - /// - /// When `abort` is called on the handle tied to `reg` or if `abort` has - /// already been called, the future/stream will complete immediately without making - /// any further progress. 
- /// - /// # Examples: - /// - /// Usage with futures: - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::{Abortable, AbortHandle, Aborted}; - /// - /// let (abort_handle, abort_registration) = AbortHandle::new_pair(); - /// let future = Abortable::new(async { 2 }, abort_registration); - /// abort_handle.abort(); - /// assert_eq!(future.await, Err(Aborted)); - /// # }); - /// ``` - /// - /// Usage with streams: - /// - /// ``` - /// # futures::executor::block_on(async { - /// # use futures::future::{Abortable, AbortHandle}; - /// # use futures::stream::{self, StreamExt}; - /// - /// let (abort_handle, abort_registration) = AbortHandle::new_pair(); - /// let mut stream = Abortable::new(stream::iter(vec![1, 2, 3]), abort_registration); - /// abort_handle.abort(); - /// assert_eq!(stream.next().await, None); - /// # }); - /// ``` - pub fn new(task: T, reg: AbortRegistration) -> Self { - Self { task, inner: reg.inner } - } - - /// Checks whether the task has been aborted. Note that all this - /// method indicates is whether [`AbortHandle::abort`] was *called*. - /// This means that it will return `true` even if: - /// * `abort` was called after the task had completed. - /// * `abort` was called while the task was being polled - the task may still be running and - /// will not be stopped until `poll` returns. - pub fn is_aborted(&self) -> bool { - self.inner.aborted.load(Ordering::Relaxed) - } -} - -/// A registration handle for an `Abortable` task. -/// Values of this type can be acquired from `AbortHandle::new` and are used -/// in calls to `Abortable::new`. -#[derive(Debug)] -pub struct AbortRegistration { - pub(crate) inner: Arc, -} - -impl AbortRegistration { - /// Create an [`AbortHandle`] from the given [`AbortRegistration`]. 
- /// - /// The created [`AbortHandle`] is functionally the same as any other - /// [`AbortHandle`]s that are associated with the same [`AbortRegistration`], - /// such as the one created by [`AbortHandle::new_pair`]. - pub fn handle(&self) -> AbortHandle { - AbortHandle { inner: self.inner.clone() } - } -} - -/// A handle to an `Abortable` task. -#[derive(Debug, Clone)] -pub struct AbortHandle { - inner: Arc, -} - -impl AbortHandle { - /// Creates an (`AbortHandle`, `AbortRegistration`) pair which can be used - /// to abort a running future or stream. - /// - /// This function is usually paired with a call to [`Abortable::new`]. - pub fn new_pair() -> (Self, AbortRegistration) { - let inner = - Arc::new(AbortInner { waker: AtomicWaker::new(), aborted: AtomicBool::new(false) }); - - (Self { inner: inner.clone() }, AbortRegistration { inner }) - } -} - -// Inner type storing the waker to awaken and a bool indicating that it -// should be aborted. -#[derive(Debug)] -pub(crate) struct AbortInner { - pub(crate) waker: AtomicWaker, - pub(crate) aborted: AtomicBool, -} - -/// Indicator that the `Abortable` task was aborted. 
-#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub struct Aborted; - -impl fmt::Display for Aborted { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "`Abortable` future has been aborted") - } -} - -#[cfg(feature = "std")] -impl std::error::Error for Aborted {} - -impl Abortable { - fn try_poll( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - poll: impl Fn(Pin<&mut T>, &mut Context<'_>) -> Poll, - ) -> Poll> { - // Check if the task has been aborted - if self.is_aborted() { - return Poll::Ready(Err(Aborted)); - } - - // attempt to complete the task - if let Poll::Ready(x) = poll(self.as_mut().project().task, cx) { - return Poll::Ready(Ok(x)); - } - - // Register to receive a wakeup if the task is aborted in the future - self.inner.waker.register(cx.waker()); - - // Check to see if the task was aborted between the first check and - // registration. - // Checking with `is_aborted` which uses `Relaxed` is sufficient because - // `register` introduces an `AcqRel` barrier. - if self.is_aborted() { - return Poll::Ready(Err(Aborted)); - } - - Poll::Pending - } -} - -impl Future for Abortable -where - Fut: Future, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.try_poll(cx, |fut, cx| fut.poll(cx)) - } -} - -impl Stream for Abortable -where - St: Stream, -{ - type Item = St::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.try_poll(cx, |stream, cx| stream.poll_next(cx)).map(Result::ok).map(Option::flatten) - } -} - -impl AbortHandle { - /// Abort the `Abortable` stream/future associated with this handle. - /// - /// Notifies the Abortable task associated with this handle that it - /// should abort. Note that if the task is currently being polled on - /// another thread, it will not immediately stop running. Instead, it will - /// continue to run until its poll method returns. 
- pub fn abort(&self) { - self.inner.aborted.store(true, Ordering::Relaxed); - self.inner.waker.wake(); - } - - /// Checks whether [`AbortHandle::abort`] was *called* on any associated - /// [`AbortHandle`]s, which includes all the [`AbortHandle`]s linked with - /// the same [`AbortRegistration`]. This means that it will return `true` - /// even if: - /// * `abort` was called after the task had completed. - /// * `abort` was called while the task was being polled - the task may still be running and - /// will not be stopped until `poll` returns. - /// - /// This operation has a Relaxed ordering. - pub fn is_aborted(&self) -> bool { - self.inner.aborted.load(Ordering::Relaxed) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/join_mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/join_mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/join_mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/join_mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,110 +0,0 @@ -//! The `join` macro. - -macro_rules! document_join_macro { - ($join:item $try_join:item) => { - /// Polls multiple futures simultaneously, returning a tuple - /// of all results once complete. - /// - /// While `join!(a, b)` is similar to `(a.await, b.await)`, - /// `join!` polls both futures concurrently and therefore is more efficient. - /// - /// This macro is only usable inside of async functions, closures, and blocks. - /// It is also gated behind the `async-await` feature of this library, which is - /// activated by default. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::join; - /// - /// let a = async { 1 }; - /// let b = async { 2 }; - /// assert_eq!(join!(a, b), (1, 2)); - /// - /// // `join!` is variadic, so you can pass any number of futures - /// let c = async { 3 }; - /// let d = async { 4 }; - /// let e = async { 5 }; - /// assert_eq!(join!(c, d, e), (3, 4, 5)); - /// # }); - /// ``` - $join - - /// Polls multiple futures simultaneously, resolving to a [`Result`] containing - /// either a tuple of the successful outputs or an error. - /// - /// `try_join!` is similar to [`join!`], but completes immediately if any of - /// the futures return an error. - /// - /// This macro is only usable inside of async functions, closures, and blocks. - /// It is also gated behind the `async-await` feature of this library, which is - /// activated by default. - /// - /// # Examples - /// - /// When used on multiple futures that return `Ok`, `try_join!` will return - /// `Ok` of a tuple of the values: - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::try_join; - /// - /// let a = async { Ok::(1) }; - /// let b = async { Ok::(2) }; - /// assert_eq!(try_join!(a, b), Ok((1, 2))); - /// - /// // `try_join!` is variadic, so you can pass any number of futures - /// let c = async { Ok::(3) }; - /// let d = async { Ok::(4) }; - /// let e = async { Ok::(5) }; - /// assert_eq!(try_join!(c, d, e), Ok((3, 4, 5))); - /// # }); - /// ``` - /// - /// If one of the futures resolves to an error, `try_join!` will return - /// that error: - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::try_join; - /// - /// let a = async { Ok::(1) }; - /// let b = async { Err::(2) }; - /// - /// assert_eq!(try_join!(a, b), Err(2)); - /// # }); - /// ``` - $try_join - } -} - -#[allow(unreachable_pub)] -#[doc(hidden)] -pub use futures_macro::join_internal; - -#[allow(unreachable_pub)] -#[doc(hidden)] -pub use 
futures_macro::try_join_internal; - -document_join_macro! { - #[macro_export] - macro_rules! join { - ($($tokens:tt)*) => {{ - use $crate::__private as __futures_crate; - $crate::join_internal! { - $( $tokens )* - } - }} - } - - #[macro_export] - macro_rules! try_join { - ($($tokens:tt)*) => {{ - use $crate::__private as __futures_crate; - $crate::try_join_internal! { - $( $tokens )* - } - }} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -//! Await -//! -//! This module contains a number of functions and combinators for working -//! with `async`/`await` code. - -use futures_core::future::{FusedFuture, Future}; -use futures_core::stream::{FusedStream, Stream}; - -#[macro_use] -mod poll; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762 -pub use self::poll::*; - -#[macro_use] -mod pending; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762 -pub use self::pending::*; - -// Primary export is a macro -#[cfg(feature = "async-await-macro")] -mod join_mod; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762 -#[cfg(feature = "async-await-macro")] -pub use self::join_mod::*; - -// Primary export is a macro -#[cfg(feature = "async-await-macro")] -mod select_mod; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762 -#[cfg(feature = "async-await-macro")] -pub use self::select_mod::*; - -// Primary export is a macro -#[cfg(feature = "std")] -#[cfg(feature = "async-await-macro")] -mod stream_select_mod; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762 -#[cfg(feature = "std")] -#[cfg(feature = "async-await-macro")] -pub use 
self::stream_select_mod::*; - -#[cfg(feature = "std")] -#[cfg(feature = "async-await-macro")] -mod random; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762 -#[cfg(feature = "std")] -#[cfg(feature = "async-await-macro")] -pub use self::random::*; - -#[doc(hidden)] -#[inline(always)] -pub fn assert_unpin(_: &T) {} - -#[doc(hidden)] -#[inline(always)] -pub fn assert_fused_future(_: &T) {} - -#[doc(hidden)] -#[inline(always)] -pub fn assert_fused_stream(_: &T) {} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/pending.rs s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/pending.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/pending.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/pending.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; - -/// A macro which yields to the event loop once. -/// -/// This is equivalent to returning [`Poll::Pending`](futures_core::task::Poll) -/// from a [`Future::poll`](futures_core::future::Future::poll) implementation. -/// Similarly, when using this macro, it must be ensured that [`wake`](std::task::Waker::wake) -/// is called somewhere when further progress can be made. -/// -/// This macro is only usable inside of async functions, closures, and blocks. -/// It is also gated behind the `async-await` feature of this library, which is -/// activated by default. -#[macro_export] -macro_rules! 
pending { - () => { - $crate::__private::async_await::pending_once().await - }; -} - -#[doc(hidden)] -pub fn pending_once() -> PendingOnce { - PendingOnce { is_ready: false } -} - -#[allow(missing_debug_implementations)] -#[doc(hidden)] -pub struct PendingOnce { - is_ready: bool, -} - -impl Future for PendingOnce { - type Output = (); - fn poll(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll { - if self.is_ready { - Poll::Ready(()) - } else { - self.is_ready = true; - Poll::Pending - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/poll.rs s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/poll.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/poll.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/poll.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ -use crate::future::FutureExt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; - -/// A macro which returns the result of polling a future once within the -/// current `async` context. -/// -/// This macro is only usable inside of `async` functions, closures, and blocks. -/// It is also gated behind the `async-await` feature of this library, which is -/// activated by default. -/// -/// If you need the result of polling a [`Stream`](crate::stream::Stream), -/// you can use this macro with the [`next`](crate::stream::StreamExt::next) method: -/// `poll!(stream.next())`. -#[macro_export] -macro_rules! poll { - ($x:expr $(,)?) 
=> { - $crate::__private::async_await::poll($x).await - }; -} - -#[doc(hidden)] -pub fn poll(future: F) -> PollOnce { - PollOnce { future } -} - -#[allow(missing_debug_implementations)] -#[doc(hidden)] -pub struct PollOnce { - future: F, -} - -impl Future for PollOnce { - type Output = Poll; - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Poll::Ready(self.future.poll_unpin(cx)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/random.rs s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/random.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/random.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/random.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,54 +0,0 @@ -use std::{ - cell::Cell, - collections::hash_map::DefaultHasher, - hash::Hasher, - num::Wrapping, - sync::atomic::{AtomicUsize, Ordering}, -}; - -// Based on [Fisher–Yates shuffle]. -// -// [Fisher–Yates shuffle]: https://en.wikipedia.org/wiki/Fisher–Yates_shuffle -#[doc(hidden)] -pub fn shuffle(slice: &mut [T]) { - for i in (1..slice.len()).rev() { - slice.swap(i, gen_index(i + 1)); - } -} - -/// Return a value from `0..n`. -fn gen_index(n: usize) -> usize { - (random() % n as u64) as usize -} - -/// Pseudorandom number generator based on [xorshift*]. -/// -/// [xorshift*]: https://en.wikipedia.org/wiki/Xorshift#xorshift* -fn random() -> u64 { - thread_local! 
{ - static RNG: Cell> = Cell::new(Wrapping(prng_seed())); - } - - fn prng_seed() -> u64 { - static COUNTER: AtomicUsize = AtomicUsize::new(0); - - // Any non-zero seed will do - let mut seed = 0; - while seed == 0 { - let mut hasher = DefaultHasher::new(); - hasher.write_usize(COUNTER.fetch_add(1, Ordering::Relaxed)); - seed = hasher.finish(); - } - seed - } - - RNG.with(|rng| { - let mut x = rng.get(); - debug_assert_ne!(x.0, 0); - x ^= x >> 12; - x ^= x << 25; - x ^= x >> 27; - rng.set(x); - x.0.wrapping_mul(0x2545_f491_4f6c_dd1d) - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/select_mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/select_mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/select_mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/select_mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,336 +0,0 @@ -//! The `select` macro. - -macro_rules! document_select_macro { - // This branch is required for `futures 0.3.1`, from before select_biased was introduced - ($select:item) => { - /// Polls multiple futures and streams simultaneously, executing the branch - /// for the future that finishes first. If multiple futures are ready, - /// one will be pseudo-randomly selected at runtime. Futures directly - /// passed to `select!` must be `Unpin` and implement `FusedFuture`. - /// - /// If an expression which yields a `Future` is passed to `select!` - /// (e.g. an `async fn` call) instead of a `Future` by name the `Unpin` - /// requirement is relaxed, since the macro will pin the resulting `Future` - /// on the stack. However the `Future` returned by the expression must - /// still implement `FusedFuture`. - /// - /// Futures and streams which are not already fused can be fused using the - /// `.fuse()` method. 
Note, though, that fusing a future or stream directly - /// in the call to `select!` will not be enough to prevent it from being - /// polled after completion if the `select!` call is in a loop, so when - /// `select!`ing in a loop, users should take care to `fuse()` outside of - /// the loop. - /// - /// `select!` can be used as an expression and will return the return - /// value of the selected branch. For this reason the return type of every - /// branch in a `select!` must be the same. - /// - /// This macro is only usable inside of async functions, closures, and blocks. - /// It is also gated behind the `async-await` feature of this library, which is - /// activated by default. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::select; - /// let mut a = future::ready(4); - /// let mut b = future::pending::<()>(); - /// - /// let res = select! { - /// a_res = a => a_res + 1, - /// _ = b => 0, - /// }; - /// assert_eq!(res, 5); - /// # }); - /// ``` - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt}; - /// use futures::select; - /// let mut st = stream::iter(vec![2]).fuse(); - /// let mut fut = future::pending::<()>(); - /// - /// select! { - /// x = st.next() => assert_eq!(Some(2), x), - /// _ = fut => panic!(), - /// }; - /// # }); - /// ``` - /// - /// As described earlier, `select` can directly select on expressions - /// which return `Future`s - even if those do not implement `Unpin`: - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// use futures::select; - /// - /// // Calling the following async fn returns a Future which does not - /// // implement Unpin - /// async fn async_identity_fn(arg: usize) -> usize { - /// arg - /// } - /// - /// let res = select! 
{ - /// a_res = async_identity_fn(62).fuse() => a_res + 1, - /// b_res = async_identity_fn(13).fuse() => b_res, - /// }; - /// assert!(res == 63 || res == 13); - /// # }); - /// ``` - /// - /// If a similar async function is called outside of `select` to produce - /// a `Future`, the `Future` must be pinned in order to be able to pass - /// it to `select`. This can be achieved via `Box::pin` for pinning a - /// `Future` on the heap or the `pin_mut!` macro for pinning a `Future` - /// on the stack. - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// use futures::select; - /// use futures::pin_mut; - /// - /// // Calling the following async fn returns a Future which does not - /// // implement Unpin - /// async fn async_identity_fn(arg: usize) -> usize { - /// arg - /// } - /// - /// let fut_1 = async_identity_fn(1).fuse(); - /// let fut_2 = async_identity_fn(2).fuse(); - /// let mut fut_1 = Box::pin(fut_1); // Pins the Future on the heap - /// pin_mut!(fut_2); // Pins the Future on the stack - /// - /// let res = select! { - /// a_res = fut_1 => a_res, - /// b_res = fut_2 => b_res, - /// }; - /// assert!(res == 1 || res == 2); - /// # }); - /// ``` - /// - /// `select` also accepts a `complete` branch and a `default` branch. - /// `complete` will run if all futures and streams have already been - /// exhausted. `default` will run if no futures or streams are - /// immediately ready. `complete` takes priority over `default` in - /// the case where all futures have completed. - /// A motivating use-case for passing `Future`s by name as well as for - /// `complete` blocks is to call `select!` in a loop, which is - /// demonstrated in the following example: - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::select; - /// let mut a_fut = future::ready(4); - /// let mut b_fut = future::ready(6); - /// let mut total = 0; - /// - /// loop { - /// select! 
{ - /// a = a_fut => total += a, - /// b = b_fut => total += b, - /// complete => break, - /// default => panic!(), // never runs (futures run first, then complete) - /// }; - /// } - /// assert_eq!(total, 10); - /// # }); - /// ``` - /// - /// Note that the futures that have been matched over can still be mutated - /// from inside the `select!` block's branches. This can be used to implement - /// more complex behavior such as timer resets or writing into the head of - /// a stream. - $select - }; - - ($select:item $select_biased:item) => { - document_select_macro!($select); - - /// Polls multiple futures and streams simultaneously, executing the branch - /// for the future that finishes first. Unlike [`select!`], if multiple futures are ready, - /// one will be selected in order of declaration. Futures directly - /// passed to `select_biased!` must be `Unpin` and implement `FusedFuture`. - /// - /// If an expression which yields a `Future` is passed to `select_biased!` - /// (e.g. an `async fn` call) instead of a `Future` by name the `Unpin` - /// requirement is relaxed, since the macro will pin the resulting `Future` - /// on the stack. However the `Future` returned by the expression must - /// still implement `FusedFuture`. - /// - /// Futures and streams which are not already fused can be fused using the - /// `.fuse()` method. Note, though, that fusing a future or stream directly - /// in the call to `select_biased!` will not be enough to prevent it from being - /// polled after completion if the `select_biased!` call is in a loop, so when - /// `select_biased!`ing in a loop, users should take care to `fuse()` outside of - /// the loop. - /// - /// `select_biased!` can be used as an expression and will return the return - /// value of the selected branch. For this reason the return type of every - /// branch in a `select_biased!` must be the same. - /// - /// This macro is only usable inside of async functions, closures, and blocks. 
- /// It is also gated behind the `async-await` feature of this library, which is - /// activated by default. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::select_biased; - /// let mut a = future::ready(4); - /// let mut b = future::pending::<()>(); - /// - /// let res = select_biased! { - /// a_res = a => a_res + 1, - /// _ = b => 0, - /// }; - /// assert_eq!(res, 5); - /// # }); - /// ``` - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt}; - /// use futures::select_biased; - /// let mut st = stream::iter(vec![2]).fuse(); - /// let mut fut = future::pending::<()>(); - /// - /// select_biased! { - /// x = st.next() => assert_eq!(Some(2), x), - /// _ = fut => panic!(), - /// }; - /// # }); - /// ``` - /// - /// As described earlier, `select_biased` can directly select on expressions - /// which return `Future`s - even if those do not implement `Unpin`: - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// use futures::select_biased; - /// - /// // Calling the following async fn returns a Future which does not - /// // implement Unpin - /// async fn async_identity_fn(arg: usize) -> usize { - /// arg - /// } - /// - /// let res = select_biased! { - /// a_res = async_identity_fn(62).fuse() => a_res + 1, - /// b_res = async_identity_fn(13).fuse() => b_res, - /// }; - /// assert!(res == 63 || res == 12); - /// # }); - /// ``` - /// - /// If a similar async function is called outside of `select_biased` to produce - /// a `Future`, the `Future` must be pinned in order to be able to pass - /// it to `select_biased`. This can be achieved via `Box::pin` for pinning a - /// `Future` on the heap or the `pin_mut!` macro for pinning a `Future` - /// on the stack. 
- /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// use futures::select_biased; - /// use futures::pin_mut; - /// - /// // Calling the following async fn returns a Future which does not - /// // implement Unpin - /// async fn async_identity_fn(arg: usize) -> usize { - /// arg - /// } - /// - /// let fut_1 = async_identity_fn(1).fuse(); - /// let fut_2 = async_identity_fn(2).fuse(); - /// let mut fut_1 = Box::pin(fut_1); // Pins the Future on the heap - /// pin_mut!(fut_2); // Pins the Future on the stack - /// - /// let res = select_biased! { - /// a_res = fut_1 => a_res, - /// b_res = fut_2 => b_res, - /// }; - /// assert!(res == 1 || res == 2); - /// # }); - /// ``` - /// - /// `select_biased` also accepts a `complete` branch and a `default` branch. - /// `complete` will run if all futures and streams have already been - /// exhausted. `default` will run if no futures or streams are - /// immediately ready. `complete` takes priority over `default` in - /// the case where all futures have completed. - /// A motivating use-case for passing `Future`s by name as well as for - /// `complete` blocks is to call `select_biased!` in a loop, which is - /// demonstrated in the following example: - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::select_biased; - /// let mut a_fut = future::ready(4); - /// let mut b_fut = future::ready(6); - /// let mut total = 0; - /// - /// loop { - /// select_biased! { - /// a = a_fut => total += a, - /// b = b_fut => total += b, - /// complete => break, - /// default => panic!(), // never runs (futures run first, then complete) - /// }; - /// } - /// assert_eq!(total, 10); - /// # }); - /// ``` - /// - /// Note that the futures that have been matched over can still be mutated - /// from inside the `select_biased!` block's branches. 
This can be used to implement - /// more complex behavior such as timer resets or writing into the head of - /// a stream. - /// - /// [`select!`]: macro.select.html - $select_biased - }; -} - -#[cfg(feature = "std")] -#[allow(unreachable_pub)] -#[doc(hidden)] -pub use futures_macro::select_internal; - -#[allow(unreachable_pub)] -#[doc(hidden)] -pub use futures_macro::select_biased_internal; - -document_select_macro! { - #[cfg(feature = "std")] - #[macro_export] - macro_rules! select { - ($($tokens:tt)*) => {{ - use $crate::__private as __futures_crate; - $crate::select_internal! { - $( $tokens )* - } - }} - } - - #[macro_export] - macro_rules! select_biased { - ($($tokens:tt)*) => {{ - use $crate::__private as __futures_crate; - $crate::select_biased_internal! { - $( $tokens )* - } - }} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/stream_select_mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/stream_select_mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/async_await/stream_select_mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/async_await/stream_select_mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,38 +0,0 @@ -//! The `stream_select` macro. - -#[allow(unreachable_pub)] -#[doc(hidden)] -pub use futures_macro::stream_select_internal; - -/// Combines several streams, all producing the same `Item` type, into one stream. -/// This is similar to `select_all` but does not require the streams to all be the same type. -/// It also keeps the streams inline, and does not require `Box`s to be allocated. -/// Streams passed to this macro must be `Unpin`. -/// -/// If multiple streams are ready, one will be pseudo randomly selected at runtime. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::{stream, StreamExt, stream_select}; -/// let endless_ints = |i| stream::iter(vec![i].into_iter().cycle()).fuse(); -/// -/// let mut endless_numbers = stream_select!(endless_ints(1i32), endless_ints(2), endless_ints(3)); -/// match endless_numbers.next().await { -/// Some(1) => println!("Got a 1"), -/// Some(2) => println!("Got a 2"), -/// Some(3) => println!("Got a 3"), -/// _ => unreachable!(), -/// } -/// # }); -/// ``` -#[macro_export] -macro_rules! stream_select { - ($($tokens:tt)*) => {{ - use $crate::__private as __futures_crate; - $crate::stream_select_internal! { - $( $tokens )* - } - }} -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/compat/compat01as03.rs s390-tools-2.33.1/rust-vendor/futures-util/src/compat/compat01as03.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/compat/compat01as03.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/compat/compat01as03.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,454 +0,0 @@ -use futures_01::executor::{ - spawn as spawn01, Notify as Notify01, NotifyHandle as NotifyHandle01, Spawn as Spawn01, - UnsafeNotify as UnsafeNotify01, -}; -use futures_01::{Async as Async01, Future as Future01, Stream as Stream01}; -#[cfg(feature = "sink")] -use futures_01::{AsyncSink as AsyncSink01, Sink as Sink01}; -use futures_core::{future::Future as Future03, stream::Stream as Stream03, task as task03}; -#[cfg(feature = "sink")] -use futures_sink::Sink as Sink03; -use std::pin::Pin; -use std::task::Context; - -#[cfg(feature = "io-compat")] -#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use io::{AsyncRead01CompatExt, AsyncWrite01CompatExt}; - -/// Converts a futures 0.1 Future, Stream, AsyncRead, or AsyncWrite -/// object to a futures 0.3-compatible version, -#[derive(Debug)] -#[must_use = "futures do 
nothing unless you `.await` or poll them"] -pub struct Compat01As03 { - pub(crate) inner: Spawn01, -} - -impl Unpin for Compat01As03 {} - -impl Compat01As03 { - /// Wraps a futures 0.1 Future, Stream, AsyncRead, or AsyncWrite - /// object in a futures 0.3-compatible wrapper. - pub fn new(object: T) -> Self { - Self { inner: spawn01(object) } - } - - fn in_notify(&mut self, cx: &mut Context<'_>, f: impl FnOnce(&mut T) -> R) -> R { - let notify = &WakerToHandle(cx.waker()); - self.inner.poll_fn_notify(notify, 0, f) - } - - /// Get a reference to 0.1 Future, Stream, AsyncRead, or AsyncWrite object contained within. - pub fn get_ref(&self) -> &T { - self.inner.get_ref() - } - - /// Get a mutable reference to 0.1 Future, Stream, AsyncRead or AsyncWrite object contained - /// within. - pub fn get_mut(&mut self) -> &mut T { - self.inner.get_mut() - } - - /// Consume this wrapper to return the underlying 0.1 Future, Stream, AsyncRead, or - /// AsyncWrite object. - pub fn into_inner(self) -> T { - self.inner.into_inner() - } -} - -/// Extension trait for futures 0.1 [`Future`](futures_01::future::Future) -pub trait Future01CompatExt: Future01 { - /// Converts a futures 0.1 - /// [`Future`](futures_01::future::Future) - /// into a futures 0.3 - /// [`Future>`](futures_core::future::Future). 
- /// - /// ``` - /// # if cfg!(miri) { return; } // https://github.com/rust-lang/futures-rs/issues/2514 - /// # futures::executor::block_on(async { - /// # // TODO: These should be all using `futures::compat`, but that runs up against Cargo - /// # // feature issues - /// use futures_util::compat::Future01CompatExt; - /// - /// let future = futures_01::future::ok::(1); - /// assert_eq!(future.compat().await, Ok(1)); - /// # }); - /// ``` - fn compat(self) -> Compat01As03 - where - Self: Sized, - { - Compat01As03::new(self) - } -} -impl Future01CompatExt for Fut {} - -/// Extension trait for futures 0.1 [`Stream`](futures_01::stream::Stream) -pub trait Stream01CompatExt: Stream01 { - /// Converts a futures 0.1 - /// [`Stream`](futures_01::stream::Stream) - /// into a futures 0.3 - /// [`Stream>`](futures_core::stream::Stream). - /// - /// ``` - /// # if cfg!(miri) { return; } // https://github.com/rust-lang/futures-rs/issues/2514 - /// # futures::executor::block_on(async { - /// use futures::stream::StreamExt; - /// use futures_util::compat::Stream01CompatExt; - /// - /// let stream = futures_01::stream::once::(Ok(1)); - /// let mut stream = stream.compat(); - /// assert_eq!(stream.next().await, Some(Ok(1))); - /// assert_eq!(stream.next().await, None); - /// # }); - /// ``` - fn compat(self) -> Compat01As03 - where - Self: Sized, - { - Compat01As03::new(self) - } -} -impl Stream01CompatExt for St {} - -/// Extension trait for futures 0.1 [`Sink`](futures_01::sink::Sink) -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub trait Sink01CompatExt: Sink01 { - /// Converts a futures 0.1 - /// [`Sink`](futures_01::sink::Sink) - /// into a futures 0.3 - /// [`Sink`](futures_sink::Sink). 
- /// - /// ``` - /// # if cfg!(miri) { return; } // https://github.com/rust-lang/futures-rs/issues/2514 - /// # futures::executor::block_on(async { - /// use futures::{sink::SinkExt, stream::StreamExt}; - /// use futures_util::compat::{Stream01CompatExt, Sink01CompatExt}; - /// - /// let (tx, rx) = futures_01::unsync::mpsc::channel(1); - /// let (mut tx, mut rx) = (tx.sink_compat(), rx.compat()); - /// - /// tx.send(1).await.unwrap(); - /// drop(tx); - /// assert_eq!(rx.next().await, Some(Ok(1))); - /// assert_eq!(rx.next().await, None); - /// # }); - /// ``` - fn sink_compat(self) -> Compat01As03Sink - where - Self: Sized, - { - Compat01As03Sink::new(self) - } -} -#[cfg(feature = "sink")] -impl Sink01CompatExt for Si {} - -fn poll_01_to_03(x: Result, E>) -> task03::Poll> { - match x? { - Async01::Ready(t) => task03::Poll::Ready(Ok(t)), - Async01::NotReady => task03::Poll::Pending, - } -} - -impl Future03 for Compat01As03 { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> task03::Poll { - poll_01_to_03(self.in_notify(cx, Future01::poll)) - } -} - -impl Stream03 for Compat01As03 { - type Item = Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> task03::Poll> { - match self.in_notify(cx, Stream01::poll)? 
{ - Async01::Ready(Some(t)) => task03::Poll::Ready(Some(Ok(t))), - Async01::Ready(None) => task03::Poll::Ready(None), - Async01::NotReady => task03::Poll::Pending, - } - } -} - -/// Converts a futures 0.1 Sink object to a futures 0.3-compatible version -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -#[derive(Debug)] -#[must_use = "sinks do nothing unless polled"] -pub struct Compat01As03Sink { - pub(crate) inner: Spawn01, - pub(crate) buffer: Option, - pub(crate) close_started: bool, -} - -#[cfg(feature = "sink")] -impl Unpin for Compat01As03Sink {} - -#[cfg(feature = "sink")] -impl Compat01As03Sink { - /// Wraps a futures 0.1 Sink object in a futures 0.3-compatible wrapper. - pub fn new(inner: S) -> Self { - Self { inner: spawn01(inner), buffer: None, close_started: false } - } - - fn in_notify(&mut self, cx: &mut Context<'_>, f: impl FnOnce(&mut S) -> R) -> R { - let notify = &WakerToHandle(cx.waker()); - self.inner.poll_fn_notify(notify, 0, f) - } - - /// Get a reference to 0.1 Sink object contained within. - pub fn get_ref(&self) -> &S { - self.inner.get_ref() - } - - /// Get a mutable reference to 0.1 Sink contained within. - pub fn get_mut(&mut self) -> &mut S { - self.inner.get_mut() - } - - /// Consume this wrapper to return the underlying 0.1 Sink. - pub fn into_inner(self) -> S { - self.inner.into_inner() - } -} - -#[cfg(feature = "sink")] -impl Stream03 for Compat01As03Sink -where - S: Stream01, -{ - type Item = Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> task03::Poll> { - match self.in_notify(cx, Stream01::poll)? 
{ - Async01::Ready(Some(t)) => task03::Poll::Ready(Some(Ok(t))), - Async01::Ready(None) => task03::Poll::Ready(None), - Async01::NotReady => task03::Poll::Pending, - } - } -} - -#[cfg(feature = "sink")] -impl Sink03 for Compat01As03Sink -where - S: Sink01, -{ - type Error = S::SinkError; - - fn start_send(mut self: Pin<&mut Self>, item: SinkItem) -> Result<(), Self::Error> { - debug_assert!(self.buffer.is_none()); - self.buffer = Some(item); - Ok(()) - } - - fn poll_ready( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> task03::Poll> { - match self.buffer.take() { - Some(item) => match self.in_notify(cx, |f| f.start_send(item))? { - AsyncSink01::Ready => task03::Poll::Ready(Ok(())), - AsyncSink01::NotReady(i) => { - self.buffer = Some(i); - task03::Poll::Pending - } - }, - None => task03::Poll::Ready(Ok(())), - } - } - - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> task03::Poll> { - let item = self.buffer.take(); - match self.in_notify(cx, |f| match item { - Some(i) => match f.start_send(i)? { - AsyncSink01::Ready => f.poll_complete().map(|i| (i, None)), - AsyncSink01::NotReady(t) => Ok((Async01::NotReady, Some(t))), - }, - None => f.poll_complete().map(|i| (i, None)), - })? { - (Async01::Ready(_), _) => task03::Poll::Ready(Ok(())), - (Async01::NotReady, item) => { - self.buffer = item; - task03::Poll::Pending - } - } - } - - fn poll_close( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> task03::Poll> { - let item = self.buffer.take(); - let close_started = self.close_started; - - let result = self.in_notify(cx, |f| { - if !close_started { - if let Some(item) = item { - if let AsyncSink01::NotReady(item) = f.start_send(item)? { - return Ok((Async01::NotReady, Some(item), false)); - } - } - - if let Async01::NotReady = f.poll_complete()? { - return Ok((Async01::NotReady, None, false)); - } - } - - Ok((::close(f)?, None, true)) - }); - - match result? 
{ - (Async01::Ready(_), _, _) => task03::Poll::Ready(Ok(())), - (Async01::NotReady, item, close_started) => { - self.buffer = item; - self.close_started = close_started; - task03::Poll::Pending - } - } - } -} - -struct NotifyWaker(task03::Waker); - -#[allow(missing_debug_implementations)] // false positive: this is private type -#[derive(Clone)] -struct WakerToHandle<'a>(&'a task03::Waker); - -impl From> for NotifyHandle01 { - fn from(handle: WakerToHandle<'_>) -> Self { - let ptr = Box::new(NotifyWaker(handle.0.clone())); - - unsafe { Self::new(Box::into_raw(ptr)) } - } -} - -impl Notify01 for NotifyWaker { - fn notify(&self, _: usize) { - self.0.wake_by_ref(); - } -} - -unsafe impl UnsafeNotify01 for NotifyWaker { - unsafe fn clone_raw(&self) -> NotifyHandle01 { - WakerToHandle(&self.0).into() - } - - unsafe fn drop_raw(&self) { - let ptr: *const dyn UnsafeNotify01 = self; - drop(Box::from_raw(ptr as *mut dyn UnsafeNotify01)); - } -} - -#[cfg(feature = "io-compat")] -#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] -mod io { - use super::*; - use futures_io::{AsyncRead as AsyncRead03, AsyncWrite as AsyncWrite03}; - use std::io::Error; - use tokio_io::{AsyncRead as AsyncRead01, AsyncWrite as AsyncWrite01}; - - /// Extension trait for tokio-io [`AsyncRead`](tokio_io::AsyncRead) - #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] - pub trait AsyncRead01CompatExt: AsyncRead01 { - /// Converts a tokio-io [`AsyncRead`](tokio_io::AsyncRead) into a futures-io 0.3 - /// [`AsyncRead`](futures_io::AsyncRead). 
- /// - /// ``` - /// # if cfg!(miri) { return; } // https://github.com/rust-lang/futures-rs/issues/2514 - /// # futures::executor::block_on(async { - /// use futures::io::AsyncReadExt; - /// use futures_util::compat::AsyncRead01CompatExt; - /// - /// let input = b"Hello World!"; - /// let reader /* : impl tokio_io::AsyncRead */ = std::io::Cursor::new(input); - /// let mut reader /* : impl futures::io::AsyncRead + Unpin */ = reader.compat(); - /// - /// let mut output = Vec::with_capacity(12); - /// reader.read_to_end(&mut output).await.unwrap(); - /// assert_eq!(output, input); - /// # }); - /// ``` - fn compat(self) -> Compat01As03 - where - Self: Sized, - { - Compat01As03::new(self) - } - } - impl AsyncRead01CompatExt for R {} - - /// Extension trait for tokio-io [`AsyncWrite`](tokio_io::AsyncWrite) - #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] - pub trait AsyncWrite01CompatExt: AsyncWrite01 { - /// Converts a tokio-io [`AsyncWrite`](tokio_io::AsyncWrite) into a futures-io 0.3 - /// [`AsyncWrite`](futures_io::AsyncWrite). 
- /// - /// ``` - /// # if cfg!(miri) { return; } // https://github.com/rust-lang/futures-rs/issues/2514 - /// # futures::executor::block_on(async { - /// use futures::io::AsyncWriteExt; - /// use futures_util::compat::AsyncWrite01CompatExt; - /// - /// let input = b"Hello World!"; - /// let mut cursor = std::io::Cursor::new(Vec::with_capacity(12)); - /// - /// let mut writer = (&mut cursor).compat(); - /// writer.write_all(input).await.unwrap(); - /// - /// assert_eq!(cursor.into_inner(), input); - /// # }); - /// ``` - fn compat(self) -> Compat01As03 - where - Self: Sized, - { - Compat01As03::new(self) - } - } - impl AsyncWrite01CompatExt for W {} - - impl AsyncRead03 for Compat01As03 { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> task03::Poll> { - poll_01_to_03(self.in_notify(cx, |x| x.poll_read(buf))) - } - } - - impl AsyncWrite03 for Compat01As03 { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> task03::Poll> { - poll_01_to_03(self.in_notify(cx, |x| x.poll_write(buf))) - } - - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> task03::Poll> { - poll_01_to_03(self.in_notify(cx, AsyncWrite01::poll_flush)) - } - - fn poll_close( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> task03::Poll> { - poll_01_to_03(self.in_notify(cx, AsyncWrite01::shutdown)) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/compat/compat03as01.rs s390-tools-2.33.1/rust-vendor/futures-util/src/compat/compat03as01.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/compat/compat03as01.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/compat/compat03as01.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,265 +0,0 @@ -use crate::task::{self as task03, ArcWake as ArcWake03, WakerRef}; -use futures_01::{ - task as task01, Async as Async01, Future as Future01, Poll as Poll01, Stream as Stream01, -}; -#[cfg(feature = 
"sink")] -use futures_01::{AsyncSink as AsyncSink01, Sink as Sink01, StartSend as StartSend01}; -use futures_core::{ - future::TryFuture as TryFuture03, - stream::TryStream as TryStream03, - task::{RawWaker, RawWakerVTable}, -}; -#[cfg(feature = "sink")] -use futures_sink::Sink as Sink03; -#[cfg(feature = "sink")] -use std::marker::PhantomData; -use std::{mem, pin::Pin, sync::Arc, task::Context}; - -/// Converts a futures 0.3 [`TryFuture`](futures_core::future::TryFuture) or -/// [`TryStream`](futures_core::stream::TryStream) into a futures 0.1 -/// [`Future`](futures_01::future::Future) or -/// [`Stream`](futures_01::stream::Stream). -#[derive(Debug, Clone, Copy)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Compat { - pub(crate) inner: T, -} - -/// Converts a futures 0.3 [`Sink`](futures_sink::Sink) into a futures 0.1 -/// [`Sink`](futures_01::sink::Sink). -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -#[derive(Debug)] -#[must_use = "sinks do nothing unless polled"] -pub struct CompatSink { - inner: T, - _phantom: PhantomData, -} - -impl Compat { - /// Creates a new [`Compat`]. - /// - /// For types which implement appropriate futures `0.3` - /// traits, the result will be a type which implements - /// the corresponding futures 0.1 type. - pub fn new(inner: T) -> Self { - Self { inner } - } - - /// Get a reference to 0.3 Future, Stream, AsyncRead, or AsyncWrite object - /// contained within. - pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Get a mutable reference to 0.3 Future, Stream, AsyncRead, or AsyncWrite object - /// contained within. - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Returns the inner item. - pub fn into_inner(self) -> T { - self.inner - } -} - -#[cfg(feature = "sink")] -impl CompatSink { - /// Creates a new [`CompatSink`]. 
- pub fn new(inner: T) -> Self { - Self { inner, _phantom: PhantomData } - } - - /// Get a reference to 0.3 Sink contained within. - pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Get a mutable reference to 0.3 Sink contained within. - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Returns the inner item. - pub fn into_inner(self) -> T { - self.inner - } -} - -fn poll_03_to_01(x: task03::Poll>) -> Result, E> { - match x? { - task03::Poll::Ready(t) => Ok(Async01::Ready(t)), - task03::Poll::Pending => Ok(Async01::NotReady), - } -} - -impl Future01 for Compat -where - Fut: TryFuture03 + Unpin, -{ - type Item = Fut::Ok; - type Error = Fut::Error; - - fn poll(&mut self) -> Poll01 { - with_context(self, |inner, cx| poll_03_to_01(inner.try_poll(cx))) - } -} - -impl Stream01 for Compat -where - St: TryStream03 + Unpin, -{ - type Item = St::Ok; - type Error = St::Error; - - fn poll(&mut self) -> Poll01, Self::Error> { - with_context(self, |inner, cx| match inner.try_poll_next(cx)? { - task03::Poll::Ready(None) => Ok(Async01::Ready(None)), - task03::Poll::Ready(Some(t)) => Ok(Async01::Ready(Some(t))), - task03::Poll::Pending => Ok(Async01::NotReady), - }) - } -} - -#[cfg(feature = "sink")] -impl Sink01 for CompatSink -where - T: Sink03 + Unpin, -{ - type SinkItem = Item; - type SinkError = T::Error; - - fn start_send(&mut self, item: Self::SinkItem) -> StartSend01 { - with_sink_context(self, |mut inner, cx| match inner.as_mut().poll_ready(cx)? 
{ - task03::Poll::Ready(()) => inner.start_send(item).map(|()| AsyncSink01::Ready), - task03::Poll::Pending => Ok(AsyncSink01::NotReady(item)), - }) - } - - fn poll_complete(&mut self) -> Poll01<(), Self::SinkError> { - with_sink_context(self, |inner, cx| poll_03_to_01(inner.poll_flush(cx))) - } - - fn close(&mut self) -> Poll01<(), Self::SinkError> { - with_sink_context(self, |inner, cx| poll_03_to_01(inner.poll_close(cx))) - } -} - -#[derive(Clone)] -struct Current(task01::Task); - -impl Current { - fn new() -> Self { - Self(task01::current()) - } - - fn as_waker(&self) -> WakerRef<'_> { - unsafe fn ptr_to_current<'a>(ptr: *const ()) -> &'a Current { - &*(ptr as *const Current) - } - fn current_to_ptr(current: &Current) -> *const () { - current as *const Current as *const () - } - - unsafe fn clone(ptr: *const ()) -> RawWaker { - // Lazily create the `Arc` only when the waker is actually cloned. - // FIXME: remove `transmute` when a `Waker` -> `RawWaker` conversion - // function is landed in `core`. 
- mem::transmute::(task03::waker(Arc::new( - ptr_to_current(ptr).clone(), - ))) - } - unsafe fn drop(_: *const ()) {} - unsafe fn wake(ptr: *const ()) { - ptr_to_current(ptr).0.notify() - } - - let ptr = current_to_ptr(self); - let vtable = &RawWakerVTable::new(clone, wake, wake, drop); - WakerRef::new_unowned(std::mem::ManuallyDrop::new(unsafe { - task03::Waker::from_raw(RawWaker::new(ptr, vtable)) - })) - } -} - -impl ArcWake03 for Current { - fn wake_by_ref(arc_self: &Arc) { - arc_self.0.notify(); - } -} - -fn with_context(compat: &mut Compat, f: F) -> R -where - T: Unpin, - F: FnOnce(Pin<&mut T>, &mut Context<'_>) -> R, -{ - let current = Current::new(); - let waker = current.as_waker(); - let mut cx = Context::from_waker(&waker); - f(Pin::new(&mut compat.inner), &mut cx) -} - -#[cfg(feature = "sink")] -fn with_sink_context(compat: &mut CompatSink, f: F) -> R -where - T: Unpin, - F: FnOnce(Pin<&mut T>, &mut Context<'_>) -> R, -{ - let current = Current::new(); - let waker = current.as_waker(); - let mut cx = Context::from_waker(&waker); - f(Pin::new(&mut compat.inner), &mut cx) -} - -#[cfg(feature = "io-compat")] -#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] -mod io { - use super::*; - use futures_io::{AsyncRead as AsyncRead03, AsyncWrite as AsyncWrite03}; - use tokio_io::{AsyncRead as AsyncRead01, AsyncWrite as AsyncWrite01}; - - fn poll_03_to_io(x: task03::Poll>) -> Result { - match x { - task03::Poll::Ready(Ok(t)) => Ok(t), - task03::Poll::Pending => Err(std::io::ErrorKind::WouldBlock.into()), - task03::Poll::Ready(Err(e)) => Err(e), - } - } - - impl std::io::Read for Compat { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - let current = Current::new(); - let waker = current.as_waker(); - let mut cx = Context::from_waker(&waker); - poll_03_to_io(Pin::new(&mut self.inner).poll_read(&mut cx, buf)) - } - } - - impl AsyncRead01 for Compat {} - - impl std::io::Write for Compat { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - let 
current = Current::new(); - let waker = current.as_waker(); - let mut cx = Context::from_waker(&waker); - poll_03_to_io(Pin::new(&mut self.inner).poll_write(&mut cx, buf)) - } - - fn flush(&mut self) -> std::io::Result<()> { - let current = Current::new(); - let waker = current.as_waker(); - let mut cx = Context::from_waker(&waker); - poll_03_to_io(Pin::new(&mut self.inner).poll_flush(&mut cx)) - } - } - - impl AsyncWrite01 for Compat { - fn shutdown(&mut self) -> std::io::Result> { - let current = Current::new(); - let waker = current.as_waker(); - let mut cx = Context::from_waker(&waker); - poll_03_to_01(Pin::new(&mut self.inner).poll_close(&mut cx)) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/compat/executor.rs s390-tools-2.33.1/rust-vendor/futures-util/src/compat/executor.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/compat/executor.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/compat/executor.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,86 +0,0 @@ -use super::{Compat, Future01CompatExt}; -use crate::{ - future::{FutureExt, TryFutureExt, UnitError}, - task::SpawnExt, -}; -use futures_01::future::{ExecuteError as ExecuteError01, Executor as Executor01}; -use futures_01::Future as Future01; -use futures_task::{FutureObj, Spawn as Spawn03, SpawnError as SpawnError03}; - -/// A future that can run on a futures 0.1 -/// [`Executor`](futures_01::future::Executor). -pub type Executor01Future = Compat>>; - -/// Extension trait for futures 0.1 [`Executor`](futures_01::future::Executor). -pub trait Executor01CompatExt: Executor01 + Clone + Send + 'static { - /// Converts a futures 0.1 [`Executor`](futures_01::future::Executor) into a - /// futures 0.3 [`Spawn`](futures_task::Spawn). 
- /// - /// ``` - /// # if cfg!(miri) { return; } // Miri does not support epoll - /// use futures::task::SpawnExt; - /// use futures::future::{FutureExt, TryFutureExt}; - /// use futures_util::compat::Executor01CompatExt; - /// use tokio::executor::DefaultExecutor; - /// - /// # let (tx, rx) = futures::channel::oneshot::channel(); - /// - /// let spawner = DefaultExecutor::current().compat(); - /// let future03 = async move { - /// println!("Running on the pool"); - /// spawner.spawn(async { - /// println!("Spawned!"); - /// # tx.send(42).unwrap(); - /// }).unwrap(); - /// }; - /// - /// let future01 = future03.unit_error().boxed().compat(); - /// - /// tokio::run(future01); - /// # futures::executor::block_on(rx).unwrap(); - /// ``` - fn compat(self) -> Executor01As03 - where - Self: Sized; -} - -impl Executor01CompatExt for Ex -where - Ex: Executor01 + Clone + Send + 'static, -{ - fn compat(self) -> Executor01As03 { - Executor01As03 { executor01: self } - } -} - -/// Converts a futures 0.1 [`Executor`](futures_01::future::Executor) into a -/// futures 0.3 [`Spawn`](futures_task::Spawn). 
-#[derive(Debug, Clone)] -pub struct Executor01As03 { - executor01: Ex, -} - -impl Spawn03 for Executor01As03 -where - Ex: Executor01 + Clone + Send + 'static, -{ - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError03> { - let future = future.unit_error().compat(); - - self.executor01.execute(future).map_err(|_| SpawnError03::shutdown()) - } -} - -#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 -impl Executor01 for Compat -where - for<'a> &'a Sp: Spawn03, - Fut: Future01 + Send + 'static, -{ - fn execute(&self, future: Fut) -> Result<(), ExecuteError01> { - (&self.inner) - .spawn(future.compat().map(|_| ())) - .expect("unable to spawn future from Compat executor"); - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/compat/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/compat/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/compat/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/compat/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ -//! Interop between `futures` 0.1 and 0.3. -//! -//! This module is only available when the `compat` feature of this -//! library is activated. 
- -mod executor; -pub use self::executor::{Executor01As03, Executor01CompatExt, Executor01Future}; - -mod compat01as03; -#[cfg(feature = "io-compat")] -#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] -pub use self::compat01as03::{AsyncRead01CompatExt, AsyncWrite01CompatExt}; -pub use self::compat01as03::{Compat01As03, Future01CompatExt, Stream01CompatExt}; -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub use self::compat01as03::{Compat01As03Sink, Sink01CompatExt}; - -mod compat03as01; -pub use self::compat03as01::Compat; -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub use self::compat03as01::CompatSink; diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/fns.rs s390-tools-2.33.1/rust-vendor/futures-util/src/fns.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/fns.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/fns.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,372 +0,0 @@ -use core::fmt::{self, Debug}; -use core::marker::PhantomData; - -pub trait FnOnce1 { - type Output; - fn call_once(self, arg: A) -> Self::Output; -} - -impl FnOnce1 for T -where - T: FnOnce(A) -> R, -{ - type Output = R; - fn call_once(self, arg: A) -> R { - self(arg) - } -} - -pub trait FnMut1: FnOnce1 { - fn call_mut(&mut self, arg: A) -> Self::Output; -} - -impl FnMut1 for T -where - T: FnMut(A) -> R, -{ - fn call_mut(&mut self, arg: A) -> R { - self(arg) - } -} - -// Not used, but present for completeness -#[allow(unreachable_pub)] -pub trait Fn1: FnMut1 { - fn call(&self, arg: A) -> Self::Output; -} - -impl Fn1 for T -where - T: Fn(A) -> R, -{ - fn call(&self, arg: A) -> R { - self(arg) - } -} - -macro_rules! 
trivial_fn_impls { - ($name:ident <$($arg:ident),*> $t:ty = $debug:literal) => { - impl<$($arg),*> Copy for $t {} - impl<$($arg),*> Clone for $t { - fn clone(&self) -> Self { *self } - } - impl<$($arg),*> Debug for $t { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str($debug) - } - } - impl<$($arg,)* A> FnMut1 for $t where Self: FnOnce1 { - fn call_mut(&mut self, arg: A) -> Self::Output { - self.call_once(arg) - } - } - impl<$($arg,)* A> Fn1 for $t where Self: FnOnce1 { - fn call(&self, arg: A) -> Self::Output { - self.call_once(arg) - } - } - pub(crate) fn $name<$($arg),*>() -> $t { - Default::default() - } - } -} - -pub struct OkFn(PhantomData); - -impl Default for OkFn { - fn default() -> Self { - Self(PhantomData) - } -} - -impl FnOnce1 for OkFn { - type Output = Result; - fn call_once(self, arg: A) -> Self::Output { - Ok(arg) - } -} - -trivial_fn_impls!(ok_fn OkFn = "Ok"); - -#[derive(Debug, Copy, Clone, Default)] -pub struct ChainFn(F, G); - -impl FnOnce1 for ChainFn -where - F: FnOnce1, - G: FnOnce1, -{ - type Output = G::Output; - fn call_once(self, arg: A) -> Self::Output { - self.1.call_once(self.0.call_once(arg)) - } -} -impl FnMut1 for ChainFn -where - F: FnMut1, - G: FnMut1, -{ - fn call_mut(&mut self, arg: A) -> Self::Output { - self.1.call_mut(self.0.call_mut(arg)) - } -} -impl Fn1 for ChainFn -where - F: Fn1, - G: Fn1, -{ - fn call(&self, arg: A) -> Self::Output { - self.1.call(self.0.call(arg)) - } -} -pub(crate) fn chain_fn(f: F, g: G) -> ChainFn { - ChainFn(f, g) -} - -#[derive(Default)] -pub struct MergeResultFn; - -impl FnOnce1> for MergeResultFn { - type Output = T; - fn call_once(self, arg: Result) -> Self::Output { - match arg { - Ok(x) => x, - Err(x) => x, - } - } -} -trivial_fn_impls!(merge_result_fn <> MergeResultFn = "merge_result"); - -#[derive(Debug, Copy, Clone, Default)] -pub struct InspectFn(F); - -#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 -impl FnOnce1 for InspectFn 
-where - F: for<'a> FnOnce1<&'a A, Output = ()>, -{ - type Output = A; - fn call_once(self, arg: A) -> Self::Output { - self.0.call_once(&arg); - arg - } -} -#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 -impl FnMut1 for InspectFn -where - F: for<'a> FnMut1<&'a A, Output = ()>, -{ - fn call_mut(&mut self, arg: A) -> Self::Output { - self.0.call_mut(&arg); - arg - } -} -#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 -impl Fn1 for InspectFn -where - F: for<'a> Fn1<&'a A, Output = ()>, -{ - fn call(&self, arg: A) -> Self::Output { - self.0.call(&arg); - arg - } -} -pub(crate) fn inspect_fn(f: F) -> InspectFn { - InspectFn(f) -} - -#[derive(Debug, Copy, Clone, Default)] -pub struct MapOkFn(F); - -impl FnOnce1> for MapOkFn -where - F: FnOnce1, -{ - type Output = Result; - fn call_once(self, arg: Result) -> Self::Output { - arg.map(|x| self.0.call_once(x)) - } -} -impl FnMut1> for MapOkFn -where - F: FnMut1, -{ - fn call_mut(&mut self, arg: Result) -> Self::Output { - arg.map(|x| self.0.call_mut(x)) - } -} -impl Fn1> for MapOkFn -where - F: Fn1, -{ - fn call(&self, arg: Result) -> Self::Output { - arg.map(|x| self.0.call(x)) - } -} -pub(crate) fn map_ok_fn(f: F) -> MapOkFn { - MapOkFn(f) -} - -#[derive(Debug, Copy, Clone, Default)] -pub struct MapErrFn(F); - -impl FnOnce1> for MapErrFn -where - F: FnOnce1, -{ - type Output = Result; - fn call_once(self, arg: Result) -> Self::Output { - arg.map_err(|x| self.0.call_once(x)) - } -} -impl FnMut1> for MapErrFn -where - F: FnMut1, -{ - fn call_mut(&mut self, arg: Result) -> Self::Output { - arg.map_err(|x| self.0.call_mut(x)) - } -} -impl Fn1> for MapErrFn -where - F: Fn1, -{ - fn call(&self, arg: Result) -> Self::Output { - arg.map_err(|x| self.0.call(x)) - } -} -pub(crate) fn map_err_fn(f: F) -> MapErrFn { - MapErrFn(f) -} - -#[derive(Debug, Copy, Clone)] -pub struct InspectOkFn(F); - -impl<'a, F, T, E> FnOnce1<&'a Result> for InspectOkFn -where - F: 
FnOnce1<&'a T, Output = ()>, -{ - type Output = (); - fn call_once(self, arg: &'a Result) -> Self::Output { - if let Ok(x) = arg { - self.0.call_once(x) - } - } -} -impl<'a, F, T, E> FnMut1<&'a Result> for InspectOkFn -where - F: FnMut1<&'a T, Output = ()>, -{ - fn call_mut(&mut self, arg: &'a Result) -> Self::Output { - if let Ok(x) = arg { - self.0.call_mut(x) - } - } -} -impl<'a, F, T, E> Fn1<&'a Result> for InspectOkFn -where - F: Fn1<&'a T, Output = ()>, -{ - fn call(&self, arg: &'a Result) -> Self::Output { - if let Ok(x) = arg { - self.0.call(x) - } - } -} -pub(crate) fn inspect_ok_fn(f: F) -> InspectOkFn { - InspectOkFn(f) -} - -#[derive(Debug, Copy, Clone)] -pub struct InspectErrFn(F); - -impl<'a, F, T, E> FnOnce1<&'a Result> for InspectErrFn -where - F: FnOnce1<&'a E, Output = ()>, -{ - type Output = (); - fn call_once(self, arg: &'a Result) -> Self::Output { - if let Err(x) = arg { - self.0.call_once(x) - } - } -} -impl<'a, F, T, E> FnMut1<&'a Result> for InspectErrFn -where - F: FnMut1<&'a E, Output = ()>, -{ - fn call_mut(&mut self, arg: &'a Result) -> Self::Output { - if let Err(x) = arg { - self.0.call_mut(x) - } - } -} -impl<'a, F, T, E> Fn1<&'a Result> for InspectErrFn -where - F: Fn1<&'a E, Output = ()>, -{ - fn call(&self, arg: &'a Result) -> Self::Output { - if let Err(x) = arg { - self.0.call(x) - } - } -} -pub(crate) fn inspect_err_fn(f: F) -> InspectErrFn { - InspectErrFn(f) -} - -pub(crate) type MapOkOrElseFn = ChainFn, ChainFn, MergeResultFn>>; -pub(crate) fn map_ok_or_else_fn(f: F, g: G) -> MapOkOrElseFn { - chain_fn(map_ok_fn(f), chain_fn(map_err_fn(g), merge_result_fn())) -} - -#[derive(Debug, Copy, Clone, Default)] -pub struct UnwrapOrElseFn(F); - -impl FnOnce1> for UnwrapOrElseFn -where - F: FnOnce1, -{ - type Output = T; - fn call_once(self, arg: Result) -> Self::Output { - arg.unwrap_or_else(|x| self.0.call_once(x)) - } -} -impl FnMut1> for UnwrapOrElseFn -where - F: FnMut1, -{ - fn call_mut(&mut self, arg: Result) -> Self::Output { 
- arg.unwrap_or_else(|x| self.0.call_mut(x)) - } -} -impl Fn1> for UnwrapOrElseFn -where - F: Fn1, -{ - fn call(&self, arg: Result) -> Self::Output { - arg.unwrap_or_else(|x| self.0.call(x)) - } -} -pub(crate) fn unwrap_or_else_fn(f: F) -> UnwrapOrElseFn { - UnwrapOrElseFn(f) -} - -pub struct IntoFn(PhantomData T>); - -impl Default for IntoFn { - fn default() -> Self { - Self(PhantomData) - } -} -impl FnOnce1 for IntoFn -where - A: Into, -{ - type Output = T; - fn call_once(self, arg: A) -> Self::Output { - arg.into() - } -} - -trivial_fn_impls!(into_fn IntoFn = "Into::into"); diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/abortable.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/abortable.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/abortable.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/abortable.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -use super::assert_future; -use crate::future::{AbortHandle, Abortable, Aborted}; -use futures_core::future::Future; - -/// Creates a new `Abortable` future and an `AbortHandle` which can be used to stop it. -/// -/// This function is a convenient (but less flexible) alternative to calling -/// `AbortHandle::new` and `Abortable::new` manually. -/// -/// This function is only available when the `std` or `alloc` feature of this -/// library is activated, and it is activated by default. 
-pub fn abortable(future: Fut) -> (Abortable, AbortHandle) -where - Fut: Future, -{ - let (handle, reg) = AbortHandle::new_pair(); - let abortable = assert_future::, _>(Abortable::new(future, reg)); - (abortable, handle) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/either.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/either.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/either.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/either.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,317 +0,0 @@ -use core::pin::Pin; -use core::task::{Context, Poll}; -use futures_core::future::{FusedFuture, Future}; -use futures_core::stream::{FusedStream, Stream}; -#[cfg(feature = "sink")] -use futures_sink::Sink; - -/// Combines two different futures, streams, or sinks having the same associated types into a single type. -/// -/// This is useful when conditionally choosing between two distinct future types: -/// -/// ```rust -/// use futures::future::Either; -/// -/// # futures::executor::block_on(async { -/// let cond = true; -/// -/// let fut = if cond { -/// Either::Left(async move { 12 }) -/// } else { -/// Either::Right(async move { 44 }) -/// }; -/// -/// assert_eq!(fut.await, 12); -/// # }) -/// ``` -#[derive(Debug, Clone)] -pub enum Either { - /// First branch of the type - Left(/* #[pin] */ A), - /// Second branch of the type - Right(/* #[pin] */ B), -} - -impl Either { - /// Convert `Pin<&Either>` to `Either, Pin<&B>>`, - /// pinned projections of the inner variants. - pub fn as_pin_ref(self: Pin<&Self>) -> Either, Pin<&B>> { - // SAFETY: We can use `new_unchecked` because the `inner` parts are - // guaranteed to be pinned, as they come from `self` which is pinned. 
- unsafe { - match *Pin::get_ref(self) { - Either::Left(ref inner) => Either::Left(Pin::new_unchecked(inner)), - Either::Right(ref inner) => Either::Right(Pin::new_unchecked(inner)), - } - } - } - - /// Convert `Pin<&mut Either>` to `Either, Pin<&mut B>>`, - /// pinned projections of the inner variants. - pub fn as_pin_mut(self: Pin<&mut Self>) -> Either, Pin<&mut B>> { - // SAFETY: `get_unchecked_mut` is fine because we don't move anything. - // We can use `new_unchecked` because the `inner` parts are guaranteed - // to be pinned, as they come from `self` which is pinned, and we never - // offer an unpinned `&mut A` or `&mut B` through `Pin<&mut Self>`. We - // also don't have an implementation of `Drop`, nor manual `Unpin`. - unsafe { - match *Pin::get_unchecked_mut(self) { - Either::Left(ref mut inner) => Either::Left(Pin::new_unchecked(inner)), - Either::Right(ref mut inner) => Either::Right(Pin::new_unchecked(inner)), - } - } - } -} - -impl Either<(T, A), (T, B)> { - /// Factor out a homogeneous type from an either of pairs. - /// - /// Here, the homogeneous type is the first element of the pairs. - pub fn factor_first(self) -> (T, Either) { - match self { - Either::Left((x, a)) => (x, Either::Left(a)), - Either::Right((x, b)) => (x, Either::Right(b)), - } - } -} - -impl Either<(A, T), (B, T)> { - /// Factor out a homogeneous type from an either of pairs. - /// - /// Here, the homogeneous type is the second element of the pairs. - pub fn factor_second(self) -> (Either, T) { - match self { - Either::Left((a, x)) => (Either::Left(a), x), - Either::Right((b, x)) => (Either::Right(b), x), - } - } -} - -impl Either { - /// Extract the value of an either over two equivalent types. 
- pub fn into_inner(self) -> T { - match self { - Either::Left(x) => x, - Either::Right(x) => x, - } - } -} - -impl Future for Either -where - A: Future, - B: Future, -{ - type Output = A::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.as_pin_mut() { - Either::Left(x) => x.poll(cx), - Either::Right(x) => x.poll(cx), - } - } -} - -impl FusedFuture for Either -where - A: FusedFuture, - B: FusedFuture, -{ - fn is_terminated(&self) -> bool { - match self { - Either::Left(x) => x.is_terminated(), - Either::Right(x) => x.is_terminated(), - } - } -} - -impl Stream for Either -where - A: Stream, - B: Stream, -{ - type Item = A::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_next(cx), - Either::Right(x) => x.poll_next(cx), - } - } - - fn size_hint(&self) -> (usize, Option) { - match self { - Either::Left(x) => x.size_hint(), - Either::Right(x) => x.size_hint(), - } - } -} - -impl FusedStream for Either -where - A: FusedStream, - B: FusedStream, -{ - fn is_terminated(&self) -> bool { - match self { - Either::Left(x) => x.is_terminated(), - Either::Right(x) => x.is_terminated(), - } - } -} - -#[cfg(feature = "sink")] -impl Sink for Either -where - A: Sink, - B: Sink, -{ - type Error = A::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_ready(cx), - Either::Right(x) => x.poll_ready(cx), - } - } - - fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - match self.as_pin_mut() { - Either::Left(x) => x.start_send(item), - Either::Right(x) => x.start_send(item), - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_flush(cx), - Either::Right(x) => x.poll_flush(cx), - } - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match 
self.as_pin_mut() { - Either::Left(x) => x.poll_close(cx), - Either::Right(x) => x.poll_close(cx), - } - } -} - -#[cfg(feature = "io")] -#[cfg(feature = "std")] -mod if_std { - use super::*; - - use core::pin::Pin; - use core::task::{Context, Poll}; - use futures_io::{ - AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSlice, IoSliceMut, Result, SeekFrom, - }; - - impl AsyncRead for Either - where - A: AsyncRead, - B: AsyncRead, - { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_read(cx, buf), - Either::Right(x) => x.poll_read(cx, buf), - } - } - - fn poll_read_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_read_vectored(cx, bufs), - Either::Right(x) => x.poll_read_vectored(cx, bufs), - } - } - } - - impl AsyncWrite for Either - where - A: AsyncWrite, - B: AsyncWrite, - { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_write(cx, buf), - Either::Right(x) => x.poll_write(cx, buf), - } - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_write_vectored(cx, bufs), - Either::Right(x) => x.poll_write_vectored(cx, bufs), - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_flush(cx), - Either::Right(x) => x.poll_flush(cx), - } - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_close(cx), - Either::Right(x) => x.poll_close(cx), - } - } - } - - impl AsyncSeek for Either - where - A: AsyncSeek, - B: AsyncSeek, - { - fn poll_seek( - self: Pin<&mut Self>, - cx: &mut Context<'_>, 
- pos: SeekFrom, - ) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_seek(cx, pos), - Either::Right(x) => x.poll_seek(cx, pos), - } - } - } - - impl AsyncBufRead for Either - where - A: AsyncBufRead, - B: AsyncBufRead, - { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.as_pin_mut() { - Either::Left(x) => x.poll_fill_buf(cx), - Either::Right(x) => x.poll_fill_buf(cx), - } - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - match self.as_pin_mut() { - Either::Left(x) => x.consume(amt), - Either::Right(x) => x.consume(amt), - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/catch_unwind.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/catch_unwind.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/catch_unwind.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/catch_unwind.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,38 +0,0 @@ -use core::any::Any; -use core::pin::Pin; -use std::panic::{catch_unwind, AssertUnwindSafe, UnwindSafe}; - -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`catch_unwind`](super::FutureExt::catch_unwind) method. 
- #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct CatchUnwind { - #[pin] - future: Fut, - } -} - -impl CatchUnwind -where - Fut: Future + UnwindSafe, -{ - pub(super) fn new(future: Fut) -> Self { - Self { future } - } -} - -impl Future for CatchUnwind -where - Fut: Future + UnwindSafe, -{ - type Output = Result>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let f = self.project().future; - catch_unwind(AssertUnwindSafe(|| f.poll(cx)))?.map(Ok) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/flatten.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/flatten.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/flatten.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/flatten.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,153 +0,0 @@ -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! 
{ - #[project = FlattenProj] - #[derive(Debug)] - pub enum Flatten { - First { #[pin] f: Fut1 }, - Second { #[pin] f: Fut2 }, - Empty, - } -} - -impl Flatten { - pub(crate) fn new(future: Fut1) -> Self { - Self::First { f: future } - } -} - -impl FusedFuture for Flatten -where - Fut: Future, - Fut::Output: Future, -{ - fn is_terminated(&self) -> bool { - match self { - Self::Empty => true, - _ => false, - } - } -} - -impl Future for Flatten -where - Fut: Future, - Fut::Output: Future, -{ - type Output = ::Output; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Poll::Ready(loop { - match self.as_mut().project() { - FlattenProj::First { f } => { - let f = ready!(f.poll(cx)); - self.set(Self::Second { f }); - } - FlattenProj::Second { f } => { - let output = ready!(f.poll(cx)); - self.set(Self::Empty); - break output; - } - FlattenProj::Empty => panic!("Flatten polled after completion"), - } - }) - } -} - -impl FusedStream for Flatten -where - Fut: Future, - Fut::Output: Stream, -{ - fn is_terminated(&self) -> bool { - match self { - Self::Empty => true, - _ => false, - } - } -} - -impl Stream for Flatten -where - Fut: Future, - Fut::Output: Stream, -{ - type Item = ::Item; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Poll::Ready(loop { - match self.as_mut().project() { - FlattenProj::First { f } => { - let f = ready!(f.poll(cx)); - self.set(Self::Second { f }); - } - FlattenProj::Second { f } => { - let output = ready!(f.poll_next(cx)); - if output.is_none() { - self.set(Self::Empty); - } - break output; - } - FlattenProj::Empty => break None, - } - }) - } -} - -#[cfg(feature = "sink")] -impl Sink for Flatten -where - Fut: Future, - Fut::Output: Sink, -{ - type Error = >::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Poll::Ready(loop { - match self.as_mut().project() { - FlattenProj::First { f } => { - let f = ready!(f.poll(cx)); - self.set(Self::Second { f }); - } - 
FlattenProj::Second { f } => { - break ready!(f.poll_ready(cx)); - } - FlattenProj::Empty => panic!("poll_ready called after eof"), - } - }) - } - - fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - match self.project() { - FlattenProj::First { .. } => panic!("poll_ready not called first"), - FlattenProj::Second { f } => f.start_send(item), - FlattenProj::Empty => panic!("start_send called after eof"), - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.project() { - FlattenProj::First { .. } => Poll::Ready(Ok(())), - FlattenProj::Second { f } => f.poll_flush(cx), - FlattenProj::Empty => panic!("poll_flush called after eof"), - } - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let res = match self.as_mut().project() { - FlattenProj::Second { f } => f.poll_close(cx), - _ => Poll::Ready(Ok(())), - }; - if res.is_ready() { - self.set(Self::Empty); - } - res - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/fuse.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/fuse.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/fuse.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/fuse.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`fuse`](super::FutureExt::fuse) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Fuse { - #[pin] - inner: Option, - } -} - -impl Fuse { - pub(super) fn new(f: Fut) -> Self { - Self { inner: Some(f) } - } -} - -impl Fuse { - /// Creates a new `Fuse`-wrapped future which is already terminated. 
- /// - /// This can be useful in combination with looping and the `select!` - /// macro, which bypasses terminated futures. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::future::{Fuse, FusedFuture, FutureExt}; - /// use futures::select; - /// use futures::stream::StreamExt; - /// use futures::pin_mut; - /// - /// let (sender, mut stream) = mpsc::unbounded(); - /// - /// // Send a few messages into the stream - /// sender.unbounded_send(()).unwrap(); - /// sender.unbounded_send(()).unwrap(); - /// drop(sender); - /// - /// // Use `Fuse::terminated()` to create an already-terminated future - /// // which may be instantiated later. - /// let foo_printer = Fuse::terminated(); - /// pin_mut!(foo_printer); - /// - /// loop { - /// select! { - /// _ = foo_printer => {}, - /// () = stream.select_next_some() => { - /// if !foo_printer.is_terminated() { - /// println!("Foo is already being printed!"); - /// } else { - /// foo_printer.set(async { - /// // do some other async operations - /// println!("Printing foo from `foo_printer` future"); - /// }.fuse()); - /// } - /// }, - /// complete => break, // `foo_printer` is terminated and the stream is done - /// } - /// } - /// # }); - /// ``` - pub fn terminated() -> Self { - Self { inner: None } - } -} - -impl FusedFuture for Fuse { - fn is_terminated(&self) -> bool { - self.inner.is_none() - } -} - -impl Future for Fuse { - type Output = Fut::Output; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.as_mut().project().inner.as_pin_mut() { - Some(fut) => fut.poll(cx).map(|output| { - self.project().inner.set(None); - output - }), - None => Poll::Pending, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/map.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/map.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/map.rs 2024-02-06 
12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,66 +0,0 @@ -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -use crate::fns::FnOnce1; - -pin_project! { - /// Internal Map future - #[project = MapProj] - #[project_replace = MapProjReplace] - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub enum Map { - Incomplete { - #[pin] - future: Fut, - f: F, - }, - Complete, - } -} - -impl Map { - /// Creates a new Map. - pub(crate) fn new(future: Fut, f: F) -> Self { - Self::Incomplete { future, f } - } -} - -impl FusedFuture for Map -where - Fut: Future, - F: FnOnce1, -{ - fn is_terminated(&self) -> bool { - match self { - Self::Incomplete { .. } => false, - Self::Complete => true, - } - } -} - -impl Future for Map -where - Fut: Future, - F: FnOnce1, -{ - type Output = T; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.as_mut().project() { - MapProj::Incomplete { future, .. } => { - let output = ready!(future.poll(cx)); - match self.project_replace(Map::Complete) { - MapProjReplace::Incomplete { f, .. } => Poll::Ready(f.call_once(output)), - MapProjReplace::Complete => unreachable!(), - } - } - MapProj::Complete => { - panic!("Map must not be polled after it returned `Poll::Ready`") - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,606 +0,0 @@ -//! Futures -//! -//! This module contains a number of functions for working with `Future`s, -//! 
including the `FutureExt` trait which adds methods to `Future` types. - -#[cfg(feature = "alloc")] -use alloc::boxed::Box; -use core::pin::Pin; - -use crate::fns::{inspect_fn, into_fn, ok_fn, InspectFn, IntoFn, OkFn}; -use crate::future::{assert_future, Either}; -use crate::never::Never; -use crate::stream::assert_stream; -#[cfg(feature = "alloc")] -use futures_core::future::{BoxFuture, LocalBoxFuture}; -use futures_core::{ - future::Future, - stream::Stream, - task::{Context, Poll}, -}; -use pin_utils::pin_mut; - -// Combinators - -mod flatten; -mod fuse; -mod map; - -delegate_all!( - /// Future for the [`flatten`](super::FutureExt::flatten) method. - Flatten( - flatten::Flatten::Output> - ): Debug + Future + FusedFuture + New[|x: F| flatten::Flatten::new(x)] - where F: Future -); - -delegate_all!( - /// Stream for the [`flatten_stream`](FutureExt::flatten_stream) method. - FlattenStream( - flatten::Flatten::Output> - ): Debug + Sink + Stream + FusedStream + New[|x: F| flatten::Flatten::new(x)] - where F: Future -); - -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use fuse::Fuse; - -delegate_all!( - /// Future for the [`map`](super::FutureExt::map) method. - Map( - map::Map - ): Debug + Future + FusedFuture + New[|x: Fut, f: F| map::Map::new(x, f)] -); - -delegate_all!( - /// Stream for the [`into_stream`](FutureExt::into_stream) method. - IntoStream( - crate::stream::Once - ): Debug + Stream + FusedStream + New[|x: F| crate::stream::Once::new(x)] -); - -delegate_all!( - /// Future for the [`map_into`](FutureExt::map_into) combinator. - MapInto( - Map> - ): Debug + Future + FusedFuture + New[|x: Fut| Map::new(x, into_fn())] -); - -delegate_all!( - /// Future for the [`then`](FutureExt::then) method. - Then( - flatten::Flatten, Fut2> - ): Debug + Future + FusedFuture + New[|x: Fut1, y: F| flatten::Flatten::new(Map::new(x, y))] -); - -delegate_all!( - /// Future for the [`inspect`](FutureExt::inspect) method. 
- Inspect( - map::Map> - ): Debug + Future + FusedFuture + New[|x: Fut, f: F| map::Map::new(x, inspect_fn(f))] -); - -delegate_all!( - /// Future for the [`never_error`](super::FutureExt::never_error) combinator. - NeverError( - Map> - ): Debug + Future + FusedFuture + New[|x: Fut| Map::new(x, ok_fn())] -); - -delegate_all!( - /// Future for the [`unit_error`](super::FutureExt::unit_error) combinator. - UnitError( - Map> - ): Debug + Future + FusedFuture + New[|x: Fut| Map::new(x, ok_fn())] -); - -#[cfg(feature = "std")] -mod catch_unwind; -#[cfg(feature = "std")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::catch_unwind::CatchUnwind; - -#[cfg(feature = "channel")] -#[cfg_attr(docsrs, doc(cfg(feature = "channel")))] -#[cfg(feature = "std")] -mod remote_handle; -#[cfg(feature = "channel")] -#[cfg_attr(docsrs, doc(cfg(feature = "channel")))] -#[cfg(feature = "std")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::remote_handle::{Remote, RemoteHandle}; - -#[cfg(feature = "std")] -mod shared; -#[cfg(feature = "std")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::shared::{Shared, WeakShared}; - -impl FutureExt for T where T: Future {} - -/// An extension trait for `Future`s that provides a variety of convenient -/// adapters. -pub trait FutureExt: Future { - /// Map this future's output to a different type, returning a new future of - /// the resulting type. - /// - /// This function is similar to the `Option::map` or `Iterator::map` where - /// it will change the type of the underlying future. This is useful to - /// chain along a computation once a future has been resolved. - /// - /// Note that this function consumes the receiving future and returns a - /// wrapped version of it, similar to the existing `map` methods in the - /// standard library. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// - /// let future = async { 1 }; - /// let new_future = future.map(|x| x + 3); - /// assert_eq!(new_future.await, 4); - /// # }); - /// ``` - fn map(self, f: F) -> Map - where - F: FnOnce(Self::Output) -> U, - Self: Sized, - { - assert_future::(Map::new(self, f)) - } - - /// Map this future's output to a different type, returning a new future of - /// the resulting type. - /// - /// This function is equivalent to calling `map(Into::into)` but allows naming - /// the return type. - fn map_into(self) -> MapInto - where - Self::Output: Into, - Self: Sized, - { - assert_future::(MapInto::new(self)) - } - - /// Chain on a computation for when a future finished, passing the result of - /// the future to the provided closure `f`. - /// - /// The returned value of the closure must implement the `Future` trait - /// and can represent some more work to be done before the composed future - /// is finished. - /// - /// The closure `f` is only run *after* successful completion of the `self` - /// future. - /// - /// Note that this function consumes the receiving future and returns a - /// wrapped version of it. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// - /// let future_of_1 = async { 1 }; - /// let future_of_4 = future_of_1.then(|x| async move { x + 3 }); - /// assert_eq!(future_of_4.await, 4); - /// # }); - /// ``` - fn then(self, f: F) -> Then - where - F: FnOnce(Self::Output) -> Fut, - Fut: Future, - Self: Sized, - { - assert_future::(Then::new(self, f)) - } - - /// Wrap this future in an `Either` future, making it the left-hand variant - /// of that `Either`. - /// - /// This can be used in combination with the `right_future` method to write `if` - /// statements that evaluate to different futures in different branches. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// - /// let x = 6; - /// let future = if x < 10 { - /// async { true }.left_future() - /// } else { - /// async { false }.right_future() - /// }; - /// - /// assert_eq!(future.await, true); - /// # }); - /// ``` - fn left_future(self) -> Either - where - B: Future, - Self: Sized, - { - assert_future::(Either::Left(self)) - } - - /// Wrap this future in an `Either` future, making it the right-hand variant - /// of that `Either`. - /// - /// This can be used in combination with the `left_future` method to write `if` - /// statements that evaluate to different futures in different branches. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// - /// let x = 6; - /// let future = if x > 10 { - /// async { true }.left_future() - /// } else { - /// async { false }.right_future() - /// }; - /// - /// assert_eq!(future.await, false); - /// # }); - /// ``` - fn right_future(self) -> Either - where - A: Future, - Self: Sized, - { - assert_future::(Either::Right(self)) - } - - /// Convert this future into a single element stream. - /// - /// The returned stream contains single success if this future resolves to - /// success or single error if this future resolves into error. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// use futures::stream::StreamExt; - /// - /// let future = async { 17 }; - /// let stream = future.into_stream(); - /// let collected: Vec<_> = stream.collect().await; - /// assert_eq!(collected, vec![17]); - /// # }); - /// ``` - fn into_stream(self) -> IntoStream - where - Self: Sized, - { - assert_stream::(IntoStream::new(self)) - } - - /// Flatten the execution of this future when the output of this - /// future is itself another future. 
- /// - /// This can be useful when combining futures together to flatten the - /// computation out the final result. - /// - /// This method is roughly equivalent to `self.then(|x| x)`. - /// - /// Note that this function consumes the receiving future and returns a - /// wrapped version of it. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// - /// let nested_future = async { async { 1 } }; - /// let future = nested_future.flatten(); - /// assert_eq!(future.await, 1); - /// # }); - /// ``` - fn flatten(self) -> Flatten - where - Self::Output: Future, - Self: Sized, - { - let f = Flatten::new(self); - assert_future::<<::Output as Future>::Output, _>(f) - } - - /// Flatten the execution of this future when the successful result of this - /// future is a stream. - /// - /// This can be useful when stream initialization is deferred, and it is - /// convenient to work with that stream as if stream was available at the - /// call site. - /// - /// Note that this function consumes this future and returns a wrapped - /// version of it. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// use futures::stream::{self, StreamExt}; - /// - /// let stream_items = vec![17, 18, 19]; - /// let future_of_a_stream = async { stream::iter(stream_items) }; - /// - /// let stream = future_of_a_stream.flatten_stream(); - /// let list: Vec<_> = stream.collect().await; - /// assert_eq!(list, vec![17, 18, 19]); - /// # }); - /// ``` - fn flatten_stream(self) -> FlattenStream - where - Self::Output: Stream, - Self: Sized, - { - assert_stream::<::Item, _>(FlattenStream::new(self)) - } - - /// Fuse a future such that `poll` will never again be called once it has - /// completed. This method can be used to turn any `Future` into a - /// `FusedFuture`. 
- /// - /// Normally, once a future has returned `Poll::Ready` from `poll`, - /// any further calls could exhibit bad behavior such as blocking - /// forever, panicking, never returning, etc. If it is known that `poll` - /// may be called too often then this method can be used to ensure that it - /// has defined semantics. - /// - /// If a `fuse`d future is `poll`ed after having returned `Poll::Ready` - /// previously, it will return `Poll::Pending`, from `poll` again (and will - /// continue to do so for all future calls to `poll`). - /// - /// This combinator will drop the underlying future as soon as it has been - /// completed to ensure resources are reclaimed as soon as possible. - fn fuse(self) -> Fuse - where - Self: Sized, - { - let f = Fuse::new(self); - assert_future::(f) - } - - /// Do something with the output of a future before passing it on. - /// - /// When using futures, you'll often chain several of them together. While - /// working on such code, you might want to check out what's happening at - /// various parts in the pipeline, without consuming the intermediate - /// value. To do that, insert a call to `inspect`. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// - /// let future = async { 1 }; - /// let new_future = future.inspect(|&x| println!("about to resolve: {}", x)); - /// assert_eq!(new_future.await, 1); - /// # }); - /// ``` - fn inspect(self, f: F) -> Inspect - where - F: FnOnce(&Self::Output), - Self: Sized, - { - assert_future::(Inspect::new(self, f)) - } - - /// Catches unwinding panics while polling the future. - /// - /// In general, panics within a future can propagate all the way out to the - /// task level. This combinator makes it possible to halt unwinding within - /// the future itself. It's most commonly used within task executors. It's - /// not recommended to use this for error handling. 
- /// - /// Note that this method requires the `UnwindSafe` bound from the standard - /// library. This isn't always applied automatically, and the standard - /// library provides an `AssertUnwindSafe` wrapper type to apply it - /// after-the fact. To assist using this method, the `Future` trait is also - /// implemented for `AssertUnwindSafe` where `F` implements `Future`. - /// - /// This method is only available when the `std` feature of this - /// library is activated, and it is activated by default. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::{self, FutureExt, Ready}; - /// - /// let future = future::ready(2); - /// assert!(future.catch_unwind().await.is_ok()); - /// - /// let future = future::lazy(|_| -> Ready { - /// unimplemented!() - /// }); - /// assert!(future.catch_unwind().await.is_err()); - /// # }); - /// ``` - #[cfg(feature = "std")] - fn catch_unwind(self) -> CatchUnwind - where - Self: Sized + ::std::panic::UnwindSafe, - { - assert_future::>, _>(CatchUnwind::new( - self, - )) - } - - /// Create a cloneable handle to this future where all handles will resolve - /// to the same result. - /// - /// The `shared` combinator method provides a method to convert any future - /// into a cloneable future. It enables a future to be polled by multiple - /// threads. - /// - /// This method is only available when the `std` feature of this - /// library is activated, and it is activated by default. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// - /// let future = async { 6 }; - /// let shared1 = future.shared(); - /// let shared2 = shared1.clone(); - /// - /// assert_eq!(6, shared1.await); - /// assert_eq!(6, shared2.await); - /// # }); - /// ``` - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::FutureExt; - /// use futures::executor::block_on; - /// use std::thread; - /// - /// let future = async { 6 }; - /// let shared1 = future.shared(); - /// let shared2 = shared1.clone(); - /// let join_handle = thread::spawn(move || { - /// assert_eq!(6, block_on(shared2)); - /// }); - /// assert_eq!(6, shared1.await); - /// join_handle.join().unwrap(); - /// # }); - /// ``` - #[cfg(feature = "std")] - fn shared(self) -> Shared - where - Self: Sized, - Self::Output: Clone, - { - assert_future::(Shared::new(self)) - } - - /// Turn this future into a future that yields `()` on completion and sends - /// its output to another future on a separate task. - /// - /// This can be used with spawning executors to easily retrieve the result - /// of a future executing on a separate task or thread. - /// - /// This method is only available when the `std` feature of this - /// library is activated, and it is activated by default. - #[cfg(feature = "channel")] - #[cfg_attr(docsrs, doc(cfg(feature = "channel")))] - #[cfg(feature = "std")] - fn remote_handle(self) -> (Remote, RemoteHandle) - where - Self: Sized, - { - let (wrapped, handle) = remote_handle::remote_handle(self); - (assert_future::<(), _>(wrapped), handle) - } - - /// Wrap the future in a Box, pinning it. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. 
- #[cfg(feature = "alloc")] - fn boxed<'a>(self) -> BoxFuture<'a, Self::Output> - where - Self: Sized + Send + 'a, - { - assert_future::(Box::pin(self)) - } - - /// Wrap the future in a Box, pinning it. - /// - /// Similar to `boxed`, but without the `Send` requirement. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - #[cfg(feature = "alloc")] - fn boxed_local<'a>(self) -> LocalBoxFuture<'a, Self::Output> - where - Self: Sized + 'a, - { - assert_future::(Box::pin(self)) - } - - /// Turns a [`Future`](Future) into a - /// [`TryFuture](futures_core::future::TryFuture). - fn unit_error(self) -> UnitError - where - Self: Sized, - { - assert_future::, _>(UnitError::new(self)) - } - - /// Turns a [`Future`](Future) into a - /// [`TryFuture](futures_core::future::TryFuture). - fn never_error(self) -> NeverError - where - Self: Sized, - { - assert_future::, _>(NeverError::new(self)) - } - - /// A convenience for calling `Future::poll` on `Unpin` future types. - fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll - where - Self: Unpin, - { - Pin::new(self).poll(cx) - } - - /// Evaluates and consumes the future, returning the resulting output if - /// the future is ready after the first call to `Future::poll`. - /// - /// If `poll` instead returns `Poll::Pending`, `None` is returned. - /// - /// This method is useful in cases where immediacy is more important than - /// waiting for a result. It is also convenient for quickly obtaining - /// the value of a future that is known to always resolve immediately. 
- /// - /// # Examples - /// - /// ``` - /// # use futures::prelude::*; - /// use futures::{future::ready, future::pending}; - /// let future_ready = ready("foobar"); - /// let future_pending = pending::<&'static str>(); - /// - /// assert_eq!(future_ready.now_or_never(), Some("foobar")); - /// assert_eq!(future_pending.now_or_never(), None); - /// ``` - /// - /// In cases where it is absolutely known that a future should always - /// resolve immediately and never return `Poll::Pending`, this method can - /// be combined with `expect()`: - /// - /// ``` - /// # use futures::{prelude::*, future::ready}; - /// let future_ready = ready("foobar"); - /// - /// assert_eq!(future_ready.now_or_never().expect("Future not ready"), "foobar"); - /// ``` - fn now_or_never(self) -> Option - where - Self: Sized, - { - let noop_waker = crate::task::noop_waker(); - let mut cx = Context::from_waker(&noop_waker); - - let this = self; - pin_mut!(this); - match this.poll(&mut cx) { - Poll::Ready(x) => Some(x), - _ => None, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/remote_handle.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/remote_handle.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/remote_handle.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/remote_handle.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,126 +0,0 @@ -use { - crate::future::{CatchUnwind, FutureExt}, - futures_channel::oneshot::{self, Receiver, Sender}, - futures_core::{ - future::Future, - ready, - task::{Context, Poll}, - }, - pin_project_lite::pin_project, - std::{ - any::Any, - fmt, - panic::{self, AssertUnwindSafe}, - pin::Pin, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - thread, - }, -}; - -/// The handle to a remote future returned by -/// [`remote_handle`](crate::future::FutureExt::remote_handle). 
When you drop this, -/// the remote future will be woken up to be dropped by the executor. -/// -/// ## Unwind safety -/// -/// When the remote future panics, [Remote] will catch the unwind and transfer it to -/// the thread where `RemoteHandle` is being awaited. This is good for the common -/// case where [Remote] is spawned on a threadpool. It is unlikely that other code -/// in the executor working thread shares mutable data with the spawned future and we -/// preserve the executor from losing its working threads. -/// -/// If you run the future locally and send the handle of to be awaited elsewhere, you -/// must be careful with regard to unwind safety because the thread in which the future -/// is polled will keep running after the panic and the thread running the [RemoteHandle] -/// will unwind. -#[must_use = "dropping a remote handle cancels the underlying future"] -#[derive(Debug)] -#[cfg_attr(docsrs, doc(cfg(feature = "channel")))] -pub struct RemoteHandle { - rx: Receiver>, - keep_running: Arc, -} - -impl RemoteHandle { - /// Drops this handle *without* canceling the underlying future. - /// - /// This method can be used if you want to drop the handle, but let the - /// execution continue. - pub fn forget(self) { - self.keep_running.store(true, Ordering::SeqCst); - } -} - -impl Future for RemoteHandle { - type Output = T; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match ready!(self.rx.poll_unpin(cx)) { - Ok(Ok(output)) => Poll::Ready(output), - // the remote future panicked. - Ok(Err(e)) => panic::resume_unwind(e), - // The oneshot sender was dropped. - Err(e) => panic::resume_unwind(Box::new(e)), - } - } -} - -type SendMsg = Result<::Output, Box<(dyn Any + Send + 'static)>>; - -pin_project! { - /// A future which sends its output to the corresponding `RemoteHandle`. - /// Created by [`remote_handle`](crate::future::FutureExt::remote_handle). 
- #[must_use = "futures do nothing unless you `.await` or poll them"] - #[cfg_attr(docsrs, doc(cfg(feature = "channel")))] - pub struct Remote { - tx: Option>>, - keep_running: Arc, - #[pin] - future: CatchUnwind>, - } -} - -impl fmt::Debug for Remote { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Remote").field(&self.future).finish() - } -} - -impl Future for Remote { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - let this = self.project(); - - if this.tx.as_mut().unwrap().poll_canceled(cx).is_ready() - && !this.keep_running.load(Ordering::SeqCst) - { - // Cancelled, bail out - return Poll::Ready(()); - } - - let output = ready!(this.future.poll(cx)); - - // if the receiving end has gone away then that's ok, we just ignore the - // send error here. - drop(this.tx.take().unwrap().send(output)); - Poll::Ready(()) - } -} - -pub(super) fn remote_handle(future: Fut) -> (Remote, RemoteHandle) { - let (tx, rx) = oneshot::channel(); - let keep_running = Arc::new(AtomicBool::new(false)); - - // Unwind Safety: See the docs for RemoteHandle. 
- let wrapped = Remote { - future: AssertUnwindSafe(future).catch_unwind(), - tx: Some(tx), - keep_running: keep_running.clone(), - }; - - (wrapped, RemoteHandle { rx, keep_running }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/shared.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/shared.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/future/shared.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/future/shared.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,413 +0,0 @@ -use crate::task::{waker_ref, ArcWake}; -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll, Waker}; -use slab::Slab; -use std::cell::UnsafeCell; -use std::fmt; -use std::hash::Hasher; -use std::pin::Pin; -use std::ptr; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::{Acquire, SeqCst}; -use std::sync::{Arc, Mutex, Weak}; - -/// Future for the [`shared`](super::FutureExt::shared) method. -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Shared { - inner: Option>>, - waker_key: usize, -} - -struct Inner { - future_or_output: UnsafeCell>, - notifier: Arc, -} - -struct Notifier { - state: AtomicUsize, - wakers: Mutex>>>, -} - -/// A weak reference to a [`Shared`] that can be upgraded much like an `Arc`. -pub struct WeakShared(Weak>); - -impl Clone for WeakShared { - fn clone(&self) -> Self { - Self(self.0.clone()) - } -} - -// The future itself is polled behind the `Arc`, so it won't be moved -// when `Shared` is moved. 
-impl Unpin for Shared {} - -impl fmt::Debug for Shared { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Shared") - .field("inner", &self.inner) - .field("waker_key", &self.waker_key) - .finish() - } -} - -impl fmt::Debug for Inner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Inner").finish() - } -} - -impl fmt::Debug for WeakShared { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("WeakShared").finish() - } -} - -enum FutureOrOutput { - Future(Fut), - Output(Fut::Output), -} - -unsafe impl Send for Inner -where - Fut: Future + Send, - Fut::Output: Send + Sync, -{ -} - -unsafe impl Sync for Inner -where - Fut: Future + Send, - Fut::Output: Send + Sync, -{ -} - -const IDLE: usize = 0; -const POLLING: usize = 1; -const COMPLETE: usize = 2; -const POISONED: usize = 3; - -const NULL_WAKER_KEY: usize = usize::max_value(); - -impl Shared { - pub(super) fn new(future: Fut) -> Self { - let inner = Inner { - future_or_output: UnsafeCell::new(FutureOrOutput::Future(future)), - notifier: Arc::new(Notifier { - state: AtomicUsize::new(IDLE), - wakers: Mutex::new(Some(Slab::new())), - }), - }; - - Self { inner: Some(Arc::new(inner)), waker_key: NULL_WAKER_KEY } - } -} - -impl Shared -where - Fut: Future, -{ - /// Returns [`Some`] containing a reference to this [`Shared`]'s output if - /// it has already been computed by a clone or [`None`] if it hasn't been - /// computed yet or this [`Shared`] already returned its output from - /// [`poll`](Future::poll). - pub fn peek(&self) -> Option<&Fut::Output> { - if let Some(inner) = self.inner.as_ref() { - match inner.notifier.state.load(SeqCst) { - COMPLETE => unsafe { return Some(inner.output()) }, - POISONED => panic!("inner future panicked during poll"), - _ => {} - } - } - None - } - - /// Creates a new [`WeakShared`] for this [`Shared`]. - /// - /// Returns [`None`] if it has already been polled to completion. 
- pub fn downgrade(&self) -> Option> { - if let Some(inner) = self.inner.as_ref() { - return Some(WeakShared(Arc::downgrade(inner))); - } - None - } - - /// Gets the number of strong pointers to this allocation. - /// - /// Returns [`None`] if it has already been polled to completion. - /// - /// # Safety - /// - /// This method by itself is safe, but using it correctly requires extra care. Another thread - /// can change the strong count at any time, including potentially between calling this method - /// and acting on the result. - #[allow(clippy::unnecessary_safety_doc)] - pub fn strong_count(&self) -> Option { - self.inner.as_ref().map(|arc| Arc::strong_count(arc)) - } - - /// Gets the number of weak pointers to this allocation. - /// - /// Returns [`None`] if it has already been polled to completion. - /// - /// # Safety - /// - /// This method by itself is safe, but using it correctly requires extra care. Another thread - /// can change the weak count at any time, including potentially between calling this method - /// and acting on the result. - #[allow(clippy::unnecessary_safety_doc)] - pub fn weak_count(&self) -> Option { - self.inner.as_ref().map(|arc| Arc::weak_count(arc)) - } - - /// Hashes the internal state of this `Shared` in a way that's compatible with `ptr_eq`. - pub fn ptr_hash(&self, state: &mut H) { - match self.inner.as_ref() { - Some(arc) => { - state.write_u8(1); - ptr::hash(Arc::as_ptr(arc), state); - } - None => { - state.write_u8(0); - } - } - } - - /// Returns `true` if the two `Shared`s point to the same future (in a vein similar to - /// `Arc::ptr_eq`). - /// - /// Returns `false` if either `Shared` has terminated. 
- pub fn ptr_eq(&self, rhs: &Self) -> bool { - let lhs = match self.inner.as_ref() { - Some(lhs) => lhs, - None => return false, - }; - let rhs = match rhs.inner.as_ref() { - Some(rhs) => rhs, - None => return false, - }; - Arc::ptr_eq(lhs, rhs) - } -} - -impl Inner -where - Fut: Future, -{ - /// Safety: callers must first ensure that `self.inner.state` - /// is `COMPLETE` - unsafe fn output(&self) -> &Fut::Output { - match &*self.future_or_output.get() { - FutureOrOutput::Output(ref item) => item, - FutureOrOutput::Future(_) => unreachable!(), - } - } -} - -impl Inner -where - Fut: Future, - Fut::Output: Clone, -{ - /// Registers the current task to receive a wakeup when we are awoken. - fn record_waker(&self, waker_key: &mut usize, cx: &mut Context<'_>) { - let mut wakers_guard = self.notifier.wakers.lock().unwrap(); - - let wakers = match wakers_guard.as_mut() { - Some(wakers) => wakers, - None => return, - }; - - let new_waker = cx.waker(); - - if *waker_key == NULL_WAKER_KEY { - *waker_key = wakers.insert(Some(new_waker.clone())); - } else { - match wakers[*waker_key] { - Some(ref old_waker) if new_waker.will_wake(old_waker) => {} - // Could use clone_from here, but Waker doesn't specialize it. 
- ref mut slot => *slot = Some(new_waker.clone()), - } - } - debug_assert!(*waker_key != NULL_WAKER_KEY); - } - - /// Safety: callers must first ensure that `inner.state` - /// is `COMPLETE` - unsafe fn take_or_clone_output(self: Arc) -> Fut::Output { - match Arc::try_unwrap(self) { - Ok(inner) => match inner.future_or_output.into_inner() { - FutureOrOutput::Output(item) => item, - FutureOrOutput::Future(_) => unreachable!(), - }, - Err(inner) => inner.output().clone(), - } - } -} - -impl FusedFuture for Shared -where - Fut: Future, - Fut::Output: Clone, -{ - fn is_terminated(&self) -> bool { - self.inner.is_none() - } -} - -impl Future for Shared -where - Fut: Future, - Fut::Output: Clone, -{ - type Output = Fut::Output; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - - let inner = this.inner.take().expect("Shared future polled again after completion"); - - // Fast path for when the wrapped future has already completed - if inner.notifier.state.load(Acquire) == COMPLETE { - // Safety: We're in the COMPLETE state - return unsafe { Poll::Ready(inner.take_or_clone_output()) }; - } - - inner.record_waker(&mut this.waker_key, cx); - - match inner - .notifier - .state - .compare_exchange(IDLE, POLLING, SeqCst, SeqCst) - .unwrap_or_else(|x| x) - { - IDLE => { - // Lock acquired, fall through - } - POLLING => { - // Another task is currently polling, at this point we just want - // to ensure that the waker for this task is registered - this.inner = Some(inner); - return Poll::Pending; - } - COMPLETE => { - // Safety: We're in the COMPLETE state - return unsafe { Poll::Ready(inner.take_or_clone_output()) }; - } - POISONED => panic!("inner future panicked during poll"), - _ => unreachable!(), - } - - let waker = waker_ref(&inner.notifier); - let mut cx = Context::from_waker(&waker); - - struct Reset<'a> { - state: &'a AtomicUsize, - did_not_panic: bool, - } - - impl Drop for Reset<'_> { - fn drop(&mut self) { - if 
!self.did_not_panic { - self.state.store(POISONED, SeqCst); - } - } - } - - let mut reset = Reset { state: &inner.notifier.state, did_not_panic: false }; - - let output = { - let future = unsafe { - match &mut *inner.future_or_output.get() { - FutureOrOutput::Future(fut) => Pin::new_unchecked(fut), - _ => unreachable!(), - } - }; - - let poll_result = future.poll(&mut cx); - reset.did_not_panic = true; - - match poll_result { - Poll::Pending => { - if inner.notifier.state.compare_exchange(POLLING, IDLE, SeqCst, SeqCst).is_ok() - { - // Success - drop(reset); - this.inner = Some(inner); - return Poll::Pending; - } else { - unreachable!() - } - } - Poll::Ready(output) => output, - } - }; - - unsafe { - *inner.future_or_output.get() = FutureOrOutput::Output(output); - } - - inner.notifier.state.store(COMPLETE, SeqCst); - - // Wake all tasks and drop the slab - let mut wakers_guard = inner.notifier.wakers.lock().unwrap(); - let mut wakers = wakers_guard.take().unwrap(); - for waker in wakers.drain().flatten() { - waker.wake(); - } - - drop(reset); // Make borrow checker happy - drop(wakers_guard); - - // Safety: We're in the COMPLETE state - unsafe { Poll::Ready(inner.take_or_clone_output()) } - } -} - -impl Clone for Shared -where - Fut: Future, -{ - fn clone(&self) -> Self { - Self { inner: self.inner.clone(), waker_key: NULL_WAKER_KEY } - } -} - -impl Drop for Shared -where - Fut: Future, -{ - fn drop(&mut self) { - if self.waker_key != NULL_WAKER_KEY { - if let Some(ref inner) = self.inner { - if let Ok(mut wakers) = inner.notifier.wakers.lock() { - if let Some(wakers) = wakers.as_mut() { - wakers.remove(self.waker_key); - } - } - } - } - } -} - -impl ArcWake for Notifier { - fn wake_by_ref(arc_self: &Arc) { - let wakers = &mut *arc_self.wakers.lock().unwrap(); - if let Some(wakers) = wakers.as_mut() { - for (_key, opt_waker) in wakers { - if let Some(waker) = opt_waker.take() { - waker.wake(); - } - } - } - } -} - -impl WeakShared { - /// Attempts to upgrade this 
[`WeakShared`] into a [`Shared`]. - /// - /// Returns [`None`] if all clones of the [`Shared`] have been dropped or polled - /// to completion. - pub fn upgrade(&self) -> Option> { - Some(Shared { inner: Some(self.0.upgrade()?), waker_key: NULL_WAKER_KEY }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/join_all.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/join_all.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/join_all.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/join_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,166 +0,0 @@ -//! Definition of the `JoinAll` combinator, waiting for all of a list of futures -//! to finish. - -use alloc::boxed::Box; -use alloc::vec::Vec; -use core::fmt; -use core::future::Future; -use core::iter::FromIterator; -use core::mem; -use core::pin::Pin; -use core::task::{Context, Poll}; - -use super::{assert_future, MaybeDone}; - -#[cfg(not(futures_no_atomic_cas))] -use crate::stream::{Collect, FuturesOrdered, StreamExt}; - -pub(crate) fn iter_pin_mut(slice: Pin<&mut [T]>) -> impl Iterator> { - // Safety: `std` _could_ make this unsound if it were to decide Pin's - // invariants aren't required to transmit through slices. Otherwise this has - // the same safety as a normal field pin projection. - unsafe { slice.get_unchecked_mut() }.iter_mut().map(|t| unsafe { Pin::new_unchecked(t) }) -} - -#[must_use = "futures do nothing unless you `.await` or poll them"] -/// Future for the [`join_all`] function. 
-pub struct JoinAll -where - F: Future, -{ - kind: JoinAllKind, -} - -#[cfg(not(futures_no_atomic_cas))] -pub(crate) const SMALL: usize = 30; - -enum JoinAllKind -where - F: Future, -{ - Small { - elems: Pin]>>, - }, - #[cfg(not(futures_no_atomic_cas))] - Big { - fut: Collect, Vec>, - }, -} - -impl fmt::Debug for JoinAll -where - F: Future + fmt::Debug, - F::Output: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.kind { - JoinAllKind::Small { ref elems } => { - f.debug_struct("JoinAll").field("elems", elems).finish() - } - #[cfg(not(futures_no_atomic_cas))] - JoinAllKind::Big { ref fut, .. } => fmt::Debug::fmt(fut, f), - } - } -} - -/// Creates a future which represents a collection of the outputs of the futures -/// given. -/// -/// The returned future will drive execution for all of its underlying futures, -/// collecting the results into a destination `Vec` in the same order as they -/// were provided. -/// -/// This function is only available when the `std` or `alloc` feature of this -/// library is activated, and it is activated by default. -/// -/// # See Also -/// -/// `join_all` will switch to the more powerful [`FuturesOrdered`] for performance -/// reasons if the number of futures is large. You may want to look into using it or -/// its counterpart [`FuturesUnordered`][crate::stream::FuturesUnordered] directly. -/// -/// Some examples for additional functionality provided by these are: -/// -/// * Adding new futures to the set even after it has been started. -/// -/// * Only polling the specific futures that have been woken. In cases where -/// you have a lot of futures this will result in much more efficient polling. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future::join_all; -/// -/// async fn foo(i: u32) -> u32 { i } -/// -/// let futures = vec![foo(1), foo(2), foo(3)]; -/// -/// assert_eq!(join_all(futures).await, [1, 2, 3]); -/// # }); -/// ``` -pub fn join_all(iter: I) -> JoinAll -where - I: IntoIterator, - I::Item: Future, -{ - let iter = iter.into_iter(); - - #[cfg(futures_no_atomic_cas)] - { - let kind = - JoinAllKind::Small { elems: iter.map(MaybeDone::Future).collect::>().into() }; - - assert_future::::Output>, _>(JoinAll { kind }) - } - - #[cfg(not(futures_no_atomic_cas))] - { - let kind = match iter.size_hint().1 { - Some(max) if max <= SMALL => JoinAllKind::Small { - elems: iter.map(MaybeDone::Future).collect::>().into(), - }, - _ => JoinAllKind::Big { fut: iter.collect::>().collect() }, - }; - - assert_future::::Output>, _>(JoinAll { kind }) - } -} - -impl Future for JoinAll -where - F: Future, -{ - type Output = Vec; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match &mut self.kind { - JoinAllKind::Small { elems } => { - let mut all_done = true; - - for elem in iter_pin_mut(elems.as_mut()) { - if elem.poll(cx).is_pending() { - all_done = false; - } - } - - if all_done { - let mut elems = mem::replace(elems, Box::pin([])); - let result = - iter_pin_mut(elems.as_mut()).map(|e| e.take_output().unwrap()).collect(); - Poll::Ready(result) - } else { - Poll::Pending - } - } - #[cfg(not(futures_no_atomic_cas))] - JoinAllKind::Big { fut } => Pin::new(fut).poll(cx), - } - } -} - -impl FromIterator for JoinAll { - fn from_iter>(iter: T) -> Self { - join_all(iter) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/join.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/join.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/join.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/join.rs 1970-01-01 
01:00:00.000000000 +0100 @@ -1,217 +0,0 @@ -#![allow(non_snake_case)] - -use super::assert_future; -use crate::future::{maybe_done, MaybeDone}; -use core::fmt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -macro_rules! generate { - ($( - $(#[$doc:meta])* - ($Join:ident, <$($Fut:ident),*>), - )*) => ($( - pin_project! { - $(#[$doc])* - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct $Join<$($Fut: Future),*> { - $(#[pin] $Fut: MaybeDone<$Fut>,)* - } - } - - impl<$($Fut),*> fmt::Debug for $Join<$($Fut),*> - where - $( - $Fut: Future + fmt::Debug, - $Fut::Output: fmt::Debug, - )* - { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct(stringify!($Join)) - $(.field(stringify!($Fut), &self.$Fut))* - .finish() - } - } - - impl<$($Fut: Future),*> $Join<$($Fut),*> { - fn new($($Fut: $Fut),*) -> Self { - Self { - $($Fut: maybe_done($Fut)),* - } - } - } - - impl<$($Fut: Future),*> Future for $Join<$($Fut),*> { - type Output = ($($Fut::Output),*); - - fn poll( - self: Pin<&mut Self>, cx: &mut Context<'_> - ) -> Poll { - let mut all_done = true; - let mut futures = self.project(); - $( - all_done &= futures.$Fut.as_mut().poll(cx).is_ready(); - )* - - if all_done { - Poll::Ready(($(futures.$Fut.take_output().unwrap()), *)) - } else { - Poll::Pending - } - } - } - - impl<$($Fut: FusedFuture),*> FusedFuture for $Join<$($Fut),*> { - fn is_terminated(&self) -> bool { - $( - self.$Fut.is_terminated() - ) && * - } - } - )*) -} - -generate! { - /// Future for the [`join`](join()) function. - (Join, ), - - /// Future for the [`join3`] function. - (Join3, ), - - /// Future for the [`join4`] function. - (Join4, ), - - /// Future for the [`join5`] function. - (Join5, ), -} - -/// Joins the result of two futures, waiting for them both to complete. 
-/// -/// This function will return a new future which awaits both futures to -/// complete. The returned future will finish with a tuple of both results. -/// -/// Note that this function consumes the passed futures and returns a -/// wrapped version of it. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = async { 1 }; -/// let b = async { 2 }; -/// let pair = future::join(a, b); -/// -/// assert_eq!(pair.await, (1, 2)); -/// # }); -/// ``` -pub fn join(future1: Fut1, future2: Fut2) -> Join -where - Fut1: Future, - Fut2: Future, -{ - let f = Join::new(future1, future2); - assert_future::<(Fut1::Output, Fut2::Output), _>(f) -} - -/// Same as [`join`](join()), but with more futures. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = async { 1 }; -/// let b = async { 2 }; -/// let c = async { 3 }; -/// let tuple = future::join3(a, b, c); -/// -/// assert_eq!(tuple.await, (1, 2, 3)); -/// # }); -/// ``` -pub fn join3( - future1: Fut1, - future2: Fut2, - future3: Fut3, -) -> Join3 -where - Fut1: Future, - Fut2: Future, - Fut3: Future, -{ - let f = Join3::new(future1, future2, future3); - assert_future::<(Fut1::Output, Fut2::Output, Fut3::Output), _>(f) -} - -/// Same as [`join`](join()), but with more futures. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = async { 1 }; -/// let b = async { 2 }; -/// let c = async { 3 }; -/// let d = async { 4 }; -/// let tuple = future::join4(a, b, c, d); -/// -/// assert_eq!(tuple.await, (1, 2, 3, 4)); -/// # }); -/// ``` -pub fn join4( - future1: Fut1, - future2: Fut2, - future3: Fut3, - future4: Fut4, -) -> Join4 -where - Fut1: Future, - Fut2: Future, - Fut3: Future, - Fut4: Future, -{ - let f = Join4::new(future1, future2, future3, future4); - assert_future::<(Fut1::Output, Fut2::Output, Fut3::Output, Fut4::Output), _>(f) -} - -/// Same as [`join`](join()), but with more futures. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = async { 1 }; -/// let b = async { 2 }; -/// let c = async { 3 }; -/// let d = async { 4 }; -/// let e = async { 5 }; -/// let tuple = future::join5(a, b, c, d, e); -/// -/// assert_eq!(tuple.await, (1, 2, 3, 4, 5)); -/// # }); -/// ``` -pub fn join5( - future1: Fut1, - future2: Fut2, - future3: Fut3, - future4: Fut4, - future5: Fut5, -) -> Join5 -where - Fut1: Future, - Fut2: Future, - Fut3: Future, - Fut4: Future, - Fut5: Future, -{ - let f = Join5::new(future1, future2, future3, future4, future5); - assert_future::<(Fut1::Output, Fut2::Output, Fut3::Output, Fut4::Output, Fut5::Output), _>(f) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/lazy.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/lazy.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/lazy.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/lazy.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -use super::assert_future; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll}; - -/// Future for the [`lazy`] function. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Lazy { - f: Option, -} - -// safe because we never generate `Pin<&mut F>` -impl Unpin for Lazy {} - -/// Creates a new future that allows delayed execution of a closure. -/// -/// The provided closure is only run once the future is polled. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = future::lazy(|_| 1); -/// assert_eq!(a.await, 1); -/// -/// let b = future::lazy(|_| -> i32 { -/// panic!("oh no!") -/// }); -/// drop(b); // closure is never run -/// # }); -/// ``` -pub fn lazy(f: F) -> Lazy -where - F: FnOnce(&mut Context<'_>) -> R, -{ - assert_future::(Lazy { f: Some(f) }) -} - -impl FusedFuture for Lazy -where - F: FnOnce(&mut Context<'_>) -> R, -{ - fn is_terminated(&self) -> bool { - self.f.is_none() - } -} - -impl Future for Lazy -where - F: FnOnce(&mut Context<'_>) -> R, -{ - type Output = R; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Poll::Ready((self.f.take().expect("Lazy polled after completion"))(cx)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/maybe_done.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/maybe_done.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/maybe_done.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/maybe_done.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,104 +0,0 @@ -//! Definition of the MaybeDone combinator - -use super::assert_future; -use core::mem; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::task::{Context, Poll}; - -/// A future that may have completed. -/// -/// This is created by the [`maybe_done()`] function. 
-#[derive(Debug)] -pub enum MaybeDone { - /// A not-yet-completed future - Future(/* #[pin] */ Fut), - /// The output of the completed future - Done(Fut::Output), - /// The empty variant after the result of a [`MaybeDone`] has been - /// taken using the [`take_output`](MaybeDone::take_output) method. - Gone, -} - -impl Unpin for MaybeDone {} - -/// Wraps a future into a `MaybeDone` -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// use futures::pin_mut; -/// -/// let future = future::maybe_done(async { 5 }); -/// pin_mut!(future); -/// assert_eq!(future.as_mut().take_output(), None); -/// let () = future.as_mut().await; -/// assert_eq!(future.as_mut().take_output(), Some(5)); -/// assert_eq!(future.as_mut().take_output(), None); -/// # }); -/// ``` -pub fn maybe_done(future: Fut) -> MaybeDone { - assert_future::<(), _>(MaybeDone::Future(future)) -} - -impl MaybeDone { - /// Returns an [`Option`] containing a mutable reference to the output of the future. - /// The output of this method will be [`Some`] if and only if the inner - /// future has been completed and [`take_output`](MaybeDone::take_output) - /// has not yet been called. - #[inline] - pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Output> { - unsafe { - match self.get_unchecked_mut() { - MaybeDone::Done(res) => Some(res), - _ => None, - } - } - } - - /// Attempt to take the output of a `MaybeDone` without driving it - /// towards completion. 
- #[inline] - pub fn take_output(self: Pin<&mut Self>) -> Option { - match &*self { - Self::Done(_) => {} - Self::Future(_) | Self::Gone => return None, - } - unsafe { - match mem::replace(self.get_unchecked_mut(), Self::Gone) { - MaybeDone::Done(output) => Some(output), - _ => unreachable!(), - } - } - } -} - -impl FusedFuture for MaybeDone { - fn is_terminated(&self) -> bool { - match self { - Self::Future(_) => false, - Self::Done(_) | Self::Gone => true, - } - } -} - -impl Future for MaybeDone { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - unsafe { - match self.as_mut().get_unchecked_mut() { - MaybeDone::Future(f) => { - let res = ready!(Pin::new_unchecked(f).poll(cx)); - self.set(Self::Done(res)); - } - MaybeDone::Done(_) => {} - MaybeDone::Gone => panic!("MaybeDone polled after value taken"), - } - } - Poll::Ready(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,131 +0,0 @@ -//! Asynchronous values. -//! -//! This module contains: -//! -//! - The [`Future`] trait. -//! - The [`FutureExt`] and [`TryFutureExt`] trait, which provides adapters for -//! chaining and composing futures. -//! - Top-level future combinators like [`lazy`](lazy()) which creates a future -//! from a closure that defines its return value, and [`ready`](ready()), -//! which constructs a future with an immediate defined value. 
- -#[doc(no_inline)] -pub use core::future::Future; - -#[cfg(feature = "alloc")] -pub use futures_core::future::{BoxFuture, LocalBoxFuture}; -pub use futures_core::future::{FusedFuture, TryFuture}; -pub use futures_task::{FutureObj, LocalFutureObj, UnsafeFutureObj}; - -// Extension traits and combinators -#[allow(clippy::module_inception)] -mod future; -pub use self::future::{ - Flatten, Fuse, FutureExt, Inspect, IntoStream, Map, MapInto, NeverError, Then, UnitError, -}; - -#[deprecated(note = "This is now an alias for [Flatten](Flatten)")] -pub use self::future::FlattenStream; - -#[cfg(feature = "std")] -pub use self::future::CatchUnwind; - -#[cfg(feature = "channel")] -#[cfg_attr(docsrs, doc(cfg(feature = "channel")))] -#[cfg(feature = "std")] -pub use self::future::{Remote, RemoteHandle}; - -#[cfg(feature = "std")] -pub use self::future::{Shared, WeakShared}; - -mod try_future; -pub use self::try_future::{ - AndThen, ErrInto, InspectErr, InspectOk, IntoFuture, MapErr, MapOk, MapOkOrElse, OkInto, - OrElse, TryFlatten, TryFlattenStream, TryFutureExt, UnwrapOrElse, -}; - -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub use self::try_future::FlattenSink; - -// Primitive futures - -mod lazy; -pub use self::lazy::{lazy, Lazy}; - -mod pending; -pub use self::pending::{pending, Pending}; - -mod maybe_done; -pub use self::maybe_done::{maybe_done, MaybeDone}; - -mod try_maybe_done; -pub use self::try_maybe_done::{try_maybe_done, TryMaybeDone}; - -mod option; -pub use self::option::OptionFuture; - -mod poll_fn; -pub use self::poll_fn::{poll_fn, PollFn}; - -mod poll_immediate; -pub use self::poll_immediate::{poll_immediate, PollImmediate}; - -mod ready; -pub use self::ready::{err, ok, ready, Ready}; - -mod join; -pub use self::join::{join, join3, join4, join5, Join, Join3, Join4, Join5}; - -#[cfg(feature = "alloc")] -mod join_all; -#[cfg(feature = "alloc")] -pub use self::join_all::{join_all, JoinAll}; - -mod select; -pub use 
self::select::{select, Select}; - -#[cfg(feature = "alloc")] -mod select_all; -#[cfg(feature = "alloc")] -pub use self::select_all::{select_all, SelectAll}; - -mod try_join; -pub use self::try_join::{ - try_join, try_join3, try_join4, try_join5, TryJoin, TryJoin3, TryJoin4, TryJoin5, -}; - -#[cfg(feature = "alloc")] -mod try_join_all; -#[cfg(feature = "alloc")] -pub use self::try_join_all::{try_join_all, TryJoinAll}; - -mod try_select; -pub use self::try_select::{try_select, TrySelect}; - -#[cfg(feature = "alloc")] -mod select_ok; -#[cfg(feature = "alloc")] -pub use self::select_ok::{select_ok, SelectOk}; - -mod either; -pub use self::either::Either; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod abortable; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use crate::abortable::{AbortHandle, AbortRegistration, Abortable, Aborted}; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use abortable::abortable; - -// Just a helper function to ensure the futures we're returning all have the -// right implementations. -pub(crate) fn assert_future(future: F) -> F -where - F: Future, -{ - future -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/option.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/option.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/option.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/option.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,64 +0,0 @@ -//! Definition of the `Option` (optional step) combinator - -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// A future representing a value which may or may not be present. - /// - /// Created by the [`From`] implementation for [`Option`](std::option::Option). 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::OptionFuture; - /// - /// let mut a: OptionFuture<_> = Some(async { 123 }).into(); - /// assert_eq!(a.await, Some(123)); - /// - /// a = None.into(); - /// assert_eq!(a.await, None); - /// # }); - /// ``` - #[derive(Debug, Clone)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct OptionFuture { - #[pin] - inner: Option, - } -} - -impl Default for OptionFuture { - fn default() -> Self { - Self { inner: None } - } -} - -impl Future for OptionFuture { - type Output = Option; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.project().inner.as_pin_mut() { - Some(x) => x.poll(cx).map(Some), - None => Poll::Ready(None), - } - } -} - -impl FusedFuture for OptionFuture { - fn is_terminated(&self) -> bool { - match &self.inner { - Some(x) => x.is_terminated(), - None => true, - } - } -} - -impl From> for OptionFuture { - fn from(option: Option) -> Self { - Self { inner: option } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/pending.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/pending.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/pending.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/pending.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -use super::assert_future; -use core::marker; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll}; - -/// Future for the [`pending()`] function. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Pending { - _data: marker::PhantomData, -} - -impl FusedFuture for Pending { - fn is_terminated(&self) -> bool { - true - } -} - -/// Creates a future which never resolves, representing a computation that never -/// finishes. 
-/// -/// The returned future will forever return [`Poll::Pending`]. -/// -/// # Examples -/// -/// ```ignore -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let future = future::pending(); -/// let () = future.await; -/// unreachable!(); -/// # }); -/// ``` -#[cfg_attr(docsrs, doc(alias = "never"))] -pub fn pending() -> Pending { - assert_future::(Pending { _data: marker::PhantomData }) -} - -impl Future for Pending { - type Output = T; - - fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll { - Poll::Pending - } -} - -impl Unpin for Pending {} - -impl Clone for Pending { - fn clone(&self) -> Self { - pending() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/poll_fn.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/poll_fn.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/poll_fn.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/poll_fn.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -//! Definition of the `PollFn` adapter combinator - -use super::assert_future; -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; - -/// Future for the [`poll_fn`] function. -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct PollFn { - f: F, -} - -impl Unpin for PollFn {} - -/// Creates a new future wrapping around a function returning [`Poll`]. -/// -/// Polling the returned future delegates to the wrapped function. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future::poll_fn; -/// use futures::task::{Context, Poll}; -/// -/// fn read_line(_cx: &mut Context<'_>) -> Poll { -/// Poll::Ready("Hello, World!".into()) -/// } -/// -/// let read_future = poll_fn(read_line); -/// assert_eq!(read_future.await, "Hello, World!".to_owned()); -/// # }); -/// ``` -pub fn poll_fn(f: F) -> PollFn -where - F: FnMut(&mut Context<'_>) -> Poll, -{ - assert_future::(PollFn { f }) -} - -impl fmt::Debug for PollFn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PollFn").finish() - } -} - -impl Future for PollFn -where - F: FnMut(&mut Context<'_>) -> Poll, -{ - type Output = T; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - (&mut self.f)(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/poll_immediate.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/poll_immediate.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/poll_immediate.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/poll_immediate.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,126 +0,0 @@ -use super::assert_future; -use core::pin::Pin; -use futures_core::task::{Context, Poll}; -use futures_core::{FusedFuture, Future, Stream}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`poll_immediate`](poll_immediate()) function. 
- /// - /// It will never return [Poll::Pending](core::task::Poll::Pending) - #[derive(Debug, Clone)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct PollImmediate { - #[pin] - future: Option - } -} - -impl Future for PollImmediate -where - F: Future, -{ - type Output = Option; - - #[inline] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - let inner = - this.future.as_mut().as_pin_mut().expect("PollImmediate polled after completion"); - match inner.poll(cx) { - Poll::Ready(t) => { - this.future.set(None); - Poll::Ready(Some(t)) - } - Poll::Pending => Poll::Ready(None), - } - } -} - -impl FusedFuture for PollImmediate { - fn is_terminated(&self) -> bool { - self.future.is_none() - } -} - -/// A [Stream](crate::stream::Stream) implementation that can be polled repeatedly until the future is done. -/// The stream will never return [Poll::Pending](core::task::Poll::Pending) -/// so polling it in a tight loop is worse than using a blocking synchronous function. 
-/// ``` -/// # futures::executor::block_on(async { -/// use futures::task::Poll; -/// use futures::{StreamExt, future, pin_mut}; -/// use future::FusedFuture; -/// -/// let f = async { 1_u32 }; -/// pin_mut!(f); -/// let mut r = future::poll_immediate(f); -/// assert_eq!(r.next().await, Some(Poll::Ready(1))); -/// -/// let f = async {futures::pending!(); 42_u8}; -/// pin_mut!(f); -/// let mut p = future::poll_immediate(f); -/// assert_eq!(p.next().await, Some(Poll::Pending)); -/// assert!(!p.is_terminated()); -/// assert_eq!(p.next().await, Some(Poll::Ready(42))); -/// assert!(p.is_terminated()); -/// assert_eq!(p.next().await, None); -/// # }); -/// ``` -impl Stream for PollImmediate -where - F: Future, -{ - type Item = Poll; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - match this.future.as_mut().as_pin_mut() { - // inner is gone, so we can signal that the stream is closed. - None => Poll::Ready(None), - Some(fut) => Poll::Ready(Some(fut.poll(cx).map(|t| { - this.future.set(None); - t - }))), - } - } -} - -/// Creates a future that is immediately ready with an Option of a value. -/// Specifically this means that [poll](core::future::Future::poll()) always returns [Poll::Ready](core::task::Poll::Ready). -/// -/// # Caution -/// -/// When consuming the future by this function, note the following: -/// -/// - This function does not guarantee that the future will run to completion, so it is generally incompatible with passing the non-cancellation-safe future by value. -/// - Even if the future is cancellation-safe, creating and dropping new futures frequently may lead to performance problems. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let r = future::poll_immediate(async { 1_u32 }); -/// assert_eq!(r.await, Some(1)); -/// -/// let p = future::poll_immediate(future::pending::()); -/// assert_eq!(p.await, None); -/// # }); -/// ``` -/// -/// ### Reusing a future -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::{future, pin_mut}; -/// let f = async {futures::pending!(); 42_u8}; -/// pin_mut!(f); -/// assert_eq!(None, future::poll_immediate(&mut f).await); -/// assert_eq!(42, f.await); -/// # }); -/// ``` -pub fn poll_immediate(f: F) -> PollImmediate { - assert_future::, PollImmediate>(PollImmediate { future: Some(f) }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/ready.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/ready.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/ready.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/ready.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,82 +0,0 @@ -use super::assert_future; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll}; - -/// Future for the [`ready`](ready()) function. -#[derive(Debug, Clone)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Ready(Option); - -impl Ready { - /// Unwraps the value from this immediately ready future. - #[inline] - pub fn into_inner(mut self) -> T { - self.0.take().unwrap() - } -} - -impl Unpin for Ready {} - -impl FusedFuture for Ready { - fn is_terminated(&self) -> bool { - self.0.is_none() - } -} - -impl Future for Ready { - type Output = T; - - #[inline] - fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - Poll::Ready(self.0.take().expect("Ready polled after completion")) - } -} - -/// Creates a future that is immediately ready with a value. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = future::ready(1); -/// assert_eq!(a.await, 1); -/// # }); -/// ``` -pub fn ready(t: T) -> Ready { - assert_future::(Ready(Some(t))) -} - -/// Create a future that is immediately ready with a success value. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = future::ok::(1); -/// assert_eq!(a.await, Ok(1)); -/// # }); -/// ``` -pub fn ok(t: T) -> Ready> { - Ready(Some(Ok(t))) -} - -/// Create a future that is immediately ready with an error value. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = future::err::(1); -/// assert_eq!(a.await, Err(1)); -/// # }); -/// ``` -pub fn err(err: E) -> Ready> { - Ready(Some(Err(err))) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/select_all.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/select_all.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/select_all.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/select_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -use super::assert_future; -use crate::future::FutureExt; -use alloc::vec::Vec; -use core::iter::FromIterator; -use core::mem; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; - -/// Future for the [`select_all`] function. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SelectAll { - inner: Vec, -} - -impl Unpin for SelectAll {} - -/// Creates a new future which will select over a list of futures. -/// -/// The returned future will wait for any future within `iter` to be ready. 
Upon -/// completion the item resolved will be returned, along with the index of the -/// future that was ready and the list of all the remaining futures. -/// -/// There are no guarantees provided on the order of the list with the remaining -/// futures. They might be swapped around, reversed, or completely random. -/// -/// This function is only available when the `std` or `alloc` feature of this -/// library is activated, and it is activated by default. -/// -/// # Panics -/// -/// This function will panic if the iterator specified contains no items. -pub fn select_all(iter: I) -> SelectAll -where - I: IntoIterator, - I::Item: Future + Unpin, -{ - let ret = SelectAll { inner: iter.into_iter().collect() }; - assert!(!ret.inner.is_empty()); - assert_future::<(::Output, usize, Vec), _>(ret) -} - -impl SelectAll { - /// Consumes this combinator, returning the underlying futures. - pub fn into_inner(self) -> Vec { - self.inner - } -} - -impl Future for SelectAll { - type Output = (Fut::Output, usize, Vec); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let item = self.inner.iter_mut().enumerate().find_map(|(i, f)| match f.poll_unpin(cx) { - Poll::Pending => None, - Poll::Ready(e) => Some((i, e)), - }); - match item { - Some((idx, res)) => { - #[allow(clippy::let_underscore_future)] - let _ = self.inner.swap_remove(idx); - let rest = mem::take(&mut self.inner); - Poll::Ready((res, idx, rest)) - } - None => Poll::Pending, - } - } -} - -impl FromIterator for SelectAll { - fn from_iter>(iter: T) -> Self { - select_all(iter) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/select_ok.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/select_ok.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/select_ok.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/select_ok.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,85 +0,0 @@ -use super::assert_future; -use 
crate::future::TryFutureExt; -use alloc::vec::Vec; -use core::iter::FromIterator; -use core::mem; -use core::pin::Pin; -use futures_core::future::{Future, TryFuture}; -use futures_core::task::{Context, Poll}; - -/// Future for the [`select_ok`] function. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SelectOk { - inner: Vec, -} - -impl Unpin for SelectOk {} - -/// Creates a new future which will select the first successful future over a list of futures. -/// -/// The returned future will wait for any future within `iter` to be ready and Ok. Unlike -/// `select_all`, this will only return the first successful completion, or the last -/// failure. This is useful in contexts where any success is desired and failures -/// are ignored, unless all the futures fail. -/// -/// This function is only available when the `std` or `alloc` feature of this -/// library is activated, and it is activated by default. -/// -/// # Panics -/// -/// This function will panic if the iterator specified contains no items. 
-pub fn select_ok(iter: I) -> SelectOk -where - I: IntoIterator, - I::Item: TryFuture + Unpin, -{ - let ret = SelectOk { inner: iter.into_iter().collect() }; - assert!(!ret.inner.is_empty(), "iterator provided to select_ok was empty"); - assert_future::< - Result<(::Ok, Vec), ::Error>, - _, - >(ret) -} - -impl Future for SelectOk { - type Output = Result<(Fut::Ok, Vec), Fut::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // loop until we've either exhausted all errors, a success was hit, or nothing is ready - loop { - let item = - self.inner.iter_mut().enumerate().find_map(|(i, f)| match f.try_poll_unpin(cx) { - Poll::Pending => None, - Poll::Ready(e) => Some((i, e)), - }); - match item { - Some((idx, res)) => { - // always remove Ok or Err, if it's not the last Err continue looping - drop(self.inner.remove(idx)); - match res { - Ok(e) => { - let rest = mem::take(&mut self.inner); - return Poll::Ready(Ok((e, rest))); - } - Err(e) => { - if self.inner.is_empty() { - return Poll::Ready(Err(e)); - } - } - } - } - None => { - // based on the filter above, nothing is ready, return - return Poll::Pending; - } - } - } - } -} - -impl FromIterator for SelectOk { - fn from_iter>(iter: T) -> Self { - select_ok(iter) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/select.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/select.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/select.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/select.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,134 +0,0 @@ -use super::assert_future; -use crate::future::{Either, FutureExt}; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll}; - -/// Future for the [`select()`] function. 
-#[must_use = "futures do nothing unless you `.await` or poll them"] -#[derive(Debug)] -pub struct Select { - inner: Option<(A, B)>, -} - -impl Unpin for Select {} - -/// Waits for either one of two differently-typed futures to complete. -/// -/// This function will return a new future which awaits for either one of both -/// futures to complete. The returned future will finish with both the value -/// resolved and a future representing the completion of the other work. -/// -/// Note that this function consumes the receiving futures and returns a -/// wrapped version of them. -/// -/// Also note that if both this and the second future have the same -/// output type you can use the `Either::factor_first` method to -/// conveniently extract out the value at the end. -/// -/// # Examples -/// -/// A simple example -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::{ -/// pin_mut, -/// future::Either, -/// future::self, -/// }; -/// -/// // These two futures have different types even though their outputs have the same type. 
-/// let future1 = async { -/// future::pending::<()>().await; // will never finish -/// 1 -/// }; -/// let future2 = async { -/// future::ready(2).await -/// }; -/// -/// // 'select' requires Future + Unpin bounds -/// pin_mut!(future1); -/// pin_mut!(future2); -/// -/// let value = match future::select(future1, future2).await { -/// Either::Left((value1, _)) => value1, // `value1` is resolved from `future1` -/// // `_` represents `future2` -/// Either::Right((value2, _)) => value2, // `value2` is resolved from `future2` -/// // `_` represents `future1` -/// }; -/// -/// assert!(value == 2); -/// # }); -/// ``` -/// -/// A more complex example -/// -/// ``` -/// use futures::future::{self, Either, Future, FutureExt}; -/// -/// // A poor-man's join implemented on top of select -/// -/// fn join(a: A, b: B) -> impl Future -/// where A: Future + Unpin, -/// B: Future + Unpin, -/// { -/// future::select(a, b).then(|either| { -/// match either { -/// Either::Left((x, b)) => b.map(move |y| (x, y)).left_future(), -/// Either::Right((y, a)) => a.map(move |x| (x, y)).right_future(), -/// } -/// }) -/// } -/// ``` -pub fn select(future1: A, future2: B) -> Select -where - A: Future + Unpin, - B: Future + Unpin, -{ - assert_future::, _>(Select { - inner: Some((future1, future2)), - }) -} - -impl Future for Select -where - A: Future + Unpin, - B: Future + Unpin, -{ - type Output = Either<(A::Output, B), (B::Output, A)>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - /// When compiled with `-C opt-level=z`, this function will help the compiler eliminate the `None` branch, where - /// `Option::unwrap` does not. 
- #[inline(always)] - fn unwrap_option(value: Option) -> T { - match value { - None => unreachable!(), - Some(value) => value, - } - } - - let (a, b) = self.inner.as_mut().expect("cannot poll Select twice"); - - if let Poll::Ready(val) = a.poll_unpin(cx) { - return Poll::Ready(Either::Left((val, unwrap_option(self.inner.take()).1))); - } - - if let Poll::Ready(val) = b.poll_unpin(cx) { - return Poll::Ready(Either::Right((val, unwrap_option(self.inner.take()).0))); - } - - Poll::Pending - } -} - -impl FusedFuture for Select -where - A: Future + Unpin, - B: Future + Unpin, -{ - fn is_terminated(&self) -> bool { - self.inner.is_none() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_future/into_future.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_future/into_future.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_future/into_future.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_future/into_future.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,36 +0,0 @@ -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future, TryFuture}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`into_future`](super::TryFutureExt::into_future) method. 
- #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct IntoFuture { - #[pin] - future: Fut, - } -} - -impl IntoFuture { - #[inline] - pub(crate) fn new(future: Fut) -> Self { - Self { future } - } -} - -impl FusedFuture for IntoFuture { - fn is_terminated(&self) -> bool { - self.future.is_terminated() - } -} - -impl Future for IntoFuture { - type Output = Result; - - #[inline] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.project().future.try_poll(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_future/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_future/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_future/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_future/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,625 +0,0 @@ -//! Futures -//! -//! This module contains a number of functions for working with `Future`s, -//! including the `FutureExt` trait which adds methods to `Future` types. - -#[cfg(feature = "compat")] -use crate::compat::Compat; -use core::pin::Pin; -use futures_core::{ - future::TryFuture, - stream::TryStream, - task::{Context, Poll}, -}; -#[cfg(feature = "sink")] -use futures_sink::Sink; - -use crate::fns::{ - inspect_err_fn, inspect_ok_fn, into_fn, map_err_fn, map_ok_fn, map_ok_or_else_fn, - unwrap_or_else_fn, InspectErrFn, InspectOkFn, IntoFn, MapErrFn, MapOkFn, MapOkOrElseFn, - UnwrapOrElseFn, -}; -use crate::future::{assert_future, Inspect, Map}; -use crate::stream::assert_stream; - -// Combinators -mod into_future; -mod try_flatten; -mod try_flatten_err; - -delegate_all!( - /// Future for the [`try_flatten`](TryFutureExt::try_flatten) method. 
- TryFlatten( - try_flatten::TryFlatten - ): Debug + Future + FusedFuture + New[|x: Fut1| try_flatten::TryFlatten::new(x)] -); - -delegate_all!( - /// Future for the [`try_flatten_err`](TryFutureExt::try_flatten_err) method. - TryFlattenErr( - try_flatten_err::TryFlattenErr - ): Debug + Future + FusedFuture + New[|x: Fut1| try_flatten_err::TryFlattenErr::new(x)] -); - -delegate_all!( - /// Future for the [`try_flatten_stream`](TryFutureExt::try_flatten_stream) method. - TryFlattenStream( - try_flatten::TryFlatten - ): Debug + Sink + Stream + FusedStream + New[|x: Fut| try_flatten::TryFlatten::new(x)] - where Fut: TryFuture -); - -#[cfg(feature = "sink")] -delegate_all!( - /// Sink for the [`flatten_sink`](TryFutureExt::flatten_sink) method. - #[cfg_attr(docsrs, doc(cfg(feature = "sink")))] - FlattenSink( - try_flatten::TryFlatten - ): Debug + Sink + Stream + FusedStream + New[|x: Fut| try_flatten::TryFlatten::new(x)] -); - -delegate_all!( - /// Future for the [`and_then`](TryFutureExt::and_then) method. - AndThen( - TryFlatten, Fut2> - ): Debug + Future + FusedFuture + New[|x: Fut1, f: F| TryFlatten::new(MapOk::new(x, f))] -); - -delegate_all!( - /// Future for the [`or_else`](TryFutureExt::or_else) method. - OrElse( - TryFlattenErr, Fut2> - ): Debug + Future + FusedFuture + New[|x: Fut1, f: F| TryFlattenErr::new(MapErr::new(x, f))] -); - -delegate_all!( - /// Future for the [`err_into`](TryFutureExt::err_into) method. - ErrInto( - MapErr> - ): Debug + Future + FusedFuture + New[|x: Fut| MapErr::new(x, into_fn())] -); - -delegate_all!( - /// Future for the [`ok_into`](TryFutureExt::ok_into) method. - OkInto( - MapOk> - ): Debug + Future + FusedFuture + New[|x: Fut| MapOk::new(x, into_fn())] -); - -delegate_all!( - /// Future for the [`inspect_ok`](super::TryFutureExt::inspect_ok) method. 
- InspectOk( - Inspect, InspectOkFn> - ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Inspect::new(IntoFuture::new(x), inspect_ok_fn(f))] -); - -delegate_all!( - /// Future for the [`inspect_err`](super::TryFutureExt::inspect_err) method. - InspectErr( - Inspect, InspectErrFn> - ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Inspect::new(IntoFuture::new(x), inspect_err_fn(f))] -); - -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::into_future::IntoFuture; - -delegate_all!( - /// Future for the [`map_ok`](TryFutureExt::map_ok) method. - MapOk( - Map, MapOkFn> - ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Map::new(IntoFuture::new(x), map_ok_fn(f))] -); - -delegate_all!( - /// Future for the [`map_err`](TryFutureExt::map_err) method. - MapErr( - Map, MapErrFn> - ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Map::new(IntoFuture::new(x), map_err_fn(f))] -); - -delegate_all!( - /// Future for the [`map_ok_or_else`](TryFutureExt::map_ok_or_else) method. - MapOkOrElse( - Map, MapOkOrElseFn> - ): Debug + Future + FusedFuture + New[|x: Fut, f: F, g: G| Map::new(IntoFuture::new(x), map_ok_or_else_fn(f, g))] -); - -delegate_all!( - /// Future for the [`unwrap_or_else`](TryFutureExt::unwrap_or_else) method. - UnwrapOrElse( - Map, UnwrapOrElseFn> - ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Map::new(IntoFuture::new(x), unwrap_or_else_fn(f))] -); - -impl TryFutureExt for Fut {} - -/// Adapters specific to [`Result`]-returning futures -pub trait TryFutureExt: TryFuture { - /// Flattens the execution of this future when the successful result of this - /// future is a [`Sink`]. - /// - /// This can be useful when sink initialization is deferred, and it is - /// convenient to work with that sink as if the sink was available at the - /// call site. - /// - /// Note that this function consumes this future and returns a wrapped - /// version of it. 
- /// - /// # Examples - /// - /// ``` - /// use futures::future::{Future, TryFutureExt}; - /// use futures::sink::Sink; - /// # use futures::channel::mpsc::{self, SendError}; - /// # type T = i32; - /// # type E = SendError; - /// - /// fn make_sink_async() -> impl Future, - /// E, - /// >> { // ... } - /// # let (tx, _rx) = mpsc::unbounded::(); - /// # futures::future::ready(Ok(tx)) - /// # } - /// fn take_sink(sink: impl Sink) { /* ... */ } - /// - /// let fut = make_sink_async(); - /// take_sink(fut.flatten_sink()) - /// ``` - #[cfg(feature = "sink")] - #[cfg_attr(docsrs, doc(cfg(feature = "sink")))] - fn flatten_sink(self) -> FlattenSink - where - Self::Ok: Sink, - Self: Sized, - { - crate::sink::assert_sink::(FlattenSink::new(self)) - } - - /// Maps this future's success value to a different value. - /// - /// This method can be used to change the [`Ok`](TryFuture::Ok) type of the - /// future into a different type. It is similar to the [`Result::map`] - /// method. You can use this method to chain along a computation once the - /// future has been resolved. - /// - /// The provided closure `f` will only be called if this future is resolved - /// to an [`Ok`]. If it resolves to an [`Err`], panics, or is dropped, then - /// the provided closure will never be invoked. - /// - /// Note that this method consumes the future it is called on and returns a - /// wrapped version of it. 
- /// - /// # Examples - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Ok::(1) }; - /// let future = future.map_ok(|x| x + 3); - /// assert_eq!(future.await, Ok(4)); - /// # }); - /// ``` - /// - /// Calling [`map_ok`](TryFutureExt::map_ok) on an errored future has no - /// effect: - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Err::(1) }; - /// let future = future.map_ok(|x| x + 3); - /// assert_eq!(future.await, Err(1)); - /// # }); - /// ``` - fn map_ok(self, f: F) -> MapOk - where - F: FnOnce(Self::Ok) -> T, - Self: Sized, - { - assert_future::, _>(MapOk::new(self, f)) - } - - /// Maps this future's success value to a different value, and permits for error handling resulting in the same type. - /// - /// This method can be used to coalesce your [`Ok`](TryFuture::Ok) type and [`Error`](TryFuture::Error) into another type, - /// where that type is the same for both outcomes. - /// - /// The provided closure `f` will only be called if this future is resolved - /// to an [`Ok`]. If it resolves to an [`Err`], panics, or is dropped, then - /// the provided closure will never be invoked. - /// - /// The provided closure `e` will only be called if this future is resolved - /// to an [`Err`]. If it resolves to an [`Ok`], panics, or is dropped, then - /// the provided closure will never be invoked. - /// - /// Note that this method consumes the future it is called on and returns a - /// wrapped version of it. 
- /// - /// # Examples - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Ok::(5) }; - /// let future = future.map_ok_or_else(|x| x * 2, |x| x + 3); - /// assert_eq!(future.await, 8); - /// - /// let future = async { Err::(5) }; - /// let future = future.map_ok_or_else(|x| x * 2, |x| x + 3); - /// assert_eq!(future.await, 10); - /// # }); - /// ``` - /// - fn map_ok_or_else(self, e: E, f: F) -> MapOkOrElse - where - F: FnOnce(Self::Ok) -> T, - E: FnOnce(Self::Error) -> T, - Self: Sized, - { - assert_future::(MapOkOrElse::new(self, f, e)) - } - - /// Maps this future's error value to a different value. - /// - /// This method can be used to change the [`Error`](TryFuture::Error) type - /// of the future into a different type. It is similar to the - /// [`Result::map_err`] method. You can use this method for example to - /// ensure that futures have the same [`Error`](TryFuture::Error) type when - /// using [`select!`] or [`join!`]. - /// - /// The provided closure `f` will only be called if this future is resolved - /// to an [`Err`]. If it resolves to an [`Ok`], panics, or is dropped, then - /// the provided closure will never be invoked. - /// - /// Note that this method consumes the future it is called on and returns a - /// wrapped version of it. 
- /// - /// # Examples - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Err::(1) }; - /// let future = future.map_err(|x| x + 3); - /// assert_eq!(future.await, Err(4)); - /// # }); - /// ``` - /// - /// Calling [`map_err`](TryFutureExt::map_err) on a successful future has - /// no effect: - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Ok::(1) }; - /// let future = future.map_err(|x| x + 3); - /// assert_eq!(future.await, Ok(1)); - /// # }); - /// ``` - /// - /// [`join!`]: crate::join - /// [`select!`]: crate::select - fn map_err(self, f: F) -> MapErr - where - F: FnOnce(Self::Error) -> E, - Self: Sized, - { - assert_future::, _>(MapErr::new(self, f)) - } - - /// Maps this future's [`Error`](TryFuture::Error) to a new error type - /// using the [`Into`](std::convert::Into) trait. - /// - /// This method does for futures what the `?`-operator does for - /// [`Result`]: It lets the compiler infer the type of the resulting - /// error. Just as [`map_err`](TryFutureExt::map_err), this is useful for - /// example to ensure that futures have the same [`Error`](TryFuture::Error) - /// type when using [`select!`] or [`join!`]. - /// - /// Note that this method consumes the future it is called on and returns a - /// wrapped version of it. - /// - /// # Examples - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future_err_u8 = async { Err::<(), u8>(1) }; - /// let future_err_i32 = future_err_u8.err_into::(); - /// # }); - /// ``` - /// - /// [`join!`]: crate::join - /// [`select!`]: crate::select - fn err_into(self) -> ErrInto - where - Self: Sized, - Self::Error: Into, - { - assert_future::, _>(ErrInto::new(self)) - } - - /// Maps this future's [`Ok`](TryFuture::Ok) to a new type - /// using the [`Into`](std::convert::Into) trait. 
- fn ok_into(self) -> OkInto - where - Self: Sized, - Self::Ok: Into, - { - assert_future::, _>(OkInto::new(self)) - } - - /// Executes another future after this one resolves successfully. The - /// success value is passed to a closure to create this subsequent future. - /// - /// The provided closure `f` will only be called if this future is resolved - /// to an [`Ok`]. If this future resolves to an [`Err`], panics, or is - /// dropped, then the provided closure will never be invoked. The - /// [`Error`](TryFuture::Error) type of this future and the future - /// returned by `f` have to match. - /// - /// Note that this method consumes the future it is called on and returns a - /// wrapped version of it. - /// - /// # Examples - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Ok::(1) }; - /// let future = future.and_then(|x| async move { Ok::(x + 3) }); - /// assert_eq!(future.await, Ok(4)); - /// # }); - /// ``` - /// - /// Calling [`and_then`](TryFutureExt::and_then) on an errored future has no - /// effect: - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Err::(1) }; - /// let future = future.and_then(|x| async move { Err::(x + 3) }); - /// assert_eq!(future.await, Err(1)); - /// # }); - /// ``` - fn and_then(self, f: F) -> AndThen - where - F: FnOnce(Self::Ok) -> Fut, - Fut: TryFuture, - Self: Sized, - { - assert_future::, _>(AndThen::new(self, f)) - } - - /// Executes another future if this one resolves to an error. The - /// error value is passed to a closure to create this subsequent future. - /// - /// The provided closure `f` will only be called if this future is resolved - /// to an [`Err`]. If this future resolves to an [`Ok`], panics, or is - /// dropped, then the provided closure will never be invoked. 
The - /// [`Ok`](TryFuture::Ok) type of this future and the future returned by `f` - /// have to match. - /// - /// Note that this method consumes the future it is called on and returns a - /// wrapped version of it. - /// - /// # Examples - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Err::(1) }; - /// let future = future.or_else(|x| async move { Err::(x + 3) }); - /// assert_eq!(future.await, Err(4)); - /// # }); - /// ``` - /// - /// Calling [`or_else`](TryFutureExt::or_else) on a successful future has - /// no effect: - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Ok::(1) }; - /// let future = future.or_else(|x| async move { Ok::(x + 3) }); - /// assert_eq!(future.await, Ok(1)); - /// # }); - /// ``` - fn or_else(self, f: F) -> OrElse - where - F: FnOnce(Self::Error) -> Fut, - Fut: TryFuture, - Self: Sized, - { - assert_future::, _>(OrElse::new(self, f)) - } - - /// Do something with the success value of a future before passing it on. - /// - /// When using futures, you'll often chain several of them together. While - /// working on such code, you might want to check out what's happening at - /// various parts in the pipeline, without consuming the intermediate - /// value. To do that, insert a call to `inspect_ok`. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::TryFutureExt; - /// - /// let future = async { Ok::<_, ()>(1) }; - /// let new_future = future.inspect_ok(|&x| println!("about to resolve: {}", x)); - /// assert_eq!(new_future.await, Ok(1)); - /// # }); - /// ``` - fn inspect_ok(self, f: F) -> InspectOk - where - F: FnOnce(&Self::Ok), - Self: Sized, - { - assert_future::, _>(InspectOk::new(self, f)) - } - - /// Do something with the error value of a future before passing it on. 
- /// - /// When using futures, you'll often chain several of them together. While - /// working on such code, you might want to check out what's happening at - /// various parts in the pipeline, without consuming the intermediate - /// value. To do that, insert a call to `inspect_err`. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::TryFutureExt; - /// - /// let future = async { Err::<(), _>(1) }; - /// let new_future = future.inspect_err(|&x| println!("about to error: {}", x)); - /// assert_eq!(new_future.await, Err(1)); - /// # }); - /// ``` - fn inspect_err(self, f: F) -> InspectErr - where - F: FnOnce(&Self::Error), - Self: Sized, - { - assert_future::, _>(InspectErr::new(self, f)) - } - - /// Flatten the execution of this future when the successful result of this - /// future is another future. - /// - /// This is equivalent to `future.and_then(|x| x)`. - fn try_flatten(self) -> TryFlatten - where - Self::Ok: TryFuture, - Self: Sized, - { - assert_future::::Ok, Self::Error>, _>(TryFlatten::new(self)) - } - - /// Flatten the execution of this future when the successful result of this - /// future is a stream. - /// - /// This can be useful when stream initialization is deferred, and it is - /// convenient to work with that stream as if stream was available at the - /// call site. - /// - /// Note that this function consumes this future and returns a wrapped - /// version of it. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future::TryFutureExt; - /// use futures::stream::{self, TryStreamExt}; - /// - /// let stream_items = vec![17, 18, 19].into_iter().map(Ok); - /// let future_of_a_stream = async { Ok::<_, ()>(stream::iter(stream_items)) }; - /// - /// let stream = future_of_a_stream.try_flatten_stream(); - /// let list = stream.try_collect::>().await; - /// assert_eq!(list, Ok(vec![17, 18, 19])); - /// # }); - /// ``` - fn try_flatten_stream(self) -> TryFlattenStream - where - Self::Ok: TryStream, - Self: Sized, - { - assert_stream::::Ok, Self::Error>, _>(TryFlattenStream::new( - self, - )) - } - - /// Unwraps this future's output, producing a future with this future's - /// [`Ok`](TryFuture::Ok) type as its - /// [`Output`](std::future::Future::Output) type. - /// - /// If this future is resolved successfully, the returned future will - /// contain the original future's success value as output. Otherwise, the - /// closure `f` is called with the error value to produce an alternate - /// success value. - /// - /// This method is similar to the [`Result::unwrap_or_else`] method. - /// - /// # Examples - /// - /// ``` - /// use futures::future::TryFutureExt; - /// - /// # futures::executor::block_on(async { - /// let future = async { Err::<(), &str>("Boom!") }; - /// let future = future.unwrap_or_else(|_| ()); - /// assert_eq!(future.await, ()); - /// # }); - /// ``` - fn unwrap_or_else(self, f: F) -> UnwrapOrElse - where - Self: Sized, - F: FnOnce(Self::Error) -> Self::Ok, - { - assert_future::(UnwrapOrElse::new(self, f)) - } - - /// Wraps a [`TryFuture`] into a future compatible with libraries using - /// futures 0.1 future definitions. Requires the `compat` feature to enable. 
- #[cfg(feature = "compat")] - #[cfg_attr(docsrs, doc(cfg(feature = "compat")))] - fn compat(self) -> Compat - where - Self: Sized + Unpin, - { - Compat::new(self) - } - - /// Wraps a [`TryFuture`] into a type that implements - /// [`Future`](std::future::Future). - /// - /// [`TryFuture`]s currently do not implement the - /// [`Future`](std::future::Future) trait due to limitations of the - /// compiler. - /// - /// # Examples - /// - /// ``` - /// use futures::future::{Future, TryFuture, TryFutureExt}; - /// - /// # type T = i32; - /// # type E = (); - /// fn make_try_future() -> impl TryFuture { // ... } - /// # async { Ok::(1) } - /// # } - /// fn take_future(future: impl Future>) { /* ... */ } - /// - /// take_future(make_try_future().into_future()); - /// ``` - fn into_future(self) -> IntoFuture - where - Self: Sized, - { - assert_future::, _>(IntoFuture::new(self)) - } - - /// A convenience method for calling [`TryFuture::try_poll`] on [`Unpin`] - /// future types. - fn try_poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll> - where - Self: Unpin, - { - Pin::new(self).try_poll(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_future/try_flatten_err.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_future/try_flatten_err.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_future/try_flatten_err.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_future/try_flatten_err.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,62 +0,0 @@ -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future, TryFuture}; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! 
{ - #[project = TryFlattenErrProj] - #[derive(Debug)] - pub enum TryFlattenErr { - First { #[pin] f: Fut1 }, - Second { #[pin] f: Fut2 }, - Empty, - } -} - -impl TryFlattenErr { - pub(crate) fn new(future: Fut1) -> Self { - Self::First { f: future } - } -} - -impl FusedFuture for TryFlattenErr -where - Fut: TryFuture, - Fut::Error: TryFuture, -{ - fn is_terminated(&self) -> bool { - match self { - Self::Empty => true, - _ => false, - } - } -} - -impl Future for TryFlattenErr -where - Fut: TryFuture, - Fut::Error: TryFuture, -{ - type Output = Result::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Poll::Ready(loop { - match self.as_mut().project() { - TryFlattenErrProj::First { f } => match ready!(f.try_poll(cx)) { - Err(f) => self.set(Self::Second { f }), - Ok(e) => { - self.set(Self::Empty); - break Ok(e); - } - }, - TryFlattenErrProj::Second { f } => { - let output = ready!(f.try_poll(cx)); - self.set(Self::Empty); - break output; - } - TryFlattenErrProj::Empty => panic!("TryFlattenErr polled after completion"), - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_future/try_flatten.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_future/try_flatten.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_future/try_flatten.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_future/try_flatten.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,162 +0,0 @@ -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future, TryFuture}; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! 
{ - #[project = TryFlattenProj] - #[derive(Debug)] - pub enum TryFlatten { - First { #[pin] f: Fut1 }, - Second { #[pin] f: Fut2 }, - Empty, - } -} - -impl TryFlatten { - pub(crate) fn new(future: Fut1) -> Self { - Self::First { f: future } - } -} - -impl FusedFuture for TryFlatten -where - Fut: TryFuture, - Fut::Ok: TryFuture, -{ - fn is_terminated(&self) -> bool { - match self { - Self::Empty => true, - _ => false, - } - } -} - -impl Future for TryFlatten -where - Fut: TryFuture, - Fut::Ok: TryFuture, -{ - type Output = Result<::Ok, Fut::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Poll::Ready(loop { - match self.as_mut().project() { - TryFlattenProj::First { f } => match ready!(f.try_poll(cx)) { - Ok(f) => self.set(Self::Second { f }), - Err(e) => { - self.set(Self::Empty); - break Err(e); - } - }, - TryFlattenProj::Second { f } => { - let output = ready!(f.try_poll(cx)); - self.set(Self::Empty); - break output; - } - TryFlattenProj::Empty => panic!("TryFlatten polled after completion"), - } - }) - } -} - -impl FusedStream for TryFlatten -where - Fut: TryFuture, - Fut::Ok: TryStream, -{ - fn is_terminated(&self) -> bool { - match self { - Self::Empty => true, - _ => false, - } - } -} - -impl Stream for TryFlatten -where - Fut: TryFuture, - Fut::Ok: TryStream, -{ - type Item = Result<::Ok, Fut::Error>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Poll::Ready(loop { - match self.as_mut().project() { - TryFlattenProj::First { f } => match ready!(f.try_poll(cx)) { - Ok(f) => self.set(Self::Second { f }), - Err(e) => { - self.set(Self::Empty); - break Some(Err(e)); - } - }, - TryFlattenProj::Second { f } => { - let output = ready!(f.try_poll_next(cx)); - if output.is_none() { - self.set(Self::Empty); - } - break output; - } - TryFlattenProj::Empty => break None, - } - }) - } -} - -#[cfg(feature = "sink")] -impl Sink for TryFlatten -where - Fut: TryFuture, - Fut::Ok: Sink, -{ - type Error = 
Fut::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Poll::Ready(loop { - match self.as_mut().project() { - TryFlattenProj::First { f } => match ready!(f.try_poll(cx)) { - Ok(f) => self.set(Self::Second { f }), - Err(e) => { - self.set(Self::Empty); - break Err(e); - } - }, - TryFlattenProj::Second { f } => { - break ready!(f.poll_ready(cx)); - } - TryFlattenProj::Empty => panic!("poll_ready called after eof"), - } - }) - } - - fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - match self.project() { - TryFlattenProj::First { .. } => panic!("poll_ready not called first"), - TryFlattenProj::Second { f } => f.start_send(item), - TryFlattenProj::Empty => panic!("start_send called after eof"), - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.project() { - TryFlattenProj::First { .. } => Poll::Ready(Ok(())), - TryFlattenProj::Second { f } => f.poll_flush(cx), - TryFlattenProj::Empty => panic!("poll_flush called after eof"), - } - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let res = match self.as_mut().project() { - TryFlattenProj::Second { f } => f.poll_close(cx), - _ => Poll::Ready(Ok(())), - }; - if res.is_ready() { - self.set(Self::Empty); - } - res - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_join_all.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_join_all.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_join_all.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_join_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,200 +0,0 @@ -//! Definition of the `TryJoinAll` combinator, waiting for all of a list of -//! futures to finish with either success or error. 
- -use alloc::boxed::Box; -use alloc::vec::Vec; -use core::fmt; -use core::future::Future; -use core::iter::FromIterator; -use core::mem; -use core::pin::Pin; -use core::task::{Context, Poll}; - -use super::{assert_future, join_all, IntoFuture, TryFuture, TryMaybeDone}; - -#[cfg(not(futures_no_atomic_cas))] -use crate::stream::{FuturesOrdered, TryCollect, TryStreamExt}; -use crate::TryFutureExt; - -enum FinalState { - Pending, - AllDone, - Error(E), -} - -/// Future for the [`try_join_all`] function. -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct TryJoinAll -where - F: TryFuture, -{ - kind: TryJoinAllKind, -} - -enum TryJoinAllKind -where - F: TryFuture, -{ - Small { - elems: Pin>]>>, - }, - #[cfg(not(futures_no_atomic_cas))] - Big { - fut: TryCollect>, Vec>, - }, -} - -impl fmt::Debug for TryJoinAll -where - F: TryFuture + fmt::Debug, - F::Ok: fmt::Debug, - F::Error: fmt::Debug, - F::Output: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.kind { - TryJoinAllKind::Small { ref elems } => { - f.debug_struct("TryJoinAll").field("elems", elems).finish() - } - #[cfg(not(futures_no_atomic_cas))] - TryJoinAllKind::Big { ref fut, .. } => fmt::Debug::fmt(fut, f), - } - } -} - -/// Creates a future which represents either a collection of the results of the -/// futures given or an error. -/// -/// The returned future will drive execution for all of its underlying futures, -/// collecting the results into a destination `Vec` in the same order as they -/// were provided. -/// -/// If any future returns an error then all other futures will be canceled and -/// an error will be returned immediately. If all futures complete successfully, -/// however, then the returned future will succeed with a `Vec` of all the -/// successful results. -/// -/// This function is only available when the `std` or `alloc` feature of this -/// library is activated, and it is activated by default. 
-/// -/// # See Also -/// -/// `try_join_all` will switch to the more powerful [`FuturesOrdered`] for performance -/// reasons if the number of futures is large. You may want to look into using it or -/// it's counterpart [`FuturesUnordered`][crate::stream::FuturesUnordered] directly. -/// -/// Some examples for additional functionality provided by these are: -/// -/// * Adding new futures to the set even after it has been started. -/// -/// * Only polling the specific futures that have been woken. In cases where -/// you have a lot of futures this will result in much more efficient polling. -/// -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future::{self, try_join_all}; -/// -/// let futures = vec![ -/// future::ok::(1), -/// future::ok::(2), -/// future::ok::(3), -/// ]; -/// -/// assert_eq!(try_join_all(futures).await, Ok(vec![1, 2, 3])); -/// -/// let futures = vec![ -/// future::ok::(1), -/// future::err::(2), -/// future::ok::(3), -/// ]; -/// -/// assert_eq!(try_join_all(futures).await, Err(2)); -/// # }); -/// ``` -pub fn try_join_all(iter: I) -> TryJoinAll -where - I: IntoIterator, - I::Item: TryFuture, -{ - let iter = iter.into_iter().map(TryFutureExt::into_future); - - #[cfg(futures_no_atomic_cas)] - { - let kind = TryJoinAllKind::Small { - elems: iter.map(TryMaybeDone::Future).collect::>().into(), - }; - - assert_future::::Ok>, ::Error>, _>( - TryJoinAll { kind }, - ) - } - - #[cfg(not(futures_no_atomic_cas))] - { - let kind = match iter.size_hint().1 { - Some(max) if max <= join_all::SMALL => TryJoinAllKind::Small { - elems: iter.map(TryMaybeDone::Future).collect::>().into(), - }, - _ => TryJoinAllKind::Big { fut: iter.collect::>().try_collect() }, - }; - - assert_future::::Ok>, ::Error>, _>( - TryJoinAll { kind }, - ) - } -} - -impl Future for TryJoinAll -where - F: TryFuture, -{ - type Output = Result, F::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match &mut 
self.kind { - TryJoinAllKind::Small { elems } => { - let mut state = FinalState::AllDone; - - for elem in join_all::iter_pin_mut(elems.as_mut()) { - match elem.try_poll(cx) { - Poll::Pending => state = FinalState::Pending, - Poll::Ready(Ok(())) => {} - Poll::Ready(Err(e)) => { - state = FinalState::Error(e); - break; - } - } - } - - match state { - FinalState::Pending => Poll::Pending, - FinalState::AllDone => { - let mut elems = mem::replace(elems, Box::pin([])); - let results = join_all::iter_pin_mut(elems.as_mut()) - .map(|e| e.take_output().unwrap()) - .collect(); - Poll::Ready(Ok(results)) - } - FinalState::Error(e) => { - let _ = mem::replace(elems, Box::pin([])); - Poll::Ready(Err(e)) - } - } - } - #[cfg(not(futures_no_atomic_cas))] - TryJoinAllKind::Big { fut } => Pin::new(fut).poll(cx), - } - } -} - -impl FromIterator for TryJoinAll -where - F: TryFuture, -{ - fn from_iter>(iter: T) -> Self { - try_join_all(iter) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_join.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_join.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_join.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_join.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,256 +0,0 @@ -#![allow(non_snake_case)] - -use crate::future::{assert_future, try_maybe_done, TryMaybeDone}; -use core::fmt; -use core::pin::Pin; -use futures_core::future::{Future, TryFuture}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -macro_rules! generate { - ($( - $(#[$doc:meta])* - ($Join:ident, ), - )*) => ($( - pin_project! 
{ - $(#[$doc])* - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct $Join { - #[pin] Fut1: TryMaybeDone, - $(#[pin] $Fut: TryMaybeDone<$Fut>,)* - } - } - - impl fmt::Debug for $Join - where - Fut1: TryFuture + fmt::Debug, - Fut1::Ok: fmt::Debug, - Fut1::Error: fmt::Debug, - $( - $Fut: TryFuture + fmt::Debug, - $Fut::Ok: fmt::Debug, - $Fut::Error: fmt::Debug, - )* - { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct(stringify!($Join)) - .field("Fut1", &self.Fut1) - $(.field(stringify!($Fut), &self.$Fut))* - .finish() - } - } - - impl $Join - where - Fut1: TryFuture, - $( - $Fut: TryFuture - ),* - { - fn new(Fut1: Fut1, $($Fut: $Fut),*) -> Self { - Self { - Fut1: try_maybe_done(Fut1), - $($Fut: try_maybe_done($Fut)),* - } - } - } - - impl Future for $Join - where - Fut1: TryFuture, - $( - $Fut: TryFuture - ),* - { - type Output = Result<(Fut1::Ok, $($Fut::Ok),*), Fut1::Error>; - - fn poll( - self: Pin<&mut Self>, cx: &mut Context<'_> - ) -> Poll { - let mut all_done = true; - let mut futures = self.project(); - all_done &= futures.Fut1.as_mut().poll(cx)?.is_ready(); - $( - all_done &= futures.$Fut.as_mut().poll(cx)?.is_ready(); - )* - - if all_done { - Poll::Ready(Ok(( - futures.Fut1.take_output().unwrap(), - $( - futures.$Fut.take_output().unwrap() - ),* - ))) - } else { - Poll::Pending - } - } - } - )*) -} - -generate! { - /// Future for the [`try_join`](try_join()) function. - (TryJoin, ), - - /// Future for the [`try_join3`] function. - (TryJoin3, ), - - /// Future for the [`try_join4`] function. - (TryJoin4, ), - - /// Future for the [`try_join5`] function. - (TryJoin5, ), -} - -/// Joins the result of two futures, waiting for them both to complete or -/// for one to produce an error. -/// -/// This function will return a new future which awaits both futures to -/// complete. If successful, the returned future will finish with a tuple of -/// both results. 
If unsuccessful, it will complete with the first error -/// encountered. -/// -/// Note that this function consumes the passed futures and returns a -/// wrapped version of it. -/// -/// # Examples -/// -/// When used on multiple futures that return [`Ok`], `try_join` will return -/// [`Ok`] of a tuple of the values: -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = future::ready(Ok::(1)); -/// let b = future::ready(Ok::(2)); -/// let pair = future::try_join(a, b); -/// -/// assert_eq!(pair.await, Ok((1, 2))); -/// # }); -/// ``` -/// -/// If one of the futures resolves to an error, `try_join` will return -/// that error: -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = future::ready(Ok::(1)); -/// let b = future::ready(Err::(2)); -/// let pair = future::try_join(a, b); -/// -/// assert_eq!(pair.await, Err(2)); -/// # }); -/// ``` -pub fn try_join(future1: Fut1, future2: Fut2) -> TryJoin -where - Fut1: TryFuture, - Fut2: TryFuture, -{ - assert_future::, _>(TryJoin::new(future1, future2)) -} - -/// Same as [`try_join`](try_join()), but with more futures. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = future::ready(Ok::(1)); -/// let b = future::ready(Ok::(2)); -/// let c = future::ready(Ok::(3)); -/// let tuple = future::try_join3(a, b, c); -/// -/// assert_eq!(tuple.await, Ok((1, 2, 3))); -/// # }); -/// ``` -pub fn try_join3( - future1: Fut1, - future2: Fut2, - future3: Fut3, -) -> TryJoin3 -where - Fut1: TryFuture, - Fut2: TryFuture, - Fut3: TryFuture, -{ - assert_future::, _>(TryJoin3::new( - future1, future2, future3, - )) -} - -/// Same as [`try_join`](try_join()), but with more futures. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = future::ready(Ok::(1)); -/// let b = future::ready(Ok::(2)); -/// let c = future::ready(Ok::(3)); -/// let d = future::ready(Ok::(4)); -/// let tuple = future::try_join4(a, b, c, d); -/// -/// assert_eq!(tuple.await, Ok((1, 2, 3, 4))); -/// # }); -/// ``` -pub fn try_join4( - future1: Fut1, - future2: Fut2, - future3: Fut3, - future4: Fut4, -) -> TryJoin4 -where - Fut1: TryFuture, - Fut2: TryFuture, - Fut3: TryFuture, - Fut4: TryFuture, -{ - assert_future::, _>( - TryJoin4::new(future1, future2, future3, future4), - ) -} - -/// Same as [`try_join`](try_join()), but with more futures. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::future; -/// -/// let a = future::ready(Ok::(1)); -/// let b = future::ready(Ok::(2)); -/// let c = future::ready(Ok::(3)); -/// let d = future::ready(Ok::(4)); -/// let e = future::ready(Ok::(5)); -/// let tuple = future::try_join5(a, b, c, d, e); -/// -/// assert_eq!(tuple.await, Ok((1, 2, 3, 4, 5))); -/// # }); -/// ``` -pub fn try_join5( - future1: Fut1, - future2: Fut2, - future3: Fut3, - future4: Fut4, - future5: Fut5, -) -> TryJoin5 -where - Fut1: TryFuture, - Fut2: TryFuture, - Fut3: TryFuture, - Fut4: TryFuture, - Fut5: TryFuture, -{ - assert_future::, _>( - TryJoin5::new(future1, future2, future3, future4, future5), - ) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_maybe_done.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_maybe_done.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_maybe_done.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_maybe_done.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,92 +0,0 @@ -//! 
Definition of the TryMaybeDone combinator - -use super::assert_future; -use core::mem; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future, TryFuture}; -use futures_core::ready; -use futures_core::task::{Context, Poll}; - -/// A future that may have completed with an error. -/// -/// This is created by the [`try_maybe_done()`] function. -#[derive(Debug)] -pub enum TryMaybeDone { - /// A not-yet-completed future - Future(/* #[pin] */ Fut), - /// The output of the completed future - Done(Fut::Ok), - /// The empty variant after the result of a [`TryMaybeDone`] has been - /// taken using the [`take_output`](TryMaybeDone::take_output) method, - /// or if the future returned an error. - Gone, -} - -impl Unpin for TryMaybeDone {} - -/// Wraps a future into a `TryMaybeDone` -pub fn try_maybe_done(future: Fut) -> TryMaybeDone { - assert_future::, _>(TryMaybeDone::Future(future)) -} - -impl TryMaybeDone { - /// Returns an [`Option`] containing a mutable reference to the output of the future. - /// The output of this method will be [`Some`] if and only if the inner - /// future has completed successfully and [`take_output`](TryMaybeDone::take_output) - /// has not yet been called. - #[inline] - pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Ok> { - unsafe { - match self.get_unchecked_mut() { - TryMaybeDone::Done(res) => Some(res), - _ => None, - } - } - } - - /// Attempt to take the output of a `TryMaybeDone` without driving it - /// towards completion. 
- #[inline] - pub fn take_output(self: Pin<&mut Self>) -> Option { - match &*self { - Self::Done(_) => {} - Self::Future(_) | Self::Gone => return None, - } - unsafe { - match mem::replace(self.get_unchecked_mut(), Self::Gone) { - TryMaybeDone::Done(output) => Some(output), - _ => unreachable!(), - } - } - } -} - -impl FusedFuture for TryMaybeDone { - fn is_terminated(&self) -> bool { - match self { - Self::Future(_) => false, - Self::Done(_) | Self::Gone => true, - } - } -} - -impl Future for TryMaybeDone { - type Output = Result<(), Fut::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - unsafe { - match self.as_mut().get_unchecked_mut() { - TryMaybeDone::Future(f) => match ready!(Pin::new_unchecked(f).try_poll(cx)) { - Ok(res) => self.set(Self::Done(res)), - Err(e) => { - self.set(Self::Gone); - return Poll::Ready(Err(e)); - } - }, - TryMaybeDone::Done(_) => {} - TryMaybeDone::Gone => panic!("TryMaybeDone polled after value taken"), - } - } - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_select.rs s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_select.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/future/try_select.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/future/try_select.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,85 +0,0 @@ -use crate::future::{Either, TryFutureExt}; -use core::pin::Pin; -use futures_core::future::{Future, TryFuture}; -use futures_core::task::{Context, Poll}; - -/// Future for the [`try_select()`] function. -#[must_use = "futures do nothing unless you `.await` or poll them"] -#[derive(Debug)] -pub struct TrySelect { - inner: Option<(A, B)>, -} - -impl Unpin for TrySelect {} - -type EitherOk = Either<(::Ok, B), (::Ok, A)>; -type EitherErr = Either<(::Error, B), (::Error, A)>; - -/// Waits for either one of two differently-typed futures to complete. 
-/// -/// This function will return a new future which awaits for either one of both -/// futures to complete. The returned future will finish with both the value -/// resolved and a future representing the completion of the other work. -/// -/// Note that this function consumes the receiving futures and returns a -/// wrapped version of them. -/// -/// Also note that if both this and the second future have the same -/// success/error type you can use the `Either::factor_first` method to -/// conveniently extract out the value at the end. -/// -/// # Examples -/// -/// ``` -/// use futures::future::{self, Either, Future, FutureExt, TryFuture, TryFutureExt}; -/// -/// // A poor-man's try_join implemented on top of select -/// -/// fn try_join(a: A, b: B) -> impl TryFuture -/// where A: TryFuture + Unpin + 'static, -/// B: TryFuture + Unpin + 'static, -/// E: 'static, -/// { -/// future::try_select(a, b).then(|res| -> Box> + Unpin> { -/// match res { -/// Ok(Either::Left((x, b))) => Box::new(b.map_ok(move |y| (x, y))), -/// Ok(Either::Right((y, a))) => Box::new(a.map_ok(move |x| (x, y))), -/// Err(Either::Left((e, _))) => Box::new(future::err(e)), -/// Err(Either::Right((e, _))) => Box::new(future::err(e)), -/// } -/// }) -/// } -/// ``` -pub fn try_select(future1: A, future2: B) -> TrySelect -where - A: TryFuture + Unpin, - B: TryFuture + Unpin, -{ - super::assert_future::, EitherErr>, _>(TrySelect { - inner: Some((future1, future2)), - }) -} - -impl Future for TrySelect -where - A: TryFuture, - B: TryFuture, -{ - type Output = Result, EitherErr>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let (mut a, mut b) = self.inner.take().expect("cannot poll Select twice"); - match a.try_poll_unpin(cx) { - Poll::Ready(Err(x)) => Poll::Ready(Err(Either::Left((x, b)))), - Poll::Ready(Ok(x)) => Poll::Ready(Ok(Either::Left((x, b)))), - Poll::Pending => match b.try_poll_unpin(cx) { - Poll::Ready(Err(x)) => Poll::Ready(Err(Either::Right((x, a)))), - 
Poll::Ready(Ok(x)) => Poll::Ready(Ok(Either::Right((x, a)))), - Poll::Pending => { - self.inner = Some((a, b)); - Poll::Pending - } - }, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/allow_std.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/allow_std.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/allow_std.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/allow_std.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,200 +0,0 @@ -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSlice, IoSliceMut, SeekFrom}; -use std::pin::Pin; -use std::{fmt, io}; - -/// A simple wrapper type which allows types which implement only -/// implement `std::io::Read` or `std::io::Write` -/// to be used in contexts which expect an `AsyncRead` or `AsyncWrite`. -/// -/// If these types issue an error with the kind `io::ErrorKind::WouldBlock`, -/// it is expected that they will notify the current task on readiness. -/// Synchronous `std` types should not issue errors of this kind and -/// are safe to use in this context. However, using these types with -/// `AllowStdIo` will cause the event loop to block, so they should be used -/// with care. -#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct AllowStdIo(T); - -impl Unpin for AllowStdIo {} - -macro_rules! try_with_interrupt { - ($e:expr) => { - loop { - match $e { - Ok(e) => { - break e; - } - Err(ref e) if e.kind() == ::std::io::ErrorKind::Interrupted => { - continue; - } - Err(e) => { - return Poll::Ready(Err(e)); - } - } - } - }; -} - -impl AllowStdIo { - /// Creates a new `AllowStdIo` from an existing IO object. - pub fn new(io: T) -> Self { - Self(io) - } - - /// Returns a reference to the contained IO object. - pub fn get_ref(&self) -> &T { - &self.0 - } - - /// Returns a mutable reference to the contained IO object. 
- pub fn get_mut(&mut self) -> &mut T { - &mut self.0 - } - - /// Consumes self and returns the contained IO object. - pub fn into_inner(self) -> T { - self.0 - } -} - -impl io::Write for AllowStdIo -where - T: io::Write, -{ - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.write(buf) - } - fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - self.0.write_vectored(bufs) - } - fn flush(&mut self) -> io::Result<()> { - self.0.flush() - } - fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - self.0.write_all(buf) - } - fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> { - self.0.write_fmt(fmt) - } -} - -impl AsyncWrite for AllowStdIo -where - T: io::Write, -{ - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(Ok(try_with_interrupt!(self.0.write(buf)))) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Poll::Ready(Ok(try_with_interrupt!(self.0.write_vectored(bufs)))) - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - try_with_interrupt!(self.0.flush()); - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} - -impl io::Read for AllowStdIo -where - T: io::Read, -{ - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.0.read(buf) - } - fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { - self.0.read_vectored(bufs) - } - fn read_to_end(&mut self, buf: &mut Vec) -> io::Result { - self.0.read_to_end(buf) - } - fn read_to_string(&mut self, buf: &mut String) -> io::Result { - self.0.read_to_string(buf) - } - fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { - self.0.read_exact(buf) - } -} - -impl AsyncRead for AllowStdIo -where - T: io::Read, -{ - fn poll_read( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - 
Poll::Ready(Ok(try_with_interrupt!(self.0.read(buf)))) - } - - fn poll_read_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - Poll::Ready(Ok(try_with_interrupt!(self.0.read_vectored(bufs)))) - } -} - -impl io::Seek for AllowStdIo -where - T: io::Seek, -{ - fn seek(&mut self, pos: SeekFrom) -> io::Result { - self.0.seek(pos) - } -} - -impl AsyncSeek for AllowStdIo -where - T: io::Seek, -{ - fn poll_seek( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll> { - Poll::Ready(Ok(try_with_interrupt!(self.0.seek(pos)))) - } -} - -impl io::BufRead for AllowStdIo -where - T: io::BufRead, -{ - fn fill_buf(&mut self) -> io::Result<&[u8]> { - self.0.fill_buf() - } - fn consume(&mut self, amt: usize) { - self.0.consume(amt) - } -} - -impl AsyncBufRead for AllowStdIo -where - T: io::BufRead, -{ - fn poll_fill_buf(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - let this: *mut Self = &mut *self as *mut _; - Poll::Ready(Ok(try_with_interrupt!(unsafe { &mut *this }.0.fill_buf()))) - } - - fn consume(mut self: Pin<&mut Self>, amt: usize) { - self.0.consume(amt) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/buf_reader.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/buf_reader.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/buf_reader.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/buf_reader.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,263 +0,0 @@ -use super::DEFAULT_BUF_SIZE; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSliceMut, SeekFrom}; -use pin_project_lite::pin_project; -use std::io::{self, Read}; -use std::pin::Pin; -use std::{cmp, fmt}; - -pin_project! { - /// The `BufReader` struct adds buffering to any reader. 
- /// - /// It can be excessively inefficient to work directly with a [`AsyncRead`] - /// instance. A `BufReader` performs large, infrequent reads on the underlying - /// [`AsyncRead`] and maintains an in-memory buffer of the results. - /// - /// `BufReader` can improve the speed of programs that make *small* and - /// *repeated* read calls to the same file or network socket. It does not - /// help when reading very large amounts at once, or reading just one or a few - /// times. It also provides no advantage when reading from a source that is - /// already in memory, like a `Vec`. - /// - /// When the `BufReader` is dropped, the contents of its buffer will be - /// discarded. Creating multiple instances of a `BufReader` on the same - /// stream can cause data loss. - /// - /// [`AsyncRead`]: futures_io::AsyncRead - /// - // TODO: Examples - pub struct BufReader { - #[pin] - inner: R, - buffer: Box<[u8]>, - pos: usize, - cap: usize, - } -} - -impl BufReader { - /// Creates a new `BufReader` with a default buffer capacity. The default is currently 8 KB, - /// but may change in the future. - pub fn new(inner: R) -> Self { - Self::with_capacity(DEFAULT_BUF_SIZE, inner) - } - - /// Creates a new `BufReader` with the specified buffer capacity. - pub fn with_capacity(capacity: usize, inner: R) -> Self { - unsafe { - let mut buffer = Vec::with_capacity(capacity); - buffer.set_len(capacity); - super::initialize(&inner, &mut buffer); - Self { inner, buffer: buffer.into_boxed_slice(), pos: 0, cap: 0 } - } - } - - delegate_access_inner!(inner, R, ()); - - /// Returns a reference to the internally buffered data. - /// - /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty. - pub fn buffer(&self) -> &[u8] { - &self.buffer[self.pos..self.cap] - } - - /// Invalidates all data in the internal buffer. 
- #[inline] - fn discard_buffer(self: Pin<&mut Self>) { - let this = self.project(); - *this.pos = 0; - *this.cap = 0; - } -} - -impl BufReader { - /// Seeks relative to the current position. If the new position lies within the buffer, - /// the buffer will not be flushed, allowing for more efficient seeks. - /// This method does not return the location of the underlying reader, so the caller - /// must track this information themselves if it is required. - pub fn seek_relative(self: Pin<&mut Self>, offset: i64) -> SeeKRelative<'_, R> { - SeeKRelative { inner: self, offset, first: true } - } - - /// Attempts to seek relative to the current position. If the new position lies within the buffer, - /// the buffer will not be flushed, allowing for more efficient seeks. - /// This method does not return the location of the underlying reader, so the caller - /// must track this information themselves if it is required. - pub fn poll_seek_relative( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - offset: i64, - ) -> Poll> { - let pos = self.pos as u64; - if offset < 0 { - if let Some(new_pos) = pos.checked_sub((-offset) as u64) { - *self.project().pos = new_pos as usize; - return Poll::Ready(Ok(())); - } - } else if let Some(new_pos) = pos.checked_add(offset as u64) { - if new_pos <= self.cap as u64 { - *self.project().pos = new_pos as usize; - return Poll::Ready(Ok(())); - } - } - self.poll_seek(cx, SeekFrom::Current(offset)).map(|res| res.map(|_| ())) - } -} - -impl AsyncRead for BufReader { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - // If we don't have any buffered data and we're doing a massive read - // (larger than our internal buffer), bypass our internal buffer - // entirely. 
- if self.pos == self.cap && buf.len() >= self.buffer.len() { - let res = ready!(self.as_mut().project().inner.poll_read(cx, buf)); - self.discard_buffer(); - return Poll::Ready(res); - } - let mut rem = ready!(self.as_mut().poll_fill_buf(cx))?; - let nread = rem.read(buf)?; - self.consume(nread); - Poll::Ready(Ok(nread)) - } - - fn poll_read_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - let total_len = bufs.iter().map(|b| b.len()).sum::(); - if self.pos == self.cap && total_len >= self.buffer.len() { - let res = ready!(self.as_mut().project().inner.poll_read_vectored(cx, bufs)); - self.discard_buffer(); - return Poll::Ready(res); - } - let mut rem = ready!(self.as_mut().poll_fill_buf(cx))?; - let nread = rem.read_vectored(bufs)?; - self.consume(nread); - Poll::Ready(Ok(nread)) - } -} - -impl AsyncBufRead for BufReader { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - // If we've reached the end of our internal buffer then we need to fetch - // some more data from the underlying reader. - // Branch using `>=` instead of the more correct `==` - // to tell the compiler that the pos..cap slice is always valid. 
- if *this.pos >= *this.cap { - debug_assert!(*this.pos == *this.cap); - *this.cap = ready!(this.inner.poll_read(cx, this.buffer))?; - *this.pos = 0; - } - Poll::Ready(Ok(&this.buffer[*this.pos..*this.cap])) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - *self.project().pos = cmp::min(self.pos + amt, self.cap); - } -} - -impl AsyncWrite for BufReader { - delegate_async_write!(inner); -} - -impl fmt::Debug for BufReader { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BufReader") - .field("reader", &self.inner) - .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buffer.len())) - .finish() - } -} - -impl AsyncSeek for BufReader { - /// Seek to an offset, in bytes, in the underlying reader. - /// - /// The position used for seeking with `SeekFrom::Current(_)` is the - /// position the underlying reader would be at if the `BufReader` had no - /// internal buffer. - /// - /// Seeking always discards the internal buffer, even if the seek position - /// would otherwise fall within it. This guarantees that calling - /// `.into_inner()` immediately after a seek yields the underlying reader - /// at the same position. - /// - /// To seek without discarding the internal buffer, use - /// [`BufReader::seek_relative`](BufReader::seek_relative) or - /// [`BufReader::poll_seek_relative`](BufReader::poll_seek_relative). - /// - /// See [`AsyncSeek`](futures_io::AsyncSeek) for more details. - /// - /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` - /// where `n` minus the internal buffer length overflows an `i64`, two - /// seeks will be performed instead of one. If the second seek returns - /// `Err`, the underlying reader will be left at the same position it would - /// have if you called `seek` with `SeekFrom::Current(0)`. 
- fn poll_seek( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll> { - let result: u64; - if let SeekFrom::Current(n) = pos { - let remainder = (self.cap - self.pos) as i64; - // it should be safe to assume that remainder fits within an i64 as the alternative - // means we managed to allocate 8 exbibytes and that's absurd. - // But it's not out of the realm of possibility for some weird underlying reader to - // support seeking by i64::min_value() so we need to handle underflow when subtracting - // remainder. - if let Some(offset) = n.checked_sub(remainder) { - result = - ready!(self.as_mut().project().inner.poll_seek(cx, SeekFrom::Current(offset)))?; - } else { - // seek backwards by our remainder, and then by the offset - ready!(self.as_mut().project().inner.poll_seek(cx, SeekFrom::Current(-remainder)))?; - self.as_mut().discard_buffer(); - result = ready!(self.as_mut().project().inner.poll_seek(cx, SeekFrom::Current(n)))?; - } - } else { - // Seeking with Start/End doesn't care about our buffer length. - result = ready!(self.as_mut().project().inner.poll_seek(cx, pos))?; - } - self.discard_buffer(); - Poll::Ready(Ok(result)) - } -} - -/// Future for the [`BufReader::seek_relative`](self::BufReader::seek_relative) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless polled"] -pub struct SeeKRelative<'a, R> { - inner: Pin<&'a mut BufReader>, - offset: i64, - first: bool, -} - -impl Future for SeeKRelative<'_, R> -where - R: AsyncRead + AsyncSeek, -{ - type Output = io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let offset = self.offset; - if self.first { - self.first = false; - self.inner.as_mut().poll_seek_relative(cx, offset) - } else { - self.inner - .as_mut() - .as_mut() - .poll_seek(cx, SeekFrom::Current(offset)) - .map(|res| res.map(|_| ())) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/buf_writer.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/buf_writer.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/buf_writer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/buf_writer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,224 +0,0 @@ -use super::DEFAULT_BUF_SIZE; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSlice, SeekFrom}; -use pin_project_lite::pin_project; -use std::fmt; -use std::io::{self, Write}; -use std::pin::Pin; -use std::ptr; - -pin_project! { - /// Wraps a writer and buffers its output. - /// - /// It can be excessively inefficient to work directly with something that - /// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and - /// writes it to an underlying writer in large, infrequent batches. - /// - /// `BufWriter` can improve the speed of programs that make *small* and - /// *repeated* write calls to the same file or network socket. It does not - /// help when writing very large amounts at once, or writing just one or a few - /// times. It also provides no advantage when writing to a destination that is - /// in memory, like a `Vec`. 
- /// - /// When the `BufWriter` is dropped, the contents of its buffer will be - /// discarded. Creating multiple instances of a `BufWriter` on the same - /// stream can cause data loss. If you need to write out the contents of its - /// buffer, you must manually call flush before the writer is dropped. - /// - /// [`AsyncWrite`]: futures_io::AsyncWrite - /// [`flush`]: super::AsyncWriteExt::flush - /// - // TODO: Examples - pub struct BufWriter { - #[pin] - inner: W, - buf: Vec, - written: usize, - } -} - -impl BufWriter { - /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB, - /// but may change in the future. - pub fn new(inner: W) -> Self { - Self::with_capacity(DEFAULT_BUF_SIZE, inner) - } - - /// Creates a new `BufWriter` with the specified buffer capacity. - pub fn with_capacity(cap: usize, inner: W) -> Self { - Self { inner, buf: Vec::with_capacity(cap), written: 0 } - } - - pub(super) fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - let len = this.buf.len(); - let mut ret = Ok(()); - while *this.written < len { - match ready!(this.inner.as_mut().poll_write(cx, &this.buf[*this.written..])) { - Ok(0) => { - ret = Err(io::Error::new( - io::ErrorKind::WriteZero, - "failed to write the buffered data", - )); - break; - } - Ok(n) => *this.written += n, - Err(e) => { - ret = Err(e); - break; - } - } - } - if *this.written > 0 { - this.buf.drain(..*this.written); - } - *this.written = 0; - Poll::Ready(ret) - } - - delegate_access_inner!(inner, W, ()); - - /// Returns a reference to the internally buffered data. - pub fn buffer(&self) -> &[u8] { - &self.buf - } - - /// Capacity of `buf`. 
how many chars can be held in buffer - pub(super) fn capacity(&self) -> usize { - self.buf.capacity() - } - - /// Remaining number of bytes to reach `buf` 's capacity - #[inline] - pub(super) fn spare_capacity(&self) -> usize { - self.buf.capacity() - self.buf.len() - } - - /// Write a byte slice directly into buffer - /// - /// Will truncate the number of bytes written to `spare_capacity()` so you want to - /// calculate the size of your slice to avoid losing bytes - /// - /// Based on `std::io::BufWriter` - pub(super) fn write_to_buf(self: Pin<&mut Self>, buf: &[u8]) -> usize { - let available = self.spare_capacity(); - let amt_to_buffer = available.min(buf.len()); - - // SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction. - unsafe { - self.write_to_buffer_unchecked(&buf[..amt_to_buffer]); - } - - amt_to_buffer - } - - /// Write byte slice directly into `self.buf` - /// - /// Based on `std::io::BufWriter` - #[inline] - unsafe fn write_to_buffer_unchecked(self: Pin<&mut Self>, buf: &[u8]) { - debug_assert!(buf.len() <= self.spare_capacity()); - let this = self.project(); - let old_len = this.buf.len(); - let buf_len = buf.len(); - let src = buf.as_ptr(); - let dst = this.buf.as_mut_ptr().add(old_len); - ptr::copy_nonoverlapping(src, dst, buf_len); - this.buf.set_len(old_len + buf_len); - } - - /// Write directly using `inner`, bypassing buffering - pub(super) fn inner_poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.project().inner.poll_write(cx, buf) - } - - /// Write directly using `inner`, bypassing buffering - pub(super) fn inner_poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - self.project().inner.poll_write_vectored(cx, bufs) - } -} - -impl AsyncWrite for BufWriter { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - if self.buf.len() + buf.len() > self.buf.capacity() { - 
ready!(self.as_mut().flush_buf(cx))?; - } - if buf.len() >= self.buf.capacity() { - self.project().inner.poll_write(cx, buf) - } else { - Poll::Ready(self.project().buf.write(buf)) - } - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - let total_len = bufs.iter().map(|b| b.len()).sum::(); - if self.buf.len() + total_len > self.buf.capacity() { - ready!(self.as_mut().flush_buf(cx))?; - } - if total_len >= self.buf.capacity() { - self.project().inner.poll_write_vectored(cx, bufs) - } else { - Poll::Ready(self.project().buf.write_vectored(bufs)) - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().flush_buf(cx))?; - self.project().inner.poll_flush(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().flush_buf(cx))?; - self.project().inner.poll_close(cx) - } -} - -impl AsyncRead for BufWriter { - delegate_async_read!(inner); -} - -impl AsyncBufRead for BufWriter { - delegate_async_buf_read!(inner); -} - -impl fmt::Debug for BufWriter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BufWriter") - .field("writer", &self.inner) - .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity())) - .field("written", &self.written) - .finish() - } -} - -impl AsyncSeek for BufWriter { - /// Seek to the offset, in bytes, in the underlying writer. - /// - /// Seeking always writes out the internal buffer before seeking. 
- fn poll_seek( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll> { - ready!(self.as_mut().flush_buf(cx))?; - self.project().inner.poll_seek(cx, pos) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/chain.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/chain.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/chain.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/chain.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,142 +0,0 @@ -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncRead, IoSliceMut}; -use pin_project_lite::pin_project; -use std::fmt; -use std::io; -use std::pin::Pin; - -pin_project! { - /// Reader for the [`chain`](super::AsyncReadExt::chain) method. - #[must_use = "readers do nothing unless polled"] - pub struct Chain { - #[pin] - first: T, - #[pin] - second: U, - done_first: bool, - } -} - -impl Chain -where - T: AsyncRead, - U: AsyncRead, -{ - pub(super) fn new(first: T, second: U) -> Self { - Self { first, second, done_first: false } - } - - /// Gets references to the underlying readers in this `Chain`. - pub fn get_ref(&self) -> (&T, &U) { - (&self.first, &self.second) - } - - /// Gets mutable references to the underlying readers in this `Chain`. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying readers as doing so may corrupt the internal state of this - /// `Chain`. - pub fn get_mut(&mut self) -> (&mut T, &mut U) { - (&mut self.first, &mut self.second) - } - - /// Gets pinned mutable references to the underlying readers in this `Chain`. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying readers as doing so may corrupt the internal state of this - /// `Chain`. 
- pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut T>, Pin<&mut U>) { - let this = self.project(); - (this.first, this.second) - } - - /// Consumes the `Chain`, returning the wrapped readers. - pub fn into_inner(self) -> (T, U) { - (self.first, self.second) - } -} - -impl fmt::Debug for Chain -where - T: fmt::Debug, - U: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Chain") - .field("t", &self.first) - .field("u", &self.second) - .field("done_first", &self.done_first) - .finish() - } -} - -impl AsyncRead for Chain -where - T: AsyncRead, - U: AsyncRead, -{ - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - let this = self.project(); - - if !*this.done_first { - match ready!(this.first.poll_read(cx, buf)?) { - 0 if !buf.is_empty() => *this.done_first = true, - n => return Poll::Ready(Ok(n)), - } - } - this.second.poll_read(cx, buf) - } - - fn poll_read_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - let this = self.project(); - - if !*this.done_first { - let n = ready!(this.first.poll_read_vectored(cx, bufs)?); - if n == 0 && bufs.iter().any(|b| !b.is_empty()) { - *this.done_first = true - } else { - return Poll::Ready(Ok(n)); - } - } - this.second.poll_read_vectored(cx, bufs) - } -} - -impl AsyncBufRead for Chain -where - T: AsyncBufRead, - U: AsyncBufRead, -{ - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - if !*this.done_first { - match ready!(this.first.poll_fill_buf(cx)?) 
{ - buf if buf.is_empty() => { - *this.done_first = true; - } - buf => return Poll::Ready(Ok(buf)), - } - } - this.second.poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - let this = self.project(); - - if !*this.done_first { - this.first.consume(amt) - } else { - this.second.consume(amt) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/close.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/close.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/close.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/close.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,28 +0,0 @@ -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncWrite; -use std::io; -use std::pin::Pin; - -/// Future for the [`close`](super::AsyncWriteExt::close) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Close<'a, W: ?Sized> { - writer: &'a mut W, -} - -impl Unpin for Close<'_, W> {} - -impl<'a, W: AsyncWrite + ?Sized + Unpin> Close<'a, W> { - pub(super) fn new(writer: &'a mut W) -> Self { - Self { writer } - } -} - -impl Future for Close<'_, W> { - type Output = io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut *self.writer).poll_close(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/copy_buf_abortable.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/copy_buf_abortable.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/copy_buf_abortable.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/copy_buf_abortable.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,124 +0,0 @@ -use crate::abortable::{AbortHandle, AbortInner, Aborted}; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncWrite}; -use pin_project_lite::pin_project; -use 
std::io; -use std::pin::Pin; -use std::sync::atomic::Ordering; -use std::sync::Arc; - -/// Creates a future which copies all the bytes from one object to another, with its `AbortHandle`. -/// -/// The returned future will copy all the bytes read from this `AsyncBufRead` into the -/// `writer` specified. This future will only complete once abort has been requested or the `reader` has hit -/// EOF and all bytes have been written to and flushed from the `writer` -/// provided. -/// -/// On success the number of bytes is returned. If aborted, `Aborted` is returned. Otherwise, the underlying error is returned. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::io::{self, AsyncWriteExt, Cursor}; -/// use futures::future::Aborted; -/// -/// let reader = Cursor::new([1, 2, 3, 4]); -/// let mut writer = Cursor::new(vec![0u8; 5]); -/// -/// let (fut, abort_handle) = io::copy_buf_abortable(reader, &mut writer); -/// let bytes = fut.await; -/// abort_handle.abort(); -/// writer.close().await.unwrap(); -/// match bytes { -/// Ok(Ok(n)) => { -/// assert_eq!(n, 4); -/// assert_eq!(writer.into_inner(), [1, 2, 3, 4, 0]); -/// Ok(n) -/// }, -/// Ok(Err(a)) => { -/// Err::(a) -/// } -/// Err(e) => panic!("{}", e) -/// } -/// # }).unwrap(); -/// ``` -pub fn copy_buf_abortable( - reader: R, - writer: &mut W, -) -> (CopyBufAbortable<'_, R, W>, AbortHandle) -where - R: AsyncBufRead, - W: AsyncWrite + Unpin + ?Sized, -{ - let (handle, reg) = AbortHandle::new_pair(); - (CopyBufAbortable { reader, writer, amt: 0, inner: reg.inner }, handle) -} - -pin_project! { - /// Future for the [`copy_buf()`] function. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct CopyBufAbortable<'a, R, W: ?Sized> { - #[pin] - reader: R, - writer: &'a mut W, - amt: u64, - inner: Arc - } -} - -macro_rules! ready_or_break { - ($e:expr $(,)?) 
=> { - match $e { - $crate::task::Poll::Ready(t) => t, - $crate::task::Poll::Pending => break, - } - }; -} - -impl Future for CopyBufAbortable<'_, R, W> -where - R: AsyncBufRead, - W: AsyncWrite + Unpin + Sized, -{ - type Output = Result, io::Error>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - loop { - // Check if the task has been aborted - if this.inner.aborted.load(Ordering::Relaxed) { - return Poll::Ready(Ok(Err(Aborted))); - } - - // Read some bytes from the reader, and if we have reached EOF, return total bytes read - let buffer = ready_or_break!(this.reader.as_mut().poll_fill_buf(cx))?; - if buffer.is_empty() { - ready_or_break!(Pin::new(&mut this.writer).poll_flush(cx))?; - return Poll::Ready(Ok(Ok(*this.amt))); - } - - // Pass the buffer to the writer, and update the amount written - let i = ready_or_break!(Pin::new(&mut this.writer).poll_write(cx, buffer))?; - if i == 0 { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - *this.amt += i as u64; - this.reader.as_mut().consume(i); - } - // Schedule the task to be woken up again. - // Never called unless Poll::Pending is returned from io objects. - this.inner.waker.register(cx.waker()); - - // Check to see if the task was aborted between the first check and - // registration. - // Checking with `Relaxed` is sufficient because - // `register` introduces an `AcqRel` barrier. 
- if this.inner.aborted.load(Ordering::Relaxed) { - return Poll::Ready(Ok(Err(Aborted))); - } - Poll::Pending - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/copy_buf.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/copy_buf.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/copy_buf.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/copy_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,78 +0,0 @@ -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncWrite}; -use pin_project_lite::pin_project; -use std::io; -use std::pin::Pin; - -/// Creates a future which copies all the bytes from one object to another. -/// -/// The returned future will copy all the bytes read from this `AsyncBufRead` into the -/// `writer` specified. This future will only complete once the `reader` has hit -/// EOF and all bytes have been written to and flushed from the `writer` -/// provided. -/// -/// On success the number of bytes is returned. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::io::{self, AsyncWriteExt, Cursor}; -/// -/// let reader = Cursor::new([1, 2, 3, 4]); -/// let mut writer = Cursor::new(vec![0u8; 5]); -/// -/// let bytes = io::copy_buf(reader, &mut writer).await?; -/// writer.close().await?; -/// -/// assert_eq!(bytes, 4); -/// assert_eq!(writer.into_inner(), [1, 2, 3, 4, 0]); -/// # Ok::<(), Box>(()) }).unwrap(); -/// ``` -pub fn copy_buf(reader: R, writer: &mut W) -> CopyBuf<'_, R, W> -where - R: AsyncBufRead, - W: AsyncWrite + Unpin + ?Sized, -{ - CopyBuf { reader, writer, amt: 0 } -} - -pin_project! { - /// Future for the [`copy_buf()`] function. 
- #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct CopyBuf<'a, R, W: ?Sized> { - #[pin] - reader: R, - writer: &'a mut W, - amt: u64, - } -} - -impl Future for CopyBuf<'_, R, W> -where - R: AsyncBufRead, - W: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - loop { - let buffer = ready!(this.reader.as_mut().poll_fill_buf(cx))?; - if buffer.is_empty() { - ready!(Pin::new(&mut this.writer).poll_flush(cx))?; - return Poll::Ready(Ok(*this.amt)); - } - - let i = ready!(Pin::new(&mut this.writer).poll_write(cx, buffer))?; - if i == 0 { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - *this.amt += i as u64; - this.reader.as_mut().consume(i); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/copy.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/copy.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/copy.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/copy.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -use super::{copy_buf, BufReader, CopyBuf}; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncRead, AsyncWrite}; -use pin_project_lite::pin_project; -use std::io; -use std::pin::Pin; - -/// Creates a future which copies all the bytes from one object to another. -/// -/// The returned future will copy all the bytes read from this `AsyncRead` into the -/// `writer` specified. This future will only complete once the `reader` has hit -/// EOF and all bytes have been written to and flushed from the `writer` -/// provided. -/// -/// On success the number of bytes is returned. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::io::{self, AsyncWriteExt, Cursor}; -/// -/// let reader = Cursor::new([1, 2, 3, 4]); -/// let mut writer = Cursor::new(vec![0u8; 5]); -/// -/// let bytes = io::copy(reader, &mut writer).await?; -/// writer.close().await?; -/// -/// assert_eq!(bytes, 4); -/// assert_eq!(writer.into_inner(), [1, 2, 3, 4, 0]); -/// # Ok::<(), Box>(()) }).unwrap(); -/// ``` -pub fn copy(reader: R, writer: &mut W) -> Copy<'_, R, W> -where - R: AsyncRead, - W: AsyncWrite + Unpin + ?Sized, -{ - Copy { inner: copy_buf(BufReader::new(reader), writer) } -} - -pin_project! { - /// Future for the [`copy()`] function. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Copy<'a, R, W: ?Sized> { - #[pin] - inner: CopyBuf<'a, BufReader, W>, - } -} - -impl Future for Copy<'_, R, W> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.project().inner.poll(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/cursor.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/cursor.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/cursor.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/cursor.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,232 +0,0 @@ -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSlice, IoSliceMut, SeekFrom}; -use std::io; -use std::pin::Pin; - -/// A `Cursor` wraps an in-memory buffer and provides it with a -/// [`AsyncSeek`] implementation. -/// -/// `Cursor`s are used with in-memory buffers, anything implementing -/// `AsRef<[u8]>`, to allow them to implement [`AsyncRead`] and/or [`AsyncWrite`], -/// allowing these buffers to be used anywhere you might use a reader or writer -/// that does actual I/O. 
-/// -/// This library implements some I/O traits on various types which -/// are commonly used as a buffer, like `Cursor<`[`Vec`]`>` and -/// `Cursor<`[`&[u8]`][bytes]`>`. -/// -/// [`AsyncSeek`]: trait.AsyncSeek.html -/// [`AsyncRead`]: trait.AsyncRead.html -/// [`AsyncWrite`]: trait.AsyncWrite.html -/// [bytes]: https://doc.rust-lang.org/std/primitive.slice.html -#[derive(Clone, Debug, Default)] -pub struct Cursor { - inner: io::Cursor, -} - -impl Cursor { - /// Creates a new cursor wrapping the provided underlying in-memory buffer. - /// - /// Cursor initial position is `0` even if underlying buffer (e.g., `Vec`) - /// is not empty. So writing to cursor starts with overwriting `Vec` - /// content, not with appending to it. - /// - /// # Examples - /// - /// ``` - /// use futures::io::Cursor; - /// - /// let buff = Cursor::new(Vec::new()); - /// # fn force_inference(_: &Cursor>) {} - /// # force_inference(&buff); - /// ``` - pub fn new(inner: T) -> Self { - Self { inner: io::Cursor::new(inner) } - } - - /// Consumes this cursor, returning the underlying value. - /// - /// # Examples - /// - /// ``` - /// use futures::io::Cursor; - /// - /// let buff = Cursor::new(Vec::new()); - /// # fn force_inference(_: &Cursor>) {} - /// # force_inference(&buff); - /// - /// let vec = buff.into_inner(); - /// ``` - pub fn into_inner(self) -> T { - self.inner.into_inner() - } - - /// Gets a reference to the underlying value in this cursor. - /// - /// # Examples - /// - /// ``` - /// use futures::io::Cursor; - /// - /// let buff = Cursor::new(Vec::new()); - /// # fn force_inference(_: &Cursor>) {} - /// # force_inference(&buff); - /// - /// let reference = buff.get_ref(); - /// ``` - pub fn get_ref(&self) -> &T { - self.inner.get_ref() - } - - /// Gets a mutable reference to the underlying value in this cursor. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying value as it may corrupt this cursor's position. 
- /// - /// # Examples - /// - /// ``` - /// use futures::io::Cursor; - /// - /// let mut buff = Cursor::new(Vec::new()); - /// # fn force_inference(_: &Cursor>) {} - /// # force_inference(&buff); - /// - /// let reference = buff.get_mut(); - /// ``` - pub fn get_mut(&mut self) -> &mut T { - self.inner.get_mut() - } - - /// Returns the current position of this cursor. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncSeekExt, Cursor, SeekFrom}; - /// - /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]); - /// - /// assert_eq!(buff.position(), 0); - /// - /// buff.seek(SeekFrom::Current(2)).await?; - /// assert_eq!(buff.position(), 2); - /// - /// buff.seek(SeekFrom::Current(-1)).await?; - /// assert_eq!(buff.position(), 1); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - pub fn position(&self) -> u64 { - self.inner.position() - } - - /// Sets the position of this cursor. - /// - /// # Examples - /// - /// ``` - /// use futures::io::Cursor; - /// - /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]); - /// - /// assert_eq!(buff.position(), 0); - /// - /// buff.set_position(2); - /// assert_eq!(buff.position(), 2); - /// - /// buff.set_position(4); - /// assert_eq!(buff.position(), 4); - /// ``` - pub fn set_position(&mut self, pos: u64) { - self.inner.set_position(pos) - } -} - -impl AsyncSeek for Cursor -where - T: AsRef<[u8]> + Unpin, -{ - fn poll_seek( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - pos: SeekFrom, - ) -> Poll> { - Poll::Ready(io::Seek::seek(&mut self.inner, pos)) - } -} - -impl + Unpin> AsyncRead for Cursor { - fn poll_read( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Poll::Ready(io::Read::read(&mut self.inner, buf)) - } - - fn poll_read_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - Poll::Ready(io::Read::read_vectored(&mut self.inner, bufs)) - } -} - -impl AsyncBufRead 
for Cursor -where - T: AsRef<[u8]> + Unpin, -{ - fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::BufRead::fill_buf(&mut self.get_mut().inner)) - } - - fn consume(mut self: Pin<&mut Self>, amt: usize) { - io::BufRead::consume(&mut self.inner, amt) - } -} - -macro_rules! delegate_async_write_to_stdio { - () => { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut self.inner, buf)) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Poll::Ready(io::Write::write_vectored(&mut self.inner, bufs)) - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut self.inner)) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } - }; -} - -impl AsyncWrite for Cursor<&mut [u8]> { - delegate_async_write_to_stdio!(); -} - -impl AsyncWrite for Cursor<&mut Vec> { - delegate_async_write_to_stdio!(); -} - -impl AsyncWrite for Cursor> { - delegate_async_write_to_stdio!(); -} - -impl AsyncWrite for Cursor> { - delegate_async_write_to_stdio!(); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/empty.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/empty.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/empty.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/empty.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,59 +0,0 @@ -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncRead}; -use std::fmt; -use std::io; -use std::pin::Pin; - -/// Reader for the [`empty()`] function. -#[must_use = "readers do nothing unless polled"] -pub struct Empty { - _priv: (), -} - -/// Constructs a new handle to an empty reader. -/// -/// All reads from the returned reader will return `Poll::Ready(Ok(0))`. 
-/// -/// # Examples -/// -/// A slightly sad example of not reading anything into a buffer: -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::io::{self, AsyncReadExt}; -/// -/// let mut buffer = String::new(); -/// let mut reader = io::empty(); -/// reader.read_to_string(&mut buffer).await?; -/// assert!(buffer.is_empty()); -/// # Ok::<(), Box>(()) }).unwrap(); -/// ``` -pub fn empty() -> Empty { - Empty { _priv: () } -} - -impl AsyncRead for Empty { - #[inline] - fn poll_read( - self: Pin<&mut Self>, - _: &mut Context<'_>, - _: &mut [u8], - ) -> Poll> { - Poll::Ready(Ok(0)) - } -} - -impl AsyncBufRead for Empty { - #[inline] - fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(&[])) - } - #[inline] - fn consume(self: Pin<&mut Self>, _: usize) {} -} - -impl fmt::Debug for Empty { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Empty { .. }") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/fill_buf.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/fill_buf.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/fill_buf.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/fill_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,51 +0,0 @@ -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncBufRead; -use std::io; -use std::pin::Pin; - -/// Future for the [`fill_buf`](super::AsyncBufReadExt::fill_buf) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct FillBuf<'a, R: ?Sized> { - reader: Option<&'a mut R>, -} - -impl Unpin for FillBuf<'_, R> {} - -impl<'a, R: AsyncBufRead + ?Sized + Unpin> FillBuf<'a, R> { - pub(super) fn new(reader: &'a mut R) -> Self { - Self { reader: Some(reader) } - } -} - -impl<'a, R> Future for FillBuf<'a, R> -where - R: AsyncBufRead + ?Sized + Unpin, -{ - type Output = io::Result<&'a [u8]>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - let reader = this.reader.take().expect("Polled FillBuf after completion"); - - match Pin::new(&mut *reader).poll_fill_buf(cx) { - // With polonius it is possible to remove this inner match and just have the correct - // lifetime of the reference inferred based on which branch is taken - Poll::Ready(Ok(_)) => match Pin::new(reader).poll_fill_buf(cx) { - Poll::Ready(Ok(slice)) => Poll::Ready(Ok(slice)), - Poll::Ready(Err(err)) => { - unreachable!("reader indicated readiness but then returned an error: {:?}", err) - } - Poll::Pending => { - unreachable!("reader indicated readiness but then returned pending") - } - }, - Poll::Ready(Err(err)) => Poll::Ready(Err(err)), - Poll::Pending => { - this.reader = Some(reader); - Poll::Pending - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/flush.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/flush.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/flush.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/flush.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncWrite; -use std::io; -use std::pin::Pin; - -/// Future for the [`flush`](super::AsyncWriteExt::flush) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Flush<'a, W: ?Sized> { - writer: &'a mut W, -} - -impl Unpin for Flush<'_, W> {} - -impl<'a, W: AsyncWrite + ?Sized + Unpin> Flush<'a, W> { - pub(super) fn new(writer: &'a mut W) -> Self { - Self { writer } - } -} - -impl Future for Flush<'_, W> -where - W: AsyncWrite + ?Sized + Unpin, -{ - type Output = io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut *self.writer).poll_flush(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/into_sink.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/into_sink.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/into_sink.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/into_sink.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,82 +0,0 @@ -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncWrite; -use futures_sink::Sink; -use pin_project_lite::pin_project; -use std::io; -use std::pin::Pin; - -#[derive(Debug)] -struct Block { - offset: usize, - bytes: Item, -} - -pin_project! { - /// Sink for the [`into_sink`](super::AsyncWriteExt::into_sink) method. - #[must_use = "sinks do nothing unless polled"] - #[derive(Debug)] - #[cfg_attr(docsrs, doc(cfg(feature = "sink")))] - pub struct IntoSink { - #[pin] - writer: W, - // An outstanding block for us to push into the underlying writer, along with an offset of how - // far into this block we have written already. - buffer: Option>, - } -} - -impl> IntoSink { - pub(super) fn new(writer: W) -> Self { - Self { writer, buffer: None } - } - - /// If we have an outstanding block in `buffer` attempt to push it into the writer, does _not_ - /// flush the writer after it succeeds in pushing the block into it. 
- fn poll_flush_buffer( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - let mut this = self.project(); - - if let Some(buffer) = this.buffer { - loop { - let bytes = buffer.bytes.as_ref(); - let written = ready!(this.writer.as_mut().poll_write(cx, &bytes[buffer.offset..]))?; - buffer.offset += written; - if buffer.offset == bytes.len() { - break; - } - } - } - *this.buffer = None; - Poll::Ready(Ok(())) - } -} - -impl> Sink for IntoSink { - type Error = io::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.poll_flush_buffer(cx))?; - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - debug_assert!(self.buffer.is_none()); - *self.project().buffer = Some(Block { offset: 0, bytes: item }); - Ok(()) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll_flush_buffer(cx))?; - ready!(self.project().writer.poll_flush(cx))?; - Poll::Ready(Ok(())) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll_flush_buffer(cx))?; - ready!(self.project().writer.poll_close(cx))?; - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/lines.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/lines.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/lines.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/lines.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,47 +0,0 @@ -use super::read_line::read_line_internal; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncBufRead; -use pin_project_lite::pin_project; -use std::io; -use std::mem; -use std::pin::Pin; - -pin_project! { - /// Stream for the [`lines`](super::AsyncBufReadExt::lines) method. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Lines { - #[pin] - reader: R, - buf: String, - bytes: Vec, - read: usize, - } -} - -impl Lines { - pub(super) fn new(reader: R) -> Self { - Self { reader, buf: String::new(), bytes: Vec::new(), read: 0 } - } -} - -impl Stream for Lines { - type Item = io::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - let n = ready!(read_line_internal(this.reader, cx, this.buf, this.bytes, this.read))?; - if n == 0 && this.buf.is_empty() { - return Poll::Ready(None); - } - if this.buf.ends_with('\n') { - this.buf.pop(); - if this.buf.ends_with('\r') { - this.buf.pop(); - } - } - Poll::Ready(Some(Ok(mem::take(this.buf)))) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/line_writer.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/line_writer.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/line_writer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/line_writer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,155 +0,0 @@ -use super::buf_writer::BufWriter; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncWrite; -use futures_io::IoSlice; -use pin_project_lite::pin_project; -use std::io; -use std::pin::Pin; - -pin_project! { -/// Wrap a writer, like [`BufWriter`] does, but prioritizes buffering lines -/// -/// This was written based on `std::io::LineWriter` which goes into further details -/// explaining the code. -/// -/// Buffering is actually done using `BufWriter`. This class will leverage `BufWriter` -/// to write on-each-line. -#[derive(Debug)] -pub struct LineWriter { - #[pin] - buf_writer: BufWriter, -} -} - -impl LineWriter { - /// Create a new `LineWriter` with default buffer capacity. 
The default is currently 1KB - /// which was taken from `std::io::LineWriter` - pub fn new(inner: W) -> LineWriter { - LineWriter::with_capacity(1024, inner) - } - - /// Creates a new `LineWriter` with the specified buffer capacity. - pub fn with_capacity(capacity: usize, inner: W) -> LineWriter { - LineWriter { buf_writer: BufWriter::with_capacity(capacity, inner) } - } - - /// Flush `buf_writer` if last char is "new line" - fn flush_if_completed_line(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - match this.buf_writer.buffer().last().copied() { - Some(b'\n') => this.buf_writer.flush_buf(cx), - _ => Poll::Ready(Ok(())), - } - } - - /// Returns a reference to `buf_writer`'s internally buffered data. - pub fn buffer(&self) -> &[u8] { - self.buf_writer.buffer() - } - - /// Acquires a reference to the underlying sink or stream that this combinator is - /// pulling from. - pub fn get_ref(&self) -> &W { - self.buf_writer.get_ref() - } -} - -impl AsyncWrite for LineWriter { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - let mut this = self.as_mut().project(); - let newline_index = match memchr::memrchr(b'\n', buf) { - None => { - ready!(self.as_mut().flush_if_completed_line(cx)?); - return self.project().buf_writer.poll_write(cx, buf); - } - Some(newline_index) => newline_index + 1, - }; - - ready!(this.buf_writer.as_mut().poll_flush(cx)?); - - let lines = &buf[..newline_index]; - - let flushed = { ready!(this.buf_writer.as_mut().inner_poll_write(cx, lines))? }; - - if flushed == 0 { - return Poll::Ready(Ok(0)); - } - - let tail = if flushed >= newline_index { - &buf[flushed..] 
- } else if newline_index - flushed <= this.buf_writer.capacity() { - &buf[flushed..newline_index] - } else { - let scan_area = &buf[flushed..]; - let scan_area = &scan_area[..this.buf_writer.capacity()]; - match memchr::memrchr(b'\n', scan_area) { - Some(newline_index) => &scan_area[..newline_index + 1], - None => scan_area, - } - }; - - let buffered = this.buf_writer.as_mut().write_to_buf(tail); - Poll::Ready(Ok(flushed + buffered)) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - let mut this = self.as_mut().project(); - // `is_write_vectored()` is handled in original code, but not in this crate - // see https://github.com/rust-lang/rust/issues/70436 - - let last_newline_buf_idx = bufs - .iter() - .enumerate() - .rev() - .find_map(|(i, buf)| memchr::memchr(b'\n', buf).map(|_| i)); - let last_newline_buf_idx = match last_newline_buf_idx { - None => { - ready!(self.as_mut().flush_if_completed_line(cx)?); - return self.project().buf_writer.poll_write_vectored(cx, bufs); - } - Some(i) => i, - }; - - ready!(this.buf_writer.as_mut().poll_flush(cx)?); - - let (lines, tail) = bufs.split_at(last_newline_buf_idx + 1); - - let flushed = { ready!(this.buf_writer.as_mut().inner_poll_write_vectored(cx, lines))? 
}; - if flushed == 0 { - return Poll::Ready(Ok(0)); - } - - let lines_len = lines.iter().map(|buf| buf.len()).sum(); - if flushed < lines_len { - return Poll::Ready(Ok(flushed)); - } - - let buffered: usize = tail - .iter() - .filter(|buf| !buf.is_empty()) - .map(|buf| this.buf_writer.as_mut().write_to_buf(buf)) - .take_while(|&n| n > 0) - .sum(); - - Poll::Ready(Ok(flushed + buffered)) - } - - /// Forward to `buf_writer` 's `BufWriter::poll_flush()` - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.as_mut().project().buf_writer.poll_flush(cx) - } - - /// Forward to `buf_writer` 's `BufWriter::poll_close()` - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.as_mut().project().buf_writer.poll_close(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,841 +0,0 @@ -//! Asynchronous I/O. -//! -//! This module is the asynchronous version of `std::io`. It defines four -//! traits, [`AsyncRead`], [`AsyncWrite`], [`AsyncSeek`], and [`AsyncBufRead`], -//! which mirror the `Read`, `Write`, `Seek`, and `BufRead` traits of the -//! standard library. However, these traits integrate with the asynchronous -//! task system, so that if an I/O object isn't ready for reading (or writing), -//! the thread is not blocked, and instead the current task is queued to be -//! woken when I/O is ready. -//! -//! In addition, the [`AsyncReadExt`], [`AsyncWriteExt`], [`AsyncSeekExt`], and -//! [`AsyncBufReadExt`] extension traits offer a variety of useful combinators -//! for operating with asynchronous I/O objects, including ways to work with -//! them using futures, streams and sinks. -//! -//! 
This module is only available when the `std` feature of this -//! library is activated, and it is activated by default. - -#[cfg(feature = "io-compat")] -#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] -use crate::compat::Compat; -use crate::future::assert_future; -use crate::stream::assert_stream; -use std::{pin::Pin, ptr}; - -// Re-export some types from `std::io` so that users don't have to deal -// with conflicts when `use`ing `futures::io` and `std::io`. -#[doc(no_inline)] -pub use std::io::{Error, ErrorKind, IoSlice, IoSliceMut, Result, SeekFrom}; - -pub use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite}; - -// used by `BufReader` and `BufWriter` -// https://github.com/rust-lang/rust/blob/master/src/libstd/sys_common/io.rs#L1 -const DEFAULT_BUF_SIZE: usize = 8 * 1024; - -/// Initializes a buffer if necessary. -/// -/// A buffer is currently always initialized. -#[inline] -unsafe fn initialize(_reader: &R, buf: &mut [u8]) { - ptr::write_bytes(buf.as_mut_ptr(), 0, buf.len()) -} - -mod allow_std; -pub use self::allow_std::AllowStdIo; - -mod buf_reader; -pub use self::buf_reader::{BufReader, SeeKRelative}; - -mod buf_writer; -pub use self::buf_writer::BufWriter; - -mod line_writer; -pub use self::line_writer::LineWriter; - -mod chain; -pub use self::chain::Chain; - -mod close; -pub use self::close::Close; - -mod copy; -pub use self::copy::{copy, Copy}; - -mod copy_buf; -pub use self::copy_buf::{copy_buf, CopyBuf}; - -mod copy_buf_abortable; -pub use self::copy_buf_abortable::{copy_buf_abortable, CopyBufAbortable}; - -mod cursor; -pub use self::cursor::Cursor; - -mod empty; -pub use self::empty::{empty, Empty}; - -mod fill_buf; -pub use self::fill_buf::FillBuf; - -mod flush; -pub use self::flush::Flush; - -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -mod into_sink; -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub use self::into_sink::IntoSink; - -mod lines; -pub use self::lines::Lines; 
- -mod read; -pub use self::read::Read; - -mod read_vectored; -pub use self::read_vectored::ReadVectored; - -mod read_exact; -pub use self::read_exact::ReadExact; - -mod read_line; -pub use self::read_line::ReadLine; - -mod read_to_end; -pub use self::read_to_end::ReadToEnd; - -mod read_to_string; -pub use self::read_to_string::ReadToString; - -mod read_until; -pub use self::read_until::ReadUntil; - -mod repeat; -pub use self::repeat::{repeat, Repeat}; - -mod seek; -pub use self::seek::Seek; - -mod sink; -pub use self::sink::{sink, Sink}; - -mod split; -pub use self::split::{ReadHalf, ReuniteError, WriteHalf}; - -mod take; -pub use self::take::Take; - -mod window; -pub use self::window::Window; - -mod write; -pub use self::write::Write; - -mod write_vectored; -pub use self::write_vectored::WriteVectored; - -mod write_all; -pub use self::write_all::WriteAll; - -#[cfg(feature = "write-all-vectored")] -mod write_all_vectored; -#[cfg(feature = "write-all-vectored")] -pub use self::write_all_vectored::WriteAllVectored; - -/// An extension trait which adds utility methods to `AsyncRead` types. -pub trait AsyncReadExt: AsyncRead { - /// Creates an adaptor which will chain this stream with another. - /// - /// The returned `AsyncRead` instance will first read all bytes from this object - /// until EOF is encountered. Afterwards the output is equivalent to the - /// output of `next`. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncReadExt, Cursor}; - /// - /// let reader1 = Cursor::new([1, 2, 3, 4]); - /// let reader2 = Cursor::new([5, 6, 7, 8]); - /// - /// let mut reader = reader1.chain(reader2); - /// let mut buffer = Vec::new(); - /// - /// // read the value into a Vec. 
- /// reader.read_to_end(&mut buffer).await?; - /// assert_eq!(buffer, [1, 2, 3, 4, 5, 6, 7, 8]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn chain(self, next: R) -> Chain - where - Self: Sized, - R: AsyncRead, - { - assert_read(Chain::new(self, next)) - } - - /// Tries to read some bytes directly into the given `buf` in asynchronous - /// manner, returning a future type. - /// - /// The returned future will resolve to the number of bytes read once the read - /// operation is completed. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncReadExt, Cursor}; - /// - /// let mut reader = Cursor::new([1, 2, 3, 4]); - /// let mut output = [0u8; 5]; - /// - /// let bytes = reader.read(&mut output[..]).await?; - /// - /// // This is only guaranteed to be 4 because `&[u8]` is a synchronous - /// // reader. In a real system you could get anywhere from 1 to - /// // `output.len()` bytes in a single read. - /// assert_eq!(bytes, 4); - /// assert_eq!(output, [1, 2, 3, 4, 0]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn read<'a>(&'a mut self, buf: &'a mut [u8]) -> Read<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(Read::new(self, buf)) - } - - /// Creates a future which will read from the `AsyncRead` into `bufs` using vectored - /// IO operations. - /// - /// The returned future will resolve to the number of bytes read once the read - /// operation is completed. - fn read_vectored<'a>(&'a mut self, bufs: &'a mut [IoSliceMut<'a>]) -> ReadVectored<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(ReadVectored::new(self, bufs)) - } - - /// Creates a future which will read exactly enough bytes to fill `buf`, - /// returning an error if end of file (EOF) is hit sooner. - /// - /// The returned future will resolve once the read operation is completed. - /// - /// In the case of an error the buffer and the object will be discarded, with - /// the error yielded. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncReadExt, Cursor}; - /// - /// let mut reader = Cursor::new([1, 2, 3, 4]); - /// let mut output = [0u8; 4]; - /// - /// reader.read_exact(&mut output).await?; - /// - /// assert_eq!(output, [1, 2, 3, 4]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - /// - /// ## EOF is hit before `buf` is filled - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{self, AsyncReadExt, Cursor}; - /// - /// let mut reader = Cursor::new([1, 2, 3, 4]); - /// let mut output = [0u8; 5]; - /// - /// let result = reader.read_exact(&mut output).await; - /// - /// assert_eq!(result.unwrap_err().kind(), io::ErrorKind::UnexpectedEof); - /// # }); - /// ``` - fn read_exact<'a>(&'a mut self, buf: &'a mut [u8]) -> ReadExact<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(ReadExact::new(self, buf)) - } - - /// Creates a future which will read all the bytes from this `AsyncRead`. - /// - /// On success the total number of bytes read is returned. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncReadExt, Cursor}; - /// - /// let mut reader = Cursor::new([1, 2, 3, 4]); - /// let mut output = Vec::with_capacity(4); - /// - /// let bytes = reader.read_to_end(&mut output).await?; - /// - /// assert_eq!(bytes, 4); - /// assert_eq!(output, vec![1, 2, 3, 4]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn read_to_end<'a>(&'a mut self, buf: &'a mut Vec) -> ReadToEnd<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(ReadToEnd::new(self, buf)) - } - - /// Creates a future which will read all the bytes from this `AsyncRead`. - /// - /// On success the total number of bytes read is returned. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncReadExt, Cursor}; - /// - /// let mut reader = Cursor::new(&b"1234"[..]); - /// let mut buffer = String::with_capacity(4); - /// - /// let bytes = reader.read_to_string(&mut buffer).await?; - /// - /// assert_eq!(bytes, 4); - /// assert_eq!(buffer, String::from("1234")); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn read_to_string<'a>(&'a mut self, buf: &'a mut String) -> ReadToString<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(ReadToString::new(self, buf)) - } - - /// Helper method for splitting this read/write object into two halves. - /// - /// The two halves returned implement the `AsyncRead` and `AsyncWrite` - /// traits, respectively. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{self, AsyncReadExt, Cursor}; - /// - /// // Note that for `Cursor` the read and write halves share a single - /// // seek position. This may or may not be true for other types that - /// // implement both `AsyncRead` and `AsyncWrite`. - /// - /// let reader = Cursor::new([1, 2, 3, 4]); - /// let mut buffer = Cursor::new(vec![0, 0, 0, 0, 5, 6, 7, 8]); - /// let mut writer = Cursor::new(vec![0u8; 5]); - /// - /// { - /// let (buffer_reader, mut buffer_writer) = (&mut buffer).split(); - /// io::copy(reader, &mut buffer_writer).await?; - /// io::copy(buffer_reader, &mut writer).await?; - /// } - /// - /// assert_eq!(buffer.into_inner(), [1, 2, 3, 4, 5, 6, 7, 8]); - /// assert_eq!(writer.into_inner(), [5, 6, 7, 8, 0]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn split(self) -> (ReadHalf, WriteHalf) - where - Self: AsyncWrite + Sized, - { - let (r, w) = split::split(self); - (assert_read(r), assert_write(w)) - } - - /// Creates an AsyncRead adapter which will read at most `limit` bytes - /// from the underlying reader. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncReadExt, Cursor}; - /// - /// let reader = Cursor::new(&b"12345678"[..]); - /// let mut buffer = [0; 5]; - /// - /// let mut take = reader.take(4); - /// let n = take.read(&mut buffer).await?; - /// - /// assert_eq!(n, 4); - /// assert_eq!(&buffer, b"1234\0"); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn take(self, limit: u64) -> Take - where - Self: Sized, - { - assert_read(Take::new(self, limit)) - } - - /// Wraps an [`AsyncRead`] in a compatibility wrapper that allows it to be - /// used as a futures 0.1 / tokio-io 0.1 `AsyncRead`. If the wrapped type - /// implements [`AsyncWrite`] as well, the result will also implement the - /// futures 0.1 / tokio 0.1 `AsyncWrite` trait. - /// - /// Requires the `io-compat` feature to enable. - #[cfg(feature = "io-compat")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] - fn compat(self) -> Compat - where - Self: Sized + Unpin, - { - Compat::new(self) - } -} - -impl AsyncReadExt for R {} - -/// An extension trait which adds utility methods to `AsyncWrite` types. -pub trait AsyncWriteExt: AsyncWrite { - /// Creates a future which will entirely flush this `AsyncWrite`. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AllowStdIo, AsyncWriteExt}; - /// use std::io::{BufWriter, Cursor}; - /// - /// let mut output = vec![0u8; 5]; - /// - /// { - /// let writer = Cursor::new(&mut output); - /// let mut buffered = AllowStdIo::new(BufWriter::new(writer)); - /// buffered.write_all(&[1, 2]).await?; - /// buffered.write_all(&[3, 4]).await?; - /// buffered.flush().await?; - /// } - /// - /// assert_eq!(output, [1, 2, 3, 4, 0]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn flush(&mut self) -> Flush<'_, Self> - where - Self: Unpin, - { - assert_future::, _>(Flush::new(self)) - } - - /// Creates a future which will entirely close this `AsyncWrite`. 
- fn close(&mut self) -> Close<'_, Self> - where - Self: Unpin, - { - assert_future::, _>(Close::new(self)) - } - - /// Creates a future which will write bytes from `buf` into the object. - /// - /// The returned future will resolve to the number of bytes written once the write - /// operation is completed. - fn write<'a>(&'a mut self, buf: &'a [u8]) -> Write<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(Write::new(self, buf)) - } - - /// Creates a future which will write bytes from `bufs` into the object using vectored - /// IO operations. - /// - /// The returned future will resolve to the number of bytes written once the write - /// operation is completed. - fn write_vectored<'a>(&'a mut self, bufs: &'a [IoSlice<'a>]) -> WriteVectored<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(WriteVectored::new(self, bufs)) - } - - /// Write data into this object. - /// - /// Creates a future that will write the entire contents of the buffer `buf` into - /// this `AsyncWrite`. - /// - /// The returned future will not complete until all the data has been written. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncWriteExt, Cursor}; - /// - /// let mut writer = Cursor::new(vec![0u8; 5]); - /// - /// writer.write_all(&[1, 2, 3, 4]).await?; - /// - /// assert_eq!(writer.into_inner(), [1, 2, 3, 4, 0]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn write_all<'a>(&'a mut self, buf: &'a [u8]) -> WriteAll<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(WriteAll::new(self, buf)) - } - - /// Attempts to write multiple buffers into this writer. - /// - /// Creates a future that will write the entire contents of `bufs` into this - /// `AsyncWrite` using [vectored writes]. - /// - /// The returned future will not complete until all the data has been - /// written. 
- /// - /// [vectored writes]: std::io::Write::write_vectored - /// - /// # Notes - /// - /// Unlike `io::Write::write_vectored`, this takes a *mutable* reference to - /// a slice of `IoSlice`s, not an immutable one. That's because we need to - /// modify the slice to keep track of the bytes already written. - /// - /// Once this futures returns, the contents of `bufs` are unspecified, as - /// this depends on how many calls to `write_vectored` were necessary. It is - /// best to understand this function as taking ownership of `bufs` and to - /// not use `bufs` afterwards. The underlying buffers, to which the - /// `IoSlice`s point (but not the `IoSlice`s themselves), are unchanged and - /// can be reused. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::AsyncWriteExt; - /// use futures_util::io::Cursor; - /// use std::io::IoSlice; - /// - /// let mut writer = Cursor::new(Vec::new()); - /// let bufs = &mut [ - /// IoSlice::new(&[1]), - /// IoSlice::new(&[2, 3]), - /// IoSlice::new(&[4, 5, 6]), - /// ]; - /// - /// writer.write_all_vectored(bufs).await?; - /// // Note: the contents of `bufs` is now unspecified, see the Notes section. - /// - /// assert_eq!(writer.into_inner(), &[1, 2, 3, 4, 5, 6]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - #[cfg(feature = "write-all-vectored")] - fn write_all_vectored<'a>( - &'a mut self, - bufs: &'a mut [IoSlice<'a>], - ) -> WriteAllVectored<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(WriteAllVectored::new(self, bufs)) - } - - /// Wraps an [`AsyncWrite`] in a compatibility wrapper that allows it to be - /// used as a futures 0.1 / tokio-io 0.1 `AsyncWrite`. - /// Requires the `io-compat` feature to enable. 
- #[cfg(feature = "io-compat")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] - fn compat_write(self) -> Compat - where - Self: Sized + Unpin, - { - Compat::new(self) - } - - /// Allow using an [`AsyncWrite`] as a [`Sink`](futures_sink::Sink)`>`. - /// - /// This adapter produces a sink that will write each value passed to it - /// into the underlying writer. - /// - /// Note that this function consumes the given writer, returning a wrapped - /// version. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::AsyncWriteExt; - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(vec![Ok([1, 2, 3]), Ok([4, 5, 6])]); - /// - /// let mut writer = vec![]; - /// - /// stream.forward((&mut writer).into_sink()).await?; - /// - /// assert_eq!(writer, vec![1, 2, 3, 4, 5, 6]); - /// # Ok::<(), Box>(()) - /// # })?; - /// # Ok::<(), Box>(()) - /// ``` - #[cfg(feature = "sink")] - #[cfg_attr(docsrs, doc(cfg(feature = "sink")))] - fn into_sink>(self) -> IntoSink - where - Self: Sized, - { - crate::sink::assert_sink::(IntoSink::new(self)) - } -} - -impl AsyncWriteExt for W {} - -/// An extension trait which adds utility methods to `AsyncSeek` types. -pub trait AsyncSeekExt: AsyncSeek { - /// Creates a future which will seek an IO object, and then yield the - /// new position in the object and the object itself. - /// - /// In the case of an error the buffer and the object will be discarded, with - /// the error yielded. - fn seek(&mut self, pos: SeekFrom) -> Seek<'_, Self> - where - Self: Unpin, - { - assert_future::, _>(Seek::new(self, pos)) - } - - /// Creates a future which will return the current seek position from the - /// start of the stream. - /// - /// This is equivalent to `self.seek(SeekFrom::Current(0))`. 
- fn stream_position(&mut self) -> Seek<'_, Self> - where - Self: Unpin, - { - self.seek(SeekFrom::Current(0)) - } -} - -impl AsyncSeekExt for S {} - -/// An extension trait which adds utility methods to `AsyncBufRead` types. -pub trait AsyncBufReadExt: AsyncBufRead { - /// Creates a future which will wait for a non-empty buffer to be available from this I/O - /// object or EOF to be reached. - /// - /// This method is the async equivalent to [`BufRead::fill_buf`](std::io::BufRead::fill_buf). - /// - /// ```rust - /// # futures::executor::block_on(async { - /// use futures::{io::AsyncBufReadExt as _, stream::{iter, TryStreamExt as _}}; - /// - /// let mut stream = iter(vec![Ok(vec![1, 2, 3]), Ok(vec![4, 5, 6])]).into_async_read(); - /// - /// assert_eq!(stream.fill_buf().await?, vec![1, 2, 3]); - /// stream.consume_unpin(2); - /// - /// assert_eq!(stream.fill_buf().await?, vec![3]); - /// stream.consume_unpin(1); - /// - /// assert_eq!(stream.fill_buf().await?, vec![4, 5, 6]); - /// stream.consume_unpin(3); - /// - /// assert_eq!(stream.fill_buf().await?, vec![]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn fill_buf(&mut self) -> FillBuf<'_, Self> - where - Self: Unpin, - { - assert_future::, _>(FillBuf::new(self)) - } - - /// A convenience for calling [`AsyncBufRead::consume`] on [`Unpin`] IO types. 
- /// - /// ```rust - /// # futures::executor::block_on(async { - /// use futures::{io::AsyncBufReadExt as _, stream::{iter, TryStreamExt as _}}; - /// - /// let mut stream = iter(vec![Ok(vec![1, 2, 3])]).into_async_read(); - /// - /// assert_eq!(stream.fill_buf().await?, vec![1, 2, 3]); - /// stream.consume_unpin(2); - /// - /// assert_eq!(stream.fill_buf().await?, vec![3]); - /// stream.consume_unpin(1); - /// - /// assert_eq!(stream.fill_buf().await?, vec![]); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn consume_unpin(&mut self, amt: usize) - where - Self: Unpin, - { - Pin::new(self).consume(amt) - } - - /// Creates a future which will read all the bytes associated with this I/O - /// object into `buf` until the delimiter `byte` or EOF is reached. - /// This method is the async equivalent to [`BufRead::read_until`](std::io::BufRead::read_until). - /// - /// This function will read bytes from the underlying stream until the - /// delimiter or EOF is found. Once found, all bytes up to, and including, - /// the delimiter (if found) will be appended to `buf`. - /// - /// The returned future will resolve to the number of bytes read once the read - /// operation is completed. - /// - /// In the case of an error the buffer and the object will be discarded, with - /// the error yielded. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncBufReadExt, Cursor}; - /// - /// let mut cursor = Cursor::new(b"lorem-ipsum"); - /// let mut buf = vec![]; - /// - /// // cursor is at 'l' - /// let num_bytes = cursor.read_until(b'-', &mut buf).await?; - /// assert_eq!(num_bytes, 6); - /// assert_eq!(buf, b"lorem-"); - /// buf.clear(); - /// - /// // cursor is at 'i' - /// let num_bytes = cursor.read_until(b'-', &mut buf).await?; - /// assert_eq!(num_bytes, 5); - /// assert_eq!(buf, b"ipsum"); - /// buf.clear(); - /// - /// // cursor is at EOF - /// let num_bytes = cursor.read_until(b'-', &mut buf).await?; - /// assert_eq!(num_bytes, 0); - /// assert_eq!(buf, b""); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn read_until<'a>(&'a mut self, byte: u8, buf: &'a mut Vec) -> ReadUntil<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(ReadUntil::new(self, byte, buf)) - } - - /// Creates a future which will read all the bytes associated with this I/O - /// object into `buf` until a newline (the 0xA byte) or EOF is reached, - /// This method is the async equivalent to [`BufRead::read_line`](std::io::BufRead::read_line). - /// - /// This function will read bytes from the underlying stream until the - /// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes - /// up to, and including, the delimiter (if found) will be appended to - /// `buf`. - /// - /// The returned future will resolve to the number of bytes read once the read - /// operation is completed. - /// - /// In the case of an error the buffer and the object will be discarded, with - /// the error yielded. - /// - /// # Errors - /// - /// This function has the same error semantics as [`read_until`] and will - /// also return an error if the read bytes are not valid UTF-8. If an I/O - /// error is encountered then `buf` may contain some bytes already read in - /// the event that all data read so far was valid UTF-8. 
- /// - /// [`read_until`]: AsyncBufReadExt::read_until - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncBufReadExt, Cursor}; - /// - /// let mut cursor = Cursor::new(b"foo\nbar"); - /// let mut buf = String::new(); - /// - /// // cursor is at 'f' - /// let num_bytes = cursor.read_line(&mut buf).await?; - /// assert_eq!(num_bytes, 4); - /// assert_eq!(buf, "foo\n"); - /// buf.clear(); - /// - /// // cursor is at 'b' - /// let num_bytes = cursor.read_line(&mut buf).await?; - /// assert_eq!(num_bytes, 3); - /// assert_eq!(buf, "bar"); - /// buf.clear(); - /// - /// // cursor is at EOF - /// let num_bytes = cursor.read_line(&mut buf).await?; - /// assert_eq!(num_bytes, 0); - /// assert_eq!(buf, ""); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn read_line<'a>(&'a mut self, buf: &'a mut String) -> ReadLine<'a, Self> - where - Self: Unpin, - { - assert_future::, _>(ReadLine::new(self, buf)) - } - - /// Returns a stream over the lines of this reader. - /// This method is the async equivalent to [`BufRead::lines`](std::io::BufRead::lines). - /// - /// The stream returned from this function will yield instances of - /// [`io::Result`]`<`[`String`]`>`. Each string returned will *not* have a newline - /// byte (the 0xA byte) or CRLF (0xD, 0xA bytes) at the end. - /// - /// [`io::Result`]: std::io::Result - /// [`String`]: String - /// - /// # Errors - /// - /// Each line of the stream has the same error semantics as [`AsyncBufReadExt::read_line`]. 
- /// - /// [`AsyncBufReadExt::read_line`]: AsyncBufReadExt::read_line - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncBufReadExt, Cursor}; - /// use futures::stream::StreamExt; - /// - /// let cursor = Cursor::new(b"lorem\nipsum\xc2\r\ndolor"); - /// - /// let mut lines_stream = cursor.lines().map(|l| l.unwrap_or(String::from("invalid UTF_8"))); - /// assert_eq!(lines_stream.next().await, Some(String::from("lorem"))); - /// assert_eq!(lines_stream.next().await, Some(String::from("invalid UTF_8"))); - /// assert_eq!(lines_stream.next().await, Some(String::from("dolor"))); - /// assert_eq!(lines_stream.next().await, None); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - fn lines(self) -> Lines - where - Self: Sized, - { - assert_stream::, _>(Lines::new(self)) - } -} - -impl AsyncBufReadExt for R {} - -// Just a helper function to ensure the reader we're returning all have the -// right implementations. -pub(crate) fn assert_read(reader: R) -> R -where - R: AsyncRead, -{ - reader -} -// Just a helper function to ensure the writer we're returning all have the -// right implementations. -pub(crate) fn assert_write(writer: W) -> W -where - W: AsyncWrite, -{ - writer -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_exact.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_exact.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_exact.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_exact.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,42 +0,0 @@ -use crate::io::AsyncRead; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use std::io; -use std::mem; -use std::pin::Pin; - -/// Future for the [`read_exact`](super::AsyncReadExt::read_exact) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ReadExact<'a, R: ?Sized> { - reader: &'a mut R, - buf: &'a mut [u8], -} - -impl Unpin for ReadExact<'_, R> {} - -impl<'a, R: AsyncRead + ?Sized + Unpin> ReadExact<'a, R> { - pub(super) fn new(reader: &'a mut R, buf: &'a mut [u8]) -> Self { - Self { reader, buf } - } -} - -impl Future for ReadExact<'_, R> { - type Output = io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - while !this.buf.is_empty() { - let n = ready!(Pin::new(&mut this.reader).poll_read(cx, this.buf))?; - { - let (_, rest) = mem::take(&mut this.buf).split_at_mut(n); - this.buf = rest; - } - if n == 0 { - return Poll::Ready(Err(io::ErrorKind::UnexpectedEof.into())); - } - } - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_line.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_line.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_line.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_line.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -use super::read_until::read_until_internal; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncBufRead; -use std::io; -use std::mem; -use std::pin::Pin; -use std::str; - -/// Future for the [`read_line`](super::AsyncBufReadExt::read_line) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ReadLine<'a, R: ?Sized> { - reader: &'a mut R, - buf: &'a mut String, - bytes: Vec, - read: usize, -} - -impl Unpin for ReadLine<'_, R> {} - -impl<'a, R: AsyncBufRead + ?Sized + Unpin> ReadLine<'a, R> { - pub(super) fn new(reader: &'a mut R, buf: &'a mut String) -> Self { - Self { reader, bytes: mem::take(buf).into_bytes(), buf, read: 0 } - } -} - -pub(super) fn read_line_internal( - reader: Pin<&mut R>, - cx: &mut Context<'_>, - buf: &mut String, - bytes: &mut Vec, - read: &mut usize, -) -> Poll> { - let ret = ready!(read_until_internal(reader, cx, b'\n', bytes, read)); - if str::from_utf8(bytes).is_err() { - bytes.clear(); - Poll::Ready(ret.and_then(|_| { - Err(io::Error::new(io::ErrorKind::InvalidData, "stream did not contain valid UTF-8")) - })) - } else { - debug_assert!(buf.is_empty()); - debug_assert_eq!(*read, 0); - // Safety: `bytes` is a valid UTF-8 because `str::from_utf8` returned `Ok`. - mem::swap(unsafe { buf.as_mut_vec() }, bytes); - Poll::Ready(ret) - } -} - -impl Future for ReadLine<'_, R> { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let Self { reader, buf, bytes, read } = &mut *self; - read_line_internal(Pin::new(reader), cx, buf, bytes, read) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/read.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/read.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/read.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/read.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ -use crate::io::AsyncRead; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use std::io; -use std::pin::Pin; - -/// Future for the [`read`](super::AsyncReadExt::read) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Read<'a, R: ?Sized> { - reader: &'a mut R, - buf: &'a mut [u8], -} - -impl Unpin for Read<'_, R> {} - -impl<'a, R: AsyncRead + ?Sized + Unpin> Read<'a, R> { - pub(super) fn new(reader: &'a mut R, buf: &'a mut [u8]) -> Self { - Self { reader, buf } - } -} - -impl Future for Read<'_, R> { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - Pin::new(&mut this.reader).poll_read(cx, this.buf) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_to_end.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_to_end.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_to_end.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_to_end.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncRead; -use std::io; -use std::pin::Pin; -use std::vec::Vec; - -/// Future for the [`read_to_end`](super::AsyncReadExt::read_to_end) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ReadToEnd<'a, R: ?Sized> { - reader: &'a mut R, - buf: &'a mut Vec, - start_len: usize, -} - -impl Unpin for ReadToEnd<'_, R> {} - -impl<'a, R: AsyncRead + ?Sized + Unpin> ReadToEnd<'a, R> { - pub(super) fn new(reader: &'a mut R, buf: &'a mut Vec) -> Self { - let start_len = buf.len(); - Self { reader, buf, start_len } - } -} - -struct Guard<'a> { - buf: &'a mut Vec, - len: usize, -} - -impl Drop for Guard<'_> { - fn drop(&mut self) { - unsafe { - self.buf.set_len(self.len); - } - } -} - -// This uses an adaptive system to extend the vector when it fills. 
We want to -// avoid paying to allocate and zero a huge chunk of memory if the reader only -// has 4 bytes while still making large reads if the reader does have a ton -// of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every -// time is 4,500 times (!) slower than this if the reader has a very small -// amount of data to return. -// -// Because we're extending the buffer with uninitialized data for trusted -// readers, we need to make sure to truncate that if any of this panics. -pub(super) fn read_to_end_internal( - mut rd: Pin<&mut R>, - cx: &mut Context<'_>, - buf: &mut Vec, - start_len: usize, -) -> Poll> { - let mut g = Guard { len: buf.len(), buf }; - loop { - if g.len == g.buf.len() { - unsafe { - g.buf.reserve(32); - let capacity = g.buf.capacity(); - g.buf.set_len(capacity); - super::initialize(&rd, &mut g.buf[g.len..]); - } - } - - let buf = &mut g.buf[g.len..]; - match ready!(rd.as_mut().poll_read(cx, buf)) { - Ok(0) => return Poll::Ready(Ok(g.len - start_len)), - Ok(n) => { - // We can't allow bogus values from read. If it is too large, the returned vec could have its length - // set past its capacity, or if it overflows the vec could be shortened which could create an invalid - // string if this is called via read_to_string. 
- assert!(n <= buf.len()); - g.len += n; - } - Err(e) => return Poll::Ready(Err(e)), - } - } -} - -impl Future for ReadToEnd<'_, A> -where - A: AsyncRead + ?Sized + Unpin, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - read_to_end_internal(Pin::new(&mut this.reader), cx, this.buf, this.start_len) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_to_string.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_to_string.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_to_string.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_to_string.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,59 +0,0 @@ -use super::read_to_end::read_to_end_internal; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncRead; -use std::pin::Pin; -use std::vec::Vec; -use std::{io, mem, str}; - -/// Future for the [`read_to_string`](super::AsyncReadExt::read_to_string) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ReadToString<'a, R: ?Sized> { - reader: &'a mut R, - buf: &'a mut String, - bytes: Vec, - start_len: usize, -} - -impl Unpin for ReadToString<'_, R> {} - -impl<'a, R: AsyncRead + ?Sized + Unpin> ReadToString<'a, R> { - pub(super) fn new(reader: &'a mut R, buf: &'a mut String) -> Self { - let start_len = buf.len(); - Self { reader, bytes: mem::take(buf).into_bytes(), buf, start_len } - } -} - -fn read_to_string_internal( - reader: Pin<&mut R>, - cx: &mut Context<'_>, - buf: &mut String, - bytes: &mut Vec, - start_len: usize, -) -> Poll> { - let ret = ready!(read_to_end_internal(reader, cx, bytes, start_len)); - if str::from_utf8(bytes).is_err() { - Poll::Ready(ret.and_then(|_| { - Err(io::Error::new(io::ErrorKind::InvalidData, "stream did not contain valid UTF-8")) - })) - } else { - debug_assert!(buf.is_empty()); - // Safety: `bytes` is a valid UTF-8 because `str::from_utf8` returned `Ok`. 
- mem::swap(unsafe { buf.as_mut_vec() }, bytes); - Poll::Ready(ret) - } -} - -impl Future for ReadToString<'_, A> -where - A: AsyncRead + ?Sized + Unpin, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let Self { reader, buf, bytes, start_len } = &mut *self; - read_to_string_internal(Pin::new(reader), cx, buf, bytes, *start_len) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_until.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_until.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_until.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_until.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncBufRead; -use std::io; -use std::mem; -use std::pin::Pin; - -/// Future for the [`read_until`](super::AsyncBufReadExt::read_until) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ReadUntil<'a, R: ?Sized> { - reader: &'a mut R, - byte: u8, - buf: &'a mut Vec, - read: usize, -} - -impl Unpin for ReadUntil<'_, R> {} - -impl<'a, R: AsyncBufRead + ?Sized + Unpin> ReadUntil<'a, R> { - pub(super) fn new(reader: &'a mut R, byte: u8, buf: &'a mut Vec) -> Self { - Self { reader, byte, buf, read: 0 } - } -} - -pub(super) fn read_until_internal( - mut reader: Pin<&mut R>, - cx: &mut Context<'_>, - byte: u8, - buf: &mut Vec, - read: &mut usize, -) -> Poll> { - loop { - let (done, used) = { - let available = ready!(reader.as_mut().poll_fill_buf(cx))?; - if let Some(i) = memchr::memchr(byte, available) { - buf.extend_from_slice(&available[..=i]); - (true, i + 1) - } else { - buf.extend_from_slice(available); - (false, available.len()) - } - }; - reader.as_mut().consume(used); - *read += used; - if done || used == 0 { - return Poll::Ready(Ok(mem::replace(read, 0))); - } - } -} - -impl Future for ReadUntil<'_, R> { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let Self { reader, byte, buf, read } = &mut *self; - read_until_internal(Pin::new(reader), cx, *byte, buf, read) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_vectored.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_vectored.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/read_vectored.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/read_vectored.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ -use crate::io::AsyncRead; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use std::io::{self, IoSliceMut}; -use std::pin::Pin; - -/// Future for the [`read_vectored`](super::AsyncReadExt::read_vectored) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ReadVectored<'a, R: ?Sized> { - reader: &'a mut R, - bufs: &'a mut [IoSliceMut<'a>], -} - -impl Unpin for ReadVectored<'_, R> {} - -impl<'a, R: AsyncRead + ?Sized + Unpin> ReadVectored<'a, R> { - pub(super) fn new(reader: &'a mut R, bufs: &'a mut [IoSliceMut<'a>]) -> Self { - Self { reader, bufs } - } -} - -impl Future for ReadVectored<'_, R> { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - Pin::new(&mut this.reader).poll_read_vectored(cx, this.bufs) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/repeat.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/repeat.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/repeat.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/repeat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,66 +0,0 @@ -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncRead, IoSliceMut}; -use std::fmt; -use std::io; -use std::pin::Pin; - -/// Reader for the [`repeat()`] function. -#[must_use = "readers do nothing unless polled"] -pub struct Repeat { - byte: u8, -} - -/// Creates an instance of a reader that infinitely repeats one byte. -/// -/// All reads from this reader will succeed by filling the specified buffer with -/// the given byte. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::io::{self, AsyncReadExt}; -/// -/// let mut buffer = [0; 3]; -/// let mut reader = io::repeat(0b101); -/// reader.read_exact(&mut buffer).await.unwrap(); -/// assert_eq!(buffer, [0b101, 0b101, 0b101]); -/// # Ok::<(), Box>(()) }).unwrap(); -/// ``` -pub fn repeat(byte: u8) -> Repeat { - Repeat { byte } -} - -impl AsyncRead for Repeat { - #[inline] - fn poll_read( - self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - for slot in &mut *buf { - *slot = self.byte; - } - Poll::Ready(Ok(buf.len())) - } - - #[inline] - fn poll_read_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - let mut nwritten = 0; - for buf in bufs { - nwritten += ready!(self.as_mut().poll_read(cx, buf))?; - } - Poll::Ready(Ok(nwritten)) - } -} - -impl fmt::Debug for Repeat { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Repeat { .. }") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/seek.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/seek.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/seek.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/seek.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ -use crate::io::{AsyncSeek, SeekFrom}; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use std::io; -use std::pin::Pin; - -/// Future for the [`seek`](crate::io::AsyncSeekExt::seek) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Seek<'a, S: ?Sized> { - seek: &'a mut S, - pos: SeekFrom, -} - -impl Unpin for Seek<'_, S> {} - -impl<'a, S: AsyncSeek + ?Sized + Unpin> Seek<'a, S> { - pub(super) fn new(seek: &'a mut S, pos: SeekFrom) -> Self { - Self { seek, pos } - } -} - -impl Future for Seek<'_, S> { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - Pin::new(&mut this.seek).poll_seek(cx, this.pos) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/sink.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/sink.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/sink.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/sink.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,67 +0,0 @@ -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncWrite, IoSlice}; -use std::fmt; -use std::io; -use std::pin::Pin; - -/// Writer for the [`sink()`] function. -#[must_use = "writers do nothing unless polled"] -pub struct Sink { - _priv: (), -} - -/// Creates an instance of a writer which will successfully consume all data. -/// -/// All calls to `poll_write` on the returned instance will return `Poll::Ready(Ok(buf.len()))` -/// and the contents of the buffer will not be inspected. 
-/// -/// # Examples -/// -/// ```rust -/// # futures::executor::block_on(async { -/// use futures::io::{self, AsyncWriteExt}; -/// -/// let buffer = vec![1, 2, 3, 5, 8]; -/// let mut writer = io::sink(); -/// let num_bytes = writer.write(&buffer).await?; -/// assert_eq!(num_bytes, 5); -/// # Ok::<(), Box>(()) }).unwrap(); -/// ``` -pub fn sink() -> Sink { - Sink { _priv: () } -} - -impl AsyncWrite for Sink { - #[inline] - fn poll_write( - self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(Ok(buf.len())) - } - - #[inline] - fn poll_write_vectored( - self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Poll::Ready(Ok(bufs.iter().map(|b| b.len()).sum())) - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - #[inline] - fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -impl fmt::Debug for Sink { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Sink { .. }") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/split.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/split.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/split.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/split.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,115 +0,0 @@ -use crate::lock::BiLock; -use core::fmt; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncRead, AsyncWrite, IoSlice, IoSliceMut}; -use std::io; -use std::pin::Pin; - -/// The readable half of an object returned from `AsyncRead::split`. -#[derive(Debug)] -pub struct ReadHalf { - handle: BiLock, -} - -/// The writable half of an object returned from `AsyncRead::split`. 
-#[derive(Debug)] -pub struct WriteHalf { - handle: BiLock, -} - -fn lock_and_then(lock: &BiLock, cx: &mut Context<'_>, f: F) -> Poll> -where - F: FnOnce(Pin<&mut T>, &mut Context<'_>) -> Poll>, -{ - let mut l = ready!(lock.poll_lock(cx)); - f(l.as_pin_mut(), cx) -} - -pub(super) fn split(t: T) -> (ReadHalf, WriteHalf) { - let (a, b) = BiLock::new(t); - (ReadHalf { handle: a }, WriteHalf { handle: b }) -} - -impl ReadHalf { - /// Attempts to put the two "halves" of a split `AsyncRead + AsyncWrite` back - /// together. Succeeds only if the `ReadHalf` and `WriteHalf` are - /// a matching pair originating from the same call to `AsyncReadExt::split`. - pub fn reunite(self, other: WriteHalf) -> Result> { - self.handle - .reunite(other.handle) - .map_err(|err| ReuniteError(ReadHalf { handle: err.0 }, WriteHalf { handle: err.1 })) - } -} - -impl WriteHalf { - /// Attempts to put the two "halves" of a split `AsyncRead + AsyncWrite` back - /// together. Succeeds only if the `ReadHalf` and `WriteHalf` are - /// a matching pair originating from the same call to `AsyncReadExt::split`. 
- pub fn reunite(self, other: ReadHalf) -> Result> { - other.reunite(self) - } -} - -impl AsyncRead for ReadHalf { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - lock_and_then(&self.handle, cx, |l, cx| l.poll_read(cx, buf)) - } - - fn poll_read_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &mut [IoSliceMut<'_>], - ) -> Poll> { - lock_and_then(&self.handle, cx, |l, cx| l.poll_read_vectored(cx, bufs)) - } -} - -impl AsyncWrite for WriteHalf { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - lock_and_then(&self.handle, cx, |l, cx| l.poll_write(cx, buf)) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - lock_and_then(&self.handle, cx, |l, cx| l.poll_write_vectored(cx, bufs)) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - lock_and_then(&self.handle, cx, |l, cx| l.poll_flush(cx)) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - lock_and_then(&self.handle, cx, |l, cx| l.poll_close(cx)) - } -} - -/// Error indicating a `ReadHalf` and `WriteHalf` were not two halves -/// of a `AsyncRead + AsyncWrite`, and thus could not be `reunite`d. 
-pub struct ReuniteError(pub ReadHalf, pub WriteHalf); - -impl fmt::Debug for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("ReuniteError").field(&"...").finish() - } -} - -impl fmt::Display for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "tried to reunite a ReadHalf and WriteHalf that don't form a pair") - } -} - -#[cfg(feature = "std")] -impl std::error::Error for ReuniteError {} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/take.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/take.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/take.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/take.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,125 +0,0 @@ -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncRead}; -use pin_project_lite::pin_project; -use std::pin::Pin; -use std::{cmp, io}; - -pin_project! { - /// Reader for the [`take`](super::AsyncReadExt::take) method. - #[derive(Debug)] - #[must_use = "readers do nothing unless you `.await` or poll them"] - pub struct Take { - #[pin] - inner: R, - limit: u64, - } -} - -impl Take { - pub(super) fn new(inner: R, limit: u64) -> Self { - Self { inner, limit } - } - - /// Returns the remaining number of bytes that can be - /// read before this instance will return EOF. - /// - /// # Note - /// - /// This instance may reach `EOF` after reading fewer bytes than indicated by - /// this method if the underlying [`AsyncRead`] instance reaches EOF. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncReadExt, Cursor}; - /// - /// let reader = Cursor::new(&b"12345678"[..]); - /// let mut buffer = [0; 2]; - /// - /// let mut take = reader.take(4); - /// let n = take.read(&mut buffer).await?; - /// - /// assert_eq!(take.limit(), 2); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - pub fn limit(&self) -> u64 { - self.limit - } - - /// Sets the number of bytes that can be read before this instance will - /// return EOF. This is the same as constructing a new `Take` instance, so - /// the amount of bytes read and the previous limit value don't matter when - /// calling this method. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::io::{AsyncReadExt, Cursor}; - /// - /// let reader = Cursor::new(&b"12345678"[..]); - /// let mut buffer = [0; 4]; - /// - /// let mut take = reader.take(4); - /// let n = take.read(&mut buffer).await?; - /// - /// assert_eq!(n, 4); - /// assert_eq!(take.limit(), 0); - /// - /// take.set_limit(10); - /// let n = take.read(&mut buffer).await?; - /// assert_eq!(n, 4); - /// - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - pub fn set_limit(&mut self, limit: u64) { - self.limit = limit - } - - delegate_access_inner!(inner, R, ()); -} - -impl AsyncRead for Take { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - let this = self.project(); - - if *this.limit == 0 { - return Poll::Ready(Ok(0)); - } - - let max = cmp::min(buf.len() as u64, *this.limit) as usize; - let n = ready!(this.inner.poll_read(cx, &mut buf[..max]))?; - *this.limit -= n as u64; - Poll::Ready(Ok(n)) - } -} - -impl AsyncBufRead for Take { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - // Don't call into inner reader at all at EOF because it may still block - if *this.limit == 0 { - return 
Poll::Ready(Ok(&[])); - } - - let buf = ready!(this.inner.poll_fill_buf(cx)?); - let cap = cmp::min(buf.len() as u64, *this.limit) as usize; - Poll::Ready(Ok(&buf[..cap])) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - let this = self.project(); - - // Don't let callers reset the limit by passing an overlarge value - let amt = cmp::min(amt as u64, *this.limit) as usize; - *this.limit -= amt as u64; - this.inner.consume(amt); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/window.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/window.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/window.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/window.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,104 +0,0 @@ -use std::ops::{Bound, Range, RangeBounds}; - -/// An owned window around an underlying buffer. -/// -/// Normally slices work great for considering sub-portions of a buffer, but -/// unfortunately a slice is a *borrowed* type in Rust which has an associated -/// lifetime. When working with future and async I/O these lifetimes are not -/// always appropriate, and are sometimes difficult to store in tasks. This -/// type strives to fill this gap by providing an "owned slice" around an -/// underlying buffer of bytes. -/// -/// A `Window` wraps an underlying buffer, `T`, and has configurable -/// start/end indexes to alter the behavior of the `AsRef<[u8]>` implementation -/// that this type carries. -/// -/// This type can be particularly useful when working with the `write_all` -/// combinator in this crate. Data can be sliced via `Window`, consumed by -/// `write_all`, and then earned back once the write operation finishes through -/// the `into_inner` method on this type. -#[derive(Debug)] -pub struct Window { - inner: T, - range: Range, -} - -impl> Window { - /// Creates a new window around the buffer `t` defaulting to the entire - /// slice. 
- /// - /// Further methods can be called on the returned `Window` to alter the - /// window into the data provided. - pub fn new(t: T) -> Self { - Self { range: 0..t.as_ref().len(), inner: t } - } - - /// Gets a shared reference to the underlying buffer inside of this - /// `Window`. - pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Gets a mutable reference to the underlying buffer inside of this - /// `Window`. - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Consumes this `Window`, returning the underlying buffer. - pub fn into_inner(self) -> T { - self.inner - } - - /// Returns the starting index of this window into the underlying buffer - /// `T`. - pub fn start(&self) -> usize { - self.range.start - } - - /// Returns the end index of this window into the underlying buffer - /// `T`. - pub fn end(&self) -> usize { - self.range.end - } - - /// Changes the range of this window to the range specified. - /// - /// # Panics - /// - /// This method will panic if `range` is out of bounds for the underlying - /// slice or if [`start_bound()`] of `range` comes after the [`end_bound()`]. 
- /// - /// [`start_bound()`]: std::ops::RangeBounds::start_bound - /// [`end_bound()`]: std::ops::RangeBounds::end_bound - pub fn set>(&mut self, range: R) { - let start = match range.start_bound() { - Bound::Included(n) => *n, - Bound::Excluded(n) => *n + 1, - Bound::Unbounded => 0, - }; - let end = match range.end_bound() { - Bound::Included(n) => *n + 1, - Bound::Excluded(n) => *n, - Bound::Unbounded => self.inner.as_ref().len(), - }; - - assert!(end <= self.inner.as_ref().len()); - assert!(start <= end); - - self.range.start = start; - self.range.end = end; - } -} - -impl> AsRef<[u8]> for Window { - fn as_ref(&self) -> &[u8] { - &self.inner.as_ref()[self.range.start..self.range.end] - } -} - -impl> AsMut<[u8]> for Window { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.inner.as_mut()[self.range.start..self.range.end] - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/write_all.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/write_all.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/write_all.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/write_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncWrite; -use std::io; -use std::mem; -use std::pin::Pin; - -/// Future for the [`write_all`](super::AsyncWriteExt::write_all) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct WriteAll<'a, W: ?Sized> { - writer: &'a mut W, - buf: &'a [u8], -} - -impl Unpin for WriteAll<'_, W> {} - -impl<'a, W: AsyncWrite + ?Sized + Unpin> WriteAll<'a, W> { - pub(super) fn new(writer: &'a mut W, buf: &'a [u8]) -> Self { - Self { writer, buf } - } -} - -impl Future for WriteAll<'_, W> { - type Output = io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = &mut *self; - while !this.buf.is_empty() { - let n = ready!(Pin::new(&mut this.writer).poll_write(cx, this.buf))?; - { - let (_, rest) = mem::take(&mut this.buf).split_at(n); - this.buf = rest; - } - if n == 0 { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - } - - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/write_all_vectored.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/write_all_vectored.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/write_all_vectored.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/write_all_vectored.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,193 +0,0 @@ -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_io::AsyncWrite; -use futures_io::IoSlice; -use std::io; -use std::pin::Pin; - -/// Future for the -/// [`write_all_vectored`](super::AsyncWriteExt::write_all_vectored) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct WriteAllVectored<'a, W: ?Sized + Unpin> { - writer: &'a mut W, - bufs: &'a mut [IoSlice<'a>], -} - -impl Unpin for WriteAllVectored<'_, W> {} - -impl<'a, W: AsyncWrite + ?Sized + Unpin> WriteAllVectored<'a, W> { - pub(super) fn new(writer: &'a mut W, mut bufs: &'a mut [IoSlice<'a>]) -> Self { - IoSlice::advance_slices(&mut bufs, 0); - Self { writer, bufs } - } -} - -impl Future for WriteAllVectored<'_, W> { - type Output = io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = &mut *self; - while !this.bufs.is_empty() { - let n = ready!(Pin::new(&mut this.writer).poll_write_vectored(cx, this.bufs))?; - if n == 0 { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } else { - IoSlice::advance_slices(&mut this.bufs, n); - } - } - - Poll::Ready(Ok(())) - } -} - -#[cfg(test)] -mod tests { - use std::cmp::min; - use std::future::Future; - use std::io; - use std::pin::Pin; - use std::task::{Context, Poll}; - - use crate::io::{AsyncWrite, AsyncWriteExt, IoSlice}; - use crate::task::noop_waker; - - /// Create a new writer that reads from at most `n_bufs` and reads - /// `per_call` bytes (in total) per call to write. - fn test_writer(n_bufs: usize, per_call: usize) -> TestWriter { - TestWriter { n_bufs, per_call, written: Vec::new() } - } - - // TODO: maybe move this the future-test crate? 
- struct TestWriter { - n_bufs: usize, - per_call: usize, - written: Vec, - } - - impl AsyncWrite for TestWriter { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.poll_write_vectored(cx, &[IoSlice::new(buf)]) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - let mut left = self.per_call; - let mut written = 0; - for buf in bufs.iter().take(self.n_bufs) { - let n = min(left, buf.len()); - self.written.extend_from_slice(&buf[0..n]); - left -= n; - written += n; - } - Poll::Ready(Ok(written)) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - } - - // TODO: maybe move this the future-test crate? - macro_rules! assert_poll_ok { - ($e:expr, $expected:expr) => { - let expected = $expected; - match $e { - Poll::Ready(Ok(ok)) if ok == expected => {} - got => { - panic!("unexpected result, got: {:?}, wanted: Ready(Ok({:?}))", got, expected) - } - } - }; - } - - #[test] - fn test_writer_read_from_one_buf() { - let waker = noop_waker(); - let mut cx = Context::from_waker(&waker); - - let mut dst = test_writer(1, 2); - let mut dst = Pin::new(&mut dst); - - assert_poll_ok!(dst.as_mut().poll_write(&mut cx, &[]), 0); - assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, &[]), 0); - - // Read at most 2 bytes. - assert_poll_ok!(dst.as_mut().poll_write(&mut cx, &[1, 1, 1]), 2); - let bufs = &[IoSlice::new(&[2, 2, 2])]; - assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, bufs), 2); - - // Only read from first buf. 
- let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4, 4])]; - assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, bufs), 1); - - assert_eq!(dst.written, &[1, 1, 2, 2, 3]); - } - - #[test] - fn test_writer_read_from_multiple_bufs() { - let waker = noop_waker(); - let mut cx = Context::from_waker(&waker); - - let mut dst = test_writer(3, 3); - let mut dst = Pin::new(&mut dst); - - // Read at most 3 bytes from two buffers. - let bufs = &[IoSlice::new(&[1]), IoSlice::new(&[2, 2, 2])]; - assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, bufs), 3); - - // Read at most 3 bytes from three buffers. - let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4]), IoSlice::new(&[5, 5])]; - assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, bufs), 3); - - assert_eq!(dst.written, &[1, 2, 2, 3, 4, 5]); - } - - #[test] - fn test_write_all_vectored() { - let waker = noop_waker(); - let mut cx = Context::from_waker(&waker); - - #[rustfmt::skip] // Becomes unreadable otherwise. - let tests: Vec<(_, &'static [u8])> = vec![ - (vec![], &[]), - (vec![IoSlice::new(&[]), IoSlice::new(&[])], &[]), - (vec![IoSlice::new(&[1])], &[1]), - (vec![IoSlice::new(&[1, 2])], &[1, 2]), - (vec![IoSlice::new(&[1, 2, 3])], &[1, 2, 3]), - (vec![IoSlice::new(&[1, 2, 3, 4])], &[1, 2, 3, 4]), - (vec![IoSlice::new(&[1, 2, 3, 4, 5])], &[1, 2, 3, 4, 5]), - (vec![IoSlice::new(&[1]), IoSlice::new(&[2])], &[1, 2]), - (vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2])], &[1, 1, 2, 2]), - (vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 1, 2, 2, 2]), - (vec![IoSlice::new(&[1, 1, 1, 1]), IoSlice::new(&[2, 2, 2, 2])], &[1, 1, 1, 1, 2, 2, 2, 2]), - (vec![IoSlice::new(&[1]), IoSlice::new(&[2]), IoSlice::new(&[3])], &[1, 2, 3]), - (vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2]), IoSlice::new(&[3, 3])], &[1, 1, 2, 2, 3, 3]), - (vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2]), IoSlice::new(&[3, 3, 3])], &[1, 1, 1, 2, 2, 2, 3, 3, 3]), - ]; - - for (mut input, wanted) in tests { - let 
mut dst = test_writer(2, 2); - { - let mut future = dst.write_all_vectored(&mut *input); - match Pin::new(&mut future).poll(&mut cx) { - Poll::Ready(Ok(())) => {} - other => panic!("unexpected result polling future: {:?}", other), - } - } - assert_eq!(&*dst.written, &*wanted); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/write.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/write.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/write.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/write.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ -use crate::io::AsyncWrite; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use std::io; -use std::pin::Pin; - -/// Future for the [`write`](super::AsyncWriteExt::write) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Write<'a, W: ?Sized> { - writer: &'a mut W, - buf: &'a [u8], -} - -impl Unpin for Write<'_, W> {} - -impl<'a, W: AsyncWrite + ?Sized + Unpin> Write<'a, W> { - pub(super) fn new(writer: &'a mut W, buf: &'a [u8]) -> Self { - Self { writer, buf } - } -} - -impl Future for Write<'_, W> { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - Pin::new(&mut this.writer).poll_write(cx, this.buf) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/io/write_vectored.rs s390-tools-2.33.1/rust-vendor/futures-util/src/io/write_vectored.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/io/write_vectored.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/io/write_vectored.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ -use crate::io::AsyncWrite; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use std::io::{self, IoSlice}; -use std::pin::Pin; - -/// Future for the 
[`write_vectored`](super::AsyncWriteExt::write_vectored) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct WriteVectored<'a, W: ?Sized> { - writer: &'a mut W, - bufs: &'a [IoSlice<'a>], -} - -impl Unpin for WriteVectored<'_, W> {} - -impl<'a, W: AsyncWrite + ?Sized + Unpin> WriteVectored<'a, W> { - pub(super) fn new(writer: &'a mut W, bufs: &'a [IoSlice<'a>]) -> Self { - Self { writer, bufs } - } -} - -impl Future for WriteVectored<'_, W> { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - Pin::new(&mut this.writer).poll_write_vectored(cx, this.bufs) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/lib.rs s390-tools-2.33.1/rust-vendor/futures-util/src/lib.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,337 +0,0 @@ -//! Combinators and utilities for working with `Future`s, `Stream`s, `Sink`s, -//! and the `AsyncRead` and `AsyncWrite` traits. 
- -#![cfg_attr(feature = "write-all-vectored", feature(io_slice_advance))] -#![cfg_attr(not(feature = "std"), no_std)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - single_use_lifetimes, - unreachable_pub -)] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![cfg_attr(docsrs, feature(doc_cfg))] - -#[cfg(all(feature = "bilock", not(feature = "unstable")))] -compile_error!("The `bilock` feature requires the `unstable` feature as an explicit opt-in to unstable features"); - -#[cfg(feature = "alloc")] -extern crate alloc; - -// Macro re-exports -pub use futures_core::ready; -pub use pin_utils::pin_mut; - -#[cfg(feature = "async-await")] -#[macro_use] -mod async_await; -#[cfg(feature = "async-await")] -#[doc(hidden)] -pub use self::async_await::*; - -// Not public API. -#[cfg(feature = "async-await")] -#[doc(hidden)] -pub mod __private { - pub use crate::*; - pub use core::{ - option::Option::{self, None, Some}, - pin::Pin, - result::Result::{Err, Ok}, - }; - - pub mod async_await { - pub use crate::async_await::*; - } -} - -#[cfg(feature = "sink")] -macro_rules! delegate_sink { - ($field:ident, $item:ty) => { - fn poll_ready( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - ) -> core::task::Poll> { - self.project().$field.poll_ready(cx) - } - - fn start_send(self: core::pin::Pin<&mut Self>, item: $item) -> Result<(), Self::Error> { - self.project().$field.start_send(item) - } - - fn poll_flush( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - ) -> core::task::Poll> { - self.project().$field.poll_flush(cx) - } - - fn poll_close( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - ) -> core::task::Poll> { - self.project().$field.poll_close(cx) - } - }; -} - -macro_rules! 
delegate_future { - ($field:ident) => { - fn poll( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - ) -> core::task::Poll { - self.project().$field.poll(cx) - } - }; -} - -macro_rules! delegate_stream { - ($field:ident) => { - fn poll_next( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - ) -> core::task::Poll> { - self.project().$field.poll_next(cx) - } - fn size_hint(&self) -> (usize, Option) { - self.$field.size_hint() - } - }; -} - -#[cfg(feature = "io")] -#[cfg(feature = "std")] -macro_rules! delegate_async_write { - ($field:ident) => { - fn poll_write( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - buf: &[u8], - ) -> core::task::Poll> { - self.project().$field.poll_write(cx, buf) - } - fn poll_write_vectored( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - bufs: &[std::io::IoSlice<'_>], - ) -> core::task::Poll> { - self.project().$field.poll_write_vectored(cx, bufs) - } - fn poll_flush( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - ) -> core::task::Poll> { - self.project().$field.poll_flush(cx) - } - fn poll_close( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - ) -> core::task::Poll> { - self.project().$field.poll_close(cx) - } - }; -} - -#[cfg(feature = "io")] -#[cfg(feature = "std")] -macro_rules! delegate_async_read { - ($field:ident) => { - fn poll_read( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - buf: &mut [u8], - ) -> core::task::Poll> { - self.project().$field.poll_read(cx, buf) - } - - fn poll_read_vectored( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - bufs: &mut [std::io::IoSliceMut<'_>], - ) -> core::task::Poll> { - self.project().$field.poll_read_vectored(cx, bufs) - } - }; -} - -#[cfg(feature = "io")] -#[cfg(feature = "std")] -macro_rules! 
delegate_async_buf_read { - ($field:ident) => { - fn poll_fill_buf( - self: core::pin::Pin<&mut Self>, - cx: &mut core::task::Context<'_>, - ) -> core::task::Poll> { - self.project().$field.poll_fill_buf(cx) - } - - fn consume(self: core::pin::Pin<&mut Self>, amt: usize) { - self.project().$field.consume(amt) - } - }; -} - -macro_rules! delegate_access_inner { - ($field:ident, $inner:ty, ($($ind:tt)*)) => { - /// Acquires a reference to the underlying sink or stream that this combinator is - /// pulling from. - pub fn get_ref(&self) -> &$inner { - (&self.$field) $($ind get_ref())* - } - - /// Acquires a mutable reference to the underlying sink or stream that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// sink or stream which may otherwise confuse this combinator. - pub fn get_mut(&mut self) -> &mut $inner { - (&mut self.$field) $($ind get_mut())* - } - - /// Acquires a pinned mutable reference to the underlying sink or stream that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// sink or stream which may otherwise confuse this combinator. - pub fn get_pin_mut(self: core::pin::Pin<&mut Self>) -> core::pin::Pin<&mut $inner> { - self.project().$field $($ind get_pin_mut())* - } - - /// Consumes this combinator, returning the underlying sink or stream. - /// - /// Note that this may discard intermediate state of this combinator, so - /// care should be taken to avoid losing resources when this is called. - pub fn into_inner(self) -> $inner { - self.$field $($ind into_inner())* - } - } -} - -macro_rules! 
delegate_all { - (@trait Future $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => { - impl<$($arg),*> futures_core::future::Future for $name<$($arg),*> where $t: futures_core::future::Future $(, $($bound)*)* { - type Output = <$t as futures_core::future::Future>::Output; - - delegate_future!(inner); - } - }; - (@trait FusedFuture $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => { - impl<$($arg),*> futures_core::future::FusedFuture for $name<$($arg),*> where $t: futures_core::future::FusedFuture $(, $($bound)*)* { - fn is_terminated(&self) -> bool { - self.inner.is_terminated() - } - } - }; - (@trait Stream $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => { - impl<$($arg),*> futures_core::stream::Stream for $name<$($arg),*> where $t: futures_core::stream::Stream $(, $($bound)*)* { - type Item = <$t as futures_core::stream::Stream>::Item; - - delegate_stream!(inner); - } - }; - (@trait FusedStream $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => { - impl<$($arg),*> futures_core::stream::FusedStream for $name<$($arg),*> where $t: futures_core::stream::FusedStream $(, $($bound)*)* { - fn is_terminated(&self) -> bool { - self.inner.is_terminated() - } - } - }; - (@trait Sink $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => { - #[cfg(feature = "sink")] - impl<_Item, $($arg),*> futures_sink::Sink<_Item> for $name<$($arg),*> where $t: futures_sink::Sink<_Item> $(, $($bound)*)* { - type Error = <$t as futures_sink::Sink<_Item>>::Error; - - delegate_sink!(inner, _Item); - } - }; - (@trait Debug $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => { - impl<$($arg),*> core::fmt::Debug for $name<$($arg),*> where $t: core::fmt::Debug $(, $($bound)*)* { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - core::fmt::Debug::fmt(&self.inner, f) - } - } - }; - (@trait AccessInner[$inner:ty, ($($ind:tt)*)] $name:ident < $($arg:ident),* > ($t:ty) $(where 
$($bound:tt)*)*) => { - impl<$($arg),*> $name<$($arg),*> $(where $($bound)*)* { - delegate_access_inner!(inner, $inner, ($($ind)*)); - } - }; - (@trait New[|$($param:ident: $paramt:ty),*| $cons:expr] $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => { - impl<$($arg),*> $name<$($arg),*> $(where $($bound)*)* { - pub(crate) fn new($($param: $paramt),*) -> Self { - Self { inner: $cons } - } - } - }; - ($(#[$attr:meta])* $name:ident<$($arg:ident),*>($t:ty) : $ftrait:ident $([$($targs:tt)*])* $({$($item:tt)*})* $(where $($bound:tt)*)*) => { - pin_project_lite::pin_project! { - #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] - $(#[$attr])* - pub struct $name< $($arg),* > $(where $($bound)*)* { #[pin] inner: $t } - } - - impl<$($arg),*> $name< $($arg),* > $(where $($bound)*)* { - $($($item)*)* - } - - delegate_all!(@trait $ftrait $([$($targs)*])* $name<$($arg),*>($t) $(where $($bound)*)*); - }; - ($(#[$attr:meta])* $name:ident<$($arg:ident),*>($t:ty) : $ftrait:ident $([$($ftargs:tt)*])* + $strait:ident $([$($stargs:tt)*])* $(+ $trait:ident $([$($targs:tt)*])*)* $({$($item:tt)*})* $(where $($bound:tt)*)*) => { - delegate_all!($(#[$attr])* $name<$($arg),*>($t) : $strait $([$($stargs)*])* $(+ $trait $([$($targs)*])*)* $({$($item)*})* $(where $($bound)*)*); - - delegate_all!(@trait $ftrait $([$($ftargs)*])* $name<$($arg),*>($t) $(where $($bound)*)*); - }; -} - -pub mod future; -#[doc(no_inline)] -pub use crate::future::{Future, FutureExt, TryFuture, TryFutureExt}; - -pub mod stream; -#[doc(no_inline)] -pub use crate::stream::{Stream, StreamExt, TryStream, TryStreamExt}; - -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub mod sink; -#[cfg(feature = "sink")] -#[doc(no_inline)] -pub use crate::sink::{Sink, SinkExt}; - -pub mod task; - -pub mod never; - -#[cfg(feature = "compat")] -#[cfg_attr(docsrs, doc(cfg(feature = "compat")))] -pub mod compat; - -#[cfg(feature = "io")] -#[cfg_attr(docsrs, 
doc(cfg(feature = "io")))] -#[cfg(feature = "std")] -pub mod io; -#[cfg(feature = "io")] -#[cfg(feature = "std")] -#[doc(no_inline)] -pub use crate::io::{ - AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWrite, - AsyncWriteExt, -}; - -#[cfg(feature = "alloc")] -pub mod lock; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod abortable; - -mod fns; -mod unfold_state; diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/lock/bilock.rs s390-tools-2.33.1/rust-vendor/futures-util/src/lock/bilock.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/lock/bilock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/lock/bilock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,293 +0,0 @@ -//! Futures-powered synchronization primitives. - -use alloc::boxed::Box; -use alloc::sync::Arc; -use core::cell::UnsafeCell; -use core::ops::{Deref, DerefMut}; -use core::pin::Pin; -use core::sync::atomic::AtomicPtr; -use core::sync::atomic::Ordering::SeqCst; -use core::{fmt, ptr}; -#[cfg(feature = "bilock")] -use futures_core::future::Future; -use futures_core::task::{Context, Poll, Waker}; - -/// A type of futures-powered synchronization primitive which is a mutex between -/// two possible owners. -/// -/// This primitive is not as generic as a full-blown mutex but is sufficient for -/// many use cases where there are only two possible owners of a resource. The -/// implementation of `BiLock` can be more optimized for just the two possible -/// owners. -/// -/// Note that it's possible to use this lock through a poll-style interface with -/// the `poll_lock` method but you can also use it as a future with the `lock` -/// method that consumes a `BiLock` and returns a future that will resolve when -/// it's locked. -/// -/// A `BiLock` is typically used for "split" operations where data which serves -/// two purposes wants to be split into two to be worked with separately. 
For -/// example a TCP stream could be both a reader and a writer or a framing layer -/// could be both a stream and a sink for messages. A `BiLock` enables splitting -/// these two and then using each independently in a futures-powered fashion. -/// -/// This type is only available when the `bilock` feature of this -/// library is activated. -#[derive(Debug)] -#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))] -pub struct BiLock { - arc: Arc>, -} - -#[derive(Debug)] -struct Inner { - state: AtomicPtr, - value: Option>, -} - -unsafe impl Send for Inner {} -unsafe impl Sync for Inner {} - -impl BiLock { - /// Creates a new `BiLock` protecting the provided data. - /// - /// Two handles to the lock are returned, and these are the only two handles - /// that will ever be available to the lock. These can then be sent to separate - /// tasks to be managed there. - /// - /// The data behind the bilock is considered to be pinned, which allows `Pin` - /// references to locked data. However, this means that the locked value - /// will only be available through `Pin<&mut T>` (not `&mut T`) unless `T` is `Unpin`. - /// Similarly, reuniting the lock and extracting the inner value is only - /// possible when `T` is `Unpin`. - pub fn new(t: T) -> (Self, Self) { - let arc = Arc::new(Inner { - state: AtomicPtr::new(ptr::null_mut()), - value: Some(UnsafeCell::new(t)), - }); - - (Self { arc: arc.clone() }, Self { arc }) - } - - /// Attempt to acquire this lock, returning `Pending` if it can't be - /// acquired. - /// - /// This function will acquire the lock in a nonblocking fashion, returning - /// immediately if the lock is already held. If the lock is successfully - /// acquired then `Poll::Ready` is returned with a value that represents - /// the locked value (and can be used to access the protected data). The - /// lock is unlocked when the returned `BiLockGuard` is dropped. - /// - /// If the lock is already held then this function will return - /// `Poll::Pending`. 
In this case the current task will also be scheduled - /// to receive a notification when the lock would otherwise become - /// available. - /// - /// # Panics - /// - /// This function will panic if called outside the context of a future's - /// task. - pub fn poll_lock(&self, cx: &mut Context<'_>) -> Poll> { - let mut waker = None; - loop { - let n = self.arc.state.swap(invalid_ptr(1), SeqCst); - match n as usize { - // Woohoo, we grabbed the lock! - 0 => return Poll::Ready(BiLockGuard { bilock: self }), - - // Oops, someone else has locked the lock - 1 => {} - - // A task was previously blocked on this lock, likely our task, - // so we need to update that task. - _ => unsafe { - let mut prev = Box::from_raw(n); - *prev = cx.waker().clone(); - waker = Some(prev); - }, - } - - // type ascription for safety's sake! - let me: Box = waker.take().unwrap_or_else(|| Box::new(cx.waker().clone())); - let me = Box::into_raw(me); - - match self.arc.state.compare_exchange(invalid_ptr(1), me, SeqCst, SeqCst) { - // The lock is still locked, but we've now parked ourselves, so - // just report that we're scheduled to receive a notification. - Ok(_) => return Poll::Pending, - - // Oops, looks like the lock was unlocked after our swap above - // and before the compare_exchange. Deallocate what we just - // allocated and go through the loop again. - Err(n) if n.is_null() => unsafe { - waker = Some(Box::from_raw(me)); - }, - - // The top of this loop set the previous state to 1, so if we - // failed the CAS above then it's because the previous value was - // *not* zero or one. This indicates that a task was blocked, - // but we're trying to acquire the lock and there's only one - // other reference of the lock, so it should be impossible for - // that task to ever block itself. - Err(n) => panic!("invalid state: {}", n as usize), - } - } - } - - /// Perform a "blocking lock" of this lock, consuming this lock handle and - /// returning a future to the acquired lock. 
- /// - /// This function consumes the `BiLock` and returns a sentinel future, - /// `BiLockAcquire`. The returned future will resolve to - /// `BiLockAcquired` which represents a locked lock similarly to - /// `BiLockGuard`. - /// - /// Note that the returned future will never resolve to an error. - #[cfg(feature = "bilock")] - #[cfg_attr(docsrs, doc(cfg(feature = "bilock")))] - pub fn lock(&self) -> BiLockAcquire<'_, T> { - BiLockAcquire { bilock: self } - } - - /// Attempts to put the two "halves" of a `BiLock` back together and - /// recover the original value. Succeeds only if the two `BiLock`s - /// originated from the same call to `BiLock::new`. - pub fn reunite(self, other: Self) -> Result> - where - T: Unpin, - { - if Arc::ptr_eq(&self.arc, &other.arc) { - drop(other); - let inner = Arc::try_unwrap(self.arc) - .ok() - .expect("futures: try_unwrap failed in BiLock::reunite"); - Ok(unsafe { inner.into_value() }) - } else { - Err(ReuniteError(self, other)) - } - } - - fn unlock(&self) { - let n = self.arc.state.swap(ptr::null_mut(), SeqCst); - match n as usize { - // we've locked the lock, shouldn't be possible for us to see an - // unlocked lock. - 0 => panic!("invalid unlocked state"), - - // Ok, no one else tried to get the lock, we're done. - 1 => {} - - // Another task has parked themselves on this lock, let's wake them - // up as its now their turn. - _ => unsafe { - Box::from_raw(n).wake(); - }, - } - } -} - -impl Inner { - unsafe fn into_value(mut self) -> T { - self.value.take().unwrap().into_inner() - } -} - -impl Drop for Inner { - fn drop(&mut self) { - assert!(self.state.load(SeqCst).is_null()); - } -} - -/// Error indicating two `BiLock`s were not two halves of a whole, and -/// thus could not be `reunite`d. 
-#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))] -pub struct ReuniteError(pub BiLock, pub BiLock); - -impl fmt::Debug for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("ReuniteError").field(&"...").finish() - } -} - -impl fmt::Display for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "tried to reunite two BiLocks that don't form a pair") - } -} - -#[cfg(feature = "std")] -impl std::error::Error for ReuniteError {} - -/// Returned RAII guard from the `poll_lock` method. -/// -/// This structure acts as a sentinel to the data in the `BiLock` itself, -/// implementing `Deref` and `DerefMut` to `T`. When dropped, the lock will be -/// unlocked. -#[derive(Debug)] -#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))] -pub struct BiLockGuard<'a, T> { - bilock: &'a BiLock, -} - -// We allow parallel access to T via Deref, so Sync bound is also needed here. -unsafe impl Sync for BiLockGuard<'_, T> {} - -impl Deref for BiLockGuard<'_, T> { - type Target = T; - fn deref(&self) -> &T { - unsafe { &*self.bilock.arc.value.as_ref().unwrap().get() } - } -} - -impl DerefMut for BiLockGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.bilock.arc.value.as_ref().unwrap().get() } - } -} - -impl BiLockGuard<'_, T> { - /// Get a mutable pinned reference to the locked value. - pub fn as_pin_mut(&mut self) -> Pin<&mut T> { - // Safety: we never allow moving a !Unpin value out of a bilock, nor - // allow mutable access to it - unsafe { Pin::new_unchecked(&mut *self.bilock.arc.value.as_ref().unwrap().get()) } - } -} - -impl Drop for BiLockGuard<'_, T> { - fn drop(&mut self) { - self.bilock.unlock(); - } -} - -/// Future returned by `BiLock::lock` which will resolve when the lock is -/// acquired. 
-#[cfg(feature = "bilock")] -#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))] -#[must_use = "futures do nothing unless you `.await` or poll them"] -#[derive(Debug)] -pub struct BiLockAcquire<'a, T> { - bilock: &'a BiLock, -} - -// Pinning is never projected to fields -#[cfg(feature = "bilock")] -impl Unpin for BiLockAcquire<'_, T> {} - -#[cfg(feature = "bilock")] -impl<'a, T> Future for BiLockAcquire<'a, T> { - type Output = BiLockGuard<'a, T>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.bilock.poll_lock(cx) - } -} - -// Based on core::ptr::invalid_mut. Equivalent to `addr as *mut T`, but is strict-provenance compatible. -#[allow(clippy::useless_transmute)] -#[inline] -fn invalid_ptr(addr: usize) -> *mut T { - // SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that - // pointer). - unsafe { core::mem::transmute(addr) } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/lock/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/lock/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/lock/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/lock/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -//! Futures-powered synchronization primitives. -//! -//! This module is only available when the `std` or `alloc` feature of this -//! library is activated, and it is activated by default. 
- -#[cfg(not(futures_no_atomic_cas))] -#[cfg(any(feature = "sink", feature = "io"))] -#[cfg(not(feature = "bilock"))] -pub(crate) use self::bilock::BiLock; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "bilock")] -#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))] -pub use self::bilock::{BiLock, BiLockAcquire, BiLockGuard, ReuniteError}; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "std")] -pub use self::mutex::{ - MappedMutexGuard, Mutex, MutexGuard, MutexLockFuture, OwnedMutexGuard, OwnedMutexLockFuture, -}; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(any(feature = "bilock", feature = "sink", feature = "io"))] -#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))] -#[cfg_attr(not(feature = "bilock"), allow(unreachable_pub))] -mod bilock; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "std")] -mod mutex; diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/lock/mutex.rs s390-tools-2.33.1/rust-vendor/futures-util/src/lock/mutex.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/lock/mutex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/lock/mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,551 +0,0 @@ -use std::cell::UnsafeCell; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut}; -use std::pin::Pin; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::{Arc, Mutex as StdMutex}; -use std::{fmt, mem}; - -use slab::Slab; - -use futures_core::future::{FusedFuture, Future}; -use futures_core::task::{Context, Poll, Waker}; - -/// A futures-aware mutex. -/// -/// # Fairness -/// -/// This mutex provides no fairness guarantees. Tasks may not acquire the mutex -/// in the order that they requested the lock, and it's possible for a single task -/// which repeatedly takes the lock to starve other tasks, which may be left waiting -/// indefinitely. 
-pub struct Mutex { - state: AtomicUsize, - waiters: StdMutex>, - value: UnsafeCell, -} - -impl fmt::Debug for Mutex { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let state = self.state.load(Ordering::SeqCst); - f.debug_struct("Mutex") - .field("is_locked", &((state & IS_LOCKED) != 0)) - .field("has_waiters", &((state & HAS_WAITERS) != 0)) - .finish() - } -} - -impl From for Mutex { - fn from(t: T) -> Self { - Self::new(t) - } -} - -impl Default for Mutex { - fn default() -> Self { - Self::new(Default::default()) - } -} - -enum Waiter { - Waiting(Waker), - Woken, -} - -impl Waiter { - fn register(&mut self, waker: &Waker) { - match self { - Self::Waiting(w) if waker.will_wake(w) => {} - _ => *self = Self::Waiting(waker.clone()), - } - } - - fn wake(&mut self) { - match mem::replace(self, Self::Woken) { - Self::Waiting(waker) => waker.wake(), - Self::Woken => {} - } - } -} - -const IS_LOCKED: usize = 1 << 0; -const HAS_WAITERS: usize = 1 << 1; - -impl Mutex { - /// Creates a new futures-aware mutex. - pub fn new(t: T) -> Self { - Self { - state: AtomicUsize::new(0), - waiters: StdMutex::new(Slab::new()), - value: UnsafeCell::new(t), - } - } - - /// Consumes this mutex, returning the underlying data. - /// - /// # Examples - /// - /// ``` - /// use futures::lock::Mutex; - /// - /// let mutex = Mutex::new(0); - /// assert_eq!(mutex.into_inner(), 0); - /// ``` - pub fn into_inner(self) -> T { - self.value.into_inner() - } -} - -impl Mutex { - /// Attempt to acquire the lock immediately. - /// - /// If the lock is currently held, this will return `None`. - pub fn try_lock(&self) -> Option> { - let old_state = self.state.fetch_or(IS_LOCKED, Ordering::Acquire); - if (old_state & IS_LOCKED) == 0 { - Some(MutexGuard { mutex: self }) - } else { - None - } - } - - /// Attempt to acquire the lock immediately. - /// - /// If the lock is currently held, this will return `None`. 
- pub fn try_lock_owned(self: &Arc) -> Option> { - let old_state = self.state.fetch_or(IS_LOCKED, Ordering::Acquire); - if (old_state & IS_LOCKED) == 0 { - Some(OwnedMutexGuard { mutex: self.clone() }) - } else { - None - } - } - - /// Acquire the lock asynchronously. - /// - /// This method returns a future that will resolve once the lock has been - /// successfully acquired. - pub fn lock(&self) -> MutexLockFuture<'_, T> { - MutexLockFuture { mutex: Some(self), wait_key: WAIT_KEY_NONE } - } - - /// Acquire the lock asynchronously. - /// - /// This method returns a future that will resolve once the lock has been - /// successfully acquired. - pub fn lock_owned(self: Arc) -> OwnedMutexLockFuture { - OwnedMutexLockFuture { mutex: Some(self), wait_key: WAIT_KEY_NONE } - } - - /// Returns a mutable reference to the underlying data. - /// - /// Since this call borrows the `Mutex` mutably, no actual locking needs to - /// take place -- the mutable borrow statically guarantees no locks exist. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::lock::Mutex; - /// - /// let mut mutex = Mutex::new(0); - /// *mutex.get_mut() = 10; - /// assert_eq!(*mutex.lock().await, 10); - /// # }); - /// ``` - pub fn get_mut(&mut self) -> &mut T { - // We know statically that there are no other references to `self`, so - // there's no need to lock the inner mutex. - unsafe { &mut *self.value.get() } - } - - fn remove_waker(&self, wait_key: usize, wake_another: bool) { - if wait_key != WAIT_KEY_NONE { - let mut waiters = self.waiters.lock().unwrap(); - match waiters.remove(wait_key) { - Waiter::Waiting(_) => {} - Waiter::Woken => { - // We were awoken, but then dropped before we could - // wake up to acquire the lock. Wake up another - // waiter. 
- if wake_another { - if let Some((_i, waiter)) = waiters.iter_mut().next() { - waiter.wake(); - } - } - } - } - if waiters.is_empty() { - self.state.fetch_and(!HAS_WAITERS, Ordering::Relaxed); // released by mutex unlock - } - } - } - - // Unlocks the mutex. Called by MutexGuard and MappedMutexGuard when they are - // dropped. - fn unlock(&self) { - let old_state = self.state.fetch_and(!IS_LOCKED, Ordering::AcqRel); - if (old_state & HAS_WAITERS) != 0 { - let mut waiters = self.waiters.lock().unwrap(); - if let Some((_i, waiter)) = waiters.iter_mut().next() { - waiter.wake(); - } - } - } -} - -// Sentinel for when no slot in the `Slab` has been dedicated to this object. -const WAIT_KEY_NONE: usize = usize::MAX; - -/// A future which resolves when the target mutex has been successfully acquired, owned version. -pub struct OwnedMutexLockFuture { - // `None` indicates that the mutex was successfully acquired. - mutex: Option>>, - wait_key: usize, -} - -impl fmt::Debug for OwnedMutexLockFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OwnedMutexLockFuture") - .field("was_acquired", &self.mutex.is_none()) - .field("mutex", &self.mutex) - .field( - "wait_key", - &(if self.wait_key == WAIT_KEY_NONE { None } else { Some(self.wait_key) }), - ) - .finish() - } -} - -impl FusedFuture for OwnedMutexLockFuture { - fn is_terminated(&self) -> bool { - self.mutex.is_none() - } -} - -impl Future for OwnedMutexLockFuture { - type Output = OwnedMutexGuard; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - let mutex = this.mutex.as_ref().expect("polled OwnedMutexLockFuture after completion"); - - if let Some(lock) = mutex.try_lock_owned() { - mutex.remove_waker(this.wait_key, false); - this.mutex = None; - return Poll::Ready(lock); - } - - { - let mut waiters = mutex.waiters.lock().unwrap(); - if this.wait_key == WAIT_KEY_NONE { - this.wait_key = 
waiters.insert(Waiter::Waiting(cx.waker().clone())); - if waiters.len() == 1 { - mutex.state.fetch_or(HAS_WAITERS, Ordering::Relaxed); // released by mutex unlock - } - } else { - waiters[this.wait_key].register(cx.waker()); - } - } - - // Ensure that we haven't raced `MutexGuard::drop`'s unlock path by - // attempting to acquire the lock again. - if let Some(lock) = mutex.try_lock_owned() { - mutex.remove_waker(this.wait_key, false); - this.mutex = None; - return Poll::Ready(lock); - } - - Poll::Pending - } -} - -impl Drop for OwnedMutexLockFuture { - fn drop(&mut self) { - if let Some(mutex) = self.mutex.as_ref() { - // This future was dropped before it acquired the mutex. - // - // Remove ourselves from the map, waking up another waiter if we - // had been awoken to acquire the lock. - mutex.remove_waker(self.wait_key, true); - } - } -} - -/// An RAII guard returned by the `lock_owned` and `try_lock_owned` methods. -/// When this structure is dropped (falls out of scope), the lock will be -/// unlocked. -pub struct OwnedMutexGuard { - mutex: Arc>, -} - -impl fmt::Debug for OwnedMutexGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OwnedMutexGuard") - .field("value", &&**self) - .field("mutex", &self.mutex) - .finish() - } -} - -impl Drop for OwnedMutexGuard { - fn drop(&mut self) { - self.mutex.unlock() - } -} - -impl Deref for OwnedMutexGuard { - type Target = T; - fn deref(&self) -> &T { - unsafe { &*self.mutex.value.get() } - } -} - -impl DerefMut for OwnedMutexGuard { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.mutex.value.get() } - } -} - -/// A future which resolves when the target mutex has been successfully acquired. -pub struct MutexLockFuture<'a, T: ?Sized> { - // `None` indicates that the mutex was successfully acquired. 
- mutex: Option<&'a Mutex>, - wait_key: usize, -} - -impl fmt::Debug for MutexLockFuture<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MutexLockFuture") - .field("was_acquired", &self.mutex.is_none()) - .field("mutex", &self.mutex) - .field( - "wait_key", - &(if self.wait_key == WAIT_KEY_NONE { None } else { Some(self.wait_key) }), - ) - .finish() - } -} - -impl FusedFuture for MutexLockFuture<'_, T> { - fn is_terminated(&self) -> bool { - self.mutex.is_none() - } -} - -impl<'a, T: ?Sized> Future for MutexLockFuture<'a, T> { - type Output = MutexGuard<'a, T>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mutex = self.mutex.expect("polled MutexLockFuture after completion"); - - if let Some(lock) = mutex.try_lock() { - mutex.remove_waker(self.wait_key, false); - self.mutex = None; - return Poll::Ready(lock); - } - - { - let mut waiters = mutex.waiters.lock().unwrap(); - if self.wait_key == WAIT_KEY_NONE { - self.wait_key = waiters.insert(Waiter::Waiting(cx.waker().clone())); - if waiters.len() == 1 { - mutex.state.fetch_or(HAS_WAITERS, Ordering::Relaxed); // released by mutex unlock - } - } else { - waiters[self.wait_key].register(cx.waker()); - } - } - - // Ensure that we haven't raced `MutexGuard::drop`'s unlock path by - // attempting to acquire the lock again. - if let Some(lock) = mutex.try_lock() { - mutex.remove_waker(self.wait_key, false); - self.mutex = None; - return Poll::Ready(lock); - } - - Poll::Pending - } -} - -impl Drop for MutexLockFuture<'_, T> { - fn drop(&mut self) { - if let Some(mutex) = self.mutex { - // This future was dropped before it acquired the mutex. - // - // Remove ourselves from the map, waking up another waiter if we - // had been awoken to acquire the lock. - mutex.remove_waker(self.wait_key, true); - } - } -} - -/// An RAII guard returned by the `lock` and `try_lock` methods. 
-/// When this structure is dropped (falls out of scope), the lock will be -/// unlocked. -pub struct MutexGuard<'a, T: ?Sized> { - mutex: &'a Mutex, -} - -impl<'a, T: ?Sized> MutexGuard<'a, T> { - /// Returns a locked view over a portion of the locked data. - /// - /// # Example - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::lock::{Mutex, MutexGuard}; - /// - /// let data = Mutex::new(Some("value".to_string())); - /// { - /// let locked_str = MutexGuard::map(data.lock().await, |opt| opt.as_mut().unwrap()); - /// assert_eq!(&*locked_str, "value"); - /// } - /// # }); - /// ``` - #[inline] - pub fn map(this: Self, f: F) -> MappedMutexGuard<'a, T, U> - where - F: FnOnce(&mut T) -> &mut U, - { - let mutex = this.mutex; - let value = f(unsafe { &mut *this.mutex.value.get() }); - // Don't run the `drop` method for MutexGuard. The ownership of the underlying - // locked state is being moved to the returned MappedMutexGuard. - mem::forget(this); - MappedMutexGuard { mutex, value, _marker: PhantomData } - } -} - -impl fmt::Debug for MutexGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MutexGuard").field("value", &&**self).field("mutex", &self.mutex).finish() - } -} - -impl Drop for MutexGuard<'_, T> { - fn drop(&mut self) { - self.mutex.unlock() - } -} - -impl Deref for MutexGuard<'_, T> { - type Target = T; - fn deref(&self) -> &T { - unsafe { &*self.mutex.value.get() } - } -} - -impl DerefMut for MutexGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.mutex.value.get() } - } -} - -/// An RAII guard returned by the `MutexGuard::map` and `MappedMutexGuard::map` methods. -/// When this structure is dropped (falls out of scope), the lock will be unlocked. 
-pub struct MappedMutexGuard<'a, T: ?Sized, U: ?Sized> { - mutex: &'a Mutex, - value: *mut U, - _marker: PhantomData<&'a mut U>, -} - -impl<'a, T: ?Sized, U: ?Sized> MappedMutexGuard<'a, T, U> { - /// Returns a locked view over a portion of the locked data. - /// - /// # Example - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::lock::{MappedMutexGuard, Mutex, MutexGuard}; - /// - /// let data = Mutex::new(Some("value".to_string())); - /// { - /// let locked_str = MutexGuard::map(data.lock().await, |opt| opt.as_mut().unwrap()); - /// let locked_char = MappedMutexGuard::map(locked_str, |s| s.get_mut(0..1).unwrap()); - /// assert_eq!(&*locked_char, "v"); - /// } - /// # }); - /// ``` - #[inline] - pub fn map(this: Self, f: F) -> MappedMutexGuard<'a, T, V> - where - F: FnOnce(&mut U) -> &mut V, - { - let mutex = this.mutex; - let value = f(unsafe { &mut *this.value }); - // Don't run the `drop` method for MappedMutexGuard. The ownership of the underlying - // locked state is being moved to the returned MappedMutexGuard. - mem::forget(this); - MappedMutexGuard { mutex, value, _marker: PhantomData } - } -} - -impl fmt::Debug for MappedMutexGuard<'_, T, U> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MappedMutexGuard") - .field("value", &&**self) - .field("mutex", &self.mutex) - .finish() - } -} - -impl Drop for MappedMutexGuard<'_, T, U> { - fn drop(&mut self) { - self.mutex.unlock() - } -} - -impl Deref for MappedMutexGuard<'_, T, U> { - type Target = U; - fn deref(&self) -> &U { - unsafe { &*self.value } - } -} - -impl DerefMut for MappedMutexGuard<'_, T, U> { - fn deref_mut(&mut self) -> &mut U { - unsafe { &mut *self.value } - } -} - -// Mutexes can be moved freely between threads and acquired on any thread so long -// as the inner value can be safely sent between threads. 
-unsafe impl Send for Mutex {} -unsafe impl Sync for Mutex {} - -// It's safe to switch which thread the acquire is being attempted on so long as -// `T` can be accessed on that thread. -unsafe impl Send for MutexLockFuture<'_, T> {} - -// doesn't have any interesting `&self` methods (only Debug) -unsafe impl Sync for MutexLockFuture<'_, T> {} - -// It's safe to switch which thread the acquire is being attempted on so long as -// `T` can be accessed on that thread. -unsafe impl Send for OwnedMutexLockFuture {} - -// doesn't have any interesting `&self` methods (only Debug) -unsafe impl Sync for OwnedMutexLockFuture {} - -// Safe to send since we don't track any thread-specific details-- the inner -// lock is essentially spinlock-equivalent (attempt to flip an atomic bool) -unsafe impl Send for MutexGuard<'_, T> {} -unsafe impl Sync for MutexGuard<'_, T> {} - -unsafe impl Send for OwnedMutexGuard {} -unsafe impl Sync for OwnedMutexGuard {} - -unsafe impl Send for MappedMutexGuard<'_, T, U> {} -unsafe impl Sync for MappedMutexGuard<'_, T, U> {} - -#[test] -fn test_mutex_guard_debug_not_recurse() { - let mutex = Mutex::new(42); - let guard = mutex.try_lock().unwrap(); - let _ = format!("{:?}", guard); - let guard = MutexGuard::map(guard, |n| n); - let _ = format!("{:?}", guard); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/never.rs s390-tools-2.33.1/rust-vendor/futures-util/src/never.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/never.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/never.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,18 +0,0 @@ -//! This module contains the `Never` type. -//! -//! Values of this type can never be created and will never exist. - -/// A type with no possible values. -/// -/// This is used to indicate values which can never be created, such as the -/// error type of infallible futures. -/// -/// This type is a stable equivalent to the `!` type from `std`. 
-/// -/// This is currently an alias for [`std::convert::Infallible`], but in -/// the future it may be an alias for [`!`][never]. -/// See ["Future compatibility" section of `std::convert::Infallible`][infallible] for more. -/// -/// [never]: https://doc.rust-lang.org/nightly/std/primitive.never.html -/// [infallible]: https://doc.rust-lang.org/nightly/std/convert/enum.Infallible.html#future-compatibility -pub type Never = core::convert::Infallible; diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/buffer.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/buffer.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/buffer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/buffer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,105 +0,0 @@ -use alloc::collections::VecDeque; -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Sink for the [`buffer`](super::SinkExt::buffer) method. 
- #[derive(Debug)] - #[must_use = "sinks do nothing unless polled"] - pub struct Buffer { - #[pin] - sink: Si, - buf: VecDeque, - - // Track capacity separately from the `VecDeque`, which may be rounded up - capacity: usize, - } -} - -impl, Item> Buffer { - pub(super) fn new(sink: Si, capacity: usize) -> Self { - Self { sink, buf: VecDeque::with_capacity(capacity), capacity } - } - - delegate_access_inner!(sink, Si, ()); - - fn try_empty_buffer(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - ready!(this.sink.as_mut().poll_ready(cx))?; - while let Some(item) = this.buf.pop_front() { - this.sink.as_mut().start_send(item)?; - if !this.buf.is_empty() { - ready!(this.sink.as_mut().poll_ready(cx))?; - } - } - Poll::Ready(Ok(())) - } -} - -// Forwarding impl of Stream from the underlying sink -impl Stream for Buffer -where - S: Sink + Stream, -{ - type Item = S::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().sink.poll_next(cx) - } - - fn size_hint(&self) -> (usize, Option) { - self.sink.size_hint() - } -} - -impl FusedStream for Buffer -where - S: Sink + FusedStream, -{ - fn is_terminated(&self) -> bool { - self.sink.is_terminated() - } -} - -impl, Item> Sink for Buffer { - type Error = Si::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if self.capacity == 0 { - return self.project().sink.poll_ready(cx); - } - - let _ = self.as_mut().try_empty_buffer(cx)?; - - if self.buf.len() >= self.capacity { - Poll::Pending - } else { - Poll::Ready(Ok(())) - } - } - - fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - if self.capacity == 0 { - self.project().sink.start_send(item) - } else { - self.project().buf.push_back(item); - Ok(()) - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().try_empty_buffer(cx))?; - debug_assert!(self.buf.is_empty()); - 
self.project().sink.poll_flush(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().try_empty_buffer(cx))?; - debug_assert!(self.buf.is_empty()); - self.project().sink.poll_close(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/close.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/close.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/close.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/close.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,32 +0,0 @@ -use core::marker::PhantomData; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; - -/// Future for the [`close`](super::SinkExt::close) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Close<'a, Si: ?Sized, Item> { - sink: &'a mut Si, - _phantom: PhantomData, -} - -impl Unpin for Close<'_, Si, Item> {} - -/// A future that completes when the sink has finished closing. -/// -/// The sink itself is returned after closing is complete. 
-impl<'a, Si: Sink + Unpin + ?Sized, Item> Close<'a, Si, Item> { - pub(super) fn new(sink: &'a mut Si) -> Self { - Self { sink, _phantom: PhantomData } - } -} - -impl + Unpin + ?Sized, Item> Future for Close<'_, Si, Item> { - type Output = Result<(), Si::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.sink).poll_close(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/drain.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/drain.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/drain.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/drain.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,59 +0,0 @@ -use super::assert_sink; -use crate::never::Never; -use core::marker::PhantomData; -use core::pin::Pin; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; - -/// Sink for the [`drain`] function. -#[derive(Debug)] -#[must_use = "sinks do nothing unless polled"] -pub struct Drain { - marker: PhantomData, -} - -/// Create a sink that will just discard all items given to it. -/// -/// Similar to [`io::Sink`](::std::io::Sink). 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::sink::{self, SinkExt}; -/// -/// let mut drain = sink::drain(); -/// drain.send(5).await?; -/// # Ok::<(), futures::never::Never>(()) }).unwrap(); -/// ``` -pub fn drain() -> Drain { - assert_sink::(Drain { marker: PhantomData }) -} - -impl Unpin for Drain {} - -impl Clone for Drain { - fn clone(&self) -> Self { - drain() - } -} - -impl Sink for Drain { - type Error = Never; - - fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, _item: T) -> Result<(), Self::Error> { - Ok(()) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/err_into.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/err_into.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/err_into.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/err_into.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,57 +0,0 @@ -use crate::sink::{SinkExt, SinkMapErr}; -use futures_core::stream::{FusedStream, Stream}; -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Sink for the [`sink_err_into`](super::SinkExt::sink_err_into) method. 
- #[derive(Debug)] - #[must_use = "sinks do nothing unless polled"] - pub struct SinkErrInto, Item, E> { - #[pin] - sink: SinkMapErr E>, - } -} - -impl SinkErrInto -where - Si: Sink, - Si::Error: Into, -{ - pub(super) fn new(sink: Si) -> Self { - Self { sink: SinkExt::sink_map_err(sink, Into::into) } - } - - delegate_access_inner!(sink, Si, (.)); -} - -impl Sink for SinkErrInto -where - Si: Sink, - Si::Error: Into, -{ - type Error = E; - - delegate_sink!(sink, Item); -} - -// Forwarding impl of Stream from the underlying sink -impl Stream for SinkErrInto -where - S: Sink + Stream, - S::Error: Into, -{ - type Item = S::Item; - - delegate_stream!(sink); -} - -impl FusedStream for SinkErrInto -where - S: Sink + FusedStream, - S::Error: Into, -{ - fn is_terminated(&self) -> bool { - self.sink.is_terminated() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/fanout.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/fanout.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/fanout.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/fanout.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,111 +0,0 @@ -use core::fmt::{Debug, Formatter, Result as FmtResult}; -use core::pin::Pin; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Sink that clones incoming items and forwards them to two sinks at the same time. - /// - /// Backpressure from any downstream sink propagates up, which means that this sink - /// can only process items as fast as its _slowest_ downstream sink. - #[must_use = "sinks do nothing unless polled"] - pub struct Fanout { - #[pin] - sink1: Si1, - #[pin] - sink2: Si2 - } -} - -impl Fanout { - pub(super) fn new(sink1: Si1, sink2: Si2) -> Self { - Self { sink1, sink2 } - } - - /// Get a shared reference to the inner sinks. 
- pub fn get_ref(&self) -> (&Si1, &Si2) { - (&self.sink1, &self.sink2) - } - - /// Get a mutable reference to the inner sinks. - pub fn get_mut(&mut self) -> (&mut Si1, &mut Si2) { - (&mut self.sink1, &mut self.sink2) - } - - /// Get a pinned mutable reference to the inner sinks. - pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut Si1>, Pin<&mut Si2>) { - let this = self.project(); - (this.sink1, this.sink2) - } - - /// Consumes this combinator, returning the underlying sinks. - /// - /// Note that this may discard intermediate state of this combinator, - /// so care should be taken to avoid losing resources when this is called. - pub fn into_inner(self) -> (Si1, Si2) { - (self.sink1, self.sink2) - } -} - -impl Debug for Fanout { - fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { - f.debug_struct("Fanout").field("sink1", &self.sink1).field("sink2", &self.sink2).finish() - } -} - -impl Sink for Fanout -where - Si1: Sink, - Item: Clone, - Si2: Sink, -{ - type Error = Si1::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - let sink1_ready = this.sink1.poll_ready(cx)?.is_ready(); - let sink2_ready = this.sink2.poll_ready(cx)?.is_ready(); - let ready = sink1_ready && sink2_ready; - if ready { - Poll::Ready(Ok(())) - } else { - Poll::Pending - } - } - - fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - let this = self.project(); - - this.sink1.start_send(item.clone())?; - this.sink2.start_send(item)?; - Ok(()) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - let sink1_ready = this.sink1.poll_flush(cx)?.is_ready(); - let sink2_ready = this.sink2.poll_flush(cx)?.is_ready(); - let ready = sink1_ready && sink2_ready; - if ready { - Poll::Ready(Ok(())) - } else { - Poll::Pending - } - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - let sink1_ready = 
this.sink1.poll_close(cx)?.is_ready(); - let sink2_ready = this.sink2.poll_close(cx)?.is_ready(); - let ready = sink1_ready && sink2_ready; - if ready { - Poll::Ready(Ok(())) - } else { - Poll::Pending - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/feed.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/feed.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/feed.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/feed.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; - -/// Future for the [`feed`](super::SinkExt::feed) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Feed<'a, Si: ?Sized, Item> { - sink: &'a mut Si, - item: Option, -} - -// Pinning is never projected to children -impl Unpin for Feed<'_, Si, Item> {} - -impl<'a, Si: Sink + Unpin + ?Sized, Item> Feed<'a, Si, Item> { - pub(super) fn new(sink: &'a mut Si, item: Item) -> Self { - Feed { sink, item: Some(item) } - } - - pub(super) fn sink_pin_mut(&mut self) -> Pin<&mut Si> { - Pin::new(self.sink) - } - - pub(super) fn is_item_pending(&self) -> bool { - self.item.is_some() - } -} - -impl + Unpin + ?Sized, Item> Future for Feed<'_, Si, Item> { - type Output = Result<(), Si::Error>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - let mut sink = Pin::new(&mut this.sink); - ready!(sink.as_mut().poll_ready(cx))?; - let item = this.item.take().expect("polled Feed after completion"); - sink.as_mut().start_send(item)?; - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/flush.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/flush.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/flush.rs 2024-02-06 12:28:08.000000000 +0100 
+++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/flush.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,36 +0,0 @@ -use core::marker::PhantomData; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; - -/// Future for the [`flush`](super::SinkExt::flush) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Flush<'a, Si: ?Sized, Item> { - sink: &'a mut Si, - _phantom: PhantomData, -} - -// Pin is never projected to a field. -impl Unpin for Flush<'_, Si, Item> {} - -/// A future that completes when the sink has finished processing all -/// pending requests. -/// -/// The sink itself is returned after flushing is complete; this adapter is -/// intended to be used when you want to stop sending to the sink until -/// all current requests are processed. -impl<'a, Si: Sink + Unpin + ?Sized, Item> Flush<'a, Si, Item> { - pub(super) fn new(sink: &'a mut Si) -> Self { - Self { sink, _phantom: PhantomData } - } -} - -impl + Unpin + ?Sized, Item> Future for Flush<'_, Si, Item> { - type Output = Result<(), Si::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.sink).poll_flush(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/map_err.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/map_err.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/map_err.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/map_err.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,65 +0,0 @@ -use core::pin::Pin; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Sink for the [`sink_map_err`](super::SinkExt::sink_map_err) method. 
- #[derive(Debug, Clone)] - #[must_use = "sinks do nothing unless polled"] - pub struct SinkMapErr { - #[pin] - sink: Si, - f: Option, - } -} - -impl SinkMapErr { - pub(super) fn new(sink: Si, f: F) -> Self { - Self { sink, f: Some(f) } - } - - delegate_access_inner!(sink, Si, ()); - - fn take_f(self: Pin<&mut Self>) -> F { - self.project().f.take().expect("polled MapErr after completion") - } -} - -impl Sink for SinkMapErr -where - Si: Sink, - F: FnOnce(Si::Error) -> E, -{ - type Error = E; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.as_mut().project().sink.poll_ready(cx).map_err(|e| self.as_mut().take_f()(e)) - } - - fn start_send(mut self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - self.as_mut().project().sink.start_send(item).map_err(|e| self.as_mut().take_f()(e)) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.as_mut().project().sink.poll_flush(cx).map_err(|e| self.as_mut().take_f()(e)) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.as_mut().project().sink.poll_close(cx).map_err(|e| self.as_mut().take_f()(e)) - } -} - -// Forwarding impl of Stream from the underlying sink -impl Stream for SinkMapErr { - type Item = S::Item; - - delegate_stream!(sink); -} - -impl FusedStream for SinkMapErr { - fn is_terminated(&self) -> bool { - self.sink.is_terminated() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,344 +0,0 @@ -//! Asynchronous sinks. -//! -//! This module contains: -//! -//! - The [`Sink`] trait, which allows you to asynchronously write data. -//! - The [`SinkExt`] trait, which provides adapters for chaining and composing -//! sinks. 
- -use crate::future::{assert_future, Either}; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::stream::{Stream, TryStream}; -use futures_core::task::{Context, Poll}; - -#[cfg(feature = "compat")] -use crate::compat::CompatSink; - -pub use futures_sink::Sink; - -mod close; -pub use self::close::Close; - -mod drain; -pub use self::drain::{drain, Drain}; - -mod fanout; -pub use self::fanout::Fanout; - -mod feed; -pub use self::feed::Feed; - -mod flush; -pub use self::flush::Flush; - -mod err_into; -pub use self::err_into::SinkErrInto; - -mod map_err; -pub use self::map_err::SinkMapErr; - -mod send; -pub use self::send::Send; - -mod send_all; -pub use self::send_all::SendAll; - -mod unfold; -pub use self::unfold::{unfold, Unfold}; - -mod with; -pub use self::with::With; - -mod with_flat_map; -pub use self::with_flat_map::WithFlatMap; - -#[cfg(feature = "alloc")] -mod buffer; -#[cfg(feature = "alloc")] -pub use self::buffer::Buffer; - -impl SinkExt for T where T: Sink {} - -/// An extension trait for `Sink`s that provides a variety of convenient -/// combinator functions. -pub trait SinkExt: Sink { - /// Composes a function *in front of* the sink. - /// - /// This adapter produces a new sink that passes each value through the - /// given function `f` before sending it to `self`. - /// - /// To process each value, `f` produces a *future*, which is then polled to - /// completion before passing its result down to the underlying sink. If the - /// future produces an error, that error is returned by the new sink. - /// - /// Note that this function consumes the given sink, returning a wrapped - /// version, much like `Iterator::map`. - fn with(self, f: F) -> With - where - F: FnMut(U) -> Fut, - Fut: Future>, - E: From, - Self: Sized, - { - assert_sink::(With::new(self, f)) - } - - /// Composes a function *in front of* the sink. 
- /// - /// This adapter produces a new sink that passes each value through the - /// given function `f` before sending it to `self`. - /// - /// To process each value, `f` produces a *stream*, of which each value - /// is passed to the underlying sink. A new value will not be accepted until - /// the stream has been drained - /// - /// Note that this function consumes the given sink, returning a wrapped - /// version, much like `Iterator::flat_map`. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::sink::SinkExt; - /// use futures::stream::{self, StreamExt}; - /// - /// let (tx, rx) = mpsc::channel(5); - /// - /// let mut tx = tx.with_flat_map(|x| { - /// stream::iter(vec![Ok(42); x]) - /// }); - /// - /// tx.send(5).await.unwrap(); - /// drop(tx); - /// let received: Vec = rx.collect().await; - /// assert_eq!(received, vec![42, 42, 42, 42, 42]); - /// # }); - /// ``` - fn with_flat_map(self, f: F) -> WithFlatMap - where - F: FnMut(U) -> St, - St: Stream>, - Self: Sized, - { - assert_sink::(WithFlatMap::new(self, f)) - } - - /* - fn with_map(self, f: F) -> WithMap - where F: FnMut(U) -> Self::SinkItem, - Self: Sized; - - fn with_filter(self, f: F) -> WithFilter - where F: FnMut(Self::SinkItem) -> bool, - Self: Sized; - - fn with_filter_map(self, f: F) -> WithFilterMap - where F: FnMut(U) -> Option, - Self: Sized; - */ - - /// Transforms the error returned by the sink. - fn sink_map_err(self, f: F) -> SinkMapErr - where - F: FnOnce(Self::Error) -> E, - Self: Sized, - { - assert_sink::(SinkMapErr::new(self, f)) - } - - /// Map this sink's error to a different error type using the `Into` trait. - /// - /// If wanting to map errors of a `Sink + Stream`, use `.sink_err_into().err_into()`. - fn sink_err_into(self) -> err_into::SinkErrInto - where - Self: Sized, - Self::Error: Into, - { - assert_sink::(SinkErrInto::new(self)) - } - - /// Adds a fixed-size buffer to the current sink. 
- /// - /// The resulting sink will buffer up to `capacity` items when the - /// underlying sink is unwilling to accept additional items. Calling `flush` - /// on the buffered sink will attempt to both empty the buffer and complete - /// processing on the underlying sink. - /// - /// Note that this function consumes the given sink, returning a wrapped - /// version, much like `Iterator::map`. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - #[cfg(feature = "alloc")] - fn buffer(self, capacity: usize) -> Buffer - where - Self: Sized, - { - assert_sink::(Buffer::new(self, capacity)) - } - - /// Close the sink. - fn close(&mut self) -> Close<'_, Self, Item> - where - Self: Unpin, - { - assert_future::, _>(Close::new(self)) - } - - /// Fanout items to multiple sinks. - /// - /// This adapter clones each incoming item and forwards it to both this as well as - /// the other sink at the same time. - fn fanout(self, other: Si) -> Fanout - where - Self: Sized, - Item: Clone, - Si: Sink, - { - assert_sink::(Fanout::new(self, other)) - } - - /// Flush the sink, processing all pending items. - /// - /// This adapter is intended to be used when you want to stop sending to the sink - /// until all current requests are processed. - fn flush(&mut self) -> Flush<'_, Self, Item> - where - Self: Unpin, - { - assert_future::, _>(Flush::new(self)) - } - - /// A future that completes after the given item has been fully processed - /// into the sink, including flushing. - /// - /// Note that, **because of the flushing requirement, it is usually better - /// to batch together items to send via `feed` or `send_all`, - /// rather than flushing between each item.** - fn send(&mut self, item: Item) -> Send<'_, Self, Item> - where - Self: Unpin, - { - assert_future::, _>(Send::new(self, item)) - } - - /// A future that completes after the given item has been received - /// by the sink. 
- /// - /// Unlike `send`, the returned future does not flush the sink. - /// It is the caller's responsibility to ensure all pending items - /// are processed, which can be done via `flush` or `close`. - fn feed(&mut self, item: Item) -> Feed<'_, Self, Item> - where - Self: Unpin, - { - assert_future::, _>(Feed::new(self, item)) - } - - /// A future that completes after the given stream has been fully processed - /// into the sink, including flushing. - /// - /// This future will drive the stream to keep producing items until it is - /// exhausted, sending each item to the sink. It will complete once both the - /// stream is exhausted, the sink has received all items, and the sink has - /// been flushed. Note that the sink is **not** closed. If the stream produces - /// an error, that error will be returned by this future without flushing the sink. - /// - /// Doing `sink.send_all(stream)` is roughly equivalent to - /// `stream.forward(sink)`. The returned future will exhaust all items from - /// `stream` and send them to `self`. - fn send_all<'a, St>(&'a mut self, stream: &'a mut St) -> SendAll<'a, Self, St> - where - St: TryStream + Stream + Unpin + ?Sized, - // St: Stream> + Unpin + ?Sized, - Self: Unpin, - { - // TODO: type mismatch resolving `::Item == std::result::Result>::Error>` - // assert_future::, _>(SendAll::new(self, stream)) - SendAll::new(self, stream) - } - - /// Wrap this sink in an `Either` sink, making it the left-hand variant - /// of that `Either`. - /// - /// This can be used in combination with the `right_sink` method to write `if` - /// statements that evaluate to different streams in different branches. - fn left_sink(self) -> Either - where - Si2: Sink, - Self: Sized, - { - assert_sink::(Either::Left(self)) - } - - /// Wrap this stream in an `Either` stream, making it the right-hand variant - /// of that `Either`. 
- /// - /// This can be used in combination with the `left_sink` method to write `if` - /// statements that evaluate to different streams in different branches. - fn right_sink(self) -> Either - where - Si1: Sink, - Self: Sized, - { - assert_sink::(Either::Right(self)) - } - - /// Wraps a [`Sink`] into a sink compatible with libraries using - /// futures 0.1 `Sink`. Requires the `compat` feature to be enabled. - #[cfg(feature = "compat")] - #[cfg_attr(docsrs, doc(cfg(feature = "compat")))] - fn compat(self) -> CompatSink - where - Self: Sized + Unpin, - { - CompatSink::new(self) - } - - /// A convenience method for calling [`Sink::poll_ready`] on [`Unpin`] - /// sink types. - fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll> - where - Self: Unpin, - { - Pin::new(self).poll_ready(cx) - } - - /// A convenience method for calling [`Sink::start_send`] on [`Unpin`] - /// sink types. - fn start_send_unpin(&mut self, item: Item) -> Result<(), Self::Error> - where - Self: Unpin, - { - Pin::new(self).start_send(item) - } - - /// A convenience method for calling [`Sink::poll_flush`] on [`Unpin`] - /// sink types. - fn poll_flush_unpin(&mut self, cx: &mut Context<'_>) -> Poll> - where - Self: Unpin, - { - Pin::new(self).poll_flush(cx) - } - - /// A convenience method for calling [`Sink::poll_close`] on [`Unpin`] - /// sink types. - fn poll_close_unpin(&mut self, cx: &mut Context<'_>) -> Poll> - where - Self: Unpin, - { - Pin::new(self).poll_close(cx) - } -} - -// Just a helper function to ensure the sinks we're returning all have the -// right implementations. 
-pub(crate) fn assert_sink(sink: S) -> S -where - S: Sink, -{ - sink -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/send_all.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/send_all.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/send_all.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/send_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,100 +0,0 @@ -use crate::stream::{Fuse, StreamExt, TryStreamExt}; -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{Stream, TryStream}; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; - -/// Future for the [`send_all`](super::SinkExt::send_all) method. -#[allow(explicit_outlives_requirements)] // https://github.com/rust-lang/rust/issues/60993 -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SendAll<'a, Si, St> -where - Si: ?Sized, - St: ?Sized + TryStream, -{ - sink: &'a mut Si, - stream: Fuse<&'a mut St>, - buffered: Option, -} - -impl fmt::Debug for SendAll<'_, Si, St> -where - Si: fmt::Debug + ?Sized, - St: fmt::Debug + ?Sized + TryStream, - St::Ok: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SendAll") - .field("sink", &self.sink) - .field("stream", &self.stream) - .field("buffered", &self.buffered) - .finish() - } -} - -// Pinning is never projected to any fields -impl Unpin for SendAll<'_, Si, St> -where - Si: Unpin + ?Sized, - St: TryStream + Unpin + ?Sized, -{ -} - -impl<'a, Si, St, Ok, Error> SendAll<'a, Si, St> -where - Si: Sink + Unpin + ?Sized, - St: TryStream + Stream + Unpin + ?Sized, -{ - pub(super) fn new(sink: &'a mut Si, stream: &'a mut St) -> Self { - Self { sink, stream: stream.fuse(), buffered: None } - } - - fn try_start_send( - &mut self, - cx: &mut Context<'_>, - item: St::Ok, - ) -> Poll> { - debug_assert!(self.buffered.is_none()); - match Pin::new(&mut 
self.sink).poll_ready(cx)? { - Poll::Ready(()) => Poll::Ready(Pin::new(&mut self.sink).start_send(item)), - Poll::Pending => { - self.buffered = Some(item); - Poll::Pending - } - } - } -} - -impl Future for SendAll<'_, Si, St> -where - Si: Sink + Unpin + ?Sized, - St: Stream> + Unpin + ?Sized, -{ - type Output = Result<(), Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - // If we've got an item buffered already, we need to write it to the - // sink before we can do anything else - if let Some(item) = this.buffered.take() { - ready!(this.try_start_send(cx, item))? - } - - loop { - match this.stream.try_poll_next_unpin(cx)? { - Poll::Ready(Some(item)) => ready!(this.try_start_send(cx, item))?, - Poll::Ready(None) => { - ready!(Pin::new(&mut this.sink).poll_flush(cx))?; - return Poll::Ready(Ok(())); - } - Poll::Pending => { - ready!(Pin::new(&mut this.sink).poll_flush(cx))?; - return Poll::Pending; - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/send.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/send.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/send.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/send.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -use super::Feed; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; - -/// Future for the [`send`](super::SinkExt::send) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Send<'a, Si: ?Sized, Item> { - feed: Feed<'a, Si, Item>, -} - -// Pinning is never projected to children -impl Unpin for Send<'_, Si, Item> {} - -impl<'a, Si: Sink + Unpin + ?Sized, Item> Send<'a, Si, Item> { - pub(super) fn new(sink: &'a mut Si, item: Item) -> Self { - Self { feed: Feed::new(sink, item) } - } -} - -impl + Unpin + ?Sized, Item> Future for Send<'_, Si, Item> { - type Output = Result<(), Si::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - - if this.feed.is_item_pending() { - ready!(Pin::new(&mut this.feed).poll(cx))?; - debug_assert!(!this.feed.is_item_pending()); - } - - // we're done sending the item, but want to block on flushing the - // sink - ready!(this.feed.sink_pin_mut().poll_flush(cx))?; - - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/unfold.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/unfold.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/unfold.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/unfold.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,89 +0,0 @@ -use super::assert_sink; -use crate::unfold_state::UnfoldState; -use core::{future::Future, pin::Pin}; -use futures_core::ready; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Sink for the [`unfold`] function. - #[derive(Debug)] - #[must_use = "sinks do nothing unless polled"] - pub struct Unfold { - function: F, - #[pin] - state: UnfoldState, - } -} - -/// Create a sink from a function which processes one item at a time. 
-/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::sink::{self, SinkExt}; -/// -/// let unfold = sink::unfold(0, |mut sum, i: i32| { -/// async move { -/// sum += i; -/// eprintln!("{}", i); -/// Ok::<_, futures::never::Never>(sum) -/// } -/// }); -/// futures::pin_mut!(unfold); -/// unfold.send(5).await?; -/// # Ok::<(), futures::never::Never>(()) }).unwrap(); -/// ``` -pub fn unfold(init: T, function: F) -> Unfold -where - F: FnMut(T, Item) -> R, - R: Future>, -{ - assert_sink::(Unfold { function, state: UnfoldState::Value { value: init } }) -} - -impl Sink for Unfold -where - F: FnMut(T, Item) -> R, - R: Future>, -{ - type Error = E; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } - - fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { - let mut this = self.project(); - let future = match this.state.as_mut().take_value() { - Some(value) => (this.function)(value, item), - None => panic!("start_send called without poll_ready being called first"), - }; - this.state.set(UnfoldState::Future { future }); - Ok(()) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - Poll::Ready(if let Some(future) = this.state.as_mut().project_future() { - match ready!(future.poll(cx)) { - Ok(state) => { - this.state.set(UnfoldState::Value { value: state }); - Ok(()) - } - Err(err) => { - this.state.set(UnfoldState::Empty); - Err(err) - } - } - } else { - Ok(()) - }) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/with_flat_map.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/with_flat_map.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/with_flat_map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/with_flat_map.rs 1970-01-01 
01:00:00.000000000 +0100 @@ -1,127 +0,0 @@ -use core::fmt; -use core::marker::PhantomData; -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Sink for the [`with_flat_map`](super::SinkExt::with_flat_map) method. - #[must_use = "sinks do nothing unless polled"] - pub struct WithFlatMap { - #[pin] - sink: Si, - f: F, - #[pin] - stream: Option, - buffer: Option, - _marker: PhantomData, - } -} - -impl fmt::Debug for WithFlatMap -where - Si: fmt::Debug, - St: fmt::Debug, - Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("WithFlatMap") - .field("sink", &self.sink) - .field("stream", &self.stream) - .field("buffer", &self.buffer) - .finish() - } -} - -impl WithFlatMap -where - Si: Sink, - F: FnMut(U) -> St, - St: Stream>, -{ - pub(super) fn new(sink: Si, f: F) -> Self { - Self { sink, f, stream: None, buffer: None, _marker: PhantomData } - } - - delegate_access_inner!(sink, Si, ()); - - fn try_empty_stream(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if this.buffer.is_some() { - ready!(this.sink.as_mut().poll_ready(cx))?; - let item = this.buffer.take().unwrap(); - this.sink.as_mut().start_send(item)?; - } - if let Some(mut some_stream) = this.stream.as_mut().as_pin_mut() { - while let Some(item) = ready!(some_stream.as_mut().poll_next(cx)?) { - match this.sink.as_mut().poll_ready(cx)? 
{ - Poll::Ready(()) => this.sink.as_mut().start_send(item)?, - Poll::Pending => { - *this.buffer = Some(item); - return Poll::Pending; - } - }; - } - } - this.stream.set(None); - Poll::Ready(Ok(())) - } -} - -// Forwarding impl of Stream from the underlying sink -impl Stream for WithFlatMap -where - S: Stream + Sink, - F: FnMut(U) -> St, - St: Stream>, -{ - type Item = S::Item; - - delegate_stream!(sink); -} - -impl FusedStream for WithFlatMap -where - S: FusedStream + Sink, - F: FnMut(U) -> St, - St: Stream>, -{ - fn is_terminated(&self) -> bool { - self.sink.is_terminated() - } -} - -impl Sink for WithFlatMap -where - Si: Sink, - F: FnMut(U) -> St, - St: Stream>, -{ - type Error = Si::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.try_empty_stream(cx) - } - - fn start_send(self: Pin<&mut Self>, item: U) -> Result<(), Self::Error> { - let mut this = self.project(); - - assert!(this.stream.is_none()); - this.stream.set(Some((this.f)(item))); - Ok(()) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().try_empty_stream(cx)?); - self.project().sink.poll_flush(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().try_empty_stream(cx)?); - self.project().sink.poll_close(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/sink/with.rs s390-tools-2.33.1/rust-vendor/futures-util/src/sink/with.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/sink/with.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/sink/with.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,134 +0,0 @@ -use core::fmt; -use core::marker::PhantomData; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! 
{ - /// Sink for the [`with`](super::SinkExt::with) method. - #[must_use = "sinks do nothing unless polled"] - pub struct With { - #[pin] - sink: Si, - f: F, - #[pin] - state: Option, - _phantom: PhantomData Item>, - } -} - -impl fmt::Debug for With -where - Si: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("With").field("sink", &self.sink).field("state", &self.state).finish() - } -} - -impl With -where - Si: Sink, - F: FnMut(U) -> Fut, - Fut: Future, -{ - pub(super) fn new(sink: Si, f: F) -> Self - where - Fut: Future>, - E: From, - { - Self { state: None, sink, f, _phantom: PhantomData } - } -} - -impl Clone for With -where - Si: Clone, - F: Clone, - Fut: Clone, -{ - fn clone(&self) -> Self { - Self { - state: self.state.clone(), - sink: self.sink.clone(), - f: self.f.clone(), - _phantom: PhantomData, - } - } -} - -// Forwarding impl of Stream from the underlying sink -impl Stream for With -where - S: Stream + Sink, - F: FnMut(U) -> Fut, - Fut: Future, -{ - type Item = S::Item; - - delegate_stream!(sink); -} - -impl With -where - Si: Sink, - F: FnMut(U) -> Fut, - Fut: Future>, - E: From, -{ - delegate_access_inner!(sink, Si, ()); - - /// Completes the processing of previous item if any. 
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - let item = match this.state.as_mut().as_pin_mut() { - None => return Poll::Ready(Ok(())), - Some(fut) => ready!(fut.poll(cx))?, - }; - this.state.set(None); - this.sink.start_send(item)?; - Poll::Ready(Ok(())) - } -} - -impl Sink for With -where - Si: Sink, - F: FnMut(U) -> Fut, - Fut: Future>, - E: From, -{ - type Error = E; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll(cx))?; - ready!(self.project().sink.poll_ready(cx)?); - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: U) -> Result<(), Self::Error> { - let mut this = self.project(); - - assert!(this.state.is_none()); - this.state.set(Some((this.f)(item))); - Ok(()) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll(cx))?; - ready!(self.project().sink.poll_flush(cx)?); - Poll::Ready(Ok(())) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll(cx))?; - ready!(self.project().sink.poll_close(cx)?); - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/abortable.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/abortable.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/abortable.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/abortable.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -use super::assert_stream; -use crate::stream::{AbortHandle, Abortable}; -use crate::Stream; - -/// Creates a new `Abortable` stream and an `AbortHandle` which can be used to stop it. -/// -/// This function is a convenient (but less flexible) alternative to calling -/// `AbortHandle::new` and `Abortable::new` manually. 
-/// -/// This function is only available when the `std` or `alloc` feature of this -/// library is activated, and it is activated by default. -pub fn abortable(stream: St) -> (Abortable, AbortHandle) -where - St: Stream, -{ - let (handle, reg) = AbortHandle::new_pair(); - let abortable = assert_stream::(Abortable::new(stream, reg)); - (abortable, handle) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/empty.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/empty.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/empty.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/empty.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,45 +0,0 @@ -use super::assert_stream; -use core::marker::PhantomData; -use core::pin::Pin; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; - -/// Stream for the [`empty`] function. -#[derive(Debug)] -#[must_use = "streams do nothing unless polled"] -pub struct Empty { - _phantom: PhantomData, -} - -/// Creates a stream which contains no elements. -/// -/// The returned stream will always return `Ready(None)` when polled. 
-pub fn empty() -> Empty { - assert_stream::(Empty { _phantom: PhantomData }) -} - -impl Unpin for Empty {} - -impl FusedStream for Empty { - fn is_terminated(&self) -> bool { - true - } -} - -impl Stream for Empty { - type Item = T; - - fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(None) - } - - fn size_hint(&self) -> (usize, Option) { - (0, Some(0)) - } -} - -impl Clone for Empty { - fn clone(&self) -> Self { - empty() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_ordered.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_ordered.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_ordered.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_ordered.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,248 +0,0 @@ -use crate::stream::{FuturesUnordered, StreamExt}; -use alloc::collections::binary_heap::{BinaryHeap, PeekMut}; -use core::cmp::Ordering; -use core::fmt::{self, Debug}; -use core::iter::FromIterator; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::{ - task::{Context, Poll}, - FusedStream, -}; -use pin_project_lite::pin_project; - -pin_project! { - #[must_use = "futures do nothing unless you `.await` or poll them"] - #[derive(Debug)] - struct OrderWrapper { - #[pin] - data: T, // A future or a future's output - index: isize, - } -} - -impl PartialEq for OrderWrapper { - fn eq(&self, other: &Self) -> bool { - self.index == other.index - } -} - -impl Eq for OrderWrapper {} - -impl PartialOrd for OrderWrapper { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for OrderWrapper { - fn cmp(&self, other: &Self) -> Ordering { - // BinaryHeap is a max heap, so compare backwards here. 
- other.index.cmp(&self.index) - } -} - -impl Future for OrderWrapper -where - T: Future, -{ - type Output = OrderWrapper; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let index = self.index; - self.project().data.poll(cx).map(|output| OrderWrapper { data: output, index }) - } -} - -/// An unbounded queue of futures. -/// -/// This "combinator" is similar to [`FuturesUnordered`], but it imposes a FIFO -/// order on top of the set of futures. While futures in the set will race to -/// completion in parallel, results will only be returned in the order their -/// originating futures were added to the queue. -/// -/// Futures are pushed into this queue and their realized values are yielded in -/// order. This structure is optimized to manage a large number of futures. -/// Futures managed by [`FuturesOrdered`] will only be polled when they generate -/// notifications. This reduces the required amount of work needed to coordinate -/// large numbers of futures. -/// -/// When a [`FuturesOrdered`] is first created, it does not contain any futures. -/// Calling [`poll_next`](FuturesOrdered::poll_next) in this state will result -/// in [`Poll::Ready(None)`](Poll::Ready) to be returned. Futures are submitted -/// to the queue using [`push_back`](FuturesOrdered::push_back) (or -/// [`push_front`](FuturesOrdered::push_front)); however, the future will -/// **not** be polled at this point. [`FuturesOrdered`] will only poll managed -/// futures when [`FuturesOrdered::poll_next`] is called. As such, it -/// is important to call [`poll_next`](FuturesOrdered::poll_next) after pushing -/// new futures. -/// -/// If [`FuturesOrdered::poll_next`] returns [`Poll::Ready(None)`](Poll::Ready) -/// this means that the queue is currently not managing any futures. A future -/// may be submitted to the queue at a later time. 
At that point, a call to -/// [`FuturesOrdered::poll_next`] will either return the future's resolved value -/// **or** [`Poll::Pending`] if the future has not yet completed. When -/// multiple futures are submitted to the queue, [`FuturesOrdered::poll_next`] -/// will return [`Poll::Pending`] until the first future completes, even if -/// some of the later futures have already completed. -/// -/// Note that you can create a ready-made [`FuturesOrdered`] via the -/// [`collect`](Iterator::collect) method, or you can start with an empty queue -/// with the [`FuturesOrdered::new`] constructor. -/// -/// This type is only available when the `std` or `alloc` feature of this -/// library is activated, and it is activated by default. -#[must_use = "streams do nothing unless polled"] -pub struct FuturesOrdered { - in_progress_queue: FuturesUnordered>, - queued_outputs: BinaryHeap>, - next_incoming_index: isize, - next_outgoing_index: isize, -} - -impl Unpin for FuturesOrdered {} - -impl FuturesOrdered { - /// Constructs a new, empty `FuturesOrdered` - /// - /// The returned [`FuturesOrdered`] does not contain any futures and, in - /// this state, [`FuturesOrdered::poll_next`] will return - /// [`Poll::Ready(None)`](Poll::Ready). - pub fn new() -> Self { - Self { - in_progress_queue: FuturesUnordered::new(), - queued_outputs: BinaryHeap::new(), - next_incoming_index: 0, - next_outgoing_index: 0, - } - } - - /// Returns the number of futures contained in the queue. - /// - /// This represents the total number of in-flight futures, both - /// those currently processing and those that have completed but - /// which are waiting for earlier futures to complete. - pub fn len(&self) -> usize { - self.in_progress_queue.len() + self.queued_outputs.len() - } - - /// Returns `true` if the queue contains no futures - pub fn is_empty(&self) -> bool { - self.in_progress_queue.is_empty() && self.queued_outputs.is_empty() - } - - /// Push a future into the queue. 
- /// - /// This function submits the given future to the internal set for managing. - /// This function will not call [`poll`](Future::poll) on the submitted - /// future. The caller must ensure that [`FuturesOrdered::poll_next`] is - /// called in order to receive task notifications. - #[deprecated(note = "use `push_back` instead")] - pub fn push(&mut self, future: Fut) { - self.push_back(future); - } - - /// Pushes a future to the back of the queue. - /// - /// This function submits the given future to the internal set for managing. - /// This function will not call [`poll`](Future::poll) on the submitted - /// future. The caller must ensure that [`FuturesOrdered::poll_next`] is - /// called in order to receive task notifications. - pub fn push_back(&mut self, future: Fut) { - let wrapped = OrderWrapper { data: future, index: self.next_incoming_index }; - self.next_incoming_index += 1; - self.in_progress_queue.push(wrapped); - } - - /// Pushes a future to the front of the queue. - /// - /// This function submits the given future to the internal set for managing. - /// This function will not call [`poll`](Future::poll) on the submitted - /// future. The caller must ensure that [`FuturesOrdered::poll_next`] is - /// called in order to receive task notifications. This future will be - /// the next future to be returned complete. 
- pub fn push_front(&mut self, future: Fut) { - let wrapped = OrderWrapper { data: future, index: self.next_outgoing_index - 1 }; - self.next_outgoing_index -= 1; - self.in_progress_queue.push(wrapped); - } -} - -impl Default for FuturesOrdered { - fn default() -> Self { - Self::new() - } -} - -impl Stream for FuturesOrdered { - type Item = Fut::Output; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = &mut *self; - - // Check to see if we've already received the next value - if let Some(next_output) = this.queued_outputs.peek_mut() { - if next_output.index == this.next_outgoing_index { - this.next_outgoing_index += 1; - return Poll::Ready(Some(PeekMut::pop(next_output).data)); - } - } - - loop { - match ready!(this.in_progress_queue.poll_next_unpin(cx)) { - Some(output) => { - if output.index == this.next_outgoing_index { - this.next_outgoing_index += 1; - return Poll::Ready(Some(output.data)); - } else { - this.queued_outputs.push(output) - } - } - None => return Poll::Ready(None), - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let len = self.len(); - (len, Some(len)) - } -} - -impl Debug for FuturesOrdered { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "FuturesOrdered {{ ... 
}}") - } -} - -impl FromIterator for FuturesOrdered { - fn from_iter(iter: T) -> Self - where - T: IntoIterator, - { - let acc = Self::new(); - iter.into_iter().fold(acc, |mut acc, item| { - acc.push_back(item); - acc - }) - } -} - -impl FusedStream for FuturesOrdered { - fn is_terminated(&self) -> bool { - self.in_progress_queue.is_terminated() && self.queued_outputs.is_empty() - } -} - -impl Extend for FuturesOrdered { - fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - for item in iter { - self.push_back(item); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/abort.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/abort.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/abort.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/abort.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -pub(super) fn abort(s: &str) -> ! { - struct DoublePanic; - - impl Drop for DoublePanic { - fn drop(&mut self) { - panic!("panicking twice to abort the program"); - } - } - - let _bomb = DoublePanic; - panic!("{}", s); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/iter.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/iter.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/iter.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/iter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,172 +0,0 @@ -use super::task::Task; -use super::FuturesUnordered; -use core::marker::PhantomData; -use core::pin::Pin; -use core::ptr; -use core::sync::atomic::Ordering::Relaxed; - -/// Mutable iterator over all futures in the unordered set. 
-#[derive(Debug)] -pub struct IterPinMut<'a, Fut> { - pub(super) task: *const Task, - pub(super) len: usize, - pub(super) _marker: PhantomData<&'a mut FuturesUnordered>, -} - -/// Mutable iterator over all futures in the unordered set. -#[derive(Debug)] -pub struct IterMut<'a, Fut: Unpin>(pub(super) IterPinMut<'a, Fut>); - -/// Immutable iterator over all futures in the unordered set. -#[derive(Debug)] -pub struct IterPinRef<'a, Fut> { - pub(super) task: *const Task, - pub(super) len: usize, - pub(super) pending_next_all: *mut Task, - pub(super) _marker: PhantomData<&'a FuturesUnordered>, -} - -/// Immutable iterator over all the futures in the unordered set. -#[derive(Debug)] -pub struct Iter<'a, Fut: Unpin>(pub(super) IterPinRef<'a, Fut>); - -/// Owned iterator over all futures in the unordered set. -#[derive(Debug)] -pub struct IntoIter { - pub(super) len: usize, - pub(super) inner: FuturesUnordered, -} - -impl Iterator for IntoIter { - type Item = Fut; - - fn next(&mut self) -> Option { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. - let task = self.inner.head_all.get_mut(); - - if (*task).is_null() { - return None; - } - - unsafe { - // Moving out of the future is safe because it is `Unpin` - let future = (*(**task).future.get()).take().unwrap(); - - // Mutable access to a previously shared `FuturesUnordered` implies - // that the other threads already released the object before the - // current thread acquired it, so relaxed ordering can be used and - // valid `next_all` checks can be skipped. 
- let next = (**task).next_all.load(Relaxed); - *task = next; - if !task.is_null() { - *(**task).prev_all.get() = ptr::null_mut(); - } - self.len -= 1; - Some(future) - } - } - - fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) - } -} - -impl ExactSizeIterator for IntoIter {} - -impl<'a, Fut> Iterator for IterPinMut<'a, Fut> { - type Item = Pin<&'a mut Fut>; - - fn next(&mut self) -> Option { - if self.task.is_null() { - return None; - } - - unsafe { - let future = (*(*self.task).future.get()).as_mut().unwrap(); - - // Mutable access to a previously shared `FuturesUnordered` implies - // that the other threads already released the object before the - // current thread acquired it, so relaxed ordering can be used and - // valid `next_all` checks can be skipped. - let next = (*self.task).next_all.load(Relaxed); - self.task = next; - self.len -= 1; - Some(Pin::new_unchecked(future)) - } - } - - fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) - } -} - -impl ExactSizeIterator for IterPinMut<'_, Fut> {} - -impl<'a, Fut: Unpin> Iterator for IterMut<'a, Fut> { - type Item = &'a mut Fut; - - fn next(&mut self) -> Option { - self.0.next().map(Pin::get_mut) - } - - fn size_hint(&self) -> (usize, Option) { - self.0.size_hint() - } -} - -impl ExactSizeIterator for IterMut<'_, Fut> {} - -impl<'a, Fut> Iterator for IterPinRef<'a, Fut> { - type Item = Pin<&'a Fut>; - - fn next(&mut self) -> Option { - if self.task.is_null() { - return None; - } - - unsafe { - let future = (*(*self.task).future.get()).as_ref().unwrap(); - - // Relaxed ordering can be used since acquire ordering when - // `head_all` was initially read for this iterator implies acquire - // ordering for all previously inserted nodes (and we don't need to - // read `len_all` again for any other nodes). 
- let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed); - self.task = next; - self.len -= 1; - Some(Pin::new_unchecked(future)) - } - } - - fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) - } -} - -impl ExactSizeIterator for IterPinRef<'_, Fut> {} - -impl<'a, Fut: Unpin> Iterator for Iter<'a, Fut> { - type Item = &'a Fut; - - fn next(&mut self) -> Option { - self.0.next().map(Pin::get_ref) - } - - fn size_hint(&self) -> (usize, Option) { - self.0.size_hint() - } -} - -impl ExactSizeIterator for Iter<'_, Fut> {} - -// SAFETY: we do nothing thread-local and there is no interior mutability, -// so the usual structural `Send`/`Sync` apply. -unsafe impl Send for IterPinRef<'_, Fut> {} -unsafe impl Sync for IterPinRef<'_, Fut> {} - -unsafe impl Send for IterPinMut<'_, Fut> {} -unsafe impl Sync for IterPinMut<'_, Fut> {} - -unsafe impl Send for IntoIter {} -unsafe impl Sync for IntoIter {} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,661 +0,0 @@ -//! An unbounded set of futures. -//! -//! This module is only available when the `std` or `alloc` feature of this -//! library is activated, and it is activated by default. 
- -use crate::task::AtomicWaker; -use alloc::sync::{Arc, Weak}; -use core::cell::UnsafeCell; -use core::fmt::{self, Debug}; -use core::iter::FromIterator; -use core::marker::PhantomData; -use core::mem; -use core::pin::Pin; -use core::ptr; -use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst}; -use core::sync::atomic::{AtomicBool, AtomicPtr}; -use futures_core::future::Future; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError}; - -mod abort; - -mod iter; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352 -pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; - -mod task; -use self::task::Task; - -mod ready_to_run_queue; -use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; - -/// A set of futures which may complete in any order. -/// -/// See [`FuturesOrdered`](crate::stream::FuturesOrdered) for a version of this -/// type that preserves a FIFO order. -/// -/// This structure is optimized to manage a large number of futures. -/// Futures managed by [`FuturesUnordered`] will only be polled when they -/// generate wake-up notifications. This reduces the required amount of work -/// needed to poll large numbers of futures. -/// -/// [`FuturesUnordered`] can be filled by [`collect`](Iterator::collect)ing an -/// iterator of futures into a [`FuturesUnordered`], or by -/// [`push`](FuturesUnordered::push)ing futures onto an existing -/// [`FuturesUnordered`]. When new futures are added, -/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving -/// wake-ups for new futures. -/// -/// Note that you can create a ready-made [`FuturesUnordered`] via the -/// [`collect`](Iterator::collect) method, or you can start with an empty set -/// with the [`FuturesUnordered::new`] constructor. 
-/// -/// This type is only available when the `std` or `alloc` feature of this -/// library is activated, and it is activated by default. -#[must_use = "streams do nothing unless polled"] -pub struct FuturesUnordered { - ready_to_run_queue: Arc>, - head_all: AtomicPtr>, - is_terminated: AtomicBool, -} - -unsafe impl Send for FuturesUnordered {} -unsafe impl Sync for FuturesUnordered {} -impl Unpin for FuturesUnordered {} - -impl Spawn for FuturesUnordered> { - fn spawn_obj(&self, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> { - self.push(future_obj); - Ok(()) - } -} - -impl LocalSpawn for FuturesUnordered> { - fn spawn_local_obj(&self, future_obj: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { - self.push(future_obj); - Ok(()) - } -} - -// FuturesUnordered is implemented using two linked lists. One which links all -// futures managed by a `FuturesUnordered` and one that tracks futures that have -// been scheduled for polling. The first linked list allows for thread safe -// insertion of nodes at the head as well as forward iteration, but is otherwise -// not thread safe and is only accessed by the thread that owns the -// `FuturesUnordered` value for any other operations. The second linked list is -// an implementation of the intrusive MPSC queue algorithm described by -// 1024cores.net. -// -// When a future is submitted to the set, a task is allocated and inserted in -// both linked lists. The next call to `poll_next` will (eventually) see this -// task and call `poll` on the future. -// -// Before a managed future is polled, the current context's waker is replaced -// with one that is aware of the specific future being run. This ensures that -// wake-up notifications generated by that specific future are visible to -// `FuturesUnordered`. When a wake-up notification is received, the task is -// inserted into the ready to run queue, so that its future can be polled later. 
-// -// Each task is wrapped in an `Arc` and thereby atomically reference counted. -// Also, each task contains an `AtomicBool` which acts as a flag that indicates -// whether the task is currently inserted in the atomic queue. When a wake-up -// notification is received, the task will only be inserted into the ready to -// run queue if it isn't inserted already. - -impl Default for FuturesUnordered { - fn default() -> Self { - Self::new() - } -} - -impl FuturesUnordered { - /// Constructs a new, empty [`FuturesUnordered`]. - /// - /// The returned [`FuturesUnordered`] does not contain any futures. - /// In this state, [`FuturesUnordered::poll_next`](Stream::poll_next) will - /// return [`Poll::Ready(None)`](Poll::Ready). - pub fn new() -> Self { - let stub = Arc::new(Task { - future: UnsafeCell::new(None), - next_all: AtomicPtr::new(ptr::null_mut()), - prev_all: UnsafeCell::new(ptr::null()), - len_all: UnsafeCell::new(0), - next_ready_to_run: AtomicPtr::new(ptr::null_mut()), - queued: AtomicBool::new(true), - ready_to_run_queue: Weak::new(), - woken: AtomicBool::new(false), - }); - let stub_ptr = Arc::as_ptr(&stub); - let ready_to_run_queue = Arc::new(ReadyToRunQueue { - waker: AtomicWaker::new(), - head: AtomicPtr::new(stub_ptr as *mut _), - tail: UnsafeCell::new(stub_ptr), - stub, - }); - - Self { - head_all: AtomicPtr::new(ptr::null_mut()), - ready_to_run_queue, - is_terminated: AtomicBool::new(false), - } - } - - /// Returns the number of futures contained in the set. - /// - /// This represents the total number of in-flight futures. - pub fn len(&self) -> usize { - let (_, len) = self.atomic_load_head_and_len_all(); - len - } - - /// Returns `true` if the set contains no futures. - pub fn is_empty(&self) -> bool { - // Relaxed ordering can be used here since we don't need to read from - // the head pointer, only check whether it is null. - self.head_all.load(Relaxed).is_null() - } - - /// Push a future into the set. 
- /// - /// This method adds the given future to the set. This method will not - /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must - /// ensure that [`FuturesUnordered::poll_next`](Stream::poll_next) is called - /// in order to receive wake-up notifications for the given future. - pub fn push(&self, future: Fut) { - let task = Arc::new(Task { - future: UnsafeCell::new(Some(future)), - next_all: AtomicPtr::new(self.pending_next_all()), - prev_all: UnsafeCell::new(ptr::null_mut()), - len_all: UnsafeCell::new(0), - next_ready_to_run: AtomicPtr::new(ptr::null_mut()), - queued: AtomicBool::new(true), - ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue), - woken: AtomicBool::new(false), - }); - - // Reset the `is_terminated` flag if we've previously marked ourselves - // as terminated. - self.is_terminated.store(false, Relaxed); - - // Right now our task has a strong reference count of 1. We transfer - // ownership of this reference count to our internal linked list - // and we'll reclaim ownership through the `unlink` method below. - let ptr = self.link(task); - - // We'll need to get the future "into the system" to start tracking it, - // e.g. getting its wake-up notifications going to us tracking which - // futures are ready. To do that we unconditionally enqueue it for - // polling here. - self.ready_to_run_queue.enqueue(ptr); - } - - /// Returns an iterator that allows inspecting each future in the set. - pub fn iter(&self) -> Iter<'_, Fut> - where - Fut: Unpin, - { - Iter(Pin::new(self).iter_pin_ref()) - } - - /// Returns an iterator that allows inspecting each future in the set. - pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, Fut> { - let (task, len) = self.atomic_load_head_and_len_all(); - let pending_next_all = self.pending_next_all(); - - IterPinRef { task, len, pending_next_all, _marker: PhantomData } - } - - /// Returns an iterator that allows modifying each future in the set. 
- pub fn iter_mut(&mut self) -> IterMut<'_, Fut> - where - Fut: Unpin, - { - IterMut(Pin::new(self).iter_pin_mut()) - } - - /// Returns an iterator that allows modifying each future in the set. - pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, Fut> { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. - let task = *self.head_all.get_mut(); - let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; - - IterPinMut { task, len, _marker: PhantomData } - } - - /// Returns the current head node and number of futures in the list of all - /// futures within a context where access is shared with other threads - /// (mostly for use with the `len` and `iter_pin_ref` methods). - fn atomic_load_head_and_len_all(&self) -> (*const Task, usize) { - let task = self.head_all.load(Acquire); - let len = if task.is_null() { - 0 - } else { - unsafe { - (*task).spin_next_all(self.pending_next_all(), Acquire); - *(*task).len_all.get() - } - }; - - (task, len) - } - - /// Releases the task. It destroys the future inside and either drops - /// the `Arc` or transfers ownership to the ready to run queue. - /// The task this method is called on must have been unlinked before. - fn release_task(&mut self, task: Arc>) { - // `release_task` must only be called on unlinked tasks - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - unsafe { - debug_assert!((*task.prev_all.get()).is_null()); - } - - // The future is done, try to reset the queued flag. This will prevent - // `wake` from doing any work in the future - let prev = task.queued.swap(true, SeqCst); - - // Drop the future, even if it hasn't finished yet. This is safe - // because we're dropping the future on the thread that owns - // `FuturesUnordered`, which correctly tracks `Fut`'s lifetimes and - // such. - unsafe { - // Set to `None` rather than `take()`ing to prevent moving the - // future. 
- *task.future.get() = None; - } - - // If the queued flag was previously set, then it means that this task - // is still in our internal ready to run queue. We then transfer - // ownership of our reference count to the ready to run queue, and it'll - // come along and free it later, noticing that the future is `None`. - // - // If, however, the queued flag was *not* set then we're safe to - // release our reference count on the task. The queued flag was set - // above so all future `enqueue` operations will not actually - // enqueue the task, so our task will never see the ready to run queue - // again. The task itself will be deallocated once all reference counts - // have been dropped elsewhere by the various wakers that contain it. - if prev { - mem::forget(task); - } - } - - /// Insert a new task into the internal linked list. - fn link(&self, task: Arc>) -> *const Task { - // `next_all` should already be reset to the pending state before this - // function is called. - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - let ptr = Arc::into_raw(task); - - // Atomically swap out the old head node to get the node that should be - // assigned to `next_all`. - let next = self.head_all.swap(ptr as *mut _, AcqRel); - - unsafe { - // Store the new list length in the new node. - let new_len = if next.is_null() { - 1 - } else { - // Make sure `next_all` has been written to signal that it is - // safe to read `len_all`. - (*next).spin_next_all(self.pending_next_all(), Acquire); - *(*next).len_all.get() + 1 - }; - *(*ptr).len_all.get() = new_len; - - // Write the old head as the next node pointer, signaling to other - // threads that `len_all` and `next_all` are ready to read. - (*ptr).next_all.store(next, Release); - - // `prev_all` updates don't need to be synchronized, as the field is - // only ever used after exclusive access has been acquired. 
- if !next.is_null() { - *(*next).prev_all.get() = ptr; - } - } - - ptr - } - - /// Remove the task from the linked list tracking all tasks currently - /// managed by `FuturesUnordered`. - /// This method is unsafe because it has be guaranteed that `task` is a - /// valid pointer. - unsafe fn unlink(&mut self, task: *const Task) -> Arc> { - // Compute the new list length now in case we're removing the head node - // and won't be able to retrieve the correct length later. - let head = *self.head_all.get_mut(); - debug_assert!(!head.is_null()); - let new_len = *(*head).len_all.get() - 1; - - let task = Arc::from_raw(task); - let next = task.next_all.load(Relaxed); - let prev = *task.prev_all.get(); - task.next_all.store(self.pending_next_all(), Relaxed); - *task.prev_all.get() = ptr::null_mut(); - - if !next.is_null() { - *(*next).prev_all.get() = prev; - } - - if !prev.is_null() { - (*prev).next_all.store(next, Relaxed); - } else { - *self.head_all.get_mut() = next; - } - - // Store the new list length in the head node. - let head = *self.head_all.get_mut(); - if !head.is_null() { - *(*head).len_all.get() = new_len; - } - - task - } - - /// Returns the reserved value for `Task::next_all` to indicate a pending - /// assignment from the thread that inserted the task. - /// - /// `FuturesUnordered::link` needs to update `Task` pointers in an order - /// that ensures any iterators created on other threads can correctly - /// traverse the entire `Task` list using the chain of `next_all` pointers. - /// This could be solved with a compare-exchange loop that stores the - /// current `head_all` in `next_all` and swaps out `head_all` with the new - /// `Task` pointer if the head hasn't already changed. Under heavy thread - /// contention, this compare-exchange loop could become costly. 
- /// - /// An alternative is to initialize `next_all` to a reserved pending state - /// first, perform an atomic swap on `head_all`, and finally update - /// `next_all` with the old head node. Iterators will then either see the - /// pending state value or the correct next node pointer, and can reload - /// `next_all` as needed until the correct value is loaded. The number of - /// retries needed (if any) would be small and will always be finite, so - /// this should generally perform better than the compare-exchange loop. - /// - /// A valid `Task` pointer in the `head_all` list is guaranteed to never be - /// this value, so it is safe to use as a reserved value until the correct - /// value can be written. - fn pending_next_all(&self) -> *mut Task { - // The `ReadyToRunQueue` stub is never inserted into the `head_all` - // list, and its pointer value will remain valid for the lifetime of - // this `FuturesUnordered`, so we can make use of its value here. - Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _ - } -} - -impl Stream for FuturesUnordered { - type Item = Fut::Output; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let len = self.len(); - - // Keep track of how many child futures we have polled, - // in case we want to forcibly yield. - let mut polled = 0; - let mut yielded = 0; - - // Ensure `parent` is correctly set. - self.ready_to_run_queue.waker.register(cx.waker()); - - loop { - // Safety: &mut self guarantees the mutual exclusion `dequeue` - // expects - let task = match unsafe { self.ready_to_run_queue.dequeue() } { - Dequeue::Empty => { - if self.is_empty() { - // We can only consider ourselves terminated once we - // have yielded a `None` - *self.is_terminated.get_mut() = true; - return Poll::Ready(None); - } else { - return Poll::Pending; - } - } - Dequeue::Inconsistent => { - // At this point, it may be worth yielding the thread & - // spinning a few times... but for now, just yield using the - // task system. 
- cx.waker().wake_by_ref(); - return Poll::Pending; - } - Dequeue::Data(task) => task, - }; - - debug_assert!(task != self.ready_to_run_queue.stub()); - - // Safety: - // - `task` is a valid pointer. - // - We are the only thread that accesses the `UnsafeCell` that - // contains the future - let future = match unsafe { &mut *(*task).future.get() } { - Some(future) => future, - - // If the future has already gone away then we're just - // cleaning out this task. See the comment in - // `release_task` for more information, but we're basically - // just taking ownership of our reference count here. - None => { - // This case only happens when `release_task` was called - // for this task before and couldn't drop the task - // because it was already enqueued in the ready to run - // queue. - - // Safety: `task` is a valid pointer - let task = unsafe { Arc::from_raw(task) }; - - // Double check that the call to `release_task` really - // happened. Calling it required the task to be unlinked. - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - unsafe { - debug_assert!((*task.prev_all.get()).is_null()); - } - continue; - } - }; - - // Safety: `task` is a valid pointer - let task = unsafe { self.unlink(task) }; - - // Unset queued flag: This must be done before polling to ensure - // that the future's task gets rescheduled if it sends a wake-up - // notification **during** the call to `poll`. - let prev = task.queued.swap(false, SeqCst); - assert!(prev); - - // We're going to need to be very careful if the `poll` - // method below panics. We need to (a) not leak memory and - // (b) ensure that we still don't have any use-after-frees. To - // manage this we do a few things: - // - // * A "bomb" is created which if dropped abnormally will call - // `release_task`. That way we'll be sure the memory management - // of the `task` is managed correctly. In particular - // `release_task` will drop the future. 
This ensures that it is - // dropped on this thread and not accidentally on a different - // thread (bad). - // * We unlink the task from our internal queue to preemptively - // assume it'll panic, in which case we'll want to discard it - // regardless. - struct Bomb<'a, Fut> { - queue: &'a mut FuturesUnordered, - task: Option>>, - } - - impl Drop for Bomb<'_, Fut> { - fn drop(&mut self) { - if let Some(task) = self.task.take() { - self.queue.release_task(task); - } - } - } - - let mut bomb = Bomb { task: Some(task), queue: &mut *self }; - - // Poll the underlying future with the appropriate waker - // implementation. This is where a large bit of the unsafety - // starts to stem from internally. The waker is basically just - // our `Arc>` and can schedule the future for polling by - // enqueuing itself in the ready to run queue. - // - // Critically though `Task` won't actually access `Fut`, the - // future, while it's floating around inside of wakers. - // These structs will basically just use `Fut` to size - // the internal allocation, appropriately accessing fields and - // deallocating the task if need be. - let res = { - let task = bomb.task.as_ref().unwrap(); - // We are only interested in whether the future is awoken before it - // finishes polling, so reset the flag here. - task.woken.store(false, Relaxed); - let waker = Task::waker_ref(task); - let mut cx = Context::from_waker(&waker); - - // Safety: We won't move the future ever again - let future = unsafe { Pin::new_unchecked(future) }; - - future.poll(&mut cx) - }; - polled += 1; - - match res { - Poll::Pending => { - let task = bomb.task.take().unwrap(); - // If the future was awoken during polling, we assume - // the future wanted to explicitly yield. - yielded += task.woken.load(Relaxed) as usize; - bomb.queue.link(task); - - // If a future yields, we respect it and yield here. - // If all futures have been polled, we also yield here to - // avoid starving other tasks waiting on the executor. 
- // (polling the same future twice per iteration may cause - // the problem: https://github.com/rust-lang/futures-rs/pull/2333) - if yielded >= 2 || polled == len { - cx.waker().wake_by_ref(); - return Poll::Pending; - } - continue; - } - Poll::Ready(output) => return Poll::Ready(Some(output)), - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let len = self.len(); - (len, Some(len)) - } -} - -impl Debug for FuturesUnordered { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "FuturesUnordered {{ ... }}") - } -} - -impl FuturesUnordered { - /// Clears the set, removing all futures. - pub fn clear(&mut self) { - self.clear_head_all(); - - // we just cleared all the tasks, and we have &mut self, so this is safe. - unsafe { self.ready_to_run_queue.clear() }; - - self.is_terminated.store(false, Relaxed); - } - - fn clear_head_all(&mut self) { - while !self.head_all.get_mut().is_null() { - let head = *self.head_all.get_mut(); - let task = unsafe { self.unlink(head) }; - self.release_task(task); - } - } -} - -impl Drop for FuturesUnordered { - fn drop(&mut self) { - // When a `FuturesUnordered` is dropped we want to drop all futures - // associated with it. At the same time though there may be tons of - // wakers flying around which contain `Task` references - // inside them. We'll let those naturally get deallocated. - self.clear_head_all(); - - // Note that at this point we could still have a bunch of tasks in the - // ready to run queue. None of those tasks, however, have futures - // associated with them so they're safe to destroy on any thread. At - // this point the `FuturesUnordered` struct, the owner of the one strong - // reference to the ready to run queue will drop the strong reference. - // At that point whichever thread releases the strong refcount last (be - // it this thread or some other thread as part of an `upgrade`) will - // clear out the ready to run queue and free all remaining tasks. 
- // - // While that freeing operation isn't guaranteed to happen here, it's - // guaranteed to happen "promptly" as no more "blocking work" will - // happen while there's a strong refcount held. - } -} - -impl<'a, Fut: Unpin> IntoIterator for &'a FuturesUnordered { - type Item = &'a Fut; - type IntoIter = Iter<'a, Fut>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, Fut: Unpin> IntoIterator for &'a mut FuturesUnordered { - type Item = &'a mut Fut; - type IntoIter = IterMut<'a, Fut>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -impl IntoIterator for FuturesUnordered { - type Item = Fut; - type IntoIter = IntoIter; - - fn into_iter(mut self) -> Self::IntoIter { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. - let task = *self.head_all.get_mut(); - let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; - - IntoIter { len, inner: self } - } -} - -impl FromIterator for FuturesUnordered { - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - let acc = Self::new(); - iter.into_iter().fold(acc, |acc, item| { - acc.push(item); - acc - }) - } -} - -impl FusedStream for FuturesUnordered { - fn is_terminated(&self) -> bool { - self.is_terminated.load(Relaxed) - } -} - -impl Extend for FuturesUnordered { - fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - for item in iter { - self.push(item); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,122 +0,0 @@ -use 
crate::task::AtomicWaker; -use alloc::sync::Arc; -use core::cell::UnsafeCell; -use core::ptr; -use core::sync::atomic::AtomicPtr; -use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; - -use super::abort::abort; -use super::task::Task; - -pub(super) enum Dequeue { - Data(*const Task), - Empty, - Inconsistent, -} - -pub(super) struct ReadyToRunQueue { - // The waker of the task using `FuturesUnordered`. - pub(super) waker: AtomicWaker, - - // Head/tail of the readiness queue - pub(super) head: AtomicPtr>, - pub(super) tail: UnsafeCell<*const Task>, - pub(super) stub: Arc>, -} - -/// An MPSC queue into which the tasks containing the futures are inserted -/// whenever the future inside is scheduled for polling. -impl ReadyToRunQueue { - /// The enqueue function from the 1024cores intrusive MPSC queue algorithm. - pub(super) fn enqueue(&self, task: *const Task) { - unsafe { - debug_assert!((*task).queued.load(Relaxed)); - - // This action does not require any coordination - (*task).next_ready_to_run.store(ptr::null_mut(), Relaxed); - - // Note that these atomic orderings come from 1024cores - let task = task as *mut _; - let prev = self.head.swap(task, AcqRel); - (*prev).next_ready_to_run.store(task, Release); - } - } - - /// The dequeue function from the 1024cores intrusive MPSC queue algorithm - /// - /// Note that this is unsafe as it required mutual exclusion (only one - /// thread can call this) to be guaranteed elsewhere. 
- pub(super) unsafe fn dequeue(&self) -> Dequeue { - let mut tail = *self.tail.get(); - let mut next = (*tail).next_ready_to_run.load(Acquire); - - if tail == self.stub() { - if next.is_null() { - return Dequeue::Empty; - } - - *self.tail.get() = next; - tail = next; - next = (*next).next_ready_to_run.load(Acquire); - } - - if !next.is_null() { - *self.tail.get() = next; - debug_assert!(tail != self.stub()); - return Dequeue::Data(tail); - } - - if self.head.load(Acquire) as *const _ != tail { - return Dequeue::Inconsistent; - } - - self.enqueue(self.stub()); - - next = (*tail).next_ready_to_run.load(Acquire); - - if !next.is_null() { - *self.tail.get() = next; - return Dequeue::Data(tail); - } - - Dequeue::Inconsistent - } - - pub(super) fn stub(&self) -> *const Task { - Arc::as_ptr(&self.stub) - } - - // Clear the queue of tasks. - // - // Note that each task has a strong reference count associated with it - // which is owned by the ready to run queue. This method just pulls out - // tasks and drops their refcounts. - // - // # Safety - // - // - All tasks **must** have had their futures dropped already (by FuturesUnordered::clear) - // - The caller **must** guarantee unique access to `self` - pub(crate) unsafe fn clear(&self) { - loop { - // SAFETY: We have the guarantee of mutual exclusion required by `dequeue`. - match self.dequeue() { - Dequeue::Empty => break, - Dequeue::Inconsistent => abort("inconsistent in drop"), - Dequeue::Data(ptr) => drop(Arc::from_raw(ptr)), - } - } - } -} - -impl Drop for ReadyToRunQueue { - fn drop(&mut self) { - // Once we're in the destructor for `Inner` we need to clear out - // the ready to run queue of tasks if there's anything left in there. - - // All tasks have had their futures dropped already by the `FuturesUnordered` - // destructor above, and we have &mut self, so this is safe. 
- unsafe { - self.clear(); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/task.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/task.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/futures_unordered/task.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/futures_unordered/task.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,125 +0,0 @@ -use alloc::sync::{Arc, Weak}; -use core::cell::UnsafeCell; -use core::sync::atomic::Ordering::{self, Relaxed, SeqCst}; -use core::sync::atomic::{AtomicBool, AtomicPtr}; - -use super::abort::abort; -use super::ReadyToRunQueue; -use crate::task::{waker_ref, ArcWake, WakerRef}; - -pub(super) struct Task { - // The future - pub(super) future: UnsafeCell>, - - // Next pointer for linked list tracking all active tasks (use - // `spin_next_all` to read when access is shared across threads) - pub(super) next_all: AtomicPtr>, - - // Previous task in linked list tracking all active tasks - pub(super) prev_all: UnsafeCell<*const Task>, - - // Length of the linked list tracking all active tasks when this node was - // inserted (use `spin_next_all` to synchronize before reading when access - // is shared across threads) - pub(super) len_all: UnsafeCell, - - // Next pointer in ready to run queue - pub(super) next_ready_to_run: AtomicPtr>, - - // Queue that we'll be enqueued to when woken - pub(super) ready_to_run_queue: Weak>, - - // Whether or not this task is currently in the ready to run queue - pub(super) queued: AtomicBool, - - // Whether the future was awoken during polling - // It is possible for this flag to be set to true after the polling, - // but it will be ignored. - pub(super) woken: AtomicBool, -} - -// `Task` can be sent across threads safely because it ensures that -// the underlying `Fut` type isn't touched from any of its methods. 
-// -// The parent (`super`) module is trusted not to access `future` -// across different threads. -unsafe impl Send for Task {} -unsafe impl Sync for Task {} - -impl ArcWake for Task { - fn wake_by_ref(arc_self: &Arc) { - let inner = match arc_self.ready_to_run_queue.upgrade() { - Some(inner) => inner, - None => return, - }; - - arc_self.woken.store(true, Relaxed); - - // It's our job to enqueue this task it into the ready to run queue. To - // do this we set the `queued` flag, and if successful we then do the - // actual queueing operation, ensuring that we're only queued once. - // - // Once the task is inserted call `wake` to notify the parent task, - // as it'll want to come along and run our task later. - // - // Note that we don't change the reference count of the task here, - // we merely enqueue the raw pointer. The `FuturesUnordered` - // implementation guarantees that if we set the `queued` flag that - // there's a reference count held by the main `FuturesUnordered` queue - // still. - let prev = arc_self.queued.swap(true, SeqCst); - if !prev { - inner.enqueue(Arc::as_ptr(arc_self)); - inner.waker.wake(); - } - } -} - -impl Task { - /// Returns a waker reference for this task without cloning the Arc. - pub(super) fn waker_ref(this: &Arc) -> WakerRef<'_> { - waker_ref(this) - } - - /// Spins until `next_all` is no longer set to `pending_next_all`. - /// - /// The temporary `pending_next_all` value is typically overwritten fairly - /// quickly after a node is inserted into the list of all futures, so this - /// should rarely spin much. - /// - /// When it returns, the correct `next_all` value is returned. - /// - /// `Relaxed` or `Acquire` ordering can be used. `Acquire` ordering must be - /// used before `len_all` can be safely read. 
- #[inline] - pub(super) fn spin_next_all( - &self, - pending_next_all: *mut Self, - ordering: Ordering, - ) -> *const Self { - loop { - let next = self.next_all.load(ordering); - if next != pending_next_all { - return next; - } - } - } -} - -impl Drop for Task { - fn drop(&mut self) { - // Since `Task` is sent across all threads for any lifetime, - // regardless of `Fut`, we, to guarantee memory safety, can't actually - // touch `Fut` at any time except when we have a reference to the - // `FuturesUnordered` itself . - // - // Consequently it *should* be the case that we always drop futures from - // the `FuturesUnordered` instance. This is a bomb, just in case there's - // a bug in that logic. - unsafe { - if (*self.future.get()).is_some() { - abort("future still here when dropping"); - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/iter.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/iter.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/iter.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/iter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,49 +0,0 @@ -use super::assert_stream; -use core::pin::Pin; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; - -/// Stream for the [`iter`] function. -#[derive(Debug, Clone)] -#[must_use = "streams do nothing unless polled"] -pub struct Iter { - iter: I, -} - -impl Unpin for Iter {} - -/// Converts an `Iterator` into a `Stream` which is always ready -/// to yield the next value. -/// -/// Iterators in Rust don't express the ability to block, so this adapter -/// simply always calls `iter.next()` and returns that. 
-/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::stream::{self, StreamExt}; -/// -/// let stream = stream::iter(vec![17, 19]); -/// assert_eq!(vec![17, 19], stream.collect::>().await); -/// # }); -/// ``` -pub fn iter(i: I) -> Iter -where - I: IntoIterator, -{ - assert_stream::(Iter { iter: i.into_iter() }) -} - -impl Stream for Iter -where - I: Iterator, -{ - type Item = I::Item; - - fn poll_next(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(self.iter.next()) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,148 +0,0 @@ -//! Asynchronous streams. -//! -//! This module contains: -//! -//! - The [`Stream`] trait, for objects that can asynchronously produce a -//! sequence of values. -//! - The [`StreamExt`] and [`TryStreamExt`] trait, which provides adapters for -//! chaining and composing streams. -//! - Top-level stream constructors like [`iter`](iter()) which creates a -//! stream from an iterator. 
- -#[cfg(feature = "alloc")] -pub use futures_core::stream::{BoxStream, LocalBoxStream}; -pub use futures_core::stream::{FusedStream, Stream, TryStream}; - -// Extension traits and combinators - -#[allow(clippy::module_inception)] -mod stream; -pub use self::stream::{ - All, Any, Chain, Collect, Concat, Count, Cycle, Enumerate, Filter, FilterMap, FlatMap, Flatten, - Fold, ForEach, Fuse, Inspect, Map, Next, NextIf, NextIfEq, Peek, PeekMut, Peekable, Scan, - SelectNextSome, Skip, SkipWhile, StreamExt, StreamFuture, Take, TakeUntil, TakeWhile, Then, - Unzip, Zip, -}; - -#[cfg(feature = "std")] -pub use self::stream::CatchUnwind; - -#[cfg(feature = "alloc")] -pub use self::stream::Chunks; - -#[cfg(feature = "alloc")] -pub use self::stream::ReadyChunks; - -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub use self::stream::Forward; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use self::stream::{ - BufferUnordered, Buffered, FlatMapUnordered, FlattenUnordered, ForEachConcurrent, -}; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -#[cfg(feature = "alloc")] -pub use self::stream::{ReuniteError, SplitSink, SplitStream}; - -mod try_stream; -pub use self::try_stream::{ - try_unfold, AndThen, ErrInto, InspectErr, InspectOk, IntoStream, MapErr, MapOk, OrElse, - TryCollect, TryConcat, TryFilter, TryFilterMap, TryFlatten, TryFold, TryForEach, TryNext, - TrySkipWhile, TryStreamExt, TryTakeWhile, TryUnfold, -}; - -#[cfg(feature = "io")] -#[cfg_attr(docsrs, doc(cfg(feature = "io")))] -#[cfg(feature = "std")] -pub use self::try_stream::IntoAsyncRead; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use self::try_stream::{ - TryBufferUnordered, TryBuffered, TryFlattenUnordered, TryForEachConcurrent, -}; - -#[cfg(feature = "alloc")] -pub use self::try_stream::{TryChunks, TryChunksError, TryReadyChunks, TryReadyChunksError}; - -// Primitive streams 
- -mod iter; -pub use self::iter::{iter, Iter}; - -mod repeat; -pub use self::repeat::{repeat, Repeat}; - -mod repeat_with; -pub use self::repeat_with::{repeat_with, RepeatWith}; - -mod empty; -pub use self::empty::{empty, Empty}; - -mod once; -pub use self::once::{once, Once}; - -mod pending; -pub use self::pending::{pending, Pending}; - -mod poll_fn; -pub use self::poll_fn::{poll_fn, PollFn}; - -mod poll_immediate; -pub use self::poll_immediate::{poll_immediate, PollImmediate}; - -mod select; -pub use self::select::{select, Select}; - -mod select_with_strategy; -pub use self::select_with_strategy::{select_with_strategy, PollNext, SelectWithStrategy}; - -mod unfold; -pub use self::unfold::{unfold, Unfold}; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod futures_ordered; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use self::futures_ordered::FuturesOrdered; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub mod futures_unordered; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -#[doc(inline)] -pub use self::futures_unordered::FuturesUnordered; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub mod select_all; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -#[doc(inline)] -pub use self::select_all::{select_all, SelectAll}; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod abortable; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use crate::abortable::{AbortHandle, AbortRegistration, Abortable, Aborted}; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use abortable::abortable; - -// Just a helper function to ensure the streams we're returning all have the -// right implementations. 
-pub(crate) fn assert_stream(stream: S) -> S -where - S: Stream, -{ - stream -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/once.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/once.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/once.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/once.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,67 +0,0 @@ -use super::assert_stream; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -/// Creates a stream of a single element. -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::stream::{self, StreamExt}; -/// -/// let stream = stream::once(async { 17 }); -/// let collected = stream.collect::>().await; -/// assert_eq!(collected, vec![17]); -/// # }); -/// ``` -pub fn once(future: Fut) -> Once { - assert_stream::(Once::new(future)) -} - -pin_project! { - /// A stream which emits single element and then EOF. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Once { - #[pin] - future: Option - } -} - -impl Once { - pub(crate) fn new(future: Fut) -> Self { - Self { future: Some(future) } - } -} - -impl Stream for Once { - type Item = Fut::Output; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - let v = match this.future.as_mut().as_pin_mut() { - Some(fut) => ready!(fut.poll(cx)), - None => return Poll::Ready(None), - }; - - this.future.set(None); - Poll::Ready(Some(v)) - } - - fn size_hint(&self) -> (usize, Option) { - if self.future.is_some() { - (1, Some(1)) - } else { - (0, Some(0)) - } - } -} - -impl FusedStream for Once { - fn is_terminated(&self) -> bool { - self.future.is_none() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/pending.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/pending.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/pending.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/pending.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,45 +0,0 @@ -use super::assert_stream; -use core::marker; -use core::pin::Pin; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; - -/// Stream for the [`pending()`] function. -#[derive(Debug)] -#[must_use = "streams do nothing unless polled"] -pub struct Pending { - _data: marker::PhantomData, -} - -/// Creates a stream which never returns any elements. -/// -/// The returned stream will always return `Pending` when polled. 
-pub fn pending() -> Pending { - assert_stream::(Pending { _data: marker::PhantomData }) -} - -impl Unpin for Pending {} - -impl FusedStream for Pending { - fn is_terminated(&self) -> bool { - true - } -} - -impl Stream for Pending { - type Item = T; - - fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Pending - } - - fn size_hint(&self) -> (usize, Option) { - (0, Some(0)) - } -} - -impl Clone for Pending { - fn clone(&self) -> Self { - pending() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/poll_fn.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/poll_fn.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/poll_fn.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/poll_fn.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,57 +0,0 @@ -//! Definition of the `PollFn` combinator - -use super::assert_stream; -use core::fmt; -use core::pin::Pin; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; - -/// Stream for the [`poll_fn`] function. -#[must_use = "streams do nothing unless polled"] -pub struct PollFn { - f: F, -} - -impl Unpin for PollFn {} - -impl fmt::Debug for PollFn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PollFn").finish() - } -} - -/// Creates a new stream wrapping a function returning `Poll>`. -/// -/// Polling the returned stream calls the wrapped function. 
-/// -/// # Examples -/// -/// ``` -/// use futures::stream::poll_fn; -/// use futures::task::Poll; -/// -/// let mut counter = 1usize; -/// -/// let read_stream = poll_fn(move |_| -> Poll> { -/// if counter == 0 { return Poll::Ready(None); } -/// counter -= 1; -/// Poll::Ready(Some("Hello, World!".to_owned())) -/// }); -/// ``` -pub fn poll_fn(f: F) -> PollFn -where - F: FnMut(&mut Context<'_>) -> Poll>, -{ - assert_stream::(PollFn { f }) -} - -impl Stream for PollFn -where - F: FnMut(&mut Context<'_>) -> Poll>, -{ - type Item = T; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - (&mut self.f)(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/poll_immediate.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/poll_immediate.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/poll_immediate.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/poll_immediate.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,80 +0,0 @@ -use core::pin::Pin; -use futures_core::task::{Context, Poll}; -use futures_core::Stream; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [poll_immediate](poll_immediate()) function. - /// - /// It will never return [Poll::Pending](core::task::Poll::Pending) - #[derive(Debug, Clone)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct PollImmediate { - #[pin] - stream: Option - } -} - -impl Stream for PollImmediate -where - S: Stream, -{ - type Item = Poll; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - let stream = match this.stream.as_mut().as_pin_mut() { - // inner is gone, so we can continue to signal that the stream is closed. 
- None => return Poll::Ready(None), - Some(inner) => inner, - }; - - match stream.poll_next(cx) { - Poll::Ready(Some(t)) => Poll::Ready(Some(Poll::Ready(t))), - Poll::Ready(None) => { - this.stream.set(None); - Poll::Ready(None) - } - Poll::Pending => Poll::Ready(Some(Poll::Pending)), - } - } - - fn size_hint(&self) -> (usize, Option) { - self.stream.as_ref().map_or((0, Some(0)), Stream::size_hint) - } -} - -impl super::FusedStream for PollImmediate { - fn is_terminated(&self) -> bool { - self.stream.is_none() - } -} - -/// Creates a new stream that always immediately returns [Poll::Ready](core::task::Poll::Ready) when awaiting it. -/// -/// This is useful when immediacy is more important than waiting for the next item to be ready. -/// -/// # Examples -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::stream::{self, StreamExt}; -/// use futures::task::Poll; -/// -/// let mut r = stream::poll_immediate(Box::pin(stream::iter(1_u32..3))); -/// assert_eq!(r.next().await, Some(Poll::Ready(1))); -/// assert_eq!(r.next().await, Some(Poll::Ready(2))); -/// assert_eq!(r.next().await, None); -/// -/// let mut p = stream::poll_immediate(Box::pin(stream::once(async { -/// futures::pending!(); -/// 42_u8 -/// }))); -/// assert_eq!(p.next().await, Some(Poll::Pending)); -/// assert_eq!(p.next().await, Some(Poll::Ready(42))); -/// assert_eq!(p.next().await, None); -/// # }); -/// ``` -pub fn poll_immediate(s: S) -> PollImmediate { - super::assert_stream::, PollImmediate>(PollImmediate { stream: Some(s) }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/repeat.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/repeat.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/repeat.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/repeat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -use super::assert_stream; -use core::pin::Pin; -use futures_core::stream::{FusedStream, 
Stream}; -use futures_core::task::{Context, Poll}; - -/// Stream for the [`repeat`] function. -#[derive(Debug, Clone)] -#[must_use = "streams do nothing unless polled"] -pub struct Repeat { - item: T, -} - -/// Create a stream which produces the same item repeatedly. -/// -/// The stream never terminates. Note that you likely want to avoid -/// usage of `collect` or such on the returned stream as it will exhaust -/// available memory as it tries to just fill up all RAM. -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::stream::{self, StreamExt}; -/// -/// let stream = stream::repeat(9); -/// assert_eq!(vec![9, 9, 9], stream.take(3).collect::>().await); -/// # }); -/// ``` -pub fn repeat(item: T) -> Repeat -where - T: Clone, -{ - assert_stream::(Repeat { item }) -} - -impl Unpin for Repeat {} - -impl Stream for Repeat -where - T: Clone, -{ - type Item = T; - - fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Some(self.item.clone())) - } - - fn size_hint(&self) -> (usize, Option) { - (usize::max_value(), None) - } -} - -impl FusedStream for Repeat -where - T: Clone, -{ - fn is_terminated(&self) -> bool { - false - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/repeat_with.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/repeat_with.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/repeat_with.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/repeat_with.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,93 +0,0 @@ -use super::assert_stream; -use core::pin::Pin; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; - -/// An stream that repeats elements of type `A` endlessly by -/// applying the provided closure `F: FnMut() -> A`. -/// -/// This `struct` is created by the [`repeat_with()`] function. -/// See its documentation for more. 
-#[derive(Debug, Clone)] -#[must_use = "streams do nothing unless polled"] -pub struct RepeatWith { - repeater: F, -} - -impl A> Unpin for RepeatWith {} - -impl A> Stream for RepeatWith { - type Item = A; - - fn poll_next(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Some((&mut self.repeater)())) - } - - fn size_hint(&self) -> (usize, Option) { - (usize::max_value(), None) - } -} - -impl A> FusedStream for RepeatWith { - fn is_terminated(&self) -> bool { - false - } -} - -/// Creates a new stream that repeats elements of type `A` endlessly by -/// applying the provided closure, the repeater, `F: FnMut() -> A`. -/// -/// The `repeat_with()` function calls the repeater over and over again. -/// -/// Infinite stream like `repeat_with()` are often used with adapters like -/// [`stream.take()`], in order to make them finite. -/// -/// If the element type of the stream you need implements [`Clone`], and -/// it is OK to keep the source element in memory, you should instead use -/// the [`stream.repeat()`] function. 
-/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::stream::{self, StreamExt}; -/// -/// // let's assume we have some value of a type that is not `Clone` -/// // or which don't want to have in memory just yet because it is expensive: -/// #[derive(PartialEq, Debug)] -/// struct Expensive; -/// -/// // a particular value forever: -/// let mut things = stream::repeat_with(|| Expensive); -/// -/// assert_eq!(Some(Expensive), things.next().await); -/// assert_eq!(Some(Expensive), things.next().await); -/// assert_eq!(Some(Expensive), things.next().await); -/// # }); -/// ``` -/// -/// Using mutation and going finite: -/// -/// ```rust -/// # futures::executor::block_on(async { -/// use futures::stream::{self, StreamExt}; -/// -/// // From the zeroth to the third power of two: -/// let mut curr = 1; -/// let mut pow2 = stream::repeat_with(|| { let tmp = curr; curr *= 2; tmp }) -/// .take(4); -/// -/// assert_eq!(Some(1), pow2.next().await); -/// assert_eq!(Some(2), pow2.next().await); -/// assert_eq!(Some(4), pow2.next().await); -/// assert_eq!(Some(8), pow2.next().await); -/// -/// // ... and now we're done -/// assert_eq!(None, pow2.next().await); -/// # }); -/// ``` -pub fn repeat_with A>(repeater: F) -> RepeatWith { - assert_stream::(RepeatWith { repeater }) -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/select_all.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/select_all.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/select_all.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/select_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,249 +0,0 @@ -//! 
An unbounded set of streams - -use core::fmt::{self, Debug}; -use core::iter::FromIterator; -use core::pin::Pin; - -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; - -use super::assert_stream; -use crate::stream::{futures_unordered, FuturesUnordered, StreamExt, StreamFuture}; - -/// An unbounded set of streams -/// -/// This "combinator" provides the ability to maintain a set of streams -/// and drive them all to completion. -/// -/// Streams are pushed into this set and their realized values are -/// yielded as they become ready. Streams will only be polled when they -/// generate notifications. This allows to coordinate a large number of streams. -/// -/// Note that you can create a ready-made `SelectAll` via the -/// `select_all` function in the `stream` module, or you can start with an -/// empty set with the `SelectAll::new` constructor. -#[must_use = "streams do nothing unless polled"] -pub struct SelectAll { - inner: FuturesUnordered>, -} - -impl Debug for SelectAll { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "SelectAll {{ ... }}") - } -} - -impl SelectAll { - /// Constructs a new, empty `SelectAll` - /// - /// The returned `SelectAll` does not contain any streams and, in this - /// state, `SelectAll::poll` will return `Poll::Ready(None)`. - pub fn new() -> Self { - Self { inner: FuturesUnordered::new() } - } - - /// Returns the number of streams contained in the set. - /// - /// This represents the total number of in-flight streams. - pub fn len(&self) -> usize { - self.inner.len() - } - - /// Returns `true` if the set contains no streams - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - /// Push a stream into the set. - /// - /// This function submits the given stream to the set for managing. This - /// function will not call `poll` on the submitted stream. 
The caller must - /// ensure that `SelectAll::poll` is called in order to receive task - /// notifications. - pub fn push(&mut self, stream: St) { - self.inner.push(stream.into_future()); - } - - /// Returns an iterator that allows inspecting each stream in the set. - pub fn iter(&self) -> Iter<'_, St> { - Iter(self.inner.iter()) - } - - /// Returns an iterator that allows modifying each stream in the set. - pub fn iter_mut(&mut self) -> IterMut<'_, St> { - IterMut(self.inner.iter_mut()) - } - - /// Clears the set, removing all streams. - pub fn clear(&mut self) { - self.inner.clear() - } -} - -impl Default for SelectAll { - fn default() -> Self { - Self::new() - } -} - -impl Stream for SelectAll { - type Item = St::Item; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - match ready!(self.inner.poll_next_unpin(cx)) { - Some((Some(item), remaining)) => { - self.push(remaining); - return Poll::Ready(Some(item)); - } - Some((None, _)) => { - // `FuturesUnordered` thinks it isn't terminated - // because it yielded a Some. - // We do not return, but poll `FuturesUnordered` - // in the next loop iteration. - } - None => return Poll::Ready(None), - } - } - } -} - -impl FusedStream for SelectAll { - fn is_terminated(&self) -> bool { - self.inner.is_terminated() - } -} - -/// Convert a list of streams into a `Stream` of results from the streams. -/// -/// This essentially takes a list of streams (e.g. a vector, an iterator, etc.) -/// and bundles them together into a single stream. -/// The stream will yield items as they become available on the underlying -/// streams internally, in the order they become available. -/// -/// Note that the returned set can also be used to dynamically push more -/// streams into the set as they become available. -/// -/// This function is only available when the `std` or `alloc` feature of this -/// library is activated, and it is activated by default. 
-pub fn select_all(streams: I) -> SelectAll -where - I: IntoIterator, - I::Item: Stream + Unpin, -{ - let mut set = SelectAll::new(); - - for stream in streams { - set.push(stream); - } - - assert_stream::<::Item, _>(set) -} - -impl FromIterator for SelectAll { - fn from_iter>(iter: T) -> Self { - select_all(iter) - } -} - -impl Extend for SelectAll { - fn extend>(&mut self, iter: T) { - for st in iter { - self.push(st) - } - } -} - -impl IntoIterator for SelectAll { - type Item = St; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter(self.inner.into_iter()) - } -} - -impl<'a, St: Stream + Unpin> IntoIterator for &'a SelectAll { - type Item = &'a St; - type IntoIter = Iter<'a, St>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, St: Stream + Unpin> IntoIterator for &'a mut SelectAll { - type Item = &'a mut St; - type IntoIter = IterMut<'a, St>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -/// Immutable iterator over all streams in the unordered set. -#[derive(Debug)] -pub struct Iter<'a, St: Unpin>(futures_unordered::Iter<'a, StreamFuture>); - -/// Mutable iterator over all streams in the unordered set. -#[derive(Debug)] -pub struct IterMut<'a, St: Unpin>(futures_unordered::IterMut<'a, StreamFuture>); - -/// Owned iterator over all streams in the unordered set. -#[derive(Debug)] -pub struct IntoIter(futures_unordered::IntoIter>); - -impl<'a, St: Stream + Unpin> Iterator for Iter<'a, St> { - type Item = &'a St; - - fn next(&mut self) -> Option { - let st = self.0.next()?; - let next = st.get_ref(); - // This should always be true because FuturesUnordered removes completed futures. 
- debug_assert!(next.is_some()); - next - } - - fn size_hint(&self) -> (usize, Option) { - self.0.size_hint() - } -} - -impl ExactSizeIterator for Iter<'_, St> {} - -impl<'a, St: Stream + Unpin> Iterator for IterMut<'a, St> { - type Item = &'a mut St; - - fn next(&mut self) -> Option { - let st = self.0.next()?; - let next = st.get_mut(); - // This should always be true because FuturesUnordered removes completed futures. - debug_assert!(next.is_some()); - next - } - - fn size_hint(&self) -> (usize, Option) { - self.0.size_hint() - } -} - -impl ExactSizeIterator for IterMut<'_, St> {} - -impl Iterator for IntoIter { - type Item = St; - - fn next(&mut self) -> Option { - let st = self.0.next()?; - let next = st.into_inner(); - // This should always be true because FuturesUnordered removes completed futures. - debug_assert!(next.is_some()); - next - } - - fn size_hint(&self) -> (usize, Option) { - self.0.size_hint() - } -} - -impl ExactSizeIterator for IntoIter {} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/select.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/select.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/select.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/select.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,117 +0,0 @@ -use super::assert_stream; -use crate::stream::{select_with_strategy, PollNext, SelectWithStrategy}; -use core::pin::Pin; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`select()`] function. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Select { - #[pin] - inner: SelectWithStrategy PollNext, PollNext>, - } -} - -/// This function will attempt to pull items from both streams. 
Each -/// stream will be polled in a round-robin fashion, and whenever a stream is -/// ready to yield an item that item is yielded. -/// -/// After one of the two input streams completes, the remaining one will be -/// polled exclusively. The returned stream completes when both input -/// streams have completed. -/// -/// Note that this function consumes both streams and returns a wrapped -/// version of them. -/// -/// ## Examples -/// -/// ```rust -/// # futures::executor::block_on(async { -/// use futures::stream::{ repeat, select, StreamExt }; -/// -/// let left = repeat(1); -/// let right = repeat(2); -/// -/// let mut out = select(left, right); -/// -/// for _ in 0..100 { -/// // We should be alternating. -/// assert_eq!(1, out.select_next_some().await); -/// assert_eq!(2, out.select_next_some().await); -/// } -/// # }); -/// ``` -pub fn select(stream1: St1, stream2: St2) -> Select -where - St1: Stream, - St2: Stream, -{ - fn round_robin(last: &mut PollNext) -> PollNext { - last.toggle() - } - - assert_stream::(Select { - inner: select_with_strategy(stream1, stream2, round_robin), - }) -} - -impl Select { - /// Acquires a reference to the underlying streams that this combinator is - /// pulling from. - pub fn get_ref(&self) -> (&St1, &St2) { - self.inner.get_ref() - } - - /// Acquires a mutable reference to the underlying streams that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// stream which may otherwise confuse this combinator. - pub fn get_mut(&mut self) -> (&mut St1, &mut St2) { - self.inner.get_mut() - } - - /// Acquires a pinned mutable reference to the underlying streams that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// stream which may otherwise confuse this combinator. 
- pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut St1>, Pin<&mut St2>) { - let this = self.project(); - this.inner.get_pin_mut() - } - - /// Consumes this combinator, returning the underlying streams. - /// - /// Note that this may discard intermediate state of this combinator, so - /// care should be taken to avoid losing resources when this is called. - pub fn into_inner(self) -> (St1, St2) { - self.inner.into_inner() - } -} - -impl FusedStream for Select -where - St1: Stream, - St2: Stream, -{ - fn is_terminated(&self) -> bool { - self.inner.is_terminated() - } -} - -impl Stream for Select -where - St1: Stream, - St2: Stream, -{ - type Item = St1::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - this.inner.poll_next(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/select_with_strategy.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/select_with_strategy.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/select_with_strategy.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/select_with_strategy.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,304 +0,0 @@ -use super::assert_stream; -use core::{fmt, pin::Pin}; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -/// Type to tell [`SelectWithStrategy`] which stream to poll next. -#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)] -pub enum PollNext { - /// Poll the first stream. - Left, - /// Poll the second stream. - Right, -} - -impl PollNext { - /// Toggle the value and return the old one. 
- pub fn toggle(&mut self) -> Self { - let old = *self; - *self = self.other(); - old - } - - fn other(&self) -> PollNext { - match self { - PollNext::Left => PollNext::Right, - PollNext::Right => PollNext::Left, - } - } -} - -impl Default for PollNext { - fn default() -> Self { - PollNext::Left - } -} - -enum InternalState { - Start, - LeftFinished, - RightFinished, - BothFinished, -} - -impl InternalState { - fn finish(&mut self, ps: PollNext) { - match (&self, ps) { - (InternalState::Start, PollNext::Left) => { - *self = InternalState::LeftFinished; - } - (InternalState::Start, PollNext::Right) => { - *self = InternalState::RightFinished; - } - (InternalState::LeftFinished, PollNext::Right) - | (InternalState::RightFinished, PollNext::Left) => { - *self = InternalState::BothFinished; - } - _ => {} - } - } -} - -pin_project! { - /// Stream for the [`select_with_strategy()`] function. See function docs for details. - #[must_use = "streams do nothing unless polled"] - #[project = SelectWithStrategyProj] - pub struct SelectWithStrategy { - #[pin] - stream1: St1, - #[pin] - stream2: St2, - internal_state: InternalState, - state: State, - clos: Clos, - } -} - -/// This function will attempt to pull items from both streams. You provide a -/// closure to tell [`SelectWithStrategy`] which stream to poll. The closure can -/// store state on `SelectWithStrategy` to which it will receive a `&mut` on every -/// invocation. This allows basing the strategy on prior choices. -/// -/// After one of the two input streams completes, the remaining one will be -/// polled exclusively. The returned stream completes when both input -/// streams have completed. -/// -/// Note that this function consumes both streams and returns a wrapped -/// version of them. -/// -/// ## Examples -/// -/// ### Priority -/// This example shows how to always prioritize the left stream. 
-/// -/// ```rust -/// # futures::executor::block_on(async { -/// use futures::stream::{ repeat, select_with_strategy, PollNext, StreamExt }; -/// -/// let left = repeat(1); -/// let right = repeat(2); -/// -/// // We don't need any state, so let's make it an empty tuple. -/// // We must provide some type here, as there is no way for the compiler -/// // to infer it. As we don't need to capture variables, we can just -/// // use a function pointer instead of a closure. -/// fn prio_left(_: &mut ()) -> PollNext { PollNext::Left } -/// -/// let mut out = select_with_strategy(left, right, prio_left); -/// -/// for _ in 0..100 { -/// // Whenever we poll out, we will alwas get `1`. -/// assert_eq!(1, out.select_next_some().await); -/// } -/// # }); -/// ``` -/// -/// ### Round Robin -/// This example shows how to select from both streams round robin. -/// Note: this special case is provided by [`futures-util::stream::select`]. -/// -/// ```rust -/// # futures::executor::block_on(async { -/// use futures::stream::{ repeat, select_with_strategy, PollNext, StreamExt }; -/// -/// let left = repeat(1); -/// let right = repeat(2); -/// -/// let rrobin = |last: &mut PollNext| last.toggle(); -/// -/// let mut out = select_with_strategy(left, right, rrobin); -/// -/// for _ in 0..100 { -/// // We should be alternating now. -/// assert_eq!(1, out.select_next_some().await); -/// assert_eq!(2, out.select_next_some().await); -/// } -/// # }); -/// ``` -pub fn select_with_strategy( - stream1: St1, - stream2: St2, - which: Clos, -) -> SelectWithStrategy -where - St1: Stream, - St2: Stream, - Clos: FnMut(&mut State) -> PollNext, - State: Default, -{ - assert_stream::(SelectWithStrategy { - stream1, - stream2, - state: Default::default(), - internal_state: InternalState::Start, - clos: which, - }) -} - -impl SelectWithStrategy { - /// Acquires a reference to the underlying streams that this combinator is - /// pulling from. 
- pub fn get_ref(&self) -> (&St1, &St2) { - (&self.stream1, &self.stream2) - } - - /// Acquires a mutable reference to the underlying streams that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// stream which may otherwise confuse this combinator. - pub fn get_mut(&mut self) -> (&mut St1, &mut St2) { - (&mut self.stream1, &mut self.stream2) - } - - /// Acquires a pinned mutable reference to the underlying streams that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// stream which may otherwise confuse this combinator. - pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut St1>, Pin<&mut St2>) { - let this = self.project(); - (this.stream1, this.stream2) - } - - /// Consumes this combinator, returning the underlying streams. - /// - /// Note that this may discard intermediate state of this combinator, so - /// care should be taken to avoid losing resources when this is called. 
- pub fn into_inner(self) -> (St1, St2) { - (self.stream1, self.stream2) - } -} - -impl FusedStream for SelectWithStrategy -where - St1: Stream, - St2: Stream, - Clos: FnMut(&mut State) -> PollNext, -{ - fn is_terminated(&self) -> bool { - match self.internal_state { - InternalState::BothFinished => true, - _ => false, - } - } -} - -#[inline] -fn poll_side( - select: &mut SelectWithStrategyProj<'_, St1, St2, Clos, State>, - side: PollNext, - cx: &mut Context<'_>, -) -> Poll> -where - St1: Stream, - St2: Stream, -{ - match side { - PollNext::Left => select.stream1.as_mut().poll_next(cx), - PollNext::Right => select.stream2.as_mut().poll_next(cx), - } -} - -#[inline] -fn poll_inner( - select: &mut SelectWithStrategyProj<'_, St1, St2, Clos, State>, - side: PollNext, - cx: &mut Context<'_>, -) -> Poll> -where - St1: Stream, - St2: Stream, -{ - let first_done = match poll_side(select, side, cx) { - Poll::Ready(Some(item)) => return Poll::Ready(Some(item)), - Poll::Ready(None) => { - select.internal_state.finish(side); - true - } - Poll::Pending => false, - }; - let other = side.other(); - match poll_side(select, other, cx) { - Poll::Ready(None) => { - select.internal_state.finish(other); - if first_done { - Poll::Ready(None) - } else { - Poll::Pending - } - } - a => a, - } -} - -impl Stream for SelectWithStrategy -where - St1: Stream, - St2: Stream, - Clos: FnMut(&mut State) -> PollNext, -{ - type Item = St1::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - match this.internal_state { - InternalState::Start => { - let next_side = (this.clos)(this.state); - poll_inner(&mut this, next_side, cx) - } - InternalState::LeftFinished => match this.stream2.poll_next(cx) { - Poll::Ready(None) => { - *this.internal_state = InternalState::BothFinished; - Poll::Ready(None) - } - a => a, - }, - InternalState::RightFinished => match this.stream1.poll_next(cx) { - Poll::Ready(None) => { - *this.internal_state = 
InternalState::BothFinished; - Poll::Ready(None) - } - a => a, - }, - InternalState::BothFinished => Poll::Ready(None), - } - } -} - -impl fmt::Debug for SelectWithStrategy -where - St1: fmt::Debug, - St2: fmt::Debug, - State: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SelectWithStrategy") - .field("stream1", &self.stream1) - .field("stream2", &self.stream2) - .field("state", &self.state) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/all.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/all.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/all.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,93 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`all`](super::StreamExt::all) method. 
- #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct All { - #[pin] - stream: St, - f: F, - done: bool, - #[pin] - future: Option, - } -} - -impl fmt::Debug for All -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("All") - .field("stream", &self.stream) - .field("done", &self.done) - .field("future", &self.future) - .finish() - } -} - -impl All -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, done: false, future: None } - } -} - -impl FusedFuture for All -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.done && self.future.is_none() - } -} - -impl Future for All -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - type Output = bool; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - // we're currently processing a future to produce a new value - let res = ready!(fut.poll(cx)); - this.future.set(None); - if !res { - *this.done = true; - break false; - } // early exit - } else if !*this.done { - // we're waiting on a new item from the stream - match ready!(this.stream.as_mut().poll_next(cx)) { - Some(item) => { - this.future.set(Some((this.f)(item))); - } - None => { - *this.done = true; - break true; - } - } - } else { - panic!("All polled after completion") - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/any.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/any.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/any.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/any.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,93 +0,0 @@ -use core::fmt; -use 
core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`any`](super::StreamExt::any) method. - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Any { - #[pin] - stream: St, - f: F, - done: bool, - #[pin] - future: Option, - } -} - -impl fmt::Debug for Any -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Any") - .field("stream", &self.stream) - .field("done", &self.done) - .field("future", &self.future) - .finish() - } -} - -impl Any -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, done: false, future: None } - } -} - -impl FusedFuture for Any -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.done && self.future.is_none() - } -} - -impl Future for Any -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - type Output = bool; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - // we're currently processing a future to produce a new value - let res = ready!(fut.poll(cx)); - this.future.set(None); - if res { - *this.done = true; - break true; - } // early exit - } else if !*this.done { - // we're waiting on a new item from the stream - match ready!(this.stream.as_mut().poll_next(cx)) { - Some(item) => { - this.future.set(Some((this.f)(item))); - } - None => { - *this.done = true; - break false; - } - } - } else { - panic!("Any polled after completion") - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/buffered.rs 
s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/buffered.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/buffered.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/buffered.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,118 +0,0 @@ -use crate::stream::{Fuse, FusedStream, FuturesOrdered, StreamExt}; -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`buffered`](super::StreamExt::buffered) method. - #[must_use = "streams do nothing unless polled"] - pub struct Buffered - where - St: Stream, - St::Item: Future, - { - #[pin] - stream: Fuse, - in_progress_queue: FuturesOrdered, - max: usize, - } -} - -impl fmt::Debug for Buffered -where - St: Stream + fmt::Debug, - St::Item: Future, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Buffered") - .field("stream", &self.stream) - .field("in_progress_queue", &self.in_progress_queue) - .field("max", &self.max) - .finish() - } -} - -impl Buffered -where - St: Stream, - St::Item: Future, -{ - pub(super) fn new(stream: St, n: usize) -> Self { - Self { stream: super::Fuse::new(stream), in_progress_queue: FuturesOrdered::new(), max: n } - } - - delegate_access_inner!(stream, St, (.)); -} - -impl Stream for Buffered -where - St: Stream, - St::Item: Future, -{ - type Item = ::Output; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - // First up, try to spawn off as many futures as possible by filling up - // our queue of futures. 
- while this.in_progress_queue.len() < *this.max { - match this.stream.as_mut().poll_next(cx) { - Poll::Ready(Some(fut)) => this.in_progress_queue.push_back(fut), - Poll::Ready(None) | Poll::Pending => break, - } - } - - // Attempt to pull the next value from the in_progress_queue - let res = this.in_progress_queue.poll_next_unpin(cx); - if let Some(val) = ready!(res) { - return Poll::Ready(Some(val)); - } - - // If more values are still coming from the stream, we're not done yet - if this.stream.is_done() { - Poll::Ready(None) - } else { - Poll::Pending - } - } - - fn size_hint(&self) -> (usize, Option) { - let queue_len = self.in_progress_queue.len(); - let (lower, upper) = self.stream.size_hint(); - let lower = lower.saturating_add(queue_len); - let upper = match upper { - Some(x) => x.checked_add(queue_len), - None => None, - }; - (lower, upper) - } -} - -impl FusedStream for Buffered -where - St: Stream, - St::Item: Future, -{ - fn is_terminated(&self) -> bool { - self.stream.is_done() && self.in_progress_queue.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Buffered -where - S: Stream + Sink, - S::Item: Future, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/buffer_unordered.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/buffer_unordered.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/buffer_unordered.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/buffer_unordered.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,120 +0,0 @@ -use crate::stream::{Fuse, FuturesUnordered, StreamExt}; -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use 
pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`buffer_unordered`](super::StreamExt::buffer_unordered) - /// method. - #[must_use = "streams do nothing unless polled"] - pub struct BufferUnordered - where - St: Stream, - { - #[pin] - stream: Fuse, - in_progress_queue: FuturesUnordered, - max: usize, - } -} - -impl fmt::Debug for BufferUnordered -where - St: Stream + fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BufferUnordered") - .field("stream", &self.stream) - .field("in_progress_queue", &self.in_progress_queue) - .field("max", &self.max) - .finish() - } -} - -impl BufferUnordered -where - St: Stream, - St::Item: Future, -{ - pub(super) fn new(stream: St, n: usize) -> Self { - Self { - stream: super::Fuse::new(stream), - in_progress_queue: FuturesUnordered::new(), - max: n, - } - } - - delegate_access_inner!(stream, St, (.)); -} - -impl Stream for BufferUnordered -where - St: Stream, - St::Item: Future, -{ - type Item = ::Output; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - // First up, try to spawn off as many futures as possible by filling up - // our queue of futures. 
- while this.in_progress_queue.len() < *this.max { - match this.stream.as_mut().poll_next(cx) { - Poll::Ready(Some(fut)) => this.in_progress_queue.push(fut), - Poll::Ready(None) | Poll::Pending => break, - } - } - - // Attempt to pull the next value from the in_progress_queue - match this.in_progress_queue.poll_next_unpin(cx) { - x @ Poll::Pending | x @ Poll::Ready(Some(_)) => return x, - Poll::Ready(None) => {} - } - - // If more values are still coming from the stream, we're not done yet - if this.stream.is_done() { - Poll::Ready(None) - } else { - Poll::Pending - } - } - - fn size_hint(&self) -> (usize, Option) { - let queue_len = self.in_progress_queue.len(); - let (lower, upper) = self.stream.size_hint(); - let lower = lower.saturating_add(queue_len); - let upper = match upper { - Some(x) => x.checked_add(queue_len), - None => None, - }; - (lower, upper) - } -} - -impl FusedStream for BufferUnordered -where - St: Stream, - St::Item: Future, -{ - fn is_terminated(&self) -> bool { - self.in_progress_queue.is_terminated() && self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for BufferUnordered -where - S: Stream + Sink, - S::Item: Future, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/catch_unwind.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/catch_unwind.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/catch_unwind.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/catch_unwind.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,61 +0,0 @@ -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; -use std::any::Any; -use std::panic::{catch_unwind, AssertUnwindSafe, UnwindSafe}; -use std::pin::Pin; - -pin_project! 
{ - /// Stream for the [`catch_unwind`](super::StreamExt::catch_unwind) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct CatchUnwind { - #[pin] - stream: St, - caught_unwind: bool, - } -} - -impl CatchUnwind { - pub(super) fn new(stream: St) -> Self { - Self { stream, caught_unwind: false } - } - - delegate_access_inner!(stream, St, ()); -} - -impl Stream for CatchUnwind { - type Item = Result>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if *this.caught_unwind { - Poll::Ready(None) - } else { - let res = catch_unwind(AssertUnwindSafe(|| this.stream.as_mut().poll_next(cx))); - - match res { - Ok(poll) => poll.map(|opt| opt.map(Ok)), - Err(e) => { - *this.caught_unwind = true; - Poll::Ready(Some(Err(e))) - } - } - } - } - - fn size_hint(&self) -> (usize, Option) { - if self.caught_unwind { - (0, Some(0)) - } else { - self.stream.size_hint() - } - } -} - -impl FusedStream for CatchUnwind { - fn is_terminated(&self) -> bool { - self.caught_unwind || self.stream.is_terminated() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/chain.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/chain.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/chain.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/chain.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`chain`](super::StreamExt::chain) method. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Chain { - #[pin] - first: Option, - #[pin] - second: St2, - } -} - -// All interactions with `Pin<&mut Chain<..>>` happen through these methods -impl Chain -where - St1: Stream, - St2: Stream, -{ - pub(super) fn new(stream1: St1, stream2: St2) -> Self { - Self { first: Some(stream1), second: stream2 } - } -} - -impl FusedStream for Chain -where - St1: Stream, - St2: FusedStream, -{ - fn is_terminated(&self) -> bool { - self.first.is_none() && self.second.is_terminated() - } -} - -impl Stream for Chain -where - St1: Stream, - St2: Stream, -{ - type Item = St1::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - if let Some(first) = this.first.as_mut().as_pin_mut() { - if let Some(item) = ready!(first.poll_next(cx)) { - return Poll::Ready(Some(item)); - } - - this.first.set(None); - } - this.second.poll_next(cx) - } - - fn size_hint(&self) -> (usize, Option) { - if let Some(first) = &self.first { - let (first_lower, first_upper) = first.size_hint(); - let (second_lower, second_upper) = self.second.size_hint(); - - let lower = first_lower.saturating_add(second_lower); - - let upper = match (first_upper, second_upper) { - (Some(x), Some(y)) => x.checked_add(y), - _ => None, - }; - - (lower, upper) - } else { - self.second.size_hint() - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/chunks.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/chunks.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/chunks.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/chunks.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,103 +0,0 @@ -use crate::stream::Fuse; -use alloc::vec::Vec; -use core::mem; -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; 
-#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`chunks`](super::StreamExt::chunks) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Chunks { - #[pin] - stream: Fuse, - items: Vec, - cap: usize, // https://github.com/rust-lang/futures-rs/issues/1475 - } -} - -impl Chunks { - pub(super) fn new(stream: St, capacity: usize) -> Self { - assert!(capacity > 0); - - Self { - stream: super::Fuse::new(stream), - items: Vec::with_capacity(capacity), - cap: capacity, - } - } - - fn take(self: Pin<&mut Self>) -> Vec { - let cap = self.cap; - mem::replace(self.project().items, Vec::with_capacity(cap)) - } - - delegate_access_inner!(stream, St, (.)); -} - -impl Stream for Chunks { - type Item = Vec; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.as_mut().project(); - loop { - match ready!(this.stream.as_mut().poll_next(cx)) { - // Push the item into the buffer and check whether it is full. - // If so, replace our buffer with a new and empty one and return - // the full one. - Some(item) => { - this.items.push(item); - if this.items.len() >= *this.cap { - return Poll::Ready(Some(self.take())); - } - } - - // Since the underlying stream ran out of values, return what we - // have buffered, if we have anything. 
- None => { - let last = if this.items.is_empty() { - None - } else { - let full_buf = mem::take(this.items); - Some(full_buf) - }; - - return Poll::Ready(last); - } - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let chunk_len = usize::from(!self.items.is_empty()); - let (lower, upper) = self.stream.size_hint(); - let lower = (lower / self.cap).saturating_add(chunk_len); - let upper = match upper { - Some(x) => x.checked_add(chunk_len), - None => None, - }; - (lower, upper) - } -} - -impl FusedStream for Chunks { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() && self.items.is_empty() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Chunks -where - S: Stream + Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/collect.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/collect.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/collect.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/collect.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -use core::mem; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`collect`](super::StreamExt::collect) method. 
- #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Collect { - #[pin] - stream: St, - collection: C, - } -} - -impl Collect { - fn finish(self: Pin<&mut Self>) -> C { - mem::take(self.project().collection) - } - - pub(super) fn new(stream: St) -> Self { - Self { stream, collection: Default::default() } - } -} - -impl FusedFuture for Collect -where - St: FusedStream, - C: Default + Extend, -{ - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Future for Collect -where - St: Stream, - C: Default + Extend, -{ - type Output = C; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.as_mut().project(); - loop { - match ready!(this.stream.as_mut().poll_next(cx)) { - Some(e) => this.collection.extend(Some(e)), - None => return Poll::Ready(self.finish()), - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/concat.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/concat.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/concat.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/concat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,62 +0,0 @@ -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`concat`](super::StreamExt::concat) method. 
- #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Concat { - #[pin] - stream: St, - accum: Option, - } -} - -impl Concat -where - St: Stream, - St::Item: Extend<::Item> + IntoIterator + Default, -{ - pub(super) fn new(stream: St) -> Self { - Self { stream, accum: None } - } -} - -impl Future for Concat -where - St: Stream, - St::Item: Extend<::Item> + IntoIterator + Default, -{ - type Output = St::Item; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - - loop { - match ready!(this.stream.as_mut().poll_next(cx)) { - None => return Poll::Ready(this.accum.take().unwrap_or_default()), - Some(e) => { - if let Some(a) = this.accum { - a.extend(e) - } else { - *this.accum = Some(e) - } - } - } - } - } -} - -impl FusedFuture for Concat -where - St: FusedStream, - St::Item: Extend<::Item> + IntoIterator + Default, -{ - fn is_terminated(&self) -> bool { - self.accum.is_none() && self.stream.is_terminated() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/count.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/count.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/count.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/count.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,53 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`count`](super::StreamExt::count) method. 
- #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Count { - #[pin] - stream: St, - count: usize - } -} - -impl fmt::Debug for Count -where - St: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Count").field("stream", &self.stream).field("count", &self.count).finish() - } -} - -impl Count { - pub(super) fn new(stream: St) -> Self { - Self { stream, count: 0 } - } -} - -impl FusedFuture for Count { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Future for Count { - type Output = usize; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - - Poll::Ready(loop { - match ready!(this.stream.as_mut().poll_next(cx)) { - Some(_) => *this.count += 1, - None => break *this.count, - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/cycle.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/cycle.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/cycle.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/cycle.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,68 +0,0 @@ -use core::pin::Pin; -use core::usize; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`cycle`](super::StreamExt::cycle) method. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Cycle { - orig: St, - #[pin] - stream: St, - } -} - -impl Cycle -where - St: Clone + Stream, -{ - pub(super) fn new(stream: St) -> Self { - Self { orig: stream.clone(), stream } - } -} - -impl Stream for Cycle -where - St: Clone + Stream, -{ - type Item = St::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - match ready!(this.stream.as_mut().poll_next(cx)) { - None => { - this.stream.set(this.orig.clone()); - this.stream.poll_next(cx) - } - item => Poll::Ready(item), - } - } - - fn size_hint(&self) -> (usize, Option) { - // the cycle stream is either empty or infinite - match self.orig.size_hint() { - size @ (0, Some(0)) => size, - (0, _) => (0, None), - _ => (usize::max_value(), None), - } - } -} - -impl FusedStream for Cycle -where - St: Clone + Stream, -{ - fn is_terminated(&self) -> bool { - // the cycle stream is either empty or infinite - if let (0, Some(0)) = self.size_hint() { - true - } else { - false - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/enumerate.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/enumerate.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/enumerate.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/enumerate.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,64 +0,0 @@ -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`enumerate`](super::StreamExt::enumerate) method. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Enumerate { - #[pin] - stream: St, - count: usize, - } -} - -impl Enumerate { - pub(super) fn new(stream: St) -> Self { - Self { stream, count: 0 } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for Enumerate { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Stream for Enumerate { - type Item = (usize, St::Item); - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - match ready!(this.stream.poll_next(cx)) { - Some(item) => { - let prev_count = *this.count; - *this.count += 1; - Poll::Ready(Some((prev_count, item))) - } - None => Poll::Ready(None), - } - } - - fn size_hint(&self) -> (usize, Option) { - self.stream.size_hint() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Enumerate -where - S: Stream + Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/filter_map.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/filter_map.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/filter_map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/filter_map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,111 +0,0 @@ -use crate::fns::FnMut1; -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`filter_map`](super::StreamExt::filter_map) method. 
- #[must_use = "streams do nothing unless polled"] - pub struct FilterMap { - #[pin] - stream: St, - f: F, - #[pin] - pending: Option, - } -} - -impl fmt::Debug for FilterMap -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FilterMap") - .field("stream", &self.stream) - .field("pending", &self.pending) - .finish() - } -} - -impl FilterMap -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, pending: None } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for FilterMap -where - St: Stream + FusedStream, - F: FnMut1, - Fut: Future>, -{ - fn is_terminated(&self) -> bool { - self.pending.is_none() && self.stream.is_terminated() - } -} - -impl Stream for FilterMap -where - St: Stream, - F: FnMut1, - Fut: Future>, -{ - type Item = T; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - Poll::Ready(loop { - if let Some(p) = this.pending.as_mut().as_pin_mut() { - // We have an item in progress, poll that until it's done - let item = ready!(p.poll(cx)); - this.pending.set(None); - if item.is_some() { - break item; - } - } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) { - // No item in progress, but the stream is still going - this.pending.set(Some(this.f.call_mut(item))); - } else { - // The stream is done - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - let pending_len = usize::from(self.pending.is_some()); - let (_, upper) = self.stream.size_hint(); - let upper = match upper { - Some(x) => x.checked_add(pending_len), - None => None, - }; - (0, upper) // can't know a lower bound, due to the predicate - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for FilterMap -where - S: Stream + Sink, - F: FnMut1, - Fut: Future, -{ - type Error = S::Error; 
- - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/filter.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/filter.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/filter.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/filter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,117 +0,0 @@ -use crate::fns::FnMut1; -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`filter`](super::StreamExt::filter) method. - #[must_use = "streams do nothing unless polled"] - pub struct Filter - where St: Stream, - { - #[pin] - stream: St, - f: F, - #[pin] - pending_fut: Option, - pending_item: Option, - } -} - -impl fmt::Debug for Filter -where - St: Stream + fmt::Debug, - St::Item: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Filter") - .field("stream", &self.stream) - .field("pending_fut", &self.pending_fut) - .field("pending_item", &self.pending_item) - .finish() - } -} - -#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 -impl Filter -where - St: Stream, - F: for<'a> FnMut1<&'a St::Item, Output = Fut>, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, pending_fut: None, pending_item: None } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for Filter -where - St: Stream + FusedStream, - F: FnMut(&St::Item) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.pending_fut.is_none() && self.stream.is_terminated() - } -} - -#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 -impl Stream 
for Filter -where - St: Stream, - F: for<'a> FnMut1<&'a St::Item, Output = Fut>, - Fut: Future, -{ - type Item = St::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - Poll::Ready(loop { - if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() { - let res = ready!(fut.poll(cx)); - this.pending_fut.set(None); - if res { - break this.pending_item.take(); - } - *this.pending_item = None; - } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) { - this.pending_fut.set(Some(this.f.call_mut(&item))); - *this.pending_item = Some(item); - } else { - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - let pending_len = usize::from(self.pending_item.is_some()); - let (_, upper) = self.stream.size_hint(); - let upper = match upper { - Some(x) => x.checked_add(pending_len), - None => None, - }; - (0, upper) // can't know a lower bound, due to the predicate - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Filter -where - S: Stream + Sink, - F: FnMut(&S::Item) -> Fut, - Fut: Future, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/flatten.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/flatten.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/flatten.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/flatten.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,73 +0,0 @@ -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`flatten`](super::StreamExt::flatten) method. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Flatten { - #[pin] - stream: St, - #[pin] - next: Option, - } -} - -impl Flatten { - pub(super) fn new(stream: St) -> Self { - Self { stream, next: None } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for Flatten -where - St: FusedStream, - St::Item: Stream, -{ - fn is_terminated(&self) -> bool { - self.next.is_none() && self.stream.is_terminated() - } -} - -impl Stream for Flatten -where - St: Stream, - St::Item: Stream, -{ - type Item = ::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - Poll::Ready(loop { - if let Some(s) = this.next.as_mut().as_pin_mut() { - if let Some(item) = ready!(s.poll_next(cx)) { - break Some(item); - } else { - this.next.set(None); - } - } else if let Some(s) = ready!(this.stream.as_mut().poll_next(cx)) { - this.next.set(Some(s)); - } else { - break None; - } - }) - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Flatten -where - S: Stream + Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/flatten_unordered.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/flatten_unordered.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/flatten_unordered.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/flatten_unordered.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,536 +0,0 @@ -use alloc::sync::Arc; -use core::{ - cell::UnsafeCell, - convert::identity, - fmt, - marker::PhantomData, - num::NonZeroUsize, - pin::Pin, - sync::atomic::{AtomicU8, Ordering}, -}; - -use pin_project_lite::pin_project; - -use futures_core::{ - future::Future, - ready, - stream::{FusedStream, Stream}, - task::{Context, Poll, Waker}, -}; -#[cfg(feature = "sink")] -use 
futures_sink::Sink; -use futures_task::{waker, ArcWake}; - -use crate::stream::FuturesUnordered; - -/// Stream for the [`flatten_unordered`](super::StreamExt::flatten_unordered) -/// method. -pub type FlattenUnordered = FlattenUnorderedWithFlowController; - -/// There is nothing to poll and stream isn't being polled/waking/woken at the moment. -const NONE: u8 = 0; - -/// Inner streams need to be polled. -const NEED_TO_POLL_INNER_STREAMS: u8 = 1; - -/// The base stream needs to be polled. -const NEED_TO_POLL_STREAM: u8 = 0b10; - -/// Both base stream and inner streams need to be polled. -const NEED_TO_POLL_ALL: u8 = NEED_TO_POLL_INNER_STREAMS | NEED_TO_POLL_STREAM; - -/// The current stream is being polled at the moment. -const POLLING: u8 = 0b100; - -/// Stream is being woken at the moment. -const WAKING: u8 = 0b1000; - -/// The stream was waked and will be polled. -const WOKEN: u8 = 0b10000; - -/// Internal polling state of the stream. -#[derive(Clone, Debug)] -struct SharedPollState { - state: Arc, -} - -impl SharedPollState { - /// Constructs new `SharedPollState` with the given state. - fn new(value: u8) -> SharedPollState { - SharedPollState { state: Arc::new(AtomicU8::new(value)) } - } - - /// Attempts to start polling, returning stored state in case of success. - /// Returns `None` if either waker is waking at the moment. - fn start_polling( - &self, - ) -> Option<(u8, PollStateBomb<'_, impl FnOnce(&SharedPollState) -> u8>)> { - let value = self - .state - .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |value| { - if value & WAKING == NONE { - Some(POLLING) - } else { - None - } - }) - .ok()?; - let bomb = PollStateBomb::new(self, SharedPollState::reset); - - Some((value, bomb)) - } - - /// Attempts to start the waking process and performs bitwise or with the given value. - /// - /// If some waker is already in progress or stream is already woken/being polled, waking process won't start, however - /// state will be disjuncted with the given value. 
- fn start_waking( - &self, - to_poll: u8, - ) -> Option<(u8, PollStateBomb<'_, impl FnOnce(&SharedPollState) -> u8>)> { - let value = self - .state - .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |value| { - let mut next_value = value | to_poll; - if value & (WOKEN | POLLING) == NONE { - next_value |= WAKING; - } - - if next_value != value { - Some(next_value) - } else { - None - } - }) - .ok()?; - - // Only start the waking process if we're not in the polling/waking phase and the stream isn't woken already - if value & (WOKEN | POLLING | WAKING) == NONE { - let bomb = PollStateBomb::new(self, SharedPollState::stop_waking); - - Some((value, bomb)) - } else { - None - } - } - - /// Sets current state to - /// - `!POLLING` allowing to use wakers - /// - `WOKEN` if the state was changed during `POLLING` phase as waker will be called, - /// or `will_be_woken` flag supplied - /// - `!WAKING` as - /// * Wakers called during the `POLLING` phase won't propagate their calls - /// * `POLLING` phase can't start if some of the wakers are active - /// So no wrapped waker can touch the inner waker's cell, it's safe to poll again. - fn stop_polling(&self, to_poll: u8, will_be_woken: bool) -> u8 { - self.state - .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |mut value| { - let mut next_value = to_poll; - - value &= NEED_TO_POLL_ALL; - if value != NONE || will_be_woken { - next_value |= WOKEN; - } - next_value |= value; - - Some(next_value & !POLLING & !WAKING) - }) - .unwrap() - } - - /// Toggles state to non-waking, allowing to start polling. - fn stop_waking(&self) -> u8 { - let value = self - .state - .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |value| { - let next_value = value & !WAKING | WOKEN; - - if next_value != value { - Some(next_value) - } else { - None - } - }) - .unwrap_or_else(identity); - - debug_assert!(value & (WOKEN | POLLING | WAKING) == WAKING); - value - } - - /// Resets current state allowing to poll the stream and wake up wakers. 
- fn reset(&self) -> u8 { - self.state.swap(NEED_TO_POLL_ALL, Ordering::SeqCst) - } -} - -/// Used to execute some function on the given state when dropped. -struct PollStateBomb<'a, F: FnOnce(&SharedPollState) -> u8> { - state: &'a SharedPollState, - drop: Option, -} - -impl<'a, F: FnOnce(&SharedPollState) -> u8> PollStateBomb<'a, F> { - /// Constructs new bomb with the given state. - fn new(state: &'a SharedPollState, drop: F) -> Self { - Self { state, drop: Some(drop) } - } - - /// Deactivates bomb, forces it to not call provided function when dropped. - fn deactivate(mut self) { - self.drop.take(); - } -} - -impl u8> Drop for PollStateBomb<'_, F> { - fn drop(&mut self) { - if let Some(drop) = self.drop.take() { - (drop)(self.state); - } - } -} - -/// Will update state with the provided value on `wake_by_ref` call -/// and then, if there is a need, call `inner_waker`. -struct WrappedWaker { - inner_waker: UnsafeCell>, - poll_state: SharedPollState, - need_to_poll: u8, -} - -unsafe impl Send for WrappedWaker {} -unsafe impl Sync for WrappedWaker {} - -impl WrappedWaker { - /// Replaces given waker's inner_waker for polling stream/futures which will - /// update poll state on `wake_by_ref` call. Use only if you need several - /// contexts. - /// - /// ## Safety - /// - /// This function will modify waker's `inner_waker` via `UnsafeCell`, so - /// it should be used only during `POLLING` phase by one thread at the time. - unsafe fn replace_waker(self_arc: &mut Arc, cx: &Context<'_>) { - *self_arc.inner_waker.get() = cx.waker().clone().into(); - } - - /// Attempts to start the waking process for the waker with the given value. - /// If succeeded, then the stream isn't yet woken and not being polled at the moment. 
- fn start_waking(&self) -> Option<(u8, PollStateBomb<'_, impl FnOnce(&SharedPollState) -> u8>)> { - self.poll_state.start_waking(self.need_to_poll) - } -} - -impl ArcWake for WrappedWaker { - fn wake_by_ref(self_arc: &Arc) { - if let Some((_, state_bomb)) = self_arc.start_waking() { - // Safety: now state is not `POLLING` - let waker_opt = unsafe { self_arc.inner_waker.get().as_ref().unwrap() }; - - if let Some(inner_waker) = waker_opt.clone() { - // Stop waking to allow polling stream - drop(state_bomb); - - // Wake up inner waker - inner_waker.wake(); - } - } - } -} - -pin_project! { - /// Future which polls optional inner stream. - /// - /// If it's `Some`, it will attempt to call `poll_next` on it, - /// returning `Some((item, next_item_fut))` in case of `Poll::Ready(Some(...))` - /// or `None` in case of `Poll::Ready(None)`. - /// - /// If `poll_next` will return `Poll::Pending`, it will be forwarded to - /// the future and current task will be notified by waker. - #[must_use = "futures do nothing unless you `.await` or poll them"] - struct PollStreamFut { - #[pin] - stream: Option, - } -} - -impl PollStreamFut { - /// Constructs new `PollStreamFut` using given `stream`. - fn new(stream: impl Into>) -> Self { - Self { stream: stream.into() } - } -} - -impl Future for PollStreamFut { - type Output = Option<(St::Item, PollStreamFut)>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut stream = self.project().stream; - - let item = if let Some(stream) = stream.as_mut().as_pin_mut() { - ready!(stream.poll_next(cx)) - } else { - None - }; - let next_item_fut = PollStreamFut::new(stream.get_mut().take()); - let out = item.map(|item| (item, next_item_fut)); - - Poll::Ready(out) - } -} - -pin_project! { - /// Stream for the [`flatten_unordered`](super::StreamExt::flatten_unordered) - /// method with ability to specify flow controller. 
- #[project = FlattenUnorderedWithFlowControllerProj] - #[must_use = "streams do nothing unless polled"] - pub struct FlattenUnorderedWithFlowController where St: Stream { - #[pin] - inner_streams: FuturesUnordered>, - #[pin] - stream: St, - poll_state: SharedPollState, - limit: Option, - is_stream_done: bool, - inner_streams_waker: Arc, - stream_waker: Arc, - flow_controller: PhantomData - } -} - -impl fmt::Debug for FlattenUnorderedWithFlowController -where - St: Stream + fmt::Debug, - St::Item: Stream + fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FlattenUnorderedWithFlowController") - .field("poll_state", &self.poll_state) - .field("inner_streams", &self.inner_streams) - .field("limit", &self.limit) - .field("stream", &self.stream) - .field("is_stream_done", &self.is_stream_done) - .field("flow_controller", &self.flow_controller) - .finish() - } -} - -impl FlattenUnorderedWithFlowController -where - St: Stream, - Fc: FlowController::Item>, - St::Item: Stream + Unpin, -{ - pub(crate) fn new( - stream: St, - limit: Option, - ) -> FlattenUnorderedWithFlowController { - let poll_state = SharedPollState::new(NEED_TO_POLL_STREAM); - - FlattenUnorderedWithFlowController { - inner_streams: FuturesUnordered::new(), - stream, - is_stream_done: false, - limit: limit.and_then(NonZeroUsize::new), - inner_streams_waker: Arc::new(WrappedWaker { - inner_waker: UnsafeCell::new(None), - poll_state: poll_state.clone(), - need_to_poll: NEED_TO_POLL_INNER_STREAMS, - }), - stream_waker: Arc::new(WrappedWaker { - inner_waker: UnsafeCell::new(None), - poll_state: poll_state.clone(), - need_to_poll: NEED_TO_POLL_STREAM, - }), - poll_state, - flow_controller: PhantomData, - } - } - - delegate_access_inner!(stream, St, ()); -} - -/// Returns the next flow step based on the received item. -pub trait FlowController { - /// Handles an item producing `FlowStep` describing the next flow step. 
- fn next_step(item: I) -> FlowStep; -} - -impl FlowController for () { - fn next_step(item: I) -> FlowStep { - FlowStep::Continue(item) - } -} - -/// Describes the next flow step. -#[derive(Debug, Clone)] -pub enum FlowStep { - /// Just yields an item and continues standard flow. - Continue(C), - /// Immediately returns an underlying item from the function. - Return(R), -} - -impl FlattenUnorderedWithFlowControllerProj<'_, St, Fc> -where - St: Stream, -{ - /// Checks if current `inner_streams` bucket size is greater than optional limit. - fn is_exceeded_limit(&self) -> bool { - self.limit.map_or(false, |limit| self.inner_streams.len() >= limit.get()) - } -} - -impl FusedStream for FlattenUnorderedWithFlowController -where - St: FusedStream, - Fc: FlowController::Item>, - St::Item: Stream + Unpin, -{ - fn is_terminated(&self) -> bool { - self.stream.is_terminated() && self.inner_streams.is_empty() - } -} - -impl Stream for FlattenUnorderedWithFlowController -where - St: Stream, - Fc: FlowController::Item>, - St::Item: Stream + Unpin, -{ - type Item = ::Item; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut next_item = None; - let mut need_to_poll_next = NONE; - - let mut this = self.as_mut().project(); - - // Attempt to start polling, in case some waker is holding the lock, wait in loop - let (mut poll_state_value, state_bomb) = loop { - if let Some(value) = this.poll_state.start_polling() { - break value; - } - }; - - // Safety: now state is `POLLING`. - unsafe { - WrappedWaker::replace_waker(this.stream_waker, cx); - WrappedWaker::replace_waker(this.inner_streams_waker, cx) - }; - - if poll_state_value & NEED_TO_POLL_STREAM != NONE { - let mut stream_waker = None; - - // Here we need to poll the base stream. 
- // - // To improve performance, we will attempt to place as many items as we can - // to the `FuturesUnordered` bucket before polling inner streams - loop { - if this.is_exceeded_limit() || *this.is_stream_done { - // We either exceeded the limit or the stream is exhausted - if !*this.is_stream_done { - // The stream needs to be polled in the next iteration - need_to_poll_next |= NEED_TO_POLL_STREAM; - } - - break; - } else { - let mut cx = Context::from_waker( - stream_waker.get_or_insert_with(|| waker(this.stream_waker.clone())), - ); - - match this.stream.as_mut().poll_next(&mut cx) { - Poll::Ready(Some(item)) => { - let next_item_fut = match Fc::next_step(item) { - // Propagates an item immediately (the main use-case is for errors) - FlowStep::Return(item) => { - need_to_poll_next |= NEED_TO_POLL_STREAM - | (poll_state_value & NEED_TO_POLL_INNER_STREAMS); - poll_state_value &= !NEED_TO_POLL_INNER_STREAMS; - - next_item = Some(item); - - break; - } - // Yields an item and continues processing (normal case) - FlowStep::Continue(inner_stream) => { - PollStreamFut::new(inner_stream) - } - }; - // Add new stream to the inner streams bucket - this.inner_streams.as_mut().push(next_item_fut); - // Inner streams must be polled afterward - poll_state_value |= NEED_TO_POLL_INNER_STREAMS; - } - Poll::Ready(None) => { - // Mark the base stream as done - *this.is_stream_done = true; - } - Poll::Pending => { - break; - } - } - } - } - } - - if poll_state_value & NEED_TO_POLL_INNER_STREAMS != NONE { - let inner_streams_waker = waker(this.inner_streams_waker.clone()); - let mut cx = Context::from_waker(&inner_streams_waker); - - match this.inner_streams.as_mut().poll_next(&mut cx) { - Poll::Ready(Some(Some((item, next_item_fut)))) => { - // Push next inner stream item future to the list of inner streams futures - this.inner_streams.as_mut().push(next_item_fut); - // Take the received item - next_item = Some(item); - // On the next iteration, inner streams must be polled again 
- need_to_poll_next |= NEED_TO_POLL_INNER_STREAMS; - } - Poll::Ready(Some(None)) => { - // On the next iteration, inner streams must be polled again - need_to_poll_next |= NEED_TO_POLL_INNER_STREAMS; - } - _ => {} - } - } - - // We didn't have any `poll_next` panic, so it's time to deactivate the bomb - state_bomb.deactivate(); - - // Call the waker at the end of polling if - let mut force_wake = - // we need to poll the stream and didn't reach the limit yet - need_to_poll_next & NEED_TO_POLL_STREAM != NONE && !this.is_exceeded_limit() - // or we need to poll the inner streams again - || need_to_poll_next & NEED_TO_POLL_INNER_STREAMS != NONE; - - // Stop polling and swap the latest state - poll_state_value = this.poll_state.stop_polling(need_to_poll_next, force_wake); - // If state was changed during `POLLING` phase, we also need to manually call a waker - force_wake |= poll_state_value & NEED_TO_POLL_ALL != NONE; - - let is_done = *this.is_stream_done && this.inner_streams.is_empty(); - - if next_item.is_some() || is_done { - Poll::Ready(next_item) - } else { - if force_wake { - cx.waker().wake_by_ref(); - } - - Poll::Pending - } - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for FlattenUnorderedWithFlowController -where - St: Stream + Sink, -{ - type Error = St::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/fold.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/fold.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/fold.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/fold.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,88 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - 
-pin_project! { - /// Future for the [`fold`](super::StreamExt::fold) method. - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Fold { - #[pin] - stream: St, - f: F, - accum: Option, - #[pin] - future: Option, - } -} - -impl fmt::Debug for Fold -where - St: fmt::Debug, - Fut: fmt::Debug, - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Fold") - .field("stream", &self.stream) - .field("accum", &self.accum) - .field("future", &self.future) - .finish() - } -} - -impl Fold -where - St: Stream, - F: FnMut(T, St::Item) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F, t: T) -> Self { - Self { stream, f, accum: Some(t), future: None } - } -} - -impl FusedFuture for Fold -where - St: Stream, - F: FnMut(T, St::Item) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.accum.is_none() && self.future.is_none() - } -} - -impl Future for Fold -where - St: Stream, - F: FnMut(T, St::Item) -> Fut, - Fut: Future, -{ - type Output = T; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - // we're currently processing a future to produce a new accum value - *this.accum = Some(ready!(fut.poll(cx))); - this.future.set(None); - } else if this.accum.is_some() { - // we're waiting on a new item from the stream - let res = ready!(this.stream.as_mut().poll_next(cx)); - let a = this.accum.take().unwrap(); - if let Some(item) = res { - this.future.set(Some((this.f)(a, item))); - } else { - break a; - } - } else { - panic!("Fold polled after completion") - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/for_each_concurrent.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/for_each_concurrent.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/for_each_concurrent.rs 2024-02-06 
12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/for_each_concurrent.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,119 +0,0 @@ -use crate::stream::{FuturesUnordered, StreamExt}; -use core::fmt; -use core::num::NonZeroUsize; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`for_each_concurrent`](super::StreamExt::for_each_concurrent) - /// method. - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ForEachConcurrent { - #[pin] - stream: Option, - f: F, - futures: FuturesUnordered, - limit: Option, - } -} - -impl fmt::Debug for ForEachConcurrent -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ForEachConcurrent") - .field("stream", &self.stream) - .field("futures", &self.futures) - .field("limit", &self.limit) - .finish() - } -} - -impl ForEachConcurrent -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, limit: Option, f: F) -> Self { - Self { - stream: Some(stream), - // Note: `limit` = 0 gets ignored. 
- limit: limit.and_then(NonZeroUsize::new), - f, - futures: FuturesUnordered::new(), - } - } -} - -impl FusedFuture for ForEachConcurrent -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.stream.is_none() && self.futures.is_empty() - } -} - -impl Future for ForEachConcurrent -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - let mut this = self.project(); - loop { - let mut made_progress_this_iter = false; - - // Check if we've already created a number of futures greater than `limit` - if this.limit.map(|limit| limit.get() > this.futures.len()).unwrap_or(true) { - let mut stream_completed = false; - let elem = if let Some(stream) = this.stream.as_mut().as_pin_mut() { - match stream.poll_next(cx) { - Poll::Ready(Some(elem)) => { - made_progress_this_iter = true; - Some(elem) - } - Poll::Ready(None) => { - stream_completed = true; - None - } - Poll::Pending => None, - } - } else { - None - }; - if stream_completed { - this.stream.set(None); - } - if let Some(elem) = elem { - this.futures.push((this.f)(elem)); - } - } - - match this.futures.poll_next_unpin(cx) { - Poll::Ready(Some(())) => made_progress_this_iter = true, - Poll::Ready(None) => { - if this.stream.is_none() { - return Poll::Ready(()); - } - } - Poll::Pending => {} - } - - if !made_progress_this_iter { - return Poll::Pending; - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/for_each.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/for_each.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/for_each.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/for_each.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,78 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use 
futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`for_each`](super::StreamExt::for_each) method. - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ForEach { - #[pin] - stream: St, - f: F, - #[pin] - future: Option, - } -} - -impl fmt::Debug for ForEach -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ForEach") - .field("stream", &self.stream) - .field("future", &self.future) - .finish() - } -} - -impl ForEach -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, future: None } - } -} - -impl FusedFuture for ForEach -where - St: FusedStream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.future.is_none() && self.stream.is_terminated() - } -} - -impl Future for ForEach -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - let mut this = self.project(); - loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - ready!(fut.poll(cx)); - this.future.set(None); - } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) { - this.future.set(Some((this.f)(item))); - } else { - break; - } - } - Poll::Ready(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/forward.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/forward.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/forward.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/forward.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -use crate::stream::Fuse; -use core::pin::Pin; -use 
futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`forward`](super::StreamExt::forward) method. - #[project = ForwardProj] - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Forward { - #[pin] - sink: Option, - #[pin] - stream: Fuse, - buffered_item: Option, - } -} - -impl Forward { - pub(crate) fn new(stream: St, sink: Si) -> Self { - Self { sink: Some(sink), stream: Fuse::new(stream), buffered_item: None } - } -} - -impl FusedFuture for Forward -where - Si: Sink, - St: Stream>, -{ - fn is_terminated(&self) -> bool { - self.sink.is_none() - } -} - -impl Future for Forward -where - Si: Sink, - St: Stream>, -{ - type Output = Result<(), E>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let ForwardProj { mut sink, mut stream, buffered_item } = self.project(); - let mut si = sink.as_mut().as_pin_mut().expect("polled `Forward` after completion"); - - loop { - // If we've got an item buffered already, we need to write it to the - // sink before we can do anything else - if buffered_item.is_some() { - ready!(si.as_mut().poll_ready(cx))?; - si.as_mut().start_send(buffered_item.take().unwrap())?; - } - - match stream.as_mut().poll_next(cx)? 
{ - Poll::Ready(Some(item)) => { - *buffered_item = Some(item); - } - Poll::Ready(None) => { - ready!(si.poll_close(cx))?; - sink.set(None); - return Poll::Ready(Ok(())); - } - Poll::Pending => { - ready!(si.poll_flush(cx))?; - return Poll::Pending; - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/fuse.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/fuse.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/fuse.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/fuse.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`fuse`](super::StreamExt::fuse) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Fuse { - #[pin] - stream: St, - done: bool, - } -} - -impl Fuse { - pub(super) fn new(stream: St) -> Self { - Self { stream, done: false } - } - - /// Returns whether the underlying stream has finished or not. - /// - /// If this method returns `true`, then all future calls to poll are - /// guaranteed to return `None`. If this returns `false`, then the - /// underlying stream is still in use. 
- pub fn is_done(&self) -> bool { - self.done - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for Fuse { - fn is_terminated(&self) -> bool { - self.done - } -} - -impl Stream for Fuse { - type Item = S::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - if *this.done { - return Poll::Ready(None); - } - - let item = ready!(this.stream.poll_next(cx)); - if item.is_none() { - *this.done = true; - } - Poll::Ready(item) - } - - fn size_hint(&self) -> (usize, Option) { - if self.done { - (0, Some(0)) - } else { - self.stream.size_hint() - } - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl, Item> Sink for Fuse { - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/into_future.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/into_future.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/into_future.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/into_future.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,90 +0,0 @@ -use crate::stream::StreamExt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; - -/// Future for the [`into_future`](super::StreamExt::into_future) method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct StreamFuture { - stream: Option, -} - -impl StreamFuture { - pub(super) fn new(stream: St) -> Self { - Self { stream: Some(stream) } - } - - /// Acquires a reference to the underlying stream that this combinator is - /// pulling from. 
- /// - /// This method returns an `Option` to account for the fact that `StreamFuture`'s - /// implementation of `Future::poll` consumes the underlying stream during polling - /// in order to return it to the caller of `Future::poll` if the stream yielded - /// an element. - pub fn get_ref(&self) -> Option<&St> { - self.stream.as_ref() - } - - /// Acquires a mutable reference to the underlying stream that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// stream which may otherwise confuse this combinator. - /// - /// This method returns an `Option` to account for the fact that `StreamFuture`'s - /// implementation of `Future::poll` consumes the underlying stream during polling - /// in order to return it to the caller of `Future::poll` if the stream yielded - /// an element. - pub fn get_mut(&mut self) -> Option<&mut St> { - self.stream.as_mut() - } - - /// Acquires a pinned mutable reference to the underlying stream that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// stream which may otherwise confuse this combinator. - /// - /// This method returns an `Option` to account for the fact that `StreamFuture`'s - /// implementation of `Future::poll` consumes the underlying stream during polling - /// in order to return it to the caller of `Future::poll` if the stream yielded - /// an element. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Option> { - self.get_mut().stream.as_mut().map(Pin::new) - } - - /// Consumes this combinator, returning the underlying stream. - /// - /// Note that this may discard intermediate state of this combinator, so - /// care should be taken to avoid losing resources when this is called. 
- /// - /// This method returns an `Option` to account for the fact that `StreamFuture`'s - /// implementation of `Future::poll` consumes the underlying stream during polling - /// in order to return it to the caller of `Future::poll` if the stream yielded - /// an element. - pub fn into_inner(self) -> Option { - self.stream - } -} - -impl FusedFuture for StreamFuture { - fn is_terminated(&self) -> bool { - self.stream.is_none() - } -} - -impl Future for StreamFuture { - type Output = (Option, St); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let item = { - let s = self.stream.as_mut().expect("polling StreamFuture twice"); - ready!(s.poll_next_unpin(cx)) - }; - let stream = self.stream.take().unwrap(); - Poll::Ready((item, stream)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/map.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/map.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,77 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -use crate::fns::FnMut1; - -pin_project! { - /// Stream for the [`map`](super::StreamExt::map) method. 
- #[must_use = "streams do nothing unless polled"] - pub struct Map { - #[pin] - stream: St, - f: F, - } -} - -impl fmt::Debug for Map -where - St: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Map").field("stream", &self.stream).finish() - } -} - -impl Map { - pub(crate) fn new(stream: St, f: F) -> Self { - Self { stream, f } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for Map -where - St: FusedStream, - F: FnMut1, -{ - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Stream for Map -where - St: Stream, - F: FnMut1, -{ - type Item = F::Output; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - let res = ready!(this.stream.as_mut().poll_next(cx)); - Poll::Ready(res.map(|x| this.f.call_mut(x))) - } - - fn size_hint(&self) -> (usize, Option) { - self.stream.size_hint() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Map -where - St: Stream + Sink, - F: FnMut1, -{ - type Error = St::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1697 +0,0 @@ -//! Streams -//! -//! This module contains a number of functions for working with `Stream`s, -//! including the `StreamExt` trait which adds methods to `Stream` types. 
- -use crate::future::{assert_future, Either}; -use crate::stream::assert_stream; -#[cfg(feature = "alloc")] -use alloc::boxed::Box; -#[cfg(feature = "alloc")] -use alloc::vec::Vec; -use core::pin::Pin; -#[cfg(feature = "sink")] -use futures_core::stream::TryStream; -#[cfg(feature = "alloc")] -use futures_core::stream::{BoxStream, LocalBoxStream}; -use futures_core::{ - future::Future, - stream::{FusedStream, Stream}, - task::{Context, Poll}, -}; -#[cfg(feature = "sink")] -use futures_sink::Sink; - -use crate::fns::{inspect_fn, InspectFn}; - -mod chain; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::chain::Chain; - -mod collect; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::collect::Collect; - -mod unzip; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::unzip::Unzip; - -mod concat; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::concat::Concat; - -mod count; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::count::Count; - -mod cycle; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::cycle::Cycle; - -mod enumerate; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::enumerate::Enumerate; - -mod filter; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::filter::Filter; - -mod filter_map; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::filter_map::FilterMap; - -mod flatten; - -delegate_all!( - /// Stream for the [`flatten`](StreamExt::flatten) method. 
- Flatten( - flatten::Flatten - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (.)] + New[|x: St| flatten::Flatten::new(x)] - where St: Stream -); - -mod fold; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::fold::Fold; - -mod any; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::any::Any; - -mod all; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::all::All; - -#[cfg(feature = "sink")] -mod forward; - -#[cfg(feature = "sink")] -delegate_all!( - /// Future for the [`forward`](super::StreamExt::forward) method. - #[cfg_attr(docsrs, doc(cfg(feature = "sink")))] - Forward( - forward::Forward - ): Debug + Future + FusedFuture + New[|x: St, y: Si| forward::Forward::new(x, y)] - where St: TryStream -); - -mod for_each; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::for_each::ForEach; - -mod fuse; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::fuse::Fuse; - -mod into_future; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::into_future::StreamFuture; - -delegate_all!( - /// Stream for the [`inspect`](StreamExt::inspect) method. - Inspect( - map::Map> - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (.)] + New[|x: St, f: F| map::Map::new(x, inspect_fn(f))] -); - -mod map; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::map::Map; - -delegate_all!( - /// Stream for the [`flat_map`](StreamExt::flat_map) method. - FlatMap( - flatten::Flatten, U> - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. 
.)] + New[|x: St, f: F| flatten::Flatten::new(Map::new(x, f))] -); - -mod next; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::next::Next; - -mod select_next_some; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::select_next_some::SelectNextSome; - -mod peek; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::peek::{NextIf, NextIfEq, Peek, PeekMut, Peekable}; - -mod skip; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::skip::Skip; - -mod skip_while; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::skip_while::SkipWhile; - -mod take; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::take::Take; - -mod take_while; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::take_while::TakeWhile; - -mod take_until; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::take_until::TakeUntil; - -mod then; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::then::Then; - -mod zip; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::zip::Zip; - -#[cfg(feature = "alloc")] -mod chunks; -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::chunks::Chunks; - -#[cfg(feature = "alloc")] -mod ready_chunks; -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::ready_chunks::ReadyChunks; - -mod scan; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::scan::Scan; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod buffer_unordered; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] 
-#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::buffer_unordered::BufferUnordered; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod buffered; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::buffered::Buffered; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub(crate) mod flatten_unordered; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] -pub use self::flatten_unordered::FlattenUnordered; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -delegate_all!( - /// Stream for the [`flat_map_unordered`](StreamExt::flat_map_unordered) method. - FlatMapUnordered( - FlattenUnordered> - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, limit: Option, f: F| FlattenUnordered::new(Map::new(x, f), limit)] - where St: Stream, U: Stream, U: Unpin, F: FnMut(St::Item) -> U -); - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod for_each_concurrent; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::for_each_concurrent::ForEachConcurrent; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -#[cfg(feature = "alloc")] -mod split; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "sink")] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::split::{ReuniteError, SplitSink, SplitStream}; - -#[cfg(feature = "std")] -mod catch_unwind; -#[cfg(feature = "std")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::catch_unwind::CatchUnwind; - -impl StreamExt for T where T: Stream {} - 
-/// An extension trait for `Stream`s that provides a variety of convenient -/// combinator functions. -pub trait StreamExt: Stream { - /// Creates a future that resolves to the next item in the stream. - /// - /// Note that because `next` doesn't take ownership over the stream, - /// the [`Stream`] type must be [`Unpin`]. If you want to use `next` with a - /// [`!Unpin`](Unpin) stream, you'll first have to pin the stream. This can - /// be done by boxing the stream using [`Box::pin`] or - /// pinning it to the stack using the `pin_mut!` macro from the `pin_utils` - /// crate. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let mut stream = stream::iter(1..=3); - /// - /// assert_eq!(stream.next().await, Some(1)); - /// assert_eq!(stream.next().await, Some(2)); - /// assert_eq!(stream.next().await, Some(3)); - /// assert_eq!(stream.next().await, None); - /// # }); - /// ``` - fn next(&mut self) -> Next<'_, Self> - where - Self: Unpin, - { - assert_future::, _>(Next::new(self)) - } - - /// Converts this stream into a future of `(next_item, tail_of_stream)`. - /// If the stream terminates, then the next item is [`None`]. - /// - /// The returned future can be used to compose streams and futures together - /// by placing everything into the "world of futures". - /// - /// Note that because `into_future` moves the stream, the [`Stream`] type - /// must be [`Unpin`]. If you want to use `into_future` with a - /// [`!Unpin`](Unpin) stream, you'll first have to pin the stream. This can - /// be done by boxing the stream using [`Box::pin`] or - /// pinning it to the stack using the `pin_mut!` macro from the `pin_utils` - /// crate. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=3); - /// - /// let (item, stream) = stream.into_future().await; - /// assert_eq!(Some(1), item); - /// - /// let (item, stream) = stream.into_future().await; - /// assert_eq!(Some(2), item); - /// # }); - /// ``` - fn into_future(self) -> StreamFuture - where - Self: Sized + Unpin, - { - assert_future::<(Option, Self), _>(StreamFuture::new(self)) - } - - /// Maps this stream's items to a different type, returning a new stream of - /// the resulting type. - /// - /// The provided closure is executed over all elements of this stream as - /// they are made available. It is executed inline with calls to - /// [`poll_next`](Stream::poll_next). - /// - /// Note that this function consumes the stream passed into it and returns a - /// wrapped version of it, similar to the existing `map` methods in the - /// standard library. - /// - /// See [`StreamExt::then`](Self::then) if you want to use a closure that - /// returns a future instead of a value. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=3); - /// let stream = stream.map(|x| x + 3); - /// - /// assert_eq!(vec![4, 5, 6], stream.collect::>().await); - /// # }); - /// ``` - fn map(self, f: F) -> Map - where - F: FnMut(Self::Item) -> T, - Self: Sized, - { - assert_stream::(Map::new(self, f)) - } - - /// Creates a stream which gives the current iteration count as well as - /// the next value. - /// - /// The stream returned yields pairs `(i, val)`, where `i` is the - /// current index of iteration and `val` is the value returned by the - /// stream. - /// - /// `enumerate()` keeps its count as a [`usize`]. 
If you want to count by a - /// different sized integer, the [`zip`](StreamExt::zip) function provides similar - /// functionality. - /// - /// # Overflow Behavior - /// - /// The method does no guarding against overflows, so enumerating more than - /// [`prim@usize::max_value()`] elements either produces the wrong result or panics. If - /// debug assertions are enabled, a panic is guaranteed. - /// - /// # Panics - /// - /// The returned stream might panic if the to-be-returned index would - /// overflow a [`usize`]. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(vec!['a', 'b', 'c']); - /// - /// let mut stream = stream.enumerate(); - /// - /// assert_eq!(stream.next().await, Some((0, 'a'))); - /// assert_eq!(stream.next().await, Some((1, 'b'))); - /// assert_eq!(stream.next().await, Some((2, 'c'))); - /// assert_eq!(stream.next().await, None); - /// # }); - /// ``` - fn enumerate(self) -> Enumerate - where - Self: Sized, - { - assert_stream::<(usize, Self::Item), _>(Enumerate::new(self)) - } - - /// Filters the values produced by this stream according to the provided - /// asynchronous predicate. - /// - /// As values of this stream are made available, the provided predicate `f` - /// will be run against them. If the predicate returns a `Future` which - /// resolves to `true`, then the stream will yield the value, but if the - /// predicate returns a `Future` which resolves to `false`, then the value - /// will be discarded and the next value will be produced. - /// - /// Note that this function consumes the stream passed into it and returns a - /// wrapped version of it, similar to the existing `filter` methods in the - /// standard library. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=10); - /// let events = stream.filter(|x| future::ready(x % 2 == 0)); - /// - /// assert_eq!(vec![2, 4, 6, 8, 10], events.collect::>().await); - /// # }); - /// ``` - fn filter(self, f: F) -> Filter - where - F: FnMut(&Self::Item) -> Fut, - Fut: Future, - Self: Sized, - { - assert_stream::(Filter::new(self, f)) - } - - /// Filters the values produced by this stream while simultaneously mapping - /// them to a different type according to the provided asynchronous closure. - /// - /// As values of this stream are made available, the provided function will - /// be run on them. If the future returned by the predicate `f` resolves to - /// [`Some(item)`](Some) then the stream will yield the value `item`, but if - /// it resolves to [`None`] then the next value will be produced. - /// - /// Note that this function consumes the stream passed into it and returns a - /// wrapped version of it, similar to the existing `filter_map` methods in - /// the standard library. - /// - /// # Examples - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=10); - /// let events = stream.filter_map(|x| async move { - /// if x % 2 == 0 { Some(x + 1) } else { None } - /// }); - /// - /// assert_eq!(vec![3, 5, 7, 9, 11], events.collect::>().await); - /// # }); - /// ``` - fn filter_map(self, f: F) -> FilterMap - where - F: FnMut(Self::Item) -> Fut, - Fut: Future>, - Self: Sized, - { - assert_stream::(FilterMap::new(self, f)) - } - - /// Computes from this stream's items new items of a different type using - /// an asynchronous closure. 
- /// - /// The provided closure `f` will be called with an `Item` once a value is - /// ready, it returns a future which will then be run to completion - /// to produce the next value on this stream. - /// - /// Note that this function consumes the stream passed into it and returns a - /// wrapped version of it. - /// - /// See [`StreamExt::map`](Self::map) if you want to use a closure that - /// returns a value instead of a future. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=3); - /// let stream = stream.then(|x| async move { x + 3 }); - /// - /// assert_eq!(vec![4, 5, 6], stream.collect::>().await); - /// # }); - /// ``` - fn then(self, f: F) -> Then - where - F: FnMut(Self::Item) -> Fut, - Fut: Future, - Self: Sized, - { - assert_stream::(Then::new(self, f)) - } - - /// Transforms a stream into a collection, returning a - /// future representing the result of that computation. - /// - /// The returned future will be resolved when the stream terminates. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::StreamExt; - /// use std::thread; - /// - /// let (tx, rx) = mpsc::unbounded(); - /// - /// thread::spawn(move || { - /// for i in 1..=5 { - /// tx.unbounded_send(i).unwrap(); - /// } - /// }); - /// - /// let output = rx.collect::>().await; - /// assert_eq!(output, vec![1, 2, 3, 4, 5]); - /// # }); - /// ``` - fn collect>(self) -> Collect - where - Self: Sized, - { - assert_future::(Collect::new(self)) - } - - /// Converts a stream of pairs into a future, which - /// resolves to pair of containers. - /// - /// `unzip()` produces a future, which resolves to two - /// collections: one from the left elements of the pairs, - /// and one from the right elements. - /// - /// The returned future will be resolved when the stream terminates. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::StreamExt; - /// use std::thread; - /// - /// let (tx, rx) = mpsc::unbounded(); - /// - /// thread::spawn(move || { - /// tx.unbounded_send((1, 2)).unwrap(); - /// tx.unbounded_send((3, 4)).unwrap(); - /// tx.unbounded_send((5, 6)).unwrap(); - /// }); - /// - /// let (o1, o2): (Vec<_>, Vec<_>) = rx.unzip().await; - /// assert_eq!(o1, vec![1, 3, 5]); - /// assert_eq!(o2, vec![2, 4, 6]); - /// # }); - /// ``` - fn unzip(self) -> Unzip - where - FromA: Default + Extend, - FromB: Default + Extend, - Self: Sized + Stream, - { - assert_future::<(FromA, FromB), _>(Unzip::new(self)) - } - - /// Concatenate all items of a stream into a single extendable - /// destination, returning a future representing the end result. - /// - /// This combinator will extend the first item with the contents - /// of all the subsequent results of the stream. If the stream is - /// empty, the default value will be returned. - /// - /// Works with all collections that implement the - /// [`Extend`](std::iter::Extend) trait. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::StreamExt; - /// use std::thread; - /// - /// let (tx, rx) = mpsc::unbounded(); - /// - /// thread::spawn(move || { - /// for i in (0..3).rev() { - /// let n = i * 3; - /// tx.unbounded_send(vec![n + 1, n + 2, n + 3]).unwrap(); - /// } - /// }); - /// - /// let result = rx.concat().await; - /// - /// assert_eq!(result, vec![7, 8, 9, 4, 5, 6, 1, 2, 3]); - /// # }); - /// ``` - fn concat(self) -> Concat - where - Self: Sized, - Self::Item: Extend<<::Item as IntoIterator>::Item> + IntoIterator + Default, - { - assert_future::(Concat::new(self)) - } - - /// Drives the stream to completion, counting the number of items. 
- /// - /// # Overflow Behavior - /// - /// The method does no guarding against overflows, so counting elements of a - /// stream with more than [`usize::MAX`] elements either produces the wrong - /// result or panics. If debug assertions are enabled, a panic is guaranteed. - /// - /// # Panics - /// - /// This function might panic if the iterator has more than [`usize::MAX`] - /// elements. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=10); - /// let count = stream.count().await; - /// - /// assert_eq!(count, 10); - /// # }); - /// ``` - fn count(self) -> Count - where - Self: Sized, - { - assert_future::(Count::new(self)) - } - - /// Repeats a stream endlessly. - /// - /// The stream never terminates. Note that you likely want to avoid - /// usage of `collect` or such on the returned stream as it will exhaust - /// available memory as it tries to just fill up all RAM. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// let a = [1, 2, 3]; - /// let mut s = stream::iter(a.iter()).cycle(); - /// - /// assert_eq!(s.next().await, Some(&1)); - /// assert_eq!(s.next().await, Some(&2)); - /// assert_eq!(s.next().await, Some(&3)); - /// assert_eq!(s.next().await, Some(&1)); - /// assert_eq!(s.next().await, Some(&2)); - /// assert_eq!(s.next().await, Some(&3)); - /// assert_eq!(s.next().await, Some(&1)); - /// # }); - /// ``` - fn cycle(self) -> Cycle - where - Self: Sized + Clone, - { - assert_stream::(Cycle::new(self)) - } - - /// Execute an accumulating asynchronous computation over a stream, - /// collecting all the values into one final result. - /// - /// This combinator will accumulate all values returned by this stream - /// according to the closure provided. 
The initial state is also provided to - /// this method and then is returned again by each execution of the closure. - /// Once the entire stream has been exhausted the returned future will - /// resolve to this value. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let number_stream = stream::iter(0..6); - /// let sum = number_stream.fold(0, |acc, x| async move { acc + x }); - /// assert_eq!(sum.await, 15); - /// # }); - /// ``` - fn fold(self, init: T, f: F) -> Fold - where - F: FnMut(T, Self::Item) -> Fut, - Fut: Future, - Self: Sized, - { - assert_future::(Fold::new(self, f, init)) - } - - /// Execute predicate over asynchronous stream, and return `true` if any element in stream satisfied a predicate. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let number_stream = stream::iter(0..10); - /// let contain_three = number_stream.any(|i| async move { i == 3 }); - /// assert_eq!(contain_three.await, true); - /// # }); - /// ``` - fn any(self, f: F) -> Any - where - F: FnMut(Self::Item) -> Fut, - Fut: Future, - Self: Sized, - { - assert_future::(Any::new(self, f)) - } - - /// Execute predicate over asynchronous stream, and return `true` if all element in stream satisfied a predicate. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let number_stream = stream::iter(0..10); - /// let less_then_twenty = number_stream.all(|i| async move { i < 20 }); - /// assert_eq!(less_then_twenty.await, true); - /// # }); - /// ``` - fn all(self, f: F) -> All - where - F: FnMut(Self::Item) -> Fut, - Fut: Future, - Self: Sized, - { - assert_future::(All::new(self, f)) - } - - /// Flattens a stream of streams into just one continuous stream. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::StreamExt; - /// use std::thread; - /// - /// let (tx1, rx1) = mpsc::unbounded(); - /// let (tx2, rx2) = mpsc::unbounded(); - /// let (tx3, rx3) = mpsc::unbounded(); - /// - /// thread::spawn(move || { - /// tx1.unbounded_send(1).unwrap(); - /// tx1.unbounded_send(2).unwrap(); - /// }); - /// thread::spawn(move || { - /// tx2.unbounded_send(3).unwrap(); - /// tx2.unbounded_send(4).unwrap(); - /// }); - /// thread::spawn(move || { - /// tx3.unbounded_send(rx1).unwrap(); - /// tx3.unbounded_send(rx2).unwrap(); - /// }); - /// - /// let output = rx3.flatten().collect::>().await; - /// assert_eq!(output, vec![1, 2, 3, 4]); - /// # }); - /// ``` - fn flatten(self) -> Flatten - where - Self::Item: Stream, - Self: Sized, - { - assert_stream::<::Item, _>(Flatten::new(self)) - } - - /// Flattens a stream of streams into just one continuous stream. Polls - /// inner streams produced by the base stream concurrently. - /// - /// The only argument is an optional limit on the number of concurrently - /// polled streams. If this limit is not `None`, no more than `limit` streams - /// will be polled at the same time. The `limit` argument is of type - /// `Into>`, and so can be provided as either `None`, - /// `Some(10)`, or just `10`. Note: a limit of zero is interpreted as - /// no limit at all, and will have the same result as passing in `None`. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::StreamExt; - /// use std::thread; - /// - /// let (tx1, rx1) = mpsc::unbounded(); - /// let (tx2, rx2) = mpsc::unbounded(); - /// let (tx3, rx3) = mpsc::unbounded(); - /// - /// thread::spawn(move || { - /// tx1.unbounded_send(1).unwrap(); - /// tx1.unbounded_send(2).unwrap(); - /// }); - /// thread::spawn(move || { - /// tx2.unbounded_send(3).unwrap(); - /// tx2.unbounded_send(4).unwrap(); - /// }); - /// thread::spawn(move || { - /// tx3.unbounded_send(rx1).unwrap(); - /// tx3.unbounded_send(rx2).unwrap(); - /// }); - /// - /// let mut output = rx3.flatten_unordered(None).collect::>().await; - /// output.sort(); - /// - /// assert_eq!(output, vec![1, 2, 3, 4]); - /// # }); - /// ``` - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn flatten_unordered(self, limit: impl Into>) -> FlattenUnordered - where - Self::Item: Stream + Unpin, - Self: Sized, - { - assert_stream::<::Item, _>(FlattenUnordered::new(self, limit.into())) - } - - /// Maps a stream like [`StreamExt::map`] but flattens nested `Stream`s. - /// - /// [`StreamExt::map`] is very useful, but if it produces a `Stream` instead, - /// you would have to chain combinators like `.map(f).flatten()` while this - /// combinator provides ability to write `.flat_map(f)` instead of chaining. - /// - /// The provided closure which produces inner streams is executed over all elements - /// of stream as last inner stream is terminated and next stream item is available. - /// - /// Note that this function consumes the stream passed into it and returns a - /// wrapped version of it, similar to the existing `flat_map` methods in the - /// standard library. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=3); - /// let stream = stream.flat_map(|x| stream::iter(vec![x + 3; x])); - /// - /// assert_eq!(vec![4, 5, 5, 6, 6, 6], stream.collect::>().await); - /// # }); - /// ``` - fn flat_map(self, f: F) -> FlatMap - where - F: FnMut(Self::Item) -> U, - U: Stream, - Self: Sized, - { - assert_stream::(FlatMap::new(self, f)) - } - - /// Maps a stream like [`StreamExt::map`] but flattens nested `Stream`s - /// and polls them concurrently, yielding items in any order, as they made - /// available. - /// - /// [`StreamExt::map`] is very useful, but if it produces `Stream`s - /// instead, and you need to poll all of them concurrently, you would - /// have to use something like `for_each_concurrent` and merge values - /// by hand. This combinator provides ability to collect all values - /// from concurrently polled streams into one stream. - /// - /// The first argument is an optional limit on the number of concurrently - /// polled streams. If this limit is not `None`, no more than `limit` streams - /// will be polled at the same time. The `limit` argument is of type - /// `Into>`, and so can be provided as either `None`, - /// `Some(10)`, or just `10`. Note: a limit of zero is interpreted as - /// no limit at all, and will have the same result as passing in `None`. - /// - /// The provided closure which produces inner streams is executed over - /// all elements of stream as next stream item is available and limit - /// of concurrently processed streams isn't exceeded. - /// - /// Note that this function consumes the stream passed into it and - /// returns a wrapped version of it. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..5); - /// let stream = stream.flat_map_unordered(1, |x| stream::iter(vec![x; x])); - /// let mut values = stream.collect::>().await; - /// values.sort(); - /// - /// assert_eq!(vec![1usize, 2, 2, 3, 3, 3, 4, 4, 4, 4], values); - /// # }); - /// ``` - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn flat_map_unordered( - self, - limit: impl Into>, - f: F, - ) -> FlatMapUnordered - where - U: Stream + Unpin, - F: FnMut(Self::Item) -> U, - Self: Sized, - { - assert_stream::(FlatMapUnordered::new(self, limit.into(), f)) - } - - /// Combinator similar to [`StreamExt::fold`] that holds internal state - /// and produces a new stream. - /// - /// Accepts initial state and closure which will be applied to each element - /// of the stream until provided closure returns `None`. Once `None` is - /// returned, stream will be terminated. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=10); - /// - /// let stream = stream.scan(0, |state, x| { - /// *state += x; - /// future::ready(if *state < 10 { Some(x) } else { None }) - /// }); - /// - /// assert_eq!(vec![1, 2, 3], stream.collect::>().await); - /// # }); - /// ``` - fn scan(self, initial_state: S, f: F) -> Scan - where - F: FnMut(&mut S, Self::Item) -> Fut, - Fut: Future>, - Self: Sized, - { - assert_stream::(Scan::new(self, initial_state, f)) - } - - /// Skip elements on this stream while the provided asynchronous predicate - /// resolves to `true`. - /// - /// This function, like `Iterator::skip_while`, will skip elements on the - /// stream until the predicate `f` resolves to `false`. 
Once one element - /// returns `false`, all future elements will be returned from the underlying - /// stream. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=10); - /// - /// let stream = stream.skip_while(|x| future::ready(*x <= 5)); - /// - /// assert_eq!(vec![6, 7, 8, 9, 10], stream.collect::>().await); - /// # }); - /// ``` - fn skip_while(self, f: F) -> SkipWhile - where - F: FnMut(&Self::Item) -> Fut, - Fut: Future, - Self: Sized, - { - assert_stream::(SkipWhile::new(self, f)) - } - - /// Take elements from this stream while the provided asynchronous predicate - /// resolves to `true`. - /// - /// This function, like `Iterator::take_while`, will take elements from the - /// stream until the predicate `f` resolves to `false`. Once one element - /// returns `false`, it will always return that the stream is done. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=10); - /// - /// let stream = stream.take_while(|x| future::ready(*x <= 5)); - /// - /// assert_eq!(vec![1, 2, 3, 4, 5], stream.collect::>().await); - /// # }); - /// ``` - fn take_while(self, f: F) -> TakeWhile - where - F: FnMut(&Self::Item) -> Fut, - Fut: Future, - Self: Sized, - { - assert_stream::(TakeWhile::new(self, f)) - } - - /// Take elements from this stream until the provided future resolves. - /// - /// This function will take elements from the stream until the provided - /// stopping future `fut` resolves. Once the `fut` future becomes ready, - /// this stream combinator will always return that the stream is done. - /// - /// The stopping future may return any type. Once the stream is stopped - /// the result of the stopping future may be accessed with `TakeUntil::take_result()`. 
- /// The stream may also be resumed with `TakeUntil::take_future()`. - /// See the documentation of [`TakeUntil`] for more information. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt}; - /// use futures::task::Poll; - /// - /// let stream = stream::iter(1..=10); - /// - /// let mut i = 0; - /// let stop_fut = future::poll_fn(|_cx| { - /// i += 1; - /// if i <= 5 { - /// Poll::Pending - /// } else { - /// Poll::Ready(()) - /// } - /// }); - /// - /// let stream = stream.take_until(stop_fut); - /// - /// assert_eq!(vec![1, 2, 3, 4, 5], stream.collect::>().await); - /// # }); - /// ``` - fn take_until(self, fut: Fut) -> TakeUntil - where - Fut: Future, - Self: Sized, - { - assert_stream::(TakeUntil::new(self, fut)) - } - - /// Runs this stream to completion, executing the provided asynchronous - /// closure for each element on the stream. - /// - /// The closure provided will be called for each item this stream produces, - /// yielding a future. That future will then be executed to completion - /// before moving on to the next item. - /// - /// The returned value is a `Future` where the `Output` type is `()`; it is - /// executed entirely for its side effects. - /// - /// To process each item in the stream and produce another stream instead - /// of a single future, use `then` instead. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt}; - /// - /// let mut x = 0; - /// - /// { - /// let fut = stream::repeat(1).take(3).for_each(|item| { - /// x += item; - /// future::ready(()) - /// }); - /// fut.await; - /// } - /// - /// assert_eq!(x, 3); - /// # }); - /// ``` - fn for_each(self, f: F) -> ForEach - where - F: FnMut(Self::Item) -> Fut, - Fut: Future, - Self: Sized, - { - assert_future::<(), _>(ForEach::new(self, f)) - } - - /// Runs this stream to completion, executing the provided asynchronous - /// closure for each element on the stream concurrently as elements become - /// available. - /// - /// This is similar to [`StreamExt::for_each`], but the futures - /// produced by the closure are run concurrently (but not in parallel-- - /// this combinator does not introduce any threads). - /// - /// The closure provided will be called for each item this stream produces, - /// yielding a future. That future will then be executed to completion - /// concurrently with the other futures produced by the closure. - /// - /// The first argument is an optional limit on the number of concurrent - /// futures. If this limit is not `None`, no more than `limit` futures - /// will be run concurrently. The `limit` argument is of type - /// `Into>`, and so can be provided as either `None`, - /// `Some(10)`, or just `10`. Note: a limit of zero is interpreted as - /// no limit at all, and will have the same result as passing in `None`. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::oneshot; - /// use futures::stream::{self, StreamExt}; - /// - /// let (tx1, rx1) = oneshot::channel(); - /// let (tx2, rx2) = oneshot::channel(); - /// let (tx3, rx3) = oneshot::channel(); - /// - /// let fut = stream::iter(vec![rx1, rx2, rx3]).for_each_concurrent( - /// /* limit */ 2, - /// |rx| async move { - /// rx.await.unwrap(); - /// } - /// ); - /// tx1.send(()).unwrap(); - /// tx2.send(()).unwrap(); - /// tx3.send(()).unwrap(); - /// fut.await; - /// # }) - /// ``` - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn for_each_concurrent( - self, - limit: impl Into>, - f: F, - ) -> ForEachConcurrent - where - F: FnMut(Self::Item) -> Fut, - Fut: Future, - Self: Sized, - { - assert_future::<(), _>(ForEachConcurrent::new(self, limit.into(), f)) - } - - /// Creates a new stream of at most `n` items of the underlying stream. - /// - /// Once `n` items have been yielded from this stream then it will always - /// return that the stream is done. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=10).take(3); - /// - /// assert_eq!(vec![1, 2, 3], stream.collect::>().await); - /// # }); - /// ``` - fn take(self, n: usize) -> Take - where - Self: Sized, - { - assert_stream::(Take::new(self, n)) - } - - /// Creates a new stream which skips `n` items of the underlying stream. - /// - /// Once `n` items have been skipped from this stream then it will always - /// return the remaining items on this stream. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(1..=10).skip(5); - /// - /// assert_eq!(vec![6, 7, 8, 9, 10], stream.collect::>().await); - /// # }); - /// ``` - fn skip(self, n: usize) -> Skip - where - Self: Sized, - { - assert_stream::(Skip::new(self, n)) - } - - /// Fuse a stream such that [`poll_next`](Stream::poll_next) will never - /// again be called once it has finished. This method can be used to turn - /// any `Stream` into a `FusedStream`. - /// - /// Normally, once a stream has returned [`None`] from - /// [`poll_next`](Stream::poll_next) any further calls could exhibit bad - /// behavior such as block forever, panic, never return, etc. If it is known - /// that [`poll_next`](Stream::poll_next) may be called after stream - /// has already finished, then this method can be used to ensure that it has - /// defined semantics. - /// - /// The [`poll_next`](Stream::poll_next) method of a `fuse`d stream - /// is guaranteed to return [`None`] after the underlying stream has - /// finished. - /// - /// # Examples - /// - /// ``` - /// use futures::executor::block_on_stream; - /// use futures::stream::{self, StreamExt}; - /// use futures::task::Poll; - /// - /// let mut x = 0; - /// let stream = stream::poll_fn(|_| { - /// x += 1; - /// match x { - /// 0..=2 => Poll::Ready(Some(x)), - /// 3 => Poll::Ready(None), - /// _ => panic!("should not happen") - /// } - /// }).fuse(); - /// - /// let mut iter = block_on_stream(stream); - /// assert_eq!(Some(1), iter.next()); - /// assert_eq!(Some(2), iter.next()); - /// assert_eq!(None, iter.next()); - /// assert_eq!(None, iter.next()); - /// // ... - /// ``` - fn fuse(self) -> Fuse - where - Self: Sized, - { - assert_stream::(Fuse::new(self)) - } - - /// Borrows a stream, rather than consuming it. 
- /// - /// This is useful to allow applying stream adaptors while still retaining - /// ownership of the original stream. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let mut stream = stream::iter(1..5); - /// - /// let sum = stream.by_ref() - /// .take(2) - /// .fold(0, |a, b| async move { a + b }) - /// .await; - /// assert_eq!(sum, 3); - /// - /// // You can use the stream again - /// let sum = stream.take(2) - /// .fold(0, |a, b| async move { a + b }) - /// .await; - /// assert_eq!(sum, 7); - /// # }); - /// ``` - fn by_ref(&mut self) -> &mut Self { - self - } - - /// Catches unwinding panics while polling the stream. - /// - /// Caught panic (if any) will be the last element of the resulting stream. - /// - /// In general, panics within a stream can propagate all the way out to the - /// task level. This combinator makes it possible to halt unwinding within - /// the stream itself. It's most commonly used within task executors. This - /// method should not be used for error handling. - /// - /// Note that this method requires the `UnwindSafe` bound from the standard - /// library. This isn't always applied automatically, and the standard - /// library provides an `AssertUnwindSafe` wrapper type to apply it - /// after-the fact. To assist using this method, the [`Stream`] trait is - /// also implemented for `AssertUnwindSafe` where `St` implements - /// [`Stream`]. - /// - /// This method is only available when the `std` feature of this - /// library is activated, and it is activated by default. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream = stream::iter(vec![Some(10), None, Some(11)]); - /// // Panic on second element - /// let stream_panicking = stream.map(|o| o.unwrap()); - /// // Collect all the results - /// let stream = stream_panicking.catch_unwind(); - /// - /// let results: Vec> = stream.collect().await; - /// match results[0] { - /// Ok(10) => {} - /// _ => panic!("unexpected result!"), - /// } - /// assert!(results[1].is_err()); - /// assert_eq!(results.len(), 2); - /// # }); - /// ``` - #[cfg(feature = "std")] - fn catch_unwind(self) -> CatchUnwind - where - Self: Sized + std::panic::UnwindSafe, - { - assert_stream(CatchUnwind::new(self)) - } - - /// Wrap the stream in a Box, pinning it. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - #[cfg(feature = "alloc")] - fn boxed<'a>(self) -> BoxStream<'a, Self::Item> - where - Self: Sized + Send + 'a, - { - assert_stream::(Box::pin(self)) - } - - /// Wrap the stream in a Box, pinning it. - /// - /// Similar to `boxed`, but without the `Send` requirement. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - #[cfg(feature = "alloc")] - fn boxed_local<'a>(self) -> LocalBoxStream<'a, Self::Item> - where - Self: Sized + 'a, - { - assert_stream::(Box::pin(self)) - } - - /// An adaptor for creating a buffered list of pending futures. - /// - /// If this stream's item can be converted into a future, then this adaptor - /// will buffer up to at most `n` futures and then return the outputs in the - /// same order as the underlying stream. No more than `n` futures will be - /// buffered at any point in time, and less than `n` may also be buffered - /// depending on the state of each future. 
- /// - /// The returned stream will be a stream of each future's output. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn buffered(self, n: usize) -> Buffered - where - Self::Item: Future, - Self: Sized, - { - assert_stream::<::Output, _>(Buffered::new(self, n)) - } - - /// An adaptor for creating a buffered list of pending futures (unordered). - /// - /// If this stream's item can be converted into a future, then this adaptor - /// will buffer up to `n` futures and then return the outputs in the order - /// in which they complete. No more than `n` futures will be buffered at - /// any point in time, and less than `n` may also be buffered depending on - /// the state of each future. - /// - /// The returned stream will be a stream of each future's output. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::oneshot; - /// use futures::stream::{self, StreamExt}; - /// - /// let (send_one, recv_one) = oneshot::channel(); - /// let (send_two, recv_two) = oneshot::channel(); - /// - /// let stream_of_futures = stream::iter(vec![recv_one, recv_two]); - /// let mut buffered = stream_of_futures.buffer_unordered(10); - /// - /// send_two.send(2i32)?; - /// assert_eq!(buffered.next().await, Some(Ok(2i32))); - /// - /// send_one.send(1i32)?; - /// assert_eq!(buffered.next().await, Some(Ok(1i32))); - /// - /// assert_eq!(buffered.next().await, None); - /// # Ok::<(), i32>(()) }).unwrap(); - /// ``` - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn buffer_unordered(self, n: usize) -> BufferUnordered - where - Self::Item: Future, - Self: Sized, - { - assert_stream::<::Output, _>(BufferUnordered::new(self, n)) - } - - /// An adapter for zipping two streams together. - /// - /// The zipped stream waits for both streams to produce an item, and then - /// returns that pair. If either stream ends then the zipped stream will - /// also end. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream1 = stream::iter(1..=3); - /// let stream2 = stream::iter(5..=10); - /// - /// let vec = stream1.zip(stream2) - /// .collect::>() - /// .await; - /// assert_eq!(vec![(1, 5), (2, 6), (3, 7)], vec); - /// # }); - /// ``` - /// - fn zip(self, other: St) -> Zip - where - St: Stream, - Self: Sized, - { - assert_stream::<(Self::Item, St::Item), _>(Zip::new(self, other)) - } - - /// Adapter for chaining two streams. - /// - /// The resulting stream emits elements from the first stream, and when - /// first stream reaches the end, emits the elements from the second stream. 
- /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// - /// let stream1 = stream::iter(vec![Ok(10), Err(false)]); - /// let stream2 = stream::iter(vec![Err(true), Ok(20)]); - /// - /// let stream = stream1.chain(stream2); - /// - /// let result: Vec<_> = stream.collect().await; - /// assert_eq!(result, vec![ - /// Ok(10), - /// Err(false), - /// Err(true), - /// Ok(20), - /// ]); - /// # }); - /// ``` - fn chain(self, other: St) -> Chain - where - St: Stream, - Self: Sized, - { - assert_stream::(Chain::new(self, other)) - } - - /// Creates a new stream which exposes a `peek` method. - /// - /// Calling `peek` returns a reference to the next item in the stream. - fn peekable(self) -> Peekable - where - Self: Sized, - { - assert_stream::(Peekable::new(self)) - } - - /// An adaptor for chunking up items of the stream inside a vector. - /// - /// This combinator will attempt to pull items from this stream and buffer - /// them into a local vector. At most `capacity` items will get buffered - /// before they're yielded from the returned stream. - /// - /// Note that the vectors returned from this iterator may not always have - /// `capacity` elements. If the underlying stream ended and only a partial - /// vector was created, it'll be returned. Additionally if an error happens - /// from the underlying stream then the currently buffered items will be - /// yielded. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - /// - /// # Panics - /// - /// This method will panic if `capacity` is zero. - #[cfg(feature = "alloc")] - fn chunks(self, capacity: usize) -> Chunks - where - Self: Sized, - { - assert_stream::, _>(Chunks::new(self, capacity)) - } - - /// An adaptor for chunking up ready items of the stream inside a vector. 
- /// - /// This combinator will attempt to pull ready items from this stream and - /// buffer them into a local vector. At most `capacity` items will get - /// buffered before they're yielded from the returned stream. If underlying - /// stream returns `Poll::Pending`, and collected chunk is not empty, it will - /// be immediately returned. - /// - /// If the underlying stream ended and only a partial vector was created, - /// it will be returned. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - /// - /// # Panics - /// - /// This method will panic if `capacity` is zero. - #[cfg(feature = "alloc")] - fn ready_chunks(self, capacity: usize) -> ReadyChunks - where - Self: Sized, - { - assert_stream::, _>(ReadyChunks::new(self, capacity)) - } - - /// A future that completes after the given stream has been fully processed - /// into the sink and the sink has been flushed and closed. - /// - /// This future will drive the stream to keep producing items until it is - /// exhausted, sending each item to the sink. It will complete once the - /// stream is exhausted, the sink has received and flushed all items, and - /// the sink is closed. Note that neither the original stream nor provided - /// sink will be output by this future. Pass the sink by `Pin<&mut S>` - /// (for example, via `forward(&mut sink)` inside an `async` fn/block) in - /// order to preserve access to the `Sink`. If the stream produces an error, - /// that error will be returned by this future without flushing/closing the sink. 
- #[cfg(feature = "sink")] - #[cfg_attr(docsrs, doc(cfg(feature = "sink")))] - fn forward(self, sink: S) -> Forward - where - S: Sink, - Self: TryStream + Sized, - // Self: TryStream + Sized + Stream::Ok, ::Error>>, - { - // TODO: type mismatch resolving `::Item == std::result::Result<::Ok, ::Error>` - // assert_future::, _>(Forward::new(self, sink)) - Forward::new(self, sink) - } - - /// Splits this `Stream + Sink` object into separate `Sink` and `Stream` - /// objects. - /// - /// This can be useful when you want to split ownership between tasks, or - /// allow direct interaction between the two objects (e.g. via - /// `Sink::send_all`). - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - #[cfg(feature = "sink")] - #[cfg_attr(docsrs, doc(cfg(feature = "sink")))] - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn split(self) -> (SplitSink, SplitStream) - where - Self: Sink + Sized, - { - let (sink, stream) = split::split(self); - ( - crate::sink::assert_sink::(sink), - assert_stream::(stream), - ) - } - - /// Do something with each item of this stream, afterwards passing it on. - /// - /// This is similar to the `Iterator::inspect` method in the standard - /// library where it allows easily inspecting each value as it passes - /// through the stream, for example to debug what's going on. - fn inspect(self, f: F) -> Inspect - where - F: FnMut(&Self::Item), - Self: Sized, - { - assert_stream::(Inspect::new(self, f)) - } - - /// Wrap this stream in an `Either` stream, making it the left-hand variant - /// of that `Either`. - /// - /// This can be used in combination with the `right_stream` method to write `if` - /// statements that evaluate to different streams in different branches. 
- fn left_stream(self) -> Either - where - B: Stream, - Self: Sized, - { - assert_stream::(Either::Left(self)) - } - - /// Wrap this stream in an `Either` stream, making it the right-hand variant - /// of that `Either`. - /// - /// This can be used in combination with the `left_stream` method to write `if` - /// statements that evaluate to different streams in different branches. - fn right_stream(self) -> Either - where - B: Stream, - Self: Sized, - { - assert_stream::(Either::Right(self)) - } - - /// A convenience method for calling [`Stream::poll_next`] on [`Unpin`] - /// stream types. - fn poll_next_unpin(&mut self, cx: &mut Context<'_>) -> Poll> - where - Self: Unpin, - { - Pin::new(self).poll_next(cx) - } - - /// Returns a [`Future`] that resolves when the next item in this stream is - /// ready. - /// - /// This is similar to the [`next`][StreamExt::next] method, but it won't - /// resolve to [`None`] if used on an empty [`Stream`]. Instead, the - /// returned future type will return `true` from - /// [`FusedFuture::is_terminated`][] when the [`Stream`] is empty, allowing - /// [`select_next_some`][StreamExt::select_next_some] to be easily used with - /// the [`select!`] macro. - /// - /// If the future is polled after this [`Stream`] is empty it will panic. - /// Using the future with a [`FusedFuture`][]-aware primitive like the - /// [`select!`] macro will prevent this. - /// - /// [`FusedFuture`]: futures_core::future::FusedFuture - /// [`FusedFuture::is_terminated`]: futures_core::future::FusedFuture::is_terminated - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::{future, select}; - /// use futures::stream::{StreamExt, FuturesUnordered}; - /// - /// let mut fut = future::ready(1); - /// let mut async_tasks = FuturesUnordered::new(); - /// let mut total = 0; - /// loop { - /// select! { - /// num = fut => { - /// // First, the `ready` future completes. 
- /// total += num; - /// // Then we spawn a new task onto `async_tasks`, - /// async_tasks.push(async { 5 }); - /// }, - /// // On the next iteration of the loop, the task we spawned - /// // completes. - /// num = async_tasks.select_next_some() => { - /// total += num; - /// } - /// // Finally, both the `ready` future and `async_tasks` have - /// // finished, so we enter the `complete` branch. - /// complete => break, - /// } - /// } - /// assert_eq!(total, 6); - /// # }); - /// ``` - /// - /// [`select!`]: crate::select - fn select_next_some(&mut self) -> SelectNextSome<'_, Self> - where - Self: Unpin + FusedStream, - { - assert_future::(SelectNextSome::new(self)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/next.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/next.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/next.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/next.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,34 +0,0 @@ -use crate::stream::StreamExt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; - -/// Future for the [`next`](super::StreamExt::next) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Next<'a, St: ?Sized> { - stream: &'a mut St, -} - -impl Unpin for Next<'_, St> {} - -impl<'a, St: ?Sized + Stream + Unpin> Next<'a, St> { - pub(super) fn new(stream: &'a mut St) -> Self { - Self { stream } - } -} - -impl FusedFuture for Next<'_, St> { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Future for Next<'_, St> { - type Output = Option; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.stream.poll_next_unpin(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/peek.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/peek.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/peek.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/peek.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,433 +0,0 @@ -use crate::fns::FnOnce1; -use crate::stream::{Fuse, StreamExt}; -use core::fmt; -use core::marker::PhantomData; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// A `Stream` that implements a `peek` method. - /// - /// The `peek` method can be used to retrieve a reference - /// to the next `Stream::Item` if available. A subsequent - /// call to `poll` will return the owned item. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Peekable { - #[pin] - stream: Fuse, - peeked: Option, - } -} - -impl Peekable { - pub(super) fn new(stream: St) -> Self { - Self { stream: stream.fuse(), peeked: None } - } - - delegate_access_inner!(stream, St, (.)); - - /// Produces a future which retrieves a reference to the next item - /// in the stream, or `None` if the underlying stream terminates. - pub fn peek(self: Pin<&mut Self>) -> Peek<'_, St> { - Peek { inner: Some(self) } - } - - /// Peek retrieves a reference to the next item in the stream. - /// - /// This method polls the underlying stream and return either a reference - /// to the next item if the stream is ready or passes through any errors. - pub fn poll_peek(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if this.peeked.is_some() { - break this.peeked.as_ref(); - } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) { - *this.peeked = Some(item); - } else { - break None; - } - }) - } - - /// Produces a future which retrieves a mutable reference to the next item - /// in the stream, or `None` if the underlying stream terminates. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// use futures::pin_mut; - /// - /// let stream = stream::iter(vec![1, 2, 3]).peekable(); - /// pin_mut!(stream); - /// - /// assert_eq!(stream.as_mut().peek_mut().await, Some(&mut 1)); - /// assert_eq!(stream.as_mut().next().await, Some(1)); - /// - /// // Peek into the stream and modify the value which will be returned next - /// if let Some(p) = stream.as_mut().peek_mut().await { - /// if *p == 2 { - /// *p = 5; - /// } - /// } - /// - /// assert_eq!(stream.collect::>().await, vec![5, 3]); - /// # }); - /// ``` - pub fn peek_mut(self: Pin<&mut Self>) -> PeekMut<'_, St> { - PeekMut { inner: Some(self) } - } - - /// Peek retrieves a mutable reference to the next item in the stream. - pub fn poll_peek_mut( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if this.peeked.is_some() { - break this.peeked.as_mut(); - } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) { - *this.peeked = Some(item); - } else { - break None; - } - }) - } - - /// Creates a future which will consume and return the next value of this - /// stream if a condition is true. - /// - /// If `func` returns `true` for the next value of this stream, consume and - /// return it. Otherwise, return `None`. - /// - /// # Examples - /// - /// Consume a number if it's equal to 0. - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// use futures::pin_mut; - /// - /// let stream = stream::iter(0..5).peekable(); - /// pin_mut!(stream); - /// // The first item of the stream is 0; consume it. - /// assert_eq!(stream.as_mut().next_if(|&x| x == 0).await, Some(0)); - /// // The next item returned is now 1, so `consume` will return `false`. 
- /// assert_eq!(stream.as_mut().next_if(|&x| x == 0).await, None); - /// // `next_if` saves the value of the next item if it was not equal to `expected`. - /// assert_eq!(stream.next().await, Some(1)); - /// # }); - /// ``` - /// - /// Consume any number less than 10. - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// use futures::pin_mut; - /// - /// let stream = stream::iter(1..20).peekable(); - /// pin_mut!(stream); - /// // Consume all numbers less than 10 - /// while stream.as_mut().next_if(|&x| x < 10).await.is_some() {} - /// // The next value returned will be 10 - /// assert_eq!(stream.next().await, Some(10)); - /// # }); - /// ``` - pub fn next_if(self: Pin<&mut Self>, func: F) -> NextIf<'_, St, F> - where - F: FnOnce(&St::Item) -> bool, - { - NextIf { inner: Some((self, func)) } - } - - /// Creates a future which will consume and return the next item if it is - /// equal to `expected`. - /// - /// # Example - /// - /// Consume a number if it's equal to 0. - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt}; - /// use futures::pin_mut; - /// - /// let stream = stream::iter(0..5).peekable(); - /// pin_mut!(stream); - /// // The first item of the stream is 0; consume it. - /// assert_eq!(stream.as_mut().next_if_eq(&0).await, Some(0)); - /// // The next item returned is now 1, so `consume` will return `false`. - /// assert_eq!(stream.as_mut().next_if_eq(&0).await, None); - /// // `next_if_eq` saves the value of the next item if it was not equal to `expected`. 
- /// assert_eq!(stream.next().await, Some(1)); - /// # }); - /// ``` - pub fn next_if_eq<'a, T>(self: Pin<&'a mut Self>, expected: &'a T) -> NextIfEq<'a, St, T> - where - T: ?Sized, - St::Item: PartialEq, - { - NextIfEq { - inner: NextIf { inner: Some((self, NextIfEqFn { expected, _next: PhantomData })) }, - } - } -} - -impl FusedStream for Peekable { - fn is_terminated(&self) -> bool { - self.peeked.is_none() && self.stream.is_terminated() - } -} - -impl Stream for Peekable { - type Item = S::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - if let Some(item) = this.peeked.take() { - return Poll::Ready(Some(item)); - } - this.stream.poll_next(cx) - } - - fn size_hint(&self) -> (usize, Option) { - let peek_len = usize::from(self.peeked.is_some()); - let (lower, upper) = self.stream.size_hint(); - let lower = lower.saturating_add(peek_len); - let upper = match upper { - Some(x) => x.checked_add(peek_len), - None => None, - }; - (lower, upper) - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Peekable -where - S: Sink + Stream, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} - -pin_project! { - /// Future for the [`Peekable::peek`](self::Peekable::peek) method. 
- #[must_use = "futures do nothing unless polled"] - pub struct Peek<'a, St: Stream> { - inner: Option>>, - } -} - -impl fmt::Debug for Peek<'_, St> -where - St: Stream + fmt::Debug, - St::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Peek").field("inner", &self.inner).finish() - } -} - -impl FusedFuture for Peek<'_, St> { - fn is_terminated(&self) -> bool { - self.inner.is_none() - } -} - -impl<'a, St> Future for Peek<'a, St> -where - St: Stream, -{ - type Output = Option<&'a St::Item>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let inner = self.project().inner; - if let Some(peekable) = inner { - ready!(peekable.as_mut().poll_peek(cx)); - - inner.take().unwrap().poll_peek(cx) - } else { - panic!("Peek polled after completion") - } - } -} - -pin_project! { - /// Future for the [`Peekable::peek_mut`](self::Peekable::peek_mut) method. - #[must_use = "futures do nothing unless polled"] - pub struct PeekMut<'a, St: Stream> { - inner: Option>>, - } -} - -impl fmt::Debug for PeekMut<'_, St> -where - St: Stream + fmt::Debug, - St::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PeekMut").field("inner", &self.inner).finish() - } -} - -impl FusedFuture for PeekMut<'_, St> { - fn is_terminated(&self) -> bool { - self.inner.is_none() - } -} - -impl<'a, St> Future for PeekMut<'a, St> -where - St: Stream, -{ - type Output = Option<&'a mut St::Item>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let inner = self.project().inner; - if let Some(peekable) = inner { - ready!(peekable.as_mut().poll_peek_mut(cx)); - - inner.take().unwrap().poll_peek_mut(cx) - } else { - panic!("PeekMut polled after completion") - } - } -} - -pin_project! { - /// Future for the [`Peekable::next_if`](self::Peekable::next_if) method. 
- #[must_use = "futures do nothing unless polled"] - pub struct NextIf<'a, St: Stream, F> { - inner: Option<(Pin<&'a mut Peekable>, F)>, - } -} - -impl fmt::Debug for NextIf<'_, St, F> -where - St: Stream + fmt::Debug, - St::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NextIf").field("inner", &self.inner.as_ref().map(|(s, _f)| s)).finish() - } -} - -#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 -impl FusedFuture for NextIf<'_, St, F> -where - St: Stream, - F: for<'a> FnOnce1<&'a St::Item, Output = bool>, -{ - fn is_terminated(&self) -> bool { - self.inner.is_none() - } -} - -#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 -impl Future for NextIf<'_, St, F> -where - St: Stream, - F: for<'a> FnOnce1<&'a St::Item, Output = bool>, -{ - type Output = Option; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let inner = self.project().inner; - if let Some((peekable, _)) = inner { - let res = ready!(peekable.as_mut().poll_next(cx)); - - let (peekable, func) = inner.take().unwrap(); - match res { - Some(ref matched) if func.call_once(matched) => Poll::Ready(res), - other => { - let peekable = peekable.project(); - // Since we called `self.next()`, we consumed `self.peeked`. - assert!(peekable.peeked.is_none()); - *peekable.peeked = other; - Poll::Ready(None) - } - } - } else { - panic!("NextIf polled after completion") - } - } -} - -pin_project! { - /// Future for the [`Peekable::next_if_eq`](self::Peekable::next_if_eq) method. 
- #[must_use = "futures do nothing unless polled"] - pub struct NextIfEq<'a, St: Stream, T: ?Sized> { - #[pin] - inner: NextIf<'a, St, NextIfEqFn<'a, T, St::Item>>, - } -} - -impl fmt::Debug for NextIfEq<'_, St, T> -where - St: Stream + fmt::Debug, - St::Item: fmt::Debug, - T: ?Sized, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NextIfEq") - .field("inner", &self.inner.inner.as_ref().map(|(s, _f)| s)) - .finish() - } -} - -impl FusedFuture for NextIfEq<'_, St, T> -where - St: Stream, - T: ?Sized, - St::Item: PartialEq, -{ - fn is_terminated(&self) -> bool { - self.inner.is_terminated() - } -} - -impl Future for NextIfEq<'_, St, T> -where - St: Stream, - T: ?Sized, - St::Item: PartialEq, -{ - type Output = Option; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.project().inner.poll(cx) - } -} - -struct NextIfEqFn<'a, T: ?Sized, Item> { - expected: &'a T, - _next: PhantomData, -} - -impl FnOnce1<&Item> for NextIfEqFn<'_, T, Item> -where - T: ?Sized, - Item: PartialEq, -{ - type Output = bool; - - fn call_once(self, next: &Item) -> Self::Output { - next == self.expected - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/ready_chunks.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/ready_chunks.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/ready_chunks.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/ready_chunks.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,93 +0,0 @@ -use crate::stream::{Fuse, StreamExt}; -use alloc::vec::Vec; -use core::pin::Pin; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`ready_chunks`](super::StreamExt::ready_chunks) method. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct ReadyChunks { - #[pin] - stream: Fuse, - cap: usize, // https://github.com/rust-lang/futures-rs/issues/1475 - } -} - -impl ReadyChunks { - pub(super) fn new(stream: St, capacity: usize) -> Self { - assert!(capacity > 0); - - Self { stream: stream.fuse(), cap: capacity } - } - - delegate_access_inner!(stream, St, (.)); -} - -impl Stream for ReadyChunks { - type Item = Vec; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - let mut items: Vec = Vec::new(); - - loop { - match this.stream.as_mut().poll_next(cx) { - // Flush all collected data if underlying stream doesn't contain - // more ready values - Poll::Pending => { - return if items.is_empty() { Poll::Pending } else { Poll::Ready(Some(items)) } - } - - // Push the ready item into the buffer and check whether it is full. - // If so, replace our buffer with a new and empty one and return - // the full one. - Poll::Ready(Some(item)) => { - if items.is_empty() { - items.reserve(*this.cap); - } - items.push(item); - if items.len() >= *this.cap { - return Poll::Ready(Some(items)); - } - } - - // Since the underlying stream ran out of values, return what we - // have buffered, if we have anything. 
- Poll::Ready(None) => { - let last = if items.is_empty() { None } else { Some(items) }; - - return Poll::Ready(last); - } - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let (lower, upper) = self.stream.size_hint(); - let lower = lower / self.cap; - (lower, upper) - } -} - -impl FusedStream for ReadyChunks { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for ReadyChunks -where - S: Stream + Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/scan.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/scan.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/scan.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/scan.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,128 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -struct StateFn { - state: S, - f: F, -} - -pin_project! { - /// Stream for the [`scan`](super::StreamExt::scan) method. - #[must_use = "streams do nothing unless polled"] - pub struct Scan { - #[pin] - stream: St, - state_f: Option>, - #[pin] - future: Option, - } -} - -impl fmt::Debug for Scan -where - St: Stream + fmt::Debug, - St::Item: fmt::Debug, - S: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Scan") - .field("stream", &self.stream) - .field("state", &self.state_f.as_ref().map(|s| &s.state)) - .field("future", &self.future) - .field("done_taking", &self.is_done_taking()) - .finish() - } -} - -impl Scan { - /// Checks if internal state is `None`. 
- fn is_done_taking(&self) -> bool { - self.state_f.is_none() - } -} - -impl Scan -where - St: Stream, - F: FnMut(&mut S, St::Item) -> Fut, - Fut: Future>, -{ - pub(super) fn new(stream: St, initial_state: S, f: F) -> Self { - Self { stream, state_f: Some(StateFn { state: initial_state, f }), future: None } - } - - delegate_access_inner!(stream, St, ()); -} - -impl Stream for Scan -where - St: Stream, - F: FnMut(&mut S, St::Item) -> Fut, - Fut: Future>, -{ - type Item = B; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if self.is_done_taking() { - return Poll::Ready(None); - } - - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - let item = ready!(fut.poll(cx)); - this.future.set(None); - - if item.is_none() { - *this.state_f = None; - } - - break item; - } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) { - let state_f = this.state_f.as_mut().unwrap(); - this.future.set(Some((state_f.f)(&mut state_f.state, item))) - } else { - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - if self.is_done_taking() { - (0, Some(0)) - } else { - self.stream.size_hint() // can't know a lower bound, due to the predicate - } - } -} - -impl FusedStream for Scan -where - St: FusedStream, - F: FnMut(&mut S, St::Item) -> Fut, - Fut: Future>, -{ - fn is_terminated(&self) -> bool { - self.is_done_taking() || self.future.is_none() && self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Scan -where - St: Stream + Sink, -{ - type Error = St::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/select_next_some.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/select_next_some.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/select_next_some.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/select_next_some.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,42 +0,0 @@ -use crate::stream::StreamExt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::FusedStream; -use futures_core::task::{Context, Poll}; - -/// Future for the [`select_next_some`](super::StreamExt::select_next_some) -/// method. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SelectNextSome<'a, St: ?Sized> { - stream: &'a mut St, -} - -impl<'a, St: ?Sized> SelectNextSome<'a, St> { - pub(super) fn new(stream: &'a mut St) -> Self { - Self { stream } - } -} - -impl FusedFuture for SelectNextSome<'_, St> { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Future for SelectNextSome<'_, St> { - type Output = St::Item; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - assert!(!self.stream.is_terminated(), "SelectNextSome polled after terminated"); - - if let Some(item) = ready!(self.stream.poll_next_unpin(cx)) { - Poll::Ready(item) - } else { - debug_assert!(self.stream.is_terminated()); - cx.waker().wake_by_ref(); - Poll::Pending - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/skip.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/skip.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/skip.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/skip.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,70 +0,0 @@ -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`skip`](super::StreamExt::skip) method. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Skip { - #[pin] - stream: St, - remaining: usize, - } -} - -impl Skip { - pub(super) fn new(stream: St, n: usize) -> Self { - Self { stream, remaining: n } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for Skip { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Stream for Skip { - type Item = St::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - while *this.remaining > 0 { - if ready!(this.stream.as_mut().poll_next(cx)).is_some() { - *this.remaining -= 1; - } else { - return Poll::Ready(None); - } - } - - this.stream.poll_next(cx) - } - - fn size_hint(&self) -> (usize, Option) { - let (lower, upper) = self.stream.size_hint(); - - let lower = lower.saturating_sub(self.remaining); - let upper = upper.map(|x| x.saturating_sub(self.remaining)); - - (lower, upper) - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Skip -where - S: Stream + Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/skip_while.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/skip_while.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/skip_while.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/skip_while.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,124 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`skip_while`](super::StreamExt::skip_while) method. 
- #[must_use = "streams do nothing unless polled"] - pub struct SkipWhile where St: Stream { - #[pin] - stream: St, - f: F, - #[pin] - pending_fut: Option, - pending_item: Option, - done_skipping: bool, - } -} - -impl fmt::Debug for SkipWhile -where - St: Stream + fmt::Debug, - St::Item: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SkipWhile") - .field("stream", &self.stream) - .field("pending_fut", &self.pending_fut) - .field("pending_item", &self.pending_item) - .field("done_skipping", &self.done_skipping) - .finish() - } -} - -impl SkipWhile -where - St: Stream, - F: FnMut(&St::Item) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, pending_fut: None, pending_item: None, done_skipping: false } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for SkipWhile -where - St: FusedStream, - F: FnMut(&St::Item) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.pending_item.is_none() && self.stream.is_terminated() - } -} - -impl Stream for SkipWhile -where - St: Stream, - F: FnMut(&St::Item) -> Fut, - Fut: Future, -{ - type Item = St::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if *this.done_skipping { - return this.stream.poll_next(cx); - } - - Poll::Ready(loop { - if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() { - let skipped = ready!(fut.poll(cx)); - let item = this.pending_item.take(); - this.pending_fut.set(None); - if !skipped { - *this.done_skipping = true; - break item; - } - } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) { - this.pending_fut.set(Some((this.f)(&item))); - *this.pending_item = Some(item); - } else { - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - if self.done_skipping { - self.stream.size_hint() - } else { - let pending_len = usize::from(self.pending_item.is_some()); - 
let (_, upper) = self.stream.size_hint(); - let upper = match upper { - Some(x) => x.checked_add(pending_len), - None => None, - }; - (0, upper) // can't know a lower bound, due to the predicate - } - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for SkipWhile -where - S: Stream + Sink, - F: FnMut(&S::Item) -> Fut, - Fut: Future, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/split.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/split.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/split.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/split.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,144 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use futures_sink::Sink; - -use crate::lock::BiLock; - -/// A `Stream` part of the split pair -#[derive(Debug)] -#[must_use = "streams do nothing unless polled"] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub struct SplitStream(BiLock); - -impl Unpin for SplitStream {} - -impl SplitStream { - /// Attempts to put the two "halves" of a split `Stream + Sink` back - /// together. Succeeds only if the `SplitStream` and `SplitSink` are - /// a matching pair originating from the same call to `StreamExt::split`. 
- pub fn reunite(self, other: SplitSink) -> Result> - where - S: Sink, - { - other.reunite(self) - } -} - -impl Stream for SplitStream { - type Item = S::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.0.poll_lock(cx)).as_pin_mut().poll_next(cx) - } -} - -#[allow(non_snake_case)] -fn SplitSink, Item>(lock: BiLock) -> SplitSink { - SplitSink { lock, slot: None } -} - -/// A `Sink` part of the split pair -#[derive(Debug)] -#[must_use = "sinks do nothing unless polled"] -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub struct SplitSink { - lock: BiLock, - slot: Option, -} - -impl Unpin for SplitSink {} - -impl + Unpin, Item> SplitSink { - /// Attempts to put the two "halves" of a split `Stream + Sink` back - /// together. Succeeds only if the `SplitStream` and `SplitSink` are - /// a matching pair originating from the same call to `StreamExt::split`. - pub fn reunite(self, other: SplitStream) -> Result> { - self.lock.reunite(other.0).map_err(|err| ReuniteError(SplitSink(err.0), SplitStream(err.1))) - } -} - -impl, Item> SplitSink { - fn poll_flush_slot( - mut inner: Pin<&mut S>, - slot: &mut Option, - cx: &mut Context<'_>, - ) -> Poll> { - if slot.is_some() { - ready!(inner.as_mut().poll_ready(cx))?; - Poll::Ready(inner.start_send(slot.take().unwrap())) - } else { - Poll::Ready(Ok(())) - } - } - - fn poll_lock_and_flush_slot( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - let this = &mut *self; - let mut inner = ready!(this.lock.poll_lock(cx)); - Self::poll_flush_slot(inner.as_pin_mut(), &mut this.slot, cx) - } -} - -impl, Item> Sink for SplitSink { - type Error = S::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - if self.slot.is_none() { - return Poll::Ready(Ok(())); - } - ready!(self.as_mut().poll_lock_and_flush_slot(cx))?; - } - } - - fn start_send(mut self: Pin<&mut Self>, item: Item) -> Result<(), S::Error> { - self.slot = Some(item); - Ok(()) - } 
- - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = &mut *self; - let mut inner = ready!(this.lock.poll_lock(cx)); - ready!(Self::poll_flush_slot(inner.as_pin_mut(), &mut this.slot, cx))?; - inner.as_pin_mut().poll_flush(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = &mut *self; - let mut inner = ready!(this.lock.poll_lock(cx)); - ready!(Self::poll_flush_slot(inner.as_pin_mut(), &mut this.slot, cx))?; - inner.as_pin_mut().poll_close(cx) - } -} - -pub(super) fn split, Item>(s: S) -> (SplitSink, SplitStream) { - let (a, b) = BiLock::new(s); - let read = SplitStream(a); - let write = SplitSink(b); - (write, read) -} - -/// Error indicating a `SplitSink` and `SplitStream` were not two halves -/// of a `Stream + Split`, and thus could not be `reunite`d. -#[cfg_attr(docsrs, doc(cfg(feature = "sink")))] -pub struct ReuniteError(pub SplitSink, pub SplitStream); - -impl fmt::Debug for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("ReuniteError").field(&"...").finish() - } -} - -impl fmt::Display for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "tried to reunite a SplitStream and SplitSink that don't form a pair") - } -} - -#[cfg(feature = "std")] -impl std::error::Error for ReuniteError {} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/take.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/take.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/take.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/take.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,86 +0,0 @@ -use core::cmp; -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use 
pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`take`](super::StreamExt::take) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Take { - #[pin] - stream: St, - remaining: usize, - } -} - -impl Take { - pub(super) fn new(stream: St, n: usize) -> Self { - Self { stream, remaining: n } - } - - delegate_access_inner!(stream, St, ()); -} - -impl Stream for Take -where - St: Stream, -{ - type Item = St::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if self.remaining == 0 { - Poll::Ready(None) - } else { - let this = self.project(); - let next = ready!(this.stream.poll_next(cx)); - if next.is_some() { - *this.remaining -= 1; - } else { - *this.remaining = 0; - } - Poll::Ready(next) - } - } - - fn size_hint(&self) -> (usize, Option) { - if self.remaining == 0 { - return (0, Some(0)); - } - - let (lower, upper) = self.stream.size_hint(); - - let lower = cmp::min(lower, self.remaining); - - let upper = match upper { - Some(x) if x < self.remaining => Some(x), - _ => Some(self.remaining), - }; - - (lower, upper) - } -} - -impl FusedStream for Take -where - St: FusedStream, -{ - fn is_terminated(&self) -> bool { - self.remaining == 0 || self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Take -where - S: Stream + Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/take_until.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/take_until.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/take_until.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/take_until.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,170 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use 
futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -// FIXME: docs, tests - -pin_project! { - /// Stream for the [`take_until`](super::StreamExt::take_until) method. - #[must_use = "streams do nothing unless polled"] - pub struct TakeUntil { - #[pin] - stream: St, - // Contains the inner Future on start and None once the inner Future is resolved - // or taken out by the user. - #[pin] - fut: Option, - // Contains fut's return value once fut is resolved - fut_result: Option, - // Whether the future was taken out by the user. - free: bool, - } -} - -impl fmt::Debug for TakeUntil -where - St: Stream + fmt::Debug, - St::Item: fmt::Debug, - Fut: Future + fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TakeUntil").field("stream", &self.stream).field("fut", &self.fut).finish() - } -} - -impl TakeUntil -where - St: Stream, - Fut: Future, -{ - pub(super) fn new(stream: St, fut: Fut) -> Self { - Self { stream, fut: Some(fut), fut_result: None, free: false } - } - - delegate_access_inner!(stream, St, ()); - - /// Extract the stopping future out of the combinator. - /// The future is returned only if it isn't resolved yet, ie. if the stream isn't stopped yet. - /// Taking out the future means the combinator will be yielding - /// elements from the wrapped stream without ever stopping it. - pub fn take_future(&mut self) -> Option { - if self.fut.is_some() { - self.free = true; - } - - self.fut.take() - } - - /// Once the stopping future is resolved, this method can be used - /// to extract the value returned by the stopping future. - /// - /// This may be used to retrieve arbitrary data from the stopping - /// future, for example a reason why the stream was stopped. - /// - /// This method will return `None` if the future isn't resolved yet, - /// or if the result was already taken out. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt}; - /// use futures::task::Poll; - /// - /// let stream = stream::iter(1..=10); - /// - /// let mut i = 0; - /// let stop_fut = future::poll_fn(|_cx| { - /// i += 1; - /// if i <= 5 { - /// Poll::Pending - /// } else { - /// Poll::Ready("reason") - /// } - /// }); - /// - /// let mut stream = stream.take_until(stop_fut); - /// let _ = stream.by_ref().collect::>().await; - /// - /// let result = stream.take_result().unwrap(); - /// assert_eq!(result, "reason"); - /// # }); - /// ``` - pub fn take_result(&mut self) -> Option { - self.fut_result.take() - } - - /// Whether the stream was stopped yet by the stopping future - /// being resolved. - pub fn is_stopped(&self) -> bool { - !self.free && self.fut.is_none() - } -} - -impl Stream for TakeUntil -where - St: Stream, - Fut: Future, -{ - type Item = St::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if let Some(f) = this.fut.as_mut().as_pin_mut() { - if let Poll::Ready(result) = f.poll(cx) { - this.fut.set(None); - *this.fut_result = Some(result); - } - } - - if !*this.free && this.fut.is_none() { - // Future resolved, inner stream stopped - Poll::Ready(None) - } else { - // Future either not resolved yet or taken out by the user - let item = ready!(this.stream.poll_next(cx)); - if item.is_none() { - this.fut.set(None); - } - Poll::Ready(item) - } - } - - fn size_hint(&self) -> (usize, Option) { - if self.is_stopped() { - return (0, Some(0)); - } - - self.stream.size_hint() - } -} - -impl FusedStream for TakeUntil -where - St: Stream, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.is_stopped() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TakeUntil -where - S: Stream + Sink, - Fut: Future, -{ - type Error = S::Error; - - 
delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/take_while.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/take_while.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/take_while.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/take_while.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,124 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`take_while`](super::StreamExt::take_while) method. - #[must_use = "streams do nothing unless polled"] - pub struct TakeWhile { - #[pin] - stream: St, - f: F, - #[pin] - pending_fut: Option, - pending_item: Option, - done_taking: bool, - } -} - -impl fmt::Debug for TakeWhile -where - St: Stream + fmt::Debug, - St::Item: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TakeWhile") - .field("stream", &self.stream) - .field("pending_fut", &self.pending_fut) - .field("pending_item", &self.pending_item) - .field("done_taking", &self.done_taking) - .finish() - } -} - -impl TakeWhile -where - St: Stream, - F: FnMut(&St::Item) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, pending_fut: None, pending_item: None, done_taking: false } - } - - delegate_access_inner!(stream, St, ()); -} - -impl Stream for TakeWhile -where - St: Stream, - F: FnMut(&St::Item) -> Fut, - Fut: Future, -{ - type Item = St::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if self.done_taking { - return Poll::Ready(None); - } - - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(fut) = 
this.pending_fut.as_mut().as_pin_mut() { - let take = ready!(fut.poll(cx)); - let item = this.pending_item.take(); - this.pending_fut.set(None); - if take { - break item; - } else { - *this.done_taking = true; - break None; - } - } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) { - this.pending_fut.set(Some((this.f)(&item))); - *this.pending_item = Some(item); - } else { - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - if self.done_taking { - return (0, Some(0)); - } - - let pending_len = usize::from(self.pending_item.is_some()); - let (_, upper) = self.stream.size_hint(); - let upper = match upper { - Some(x) => x.checked_add(pending_len), - None => None, - }; - (0, upper) // can't know a lower bound, due to the predicate - } -} - -impl FusedStream for TakeWhile -where - St: FusedStream, - F: FnMut(&St::Item) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.done_taking || self.pending_item.is_none() && self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TakeWhile -where - S: Stream + Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/then.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/then.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/then.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/then.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,101 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`then`](super::StreamExt::then) method. 
- #[must_use = "streams do nothing unless polled"] - pub struct Then { - #[pin] - stream: St, - #[pin] - future: Option, - f: F, - } -} - -impl fmt::Debug for Then -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Then").field("stream", &self.stream).field("future", &self.future).finish() - } -} - -impl Then -where - St: Stream, - F: FnMut(St::Item) -> Fut, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, future: None, f } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for Then -where - St: FusedStream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.future.is_none() && self.stream.is_terminated() - } -} - -impl Stream for Then -where - St: Stream, - F: FnMut(St::Item) -> Fut, - Fut: Future, -{ - type Item = Fut::Output; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - let item = ready!(fut.poll(cx)); - this.future.set(None); - break Some(item); - } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) { - this.future.set(Some((this.f)(item))); - } else { - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - let future_len = usize::from(self.future.is_some()); - let (lower, upper) = self.stream.size_hint(); - let lower = lower.saturating_add(future_len); - let upper = match upper { - Some(x) => x.checked_add(future_len), - None => None, - }; - (lower, upper) - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for Then -where - S: Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/unzip.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/unzip.rs --- 
s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/unzip.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/unzip.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,63 +0,0 @@ -use core::mem; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`unzip`](super::StreamExt::unzip) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Unzip { - #[pin] - stream: St, - left: FromA, - right: FromB, - } -} - -impl Unzip { - fn finish(self: Pin<&mut Self>) -> (FromA, FromB) { - let this = self.project(); - (mem::take(this.left), mem::take(this.right)) - } - - pub(super) fn new(stream: St) -> Self { - Self { stream, left: Default::default(), right: Default::default() } - } -} - -impl FusedFuture for Unzip -where - St: FusedStream, - FromA: Default + Extend, - FromB: Default + Extend, -{ - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Future for Unzip -where - St: Stream, - FromA: Default + Extend, - FromB: Default + Extend, -{ - type Output = (FromA, FromB); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(FromA, FromB)> { - let mut this = self.as_mut().project(); - loop { - match ready!(this.stream.as_mut().poll_next(cx)) { - Some(e) => { - this.left.extend(Some(e.0)); - this.right.extend(Some(e.1)); - } - None => return Poll::Ready(self.finish()), - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/zip.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/zip.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/stream/zip.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/stream/zip.rs 1970-01-01 
01:00:00.000000000 +0100 @@ -1,128 +0,0 @@ -use crate::stream::{Fuse, StreamExt}; -use core::cmp; -use core::pin::Pin; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`zip`](super::StreamExt::zip) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct Zip { - #[pin] - stream1: Fuse, - #[pin] - stream2: Fuse, - queued1: Option, - queued2: Option, - } -} - -impl Zip { - pub(super) fn new(stream1: St1, stream2: St2) -> Self { - Self { stream1: stream1.fuse(), stream2: stream2.fuse(), queued1: None, queued2: None } - } - - /// Acquires a reference to the underlying streams that this combinator is - /// pulling from. - pub fn get_ref(&self) -> (&St1, &St2) { - (self.stream1.get_ref(), self.stream2.get_ref()) - } - - /// Acquires a mutable reference to the underlying streams that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// stream which may otherwise confuse this combinator. - pub fn get_mut(&mut self) -> (&mut St1, &mut St2) { - (self.stream1.get_mut(), self.stream2.get_mut()) - } - - /// Acquires a pinned mutable reference to the underlying streams that this - /// combinator is pulling from. - /// - /// Note that care must be taken to avoid tampering with the state of the - /// stream which may otherwise confuse this combinator. - pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut St1>, Pin<&mut St2>) { - let this = self.project(); - (this.stream1.get_pin_mut(), this.stream2.get_pin_mut()) - } - - /// Consumes this combinator, returning the underlying streams. - /// - /// Note that this may discard intermediate state of this combinator, so - /// care should be taken to avoid losing resources when this is called. 
- pub fn into_inner(self) -> (St1, St2) { - (self.stream1.into_inner(), self.stream2.into_inner()) - } -} - -impl FusedStream for Zip -where - St1: Stream, - St2: Stream, -{ - fn is_terminated(&self) -> bool { - self.stream1.is_terminated() && self.stream2.is_terminated() - } -} - -impl Stream for Zip -where - St1: Stream, - St2: Stream, -{ - type Item = (St1::Item, St2::Item); - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if this.queued1.is_none() { - match this.stream1.as_mut().poll_next(cx) { - Poll::Ready(Some(item1)) => *this.queued1 = Some(item1), - Poll::Ready(None) | Poll::Pending => {} - } - } - if this.queued2.is_none() { - match this.stream2.as_mut().poll_next(cx) { - Poll::Ready(Some(item2)) => *this.queued2 = Some(item2), - Poll::Ready(None) | Poll::Pending => {} - } - } - - if this.queued1.is_some() && this.queued2.is_some() { - let pair = (this.queued1.take().unwrap(), this.queued2.take().unwrap()); - Poll::Ready(Some(pair)) - } else if this.stream1.is_done() || this.stream2.is_done() { - Poll::Ready(None) - } else { - Poll::Pending - } - } - - fn size_hint(&self) -> (usize, Option) { - let queued1_len = usize::from(self.queued1.is_some()); - let queued2_len = usize::from(self.queued2.is_some()); - let (stream1_lower, stream1_upper) = self.stream1.size_hint(); - let (stream2_lower, stream2_upper) = self.stream2.size_hint(); - - let stream1_lower = stream1_lower.saturating_add(queued1_len); - let stream2_lower = stream2_lower.saturating_add(queued2_len); - - let lower = cmp::min(stream1_lower, stream2_lower); - - let upper = match (stream1_upper, stream2_upper) { - (Some(x), Some(y)) => { - let x = x.saturating_add(queued1_len); - let y = y.saturating_add(queued2_len); - Some(cmp::min(x, y)) - } - (Some(x), None) => x.checked_add(queued1_len), - (None, Some(y)) => y.checked_add(queued2_len), - (None, None) => None, - }; - - (lower, upper) - } -} diff -Nru 
s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/and_then.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/and_then.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/and_then.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/and_then.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,105 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::TryFuture; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`and_then`](super::TryStreamExt::and_then) method. - #[must_use = "streams do nothing unless polled"] - pub struct AndThen { - #[pin] - stream: St, - #[pin] - future: Option, - f: F, - } -} - -impl fmt::Debug for AndThen -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AndThen") - .field("stream", &self.stream) - .field("future", &self.future) - .finish() - } -} - -impl AndThen -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: TryFuture, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, future: None, f } - } - - delegate_access_inner!(stream, St, ()); -} - -impl Stream for AndThen -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: TryFuture, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - let item = ready!(fut.try_poll(cx)); - this.future.set(None); - break Some(item); - } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) 
{ - this.future.set(Some((this.f)(item))); - } else { - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - let future_len = usize::from(self.future.is_some()); - let (lower, upper) = self.stream.size_hint(); - let lower = lower.saturating_add(future_len); - let upper = match upper { - Some(x) => x.checked_add(future_len), - None => None, - }; - (lower, upper) - } -} - -impl FusedStream for AndThen -where - St: TryStream + FusedStream, - F: FnMut(St::Ok) -> Fut, - Fut: TryFuture, -{ - fn is_terminated(&self) -> bool { - self.future.is_none() && self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for AndThen -where - S: Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/into_async_read.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/into_async_read.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/into_async_read.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/into_async_read.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,166 +0,0 @@ -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::TryStream; -use futures_core::task::{Context, Poll}; -use futures_io::{AsyncBufRead, AsyncRead, AsyncWrite}; -use pin_project_lite::pin_project; -use std::cmp; -use std::io::{Error, Result}; - -pin_project! { - /// Reader for the [`into_async_read`](super::TryStreamExt::into_async_read) method. 
- #[derive(Debug)] - #[must_use = "readers do nothing unless polled"] - #[cfg_attr(docsrs, doc(cfg(feature = "io")))] - pub struct IntoAsyncRead - where - St: TryStream, - St::Ok: AsRef<[u8]>, - { - #[pin] - stream: St, - state: ReadState, - } -} - -#[derive(Debug)] -enum ReadState> { - Ready { chunk: T, chunk_start: usize }, - PendingChunk, - Eof, -} - -impl IntoAsyncRead -where - St: TryStream, - St::Ok: AsRef<[u8]>, -{ - pub(super) fn new(stream: St) -> Self { - Self { stream, state: ReadState::PendingChunk } - } -} - -impl AsyncRead for IntoAsyncRead -where - St: TryStream, - St::Ok: AsRef<[u8]>, -{ - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - let mut this = self.project(); - - loop { - match this.state { - ReadState::Ready { chunk, chunk_start } => { - let chunk = chunk.as_ref(); - let len = cmp::min(buf.len(), chunk.len() - *chunk_start); - - buf[..len].copy_from_slice(&chunk[*chunk_start..*chunk_start + len]); - *chunk_start += len; - - if chunk.len() == *chunk_start { - *this.state = ReadState::PendingChunk; - } - - return Poll::Ready(Ok(len)); - } - ReadState::PendingChunk => match ready!(this.stream.as_mut().try_poll_next(cx)) { - Some(Ok(chunk)) => { - if !chunk.as_ref().is_empty() { - *this.state = ReadState::Ready { chunk, chunk_start: 0 }; - } - } - Some(Err(err)) => { - *this.state = ReadState::Eof; - return Poll::Ready(Err(err)); - } - None => { - *this.state = ReadState::Eof; - return Poll::Ready(Ok(0)); - } - }, - ReadState::Eof => { - return Poll::Ready(Ok(0)); - } - } - } - } -} - -impl AsyncWrite for IntoAsyncRead -where - St: TryStream + AsyncWrite, - St::Ok: AsRef<[u8]>, -{ - fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - let this = self.project(); - this.stream.poll_write(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - this.stream.poll_flush(cx) - } - - fn poll_close(self: Pin<&mut 
Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - this.stream.poll_close(cx) - } -} - -impl AsyncBufRead for IntoAsyncRead -where - St: TryStream, - St::Ok: AsRef<[u8]>, -{ - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - while let ReadState::PendingChunk = this.state { - match ready!(this.stream.as_mut().try_poll_next(cx)) { - Some(Ok(chunk)) => { - if !chunk.as_ref().is_empty() { - *this.state = ReadState::Ready { chunk, chunk_start: 0 }; - } - } - Some(Err(err)) => { - *this.state = ReadState::Eof; - return Poll::Ready(Err(err)); - } - None => { - *this.state = ReadState::Eof; - return Poll::Ready(Ok(&[])); - } - } - } - - if let &mut ReadState::Ready { ref chunk, chunk_start } = this.state { - let chunk = chunk.as_ref(); - return Poll::Ready(Ok(&chunk[chunk_start..])); - } - - // To get to this point we must be in ReadState::Eof - Poll::Ready(Ok(&[])) - } - - fn consume(self: Pin<&mut Self>, amount: usize) { - let this = self.project(); - - // https://github.com/rust-lang/futures-rs/pull/1556#discussion_r281644295 - if amount == 0 { - return; - } - if let ReadState::Ready { chunk, chunk_start } = this.state { - *chunk_start += amount; - debug_assert!(*chunk_start <= chunk.as_ref().len()); - if *chunk_start >= chunk.as_ref().len() { - *this.state = ReadState::PendingChunk; - } - } else { - debug_assert!(false, "Attempted to consume from IntoAsyncRead without chunk"); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/into_stream.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/into_stream.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/into_stream.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/into_stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,52 +0,0 @@ -use core::pin::Pin; -use futures_core::stream::{FusedStream, Stream, TryStream}; 
-use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`into_stream`](super::TryStreamExt::into_stream) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct IntoStream { - #[pin] - stream: St, - } -} - -impl IntoStream { - #[inline] - pub(super) fn new(stream: St) -> Self { - Self { stream } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for IntoStream { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Stream for IntoStream { - type Item = Result; - - #[inline] - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().stream.try_poll_next(cx) - } - - fn size_hint(&self) -> (usize, Option) { - self.stream.size_hint() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl, Item> Sink for IntoStream { - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1251 +0,0 @@ -//! Streams -//! -//! This module contains a number of functions for working with `Streams`s -//! that return `Result`s, allowing for short-circuiting computations. 
- -#[cfg(feature = "compat")] -use crate::compat::Compat; -use crate::fns::{ - inspect_err_fn, inspect_ok_fn, into_fn, map_err_fn, map_ok_fn, InspectErrFn, InspectOkFn, - IntoFn, MapErrFn, MapOkFn, -}; -use crate::future::assert_future; -use crate::stream::assert_stream; -use crate::stream::{Inspect, Map}; -#[cfg(feature = "alloc")] -use alloc::vec::Vec; -use core::pin::Pin; - -use futures_core::{ - future::{Future, TryFuture}, - stream::TryStream, - task::{Context, Poll}, -}; - -mod and_then; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::and_then::AndThen; - -delegate_all!( - /// Stream for the [`err_into`](super::TryStreamExt::err_into) method. - ErrInto( - MapErr> - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (.)] + New[|x: St| MapErr::new(x, into_fn())] -); - -delegate_all!( - /// Stream for the [`inspect_ok`](super::TryStreamExt::inspect_ok) method. - InspectOk( - Inspect, InspectOkFn> - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Inspect::new(IntoStream::new(x), inspect_ok_fn(f))] -); - -delegate_all!( - /// Stream for the [`inspect_err`](super::TryStreamExt::inspect_err) method. - InspectErr( - Inspect, InspectErrFn> - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Inspect::new(IntoStream::new(x), inspect_err_fn(f))] -); - -mod into_stream; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::into_stream::IntoStream; - -delegate_all!( - /// Stream for the [`map_ok`](super::TryStreamExt::map_ok) method. - MapOk( - Map, MapOkFn> - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Map::new(IntoStream::new(x), map_ok_fn(f))] -); - -delegate_all!( - /// Stream for the [`map_err`](super::TryStreamExt::map_err) method. - MapErr( - Map, MapErrFn> - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. 
.)] + New[|x: St, f: F| Map::new(IntoStream::new(x), map_err_fn(f))] -); - -mod or_else; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::or_else::OrElse; - -mod try_next; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_next::TryNext; - -mod try_for_each; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_for_each::TryForEach; - -mod try_filter; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_filter::TryFilter; - -mod try_filter_map; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_filter_map::TryFilterMap; - -mod try_flatten; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_flatten::TryFlatten; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod try_flatten_unordered; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_flatten_unordered::TryFlattenUnordered; - -mod try_collect; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_collect::TryCollect; - -mod try_concat; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_concat::TryConcat; - -#[cfg(feature = "alloc")] -mod try_chunks; -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_chunks::{TryChunks, TryChunksError}; - -#[cfg(feature = "alloc")] -mod try_ready_chunks; -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_ready_chunks::{TryReadyChunks, TryReadyChunksError}; - -mod try_fold; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use 
self::try_fold::TryFold; - -mod try_unfold; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_unfold::{try_unfold, TryUnfold}; - -mod try_skip_while; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_skip_while::TrySkipWhile; - -mod try_take_while; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_take_while::TryTakeWhile; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod try_buffer_unordered; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_buffer_unordered::TryBufferUnordered; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod try_buffered; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_buffered::TryBuffered; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -mod try_for_each_concurrent; -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_for_each_concurrent::TryForEachConcurrent; - -#[cfg(feature = "io")] -#[cfg(feature = "std")] -mod into_async_read; -#[cfg(feature = "io")] -#[cfg_attr(docsrs, doc(cfg(feature = "io")))] -#[cfg(feature = "std")] -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::into_async_read::IntoAsyncRead; - -mod try_all; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_all::TryAll; - -mod try_any; -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::try_any::TryAny; - -impl TryStreamExt for S {} - -/// Adapters specific to `Result`-returning streams -pub trait TryStreamExt: TryStream { - /// Wraps the 
current stream in a new stream which converts the error type - /// into the one provided. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, TryStreamExt}; - /// - /// let mut stream = - /// stream::iter(vec![Ok(()), Err(5i32)]) - /// .err_into::(); - /// - /// assert_eq!(stream.try_next().await, Ok(Some(()))); - /// assert_eq!(stream.try_next().await, Err(5i64)); - /// # }) - /// ``` - fn err_into(self) -> ErrInto - where - Self: Sized, - Self::Error: Into, - { - assert_stream::, _>(ErrInto::new(self)) - } - - /// Wraps the current stream in a new stream which maps the success value - /// using the provided closure. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, TryStreamExt}; - /// - /// let mut stream = - /// stream::iter(vec![Ok(5), Err(0)]) - /// .map_ok(|x| x + 2); - /// - /// assert_eq!(stream.try_next().await, Ok(Some(7))); - /// assert_eq!(stream.try_next().await, Err(0)); - /// # }) - /// ``` - fn map_ok(self, f: F) -> MapOk - where - Self: Sized, - F: FnMut(Self::Ok) -> T, - { - assert_stream::, _>(MapOk::new(self, f)) - } - - /// Wraps the current stream in a new stream which maps the error value - /// using the provided closure. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, TryStreamExt}; - /// - /// let mut stream = - /// stream::iter(vec![Ok(5), Err(0)]) - /// .map_err(|x| x + 2); - /// - /// assert_eq!(stream.try_next().await, Ok(Some(5))); - /// assert_eq!(stream.try_next().await, Err(2)); - /// # }) - /// ``` - fn map_err(self, f: F) -> MapErr - where - Self: Sized, - F: FnMut(Self::Error) -> E, - { - assert_stream::, _>(MapErr::new(self, f)) - } - - /// Chain on a computation for when a value is ready, passing the successful - /// results to the provided closure `f`. 
- /// - /// This function can be used to run a unit of work when the next successful - /// value on a stream is ready. The closure provided will be yielded a value - /// when ready, and the returned future will then be run to completion to - /// produce the next value on this stream. - /// - /// Any errors produced by this stream will not be passed to the closure, - /// and will be passed through. - /// - /// The returned value of the closure must implement the `TryFuture` trait - /// and can represent some more work to be done before the composed stream - /// is finished. - /// - /// Note that this function consumes the receiving stream and returns a - /// wrapped version of it. - /// - /// To process the entire stream and return a single future representing - /// success or error, use `try_for_each` instead. - /// - /// # Examples - /// - /// ``` - /// use futures::channel::mpsc; - /// use futures::future; - /// use futures::stream::TryStreamExt; - /// - /// let (_tx, rx) = mpsc::channel::>(1); - /// - /// let rx = rx.and_then(|result| { - /// future::ok(if result % 2 == 0 { - /// Some(result) - /// } else { - /// None - /// }) - /// }); - /// ``` - fn and_then(self, f: F) -> AndThen - where - F: FnMut(Self::Ok) -> Fut, - Fut: TryFuture, - Self: Sized, - { - assert_stream::, _>(AndThen::new(self, f)) - } - - /// Chain on a computation for when an error happens, passing the - /// erroneous result to the provided closure `f`. - /// - /// This function can be used to run a unit of work and attempt to recover from - /// an error if one happens. The closure provided will be yielded an error - /// when one appears, and the returned future will then be run to completion - /// to produce the next value on this stream. - /// - /// Any successful values produced by this stream will not be passed to the - /// closure, and will be passed through. 
- /// - /// The returned value of the closure must implement the [`TryFuture`](futures_core::future::TryFuture) trait - /// and can represent some more work to be done before the composed stream - /// is finished. - /// - /// Note that this function consumes the receiving stream and returns a - /// wrapped version of it. - fn or_else(self, f: F) -> OrElse - where - F: FnMut(Self::Error) -> Fut, - Fut: TryFuture, - Self: Sized, - { - assert_stream::, _>(OrElse::new(self, f)) - } - - /// Do something with the success value of this stream, afterwards passing - /// it on. - /// - /// This is similar to the `StreamExt::inspect` method where it allows - /// easily inspecting the success value as it passes through the stream, for - /// example to debug what's going on. - fn inspect_ok(self, f: F) -> InspectOk - where - F: FnMut(&Self::Ok), - Self: Sized, - { - assert_stream::, _>(InspectOk::new(self, f)) - } - - /// Do something with the error value of this stream, afterwards passing it on. - /// - /// This is similar to the `StreamExt::inspect` method where it allows - /// easily inspecting the error value as it passes through the stream, for - /// example to debug what's going on. - fn inspect_err(self, f: F) -> InspectErr - where - F: FnMut(&Self::Error), - Self: Sized, - { - assert_stream::, _>(InspectErr::new(self, f)) - } - - /// Wraps a [`TryStream`] into a type that implements - /// [`Stream`](futures_core::stream::Stream) - /// - /// [`TryStream`]s currently do not implement the - /// [`Stream`](futures_core::stream::Stream) trait because of limitations - /// of the compiler. - /// - /// # Examples - /// - /// ``` - /// use futures::stream::{Stream, TryStream, TryStreamExt}; - /// - /// # type T = i32; - /// # type E = (); - /// fn make_try_stream() -> impl TryStream { // ... } - /// # futures::stream::empty() - /// # } - /// fn take_stream(stream: impl Stream>) { /* ... 
*/ } - /// - /// take_stream(make_try_stream().into_stream()); - /// ``` - fn into_stream(self) -> IntoStream - where - Self: Sized, - { - assert_stream::, _>(IntoStream::new(self)) - } - - /// Creates a future that attempts to resolve the next item in the stream. - /// If an error is encountered before the next item, the error is returned - /// instead. - /// - /// This is similar to the `Stream::next` combinator, but returns a - /// `Result, E>` rather than an `Option>`, making - /// for easy use with the `?` operator. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, TryStreamExt}; - /// - /// let mut stream = stream::iter(vec![Ok(()), Err(())]); - /// - /// assert_eq!(stream.try_next().await, Ok(Some(()))); - /// assert_eq!(stream.try_next().await, Err(())); - /// # }) - /// ``` - fn try_next(&mut self) -> TryNext<'_, Self> - where - Self: Unpin, - { - assert_future::, Self::Error>, _>(TryNext::new(self)) - } - - /// Attempts to run this stream to completion, executing the provided - /// asynchronous closure for each element on the stream. - /// - /// The provided closure will be called for each item this stream produces, - /// yielding a future. That future will then be executed to completion - /// before moving on to the next item. - /// - /// The returned value is a [`Future`](futures_core::future::Future) where the - /// [`Output`](futures_core::future::Future::Output) type is - /// `Result<(), Self::Error>`. If any of the intermediate - /// futures or the stream returns an error, this future will return - /// immediately with an error. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, TryStreamExt}; - /// - /// let mut x = 0i32; - /// - /// { - /// let fut = stream::repeat(Ok(1)).try_for_each(|item| { - /// x += item; - /// future::ready(if x == 3 { Err(()) } else { Ok(()) }) - /// }); - /// assert_eq!(fut.await, Err(())); - /// } - /// - /// assert_eq!(x, 3); - /// # }) - /// ``` - fn try_for_each(self, f: F) -> TryForEach - where - F: FnMut(Self::Ok) -> Fut, - Fut: TryFuture, - Self: Sized, - { - assert_future::, _>(TryForEach::new(self, f)) - } - - /// Skip elements on this stream while the provided asynchronous predicate - /// resolves to `true`. - /// - /// This function is similar to - /// [`StreamExt::skip_while`](crate::stream::StreamExt::skip_while) but exits - /// early if an error occurs. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, TryStreamExt}; - /// - /// let stream = stream::iter(vec![Ok::(1), Ok(3), Ok(2)]); - /// let stream = stream.try_skip_while(|x| future::ready(Ok(*x < 3))); - /// - /// let output: Result, i32> = stream.try_collect().await; - /// assert_eq!(output, Ok(vec![3, 2])); - /// # }) - /// ``` - fn try_skip_while(self, f: F) -> TrySkipWhile - where - F: FnMut(&Self::Ok) -> Fut, - Fut: TryFuture, - Self: Sized, - { - assert_stream::, _>(TrySkipWhile::new(self, f)) - } - - /// Take elements on this stream while the provided asynchronous predicate - /// resolves to `true`. - /// - /// This function is similar to - /// [`StreamExt::take_while`](crate::stream::StreamExt::take_while) but exits - /// early if an error occurs. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, TryStreamExt}; - /// - /// let stream = stream::iter(vec![Ok::(1), Ok(2), Ok(3), Ok(2)]); - /// let stream = stream.try_take_while(|x| future::ready(Ok(*x < 3))); - /// - /// let output: Result, i32> = stream.try_collect().await; - /// assert_eq!(output, Ok(vec![1, 2])); - /// # }) - /// ``` - fn try_take_while(self, f: F) -> TryTakeWhile - where - F: FnMut(&Self::Ok) -> Fut, - Fut: TryFuture, - Self: Sized, - { - assert_stream::, _>(TryTakeWhile::new(self, f)) - } - - /// Attempts to run this stream to completion, executing the provided asynchronous - /// closure for each element on the stream concurrently as elements become - /// available, exiting as soon as an error occurs. - /// - /// This is similar to - /// [`StreamExt::for_each_concurrent`](crate::stream::StreamExt::for_each_concurrent), - /// but will resolve to an error immediately if the underlying stream or the provided - /// closure return an error. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::oneshot; - /// use futures::stream::{self, StreamExt, TryStreamExt}; - /// - /// let (tx1, rx1) = oneshot::channel(); - /// let (tx2, rx2) = oneshot::channel(); - /// let (_tx3, rx3) = oneshot::channel(); - /// - /// let stream = stream::iter(vec![rx1, rx2, rx3]); - /// let fut = stream.map(Ok).try_for_each_concurrent( - /// /* limit */ 2, - /// |rx| async move { - /// let res: Result<(), oneshot::Canceled> = rx.await; - /// res - /// } - /// ); - /// - /// tx1.send(()).unwrap(); - /// // Drop the second sender so that `rx2` resolves to `Canceled`. - /// drop(tx2); - /// - /// // The final result is an error because the second future - /// // resulted in an error. 
- /// assert_eq!(Err(oneshot::Canceled), fut.await); - /// # }) - /// ``` - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn try_for_each_concurrent( - self, - limit: impl Into>, - f: F, - ) -> TryForEachConcurrent - where - F: FnMut(Self::Ok) -> Fut, - Fut: Future>, - Self: Sized, - { - assert_future::, _>(TryForEachConcurrent::new( - self, - limit.into(), - f, - )) - } - - /// Attempt to transform a stream into a collection, - /// returning a future representing the result of that computation. - /// - /// This combinator will collect all successful results of this stream and - /// collect them into the specified collection type. If an error happens then all - /// collected elements will be dropped and the error will be returned. - /// - /// The returned future will be resolved when the stream terminates. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::TryStreamExt; - /// use std::thread; - /// - /// let (tx, rx) = mpsc::unbounded(); - /// - /// thread::spawn(move || { - /// for i in 1..=5 { - /// tx.unbounded_send(Ok(i)).unwrap(); - /// } - /// tx.unbounded_send(Err(6)).unwrap(); - /// }); - /// - /// let output: Result, i32> = rx.try_collect().await; - /// assert_eq!(output, Err(6)); - /// # }) - /// ``` - fn try_collect>(self) -> TryCollect - where - Self: Sized, - { - assert_future::, _>(TryCollect::new(self)) - } - - /// An adaptor for chunking up successful items of the stream inside a vector. - /// - /// This combinator will attempt to pull successful items from this stream and buffer - /// them into a local vector. At most `capacity` items will get buffered - /// before they're yielded from the returned stream. - /// - /// Note that the vectors returned from this iterator may not always have - /// `capacity` elements. If the underlying stream ended and only a partial - /// vector was created, it'll be returned. 
Additionally if an error happens - /// from the underlying stream then the currently buffered items will be - /// yielded. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - /// - /// This function is similar to - /// [`StreamExt::chunks`](crate::stream::StreamExt::chunks) but exits - /// early if an error occurs. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, TryChunksError, TryStreamExt}; - /// - /// let stream = stream::iter(vec![Ok::(1), Ok(2), Ok(3), Err(4), Ok(5), Ok(6)]); - /// let mut stream = stream.try_chunks(2); - /// - /// assert_eq!(stream.try_next().await, Ok(Some(vec![1, 2]))); - /// assert_eq!(stream.try_next().await, Err(TryChunksError(vec![3], 4))); - /// assert_eq!(stream.try_next().await, Ok(Some(vec![5, 6]))); - /// # }) - /// ``` - /// - /// # Panics - /// - /// This method will panic if `capacity` is zero. - #[cfg(feature = "alloc")] - fn try_chunks(self, capacity: usize) -> TryChunks - where - Self: Sized, - { - assert_stream::, TryChunksError>, _>( - TryChunks::new(self, capacity), - ) - } - - /// An adaptor for chunking up successful, ready items of the stream inside a vector. - /// - /// This combinator will attempt to pull successful items from this stream and buffer - /// them into a local vector. At most `capacity` items will get buffered - /// before they're yielded from the returned stream. If the underlying stream - /// returns `Poll::Pending`, and the collected chunk is not empty, it will - /// be immidiatly returned. - /// - /// Note that the vectors returned from this iterator may not always have - /// `capacity` elements. If the underlying stream ended and only a partial - /// vector was created, it'll be returned. Additionally if an error happens - /// from the underlying stream then the currently buffered items will be - /// yielded. 
- /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - /// - /// This function is similar to - /// [`StreamExt::ready_chunks`](crate::stream::StreamExt::ready_chunks) but exits - /// early if an error occurs. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, TryReadyChunksError, TryStreamExt}; - /// - /// let stream = stream::iter(vec![Ok::(1), Ok(2), Ok(3), Err(4), Ok(5), Ok(6)]); - /// let mut stream = stream.try_ready_chunks(2); - /// - /// assert_eq!(stream.try_next().await, Ok(Some(vec![1, 2]))); - /// assert_eq!(stream.try_next().await, Err(TryReadyChunksError(vec![3], 4))); - /// assert_eq!(stream.try_next().await, Ok(Some(vec![5, 6]))); - /// # }) - /// ``` - /// - /// # Panics - /// - /// This method will panic if `capacity` is zero. - #[cfg(feature = "alloc")] - fn try_ready_chunks(self, capacity: usize) -> TryReadyChunks - where - Self: Sized, - { - assert_stream::, TryReadyChunksError>, _>( - TryReadyChunks::new(self, capacity), - ) - } - - /// Attempt to filter the values produced by this stream according to the - /// provided asynchronous closure. - /// - /// As values of this stream are made available, the provided predicate `f` - /// will be run on them. If the predicate returns a `Future` which resolves - /// to `true`, then the stream will yield the value, but if the predicate - /// return a `Future` which resolves to `false`, then the value will be - /// discarded and the next value will be produced. - /// - /// All errors are passed through without filtering in this combinator. - /// - /// Note that this function consumes the stream passed into it and returns a - /// wrapped version of it, similar to the existing `filter` methods in - /// the standard library. 
- /// - /// # Examples - /// ``` - /// # futures::executor::block_on(async { - /// use futures::future; - /// use futures::stream::{self, StreamExt, TryStreamExt}; - /// - /// let stream = stream::iter(vec![Ok(1i32), Ok(2i32), Ok(3i32), Err("error")]); - /// let mut evens = stream.try_filter(|x| { - /// future::ready(x % 2 == 0) - /// }); - /// - /// assert_eq!(evens.next().await, Some(Ok(2))); - /// assert_eq!(evens.next().await, Some(Err("error"))); - /// # }) - /// ``` - fn try_filter(self, f: F) -> TryFilter - where - Fut: Future, - F: FnMut(&Self::Ok) -> Fut, - Self: Sized, - { - assert_stream::, _>(TryFilter::new(self, f)) - } - - /// Attempt to filter the values produced by this stream while - /// simultaneously mapping them to a different type according to the - /// provided asynchronous closure. - /// - /// As values of this stream are made available, the provided function will - /// be run on them. If the future returned by the predicate `f` resolves to - /// [`Some(item)`](Some) then the stream will yield the value `item`, but if - /// it resolves to [`None`] then the next value will be produced. - /// - /// All errors are passed through without filtering in this combinator. - /// - /// Note that this function consumes the stream passed into it and returns a - /// wrapped version of it, similar to the existing `filter_map` methods in - /// the standard library. 
- /// - /// # Examples - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt, TryStreamExt}; - /// use futures::pin_mut; - /// - /// let stream = stream::iter(vec![Ok(1i32), Ok(6i32), Err("error")]); - /// let halves = stream.try_filter_map(|x| async move { - /// let ret = if x % 2 == 0 { Some(x / 2) } else { None }; - /// Ok(ret) - /// }); - /// - /// pin_mut!(halves); - /// assert_eq!(halves.next().await, Some(Ok(3))); - /// assert_eq!(halves.next().await, Some(Err("error"))); - /// # }) - /// ``` - fn try_filter_map(self, f: F) -> TryFilterMap - where - Fut: TryFuture, Error = Self::Error>, - F: FnMut(Self::Ok) -> Fut, - Self: Sized, - { - assert_stream::, _>(TryFilterMap::new(self, f)) - } - - /// Flattens a stream of streams into just one continuous stream. Produced streams - /// will be polled concurrently and any errors will be passed through without looking at them. - /// If the underlying base stream returns an error, it will be **immediately** propagated. - /// - /// The only argument is an optional limit on the number of concurrently - /// polled streams. If this limit is not `None`, no more than `limit` streams - /// will be polled at the same time. The `limit` argument is of type - /// `Into>`, and so can be provided as either `None`, - /// `Some(10)`, or just `10`. Note: a limit of zero is interpreted as - /// no limit at all, and will have the same result as passing in `None`. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::{StreamExt, TryStreamExt}; - /// use std::thread; - /// - /// let (tx1, rx1) = mpsc::unbounded(); - /// let (tx2, rx2) = mpsc::unbounded(); - /// let (tx3, rx3) = mpsc::unbounded(); - /// - /// thread::spawn(move || { - /// tx1.unbounded_send(Ok(1)).unwrap(); - /// }); - /// thread::spawn(move || { - /// tx2.unbounded_send(Ok(2)).unwrap(); - /// tx2.unbounded_send(Err(3)).unwrap(); - /// tx2.unbounded_send(Ok(4)).unwrap(); - /// }); - /// thread::spawn(move || { - /// tx3.unbounded_send(Ok(rx1)).unwrap(); - /// tx3.unbounded_send(Ok(rx2)).unwrap(); - /// tx3.unbounded_send(Err(5)).unwrap(); - /// }); - /// - /// let stream = rx3.try_flatten_unordered(None); - /// let mut values: Vec<_> = stream.collect().await; - /// values.sort(); - /// - /// assert_eq!(values, vec![Ok(1), Ok(2), Ok(4), Err(3), Err(5)]); - /// # }); - /// ``` - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn try_flatten_unordered(self, limit: impl Into>) -> TryFlattenUnordered - where - Self::Ok: TryStream + Unpin, - ::Error: From, - Self: Sized, - { - assert_stream::::Ok, ::Error>, _>( - TryFlattenUnordered::new(self, limit), - ) - } - - /// Flattens a stream of streams into just one continuous stream. - /// - /// If this stream's elements are themselves streams then this combinator - /// will flatten out the entire stream to one long chain of elements. Any - /// errors are passed through without looking at them, but otherwise each - /// individual stream will get exhausted before moving on to the next. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::{StreamExt, TryStreamExt}; - /// use std::thread; - /// - /// let (tx1, rx1) = mpsc::unbounded(); - /// let (tx2, rx2) = mpsc::unbounded(); - /// let (tx3, rx3) = mpsc::unbounded(); - /// - /// thread::spawn(move || { - /// tx1.unbounded_send(Ok(1)).unwrap(); - /// }); - /// thread::spawn(move || { - /// tx2.unbounded_send(Ok(2)).unwrap(); - /// tx2.unbounded_send(Err(3)).unwrap(); - /// tx2.unbounded_send(Ok(4)).unwrap(); - /// }); - /// thread::spawn(move || { - /// tx3.unbounded_send(Ok(rx1)).unwrap(); - /// tx3.unbounded_send(Ok(rx2)).unwrap(); - /// tx3.unbounded_send(Err(5)).unwrap(); - /// }); - /// - /// let mut stream = rx3.try_flatten(); - /// assert_eq!(stream.next().await, Some(Ok(1))); - /// assert_eq!(stream.next().await, Some(Ok(2))); - /// assert_eq!(stream.next().await, Some(Err(3))); - /// assert_eq!(stream.next().await, Some(Ok(4))); - /// assert_eq!(stream.next().await, Some(Err(5))); - /// assert_eq!(stream.next().await, None); - /// # }); - /// ``` - fn try_flatten(self) -> TryFlatten - where - Self::Ok: TryStream, - ::Error: From, - Self: Sized, - { - assert_stream::::Ok, ::Error>, _>( - TryFlatten::new(self), - ) - } - - /// Attempt to execute an accumulating asynchronous computation over a - /// stream, collecting all the values into one final result. - /// - /// This combinator will accumulate all values returned by this stream - /// according to the closure provided. The initial state is also provided to - /// this method and then is returned again by each execution of the closure. - /// Once the entire stream has been exhausted the returned future will - /// resolve to this value. - /// - /// This method is similar to [`fold`](crate::stream::StreamExt::fold), but will - /// exit early if an error is encountered in either the stream or the - /// provided closure. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, TryStreamExt}; - /// - /// let number_stream = stream::iter(vec![Ok::(1), Ok(2)]); - /// let sum = number_stream.try_fold(0, |acc, x| async move { Ok(acc + x) }); - /// assert_eq!(sum.await, Ok(3)); - /// - /// let number_stream_with_err = stream::iter(vec![Ok::(1), Err(2), Ok(1)]); - /// let sum = number_stream_with_err.try_fold(0, |acc, x| async move { Ok(acc + x) }); - /// assert_eq!(sum.await, Err(2)); - /// # }) - /// ``` - fn try_fold(self, init: T, f: F) -> TryFold - where - F: FnMut(T, Self::Ok) -> Fut, - Fut: TryFuture, - Self: Sized, - { - assert_future::, _>(TryFold::new(self, f, init)) - } - - /// Attempt to concatenate all items of a stream into a single - /// extendable destination, returning a future representing the end result. - /// - /// This combinator will extend the first item with the contents of all - /// the subsequent successful results of the stream. If the stream is empty, - /// the default value will be returned. - /// - /// Works with all collections that implement the [`Extend`](std::iter::Extend) trait. - /// - /// This method is similar to [`concat`](crate::stream::StreamExt::concat), but will - /// exit early if an error is encountered in the stream. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::TryStreamExt; - /// use std::thread; - /// - /// let (tx, rx) = mpsc::unbounded::, ()>>(); - /// - /// thread::spawn(move || { - /// for i in (0..3).rev() { - /// let n = i * 3; - /// tx.unbounded_send(Ok(vec![n + 1, n + 2, n + 3])).unwrap(); - /// } - /// }); - /// - /// let result = rx.try_concat().await; - /// - /// assert_eq!(result, Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3])); - /// # }); - /// ``` - fn try_concat(self) -> TryConcat - where - Self: Sized, - Self::Ok: Extend<<::Ok as IntoIterator>::Item> + IntoIterator + Default, - { - assert_future::, _>(TryConcat::new(self)) - } - - /// Attempt to execute several futures from a stream concurrently (unordered). - /// - /// This stream's `Ok` type must be a [`TryFuture`](futures_core::future::TryFuture) with an `Error` type - /// that matches the stream's `Error` type. - /// - /// This adaptor will buffer up to `n` futures and then return their - /// outputs in the order in which they complete. If the underlying stream - /// returns an error, it will be immediately propagated. - /// - /// The returned stream will be a stream of results, each containing either - /// an error or a future's output. An error can be produced either by the - /// underlying stream itself or by one of the futures it yielded. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. 
- /// - /// # Examples - /// - /// Results are returned in the order of completion: - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::oneshot; - /// use futures::stream::{self, StreamExt, TryStreamExt}; - /// - /// let (send_one, recv_one) = oneshot::channel(); - /// let (send_two, recv_two) = oneshot::channel(); - /// - /// let stream_of_futures = stream::iter(vec![Ok(recv_one), Ok(recv_two)]); - /// - /// let mut buffered = stream_of_futures.try_buffer_unordered(10); - /// - /// send_two.send(2i32)?; - /// assert_eq!(buffered.next().await, Some(Ok(2i32))); - /// - /// send_one.send(1i32)?; - /// assert_eq!(buffered.next().await, Some(Ok(1i32))); - /// - /// assert_eq!(buffered.next().await, None); - /// # Ok::<(), i32>(()) }).unwrap(); - /// ``` - /// - /// Errors from the underlying stream itself are propagated: - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::{StreamExt, TryStreamExt}; - /// - /// let (sink, stream_of_futures) = mpsc::unbounded(); - /// let mut buffered = stream_of_futures.try_buffer_unordered(10); - /// - /// sink.unbounded_send(Ok(async { Ok(7i32) }))?; - /// assert_eq!(buffered.next().await, Some(Ok(7i32))); - /// - /// sink.unbounded_send(Err("error in the stream"))?; - /// assert_eq!(buffered.next().await, Some(Err("error in the stream"))); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn try_buffer_unordered(self, n: usize) -> TryBufferUnordered - where - Self::Ok: TryFuture, - Self: Sized, - { - assert_stream::::Ok, Self::Error>, _>( - TryBufferUnordered::new(self, n), - ) - } - - /// Attempt to execute several futures from a stream concurrently. - /// - /// This stream's `Ok` type must be a [`TryFuture`](futures_core::future::TryFuture) with an `Error` type - /// that matches the stream's `Error` type. 
- /// - /// This adaptor will buffer up to `n` futures and then return their - /// outputs in the same order as the underlying stream. If the underlying stream returns an error, it will - /// be immediately propagated. - /// - /// The returned stream will be a stream of results, each containing either - /// an error or a future's output. An error can be produced either by the - /// underlying stream itself or by one of the futures it yielded. - /// - /// This method is only available when the `std` or `alloc` feature of this - /// library is activated, and it is activated by default. - /// - /// # Examples - /// - /// Results are returned in the order of addition: - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::oneshot; - /// use futures::future::lazy; - /// use futures::stream::{self, StreamExt, TryStreamExt}; - /// - /// let (send_one, recv_one) = oneshot::channel(); - /// let (send_two, recv_two) = oneshot::channel(); - /// - /// let mut buffered = lazy(move |cx| { - /// let stream_of_futures = stream::iter(vec![Ok(recv_one), Ok(recv_two)]); - /// - /// let mut buffered = stream_of_futures.try_buffered(10); - /// - /// assert!(buffered.try_poll_next_unpin(cx).is_pending()); - /// - /// send_two.send(2i32)?; - /// assert!(buffered.try_poll_next_unpin(cx).is_pending()); - /// Ok::<_, i32>(buffered) - /// }).await?; - /// - /// send_one.send(1i32)?; - /// assert_eq!(buffered.next().await, Some(Ok(1i32))); - /// assert_eq!(buffered.next().await, Some(Ok(2i32))); - /// - /// assert_eq!(buffered.next().await, None); - /// # Ok::<(), i32>(()) }).unwrap(); - /// ``` - /// - /// Errors from the underlying stream itself are propagated: - /// ``` - /// # futures::executor::block_on(async { - /// use futures::channel::mpsc; - /// use futures::stream::{StreamExt, TryStreamExt}; - /// - /// let (sink, stream_of_futures) = mpsc::unbounded(); - /// let mut buffered = stream_of_futures.try_buffered(10); - /// - /// sink.unbounded_send(Ok(async { 
Ok(7i32) }))?; - /// assert_eq!(buffered.next().await, Some(Ok(7i32))); - /// - /// sink.unbounded_send(Err("error in the stream"))?; - /// assert_eq!(buffered.next().await, Some(Err("error in the stream"))); - /// # Ok::<(), Box>(()) }).unwrap(); - /// ``` - #[cfg(not(futures_no_atomic_cas))] - #[cfg(feature = "alloc")] - fn try_buffered(self, n: usize) -> TryBuffered - where - Self::Ok: TryFuture, - Self: Sized, - { - assert_stream::::Ok, Self::Error>, _>(TryBuffered::new( - self, n, - )) - } - - // TODO: false positive warning from rustdoc. Verify once #43466 settles - // - /// A convenience method for calling [`TryStream::try_poll_next`] on [`Unpin`] - /// stream types. - fn try_poll_next_unpin( - &mut self, - cx: &mut Context<'_>, - ) -> Poll>> - where - Self: Unpin, - { - Pin::new(self).try_poll_next(cx) - } - - /// Wraps a [`TryStream`] into a stream compatible with libraries using - /// futures 0.1 `Stream`. Requires the `compat` feature to be enabled. - /// ``` - /// # if cfg!(miri) { return; } // Miri does not support epoll - /// use futures::future::{FutureExt, TryFutureExt}; - /// # let (tx, rx) = futures::channel::oneshot::channel(); - /// - /// let future03 = async { - /// println!("Running on the pool"); - /// tx.send(42).unwrap(); - /// }; - /// - /// let future01 = future03 - /// .unit_error() // Make it a TryFuture - /// .boxed() // Make it Unpin - /// .compat(); - /// - /// tokio::run(future01); - /// # assert_eq!(42, futures::executor::block_on(rx).unwrap()); - /// ``` - #[cfg(feature = "compat")] - #[cfg_attr(docsrs, doc(cfg(feature = "compat")))] - fn compat(self) -> Compat - where - Self: Sized + Unpin, - { - Compat::new(self) - } - - /// Adapter that converts this stream into an [`AsyncBufRead`](crate::io::AsyncBufRead). - /// - /// This method is only available when the `std` feature of this - /// library is activated, and it is activated by default. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, TryStreamExt}; - /// use futures::io::AsyncReadExt; - /// - /// let stream = stream::iter([Ok(vec![1, 2, 3]), Ok(vec![4, 5])]); - /// let mut reader = stream.into_async_read(); - /// - /// let mut buf = Vec::new(); - /// reader.read_to_end(&mut buf).await.unwrap(); - /// assert_eq!(buf, [1, 2, 3, 4, 5]); - /// # }) - /// ``` - #[cfg(feature = "io")] - #[cfg_attr(docsrs, doc(cfg(feature = "io")))] - #[cfg(feature = "std")] - fn into_async_read(self) -> IntoAsyncRead - where - Self: Sized + TryStreamExt, - Self::Ok: AsRef<[u8]>, - { - crate::io::assert_read(IntoAsyncRead::new(self)) - } - - /// Attempt to execute a predicate over an asynchronous stream and evaluate if all items - /// satisfy the predicate. Exits early if an `Err` is encountered or if an `Ok` item is found - /// that does not satisfy the predicate. - /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt, TryStreamExt}; - /// use std::convert::Infallible; - /// - /// let number_stream = stream::iter(1..10).map(Ok::<_, Infallible>); - /// let positive = number_stream.try_all(|i| async move { i > 0 }); - /// assert_eq!(positive.await, Ok(true)); - /// - /// let stream_with_errors = stream::iter([Ok(1), Err("err"), Ok(3)]); - /// let positive = stream_with_errors.try_all(|i| async move { i > 0 }); - /// assert_eq!(positive.await, Err("err")); - /// # }); - /// ``` - fn try_all(self, f: F) -> TryAll - where - Self: Sized, - F: FnMut(Self::Ok) -> Fut, - Fut: Future, - { - assert_future::, _>(TryAll::new(self, f)) - } - - /// Attempt to execute a predicate over an asynchronous stream and evaluate if any items - /// satisfy the predicate. Exits early if an `Err` is encountered or if an `Ok` item is found - /// that satisfies the predicate. 
- /// - /// # Examples - /// - /// ``` - /// # futures::executor::block_on(async { - /// use futures::stream::{self, StreamExt, TryStreamExt}; - /// use std::convert::Infallible; - /// - /// let number_stream = stream::iter(0..10).map(Ok::<_, Infallible>); - /// let contain_three = number_stream.try_any(|i| async move { i == 3 }); - /// assert_eq!(contain_three.await, Ok(true)); - /// - /// let stream_with_errors = stream::iter([Ok(1), Err("err"), Ok(3)]); - /// let contain_three = stream_with_errors.try_any(|i| async move { i == 3 }); - /// assert_eq!(contain_three.await, Err("err")); - /// # }); - /// ``` - fn try_any(self, f: F) -> TryAny - where - Self: Sized, - F: FnMut(Self::Ok) -> Fut, - Fut: Future, - { - assert_future::, _>(TryAny::new(self, f)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/or_else.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/or_else.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/or_else.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/or_else.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::TryFuture; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`or_else`](super::TryStreamExt::or_else) method. 
- #[must_use = "streams do nothing unless polled"] - pub struct OrElse { - #[pin] - stream: St, - #[pin] - future: Option, - f: F, - } -} - -impl fmt::Debug for OrElse -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OrElse") - .field("stream", &self.stream) - .field("future", &self.future) - .finish() - } -} - -impl OrElse -where - St: TryStream, - F: FnMut(St::Error) -> Fut, - Fut: TryFuture, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, future: None, f } - } - - delegate_access_inner!(stream, St, ()); -} - -impl Stream for OrElse -where - St: TryStream, - F: FnMut(St::Error) -> Fut, - Fut: TryFuture, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - let item = ready!(fut.try_poll(cx)); - this.future.set(None); - break Some(item); - } else { - match ready!(this.stream.as_mut().try_poll_next(cx)) { - Some(Ok(item)) => break Some(Ok(item)), - Some(Err(e)) => { - this.future.set(Some((this.f)(e))); - } - None => break None, - } - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - let future_len = usize::from(self.future.is_some()); - let (lower, upper) = self.stream.size_hint(); - let lower = lower.saturating_add(future_len); - let upper = match upper { - Some(x) => x.checked_add(future_len), - None => None, - }; - (lower, upper) - } -} - -impl FusedStream for OrElse -where - St: TryStream + FusedStream, - F: FnMut(St::Error) -> Fut, - Fut: TryFuture, -{ - fn is_terminated(&self) -> bool { - self.future.is_none() && self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for OrElse -where - S: Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru 
s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_all.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_all.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_all.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,98 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::TryStream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`try_all`](super::TryStreamExt::try_all) method. - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct TryAll { - #[pin] - stream: St, - f: F, - done: bool, - #[pin] - future: Option, - } -} - -impl fmt::Debug for TryAll -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryAll") - .field("stream", &self.stream) - .field("done", &self.done) - .field("future", &self.future) - .finish() - } -} - -impl TryAll -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, done: false, future: None } - } -} - -impl FusedFuture for TryAll -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.done && self.future.is_none() - } -} - -impl Future for TryAll -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: Future, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - // we're currently processing a future to produce a new value - let acc = ready!(fut.poll(cx)); - this.future.set(None); - if !acc { - *this.done = true; - break 
Ok(false); - } // early exit - } else if !*this.done { - // we're waiting on a new item from the stream - match ready!(this.stream.as_mut().try_poll_next(cx)) { - Some(Ok(item)) => { - this.future.set(Some((this.f)(item))); - } - Some(Err(err)) => { - *this.done = true; - break Err(err); - } - None => { - *this.done = true; - break Ok(true); - } - } - } else { - panic!("TryAll polled after completion") - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_any.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_any.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_any.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_any.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,98 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::TryStream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`any`](super::StreamExt::any) method. 
- #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct TryAny { - #[pin] - stream: St, - f: F, - done: bool, - #[pin] - future: Option, - } -} - -impl fmt::Debug for TryAny -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryAny") - .field("stream", &self.stream) - .field("done", &self.done) - .field("future", &self.future) - .finish() - } -} - -impl TryAny -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: Future, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, done: false, future: None } - } -} - -impl FusedFuture for TryAny -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.done && self.future.is_none() - } -} - -impl Future for TryAny -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: Future, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - // we're currently processing a future to produce a new value - let acc = ready!(fut.poll(cx)); - this.future.set(None); - if acc { - *this.done = true; - break Ok(true); - } // early exit - } else if !*this.done { - // we're waiting on a new item from the stream - match ready!(this.stream.as_mut().try_poll_next(cx)) { - Some(Ok(item)) => { - this.future.set(Some((this.f)(item))); - } - Some(Err(err)) => { - *this.done = true; - break Err(err); - } - None => { - *this.done = true; - break Ok(false); - } - } - } else { - panic!("TryAny polled after completion") - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_buffered.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_buffered.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_buffered.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_buffered.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,87 +0,0 @@ -use crate::future::{IntoFuture, TryFutureExt}; -use crate::stream::{Fuse, FuturesOrdered, IntoStream, StreamExt}; -use core::pin::Pin; -use futures_core::future::TryFuture; -use futures_core::stream::{Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`try_buffered`](super::TryStreamExt::try_buffered) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct TryBuffered - where - St: TryStream, - St::Ok: TryFuture, - { - #[pin] - stream: Fuse>, - in_progress_queue: FuturesOrdered>, - max: usize, - } -} - -impl TryBuffered -where - St: TryStream, - St::Ok: TryFuture, -{ - pub(super) fn new(stream: St, n: usize) -> Self { - Self { - stream: IntoStream::new(stream).fuse(), - in_progress_queue: FuturesOrdered::new(), - max: n, - } - } - - delegate_access_inner!(stream, St, (. .)); -} - -impl Stream for TryBuffered -where - St: TryStream, - St::Ok: TryFuture, -{ - type Item = Result<::Ok, St::Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - // First up, try to spawn off as many futures as possible by filling up - // our queue of futures. Propagate errors from the stream immediately. - while this.in_progress_queue.len() < *this.max { - match this.stream.as_mut().poll_next(cx)? 
{ - Poll::Ready(Some(fut)) => this.in_progress_queue.push_back(fut.into_future()), - Poll::Ready(None) | Poll::Pending => break, - } - } - - // Attempt to pull the next value from the in_progress_queue - match this.in_progress_queue.poll_next_unpin(cx) { - x @ Poll::Pending | x @ Poll::Ready(Some(_)) => return x, - Poll::Ready(None) => {} - } - - // If more values are still coming from the stream, we're not done yet - if this.stream.is_done() { - Poll::Ready(None) - } else { - Poll::Pending - } - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TryBuffered -where - S: TryStream + Sink, - S::Ok: TryFuture, -{ - type Error = E; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_buffer_unordered.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_buffer_unordered.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_buffer_unordered.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_buffer_unordered.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,86 +0,0 @@ -use crate::future::{IntoFuture, TryFutureExt}; -use crate::stream::{Fuse, FuturesUnordered, IntoStream, StreamExt}; -use core::pin::Pin; -use futures_core::future::TryFuture; -use futures_core::stream::{Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the - /// [`try_buffer_unordered`](super::TryStreamExt::try_buffer_unordered) method. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct TryBufferUnordered - where St: TryStream - { - #[pin] - stream: Fuse>, - in_progress_queue: FuturesUnordered>, - max: usize, - } -} - -impl TryBufferUnordered -where - St: TryStream, - St::Ok: TryFuture, -{ - pub(super) fn new(stream: St, n: usize) -> Self { - Self { - stream: IntoStream::new(stream).fuse(), - in_progress_queue: FuturesUnordered::new(), - max: n, - } - } - - delegate_access_inner!(stream, St, (. .)); -} - -impl Stream for TryBufferUnordered -where - St: TryStream, - St::Ok: TryFuture, -{ - type Item = Result<::Ok, St::Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - // First up, try to spawn off as many futures as possible by filling up - // our queue of futures. Propagate errors from the stream immediately. - while this.in_progress_queue.len() < *this.max { - match this.stream.as_mut().poll_next(cx)? { - Poll::Ready(Some(fut)) => this.in_progress_queue.push(fut.into_future()), - Poll::Ready(None) | Poll::Pending => break, - } - } - - // Attempt to pull the next value from the in_progress_queue - match this.in_progress_queue.poll_next_unpin(cx) { - x @ Poll::Pending | x @ Poll::Ready(Some(_)) => return x, - Poll::Ready(None) => {} - } - - // If more values are still coming from the stream, we're not done yet - if this.stream.is_done() { - Poll::Ready(None) - } else { - Poll::Pending - } - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TryBufferUnordered -where - S: TryStream + Sink, - S::Ok: TryFuture, -{ - type Error = E; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_chunks.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_chunks.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_chunks.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_chunks.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,132 +0,0 @@ -use crate::stream::{Fuse, IntoStream, StreamExt}; - -use alloc::vec::Vec; -use core::pin::Pin; -use core::{fmt, mem}; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`try_chunks`](super::TryStreamExt::try_chunks) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct TryChunks { - #[pin] - stream: Fuse>, - items: Vec, - cap: usize, // https://github.com/rust-lang/futures-rs/issues/1475 - } -} - -impl TryChunks { - pub(super) fn new(stream: St, capacity: usize) -> Self { - assert!(capacity > 0); - - Self { - stream: IntoStream::new(stream).fuse(), - items: Vec::with_capacity(capacity), - cap: capacity, - } - } - - fn take(self: Pin<&mut Self>) -> Vec { - let cap = self.cap; - mem::replace(self.project().items, Vec::with_capacity(cap)) - } - - delegate_access_inner!(stream, St, (. .)); -} - -type TryChunksStreamError = TryChunksError<::Ok, ::Error>; - -impl Stream for TryChunks { - type Item = Result, TryChunksStreamError>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.as_mut().project(); - loop { - match ready!(this.stream.as_mut().try_poll_next(cx)) { - // Push the item into the buffer and check whether it is full. - // If so, replace our buffer with a new and empty one and return - // the full one. 
- Some(item) => match item { - Ok(item) => { - this.items.push(item); - if this.items.len() >= *this.cap { - return Poll::Ready(Some(Ok(self.take()))); - } - } - Err(e) => { - return Poll::Ready(Some(Err(TryChunksError(self.take(), e)))); - } - }, - - // Since the underlying stream ran out of values, return what we - // have buffered, if we have anything. - None => { - let last = if this.items.is_empty() { - None - } else { - let full_buf = mem::take(this.items); - Some(full_buf) - }; - - return Poll::Ready(last.map(Ok)); - } - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let chunk_len = usize::from(!self.items.is_empty()); - let (lower, upper) = self.stream.size_hint(); - let lower = (lower / self.cap).saturating_add(chunk_len); - let upper = match upper { - Some(x) => x.checked_add(chunk_len), - None => None, - }; - (lower, upper) - } -} - -impl FusedStream for TryChunks { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() && self.items.is_empty() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TryChunks -where - S: TryStream + Sink, -{ - type Error = >::Error; - - delegate_sink!(stream, Item); -} - -/// Error indicating, that while chunk was collected inner stream produced an error. -/// -/// Contains all items that were collected before an error occurred, and the stream error itself. 
-#[derive(PartialEq, Eq)] -pub struct TryChunksError(pub Vec, pub E); - -impl fmt::Debug for TryChunksError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.1.fmt(f) - } -} - -impl fmt::Display for TryChunksError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.1.fmt(f) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for TryChunksError {} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_collect.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_collect.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_collect.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_collect.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,52 +0,0 @@ -use core::mem; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::ready; -use futures_core::stream::{FusedStream, TryStream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`try_collect`](super::TryStreamExt::try_collect) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct TryCollect { - #[pin] - stream: St, - items: C, - } -} - -impl TryCollect { - pub(super) fn new(s: St) -> Self { - Self { stream: s, items: Default::default() } - } -} - -impl FusedFuture for TryCollect -where - St: TryStream + FusedStream, - C: Default + Extend, -{ - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Future for TryCollect -where - St: TryStream, - C: Default + Extend, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - Poll::Ready(Ok(loop { - match ready!(this.stream.as_mut().try_poll_next(cx)?) 
{ - Some(x) => this.items.extend(Some(x)), - None => break mem::take(this.items), - } - })) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_concat.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_concat.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_concat.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_concat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,51 +0,0 @@ -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::TryStream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`try_concat`](super::TryStreamExt::try_concat) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct TryConcat { - #[pin] - stream: St, - accum: Option, - } -} - -impl TryConcat -where - St: TryStream, - St::Ok: Extend<::Item> + IntoIterator + Default, -{ - pub(super) fn new(stream: St) -> Self { - Self { stream, accum: None } - } -} - -impl Future for TryConcat -where - St: TryStream, - St::Ok: Extend<::Item> + IntoIterator + Default, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - - Poll::Ready(Ok(loop { - if let Some(x) = ready!(this.stream.as_mut().try_poll_next(cx)?) 
{ - if let Some(a) = this.accum { - a.extend(x) - } else { - *this.accum = Some(x) - } - } else { - break this.accum.take().unwrap_or_default(); - } - })) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_filter_map.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_filter_map.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_filter_map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_filter_map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,106 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::TryFuture; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`try_filter_map`](super::TryStreamExt::try_filter_map) - /// method. 
- #[must_use = "streams do nothing unless polled"] - pub struct TryFilterMap { - #[pin] - stream: St, - f: F, - #[pin] - pending: Option, - } -} - -impl fmt::Debug for TryFilterMap -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryFilterMap") - .field("stream", &self.stream) - .field("pending", &self.pending) - .finish() - } -} - -impl TryFilterMap { - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, pending: None } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for TryFilterMap -where - St: TryStream + FusedStream, - Fut: TryFuture, Error = St::Error>, - F: FnMut(St::Ok) -> Fut, -{ - fn is_terminated(&self) -> bool { - self.pending.is_none() && self.stream.is_terminated() - } -} - -impl Stream for TryFilterMap -where - St: TryStream, - Fut: TryFuture, Error = St::Error>, - F: FnMut(St::Ok) -> Fut, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(p) = this.pending.as_mut().as_pin_mut() { - // We have an item in progress, poll that until it's done - let res = ready!(p.try_poll(cx)); - this.pending.set(None); - let item = res?; - if item.is_some() { - break item.map(Ok); - } - } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) 
{ - // No item in progress, but the stream is still going - this.pending.set(Some((this.f)(item))); - } else { - // The stream is done - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - let pending_len = usize::from(self.pending.is_some()); - let (_, upper) = self.stream.size_hint(); - let upper = match upper { - Some(x) => x.checked_add(pending_len), - None => None, - }; - (0, upper) // can't know a lower bound, due to the predicate - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TryFilterMap -where - S: Sink, -{ - type Error = S::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_filter.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_filter.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_filter.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_filter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,112 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`try_filter`](super::TryStreamExt::try_filter) - /// method. 
- #[must_use = "streams do nothing unless polled"] - pub struct TryFilter - where St: TryStream - { - #[pin] - stream: St, - f: F, - #[pin] - pending_fut: Option, - pending_item: Option, - } -} - -impl fmt::Debug for TryFilter -where - St: TryStream + fmt::Debug, - St::Ok: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryFilter") - .field("stream", &self.stream) - .field("pending_fut", &self.pending_fut) - .field("pending_item", &self.pending_item) - .finish() - } -} - -impl TryFilter -where - St: TryStream, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, pending_fut: None, pending_item: None } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for TryFilter -where - St: TryStream + FusedStream, - F: FnMut(&St::Ok) -> Fut, - Fut: Future, -{ - fn is_terminated(&self) -> bool { - self.pending_fut.is_none() && self.stream.is_terminated() - } -} - -impl Stream for TryFilter -where - St: TryStream, - Fut: Future, - F: FnMut(&St::Ok) -> Fut, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() { - let res = ready!(fut.poll(cx)); - this.pending_fut.set(None); - if res { - break this.pending_item.take().map(Ok); - } - *this.pending_item = None; - } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) 
{ - this.pending_fut.set(Some((this.f)(&item))); - *this.pending_item = Some(item); - } else { - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - let pending_len = usize::from(self.pending_fut.is_some()); - let (_, upper) = self.stream.size_hint(); - let upper = match upper { - Some(x) => x.checked_add(pending_len), - None => None, - }; - (0, upper) // can't know a lower bound, due to the predicate - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TryFilter -where - S: TryStream + Sink, -{ - type Error = E; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_flatten.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_flatten.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_flatten.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_flatten.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,84 +0,0 @@ -use core::pin::Pin; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`try_flatten`](super::TryStreamExt::try_flatten) method. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct TryFlatten - where - St: TryStream, - { - #[pin] - stream: St, - #[pin] - next: Option, - } -} - -impl TryFlatten -where - St: TryStream, - St::Ok: TryStream, - ::Error: From, -{ - pub(super) fn new(stream: St) -> Self { - Self { stream, next: None } - } - - delegate_access_inner!(stream, St, ()); -} - -impl FusedStream for TryFlatten -where - St: TryStream + FusedStream, - St::Ok: TryStream, - ::Error: From, -{ - fn is_terminated(&self) -> bool { - self.next.is_none() && self.stream.is_terminated() - } -} - -impl Stream for TryFlatten -where - St: TryStream, - St::Ok: TryStream, - ::Error: From, -{ - type Item = Result<::Ok, ::Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(s) = this.next.as_mut().as_pin_mut() { - if let Some(item) = ready!(s.try_poll_next(cx)?) { - break Some(Ok(item)); - } else { - this.next.set(None); - } - } else if let Some(s) = ready!(this.stream.as_mut().try_poll_next(cx)?) 
{ - this.next.set(Some(s)); - } else { - break None; - } - }) - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TryFlatten -where - S: TryStream + Sink, -{ - type Error = >::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_flatten_unordered.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_flatten_unordered.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_flatten_unordered.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_flatten_unordered.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,176 +0,0 @@ -use core::marker::PhantomData; -use core::pin::Pin; - -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; - -use pin_project_lite::pin_project; - -use crate::future::Either; -use crate::stream::stream::flatten_unordered::{ - FlattenUnorderedWithFlowController, FlowController, FlowStep, -}; -use crate::stream::IntoStream; -use crate::TryStreamExt; - -delegate_all!( - /// Stream for the [`try_flatten_unordered`](super::TryStreamExt::try_flatten_unordered) method. - TryFlattenUnordered( - FlattenUnorderedWithFlowController, PropagateBaseStreamError> - ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] - + New[ - |stream: St, limit: impl Into>| - FlattenUnorderedWithFlowController::new( - NestedTryStreamIntoEitherTryStream::new(stream), - limit.into() - ) - ] - where - St: TryStream, - St::Ok: TryStream, - St::Ok: Unpin, - ::Error: From -); - -pin_project! { - /// Emits either successful streams or single-item streams containing the underlying errors. - /// This's a wrapper for `FlattenUnordered` to reuse its logic over `TryStream`. 
- #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct NestedTryStreamIntoEitherTryStream - where - St: TryStream, - St::Ok: TryStream, - St::Ok: Unpin, - ::Error: From - { - #[pin] - stream: St - } -} - -impl NestedTryStreamIntoEitherTryStream -where - St: TryStream, - St::Ok: TryStream + Unpin, - ::Error: From, -{ - fn new(stream: St) -> Self { - Self { stream } - } - - delegate_access_inner!(stream, St, ()); -} - -/// Emits a single item immediately, then stream will be terminated. -#[derive(Debug, Clone)] -pub struct Single(Option); - -impl Single { - /// Constructs new `Single` with the given value. - fn new(val: T) -> Self { - Self(Some(val)) - } - - /// Attempts to take inner item immediately. Will always succeed if the stream isn't terminated. - fn next_immediate(&mut self) -> Option { - self.0.take() - } -} - -impl Unpin for Single {} - -impl Stream for Single { - type Item = T; - - fn poll_next(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(self.0.take()) - } - - fn size_hint(&self) -> (usize, Option) { - self.0.as_ref().map_or((0, Some(0)), |_| (1, Some(1))) - } -} - -/// Immediately propagates errors occurred in the base stream. 
-#[derive(Debug, Clone, Copy)] -pub struct PropagateBaseStreamError(PhantomData); - -type BaseStreamItem = as Stream>::Item; -type InnerStreamItem = as Stream>::Item; - -impl FlowController, InnerStreamItem> for PropagateBaseStreamError -where - St: TryStream, - St::Ok: TryStream + Unpin, - ::Error: From, -{ - fn next_step(item: BaseStreamItem) -> FlowStep, InnerStreamItem> { - match item { - // A new successful inner stream received - st @ Either::Left(_) => FlowStep::Continue(st), - // An error encountered - Either::Right(mut err) => FlowStep::Return(err.next_immediate().unwrap()), - } - } -} - -type SingleStreamResult = Single::Ok, ::Error>>; - -impl Stream for NestedTryStreamIntoEitherTryStream -where - St: TryStream, - St::Ok: TryStream + Unpin, - ::Error: From, -{ - // Item is either an inner stream or a stream containing a single error. - // This will allow using `Either`'s `Stream` implementation as both branches are actually streams of `Result`'s. - type Item = Either, SingleStreamResult>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let item = ready!(self.project().stream.try_poll_next(cx)); - - let out = match item { - Some(res) => match res { - // Emit successful inner stream as is - Ok(stream) => Either::Left(stream.into_stream()), - // Wrap an error into a stream containing a single item - err @ Err(_) => { - let res = err.map(|_: St::Ok| unreachable!()).map_err(Into::into); - - Either::Right(Single::new(res)) - } - }, - None => return Poll::Ready(None), - }; - - Poll::Ready(Some(out)) - } -} - -impl FusedStream for NestedTryStreamIntoEitherTryStream -where - St: TryStream + FusedStream, - St::Ok: TryStream + Unpin, - ::Error: From, -{ - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for NestedTryStreamIntoEitherTryStream -where - St: TryStream + Sink, - St::Ok: TryStream + Unpin, - ::Error: From<::Error>, 
-{ - type Error = >::Error; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_fold.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_fold.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_fold.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_fold.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,93 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future, TryFuture}; -use futures_core::ready; -use futures_core::stream::TryStream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`try_fold`](super::TryStreamExt::try_fold) method. - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct TryFold { - #[pin] - stream: St, - f: F, - accum: Option, - #[pin] - future: Option, - } -} - -impl fmt::Debug for TryFold -where - St: fmt::Debug, - Fut: fmt::Debug, - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryFold") - .field("stream", &self.stream) - .field("accum", &self.accum) - .field("future", &self.future) - .finish() - } -} - -impl TryFold -where - St: TryStream, - F: FnMut(T, St::Ok) -> Fut, - Fut: TryFuture, -{ - pub(super) fn new(stream: St, f: F, t: T) -> Self { - Self { stream, f, accum: Some(t), future: None } - } -} - -impl FusedFuture for TryFold -where - St: TryStream, - F: FnMut(T, St::Ok) -> Fut, - Fut: TryFuture, -{ - fn is_terminated(&self) -> bool { - self.accum.is_none() && self.future.is_none() - } -} - -impl Future for TryFold -where - St: TryStream, - F: FnMut(T, St::Ok) -> Fut, - Fut: TryFuture, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - - Poll::Ready(loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - // 
we're currently processing a future to produce a new accum value - let res = ready!(fut.try_poll(cx)); - this.future.set(None); - match res { - Ok(a) => *this.accum = Some(a), - Err(e) => break Err(e), - } - } else if this.accum.is_some() { - // we're waiting on a new item from the stream - let res = ready!(this.stream.as_mut().try_poll_next(cx)); - let a = this.accum.take().unwrap(); - match res { - Some(Ok(item)) => this.future.set(Some((this.f)(a, item))), - Some(Err(e)) => break Err(e), - None => break Ok(a), - } - } else { - panic!("Fold polled after completion") - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_for_each_concurrent.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_for_each_concurrent.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_for_each_concurrent.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_for_each_concurrent.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,133 +0,0 @@ -use crate::stream::{FuturesUnordered, StreamExt}; -use core::fmt; -use core::mem; -use core::num::NonZeroUsize; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::stream::TryStream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the - /// [`try_for_each_concurrent`](super::TryStreamExt::try_for_each_concurrent) - /// method. 
- #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct TryForEachConcurrent { - #[pin] - stream: Option, - f: F, - futures: FuturesUnordered, - limit: Option, - } -} - -impl fmt::Debug for TryForEachConcurrent -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryForEachConcurrent") - .field("stream", &self.stream) - .field("futures", &self.futures) - .field("limit", &self.limit) - .finish() - } -} - -impl FusedFuture for TryForEachConcurrent -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: Future>, -{ - fn is_terminated(&self) -> bool { - self.stream.is_none() && self.futures.is_empty() - } -} - -impl TryForEachConcurrent -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: Future>, -{ - pub(super) fn new(stream: St, limit: Option, f: F) -> Self { - Self { - stream: Some(stream), - // Note: `limit` = 0 gets ignored. - limit: limit.and_then(NonZeroUsize::new), - f, - futures: FuturesUnordered::new(), - } - } -} - -impl Future for TryForEachConcurrent -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: Future>, -{ - type Output = Result<(), St::Error>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - loop { - let mut made_progress_this_iter = false; - - // Check if we've already created a number of futures greater than `limit` - if this.limit.map(|limit| limit.get() > this.futures.len()).unwrap_or(true) { - let poll_res = match this.stream.as_mut().as_pin_mut() { - Some(stream) => stream.try_poll_next(cx), - None => Poll::Ready(None), - }; - - let elem = match poll_res { - Poll::Ready(Some(Ok(elem))) => { - made_progress_this_iter = true; - Some(elem) - } - Poll::Ready(None) => { - this.stream.set(None); - None - } - Poll::Pending => None, - Poll::Ready(Some(Err(e))) => { - // Empty the stream and futures so that we know - // the future has completed. 
- this.stream.set(None); - drop(mem::replace(this.futures, FuturesUnordered::new())); - return Poll::Ready(Err(e)); - } - }; - - if let Some(elem) = elem { - this.futures.push((this.f)(elem)); - } - } - - match this.futures.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(()))) => made_progress_this_iter = true, - Poll::Ready(None) => { - if this.stream.is_none() { - return Poll::Ready(Ok(())); - } - } - Poll::Pending => {} - Poll::Ready(Some(Err(e))) => { - // Empty the stream and futures so that we know - // the future has completed. - this.stream.set(None); - drop(mem::replace(this.futures, FuturesUnordered::new())); - return Poll::Ready(Err(e)); - } - } - - if !made_progress_this_iter { - return Poll::Pending; - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_for_each.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_for_each.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_for_each.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_for_each.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,68 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::{Future, TryFuture}; -use futures_core::ready; -use futures_core::stream::TryStream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -pin_project! { - /// Future for the [`try_for_each`](super::TryStreamExt::try_for_each) method. 
- #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct TryForEach { - #[pin] - stream: St, - f: F, - #[pin] - future: Option, - } -} - -impl fmt::Debug for TryForEach -where - St: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryForEach") - .field("stream", &self.stream) - .field("future", &self.future) - .finish() - } -} - -impl TryForEach -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: TryFuture, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, future: None } - } -} - -impl Future for TryForEach -where - St: TryStream, - F: FnMut(St::Ok) -> Fut, - Fut: TryFuture, -{ - type Output = Result<(), St::Error>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - loop { - if let Some(fut) = this.future.as_mut().as_pin_mut() { - ready!(fut.try_poll(cx))?; - this.future.set(None); - } else { - match ready!(this.stream.as_mut().try_poll_next(cx)?) { - Some(e) => this.future.set(Some((this.f)(e))), - None => break, - } - } - } - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_next.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_next.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_next.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_next.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,34 +0,0 @@ -use crate::stream::TryStreamExt; -use core::pin::Pin; -use futures_core::future::{FusedFuture, Future}; -use futures_core::stream::{FusedStream, TryStream}; -use futures_core::task::{Context, Poll}; - -/// Future for the [`try_next`](super::TryStreamExt::try_next) method. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct TryNext<'a, St: ?Sized> { - stream: &'a mut St, -} - -impl Unpin for TryNext<'_, St> {} - -impl<'a, St: ?Sized + TryStream + Unpin> TryNext<'a, St> { - pub(super) fn new(stream: &'a mut St) -> Self { - Self { stream } - } -} - -impl FusedFuture for TryNext<'_, St> { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -impl Future for TryNext<'_, St> { - type Output = Result, St::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.stream.try_poll_next_unpin(cx)?.map(Ok) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_ready_chunks.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_ready_chunks.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_ready_chunks.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_ready_chunks.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,126 +0,0 @@ -use crate::stream::{Fuse, IntoStream, StreamExt}; - -use alloc::vec::Vec; -use core::fmt; -use core::pin::Pin; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`try_ready_chunks`](super::TryStreamExt::try_ready_chunks) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - pub struct TryReadyChunks { - #[pin] - stream: Fuse>, - cap: usize, // https://github.com/rust-lang/futures-rs/issues/1475 - } -} - -impl TryReadyChunks { - pub(super) fn new(stream: St, capacity: usize) -> Self { - assert!(capacity > 0); - - Self { stream: IntoStream::new(stream).fuse(), cap: capacity } - } - - delegate_access_inner!(stream, St, (. 
.)); -} - -type TryReadyChunksStreamError = - TryReadyChunksError<::Ok, ::Error>; - -impl Stream for TryReadyChunks { - type Item = Result, TryReadyChunksStreamError>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.as_mut().project(); - - let mut items: Vec = Vec::new(); - - loop { - match this.stream.as_mut().poll_next(cx) { - // Flush all the collected data if the underlying stream doesn't - // contain more ready values - Poll::Pending => { - return if items.is_empty() { - Poll::Pending - } else { - Poll::Ready(Some(Ok(items))) - } - } - - // Push the ready item into the buffer and check whether it is full. - // If so, return the buffer. - Poll::Ready(Some(Ok(item))) => { - if items.is_empty() { - items.reserve_exact(*this.cap); - } - items.push(item); - if items.len() >= *this.cap { - return Poll::Ready(Some(Ok(items))); - } - } - - // Return the already collected items and the error. - Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(Err(TryReadyChunksError(items, e)))); - } - - // Since the underlying stream ran out of values, return what we - // have buffered, if we have anything. - Poll::Ready(None) => { - let last = if items.is_empty() { None } else { Some(Ok(items)) }; - return Poll::Ready(last); - } - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let (lower, upper) = self.stream.size_hint(); - let lower = lower / self.cap; - (lower, upper) - } -} - -impl FusedStream for TryReadyChunks { - fn is_terminated(&self) -> bool { - self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TryReadyChunks -where - S: TryStream + Sink, -{ - type Error = >::Error; - - delegate_sink!(stream, Item); -} - -/// Error indicating, that while chunk was collected inner stream produced an error. -/// -/// Contains all items that were collected before an error occurred, and the stream error itself. 
-#[derive(PartialEq, Eq)] -pub struct TryReadyChunksError(pub Vec, pub E); - -impl fmt::Debug for TryReadyChunksError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.1.fmt(f) - } -} - -impl fmt::Display for TryReadyChunksError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.1.fmt(f) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for TryReadyChunksError {} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_skip_while.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_skip_while.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_skip_while.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_skip_while.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,120 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::TryFuture; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`try_skip_while`](super::TryStreamExt::try_skip_while) - /// method. 
- #[must_use = "streams do nothing unless polled"] - pub struct TrySkipWhile where St: TryStream { - #[pin] - stream: St, - f: F, - #[pin] - pending_fut: Option, - pending_item: Option, - done_skipping: bool, - } -} - -impl fmt::Debug for TrySkipWhile -where - St: TryStream + fmt::Debug, - St::Ok: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TrySkipWhile") - .field("stream", &self.stream) - .field("pending_fut", &self.pending_fut) - .field("pending_item", &self.pending_item) - .field("done_skipping", &self.done_skipping) - .finish() - } -} - -impl TrySkipWhile -where - St: TryStream, - F: FnMut(&St::Ok) -> Fut, - Fut: TryFuture, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, pending_fut: None, pending_item: None, done_skipping: false } - } - - delegate_access_inner!(stream, St, ()); -} - -impl Stream for TrySkipWhile -where - St: TryStream, - F: FnMut(&St::Ok) -> Fut, - Fut: TryFuture, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if *this.done_skipping { - return this.stream.try_poll_next(cx); - } - - Poll::Ready(loop { - if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() { - let res = ready!(fut.try_poll(cx)); - this.pending_fut.set(None); - let skipped = res?; - let item = this.pending_item.take(); - if !skipped { - *this.done_skipping = true; - break item.map(Ok); - } - } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) 
{ - this.pending_fut.set(Some((this.f)(&item))); - *this.pending_item = Some(item); - } else { - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - let pending_len = usize::from(self.pending_item.is_some()); - let (_, upper) = self.stream.size_hint(); - let upper = match upper { - Some(x) => x.checked_add(pending_len), - None => None, - }; - (0, upper) // can't know a lower bound, due to the predicate - } -} - -impl FusedStream for TrySkipWhile -where - St: TryStream + FusedStream, - F: FnMut(&St::Ok) -> Fut, - Fut: TryFuture, -{ - fn is_terminated(&self) -> bool { - self.pending_item.is_none() && self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TrySkipWhile -where - S: TryStream + Sink, -{ - type Error = E; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_take_while.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_take_while.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_take_while.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_take_while.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,129 +0,0 @@ -use core::fmt; -use core::pin::Pin; -use futures_core::future::TryFuture; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream, TryStream}; -use futures_core::task::{Context, Poll}; -#[cfg(feature = "sink")] -use futures_sink::Sink; -use pin_project_lite::pin_project; - -pin_project! { - /// Stream for the [`try_take_while`](super::TryStreamExt::try_take_while) - /// method. 
- #[must_use = "streams do nothing unless polled"] - pub struct TryTakeWhile - where - St: TryStream, - { - #[pin] - stream: St, - f: F, - #[pin] - pending_fut: Option, - pending_item: Option, - done_taking: bool, - } -} - -impl fmt::Debug for TryTakeWhile -where - St: TryStream + fmt::Debug, - St::Ok: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryTakeWhile") - .field("stream", &self.stream) - .field("pending_fut", &self.pending_fut) - .field("pending_item", &self.pending_item) - .field("done_taking", &self.done_taking) - .finish() - } -} - -impl TryTakeWhile -where - St: TryStream, - F: FnMut(&St::Ok) -> Fut, - Fut: TryFuture, -{ - pub(super) fn new(stream: St, f: F) -> Self { - Self { stream, f, pending_fut: None, pending_item: None, done_taking: false } - } - - delegate_access_inner!(stream, St, ()); -} - -impl Stream for TryTakeWhile -where - St: TryStream, - F: FnMut(&St::Ok) -> Fut, - Fut: TryFuture, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if *this.done_taking { - return Poll::Ready(None); - } - - Poll::Ready(loop { - if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() { - let res = ready!(fut.try_poll(cx)); - this.pending_fut.set(None); - let take = res?; - let item = this.pending_item.take(); - if take { - break item.map(Ok); - } else { - *this.done_taking = true; - break None; - } - } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) 
{ - this.pending_fut.set(Some((this.f)(&item))); - *this.pending_item = Some(item); - } else { - break None; - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - if self.done_taking { - return (0, Some(0)); - } - - let pending_len = usize::from(self.pending_item.is_some()); - let (_, upper) = self.stream.size_hint(); - let upper = match upper { - Some(x) => x.checked_add(pending_len), - None => None, - }; - (0, upper) // can't know a lower bound, due to the predicate - } -} - -impl FusedStream for TryTakeWhile -where - St: TryStream + FusedStream, - F: FnMut(&St::Ok) -> Fut, - Fut: TryFuture, -{ - fn is_terminated(&self) -> bool { - self.done_taking || self.pending_item.is_none() && self.stream.is_terminated() - } -} - -// Forwarding impl of Sink from the underlying stream -#[cfg(feature = "sink")] -impl Sink for TryTakeWhile -where - S: TryStream + Sink, -{ - type Error = E; - - delegate_sink!(stream, Item); -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_unfold.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_unfold.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/try_stream/try_unfold.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/try_stream/try_unfold.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,122 +0,0 @@ -use super::assert_stream; -use core::fmt; -use core::pin::Pin; -use futures_core::future::TryFuture; -use futures_core::ready; -use futures_core::stream::Stream; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -/// Creates a `TryStream` from a seed and a closure returning a `TryFuture`. -/// -/// This function is the dual for the `TryStream::try_fold()` adapter: while -/// `TryStream::try_fold()` reduces a `TryStream` to one single value, -/// `try_unfold()` creates a `TryStream` from a seed value. 
-/// -/// `try_unfold()` will call the provided closure with the provided seed, then -/// wait for the returned `TryFuture` to complete with `(a, b)`. It will then -/// yield the value `a`, and use `b` as the next internal state. -/// -/// If the closure returns `None` instead of `Some(TryFuture)`, then the -/// `try_unfold()` will stop producing items and return `Poll::Ready(None)` in -/// future calls to `poll()`. -/// -/// In case of error generated by the returned `TryFuture`, the error will be -/// returned by the `TryStream`. The `TryStream` will then yield -/// `Poll::Ready(None)` in future calls to `poll()`. -/// -/// This function can typically be used when wanting to go from the "world of -/// futures" to the "world of streams": the provided closure can build a -/// `TryFuture` using other library functions working on futures, and -/// `try_unfold()` will turn it into a `TryStream` by repeating the operation. -/// -/// # Example -/// -/// ``` -/// # #[derive(Debug, PartialEq)] -/// # struct SomeError; -/// # futures::executor::block_on(async { -/// use futures::stream::{self, TryStreamExt}; -/// -/// let stream = stream::try_unfold(0, |state| async move { -/// if state < 0 { -/// return Err(SomeError); -/// } -/// -/// if state <= 2 { -/// let next_state = state + 1; -/// let yielded = state * 2; -/// Ok(Some((yielded, next_state))) -/// } else { -/// Ok(None) -/// } -/// }); -/// -/// let result: Result, _> = stream.try_collect().await; -/// assert_eq!(result, Ok(vec![0, 2, 4])); -/// # }); -/// ``` -pub fn try_unfold(init: T, f: F) -> TryUnfold -where - F: FnMut(T) -> Fut, - Fut: TryFuture>, -{ - assert_stream::, _>(TryUnfold { f, state: Some(init), fut: None }) -} - -pin_project! { - /// Stream for the [`try_unfold`] function. 
- #[must_use = "streams do nothing unless polled"] - pub struct TryUnfold { - f: F, - state: Option, - #[pin] - fut: Option, - } -} - -impl fmt::Debug for TryUnfold -where - T: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TryUnfold").field("state", &self.state).field("fut", &self.fut).finish() - } -} - -impl Stream for TryUnfold -where - F: FnMut(T) -> Fut, - Fut: TryFuture>, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if let Some(state) = this.state.take() { - this.fut.set(Some((this.f)(state))); - } - - match this.fut.as_mut().as_pin_mut() { - None => { - // The future previously errored - Poll::Ready(None) - } - Some(future) => { - let step = ready!(future.try_poll(cx)); - this.fut.set(None); - - match step { - Ok(Some((item, next_state))) => { - *this.state = Some(next_state); - Poll::Ready(Some(Ok(item))) - } - Ok(None) => Poll::Ready(None), - Err(e) => Poll::Ready(Some(Err(e))), - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/stream/unfold.rs s390-tools-2.33.1/rust-vendor/futures-util/src/stream/unfold.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/stream/unfold.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/stream/unfold.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,119 +0,0 @@ -use super::assert_stream; -use crate::unfold_state::UnfoldState; -use core::fmt; -use core::pin::Pin; -use futures_core::future::Future; -use futures_core::ready; -use futures_core::stream::{FusedStream, Stream}; -use futures_core::task::{Context, Poll}; -use pin_project_lite::pin_project; - -/// Creates a `Stream` from a seed and a closure returning a `Future`. -/// -/// This function is the dual for the `Stream::fold()` adapter: while -/// `Stream::fold()` reduces a `Stream` to one single value, `unfold()` creates a -/// `Stream` from a seed value. 
-/// -/// `unfold()` will call the provided closure with the provided seed, then wait -/// for the returned `Future` to complete with `(a, b)`. It will then yield the -/// value `a`, and use `b` as the next internal state. -/// -/// If the closure returns `None` instead of `Some(Future)`, then the `unfold()` -/// will stop producing items and return `Poll::Ready(None)` in future -/// calls to `poll()`. -/// -/// This function can typically be used when wanting to go from the "world of -/// futures" to the "world of streams": the provided closure can build a -/// `Future` using other library functions working on futures, and `unfold()` -/// will turn it into a `Stream` by repeating the operation. -/// -/// # Example -/// -/// ``` -/// # futures::executor::block_on(async { -/// use futures::stream::{self, StreamExt}; -/// -/// let stream = stream::unfold(0, |state| async move { -/// if state <= 2 { -/// let next_state = state + 1; -/// let yielded = state * 2; -/// Some((yielded, next_state)) -/// } else { -/// None -/// } -/// }); -/// -/// let result = stream.collect::>().await; -/// assert_eq!(result, vec![0, 2, 4]); -/// # }); -/// ``` -pub fn unfold(init: T, f: F) -> Unfold -where - F: FnMut(T) -> Fut, - Fut: Future>, -{ - assert_stream::(Unfold { f, state: UnfoldState::Value { value: init } }) -} - -pin_project! { - /// Stream for the [`unfold`] function. 
- #[must_use = "streams do nothing unless polled"] - pub struct Unfold { - f: F, - #[pin] - state: UnfoldState, - } -} - -impl fmt::Debug for Unfold -where - T: fmt::Debug, - Fut: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Unfold").field("state", &self.state).finish() - } -} - -impl FusedStream for Unfold -where - F: FnMut(T) -> Fut, - Fut: Future>, -{ - fn is_terminated(&self) -> bool { - if let UnfoldState::Empty = self.state { - true - } else { - false - } - } -} - -impl Stream for Unfold -where - F: FnMut(T) -> Fut, - Fut: Future>, -{ - type Item = Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.project(); - - if let Some(state) = this.state.as_mut().take_value() { - this.state.set(UnfoldState::Future { future: (this.f)(state) }); - } - - let step = match this.state.as_mut().project_future() { - Some(fut) => ready!(fut.poll(cx)), - None => panic!("Unfold must not be polled after it returned `Poll::Ready(None)`"), - }; - - if let Some((item, next_state)) = step { - this.state.set(UnfoldState::Value { value: next_state }); - Poll::Ready(Some(item)) - } else { - this.state.set(UnfoldState::Empty); - Poll::Ready(None) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/task/mod.rs s390-tools-2.33.1/rust-vendor/futures-util/src/task/mod.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/task/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/task/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -//! Tools for working with tasks. -//! -//! This module contains: -//! -//! - [`Spawn`], a trait for spawning new tasks. -//! - [`Context`], a context of an asynchronous task, -//! including a handle for waking up the task. -//! - [`Waker`], a handle for waking up a task. -//! -//! The remaining types and traits in the module are used for implementing -//! 
executors or dealing with synchronization issues around task wakeup. - -#[doc(no_inline)] -pub use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; - -pub use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError, UnsafeFutureObj}; - -pub use futures_task::noop_waker; -pub use futures_task::noop_waker_ref; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use futures_task::ArcWake; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use futures_task::waker; - -#[cfg(not(futures_no_atomic_cas))] -#[cfg(feature = "alloc")] -pub use futures_task::{waker_ref, WakerRef}; - -#[cfg(any(not(futures_no_atomic_cas), feature = "portable-atomic"))] -pub use futures_core::task::__internal::AtomicWaker; - -mod spawn; -pub use self::spawn::{LocalSpawnExt, SpawnExt}; diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/task/spawn.rs s390-tools-2.33.1/rust-vendor/futures-util/src/task/spawn.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/task/spawn.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/task/spawn.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,169 +0,0 @@ -use futures_task::{LocalSpawn, Spawn}; - -#[cfg(feature = "compat")] -use crate::compat::Compat; - -#[cfg(feature = "channel")] -#[cfg(feature = "std")] -use crate::future::{FutureExt, RemoteHandle}; -#[cfg(feature = "alloc")] -use alloc::boxed::Box; -#[cfg(feature = "alloc")] -use futures_core::future::Future; -#[cfg(feature = "alloc")] -use futures_task::{FutureObj, LocalFutureObj, SpawnError}; - -impl SpawnExt for Sp where Sp: Spawn {} -impl LocalSpawnExt for Sp where Sp: LocalSpawn {} - -/// Extension trait for `Spawn`. -pub trait SpawnExt: Spawn { - /// Spawns a task that polls the given future with output `()` to - /// completion. - /// - /// This method returns a [`Result`] that contains a [`SpawnError`] if - /// spawning fails. 
- /// - /// You can use [`spawn_with_handle`](SpawnExt::spawn_with_handle) if - /// you want to spawn a future with output other than `()` or if you want - /// to be able to await its completion. - /// - /// Note this method will eventually be replaced with the upcoming - /// `Spawn::spawn` method which will take a `dyn Future` as input. - /// Technical limitations prevent `Spawn::spawn` from being implemented - /// today. Feel free to use this method in the meantime. - /// - /// ``` - /// # { - /// use futures::executor::ThreadPool; - /// use futures::task::SpawnExt; - /// - /// let executor = ThreadPool::new().unwrap(); - /// - /// let future = async { /* ... */ }; - /// executor.spawn(future).unwrap(); - /// # } - /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 - /// ``` - #[cfg(feature = "alloc")] - fn spawn(&self, future: Fut) -> Result<(), SpawnError> - where - Fut: Future + Send + 'static, - { - self.spawn_obj(FutureObj::new(Box::new(future))) - } - - /// Spawns a task that polls the given future to completion and returns a - /// future that resolves to the spawned future's output. - /// - /// This method returns a [`Result`] that contains a [`RemoteHandle`](crate::future::RemoteHandle), or, if - /// spawning fails, a [`SpawnError`]. [`RemoteHandle`](crate::future::RemoteHandle) is a future that - /// resolves to the output of the spawned future. 
- /// - /// ``` - /// # { - /// use futures::executor::{block_on, ThreadPool}; - /// use futures::future; - /// use futures::task::SpawnExt; - /// - /// let executor = ThreadPool::new().unwrap(); - /// - /// let future = future::ready(1); - /// let join_handle_fut = executor.spawn_with_handle(future).unwrap(); - /// assert_eq!(block_on(join_handle_fut), 1); - /// # } - /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 - /// ``` - #[cfg(feature = "channel")] - #[cfg_attr(docsrs, doc(cfg(feature = "channel")))] - #[cfg(feature = "std")] - fn spawn_with_handle(&self, future: Fut) -> Result, SpawnError> - where - Fut: Future + Send + 'static, - Fut::Output: Send, - { - let (future, handle) = future.remote_handle(); - self.spawn(future)?; - Ok(handle) - } - - /// Wraps a [`Spawn`] and makes it usable as a futures 0.1 `Executor`. - /// Requires the `compat` feature to enable. - #[cfg(feature = "compat")] - #[cfg_attr(docsrs, doc(cfg(feature = "compat")))] - fn compat(self) -> Compat - where - Self: Sized, - { - Compat::new(self) - } -} - -/// Extension trait for `LocalSpawn`. -pub trait LocalSpawnExt: LocalSpawn { - /// Spawns a task that polls the given future with output `()` to - /// completion. - /// - /// This method returns a [`Result`] that contains a [`SpawnError`] if - /// spawning fails. - /// - /// You can use [`spawn_with_handle`](SpawnExt::spawn_with_handle) if - /// you want to spawn a future with output other than `()` or if you want - /// to be able to await its completion. - /// - /// Note this method will eventually be replaced with the upcoming - /// `Spawn::spawn` method which will take a `dyn Future` as input. - /// Technical limitations prevent `Spawn::spawn` from being implemented - /// today. Feel free to use this method in the meantime. 
- /// - /// ``` - /// use futures::executor::LocalPool; - /// use futures::task::LocalSpawnExt; - /// - /// let executor = LocalPool::new(); - /// let spawner = executor.spawner(); - /// - /// let future = async { /* ... */ }; - /// spawner.spawn_local(future).unwrap(); - /// ``` - #[cfg(feature = "alloc")] - fn spawn_local(&self, future: Fut) -> Result<(), SpawnError> - where - Fut: Future + 'static, - { - self.spawn_local_obj(LocalFutureObj::new(Box::new(future))) - } - - /// Spawns a task that polls the given future to completion and returns a - /// future that resolves to the spawned future's output. - /// - /// This method returns a [`Result`] that contains a [`RemoteHandle`](crate::future::RemoteHandle), or, if - /// spawning fails, a [`SpawnError`]. [`RemoteHandle`](crate::future::RemoteHandle) is a future that - /// resolves to the output of the spawned future. - /// - /// ``` - /// use futures::executor::LocalPool; - /// use futures::task::LocalSpawnExt; - /// - /// let mut executor = LocalPool::new(); - /// let spawner = executor.spawner(); - /// - /// let future = async { 1 }; - /// let join_handle_fut = spawner.spawn_local_with_handle(future).unwrap(); - /// assert_eq!(executor.run_until(join_handle_fut), 1); - /// ``` - #[cfg(feature = "channel")] - #[cfg_attr(docsrs, doc(cfg(feature = "channel")))] - #[cfg(feature = "std")] - fn spawn_local_with_handle( - &self, - future: Fut, - ) -> Result, SpawnError> - where - Fut: Future + 'static, - { - let (future, handle) = future.remote_handle(); - self.spawn_local(future)?; - Ok(handle) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/futures-util/src/unfold_state.rs s390-tools-2.33.1/rust-vendor/futures-util/src/unfold_state.rs --- s390-tools-2.31.0/rust-vendor/futures-util/src/unfold_state.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/futures-util/src/unfold_state.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ -use core::pin::Pin; - -use pin_project_lite::pin_project; - 
-pin_project! { - /// UnfoldState used for stream and sink unfolds - #[project = UnfoldStateProj] - #[project_replace = UnfoldStateProjReplace] - #[derive(Debug)] - pub(crate) enum UnfoldState { - Value { - value: T, - }, - Future { - #[pin] - future: R, - }, - Empty, - } -} - -impl UnfoldState { - pub(crate) fn project_future(self: Pin<&mut Self>) -> Option> { - match self.project() { - UnfoldStateProj::Future { future } => Some(future), - _ => None, - } - } - - pub(crate) fn take_value(self: Pin<&mut Self>) -> Option { - match &*self { - UnfoldState::Value { .. } => match self.project_replace(UnfoldState::Empty) { - UnfoldStateProjReplace::Value { value } => Some(value), - _ => unreachable!(), - }, - _ => None, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/gimli/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/gimli/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/gimli/Cargo.toml s390-tools-2.33.1/rust-vendor/gimli/Cargo.toml --- s390-tools-2.31.0/rust-vendor/gimli/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -rust-version = "1.60" -name = "gimli" -version = "0.28.1" -include = [ - "/CHANGELOG.md", - "/Cargo.toml", - "/LICENSE-APACHE", - "/LICENSE-MIT", - "/README.md", - "/src", -] -description = "A library for reading and writing the DWARF debugging format." -documentation = "https://docs.rs/gimli" -readme = "./README.md" -keywords = [ - "DWARF", - "debug", - "ELF", - "eh_frame", -] -categories = [ - "development-tools::debugging", - "development-tools::profiling", - "parser-implementations", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/gimli-rs/gimli" -resolver = "2" - -[profile.bench] -codegen-units = 1 -debug = 2 -split-debuginfo = "packed" - -[profile.test] -split-debuginfo = "packed" - -[dependencies.alloc] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-alloc" - -[dependencies.compiler_builtins] -version = "0.1.2" -optional = true - -[dependencies.core] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-core" - -[dependencies.fallible-iterator] -version = "0.3.0" -optional = true -default-features = false - -[dependencies.indexmap] -version = "2.0.0" -optional = true - -[dependencies.stable_deref_trait] -version = "1.1.0" -optional = true -default-features = false - -[dev-dependencies.test-assembler] -version = "0.1.3" - -[features] -default = [ - "read-all", - "write", -] -endian-reader = [ - "read", - "dep:stable_deref_trait", -] -fallible-iterator = ["dep:fallible-iterator"] -read = ["read-core"] -read-all = [ - "read", - "std", - "fallible-iterator", - "endian-reader", -] -read-core = [] -rustc-dep-of-std = [ - "dep:core", - "dep:alloc", - "dep:compiler_builtins", -] -std = [ - "fallible-iterator?/std", - "stable_deref_trait?/std", -] -write = ["dep:indexmap"] diff -Nru s390-tools-2.31.0/rust-vendor/gimli/CHANGELOG.md s390-tools-2.33.1/rust-vendor/gimli/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/gimli/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/gimli/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,1022 +0,0 @@ -# `gimli` Change Log - --------------------------------------------------------------------------------- - -## 0.28.1 - -Released 2023/11/24. - -### Changed - -* Changed `read::AbbreviationsCache` to require manual population using - `Dwarf::populate_abbreviations_cache`. - [#679](https://github.com/gimli-rs/gimli/pull/679) - -* Changed the default `read::UnwindContextStorage` to use `Box` instead of `Vec` - so that its memory usage is limited. - [#687](https://github.com/gimli-rs/gimli/pull/687) - -* Changed `read::UnwindTable::new` to always reset the context, because - previous errors may have left the context in an invalid state. - [#684](https://github.com/gimli-rs/gimli/pull/684) - -* Changed the `Debug` implementation for `read::EndianSlice` to limit the number - of bytes it displays. - [#686](https://github.com/gimli-rs/gimli/pull/686) - -### Added - -* Added more AArch64 register definitions. - [#680](https://github.com/gimli-rs/gimli/pull/680) - -* Added `read::Unit::new_with_abbreviations`. - [#677](https://github.com/gimli-rs/gimli/pull/677) - -* Added `read::Evaluation::value_result`. - [#676](https://github.com/gimli-rs/gimli/pull/676) - --------------------------------------------------------------------------------- - -## 0.28.0 - -Released 2023/08/12. - -### Breaking changes - -* Deleted `impl From for &[u8]`. Use `EndianSlice::slice` instead. - [#669](https://github.com/gimli-rs/gimli/pull/669) - -* Deleted `impl Index for EndianSlice` and - `impl Index> for EndianSlice`. - [#669](https://github.com/gimli-rs/gimli/pull/669) - -* Replaced `impl From for u64` with `Pointer::pointer`. - [#670](https://github.com/gimli-rs/gimli/pull/670) - -* Updated `fallible-iterator` to 0.3.0. - [#672](https://github.com/gimli-rs/gimli/pull/672) - -* Changed some optional dependencies to use the `dep:` feature syntax. 
- [#672](https://github.com/gimli-rs/gimli/pull/672) - -* Added `non_exhaustive` attribute to `read::RegisterRule`, - `read::CallFrameInstruction`, and `write::CallFrameInstruction`. - [#673](https://github.com/gimli-rs/gimli/pull/673) - -### Changed - -* The minimum supported rust version for the `read` feature and its dependencies - increased to 1.60.0. - -* The minimum supported rust version for other features increased to 1.65.0. - -### Added - -* Added `Vendor`, `read::DebugFrame::set_vendor`, and `read::EhFrame::set_vendor`. - [#673](https://github.com/gimli-rs/gimli/pull/673) - -* Added more ARM and AArch64 register definitions, and - `DW_CFA_AARCH64_negate_ra_state` support. - [#673](https://github.com/gimli-rs/gimli/pull/673) - --------------------------------------------------------------------------------- - -## 0.27.3 - -Released 2023/06/14. - -### Changed - -* Excluded test fixtures from published package. - [#661](https://github.com/gimli-rs/gimli/pull/661) - -### Added - -* Added `FallibleIterator` implementation for `read::OperationIter`. - [#649](https://github.com/gimli-rs/gimli/pull/649) - -* Added `DW_AT_GNU_deleted` constant. - [#658](https://github.com/gimli-rs/gimli/pull/658) - --------------------------------------------------------------------------------- - -## 0.27.2 - -Released 2023/02/15. - -### Added - -* Added support for tombstones in `read::LineRows`. - [#642](https://github.com/gimli-rs/gimli/pull/642) - --------------------------------------------------------------------------------- - -## 0.27.1 - -Released 2023/01/23. - -### Added - -* Added `SectionId::xcoff_name` and `read::Section::xcoff_section_name`. - [#635](https://github.com/gimli-rs/gimli/pull/635) - -* Added `read::Dwarf::make_dwo` and `read::Unit::dwo_name`. - [#637](https://github.com/gimli-rs/gimli/pull/637) - -### Changed - -* Changed `read::DwarfPackage::sections` to handle supplementary files. 
- [#638](https://github.com/gimli-rs/gimli/pull/638) - --------------------------------------------------------------------------------- - -## 0.27.0 - -Released 2022/11/23. - -### Breaking changes - -* Added `read::Dwarf::abbreviations_cache` to cache abbreviations at offset 0. - Changed `read::Dwarf::abbreviations` to return `Result>`, - and changed `read::Unit::abbreviations` to `Arc`. - [#628](https://github.com/gimli-rs/gimli/pull/628) - -### Added - -* Added LoongArch register definitions. - [#624](https://github.com/gimli-rs/gimli/pull/624) - -* Added support for tombstones in `read::LocListIter` and `read::RngListIter`. - [#631](https://github.com/gimli-rs/gimli/pull/631) - --------------------------------------------------------------------------------- - -## 0.26.2 - -Released 2022/07/16. - -### Changed - -* Fixed CFI personality encoding when writing. - [#609](https://github.com/gimli-rs/gimli/pull/609) - -* Fixed use of raw pointer for mutation, detected by Miri. - [#614](https://github.com/gimli-rs/gimli/pull/614) - -* Fixed `DW_OP_GNU_implicit_pointer` handling for DWARF version 2. - [#618](https://github.com/gimli-rs/gimli/pull/618) - -### Added - -* Added `read::EhHdrTable::iter`. - [#619](https://github.com/gimli-rs/gimli/pull/619) - --------------------------------------------------------------------------------- - -## 0.26.1 - -Released 2021/11/02. - -### Changed - -* Fixed segmentation fault in `ArrayVec>::into_vec`, which may be used by - `read::Evaluation::result`. This regression was introduced in 0.26.0. - [#601](https://github.com/gimli-rs/gimli/pull/601) - --------------------------------------------------------------------------------- - -## 0.26.0 - -Released 2021/10/24. - -### Breaking changes - -* Removed `read::UninitializedUnwindContext`. Use `Box` instead. - [#593](https://github.com/gimli-rs/gimli/pull/593) - -* Renamed `read::Error::CfiStackFull` to `StackFull`. 
- [#595](https://github.com/gimli-rs/gimli/pull/595) - -* Added `UnwindContextStorage` type parameter to `read::UnwindContext`, `read::UnwindTable`, - `read::UnwindTableRow`, and `read::RegisterRuleMap`. - [#595](https://github.com/gimli-rs/gimli/pull/595) - -* Added `EvaluationStorage` type parameter to `read::Evaluation`. - [#595](https://github.com/gimli-rs/gimli/pull/595) - -* Added `read::SectionId::DebugCuIndex` and `read::SectionId::DebugTuIndex`. - [#588](https://github.com/gimli-rs/gimli/pull/588) - -### Changed - -* Fixed `DW_EH_PE_pcrel` handling in default `write::Writer::write_eh_pointer` implementation. - [#576](https://github.com/gimli-rs/gimli/pull/576) - -* Fixed `read::AttributeSpecification::size` for some forms. - [#597](https://github.com/gimli-rs/gimli/pull/597) - -* Display more unit details in dwarfdump. - [#584](https://github.com/gimli-rs/gimli/pull/584) - -### Added - -* Added `write::DebuggingInformationEntry::delete_child`. - [#570](https://github.com/gimli-rs/gimli/pull/570) - -* Added ARM and AArch64 register definitions. - [#574](https://github.com/gimli-rs/gimli/pull/574) - [#577](https://github.com/gimli-rs/gimli/pull/577) - -* Added RISC-V register definitions. - [#579](https://github.com/gimli-rs/gimli/pull/579) - -* Added `read::DwarfPackage`, `read::DebugCuIndex`, and `read::DebugTuIndex`. - [#588](https://github.com/gimli-rs/gimli/pull/588) - -* Added `read-core` feature to allow building without `liballoc`. - [#596](https://github.com/gimli-rs/gimli/pull/596) - -* Added `read::EntriesRaw::skip_attributes`. - [#597](https://github.com/gimli-rs/gimli/pull/597) - --------------------------------------------------------------------------------- - -## 0.25.0 - -Released 2021/07/26. - -### Breaking changes - -* `read::FrameDescriptionEntry::unwind_info_for_address` now returns a reference - instead of cloning. 
- [#557](https://github.com/gimli-rs/gimli/pull/557) - -* `read::AttributeValue::RangeListsRef` now contains a `RawRangeListsOffset` - to allow handling of GNU split DWARF extensions. - Use `read::Dwarf::ranges_offset_from_raw` to handle it. - [#568](https://github.com/gimli-rs/gimli/pull/568) - [#569](https://github.com/gimli-rs/gimli/pull/569) - -* Added `read::Unit::dwo_id`. - [#569](https://github.com/gimli-rs/gimli/pull/569) - -### Changed - -* `.debug_aranges` parsing now accepts version 3. - [#560](https://github.com/gimli-rs/gimli/pull/560) - -* `read::Dwarf::attr_ranges_offset` and its callers now handle GNU split DWARF extensions. - [#568](https://github.com/gimli-rs/gimli/pull/568) - [#569](https://github.com/gimli-rs/gimli/pull/569) - -### Added - -* Added `read::DebugLineStr::new`. - [#556](https://github.com/gimli-rs/gimli/pull/556) - -* Added `read::UnwindTable::into_current_row`. - [#557](https://github.com/gimli-rs/gimli/pull/557) - -* Added more `DW_LANG` constants. - [#565](https://github.com/gimli-rs/gimli/pull/565) - -* dwarfdump: added DWO parent support. - [#568](https://github.com/gimli-rs/gimli/pull/568) - -* Added `read::Dwarf` methods: `ranges_offset_from_raw`, `raw_ranges`, and `raw_locations`. - [#568](https://github.com/gimli-rs/gimli/pull/568) - [#569](https://github.com/gimli-rs/gimli/pull/569) - --------------------------------------------------------------------------------- - -## 0.24.0 - -Released 2021/05/01. - -### Breaking changes - -* Minimum Rust version increased to 1.42.0. - -* Added `read::Dwarf::debug_aranges`. - [#539](https://github.com/gimli-rs/gimli/pull/539) - -* Replaced `read::DebugAranges::items` with `read::DebugAranges::headers`. - [#539](https://github.com/gimli-rs/gimli/pull/539) - -* Added `read::Operation::Wasm*`. - [#546](https://github.com/gimli-rs/gimli/pull/546) - -* `read::LineRow::line` now returns `Option`. - The `read::ColumnType::Column` variant now contains a `NonZeroU64`. 
- [#551](https://github.com/gimli-rs/gimli/pull/551) - -* Replaced `read::Dwarf::debug_str_sup` with `read::Dwarf::sup`. - Deleted `sup` parameter of `read::Dwarf::load`. - Added `read::Dwarf::load_sup`. - [#554](https://github.com/gimli-rs/gimli/pull/554) - -### Added - -* dwarfdump: Supplementary object file support. - [#552](https://github.com/gimli-rs/gimli/pull/552) - -### Changed - -* Support `DW_FORM_addrx*` for `DW_AT_low_pc`/`DW_AT_high_pc` in `read::Dwarf`. - [#541](https://github.com/gimli-rs/gimli/pull/541) - -* Performance improvement in `EndianReader`. - [#549](https://github.com/gimli-rs/gimli/pull/549) - --------------------------------------------------------------------------------- - -## 0.23.0 - -Released 2020/10/27. - -### Breaking changes - -* Added more variants to `read::UnitType`. - Added `read::AttributeValue::DwoId` - [#521](https://github.com/gimli-rs/gimli/pull/521) - -* Replaced `CompilationUnitHeader` and `TypeUnitHeader` with `UnitHeader`. - Replaced `CompilationUnitHeadersIter` with `DebugInfoUnitHeadersIter`. - Replaced `TypeUnitHeadersIter` with `DebugTypesUnitHeadersIter`. - [#523](https://github.com/gimli-rs/gimli/pull/523) - - -### Added - -* Added read support for split DWARF. - [#527](https://github.com/gimli-rs/gimli/pull/527) - [#529](https://github.com/gimli-rs/gimli/pull/529) - -* Added `read::Dwarf::attr_address`. - [#524](https://github.com/gimli-rs/gimli/pull/524) - -* Added read support for `DW_AT_GNU_addr_base` and `DW_AT_GNU_ranges_base`. - [#525](https://github.com/gimli-rs/gimli/pull/525) - -* dwarfdump: Display index values for attributes. - [#526](https://github.com/gimli-rs/gimli/pull/526) - -* Added `name_to_register`. - [#532](https://github.com/gimli-rs/gimli/pull/532) - --------------------------------------------------------------------------------- - -## 0.22.0 - -Released 2020/07/03. - -### Breaking changes - -* Fixed `UnitHeader::size_of_header` for DWARF 5 units. 
- [#518](https://github.com/gimli-rs/gimli/pull/518) - -### Added - -* Added fuzz targets in CI. - [#512](https://github.com/gimli-rs/gimli/pull/512) - -* Added read support for `DW_OP_GNU_addr_index` and `DW_OP_GNU_const_index`. - [#516](https://github.com/gimli-rs/gimli/pull/516) - -* Added `.dwo` support to dwarfdump. - [#516](https://github.com/gimli-rs/gimli/pull/516) - -* Added `SectionId::dwo_name` and `Section::dwo_section_name`. - [#517](https://github.com/gimli-rs/gimli/pull/517) - -### Fixed - -* Fixed panic when reading `DW_FORM_indirect` combined with `DW_FORM_implicit_const`. - [#502](https://github.com/gimli-rs/gimli/pull/502) - -* Fixed panic for `read::Abbreviations::get(0)`. - [#505](https://github.com/gimli-rs/gimli/pull/505) - -* Fixed arithmetic overflow when reading `.debug_line`. - [#508](https://github.com/gimli-rs/gimli/pull/508) - -* Fixed arithmetic overflow when reading CFI. - [#509](https://github.com/gimli-rs/gimli/pull/509) - -* Fixed arithmetic overflow and division by zero when reading `.debug_aranges`. - [#510](https://github.com/gimli-rs/gimli/pull/510) - -* Don't return error from `read::Unit::new` when `DW_AT_name` or `DW_AT_comp_dir` is missing. - [#515](https://github.com/gimli-rs/gimli/pull/515) - --------------------------------------------------------------------------------- - -## 0.21.0 - -Released 2020/05/12. - -### Breaking changes - -* Minimum Rust version increased to 1.38.0. - -* Replaced `read::Operation::Literal` with `Operation::UnsignedConstant` and `Operation::SignedConstant`. - Changed `read::Operation::Bra` and `read::Operation::Skip` to contain the target offset instead of the bytecode. - [#479](https://github.com/gimli-rs/gimli/pull/479) - -* Changed `write::Expression` to support references. Existing users can convert to use `Expression::raw`. - [#479](https://github.com/gimli-rs/gimli/pull/479) - -* Replaced `write::AttributeValue::AnyUnitEntryRef` with `DebugInfoRef`. 
- Renamed `write::AttributeValue::ThisUnitEntryRef` to `UnitRef`. - [#479](https://github.com/gimli-rs/gimli/pull/479) - -* Added more optional features: `endian-reader` and `fallible-iterator`. - [#495](https://github.com/gimli-rs/gimli/pull/495) - [#498](https://github.com/gimli-rs/gimli/pull/498) - -### Added - -* Added `read::Expression::operations` - [#479](https://github.com/gimli-rs/gimli/pull/479) - -### Fixed - -* Fixed newlines in `dwarfdump` example. - [#470](https://github.com/gimli-rs/gimli/pull/470) - -* Ignore zero terminators when reading `.debug_frame` sections. - [#486](https://github.com/gimli-rs/gimli/pull/486) - -* Increase the number of CFI register rules supported by `read::UnwindContext`. - [#487](https://github.com/gimli-rs/gimli/pull/487) - -* Fixed version handling and return register encoding when reading `.eh_frame` sections. - [#493](https://github.com/gimli-rs/gimli/pull/493) - -### Changed - -* Added `EhFrame` and `DebugFrame` to `write::Sections`. - [#492](https://github.com/gimli-rs/gimli/pull/492) - -* Improved performance of `write::LineProgram::generate_row`. - [#476](https://github.com/gimli-rs/gimli/pull/476) - -* Removed use of the `byteorder`, `arrayvec` and `smallvec` crates. - [#494](https://github.com/gimli-rs/gimli/pull/494) - [#496](https://github.com/gimli-rs/gimli/pull/496) - [#497](https://github.com/gimli-rs/gimli/pull/497) - --------------------------------------------------------------------------------- - -## 0.20.0 - -Released 2020/01/11. - -### Breaking changes - -* Changed type of `DwTag`, `DwAt`, and `DwForm` constants. - [#451](https://github.com/gimli-rs/gimli/pull/451) - -* Added `read/write::AttributeValue::DebugMacroRef`, and returned where - required in `read::Attribute::value`. Added `SectionId::DebugMacro`. - [#454](https://github.com/gimli-rs/gimli/pull/454) - -* Deleted `alloc` feature, and fixed `no-std` builds with stable rust. 
- [#459](https://github.com/gimli-rs/gimli/pull/459) - -* Deleted `read::Error::description`, and changed `` - to display what was previously the description. - [#462](https://github.com/gimli-rs/gimli/pull/462) - -### Added - -* Added GNU view constants. - [#434](https://github.com/gimli-rs/gimli/pull/434) - -* Added `read::EntriesRaw` for low level DIE parsing. - [#455](https://github.com/gimli-rs/gimli/pull/455) - -* Added `examples/simple-line.rs`. - [#460](https://github.com/gimli-rs/gimli/pull/460) - -### Fixed - -* Fixed handling of CFI augmentations without data. - [#438](https://github.com/gimli-rs/gimli/pull/438) - -* dwarfdump: fix panic for malformed expressions. - [#447](https://github.com/gimli-rs/gimli/pull/447) - -* dwarfdump: fix handling of Mach-O relocations. - [#449](https://github.com/gimli-rs/gimli/pull/449) - -### Changed - -* Improved abbreviation parsing performance. - [#451](https://github.com/gimli-rs/gimli/pull/451) - --------------------------------------------------------------------------------- - -## 0.19.0 - -Released 2019/07/08. - -### Breaking changes - -* Small API changes related to `.debug_loc` and `.debug_loclists`: - added `read::RawLocListEntry::AddressOrOffsetPair` enum variant, - added `write::Sections::debug_loc/debug_loclists` public members, - and replaced `write::AttributeValue::LocationListsRef` with `LocationListRef`. - [#425](https://github.com/gimli-rs/gimli/pull/425) - -### Added - -* Added `read::Attribute::exprloc_value` and `read::AttributeValue::exprloc_value`. - [#422](https://github.com/gimli-rs/gimli/pull/422) - -* Added support for writing `.debug_loc` and `.debug_loclists` sections. - [#425](https://github.com/gimli-rs/gimli/pull/425) - -* Added `-G` flag to `dwarfdump` example to display global offsets. - [#427](https://github.com/gimli-rs/gimli/pull/427) - -* Added `examples/simple.rs`. 
- [#429](https://github.com/gimli-rs/gimli/pull/429) - -### Fixed - -* `write::LineProgram::from` no longer requires `DW_AT_name` or `DW_AT_comp_dir` - attributes to be present in the unit DIE. - [#430](https://github.com/gimli-rs/gimli/pull/430) - --------------------------------------------------------------------------------- - -## 0.18.0 - -Released 2019/04/25. - -The focus of this release has been on improving support for reading CFI, -and adding support for writing CFI. - -### Breaking changes - -* For types which have an `Offset` type parameter, the default `Offset` - has changed from `usize` to `R::Offset`. - [#392](https://github.com/gimli-rs/gimli/pull/392) - -* Added an `Offset` type parameter to the `read::Unit` type to allow variance. - [#393](https://github.com/gimli-rs/gimli/pull/393) - -* Changed the `UninitializedUnwindContext::initialize` method to borrow `self`, - and return `&mut UnwindContext`. Deleted the `InitializedUnwindContext` type. - [#395](https://github.com/gimli-rs/gimli/pull/395) - -* Deleted the `UnwindSection` type parameters from the `CommonInformationEntry`, - `FrameDescriptionEntry`, `UninitializedUnwindContext`, - `UnwindContext`, and `UnwindTable` types. - [#399](https://github.com/gimli-rs/gimli/pull/399) - -* Changed the signature of the `get_cie` callback parameter for various functions. - The signature now matches the `UnwindSection::cie_from_offset` method, so - that method can be used as the parameter. - [#400](https://github.com/gimli-rs/gimli/pull/400) - -* Reduced the number of lifetime parameters for the `UnwindTable` type. - [#400](https://github.com/gimli-rs/gimli/pull/400) - -* Updated `fallible-iterator` to version 0.2.0. - [#407](https://github.com/gimli-rs/gimli/pull/407) - -* Added a parameter to the `Error::UnexpectedEof` enum variant. - [#408](https://github.com/gimli-rs/gimli/pull/408) - -### Added - -* Update to 2018 edition. 
- [#391](https://github.com/gimli-rs/gimli/pull/391) - -* Added the `FrameDescriptionEntry::unwind_info_for_address` method. - [#396](https://github.com/gimli-rs/gimli/pull/396) - -* Added the `FrameDescriptionEntry::rows` method. - [#396](https://github.com/gimli-rs/gimli/pull/396) - -* Added the `EhHdrTable::unwind_info_for_address` method. - [#400](https://github.com/gimli-rs/gimli/pull/400) - -* Added the `EhHdrTable::fde_for_address` method and deprecated the - `EhHdrTable::lookup_and_parse` method. - [#400](https://github.com/gimli-rs/gimli/pull/400) - -* Added the `EhHdrTable::pointer_to_offset` method. - [#400](https://github.com/gimli-rs/gimli/pull/400) - -* Added the `UnwindSection::fde_for_address` method. - [#396](https://github.com/gimli-rs/gimli/pull/396) - -* Added the `UnwindSection::fde_from_offset` method. - [#400](https://github.com/gimli-rs/gimli/pull/400) - -* Added the `UnwindSection::partial_fde_from_offset` method. - [#400](https://github.com/gimli-rs/gimli/pull/400) - -* Added the `Section::id` method. - [#406](https://github.com/gimli-rs/gimli/pull/406) - -* Added the `Dwarf::load` method, and corresponding methods for individual sections. - [#406](https://github.com/gimli-rs/gimli/pull/406) - -* Added the `Dwarf::borrow` method, and corresponding methods for individual sections. - [#406](https://github.com/gimli-rs/gimli/pull/406) - -* Added the `Dwarf::format_error` method. - [#408](https://github.com/gimli-rs/gimli/pull/408) - -* Added the `Dwarf::die_ranges` method. - [#417](https://github.com/gimli-rs/gimli/pull/417) - -* Added the `Dwarf::unit_ranges` method. - [#417](https://github.com/gimli-rs/gimli/pull/417) - -* Added support for writing `.debug_frame` and `.eh_frame` sections. - [#412](https://github.com/gimli-rs/gimli/pull/412) - [#419](https://github.com/gimli-rs/gimli/pull/419) - -### Fixed - -* The `code_alignment_factor` is now used when evaluating CFI instructions - that advance the location. 
- [#401](https://github.com/gimli-rs/gimli/pull/401) - -* Fixed parsing of pointers encoded with `DW_EH_PE_funcrel`. - [#402](https://github.com/gimli-rs/gimli/pull/402) - -* Use the FDE address encoding from the augmentation when parsing `DW_CFA_set_loc`. - [#403](https://github.com/gimli-rs/gimli/pull/403) - -* Fixed setting of `.eh_frame` base addresses in dwarfdump. - [#410](https://github.com/gimli-rs/gimli/pull/410) - -## 0.17.0 - -Released 2019/02/21. - -The focus of this release has been on improving DWARF 5 support, and -adding support for writing DWARF. - -### Breaking changes - -* Changed register values to a `Register` type instead of `u8`/`u64`. - [#328](https://github.com/gimli-rs/gimli/pull/328) - -* Replaced `BaseAddresses::set_cfi` with `set_eh_frame_hdr` and `set_eh_frame`. - Replaced `BaseAddresses::set_data` with `set_got`. - You should now use the same `BaseAddresses` value for parsing both - `.eh_frame` and `.eh_frame_hdr`. - [#351](https://github.com/gimli-rs/gimli/pull/351) - -* Renamed many types and functions related to `.debug_line`. - Renamed `LineNumberProgram` to `LineProgram`. - Renamed `IncompleteLineNumberProgram` to `IncompleteLineProgram`. - Renamed `CompleteLineNumberProgram` to `CompleteLineProgram`. - Renamed `LineNumberProgramHeader` to `LineProgramHeader`. - Renamed `LineNumberRow` to `LineRow`. - Renamed `StateMachine` to `LineRows`. - Renamed `Opcode` to `LineInstruction`. - Renamed `OpcodesIter` to `LineInstructions`. - Renamed `LineNumberSequence` to `LineSequence`. - [#359](https://github.com/gimli-rs/gimli/pull/359) - -* Added `Offset` type parameter to `AttributeValue`, `LineProgram`, - `IncompleteLineProgram`, `CompleteLineProgram`, `LineRows`, `LineInstruction`, - and `FileEntry`. - [#324](https://github.com/gimli-rs/gimli/pull/324) - -* Changed `FileEntry::path_name`, `FileEntry::directory`, and - `LineProgramHeader::directory` to return an `AttributeValue` instead - of a `Reader`. 
- [#366](https://github.com/gimli-rs/gimli/pull/366) - -* Renamed `FileEntry::last_modification` to `FileEntry::timestamp` - and renamed `FileEntry::length` to `FileEntry::size`. - [#366](https://github.com/gimli-rs/gimli/pull/366) - -* Added an `Encoding` type. Changed many functions that previously accepted - `Format`, version or address size parameters to accept an `Encoding` - parameter instead. - Notable changes are `LocationLists::locations`, `RangeLists::ranges`, - and `Expression::evaluation`. - [#364](https://github.com/gimli-rs/gimli/pull/364) - -* Changed return type of `LocationLists::new` and `RangeLists::new`. - [#370](https://github.com/gimli-rs/gimli/pull/370) - -* Added parameters to `LocationsLists::locations` and `RangeLists::ranges` - to support `.debug_addr`. - [#358](https://github.com/gimli-rs/gimli/pull/358) - -* Added more `AttributeValue` variants: `DebugAddrBase`, `DebugAddrIndex`, - `DebugLocListsBase`, `DebugLocListsIndex`, `DebugRngListsBase`, `DebugRngListsIndex`, - `DebugStrOffsetsBase`, `DebugStrOffsetsIndex`, `DebugLineStrRef`. - [#358](https://github.com/gimli-rs/gimli/pull/358) - -* Changed `AttributeValue::Data*` attributes to native endian integers instead - of byte arrays. - [#365](https://github.com/gimli-rs/gimli/pull/365) - -* Replaced `EvaluationResult::TextBase` with - `EvaluationResult::RequiresRelocatedAddress`. The handling of `TextBase` - was incorrect. - [#335](https://github.com/gimli-rs/gimli/pull/335) - -* Added `EvaluationResult::IndexedAddress` for operations that require an - address from `.debug_addr`. - [#358](https://github.com/gimli-rs/gimli/pull/358) - -* Added `Reader::read_slice`. Added a default implementation of - `Reader::read_u8_array` which uses this. - [#358](https://github.com/gimli-rs/gimli/pull/358) - -### Added - -* Added initial support for writing DWARF. This is targeted at supporting - line number information only. 
- [#340](https://github.com/gimli-rs/gimli/pull/340) - [#344](https://github.com/gimli-rs/gimli/pull/344) - [#346](https://github.com/gimli-rs/gimli/pull/346) - [#361](https://github.com/gimli-rs/gimli/pull/361) - [#362](https://github.com/gimli-rs/gimli/pull/362) - [#365](https://github.com/gimli-rs/gimli/pull/365) - [#368](https://github.com/gimli-rs/gimli/pull/368) - [#382](https://github.com/gimli-rs/gimli/pull/382) - -* Added `read` and `write` Cargo features. Both are enabled by default. - [#343](https://github.com/gimli-rs/gimli/pull/343) - -* Added support for reading DWARF 5 `.debug_line` and `.debug_line_str` sections. - [#366](https://github.com/gimli-rs/gimli/pull/366) - -* Added support for reading DWARF 5 `.debug_str_offsets` sections, including - parsing `DW_FORM_strx*` attributes. - [#358](https://github.com/gimli-rs/gimli/pull/358) - -* Added support for reading DWARF 5 `.debug_addr` sections, including parsing - `DW_FORM_addrx*` attributes and evaluating `DW_OP_addrx` and `DW_OP_constx` - operations. - [#358](https://github.com/gimli-rs/gimli/pull/358) - -* Added support for reading DWARF 5 indexed addresses and offsets in - `.debug_loclists` and `.debug_rnglists`, including parsing `DW_FORM_rnglistx` - and `DW_FORM_loclistx` attributes. - [#358](https://github.com/gimli-rs/gimli/pull/358) - -* Added high level `Dwarf` and `Unit` types. Existing code does not need to - switch to using these types, but doing so will make DWARF 5 support simpler. - [#352](https://github.com/gimli-rs/gimli/pull/352) - [#380](https://github.com/gimli-rs/gimli/pull/380) - [#381](https://github.com/gimli-rs/gimli/pull/381) - -* Added `EhFrame::set_address_size` and `DebugFrame::set_address_size` methods - to allow parsing non-native CFI sections. The default address size is still - the native size. - [#325](https://github.com/gimli-rs/gimli/pull/325) - -* Added architecture specific definitions for `Register` values and names. - Changed dwarfdump to print them. 
- [#328](https://github.com/gimli-rs/gimli/pull/328) - -* Added support for reading relocatable DWARF sections. - [#337](https://github.com/gimli-rs/gimli/pull/337) - -* Added parsing of `DW_FORM_data16`. - [#366](https://github.com/gimli-rs/gimli/pull/366) - -### Fixed - -* Fixed parsing DWARF 5 ranges with `start == end == 0`. - [#323](https://github.com/gimli-rs/gimli/pull/323) - -* Changed `LineRows` to be covariant in its `Reader` type parameter. - [#324](https://github.com/gimli-rs/gimli/pull/324) - -* Fixed handling of empty units in dwarfdump. - [#330](https://github.com/gimli-rs/gimli/pull/330) - -* Fixed `UnitHeader::length_including_self` for `Dwarf64`. - [#342](https://github.com/gimli-rs/gimli/pull/342) - -* Fixed parsing of `DW_CFA_set_loc`. - [#355](https://github.com/gimli-rs/gimli/pull/355) - -* Fixed handling of multiple headers in `.debug_loclists` and `.debug_rnglists`. - [#370](https://github.com/gimli-rs/gimli/pull/370) - --------------------------------------------------------------------------------- - -## 0.16.1 - -Released 2018/08/28. - -### Added - -* Added `EhFrameHdr::lookup_and_parse`. [#316][] -* Added support for `DW_CFA_GNU_args_size`. [#319][] - -### Fixed - -* Implement `Send`/`Sync` for `SubRange`. [#305][] -* Fixed `alloc` support on nightly. [#306][] [#310][] - -[#305]: https://github.com/gimli-rs/gimli/pull/305 -[#306]: https://github.com/gimli-rs/gimli/pull/306 -[#310]: https://github.com/gimli-rs/gimli/pull/310 -[#316]: https://github.com/gimli-rs/gimli/pull/316 -[#319]: https://github.com/gimli-rs/gimli/pull/319 - --------------------------------------------------------------------------------- - -## 0.16.0 - -Released 2018/06/01. - -### Added - -* Added support for building in `#![no_std]` environments, when the `alloc` - crate is available. Disable the "std" feature and enable the "alloc" - feature. [#138][] [#271][] - -* Added support for DWARF 5 `.debug_rnglists` and `.debug_loclists` - sections. 
[#272][] - -* Added support for DWARF 5 `DW_FORM_ref_sup` and `DW_FORM_strp_sup` attribute - forms. [#288][] - -* Added support for DWARF 5 operations on typed values. [#293][] - -* A `dwarf-validate` example program that checks the integrity of the given - DWARF and its references between sections. [#290][] - -* Added the `EndianReader` type, an easy way to define a custom `Reader` - implementation with a reference to a generic buffer of bytes and an associated - endianity. [#298][] [#302][] - -### Changed - -* Various speed improvements for evaluating `.debug_line` line number - programs. [#276][] - -* The example `dwarfdump` clone is a [whole lot faster - now][dwarfdump-faster]. [#282][] [#284][] [#285][] - -### Deprecated - -* `EndianBuf` has been renamed to `EndianSlice`, use that name instead. [#295][] - -### Fixed - -* Evaluating the `DW_CFA_restore_state` opcode properly maintains the current - location. Previously it would incorrectly restore the old location when - popping from evaluation stack. 
[#274][] - -[#271]: https://github.com/gimli-rs/gimli/issues/271 -[#138]: https://github.com/gimli-rs/gimli/issues/138 -[#274]: https://github.com/gimli-rs/gimli/issues/274 -[#272]: https://github.com/gimli-rs/gimli/issues/272 -[#276]: https://github.com/gimli-rs/gimli/issues/276 -[#282]: https://github.com/gimli-rs/gimli/issues/282 -[#285]: https://github.com/gimli-rs/gimli/issues/285 -[#284]: https://github.com/gimli-rs/gimli/issues/284 -[#288]: https://github.com/gimli-rs/gimli/issues/288 -[#290]: https://github.com/gimli-rs/gimli/issues/290 -[#293]: https://github.com/gimli-rs/gimli/issues/293 -[#295]: https://github.com/gimli-rs/gimli/issues/295 -[#298]: https://github.com/gimli-rs/gimli/issues/298 -[#302]: https://github.com/gimli-rs/gimli/issues/302 -[dwarfdump-faster]: https://robert.ocallahan.org/2018/03/speeding-up-dwarfdump-with-rust.html - --------------------------------------------------------------------------------- - -## 0.15.0 - -Released 2017/12/01. - -### Added - -* Added the `EndianBuf::to_string()` method. [#233][] - -* Added more robust error handling in our example `dwarfdump` clone. [#234][] - -* Added `FrameDescriptionEntry::initial_address` method. [#237][] - -* Added `FrameDescriptionEntry::len` method. [#237][] - -* Added the `FrameDescriptionEntry::entry_len` method. [#241][] - -* Added the `CommonInformationEntry::offset` method. [#241][] - -* Added the `CommonInformationEntry::entry_len` method. [#241][] - -* Added the `CommonInformationEntry::version` method. [#241][] - -* Added the `CommonInformationEntry::augmentation` method. [#241][] - -* Added the `CommonInformationEntry::code_alignment_factor` method. [#241][] - -* Added the `CommonInformationEntry::data_alignment_factor` method. [#241][] - -* Added the `CommonInformationEntry::return_address_register` method. [#241][] - -* Added support for printing `.eh_frame` sections to our example `dwarfdump` - clone. [#241][] - -* Added support for parsing the `.eh_frame_hdr` section. 
On Linux, the - `.eh_frame_hdr` section provides a pointer to the already-mapped-in-memory - `.eh_frame` data, so that it doesn't need to be duplicated, and a binary - search table of its entries for faster unwinding information lookups. [#250][] - -* Added support for parsing DWARF 5 compilation unit headers. [#257][] - -* Added support for DWARF 5's `DW_FORM_implicit_const`. [#257][] - -### Changed - -* Unwinding methods now give ownership of the unwinding context back to the - caller if errors are encountered, not just on the success path. This allows - recovering from errors in signal-safe code, where constructing a new unwinding - context is not an option because it requires allocation. This is a **breaking - change** affecting `UnwindSection::unwind_info_for_address` and - `UninitializedUnwindContext::initialize`. [#241][] - -* `CfaRule` and `RegisterRule` now expose their `DW_OP` expressions as - `Expression`. This is a minor **breaking change**. [#241][] - -* The `Error::UnknownVersion` variant now contains the unknown version - number. This is a minor **breaking change**. [#245][] - -* `EvaluationResult::RequiresEntryValue` requires an `Expression` instead of a - `Reader` now. This is a minor **breaking change**. [#256][] - - -[#233]: https://github.com/gimli-rs/gimli/pull/233 -[#234]: https://github.com/gimli-rs/gimli/pull/234 -[#237]: https://github.com/gimli-rs/gimli/pull/237 -[#241]: https://github.com/gimli-rs/gimli/pull/241 -[#245]: https://github.com/gimli-rs/gimli/pull/245 -[#250]: https://github.com/gimli-rs/gimli/pull/250 -[#256]: https://github.com/gimli-rs/gimli/pull/256 -[#257]: https://github.com/gimli-rs/gimli/pull/257 - --------------------------------------------------------------------------------- - -## 0.14.0 - -Released 2017/08/08. - -### Added - -* All `pub` types now `derive(Hash)`. [#192][] - -* All the constants from DWARF 5 are now defined. 
[#193][] - -* Added support for the `DW_OP_GNU_parameter_ref` GNU extension to parsing and - evaluation DWARF opcodes. [#208][] - -* Improved LEB128 parsing performance. [#216][] - -* Improved `.debug_{aranges,pubnames,pubtypes}` parsing performance. [#218][] - -* Added the ability to choose endianity dynamically at run time, rather than - only statically at compile time. [#219][] - -### Changed - -* The biggest change of this release is that `gimli` no longer requires the - object file's section be fully loaded into memory. This enables using `gimli` - on 32 bit platforms where there often isn't enough contiguous virtual memory - address space to load debugging information into. The default behavior is - still geared for 64 bit platforms, where address space overfloweth, and you - can still load the whole sections of the object file (or the entire object - file) into memory. This is abstracted over with the `gimli::Reader` - trait. This manifests as small (but many) breaking changes to much of the - public API. [#182][] - -### Fixed - -* The `DW_END_*` constants for defining endianity of a compilation unit were - previously incorrect. [#193][] - -* The `DW_OP_addr` opcode is relative to the base address of the `.text` section - of the binary, but we were incorrectly treating it as an absolute value. 
[#210][] - -[GitHub]: https://github.com/gimli-rs/gimli -[crates.io]: https://crates.io/crates/gimli -[contributing]: https://github.com/gimli-rs/gimli/blob/master/CONTRIBUTING.md -[easy]: https://github.com/gimli-rs/gimli/issues?q=is%3Aopen+is%3Aissue+label%3Aeasy -[#192]: https://github.com/gimli-rs/gimli/pull/192 -[#193]: https://github.com/gimli-rs/gimli/pull/193 -[#182]: https://github.com/gimli-rs/gimli/issues/182 -[#208]: https://github.com/gimli-rs/gimli/pull/208 -[#210]: https://github.com/gimli-rs/gimli/pull/210 -[#216]: https://github.com/gimli-rs/gimli/pull/216 -[#218]: https://github.com/gimli-rs/gimli/pull/218 -[#219]: https://github.com/gimli-rs/gimli/pull/219 diff -Nru s390-tools-2.31.0/rust-vendor/gimli/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/gimli/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/gimli/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/gimli/LICENSE-MIT s390-tools-2.33.1/rust-vendor/gimli/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/gimli/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2015 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/gimli/README.md s390-tools-2.33.1/rust-vendor/gimli/README.md --- s390-tools-2.31.0/rust-vendor/gimli/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,81 +0,0 @@ -# `gimli` - -[![](https://img.shields.io/crates/v/gimli.svg) ![](https://img.shields.io/crates/d/gimli.svg)](https://crates.io/crates/gimli) -[![](https://docs.rs/gimli/badge.svg)](https://docs.rs/gimli/) -[![Build Status](https://github.com/gimli-rs/gimli/workflows/Rust/badge.svg)](https://github.com/gimli-rs/gimli/actions) -[![Coverage Status](https://coveralls.io/repos/github/gimli-rs/gimli/badge.svg?branch=master)](https://coveralls.io/github/gimli-rs/gimli?branch=master) - -`gimli` is a library for reading and writing the -[DWARF debugging format](https://dwarfstd.org/). - -* **Zero copy:** everything is just a reference to the original input buffer. No - copies of the input data get made. - -* **Lazy:** you can iterate compilation units without parsing their - contents. Parse only as many debugging information entry (DIE) trees as you - iterate over. `gimli` also uses `DW_AT_sibling` references to avoid parsing a - DIE's children to find its next sibling, when possible. - -* **Cross-platform:** `gimli` makes no assumptions about what kind of object - file you're working with. The flipside to that is that it's up to you to - provide an ELF loader on Linux or Mach-O loader on macOS. - - * Unsure which object file parser to use? Try the cross-platform - [`object`](https://github.com/gimli-rs/object) crate. See the - [`gimli-examples`](./crates/examples/src/bin) crate for usage with `gimli`. - -## Install - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -gimli = "0.28.1" -``` - -The minimum supported Rust version is: - -* 1.60.0 for the `read` feature and its dependencies. -* 1.65.0 for other features. 
- -## Documentation - -* [Documentation on docs.rs](https://docs.rs/gimli/) - -* Example programs: - - * [A simple `.debug_info` parser](./crates/examples/src/bin/simple.rs) - - * [A simple `.debug_line` parser](./crates/examples/src/bin/simple_line.rs) - - * [A `dwarfdump` clone](./crates/examples/src/bin/dwarfdump.rs) - - * [An `addr2line` clone](https://github.com/gimli-rs/addr2line) - - * [`ddbug`](https://github.com/gimli-rs/ddbug), a utility giving insight into - code generation by making debugging information readable. - - * [`dwprod`](https://github.com/fitzgen/dwprod), a tiny utility to list the - compilers used to create each compilation unit within a shared library or - executable (via `DW_AT_producer`). - - * [`dwarf-validate`](./crates/examples/src/bin/dwarf-validate.rs), a program to validate the - integrity of some DWARF and its references between sections and compilation - units. - -## License - -Licensed under either of - - * Apache License, Version 2.0 ([`LICENSE-APACHE`](./LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([`LICENSE-MIT`](./LICENSE-MIT) or https://opensource.org/licenses/MIT) - -at your option. - -## Contribution - -See [CONTRIBUTING.md](./CONTRIBUTING.md) for hacking. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/arch.rs s390-tools-2.33.1/rust-vendor/gimli/src/arch.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/arch.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/arch.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,839 +0,0 @@ -use crate::common::Register; - -macro_rules! registers { - ($struct_name:ident, { $($name:ident = ($val:expr, $disp:expr)),+ $(,)? 
} - $(, aliases { $($alias_name:ident = ($alias_val:expr, $alias_disp:expr)),+ $(,)? })?) => { - #[allow(missing_docs)] - impl $struct_name { - $( - pub const $name: Register = Register($val); - )+ - $( - $(pub const $alias_name: Register = Register($alias_val);)+ - )* - } - - impl $struct_name { - /// The name of a register, or `None` if the register number is unknown. - /// - /// Only returns the primary name for registers that alias with others. - pub fn register_name(register: Register) -> Option<&'static str> { - match register { - $( - Self::$name => Some($disp), - )+ - _ => return None, - } - } - - /// Converts a register name into a register number. - pub fn name_to_register(value: &str) -> Option { - match value { - $( - $disp => Some(Self::$name), - )+ - $( - $($alias_disp => Some(Self::$alias_name),)+ - )* - _ => return None, - } - } - } - }; -} - -/// ARM architecture specific definitions. -/// -/// See [DWARF for the ARM Architecture]( -/// https://github.com/ARM-software/abi-aa/blob/main/aadwarf32/aadwarf32.rst). 
-#[derive(Debug, Clone, Copy)] -pub struct Arm; - -registers!(Arm, { - R0 = (0, "R0"), - R1 = (1, "R1"), - R2 = (2, "R2"), - R3 = (3, "R3"), - R4 = (4, "R4"), - R5 = (5, "R5"), - R6 = (6, "R6"), - R7 = (7, "R7"), - R8 = (8, "R8"), - R9 = (9, "R9"), - R10 = (10, "R10"), - R11 = (11, "R11"), - R12 = (12, "R12"), - R13 = (13, "R13"), - R14 = (14, "R14"), - R15 = (15, "R15"), - - WCGR0 = (104, "wCGR0"), - WCGR1 = (105, "wCGR1"), - WCGR2 = (106, "wCGR2"), - WCGR3 = (107, "wCGR3"), - WCGR4 = (108, "wCGR4"), - WCGR5 = (109, "wCGR5"), - WCGR6 = (110, "wCGR6"), - WCGR7 = (111, "wCGR7"), - - WR0 = (112, "wR0"), - WR1 = (113, "wR1"), - WR2 = (114, "wR2"), - WR3 = (115, "wR3"), - WR4 = (116, "wR4"), - WR5 = (117, "wR5"), - WR6 = (118, "wR6"), - WR7 = (119, "wR7"), - WR8 = (120, "wR8"), - WR9 = (121, "wR9"), - WR10 = (122, "wR10"), - WR11 = (123, "wR11"), - WR12 = (124, "wR12"), - WR13 = (125, "wR13"), - WR14 = (126, "wR14"), - WR15 = (127, "wR15"), - - SPSR = (128, "SPSR"), - SPSR_FIQ = (129, "SPSR_FIQ"), - SPSR_IRQ = (130, "SPSR_IRQ"), - SPSR_ABT = (131, "SPSR_ABT"), - SPSR_UND = (132, "SPSR_UND"), - SPSR_SVC = (133, "SPSR_SVC"), - - RA_AUTH_CODE = (143, "RA_AUTH_CODE"), - - R8_USR = (144, "R8_USR"), - R9_USR = (145, "R9_USR"), - R10_USR = (146, "R10_USR"), - R11_USR = (147, "R11_USR"), - R12_USR = (148, "R12_USR"), - R13_USR = (149, "R13_USR"), - R14_USR = (150, "R14_USR"), - - R8_FIQ = (151, "R8_FIQ"), - R9_FIQ = (152, "R9_FIQ"), - R10_FIQ = (153, "R10_FIQ"), - R11_FIQ = (154, "R11_FIQ"), - R12_FIQ = (155, "R12_FIQ"), - R13_FIQ = (156, "R13_FIQ"), - R14_FIQ = (157, "R14_FIQ"), - - R13_IRQ = (158, "R13_IRQ"), - R14_IRQ = (159, "R14_IRQ"), - - R13_ABT = (160, "R13_ABT"), - R14_ABT = (161, "R14_ABT"), - - R13_UND = (162, "R13_UND"), - R14_UND = (163, "R14_UND"), - - R13_SVC = (164, "R13_SVC"), - R14_SVC = (165, "R14_SVC"), - - WC0 = (192, "wC0"), - WC1 = (193, "wC1"), - WC2 = (194, "wC2"), - WC3 = (195, "wC3"), - WC4 = (196, "wC4"), - WC5 = (197, "wC5"), - WC6 = (198, "wC6"), 
- WC7 = (199, "wC7"), - - D0 = (256, "D0"), - D1 = (257, "D1"), - D2 = (258, "D2"), - D3 = (259, "D3"), - D4 = (260, "D4"), - D5 = (261, "D5"), - D6 = (262, "D6"), - D7 = (263, "D7"), - D8 = (264, "D8"), - D9 = (265, "D9"), - D10 = (266, "D10"), - D11 = (267, "D11"), - D12 = (268, "D12"), - D13 = (269, "D13"), - D14 = (270, "D14"), - D15 = (271, "D15"), - D16 = (272, "D16"), - D17 = (273, "D17"), - D18 = (274, "D18"), - D19 = (275, "D19"), - D20 = (276, "D20"), - D21 = (277, "D21"), - D22 = (278, "D22"), - D23 = (279, "D23"), - D24 = (280, "D24"), - D25 = (281, "D25"), - D26 = (282, "D26"), - D27 = (283, "D27"), - D28 = (284, "D28"), - D29 = (285, "D29"), - D30 = (286, "D30"), - D31 = (287, "D31"), - - TPIDRURO = (320, "TPIDRURO"), - TPIDRURW = (321, "TPIDRURW"), - TPIDPR = (322, "TPIDPR"), - HTPIDPR = (323, "HTPIDPR"), -}, -aliases { - SP = (13, "SP"), - LR = (14, "LR"), - PC = (15, "PC"), - - ACC0 = (104, "ACC0"), - ACC1 = (105, "ACC1"), - ACC2 = (106, "ACC2"), - ACC3 = (107, "ACC3"), - ACC4 = (108, "ACC4"), - ACC5 = (109, "ACC5"), - ACC6 = (110, "ACC6"), - ACC7 = (111, "ACC7"), - - S0 = (256, "S0"), - S1 = (256, "S1"), - S2 = (257, "S2"), - S3 = (257, "S3"), - S4 = (258, "S4"), - S5 = (258, "S5"), - S6 = (259, "S6"), - S7 = (259, "S7"), - S8 = (260, "S8"), - S9 = (260, "S9"), - S10 = (261, "S10"), - S11 = (261, "S11"), - S12 = (262, "S12"), - S13 = (262, "S13"), - S14 = (263, "S14"), - S15 = (263, "S15"), - S16 = (264, "S16"), - S17 = (264, "S17"), - S18 = (265, "S18"), - S19 = (265, "S19"), - S20 = (266, "S20"), - S21 = (266, "S21"), - S22 = (267, "S22"), - S23 = (267, "S23"), - S24 = (268, "S24"), - S25 = (268, "S25"), - S26 = (269, "S26"), - S27 = (269, "S27"), - S28 = (270, "S28"), - S29 = (270, "S29"), - S30 = (271, "S30"), - S31 = (271, "S31"), -}); - -/// ARM 64-bit (AArch64) architecture specific definitions. -/// -/// See [DWARF for the ARM 64-bit Architecture]( -/// https://github.com/ARM-software/abi-aa/blob/main/aadwarf64/aadwarf64.rst). 
-#[derive(Debug, Clone, Copy)] -pub struct AArch64; - -registers!(AArch64, { - X0 = (0, "X0"), - X1 = (1, "X1"), - X2 = (2, "X2"), - X3 = (3, "X3"), - X4 = (4, "X4"), - X5 = (5, "X5"), - X6 = (6, "X6"), - X7 = (7, "X7"), - X8 = (8, "X8"), - X9 = (9, "X9"), - X10 = (10, "X10"), - X11 = (11, "X11"), - X12 = (12, "X12"), - X13 = (13, "X13"), - X14 = (14, "X14"), - X15 = (15, "X15"), - X16 = (16, "X16"), - X17 = (17, "X17"), - X18 = (18, "X18"), - X19 = (19, "X19"), - X20 = (20, "X20"), - X21 = (21, "X21"), - X22 = (22, "X22"), - X23 = (23, "X23"), - X24 = (24, "X24"), - X25 = (25, "X25"), - X26 = (26, "X26"), - X27 = (27, "X27"), - X28 = (28, "X28"), - X29 = (29, "X29"), - X30 = (30, "X30"), - SP = (31, "SP"), - PC = (32, "PC"), - ELR_MODE = (33, "ELR_mode"), - RA_SIGN_STATE = (34, "RA_SIGN_STATE"), - TPIDRRO_EL0 = (35, "TPIDRRO_EL0"), - TPIDR_EL0 = (36, "TPIDR_EL0"), - TPIDR_EL1 = (37, "TPIDR_EL1"), - TPIDR_EL2 = (38, "TPIDR_EL2"), - TPIDR_EL3 = (39, "TPIDR_EL3"), - - VG = (46, "VG"), - FFR = (47, "FFR"), - - P0 = (48, "P0"), - P1 = (49, "P1"), - P2 = (50, "P2"), - P3 = (51, "P3"), - P4 = (52, "P4"), - P5 = (53, "P5"), - P6 = (54, "P6"), - P7 = (55, "P7"), - P8 = (56, "P8"), - P9 = (57, "P9"), - P10 = (58, "P10"), - P11 = (59, "P11"), - P12 = (60, "P12"), - P13 = (61, "P13"), - P14 = (62, "P14"), - P15 = (63, "P15"), - - V0 = (64, "V0"), - V1 = (65, "V1"), - V2 = (66, "V2"), - V3 = (67, "V3"), - V4 = (68, "V4"), - V5 = (69, "V5"), - V6 = (70, "V6"), - V7 = (71, "V7"), - V8 = (72, "V8"), - V9 = (73, "V9"), - V10 = (74, "V10"), - V11 = (75, "V11"), - V12 = (76, "V12"), - V13 = (77, "V13"), - V14 = (78, "V14"), - V15 = (79, "V15"), - V16 = (80, "V16"), - V17 = (81, "V17"), - V18 = (82, "V18"), - V19 = (83, "V19"), - V20 = (84, "V20"), - V21 = (85, "V21"), - V22 = (86, "V22"), - V23 = (87, "V23"), - V24 = (88, "V24"), - V25 = (89, "V25"), - V26 = (90, "V26"), - V27 = (91, "V27"), - V28 = (92, "V28"), - V29 = (93, "V29"), - V30 = (94, "V30"), - V31 = (95, "V31"), - - Z0 = 
(96, "Z0"), - Z1 = (97, "Z1"), - Z2 = (98, "Z2"), - Z3 = (99, "Z3"), - Z4 = (100, "Z4"), - Z5 = (101, "Z5"), - Z6 = (102, "Z6"), - Z7 = (103, "Z7"), - Z8 = (104, "Z8"), - Z9 = (105, "Z9"), - Z10 = (106, "Z10"), - Z11 = (107, "Z11"), - Z12 = (108, "Z12"), - Z13 = (109, "Z13"), - Z14 = (110, "Z14"), - Z15 = (111, "Z15"), - Z16 = (112, "Z16"), - Z17 = (113, "Z17"), - Z18 = (114, "Z18"), - Z19 = (115, "Z19"), - Z20 = (116, "Z20"), - Z21 = (117, "Z21"), - Z22 = (118, "Z22"), - Z23 = (119, "Z23"), - Z24 = (120, "Z24"), - Z25 = (121, "Z25"), - Z26 = (122, "Z26"), - Z27 = (123, "Z27"), - Z28 = (124, "Z28"), - Z29 = (125, "Z29"), - Z30 = (126, "Z30"), - Z31 = (127, "Z31"), -}); - -/// LoongArch architecture specific definitions. -/// -/// See [LoongArch ELF psABI specification](https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html). -#[derive(Debug, Clone, Copy)] -pub struct LoongArch; - -registers!(LoongArch, { - R0 = (0, "$r0"), - R1 = (1, "$r1"), - R2 = (2, "$r2"), - R3 = (3, "$r3"), - R4 = (4, "$r4"), - R5 = (5, "$r5"), - R6 = (6, "$r6"), - R7 = (7, "$r7"), - R8 = (8, "$r8"), - R9 = (9, "$r9"), - R10 = (10, "$r10"), - R11 = (11, "$r11"), - R12 = (12, "$r12"), - R13 = (13, "$r13"), - R14 = (14, "$r14"), - R15 = (15, "$r15"), - R16 = (16, "$r16"), - R17 = (17, "$r17"), - R18 = (18, "$r18"), - R19 = (19, "$r19"), - R20 = (20, "$r20"), - R21 = (21, "$r21"), - R22 = (22, "$r22"), - R23 = (23, "$r23"), - R24 = (24, "$r24"), - R25 = (25, "$r25"), - R26 = (26, "$r26"), - R27 = (27, "$r27"), - R28 = (28, "$r28"), - R29 = (29, "$r29"), - R30 = (30, "$r30"), - R31 = (31, "$r31"), - - F0 = (32, "$f0"), - F1 = (33, "$f1"), - F2 = (34, "$f2"), - F3 = (35, "$f3"), - F4 = (36, "$f4"), - F5 = (37, "$f5"), - F6 = (38, "$f6"), - F7 = (39, "$f7"), - F8 = (40, "$f8"), - F9 = (41, "$f9"), - F10 = (42, "$f10"), - F11 = (43, "$f11"), - F12 = (44, "$f12"), - F13 = (45, "$f13"), - F14 = (46, "$f14"), - F15 = (47, "$f15"), - F16 = (48, "$f16"), - F17 = (49, "$f17"), - F18 = 
(50, "$f18"), - F19 = (51, "$f19"), - F20 = (52, "$f20"), - F21 = (53, "$f21"), - F22 = (54, "$f22"), - F23 = (55, "$f23"), - F24 = (56, "$f24"), - F25 = (57, "$f25"), - F26 = (58, "$f26"), - F27 = (59, "$f27"), - F28 = (60, "$f28"), - F29 = (61, "$f29"), - F30 = (62, "$f30"), - F31 = (63, "$f31"), - FCC0 = (64, "$fcc0"), - FCC1 = (65, "$fcc1"), - FCC2 = (66, "$fcc2"), - FCC3 = (67, "$fcc3"), - FCC4 = (68, "$fcc4"), - FCC5 = (69, "$fcc5"), - FCC6 = (70, "$fcc6"), - FCC7 = (71, "$fcc7"), -}, -aliases { - ZERO = (0, "$zero"), - RA = (1, "$ra"), - TP = (2, "$tp"), - SP = (3, "$sp"), - A0 = (4, "$a0"), - A1 = (5, "$a1"), - A2 = (6, "$a2"), - A3 = (7, "$a3"), - A4 = (8, "$a4"), - A5 = (9, "$a5"), - A6 = (10, "$a6"), - A7 = (11, "$a7"), - T0 = (12, "$t0"), - T1 = (13, "$t1"), - T2 = (14, "$t2"), - T3 = (15, "$t3"), - T4 = (16, "$t4"), - T5 = (17, "$t5"), - T6 = (18, "$t6"), - T7 = (19, "$t7"), - T8 = (20, "$t8"), - FP = (22, "$fp"), - S0 = (23, "$s0"), - S1 = (24, "$s1"), - S2 = (25, "$s2"), - S3 = (26, "$s3"), - S4 = (27, "$s4"), - S5 = (28, "$s5"), - S6 = (29, "$s6"), - S7 = (30, "$s7"), - S8 = (31, "$s8"), - - FA0 = (32, "$fa0"), - FA1 = (33, "$fa1"), - FA2 = (34, "$fa2"), - FA3 = (35, "$fa3"), - FA4 = (36, "$fa4"), - FA5 = (37, "$fa5"), - FA6 = (38, "$fa6"), - FA7 = (39, "$fa7"), - FT0 = (40, "$ft0"), - FT1 = (41, "$ft1"), - FT2 = (42, "$ft2"), - FT3 = (43, "$ft3"), - FT4 = (44, "$ft4"), - FT5 = (45, "$ft5"), - FT6 = (46, "$ft6"), - FT7 = (47, "$ft7"), - FT8 = (48, "$ft8"), - FT9 = (49, "$ft9"), - FT10 = (50, "$ft10"), - FT11 = (51, "$ft11"), - FT12 = (52, "$ft12"), - FT13 = (53, "$ft13"), - FT14 = (54, "$ft14"), - FT15 = (55, "$ft15"), - FS0 = (56, "$fs0"), - FS1 = (57, "$fs1"), - FS2 = (58, "$fs2"), - FS3 = (59, "$fs3"), - FS4 = (60, "$fs4"), - FS5 = (61, "$fs5"), - FS6 = (62, "$fs6"), - FS7 = (63, "$fs7"), -}); - -/// RISC-V architecture specific definitions. -/// -/// See [RISC-V ELF psABI specification](https://github.com/riscv/riscv-elf-psabi-doc). 
-#[derive(Debug, Clone, Copy)] -pub struct RiscV; - -registers!(RiscV, { - X0 = (0, "x0"), - X1 = (1, "x1"), - X2 = (2, "x2"), - X3 = (3, "x3"), - X4 = (4, "x4"), - X5 = (5, "x5"), - X6 = (6, "x6"), - X7 = (7, "x7"), - X8 = (8, "x8"), - X9 = (9, "x9"), - X10 = (10, "x10"), - X11 = (11, "x11"), - X12 = (12, "x12"), - X13 = (13, "x13"), - X14 = (14, "x14"), - X15 = (15, "x15"), - X16 = (16, "x16"), - X17 = (17, "x17"), - X18 = (18, "x18"), - X19 = (19, "x19"), - X20 = (20, "x20"), - X21 = (21, "x21"), - X22 = (22, "x22"), - X23 = (23, "x23"), - X24 = (24, "x24"), - X25 = (25, "x25"), - X26 = (26, "x26"), - X27 = (27, "x27"), - X28 = (28, "x28"), - X29 = (29, "x29"), - X30 = (30, "x30"), - X31 = (31, "x31"), - - F0 = (32, "f0"), - F1 = (33, "f1"), - F2 = (34, "f2"), - F3 = (35, "f3"), - F4 = (36, "f4"), - F5 = (37, "f5"), - F6 = (38, "f6"), - F7 = (39, "f7"), - F8 = (40, "f8"), - F9 = (41, "f9"), - F10 = (42, "f10"), - F11 = (43, "f11"), - F12 = (44, "f12"), - F13 = (45, "f13"), - F14 = (46, "f14"), - F15 = (47, "f15"), - F16 = (48, "f16"), - F17 = (49, "f17"), - F18 = (50, "f18"), - F19 = (51, "f19"), - F20 = (52, "f20"), - F21 = (53, "f21"), - F22 = (54, "f22"), - F23 = (55, "f23"), - F24 = (56, "f24"), - F25 = (57, "f25"), - F26 = (58, "f26"), - F27 = (59, "f27"), - F28 = (60, "f28"), - F29 = (61, "f29"), - F30 = (62, "f30"), - F31 = (63, "f31"), -}, -aliases { - ZERO = (0, "zero"), - RA = (1, "ra"), - SP = (2, "sp"), - GP = (3, "gp"), - TP = (4, "tp"), - T0 = (5, "t0"), - T1 = (6, "t1"), - T2 = (7, "t2"), - S0 = (8, "s0"), - S1 = (9, "s1"), - A0 = (10, "a0"), - A1 = (11, "a1"), - A2 = (12, "a2"), - A3 = (13, "a3"), - A4 = (14, "a4"), - A5 = (15, "a5"), - A6 = (16, "a6"), - A7 = (17, "a7"), - S2 = (18, "s2"), - S3 = (19, "s3"), - S4 = (20, "s4"), - S5 = (21, "s5"), - S6 = (22, "s6"), - S7 = (23, "s7"), - S8 = (24, "s8"), - S9 = (25, "s9"), - S10 = (26, "s10"), - S11 = (27, "s11"), - T3 = (28, "t3"), - T4 = (29, "t4"), - T5 = (30, "t5"), - T6 = (31, "t6"), - - FT0 = 
(32, "ft0"), - FT1 = (33, "ft1"), - FT2 = (34, "ft2"), - FT3 = (35, "ft3"), - FT4 = (36, "ft4"), - FT5 = (37, "ft5"), - FT6 = (38, "ft6"), - FT7 = (39, "ft7"), - FS0 = (40, "fs0"), - FS1 = (41, "fs1"), - FA0 = (42, "fa0"), - FA1 = (43, "fa1"), - FA2 = (44, "fa2"), - FA3 = (45, "fa3"), - FA4 = (46, "fa4"), - FA5 = (47, "fa5"), - FA6 = (48, "fa6"), - FA7 = (49, "fa7"), - FS2 = (50, "fs2"), - FS3 = (51, "fs3"), - FS4 = (52, "fs4"), - FS5 = (53, "fs5"), - FS6 = (54, "fs6"), - FS7 = (55, "fs7"), - FS8 = (56, "fs8"), - FS9 = (57, "fs9"), - FS10 = (58, "fs10"), - FS11 = (59, "fs11"), - FT8 = (60, "ft8"), - FT9 = (61, "ft9"), - FT10 = (62, "ft10"), - FT11 = (63, "ft11"), -}); - -/// Intel i386 architecture specific definitions. -/// -/// See Intel386 psABi version 1.1 at the [X86 psABI wiki](https://github.com/hjl-tools/x86-psABI/wiki/X86-psABI). -#[derive(Debug, Clone, Copy)] -pub struct X86; - -registers!(X86, { - EAX = (0, "eax"), - ECX = (1, "ecx"), - EDX = (2, "edx"), - EBX = (3, "ebx"), - ESP = (4, "esp"), - EBP = (5, "ebp"), - ESI = (6, "esi"), - EDI = (7, "edi"), - - // Return Address register. This is stored in `0(%esp, "")` and is not a physical register. 
- RA = (8, "RA"), - - ST0 = (11, "st0"), - ST1 = (12, "st1"), - ST2 = (13, "st2"), - ST3 = (14, "st3"), - ST4 = (15, "st4"), - ST5 = (16, "st5"), - ST6 = (17, "st6"), - ST7 = (18, "st7"), - - XMM0 = (21, "xmm0"), - XMM1 = (22, "xmm1"), - XMM2 = (23, "xmm2"), - XMM3 = (24, "xmm3"), - XMM4 = (25, "xmm4"), - XMM5 = (26, "xmm5"), - XMM6 = (27, "xmm6"), - XMM7 = (28, "xmm7"), - - MM0 = (29, "mm0"), - MM1 = (30, "mm1"), - MM2 = (31, "mm2"), - MM3 = (32, "mm3"), - MM4 = (33, "mm4"), - MM5 = (34, "mm5"), - MM6 = (35, "mm6"), - MM7 = (36, "mm7"), - - MXCSR = (39, "mxcsr"), - - ES = (40, "es"), - CS = (41, "cs"), - SS = (42, "ss"), - DS = (43, "ds"), - FS = (44, "fs"), - GS = (45, "gs"), - - TR = (48, "tr"), - LDTR = (49, "ldtr"), - - FS_BASE = (93, "fs.base"), - GS_BASE = (94, "gs.base"), -}); - -/// AMD64 architecture specific definitions. -/// -/// See x86-64 psABI version 1.0 at the [X86 psABI wiki](https://github.com/hjl-tools/x86-psABI/wiki/X86-psABI). -#[derive(Debug, Clone, Copy)] -pub struct X86_64; - -registers!(X86_64, { - RAX = (0, "rax"), - RDX = (1, "rdx"), - RCX = (2, "rcx"), - RBX = (3, "rbx"), - RSI = (4, "rsi"), - RDI = (5, "rdi"), - RBP = (6, "rbp"), - RSP = (7, "rsp"), - - R8 = (8, "r8"), - R9 = (9, "r9"), - R10 = (10, "r10"), - R11 = (11, "r11"), - R12 = (12, "r12"), - R13 = (13, "r13"), - R14 = (14, "r14"), - R15 = (15, "r15"), - - // Return Address register. This is stored in `0(%rsp, "")` and is not a physical register. 
- RA = (16, "RA"), - - XMM0 = (17, "xmm0"), - XMM1 = (18, "xmm1"), - XMM2 = (19, "xmm2"), - XMM3 = (20, "xmm3"), - XMM4 = (21, "xmm4"), - XMM5 = (22, "xmm5"), - XMM6 = (23, "xmm6"), - XMM7 = (24, "xmm7"), - - XMM8 = (25, "xmm8"), - XMM9 = (26, "xmm9"), - XMM10 = (27, "xmm10"), - XMM11 = (28, "xmm11"), - XMM12 = (29, "xmm12"), - XMM13 = (30, "xmm13"), - XMM14 = (31, "xmm14"), - XMM15 = (32, "xmm15"), - - ST0 = (33, "st0"), - ST1 = (34, "st1"), - ST2 = (35, "st2"), - ST3 = (36, "st3"), - ST4 = (37, "st4"), - ST5 = (38, "st5"), - ST6 = (39, "st6"), - ST7 = (40, "st7"), - - MM0 = (41, "mm0"), - MM1 = (42, "mm1"), - MM2 = (43, "mm2"), - MM3 = (44, "mm3"), - MM4 = (45, "mm4"), - MM5 = (46, "mm5"), - MM6 = (47, "mm6"), - MM7 = (48, "mm7"), - - RFLAGS = (49, "rFLAGS"), - ES = (50, "es"), - CS = (51, "cs"), - SS = (52, "ss"), - DS = (53, "ds"), - FS = (54, "fs"), - GS = (55, "gs"), - - FS_BASE = (58, "fs.base"), - GS_BASE = (59, "gs.base"), - - TR = (62, "tr"), - LDTR = (63, "ldtr"), - MXCSR = (64, "mxcsr"), - FCW = (65, "fcw"), - FSW = (66, "fsw"), - - XMM16 = (67, "xmm16"), - XMM17 = (68, "xmm17"), - XMM18 = (69, "xmm18"), - XMM19 = (70, "xmm19"), - XMM20 = (71, "xmm20"), - XMM21 = (72, "xmm21"), - XMM22 = (73, "xmm22"), - XMM23 = (74, "xmm23"), - XMM24 = (75, "xmm24"), - XMM25 = (76, "xmm25"), - XMM26 = (77, "xmm26"), - XMM27 = (78, "xmm27"), - XMM28 = (79, "xmm28"), - XMM29 = (80, "xmm29"), - XMM30 = (81, "xmm30"), - XMM31 = (82, "xmm31"), - - K0 = (118, "k0"), - K1 = (119, "k1"), - K2 = (120, "k2"), - K3 = (121, "k3"), - K4 = (122, "k4"), - K5 = (123, "k5"), - K6 = (124, "k6"), - K7 = (125, "k7"), -}); - -#[cfg(test)] -mod tests { - - #[test] - #[cfg(feature = "std")] - fn test_aarch64_registers() { - use super::*; - use std::collections::HashSet; - - let mut names = HashSet::new(); - for n in (0..=39).chain(46..=127) { - let name = AArch64::register_name(Register(n)) - .unwrap_or_else(|| panic!("Register {} should have a name.", n)); - assert!(names.insert(name)); - } 
- } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/common.rs s390-tools-2.33.1/rust-vendor/gimli/src/common.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/common.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/common.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,391 +0,0 @@ -/// Whether the format of a compilation unit is 32- or 64-bit. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum Format { - /// 64-bit DWARF - Dwarf64 = 8, - /// 32-bit DWARF - Dwarf32 = 4, -} - -impl Format { - /// Return the serialized size of an initial length field for the format. - #[inline] - pub fn initial_length_size(self) -> u8 { - match self { - Format::Dwarf32 => 4, - Format::Dwarf64 => 12, - } - } - - /// Return the natural word size for the format - #[inline] - pub fn word_size(self) -> u8 { - match self { - Format::Dwarf32 => 4, - Format::Dwarf64 => 8, - } - } -} - -/// Which vendor extensions to support. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[non_exhaustive] -pub enum Vendor { - /// A default set of extensions, including some common GNU extensions. - Default, - /// AAarch64 extensions. - AArch64, -} - -/// Encoding parameters that are commonly used for multiple DWARF sections. -/// -/// This is intended to be small enough to pass by value. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -// `address_size` and `format` are used more often than `version`, so keep -// them first. -#[repr(C)] -pub struct Encoding { - /// The size of an address. - pub address_size: u8, - - // The size of a segment selector. - // TODO: pub segment_size: u8, - /// Whether the DWARF format is 32- or 64-bit. - pub format: Format, - - /// The DWARF version of the header. - pub version: u16, -} - -/// Encoding parameters for a line number program. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct LineEncoding { - /// The size in bytes of the smallest target machine instruction. 
- pub minimum_instruction_length: u8, - - /// The maximum number of individual operations that may be encoded in an - /// instruction. - pub maximum_operations_per_instruction: u8, - - /// The initial value of the `is_stmt` register. - pub default_is_stmt: bool, - - /// The minimum value which a special opcode can add to the line register. - pub line_base: i8, - - /// The range of values which a special opcode can add to the line register. - pub line_range: u8, -} - -impl Default for LineEncoding { - fn default() -> Self { - // Values from LLVM. - LineEncoding { - minimum_instruction_length: 1, - maximum_operations_per_instruction: 1, - default_is_stmt: true, - line_base: -5, - line_range: 14, - } - } -} - -/// A DWARF register number. -/// -/// The meaning of this value is ABI dependent. This is generally encoded as -/// a ULEB128, but supported architectures need 16 bits at most. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct Register(pub u16); - -/// An offset into the `.debug_abbrev` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct DebugAbbrevOffset(pub T); - -/// An offset to a set of entries in the `.debug_addr` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugAddrBase(pub T); - -/// An index into a set of addresses in the `.debug_addr` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugAddrIndex(pub T); - -/// An offset into the `.debug_aranges` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugArangesOffset(pub T); - -/// An offset into the `.debug_info` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)] -pub struct DebugInfoOffset(pub T); - -/// An offset into the `.debug_line` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugLineOffset(pub T); - -/// An offset into the `.debug_line_str` section. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugLineStrOffset(pub T); - -/// An offset into either the `.debug_loc` section or the `.debug_loclists` section, -/// depending on the version of the unit the offset was contained in. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct LocationListsOffset(pub T); - -/// An offset to a set of location list offsets in the `.debug_loclists` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugLocListsBase(pub T); - -/// An index into a set of location list offsets in the `.debug_loclists` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugLocListsIndex(pub T); - -/// An offset into the `.debug_macinfo` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct DebugMacinfoOffset(pub T); - -/// An offset into the `.debug_macro` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct DebugMacroOffset(pub T); - -/// An offset into either the `.debug_ranges` section or the `.debug_rnglists` section, -/// depending on the version of the unit the offset was contained in. -/// -/// If this is from a DWARF 4 DWO file, then it must additionally be offset by the -/// value of `DW_AT_GNU_ranges_base`. You can use `Dwarf::ranges_offset_from_raw` to do this. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct RawRangeListsOffset(pub T); - -/// An offset into either the `.debug_ranges` section or the `.debug_rnglists` section, -/// depending on the version of the unit the offset was contained in. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct RangeListsOffset(pub T); - -/// An offset to a set of range list offsets in the `.debug_rnglists` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugRngListsBase(pub T); - -/// An index into a set of range list offsets in the `.debug_rnglists` section. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugRngListsIndex(pub T); - -/// An offset into the `.debug_str` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugStrOffset(pub T); - -/// An offset to a set of entries in the `.debug_str_offsets` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugStrOffsetsBase(pub T); - -/// An index into a set of entries in the `.debug_str_offsets` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DebugStrOffsetsIndex(pub T); - -/// An offset into the `.debug_types` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)] -pub struct DebugTypesOffset(pub T); - -/// A type signature as used in the `.debug_types` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct DebugTypeSignature(pub u64); - -/// An offset into the `.debug_frame` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct DebugFrameOffset(pub T); - -impl From for DebugFrameOffset { - #[inline] - fn from(o: T) -> Self { - DebugFrameOffset(o) - } -} - -/// An offset into the `.eh_frame` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct EhFrameOffset(pub T); - -impl From for EhFrameOffset { - #[inline] - fn from(o: T) -> Self { - EhFrameOffset(o) - } -} - -/// An offset into the `.debug_info` or `.debug_types` sections. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)] -pub enum UnitSectionOffset { - /// An offset into the `.debug_info` section. - DebugInfoOffset(DebugInfoOffset), - /// An offset into the `.debug_types` section. 
- DebugTypesOffset(DebugTypesOffset), -} - -impl From> for UnitSectionOffset { - fn from(offset: DebugInfoOffset) -> Self { - UnitSectionOffset::DebugInfoOffset(offset) - } -} - -impl From> for UnitSectionOffset { - fn from(offset: DebugTypesOffset) -> Self { - UnitSectionOffset::DebugTypesOffset(offset) - } -} - -impl UnitSectionOffset -where - T: Clone, -{ - /// Returns the `DebugInfoOffset` inside, or `None` otherwise. - pub fn as_debug_info_offset(&self) -> Option> { - match self { - UnitSectionOffset::DebugInfoOffset(offset) => Some(offset.clone()), - UnitSectionOffset::DebugTypesOffset(_) => None, - } - } - /// Returns the `DebugTypesOffset` inside, or `None` otherwise. - pub fn as_debug_types_offset(&self) -> Option> { - match self { - UnitSectionOffset::DebugInfoOffset(_) => None, - UnitSectionOffset::DebugTypesOffset(offset) => Some(offset.clone()), - } - } -} - -/// An identifier for a DWARF section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)] -pub enum SectionId { - /// The `.debug_abbrev` section. - DebugAbbrev, - /// The `.debug_addr` section. - DebugAddr, - /// The `.debug_aranges` section. - DebugAranges, - /// The `.debug_cu_index` section. - DebugCuIndex, - /// The `.debug_frame` section. - DebugFrame, - /// The `.eh_frame` section. - EhFrame, - /// The `.eh_frame_hdr` section. - EhFrameHdr, - /// The `.debug_info` section. - DebugInfo, - /// The `.debug_line` section. - DebugLine, - /// The `.debug_line_str` section. - DebugLineStr, - /// The `.debug_loc` section. - DebugLoc, - /// The `.debug_loclists` section. - DebugLocLists, - /// The `.debug_macinfo` section. - DebugMacinfo, - /// The `.debug_macro` section. - DebugMacro, - /// The `.debug_pubnames` section. - DebugPubNames, - /// The `.debug_pubtypes` section. - DebugPubTypes, - /// The `.debug_ranges` section. - DebugRanges, - /// The `.debug_rnglists` section. - DebugRngLists, - /// The `.debug_str` section. - DebugStr, - /// The `.debug_str_offsets` section. 
- DebugStrOffsets, - /// The `.debug_tu_index` section. - DebugTuIndex, - /// The `.debug_types` section. - DebugTypes, -} - -impl SectionId { - /// Returns the ELF section name for this kind. - pub fn name(self) -> &'static str { - match self { - SectionId::DebugAbbrev => ".debug_abbrev", - SectionId::DebugAddr => ".debug_addr", - SectionId::DebugAranges => ".debug_aranges", - SectionId::DebugCuIndex => ".debug_cu_index", - SectionId::DebugFrame => ".debug_frame", - SectionId::EhFrame => ".eh_frame", - SectionId::EhFrameHdr => ".eh_frame_hdr", - SectionId::DebugInfo => ".debug_info", - SectionId::DebugLine => ".debug_line", - SectionId::DebugLineStr => ".debug_line_str", - SectionId::DebugLoc => ".debug_loc", - SectionId::DebugLocLists => ".debug_loclists", - SectionId::DebugMacinfo => ".debug_macinfo", - SectionId::DebugMacro => ".debug_macro", - SectionId::DebugPubNames => ".debug_pubnames", - SectionId::DebugPubTypes => ".debug_pubtypes", - SectionId::DebugRanges => ".debug_ranges", - SectionId::DebugRngLists => ".debug_rnglists", - SectionId::DebugStr => ".debug_str", - SectionId::DebugStrOffsets => ".debug_str_offsets", - SectionId::DebugTuIndex => ".debug_tu_index", - SectionId::DebugTypes => ".debug_types", - } - } - - /// Returns the ELF section name for this kind, when found in a .dwo or .dwp file. - pub fn dwo_name(self) -> Option<&'static str> { - Some(match self { - SectionId::DebugAbbrev => ".debug_abbrev.dwo", - SectionId::DebugCuIndex => ".debug_cu_index", - SectionId::DebugInfo => ".debug_info.dwo", - SectionId::DebugLine => ".debug_line.dwo", - // The debug_loc section can be present in the dwo when using the - // GNU split-dwarf extension to DWARF4. 
- SectionId::DebugLoc => ".debug_loc.dwo", - SectionId::DebugLocLists => ".debug_loclists.dwo", - SectionId::DebugMacro => ".debug_macro.dwo", - SectionId::DebugRngLists => ".debug_rnglists.dwo", - SectionId::DebugStr => ".debug_str.dwo", - SectionId::DebugStrOffsets => ".debug_str_offsets.dwo", - SectionId::DebugTuIndex => ".debug_tu_index", - SectionId::DebugTypes => ".debug_types.dwo", - _ => return None, - }) - } - - /// Returns the XCOFF section name for this kind. - pub fn xcoff_name(self) -> Option<&'static str> { - Some(match self { - SectionId::DebugAbbrev => ".dwabrev", - SectionId::DebugAranges => ".dwarnge", - SectionId::DebugFrame => ".dwframe", - SectionId::DebugInfo => ".dwinfo", - SectionId::DebugLine => ".dwline", - SectionId::DebugLoc => ".dwloc", - SectionId::DebugMacinfo => ".dwmac", - SectionId::DebugPubNames => ".dwpbnms", - SectionId::DebugPubTypes => ".dwpbtyp", - SectionId::DebugRanges => ".dwrnges", - SectionId::DebugStr => ".dwstr", - _ => return None, - }) - } -} - -/// An optionally-provided implementation-defined compilation unit ID to enable -/// split DWARF and linking a split compilation unit back together. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct DwoId(pub u64); - -/// The "type" of file with DWARF debugging information. This determines, among other things, -/// which files DWARF sections should be loaded from. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum DwarfFileType { - /// A normal executable or object file. - Main, - /// A .dwo split DWARF file. - Dwo, - // TODO: Supplementary files, .dwps? 
-} - -impl Default for DwarfFileType { - fn default() -> Self { - DwarfFileType::Main - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/constants.rs s390-tools-2.33.1/rust-vendor/gimli/src/constants.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/constants.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/constants.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1435 +0,0 @@ -// This file originally from https://github.com/philipc/rust-dwarf/ and -// distributed under either MIT or Apache 2.0 licenses. -// -// Copyright 2016 The rust-dwarf Developers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Constant definitions. -//! -//! The DWARF spec's `DW_AT_*` type is represented as `struct DwAt(u16)`, -//! `DW_FORM_*` as `DwForm(u16)`, etc. -//! -//! There are also exported const definitions for each constant. - -#![allow(non_upper_case_globals)] -#![allow(missing_docs)] - -use core::fmt; - -// The `dw!` macro turns this: -// -// dw!(DwFoo(u32) { -// DW_FOO_bar = 0, -// DW_FOO_baz = 1, -// DW_FOO_bang = 2, -// }); -// -// into this: -// -// #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] -// pub struct DwFoo(pub u32); -// -// pub const DW_FOO_bar: DwFoo = DwFoo(0); -// pub const DW_FOO_baz: DwFoo = DwFoo(1); -// pub const DW_FOO_bang: DwFoo = DwFoo(2); -// -// impl DwFoo { -// pub fn static_string(&self) -> Option<&'static str> { -// ... 
-// } -// } -// -// impl fmt::Display for DwFoo { -// fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { -// ... -// } -// } -macro_rules! dw { - ($(#[$meta:meta])* $struct_name:ident($struct_type:ty) - { $($name:ident = $val:expr),+ $(,)? } - $(, aliases { $($alias_name:ident = $alias_val:expr),+ $(,)? })? - ) => { - $(#[$meta])* - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] - pub struct $struct_name(pub $struct_type); - - $( - pub const $name: $struct_name = $struct_name($val); - )+ - $($( - pub const $alias_name: $struct_name = $struct_name($alias_val); - )+)* - - impl $struct_name { - pub fn static_string(&self) -> Option<&'static str> { - Some(match *self { - $( - $name => stringify!($name), - )+ - _ => return None, - }) - } - } - - impl fmt::Display for $struct_name { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - if let Some(s) = self.static_string() { - f.pad(s) - } else { - #[cfg(feature = "read")] - { - f.pad(&format!("Unknown {}: {}", stringify!($struct_name), self.0)) - } - #[cfg(not(feature = "read"))] - { - write!(f, "Unknown {}: {}", stringify!($struct_name), self.0) - } - } - } - } - }; -} - -dw!( -/// The section type field in a `.dwp` unit index. -/// -/// This is used for version 5 and later. -/// -/// See Section 7.3.5. -DwSect(u32) { - DW_SECT_INFO = 1, - DW_SECT_ABBREV = 3, - DW_SECT_LINE = 4, - DW_SECT_LOCLISTS = 5, - DW_SECT_STR_OFFSETS = 6, - DW_SECT_MACRO = 7, - DW_SECT_RNGLISTS = 8, -}); - -dw!( -/// The section type field in a `.dwp` unit index with version 2. -DwSectV2(u32) { - DW_SECT_V2_INFO = 1, - DW_SECT_V2_TYPES = 2, - DW_SECT_V2_ABBREV = 3, - DW_SECT_V2_LINE = 4, - DW_SECT_V2_LOC = 5, - DW_SECT_V2_STR_OFFSETS = 6, - DW_SECT_V2_MACINFO = 7, - DW_SECT_V2_MACRO = 8, -}); - -dw!( -/// The unit type field in a unit header. -/// -/// See Section 7.5.1, Table 7.2. 
-DwUt(u8) { - DW_UT_compile = 0x01, - DW_UT_type = 0x02, - DW_UT_partial = 0x03, - DW_UT_skeleton = 0x04, - DW_UT_split_compile = 0x05, - DW_UT_split_type = 0x06, - DW_UT_lo_user = 0x80, - DW_UT_hi_user = 0xff, -}); - -dw!( -/// The opcode for a call frame instruction. -/// -/// Section 7.24: -/// > Call frame instructions are encoded in one or more bytes. The primary -/// > opcode is encoded in the high order two bits of the first byte (that is, -/// > opcode = byte >> 6). An operand or extended opcode may be encoded in the -/// > low order 6 bits. Additional operands are encoded in subsequent bytes. -DwCfa(u8) { - DW_CFA_advance_loc = 0x01 << 6, - DW_CFA_offset = 0x02 << 6, - DW_CFA_restore = 0x03 << 6, - DW_CFA_nop = 0, - DW_CFA_set_loc = 0x01, - DW_CFA_advance_loc1 = 0x02, - DW_CFA_advance_loc2 = 0x03, - DW_CFA_advance_loc4 = 0x04, - DW_CFA_offset_extended = 0x05, - DW_CFA_restore_extended = 0x06, - DW_CFA_undefined = 0x07, - DW_CFA_same_value = 0x08, - DW_CFA_register = 0x09, - DW_CFA_remember_state = 0x0a, - DW_CFA_restore_state = 0x0b, - DW_CFA_def_cfa = 0x0c, - DW_CFA_def_cfa_register = 0x0d, - DW_CFA_def_cfa_offset = 0x0e, - DW_CFA_def_cfa_expression = 0x0f, - DW_CFA_expression = 0x10, - DW_CFA_offset_extended_sf = 0x11, - DW_CFA_def_cfa_sf = 0x12, - DW_CFA_def_cfa_offset_sf = 0x13, - DW_CFA_val_offset = 0x14, - DW_CFA_val_offset_sf = 0x15, - DW_CFA_val_expression = 0x16, - - DW_CFA_lo_user = 0x1c, - DW_CFA_hi_user = 0x3f, - - DW_CFA_MIPS_advance_loc8 = 0x1d, - DW_CFA_GNU_window_save = 0x2d, - DW_CFA_GNU_args_size = 0x2e, - DW_CFA_GNU_negative_offset_extended = 0x2f, -}, -aliases { - DW_CFA_AARCH64_negate_ra_state = 0x2d, -}); - -dw!( -/// The child determination encodings for DIE attributes. -/// -/// See Section 7.5.3, Table 7.4. -DwChildren(u8) { - DW_CHILDREN_no = 0, - DW_CHILDREN_yes = 1, -}); - -dw!( -/// The tag encodings for DIE attributes. -/// -/// See Section 7.5.3, Table 7.3. 
-DwTag(u16) { - DW_TAG_null = 0x00, - - DW_TAG_array_type = 0x01, - DW_TAG_class_type = 0x02, - DW_TAG_entry_point = 0x03, - DW_TAG_enumeration_type = 0x04, - DW_TAG_formal_parameter = 0x05, - DW_TAG_imported_declaration = 0x08, - DW_TAG_label = 0x0a, - DW_TAG_lexical_block = 0x0b, - DW_TAG_member = 0x0d, - DW_TAG_pointer_type = 0x0f, - DW_TAG_reference_type = 0x10, - DW_TAG_compile_unit = 0x11, - DW_TAG_string_type = 0x12, - DW_TAG_structure_type = 0x13, - DW_TAG_subroutine_type = 0x15, - DW_TAG_typedef = 0x16, - DW_TAG_union_type = 0x17, - DW_TAG_unspecified_parameters = 0x18, - DW_TAG_variant = 0x19, - DW_TAG_common_block = 0x1a, - DW_TAG_common_inclusion = 0x1b, - DW_TAG_inheritance = 0x1c, - DW_TAG_inlined_subroutine = 0x1d, - DW_TAG_module = 0x1e, - DW_TAG_ptr_to_member_type = 0x1f, - DW_TAG_set_type = 0x20, - DW_TAG_subrange_type = 0x21, - DW_TAG_with_stmt = 0x22, - DW_TAG_access_declaration = 0x23, - DW_TAG_base_type = 0x24, - DW_TAG_catch_block = 0x25, - DW_TAG_const_type = 0x26, - DW_TAG_constant = 0x27, - DW_TAG_enumerator = 0x28, - DW_TAG_file_type = 0x29, - DW_TAG_friend = 0x2a, - DW_TAG_namelist = 0x2b, - DW_TAG_namelist_item = 0x2c, - DW_TAG_packed_type = 0x2d, - DW_TAG_subprogram = 0x2e, - DW_TAG_template_type_parameter = 0x2f, - DW_TAG_template_value_parameter = 0x30, - DW_TAG_thrown_type = 0x31, - DW_TAG_try_block = 0x32, - DW_TAG_variant_part = 0x33, - DW_TAG_variable = 0x34, - DW_TAG_volatile_type = 0x35, - -// DWARF 3. - DW_TAG_dwarf_procedure = 0x36, - DW_TAG_restrict_type = 0x37, - DW_TAG_interface_type = 0x38, - DW_TAG_namespace = 0x39, - DW_TAG_imported_module = 0x3a, - DW_TAG_unspecified_type = 0x3b, - DW_TAG_partial_unit = 0x3c, - DW_TAG_imported_unit = 0x3d, - DW_TAG_condition = 0x3f, - DW_TAG_shared_type = 0x40, - -// DWARF 4. - DW_TAG_type_unit = 0x41, - DW_TAG_rvalue_reference_type = 0x42, - DW_TAG_template_alias = 0x43, - -// DWARF 5. 
- DW_TAG_coarray_type = 0x44, - DW_TAG_generic_subrange = 0x45, - DW_TAG_dynamic_type = 0x46, - DW_TAG_atomic_type = 0x47, - DW_TAG_call_site = 0x48, - DW_TAG_call_site_parameter = 0x49, - DW_TAG_skeleton_unit = 0x4a, - DW_TAG_immutable_type = 0x4b, - - DW_TAG_lo_user = 0x4080, - DW_TAG_hi_user = 0xffff, - -// SGI/MIPS extensions. - DW_TAG_MIPS_loop = 0x4081, - -// HP extensions. - DW_TAG_HP_array_descriptor = 0x4090, - DW_TAG_HP_Bliss_field = 0x4091, - DW_TAG_HP_Bliss_field_set = 0x4092, - -// GNU extensions. - DW_TAG_format_label = 0x4101, - DW_TAG_function_template = 0x4102, - DW_TAG_class_template = 0x4103, - DW_TAG_GNU_BINCL = 0x4104, - DW_TAG_GNU_EINCL = 0x4105, - DW_TAG_GNU_template_template_param = 0x4106, - DW_TAG_GNU_template_parameter_pack = 0x4107, - DW_TAG_GNU_formal_parameter_pack = 0x4108, - DW_TAG_GNU_call_site = 0x4109, - DW_TAG_GNU_call_site_parameter = 0x410a, - - DW_TAG_APPLE_property = 0x4200, - -// SUN extensions. - DW_TAG_SUN_function_template = 0x4201, - DW_TAG_SUN_class_template = 0x4202, - DW_TAG_SUN_struct_template = 0x4203, - DW_TAG_SUN_union_template = 0x4204, - DW_TAG_SUN_indirect_inheritance = 0x4205, - DW_TAG_SUN_codeflags = 0x4206, - DW_TAG_SUN_memop_info = 0x4207, - DW_TAG_SUN_omp_child_func = 0x4208, - DW_TAG_SUN_rtti_descriptor = 0x4209, - DW_TAG_SUN_dtor_info = 0x420a, - DW_TAG_SUN_dtor = 0x420b, - DW_TAG_SUN_f90_interface = 0x420c, - DW_TAG_SUN_fortran_vax_structure = 0x420d, - -// ALTIUM extensions. - DW_TAG_ALTIUM_circ_type = 0x5101, - DW_TAG_ALTIUM_mwa_circ_type = 0x5102, - DW_TAG_ALTIUM_rev_carry_type = 0x5103, - DW_TAG_ALTIUM_rom = 0x5111, - -// Extensions for UPC. - DW_TAG_upc_shared_type = 0x8765, - DW_TAG_upc_strict_type = 0x8766, - DW_TAG_upc_relaxed_type = 0x8767, - -// PGI (STMicroelectronics) extensions. - DW_TAG_PGI_kanji_type = 0xa000, - DW_TAG_PGI_interface_block = 0xa020, - -// Borland extensions. 
- DW_TAG_BORLAND_property = 0xb000, - DW_TAG_BORLAND_Delphi_string = 0xb001, - DW_TAG_BORLAND_Delphi_dynamic_array = 0xb002, - DW_TAG_BORLAND_Delphi_set = 0xb003, - DW_TAG_BORLAND_Delphi_variant = 0xb004, -}); - -dw!( -/// The attribute encodings for DIE attributes. -/// -/// See Section 7.5.4, Table 7.5. -DwAt(u16) { - DW_AT_null = 0x00, - - DW_AT_sibling = 0x01, - DW_AT_location = 0x02, - DW_AT_name = 0x03, - DW_AT_ordering = 0x09, - DW_AT_byte_size = 0x0b, - DW_AT_bit_offset = 0x0c, - DW_AT_bit_size = 0x0d, - DW_AT_stmt_list = 0x10, - DW_AT_low_pc = 0x11, - DW_AT_high_pc = 0x12, - DW_AT_language = 0x13, - DW_AT_discr = 0x15, - DW_AT_discr_value = 0x16, - DW_AT_visibility = 0x17, - DW_AT_import = 0x18, - DW_AT_string_length = 0x19, - DW_AT_common_reference = 0x1a, - DW_AT_comp_dir = 0x1b, - DW_AT_const_value = 0x1c, - DW_AT_containing_type = 0x1d, - DW_AT_default_value = 0x1e, - DW_AT_inline = 0x20, - DW_AT_is_optional = 0x21, - DW_AT_lower_bound = 0x22, - DW_AT_producer = 0x25, - DW_AT_prototyped = 0x27, - DW_AT_return_addr = 0x2a, - DW_AT_start_scope = 0x2c, - DW_AT_bit_stride = 0x2e, - DW_AT_upper_bound = 0x2f, - DW_AT_abstract_origin = 0x31, - DW_AT_accessibility = 0x32, - DW_AT_address_class = 0x33, - DW_AT_artificial = 0x34, - DW_AT_base_types = 0x35, - DW_AT_calling_convention = 0x36, - DW_AT_count = 0x37, - DW_AT_data_member_location = 0x38, - DW_AT_decl_column = 0x39, - DW_AT_decl_file = 0x3a, - DW_AT_decl_line = 0x3b, - DW_AT_declaration = 0x3c, - DW_AT_discr_list = 0x3d, - DW_AT_encoding = 0x3e, - DW_AT_external = 0x3f, - DW_AT_frame_base = 0x40, - DW_AT_friend = 0x41, - DW_AT_identifier_case = 0x42, - DW_AT_macro_info = 0x43, - DW_AT_namelist_item = 0x44, - DW_AT_priority = 0x45, - DW_AT_segment = 0x46, - DW_AT_specification = 0x47, - DW_AT_static_link = 0x48, - DW_AT_type = 0x49, - DW_AT_use_location = 0x4a, - DW_AT_variable_parameter = 0x4b, - DW_AT_virtuality = 0x4c, - DW_AT_vtable_elem_location = 0x4d, - -// DWARF 3. 
- DW_AT_allocated = 0x4e, - DW_AT_associated = 0x4f, - DW_AT_data_location = 0x50, - DW_AT_byte_stride = 0x51, - DW_AT_entry_pc = 0x52, - DW_AT_use_UTF8 = 0x53, - DW_AT_extension = 0x54, - DW_AT_ranges = 0x55, - DW_AT_trampoline = 0x56, - DW_AT_call_column = 0x57, - DW_AT_call_file = 0x58, - DW_AT_call_line = 0x59, - DW_AT_description = 0x5a, - DW_AT_binary_scale = 0x5b, - DW_AT_decimal_scale = 0x5c, - DW_AT_small = 0x5d, - DW_AT_decimal_sign = 0x5e, - DW_AT_digit_count = 0x5f, - DW_AT_picture_string = 0x60, - DW_AT_mutable = 0x61, - DW_AT_threads_scaled = 0x62, - DW_AT_explicit = 0x63, - DW_AT_object_pointer = 0x64, - DW_AT_endianity = 0x65, - DW_AT_elemental = 0x66, - DW_AT_pure = 0x67, - DW_AT_recursive = 0x68, - -// DWARF 4. - DW_AT_signature = 0x69, - DW_AT_main_subprogram = 0x6a, - DW_AT_data_bit_offset = 0x6b, - DW_AT_const_expr = 0x6c, - DW_AT_enum_class = 0x6d, - DW_AT_linkage_name = 0x6e, - -// DWARF 5. - DW_AT_string_length_bit_size = 0x6f, - DW_AT_string_length_byte_size = 0x70, - DW_AT_rank = 0x71, - DW_AT_str_offsets_base = 0x72, - DW_AT_addr_base = 0x73, - DW_AT_rnglists_base = 0x74, - DW_AT_dwo_name = 0x76, - DW_AT_reference = 0x77, - DW_AT_rvalue_reference = 0x78, - DW_AT_macros = 0x79, - DW_AT_call_all_calls = 0x7a, - DW_AT_call_all_source_calls = 0x7b, - DW_AT_call_all_tail_calls = 0x7c, - DW_AT_call_return_pc = 0x7d, - DW_AT_call_value = 0x7e, - DW_AT_call_origin = 0x7f, - DW_AT_call_parameter = 0x80, - DW_AT_call_pc = 0x81, - DW_AT_call_tail_call = 0x82, - DW_AT_call_target = 0x83, - DW_AT_call_target_clobbered = 0x84, - DW_AT_call_data_location = 0x85, - DW_AT_call_data_value = 0x86, - DW_AT_noreturn = 0x87, - DW_AT_alignment = 0x88, - DW_AT_export_symbols = 0x89, - DW_AT_deleted = 0x8a, - DW_AT_defaulted = 0x8b, - DW_AT_loclists_base = 0x8c, - - DW_AT_lo_user = 0x2000, - DW_AT_hi_user = 0x3fff, - -// SGI/MIPS extensions. 
- DW_AT_MIPS_fde = 0x2001, - DW_AT_MIPS_loop_begin = 0x2002, - DW_AT_MIPS_tail_loop_begin = 0x2003, - DW_AT_MIPS_epilog_begin = 0x2004, - DW_AT_MIPS_loop_unroll_factor = 0x2005, - DW_AT_MIPS_software_pipeline_depth = 0x2006, - DW_AT_MIPS_linkage_name = 0x2007, - DW_AT_MIPS_stride = 0x2008, - DW_AT_MIPS_abstract_name = 0x2009, - DW_AT_MIPS_clone_origin = 0x200a, - DW_AT_MIPS_has_inlines = 0x200b, - DW_AT_MIPS_stride_byte = 0x200c, - DW_AT_MIPS_stride_elem = 0x200d, - DW_AT_MIPS_ptr_dopetype = 0x200e, - DW_AT_MIPS_allocatable_dopetype = 0x200f, - DW_AT_MIPS_assumed_shape_dopetype = 0x2010, - -// This one appears to have only been implemented by Open64 for -// fortran and may conflict with other extensions. - DW_AT_MIPS_assumed_size = 0x2011, - -// TODO: HP/CPQ extensions. -// These conflict with the MIPS extensions. - - DW_AT_INTEL_other_endian = 0x2026, - -// GNU extensions - DW_AT_sf_names = 0x2101, - DW_AT_src_info = 0x2102, - DW_AT_mac_info = 0x2103, - DW_AT_src_coords = 0x2104, - DW_AT_body_begin = 0x2105, - DW_AT_body_end = 0x2106, - DW_AT_GNU_vector = 0x2107, - DW_AT_GNU_guarded_by = 0x2108, - DW_AT_GNU_pt_guarded_by = 0x2109, - DW_AT_GNU_guarded = 0x210a, - DW_AT_GNU_pt_guarded = 0x210b, - DW_AT_GNU_locks_excluded = 0x210c, - DW_AT_GNU_exclusive_locks_required = 0x210d, - DW_AT_GNU_shared_locks_required = 0x210e, - DW_AT_GNU_odr_signature = 0x210f, - DW_AT_GNU_template_name = 0x2110, - DW_AT_GNU_call_site_value = 0x2111, - DW_AT_GNU_call_site_data_value = 0x2112, - DW_AT_GNU_call_site_target = 0x2113, - DW_AT_GNU_call_site_target_clobbered = 0x2114, - DW_AT_GNU_tail_call = 0x2115, - DW_AT_GNU_all_tail_call_sites = 0x2116, - DW_AT_GNU_all_call_sites = 0x2117, - DW_AT_GNU_all_source_call_sites = 0x2118, - DW_AT_GNU_macros = 0x2119, - DW_AT_GNU_deleted = 0x211a, - -// Extensions for Fission proposal. 
- DW_AT_GNU_dwo_name = 0x2130, - DW_AT_GNU_dwo_id = 0x2131, - DW_AT_GNU_ranges_base = 0x2132, - DW_AT_GNU_addr_base = 0x2133, - DW_AT_GNU_pubnames = 0x2134, - DW_AT_GNU_pubtypes = 0x2135, - DW_AT_GNU_discriminator = 0x2136, - DW_AT_GNU_locviews = 0x2137, - DW_AT_GNU_entry_view = 0x2138, - -// Conflict with Sun. -// DW_AT_VMS_rtnbeg_pd_address = 0x2201, - -// Sun extensions. - DW_AT_SUN_template = 0x2201, - DW_AT_SUN_alignment = 0x2202, - DW_AT_SUN_vtable = 0x2203, - DW_AT_SUN_count_guarantee = 0x2204, - DW_AT_SUN_command_line = 0x2205, - DW_AT_SUN_vbase = 0x2206, - DW_AT_SUN_compile_options = 0x2207, - DW_AT_SUN_language = 0x2208, - DW_AT_SUN_browser_file = 0x2209, - DW_AT_SUN_vtable_abi = 0x2210, - DW_AT_SUN_func_offsets = 0x2211, - DW_AT_SUN_cf_kind = 0x2212, - DW_AT_SUN_vtable_index = 0x2213, - DW_AT_SUN_omp_tpriv_addr = 0x2214, - DW_AT_SUN_omp_child_func = 0x2215, - DW_AT_SUN_func_offset = 0x2216, - DW_AT_SUN_memop_type_ref = 0x2217, - DW_AT_SUN_profile_id = 0x2218, - DW_AT_SUN_memop_signature = 0x2219, - DW_AT_SUN_obj_dir = 0x2220, - DW_AT_SUN_obj_file = 0x2221, - DW_AT_SUN_original_name = 0x2222, - DW_AT_SUN_hwcprof_signature = 0x2223, - DW_AT_SUN_amd64_parmdump = 0x2224, - DW_AT_SUN_part_link_name = 0x2225, - DW_AT_SUN_link_name = 0x2226, - DW_AT_SUN_pass_with_const = 0x2227, - DW_AT_SUN_return_with_const = 0x2228, - DW_AT_SUN_import_by_name = 0x2229, - DW_AT_SUN_f90_pointer = 0x222a, - DW_AT_SUN_pass_by_ref = 0x222b, - DW_AT_SUN_f90_allocatable = 0x222c, - DW_AT_SUN_f90_assumed_shape_array = 0x222d, - DW_AT_SUN_c_vla = 0x222e, - DW_AT_SUN_return_value_ptr = 0x2230, - DW_AT_SUN_dtor_start = 0x2231, - DW_AT_SUN_dtor_length = 0x2232, - DW_AT_SUN_dtor_state_initial = 0x2233, - DW_AT_SUN_dtor_state_final = 0x2234, - DW_AT_SUN_dtor_state_deltas = 0x2235, - DW_AT_SUN_import_by_lname = 0x2236, - DW_AT_SUN_f90_use_only = 0x2237, - DW_AT_SUN_namelist_spec = 0x2238, - DW_AT_SUN_is_omp_child_func = 0x2239, - DW_AT_SUN_fortran_main_alias = 0x223a, - 
DW_AT_SUN_fortran_based = 0x223b, - - DW_AT_ALTIUM_loclist = 0x2300, - - DW_AT_use_GNAT_descriptive_type = 0x2301, - DW_AT_GNAT_descriptive_type = 0x2302, - DW_AT_GNU_numerator = 0x2303, - DW_AT_GNU_denominator = 0x2304, - DW_AT_GNU_bias = 0x2305, - - DW_AT_upc_threads_scaled = 0x3210, - -// PGI (STMicroelectronics) extensions. - DW_AT_PGI_lbase = 0x3a00, - DW_AT_PGI_soffset = 0x3a01, - DW_AT_PGI_lstride = 0x3a02, - -// Borland extensions. - DW_AT_BORLAND_property_read = 0x3b11, - DW_AT_BORLAND_property_write = 0x3b12, - DW_AT_BORLAND_property_implements = 0x3b13, - DW_AT_BORLAND_property_index = 0x3b14, - DW_AT_BORLAND_property_default = 0x3b15, - DW_AT_BORLAND_Delphi_unit = 0x3b20, - DW_AT_BORLAND_Delphi_class = 0x3b21, - DW_AT_BORLAND_Delphi_record = 0x3b22, - DW_AT_BORLAND_Delphi_metaclass = 0x3b23, - DW_AT_BORLAND_Delphi_constructor = 0x3b24, - DW_AT_BORLAND_Delphi_destructor = 0x3b25, - DW_AT_BORLAND_Delphi_anonymous_method = 0x3b26, - DW_AT_BORLAND_Delphi_interface = 0x3b27, - DW_AT_BORLAND_Delphi_ABI = 0x3b28, - DW_AT_BORLAND_Delphi_return = 0x3b29, - DW_AT_BORLAND_Delphi_frameptr = 0x3b30, - DW_AT_BORLAND_closure = 0x3b31, - -// LLVM project extensions. - DW_AT_LLVM_include_path = 0x3e00, - DW_AT_LLVM_config_macros = 0x3e01, - DW_AT_LLVM_isysroot = 0x3e02, - -// Apple extensions. - DW_AT_APPLE_optimized = 0x3fe1, - DW_AT_APPLE_flags = 0x3fe2, - DW_AT_APPLE_isa = 0x3fe3, - DW_AT_APPLE_block = 0x3fe4, - DW_AT_APPLE_major_runtime_vers = 0x3fe5, - DW_AT_APPLE_runtime_class = 0x3fe6, - DW_AT_APPLE_omit_frame_ptr = 0x3fe7, - DW_AT_APPLE_property_name = 0x3fe8, - DW_AT_APPLE_property_getter = 0x3fe9, - DW_AT_APPLE_property_setter = 0x3fea, - DW_AT_APPLE_property_attribute = 0x3feb, - DW_AT_APPLE_objc_complete_type = 0x3fec, - DW_AT_APPLE_property = 0x3fed -}); - -dw!( -/// The attribute form encodings for DIE attributes. -/// -/// See Section 7.5.6, Table 7.6. 
-DwForm(u16) { - DW_FORM_null = 0x00, - - DW_FORM_addr = 0x01, - DW_FORM_block2 = 0x03, - DW_FORM_block4 = 0x04, - DW_FORM_data2 = 0x05, - DW_FORM_data4 = 0x06, - DW_FORM_data8 = 0x07, - DW_FORM_string = 0x08, - DW_FORM_block = 0x09, - DW_FORM_block1 = 0x0a, - DW_FORM_data1 = 0x0b, - DW_FORM_flag = 0x0c, - DW_FORM_sdata = 0x0d, - DW_FORM_strp = 0x0e, - DW_FORM_udata = 0x0f, - DW_FORM_ref_addr = 0x10, - DW_FORM_ref1 = 0x11, - DW_FORM_ref2 = 0x12, - DW_FORM_ref4 = 0x13, - DW_FORM_ref8 = 0x14, - DW_FORM_ref_udata = 0x15, - DW_FORM_indirect = 0x16, - -// DWARF 4. - DW_FORM_sec_offset = 0x17, - DW_FORM_exprloc = 0x18, - DW_FORM_flag_present = 0x19, - DW_FORM_ref_sig8 = 0x20, - -// DWARF 5. - DW_FORM_strx = 0x1a, - DW_FORM_addrx = 0x1b, - DW_FORM_ref_sup4 = 0x1c, - DW_FORM_strp_sup = 0x1d, - DW_FORM_data16 = 0x1e, - DW_FORM_line_strp = 0x1f, - DW_FORM_implicit_const = 0x21, - DW_FORM_loclistx = 0x22, - DW_FORM_rnglistx = 0x23, - DW_FORM_ref_sup8 = 0x24, - DW_FORM_strx1 = 0x25, - DW_FORM_strx2 = 0x26, - DW_FORM_strx3 = 0x27, - DW_FORM_strx4 = 0x28, - DW_FORM_addrx1 = 0x29, - DW_FORM_addrx2 = 0x2a, - DW_FORM_addrx3 = 0x2b, - DW_FORM_addrx4 = 0x2c, - -// Extensions for Fission proposal - DW_FORM_GNU_addr_index = 0x1f01, - DW_FORM_GNU_str_index = 0x1f02, - -// Alternate debug sections proposal (output of "dwz" tool). - DW_FORM_GNU_ref_alt = 0x1f20, - DW_FORM_GNU_strp_alt = 0x1f21 -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_encoding` attribute. -/// -/// See Section 7.8, Table 7.11. -DwAte(u8) { - DW_ATE_address = 0x01, - DW_ATE_boolean = 0x02, - DW_ATE_complex_float = 0x03, - DW_ATE_float = 0x04, - DW_ATE_signed = 0x05, - DW_ATE_signed_char = 0x06, - DW_ATE_unsigned = 0x07, - DW_ATE_unsigned_char = 0x08, - -// DWARF 3. - DW_ATE_imaginary_float = 0x09, - DW_ATE_packed_decimal = 0x0a, - DW_ATE_numeric_string = 0x0b, - DW_ATE_edited = 0x0c, - DW_ATE_signed_fixed = 0x0d, - DW_ATE_unsigned_fixed = 0x0e, - DW_ATE_decimal_float = 0x0f , - -// DWARF 4. 
- DW_ATE_UTF = 0x10, - DW_ATE_UCS = 0x11, - DW_ATE_ASCII = 0x12, - - DW_ATE_lo_user = 0x80, - DW_ATE_hi_user = 0xff, -}); - -dw!( -/// The encodings of the constants used in location list entries. -/// -/// See Section 7.7.3, Table 7.10. -DwLle(u8) { - DW_LLE_end_of_list = 0x00, - DW_LLE_base_addressx = 0x01, - DW_LLE_startx_endx = 0x02, - DW_LLE_startx_length = 0x03, - DW_LLE_offset_pair = 0x04, - DW_LLE_default_location = 0x05, - DW_LLE_base_address = 0x06, - DW_LLE_start_end = 0x07, - DW_LLE_start_length = 0x08, - DW_LLE_GNU_view_pair = 0x09, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_decimal_sign` attribute. -/// -/// See Section 7.8, Table 7.12. -DwDs(u8) { - DW_DS_unsigned = 0x01, - DW_DS_leading_overpunch = 0x02, - DW_DS_trailing_overpunch = 0x03, - DW_DS_leading_separate = 0x04, - DW_DS_trailing_separate = 0x05, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_endianity` attribute. -/// -/// See Section 7.8, Table 7.13. -DwEnd(u8) { - DW_END_default = 0x00, - DW_END_big = 0x01, - DW_END_little = 0x02, - DW_END_lo_user = 0x40, - DW_END_hi_user = 0xff, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_accessibility` attribute. -/// -/// See Section 7.9, Table 7.14. -DwAccess(u8) { - DW_ACCESS_public = 0x01, - DW_ACCESS_protected = 0x02, - DW_ACCESS_private = 0x03, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_visibility` attribute. -/// -/// See Section 7.10, Table 7.15. -DwVis(u8) { - DW_VIS_local = 0x01, - DW_VIS_exported = 0x02, - DW_VIS_qualified = 0x03, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_virtuality` attribute. -/// -/// See Section 7.11, Table 7.16. -DwVirtuality(u8) { - DW_VIRTUALITY_none = 0x00, - DW_VIRTUALITY_virtual = 0x01, - DW_VIRTUALITY_pure_virtual = 0x02, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_language` attribute. -/// -/// See Section 7.12, Table 7.17. 
-DwLang(u16) { - DW_LANG_C89 = 0x0001, - DW_LANG_C = 0x0002, - DW_LANG_Ada83 = 0x0003, - DW_LANG_C_plus_plus = 0x0004, - DW_LANG_Cobol74 = 0x0005, - DW_LANG_Cobol85 = 0x0006, - DW_LANG_Fortran77 = 0x0007, - DW_LANG_Fortran90 = 0x0008, - DW_LANG_Pascal83 = 0x0009, - DW_LANG_Modula2 = 0x000a, - DW_LANG_Java = 0x000b, - DW_LANG_C99 = 0x000c, - DW_LANG_Ada95 = 0x000d, - DW_LANG_Fortran95 = 0x000e, - DW_LANG_PLI = 0x000f, - DW_LANG_ObjC = 0x0010, - DW_LANG_ObjC_plus_plus = 0x0011, - DW_LANG_UPC = 0x0012, - DW_LANG_D = 0x0013, - DW_LANG_Python = 0x0014, - DW_LANG_OpenCL = 0x0015, - DW_LANG_Go = 0x0016, - DW_LANG_Modula3 = 0x0017, - DW_LANG_Haskell = 0x0018, - DW_LANG_C_plus_plus_03 = 0x0019, - DW_LANG_C_plus_plus_11 = 0x001a, - DW_LANG_OCaml = 0x001b, - DW_LANG_Rust = 0x001c, - DW_LANG_C11 = 0x001d, - DW_LANG_Swift = 0x001e, - DW_LANG_Julia = 0x001f, - DW_LANG_Dylan = 0x0020, - DW_LANG_C_plus_plus_14 = 0x0021, - DW_LANG_Fortran03 = 0x0022, - DW_LANG_Fortran08 = 0x0023, - DW_LANG_RenderScript = 0x0024, - DW_LANG_BLISS = 0x0025, - DW_LANG_Kotlin = 0x0026, - DW_LANG_Zig = 0x0027, - DW_LANG_Crystal = 0x0028, - DW_LANG_C_plus_plus_17 = 0x002a, - DW_LANG_C_plus_plus_20 = 0x002b, - DW_LANG_C17 = 0x002c, - DW_LANG_Fortran18 = 0x002d, - DW_LANG_Ada2005 = 0x002e, - DW_LANG_Ada2012 = 0x002f, - - DW_LANG_lo_user = 0x8000, - DW_LANG_hi_user = 0xffff, - - DW_LANG_Mips_Assembler = 0x8001, - DW_LANG_GOOGLE_RenderScript = 0x8e57, - DW_LANG_SUN_Assembler = 0x9001, - DW_LANG_ALTIUM_Assembler = 0x9101, - DW_LANG_BORLAND_Delphi = 0xb000, -}); - -impl DwLang { - /// Get the default DW_AT_lower_bound for this language. 
- pub fn default_lower_bound(self) -> Option { - match self { - DW_LANG_C89 - | DW_LANG_C - | DW_LANG_C_plus_plus - | DW_LANG_Java - | DW_LANG_C99 - | DW_LANG_ObjC - | DW_LANG_ObjC_plus_plus - | DW_LANG_UPC - | DW_LANG_D - | DW_LANG_Python - | DW_LANG_OpenCL - | DW_LANG_Go - | DW_LANG_Haskell - | DW_LANG_C_plus_plus_03 - | DW_LANG_C_plus_plus_11 - | DW_LANG_OCaml - | DW_LANG_Rust - | DW_LANG_C11 - | DW_LANG_Swift - | DW_LANG_Dylan - | DW_LANG_C_plus_plus_14 - | DW_LANG_RenderScript - | DW_LANG_BLISS => Some(0), - DW_LANG_Ada83 | DW_LANG_Cobol74 | DW_LANG_Cobol85 | DW_LANG_Fortran77 - | DW_LANG_Fortran90 | DW_LANG_Pascal83 | DW_LANG_Modula2 | DW_LANG_Ada95 - | DW_LANG_Fortran95 | DW_LANG_PLI | DW_LANG_Modula3 | DW_LANG_Julia - | DW_LANG_Fortran03 | DW_LANG_Fortran08 => Some(1), - _ => None, - } - } -} - -dw!( -/// The encodings of the constants used in the `DW_AT_address_class` attribute. -/// -/// There is only one value that is common to all target architectures. -/// See Section 7.13. -DwAddr(u64) { - DW_ADDR_none = 0x00, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_identifier_case` attribute. -/// -/// See Section 7.14, Table 7.18. -DwId(u8) { - DW_ID_case_sensitive = 0x00, - DW_ID_up_case = 0x01, - DW_ID_down_case = 0x02, - DW_ID_case_insensitive = 0x03, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_calling_convention` attribute. -/// -/// See Section 7.15, Table 7.19. -DwCc(u8) { - DW_CC_normal = 0x01, - DW_CC_program = 0x02, - DW_CC_nocall = 0x03, - DW_CC_pass_by_reference = 0x04, - DW_CC_pass_by_value = 0x05, - DW_CC_lo_user = 0x40, - DW_CC_hi_user = 0xff, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_inline` attribute. -/// -/// See Section 7.16, Table 7.20. -DwInl(u8) { - DW_INL_not_inlined = 0x00, - DW_INL_inlined = 0x01, - DW_INL_declared_not_inlined = 0x02, - DW_INL_declared_inlined = 0x03, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_ordering` attribute. 
-/// -/// See Section 7.17, Table 7.17. -DwOrd(u8) { - DW_ORD_row_major = 0x00, - DW_ORD_col_major = 0x01, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_discr_list` attribute. -/// -/// See Section 7.18, Table 7.22. -DwDsc(u8) { - DW_DSC_label = 0x00, - DW_DSC_range = 0x01, -}); - -dw!( -/// Name index attribute encodings. -/// -/// See Section 7.19, Table 7.23. -DwIdx(u16) { - DW_IDX_compile_unit = 1, - DW_IDX_type_unit = 2, - DW_IDX_die_offset = 3, - DW_IDX_parent = 4, - DW_IDX_type_hash = 5, - DW_IDX_lo_user = 0x2000, - DW_IDX_hi_user = 0x3fff, -}); - -dw!( -/// The encodings of the constants used in the `DW_AT_defaulted` attribute. -/// -/// See Section 7.20, Table 7.24. -DwDefaulted(u8) { - DW_DEFAULTED_no = 0x00, - DW_DEFAULTED_in_class = 0x01, - DW_DEFAULTED_out_of_class = 0x02, -}); - -dw!( -/// The encodings for the standard opcodes for line number information. -/// -/// See Section 7.22, Table 7.25. -DwLns(u8) { - DW_LNS_copy = 0x01, - DW_LNS_advance_pc = 0x02, - DW_LNS_advance_line = 0x03, - DW_LNS_set_file = 0x04, - DW_LNS_set_column = 0x05, - DW_LNS_negate_stmt = 0x06, - DW_LNS_set_basic_block = 0x07, - DW_LNS_const_add_pc = 0x08, - DW_LNS_fixed_advance_pc = 0x09, - DW_LNS_set_prologue_end = 0x0a, - DW_LNS_set_epilogue_begin = 0x0b, - DW_LNS_set_isa = 0x0c, -}); - -dw!( -/// The encodings for the extended opcodes for line number information. -/// -/// See Section 7.22, Table 7.26. -DwLne(u8) { - DW_LNE_end_sequence = 0x01, - DW_LNE_set_address = 0x02, - DW_LNE_define_file = 0x03, - DW_LNE_set_discriminator = 0x04, - - DW_LNE_lo_user = 0x80, - DW_LNE_hi_user = 0xff, -}); - -dw!( -/// The encodings for the line number header entry formats. -/// -/// See Section 7.22, Table 7.27. 
-DwLnct(u16) { - DW_LNCT_path = 0x1, - DW_LNCT_directory_index = 0x2, - DW_LNCT_timestamp = 0x3, - DW_LNCT_size = 0x4, - DW_LNCT_MD5 = 0x5, - DW_LNCT_lo_user = 0x2000, - DW_LNCT_hi_user = 0x3fff, -}); - -dw!( -/// The encodings for macro information entry types. -/// -/// See Section 7.23, Table 7.28. -DwMacro(u8) { - DW_MACRO_define = 0x01, - DW_MACRO_undef = 0x02, - DW_MACRO_start_file = 0x03, - DW_MACRO_end_file = 0x04, - DW_MACRO_define_strp = 0x05, - DW_MACRO_undef_strp = 0x06, - DW_MACRO_import = 0x07, - DW_MACRO_define_sup = 0x08, - DW_MACRO_undef_sup = 0x09, - DW_MACRO_import_sup = 0x0a, - DW_MACRO_define_strx = 0x0b, - DW_MACRO_undef_strx = 0x0c, - DW_MACRO_lo_user = 0xe0, - DW_MACRO_hi_user = 0xff, -}); - -dw!( -/// Range list entry encoding values. -/// -/// See Section 7.25, Table 7.30. -DwRle(u8) { - DW_RLE_end_of_list = 0x00, - DW_RLE_base_addressx = 0x01, - DW_RLE_startx_endx = 0x02, - DW_RLE_startx_length = 0x03, - DW_RLE_offset_pair = 0x04, - DW_RLE_base_address = 0x05, - DW_RLE_start_end = 0x06, - DW_RLE_start_length = 0x07, -}); - -dw!( -/// The encodings for DWARF expression operations. -/// -/// See Section 7.7.1, Table 7.9. 
-DwOp(u8) { - DW_OP_addr = 0x03, - DW_OP_deref = 0x06, - DW_OP_const1u = 0x08, - DW_OP_const1s = 0x09, - DW_OP_const2u = 0x0a, - DW_OP_const2s = 0x0b, - DW_OP_const4u = 0x0c, - DW_OP_const4s = 0x0d, - DW_OP_const8u = 0x0e, - DW_OP_const8s = 0x0f, - DW_OP_constu = 0x10, - DW_OP_consts = 0x11, - DW_OP_dup = 0x12, - DW_OP_drop = 0x13, - DW_OP_over = 0x14, - DW_OP_pick = 0x15, - DW_OP_swap = 0x16, - DW_OP_rot = 0x17, - DW_OP_xderef = 0x18, - DW_OP_abs = 0x19, - DW_OP_and = 0x1a, - DW_OP_div = 0x1b, - DW_OP_minus = 0x1c, - DW_OP_mod = 0x1d, - DW_OP_mul = 0x1e, - DW_OP_neg = 0x1f, - DW_OP_not = 0x20, - DW_OP_or = 0x21, - DW_OP_plus = 0x22, - DW_OP_plus_uconst = 0x23, - DW_OP_shl = 0x24, - DW_OP_shr = 0x25, - DW_OP_shra = 0x26, - DW_OP_xor = 0x27, - DW_OP_bra = 0x28, - DW_OP_eq = 0x29, - DW_OP_ge = 0x2a, - DW_OP_gt = 0x2b, - DW_OP_le = 0x2c, - DW_OP_lt = 0x2d, - DW_OP_ne = 0x2e, - DW_OP_skip = 0x2f, - DW_OP_lit0 = 0x30, - DW_OP_lit1 = 0x31, - DW_OP_lit2 = 0x32, - DW_OP_lit3 = 0x33, - DW_OP_lit4 = 0x34, - DW_OP_lit5 = 0x35, - DW_OP_lit6 = 0x36, - DW_OP_lit7 = 0x37, - DW_OP_lit8 = 0x38, - DW_OP_lit9 = 0x39, - DW_OP_lit10 = 0x3a, - DW_OP_lit11 = 0x3b, - DW_OP_lit12 = 0x3c, - DW_OP_lit13 = 0x3d, - DW_OP_lit14 = 0x3e, - DW_OP_lit15 = 0x3f, - DW_OP_lit16 = 0x40, - DW_OP_lit17 = 0x41, - DW_OP_lit18 = 0x42, - DW_OP_lit19 = 0x43, - DW_OP_lit20 = 0x44, - DW_OP_lit21 = 0x45, - DW_OP_lit22 = 0x46, - DW_OP_lit23 = 0x47, - DW_OP_lit24 = 0x48, - DW_OP_lit25 = 0x49, - DW_OP_lit26 = 0x4a, - DW_OP_lit27 = 0x4b, - DW_OP_lit28 = 0x4c, - DW_OP_lit29 = 0x4d, - DW_OP_lit30 = 0x4e, - DW_OP_lit31 = 0x4f, - DW_OP_reg0 = 0x50, - DW_OP_reg1 = 0x51, - DW_OP_reg2 = 0x52, - DW_OP_reg3 = 0x53, - DW_OP_reg4 = 0x54, - DW_OP_reg5 = 0x55, - DW_OP_reg6 = 0x56, - DW_OP_reg7 = 0x57, - DW_OP_reg8 = 0x58, - DW_OP_reg9 = 0x59, - DW_OP_reg10 = 0x5a, - DW_OP_reg11 = 0x5b, - DW_OP_reg12 = 0x5c, - DW_OP_reg13 = 0x5d, - DW_OP_reg14 = 0x5e, - DW_OP_reg15 = 0x5f, - DW_OP_reg16 = 0x60, - DW_OP_reg17 = 0x61, - DW_OP_reg18 
= 0x62, - DW_OP_reg19 = 0x63, - DW_OP_reg20 = 0x64, - DW_OP_reg21 = 0x65, - DW_OP_reg22 = 0x66, - DW_OP_reg23 = 0x67, - DW_OP_reg24 = 0x68, - DW_OP_reg25 = 0x69, - DW_OP_reg26 = 0x6a, - DW_OP_reg27 = 0x6b, - DW_OP_reg28 = 0x6c, - DW_OP_reg29 = 0x6d, - DW_OP_reg30 = 0x6e, - DW_OP_reg31 = 0x6f, - DW_OP_breg0 = 0x70, - DW_OP_breg1 = 0x71, - DW_OP_breg2 = 0x72, - DW_OP_breg3 = 0x73, - DW_OP_breg4 = 0x74, - DW_OP_breg5 = 0x75, - DW_OP_breg6 = 0x76, - DW_OP_breg7 = 0x77, - DW_OP_breg8 = 0x78, - DW_OP_breg9 = 0x79, - DW_OP_breg10 = 0x7a, - DW_OP_breg11 = 0x7b, - DW_OP_breg12 = 0x7c, - DW_OP_breg13 = 0x7d, - DW_OP_breg14 = 0x7e, - DW_OP_breg15 = 0x7f, - DW_OP_breg16 = 0x80, - DW_OP_breg17 = 0x81, - DW_OP_breg18 = 0x82, - DW_OP_breg19 = 0x83, - DW_OP_breg20 = 0x84, - DW_OP_breg21 = 0x85, - DW_OP_breg22 = 0x86, - DW_OP_breg23 = 0x87, - DW_OP_breg24 = 0x88, - DW_OP_breg25 = 0x89, - DW_OP_breg26 = 0x8a, - DW_OP_breg27 = 0x8b, - DW_OP_breg28 = 0x8c, - DW_OP_breg29 = 0x8d, - DW_OP_breg30 = 0x8e, - DW_OP_breg31 = 0x8f, - DW_OP_regx = 0x90, - DW_OP_fbreg = 0x91, - DW_OP_bregx = 0x92, - DW_OP_piece = 0x93, - DW_OP_deref_size = 0x94, - DW_OP_xderef_size = 0x95, - DW_OP_nop = 0x96, - DW_OP_push_object_address = 0x97, - DW_OP_call2 = 0x98, - DW_OP_call4 = 0x99, - DW_OP_call_ref = 0x9a, - DW_OP_form_tls_address = 0x9b, - DW_OP_call_frame_cfa = 0x9c, - DW_OP_bit_piece = 0x9d, - DW_OP_implicit_value = 0x9e, - DW_OP_stack_value = 0x9f, - DW_OP_implicit_pointer = 0xa0, - DW_OP_addrx = 0xa1, - DW_OP_constx = 0xa2, - DW_OP_entry_value = 0xa3, - DW_OP_const_type = 0xa4, - DW_OP_regval_type = 0xa5, - DW_OP_deref_type = 0xa6, - DW_OP_xderef_type = 0xa7, - DW_OP_convert = 0xa8, - DW_OP_reinterpret = 0xa9, - - // GNU extensions - DW_OP_GNU_push_tls_address = 0xe0, - DW_OP_GNU_implicit_pointer = 0xf2, - DW_OP_GNU_entry_value = 0xf3, - DW_OP_GNU_const_type = 0xf4, - DW_OP_GNU_regval_type = 0xf5, - DW_OP_GNU_deref_type = 0xf6, - DW_OP_GNU_convert = 0xf7, - DW_OP_GNU_reinterpret = 0xf9, - 
DW_OP_GNU_parameter_ref = 0xfa, - DW_OP_GNU_addr_index = 0xfb, - DW_OP_GNU_const_index = 0xfc, - - // Wasm extensions - DW_OP_WASM_location = 0xed, -}); - -dw!( -/// Pointer encoding used by `.eh_frame`. -/// -/// The four lower bits describe the -/// format of the pointer, the upper four bits describe how the encoding should -/// be applied. -/// -/// Defined in `` -DwEhPe(u8) { -// Format of pointer encoding. - -// "Unsigned value is encoded using the Little Endian Base 128" - DW_EH_PE_uleb128 = 0x1, -// "A 2 bytes unsigned value." - DW_EH_PE_udata2 = 0x2, -// "A 4 bytes unsigned value." - DW_EH_PE_udata4 = 0x3, -// "An 8 bytes unsigned value." - DW_EH_PE_udata8 = 0x4, -// "Signed value is encoded using the Little Endian Base 128" - DW_EH_PE_sleb128 = 0x9, -// "A 2 bytes signed value." - DW_EH_PE_sdata2 = 0x0a, -// "A 4 bytes signed value." - DW_EH_PE_sdata4 = 0x0b, -// "An 8 bytes signed value." - DW_EH_PE_sdata8 = 0x0c, - -// How the pointer encoding should be applied. - -// `DW_EH_PE_pcrel` pointers are relative to their own location. - DW_EH_PE_pcrel = 0x10, -// "Value is relative to the beginning of the .text section." - DW_EH_PE_textrel = 0x20, -// "Value is relative to the beginning of the .got or .eh_frame_hdr section." - DW_EH_PE_datarel = 0x30, -// "Value is relative to the beginning of the function." - DW_EH_PE_funcrel = 0x40, -// "Value is aligned to an address unit sized boundary." - DW_EH_PE_aligned = 0x50, - -// This bit can be set for any of the above encoding applications. When set, -// the encoded value is the address of the real pointer result, not the -// pointer result itself. -// -// This isn't defined in the DWARF or the `.eh_frame` standards, but is -// generated by both GNU/Linux and macOS tooling. - DW_EH_PE_indirect = 0x80, - -// These constants apply to both the lower and upper bits. - -// "The Value is a literal pointer whose size is determined by the -// architecture." 
- DW_EH_PE_absptr = 0x0, -// The absence of a pointer and encoding. - DW_EH_PE_omit = 0xff, -}); - -const DW_EH_PE_FORMAT_MASK: u8 = 0b0000_1111; - -// Ignores indirection bit. -const DW_EH_PE_APPLICATION_MASK: u8 = 0b0111_0000; - -impl DwEhPe { - /// Get the pointer encoding's format. - #[inline] - pub fn format(self) -> DwEhPe { - DwEhPe(self.0 & DW_EH_PE_FORMAT_MASK) - } - - /// Get the pointer encoding's application. - #[inline] - pub fn application(self) -> DwEhPe { - DwEhPe(self.0 & DW_EH_PE_APPLICATION_MASK) - } - - /// Is this encoding the absent pointer encoding? - #[inline] - pub fn is_absent(self) -> bool { - self == DW_EH_PE_omit - } - - /// Is this coding indirect? If so, its encoded value is the address of the - /// real pointer result, not the pointer result itself. - #[inline] - pub fn is_indirect(self) -> bool { - self.0 & DW_EH_PE_indirect.0 != 0 - } - - /// Is this a known, valid pointer encoding? - pub fn is_valid_encoding(self) -> bool { - if self.is_absent() { - return true; - } - - match self.format() { - DW_EH_PE_absptr | DW_EH_PE_uleb128 | DW_EH_PE_udata2 | DW_EH_PE_udata4 - | DW_EH_PE_udata8 | DW_EH_PE_sleb128 | DW_EH_PE_sdata2 | DW_EH_PE_sdata4 - | DW_EH_PE_sdata8 => {} - _ => return false, - } - - match self.application() { - DW_EH_PE_absptr | DW_EH_PE_pcrel | DW_EH_PE_textrel | DW_EH_PE_datarel - | DW_EH_PE_funcrel | DW_EH_PE_aligned => {} - _ => return false, - } - - true - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_dw_eh_pe_format() { - let encoding = DwEhPe(DW_EH_PE_pcrel.0 | DW_EH_PE_uleb128.0); - assert_eq!(encoding.format(), DW_EH_PE_uleb128); - } - - #[test] - fn test_dw_eh_pe_application() { - let encoding = DwEhPe(DW_EH_PE_pcrel.0 | DW_EH_PE_uleb128.0); - assert_eq!(encoding.application(), DW_EH_PE_pcrel); - } - - #[test] - fn test_dw_eh_pe_is_absent() { - assert_eq!(DW_EH_PE_absptr.is_absent(), false); - assert_eq!(DW_EH_PE_omit.is_absent(), true); - } - - #[test] - fn 
test_dw_eh_pe_is_valid_encoding_ok() { - let encoding = DwEhPe(DW_EH_PE_uleb128.0 | DW_EH_PE_pcrel.0); - assert!(encoding.is_valid_encoding()); - assert!(DW_EH_PE_absptr.is_valid_encoding()); - assert!(DW_EH_PE_omit.is_valid_encoding()); - } - - #[test] - fn test_dw_eh_pe_is_valid_encoding_bad_format() { - let encoding = DwEhPe((DW_EH_PE_sdata8.0 + 1) | DW_EH_PE_pcrel.0); - assert_eq!(encoding.is_valid_encoding(), false); - } - - #[test] - fn test_dw_eh_pe_is_valid_encoding_bad_application() { - let encoding = DwEhPe(DW_EH_PE_sdata8.0 | (DW_EH_PE_aligned.0 + 1)); - assert_eq!(encoding.is_valid_encoding(), false); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/endianity.rs s390-tools-2.33.1/rust-vendor/gimli/src/endianity.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/endianity.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/endianity.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,256 +0,0 @@ -//! Types for compile-time and run-time endianity. - -use core::convert::TryInto; -use core::fmt::Debug; - -/// A trait describing the endianity of some buffer. -pub trait Endianity: Debug + Default + Clone + Copy + PartialEq + Eq { - /// Return true for big endian byte order. - fn is_big_endian(self) -> bool; - - /// Return true for little endian byte order. - #[inline] - fn is_little_endian(self) -> bool { - !self.is_big_endian() - } - - /// Reads an unsigned 16 bit integer from `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 2`. - #[inline] - fn read_u16(self, buf: &[u8]) -> u16 { - let bytes: &[u8; 2] = buf[..2].try_into().unwrap(); - if self.is_big_endian() { - u16::from_be_bytes(*bytes) - } else { - u16::from_le_bytes(*bytes) - } - } - - /// Reads an unsigned 32 bit integer from `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 4`. 
- #[inline] - fn read_u32(self, buf: &[u8]) -> u32 { - let bytes: &[u8; 4] = buf[..4].try_into().unwrap(); - if self.is_big_endian() { - u32::from_be_bytes(*bytes) - } else { - u32::from_le_bytes(*bytes) - } - } - - /// Reads an unsigned 64 bit integer from `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 8`. - #[inline] - fn read_u64(self, buf: &[u8]) -> u64 { - let bytes: &[u8; 8] = buf[..8].try_into().unwrap(); - if self.is_big_endian() { - u64::from_be_bytes(*bytes) - } else { - u64::from_le_bytes(*bytes) - } - } - - /// Read an unsigned n-bytes integer u64. - /// - /// # Panics - /// - /// Panics when `buf.len() < 1` or `buf.len() > 8`. - #[inline] - fn read_uint(&mut self, buf: &[u8]) -> u64 { - let mut tmp = [0; 8]; - if self.is_big_endian() { - tmp[8 - buf.len()..].copy_from_slice(buf); - } else { - tmp[..buf.len()].copy_from_slice(buf); - } - self.read_u64(&tmp) - } - - /// Reads a signed 16 bit integer from `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 2`. - #[inline] - fn read_i16(self, buf: &[u8]) -> i16 { - self.read_u16(buf) as i16 - } - - /// Reads a signed 32 bit integer from `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 4`. - #[inline] - fn read_i32(self, buf: &[u8]) -> i32 { - self.read_u32(buf) as i32 - } - - /// Reads a signed 64 bit integer from `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 8`. - #[inline] - fn read_i64(self, buf: &[u8]) -> i64 { - self.read_u64(buf) as i64 - } - - /// Reads a 32 bit floating point number from `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 8`. - #[inline] - fn read_f32(self, buf: &[u8]) -> f32 { - f32::from_bits(self.read_u32(buf)) - } - - /// Reads a 32 bit floating point number from `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 8`. - #[inline] - fn read_f64(self, buf: &[u8]) -> f64 { - f64::from_bits(self.read_u64(buf)) - } - - /// Writes an unsigned 16 bit integer `n` to `buf`. 
- /// - /// # Panics - /// - /// Panics when `buf.len() < 2`. - #[inline] - fn write_u16(self, buf: &mut [u8], n: u16) { - let bytes = if self.is_big_endian() { - n.to_be_bytes() - } else { - n.to_le_bytes() - }; - buf[..2].copy_from_slice(&bytes); - } - - /// Writes an unsigned 32 bit integer `n` to `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 4`. - #[inline] - fn write_u32(self, buf: &mut [u8], n: u32) { - let bytes = if self.is_big_endian() { - n.to_be_bytes() - } else { - n.to_le_bytes() - }; - buf[..4].copy_from_slice(&bytes); - } - - /// Writes an unsigned 64 bit integer `n` to `buf`. - /// - /// # Panics - /// - /// Panics when `buf.len() < 8`. - #[inline] - fn write_u64(self, buf: &mut [u8], n: u64) { - let bytes = if self.is_big_endian() { - n.to_be_bytes() - } else { - n.to_le_bytes() - }; - buf[..8].copy_from_slice(&bytes); - } -} - -/// Byte order that is selectable at runtime. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum RunTimeEndian { - /// Little endian byte order. - Little, - /// Big endian byte order. - Big, -} - -impl Default for RunTimeEndian { - #[cfg(target_endian = "little")] - #[inline] - fn default() -> RunTimeEndian { - RunTimeEndian::Little - } - - #[cfg(target_endian = "big")] - #[inline] - fn default() -> RunTimeEndian { - RunTimeEndian::Big - } -} - -impl Endianity for RunTimeEndian { - #[inline] - fn is_big_endian(self) -> bool { - self != RunTimeEndian::Little - } -} - -/// Little endian byte order. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct LittleEndian; - -impl Default for LittleEndian { - #[inline] - fn default() -> LittleEndian { - LittleEndian - } -} - -impl Endianity for LittleEndian { - #[inline] - fn is_big_endian(self) -> bool { - false - } -} - -/// Big endian byte order. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct BigEndian; - -impl Default for BigEndian { - #[inline] - fn default() -> BigEndian { - BigEndian - } -} - -impl Endianity for BigEndian { - #[inline] - fn is_big_endian(self) -> bool { - true - } -} - -/// The native endianity for the target platform. -#[cfg(target_endian = "little")] -pub type NativeEndian = LittleEndian; - -#[cfg(target_endian = "little")] -#[allow(non_upper_case_globals)] -#[doc(hidden)] -pub const NativeEndian: LittleEndian = LittleEndian; - -/// The native endianity for the target platform. -#[cfg(target_endian = "big")] -pub type NativeEndian = BigEndian; - -#[cfg(target_endian = "big")] -#[allow(non_upper_case_globals)] -#[doc(hidden)] -pub const NativeEndian: BigEndian = BigEndian; diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/leb128.rs s390-tools-2.33.1/rust-vendor/gimli/src/leb128.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/leb128.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/leb128.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,612 +0,0 @@ -//! Read and write DWARF's "Little Endian Base 128" (LEB128) variable length -//! integer encoding. -//! -//! The implementation is a direct translation of the psuedocode in the DWARF 4 -//! standard's appendix C. -//! -//! Read and write signed integers: -//! -//! ``` -//! # #[cfg(all(feature = "read", feature = "write"))] { -//! use gimli::{EndianSlice, NativeEndian, leb128}; -//! -//! let mut buf = [0; 1024]; -//! -//! // Write to anything that implements `std::io::Write`. -//! { -//! let mut writable = &mut buf[..]; -//! leb128::write::signed(&mut writable, -12345).expect("Should write number"); -//! } -//! -//! // Read from anything that implements `gimli::Reader`. -//! let mut readable = EndianSlice::new(&buf[..], NativeEndian); -//! let val = leb128::read::signed(&mut readable).expect("Should read number"); -//! assert_eq!(val, -12345); -//! # } -//! ``` -//! -//! 
Or read and write unsigned integers: -//! -//! ``` -//! # #[cfg(all(feature = "read", feature = "write"))] { -//! use gimli::{EndianSlice, NativeEndian, leb128}; -//! -//! let mut buf = [0; 1024]; -//! -//! { -//! let mut writable = &mut buf[..]; -//! leb128::write::unsigned(&mut writable, 98765).expect("Should write number"); -//! } -//! -//! let mut readable = EndianSlice::new(&buf[..], NativeEndian); -//! let val = leb128::read::unsigned(&mut readable).expect("Should read number"); -//! assert_eq!(val, 98765); -//! # } -//! ``` - -const CONTINUATION_BIT: u8 = 1 << 7; -#[cfg(feature = "read-core")] -const SIGN_BIT: u8 = 1 << 6; - -#[inline] -fn low_bits_of_byte(byte: u8) -> u8 { - byte & !CONTINUATION_BIT -} - -#[inline] -#[allow(dead_code)] -fn low_bits_of_u64(val: u64) -> u8 { - let byte = val & u64::from(core::u8::MAX); - low_bits_of_byte(byte as u8) -} - -/// A module for reading signed and unsigned integers that have been LEB128 -/// encoded. -#[cfg(feature = "read-core")] -pub mod read { - use super::{low_bits_of_byte, CONTINUATION_BIT, SIGN_BIT}; - use crate::read::{Error, Reader, Result}; - - /// Read bytes until the LEB128 continuation bit is not set. - pub fn skip(r: &mut R) -> Result<()> { - loop { - let byte = r.read_u8()?; - if byte & CONTINUATION_BIT == 0 { - return Ok(()); - } - } - } - - /// Read an unsigned LEB128 number from the given `Reader` and - /// return it or an error if reading failed. - pub fn unsigned(r: &mut R) -> Result { - let mut result = 0; - let mut shift = 0; - - loop { - let byte = r.read_u8()?; - if shift == 63 && byte != 0x00 && byte != 0x01 { - return Err(Error::BadUnsignedLeb128); - } - - let low_bits = u64::from(low_bits_of_byte(byte)); - result |= low_bits << shift; - - if byte & CONTINUATION_BIT == 0 { - return Ok(result); - } - - shift += 7; - } - } - - /// Read an LEB128 u16 from the given `Reader` and - /// return it or an error if reading failed. 
- pub fn u16(r: &mut R) -> Result { - let byte = r.read_u8()?; - let mut result = u16::from(low_bits_of_byte(byte)); - if byte & CONTINUATION_BIT == 0 { - return Ok(result); - } - - let byte = r.read_u8()?; - result |= u16::from(low_bits_of_byte(byte)) << 7; - if byte & CONTINUATION_BIT == 0 { - return Ok(result); - } - - let byte = r.read_u8()?; - if byte > 0x03 { - return Err(Error::BadUnsignedLeb128); - } - result += u16::from(byte) << 14; - Ok(result) - } - - /// Read a signed LEB128 number from the given `Reader` and - /// return it or an error if reading failed. - pub fn signed(r: &mut R) -> Result { - let mut result = 0; - let mut shift = 0; - let size = 64; - let mut byte; - - loop { - byte = r.read_u8()?; - if shift == 63 && byte != 0x00 && byte != 0x7f { - return Err(Error::BadSignedLeb128); - } - - let low_bits = i64::from(low_bits_of_byte(byte)); - result |= low_bits << shift; - shift += 7; - - if byte & CONTINUATION_BIT == 0 { - break; - } - } - - if shift < size && (SIGN_BIT & byte) == SIGN_BIT { - // Sign extend the result. - result |= !0 << shift; - } - - Ok(result) - } -} - -/// A module for writing integers encoded as LEB128. -#[cfg(feature = "write")] -pub mod write { - use super::{low_bits_of_u64, CONTINUATION_BIT}; - use std::io; - - /// Write the given unsigned number using the LEB128 encoding to the given - /// `std::io::Write`able. Returns the number of bytes written to `w`, or an - /// error if writing failed. - pub fn unsigned(w: &mut W, mut val: u64) -> Result - where - W: io::Write, - { - let mut bytes_written = 0; - loop { - let mut byte = low_bits_of_u64(val); - val >>= 7; - if val != 0 { - // More bytes to come, so set the continuation bit. - byte |= CONTINUATION_BIT; - } - - let buf = [byte]; - w.write_all(&buf)?; - bytes_written += 1; - - if val == 0 { - return Ok(bytes_written); - } - } - } - - /// Return the size of the LEB128 encoding of the given unsigned number. 
- pub fn uleb128_size(mut val: u64) -> usize { - let mut size = 0; - loop { - val >>= 7; - size += 1; - if val == 0 { - return size; - } - } - } - - /// Write the given signed number using the LEB128 encoding to the given - /// `std::io::Write`able. Returns the number of bytes written to `w`, or an - /// error if writing failed. - pub fn signed(w: &mut W, mut val: i64) -> Result - where - W: io::Write, - { - let mut bytes_written = 0; - loop { - let mut byte = val as u8; - // Keep the sign bit for testing - val >>= 6; - let done = val == 0 || val == -1; - if done { - byte &= !CONTINUATION_BIT; - } else { - // Remove the sign bit - val >>= 1; - // More bytes to come, so set the continuation bit. - byte |= CONTINUATION_BIT; - } - - let buf = [byte]; - w.write_all(&buf)?; - bytes_written += 1; - - if done { - return Ok(bytes_written); - } - } - } - - /// Return the size of the LEB128 encoding of the given signed number. - pub fn sleb128_size(mut val: i64) -> usize { - let mut size = 0; - loop { - val >>= 6; - let done = val == 0 || val == -1; - val >>= 1; - size += 1; - if done { - return size; - } - } - } -} - -#[cfg(test)] -#[cfg(all(feature = "read", feature = "write"))] -mod tests { - use super::{low_bits_of_byte, low_bits_of_u64, read, write, CONTINUATION_BIT}; - use crate::endianity::NativeEndian; - use crate::read::{EndianSlice, Error, ReaderOffsetId}; - - trait ResultExt { - fn map_eof(self, input: &[u8]) -> Self; - } - - impl ResultExt for Result { - fn map_eof(self, input: &[u8]) -> Self { - match self { - Err(Error::UnexpectedEof(id)) => { - let id = ReaderOffsetId(id.0 - input.as_ptr() as u64); - Err(Error::UnexpectedEof(id)) - } - r => r, - } - } - } - - #[test] - fn test_low_bits_of_byte() { - for i in 0..127 { - assert_eq!(i, low_bits_of_byte(i)); - assert_eq!(i, low_bits_of_byte(i | CONTINUATION_BIT)); - } - } - - #[test] - fn test_low_bits_of_u64() { - for i in 0u64..127 { - assert_eq!(i as u8, low_bits_of_u64(1 << 16 | i)); - assert_eq!( - i as u8, - 
low_bits_of_u64(i << 16 | i | (u64::from(CONTINUATION_BIT))) - ); - } - } - - // Examples from the DWARF 4 standard, section 7.6, figure 22. - #[test] - fn test_read_unsigned() { - let buf = [2u8]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - 2, - read::unsigned(&mut readable).expect("Should read number") - ); - - let buf = [127u8]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - 127, - read::unsigned(&mut readable).expect("Should read number") - ); - - let buf = [CONTINUATION_BIT, 1]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - 128, - read::unsigned(&mut readable).expect("Should read number") - ); - - let buf = [1u8 | CONTINUATION_BIT, 1]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - 129, - read::unsigned(&mut readable).expect("Should read number") - ); - - let buf = [2u8 | CONTINUATION_BIT, 1]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - 130, - read::unsigned(&mut readable).expect("Should read number") - ); - - let buf = [57u8 | CONTINUATION_BIT, 100]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - 12857, - read::unsigned(&mut readable).expect("Should read number") - ); - } - - // Examples from the DWARF 4 standard, section 7.6, figure 23. 
- #[test] - fn test_read_signed() { - let buf = [2u8]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!(2, read::signed(&mut readable).expect("Should read number")); - - let buf = [0x7eu8]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!(-2, read::signed(&mut readable).expect("Should read number")); - - let buf = [127u8 | CONTINUATION_BIT, 0]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - 127, - read::signed(&mut readable).expect("Should read number") - ); - - let buf = [1u8 | CONTINUATION_BIT, 0x7f]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - -127, - read::signed(&mut readable).expect("Should read number") - ); - - let buf = [CONTINUATION_BIT, 1]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - 128, - read::signed(&mut readable).expect("Should read number") - ); - - let buf = [CONTINUATION_BIT, 0x7f]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - -128, - read::signed(&mut readable).expect("Should read number") - ); - - let buf = [1u8 | CONTINUATION_BIT, 1]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - 129, - read::signed(&mut readable).expect("Should read number") - ); - - let buf = [0x7fu8 | CONTINUATION_BIT, 0x7e]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - -129, - read::signed(&mut readable).expect("Should read number") - ); - } - - #[test] - fn test_read_signed_63_bits() { - let buf = [ - CONTINUATION_BIT, - CONTINUATION_BIT, - CONTINUATION_BIT, - CONTINUATION_BIT, - CONTINUATION_BIT, - CONTINUATION_BIT, - CONTINUATION_BIT, - CONTINUATION_BIT, - 0x40, - ]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - -0x4000_0000_0000_0000, - read::signed(&mut readable).expect("Should read number") - ); - } - - #[test] - fn test_read_unsigned_not_enough_data() { - let buf = 
[CONTINUATION_BIT]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - read::unsigned(&mut readable).map_eof(&buf), - Err(Error::UnexpectedEof(ReaderOffsetId(1))) - ); - } - - #[test] - fn test_read_signed_not_enough_data() { - let buf = [CONTINUATION_BIT]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - read::signed(&mut readable).map_eof(&buf), - Err(Error::UnexpectedEof(ReaderOffsetId(1))) - ); - } - - #[test] - fn test_write_unsigned_not_enough_space() { - let mut buf = [0; 1]; - let mut writable = &mut buf[..]; - match write::unsigned(&mut writable, 128) { - Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::WriteZero), - otherwise => panic!("Unexpected: {:?}", otherwise), - } - } - - #[test] - fn test_write_signed_not_enough_space() { - let mut buf = [0; 1]; - let mut writable = &mut buf[..]; - match write::signed(&mut writable, 128) { - Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::WriteZero), - otherwise => panic!("Unexpected: {:?}", otherwise), - } - } - - #[test] - fn dogfood_signed() { - fn inner(i: i64) { - let mut buf = [0u8; 1024]; - - { - let mut writable = &mut buf[..]; - write::signed(&mut writable, i).expect("Should write signed number"); - } - - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - let result = read::signed(&mut readable).expect("Should be able to read it back again"); - assert_eq!(i, result); - } - for i in -513..513 { - inner(i); - } - inner(core::i64::MIN); - } - - #[test] - fn dogfood_unsigned() { - for i in 0..1025 { - let mut buf = [0u8; 1024]; - - { - let mut writable = &mut buf[..]; - write::unsigned(&mut writable, i).expect("Should write signed number"); - } - - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - let result = - read::unsigned(&mut readable).expect("Should be able to read it back again"); - assert_eq!(i, result); - } - } - - #[test] - fn test_read_unsigned_overflow() { - let buf = [ - 2u8 | CONTINUATION_BIT, - 2 | 
CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 1, - ]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert!(read::unsigned(&mut readable).is_err()); - } - - #[test] - fn test_read_signed_overflow() { - let buf = [ - 2u8 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 2 | CONTINUATION_BIT, - 1, - ]; - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert!(read::signed(&mut readable).is_err()); - } - - #[test] - fn test_read_multiple() { - let buf = [2u8 | CONTINUATION_BIT, 1u8, 1u8]; - - let mut readable = EndianSlice::new(&buf[..], NativeEndian); - assert_eq!( - read::unsigned(&mut readable).expect("Should read first number"), - 130u64 - ); - assert_eq!( - 
read::unsigned(&mut readable).expect("Should read first number"), - 1u64 - ); - } - - #[test] - fn test_read_u16() { - for (buf, val) in [ - (&[2][..], 2), - (&[0x7f][..], 0x7f), - (&[0x80, 1][..], 0x80), - (&[0x81, 1][..], 0x81), - (&[0x82, 1][..], 0x82), - (&[0xff, 0x7f][..], 0x3fff), - (&[0x80, 0x80, 1][..], 0x4000), - (&[0xff, 0xff, 1][..], 0x7fff), - (&[0xff, 0xff, 3][..], 0xffff), - ] - .iter() - { - let mut readable = EndianSlice::new(buf, NativeEndian); - assert_eq!(*val, read::u16(&mut readable).expect("Should read number")); - } - - for buf in [ - &[0x80][..], - &[0x80, 0x80][..], - &[0x80, 0x80, 4][..], - &[0x80, 0x80, 0x80, 3][..], - ] - .iter() - { - let mut readable = EndianSlice::new(buf, NativeEndian); - assert!(read::u16(&mut readable).is_err(), "{:?}", buf); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/lib.rs s390-tools-2.33.1/rust-vendor/gimli/src/lib.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,79 +0,0 @@ -//! `gimli` is a library for reading and writing the -//! [DWARF debugging format](https://dwarfstd.org/). -//! -//! See the [read](./read/index.html) and [write](./write/index.html) modules -//! for examples and API documentation. -//! -//! ## Cargo Features -//! -//! Cargo features that can be enabled with `gimli`: -//! -//! * `std`: Enabled by default. Use the `std` library. Disabling this feature -//! allows using `gimli` in embedded environments that do not have access to -//! `std`. Note that even when `std` is disabled, `gimli` still requires an -//! implementation of the `alloc` crate. -//! -//! * `read`: Enabled by default. Enables the `read` module. Use of `std` is -//! optional. -//! -//! * `write`: Enabled by default. Enables the `write` module. Always uses -//! the `std` library. 
-#![deny(missing_docs)] -#![deny(missing_debug_implementations)] -// Selectively enable rust 2018 warnings -#![warn(bare_trait_objects)] -#![warn(unused_extern_crates)] -#![warn(ellipsis_inclusive_range_patterns)] -//#![warn(elided_lifetimes_in_paths)] -#![warn(explicit_outlives_requirements)] -// Style. -#![allow(clippy::bool_to_int_with_if)] -#![allow(clippy::collapsible_else_if)] -#![allow(clippy::comparison_chain)] -#![allow(clippy::manual_range_contains)] -#![allow(clippy::needless_late_init)] -#![allow(clippy::too_many_arguments)] -// False positives with `fallible_iterator`. -#![allow(clippy::should_implement_trait)] -// False positives. -#![allow(clippy::derive_partial_eq_without_eq)] -#![no_std] - -#[allow(unused_imports)] -#[cfg(any(feature = "read", feature = "write"))] -#[macro_use] -extern crate alloc; - -#[cfg(any(feature = "std", feature = "write"))] -#[macro_use] -extern crate std; - -#[cfg(feature = "endian-reader")] -pub use stable_deref_trait::{CloneStableDeref, StableDeref}; - -mod common; -pub use crate::common::*; - -mod arch; -pub use crate::arch::*; - -pub mod constants; -// For backwards compat. -pub use crate::constants::*; - -mod endianity; -pub use crate::endianity::*; - -pub mod leb128; - -#[cfg(feature = "read-core")] -pub mod read; -// For backwards compat. -#[cfg(feature = "read-core")] -pub use crate::read::*; - -#[cfg(feature = "write")] -pub mod write; - -#[cfg(test)] -mod test_util; diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/abbrev.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/abbrev.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/abbrev.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/abbrev.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1102 +0,0 @@ -//! Functions for parsing DWARF debugging abbreviations. 
- -use alloc::collections::btree_map; -use alloc::sync::Arc; -use alloc::vec::Vec; -use core::convert::TryFrom; -use core::fmt::{self, Debug}; -use core::iter::FromIterator; -use core::ops::Deref; - -use crate::common::{DebugAbbrevOffset, Encoding, SectionId}; -use crate::constants; -use crate::endianity::Endianity; -use crate::read::{ - DebugInfoUnitHeadersIter, EndianSlice, Error, Reader, ReaderOffset, Result, Section, UnitHeader, -}; - -/// The `DebugAbbrev` struct represents the abbreviations describing -/// `DebuggingInformationEntry`s' attribute names and forms found in the -/// `.debug_abbrev` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugAbbrev { - debug_abbrev_section: R, -} - -impl<'input, Endian> DebugAbbrev> -where - Endian: Endianity, -{ - /// Construct a new `DebugAbbrev` instance from the data in the `.debug_abbrev` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_abbrev` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. - /// - /// ``` - /// use gimli::{DebugAbbrev, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_abbrev_section_somehow = || &buf; - /// let debug_abbrev = DebugAbbrev::new(read_debug_abbrev_section_somehow(), LittleEndian); - /// ``` - pub fn new(debug_abbrev_section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(debug_abbrev_section, endian)) - } -} - -impl DebugAbbrev { - /// Parse the abbreviations at the given `offset` within this - /// `.debug_abbrev` section. - /// - /// The `offset` should generally be retrieved from a unit header. 
- pub fn abbreviations( - &self, - debug_abbrev_offset: DebugAbbrevOffset, - ) -> Result { - let input = &mut self.debug_abbrev_section.clone(); - input.skip(debug_abbrev_offset.0)?; - Abbreviations::parse(input) - } -} - -impl DebugAbbrev { - /// Create a `DebugAbbrev` section that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::DebugAbbrev> = load_section(); - /// // Create a reference to the DWARF section. - /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugAbbrev - where - F: FnMut(&'a T) -> R, - { - borrow(&self.debug_abbrev_section).into() - } -} - -impl Section for DebugAbbrev { - fn id() -> SectionId { - SectionId::DebugAbbrev - } - - fn reader(&self) -> &R { - &self.debug_abbrev_section - } -} - -impl From for DebugAbbrev { - fn from(debug_abbrev_section: R) -> Self { - DebugAbbrev { - debug_abbrev_section, - } - } -} - -/// The strategy to use for caching abbreviations. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[non_exhaustive] -pub enum AbbreviationsCacheStrategy { - /// Cache abbreviations that are used more than once. - /// - /// This is useful if the units in the `.debug_info` section will be parsed only once. - Duplicates, - /// Cache all abbreviations. - /// - /// This is useful if the units in the `.debug_info` section will be parsed more than once. - All, -} - -/// A cache of previously parsed `Abbreviations`. -#[derive(Debug, Default)] -pub struct AbbreviationsCache { - abbreviations: btree_map::BTreeMap>>, -} - -impl AbbreviationsCache { - /// Create an empty abbreviations cache. 
- pub fn new() -> Self { - Self::default() - } - - /// Parse abbreviations and store them in the cache. - /// - /// This will iterate over the given units to determine the abbreviations - /// offsets. Any existing cache entries are discarded. - /// - /// Errors during parsing abbreviations are also stored in the cache. - /// Errors during iterating over the units are ignored. - pub fn populate( - &mut self, - strategy: AbbreviationsCacheStrategy, - debug_abbrev: &DebugAbbrev, - mut units: DebugInfoUnitHeadersIter, - ) { - let mut offsets = Vec::new(); - match strategy { - AbbreviationsCacheStrategy::Duplicates => { - while let Ok(Some(unit)) = units.next() { - offsets.push(unit.debug_abbrev_offset()); - } - offsets.sort_unstable_by_key(|offset| offset.0); - let mut prev_offset = R::Offset::from_u8(0); - let mut count = 0; - offsets.retain(|offset| { - if count == 0 || prev_offset != offset.0 { - prev_offset = offset.0; - count = 1; - } else { - count += 1; - } - count == 2 - }); - } - AbbreviationsCacheStrategy::All => { - while let Ok(Some(unit)) = units.next() { - offsets.push(unit.debug_abbrev_offset()); - } - offsets.sort_unstable_by_key(|offset| offset.0); - offsets.dedup(); - } - } - self.abbreviations = offsets - .into_iter() - .map(|offset| { - ( - offset.0.into_u64(), - debug_abbrev.abbreviations(offset).map(Arc::new), - ) - }) - .collect(); - } - - /// Set an entry in the abbreviations cache. - /// - /// This is only required if you want to manually populate the cache. - pub fn set( - &mut self, - offset: DebugAbbrevOffset, - abbreviations: Arc, - ) { - self.abbreviations - .insert(offset.0.into_u64(), Ok(abbreviations)); - } - - /// Parse the abbreviations at the given offset. - /// - /// This uses the cache if possible, but does not update it. 
- pub fn get( - &self, - debug_abbrev: &DebugAbbrev, - offset: DebugAbbrevOffset, - ) -> Result> { - match self.abbreviations.get(&offset.0.into_u64()) { - Some(entry) => entry.clone(), - None => debug_abbrev.abbreviations(offset).map(Arc::new), - } - } -} - -/// A set of type abbreviations. -/// -/// Construct an `Abbreviations` instance with the -/// [`abbreviations()`](struct.UnitHeader.html#method.abbreviations) -/// method. -#[derive(Debug, Default, Clone)] -pub struct Abbreviations { - vec: Vec, - map: btree_map::BTreeMap, -} - -impl Abbreviations { - /// Construct a new, empty set of abbreviations. - fn empty() -> Abbreviations { - Abbreviations { - vec: Vec::new(), - map: btree_map::BTreeMap::new(), - } - } - - /// Insert an abbreviation into the set. - /// - /// Returns `Ok` if it is the first abbreviation in the set with its code, - /// `Err` if the code is a duplicate and there already exists an - /// abbreviation in the set with the given abbreviation's code. - fn insert(&mut self, abbrev: Abbreviation) -> ::core::result::Result<(), ()> { - let code_usize = abbrev.code as usize; - if code_usize as u64 == abbrev.code { - // Optimize for sequential abbreviation codes by storing them - // in a Vec, as long as the map doesn't already contain them. - // A potential further optimization would be to allow some - // holes in the Vec, but there's no need for that yet. - if code_usize - 1 < self.vec.len() { - return Err(()); - } else if code_usize - 1 == self.vec.len() { - if !self.map.is_empty() && self.map.contains_key(&abbrev.code) { - return Err(()); - } else { - self.vec.push(abbrev); - return Ok(()); - } - } - } - match self.map.entry(abbrev.code) { - btree_map::Entry::Occupied(_) => Err(()), - btree_map::Entry::Vacant(entry) => { - entry.insert(abbrev); - Ok(()) - } - } - } - - /// Get the abbreviation associated with the given code. 
- #[inline] - pub fn get(&self, code: u64) -> Option<&Abbreviation> { - if let Ok(code) = usize::try_from(code) { - let index = code.checked_sub(1)?; - if index < self.vec.len() { - return Some(&self.vec[index]); - } - } - - self.map.get(&code) - } - - /// Parse a series of abbreviations, terminated by a null abbreviation. - fn parse(input: &mut R) -> Result { - let mut abbrevs = Abbreviations::empty(); - - while let Some(abbrev) = Abbreviation::parse(input)? { - if abbrevs.insert(abbrev).is_err() { - return Err(Error::DuplicateAbbreviationCode); - } - } - - Ok(abbrevs) - } -} - -/// An abbreviation describes the shape of a `DebuggingInformationEntry`'s type: -/// its code, tag type, whether it has children, and its set of attributes. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Abbreviation { - code: u64, - tag: constants::DwTag, - has_children: constants::DwChildren, - attributes: Attributes, -} - -impl Abbreviation { - /// Construct a new `Abbreviation`. - /// - /// ### Panics - /// - /// Panics if `code` is `0`. - pub(crate) fn new( - code: u64, - tag: constants::DwTag, - has_children: constants::DwChildren, - attributes: Attributes, - ) -> Abbreviation { - assert_ne!(code, 0); - Abbreviation { - code, - tag, - has_children, - attributes, - } - } - - /// Get this abbreviation's code. - #[inline] - pub fn code(&self) -> u64 { - self.code - } - - /// Get this abbreviation's tag. - #[inline] - pub fn tag(&self) -> constants::DwTag { - self.tag - } - - /// Return true if this abbreviation's type has children, false otherwise. - #[inline] - pub fn has_children(&self) -> bool { - self.has_children == constants::DW_CHILDREN_yes - } - - /// Get this abbreviation's attributes. - #[inline] - pub fn attributes(&self) -> &[AttributeSpecification] { - &self.attributes[..] - } - - /// Parse an abbreviation's tag. 
- fn parse_tag(input: &mut R) -> Result { - let val = input.read_uleb128_u16()?; - if val == 0 { - Err(Error::AbbreviationTagZero) - } else { - Ok(constants::DwTag(val)) - } - } - - /// Parse an abbreviation's "does the type have children?" byte. - fn parse_has_children(input: &mut R) -> Result { - let val = input.read_u8()?; - let val = constants::DwChildren(val); - if val == constants::DW_CHILDREN_no || val == constants::DW_CHILDREN_yes { - Ok(val) - } else { - Err(Error::BadHasChildren) - } - } - - /// Parse a series of attribute specifications, terminated by a null attribute - /// specification. - fn parse_attributes(input: &mut R) -> Result { - let mut attrs = Attributes::new(); - - while let Some(attr) = AttributeSpecification::parse(input)? { - attrs.push(attr); - } - - Ok(attrs) - } - - /// Parse an abbreviation. Return `None` for the null abbreviation, `Some` - /// for an actual abbreviation. - fn parse(input: &mut R) -> Result> { - let code = input.read_uleb128()?; - if code == 0 { - return Ok(None); - } - - let tag = Self::parse_tag(input)?; - let has_children = Self::parse_has_children(input)?; - let attributes = Self::parse_attributes(input)?; - let abbrev = Abbreviation::new(code, tag, has_children, attributes); - Ok(Some(abbrev)) - } -} - -/// A list of attributes found in an `Abbreviation` -#[derive(Clone)] -pub(crate) enum Attributes { - Inline { - buf: [AttributeSpecification; MAX_ATTRIBUTES_INLINE], - len: usize, - }, - Heap(Vec), -} - -// Length of 5 based on benchmark results for both x86-64 and i686. -const MAX_ATTRIBUTES_INLINE: usize = 5; - -impl Attributes { - /// Returns a new empty list of attributes - fn new() -> Attributes { - let default = - AttributeSpecification::new(constants::DW_AT_null, constants::DW_FORM_null, None); - Attributes::Inline { - buf: [default; 5], - len: 0, - } - } - - /// Pushes a new value onto this list of attributes. 
- fn push(&mut self, attr: AttributeSpecification) { - match self { - Attributes::Heap(list) => list.push(attr), - Attributes::Inline { - buf, - len: MAX_ATTRIBUTES_INLINE, - } => { - let mut list = buf.to_vec(); - list.push(attr); - *self = Attributes::Heap(list); - } - Attributes::Inline { buf, len } => { - buf[*len] = attr; - *len += 1; - } - } - } -} - -impl Debug for Attributes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -impl PartialEq for Attributes { - fn eq(&self, other: &Attributes) -> bool { - **self == **other - } -} - -impl Eq for Attributes {} - -impl Deref for Attributes { - type Target = [AttributeSpecification]; - fn deref(&self) -> &[AttributeSpecification] { - match self { - Attributes::Inline { buf, len } => &buf[..*len], - Attributes::Heap(list) => list, - } - } -} - -impl FromIterator for Attributes { - fn from_iter(iter: I) -> Attributes - where - I: IntoIterator, - { - let mut list = Attributes::new(); - for item in iter { - list.push(item); - } - list - } -} - -impl From> for Attributes { - fn from(list: Vec) -> Attributes { - Attributes::Heap(list) - } -} - -/// The description of an attribute in an abbreviated type. It is a pair of name -/// and form. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct AttributeSpecification { - name: constants::DwAt, - form: constants::DwForm, - implicit_const_value: i64, -} - -impl AttributeSpecification { - /// Construct a new `AttributeSpecification` from the given name and form - /// and implicit const value. 
- #[inline] - pub fn new( - name: constants::DwAt, - form: constants::DwForm, - implicit_const_value: Option, - ) -> AttributeSpecification { - debug_assert!( - (form == constants::DW_FORM_implicit_const && implicit_const_value.is_some()) - || (form != constants::DW_FORM_implicit_const && implicit_const_value.is_none()) - ); - AttributeSpecification { - name, - form, - implicit_const_value: implicit_const_value.unwrap_or(0), - } - } - - /// Get the attribute's name. - #[inline] - pub fn name(&self) -> constants::DwAt { - self.name - } - - /// Get the attribute's form. - #[inline] - pub fn form(&self) -> constants::DwForm { - self.form - } - - /// Get the attribute's implicit const value. - #[inline] - pub fn implicit_const_value(&self) -> Option { - if self.form == constants::DW_FORM_implicit_const { - Some(self.implicit_const_value) - } else { - None - } - } - - /// Return the size of the attribute, in bytes. - /// - /// Note that because some attributes are variably sized, the size cannot - /// always be known without parsing, in which case we return `None`. - pub fn size(&self, header: &UnitHeader) -> Option { - get_attribute_size(self.form, header.encoding()).map(usize::from) - } - - /// Parse an attribute's form. - fn parse_form(input: &mut R) -> Result { - let val = input.read_uleb128_u16()?; - if val == 0 { - Err(Error::AttributeFormZero) - } else { - Ok(constants::DwForm(val)) - } - } - - /// Parse an attribute specification. Returns `None` for the null attribute - /// specification, `Some` for an actual attribute specification. - fn parse(input: &mut R) -> Result> { - let name = input.read_uleb128_u16()?; - if name == 0 { - // Parse the null attribute specification. 
- let form = input.read_uleb128_u16()?; - return if form == 0 { - Ok(None) - } else { - Err(Error::ExpectedZero) - }; - } - - let name = constants::DwAt(name); - let form = Self::parse_form(input)?; - let implicit_const_value = if form == constants::DW_FORM_implicit_const { - Some(input.read_sleb128()?) - } else { - None - }; - let spec = AttributeSpecification::new(name, form, implicit_const_value); - Ok(Some(spec)) - } -} - -#[inline] -pub(crate) fn get_attribute_size(form: constants::DwForm, encoding: Encoding) -> Option { - match form { - constants::DW_FORM_addr => Some(encoding.address_size), - - constants::DW_FORM_implicit_const | constants::DW_FORM_flag_present => Some(0), - - constants::DW_FORM_data1 - | constants::DW_FORM_flag - | constants::DW_FORM_strx1 - | constants::DW_FORM_ref1 - | constants::DW_FORM_addrx1 => Some(1), - - constants::DW_FORM_data2 - | constants::DW_FORM_ref2 - | constants::DW_FORM_addrx2 - | constants::DW_FORM_strx2 => Some(2), - - constants::DW_FORM_addrx3 | constants::DW_FORM_strx3 => Some(3), - - constants::DW_FORM_data4 - | constants::DW_FORM_ref_sup4 - | constants::DW_FORM_ref4 - | constants::DW_FORM_strx4 - | constants::DW_FORM_addrx4 => Some(4), - - constants::DW_FORM_data8 - | constants::DW_FORM_ref8 - | constants::DW_FORM_ref_sig8 - | constants::DW_FORM_ref_sup8 => Some(8), - - constants::DW_FORM_data16 => Some(16), - - constants::DW_FORM_sec_offset - | constants::DW_FORM_GNU_ref_alt - | constants::DW_FORM_strp - | constants::DW_FORM_strp_sup - | constants::DW_FORM_GNU_strp_alt - | constants::DW_FORM_line_strp => Some(encoding.format.word_size()), - - constants::DW_FORM_ref_addr => { - // This is an offset, but DWARF version 2 specifies that DW_FORM_ref_addr - // has the same size as an address on the target system. This was changed - // in DWARF version 3. - Some(if encoding.version == 2 { - encoding.address_size - } else { - encoding.format.word_size() - }) - } - - // Variably sized forms. 
- constants::DW_FORM_block - | constants::DW_FORM_block1 - | constants::DW_FORM_block2 - | constants::DW_FORM_block4 - | constants::DW_FORM_exprloc - | constants::DW_FORM_ref_udata - | constants::DW_FORM_string - | constants::DW_FORM_sdata - | constants::DW_FORM_udata - | constants::DW_FORM_indirect => None, - - // We don't know the size of unknown forms. - _ => None, - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use crate::constants; - use crate::endianity::LittleEndian; - use crate::read::{EndianSlice, Error}; - use crate::test_util::GimliSectionMethods; - #[cfg(target_pointer_width = "32")] - use core::u32; - use test_assembler::Section; - - pub trait AbbrevSectionMethods { - fn abbrev(self, code: u64, tag: constants::DwTag, children: constants::DwChildren) -> Self; - fn abbrev_null(self) -> Self; - fn abbrev_attr(self, name: constants::DwAt, form: constants::DwForm) -> Self; - fn abbrev_attr_implicit_const(self, name: constants::DwAt, value: i64) -> Self; - fn abbrev_attr_null(self) -> Self; - } - - impl AbbrevSectionMethods for Section { - fn abbrev(self, code: u64, tag: constants::DwTag, children: constants::DwChildren) -> Self { - self.uleb(code).uleb(tag.0.into()).D8(children.0) - } - - fn abbrev_null(self) -> Self { - self.D8(0) - } - - fn abbrev_attr(self, name: constants::DwAt, form: constants::DwForm) -> Self { - self.uleb(name.0.into()).uleb(form.0.into()) - } - - fn abbrev_attr_implicit_const(self, name: constants::DwAt, value: i64) -> Self { - self.uleb(name.0.into()) - .uleb(constants::DW_FORM_implicit_const.0.into()) - .sleb(value) - } - - fn abbrev_attr_null(self) -> Self { - self.D8(0).D8(0) - } - } - - #[test] - fn test_debug_abbrev_ok() { - let extra_start = [1, 2, 3, 4]; - let expected_rest = [5, 6, 7, 8]; - #[rustfmt::skip] - let buf = Section::new() - .append_bytes(&extra_start) - .abbrev(2, constants::DW_TAG_subprogram, constants::DW_CHILDREN_no) - .abbrev_attr(constants::DW_AT_name, constants::DW_FORM_string) - 
.abbrev_attr_null() - .abbrev(1, constants::DW_TAG_compile_unit, constants::DW_CHILDREN_yes) - .abbrev_attr(constants::DW_AT_producer, constants::DW_FORM_strp) - .abbrev_attr(constants::DW_AT_language, constants::DW_FORM_data2) - .abbrev_attr_null() - .abbrev_null() - .append_bytes(&expected_rest) - .get_contents() - .unwrap(); - - let abbrev1 = Abbreviation::new( - 1, - constants::DW_TAG_compile_unit, - constants::DW_CHILDREN_yes, - vec![ - AttributeSpecification::new( - constants::DW_AT_producer, - constants::DW_FORM_strp, - None, - ), - AttributeSpecification::new( - constants::DW_AT_language, - constants::DW_FORM_data2, - None, - ), - ] - .into(), - ); - - let abbrev2 = Abbreviation::new( - 2, - constants::DW_TAG_subprogram, - constants::DW_CHILDREN_no, - vec![AttributeSpecification::new( - constants::DW_AT_name, - constants::DW_FORM_string, - None, - )] - .into(), - ); - - let debug_abbrev = DebugAbbrev::new(&buf, LittleEndian); - let debug_abbrev_offset = DebugAbbrevOffset(extra_start.len()); - let abbrevs = debug_abbrev - .abbreviations(debug_abbrev_offset) - .expect("Should parse abbreviations"); - assert_eq!(abbrevs.get(1), Some(&abbrev1)); - assert_eq!(abbrevs.get(2), Some(&abbrev2)); - } - - #[test] - fn test_abbreviations_insert() { - fn abbrev(code: u16) -> Abbreviation { - Abbreviation::new( - code.into(), - constants::DwTag(code), - constants::DW_CHILDREN_no, - vec![].into(), - ) - } - - fn assert_abbrev(abbrevs: &Abbreviations, code: u16) { - let abbrev = abbrevs.get(code.into()).unwrap(); - assert_eq!(abbrev.tag(), constants::DwTag(code)); - } - - // Sequential insert. - let mut abbrevs = Abbreviations::empty(); - abbrevs.insert(abbrev(1)).unwrap(); - abbrevs.insert(abbrev(2)).unwrap(); - assert_eq!(abbrevs.vec.len(), 2); - assert!(abbrevs.map.is_empty()); - assert_abbrev(&abbrevs, 1); - assert_abbrev(&abbrevs, 2); - - // Out of order insert. 
- let mut abbrevs = Abbreviations::empty(); - abbrevs.insert(abbrev(2)).unwrap(); - abbrevs.insert(abbrev(3)).unwrap(); - assert!(abbrevs.vec.is_empty()); - assert_abbrev(&abbrevs, 2); - assert_abbrev(&abbrevs, 3); - - // Mixed order insert. - let mut abbrevs = Abbreviations::empty(); - abbrevs.insert(abbrev(1)).unwrap(); - abbrevs.insert(abbrev(3)).unwrap(); - abbrevs.insert(abbrev(2)).unwrap(); - assert_eq!(abbrevs.vec.len(), 2); - assert_abbrev(&abbrevs, 1); - assert_abbrev(&abbrevs, 2); - assert_abbrev(&abbrevs, 3); - - // Duplicate code in vec. - let mut abbrevs = Abbreviations::empty(); - abbrevs.insert(abbrev(1)).unwrap(); - abbrevs.insert(abbrev(2)).unwrap(); - assert_eq!(abbrevs.insert(abbrev(1)), Err(())); - assert_eq!(abbrevs.insert(abbrev(2)), Err(())); - - // Duplicate code in map when adding to map. - let mut abbrevs = Abbreviations::empty(); - abbrevs.insert(abbrev(2)).unwrap(); - assert_eq!(abbrevs.insert(abbrev(2)), Err(())); - - // Duplicate code in map when adding to vec. - let mut abbrevs = Abbreviations::empty(); - abbrevs.insert(abbrev(2)).unwrap(); - abbrevs.insert(abbrev(1)).unwrap(); - assert_eq!(abbrevs.insert(abbrev(2)), Err(())); - - // 32-bit usize conversions. - let mut abbrevs = Abbreviations::empty(); - abbrevs.insert(abbrev(2)).unwrap(); - } - - #[test] - #[cfg(target_pointer_width = "32")] - fn test_abbreviations_insert_32() { - fn abbrev(code: u64) -> Abbreviation { - Abbreviation::new( - code, - constants::DwTag(code as u16), - constants::DW_CHILDREN_no, - vec![].into(), - ) - } - - fn assert_abbrev(abbrevs: &Abbreviations, code: u64) { - let abbrev = abbrevs.get(code).unwrap(); - assert_eq!(abbrev.tag(), constants::DwTag(code as u16)); - } - - let mut abbrevs = Abbreviations::empty(); - abbrevs.insert(abbrev(1)).unwrap(); - - let wrap_code = (u32::MAX as u64 + 1) + 1; - // `get` should not treat the wrapped code as `1`. - assert_eq!(abbrevs.get(wrap_code), None); - // `insert` should not treat the wrapped code as `1`. 
- abbrevs.insert(abbrev(wrap_code)).unwrap(); - assert_abbrev(&abbrevs, 1); - assert_abbrev(&abbrevs, wrap_code); - } - - #[test] - fn test_parse_abbreviations_ok() { - let expected_rest = [1, 2, 3, 4]; - #[rustfmt::skip] - let buf = Section::new() - .abbrev(2, constants::DW_TAG_subprogram, constants::DW_CHILDREN_no) - .abbrev_attr(constants::DW_AT_name, constants::DW_FORM_string) - .abbrev_attr_null() - .abbrev(1, constants::DW_TAG_compile_unit, constants::DW_CHILDREN_yes) - .abbrev_attr(constants::DW_AT_producer, constants::DW_FORM_strp) - .abbrev_attr(constants::DW_AT_language, constants::DW_FORM_data2) - .abbrev_attr_null() - .abbrev_null() - .append_bytes(&expected_rest) - .get_contents() - .unwrap(); - let rest = &mut EndianSlice::new(&*buf, LittleEndian); - - let abbrev1 = Abbreviation::new( - 1, - constants::DW_TAG_compile_unit, - constants::DW_CHILDREN_yes, - vec![ - AttributeSpecification::new( - constants::DW_AT_producer, - constants::DW_FORM_strp, - None, - ), - AttributeSpecification::new( - constants::DW_AT_language, - constants::DW_FORM_data2, - None, - ), - ] - .into(), - ); - - let abbrev2 = Abbreviation::new( - 2, - constants::DW_TAG_subprogram, - constants::DW_CHILDREN_no, - vec![AttributeSpecification::new( - constants::DW_AT_name, - constants::DW_FORM_string, - None, - )] - .into(), - ); - - let abbrevs = Abbreviations::parse(rest).expect("Should parse abbreviations"); - assert_eq!(abbrevs.get(1), Some(&abbrev1)); - assert_eq!(abbrevs.get(2), Some(&abbrev2)); - assert_eq!(*rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_abbreviations_duplicate() { - let expected_rest = [1, 2, 3, 4]; - #[rustfmt::skip] - let buf = Section::new() - .abbrev(1, constants::DW_TAG_subprogram, constants::DW_CHILDREN_no) - .abbrev_attr(constants::DW_AT_name, constants::DW_FORM_string) - .abbrev_attr_null() - .abbrev(1, constants::DW_TAG_compile_unit, constants::DW_CHILDREN_yes) - .abbrev_attr(constants::DW_AT_producer, 
constants::DW_FORM_strp) - .abbrev_attr(constants::DW_AT_language, constants::DW_FORM_data2) - .abbrev_attr_null() - .abbrev_null() - .append_bytes(&expected_rest) - .get_contents() - .unwrap(); - let buf = &mut EndianSlice::new(&*buf, LittleEndian); - - match Abbreviations::parse(buf) { - Err(Error::DuplicateAbbreviationCode) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_abbreviation_tag_ok() { - let buf = [0x01, 0x02]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - let tag = Abbreviation::parse_tag(rest).expect("Should parse tag"); - assert_eq!(tag, constants::DW_TAG_array_type); - assert_eq!(*rest, EndianSlice::new(&buf[1..], LittleEndian)); - } - - #[test] - fn test_parse_abbreviation_tag_zero() { - let buf = [0x00]; - let buf = &mut EndianSlice::new(&buf, LittleEndian); - match Abbreviation::parse_tag(buf) { - Err(Error::AbbreviationTagZero) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_abbreviation_has_children() { - let buf = [0x00, 0x01, 0x02]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - let val = Abbreviation::parse_has_children(rest).expect("Should parse children"); - assert_eq!(val, constants::DW_CHILDREN_no); - let val = Abbreviation::parse_has_children(rest).expect("Should parse children"); - assert_eq!(val, constants::DW_CHILDREN_yes); - match Abbreviation::parse_has_children(rest) { - Err(Error::BadHasChildren) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_abbreviation_ok() { - let expected_rest = [0x01, 0x02, 0x03, 0x04]; - let buf = Section::new() - .abbrev(1, constants::DW_TAG_subprogram, constants::DW_CHILDREN_no) - .abbrev_attr(constants::DW_AT_name, constants::DW_FORM_string) - .abbrev_attr_null() - .append_bytes(&expected_rest) - .get_contents() - .unwrap(); - let rest = &mut EndianSlice::new(&*buf, LittleEndian); - - let expect = 
Some(Abbreviation::new( - 1, - constants::DW_TAG_subprogram, - constants::DW_CHILDREN_no, - vec![AttributeSpecification::new( - constants::DW_AT_name, - constants::DW_FORM_string, - None, - )] - .into(), - )); - - let abbrev = Abbreviation::parse(rest).expect("Should parse abbreviation"); - assert_eq!(abbrev, expect); - assert_eq!(*rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_abbreviation_implicit_const_ok() { - let expected_rest = [0x01, 0x02, 0x03, 0x04]; - let buf = Section::new() - .abbrev(1, constants::DW_TAG_subprogram, constants::DW_CHILDREN_no) - .abbrev_attr_implicit_const(constants::DW_AT_name, -42) - .abbrev_attr_null() - .append_bytes(&expected_rest) - .get_contents() - .unwrap(); - let rest = &mut EndianSlice::new(&*buf, LittleEndian); - - let expect = Some(Abbreviation::new( - 1, - constants::DW_TAG_subprogram, - constants::DW_CHILDREN_no, - vec![AttributeSpecification::new( - constants::DW_AT_name, - constants::DW_FORM_implicit_const, - Some(-42), - )] - .into(), - )); - - let abbrev = Abbreviation::parse(rest).expect("Should parse abbreviation"); - assert_eq!(abbrev, expect); - assert_eq!(*rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_abbreviation_implicit_const_no_const() { - let buf = Section::new() - .abbrev(1, constants::DW_TAG_subprogram, constants::DW_CHILDREN_no) - .abbrev_attr(constants::DW_AT_name, constants::DW_FORM_implicit_const) - .get_contents() - .unwrap(); - let buf = &mut EndianSlice::new(&*buf, LittleEndian); - - match Abbreviation::parse(buf) { - Err(Error::UnexpectedEof(_)) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - } - - #[test] - fn test_parse_null_abbreviation_ok() { - let expected_rest = [0x01, 0x02, 0x03, 0x04]; - let buf = Section::new() - .abbrev_null() - .append_bytes(&expected_rest) - .get_contents() - .unwrap(); - let rest = &mut EndianSlice::new(&*buf, LittleEndian); - - let abbrev = 
Abbreviation::parse(rest).expect("Should parse null abbreviation"); - assert!(abbrev.is_none()); - assert_eq!(*rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_attribute_form_ok() { - let buf = [0x01, 0x02]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - let tag = AttributeSpecification::parse_form(rest).expect("Should parse form"); - assert_eq!(tag, constants::DW_FORM_addr); - assert_eq!(*rest, EndianSlice::new(&buf[1..], LittleEndian)); - } - - #[test] - fn test_parse_attribute_form_zero() { - let buf = [0x00]; - let buf = &mut EndianSlice::new(&buf, LittleEndian); - match AttributeSpecification::parse_form(buf) { - Err(Error::AttributeFormZero) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_null_attribute_specification_ok() { - let buf = [0x00, 0x00, 0x01]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - let attr = - AttributeSpecification::parse(rest).expect("Should parse null attribute specification"); - assert!(attr.is_none()); - assert_eq!(*rest, EndianSlice::new(&buf[2..], LittleEndian)); - } - - #[test] - fn test_parse_attribute_specifications_name_zero() { - let buf = [0x00, 0x01, 0x00, 0x00]; - let buf = &mut EndianSlice::new(&buf, LittleEndian); - match AttributeSpecification::parse(buf) { - Err(Error::ExpectedZero) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_attribute_specifications_form_zero() { - let buf = [0x01, 0x00, 0x00, 0x00]; - let buf = &mut EndianSlice::new(&buf, LittleEndian); - match AttributeSpecification::parse(buf) { - Err(Error::AttributeFormZero) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_get_abbrev_zero() { - let mut abbrevs = Abbreviations::empty(); - abbrevs - .insert(Abbreviation::new( - 1, - constants::DwTag(1), - constants::DW_CHILDREN_no, - vec![].into(), - )) - .unwrap(); - 
assert!(abbrevs.get(0).is_none()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/addr.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/addr.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/addr.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/addr.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,128 +0,0 @@ -use crate::common::{DebugAddrBase, DebugAddrIndex, SectionId}; -use crate::read::{Reader, ReaderOffset, Result, Section}; - -/// The raw contents of the `.debug_addr` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugAddr { - section: R, -} - -impl DebugAddr { - // TODO: add an iterator over the sets of addresses in the section. - // This is not needed for common usage of the section though. - - /// Returns the address at the given `base` and `index`. - /// - /// A set of addresses in the `.debug_addr` section consists of a header - /// followed by a series of addresses. - /// - /// The `base` must be the `DW_AT_addr_base` value from the compilation unit DIE. - /// This is an offset that points to the first address following the header. - /// - /// The `index` is the value of a `DW_FORM_addrx` attribute. - /// - /// The `address_size` must be the size of the address for the compilation unit. - /// This value must also match the header. However, note that we do not parse the - /// header to validate this, since locating the header is unreliable, and the GNU - /// extensions do not emit it. - pub fn get_address( - &self, - address_size: u8, - base: DebugAddrBase, - index: DebugAddrIndex, - ) -> Result { - let input = &mut self.section.clone(); - input.skip(base.0)?; - input.skip(R::Offset::from_u64( - index.0.into_u64() * u64::from(address_size), - )?)?; - input.read_address(address_size) - } -} - -impl DebugAddr { - /// Create a `DebugAddr` section that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. 
- /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::DebugAddr> = load_section(); - /// // Create a reference to the DWARF section. - /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugAddr - where - F: FnMut(&'a T) -> R, - { - borrow(&self.section).into() - } -} - -impl Section for DebugAddr { - fn id() -> SectionId { - SectionId::DebugAddr - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugAddr { - fn from(section: R) -> Self { - DebugAddr { section } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::read::EndianSlice; - use crate::test_util::GimliSectionMethods; - use crate::{Format, LittleEndian}; - use test_assembler::{Endian, Label, LabelMaker, Section}; - - #[test] - fn test_get_address() { - for format in vec![Format::Dwarf32, Format::Dwarf64] { - for address_size in vec![4, 8] { - let zero = Label::new(); - let length = Label::new(); - let start = Label::new(); - let first = Label::new(); - let end = Label::new(); - let mut section = Section::with_endian(Endian::Little) - .mark(&zero) - .initial_length(format, &length, &start) - .D16(5) - .D8(address_size) - .D8(0) - .mark(&first); - for i in 0..20 { - section = section.word(address_size, 1000 + i); - } - section = section.mark(&end); - length.set_const((&end - &start) as u64); - - let section = section.get_contents().unwrap(); - let debug_addr = DebugAddr::from(EndianSlice::new(§ion, LittleEndian)); - let base = DebugAddrBase((&first - &zero) as usize); - - assert_eq!( - debug_addr.get_address(address_size, base, DebugAddrIndex(0)), - Ok(1000) - ); - assert_eq!( - debug_addr.get_address(address_size, base, DebugAddrIndex(19)), - Ok(1019) - ); - } - } - } -} diff 
-Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/aranges.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/aranges.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/aranges.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/aranges.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,660 +0,0 @@ -use crate::common::{DebugArangesOffset, DebugInfoOffset, Encoding, SectionId}; -use crate::endianity::Endianity; -use crate::read::{EndianSlice, Error, Range, Reader, ReaderOffset, Result, Section}; - -/// The `DebugAranges` struct represents the DWARF address range information -/// found in the `.debug_aranges` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugAranges { - section: R, -} - -impl<'input, Endian> DebugAranges> -where - Endian: Endianity, -{ - /// Construct a new `DebugAranges` instance from the data in the `.debug_aranges` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_aranges` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. - /// - /// ``` - /// use gimli::{DebugAranges, LittleEndian}; - /// - /// # let buf = []; - /// # let read_debug_aranges_section = || &buf; - /// let debug_aranges = - /// DebugAranges::new(read_debug_aranges_section(), LittleEndian); - /// ``` - pub fn new(section: &'input [u8], endian: Endian) -> Self { - DebugAranges { - section: EndianSlice::new(section, endian), - } - } -} - -impl DebugAranges { - /// Iterate the sets of entries in the `.debug_aranges` section. - /// - /// Each set of entries belongs to a single unit. - pub fn headers(&self) -> ArangeHeaderIter { - ArangeHeaderIter { - input: self.section.clone(), - offset: DebugArangesOffset(R::Offset::from_u8(0)), - } - } - - /// Get the header at the given offset. 
- pub fn header(&self, offset: DebugArangesOffset) -> Result> { - let mut input = self.section.clone(); - input.skip(offset.0)?; - ArangeHeader::parse(&mut input, offset) - } -} - -impl DebugAranges { - /// Create a `DebugAranges` section that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::DebugAranges> = load_section(); - /// // Create a reference to the DWARF section. - /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugAranges - where - F: FnMut(&'a T) -> R, - { - borrow(&self.section).into() - } -} - -impl Section for DebugAranges { - fn id() -> SectionId { - SectionId::DebugAranges - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugAranges { - fn from(section: R) -> Self { - DebugAranges { section } - } -} - -/// An iterator over the headers of a `.debug_aranges` section. -#[derive(Clone, Debug)] -pub struct ArangeHeaderIter { - input: R, - offset: DebugArangesOffset, -} - -impl ArangeHeaderIter { - /// Advance the iterator to the next header. 
- pub fn next(&mut self) -> Result>> { - if self.input.is_empty() { - return Ok(None); - } - - let len = self.input.len(); - match ArangeHeader::parse(&mut self.input, self.offset) { - Ok(header) => { - self.offset.0 += len - self.input.len(); - Ok(Some(header)) - } - Err(e) => { - self.input.empty(); - Err(e) - } - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for ArangeHeaderIter { - type Item = ArangeHeader; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - ArangeHeaderIter::next(self) - } -} - -/// A header for a set of entries in the `.debug_arange` section. -/// -/// These entries all belong to a single unit. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ArangeHeader::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - offset: DebugArangesOffset, - encoding: Encoding, - length: Offset, - debug_info_offset: DebugInfoOffset, - segment_size: u8, - entries: R, -} - -impl ArangeHeader -where - R: Reader, - Offset: ReaderOffset, -{ - fn parse(input: &mut R, offset: DebugArangesOffset) -> Result { - let (length, format) = input.read_initial_length()?; - let mut rest = input.split(length)?; - - // Check the version. The DWARF 5 spec says that this is always 2, but version 3 - // has been observed in the wild, potentially due to a bug; see - // https://github.com/gimli-rs/gimli/issues/559 for more information. - // lldb allows versions 2 through 5, possibly by mistake. 
- let version = rest.read_u16()?; - if version != 2 && version != 3 { - return Err(Error::UnknownVersion(u64::from(version))); - } - - let debug_info_offset = rest.read_offset(format).map(DebugInfoOffset)?; - let address_size = rest.read_u8()?; - let segment_size = rest.read_u8()?; - - // unit_length + version + offset + address_size + segment_size - let header_length = format.initial_length_size() + 2 + format.word_size() + 1 + 1; - - // The first tuple following the header in each set begins at an offset that is - // a multiple of the size of a single tuple (that is, the size of a segment selector - // plus twice the size of an address). - let tuple_length = address_size - .checked_mul(2) - .and_then(|x| x.checked_add(segment_size)) - .ok_or(Error::InvalidAddressRange)?; - if tuple_length == 0 { - return Err(Error::InvalidAddressRange)?; - } - let padding = if header_length % tuple_length == 0 { - 0 - } else { - tuple_length - header_length % tuple_length - }; - rest.skip(R::Offset::from_u8(padding))?; - - let encoding = Encoding { - format, - version, - address_size, - // TODO: segment_size - }; - Ok(ArangeHeader { - offset, - encoding, - length, - debug_info_offset, - segment_size, - entries: rest, - }) - } - - /// Return the offset of this header within the `.debug_aranges` section. - #[inline] - pub fn offset(&self) -> DebugArangesOffset { - self.offset - } - - /// Return the length of this set of entries, including the header. - #[inline] - pub fn length(&self) -> Offset { - self.length - } - - /// Return the encoding parameters for this set of entries. - #[inline] - pub fn encoding(&self) -> Encoding { - self.encoding - } - - /// Return the segment size for this set of entries. - #[inline] - pub fn segment_size(&self) -> u8 { - self.segment_size - } - - /// Return the offset into the .debug_info section for this set of arange entries. 
- #[inline] - pub fn debug_info_offset(&self) -> DebugInfoOffset { - self.debug_info_offset - } - - /// Return the arange entries in this set. - #[inline] - pub fn entries(&self) -> ArangeEntryIter { - ArangeEntryIter { - input: self.entries.clone(), - encoding: self.encoding, - segment_size: self.segment_size, - } - } -} - -/// An iterator over the aranges from a `.debug_aranges` section. -/// -/// Can be [used with -/// `FallibleIterator`](./index.html#using-with-fallibleiterator). -#[derive(Debug, Clone)] -pub struct ArangeEntryIter { - input: R, - encoding: Encoding, - segment_size: u8, -} - -impl ArangeEntryIter { - /// Advance the iterator and return the next arange. - /// - /// Returns the newly parsed arange as `Ok(Some(arange))`. Returns `Ok(None)` - /// when iteration is complete and all aranges have already been parsed and - /// yielded. If an error occurs while parsing the next arange, then this error - /// is returned as `Err(e)`, and all subsequent calls return `Ok(None)`. - pub fn next(&mut self) -> Result> { - if self.input.is_empty() { - return Ok(None); - } - - match ArangeEntry::parse(&mut self.input, self.encoding, self.segment_size) { - Ok(Some(entry)) => Ok(Some(entry)), - Ok(None) => { - self.input.empty(); - Ok(None) - } - Err(e) => { - self.input.empty(); - Err(e) - } - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for ArangeEntryIter { - type Item = ArangeEntry; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - ArangeEntryIter::next(self) - } -} - -/// A single parsed arange. -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct ArangeEntry { - segment: Option, - address: u64, - length: u64, -} - -impl ArangeEntry { - /// Parse a single arange. Return `None` for the null arange, `Some` for an actual arange. 
- fn parse( - input: &mut R, - encoding: Encoding, - segment_size: u8, - ) -> Result> { - let address_size = encoding.address_size; - - let tuple_length = R::Offset::from_u8(2 * address_size + segment_size); - if tuple_length > input.len() { - input.empty(); - return Ok(None); - } - - let segment = if segment_size != 0 { - input.read_address(segment_size)? - } else { - 0 - }; - let address = input.read_address(address_size)?; - let length = input.read_address(address_size)?; - - match (segment, address, length) { - // This is meant to be a null terminator, but in practice it can occur - // before the end, possibly due to a linker omitting a function and - // leaving an unrelocated entry. - (0, 0, 0) => Self::parse(input, encoding, segment_size), - _ => Ok(Some(ArangeEntry { - segment: if segment_size != 0 { - Some(segment) - } else { - None - }, - address, - length, - })), - } - } - - /// Return the segment selector of this arange. - #[inline] - pub fn segment(&self) -> Option { - self.segment - } - - /// Return the beginning address of this arange. - #[inline] - pub fn address(&self) -> u64 { - self.address - } - - /// Return the length of this arange. - #[inline] - pub fn length(&self) -> u64 { - self.length - } - - /// Return the range. - #[inline] - pub fn range(&self) -> Range { - Range { - begin: self.address, - end: self.address.wrapping_add(self.length), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::common::{DebugInfoOffset, Format}; - use crate::endianity::LittleEndian; - use crate::read::EndianSlice; - - #[test] - fn test_iterate_headers() { - #[rustfmt::skip] - let buf = [ - // 32-bit length = 28. - 0x1c, 0x00, 0x00, 0x00, - // Version. - 0x02, 0x00, - // Offset. - 0x01, 0x02, 0x03, 0x04, - // Address size. - 0x04, - // Segment size. - 0x00, - // Dummy padding and arange tuples. - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - - // 32-bit length = 36. 
- 0x24, 0x00, 0x00, 0x00, - // Version. - 0x02, 0x00, - // Offset. - 0x11, 0x12, 0x13, 0x14, - // Address size. - 0x04, - // Segment size. - 0x00, - // Dummy padding and arange tuples. - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - ]; - - let debug_aranges = DebugAranges::new(&buf, LittleEndian); - let mut headers = debug_aranges.headers(); - - let header = headers - .next() - .expect("should parse header ok") - .expect("should have a header"); - assert_eq!(header.offset(), DebugArangesOffset(0)); - assert_eq!(header.debug_info_offset(), DebugInfoOffset(0x0403_0201)); - - let header = headers - .next() - .expect("should parse header ok") - .expect("should have a header"); - assert_eq!(header.offset(), DebugArangesOffset(0x20)); - assert_eq!(header.debug_info_offset(), DebugInfoOffset(0x1413_1211)); - } - - #[test] - fn test_parse_header_ok() { - #[rustfmt::skip] - let buf = [ - // 32-bit length = 32. - 0x20, 0x00, 0x00, 0x00, - // Version. - 0x02, 0x00, - // Offset. - 0x01, 0x02, 0x03, 0x04, - // Address size. - 0x08, - // Segment size. - 0x04, - // Length to here = 12, tuple length = 20. - // Padding to tuple length multiple = 4. - 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - - // Dummy arange tuple data. - 0x20, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - - // Dummy next arange. 
- 0x30, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - ]; - - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - let header = - ArangeHeader::parse(rest, DebugArangesOffset(0x10)).expect("should parse header ok"); - - assert_eq!( - *rest, - EndianSlice::new(&buf[buf.len() - 16..], LittleEndian) - ); - assert_eq!( - header, - ArangeHeader { - offset: DebugArangesOffset(0x10), - encoding: Encoding { - format: Format::Dwarf32, - version: 2, - address_size: 8, - }, - length: 0x20, - debug_info_offset: DebugInfoOffset(0x0403_0201), - segment_size: 4, - entries: EndianSlice::new(&buf[buf.len() - 32..buf.len() - 16], LittleEndian), - } - ); - } - - #[test] - fn test_parse_header_overflow_error() { - #[rustfmt::skip] - let buf = [ - // 32-bit length = 32. - 0x20, 0x00, 0x00, 0x00, - // Version. - 0x02, 0x00, - // Offset. - 0x01, 0x02, 0x03, 0x04, - // Address size. - 0xff, - // Segment size. - 0xff, - // Length to here = 12, tuple length = 20. - // Padding to tuple length multiple = 4. - 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - - // Dummy arange tuple data. - 0x20, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - - // Dummy next arange. - 0x30, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - ]; - - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - let error = ArangeHeader::parse(rest, DebugArangesOffset(0x10)) - .expect_err("should fail to parse header"); - assert_eq!(error, Error::InvalidAddressRange); - } - - #[test] - fn test_parse_header_div_by_zero_error() { - #[rustfmt::skip] - let buf = [ - // 32-bit length = 32. - 0x20, 0x00, 0x00, 0x00, - // Version. - 0x02, 0x00, - // Offset. - 0x01, 0x02, 0x03, 0x04, - // Address size = 0. Could cause a division by zero if we aren't - // careful. - 0x00, - // Segment size. - 0x00, - // Length to here = 12, tuple length = 20. 
- // Padding to tuple length multiple = 4. - 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - - // Dummy arange tuple data. - 0x20, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - - // Dummy next arange. - 0x30, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - ]; - - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - let error = ArangeHeader::parse(rest, DebugArangesOffset(0x10)) - .expect_err("should fail to parse header"); - assert_eq!(error, Error::InvalidAddressRange); - } - - #[test] - fn test_parse_entry_ok() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 2, - address_size: 4, - }; - let segment_size = 0; - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - let entry = - ArangeEntry::parse(rest, encoding, segment_size).expect("should parse entry ok"); - assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian)); - assert_eq!( - entry, - Some(ArangeEntry { - segment: None, - address: 0x0403_0201, - length: 0x0807_0605, - }) - ); - } - - #[test] - fn test_parse_entry_segment() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 2, - address_size: 4, - }; - let segment_size = 8; - #[rustfmt::skip] - let buf = [ - // Segment. - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - // Address. - 0x01, 0x02, 0x03, 0x04, - // Length. - 0x05, 0x06, 0x07, 0x08, - // Next tuple. 
- 0x09 - ]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - let entry = - ArangeEntry::parse(rest, encoding, segment_size).expect("should parse entry ok"); - assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian)); - assert_eq!( - entry, - Some(ArangeEntry { - segment: Some(0x1817_1615_1413_1211), - address: 0x0403_0201, - length: 0x0807_0605, - }) - ); - } - - #[test] - fn test_parse_entry_zero() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 2, - address_size: 4, - }; - let segment_size = 0; - #[rustfmt::skip] - let buf = [ - // Zero tuple. - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // Address. - 0x01, 0x02, 0x03, 0x04, - // Length. - 0x05, 0x06, 0x07, 0x08, - // Next tuple. - 0x09 - ]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - let entry = - ArangeEntry::parse(rest, encoding, segment_size).expect("should parse entry ok"); - assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian)); - assert_eq!( - entry, - Some(ArangeEntry { - segment: None, - address: 0x0403_0201, - length: 0x0807_0605, - }) - ); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/cfi.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/cfi.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/cfi.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/cfi.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,7823 +0,0 @@ -#[cfg(feature = "read")] -use alloc::boxed::Box; - -use core::cmp::{Ord, Ordering}; -use core::fmt::{self, Debug}; -use core::iter::FromIterator; -use core::mem; -use core::num::Wrapping; - -use super::util::{ArrayLike, ArrayVec}; -use crate::common::{ - DebugFrameOffset, EhFrameOffset, Encoding, Format, Register, SectionId, Vendor, -}; -use crate::constants::{self, DwEhPe}; -use crate::endianity::Endianity; -use crate::read::{ - EndianSlice, Error, Expression, Reader, ReaderOffset, Result, Section, StoreOnHeap, -}; - -/// `DebugFrame` contains the `.debug_frame` 
section's frame unwinding -/// information required to unwind to and recover registers from older frames on -/// the stack. For example, this is useful for a debugger that wants to print -/// locals in a backtrace. -/// -/// Most interesting methods are defined in the -/// [`UnwindSection`](trait.UnwindSection.html) trait. -/// -/// ### Differences between `.debug_frame` and `.eh_frame` -/// -/// While the `.debug_frame` section's information has a lot of overlap with the -/// `.eh_frame` section's information, the `.eh_frame` information tends to only -/// encode the subset of information needed for exception handling. Often, only -/// one of `.eh_frame` or `.debug_frame` will be present in an object file. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct DebugFrame { - section: R, - address_size: u8, - segment_size: u8, - vendor: Vendor, -} - -impl DebugFrame { - /// Set the size of a target address in bytes. - /// - /// This defaults to the native word size. - /// This is only used if the CIE version is less than 4. - pub fn set_address_size(&mut self, address_size: u8) { - self.address_size = address_size - } - - /// Set the size of a segment selector in bytes. - /// - /// This defaults to 0. - /// This is only used if the CIE version is less than 4. - pub fn set_segment_size(&mut self, segment_size: u8) { - self.segment_size = segment_size - } - - /// Set the vendor extensions to use. - /// - /// This defaults to `Vendor::Default`. - pub fn set_vendor(&mut self, vendor: Vendor) { - self.vendor = vendor; - } -} - -impl<'input, Endian> DebugFrame> -where - Endian: Endianity, -{ - /// Construct a new `DebugFrame` instance from the data in the - /// `.debug_frame` section. - /// - /// It is the caller's responsibility to read the section and present it as - /// a `&[u8]` slice. That means using some ELF loader on Linux, a Mach-O - /// loader on macOS, etc. 
- /// - /// ``` - /// use gimli::{DebugFrame, NativeEndian}; - /// - /// // Use with `.debug_frame` - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_frame_section_somehow = || &buf; - /// let debug_frame = DebugFrame::new(read_debug_frame_section_somehow(), NativeEndian); - /// ``` - pub fn new(section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(section, endian)) - } -} - -impl Section for DebugFrame { - fn id() -> SectionId { - SectionId::DebugFrame - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugFrame { - fn from(section: R) -> Self { - // Default to no segments and native word size. - DebugFrame { - section, - address_size: mem::size_of::() as u8, - segment_size: 0, - vendor: Vendor::Default, - } - } -} - -/// `EhFrameHdr` contains the information about the `.eh_frame_hdr` section. -/// -/// A pointer to the start of the `.eh_frame` data, and optionally, a binary -/// search table of pointers to the `.eh_frame` records that are found in this section. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct EhFrameHdr(R); - -/// `ParsedEhFrameHdr` contains the parsed information from the `.eh_frame_hdr` section. -#[derive(Clone, Debug)] -pub struct ParsedEhFrameHdr { - address_size: u8, - section: R, - - eh_frame_ptr: Pointer, - fde_count: u64, - table_enc: DwEhPe, - table: R, -} - -impl<'input, Endian> EhFrameHdr> -where - Endian: Endianity, -{ - /// Constructs a new `EhFrameHdr` instance from the data in the `.eh_frame_hdr` section. - pub fn new(section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(section, endian)) - } -} - -impl EhFrameHdr { - /// Parses this `EhFrameHdr` to a `ParsedEhFrameHdr`. 
- pub fn parse(&self, bases: &BaseAddresses, address_size: u8) -> Result> { - let mut reader = self.0.clone(); - let version = reader.read_u8()?; - if version != 1 { - return Err(Error::UnknownVersion(u64::from(version))); - } - - let eh_frame_ptr_enc = parse_pointer_encoding(&mut reader)?; - let fde_count_enc = parse_pointer_encoding(&mut reader)?; - let table_enc = parse_pointer_encoding(&mut reader)?; - - let parameters = PointerEncodingParameters { - bases: &bases.eh_frame_hdr, - func_base: None, - address_size, - section: &self.0, - }; - - // Omitting this pointer is not valid (defeats the purpose of .eh_frame_hdr entirely) - if eh_frame_ptr_enc == constants::DW_EH_PE_omit { - return Err(Error::CannotParseOmitPointerEncoding); - } - let eh_frame_ptr = parse_encoded_pointer(eh_frame_ptr_enc, ¶meters, &mut reader)?; - - let fde_count; - if fde_count_enc == constants::DW_EH_PE_omit || table_enc == constants::DW_EH_PE_omit { - fde_count = 0 - } else { - fde_count = parse_encoded_pointer(fde_count_enc, ¶meters, &mut reader)?.direct()?; - } - - Ok(ParsedEhFrameHdr { - address_size, - section: self.0.clone(), - - eh_frame_ptr, - fde_count, - table_enc, - table: reader, - }) - } -} - -impl Section for EhFrameHdr { - fn id() -> SectionId { - SectionId::EhFrameHdr - } - - fn reader(&self) -> &R { - &self.0 - } -} - -impl From for EhFrameHdr { - fn from(section: R) -> Self { - EhFrameHdr(section) - } -} - -impl ParsedEhFrameHdr { - /// Returns the address of the binary's `.eh_frame` section. - pub fn eh_frame_ptr(&self) -> Pointer { - self.eh_frame_ptr - } - - /// Retrieves the CFI binary search table, if there is one. - pub fn table(&self) -> Option> { - // There are two big edge cases here: - // * You search the table for an invalid address. As this is just a binary - // search table, we always have to return a valid result for that (unless - // you specify an address that is lower than the first address in the - // table). 
Since this means that you have to recheck that the FDE contains - // your address anyways, we just return the first FDE even when the address - // is too low. After all, we're just doing a normal binary search. - // * This falls apart when the table is empty - there is no entry we could - // return. We conclude that an empty table is not really a table at all. - if self.fde_count == 0 { - None - } else { - Some(EhHdrTable { hdr: self }) - } - } -} - -/// An iterator for `.eh_frame_hdr` section's binary search table. -/// -/// Each table entry consists of a tuple containing an `initial_location` and `address`. -/// The `initial location` represents the first address that the targeted FDE -/// is able to decode. The `address` is the address of the FDE in the `.eh_frame` section. -/// The `address` can be converted with `EhHdrTable::pointer_to_offset` and `EhFrame::fde_from_offset` to an FDE. -#[derive(Debug)] -pub struct EhHdrTableIter<'a, 'bases, R: Reader> { - hdr: &'a ParsedEhFrameHdr, - table: R, - bases: &'bases BaseAddresses, - remain: u64, -} - -impl<'a, 'bases, R: Reader> EhHdrTableIter<'a, 'bases, R> { - /// Yield the next entry in the `EhHdrTableIter`. 
- pub fn next(&mut self) -> Result> { - if self.remain == 0 { - return Ok(None); - } - - let parameters = PointerEncodingParameters { - bases: &self.bases.eh_frame_hdr, - func_base: None, - address_size: self.hdr.address_size, - section: &self.hdr.section, - }; - - self.remain -= 1; - let from = parse_encoded_pointer(self.hdr.table_enc, ¶meters, &mut self.table)?; - let to = parse_encoded_pointer(self.hdr.table_enc, ¶meters, &mut self.table)?; - Ok(Some((from, to))) - } - /// Yield the nth entry in the `EhHdrTableIter` - pub fn nth(&mut self, n: usize) -> Result> { - use core::convert::TryFrom; - let size = match self.hdr.table_enc.format() { - constants::DW_EH_PE_uleb128 | constants::DW_EH_PE_sleb128 => { - return Err(Error::VariableLengthSearchTable); - } - constants::DW_EH_PE_sdata2 | constants::DW_EH_PE_udata2 => 2, - constants::DW_EH_PE_sdata4 | constants::DW_EH_PE_udata4 => 4, - constants::DW_EH_PE_sdata8 | constants::DW_EH_PE_udata8 => 8, - _ => return Err(Error::UnknownPointerEncoding), - }; - - let row_size = size * 2; - let n = u64::try_from(n).map_err(|_| Error::UnsupportedOffset)?; - self.remain = self.remain.saturating_sub(n); - self.table.skip(R::Offset::from_u64(n * row_size)?)?; - self.next() - } -} - -#[cfg(feature = "fallible-iterator")] -impl<'a, 'bases, R: Reader> fallible_iterator::FallibleIterator for EhHdrTableIter<'a, 'bases, R> { - type Item = (Pointer, Pointer); - type Error = Error; - fn next(&mut self) -> Result> { - EhHdrTableIter::next(self) - } - - fn size_hint(&self) -> (usize, Option) { - use core::convert::TryInto; - ( - self.remain.try_into().unwrap_or(0), - self.remain.try_into().ok(), - ) - } - - fn nth(&mut self, n: usize) -> Result> { - EhHdrTableIter::nth(self, n) - } -} - -/// The CFI binary search table that is an optional part of the `.eh_frame_hdr` section. 
-#[derive(Debug, Clone)] -pub struct EhHdrTable<'a, R: Reader> { - hdr: &'a ParsedEhFrameHdr, -} - -impl<'a, R: Reader + 'a> EhHdrTable<'a, R> { - /// Return an iterator that can walk the `.eh_frame_hdr` table. - /// - /// Each table entry consists of a tuple containing an `initial_location` and `address`. - /// The `initial location` represents the first address that the targeted FDE - /// is able to decode. The `address` is the address of the FDE in the `.eh_frame` section. - /// The `address` can be converted with `EhHdrTable::pointer_to_offset` and `EhFrame::fde_from_offset` to an FDE. - pub fn iter<'bases>(&self, bases: &'bases BaseAddresses) -> EhHdrTableIter<'_, 'bases, R> { - EhHdrTableIter { - hdr: self.hdr, - bases, - remain: self.hdr.fde_count, - table: self.hdr.table.clone(), - } - } - /// *Probably* returns a pointer to the FDE for the given address. - /// - /// This performs a binary search, so if there is no FDE for the given address, - /// this function **will** return a pointer to any other FDE that's close by. - /// - /// To be sure, you **must** call `contains` on the FDE. 
- pub fn lookup(&self, address: u64, bases: &BaseAddresses) -> Result { - let size = match self.hdr.table_enc.format() { - constants::DW_EH_PE_uleb128 | constants::DW_EH_PE_sleb128 => { - return Err(Error::VariableLengthSearchTable); - } - constants::DW_EH_PE_sdata2 | constants::DW_EH_PE_udata2 => 2, - constants::DW_EH_PE_sdata4 | constants::DW_EH_PE_udata4 => 4, - constants::DW_EH_PE_sdata8 | constants::DW_EH_PE_udata8 => 8, - _ => return Err(Error::UnknownPointerEncoding), - }; - - let row_size = size * 2; - - let mut len = self.hdr.fde_count; - - let mut reader = self.hdr.table.clone(); - - let parameters = PointerEncodingParameters { - bases: &bases.eh_frame_hdr, - func_base: None, - address_size: self.hdr.address_size, - section: &self.hdr.section, - }; - - while len > 1 { - let head = reader.split(R::Offset::from_u64((len / 2) * row_size)?)?; - let tail = reader.clone(); - - let pivot = - parse_encoded_pointer(self.hdr.table_enc, ¶meters, &mut reader)?.direct()?; - - match pivot.cmp(&address) { - Ordering::Equal => { - reader = tail; - break; - } - Ordering::Less => { - reader = tail; - len = len - (len / 2); - } - Ordering::Greater => { - reader = head; - len /= 2; - } - } - } - - reader.skip(R::Offset::from_u64(size)?)?; - - parse_encoded_pointer(self.hdr.table_enc, ¶meters, &mut reader) - } - - /// Convert a `Pointer` to a section offset. - /// - /// This does not support indirect pointers. - pub fn pointer_to_offset(&self, ptr: Pointer) -> Result> { - let ptr = ptr.direct()?; - let eh_frame_ptr = self.hdr.eh_frame_ptr().direct()?; - - // Calculate the offset in the EhFrame section - R::Offset::from_u64(ptr - eh_frame_ptr).map(EhFrameOffset) - } - - /// Returns a parsed FDE for the given address, or `NoUnwindInfoForAddress` - /// if there are none. - /// - /// You must provide a function to get its associated CIE. See - /// `PartialFrameDescriptionEntry::parse` for more information. 
- /// - /// # Example - /// - /// ``` - /// # use gimli::{BaseAddresses, EhFrame, ParsedEhFrameHdr, EndianSlice, NativeEndian, Error, UnwindSection}; - /// # fn foo() -> Result<(), Error> { - /// # let eh_frame: EhFrame> = unreachable!(); - /// # let eh_frame_hdr: ParsedEhFrameHdr> = unimplemented!(); - /// # let addr = 0; - /// # let bases = unimplemented!(); - /// let table = eh_frame_hdr.table().unwrap(); - /// let fde = table.fde_for_address(&eh_frame, &bases, addr, EhFrame::cie_from_offset)?; - /// # Ok(()) - /// # } - /// ``` - pub fn fde_for_address( - &self, - frame: &EhFrame, - bases: &BaseAddresses, - address: u64, - get_cie: F, - ) -> Result> - where - F: FnMut( - &EhFrame, - &BaseAddresses, - EhFrameOffset, - ) -> Result>, - { - let fdeptr = self.lookup(address, bases)?; - let offset = self.pointer_to_offset(fdeptr)?; - let entry = frame.fde_from_offset(bases, offset, get_cie)?; - if entry.contains(address) { - Ok(entry) - } else { - Err(Error::NoUnwindInfoForAddress) - } - } - - #[inline] - #[doc(hidden)] - #[deprecated(note = "Method renamed to fde_for_address; use that instead.")] - pub fn lookup_and_parse( - &self, - address: u64, - bases: &BaseAddresses, - frame: EhFrame, - get_cie: F, - ) -> Result> - where - F: FnMut( - &EhFrame, - &BaseAddresses, - EhFrameOffset, - ) -> Result>, - { - self.fde_for_address(&frame, bases, address, get_cie) - } - - /// Returns the frame unwind information for the given address, - /// or `NoUnwindInfoForAddress` if there are none. - /// - /// You must provide a function to get the associated CIE. See - /// `PartialFrameDescriptionEntry::parse` for more information. 
- pub fn unwind_info_for_address<'ctx, F, A: UnwindContextStorage>( - &self, - frame: &EhFrame, - bases: &BaseAddresses, - ctx: &'ctx mut UnwindContext, - address: u64, - get_cie: F, - ) -> Result<&'ctx UnwindTableRow> - where - F: FnMut( - &EhFrame, - &BaseAddresses, - EhFrameOffset, - ) -> Result>, - { - let fde = self.fde_for_address(frame, bases, address, get_cie)?; - fde.unwind_info_for_address(frame, bases, ctx, address) - } -} - -/// `EhFrame` contains the frame unwinding information needed during exception -/// handling found in the `.eh_frame` section. -/// -/// Most interesting methods are defined in the -/// [`UnwindSection`](trait.UnwindSection.html) trait. -/// -/// See -/// [`DebugFrame`](./struct.DebugFrame.html#differences-between-debug_frame-and-eh_frame) -/// for some discussion on the differences between `.debug_frame` and -/// `.eh_frame`. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct EhFrame { - section: R, - address_size: u8, - vendor: Vendor, -} - -impl EhFrame { - /// Set the size of a target address in bytes. - /// - /// This defaults to the native word size. - pub fn set_address_size(&mut self, address_size: u8) { - self.address_size = address_size - } - - /// Set the vendor extensions to use. - /// - /// This defaults to `Vendor::Default`. - pub fn set_vendor(&mut self, vendor: Vendor) { - self.vendor = vendor; - } -} - -impl<'input, Endian> EhFrame> -where - Endian: Endianity, -{ - /// Construct a new `EhFrame` instance from the data in the - /// `.eh_frame` section. - /// - /// It is the caller's responsibility to read the section and present it as - /// a `&[u8]` slice. That means using some ELF loader on Linux, a Mach-O - /// loader on macOS, etc. 
- /// - /// ``` - /// use gimli::{EhFrame, EndianSlice, NativeEndian}; - /// - /// // Use with `.eh_frame` - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_eh_frame_section_somehow = || &buf; - /// let eh_frame = EhFrame::new(read_eh_frame_section_somehow(), NativeEndian); - /// ``` - pub fn new(section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(section, endian)) - } -} - -impl Section for EhFrame { - fn id() -> SectionId { - SectionId::EhFrame - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for EhFrame { - fn from(section: R) -> Self { - // Default to native word size. - EhFrame { - section, - address_size: mem::size_of::() as u8, - vendor: Vendor::Default, - } - } -} - -// This has to be `pub` to silence a warning (that is deny(..)'d by default) in -// rustc. Eventually, not having this `pub` will become a hard error. -#[doc(hidden)] -#[allow(missing_docs)] -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum CieOffsetEncoding { - U32, - U64, -} - -/// An offset into an `UnwindSection`. -// -// Needed to avoid conflicting implementations of `Into`. -pub trait UnwindOffset: Copy + Debug + Eq + From -where - T: ReaderOffset, -{ - /// Convert an `UnwindOffset` into a `T`. - fn into(self) -> T; -} - -impl UnwindOffset for DebugFrameOffset -where - T: ReaderOffset, -{ - #[inline] - fn into(self) -> T { - self.0 - } -} - -impl UnwindOffset for EhFrameOffset -where - T: ReaderOffset, -{ - #[inline] - fn into(self) -> T { - self.0 - } -} - -/// This trait completely encapsulates everything that is different between -/// `.eh_frame` and `.debug_frame`, as well as all the bits that can change -/// between DWARF versions. -#[doc(hidden)] -pub trait _UnwindSectionPrivate { - /// Get the underlying section data. - fn section(&self) -> &R; - - /// Returns true if the given length value should be considered an - /// end-of-entries sentinel. 
- fn length_value_is_end_of_entries(length: R::Offset) -> bool; - - /// Return true if the given offset if the CIE sentinel, false otherwise. - fn is_cie(format: Format, id: u64) -> bool; - - /// Return the CIE offset/ID encoding used by this unwind section with the - /// given DWARF format. - fn cie_offset_encoding(format: Format) -> CieOffsetEncoding; - - /// For `.eh_frame`, CIE offsets are relative to the current position. For - /// `.debug_frame`, they are relative to the start of the section. We always - /// internally store them relative to the section, so we handle translating - /// `.eh_frame`'s relative offsets in this method. If the offset calculation - /// underflows, return `None`. - fn resolve_cie_offset(&self, base: R::Offset, offset: R::Offset) -> Option; - - /// Does this version of this unwind section encode address and segment - /// sizes in its CIEs? - fn has_address_and_segment_sizes(version: u8) -> bool; - - /// The address size to use if `has_address_and_segment_sizes` returns false. - fn address_size(&self) -> u8; - - /// The segment size to use if `has_address_and_segment_sizes` returns false. - fn segment_size(&self) -> u8; - - /// The vendor extensions to use. - fn vendor(&self) -> Vendor; -} - -/// A section holding unwind information: either `.debug_frame` or -/// `.eh_frame`. See [`DebugFrame`](./struct.DebugFrame.html) and -/// [`EhFrame`](./struct.EhFrame.html) respectively. -pub trait UnwindSection: Clone + Debug + _UnwindSectionPrivate { - /// The offset type associated with this CFI section. Either - /// `DebugFrameOffset` or `EhFrameOffset`. - type Offset: UnwindOffset; - - /// Iterate over the `CommonInformationEntry`s and `FrameDescriptionEntry`s - /// in this `.debug_frame` section. - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). 
- fn entries<'bases>(&self, bases: &'bases BaseAddresses) -> CfiEntriesIter<'bases, Self, R> { - CfiEntriesIter { - section: self.clone(), - bases, - input: self.section().clone(), - } - } - - /// Parse the `CommonInformationEntry` at the given offset. - fn cie_from_offset( - &self, - bases: &BaseAddresses, - offset: Self::Offset, - ) -> Result> { - let offset = UnwindOffset::into(offset); - let input = &mut self.section().clone(); - input.skip(offset)?; - CommonInformationEntry::parse(bases, self, input) - } - - /// Parse the `PartialFrameDescriptionEntry` at the given offset. - fn partial_fde_from_offset<'bases>( - &self, - bases: &'bases BaseAddresses, - offset: Self::Offset, - ) -> Result> { - let offset = UnwindOffset::into(offset); - let input = &mut self.section().clone(); - input.skip(offset)?; - PartialFrameDescriptionEntry::parse_partial(self, bases, input) - } - - /// Parse the `FrameDescriptionEntry` at the given offset. - fn fde_from_offset( - &self, - bases: &BaseAddresses, - offset: Self::Offset, - get_cie: F, - ) -> Result> - where - F: FnMut(&Self, &BaseAddresses, Self::Offset) -> Result>, - { - let partial = self.partial_fde_from_offset(bases, offset)?; - partial.parse(get_cie) - } - - /// Find the `FrameDescriptionEntry` for the given address. - /// - /// If found, the FDE is returned. If not found, - /// `Err(gimli::Error::NoUnwindInfoForAddress)` is returned. - /// If parsing fails, the error is returned. - /// - /// You must provide a function to get its associated CIE. See - /// `PartialFrameDescriptionEntry::parse` for more information. - /// - /// Note: this iterates over all FDEs. If available, it is possible - /// to do a binary search with `EhFrameHdr::fde_for_address` instead. - fn fde_for_address( - &self, - bases: &BaseAddresses, - address: u64, - mut get_cie: F, - ) -> Result> - where - F: FnMut(&Self, &BaseAddresses, Self::Offset) -> Result>, - { - let mut entries = self.entries(bases); - while let Some(entry) = entries.next()? 
{ - match entry { - CieOrFde::Cie(_) => {} - CieOrFde::Fde(partial) => { - let fde = partial.parse(&mut get_cie)?; - if fde.contains(address) { - return Ok(fde); - } - } - } - } - Err(Error::NoUnwindInfoForAddress) - } - - /// Find the frame unwind information for the given address. - /// - /// If found, the unwind information is returned. If not found, - /// `Err(gimli::Error::NoUnwindInfoForAddress)` is returned. If parsing or - /// CFI evaluation fails, the error is returned. - /// - /// ``` - /// use gimli::{BaseAddresses, EhFrame, EndianSlice, NativeEndian, UnwindContext, - /// UnwindSection}; - /// - /// # fn foo() -> gimli::Result<()> { - /// # let read_eh_frame_section = || unimplemented!(); - /// // Get the `.eh_frame` section from the object file. Alternatively, - /// // use `EhFrame` with the `.eh_frame` section of the object file. - /// let eh_frame = EhFrame::new(read_eh_frame_section(), NativeEndian); - /// - /// # let get_frame_pc = || unimplemented!(); - /// // Get the address of the PC for a frame you'd like to unwind. - /// let address = get_frame_pc(); - /// - /// // This context is reusable, which cuts down on heap allocations. - /// let ctx = UnwindContext::new(); - /// - /// // Optionally provide base addresses for any relative pointers. If a - /// // base address isn't provided and a pointer is found that is relative to - /// // it, we will return an `Err`. 
- /// # let address_of_text_section_in_memory = unimplemented!(); - /// # let address_of_got_section_in_memory = unimplemented!(); - /// let bases = BaseAddresses::default() - /// .set_text(address_of_text_section_in_memory) - /// .set_got(address_of_got_section_in_memory); - /// - /// let unwind_info = eh_frame.unwind_info_for_address( - /// &bases, - /// &mut ctx, - /// address, - /// EhFrame::cie_from_offset, - /// )?; - /// - /// # let do_stuff_with = |_| unimplemented!(); - /// do_stuff_with(unwind_info); - /// # let _ = ctx; - /// # unreachable!() - /// # } - /// ``` - #[inline] - fn unwind_info_for_address<'ctx, F, A: UnwindContextStorage>( - &self, - bases: &BaseAddresses, - ctx: &'ctx mut UnwindContext, - address: u64, - get_cie: F, - ) -> Result<&'ctx UnwindTableRow> - where - F: FnMut(&Self, &BaseAddresses, Self::Offset) -> Result>, - { - let fde = self.fde_for_address(bases, address, get_cie)?; - fde.unwind_info_for_address(self, bases, ctx, address) - } -} - -impl _UnwindSectionPrivate for DebugFrame { - fn section(&self) -> &R { - &self.section - } - - fn length_value_is_end_of_entries(_: R::Offset) -> bool { - false - } - - fn is_cie(format: Format, id: u64) -> bool { - match format { - Format::Dwarf32 => id == 0xffff_ffff, - Format::Dwarf64 => id == 0xffff_ffff_ffff_ffff, - } - } - - fn cie_offset_encoding(format: Format) -> CieOffsetEncoding { - match format { - Format::Dwarf32 => CieOffsetEncoding::U32, - Format::Dwarf64 => CieOffsetEncoding::U64, - } - } - - fn resolve_cie_offset(&self, _: R::Offset, offset: R::Offset) -> Option { - Some(offset) - } - - fn has_address_and_segment_sizes(version: u8) -> bool { - version == 4 - } - - fn address_size(&self) -> u8 { - self.address_size - } - - fn segment_size(&self) -> u8 { - self.segment_size - } - - fn vendor(&self) -> Vendor { - self.vendor - } -} - -impl UnwindSection for DebugFrame { - type Offset = DebugFrameOffset; -} - -impl _UnwindSectionPrivate for EhFrame { - fn section(&self) -> &R { - 
&self.section - } - - fn length_value_is_end_of_entries(length: R::Offset) -> bool { - length.into_u64() == 0 - } - - fn is_cie(_: Format, id: u64) -> bool { - id == 0 - } - - fn cie_offset_encoding(_format: Format) -> CieOffsetEncoding { - // `.eh_frame` offsets are always 4 bytes, regardless of the DWARF - // format. - CieOffsetEncoding::U32 - } - - fn resolve_cie_offset(&self, base: R::Offset, offset: R::Offset) -> Option { - base.checked_sub(offset) - } - - fn has_address_and_segment_sizes(_version: u8) -> bool { - false - } - - fn address_size(&self) -> u8 { - self.address_size - } - - fn segment_size(&self) -> u8 { - 0 - } - - fn vendor(&self) -> Vendor { - self.vendor - } -} - -impl UnwindSection for EhFrame { - type Offset = EhFrameOffset; -} - -/// Optional base addresses for the relative `DW_EH_PE_*` encoded pointers. -/// -/// During CIE/FDE parsing, if a relative pointer is encountered for a base -/// address that is unknown, an Err will be returned. -/// -/// ``` -/// use gimli::BaseAddresses; -/// -/// # fn foo() { -/// # let address_of_eh_frame_hdr_section_in_memory = unimplemented!(); -/// # let address_of_eh_frame_section_in_memory = unimplemented!(); -/// # let address_of_text_section_in_memory = unimplemented!(); -/// # let address_of_got_section_in_memory = unimplemented!(); -/// # let address_of_the_start_of_current_func = unimplemented!(); -/// let bases = BaseAddresses::default() -/// .set_eh_frame_hdr(address_of_eh_frame_hdr_section_in_memory) -/// .set_eh_frame(address_of_eh_frame_section_in_memory) -/// .set_text(address_of_text_section_in_memory) -/// .set_got(address_of_got_section_in_memory); -/// # let _ = bases; -/// # } -/// ``` -#[derive(Clone, Default, Debug, PartialEq, Eq)] -pub struct BaseAddresses { - /// The base addresses to use for pointers in the `.eh_frame_hdr` section. - pub eh_frame_hdr: SectionBaseAddresses, - - /// The base addresses to use for pointers in the `.eh_frame` section. 
- pub eh_frame: SectionBaseAddresses, -} - -/// Optional base addresses for the relative `DW_EH_PE_*` encoded pointers -/// in a particular section. -/// -/// See `BaseAddresses` for methods that are helpful in setting these addresses. -#[derive(Clone, Default, Debug, PartialEq, Eq)] -pub struct SectionBaseAddresses { - /// The address of the section containing the pointer. - pub section: Option, - - /// The base address for text relative pointers. - /// This is generally the address of the `.text` section. - pub text: Option, - - /// The base address for data relative pointers. - /// - /// For pointers in the `.eh_frame_hdr` section, this is the address - /// of the `.eh_frame_hdr` section - /// - /// For pointers in the `.eh_frame` section, this is generally the - /// global pointer, such as the address of the `.got` section. - pub data: Option, -} - -impl BaseAddresses { - /// Set the `.eh_frame_hdr` section base address. - #[inline] - pub fn set_eh_frame_hdr(mut self, addr: u64) -> Self { - self.eh_frame_hdr.section = Some(addr); - self.eh_frame_hdr.data = Some(addr); - self - } - - /// Set the `.eh_frame` section base address. - #[inline] - pub fn set_eh_frame(mut self, addr: u64) -> Self { - self.eh_frame.section = Some(addr); - self - } - - /// Set the `.text` section base address. - #[inline] - pub fn set_text(mut self, addr: u64) -> Self { - self.eh_frame_hdr.text = Some(addr); - self.eh_frame.text = Some(addr); - self - } - - /// Set the `.got` section base address. - #[inline] - pub fn set_got(mut self, addr: u64) -> Self { - self.eh_frame.data = Some(addr); - self - } -} - -/// An iterator over CIE and FDE entries in a `.debug_frame` or `.eh_frame` -/// section. -/// -/// Some pointers may be encoded relative to various base addresses. Use the -/// [`BaseAddresses`](./struct.BaseAddresses.html) parameter to provide them. By -/// default, none are provided. 
If a relative pointer is encountered for a base -/// address that is unknown, an `Err` will be returned and iteration will abort. -/// -/// Can be [used with -/// `FallibleIterator`](./index.html#using-with-fallibleiterator). -/// -/// ``` -/// use gimli::{BaseAddresses, EhFrame, EndianSlice, NativeEndian, UnwindSection}; -/// -/// # fn foo() -> gimli::Result<()> { -/// # let read_eh_frame_somehow = || unimplemented!(); -/// let eh_frame = EhFrame::new(read_eh_frame_somehow(), NativeEndian); -/// -/// # let address_of_eh_frame_hdr_section_in_memory = unimplemented!(); -/// # let address_of_eh_frame_section_in_memory = unimplemented!(); -/// # let address_of_text_section_in_memory = unimplemented!(); -/// # let address_of_got_section_in_memory = unimplemented!(); -/// # let address_of_the_start_of_current_func = unimplemented!(); -/// // Provide base addresses for relative pointers. -/// let bases = BaseAddresses::default() -/// .set_eh_frame_hdr(address_of_eh_frame_hdr_section_in_memory) -/// .set_eh_frame(address_of_eh_frame_section_in_memory) -/// .set_text(address_of_text_section_in_memory) -/// .set_got(address_of_got_section_in_memory); -/// -/// let mut entries = eh_frame.entries(&bases); -/// -/// # let do_stuff_with = |_| unimplemented!(); -/// while let Some(entry) = entries.next()? { -/// do_stuff_with(entry) -/// } -/// # unreachable!() -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct CfiEntriesIter<'bases, Section, R> -where - R: Reader, - Section: UnwindSection, -{ - section: Section, - bases: &'bases BaseAddresses, - input: R, -} - -impl<'bases, Section, R> CfiEntriesIter<'bases, Section, R> -where - R: Reader, - Section: UnwindSection, -{ - /// Advance the iterator to the next entry. 
- pub fn next(&mut self) -> Result>> { - if self.input.is_empty() { - return Ok(None); - } - - match parse_cfi_entry(self.bases, &self.section, &mut self.input) { - Err(e) => { - self.input.empty(); - Err(e) - } - Ok(None) => { - self.input.empty(); - Ok(None) - } - Ok(Some(entry)) => Ok(Some(entry)), - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl<'bases, Section, R> fallible_iterator::FallibleIterator for CfiEntriesIter<'bases, Section, R> -where - R: Reader, - Section: UnwindSection, -{ - type Item = CieOrFde<'bases, Section, R>; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - CfiEntriesIter::next(self) - } -} - -/// Either a `CommonInformationEntry` (CIE) or a `FrameDescriptionEntry` (FDE). -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum CieOrFde<'bases, Section, R> -where - R: Reader, - Section: UnwindSection, -{ - /// This CFI entry is a `CommonInformationEntry`. - Cie(CommonInformationEntry), - /// This CFI entry is a `FrameDescriptionEntry`, however fully parsing it - /// requires parsing its CIE first, so it is left in a partially parsed - /// state. - Fde(PartialFrameDescriptionEntry<'bases, Section, R>), -} - -fn parse_cfi_entry<'bases, Section, R>( - bases: &'bases BaseAddresses, - section: &Section, - input: &mut R, -) -> Result>> -where - R: Reader, - Section: UnwindSection, -{ - let (offset, length, format) = loop { - let offset = input.offset_from(section.section()); - let (length, format) = input.read_initial_length()?; - - if Section::length_value_is_end_of_entries(length) { - return Ok(None); - } - - // Hack: skip zero padding inserted by buggy compilers/linkers. - // We require that the padding is a multiple of 32-bits, otherwise - // there is no reliable way to determine when the padding ends. This - // should be okay since CFI entries must be aligned to the address size. 
- - if length.into_u64() != 0 || format != Format::Dwarf32 { - break (offset, length, format); - } - }; - - let mut rest = input.split(length)?; - let cie_offset_base = rest.offset_from(section.section()); - let cie_id_or_offset = match Section::cie_offset_encoding(format) { - CieOffsetEncoding::U32 => rest.read_u32().map(u64::from)?, - CieOffsetEncoding::U64 => rest.read_u64()?, - }; - - if Section::is_cie(format, cie_id_or_offset) { - let cie = CommonInformationEntry::parse_rest(offset, length, format, bases, section, rest)?; - Ok(Some(CieOrFde::Cie(cie))) - } else { - let cie_offset = R::Offset::from_u64(cie_id_or_offset)?; - let cie_offset = match section.resolve_cie_offset(cie_offset_base, cie_offset) { - None => return Err(Error::OffsetOutOfBounds), - Some(cie_offset) => cie_offset, - }; - - let fde = PartialFrameDescriptionEntry { - offset, - length, - format, - cie_offset: cie_offset.into(), - rest, - section: section.clone(), - bases, - }; - - Ok(Some(CieOrFde::Fde(fde))) - } -} - -/// We support the z-style augmentation [defined by `.eh_frame`][ehframe]. -/// -/// [ehframe]: https://refspecs.linuxfoundation.org/LSB_3.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] -pub struct Augmentation { - /// > A 'L' may be present at any position after the first character of the - /// > string. This character may only be present if 'z' is the first character - /// > of the string. If present, it indicates the presence of one argument in - /// > the Augmentation Data of the CIE, and a corresponding argument in the - /// > Augmentation Data of the FDE. The argument in the Augmentation Data of - /// > the CIE is 1-byte and represents the pointer encoding used for the - /// > argument in the Augmentation Data of the FDE, which is the address of a - /// > language-specific data area (LSDA). The size of the LSDA pointer is - /// > specified by the pointer encoding used. 
- lsda: Option, - - /// > A 'P' may be present at any position after the first character of the - /// > string. This character may only be present if 'z' is the first character - /// > of the string. If present, it indicates the presence of two arguments in - /// > the Augmentation Data of the CIE. The first argument is 1-byte and - /// > represents the pointer encoding used for the second argument, which is - /// > the address of a personality routine handler. The size of the - /// > personality routine pointer is specified by the pointer encoding used. - personality: Option<(constants::DwEhPe, Pointer)>, - - /// > A 'R' may be present at any position after the first character of the - /// > string. This character may only be present if 'z' is the first character - /// > of the string. If present, The Augmentation Data shall include a 1 byte - /// > argument that represents the pointer encoding for the address pointers - /// > used in the FDE. - fde_address_encoding: Option, - - /// True if this CIE's FDEs are trampolines for signal handlers. 
- is_signal_trampoline: bool, -} - -impl Augmentation { - fn parse( - augmentation_str: &mut R, - bases: &BaseAddresses, - address_size: u8, - section: &Section, - input: &mut R, - ) -> Result - where - R: Reader, - Section: UnwindSection, - { - debug_assert!( - !augmentation_str.is_empty(), - "Augmentation::parse should only be called if we have an augmentation" - ); - - let mut augmentation = Augmentation::default(); - - let mut parsed_first = false; - let mut data = None; - - while !augmentation_str.is_empty() { - let ch = augmentation_str.read_u8()?; - match ch { - b'z' => { - if parsed_first { - return Err(Error::UnknownAugmentation); - } - - let augmentation_length = input.read_uleb128().and_then(R::Offset::from_u64)?; - data = Some(input.split(augmentation_length)?); - } - b'L' => { - let rest = data.as_mut().ok_or(Error::UnknownAugmentation)?; - let encoding = parse_pointer_encoding(rest)?; - augmentation.lsda = Some(encoding); - } - b'P' => { - let rest = data.as_mut().ok_or(Error::UnknownAugmentation)?; - let encoding = parse_pointer_encoding(rest)?; - let parameters = PointerEncodingParameters { - bases: &bases.eh_frame, - func_base: None, - address_size, - section: section.section(), - }; - - let personality = parse_encoded_pointer(encoding, ¶meters, rest)?; - augmentation.personality = Some((encoding, personality)); - } - b'R' => { - let rest = data.as_mut().ok_or(Error::UnknownAugmentation)?; - let encoding = parse_pointer_encoding(rest)?; - augmentation.fde_address_encoding = Some(encoding); - } - b'S' => augmentation.is_signal_trampoline = true, - _ => return Err(Error::UnknownAugmentation), - } - - parsed_first = true; - } - - Ok(augmentation) - } -} - -/// Parsed augmentation data for a `FrameDescriptEntry`. 
-#[derive(Clone, Debug, Default, PartialEq, Eq)] -struct AugmentationData { - lsda: Option, -} - -impl AugmentationData { - fn parse( - augmentation: &Augmentation, - encoding_parameters: &PointerEncodingParameters, - input: &mut R, - ) -> Result { - // In theory, we should be iterating over the original augmentation - // string, interpreting each character, and reading the appropriate bits - // out of the augmentation data as we go. However, the only character - // that defines augmentation data in the FDE is the 'L' character, so we - // can just check for its presence directly. - - let aug_data_len = input.read_uleb128().and_then(R::Offset::from_u64)?; - let rest = &mut input.split(aug_data_len)?; - let mut augmentation_data = AugmentationData::default(); - if let Some(encoding) = augmentation.lsda { - let lsda = parse_encoded_pointer(encoding, encoding_parameters, rest)?; - augmentation_data.lsda = Some(lsda); - } - Ok(augmentation_data) - } -} - -/// > A Common Information Entry holds information that is shared among many -/// > Frame Description Entries. There is at least one CIE in every non-empty -/// > `.debug_frame` section. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct CommonInformationEntry::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// The offset of this entry from the start of its containing section. - offset: Offset, - - /// > A constant that gives the number of bytes of the CIE structure, not - /// > including the length field itself (see Section 7.2.2). The size of the - /// > length field plus the value of length must be an integral multiple of - /// > the address size. - length: Offset, - - format: Format, - - /// > A version number (see Section 7.23). This number is specific to the - /// > call frame information and is independent of the DWARF version number. - version: u8, - - /// The parsed augmentation, if any. 
- augmentation: Option, - - /// > The size of a target address in this CIE and any FDEs that use it, in - /// > bytes. If a compilation unit exists for this frame, its address size - /// > must match the address size here. - address_size: u8, - - /// "The size of a segment selector in this CIE and any FDEs that use it, in - /// bytes." - segment_size: u8, - - /// "A constant that is factored out of all advance location instructions - /// (see Section 6.4.2.1)." - code_alignment_factor: u64, - - /// > A constant that is factored out of certain offset instructions (see - /// > below). The resulting value is (operand * data_alignment_factor). - data_alignment_factor: i64, - - /// > An unsigned LEB128 constant that indicates which column in the rule - /// > table represents the return address of the function. Note that this - /// > column might not correspond to an actual machine register. - return_address_register: Register, - - /// > A sequence of rules that are interpreted to create the initial setting - /// > of each column in the table. - /// - /// > The default rule for all columns before interpretation of the initial - /// > instructions is the undefined rule. However, an ABI authoring body or a - /// > compilation system authoring body may specify an alternate default - /// > value for any or all columns. - /// - /// This is followed by `DW_CFA_nop` padding until the end of `length` bytes - /// in the input. - initial_instructions: R, -} - -impl CommonInformationEntry { - fn parse>( - bases: &BaseAddresses, - section: &Section, - input: &mut R, - ) -> Result> { - match parse_cfi_entry(bases, section, input)? 
{ - Some(CieOrFde::Cie(cie)) => Ok(cie), - Some(CieOrFde::Fde(_)) => Err(Error::NotCieId), - None => Err(Error::NoEntryAtGivenOffset), - } - } - - fn parse_rest>( - offset: R::Offset, - length: R::Offset, - format: Format, - bases: &BaseAddresses, - section: &Section, - mut rest: R, - ) -> Result> { - let version = rest.read_u8()?; - - // Version 1 of `.debug_frame` corresponds to DWARF 2, and then for - // DWARF 3 and 4, I think they decided to just match the standard's - // version. - match version { - 1 | 3 | 4 => (), - _ => return Err(Error::UnknownVersion(u64::from(version))), - } - - let mut augmentation_string = rest.read_null_terminated_slice()?; - - let (address_size, segment_size) = if Section::has_address_and_segment_sizes(version) { - let address_size = rest.read_u8()?; - let segment_size = rest.read_u8()?; - (address_size, segment_size) - } else { - (section.address_size(), section.segment_size()) - }; - - let code_alignment_factor = rest.read_uleb128()?; - let data_alignment_factor = rest.read_sleb128()?; - - let return_address_register = if version == 1 { - Register(rest.read_u8()?.into()) - } else { - rest.read_uleb128().and_then(Register::from_u64)? - }; - - let augmentation = if augmentation_string.is_empty() { - None - } else { - Some(Augmentation::parse( - &mut augmentation_string, - bases, - address_size, - section, - &mut rest, - )?) - }; - - let entry = CommonInformationEntry { - offset, - length, - format, - version, - augmentation, - address_size, - segment_size, - code_alignment_factor, - data_alignment_factor, - return_address_register, - initial_instructions: rest, - }; - - Ok(entry) - } -} - -/// # Signal Safe Methods -/// -/// These methods are guaranteed not to allocate, acquire locks, or perform any -/// other signal-unsafe operations. -impl CommonInformationEntry { - /// Get the offset of this entry from the start of its containing section. 
- pub fn offset(&self) -> R::Offset { - self.offset - } - - /// Return the encoding parameters for this CIE. - pub fn encoding(&self) -> Encoding { - Encoding { - format: self.format, - version: u16::from(self.version), - address_size: self.address_size, - } - } - - /// The size of addresses (in bytes) in this CIE. - pub fn address_size(&self) -> u8 { - self.address_size - } - - /// Iterate over this CIE's initial instructions. - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). - pub fn instructions<'a, Section>( - &self, - section: &'a Section, - bases: &'a BaseAddresses, - ) -> CallFrameInstructionIter<'a, R> - where - Section: UnwindSection, - { - CallFrameInstructionIter { - input: self.initial_instructions.clone(), - address_encoding: None, - parameters: PointerEncodingParameters { - bases: &bases.eh_frame, - func_base: None, - address_size: self.address_size, - section: section.section(), - }, - vendor: section.vendor(), - } - } - - /// > A constant that gives the number of bytes of the CIE structure, not - /// > including the length field itself (see Section 7.2.2). The size of the - /// > length field plus the value of length must be an integral multiple of - /// > the address size. - pub fn entry_len(&self) -> R::Offset { - self.length - } - - /// > A version number (see Section 7.23). This number is specific to the - /// > call frame information and is independent of the DWARF version number. - pub fn version(&self) -> u8 { - self.version - } - - /// Get the augmentation data, if any exists. - /// - /// The only augmentation understood by `gimli` is that which is defined by - /// `.eh_frame`. - pub fn augmentation(&self) -> Option<&Augmentation> { - self.augmentation.as_ref() - } - - /// True if this CIE's FDEs have a LSDA. - pub fn has_lsda(&self) -> bool { - self.augmentation.map_or(false, |a| a.lsda.is_some()) - } - - /// Return the encoding of the LSDA address for this CIE's FDEs. 
- pub fn lsda_encoding(&self) -> Option { - self.augmentation.and_then(|a| a.lsda) - } - - /// Return the encoding and address of the personality routine handler - /// for this CIE's FDEs. - pub fn personality_with_encoding(&self) -> Option<(constants::DwEhPe, Pointer)> { - self.augmentation.as_ref().and_then(|a| a.personality) - } - - /// Return the address of the personality routine handler - /// for this CIE's FDEs. - pub fn personality(&self) -> Option { - self.augmentation - .as_ref() - .and_then(|a| a.personality) - .map(|(_, p)| p) - } - - /// Return the encoding of the addresses for this CIE's FDEs. - pub fn fde_address_encoding(&self) -> Option { - self.augmentation.and_then(|a| a.fde_address_encoding) - } - - /// True if this CIE's FDEs are trampolines for signal handlers. - pub fn is_signal_trampoline(&self) -> bool { - self.augmentation.map_or(false, |a| a.is_signal_trampoline) - } - - /// > A constant that is factored out of all advance location instructions - /// > (see Section 6.4.2.1). - pub fn code_alignment_factor(&self) -> u64 { - self.code_alignment_factor - } - - /// > A constant that is factored out of certain offset instructions (see - /// > below). The resulting value is (operand * data_alignment_factor). - pub fn data_alignment_factor(&self) -> i64 { - self.data_alignment_factor - } - - /// > An unsigned ... constant that indicates which column in the rule - /// > table represents the return address of the function. Note that this - /// > column might not correspond to an actual machine register. - pub fn return_address_register(&self) -> Register { - self.return_address_register - } -} - -/// A partially parsed `FrameDescriptionEntry`. -/// -/// Fully parsing this FDE requires first parsing its CIE. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub struct PartialFrameDescriptionEntry<'bases, Section, R> -where - R: Reader, - Section: UnwindSection, -{ - offset: R::Offset, - length: R::Offset, - format: Format, - cie_offset: Section::Offset, - rest: R, - section: Section, - bases: &'bases BaseAddresses, -} - -impl<'bases, Section, R> PartialFrameDescriptionEntry<'bases, Section, R> -where - R: Reader, - Section: UnwindSection, -{ - fn parse_partial( - section: &Section, - bases: &'bases BaseAddresses, - input: &mut R, - ) -> Result> { - match parse_cfi_entry(bases, section, input)? { - Some(CieOrFde::Cie(_)) => Err(Error::NotFdePointer), - Some(CieOrFde::Fde(partial)) => Ok(partial), - None => Err(Error::NoEntryAtGivenOffset), - } - } - - /// Fully parse this FDE. - /// - /// You must provide a function get its associated CIE (either by parsing it - /// on demand, or looking it up in some table mapping offsets to CIEs that - /// you've already parsed, etc.) - pub fn parse(&self, get_cie: F) -> Result> - where - F: FnMut(&Section, &BaseAddresses, Section::Offset) -> Result>, - { - FrameDescriptionEntry::parse_rest( - self.offset, - self.length, - self.format, - self.cie_offset, - self.rest.clone(), - &self.section, - self.bases, - get_cie, - ) - } - - /// Get the offset of this entry from the start of its containing section. - pub fn offset(&self) -> R::Offset { - self.offset - } - - /// Get the offset of this FDE's CIE. - pub fn cie_offset(&self) -> Section::Offset { - self.cie_offset - } - - /// > A constant that gives the number of bytes of the header and - /// > instruction stream for this function, not including the length field - /// > itself (see Section 7.2.2). The size of the length field plus the value - /// > of length must be an integral multiple of the address size. - pub fn entry_len(&self) -> R::Offset { - self.length - } -} - -/// A `FrameDescriptionEntry` is a set of CFA instructions for an address range. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub struct FrameDescriptionEntry::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// The start of this entry within its containing section. - offset: Offset, - - /// > A constant that gives the number of bytes of the header and - /// > instruction stream for this function, not including the length field - /// > itself (see Section 7.2.2). The size of the length field plus the value - /// > of length must be an integral multiple of the address size. - length: Offset, - - format: Format, - - /// "A constant offset into the .debug_frame section that denotes the CIE - /// that is associated with this FDE." - /// - /// This is the CIE at that offset. - cie: CommonInformationEntry, - - /// > The address of the first location associated with this table entry. If - /// > the segment_size field of this FDE's CIE is non-zero, the initial - /// > location is preceded by a segment selector of the given length. - initial_segment: u64, - initial_address: u64, - - /// "The number of bytes of program instructions described by this entry." - address_range: u64, - - /// The parsed augmentation data, if we have any. - augmentation: Option, - - /// "A sequence of table defining instructions that are described below." - /// - /// This is followed by `DW_CFA_nop` padding until `length` bytes of the - /// input are consumed. - instructions: R, -} - -impl FrameDescriptionEntry { - fn parse_rest( - offset: R::Offset, - length: R::Offset, - format: Format, - cie_pointer: Section::Offset, - mut rest: R, - section: &Section, - bases: &BaseAddresses, - mut get_cie: F, - ) -> Result> - where - Section: UnwindSection, - F: FnMut(&Section, &BaseAddresses, Section::Offset) -> Result>, - { - let cie = get_cie(section, bases, cie_pointer)?; - - let initial_segment = if cie.segment_size > 0 { - rest.read_address(cie.segment_size)? 
- } else { - 0 - }; - - let mut parameters = PointerEncodingParameters { - bases: &bases.eh_frame, - func_base: None, - address_size: cie.address_size, - section: section.section(), - }; - - let (initial_address, address_range) = Self::parse_addresses(&mut rest, &cie, ¶meters)?; - parameters.func_base = Some(initial_address); - - let aug_data = if let Some(ref augmentation) = cie.augmentation { - Some(AugmentationData::parse( - augmentation, - ¶meters, - &mut rest, - )?) - } else { - None - }; - - let entry = FrameDescriptionEntry { - offset, - length, - format, - cie, - initial_segment, - initial_address, - address_range, - augmentation: aug_data, - instructions: rest, - }; - - Ok(entry) - } - - fn parse_addresses( - input: &mut R, - cie: &CommonInformationEntry, - parameters: &PointerEncodingParameters, - ) -> Result<(u64, u64)> { - let encoding = cie.augmentation().and_then(|a| a.fde_address_encoding); - if let Some(encoding) = encoding { - let initial_address = parse_encoded_pointer(encoding, parameters, input)?; - - // Ignore indirection. - let initial_address = initial_address.pointer(); - - // Address ranges cannot be relative to anything, so just grab the - // data format bits from the encoding. - let address_range = parse_encoded_pointer(encoding.format(), parameters, input)?; - Ok((initial_address, address_range.pointer())) - } else { - let initial_address = input.read_address(cie.address_size)?; - let address_range = input.read_address(cie.address_size)?; - Ok((initial_address, address_range)) - } - } - - /// Return the table of unwind information for this FDE. - #[inline] - pub fn rows<'a, 'ctx, Section: UnwindSection, A: UnwindContextStorage>( - &self, - section: &'a Section, - bases: &'a BaseAddresses, - ctx: &'ctx mut UnwindContext, - ) -> Result> { - UnwindTable::new(section, bases, ctx, self) - } - - /// Find the frame unwind information for the given address. 
- /// - /// If found, the unwind information is returned along with the reset - /// context in the form `Ok((unwind_info, context))`. If not found, - /// `Err(gimli::Error::NoUnwindInfoForAddress)` is returned. If parsing or - /// CFI evaluation fails, the error is returned. - pub fn unwind_info_for_address<'ctx, Section: UnwindSection, A: UnwindContextStorage>( - &self, - section: &Section, - bases: &BaseAddresses, - ctx: &'ctx mut UnwindContext, - address: u64, - ) -> Result<&'ctx UnwindTableRow> { - let mut table = self.rows(section, bases, ctx)?; - while let Some(row) = table.next_row()? { - if row.contains(address) { - return Ok(table.ctx.row()); - } - } - Err(Error::NoUnwindInfoForAddress) - } -} - -/// # Signal Safe Methods -/// -/// These methods are guaranteed not to allocate, acquire locks, or perform any -/// other signal-unsafe operations. -#[allow(clippy::len_without_is_empty)] -impl FrameDescriptionEntry { - /// Get the offset of this entry from the start of its containing section. - pub fn offset(&self) -> R::Offset { - self.offset - } - - /// Get a reference to this FDE's CIE. - pub fn cie(&self) -> &CommonInformationEntry { - &self.cie - } - - /// > A constant that gives the number of bytes of the header and - /// > instruction stream for this function, not including the length field - /// > itself (see Section 7.2.2). The size of the length field plus the value - /// > of length must be an integral multiple of the address size. - pub fn entry_len(&self) -> R::Offset { - self.length - } - - /// Iterate over this FDE's instructions. - /// - /// Will not include the CIE's initial instructions, if you want those do - /// `fde.cie().instructions()` first. - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). 
- pub fn instructions<'a, Section>( - &self, - section: &'a Section, - bases: &'a BaseAddresses, - ) -> CallFrameInstructionIter<'a, R> - where - Section: UnwindSection, - { - CallFrameInstructionIter { - input: self.instructions.clone(), - address_encoding: self.cie.augmentation().and_then(|a| a.fde_address_encoding), - parameters: PointerEncodingParameters { - bases: &bases.eh_frame, - func_base: None, - address_size: self.cie.address_size, - section: section.section(), - }, - vendor: section.vendor(), - } - } - - /// The first address for which this entry has unwind information for. - pub fn initial_address(&self) -> u64 { - self.initial_address - } - - /// The number of bytes of instructions that this entry has unwind - /// information for. - pub fn len(&self) -> u64 { - self.address_range - } - - /// Return `true` if the given address is within this FDE, `false` - /// otherwise. - /// - /// This is equivalent to `entry.initial_address() <= address < - /// entry.initial_address() + entry.len()`. - pub fn contains(&self, address: u64) -> bool { - let start = self.initial_address(); - let end = start + self.len(); - start <= address && address < end - } - - /// The address of this FDE's language-specific data area (LSDA), if it has - /// any. - pub fn lsda(&self) -> Option { - self.augmentation.as_ref().and_then(|a| a.lsda) - } - - /// Return true if this FDE's function is a trampoline for a signal handler. - #[inline] - pub fn is_signal_trampoline(&self) -> bool { - self.cie().is_signal_trampoline() - } - - /// Return the address of the FDE's function's personality routine - /// handler. The personality routine does language-specific clean up when - /// unwinding the stack frames with the intent to not run them again. - #[inline] - pub fn personality(&self) -> Option { - self.cie().personality() - } -} - -/// Specification of what storage should be used for [`UnwindContext`]. 
-/// -#[cfg_attr( - feature = "read", - doc = " -Normally you would only need to use [`StoreOnHeap`], which places the stack -on the heap using [`Vec`]. This is the default storage type parameter for [`UnwindContext`]. -" -)] -/// -/// If you need to avoid [`UnwindContext`] from allocating memory, e.g. for signal safety, -/// you can provide you own storage specification: -/// ```rust,no_run -/// # use gimli::*; -/// # -/// # fn foo<'a>(some_fde: gimli::FrameDescriptionEntry>) -/// # -> gimli::Result<()> { -/// # let eh_frame: gimli::EhFrame<_> = unreachable!(); -/// # let bases = unimplemented!(); -/// # -/// struct StoreOnStack; -/// -/// impl UnwindContextStorage for StoreOnStack { -/// type Rules = [(Register, RegisterRule); 192]; -/// type Stack = [UnwindTableRow; 4]; -/// } -/// -/// let mut ctx = UnwindContext::<_, StoreOnStack>::new_in(); -/// -/// // Initialize the context by evaluating the CIE's initial instruction program, -/// // and generate the unwind table. -/// let mut table = some_fde.rows(&eh_frame, &bases, &mut ctx)?; -/// while let Some(row) = table.next_row()? { -/// // Do stuff with each row... -/// # let _ = row; -/// } -/// # unreachable!() -/// # } -/// ``` -pub trait UnwindContextStorage: Sized { - /// The storage used for register rules in a unwind table row. - /// - /// Note that this is nested within the stack. - type Rules: ArrayLike)>; - - /// The storage used for unwind table row stack. - type Stack: ArrayLike>; -} - -#[cfg(feature = "read")] -const MAX_RULES: usize = 192; -#[cfg(feature = "read")] -const MAX_UNWIND_STACK_DEPTH: usize = 4; - -#[cfg(feature = "read")] -impl UnwindContextStorage for StoreOnHeap { - type Rules = [(Register, RegisterRule); MAX_RULES]; - type Stack = Box<[UnwindTableRow; MAX_UNWIND_STACK_DEPTH]>; -} - -/// Common context needed when evaluating the call frame unwinding information. -/// -/// This structure can be large so it is advisable to place it on the heap. 
-/// To avoid re-allocating the context multiple times when evaluating multiple -/// CFI programs, it can be reused. -/// -/// ``` -/// use gimli::{UnwindContext, UnwindTable}; -/// -/// # fn foo<'a>(some_fde: gimli::FrameDescriptionEntry>) -/// # -> gimli::Result<()> { -/// # let eh_frame: gimli::EhFrame<_> = unreachable!(); -/// # let bases = unimplemented!(); -/// // An uninitialized context. -/// let mut ctx = Box::new(UnwindContext::new()); -/// -/// // Initialize the context by evaluating the CIE's initial instruction program, -/// // and generate the unwind table. -/// let mut table = some_fde.rows(&eh_frame, &bases, &mut ctx)?; -/// while let Some(row) = table.next_row()? { -/// // Do stuff with each row... -/// # let _ = row; -/// } -/// # unreachable!() -/// # } -/// ``` -#[derive(Clone, PartialEq, Eq)] -pub struct UnwindContext = StoreOnHeap> { - // Stack of rows. The last row is the row currently being built by the - // program. There is always at least one row. The vast majority of CFI - // programs will only ever have one row on the stack. - stack: ArrayVec, - - // If we are evaluating an FDE's instructions, then `is_initialized` will be - // `true`. If `initial_rule` is `Some`, then the initial register rules are either - // all default rules or have just 1 non-default rule, stored in `initial_rule`. - // If it's `None`, `stack[0]` will contain the initial register rules - // described by the CIE's initial instructions. These rules are used by - // `DW_CFA_restore`. Otherwise, when we are currently evaluating a CIE's - // initial instructions, `is_initialized` will be `false` and initial rules - // cannot be read. 
- initial_rule: Option<(Register, RegisterRule)>, - - is_initialized: bool, -} - -impl> Debug for UnwindContext { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("UnwindContext") - .field("stack", &self.stack) - .field("initial_rule", &self.initial_rule) - .field("is_initialized", &self.is_initialized) - .finish() - } -} - -impl> Default for UnwindContext { - fn default() -> Self { - Self::new_in() - } -} - -#[cfg(feature = "read")] -impl UnwindContext { - /// Construct a new call frame unwinding context. - pub fn new() -> Self { - Self::new_in() - } -} - -/// # Signal Safe Methods -/// -/// These methods are guaranteed not to allocate, acquire locks, or perform any -/// other signal-unsafe operations, if an non-allocating storage is used. -impl> UnwindContext { - /// Construct a new call frame unwinding context. - pub fn new_in() -> Self { - let mut ctx = UnwindContext { - stack: Default::default(), - initial_rule: None, - is_initialized: false, - }; - ctx.reset(); - ctx - } - - /// Run the CIE's initial instructions and initialize this `UnwindContext`. - fn initialize>( - &mut self, - section: &Section, - bases: &BaseAddresses, - cie: &CommonInformationEntry, - ) -> Result<()> { - // Always reset because previous initialization failure may leave dirty state. 
- self.reset(); - - let mut table = UnwindTable::new_for_cie(section, bases, self, cie); - while table.next_row()?.is_some() {} - - self.save_initial_rules()?; - Ok(()) - } - - fn reset(&mut self) { - self.stack.clear(); - self.stack.try_push(UnwindTableRow::default()).unwrap(); - debug_assert!(self.stack[0].is_default()); - self.initial_rule = None; - self.is_initialized = false; - } - - fn row(&self) -> &UnwindTableRow { - self.stack.last().unwrap() - } - - fn row_mut(&mut self) -> &mut UnwindTableRow { - self.stack.last_mut().unwrap() - } - - fn save_initial_rules(&mut self) -> Result<()> { - debug_assert!(!self.is_initialized); - self.initial_rule = match *self.stack.last().unwrap().registers.rules { - // All rules are default (undefined). In this case just synthesize - // an undefined rule. - [] => Some((Register(0), RegisterRule::Undefined)), - [ref rule] => Some(rule.clone()), - _ => { - let rules = self.stack.last().unwrap().clone(); - self.stack - .try_insert(0, rules) - .map_err(|_| Error::StackFull)?; - None - } - }; - self.is_initialized = true; - Ok(()) - } - - fn start_address(&self) -> u64 { - self.row().start_address - } - - fn set_start_address(&mut self, start_address: u64) { - let row = self.row_mut(); - row.start_address = start_address; - } - - fn set_register_rule(&mut self, register: Register, rule: RegisterRule) -> Result<()> { - let row = self.row_mut(); - row.registers.set(register, rule) - } - - /// Returns `None` if we have not completed evaluation of a CIE's initial - /// instructions. 
- fn get_initial_rule(&self, register: Register) -> Option> { - if !self.is_initialized { - return None; - } - Some(match self.initial_rule { - None => self.stack[0].registers.get(register), - Some((r, ref rule)) if r == register => rule.clone(), - _ => RegisterRule::Undefined, - }) - } - - fn set_cfa(&mut self, cfa: CfaRule) { - self.row_mut().cfa = cfa; - } - - fn cfa_mut(&mut self) -> &mut CfaRule { - &mut self.row_mut().cfa - } - - fn push_row(&mut self) -> Result<()> { - let new_row = self.row().clone(); - self.stack.try_push(new_row).map_err(|_| Error::StackFull) - } - - fn pop_row(&mut self) -> Result<()> { - let min_size = if self.is_initialized && self.initial_rule.is_none() { - 2 - } else { - 1 - }; - if self.stack.len() <= min_size { - return Err(Error::PopWithEmptyStack); - } - self.stack.pop().unwrap(); - Ok(()) - } -} - -/// The `UnwindTable` iteratively evaluates a `FrameDescriptionEntry`'s -/// `CallFrameInstruction` program, yielding the each row one at a time. -/// -/// > 6.4.1 Structure of Call Frame Information -/// > -/// > DWARF supports virtual unwinding by defining an architecture independent -/// > basis for recording how procedures save and restore registers during their -/// > lifetimes. This basis must be augmented on some machines with specific -/// > information that is defined by an architecture specific ABI authoring -/// > committee, a hardware vendor, or a compiler producer. The body defining a -/// > specific augmentation is referred to below as the “augmenter.†-/// > -/// > Abstractly, this mechanism describes a very large table that has the -/// > following structure: -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// > -/// >
LOCCFAR0R1...RN
L0
L1
...
LN
-/// > -/// > The first column indicates an address for every location that contains code -/// > in a program. (In shared objects, this is an object-relative offset.) The -/// > remaining columns contain virtual unwinding rules that are associated with -/// > the indicated location. -/// > -/// > The CFA column defines the rule which computes the Canonical Frame Address -/// > value; it may be either a register and a signed offset that are added -/// > together, or a DWARF expression that is evaluated. -/// > -/// > The remaining columns are labeled by register number. This includes some -/// > registers that have special designation on some architectures such as the PC -/// > and the stack pointer register. (The actual mapping of registers for a -/// > particular architecture is defined by the augmenter.) The register columns -/// > contain rules that describe whether a given register has been saved and the -/// > rule to find the value for the register in the previous frame. -/// > -/// > ... -/// > -/// > This table would be extremely large if actually constructed as -/// > described. Most of the entries at any point in the table are identical to -/// > the ones above them. The whole table can be represented quite compactly by -/// > recording just the differences starting at the beginning address of each -/// > subroutine in the program. -#[derive(Debug)] -pub struct UnwindTable<'a, 'ctx, R: Reader, A: UnwindContextStorage = StoreOnHeap> { - code_alignment_factor: Wrapping, - data_alignment_factor: Wrapping, - next_start_address: u64, - last_end_address: u64, - returned_last_row: bool, - current_row_valid: bool, - instructions: CallFrameInstructionIter<'a, R>, - ctx: &'ctx mut UnwindContext, -} - -/// # Signal Safe Methods -/// -/// These methods are guaranteed not to allocate, acquire locks, or perform any -/// other signal-unsafe operations. 
-impl<'a, 'ctx, R: Reader, A: UnwindContextStorage> UnwindTable<'a, 'ctx, R, A> { - /// Construct a new `UnwindTable` for the given - /// `FrameDescriptionEntry`'s CFI unwinding program. - pub fn new>( - section: &'a Section, - bases: &'a BaseAddresses, - ctx: &'ctx mut UnwindContext, - fde: &FrameDescriptionEntry, - ) -> Result { - ctx.initialize(section, bases, fde.cie())?; - Ok(Self::new_for_fde(section, bases, ctx, fde)) - } - - fn new_for_fde>( - section: &'a Section, - bases: &'a BaseAddresses, - ctx: &'ctx mut UnwindContext, - fde: &FrameDescriptionEntry, - ) -> Self { - assert!(ctx.stack.len() >= 1); - UnwindTable { - code_alignment_factor: Wrapping(fde.cie().code_alignment_factor()), - data_alignment_factor: Wrapping(fde.cie().data_alignment_factor()), - next_start_address: fde.initial_address(), - last_end_address: fde.initial_address().wrapping_add(fde.len()), - returned_last_row: false, - current_row_valid: false, - instructions: fde.instructions(section, bases), - ctx, - } - } - - fn new_for_cie>( - section: &'a Section, - bases: &'a BaseAddresses, - ctx: &'ctx mut UnwindContext, - cie: &CommonInformationEntry, - ) -> Self { - assert!(ctx.stack.len() >= 1); - UnwindTable { - code_alignment_factor: Wrapping(cie.code_alignment_factor()), - data_alignment_factor: Wrapping(cie.data_alignment_factor()), - next_start_address: 0, - last_end_address: 0, - returned_last_row: false, - current_row_valid: false, - instructions: cie.instructions(section, bases), - ctx, - } - } - - /// Evaluate call frame instructions until the next row of the table is - /// completed, and return it. - /// - /// Unfortunately, this cannot be used with `FallibleIterator` because of - /// the restricted lifetime of the yielded item. 
- pub fn next_row(&mut self) -> Result>> { - assert!(self.ctx.stack.len() >= 1); - self.ctx.set_start_address(self.next_start_address); - self.current_row_valid = false; - - loop { - match self.instructions.next() { - Err(e) => return Err(e), - - Ok(None) => { - if self.returned_last_row { - return Ok(None); - } - - let row = self.ctx.row_mut(); - row.end_address = self.last_end_address; - - self.returned_last_row = true; - self.current_row_valid = true; - return Ok(Some(row)); - } - - Ok(Some(instruction)) => { - if self.evaluate(instruction)? { - self.current_row_valid = true; - return Ok(Some(self.ctx.row())); - } - } - }; - } - } - - /// Returns the current row with the lifetime of the context. - pub fn into_current_row(self) -> Option<&'ctx UnwindTableRow> { - if self.current_row_valid { - Some(self.ctx.row()) - } else { - None - } - } - - /// Evaluate one call frame instruction. Return `Ok(true)` if the row is - /// complete, `Ok(false)` otherwise. - fn evaluate(&mut self, instruction: CallFrameInstruction) -> Result { - use crate::CallFrameInstruction::*; - - match instruction { - // Instructions that complete the current row and advance the - // address for the next row. - SetLoc { address } => { - if address < self.ctx.start_address() { - return Err(Error::InvalidAddressRange); - } - - self.next_start_address = address; - self.ctx.row_mut().end_address = self.next_start_address; - return Ok(true); - } - AdvanceLoc { delta } => { - let delta = Wrapping(u64::from(delta)) * self.code_alignment_factor; - self.next_start_address = (Wrapping(self.ctx.start_address()) + delta).0; - self.ctx.row_mut().end_address = self.next_start_address; - return Ok(true); - } - - // Instructions that modify the CFA. 
- DefCfa { register, offset } => { - self.ctx.set_cfa(CfaRule::RegisterAndOffset { - register, - offset: offset as i64, - }); - } - DefCfaSf { - register, - factored_offset, - } => { - let data_align = self.data_alignment_factor; - self.ctx.set_cfa(CfaRule::RegisterAndOffset { - register, - offset: (Wrapping(factored_offset) * data_align).0, - }); - } - DefCfaRegister { register } => { - if let CfaRule::RegisterAndOffset { - register: ref mut reg, - .. - } = *self.ctx.cfa_mut() - { - *reg = register; - } else { - return Err(Error::CfiInstructionInInvalidContext); - } - } - DefCfaOffset { offset } => { - if let CfaRule::RegisterAndOffset { - offset: ref mut off, - .. - } = *self.ctx.cfa_mut() - { - *off = offset as i64; - } else { - return Err(Error::CfiInstructionInInvalidContext); - } - } - DefCfaOffsetSf { factored_offset } => { - if let CfaRule::RegisterAndOffset { - offset: ref mut off, - .. - } = *self.ctx.cfa_mut() - { - let data_align = self.data_alignment_factor; - *off = (Wrapping(factored_offset) * data_align).0; - } else { - return Err(Error::CfiInstructionInInvalidContext); - } - } - DefCfaExpression { expression } => { - self.ctx.set_cfa(CfaRule::Expression(expression)); - } - - // Instructions that define register rules. 
- Undefined { register } => { - self.ctx - .set_register_rule(register, RegisterRule::Undefined)?; - } - SameValue { register } => { - self.ctx - .set_register_rule(register, RegisterRule::SameValue)?; - } - Offset { - register, - factored_offset, - } => { - let offset = Wrapping(factored_offset as i64) * self.data_alignment_factor; - self.ctx - .set_register_rule(register, RegisterRule::Offset(offset.0))?; - } - OffsetExtendedSf { - register, - factored_offset, - } => { - let offset = Wrapping(factored_offset) * self.data_alignment_factor; - self.ctx - .set_register_rule(register, RegisterRule::Offset(offset.0))?; - } - ValOffset { - register, - factored_offset, - } => { - let offset = Wrapping(factored_offset as i64) * self.data_alignment_factor; - self.ctx - .set_register_rule(register, RegisterRule::ValOffset(offset.0))?; - } - ValOffsetSf { - register, - factored_offset, - } => { - let offset = Wrapping(factored_offset) * self.data_alignment_factor; - self.ctx - .set_register_rule(register, RegisterRule::ValOffset(offset.0))?; - } - Register { - dest_register, - src_register, - } => { - self.ctx - .set_register_rule(dest_register, RegisterRule::Register(src_register))?; - } - Expression { - register, - expression, - } => { - let expression = RegisterRule::Expression(expression); - self.ctx.set_register_rule(register, expression)?; - } - ValExpression { - register, - expression, - } => { - let expression = RegisterRule::ValExpression(expression); - self.ctx.set_register_rule(register, expression)?; - } - Restore { register } => { - let initial_rule = if let Some(rule) = self.ctx.get_initial_rule(register) { - rule - } else { - // Can't restore the initial rule when we are - // evaluating the initial rules! - return Err(Error::CfiInstructionInInvalidContext); - }; - - self.ctx.set_register_rule(register, initial_rule)?; - } - - // Row push and pop instructions. 
- RememberState => { - self.ctx.push_row()?; - } - RestoreState => { - // Pop state while preserving current location. - let start_address = self.ctx.start_address(); - self.ctx.pop_row()?; - self.ctx.set_start_address(start_address); - } - - // GNU Extension. Save the size somewhere so the unwinder can use - // it when restoring IP - ArgsSize { size } => { - self.ctx.row_mut().saved_args_size = size; - } - - // AArch64 extension. - NegateRaState => { - let register = crate::AArch64::RA_SIGN_STATE; - let value = match self.ctx.row().register(register) { - RegisterRule::Undefined => 0, - RegisterRule::Constant(value) => value, - _ => return Err(Error::CfiInstructionInInvalidContext), - }; - self.ctx - .set_register_rule(register, RegisterRule::Constant(value ^ 1))?; - } - - // No operation. - Nop => {} - }; - - Ok(false) - } -} - -// We tend to have very few register rules: usually only a couple. Even if we -// have a rule for every register, on x86-64 with SSE and everything we're -// talking about ~100 rules. So rather than keeping the rules in a hash map, or -// a vector indexed by register number (which would lead to filling lots of -// empty entries), we store them as a vec of (register number, register rule) -// pairs. -// -// Additionally, because every register's default rule is implicitly -// `RegisterRule::Undefined`, we never store a register's rule in this vec if it -// is undefined and save a little bit more space and do a little fewer -// comparisons that way. -// -// The maximum number of rules preallocated by libunwind is 97 for AArch64, 128 -// for ARM, and even 188 for MIPS. It is extremely unlikely to encounter this -// many register rules in practice. 
-// -// See: -// - https://github.com/libunwind/libunwind/blob/11fd461095ea98f4b3e3a361f5a8a558519363fa/include/tdep-x86_64/dwarf-config.h#L36 -// - https://github.com/libunwind/libunwind/blob/11fd461095ea98f4b3e3a361f5a8a558519363fa/include/tdep-aarch64/dwarf-config.h#L32 -// - https://github.com/libunwind/libunwind/blob/11fd461095ea98f4b3e3a361f5a8a558519363fa/include/tdep-arm/dwarf-config.h#L31 -// - https://github.com/libunwind/libunwind/blob/11fd461095ea98f4b3e3a361f5a8a558519363fa/include/tdep-mips/dwarf-config.h#L31 -struct RegisterRuleMap = StoreOnHeap> { - rules: ArrayVec, -} - -impl> Debug for RegisterRuleMap { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("RegisterRuleMap") - .field("rules", &self.rules) - .finish() - } -} - -impl> Clone for RegisterRuleMap { - fn clone(&self) -> Self { - Self { - rules: self.rules.clone(), - } - } -} - -impl> Default for RegisterRuleMap { - fn default() -> Self { - RegisterRuleMap { - rules: Default::default(), - } - } -} - -/// # Signal Safe Methods -/// -/// These methods are guaranteed not to allocate, acquire locks, or perform any -/// other signal-unsafe operations. 
-impl> RegisterRuleMap { - fn is_default(&self) -> bool { - self.rules.is_empty() - } - - fn get(&self, register: Register) -> RegisterRule { - self.rules - .iter() - .find(|rule| rule.0 == register) - .map(|r| { - debug_assert!(r.1.is_defined()); - r.1.clone() - }) - .unwrap_or(RegisterRule::Undefined) - } - - fn set(&mut self, register: Register, rule: RegisterRule) -> Result<()> { - if !rule.is_defined() { - let idx = self - .rules - .iter() - .enumerate() - .find(|&(_, r)| r.0 == register) - .map(|(i, _)| i); - if let Some(idx) = idx { - self.rules.swap_remove(idx); - } - return Ok(()); - } - - for &mut (reg, ref mut old_rule) in &mut *self.rules { - debug_assert!(old_rule.is_defined()); - if reg == register { - *old_rule = rule; - return Ok(()); - } - } - - self.rules - .try_push((register, rule)) - .map_err(|_| Error::TooManyRegisterRules) - } - - fn iter(&self) -> RegisterRuleIter { - RegisterRuleIter(self.rules.iter()) - } -} - -impl<'a, R, S: UnwindContextStorage> FromIterator<&'a (Register, RegisterRule)> - for RegisterRuleMap -where - R: 'a + Reader, -{ - fn from_iter(iter: T) -> Self - where - T: IntoIterator)>, - { - let iter = iter.into_iter(); - let mut rules = RegisterRuleMap::default(); - for &(reg, ref rule) in iter.filter(|r| r.1.is_defined()) { - rules.set(reg, rule.clone()).expect( - "This is only used in tests, impl isn't exposed publicly. - If you trip this, fix your test", - ); - } - rules - } -} - -impl> PartialEq for RegisterRuleMap -where - R: Reader + PartialEq, -{ - fn eq(&self, rhs: &Self) -> bool { - for &(reg, ref rule) in &*self.rules { - debug_assert!(rule.is_defined()); - if *rule != rhs.get(reg) { - return false; - } - } - - for &(reg, ref rhs_rule) in &*rhs.rules { - debug_assert!(rhs_rule.is_defined()); - if *rhs_rule != self.get(reg) { - return false; - } - } - - true - } -} - -impl> Eq for RegisterRuleMap where R: Reader + Eq {} - -/// An unordered iterator for register rules. 
-#[derive(Debug, Clone)] -pub struct RegisterRuleIter<'iter, R>(::core::slice::Iter<'iter, (Register, RegisterRule)>) -where - R: Reader; - -impl<'iter, R: Reader> Iterator for RegisterRuleIter<'iter, R> { - type Item = &'iter (Register, RegisterRule); - - fn next(&mut self) -> Option { - self.0.next() - } -} - -/// A row in the virtual unwind table that describes how to find the values of -/// the registers in the *previous* frame for a range of PC addresses. -#[derive(PartialEq, Eq)] -pub struct UnwindTableRow = StoreOnHeap> { - start_address: u64, - end_address: u64, - saved_args_size: u64, - cfa: CfaRule, - registers: RegisterRuleMap, -} - -impl> Debug for UnwindTableRow { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("UnwindTableRow") - .field("start_address", &self.start_address) - .field("end_address", &self.end_address) - .field("saved_args_size", &self.saved_args_size) - .field("cfa", &self.cfa) - .field("registers", &self.registers) - .finish() - } -} - -impl> Clone for UnwindTableRow { - fn clone(&self) -> Self { - Self { - start_address: self.start_address, - end_address: self.end_address, - saved_args_size: self.saved_args_size, - cfa: self.cfa.clone(), - registers: self.registers.clone(), - } - } -} - -impl> Default for UnwindTableRow { - fn default() -> Self { - UnwindTableRow { - start_address: 0, - end_address: 0, - saved_args_size: 0, - cfa: Default::default(), - registers: Default::default(), - } - } -} - -impl> UnwindTableRow { - fn is_default(&self) -> bool { - self.start_address == 0 - && self.end_address == 0 - && self.cfa.is_default() - && self.registers.is_default() - } - - /// Get the starting PC address that this row applies to. - pub fn start_address(&self) -> u64 { - self.start_address - } - - /// Get the end PC address where this row's register rules become - /// unapplicable. 
- /// - /// In other words, this row describes how to recover the last frame's - /// registers for all PCs where `row.start_address() <= PC < - /// row.end_address()`. This row does NOT describe how to recover registers - /// when `PC == row.end_address()`. - pub fn end_address(&self) -> u64 { - self.end_address - } - - /// Return `true` if the given `address` is within this row's address range, - /// `false` otherwise. - pub fn contains(&self, address: u64) -> bool { - self.start_address <= address && address < self.end_address - } - - /// Returns the amount of args currently on the stack. - /// - /// When unwinding, if the personality function requested a change in IP, - /// the SP needs to be adjusted by saved_args_size. - pub fn saved_args_size(&self) -> u64 { - self.saved_args_size - } - - /// Get the canonical frame address (CFA) recovery rule for this row. - pub fn cfa(&self) -> &CfaRule { - &self.cfa - } - - /// Get the register recovery rule for the given register number. - /// - /// The register number mapping is architecture dependent. For example, in - /// the x86-64 ABI the register number mapping is defined in Figure 3.36: - /// - /// > Figure 3.36: DWARF Register Number Mapping - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// > - /// >
Register Name Number Abbreviation
General Purpose Register RAX 0 %rax
General Purpose Register RDX 1 %rdx
General Purpose Register RCX 2 %rcx
General Purpose Register RBX 3 %rbx
General Purpose Register RSI 4 %rsi
General Purpose Register RDI 5 %rdi
General Purpose Register RBP 6 %rbp
Stack Pointer Register RSP 7 %rsp
Extended Integer Registers 8-15 8-15 %r8-%r15
Return Address RA 16
Vector Registers 0–7 17-24 %xmm0–%xmm7
Extended Vector Registers 8–15 25-32 %xmm8–%xmm15
Floating Point Registers 0–7 33-40 %st0–%st7
MMX Registers 0–7 41-48 %mm0–%mm7
Flag Register 49 %rFLAGS
Segment Register ES 50 %es
Segment Register CS 51 %cs
Segment Register SS 52 %ss
Segment Register DS 53 %ds
Segment Register FS 54 %fs
Segment Register GS 55 %gs
Reserved 56-57
FS Base address 58 %fs.base
GS Base address 59 %gs.base
Reserved 60-61
Task Register 62 %tr
LDT Register 63 %ldtr
128-bit Media Control and Status 64 %mxcsr
x87 Control Word 65 %fcw
x87 Status Word 66 %fsw
Upper Vector Registers 16–31 67-82 %xmm16–%xmm31
Reserved 83-117
Vector Mask Registers 0–7 118-125 %k0–%k7
Reserved 126-129
- pub fn register(&self, register: Register) -> RegisterRule { - self.registers.get(register) - } - - /// Iterate over all defined register `(number, rule)` pairs. - /// - /// The rules are not iterated in any guaranteed order. Any register that - /// does not make an appearance in the iterator implicitly has the rule - /// `RegisterRule::Undefined`. - /// - /// ``` - /// # use gimli::{EndianSlice, LittleEndian, UnwindTableRow}; - /// # fn foo<'input>(unwind_table_row: UnwindTableRow>) { - /// for &(register, ref rule) in unwind_table_row.registers() { - /// // ... - /// # drop(register); drop(rule); - /// } - /// # } - /// ``` - pub fn registers(&self) -> RegisterRuleIter { - self.registers.iter() - } -} - -/// The canonical frame address (CFA) recovery rules. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum CfaRule { - /// The CFA is given offset from the given register's value. - RegisterAndOffset { - /// The register containing the base value. - register: Register, - /// The offset from the register's base value. - offset: i64, - }, - /// The CFA is obtained by evaluating this `Reader` as a DWARF expression - /// program. - Expression(Expression), -} - -impl Default for CfaRule { - fn default() -> Self { - CfaRule::RegisterAndOffset { - register: Register(0), - offset: 0, - } - } -} - -impl CfaRule { - fn is_default(&self) -> bool { - match *self { - CfaRule::RegisterAndOffset { register, offset } => { - register == Register(0) && offset == 0 - } - _ => false, - } - } -} - -/// An entry in the abstract CFI table that describes how to find the value of a -/// register. -/// -/// "The register columns contain rules that describe whether a given register -/// has been saved and the rule to find the value for the register in the -/// previous frame." -#[derive(Clone, Debug, PartialEq, Eq)] -#[non_exhaustive] -pub enum RegisterRule { - /// > A register that has this rule has no recoverable value in the previous - /// > frame. 
(By convention, it is not preserved by a callee.) - Undefined, - - /// > This register has not been modified from the previous frame. (By - /// > convention, it is preserved by the callee, but the callee has not - /// > modified it.) - SameValue, - - /// "The previous value of this register is saved at the address CFA+N where - /// CFA is the current CFA value and N is a signed offset." - Offset(i64), - - /// "The previous value of this register is the value CFA+N where CFA is the - /// current CFA value and N is a signed offset." - ValOffset(i64), - - /// "The previous value of this register is stored in another register - /// numbered R." - Register(Register), - - /// "The previous value of this register is located at the address produced - /// by executing the DWARF expression." - Expression(Expression), - - /// "The previous value of this register is the value produced by executing - /// the DWARF expression." - ValExpression(Expression), - - /// "The rule is defined externally to this specification by the augmenter." - Architectural, - - /// This is a pseudo-register with a constant value. - Constant(u64), -} - -impl RegisterRule { - fn is_defined(&self) -> bool { - !matches!(*self, RegisterRule::Undefined) - } -} - -/// A parsed call frame instruction. -#[derive(Clone, Debug, PartialEq, Eq)] -#[non_exhaustive] -pub enum CallFrameInstruction { - // 6.4.2.1 Row Creation Methods - /// > 1. DW_CFA_set_loc - /// > - /// > The DW_CFA_set_loc instruction takes a single operand that represents - /// > a target address. The required action is to create a new table row - /// > using the specified address as the location. All other values in the - /// > new row are initially identical to the current row. The new location - /// > value is always greater than the current one. If the segment_size - /// > field of this FDE's CIE is non- zero, the initial location is preceded - /// > by a segment selector of the given length. - SetLoc { - /// The target address. 
- address: u64, - }, - - /// The `AdvanceLoc` instruction is used for all of `DW_CFA_advance_loc` and - /// `DW_CFA_advance_loc{1,2,4}`. - /// - /// > 2. DW_CFA_advance_loc - /// > - /// > The DW_CFA_advance instruction takes a single operand (encoded with - /// > the opcode) that represents a constant delta. The required action is - /// > to create a new table row with a location value that is computed by - /// > taking the current entry’s location value and adding the value of - /// > delta * code_alignment_factor. All other values in the new row are - /// > initially identical to the current row. - AdvanceLoc { - /// The delta to be added to the current address. - delta: u32, - }, - - // 6.4.2.2 CFA Definition Methods - /// > 1. DW_CFA_def_cfa - /// > - /// > The DW_CFA_def_cfa instruction takes two unsigned LEB128 operands - /// > representing a register number and a (non-factored) offset. The - /// > required action is to define the current CFA rule to use the provided - /// > register and offset. - DefCfa { - /// The target register's number. - register: Register, - /// The non-factored offset. - offset: u64, - }, - - /// > 2. DW_CFA_def_cfa_sf - /// > - /// > The DW_CFA_def_cfa_sf instruction takes two operands: an unsigned - /// > LEB128 value representing a register number and a signed LEB128 - /// > factored offset. This instruction is identical to DW_CFA_def_cfa - /// > except that the second operand is signed and factored. The resulting - /// > offset is factored_offset * data_alignment_factor. - DefCfaSf { - /// The target register's number. - register: Register, - /// The factored offset. - factored_offset: i64, - }, - - /// > 3. DW_CFA_def_cfa_register - /// > - /// > The DW_CFA_def_cfa_register instruction takes a single unsigned LEB128 - /// > operand representing a register number. The required action is to - /// > define the current CFA rule to use the provided register (but to keep - /// > the old offset). 
This operation is valid only if the current CFA rule - /// > is defined to use a register and offset. - DefCfaRegister { - /// The target register's number. - register: Register, - }, - - /// > 4. DW_CFA_def_cfa_offset - /// > - /// > The DW_CFA_def_cfa_offset instruction takes a single unsigned LEB128 - /// > operand representing a (non-factored) offset. The required action is - /// > to define the current CFA rule to use the provided offset (but to keep - /// > the old register). This operation is valid only if the current CFA - /// > rule is defined to use a register and offset. - DefCfaOffset { - /// The non-factored offset. - offset: u64, - }, - - /// > 5. DW_CFA_def_cfa_offset_sf - /// > - /// > The DW_CFA_def_cfa_offset_sf instruction takes a signed LEB128 operand - /// > representing a factored offset. This instruction is identical to - /// > DW_CFA_def_cfa_offset except that the operand is signed and - /// > factored. The resulting offset is factored_offset * - /// > data_alignment_factor. This operation is valid only if the current CFA - /// > rule is defined to use a register and offset. - DefCfaOffsetSf { - /// The factored offset. - factored_offset: i64, - }, - - /// > 6. DW_CFA_def_cfa_expression - /// > - /// > The DW_CFA_def_cfa_expression instruction takes a single operand - /// > encoded as a DW_FORM_exprloc value representing a DWARF - /// > expression. The required action is to establish that expression as the - /// > means by which the current CFA is computed. - DefCfaExpression { - /// The DWARF expression. - expression: Expression, - }, - - // 6.4.2.3 Register Rule Instructions - /// > 1. DW_CFA_undefined - /// > - /// > The DW_CFA_undefined instruction takes a single unsigned LEB128 - /// > operand that represents a register number. The required action is to - /// > set the rule for the specified register to “undefined.†- Undefined { - /// The target register's number. - register: Register, - }, - - /// > 2. 
DW_CFA_same_value - /// > - /// > The DW_CFA_same_value instruction takes a single unsigned LEB128 - /// > operand that represents a register number. The required action is to - /// > set the rule for the specified register to “same value.†- SameValue { - /// The target register's number. - register: Register, - }, - - /// The `Offset` instruction represents both `DW_CFA_offset` and - /// `DW_CFA_offset_extended`. - /// - /// > 3. DW_CFA_offset - /// > - /// > The DW_CFA_offset instruction takes two operands: a register number - /// > (encoded with the opcode) and an unsigned LEB128 constant representing - /// > a factored offset. The required action is to change the rule for the - /// > register indicated by the register number to be an offset(N) rule - /// > where the value of N is factored offset * data_alignment_factor. - Offset { - /// The target register's number. - register: Register, - /// The factored offset. - factored_offset: u64, - }, - - /// > 5. DW_CFA_offset_extended_sf - /// > - /// > The DW_CFA_offset_extended_sf instruction takes two operands: an - /// > unsigned LEB128 value representing a register number and a signed - /// > LEB128 factored offset. This instruction is identical to - /// > DW_CFA_offset_extended except that the second operand is signed and - /// > factored. The resulting offset is factored_offset * - /// > data_alignment_factor. - OffsetExtendedSf { - /// The target register's number. - register: Register, - /// The factored offset. - factored_offset: i64, - }, - - /// > 6. DW_CFA_val_offset - /// > - /// > The DW_CFA_val_offset instruction takes two unsigned LEB128 operands - /// > representing a register number and a factored offset. The required - /// > action is to change the rule for the register indicated by the - /// > register number to be a val_offset(N) rule where the value of N is - /// > factored_offset * data_alignment_factor. - ValOffset { - /// The target register's number. 
- register: Register, - /// The factored offset. - factored_offset: u64, - }, - - /// > 7. DW_CFA_val_offset_sf - /// > - /// > The DW_CFA_val_offset_sf instruction takes two operands: an unsigned - /// > LEB128 value representing a register number and a signed LEB128 - /// > factored offset. This instruction is identical to DW_CFA_val_offset - /// > except that the second operand is signed and factored. The resulting - /// > offset is factored_offset * data_alignment_factor. - ValOffsetSf { - /// The target register's number. - register: Register, - /// The factored offset. - factored_offset: i64, - }, - - /// > 8. DW_CFA_register - /// > - /// > The DW_CFA_register instruction takes two unsigned LEB128 operands - /// > representing register numbers. The required action is to set the rule - /// > for the first register to be register(R) where R is the second - /// > register. - Register { - /// The number of the register whose rule is being changed. - dest_register: Register, - /// The number of the register where the other register's value can be - /// found. - src_register: Register, - }, - - /// > 9. DW_CFA_expression - /// > - /// > The DW_CFA_expression instruction takes two operands: an unsigned - /// > LEB128 value representing a register number, and a DW_FORM_block value - /// > representing a DWARF expression. The required action is to change the - /// > rule for the register indicated by the register number to be an - /// > expression(E) rule where E is the DWARF expression. That is, the DWARF - /// > expression computes the address. The value of the CFA is pushed on the - /// > DWARF evaluation stack prior to execution of the DWARF expression. - Expression { - /// The target register's number. - register: Register, - /// The DWARF expression. - expression: Expression, - }, - - /// > 10. 
DW_CFA_val_expression - /// > - /// > The DW_CFA_val_expression instruction takes two operands: an unsigned - /// > LEB128 value representing a register number, and a DW_FORM_block value - /// > representing a DWARF expression. The required action is to change the - /// > rule for the register indicated by the register number to be a - /// > val_expression(E) rule where E is the DWARF expression. That is, the - /// > DWARF expression computes the value of the given register. The value - /// > of the CFA is pushed on the DWARF evaluation stack prior to execution - /// > of the DWARF expression. - ValExpression { - /// The target register's number. - register: Register, - /// The DWARF expression. - expression: Expression, - }, - - /// The `Restore` instruction represents both `DW_CFA_restore` and - /// `DW_CFA_restore_extended`. - /// - /// > 11. DW_CFA_restore - /// > - /// > The DW_CFA_restore instruction takes a single operand (encoded with - /// > the opcode) that represents a register number. The required action is - /// > to change the rule for the indicated register to the rule assigned it - /// > by the initial_instructions in the CIE. - Restore { - /// The register to be reset. - register: Register, - }, - - // 6.4.2.4 Row State Instructions - /// > 1. DW_CFA_remember_state - /// > - /// > The DW_CFA_remember_state instruction takes no operands. The required - /// > action is to push the set of rules for every register onto an implicit - /// > stack. - RememberState, - - /// > 2. DW_CFA_restore_state - /// > - /// > The DW_CFA_restore_state instruction takes no operands. The required - /// > action is to pop the set of rules off the implicit stack and place - /// > them in the current row. - RestoreState, - - /// > DW_CFA_GNU_args_size - /// > - /// > GNU Extension - /// > - /// > The DW_CFA_GNU_args_size instruction takes an unsigned LEB128 operand - /// > representing an argument size. 
This instruction specifies the total of - /// > the size of the arguments which have been pushed onto the stack. - ArgsSize { - /// The size of the arguments which have been pushed onto the stack - size: u64, - }, - - /// > DW_CFA_AARCH64_negate_ra_state - /// > - /// > AArch64 Extension - /// > - /// > The DW_CFA_AARCH64_negate_ra_state operation negates bit 0 of the - /// > RA_SIGN_STATE pseudo-register. It does not take any operands. The - /// > DW_CFA_AARCH64_negate_ra_state must not be mixed with other DWARF Register - /// > Rule Instructions on the RA_SIGN_STATE pseudo-register in one Common - /// > Information Entry (CIE) and Frame Descriptor Entry (FDE) program sequence. - NegateRaState, - - // 6.4.2.5 Padding Instruction - /// > 1. DW_CFA_nop - /// > - /// > The DW_CFA_nop instruction has no operands and no required actions. It - /// > is used as padding to make a CIE or FDE an appropriate size. - Nop, -} - -const CFI_INSTRUCTION_HIGH_BITS_MASK: u8 = 0b1100_0000; -const CFI_INSTRUCTION_LOW_BITS_MASK: u8 = !CFI_INSTRUCTION_HIGH_BITS_MASK; - -impl CallFrameInstruction { - fn parse( - input: &mut R, - address_encoding: Option, - parameters: &PointerEncodingParameters, - vendor: Vendor, - ) -> Result> { - let instruction = input.read_u8()?; - let high_bits = instruction & CFI_INSTRUCTION_HIGH_BITS_MASK; - - if high_bits == constants::DW_CFA_advance_loc.0 { - let delta = instruction & CFI_INSTRUCTION_LOW_BITS_MASK; - return Ok(CallFrameInstruction::AdvanceLoc { - delta: u32::from(delta), - }); - } - - if high_bits == constants::DW_CFA_offset.0 { - let register = Register((instruction & CFI_INSTRUCTION_LOW_BITS_MASK).into()); - let offset = input.read_uleb128()?; - return Ok(CallFrameInstruction::Offset { - register, - factored_offset: offset, - }); - } - - if high_bits == constants::DW_CFA_restore.0 { - let register = Register((instruction & CFI_INSTRUCTION_LOW_BITS_MASK).into()); - return Ok(CallFrameInstruction::Restore { register }); - } - - 
debug_assert_eq!(high_bits, 0); - let instruction = constants::DwCfa(instruction); - - match instruction { - constants::DW_CFA_nop => Ok(CallFrameInstruction::Nop), - - constants::DW_CFA_set_loc => { - let address = if let Some(encoding) = address_encoding { - parse_encoded_pointer(encoding, parameters, input)?.direct()? - } else { - input.read_address(parameters.address_size)? - }; - Ok(CallFrameInstruction::SetLoc { address }) - } - - constants::DW_CFA_advance_loc1 => { - let delta = input.read_u8()?; - Ok(CallFrameInstruction::AdvanceLoc { - delta: u32::from(delta), - }) - } - - constants::DW_CFA_advance_loc2 => { - let delta = input.read_u16()?; - Ok(CallFrameInstruction::AdvanceLoc { - delta: u32::from(delta), - }) - } - - constants::DW_CFA_advance_loc4 => { - let delta = input.read_u32()?; - Ok(CallFrameInstruction::AdvanceLoc { delta }) - } - - constants::DW_CFA_offset_extended => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - let offset = input.read_uleb128()?; - Ok(CallFrameInstruction::Offset { - register, - factored_offset: offset, - }) - } - - constants::DW_CFA_restore_extended => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - Ok(CallFrameInstruction::Restore { register }) - } - - constants::DW_CFA_undefined => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - Ok(CallFrameInstruction::Undefined { register }) - } - - constants::DW_CFA_same_value => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - Ok(CallFrameInstruction::SameValue { register }) - } - - constants::DW_CFA_register => { - let dest = input.read_uleb128().and_then(Register::from_u64)?; - let src = input.read_uleb128().and_then(Register::from_u64)?; - Ok(CallFrameInstruction::Register { - dest_register: dest, - src_register: src, - }) - } - - constants::DW_CFA_remember_state => Ok(CallFrameInstruction::RememberState), - - constants::DW_CFA_restore_state => Ok(CallFrameInstruction::RestoreState), 
- - constants::DW_CFA_def_cfa => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - let offset = input.read_uleb128()?; - Ok(CallFrameInstruction::DefCfa { register, offset }) - } - - constants::DW_CFA_def_cfa_register => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - Ok(CallFrameInstruction::DefCfaRegister { register }) - } - - constants::DW_CFA_def_cfa_offset => { - let offset = input.read_uleb128()?; - Ok(CallFrameInstruction::DefCfaOffset { offset }) - } - - constants::DW_CFA_def_cfa_expression => { - let len = input.read_uleb128().and_then(R::Offset::from_u64)?; - let expression = input.split(len)?; - Ok(CallFrameInstruction::DefCfaExpression { - expression: Expression(expression), - }) - } - - constants::DW_CFA_expression => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - let len = input.read_uleb128().and_then(R::Offset::from_u64)?; - let expression = input.split(len)?; - Ok(CallFrameInstruction::Expression { - register, - expression: Expression(expression), - }) - } - - constants::DW_CFA_offset_extended_sf => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - let offset = input.read_sleb128()?; - Ok(CallFrameInstruction::OffsetExtendedSf { - register, - factored_offset: offset, - }) - } - - constants::DW_CFA_def_cfa_sf => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - let offset = input.read_sleb128()?; - Ok(CallFrameInstruction::DefCfaSf { - register, - factored_offset: offset, - }) - } - - constants::DW_CFA_def_cfa_offset_sf => { - let offset = input.read_sleb128()?; - Ok(CallFrameInstruction::DefCfaOffsetSf { - factored_offset: offset, - }) - } - - constants::DW_CFA_val_offset => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - let offset = input.read_uleb128()?; - Ok(CallFrameInstruction::ValOffset { - register, - factored_offset: offset, - }) - } - - constants::DW_CFA_val_offset_sf => { - let register = 
input.read_uleb128().and_then(Register::from_u64)?; - let offset = input.read_sleb128()?; - Ok(CallFrameInstruction::ValOffsetSf { - register, - factored_offset: offset, - }) - } - - constants::DW_CFA_val_expression => { - let register = input.read_uleb128().and_then(Register::from_u64)?; - let len = input.read_uleb128().and_then(R::Offset::from_u64)?; - let expression = input.split(len)?; - Ok(CallFrameInstruction::ValExpression { - register, - expression: Expression(expression), - }) - } - - constants::DW_CFA_GNU_args_size => { - let size = input.read_uleb128()?; - Ok(CallFrameInstruction::ArgsSize { size }) - } - - constants::DW_CFA_AARCH64_negate_ra_state if vendor == Vendor::AArch64 => { - Ok(CallFrameInstruction::NegateRaState) - } - - otherwise => Err(Error::UnknownCallFrameInstruction(otherwise)), - } - } -} - -/// A lazy iterator parsing call frame instructions. -/// -/// Can be [used with -/// `FallibleIterator`](./index.html#using-with-fallibleiterator). -#[derive(Clone, Debug)] -pub struct CallFrameInstructionIter<'a, R: Reader> { - input: R, - address_encoding: Option, - parameters: PointerEncodingParameters<'a, R>, - vendor: Vendor, -} - -impl<'a, R: Reader> CallFrameInstructionIter<'a, R> { - /// Parse the next call frame instruction. - pub fn next(&mut self) -> Result>> { - if self.input.is_empty() { - return Ok(None); - } - - match CallFrameInstruction::parse( - &mut self.input, - self.address_encoding, - &self.parameters, - self.vendor, - ) { - Ok(instruction) => Ok(Some(instruction)), - Err(e) => { - self.input.empty(); - Err(e) - } - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl<'a, R: Reader> fallible_iterator::FallibleIterator for CallFrameInstructionIter<'a, R> { - type Item = CallFrameInstruction; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - CallFrameInstructionIter::next(self) - } -} - -/// Parse a `DW_EH_PE_*` pointer encoding. 
-#[doc(hidden)] -#[inline] -fn parse_pointer_encoding(input: &mut R) -> Result { - let eh_pe = input.read_u8()?; - let eh_pe = constants::DwEhPe(eh_pe); - - if eh_pe.is_valid_encoding() { - Ok(eh_pe) - } else { - Err(Error::UnknownPointerEncoding) - } -} - -/// A decoded pointer. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum Pointer { - /// This value is the decoded pointer value. - Direct(u64), - - /// This value is *not* the pointer value, but points to the address of - /// where the real pointer value lives. In other words, deref this pointer - /// to get the real pointer value. - /// - /// Chase this pointer at your own risk: do you trust the DWARF data it came - /// from? - Indirect(u64), -} - -impl Default for Pointer { - #[inline] - fn default() -> Self { - Pointer::Direct(0) - } -} - -impl Pointer { - #[inline] - fn new(encoding: constants::DwEhPe, address: u64) -> Pointer { - if encoding.is_indirect() { - Pointer::Indirect(address) - } else { - Pointer::Direct(address) - } - } - - /// Return the direct pointer value. - #[inline] - pub fn direct(self) -> Result { - match self { - Pointer::Direct(p) => Ok(p), - Pointer::Indirect(_) => Err(Error::UnsupportedPointerEncoding), - } - } - - /// Return the pointer value, discarding indirectness information. 
- #[inline] - pub fn pointer(self) -> u64 { - match self { - Pointer::Direct(p) | Pointer::Indirect(p) => p, - } - } -} - -#[derive(Clone, Debug)] -struct PointerEncodingParameters<'a, R: Reader> { - bases: &'a SectionBaseAddresses, - func_base: Option, - address_size: u8, - section: &'a R, -} - -fn parse_encoded_pointer( - encoding: constants::DwEhPe, - parameters: &PointerEncodingParameters, - input: &mut R, -) -> Result { - // TODO: check this once only in parse_pointer_encoding - if !encoding.is_valid_encoding() { - return Err(Error::UnknownPointerEncoding); - } - - if encoding == constants::DW_EH_PE_omit { - return Err(Error::CannotParseOmitPointerEncoding); - } - - let base = match encoding.application() { - constants::DW_EH_PE_absptr => 0, - constants::DW_EH_PE_pcrel => { - if let Some(section_base) = parameters.bases.section { - let offset_from_section = input.offset_from(parameters.section); - section_base.wrapping_add(offset_from_section.into_u64()) - } else { - return Err(Error::PcRelativePointerButSectionBaseIsUndefined); - } - } - constants::DW_EH_PE_textrel => { - if let Some(text) = parameters.bases.text { - text - } else { - return Err(Error::TextRelativePointerButTextBaseIsUndefined); - } - } - constants::DW_EH_PE_datarel => { - if let Some(data) = parameters.bases.data { - data - } else { - return Err(Error::DataRelativePointerButDataBaseIsUndefined); - } - } - constants::DW_EH_PE_funcrel => { - if let Some(func) = parameters.func_base { - func - } else { - return Err(Error::FuncRelativePointerInBadContext); - } - } - constants::DW_EH_PE_aligned => return Err(Error::UnsupportedPointerEncoding), - _ => unreachable!(), - }; - - let offset = match encoding.format() { - // Unsigned variants. 
- constants::DW_EH_PE_absptr => input.read_address(parameters.address_size), - constants::DW_EH_PE_uleb128 => input.read_uleb128(), - constants::DW_EH_PE_udata2 => input.read_u16().map(u64::from), - constants::DW_EH_PE_udata4 => input.read_u32().map(u64::from), - constants::DW_EH_PE_udata8 => input.read_u64(), - - // Signed variants. Here we sign extend the values (happens by - // default when casting a signed integer to a larger range integer - // in Rust), return them as u64, and rely on wrapping addition to do - // the right thing when adding these offsets to their bases. - constants::DW_EH_PE_sleb128 => input.read_sleb128().map(|a| a as u64), - constants::DW_EH_PE_sdata2 => input.read_i16().map(|a| a as u64), - constants::DW_EH_PE_sdata4 => input.read_i32().map(|a| a as u64), - constants::DW_EH_PE_sdata8 => input.read_i64().map(|a| a as u64), - - // That was all of the valid encoding formats. - _ => unreachable!(), - }?; - - Ok(Pointer::new(encoding, base.wrapping_add(offset))) -} - -#[cfg(test)] -mod tests { - use super::*; - use super::{parse_cfi_entry, AugmentationData, RegisterRuleMap, UnwindContext}; - use crate::common::Format; - use crate::constants; - use crate::endianity::{BigEndian, Endianity, LittleEndian, NativeEndian}; - use crate::read::{ - EndianSlice, Error, Expression, Pointer, ReaderOffsetId, Result, Section as ReadSection, - }; - use crate::test_util::GimliSectionMethods; - use alloc::boxed::Box; - use alloc::vec::Vec; - use core::marker::PhantomData; - use core::mem; - use core::u64; - use test_assembler::{Endian, Label, LabelMaker, LabelOrNum, Section, ToLabelOrNum}; - - // Ensure each test tries to read the same section kind that it wrote. - #[derive(Clone, Copy)] - struct SectionKind

(PhantomData
); - - impl SectionKind { - fn endian<'input, E>(self) -> Endian - where - E: Endianity, - T: UnwindSection>, - T::Offset: UnwindOffset, - { - if E::default().is_big_endian() { - Endian::Big - } else { - Endian::Little - } - } - - fn section<'input, E>(self, contents: &'input [u8]) -> T - where - E: Endianity, - T: UnwindSection> + ReadSection>, - T::Offset: UnwindOffset, - { - EndianSlice::new(contents, E::default()).into() - } - } - - fn debug_frame_le<'a>() -> SectionKind>> { - SectionKind(PhantomData) - } - - fn debug_frame_be<'a>() -> SectionKind>> { - SectionKind(PhantomData) - } - - fn eh_frame_le<'a>() -> SectionKind>> { - SectionKind(PhantomData) - } - - fn parse_fde( - section: Section, - input: &mut R, - get_cie: F, - ) -> Result> - where - R: Reader, - Section: UnwindSection, - O: UnwindOffset, - F: FnMut(&Section, &BaseAddresses, O) -> Result>, - { - let bases = Default::default(); - match parse_cfi_entry(&bases, §ion, input) { - Ok(Some(CieOrFde::Fde(partial))) => partial.parse(get_cie), - Ok(_) => Err(Error::NoEntryAtGivenOffset), - Err(e) => Err(e), - } - } - - // Mixin methods for `Section` to help define binary test data. 
- - trait CfiSectionMethods: GimliSectionMethods { - fn cie<'aug, 'input, E, T>( - self, - _kind: SectionKind, - augmentation: Option<&'aug str>, - cie: &mut CommonInformationEntry>, - ) -> Self - where - E: Endianity, - T: UnwindSection>, - T::Offset: UnwindOffset; - fn fde<'a, 'input, E, T, L>( - self, - _kind: SectionKind, - cie_offset: L, - fde: &mut FrameDescriptionEntry>, - ) -> Self - where - E: Endianity, - T: UnwindSection>, - T::Offset: UnwindOffset, - L: ToLabelOrNum<'a, u64>; - } - - impl CfiSectionMethods for Section { - fn cie<'aug, 'input, E, T>( - self, - _kind: SectionKind, - augmentation: Option<&'aug str>, - cie: &mut CommonInformationEntry>, - ) -> Self - where - E: Endianity, - T: UnwindSection>, - T::Offset: UnwindOffset, - { - cie.offset = self.size() as _; - let length = Label::new(); - let start = Label::new(); - let end = Label::new(); - - let section = match cie.format { - Format::Dwarf32 => self.D32(&length).mark(&start).D32(0xffff_ffff), - Format::Dwarf64 => { - let section = self.D32(0xffff_ffff); - section.D64(&length).mark(&start).D64(0xffff_ffff_ffff_ffff) - } - }; - - let mut section = section.D8(cie.version); - - if let Some(augmentation) = augmentation { - section = section.append_bytes(augmentation.as_bytes()); - } - - // Null terminator for augmentation string. 
- let section = section.D8(0); - - let section = if T::has_address_and_segment_sizes(cie.version) { - section.D8(cie.address_size).D8(cie.segment_size) - } else { - section - }; - - let section = section - .uleb(cie.code_alignment_factor) - .sleb(cie.data_alignment_factor) - .uleb(cie.return_address_register.0.into()) - .append_bytes(cie.initial_instructions.slice()) - .mark(&end); - - cie.length = (&end - &start) as usize; - length.set_const(cie.length as u64); - - section - } - - fn fde<'a, 'input, E, T, L>( - self, - _kind: SectionKind, - cie_offset: L, - fde: &mut FrameDescriptionEntry>, - ) -> Self - where - E: Endianity, - T: UnwindSection>, - T::Offset: UnwindOffset, - L: ToLabelOrNum<'a, u64>, - { - fde.offset = self.size() as _; - let length = Label::new(); - let start = Label::new(); - let end = Label::new(); - - assert_eq!(fde.format, fde.cie.format); - - let section = match T::cie_offset_encoding(fde.format) { - CieOffsetEncoding::U32 => { - let section = self.D32(&length).mark(&start); - match cie_offset.to_labelornum() { - LabelOrNum::Label(ref l) => section.D32(l), - LabelOrNum::Num(o) => section.D32(o as u32), - } - } - CieOffsetEncoding::U64 => { - let section = self.D32(0xffff_ffff); - section.D64(&length).mark(&start).D64(cie_offset) - } - }; - - let section = match fde.cie.segment_size { - 0 => section, - 4 => section.D32(fde.initial_segment as u32), - 8 => section.D64(fde.initial_segment), - x => panic!("Unsupported test segment size: {}", x), - }; - - let section = match fde.cie.address_size { - 4 => section - .D32(fde.initial_address() as u32) - .D32(fde.len() as u32), - 8 => section.D64(fde.initial_address()).D64(fde.len()), - x => panic!("Unsupported address size: {}", x), - }; - - let section = if let Some(ref augmentation) = fde.augmentation { - let cie_aug = fde - .cie - .augmentation - .expect("FDE has augmentation, but CIE doesn't"); - - if let Some(lsda) = augmentation.lsda { - // We only support writing `DW_EH_PE_absptr` here. 
- assert_eq!( - cie_aug - .lsda - .expect("FDE has lsda, but CIE doesn't") - .format(), - constants::DW_EH_PE_absptr - ); - - // Augmentation data length - let section = section.uleb(u64::from(fde.cie.address_size)); - match fde.cie.address_size { - 4 => section.D32({ - let x: u64 = lsda.pointer(); - x as u32 - }), - 8 => section.D64({ - let x: u64 = lsda.pointer(); - x - }), - x => panic!("Unsupported address size: {}", x), - } - } else { - // Even if we don't have any augmentation data, if there is - // an augmentation defined, we need to put the length in. - section.uleb(0) - } - } else { - section - }; - - let section = section.append_bytes(fde.instructions.slice()).mark(&end); - - fde.length = (&end - &start) as usize; - length.set_const(fde.length as u64); - - section - } - } - - trait ResultExt { - fn map_eof(self, input: &[u8]) -> Self; - } - - impl ResultExt for Result { - fn map_eof(self, input: &[u8]) -> Self { - match self { - Err(Error::UnexpectedEof(id)) => { - let id = ReaderOffsetId(id.0 - input.as_ptr() as u64); - Err(Error::UnexpectedEof(id)) - } - r => r, - } - } - } - - fn assert_parse_cie<'input, E>( - kind: SectionKind>>, - section: Section, - address_size: u8, - expected: Result<( - EndianSlice<'input, E>, - CommonInformationEntry>, - )>, - ) where - E: Endianity, - { - let section = section.get_contents().unwrap(); - let mut debug_frame = kind.section(§ion); - debug_frame.set_address_size(address_size); - let input = &mut EndianSlice::new(§ion, E::default()); - let bases = Default::default(); - let result = CommonInformationEntry::parse(&bases, &debug_frame, input); - let result = result.map(|cie| (*input, cie)).map_eof(§ion); - assert_eq!(result, expected); - } - - #[test] - fn test_parse_cie_incomplete_length_32() { - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()).L16(5); - assert_parse_cie( - kind, - section, - 8, - Err(Error::UnexpectedEof(ReaderOffsetId(0))), - ); - } - - #[test] - fn 
test_parse_cie_incomplete_length_64() { - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()) - .L32(0xffff_ffff) - .L32(12345); - assert_parse_cie( - kind, - section, - 8, - Err(Error::UnexpectedEof(ReaderOffsetId(4))), - ); - } - - #[test] - fn test_parse_cie_incomplete_id_32() { - let kind = debug_frame_be(); - let section = Section::with_endian(kind.endian()) - // The length is not large enough to contain the ID. - .B32(3) - .B32(0xffff_ffff); - assert_parse_cie( - kind, - section, - 8, - Err(Error::UnexpectedEof(ReaderOffsetId(4))), - ); - } - - #[test] - fn test_parse_cie_bad_id_32() { - let kind = debug_frame_be(); - let section = Section::with_endian(kind.endian()) - // Initial length - .B32(4) - // Not the CIE Id. - .B32(0xbad1_bad2); - assert_parse_cie(kind, section, 8, Err(Error::NotCieId)); - } - - #[test] - fn test_parse_cie_32_bad_version() { - let mut cie = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 99, - augmentation: None, - address_size: 4, - segment_size: 0, - code_alignment_factor: 1, - data_alignment_factor: 2, - return_address_register: Register(3), - initial_instructions: EndianSlice::new(&[], LittleEndian), - }; - - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()).cie(kind, None, &mut cie); - assert_parse_cie(kind, section, 4, Err(Error::UnknownVersion(99))); - } - - #[test] - fn test_parse_cie_unknown_augmentation() { - let length = Label::new(); - let start = Label::new(); - let end = Label::new(); - - let augmentation = Some("replicant"); - let expected_rest = [1, 2, 3]; - - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()) - // Initial length - .L32(&length) - .mark(&start) - // CIE Id - .L32(0xffff_ffff) - // Version - .D8(4) - // Augmentation - .append_bytes(augmentation.unwrap().as_bytes()) - // Null terminator - .D8(0) - // Extra augmented data that we can't understand. 
- .L32(1) - .L32(2) - .L32(3) - .L32(4) - .L32(5) - .L32(6) - .mark(&end) - .append_bytes(&expected_rest); - - let expected_length = (&end - &start) as u64; - length.set_const(expected_length); - - assert_parse_cie(kind, section, 8, Err(Error::UnknownAugmentation)); - } - - fn test_parse_cie(format: Format, version: u8, address_size: u8) { - let expected_rest = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - let expected_instrs: Vec<_> = (0..4).map(|_| constants::DW_CFA_nop.0).collect(); - - let mut cie = CommonInformationEntry { - offset: 0, - length: 0, - format, - version, - augmentation: None, - address_size, - segment_size: 0, - code_alignment_factor: 16, - data_alignment_factor: 32, - return_address_register: Register(1), - initial_instructions: EndianSlice::new(&expected_instrs, LittleEndian), - }; - - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()) - .cie(kind, None, &mut cie) - .append_bytes(&expected_rest); - - assert_parse_cie( - kind, - section, - address_size, - Ok((EndianSlice::new(&expected_rest, LittleEndian), cie)), - ); - } - - #[test] - fn test_parse_cie_32_ok() { - test_parse_cie(Format::Dwarf32, 1, 4); - test_parse_cie(Format::Dwarf32, 1, 8); - test_parse_cie(Format::Dwarf32, 4, 4); - test_parse_cie(Format::Dwarf32, 4, 8); - } - - #[test] - fn test_parse_cie_64_ok() { - test_parse_cie(Format::Dwarf64, 1, 4); - test_parse_cie(Format::Dwarf64, 1, 8); - test_parse_cie(Format::Dwarf64, 4, 4); - test_parse_cie(Format::Dwarf64, 4, 8); - } - - #[test] - fn test_parse_cie_length_too_big() { - let expected_instrs: Vec<_> = (0..13).map(|_| constants::DW_CFA_nop.0).collect(); - - let mut cie = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 4, - segment_size: 0, - code_alignment_factor: 0, - data_alignment_factor: 0, - return_address_register: Register(3), - initial_instructions: EndianSlice::new(&expected_instrs, LittleEndian), - }; - - let kind = 
debug_frame_le(); - let section = Section::with_endian(kind.endian()).cie(kind, None, &mut cie); - - let mut contents = section.get_contents().unwrap(); - - // Overwrite the length to be too big. - contents[0] = 0; - contents[1] = 0; - contents[2] = 0; - contents[3] = 255; - - let debug_frame = DebugFrame::new(&contents, LittleEndian); - let bases = Default::default(); - assert_eq!( - CommonInformationEntry::parse( - &bases, - &debug_frame, - &mut EndianSlice::new(&contents, LittleEndian) - ) - .map_eof(&contents), - Err(Error::UnexpectedEof(ReaderOffsetId(4))) - ); - } - - #[test] - fn test_parse_fde_incomplete_length_32() { - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()).L16(5); - let section = section.get_contents().unwrap(); - let debug_frame = kind.section(§ion); - let rest = &mut EndianSlice::new(§ion, LittleEndian); - assert_eq!( - parse_fde(debug_frame, rest, UnwindSection::cie_from_offset).map_eof(§ion), - Err(Error::UnexpectedEof(ReaderOffsetId(0))) - ); - } - - #[test] - fn test_parse_fde_incomplete_length_64() { - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()) - .L32(0xffff_ffff) - .L32(12345); - let section = section.get_contents().unwrap(); - let debug_frame = kind.section(§ion); - let rest = &mut EndianSlice::new(§ion, LittleEndian); - assert_eq!( - parse_fde(debug_frame, rest, UnwindSection::cie_from_offset).map_eof(§ion), - Err(Error::UnexpectedEof(ReaderOffsetId(4))) - ); - } - - #[test] - fn test_parse_fde_incomplete_cie_pointer_32() { - let kind = debug_frame_be(); - let section = Section::with_endian(kind.endian()) - // The length is not large enough to contain the CIE pointer. 
- .B32(3) - .B32(1994); - let section = section.get_contents().unwrap(); - let debug_frame = kind.section(§ion); - let rest = &mut EndianSlice::new(§ion, BigEndian); - assert_eq!( - parse_fde(debug_frame, rest, UnwindSection::cie_from_offset).map_eof(§ion), - Err(Error::UnexpectedEof(ReaderOffsetId(4))) - ); - } - - #[test] - fn test_parse_fde_32_ok() { - let expected_rest = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - let cie_offset = 0xbad0_bad1; - let expected_instrs: Vec<_> = (0..7).map(|_| constants::DW_CFA_nop.0).collect(); - - let cie = CommonInformationEntry { - offset: 0, - length: 100, - format: Format::Dwarf32, - version: 4, - augmentation: None, - // DWARF32 with a 64 bit address size! Holy moly! - address_size: 8, - segment_size: 0, - code_alignment_factor: 3, - data_alignment_factor: 2, - return_address_register: Register(1), - initial_instructions: EndianSlice::new(&[], LittleEndian), - }; - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0xfeed_beef, - address_range: 39, - augmentation: None, - instructions: EndianSlice::new(&expected_instrs, LittleEndian), - }; - - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()) - .fde(kind, cie_offset, &mut fde) - .append_bytes(&expected_rest); - - let section = section.get_contents().unwrap(); - let debug_frame = kind.section(§ion); - let rest = &mut EndianSlice::new(§ion, LittleEndian); - - let get_cie = |_: &_, _: &_, offset| { - assert_eq!(offset, DebugFrameOffset(cie_offset as usize)); - Ok(cie.clone()) - }; - - assert_eq!(parse_fde(debug_frame, rest, get_cie), Ok(fde)); - assert_eq!(*rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_fde_32_with_segment_ok() { - let expected_rest = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - let cie_offset = 0xbad0_bad1; - let expected_instrs: Vec<_> = (0..92).map(|_| constants::DW_CFA_nop.0).collect(); - - let cie = 
CommonInformationEntry { - offset: 0, - length: 100, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 4, - segment_size: 4, - code_alignment_factor: 3, - data_alignment_factor: 2, - return_address_register: Register(1), - initial_instructions: EndianSlice::new(&[], LittleEndian), - }; - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0xbadb_ad11, - initial_address: 0xfeed_beef, - address_range: 999, - augmentation: None, - instructions: EndianSlice::new(&expected_instrs, LittleEndian), - }; - - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()) - .fde(kind, cie_offset, &mut fde) - .append_bytes(&expected_rest); - - let section = section.get_contents().unwrap(); - let debug_frame = kind.section(§ion); - let rest = &mut EndianSlice::new(§ion, LittleEndian); - - let get_cie = |_: &_, _: &_, offset| { - assert_eq!(offset, DebugFrameOffset(cie_offset as usize)); - Ok(cie.clone()) - }; - - assert_eq!(parse_fde(debug_frame, rest, get_cie), Ok(fde)); - assert_eq!(*rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_fde_64_ok() { - let expected_rest = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - let cie_offset = 0xbad0_bad1; - let expected_instrs: Vec<_> = (0..7).map(|_| constants::DW_CFA_nop.0).collect(); - - let cie = CommonInformationEntry { - offset: 0, - length: 100, - format: Format::Dwarf64, - version: 4, - augmentation: None, - address_size: 8, - segment_size: 0, - code_alignment_factor: 3, - data_alignment_factor: 2, - return_address_register: Register(1), - initial_instructions: EndianSlice::new(&[], LittleEndian), - }; - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf64, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0xfeed_beef, - address_range: 999, - augmentation: None, - instructions: EndianSlice::new(&expected_instrs, LittleEndian), - }; - - 
let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()) - .fde(kind, cie_offset, &mut fde) - .append_bytes(&expected_rest); - - let section = section.get_contents().unwrap(); - let debug_frame = kind.section(§ion); - let rest = &mut EndianSlice::new(§ion, LittleEndian); - - let get_cie = |_: &_, _: &_, offset| { - assert_eq!(offset, DebugFrameOffset(cie_offset as usize)); - Ok(cie.clone()) - }; - - assert_eq!(parse_fde(debug_frame, rest, get_cie), Ok(fde)); - assert_eq!(*rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_entry_on_cie_32_ok() { - let expected_rest = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - let expected_instrs: Vec<_> = (0..4).map(|_| constants::DW_CFA_nop.0).collect(); - - let mut cie = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 4, - segment_size: 0, - code_alignment_factor: 16, - data_alignment_factor: 32, - return_address_register: Register(1), - initial_instructions: EndianSlice::new(&expected_instrs, BigEndian), - }; - - let kind = debug_frame_be(); - let section = Section::with_endian(kind.endian()) - .cie(kind, None, &mut cie) - .append_bytes(&expected_rest); - let section = section.get_contents().unwrap(); - let debug_frame = kind.section(§ion); - let rest = &mut EndianSlice::new(§ion, BigEndian); - - let bases = Default::default(); - assert_eq!( - parse_cfi_entry(&bases, &debug_frame, rest), - Ok(Some(CieOrFde::Cie(cie))) - ); - assert_eq!(*rest, EndianSlice::new(&expected_rest, BigEndian)); - } - - #[test] - fn test_parse_cfi_entry_on_fde_32_ok() { - let cie_offset = 0x1234_5678; - let expected_rest = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - let expected_instrs: Vec<_> = (0..4).map(|_| constants::DW_CFA_nop.0).collect(); - - let cie = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 4, - segment_size: 0, - code_alignment_factor: 
16, - data_alignment_factor: 32, - return_address_register: Register(1), - initial_instructions: EndianSlice::new(&[], BigEndian), - }; - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0xfeed_beef, - address_range: 39, - augmentation: None, - instructions: EndianSlice::new(&expected_instrs, BigEndian), - }; - - let kind = debug_frame_be(); - let section = Section::with_endian(kind.endian()) - .fde(kind, cie_offset, &mut fde) - .append_bytes(&expected_rest); - - let section = section.get_contents().unwrap(); - let debug_frame = kind.section(§ion); - let rest = &mut EndianSlice::new(§ion, BigEndian); - - let bases = Default::default(); - match parse_cfi_entry(&bases, &debug_frame, rest) { - Ok(Some(CieOrFde::Fde(partial))) => { - assert_eq!(*rest, EndianSlice::new(&expected_rest, BigEndian)); - - assert_eq!(partial.length, fde.length); - assert_eq!(partial.format, fde.format); - assert_eq!(partial.cie_offset, DebugFrameOffset(cie_offset as usize)); - - let get_cie = |_: &_, _: &_, offset| { - assert_eq!(offset, DebugFrameOffset(cie_offset as usize)); - Ok(cie.clone()) - }; - - assert_eq!(partial.parse(get_cie), Ok(fde)); - } - otherwise => panic!("Unexpected result: {:#?}", otherwise), - } - } - - #[test] - fn test_cfi_entries_iter() { - let expected_instrs1: Vec<_> = (0..4).map(|_| constants::DW_CFA_nop.0).collect(); - - let expected_instrs2: Vec<_> = (0..8).map(|_| constants::DW_CFA_nop.0).collect(); - - let expected_instrs3: Vec<_> = (0..12).map(|_| constants::DW_CFA_nop.0).collect(); - - let expected_instrs4: Vec<_> = (0..16).map(|_| constants::DW_CFA_nop.0).collect(); - - let mut cie1 = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 4, - segment_size: 0, - code_alignment_factor: 1, - data_alignment_factor: 2, - return_address_register: Register(3), - initial_instructions: 
EndianSlice::new(&expected_instrs1, BigEndian), - }; - - let mut cie2 = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 4, - segment_size: 0, - code_alignment_factor: 3, - data_alignment_factor: 2, - return_address_register: Register(1), - initial_instructions: EndianSlice::new(&expected_instrs2, BigEndian), - }; - - let cie1_location = Label::new(); - let cie2_location = Label::new(); - - // Write the CIEs first so that their length gets set before we clone - // them into the FDEs and our equality assertions down the line end up - // with all the CIEs always having he correct length. - let kind = debug_frame_be(); - let section = Section::with_endian(kind.endian()) - .mark(&cie1_location) - .cie(kind, None, &mut cie1) - .mark(&cie2_location) - .cie(kind, None, &mut cie2); - - let mut fde1 = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie1.clone(), - initial_segment: 0, - initial_address: 0xfeed_beef, - address_range: 39, - augmentation: None, - instructions: EndianSlice::new(&expected_instrs3, BigEndian), - }; - - let mut fde2 = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie2.clone(), - initial_segment: 0, - initial_address: 0xfeed_face, - address_range: 9000, - augmentation: None, - instructions: EndianSlice::new(&expected_instrs4, BigEndian), - }; - - let section = - section - .fde(kind, &cie1_location, &mut fde1) - .fde(kind, &cie2_location, &mut fde2); - - section.start().set_const(0); - - let cie1_offset = cie1_location.value().unwrap() as usize; - let cie2_offset = cie2_location.value().unwrap() as usize; - - let contents = section.get_contents().unwrap(); - let debug_frame = kind.section(&contents); - - let bases = Default::default(); - let mut entries = debug_frame.entries(&bases); - - assert_eq!(entries.next(), Ok(Some(CieOrFde::Cie(cie1.clone())))); - assert_eq!(entries.next(), 
Ok(Some(CieOrFde::Cie(cie2.clone())))); - - match entries.next() { - Ok(Some(CieOrFde::Fde(partial))) => { - assert_eq!(partial.length, fde1.length); - assert_eq!(partial.format, fde1.format); - assert_eq!(partial.cie_offset, DebugFrameOffset(cie1_offset)); - - let get_cie = |_: &_, _: &_, offset| { - assert_eq!(offset, DebugFrameOffset(cie1_offset)); - Ok(cie1.clone()) - }; - assert_eq!(partial.parse(get_cie), Ok(fde1)); - } - otherwise => panic!("Unexpected result: {:#?}", otherwise), - } - - match entries.next() { - Ok(Some(CieOrFde::Fde(partial))) => { - assert_eq!(partial.length, fde2.length); - assert_eq!(partial.format, fde2.format); - assert_eq!(partial.cie_offset, DebugFrameOffset(cie2_offset)); - - let get_cie = |_: &_, _: &_, offset| { - assert_eq!(offset, DebugFrameOffset(cie2_offset)); - Ok(cie2.clone()) - }; - assert_eq!(partial.parse(get_cie), Ok(fde2)); - } - otherwise => panic!("Unexpected result: {:#?}", otherwise), - } - - assert_eq!(entries.next(), Ok(None)); - } - - #[test] - fn test_parse_cie_from_offset() { - let filler = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - let instrs: Vec<_> = (0..5).map(|_| constants::DW_CFA_nop.0).collect(); - - let mut cie = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf64, - version: 4, - augmentation: None, - address_size: 4, - segment_size: 0, - code_alignment_factor: 4, - data_alignment_factor: 8, - return_address_register: Register(12), - initial_instructions: EndianSlice::new(&instrs, LittleEndian), - }; - - let cie_location = Label::new(); - - let kind = debug_frame_le(); - let section = Section::with_endian(kind.endian()) - .append_bytes(&filler) - .mark(&cie_location) - .cie(kind, None, &mut cie) - .append_bytes(&filler); - - section.start().set_const(0); - - let cie_offset = DebugFrameOffset(cie_location.value().unwrap() as usize); - - let contents = section.get_contents().unwrap(); - let debug_frame = kind.section(&contents); - let bases = Default::default(); - - 
assert_eq!(debug_frame.cie_from_offset(&bases, cie_offset), Ok(cie)); - } - - fn parse_cfi_instruction( - input: &mut R, - address_size: u8, - ) -> Result> { - let parameters = &PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size, - section: &R::default(), - }; - CallFrameInstruction::parse(input, None, parameters, Vendor::Default) - } - - #[test] - fn test_parse_cfi_instruction_advance_loc() { - let expected_rest = [1, 2, 3, 4]; - let expected_delta = 42; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_advance_loc.0 | expected_delta) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::AdvanceLoc { - delta: u32::from(expected_delta), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_offset() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 3; - let expected_offset = 1997; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_offset.0 | expected_reg) - .uleb(expected_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::Offset { - register: Register(expected_reg.into()), - factored_offset: expected_offset, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_restore() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 3; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_restore.0 | expected_reg) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, 
LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::Restore { - register: Register(expected_reg.into()), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_nop() { - let expected_rest = [1, 2, 3, 4]; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_nop.0) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::Nop) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_set_loc() { - let expected_rest = [1, 2, 3, 4]; - let expected_addr = 0xdead_beef; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_set_loc.0) - .L64(expected_addr) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::SetLoc { - address: expected_addr, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_set_loc_encoding() { - let text_base = 0xfeed_face; - let addr_offset = 0xbeef; - let expected_addr = text_base + addr_offset; - let expected_rest = [1, 2, 3, 4]; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_set_loc.0) - .L64(addr_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - let parameters = &PointerEncodingParameters { - bases: &BaseAddresses::default().set_text(text_base).eh_frame, - func_base: None, - address_size: 8, - section: &EndianSlice::new(&[], LittleEndian), - }; - assert_eq!( - CallFrameInstruction::parse( - 
input, - Some(constants::DW_EH_PE_textrel), - parameters, - Vendor::Default - ), - Ok(CallFrameInstruction::SetLoc { - address: expected_addr, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_advance_loc1() { - let expected_rest = [1, 2, 3, 4]; - let expected_delta = 8; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_advance_loc1.0) - .D8(expected_delta) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::AdvanceLoc { - delta: u32::from(expected_delta), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_advance_loc2() { - let expected_rest = [1, 2, 3, 4]; - let expected_delta = 500; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_advance_loc2.0) - .L16(expected_delta) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::AdvanceLoc { - delta: u32::from(expected_delta), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_advance_loc4() { - let expected_rest = [1, 2, 3, 4]; - let expected_delta = 1 << 20; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_advance_loc4.0) - .L32(expected_delta) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::AdvanceLoc { - delta: expected_delta, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - 
} - - #[test] - fn test_parse_cfi_instruction_offset_extended() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 7; - let expected_offset = 33; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_offset_extended.0) - .uleb(expected_reg.into()) - .uleb(expected_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::Offset { - register: Register(expected_reg), - factored_offset: expected_offset, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_restore_extended() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 7; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_restore_extended.0) - .uleb(expected_reg.into()) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::Restore { - register: Register(expected_reg), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_undefined() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 7; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_undefined.0) - .uleb(expected_reg.into()) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::Undefined { - register: Register(expected_reg), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_same_value() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 
7; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_same_value.0) - .uleb(expected_reg.into()) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::SameValue { - register: Register(expected_reg), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_register() { - let expected_rest = [1, 2, 3, 4]; - let expected_dest_reg = 7; - let expected_src_reg = 8; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_register.0) - .uleb(expected_dest_reg.into()) - .uleb(expected_src_reg.into()) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::Register { - dest_register: Register(expected_dest_reg), - src_register: Register(expected_src_reg), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_remember_state() { - let expected_rest = [1, 2, 3, 4]; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_remember_state.0) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::RememberState) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_restore_state() { - let expected_rest = [1, 2, 3, 4]; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_restore_state.0) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut 
EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::RestoreState) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_def_cfa() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 2; - let expected_offset = 0; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_def_cfa.0) - .uleb(expected_reg.into()) - .uleb(expected_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::DefCfa { - register: Register(expected_reg), - offset: expected_offset, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_def_cfa_register() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 2; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_def_cfa_register.0) - .uleb(expected_reg.into()) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::DefCfaRegister { - register: Register(expected_reg), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_def_cfa_offset() { - let expected_rest = [1, 2, 3, 4]; - let expected_offset = 23; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_def_cfa_offset.0) - .uleb(expected_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::DefCfaOffset { - offset: expected_offset, - }) - 
); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_def_cfa_expression() { - let expected_rest = [1, 2, 3, 4]; - let expected_expr = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]; - - let length = Label::new(); - let start = Label::new(); - let end = Label::new(); - - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_def_cfa_expression.0) - .D8(&length) - .mark(&start) - .append_bytes(&expected_expr) - .mark(&end) - .append_bytes(&expected_rest); - - length.set_const((&end - &start) as u64); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::DefCfaExpression { - expression: Expression(EndianSlice::new(&expected_expr, LittleEndian)), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_expression() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 99; - let expected_expr = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]; - - let length = Label::new(); - let start = Label::new(); - let end = Label::new(); - - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_expression.0) - .uleb(expected_reg.into()) - .D8(&length) - .mark(&start) - .append_bytes(&expected_expr) - .mark(&end) - .append_bytes(&expected_rest); - - length.set_const((&end - &start) as u64); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::Expression { - register: Register(expected_reg), - expression: Expression(EndianSlice::new(&expected_expr, LittleEndian)), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_offset_extended_sf() { - let expected_rest = [1, 2, 3, 4]; - let 
expected_reg = 7; - let expected_offset = -33; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_offset_extended_sf.0) - .uleb(expected_reg.into()) - .sleb(expected_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::OffsetExtendedSf { - register: Register(expected_reg), - factored_offset: expected_offset, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_def_cfa_sf() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 2; - let expected_offset = -9999; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_def_cfa_sf.0) - .uleb(expected_reg.into()) - .sleb(expected_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::DefCfaSf { - register: Register(expected_reg), - factored_offset: expected_offset, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_def_cfa_offset_sf() { - let expected_rest = [1, 2, 3, 4]; - let expected_offset = -123; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_def_cfa_offset_sf.0) - .sleb(expected_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::DefCfaOffsetSf { - factored_offset: expected_offset, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_val_offset() { - let expected_rest = [1, 2, 3, 4]; - let 
expected_reg = 50; - let expected_offset = 23; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_val_offset.0) - .uleb(expected_reg.into()) - .uleb(expected_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::ValOffset { - register: Register(expected_reg), - factored_offset: expected_offset, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_val_offset_sf() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 50; - let expected_offset = -23; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_val_offset_sf.0) - .uleb(expected_reg.into()) - .sleb(expected_offset) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Ok(CallFrameInstruction::ValOffsetSf { - register: Register(expected_reg), - factored_offset: expected_offset, - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_val_expression() { - let expected_rest = [1, 2, 3, 4]; - let expected_reg = 50; - let expected_expr = [2, 2, 1, 1, 5, 5]; - - let length = Label::new(); - let start = Label::new(); - let end = Label::new(); - - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_val_expression.0) - .uleb(expected_reg.into()) - .D8(&length) - .mark(&start) - .append_bytes(&expected_expr) - .mark(&end) - .append_bytes(&expected_rest); - - length.set_const((&end - &start) as u64); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - - assert_eq!( - parse_cfi_instruction(input, 8), - 
Ok(CallFrameInstruction::ValExpression { - register: Register(expected_reg), - expression: Expression(EndianSlice::new(&expected_expr, LittleEndian)), - }) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_negate_ra_state() { - let expected_rest = [1, 2, 3, 4]; - let section = Section::with_endian(Endian::Little) - .D8(constants::DW_CFA_AARCH64_negate_ra_state.0) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - let parameters = &PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 8, - section: &EndianSlice::default(), - }; - assert_eq!( - CallFrameInstruction::parse(input, None, parameters, Vendor::AArch64), - Ok(CallFrameInstruction::NegateRaState) - ); - assert_eq!(*input, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_cfi_instruction_unknown_instruction() { - let expected_rest = [1, 2, 3, 4]; - let unknown_instr = constants::DwCfa(0b0011_1111); - let section = Section::with_endian(Endian::Little) - .D8(unknown_instr.0) - .append_bytes(&expected_rest); - let contents = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&contents, LittleEndian); - assert_eq!( - parse_cfi_instruction(input, 8), - Err(Error::UnknownCallFrameInstruction(unknown_instr)) - ); - } - - #[test] - fn test_call_frame_instruction_iter_ok() { - let expected_reg = 50; - let expected_expr = [2, 2, 1, 1, 5, 5]; - let expected_delta = 230; - - let length = Label::new(); - let start = Label::new(); - let end = Label::new(); - - let section = Section::with_endian(Endian::Big) - .D8(constants::DW_CFA_val_expression.0) - .uleb(expected_reg.into()) - .D8(&length) - .mark(&start) - .append_bytes(&expected_expr) - .mark(&end) - .D8(constants::DW_CFA_advance_loc1.0) - .D8(expected_delta); - - length.set_const((&end - &start) as 
u64); - let contents = section.get_contents().unwrap(); - let input = EndianSlice::new(&contents, BigEndian); - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 8, - section: &EndianSlice::default(), - }; - let mut iter = CallFrameInstructionIter { - input, - address_encoding: None, - parameters, - vendor: Vendor::Default, - }; - - assert_eq!( - iter.next(), - Ok(Some(CallFrameInstruction::ValExpression { - register: Register(expected_reg), - expression: Expression(EndianSlice::new(&expected_expr, BigEndian)), - })) - ); - - assert_eq!( - iter.next(), - Ok(Some(CallFrameInstruction::AdvanceLoc { - delta: u32::from(expected_delta), - })) - ); - - assert_eq!(iter.next(), Ok(None)); - } - - #[test] - fn test_call_frame_instruction_iter_err() { - // DW_CFA_advance_loc1 without an operand. - let section = Section::with_endian(Endian::Big).D8(constants::DW_CFA_advance_loc1.0); - - let contents = section.get_contents().unwrap(); - let input = EndianSlice::new(&contents, BigEndian); - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 8, - section: &EndianSlice::default(), - }; - let mut iter = CallFrameInstructionIter { - input, - address_encoding: None, - parameters, - vendor: Vendor::Default, - }; - - assert_eq!( - iter.next().map_eof(&contents), - Err(Error::UnexpectedEof(ReaderOffsetId(1))) - ); - assert_eq!(iter.next(), Ok(None)); - } - - fn assert_eval<'a, I>( - mut initial_ctx: UnwindContext>, - expected_ctx: UnwindContext>, - cie: CommonInformationEntry>, - fde: Option>>, - instructions: I, - ) where - I: AsRef< - [( - Result, - CallFrameInstruction>, - )], - >, - { - { - let section = &DebugFrame::from(EndianSlice::default()); - let bases = &BaseAddresses::default(); - let mut table = match fde { - Some(fde) => UnwindTable::new_for_fde(section, bases, &mut initial_ctx, &fde), - None => UnwindTable::new_for_cie(section, 
bases, &mut initial_ctx, &cie), - }; - for &(ref expected_result, ref instruction) in instructions.as_ref() { - assert_eq!(*expected_result, table.evaluate(instruction.clone())); - } - } - - assert_eq!(expected_ctx, initial_ctx); - } - - fn make_test_cie<'a>() -> CommonInformationEntry> { - CommonInformationEntry { - offset: 0, - format: Format::Dwarf64, - length: 0, - return_address_register: Register(0), - version: 4, - address_size: mem::size_of::() as u8, - initial_instructions: EndianSlice::new(&[], LittleEndian), - augmentation: None, - segment_size: 0, - data_alignment_factor: 2, - code_alignment_factor: 3, - } - } - - #[test] - fn test_eval_set_loc() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected.row_mut().end_address = 42; - let instructions = [(Ok(true), CallFrameInstruction::SetLoc { address: 42 })]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_set_loc_backwards() { - let cie = make_test_cie(); - let mut ctx = UnwindContext::new(); - ctx.row_mut().start_address = 999; - let expected = ctx.clone(); - let instructions = [( - Err(Error::InvalidAddressRange), - CallFrameInstruction::SetLoc { address: 42 }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_advance_loc() { - let cie = make_test_cie(); - let mut ctx = UnwindContext::new(); - ctx.row_mut().start_address = 3; - let mut expected = ctx.clone(); - expected.row_mut().end_address = 3 + 2 * cie.code_alignment_factor; - let instructions = [(Ok(true), CallFrameInstruction::AdvanceLoc { delta: 2 })]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_advance_loc_overflow() { - let cie = make_test_cie(); - let mut ctx = UnwindContext::new(); - ctx.row_mut().start_address = u64::MAX; - let mut expected = ctx.clone(); - expected.row_mut().end_address = 42 * cie.code_alignment_factor - 1; - let instructions = [(Ok(true), 
CallFrameInstruction::AdvanceLoc { delta: 42 })]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_def_cfa() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected.set_cfa(CfaRule::RegisterAndOffset { - register: Register(42), - offset: 36, - }); - let instructions = [( - Ok(false), - CallFrameInstruction::DefCfa { - register: Register(42), - offset: 36, - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_def_cfa_sf() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected.set_cfa(CfaRule::RegisterAndOffset { - register: Register(42), - offset: 36 * cie.data_alignment_factor as i64, - }); - let instructions = [( - Ok(false), - CallFrameInstruction::DefCfaSf { - register: Register(42), - factored_offset: 36, - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_def_cfa_register() { - let cie = make_test_cie(); - let mut ctx = UnwindContext::new(); - ctx.set_cfa(CfaRule::RegisterAndOffset { - register: Register(3), - offset: 8, - }); - let mut expected = ctx.clone(); - expected.set_cfa(CfaRule::RegisterAndOffset { - register: Register(42), - offset: 8, - }); - let instructions = [( - Ok(false), - CallFrameInstruction::DefCfaRegister { - register: Register(42), - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_def_cfa_register_invalid_context() { - let cie = make_test_cie(); - let mut ctx = UnwindContext::new(); - ctx.set_cfa(CfaRule::Expression(Expression(EndianSlice::new( - &[], - LittleEndian, - )))); - let expected = ctx.clone(); - let instructions = [( - Err(Error::CfiInstructionInInvalidContext), - CallFrameInstruction::DefCfaRegister { - register: Register(42), - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_def_cfa_offset() { - let cie 
= make_test_cie(); - let mut ctx = UnwindContext::new(); - ctx.set_cfa(CfaRule::RegisterAndOffset { - register: Register(3), - offset: 8, - }); - let mut expected = ctx.clone(); - expected.set_cfa(CfaRule::RegisterAndOffset { - register: Register(3), - offset: 42, - }); - let instructions = [(Ok(false), CallFrameInstruction::DefCfaOffset { offset: 42 })]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_def_cfa_offset_invalid_context() { - let cie = make_test_cie(); - let mut ctx = UnwindContext::new(); - ctx.set_cfa(CfaRule::Expression(Expression(EndianSlice::new( - &[], - LittleEndian, - )))); - let expected = ctx.clone(); - let instructions = [( - Err(Error::CfiInstructionInInvalidContext), - CallFrameInstruction::DefCfaOffset { offset: 1993 }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_def_cfa_expression() { - let expr = [1, 2, 3, 4]; - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected.set_cfa(CfaRule::Expression(Expression(EndianSlice::new( - &expr, - LittleEndian, - )))); - let instructions = [( - Ok(false), - CallFrameInstruction::DefCfaExpression { - expression: Expression(EndianSlice::new(&expr, LittleEndian)), - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_undefined() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule(Register(5), RegisterRule::Undefined) - .unwrap(); - let instructions = [( - Ok(false), - CallFrameInstruction::Undefined { - register: Register(5), - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_same_value() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule(Register(0), RegisterRule::SameValue) - .unwrap(); - let instructions = [( - Ok(false), - 
CallFrameInstruction::SameValue { - register: Register(0), - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_offset() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule( - Register(2), - RegisterRule::Offset(3 * cie.data_alignment_factor), - ) - .unwrap(); - let instructions = [( - Ok(false), - CallFrameInstruction::Offset { - register: Register(2), - factored_offset: 3, - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_offset_extended_sf() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule( - Register(4), - RegisterRule::Offset(-3 * cie.data_alignment_factor), - ) - .unwrap(); - let instructions = [( - Ok(false), - CallFrameInstruction::OffsetExtendedSf { - register: Register(4), - factored_offset: -3, - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_val_offset() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule( - Register(5), - RegisterRule::ValOffset(7 * cie.data_alignment_factor), - ) - .unwrap(); - let instructions = [( - Ok(false), - CallFrameInstruction::ValOffset { - register: Register(5), - factored_offset: 7, - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_val_offset_sf() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule( - Register(5), - RegisterRule::ValOffset(-7 * cie.data_alignment_factor), - ) - .unwrap(); - let instructions = [( - Ok(false), - CallFrameInstruction::ValOffsetSf { - register: Register(5), - factored_offset: -7, - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_expression() { - let 
expr = [1, 2, 3, 4]; - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule( - Register(9), - RegisterRule::Expression(Expression(EndianSlice::new(&expr, LittleEndian))), - ) - .unwrap(); - let instructions = [( - Ok(false), - CallFrameInstruction::Expression { - register: Register(9), - expression: Expression(EndianSlice::new(&expr, LittleEndian)), - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_val_expression() { - let expr = [1, 2, 3, 4]; - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule( - Register(9), - RegisterRule::ValExpression(Expression(EndianSlice::new(&expr, LittleEndian))), - ) - .unwrap(); - let instructions = [( - Ok(false), - CallFrameInstruction::ValExpression { - register: Register(9), - expression: Expression(EndianSlice::new(&expr, LittleEndian)), - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_restore() { - let cie = make_test_cie(); - let fde = FrameDescriptionEntry { - offset: 0, - format: Format::Dwarf64, - length: 0, - address_range: 0, - augmentation: None, - initial_address: 0, - initial_segment: 0, - cie: cie.clone(), - instructions: EndianSlice::new(&[], LittleEndian), - }; - - let mut ctx = UnwindContext::new(); - ctx.set_register_rule(Register(0), RegisterRule::Offset(1)) - .unwrap(); - ctx.save_initial_rules().unwrap(); - let expected = ctx.clone(); - ctx.set_register_rule(Register(0), RegisterRule::Offset(2)) - .unwrap(); - - let instructions = [( - Ok(false), - CallFrameInstruction::Restore { - register: Register(0), - }, - )]; - assert_eval(ctx, expected, cie, Some(fde), instructions); - } - - #[test] - fn test_eval_restore_havent_saved_initial_context() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let expected = ctx.clone(); - let instructions = [( - 
Err(Error::CfiInstructionInInvalidContext), - CallFrameInstruction::Restore { - register: Register(0), - }, - )]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_remember_state() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected.push_row().unwrap(); - let instructions = [(Ok(false), CallFrameInstruction::RememberState)]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_restore_state() { - let cie = make_test_cie(); - - let mut ctx = UnwindContext::new(); - ctx.set_start_address(1); - ctx.set_register_rule(Register(0), RegisterRule::SameValue) - .unwrap(); - let mut expected = ctx.clone(); - ctx.push_row().unwrap(); - ctx.set_start_address(2); - ctx.set_register_rule(Register(0), RegisterRule::Offset(16)) - .unwrap(); - - // Restore state should preserve current location. - expected.set_start_address(2); - - let instructions = [ - // First one pops just fine. - (Ok(false), CallFrameInstruction::RestoreState), - // Second pop would try to pop out of bounds. 
- ( - Err(Error::PopWithEmptyStack), - CallFrameInstruction::RestoreState, - ), - ]; - - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_negate_ra_state() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule(crate::AArch64::RA_SIGN_STATE, RegisterRule::Constant(1)) - .unwrap(); - let instructions = [(Ok(false), CallFrameInstruction::NegateRaState)]; - assert_eval(ctx, expected, cie, None, instructions); - - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule(crate::AArch64::RA_SIGN_STATE, RegisterRule::Constant(0)) - .unwrap(); - let instructions = [ - (Ok(false), CallFrameInstruction::NegateRaState), - (Ok(false), CallFrameInstruction::NegateRaState), - ]; - assert_eval(ctx, expected, cie, None, instructions); - - // NegateRaState can't be used with other instructions. - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let mut expected = ctx.clone(); - expected - .set_register_rule( - crate::AArch64::RA_SIGN_STATE, - RegisterRule::Offset(cie.data_alignment_factor as i64), - ) - .unwrap(); - let instructions = [ - ( - Ok(false), - CallFrameInstruction::Offset { - register: crate::AArch64::RA_SIGN_STATE, - factored_offset: 1, - }, - ), - ( - Err(Error::CfiInstructionInInvalidContext), - CallFrameInstruction::NegateRaState, - ), - ]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_eval_nop() { - let cie = make_test_cie(); - let ctx = UnwindContext::new(); - let expected = ctx.clone(); - let instructions = [(Ok(false), CallFrameInstruction::Nop)]; - assert_eval(ctx, expected, cie, None, instructions); - } - - #[test] - fn test_unwind_table_cie_no_rule() { - let initial_instructions = Section::with_endian(Endian::Little) - // The CFA is -12 from register 4. 
- .D8(constants::DW_CFA_def_cfa_sf.0) - .uleb(4) - .sleb(-12) - .append_repeated(constants::DW_CFA_nop.0, 4); - let initial_instructions = initial_instructions.get_contents().unwrap(); - - let cie = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 8, - segment_size: 0, - code_alignment_factor: 1, - data_alignment_factor: 1, - return_address_register: Register(3), - initial_instructions: EndianSlice::new(&initial_instructions, LittleEndian), - }; - - let instructions = Section::with_endian(Endian::Little) - // A bunch of nop padding. - .append_repeated(constants::DW_CFA_nop.0, 8); - let instructions = instructions.get_contents().unwrap(); - - let fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0, - address_range: 100, - augmentation: None, - instructions: EndianSlice::new(&instructions, LittleEndian), - }; - - let section = &DebugFrame::from(EndianSlice::default()); - let bases = &BaseAddresses::default(); - let mut ctx = Box::new(UnwindContext::new()); - - let mut table = fde - .rows(section, bases, &mut ctx) - .expect("Should run initial program OK"); - assert!(table.ctx.is_initialized); - let expected_initial_rule = (Register(0), RegisterRule::Undefined); - assert_eq!(table.ctx.initial_rule, Some(expected_initial_rule)); - - { - let row = table.next_row().expect("Should evaluate first row OK"); - let expected = UnwindTableRow { - start_address: 0, - end_address: 100, - saved_args_size: 0, - cfa: CfaRule::RegisterAndOffset { - register: Register(4), - offset: -12, - }, - registers: [].iter().collect(), - }; - assert_eq!(Some(&expected), row); - } - - // All done! 
- assert_eq!(Ok(None), table.next_row()); - assert_eq!(Ok(None), table.next_row()); - } - - #[test] - fn test_unwind_table_cie_single_rule() { - let initial_instructions = Section::with_endian(Endian::Little) - // The CFA is -12 from register 4. - .D8(constants::DW_CFA_def_cfa_sf.0) - .uleb(4) - .sleb(-12) - // Register 3 is 4 from the CFA. - .D8(constants::DW_CFA_offset.0 | 3) - .uleb(4) - .append_repeated(constants::DW_CFA_nop.0, 4); - let initial_instructions = initial_instructions.get_contents().unwrap(); - - let cie = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 8, - segment_size: 0, - code_alignment_factor: 1, - data_alignment_factor: 1, - return_address_register: Register(3), - initial_instructions: EndianSlice::new(&initial_instructions, LittleEndian), - }; - - let instructions = Section::with_endian(Endian::Little) - // A bunch of nop padding. - .append_repeated(constants::DW_CFA_nop.0, 8); - let instructions = instructions.get_contents().unwrap(); - - let fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0, - address_range: 100, - augmentation: None, - instructions: EndianSlice::new(&instructions, LittleEndian), - }; - - let section = &DebugFrame::from(EndianSlice::default()); - let bases = &BaseAddresses::default(); - let mut ctx = Box::new(UnwindContext::new()); - - let mut table = fde - .rows(section, bases, &mut ctx) - .expect("Should run initial program OK"); - assert!(table.ctx.is_initialized); - let expected_initial_rule = (Register(3), RegisterRule::Offset(4)); - assert_eq!(table.ctx.initial_rule, Some(expected_initial_rule)); - - { - let row = table.next_row().expect("Should evaluate first row OK"); - let expected = UnwindTableRow { - start_address: 0, - end_address: 100, - saved_args_size: 0, - cfa: CfaRule::RegisterAndOffset { - register: Register(4), - offset: -12, - 
}, - registers: [(Register(3), RegisterRule::Offset(4))].iter().collect(), - }; - assert_eq!(Some(&expected), row); - } - - // All done! - assert_eq!(Ok(None), table.next_row()); - assert_eq!(Ok(None), table.next_row()); - } - - #[test] - fn test_unwind_table_cie_invalid_rule() { - let initial_instructions1 = Section::with_endian(Endian::Little) - // Test that stack length is reset. - .D8(constants::DW_CFA_remember_state.0) - // Test that stack value is reset (different register from that used later). - .D8(constants::DW_CFA_offset.0 | 4) - .uleb(8) - // Invalid due to missing operands. - .D8(constants::DW_CFA_offset.0); - let initial_instructions1 = initial_instructions1.get_contents().unwrap(); - - let cie1 = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 8, - segment_size: 0, - code_alignment_factor: 1, - data_alignment_factor: 1, - return_address_register: Register(3), - initial_instructions: EndianSlice::new(&initial_instructions1, LittleEndian), - }; - - let initial_instructions2 = Section::with_endian(Endian::Little) - // Register 3 is 4 from the CFA. 
- .D8(constants::DW_CFA_offset.0 | 3) - .uleb(4) - .append_repeated(constants::DW_CFA_nop.0, 4); - let initial_instructions2 = initial_instructions2.get_contents().unwrap(); - - let cie2 = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 8, - segment_size: 0, - code_alignment_factor: 1, - data_alignment_factor: 1, - return_address_register: Register(3), - initial_instructions: EndianSlice::new(&initial_instructions2, LittleEndian), - }; - - let fde1 = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie1.clone(), - initial_segment: 0, - initial_address: 0, - address_range: 100, - augmentation: None, - instructions: EndianSlice::new(&[], LittleEndian), - }; - - let fde2 = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie2.clone(), - initial_segment: 0, - initial_address: 0, - address_range: 100, - augmentation: None, - instructions: EndianSlice::new(&[], LittleEndian), - }; - - let section = &DebugFrame::from(EndianSlice::default()); - let bases = &BaseAddresses::default(); - let mut ctx = Box::new(UnwindContext::new()); - - let table = fde1 - .rows(section, bases, &mut ctx) - .map_eof(&initial_instructions1); - assert_eq!(table.err(), Some(Error::UnexpectedEof(ReaderOffsetId(4)))); - assert!(!ctx.is_initialized); - assert_eq!(ctx.stack.len(), 2); - assert_eq!(ctx.initial_rule, None); - - let _table = fde2 - .rows(section, bases, &mut ctx) - .expect("Should run initial program OK"); - assert!(ctx.is_initialized); - assert_eq!(ctx.stack.len(), 1); - let expected_initial_rule = (Register(3), RegisterRule::Offset(4)); - assert_eq!(ctx.initial_rule, Some(expected_initial_rule)); - } - - #[test] - fn test_unwind_table_next_row() { - let initial_instructions = Section::with_endian(Endian::Little) - // The CFA is -12 from register 4. 
- .D8(constants::DW_CFA_def_cfa_sf.0) - .uleb(4) - .sleb(-12) - // Register 0 is 8 from the CFA. - .D8(constants::DW_CFA_offset.0 | 0) - .uleb(8) - // Register 3 is 4 from the CFA. - .D8(constants::DW_CFA_offset.0 | 3) - .uleb(4) - .append_repeated(constants::DW_CFA_nop.0, 4); - let initial_instructions = initial_instructions.get_contents().unwrap(); - - let cie = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 8, - segment_size: 0, - code_alignment_factor: 1, - data_alignment_factor: 1, - return_address_register: Register(3), - initial_instructions: EndianSlice::new(&initial_instructions, LittleEndian), - }; - - let instructions = Section::with_endian(Endian::Little) - // Initial instructions form a row, advance the address by 1. - .D8(constants::DW_CFA_advance_loc1.0) - .D8(1) - // Register 0 is -16 from the CFA. - .D8(constants::DW_CFA_offset_extended_sf.0) - .uleb(0) - .sleb(-16) - // Finish this row, advance the address by 32. - .D8(constants::DW_CFA_advance_loc1.0) - .D8(32) - // Register 3 is -4 from the CFA. - .D8(constants::DW_CFA_offset_extended_sf.0) - .uleb(3) - .sleb(-4) - // Finish this row, advance the address by 64. - .D8(constants::DW_CFA_advance_loc1.0) - .D8(64) - // Register 5 is 4 from the CFA. - .D8(constants::DW_CFA_offset.0 | 5) - .uleb(4) - // A bunch of nop padding. 
- .append_repeated(constants::DW_CFA_nop.0, 8); - let instructions = instructions.get_contents().unwrap(); - - let fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0, - address_range: 100, - augmentation: None, - instructions: EndianSlice::new(&instructions, LittleEndian), - }; - - let section = &DebugFrame::from(EndianSlice::default()); - let bases = &BaseAddresses::default(); - let mut ctx = Box::new(UnwindContext::new()); - - let mut table = fde - .rows(section, bases, &mut ctx) - .expect("Should run initial program OK"); - assert!(table.ctx.is_initialized); - assert!(table.ctx.initial_rule.is_none()); - let expected_initial_rules: RegisterRuleMap<_> = [ - (Register(0), RegisterRule::Offset(8)), - (Register(3), RegisterRule::Offset(4)), - ] - .iter() - .collect(); - assert_eq!(table.ctx.stack[0].registers, expected_initial_rules); - - { - let row = table.next_row().expect("Should evaluate first row OK"); - let expected = UnwindTableRow { - start_address: 0, - end_address: 1, - saved_args_size: 0, - cfa: CfaRule::RegisterAndOffset { - register: Register(4), - offset: -12, - }, - registers: [ - (Register(0), RegisterRule::Offset(8)), - (Register(3), RegisterRule::Offset(4)), - ] - .iter() - .collect(), - }; - assert_eq!(Some(&expected), row); - } - - { - let row = table.next_row().expect("Should evaluate second row OK"); - let expected = UnwindTableRow { - start_address: 1, - end_address: 33, - saved_args_size: 0, - cfa: CfaRule::RegisterAndOffset { - register: Register(4), - offset: -12, - }, - registers: [ - (Register(0), RegisterRule::Offset(-16)), - (Register(3), RegisterRule::Offset(4)), - ] - .iter() - .collect(), - }; - assert_eq!(Some(&expected), row); - } - - { - let row = table.next_row().expect("Should evaluate third row OK"); - let expected = UnwindTableRow { - start_address: 33, - end_address: 97, - saved_args_size: 0, - cfa: CfaRule::RegisterAndOffset { - 
register: Register(4), - offset: -12, - }, - registers: [ - (Register(0), RegisterRule::Offset(-16)), - (Register(3), RegisterRule::Offset(-4)), - ] - .iter() - .collect(), - }; - assert_eq!(Some(&expected), row); - } - - { - let row = table.next_row().expect("Should evaluate fourth row OK"); - let expected = UnwindTableRow { - start_address: 97, - end_address: 100, - saved_args_size: 0, - cfa: CfaRule::RegisterAndOffset { - register: Register(4), - offset: -12, - }, - registers: [ - (Register(0), RegisterRule::Offset(-16)), - (Register(3), RegisterRule::Offset(-4)), - (Register(5), RegisterRule::Offset(4)), - ] - .iter() - .collect(), - }; - assert_eq!(Some(&expected), row); - } - - // All done! - assert_eq!(Ok(None), table.next_row()); - assert_eq!(Ok(None), table.next_row()); - } - - #[test] - fn test_unwind_info_for_address_ok() { - let instrs1 = Section::with_endian(Endian::Big) - // The CFA is -12 from register 4. - .D8(constants::DW_CFA_def_cfa_sf.0) - .uleb(4) - .sleb(-12); - let instrs1 = instrs1.get_contents().unwrap(); - - let instrs2: Vec<_> = (0..8).map(|_| constants::DW_CFA_nop.0).collect(); - - let instrs3 = Section::with_endian(Endian::Big) - // Initial instructions form a row, advance the address by 100. - .D8(constants::DW_CFA_advance_loc1.0) - .D8(100) - // Register 0 is -16 from the CFA. 
- .D8(constants::DW_CFA_offset_extended_sf.0) - .uleb(0) - .sleb(-16); - let instrs3 = instrs3.get_contents().unwrap(); - - let instrs4: Vec<_> = (0..16).map(|_| constants::DW_CFA_nop.0).collect(); - - let mut cie1 = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 8, - segment_size: 0, - code_alignment_factor: 1, - data_alignment_factor: 1, - return_address_register: Register(3), - initial_instructions: EndianSlice::new(&instrs1, BigEndian), - }; - - let mut cie2 = CommonInformationEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - version: 4, - augmentation: None, - address_size: 4, - segment_size: 0, - code_alignment_factor: 1, - data_alignment_factor: 1, - return_address_register: Register(1), - initial_instructions: EndianSlice::new(&instrs2, BigEndian), - }; - - let cie1_location = Label::new(); - let cie2_location = Label::new(); - - // Write the CIEs first so that their length gets set before we clone - // them into the FDEs and our equality assertions down the line end up - // with all the CIEs always having he correct length. 
- let kind = debug_frame_be(); - let section = Section::with_endian(kind.endian()) - .mark(&cie1_location) - .cie(kind, None, &mut cie1) - .mark(&cie2_location) - .cie(kind, None, &mut cie2); - - let mut fde1 = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie1.clone(), - initial_segment: 0, - initial_address: 0xfeed_beef, - address_range: 200, - augmentation: None, - instructions: EndianSlice::new(&instrs3, BigEndian), - }; - - let mut fde2 = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie2.clone(), - initial_segment: 0, - initial_address: 0xfeed_face, - address_range: 9000, - augmentation: None, - instructions: EndianSlice::new(&instrs4, BigEndian), - }; - - let section = - section - .fde(kind, &cie1_location, &mut fde1) - .fde(kind, &cie2_location, &mut fde2); - section.start().set_const(0); - - let contents = section.get_contents().unwrap(); - let debug_frame = kind.section(&contents); - - // Get the second row of the unwind table in `instrs3`. 
- let bases = Default::default(); - let mut ctx = Box::new(UnwindContext::new()); - let result = debug_frame.unwind_info_for_address( - &bases, - &mut ctx, - 0xfeed_beef + 150, - DebugFrame::cie_from_offset, - ); - assert!(result.is_ok()); - let unwind_info = result.unwrap(); - - assert_eq!( - *unwind_info, - UnwindTableRow { - start_address: fde1.initial_address() + 100, - end_address: fde1.initial_address() + fde1.len(), - saved_args_size: 0, - cfa: CfaRule::RegisterAndOffset { - register: Register(4), - offset: -12, - }, - registers: [(Register(0), RegisterRule::Offset(-16))].iter().collect(), - } - ); - } - - #[test] - fn test_unwind_info_for_address_not_found() { - let debug_frame = DebugFrame::new(&[], NativeEndian); - let bases = Default::default(); - let mut ctx = Box::new(UnwindContext::new()); - let result = debug_frame.unwind_info_for_address( - &bases, - &mut ctx, - 0xbadb_ad99, - DebugFrame::cie_from_offset, - ); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), Error::NoUnwindInfoForAddress); - } - - #[test] - fn test_eh_frame_hdr_unknown_version() { - let bases = BaseAddresses::default(); - let buf = &[42]; - let result = EhFrameHdr::new(buf, NativeEndian).parse(&bases, 8); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), Error::UnknownVersion(42)); - } - - #[test] - fn test_eh_frame_hdr_omit_ehptr() { - let section = Section::with_endian(Endian::Little) - .L8(1) - .L8(0xff) - .L8(0x03) - .L8(0x0b) - .L32(2) - .L32(10) - .L32(1) - .L32(20) - .L32(2) - .L32(0); - let section = section.get_contents().unwrap(); - let bases = BaseAddresses::default(); - let result = EhFrameHdr::new(§ion, LittleEndian).parse(&bases, 8); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), Error::CannotParseOmitPointerEncoding); - } - - #[test] - fn test_eh_frame_hdr_omit_count() { - let section = Section::with_endian(Endian::Little) - .L8(1) - .L8(0x0b) - .L8(0xff) - .L8(0x0b) - .L32(0x12345); - let section = 
section.get_contents().unwrap(); - let bases = BaseAddresses::default(); - let result = EhFrameHdr::new(§ion, LittleEndian).parse(&bases, 8); - assert!(result.is_ok()); - let result = result.unwrap(); - assert_eq!(result.eh_frame_ptr(), Pointer::Direct(0x12345)); - assert!(result.table().is_none()); - } - - #[test] - fn test_eh_frame_hdr_omit_table() { - let section = Section::with_endian(Endian::Little) - .L8(1) - .L8(0x0b) - .L8(0x03) - .L8(0xff) - .L32(0x12345) - .L32(2); - let section = section.get_contents().unwrap(); - let bases = BaseAddresses::default(); - let result = EhFrameHdr::new(§ion, LittleEndian).parse(&bases, 8); - assert!(result.is_ok()); - let result = result.unwrap(); - assert_eq!(result.eh_frame_ptr(), Pointer::Direct(0x12345)); - assert!(result.table().is_none()); - } - - #[test] - fn test_eh_frame_hdr_varlen_table() { - let section = Section::with_endian(Endian::Little) - .L8(1) - .L8(0x0b) - .L8(0x03) - .L8(0x01) - .L32(0x12345) - .L32(2); - let section = section.get_contents().unwrap(); - let bases = BaseAddresses::default(); - let result = EhFrameHdr::new(§ion, LittleEndian).parse(&bases, 8); - assert!(result.is_ok()); - let result = result.unwrap(); - assert_eq!(result.eh_frame_ptr(), Pointer::Direct(0x12345)); - let table = result.table(); - assert!(table.is_some()); - let table = table.unwrap(); - assert_eq!( - table.lookup(0, &bases), - Err(Error::VariableLengthSearchTable) - ); - } - - #[test] - fn test_eh_frame_hdr_indirect_length() { - let section = Section::with_endian(Endian::Little) - .L8(1) - .L8(0x0b) - .L8(0x83) - .L8(0x0b) - .L32(0x12345) - .L32(2); - let section = section.get_contents().unwrap(); - let bases = BaseAddresses::default(); - let result = EhFrameHdr::new(§ion, LittleEndian).parse(&bases, 8); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), Error::UnsupportedPointerEncoding); - } - - #[test] - fn test_eh_frame_hdr_indirect_ptrs() { - let section = Section::with_endian(Endian::Little) - .L8(1) - 
.L8(0x8b) - .L8(0x03) - .L8(0x8b) - .L32(0x12345) - .L32(2) - .L32(10) - .L32(1) - .L32(20) - .L32(2); - let section = section.get_contents().unwrap(); - let bases = BaseAddresses::default(); - let result = EhFrameHdr::new(§ion, LittleEndian).parse(&bases, 8); - assert!(result.is_ok()); - let result = result.unwrap(); - assert_eq!(result.eh_frame_ptr(), Pointer::Indirect(0x12345)); - let table = result.table(); - assert!(table.is_some()); - let table = table.unwrap(); - assert_eq!( - table.lookup(0, &bases), - Err(Error::UnsupportedPointerEncoding) - ); - } - - #[test] - fn test_eh_frame_hdr_good() { - let section = Section::with_endian(Endian::Little) - .L8(1) - .L8(0x0b) - .L8(0x03) - .L8(0x0b) - .L32(0x12345) - .L32(2) - .L32(10) - .L32(1) - .L32(20) - .L32(2); - let section = section.get_contents().unwrap(); - let bases = BaseAddresses::default(); - let result = EhFrameHdr::new(§ion, LittleEndian).parse(&bases, 8); - assert!(result.is_ok()); - let result = result.unwrap(); - assert_eq!(result.eh_frame_ptr(), Pointer::Direct(0x12345)); - let table = result.table(); - assert!(table.is_some()); - let table = table.unwrap(); - assert_eq!(table.lookup(0, &bases), Ok(Pointer::Direct(1))); - assert_eq!(table.lookup(9, &bases), Ok(Pointer::Direct(1))); - assert_eq!(table.lookup(10, &bases), Ok(Pointer::Direct(1))); - assert_eq!(table.lookup(11, &bases), Ok(Pointer::Direct(1))); - assert_eq!(table.lookup(19, &bases), Ok(Pointer::Direct(1))); - assert_eq!(table.lookup(20, &bases), Ok(Pointer::Direct(2))); - assert_eq!(table.lookup(21, &bases), Ok(Pointer::Direct(2))); - assert_eq!(table.lookup(100_000, &bases), Ok(Pointer::Direct(2))); - } - - #[test] - fn test_eh_frame_fde_for_address_good() { - // First, setup eh_frame - // Write the CIE first so that its length gets set before we clone it - // into the FDE. 
- let mut cie = make_test_cie(); - cie.format = Format::Dwarf32; - cie.version = 1; - - let start_of_cie = Label::new(); - let end_of_cie = Label::new(); - - let kind = eh_frame_le(); - let section = Section::with_endian(kind.endian()) - .append_repeated(0, 16) - .mark(&start_of_cie) - .cie(kind, None, &mut cie) - .mark(&end_of_cie); - - let mut fde1 = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 9, - address_range: 4, - augmentation: None, - instructions: EndianSlice::new(&[], LittleEndian), - }; - let mut fde2 = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 20, - address_range: 8, - augmentation: None, - instructions: EndianSlice::new(&[], LittleEndian), - }; - - let start_of_fde1 = Label::new(); - let start_of_fde2 = Label::new(); - - let section = section - // +4 for the FDE length before the CIE offset. 
- .mark(&start_of_fde1) - .fde(kind, (&start_of_fde1 - &start_of_cie + 4) as u64, &mut fde1) - .mark(&start_of_fde2) - .fde(kind, (&start_of_fde2 - &start_of_cie + 4) as u64, &mut fde2); - - section.start().set_const(0); - let section = section.get_contents().unwrap(); - let eh_frame = kind.section(§ion); - - // Setup eh_frame_hdr - let section = Section::with_endian(kind.endian()) - .L8(1) - .L8(0x0b) - .L8(0x03) - .L8(0x0b) - .L32(0x12345) - .L32(2) - .L32(10) - .L32(0x12345 + start_of_fde1.value().unwrap() as u32) - .L32(20) - .L32(0x12345 + start_of_fde2.value().unwrap() as u32); - - let section = section.get_contents().unwrap(); - let bases = BaseAddresses::default(); - let eh_frame_hdr = EhFrameHdr::new(§ion, LittleEndian).parse(&bases, 8); - assert!(eh_frame_hdr.is_ok()); - let eh_frame_hdr = eh_frame_hdr.unwrap(); - - let table = eh_frame_hdr.table(); - assert!(table.is_some()); - let table = table.unwrap(); - - let bases = Default::default(); - let mut iter = table.iter(&bases); - assert_eq!( - iter.next(), - Ok(Some(( - Pointer::Direct(10), - Pointer::Direct(0x12345 + start_of_fde1.value().unwrap() as u64) - ))) - ); - assert_eq!( - iter.next(), - Ok(Some(( - Pointer::Direct(20), - Pointer::Direct(0x12345 + start_of_fde2.value().unwrap() as u64) - ))) - ); - assert_eq!(iter.next(), Ok(None)); - - assert_eq!( - table.iter(&bases).nth(0), - Ok(Some(( - Pointer::Direct(10), - Pointer::Direct(0x12345 + start_of_fde1.value().unwrap() as u64) - ))) - ); - - assert_eq!( - table.iter(&bases).nth(1), - Ok(Some(( - Pointer::Direct(20), - Pointer::Direct(0x12345 + start_of_fde2.value().unwrap() as u64) - ))) - ); - assert_eq!(table.iter(&bases).nth(2), Ok(None)); - - let f = |_: &_, _: &_, o: EhFrameOffset| { - assert_eq!(o, EhFrameOffset(start_of_cie.value().unwrap() as usize)); - Ok(cie.clone()) - }; - assert_eq!( - table.fde_for_address(&eh_frame, &bases, 9, f), - Ok(fde1.clone()) - ); - assert_eq!( - table.fde_for_address(&eh_frame, &bases, 10, f), - 
Ok(fde1.clone()) - ); - assert_eq!(table.fde_for_address(&eh_frame, &bases, 11, f), Ok(fde1)); - assert_eq!( - table.fde_for_address(&eh_frame, &bases, 19, f), - Err(Error::NoUnwindInfoForAddress) - ); - assert_eq!( - table.fde_for_address(&eh_frame, &bases, 20, f), - Ok(fde2.clone()) - ); - assert_eq!(table.fde_for_address(&eh_frame, &bases, 21, f), Ok(fde2)); - assert_eq!( - table.fde_for_address(&eh_frame, &bases, 100_000, f), - Err(Error::NoUnwindInfoForAddress) - ); - } - - #[test] - fn test_eh_frame_stops_at_zero_length() { - let section = Section::with_endian(Endian::Little).L32(0); - let section = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(§ion, LittleEndian); - let bases = Default::default(); - - assert_eq!( - parse_cfi_entry(&bases, &EhFrame::new(&*section, LittleEndian), rest), - Ok(None) - ); - - assert_eq!( - EhFrame::new(§ion, LittleEndian).cie_from_offset(&bases, EhFrameOffset(0)), - Err(Error::NoEntryAtGivenOffset) - ); - } - - fn resolve_cie_offset(buf: &[u8], cie_offset: usize) -> Result { - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf64, - cie: make_test_cie(), - initial_segment: 0, - initial_address: 0xfeed_beef, - address_range: 39, - augmentation: None, - instructions: EndianSlice::new(&[], LittleEndian), - }; - - let kind = eh_frame_le(); - let section = Section::with_endian(kind.endian()) - .append_bytes(&buf) - .fde(kind, cie_offset as u64, &mut fde) - .append_bytes(&buf); - - let section = section.get_contents().unwrap(); - let eh_frame = kind.section(§ion); - let input = &mut EndianSlice::new(§ion[buf.len()..], LittleEndian); - - let bases = Default::default(); - match parse_cfi_entry(&bases, &eh_frame, input) { - Ok(Some(CieOrFde::Fde(partial))) => Ok(partial.cie_offset.0), - Err(e) => Err(e), - otherwise => panic!("Unexpected result: {:#?}", otherwise), - } - } - - #[test] - fn test_eh_frame_resolve_cie_offset_ok() { - let buf = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let 
cie_offset = 2; - // + 4 for size of length field - assert_eq!( - resolve_cie_offset(&buf, buf.len() + 4 - cie_offset), - Ok(cie_offset) - ); - } - - #[test] - fn test_eh_frame_resolve_cie_offset_out_of_bounds() { - let buf = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - assert_eq!( - resolve_cie_offset(&buf, buf.len() + 4 + 2), - Err(Error::OffsetOutOfBounds) - ); - } - - #[test] - fn test_eh_frame_resolve_cie_offset_underflow() { - let buf = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - assert_eq!( - resolve_cie_offset(&buf, ::core::usize::MAX), - Err(Error::OffsetOutOfBounds) - ); - } - - #[test] - fn test_eh_frame_fde_ok() { - let mut cie = make_test_cie(); - cie.format = Format::Dwarf32; - cie.version = 1; - - let start_of_cie = Label::new(); - let end_of_cie = Label::new(); - - // Write the CIE first so that its length gets set before we clone it - // into the FDE. - let kind = eh_frame_le(); - let section = Section::with_endian(kind.endian()) - .append_repeated(0, 16) - .mark(&start_of_cie) - .cie(kind, None, &mut cie) - .mark(&end_of_cie); - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0xfeed_beef, - address_range: 999, - augmentation: None, - instructions: EndianSlice::new(&[], LittleEndian), - }; - - let section = section - // +4 for the FDE length before the CIE offset. 
- .fde(kind, (&end_of_cie - &start_of_cie + 4) as u64, &mut fde); - - section.start().set_const(0); - let section = section.get_contents().unwrap(); - let eh_frame = kind.section(§ion); - let section = EndianSlice::new(§ion, LittleEndian); - - let mut offset = None; - match parse_fde( - eh_frame, - &mut section.range_from(end_of_cie.value().unwrap() as usize..), - |_, _, o| { - offset = Some(o); - assert_eq!(o, EhFrameOffset(start_of_cie.value().unwrap() as usize)); - Ok(cie.clone()) - }, - ) { - Ok(actual) => assert_eq!(actual, fde), - otherwise => panic!("Unexpected result {:?}", otherwise), - } - assert!(offset.is_some()); - } - - #[test] - fn test_eh_frame_fde_out_of_bounds() { - let mut cie = make_test_cie(); - cie.version = 1; - - let end_of_cie = Label::new(); - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf64, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0xfeed_beef, - address_range: 999, - augmentation: None, - instructions: EndianSlice::new(&[], LittleEndian), - }; - - let kind = eh_frame_le(); - let section = Section::with_endian(kind.endian()) - .cie(kind, None, &mut cie) - .mark(&end_of_cie) - .fde(kind, 99_999_999_999_999, &mut fde); - - section.start().set_const(0); - let section = section.get_contents().unwrap(); - let eh_frame = kind.section(§ion); - let section = EndianSlice::new(§ion, LittleEndian); - - let result = parse_fde( - eh_frame, - &mut section.range_from(end_of_cie.value().unwrap() as usize..), - UnwindSection::cie_from_offset, - ); - assert_eq!(result, Err(Error::OffsetOutOfBounds)); - } - - #[test] - fn test_augmentation_parse_not_z_augmentation() { - let augmentation = &mut EndianSlice::new(b"wtf", NativeEndian); - let bases = Default::default(); - let address_size = 8; - let section = EhFrame::new(&[], NativeEndian); - let input = &mut EndianSlice::new(&[], NativeEndian); - assert_eq!( - Augmentation::parse(augmentation, &bases, address_size, §ion, input), - 
Err(Error::UnknownAugmentation) - ); - } - - #[test] - fn test_augmentation_parse_just_signal_trampoline() { - let aug_str = &mut EndianSlice::new(b"S", LittleEndian); - let bases = Default::default(); - let address_size = 8; - let section = EhFrame::new(&[], LittleEndian); - let input = &mut EndianSlice::new(&[], LittleEndian); - - let mut augmentation = Augmentation::default(); - augmentation.is_signal_trampoline = true; - - assert_eq!( - Augmentation::parse(aug_str, &bases, address_size, §ion, input), - Ok(augmentation) - ); - } - - #[test] - fn test_augmentation_parse_unknown_part_of_z_augmentation() { - // The 'Z' character is not defined by the z-style augmentation. - let bases = Default::default(); - let address_size = 8; - let section = Section::with_endian(Endian::Little) - .uleb(4) - .append_repeated(4, 4) - .get_contents() - .unwrap(); - let section = EhFrame::new(§ion, LittleEndian); - let input = &mut section.section().clone(); - let augmentation = &mut EndianSlice::new(b"zZ", LittleEndian); - assert_eq!( - Augmentation::parse(augmentation, &bases, address_size, §ion, input), - Err(Error::UnknownAugmentation) - ); - } - - #[test] - #[allow(non_snake_case)] - fn test_augmentation_parse_L() { - let bases = Default::default(); - let address_size = 8; - let rest = [9, 8, 7, 6, 5, 4, 3, 2, 1]; - - let section = Section::with_endian(Endian::Little) - .uleb(1) - .D8(constants::DW_EH_PE_uleb128.0) - .append_bytes(&rest) - .get_contents() - .unwrap(); - let section = EhFrame::new(§ion, LittleEndian); - let input = &mut section.section().clone(); - let aug_str = &mut EndianSlice::new(b"zL", LittleEndian); - - let mut augmentation = Augmentation::default(); - augmentation.lsda = Some(constants::DW_EH_PE_uleb128); - - assert_eq!( - Augmentation::parse(aug_str, &bases, address_size, §ion, input), - Ok(augmentation) - ); - assert_eq!(*input, EndianSlice::new(&rest, LittleEndian)); - } - - #[test] - #[allow(non_snake_case)] - fn test_augmentation_parse_P() { - let 
bases = Default::default(); - let address_size = 8; - let rest = [9, 8, 7, 6, 5, 4, 3, 2, 1]; - - let section = Section::with_endian(Endian::Little) - .uleb(9) - .D8(constants::DW_EH_PE_udata8.0) - .L64(0xf00d_f00d) - .append_bytes(&rest) - .get_contents() - .unwrap(); - let section = EhFrame::new(§ion, LittleEndian); - let input = &mut section.section().clone(); - let aug_str = &mut EndianSlice::new(b"zP", LittleEndian); - - let mut augmentation = Augmentation::default(); - augmentation.personality = Some((constants::DW_EH_PE_udata8, Pointer::Direct(0xf00d_f00d))); - - assert_eq!( - Augmentation::parse(aug_str, &bases, address_size, §ion, input), - Ok(augmentation) - ); - assert_eq!(*input, EndianSlice::new(&rest, LittleEndian)); - } - - #[test] - #[allow(non_snake_case)] - fn test_augmentation_parse_R() { - let bases = Default::default(); - let address_size = 8; - let rest = [9, 8, 7, 6, 5, 4, 3, 2, 1]; - - let section = Section::with_endian(Endian::Little) - .uleb(1) - .D8(constants::DW_EH_PE_udata4.0) - .append_bytes(&rest) - .get_contents() - .unwrap(); - let section = EhFrame::new(§ion, LittleEndian); - let input = &mut section.section().clone(); - let aug_str = &mut EndianSlice::new(b"zR", LittleEndian); - - let mut augmentation = Augmentation::default(); - augmentation.fde_address_encoding = Some(constants::DW_EH_PE_udata4); - - assert_eq!( - Augmentation::parse(aug_str, &bases, address_size, §ion, input), - Ok(augmentation) - ); - assert_eq!(*input, EndianSlice::new(&rest, LittleEndian)); - } - - #[test] - #[allow(non_snake_case)] - fn test_augmentation_parse_S() { - let bases = Default::default(); - let address_size = 8; - let rest = [9, 8, 7, 6, 5, 4, 3, 2, 1]; - - let section = Section::with_endian(Endian::Little) - .uleb(0) - .append_bytes(&rest) - .get_contents() - .unwrap(); - let section = EhFrame::new(§ion, LittleEndian); - let input = &mut section.section().clone(); - let aug_str = &mut EndianSlice::new(b"zS", LittleEndian); - - let mut 
augmentation = Augmentation::default(); - augmentation.is_signal_trampoline = true; - - assert_eq!( - Augmentation::parse(aug_str, &bases, address_size, §ion, input), - Ok(augmentation) - ); - assert_eq!(*input, EndianSlice::new(&rest, LittleEndian)); - } - - #[test] - fn test_augmentation_parse_all() { - let bases = Default::default(); - let address_size = 8; - let rest = [9, 8, 7, 6, 5, 4, 3, 2, 1]; - - let section = Section::with_endian(Endian::Little) - .uleb(1 + 9 + 1) - // L - .D8(constants::DW_EH_PE_uleb128.0) - // P - .D8(constants::DW_EH_PE_udata8.0) - .L64(0x1bad_f00d) - // R - .D8(constants::DW_EH_PE_uleb128.0) - .append_bytes(&rest) - .get_contents() - .unwrap(); - let section = EhFrame::new(§ion, LittleEndian); - let input = &mut section.section().clone(); - let aug_str = &mut EndianSlice::new(b"zLPRS", LittleEndian); - - let augmentation = Augmentation { - lsda: Some(constants::DW_EH_PE_uleb128), - personality: Some((constants::DW_EH_PE_udata8, Pointer::Direct(0x1bad_f00d))), - fde_address_encoding: Some(constants::DW_EH_PE_uleb128), - is_signal_trampoline: true, - }; - - assert_eq!( - Augmentation::parse(aug_str, &bases, address_size, §ion, input), - Ok(augmentation) - ); - assert_eq!(*input, EndianSlice::new(&rest, LittleEndian)); - } - - #[test] - fn test_eh_frame_fde_no_augmentation() { - let instrs = [1, 2, 3, 4]; - let cie_offset = 1; - - let mut cie = make_test_cie(); - cie.format = Format::Dwarf32; - cie.version = 1; - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0xfeed_face, - address_range: 9000, - augmentation: None, - instructions: EndianSlice::new(&instrs, LittleEndian), - }; - - let rest = [1, 2, 3, 4]; - - let kind = eh_frame_le(); - let section = Section::with_endian(kind.endian()) - .fde(kind, cie_offset, &mut fde) - .append_bytes(&rest) - .get_contents() - .unwrap(); - let section = kind.section(§ion); - let input = &mut 
section.section().clone(); - - let result = parse_fde(section, input, |_, _, _| Ok(cie.clone())); - assert_eq!(result, Ok(fde)); - assert_eq!(*input, EndianSlice::new(&rest, LittleEndian)); - } - - #[test] - fn test_eh_frame_fde_empty_augmentation() { - let instrs = [1, 2, 3, 4]; - let cie_offset = 1; - - let mut cie = make_test_cie(); - cie.format = Format::Dwarf32; - cie.version = 1; - cie.augmentation = Some(Augmentation::default()); - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0xfeed_face, - address_range: 9000, - augmentation: Some(AugmentationData::default()), - instructions: EndianSlice::new(&instrs, LittleEndian), - }; - - let rest = [1, 2, 3, 4]; - - let kind = eh_frame_le(); - let section = Section::with_endian(kind.endian()) - .fde(kind, cie_offset, &mut fde) - .append_bytes(&rest) - .get_contents() - .unwrap(); - let section = kind.section(§ion); - let input = &mut section.section().clone(); - - let result = parse_fde(section, input, |_, _, _| Ok(cie.clone())); - assert_eq!(result, Ok(fde)); - assert_eq!(*input, EndianSlice::new(&rest, LittleEndian)); - } - - #[test] - fn test_eh_frame_fde_lsda_augmentation() { - let instrs = [1, 2, 3, 4]; - let cie_offset = 1; - - let mut cie = make_test_cie(); - cie.format = Format::Dwarf32; - cie.version = 1; - cie.augmentation = Some(Augmentation::default()); - cie.augmentation.as_mut().unwrap().lsda = Some(constants::DW_EH_PE_absptr); - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0xfeed_face, - address_range: 9000, - augmentation: Some(AugmentationData { - lsda: Some(Pointer::Direct(0x1122_3344)), - }), - instructions: EndianSlice::new(&instrs, LittleEndian), - }; - - let rest = [1, 2, 3, 4]; - - let kind = eh_frame_le(); - let section = Section::with_endian(kind.endian()) - .fde(kind, cie_offset, 
&mut fde) - .append_bytes(&rest) - .get_contents() - .unwrap(); - let section = kind.section(§ion); - let input = &mut section.section().clone(); - - let result = parse_fde(section, input, |_, _, _| Ok(cie.clone())); - assert_eq!(result, Ok(fde)); - assert_eq!(*input, EndianSlice::new(&rest, LittleEndian)); - } - - #[test] - fn test_eh_frame_fde_lsda_function_relative() { - let instrs = [1, 2, 3, 4]; - let cie_offset = 1; - - let mut cie = make_test_cie(); - cie.format = Format::Dwarf32; - cie.version = 1; - cie.augmentation = Some(Augmentation::default()); - cie.augmentation.as_mut().unwrap().lsda = Some(constants::DwEhPe( - constants::DW_EH_PE_funcrel.0 | constants::DW_EH_PE_absptr.0, - )); - - let mut fde = FrameDescriptionEntry { - offset: 0, - length: 0, - format: Format::Dwarf32, - cie: cie.clone(), - initial_segment: 0, - initial_address: 0xfeed_face, - address_range: 9000, - augmentation: Some(AugmentationData { - lsda: Some(Pointer::Direct(0xbeef)), - }), - instructions: EndianSlice::new(&instrs, LittleEndian), - }; - - let rest = [1, 2, 3, 4]; - - let kind = eh_frame_le(); - let section = Section::with_endian(kind.endian()) - .append_repeated(10, 10) - .fde(kind, cie_offset, &mut fde) - .append_bytes(&rest) - .get_contents() - .unwrap(); - let section = kind.section(§ion); - let input = &mut section.section().range_from(10..); - - // Adjust the FDE's augmentation to be relative to the function. 
- fde.augmentation.as_mut().unwrap().lsda = Some(Pointer::Direct(0xfeed_face + 0xbeef)); - - let result = parse_fde(section, input, |_, _, _| Ok(cie.clone())); - assert_eq!(result, Ok(fde)); - assert_eq!(*input, EndianSlice::new(&rest, LittleEndian)); - } - - #[test] - fn test_eh_frame_cie_personality_function_relative_bad_context() { - let instrs = [1, 2, 3, 4]; - - let length = Label::new(); - let start = Label::new(); - let end = Label::new(); - - let aug_len = Label::new(); - let aug_start = Label::new(); - let aug_end = Label::new(); - - let section = Section::with_endian(Endian::Little) - // Length - .L32(&length) - .mark(&start) - // CIE ID - .L32(0) - // Version - .D8(1) - // Augmentation - .append_bytes(b"zP\0") - // Code alignment factor - .uleb(1) - // Data alignment factor - .sleb(1) - // Return address register - .uleb(1) - // Augmentation data length. This is a uleb, be we rely on the value - // being less than 2^7 and therefore a valid uleb (can't use Label - // with uleb). - .D8(&aug_len) - .mark(&aug_start) - // Augmentation data. Personality encoding and then encoded pointer. - .D8(constants::DW_EH_PE_funcrel.0 | constants::DW_EH_PE_uleb128.0) - .uleb(1) - .mark(&aug_end) - // Initial instructions - .append_bytes(&instrs) - .mark(&end); - - length.set_const((&end - &start) as u64); - aug_len.set_const((&aug_end - &aug_start) as u64); - - let section = section.get_contents().unwrap(); - let section = EhFrame::new(§ion, LittleEndian); - - let bases = BaseAddresses::default(); - let mut iter = section.entries(&bases); - assert_eq!(iter.next(), Err(Error::FuncRelativePointerInBadContext)); - } - - #[test] - fn register_rule_map_eq() { - // Different order, but still equal. 
- let map1: RegisterRuleMap> = [ - (Register(0), RegisterRule::SameValue), - (Register(3), RegisterRule::Offset(1)), - ] - .iter() - .collect(); - let map2: RegisterRuleMap> = [ - (Register(3), RegisterRule::Offset(1)), - (Register(0), RegisterRule::SameValue), - ] - .iter() - .collect(); - assert_eq!(map1, map2); - assert_eq!(map2, map1); - - // Not equal. - let map3: RegisterRuleMap> = [ - (Register(0), RegisterRule::SameValue), - (Register(2), RegisterRule::Offset(1)), - ] - .iter() - .collect(); - let map4: RegisterRuleMap> = [ - (Register(3), RegisterRule::Offset(1)), - (Register(0), RegisterRule::SameValue), - ] - .iter() - .collect(); - assert!(map3 != map4); - assert!(map4 != map3); - - // One has undefined explicitly set, other implicitly has undefined. - let mut map5 = RegisterRuleMap::>::default(); - map5.set(Register(0), RegisterRule::SameValue).unwrap(); - map5.set(Register(0), RegisterRule::Undefined).unwrap(); - let map6 = RegisterRuleMap::>::default(); - assert_eq!(map5, map6); - assert_eq!(map6, map5); - } - - #[test] - fn iter_register_rules() { - let mut row = UnwindTableRow::>::default(); - row.registers = [ - (Register(0), RegisterRule::SameValue), - (Register(1), RegisterRule::Offset(1)), - (Register(2), RegisterRule::ValOffset(2)), - ] - .iter() - .collect(); - - let mut found0 = false; - let mut found1 = false; - let mut found2 = false; - - for &(register, ref rule) in row.registers() { - match register.0 { - 0 => { - assert_eq!(found0, false); - found0 = true; - assert_eq!(*rule, RegisterRule::SameValue); - } - 1 => { - assert_eq!(found1, false); - found1 = true; - assert_eq!(*rule, RegisterRule::Offset(1)); - } - 2 => { - assert_eq!(found2, false); - found2 = true; - assert_eq!(*rule, RegisterRule::ValOffset(2)); - } - x => panic!("Unexpected register rule: ({}, {:?})", x, rule), - } - } - - assert_eq!(found0, true); - assert_eq!(found1, true); - assert_eq!(found2, true); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn 
size_of_unwind_ctx() { - use core::mem; - let size = mem::size_of::>>(); - let max_size = 30968; - if size > max_size { - assert_eq!(size, max_size); - } - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn size_of_register_rule_map() { - use core::mem; - let size = mem::size_of::>>(); - let max_size = 6152; - if size > max_size { - assert_eq!(size, max_size); - } - } - - #[test] - fn test_parse_pointer_encoding_ok() { - use crate::endianity::NativeEndian; - let expected = - constants::DwEhPe(constants::DW_EH_PE_uleb128.0 | constants::DW_EH_PE_pcrel.0); - let input = [expected.0, 1, 2, 3, 4]; - let input = &mut EndianSlice::new(&input, NativeEndian); - assert_eq!(parse_pointer_encoding(input), Ok(expected)); - assert_eq!(*input, EndianSlice::new(&[1, 2, 3, 4], NativeEndian)); - } - - #[test] - fn test_parse_pointer_encoding_bad_encoding() { - use crate::endianity::NativeEndian; - let expected = - constants::DwEhPe((constants::DW_EH_PE_sdata8.0 + 1) | constants::DW_EH_PE_pcrel.0); - let input = [expected.0, 1, 2, 3, 4]; - let input = &mut EndianSlice::new(&input, NativeEndian); - assert_eq!( - Err(Error::UnknownPointerEncoding), - parse_pointer_encoding(input) - ); - } - - #[test] - fn test_parse_encoded_pointer_absptr() { - let encoding = constants::DW_EH_PE_absptr; - let expected_rest = [1, 2, 3, 4]; - - let input = Section::with_endian(Endian::Little) - .L32(0xf00d_f00d) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0xf00d_f00d)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_pcrel() { - let encoding = constants::DW_EH_PE_pcrel; - let 
expected_rest = [1, 2, 3, 4]; - - let input = Section::with_endian(Endian::Little) - .append_repeated(0, 0x10) - .L32(0x1) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input.range_from(0x10..); - - let parameters = PointerEncodingParameters { - bases: &BaseAddresses::default().set_eh_frame(0x100).eh_frame, - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0x111)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_pcrel_undefined() { - let encoding = constants::DW_EH_PE_pcrel; - - let input = Section::with_endian(Endian::Little).L32(0x1); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Err(Error::PcRelativePointerButSectionBaseIsUndefined) - ); - } - - #[test] - fn test_parse_encoded_pointer_textrel() { - let encoding = constants::DW_EH_PE_textrel; - let expected_rest = [1, 2, 3, 4]; - - let input = Section::with_endian(Endian::Little) - .L32(0x1) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &BaseAddresses::default().set_text(0x10).eh_frame, - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0x11)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_textrel_undefined() 
{ - let encoding = constants::DW_EH_PE_textrel; - - let input = Section::with_endian(Endian::Little).L32(0x1); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Err(Error::TextRelativePointerButTextBaseIsUndefined) - ); - } - - #[test] - fn test_parse_encoded_pointer_datarel() { - let encoding = constants::DW_EH_PE_datarel; - let expected_rest = [1, 2, 3, 4]; - - let input = Section::with_endian(Endian::Little) - .L32(0x1) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &BaseAddresses::default().set_got(0x10).eh_frame, - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0x11)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_datarel_undefined() { - let encoding = constants::DW_EH_PE_datarel; - - let input = Section::with_endian(Endian::Little).L32(0x1); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Err(Error::DataRelativePointerButDataBaseIsUndefined) - ); - } - - #[test] - fn test_parse_encoded_pointer_funcrel() { - let encoding = constants::DW_EH_PE_funcrel; - let expected_rest = [1, 2, 3, 4]; - - let input = 
Section::with_endian(Endian::Little) - .L32(0x1) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: Some(0x10), - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0x11)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_funcrel_undefined() { - let encoding = constants::DW_EH_PE_funcrel; - - let input = Section::with_endian(Endian::Little).L32(0x1); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Err(Error::FuncRelativePointerInBadContext) - ); - } - - #[test] - fn test_parse_encoded_pointer_uleb128() { - let encoding = - constants::DwEhPe(constants::DW_EH_PE_absptr.0 | constants::DW_EH_PE_uleb128.0); - let expected_rest = [1, 2, 3, 4]; - - let input = Section::with_endian(Endian::Little) - .uleb(0x12_3456) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0x12_3456)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_udata2() { - let encoding = - constants::DwEhPe(constants::DW_EH_PE_absptr.0 | 
constants::DW_EH_PE_udata2.0); - let expected_rest = [1, 2, 3, 4]; - - let input = Section::with_endian(Endian::Little) - .L16(0x1234) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0x1234)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_udata4() { - let encoding = - constants::DwEhPe(constants::DW_EH_PE_absptr.0 | constants::DW_EH_PE_udata4.0); - let expected_rest = [1, 2, 3, 4]; - - let input = Section::with_endian(Endian::Little) - .L32(0x1234_5678) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0x1234_5678)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_udata8() { - let encoding = - constants::DwEhPe(constants::DW_EH_PE_absptr.0 | constants::DW_EH_PE_udata8.0); - let expected_rest = [1, 2, 3, 4]; - - let input = Section::with_endian(Endian::Little) - .L64(0x1234_5678_1234_5678) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - 
parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0x1234_5678_1234_5678)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_sleb128() { - let encoding = - constants::DwEhPe(constants::DW_EH_PE_textrel.0 | constants::DW_EH_PE_sleb128.0); - let expected_rest = [1, 2, 3, 4]; - - let input = Section::with_endian(Endian::Little) - .sleb(-0x1111) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &BaseAddresses::default().set_text(0x1111_1111).eh_frame, - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(0x1111_0000)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_sdata2() { - let encoding = - constants::DwEhPe(constants::DW_EH_PE_absptr.0 | constants::DW_EH_PE_sdata2.0); - let expected_rest = [1, 2, 3, 4]; - let expected = 0x111 as i16; - - let input = Section::with_endian(Endian::Little) - .L16(expected as u16) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(expected as u64)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_sdata4() { - let encoding = - constants::DwEhPe(constants::DW_EH_PE_absptr.0 | constants::DW_EH_PE_sdata4.0); - let expected_rest = [1, 2, 3, 4]; - let expected = 0x111_1111 as i32; - - let input = 
Section::with_endian(Endian::Little) - .L32(expected as u32) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(expected as u64)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_sdata8() { - let encoding = - constants::DwEhPe(constants::DW_EH_PE_absptr.0 | constants::DW_EH_PE_sdata8.0); - let expected_rest = [1, 2, 3, 4]; - let expected = -0x11_1111_1222_2222 as i64; - - let input = Section::with_endian(Endian::Little) - .L64(expected as u64) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Direct(expected as u64)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_encoded_pointer_omit() { - let encoding = constants::DW_EH_PE_omit; - - let input = Section::with_endian(Endian::Little).L32(0x1); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Err(Error::CannotParseOmitPointerEncoding) - ); - assert_eq!(rest, input); - } - - #[test] - fn 
test_parse_encoded_pointer_bad_encoding() { - let encoding = constants::DwEhPe(constants::DW_EH_PE_sdata8.0 + 1); - - let input = Section::with_endian(Endian::Little).L32(0x1); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Err(Error::UnknownPointerEncoding) - ); - } - - #[test] - fn test_parse_encoded_pointer_aligned() { - // FIXME: support this encoding! - - let encoding = constants::DW_EH_PE_aligned; - - let input = Section::with_endian(Endian::Little).L32(0x1); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Err(Error::UnsupportedPointerEncoding) - ); - } - - #[test] - fn test_parse_encoded_pointer_indirect() { - let expected_rest = [1, 2, 3, 4]; - let encoding = constants::DW_EH_PE_indirect; - - let input = Section::with_endian(Endian::Little) - .L32(0x1234_5678) - .append_bytes(&expected_rest); - let input = input.get_contents().unwrap(); - let input = EndianSlice::new(&input, LittleEndian); - let mut rest = input; - - let parameters = PointerEncodingParameters { - bases: &SectionBaseAddresses::default(), - func_base: None, - address_size: 4, - section: &input, - }; - assert_eq!( - parse_encoded_pointer(encoding, ¶meters, &mut rest), - Ok(Pointer::Indirect(0x1234_5678)) - ); - assert_eq!(rest, EndianSlice::new(&expected_rest, LittleEndian)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/dwarf.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/dwarf.rs --- 
s390-tools-2.31.0/rust-vendor/gimli/src/read/dwarf.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/dwarf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1210 +0,0 @@ -use alloc::string::String; -use alloc::sync::Arc; - -use crate::common::{ - DebugAddrBase, DebugAddrIndex, DebugInfoOffset, DebugLineStrOffset, DebugLocListsBase, - DebugLocListsIndex, DebugRngListsBase, DebugRngListsIndex, DebugStrOffset, DebugStrOffsetsBase, - DebugStrOffsetsIndex, DebugTypeSignature, DebugTypesOffset, DwarfFileType, DwoId, Encoding, - LocationListsOffset, RangeListsOffset, RawRangeListsOffset, SectionId, UnitSectionOffset, -}; -use crate::constants; -use crate::read::{ - Abbreviations, AbbreviationsCache, AbbreviationsCacheStrategy, AttributeValue, DebugAbbrev, - DebugAddr, DebugAranges, DebugCuIndex, DebugInfo, DebugInfoUnitHeadersIter, DebugLine, - DebugLineStr, DebugLoc, DebugLocLists, DebugRngLists, DebugStr, DebugStrOffsets, DebugTuIndex, - DebugTypes, DebugTypesUnitHeadersIter, DebuggingInformationEntry, EntriesCursor, EntriesRaw, - EntriesTree, Error, IncompleteLineProgram, LocListIter, LocationLists, Range, RangeLists, - RawLocListIter, RawRngListIter, Reader, ReaderOffset, ReaderOffsetId, Result, RngListIter, - Section, UnitHeader, UnitIndex, UnitIndexSectionIterator, UnitOffset, UnitType, -}; - -/// All of the commonly used DWARF sections, and other common information. -#[derive(Debug, Default)] -pub struct Dwarf { - /// The `.debug_abbrev` section. - pub debug_abbrev: DebugAbbrev, - - /// The `.debug_addr` section. - pub debug_addr: DebugAddr, - - /// The `.debug_aranges` section. - pub debug_aranges: DebugAranges, - - /// The `.debug_info` section. - pub debug_info: DebugInfo, - - /// The `.debug_line` section. - pub debug_line: DebugLine, - - /// The `.debug_line_str` section. - pub debug_line_str: DebugLineStr, - - /// The `.debug_str` section. - pub debug_str: DebugStr, - - /// The `.debug_str_offsets` section. 
- pub debug_str_offsets: DebugStrOffsets, - - /// The `.debug_types` section. - pub debug_types: DebugTypes, - - /// The location lists in the `.debug_loc` and `.debug_loclists` sections. - pub locations: LocationLists, - - /// The range lists in the `.debug_ranges` and `.debug_rnglists` sections. - pub ranges: RangeLists, - - /// The type of this file. - pub file_type: DwarfFileType, - - /// The DWARF sections for a supplementary object file. - pub sup: Option>>, - - /// A cache of previously parsed abbreviations for units in this file. - pub abbreviations_cache: AbbreviationsCache, -} - -impl Dwarf { - /// Try to load the DWARF sections using the given loader function. - /// - /// `section` loads a DWARF section from the object file. - /// It should return an empty section if the section does not exist. - /// - /// `section` may either directly return a `Reader` instance (such as - /// `EndianSlice`), or it may return some other type and then convert - /// that type into a `Reader` using `Dwarf::borrow`. - /// - /// After loading, the user should set the `file_type` field and - /// call `load_sup` if required. - pub fn load(mut section: F) -> core::result::Result - where - F: FnMut(SectionId) -> core::result::Result, - { - // Section types are inferred. 
- let debug_loc = Section::load(&mut section)?; - let debug_loclists = Section::load(&mut section)?; - let debug_ranges = Section::load(&mut section)?; - let debug_rnglists = Section::load(&mut section)?; - Ok(Dwarf { - debug_abbrev: Section::load(&mut section)?, - debug_addr: Section::load(&mut section)?, - debug_aranges: Section::load(&mut section)?, - debug_info: Section::load(&mut section)?, - debug_line: Section::load(&mut section)?, - debug_line_str: Section::load(&mut section)?, - debug_str: Section::load(&mut section)?, - debug_str_offsets: Section::load(&mut section)?, - debug_types: Section::load(&mut section)?, - locations: LocationLists::new(debug_loc, debug_loclists), - ranges: RangeLists::new(debug_ranges, debug_rnglists), - file_type: DwarfFileType::Main, - sup: None, - abbreviations_cache: AbbreviationsCache::new(), - }) - } - - /// Load the DWARF sections from the supplementary object file. - /// - /// `section` operates the same as for `load`. - /// - /// Sets `self.sup`, replacing any previous value. - pub fn load_sup(&mut self, section: F) -> core::result::Result<(), E> - where - F: FnMut(SectionId) -> core::result::Result, - { - self.sup = Some(Arc::new(Self::load(section)?)); - Ok(()) - } - - /// Create a `Dwarf` structure that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// It can be useful to load DWARF sections into owned data structures, - /// such as `Vec`. However, we do not implement the `Reader` trait - /// for `Vec`, because it would be very inefficient, but this trait - /// is required for all of the methods that parse the DWARF data. - /// So we first load the DWARF sections into `Vec`s, and then use - /// `borrow` to create `Reader`s that reference the data. 
- /// - /// ```rust,no_run - /// # fn example() -> Result<(), gimli::Error> { - /// # let loader = |name| -> Result<_, gimli::Error> { unimplemented!() }; - /// # let sup_loader = |name| -> Result<_, gimli::Error> { unimplemented!() }; - /// // Read the DWARF sections into `Vec`s with whatever object loader you're using. - /// let mut owned_dwarf: gimli::Dwarf> = gimli::Dwarf::load(loader)?; - /// owned_dwarf.load_sup(sup_loader)?; - /// // Create references to the DWARF sections. - /// let dwarf = owned_dwarf.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// # unreachable!() - /// # } - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> Dwarf - where - F: FnMut(&'a T) -> R, - { - Dwarf { - debug_abbrev: self.debug_abbrev.borrow(&mut borrow), - debug_addr: self.debug_addr.borrow(&mut borrow), - debug_aranges: self.debug_aranges.borrow(&mut borrow), - debug_info: self.debug_info.borrow(&mut borrow), - debug_line: self.debug_line.borrow(&mut borrow), - debug_line_str: self.debug_line_str.borrow(&mut borrow), - debug_str: self.debug_str.borrow(&mut borrow), - debug_str_offsets: self.debug_str_offsets.borrow(&mut borrow), - debug_types: self.debug_types.borrow(&mut borrow), - locations: self.locations.borrow(&mut borrow), - ranges: self.ranges.borrow(&mut borrow), - file_type: self.file_type, - sup: self.sup().map(|sup| Arc::new(sup.borrow(borrow))), - abbreviations_cache: AbbreviationsCache::new(), - } - } - - /// Return a reference to the DWARF sections for supplementary object file. - pub fn sup(&self) -> Option<&Dwarf> { - self.sup.as_ref().map(Arc::as_ref) - } -} - -impl Dwarf { - /// Parse abbreviations and store them in the cache. - /// - /// This will iterate over the units in `self.debug_info` to determine the - /// abbreviations offsets. - /// - /// Errors during parsing abbreviations are also stored in the cache. - /// Errors during iterating over the units are ignored. 
- pub fn populate_abbreviations_cache(&mut self, strategy: AbbreviationsCacheStrategy) { - self.abbreviations_cache - .populate(strategy, &self.debug_abbrev, self.debug_info.units()); - } - - /// Iterate the unit headers in the `.debug_info` section. - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). - #[inline] - pub fn units(&self) -> DebugInfoUnitHeadersIter { - self.debug_info.units() - } - - /// Construct a new `Unit` from the given unit header. - #[inline] - pub fn unit(&self, header: UnitHeader) -> Result> { - Unit::new(self, header) - } - - /// Iterate the type-unit headers in the `.debug_types` section. - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). - #[inline] - pub fn type_units(&self) -> DebugTypesUnitHeadersIter { - self.debug_types.units() - } - - /// Parse the abbreviations for a compilation unit. - #[inline] - pub fn abbreviations(&self, unit: &UnitHeader) -> Result> { - self.abbreviations_cache - .get(&self.debug_abbrev, unit.debug_abbrev_offset()) - } - - /// Return the string offset at the given index. - #[inline] - pub fn string_offset( - &self, - unit: &Unit, - index: DebugStrOffsetsIndex, - ) -> Result> { - self.debug_str_offsets - .get_str_offset(unit.header.format(), unit.str_offsets_base, index) - } - - /// Return the string at the given offset in `.debug_str`. - #[inline] - pub fn string(&self, offset: DebugStrOffset) -> Result { - self.debug_str.get_str(offset) - } - - /// Return the string at the given offset in `.debug_line_str`. - #[inline] - pub fn line_string(&self, offset: DebugLineStrOffset) -> Result { - self.debug_line_str.get_str(offset) - } - - /// Return an attribute value as a string slice. 
- /// - /// If the attribute value is one of: - /// - /// - an inline `DW_FORM_string` string - /// - a `DW_FORM_strp` reference to an offset into the `.debug_str` section - /// - a `DW_FORM_strp_sup` reference to an offset into a supplementary - /// object file - /// - a `DW_FORM_line_strp` reference to an offset into the `.debug_line_str` - /// section - /// - a `DW_FORM_strx` index into the `.debug_str_offsets` entries for the unit - /// - /// then return the attribute's string value. Returns an error if the attribute - /// value does not have a string form, or if a string form has an invalid value. - pub fn attr_string(&self, unit: &Unit, attr: AttributeValue) -> Result { - match attr { - AttributeValue::String(string) => Ok(string), - AttributeValue::DebugStrRef(offset) => self.debug_str.get_str(offset), - AttributeValue::DebugStrRefSup(offset) => { - if let Some(sup) = self.sup() { - sup.debug_str.get_str(offset) - } else { - Err(Error::ExpectedStringAttributeValue) - } - } - AttributeValue::DebugLineStrRef(offset) => self.debug_line_str.get_str(offset), - AttributeValue::DebugStrOffsetsIndex(index) => { - let offset = self.debug_str_offsets.get_str_offset( - unit.header.format(), - unit.str_offsets_base, - index, - )?; - self.debug_str.get_str(offset) - } - _ => Err(Error::ExpectedStringAttributeValue), - } - } - - /// Return the address at the given index. - pub fn address(&self, unit: &Unit, index: DebugAddrIndex) -> Result { - self.debug_addr - .get_address(unit.encoding().address_size, unit.addr_base, index) - } - - /// Try to return an attribute value as an address. - /// - /// If the attribute value is one of: - /// - /// - a `DW_FORM_addr` - /// - a `DW_FORM_addrx` index into the `.debug_addr` entries for the unit - /// - /// then return the address. - /// Returns `None` for other forms. 
- pub fn attr_address(&self, unit: &Unit, attr: AttributeValue) -> Result> { - match attr { - AttributeValue::Addr(addr) => Ok(Some(addr)), - AttributeValue::DebugAddrIndex(index) => self.address(unit, index).map(Some), - _ => Ok(None), - } - } - - /// Return the range list offset for the given raw offset. - /// - /// This handles adding `DW_AT_GNU_ranges_base` if required. - pub fn ranges_offset_from_raw( - &self, - unit: &Unit, - offset: RawRangeListsOffset, - ) -> RangeListsOffset { - if self.file_type == DwarfFileType::Dwo && unit.header.version() < 5 { - RangeListsOffset(offset.0.wrapping_add(unit.rnglists_base.0)) - } else { - RangeListsOffset(offset.0) - } - } - - /// Return the range list offset at the given index. - pub fn ranges_offset( - &self, - unit: &Unit, - index: DebugRngListsIndex, - ) -> Result> { - self.ranges - .get_offset(unit.encoding(), unit.rnglists_base, index) - } - - /// Iterate over the `RangeListEntry`s starting at the given offset. - pub fn ranges( - &self, - unit: &Unit, - offset: RangeListsOffset, - ) -> Result> { - self.ranges.ranges( - offset, - unit.encoding(), - unit.low_pc, - &self.debug_addr, - unit.addr_base, - ) - } - - /// Iterate over the `RawRngListEntry`ies starting at the given offset. - pub fn raw_ranges( - &self, - unit: &Unit, - offset: RangeListsOffset, - ) -> Result> { - self.ranges.raw_ranges(offset, unit.encoding()) - } - - /// Try to return an attribute value as a range list offset. - /// - /// If the attribute value is one of: - /// - /// - a `DW_FORM_sec_offset` reference to the `.debug_ranges` or `.debug_rnglists` sections - /// - a `DW_FORM_rnglistx` index into the `.debug_rnglists` entries for the unit - /// - /// then return the range list offset of the range list. - /// Returns `None` for other forms. 
- pub fn attr_ranges_offset( - &self, - unit: &Unit, - attr: AttributeValue, - ) -> Result>> { - match attr { - AttributeValue::RangeListsRef(offset) => { - Ok(Some(self.ranges_offset_from_raw(unit, offset))) - } - AttributeValue::DebugRngListsIndex(index) => self.ranges_offset(unit, index).map(Some), - _ => Ok(None), - } - } - - /// Try to return an attribute value as a range list entry iterator. - /// - /// If the attribute value is one of: - /// - /// - a `DW_FORM_sec_offset` reference to the `.debug_ranges` or `.debug_rnglists` sections - /// - a `DW_FORM_rnglistx` index into the `.debug_rnglists` entries for the unit - /// - /// then return an iterator over the entries in the range list. - /// Returns `None` for other forms. - pub fn attr_ranges( - &self, - unit: &Unit, - attr: AttributeValue, - ) -> Result>> { - match self.attr_ranges_offset(unit, attr)? { - Some(offset) => Ok(Some(self.ranges(unit, offset)?)), - None => Ok(None), - } - } - - /// Return an iterator for the address ranges of a `DebuggingInformationEntry`. - /// - /// This uses `DW_AT_low_pc`, `DW_AT_high_pc` and `DW_AT_ranges`. - pub fn die_ranges( - &self, - unit: &Unit, - entry: &DebuggingInformationEntry, - ) -> Result> { - let mut low_pc = None; - let mut high_pc = None; - let mut size = None; - let mut attrs = entry.attrs(); - while let Some(attr) = attrs.next()? { - match attr.name() { - constants::DW_AT_low_pc => { - low_pc = Some( - self.attr_address(unit, attr.value())? - .ok_or(Error::UnsupportedAttributeForm)?, - ); - } - constants::DW_AT_high_pc => match attr.value() { - AttributeValue::Udata(val) => size = Some(val), - attr => { - high_pc = Some( - self.attr_address(unit, attr)? - .ok_or(Error::UnsupportedAttributeForm)?, - ); - } - }, - constants::DW_AT_ranges => { - if let Some(list) = self.attr_ranges(unit, attr.value())? 
{ - return Ok(RangeIter(RangeIterInner::List(list))); - } - } - _ => {} - } - } - let range = low_pc.and_then(|begin| { - let end = size.map(|size| begin + size).or(high_pc); - // TODO: perhaps return an error if `end` is `None` - end.map(|end| Range { begin, end }) - }); - Ok(RangeIter(RangeIterInner::Single(range))) - } - - /// Return an iterator for the address ranges of a `Unit`. - /// - /// This uses `DW_AT_low_pc`, `DW_AT_high_pc` and `DW_AT_ranges` of the - /// root `DebuggingInformationEntry`. - pub fn unit_ranges(&self, unit: &Unit) -> Result> { - let mut cursor = unit.header.entries(&unit.abbreviations); - cursor.next_dfs()?; - let root = cursor.current().ok_or(Error::MissingUnitDie)?; - self.die_ranges(unit, root) - } - - /// Return the location list offset at the given index. - pub fn locations_offset( - &self, - unit: &Unit, - index: DebugLocListsIndex, - ) -> Result> { - self.locations - .get_offset(unit.encoding(), unit.loclists_base, index) - } - - /// Iterate over the `LocationListEntry`s starting at the given offset. - pub fn locations( - &self, - unit: &Unit, - offset: LocationListsOffset, - ) -> Result> { - match self.file_type { - DwarfFileType::Main => self.locations.locations( - offset, - unit.encoding(), - unit.low_pc, - &self.debug_addr, - unit.addr_base, - ), - DwarfFileType::Dwo => self.locations.locations_dwo( - offset, - unit.encoding(), - unit.low_pc, - &self.debug_addr, - unit.addr_base, - ), - } - } - - /// Iterate over the raw `LocationListEntry`s starting at the given offset. - pub fn raw_locations( - &self, - unit: &Unit, - offset: LocationListsOffset, - ) -> Result> { - match self.file_type { - DwarfFileType::Main => self.locations.raw_locations(offset, unit.encoding()), - DwarfFileType::Dwo => self.locations.raw_locations_dwo(offset, unit.encoding()), - } - } - - /// Try to return an attribute value as a location list offset. 
- /// - /// If the attribute value is one of: - /// - /// - a `DW_FORM_sec_offset` reference to the `.debug_loc` or `.debug_loclists` sections - /// - a `DW_FORM_loclistx` index into the `.debug_loclists` entries for the unit - /// - /// then return the location list offset of the location list. - /// Returns `None` for other forms. - pub fn attr_locations_offset( - &self, - unit: &Unit, - attr: AttributeValue, - ) -> Result>> { - match attr { - AttributeValue::LocationListsRef(offset) => Ok(Some(offset)), - AttributeValue::DebugLocListsIndex(index) => { - self.locations_offset(unit, index).map(Some) - } - _ => Ok(None), - } - } - - /// Try to return an attribute value as a location list entry iterator. - /// - /// If the attribute value is one of: - /// - /// - a `DW_FORM_sec_offset` reference to the `.debug_loc` or `.debug_loclists` sections - /// - a `DW_FORM_loclistx` index into the `.debug_loclists` entries for the unit - /// - /// then return an iterator over the entries in the location list. - /// Returns `None` for other forms. - pub fn attr_locations( - &self, - unit: &Unit, - attr: AttributeValue, - ) -> Result>> { - match self.attr_locations_offset(unit, attr)? { - Some(offset) => Ok(Some(self.locations(unit, offset)?)), - None => Ok(None), - } - } - - /// Call `Reader::lookup_offset_id` for each section, and return the first match. - /// - /// The first element of the tuple is `true` for supplementary sections. 
- pub fn lookup_offset_id(&self, id: ReaderOffsetId) -> Option<(bool, SectionId, R::Offset)> { - None.or_else(|| self.debug_abbrev.lookup_offset_id(id)) - .or_else(|| self.debug_addr.lookup_offset_id(id)) - .or_else(|| self.debug_aranges.lookup_offset_id(id)) - .or_else(|| self.debug_info.lookup_offset_id(id)) - .or_else(|| self.debug_line.lookup_offset_id(id)) - .or_else(|| self.debug_line_str.lookup_offset_id(id)) - .or_else(|| self.debug_str.lookup_offset_id(id)) - .or_else(|| self.debug_str_offsets.lookup_offset_id(id)) - .or_else(|| self.debug_types.lookup_offset_id(id)) - .or_else(|| self.locations.lookup_offset_id(id)) - .or_else(|| self.ranges.lookup_offset_id(id)) - .map(|(id, offset)| (false, id, offset)) - .or_else(|| { - self.sup() - .and_then(|sup| sup.lookup_offset_id(id)) - .map(|(_, id, offset)| (true, id, offset)) - }) - } - - /// Returns a string representation of the given error. - /// - /// This uses information from the DWARF sections to provide more information in some cases. - pub fn format_error(&self, err: Error) -> String { - #[allow(clippy::single_match)] - match err { - Error::UnexpectedEof(id) => match self.lookup_offset_id(id) { - Some((sup, section, offset)) => { - return format!( - "{} at {}{}+0x{:x}", - err, - section.name(), - if sup { "(sup)" } else { "" }, - offset.into_u64(), - ); - } - None => {} - }, - _ => {} - } - err.description().into() - } -} - -impl Dwarf { - /// Assuming `self` was loaded from a .dwo, take the appropriate - /// sections from `parent` (which contains the skeleton unit for this - /// dwo) such as `.debug_addr` and merge them into this `Dwarf`. - pub fn make_dwo(&mut self, parent: &Dwarf) { - self.file_type = DwarfFileType::Dwo; - // These sections are always taken from the parent file and not the dwo. - self.debug_addr = parent.debug_addr.clone(); - // .debug_rnglists comes from the DWO, .debug_ranges comes from the - // parent file. 
- self.ranges - .set_debug_ranges(parent.ranges.debug_ranges().clone()); - self.sup = parent.sup.clone(); - } -} - -/// The sections from a `.dwp` file. -#[derive(Debug)] -pub struct DwarfPackage { - /// The compilation unit index in the `.debug_cu_index` section. - pub cu_index: UnitIndex, - - /// The type unit index in the `.debug_tu_index` section. - pub tu_index: UnitIndex, - - /// The `.debug_abbrev.dwo` section. - pub debug_abbrev: DebugAbbrev, - - /// The `.debug_info.dwo` section. - pub debug_info: DebugInfo, - - /// The `.debug_line.dwo` section. - pub debug_line: DebugLine, - - /// The `.debug_str.dwo` section. - pub debug_str: DebugStr, - - /// The `.debug_str_offsets.dwo` section. - pub debug_str_offsets: DebugStrOffsets, - - /// The `.debug_loc.dwo` section. - /// - /// Only present when using GNU split-dwarf extension to DWARF 4. - pub debug_loc: DebugLoc, - - /// The `.debug_loclists.dwo` section. - pub debug_loclists: DebugLocLists, - - /// The `.debug_rnglists.dwo` section. - pub debug_rnglists: DebugRngLists, - - /// The `.debug_types.dwo` section. - /// - /// Only present when using GNU split-dwarf extension to DWARF 4. - pub debug_types: DebugTypes, - - /// An empty section. - /// - /// Used when creating `Dwarf`. - pub empty: R, -} - -impl DwarfPackage { - /// Try to load the `.dwp` sections using the given loader function. - /// - /// `section` loads a DWARF section from the object file. - /// It should return an empty section if the section does not exist. - pub fn load(mut section: F, empty: R) -> core::result::Result - where - F: FnMut(SectionId) -> core::result::Result, - E: From, - { - Ok(DwarfPackage { - cu_index: DebugCuIndex::load(&mut section)?.index()?, - tu_index: DebugTuIndex::load(&mut section)?.index()?, - // Section types are inferred. 
- debug_abbrev: Section::load(&mut section)?, - debug_info: Section::load(&mut section)?, - debug_line: Section::load(&mut section)?, - debug_str: Section::load(&mut section)?, - debug_str_offsets: Section::load(&mut section)?, - debug_loc: Section::load(&mut section)?, - debug_loclists: Section::load(&mut section)?, - debug_rnglists: Section::load(&mut section)?, - debug_types: Section::load(&mut section)?, - empty, - }) - } - - /// Find the compilation unit with the given DWO identifier and return its section - /// contributions. - pub fn find_cu(&self, id: DwoId, parent: &Dwarf) -> Result>> { - let row = match self.cu_index.find(id.0) { - Some(row) => row, - None => return Ok(None), - }; - self.cu_sections(row, parent).map(Some) - } - - /// Find the type unit with the given type signature and return its section - /// contributions. - pub fn find_tu( - &self, - signature: DebugTypeSignature, - parent: &Dwarf, - ) -> Result>> { - let row = match self.tu_index.find(signature.0) { - Some(row) => row, - None => return Ok(None), - }; - self.tu_sections(row, parent).map(Some) - } - - /// Return the section contributions of the compilation unit at the given index. - /// - /// The index must be in the range `1..cu_index.unit_count`. - /// - /// This function should only be needed by low level parsers. - pub fn cu_sections(&self, index: u32, parent: &Dwarf) -> Result> { - self.sections(self.cu_index.sections(index)?, parent) - } - - /// Return the section contributions of the compilation unit at the given index. - /// - /// The index must be in the range `1..tu_index.unit_count`. - /// - /// This function should only be needed by low level parsers. - pub fn tu_sections(&self, index: u32, parent: &Dwarf) -> Result> { - self.sections(self.tu_index.sections(index)?, parent) - } - - /// Return the section contributions of a unit. - /// - /// This function should only be needed by low level parsers. 
- pub fn sections( - &self, - sections: UnitIndexSectionIterator, - parent: &Dwarf, - ) -> Result> { - let mut abbrev_offset = 0; - let mut abbrev_size = 0; - let mut info_offset = 0; - let mut info_size = 0; - let mut line_offset = 0; - let mut line_size = 0; - let mut loc_offset = 0; - let mut loc_size = 0; - let mut loclists_offset = 0; - let mut loclists_size = 0; - let mut str_offsets_offset = 0; - let mut str_offsets_size = 0; - let mut rnglists_offset = 0; - let mut rnglists_size = 0; - let mut types_offset = 0; - let mut types_size = 0; - for section in sections { - match section.section { - SectionId::DebugAbbrev => { - abbrev_offset = section.offset; - abbrev_size = section.size; - } - SectionId::DebugInfo => { - info_offset = section.offset; - info_size = section.size; - } - SectionId::DebugLine => { - line_offset = section.offset; - line_size = section.size; - } - SectionId::DebugLoc => { - loc_offset = section.offset; - loc_size = section.size; - } - SectionId::DebugLocLists => { - loclists_offset = section.offset; - loclists_size = section.size; - } - SectionId::DebugStrOffsets => { - str_offsets_offset = section.offset; - str_offsets_size = section.size; - } - SectionId::DebugRngLists => { - rnglists_offset = section.offset; - rnglists_size = section.size; - } - SectionId::DebugTypes => { - types_offset = section.offset; - types_size = section.size; - } - SectionId::DebugMacro | SectionId::DebugMacinfo => { - // These are valid but we can't parse these yet. 
- } - _ => return Err(Error::UnknownIndexSection), - } - } - - let debug_abbrev = self.debug_abbrev.dwp_range(abbrev_offset, abbrev_size)?; - let debug_info = self.debug_info.dwp_range(info_offset, info_size)?; - let debug_line = self.debug_line.dwp_range(line_offset, line_size)?; - let debug_loc = self.debug_loc.dwp_range(loc_offset, loc_size)?; - let debug_loclists = self - .debug_loclists - .dwp_range(loclists_offset, loclists_size)?; - let debug_str_offsets = self - .debug_str_offsets - .dwp_range(str_offsets_offset, str_offsets_size)?; - let debug_rnglists = self - .debug_rnglists - .dwp_range(rnglists_offset, rnglists_size)?; - let debug_types = self.debug_types.dwp_range(types_offset, types_size)?; - - let debug_str = self.debug_str.clone(); - - let debug_addr = parent.debug_addr.clone(); - let debug_ranges = parent.ranges.debug_ranges().clone(); - - let debug_aranges = self.empty.clone().into(); - let debug_line_str = self.empty.clone().into(); - - Ok(Dwarf { - debug_abbrev, - debug_addr, - debug_aranges, - debug_info, - debug_line, - debug_line_str, - debug_str, - debug_str_offsets, - debug_types, - locations: LocationLists::new(debug_loc, debug_loclists), - ranges: RangeLists::new(debug_ranges, debug_rnglists), - file_type: DwarfFileType::Dwo, - sup: parent.sup.clone(), - abbreviations_cache: AbbreviationsCache::new(), - }) - } -} - -/// All of the commonly used information for a unit in the `.debug_info` or `.debug_types` -/// sections. -#[derive(Debug)] -pub struct Unit::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// The header of the unit. - pub header: UnitHeader, - - /// The parsed abbreviations for the unit. - pub abbreviations: Arc, - - /// The `DW_AT_name` attribute of the unit. - pub name: Option, - - /// The `DW_AT_comp_dir` attribute of the unit. - pub comp_dir: Option, - - /// The `DW_AT_low_pc` attribute of the unit. Defaults to 0. - pub low_pc: u64, - - /// The `DW_AT_str_offsets_base` attribute of the unit. Defaults to 0. 
- pub str_offsets_base: DebugStrOffsetsBase, - - /// The `DW_AT_addr_base` attribute of the unit. Defaults to 0. - pub addr_base: DebugAddrBase, - - /// The `DW_AT_loclists_base` attribute of the unit. Defaults to 0. - pub loclists_base: DebugLocListsBase, - - /// The `DW_AT_rnglists_base` attribute of the unit. Defaults to 0. - pub rnglists_base: DebugRngListsBase, - - /// The line number program of the unit. - pub line_program: Option>, - - /// The DWO ID of a skeleton unit or split compilation unit. - pub dwo_id: Option, -} - -impl Unit { - /// Construct a new `Unit` from the given unit header. - #[inline] - pub fn new(dwarf: &Dwarf, header: UnitHeader) -> Result { - let abbreviations = dwarf.abbreviations(&header)?; - Self::new_with_abbreviations(dwarf, header, abbreviations) - } - - /// Construct a new `Unit` from the given unit header and abbreviations. - /// - /// The abbreviations for this call can be obtained using `dwarf.abbreviations(&header)`. - /// The caller may implement caching to reuse the `Abbreviations` across units with the - /// same `header.debug_abbrev_offset()` value. - #[inline] - pub fn new_with_abbreviations( - dwarf: &Dwarf, - header: UnitHeader, - abbreviations: Arc, - ) -> Result { - let mut unit = Unit { - abbreviations, - name: None, - comp_dir: None, - low_pc: 0, - str_offsets_base: DebugStrOffsetsBase::default_for_encoding_and_file( - header.encoding(), - dwarf.file_type, - ), - // NB: Because the .debug_addr section never lives in a .dwo, we can assume its base is always 0 or provided. 
- addr_base: DebugAddrBase(R::Offset::from_u8(0)), - loclists_base: DebugLocListsBase::default_for_encoding_and_file( - header.encoding(), - dwarf.file_type, - ), - rnglists_base: DebugRngListsBase::default_for_encoding_and_file( - header.encoding(), - dwarf.file_type, - ), - line_program: None, - dwo_id: match header.type_() { - UnitType::Skeleton(dwo_id) | UnitType::SplitCompilation(dwo_id) => Some(dwo_id), - _ => None, - }, - header, - }; - let mut name = None; - let mut comp_dir = None; - let mut line_program_offset = None; - let mut low_pc_attr = None; - - { - let mut cursor = unit.header.entries(&unit.abbreviations); - cursor.next_dfs()?; - let root = cursor.current().ok_or(Error::MissingUnitDie)?; - let mut attrs = root.attrs(); - while let Some(attr) = attrs.next()? { - match attr.name() { - constants::DW_AT_name => { - name = Some(attr.value()); - } - constants::DW_AT_comp_dir => { - comp_dir = Some(attr.value()); - } - constants::DW_AT_low_pc => { - low_pc_attr = Some(attr.value()); - } - constants::DW_AT_stmt_list => { - if let AttributeValue::DebugLineRef(offset) = attr.value() { - line_program_offset = Some(offset); - } - } - constants::DW_AT_str_offsets_base => { - if let AttributeValue::DebugStrOffsetsBase(base) = attr.value() { - unit.str_offsets_base = base; - } - } - constants::DW_AT_addr_base | constants::DW_AT_GNU_addr_base => { - if let AttributeValue::DebugAddrBase(base) = attr.value() { - unit.addr_base = base; - } - } - constants::DW_AT_loclists_base => { - if let AttributeValue::DebugLocListsBase(base) = attr.value() { - unit.loclists_base = base; - } - } - constants::DW_AT_rnglists_base | constants::DW_AT_GNU_ranges_base => { - if let AttributeValue::DebugRngListsBase(base) = attr.value() { - unit.rnglists_base = base; - } - } - constants::DW_AT_GNU_dwo_id => { - if unit.dwo_id.is_none() { - if let AttributeValue::DwoId(dwo_id) = attr.value() { - unit.dwo_id = Some(dwo_id); - } - } - } - _ => {} - } - } - } - - unit.name = match name { - 
Some(val) => dwarf.attr_string(&unit, val).ok(), - None => None, - }; - unit.comp_dir = match comp_dir { - Some(val) => dwarf.attr_string(&unit, val).ok(), - None => None, - }; - unit.line_program = match line_program_offset { - Some(offset) => Some(dwarf.debug_line.program( - offset, - unit.header.address_size(), - unit.comp_dir.clone(), - unit.name.clone(), - )?), - None => None, - }; - if let Some(low_pc_attr) = low_pc_attr { - if let Some(addr) = dwarf.attr_address(&unit, low_pc_attr)? { - unit.low_pc = addr; - } - } - Ok(unit) - } - - /// Return the encoding parameters for this unit. - #[inline] - pub fn encoding(&self) -> Encoding { - self.header.encoding() - } - - /// Read the `DebuggingInformationEntry` at the given offset. - pub fn entry(&self, offset: UnitOffset) -> Result> { - self.header.entry(&self.abbreviations, offset) - } - - /// Navigate this unit's `DebuggingInformationEntry`s. - #[inline] - pub fn entries(&self) -> EntriesCursor { - self.header.entries(&self.abbreviations) - } - - /// Navigate this unit's `DebuggingInformationEntry`s - /// starting at the given offset. - #[inline] - pub fn entries_at_offset(&self, offset: UnitOffset) -> Result> { - self.header.entries_at_offset(&self.abbreviations, offset) - } - - /// Navigate this unit's `DebuggingInformationEntry`s as a tree - /// starting at the given offset. - #[inline] - pub fn entries_tree(&self, offset: Option>) -> Result> { - self.header.entries_tree(&self.abbreviations, offset) - } - - /// Read the raw data that defines the Debugging Information Entries. - #[inline] - pub fn entries_raw(&self, offset: Option>) -> Result> { - self.header.entries_raw(&self.abbreviations, offset) - } - - /// Copy attributes that are subject to relocation from another unit. This is intended - /// to be used to copy attributes from a skeleton compilation unit to the corresponding - /// split compilation unit. 
- pub fn copy_relocated_attributes(&mut self, other: &Unit) { - self.low_pc = other.low_pc; - self.addr_base = other.addr_base; - if self.header.version() < 5 { - self.rnglists_base = other.rnglists_base; - } - } - - /// Find the dwo name (if any) for this unit, automatically handling the differences - /// between the standardized DWARF 5 split DWARF format and the pre-DWARF 5 GNU - /// extension. - /// - /// The returned value is relative to this unit's `comp_dir`. - pub fn dwo_name(&self) -> Result>> { - let mut entries = self.entries(); - if let None = entries.next_entry()? { - return Ok(None); - } - - let entry = entries.current().unwrap(); - if self.header.version() < 5 { - entry.attr_value(constants::DW_AT_GNU_dwo_name) - } else { - entry.attr_value(constants::DW_AT_dwo_name) - } - } -} - -impl UnitSectionOffset { - /// Convert an offset to be relative to the start of the given unit, - /// instead of relative to the start of the section. - /// Returns `None` if the offset is not within the unit entries. - pub fn to_unit_offset(&self, unit: &Unit) -> Option> - where - R: Reader, - { - let (offset, unit_offset) = match (self, unit.header.offset()) { - ( - UnitSectionOffset::DebugInfoOffset(offset), - UnitSectionOffset::DebugInfoOffset(unit_offset), - ) => (offset.0, unit_offset.0), - ( - UnitSectionOffset::DebugTypesOffset(offset), - UnitSectionOffset::DebugTypesOffset(unit_offset), - ) => (offset.0, unit_offset.0), - _ => return None, - }; - let offset = match offset.checked_sub(unit_offset) { - Some(offset) => UnitOffset(offset), - None => return None, - }; - if !unit.header.is_valid_offset(offset) { - return None; - } - Some(offset) - } -} - -impl UnitOffset { - /// Convert an offset to be relative to the start of the .debug_info section, - /// instead of relative to the start of the given compilation unit. - /// - /// Does not check that the offset is valid. 
- pub fn to_unit_section_offset(&self, unit: &Unit) -> UnitSectionOffset - where - R: Reader, - { - match unit.header.offset() { - UnitSectionOffset::DebugInfoOffset(unit_offset) => { - DebugInfoOffset(unit_offset.0 + self.0).into() - } - UnitSectionOffset::DebugTypesOffset(unit_offset) => { - DebugTypesOffset(unit_offset.0 + self.0).into() - } - } - } -} - -/// An iterator for the address ranges of a `DebuggingInformationEntry`. -/// -/// Returned by `Dwarf::die_ranges` and `Dwarf::unit_ranges`. -#[derive(Debug)] -pub struct RangeIter(RangeIterInner); - -#[derive(Debug)] -enum RangeIterInner { - Single(Option), - List(RngListIter), -} - -impl Default for RangeIter { - fn default() -> Self { - RangeIter(RangeIterInner::Single(None)) - } -} - -impl RangeIter { - /// Advance the iterator to the next range. - pub fn next(&mut self) -> Result> { - match self.0 { - RangeIterInner::Single(ref mut range) => Ok(range.take()), - RangeIterInner::List(ref mut list) => list.next(), - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for RangeIter { - type Item = Range; - type Error = Error; - - #[inline] - fn next(&mut self) -> ::core::result::Result, Self::Error> { - RangeIter::next(self) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::read::EndianSlice; - use crate::{Endianity, LittleEndian}; - - /// Ensure that `Dwarf` is covariant wrt R. - #[test] - fn test_dwarf_variance() { - /// This only needs to compile. - fn _f<'a: 'b, 'b, E: Endianity>(x: Dwarf>) -> Dwarf> { - x - } - } - - /// Ensure that `Unit` is covariant wrt R. - #[test] - fn test_dwarf_unit_variance() { - /// This only needs to compile. 
- fn _f<'a: 'b, 'b, E: Endianity>(x: Unit>) -> Unit> { - x - } - } - - #[test] - fn test_send() { - fn assert_is_send() {} - assert_is_send::>>(); - assert_is_send::>>(); - } - - #[test] - fn test_format_error() { - let mut owned_dwarf = Dwarf::load(|_| -> Result<_> { Ok(vec![1, 2]) }).unwrap(); - owned_dwarf - .load_sup(|_| -> Result<_> { Ok(vec![1, 2]) }) - .unwrap(); - let dwarf = owned_dwarf.borrow(|section| EndianSlice::new(§ion, LittleEndian)); - - match dwarf.debug_str.get_str(DebugStrOffset(1)) { - Ok(r) => panic!("Unexpected str {:?}", r), - Err(e) => { - assert_eq!( - dwarf.format_error(e), - "Hit the end of input before it was expected at .debug_str+0x1" - ); - } - } - match dwarf.sup().unwrap().debug_str.get_str(DebugStrOffset(1)) { - Ok(r) => panic!("Unexpected str {:?}", r), - Err(e) => { - assert_eq!( - dwarf.format_error(e), - "Hit the end of input before it was expected at .debug_str(sup)+0x1" - ); - } - } - assert_eq!(dwarf.format_error(Error::Io), Error::Io.description()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/endian_reader.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/endian_reader.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/endian_reader.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/endian_reader.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,639 +0,0 @@ -//! Defining custom `Reader`s quickly. - -use alloc::borrow::Cow; -use alloc::rc::Rc; -use alloc::string::String; -use alloc::sync::Arc; -use core::fmt::Debug; -use core::ops::{Deref, Index, Range, RangeFrom, RangeTo}; -use core::slice; -use core::str; -use stable_deref_trait::CloneStableDeref; - -use crate::endianity::Endianity; -use crate::read::{Error, Reader, ReaderOffsetId, Result}; - -/// A reference counted, non-thread-safe slice of bytes and associated -/// endianity. 
-/// -/// ``` -/// # #[cfg(feature = "std")] { -/// use std::rc::Rc; -/// -/// let buf = Rc::from(&[1, 2, 3, 4][..]); -/// let reader = gimli::EndianRcSlice::new(buf, gimli::NativeEndian); -/// # let _ = reader; -/// # } -/// ``` -pub type EndianRcSlice = EndianReader>; - -/// An atomically reference counted, thread-safe slice of bytes and associated -/// endianity. -/// -/// ``` -/// # #[cfg(feature = "std")] { -/// use std::sync::Arc; -/// -/// let buf = Arc::from(&[1, 2, 3, 4][..]); -/// let reader = gimli::EndianArcSlice::new(buf, gimli::NativeEndian); -/// # let _ = reader; -/// # } -/// ``` -pub type EndianArcSlice = EndianReader>; - -/// An easy way to define a custom `Reader` implementation with a reference to a -/// generic buffer of bytes and an associated endianity. -/// -/// Note that the whole original buffer is kept alive in memory even if there is -/// only one reader that references only a handful of bytes from that original -/// buffer. That is, `EndianReader` will not do any copying, moving, or -/// compacting in order to free up unused regions of the original buffer. If you -/// require this kind of behavior, it is up to you to implement `Reader` -/// directly by-hand. -/// -/// # Example -/// -/// Say you have an `mmap`ed file that you want to serve as a `gimli::Reader`. -/// You can wrap that `mmap`ed file up in a `MmapFile` type and use -/// `EndianReader>` or `EndianReader>` as readers as -/// long as `MmapFile` dereferences to the underlying `[u8]` data. -/// -/// ``` -/// use std::io; -/// use std::ops::Deref; -/// use std::path::Path; -/// use std::slice; -/// use std::sync::Arc; -/// -/// /// A type that represents an `mmap`ed file. -/// #[derive(Debug)] -/// pub struct MmapFile { -/// ptr: *const u8, -/// len: usize, -/// } -/// -/// impl MmapFile { -/// pub fn new(path: &Path) -> io::Result { -/// // Call `mmap` and check for errors and all that... 
-/// # unimplemented!() -/// } -/// } -/// -/// impl Drop for MmapFile { -/// fn drop(&mut self) { -/// // Call `munmap` to clean up after ourselves... -/// # unimplemented!() -/// } -/// } -/// -/// // And `MmapFile` can deref to a slice of the `mmap`ed region of memory. -/// impl Deref for MmapFile { -/// type Target = [u8]; -/// fn deref(&self) -> &[u8] { -/// unsafe { -/// slice::from_raw_parts(self.ptr, self.len) -/// } -/// } -/// } -/// -/// /// A type that represents a shared `mmap`ed file. -/// #[derive(Debug, Clone)] -/// pub struct ArcMmapFile(Arc); -/// -/// // And `ArcMmapFile` can deref to a slice of the `mmap`ed region of memory. -/// impl Deref for ArcMmapFile { -/// type Target = [u8]; -/// fn deref(&self) -> &[u8] { -/// &self.0 -/// } -/// } -/// -/// // These are both valid for any `Rc` or `Arc`. -/// unsafe impl gimli::StableDeref for ArcMmapFile {} -/// unsafe impl gimli::CloneStableDeref for ArcMmapFile {} -/// -/// /// A `gimli::Reader` that is backed by an `mmap`ed file! -/// pub type MmapFileReader = gimli::EndianReader; -/// # fn test(_: &MmapFileReader) { } -/// ``` -#[derive(Debug, Clone, Copy, Hash)] -pub struct EndianReader -where - Endian: Endianity, - T: CloneStableDeref + Debug, -{ - range: SubRange, - endian: Endian, -} - -impl PartialEq> for EndianReader -where - Endian: Endianity, - T1: CloneStableDeref + Debug, - T2: CloneStableDeref + Debug, -{ - fn eq(&self, rhs: &EndianReader) -> bool { - self.bytes() == rhs.bytes() - } -} - -impl Eq for EndianReader -where - Endian: Endianity, - T: CloneStableDeref + Debug, -{ -} - -// This is separated out from `EndianReader` so that we can avoid running afoul -// of borrowck. We need to `read_slice(&mut self, ...) -> &[u8]` and then call -// `self.endian.read_whatever` on the result. The problem is that the returned -// slice keeps the `&mut self` borrow active, so we wouldn't be able to access -// `self.endian`. 
Splitting the sub-range out from the endian lets us work -// around this, making it so that only the `self.range` borrow is held active, -// not all of `self`. -// -// This also serves to encapsulate the unsafe code concerning `CloneStableDeref`. -// The `bytes` member is held so that the bytes live long enough, and the -// `CloneStableDeref` ensures these bytes never move. The `ptr` and `len` -// members point inside `bytes`, and are updated during read operations. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -struct SubRange -where - T: CloneStableDeref + Debug, -{ - bytes: T, - ptr: *const u8, - len: usize, -} - -unsafe impl Send for SubRange where T: CloneStableDeref + Debug + Send {} - -unsafe impl Sync for SubRange where T: CloneStableDeref + Debug + Sync {} - -impl SubRange -where - T: CloneStableDeref + Debug, -{ - #[inline] - fn new(bytes: T) -> Self { - let ptr = bytes.as_ptr(); - let len = bytes.len(); - SubRange { bytes, ptr, len } - } - - #[inline] - fn bytes(&self) -> &[u8] { - // Safe because `T` implements `CloneStableDeref`, `bytes` can't be modified, - // and all operations that modify `ptr` and `len` ensure they stay in range. - unsafe { slice::from_raw_parts(self.ptr, self.len) } - } - - #[inline] - fn len(&self) -> usize { - self.len - } - - #[inline] - fn truncate(&mut self, len: usize) { - assert!(len <= self.len); - self.len = len; - } - - #[inline] - fn skip(&mut self, len: usize) { - assert!(len <= self.len); - self.ptr = unsafe { self.ptr.add(len) }; - self.len -= len; - } - - #[inline] - fn read_slice(&mut self, len: usize) -> Option<&[u8]> { - if self.len() < len { - None - } else { - // Same as for `bytes()`. - let bytes = unsafe { slice::from_raw_parts(self.ptr, len) }; - self.skip(len); - Some(bytes) - } - } -} - -impl EndianReader -where - Endian: Endianity, - T: CloneStableDeref + Debug, -{ - /// Construct a new `EndianReader` with the given bytes. 
- #[inline] - pub fn new(bytes: T, endian: Endian) -> EndianReader { - EndianReader { - range: SubRange::new(bytes), - endian, - } - } - - /// Return a reference to the raw bytes underlying this reader. - #[inline] - pub fn bytes(&self) -> &[u8] { - self.range.bytes() - } -} - -/// # Range Methods -/// -/// Unfortunately, `std::ops::Index` *must* return a reference, so we can't -/// implement `Index>` to return a new `EndianReader` the way we -/// would like to. Instead, we abandon fancy indexing operators and have these -/// plain old methods. -impl EndianReader -where - Endian: Endianity, - T: CloneStableDeref + Debug, -{ - /// Take the given `start..end` range of the underlying buffer and return a - /// new `EndianReader`. - /// - /// ``` - /// # #[cfg(feature = "std")] { - /// use gimli::{EndianReader, LittleEndian}; - /// use std::sync::Arc; - /// - /// let buf = Arc::<[u8]>::from(&[0x01, 0x02, 0x03, 0x04][..]); - /// let reader = EndianReader::new(buf.clone(), LittleEndian); - /// assert_eq!(reader.range(1..3), - /// EndianReader::new(&buf[1..3], LittleEndian)); - /// # } - /// ``` - /// - /// # Panics - /// - /// Panics if the range is out of bounds. - pub fn range(&self, idx: Range) -> EndianReader { - let mut r = self.clone(); - r.range.skip(idx.start); - r.range.truncate(idx.len()); - r - } - - /// Take the given `start..` range of the underlying buffer and return a new - /// `EndianReader`. - /// - /// ``` - /// # #[cfg(feature = "std")] { - /// use gimli::{EndianReader, LittleEndian}; - /// use std::sync::Arc; - /// - /// let buf = Arc::<[u8]>::from(&[0x01, 0x02, 0x03, 0x04][..]); - /// let reader = EndianReader::new(buf.clone(), LittleEndian); - /// assert_eq!(reader.range_from(2..), - /// EndianReader::new(&buf[2..], LittleEndian)); - /// # } - /// ``` - /// - /// # Panics - /// - /// Panics if the range is out of bounds. 
- pub fn range_from(&self, idx: RangeFrom) -> EndianReader { - let mut r = self.clone(); - r.range.skip(idx.start); - r - } - - /// Take the given `..end` range of the underlying buffer and return a new - /// `EndianReader`. - /// - /// ``` - /// # #[cfg(feature = "std")] { - /// use gimli::{EndianReader, LittleEndian}; - /// use std::sync::Arc; - /// - /// let buf = Arc::<[u8]>::from(&[0x01, 0x02, 0x03, 0x04][..]); - /// let reader = EndianReader::new(buf.clone(), LittleEndian); - /// assert_eq!(reader.range_to(..3), - /// EndianReader::new(&buf[..3], LittleEndian)); - /// # } - /// ``` - /// - /// # Panics - /// - /// Panics if the range is out of bounds. - pub fn range_to(&self, idx: RangeTo) -> EndianReader { - let mut r = self.clone(); - r.range.truncate(idx.end); - r - } -} - -impl Index for EndianReader -where - Endian: Endianity, - T: CloneStableDeref + Debug, -{ - type Output = u8; - fn index(&self, idx: usize) -> &Self::Output { - &self.bytes()[idx] - } -} - -impl Index> for EndianReader -where - Endian: Endianity, - T: CloneStableDeref + Debug, -{ - type Output = [u8]; - fn index(&self, idx: RangeFrom) -> &Self::Output { - &self.bytes()[idx] - } -} - -impl Deref for EndianReader -where - Endian: Endianity, - T: CloneStableDeref + Debug, -{ - type Target = [u8]; - fn deref(&self) -> &Self::Target { - self.bytes() - } -} - -impl Reader for EndianReader -where - Endian: Endianity, - T: CloneStableDeref + Debug, -{ - type Endian = Endian; - type Offset = usize; - - #[inline] - fn endian(&self) -> Endian { - self.endian - } - - #[inline] - fn len(&self) -> usize { - self.range.len() - } - - #[inline] - fn empty(&mut self) { - self.range.truncate(0); - } - - #[inline] - fn truncate(&mut self, len: usize) -> Result<()> { - if self.len() < len { - Err(Error::UnexpectedEof(self.offset_id())) - } else { - self.range.truncate(len); - Ok(()) - } - } - - #[inline] - fn offset_from(&self, base: &EndianReader) -> usize { - let base_ptr = base.bytes().as_ptr() as *const 
u8 as usize; - let ptr = self.bytes().as_ptr() as *const u8 as usize; - debug_assert!(base_ptr <= ptr); - debug_assert!(ptr + self.bytes().len() <= base_ptr + base.bytes().len()); - ptr - base_ptr - } - - #[inline] - fn offset_id(&self) -> ReaderOffsetId { - ReaderOffsetId(self.bytes().as_ptr() as u64) - } - - #[inline] - fn lookup_offset_id(&self, id: ReaderOffsetId) -> Option { - let id = id.0; - let self_id = self.bytes().as_ptr() as u64; - let self_len = self.bytes().len() as u64; - if id >= self_id && id <= self_id + self_len { - Some((id - self_id) as usize) - } else { - None - } - } - - #[inline] - fn find(&self, byte: u8) -> Result { - self.bytes() - .iter() - .position(|x| *x == byte) - .ok_or_else(|| Error::UnexpectedEof(self.offset_id())) - } - - #[inline] - fn skip(&mut self, len: usize) -> Result<()> { - if self.len() < len { - Err(Error::UnexpectedEof(self.offset_id())) - } else { - self.range.skip(len); - Ok(()) - } - } - - #[inline] - fn split(&mut self, len: usize) -> Result { - if self.len() < len { - Err(Error::UnexpectedEof(self.offset_id())) - } else { - let mut r = self.clone(); - r.range.truncate(len); - self.range.skip(len); - Ok(r) - } - } - - #[inline] - fn to_slice(&self) -> Result> { - Ok(self.bytes().into()) - } - - #[inline] - fn to_string(&self) -> Result> { - match str::from_utf8(self.bytes()) { - Ok(s) => Ok(s.into()), - _ => Err(Error::BadUtf8), - } - } - - #[inline] - fn to_string_lossy(&self) -> Result> { - Ok(String::from_utf8_lossy(self.bytes())) - } - - #[inline] - fn read_slice(&mut self, buf: &mut [u8]) -> Result<()> { - match self.range.read_slice(buf.len()) { - Some(slice) => { - buf.copy_from_slice(slice); - Ok(()) - } - None => Err(Error::UnexpectedEof(self.offset_id())), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::endianity::NativeEndian; - use crate::read::Reader; - - fn native_reader + Debug>( - bytes: T, - ) -> EndianReader { - EndianReader::new(bytes, NativeEndian) - } - - const BUF: &[u8] = 
&[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]; - - #[test] - fn test_reader_split() { - let mut reader = native_reader(BUF); - let left = reader.split(3).unwrap(); - assert_eq!(left, native_reader(&BUF[..3])); - assert_eq!(reader, native_reader(&BUF[3..])); - } - - #[test] - fn test_reader_split_out_of_bounds() { - let mut reader = native_reader(BUF); - assert!(reader.split(30).is_err()); - } - - #[test] - fn bytes_and_len_and_range_and_eq() { - let reader = native_reader(BUF); - assert_eq!(reader.len(), BUF.len()); - assert_eq!(reader.bytes(), BUF); - assert_eq!(reader, native_reader(BUF)); - - let range = reader.range(2..8); - let buf_range = &BUF[2..8]; - assert_eq!(range.len(), buf_range.len()); - assert_eq!(range.bytes(), buf_range); - assert_ne!(range, native_reader(BUF)); - assert_eq!(range, native_reader(buf_range)); - - let range_from = range.range_from(1..); - let buf_range_from = &buf_range[1..]; - assert_eq!(range_from.len(), buf_range_from.len()); - assert_eq!(range_from.bytes(), buf_range_from); - assert_ne!(range_from, native_reader(BUF)); - assert_eq!(range_from, native_reader(buf_range_from)); - - let range_to = range_from.range_to(..4); - let buf_range_to = &buf_range_from[..4]; - assert_eq!(range_to.len(), buf_range_to.len()); - assert_eq!(range_to.bytes(), buf_range_to); - assert_ne!(range_to, native_reader(BUF)); - assert_eq!(range_to, native_reader(buf_range_to)); - } - - #[test] - fn find() { - let mut reader = native_reader(BUF); - reader.skip(2).unwrap(); - assert_eq!( - reader.find(5), - Ok(BUF[2..].iter().position(|x| *x == 5).unwrap()) - ); - } - - #[test] - fn indexing() { - let mut reader = native_reader(BUF); - reader.skip(2).unwrap(); - assert_eq!(reader[0], BUF[2]); - } - - #[test] - #[should_panic] - fn indexing_out_of_bounds() { - let mut reader = native_reader(BUF); - reader.skip(2).unwrap(); - let _ = reader[900]; - } - - #[test] - fn endian() { - let reader = native_reader(BUF); - assert_eq!(reader.endian(), NativeEndian); - } - - #[test] - 
fn empty() { - let mut reader = native_reader(BUF); - assert!(!reader.is_empty()); - reader.empty(); - assert!(reader.is_empty()); - assert!(reader.bytes().is_empty()); - } - - #[test] - fn truncate() { - let reader = native_reader(BUF); - let mut reader = reader.range(2..8); - reader.truncate(2).unwrap(); - assert_eq!(reader.bytes(), &BUF[2..4]); - } - - #[test] - fn offset_from() { - let reader = native_reader(BUF); - let sub = reader.range(2..8); - assert_eq!(sub.offset_from(&reader), 2); - } - - #[test] - fn skip() { - let mut reader = native_reader(BUF); - reader.skip(2).unwrap(); - assert_eq!(reader.bytes(), &BUF[2..]); - } - - #[test] - fn to_slice() { - assert_eq!( - native_reader(BUF).range(2..5).to_slice(), - Ok(Cow::from(&BUF[2..5])) - ); - } - - #[test] - fn to_string_ok() { - let buf = b"hello, world!"; - let reader = native_reader(&buf[..]); - let reader = reader.range_from(7..); - assert_eq!(reader.to_string(), Ok(Cow::from("world!"))); - } - - // The rocket emoji (🚀 = [0xf0, 0x9f, 0x9a, 0x80]) but rotated left by one - // to make it invalid UTF-8. 
- const BAD_UTF8: &[u8] = &[0x9f, 0x9a, 0x80, 0xf0]; - - #[test] - fn to_string_err() { - let reader = native_reader(BAD_UTF8); - assert!(reader.to_string().is_err()); - } - - #[test] - fn to_string_lossy() { - let reader = native_reader(BAD_UTF8); - assert_eq!(reader.to_string_lossy(), Ok(Cow::from("����"))); - } - - #[test] - fn read_u8_array() { - let mut reader = native_reader(BAD_UTF8); - reader.skip(1).unwrap(); - let arr: [u8; 2] = reader.read_u8_array().unwrap(); - assert_eq!(arr, &BAD_UTF8[1..3]); - assert_eq!(reader.bytes(), &BAD_UTF8[3..]); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/endian_slice.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/endian_slice.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/endian_slice.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/endian_slice.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,360 +0,0 @@ -//! Working with byte slices that have an associated endianity. - -#[cfg(feature = "read")] -use alloc::borrow::Cow; -#[cfg(feature = "read")] -use alloc::string::String; -use core::fmt; -use core::ops::{Deref, Range, RangeFrom, RangeTo}; -use core::str; - -use crate::endianity::Endianity; -use crate::read::{Error, Reader, ReaderOffsetId, Result}; - -/// A `&[u8]` slice with endianity metadata. -/// -/// This implements the `Reader` trait, which is used for all reading of DWARF sections. -#[derive(Default, Clone, Copy, PartialEq, Eq, Hash)] -pub struct EndianSlice<'input, Endian> -where - Endian: Endianity, -{ - slice: &'input [u8], - endian: Endian, -} - -impl<'input, Endian> EndianSlice<'input, Endian> -where - Endian: Endianity, -{ - /// Construct a new `EndianSlice` with the given slice and endianity. - #[inline] - pub fn new(slice: &'input [u8], endian: Endian) -> EndianSlice<'input, Endian> { - EndianSlice { slice, endian } - } - - /// Return a reference to the raw slice. 
- #[inline] - #[doc(hidden)] - #[deprecated(note = "Method renamed to EndianSlice::slice; use that instead.")] - pub fn buf(&self) -> &'input [u8] { - self.slice - } - - /// Return a reference to the raw slice. - #[inline] - pub fn slice(&self) -> &'input [u8] { - self.slice - } - - /// Split the slice in two at the given index, resulting in the tuple where - /// the first item has range [0, idx), and the second has range [idx, - /// len). Panics if the index is out of bounds. - #[inline] - pub fn split_at( - &self, - idx: usize, - ) -> (EndianSlice<'input, Endian>, EndianSlice<'input, Endian>) { - (self.range_to(..idx), self.range_from(idx..)) - } - - /// Find the first occurrence of a byte in the slice, and return its index. - #[inline] - pub fn find(&self, byte: u8) -> Option { - self.slice.iter().position(|ch| *ch == byte) - } - - /// Return the offset of the start of the slice relative to the start - /// of the given slice. - #[inline] - pub fn offset_from(&self, base: EndianSlice<'input, Endian>) -> usize { - let base_ptr = base.slice.as_ptr() as *const u8 as usize; - let ptr = self.slice.as_ptr() as *const u8 as usize; - debug_assert!(base_ptr <= ptr); - debug_assert!(ptr + self.slice.len() <= base_ptr + base.slice.len()); - ptr - base_ptr - } - - /// Converts the slice to a string using `str::from_utf8`. - /// - /// Returns an error if the slice contains invalid characters. - #[inline] - pub fn to_string(&self) -> Result<&'input str> { - str::from_utf8(self.slice).map_err(|_| Error::BadUtf8) - } - - /// Converts the slice to a string, including invalid characters, - /// using `String::from_utf8_lossy`. 
- #[cfg(feature = "read")] - #[inline] - pub fn to_string_lossy(&self) -> Cow<'input, str> { - String::from_utf8_lossy(self.slice) - } - - #[inline] - fn read_slice(&mut self, len: usize) -> Result<&'input [u8]> { - if self.slice.len() < len { - Err(Error::UnexpectedEof(self.offset_id())) - } else { - let val = &self.slice[..len]; - self.slice = &self.slice[len..]; - Ok(val) - } - } -} - -/// # Range Methods -/// -/// Unfortunately, `std::ops::Index` *must* return a reference, so we can't -/// implement `Index>` to return a new `EndianSlice` the way we would -/// like to. Instead, we abandon fancy indexing operators and have these plain -/// old methods. -impl<'input, Endian> EndianSlice<'input, Endian> -where - Endian: Endianity, -{ - /// Take the given `start..end` range of the underlying slice and return a - /// new `EndianSlice`. - /// - /// ``` - /// use gimli::{EndianSlice, LittleEndian}; - /// - /// let slice = &[0x01, 0x02, 0x03, 0x04]; - /// let endian_slice = EndianSlice::new(slice, LittleEndian); - /// assert_eq!(endian_slice.range(1..3), - /// EndianSlice::new(&slice[1..3], LittleEndian)); - /// ``` - pub fn range(&self, idx: Range) -> EndianSlice<'input, Endian> { - EndianSlice { - slice: &self.slice[idx], - endian: self.endian, - } - } - - /// Take the given `start..` range of the underlying slice and return a new - /// `EndianSlice`. - /// - /// ``` - /// use gimli::{EndianSlice, LittleEndian}; - /// - /// let slice = &[0x01, 0x02, 0x03, 0x04]; - /// let endian_slice = EndianSlice::new(slice, LittleEndian); - /// assert_eq!(endian_slice.range_from(2..), - /// EndianSlice::new(&slice[2..], LittleEndian)); - /// ``` - pub fn range_from(&self, idx: RangeFrom) -> EndianSlice<'input, Endian> { - EndianSlice { - slice: &self.slice[idx], - endian: self.endian, - } - } - - /// Take the given `..end` range of the underlying slice and return a new - /// `EndianSlice`. 
- /// - /// ``` - /// use gimli::{EndianSlice, LittleEndian}; - /// - /// let slice = &[0x01, 0x02, 0x03, 0x04]; - /// let endian_slice = EndianSlice::new(slice, LittleEndian); - /// assert_eq!(endian_slice.range_to(..3), - /// EndianSlice::new(&slice[..3], LittleEndian)); - /// ``` - pub fn range_to(&self, idx: RangeTo) -> EndianSlice<'input, Endian> { - EndianSlice { - slice: &self.slice[idx], - endian: self.endian, - } - } -} - -impl<'input, Endian> Deref for EndianSlice<'input, Endian> -where - Endian: Endianity, -{ - type Target = [u8]; - fn deref(&self) -> &Self::Target { - self.slice - } -} - -impl<'input, Endian: Endianity> fmt::Debug for EndianSlice<'input, Endian> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> core::result::Result<(), fmt::Error> { - fmt.debug_tuple("EndianSlice") - .field(&self.endian) - .field(&DebugBytes(self.slice)) - .finish() - } -} - -struct DebugBytes<'input>(&'input [u8]); - -impl<'input> core::fmt::Debug for DebugBytes<'input> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> core::result::Result<(), fmt::Error> { - let mut list = fmt.debug_list(); - list.entries(self.0.iter().take(8).copied().map(DebugByte)); - if self.0.len() > 8 { - list.entry(&DebugLen(self.0.len())); - } - list.finish() - } -} - -struct DebugByte(u8); - -impl fmt::Debug for DebugByte { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "0x{:02x}", self.0) - } -} - -struct DebugLen(usize); - -impl fmt::Debug for DebugLen { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "...; {}", self.0) - } -} - -impl<'input, Endian> Reader for EndianSlice<'input, Endian> -where - Endian: Endianity, -{ - type Endian = Endian; - type Offset = usize; - - #[inline] - fn endian(&self) -> Endian { - self.endian - } - - #[inline] - fn len(&self) -> usize { - self.slice.len() - } - - #[inline] - fn is_empty(&self) -> bool { - self.slice.is_empty() - } - - #[inline] - fn empty(&mut self) { - self.slice = &[]; - } - - 
#[inline] - fn truncate(&mut self, len: usize) -> Result<()> { - if self.slice.len() < len { - Err(Error::UnexpectedEof(self.offset_id())) - } else { - self.slice = &self.slice[..len]; - Ok(()) - } - } - - #[inline] - fn offset_from(&self, base: &Self) -> usize { - self.offset_from(*base) - } - - #[inline] - fn offset_id(&self) -> ReaderOffsetId { - ReaderOffsetId(self.slice.as_ptr() as u64) - } - - #[inline] - fn lookup_offset_id(&self, id: ReaderOffsetId) -> Option { - let id = id.0; - let self_id = self.slice.as_ptr() as u64; - let self_len = self.slice.len() as u64; - if id >= self_id && id <= self_id + self_len { - Some((id - self_id) as usize) - } else { - None - } - } - - #[inline] - fn find(&self, byte: u8) -> Result { - self.find(byte) - .ok_or_else(|| Error::UnexpectedEof(self.offset_id())) - } - - #[inline] - fn skip(&mut self, len: usize) -> Result<()> { - if self.slice.len() < len { - Err(Error::UnexpectedEof(self.offset_id())) - } else { - self.slice = &self.slice[len..]; - Ok(()) - } - } - - #[inline] - fn split(&mut self, len: usize) -> Result { - let slice = self.read_slice(len)?; - Ok(EndianSlice::new(slice, self.endian)) - } - - #[cfg(not(feature = "read"))] - fn cannot_implement() -> super::reader::seal_if_no_alloc::Sealed { - super::reader::seal_if_no_alloc::Sealed - } - - #[cfg(feature = "read")] - #[inline] - fn to_slice(&self) -> Result> { - Ok(self.slice.into()) - } - - #[cfg(feature = "read")] - #[inline] - fn to_string(&self) -> Result> { - match str::from_utf8(self.slice) { - Ok(s) => Ok(s.into()), - _ => Err(Error::BadUtf8), - } - } - - #[cfg(feature = "read")] - #[inline] - fn to_string_lossy(&self) -> Result> { - Ok(String::from_utf8_lossy(self.slice)) - } - - #[inline] - fn read_slice(&mut self, buf: &mut [u8]) -> Result<()> { - let slice = self.read_slice(buf.len())?; - buf.copy_from_slice(slice); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::endianity::NativeEndian; - - #[test] - fn 
test_endian_slice_split_at() { - let endian = NativeEndian; - let slice = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]; - let eb = EndianSlice::new(slice, endian); - assert_eq!( - eb.split_at(3), - ( - EndianSlice::new(&slice[..3], endian), - EndianSlice::new(&slice[3..], endian) - ) - ); - } - - #[test] - #[should_panic] - fn test_endian_slice_split_at_out_of_bounds() { - let slice = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]; - let eb = EndianSlice::new(slice, NativeEndian); - eb.split_at(30); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/index.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/index.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/index.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/index.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,535 +0,0 @@ -use core::slice; - -use crate::common::SectionId; -use crate::constants; -use crate::endianity::Endianity; -use crate::read::{EndianSlice, Error, Reader, ReaderOffset, Result, Section}; - -/// The data in the `.debug_cu_index` section of a `.dwp` file. -/// -/// This section contains the compilation unit index. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugCuIndex { - section: R, -} - -impl<'input, Endian> DebugCuIndex> -where - Endian: Endianity, -{ - /// Construct a new `DebugCuIndex` instance from the data in the `.debug_cu_index` - /// section. - pub fn new(section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(section, endian)) - } -} - -impl Section for DebugCuIndex { - fn id() -> SectionId { - SectionId::DebugCuIndex - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugCuIndex { - fn from(section: R) -> Self { - DebugCuIndex { section } - } -} - -impl DebugCuIndex { - /// Parse the index header. - pub fn index(self) -> Result> { - UnitIndex::parse(self.section) - } -} - -/// The data in the `.debug_tu_index` section of a `.dwp` file. -/// -/// This section contains the type unit index. 
-#[derive(Debug, Default, Clone, Copy)] -pub struct DebugTuIndex { - section: R, -} - -impl<'input, Endian> DebugTuIndex> -where - Endian: Endianity, -{ - /// Construct a new `DebugTuIndex` instance from the data in the `.debug_tu_index` - /// section. - pub fn new(section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(section, endian)) - } -} - -impl Section for DebugTuIndex { - fn id() -> SectionId { - SectionId::DebugTuIndex - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugTuIndex { - fn from(section: R) -> Self { - DebugTuIndex { section } - } -} - -impl DebugTuIndex { - /// Parse the index header. - pub fn index(self) -> Result> { - UnitIndex::parse(self.section) - } -} - -const SECTION_COUNT_MAX: u8 = 8; - -/// The partially parsed index from a `DebugCuIndex` or `DebugTuIndex`. -#[derive(Debug, Clone)] -pub struct UnitIndex { - version: u16, - section_count: u32, - unit_count: u32, - slot_count: u32, - hash_ids: R, - hash_rows: R, - // Only `section_count` values are valid. - sections: [SectionId; SECTION_COUNT_MAX as usize], - offsets: R, - sizes: R, -} - -impl UnitIndex { - fn parse(mut input: R) -> Result> { - if input.is_empty() { - return Ok(UnitIndex { - version: 5, - section_count: 0, - unit_count: 0, - slot_count: 0, - hash_ids: input.clone(), - hash_rows: input.clone(), - sections: [SectionId::DebugAbbrev; SECTION_COUNT_MAX as usize], - offsets: input.clone(), - sizes: input.clone(), - }); - } - - // GNU split-dwarf extension to DWARF 4 uses a 32-bit version, - // but DWARF 5 uses a 16-bit version followed by 16-bit padding. - let mut original_input = input.clone(); - let version; - if input.read_u32()? 
== 2 { - version = 2 - } else { - version = original_input.read_u16()?; - if version != 5 { - return Err(Error::UnknownVersion(version.into())); - } - } - - let section_count = input.read_u32()?; - let unit_count = input.read_u32()?; - let slot_count = input.read_u32()?; - if slot_count == 0 || slot_count & (slot_count - 1) != 0 || slot_count <= unit_count { - return Err(Error::InvalidIndexSlotCount); - } - - let hash_ids = input.split(R::Offset::from_u64(u64::from(slot_count) * 8)?)?; - let hash_rows = input.split(R::Offset::from_u64(u64::from(slot_count) * 4)?)?; - - let mut sections = [SectionId::DebugAbbrev; SECTION_COUNT_MAX as usize]; - if section_count > SECTION_COUNT_MAX.into() { - return Err(Error::InvalidIndexSectionCount); - } - for i in 0..section_count { - let section = input.read_u32()?; - sections[i as usize] = if version == 2 { - match constants::DwSectV2(section) { - constants::DW_SECT_V2_INFO => SectionId::DebugInfo, - constants::DW_SECT_V2_TYPES => SectionId::DebugTypes, - constants::DW_SECT_V2_ABBREV => SectionId::DebugAbbrev, - constants::DW_SECT_V2_LINE => SectionId::DebugLine, - constants::DW_SECT_V2_LOC => SectionId::DebugLoc, - constants::DW_SECT_V2_STR_OFFSETS => SectionId::DebugStrOffsets, - constants::DW_SECT_V2_MACINFO => SectionId::DebugMacinfo, - constants::DW_SECT_V2_MACRO => SectionId::DebugMacro, - _ => return Err(Error::UnknownIndexSection), - } - } else { - match constants::DwSect(section) { - constants::DW_SECT_INFO => SectionId::DebugInfo, - constants::DW_SECT_ABBREV => SectionId::DebugAbbrev, - constants::DW_SECT_LINE => SectionId::DebugLine, - constants::DW_SECT_LOCLISTS => SectionId::DebugLocLists, - constants::DW_SECT_STR_OFFSETS => SectionId::DebugStrOffsets, - constants::DW_SECT_MACRO => SectionId::DebugMacro, - constants::DW_SECT_RNGLISTS => SectionId::DebugRngLists, - _ => return Err(Error::UnknownIndexSection), - } - }; - } - - let offsets = input.split(R::Offset::from_u64( - u64::from(unit_count) * 
u64::from(section_count) * 4, - )?)?; - let sizes = input.split(R::Offset::from_u64( - u64::from(unit_count) * u64::from(section_count) * 4, - )?)?; - - Ok(UnitIndex { - version, - section_count, - unit_count, - slot_count, - hash_ids, - hash_rows, - sections, - offsets, - sizes, - }) - } - - /// Find `id` in the index hash table, and return the row index. - /// - /// `id` may be a compilation unit ID if this index is from `.debug_cu_index`, - /// or a type signature if this index is from `.debug_tu_index`. - pub fn find(&self, id: u64) -> Option { - if self.slot_count == 0 { - return None; - } - let mask = u64::from(self.slot_count - 1); - let mut hash1 = id & mask; - let hash2 = ((id >> 32) & mask) | 1; - for _ in 0..self.slot_count { - // The length of these arrays was validated in `UnitIndex::parse`. - let mut hash_ids = self.hash_ids.clone(); - hash_ids.skip(R::Offset::from_u64(hash1 * 8).ok()?).ok()?; - let hash_id = hash_ids.read_u64().ok()?; - if hash_id == id { - let mut hash_rows = self.hash_rows.clone(); - hash_rows.skip(R::Offset::from_u64(hash1 * 4).ok()?).ok()?; - let hash_row = hash_rows.read_u32().ok()?; - return Some(hash_row); - } - if hash_id == 0 { - return None; - } - hash1 = (hash1 + hash2) & mask; - } - None - } - - /// Return the section offsets and sizes for the given row index. - pub fn sections(&self, mut row: u32) -> Result> { - if row == 0 { - return Err(Error::InvalidIndexRow); - } - row -= 1; - if row >= self.unit_count { - return Err(Error::InvalidIndexRow); - } - let mut offsets = self.offsets.clone(); - offsets.skip(R::Offset::from_u64( - u64::from(row) * u64::from(self.section_count) * 4, - )?)?; - let mut sizes = self.sizes.clone(); - sizes.skip(R::Offset::from_u64( - u64::from(row) * u64::from(self.section_count) * 4, - )?)?; - Ok(UnitIndexSectionIterator { - sections: self.sections[..self.section_count as usize].iter(), - offsets, - sizes, - }) - } - - /// Return the version. 
- pub fn version(&self) -> u16 { - self.version - } - - /// Return the number of sections. - pub fn section_count(&self) -> u32 { - self.section_count - } - - /// Return the number of units. - pub fn unit_count(&self) -> u32 { - self.unit_count - } - - /// Return the number of slots. - pub fn slot_count(&self) -> u32 { - self.slot_count - } -} - -/// An iterator over the section offsets and sizes for a row in a `UnitIndex`. -#[derive(Debug, Clone)] -pub struct UnitIndexSectionIterator<'index, R: Reader> { - sections: slice::Iter<'index, SectionId>, - offsets: R, - sizes: R, -} - -impl<'index, R: Reader> Iterator for UnitIndexSectionIterator<'index, R> { - type Item = UnitIndexSection; - - fn next(&mut self) -> Option { - let section = *self.sections.next()?; - // The length of these arrays was validated in `UnitIndex::parse`. - let offset = self.offsets.read_u32().ok()?; - let size = self.sizes.read_u32().ok()?; - Some(UnitIndexSection { - section, - offset, - size, - }) - } -} - -/// Information about a unit's contribution to a section in a `.dwp` file. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct UnitIndexSection { - /// The section kind. - pub section: SectionId, - /// The base offset of the unit's contribution to the section. - pub offset: u32, - /// The size of the unit's contribution to the section. - pub size: u32, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::endianity::BigEndian; - use test_assembler::{Endian, Section}; - - #[test] - fn test_empty() { - let buf = EndianSlice::new(&[], BigEndian); - let index = UnitIndex::parse(buf).unwrap(); - assert!(index.find(0).is_none()); - } - - #[test] - fn test_version_2() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Big) - // Header. - .D32(2).D32(0).D32(0).D32(1) - // Slots. 
- .D64(0).D32(0); - let buf = section.get_contents().unwrap(); - let buf = EndianSlice::new(&buf, BigEndian); - let index = UnitIndex::parse(buf).unwrap(); - assert_eq!(index.version, 2); - } - - #[test] - fn test_version_5() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Big) - // Header. - .D16(5).D16(0).D32(0).D32(0).D32(1) - // Slots. - .D64(0).D32(0); - let buf = section.get_contents().unwrap(); - let buf = EndianSlice::new(&buf, BigEndian); - let index = UnitIndex::parse(buf).unwrap(); - assert_eq!(index.version, 5); - } - - #[test] - fn test_version_5_invalid() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Big) - // Header. - .D32(5).D32(0).D32(0).D32(1) - // Slots. - .D64(0).D32(0); - let buf = section.get_contents().unwrap(); - let buf = EndianSlice::new(&buf, BigEndian); - assert!(UnitIndex::parse(buf).is_err()); - } - - #[test] - fn test_version_2_sections() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Big) - // Header. - .D32(2).D32(8).D32(1).D32(2) - // Slots. - .D64(0).D64(0).D32(0).D32(0) - // Sections. - .D32(constants::DW_SECT_V2_INFO.0) - .D32(constants::DW_SECT_V2_TYPES.0) - .D32(constants::DW_SECT_V2_ABBREV.0) - .D32(constants::DW_SECT_V2_LINE.0) - .D32(constants::DW_SECT_V2_LOC.0) - .D32(constants::DW_SECT_V2_STR_OFFSETS.0) - .D32(constants::DW_SECT_V2_MACINFO.0) - .D32(constants::DW_SECT_V2_MACRO.0) - // Offsets. - .D32(11).D32(12).D32(13).D32(14).D32(15).D32(16).D32(17).D32(18) - // Sizes. 
- .D32(21).D32(22).D32(23).D32(24).D32(25).D32(26).D32(27).D32(28); - let buf = section.get_contents().unwrap(); - let buf = EndianSlice::new(&buf, BigEndian); - let index = UnitIndex::parse(buf).unwrap(); - assert_eq!(index.section_count, 8); - assert_eq!( - index.sections, - [ - SectionId::DebugInfo, - SectionId::DebugTypes, - SectionId::DebugAbbrev, - SectionId::DebugLine, - SectionId::DebugLoc, - SectionId::DebugStrOffsets, - SectionId::DebugMacinfo, - SectionId::DebugMacro, - ] - ); - #[rustfmt::skip] - let expect = [ - UnitIndexSection { section: SectionId::DebugInfo, offset: 11, size: 21 }, - UnitIndexSection { section: SectionId::DebugTypes, offset: 12, size: 22 }, - UnitIndexSection { section: SectionId::DebugAbbrev, offset: 13, size: 23 }, - UnitIndexSection { section: SectionId::DebugLine, offset: 14, size: 24 }, - UnitIndexSection { section: SectionId::DebugLoc, offset: 15, size: 25 }, - UnitIndexSection { section: SectionId::DebugStrOffsets, offset: 16, size: 26 }, - UnitIndexSection { section: SectionId::DebugMacinfo, offset: 17, size: 27 }, - UnitIndexSection { section: SectionId::DebugMacro, offset: 18, size: 28 }, - ]; - let mut sections = index.sections(1).unwrap(); - for section in &expect { - assert_eq!(*section, sections.next().unwrap()); - } - assert!(sections.next().is_none()); - } - - #[test] - fn test_version_5_sections() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Big) - // Header. - .D16(5).D16(0).D32(7).D32(1).D32(2) - // Slots. - .D64(0).D64(0).D32(0).D32(0) - // Sections. - .D32(constants::DW_SECT_INFO.0) - .D32(constants::DW_SECT_ABBREV.0) - .D32(constants::DW_SECT_LINE.0) - .D32(constants::DW_SECT_LOCLISTS.0) - .D32(constants::DW_SECT_STR_OFFSETS.0) - .D32(constants::DW_SECT_MACRO.0) - .D32(constants::DW_SECT_RNGLISTS.0) - // Offsets. - .D32(11).D32(12).D32(13).D32(14).D32(15).D32(16).D32(17) - // Sizes. 
- .D32(21).D32(22).D32(23).D32(24).D32(25).D32(26).D32(27); - let buf = section.get_contents().unwrap(); - let buf = EndianSlice::new(&buf, BigEndian); - let index = UnitIndex::parse(buf).unwrap(); - assert_eq!(index.section_count, 7); - assert_eq!( - index.sections[..7], - [ - SectionId::DebugInfo, - SectionId::DebugAbbrev, - SectionId::DebugLine, - SectionId::DebugLocLists, - SectionId::DebugStrOffsets, - SectionId::DebugMacro, - SectionId::DebugRngLists, - ] - ); - #[rustfmt::skip] - let expect = [ - UnitIndexSection { section: SectionId::DebugInfo, offset: 11, size: 21 }, - UnitIndexSection { section: SectionId::DebugAbbrev, offset: 12, size: 22 }, - UnitIndexSection { section: SectionId::DebugLine, offset: 13, size: 23 }, - UnitIndexSection { section: SectionId::DebugLocLists, offset: 14, size: 24 }, - UnitIndexSection { section: SectionId::DebugStrOffsets, offset: 15, size: 25 }, - UnitIndexSection { section: SectionId::DebugMacro, offset: 16, size: 26 }, - UnitIndexSection { section: SectionId::DebugRngLists, offset: 17, size: 27 }, - ]; - let mut sections = index.sections(1).unwrap(); - for section in &expect { - assert_eq!(*section, sections.next().unwrap()); - } - assert!(sections.next().is_none()); - - assert!(index.sections(0).is_err()); - assert!(index.sections(2).is_err()); - } - - #[test] - fn test_hash() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Big) - // Header. - .D16(5).D16(0).D32(2).D32(3).D32(4) - // Slots. - .D64(0xffff_fff2_ffff_fff1) - .D64(0xffff_fff0_ffff_fff1) - .D64(0xffff_fff1_ffff_fff1) - .D64(0) - .D32(3).D32(1).D32(2).D32(0) - // Sections. - .D32(constants::DW_SECT_INFO.0) - .D32(constants::DW_SECT_ABBREV.0) - // Offsets. - .D32(0).D32(0).D32(0).D32(0).D32(0).D32(0) - // Sizes. 
- .D32(0).D32(0).D32(0).D32(0).D32(0).D32(0); - let buf = section.get_contents().unwrap(); - let buf = EndianSlice::new(&buf, BigEndian); - let index = UnitIndex::parse(buf).unwrap(); - assert_eq!(index.version(), 5); - assert_eq!(index.slot_count(), 4); - assert_eq!(index.unit_count(), 3); - assert_eq!(index.section_count(), 2); - assert_eq!(index.find(0xffff_fff0_ffff_fff1), Some(1)); - assert_eq!(index.find(0xffff_fff1_ffff_fff1), Some(2)); - assert_eq!(index.find(0xffff_fff2_ffff_fff1), Some(3)); - assert_eq!(index.find(0xffff_fff3_ffff_fff1), None); - } - - #[test] - fn test_cu_index() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Big) - // Header. - .D16(5).D16(0).D32(0).D32(0).D32(1) - // Slots. - .D64(0).D32(0); - let buf = section.get_contents().unwrap(); - let cu_index = DebugCuIndex::new(&buf, BigEndian); - let index = cu_index.index().unwrap(); - assert_eq!(index.version, 5); - } - - #[test] - fn test_tu_index() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Big) - // Header. - .D16(5).D16(0).D32(0).D32(0).D32(1) - // Slots. 
- .D64(0).D32(0); - let buf = section.get_contents().unwrap(); - let tu_index = DebugTuIndex::new(&buf, BigEndian); - let index = tu_index.index().unwrap(); - assert_eq!(index.version, 5); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/line.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/line.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/line.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/line.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,3130 +0,0 @@ -use alloc::vec::Vec; -use core::fmt; -use core::num::{NonZeroU64, Wrapping}; -use core::result; - -use crate::common::{ - DebugLineOffset, DebugLineStrOffset, DebugStrOffset, DebugStrOffsetsIndex, Encoding, Format, - LineEncoding, SectionId, -}; -use crate::constants; -use crate::endianity::Endianity; -use crate::read::{AttributeValue, EndianSlice, Error, Reader, ReaderOffset, Result, Section}; - -/// The `DebugLine` struct contains the source location to instruction mapping -/// found in the `.debug_line` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugLine { - debug_line_section: R, -} - -impl<'input, Endian> DebugLine> -where - Endian: Endianity, -{ - /// Construct a new `DebugLine` instance from the data in the `.debug_line` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_line` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. 
- /// - /// ``` - /// use gimli::{DebugLine, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_line_section_somehow = || &buf; - /// let debug_line = DebugLine::new(read_debug_line_section_somehow(), LittleEndian); - /// ``` - pub fn new(debug_line_section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(debug_line_section, endian)) - } -} - -impl DebugLine { - /// Parse the line number program whose header is at the given `offset` in the - /// `.debug_line` section. - /// - /// The `address_size` must match the compilation unit that the lines apply to. - /// The `comp_dir` should be from the `DW_AT_comp_dir` attribute of the compilation - /// unit. The `comp_name` should be from the `DW_AT_name` attribute of the - /// compilation unit. - /// - /// ```rust,no_run - /// use gimli::{DebugLine, DebugLineOffset, IncompleteLineProgram, EndianSlice, LittleEndian}; - /// - /// # let buf = []; - /// # let read_debug_line_section_somehow = || &buf; - /// let debug_line = DebugLine::new(read_debug_line_section_somehow(), LittleEndian); - /// - /// // In a real example, we'd grab the offset via a compilation unit - /// // entry's `DW_AT_stmt_list` attribute, and the address size from that - /// // unit directly. - /// let offset = DebugLineOffset(0); - /// let address_size = 8; - /// - /// let program = debug_line.program(offset, address_size, None, None) - /// .expect("should have found a header at that offset, and parsed it OK"); - /// ``` - pub fn program( - &self, - offset: DebugLineOffset, - address_size: u8, - comp_dir: Option, - comp_name: Option, - ) -> Result> { - let input = &mut self.debug_line_section.clone(); - input.skip(offset.0)?; - let header = LineProgramHeader::parse(input, offset, address_size, comp_dir, comp_name)?; - let program = IncompleteLineProgram { header }; - Ok(program) - } -} - -impl DebugLine { - /// Create a `DebugLine` section that references the data in `self`. 
- /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::DebugLine> = load_section(); - /// // Create a reference to the DWARF section. - /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugLine - where - F: FnMut(&'a T) -> R, - { - borrow(&self.debug_line_section).into() - } -} - -impl Section for DebugLine { - fn id() -> SectionId { - SectionId::DebugLine - } - - fn reader(&self) -> &R { - &self.debug_line_section - } -} - -impl From for DebugLine { - fn from(debug_line_section: R) -> Self { - DebugLine { debug_line_section } - } -} - -/// Deprecated. `LineNumberProgram` has been renamed to `LineProgram`. -#[deprecated(note = "LineNumberProgram has been renamed to LineProgram, use that instead.")] -pub type LineNumberProgram = dyn LineProgram; - -/// A `LineProgram` provides access to a `LineProgramHeader` and -/// a way to add files to the files table if necessary. Gimli consumers should -/// never need to use or see this trait. -pub trait LineProgram::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// Get a reference to the held `LineProgramHeader`. - fn header(&self) -> &LineProgramHeader; - /// Add a file to the file table if necessary. 
- fn add_file(&mut self, file: FileEntry); -} - -impl LineProgram for IncompleteLineProgram -where - R: Reader, - Offset: ReaderOffset, -{ - fn header(&self) -> &LineProgramHeader { - &self.header - } - fn add_file(&mut self, file: FileEntry) { - self.header.file_names.push(file); - } -} - -impl<'program, R, Offset> LineProgram for &'program CompleteLineProgram -where - R: Reader, - Offset: ReaderOffset, -{ - fn header(&self) -> &LineProgramHeader { - &self.header - } - fn add_file(&mut self, _: FileEntry) { - // Nop. Our file table is already complete. - } -} - -/// Deprecated. `StateMachine` has been renamed to `LineRows`. -#[deprecated(note = "StateMachine has been renamed to LineRows, use that instead.")] -pub type StateMachine = LineRows; - -/// Executes a `LineProgram` to iterate over the rows in the matrix of line number information. -/// -/// "The hypothetical machine used by a consumer of the line number information -/// to expand the byte-coded instruction stream into a matrix of line number -/// information." 
-- Section 6.2.1 -#[derive(Debug, Clone)] -pub struct LineRows::Offset> -where - Program: LineProgram, - R: Reader, - Offset: ReaderOffset, -{ - program: Program, - row: LineRow, - instructions: LineInstructions, -} - -type OneShotLineRows::Offset> = - LineRows, Offset>; - -type ResumedLineRows<'program, R, Offset = ::Offset> = - LineRows, Offset>; - -impl LineRows -where - Program: LineProgram, - R: Reader, - Offset: ReaderOffset, -{ - fn new(program: IncompleteLineProgram) -> OneShotLineRows { - let row = LineRow::new(program.header()); - let instructions = LineInstructions { - input: program.header().program_buf.clone(), - }; - LineRows { - program, - row, - instructions, - } - } - - fn resume<'program>( - program: &'program CompleteLineProgram, - sequence: &LineSequence, - ) -> ResumedLineRows<'program, R, Offset> { - let row = LineRow::new(program.header()); - let instructions = sequence.instructions.clone(); - LineRows { - program, - row, - instructions, - } - } - - /// Get a reference to the header for this state machine's line number - /// program. - #[inline] - pub fn header(&self) -> &LineProgramHeader { - self.program.header() - } - - /// Parse and execute the next instructions in the line number program until - /// another row in the line number matrix is computed. - /// - /// The freshly computed row is returned as `Ok(Some((header, row)))`. - /// If the matrix is complete, and there are no more new rows in the line - /// number matrix, then `Ok(None)` is returned. If there was an error parsing - /// an instruction, then `Err(e)` is returned. - /// - /// Unfortunately, the references mean that this cannot be a - /// `FallibleIterator`. - pub fn next_row(&mut self) -> Result, &LineRow)>> { - // Perform any reset that was required after copying the previous row. - self.row.reset(self.program.header()); - - loop { - // Split the borrow here, rather than calling `self.header()`. 
- match self.instructions.next_instruction(self.program.header()) { - Err(err) => return Err(err), - Ok(None) => return Ok(None), - Ok(Some(instruction)) => { - if self.row.execute(instruction, &mut self.program) { - if self.row.tombstone { - // Perform any reset that was required for the tombstone row. - // Normally this is done when `next_row` is called again, but for - // tombstones we loop immediately. - self.row.reset(self.program.header()); - } else { - return Ok(Some((self.header(), &self.row))); - } - } - // Fall through, parse the next instruction, and see if that - // yields a row. - } - } - } - } -} - -/// Deprecated. `Opcode` has been renamed to `LineInstruction`. -#[deprecated(note = "Opcode has been renamed to LineInstruction, use that instead.")] -pub type Opcode = LineInstruction::Offset>; - -/// A parsed line number program instruction. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum LineInstruction::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// > ### 6.2.5.1 Special Opcodes - /// > - /// > Each ubyte special opcode has the following effect on the state machine: - /// > - /// > 1. Add a signed integer to the line register. - /// > - /// > 2. Modify the operation pointer by incrementing the address and - /// > op_index registers as described below. - /// > - /// > 3. Append a row to the matrix using the current values of the state - /// > machine registers. - /// > - /// > 4. Set the basic_block register to “false.†- /// > - /// > 5. Set the prologue_end register to “false.†- /// > - /// > 6. Set the epilogue_begin register to “false.†- /// > - /// > 7. Set the discriminator register to 0. - /// > - /// > All of the special opcodes do those same seven things; they differ from - /// > one another only in what values they add to the line, address and - /// > op_index registers. - Special(u8), - - /// "[`LineInstruction::Copy`] appends a row to the matrix using the current - /// values of the state machine registers. 
Then it sets the discriminator - /// register to 0, and sets the basic_block, prologue_end and epilogue_begin - /// registers to “false.â€" - Copy, - - /// "The DW_LNS_advance_pc opcode takes a single unsigned LEB128 operand as - /// the operation advance and modifies the address and op_index registers - /// [the same as `LineInstruction::Special`]" - AdvancePc(u64), - - /// "The DW_LNS_advance_line opcode takes a single signed LEB128 operand and - /// adds that value to the line register of the state machine." - AdvanceLine(i64), - - /// "The DW_LNS_set_file opcode takes a single unsigned LEB128 operand and - /// stores it in the file register of the state machine." - SetFile(u64), - - /// "The DW_LNS_set_column opcode takes a single unsigned LEB128 operand and - /// stores it in the column register of the state machine." - SetColumn(u64), - - /// "The DW_LNS_negate_stmt opcode takes no operands. It sets the is_stmt - /// register of the state machine to the logical negation of its current - /// value." - NegateStatement, - - /// "The DW_LNS_set_basic_block opcode takes no operands. It sets the - /// basic_block register of the state machine to “true.â€" - SetBasicBlock, - - /// > The DW_LNS_const_add_pc opcode takes no operands. It advances the - /// > address and op_index registers by the increments corresponding to - /// > special opcode 255. - /// > - /// > When the line number program needs to advance the address by a small - /// > amount, it can use a single special opcode, which occupies a single - /// > byte. When it needs to advance the address by up to twice the range of - /// > the last special opcode, it can use DW_LNS_const_add_pc followed by a - /// > special opcode, for a total of two bytes. Only if it needs to advance - /// > the address by more than twice that range will it need to use both - /// > DW_LNS_advance_pc and a special opcode, requiring three or more bytes. 
- ConstAddPc, - - /// > The DW_LNS_fixed_advance_pc opcode takes a single uhalf (unencoded) - /// > operand and adds it to the address register of the state machine and - /// > sets the op_index register to 0. This is the only standard opcode whose - /// > operand is not a variable length number. It also does not multiply the - /// > operand by the minimum_instruction_length field of the header. - FixedAddPc(u16), - - /// "[`LineInstruction::SetPrologueEnd`] sets the prologue_end register to “trueâ€." - SetPrologueEnd, - - /// "[`LineInstruction::SetEpilogueBegin`] sets the epilogue_begin register to - /// “trueâ€." - SetEpilogueBegin, - - /// "The DW_LNS_set_isa opcode takes a single unsigned LEB128 operand and - /// stores that value in the isa register of the state machine." - SetIsa(u64), - - /// An unknown standard opcode with zero operands. - UnknownStandard0(constants::DwLns), - - /// An unknown standard opcode with one operand. - UnknownStandard1(constants::DwLns, u64), - - /// An unknown standard opcode with multiple operands. - UnknownStandardN(constants::DwLns, R), - - /// > [`LineInstruction::EndSequence`] sets the end_sequence register of the state - /// > machine to “true†and appends a row to the matrix using the current - /// > values of the state-machine registers. Then it resets the registers to - /// > the initial values specified above (see Section 6.2.2). Every line - /// > number program sequence must end with a DW_LNE_end_sequence instruction - /// > which creates a row whose address is that of the byte after the last - /// > target machine instruction of the sequence. - EndSequence, - - /// > The DW_LNE_set_address opcode takes a single relocatable address as an - /// > operand. The size of the operand is the size of an address on the target - /// > machine. It sets the address register to the value given by the - /// > relocatable address and sets the op_index register to 0. 
- /// > - /// > All of the other line number program opcodes that affect the address - /// > register add a delta to it. This instruction stores a relocatable value - /// > into it instead. - SetAddress(u64), - - /// Defines a new source file in the line number program and appends it to - /// the line number program header's list of source files. - DefineFile(FileEntry), - - /// "The DW_LNE_set_discriminator opcode takes a single parameter, an - /// unsigned LEB128 integer. It sets the discriminator register to the new - /// value." - SetDiscriminator(u64), - - /// An unknown extended opcode and the slice of its unparsed operands. - UnknownExtended(constants::DwLne, R), -} - -impl LineInstruction -where - R: Reader, - Offset: ReaderOffset, -{ - fn parse<'header>( - header: &'header LineProgramHeader, - input: &mut R, - ) -> Result> - where - R: 'header, - { - let opcode = input.read_u8()?; - if opcode == 0 { - let length = input.read_uleb128().and_then(R::Offset::from_u64)?; - let mut instr_rest = input.split(length)?; - let opcode = instr_rest.read_u8()?; - - match constants::DwLne(opcode) { - constants::DW_LNE_end_sequence => Ok(LineInstruction::EndSequence), - - constants::DW_LNE_set_address => { - let address = instr_rest.read_address(header.address_size())?; - Ok(LineInstruction::SetAddress(address)) - } - - constants::DW_LNE_define_file => { - if header.version() <= 4 { - let path_name = instr_rest.read_null_terminated_slice()?; - let entry = FileEntry::parse(&mut instr_rest, path_name)?; - Ok(LineInstruction::DefineFile(entry)) - } else { - Ok(LineInstruction::UnknownExtended( - constants::DW_LNE_define_file, - instr_rest, - )) - } - } - - constants::DW_LNE_set_discriminator => { - let discriminator = instr_rest.read_uleb128()?; - Ok(LineInstruction::SetDiscriminator(discriminator)) - } - - otherwise => Ok(LineInstruction::UnknownExtended(otherwise, instr_rest)), - } - } else if opcode >= header.opcode_base { - Ok(LineInstruction::Special(opcode)) - } else { 
- match constants::DwLns(opcode) { - constants::DW_LNS_copy => Ok(LineInstruction::Copy), - - constants::DW_LNS_advance_pc => { - let advance = input.read_uleb128()?; - Ok(LineInstruction::AdvancePc(advance)) - } - - constants::DW_LNS_advance_line => { - let increment = input.read_sleb128()?; - Ok(LineInstruction::AdvanceLine(increment)) - } - - constants::DW_LNS_set_file => { - let file = input.read_uleb128()?; - Ok(LineInstruction::SetFile(file)) - } - - constants::DW_LNS_set_column => { - let column = input.read_uleb128()?; - Ok(LineInstruction::SetColumn(column)) - } - - constants::DW_LNS_negate_stmt => Ok(LineInstruction::NegateStatement), - - constants::DW_LNS_set_basic_block => Ok(LineInstruction::SetBasicBlock), - - constants::DW_LNS_const_add_pc => Ok(LineInstruction::ConstAddPc), - - constants::DW_LNS_fixed_advance_pc => { - let advance = input.read_u16()?; - Ok(LineInstruction::FixedAddPc(advance)) - } - - constants::DW_LNS_set_prologue_end => Ok(LineInstruction::SetPrologueEnd), - - constants::DW_LNS_set_epilogue_begin => Ok(LineInstruction::SetEpilogueBegin), - - constants::DW_LNS_set_isa => { - let isa = input.read_uleb128()?; - Ok(LineInstruction::SetIsa(isa)) - } - - otherwise => { - let mut opcode_lengths = header.standard_opcode_lengths().clone(); - opcode_lengths.skip(R::Offset::from_u8(opcode - 1))?; - let num_args = opcode_lengths.read_u8()? 
as usize; - match num_args { - 0 => Ok(LineInstruction::UnknownStandard0(otherwise)), - 1 => { - let arg = input.read_uleb128()?; - Ok(LineInstruction::UnknownStandard1(otherwise, arg)) - } - _ => { - let mut args = input.clone(); - for _ in 0..num_args { - input.read_uleb128()?; - } - let len = input.offset_from(&args); - args.truncate(len)?; - Ok(LineInstruction::UnknownStandardN(otherwise, args)) - } - } - } - } - } - } -} - -impl fmt::Display for LineInstruction -where - R: Reader, - Offset: ReaderOffset, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - match *self { - LineInstruction::Special(opcode) => write!(f, "Special opcode {}", opcode), - LineInstruction::Copy => write!(f, "{}", constants::DW_LNS_copy), - LineInstruction::AdvancePc(advance) => { - write!(f, "{} by {}", constants::DW_LNS_advance_pc, advance) - } - LineInstruction::AdvanceLine(increment) => { - write!(f, "{} by {}", constants::DW_LNS_advance_line, increment) - } - LineInstruction::SetFile(file) => { - write!(f, "{} to {}", constants::DW_LNS_set_file, file) - } - LineInstruction::SetColumn(column) => { - write!(f, "{} to {}", constants::DW_LNS_set_column, column) - } - LineInstruction::NegateStatement => write!(f, "{}", constants::DW_LNS_negate_stmt), - LineInstruction::SetBasicBlock => write!(f, "{}", constants::DW_LNS_set_basic_block), - LineInstruction::ConstAddPc => write!(f, "{}", constants::DW_LNS_const_add_pc), - LineInstruction::FixedAddPc(advance) => { - write!(f, "{} by {}", constants::DW_LNS_fixed_advance_pc, advance) - } - LineInstruction::SetPrologueEnd => write!(f, "{}", constants::DW_LNS_set_prologue_end), - LineInstruction::SetEpilogueBegin => { - write!(f, "{}", constants::DW_LNS_set_epilogue_begin) - } - LineInstruction::SetIsa(isa) => write!(f, "{} to {}", constants::DW_LNS_set_isa, isa), - LineInstruction::UnknownStandard0(opcode) => write!(f, "Unknown {}", opcode), - LineInstruction::UnknownStandard1(opcode, arg) => { - write!(f, "Unknown 
{} with operand {}", opcode, arg) - } - LineInstruction::UnknownStandardN(opcode, ref args) => { - write!(f, "Unknown {} with operands {:?}", opcode, args) - } - LineInstruction::EndSequence => write!(f, "{}", constants::DW_LNE_end_sequence), - LineInstruction::SetAddress(address) => { - write!(f, "{} to {}", constants::DW_LNE_set_address, address) - } - LineInstruction::DefineFile(_) => write!(f, "{}", constants::DW_LNE_define_file), - LineInstruction::SetDiscriminator(discr) => { - write!(f, "{} to {}", constants::DW_LNE_set_discriminator, discr) - } - LineInstruction::UnknownExtended(opcode, _) => write!(f, "Unknown {}", opcode), - } - } -} - -/// Deprecated. `OpcodesIter` has been renamed to `LineInstructions`. -#[deprecated(note = "OpcodesIter has been renamed to LineInstructions, use that instead.")] -pub type OpcodesIter = LineInstructions; - -/// An iterator yielding parsed instructions. -/// -/// See -/// [`LineProgramHeader::instructions`](./struct.LineProgramHeader.html#method.instructions) -/// for more details. -#[derive(Clone, Debug)] -pub struct LineInstructions { - input: R, -} - -impl LineInstructions { - fn remove_trailing(&self, other: &LineInstructions) -> Result> { - let offset = other.input.offset_from(&self.input); - let mut input = self.input.clone(); - input.truncate(offset)?; - Ok(LineInstructions { input }) - } -} - -impl LineInstructions { - /// Advance the iterator and return the next instruction. - /// - /// Returns the newly parsed instruction as `Ok(Some(instruction))`. Returns - /// `Ok(None)` when iteration is complete and all instructions have already been - /// parsed and yielded. If an error occurs while parsing the next attribute, - /// then this error is returned as `Err(e)`, and all subsequent calls return - /// `Ok(None)`. - /// - /// Unfortunately, the `header` parameter means that this cannot be a - /// `FallibleIterator`. 
- #[inline(always)] - pub fn next_instruction( - &mut self, - header: &LineProgramHeader, - ) -> Result>> { - if self.input.is_empty() { - return Ok(None); - } - - match LineInstruction::parse(header, &mut self.input) { - Ok(instruction) => Ok(Some(instruction)), - Err(e) => { - self.input.empty(); - Err(e) - } - } - } -} - -/// Deprecated. `LineNumberRow` has been renamed to `LineRow`. -#[deprecated(note = "LineNumberRow has been renamed to LineRow, use that instead.")] -pub type LineNumberRow = LineRow; - -/// A row in the line number program's resulting matrix. -/// -/// Each row is a copy of the registers of the state machine, as defined in section 6.2.2. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct LineRow { - tombstone: bool, - address: Wrapping, - op_index: Wrapping, - file: u64, - line: Wrapping, - column: u64, - is_stmt: bool, - basic_block: bool, - end_sequence: bool, - prologue_end: bool, - epilogue_begin: bool, - isa: u64, - discriminator: u64, -} - -impl LineRow { - /// Create a line number row in the initial state for the given program. - pub fn new(header: &LineProgramHeader) -> Self { - LineRow { - // "At the beginning of each sequence within a line number program, the - // state of the registers is:" -- Section 6.2.2 - tombstone: false, - address: Wrapping(0), - op_index: Wrapping(0), - file: 1, - line: Wrapping(1), - column: 0, - // "determined by default_is_stmt in the line number program header" - is_stmt: header.line_encoding.default_is_stmt, - basic_block: false, - end_sequence: false, - prologue_end: false, - epilogue_begin: false, - // "The isa value 0 specifies that the instruction set is the - // architecturally determined default instruction set. This may be fixed - // by the ABI, or it may be specified by other means, for example, by - // the object file description." - isa: 0, - discriminator: 0, - } - } - - /// "The program-counter value corresponding to a machine instruction - /// generated by the compiler." 
- #[inline] - pub fn address(&self) -> u64 { - self.address.0 - } - - /// > An unsigned integer representing the index of an operation within a VLIW - /// > instruction. The index of the first operation is 0. For non-VLIW - /// > architectures, this register will always be 0. - /// > - /// > The address and op_index registers, taken together, form an operation - /// > pointer that can reference any individual operation with the - /// > instruction stream. - #[inline] - pub fn op_index(&self) -> u64 { - self.op_index.0 - } - - /// "An unsigned integer indicating the identity of the source file - /// corresponding to a machine instruction." - #[inline] - pub fn file_index(&self) -> u64 { - self.file - } - - /// The source file corresponding to the current machine instruction. - #[inline] - pub fn file<'header, R: Reader>( - &self, - header: &'header LineProgramHeader, - ) -> Option<&'header FileEntry> { - header.file(self.file) - } - - /// "An unsigned integer indicating a source line number. Lines are numbered - /// beginning at 1. The compiler may emit the value 0 in cases where an - /// instruction cannot be attributed to any source line." - /// Line number values of 0 are represented as `None`. - #[inline] - pub fn line(&self) -> Option { - NonZeroU64::new(self.line.0) - } - - /// "An unsigned integer indicating a column number within a source - /// line. Columns are numbered beginning at 1. The value 0 is reserved to - /// indicate that a statement begins at the “left edge†of the line." - #[inline] - pub fn column(&self) -> ColumnType { - NonZeroU64::new(self.column) - .map(ColumnType::Column) - .unwrap_or(ColumnType::LeftEdge) - } - - /// "A boolean indicating that the current instruction is a recommended - /// breakpoint location. A recommended breakpoint location is intended to - /// “represent†a line, a statement and/or a semantically distinct subpart - /// of a statement." 
- #[inline] - pub fn is_stmt(&self) -> bool { - self.is_stmt - } - - /// "A boolean indicating that the current instruction is the beginning of a - /// basic block." - #[inline] - pub fn basic_block(&self) -> bool { - self.basic_block - } - - /// "A boolean indicating that the current address is that of the first byte - /// after the end of a sequence of target machine instructions. end_sequence - /// terminates a sequence of lines; therefore other information in the same - /// row is not meaningful." - #[inline] - pub fn end_sequence(&self) -> bool { - self.end_sequence - } - - /// "A boolean indicating that the current address is one (of possibly many) - /// where execution should be suspended for an entry breakpoint of a - /// function." - #[inline] - pub fn prologue_end(&self) -> bool { - self.prologue_end - } - - /// "A boolean indicating that the current address is one (of possibly many) - /// where execution should be suspended for an exit breakpoint of a - /// function." - #[inline] - pub fn epilogue_begin(&self) -> bool { - self.epilogue_begin - } - - /// Tag for the current instruction set architecture. - /// - /// > An unsigned integer whose value encodes the applicable instruction set - /// > architecture for the current instruction. - /// > - /// > The encoding of instruction sets should be shared by all users of a - /// > given architecture. It is recommended that this encoding be defined by - /// > the ABI authoring committee for each architecture. - #[inline] - pub fn isa(&self) -> u64 { - self.isa - } - - /// "An unsigned integer identifying the block to which the current - /// instruction belongs. Discriminator values are assigned arbitrarily by - /// the DWARF producer and serve to distinguish among multiple blocks that - /// may all be associated with the same source file, line, and column. Where - /// only one block exists for a given source position, the discriminator - /// value should be zero." 
- #[inline] - pub fn discriminator(&self) -> u64 { - self.discriminator - } - - /// Execute the given instruction, and return true if a new row in the - /// line number matrix needs to be generated. - /// - /// Unknown opcodes are treated as no-ops. - #[inline] - pub fn execute( - &mut self, - instruction: LineInstruction, - program: &mut Program, - ) -> bool - where - Program: LineProgram, - R: Reader, - { - match instruction { - LineInstruction::Special(opcode) => { - self.exec_special_opcode(opcode, program.header()); - true - } - - LineInstruction::Copy => true, - - LineInstruction::AdvancePc(operation_advance) => { - self.apply_operation_advance(operation_advance, program.header()); - false - } - - LineInstruction::AdvanceLine(line_increment) => { - self.apply_line_advance(line_increment); - false - } - - LineInstruction::SetFile(file) => { - self.file = file; - false - } - - LineInstruction::SetColumn(column) => { - self.column = column; - false - } - - LineInstruction::NegateStatement => { - self.is_stmt = !self.is_stmt; - false - } - - LineInstruction::SetBasicBlock => { - self.basic_block = true; - false - } - - LineInstruction::ConstAddPc => { - let adjusted = self.adjust_opcode(255, program.header()); - let operation_advance = adjusted / program.header().line_encoding.line_range; - self.apply_operation_advance(u64::from(operation_advance), program.header()); - false - } - - LineInstruction::FixedAddPc(operand) => { - self.address += Wrapping(u64::from(operand)); - self.op_index.0 = 0; - false - } - - LineInstruction::SetPrologueEnd => { - self.prologue_end = true; - false - } - - LineInstruction::SetEpilogueBegin => { - self.epilogue_begin = true; - false - } - - LineInstruction::SetIsa(isa) => { - self.isa = isa; - false - } - - LineInstruction::EndSequence => { - self.end_sequence = true; - true - } - - LineInstruction::SetAddress(address) => { - let tombstone_address = !0 >> (64 - program.header().encoding.address_size * 8); - self.tombstone = address 
== tombstone_address; - self.address.0 = address; - self.op_index.0 = 0; - false - } - - LineInstruction::DefineFile(entry) => { - program.add_file(entry); - false - } - - LineInstruction::SetDiscriminator(discriminator) => { - self.discriminator = discriminator; - false - } - - // Compatibility with future opcodes. - LineInstruction::UnknownStandard0(_) - | LineInstruction::UnknownStandard1(_, _) - | LineInstruction::UnknownStandardN(_, _) - | LineInstruction::UnknownExtended(_, _) => false, - } - } - - /// Perform any reset that was required after copying the previous row. - #[inline] - pub fn reset(&mut self, header: &LineProgramHeader) { - if self.end_sequence { - // Previous instruction was EndSequence, so reset everything - // as specified in Section 6.2.5.3. - *self = Self::new(header); - } else { - // Previous instruction was one of: - // - Special - specified in Section 6.2.5.1, steps 4-7 - // - Copy - specified in Section 6.2.5.2 - // The reset behaviour is the same in both cases. 
- self.discriminator = 0; - self.basic_block = false; - self.prologue_end = false; - self.epilogue_begin = false; - } - } - - /// Step 1 of section 6.2.5.1 - fn apply_line_advance(&mut self, line_increment: i64) { - if line_increment < 0 { - let decrement = -line_increment as u64; - if decrement <= self.line.0 { - self.line.0 -= decrement; - } else { - self.line.0 = 0; - } - } else { - self.line += Wrapping(line_increment as u64); - } - } - - /// Step 2 of section 6.2.5.1 - fn apply_operation_advance( - &mut self, - operation_advance: u64, - header: &LineProgramHeader, - ) { - let operation_advance = Wrapping(operation_advance); - - let minimum_instruction_length = u64::from(header.line_encoding.minimum_instruction_length); - let minimum_instruction_length = Wrapping(minimum_instruction_length); - - let maximum_operations_per_instruction = - u64::from(header.line_encoding.maximum_operations_per_instruction); - let maximum_operations_per_instruction = Wrapping(maximum_operations_per_instruction); - - if maximum_operations_per_instruction.0 == 1 { - self.address += minimum_instruction_length * operation_advance; - self.op_index.0 = 0; - } else { - let op_index_with_advance = self.op_index + operation_advance; - self.address += minimum_instruction_length - * (op_index_with_advance / maximum_operations_per_instruction); - self.op_index = op_index_with_advance % maximum_operations_per_instruction; - } - } - - #[inline] - fn adjust_opcode(&self, opcode: u8, header: &LineProgramHeader) -> u8 { - opcode - header.opcode_base - } - - /// Section 6.2.5.1 - fn exec_special_opcode(&mut self, opcode: u8, header: &LineProgramHeader) { - let adjusted_opcode = self.adjust_opcode(opcode, header); - - let line_range = header.line_encoding.line_range; - let line_advance = adjusted_opcode % line_range; - let operation_advance = adjusted_opcode / line_range; - - // Step 1 - let line_base = i64::from(header.line_encoding.line_base); - self.apply_line_advance(line_base + 
i64::from(line_advance)); - - // Step 2 - self.apply_operation_advance(u64::from(operation_advance), header); - } -} - -/// The type of column that a row is referring to. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub enum ColumnType { - /// The `LeftEdge` means that the statement begins at the start of the new - /// line. - LeftEdge, - /// A column number, whose range begins at 1. - Column(NonZeroU64), -} - -/// Deprecated. `LineNumberSequence` has been renamed to `LineSequence`. -#[deprecated(note = "LineNumberSequence has been renamed to LineSequence, use that instead.")] -pub type LineNumberSequence = LineSequence; - -/// A sequence within a line number program. A sequence, as defined in section -/// 6.2.5 of the standard, is a linear subset of a line number program within -/// which addresses are monotonically increasing. -#[derive(Clone, Debug)] -pub struct LineSequence { - /// The first address that is covered by this sequence within the line number - /// program. - pub start: u64, - /// The first address that is *not* covered by this sequence within the line - /// number program. - pub end: u64, - instructions: LineInstructions, -} - -/// Deprecated. `LineNumberProgramHeader` has been renamed to `LineProgramHeader`. -#[deprecated( - note = "LineNumberProgramHeader has been renamed to LineProgramHeader, use that instead." -)] -pub type LineNumberProgramHeader = LineProgramHeader; - -/// A header for a line number program in the `.debug_line` section, as defined -/// in section 6.2.4 of the standard. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct LineProgramHeader::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - encoding: Encoding, - offset: DebugLineOffset, - unit_length: Offset, - - header_length: Offset, - - line_encoding: LineEncoding, - - /// "The number assigned to the first special opcode." - opcode_base: u8, - - /// "This array specifies the number of LEB128 operands for each of the - /// standard opcodes. 
The first element of the array corresponds to the - /// opcode whose value is 1, and the last element corresponds to the opcode - /// whose value is `opcode_base - 1`." - standard_opcode_lengths: R, - - /// "A sequence of directory entry format descriptions." - directory_entry_format: Vec, - - /// > Entries in this sequence describe each path that was searched for - /// > included source files in this compilation. (The paths include those - /// > directories specified explicitly by the user for the compiler to search - /// > and those the compiler searches without explicit direction.) Each path - /// > entry is either a full path name or is relative to the current directory - /// > of the compilation. - /// > - /// > The last entry is followed by a single null byte. - include_directories: Vec>, - - /// "A sequence of file entry format descriptions." - file_name_entry_format: Vec, - - /// "Entries in this sequence describe source files that contribute to the - /// line number information for this compilation unit or is used in other - /// contexts." - file_names: Vec>, - - /// The encoded line program instructions. - program_buf: R, - - /// The current directory of the compilation. - comp_dir: Option, - - /// The primary source file. - comp_file: Option>, -} - -impl LineProgramHeader -where - R: Reader, - Offset: ReaderOffset, -{ - /// Return the offset of the line number program header in the `.debug_line` section. - pub fn offset(&self) -> DebugLineOffset { - self.offset - } - - /// Return the length of the line number program and header, not including - /// the length of the encoded length itself. - pub fn unit_length(&self) -> R::Offset { - self.unit_length - } - - /// Return the encoding parameters for this header's line program. - pub fn encoding(&self) -> Encoding { - self.encoding - } - - /// Get the version of this header's line program. 
- pub fn version(&self) -> u16 { - self.encoding.version - } - - /// Get the length of the encoded line number program header, not including - /// the length of the encoded length itself. - pub fn header_length(&self) -> R::Offset { - self.header_length - } - - /// Get the size in bytes of a target machine address. - pub fn address_size(&self) -> u8 { - self.encoding.address_size - } - - /// Whether this line program is encoded in 64- or 32-bit DWARF. - pub fn format(&self) -> Format { - self.encoding.format - } - - /// Get the line encoding parameters for this header's line program. - pub fn line_encoding(&self) -> LineEncoding { - self.line_encoding - } - - /// Get the minimum instruction length any instruction in this header's line - /// program may have. - pub fn minimum_instruction_length(&self) -> u8 { - self.line_encoding.minimum_instruction_length - } - - /// Get the maximum number of operations each instruction in this header's - /// line program may have. - pub fn maximum_operations_per_instruction(&self) -> u8 { - self.line_encoding.maximum_operations_per_instruction - } - - /// Get the default value of the `is_stmt` register for this header's line - /// program. - pub fn default_is_stmt(&self) -> bool { - self.line_encoding.default_is_stmt - } - - /// Get the line base for this header's line program. - pub fn line_base(&self) -> i8 { - self.line_encoding.line_base - } - - /// Get the line range for this header's line program. - pub fn line_range(&self) -> u8 { - self.line_encoding.line_range - } - - /// Get opcode base for this header's line program. - pub fn opcode_base(&self) -> u8 { - self.opcode_base - } - - /// An array of `u8` that specifies the number of LEB128 operands for - /// each of the standard opcodes. - pub fn standard_opcode_lengths(&self) -> &R { - &self.standard_opcode_lengths - } - - /// Get the format of a directory entry. - pub fn directory_entry_format(&self) -> &[FileEntryFormat] { - &self.directory_entry_format[..] 
- } - - /// Get the set of include directories for this header's line program. - /// - /// For DWARF version <= 4, the compilation's current directory is not included - /// in the return value, but is implicitly considered to be in the set per spec. - pub fn include_directories(&self) -> &[AttributeValue] { - &self.include_directories[..] - } - - /// The include directory with the given directory index. - /// - /// A directory index of 0 corresponds to the compilation unit directory. - pub fn directory(&self, directory: u64) -> Option> { - if self.encoding.version <= 4 { - if directory == 0 { - self.comp_dir.clone().map(AttributeValue::String) - } else { - let directory = directory as usize - 1; - self.include_directories.get(directory).cloned() - } - } else { - self.include_directories.get(directory as usize).cloned() - } - } - - /// Get the format of a file name entry. - pub fn file_name_entry_format(&self) -> &[FileEntryFormat] { - &self.file_name_entry_format[..] - } - - /// Return true if the file entries may have valid timestamps. - /// - /// Only returns false if we definitely know that all timestamp fields - /// are invalid. - pub fn file_has_timestamp(&self) -> bool { - self.encoding.version <= 4 - || self - .file_name_entry_format - .iter() - .any(|x| x.content_type == constants::DW_LNCT_timestamp) - } - - /// Return true if the file entries may have valid sizes. - /// - /// Only returns false if we definitely know that all size fields - /// are invalid. - pub fn file_has_size(&self) -> bool { - self.encoding.version <= 4 - || self - .file_name_entry_format - .iter() - .any(|x| x.content_type == constants::DW_LNCT_size) - } - - /// Return true if the file name entry format contains an MD5 field. - pub fn file_has_md5(&self) -> bool { - self.file_name_entry_format - .iter() - .any(|x| x.content_type == constants::DW_LNCT_MD5) - } - - /// Get the list of source files that appear in this header's line program. 
- pub fn file_names(&self) -> &[FileEntry] { - &self.file_names[..] - } - - /// The source file with the given file index. - /// - /// A file index of 0 corresponds to the compilation unit file. - /// Note that a file index of 0 is invalid for DWARF version <= 4, - /// but we support it anyway. - pub fn file(&self, file: u64) -> Option<&FileEntry> { - if self.encoding.version <= 4 { - if file == 0 { - self.comp_file.as_ref() - } else { - let file = file as usize - 1; - self.file_names.get(file) - } - } else { - self.file_names.get(file as usize) - } - } - - /// Get the raw, un-parsed `EndianSlice` containing this header's line number - /// program. - /// - /// ``` - /// # fn foo() { - /// use gimli::{LineProgramHeader, EndianSlice, NativeEndian}; - /// - /// fn get_line_number_program_header<'a>() -> LineProgramHeader> { - /// // Get a line number program header from some offset in a - /// // `.debug_line` section... - /// # unimplemented!() - /// } - /// - /// let header = get_line_number_program_header(); - /// let raw_program = header.raw_program_buf(); - /// println!("The length of the raw program in bytes is {}", raw_program.len()); - /// # } - /// ``` - pub fn raw_program_buf(&self) -> R { - self.program_buf.clone() - } - - /// Iterate over the instructions in this header's line number program, parsing - /// them as we go. 
- pub fn instructions(&self) -> LineInstructions { - LineInstructions { - input: self.program_buf.clone(), - } - } - - fn parse( - input: &mut R, - offset: DebugLineOffset, - mut address_size: u8, - mut comp_dir: Option, - comp_name: Option, - ) -> Result> { - let (unit_length, format) = input.read_initial_length()?; - let rest = &mut input.split(unit_length)?; - - let version = rest.read_u16()?; - if version < 2 || version > 5 { - return Err(Error::UnknownVersion(u64::from(version))); - } - - if version >= 5 { - address_size = rest.read_u8()?; - let segment_selector_size = rest.read_u8()?; - if segment_selector_size != 0 { - return Err(Error::UnsupportedSegmentSize); - } - } - - let encoding = Encoding { - format, - version, - address_size, - }; - - let header_length = rest.read_length(format)?; - - let mut program_buf = rest.clone(); - program_buf.skip(header_length)?; - rest.truncate(header_length)?; - - let minimum_instruction_length = rest.read_u8()?; - if minimum_instruction_length == 0 { - return Err(Error::MinimumInstructionLengthZero); - } - - // This field did not exist before DWARF 4, but is specified to be 1 for - // non-VLIW architectures, which makes it a no-op. - let maximum_operations_per_instruction = if version >= 4 { rest.read_u8()? } else { 1 }; - if maximum_operations_per_instruction == 0 { - return Err(Error::MaximumOperationsPerInstructionZero); - } - - let default_is_stmt = rest.read_u8()? 
!= 0; - let line_base = rest.read_i8()?; - let line_range = rest.read_u8()?; - if line_range == 0 { - return Err(Error::LineRangeZero); - } - let line_encoding = LineEncoding { - minimum_instruction_length, - maximum_operations_per_instruction, - default_is_stmt, - line_base, - line_range, - }; - - let opcode_base = rest.read_u8()?; - if opcode_base == 0 { - return Err(Error::OpcodeBaseZero); - } - - let standard_opcode_count = R::Offset::from_u8(opcode_base - 1); - let standard_opcode_lengths = rest.split(standard_opcode_count)?; - - let directory_entry_format; - let mut include_directories = Vec::new(); - if version <= 4 { - directory_entry_format = Vec::new(); - loop { - let directory = rest.read_null_terminated_slice()?; - if directory.is_empty() { - break; - } - include_directories.push(AttributeValue::String(directory)); - } - } else { - comp_dir = None; - directory_entry_format = FileEntryFormat::parse(rest)?; - let count = rest.read_uleb128()?; - for _ in 0..count { - include_directories.push(parse_directory_v5( - rest, - encoding, - &directory_entry_format, - )?); - } - } - - let comp_file; - let file_name_entry_format; - let mut file_names = Vec::new(); - if version <= 4 { - comp_file = comp_name.map(|name| FileEntry { - path_name: AttributeValue::String(name), - directory_index: 0, - timestamp: 0, - size: 0, - md5: [0; 16], - }); - - file_name_entry_format = Vec::new(); - loop { - let path_name = rest.read_null_terminated_slice()?; - if path_name.is_empty() { - break; - } - file_names.push(FileEntry::parse(rest, path_name)?); - } - } else { - comp_file = None; - file_name_entry_format = FileEntryFormat::parse(rest)?; - let count = rest.read_uleb128()?; - for _ in 0..count { - file_names.push(parse_file_v5(rest, encoding, &file_name_entry_format)?); - } - } - - let header = LineProgramHeader { - encoding, - offset, - unit_length, - header_length, - line_encoding, - opcode_base, - standard_opcode_lengths, - directory_entry_format, - include_directories, - 
file_name_entry_format, - file_names, - program_buf, - comp_dir, - comp_file, - }; - Ok(header) - } -} - -/// Deprecated. `IncompleteLineNumberProgram` has been renamed to `IncompleteLineProgram`. -#[deprecated( - note = "IncompleteLineNumberProgram has been renamed to IncompleteLineProgram, use that instead." -)] -pub type IncompleteLineNumberProgram = IncompleteLineProgram; - -/// A line number program that has not been run to completion. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct IncompleteLineProgram::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - header: LineProgramHeader, -} - -impl IncompleteLineProgram -where - R: Reader, - Offset: ReaderOffset, -{ - /// Retrieve the `LineProgramHeader` for this program. - pub fn header(&self) -> &LineProgramHeader { - &self.header - } - - /// Construct a new `LineRows` for executing this program to iterate - /// over rows in the line information matrix. - pub fn rows(self) -> OneShotLineRows { - OneShotLineRows::new(self) - } - - /// Execute the line number program, completing the `IncompleteLineProgram` - /// into a `CompleteLineProgram` and producing an array of sequences within - /// the line number program that can later be used with - /// `CompleteLineProgram::resume_from`. - /// - /// ``` - /// # fn foo() { - /// use gimli::{IncompleteLineProgram, EndianSlice, NativeEndian}; - /// - /// fn get_line_number_program<'a>() -> IncompleteLineProgram> { - /// // Get a line number program from some offset in a - /// // `.debug_line` section... 
- /// # unimplemented!() - /// } - /// - /// let program = get_line_number_program(); - /// let (program, sequences) = program.sequences().unwrap(); - /// println!("There are {} sequences in this line number program", sequences.len()); - /// # } - /// ``` - #[allow(clippy::type_complexity)] - pub fn sequences(self) -> Result<(CompleteLineProgram, Vec>)> { - let mut sequences = Vec::new(); - let mut rows = self.rows(); - let mut instructions = rows.instructions.clone(); - let mut sequence_start_addr = None; - loop { - let sequence_end_addr; - if rows.next_row()?.is_none() { - break; - } - - let row = &rows.row; - if row.end_sequence() { - sequence_end_addr = row.address(); - } else if sequence_start_addr.is_none() { - sequence_start_addr = Some(row.address()); - continue; - } else { - continue; - } - - // We just finished a sequence. - sequences.push(LineSequence { - // In theory one could have multiple DW_LNE_end_sequence instructions - // in a row. - start: sequence_start_addr.unwrap_or(0), - end: sequence_end_addr, - instructions: instructions.remove_trailing(&rows.instructions)?, - }); - sequence_start_addr = None; - instructions = rows.instructions.clone(); - } - - let program = CompleteLineProgram { - header: rows.program.header, - }; - Ok((program, sequences)) - } -} - -/// Deprecated. `CompleteLineNumberProgram` has been renamed to `CompleteLineProgram`. -#[deprecated( - note = "CompleteLineNumberProgram has been renamed to CompleteLineProgram, use that instead." -)] -pub type CompleteLineNumberProgram = CompleteLineProgram; - -/// A line number program that has previously been run to completion. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct CompleteLineProgram::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - header: LineProgramHeader, -} - -impl CompleteLineProgram -where - R: Reader, - Offset: ReaderOffset, -{ - /// Retrieve the `LineProgramHeader` for this program. 
- pub fn header(&self) -> &LineProgramHeader { - &self.header - } - - /// Construct a new `LineRows` for executing the subset of the line - /// number program identified by 'sequence' and generating the line information - /// matrix. - /// - /// ``` - /// # fn foo() { - /// use gimli::{IncompleteLineProgram, EndianSlice, NativeEndian}; - /// - /// fn get_line_number_program<'a>() -> IncompleteLineProgram> { - /// // Get a line number program from some offset in a - /// // `.debug_line` section... - /// # unimplemented!() - /// } - /// - /// let program = get_line_number_program(); - /// let (program, sequences) = program.sequences().unwrap(); - /// for sequence in &sequences { - /// let mut sm = program.resume_from(sequence); - /// } - /// # } - /// ``` - pub fn resume_from<'program>( - &'program self, - sequence: &LineSequence, - ) -> ResumedLineRows<'program, R, Offset> { - ResumedLineRows::resume(self, sequence) - } -} - -/// An entry in the `LineProgramHeader`'s `file_names` set. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct FileEntry::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - path_name: AttributeValue, - directory_index: u64, - timestamp: u64, - size: u64, - md5: [u8; 16], -} - -impl FileEntry -where - R: Reader, - Offset: ReaderOffset, -{ - // version 2-4 - fn parse(input: &mut R, path_name: R) -> Result> { - let directory_index = input.read_uleb128()?; - let timestamp = input.read_uleb128()?; - let size = input.read_uleb128()?; - - let entry = FileEntry { - path_name: AttributeValue::String(path_name), - directory_index, - timestamp, - size, - md5: [0; 16], - }; - - Ok(entry) - } - - /// > A slice containing the full or relative path name of - /// > a source file. 
If the entry contains a file name or a relative path - /// > name, the file is located relative to either the compilation directory - /// > (as specified by the DW_AT_comp_dir attribute given in the compilation - /// > unit) or one of the directories in the include_directories section. - pub fn path_name(&self) -> AttributeValue { - self.path_name.clone() - } - - /// > An unsigned LEB128 number representing the directory index of the - /// > directory in which the file was found. - /// > - /// > ... - /// > - /// > The directory index represents an entry in the include_directories - /// > section of the line number program header. The index is 0 if the file - /// > was found in the current directory of the compilation, 1 if it was found - /// > in the first directory in the include_directories section, and so - /// > on. The directory index is ignored for file names that represent full - /// > path names. - pub fn directory_index(&self) -> u64 { - self.directory_index - } - - /// Get this file's directory. - /// - /// A directory index of 0 corresponds to the compilation unit directory. - pub fn directory(&self, header: &LineProgramHeader) -> Option> { - header.directory(self.directory_index) - } - - /// The implementation-defined time of last modification of the file, - /// or 0 if not available. - pub fn timestamp(&self) -> u64 { - self.timestamp - } - - /// "An unsigned LEB128 number representing the time of last modification of - /// the file, or 0 if not available." - // Terminology changed in DWARF version 5. - #[doc(hidden)] - pub fn last_modification(&self) -> u64 { - self.timestamp - } - - /// The size of the file in bytes, or 0 if not available. - pub fn size(&self) -> u64 { - self.size - } - - /// "An unsigned LEB128 number representing the length in bytes of the file, - /// or 0 if not available." - // Terminology changed in DWARF version 5. 
- #[doc(hidden)] - pub fn length(&self) -> u64 { - self.size - } - - /// A 16-byte MD5 digest of the file contents. - /// - /// Only valid if `LineProgramHeader::file_has_md5` returns `true`. - pub fn md5(&self) -> &[u8; 16] { - &self.md5 - } -} - -/// The format of a component of an include directory or file name entry. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct FileEntryFormat { - /// The type of information that is represented by the component. - pub content_type: constants::DwLnct, - - /// The encoding form of the component value. - pub form: constants::DwForm, -} - -impl FileEntryFormat { - fn parse(input: &mut R) -> Result> { - let format_count = input.read_u8()? as usize; - let mut format = Vec::with_capacity(format_count); - let mut path_count = 0; - for _ in 0..format_count { - let content_type = input.read_uleb128()?; - let content_type = if content_type > u64::from(u16::max_value()) { - constants::DwLnct(u16::max_value()) - } else { - constants::DwLnct(content_type as u16) - }; - if content_type == constants::DW_LNCT_path { - path_count += 1; - } - - let form = constants::DwForm(input.read_uleb128_u16()?); - - format.push(FileEntryFormat { content_type, form }); - } - if path_count != 1 { - return Err(Error::MissingFileEntryFormatPath); - } - Ok(format) - } -} - -fn parse_directory_v5( - input: &mut R, - encoding: Encoding, - formats: &[FileEntryFormat], -) -> Result> { - let mut path_name = None; - - for format in formats { - let value = parse_attribute(input, encoding, format.form)?; - if format.content_type == constants::DW_LNCT_path { - path_name = Some(value); - } - } - - Ok(path_name.unwrap()) -} - -fn parse_file_v5( - input: &mut R, - encoding: Encoding, - formats: &[FileEntryFormat], -) -> Result> { - let mut path_name = None; - let mut directory_index = 0; - let mut timestamp = 0; - let mut size = 0; - let mut md5 = [0; 16]; - - for format in formats { - let value = parse_attribute(input, encoding, format.form)?; - match 
format.content_type { - constants::DW_LNCT_path => path_name = Some(value), - constants::DW_LNCT_directory_index => { - if let Some(value) = value.udata_value() { - directory_index = value; - } - } - constants::DW_LNCT_timestamp => { - if let Some(value) = value.udata_value() { - timestamp = value; - } - } - constants::DW_LNCT_size => { - if let Some(value) = value.udata_value() { - size = value; - } - } - constants::DW_LNCT_MD5 => { - if let AttributeValue::Block(mut value) = value { - if value.len().into_u64() == 16 { - md5 = value.read_u8_array()?; - } - } - } - // Ignore unknown content types. - _ => {} - } - } - - Ok(FileEntry { - path_name: path_name.unwrap(), - directory_index, - timestamp, - size, - md5, - }) -} - -// TODO: this should be shared with unit::parse_attribute(), but that is hard to do. -fn parse_attribute( - input: &mut R, - encoding: Encoding, - form: constants::DwForm, -) -> Result> { - Ok(match form { - constants::DW_FORM_block1 => { - let len = input.read_u8().map(R::Offset::from_u8)?; - let block = input.split(len)?; - AttributeValue::Block(block) - } - constants::DW_FORM_block2 => { - let len = input.read_u16().map(R::Offset::from_u16)?; - let block = input.split(len)?; - AttributeValue::Block(block) - } - constants::DW_FORM_block4 => { - let len = input.read_u32().map(R::Offset::from_u32)?; - let block = input.split(len)?; - AttributeValue::Block(block) - } - constants::DW_FORM_block => { - let len = input.read_uleb128().and_then(R::Offset::from_u64)?; - let block = input.split(len)?; - AttributeValue::Block(block) - } - constants::DW_FORM_data1 => { - let data = input.read_u8()?; - AttributeValue::Data1(data) - } - constants::DW_FORM_data2 => { - let data = input.read_u16()?; - AttributeValue::Data2(data) - } - constants::DW_FORM_data4 => { - let data = input.read_u32()?; - AttributeValue::Data4(data) - } - constants::DW_FORM_data8 => { - let data = input.read_u64()?; - AttributeValue::Data8(data) - } - constants::DW_FORM_data16 => { - 
let block = input.split(R::Offset::from_u8(16))?; - AttributeValue::Block(block) - } - constants::DW_FORM_udata => { - let data = input.read_uleb128()?; - AttributeValue::Udata(data) - } - constants::DW_FORM_sdata => { - let data = input.read_sleb128()?; - AttributeValue::Sdata(data) - } - constants::DW_FORM_flag => { - let present = input.read_u8()?; - AttributeValue::Flag(present != 0) - } - constants::DW_FORM_sec_offset => { - let offset = input.read_offset(encoding.format)?; - AttributeValue::SecOffset(offset) - } - constants::DW_FORM_string => { - let string = input.read_null_terminated_slice()?; - AttributeValue::String(string) - } - constants::DW_FORM_strp => { - let offset = input.read_offset(encoding.format)?; - AttributeValue::DebugStrRef(DebugStrOffset(offset)) - } - constants::DW_FORM_strp_sup | constants::DW_FORM_GNU_strp_alt => { - let offset = input.read_offset(encoding.format)?; - AttributeValue::DebugStrRefSup(DebugStrOffset(offset)) - } - constants::DW_FORM_line_strp => { - let offset = input.read_offset(encoding.format)?; - AttributeValue::DebugLineStrRef(DebugLineStrOffset(offset)) - } - constants::DW_FORM_strx | constants::DW_FORM_GNU_str_index => { - let index = input.read_uleb128().and_then(R::Offset::from_u64)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - constants::DW_FORM_strx1 => { - let index = input.read_u8().map(R::Offset::from_u8)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - constants::DW_FORM_strx2 => { - let index = input.read_u16().map(R::Offset::from_u16)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - constants::DW_FORM_strx3 => { - let index = input.read_uint(3).and_then(R::Offset::from_u64)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - constants::DW_FORM_strx4 => { - let index = input.read_u32().map(R::Offset::from_u32)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - _ => { - return 
Err(Error::UnknownForm); - } - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::constants; - use crate::endianity::LittleEndian; - use crate::read::{EndianSlice, Error}; - use crate::test_util::GimliSectionMethods; - use core::u64; - use core::u8; - use test_assembler::{Endian, Label, LabelMaker, Section}; - - #[test] - fn test_parse_debug_line_32_ok() { - #[rustfmt::skip] - let buf = [ - // 32-bit length = 62. - 0x3e, 0x00, 0x00, 0x00, - // Version. - 0x04, 0x00, - // Header length = 40. - 0x28, 0x00, 0x00, 0x00, - // Minimum instruction length. - 0x01, - // Maximum operations per byte. - 0x01, - // Default is_stmt. - 0x01, - // Line base. - 0x00, - // Line range. - 0x01, - // Opcode base. - 0x03, - // Standard opcode lengths for opcodes 1 .. opcode base - 1. - 0x01, 0x02, - // Include directories = '/', 'i', 'n', 'c', '\0', '/', 'i', 'n', 'c', '2', '\0', '\0' - 0x2f, 0x69, 0x6e, 0x63, 0x00, 0x2f, 0x69, 0x6e, 0x63, 0x32, 0x00, 0x00, - // File names - // foo.rs - 0x66, 0x6f, 0x6f, 0x2e, 0x72, 0x73, 0x00, - 0x00, - 0x00, - 0x00, - // bar.h - 0x62, 0x61, 0x72, 0x2e, 0x68, 0x00, - 0x01, - 0x00, - 0x00, - // End file names. - 0x00, - - // Dummy line program data. - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - - // Dummy next line program. 
- 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - ]; - - let rest = &mut EndianSlice::new(&buf, LittleEndian); - let comp_dir = EndianSlice::new(b"/comp_dir", LittleEndian); - let comp_name = EndianSlice::new(b"/comp_name", LittleEndian); - - let header = - LineProgramHeader::parse(rest, DebugLineOffset(0), 4, Some(comp_dir), Some(comp_name)) - .expect("should parse header ok"); - - assert_eq!( - *rest, - EndianSlice::new(&buf[buf.len() - 16..], LittleEndian) - ); - - assert_eq!(header.offset, DebugLineOffset(0)); - assert_eq!(header.version(), 4); - assert_eq!(header.minimum_instruction_length(), 1); - assert_eq!(header.maximum_operations_per_instruction(), 1); - assert_eq!(header.default_is_stmt(), true); - assert_eq!(header.line_base(), 0); - assert_eq!(header.line_range(), 1); - assert_eq!(header.opcode_base(), 3); - assert_eq!(header.directory(0), Some(AttributeValue::String(comp_dir))); - assert_eq!( - header.file(0).unwrap().path_name, - AttributeValue::String(comp_name) - ); - - let expected_lengths = [1, 2]; - assert_eq!(header.standard_opcode_lengths().slice(), &expected_lengths); - - let expected_include_directories = [ - AttributeValue::String(EndianSlice::new(b"/inc", LittleEndian)), - AttributeValue::String(EndianSlice::new(b"/inc2", LittleEndian)), - ]; - assert_eq!(header.include_directories(), &expected_include_directories); - - let expected_file_names = [ - FileEntry { - path_name: AttributeValue::String(EndianSlice::new(b"foo.rs", LittleEndian)), - directory_index: 0, - timestamp: 0, - size: 0, - md5: [0; 16], - }, - FileEntry { - path_name: AttributeValue::String(EndianSlice::new(b"bar.h", LittleEndian)), - directory_index: 1, - timestamp: 0, - size: 0, - md5: [0; 16], - }, - ]; - assert_eq!(&*header.file_names(), &expected_file_names); - } - - #[test] - fn test_parse_debug_line_header_length_too_short() { - #[rustfmt::skip] - let buf = [ - // 32-bit length = 62. 
- 0x3e, 0x00, 0x00, 0x00, - // Version. - 0x04, 0x00, - // Header length = 20. TOO SHORT!!! - 0x15, 0x00, 0x00, 0x00, - // Minimum instruction length. - 0x01, - // Maximum operations per byte. - 0x01, - // Default is_stmt. - 0x01, - // Line base. - 0x00, - // Line range. - 0x01, - // Opcode base. - 0x03, - // Standard opcode lengths for opcodes 1 .. opcode base - 1. - 0x01, 0x02, - // Include directories = '/', 'i', 'n', 'c', '\0', '/', 'i', 'n', 'c', '2', '\0', '\0' - 0x2f, 0x69, 0x6e, 0x63, 0x00, 0x2f, 0x69, 0x6e, 0x63, 0x32, 0x00, 0x00, - // File names - // foo.rs - 0x66, 0x6f, 0x6f, 0x2e, 0x72, 0x73, 0x00, - 0x00, - 0x00, - 0x00, - // bar.h - 0x62, 0x61, 0x72, 0x2e, 0x68, 0x00, - 0x01, - 0x00, - 0x00, - // End file names. - 0x00, - - // Dummy line program data. - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - - // Dummy next line program. - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - ]; - - let input = &mut EndianSlice::new(&buf, LittleEndian); - - match LineProgramHeader::parse(input, DebugLineOffset(0), 4, None, None) { - Err(Error::UnexpectedEof(_)) => return, - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - } - - #[test] - fn test_parse_debug_line_unit_length_too_short() { - #[rustfmt::skip] - let buf = [ - // 32-bit length = 40. TOO SHORT!!! - 0x28, 0x00, 0x00, 0x00, - // Version. - 0x04, 0x00, - // Header length = 40. - 0x28, 0x00, 0x00, 0x00, - // Minimum instruction length. - 0x01, - // Maximum operations per byte. - 0x01, - // Default is_stmt. - 0x01, - // Line base. - 0x00, - // Line range. - 0x01, - // Opcode base. - 0x03, - // Standard opcode lengths for opcodes 1 .. opcode base - 1. 
- 0x01, 0x02, - // Include directories = '/', 'i', 'n', 'c', '\0', '/', 'i', 'n', 'c', '2', '\0', '\0' - 0x2f, 0x69, 0x6e, 0x63, 0x00, 0x2f, 0x69, 0x6e, 0x63, 0x32, 0x00, 0x00, - // File names - // foo.rs - 0x66, 0x6f, 0x6f, 0x2e, 0x72, 0x73, 0x00, - 0x00, - 0x00, - 0x00, - // bar.h - 0x62, 0x61, 0x72, 0x2e, 0x68, 0x00, - 0x01, - 0x00, - 0x00, - // End file names. - 0x00, - - // Dummy line program data. - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - - // Dummy next line program. - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - ]; - - let input = &mut EndianSlice::new(&buf, LittleEndian); - - match LineProgramHeader::parse(input, DebugLineOffset(0), 4, None, None) { - Err(Error::UnexpectedEof(_)) => return, - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - } - - const OPCODE_BASE: u8 = 13; - const STANDARD_OPCODE_LENGTHS: &[u8] = &[0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1]; - - fn make_test_header( - buf: EndianSlice, - ) -> LineProgramHeader> { - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 8, - }; - let line_encoding = LineEncoding { - line_base: -3, - line_range: 12, - ..Default::default() - }; - LineProgramHeader { - encoding, - offset: DebugLineOffset(0), - unit_length: 1, - header_length: 1, - line_encoding, - opcode_base: OPCODE_BASE, - standard_opcode_lengths: EndianSlice::new(STANDARD_OPCODE_LENGTHS, LittleEndian), - file_names: vec![ - FileEntry { - path_name: AttributeValue::String(EndianSlice::new(b"foo.c", LittleEndian)), - directory_index: 0, - timestamp: 0, - size: 0, - md5: [0; 16], - }, - FileEntry { - path_name: AttributeValue::String(EndianSlice::new(b"bar.rs", LittleEndian)), - directory_index: 0, - timestamp: 0, - size: 0, - md5: [0; 16], - }, - ], - include_directories: vec![], - directory_entry_format: vec![], - file_name_entry_format: vec![], - program_buf: buf, - comp_dir: None, 
- comp_file: None, - } - } - - fn make_test_program( - buf: EndianSlice, - ) -> IncompleteLineProgram> { - IncompleteLineProgram { - header: make_test_header(buf), - } - } - - #[test] - fn test_parse_special_opcodes() { - for i in OPCODE_BASE..u8::MAX { - let input = [i, 0, 0, 0]; - let input = EndianSlice::new(&input, LittleEndian); - let header = make_test_header(input); - - let mut rest = input; - let opcode = - LineInstruction::parse(&header, &mut rest).expect("Should parse the opcode OK"); - - assert_eq!(*rest, *input.range_from(1..)); - assert_eq!(opcode, LineInstruction::Special(i)); - } - } - - #[test] - fn test_parse_standard_opcodes() { - fn test( - raw: constants::DwLns, - operands: Operands, - expected: LineInstruction>, - ) where - Operands: AsRef<[u8]>, - { - let mut input = Vec::new(); - input.push(raw.0); - input.extend_from_slice(operands.as_ref()); - - let expected_rest = [0, 1, 2, 3, 4]; - input.extend_from_slice(&expected_rest); - - let input = EndianSlice::new(&*input, LittleEndian); - let header = make_test_header(input); - - let mut rest = input; - let opcode = - LineInstruction::parse(&header, &mut rest).expect("Should parse the opcode OK"); - - assert_eq!(opcode, expected); - assert_eq!(*rest, expected_rest); - } - - test(constants::DW_LNS_copy, [], LineInstruction::Copy); - test( - constants::DW_LNS_advance_pc, - [42], - LineInstruction::AdvancePc(42), - ); - test( - constants::DW_LNS_advance_line, - [9], - LineInstruction::AdvanceLine(9), - ); - test(constants::DW_LNS_set_file, [7], LineInstruction::SetFile(7)); - test( - constants::DW_LNS_set_column, - [1], - LineInstruction::SetColumn(1), - ); - test( - constants::DW_LNS_negate_stmt, - [], - LineInstruction::NegateStatement, - ); - test( - constants::DW_LNS_set_basic_block, - [], - LineInstruction::SetBasicBlock, - ); - test( - constants::DW_LNS_const_add_pc, - [], - LineInstruction::ConstAddPc, - ); - test( - constants::DW_LNS_fixed_advance_pc, - [42, 0], - 
LineInstruction::FixedAddPc(42), - ); - test( - constants::DW_LNS_set_prologue_end, - [], - LineInstruction::SetPrologueEnd, - ); - test( - constants::DW_LNS_set_isa, - [57 + 0x80, 100], - LineInstruction::SetIsa(12857), - ); - } - - #[test] - fn test_parse_unknown_standard_opcode_no_args() { - let input = [OPCODE_BASE, 1, 2, 3]; - let input = EndianSlice::new(&input, LittleEndian); - let mut standard_opcode_lengths = Vec::new(); - let mut header = make_test_header(input); - standard_opcode_lengths.extend(header.standard_opcode_lengths.slice()); - standard_opcode_lengths.push(0); - header.opcode_base += 1; - header.standard_opcode_lengths = EndianSlice::new(&standard_opcode_lengths, LittleEndian); - - let mut rest = input; - let opcode = - LineInstruction::parse(&header, &mut rest).expect("Should parse the opcode OK"); - - assert_eq!( - opcode, - LineInstruction::UnknownStandard0(constants::DwLns(OPCODE_BASE)) - ); - assert_eq!(*rest, *input.range_from(1..)); - } - - #[test] - fn test_parse_unknown_standard_opcode_one_arg() { - let input = [OPCODE_BASE, 1, 2, 3]; - let input = EndianSlice::new(&input, LittleEndian); - let mut standard_opcode_lengths = Vec::new(); - let mut header = make_test_header(input); - standard_opcode_lengths.extend(header.standard_opcode_lengths.slice()); - standard_opcode_lengths.push(1); - header.opcode_base += 1; - header.standard_opcode_lengths = EndianSlice::new(&standard_opcode_lengths, LittleEndian); - - let mut rest = input; - let opcode = - LineInstruction::parse(&header, &mut rest).expect("Should parse the opcode OK"); - - assert_eq!( - opcode, - LineInstruction::UnknownStandard1(constants::DwLns(OPCODE_BASE), 1) - ); - assert_eq!(*rest, *input.range_from(2..)); - } - - #[test] - fn test_parse_unknown_standard_opcode_many_args() { - let input = [OPCODE_BASE, 1, 2, 3]; - let input = EndianSlice::new(&input, LittleEndian); - let args = input.range_from(1..); - let mut standard_opcode_lengths = Vec::new(); - let mut header = 
make_test_header(input); - standard_opcode_lengths.extend(header.standard_opcode_lengths.slice()); - standard_opcode_lengths.push(3); - header.opcode_base += 1; - header.standard_opcode_lengths = EndianSlice::new(&standard_opcode_lengths, LittleEndian); - - let mut rest = input; - let opcode = - LineInstruction::parse(&header, &mut rest).expect("Should parse the opcode OK"); - - assert_eq!( - opcode, - LineInstruction::UnknownStandardN(constants::DwLns(OPCODE_BASE), args) - ); - assert_eq!(*rest, []); - } - - #[test] - fn test_parse_extended_opcodes() { - fn test( - raw: constants::DwLne, - operands: Operands, - expected: LineInstruction>, - ) where - Operands: AsRef<[u8]>, - { - let mut input = Vec::new(); - input.push(0); - - let operands = operands.as_ref(); - input.push(1 + operands.len() as u8); - - input.push(raw.0); - input.extend_from_slice(operands); - - let expected_rest = [0, 1, 2, 3, 4]; - input.extend_from_slice(&expected_rest); - - let input = EndianSlice::new(&input, LittleEndian); - let header = make_test_header(input); - - let mut rest = input; - let opcode = - LineInstruction::parse(&header, &mut rest).expect("Should parse the opcode OK"); - - assert_eq!(opcode, expected); - assert_eq!(*rest, expected_rest); - } - - test( - constants::DW_LNE_end_sequence, - [], - LineInstruction::EndSequence, - ); - test( - constants::DW_LNE_set_address, - [1, 2, 3, 4, 5, 6, 7, 8], - LineInstruction::SetAddress(578_437_695_752_307_201), - ); - test( - constants::DW_LNE_set_discriminator, - [42], - LineInstruction::SetDiscriminator(42), - ); - - let mut file = Vec::new(); - // "foo.c" - let path_name = [b'f', b'o', b'o', b'.', b'c', 0]; - file.extend_from_slice(&path_name); - // Directory index. - file.push(0); - // Last modification of file. - file.push(1); - // Size of file. 
- file.push(2); - - test( - constants::DW_LNE_define_file, - file, - LineInstruction::DefineFile(FileEntry { - path_name: AttributeValue::String(EndianSlice::new(b"foo.c", LittleEndian)), - directory_index: 0, - timestamp: 1, - size: 2, - md5: [0; 16], - }), - ); - - // Unknown extended opcode. - let operands = [1, 2, 3, 4, 5, 6]; - let opcode = constants::DwLne(99); - test( - opcode, - operands, - LineInstruction::UnknownExtended(opcode, EndianSlice::new(&operands, LittleEndian)), - ); - } - - #[test] - fn test_file_entry_directory() { - let path_name = [b'f', b'o', b'o', b'.', b'r', b's', 0]; - - let mut file = FileEntry { - path_name: AttributeValue::String(EndianSlice::new(&path_name, LittleEndian)), - directory_index: 1, - timestamp: 0, - size: 0, - md5: [0; 16], - }; - - let mut header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let dir = AttributeValue::String(EndianSlice::new(b"dir", LittleEndian)); - header.include_directories.push(dir); - - assert_eq!(file.directory(&header), Some(dir)); - - // Now test the compilation's current directory. 
- file.directory_index = 0; - assert_eq!(file.directory(&header), None); - } - - fn assert_exec_opcode<'input>( - header: LineProgramHeader>, - mut registers: LineRow, - opcode: LineInstruction>, - expected_registers: LineRow, - expect_new_row: bool, - ) { - let mut program = IncompleteLineProgram { header }; - let is_new_row = registers.execute(opcode, &mut program); - - assert_eq!(is_new_row, expect_new_row); - assert_eq!(registers, expected_registers); - } - - #[test] - fn test_exec_special_noop() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::Special(16); - let expected_registers = initial_registers; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, true); - } - - #[test] - fn test_exec_special_negative_line_advance() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let mut initial_registers = LineRow::new(&header); - initial_registers.line.0 = 10; - - let opcode = LineInstruction::Special(13); - - let mut expected_registers = initial_registers; - expected_registers.line.0 -= 3; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, true); - } - - #[test] - fn test_exec_special_positive_line_advance() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let initial_registers = LineRow::new(&header); - - let opcode = LineInstruction::Special(19); - - let mut expected_registers = initial_registers; - expected_registers.line.0 += 3; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, true); - } - - #[test] - fn test_exec_special_positive_address_advance() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let initial_registers = LineRow::new(&header); - - let opcode = LineInstruction::Special(52); - - let mut expected_registers = initial_registers; - expected_registers.address.0 += 3; - - 
assert_exec_opcode(header, initial_registers, opcode, expected_registers, true); - } - - #[test] - fn test_exec_special_positive_address_and_line_advance() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let initial_registers = LineRow::new(&header); - - let opcode = LineInstruction::Special(55); - - let mut expected_registers = initial_registers; - expected_registers.address.0 += 3; - expected_registers.line.0 += 3; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, true); - } - - #[test] - fn test_exec_special_positive_address_and_negative_line_advance() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let mut initial_registers = LineRow::new(&header); - initial_registers.line.0 = 10; - - let opcode = LineInstruction::Special(49); - - let mut expected_registers = initial_registers; - expected_registers.address.0 += 3; - expected_registers.line.0 -= 3; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, true); - } - - #[test] - fn test_exec_special_line_underflow() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let mut initial_registers = LineRow::new(&header); - initial_registers.line.0 = 2; - - // -3 line advance. - let opcode = LineInstruction::Special(13); - - let mut expected_registers = initial_registers; - // Clamp at 0. No idea if this is the best way to handle this situation - // or not... 
- expected_registers.line.0 = 0; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, true); - } - - #[test] - fn test_exec_copy() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let mut initial_registers = LineRow::new(&header); - initial_registers.address.0 = 1337; - initial_registers.line.0 = 42; - - let opcode = LineInstruction::Copy; - - let expected_registers = initial_registers; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, true); - } - - #[test] - fn test_exec_advance_pc() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::AdvancePc(42); - - let mut expected_registers = initial_registers; - expected_registers.address.0 += 42; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_advance_pc_overflow() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let opcode = LineInstruction::AdvancePc(42); - - let mut initial_registers = LineRow::new(&header); - initial_registers.address.0 = u64::MAX; - - let mut expected_registers = initial_registers; - expected_registers.address.0 = 41; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_advance_line() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::AdvanceLine(42); - - let mut expected_registers = initial_registers; - expected_registers.line.0 += 42; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_advance_line_overflow() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let opcode = LineInstruction::AdvanceLine(42); - - let mut initial_registers = LineRow::new(&header); - 
initial_registers.line.0 = u64::MAX; - - let mut expected_registers = initial_registers; - expected_registers.line.0 = 41; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_set_file_in_bounds() { - for file_idx in 1..3 { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::SetFile(file_idx); - - let mut expected_registers = initial_registers; - expected_registers.file = file_idx; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - } - - #[test] - fn test_exec_set_file_out_of_bounds() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::SetFile(100); - - // The spec doesn't say anything about rejecting input programs - // that set the file register out of bounds of the actual number - // of files that have been defined. Instead, we cross our - // fingers and hope that one gets defined before - // `LineRow::file` gets called and handle the error at - // that time if need be. - let mut expected_registers = initial_registers; - expected_registers.file = 100; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_file_entry_file_index_out_of_bounds() { - // These indices are 1-based, so 0 is invalid. 100 is way more than the - // number of files defined in the header. - let out_of_bounds_indices = [0, 100]; - - for file_idx in &out_of_bounds_indices[..] 
{ - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let mut row = LineRow::new(&header); - - row.file = *file_idx; - - assert_eq!(row.file(&header), None); - } - } - - #[test] - fn test_file_entry_file_index_in_bounds() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let mut row = LineRow::new(&header); - - row.file = 2; - - assert_eq!(row.file(&header), Some(&header.file_names()[1])); - } - - #[test] - fn test_exec_set_column() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::SetColumn(42); - - let mut expected_registers = initial_registers; - expected_registers.column = 42; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_negate_statement() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::NegateStatement; - - let mut expected_registers = initial_registers; - expected_registers.is_stmt = !initial_registers.is_stmt; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_set_basic_block() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let mut initial_registers = LineRow::new(&header); - initial_registers.basic_block = false; - - let opcode = LineInstruction::SetBasicBlock; - - let mut expected_registers = initial_registers; - expected_registers.basic_block = true; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_const_add_pc() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::ConstAddPc; - - let mut expected_registers = initial_registers; - expected_registers.address.0 += 20; - - 
assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_fixed_add_pc() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let mut initial_registers = LineRow::new(&header); - initial_registers.op_index.0 = 1; - - let opcode = LineInstruction::FixedAddPc(10); - - let mut expected_registers = initial_registers; - expected_registers.address.0 += 10; - expected_registers.op_index.0 = 0; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_set_prologue_end() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - - let mut initial_registers = LineRow::new(&header); - initial_registers.prologue_end = false; - - let opcode = LineInstruction::SetPrologueEnd; - - let mut expected_registers = initial_registers; - expected_registers.prologue_end = true; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_set_isa() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::SetIsa(1993); - - let mut expected_registers = initial_registers; - expected_registers.isa = 1993; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_unknown_standard_0() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::UnknownStandard0(constants::DwLns(111)); - let expected_registers = initial_registers; - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_unknown_standard_1() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = 
LineInstruction::UnknownStandard1(constants::DwLns(111), 2); - let expected_registers = initial_registers; - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_unknown_standard_n() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::UnknownStandardN( - constants::DwLns(111), - EndianSlice::new(&[2, 2, 2], LittleEndian), - ); - let expected_registers = initial_registers; - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_end_sequence() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::EndSequence; - - let mut expected_registers = initial_registers; - expected_registers.end_sequence = true; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, true); - } - - #[test] - fn test_exec_set_address() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::SetAddress(3030); - - let mut expected_registers = initial_registers; - expected_registers.address.0 = 3030; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_set_address_tombstone() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::SetAddress(!0); - - let mut expected_registers = initial_registers; - expected_registers.tombstone = true; - expected_registers.address.0 = !0; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_define_file() { - let mut program = make_test_program(EndianSlice::new(&[], LittleEndian)); - let mut row = 
LineRow::new(program.header()); - - let file = FileEntry { - path_name: AttributeValue::String(EndianSlice::new(b"test.cpp", LittleEndian)), - directory_index: 0, - timestamp: 0, - size: 0, - md5: [0; 16], - }; - - let opcode = LineInstruction::DefineFile(file); - let is_new_row = row.execute(opcode, &mut program); - - assert_eq!(is_new_row, false); - assert_eq!(Some(&file), program.header().file_names.last()); - } - - #[test] - fn test_exec_set_discriminator() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::SetDiscriminator(9); - - let mut expected_registers = initial_registers; - expected_registers.discriminator = 9; - - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - #[test] - fn test_exec_unknown_extended() { - let header = make_test_header(EndianSlice::new(&[], LittleEndian)); - let initial_registers = LineRow::new(&header); - let opcode = LineInstruction::UnknownExtended( - constants::DwLne(74), - EndianSlice::new(&[], LittleEndian), - ); - let expected_registers = initial_registers; - assert_exec_opcode(header, initial_registers, opcode, expected_registers, false); - } - - /// Ensure that `LineRows` is covariant wrt R. - /// This only needs to compile. 
- #[allow(dead_code, unreachable_code, unused_variables)] - fn test_line_rows_variance<'a, 'b>(_: &'a [u8], _: &'b [u8]) - where - 'a: 'b, - { - let a: &OneShotLineRows> = unimplemented!(); - let _: &OneShotLineRows> = a; - } - - #[test] - fn test_parse_debug_line_v5_ok() { - let expected_lengths = &[1, 2]; - let expected_program = &[0, 1, 2, 3, 4]; - let expected_rest = &[5, 6, 7, 8, 9]; - let expected_include_directories = [ - AttributeValue::String(EndianSlice::new(b"dir1", LittleEndian)), - AttributeValue::String(EndianSlice::new(b"dir2", LittleEndian)), - ]; - let expected_file_names = [ - FileEntry { - path_name: AttributeValue::String(EndianSlice::new(b"file1", LittleEndian)), - directory_index: 0, - timestamp: 0, - size: 0, - md5: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - }, - FileEntry { - path_name: AttributeValue::String(EndianSlice::new(b"file2", LittleEndian)), - directory_index: 1, - timestamp: 0, - size: 0, - md5: [ - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, - ], - }, - ]; - - for format in vec![Format::Dwarf32, Format::Dwarf64] { - let length = Label::new(); - let header_length = Label::new(); - let start = Label::new(); - let header_start = Label::new(); - let end = Label::new(); - let header_end = Label::new(); - let section = Section::with_endian(Endian::Little) - .initial_length(format, &length, &start) - .D16(5) - // Address size. - .D8(4) - // Segment selector size. - .D8(0) - .word_label(format.word_size(), &header_length) - .mark(&header_start) - // Minimum instruction length. - .D8(1) - // Maximum operations per byte. - .D8(1) - // Default is_stmt. - .D8(1) - // Line base. - .D8(0) - // Line range. - .D8(1) - // Opcode base. - .D8(expected_lengths.len() as u8 + 1) - // Standard opcode lengths for opcodes 1 .. opcode base - 1. - .append_bytes(expected_lengths) - // Directory entry format count. 
- .D8(1) - .uleb(constants::DW_LNCT_path.0 as u64) - .uleb(constants::DW_FORM_string.0 as u64) - // Directory count. - .D8(2) - .append_bytes(b"dir1\0") - .append_bytes(b"dir2\0") - // File entry format count. - .D8(3) - .uleb(constants::DW_LNCT_path.0 as u64) - .uleb(constants::DW_FORM_string.0 as u64) - .uleb(constants::DW_LNCT_directory_index.0 as u64) - .uleb(constants::DW_FORM_data1.0 as u64) - .uleb(constants::DW_LNCT_MD5.0 as u64) - .uleb(constants::DW_FORM_data16.0 as u64) - // File count. - .D8(2) - .append_bytes(b"file1\0") - .D8(0) - .append_bytes(&expected_file_names[0].md5) - .append_bytes(b"file2\0") - .D8(1) - .append_bytes(&expected_file_names[1].md5) - .mark(&header_end) - // Dummy line program data. - .append_bytes(expected_program) - .mark(&end) - // Dummy trailing data. - .append_bytes(expected_rest); - length.set_const((&end - &start) as u64); - header_length.set_const((&header_end - &header_start) as u64); - let section = section.get_contents().unwrap(); - - let input = &mut EndianSlice::new(§ion, LittleEndian); - - let header = LineProgramHeader::parse(input, DebugLineOffset(0), 0, None, None) - .expect("should parse header ok"); - - assert_eq!(header.raw_program_buf().slice(), expected_program); - assert_eq!(input.slice(), expected_rest); - - assert_eq!(header.offset, DebugLineOffset(0)); - assert_eq!(header.version(), 5); - assert_eq!(header.address_size(), 4); - assert_eq!(header.minimum_instruction_length(), 1); - assert_eq!(header.maximum_operations_per_instruction(), 1); - assert_eq!(header.default_is_stmt(), true); - assert_eq!(header.line_base(), 0); - assert_eq!(header.line_range(), 1); - assert_eq!(header.opcode_base(), expected_lengths.len() as u8 + 1); - assert_eq!(header.standard_opcode_lengths().slice(), expected_lengths); - assert_eq!( - header.directory_entry_format(), - &[FileEntryFormat { - content_type: constants::DW_LNCT_path, - form: constants::DW_FORM_string, - }] - ); - assert_eq!(header.include_directories(), 
expected_include_directories); - assert_eq!(header.directory(0), Some(expected_include_directories[0])); - assert_eq!( - header.file_name_entry_format(), - &[ - FileEntryFormat { - content_type: constants::DW_LNCT_path, - form: constants::DW_FORM_string, - }, - FileEntryFormat { - content_type: constants::DW_LNCT_directory_index, - form: constants::DW_FORM_data1, - }, - FileEntryFormat { - content_type: constants::DW_LNCT_MD5, - form: constants::DW_FORM_data16, - } - ] - ); - assert_eq!(header.file_names(), expected_file_names); - assert_eq!(header.file(0), Some(&expected_file_names[0])); - } - } - - #[test] - fn test_sequences() { - #[rustfmt::skip] - let buf = [ - // 32-bit length - 94, 0x00, 0x00, 0x00, - // Version. - 0x04, 0x00, - // Header length = 40. - 0x28, 0x00, 0x00, 0x00, - // Minimum instruction length. - 0x01, - // Maximum operations per byte. - 0x01, - // Default is_stmt. - 0x01, - // Line base. - 0x00, - // Line range. - 0x01, - // Opcode base. - 0x03, - // Standard opcode lengths for opcodes 1 .. opcode base - 1. - 0x01, 0x02, - // Include directories = '/', 'i', 'n', 'c', '\0', '/', 'i', 'n', 'c', '2', '\0', '\0' - 0x2f, 0x69, 0x6e, 0x63, 0x00, 0x2f, 0x69, 0x6e, 0x63, 0x32, 0x00, 0x00, - // File names - // foo.rs - 0x66, 0x6f, 0x6f, 0x2e, 0x72, 0x73, 0x00, - 0x00, - 0x00, - 0x00, - // bar.h - 0x62, 0x61, 0x72, 0x2e, 0x68, 0x00, - 0x01, - 0x00, - 0x00, - // End file names. 
- 0x00, - - 0, 5, constants::DW_LNE_set_address.0, 1, 0, 0, 0, - constants::DW_LNS_copy.0, - constants::DW_LNS_advance_pc.0, 1, - constants::DW_LNS_copy.0, - constants::DW_LNS_advance_pc.0, 2, - 0, 1, constants::DW_LNE_end_sequence.0, - - // Tombstone - 0, 5, constants::DW_LNE_set_address.0, 0xff, 0xff, 0xff, 0xff, - constants::DW_LNS_copy.0, - constants::DW_LNS_advance_pc.0, 1, - constants::DW_LNS_copy.0, - constants::DW_LNS_advance_pc.0, 2, - 0, 1, constants::DW_LNE_end_sequence.0, - - 0, 5, constants::DW_LNE_set_address.0, 11, 0, 0, 0, - constants::DW_LNS_copy.0, - constants::DW_LNS_advance_pc.0, 1, - constants::DW_LNS_copy.0, - constants::DW_LNS_advance_pc.0, 2, - 0, 1, constants::DW_LNE_end_sequence.0, - ]; - assert_eq!(buf[0] as usize, buf.len() - 4); - - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - let header = LineProgramHeader::parse(rest, DebugLineOffset(0), 4, None, None) - .expect("should parse header ok"); - let program = IncompleteLineProgram { header }; - - let sequences = program.sequences().unwrap().1; - assert_eq!(sequences.len(), 2); - assert_eq!(sequences[0].start, 1); - assert_eq!(sequences[0].end, 4); - assert_eq!(sequences[1].start, 11); - assert_eq!(sequences[1].end, 14); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/lists.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/lists.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/lists.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/lists.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,68 +0,0 @@ -use crate::common::{Encoding, Format}; -use crate::read::{Error, Reader, Result}; - -#[derive(Debug, Clone, Copy)] -pub(crate) struct ListsHeader { - encoding: Encoding, - #[allow(dead_code)] - offset_entry_count: u32, -} - -impl Default for ListsHeader { - fn default() -> Self { - ListsHeader { - encoding: Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 0, - }, - offset_entry_count: 0, - } - } -} - -impl ListsHeader 
{ - /// Return the serialized size of the table header. - #[allow(dead_code)] - #[inline] - fn size(self) -> u8 { - // initial_length + version + address_size + segment_selector_size + offset_entry_count - ListsHeader::size_for_encoding(self.encoding) - } - - /// Return the serialized size of the table header. - #[inline] - pub(crate) fn size_for_encoding(encoding: Encoding) -> u8 { - // initial_length + version + address_size + segment_selector_size + offset_entry_count - encoding.format.initial_length_size() + 2 + 1 + 1 + 4 - } -} - -// TODO: add an iterator over headers in the appropriate sections section -#[allow(dead_code)] -fn parse_header(input: &mut R) -> Result { - let (length, format) = input.read_initial_length()?; - input.truncate(length)?; - - let version = input.read_u16()?; - if version != 5 { - return Err(Error::UnknownVersion(u64::from(version))); - } - - let address_size = input.read_u8()?; - let segment_selector_size = input.read_u8()?; - if segment_selector_size != 0 { - return Err(Error::UnsupportedSegmentSize); - } - let offset_entry_count = input.read_u32()?; - - let encoding = Encoding { - format, - version, - address_size, - }; - Ok(ListsHeader { - encoding, - offset_entry_count, - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/loclists.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/loclists.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/loclists.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/loclists.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1627 +0,0 @@ -use crate::common::{ - DebugAddrBase, DebugAddrIndex, DebugLocListsBase, DebugLocListsIndex, DwarfFileType, Encoding, - LocationListsOffset, SectionId, -}; -use crate::constants; -use crate::endianity::Endianity; -use crate::read::{ - lists::ListsHeader, DebugAddr, EndianSlice, Error, Expression, Range, RawRange, Reader, - ReaderOffset, ReaderOffsetId, Result, Section, -}; - -/// The raw contents of the `.debug_loc` section. 
-#[derive(Debug, Default, Clone, Copy)] -pub struct DebugLoc { - pub(crate) section: R, -} - -impl<'input, Endian> DebugLoc> -where - Endian: Endianity, -{ - /// Construct a new `DebugLoc` instance from the data in the `.debug_loc` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_loc` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. - /// - /// ``` - /// use gimli::{DebugLoc, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_loc_section_somehow = || &buf; - /// let debug_loc = DebugLoc::new(read_debug_loc_section_somehow(), LittleEndian); - /// ``` - pub fn new(section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(section, endian)) - } -} - -impl Section for DebugLoc { - fn id() -> SectionId { - SectionId::DebugLoc - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugLoc { - fn from(section: R) -> Self { - DebugLoc { section } - } -} - -/// The `DebugLocLists` struct represents the DWARF data -/// found in the `.debug_loclists` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugLocLists { - section: R, -} - -impl<'input, Endian> DebugLocLists> -where - Endian: Endianity, -{ - /// Construct a new `DebugLocLists` instance from the data in the `.debug_loclists` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_loclists` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. 
- /// - /// ``` - /// use gimli::{DebugLocLists, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_loclists_section_somehow = || &buf; - /// let debug_loclists = DebugLocLists::new(read_debug_loclists_section_somehow(), LittleEndian); - /// ``` - pub fn new(section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(section, endian)) - } -} - -impl Section for DebugLocLists { - fn id() -> SectionId { - SectionId::DebugLocLists - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugLocLists { - fn from(section: R) -> Self { - DebugLocLists { section } - } -} - -pub(crate) type LocListsHeader = ListsHeader; - -impl DebugLocListsBase -where - Offset: ReaderOffset, -{ - /// Returns a `DebugLocListsBase` with the default value of DW_AT_loclists_base - /// for the given `Encoding` and `DwarfFileType`. - pub fn default_for_encoding_and_file( - encoding: Encoding, - file_type: DwarfFileType, - ) -> DebugLocListsBase { - if encoding.version >= 5 && file_type == DwarfFileType::Dwo { - // In .dwo files, the compiler omits the DW_AT_loclists_base attribute (because there is - // only a single unit in the file) but we must skip past the header, which the attribute - // would normally do for us. - DebugLocListsBase(Offset::from_u8(LocListsHeader::size_for_encoding(encoding))) - } else { - DebugLocListsBase(Offset::from_u8(0)) - } - } -} - -/// The DWARF data found in `.debug_loc` and `.debug_loclists` sections. -#[derive(Debug, Default, Clone, Copy)] -pub struct LocationLists { - debug_loc: DebugLoc, - debug_loclists: DebugLocLists, -} - -impl LocationLists { - /// Construct a new `LocationLists` instance from the data in the `.debug_loc` and - /// `.debug_loclists` sections. 
- pub fn new(debug_loc: DebugLoc, debug_loclists: DebugLocLists) -> LocationLists { - LocationLists { - debug_loc, - debug_loclists, - } - } -} - -impl LocationLists { - /// Create a `LocationLists` that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::LocationLists> = load_section(); - /// // Create a reference to the DWARF section. - /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> LocationLists - where - F: FnMut(&'a T) -> R, - { - LocationLists { - debug_loc: borrow(&self.debug_loc.section).into(), - debug_loclists: borrow(&self.debug_loclists.section).into(), - } - } -} - -impl LocationLists { - /// Iterate over the `LocationListEntry`s starting at the given offset. - /// - /// The `unit_encoding` must match the compilation unit that the - /// offset was contained in. - /// - /// The `base_address` should be obtained from the `DW_AT_low_pc` attribute in the - /// `DW_TAG_compile_unit` entry for the compilation unit that contains this location - /// list. - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). - pub fn locations( - &self, - offset: LocationListsOffset, - unit_encoding: Encoding, - base_address: u64, - debug_addr: &DebugAddr, - debug_addr_base: DebugAddrBase, - ) -> Result> { - Ok(LocListIter::new( - self.raw_locations(offset, unit_encoding)?, - base_address, - debug_addr.clone(), - debug_addr_base, - )) - } - - /// Similar to `locations`, but with special handling for .dwo files. - /// This should only been used when this `LocationLists` was loaded from a - /// .dwo file. 
- pub fn locations_dwo( - &self, - offset: LocationListsOffset, - unit_encoding: Encoding, - base_address: u64, - debug_addr: &DebugAddr, - debug_addr_base: DebugAddrBase, - ) -> Result> { - Ok(LocListIter::new( - self.raw_locations_dwo(offset, unit_encoding)?, - base_address, - debug_addr.clone(), - debug_addr_base, - )) - } - - /// Iterate over the raw `LocationListEntry`s starting at the given offset. - /// - /// The `unit_encoding` must match the compilation unit that the - /// offset was contained in. - /// - /// This iterator does not perform any processing of the location entries, - /// such as handling base addresses. - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). - pub fn raw_locations( - &self, - offset: LocationListsOffset, - unit_encoding: Encoding, - ) -> Result> { - let (mut input, format) = if unit_encoding.version <= 4 { - (self.debug_loc.section.clone(), LocListsFormat::Bare) - } else { - (self.debug_loclists.section.clone(), LocListsFormat::Lle) - }; - input.skip(offset.0)?; - Ok(RawLocListIter::new(input, unit_encoding, format)) - } - - /// Similar to `raw_locations`, but with special handling for .dwo files. - /// This should only been used when this `LocationLists` was loaded from a - /// .dwo file. - pub fn raw_locations_dwo( - &self, - offset: LocationListsOffset, - unit_encoding: Encoding, - ) -> Result> { - let mut input = if unit_encoding.version <= 4 { - // In the GNU split dwarf extension the locations are present in the - // .debug_loc section but are encoded with the DW_LLE values used - // for the DWARF 5 .debug_loclists section. - self.debug_loc.section.clone() - } else { - self.debug_loclists.section.clone() - }; - input.skip(offset.0)?; - Ok(RawLocListIter::new( - input, - unit_encoding, - LocListsFormat::Lle, - )) - } - - /// Returns the `.debug_loclists` offset at the given `base` and `index`. 
- /// - /// The `base` must be the `DW_AT_loclists_base` value from the compilation unit DIE. - /// This is an offset that points to the first entry following the header. - /// - /// The `index` is the value of a `DW_FORM_loclistx` attribute. - pub fn get_offset( - &self, - unit_encoding: Encoding, - base: DebugLocListsBase, - index: DebugLocListsIndex, - ) -> Result> { - let format = unit_encoding.format; - let input = &mut self.debug_loclists.section.clone(); - input.skip(base.0)?; - input.skip(R::Offset::from_u64( - index.0.into_u64() * u64::from(format.word_size()), - )?)?; - input - .read_offset(format) - .map(|x| LocationListsOffset(base.0 + x)) - } - - /// Call `Reader::lookup_offset_id` for each section, and return the first match. - pub fn lookup_offset_id(&self, id: ReaderOffsetId) -> Option<(SectionId, R::Offset)> { - self.debug_loc - .lookup_offset_id(id) - .or_else(|| self.debug_loclists.lookup_offset_id(id)) - } -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum LocListsFormat { - /// The bare location list format used before DWARF 5. - Bare, - /// The DW_LLE encoded range list format used in DWARF 5 and the non-standard GNU - /// split dwarf extension. - Lle, -} - -/// A raw iterator over a location list. -/// -/// This iterator does not perform any processing of the location entries, -/// such as handling base addresses. -#[derive(Debug)] -pub struct RawLocListIter { - input: R, - encoding: Encoding, - format: LocListsFormat, -} - -/// A raw entry in .debug_loclists. -#[derive(Clone, Debug)] -pub enum RawLocListEntry { - /// A location from DWARF version <= 4. - AddressOrOffsetPair { - /// Start of range. May be an address or an offset. - begin: u64, - /// End of range. May be an address or an offset. 
- end: u64, - /// expression - data: Expression, - }, - /// DW_LLE_base_address - BaseAddress { - /// base address - addr: u64, - }, - /// DW_LLE_base_addressx - BaseAddressx { - /// base address - addr: DebugAddrIndex, - }, - /// DW_LLE_startx_endx - StartxEndx { - /// start of range - begin: DebugAddrIndex, - /// end of range - end: DebugAddrIndex, - /// expression - data: Expression, - }, - /// DW_LLE_startx_length - StartxLength { - /// start of range - begin: DebugAddrIndex, - /// length of range - length: u64, - /// expression - data: Expression, - }, - /// DW_LLE_offset_pair - OffsetPair { - /// start of range - begin: u64, - /// end of range - end: u64, - /// expression - data: Expression, - }, - /// DW_LLE_default_location - DefaultLocation { - /// expression - data: Expression, - }, - /// DW_LLE_start_end - StartEnd { - /// start of range - begin: u64, - /// end of range - end: u64, - /// expression - data: Expression, - }, - /// DW_LLE_start_length - StartLength { - /// start of range - begin: u64, - /// length of range - length: u64, - /// expression - data: Expression, - }, -} - -fn parse_data(input: &mut R, encoding: Encoding) -> Result> { - if encoding.version >= 5 { - let len = R::Offset::from_u64(input.read_uleb128()?)?; - Ok(Expression(input.split(len)?)) - } else { - // In the GNU split-dwarf extension this is a fixed 2 byte value. 
- let len = R::Offset::from_u16(input.read_u16()?); - Ok(Expression(input.split(len)?)) - } -} - -impl RawLocListEntry { - /// Parse a location list entry from `.debug_loclists` - fn parse(input: &mut R, encoding: Encoding, format: LocListsFormat) -> Result> { - Ok(match format { - LocListsFormat::Bare => { - let range = RawRange::parse(input, encoding.address_size)?; - if range.is_end() { - None - } else if range.is_base_address(encoding.address_size) { - Some(RawLocListEntry::BaseAddress { addr: range.end }) - } else { - let len = R::Offset::from_u16(input.read_u16()?); - let data = Expression(input.split(len)?); - Some(RawLocListEntry::AddressOrOffsetPair { - begin: range.begin, - end: range.end, - data, - }) - } - } - LocListsFormat::Lle => match constants::DwLle(input.read_u8()?) { - constants::DW_LLE_end_of_list => None, - constants::DW_LLE_base_addressx => Some(RawLocListEntry::BaseAddressx { - addr: DebugAddrIndex(input.read_uleb128().and_then(R::Offset::from_u64)?), - }), - constants::DW_LLE_startx_endx => Some(RawLocListEntry::StartxEndx { - begin: DebugAddrIndex(input.read_uleb128().and_then(R::Offset::from_u64)?), - end: DebugAddrIndex(input.read_uleb128().and_then(R::Offset::from_u64)?), - data: parse_data(input, encoding)?, - }), - constants::DW_LLE_startx_length => Some(RawLocListEntry::StartxLength { - begin: DebugAddrIndex(input.read_uleb128().and_then(R::Offset::from_u64)?), - length: if encoding.version >= 5 { - input.read_uleb128()? - } else { - // In the GNU split-dwarf extension this is a fixed 4 byte value. - input.read_u32()? 
as u64 - }, - data: parse_data(input, encoding)?, - }), - constants::DW_LLE_offset_pair => Some(RawLocListEntry::OffsetPair { - begin: input.read_uleb128()?, - end: input.read_uleb128()?, - data: parse_data(input, encoding)?, - }), - constants::DW_LLE_default_location => Some(RawLocListEntry::DefaultLocation { - data: parse_data(input, encoding)?, - }), - constants::DW_LLE_base_address => Some(RawLocListEntry::BaseAddress { - addr: input.read_address(encoding.address_size)?, - }), - constants::DW_LLE_start_end => Some(RawLocListEntry::StartEnd { - begin: input.read_address(encoding.address_size)?, - end: input.read_address(encoding.address_size)?, - data: parse_data(input, encoding)?, - }), - constants::DW_LLE_start_length => Some(RawLocListEntry::StartLength { - begin: input.read_address(encoding.address_size)?, - length: input.read_uleb128()?, - data: parse_data(input, encoding)?, - }), - _ => { - return Err(Error::InvalidAddressRange); - } - }, - }) - } -} - -impl RawLocListIter { - /// Construct a `RawLocListIter`. - fn new(input: R, encoding: Encoding, format: LocListsFormat) -> RawLocListIter { - RawLocListIter { - input, - encoding, - format, - } - } - - /// Advance the iterator to the next location. - pub fn next(&mut self) -> Result>> { - if self.input.is_empty() { - return Ok(None); - } - - match RawLocListEntry::parse(&mut self.input, self.encoding, self.format) { - Ok(entry) => { - if entry.is_none() { - self.input.empty(); - } - Ok(entry) - } - Err(e) => { - self.input.empty(); - Err(e) - } - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for RawLocListIter { - type Item = RawLocListEntry; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - RawLocListIter::next(self) - } -} - -/// An iterator over a location list. -/// -/// This iterator internally handles processing of base address selection entries -/// and list end entries. 
Thus, it only returns location entries that are valid -/// and already adjusted for the base address. -#[derive(Debug)] -pub struct LocListIter { - raw: RawLocListIter, - base_address: u64, - debug_addr: DebugAddr, - debug_addr_base: DebugAddrBase, -} - -impl LocListIter { - /// Construct a `LocListIter`. - fn new( - raw: RawLocListIter, - base_address: u64, - debug_addr: DebugAddr, - debug_addr_base: DebugAddrBase, - ) -> LocListIter { - LocListIter { - raw, - base_address, - debug_addr, - debug_addr_base, - } - } - - #[inline] - fn get_address(&self, index: DebugAddrIndex) -> Result { - self.debug_addr - .get_address(self.raw.encoding.address_size, self.debug_addr_base, index) - } - - /// Advance the iterator to the next location. - pub fn next(&mut self) -> Result>> { - loop { - let raw_loc = match self.raw.next()? { - Some(loc) => loc, - None => return Ok(None), - }; - - let loc = self.convert_raw(raw_loc)?; - if loc.is_some() { - return Ok(loc); - } - } - } - - /// Return the next raw location. - /// - /// The raw location should be passed to `convert_raw`. - #[doc(hidden)] - pub fn next_raw(&mut self) -> Result>> { - self.raw.next() - } - - /// Convert a raw location into a location, and update the state of the iterator. - /// - /// The raw location should have been obtained from `next_raw`. 
- #[doc(hidden)] - pub fn convert_raw( - &mut self, - raw_loc: RawLocListEntry, - ) -> Result>> { - let mask = !0 >> (64 - self.raw.encoding.address_size * 8); - let tombstone = if self.raw.encoding.version <= 4 { - mask - 1 - } else { - mask - }; - - let (range, data) = match raw_loc { - RawLocListEntry::BaseAddress { addr } => { - self.base_address = addr; - return Ok(None); - } - RawLocListEntry::BaseAddressx { addr } => { - self.base_address = self.get_address(addr)?; - return Ok(None); - } - RawLocListEntry::StartxEndx { begin, end, data } => { - let begin = self.get_address(begin)?; - let end = self.get_address(end)?; - (Range { begin, end }, data) - } - RawLocListEntry::StartxLength { - begin, - length, - data, - } => { - let begin = self.get_address(begin)?; - let end = begin.wrapping_add(length) & mask; - (Range { begin, end }, data) - } - RawLocListEntry::DefaultLocation { data } => ( - Range { - begin: 0, - end: u64::max_value(), - }, - data, - ), - RawLocListEntry::AddressOrOffsetPair { begin, end, data } - | RawLocListEntry::OffsetPair { begin, end, data } => { - if self.base_address == tombstone { - return Ok(None); - } - let mut range = Range { begin, end }; - range.add_base_address(self.base_address, self.raw.encoding.address_size); - (range, data) - } - RawLocListEntry::StartEnd { begin, end, data } => (Range { begin, end }, data), - RawLocListEntry::StartLength { - begin, - length, - data, - } => { - let end = begin.wrapping_add(length) & mask; - (Range { begin, end }, data) - } - }; - - if range.begin == tombstone { - return Ok(None); - } - - if range.begin > range.end { - self.raw.input.empty(); - return Err(Error::InvalidLocationAddressRange); - } - - Ok(Some(LocationListEntry { range, data })) - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for LocListIter { - type Item = LocationListEntry; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - LocListIter::next(self) 
- } -} - -/// A location list entry from the `.debug_loc` or `.debug_loclists` sections. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct LocationListEntry { - /// The address range that this location is valid for. - pub range: Range, - - /// The data containing a single location description. - pub data: Expression, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::common::Format; - use crate::endianity::LittleEndian; - use crate::read::{EndianSlice, Range}; - use crate::test_util::GimliSectionMethods; - use test_assembler::{Endian, Label, LabelMaker, Section}; - - #[test] - fn test_loclists_32() { - let tombstone = !0u32; - let encoding = Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 4, - }; - - let section = Section::with_endian(Endian::Little) - .L32(0x0300_0000) - .L32(0x0301_0300) - .L32(0x0301_0400) - .L32(0x0301_0500) - .L32(tombstone) - .L32(0x0301_0600); - let buf = section.get_contents().unwrap(); - let debug_addr = &DebugAddr::from(EndianSlice::new(&buf, LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - - let start = Label::new(); - let first = Label::new(); - let size = Label::new(); - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // Header - .mark(&start) - .L32(&size) - .L16(encoding.version) - .L8(encoding.address_size) - .L8(0) - .L32(0) - .mark(&first) - // OffsetPair - .L8(4).uleb(0x10200).uleb(0x10300).uleb(4).L32(2) - // A base address selection followed by an OffsetPair. - .L8(6).L32(0x0200_0000) - .L8(4).uleb(0x10400).uleb(0x10500).uleb(4).L32(3) - // An empty OffsetPair followed by a normal OffsetPair. - .L8(4).uleb(0x10600).uleb(0x10600).uleb(4).L32(4) - .L8(4).uleb(0x10800).uleb(0x10900).uleb(4).L32(5) - // A StartEnd - .L8(7).L32(0x201_0a00).L32(0x201_0b00).uleb(4).L32(6) - // A StartLength - .L8(8).L32(0x201_0c00).uleb(0x100).uleb(4).L32(7) - // An OffsetPair that starts at 0. - .L8(4).uleb(0).uleb(1).uleb(4).L32(8) - // An OffsetPair that ends at -1. 
- .L8(6).L32(0) - .L8(4).uleb(0).uleb(0xffff_ffff).uleb(4).L32(9) - // A DefaultLocation - .L8(5).uleb(4).L32(10) - // A BaseAddressx + OffsetPair - .L8(1).uleb(0) - .L8(4).uleb(0x10100).uleb(0x10200).uleb(4).L32(11) - // A StartxEndx - .L8(2).uleb(1).uleb(2).uleb(4).L32(12) - // A StartxLength - .L8(3).uleb(3).uleb(0x100).uleb(4).L32(13) - - // Tombstone entries, all of which should be ignored. - // A BaseAddressx that is a tombstone. - .L8(1).uleb(4) - .L8(4).uleb(0x11100).uleb(0x11200).uleb(4).L32(20) - // A BaseAddress that is a tombstone. - .L8(6).L32(tombstone) - .L8(4).uleb(0x11300).uleb(0x11400).uleb(4).L32(21) - // A StartxEndx that is a tombstone. - .L8(2).uleb(4).uleb(5).uleb(4).L32(22) - // A StartxLength that is a tombstone. - .L8(3).uleb(4).uleb(0x100).uleb(4).L32(23) - // A StartEnd that is a tombstone. - .L8(7).L32(tombstone).L32(0x201_1500).uleb(4).L32(24) - // A StartLength that is a tombstone. - .L8(8).L32(tombstone).uleb(0x100).uleb(4).L32(25) - // A StartEnd (not ignored) - .L8(7).L32(0x201_1600).L32(0x201_1700).uleb(4).L32(26) - - // A range end. - .L8(0) - // Some extra data. - .L32(0xffff_ffff); - size.set_const((§ion.here() - &start - 4) as u64); - - let buf = section.get_contents().unwrap(); - let debug_loc = DebugLoc::new(&[], LittleEndian); - let debug_loclists = DebugLocLists::new(&buf, LittleEndian); - let loclists = LocationLists::new(debug_loc, debug_loclists); - let offset = LocationListsOffset((&first - &start) as usize); - let mut locations = loclists - .locations(offset, encoding, 0x0100_0000, debug_addr, debug_addr_base) - .unwrap(); - - // A normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0101_0200, - end: 0x0101_0300, - }, - data: Expression(EndianSlice::new(&[2, 0, 0, 0], LittleEndian)), - })) - ); - - // A base address selection followed by a normal location. 
- assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0400, - end: 0x0201_0500, - }, - data: Expression(EndianSlice::new(&[3, 0, 0, 0], LittleEndian)), - })) - ); - - // An empty location range followed by a normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0600, - end: 0x0201_0600, - }, - data: Expression(EndianSlice::new(&[4, 0, 0, 0], LittleEndian)), - })) - ); - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0800, - end: 0x0201_0900, - }, - data: Expression(EndianSlice::new(&[5, 0, 0, 0], LittleEndian)), - })) - ); - - // A normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0a00, - end: 0x0201_0b00, - }, - data: Expression(EndianSlice::new(&[6, 0, 0, 0], LittleEndian)), - })) - ); - - // A normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0c00, - end: 0x0201_0d00, - }, - data: Expression(EndianSlice::new(&[7, 0, 0, 0], LittleEndian)), - })) - ); - - // A location range that starts at 0. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0200_0000, - end: 0x0200_0001, - }, - data: Expression(EndianSlice::new(&[8, 0, 0, 0], LittleEndian)), - })) - ); - - // A location range that ends at -1. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0000_0000, - end: 0xffff_ffff, - }, - data: Expression(EndianSlice::new(&[9, 0, 0, 0], LittleEndian)), - })) - ); - - // A DefaultLocation. 
- assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0, - end: u64::max_value(), - }, - data: Expression(EndianSlice::new(&[10, 0, 0, 0], LittleEndian)), - })) - ); - - // A BaseAddressx + OffsetPair - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0301_0100, - end: 0x0301_0200, - }, - data: Expression(EndianSlice::new(&[11, 0, 0, 0], LittleEndian)), - })) - ); - - // A StartxEndx - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0301_0300, - end: 0x0301_0400, - }, - data: Expression(EndianSlice::new(&[12, 0, 0, 0], LittleEndian)), - })) - ); - - // A StartxLength - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0301_0500, - end: 0x0301_0600, - }, - data: Expression(EndianSlice::new(&[13, 0, 0, 0], LittleEndian)), - })) - ); - - // A StartEnd location following the tombstones - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_1600, - end: 0x0201_1700, - }, - data: Expression(EndianSlice::new(&[26, 0, 0, 0], LittleEndian)), - })) - ); - - // A location list end. - assert_eq!(locations.next(), Ok(None)); - - // An offset at the end of buf. 
- let mut locations = loclists - .locations( - LocationListsOffset(buf.len()), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(locations.next(), Ok(None)); - } - - #[test] - fn test_loclists_64() { - let tombstone = !0u64; - let encoding = Encoding { - format: Format::Dwarf64, - version: 5, - address_size: 8, - }; - - let section = Section::with_endian(Endian::Little) - .L64(0x0300_0000) - .L64(0x0301_0300) - .L64(0x0301_0400) - .L64(0x0301_0500) - .L64(tombstone) - .L64(0x0301_0600); - let buf = section.get_contents().unwrap(); - let debug_addr = &DebugAddr::from(EndianSlice::new(&buf, LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - - let start = Label::new(); - let first = Label::new(); - let size = Label::new(); - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // Header - .mark(&start) - .L32(0xffff_ffff) - .L64(&size) - .L16(encoding.version) - .L8(encoding.address_size) - .L8(0) - .L32(0) - .mark(&first) - // OffsetPair - .L8(4).uleb(0x10200).uleb(0x10300).uleb(4).L32(2) - // A base address selection followed by an OffsetPair. - .L8(6).L64(0x0200_0000) - .L8(4).uleb(0x10400).uleb(0x10500).uleb(4).L32(3) - // An empty OffsetPair followed by a normal OffsetPair. - .L8(4).uleb(0x10600).uleb(0x10600).uleb(4).L32(4) - .L8(4).uleb(0x10800).uleb(0x10900).uleb(4).L32(5) - // A StartEnd - .L8(7).L64(0x201_0a00).L64(0x201_0b00).uleb(4).L32(6) - // A StartLength - .L8(8).L64(0x201_0c00).uleb(0x100).uleb(4).L32(7) - // An OffsetPair that starts at 0. - .L8(4).uleb(0).uleb(1).uleb(4).L32(8) - // An OffsetPair that ends at -1. 
- .L8(6).L64(0) - .L8(4).uleb(0).uleb(0xffff_ffff).uleb(4).L32(9) - // A DefaultLocation - .L8(5).uleb(4).L32(10) - // A BaseAddressx + OffsetPair - .L8(1).uleb(0) - .L8(4).uleb(0x10100).uleb(0x10200).uleb(4).L32(11) - // A StartxEndx - .L8(2).uleb(1).uleb(2).uleb(4).L32(12) - // A StartxLength - .L8(3).uleb(3).uleb(0x100).uleb(4).L32(13) - - // Tombstone entries, all of which should be ignored. - // A BaseAddressx that is a tombstone. - .L8(1).uleb(4) - .L8(4).uleb(0x11100).uleb(0x11200).uleb(4).L32(20) - // A BaseAddress that is a tombstone. - .L8(6).L64(tombstone) - .L8(4).uleb(0x11300).uleb(0x11400).uleb(4).L32(21) - // A StartxEndx that is a tombstone. - .L8(2).uleb(4).uleb(5).uleb(4).L32(22) - // A StartxLength that is a tombstone. - .L8(3).uleb(4).uleb(0x100).uleb(4).L32(23) - // A StartEnd that is a tombstone. - .L8(7).L64(tombstone).L64(0x201_1500).uleb(4).L32(24) - // A StartLength that is a tombstone. - .L8(8).L64(tombstone).uleb(0x100).uleb(4).L32(25) - // A StartEnd (not ignored) - .L8(7).L64(0x201_1600).L64(0x201_1700).uleb(4).L32(26) - - // A range end. - .L8(0) - // Some extra data. - .L32(0xffff_ffff); - size.set_const((§ion.here() - &start - 12) as u64); - - let buf = section.get_contents().unwrap(); - let debug_loc = DebugLoc::new(&[], LittleEndian); - let debug_loclists = DebugLocLists::new(&buf, LittleEndian); - let loclists = LocationLists::new(debug_loc, debug_loclists); - let offset = LocationListsOffset((&first - &start) as usize); - let mut locations = loclists - .locations(offset, encoding, 0x0100_0000, debug_addr, debug_addr_base) - .unwrap(); - - // A normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0101_0200, - end: 0x0101_0300, - }, - data: Expression(EndianSlice::new(&[2, 0, 0, 0], LittleEndian)), - })) - ); - - // A base address selection followed by a normal location. 
- assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0400, - end: 0x0201_0500, - }, - data: Expression(EndianSlice::new(&[3, 0, 0, 0], LittleEndian)), - })) - ); - - // An empty location range followed by a normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0600, - end: 0x0201_0600, - }, - data: Expression(EndianSlice::new(&[4, 0, 0, 0], LittleEndian)), - })) - ); - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0800, - end: 0x0201_0900, - }, - data: Expression(EndianSlice::new(&[5, 0, 0, 0], LittleEndian)), - })) - ); - - // A normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0a00, - end: 0x0201_0b00, - }, - data: Expression(EndianSlice::new(&[6, 0, 0, 0], LittleEndian)), - })) - ); - - // A normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0c00, - end: 0x0201_0d00, - }, - data: Expression(EndianSlice::new(&[7, 0, 0, 0], LittleEndian)), - })) - ); - - // A location range that starts at 0. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0200_0000, - end: 0x0200_0001, - }, - data: Expression(EndianSlice::new(&[8, 0, 0, 0], LittleEndian)), - })) - ); - - // A location range that ends at -1. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0000_0000, - end: 0xffff_ffff, - }, - data: Expression(EndianSlice::new(&[9, 0, 0, 0], LittleEndian)), - })) - ); - - // A DefaultLocation. 
- assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0, - end: u64::max_value(), - }, - data: Expression(EndianSlice::new(&[10, 0, 0, 0], LittleEndian)), - })) - ); - - // A BaseAddressx + OffsetPair - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0301_0100, - end: 0x0301_0200, - }, - data: Expression(EndianSlice::new(&[11, 0, 0, 0], LittleEndian)), - })) - ); - - // A StartxEndx - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0301_0300, - end: 0x0301_0400, - }, - data: Expression(EndianSlice::new(&[12, 0, 0, 0], LittleEndian)), - })) - ); - - // A StartxLength - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0301_0500, - end: 0x0301_0600, - }, - data: Expression(EndianSlice::new(&[13, 0, 0, 0], LittleEndian)), - })) - ); - - // A StartEnd location following the tombstones - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_1600, - end: 0x0201_1700, - }, - data: Expression(EndianSlice::new(&[26, 0, 0, 0], LittleEndian)), - })) - ); - - // A location list end. - assert_eq!(locations.next(), Ok(None)); - - // An offset at the end of buf. - let mut locations = loclists - .locations( - LocationListsOffset(buf.len()), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(locations.next(), Ok(None)); - } - - #[test] - fn test_location_list_32() { - let tombstone = !0u32 - 1; - let start = Label::new(); - let first = Label::new(); - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // A location before the offset. - .mark(&start) - .L32(0x10000).L32(0x10100).L16(4).L32(1) - .mark(&first) - // A normal location. - .L32(0x10200).L32(0x10300).L16(4).L32(2) - // A base address selection followed by a normal location. 
- .L32(0xffff_ffff).L32(0x0200_0000) - .L32(0x10400).L32(0x10500).L16(4).L32(3) - // An empty location range followed by a normal location. - .L32(0x10600).L32(0x10600).L16(4).L32(4) - .L32(0x10800).L32(0x10900).L16(4).L32(5) - // A location range that starts at 0. - .L32(0).L32(1).L16(4).L32(6) - // A location range that ends at -1. - .L32(0xffff_ffff).L32(0x0000_0000) - .L32(0).L32(0xffff_ffff).L16(4).L32(7) - // A normal location with tombstone. - .L32(tombstone).L32(tombstone).L16(4).L32(8) - // A base address selection with tombstone followed by a normal location. - .L32(0xffff_ffff).L32(tombstone) - .L32(0x10a00).L32(0x10b00).L16(4).L32(9) - // A location list end. - .L32(0).L32(0) - // Some extra data. - .L32(0); - - let buf = section.get_contents().unwrap(); - let debug_loc = DebugLoc::new(&buf, LittleEndian); - let debug_loclists = DebugLocLists::new(&[], LittleEndian); - let loclists = LocationLists::new(debug_loc, debug_loclists); - let offset = LocationListsOffset((&first - &start) as usize); - let debug_addr = &DebugAddr::from(EndianSlice::new(&[], LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut locations = loclists - .locations(offset, encoding, 0x0100_0000, debug_addr, debug_addr_base) - .unwrap(); - - // A normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0101_0200, - end: 0x0101_0300, - }, - data: Expression(EndianSlice::new(&[2, 0, 0, 0], LittleEndian)), - })) - ); - - // A base address selection followed by a normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0400, - end: 0x0201_0500, - }, - data: Expression(EndianSlice::new(&[3, 0, 0, 0], LittleEndian)), - })) - ); - - // An empty location range followed by a normal location. 
- assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0600, - end: 0x0201_0600, - }, - data: Expression(EndianSlice::new(&[4, 0, 0, 0], LittleEndian)), - })) - ); - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0800, - end: 0x0201_0900, - }, - data: Expression(EndianSlice::new(&[5, 0, 0, 0], LittleEndian)), - })) - ); - - // A location range that starts at 0. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0200_0000, - end: 0x0200_0001, - }, - data: Expression(EndianSlice::new(&[6, 0, 0, 0], LittleEndian)), - })) - ); - - // A location range that ends at -1. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0000_0000, - end: 0xffff_ffff, - }, - data: Expression(EndianSlice::new(&[7, 0, 0, 0], LittleEndian)), - })) - ); - - // A location list end. - assert_eq!(locations.next(), Ok(None)); - - // An offset at the end of buf. - let mut locations = loclists - .locations( - LocationListsOffset(buf.len()), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(locations.next(), Ok(None)); - } - - #[test] - fn test_location_list_64() { - let tombstone = !0u64 - 1; - let start = Label::new(); - let first = Label::new(); - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // A location before the offset. - .mark(&start) - .L64(0x10000).L64(0x10100).L16(4).L32(1) - .mark(&first) - // A normal location. - .L64(0x10200).L64(0x10300).L16(4).L32(2) - // A base address selection followed by a normal location. - .L64(0xffff_ffff_ffff_ffff).L64(0x0200_0000) - .L64(0x10400).L64(0x10500).L16(4).L32(3) - // An empty location range followed by a normal location. - .L64(0x10600).L64(0x10600).L16(4).L32(4) - .L64(0x10800).L64(0x10900).L16(4).L32(5) - // A location range that starts at 0. - .L64(0).L64(1).L16(4).L32(6) - // A location range that ends at -1. 
- .L64(0xffff_ffff_ffff_ffff).L64(0x0000_0000) - .L64(0).L64(0xffff_ffff_ffff_ffff).L16(4).L32(7) - // A normal location with tombstone. - .L64(tombstone).L64(tombstone).L16(4).L32(8) - // A base address selection with tombstone followed by a normal location. - .L64(0xffff_ffff_ffff_ffff).L64(tombstone) - .L64(0x10a00).L64(0x10b00).L16(4).L32(9) - // A location list end. - .L64(0).L64(0) - // Some extra data. - .L64(0); - - let buf = section.get_contents().unwrap(); - let debug_loc = DebugLoc::new(&buf, LittleEndian); - let debug_loclists = DebugLocLists::new(&[], LittleEndian); - let loclists = LocationLists::new(debug_loc, debug_loclists); - let offset = LocationListsOffset((&first - &start) as usize); - let debug_addr = &DebugAddr::from(EndianSlice::new(&[], LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - let encoding = Encoding { - format: Format::Dwarf64, - version: 4, - address_size: 8, - }; - let mut locations = loclists - .locations(offset, encoding, 0x0100_0000, debug_addr, debug_addr_base) - .unwrap(); - - // A normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0101_0200, - end: 0x0101_0300, - }, - data: Expression(EndianSlice::new(&[2, 0, 0, 0], LittleEndian)), - })) - ); - - // A base address selection followed by a normal location. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0400, - end: 0x0201_0500, - }, - data: Expression(EndianSlice::new(&[3, 0, 0, 0], LittleEndian)), - })) - ); - - // An empty location range followed by a normal location. 
- assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0600, - end: 0x0201_0600, - }, - data: Expression(EndianSlice::new(&[4, 0, 0, 0], LittleEndian)), - })) - ); - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0201_0800, - end: 0x0201_0900, - }, - data: Expression(EndianSlice::new(&[5, 0, 0, 0], LittleEndian)), - })) - ); - - // A location range that starts at 0. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0200_0000, - end: 0x0200_0001, - }, - data: Expression(EndianSlice::new(&[6, 0, 0, 0], LittleEndian)), - })) - ); - - // A location range that ends at -1. - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0, - end: 0xffff_ffff_ffff_ffff, - }, - data: Expression(EndianSlice::new(&[7, 0, 0, 0], LittleEndian)), - })) - ); - - // A location list end. - assert_eq!(locations.next(), Ok(None)); - - // An offset at the end of buf. - let mut locations = loclists - .locations( - LocationListsOffset(buf.len()), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(locations.next(), Ok(None)); - } - - #[test] - fn test_locations_invalid() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // An invalid location range. - .L32(0x20000).L32(0x10000).L16(4).L32(1) - // An invalid range after wrapping. - .L32(0x20000).L32(0xff01_0000).L16(4).L32(2); - - let buf = section.get_contents().unwrap(); - let debug_loc = DebugLoc::new(&buf, LittleEndian); - let debug_loclists = DebugLocLists::new(&[], LittleEndian); - let loclists = LocationLists::new(debug_loc, debug_loclists); - let debug_addr = &DebugAddr::from(EndianSlice::new(&[], LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - - // An invalid location range. 
- let mut locations = loclists - .locations( - LocationListsOffset(0x0), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(locations.next(), Err(Error::InvalidLocationAddressRange)); - - // An invalid location range after wrapping. - let mut locations = loclists - .locations( - LocationListsOffset(14), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(locations.next(), Err(Error::InvalidLocationAddressRange)); - - // An invalid offset. - match loclists.locations( - LocationListsOffset(buf.len() + 1), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) { - Err(Error::UnexpectedEof(_)) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - } - - #[test] - fn test_get_offset() { - for format in vec![Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version: 5, - address_size: 4, - }; - - let zero = Label::new(); - let length = Label::new(); - let start = Label::new(); - let first = Label::new(); - let end = Label::new(); - let mut section = Section::with_endian(Endian::Little) - .mark(&zero) - .initial_length(format, &length, &start) - .D16(encoding.version) - .D8(encoding.address_size) - .D8(0) - .D32(20) - .mark(&first); - for i in 0..20 { - section = section.word(format.word_size(), 1000 + i); - } - section = section.mark(&end); - length.set_const((&end - &start) as u64); - let section = section.get_contents().unwrap(); - - let debug_loc = DebugLoc::from(EndianSlice::new(&[], LittleEndian)); - let debug_loclists = DebugLocLists::from(EndianSlice::new(§ion, LittleEndian)); - let locations = LocationLists::new(debug_loc, debug_loclists); - - let base = DebugLocListsBase((&first - &zero) as usize); - assert_eq!( - locations.get_offset(encoding, base, DebugLocListsIndex(0)), - Ok(LocationListsOffset(base.0 + 1000)) - ); - assert_eq!( - locations.get_offset(encoding, base, DebugLocListsIndex(19)), - Ok(LocationListsOffset(base.0 + 
1019)) - ); - } - } - - #[test] - fn test_loclists_gnu_v4_split_dwarf() { - #[rustfmt::skip] - let buf = [ - 0x03, // DW_LLE_startx_length - 0x00, // ULEB encoded b7 - 0x08, 0x00, 0x00, 0x00, // Fixed 4 byte length of 8 - 0x03, 0x00, // Fixed two byte length of the location - 0x11, 0x00, // DW_OP_constu 0 - 0x9f, // DW_OP_stack_value - // Padding data - //0x99, 0x99, 0x99, 0x99 - ]; - let data_buf = [0x11, 0x00, 0x9f]; - let expected_data = EndianSlice::new(&data_buf, LittleEndian); - let debug_loc = DebugLoc::new(&buf, LittleEndian); - let debug_loclists = DebugLocLists::new(&[], LittleEndian); - let loclists = LocationLists::new(debug_loc, debug_loclists); - let debug_addr = - &DebugAddr::from(EndianSlice::new(&[0x01, 0x02, 0x03, 0x04], LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - - // An invalid location range. - let mut locations = loclists - .locations_dwo( - LocationListsOffset(0x0), - encoding, - 0, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!( - locations.next(), - Ok(Some(LocationListEntry { - range: Range { - begin: 0x0403_0201, - end: 0x0403_0209 - }, - data: Expression(expected_data), - })) - ); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/lookup.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/lookup.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/lookup.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/lookup.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ -use core::marker::PhantomData; - -use crate::common::{DebugInfoOffset, Format}; -use crate::read::{parse_debug_info_offset, Error, Reader, ReaderOffset, Result, UnitOffset}; - -// The various "Accelerated Access" sections (DWARF standard v4 Section 6.1) all have -// similar structures. 
They consist of a header with metadata and an offset into the -// .debug_info section for the entire compilation unit, and a series -// of following entries that list addresses (for .debug_aranges) or names -// (for .debug_pubnames and .debug_pubtypes) that are covered. -// -// Because these three tables all have similar structures, we abstract out some of -// the parsing mechanics. - -pub trait LookupParser { - /// The type of the produced header. - type Header; - /// The type of the produced entry. - type Entry; - - /// Parse a header from `input`. Returns a tuple of `input` sliced to contain just the entries - /// corresponding to this header (without the header itself), and the parsed representation of - /// the header itself. - fn parse_header(input: &mut R) -> Result<(R, Self::Header)>; - - /// Parse a single entry from `input`. Returns either a parsed representation of the entry - /// or None if `input` is exhausted. - fn parse_entry(input: &mut R, header: &Self::Header) -> Result>; -} - -#[derive(Clone, Debug)] -pub struct DebugLookup -where - R: Reader, - Parser: LookupParser, -{ - input_buffer: R, - phantom: PhantomData, -} - -impl From for DebugLookup -where - R: Reader, - Parser: LookupParser, -{ - fn from(input_buffer: R) -> Self { - DebugLookup { - input_buffer, - phantom: PhantomData, - } - } -} - -impl DebugLookup -where - R: Reader, - Parser: LookupParser, -{ - pub fn items(&self) -> LookupEntryIter { - LookupEntryIter { - current_set: None, - remaining_input: self.input_buffer.clone(), - } - } - - pub fn reader(&self) -> &R { - &self.input_buffer - } -} - -#[derive(Clone, Debug)] -pub struct LookupEntryIter -where - R: Reader, - Parser: LookupParser, -{ - current_set: Option<(R, Parser::Header)>, // Only none at the very beginning and end. - remaining_input: R, -} - -impl LookupEntryIter -where - R: Reader, - Parser: LookupParser, -{ - /// Advance the iterator and return the next entry. 
- /// - /// Returns the newly parsed entry as `Ok(Some(Parser::Entry))`. Returns - /// `Ok(None)` when iteration is complete and all entries have already been - /// parsed and yielded. If an error occurs while parsing the next entry, - /// then this error is returned as `Err(e)`, and all subsequent calls return - /// `Ok(None)`. - /// - /// Can be [used with `FallibleIterator`](./index.html#using-with-fallibleiterator). - pub fn next(&mut self) -> Result> { - loop { - if let Some((ref mut input, ref header)) = self.current_set { - if !input.is_empty() { - match Parser::parse_entry(input, header) { - Ok(Some(entry)) => return Ok(Some(entry)), - Ok(None) => {} - Err(e) => { - input.empty(); - self.remaining_input.empty(); - return Err(e); - } - } - } - } - if self.remaining_input.is_empty() { - self.current_set = None; - return Ok(None); - } - match Parser::parse_header(&mut self.remaining_input) { - Ok(set) => { - self.current_set = Some(set); - } - Err(e) => { - self.current_set = None; - self.remaining_input.empty(); - return Err(e); - } - } - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct PubStuffHeader { - format: Format, - length: T, - version: u16, - unit_offset: DebugInfoOffset, - unit_length: T, -} - -pub trait PubStuffEntry { - fn new( - die_offset: UnitOffset, - name: R, - unit_header_offset: DebugInfoOffset, - ) -> Self; -} - -#[derive(Clone, Debug)] -pub struct PubStuffParser -where - R: Reader, - Entry: PubStuffEntry, -{ - // This struct is never instantiated. - phantom: PhantomData<(R, Entry)>, -} - -impl LookupParser for PubStuffParser -where - R: Reader, - Entry: PubStuffEntry, -{ - type Header = PubStuffHeader; - type Entry = Entry; - - /// Parse an pubthings set header. Returns a tuple of the - /// pubthings to be parsed for this set, and the newly created PubThingHeader struct. 
- fn parse_header(input: &mut R) -> Result<(R, Self::Header)> { - let (length, format) = input.read_initial_length()?; - let mut rest = input.split(length)?; - - let version = rest.read_u16()?; - if version != 2 { - return Err(Error::UnknownVersion(u64::from(version))); - } - - let unit_offset = parse_debug_info_offset(&mut rest, format)?; - let unit_length = rest.read_length(format)?; - - let header = PubStuffHeader { - format, - length, - version, - unit_offset, - unit_length, - }; - Ok((rest, header)) - } - - /// Parse a single pubthing. Return `None` for the null pubthing, `Some` for an actual pubthing. - fn parse_entry(input: &mut R, header: &Self::Header) -> Result> { - let offset = input.read_offset(header.format)?; - if offset.into_u64() == 0 { - input.empty(); - Ok(None) - } else { - let name = input.read_null_terminated_slice()?; - Ok(Some(Self::Entry::new( - UnitOffset(offset), - name, - header.unit_offset, - ))) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/mod.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/mod.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,827 +0,0 @@ -//! Read DWARF debugging information. -//! -//! * [Example Usage](#example-usage) -//! * [API Structure](#api-structure) -//! * [Using with `FallibleIterator`](#using-with-fallibleiterator) -//! -//! ## Example Usage -//! -//! Print out all of the functions in the debuggee program: -//! -//! ```rust,no_run -//! # fn example() -> Result<(), gimli::Error> { -//! # type R = gimli::EndianSlice<'static, gimli::LittleEndian>; -//! # let get_file_section_reader = |name| -> Result { unimplemented!() }; -//! # let get_sup_file_section_reader = |name| -> Result { unimplemented!() }; -//! // Read the DWARF sections with whatever object loader you're using. -//! // These closures should return a `Reader` instance (e.g. `EndianSlice`). 
-//! let loader = |section: gimli::SectionId| { get_file_section_reader(section.name()) }; -//! let sup_loader = |section: gimli::SectionId| { get_sup_file_section_reader(section.name()) }; -//! let mut dwarf = gimli::Dwarf::load(loader)?; -//! dwarf.load_sup(sup_loader)?; -//! -//! // Iterate over all compilation units. -//! let mut iter = dwarf.units(); -//! while let Some(header) = iter.next()? { -//! // Parse the abbreviations and other information for this compilation unit. -//! let unit = dwarf.unit(header)?; -//! -//! // Iterate over all of this compilation unit's entries. -//! let mut entries = unit.entries(); -//! while let Some((_, entry)) = entries.next_dfs()? { -//! // If we find an entry for a function, print it. -//! if entry.tag() == gimli::DW_TAG_subprogram { -//! println!("Found a function: {:?}", entry); -//! } -//! } -//! } -//! # unreachable!() -//! # } -//! ``` -//! -//! Full example programs: -//! -//! * [A simple parser](https://github.com/gimli-rs/gimli/blob/master/crates/examples/src/bin/simple.rs) -//! -//! * [A `dwarfdump` -//! clone](https://github.com/gimli-rs/gimli/blob/master/crates/examples/src/bin/dwarfdump.rs) -//! -//! * [An `addr2line` clone](https://github.com/gimli-rs/addr2line) -//! -//! * [`ddbug`](https://github.com/gimli-rs/ddbug), a utility giving insight into -//! code generation by making debugging information readable -//! -//! * [`dwprod`](https://github.com/fitzgen/dwprod), a tiny utility to list the -//! compilers used to create each compilation unit within a shared library or -//! executable (via `DW_AT_producer`) -//! -//! * [`dwarf-validate`](https://github.com/gimli-rs/gimli/blob/master/crates/examples/src/bin/dwarf-validate.rs), -//! a program to validate the integrity of some DWARF and its references -//! between sections and compilation units. -//! -//! ## API Structure -//! -//! * Basic familiarity with DWARF is assumed. -//! -//! * The [`Dwarf`](./struct.Dwarf.html) type contains the commonly used DWARF -//! 
sections. It has methods that simplify access to debugging data that spans -//! multiple sections. Use of this type is optional, but recommended. -//! -//! * Each section gets its own type. Consider these types the entry points to -//! the library: -//! -//! * [`DebugAbbrev`](./struct.DebugAbbrev.html): The `.debug_abbrev` section. -//! -//! * [`DebugAddr`](./struct.DebugAddr.html): The `.debug_addr` section. -//! -//! * [`DebugAranges`](./struct.DebugAranges.html): The `.debug_aranges` -//! section. -//! -//! * [`DebugFrame`](./struct.DebugFrame.html): The `.debug_frame` section. -//! -//! * [`DebugInfo`](./struct.DebugInfo.html): The `.debug_info` section. -//! -//! * [`DebugLine`](./struct.DebugLine.html): The `.debug_line` section. -//! -//! * [`DebugLineStr`](./struct.DebugLineStr.html): The `.debug_line_str` section. -//! -//! * [`DebugLoc`](./struct.DebugLoc.html): The `.debug_loc` section. -//! -//! * [`DebugLocLists`](./struct.DebugLocLists.html): The `.debug_loclists` section. -//! -//! * [`DebugPubNames`](./struct.DebugPubNames.html): The `.debug_pubnames` -//! section. -//! -//! * [`DebugPubTypes`](./struct.DebugPubTypes.html): The `.debug_pubtypes` -//! section. -//! -//! * [`DebugRanges`](./struct.DebugRanges.html): The `.debug_ranges` section. -//! -//! * [`DebugRngLists`](./struct.DebugRngLists.html): The `.debug_rnglists` section. -//! -//! * [`DebugStr`](./struct.DebugStr.html): The `.debug_str` section. -//! -//! * [`DebugStrOffsets`](./struct.DebugStrOffsets.html): The `.debug_str_offsets` section. -//! -//! * [`DebugTypes`](./struct.DebugTypes.html): The `.debug_types` section. -//! -//! * [`DebugCuIndex`](./struct.DebugCuIndex.html): The `.debug_cu_index` section. -//! -//! * [`DebugTuIndex`](./struct.DebugTuIndex.html): The `.debug_tu_index` section. -//! -//! * [`EhFrame`](./struct.EhFrame.html): The `.eh_frame` section. -//! -//! * [`EhFrameHdr`](./struct.EhFrameHdr.html): The `.eh_frame_hdr` section. -//! -//! 
* Each section type exposes methods for accessing the debugging data encoded -//! in that section. For example, the [`DebugInfo`](./struct.DebugInfo.html) -//! struct has the [`units`](./struct.DebugInfo.html#method.units) method for -//! iterating over the compilation units defined within it. -//! -//! * Offsets into a section are strongly typed: an offset into `.debug_info` is -//! the [`DebugInfoOffset`](./struct.DebugInfoOffset.html) type. It cannot be -//! used to index into the [`DebugLine`](./struct.DebugLine.html) type because -//! `DebugLine` represents the `.debug_line` section. There are similar types -//! for offsets relative to a compilation unit rather than a section. -//! -//! ## Using with `FallibleIterator` -//! -//! The standard library's `Iterator` trait and related APIs do not play well -//! with iterators where the `next` operation is fallible. One can make the -//! `Iterator`'s associated `Item` type be a `Result`, however the -//! provided methods cannot gracefully handle the case when an `Err` is -//! returned. -//! -//! This situation led to the -//! [`fallible-iterator`](https://crates.io/crates/fallible-iterator) crate's -//! existence. You can read more of the rationale for its existence in its -//! docs. The crate provides the helpers you have come to expect (eg `map`, -//! `filter`, etc) for iterators that can fail. -//! -//! `gimli`'s many lazy parsing iterators are a perfect match for the -//! `fallible-iterator` crate's `FallibleIterator` trait because parsing is not -//! done eagerly. Parse errors later in the input might only be discovered after -//! having iterated through many items. -//! -//! To use `gimli` iterators with `FallibleIterator`, import the crate and trait -//! into your code: -//! -//! ``` -//! # #[cfg(feature = "fallible-iterator")] -//! # fn foo() { -//! // Use the `FallibleIterator` trait so its methods are in scope! -//! use fallible_iterator::FallibleIterator; -//! 
use gimli::{DebugAranges, EndianSlice, LittleEndian}; -//! -//! fn find_sum_of_address_range_lengths(aranges: DebugAranges>) -//! -> gimli::Result -//! { -//! // `DebugAranges::headers` returns a `FallibleIterator`! -//! aranges.headers() -//! // `flat_map` is provided by `FallibleIterator`! -//! .flat_map(|header| Ok(header.entries())) -//! // `map` is provided by `FallibleIterator`! -//! .map(|arange| Ok(arange.length())) -//! // `fold` is provided by `FallibleIterator`! -//! .fold(0, |sum, len| Ok(sum + len)) -//! } -//! # } -//! # fn main() {} -//! ``` - -use core::fmt::{self, Debug}; -use core::result; -#[cfg(feature = "std")] -use std::{error, io}; - -use crate::common::{Register, SectionId}; -use crate::constants; - -mod util; -pub use util::*; - -mod addr; -pub use self::addr::*; - -mod cfi; -pub use self::cfi::*; - -#[cfg(feature = "read")] -mod dwarf; -#[cfg(feature = "read")] -pub use self::dwarf::*; - -mod endian_slice; -pub use self::endian_slice::*; - -#[cfg(feature = "endian-reader")] -mod endian_reader; -#[cfg(feature = "endian-reader")] -pub use self::endian_reader::*; - -mod reader; -pub use self::reader::*; - -#[cfg(feature = "read")] -mod abbrev; -#[cfg(feature = "read")] -pub use self::abbrev::*; - -mod aranges; -pub use self::aranges::*; - -mod index; -pub use self::index::*; - -#[cfg(feature = "read")] -mod line; -#[cfg(feature = "read")] -pub use self::line::*; - -mod lists; - -mod loclists; -pub use self::loclists::*; - -#[cfg(feature = "read")] -mod lookup; - -mod op; -pub use self::op::*; - -#[cfg(feature = "read")] -mod pubnames; -#[cfg(feature = "read")] -pub use self::pubnames::*; - -#[cfg(feature = "read")] -mod pubtypes; -#[cfg(feature = "read")] -pub use self::pubtypes::*; - -mod rnglists; -pub use self::rnglists::*; - -mod str; -pub use self::str::*; - -/// An offset into the current compilation or type unit. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)] -pub struct UnitOffset(pub T); - -#[cfg(feature = "read")] -mod unit; -#[cfg(feature = "read")] -pub use self::unit::*; - -mod value; -pub use self::value::*; - -/// Indicates that storage should be allocated on heap. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct StoreOnHeap; - -/// `EndianBuf` has been renamed to `EndianSlice`. For ease of upgrading across -/// `gimli` versions, we export this type alias. -#[deprecated(note = "EndianBuf has been renamed to EndianSlice, use that instead.")] -pub type EndianBuf<'input, Endian> = EndianSlice<'input, Endian>; - -/// An error that occurred when parsing. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Error { - /// An I/O error occurred while reading. - Io, - /// Found a PC relative pointer, but the section base is undefined. - PcRelativePointerButSectionBaseIsUndefined, - /// Found a `.text` relative pointer, but the `.text` base is undefined. - TextRelativePointerButTextBaseIsUndefined, - /// Found a data relative pointer, but the data base is undefined. - DataRelativePointerButDataBaseIsUndefined, - /// Found a function relative pointer in a context that does not have a - /// function base. - FuncRelativePointerInBadContext, - /// Cannot parse a pointer with a `DW_EH_PE_omit` encoding. - CannotParseOmitPointerEncoding, - /// An error parsing an unsigned LEB128 value. - BadUnsignedLeb128, - /// An error parsing a signed LEB128 value. - BadSignedLeb128, - /// An abbreviation declared that its tag is zero, but zero is reserved for - /// null records. - AbbreviationTagZero, - /// An attribute specification declared that its form is zero, but zero is - /// reserved for null records. - AttributeFormZero, - /// The abbreviation's has-children byte was not one of - /// `DW_CHILDREN_{yes,no}`. - BadHasChildren, - /// The specified length is impossible. - BadLength, - /// Found an unknown `DW_FORM_*` type. 
- UnknownForm, - /// Expected a zero, found something else. - ExpectedZero, - /// Found an abbreviation code that has already been used. - DuplicateAbbreviationCode, - /// Found a duplicate arange. - DuplicateArange, - /// Found an unknown reserved length value. - UnknownReservedLength, - /// Found an unknown DWARF version. - UnknownVersion(u64), - /// Found a record with an unknown abbreviation code. - UnknownAbbreviation, - /// Hit the end of input before it was expected. - UnexpectedEof(ReaderOffsetId), - /// Read a null entry before it was expected. - UnexpectedNull, - /// Found an unknown standard opcode. - UnknownStandardOpcode(constants::DwLns), - /// Found an unknown extended opcode. - UnknownExtendedOpcode(constants::DwLne), - /// The specified address size is not supported. - UnsupportedAddressSize(u8), - /// The specified offset size is not supported. - UnsupportedOffsetSize(u8), - /// The specified field size is not supported. - UnsupportedFieldSize(u8), - /// The minimum instruction length must not be zero. - MinimumInstructionLengthZero, - /// The maximum operations per instruction must not be zero. - MaximumOperationsPerInstructionZero, - /// The line range must not be zero. - LineRangeZero, - /// The opcode base must not be zero. - OpcodeBaseZero, - /// Found an invalid UTF-8 string. - BadUtf8, - /// Expected to find the CIE ID, but found something else. - NotCieId, - /// Expected to find a pointer to a CIE, but found the CIE ID instead. - NotCiePointer, - /// Expected to find a pointer to an FDE, but found a CIE instead. - NotFdePointer, - /// Invalid branch target for a DW_OP_bra or DW_OP_skip. - BadBranchTarget(u64), - /// DW_OP_push_object_address used but no address passed in. - InvalidPushObjectAddress, - /// Not enough items on the stack when evaluating an expression. - NotEnoughStackItems, - /// Too many iterations to compute the expression. 
- TooManyIterations, - /// An unrecognized operation was found while parsing a DWARF - /// expression. - InvalidExpression(constants::DwOp), - /// An unsupported operation was found while evaluating a DWARF expression. - UnsupportedEvaluation, - /// The expression had a piece followed by an expression - /// terminator without a piece. - InvalidPiece, - /// An expression-terminating operation was followed by something - /// other than the end of the expression or a piece operation. - InvalidExpressionTerminator(u64), - /// Division or modulus by zero when evaluating an expression. - DivisionByZero, - /// An expression operation used mismatching types. - TypeMismatch, - /// An expression operation required an integral type but saw a - /// floating point type. - IntegralTypeRequired, - /// An expression operation used types that are not supported. - UnsupportedTypeOperation, - /// The shift value in an expression must be a non-negative integer. - InvalidShiftExpression, - /// An unknown DW_CFA_* instruction. - UnknownCallFrameInstruction(constants::DwCfa), - /// The end of an address range was before the beginning. - InvalidAddressRange, - /// The end offset of a loc list entry was before the beginning. - InvalidLocationAddressRange, - /// Encountered a call frame instruction in a context in which it is not - /// valid. - CfiInstructionInInvalidContext, - /// When evaluating call frame instructions, found a `DW_CFA_restore_state` - /// stack pop instruction, but the stack was empty, and had nothing to pop. - PopWithEmptyStack, - /// Do not have unwind info for the given address. - NoUnwindInfoForAddress, - /// An offset value was larger than the maximum supported value. - UnsupportedOffset, - /// The given pointer encoding is either unknown or invalid. - UnknownPointerEncoding, - /// Did not find an entry at the given offset. - NoEntryAtGivenOffset, - /// The given offset is out of bounds. - OffsetOutOfBounds, - /// Found an unknown CFI augmentation. 
- UnknownAugmentation, - /// We do not support the given pointer encoding yet. - UnsupportedPointerEncoding, - /// Registers larger than `u16` are not supported. - UnsupportedRegister(u64), - /// The CFI program defined more register rules than we have storage for. - TooManyRegisterRules, - /// Attempted to push onto the CFI or evaluation stack, but it was already - /// at full capacity. - StackFull, - /// The `.eh_frame_hdr` binary search table claims to be variable-length encoded, - /// which makes binary search impossible. - VariableLengthSearchTable, - /// The `DW_UT_*` value for this unit is not supported yet. - UnsupportedUnitType, - /// Ranges using AddressIndex are not supported yet. - UnsupportedAddressIndex, - /// Nonzero segment selector sizes aren't supported yet. - UnsupportedSegmentSize, - /// A compilation unit or type unit is missing its top level DIE. - MissingUnitDie, - /// A DIE attribute used an unsupported form. - UnsupportedAttributeForm, - /// Missing DW_LNCT_path in file entry format. - MissingFileEntryFormatPath, - /// Expected an attribute value to be a string form. - ExpectedStringAttributeValue, - /// `DW_FORM_implicit_const` used in an invalid context. - InvalidImplicitConst, - /// Invalid section count in `.dwp` index. - InvalidIndexSectionCount, - /// Invalid slot count in `.dwp` index. - InvalidIndexSlotCount, - /// Invalid hash row in `.dwp` index. - InvalidIndexRow, - /// Unknown section type in `.dwp` index. - UnknownIndexSection, -} - -impl fmt::Display for Error { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> ::core::result::Result<(), fmt::Error> { - write!(f, "{}", self.description()) - } -} - -impl Error { - /// A short description of the error. - pub fn description(&self) -> &str { - match *self { - Error::Io => "An I/O error occurred while reading.", - Error::PcRelativePointerButSectionBaseIsUndefined => { - "Found a PC relative pointer, but the section base is undefined." 
- } - Error::TextRelativePointerButTextBaseIsUndefined => { - "Found a `.text` relative pointer, but the `.text` base is undefined." - } - Error::DataRelativePointerButDataBaseIsUndefined => { - "Found a data relative pointer, but the data base is undefined." - } - Error::FuncRelativePointerInBadContext => { - "Found a function relative pointer in a context that does not have a function base." - } - Error::CannotParseOmitPointerEncoding => { - "Cannot parse a pointer with a `DW_EH_PE_omit` encoding." - } - Error::BadUnsignedLeb128 => "An error parsing an unsigned LEB128 value", - Error::BadSignedLeb128 => "An error parsing a signed LEB128 value", - Error::AbbreviationTagZero => { - "An abbreviation declared that its tag is zero, - but zero is reserved for null records" - } - Error::AttributeFormZero => { - "An attribute specification declared that its form is zero, - but zero is reserved for null records" - } - Error::BadHasChildren => { - "The abbreviation's has-children byte was not one of - `DW_CHILDREN_{yes,no}`" - } - Error::BadLength => "The specified length is impossible", - Error::UnknownForm => "Found an unknown `DW_FORM_*` type", - Error::ExpectedZero => "Expected a zero, found something else", - Error::DuplicateAbbreviationCode => { - "Found an abbreviation code that has already been used" - } - Error::DuplicateArange => "Found a duplicate arange", - Error::UnknownReservedLength => "Found an unknown reserved length value", - Error::UnknownVersion(_) => "Found an unknown DWARF version", - Error::UnknownAbbreviation => "Found a record with an unknown abbreviation code", - Error::UnexpectedEof(_) => "Hit the end of input before it was expected", - Error::UnexpectedNull => "Read a null entry before it was expected.", - Error::UnknownStandardOpcode(_) => "Found an unknown standard opcode", - Error::UnknownExtendedOpcode(_) => "Found an unknown extended opcode", - Error::UnsupportedAddressSize(_) => "The specified address size is not supported", - 
Error::UnsupportedOffsetSize(_) => "The specified offset size is not supported", - Error::UnsupportedFieldSize(_) => "The specified field size is not supported", - Error::MinimumInstructionLengthZero => { - "The minimum instruction length must not be zero." - } - Error::MaximumOperationsPerInstructionZero => { - "The maximum operations per instruction must not be zero." - } - Error::LineRangeZero => "The line range must not be zero.", - Error::OpcodeBaseZero => "The opcode base must not be zero.", - Error::BadUtf8 => "Found an invalid UTF-8 string.", - Error::NotCieId => "Expected to find the CIE ID, but found something else.", - Error::NotCiePointer => "Expected to find a CIE pointer, but found the CIE ID instead.", - Error::NotFdePointer => { - "Expected to find an FDE pointer, but found a CIE pointer instead." - } - Error::BadBranchTarget(_) => "Invalid branch target in DWARF expression", - Error::InvalidPushObjectAddress => { - "DW_OP_push_object_address used but no object address given" - } - Error::NotEnoughStackItems => "Not enough items on stack when evaluating expression", - Error::TooManyIterations => "Too many iterations to evaluate DWARF expression", - Error::InvalidExpression(_) => "Invalid opcode in DWARF expression", - Error::UnsupportedEvaluation => "Unsupported operation when evaluating expression", - Error::InvalidPiece => { - "DWARF expression has piece followed by non-piece expression at end" - } - Error::InvalidExpressionTerminator(_) => "Expected DW_OP_piece or DW_OP_bit_piece", - Error::DivisionByZero => "Division or modulus by zero when evaluating expression", - Error::TypeMismatch => "Type mismatch when evaluating expression", - Error::IntegralTypeRequired => "Integral type expected when evaluating expression", - Error::UnsupportedTypeOperation => { - "An expression operation used types that are not supported" - } - Error::InvalidShiftExpression => { - "The shift value in an expression must be a non-negative integer." 
- } - Error::UnknownCallFrameInstruction(_) => "An unknown DW_CFA_* instructiion", - Error::InvalidAddressRange => { - "The end of an address range must not be before the beginning." - } - Error::InvalidLocationAddressRange => { - "The end offset of a location list entry must not be before the beginning." - } - Error::CfiInstructionInInvalidContext => { - "Encountered a call frame instruction in a context in which it is not valid." - } - Error::PopWithEmptyStack => { - "When evaluating call frame instructions, found a `DW_CFA_restore_state` stack pop \ - instruction, but the stack was empty, and had nothing to pop." - } - Error::NoUnwindInfoForAddress => "Do not have unwind info for the given address.", - Error::UnsupportedOffset => { - "An offset value was larger than the maximum supported value." - } - Error::UnknownPointerEncoding => { - "The given pointer encoding is either unknown or invalid." - } - Error::NoEntryAtGivenOffset => "Did not find an entry at the given offset.", - Error::OffsetOutOfBounds => "The given offset is out of bounds.", - Error::UnknownAugmentation => "Found an unknown CFI augmentation.", - Error::UnsupportedPointerEncoding => { - "We do not support the given pointer encoding yet." - } - Error::UnsupportedRegister(_) => "Registers larger than `u16` are not supported.", - Error::TooManyRegisterRules => { - "The CFI program defined more register rules than we have storage for." - } - Error::StackFull => { - "Attempted to push onto the CFI stack, but it was already at full capacity." - } - Error::VariableLengthSearchTable => { - "The `.eh_frame_hdr` binary search table claims to be variable-length encoded, \ - which makes binary search impossible." 
- } - Error::UnsupportedUnitType => "The `DW_UT_*` value for this unit is not supported yet", - Error::UnsupportedAddressIndex => "Ranges involving AddressIndex are not supported yet", - Error::UnsupportedSegmentSize => "Nonzero segment size not supported yet", - Error::MissingUnitDie => { - "A compilation unit or type unit is missing its top level DIE." - } - Error::UnsupportedAttributeForm => "A DIE attribute used an unsupported form.", - Error::MissingFileEntryFormatPath => "Missing DW_LNCT_path in file entry format.", - Error::ExpectedStringAttributeValue => { - "Expected an attribute value to be a string form." - } - Error::InvalidImplicitConst => "DW_FORM_implicit_const used in an invalid context.", - Error::InvalidIndexSectionCount => "Invalid section count in `.dwp` index.", - Error::InvalidIndexSlotCount => "Invalid slot count in `.dwp` index.", - Error::InvalidIndexRow => "Invalid hash row in `.dwp` index.", - Error::UnknownIndexSection => "Unknown section type in `.dwp` index.", - } - } -} - -#[cfg(feature = "std")] -impl error::Error for Error {} - -#[cfg(feature = "std")] -impl From for Error { - fn from(_: io::Error) -> Self { - Error::Io - } -} - -/// The result of a parse. -pub type Result = result::Result; - -/// A convenience trait for loading DWARF sections from object files. To be -/// used like: -/// -/// ``` -/// use gimli::{DebugInfo, EndianSlice, LittleEndian, Reader, Section}; -/// -/// let buf = [0x00, 0x01, 0x02, 0x03]; -/// let reader = EndianSlice::new(&buf, LittleEndian); -/// let loader = |name| -> Result<_, ()> { Ok(reader) }; -/// -/// let debug_info: DebugInfo<_> = Section::load(loader).unwrap(); -/// ``` -pub trait Section: From { - /// Returns the section id for this type. - fn id() -> SectionId; - - /// Returns the ELF section name for this type. - fn section_name() -> &'static str { - Self::id().name() - } - - /// Returns the ELF section name (if any) for this type when used in a dwo - /// file. 
- fn dwo_section_name() -> Option<&'static str> { - Self::id().dwo_name() - } - - /// Returns the XCOFF section name (if any) for this type when used in a XCOFF - /// file. - fn xcoff_section_name() -> Option<&'static str> { - Self::id().xcoff_name() - } - - /// Try to load the section using the given loader function. - fn load(f: F) -> core::result::Result - where - F: FnOnce(SectionId) -> core::result::Result, - { - f(Self::id()).map(From::from) - } - - /// Returns the `Reader` for this section. - fn reader(&self) -> &R - where - R: Reader; - - /// Returns the subrange of the section that is the contribution of - /// a unit in a `.dwp` file. - fn dwp_range(&self, offset: u32, size: u32) -> Result - where - R: Reader, - { - let mut data = self.reader().clone(); - data.skip(R::Offset::from_u32(offset))?; - data.truncate(R::Offset::from_u32(size))?; - Ok(data.into()) - } - - /// Returns the `Reader` for this section. - fn lookup_offset_id(&self, id: ReaderOffsetId) -> Option<(SectionId, R::Offset)> - where - R: Reader, - { - self.reader() - .lookup_offset_id(id) - .map(|offset| (Self::id(), offset)) - } -} - -impl Register { - pub(crate) fn from_u64(x: u64) -> Result { - let y = x as u16; - if u64::from(y) == x { - Ok(Register(y)) - } else { - Err(Error::UnsupportedRegister(x)) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::common::Format; - use crate::endianity::LittleEndian; - use test_assembler::{Endian, Section}; - - #[test] - fn test_parse_initial_length_32_ok() { - let section = Section::with_endian(Endian::Little).L32(0x7856_3412); - let buf = section.get_contents().unwrap(); - - let input = &mut EndianSlice::new(&buf, LittleEndian); - match input.read_initial_length() { - Ok((length, format)) => { - assert_eq!(input.len(), 0); - assert_eq!(format, Format::Dwarf32); - assert_eq!(0x7856_3412, length); - } - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - } - - #[test] - fn test_parse_initial_length_64_ok() { - let section 
= Section::with_endian(Endian::Little) - // Dwarf_64_INITIAL_UNIT_LENGTH - .L32(0xffff_ffff) - // Actual length - .L64(0xffde_bc9a_7856_3412); - let buf = section.get_contents().unwrap(); - let input = &mut EndianSlice::new(&buf, LittleEndian); - - #[cfg(target_pointer_width = "64")] - match input.read_initial_length() { - Ok((length, format)) => { - assert_eq!(input.len(), 0); - assert_eq!(format, Format::Dwarf64); - assert_eq!(0xffde_bc9a_7856_3412, length); - } - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - - #[cfg(target_pointer_width = "32")] - match input.read_initial_length() { - Err(Error::UnsupportedOffset) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_initial_length_unknown_reserved_value() { - let section = Section::with_endian(Endian::Little).L32(0xffff_fffe); - let buf = section.get_contents().unwrap(); - - let input = &mut EndianSlice::new(&buf, LittleEndian); - match input.read_initial_length() { - Err(Error::UnknownReservedLength) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_initial_length_incomplete() { - let buf = [0xff, 0xff, 0xff]; // Need at least 4 bytes. - - let input = &mut EndianSlice::new(&buf, LittleEndian); - match input.read_initial_length() { - Err(Error::UnexpectedEof(_)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_initial_length_64_incomplete() { - let section = Section::with_endian(Endian::Little) - // Dwarf_64_INITIAL_UNIT_LENGTH - .L32(0xffff_ffff) - // Actual length is not long enough. 
- .L32(0x7856_3412); - let buf = section.get_contents().unwrap(); - - let input = &mut EndianSlice::new(&buf, LittleEndian); - match input.read_initial_length() { - Err(Error::UnexpectedEof(_)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_offset_32() { - let section = Section::with_endian(Endian::Little).L32(0x0123_4567); - let buf = section.get_contents().unwrap(); - - let input = &mut EndianSlice::new(&buf, LittleEndian); - match input.read_offset(Format::Dwarf32) { - Ok(val) => { - assert_eq!(input.len(), 0); - assert_eq!(val, 0x0123_4567); - } - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_offset_64_small() { - let section = Section::with_endian(Endian::Little).L64(0x0123_4567); - let buf = section.get_contents().unwrap(); - - let input = &mut EndianSlice::new(&buf, LittleEndian); - match input.read_offset(Format::Dwarf64) { - Ok(val) => { - assert_eq!(input.len(), 0); - assert_eq!(val, 0x0123_4567); - } - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_offset_64_large() { - let section = Section::with_endian(Endian::Little).L64(0x0123_4567_89ab_cdef); - let buf = section.get_contents().unwrap(); - - let input = &mut EndianSlice::new(&buf, LittleEndian); - match input.read_offset(Format::Dwarf64) { - Ok(val) => { - assert_eq!(input.len(), 0); - assert_eq!(val, 0x0123_4567_89ab_cdef); - } - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - #[cfg(target_pointer_width = "32")] - fn test_parse_offset_64_large() { - let section = Section::with_endian(Endian::Little).L64(0x0123_4567_89ab_cdef); - let buf = section.get_contents().unwrap(); - - let input = &mut EndianSlice::new(&buf, LittleEndian); - match input.read_offset(Format::Dwarf64) { - Err(Error::UnsupportedOffset) => assert!(true), - otherwise => panic!("Unexpected result: 
{:?}", otherwise), - }; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/op.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/op.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/op.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/op.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,4140 +0,0 @@ -//! Functions for parsing and evaluating DWARF expressions. - -#[cfg(feature = "read")] -use alloc::vec::Vec; -use core::mem; - -use super::util::{ArrayLike, ArrayVec}; -use crate::common::{DebugAddrIndex, DebugInfoOffset, Encoding, Register}; -use crate::constants; -use crate::read::{Error, Reader, ReaderOffset, Result, StoreOnHeap, UnitOffset, Value, ValueType}; - -/// A reference to a DIE, either relative to the current CU or -/// relative to the section. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum DieReference { - /// A CU-relative reference. - UnitRef(UnitOffset), - /// A section-relative reference. - DebugInfoRef(DebugInfoOffset), -} - -/// A single decoded DWARF expression operation. -/// -/// DWARF expression evaluation is done in two parts: first the raw -/// bytes of the next part of the expression are decoded; and then the -/// decoded operation is evaluated. This approach lets other -/// consumers inspect the DWARF expression without reimplementing the -/// decoding operation. -/// -/// Multiple DWARF opcodes may decode into a single `Operation`. For -/// example, both `DW_OP_deref` and `DW_OP_xderef` are represented -/// using `Operation::Deref`. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Operation::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// Dereference the topmost value of the stack. - Deref { - /// The DIE of the base type or 0 to indicate the generic type - base_type: UnitOffset, - /// The size of the data to dereference. - size: u8, - /// True if the dereference operation takes an address space - /// argument from the stack; false otherwise. 
- space: bool, - }, - /// Drop an item from the stack. - Drop, - /// Pick an item from the stack and push it on top of the stack. - /// This operation handles `DW_OP_pick`, `DW_OP_dup`, and - /// `DW_OP_over`. - Pick { - /// The index, from the top of the stack, of the item to copy. - index: u8, - }, - /// Swap the top two stack items. - Swap, - /// Rotate the top three stack items. - Rot, - /// Take the absolute value of the top of the stack. - Abs, - /// Bitwise `and` of the top two values on the stack. - And, - /// Divide the top two values on the stack. - Div, - /// Subtract the top two values on the stack. - Minus, - /// Modulus of the top two values on the stack. - Mod, - /// Multiply the top two values on the stack. - Mul, - /// Negate the top of the stack. - Neg, - /// Bitwise `not` of the top of the stack. - Not, - /// Bitwise `or` of the top two values on the stack. - Or, - /// Add the top two values on the stack. - Plus, - /// Add a constant to the topmost value on the stack. - PlusConstant { - /// The value to add. - value: u64, - }, - /// Logical left shift of the 2nd value on the stack by the number - /// of bits given by the topmost value on the stack. - Shl, - /// Right shift of the 2nd value on the stack by the number of - /// bits given by the topmost value on the stack. - Shr, - /// Arithmetic left shift of the 2nd value on the stack by the - /// number of bits given by the topmost value on the stack. - Shra, - /// Bitwise `xor` of the top two values on the stack. - Xor, - /// Branch to the target location if the top of stack is nonzero. - Bra { - /// The relative offset to the target bytecode. - target: i16, - }, - /// Compare the top two stack values for equality. - Eq, - /// Compare the top two stack values using `>=`. - Ge, - /// Compare the top two stack values using `>`. - Gt, - /// Compare the top two stack values using `<=`. - Le, - /// Compare the top two stack values using `<`. - Lt, - /// Compare the top two stack values using `!=`. 
- Ne, - /// Unconditional branch to the target location. - Skip { - /// The relative offset to the target bytecode. - target: i16, - }, - /// Push an unsigned constant value on the stack. This handles multiple - /// DWARF opcodes. - UnsignedConstant { - /// The value to push. - value: u64, - }, - /// Push a signed constant value on the stack. This handles multiple - /// DWARF opcodes. - SignedConstant { - /// The value to push. - value: i64, - }, - /// Indicate that this piece's location is in the given register. - /// - /// Completes the piece or expression. - Register { - /// The register number. - register: Register, - }, - /// Find the value of the given register, add the offset, and then - /// push the resulting sum on the stack. - RegisterOffset { - /// The register number. - register: Register, - /// The offset to add. - offset: i64, - /// The DIE of the base type or 0 to indicate the generic type - base_type: UnitOffset, - }, - /// Compute the frame base (using `DW_AT_frame_base`), add the - /// given offset, and then push the resulting sum on the stack. - FrameOffset { - /// The offset to add. - offset: i64, - }, - /// No operation. - Nop, - /// Push the object address on the stack. - PushObjectAddress, - /// Evaluate a DWARF expression as a subroutine. The expression - /// comes from the `DW_AT_location` attribute of the indicated - /// DIE. - Call { - /// The DIE to use. - offset: DieReference, - }, - /// Compute the address of a thread-local variable and push it on - /// the stack. - TLS, - /// Compute the call frame CFA and push it on the stack. - CallFrameCFA, - /// Terminate a piece. - Piece { - /// The size of this piece in bits. - size_in_bits: u64, - /// The bit offset of this piece. If `None`, then this piece - /// was specified using `DW_OP_piece` and should start at the - /// next byte boundary. - bit_offset: Option, - }, - /// The object has no location, but has a known constant value. - /// - /// Represents `DW_OP_implicit_value`. 
- /// Completes the piece or expression. - ImplicitValue { - /// The implicit value to use. - data: R, - }, - /// The object has no location, but its value is at the top of the stack. - /// - /// Represents `DW_OP_stack_value`. - /// Completes the piece or expression. - StackValue, - /// The object is a pointer to a value which has no actual location, - /// such as an implicit value or a stack value. - /// - /// Represents `DW_OP_implicit_pointer`. - /// Completes the piece or expression. - ImplicitPointer { - /// The `.debug_info` offset of the value that this is an implicit pointer into. - value: DebugInfoOffset, - /// The byte offset into the value that the implicit pointer points to. - byte_offset: i64, - }, - /// Evaluate an expression at the entry to the current subprogram, and push it on the stack. - /// - /// Represents `DW_OP_entry_value`. - EntryValue { - /// The expression to be evaluated. - expression: R, - }, - /// This represents a parameter that was optimized out. - /// - /// The offset points to the definition of the parameter, and is - /// matched to the `DW_TAG_GNU_call_site_parameter` in the caller that also - /// points to the same definition of the parameter. - /// - /// Represents `DW_OP_GNU_parameter_ref`. - ParameterRef { - /// The DIE to use. - offset: UnitOffset, - }, - /// Relocate the address if needed, and push it on the stack. - /// - /// Represents `DW_OP_addr`. - Address { - /// The offset to add. - address: u64, - }, - /// Read the address at the given index in `.debug_addr, relocate the address if needed, - /// and push it on the stack. - /// - /// Represents `DW_OP_addrx`. - AddressIndex { - /// The index of the address in `.debug_addr`. - index: DebugAddrIndex, - }, - /// Read the address at the given index in `.debug_addr, and push it on the stack. - /// Do not relocate the address. - /// - /// Represents `DW_OP_constx`. - ConstantIndex { - /// The index of the address in `.debug_addr`. 
- index: DebugAddrIndex, - }, - /// Interpret the value bytes as a constant of a given type, and push it on the stack. - /// - /// Represents `DW_OP_const_type`. - TypedLiteral { - /// The DIE of the base type. - base_type: UnitOffset, - /// The value bytes. - value: R, - }, - /// Pop the top stack entry, convert it to a different type, and push it on the stack. - /// - /// Represents `DW_OP_convert`. - Convert { - /// The DIE of the base type. - base_type: UnitOffset, - }, - /// Pop the top stack entry, reinterpret the bits in its value as a different type, - /// and push it on the stack. - /// - /// Represents `DW_OP_reinterpret`. - Reinterpret { - /// The DIE of the base type. - base_type: UnitOffset, - }, - /// The index of a local in the currently executing function. - /// - /// Represents `DW_OP_WASM_location 0x00`. - /// Completes the piece or expression. - WasmLocal { - /// The index of the local. - index: u32, - }, - /// The index of a global. - /// - /// Represents `DW_OP_WASM_location 0x01` or `DW_OP_WASM_location 0x03`. - /// Completes the piece or expression. - WasmGlobal { - /// The index of the global. - index: u32, - }, - /// The index of an item on the operand stack. - /// - /// Represents `DW_OP_WASM_location 0x02`. - /// Completes the piece or expression. - WasmStack { - /// The index of the stack item. 0 is the bottom of the operand stack. - index: u32, - }, -} - -#[derive(Debug)] -enum OperationEvaluationResult { - Piece, - Incomplete, - Complete { location: Location }, - Waiting(EvaluationWaiting, EvaluationResult), -} - -/// A single location of a piece of the result of a DWARF expression. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum Location::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// The piece is empty. Ordinarily this means the piece has been - /// optimized away. - Empty, - /// The piece is found in a register. - Register { - /// The register number. - register: Register, - }, - /// The piece is found in memory. 
- Address { - /// The address. - address: u64, - }, - /// The piece has no location but its value is known. - Value { - /// The value. - value: Value, - }, - /// The piece is represented by some constant bytes. - Bytes { - /// The value. - value: R, - }, - /// The piece is a pointer to a value which has no actual location. - ImplicitPointer { - /// The `.debug_info` offset of the value that this is an implicit pointer into. - value: DebugInfoOffset, - /// The byte offset into the value that the implicit pointer points to. - byte_offset: i64, - }, -} - -impl Location -where - R: Reader, - Offset: ReaderOffset, -{ - /// Return true if the piece is empty. - pub fn is_empty(&self) -> bool { - matches!(*self, Location::Empty) - } -} - -/// The description of a single piece of the result of a DWARF -/// expression. -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct Piece::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// If given, the size of the piece in bits. If `None`, there - /// must be only one piece whose size is all of the object. - pub size_in_bits: Option, - /// If given, the bit offset of the piece within the location. - /// If the location is a `Location::Register` or `Location::Value`, - /// then this offset is from the least significant bit end of - /// the register or value. - /// If the location is a `Location::Address` then the offset uses - /// the bit numbering and direction conventions of the language - /// and target system. - /// - /// If `None`, the piece starts at the location. If the - /// location is a register whose size is larger than the piece, - /// then placement within the register is defined by the ABI. - pub bit_offset: Option, - /// Where this piece is to be found. - pub location: Location, -} - -// A helper function to handle branch offsets. 
-fn compute_pc(pc: &R, bytecode: &R, offset: i16) -> Result { - let pc_offset = pc.offset_from(bytecode); - let new_pc_offset = pc_offset.wrapping_add(R::Offset::from_i16(offset)); - if new_pc_offset > bytecode.len() { - Err(Error::BadBranchTarget(new_pc_offset.into_u64())) - } else { - let mut new_pc = bytecode.clone(); - new_pc.skip(new_pc_offset)?; - Ok(new_pc) - } -} - -fn generic_type() -> UnitOffset { - UnitOffset(O::from_u64(0).unwrap()) -} - -impl Operation -where - R: Reader, - Offset: ReaderOffset, -{ - /// Parse a single DWARF expression operation. - /// - /// This is useful when examining a DWARF expression for reasons other - /// than direct evaluation. - /// - /// `bytes` points to a the operation to decode. It should point into - /// the same array as `bytecode`, which should be the entire - /// expression. - pub fn parse(bytes: &mut R, encoding: Encoding) -> Result> { - let opcode = bytes.read_u8()?; - let name = constants::DwOp(opcode); - match name { - constants::DW_OP_addr => { - let address = bytes.read_address(encoding.address_size)?; - Ok(Operation::Address { address }) - } - constants::DW_OP_deref => Ok(Operation::Deref { - base_type: generic_type(), - size: encoding.address_size, - space: false, - }), - constants::DW_OP_const1u => { - let value = bytes.read_u8()?; - Ok(Operation::UnsignedConstant { - value: u64::from(value), - }) - } - constants::DW_OP_const1s => { - let value = bytes.read_i8()?; - Ok(Operation::SignedConstant { - value: i64::from(value), - }) - } - constants::DW_OP_const2u => { - let value = bytes.read_u16()?; - Ok(Operation::UnsignedConstant { - value: u64::from(value), - }) - } - constants::DW_OP_const2s => { - let value = bytes.read_i16()?; - Ok(Operation::SignedConstant { - value: i64::from(value), - }) - } - constants::DW_OP_const4u => { - let value = bytes.read_u32()?; - Ok(Operation::UnsignedConstant { - value: u64::from(value), - }) - } - constants::DW_OP_const4s => { - let value = bytes.read_i32()?; - 
Ok(Operation::SignedConstant { - value: i64::from(value), - }) - } - constants::DW_OP_const8u => { - let value = bytes.read_u64()?; - Ok(Operation::UnsignedConstant { value }) - } - constants::DW_OP_const8s => { - let value = bytes.read_i64()?; - Ok(Operation::SignedConstant { value }) - } - constants::DW_OP_constu => { - let value = bytes.read_uleb128()?; - Ok(Operation::UnsignedConstant { value }) - } - constants::DW_OP_consts => { - let value = bytes.read_sleb128()?; - Ok(Operation::SignedConstant { value }) - } - constants::DW_OP_dup => Ok(Operation::Pick { index: 0 }), - constants::DW_OP_drop => Ok(Operation::Drop), - constants::DW_OP_over => Ok(Operation::Pick { index: 1 }), - constants::DW_OP_pick => { - let value = bytes.read_u8()?; - Ok(Operation::Pick { index: value }) - } - constants::DW_OP_swap => Ok(Operation::Swap), - constants::DW_OP_rot => Ok(Operation::Rot), - constants::DW_OP_xderef => Ok(Operation::Deref { - base_type: generic_type(), - size: encoding.address_size, - space: true, - }), - constants::DW_OP_abs => Ok(Operation::Abs), - constants::DW_OP_and => Ok(Operation::And), - constants::DW_OP_div => Ok(Operation::Div), - constants::DW_OP_minus => Ok(Operation::Minus), - constants::DW_OP_mod => Ok(Operation::Mod), - constants::DW_OP_mul => Ok(Operation::Mul), - constants::DW_OP_neg => Ok(Operation::Neg), - constants::DW_OP_not => Ok(Operation::Not), - constants::DW_OP_or => Ok(Operation::Or), - constants::DW_OP_plus => Ok(Operation::Plus), - constants::DW_OP_plus_uconst => { - let value = bytes.read_uleb128()?; - Ok(Operation::PlusConstant { value }) - } - constants::DW_OP_shl => Ok(Operation::Shl), - constants::DW_OP_shr => Ok(Operation::Shr), - constants::DW_OP_shra => Ok(Operation::Shra), - constants::DW_OP_xor => Ok(Operation::Xor), - constants::DW_OP_bra => { - let target = bytes.read_i16()?; - Ok(Operation::Bra { target }) - } - constants::DW_OP_eq => Ok(Operation::Eq), - constants::DW_OP_ge => Ok(Operation::Ge), - constants::DW_OP_gt => 
Ok(Operation::Gt), - constants::DW_OP_le => Ok(Operation::Le), - constants::DW_OP_lt => Ok(Operation::Lt), - constants::DW_OP_ne => Ok(Operation::Ne), - constants::DW_OP_skip => { - let target = bytes.read_i16()?; - Ok(Operation::Skip { target }) - } - constants::DW_OP_lit0 - | constants::DW_OP_lit1 - | constants::DW_OP_lit2 - | constants::DW_OP_lit3 - | constants::DW_OP_lit4 - | constants::DW_OP_lit5 - | constants::DW_OP_lit6 - | constants::DW_OP_lit7 - | constants::DW_OP_lit8 - | constants::DW_OP_lit9 - | constants::DW_OP_lit10 - | constants::DW_OP_lit11 - | constants::DW_OP_lit12 - | constants::DW_OP_lit13 - | constants::DW_OP_lit14 - | constants::DW_OP_lit15 - | constants::DW_OP_lit16 - | constants::DW_OP_lit17 - | constants::DW_OP_lit18 - | constants::DW_OP_lit19 - | constants::DW_OP_lit20 - | constants::DW_OP_lit21 - | constants::DW_OP_lit22 - | constants::DW_OP_lit23 - | constants::DW_OP_lit24 - | constants::DW_OP_lit25 - | constants::DW_OP_lit26 - | constants::DW_OP_lit27 - | constants::DW_OP_lit28 - | constants::DW_OP_lit29 - | constants::DW_OP_lit30 - | constants::DW_OP_lit31 => Ok(Operation::UnsignedConstant { - value: (opcode - constants::DW_OP_lit0.0).into(), - }), - constants::DW_OP_reg0 - | constants::DW_OP_reg1 - | constants::DW_OP_reg2 - | constants::DW_OP_reg3 - | constants::DW_OP_reg4 - | constants::DW_OP_reg5 - | constants::DW_OP_reg6 - | constants::DW_OP_reg7 - | constants::DW_OP_reg8 - | constants::DW_OP_reg9 - | constants::DW_OP_reg10 - | constants::DW_OP_reg11 - | constants::DW_OP_reg12 - | constants::DW_OP_reg13 - | constants::DW_OP_reg14 - | constants::DW_OP_reg15 - | constants::DW_OP_reg16 - | constants::DW_OP_reg17 - | constants::DW_OP_reg18 - | constants::DW_OP_reg19 - | constants::DW_OP_reg20 - | constants::DW_OP_reg21 - | constants::DW_OP_reg22 - | constants::DW_OP_reg23 - | constants::DW_OP_reg24 - | constants::DW_OP_reg25 - | constants::DW_OP_reg26 - | constants::DW_OP_reg27 - | constants::DW_OP_reg28 - | constants::DW_OP_reg29 - | 
constants::DW_OP_reg30 - | constants::DW_OP_reg31 => Ok(Operation::Register { - register: Register((opcode - constants::DW_OP_reg0.0).into()), - }), - constants::DW_OP_breg0 - | constants::DW_OP_breg1 - | constants::DW_OP_breg2 - | constants::DW_OP_breg3 - | constants::DW_OP_breg4 - | constants::DW_OP_breg5 - | constants::DW_OP_breg6 - | constants::DW_OP_breg7 - | constants::DW_OP_breg8 - | constants::DW_OP_breg9 - | constants::DW_OP_breg10 - | constants::DW_OP_breg11 - | constants::DW_OP_breg12 - | constants::DW_OP_breg13 - | constants::DW_OP_breg14 - | constants::DW_OP_breg15 - | constants::DW_OP_breg16 - | constants::DW_OP_breg17 - | constants::DW_OP_breg18 - | constants::DW_OP_breg19 - | constants::DW_OP_breg20 - | constants::DW_OP_breg21 - | constants::DW_OP_breg22 - | constants::DW_OP_breg23 - | constants::DW_OP_breg24 - | constants::DW_OP_breg25 - | constants::DW_OP_breg26 - | constants::DW_OP_breg27 - | constants::DW_OP_breg28 - | constants::DW_OP_breg29 - | constants::DW_OP_breg30 - | constants::DW_OP_breg31 => { - let value = bytes.read_sleb128()?; - Ok(Operation::RegisterOffset { - register: Register((opcode - constants::DW_OP_breg0.0).into()), - offset: value, - base_type: generic_type(), - }) - } - constants::DW_OP_regx => { - let register = bytes.read_uleb128().and_then(Register::from_u64)?; - Ok(Operation::Register { register }) - } - constants::DW_OP_fbreg => { - let value = bytes.read_sleb128()?; - Ok(Operation::FrameOffset { offset: value }) - } - constants::DW_OP_bregx => { - let register = bytes.read_uleb128().and_then(Register::from_u64)?; - let offset = bytes.read_sleb128()?; - Ok(Operation::RegisterOffset { - register, - offset, - base_type: generic_type(), - }) - } - constants::DW_OP_piece => { - let size = bytes.read_uleb128()?; - Ok(Operation::Piece { - size_in_bits: 8 * size, - bit_offset: None, - }) - } - constants::DW_OP_deref_size => { - let size = bytes.read_u8()?; - Ok(Operation::Deref { - base_type: generic_type(), - size, - space: 
false, - }) - } - constants::DW_OP_xderef_size => { - let size = bytes.read_u8()?; - Ok(Operation::Deref { - base_type: generic_type(), - size, - space: true, - }) - } - constants::DW_OP_nop => Ok(Operation::Nop), - constants::DW_OP_push_object_address => Ok(Operation::PushObjectAddress), - constants::DW_OP_call2 => { - let value = bytes.read_u16().map(R::Offset::from_u16)?; - Ok(Operation::Call { - offset: DieReference::UnitRef(UnitOffset(value)), - }) - } - constants::DW_OP_call4 => { - let value = bytes.read_u32().map(R::Offset::from_u32)?; - Ok(Operation::Call { - offset: DieReference::UnitRef(UnitOffset(value)), - }) - } - constants::DW_OP_call_ref => { - let value = bytes.read_offset(encoding.format)?; - Ok(Operation::Call { - offset: DieReference::DebugInfoRef(DebugInfoOffset(value)), - }) - } - constants::DW_OP_form_tls_address | constants::DW_OP_GNU_push_tls_address => { - Ok(Operation::TLS) - } - constants::DW_OP_call_frame_cfa => Ok(Operation::CallFrameCFA), - constants::DW_OP_bit_piece => { - let size = bytes.read_uleb128()?; - let offset = bytes.read_uleb128()?; - Ok(Operation::Piece { - size_in_bits: size, - bit_offset: Some(offset), - }) - } - constants::DW_OP_implicit_value => { - let len = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - let data = bytes.split(len)?; - Ok(Operation::ImplicitValue { data }) - } - constants::DW_OP_stack_value => Ok(Operation::StackValue), - constants::DW_OP_implicit_pointer | constants::DW_OP_GNU_implicit_pointer => { - let value = if encoding.version == 2 { - bytes - .read_address(encoding.address_size) - .and_then(Offset::from_u64)? - } else { - bytes.read_offset(encoding.format)? 
- }; - let byte_offset = bytes.read_sleb128()?; - Ok(Operation::ImplicitPointer { - value: DebugInfoOffset(value), - byte_offset, - }) - } - constants::DW_OP_addrx | constants::DW_OP_GNU_addr_index => { - let index = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - Ok(Operation::AddressIndex { - index: DebugAddrIndex(index), - }) - } - constants::DW_OP_constx | constants::DW_OP_GNU_const_index => { - let index = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - Ok(Operation::ConstantIndex { - index: DebugAddrIndex(index), - }) - } - constants::DW_OP_entry_value | constants::DW_OP_GNU_entry_value => { - let len = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - let expression = bytes.split(len)?; - Ok(Operation::EntryValue { expression }) - } - constants::DW_OP_GNU_parameter_ref => { - let value = bytes.read_u32().map(R::Offset::from_u32)?; - Ok(Operation::ParameterRef { - offset: UnitOffset(value), - }) - } - constants::DW_OP_const_type | constants::DW_OP_GNU_const_type => { - let base_type = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - let len = bytes.read_u8()?; - let value = bytes.split(R::Offset::from_u8(len))?; - Ok(Operation::TypedLiteral { - base_type: UnitOffset(base_type), - value, - }) - } - constants::DW_OP_regval_type | constants::DW_OP_GNU_regval_type => { - let register = bytes.read_uleb128().and_then(Register::from_u64)?; - let base_type = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - Ok(Operation::RegisterOffset { - register, - offset: 0, - base_type: UnitOffset(base_type), - }) - } - constants::DW_OP_deref_type | constants::DW_OP_GNU_deref_type => { - let size = bytes.read_u8()?; - let base_type = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - Ok(Operation::Deref { - base_type: UnitOffset(base_type), - size, - space: false, - }) - } - constants::DW_OP_xderef_type => { - let size = bytes.read_u8()?; - let base_type = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - Ok(Operation::Deref { - base_type: 
UnitOffset(base_type), - size, - space: true, - }) - } - constants::DW_OP_convert | constants::DW_OP_GNU_convert => { - let base_type = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - Ok(Operation::Convert { - base_type: UnitOffset(base_type), - }) - } - constants::DW_OP_reinterpret | constants::DW_OP_GNU_reinterpret => { - let base_type = bytes.read_uleb128().and_then(R::Offset::from_u64)?; - Ok(Operation::Reinterpret { - base_type: UnitOffset(base_type), - }) - } - constants::DW_OP_WASM_location => match bytes.read_u8()? { - 0x0 => { - let index = bytes.read_uleb128_u32()?; - Ok(Operation::WasmLocal { index }) - } - 0x1 => { - let index = bytes.read_uleb128_u32()?; - Ok(Operation::WasmGlobal { index }) - } - 0x2 => { - let index = bytes.read_uleb128_u32()?; - Ok(Operation::WasmStack { index }) - } - 0x3 => { - let index = bytes.read_u32()?; - Ok(Operation::WasmGlobal { index }) - } - _ => Err(Error::InvalidExpression(name)), - }, - _ => Err(Error::InvalidExpression(name)), - } - } -} - -#[derive(Debug)] -enum EvaluationState { - Start(Option), - Ready, - Error(Error), - Complete, - Waiting(EvaluationWaiting), -} - -#[derive(Debug)] -enum EvaluationWaiting { - Memory, - Register { offset: i64 }, - FrameBase { offset: i64 }, - Tls, - Cfa, - AtLocation, - EntryValue, - ParameterRef, - RelocatedAddress, - IndexedAddress, - TypedLiteral { value: R }, - Convert, - Reinterpret, -} - -/// The state of an `Evaluation` after evaluating a DWARF expression. -/// The evaluation is either `Complete`, or it requires more data -/// to continue, as described by the variant. -#[derive(Debug, PartialEq)] -pub enum EvaluationResult { - /// The `Evaluation` is complete, and `Evaluation::result()` can be called. - Complete, - /// The `Evaluation` needs a value from memory to proceed further. Once the - /// caller determines what value to provide it should resume the `Evaluation` - /// by calling `Evaluation::resume_with_memory`. 
- RequiresMemory { - /// The address of the value required. - address: u64, - /// The size of the value required. This is guaranteed to be at most the - /// word size of the target architecture. - size: u8, - /// If not `None`, a target-specific address space value. - space: Option, - /// The DIE of the base type or 0 to indicate the generic type - base_type: UnitOffset, - }, - /// The `Evaluation` needs a value from a register to proceed further. Once - /// the caller determines what value to provide it should resume the - /// `Evaluation` by calling `Evaluation::resume_with_register`. - RequiresRegister { - /// The register number. - register: Register, - /// The DIE of the base type or 0 to indicate the generic type - base_type: UnitOffset, - }, - /// The `Evaluation` needs the frame base address to proceed further. Once - /// the caller determines what value to provide it should resume the - /// `Evaluation` by calling `Evaluation::resume_with_frame_base`. The frame - /// base address is the address produced by the location description in the - /// `DW_AT_frame_base` attribute of the current function. - RequiresFrameBase, - /// The `Evaluation` needs a value from TLS to proceed further. Once the - /// caller determines what value to provide it should resume the - /// `Evaluation` by calling `Evaluation::resume_with_tls`. - RequiresTls(u64), - /// The `Evaluation` needs the CFA to proceed further. Once the caller - /// determines what value to provide it should resume the `Evaluation` by - /// calling `Evaluation::resume_with_call_frame_cfa`. - RequiresCallFrameCfa, - /// The `Evaluation` needs the DWARF expression at the given location to - /// proceed further. Once the caller determines what value to provide it - /// should resume the `Evaluation` by calling - /// `Evaluation::resume_with_at_location`. 
- RequiresAtLocation(DieReference), - /// The `Evaluation` needs the value produced by evaluating a DWARF - /// expression at the entry point of the current subprogram. Once the - /// caller determines what value to provide it should resume the - /// `Evaluation` by calling `Evaluation::resume_with_entry_value`. - RequiresEntryValue(Expression), - /// The `Evaluation` needs the value of the parameter at the given location - /// in the current function's caller. Once the caller determines what value - /// to provide it should resume the `Evaluation` by calling - /// `Evaluation::resume_with_parameter_ref`. - RequiresParameterRef(UnitOffset), - /// The `Evaluation` needs an address to be relocated to proceed further. - /// Once the caller determines what value to provide it should resume the - /// `Evaluation` by calling `Evaluation::resume_with_relocated_address`. - RequiresRelocatedAddress(u64), - /// The `Evaluation` needs an address from the `.debug_addr` section. - /// This address may also need to be relocated. - /// Once the caller determines what value to provide it should resume the - /// `Evaluation` by calling `Evaluation::resume_with_indexed_address`. - RequiresIndexedAddress { - /// The index of the address in the `.debug_addr` section, - /// relative to the `DW_AT_addr_base` of the compilation unit. - index: DebugAddrIndex, - /// Whether the address also needs to be relocated. - relocate: bool, - }, - /// The `Evaluation` needs the `ValueType` for the base type DIE at - /// the give unit offset. Once the caller determines what value to provide it - /// should resume the `Evaluation` by calling - /// `Evaluation::resume_with_base_type`. - RequiresBaseType(UnitOffset), -} - -/// The bytecode for a DWARF expression or location description. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct Expression(pub R); - -impl Expression { - /// Create an evaluation for this expression. 
- /// - /// The `encoding` is determined by the - /// [`CompilationUnitHeader`](struct.CompilationUnitHeader.html) or - /// [`TypeUnitHeader`](struct.TypeUnitHeader.html) that this expression - /// relates to. - /// - /// # Examples - /// ```rust,no_run - /// use gimli::Expression; - /// # let endian = gimli::LittleEndian; - /// # let debug_info = gimli::DebugInfo::from(gimli::EndianSlice::new(&[], endian)); - /// # let unit = debug_info.units().next().unwrap().unwrap(); - /// # let bytecode = gimli::EndianSlice::new(&[], endian); - /// let expression = gimli::Expression(bytecode); - /// let mut eval = expression.evaluation(unit.encoding()); - /// let mut result = eval.evaluate().unwrap(); - /// ``` - #[cfg(feature = "read")] - #[inline] - pub fn evaluation(self, encoding: Encoding) -> Evaluation { - Evaluation::new(self.0, encoding) - } - - /// Return an iterator for the operations in the expression. - pub fn operations(self, encoding: Encoding) -> OperationIter { - OperationIter { - input: self.0, - encoding, - } - } -} - -/// An iterator for the operations in an expression. -#[derive(Debug, Clone, Copy)] -pub struct OperationIter { - input: R, - encoding: Encoding, -} - -impl OperationIter { - /// Read the next operation in an expression. - pub fn next(&mut self) -> Result>> { - if self.input.is_empty() { - return Ok(None); - } - match Operation::parse(&mut self.input, self.encoding) { - Ok(op) => Ok(Some(op)), - Err(e) => { - self.input.empty(); - Err(e) - } - } - } - - /// Return the current byte offset of the iterator. - pub fn offset_from(&self, expression: &Expression) -> R::Offset { - self.input.offset_from(&expression.0) - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for OperationIter { - type Item = Operation; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - OperationIter::next(self) - } -} - -/// Specification of what storage should be used for [`Evaluation`]. 
-/// -#[cfg_attr( - feature = "read", - doc = " -Normally you would only need to use [`StoreOnHeap`], which places the stacks and the results -on the heap using [`Vec`]. This is the default storage type parameter for [`Evaluation`]. -" -)] -/// -/// If you need to avoid [`Evaluation`] from allocating memory, e.g. for signal safety, -/// you can provide you own storage specification: -/// ```rust,no_run -/// # use gimli::*; -/// # let bytecode = EndianSlice::new(&[], LittleEndian); -/// # let encoding = unimplemented!(); -/// # let get_register_value = |_, _| Value::Generic(42); -/// # let get_frame_base = || 0xdeadbeef; -/// # -/// struct StoreOnStack; -/// -/// impl EvaluationStorage for StoreOnStack { -/// type Stack = [Value; 64]; -/// type ExpressionStack = [(R, R); 4]; -/// type Result = [Piece; 1]; -/// } -/// -/// let mut eval = Evaluation::<_, StoreOnStack>::new_in(bytecode, encoding); -/// let mut result = eval.evaluate().unwrap(); -/// while result != EvaluationResult::Complete { -/// match result { -/// EvaluationResult::RequiresRegister { register, base_type } => { -/// let value = get_register_value(register, base_type); -/// result = eval.resume_with_register(value).unwrap(); -/// }, -/// EvaluationResult::RequiresFrameBase => { -/// let frame_base = get_frame_base(); -/// result = eval.resume_with_frame_base(frame_base).unwrap(); -/// }, -/// _ => unimplemented!(), -/// }; -/// } -/// -/// let result = eval.as_result(); -/// println!("{:?}", result); -/// ``` -pub trait EvaluationStorage { - /// The storage used for the evaluation stack. - type Stack: ArrayLike; - /// The storage used for the expression stack. - type ExpressionStack: ArrayLike; - /// The storage used for the results. - type Result: ArrayLike>; -} - -#[cfg(feature = "read")] -impl EvaluationStorage for StoreOnHeap { - type Stack = Vec; - type ExpressionStack = Vec<(R, R)>; - type Result = Vec>; -} - -/// A DWARF expression evaluator. 
-/// -/// # Usage -/// A DWARF expression may require additional data to produce a final result, -/// such as the value of a register or a memory location. Once initial setup -/// is complete (i.e. `set_initial_value()`, `set_object_address()`) the -/// consumer calls the `evaluate()` method. That returns an `EvaluationResult`, -/// which is either `EvaluationResult::Complete` or a value indicating what -/// data is needed to resume the `Evaluation`. The consumer is responsible for -/// producing that data and resuming the computation with the correct method, -/// as documented for `EvaluationResult`. Only once an `EvaluationResult::Complete` -/// is returned can the consumer call `result()`. -/// -/// This design allows the consumer of `Evaluation` to decide how and when to -/// produce the required data and resume the computation. The `Evaluation` can -/// be driven synchronously (as shown below) or by some asynchronous mechanism -/// such as futures. -/// -/// # Examples -/// ```rust,no_run -/// use gimli::{Evaluation, EvaluationResult, Expression}; -/// # let bytecode = gimli::EndianSlice::new(&[], gimli::LittleEndian); -/// # let encoding = unimplemented!(); -/// # let get_register_value = |_, _| gimli::Value::Generic(42); -/// # let get_frame_base = || 0xdeadbeef; -/// -/// let mut eval = Evaluation::new(bytecode, encoding); -/// let mut result = eval.evaluate().unwrap(); -/// while result != EvaluationResult::Complete { -/// match result { -/// EvaluationResult::RequiresRegister { register, base_type } => { -/// let value = get_register_value(register, base_type); -/// result = eval.resume_with_register(value).unwrap(); -/// }, -/// EvaluationResult::RequiresFrameBase => { -/// let frame_base = get_frame_base(); -/// result = eval.resume_with_frame_base(frame_base).unwrap(); -/// }, -/// _ => unimplemented!(), -/// }; -/// } -/// -/// let result = eval.result(); -/// println!("{:?}", result); -/// ``` -#[derive(Debug)] -pub struct Evaluation = StoreOnHeap> { 
- bytecode: R, - encoding: Encoding, - object_address: Option, - max_iterations: Option, - iteration: u32, - state: EvaluationState, - - // Stack operations are done on word-sized values. We do all - // operations on 64-bit values, and then mask the results - // appropriately when popping. - addr_mask: u64, - - // The stack. - stack: ArrayVec, - - // The next operation to decode and evaluate. - pc: R, - - // If we see a DW_OP_call* operation, the previous PC and bytecode - // is stored here while evaluating the subroutine. - expression_stack: ArrayVec, - - value_result: Option, - result: ArrayVec, -} - -#[cfg(feature = "read")] -impl Evaluation { - /// Create a new DWARF expression evaluator. - /// - /// The new evaluator is created without an initial value, without - /// an object address, and without a maximum number of iterations. - pub fn new(bytecode: R, encoding: Encoding) -> Self { - Self::new_in(bytecode, encoding) - } - - /// Get the result of this `Evaluation`. - /// - /// # Panics - /// Panics if this `Evaluation` has not been driven to completion. - pub fn result(self) -> Vec> { - match self.state { - EvaluationState::Complete => self.result.into_vec(), - _ => { - panic!("Called `Evaluation::result` on an `Evaluation` that has not been completed") - } - } - } -} - -impl> Evaluation { - /// Create a new DWARF expression evaluator. - /// - /// The new evaluator is created without an initial value, without - /// an object address, and without a maximum number of iterations. 
- pub fn new_in(bytecode: R, encoding: Encoding) -> Self { - let pc = bytecode.clone(); - Evaluation { - bytecode, - encoding, - object_address: None, - max_iterations: None, - iteration: 0, - state: EvaluationState::Start(None), - addr_mask: if encoding.address_size == 8 { - !0u64 - } else { - (1 << (8 * u64::from(encoding.address_size))) - 1 - }, - stack: Default::default(), - expression_stack: Default::default(), - pc, - value_result: None, - result: Default::default(), - } - } - - /// Set an initial value to be pushed on the DWARF expression - /// evaluator's stack. This can be used in cases like - /// `DW_AT_vtable_elem_location`, which require a value on the - /// stack before evaluation commences. If no initial value is - /// set, and the expression uses an opcode requiring the initial - /// value, then evaluation will fail with an error. - /// - /// # Panics - /// Panics if `set_initial_value()` has already been called, or if - /// `evaluate()` has already been called. - pub fn set_initial_value(&mut self, value: u64) { - match self.state { - EvaluationState::Start(None) => { - self.state = EvaluationState::Start(Some(value)); - } - _ => panic!( - "`Evaluation::set_initial_value` was called twice, or after evaluation began." - ), - }; - } - - /// Set the enclosing object's address, as used by - /// `DW_OP_push_object_address`. If no object address is set, and - /// the expression uses an opcode requiring the object address, - /// then evaluation will fail with an error. - pub fn set_object_address(&mut self, value: u64) { - self.object_address = Some(value); - } - - /// Set the maximum number of iterations to be allowed by the - /// expression evaluator. - /// - /// An iteration corresponds approximately to the evaluation of a - /// single operation in an expression ("approximately" because the - /// implementation may allow two such operations in some cases). 
- /// The default is not to have a maximum; once set, it's not - /// possible to go back to this default state. This value can be - /// set to avoid denial of service attacks by bad DWARF bytecode. - pub fn set_max_iterations(&mut self, value: u32) { - self.max_iterations = Some(value); - } - - fn pop(&mut self) -> Result { - match self.stack.pop() { - Some(value) => Ok(value), - None => Err(Error::NotEnoughStackItems), - } - } - - fn push(&mut self, value: Value) -> Result<()> { - self.stack.try_push(value).map_err(|_| Error::StackFull) - } - - fn evaluate_one_operation(&mut self) -> Result> { - let operation = Operation::parse(&mut self.pc, self.encoding)?; - - match operation { - Operation::Deref { - base_type, - size, - space, - } => { - let entry = self.pop()?; - let addr = entry.to_u64(self.addr_mask)?; - let addr_space = if space { - let entry = self.pop()?; - let value = entry.to_u64(self.addr_mask)?; - Some(value) - } else { - None - }; - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::Memory, - EvaluationResult::RequiresMemory { - address: addr, - size, - space: addr_space, - base_type, - }, - )); - } - - Operation::Drop => { - self.pop()?; - } - Operation::Pick { index } => { - let len = self.stack.len(); - let index = index as usize; - if index >= len { - return Err(Error::NotEnoughStackItems); - } - let value = self.stack[len - index - 1]; - self.push(value)?; - } - Operation::Swap => { - let top = self.pop()?; - let next = self.pop()?; - self.push(top)?; - self.push(next)?; - } - Operation::Rot => { - let one = self.pop()?; - let two = self.pop()?; - let three = self.pop()?; - self.push(one)?; - self.push(three)?; - self.push(two)?; - } - - Operation::Abs => { - let value = self.pop()?; - let result = value.abs(self.addr_mask)?; - self.push(result)?; - } - Operation::And => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.and(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Div => { - let rhs = 
self.pop()?; - let lhs = self.pop()?; - let result = lhs.div(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Minus => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.sub(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Mod => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.rem(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Mul => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.mul(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Neg => { - let v = self.pop()?; - let result = v.neg(self.addr_mask)?; - self.push(result)?; - } - Operation::Not => { - let value = self.pop()?; - let result = value.not(self.addr_mask)?; - self.push(result)?; - } - Operation::Or => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.or(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Plus => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.add(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::PlusConstant { value } => { - let lhs = self.pop()?; - let rhs = Value::from_u64(lhs.value_type(), value)?; - let result = lhs.add(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Shl => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.shl(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Shr => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.shr(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Shra => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.shra(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Xor => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.xor(rhs, self.addr_mask)?; - self.push(result)?; - } - - Operation::Bra { target } => { - let entry = self.pop()?; - let v = entry.to_u64(self.addr_mask)?; - if v != 0 { - self.pc = compute_pc(&self.pc, &self.bytecode, 
target)?; - } - } - - Operation::Eq => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.eq(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Ge => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.ge(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Gt => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.gt(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Le => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.le(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Lt => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.lt(rhs, self.addr_mask)?; - self.push(result)?; - } - Operation::Ne => { - let rhs = self.pop()?; - let lhs = self.pop()?; - let result = lhs.ne(rhs, self.addr_mask)?; - self.push(result)?; - } - - Operation::Skip { target } => { - self.pc = compute_pc(&self.pc, &self.bytecode, target)?; - } - - Operation::UnsignedConstant { value } => { - self.push(Value::Generic(value))?; - } - - Operation::SignedConstant { value } => { - self.push(Value::Generic(value as u64))?; - } - - Operation::RegisterOffset { - register, - offset, - base_type, - } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::Register { offset }, - EvaluationResult::RequiresRegister { - register, - base_type, - }, - )); - } - - Operation::FrameOffset { offset } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::FrameBase { offset }, - EvaluationResult::RequiresFrameBase, - )); - } - - Operation::Nop => {} - - Operation::PushObjectAddress => { - if let Some(value) = self.object_address { - self.push(Value::Generic(value))?; - } else { - return Err(Error::InvalidPushObjectAddress); - } - } - - Operation::Call { offset } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::AtLocation, - EvaluationResult::RequiresAtLocation(offset), - )); - } - - Operation::TLS => { - let 
entry = self.pop()?; - let index = entry.to_u64(self.addr_mask)?; - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::Tls, - EvaluationResult::RequiresTls(index), - )); - } - - Operation::CallFrameCFA => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::Cfa, - EvaluationResult::RequiresCallFrameCfa, - )); - } - - Operation::Register { register } => { - let location = Location::Register { register }; - return Ok(OperationEvaluationResult::Complete { location }); - } - - Operation::ImplicitValue { ref data } => { - let location = Location::Bytes { - value: data.clone(), - }; - return Ok(OperationEvaluationResult::Complete { location }); - } - - Operation::StackValue => { - let value = self.pop()?; - let location = Location::Value { value }; - return Ok(OperationEvaluationResult::Complete { location }); - } - - Operation::ImplicitPointer { value, byte_offset } => { - let location = Location::ImplicitPointer { value, byte_offset }; - return Ok(OperationEvaluationResult::Complete { location }); - } - - Operation::EntryValue { ref expression } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::EntryValue, - EvaluationResult::RequiresEntryValue(Expression(expression.clone())), - )); - } - - Operation::ParameterRef { offset } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::ParameterRef, - EvaluationResult::RequiresParameterRef(offset), - )); - } - - Operation::Address { address } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::RelocatedAddress, - EvaluationResult::RequiresRelocatedAddress(address), - )); - } - - Operation::AddressIndex { index } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::IndexedAddress, - EvaluationResult::RequiresIndexedAddress { - index, - relocate: true, - }, - )); - } - - Operation::ConstantIndex { index } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::IndexedAddress, - 
EvaluationResult::RequiresIndexedAddress { - index, - relocate: false, - }, - )); - } - - Operation::Piece { - size_in_bits, - bit_offset, - } => { - let location = if self.stack.is_empty() { - Location::Empty - } else { - let entry = self.pop()?; - let address = entry.to_u64(self.addr_mask)?; - Location::Address { address } - }; - self.result - .try_push(Piece { - size_in_bits: Some(size_in_bits), - bit_offset, - location, - }) - .map_err(|_| Error::StackFull)?; - return Ok(OperationEvaluationResult::Piece); - } - - Operation::TypedLiteral { base_type, value } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::TypedLiteral { value }, - EvaluationResult::RequiresBaseType(base_type), - )); - } - Operation::Convert { base_type } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::Convert, - EvaluationResult::RequiresBaseType(base_type), - )); - } - Operation::Reinterpret { base_type } => { - return Ok(OperationEvaluationResult::Waiting( - EvaluationWaiting::Reinterpret, - EvaluationResult::RequiresBaseType(base_type), - )); - } - Operation::WasmLocal { .. } - | Operation::WasmGlobal { .. } - | Operation::WasmStack { .. } => { - return Err(Error::UnsupportedEvaluation); - } - } - - Ok(OperationEvaluationResult::Incomplete) - } - - /// Get the result if this is an evaluation for a value. - /// - /// Returns `None` if the evaluation contained operations that are only - /// valid for location descriptions. - /// - /// # Panics - /// Panics if this `Evaluation` has not been driven to completion. - pub fn value_result(&self) -> Option { - match self.state { - EvaluationState::Complete => self.value_result, - _ => { - panic!("Called `Evaluation::value_result` on an `Evaluation` that has not been completed") - } - } - } - - /// Get the result of this `Evaluation`. - /// - /// # Panics - /// Panics if this `Evaluation` has not been driven to completion. 
- pub fn as_result(&self) -> &[Piece] { - match self.state { - EvaluationState::Complete => &self.result, - _ => { - panic!( - "Called `Evaluation::as_result` on an `Evaluation` that has not been completed" - ) - } - } - } - - /// Evaluate a DWARF expression. This method should only ever be called - /// once. If the returned `EvaluationResult` is not - /// `EvaluationResult::Complete`, the caller should provide the required - /// value and resume the evaluation by calling the appropriate resume_with - /// method on `Evaluation`. - pub fn evaluate(&mut self) -> Result> { - match self.state { - EvaluationState::Start(initial_value) => { - if let Some(value) = initial_value { - self.push(Value::Generic(value))?; - } - self.state = EvaluationState::Ready; - } - EvaluationState::Ready => {} - EvaluationState::Error(err) => return Err(err), - EvaluationState::Complete => return Ok(EvaluationResult::Complete), - EvaluationState::Waiting(_) => panic!(), - }; - - match self.evaluate_internal() { - Ok(r) => Ok(r), - Err(e) => { - self.state = EvaluationState::Error(e); - Err(e) - } - } - } - - /// Resume the `Evaluation` with the provided memory `value`. This will apply - /// the provided memory value to the evaluation and continue evaluating - /// opcodes until the evaluation is completed, reaches an error, or needs - /// more information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with `EvaluationResult::RequiresMemory`. - pub fn resume_with_memory(&mut self, value: Value) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::Memory) => { - self.push(value)?; - } - _ => panic!( - "Called `Evaluation::resume_with_memory` without a preceding `EvaluationResult::RequiresMemory`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided `register` value. 
This will apply - /// the provided register value to the evaluation and continue evaluating - /// opcodes until the evaluation is completed, reaches an error, or needs - /// more information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with `EvaluationResult::RequiresRegister`. - pub fn resume_with_register(&mut self, value: Value) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::Register { offset }) => { - let offset = Value::from_u64(value.value_type(), offset as u64)?; - let value = value.add(offset, self.addr_mask)?; - self.push(value)?; - } - _ => panic!( - "Called `Evaluation::resume_with_register` without a preceding `EvaluationResult::RequiresRegister`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided `frame_base`. This will - /// apply the provided frame base value to the evaluation and continue - /// evaluating opcodes until the evaluation is completed, reaches an error, - /// or needs more information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with `EvaluationResult::RequiresFrameBase`. - pub fn resume_with_frame_base(&mut self, frame_base: u64) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::FrameBase { offset }) => { - self.push(Value::Generic(frame_base.wrapping_add(offset as u64)))?; - } - _ => panic!( - "Called `Evaluation::resume_with_frame_base` without a preceding `EvaluationResult::RequiresFrameBase`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided `value`. This will apply - /// the provided TLS value to the evaluation and continue evaluating - /// opcodes until the evaluation is completed, reaches an error, or needs - /// more information again. 
- /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with `EvaluationResult::RequiresTls`. - pub fn resume_with_tls(&mut self, value: u64) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::Tls) => { - self.push(Value::Generic(value))?; - } - _ => panic!( - "Called `Evaluation::resume_with_tls` without a preceding `EvaluationResult::RequiresTls`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided `cfa`. This will - /// apply the provided CFA value to the evaluation and continue evaluating - /// opcodes until the evaluation is completed, reaches an error, or needs - /// more information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with `EvaluationResult::RequiresCallFrameCfa`. - pub fn resume_with_call_frame_cfa(&mut self, cfa: u64) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::Cfa) => { - self.push(Value::Generic(cfa))?; - } - _ => panic!( - "Called `Evaluation::resume_with_call_frame_cfa` without a preceding `EvaluationResult::RequiresCallFrameCfa`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided `bytes`. This will - /// continue processing the evaluation with the new expression provided - /// until the evaluation is completed, reaches an error, or needs more - /// information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with `EvaluationResult::RequiresAtLocation`. 
- pub fn resume_with_at_location(&mut self, mut bytes: R) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::AtLocation) => { - if !bytes.is_empty() { - let mut pc = bytes.clone(); - mem::swap(&mut pc, &mut self.pc); - mem::swap(&mut bytes, &mut self.bytecode); - self.expression_stack.try_push((pc, bytes)).map_err(|_| Error::StackFull)?; - } - } - _ => panic!( - "Called `Evaluation::resume_with_at_location` without a precedeing `EvaluationResult::RequiresAtLocation`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided `entry_value`. This will - /// apply the provided entry value to the evaluation and continue evaluating - /// opcodes until the evaluation is completed, reaches an error, or needs - /// more information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with `EvaluationResult::RequiresEntryValue`. - pub fn resume_with_entry_value(&mut self, entry_value: Value) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::EntryValue) => { - self.push(entry_value)?; - } - _ => panic!( - "Called `Evaluation::resume_with_entry_value` without a preceding `EvaluationResult::RequiresEntryValue`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided `parameter_value`. This will - /// apply the provided parameter value to the evaluation and continue evaluating - /// opcodes until the evaluation is completed, reaches an error, or needs - /// more information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with `EvaluationResult::RequiresParameterRef`. 
- pub fn resume_with_parameter_ref( - &mut self, - parameter_value: u64, - ) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::ParameterRef) => { - self.push(Value::Generic(parameter_value))?; - } - _ => panic!( - "Called `Evaluation::resume_with_parameter_ref` without a preceding `EvaluationResult::RequiresParameterRef`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided relocated `address`. This will use the - /// provided relocated address for the operation that required it, and continue evaluating - /// opcodes until the evaluation is completed, reaches an error, or needs - /// more information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with - /// `EvaluationResult::RequiresRelocatedAddress`. - pub fn resume_with_relocated_address(&mut self, address: u64) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::RelocatedAddress) => { - self.push(Value::Generic(address))?; - } - _ => panic!( - "Called `Evaluation::resume_with_relocated_address` without a preceding `EvaluationResult::RequiresRelocatedAddress`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided indexed `address`. This will use the - /// provided indexed address for the operation that required it, and continue evaluating - /// opcodes until the evaluation is completed, reaches an error, or needs - /// more information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with - /// `EvaluationResult::RequiresIndexedAddress`. 
- pub fn resume_with_indexed_address(&mut self, address: u64) -> Result> { - match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::IndexedAddress) => { - self.push(Value::Generic(address))?; - } - _ => panic!( - "Called `Evaluation::resume_with_indexed_address` without a preceding `EvaluationResult::RequiresIndexedAddress`" - ), - }; - - self.evaluate_internal() - } - - /// Resume the `Evaluation` with the provided `base_type`. This will use the - /// provided base type for the operation that required it, and continue evaluating - /// opcodes until the evaluation is completed, reaches an error, or needs - /// more information again. - /// - /// # Panics - /// Panics if this `Evaluation` did not previously stop with `EvaluationResult::RequiresBaseType`. - pub fn resume_with_base_type(&mut self, base_type: ValueType) -> Result> { - let value = match self.state { - EvaluationState::Error(err) => return Err(err), - EvaluationState::Waiting(EvaluationWaiting::TypedLiteral { ref value }) => { - Value::parse(base_type, value.clone())? - } - EvaluationState::Waiting(EvaluationWaiting::Convert) => { - let entry = self.pop()?; - entry.convert(base_type, self.addr_mask)? - } - EvaluationState::Waiting(EvaluationWaiting::Reinterpret) => { - let entry = self.pop()?; - entry.reinterpret(base_type, self.addr_mask)? 
- } - _ => panic!( - "Called `Evaluation::resume_with_base_type` without a preceding `EvaluationResult::RequiresBaseType`" - ), - }; - self.push(value)?; - self.evaluate_internal() - } - - fn end_of_expression(&mut self) -> bool { - while self.pc.is_empty() { - match self.expression_stack.pop() { - Some((newpc, newbytes)) => { - self.pc = newpc; - self.bytecode = newbytes; - } - None => return true, - } - } - false - } - - fn evaluate_internal(&mut self) -> Result> { - while !self.end_of_expression() { - self.iteration += 1; - if let Some(max_iterations) = self.max_iterations { - if self.iteration > max_iterations { - return Err(Error::TooManyIterations); - } - } - - let op_result = self.evaluate_one_operation()?; - match op_result { - OperationEvaluationResult::Piece => {} - OperationEvaluationResult::Incomplete => { - if self.end_of_expression() && !self.result.is_empty() { - // We saw a piece earlier and then some - // unterminated piece. It's not clear this is - // well-defined. - return Err(Error::InvalidPiece); - } - } - OperationEvaluationResult::Complete { location } => { - if self.end_of_expression() { - if !self.result.is_empty() { - // We saw a piece earlier and then some - // unterminated piece. It's not clear this is - // well-defined. - return Err(Error::InvalidPiece); - } - self.result - .try_push(Piece { - size_in_bits: None, - bit_offset: None, - location, - }) - .map_err(|_| Error::StackFull)?; - } else { - // If there are more operations, then the next operation must - // be a Piece. - match Operation::parse(&mut self.pc, self.encoding)? 
{ - Operation::Piece { - size_in_bits, - bit_offset, - } => { - self.result - .try_push(Piece { - size_in_bits: Some(size_in_bits), - bit_offset, - location, - }) - .map_err(|_| Error::StackFull)?; - } - _ => { - let value = - self.bytecode.len().into_u64() - self.pc.len().into_u64() - 1; - return Err(Error::InvalidExpressionTerminator(value)); - } - } - } - } - OperationEvaluationResult::Waiting(waiting, result) => { - self.state = EvaluationState::Waiting(waiting); - return Ok(result); - } - } - } - - // If no pieces have been seen, use the stack top as the - // result. - if self.result.is_empty() { - let entry = self.pop()?; - self.value_result = Some(entry); - let addr = entry.to_u64(self.addr_mask)?; - self.result - .try_push(Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Address { address: addr }, - }) - .map_err(|_| Error::StackFull)?; - } - - self.state = EvaluationState::Complete; - Ok(EvaluationResult::Complete) - } -} - -#[cfg(test)] -// Tests require leb128::write. -#[cfg(feature = "write")] -mod tests { - use super::*; - use crate::common::Format; - use crate::constants; - use crate::endianity::LittleEndian; - use crate::leb128; - use crate::read::{EndianSlice, Error, Result, UnitOffset}; - use crate::test_util::GimliSectionMethods; - use core::usize; - use test_assembler::{Endian, Section}; - - fn encoding4() -> Encoding { - Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - } - } - - fn encoding8() -> Encoding { - Encoding { - format: Format::Dwarf64, - version: 4, - address_size: 8, - } - } - - #[test] - fn test_compute_pc() { - // Contents don't matter for this test, just length. 
- let bytes = [0, 1, 2, 3, 4]; - let bytecode = &bytes[..]; - let ebuf = &EndianSlice::new(bytecode, LittleEndian); - - assert_eq!(compute_pc(ebuf, ebuf, 0), Ok(*ebuf)); - assert_eq!( - compute_pc(ebuf, ebuf, -1), - Err(Error::BadBranchTarget(usize::MAX as u64)) - ); - assert_eq!(compute_pc(ebuf, ebuf, 5), Ok(ebuf.range_from(5..))); - assert_eq!( - compute_pc(&ebuf.range_from(3..), ebuf, -2), - Ok(ebuf.range_from(1..)) - ); - assert_eq!( - compute_pc(&ebuf.range_from(2..), ebuf, 2), - Ok(ebuf.range_from(4..)) - ); - } - - fn check_op_parse_simple<'input>( - input: &'input [u8], - expect: &Operation>, - encoding: Encoding, - ) { - let buf = EndianSlice::new(input, LittleEndian); - let mut pc = buf; - let value = Operation::parse(&mut pc, encoding); - match value { - Ok(val) => { - assert_eq!(val, *expect); - assert_eq!(pc.len(), 0); - } - _ => panic!("Unexpected result"), - } - } - - fn check_op_parse_eof(input: &[u8], encoding: Encoding) { - let buf = EndianSlice::new(input, LittleEndian); - let mut pc = buf; - match Operation::parse(&mut pc, encoding) { - Err(Error::UnexpectedEof(id)) => { - assert!(buf.lookup_offset_id(id).is_some()); - } - - _ => panic!("Unexpected result"), - } - } - - fn check_op_parse( - input: F, - expect: &Operation>, - encoding: Encoding, - ) where - F: Fn(Section) -> Section, - { - let input = input(Section::with_endian(Endian::Little)) - .get_contents() - .unwrap(); - for i in 1..input.len() { - check_op_parse_eof(&input[..i], encoding); - } - check_op_parse_simple(&input, expect, encoding); - } - - #[test] - fn test_op_parse_onebyte() { - // Doesn't matter for this test. - let encoding = encoding4(); - - // Test all single-byte opcodes. 
- #[rustfmt::skip] - let inputs = [ - ( - constants::DW_OP_deref, - Operation::Deref { - base_type: generic_type(), - size: encoding.address_size, - space: false, - }, - ), - (constants::DW_OP_dup, Operation::Pick { index: 0 }), - (constants::DW_OP_drop, Operation::Drop), - (constants::DW_OP_over, Operation::Pick { index: 1 }), - (constants::DW_OP_swap, Operation::Swap), - (constants::DW_OP_rot, Operation::Rot), - ( - constants::DW_OP_xderef, - Operation::Deref { - base_type: generic_type(), - size: encoding.address_size, - space: true, - }, - ), - (constants::DW_OP_abs, Operation::Abs), - (constants::DW_OP_and, Operation::And), - (constants::DW_OP_div, Operation::Div), - (constants::DW_OP_minus, Operation::Minus), - (constants::DW_OP_mod, Operation::Mod), - (constants::DW_OP_mul, Operation::Mul), - (constants::DW_OP_neg, Operation::Neg), - (constants::DW_OP_not, Operation::Not), - (constants::DW_OP_or, Operation::Or), - (constants::DW_OP_plus, Operation::Plus), - (constants::DW_OP_shl, Operation::Shl), - (constants::DW_OP_shr, Operation::Shr), - (constants::DW_OP_shra, Operation::Shra), - (constants::DW_OP_xor, Operation::Xor), - (constants::DW_OP_eq, Operation::Eq), - (constants::DW_OP_ge, Operation::Ge), - (constants::DW_OP_gt, Operation::Gt), - (constants::DW_OP_le, Operation::Le), - (constants::DW_OP_lt, Operation::Lt), - (constants::DW_OP_ne, Operation::Ne), - (constants::DW_OP_lit0, Operation::UnsignedConstant { value: 0 }), - (constants::DW_OP_lit1, Operation::UnsignedConstant { value: 1 }), - (constants::DW_OP_lit2, Operation::UnsignedConstant { value: 2 }), - (constants::DW_OP_lit3, Operation::UnsignedConstant { value: 3 }), - (constants::DW_OP_lit4, Operation::UnsignedConstant { value: 4 }), - (constants::DW_OP_lit5, Operation::UnsignedConstant { value: 5 }), - (constants::DW_OP_lit6, Operation::UnsignedConstant { value: 6 }), - (constants::DW_OP_lit7, Operation::UnsignedConstant { value: 7 }), - (constants::DW_OP_lit8, Operation::UnsignedConstant { 
value: 8 }), - (constants::DW_OP_lit9, Operation::UnsignedConstant { value: 9 }), - (constants::DW_OP_lit10, Operation::UnsignedConstant { value: 10 }), - (constants::DW_OP_lit11, Operation::UnsignedConstant { value: 11 }), - (constants::DW_OP_lit12, Operation::UnsignedConstant { value: 12 }), - (constants::DW_OP_lit13, Operation::UnsignedConstant { value: 13 }), - (constants::DW_OP_lit14, Operation::UnsignedConstant { value: 14 }), - (constants::DW_OP_lit15, Operation::UnsignedConstant { value: 15 }), - (constants::DW_OP_lit16, Operation::UnsignedConstant { value: 16 }), - (constants::DW_OP_lit17, Operation::UnsignedConstant { value: 17 }), - (constants::DW_OP_lit18, Operation::UnsignedConstant { value: 18 }), - (constants::DW_OP_lit19, Operation::UnsignedConstant { value: 19 }), - (constants::DW_OP_lit20, Operation::UnsignedConstant { value: 20 }), - (constants::DW_OP_lit21, Operation::UnsignedConstant { value: 21 }), - (constants::DW_OP_lit22, Operation::UnsignedConstant { value: 22 }), - (constants::DW_OP_lit23, Operation::UnsignedConstant { value: 23 }), - (constants::DW_OP_lit24, Operation::UnsignedConstant { value: 24 }), - (constants::DW_OP_lit25, Operation::UnsignedConstant { value: 25 }), - (constants::DW_OP_lit26, Operation::UnsignedConstant { value: 26 }), - (constants::DW_OP_lit27, Operation::UnsignedConstant { value: 27 }), - (constants::DW_OP_lit28, Operation::UnsignedConstant { value: 28 }), - (constants::DW_OP_lit29, Operation::UnsignedConstant { value: 29 }), - (constants::DW_OP_lit30, Operation::UnsignedConstant { value: 30 }), - (constants::DW_OP_lit31, Operation::UnsignedConstant { value: 31 }), - (constants::DW_OP_reg0, Operation::Register { register: Register(0) }), - (constants::DW_OP_reg1, Operation::Register { register: Register(1) }), - (constants::DW_OP_reg2, Operation::Register { register: Register(2) }), - (constants::DW_OP_reg3, Operation::Register { register: Register(3) }), - (constants::DW_OP_reg4, Operation::Register { register: 
Register(4) }), - (constants::DW_OP_reg5, Operation::Register { register: Register(5) }), - (constants::DW_OP_reg6, Operation::Register { register: Register(6) }), - (constants::DW_OP_reg7, Operation::Register { register: Register(7) }), - (constants::DW_OP_reg8, Operation::Register { register: Register(8) }), - (constants::DW_OP_reg9, Operation::Register { register: Register(9) }), - (constants::DW_OP_reg10, Operation::Register { register: Register(10) }), - (constants::DW_OP_reg11, Operation::Register { register: Register(11) }), - (constants::DW_OP_reg12, Operation::Register { register: Register(12) }), - (constants::DW_OP_reg13, Operation::Register { register: Register(13) }), - (constants::DW_OP_reg14, Operation::Register { register: Register(14) }), - (constants::DW_OP_reg15, Operation::Register { register: Register(15) }), - (constants::DW_OP_reg16, Operation::Register { register: Register(16) }), - (constants::DW_OP_reg17, Operation::Register { register: Register(17) }), - (constants::DW_OP_reg18, Operation::Register { register: Register(18) }), - (constants::DW_OP_reg19, Operation::Register { register: Register(19) }), - (constants::DW_OP_reg20, Operation::Register { register: Register(20) }), - (constants::DW_OP_reg21, Operation::Register { register: Register(21) }), - (constants::DW_OP_reg22, Operation::Register { register: Register(22) }), - (constants::DW_OP_reg23, Operation::Register { register: Register(23) }), - (constants::DW_OP_reg24, Operation::Register { register: Register(24) }), - (constants::DW_OP_reg25, Operation::Register { register: Register(25) }), - (constants::DW_OP_reg26, Operation::Register { register: Register(26) }), - (constants::DW_OP_reg27, Operation::Register { register: Register(27) }), - (constants::DW_OP_reg28, Operation::Register { register: Register(28) }), - (constants::DW_OP_reg29, Operation::Register { register: Register(29) }), - (constants::DW_OP_reg30, Operation::Register { register: Register(30) }), - 
(constants::DW_OP_reg31, Operation::Register { register: Register(31) }), - (constants::DW_OP_nop, Operation::Nop), - (constants::DW_OP_push_object_address, Operation::PushObjectAddress), - (constants::DW_OP_form_tls_address, Operation::TLS), - (constants::DW_OP_GNU_push_tls_address, Operation::TLS), - (constants::DW_OP_call_frame_cfa, Operation::CallFrameCFA), - (constants::DW_OP_stack_value, Operation::StackValue), - ]; - - let input = []; - check_op_parse_eof(&input[..], encoding); - - for item in inputs.iter() { - let (opcode, ref result) = *item; - check_op_parse(|s| s.D8(opcode.0), result, encoding); - } - } - - #[test] - fn test_op_parse_twobyte() { - // Doesn't matter for this test. - let encoding = encoding4(); - - let inputs = [ - ( - constants::DW_OP_const1u, - 23, - Operation::UnsignedConstant { value: 23 }, - ), - ( - constants::DW_OP_const1s, - (-23i8) as u8, - Operation::SignedConstant { value: -23 }, - ), - (constants::DW_OP_pick, 7, Operation::Pick { index: 7 }), - ( - constants::DW_OP_deref_size, - 19, - Operation::Deref { - base_type: generic_type(), - size: 19, - space: false, - }, - ), - ( - constants::DW_OP_xderef_size, - 19, - Operation::Deref { - base_type: generic_type(), - size: 19, - space: true, - }, - ), - ]; - - for item in inputs.iter() { - let (opcode, arg, ref result) = *item; - check_op_parse(|s| s.D8(opcode.0).D8(arg), result, encoding); - } - } - - #[test] - fn test_op_parse_threebyte() { - // Doesn't matter for this test. - let encoding = encoding4(); - - // While bra and skip are 3-byte opcodes, they aren't tested here, - // but rather specially in their own function. 
- let inputs = [ - ( - constants::DW_OP_const2u, - 23, - Operation::UnsignedConstant { value: 23 }, - ), - ( - constants::DW_OP_const2s, - (-23i16) as u16, - Operation::SignedConstant { value: -23 }, - ), - ( - constants::DW_OP_call2, - 1138, - Operation::Call { - offset: DieReference::UnitRef(UnitOffset(1138)), - }, - ), - ( - constants::DW_OP_bra, - (-23i16) as u16, - Operation::Bra { target: -23 }, - ), - ( - constants::DW_OP_skip, - (-23i16) as u16, - Operation::Skip { target: -23 }, - ), - ]; - - for item in inputs.iter() { - let (opcode, arg, ref result) = *item; - check_op_parse(|s| s.D8(opcode.0).L16(arg), result, encoding); - } - } - - #[test] - fn test_op_parse_fivebyte() { - // There are some tests here that depend on address size. - let encoding = encoding4(); - - let inputs = [ - ( - constants::DW_OP_addr, - 0x1234_5678, - Operation::Address { - address: 0x1234_5678, - }, - ), - ( - constants::DW_OP_const4u, - 0x1234_5678, - Operation::UnsignedConstant { value: 0x1234_5678 }, - ), - ( - constants::DW_OP_const4s, - (-23i32) as u32, - Operation::SignedConstant { value: -23 }, - ), - ( - constants::DW_OP_call4, - 0x1234_5678, - Operation::Call { - offset: DieReference::UnitRef(UnitOffset(0x1234_5678)), - }, - ), - ( - constants::DW_OP_call_ref, - 0x1234_5678, - Operation::Call { - offset: DieReference::DebugInfoRef(DebugInfoOffset(0x1234_5678)), - }, - ), - ]; - - for item in inputs.iter() { - let (op, arg, ref expect) = *item; - check_op_parse(|s| s.D8(op.0).L32(arg), expect, encoding); - } - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_op_parse_ninebyte() { - // There are some tests here that depend on address size. 
- let encoding = encoding8(); - - let inputs = [ - ( - constants::DW_OP_addr, - 0x1234_5678_1234_5678, - Operation::Address { - address: 0x1234_5678_1234_5678, - }, - ), - ( - constants::DW_OP_const8u, - 0x1234_5678_1234_5678, - Operation::UnsignedConstant { - value: 0x1234_5678_1234_5678, - }, - ), - ( - constants::DW_OP_const8s, - (-23i64) as u64, - Operation::SignedConstant { value: -23 }, - ), - ( - constants::DW_OP_call_ref, - 0x1234_5678_1234_5678, - Operation::Call { - offset: DieReference::DebugInfoRef(DebugInfoOffset(0x1234_5678_1234_5678)), - }, - ), - ]; - - for item in inputs.iter() { - let (op, arg, ref expect) = *item; - check_op_parse(|s| s.D8(op.0).L64(arg), expect, encoding); - } - } - - #[test] - fn test_op_parse_sleb() { - // Doesn't matter for this test. - let encoding = encoding4(); - - let values = [ - -1i64, - 0, - 1, - 0x100, - 0x1eee_eeee, - 0x7fff_ffff_ffff_ffff, - -0x100, - -0x1eee_eeee, - -0x7fff_ffff_ffff_ffff, - ]; - for value in values.iter() { - let mut inputs = vec![ - ( - constants::DW_OP_consts.0, - Operation::SignedConstant { value: *value }, - ), - ( - constants::DW_OP_fbreg.0, - Operation::FrameOffset { offset: *value }, - ), - ]; - - for i in 0..32 { - inputs.push(( - constants::DW_OP_breg0.0 + i, - Operation::RegisterOffset { - register: Register(i.into()), - offset: *value, - base_type: UnitOffset(0), - }, - )); - } - - for item in inputs.iter() { - let (op, ref expect) = *item; - check_op_parse(|s| s.D8(op).sleb(*value), expect, encoding); - } - } - } - - #[test] - fn test_op_parse_uleb() { - // Doesn't matter for this test. 
- let encoding = encoding4(); - - let values = [ - 0, - 1, - 0x100, - (!0u16).into(), - 0x1eee_eeee, - 0x7fff_ffff_ffff_ffff, - !0u64, - ]; - for value in values.iter() { - let mut inputs = vec![ - ( - constants::DW_OP_constu, - Operation::UnsignedConstant { value: *value }, - ), - ( - constants::DW_OP_plus_uconst, - Operation::PlusConstant { value: *value }, - ), - ]; - - if *value <= (!0u16).into() { - inputs.push(( - constants::DW_OP_regx, - Operation::Register { - register: Register::from_u64(*value).unwrap(), - }, - )); - } - - if *value <= (!0u32).into() { - inputs.extend(&[ - ( - constants::DW_OP_addrx, - Operation::AddressIndex { - index: DebugAddrIndex(*value as usize), - }, - ), - ( - constants::DW_OP_constx, - Operation::ConstantIndex { - index: DebugAddrIndex(*value as usize), - }, - ), - ]); - } - - // FIXME - if *value < !0u64 / 8 { - inputs.push(( - constants::DW_OP_piece, - Operation::Piece { - size_in_bits: 8 * value, - bit_offset: None, - }, - )); - } - - for item in inputs.iter() { - let (op, ref expect) = *item; - let input = Section::with_endian(Endian::Little) - .D8(op.0) - .uleb(*value) - .get_contents() - .unwrap(); - check_op_parse_simple(&input, expect, encoding); - } - } - } - - #[test] - fn test_op_parse_bregx() { - // Doesn't matter for this test. - let encoding = encoding4(); - - let uvalues = [0, 1, 0x100, !0u16]; - let svalues = [ - -1i64, - 0, - 1, - 0x100, - 0x1eee_eeee, - 0x7fff_ffff_ffff_ffff, - -0x100, - -0x1eee_eeee, - -0x7fff_ffff_ffff_ffff, - ]; - - for v1 in uvalues.iter() { - for v2 in svalues.iter() { - check_op_parse( - |s| s.D8(constants::DW_OP_bregx.0).uleb((*v1).into()).sleb(*v2), - &Operation::RegisterOffset { - register: Register(*v1), - offset: *v2, - base_type: UnitOffset(0), - }, - encoding, - ); - } - } - } - - #[test] - fn test_op_parse_bit_piece() { - // Doesn't matter for this test. 
- let encoding = encoding4(); - - let values = [0, 1, 0x100, 0x1eee_eeee, 0x7fff_ffff_ffff_ffff, !0u64]; - - for v1 in values.iter() { - for v2 in values.iter() { - let input = Section::with_endian(Endian::Little) - .D8(constants::DW_OP_bit_piece.0) - .uleb(*v1) - .uleb(*v2) - .get_contents() - .unwrap(); - check_op_parse_simple( - &input, - &Operation::Piece { - size_in_bits: *v1, - bit_offset: Some(*v2), - }, - encoding, - ); - } - } - } - - #[test] - fn test_op_parse_implicit_value() { - // Doesn't matter for this test. - let encoding = encoding4(); - - let data = b"hello"; - - check_op_parse( - |s| { - s.D8(constants::DW_OP_implicit_value.0) - .uleb(data.len() as u64) - .append_bytes(&data[..]) - }, - &Operation::ImplicitValue { - data: EndianSlice::new(&data[..], LittleEndian), - }, - encoding, - ); - } - - #[test] - fn test_op_parse_const_type() { - // Doesn't matter for this test. - let encoding = encoding4(); - - let data = b"hello"; - - check_op_parse( - |s| { - s.D8(constants::DW_OP_const_type.0) - .uleb(100) - .D8(data.len() as u8) - .append_bytes(&data[..]) - }, - &Operation::TypedLiteral { - base_type: UnitOffset(100), - value: EndianSlice::new(&data[..], LittleEndian), - }, - encoding, - ); - check_op_parse( - |s| { - s.D8(constants::DW_OP_GNU_const_type.0) - .uleb(100) - .D8(data.len() as u8) - .append_bytes(&data[..]) - }, - &Operation::TypedLiteral { - base_type: UnitOffset(100), - value: EndianSlice::new(&data[..], LittleEndian), - }, - encoding, - ); - } - - #[test] - fn test_op_parse_regval_type() { - // Doesn't matter for this test. 
- let encoding = encoding4(); - - check_op_parse( - |s| s.D8(constants::DW_OP_regval_type.0).uleb(1).uleb(100), - &Operation::RegisterOffset { - register: Register(1), - offset: 0, - base_type: UnitOffset(100), - }, - encoding, - ); - check_op_parse( - |s| s.D8(constants::DW_OP_GNU_regval_type.0).uleb(1).uleb(100), - &Operation::RegisterOffset { - register: Register(1), - offset: 0, - base_type: UnitOffset(100), - }, - encoding, - ); - } - - #[test] - fn test_op_parse_deref_type() { - // Doesn't matter for this test. - let encoding = encoding4(); - - check_op_parse( - |s| s.D8(constants::DW_OP_deref_type.0).D8(8).uleb(100), - &Operation::Deref { - base_type: UnitOffset(100), - size: 8, - space: false, - }, - encoding, - ); - check_op_parse( - |s| s.D8(constants::DW_OP_GNU_deref_type.0).D8(8).uleb(100), - &Operation::Deref { - base_type: UnitOffset(100), - size: 8, - space: false, - }, - encoding, - ); - check_op_parse( - |s| s.D8(constants::DW_OP_xderef_type.0).D8(8).uleb(100), - &Operation::Deref { - base_type: UnitOffset(100), - size: 8, - space: true, - }, - encoding, - ); - } - - #[test] - fn test_op_convert() { - // Doesn't matter for this test. - let encoding = encoding4(); - - check_op_parse( - |s| s.D8(constants::DW_OP_convert.0).uleb(100), - &Operation::Convert { - base_type: UnitOffset(100), - }, - encoding, - ); - check_op_parse( - |s| s.D8(constants::DW_OP_GNU_convert.0).uleb(100), - &Operation::Convert { - base_type: UnitOffset(100), - }, - encoding, - ); - } - - #[test] - fn test_op_reinterpret() { - // Doesn't matter for this test. 
- let encoding = encoding4(); - - check_op_parse( - |s| s.D8(constants::DW_OP_reinterpret.0).uleb(100), - &Operation::Reinterpret { - base_type: UnitOffset(100), - }, - encoding, - ); - check_op_parse( - |s| s.D8(constants::DW_OP_GNU_reinterpret.0).uleb(100), - &Operation::Reinterpret { - base_type: UnitOffset(100), - }, - encoding, - ); - } - - #[test] - fn test_op_parse_implicit_pointer() { - for op in &[ - constants::DW_OP_implicit_pointer, - constants::DW_OP_GNU_implicit_pointer, - ] { - check_op_parse( - |s| s.D8(op.0).D32(0x1234_5678).sleb(0x123), - &Operation::ImplicitPointer { - value: DebugInfoOffset(0x1234_5678), - byte_offset: 0x123, - }, - encoding4(), - ); - - check_op_parse( - |s| s.D8(op.0).D64(0x1234_5678).sleb(0x123), - &Operation::ImplicitPointer { - value: DebugInfoOffset(0x1234_5678), - byte_offset: 0x123, - }, - encoding8(), - ); - - check_op_parse( - |s| s.D8(op.0).D64(0x1234_5678).sleb(0x123), - &Operation::ImplicitPointer { - value: DebugInfoOffset(0x1234_5678), - byte_offset: 0x123, - }, - Encoding { - format: Format::Dwarf32, - version: 2, - address_size: 8, - }, - ) - } - } - - #[test] - fn test_op_parse_entry_value() { - for op in &[ - constants::DW_OP_entry_value, - constants::DW_OP_GNU_entry_value, - ] { - let data = b"hello"; - check_op_parse( - |s| s.D8(op.0).uleb(data.len() as u64).append_bytes(&data[..]), - &Operation::EntryValue { - expression: EndianSlice::new(&data[..], LittleEndian), - }, - encoding4(), - ); - } - } - - #[test] - fn test_op_parse_gnu_parameter_ref() { - check_op_parse( - |s| s.D8(constants::DW_OP_GNU_parameter_ref.0).D32(0x1234_5678), - &Operation::ParameterRef { - offset: UnitOffset(0x1234_5678), - }, - encoding4(), - ) - } - - #[test] - fn test_op_wasm() { - // Doesn't matter for this test. 
- let encoding = encoding4(); - - check_op_parse( - |s| s.D8(constants::DW_OP_WASM_location.0).D8(0).uleb(1000), - &Operation::WasmLocal { index: 1000 }, - encoding, - ); - check_op_parse( - |s| s.D8(constants::DW_OP_WASM_location.0).D8(1).uleb(1000), - &Operation::WasmGlobal { index: 1000 }, - encoding, - ); - check_op_parse( - |s| s.D8(constants::DW_OP_WASM_location.0).D8(2).uleb(1000), - &Operation::WasmStack { index: 1000 }, - encoding, - ); - check_op_parse( - |s| s.D8(constants::DW_OP_WASM_location.0).D8(3).D32(1000), - &Operation::WasmGlobal { index: 1000 }, - encoding, - ); - } - - enum AssemblerEntry { - Op(constants::DwOp), - Mark(u8), - Branch(u8), - U8(u8), - U16(u16), - U32(u32), - U64(u64), - Uleb(u64), - Sleb(u64), - } - - fn assemble(entries: &[AssemblerEntry]) -> Vec { - let mut result = Vec::new(); - - struct Marker(Option, Vec); - - let mut markers = Vec::new(); - for _ in 0..256 { - markers.push(Marker(None, Vec::new())); - } - - fn write(stack: &mut Vec, index: usize, mut num: u64, nbytes: u8) { - for i in 0..nbytes as usize { - stack[index + i] = (num & 0xff) as u8; - num >>= 8; - } - } - - fn push(stack: &mut Vec, num: u64, nbytes: u8) { - let index = stack.len(); - for _ in 0..nbytes { - stack.push(0); - } - write(stack, index, num, nbytes); - } - - for item in entries { - match *item { - AssemblerEntry::Op(op) => result.push(op.0), - AssemblerEntry::Mark(num) => { - assert!(markers[num as usize].0.is_none()); - markers[num as usize].0 = Some(result.len()); - } - AssemblerEntry::Branch(num) => { - markers[num as usize].1.push(result.len()); - push(&mut result, 0, 2); - } - AssemblerEntry::U8(num) => result.push(num), - AssemblerEntry::U16(num) => push(&mut result, u64::from(num), 2), - AssemblerEntry::U32(num) => push(&mut result, u64::from(num), 4), - AssemblerEntry::U64(num) => push(&mut result, num, 8), - AssemblerEntry::Uleb(num) => { - leb128::write::unsigned(&mut result, num).unwrap(); - } - AssemblerEntry::Sleb(num) => { - 
leb128::write::signed(&mut result, num as i64).unwrap(); - } - } - } - - // Update all the branches. - for marker in markers { - if let Some(offset) = marker.0 { - for branch_offset in marker.1 { - let delta = offset.wrapping_sub(branch_offset + 2) as u64; - write(&mut result, branch_offset, delta, 2); - } - } - } - - result - } - - fn check_eval_with_args( - program: &[AssemblerEntry], - expect: Result<&[Piece>]>, - encoding: Encoding, - object_address: Option, - initial_value: Option, - max_iterations: Option, - f: F, - ) where - for<'a> F: Fn( - &mut Evaluation>, - EvaluationResult>, - ) -> Result>>, - { - let bytes = assemble(program); - let bytes = EndianSlice::new(&bytes, LittleEndian); - - let mut eval = Evaluation::new(bytes, encoding); - - if let Some(val) = object_address { - eval.set_object_address(val); - } - if let Some(val) = initial_value { - eval.set_initial_value(val); - } - if let Some(val) = max_iterations { - eval.set_max_iterations(val); - } - - let result = match eval.evaluate() { - Err(e) => Err(e), - Ok(r) => f(&mut eval, r), - }; - - match (result, expect) { - (Ok(EvaluationResult::Complete), Ok(pieces)) => { - let vec = eval.result(); - assert_eq!(vec.len(), pieces.len()); - for i in 0..pieces.len() { - assert_eq!(vec[i], pieces[i]); - } - } - (Err(f1), Err(f2)) => { - assert_eq!(f1, f2); - } - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - } - - fn check_eval( - program: &[AssemblerEntry], - expect: Result<&[Piece>]>, - encoding: Encoding, - ) { - check_eval_with_args(program, expect, encoding, None, None, None, |_, result| { - Ok(result) - }); - } - - #[test] - fn test_eval_arith() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. - use self::AssemblerEntry::*; - use crate::constants::*; - - // Indices of marks in the assembly. 
- let done = 0; - let fail = 1; - - #[rustfmt::skip] - let program = [ - Op(DW_OP_const1u), U8(23), - Op(DW_OP_const1s), U8((-23i8) as u8), - Op(DW_OP_plus), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const2u), U16(23), - Op(DW_OP_const2s), U16((-23i16) as u16), - Op(DW_OP_plus), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const4u), U32(0x1111_2222), - Op(DW_OP_const4s), U32((-0x1111_2222i32) as u32), - Op(DW_OP_plus), - Op(DW_OP_bra), Branch(fail), - - // Plus should overflow. - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_const1u), U8(1), - Op(DW_OP_plus), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_plus_uconst), Uleb(1), - Op(DW_OP_bra), Branch(fail), - - // Minus should underflow. - Op(DW_OP_const1s), U8(0), - Op(DW_OP_const1u), U8(1), - Op(DW_OP_minus), - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_abs), - Op(DW_OP_const1u), U8(1), - Op(DW_OP_minus), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const4u), U32(0xf078_fffe), - Op(DW_OP_const4u), U32(0x0f87_0001), - Op(DW_OP_and), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const4u), U32(0xf078_fffe), - Op(DW_OP_const4u), U32(0xf000_00fe), - Op(DW_OP_and), - Op(DW_OP_const4u), U32(0xf000_00fe), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - // Division is signed. - Op(DW_OP_const1s), U8(0xfe), - Op(DW_OP_const1s), U8(2), - Op(DW_OP_div), - Op(DW_OP_plus_uconst), Uleb(1), - Op(DW_OP_bra), Branch(fail), - - // Mod is unsigned. - Op(DW_OP_const1s), U8(0xfd), - Op(DW_OP_const1s), U8(2), - Op(DW_OP_mod), - Op(DW_OP_neg), - Op(DW_OP_plus_uconst), Uleb(1), - Op(DW_OP_bra), Branch(fail), - - // Overflow is defined for multiplication. 
- Op(DW_OP_const4u), U32(0x8000_0001), - Op(DW_OP_lit2), - Op(DW_OP_mul), - Op(DW_OP_lit2), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const4u), U32(0xf0f0_f0f0), - Op(DW_OP_const4u), U32(0xf0f0_f0f0), - Op(DW_OP_xor), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const4u), U32(0xf0f0_f0f0), - Op(DW_OP_const4u), U32(0x0f0f_0f0f), - Op(DW_OP_or), - Op(DW_OP_not), - Op(DW_OP_bra), Branch(fail), - - // In 32 bit mode, values are truncated. - Op(DW_OP_const8u), U64(0xffff_ffff_0000_0000), - Op(DW_OP_lit2), - Op(DW_OP_div), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1u), U8(0xff), - Op(DW_OP_lit1), - Op(DW_OP_shl), - Op(DW_OP_const2u), U16(0x1fe), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1u), U8(0xff), - Op(DW_OP_const1u), U8(50), - Op(DW_OP_shl), - Op(DW_OP_bra), Branch(fail), - - // Absurd shift. - Op(DW_OP_const1u), U8(0xff), - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_shl), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_lit1), - Op(DW_OP_shr), - Op(DW_OP_const4u), U32(0x7fff_ffff), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_const1u), U8(0xff), - Op(DW_OP_shr), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_lit1), - Op(DW_OP_shra), - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_const1u), U8(0xff), - Op(DW_OP_shra), - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - // Success. 
- Op(DW_OP_lit0), - Op(DW_OP_nop), - Op(DW_OP_skip), Branch(done), - - Mark(fail), - Op(DW_OP_lit1), - - Mark(done), - Op(DW_OP_stack_value), - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(0), - }, - }]; - - check_eval(&program, Ok(&result), encoding4()); - } - - #[test] - fn test_eval_arith64() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. - use self::AssemblerEntry::*; - use crate::constants::*; - - // Indices of marks in the assembly. - let done = 0; - let fail = 1; - - #[rustfmt::skip] - let program = [ - Op(DW_OP_const8u), U64(0x1111_2222_3333_4444), - Op(DW_OP_const8s), U64((-0x1111_2222_3333_4444i64) as u64), - Op(DW_OP_plus), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_constu), Uleb(0x1111_2222_3333_4444), - Op(DW_OP_consts), Sleb((-0x1111_2222_3333_4444i64) as u64), - Op(DW_OP_plus), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_lit1), - Op(DW_OP_plus_uconst), Uleb(!0u64), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_lit1), - Op(DW_OP_neg), - Op(DW_OP_not), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const8u), U64(0x8000_0000_0000_0000), - Op(DW_OP_const1u), U8(63), - Op(DW_OP_shr), - Op(DW_OP_lit1), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const8u), U64(0x8000_0000_0000_0000), - Op(DW_OP_const1u), U8(62), - Op(DW_OP_shra), - Op(DW_OP_plus_uconst), Uleb(2), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_lit1), - Op(DW_OP_const1u), U8(63), - Op(DW_OP_shl), - Op(DW_OP_const8u), U64(0x8000_0000_0000_0000), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - // Success. 
- Op(DW_OP_lit0), - Op(DW_OP_nop), - Op(DW_OP_skip), Branch(done), - - Mark(fail), - Op(DW_OP_lit1), - - Mark(done), - Op(DW_OP_stack_value), - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(0), - }, - }]; - - check_eval(&program, Ok(&result), encoding8()); - } - - #[test] - fn test_eval_compare() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. - use self::AssemblerEntry::*; - use crate::constants::*; - - // Indices of marks in the assembly. - let done = 0; - let fail = 1; - - #[rustfmt::skip] - let program = [ - // Comparisons are signed. - Op(DW_OP_const1s), U8(1), - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_lt), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_const1s), U8(1), - Op(DW_OP_gt), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(1), - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_le), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_const1s), U8(1), - Op(DW_OP_ge), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const1s), U8(0xff), - Op(DW_OP_const1s), U8(1), - Op(DW_OP_eq), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_const4s), U32(1), - Op(DW_OP_const1s), U8(1), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - // Success. - Op(DW_OP_lit0), - Op(DW_OP_nop), - Op(DW_OP_skip), Branch(done), - - Mark(fail), - Op(DW_OP_lit1), - - Mark(done), - Op(DW_OP_stack_value), - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(0), - }, - }]; - - check_eval(&program, Ok(&result), encoding4()); - } - - #[test] - fn test_eval_stack() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. 
- use self::AssemblerEntry::*; - use crate::constants::*; - - #[rustfmt::skip] - let program = [ - Op(DW_OP_lit17), // -- 17 - Op(DW_OP_dup), // -- 17 17 - Op(DW_OP_over), // -- 17 17 17 - Op(DW_OP_minus), // -- 17 0 - Op(DW_OP_swap), // -- 0 17 - Op(DW_OP_dup), // -- 0 17 17 - Op(DW_OP_plus_uconst), Uleb(1), // -- 0 17 18 - Op(DW_OP_rot), // -- 18 0 17 - Op(DW_OP_pick), U8(2), // -- 18 0 17 18 - Op(DW_OP_pick), U8(3), // -- 18 0 17 18 18 - Op(DW_OP_minus), // -- 18 0 17 0 - Op(DW_OP_drop), // -- 18 0 17 - Op(DW_OP_swap), // -- 18 17 0 - Op(DW_OP_drop), // -- 18 17 - Op(DW_OP_minus), // -- 1 - Op(DW_OP_stack_value), - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(1), - }, - }]; - - check_eval(&program, Ok(&result), encoding4()); - } - - #[test] - fn test_eval_lit_and_reg() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. - use self::AssemblerEntry::*; - use crate::constants::*; - - let mut program = Vec::new(); - program.push(Op(DW_OP_lit0)); - for i in 0..32 { - program.push(Op(DwOp(DW_OP_lit0.0 + i))); - program.push(Op(DwOp(DW_OP_breg0.0 + i))); - program.push(Sleb(u64::from(i))); - program.push(Op(DW_OP_plus)); - program.push(Op(DW_OP_plus)); - } - - program.push(Op(DW_OP_bregx)); - program.push(Uleb(0x1234)); - program.push(Sleb(0x1234)); - program.push(Op(DW_OP_plus)); - - program.push(Op(DW_OP_stack_value)); - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(496), - }, - }]; - - check_eval_with_args( - &program, - Ok(&result), - encoding4(), - None, - None, - None, - |eval, mut result| { - while result != EvaluationResult::Complete { - result = eval.resume_with_register(match result { - EvaluationResult::RequiresRegister { - register, - base_type, - } => { - assert_eq!(base_type, UnitOffset(0)); - Value::Generic(u64::from(register.0).wrapping_neg()) - } - _ 
=> panic!(), - })?; - } - Ok(result) - }, - ); - } - - #[test] - fn test_eval_memory() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. - use self::AssemblerEntry::*; - use crate::constants::*; - - // Indices of marks in the assembly. - let done = 0; - let fail = 1; - - #[rustfmt::skip] - let program = [ - Op(DW_OP_addr), U32(0x7fff_ffff), - Op(DW_OP_deref), - Op(DW_OP_const4u), U32(0xffff_fffc), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_addr), U32(0x7fff_ffff), - Op(DW_OP_deref_size), U8(2), - Op(DW_OP_const4u), U32(0xfffc), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_lit1), - Op(DW_OP_addr), U32(0x7fff_ffff), - Op(DW_OP_xderef), - Op(DW_OP_const4u), U32(0xffff_fffd), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_lit1), - Op(DW_OP_addr), U32(0x7fff_ffff), - Op(DW_OP_xderef_size), U8(2), - Op(DW_OP_const4u), U32(0xfffd), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_lit17), - Op(DW_OP_form_tls_address), - Op(DW_OP_constu), Uleb(!17), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_lit17), - Op(DW_OP_GNU_push_tls_address), - Op(DW_OP_constu), Uleb(!17), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_addrx), Uleb(0x10), - Op(DW_OP_deref), - Op(DW_OP_const4u), U32(0x4040), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - Op(DW_OP_constx), Uleb(17), - Op(DW_OP_form_tls_address), - Op(DW_OP_constu), Uleb(!27), - Op(DW_OP_ne), - Op(DW_OP_bra), Branch(fail), - - // Success. 
- Op(DW_OP_lit0), - Op(DW_OP_nop), - Op(DW_OP_skip), Branch(done), - - Mark(fail), - Op(DW_OP_lit1), - - Mark(done), - Op(DW_OP_stack_value), - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(0), - }, - }]; - - check_eval_with_args( - &program, - Ok(&result), - encoding4(), - None, - None, - None, - |eval, mut result| { - while result != EvaluationResult::Complete { - result = match result { - EvaluationResult::RequiresMemory { - address, - size, - space, - base_type, - } => { - assert_eq!(base_type, UnitOffset(0)); - let mut v = address << 2; - if let Some(value) = space { - v += value; - } - v &= (1u64 << (8 * size)) - 1; - eval.resume_with_memory(Value::Generic(v))? - } - EvaluationResult::RequiresTls(slot) => eval.resume_with_tls(!slot)?, - EvaluationResult::RequiresRelocatedAddress(address) => { - eval.resume_with_relocated_address(address)? - } - EvaluationResult::RequiresIndexedAddress { index, relocate } => { - if relocate { - eval.resume_with_indexed_address(0x1000 + index.0 as u64)? - } else { - eval.resume_with_indexed_address(10 + index.0 as u64)? - } - } - _ => panic!(), - }; - } - - Ok(result) - }, - ); - } - - #[test] - fn test_eval_register() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. - use self::AssemblerEntry::*; - use crate::constants::*; - - for i in 0..32 { - #[rustfmt::skip] - let program = [ - Op(DwOp(DW_OP_reg0.0 + i)), - // Included only in the "bad" run. 
- Op(DW_OP_lit23), - ]; - let ok_result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Register { - register: Register(i.into()), - }, - }]; - - check_eval(&program[..1], Ok(&ok_result), encoding4()); - - check_eval( - &program, - Err(Error::InvalidExpressionTerminator(1)), - encoding4(), - ); - } - - #[rustfmt::skip] - let program = [ - Op(DW_OP_regx), Uleb(0x1234) - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Register { - register: Register(0x1234), - }, - }]; - - check_eval(&program, Ok(&result), encoding4()); - } - - #[test] - fn test_eval_context() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. - use self::AssemblerEntry::*; - use crate::constants::*; - - // Test `frame_base` and `call_frame_cfa` callbacks. - #[rustfmt::skip] - let program = [ - Op(DW_OP_fbreg), Sleb((-8i8) as u64), - Op(DW_OP_call_frame_cfa), - Op(DW_OP_plus), - Op(DW_OP_neg), - Op(DW_OP_stack_value) - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(9), - }, - }]; - - check_eval_with_args( - &program, - Ok(&result), - encoding8(), - None, - None, - None, - |eval, result| { - match result { - EvaluationResult::RequiresFrameBase => {} - _ => panic!(), - }; - match eval.resume_with_frame_base(0x0123_4567_89ab_cdef)? { - EvaluationResult::RequiresCallFrameCfa => {} - _ => panic!(), - }; - eval.resume_with_call_frame_cfa(0xfedc_ba98_7654_3210) - }, - ); - - // Test `evaluate_entry_value` callback. 
- #[rustfmt::skip] - let program = [ - Op(DW_OP_entry_value), Uleb(8), U64(0x1234_5678), - Op(DW_OP_stack_value) - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(0x1234_5678), - }, - }]; - - check_eval_with_args( - &program, - Ok(&result), - encoding8(), - None, - None, - None, - |eval, result| { - let entry_value = match result { - EvaluationResult::RequiresEntryValue(mut expression) => { - expression.0.read_u64()? - } - _ => panic!(), - }; - eval.resume_with_entry_value(Value::Generic(entry_value)) - }, - ); - - // Test missing `object_address` field. - #[rustfmt::skip] - let program = [ - Op(DW_OP_push_object_address), - ]; - - check_eval_with_args( - &program, - Err(Error::InvalidPushObjectAddress), - encoding4(), - None, - None, - None, - |_, _| panic!(), - ); - - // Test `object_address` field. - #[rustfmt::skip] - let program = [ - Op(DW_OP_push_object_address), - Op(DW_OP_stack_value), - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(0xff), - }, - }]; - - check_eval_with_args( - &program, - Ok(&result), - encoding8(), - Some(0xff), - None, - None, - |_, result| Ok(result), - ); - - // Test `initial_value` field. - #[rustfmt::skip] - let program = [ - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Address { - address: 0x1234_5678, - }, - }]; - - check_eval_with_args( - &program, - Ok(&result), - encoding8(), - None, - Some(0x1234_5678), - None, - |_, result| Ok(result), - ); - } - - #[test] - fn test_eval_empty_stack() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. 
- use self::AssemblerEntry::*; - use crate::constants::*; - - #[rustfmt::skip] - let program = [ - Op(DW_OP_stack_value) - ]; - - check_eval(&program, Err(Error::NotEnoughStackItems), encoding4()); - } - - #[test] - fn test_eval_call() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. - use self::AssemblerEntry::*; - use crate::constants::*; - - #[rustfmt::skip] - let program = [ - Op(DW_OP_lit23), - Op(DW_OP_call2), U16(0x7755), - Op(DW_OP_call4), U32(0x7755_aaee), - Op(DW_OP_call_ref), U32(0x7755_aaee), - Op(DW_OP_stack_value) - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(23), - }, - }]; - - check_eval_with_args( - &program, - Ok(&result), - encoding4(), - None, - None, - None, - |eval, result| { - let buf = EndianSlice::new(&[], LittleEndian); - match result { - EvaluationResult::RequiresAtLocation(_) => {} - _ => panic!(), - }; - - eval.resume_with_at_location(buf)?; - - match result { - EvaluationResult::RequiresAtLocation(_) => {} - _ => panic!(), - }; - - eval.resume_with_at_location(buf)?; - - match result { - EvaluationResult::RequiresAtLocation(_) => {} - _ => panic!(), - }; - - eval.resume_with_at_location(buf) - }, - ); - - // DW_OP_lit2 DW_OP_mul - const SUBR: &[u8] = &[0x32, 0x1e]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { - value: Value::Generic(184), - }, - }]; - - check_eval_with_args( - &program, - Ok(&result), - encoding4(), - None, - None, - None, - |eval, result| { - let buf = EndianSlice::new(SUBR, LittleEndian); - match result { - EvaluationResult::RequiresAtLocation(_) => {} - _ => panic!(), - }; - - eval.resume_with_at_location(buf)?; - - match result { - EvaluationResult::RequiresAtLocation(_) => {} - _ => panic!(), - }; - - eval.resume_with_at_location(buf)?; - - match result { - EvaluationResult::RequiresAtLocation(_) => {} - _ => panic!(), - }; - - 
eval.resume_with_at_location(buf) - }, - ); - } - - #[test] - fn test_eval_pieces() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. - use self::AssemblerEntry::*; - use crate::constants::*; - - // Example from DWARF 2.6.1.3. - #[rustfmt::skip] - let program = [ - Op(DW_OP_reg3), - Op(DW_OP_piece), Uleb(4), - Op(DW_OP_reg4), - Op(DW_OP_piece), Uleb(2), - ]; - - let result = [ - Piece { - size_in_bits: Some(32), - bit_offset: None, - location: Location::Register { - register: Register(3), - }, - }, - Piece { - size_in_bits: Some(16), - bit_offset: None, - location: Location::Register { - register: Register(4), - }, - }, - ]; - - check_eval(&program, Ok(&result), encoding4()); - - // Example from DWARF 2.6.1.3 (but hacked since dealing with fbreg - // in the tests is a pain). - #[rustfmt::skip] - let program = [ - Op(DW_OP_reg0), - Op(DW_OP_piece), Uleb(4), - Op(DW_OP_piece), Uleb(4), - Op(DW_OP_addr), U32(0x7fff_ffff), - Op(DW_OP_piece), Uleb(4), - ]; - - let result = [ - Piece { - size_in_bits: Some(32), - bit_offset: None, - location: Location::Register { - register: Register(0), - }, - }, - Piece { - size_in_bits: Some(32), - bit_offset: None, - location: Location::Empty, - }, - Piece { - size_in_bits: Some(32), - bit_offset: None, - location: Location::Address { - address: 0x7fff_ffff, - }, - }, - ]; - - check_eval_with_args( - &program, - Ok(&result), - encoding4(), - None, - None, - None, - |eval, mut result| { - while result != EvaluationResult::Complete { - result = match result { - EvaluationResult::RequiresRelocatedAddress(address) => { - eval.resume_with_relocated_address(address)? 
- } - _ => panic!(), - }; - } - - Ok(result) - }, - ); - - #[rustfmt::skip] - let program = [ - Op(DW_OP_implicit_value), Uleb(5), - U8(23), U8(24), U8(25), U8(26), U8(0), - ]; - - const BYTES: &[u8] = &[23, 24, 25, 26, 0]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Bytes { - value: EndianSlice::new(BYTES, LittleEndian), - }, - }]; - - check_eval(&program, Ok(&result), encoding4()); - - #[rustfmt::skip] - let program = [ - Op(DW_OP_lit7), - Op(DW_OP_stack_value), - Op(DW_OP_bit_piece), Uleb(5), Uleb(0), - Op(DW_OP_bit_piece), Uleb(3), Uleb(0), - ]; - - let result = [ - Piece { - size_in_bits: Some(5), - bit_offset: Some(0), - location: Location::Value { - value: Value::Generic(7), - }, - }, - Piece { - size_in_bits: Some(3), - bit_offset: Some(0), - location: Location::Empty, - }, - ]; - - check_eval(&program, Ok(&result), encoding4()); - - #[rustfmt::skip] - let program = [ - Op(DW_OP_lit7), - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Address { address: 7 }, - }]; - - check_eval(&program, Ok(&result), encoding4()); - - #[rustfmt::skip] - let program = [ - Op(DW_OP_implicit_pointer), U32(0x1234_5678), Sleb(0x123), - ]; - - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::ImplicitPointer { - value: DebugInfoOffset(0x1234_5678), - byte_offset: 0x123, - }, - }]; - - check_eval(&program, Ok(&result), encoding4()); - - #[rustfmt::skip] - let program = [ - Op(DW_OP_reg3), - Op(DW_OP_piece), Uleb(4), - Op(DW_OP_reg4), - ]; - - check_eval(&program, Err(Error::InvalidPiece), encoding4()); - - #[rustfmt::skip] - let program = [ - Op(DW_OP_reg3), - Op(DW_OP_piece), Uleb(4), - Op(DW_OP_lit0), - ]; - - check_eval(&program, Err(Error::InvalidPiece), encoding4()); - } - - #[test] - fn test_eval_max_iterations() { - // It's nice if an operation and its arguments can fit on a single - // line in the test program. 
- use self::AssemblerEntry::*; - use crate::constants::*; - - #[rustfmt::skip] - let program = [ - Mark(1), - Op(DW_OP_skip), Branch(1), - ]; - - check_eval_with_args( - &program, - Err(Error::TooManyIterations), - encoding4(), - None, - None, - Some(150), - |_, _| panic!(), - ); - } - - #[test] - fn test_eval_typed_stack() { - use self::AssemblerEntry::*; - use crate::constants::*; - - let base_types = [ - ValueType::Generic, - ValueType::U16, - ValueType::U32, - ValueType::F32, - ]; - - // TODO: convert, reinterpret - #[rustfmt::skip] - let tests = [ - ( - &[ - Op(DW_OP_const_type), Uleb(1), U8(2), U16(0x1234), - Op(DW_OP_stack_value), - ][..], - Value::U16(0x1234), - ), - ( - &[ - Op(DW_OP_regval_type), Uleb(0x1234), Uleb(1), - Op(DW_OP_stack_value), - ][..], - Value::U16(0x2340), - ), - ( - &[ - Op(DW_OP_addr), U32(0x7fff_ffff), - Op(DW_OP_deref_type), U8(2), Uleb(1), - Op(DW_OP_stack_value), - ][..], - Value::U16(0xfff0), - ), - ( - &[ - Op(DW_OP_lit1), - Op(DW_OP_addr), U32(0x7fff_ffff), - Op(DW_OP_xderef_type), U8(2), Uleb(1), - Op(DW_OP_stack_value), - ][..], - Value::U16(0xfff1), - ), - ( - &[ - Op(DW_OP_const_type), Uleb(1), U8(2), U16(0x1234), - Op(DW_OP_convert), Uleb(2), - Op(DW_OP_stack_value), - ][..], - Value::U32(0x1234), - ), - ( - &[ - Op(DW_OP_const_type), Uleb(2), U8(4), U32(0x3f80_0000), - Op(DW_OP_reinterpret), Uleb(3), - Op(DW_OP_stack_value), - ][..], - Value::F32(1.0), - ), - ]; - for &(program, value) in &tests { - let result = [Piece { - size_in_bits: None, - bit_offset: None, - location: Location::Value { value }, - }]; - - check_eval_with_args( - program, - Ok(&result), - encoding4(), - None, - None, - None, - |eval, mut result| { - while result != EvaluationResult::Complete { - result = match result { - EvaluationResult::RequiresMemory { - address, - size, - space, - base_type, - } => { - let mut v = address << 4; - if let Some(value) = space { - v += value; - } - v &= (1u64 << (8 * size)) - 1; - let v = 
Value::from_u64(base_types[base_type.0], v)?; - eval.resume_with_memory(v)? - } - EvaluationResult::RequiresRegister { - register, - base_type, - } => { - let v = Value::from_u64( - base_types[base_type.0], - u64::from(register.0) << 4, - )?; - eval.resume_with_register(v)? - } - EvaluationResult::RequiresBaseType(offset) => { - eval.resume_with_base_type(base_types[offset.0])? - } - EvaluationResult::RequiresRelocatedAddress(address) => { - eval.resume_with_relocated_address(address)? - } - _ => panic!("Unexpected result {:?}", result), - } - } - Ok(result) - }, - ); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/pubnames.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/pubnames.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/pubnames.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/pubnames.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,141 +0,0 @@ -use crate::common::{DebugInfoOffset, SectionId}; -use crate::endianity::Endianity; -use crate::read::lookup::{DebugLookup, LookupEntryIter, PubStuffEntry, PubStuffParser}; -use crate::read::{EndianSlice, Reader, Result, Section, UnitOffset}; - -/// A single parsed pubname. -#[derive(Debug, Clone)] -pub struct PubNamesEntry { - unit_header_offset: DebugInfoOffset, - die_offset: UnitOffset, - name: R, -} - -impl PubNamesEntry { - /// Returns the name this entry refers to. - pub fn name(&self) -> &R { - &self.name - } - - /// Returns the offset into the .debug_info section for the header of the compilation unit - /// which contains this name. - pub fn unit_header_offset(&self) -> DebugInfoOffset { - self.unit_header_offset - } - - /// Returns the offset into the compilation unit for the debugging information entry which - /// has this name. 
- pub fn die_offset(&self) -> UnitOffset { - self.die_offset - } -} - -impl PubStuffEntry for PubNamesEntry { - fn new( - die_offset: UnitOffset, - name: R, - unit_header_offset: DebugInfoOffset, - ) -> Self { - PubNamesEntry { - unit_header_offset, - die_offset, - name, - } - } -} - -/// The `DebugPubNames` struct represents the DWARF public names information -/// found in the `.debug_pubnames` section. -#[derive(Debug, Clone)] -pub struct DebugPubNames(DebugLookup>>); - -impl<'input, Endian> DebugPubNames> -where - Endian: Endianity, -{ - /// Construct a new `DebugPubNames` instance from the data in the `.debug_pubnames` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_pubnames` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. - /// - /// ``` - /// use gimli::{DebugPubNames, LittleEndian}; - /// - /// # let buf = []; - /// # let read_debug_pubnames_section_somehow = || &buf; - /// let debug_pubnames = - /// DebugPubNames::new(read_debug_pubnames_section_somehow(), LittleEndian); - /// ``` - pub fn new(debug_pubnames_section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(debug_pubnames_section, endian)) - } -} - -impl DebugPubNames { - /// Iterate the pubnames in the `.debug_pubnames` section. 
- /// - /// ``` - /// use gimli::{DebugPubNames, EndianSlice, LittleEndian}; - /// - /// # let buf = []; - /// # let read_debug_pubnames_section_somehow = || &buf; - /// let debug_pubnames = - /// DebugPubNames::new(read_debug_pubnames_section_somehow(), LittleEndian); - /// - /// let mut iter = debug_pubnames.items(); - /// while let Some(pubname) = iter.next().unwrap() { - /// println!("pubname {} found!", pubname.name().to_string_lossy()); - /// } - /// ``` - pub fn items(&self) -> PubNamesEntryIter { - PubNamesEntryIter(self.0.items()) - } -} - -impl Section for DebugPubNames { - fn id() -> SectionId { - SectionId::DebugPubNames - } - - fn reader(&self) -> &R { - self.0.reader() - } -} - -impl From for DebugPubNames { - fn from(debug_pubnames_section: R) -> Self { - DebugPubNames(DebugLookup::from(debug_pubnames_section)) - } -} - -/// An iterator over the pubnames from a `.debug_pubnames` section. -/// -/// Can be [used with -/// `FallibleIterator`](./index.html#using-with-fallibleiterator). -#[derive(Debug, Clone)] -pub struct PubNamesEntryIter(LookupEntryIter>>); - -impl PubNamesEntryIter { - /// Advance the iterator and return the next pubname. - /// - /// Returns the newly parsed pubname as `Ok(Some(pubname))`. Returns - /// `Ok(None)` when iteration is complete and all pubnames have already been - /// parsed and yielded. If an error occurs while parsing the next pubname, - /// then this error is returned as `Err(e)`, and all subsequent calls return - /// `Ok(None)`. 
- pub fn next(&mut self) -> Result>> { - self.0.next() - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for PubNamesEntryIter { - type Item = PubNamesEntry; - type Error = crate::read::Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - self.0.next() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/pubtypes.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/pubtypes.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/pubtypes.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/pubtypes.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,141 +0,0 @@ -use crate::common::{DebugInfoOffset, SectionId}; -use crate::endianity::Endianity; -use crate::read::lookup::{DebugLookup, LookupEntryIter, PubStuffEntry, PubStuffParser}; -use crate::read::{EndianSlice, Reader, Result, Section, UnitOffset}; - -/// A single parsed pubtype. -#[derive(Debug, Clone)] -pub struct PubTypesEntry { - unit_header_offset: DebugInfoOffset, - die_offset: UnitOffset, - name: R, -} - -impl PubTypesEntry { - /// Returns the name of the type this entry refers to. - pub fn name(&self) -> &R { - &self.name - } - - /// Returns the offset into the .debug_info section for the header of the compilation unit - /// which contains the type with this name. - pub fn unit_header_offset(&self) -> DebugInfoOffset { - self.unit_header_offset - } - - /// Returns the offset into the compilation unit for the debugging information entry which - /// the type with this name. - pub fn die_offset(&self) -> UnitOffset { - self.die_offset - } -} - -impl PubStuffEntry for PubTypesEntry { - fn new( - die_offset: UnitOffset, - name: R, - unit_header_offset: DebugInfoOffset, - ) -> Self { - PubTypesEntry { - unit_header_offset, - die_offset, - name, - } - } -} - -/// The `DebugPubTypes` struct represents the DWARF public types information -/// found in the `.debug_info` section. 
-#[derive(Debug, Clone)] -pub struct DebugPubTypes(DebugLookup>>); - -impl<'input, Endian> DebugPubTypes> -where - Endian: Endianity, -{ - /// Construct a new `DebugPubTypes` instance from the data in the `.debug_pubtypes` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_pubtypes` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. - /// - /// ``` - /// use gimli::{DebugPubTypes, LittleEndian}; - /// - /// # let buf = []; - /// # let read_debug_pubtypes_somehow = || &buf; - /// let debug_pubtypes = - /// DebugPubTypes::new(read_debug_pubtypes_somehow(), LittleEndian); - /// ``` - pub fn new(debug_pubtypes_section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(debug_pubtypes_section, endian)) - } -} - -impl DebugPubTypes { - /// Iterate the pubtypes in the `.debug_pubtypes` section. - /// - /// ``` - /// use gimli::{DebugPubTypes, EndianSlice, LittleEndian}; - /// - /// # let buf = []; - /// # let read_debug_pubtypes_section_somehow = || &buf; - /// let debug_pubtypes = - /// DebugPubTypes::new(read_debug_pubtypes_section_somehow(), LittleEndian); - /// - /// let mut iter = debug_pubtypes.items(); - /// while let Some(pubtype) = iter.next().unwrap() { - /// println!("pubtype {} found!", pubtype.name().to_string_lossy()); - /// } - /// ``` - pub fn items(&self) -> PubTypesEntryIter { - PubTypesEntryIter(self.0.items()) - } -} - -impl Section for DebugPubTypes { - fn id() -> SectionId { - SectionId::DebugPubTypes - } - - fn reader(&self) -> &R { - self.0.reader() - } -} - -impl From for DebugPubTypes { - fn from(debug_pubtypes_section: R) -> Self { - DebugPubTypes(DebugLookup::from(debug_pubtypes_section)) - } -} - -/// An iterator over the pubtypes from a `.debug_pubtypes` section. -/// -/// Can be [used with -/// `FallibleIterator`](./index.html#using-with-fallibleiterator). 
-#[derive(Debug, Clone)] -pub struct PubTypesEntryIter(LookupEntryIter>>); - -impl PubTypesEntryIter { - /// Advance the iterator and return the next pubtype. - /// - /// Returns the newly parsed pubtype as `Ok(Some(pubtype))`. Returns - /// `Ok(None)` when iteration is complete and all pubtypes have already been - /// parsed and yielded. If an error occurs while parsing the next pubtype, - /// then this error is returned as `Err(e)`, and all subsequent calls return - /// `Ok(None)`. - pub fn next(&mut self) -> Result>> { - self.0.next() - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for PubTypesEntryIter { - type Item = PubTypesEntry; - type Error = crate::read::Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - self.0.next() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/reader.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/reader.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/reader.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/reader.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,502 +0,0 @@ -#[cfg(feature = "read")] -use alloc::borrow::Cow; -use core::convert::TryInto; -use core::fmt::Debug; -use core::hash::Hash; -use core::ops::{Add, AddAssign, Sub}; - -use crate::common::Format; -use crate::endianity::Endianity; -use crate::leb128; -use crate::read::{Error, Result}; - -/// An identifier for an offset within a section reader. -/// -/// This is used for error reporting. The meaning of this value is specific to -/// each reader implementation. The values should be chosen to be unique amongst -/// all readers. If values are not unique then errors may point to the wrong reader. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct ReaderOffsetId(pub u64); - -/// A trait for offsets with a DWARF section. -/// -/// This allows consumers to choose a size that is appropriate for their address space. 
-pub trait ReaderOffset: - Debug + Copy + Eq + Ord + Hash + Add + AddAssign + Sub -{ - /// Convert a u8 to an offset. - fn from_u8(offset: u8) -> Self; - - /// Convert a u16 to an offset. - fn from_u16(offset: u16) -> Self; - - /// Convert an i16 to an offset. - fn from_i16(offset: i16) -> Self; - - /// Convert a u32 to an offset. - fn from_u32(offset: u32) -> Self; - - /// Convert a u64 to an offset. - /// - /// Returns `Error::UnsupportedOffset` if the value is too large. - fn from_u64(offset: u64) -> Result; - - /// Convert an offset to a u64. - fn into_u64(self) -> u64; - - /// Wrapping (modular) addition. Computes `self + other`. - fn wrapping_add(self, other: Self) -> Self; - - /// Checked subtraction. Computes `self - other`. - fn checked_sub(self, other: Self) -> Option; -} - -impl ReaderOffset for u64 { - #[inline] - fn from_u8(offset: u8) -> Self { - u64::from(offset) - } - - #[inline] - fn from_u16(offset: u16) -> Self { - u64::from(offset) - } - - #[inline] - fn from_i16(offset: i16) -> Self { - offset as u64 - } - - #[inline] - fn from_u32(offset: u32) -> Self { - u64::from(offset) - } - - #[inline] - fn from_u64(offset: u64) -> Result { - Ok(offset) - } - - #[inline] - fn into_u64(self) -> u64 { - self - } - - #[inline] - fn wrapping_add(self, other: Self) -> Self { - self.wrapping_add(other) - } - - #[inline] - fn checked_sub(self, other: Self) -> Option { - self.checked_sub(other) - } -} - -impl ReaderOffset for u32 { - #[inline] - fn from_u8(offset: u8) -> Self { - u32::from(offset) - } - - #[inline] - fn from_u16(offset: u16) -> Self { - u32::from(offset) - } - - #[inline] - fn from_i16(offset: i16) -> Self { - offset as u32 - } - - #[inline] - fn from_u32(offset: u32) -> Self { - offset - } - - #[inline] - fn from_u64(offset64: u64) -> Result { - let offset = offset64 as u32; - if u64::from(offset) == offset64 { - Ok(offset) - } else { - Err(Error::UnsupportedOffset) - } - } - - #[inline] - fn into_u64(self) -> u64 { - u64::from(self) - } - - 
#[inline] - fn wrapping_add(self, other: Self) -> Self { - self.wrapping_add(other) - } - - #[inline] - fn checked_sub(self, other: Self) -> Option { - self.checked_sub(other) - } -} - -impl ReaderOffset for usize { - #[inline] - fn from_u8(offset: u8) -> Self { - offset as usize - } - - #[inline] - fn from_u16(offset: u16) -> Self { - offset as usize - } - - #[inline] - fn from_i16(offset: i16) -> Self { - offset as usize - } - - #[inline] - fn from_u32(offset: u32) -> Self { - offset as usize - } - - #[inline] - fn from_u64(offset64: u64) -> Result { - let offset = offset64 as usize; - if offset as u64 == offset64 { - Ok(offset) - } else { - Err(Error::UnsupportedOffset) - } - } - - #[inline] - fn into_u64(self) -> u64 { - self as u64 - } - - #[inline] - fn wrapping_add(self, other: Self) -> Self { - self.wrapping_add(other) - } - - #[inline] - fn checked_sub(self, other: Self) -> Option { - self.checked_sub(other) - } -} - -#[cfg(not(feature = "read"))] -pub(crate) mod seal_if_no_alloc { - #[derive(Debug)] - pub struct Sealed; -} - -/// A trait for reading the data from a DWARF section. -/// -/// All read operations advance the section offset of the reader -/// unless specified otherwise. -/// -/// ## Choosing a `Reader` Implementation -/// -/// `gimli` comes with a few different `Reader` implementations and lets you -/// choose the one that is right for your use case. A `Reader` is essentially a -/// view into the raw bytes that make up some DWARF, but this view might borrow -/// the underlying data or use reference counting ownership, and it might be -/// thread safe or not. -/// -/// | Implementation | Ownership | Thread Safe | Notes | -/// |:------------------|:------------------|:------------|:------| -/// | [`EndianSlice`](./struct.EndianSlice.html) | Borrowed | Yes | Fastest, but requires that all of your code work with borrows. 
| -/// | [`EndianRcSlice`](./struct.EndianRcSlice.html) | Reference counted | No | Shared ownership via reference counting, which alleviates the borrow restrictions of `EndianSlice` but imposes reference counting increments and decrements. Cannot be sent across threads, because the reference count is not atomic. | -/// | [`EndianArcSlice`](./struct.EndianArcSlice.html) | Reference counted | Yes | The same as `EndianRcSlice`, but uses atomic reference counting, and therefore reference counting operations are slower but `EndianArcSlice`s may be sent across threads. | -/// | [`EndianReader`](./struct.EndianReader.html) | Same as `T` | Same as `T` | Escape hatch for easily defining your own type of `Reader`. | -pub trait Reader: Debug + Clone { - /// The endianity of bytes that are read. - type Endian: Endianity; - - /// The type used for offsets and lengths. - type Offset: ReaderOffset; - - /// Return the endianity of bytes that are read. - fn endian(&self) -> Self::Endian; - - /// Return the number of bytes remaining. - fn len(&self) -> Self::Offset; - - /// Set the number of bytes remaining to zero. - fn empty(&mut self); - - /// Set the number of bytes remaining to the specified length. - fn truncate(&mut self, len: Self::Offset) -> Result<()>; - - /// Return the offset of this reader's data relative to the start of - /// the given base reader's data. - /// - /// May panic if this reader's data is not contained within the given - /// base reader's data. - fn offset_from(&self, base: &Self) -> Self::Offset; - - /// Return an identifier for the current reader offset. - fn offset_id(&self) -> ReaderOffsetId; - - /// Return the offset corresponding to the given `id` if - /// it is associated with this reader. - fn lookup_offset_id(&self, id: ReaderOffsetId) -> Option; - - /// Find the index of the first occurrence of the given byte. - /// The offset of the reader is not changed. - fn find(&self, byte: u8) -> Result; - - /// Discard the specified number of bytes. 
- fn skip(&mut self, len: Self::Offset) -> Result<()>; - - /// Split a reader in two. - /// - /// A new reader is returned that can be used to read the next - /// `len` bytes, and `self` is advanced so that it reads the remainder. - fn split(&mut self, len: Self::Offset) -> Result; - - /// This trait cannot be implemented if "read" feature is not enabled. - /// - /// `Reader` trait has a few methods that depend on `alloc` crate. - /// Disallowing `Reader` trait implementation prevents a crate that only depends on - /// "read-core" from being broken if another crate depending on `gimli` enables - /// "read" feature. - #[cfg(not(feature = "read"))] - fn cannot_implement() -> seal_if_no_alloc::Sealed; - - /// Return all remaining data as a clone-on-write slice. - /// - /// The slice will be borrowed where possible, but some readers may - /// always return an owned vector. - /// - /// Does not advance the reader. - #[cfg(feature = "read")] - fn to_slice(&self) -> Result>; - - /// Convert all remaining data to a clone-on-write string. - /// - /// The string will be borrowed where possible, but some readers may - /// always return an owned string. - /// - /// Does not advance the reader. - /// - /// Returns an error if the data contains invalid characters. - #[cfg(feature = "read")] - fn to_string(&self) -> Result>; - - /// Convert all remaining data to a clone-on-write string, including invalid characters. - /// - /// The string will be borrowed where possible, but some readers may - /// always return an owned string. - /// - /// Does not advance the reader. - #[cfg(feature = "read")] - fn to_string_lossy(&self) -> Result>; - - /// Read exactly `buf.len()` bytes into `buf`. - fn read_slice(&mut self, buf: &mut [u8]) -> Result<()>; - - /// Read a u8 array. 
- #[inline] - fn read_u8_array(&mut self) -> Result - where - A: Sized + Default + AsMut<[u8]>, - { - let mut val = Default::default(); - self.read_slice(>::as_mut(&mut val))?; - Ok(val) - } - - /// Return true if the number of bytes remaining is zero. - #[inline] - fn is_empty(&self) -> bool { - self.len() == Self::Offset::from_u8(0) - } - - /// Read a u8. - #[inline] - fn read_u8(&mut self) -> Result { - let a: [u8; 1] = self.read_u8_array()?; - Ok(a[0]) - } - - /// Read an i8. - #[inline] - fn read_i8(&mut self) -> Result { - let a: [u8; 1] = self.read_u8_array()?; - Ok(a[0] as i8) - } - - /// Read a u16. - #[inline] - fn read_u16(&mut self) -> Result { - let a: [u8; 2] = self.read_u8_array()?; - Ok(self.endian().read_u16(&a)) - } - - /// Read an i16. - #[inline] - fn read_i16(&mut self) -> Result { - let a: [u8; 2] = self.read_u8_array()?; - Ok(self.endian().read_i16(&a)) - } - - /// Read a u32. - #[inline] - fn read_u32(&mut self) -> Result { - let a: [u8; 4] = self.read_u8_array()?; - Ok(self.endian().read_u32(&a)) - } - - /// Read an i32. - #[inline] - fn read_i32(&mut self) -> Result { - let a: [u8; 4] = self.read_u8_array()?; - Ok(self.endian().read_i32(&a)) - } - - /// Read a u64. - #[inline] - fn read_u64(&mut self) -> Result { - let a: [u8; 8] = self.read_u8_array()?; - Ok(self.endian().read_u64(&a)) - } - - /// Read an i64. - #[inline] - fn read_i64(&mut self) -> Result { - let a: [u8; 8] = self.read_u8_array()?; - Ok(self.endian().read_i64(&a)) - } - - /// Read a f32. - #[inline] - fn read_f32(&mut self) -> Result { - let a: [u8; 4] = self.read_u8_array()?; - Ok(self.endian().read_f32(&a)) - } - - /// Read a f64. - #[inline] - fn read_f64(&mut self) -> Result { - let a: [u8; 8] = self.read_u8_array()?; - Ok(self.endian().read_f64(&a)) - } - - /// Read an unsigned n-bytes integer u64. 
- /// - /// # Panics - /// - /// Panics when nbytes < 1 or nbytes > 8 - #[inline] - fn read_uint(&mut self, n: usize) -> Result { - let mut buf = [0; 8]; - self.read_slice(&mut buf[..n])?; - Ok(self.endian().read_uint(&buf[..n])) - } - - /// Read a null-terminated slice, and return it (excluding the null). - fn read_null_terminated_slice(&mut self) -> Result { - let idx = self.find(0)?; - let val = self.split(idx)?; - self.skip(Self::Offset::from_u8(1))?; - Ok(val) - } - - /// Skip a LEB128 encoded integer. - fn skip_leb128(&mut self) -> Result<()> { - leb128::read::skip(self) - } - - /// Read an unsigned LEB128 encoded integer. - fn read_uleb128(&mut self) -> Result { - leb128::read::unsigned(self) - } - - /// Read an unsigned LEB128 encoded u32. - fn read_uleb128_u32(&mut self) -> Result { - leb128::read::unsigned(self)? - .try_into() - .map_err(|_| Error::BadUnsignedLeb128) - } - - /// Read an unsigned LEB128 encoded u16. - fn read_uleb128_u16(&mut self) -> Result { - leb128::read::u16(self) - } - - /// Read a signed LEB128 encoded integer. - fn read_sleb128(&mut self) -> Result { - leb128::read::signed(self) - } - - /// Read an initial length field. - /// - /// This field is encoded as either a 32-bit length or - /// a 64-bit length, and the returned `Format` indicates which. - fn read_initial_length(&mut self) -> Result<(Self::Offset, Format)> { - const MAX_DWARF_32_UNIT_LENGTH: u32 = 0xffff_fff0; - const DWARF_64_INITIAL_UNIT_LENGTH: u32 = 0xffff_ffff; - - let val = self.read_u32()?; - if val < MAX_DWARF_32_UNIT_LENGTH { - Ok((Self::Offset::from_u32(val), Format::Dwarf32)) - } else if val == DWARF_64_INITIAL_UNIT_LENGTH { - let val = self.read_u64().and_then(Self::Offset::from_u64)?; - Ok((val, Format::Dwarf64)) - } else { - Err(Error::UnknownReservedLength) - } - } - - /// Read an address-sized integer, and return it as a `u64`. 
- fn read_address(&mut self, address_size: u8) -> Result { - match address_size { - 1 => self.read_u8().map(u64::from), - 2 => self.read_u16().map(u64::from), - 4 => self.read_u32().map(u64::from), - 8 => self.read_u64(), - otherwise => Err(Error::UnsupportedAddressSize(otherwise)), - } - } - - /// Parse a word-sized integer according to the DWARF format. - /// - /// These are always used to encode section offsets or lengths, - /// and so have a type of `Self::Offset`. - fn read_word(&mut self, format: Format) -> Result { - match format { - Format::Dwarf32 => self.read_u32().map(Self::Offset::from_u32), - Format::Dwarf64 => self.read_u64().and_then(Self::Offset::from_u64), - } - } - - /// Parse a word-sized section length according to the DWARF format. - #[inline] - fn read_length(&mut self, format: Format) -> Result { - self.read_word(format) - } - - /// Parse a word-sized section offset according to the DWARF format. - #[inline] - fn read_offset(&mut self, format: Format) -> Result { - self.read_word(format) - } - - /// Parse a section offset of the given size. - /// - /// This is used for `DW_FORM_ref_addr` values in DWARF version 2. 
- fn read_sized_offset(&mut self, size: u8) -> Result { - match size { - 1 => self.read_u8().map(u64::from), - 2 => self.read_u16().map(u64::from), - 4 => self.read_u32().map(u64::from), - 8 => self.read_u64(), - otherwise => Err(Error::UnsupportedOffsetSize(otherwise)), - } - .and_then(Self::Offset::from_u64) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/rnglists.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/rnglists.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/rnglists.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/rnglists.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1458 +0,0 @@ -use crate::common::{ - DebugAddrBase, DebugAddrIndex, DebugRngListsBase, DebugRngListsIndex, DwarfFileType, Encoding, - RangeListsOffset, SectionId, -}; -use crate::constants; -use crate::endianity::Endianity; -use crate::read::{ - lists::ListsHeader, DebugAddr, EndianSlice, Error, Reader, ReaderOffset, ReaderOffsetId, - Result, Section, -}; - -/// The raw contents of the `.debug_ranges` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugRanges { - pub(crate) section: R, -} - -impl<'input, Endian> DebugRanges> -where - Endian: Endianity, -{ - /// Construct a new `DebugRanges` instance from the data in the `.debug_ranges` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_ranges` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. 
- /// - /// ``` - /// use gimli::{DebugRanges, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_ranges_section_somehow = || &buf; - /// let debug_ranges = DebugRanges::new(read_debug_ranges_section_somehow(), LittleEndian); - /// ``` - pub fn new(section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(section, endian)) - } -} - -impl Section for DebugRanges { - fn id() -> SectionId { - SectionId::DebugRanges - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugRanges { - fn from(section: R) -> Self { - DebugRanges { section } - } -} - -/// The `DebugRngLists` struct represents the contents of the -/// `.debug_rnglists` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugRngLists { - section: R, -} - -impl<'input, Endian> DebugRngLists> -where - Endian: Endianity, -{ - /// Construct a new `DebugRngLists` instance from the data in the - /// `.debug_rnglists` section. - /// - /// It is the caller's responsibility to read the `.debug_rnglists` - /// section and present it as a `&[u8]` slice. That means using some ELF - /// loader on Linux, a Mach-O loader on macOS, etc. 
- /// - /// ``` - /// use gimli::{DebugRngLists, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_rnglists_section_somehow = || &buf; - /// let debug_rnglists = - /// DebugRngLists::new(read_debug_rnglists_section_somehow(), LittleEndian); - /// ``` - pub fn new(section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(section, endian)) - } -} - -impl Section for DebugRngLists { - fn id() -> SectionId { - SectionId::DebugRngLists - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugRngLists { - fn from(section: R) -> Self { - DebugRngLists { section } - } -} - -#[allow(unused)] -pub(crate) type RngListsHeader = ListsHeader; - -impl DebugRngListsBase -where - Offset: ReaderOffset, -{ - /// Returns a `DebugRngListsBase` with the default value of DW_AT_rnglists_base - /// for the given `Encoding` and `DwarfFileType`. - pub fn default_for_encoding_and_file( - encoding: Encoding, - file_type: DwarfFileType, - ) -> DebugRngListsBase { - if encoding.version >= 5 && file_type == DwarfFileType::Dwo { - // In .dwo files, the compiler omits the DW_AT_rnglists_base attribute (because there is - // only a single unit in the file) but we must skip past the header, which the attribute - // would normally do for us. - DebugRngListsBase(Offset::from_u8(RngListsHeader::size_for_encoding(encoding))) - } else { - DebugRngListsBase(Offset::from_u8(0)) - } - } -} - -/// The DWARF data found in `.debug_ranges` and `.debug_rnglists` sections. -#[derive(Debug, Default, Clone, Copy)] -pub struct RangeLists { - debug_ranges: DebugRanges, - debug_rnglists: DebugRngLists, -} - -impl RangeLists { - /// Construct a new `RangeLists` instance from the data in the `.debug_ranges` and - /// `.debug_rnglists` sections. - pub fn new(debug_ranges: DebugRanges, debug_rnglists: DebugRngLists) -> RangeLists { - RangeLists { - debug_ranges, - debug_rnglists, - } - } - - /// Return the `.debug_ranges` section. 
- pub fn debug_ranges(&self) -> &DebugRanges { - &self.debug_ranges - } - - /// Replace the `.debug_ranges` section. - /// - /// This is useful for `.dwo` files when using the GNU split-dwarf extension to DWARF 4. - pub fn set_debug_ranges(&mut self, debug_ranges: DebugRanges) { - self.debug_ranges = debug_ranges; - } - - /// Return the `.debug_rnglists` section. - pub fn debug_rnglists(&self) -> &DebugRngLists { - &self.debug_rnglists - } -} - -impl RangeLists { - /// Create a `RangeLists` that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::RangeLists> = load_section(); - /// // Create a reference to the DWARF section. - /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> RangeLists - where - F: FnMut(&'a T) -> R, - { - RangeLists { - debug_ranges: borrow(&self.debug_ranges.section).into(), - debug_rnglists: borrow(&self.debug_rnglists.section).into(), - } - } -} - -impl RangeLists { - /// Iterate over the `Range` list entries starting at the given offset. - /// - /// The `unit_version` and `address_size` must match the compilation unit that the - /// offset was contained in. - /// - /// The `base_address` should be obtained from the `DW_AT_low_pc` attribute in the - /// `DW_TAG_compile_unit` entry for the compilation unit that contains this range list. - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). 
- pub fn ranges( - &self, - offset: RangeListsOffset, - unit_encoding: Encoding, - base_address: u64, - debug_addr: &DebugAddr, - debug_addr_base: DebugAddrBase, - ) -> Result> { - Ok(RngListIter::new( - self.raw_ranges(offset, unit_encoding)?, - base_address, - debug_addr.clone(), - debug_addr_base, - )) - } - - /// Iterate over the `RawRngListEntry`ies starting at the given offset. - /// - /// The `unit_encoding` must match the compilation unit that the - /// offset was contained in. - /// - /// This iterator does not perform any processing of the range entries, - /// such as handling base addresses. - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). - pub fn raw_ranges( - &self, - offset: RangeListsOffset, - unit_encoding: Encoding, - ) -> Result> { - let (mut input, format) = if unit_encoding.version <= 4 { - (self.debug_ranges.section.clone(), RangeListsFormat::Bare) - } else { - (self.debug_rnglists.section.clone(), RangeListsFormat::Rle) - }; - input.skip(offset.0)?; - Ok(RawRngListIter::new(input, unit_encoding, format)) - } - - /// Returns the `.debug_rnglists` offset at the given `base` and `index`. - /// - /// The `base` must be the `DW_AT_rnglists_base` value from the compilation unit DIE. - /// This is an offset that points to the first entry following the header. - /// - /// The `index` is the value of a `DW_FORM_rnglistx` attribute. - /// - /// The `unit_encoding` must match the compilation unit that the - /// index was contained in. 
- pub fn get_offset( - &self, - unit_encoding: Encoding, - base: DebugRngListsBase, - index: DebugRngListsIndex, - ) -> Result> { - let format = unit_encoding.format; - let input = &mut self.debug_rnglists.section.clone(); - input.skip(base.0)?; - input.skip(R::Offset::from_u64( - index.0.into_u64() * u64::from(format.word_size()), - )?)?; - input - .read_offset(format) - .map(|x| RangeListsOffset(base.0 + x)) - } - - /// Call `Reader::lookup_offset_id` for each section, and return the first match. - pub fn lookup_offset_id(&self, id: ReaderOffsetId) -> Option<(SectionId, R::Offset)> { - self.debug_ranges - .lookup_offset_id(id) - .or_else(|| self.debug_rnglists.lookup_offset_id(id)) - } -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum RangeListsFormat { - /// The bare range list format used before DWARF 5. - Bare, - /// The DW_RLE encoded range list format used in DWARF 5. - Rle, -} - -/// A raw iterator over an address range list. -/// -/// This iterator does not perform any processing of the range entries, -/// such as handling base addresses. -#[derive(Debug)] -pub struct RawRngListIter { - input: R, - encoding: Encoding, - format: RangeListsFormat, -} - -/// A raw entry in .debug_rnglists -#[derive(Clone, Debug)] -pub enum RawRngListEntry { - /// A range from DWARF version <= 4. - AddressOrOffsetPair { - /// Start of range. May be an address or an offset. - begin: u64, - /// End of range. May be an address or an offset. 
- end: u64, - }, - /// DW_RLE_base_address - BaseAddress { - /// base address - addr: u64, - }, - /// DW_RLE_base_addressx - BaseAddressx { - /// base address - addr: DebugAddrIndex, - }, - /// DW_RLE_startx_endx - StartxEndx { - /// start of range - begin: DebugAddrIndex, - /// end of range - end: DebugAddrIndex, - }, - /// DW_RLE_startx_length - StartxLength { - /// start of range - begin: DebugAddrIndex, - /// length of range - length: u64, - }, - /// DW_RLE_offset_pair - OffsetPair { - /// start of range - begin: u64, - /// end of range - end: u64, - }, - /// DW_RLE_start_end - StartEnd { - /// start of range - begin: u64, - /// end of range - end: u64, - }, - /// DW_RLE_start_length - StartLength { - /// start of range - begin: u64, - /// length of range - length: u64, - }, -} - -impl RawRngListEntry { - /// Parse a range entry from `.debug_rnglists` - fn parse>( - input: &mut R, - encoding: Encoding, - format: RangeListsFormat, - ) -> Result> { - Ok(match format { - RangeListsFormat::Bare => { - let range = RawRange::parse(input, encoding.address_size)?; - if range.is_end() { - None - } else if range.is_base_address(encoding.address_size) { - Some(RawRngListEntry::BaseAddress { addr: range.end }) - } else { - Some(RawRngListEntry::AddressOrOffsetPair { - begin: range.begin, - end: range.end, - }) - } - } - RangeListsFormat::Rle => match constants::DwRle(input.read_u8()?) 
{ - constants::DW_RLE_end_of_list => None, - constants::DW_RLE_base_addressx => Some(RawRngListEntry::BaseAddressx { - addr: DebugAddrIndex(input.read_uleb128().and_then(R::Offset::from_u64)?), - }), - constants::DW_RLE_startx_endx => Some(RawRngListEntry::StartxEndx { - begin: DebugAddrIndex(input.read_uleb128().and_then(R::Offset::from_u64)?), - end: DebugAddrIndex(input.read_uleb128().and_then(R::Offset::from_u64)?), - }), - constants::DW_RLE_startx_length => Some(RawRngListEntry::StartxLength { - begin: DebugAddrIndex(input.read_uleb128().and_then(R::Offset::from_u64)?), - length: input.read_uleb128()?, - }), - constants::DW_RLE_offset_pair => Some(RawRngListEntry::OffsetPair { - begin: input.read_uleb128()?, - end: input.read_uleb128()?, - }), - constants::DW_RLE_base_address => Some(RawRngListEntry::BaseAddress { - addr: input.read_address(encoding.address_size)?, - }), - constants::DW_RLE_start_end => Some(RawRngListEntry::StartEnd { - begin: input.read_address(encoding.address_size)?, - end: input.read_address(encoding.address_size)?, - }), - constants::DW_RLE_start_length => Some(RawRngListEntry::StartLength { - begin: input.read_address(encoding.address_size)?, - length: input.read_uleb128()?, - }), - _ => { - return Err(Error::InvalidAddressRange); - } - }, - }) - } -} - -impl RawRngListIter { - /// Construct a `RawRngListIter`. - fn new(input: R, encoding: Encoding, format: RangeListsFormat) -> RawRngListIter { - RawRngListIter { - input, - encoding, - format, - } - } - - /// Advance the iterator to the next range. 
- pub fn next(&mut self) -> Result>> { - if self.input.is_empty() { - return Ok(None); - } - - match RawRngListEntry::parse(&mut self.input, self.encoding, self.format) { - Ok(range) => { - if range.is_none() { - self.input.empty(); - } - Ok(range) - } - Err(e) => { - self.input.empty(); - Err(e) - } - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for RawRngListIter { - type Item = RawRngListEntry; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - RawRngListIter::next(self) - } -} - -/// An iterator over an address range list. -/// -/// This iterator internally handles processing of base addresses and different -/// entry types. Thus, it only returns range entries that are valid -/// and already adjusted for the base address. -#[derive(Debug)] -pub struct RngListIter { - raw: RawRngListIter, - base_address: u64, - debug_addr: DebugAddr, - debug_addr_base: DebugAddrBase, -} - -impl RngListIter { - /// Construct a `RngListIter`. - fn new( - raw: RawRngListIter, - base_address: u64, - debug_addr: DebugAddr, - debug_addr_base: DebugAddrBase, - ) -> RngListIter { - RngListIter { - raw, - base_address, - debug_addr, - debug_addr_base, - } - } - - #[inline] - fn get_address(&self, index: DebugAddrIndex) -> Result { - self.debug_addr - .get_address(self.raw.encoding.address_size, self.debug_addr_base, index) - } - - /// Advance the iterator to the next range. - pub fn next(&mut self) -> Result> { - loop { - let raw_range = match self.raw.next()? { - Some(range) => range, - None => return Ok(None), - }; - - let range = self.convert_raw(raw_range)?; - if range.is_some() { - return Ok(range); - } - } - } - - /// Return the next raw range. - /// - /// The raw range should be passed to `convert_range`. - #[doc(hidden)] - pub fn next_raw(&mut self) -> Result>> { - self.raw.next() - } - - /// Convert a raw range into a range, and update the state of the iterator. 
- /// - /// The raw range should have been obtained from `next_raw`. - #[doc(hidden)] - pub fn convert_raw(&mut self, raw_range: RawRngListEntry) -> Result> { - let mask = !0 >> (64 - self.raw.encoding.address_size * 8); - let tombstone = if self.raw.encoding.version <= 4 { - mask - 1 - } else { - mask - }; - - let range = match raw_range { - RawRngListEntry::BaseAddress { addr } => { - self.base_address = addr; - return Ok(None); - } - RawRngListEntry::BaseAddressx { addr } => { - self.base_address = self.get_address(addr)?; - return Ok(None); - } - RawRngListEntry::StartxEndx { begin, end } => { - let begin = self.get_address(begin)?; - let end = self.get_address(end)?; - Range { begin, end } - } - RawRngListEntry::StartxLength { begin, length } => { - let begin = self.get_address(begin)?; - let end = begin.wrapping_add(length) & mask; - Range { begin, end } - } - RawRngListEntry::AddressOrOffsetPair { begin, end } - | RawRngListEntry::OffsetPair { begin, end } => { - if self.base_address == tombstone { - return Ok(None); - } - let mut range = Range { begin, end }; - range.add_base_address(self.base_address, self.raw.encoding.address_size); - range - } - RawRngListEntry::StartEnd { begin, end } => Range { begin, end }, - RawRngListEntry::StartLength { begin, length } => { - let end = begin.wrapping_add(length) & mask; - Range { begin, end } - } - }; - - if range.begin == tombstone { - return Ok(None); - } - - if range.begin > range.end { - self.raw.input.empty(); - return Err(Error::InvalidAddressRange); - } - - Ok(Some(range)) - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for RngListIter { - type Item = Range; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - RngListIter::next(self) - } -} - -/// A raw address range from the `.debug_ranges` section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub(crate) struct RawRange { - /// The beginning address of the range. 
- pub begin: u64, - - /// The first address past the end of the range. - pub end: u64, -} - -impl RawRange { - /// Check if this is a range end entry. - #[inline] - pub fn is_end(&self) -> bool { - self.begin == 0 && self.end == 0 - } - - /// Check if this is a base address selection entry. - /// - /// A base address selection entry changes the base address that subsequent - /// range entries are relative to. - #[inline] - pub fn is_base_address(&self, address_size: u8) -> bool { - self.begin == !0 >> (64 - address_size * 8) - } - - /// Parse an address range entry from `.debug_ranges` or `.debug_loc`. - #[inline] - pub fn parse(input: &mut R, address_size: u8) -> Result { - let begin = input.read_address(address_size)?; - let end = input.read_address(address_size)?; - let range = RawRange { begin, end }; - Ok(range) - } -} - -/// An address range from the `.debug_ranges`, `.debug_rnglists`, or `.debug_aranges` sections. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct Range { - /// The beginning address of the range. - pub begin: u64, - - /// The first address past the end of the range. - pub end: u64, -} - -impl Range { - /// Add a base address to this range. 
- #[inline] - pub(crate) fn add_base_address(&mut self, base_address: u64, address_size: u8) { - let mask = !0 >> (64 - address_size * 8); - self.begin = base_address.wrapping_add(self.begin) & mask; - self.end = base_address.wrapping_add(self.end) & mask; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::common::Format; - use crate::endianity::LittleEndian; - use crate::test_util::GimliSectionMethods; - use test_assembler::{Endian, Label, LabelMaker, Section}; - - #[test] - fn test_rnglists_32() { - let tombstone = !0u32; - let encoding = Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 4, - }; - let section = Section::with_endian(Endian::Little) - .L32(0x0300_0000) - .L32(0x0301_0300) - .L32(0x0301_0400) - .L32(0x0301_0500) - .L32(tombstone) - .L32(0x0301_0600); - let buf = section.get_contents().unwrap(); - let debug_addr = &DebugAddr::from(EndianSlice::new(&buf, LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - - let start = Label::new(); - let first = Label::new(); - let size = Label::new(); - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // Header - .mark(&start) - .L32(&size) - .L16(encoding.version) - .L8(encoding.address_size) - .L8(0) - .L32(0) - .mark(&first) - // An OffsetPair using the unit base address. - .L8(4).uleb(0x10200).uleb(0x10300) - // A base address selection followed by an OffsetPair. - .L8(5).L32(0x0200_0000) - .L8(4).uleb(0x10400).uleb(0x10500) - // An empty OffsetPair followed by a normal OffsetPair. - .L8(4).uleb(0x10600).uleb(0x10600) - .L8(4).uleb(0x10800).uleb(0x10900) - // A StartEnd - .L8(6).L32(0x201_0a00).L32(0x201_0b00) - // A StartLength - .L8(7).L32(0x201_0c00).uleb(0x100) - // An OffsetPair that starts at 0. - .L8(4).uleb(0).uleb(1) - // An OffsetPair that starts and ends at 0. - .L8(4).uleb(0).uleb(0) - // An OffsetPair that ends at -1. 
- .L8(5).L32(0) - .L8(4).uleb(0).uleb(0xffff_ffff) - // A BaseAddressx + OffsetPair - .L8(1).uleb(0) - .L8(4).uleb(0x10100).uleb(0x10200) - // A StartxEndx - .L8(2).uleb(1).uleb(2) - // A StartxLength - .L8(3).uleb(3).uleb(0x100) - - // Tombstone entries, all of which should be ignored. - // A BaseAddressx that is a tombstone. - .L8(1).uleb(4) - .L8(4).uleb(0x11100).uleb(0x11200) - // A BaseAddress that is a tombstone. - .L8(5).L32(tombstone) - .L8(4).uleb(0x11300).uleb(0x11400) - // A StartxEndx that is a tombstone. - .L8(2).uleb(4).uleb(5) - // A StartxLength that is a tombstone. - .L8(3).uleb(4).uleb(0x100) - // A StartEnd that is a tombstone. - .L8(6).L32(tombstone).L32(0x201_1500) - // A StartLength that is a tombstone. - .L8(7).L32(tombstone).uleb(0x100) - // A StartEnd (not ignored) - .L8(6).L32(0x201_1600).L32(0x201_1700) - - // A range end. - .L8(0) - // Some extra data. - .L32(0xffff_ffff); - size.set_const((§ion.here() - &start - 4) as u64); - - let buf = section.get_contents().unwrap(); - let debug_ranges = DebugRanges::new(&[], LittleEndian); - let debug_rnglists = DebugRngLists::new(&buf, LittleEndian); - let rnglists = RangeLists::new(debug_ranges, debug_rnglists); - let offset = RangeListsOffset((&first - &start) as usize); - let mut ranges = rnglists - .ranges(offset, encoding, 0x0100_0000, debug_addr, debug_addr_base) - .unwrap(); - - // A normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0101_0200, - end: 0x0101_0300, - })) - ); - - // A base address selection followed by a normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0400, - end: 0x0201_0500, - })) - ); - - // An empty range followed by a normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0600, - end: 0x0201_0600, - })) - ); - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0800, - end: 0x0201_0900, - })) - ); - - // A normal range. 
- assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0a00, - end: 0x0201_0b00, - })) - ); - - // A normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0c00, - end: 0x0201_0d00, - })) - ); - - // A range that starts at 0. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0200_0000, - end: 0x0200_0001, - })) - ); - - // A range that starts and ends at 0. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0200_0000, - end: 0x0200_0000, - })) - ); - - // A range that ends at -1. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0000_0000, - end: 0xffff_ffff, - })) - ); - - // A BaseAddressx + OffsetPair - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0301_0100, - end: 0x0301_0200, - })) - ); - - // A StartxEndx - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0301_0300, - end: 0x0301_0400, - })) - ); - - // A StartxLength - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0301_0500, - end: 0x0301_0600, - })) - ); - - // A StartEnd range following the tombstones - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_1600, - end: 0x0201_1700, - })) - ); - - // A range end. - assert_eq!(ranges.next(), Ok(None)); - - // An offset at the end of buf. 
- let mut ranges = rnglists - .ranges( - RangeListsOffset(buf.len()), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(ranges.next(), Ok(None)); - } - - #[test] - fn test_rnglists_64() { - let tombstone = !0u64; - let encoding = Encoding { - format: Format::Dwarf64, - version: 5, - address_size: 8, - }; - let section = Section::with_endian(Endian::Little) - .L64(0x0300_0000) - .L64(0x0301_0300) - .L64(0x0301_0400) - .L64(0x0301_0500) - .L64(tombstone) - .L64(0x0301_0600); - let buf = section.get_contents().unwrap(); - let debug_addr = &DebugAddr::from(EndianSlice::new(&buf, LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - - let start = Label::new(); - let first = Label::new(); - let size = Label::new(); - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // Header - .mark(&start) - .L32(0xffff_ffff) - .L64(&size) - .L16(encoding.version) - .L8(encoding.address_size) - .L8(0) - .L32(0) - .mark(&first) - // An OffsetPair using the unit base address. - .L8(4).uleb(0x10200).uleb(0x10300) - // A base address selection followed by an OffsetPair. - .L8(5).L64(0x0200_0000) - .L8(4).uleb(0x10400).uleb(0x10500) - // An empty OffsetPair followed by a normal OffsetPair. - .L8(4).uleb(0x10600).uleb(0x10600) - .L8(4).uleb(0x10800).uleb(0x10900) - // A StartEnd - .L8(6).L64(0x201_0a00).L64(0x201_0b00) - // A StartLength - .L8(7).L64(0x201_0c00).uleb(0x100) - // An OffsetPair that starts at 0. - .L8(4).uleb(0).uleb(1) - // An OffsetPair that starts and ends at 0. - .L8(4).uleb(0).uleb(0) - // An OffsetPair that ends at -1. - .L8(5).L64(0) - .L8(4).uleb(0).uleb(0xffff_ffff) - // A BaseAddressx + OffsetPair - .L8(1).uleb(0) - .L8(4).uleb(0x10100).uleb(0x10200) - // A StartxEndx - .L8(2).uleb(1).uleb(2) - // A StartxLength - .L8(3).uleb(3).uleb(0x100) - - // Tombstone entries, all of which should be ignored. - // A BaseAddressx that is a tombstone. 
- .L8(1).uleb(4) - .L8(4).uleb(0x11100).uleb(0x11200) - // A BaseAddress that is a tombstone. - .L8(5).L64(tombstone) - .L8(4).uleb(0x11300).uleb(0x11400) - // A StartxEndx that is a tombstone. - .L8(2).uleb(4).uleb(5) - // A StartxLength that is a tombstone. - .L8(3).uleb(4).uleb(0x100) - // A StartEnd that is a tombstone. - .L8(6).L64(tombstone).L64(0x201_1500) - // A StartLength that is a tombstone. - .L8(7).L64(tombstone).uleb(0x100) - // A StartEnd (not ignored) - .L8(6).L64(0x201_1600).L64(0x201_1700) - - // A range end. - .L8(0) - // Some extra data. - .L32(0xffff_ffff); - size.set_const((§ion.here() - &start - 12) as u64); - - let buf = section.get_contents().unwrap(); - let debug_ranges = DebugRanges::new(&[], LittleEndian); - let debug_rnglists = DebugRngLists::new(&buf, LittleEndian); - let rnglists = RangeLists::new(debug_ranges, debug_rnglists); - let offset = RangeListsOffset((&first - &start) as usize); - let mut ranges = rnglists - .ranges(offset, encoding, 0x0100_0000, debug_addr, debug_addr_base) - .unwrap(); - - // A normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0101_0200, - end: 0x0101_0300, - })) - ); - - // A base address selection followed by a normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0400, - end: 0x0201_0500, - })) - ); - - // An empty range followed by a normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0600, - end: 0x0201_0600, - })) - ); - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0800, - end: 0x0201_0900, - })) - ); - - // A normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0a00, - end: 0x0201_0b00, - })) - ); - - // A normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0c00, - end: 0x0201_0d00, - })) - ); - - // A range that starts at 0. 
- assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0200_0000, - end: 0x0200_0001, - })) - ); - - // A range that starts and ends at 0. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0200_0000, - end: 0x0200_0000, - })) - ); - - // A range that ends at -1. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0000_0000, - end: 0xffff_ffff, - })) - ); - - // A BaseAddressx + OffsetPair - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0301_0100, - end: 0x0301_0200, - })) - ); - - // A StartxEndx - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0301_0300, - end: 0x0301_0400, - })) - ); - - // A StartxLength - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0301_0500, - end: 0x0301_0600, - })) - ); - - // A StartEnd range following the tombstones - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_1600, - end: 0x0201_1700, - })) - ); - - // A range end. - assert_eq!(ranges.next(), Ok(None)); - - // An offset at the end of buf. 
- let mut ranges = rnglists - .ranges( - RangeListsOffset(buf.len()), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(ranges.next(), Ok(None)); - } - - #[test] - fn test_raw_range() { - let range = RawRange { - begin: 0, - end: 0xffff_ffff, - }; - assert!(!range.is_end()); - assert!(!range.is_base_address(4)); - assert!(!range.is_base_address(8)); - - let range = RawRange { begin: 0, end: 0 }; - assert!(range.is_end()); - assert!(!range.is_base_address(4)); - assert!(!range.is_base_address(8)); - - let range = RawRange { - begin: 0xffff_ffff, - end: 0, - }; - assert!(!range.is_end()); - assert!(range.is_base_address(4)); - assert!(!range.is_base_address(8)); - - let range = RawRange { - begin: 0xffff_ffff_ffff_ffff, - end: 0, - }; - assert!(!range.is_end()); - assert!(!range.is_base_address(4)); - assert!(range.is_base_address(8)); - } - - #[test] - fn test_ranges_32() { - let tombstone = !0u32 - 1; - let start = Label::new(); - let first = Label::new(); - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // A range before the offset. - .mark(&start) - .L32(0x10000).L32(0x10100) - .mark(&first) - // A normal range. - .L32(0x10200).L32(0x10300) - // A base address selection followed by a normal range. - .L32(0xffff_ffff).L32(0x0200_0000) - .L32(0x10400).L32(0x10500) - // An empty range followed by a normal range. - .L32(0x10600).L32(0x10600) - .L32(0x10800).L32(0x10900) - // A range that starts at 0. - .L32(0).L32(1) - // A range that ends at -1. - .L32(0xffff_ffff).L32(0x0000_0000) - .L32(0).L32(0xffff_ffff) - // A normal range with tombstone. - .L32(tombstone).L32(tombstone) - // A base address selection with tombstone followed by a normal range. - .L32(0xffff_ffff).L32(tombstone) - .L32(0x10a00).L32(0x10b00) - // A range end. - .L32(0).L32(0) - // Some extra data. 
- .L32(0); - - let buf = section.get_contents().unwrap(); - let debug_ranges = DebugRanges::new(&buf, LittleEndian); - let debug_rnglists = DebugRngLists::new(&[], LittleEndian); - let rnglists = RangeLists::new(debug_ranges, debug_rnglists); - let offset = RangeListsOffset((&first - &start) as usize); - let debug_addr = &DebugAddr::from(EndianSlice::new(&[], LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut ranges = rnglists - .ranges(offset, encoding, 0x0100_0000, debug_addr, debug_addr_base) - .unwrap(); - - // A normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0101_0200, - end: 0x0101_0300, - })) - ); - - // A base address selection followed by a normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0400, - end: 0x0201_0500, - })) - ); - - // An empty range followed by a normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0600, - end: 0x0201_0600, - })) - ); - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0800, - end: 0x0201_0900, - })) - ); - - // A range that starts at 0. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0200_0000, - end: 0x0200_0001, - })) - ); - - // A range that ends at -1. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0000_0000, - end: 0xffff_ffff, - })) - ); - - // A range end. - assert_eq!(ranges.next(), Ok(None)); - - // An offset at the end of buf. - let mut ranges = rnglists - .ranges( - RangeListsOffset(buf.len()), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(ranges.next(), Ok(None)); - } - - #[test] - fn test_ranges_64() { - let tombstone = !0u64 - 1; - let start = Label::new(); - let first = Label::new(); - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // A range before the offset. 
- .mark(&start) - .L64(0x10000).L64(0x10100) - .mark(&first) - // A normal range. - .L64(0x10200).L64(0x10300) - // A base address selection followed by a normal range. - .L64(0xffff_ffff_ffff_ffff).L64(0x0200_0000) - .L64(0x10400).L64(0x10500) - // An empty range followed by a normal range. - .L64(0x10600).L64(0x10600) - .L64(0x10800).L64(0x10900) - // A range that starts at 0. - .L64(0).L64(1) - // A range that ends at -1. - .L64(0xffff_ffff_ffff_ffff).L64(0x0000_0000) - .L64(0).L64(0xffff_ffff_ffff_ffff) - // A normal range with tombstone. - .L64(tombstone).L64(tombstone) - // A base address selection with tombstone followed by a normal range. - .L64(0xffff_ffff_ffff_ffff).L64(tombstone) - .L64(0x10a00).L64(0x10b00) - // A range end. - .L64(0).L64(0) - // Some extra data. - .L64(0); - - let buf = section.get_contents().unwrap(); - let debug_ranges = DebugRanges::new(&buf, LittleEndian); - let debug_rnglists = DebugRngLists::new(&[], LittleEndian); - let rnglists = RangeLists::new(debug_ranges, debug_rnglists); - let offset = RangeListsOffset((&first - &start) as usize); - let debug_addr = &DebugAddr::from(EndianSlice::new(&[], LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - let encoding = Encoding { - format: Format::Dwarf64, - version: 4, - address_size: 8, - }; - let mut ranges = rnglists - .ranges(offset, encoding, 0x0100_0000, debug_addr, debug_addr_base) - .unwrap(); - - // A normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0101_0200, - end: 0x0101_0300, - })) - ); - - // A base address selection followed by a normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0400, - end: 0x0201_0500, - })) - ); - - // An empty range followed by a normal range. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0600, - end: 0x0201_0600, - })) - ); - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0201_0800, - end: 0x0201_0900, - })) - ); - - // A range that starts at 0. 
- assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0200_0000, - end: 0x0200_0001, - })) - ); - - // A range that ends at -1. - assert_eq!( - ranges.next(), - Ok(Some(Range { - begin: 0x0, - end: 0xffff_ffff_ffff_ffff, - })) - ); - - // A range end. - assert_eq!(ranges.next(), Ok(None)); - - // An offset at the end of buf. - let mut ranges = rnglists - .ranges( - RangeListsOffset(buf.len()), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(ranges.next(), Ok(None)); - } - - #[test] - fn test_ranges_invalid() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - // An invalid range. - .L32(0x20000).L32(0x10000) - // An invalid range after wrapping. - .L32(0x20000).L32(0xff01_0000); - - let buf = section.get_contents().unwrap(); - let debug_ranges = DebugRanges::new(&buf, LittleEndian); - let debug_rnglists = DebugRngLists::new(&[], LittleEndian); - let rnglists = RangeLists::new(debug_ranges, debug_rnglists); - let debug_addr = &DebugAddr::from(EndianSlice::new(&[], LittleEndian)); - let debug_addr_base = DebugAddrBase(0); - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - - // An invalid range. - let mut ranges = rnglists - .ranges( - RangeListsOffset(0x0), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(ranges.next(), Err(Error::InvalidAddressRange)); - - // An invalid range after wrapping. - let mut ranges = rnglists - .ranges( - RangeListsOffset(0x8), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) - .unwrap(); - assert_eq!(ranges.next(), Err(Error::InvalidAddressRange)); - - // An invalid offset. 
- match rnglists.ranges( - RangeListsOffset(buf.len() + 1), - encoding, - 0x0100_0000, - debug_addr, - debug_addr_base, - ) { - Err(Error::UnexpectedEof(_)) => {} - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - } - - #[test] - fn test_get_offset() { - for format in vec![Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version: 5, - address_size: 4, - }; - - let zero = Label::new(); - let length = Label::new(); - let start = Label::new(); - let first = Label::new(); - let end = Label::new(); - let mut section = Section::with_endian(Endian::Little) - .mark(&zero) - .initial_length(format, &length, &start) - .D16(encoding.version) - .D8(encoding.address_size) - .D8(0) - .D32(20) - .mark(&first); - for i in 0..20 { - section = section.word(format.word_size(), 1000 + i); - } - section = section.mark(&end); - length.set_const((&end - &start) as u64); - let section = section.get_contents().unwrap(); - - let debug_ranges = DebugRanges::from(EndianSlice::new(&[], LittleEndian)); - let debug_rnglists = DebugRngLists::from(EndianSlice::new(§ion, LittleEndian)); - let ranges = RangeLists::new(debug_ranges, debug_rnglists); - - let base = DebugRngListsBase((&first - &zero) as usize); - assert_eq!( - ranges.get_offset(encoding, base, DebugRngListsIndex(0)), - Ok(RangeListsOffset(base.0 + 1000)) - ); - assert_eq!( - ranges.get_offset(encoding, base, DebugRngListsIndex(19)), - Ok(RangeListsOffset(base.0 + 1019)) - ); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/str.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/str.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/str.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/str.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,321 +0,0 @@ -use crate::common::{ - DebugLineStrOffset, DebugStrOffset, DebugStrOffsetsBase, DebugStrOffsetsIndex, DwarfFileType, - Encoding, SectionId, -}; -use crate::endianity::Endianity; -use 
crate::read::{EndianSlice, Reader, ReaderOffset, Result, Section}; -use crate::Format; - -/// The `DebugStr` struct represents the DWARF strings -/// found in the `.debug_str` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugStr { - debug_str_section: R, -} - -impl<'input, Endian> DebugStr> -where - Endian: Endianity, -{ - /// Construct a new `DebugStr` instance from the data in the `.debug_str` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_str` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. - /// - /// ``` - /// use gimli::{DebugStr, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_str_section_somehow = || &buf; - /// let debug_str = DebugStr::new(read_debug_str_section_somehow(), LittleEndian); - /// ``` - pub fn new(debug_str_section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(debug_str_section, endian)) - } -} - -impl DebugStr { - /// Lookup a string from the `.debug_str` section by DebugStrOffset. - /// - /// ``` - /// use gimli::{DebugStr, DebugStrOffset, LittleEndian}; - /// - /// # let buf = [0x01, 0x02, 0x00]; - /// # let offset = DebugStrOffset(0); - /// # let read_debug_str_section_somehow = || &buf; - /// # let debug_str_offset_somehow = || offset; - /// let debug_str = DebugStr::new(read_debug_str_section_somehow(), LittleEndian); - /// println!("Found string {:?}", debug_str.get_str(debug_str_offset_somehow())); - /// ``` - pub fn get_str(&self, offset: DebugStrOffset) -> Result { - let input = &mut self.debug_str_section.clone(); - input.skip(offset.0)?; - input.read_null_terminated_slice() - } -} - -impl DebugStr { - /// Create a `DebugStr` section that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. 
- /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::DebugStr> = load_section(); - /// // Create a reference to the DWARF section. - /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugStr - where - F: FnMut(&'a T) -> R, - { - borrow(&self.debug_str_section).into() - } -} - -impl Section for DebugStr { - fn id() -> SectionId { - SectionId::DebugStr - } - - fn reader(&self) -> &R { - &self.debug_str_section - } -} - -impl From for DebugStr { - fn from(debug_str_section: R) -> Self { - DebugStr { debug_str_section } - } -} - -/// The raw contents of the `.debug_str_offsets` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugStrOffsets { - section: R, -} - -impl DebugStrOffsets { - // TODO: add an iterator over the sets of entries in the section. - // This is not needed for common usage of the section though. - - /// Returns the `.debug_str` offset at the given `base` and `index`. - /// - /// A set of entries in the `.debug_str_offsets` section consists of a header - /// followed by a series of string table offsets. - /// - /// The `base` must be the `DW_AT_str_offsets_base` value from the compilation unit DIE. - /// This is an offset that points to the first entry following the header. - /// - /// The `index` is the value of a `DW_FORM_strx` attribute. - /// - /// The `format` must be the DWARF format of the compilation unit. This format must - /// match the header. However, note that we do not parse the header to validate this, - /// since locating the header is unreliable, and the GNU extensions do not emit it. 
- pub fn get_str_offset( - &self, - format: Format, - base: DebugStrOffsetsBase, - index: DebugStrOffsetsIndex, - ) -> Result> { - let input = &mut self.section.clone(); - input.skip(base.0)?; - input.skip(R::Offset::from_u64( - index.0.into_u64() * u64::from(format.word_size()), - )?)?; - input.read_offset(format).map(DebugStrOffset) - } -} - -impl DebugStrOffsets { - /// Create a `DebugStrOffsets` section that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::DebugStrOffsets> = load_section(); - /// // Create a reference to the DWARF section. - /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugStrOffsets - where - F: FnMut(&'a T) -> R, - { - borrow(&self.section).into() - } -} - -impl Section for DebugStrOffsets { - fn id() -> SectionId { - SectionId::DebugStrOffsets - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugStrOffsets { - fn from(section: R) -> Self { - DebugStrOffsets { section } - } -} - -impl DebugStrOffsetsBase -where - Offset: ReaderOffset, -{ - /// Returns a `DebugStrOffsetsBase` with the default value of DW_AT_str_offsets_base - /// for the given `Encoding` and `DwarfFileType`. - pub fn default_for_encoding_and_file( - encoding: Encoding, - file_type: DwarfFileType, - ) -> DebugStrOffsetsBase { - if encoding.version >= 5 && file_type == DwarfFileType::Dwo { - // In .dwo files, the compiler omits the DW_AT_str_offsets_base attribute (because there is - // only a single unit in the file) but we must skip past the header, which the attribute - // would normally do for us. 
- // initial_length_size + version + 2 bytes of padding. - DebugStrOffsetsBase(Offset::from_u8( - encoding.format.initial_length_size() + 2 + 2, - )) - } else { - DebugStrOffsetsBase(Offset::from_u8(0)) - } - } -} - -/// The `DebugLineStr` struct represents the DWARF strings -/// found in the `.debug_line_str` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugLineStr { - section: R, -} - -impl<'input, Endian> DebugLineStr> -where - Endian: Endianity, -{ - /// Construct a new `DebugLineStr` instance from the data in the `.debug_line_str` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_line_str` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. - /// - /// ``` - /// use gimli::{DebugLineStr, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_line_str_section_somehow = || &buf; - /// let debug_str = DebugLineStr::new(read_debug_line_str_section_somehow(), LittleEndian); - /// ``` - pub fn new(debug_line_str_section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(debug_line_str_section, endian)) - } -} - -impl DebugLineStr { - /// Lookup a string from the `.debug_line_str` section by DebugLineStrOffset. - pub fn get_str(&self, offset: DebugLineStrOffset) -> Result { - let input = &mut self.section.clone(); - input.skip(offset.0)?; - input.read_null_terminated_slice() - } -} - -impl DebugLineStr { - /// Create a `DebugLineStr` section that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::DebugLineStr> = load_section(); - /// // Create a reference to the DWARF section. 
- /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugLineStr - where - F: FnMut(&'a T) -> R, - { - borrow(&self.section).into() - } -} - -impl Section for DebugLineStr { - fn id() -> SectionId { - SectionId::DebugLineStr - } - - fn reader(&self) -> &R { - &self.section - } -} - -impl From for DebugLineStr { - fn from(section: R) -> Self { - DebugLineStr { section } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_util::GimliSectionMethods; - use crate::LittleEndian; - use test_assembler::{Endian, Label, LabelMaker, Section}; - - #[test] - fn test_get_str_offset() { - for format in vec![Format::Dwarf32, Format::Dwarf64] { - let zero = Label::new(); - let length = Label::new(); - let start = Label::new(); - let first = Label::new(); - let end = Label::new(); - let mut section = Section::with_endian(Endian::Little) - .mark(&zero) - .initial_length(format, &length, &start) - .D16(5) - .D16(0) - .mark(&first); - for i in 0..20 { - section = section.word(format.word_size(), 1000 + i); - } - section = section.mark(&end); - length.set_const((&end - &start) as u64); - - let section = section.get_contents().unwrap(); - let debug_str_offsets = DebugStrOffsets::from(EndianSlice::new(§ion, LittleEndian)); - let base = DebugStrOffsetsBase((&first - &zero) as usize); - - assert_eq!( - debug_str_offsets.get_str_offset(format, base, DebugStrOffsetsIndex(0)), - Ok(DebugStrOffset(1000)) - ); - assert_eq!( - debug_str_offsets.get_str_offset(format, base, DebugStrOffsetsIndex(19)), - Ok(DebugStrOffset(1019)) - ); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/unit.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/unit.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/unit.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/unit.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,6139 
+0,0 @@ -//! Functions for parsing DWARF `.debug_info` and `.debug_types` sections. - -use core::cell::Cell; -use core::ops::{Range, RangeFrom, RangeTo}; -use core::{u16, u8}; - -use crate::common::{ - DebugAbbrevOffset, DebugAddrBase, DebugAddrIndex, DebugInfoOffset, DebugLineOffset, - DebugLineStrOffset, DebugLocListsBase, DebugLocListsIndex, DebugMacinfoOffset, - DebugMacroOffset, DebugRngListsBase, DebugRngListsIndex, DebugStrOffset, DebugStrOffsetsBase, - DebugStrOffsetsIndex, DebugTypeSignature, DebugTypesOffset, DwoId, Encoding, Format, - LocationListsOffset, RawRangeListsOffset, SectionId, UnitSectionOffset, -}; -use crate::constants; -use crate::endianity::Endianity; -use crate::read::abbrev::get_attribute_size; -use crate::read::{ - Abbreviation, Abbreviations, AttributeSpecification, DebugAbbrev, DebugStr, EndianSlice, Error, - Expression, Reader, ReaderOffset, Result, Section, UnitOffset, -}; - -impl DebugTypesOffset { - /// Convert an offset to be relative to the start of the given unit, - /// instead of relative to the start of the .debug_types section. - /// Returns `None` if the offset is not within the unit entries. - pub fn to_unit_offset(&self, unit: &UnitHeader) -> Option> - where - R: Reader, - { - let unit_offset = unit.offset().as_debug_types_offset()?; - let offset = UnitOffset(self.0.checked_sub(unit_offset.0)?); - if !unit.is_valid_offset(offset) { - return None; - } - Some(offset) - } -} - -impl DebugInfoOffset { - /// Convert an offset to be relative to the start of the given unit, - /// instead of relative to the start of the .debug_info section. - /// Returns `None` if the offset is not within this unit entries. 
- pub fn to_unit_offset(&self, unit: &UnitHeader) -> Option> - where - R: Reader, - { - let unit_offset = unit.offset().as_debug_info_offset()?; - let offset = UnitOffset(self.0.checked_sub(unit_offset.0)?); - if !unit.is_valid_offset(offset) { - return None; - } - Some(offset) - } -} - -impl UnitOffset { - /// Convert an offset to be relative to the start of the .debug_info section, - /// instead of relative to the start of the given unit. Returns None if the - /// provided unit lives in the .debug_types section. - pub fn to_debug_info_offset(&self, unit: &UnitHeader) -> Option> - where - R: Reader, - { - let unit_offset = unit.offset().as_debug_info_offset()?; - Some(DebugInfoOffset(unit_offset.0 + self.0)) - } - - /// Convert an offset to be relative to the start of the .debug_types section, - /// instead of relative to the start of the given unit. Returns None if the - /// provided unit lives in the .debug_info section. - pub fn to_debug_types_offset(&self, unit: &UnitHeader) -> Option> - where - R: Reader, - { - let unit_offset = unit.offset().as_debug_types_offset()?; - Some(DebugTypesOffset(unit_offset.0 + self.0)) - } -} - -/// The `DebugInfo` struct represents the DWARF debugging information found in -/// the `.debug_info` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugInfo { - debug_info_section: R, -} - -impl<'input, Endian> DebugInfo> -where - Endian: Endianity, -{ - /// Construct a new `DebugInfo` instance from the data in the `.debug_info` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_info` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. 
- /// - /// ``` - /// use gimli::{DebugInfo, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_info_section_somehow = || &buf; - /// let debug_info = DebugInfo::new(read_debug_info_section_somehow(), LittleEndian); - /// ``` - pub fn new(debug_info_section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(debug_info_section, endian)) - } -} - -impl DebugInfo { - /// Iterate the units in this `.debug_info` section. - /// - /// ``` - /// use gimli::{DebugInfo, LittleEndian}; - /// - /// # let buf = []; - /// # let read_debug_info_section_somehow = || &buf; - /// let debug_info = DebugInfo::new(read_debug_info_section_somehow(), LittleEndian); - /// - /// let mut iter = debug_info.units(); - /// while let Some(unit) = iter.next().unwrap() { - /// println!("unit's length is {}", unit.unit_length()); - /// } - /// ``` - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). - pub fn units(&self) -> DebugInfoUnitHeadersIter { - DebugInfoUnitHeadersIter { - input: self.debug_info_section.clone(), - offset: DebugInfoOffset(R::Offset::from_u8(0)), - } - } - - /// Get the UnitHeader located at offset from this .debug_info section. - /// - /// - pub fn header_from_offset(&self, offset: DebugInfoOffset) -> Result> { - let input = &mut self.debug_info_section.clone(); - input.skip(offset.0)?; - parse_unit_header(input, offset.into()) - } -} - -impl DebugInfo { - /// Create a `DebugInfo` section that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::DebugInfo> = load_section(); - /// // Create a reference to the DWARF section. 
- /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugInfo - where - F: FnMut(&'a T) -> R, - { - borrow(&self.debug_info_section).into() - } -} - -impl Section for DebugInfo { - fn id() -> SectionId { - SectionId::DebugInfo - } - - fn reader(&self) -> &R { - &self.debug_info_section - } -} - -impl From for DebugInfo { - fn from(debug_info_section: R) -> Self { - DebugInfo { debug_info_section } - } -} - -/// An iterator over the units of a .debug_info section. -/// -/// See the [documentation on -/// `DebugInfo::units`](./struct.DebugInfo.html#method.units) for more detail. -#[derive(Clone, Debug)] -pub struct DebugInfoUnitHeadersIter { - input: R, - offset: DebugInfoOffset, -} - -impl DebugInfoUnitHeadersIter { - /// Advance the iterator to the next unit header. - pub fn next(&mut self) -> Result>> { - if self.input.is_empty() { - Ok(None) - } else { - let len = self.input.len(); - match parse_unit_header(&mut self.input, self.offset.into()) { - Ok(header) => { - self.offset.0 += len - self.input.len(); - Ok(Some(header)) - } - Err(e) => { - self.input.empty(); - Err(e) - } - } - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for DebugInfoUnitHeadersIter { - type Item = UnitHeader; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - DebugInfoUnitHeadersIter::next(self) - } -} - -/// Parse the unit type from the unit header. -fn parse_unit_type(input: &mut R) -> Result { - let val = input.read_u8()?; - Ok(constants::DwUt(val)) -} - -/// Parse the `debug_abbrev_offset` in the compilation unit header. -fn parse_debug_abbrev_offset( - input: &mut R, - format: Format, -) -> Result> { - input.read_offset(format).map(DebugAbbrevOffset) -} - -/// Parse the `debug_info_offset` in the arange header. 
-pub(crate) fn parse_debug_info_offset( - input: &mut R, - format: Format, -) -> Result> { - input.read_offset(format).map(DebugInfoOffset) -} - -/// This enum specifies the type of the unit and any type -/// specific data carried in the header (e.g. the type -/// signature/type offset of a type unit). -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum UnitType -where - Offset: ReaderOffset, -{ - /// In DWARF5, a unit with type `DW_UT_compile`. In previous DWARF versions, - /// any unit appearing in the .debug_info section. - Compilation, - /// In DWARF5, a unit with type `DW_UT_type`. In DWARF4, any unit appearing - /// in the .debug_types section. - Type { - /// The unique type signature for this type unit. - type_signature: DebugTypeSignature, - /// The offset within this type unit where the type is defined. - type_offset: UnitOffset, - }, - /// A unit with type `DW_UT_partial`. The root DIE of this unit should be a - /// `DW_TAG_partial_unit`. - Partial, - /// A unit with type `DW_UT_skeleton`. The enclosed dwo_id can be used to - /// link this with the corresponding `SplitCompilation` unit in a dwo file. - /// NB: The non-standard GNU split DWARF extension to DWARF 4 will instead - /// be a `Compilation` unit with the dwo_id present as an attribute on the - /// root DIE. - Skeleton(DwoId), - /// A unit with type `DW_UT_split_compile`. The enclosed dwo_id can be used to - /// link this with the corresponding `Skeleton` unit in the original binary. - /// NB: The non-standard GNU split DWARF extension to DWARF 4 will instead - /// be a `Compilation` unit with the dwo_id present as an attribute on the - /// root DIE. - SplitCompilation(DwoId), - /// A unit with type `DW_UT_split_type`. A split type unit is identical to a - /// conventional type unit except for the section in which it appears. - SplitType { - /// The unique type signature for this type unit. - type_signature: DebugTypeSignature, - /// The offset within this type unit where the type is defined. 
- type_offset: UnitOffset, - }, -} - -impl UnitType -where - Offset: ReaderOffset, -{ - // TODO: This will be used by the DWARF writing code once it - // supports unit types other than simple compilation units. - #[allow(unused)] - pub(crate) fn dw_ut(&self) -> constants::DwUt { - match self { - UnitType::Compilation => constants::DW_UT_compile, - UnitType::Type { .. } => constants::DW_UT_type, - UnitType::Partial => constants::DW_UT_partial, - UnitType::Skeleton(_) => constants::DW_UT_skeleton, - UnitType::SplitCompilation(_) => constants::DW_UT_split_compile, - UnitType::SplitType { .. } => constants::DW_UT_split_type, - } - } -} - -/// The common fields for the headers of compilation units and -/// type units. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct UnitHeader::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - encoding: Encoding, - unit_length: Offset, - unit_type: UnitType, - debug_abbrev_offset: DebugAbbrevOffset, - unit_offset: UnitSectionOffset, - entries_buf: R, -} - -/// Static methods. -impl UnitHeader -where - R: Reader, - Offset: ReaderOffset, -{ - /// Construct a new `UnitHeader`. - pub fn new( - encoding: Encoding, - unit_length: Offset, - unit_type: UnitType, - debug_abbrev_offset: DebugAbbrevOffset, - unit_offset: UnitSectionOffset, - entries_buf: R, - ) -> Self { - UnitHeader { - encoding, - unit_length, - unit_type, - debug_abbrev_offset, - unit_offset, - entries_buf, - } - } -} - -/// Instance methods. -impl UnitHeader -where - R: Reader, - Offset: ReaderOffset, -{ - /// Get the offset of this unit within its section. - pub fn offset(&self) -> UnitSectionOffset { - self.unit_offset - } - - /// Return the serialized size of the common unit header for the given - /// DWARF format. 
- pub fn size_of_header(&self) -> usize { - let unit_length_size = self.encoding.format.initial_length_size() as usize; - let version_size = 2; - let debug_abbrev_offset_size = self.encoding.format.word_size() as usize; - let address_size_size = 1; - let unit_type_size = if self.encoding.version == 5 { 1 } else { 0 }; - let type_specific_size = match self.unit_type { - UnitType::Compilation | UnitType::Partial => 0, - UnitType::Type { .. } | UnitType::SplitType { .. } => { - let type_signature_size = 8; - let type_offset_size = self.encoding.format.word_size() as usize; - type_signature_size + type_offset_size - } - UnitType::Skeleton(_) | UnitType::SplitCompilation(_) => 8, - }; - - unit_length_size - + version_size - + debug_abbrev_offset_size - + address_size_size - + unit_type_size - + type_specific_size - } - - /// Get the length of the debugging info for this compilation unit, not - /// including the byte length of the encoded length itself. - pub fn unit_length(&self) -> Offset { - self.unit_length - } - - /// Get the length of the debugging info for this compilation unit, - /// including the byte length of the encoded length itself. - pub fn length_including_self(&self) -> Offset { - Offset::from_u8(self.format().initial_length_size()) + self.unit_length - } - - /// Return the encoding parameters for this unit. - pub fn encoding(&self) -> Encoding { - self.encoding - } - - /// Get the DWARF version of the debugging info for this compilation unit. - pub fn version(&self) -> u16 { - self.encoding.version - } - - /// Get the UnitType of this unit. - pub fn type_(&self) -> UnitType { - self.unit_type - } - - /// The offset into the `.debug_abbrev` section for this compilation unit's - /// debugging information entries' abbreviations. - pub fn debug_abbrev_offset(&self) -> DebugAbbrevOffset { - self.debug_abbrev_offset - } - - /// The size of addresses (in bytes) in this compilation unit. 
- pub fn address_size(&self) -> u8 { - self.encoding.address_size - } - - /// Whether this compilation unit is encoded in 64- or 32-bit DWARF. - pub fn format(&self) -> Format { - self.encoding.format - } - - /// The serialized size of the header for this compilation unit. - pub fn header_size(&self) -> Offset { - self.length_including_self() - self.entries_buf.len() - } - - pub(crate) fn is_valid_offset(&self, offset: UnitOffset) -> bool { - let size_of_header = self.header_size(); - if offset.0 < size_of_header { - return false; - } - - let relative_to_entries_buf = offset.0 - size_of_header; - relative_to_entries_buf < self.entries_buf.len() - } - - /// Get the underlying bytes for the supplied range. - pub fn range(&self, idx: Range>) -> Result { - if !self.is_valid_offset(idx.start) { - return Err(Error::OffsetOutOfBounds); - } - if !self.is_valid_offset(idx.end) { - return Err(Error::OffsetOutOfBounds); - } - assert!(idx.start <= idx.end); - let size_of_header = self.header_size(); - let start = idx.start.0 - size_of_header; - let end = idx.end.0 - size_of_header; - let mut input = self.entries_buf.clone(); - input.skip(start)?; - input.truncate(end - start)?; - Ok(input) - } - - /// Get the underlying bytes for the supplied range. - pub fn range_from(&self, idx: RangeFrom>) -> Result { - if !self.is_valid_offset(idx.start) { - return Err(Error::OffsetOutOfBounds); - } - let start = idx.start.0 - self.header_size(); - let mut input = self.entries_buf.clone(); - input.skip(start)?; - Ok(input) - } - - /// Get the underlying bytes for the supplied range. - pub fn range_to(&self, idx: RangeTo>) -> Result { - if !self.is_valid_offset(idx.end) { - return Err(Error::OffsetOutOfBounds); - } - let end = idx.end.0 - self.header_size(); - let mut input = self.entries_buf.clone(); - input.truncate(end)?; - Ok(input) - } - - /// Read the `DebuggingInformationEntry` at the given offset. 
- pub fn entry<'me, 'abbrev>( - &'me self, - abbreviations: &'abbrev Abbreviations, - offset: UnitOffset, - ) -> Result> { - let mut input = self.range_from(offset..)?; - let entry = DebuggingInformationEntry::parse(&mut input, self, abbreviations)?; - entry.ok_or(Error::NoEntryAtGivenOffset) - } - - /// Navigate this unit's `DebuggingInformationEntry`s. - pub fn entries<'me, 'abbrev>( - &'me self, - abbreviations: &'abbrev Abbreviations, - ) -> EntriesCursor<'abbrev, 'me, R> { - EntriesCursor { - unit: self, - input: self.entries_buf.clone(), - abbreviations, - cached_current: None, - delta_depth: 0, - } - } - - /// Navigate this compilation unit's `DebuggingInformationEntry`s - /// starting at the given offset. - pub fn entries_at_offset<'me, 'abbrev>( - &'me self, - abbreviations: &'abbrev Abbreviations, - offset: UnitOffset, - ) -> Result> { - let input = self.range_from(offset..)?; - Ok(EntriesCursor { - unit: self, - input, - abbreviations, - cached_current: None, - delta_depth: 0, - }) - } - - /// Navigate this unit's `DebuggingInformationEntry`s as a tree - /// starting at the given offset. - pub fn entries_tree<'me, 'abbrev>( - &'me self, - abbreviations: &'abbrev Abbreviations, - offset: Option>, - ) -> Result> { - let input = match offset { - Some(offset) => self.range_from(offset..)?, - None => self.entries_buf.clone(), - }; - Ok(EntriesTree::new(input, self, abbreviations)) - } - - /// Read the raw data that defines the Debugging Information Entries. - pub fn entries_raw<'me, 'abbrev>( - &'me self, - abbreviations: &'abbrev Abbreviations, - offset: Option>, - ) -> Result> { - let input = match offset { - Some(offset) => self.range_from(offset..)?, - None => self.entries_buf.clone(), - }; - Ok(EntriesRaw { - input, - unit: self, - abbreviations, - depth: 0, - }) - } - - /// Parse this unit's abbreviations. 
- pub fn abbreviations(&self, debug_abbrev: &DebugAbbrev) -> Result { - debug_abbrev.abbreviations(self.debug_abbrev_offset()) - } -} - -/// Parse a unit header. -fn parse_unit_header( - input: &mut R, - unit_offset: UnitSectionOffset, -) -> Result> -where - R: Reader, - Offset: ReaderOffset, -{ - let (unit_length, format) = input.read_initial_length()?; - let mut rest = input.split(unit_length)?; - - let version = rest.read_u16()?; - let abbrev_offset; - let address_size; - let unit_type; - // DWARF 1 was very different, and is obsolete, so isn't supported by this - // reader. - if 2 <= version && version <= 4 { - abbrev_offset = parse_debug_abbrev_offset(&mut rest, format)?; - address_size = rest.read_u8()?; - // Before DWARF5, all units in the .debug_info section are compilation - // units, and all units in the .debug_types section are type units. - unit_type = match unit_offset { - UnitSectionOffset::DebugInfoOffset(_) => constants::DW_UT_compile, - UnitSectionOffset::DebugTypesOffset(_) => constants::DW_UT_type, - }; - } else if version == 5 { - unit_type = parse_unit_type(&mut rest)?; - address_size = rest.read_u8()?; - abbrev_offset = parse_debug_abbrev_offset(&mut rest, format)?; - } else { - return Err(Error::UnknownVersion(u64::from(version))); - } - let encoding = Encoding { - format, - version, - address_size, - }; - - // Parse any data specific to this type of unit. 
- let unit_type = match unit_type { - constants::DW_UT_compile => UnitType::Compilation, - constants::DW_UT_type => { - let type_signature = parse_type_signature(&mut rest)?; - let type_offset = parse_type_offset(&mut rest, format)?; - UnitType::Type { - type_signature, - type_offset, - } - } - constants::DW_UT_partial => UnitType::Partial, - constants::DW_UT_skeleton => { - let dwo_id = parse_dwo_id(&mut rest)?; - UnitType::Skeleton(dwo_id) - } - constants::DW_UT_split_compile => { - let dwo_id = parse_dwo_id(&mut rest)?; - UnitType::SplitCompilation(dwo_id) - } - constants::DW_UT_split_type => { - let type_signature = parse_type_signature(&mut rest)?; - let type_offset = parse_type_offset(&mut rest, format)?; - UnitType::SplitType { - type_signature, - type_offset, - } - } - _ => return Err(Error::UnsupportedUnitType), - }; - - Ok(UnitHeader::new( - encoding, - unit_length, - unit_type, - abbrev_offset, - unit_offset, - rest, - )) -} - -/// Parse a dwo_id from a header -fn parse_dwo_id(input: &mut R) -> Result { - Ok(DwoId(input.read_u64()?)) -} - -/// A Debugging Information Entry (DIE). -/// -/// DIEs have a set of attributes and optionally have children DIEs as well. -#[derive(Clone, Debug)] -pub struct DebuggingInformationEntry<'abbrev, 'unit, R, Offset = ::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - offset: UnitOffset, - attrs_slice: R, - attrs_len: Cell>, - abbrev: &'abbrev Abbreviation, - unit: &'unit UnitHeader, -} - -impl<'abbrev, 'unit, R, Offset> DebuggingInformationEntry<'abbrev, 'unit, R, Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// Construct a new `DebuggingInformationEntry`. - pub fn new( - offset: UnitOffset, - attrs_slice: R, - abbrev: &'abbrev Abbreviation, - unit: &'unit UnitHeader, - ) -> Self { - DebuggingInformationEntry { - offset, - attrs_slice, - attrs_len: Cell::new(None), - abbrev, - unit, - } - } - - /// Get this entry's code. 
- pub fn code(&self) -> u64 { - self.abbrev.code() - } - - /// Get this entry's offset. - pub fn offset(&self) -> UnitOffset { - self.offset - } - - /// Get this entry's `DW_TAG_whatever` tag. - /// - /// ``` - /// # use gimli::{DebugAbbrev, DebugInfo, LittleEndian}; - /// # let info_buf = [ - /// # // Comilation unit header - /// # - /// # // 32-bit unit length = 12 - /// # 0x0c, 0x00, 0x00, 0x00, - /// # // Version 4 - /// # 0x04, 0x00, - /// # // debug_abbrev_offset - /// # 0x00, 0x00, 0x00, 0x00, - /// # // Address size - /// # 0x04, - /// # - /// # // DIEs - /// # - /// # // Abbreviation code - /// # 0x01, - /// # // Attribute of form DW_FORM_string = "foo\0" - /// # 0x66, 0x6f, 0x6f, 0x00, - /// # ]; - /// # let debug_info = DebugInfo::new(&info_buf, LittleEndian); - /// # let abbrev_buf = [ - /// # // Code - /// # 0x01, - /// # // DW_TAG_subprogram - /// # 0x2e, - /// # // DW_CHILDREN_no - /// # 0x00, - /// # // Begin attributes - /// # // Attribute name = DW_AT_name - /// # 0x03, - /// # // Attribute form = DW_FORM_string - /// # 0x08, - /// # // End attributes - /// # 0x00, - /// # 0x00, - /// # // Null terminator - /// # 0x00 - /// # ]; - /// # let debug_abbrev = DebugAbbrev::new(&abbrev_buf, LittleEndian); - /// # let unit = debug_info.units().next().unwrap().unwrap(); - /// # let abbrevs = unit.abbreviations(&debug_abbrev).unwrap(); - /// # let mut cursor = unit.entries(&abbrevs); - /// # let (_, entry) = cursor.next_dfs().unwrap().unwrap(); - /// # let mut get_some_entry = || entry; - /// let entry = get_some_entry(); - /// - /// match entry.tag() { - /// gimli::DW_TAG_subprogram => - /// println!("this entry contains debug info about a function"), - /// gimli::DW_TAG_inlined_subroutine => - /// println!("this entry contains debug info about a particular instance of inlining"), - /// gimli::DW_TAG_variable => - /// println!("this entry contains debug info about a local variable"), - /// gimli::DW_TAG_formal_parameter => - /// println!("this entry 
contains debug info about a function parameter"), - /// otherwise => - /// println!("this entry is some other kind of data: {:?}", otherwise), - /// }; - /// ``` - pub fn tag(&self) -> constants::DwTag { - self.abbrev.tag() - } - - /// Return true if this entry's type can have children, false otherwise. - pub fn has_children(&self) -> bool { - self.abbrev.has_children() - } - - /// Iterate over this entry's set of attributes. - /// - /// ``` - /// use gimli::{DebugAbbrev, DebugInfo, LittleEndian}; - /// - /// // Read the `.debug_info` section. - /// - /// # let info_buf = [ - /// # // Comilation unit header - /// # - /// # // 32-bit unit length = 12 - /// # 0x0c, 0x00, 0x00, 0x00, - /// # // Version 4 - /// # 0x04, 0x00, - /// # // debug_abbrev_offset - /// # 0x00, 0x00, 0x00, 0x00, - /// # // Address size - /// # 0x04, - /// # - /// # // DIEs - /// # - /// # // Abbreviation code - /// # 0x01, - /// # // Attribute of form DW_FORM_string = "foo\0" - /// # 0x66, 0x6f, 0x6f, 0x00, - /// # ]; - /// # let read_debug_info_section_somehow = || &info_buf; - /// let debug_info = DebugInfo::new(read_debug_info_section_somehow(), LittleEndian); - /// - /// // Get the data about the first compilation unit out of the `.debug_info`. - /// - /// let unit = debug_info.units().next() - /// .expect("Should have at least one compilation unit") - /// .expect("and it should parse ok"); - /// - /// // Read the `.debug_abbrev` section and parse the - /// // abbreviations for our compilation unit. 
- /// - /// # let abbrev_buf = [ - /// # // Code - /// # 0x01, - /// # // DW_TAG_subprogram - /// # 0x2e, - /// # // DW_CHILDREN_no - /// # 0x00, - /// # // Begin attributes - /// # // Attribute name = DW_AT_name - /// # 0x03, - /// # // Attribute form = DW_FORM_string - /// # 0x08, - /// # // End attributes - /// # 0x00, - /// # 0x00, - /// # // Null terminator - /// # 0x00 - /// # ]; - /// # let read_debug_abbrev_section_somehow = || &abbrev_buf; - /// let debug_abbrev = DebugAbbrev::new(read_debug_abbrev_section_somehow(), LittleEndian); - /// let abbrevs = unit.abbreviations(&debug_abbrev).unwrap(); - /// - /// // Get the first entry from that compilation unit. - /// - /// let mut cursor = unit.entries(&abbrevs); - /// let (_, entry) = cursor.next_dfs() - /// .expect("Should parse next entry") - /// .expect("Should have at least one entry"); - /// - /// // Finally, print the first entry's attributes. - /// - /// let mut attrs = entry.attrs(); - /// while let Some(attr) = attrs.next().unwrap() { - /// println!("Attribute name = {:?}", attr.name()); - /// println!("Attribute value = {:?}", attr.value()); - /// } - /// ``` - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). - pub fn attrs<'me>(&'me self) -> AttrsIter<'abbrev, 'me, 'unit, R> { - AttrsIter { - input: self.attrs_slice.clone(), - attributes: self.abbrev.attributes(), - entry: self, - } - } - - /// Find the first attribute in this entry which has the given name, - /// and return it. Returns `Ok(None)` if no attribute is found. - pub fn attr(&self, name: constants::DwAt) -> Result>> { - let mut attrs = self.attrs(); - while let Some(attr) = attrs.next()? { - if attr.name() == name { - return Ok(Some(attr)); - } - } - Ok(None) - } - - /// Find the first attribute in this entry which has the given name, - /// and return its raw value. Returns `Ok(None)` if no attribute is found. 
- pub fn attr_value_raw(&self, name: constants::DwAt) -> Result>> { - self.attr(name) - .map(|attr| attr.map(|attr| attr.raw_value())) - } - - /// Find the first attribute in this entry which has the given name, - /// and return its normalized value. Returns `Ok(None)` if no - /// attribute is found. - pub fn attr_value(&self, name: constants::DwAt) -> Result>> { - self.attr(name).map(|attr| attr.map(|attr| attr.value())) - } - - /// Return the input buffer after the last attribute. - #[inline(always)] - fn after_attrs(&self) -> Result { - if let Some(attrs_len) = self.attrs_len.get() { - let mut input = self.attrs_slice.clone(); - input.skip(attrs_len)?; - Ok(input) - } else { - let mut attrs = self.attrs(); - while attrs.next()?.is_some() {} - Ok(attrs.input) - } - } - - /// Use the `DW_AT_sibling` attribute to find the input buffer for the - /// next sibling. Returns `None` if the attribute is missing or invalid. - fn sibling(&self) -> Option { - let attr = self.attr_value(constants::DW_AT_sibling); - if let Ok(Some(AttributeValue::UnitRef(offset))) = attr { - if offset.0 > self.offset.0 { - if let Ok(input) = self.unit.range_from(offset..) { - return Some(input); - } - } - } - None - } - - /// Parse an entry. Returns `Ok(None)` for null entries. - #[inline(always)] - fn parse( - input: &mut R, - unit: &'unit UnitHeader, - abbreviations: &'abbrev Abbreviations, - ) -> Result> { - let offset = unit.header_size() + input.offset_from(&unit.entries_buf); - let code = input.read_uleb128()?; - if code == 0 { - return Ok(None); - }; - let abbrev = abbreviations.get(code).ok_or(Error::UnknownAbbreviation)?; - Ok(Some(DebuggingInformationEntry { - offset: UnitOffset(offset), - attrs_slice: input.clone(), - attrs_len: Cell::new(None), - abbrev, - unit, - })) - } -} - -/// The value of an attribute in a `DebuggingInformationEntry`. -// -// Set the discriminant size so that all variants use the same alignment -// for their data. 
This gives better code generation in `parse_attribute`. -#[repr(u64)] -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum AttributeValue::Offset> -where - R: Reader, - Offset: ReaderOffset, -{ - /// "Refers to some location in the address space of the described program." - Addr(u64), - - /// A slice of an arbitrary number of bytes. - Block(R), - - /// A one byte constant data value. How to interpret the byte depends on context. - /// - /// From section 7 of the standard: "Depending on context, it may be a - /// signed integer, an unsigned integer, a floating-point constant, or - /// anything else." - Data1(u8), - - /// A two byte constant data value. How to interpret the bytes depends on context. - /// - /// These bytes have been converted from `R::Endian`. This may need to be reversed - /// if this was not required. - /// - /// From section 7 of the standard: "Depending on context, it may be a - /// signed integer, an unsigned integer, a floating-point constant, or - /// anything else." - Data2(u16), - - /// A four byte constant data value. How to interpret the bytes depends on context. - /// - /// These bytes have been converted from `R::Endian`. This may need to be reversed - /// if this was not required. - /// - /// From section 7 of the standard: "Depending on context, it may be a - /// signed integer, an unsigned integer, a floating-point constant, or - /// anything else." - Data4(u32), - - /// An eight byte constant data value. How to interpret the bytes depends on context. - /// - /// These bytes have been converted from `R::Endian`. This may need to be reversed - /// if this was not required. - /// - /// From section 7 of the standard: "Depending on context, it may be a - /// signed integer, an unsigned integer, a floating-point constant, or - /// anything else." - Data8(u64), - - /// A signed integer constant. - Sdata(i64), - - /// An unsigned integer constant. 
- Udata(u64), - - /// "The information bytes contain a DWARF expression (see Section 2.5) or - /// location description (see Section 2.6)." - Exprloc(Expression), - - /// A boolean that indicates presence or absence of the attribute. - Flag(bool), - - /// An offset into another section. Which section this is an offset into - /// depends on context. - SecOffset(Offset), - - /// An offset to a set of addresses in the `.debug_addr` section. - DebugAddrBase(DebugAddrBase), - - /// An index into a set of addresses in the `.debug_addr` section. - DebugAddrIndex(DebugAddrIndex), - - /// An offset into the current compilation unit. - UnitRef(UnitOffset), - - /// An offset into the current `.debug_info` section, but possibly a - /// different compilation unit from the current one. - DebugInfoRef(DebugInfoOffset), - - /// An offset into the `.debug_info` section of the supplementary object file. - DebugInfoRefSup(DebugInfoOffset), - - /// An offset into the `.debug_line` section. - DebugLineRef(DebugLineOffset), - - /// An offset into either the `.debug_loc` section or the `.debug_loclists` section. - LocationListsRef(LocationListsOffset), - - /// An offset to a set of offsets in the `.debug_loclists` section. - DebugLocListsBase(DebugLocListsBase), - - /// An index into a set of offsets in the `.debug_loclists` section. - DebugLocListsIndex(DebugLocListsIndex), - - /// An offset into the `.debug_macinfo` section. - DebugMacinfoRef(DebugMacinfoOffset), - - /// An offset into the `.debug_macro` section. - DebugMacroRef(DebugMacroOffset), - - /// An offset into the `.debug_ranges` section. - RangeListsRef(RawRangeListsOffset), - - /// An offset to a set of offsets in the `.debug_rnglists` section. - DebugRngListsBase(DebugRngListsBase), - - /// An index into a set of offsets in the `.debug_rnglists` section. - DebugRngListsIndex(DebugRngListsIndex), - - /// A type signature. - DebugTypesRef(DebugTypeSignature), - - /// An offset into the `.debug_str` section. 
- DebugStrRef(DebugStrOffset), - - /// An offset into the `.debug_str` section of the supplementary object file. - DebugStrRefSup(DebugStrOffset), - - /// An offset to a set of entries in the `.debug_str_offsets` section. - DebugStrOffsetsBase(DebugStrOffsetsBase), - - /// An index into a set of entries in the `.debug_str_offsets` section. - DebugStrOffsetsIndex(DebugStrOffsetsIndex), - - /// An offset into the `.debug_line_str` section. - DebugLineStrRef(DebugLineStrOffset), - - /// A slice of bytes representing a string. Does not include a final null byte. - /// Not guaranteed to be UTF-8 or anything like that. - String(R), - - /// The value of a `DW_AT_encoding` attribute. - Encoding(constants::DwAte), - - /// The value of a `DW_AT_decimal_sign` attribute. - DecimalSign(constants::DwDs), - - /// The value of a `DW_AT_endianity` attribute. - Endianity(constants::DwEnd), - - /// The value of a `DW_AT_accessibility` attribute. - Accessibility(constants::DwAccess), - - /// The value of a `DW_AT_visibility` attribute. - Visibility(constants::DwVis), - - /// The value of a `DW_AT_virtuality` attribute. - Virtuality(constants::DwVirtuality), - - /// The value of a `DW_AT_language` attribute. - Language(constants::DwLang), - - /// The value of a `DW_AT_address_class` attribute. - AddressClass(constants::DwAddr), - - /// The value of a `DW_AT_identifier_case` attribute. - IdentifierCase(constants::DwId), - - /// The value of a `DW_AT_calling_convention` attribute. - CallingConvention(constants::DwCc), - - /// The value of a `DW_AT_inline` attribute. - Inline(constants::DwInl), - - /// The value of a `DW_AT_ordering` attribute. - Ordering(constants::DwOrd), - - /// An index into the filename entries from the line number information - /// table for the compilation unit containing this value. - FileIndex(u64), - - /// An implementation-defined identifier uniquely identifying a compilation - /// unit. 
- DwoId(DwoId), -} - -/// An attribute in a `DebuggingInformationEntry`, consisting of a name and -/// associated value. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub struct Attribute { - name: constants::DwAt, - value: AttributeValue, -} - -impl Attribute { - /// Get this attribute's name. - pub fn name(&self) -> constants::DwAt { - self.name - } - - /// Get this attribute's raw value. - pub fn raw_value(&self) -> AttributeValue { - self.value.clone() - } - - /// Get this attribute's normalized value. - /// - /// Attribute values can potentially be encoded in multiple equivalent forms, - /// and may have special meaning depending on the attribute name. This method - /// converts the attribute value to a normalized form based on the attribute - /// name. - /// - /// See "Table 7.5: Attribute encodings" and "Table 7.6: Attribute form encodings". - pub fn value(&self) -> AttributeValue { - // Table 7.5 shows the possible attribute classes for each name. - // Table 7.6 shows the possible attribute classes for each form. - // For each attribute name, we need to match on the form, and - // convert it to one of the classes that is allowed for both - // the name and the form. - // - // The individual class conversions rarely vary for each name, - // so for each class conversion we define a macro that matches - // on the allowed forms for that class. - // - // For some classes, we don't need to do any conversion, so their - // macro is empty. In the future we may want to fill them in to - // provide strict checking of the forms for each class. For now, - // they simply provide a way to document the allowed classes for - // each name. - - // DW_FORM_addr - // DW_FORM_addrx - // DW_FORM_addrx1 - // DW_FORM_addrx2 - // DW_FORM_addrx3 - // DW_FORM_addrx4 - macro_rules! address { - () => {}; - } - // DW_FORM_sec_offset - macro_rules! 
addrptr { - () => { - if let Some(offset) = self.offset_value() { - return AttributeValue::DebugAddrBase(DebugAddrBase(offset)); - } - }; - } - // DW_FORM_block - // DW_FORM_block1 - // DW_FORM_block2 - // DW_FORM_block4 - macro_rules! block { - () => {}; - } - // DW_FORM_sdata - // DW_FORM_udata - // DW_FORM_data1 - // DW_FORM_data2 - // DW_FORM_data4 - // DW_FORM_data8 - // DW_FORM_data16 - // DW_FORM_implicit_const - macro_rules! constant { - ($value:ident, $variant:ident) => { - if let Some(value) = self.$value() { - return AttributeValue::$variant(value); - } - }; - ($value:ident, $variant:ident, $constant:ident) => { - if let Some(value) = self.$value() { - return AttributeValue::$variant(constants::$constant(value)); - } - }; - } - // DW_FORM_exprloc - macro_rules! exprloc { - () => { - if let Some(value) = self.exprloc_value() { - return AttributeValue::Exprloc(value); - } - }; - } - // DW_FORM_flag - // DW_FORM_flag_present - macro_rules! flag { - () => {}; - } - // DW_FORM_sec_offset - macro_rules! lineptr { - () => { - if let Some(offset) = self.offset_value() { - return AttributeValue::DebugLineRef(DebugLineOffset(offset)); - } - }; - } - // This also covers `loclist` in DWARF version 5. - // DW_FORM_sec_offset - // DW_FORM_loclistx - macro_rules! loclistptr { - () => { - // DebugLocListsIndex is also an allowed form in DWARF version 5. - if let Some(offset) = self.offset_value() { - return AttributeValue::LocationListsRef(LocationListsOffset(offset)); - } - }; - } - // DW_FORM_sec_offset - macro_rules! loclistsptr { - () => { - if let Some(offset) = self.offset_value() { - return AttributeValue::DebugLocListsBase(DebugLocListsBase(offset)); - } - }; - } - // DWARF version <= 4. - // DW_FORM_sec_offset - macro_rules! macinfoptr { - () => { - if let Some(offset) = self.offset_value() { - return AttributeValue::DebugMacinfoRef(DebugMacinfoOffset(offset)); - } - }; - } - // DWARF version >= 5. - // DW_FORM_sec_offset - macro_rules! 
macroptr { - () => { - if let Some(offset) = self.offset_value() { - return AttributeValue::DebugMacroRef(DebugMacroOffset(offset)); - } - }; - } - // DW_FORM_ref_addr - // DW_FORM_ref1 - // DW_FORM_ref2 - // DW_FORM_ref4 - // DW_FORM_ref8 - // DW_FORM_ref_udata - // DW_FORM_ref_sig8 - // DW_FORM_ref_sup4 - // DW_FORM_ref_sup8 - macro_rules! reference { - () => {}; - } - // This also covers `rnglist` in DWARF version 5. - // DW_FORM_sec_offset - // DW_FORM_rnglistx - macro_rules! rangelistptr { - () => { - // DebugRngListsIndex is also an allowed form in DWARF version 5. - if let Some(offset) = self.offset_value() { - return AttributeValue::RangeListsRef(RawRangeListsOffset(offset)); - } - }; - } - // DW_FORM_sec_offset - macro_rules! rnglistsptr { - () => { - if let Some(offset) = self.offset_value() { - return AttributeValue::DebugRngListsBase(DebugRngListsBase(offset)); - } - }; - } - // DW_FORM_string - // DW_FORM_strp - // DW_FORM_strx - // DW_FORM_strx1 - // DW_FORM_strx2 - // DW_FORM_strx3 - // DW_FORM_strx4 - // DW_FORM_strp_sup - // DW_FORM_line_strp - macro_rules! string { - () => {}; - } - // DW_FORM_sec_offset - macro_rules! stroffsetsptr { - () => { - if let Some(offset) = self.offset_value() { - return AttributeValue::DebugStrOffsetsBase(DebugStrOffsetsBase(offset)); - } - }; - } - // This isn't a separate form but it's useful to distinguish it from a generic udata. - macro_rules! dwoid { - () => { - if let Some(value) = self.udata_value() { - return AttributeValue::DwoId(DwoId(value)); - } - }; - } - - // Perform the allowed class conversions for each attribute name. 
- match self.name { - constants::DW_AT_sibling => { - reference!(); - } - constants::DW_AT_location => { - exprloc!(); - loclistptr!(); - } - constants::DW_AT_name => { - string!(); - } - constants::DW_AT_ordering => { - constant!(u8_value, Ordering, DwOrd); - } - constants::DW_AT_byte_size - | constants::DW_AT_bit_offset - | constants::DW_AT_bit_size => { - constant!(udata_value, Udata); - exprloc!(); - reference!(); - } - constants::DW_AT_stmt_list => { - lineptr!(); - } - constants::DW_AT_low_pc => { - address!(); - } - constants::DW_AT_high_pc => { - address!(); - constant!(udata_value, Udata); - } - constants::DW_AT_language => { - constant!(u16_value, Language, DwLang); - } - constants::DW_AT_discr => { - reference!(); - } - constants::DW_AT_discr_value => { - // constant: depends on type of DW_TAG_variant_part, - // so caller must normalize. - } - constants::DW_AT_visibility => { - constant!(u8_value, Visibility, DwVis); - } - constants::DW_AT_import => { - reference!(); - } - constants::DW_AT_string_length => { - exprloc!(); - loclistptr!(); - reference!(); - } - constants::DW_AT_common_reference => { - reference!(); - } - constants::DW_AT_comp_dir => { - string!(); - } - constants::DW_AT_const_value => { - // TODO: constant: sign depends on DW_AT_type. - block!(); - string!(); - } - constants::DW_AT_containing_type => { - reference!(); - } - constants::DW_AT_default_value => { - // TODO: constant: sign depends on DW_AT_type. - reference!(); - flag!(); - } - constants::DW_AT_inline => { - constant!(u8_value, Inline, DwInl); - } - constants::DW_AT_is_optional => { - flag!(); - } - constants::DW_AT_lower_bound => { - // TODO: constant: sign depends on DW_AT_type. 
- exprloc!(); - reference!(); - } - constants::DW_AT_producer => { - string!(); - } - constants::DW_AT_prototyped => { - flag!(); - } - constants::DW_AT_return_addr => { - exprloc!(); - loclistptr!(); - } - constants::DW_AT_start_scope => { - // TODO: constant - rangelistptr!(); - } - constants::DW_AT_bit_stride => { - constant!(udata_value, Udata); - exprloc!(); - reference!(); - } - constants::DW_AT_upper_bound => { - // TODO: constant: sign depends on DW_AT_type. - exprloc!(); - reference!(); - } - constants::DW_AT_abstract_origin => { - reference!(); - } - constants::DW_AT_accessibility => { - constant!(u8_value, Accessibility, DwAccess); - } - constants::DW_AT_address_class => { - constant!(udata_value, AddressClass, DwAddr); - } - constants::DW_AT_artificial => { - flag!(); - } - constants::DW_AT_base_types => { - reference!(); - } - constants::DW_AT_calling_convention => { - constant!(u8_value, CallingConvention, DwCc); - } - constants::DW_AT_count => { - // TODO: constant - exprloc!(); - reference!(); - } - constants::DW_AT_data_member_location => { - // Constants must be handled before loclistptr so that DW_FORM_data4/8 - // are correctly interpreted for DWARF version 4+. 
- constant!(udata_value, Udata); - exprloc!(); - loclistptr!(); - } - constants::DW_AT_decl_column => { - constant!(udata_value, Udata); - } - constants::DW_AT_decl_file => { - constant!(udata_value, FileIndex); - } - constants::DW_AT_decl_line => { - constant!(udata_value, Udata); - } - constants::DW_AT_declaration => { - flag!(); - } - constants::DW_AT_discr_list => { - block!(); - } - constants::DW_AT_encoding => { - constant!(u8_value, Encoding, DwAte); - } - constants::DW_AT_external => { - flag!(); - } - constants::DW_AT_frame_base => { - exprloc!(); - loclistptr!(); - } - constants::DW_AT_friend => { - reference!(); - } - constants::DW_AT_identifier_case => { - constant!(u8_value, IdentifierCase, DwId); - } - constants::DW_AT_macro_info => { - macinfoptr!(); - } - constants::DW_AT_namelist_item => { - reference!(); - } - constants::DW_AT_priority => { - reference!(); - } - constants::DW_AT_segment => { - exprloc!(); - loclistptr!(); - } - constants::DW_AT_specification => { - reference!(); - } - constants::DW_AT_static_link => { - exprloc!(); - loclistptr!(); - } - constants::DW_AT_type => { - reference!(); - } - constants::DW_AT_use_location => { - exprloc!(); - loclistptr!(); - } - constants::DW_AT_variable_parameter => { - flag!(); - } - constants::DW_AT_virtuality => { - constant!(u8_value, Virtuality, DwVirtuality); - } - constants::DW_AT_vtable_elem_location => { - exprloc!(); - loclistptr!(); - } - constants::DW_AT_allocated => { - // TODO: constant - exprloc!(); - reference!(); - } - constants::DW_AT_associated => { - // TODO: constant - exprloc!(); - reference!(); - } - constants::DW_AT_data_location => { - exprloc!(); - } - constants::DW_AT_byte_stride => { - constant!(udata_value, Udata); - exprloc!(); - reference!(); - } - constants::DW_AT_entry_pc => { - // TODO: constant - address!(); - } - constants::DW_AT_use_UTF8 => { - flag!(); - } - constants::DW_AT_extension => { - reference!(); - } - constants::DW_AT_ranges => { - rangelistptr!(); - } - 
constants::DW_AT_trampoline => { - address!(); - flag!(); - reference!(); - string!(); - } - constants::DW_AT_call_column => { - constant!(udata_value, Udata); - } - constants::DW_AT_call_file => { - constant!(udata_value, FileIndex); - } - constants::DW_AT_call_line => { - constant!(udata_value, Udata); - } - constants::DW_AT_description => { - string!(); - } - constants::DW_AT_binary_scale => { - // TODO: constant - } - constants::DW_AT_decimal_scale => { - // TODO: constant - } - constants::DW_AT_small => { - reference!(); - } - constants::DW_AT_decimal_sign => { - constant!(u8_value, DecimalSign, DwDs); - } - constants::DW_AT_digit_count => { - // TODO: constant - } - constants::DW_AT_picture_string => { - string!(); - } - constants::DW_AT_mutable => { - flag!(); - } - constants::DW_AT_threads_scaled => { - flag!(); - } - constants::DW_AT_explicit => { - flag!(); - } - constants::DW_AT_object_pointer => { - reference!(); - } - constants::DW_AT_endianity => { - constant!(u8_value, Endianity, DwEnd); - } - constants::DW_AT_elemental => { - flag!(); - } - constants::DW_AT_pure => { - flag!(); - } - constants::DW_AT_recursive => { - flag!(); - } - constants::DW_AT_signature => { - reference!(); - } - constants::DW_AT_main_subprogram => { - flag!(); - } - constants::DW_AT_data_bit_offset => { - // TODO: constant - } - constants::DW_AT_const_expr => { - flag!(); - } - constants::DW_AT_enum_class => { - flag!(); - } - constants::DW_AT_linkage_name => { - string!(); - } - constants::DW_AT_string_length_bit_size => { - // TODO: constant - } - constants::DW_AT_string_length_byte_size => { - // TODO: constant - } - constants::DW_AT_rank => { - // TODO: constant - exprloc!(); - } - constants::DW_AT_str_offsets_base => { - stroffsetsptr!(); - } - constants::DW_AT_addr_base | constants::DW_AT_GNU_addr_base => { - addrptr!(); - } - constants::DW_AT_rnglists_base | constants::DW_AT_GNU_ranges_base => { - rnglistsptr!(); - } - constants::DW_AT_dwo_name => { - string!(); - } - 
constants::DW_AT_reference => { - flag!(); - } - constants::DW_AT_rvalue_reference => { - flag!(); - } - constants::DW_AT_macros => { - macroptr!(); - } - constants::DW_AT_call_all_calls => { - flag!(); - } - constants::DW_AT_call_all_source_calls => { - flag!(); - } - constants::DW_AT_call_all_tail_calls => { - flag!(); - } - constants::DW_AT_call_return_pc => { - address!(); - } - constants::DW_AT_call_value => { - exprloc!(); - } - constants::DW_AT_call_origin => { - exprloc!(); - } - constants::DW_AT_call_parameter => { - reference!(); - } - constants::DW_AT_call_pc => { - address!(); - } - constants::DW_AT_call_tail_call => { - flag!(); - } - constants::DW_AT_call_target => { - exprloc!(); - } - constants::DW_AT_call_target_clobbered => { - exprloc!(); - } - constants::DW_AT_call_data_location => { - exprloc!(); - } - constants::DW_AT_call_data_value => { - exprloc!(); - } - constants::DW_AT_noreturn => { - flag!(); - } - constants::DW_AT_alignment => { - // TODO: constant - } - constants::DW_AT_export_symbols => { - flag!(); - } - constants::DW_AT_deleted => { - flag!(); - } - constants::DW_AT_defaulted => { - // TODO: constant - } - constants::DW_AT_loclists_base => { - loclistsptr!(); - } - constants::DW_AT_GNU_dwo_id => { - dwoid!(); - } - _ => {} - } - self.value.clone() - } - - /// Try to convert this attribute's value to a u8. - #[inline] - pub fn u8_value(&self) -> Option { - self.value.u8_value() - } - - /// Try to convert this attribute's value to a u16. - #[inline] - pub fn u16_value(&self) -> Option { - self.value.u16_value() - } - - /// Try to convert this attribute's value to an unsigned integer. - #[inline] - pub fn udata_value(&self) -> Option { - self.value.udata_value() - } - - /// Try to convert this attribute's value to a signed integer. - #[inline] - pub fn sdata_value(&self) -> Option { - self.value.sdata_value() - } - - /// Try to convert this attribute's value to an offset. 
- #[inline] - pub fn offset_value(&self) -> Option { - self.value.offset_value() - } - - /// Try to convert this attribute's value to an expression or location buffer. - /// - /// Expressions and locations may be `DW_FORM_block*` or `DW_FORM_exprloc`. - /// The standard doesn't mention `DW_FORM_block*` as a possible form, but - /// it is encountered in practice. - #[inline] - pub fn exprloc_value(&self) -> Option> { - self.value.exprloc_value() - } - - /// Try to return this attribute's value as a string slice. - /// - /// If this attribute's value is either an inline `DW_FORM_string` string, - /// or a `DW_FORM_strp` reference to an offset into the `.debug_str` - /// section, return the attribute's string value as `Some`. Other attribute - /// value forms are returned as `None`. - /// - /// Warning: this function does not handle all possible string forms. - /// Use `Dwarf::attr_string` instead. - #[inline] - pub fn string_value(&self, debug_str: &DebugStr) -> Option { - self.value.string_value(debug_str) - } - - /// Try to return this attribute's value as a string slice. - /// - /// If this attribute's value is either an inline `DW_FORM_string` string, - /// or a `DW_FORM_strp` reference to an offset into the `.debug_str` - /// section, or a `DW_FORM_strp_sup` reference to an offset into a supplementary - /// object file, return the attribute's string value as `Some`. Other attribute - /// value forms are returned as `None`. - /// - /// Warning: this function does not handle all possible string forms. - /// Use `Dwarf::attr_string` instead. - #[inline] - pub fn string_value_sup( - &self, - debug_str: &DebugStr, - debug_str_sup: Option<&DebugStr>, - ) -> Option { - self.value.string_value_sup(debug_str, debug_str_sup) - } -} - -impl AttributeValue -where - R: Reader, - Offset: ReaderOffset, -{ - /// Try to convert this attribute's value to a u8. 
- pub fn u8_value(&self) -> Option { - if let Some(value) = self.udata_value() { - if value <= u64::from(u8::MAX) { - return Some(value as u8); - } - } - None - } - - /// Try to convert this attribute's value to a u16. - pub fn u16_value(&self) -> Option { - if let Some(value) = self.udata_value() { - if value <= u64::from(u16::MAX) { - return Some(value as u16); - } - } - None - } - - /// Try to convert this attribute's value to an unsigned integer. - pub fn udata_value(&self) -> Option { - Some(match *self { - AttributeValue::Data1(data) => u64::from(data), - AttributeValue::Data2(data) => u64::from(data), - AttributeValue::Data4(data) => u64::from(data), - AttributeValue::Data8(data) => data, - AttributeValue::Udata(data) => data, - AttributeValue::Sdata(data) => { - if data < 0 { - // Maybe we should emit a warning here - return None; - } - data as u64 - } - _ => return None, - }) - } - - /// Try to convert this attribute's value to a signed integer. - pub fn sdata_value(&self) -> Option { - Some(match *self { - AttributeValue::Data1(data) => i64::from(data as i8), - AttributeValue::Data2(data) => i64::from(data as i16), - AttributeValue::Data4(data) => i64::from(data as i32), - AttributeValue::Data8(data) => data as i64, - AttributeValue::Sdata(data) => data, - AttributeValue::Udata(data) => { - if data > i64::max_value() as u64 { - // Maybe we should emit a warning here - return None; - } - data as i64 - } - _ => return None, - }) - } - - /// Try to convert this attribute's value to an offset. - pub fn offset_value(&self) -> Option { - // While offsets will be DW_FORM_data4/8 in DWARF version 2/3, - // these have already been converted to `SecOffset. - if let AttributeValue::SecOffset(offset) = *self { - Some(offset) - } else { - None - } - } - - /// Try to convert this attribute's value to an expression or location buffer. - /// - /// Expressions and locations may be `DW_FORM_block*` or `DW_FORM_exprloc`. 
- /// The standard doesn't mention `DW_FORM_block*` as a possible form, but - /// it is encountered in practice. - pub fn exprloc_value(&self) -> Option> { - Some(match *self { - AttributeValue::Block(ref data) => Expression(data.clone()), - AttributeValue::Exprloc(ref data) => data.clone(), - _ => return None, - }) - } - - /// Try to return this attribute's value as a string slice. - /// - /// If this attribute's value is either an inline `DW_FORM_string` string, - /// or a `DW_FORM_strp` reference to an offset into the `.debug_str` - /// section, return the attribute's string value as `Some`. Other attribute - /// value forms are returned as `None`. - /// - /// Warning: this function does not handle all possible string forms. - /// Use `Dwarf::attr_string` instead. - pub fn string_value(&self, debug_str: &DebugStr) -> Option { - match *self { - AttributeValue::String(ref string) => Some(string.clone()), - AttributeValue::DebugStrRef(offset) => debug_str.get_str(offset).ok(), - _ => None, - } - } - - /// Try to return this attribute's value as a string slice. - /// - /// If this attribute's value is either an inline `DW_FORM_string` string, - /// or a `DW_FORM_strp` reference to an offset into the `.debug_str` - /// section, or a `DW_FORM_strp_sup` reference to an offset into a supplementary - /// object file, return the attribute's string value as `Some`. Other attribute - /// value forms are returned as `None`. - /// - /// Warning: this function does not handle all possible string forms. - /// Use `Dwarf::attr_string` instead. 
- pub fn string_value_sup( - &self, - debug_str: &DebugStr, - debug_str_sup: Option<&DebugStr>, - ) -> Option { - match *self { - AttributeValue::String(ref string) => Some(string.clone()), - AttributeValue::DebugStrRef(offset) => debug_str.get_str(offset).ok(), - AttributeValue::DebugStrRefSup(offset) => { - debug_str_sup.and_then(|s| s.get_str(offset).ok()) - } - _ => None, - } - } -} - -fn length_u8_value(input: &mut R) -> Result { - let len = input.read_u8().map(R::Offset::from_u8)?; - input.split(len) -} - -fn length_u16_value(input: &mut R) -> Result { - let len = input.read_u16().map(R::Offset::from_u16)?; - input.split(len) -} - -fn length_u32_value(input: &mut R) -> Result { - let len = input.read_u32().map(R::Offset::from_u32)?; - input.split(len) -} - -fn length_uleb128_value(input: &mut R) -> Result { - let len = input.read_uleb128().and_then(R::Offset::from_u64)?; - input.split(len) -} - -// Return true if the given `name` can be a section offset in DWARF version 2/3. -// This is required to correctly handle relocations. 
-fn allow_section_offset(name: constants::DwAt, version: u16) -> bool { - match name { - constants::DW_AT_location - | constants::DW_AT_stmt_list - | constants::DW_AT_string_length - | constants::DW_AT_return_addr - | constants::DW_AT_start_scope - | constants::DW_AT_frame_base - | constants::DW_AT_macro_info - | constants::DW_AT_macros - | constants::DW_AT_segment - | constants::DW_AT_static_link - | constants::DW_AT_use_location - | constants::DW_AT_vtable_elem_location - | constants::DW_AT_ranges => true, - constants::DW_AT_data_member_location => version == 2 || version == 3, - _ => false, - } -} - -pub(crate) fn parse_attribute( - input: &mut R, - encoding: Encoding, - spec: AttributeSpecification, -) -> Result> { - let mut form = spec.form(); - loop { - let value = match form { - constants::DW_FORM_indirect => { - let dynamic_form = input.read_uleb128_u16()?; - form = constants::DwForm(dynamic_form); - continue; - } - constants::DW_FORM_addr => { - let addr = input.read_address(encoding.address_size)?; - AttributeValue::Addr(addr) - } - constants::DW_FORM_block1 => { - let block = length_u8_value(input)?; - AttributeValue::Block(block) - } - constants::DW_FORM_block2 => { - let block = length_u16_value(input)?; - AttributeValue::Block(block) - } - constants::DW_FORM_block4 => { - let block = length_u32_value(input)?; - AttributeValue::Block(block) - } - constants::DW_FORM_block => { - let block = length_uleb128_value(input)?; - AttributeValue::Block(block) - } - constants::DW_FORM_data1 => { - let data = input.read_u8()?; - AttributeValue::Data1(data) - } - constants::DW_FORM_data2 => { - let data = input.read_u16()?; - AttributeValue::Data2(data) - } - constants::DW_FORM_data4 => { - // DWARF version 2/3 may use DW_FORM_data4/8 for section offsets. - // Ensure we handle relocations here. 
- if encoding.format == Format::Dwarf32 - && allow_section_offset(spec.name(), encoding.version) - { - let offset = input.read_offset(Format::Dwarf32)?; - AttributeValue::SecOffset(offset) - } else { - let data = input.read_u32()?; - AttributeValue::Data4(data) - } - } - constants::DW_FORM_data8 => { - // DWARF version 2/3 may use DW_FORM_data4/8 for section offsets. - // Ensure we handle relocations here. - if encoding.format == Format::Dwarf64 - && allow_section_offset(spec.name(), encoding.version) - { - let offset = input.read_offset(Format::Dwarf64)?; - AttributeValue::SecOffset(offset) - } else { - let data = input.read_u64()?; - AttributeValue::Data8(data) - } - } - constants::DW_FORM_data16 => { - let block = input.split(R::Offset::from_u8(16))?; - AttributeValue::Block(block) - } - constants::DW_FORM_udata => { - let data = input.read_uleb128()?; - AttributeValue::Udata(data) - } - constants::DW_FORM_sdata => { - let data = input.read_sleb128()?; - AttributeValue::Sdata(data) - } - constants::DW_FORM_exprloc => { - let block = length_uleb128_value(input)?; - AttributeValue::Exprloc(Expression(block)) - } - constants::DW_FORM_flag => { - let present = input.read_u8()?; - AttributeValue::Flag(present != 0) - } - constants::DW_FORM_flag_present => { - // FlagPresent is this weird compile time always true thing that - // isn't actually present in the serialized DIEs, only in the abbreviation. 
- AttributeValue::Flag(true) - } - constants::DW_FORM_sec_offset => { - let offset = input.read_offset(encoding.format)?; - AttributeValue::SecOffset(offset) - } - constants::DW_FORM_ref1 => { - let reference = input.read_u8().map(R::Offset::from_u8)?; - AttributeValue::UnitRef(UnitOffset(reference)) - } - constants::DW_FORM_ref2 => { - let reference = input.read_u16().map(R::Offset::from_u16)?; - AttributeValue::UnitRef(UnitOffset(reference)) - } - constants::DW_FORM_ref4 => { - let reference = input.read_u32().map(R::Offset::from_u32)?; - AttributeValue::UnitRef(UnitOffset(reference)) - } - constants::DW_FORM_ref8 => { - let reference = input.read_u64().and_then(R::Offset::from_u64)?; - AttributeValue::UnitRef(UnitOffset(reference)) - } - constants::DW_FORM_ref_udata => { - let reference = input.read_uleb128().and_then(R::Offset::from_u64)?; - AttributeValue::UnitRef(UnitOffset(reference)) - } - constants::DW_FORM_ref_addr => { - // This is an offset, but DWARF version 2 specifies that DW_FORM_ref_addr - // has the same size as an address on the target system. This was changed - // in DWARF version 3. - let offset = if encoding.version == 2 { - input.read_sized_offset(encoding.address_size)? - } else { - input.read_offset(encoding.format)? 
- }; - AttributeValue::DebugInfoRef(DebugInfoOffset(offset)) - } - constants::DW_FORM_ref_sig8 => { - let signature = input.read_u64()?; - AttributeValue::DebugTypesRef(DebugTypeSignature(signature)) - } - constants::DW_FORM_ref_sup4 => { - let offset = input.read_u32().map(R::Offset::from_u32)?; - AttributeValue::DebugInfoRefSup(DebugInfoOffset(offset)) - } - constants::DW_FORM_ref_sup8 => { - let offset = input.read_u64().and_then(R::Offset::from_u64)?; - AttributeValue::DebugInfoRefSup(DebugInfoOffset(offset)) - } - constants::DW_FORM_GNU_ref_alt => { - let offset = input.read_offset(encoding.format)?; - AttributeValue::DebugInfoRefSup(DebugInfoOffset(offset)) - } - constants::DW_FORM_string => { - let string = input.read_null_terminated_slice()?; - AttributeValue::String(string) - } - constants::DW_FORM_strp => { - let offset = input.read_offset(encoding.format)?; - AttributeValue::DebugStrRef(DebugStrOffset(offset)) - } - constants::DW_FORM_strp_sup | constants::DW_FORM_GNU_strp_alt => { - let offset = input.read_offset(encoding.format)?; - AttributeValue::DebugStrRefSup(DebugStrOffset(offset)) - } - constants::DW_FORM_line_strp => { - let offset = input.read_offset(encoding.format)?; - AttributeValue::DebugLineStrRef(DebugLineStrOffset(offset)) - } - constants::DW_FORM_implicit_const => { - let data = spec - .implicit_const_value() - .ok_or(Error::InvalidImplicitConst)?; - AttributeValue::Sdata(data) - } - constants::DW_FORM_strx | constants::DW_FORM_GNU_str_index => { - let index = input.read_uleb128().and_then(R::Offset::from_u64)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - constants::DW_FORM_strx1 => { - let index = input.read_u8().map(R::Offset::from_u8)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - constants::DW_FORM_strx2 => { - let index = input.read_u16().map(R::Offset::from_u16)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - constants::DW_FORM_strx3 => { - let 
index = input.read_uint(3).and_then(R::Offset::from_u64)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - constants::DW_FORM_strx4 => { - let index = input.read_u32().map(R::Offset::from_u32)?; - AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(index)) - } - constants::DW_FORM_addrx | constants::DW_FORM_GNU_addr_index => { - let index = input.read_uleb128().and_then(R::Offset::from_u64)?; - AttributeValue::DebugAddrIndex(DebugAddrIndex(index)) - } - constants::DW_FORM_addrx1 => { - let index = input.read_u8().map(R::Offset::from_u8)?; - AttributeValue::DebugAddrIndex(DebugAddrIndex(index)) - } - constants::DW_FORM_addrx2 => { - let index = input.read_u16().map(R::Offset::from_u16)?; - AttributeValue::DebugAddrIndex(DebugAddrIndex(index)) - } - constants::DW_FORM_addrx3 => { - let index = input.read_uint(3).and_then(R::Offset::from_u64)?; - AttributeValue::DebugAddrIndex(DebugAddrIndex(index)) - } - constants::DW_FORM_addrx4 => { - let index = input.read_u32().map(R::Offset::from_u32)?; - AttributeValue::DebugAddrIndex(DebugAddrIndex(index)) - } - constants::DW_FORM_loclistx => { - let index = input.read_uleb128().and_then(R::Offset::from_u64)?; - AttributeValue::DebugLocListsIndex(DebugLocListsIndex(index)) - } - constants::DW_FORM_rnglistx => { - let index = input.read_uleb128().and_then(R::Offset::from_u64)?; - AttributeValue::DebugRngListsIndex(DebugRngListsIndex(index)) - } - _ => { - return Err(Error::UnknownForm); - } - }; - let attr = Attribute { - name: spec.name(), - value, - }; - return Ok(attr); - } -} - -pub(crate) fn skip_attributes( - input: &mut R, - encoding: Encoding, - specs: &[AttributeSpecification], -) -> Result<()> { - let mut skip_bytes = R::Offset::from_u8(0); - for spec in specs { - let mut form = spec.form(); - loop { - if let Some(len) = get_attribute_size(form, encoding) { - // We know the length of this attribute. Accumulate that length. 
- skip_bytes += R::Offset::from_u8(len); - break; - } - - // We have encountered a variable-length attribute. - if skip_bytes != R::Offset::from_u8(0) { - // Skip the accumulated skip bytes and then read the attribute normally. - input.skip(skip_bytes)?; - skip_bytes = R::Offset::from_u8(0); - } - - match form { - constants::DW_FORM_indirect => { - let dynamic_form = input.read_uleb128_u16()?; - form = constants::DwForm(dynamic_form); - continue; - } - constants::DW_FORM_block1 => { - skip_bytes = input.read_u8().map(R::Offset::from_u8)?; - } - constants::DW_FORM_block2 => { - skip_bytes = input.read_u16().map(R::Offset::from_u16)?; - } - constants::DW_FORM_block4 => { - skip_bytes = input.read_u32().map(R::Offset::from_u32)?; - } - constants::DW_FORM_block | constants::DW_FORM_exprloc => { - skip_bytes = input.read_uleb128().and_then(R::Offset::from_u64)?; - } - constants::DW_FORM_string => { - let _ = input.read_null_terminated_slice()?; - } - constants::DW_FORM_udata - | constants::DW_FORM_sdata - | constants::DW_FORM_ref_udata - | constants::DW_FORM_strx - | constants::DW_FORM_GNU_str_index - | constants::DW_FORM_addrx - | constants::DW_FORM_GNU_addr_index - | constants::DW_FORM_loclistx - | constants::DW_FORM_rnglistx => { - input.skip_leb128()?; - } - _ => { - return Err(Error::UnknownForm); - } - }; - break; - } - } - if skip_bytes != R::Offset::from_u8(0) { - // Skip the remaining accumulated skip bytes. - input.skip(skip_bytes)?; - } - Ok(()) -} - -/// An iterator over a particular entry's attributes. -/// -/// See [the documentation for -/// `DebuggingInformationEntry::attrs()`](./struct.DebuggingInformationEntry.html#method.attrs) -/// for details. -/// -/// Can be [used with -/// `FallibleIterator`](./index.html#using-with-fallibleiterator). 
-#[derive(Clone, Copy, Debug)] -pub struct AttrsIter<'abbrev, 'entry, 'unit, R: Reader> { - input: R, - attributes: &'abbrev [AttributeSpecification], - entry: &'entry DebuggingInformationEntry<'abbrev, 'unit, R>, -} - -impl<'abbrev, 'entry, 'unit, R: Reader> AttrsIter<'abbrev, 'entry, 'unit, R> { - /// Advance the iterator and return the next attribute. - /// - /// Returns `None` when iteration is finished. If an error - /// occurs while parsing the next attribute, then this error - /// is returned, and all subsequent calls return `None`. - #[inline(always)] - pub fn next(&mut self) -> Result>> { - if self.attributes.is_empty() { - // Now that we have parsed all of the attributes, we know where - // either (1) this entry's children start, if the abbreviation says - // this entry has children; or (2) where this entry's siblings - // begin. - if let Some(end) = self.entry.attrs_len.get() { - debug_assert_eq!(end, self.input.offset_from(&self.entry.attrs_slice)); - } else { - self.entry - .attrs_len - .set(Some(self.input.offset_from(&self.entry.attrs_slice))); - } - - return Ok(None); - } - - let spec = self.attributes[0]; - let rest_spec = &self.attributes[1..]; - match parse_attribute(&mut self.input, self.entry.unit.encoding(), spec) { - Ok(attr) => { - self.attributes = rest_spec; - Ok(Some(attr)) - } - Err(e) => { - self.input.empty(); - Err(e) - } - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl<'abbrev, 'entry, 'unit, R: Reader> fallible_iterator::FallibleIterator - for AttrsIter<'abbrev, 'entry, 'unit, R> -{ - type Item = Attribute; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - AttrsIter::next(self) - } -} - -/// A raw reader of the data that defines the Debugging Information Entries. -/// -/// `EntriesRaw` provides primitives to read the components of Debugging Information -/// Entries (DIEs). 
A DIE consists of an abbreviation code (read with `read_abbreviation`) -/// followed by a number of attributes (read with `read_attribute`). -/// The user must provide the control flow to read these correctly. -/// In particular, all attributes must always be read before reading another -/// abbreviation code. -/// -/// `EntriesRaw` lacks some features of `EntriesCursor`, such as the ability to skip -/// to the next sibling DIE. However, this also allows it to optimize better, since it -/// does not need to perform the extra bookkeeping required to support these features, -/// and thus it is suitable for cases where performance is important. -/// -/// ## Example Usage -/// ```rust,no_run -/// # fn example() -> Result<(), gimli::Error> { -/// # let debug_info = gimli::DebugInfo::new(&[], gimli::LittleEndian); -/// # let get_some_unit = || debug_info.units().next().unwrap().unwrap(); -/// let unit = get_some_unit(); -/// # let debug_abbrev = gimli::DebugAbbrev::new(&[], gimli::LittleEndian); -/// # let get_abbrevs_for_unit = |_| unit.abbreviations(&debug_abbrev).unwrap(); -/// let abbrevs = get_abbrevs_for_unit(&unit); -/// -/// let mut entries = unit.entries_raw(&abbrevs, None)?; -/// while !entries.is_empty() { -/// let abbrev = if let Some(abbrev) = entries.read_abbreviation()? { -/// abbrev -/// } else { -/// // Null entry with no attributes. -/// continue -/// }; -/// match abbrev.tag() { -/// gimli::DW_TAG_subprogram => { -/// // Loop over attributes for DIEs we care about. -/// for spec in abbrev.attributes() { -/// let attr = entries.read_attribute(*spec)?; -/// match attr.name() { -/// // Handle attributes. -/// _ => {} -/// } -/// } -/// } -/// _ => { -/// // Skip attributes for DIEs we don't care about. 
-/// entries.skip_attributes(abbrev.attributes()); -/// } -/// } -/// } -/// # unreachable!() -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct EntriesRaw<'abbrev, 'unit, R> -where - R: Reader, -{ - input: R, - unit: &'unit UnitHeader, - abbreviations: &'abbrev Abbreviations, - depth: isize, -} - -impl<'abbrev, 'unit, R: Reader> EntriesRaw<'abbrev, 'unit, R> { - /// Return true if there is no more input. - #[inline] - pub fn is_empty(&self) -> bool { - self.input.is_empty() - } - - /// Return the unit offset at which the reader will read next. - /// - /// If you want the offset of the next entry, then this must be called prior to reading - /// the next entry. - pub fn next_offset(&self) -> UnitOffset { - UnitOffset(self.unit.header_size() + self.input.offset_from(&self.unit.entries_buf)) - } - - /// Return the depth of the next entry. - /// - /// This depth is updated when `read_abbreviation` is called, and is updated - /// based on null entries and the `has_children` field in the abbreviation. - #[inline] - pub fn next_depth(&self) -> isize { - self.depth - } - - /// Read an abbreviation code and lookup the corresponding `Abbreviation`. - /// - /// Returns `Ok(None)` for null entries. - #[inline] - pub fn read_abbreviation(&mut self) -> Result> { - let code = self.input.read_uleb128()?; - if code == 0 { - self.depth -= 1; - return Ok(None); - }; - let abbrev = self - .abbreviations - .get(code) - .ok_or(Error::UnknownAbbreviation)?; - if abbrev.has_children() { - self.depth += 1; - } - Ok(Some(abbrev)) - } - - /// Read an attribute. - #[inline] - pub fn read_attribute(&mut self, spec: AttributeSpecification) -> Result> { - parse_attribute(&mut self.input, self.unit.encoding(), spec) - } - - /// Skip all the attributes of an abbreviation. 
- #[inline] - pub fn skip_attributes(&mut self, specs: &[AttributeSpecification]) -> Result<()> { - skip_attributes(&mut self.input, self.unit.encoding(), specs) - } -} - -/// A cursor into the Debugging Information Entries tree for a compilation unit. -/// -/// The `EntriesCursor` can traverse the DIE tree in DFS order using `next_dfs()`, -/// or skip to the next sibling of the entry the cursor is currently pointing to -/// using `next_sibling()`. -/// -/// It is also possible to traverse the DIE tree at a lower abstraction level -/// using `next_entry()`. This method does not skip over null entries, or provide -/// any indication of the current tree depth. In this case, you must use `current()` -/// to obtain the current entry, and `current().has_children()` to determine if -/// the entry following the current entry will be a sibling or child. `current()` -/// will return `None` if the current entry is a null entry, which signifies the -/// end of the current tree depth. -#[derive(Clone, Debug)] -pub struct EntriesCursor<'abbrev, 'unit, R> -where - R: Reader, -{ - input: R, - unit: &'unit UnitHeader, - abbreviations: &'abbrev Abbreviations, - cached_current: Option>, - delta_depth: isize, -} - -impl<'abbrev, 'unit, R: Reader> EntriesCursor<'abbrev, 'unit, R> { - /// Get a reference to the entry that the cursor is currently pointing to. - /// - /// If the cursor is not pointing at an entry, or if the current entry is a - /// null entry, then `None` is returned. - #[inline] - pub fn current(&self) -> Option<&DebuggingInformationEntry<'abbrev, 'unit, R>> { - self.cached_current.as_ref() - } - - /// Move the cursor to the next DIE in the tree. - /// - /// Returns `Some` if there is a next entry, even if this entry is null. - /// If there is no next entry, then `None` is returned. 
- pub fn next_entry(&mut self) -> Result> { - if let Some(ref current) = self.cached_current { - self.input = current.after_attrs()?; - } - - if self.input.is_empty() { - self.cached_current = None; - self.delta_depth = 0; - return Ok(None); - } - - match DebuggingInformationEntry::parse(&mut self.input, self.unit, self.abbreviations) { - Ok(Some(entry)) => { - self.delta_depth = entry.has_children() as isize; - self.cached_current = Some(entry); - Ok(Some(())) - } - Ok(None) => { - self.delta_depth = -1; - self.cached_current = None; - Ok(Some(())) - } - Err(e) => { - self.input.empty(); - self.delta_depth = 0; - self.cached_current = None; - Err(e) - } - } - } - - /// Move the cursor to the next DIE in the tree in DFS order. - /// - /// Upon successful movement of the cursor, return the delta traversal - /// depth and the entry: - /// - /// * If we moved down into the previous current entry's children, we get - /// `Some((1, entry))`. - /// - /// * If we moved to the previous current entry's sibling, we get - /// `Some((0, entry))`. - /// - /// * If the previous entry does not have any siblings and we move up to - /// its parent's next sibling, then we get `Some((-1, entry))`. Note that - /// if the parent doesn't have a next sibling, then it could go up to the - /// parent's parent's next sibling and return `Some((-2, entry))`, etc. - /// - /// If there is no next entry, then `None` is returned. - /// - /// Here is an example that finds the first entry in a compilation unit that - /// does not have any children. 
- /// - /// ``` - /// # use gimli::{DebugAbbrev, DebugInfo, LittleEndian}; - /// # let info_buf = [ - /// # // Comilation unit header - /// # - /// # // 32-bit unit length = 25 - /// # 0x19, 0x00, 0x00, 0x00, - /// # // Version 4 - /// # 0x04, 0x00, - /// # // debug_abbrev_offset - /// # 0x00, 0x00, 0x00, 0x00, - /// # // Address size - /// # 0x04, - /// # - /// # // DIEs - /// # - /// # // Abbreviation code - /// # 0x01, - /// # // Attribute of form DW_FORM_string = "foo\0" - /// # 0x66, 0x6f, 0x6f, 0x00, - /// # - /// # // Children - /// # - /// # // Abbreviation code - /// # 0x01, - /// # // Attribute of form DW_FORM_string = "foo\0" - /// # 0x66, 0x6f, 0x6f, 0x00, - /// # - /// # // Children - /// # - /// # // Abbreviation code - /// # 0x01, - /// # // Attribute of form DW_FORM_string = "foo\0" - /// # 0x66, 0x6f, 0x6f, 0x00, - /// # - /// # // Children - /// # - /// # // End of children - /// # 0x00, - /// # - /// # // End of children - /// # 0x00, - /// # - /// # // End of children - /// # 0x00, - /// # ]; - /// # let debug_info = DebugInfo::new(&info_buf, LittleEndian); - /// # - /// # let abbrev_buf = [ - /// # // Code - /// # 0x01, - /// # // DW_TAG_subprogram - /// # 0x2e, - /// # // DW_CHILDREN_yes - /// # 0x01, - /// # // Begin attributes - /// # // Attribute name = DW_AT_name - /// # 0x03, - /// # // Attribute form = DW_FORM_string - /// # 0x08, - /// # // End attributes - /// # 0x00, - /// # 0x00, - /// # // Null terminator - /// # 0x00 - /// # ]; - /// # let debug_abbrev = DebugAbbrev::new(&abbrev_buf, LittleEndian); - /// # - /// # let get_some_unit = || debug_info.units().next().unwrap().unwrap(); - /// - /// let unit = get_some_unit(); - /// # let get_abbrevs_for_unit = |_| unit.abbreviations(&debug_abbrev).unwrap(); - /// let abbrevs = get_abbrevs_for_unit(&unit); - /// - /// let mut first_entry_with_no_children = None; - /// let mut cursor = unit.entries(&abbrevs); - /// - /// // Move the cursor to the root. 
- /// assert!(cursor.next_dfs().unwrap().is_some()); - /// - /// // Traverse the DIE tree in depth-first search order. - /// let mut depth = 0; - /// while let Some((delta_depth, current)) = cursor.next_dfs().expect("Should parse next dfs") { - /// // Update depth value, and break out of the loop when we - /// // return to the original starting position. - /// depth += delta_depth; - /// if depth <= 0 { - /// break; - /// } - /// - /// first_entry_with_no_children = Some(current.clone()); - /// } - /// - /// println!("The first entry with no children is {:?}", - /// first_entry_with_no_children.unwrap()); - /// ``` - pub fn next_dfs( - &mut self, - ) -> Result)>> { - let mut delta_depth = self.delta_depth; - loop { - // The next entry should be the one we want. - if self.next_entry()?.is_some() { - if let Some(ref entry) = self.cached_current { - return Ok(Some((delta_depth, entry))); - } - - // next_entry() read a null entry. - delta_depth += self.delta_depth; - } else { - return Ok(None); - } - } - } - - /// Move the cursor to the next sibling DIE of the current one. - /// - /// Returns `Ok(Some(entry))` when the cursor has been moved to - /// the next sibling, `Ok(None)` when there is no next sibling. - /// - /// The depth of the cursor is never changed if this method returns `Ok`. - /// Once `Ok(None)` is returned, this method will continue to return - /// `Ok(None)` until either `next_entry` or `next_dfs` is called. 
- /// - /// Here is an example that iterates over all of the direct children of the - /// root entry: - /// - /// ``` - /// # use gimli::{DebugAbbrev, DebugInfo, LittleEndian}; - /// # let info_buf = [ - /// # // Comilation unit header - /// # - /// # // 32-bit unit length = 25 - /// # 0x19, 0x00, 0x00, 0x00, - /// # // Version 4 - /// # 0x04, 0x00, - /// # // debug_abbrev_offset - /// # 0x00, 0x00, 0x00, 0x00, - /// # // Address size - /// # 0x04, - /// # - /// # // DIEs - /// # - /// # // Abbreviation code - /// # 0x01, - /// # // Attribute of form DW_FORM_string = "foo\0" - /// # 0x66, 0x6f, 0x6f, 0x00, - /// # - /// # // Children - /// # - /// # // Abbreviation code - /// # 0x01, - /// # // Attribute of form DW_FORM_string = "foo\0" - /// # 0x66, 0x6f, 0x6f, 0x00, - /// # - /// # // Children - /// # - /// # // Abbreviation code - /// # 0x01, - /// # // Attribute of form DW_FORM_string = "foo\0" - /// # 0x66, 0x6f, 0x6f, 0x00, - /// # - /// # // Children - /// # - /// # // End of children - /// # 0x00, - /// # - /// # // End of children - /// # 0x00, - /// # - /// # // End of children - /// # 0x00, - /// # ]; - /// # let debug_info = DebugInfo::new(&info_buf, LittleEndian); - /// # - /// # let get_some_unit = || debug_info.units().next().unwrap().unwrap(); - /// - /// # let abbrev_buf = [ - /// # // Code - /// # 0x01, - /// # // DW_TAG_subprogram - /// # 0x2e, - /// # // DW_CHILDREN_yes - /// # 0x01, - /// # // Begin attributes - /// # // Attribute name = DW_AT_name - /// # 0x03, - /// # // Attribute form = DW_FORM_string - /// # 0x08, - /// # // End attributes - /// # 0x00, - /// # 0x00, - /// # // Null terminator - /// # 0x00 - /// # ]; - /// # let debug_abbrev = DebugAbbrev::new(&abbrev_buf, LittleEndian); - /// # - /// let unit = get_some_unit(); - /// # let get_abbrevs_for_unit = |_| unit.abbreviations(&debug_abbrev).unwrap(); - /// let abbrevs = get_abbrevs_for_unit(&unit); - /// - /// let mut cursor = unit.entries(&abbrevs); - /// - /// // Move the cursor 
to the root. - /// assert!(cursor.next_dfs().unwrap().is_some()); - /// - /// // Move the cursor to the root's first child. - /// assert!(cursor.next_dfs().unwrap().is_some()); - /// - /// // Iterate the root's children. - /// loop { - /// { - /// let current = cursor.current().expect("Should be at an entry"); - /// println!("{:?} is a child of the root", current); - /// } - /// - /// if cursor.next_sibling().expect("Should parse next sibling").is_none() { - /// break; - /// } - /// } - /// ``` - pub fn next_sibling( - &mut self, - ) -> Result>> { - if self.current().is_none() { - // We're already at the null for the end of the sibling list. - return Ok(None); - } - - // Loop until we find an entry at the current level. - let mut depth = 0; - loop { - // Use is_some() and unwrap() to keep borrow checker happy. - if self.current().is_some() && self.current().unwrap().has_children() { - if let Some(sibling_input) = self.current().unwrap().sibling() { - // Fast path: this entry has a DW_AT_sibling - // attribute pointing to its sibling, so jump - // to it (which keeps us at the same depth). - self.input = sibling_input; - self.cached_current = None; - } else { - // This entry has children, so the next entry is - // down one level. - depth += 1; - } - } - - if self.next_entry()?.is_none() { - // End of input. - return Ok(None); - } - - if depth == 0 { - // Found an entry at the current level. - return Ok(self.current()); - } - - if self.current().is_none() { - // A null entry means the end of a child list, so we're - // back up a level. - depth -= 1; - } - } - } -} - -/// The state information for a tree view of the Debugging Information Entries. -/// -/// The `EntriesTree` can be used to recursively iterate through the DIE -/// tree, following the parent/child relationships. The `EntriesTree` contains -/// shared state for all nodes in the tree, avoiding any duplicate parsing of -/// entries during the traversal. 
-/// -/// ## Example Usage -/// ```rust,no_run -/// # fn example() -> Result<(), gimli::Error> { -/// # let debug_info = gimli::DebugInfo::new(&[], gimli::LittleEndian); -/// # let get_some_unit = || debug_info.units().next().unwrap().unwrap(); -/// let unit = get_some_unit(); -/// # let debug_abbrev = gimli::DebugAbbrev::new(&[], gimli::LittleEndian); -/// # let get_abbrevs_for_unit = |_| unit.abbreviations(&debug_abbrev).unwrap(); -/// let abbrevs = get_abbrevs_for_unit(&unit); -/// -/// let mut tree = unit.entries_tree(&abbrevs, None)?; -/// let root = tree.root()?; -/// process_tree(root)?; -/// # unreachable!() -/// # } -/// -/// fn process_tree(mut node: gimli::EntriesTreeNode) -> gimli::Result<()> -/// where R: gimli::Reader -/// { -/// { -/// // Examine the entry attributes. -/// let mut attrs = node.entry().attrs(); -/// while let Some(attr) = attrs.next()? { -/// } -/// } -/// let mut children = node.children(); -/// while let Some(child) = children.next()? { -/// // Recursively process a child. -/// process_tree(child); -/// } -/// Ok(()) -/// } -/// ``` -#[derive(Clone, Debug)] -pub struct EntriesTree<'abbrev, 'unit, R> -where - R: Reader, -{ - root: R, - unit: &'unit UnitHeader, - abbreviations: &'abbrev Abbreviations, - input: R, - entry: Option>, - depth: isize, -} - -impl<'abbrev, 'unit, R: Reader> EntriesTree<'abbrev, 'unit, R> { - fn new(root: R, unit: &'unit UnitHeader, abbreviations: &'abbrev Abbreviations) -> Self { - let input = root.clone(); - EntriesTree { - root, - unit, - abbreviations, - input, - entry: None, - depth: 0, - } - } - - /// Returns the root node of the tree. - pub fn root<'me>(&'me mut self) -> Result> { - self.input = self.root.clone(); - self.entry = - DebuggingInformationEntry::parse(&mut self.input, self.unit, self.abbreviations)?; - if self.entry.is_none() { - return Err(Error::UnexpectedNull); - } - self.depth = 0; - Ok(EntriesTreeNode::new(self, 1)) - } - - /// Move the cursor to the next entry at the specified depth. 
- /// - /// Requires `depth <= self.depth + 1`. - /// - /// Returns `true` if successful. - fn next(&mut self, depth: isize) -> Result { - if self.depth < depth { - debug_assert_eq!(self.depth + 1, depth); - - match self.entry { - Some(ref entry) => { - if !entry.has_children() { - return Ok(false); - } - self.depth += 1; - self.input = entry.after_attrs()?; - } - None => return Ok(false), - } - - if self.input.is_empty() { - self.entry = None; - return Ok(false); - } - - return match DebuggingInformationEntry::parse( - &mut self.input, - self.unit, - self.abbreviations, - ) { - Ok(entry) => { - self.entry = entry; - Ok(self.entry.is_some()) - } - Err(e) => { - self.input.empty(); - self.entry = None; - Err(e) - } - }; - } - - loop { - match self.entry { - Some(ref entry) => { - if entry.has_children() { - if let Some(sibling_input) = entry.sibling() { - // Fast path: this entry has a DW_AT_sibling - // attribute pointing to its sibling, so jump - // to it (which keeps us at the same depth). - self.input = sibling_input; - } else { - // This entry has children, so the next entry is - // down one level. - self.depth += 1; - self.input = entry.after_attrs()?; - } - } else { - // This entry has no children, so next entry is at same depth. - self.input = entry.after_attrs()?; - } - } - None => { - // This entry is a null, so next entry is up one level. - self.depth -= 1; - } - } - - if self.input.is_empty() { - self.entry = None; - return Ok(false); - } - - match DebuggingInformationEntry::parse(&mut self.input, self.unit, self.abbreviations) { - Ok(entry) => { - self.entry = entry; - if self.depth == depth { - return Ok(self.entry.is_some()); - } - } - Err(e) => { - self.input.empty(); - self.entry = None; - return Err(e); - } - } - } - } -} - -/// A node in the Debugging Information Entry tree. -/// -/// The root node of a tree can be obtained -/// via [`EntriesTree::root`](./struct.EntriesTree.html#method.root). 
-#[derive(Debug)] -pub struct EntriesTreeNode<'abbrev, 'unit, 'tree, R: Reader> { - tree: &'tree mut EntriesTree<'abbrev, 'unit, R>, - depth: isize, -} - -impl<'abbrev, 'unit, 'tree, R: Reader> EntriesTreeNode<'abbrev, 'unit, 'tree, R> { - fn new( - tree: &'tree mut EntriesTree<'abbrev, 'unit, R>, - depth: isize, - ) -> EntriesTreeNode<'abbrev, 'unit, 'tree, R> { - debug_assert!(tree.entry.is_some()); - EntriesTreeNode { tree, depth } - } - - /// Returns the current entry in the tree. - pub fn entry(&self) -> &DebuggingInformationEntry<'abbrev, 'unit, R> { - // We never create a node without an entry. - self.tree.entry.as_ref().unwrap() - } - - /// Create an iterator for the children of the current entry. - /// - /// The current entry can no longer be accessed after creating the - /// iterator. - pub fn children(self) -> EntriesTreeIter<'abbrev, 'unit, 'tree, R> { - EntriesTreeIter::new(self.tree, self.depth) - } -} - -/// An iterator that allows traversal of the children of an -/// `EntriesTreeNode`. -/// -/// The items returned by this iterator are also `EntriesTreeNode`s, -/// which allow recursive traversal of grandchildren, etc. -#[derive(Debug)] -pub struct EntriesTreeIter<'abbrev, 'unit, 'tree, R: Reader> { - tree: &'tree mut EntriesTree<'abbrev, 'unit, R>, - depth: isize, - empty: bool, -} - -impl<'abbrev, 'unit, 'tree, R: Reader> EntriesTreeIter<'abbrev, 'unit, 'tree, R> { - fn new( - tree: &'tree mut EntriesTree<'abbrev, 'unit, R>, - depth: isize, - ) -> EntriesTreeIter<'abbrev, 'unit, 'tree, R> { - EntriesTreeIter { - tree, - depth, - empty: false, - } - } - - /// Returns an `EntriesTreeNode` for the next child entry. - /// - /// Returns `None` if there are no more children. - pub fn next<'me>(&'me mut self) -> Result>> { - if self.empty { - Ok(None) - } else if self.tree.next(self.depth)? 
{ - Ok(Some(EntriesTreeNode::new(self.tree, self.depth + 1))) - } else { - self.empty = true; - Ok(None) - } - } -} - -/// Parse a type unit header's unique type signature. Callers should handle -/// unique-ness checking. -fn parse_type_signature(input: &mut R) -> Result { - input.read_u64().map(DebugTypeSignature) -} - -/// Parse a type unit header's type offset. -fn parse_type_offset(input: &mut R, format: Format) -> Result> { - input.read_offset(format).map(UnitOffset) -} - -/// The `DebugTypes` struct represents the DWARF type information -/// found in the `.debug_types` section. -#[derive(Debug, Default, Clone, Copy)] -pub struct DebugTypes { - debug_types_section: R, -} - -impl<'input, Endian> DebugTypes> -where - Endian: Endianity, -{ - /// Construct a new `DebugTypes` instance from the data in the `.debug_types` - /// section. - /// - /// It is the caller's responsibility to read the `.debug_types` section and - /// present it as a `&[u8]` slice. That means using some ELF loader on - /// Linux, a Mach-O loader on macOS, etc. - /// - /// ``` - /// use gimli::{DebugTypes, LittleEndian}; - /// - /// # let buf = [0x00, 0x01, 0x02, 0x03]; - /// # let read_debug_types_section_somehow = || &buf; - /// let debug_types = DebugTypes::new(read_debug_types_section_somehow(), LittleEndian); - /// ``` - pub fn new(debug_types_section: &'input [u8], endian: Endian) -> Self { - Self::from(EndianSlice::new(debug_types_section, endian)) - } -} - -impl DebugTypes { - /// Create a `DebugTypes` section that references the data in `self`. - /// - /// This is useful when `R` implements `Reader` but `T` does not. - /// - /// ## Example Usage - /// - /// ```rust,no_run - /// # let load_section = || unimplemented!(); - /// // Read the DWARF section into a `Vec` with whatever object loader you're using. - /// let owned_section: gimli::DebugTypes> = load_section(); - /// // Create a reference to the DWARF section. 
- /// let section = owned_section.borrow(|section| { - /// gimli::EndianSlice::new(§ion, gimli::LittleEndian) - /// }); - /// ``` - pub fn borrow<'a, F, R>(&'a self, mut borrow: F) -> DebugTypes - where - F: FnMut(&'a T) -> R, - { - borrow(&self.debug_types_section).into() - } -} - -impl Section for DebugTypes { - fn id() -> SectionId { - SectionId::DebugTypes - } - - fn reader(&self) -> &R { - &self.debug_types_section - } -} - -impl From for DebugTypes { - fn from(debug_types_section: R) -> Self { - DebugTypes { - debug_types_section, - } - } -} - -impl DebugTypes { - /// Iterate the type-units in this `.debug_types` section. - /// - /// ``` - /// use gimli::{DebugTypes, LittleEndian}; - /// - /// # let buf = []; - /// # let read_debug_types_section_somehow = || &buf; - /// let debug_types = DebugTypes::new(read_debug_types_section_somehow(), LittleEndian); - /// - /// let mut iter = debug_types.units(); - /// while let Some(unit) = iter.next().unwrap() { - /// println!("unit's length is {}", unit.unit_length()); - /// } - /// ``` - /// - /// Can be [used with - /// `FallibleIterator`](./index.html#using-with-fallibleiterator). - pub fn units(&self) -> DebugTypesUnitHeadersIter { - DebugTypesUnitHeadersIter { - input: self.debug_types_section.clone(), - offset: DebugTypesOffset(R::Offset::from_u8(0)), - } - } -} - -/// An iterator over the type-units of this `.debug_types` section. -/// -/// See the [documentation on -/// `DebugTypes::units`](./struct.DebugTypes.html#method.units) for -/// more detail. -#[derive(Clone, Debug)] -pub struct DebugTypesUnitHeadersIter { - input: R, - offset: DebugTypesOffset, -} - -impl DebugTypesUnitHeadersIter { - /// Advance the iterator to the next type unit header. 
- pub fn next(&mut self) -> Result>> { - if self.input.is_empty() { - Ok(None) - } else { - let len = self.input.len(); - match parse_unit_header(&mut self.input, self.offset.into()) { - Ok(header) => { - self.offset.0 += len - self.input.len(); - Ok(Some(header)) - } - Err(e) => { - self.input.empty(); - Err(e) - } - } - } - } -} - -#[cfg(feature = "fallible-iterator")] -impl fallible_iterator::FallibleIterator for DebugTypesUnitHeadersIter { - type Item = UnitHeader; - type Error = Error; - - fn next(&mut self) -> ::core::result::Result, Self::Error> { - DebugTypesUnitHeadersIter::next(self) - } -} - -#[cfg(test)] -// Tests require leb128::write. -#[cfg(feature = "write")] -mod tests { - use super::*; - use crate::constants; - use crate::constants::*; - use crate::endianity::{Endianity, LittleEndian}; - use crate::leb128; - use crate::read::abbrev::tests::AbbrevSectionMethods; - use crate::read::{ - Abbreviation, AttributeSpecification, DebugAbbrev, EndianSlice, Error, Result, - }; - use crate::test_util::GimliSectionMethods; - use alloc::vec::Vec; - use core::cell::Cell; - use test_assembler::{Endian, Label, LabelMaker, Section}; - - // Mixin methods for `Section` to help define binary test data. 
- - trait UnitSectionMethods { - fn unit<'input, E>(self, unit: &mut UnitHeader>) -> Self - where - E: Endianity; - fn die(self, code: u64, attr: F) -> Self - where - F: Fn(Section) -> Section; - fn die_null(self) -> Self; - fn attr_string(self, s: &str) -> Self; - fn attr_ref1(self, o: u8) -> Self; - fn offset(self, offset: usize, format: Format) -> Self; - } - - impl UnitSectionMethods for Section { - fn unit<'input, E>(self, unit: &mut UnitHeader>) -> Self - where - E: Endianity, - { - let size = self.size(); - let length = Label::new(); - let start = Label::new(); - let end = Label::new(); - - let section = match unit.format() { - Format::Dwarf32 => self.L32(&length), - Format::Dwarf64 => self.L32(0xffff_ffff).L64(&length), - }; - - let section = match unit.version() { - 2 | 3 | 4 => section - .mark(&start) - .L16(unit.version()) - .offset(unit.debug_abbrev_offset.0, unit.format()) - .D8(unit.address_size()), - 5 => section - .mark(&start) - .L16(unit.version()) - .D8(unit.type_().dw_ut().0) - .D8(unit.address_size()) - .offset(unit.debug_abbrev_offset.0, unit.format()), - _ => unreachable!(), - }; - - let section = match unit.type_() { - UnitType::Compilation | UnitType::Partial => { - unit.unit_offset = DebugInfoOffset(size as usize).into(); - section - } - UnitType::Type { - type_signature, - type_offset, - } - | UnitType::SplitType { - type_signature, - type_offset, - } => { - if unit.version() == 5 { - unit.unit_offset = DebugInfoOffset(size as usize).into(); - } else { - unit.unit_offset = DebugTypesOffset(size as usize).into(); - } - section - .L64(type_signature.0) - .offset(type_offset.0, unit.format()) - } - UnitType::Skeleton(dwo_id) | UnitType::SplitCompilation(dwo_id) => { - unit.unit_offset = DebugInfoOffset(size as usize).into(); - section.L64(dwo_id.0) - } - }; - - let section = section.append_bytes(unit.entries_buf.slice()).mark(&end); - - unit.unit_length = (&end - &start) as usize; - length.set_const(unit.unit_length as u64); - - section - } 
- - fn die(self, code: u64, attr: F) -> Self - where - F: Fn(Section) -> Section, - { - let section = self.uleb(code); - attr(section) - } - - fn die_null(self) -> Self { - self.D8(0) - } - - fn attr_string(self, attr: &str) -> Self { - self.append_bytes(attr.as_bytes()).D8(0) - } - - fn attr_ref1(self, attr: u8) -> Self { - self.D8(attr) - } - - fn offset(self, offset: usize, format: Format) -> Self { - match format { - Format::Dwarf32 => self.L32(offset as u32), - Format::Dwarf64 => self.L64(offset as u64), - } - } - } - - /// Ensure that `UnitHeader` is covariant wrt R. - #[test] - fn test_unit_header_variance() { - /// This only needs to compile. - fn _f<'a: 'b, 'b, E: Endianity>( - x: UnitHeader>, - ) -> UnitHeader> { - x - } - } - - #[test] - fn test_parse_debug_abbrev_offset_32() { - let section = Section::with_endian(Endian::Little).L32(0x0403_0201); - let buf = section.get_contents().unwrap(); - let buf = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_debug_abbrev_offset(buf, Format::Dwarf32) { - Ok(val) => assert_eq!(val, DebugAbbrevOffset(0x0403_0201)), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_debug_abbrev_offset_32_incomplete() { - let buf = [0x01, 0x02]; - let buf = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_debug_abbrev_offset(buf, Format::Dwarf32) { - Err(Error::UnexpectedEof(_)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_debug_abbrev_offset_64() { - let section = Section::with_endian(Endian::Little).L64(0x0807_0605_0403_0201); - let buf = section.get_contents().unwrap(); - let buf = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_debug_abbrev_offset(buf, Format::Dwarf64) { - Ok(val) => assert_eq!(val, DebugAbbrevOffset(0x0807_0605_0403_0201)), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn 
test_parse_debug_abbrev_offset_64_incomplete() { - let buf = [0x01, 0x02]; - let buf = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_debug_abbrev_offset(buf, Format::Dwarf64) { - Err(Error::UnexpectedEof(_)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_debug_info_offset_32() { - let section = Section::with_endian(Endian::Little).L32(0x0403_0201); - let buf = section.get_contents().unwrap(); - let buf = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_debug_info_offset(buf, Format::Dwarf32) { - Ok(val) => assert_eq!(val, DebugInfoOffset(0x0403_0201)), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_debug_info_offset_32_incomplete() { - let buf = [0x01, 0x02]; - let buf = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_debug_info_offset(buf, Format::Dwarf32) { - Err(Error::UnexpectedEof(_)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_debug_info_offset_64() { - let section = Section::with_endian(Endian::Little).L64(0x0807_0605_0403_0201); - let buf = section.get_contents().unwrap(); - let buf = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_debug_info_offset(buf, Format::Dwarf64) { - Ok(val) => assert_eq!(val, DebugInfoOffset(0x0807_0605_0403_0201)), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_debug_info_offset_64_incomplete() { - let buf = [0x01, 0x02]; - let buf = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_debug_info_offset(buf, Format::Dwarf64) { - Err(Error::UnexpectedEof(_)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_units() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let mut unit64 = UnitHeader { - 
encoding: Encoding { - format: Format::Dwarf64, - version: 4, - address_size: 8, - }, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0x0102_0304_0506_0708), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let mut unit32 = UnitHeader { - encoding: Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut unit64) - .unit(&mut unit32); - let buf = section.get_contents().unwrap(); - - let debug_info = DebugInfo::new(&buf, LittleEndian); - let mut units = debug_info.units(); - - assert_eq!(units.next(), Ok(Some(unit64))); - assert_eq!(units.next(), Ok(Some(unit32))); - assert_eq!(units.next(), Ok(None)); - } - - #[test] - fn test_unit_version_unknown_version() { - let buf = [0x02, 0x00, 0x00, 0x00, 0xab, 0xcd]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_unit_header(rest, DebugInfoOffset(0).into()) { - Err(Error::UnknownVersion(0xcdab)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - - let buf = [0x02, 0x00, 0x00, 0x00, 0x1, 0x0]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_unit_header(rest, DebugInfoOffset(0).into()) { - Err(Error::UnknownVersion(1)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_unit_version_incomplete() { - let buf = [0x01, 0x00, 0x00, 0x00, 0x04]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_unit_header(rest, DebugInfoOffset(0).into()) { - Err(Error::UnexpectedEof(_)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - 
#[test] - fn test_parse_unit_header_32_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_unit_header_64_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf64, - version: 4, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0x0102_0304_0506_0708), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_v5_unit_header_32_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 4, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 
0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_v5_unit_header_64_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf64, - version: 5, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0x0102_0304_0506_0708), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_v5_partial_unit_header_32_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 4, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Partial, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = 
Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_v5_partial_unit_header_64_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf64, - version: 5, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Partial, - debug_abbrev_offset: DebugAbbrevOffset(0x0102_0304_0506_0708), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_v5_skeleton_unit_header_32_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 4, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Skeleton(DwoId(0x0706_5040_0302_1000)), - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - 
parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_v5_skeleton_unit_header_64_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf64, - version: 5, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Skeleton(DwoId(0x0706_5040_0302_1000)), - debug_abbrev_offset: DebugAbbrevOffset(0x0102_0304_0506_0708), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_v5_split_compilation_unit_header_32_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 4, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::SplitCompilation(DwoId(0x0706_5040_0302_1000)), - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - 
#[cfg(target_pointer_width = "64")] - fn test_parse_v5_split_compilation_unit_header_64_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf64, - version: 5, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::SplitCompilation(DwoId(0x0706_5040_0302_1000)), - debug_abbrev_offset: DebugAbbrevOffset(0x0102_0304_0506_0708), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_type_offset_32_ok() { - let buf = [0x12, 0x34, 0x56, 0x78, 0x00]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_type_offset(rest, Format::Dwarf32) { - Ok(offset) => { - assert_eq!(rest.len(), 1); - assert_eq!(UnitOffset(0x7856_3412), offset); - } - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_type_offset_64_ok() { - let buf = [0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xff, 0x00]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_type_offset(rest, Format::Dwarf64) { - Ok(offset) => { - assert_eq!(rest.len(), 1); - assert_eq!(UnitOffset(0xffde_bc9a_7856_3412), offset); - } - otherwise => panic!("Unexpected result: {:?}", otherwise), - } - } - - #[test] - fn test_parse_type_offset_incomplete() { - // Need at least 4 bytes. 
- let buf = [0xff, 0xff, 0xff]; - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - match parse_type_offset(rest, Format::Dwarf32) { - Err(Error::UnexpectedEof(_)) => assert!(true), - otherwise => panic!("Unexpected result: {:?}", otherwise), - }; - } - - #[test] - fn test_parse_type_unit_header_32_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Type { - type_signature: DebugTypeSignature(0xdead_beef_dead_beef), - type_offset: UnitOffset(0x7856_3412), - }, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugTypesOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugTypesOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_type_unit_header_64_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf64, - version: 4, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Type { - type_signature: DebugTypeSignature(0xdead_beef_dead_beef), - type_offset: UnitOffset(0x7856_3412_7856_3412), - }, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugTypesOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest 
= &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugTypesOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_v5_type_unit_header_32_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Type { - type_signature: DebugTypeSignature(0xdead_beef_dead_beef), - type_offset: UnitOffset(0x7856_3412), - }, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_v5_type_unit_header_64_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf64, - version: 5, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Type { - type_signature: DebugTypeSignature(0xdead_beef_dead_beef), - type_offset: UnitOffset(0x7856_3412_7856_3412), - }, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); 
- - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - fn test_parse_v5_split_type_unit_header_32_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::SplitType { - type_signature: DebugTypeSignature(0xdead_beef_dead_beef), - type_offset: UnitOffset(0x7856_3412), - }, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_v5_split_type_unit_header_64_ok() { - let expected_rest = &[1, 2, 3, 4, 5, 6, 7, 8, 9]; - let encoding = Encoding { - format: Format::Dwarf64, - version: 5, - address_size: 8, - }; - let mut expected_unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::SplitType { - type_signature: DebugTypeSignature(0xdead_beef_dead_beef), - type_offset: UnitOffset(0x7856_3412_7856_3412), - }, - debug_abbrev_offset: DebugAbbrevOffset(0x0807_0605), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(expected_rest, LittleEndian), - }; - let section = Section::with_endian(Endian::Little) - .unit(&mut expected_unit) - .append_bytes(expected_rest); - let buf = section.get_contents().unwrap(); - let rest = &mut EndianSlice::new(&buf, LittleEndian); - - assert_eq!( - 
parse_unit_header(rest, DebugInfoOffset(0).into()), - Ok(expected_unit) - ); - assert_eq!(*rest, EndianSlice::new(expected_rest, LittleEndian)); - } - - fn section_contents(f: F) -> Vec - where - F: Fn(Section) -> Section, - { - f(Section::with_endian(Endian::Little)) - .get_contents() - .unwrap() - } - - #[test] - fn test_attribute_value() { - let mut unit = test_parse_attribute_unit_default(); - let endian = unit.entries_buf.endian(); - - let block_data = &[1, 2, 3, 4]; - let buf = section_contents(|s| s.uleb(block_data.len() as u64).append_bytes(block_data)); - let block = EndianSlice::new(&buf, endian); - - let buf = section_contents(|s| s.L32(0x0102_0304)); - let data4 = EndianSlice::new(&buf, endian); - - let buf = section_contents(|s| s.L64(0x0102_0304_0506_0708)); - let data8 = EndianSlice::new(&buf, endian); - - let tests = [ - ( - Format::Dwarf32, - 2, - constants::DW_AT_data_member_location, - constants::DW_FORM_block, - block, - AttributeValue::Block(EndianSlice::new(block_data, endian)), - AttributeValue::Exprloc(Expression(EndianSlice::new(block_data, endian))), - ), - ( - Format::Dwarf32, - 2, - constants::DW_AT_data_member_location, - constants::DW_FORM_data4, - data4, - AttributeValue::SecOffset(0x0102_0304), - AttributeValue::LocationListsRef(LocationListsOffset(0x0102_0304)), - ), - ( - Format::Dwarf64, - 2, - constants::DW_AT_data_member_location, - constants::DW_FORM_data4, - data4, - AttributeValue::Data4(0x0102_0304), - AttributeValue::Udata(0x0102_0304), - ), - ( - Format::Dwarf32, - 4, - constants::DW_AT_data_member_location, - constants::DW_FORM_data4, - data4, - AttributeValue::Data4(0x0102_0304), - AttributeValue::Udata(0x0102_0304), - ), - ( - Format::Dwarf32, - 2, - constants::DW_AT_data_member_location, - constants::DW_FORM_data8, - data8, - AttributeValue::Data8(0x0102_0304_0506_0708), - AttributeValue::Udata(0x0102_0304_0506_0708), - ), - #[cfg(target_pointer_width = "64")] - ( - Format::Dwarf64, - 2, - 
constants::DW_AT_data_member_location, - constants::DW_FORM_data8, - data8, - AttributeValue::SecOffset(0x0102_0304_0506_0708), - AttributeValue::LocationListsRef(LocationListsOffset(0x0102_0304_0506_0708)), - ), - ( - Format::Dwarf64, - 4, - constants::DW_AT_data_member_location, - constants::DW_FORM_data8, - data8, - AttributeValue::Data8(0x0102_0304_0506_0708), - AttributeValue::Udata(0x0102_0304_0506_0708), - ), - ( - Format::Dwarf32, - 4, - constants::DW_AT_location, - constants::DW_FORM_data4, - data4, - AttributeValue::SecOffset(0x0102_0304), - AttributeValue::LocationListsRef(LocationListsOffset(0x0102_0304)), - ), - #[cfg(target_pointer_width = "64")] - ( - Format::Dwarf64, - 4, - constants::DW_AT_location, - constants::DW_FORM_data8, - data8, - AttributeValue::SecOffset(0x0102_0304_0506_0708), - AttributeValue::LocationListsRef(LocationListsOffset(0x0102_0304_0506_0708)), - ), - ( - Format::Dwarf32, - 4, - constants::DW_AT_str_offsets_base, - constants::DW_FORM_sec_offset, - data4, - AttributeValue::SecOffset(0x0102_0304), - AttributeValue::DebugStrOffsetsBase(DebugStrOffsetsBase(0x0102_0304)), - ), - ( - Format::Dwarf32, - 4, - constants::DW_AT_stmt_list, - constants::DW_FORM_sec_offset, - data4, - AttributeValue::SecOffset(0x0102_0304), - AttributeValue::DebugLineRef(DebugLineOffset(0x0102_0304)), - ), - ( - Format::Dwarf32, - 4, - constants::DW_AT_addr_base, - constants::DW_FORM_sec_offset, - data4, - AttributeValue::SecOffset(0x0102_0304), - AttributeValue::DebugAddrBase(DebugAddrBase(0x0102_0304)), - ), - ( - Format::Dwarf32, - 4, - constants::DW_AT_rnglists_base, - constants::DW_FORM_sec_offset, - data4, - AttributeValue::SecOffset(0x0102_0304), - AttributeValue::DebugRngListsBase(DebugRngListsBase(0x0102_0304)), - ), - ( - Format::Dwarf32, - 4, - constants::DW_AT_loclists_base, - constants::DW_FORM_sec_offset, - data4, - AttributeValue::SecOffset(0x0102_0304), - AttributeValue::DebugLocListsBase(DebugLocListsBase(0x0102_0304)), - ), - ]; - - for 
test in tests.iter() { - let (format, version, name, form, mut input, expect_raw, expect_value) = *test; - unit.encoding.format = format; - unit.encoding.version = version; - let spec = AttributeSpecification::new(name, form, None); - let attribute = - parse_attribute(&mut input, unit.encoding(), spec).expect("Should parse attribute"); - assert_eq!(attribute.raw_value(), expect_raw); - assert_eq!(attribute.value(), expect_value); - } - } - - #[test] - fn test_attribute_udata_sdata_value() { - let tests: &[( - AttributeValue>, - Option, - Option, - )] = &[ - (AttributeValue::Data1(1), Some(1), Some(1)), - ( - AttributeValue::Data1(core::u8::MAX), - Some(u64::from(std::u8::MAX)), - Some(-1), - ), - (AttributeValue::Data2(1), Some(1), Some(1)), - ( - AttributeValue::Data2(core::u16::MAX), - Some(u64::from(std::u16::MAX)), - Some(-1), - ), - (AttributeValue::Data4(1), Some(1), Some(1)), - ( - AttributeValue::Data4(core::u32::MAX), - Some(u64::from(std::u32::MAX)), - Some(-1), - ), - (AttributeValue::Data8(1), Some(1), Some(1)), - ( - AttributeValue::Data8(core::u64::MAX), - Some(core::u64::MAX), - Some(-1), - ), - (AttributeValue::Sdata(1), Some(1), Some(1)), - (AttributeValue::Sdata(-1), None, Some(-1)), - (AttributeValue::Udata(1), Some(1), Some(1)), - (AttributeValue::Udata(1u64 << 63), Some(1u64 << 63), None), - ]; - for test in tests.iter() { - let (value, expect_udata, expect_sdata) = *test; - let attribute = Attribute { - name: DW_AT_data_member_location, - value, - }; - assert_eq!(attribute.udata_value(), expect_udata); - assert_eq!(attribute.sdata_value(), expect_sdata); - } - } - - fn test_parse_attribute_unit( - address_size: u8, - format: Format, - endian: Endian, - ) -> UnitHeader> - where - Endian: Endianity, - { - let encoding = Encoding { - format, - version: 4, - address_size, - }; - UnitHeader::new( - encoding, - 7, - UnitType::Compilation, - DebugAbbrevOffset(0x0807_0605), - DebugInfoOffset(0).into(), - EndianSlice::new(&[], endian), - ) - } - - fn 
test_parse_attribute_unit_default() -> UnitHeader> { - test_parse_attribute_unit(4, Format::Dwarf32, LittleEndian) - } - - fn test_parse_attribute<'input, Endian>( - buf: &'input [u8], - len: usize, - unit: &UnitHeader>, - form: constants::DwForm, - value: AttributeValue>, - ) where - Endian: Endianity, - { - let spec = AttributeSpecification::new(constants::DW_AT_low_pc, form, None); - - let expect = Attribute { - name: constants::DW_AT_low_pc, - value, - }; - - let rest = &mut EndianSlice::new(buf, Endian::default()); - match parse_attribute(rest, unit.encoding(), spec) { - Ok(attr) => { - assert_eq!(attr, expect); - assert_eq!(*rest, EndianSlice::new(&buf[len..], Endian::default())); - if let Some(size) = spec.size(unit) { - assert_eq!(rest.len() + size, buf.len()); - } - } - otherwise => { - assert!(false, "Unexpected parse result = {:#?}", otherwise); - } - }; - } - - #[test] - fn test_parse_attribute_addr() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]; - let unit = test_parse_attribute_unit(4, Format::Dwarf32, LittleEndian); - let form = constants::DW_FORM_addr; - let value = AttributeValue::Addr(0x0403_0201); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - fn test_parse_attribute_addr8() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]; - let unit = test_parse_attribute_unit(8, Format::Dwarf32, LittleEndian); - let form = constants::DW_FORM_addr; - let value = AttributeValue::Addr(0x0807_0605_0403_0201); - test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_block1() { - // Length of data (3), three bytes of data, two bytes of left over input. 
- let buf = [0x03, 0x09, 0x09, 0x09, 0x00, 0x00]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_block1; - let value = AttributeValue::Block(EndianSlice::new(&buf[1..4], LittleEndian)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - fn test_parse_attribute_block2() { - // Two byte length of data (2), two bytes of data, two bytes of left over input. - let buf = [0x02, 0x00, 0x09, 0x09, 0x00, 0x00]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_block2; - let value = AttributeValue::Block(EndianSlice::new(&buf[2..4], LittleEndian)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - fn test_parse_attribute_block4() { - // Four byte length of data (2), two bytes of data, no left over input. - let buf = [0x02, 0x00, 0x00, 0x00, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_block4; - let value = AttributeValue::Block(EndianSlice::new(&buf[4..], LittleEndian)); - test_parse_attribute(&buf, 6, &unit, form, value); - } - - #[test] - fn test_parse_attribute_block() { - // LEB length of data (2, one byte), two bytes of data, no left over input. 
- let buf = [0x02, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_block; - let value = AttributeValue::Block(EndianSlice::new(&buf[1..], LittleEndian)); - test_parse_attribute(&buf, 3, &unit, form, value); - } - - #[test] - fn test_parse_attribute_data1() { - let buf = [0x03]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_data1; - let value = AttributeValue::Data1(0x03); - test_parse_attribute(&buf, 1, &unit, form, value); - } - - #[test] - fn test_parse_attribute_data2() { - let buf = [0x02, 0x01, 0x0]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_data2; - let value = AttributeValue::Data2(0x0102); - test_parse_attribute(&buf, 2, &unit, form, value); - } - - #[test] - fn test_parse_attribute_data4() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_data4; - let value = AttributeValue::Data4(0x0403_0201); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - fn test_parse_attribute_data8() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_data8; - let value = AttributeValue::Data8(0x0807_0605_0403_0201); - test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_udata() { - let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - - let bytes_written = { - let mut writable = &mut buf[..]; - leb128::write::unsigned(&mut writable, 4097).expect("should write ok") - }; - - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_udata; - let value = AttributeValue::Udata(4097); - test_parse_attribute(&buf, bytes_written, &unit, form, value); - } - - #[test] - fn test_parse_attribute_sdata() { - let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - - let bytes_written = { - let mut writable = &mut 
buf[..]; - leb128::write::signed(&mut writable, -4097).expect("should write ok") - }; - - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_sdata; - let value = AttributeValue::Sdata(-4097); - test_parse_attribute(&buf, bytes_written, &unit, form, value); - } - - #[test] - fn test_parse_attribute_exprloc() { - // LEB length of data (2, one byte), two bytes of data, one byte left over input. - let buf = [0x02, 0x99, 0x99, 0x11]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_exprloc; - let value = AttributeValue::Exprloc(Expression(EndianSlice::new(&buf[1..3], LittleEndian))); - test_parse_attribute(&buf, 3, &unit, form, value); - } - - #[test] - fn test_parse_attribute_flag_true() { - let buf = [0x42]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_flag; - let value = AttributeValue::Flag(true); - test_parse_attribute(&buf, 1, &unit, form, value); - } - - #[test] - fn test_parse_attribute_flag_false() { - let buf = [0x00]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_flag; - let value = AttributeValue::Flag(false); - test_parse_attribute(&buf, 1, &unit, form, value); - } - - #[test] - fn test_parse_attribute_flag_present() { - let buf = [0x01, 0x02, 0x03, 0x04]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_flag_present; - let value = AttributeValue::Flag(true); - // DW_FORM_flag_present does not consume any bytes of the input stream. 
- test_parse_attribute(&buf, 0, &unit, form, value); - } - - #[test] - fn test_parse_attribute_sec_offset_32() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10]; - let unit = test_parse_attribute_unit(4, Format::Dwarf32, LittleEndian); - let form = constants::DW_FORM_sec_offset; - let value = AttributeValue::SecOffset(0x0403_0201); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_attribute_sec_offset_64() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_sec_offset; - let value = AttributeValue::SecOffset(0x0807_0605_0403_0201); - test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_ref1() { - let buf = [0x03]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_ref1; - let value = AttributeValue::UnitRef(UnitOffset(3)); - test_parse_attribute(&buf, 1, &unit, form, value); - } - - #[test] - fn test_parse_attribute_ref2() { - let buf = [0x02, 0x01, 0x0]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_ref2; - let value = AttributeValue::UnitRef(UnitOffset(258)); - test_parse_attribute(&buf, 2, &unit, form, value); - } - - #[test] - fn test_parse_attribute_ref4() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_ref4; - let value = AttributeValue::UnitRef(UnitOffset(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_attribute_ref8() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_ref8; - let value = AttributeValue::UnitRef(UnitOffset(0x0807_0605_0403_0201)); - 
test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_ref_sup4() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_ref_sup4; - let value = AttributeValue::DebugInfoRefSup(DebugInfoOffset(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_attribute_ref_sup8() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_ref_sup8; - let value = AttributeValue::DebugInfoRefSup(DebugInfoOffset(0x0807_0605_0403_0201)); - test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_refudata() { - let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - - let bytes_written = { - let mut writable = &mut buf[..]; - leb128::write::unsigned(&mut writable, 4097).expect("should write ok") - }; - - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_ref_udata; - let value = AttributeValue::UnitRef(UnitOffset(4097)); - test_parse_attribute(&buf, bytes_written, &unit, form, value); - } - - #[test] - fn test_parse_attribute_refaddr_32() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf32, LittleEndian); - let form = constants::DW_FORM_ref_addr; - let value = AttributeValue::DebugInfoRef(DebugInfoOffset(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_attribute_refaddr_64() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_ref_addr; - let value = AttributeValue::DebugInfoRef(DebugInfoOffset(0x0807_0605_0403_0201)); - 
test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_refaddr_version2() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let mut unit = test_parse_attribute_unit(4, Format::Dwarf32, LittleEndian); - unit.encoding.version = 2; - let form = constants::DW_FORM_ref_addr; - let value = AttributeValue::DebugInfoRef(DebugInfoOffset(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_attribute_refaddr8_version2() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let mut unit = test_parse_attribute_unit(8, Format::Dwarf32, LittleEndian); - unit.encoding.version = 2; - let form = constants::DW_FORM_ref_addr; - let value = AttributeValue::DebugInfoRef(DebugInfoOffset(0x0807_0605_0403_0201)); - test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_gnu_ref_alt_32() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf32, LittleEndian); - let form = constants::DW_FORM_GNU_ref_alt; - let value = AttributeValue::DebugInfoRefSup(DebugInfoOffset(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_attribute_gnu_ref_alt_64() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_GNU_ref_alt; - let value = AttributeValue::DebugInfoRefSup(DebugInfoOffset(0x0807_0605_0403_0201)); - test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_refsig8() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_ref_sig8; - let value = 
AttributeValue::DebugTypesRef(DebugTypeSignature(0x0807_0605_0403_0201)); - test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_string() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x0, 0x99, 0x99]; - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_string; - let value = AttributeValue::String(EndianSlice::new(&buf[..5], LittleEndian)); - test_parse_attribute(&buf, 6, &unit, form, value); - } - - #[test] - fn test_parse_attribute_strp_32() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf32, LittleEndian); - let form = constants::DW_FORM_strp; - let value = AttributeValue::DebugStrRef(DebugStrOffset(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_attribute_strp_64() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_strp; - let value = AttributeValue::DebugStrRef(DebugStrOffset(0x0807_0605_0403_0201)); - test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_strp_sup_32() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf32, LittleEndian); - let form = constants::DW_FORM_strp_sup; - let value = AttributeValue::DebugStrRefSup(DebugStrOffset(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_attribute_strp_sup_64() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_strp_sup; - let value = AttributeValue::DebugStrRefSup(DebugStrOffset(0x0807_0605_0403_0201)); - 
test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_gnu_strp_alt_32() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf32, LittleEndian); - let form = constants::DW_FORM_GNU_strp_alt; - let value = AttributeValue::DebugStrRefSup(DebugStrOffset(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - #[cfg(target_pointer_width = "64")] - fn test_parse_attribute_gnu_strp_alt_64() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_GNU_strp_alt; - let value = AttributeValue::DebugStrRefSup(DebugStrOffset(0x0807_0605_0403_0201)); - test_parse_attribute(&buf, 8, &unit, form, value); - } - - #[test] - fn test_parse_attribute_strx() { - let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - - let bytes_written = { - let mut writable = &mut buf[..]; - leb128::write::unsigned(&mut writable, 4097).expect("should write ok") - }; - - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_strx; - let value = AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(4097)); - test_parse_attribute(&buf, bytes_written, &unit, form, value); - } - - #[test] - fn test_parse_attribute_strx1() { - let buf = [0x01, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_strx1; - let value = AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(0x01)); - test_parse_attribute(&buf, 1, &unit, form, value); - } - - #[test] - fn test_parse_attribute_strx2() { - let buf = [0x01, 0x02, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_strx2; - let value = AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(0x0201)); - test_parse_attribute(&buf, 2, &unit, form, 
value); - } - - #[test] - fn test_parse_attribute_strx3() { - let buf = [0x01, 0x02, 0x03, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_strx3; - let value = AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(0x03_0201)); - test_parse_attribute(&buf, 3, &unit, form, value); - } - - #[test] - fn test_parse_attribute_strx4() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_strx4; - let value = AttributeValue::DebugStrOffsetsIndex(DebugStrOffsetsIndex(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - fn test_parse_attribute_addrx() { - let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - - let bytes_written = { - let mut writable = &mut buf[..]; - leb128::write::unsigned(&mut writable, 4097).expect("should write ok") - }; - - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_addrx; - let value = AttributeValue::DebugAddrIndex(DebugAddrIndex(4097)); - test_parse_attribute(&buf, bytes_written, &unit, form, value); - } - - #[test] - fn test_parse_attribute_addrx1() { - let buf = [0x01, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_addrx1; - let value = AttributeValue::DebugAddrIndex(DebugAddrIndex(0x01)); - test_parse_attribute(&buf, 1, &unit, form, value); - } - - #[test] - fn test_parse_attribute_addrx2() { - let buf = [0x01, 0x02, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_addrx2; - let value = AttributeValue::DebugAddrIndex(DebugAddrIndex(0x0201)); - test_parse_attribute(&buf, 2, &unit, form, value); - } - - #[test] - fn test_parse_attribute_addrx3() { - let buf = [0x01, 0x02, 0x03, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let 
form = constants::DW_FORM_addrx3; - let value = AttributeValue::DebugAddrIndex(DebugAddrIndex(0x03_0201)); - test_parse_attribute(&buf, 3, &unit, form, value); - } - - #[test] - fn test_parse_attribute_addrx4() { - let buf = [0x01, 0x02, 0x03, 0x04, 0x99, 0x99]; - let unit = test_parse_attribute_unit(4, Format::Dwarf64, LittleEndian); - let form = constants::DW_FORM_addrx4; - let value = AttributeValue::DebugAddrIndex(DebugAddrIndex(0x0403_0201)); - test_parse_attribute(&buf, 4, &unit, form, value); - } - - #[test] - fn test_parse_attribute_loclistx() { - let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - - let bytes_written = { - let mut writable = &mut buf[..]; - leb128::write::unsigned(&mut writable, 4097).expect("should write ok") - }; - - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_loclistx; - let value = AttributeValue::DebugLocListsIndex(DebugLocListsIndex(4097)); - test_parse_attribute(&buf, bytes_written, &unit, form, value); - } - - #[test] - fn test_parse_attribute_rnglistx() { - let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; - - let bytes_written = { - let mut writable = &mut buf[..]; - leb128::write::unsigned(&mut writable, 4097).expect("should write ok") - }; - - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_rnglistx; - let value = AttributeValue::DebugRngListsIndex(DebugRngListsIndex(4097)); - test_parse_attribute(&buf, bytes_written, &unit, form, value); - } - - #[test] - fn test_parse_attribute_indirect() { - let mut buf = [0; 100]; - - let bytes_written = { - let mut writable = &mut buf[..]; - leb128::write::unsigned(&mut writable, constants::DW_FORM_udata.0.into()) - .expect("should write udata") - + leb128::write::unsigned(&mut writable, 9_999_999).expect("should write value") - }; - - let unit = test_parse_attribute_unit_default(); - let form = constants::DW_FORM_indirect; - let value = AttributeValue::Udata(9_999_999); - test_parse_attribute(&buf, bytes_written, &unit, form, 
value); - } - - #[test] - fn test_parse_attribute_indirect_implicit_const() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut buf = [0; 100]; - let mut writable = &mut buf[..]; - leb128::write::unsigned(&mut writable, constants::DW_FORM_implicit_const.0.into()) - .expect("should write implicit_const"); - - let input = &mut EndianSlice::new(&buf, LittleEndian); - let spec = - AttributeSpecification::new(constants::DW_AT_low_pc, constants::DW_FORM_indirect, None); - assert_eq!( - parse_attribute(input, encoding, spec), - Err(Error::InvalidImplicitConst) - ); - } - - #[test] - fn test_attrs_iter() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let unit = UnitHeader::new( - encoding, - 7, - UnitType::Compilation, - DebugAbbrevOffset(0x0807_0605), - DebugInfoOffset(0).into(), - EndianSlice::new(&[], LittleEndian), - ); - - let abbrev = Abbreviation::new( - 42, - constants::DW_TAG_subprogram, - constants::DW_CHILDREN_yes, - vec![ - AttributeSpecification::new(constants::DW_AT_name, constants::DW_FORM_string, None), - AttributeSpecification::new(constants::DW_AT_low_pc, constants::DW_FORM_addr, None), - AttributeSpecification::new( - constants::DW_AT_high_pc, - constants::DW_FORM_addr, - None, - ), - ] - .into(), - ); - - // "foo", 42, 1337, 4 dangling bytes of 0xaa where children would be - let buf = [ - 0x66, 0x6f, 0x6f, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x39, 0x05, 0x00, 0x00, 0xaa, 0xaa, - 0xaa, 0xaa, - ]; - - let entry = DebuggingInformationEntry { - offset: UnitOffset(0), - attrs_slice: EndianSlice::new(&buf, LittleEndian), - attrs_len: Cell::new(None), - abbrev: &abbrev, - unit: &unit, - }; - - let mut attrs = AttrsIter { - input: EndianSlice::new(&buf, LittleEndian), - attributes: abbrev.attributes(), - entry: &entry, - }; - - match attrs.next() { - Ok(Some(attr)) => { - assert_eq!( - attr, - Attribute { - name: constants::DW_AT_name, - value: 
AttributeValue::String(EndianSlice::new(b"foo", LittleEndian)), - } - ); - } - otherwise => { - assert!(false, "Unexpected parse result = {:#?}", otherwise); - } - } - - assert!(entry.attrs_len.get().is_none()); - - match attrs.next() { - Ok(Some(attr)) => { - assert_eq!( - attr, - Attribute { - name: constants::DW_AT_low_pc, - value: AttributeValue::Addr(0x2a), - } - ); - } - otherwise => { - assert!(false, "Unexpected parse result = {:#?}", otherwise); - } - } - - assert!(entry.attrs_len.get().is_none()); - - match attrs.next() { - Ok(Some(attr)) => { - assert_eq!( - attr, - Attribute { - name: constants::DW_AT_high_pc, - value: AttributeValue::Addr(0x539), - } - ); - } - otherwise => { - assert!(false, "Unexpected parse result = {:#?}", otherwise); - } - } - - assert!(entry.attrs_len.get().is_none()); - - assert!(attrs.next().expect("should parse next").is_none()); - assert!(entry.attrs_len.get().is_some()); - assert_eq!( - entry.attrs_len.get().expect("should have entry.attrs_len"), - buf.len() - 4 - ) - } - - #[test] - fn test_attrs_iter_incomplete() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let unit = UnitHeader::new( - encoding, - 7, - UnitType::Compilation, - DebugAbbrevOffset(0x0807_0605), - DebugInfoOffset(0).into(), - EndianSlice::new(&[], LittleEndian), - ); - - let abbrev = Abbreviation::new( - 42, - constants::DW_TAG_subprogram, - constants::DW_CHILDREN_yes, - vec![ - AttributeSpecification::new(constants::DW_AT_name, constants::DW_FORM_string, None), - AttributeSpecification::new(constants::DW_AT_low_pc, constants::DW_FORM_addr, None), - AttributeSpecification::new( - constants::DW_AT_high_pc, - constants::DW_FORM_addr, - None, - ), - ] - .into(), - ); - - // "foo" - let buf = [0x66, 0x6f, 0x6f, 0x00]; - - let entry = DebuggingInformationEntry { - offset: UnitOffset(0), - attrs_slice: EndianSlice::new(&buf, LittleEndian), - attrs_len: Cell::new(None), - abbrev: &abbrev, - unit: &unit, - }; - - 
let mut attrs = AttrsIter { - input: EndianSlice::new(&buf, LittleEndian), - attributes: abbrev.attributes(), - entry: &entry, - }; - - match attrs.next() { - Ok(Some(attr)) => { - assert_eq!( - attr, - Attribute { - name: constants::DW_AT_name, - value: AttributeValue::String(EndianSlice::new(b"foo", LittleEndian)), - } - ); - } - otherwise => { - assert!(false, "Unexpected parse result = {:#?}", otherwise); - } - } - - assert!(entry.attrs_len.get().is_none()); - - // Return error for incomplete attribute. - assert!(attrs.next().is_err()); - assert!(entry.attrs_len.get().is_none()); - - // Return error for all subsequent calls. - assert!(attrs.next().is_err()); - assert!(attrs.next().is_err()); - assert!(attrs.next().is_err()); - assert!(attrs.next().is_err()); - assert!(entry.attrs_len.get().is_none()); - } - - fn assert_entry_name(entry: &DebuggingInformationEntry>, name: &str) - where - Endian: Endianity, - { - let value = entry - .attr_value(constants::DW_AT_name) - .expect("Should have parsed the name attribute") - .expect("Should have found the name attribute"); - - assert_eq!( - value, - AttributeValue::String(EndianSlice::new(name.as_bytes(), Endian::default())) - ); - } - - fn assert_current_name(cursor: &EntriesCursor>, name: &str) - where - Endian: Endianity, - { - let entry = cursor.current().expect("Should have an entry result"); - assert_entry_name(entry, name); - } - - fn assert_next_entry(cursor: &mut EntriesCursor>, name: &str) - where - Endian: Endianity, - { - cursor - .next_entry() - .expect("Should parse next entry") - .expect("Should have an entry"); - assert_current_name(cursor, name); - } - - fn assert_next_entry_null(cursor: &mut EntriesCursor>) - where - Endian: Endianity, - { - cursor - .next_entry() - .expect("Should parse next entry") - .expect("Should have an entry"); - assert!(cursor.current().is_none()); - } - - fn assert_next_dfs( - cursor: &mut EntriesCursor>, - name: &str, - depth: isize, - ) where - Endian: Endianity, - { - { - 
let (val, entry) = cursor - .next_dfs() - .expect("Should parse next dfs") - .expect("Should not be done with traversal"); - assert_eq!(val, depth); - assert_entry_name(entry, name); - } - assert_current_name(cursor, name); - } - - fn assert_next_sibling(cursor: &mut EntriesCursor>, name: &str) - where - Endian: Endianity, - { - { - let entry = cursor - .next_sibling() - .expect("Should parse next sibling") - .expect("Should not be done with traversal"); - assert_entry_name(entry, name); - } - assert_current_name(cursor, name); - } - - fn assert_valid_sibling_ptr(cursor: &EntriesCursor>) - where - Endian: Endianity, - { - let sibling_ptr = cursor - .current() - .expect("Should have current entry") - .attr_value(constants::DW_AT_sibling); - match sibling_ptr { - Ok(Some(AttributeValue::UnitRef(offset))) => { - cursor - .unit - .range_from(offset..) - .expect("Sibling offset should be valid"); - } - _ => panic!("Invalid sibling pointer {:?}", sibling_ptr), - } - } - - fn entries_cursor_tests_abbrev_buf() -> Vec { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - .abbrev(1, DW_TAG_subprogram, DW_CHILDREN_yes) - .abbrev_attr(DW_AT_name, DW_FORM_string) - .abbrev_attr_null() - .abbrev_null(); - section.get_contents().unwrap() - } - - fn entries_cursor_tests_debug_info_buf() -> Vec { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - .die(1, |s| s.attr_string("001")) - .die(1, |s| s.attr_string("002")) - .die(1, |s| s.attr_string("003")) - .die_null() - .die_null() - .die(1, |s| s.attr_string("004")) - .die(1, |s| s.attr_string("005")) - .die_null() - .die(1, |s| s.attr_string("006")) - .die_null() - .die_null() - .die(1, |s| s.attr_string("007")) - .die(1, |s| s.attr_string("008")) - .die(1, |s| s.attr_string("009")) - .die_null() - .die_null() - .die_null() - .die(1, |s| s.attr_string("010")) - .die_null() - .die_null(); - let entries_buf = section.get_contents().unwrap(); - - let encoding = Encoding { - format: 
Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(&entries_buf, LittleEndian), - }; - let section = Section::with_endian(Endian::Little).unit(&mut unit); - section.get_contents().unwrap() - } - - #[test] - fn test_cursor_next_entry_incomplete() { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - .die(1, |s| s.attr_string("001")) - .die(1, |s| s.attr_string("002")) - .die(1, |s| s); - let entries_buf = section.get_contents().unwrap(); - - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(&entries_buf, LittleEndian), - }; - let section = Section::with_endian(Endian::Little).unit(&mut unit); - let info_buf = §ion.get_contents().unwrap(); - let debug_info = DebugInfo::new(info_buf, LittleEndian); - - let unit = debug_info - .units() - .next() - .expect("should have a unit result") - .expect("and it should be ok"); - - let abbrevs_buf = &entries_cursor_tests_abbrev_buf(); - let debug_abbrev = DebugAbbrev::new(abbrevs_buf, LittleEndian); - - let abbrevs = unit - .abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - - let mut cursor = unit.entries(&abbrevs); - - assert_next_entry(&mut cursor, "001"); - assert_next_entry(&mut cursor, "002"); - - { - // Entry code is present, but none of the attributes. 
- cursor - .next_entry() - .expect("Should parse next entry") - .expect("Should have an entry"); - let entry = cursor.current().expect("Should have an entry result"); - assert!(entry.attrs().next().is_err()); - } - - assert!(cursor.next_entry().is_err()); - assert!(cursor.next_entry().is_err()); - } - - #[test] - fn test_cursor_next_entry() { - let info_buf = &entries_cursor_tests_debug_info_buf(); - let debug_info = DebugInfo::new(info_buf, LittleEndian); - - let unit = debug_info - .units() - .next() - .expect("should have a unit result") - .expect("and it should be ok"); - - let abbrevs_buf = &entries_cursor_tests_abbrev_buf(); - let debug_abbrev = DebugAbbrev::new(abbrevs_buf, LittleEndian); - - let abbrevs = unit - .abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - - let mut cursor = unit.entries(&abbrevs); - - assert_next_entry(&mut cursor, "001"); - assert_next_entry(&mut cursor, "002"); - assert_next_entry(&mut cursor, "003"); - assert_next_entry_null(&mut cursor); - assert_next_entry_null(&mut cursor); - assert_next_entry(&mut cursor, "004"); - assert_next_entry(&mut cursor, "005"); - assert_next_entry_null(&mut cursor); - assert_next_entry(&mut cursor, "006"); - assert_next_entry_null(&mut cursor); - assert_next_entry_null(&mut cursor); - assert_next_entry(&mut cursor, "007"); - assert_next_entry(&mut cursor, "008"); - assert_next_entry(&mut cursor, "009"); - assert_next_entry_null(&mut cursor); - assert_next_entry_null(&mut cursor); - assert_next_entry_null(&mut cursor); - assert_next_entry(&mut cursor, "010"); - assert_next_entry_null(&mut cursor); - assert_next_entry_null(&mut cursor); - - assert!(cursor - .next_entry() - .expect("Should parse next entry") - .is_none()); - assert!(cursor.current().is_none()); - } - - #[test] - fn test_cursor_next_dfs() { - let info_buf = &entries_cursor_tests_debug_info_buf(); - let debug_info = DebugInfo::new(info_buf, LittleEndian); - - let unit = debug_info - .units() - .next() - 
.expect("should have a unit result") - .expect("and it should be ok"); - - let abbrevs_buf = &entries_cursor_tests_abbrev_buf(); - let debug_abbrev = DebugAbbrev::new(abbrevs_buf, LittleEndian); - - let abbrevs = unit - .abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - - let mut cursor = unit.entries(&abbrevs); - - assert_next_dfs(&mut cursor, "001", 0); - assert_next_dfs(&mut cursor, "002", 1); - assert_next_dfs(&mut cursor, "003", 1); - assert_next_dfs(&mut cursor, "004", -1); - assert_next_dfs(&mut cursor, "005", 1); - assert_next_dfs(&mut cursor, "006", 0); - assert_next_dfs(&mut cursor, "007", -1); - assert_next_dfs(&mut cursor, "008", 1); - assert_next_dfs(&mut cursor, "009", 1); - assert_next_dfs(&mut cursor, "010", -2); - - assert!(cursor.next_dfs().expect("Should parse next dfs").is_none()); - assert!(cursor.current().is_none()); - } - - #[test] - fn test_cursor_next_sibling_no_sibling_ptr() { - let info_buf = &entries_cursor_tests_debug_info_buf(); - let debug_info = DebugInfo::new(info_buf, LittleEndian); - - let unit = debug_info - .units() - .next() - .expect("should have a unit result") - .expect("and it should be ok"); - - let abbrevs_buf = &entries_cursor_tests_abbrev_buf(); - let debug_abbrev = DebugAbbrev::new(abbrevs_buf, LittleEndian); - - let abbrevs = unit - .abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - - let mut cursor = unit.entries(&abbrevs); - - assert_next_dfs(&mut cursor, "001", 0); - - // Down to the first child of the root entry. - - assert_next_dfs(&mut cursor, "002", 1); - - // Now iterate all children of the root via `next_sibling`. - - assert_next_sibling(&mut cursor, "004"); - assert_next_sibling(&mut cursor, "007"); - assert_next_sibling(&mut cursor, "010"); - - // There should be no more siblings. 
- - assert!(cursor - .next_sibling() - .expect("Should parse next sibling") - .is_none()); - assert!(cursor.current().is_none()); - } - - #[test] - fn test_cursor_next_sibling_continuation() { - let info_buf = &entries_cursor_tests_debug_info_buf(); - let debug_info = DebugInfo::new(info_buf, LittleEndian); - - let unit = debug_info - .units() - .next() - .expect("should have a unit result") - .expect("and it should be ok"); - - let abbrevs_buf = &entries_cursor_tests_abbrev_buf(); - let debug_abbrev = DebugAbbrev::new(abbrevs_buf, LittleEndian); - - let abbrevs = unit - .abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - - let mut cursor = unit.entries(&abbrevs); - - assert_next_dfs(&mut cursor, "001", 0); - - // Down to the first child of the root entry. - - assert_next_dfs(&mut cursor, "002", 1); - - // Get the next sibling, then iterate its children - - assert_next_sibling(&mut cursor, "004"); - assert_next_dfs(&mut cursor, "005", 1); - assert_next_sibling(&mut cursor, "006"); - assert!(cursor - .next_sibling() - .expect("Should parse next sibling") - .is_none()); - assert!(cursor - .next_sibling() - .expect("Should parse next sibling") - .is_none()); - assert!(cursor - .next_sibling() - .expect("Should parse next sibling") - .is_none()); - assert!(cursor - .next_sibling() - .expect("Should parse next sibling") - .is_none()); - - // And we should be able to continue with the children of the root entry. - - assert_next_dfs(&mut cursor, "007", -1); - assert_next_sibling(&mut cursor, "010"); - - // There should be no more siblings. 
- - assert!(cursor - .next_sibling() - .expect("Should parse next sibling") - .is_none()); - assert!(cursor.current().is_none()); - } - - fn entries_cursor_sibling_abbrev_buf() -> Vec { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - .abbrev(1, DW_TAG_subprogram, DW_CHILDREN_yes) - .abbrev_attr(DW_AT_name, DW_FORM_string) - .abbrev_attr(DW_AT_sibling, DW_FORM_ref1) - .abbrev_attr_null() - .abbrev(2, DW_TAG_subprogram, DW_CHILDREN_yes) - .abbrev_attr(DW_AT_name, DW_FORM_string) - .abbrev_attr_null() - .abbrev_null(); - section.get_contents().unwrap() - } - - fn entries_cursor_sibling_entries_buf(header_size: usize) -> Vec { - let start = Label::new(); - let sibling004_ref = Label::new(); - let sibling004 = Label::new(); - let sibling009_ref = Label::new(); - let sibling009 = Label::new(); - - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - .mark(&start) - .die(2, |s| s.attr_string("001")) - // Valid sibling attribute. - .die(1, |s| s.attr_string("002").D8(&sibling004_ref)) - // Invalid code to ensure the sibling attribute was used. - .die(10, |s| s.attr_string("003")) - .die_null() - .die_null() - .mark(&sibling004) - // Invalid sibling attribute. - .die(1, |s| s.attr_string("004").attr_ref1(255)) - .die(2, |s| s.attr_string("005")) - .die_null() - .die_null() - // Sibling attribute in child only. - .die(2, |s| s.attr_string("006")) - // Valid sibling attribute. - .die(1, |s| s.attr_string("007").D8(&sibling009_ref)) - // Invalid code to ensure the sibling attribute was used. - .die(10, |s| s.attr_string("008")) - .die_null() - .die_null() - .mark(&sibling009) - .die(2, |s| s.attr_string("009")) - .die_null() - .die_null() - // No sibling attribute. 
- .die(2, |s| s.attr_string("010")) - .die(2, |s| s.attr_string("011")) - .die_null() - .die_null() - .die_null(); - - let offset = header_size as u64 + (&sibling004 - &start) as u64; - sibling004_ref.set_const(offset); - - let offset = header_size as u64 + (&sibling009 - &start) as u64; - sibling009_ref.set_const(offset); - - section.get_contents().unwrap() - } - - fn test_cursor_next_sibling_with_ptr(cursor: &mut EntriesCursor>) { - assert_next_dfs(cursor, "001", 0); - - // Down to the first child of the root. - - assert_next_dfs(cursor, "002", 1); - - // Now iterate all children of the root via `next_sibling`. - - assert_valid_sibling_ptr(&cursor); - assert_next_sibling(cursor, "004"); - assert_next_sibling(cursor, "006"); - assert_next_sibling(cursor, "010"); - - // There should be no more siblings. - - assert!(cursor - .next_sibling() - .expect("Should parse next sibling") - .is_none()); - assert!(cursor.current().is_none()); - } - - #[test] - fn test_debug_info_next_sibling_with_ptr() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - - let mut unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(&[], LittleEndian), - }; - let header_size = unit.size_of_header(); - let entries_buf = entries_cursor_sibling_entries_buf(header_size); - unit.entries_buf = EndianSlice::new(&entries_buf, LittleEndian); - let section = Section::with_endian(Endian::Little).unit(&mut unit); - let info_buf = section.get_contents().unwrap(); - let debug_info = DebugInfo::new(&info_buf, LittleEndian); - - let unit = debug_info - .units() - .next() - .expect("should have a unit result") - .expect("and it should be ok"); - - let abbrev_buf = entries_cursor_sibling_abbrev_buf(); - let debug_abbrev = DebugAbbrev::new(&abbrev_buf, LittleEndian); - - let abbrevs = unit - 
.abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - - let mut cursor = unit.entries(&abbrevs); - test_cursor_next_sibling_with_ptr(&mut cursor); - } - - #[test] - fn test_debug_types_next_sibling_with_ptr() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Type { - type_signature: DebugTypeSignature(0), - type_offset: UnitOffset(0), - }, - debug_abbrev_offset: DebugAbbrevOffset(0), - unit_offset: DebugTypesOffset(0).into(), - entries_buf: EndianSlice::new(&[], LittleEndian), - }; - let header_size = unit.size_of_header(); - let entries_buf = entries_cursor_sibling_entries_buf(header_size); - unit.entries_buf = EndianSlice::new(&entries_buf, LittleEndian); - let section = Section::with_endian(Endian::Little).unit(&mut unit); - let info_buf = section.get_contents().unwrap(); - let debug_types = DebugTypes::new(&info_buf, LittleEndian); - - let unit = debug_types - .units() - .next() - .expect("should have a unit result") - .expect("and it should be ok"); - - let abbrev_buf = entries_cursor_sibling_abbrev_buf(); - let debug_abbrev = DebugAbbrev::new(&abbrev_buf, LittleEndian); - - let abbrevs = unit - .abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - - let mut cursor = unit.entries(&abbrevs); - test_cursor_next_sibling_with_ptr(&mut cursor); - } - - #[test] - fn test_entries_at_offset() { - let info_buf = &entries_cursor_tests_debug_info_buf(); - let debug_info = DebugInfo::new(info_buf, LittleEndian); - - let unit = debug_info - .units() - .next() - .expect("should have a unit result") - .expect("and it should be ok"); - - let abbrevs_buf = &entries_cursor_tests_abbrev_buf(); - let debug_abbrev = DebugAbbrev::new(abbrevs_buf, LittleEndian); - - let abbrevs = unit - .abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - - let mut cursor = unit - .entries_at_offset(&abbrevs, 
UnitOffset(unit.header_size())) - .unwrap(); - assert_next_entry(&mut cursor, "001"); - - let cursor = unit.entries_at_offset(&abbrevs, UnitOffset(0)); - match cursor { - Err(Error::OffsetOutOfBounds) => {} - otherwise => { - assert!(false, "Unexpected parse result = {:#?}", otherwise); - } - } - } - - fn entries_tree_tests_debug_abbrevs_buf() -> Vec { - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - .abbrev(1, DW_TAG_subprogram, DW_CHILDREN_yes) - .abbrev_attr(DW_AT_name, DW_FORM_string) - .abbrev_attr_null() - .abbrev(2, DW_TAG_subprogram, DW_CHILDREN_no) - .abbrev_attr(DW_AT_name, DW_FORM_string) - .abbrev_attr_null() - .abbrev_null() - .get_contents() - .unwrap(); - section - } - - fn entries_tree_tests_debug_info_buf(header_size: usize) -> (Vec, UnitOffset) { - let start = Label::new(); - let entry2 = Label::new(); - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - .mark(&start) - .die(1, |s| s.attr_string("root")) - .die(1, |s| s.attr_string("1")) - .die(1, |s| s.attr_string("1a")) - .die_null() - .die(2, |s| s.attr_string("1b")) - .die_null() - .mark(&entry2) - .die(1, |s| s.attr_string("2")) - .die(1, |s| s.attr_string("2a")) - .die(1, |s| s.attr_string("2a1")) - .die_null() - .die_null() - .die(1, |s| s.attr_string("2b")) - .die(2, |s| s.attr_string("2b1")) - .die_null() - .die_null() - .die(1, |s| s.attr_string("3")) - .die(1, |s| s.attr_string("3a")) - .die(2, |s| s.attr_string("3a1")) - .die(2, |s| s.attr_string("3a2")) - .die_null() - .die(2, |s| s.attr_string("3b")) - .die_null() - .die(2, |s| s.attr_string("final")) - .die_null() - .get_contents() - .unwrap(); - let entry2 = UnitOffset(header_size + (&entry2 - &start) as usize); - (section, entry2) - } - - #[test] - fn test_entries_tree() { - fn assert_entry<'input, 'abbrev, 'unit, 'tree, Endian>( - node: Result< - Option>>, - >, - name: &str, - ) -> EntriesTreeIter<'abbrev, 'unit, 'tree, EndianSlice<'input, Endian>> - where - Endian: Endianity, - { - 
let node = node - .expect("Should parse entry") - .expect("Should have entry"); - assert_entry_name(node.entry(), name); - node.children() - } - - fn assert_null(node: Result>>>) { - match node { - Ok(None) => {} - otherwise => { - assert!(false, "Unexpected parse result = {:#?}", otherwise); - } - } - } - - let abbrevs_buf = entries_tree_tests_debug_abbrevs_buf(); - let debug_abbrev = DebugAbbrev::new(&abbrevs_buf, LittleEndian); - - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(&[], LittleEndian), - }; - let header_size = unit.size_of_header(); - let (entries_buf, entry2) = entries_tree_tests_debug_info_buf(header_size); - unit.entries_buf = EndianSlice::new(&entries_buf, LittleEndian); - let info_buf = Section::with_endian(Endian::Little) - .unit(&mut unit) - .get_contents() - .unwrap(); - let debug_info = DebugInfo::new(&info_buf, LittleEndian); - - let unit = debug_info - .units() - .next() - .expect("Should parse unit") - .expect("and it should be some"); - let abbrevs = unit - .abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - let mut tree = unit - .entries_tree(&abbrevs, None) - .expect("Should have entries tree"); - - // Test we can restart iteration of the tree. - { - let mut iter = assert_entry(tree.root().map(Some), "root"); - assert_entry(iter.next(), "1"); - } - { - let mut iter = assert_entry(tree.root().map(Some), "root"); - assert_entry(iter.next(), "1"); - } - - let mut iter = assert_entry(tree.root().map(Some), "root"); - { - // Test iteration with children. - let mut iter = assert_entry(iter.next(), "1"); - { - // Test iteration with children flag, but no children. 
- let mut iter = assert_entry(iter.next(), "1a"); - assert_null(iter.next()); - assert_null(iter.next()); - } - { - // Test iteration without children flag. - let mut iter = assert_entry(iter.next(), "1b"); - assert_null(iter.next()); - assert_null(iter.next()); - } - assert_null(iter.next()); - assert_null(iter.next()); - } - { - // Test skipping over children. - let mut iter = assert_entry(iter.next(), "2"); - assert_entry(iter.next(), "2a"); - assert_entry(iter.next(), "2b"); - assert_null(iter.next()); - } - { - // Test skipping after partial iteration. - let mut iter = assert_entry(iter.next(), "3"); - { - let mut iter = assert_entry(iter.next(), "3a"); - assert_entry(iter.next(), "3a1"); - // Parent iter should be able to skip over "3a2". - } - assert_entry(iter.next(), "3b"); - assert_null(iter.next()); - } - assert_entry(iter.next(), "final"); - assert_null(iter.next()); - - // Test starting at an offset. - let mut tree = unit - .entries_tree(&abbrevs, Some(entry2)) - .expect("Should have entries tree"); - let mut iter = assert_entry(tree.root().map(Some), "2"); - assert_entry(iter.next(), "2a"); - assert_entry(iter.next(), "2b"); - assert_null(iter.next()); - } - - #[test] - fn test_entries_raw() { - fn assert_abbrev<'input, 'abbrev, 'unit, Endian>( - entries: &mut EntriesRaw<'abbrev, 'unit, EndianSlice<'input, Endian>>, - tag: DwTag, - ) -> &'abbrev Abbreviation - where - Endian: Endianity, - { - let abbrev = entries - .read_abbreviation() - .expect("Should parse abbrev") - .expect("Should have abbrev"); - assert_eq!(abbrev.tag(), tag); - abbrev - } - - fn assert_null<'input, 'abbrev, 'unit, Endian>( - entries: &mut EntriesRaw<'abbrev, 'unit, EndianSlice<'input, Endian>>, - ) where - Endian: Endianity, - { - match entries.read_abbreviation() { - Ok(None) => {} - otherwise => { - assert!(false, "Unexpected parse result = {:#?}", otherwise); - } - } - } - - fn assert_attr<'input, 'abbrev, 'unit, Endian>( - entries: &mut EntriesRaw<'abbrev, 'unit, 
EndianSlice<'input, Endian>>, - spec: Option, - name: DwAt, - value: &str, - ) where - Endian: Endianity, - { - let spec = spec.expect("Should have attribute specification"); - let attr = entries - .read_attribute(spec) - .expect("Should parse attribute"); - assert_eq!(attr.name(), name); - assert_eq!( - attr.value(), - AttributeValue::String(EndianSlice::new(value.as_bytes(), Endian::default())) - ); - } - - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - .abbrev(1, DW_TAG_subprogram, DW_CHILDREN_yes) - .abbrev_attr(DW_AT_name, DW_FORM_string) - .abbrev_attr(DW_AT_linkage_name, DW_FORM_string) - .abbrev_attr_null() - .abbrev(2, DW_TAG_variable, DW_CHILDREN_no) - .abbrev_attr(DW_AT_name, DW_FORM_string) - .abbrev_attr_null() - .abbrev_null(); - let abbrevs_buf = section.get_contents().unwrap(); - let debug_abbrev = DebugAbbrev::new(&abbrevs_buf, LittleEndian); - - #[rustfmt::skip] - let section = Section::with_endian(Endian::Little) - .die(1, |s| s.attr_string("f1").attr_string("l1")) - .die(2, |s| s.attr_string("v1")) - .die(2, |s| s.attr_string("v2")) - .die(1, |s| s.attr_string("f2").attr_string("l2")) - .die_null() - .die_null(); - let entries_buf = section.get_contents().unwrap(); - - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(&entries_buf, LittleEndian), - }; - let section = Section::with_endian(Endian::Little).unit(&mut unit); - let info_buf = section.get_contents().unwrap(); - let debug_info = DebugInfo::new(&info_buf, LittleEndian); - - let unit = debug_info - .units() - .next() - .expect("should have a unit result") - .expect("and it should be ok"); - - let abbrevs = unit - .abbreviations(&debug_abbrev) - .expect("Should parse abbreviations"); - - let mut entries = unit - 
.entries_raw(&abbrevs, None) - .expect("Should have entries"); - - assert_eq!(entries.next_depth(), 0); - let abbrev = assert_abbrev(&mut entries, DW_TAG_subprogram); - let mut attrs = abbrev.attributes().iter().copied(); - assert_attr(&mut entries, attrs.next(), DW_AT_name, "f1"); - assert_attr(&mut entries, attrs.next(), DW_AT_linkage_name, "l1"); - assert!(attrs.next().is_none()); - - assert_eq!(entries.next_depth(), 1); - let abbrev = assert_abbrev(&mut entries, DW_TAG_variable); - let mut attrs = abbrev.attributes().iter().copied(); - assert_attr(&mut entries, attrs.next(), DW_AT_name, "v1"); - assert!(attrs.next().is_none()); - - assert_eq!(entries.next_depth(), 1); - let abbrev = assert_abbrev(&mut entries, DW_TAG_variable); - let mut attrs = abbrev.attributes().iter().copied(); - assert_attr(&mut entries, attrs.next(), DW_AT_name, "v2"); - assert!(attrs.next().is_none()); - - assert_eq!(entries.next_depth(), 1); - let abbrev = assert_abbrev(&mut entries, DW_TAG_subprogram); - let mut attrs = abbrev.attributes().iter().copied(); - assert_attr(&mut entries, attrs.next(), DW_AT_name, "f2"); - assert_attr(&mut entries, attrs.next(), DW_AT_linkage_name, "l2"); - assert!(attrs.next().is_none()); - - assert_eq!(entries.next_depth(), 2); - assert_null(&mut entries); - - assert_eq!(entries.next_depth(), 1); - assert_null(&mut entries); - - assert_eq!(entries.next_depth(), 0); - assert!(entries.is_empty()); - } - - #[test] - fn test_debug_info_offset() { - let padding = &[0; 10]; - let entries = &[0; 20]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(entries, LittleEndian), - }; - Section::with_endian(Endian::Little) - .append_bytes(padding) - .unit(&mut unit); - let offset = padding.len(); - let header_length 
= unit.size_of_header(); - let length = unit.length_including_self(); - assert_eq!(DebugInfoOffset(0).to_unit_offset(&unit), None); - assert_eq!(DebugInfoOffset(offset - 1).to_unit_offset(&unit), None); - assert_eq!(DebugInfoOffset(offset).to_unit_offset(&unit), None); - assert_eq!( - DebugInfoOffset(offset + header_length - 1).to_unit_offset(&unit), - None - ); - assert_eq!( - DebugInfoOffset(offset + header_length).to_unit_offset(&unit), - Some(UnitOffset(header_length)) - ); - assert_eq!( - DebugInfoOffset(offset + length - 1).to_unit_offset(&unit), - Some(UnitOffset(length - 1)) - ); - assert_eq!(DebugInfoOffset(offset + length).to_unit_offset(&unit), None); - assert_eq!( - UnitOffset(header_length).to_debug_info_offset(&unit), - Some(DebugInfoOffset(offset + header_length)) - ); - assert_eq!( - UnitOffset(length - 1).to_debug_info_offset(&unit), - Some(DebugInfoOffset(offset + length - 1)) - ); - } - - #[test] - fn test_debug_types_offset() { - let padding = &[0; 10]; - let entries = &[0; 20]; - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Type { - type_signature: DebugTypeSignature(0), - type_offset: UnitOffset(0), - }, - debug_abbrev_offset: DebugAbbrevOffset(0), - unit_offset: DebugTypesOffset(0).into(), - entries_buf: EndianSlice::new(entries, LittleEndian), - }; - Section::with_endian(Endian::Little) - .append_bytes(padding) - .unit(&mut unit); - let offset = padding.len(); - let header_length = unit.size_of_header(); - let length = unit.length_including_self(); - assert_eq!(DebugTypesOffset(0).to_unit_offset(&unit), None); - assert_eq!(DebugTypesOffset(offset - 1).to_unit_offset(&unit), None); - assert_eq!(DebugTypesOffset(offset).to_unit_offset(&unit), None); - assert_eq!( - DebugTypesOffset(offset + header_length - 1).to_unit_offset(&unit), - None - ); - assert_eq!( - DebugTypesOffset(offset + 
header_length).to_unit_offset(&unit), - Some(UnitOffset(header_length)) - ); - assert_eq!( - DebugTypesOffset(offset + length - 1).to_unit_offset(&unit), - Some(UnitOffset(length - 1)) - ); - assert_eq!( - DebugTypesOffset(offset + length).to_unit_offset(&unit), - None - ); - assert_eq!( - UnitOffset(header_length).to_debug_types_offset(&unit), - Some(DebugTypesOffset(offset + header_length)) - ); - assert_eq!( - UnitOffset(length - 1).to_debug_types_offset(&unit), - Some(DebugTypesOffset(offset + length - 1)) - ); - } - - #[test] - fn test_length_including_self() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let mut unit = UnitHeader { - encoding, - unit_length: 0, - unit_type: UnitType::Compilation, - debug_abbrev_offset: DebugAbbrevOffset(0), - unit_offset: DebugInfoOffset(0).into(), - entries_buf: EndianSlice::new(&[], LittleEndian), - }; - unit.encoding.format = Format::Dwarf32; - assert_eq!(unit.length_including_self(), 4); - unit.encoding.format = Format::Dwarf64; - assert_eq!(unit.length_including_self(), 12); - unit.unit_length = 10; - assert_eq!(unit.length_including_self(), 22); - } - - #[test] - fn test_parse_type_unit_abbrevs() { - let types_buf = [ - // Type unit header - 0x25, 0x00, 0x00, 0x00, // 32-bit unit length = 37 - 0x04, 0x00, // Version 4 - 0x00, 0x00, 0x00, 0x00, // debug_abbrev_offset - 0x04, // Address size - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, // Type signature - 0x01, 0x02, 0x03, 0x04, // Type offset - // DIEs - // Abbreviation code - 0x01, // Attribute of form DW_FORM_string = "foo\0" - 0x66, 0x6f, 0x6f, 0x00, // Children - // Abbreviation code - 0x01, // Attribute of form DW_FORM_string = "foo\0" - 0x66, 0x6f, 0x6f, 0x00, // Children - // Abbreviation code - 0x01, // Attribute of form DW_FORM_string = "foo\0" - 0x66, 0x6f, 0x6f, 0x00, // Children - 0x00, // End of children - 0x00, // End of children - 0x00, // End of children - ]; - let debug_types = 
DebugTypes::new(&types_buf, LittleEndian); - - let abbrev_buf = [ - // Code - 0x01, // DW_TAG_subprogram - 0x2e, // DW_CHILDREN_yes - 0x01, // Begin attributes - 0x03, // Attribute name = DW_AT_name - 0x08, // Attribute form = DW_FORM_string - 0x00, 0x00, // End attributes - 0x00, // Null terminator - ]; - - let get_some_type_unit = || debug_types.units().next().unwrap().unwrap(); - - let unit = get_some_type_unit(); - - let read_debug_abbrev_section_somehow = || &abbrev_buf; - let debug_abbrev = DebugAbbrev::new(read_debug_abbrev_section_somehow(), LittleEndian); - let _abbrevs_for_unit = unit.abbreviations(&debug_abbrev).unwrap(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/util.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/util.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/util.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,283 +0,0 @@ -#[cfg(feature = "read")] -use alloc::boxed::Box; -#[cfg(feature = "read")] -use alloc::vec::Vec; -use core::fmt; -use core::mem::MaybeUninit; -use core::ops; -use core::ptr; -use core::slice; - -mod sealed { - /// # Safety - /// Implementer must not modify the content in storage. - pub unsafe trait Sealed { - type Storage; - - fn new_storage() -> Self::Storage; - - fn grow(_storage: &mut Self::Storage, _additional: usize) -> Result<(), CapacityFull> { - Err(CapacityFull) - } - } - - #[derive(Clone, Copy, Debug)] - pub struct CapacityFull; -} - -use sealed::*; - -/// Marker trait for types that can be used as backing storage when a growable array type is needed. -/// -/// This trait is sealed and cannot be implemented for types outside this crate. -pub trait ArrayLike: Sealed { - /// Type of the elements being stored. 
- type Item; - - #[doc(hidden)] - fn as_slice(storage: &Self::Storage) -> &[MaybeUninit]; - - #[doc(hidden)] - fn as_mut_slice(storage: &mut Self::Storage) -> &mut [MaybeUninit]; -} - -// Use macro since const generics can't be used due to MSRV. -macro_rules! impl_array { - () => {}; - ($n:literal $($rest:tt)*) => { - // SAFETY: does not modify the content in storage. - unsafe impl Sealed for [T; $n] { - type Storage = [MaybeUninit; $n]; - - fn new_storage() -> Self::Storage { - // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. - unsafe { MaybeUninit::uninit().assume_init() } - } - } - - impl ArrayLike for [T; $n] { - type Item = T; - - fn as_slice(storage: &Self::Storage) -> &[MaybeUninit] { - storage - } - - fn as_mut_slice(storage: &mut Self::Storage) -> &mut [MaybeUninit] { - storage - } - } - - impl_array!($($rest)*); - } -} - -#[cfg(feature = "read")] -macro_rules! impl_box { - () => {}; - ($n:literal $($rest:tt)*) => { - // SAFETY: does not modify the content in storage. - unsafe impl Sealed for Box<[T; $n]> { - type Storage = Box<[MaybeUninit; $n]>; - - fn new_storage() -> Self::Storage { - // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. - Box::new(unsafe { MaybeUninit::uninit().assume_init() }) - } - } - - impl ArrayLike for Box<[T; $n]> { - type Item = T; - - fn as_slice(storage: &Self::Storage) -> &[MaybeUninit] { - &storage[..] - } - - fn as_mut_slice(storage: &mut Self::Storage) -> &mut [MaybeUninit] { - &mut storage[..] 
- } - } - - impl_box!($($rest)*); - } -} - -impl_array!(0 1 2 3 4 8 16 32 64 128 192); -#[cfg(feature = "read")] -impl_box!(0 1 2 3 4 8 16 32 64 128 192); - -#[cfg(feature = "read")] -unsafe impl Sealed for Vec { - type Storage = Box<[MaybeUninit]>; - - fn new_storage() -> Self::Storage { - Box::new([]) - } - - fn grow(storage: &mut Self::Storage, additional: usize) -> Result<(), CapacityFull> { - let mut vec: Vec<_> = core::mem::replace(storage, Box::new([])).into(); - vec.reserve(additional); - // SAFETY: This is a `Vec` of `MaybeUninit`. - unsafe { vec.set_len(vec.capacity()) }; - *storage = vec.into_boxed_slice(); - Ok(()) - } -} - -#[cfg(feature = "read")] -impl ArrayLike for Vec { - type Item = T; - - fn as_slice(storage: &Self::Storage) -> &[MaybeUninit] { - storage - } - - fn as_mut_slice(storage: &mut Self::Storage) -> &mut [MaybeUninit] { - storage - } -} - -pub(crate) struct ArrayVec { - storage: A::Storage, - len: usize, -} - -impl ArrayVec { - pub fn new() -> Self { - Self { - storage: A::new_storage(), - len: 0, - } - } - - pub fn clear(&mut self) { - let ptr: *mut [A::Item] = &mut **self; - // Set length first so the type invariant is upheld even if `drop_in_place` panicks. - self.len = 0; - // SAFETY: `ptr` contains valid elements only and we "forget" them by setting the length. 
- unsafe { ptr::drop_in_place(ptr) }; - } - - pub fn try_push(&mut self, value: A::Item) -> Result<(), CapacityFull> { - let mut storage = A::as_mut_slice(&mut self.storage); - if self.len >= storage.len() { - A::grow(&mut self.storage, 1)?; - storage = A::as_mut_slice(&mut self.storage); - } - - storage[self.len] = MaybeUninit::new(value); - self.len += 1; - Ok(()) - } - - pub fn try_insert(&mut self, index: usize, element: A::Item) -> Result<(), CapacityFull> { - assert!(index <= self.len); - - let mut storage = A::as_mut_slice(&mut self.storage); - if self.len >= storage.len() { - A::grow(&mut self.storage, 1)?; - storage = A::as_mut_slice(&mut self.storage); - } - - // SAFETY: storage[index] is filled later. - unsafe { - let p = storage.as_mut_ptr().add(index); - core::ptr::copy(p as *const _, p.add(1), self.len - index); - } - storage[index] = MaybeUninit::new(element); - self.len += 1; - Ok(()) - } - - pub fn pop(&mut self) -> Option { - if self.len == 0 { - None - } else { - self.len -= 1; - // SAFETY: this element is valid and we "forget" it by setting the length. - Some(unsafe { A::as_slice(&self.storage)[self.len].as_ptr().read() }) - } - } - - pub fn swap_remove(&mut self, index: usize) -> A::Item { - assert!(self.len > 0); - A::as_mut_slice(&mut self.storage).swap(index, self.len - 1); - self.pop().unwrap() - } -} - -#[cfg(feature = "read")] -impl ArrayVec> { - pub fn into_vec(mut self) -> Vec { - let len = core::mem::replace(&mut self.len, 0); - let storage = core::mem::replace(&mut self.storage, Box::new([])); - let slice = Box::leak(storage); - debug_assert!(len <= slice.len()); - // SAFETY: valid elements. 
- unsafe { Vec::from_raw_parts(slice.as_mut_ptr() as *mut T, len, slice.len()) } - } -} - -impl Drop for ArrayVec { - fn drop(&mut self) { - self.clear(); - } -} - -impl Default for ArrayVec { - fn default() -> Self { - Self::new() - } -} - -impl ops::Deref for ArrayVec { - type Target = [A::Item]; - - fn deref(&self) -> &[A::Item] { - let slice = &A::as_slice(&self.storage); - debug_assert!(self.len <= slice.len()); - // SAFETY: valid elements. - unsafe { slice::from_raw_parts(slice.as_ptr() as _, self.len) } - } -} - -impl ops::DerefMut for ArrayVec { - fn deref_mut(&mut self) -> &mut [A::Item] { - let slice = &mut A::as_mut_slice(&mut self.storage); - debug_assert!(self.len <= slice.len()); - // SAFETY: valid elements. - unsafe { slice::from_raw_parts_mut(slice.as_mut_ptr() as _, self.len) } - } -} - -impl Clone for ArrayVec -where - A::Item: Clone, -{ - fn clone(&self) -> Self { - let mut new = Self::default(); - for value in &**self { - new.try_push(value.clone()).unwrap(); - } - new - } -} - -impl PartialEq for ArrayVec -where - A::Item: PartialEq, -{ - fn eq(&self, other: &Self) -> bool { - **self == **other - } -} - -impl Eq for ArrayVec where A::Item: Eq {} - -impl fmt::Debug for ArrayVec -where - A::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/read/value.rs s390-tools-2.33.1/rust-vendor/gimli/src/read/value.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/read/value.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/read/value.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1621 +0,0 @@ -//! Definitions for values used in DWARF expressions. - -use crate::constants; -#[cfg(feature = "read")] -use crate::read::{AttributeValue, DebuggingInformationEntry}; -use crate::read::{Error, Reader, Result}; - -/// Convert a u64 to an i64, with sign extension if required. 
-/// -/// This is primarily used when needing to treat `Value::Generic` -/// as a signed value. -#[inline] -fn sign_extend(value: u64, mask: u64) -> i64 { - let value = (value & mask) as i64; - let sign = ((mask >> 1) + 1) as i64; - (value ^ sign).wrapping_sub(sign) -} - -#[inline] -fn mask_bit_size(addr_mask: u64) -> u32 { - 64 - addr_mask.leading_zeros() -} - -/// The type of an entry on the DWARF stack. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum ValueType { - /// The generic type, which is address-sized and of unspecified sign, - /// as specified in the DWARF 5 standard, section 2.5.1. - /// This type is also used to represent address base types. - Generic, - /// Signed 8-bit integer type. - I8, - /// Unsigned 8-bit integer type. - U8, - /// Signed 16-bit integer type. - I16, - /// Unsigned 16-bit integer type. - U16, - /// Signed 32-bit integer type. - I32, - /// Unsigned 32-bit integer type. - U32, - /// Signed 64-bit integer type. - I64, - /// Unsigned 64-bit integer type. - U64, - /// 32-bit floating point type. - F32, - /// 64-bit floating point type. - F64, -} - -/// The value of an entry on the DWARF stack. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum Value { - /// A generic value, which is address-sized and of unspecified sign. - Generic(u64), - /// A signed 8-bit integer value. - I8(i8), - /// An unsigned 8-bit integer value. - U8(u8), - /// A signed 16-bit integer value. - I16(i16), - /// An unsigned 16-bit integer value. - U16(u16), - /// A signed 32-bit integer value. - I32(i32), - /// An unsigned 32-bit integer value. - U32(u32), - /// A signed 64-bit integer value. - I64(i64), - /// An unsigned 64-bit integer value. - U64(u64), - /// A 32-bit floating point value. - F32(f32), - /// A 64-bit floating point value. - F64(f64), -} - -impl ValueType { - /// The size in bits of a value for this type. 
- pub fn bit_size(self, addr_mask: u64) -> u32 { - match self { - ValueType::Generic => mask_bit_size(addr_mask), - ValueType::I8 | ValueType::U8 => 8, - ValueType::I16 | ValueType::U16 => 16, - ValueType::I32 | ValueType::U32 | ValueType::F32 => 32, - ValueType::I64 | ValueType::U64 | ValueType::F64 => 64, - } - } - - /// Construct a `ValueType` from the attributes of a base type DIE. - pub fn from_encoding(encoding: constants::DwAte, byte_size: u64) -> Option { - Some(match (encoding, byte_size) { - (constants::DW_ATE_signed, 1) => ValueType::I8, - (constants::DW_ATE_signed, 2) => ValueType::I16, - (constants::DW_ATE_signed, 4) => ValueType::I32, - (constants::DW_ATE_signed, 8) => ValueType::I64, - (constants::DW_ATE_unsigned, 1) => ValueType::U8, - (constants::DW_ATE_unsigned, 2) => ValueType::U16, - (constants::DW_ATE_unsigned, 4) => ValueType::U32, - (constants::DW_ATE_unsigned, 8) => ValueType::U64, - (constants::DW_ATE_float, 4) => ValueType::F32, - (constants::DW_ATE_float, 8) => ValueType::F64, - _ => return None, - }) - } - - /// Construct a `ValueType` from a base type DIE. - #[cfg(feature = "read")] - pub fn from_entry( - entry: &DebuggingInformationEntry, - ) -> Result> { - if entry.tag() != constants::DW_TAG_base_type { - return Ok(None); - } - let mut encoding = None; - let mut byte_size = None; - let mut endianity = constants::DW_END_default; - let mut attrs = entry.attrs(); - while let Some(attr) = attrs.next()? { - match attr.name() { - constants::DW_AT_byte_size => byte_size = attr.udata_value(), - constants::DW_AT_encoding => { - if let AttributeValue::Encoding(x) = attr.value() { - encoding = Some(x); - } - } - constants::DW_AT_endianity => { - if let AttributeValue::Endianity(x) = attr.value() { - endianity = x; - } - } - _ => {} - } - } - - if endianity != constants::DW_END_default { - // TODO: we could check if it matches the reader endianity, - // but normally it would use DW_END_default in that case. 
- return Ok(None); - } - - if let (Some(encoding), Some(byte_size)) = (encoding, byte_size) { - Ok(ValueType::from_encoding(encoding, byte_size)) - } else { - Ok(None) - } - } -} - -impl Value { - /// Return the `ValueType` corresponding to this `Value`. - pub fn value_type(&self) -> ValueType { - match *self { - Value::Generic(_) => ValueType::Generic, - Value::I8(_) => ValueType::I8, - Value::U8(_) => ValueType::U8, - Value::I16(_) => ValueType::I16, - Value::U16(_) => ValueType::U16, - Value::I32(_) => ValueType::I32, - Value::U32(_) => ValueType::U32, - Value::I64(_) => ValueType::I64, - Value::U64(_) => ValueType::U64, - Value::F32(_) => ValueType::F32, - Value::F64(_) => ValueType::F64, - } - } - - /// Read a `Value` with the given `value_type` from a `Reader`. - pub fn parse(value_type: ValueType, mut bytes: R) -> Result { - let value = match value_type { - ValueType::I8 => Value::I8(bytes.read_i8()?), - ValueType::U8 => Value::U8(bytes.read_u8()?), - ValueType::I16 => Value::I16(bytes.read_i16()?), - ValueType::U16 => Value::U16(bytes.read_u16()?), - ValueType::I32 => Value::I32(bytes.read_i32()?), - ValueType::U32 => Value::U32(bytes.read_u32()?), - ValueType::I64 => Value::I64(bytes.read_i64()?), - ValueType::U64 => Value::U64(bytes.read_u64()?), - ValueType::F32 => Value::F32(bytes.read_f32()?), - ValueType::F64 => Value::F64(bytes.read_f64()?), - _ => return Err(Error::UnsupportedTypeOperation), - }; - Ok(value) - } - - /// Convert a `Value` to a `u64`. - /// - /// The `ValueType` of `self` must be integral. - /// Values are sign extended if the source value is signed. 
- pub fn to_u64(self, addr_mask: u64) -> Result { - let value = match self { - Value::Generic(value) => value & addr_mask, - Value::I8(value) => value as u64, - Value::U8(value) => u64::from(value), - Value::I16(value) => value as u64, - Value::U16(value) => u64::from(value), - Value::I32(value) => value as u64, - Value::U32(value) => u64::from(value), - Value::I64(value) => value as u64, - Value::U64(value) => value as u64, - _ => return Err(Error::IntegralTypeRequired), - }; - Ok(value) - } - - /// Create a `Value` with the given `value_type` from a `u64` value. - /// - /// The `value_type` may be integral or floating point. - /// The result is truncated if the `u64` value does - /// not fit the bounds of the `value_type`. - pub fn from_u64(value_type: ValueType, value: u64) -> Result { - let value = match value_type { - ValueType::Generic => Value::Generic(value), - ValueType::I8 => Value::I8(value as i8), - ValueType::U8 => Value::U8(value as u8), - ValueType::I16 => Value::I16(value as i16), - ValueType::U16 => Value::U16(value as u16), - ValueType::I32 => Value::I32(value as i32), - ValueType::U32 => Value::U32(value as u32), - ValueType::I64 => Value::I64(value as i64), - ValueType::U64 => Value::U64(value), - ValueType::F32 => Value::F32(value as f32), - ValueType::F64 => Value::F64(value as f64), - }; - Ok(value) - } - - /// Create a `Value` with the given `value_type` from a `f32` value. - /// - /// The `value_type` may be integral or floating point. - /// The result is not defined if the `f32` value does - /// not fit the bounds of the `value_type`. 
- fn from_f32(value_type: ValueType, value: f32) -> Result { - let value = match value_type { - ValueType::Generic => Value::Generic(value as u64), - ValueType::I8 => Value::I8(value as i8), - ValueType::U8 => Value::U8(value as u8), - ValueType::I16 => Value::I16(value as i16), - ValueType::U16 => Value::U16(value as u16), - ValueType::I32 => Value::I32(value as i32), - ValueType::U32 => Value::U32(value as u32), - ValueType::I64 => Value::I64(value as i64), - ValueType::U64 => Value::U64(value as u64), - ValueType::F32 => Value::F32(value), - ValueType::F64 => Value::F64(f64::from(value)), - }; - Ok(value) - } - - /// Create a `Value` with the given `value_type` from a `f64` value. - /// - /// The `value_type` may be integral or floating point. - /// The result is not defined if the `f64` value does - /// not fit the bounds of the `value_type`. - fn from_f64(value_type: ValueType, value: f64) -> Result { - let value = match value_type { - ValueType::Generic => Value::Generic(value as u64), - ValueType::I8 => Value::I8(value as i8), - ValueType::U8 => Value::U8(value as u8), - ValueType::I16 => Value::I16(value as i16), - ValueType::U16 => Value::U16(value as u16), - ValueType::I32 => Value::I32(value as i32), - ValueType::U32 => Value::U32(value as u32), - ValueType::I64 => Value::I64(value as i64), - ValueType::U64 => Value::U64(value as u64), - ValueType::F32 => Value::F32(value as f32), - ValueType::F64 => Value::F64(value), - }; - Ok(value) - } - - /// Convert a `Value` to the given `value_type`. - /// - /// When converting between integral types, the result is truncated - /// if the source value does not fit the bounds of the `value_type`. - /// When converting from floating point types, the result is not defined - /// if the source value does not fit the bounds of the `value_type`. - /// - /// This corresponds to the DWARF `DW_OP_convert` operation. 
- pub fn convert(self, value_type: ValueType, addr_mask: u64) -> Result { - match self { - Value::F32(value) => Value::from_f32(value_type, value), - Value::F64(value) => Value::from_f64(value_type, value), - _ => Value::from_u64(value_type, self.to_u64(addr_mask)?), - } - } - - /// Reinterpret the bits in a `Value` as the given `value_type`. - /// - /// The source and result value types must have equal sizes. - /// - /// This corresponds to the DWARF `DW_OP_reinterpret` operation. - pub fn reinterpret(self, value_type: ValueType, addr_mask: u64) -> Result { - if self.value_type().bit_size(addr_mask) != value_type.bit_size(addr_mask) { - return Err(Error::TypeMismatch); - } - let bits = match self { - Value::Generic(value) => value, - Value::I8(value) => value as u64, - Value::U8(value) => u64::from(value), - Value::I16(value) => value as u64, - Value::U16(value) => u64::from(value), - Value::I32(value) => value as u64, - Value::U32(value) => u64::from(value), - Value::I64(value) => value as u64, - Value::U64(value) => value, - Value::F32(value) => u64::from(f32::to_bits(value)), - Value::F64(value) => f64::to_bits(value), - }; - let value = match value_type { - ValueType::Generic => Value::Generic(bits), - ValueType::I8 => Value::I8(bits as i8), - ValueType::U8 => Value::U8(bits as u8), - ValueType::I16 => Value::I16(bits as i16), - ValueType::U16 => Value::U16(bits as u16), - ValueType::I32 => Value::I32(bits as i32), - ValueType::U32 => Value::U32(bits as u32), - ValueType::I64 => Value::I64(bits as i64), - ValueType::U64 => Value::U64(bits), - ValueType::F32 => Value::F32(f32::from_bits(bits as u32)), - ValueType::F64 => Value::F64(f64::from_bits(bits)), - }; - Ok(value) - } - - /// Perform an absolute value operation. - /// - /// If the value type is `Generic`, then it is interpreted as a signed value. - /// - /// This corresponds to the DWARF `DW_OP_abs` operation. 
- pub fn abs(self, addr_mask: u64) -> Result { - // wrapping_abs() can be used because DWARF specifies that the result is undefined - // for negative minimal values. - let value = match self { - Value::Generic(value) => { - Value::Generic(sign_extend(value, addr_mask).wrapping_abs() as u64) - } - Value::I8(value) => Value::I8(value.wrapping_abs()), - Value::I16(value) => Value::I16(value.wrapping_abs()), - Value::I32(value) => Value::I32(value.wrapping_abs()), - Value::I64(value) => Value::I64(value.wrapping_abs()), - // f32/f64::abs() is not available in libcore - Value::F32(value) => Value::F32(if value < 0. { -value } else { value }), - Value::F64(value) => Value::F64(if value < 0. { -value } else { value }), - Value::U8(_) | Value::U16(_) | Value::U32(_) | Value::U64(_) => self, - }; - Ok(value) - } - - /// Perform a negation operation. - /// - /// If the value type is `Generic`, then it is interpreted as a signed value. - /// - /// This corresponds to the DWARF `DW_OP_neg` operation. - pub fn neg(self, addr_mask: u64) -> Result { - // wrapping_neg() can be used because DWARF specifies that the result is undefined - // for negative minimal values. - let value = match self { - Value::Generic(value) => { - Value::Generic(sign_extend(value, addr_mask).wrapping_neg() as u64) - } - Value::I8(value) => Value::I8(value.wrapping_neg()), - Value::I16(value) => Value::I16(value.wrapping_neg()), - Value::I32(value) => Value::I32(value.wrapping_neg()), - Value::I64(value) => Value::I64(value.wrapping_neg()), - Value::F32(value) => Value::F32(-value), - Value::F64(value) => Value::F64(-value), - // It's unclear if these should implicitly convert to a signed value. - // For now, we don't support them. - Value::U8(_) | Value::U16(_) | Value::U32(_) | Value::U64(_) => { - return Err(Error::UnsupportedTypeOperation); - } - }; - Ok(value) - } - - /// Perform an addition operation. - /// - /// This operation requires matching types. 
- /// - /// This corresponds to the DWARF `DW_OP_plus` operation. - pub fn add(self, rhs: Value, addr_mask: u64) -> Result { - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - Value::Generic(v1.wrapping_add(v2) & addr_mask) - } - (Value::I8(v1), Value::I8(v2)) => Value::I8(v1.wrapping_add(v2)), - (Value::U8(v1), Value::U8(v2)) => Value::U8(v1.wrapping_add(v2)), - (Value::I16(v1), Value::I16(v2)) => Value::I16(v1.wrapping_add(v2)), - (Value::U16(v1), Value::U16(v2)) => Value::U16(v1.wrapping_add(v2)), - (Value::I32(v1), Value::I32(v2)) => Value::I32(v1.wrapping_add(v2)), - (Value::U32(v1), Value::U32(v2)) => Value::U32(v1.wrapping_add(v2)), - (Value::I64(v1), Value::I64(v2)) => Value::I64(v1.wrapping_add(v2)), - (Value::U64(v1), Value::U64(v2)) => Value::U64(v1.wrapping_add(v2)), - (Value::F32(v1), Value::F32(v2)) => Value::F32(v1 + v2), - (Value::F64(v1), Value::F64(v2)) => Value::F64(v1 + v2), - _ => return Err(Error::TypeMismatch), - }; - Ok(value) - } - - /// Perform a subtraction operation. - /// - /// This operation requires matching types. - /// - /// This corresponds to the DWARF `DW_OP_minus` operation. 
- pub fn sub(self, rhs: Value, addr_mask: u64) -> Result { - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - Value::Generic(v1.wrapping_sub(v2) & addr_mask) - } - (Value::I8(v1), Value::I8(v2)) => Value::I8(v1.wrapping_sub(v2)), - (Value::U8(v1), Value::U8(v2)) => Value::U8(v1.wrapping_sub(v2)), - (Value::I16(v1), Value::I16(v2)) => Value::I16(v1.wrapping_sub(v2)), - (Value::U16(v1), Value::U16(v2)) => Value::U16(v1.wrapping_sub(v2)), - (Value::I32(v1), Value::I32(v2)) => Value::I32(v1.wrapping_sub(v2)), - (Value::U32(v1), Value::U32(v2)) => Value::U32(v1.wrapping_sub(v2)), - (Value::I64(v1), Value::I64(v2)) => Value::I64(v1.wrapping_sub(v2)), - (Value::U64(v1), Value::U64(v2)) => Value::U64(v1.wrapping_sub(v2)), - (Value::F32(v1), Value::F32(v2)) => Value::F32(v1 - v2), - (Value::F64(v1), Value::F64(v2)) => Value::F64(v1 - v2), - _ => return Err(Error::TypeMismatch), - }; - Ok(value) - } - - /// Perform a multiplication operation. - /// - /// This operation requires matching types. - /// - /// This corresponds to the DWARF `DW_OP_mul` operation. 
- pub fn mul(self, rhs: Value, addr_mask: u64) -> Result { - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - Value::Generic(v1.wrapping_mul(v2) & addr_mask) - } - (Value::I8(v1), Value::I8(v2)) => Value::I8(v1.wrapping_mul(v2)), - (Value::U8(v1), Value::U8(v2)) => Value::U8(v1.wrapping_mul(v2)), - (Value::I16(v1), Value::I16(v2)) => Value::I16(v1.wrapping_mul(v2)), - (Value::U16(v1), Value::U16(v2)) => Value::U16(v1.wrapping_mul(v2)), - (Value::I32(v1), Value::I32(v2)) => Value::I32(v1.wrapping_mul(v2)), - (Value::U32(v1), Value::U32(v2)) => Value::U32(v1.wrapping_mul(v2)), - (Value::I64(v1), Value::I64(v2)) => Value::I64(v1.wrapping_mul(v2)), - (Value::U64(v1), Value::U64(v2)) => Value::U64(v1.wrapping_mul(v2)), - (Value::F32(v1), Value::F32(v2)) => Value::F32(v1 * v2), - (Value::F64(v1), Value::F64(v2)) => Value::F64(v1 * v2), - _ => return Err(Error::TypeMismatch), - }; - Ok(value) - } - - /// Perform a division operation. - /// - /// This operation requires matching types. - /// If the value type is `Generic`, then it is interpreted as a signed value. - /// - /// This corresponds to the DWARF `DW_OP_div` operation. 
- pub fn div(self, rhs: Value, addr_mask: u64) -> Result { - match rhs { - Value::Generic(v2) if sign_extend(v2, addr_mask) == 0 => { - return Err(Error::DivisionByZero); - } - Value::I8(0) - | Value::U8(0) - | Value::I16(0) - | Value::U16(0) - | Value::I32(0) - | Value::U32(0) - | Value::I64(0) - | Value::U64(0) => { - return Err(Error::DivisionByZero); - } - _ => {} - } - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - // Signed division - Value::Generic( - sign_extend(v1, addr_mask).wrapping_div(sign_extend(v2, addr_mask)) as u64, - ) - } - (Value::I8(v1), Value::I8(v2)) => Value::I8(v1.wrapping_div(v2)), - (Value::U8(v1), Value::U8(v2)) => Value::U8(v1.wrapping_div(v2)), - (Value::I16(v1), Value::I16(v2)) => Value::I16(v1.wrapping_div(v2)), - (Value::U16(v1), Value::U16(v2)) => Value::U16(v1.wrapping_div(v2)), - (Value::I32(v1), Value::I32(v2)) => Value::I32(v1.wrapping_div(v2)), - (Value::U32(v1), Value::U32(v2)) => Value::U32(v1.wrapping_div(v2)), - (Value::I64(v1), Value::I64(v2)) => Value::I64(v1.wrapping_div(v2)), - (Value::U64(v1), Value::U64(v2)) => Value::U64(v1.wrapping_div(v2)), - (Value::F32(v1), Value::F32(v2)) => Value::F32(v1 / v2), - (Value::F64(v1), Value::F64(v2)) => Value::F64(v1 / v2), - _ => return Err(Error::TypeMismatch), - }; - Ok(value) - } - - /// Perform a remainder operation. - /// - /// This operation requires matching integral types. - /// If the value type is `Generic`, then it is interpreted as an unsigned value. - /// - /// This corresponds to the DWARF `DW_OP_mod` operation. 
- pub fn rem(self, rhs: Value, addr_mask: u64) -> Result { - match rhs { - Value::Generic(rhs) if (rhs & addr_mask) == 0 => { - return Err(Error::DivisionByZero); - } - Value::I8(0) - | Value::U8(0) - | Value::I16(0) - | Value::U16(0) - | Value::I32(0) - | Value::U32(0) - | Value::I64(0) - | Value::U64(0) => { - return Err(Error::DivisionByZero); - } - _ => {} - } - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - // Unsigned modulus - Value::Generic((v1 & addr_mask).wrapping_rem(v2 & addr_mask)) - } - (Value::I8(v1), Value::I8(v2)) => Value::I8(v1.wrapping_rem(v2)), - (Value::U8(v1), Value::U8(v2)) => Value::U8(v1.wrapping_rem(v2)), - (Value::I16(v1), Value::I16(v2)) => Value::I16(v1.wrapping_rem(v2)), - (Value::U16(v1), Value::U16(v2)) => Value::U16(v1.wrapping_rem(v2)), - (Value::I32(v1), Value::I32(v2)) => Value::I32(v1.wrapping_rem(v2)), - (Value::U32(v1), Value::U32(v2)) => Value::U32(v1.wrapping_rem(v2)), - (Value::I64(v1), Value::I64(v2)) => Value::I64(v1.wrapping_rem(v2)), - (Value::U64(v1), Value::U64(v2)) => Value::U64(v1.wrapping_rem(v2)), - (Value::F32(_), Value::F32(_)) => return Err(Error::IntegralTypeRequired), - (Value::F64(_), Value::F64(_)) => return Err(Error::IntegralTypeRequired), - _ => return Err(Error::TypeMismatch), - }; - Ok(value) - } - - /// Perform a bitwise not operation. - /// - /// This operation requires matching integral types. - /// - /// This corresponds to the DWARF `DW_OP_not` operation. - pub fn not(self, addr_mask: u64) -> Result { - let value_type = self.value_type(); - let v = self.to_u64(addr_mask)?; - Value::from_u64(value_type, !v) - } - - /// Perform a bitwise and operation. - /// - /// This operation requires matching integral types. - /// - /// This corresponds to the DWARF `DW_OP_and` operation. 
- pub fn and(self, rhs: Value, addr_mask: u64) -> Result { - let value_type = self.value_type(); - if value_type != rhs.value_type() { - return Err(Error::TypeMismatch); - } - let v1 = self.to_u64(addr_mask)?; - let v2 = rhs.to_u64(addr_mask)?; - Value::from_u64(value_type, v1 & v2) - } - - /// Perform a bitwise or operation. - /// - /// This operation requires matching integral types. - /// - /// This corresponds to the DWARF `DW_OP_or` operation. - pub fn or(self, rhs: Value, addr_mask: u64) -> Result { - let value_type = self.value_type(); - if value_type != rhs.value_type() { - return Err(Error::TypeMismatch); - } - let v1 = self.to_u64(addr_mask)?; - let v2 = rhs.to_u64(addr_mask)?; - Value::from_u64(value_type, v1 | v2) - } - - /// Perform a bitwise exclusive-or operation. - /// - /// This operation requires matching integral types. - /// - /// This corresponds to the DWARF `DW_OP_xor` operation. - pub fn xor(self, rhs: Value, addr_mask: u64) -> Result { - let value_type = self.value_type(); - if value_type != rhs.value_type() { - return Err(Error::TypeMismatch); - } - let v1 = self.to_u64(addr_mask)?; - let v2 = rhs.to_u64(addr_mask)?; - Value::from_u64(value_type, v1 ^ v2) - } - - /// Convert value to bit length suitable for a shift operation. - /// - /// If the value is negative then an error is returned. - fn shift_length(self) -> Result { - let value = match self { - Value::Generic(value) => value, - Value::I8(value) if value >= 0 => value as u64, - Value::U8(value) => u64::from(value), - Value::I16(value) if value >= 0 => value as u64, - Value::U16(value) => u64::from(value), - Value::I32(value) if value >= 0 => value as u64, - Value::U32(value) => u64::from(value), - Value::I64(value) if value >= 0 => value as u64, - Value::U64(value) => value, - _ => return Err(Error::InvalidShiftExpression), - }; - Ok(value) - } - - /// Perform a shift left operation. - /// - /// This operation requires integral types. 
- /// If the shift length exceeds the type size, then 0 is returned. - /// If the shift length is negative then an error is returned. - /// - /// This corresponds to the DWARF `DW_OP_shl` operation. - pub fn shl(self, rhs: Value, addr_mask: u64) -> Result { - let v2 = rhs.shift_length()?; - let value = match self { - Value::Generic(v1) => Value::Generic(if v2 >= u64::from(mask_bit_size(addr_mask)) { - 0 - } else { - (v1 & addr_mask) << v2 - }), - Value::I8(v1) => Value::I8(if v2 >= 8 { 0 } else { v1 << v2 }), - Value::U8(v1) => Value::U8(if v2 >= 8 { 0 } else { v1 << v2 }), - Value::I16(v1) => Value::I16(if v2 >= 16 { 0 } else { v1 << v2 }), - Value::U16(v1) => Value::U16(if v2 >= 16 { 0 } else { v1 << v2 }), - Value::I32(v1) => Value::I32(if v2 >= 32 { 0 } else { v1 << v2 }), - Value::U32(v1) => Value::U32(if v2 >= 32 { 0 } else { v1 << v2 }), - Value::I64(v1) => Value::I64(if v2 >= 64 { 0 } else { v1 << v2 }), - Value::U64(v1) => Value::U64(if v2 >= 64 { 0 } else { v1 << v2 }), - _ => return Err(Error::IntegralTypeRequired), - }; - Ok(value) - } - - /// Perform a logical shift right operation. - /// - /// This operation requires an unsigned integral type for the value. - /// If the value type is `Generic`, then it is interpreted as an unsigned value. - /// - /// This operation requires an integral type for the shift length. - /// If the shift length exceeds the type size, then 0 is returned. - /// If the shift length is negative then an error is returned. - /// - /// This corresponds to the DWARF `DW_OP_shr` operation. 
- pub fn shr(self, rhs: Value, addr_mask: u64) -> Result { - let v2 = rhs.shift_length()?; - let value = match self { - Value::Generic(v1) => Value::Generic(if v2 >= u64::from(mask_bit_size(addr_mask)) { - 0 - } else { - (v1 & addr_mask) >> v2 - }), - Value::U8(v1) => Value::U8(if v2 >= 8 { 0 } else { v1 >> v2 }), - Value::U16(v1) => Value::U16(if v2 >= 16 { 0 } else { v1 >> v2 }), - Value::U32(v1) => Value::U32(if v2 >= 32 { 0 } else { v1 >> v2 }), - Value::U64(v1) => Value::U64(if v2 >= 64 { 0 } else { v1 >> v2 }), - // It's unclear if signed values should implicitly convert to an unsigned value. - // For now, we don't support them. - Value::I8(_) | Value::I16(_) | Value::I32(_) | Value::I64(_) => { - return Err(Error::UnsupportedTypeOperation); - } - _ => return Err(Error::IntegralTypeRequired), - }; - Ok(value) - } - - /// Perform an arithmetic shift right operation. - /// - /// This operation requires a signed integral type for the value. - /// If the value type is `Generic`, then it is interpreted as a signed value. - /// - /// This operation requires an integral type for the shift length. - /// If the shift length exceeds the type size, then 0 is returned for positive values, - /// and -1 is returned for negative values. - /// If the shift length is negative then an error is returned. - /// - /// This corresponds to the DWARF `DW_OP_shra` operation. 
- pub fn shra(self, rhs: Value, addr_mask: u64) -> Result { - let v2 = rhs.shift_length()?; - let value = match self { - Value::Generic(v1) => { - let v1 = sign_extend(v1, addr_mask); - let value = if v2 >= u64::from(mask_bit_size(addr_mask)) { - if v1 < 0 { - !0 - } else { - 0 - } - } else { - (v1 >> v2) as u64 - }; - Value::Generic(value) - } - Value::I8(v1) => Value::I8(if v2 >= 8 { - if v1 < 0 { - !0 - } else { - 0 - } - } else { - v1 >> v2 - }), - Value::I16(v1) => Value::I16(if v2 >= 16 { - if v1 < 0 { - !0 - } else { - 0 - } - } else { - v1 >> v2 - }), - Value::I32(v1) => Value::I32(if v2 >= 32 { - if v1 < 0 { - !0 - } else { - 0 - } - } else { - v1 >> v2 - }), - Value::I64(v1) => Value::I64(if v2 >= 64 { - if v1 < 0 { - !0 - } else { - 0 - } - } else { - v1 >> v2 - }), - // It's unclear if unsigned values should implicitly convert to a signed value. - // For now, we don't support them. - Value::U8(_) | Value::U16(_) | Value::U32(_) | Value::U64(_) => { - return Err(Error::UnsupportedTypeOperation); - } - _ => return Err(Error::IntegralTypeRequired), - }; - Ok(value) - } - - /// Perform the `==` relational operation. - /// - /// This operation requires matching integral types. - /// If the value type is `Generic`, then it is interpreted as a signed value. - /// - /// This corresponds to the DWARF `DW_OP_eq` operation. 
- pub fn eq(self, rhs: Value, addr_mask: u64) -> Result { - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - sign_extend(v1, addr_mask) == sign_extend(v2, addr_mask) - } - (Value::I8(v1), Value::I8(v2)) => v1 == v2, - (Value::U8(v1), Value::U8(v2)) => v1 == v2, - (Value::I16(v1), Value::I16(v2)) => v1 == v2, - (Value::U16(v1), Value::U16(v2)) => v1 == v2, - (Value::I32(v1), Value::I32(v2)) => v1 == v2, - (Value::U32(v1), Value::U32(v2)) => v1 == v2, - (Value::I64(v1), Value::I64(v2)) => v1 == v2, - (Value::U64(v1), Value::U64(v2)) => v1 == v2, - (Value::F32(v1), Value::F32(v2)) => v1 == v2, - (Value::F64(v1), Value::F64(v2)) => v1 == v2, - _ => return Err(Error::TypeMismatch), - }; - Ok(Value::Generic(value as u64)) - } - - /// Perform the `>=` relational operation. - /// - /// This operation requires matching integral types. - /// If the value type is `Generic`, then it is interpreted as a signed value. - /// - /// This corresponds to the DWARF `DW_OP_ge` operation. - pub fn ge(self, rhs: Value, addr_mask: u64) -> Result { - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - sign_extend(v1, addr_mask) >= sign_extend(v2, addr_mask) - } - (Value::I8(v1), Value::I8(v2)) => v1 >= v2, - (Value::U8(v1), Value::U8(v2)) => v1 >= v2, - (Value::I16(v1), Value::I16(v2)) => v1 >= v2, - (Value::U16(v1), Value::U16(v2)) => v1 >= v2, - (Value::I32(v1), Value::I32(v2)) => v1 >= v2, - (Value::U32(v1), Value::U32(v2)) => v1 >= v2, - (Value::I64(v1), Value::I64(v2)) => v1 >= v2, - (Value::U64(v1), Value::U64(v2)) => v1 >= v2, - (Value::F32(v1), Value::F32(v2)) => v1 >= v2, - (Value::F64(v1), Value::F64(v2)) => v1 >= v2, - _ => return Err(Error::TypeMismatch), - }; - Ok(Value::Generic(value as u64)) - } - - /// Perform the `>` relational operation. - /// - /// This operation requires matching integral types. - /// If the value type is `Generic`, then it is interpreted as a signed value. 
- /// - /// This corresponds to the DWARF `DW_OP_gt` operation. - pub fn gt(self, rhs: Value, addr_mask: u64) -> Result { - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - sign_extend(v1, addr_mask) > sign_extend(v2, addr_mask) - } - (Value::I8(v1), Value::I8(v2)) => v1 > v2, - (Value::U8(v1), Value::U8(v2)) => v1 > v2, - (Value::I16(v1), Value::I16(v2)) => v1 > v2, - (Value::U16(v1), Value::U16(v2)) => v1 > v2, - (Value::I32(v1), Value::I32(v2)) => v1 > v2, - (Value::U32(v1), Value::U32(v2)) => v1 > v2, - (Value::I64(v1), Value::I64(v2)) => v1 > v2, - (Value::U64(v1), Value::U64(v2)) => v1 > v2, - (Value::F32(v1), Value::F32(v2)) => v1 > v2, - (Value::F64(v1), Value::F64(v2)) => v1 > v2, - _ => return Err(Error::TypeMismatch), - }; - Ok(Value::Generic(value as u64)) - } - - /// Perform the `<= relational operation. - /// - /// This operation requires matching integral types. - /// If the value type is `Generic`, then it is interpreted as a signed value. - /// - /// This corresponds to the DWARF `DW_OP_le` operation. - pub fn le(self, rhs: Value, addr_mask: u64) -> Result { - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - sign_extend(v1, addr_mask) <= sign_extend(v2, addr_mask) - } - (Value::I8(v1), Value::I8(v2)) => v1 <= v2, - (Value::U8(v1), Value::U8(v2)) => v1 <= v2, - (Value::I16(v1), Value::I16(v2)) => v1 <= v2, - (Value::U16(v1), Value::U16(v2)) => v1 <= v2, - (Value::I32(v1), Value::I32(v2)) => v1 <= v2, - (Value::U32(v1), Value::U32(v2)) => v1 <= v2, - (Value::I64(v1), Value::I64(v2)) => v1 <= v2, - (Value::U64(v1), Value::U64(v2)) => v1 <= v2, - (Value::F32(v1), Value::F32(v2)) => v1 <= v2, - (Value::F64(v1), Value::F64(v2)) => v1 <= v2, - _ => return Err(Error::TypeMismatch), - }; - Ok(Value::Generic(value as u64)) - } - - /// Perform the `< relational operation. - /// - /// This operation requires matching integral types. 
- /// If the value type is `Generic`, then it is interpreted as a signed value. - /// - /// This corresponds to the DWARF `DW_OP_lt` operation. - pub fn lt(self, rhs: Value, addr_mask: u64) -> Result { - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - sign_extend(v1, addr_mask) < sign_extend(v2, addr_mask) - } - (Value::I8(v1), Value::I8(v2)) => v1 < v2, - (Value::U8(v1), Value::U8(v2)) => v1 < v2, - (Value::I16(v1), Value::I16(v2)) => v1 < v2, - (Value::U16(v1), Value::U16(v2)) => v1 < v2, - (Value::I32(v1), Value::I32(v2)) => v1 < v2, - (Value::U32(v1), Value::U32(v2)) => v1 < v2, - (Value::I64(v1), Value::I64(v2)) => v1 < v2, - (Value::U64(v1), Value::U64(v2)) => v1 < v2, - (Value::F32(v1), Value::F32(v2)) => v1 < v2, - (Value::F64(v1), Value::F64(v2)) => v1 < v2, - _ => return Err(Error::TypeMismatch), - }; - Ok(Value::Generic(value as u64)) - } - - /// Perform the `!= relational operation. - /// - /// This operation requires matching integral types. - /// If the value type is `Generic`, then it is interpreted as a signed value. - /// - /// This corresponds to the DWARF `DW_OP_ne` operation. 
- pub fn ne(self, rhs: Value, addr_mask: u64) -> Result { - let value = match (self, rhs) { - (Value::Generic(v1), Value::Generic(v2)) => { - sign_extend(v1, addr_mask) != sign_extend(v2, addr_mask) - } - (Value::I8(v1), Value::I8(v2)) => v1 != v2, - (Value::U8(v1), Value::U8(v2)) => v1 != v2, - (Value::I16(v1), Value::I16(v2)) => v1 != v2, - (Value::U16(v1), Value::U16(v2)) => v1 != v2, - (Value::I32(v1), Value::I32(v2)) => v1 != v2, - (Value::U32(v1), Value::U32(v2)) => v1 != v2, - (Value::I64(v1), Value::I64(v2)) => v1 != v2, - (Value::U64(v1), Value::U64(v2)) => v1 != v2, - (Value::F32(v1), Value::F32(v2)) => v1 != v2, - (Value::F64(v1), Value::F64(v2)) => v1 != v2, - _ => return Err(Error::TypeMismatch), - }; - Ok(Value::Generic(value as u64)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::common::{DebugAbbrevOffset, DebugInfoOffset, Encoding, Format}; - use crate::endianity::LittleEndian; - use crate::read::{ - Abbreviation, AttributeSpecification, DebuggingInformationEntry, EndianSlice, UnitHeader, - UnitOffset, UnitType, - }; - - #[test] - #[rustfmt::skip] - fn valuetype_from_encoding() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 4, - }; - let unit = UnitHeader::new( - encoding, - 7, - UnitType::Compilation, - DebugAbbrevOffset(0), - DebugInfoOffset(0).into(), - EndianSlice::new(&[], LittleEndian), - ); - - let abbrev = Abbreviation::new( - 42, - constants::DW_TAG_base_type, - constants::DW_CHILDREN_no, - vec![ - AttributeSpecification::new( - constants::DW_AT_byte_size, - constants::DW_FORM_udata, - None, - ), - AttributeSpecification::new( - constants::DW_AT_encoding, - constants::DW_FORM_udata, - None, - ), - AttributeSpecification::new( - constants::DW_AT_endianity, - constants::DW_FORM_udata, - None, - ), - ].into(), - ); - - for &(attrs, result) in &[ - ([0x01, constants::DW_ATE_signed.0, constants::DW_END_default.0], ValueType::I8), - ([0x02, constants::DW_ATE_signed.0, 
constants::DW_END_default.0], ValueType::I16), - ([0x04, constants::DW_ATE_signed.0, constants::DW_END_default.0], ValueType::I32), - ([0x08, constants::DW_ATE_signed.0, constants::DW_END_default.0], ValueType::I64), - ([0x01, constants::DW_ATE_unsigned.0, constants::DW_END_default.0], ValueType::U8), - ([0x02, constants::DW_ATE_unsigned.0, constants::DW_END_default.0], ValueType::U16), - ([0x04, constants::DW_ATE_unsigned.0, constants::DW_END_default.0], ValueType::U32), - ([0x08, constants::DW_ATE_unsigned.0, constants::DW_END_default.0], ValueType::U64), - ([0x04, constants::DW_ATE_float.0, constants::DW_END_default.0], ValueType::F32), - ([0x08, constants::DW_ATE_float.0, constants::DW_END_default.0], ValueType::F64), - ] { - let entry = DebuggingInformationEntry::new( - UnitOffset(0), - EndianSlice::new(&attrs, LittleEndian), - &abbrev, - &unit, - ); - assert_eq!(ValueType::from_entry(&entry), Ok(Some(result))); - } - - for attrs in &[ - [0x03, constants::DW_ATE_signed.0, constants::DW_END_default.0], - [0x02, constants::DW_ATE_signed.0, constants::DW_END_big.0], - ] { - let entry = DebuggingInformationEntry::new( - UnitOffset(0), - EndianSlice::new(attrs, LittleEndian), - &abbrev, - &unit, - ); - assert_eq!(ValueType::from_entry(&entry), Ok(None)); - } - } - - #[test] - fn value_convert() { - let addr_mask = !0 >> 32; - for &(v, t, result) in &[ - (Value::Generic(1), ValueType::I8, Ok(Value::I8(1))), - (Value::I8(1), ValueType::U8, Ok(Value::U8(1))), - (Value::U8(1), ValueType::I16, Ok(Value::I16(1))), - (Value::I16(1), ValueType::U16, Ok(Value::U16(1))), - (Value::U16(1), ValueType::I32, Ok(Value::I32(1))), - (Value::I32(1), ValueType::U32, Ok(Value::U32(1))), - (Value::U32(1), ValueType::F32, Ok(Value::F32(1.))), - (Value::F32(1.), ValueType::I64, Ok(Value::I64(1))), - (Value::I64(1), ValueType::U64, Ok(Value::U64(1))), - (Value::U64(1), ValueType::F64, Ok(Value::F64(1.))), - (Value::F64(1.), ValueType::Generic, Ok(Value::Generic(1))), - ] { - 
assert_eq!(v.convert(t, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_reinterpret() { - let addr_mask = !0 >> 32; - for &(v, t, result) in &[ - // 8-bit - (Value::I8(-1), ValueType::U8, Ok(Value::U8(0xff))), - (Value::U8(0xff), ValueType::I8, Ok(Value::I8(-1))), - // 16-bit - (Value::I16(1), ValueType::U16, Ok(Value::U16(1))), - (Value::U16(1), ValueType::I16, Ok(Value::I16(1))), - // 32-bit - (Value::Generic(1), ValueType::I32, Ok(Value::I32(1))), - (Value::I32(1), ValueType::U32, Ok(Value::U32(1))), - (Value::U32(0x3f80_0000), ValueType::F32, Ok(Value::F32(1.0))), - (Value::F32(1.0), ValueType::Generic, Ok(Value::Generic(0x3f80_0000))), - // Type mismatches - (Value::Generic(1), ValueType::U8, Err(Error::TypeMismatch)), - (Value::U8(1), ValueType::U16, Err(Error::TypeMismatch)), - (Value::U16(1), ValueType::U32, Err(Error::TypeMismatch)), - (Value::U32(1), ValueType::U64, Err(Error::TypeMismatch)), - (Value::U64(1), ValueType::Generic, Err(Error::TypeMismatch)), - ] { - assert_eq!(v.reinterpret(t, addr_mask), result); - } - - let addr_mask = !0; - for &(v, t, result) in &[ - // 64-bit - (Value::Generic(1), ValueType::I64, Ok(Value::I64(1))), - (Value::I64(1), ValueType::U64, Ok(Value::U64(1))), - (Value::U64(0x3ff0_0000_0000_0000), ValueType::F64, Ok(Value::F64(1.0))), - (Value::F64(1.0), ValueType::Generic, Ok(Value::Generic(0x3ff0_0000_0000_0000))), - ] { - assert_eq!(v.reinterpret(t, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_abs() { - let addr_mask = 0xffff_ffff; - for &(v, result) in &[ - (Value::Generic(0xffff_ffff), Ok(Value::Generic(1))), - (Value::I8(-1), Ok(Value::I8(1))), - (Value::U8(1), Ok(Value::U8(1))), - (Value::I16(-1), Ok(Value::I16(1))), - (Value::U16(1), Ok(Value::U16(1))), - (Value::I32(-1), Ok(Value::I32(1))), - (Value::U32(1), Ok(Value::U32(1))), - (Value::I64(-1), Ok(Value::I64(1))), - (Value::U64(1), Ok(Value::U64(1))), - (Value::F32(-1.), Ok(Value::F32(1.))), - (Value::F64(-1.), 
Ok(Value::F64(1.))), - ] { - assert_eq!(v.abs(addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_neg() { - let addr_mask = 0xffff_ffff; - for &(v, result) in &[ - (Value::Generic(0xffff_ffff), Ok(Value::Generic(1))), - (Value::I8(1), Ok(Value::I8(-1))), - (Value::U8(1), Err(Error::UnsupportedTypeOperation)), - (Value::I16(1), Ok(Value::I16(-1))), - (Value::U16(1), Err(Error::UnsupportedTypeOperation)), - (Value::I32(1), Ok(Value::I32(-1))), - (Value::U32(1), Err(Error::UnsupportedTypeOperation)), - (Value::I64(1), Ok(Value::I64(-1))), - (Value::U64(1), Err(Error::UnsupportedTypeOperation)), - (Value::F32(1.), Ok(Value::F32(-1.))), - (Value::F64(1.), Ok(Value::F64(-1.))), - ] { - assert_eq!(v.neg(addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_add() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(1), Value::Generic(2), Ok(Value::Generic(3))), - (Value::I8(-1), Value::I8(2), Ok(Value::I8(1))), - (Value::U8(1), Value::U8(2), Ok(Value::U8(3))), - (Value::I16(-1), Value::I16(2), Ok(Value::I16(1))), - (Value::U16(1), Value::U16(2), Ok(Value::U16(3))), - (Value::I32(-1), Value::I32(2), Ok(Value::I32(1))), - (Value::U32(1), Value::U32(2), Ok(Value::U32(3))), - (Value::I64(-1), Value::I64(2), Ok(Value::I64(1))), - (Value::U64(1), Value::U64(2), Ok(Value::U64(3))), - (Value::F32(-1.), Value::F32(2.), Ok(Value::F32(1.))), - (Value::F64(-1.), Value::F64(2.), Ok(Value::F64(1.))), - (Value::Generic(1), Value::U32(2), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.add(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_sub() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(2), Ok(Value::Generic(1))), - (Value::I8(-1), Value::I8(2), Ok(Value::I8(-3))), - (Value::U8(3), Value::U8(2), Ok(Value::U8(1))), - (Value::I16(-1), Value::I16(2), Ok(Value::I16(-3))), - (Value::U16(3), Value::U16(2), Ok(Value::U16(1))), - (Value::I32(-1), 
Value::I32(2), Ok(Value::I32(-3))), - (Value::U32(3), Value::U32(2), Ok(Value::U32(1))), - (Value::I64(-1), Value::I64(2), Ok(Value::I64(-3))), - (Value::U64(3), Value::U64(2), Ok(Value::U64(1))), - (Value::F32(-1.), Value::F32(2.), Ok(Value::F32(-3.))), - (Value::F64(-1.), Value::F64(2.), Ok(Value::F64(-3.))), - (Value::Generic(3), Value::U32(2), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.sub(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_mul() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(2), Value::Generic(3), Ok(Value::Generic(6))), - (Value::I8(-2), Value::I8(3), Ok(Value::I8(-6))), - (Value::U8(2), Value::U8(3), Ok(Value::U8(6))), - (Value::I16(-2), Value::I16(3), Ok(Value::I16(-6))), - (Value::U16(2), Value::U16(3), Ok(Value::U16(6))), - (Value::I32(-2), Value::I32(3), Ok(Value::I32(-6))), - (Value::U32(2), Value::U32(3), Ok(Value::U32(6))), - (Value::I64(-2), Value::I64(3), Ok(Value::I64(-6))), - (Value::U64(2), Value::U64(3), Ok(Value::U64(6))), - (Value::F32(-2.), Value::F32(3.), Ok(Value::F32(-6.))), - (Value::F64(-2.), Value::F64(3.), Ok(Value::F64(-6.))), - (Value::Generic(2), Value::U32(3), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.mul(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_div() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(6), Value::Generic(3), Ok(Value::Generic(2))), - (Value::I8(-6), Value::I8(3), Ok(Value::I8(-2))), - (Value::U8(6), Value::U8(3), Ok(Value::U8(2))), - (Value::I16(-6), Value::I16(3), Ok(Value::I16(-2))), - (Value::U16(6), Value::U16(3), Ok(Value::U16(2))), - (Value::I32(-6), Value::I32(3), Ok(Value::I32(-2))), - (Value::U32(6), Value::U32(3), Ok(Value::U32(2))), - (Value::I64(-6), Value::I64(3), Ok(Value::I64(-2))), - (Value::U64(6), Value::U64(3), Ok(Value::U64(2))), - (Value::F32(-6.), Value::F32(3.), Ok(Value::F32(-2.))), - (Value::F64(-6.), Value::F64(3.), Ok(Value::F64(-2.))), - 
(Value::Generic(6), Value::U32(3), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.div(v2, addr_mask), result); - } - for &(v1, v2, result) in &[ - (Value::Generic(6), Value::Generic(0), Err(Error::DivisionByZero)), - (Value::I8(-6), Value::I8(0), Err(Error::DivisionByZero)), - (Value::U8(6), Value::U8(0), Err(Error::DivisionByZero)), - (Value::I16(-6), Value::I16(0), Err(Error::DivisionByZero)), - (Value::U16(6), Value::U16(0), Err(Error::DivisionByZero)), - (Value::I32(-6), Value::I32(0), Err(Error::DivisionByZero)), - (Value::U32(6), Value::U32(0), Err(Error::DivisionByZero)), - (Value::I64(-6), Value::I64(0), Err(Error::DivisionByZero)), - (Value::U64(6), Value::U64(0), Err(Error::DivisionByZero)), - (Value::F32(-6.), Value::F32(0.), Ok(Value::F32(-6. / 0.))), - (Value::F64(-6.), Value::F64(0.), Ok(Value::F64(-6. / 0.))), - ] { - assert_eq!(v1.div(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_rem() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(2), Ok(Value::Generic(1))), - (Value::I8(-3), Value::I8(2), Ok(Value::I8(-1))), - (Value::U8(3), Value::U8(2), Ok(Value::U8(1))), - (Value::I16(-3), Value::I16(2), Ok(Value::I16(-1))), - (Value::U16(3), Value::U16(2), Ok(Value::U16(1))), - (Value::I32(-3), Value::I32(2), Ok(Value::I32(-1))), - (Value::U32(3), Value::U32(2), Ok(Value::U32(1))), - (Value::I64(-3), Value::I64(2), Ok(Value::I64(-1))), - (Value::U64(3), Value::U64(2), Ok(Value::U64(1))), - (Value::F32(-3.), Value::F32(2.), Err(Error::IntegralTypeRequired)), - (Value::F64(-3.), Value::F64(2.), Err(Error::IntegralTypeRequired)), - (Value::Generic(3), Value::U32(2), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.rem(v2, addr_mask), result); - } - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(0), Err(Error::DivisionByZero)), - (Value::I8(-3), Value::I8(0), Err(Error::DivisionByZero)), - (Value::U8(3), Value::U8(0), Err(Error::DivisionByZero)), - 
(Value::I16(-3), Value::I16(0), Err(Error::DivisionByZero)), - (Value::U16(3), Value::U16(0), Err(Error::DivisionByZero)), - (Value::I32(-3), Value::I32(0), Err(Error::DivisionByZero)), - (Value::U32(3), Value::U32(0), Err(Error::DivisionByZero)), - (Value::I64(-3), Value::I64(0), Err(Error::DivisionByZero)), - (Value::U64(3), Value::U64(0), Err(Error::DivisionByZero)), - ] { - assert_eq!(v1.rem(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_not() { - let addr_mask = 0xffff_ffff; - for &(v, result) in &[ - (Value::Generic(1), Ok(Value::Generic(!1))), - (Value::I8(1), Ok(Value::I8(!1))), - (Value::U8(1), Ok(Value::U8(!1))), - (Value::I16(1), Ok(Value::I16(!1))), - (Value::U16(1), Ok(Value::U16(!1))), - (Value::I32(1), Ok(Value::I32(!1))), - (Value::U32(1), Ok(Value::U32(!1))), - (Value::I64(1), Ok(Value::I64(!1))), - (Value::U64(1), Ok(Value::U64(!1))), - (Value::F32(1.), Err(Error::IntegralTypeRequired)), - (Value::F64(1.), Err(Error::IntegralTypeRequired)), - ] { - assert_eq!(v.not(addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_and() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(5), Ok(Value::Generic(1))), - (Value::I8(3), Value::I8(5), Ok(Value::I8(1))), - (Value::U8(3), Value::U8(5), Ok(Value::U8(1))), - (Value::I16(3), Value::I16(5), Ok(Value::I16(1))), - (Value::U16(3), Value::U16(5), Ok(Value::U16(1))), - (Value::I32(3), Value::I32(5), Ok(Value::I32(1))), - (Value::U32(3), Value::U32(5), Ok(Value::U32(1))), - (Value::I64(3), Value::I64(5), Ok(Value::I64(1))), - (Value::U64(3), Value::U64(5), Ok(Value::U64(1))), - (Value::F32(3.), Value::F32(5.), Err(Error::IntegralTypeRequired)), - (Value::F64(3.), Value::F64(5.), Err(Error::IntegralTypeRequired)), - (Value::Generic(3), Value::U32(5), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.and(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_or() { - let addr_mask = 0xffff_ffff; - for 
&(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(5), Ok(Value::Generic(7))), - (Value::I8(3), Value::I8(5), Ok(Value::I8(7))), - (Value::U8(3), Value::U8(5), Ok(Value::U8(7))), - (Value::I16(3), Value::I16(5), Ok(Value::I16(7))), - (Value::U16(3), Value::U16(5), Ok(Value::U16(7))), - (Value::I32(3), Value::I32(5), Ok(Value::I32(7))), - (Value::U32(3), Value::U32(5), Ok(Value::U32(7))), - (Value::I64(3), Value::I64(5), Ok(Value::I64(7))), - (Value::U64(3), Value::U64(5), Ok(Value::U64(7))), - (Value::F32(3.), Value::F32(5.), Err(Error::IntegralTypeRequired)), - (Value::F64(3.), Value::F64(5.), Err(Error::IntegralTypeRequired)), - (Value::Generic(3), Value::U32(5), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.or(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_xor() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(5), Ok(Value::Generic(6))), - (Value::I8(3), Value::I8(5), Ok(Value::I8(6))), - (Value::U8(3), Value::U8(5), Ok(Value::U8(6))), - (Value::I16(3), Value::I16(5), Ok(Value::I16(6))), - (Value::U16(3), Value::U16(5), Ok(Value::U16(6))), - (Value::I32(3), Value::I32(5), Ok(Value::I32(6))), - (Value::U32(3), Value::U32(5), Ok(Value::U32(6))), - (Value::I64(3), Value::I64(5), Ok(Value::I64(6))), - (Value::U64(3), Value::U64(5), Ok(Value::U64(6))), - (Value::F32(3.), Value::F32(5.), Err(Error::IntegralTypeRequired)), - (Value::F64(3.), Value::F64(5.), Err(Error::IntegralTypeRequired)), - (Value::Generic(3), Value::U32(5), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.xor(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_shl() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - // One of each type - (Value::Generic(3), Value::Generic(5), Ok(Value::Generic(96))), - (Value::I8(3), Value::U8(5), Ok(Value::I8(96))), - (Value::U8(3), Value::I8(5), Ok(Value::U8(96))), - (Value::I16(3), Value::U16(5), Ok(Value::I16(96))), - (Value::U16(3), 
Value::I16(5), Ok(Value::U16(96))), - (Value::I32(3), Value::U32(5), Ok(Value::I32(96))), - (Value::U32(3), Value::I32(5), Ok(Value::U32(96))), - (Value::I64(3), Value::U64(5), Ok(Value::I64(96))), - (Value::U64(3), Value::I64(5), Ok(Value::U64(96))), - (Value::F32(3.), Value::U8(5), Err(Error::IntegralTypeRequired)), - (Value::F64(3.), Value::U8(5), Err(Error::IntegralTypeRequired)), - // Invalid shifts - (Value::U8(3), Value::I8(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(3), Value::I16(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(3), Value::I32(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(3), Value::I64(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(3), Value::F32(5.), Err(Error::InvalidShiftExpression)), - (Value::U8(3), Value::F64(5.), Err(Error::InvalidShiftExpression)), - // Large shifts - (Value::Generic(3), Value::Generic(32), Ok(Value::Generic(0))), - (Value::I8(3), Value::U8(8), Ok(Value::I8(0))), - (Value::U8(3), Value::I8(9), Ok(Value::U8(0))), - (Value::I16(3), Value::U16(17), Ok(Value::I16(0))), - (Value::U16(3), Value::I16(16), Ok(Value::U16(0))), - (Value::I32(3), Value::U32(32), Ok(Value::I32(0))), - (Value::U32(3), Value::I32(33), Ok(Value::U32(0))), - (Value::I64(3), Value::U64(65), Ok(Value::I64(0))), - (Value::U64(3), Value::I64(64), Ok(Value::U64(0))), - ] { - assert_eq!(v1.shl(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_shr() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - // One of each type - (Value::Generic(96), Value::Generic(5), Ok(Value::Generic(3))), - (Value::I8(96), Value::U8(5), Err(Error::UnsupportedTypeOperation)), - (Value::U8(96), Value::I8(5), Ok(Value::U8(3))), - (Value::I16(96), Value::U16(5), Err(Error::UnsupportedTypeOperation)), - (Value::U16(96), Value::I16(5), Ok(Value::U16(3))), - (Value::I32(96), Value::U32(5), Err(Error::UnsupportedTypeOperation)), - (Value::U32(96), Value::I32(5), Ok(Value::U32(3))), - (Value::I64(96), 
Value::U64(5), Err(Error::UnsupportedTypeOperation)), - (Value::U64(96), Value::I64(5), Ok(Value::U64(3))), - (Value::F32(96.), Value::U8(5), Err(Error::IntegralTypeRequired)), - (Value::F64(96.), Value::U8(5), Err(Error::IntegralTypeRequired)), - // Invalid shifts - (Value::U8(96), Value::I8(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::I16(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::I32(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::I64(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::F32(5.), Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::F64(5.), Err(Error::InvalidShiftExpression)), - // Large shifts - (Value::Generic(96), Value::Generic(32), Ok(Value::Generic(0))), - (Value::U8(96), Value::I8(9), Ok(Value::U8(0))), - (Value::U16(96), Value::I16(16), Ok(Value::U16(0))), - (Value::U32(96), Value::I32(33), Ok(Value::U32(0))), - (Value::U64(96), Value::I64(64), Ok(Value::U64(0))), - ] { - assert_eq!(v1.shr(v2, addr_mask), result); - } - } - - #[test] - #[rustfmt::skip] - fn value_shra() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - // One of each type - (Value::Generic(u64::from(-96i32 as u32)), Value::Generic(5), Ok(Value::Generic(-3i64 as u64))), - (Value::I8(-96), Value::U8(5), Ok(Value::I8(-3))), - (Value::U8(96), Value::I8(5), Err(Error::UnsupportedTypeOperation)), - (Value::I16(-96), Value::U16(5), Ok(Value::I16(-3))), - (Value::U16(96), Value::I16(5), Err(Error::UnsupportedTypeOperation)), - (Value::I32(-96), Value::U32(5), Ok(Value::I32(-3))), - (Value::U32(96), Value::I32(5), Err(Error::UnsupportedTypeOperation)), - (Value::I64(-96), Value::U64(5), Ok(Value::I64(-3))), - (Value::U64(96), Value::I64(5), Err(Error::UnsupportedTypeOperation)), - (Value::F32(96.), Value::U8(5), Err(Error::IntegralTypeRequired)), - (Value::F64(96.), Value::U8(5), Err(Error::IntegralTypeRequired)), - // Invalid shifts - (Value::U8(96), Value::I8(-5), 
Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::I16(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::I32(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::I64(-5), Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::F32(5.), Err(Error::InvalidShiftExpression)), - (Value::U8(96), Value::F64(5.), Err(Error::InvalidShiftExpression)), - // Large shifts - (Value::Generic(96), Value::Generic(32), Ok(Value::Generic(0))), - (Value::I8(96), Value::U8(8), Ok(Value::I8(0))), - (Value::I8(-96), Value::U8(8), Ok(Value::I8(-1))), - (Value::I16(96), Value::U16(17), Ok(Value::I16(0))), - (Value::I16(-96), Value::U16(17), Ok(Value::I16(-1))), - (Value::I32(96), Value::U32(32), Ok(Value::I32(0))), - (Value::I32(-96), Value::U32(32), Ok(Value::I32(-1))), - (Value::I64(96), Value::U64(65), Ok(Value::I64(0))), - (Value::I64(-96), Value::U64(65), Ok(Value::I64(-1))), - ] { - assert_eq!(v1.shra(v2, addr_mask), result); - } - } - - #[test] - fn value_eq() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(3), Ok(Value::Generic(1))), - (Value::Generic(!3), Value::Generic(3), Ok(Value::Generic(0))), - (Value::I8(3), Value::I8(3), Ok(Value::Generic(1))), - (Value::I8(!3), Value::I8(3), Ok(Value::Generic(0))), - (Value::U8(3), Value::U8(3), Ok(Value::Generic(1))), - (Value::U8(!3), Value::U8(3), Ok(Value::Generic(0))), - (Value::I16(3), Value::I16(3), Ok(Value::Generic(1))), - (Value::I16(!3), Value::I16(3), Ok(Value::Generic(0))), - (Value::U16(3), Value::U16(3), Ok(Value::Generic(1))), - (Value::U16(!3), Value::U16(3), Ok(Value::Generic(0))), - (Value::I32(3), Value::I32(3), Ok(Value::Generic(1))), - (Value::I32(!3), Value::I32(3), Ok(Value::Generic(0))), - (Value::U32(3), Value::U32(3), Ok(Value::Generic(1))), - (Value::U32(!3), Value::U32(3), Ok(Value::Generic(0))), - (Value::I64(3), Value::I64(3), Ok(Value::Generic(1))), - (Value::I64(!3), Value::I64(3), Ok(Value::Generic(0))), - 
(Value::U64(3), Value::U64(3), Ok(Value::Generic(1))), - (Value::U64(!3), Value::U64(3), Ok(Value::Generic(0))), - (Value::F32(3.), Value::F32(3.), Ok(Value::Generic(1))), - (Value::F32(-3.), Value::F32(3.), Ok(Value::Generic(0))), - (Value::F64(3.), Value::F64(3.), Ok(Value::Generic(1))), - (Value::F64(-3.), Value::F64(3.), Ok(Value::Generic(0))), - (Value::Generic(3), Value::U32(3), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.eq(v2, addr_mask), result); - } - } - - #[test] - fn value_ne() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(3), Ok(Value::Generic(0))), - (Value::Generic(!3), Value::Generic(3), Ok(Value::Generic(1))), - (Value::I8(3), Value::I8(3), Ok(Value::Generic(0))), - (Value::I8(!3), Value::I8(3), Ok(Value::Generic(1))), - (Value::U8(3), Value::U8(3), Ok(Value::Generic(0))), - (Value::U8(!3), Value::U8(3), Ok(Value::Generic(1))), - (Value::I16(3), Value::I16(3), Ok(Value::Generic(0))), - (Value::I16(!3), Value::I16(3), Ok(Value::Generic(1))), - (Value::U16(3), Value::U16(3), Ok(Value::Generic(0))), - (Value::U16(!3), Value::U16(3), Ok(Value::Generic(1))), - (Value::I32(3), Value::I32(3), Ok(Value::Generic(0))), - (Value::I32(!3), Value::I32(3), Ok(Value::Generic(1))), - (Value::U32(3), Value::U32(3), Ok(Value::Generic(0))), - (Value::U32(!3), Value::U32(3), Ok(Value::Generic(1))), - (Value::I64(3), Value::I64(3), Ok(Value::Generic(0))), - (Value::I64(!3), Value::I64(3), Ok(Value::Generic(1))), - (Value::U64(3), Value::U64(3), Ok(Value::Generic(0))), - (Value::U64(!3), Value::U64(3), Ok(Value::Generic(1))), - (Value::F32(3.), Value::F32(3.), Ok(Value::Generic(0))), - (Value::F32(-3.), Value::F32(3.), Ok(Value::Generic(1))), - (Value::F64(3.), Value::F64(3.), Ok(Value::Generic(0))), - (Value::F64(-3.), Value::F64(3.), Ok(Value::Generic(1))), - (Value::Generic(3), Value::U32(3), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.ne(v2, addr_mask), result); - } - } - - #[test] - fn value_ge() { - 
let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(!3), Ok(Value::Generic(1))), - (Value::Generic(!3), Value::Generic(3), Ok(Value::Generic(0))), - (Value::I8(3), Value::I8(!3), Ok(Value::Generic(1))), - (Value::I8(!3), Value::I8(3), Ok(Value::Generic(0))), - (Value::U8(3), Value::U8(!3), Ok(Value::Generic(0))), - (Value::U8(!3), Value::U8(3), Ok(Value::Generic(1))), - (Value::I16(3), Value::I16(!3), Ok(Value::Generic(1))), - (Value::I16(!3), Value::I16(3), Ok(Value::Generic(0))), - (Value::U16(3), Value::U16(!3), Ok(Value::Generic(0))), - (Value::U16(!3), Value::U16(3), Ok(Value::Generic(1))), - (Value::I32(3), Value::I32(!3), Ok(Value::Generic(1))), - (Value::I32(!3), Value::I32(3), Ok(Value::Generic(0))), - (Value::U32(3), Value::U32(!3), Ok(Value::Generic(0))), - (Value::U32(!3), Value::U32(3), Ok(Value::Generic(1))), - (Value::I64(3), Value::I64(!3), Ok(Value::Generic(1))), - (Value::I64(!3), Value::I64(3), Ok(Value::Generic(0))), - (Value::U64(3), Value::U64(!3), Ok(Value::Generic(0))), - (Value::U64(!3), Value::U64(3), Ok(Value::Generic(1))), - (Value::F32(3.), Value::F32(-3.), Ok(Value::Generic(1))), - (Value::F32(-3.), Value::F32(3.), Ok(Value::Generic(0))), - (Value::F64(3.), Value::F64(-3.), Ok(Value::Generic(1))), - (Value::F64(-3.), Value::F64(3.), Ok(Value::Generic(0))), - (Value::Generic(3), Value::U32(3), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.ge(v2, addr_mask), result); - } - } - - #[test] - fn value_gt() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(!3), Ok(Value::Generic(1))), - (Value::Generic(!3), Value::Generic(3), Ok(Value::Generic(0))), - (Value::I8(3), Value::I8(!3), Ok(Value::Generic(1))), - (Value::I8(!3), Value::I8(3), Ok(Value::Generic(0))), - (Value::U8(3), Value::U8(!3), Ok(Value::Generic(0))), - (Value::U8(!3), Value::U8(3), Ok(Value::Generic(1))), - (Value::I16(3), Value::I16(!3), Ok(Value::Generic(1))), - (Value::I16(!3), 
Value::I16(3), Ok(Value::Generic(0))), - (Value::U16(3), Value::U16(!3), Ok(Value::Generic(0))), - (Value::U16(!3), Value::U16(3), Ok(Value::Generic(1))), - (Value::I32(3), Value::I32(!3), Ok(Value::Generic(1))), - (Value::I32(!3), Value::I32(3), Ok(Value::Generic(0))), - (Value::U32(3), Value::U32(!3), Ok(Value::Generic(0))), - (Value::U32(!3), Value::U32(3), Ok(Value::Generic(1))), - (Value::I64(3), Value::I64(!3), Ok(Value::Generic(1))), - (Value::I64(!3), Value::I64(3), Ok(Value::Generic(0))), - (Value::U64(3), Value::U64(!3), Ok(Value::Generic(0))), - (Value::U64(!3), Value::U64(3), Ok(Value::Generic(1))), - (Value::F32(3.), Value::F32(-3.), Ok(Value::Generic(1))), - (Value::F32(-3.), Value::F32(3.), Ok(Value::Generic(0))), - (Value::F64(3.), Value::F64(-3.), Ok(Value::Generic(1))), - (Value::F64(-3.), Value::F64(3.), Ok(Value::Generic(0))), - (Value::Generic(3), Value::U32(3), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.gt(v2, addr_mask), result); - } - } - - #[test] - fn value_le() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(!3), Ok(Value::Generic(0))), - (Value::Generic(!3), Value::Generic(3), Ok(Value::Generic(1))), - (Value::I8(3), Value::I8(!3), Ok(Value::Generic(0))), - (Value::I8(!3), Value::I8(3), Ok(Value::Generic(1))), - (Value::U8(3), Value::U8(!3), Ok(Value::Generic(1))), - (Value::U8(!3), Value::U8(3), Ok(Value::Generic(0))), - (Value::I16(3), Value::I16(!3), Ok(Value::Generic(0))), - (Value::I16(!3), Value::I16(3), Ok(Value::Generic(1))), - (Value::U16(3), Value::U16(!3), Ok(Value::Generic(1))), - (Value::U16(!3), Value::U16(3), Ok(Value::Generic(0))), - (Value::I32(3), Value::I32(!3), Ok(Value::Generic(0))), - (Value::I32(!3), Value::I32(3), Ok(Value::Generic(1))), - (Value::U32(3), Value::U32(!3), Ok(Value::Generic(1))), - (Value::U32(!3), Value::U32(3), Ok(Value::Generic(0))), - (Value::I64(3), Value::I64(!3), Ok(Value::Generic(0))), - (Value::I64(!3), Value::I64(3), 
Ok(Value::Generic(1))), - (Value::U64(3), Value::U64(!3), Ok(Value::Generic(1))), - (Value::U64(!3), Value::U64(3), Ok(Value::Generic(0))), - (Value::F32(3.), Value::F32(-3.), Ok(Value::Generic(0))), - (Value::F32(-3.), Value::F32(3.), Ok(Value::Generic(1))), - (Value::F64(3.), Value::F64(-3.), Ok(Value::Generic(0))), - (Value::F64(-3.), Value::F64(3.), Ok(Value::Generic(1))), - (Value::Generic(3), Value::U32(3), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.le(v2, addr_mask), result); - } - } - - #[test] - fn value_lt() { - let addr_mask = 0xffff_ffff; - for &(v1, v2, result) in &[ - (Value::Generic(3), Value::Generic(!3), Ok(Value::Generic(0))), - (Value::Generic(!3), Value::Generic(3), Ok(Value::Generic(1))), - (Value::I8(3), Value::I8(!3), Ok(Value::Generic(0))), - (Value::I8(!3), Value::I8(3), Ok(Value::Generic(1))), - (Value::U8(3), Value::U8(!3), Ok(Value::Generic(1))), - (Value::U8(!3), Value::U8(3), Ok(Value::Generic(0))), - (Value::I16(3), Value::I16(!3), Ok(Value::Generic(0))), - (Value::I16(!3), Value::I16(3), Ok(Value::Generic(1))), - (Value::U16(3), Value::U16(!3), Ok(Value::Generic(1))), - (Value::U16(!3), Value::U16(3), Ok(Value::Generic(0))), - (Value::I32(3), Value::I32(!3), Ok(Value::Generic(0))), - (Value::I32(!3), Value::I32(3), Ok(Value::Generic(1))), - (Value::U32(3), Value::U32(!3), Ok(Value::Generic(1))), - (Value::U32(!3), Value::U32(3), Ok(Value::Generic(0))), - (Value::I64(3), Value::I64(!3), Ok(Value::Generic(0))), - (Value::I64(!3), Value::I64(3), Ok(Value::Generic(1))), - (Value::U64(3), Value::U64(!3), Ok(Value::Generic(1))), - (Value::U64(!3), Value::U64(3), Ok(Value::Generic(0))), - (Value::F32(3.), Value::F32(-3.), Ok(Value::Generic(0))), - (Value::F32(-3.), Value::F32(3.), Ok(Value::Generic(1))), - (Value::F64(3.), Value::F64(-3.), Ok(Value::Generic(0))), - (Value::F64(-3.), Value::F64(3.), Ok(Value::Generic(1))), - (Value::Generic(3), Value::U32(3), Err(Error::TypeMismatch)), - ] { - assert_eq!(v1.lt(v2, addr_mask), result); 
- } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/test_util.rs s390-tools-2.33.1/rust-vendor/gimli/src/test_util.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/test_util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/test_util.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,53 +0,0 @@ -#![allow(missing_docs)] - -use crate::Format; -use test_assembler::{Label, Section}; - -pub trait GimliSectionMethods { - fn sleb(self, val: i64) -> Self; - fn uleb(self, val: u64) -> Self; - fn initial_length(self, format: Format, length: &Label, start: &Label) -> Self; - fn word(self, size: u8, val: u64) -> Self; - fn word_label(self, size: u8, val: &Label) -> Self; -} - -impl GimliSectionMethods for Section { - fn sleb(mut self, mut val: i64) -> Self { - while val & !0x3f != 0 && val | 0x3f != -1 { - self = self.D8(val as u8 | 0x80); - val >>= 7; - } - self.D8(val as u8 & 0x7f) - } - - fn uleb(mut self, mut val: u64) -> Self { - while val & !0x7f != 0 { - self = self.D8(val as u8 | 0x80); - val >>= 7; - } - self.D8(val as u8) - } - - fn initial_length(self, format: Format, length: &Label, start: &Label) -> Self { - match format { - Format::Dwarf32 => self.D32(length).mark(start), - Format::Dwarf64 => self.D32(0xffff_ffff).D64(length).mark(start), - } - } - - fn word(self, size: u8, val: u64) -> Self { - match size { - 4 => self.D32(val as u32), - 8 => self.D64(val), - _ => panic!("unsupported word size"), - } - } - - fn word_label(self, size: u8, val: &Label) -> Self { - match size { - 4 => self.D32(val), - 8 => self.D64(val), - _ => panic!("unsupported word size"), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/abbrev.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/abbrev.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/abbrev.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/abbrev.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,188 +0,0 @@ -use alloc::vec::Vec; -use 
indexmap::IndexSet; -use std::ops::{Deref, DerefMut}; - -use crate::common::{DebugAbbrevOffset, SectionId}; -use crate::constants; -use crate::write::{Result, Section, Writer}; - -/// A table of abbreviations that will be stored in a `.debug_abbrev` section. -// Requirements: -// - values are `Abbreviation` -// - insertion returns an abbreviation code for use in writing a DIE -// - inserting a duplicate returns the code of the existing value -#[derive(Debug, Default)] -pub(crate) struct AbbreviationTable { - abbrevs: IndexSet, -} - -impl AbbreviationTable { - /// Add an abbreviation to the table and return its code. - pub fn add(&mut self, abbrev: Abbreviation) -> u64 { - let (code, _) = self.abbrevs.insert_full(abbrev); - // Code must be non-zero - (code + 1) as u64 - } - - /// Write the abbreviation table to the `.debug_abbrev` section. - pub fn write(&self, w: &mut DebugAbbrev) -> Result<()> { - for (code, abbrev) in self.abbrevs.iter().enumerate() { - w.write_uleb128((code + 1) as u64)?; - abbrev.write(w)?; - } - // Null abbreviation code - w.write_u8(0) - } -} - -/// An abbreviation describes the shape of a `DebuggingInformationEntry`'s type: -/// its tag type, whether it has children, and its set of attributes. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub(crate) struct Abbreviation { - tag: constants::DwTag, - has_children: bool, - attributes: Vec, -} - -impl Abbreviation { - /// Construct a new `Abbreviation`. - #[inline] - pub fn new( - tag: constants::DwTag, - has_children: bool, - attributes: Vec, - ) -> Abbreviation { - Abbreviation { - tag, - has_children, - attributes, - } - } - - /// Write the abbreviation to the `.debug_abbrev` section. 
- pub fn write(&self, w: &mut DebugAbbrev) -> Result<()> { - w.write_uleb128(self.tag.0.into())?; - w.write_u8(if self.has_children { - constants::DW_CHILDREN_yes.0 - } else { - constants::DW_CHILDREN_no.0 - })?; - for attr in &self.attributes { - attr.write(w)?; - } - // Null name and form - w.write_u8(0)?; - w.write_u8(0) - } -} - -/// The description of an attribute in an abbreviated type. -// TODO: support implicit const -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub(crate) struct AttributeSpecification { - name: constants::DwAt, - form: constants::DwForm, -} - -impl AttributeSpecification { - /// Construct a new `AttributeSpecification`. - #[inline] - pub fn new(name: constants::DwAt, form: constants::DwForm) -> AttributeSpecification { - AttributeSpecification { name, form } - } - - /// Write the attribute specification to the `.debug_abbrev` section. - #[inline] - pub fn write(&self, w: &mut DebugAbbrev) -> Result<()> { - w.write_uleb128(self.name.0.into())?; - w.write_uleb128(self.form.0.into()) - } -} - -define_section!( - DebugAbbrev, - DebugAbbrevOffset, - "A writable `.debug_abbrev` section." 
-); - -#[cfg(test)] -#[cfg(feature = "read")] -mod tests { - use super::*; - use crate::constants; - use crate::read; - use crate::write::EndianVec; - use crate::LittleEndian; - - #[test] - fn test_abbreviation_table() { - let mut abbrevs = AbbreviationTable::default(); - let abbrev1 = Abbreviation::new( - constants::DW_TAG_subprogram, - false, - vec![AttributeSpecification::new( - constants::DW_AT_name, - constants::DW_FORM_string, - )], - ); - let abbrev2 = Abbreviation::new( - constants::DW_TAG_compile_unit, - true, - vec![ - AttributeSpecification::new(constants::DW_AT_producer, constants::DW_FORM_strp), - AttributeSpecification::new(constants::DW_AT_language, constants::DW_FORM_data2), - ], - ); - let code1 = abbrevs.add(abbrev1.clone()); - assert_eq!(code1, 1); - let code2 = abbrevs.add(abbrev2.clone()); - assert_eq!(code2, 2); - assert_eq!(abbrevs.add(abbrev1.clone()), code1); - assert_eq!(abbrevs.add(abbrev2.clone()), code2); - - let mut debug_abbrev = DebugAbbrev::from(EndianVec::new(LittleEndian)); - let debug_abbrev_offset = debug_abbrev.offset(); - assert_eq!(debug_abbrev_offset, DebugAbbrevOffset(0)); - abbrevs.write(&mut debug_abbrev).unwrap(); - assert_eq!(debug_abbrev.offset(), DebugAbbrevOffset(17)); - - let read_debug_abbrev = read::DebugAbbrev::new(debug_abbrev.slice(), LittleEndian); - let read_abbrevs = read_debug_abbrev - .abbreviations(debug_abbrev_offset) - .unwrap(); - - let read_abbrev1 = read_abbrevs.get(code1).unwrap(); - assert_eq!(abbrev1.tag, read_abbrev1.tag()); - assert_eq!(abbrev1.has_children, read_abbrev1.has_children()); - assert_eq!(abbrev1.attributes.len(), read_abbrev1.attributes().len()); - assert_eq!( - abbrev1.attributes[0].name, - read_abbrev1.attributes()[0].name() - ); - assert_eq!( - abbrev1.attributes[0].form, - read_abbrev1.attributes()[0].form() - ); - - let read_abbrev2 = read_abbrevs.get(code2).unwrap(); - assert_eq!(abbrev2.tag, read_abbrev2.tag()); - assert_eq!(abbrev2.has_children, read_abbrev2.has_children()); 
- assert_eq!(abbrev2.attributes.len(), read_abbrev2.attributes().len()); - assert_eq!( - abbrev2.attributes[0].name, - read_abbrev2.attributes()[0].name() - ); - assert_eq!( - abbrev2.attributes[0].form, - read_abbrev2.attributes()[0].form() - ); - assert_eq!( - abbrev2.attributes[1].name, - read_abbrev2.attributes()[1].name() - ); - assert_eq!( - abbrev2.attributes[1].form, - read_abbrev2.attributes()[1].form() - ); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/cfi.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/cfi.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/cfi.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/cfi.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1050 +0,0 @@ -use alloc::vec::Vec; -use indexmap::IndexSet; -use std::ops::{Deref, DerefMut}; - -use crate::common::{DebugFrameOffset, EhFrameOffset, Encoding, Format, Register, SectionId}; -use crate::constants; -use crate::write::{Address, BaseId, Error, Expression, Result, Section, Writer}; - -define_section!( - DebugFrame, - DebugFrameOffset, - "A writable `.debug_frame` section." -); - -define_section!(EhFrame, EhFrameOffset, "A writable `.eh_frame` section."); - -define_id!(CieId, "An identifier for a CIE in a `FrameTable`."); - -/// A table of frame description entries. -#[derive(Debug, Default)] -pub struct FrameTable { - /// Base id for CIEs. - base_id: BaseId, - /// The common information entries. - cies: IndexSet, - /// The frame description entries. - fdes: Vec<(CieId, FrameDescriptionEntry)>, -} - -impl FrameTable { - /// Add a CIE and return its id. - /// - /// If the CIE already exists, then return the id of the existing CIE. - pub fn add_cie(&mut self, cie: CommonInformationEntry) -> CieId { - let (index, _) = self.cies.insert_full(cie); - CieId::new(self.base_id, index) - } - - /// The number of CIEs. - pub fn cie_count(&self) -> usize { - self.cies.len() - } - - /// Add a FDE. - /// - /// Does not check for duplicates. 
- /// - /// # Panics - /// - /// Panics if the CIE id is invalid. - pub fn add_fde(&mut self, cie: CieId, fde: FrameDescriptionEntry) { - debug_assert_eq!(self.base_id, cie.base_id); - self.fdes.push((cie, fde)); - } - - /// The number of FDEs. - pub fn fde_count(&self) -> usize { - self.fdes.len() - } - - /// Write the frame table entries to the given `.debug_frame` section. - pub fn write_debug_frame(&self, w: &mut DebugFrame) -> Result<()> { - self.write(&mut w.0, false) - } - - /// Write the frame table entries to the given `.eh_frame` section. - pub fn write_eh_frame(&self, w: &mut EhFrame) -> Result<()> { - self.write(&mut w.0, true) - } - - fn write(&self, w: &mut W, eh_frame: bool) -> Result<()> { - let mut cie_offsets = vec![None; self.cies.len()]; - for (cie_id, fde) in &self.fdes { - let cie_index = cie_id.index; - let cie = self.cies.get_index(cie_index).unwrap(); - let cie_offset = match cie_offsets[cie_index] { - Some(offset) => offset, - None => { - // Only write CIEs as they are referenced. - let offset = cie.write(w, eh_frame)?; - cie_offsets[cie_index] = Some(offset); - offset - } - }; - - fde.write(w, eh_frame, cie_offset, cie)?; - } - // TODO: write length 0 terminator for eh_frame? - Ok(()) - } -} - -/// A common information entry. This contains information that is shared between FDEs. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct CommonInformationEntry { - encoding: Encoding, - - /// A constant that is factored out of code offsets. - /// - /// This should be set to the minimum instruction length. - /// Writing a code offset that is not a multiple of this factor will generate an error. - code_alignment_factor: u8, - - /// A constant that is factored out of data offsets. - /// - /// This should be set to the minimum data alignment for the frame. - /// Writing a data offset that is not a multiple of this factor will generate an error. - data_alignment_factor: i8, - - /// The return address register. 
This might not correspond to an actual machine register. - return_address_register: Register, - - /// The address of the personality function and its encoding. - pub personality: Option<(constants::DwEhPe, Address)>, - - /// The encoding to use for the LSDA address in FDEs. - /// - /// If set then all FDEs which use this CIE must have a LSDA address. - pub lsda_encoding: Option, - - /// The encoding to use for addresses in FDEs. - pub fde_address_encoding: constants::DwEhPe, - - /// True for signal trampolines. - pub signal_trampoline: bool, - - /// The initial instructions upon entry to this function. - instructions: Vec, -} - -impl CommonInformationEntry { - /// Create a new common information entry. - /// - /// The encoding version must be a CFI version, not a DWARF version. - pub fn new( - encoding: Encoding, - code_alignment_factor: u8, - data_alignment_factor: i8, - return_address_register: Register, - ) -> Self { - CommonInformationEntry { - encoding, - code_alignment_factor, - data_alignment_factor, - return_address_register, - personality: None, - lsda_encoding: None, - fde_address_encoding: constants::DW_EH_PE_absptr, - signal_trampoline: false, - instructions: Vec::new(), - } - } - - /// Add an initial instruction. - pub fn add_instruction(&mut self, instruction: CallFrameInstruction) { - self.instructions.push(instruction); - } - - fn has_augmentation(&self) -> bool { - self.personality.is_some() - || self.lsda_encoding.is_some() - || self.signal_trampoline - || self.fde_address_encoding != constants::DW_EH_PE_absptr - } - - /// Returns the section offset of the CIE. 
- fn write(&self, w: &mut W, eh_frame: bool) -> Result { - let encoding = self.encoding; - let offset = w.len(); - - let length_offset = w.write_initial_length(encoding.format)?; - let length_base = w.len(); - - if eh_frame { - w.write_u32(0)?; - } else { - match encoding.format { - Format::Dwarf32 => w.write_u32(0xffff_ffff)?, - Format::Dwarf64 => w.write_u64(0xffff_ffff_ffff_ffff)?, - } - } - - if eh_frame { - if encoding.version != 1 { - return Err(Error::UnsupportedVersion(encoding.version)); - }; - } else { - match encoding.version { - 1 | 3 | 4 => {} - _ => return Err(Error::UnsupportedVersion(encoding.version)), - }; - } - w.write_u8(encoding.version as u8)?; - - let augmentation = self.has_augmentation(); - if augmentation { - w.write_u8(b'z')?; - if self.lsda_encoding.is_some() { - w.write_u8(b'L')?; - } - if self.personality.is_some() { - w.write_u8(b'P')?; - } - if self.fde_address_encoding != constants::DW_EH_PE_absptr { - w.write_u8(b'R')?; - } - if self.signal_trampoline { - w.write_u8(b'S')?; - } - } - w.write_u8(0)?; - - if encoding.version >= 4 { - w.write_u8(encoding.address_size)?; - // TODO: segment_selector_size - w.write_u8(0)?; - } - - w.write_uleb128(self.code_alignment_factor.into())?; - w.write_sleb128(self.data_alignment_factor.into())?; - - if !eh_frame && encoding.version == 1 { - let register = self.return_address_register.0 as u8; - if u16::from(register) != self.return_address_register.0 { - return Err(Error::ValueTooLarge); - } - w.write_u8(register)?; - } else { - w.write_uleb128(self.return_address_register.0.into())?; - } - - if augmentation { - let augmentation_length_offset = w.len(); - w.write_u8(0)?; - let augmentation_length_base = w.len(); - - if let Some(eh_pe) = self.lsda_encoding { - w.write_u8(eh_pe.0)?; - } - if let Some((eh_pe, address)) = self.personality { - w.write_u8(eh_pe.0)?; - w.write_eh_pointer(address, eh_pe, encoding.address_size)?; - } - if self.fde_address_encoding != constants::DW_EH_PE_absptr { - 
w.write_u8(self.fde_address_encoding.0)?; - } - - let augmentation_length = (w.len() - augmentation_length_base) as u64; - debug_assert!(augmentation_length < 0x80); - w.write_udata_at(augmentation_length_offset, augmentation_length, 1)?; - } - - for instruction in &self.instructions { - instruction.write(w, encoding, self)?; - } - - write_nop( - w, - encoding.format.word_size() as usize + w.len() - length_base, - encoding.address_size, - )?; - - let length = (w.len() - length_base) as u64; - w.write_initial_length_at(length_offset, length, encoding.format)?; - - Ok(offset) - } -} - -/// A frame description entry. There should be one FDE per function. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct FrameDescriptionEntry { - /// The initial address of the function. - address: Address, - - /// The length in bytes of the function. - length: u32, - - /// The address of the LSDA. - pub lsda: Option
, - - /// The instructions for this function, ordered by offset. - instructions: Vec<(u32, CallFrameInstruction)>, -} - -impl FrameDescriptionEntry { - /// Create a new frame description entry for a function. - pub fn new(address: Address, length: u32) -> Self { - FrameDescriptionEntry { - address, - length, - lsda: None, - instructions: Vec::new(), - } - } - - /// Add an instruction. - /// - /// Instructions must be added in increasing order of offset, or writing will fail. - pub fn add_instruction(&mut self, offset: u32, instruction: CallFrameInstruction) { - debug_assert!(self.instructions.last().map(|x| x.0).unwrap_or(0) <= offset); - self.instructions.push((offset, instruction)); - } - - fn write( - &self, - w: &mut W, - eh_frame: bool, - cie_offset: usize, - cie: &CommonInformationEntry, - ) -> Result<()> { - let encoding = cie.encoding; - let length_offset = w.write_initial_length(encoding.format)?; - let length_base = w.len(); - - if eh_frame { - // .eh_frame uses a relative offset which doesn't need relocation. 
- w.write_udata((w.len() - cie_offset) as u64, 4)?; - } else { - w.write_offset( - cie_offset, - SectionId::DebugFrame, - encoding.format.word_size(), - )?; - } - - if cie.fde_address_encoding != constants::DW_EH_PE_absptr { - w.write_eh_pointer( - self.address, - cie.fde_address_encoding, - encoding.address_size, - )?; - w.write_eh_pointer_data( - self.length.into(), - cie.fde_address_encoding.format(), - encoding.address_size, - )?; - } else { - w.write_address(self.address, encoding.address_size)?; - w.write_udata(self.length.into(), encoding.address_size)?; - } - - if cie.has_augmentation() { - let mut augmentation_length = 0u64; - if self.lsda.is_some() { - augmentation_length += u64::from(encoding.address_size); - } - w.write_uleb128(augmentation_length)?; - - debug_assert_eq!(self.lsda.is_some(), cie.lsda_encoding.is_some()); - if let (Some(lsda), Some(lsda_encoding)) = (self.lsda, cie.lsda_encoding) { - w.write_eh_pointer(lsda, lsda_encoding, encoding.address_size)?; - } - } - - let mut prev_offset = 0; - for (offset, instruction) in &self.instructions { - write_advance_loc(w, cie.code_alignment_factor, prev_offset, *offset)?; - prev_offset = *offset; - instruction.write(w, encoding, cie)?; - } - - write_nop( - w, - encoding.format.word_size() as usize + w.len() - length_base, - encoding.address_size, - )?; - - let length = (w.len() - length_base) as u64; - w.write_initial_length_at(length_offset, length, encoding.format)?; - - Ok(()) - } -} - -/// An instruction in a frame description entry. -/// -/// This may be a CFA definition, a register rule, or some other directive. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum CallFrameInstruction { - /// Define the CFA rule to use the provided register and offset. - Cfa(Register, i32), - /// Update the CFA rule to use the provided register. The offset is unchanged. - CfaRegister(Register), - /// Update the CFA rule to use the provided offset. The register is unchanged. 
- CfaOffset(i32), - /// Define the CFA rule to use the provided expression. - CfaExpression(Expression), - - /// Restore the initial rule for the register. - Restore(Register), - /// The previous value of the register is not recoverable. - Undefined(Register), - /// The register has not been modified. - SameValue(Register), - /// The previous value of the register is saved at address CFA + offset. - Offset(Register, i32), - /// The previous value of the register is CFA + offset. - ValOffset(Register, i32), - /// The previous value of the register is stored in another register. - Register(Register, Register), - /// The previous value of the register is saved at address given by the expression. - Expression(Register, Expression), - /// The previous value of the register is given by the expression. - ValExpression(Register, Expression), - - /// Push all register rules onto a stack. - RememberState, - /// Pop all register rules off the stack. - RestoreState, - /// The size of the arguments that have been pushed onto the stack. - ArgsSize(u32), - - /// AAarch64 extension: negate the `RA_SIGN_STATE` pseudo-register. - NegateRaState, -} - -impl CallFrameInstruction { - fn write( - &self, - w: &mut W, - encoding: Encoding, - cie: &CommonInformationEntry, - ) -> Result<()> { - match *self { - CallFrameInstruction::Cfa(register, offset) => { - if offset < 0 { - let offset = factored_data_offset(offset, cie.data_alignment_factor)?; - w.write_u8(constants::DW_CFA_def_cfa_sf.0)?; - w.write_uleb128(register.0.into())?; - w.write_sleb128(offset.into())?; - } else { - // Unfactored offset. 
- w.write_u8(constants::DW_CFA_def_cfa.0)?; - w.write_uleb128(register.0.into())?; - w.write_uleb128(offset as u64)?; - } - } - CallFrameInstruction::CfaRegister(register) => { - w.write_u8(constants::DW_CFA_def_cfa_register.0)?; - w.write_uleb128(register.0.into())?; - } - CallFrameInstruction::CfaOffset(offset) => { - if offset < 0 { - let offset = factored_data_offset(offset, cie.data_alignment_factor)?; - w.write_u8(constants::DW_CFA_def_cfa_offset_sf.0)?; - w.write_sleb128(offset.into())?; - } else { - // Unfactored offset. - w.write_u8(constants::DW_CFA_def_cfa_offset.0)?; - w.write_uleb128(offset as u64)?; - } - } - CallFrameInstruction::CfaExpression(ref expression) => { - w.write_u8(constants::DW_CFA_def_cfa_expression.0)?; - w.write_uleb128(expression.size(encoding, None) as u64)?; - expression.write(w, None, encoding, None)?; - } - CallFrameInstruction::Restore(register) => { - if register.0 < 0x40 { - w.write_u8(constants::DW_CFA_restore.0 | register.0 as u8)?; - } else { - w.write_u8(constants::DW_CFA_restore_extended.0)?; - w.write_uleb128(register.0.into())?; - } - } - CallFrameInstruction::Undefined(register) => { - w.write_u8(constants::DW_CFA_undefined.0)?; - w.write_uleb128(register.0.into())?; - } - CallFrameInstruction::SameValue(register) => { - w.write_u8(constants::DW_CFA_same_value.0)?; - w.write_uleb128(register.0.into())?; - } - CallFrameInstruction::Offset(register, offset) => { - let offset = factored_data_offset(offset, cie.data_alignment_factor)?; - if offset < 0 { - w.write_u8(constants::DW_CFA_offset_extended_sf.0)?; - w.write_uleb128(register.0.into())?; - w.write_sleb128(offset.into())?; - } else if register.0 < 0x40 { - w.write_u8(constants::DW_CFA_offset.0 | register.0 as u8)?; - w.write_uleb128(offset as u64)?; - } else { - w.write_u8(constants::DW_CFA_offset_extended.0)?; - w.write_uleb128(register.0.into())?; - w.write_uleb128(offset as u64)?; - } - } - CallFrameInstruction::ValOffset(register, offset) => { - let offset = 
factored_data_offset(offset, cie.data_alignment_factor)?; - if offset < 0 { - w.write_u8(constants::DW_CFA_val_offset_sf.0)?; - w.write_uleb128(register.0.into())?; - w.write_sleb128(offset.into())?; - } else { - w.write_u8(constants::DW_CFA_val_offset.0)?; - w.write_uleb128(register.0.into())?; - w.write_uleb128(offset as u64)?; - } - } - CallFrameInstruction::Register(register1, register2) => { - w.write_u8(constants::DW_CFA_register.0)?; - w.write_uleb128(register1.0.into())?; - w.write_uleb128(register2.0.into())?; - } - CallFrameInstruction::Expression(register, ref expression) => { - w.write_u8(constants::DW_CFA_expression.0)?; - w.write_uleb128(register.0.into())?; - w.write_uleb128(expression.size(encoding, None) as u64)?; - expression.write(w, None, encoding, None)?; - } - CallFrameInstruction::ValExpression(register, ref expression) => { - w.write_u8(constants::DW_CFA_val_expression.0)?; - w.write_uleb128(register.0.into())?; - w.write_uleb128(expression.size(encoding, None) as u64)?; - expression.write(w, None, encoding, None)?; - } - CallFrameInstruction::RememberState => { - w.write_u8(constants::DW_CFA_remember_state.0)?; - } - CallFrameInstruction::RestoreState => { - w.write_u8(constants::DW_CFA_restore_state.0)?; - } - CallFrameInstruction::ArgsSize(size) => { - w.write_u8(constants::DW_CFA_GNU_args_size.0)?; - w.write_uleb128(size.into())?; - } - CallFrameInstruction::NegateRaState => { - w.write_u8(constants::DW_CFA_AARCH64_negate_ra_state.0)?; - } - } - Ok(()) - } -} - -fn write_advance_loc( - w: &mut W, - code_alignment_factor: u8, - prev_offset: u32, - offset: u32, -) -> Result<()> { - if offset == prev_offset { - return Ok(()); - } - let delta = factored_code_delta(prev_offset, offset, code_alignment_factor)?; - if delta < 0x40 { - w.write_u8(constants::DW_CFA_advance_loc.0 | delta as u8)?; - } else if delta < 0x100 { - w.write_u8(constants::DW_CFA_advance_loc1.0)?; - w.write_u8(delta as u8)?; - } else if delta < 0x10000 { - 
w.write_u8(constants::DW_CFA_advance_loc2.0)?; - w.write_u16(delta as u16)?; - } else { - w.write_u8(constants::DW_CFA_advance_loc4.0)?; - w.write_u32(delta)?; - } - Ok(()) -} - -fn write_nop(w: &mut W, len: usize, align: u8) -> Result<()> { - debug_assert_eq!(align & (align - 1), 0); - let tail_len = (!len + 1) & (align as usize - 1); - for _ in 0..tail_len { - w.write_u8(constants::DW_CFA_nop.0)?; - } - Ok(()) -} - -fn factored_code_delta(prev_offset: u32, offset: u32, factor: u8) -> Result { - if offset < prev_offset { - return Err(Error::InvalidFrameCodeOffset(offset)); - } - let delta = offset - prev_offset; - let factor = u32::from(factor); - let factored_delta = delta / factor; - if delta != factored_delta * factor { - return Err(Error::InvalidFrameCodeOffset(offset)); - } - Ok(factored_delta) -} - -fn factored_data_offset(offset: i32, factor: i8) -> Result { - let factor = i32::from(factor); - let factored_offset = offset / factor; - if offset != factored_offset * factor { - return Err(Error::InvalidFrameDataOffset(offset)); - } - Ok(factored_offset) -} - -#[cfg(feature = "read")] -pub(crate) mod convert { - use super::*; - use crate::read::{self, Reader}; - use crate::write::{ConvertError, ConvertResult}; - use std::collections::{hash_map, HashMap}; - - impl FrameTable { - /// Create a frame table by reading the data in the given section. - /// - /// `convert_address` is a function to convert read addresses into the `Address` - /// type. For non-relocatable addresses, this function may simply return - /// `Address::Constant(address)`. For relocatable addresses, it is the caller's - /// responsibility to determine the symbol and addend corresponding to the address - /// and return `Address::Symbol { symbol, addend }`. - pub fn from( - frame: &Section, - convert_address: &dyn Fn(u64) -> Option
, - ) -> ConvertResult - where - R: Reader, - Section: read::UnwindSection, - Section::Offset: read::UnwindOffset, - { - let bases = read::BaseAddresses::default().set_eh_frame(0); - - let mut frame_table = FrameTable::default(); - - let mut cie_ids = HashMap::new(); - let mut entries = frame.entries(&bases); - while let Some(entry) = entries.next()? { - let partial = match entry { - read::CieOrFde::Cie(_) => continue, - read::CieOrFde::Fde(partial) => partial, - }; - - // TODO: is it worth caching the parsed CIEs? It would be better if FDEs only - // stored a reference. - let from_fde = partial.parse(Section::cie_from_offset)?; - let from_cie = from_fde.cie(); - let cie_id = match cie_ids.entry(from_cie.offset()) { - hash_map::Entry::Occupied(o) => *o.get(), - hash_map::Entry::Vacant(e) => { - let cie = - CommonInformationEntry::from(from_cie, frame, &bases, convert_address)?; - let cie_id = frame_table.add_cie(cie); - e.insert(cie_id); - cie_id - } - }; - let fde = FrameDescriptionEntry::from(&from_fde, frame, &bases, convert_address)?; - frame_table.add_fde(cie_id, fde); - } - - Ok(frame_table) - } - } - - impl CommonInformationEntry { - fn from( - from_cie: &read::CommonInformationEntry, - frame: &Section, - bases: &read::BaseAddresses, - convert_address: &dyn Fn(u64) -> Option
, - ) -> ConvertResult - where - R: Reader, - Section: read::UnwindSection, - Section::Offset: read::UnwindOffset, - { - let mut cie = CommonInformationEntry::new( - from_cie.encoding(), - from_cie.code_alignment_factor() as u8, - from_cie.data_alignment_factor() as i8, - from_cie.return_address_register(), - ); - - cie.personality = match from_cie.personality_with_encoding() { - // We treat these the same because the encoding already determines - // whether it is indirect. - Some((eh_pe, read::Pointer::Direct(p))) - | Some((eh_pe, read::Pointer::Indirect(p))) => { - let address = convert_address(p).ok_or(ConvertError::InvalidAddress)?; - Some((eh_pe, address)) - } - _ => None, - }; - cie.lsda_encoding = from_cie.lsda_encoding(); - cie.fde_address_encoding = from_cie - .fde_address_encoding() - .unwrap_or(constants::DW_EH_PE_absptr); - cie.signal_trampoline = from_cie.is_signal_trampoline(); - - let mut offset = 0; - let mut from_instructions = from_cie.instructions(frame, bases); - while let Some(from_instruction) = from_instructions.next()? { - if let Some(instruction) = CallFrameInstruction::from( - from_instruction, - from_cie, - convert_address, - &mut offset, - )? { - cie.instructions.push(instruction); - } - } - Ok(cie) - } - } - - impl FrameDescriptionEntry { - fn from( - from_fde: &read::FrameDescriptionEntry, - frame: &Section, - bases: &read::BaseAddresses, - convert_address: &dyn Fn(u64) -> Option
, - ) -> ConvertResult - where - R: Reader, - Section: read::UnwindSection, - Section::Offset: read::UnwindOffset, - { - let address = - convert_address(from_fde.initial_address()).ok_or(ConvertError::InvalidAddress)?; - let length = from_fde.len() as u32; - let mut fde = FrameDescriptionEntry::new(address, length); - - match from_fde.lsda() { - // We treat these the same because the encoding already determines - // whether it is indirect. - Some(read::Pointer::Direct(p)) | Some(read::Pointer::Indirect(p)) => { - let address = convert_address(p).ok_or(ConvertError::InvalidAddress)?; - fde.lsda = Some(address); - } - None => {} - } - - let from_cie = from_fde.cie(); - let mut offset = 0; - let mut from_instructions = from_fde.instructions(frame, bases); - while let Some(from_instruction) = from_instructions.next()? { - if let Some(instruction) = CallFrameInstruction::from( - from_instruction, - from_cie, - convert_address, - &mut offset, - )? { - fde.instructions.push((offset, instruction)); - } - } - - Ok(fde) - } - } - - impl CallFrameInstruction { - fn from>( - from_instruction: read::CallFrameInstruction, - from_cie: &read::CommonInformationEntry, - convert_address: &dyn Fn(u64) -> Option
, - offset: &mut u32, - ) -> ConvertResult> { - let convert_expression = - |x| Expression::from(x, from_cie.encoding(), None, None, None, convert_address); - // TODO: validate integer type conversions - Ok(Some(match from_instruction { - read::CallFrameInstruction::SetLoc { .. } => { - return Err(ConvertError::UnsupportedCfiInstruction); - } - read::CallFrameInstruction::AdvanceLoc { delta } => { - *offset += delta * from_cie.code_alignment_factor() as u32; - return Ok(None); - } - read::CallFrameInstruction::DefCfa { register, offset } => { - CallFrameInstruction::Cfa(register, offset as i32) - } - read::CallFrameInstruction::DefCfaSf { - register, - factored_offset, - } => { - let offset = factored_offset * from_cie.data_alignment_factor(); - CallFrameInstruction::Cfa(register, offset as i32) - } - read::CallFrameInstruction::DefCfaRegister { register } => { - CallFrameInstruction::CfaRegister(register) - } - - read::CallFrameInstruction::DefCfaOffset { offset } => { - CallFrameInstruction::CfaOffset(offset as i32) - } - read::CallFrameInstruction::DefCfaOffsetSf { factored_offset } => { - let offset = factored_offset * from_cie.data_alignment_factor(); - CallFrameInstruction::CfaOffset(offset as i32) - } - read::CallFrameInstruction::DefCfaExpression { expression } => { - CallFrameInstruction::CfaExpression(convert_expression(expression)?) 
- } - read::CallFrameInstruction::Undefined { register } => { - CallFrameInstruction::Undefined(register) - } - read::CallFrameInstruction::SameValue { register } => { - CallFrameInstruction::SameValue(register) - } - read::CallFrameInstruction::Offset { - register, - factored_offset, - } => { - let offset = factored_offset as i64 * from_cie.data_alignment_factor(); - CallFrameInstruction::Offset(register, offset as i32) - } - read::CallFrameInstruction::OffsetExtendedSf { - register, - factored_offset, - } => { - let offset = factored_offset * from_cie.data_alignment_factor(); - CallFrameInstruction::Offset(register, offset as i32) - } - read::CallFrameInstruction::ValOffset { - register, - factored_offset, - } => { - let offset = factored_offset as i64 * from_cie.data_alignment_factor(); - CallFrameInstruction::ValOffset(register, offset as i32) - } - read::CallFrameInstruction::ValOffsetSf { - register, - factored_offset, - } => { - let offset = factored_offset * from_cie.data_alignment_factor(); - CallFrameInstruction::ValOffset(register, offset as i32) - } - read::CallFrameInstruction::Register { - dest_register, - src_register, - } => CallFrameInstruction::Register(dest_register, src_register), - read::CallFrameInstruction::Expression { - register, - expression, - } => CallFrameInstruction::Expression(register, convert_expression(expression)?), - read::CallFrameInstruction::ValExpression { - register, - expression, - } => CallFrameInstruction::ValExpression(register, convert_expression(expression)?), - read::CallFrameInstruction::Restore { register } => { - CallFrameInstruction::Restore(register) - } - read::CallFrameInstruction::RememberState => CallFrameInstruction::RememberState, - read::CallFrameInstruction::RestoreState => CallFrameInstruction::RestoreState, - read::CallFrameInstruction::ArgsSize { size } => { - CallFrameInstruction::ArgsSize(size as u32) - } - read::CallFrameInstruction::NegateRaState => CallFrameInstruction::NegateRaState, - 
read::CallFrameInstruction::Nop => return Ok(None), - })) - } - } -} - -#[cfg(test)] -#[cfg(feature = "read")] -mod tests { - use super::*; - use crate::arch::X86_64; - use crate::read; - use crate::write::EndianVec; - use crate::{LittleEndian, Vendor}; - - #[test] - fn test_frame_table() { - for &version in &[1, 3, 4] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - let mut frames = FrameTable::default(); - - let cie1 = CommonInformationEntry::new(encoding, 1, 8, X86_64::RA); - let cie1_id = frames.add_cie(cie1.clone()); - assert_eq!(cie1_id, frames.add_cie(cie1.clone())); - - let mut cie2 = CommonInformationEntry::new(encoding, 1, 8, X86_64::RA); - cie2.lsda_encoding = Some(constants::DW_EH_PE_absptr); - cie2.personality = - Some((constants::DW_EH_PE_absptr, Address::Constant(0x1234))); - cie2.signal_trampoline = true; - let cie2_id = frames.add_cie(cie2.clone()); - assert_ne!(cie1_id, cie2_id); - assert_eq!(cie2_id, frames.add_cie(cie2.clone())); - - let fde1 = FrameDescriptionEntry::new(Address::Constant(0x1000), 0x10); - frames.add_fde(cie1_id, fde1.clone()); - - let fde2 = FrameDescriptionEntry::new(Address::Constant(0x2000), 0x20); - frames.add_fde(cie1_id, fde2.clone()); - - let mut fde3 = FrameDescriptionEntry::new(Address::Constant(0x3000), 0x30); - fde3.lsda = Some(Address::Constant(0x3300)); - frames.add_fde(cie2_id, fde3.clone()); - - let mut fde4 = FrameDescriptionEntry::new(Address::Constant(0x4000), 0x40); - fde4.lsda = Some(Address::Constant(0x4400)); - frames.add_fde(cie2_id, fde4.clone()); - - let mut cie3 = CommonInformationEntry::new(encoding, 1, 8, X86_64::RA); - cie3.fde_address_encoding = constants::DW_EH_PE_pcrel; - cie3.lsda_encoding = Some(constants::DW_EH_PE_pcrel); - cie3.personality = Some((constants::DW_EH_PE_pcrel, Address::Constant(0x1235))); - cie3.signal_trampoline = true; - let cie3_id = frames.add_cie(cie3.clone()); - 
assert_ne!(cie2_id, cie3_id); - assert_eq!(cie3_id, frames.add_cie(cie3.clone())); - - let mut fde5 = FrameDescriptionEntry::new(Address::Constant(0x5000), 0x50); - fde5.lsda = Some(Address::Constant(0x5500)); - frames.add_fde(cie3_id, fde5.clone()); - - // Test writing `.debug_frame`. - let mut debug_frame = DebugFrame::from(EndianVec::new(LittleEndian)); - frames.write_debug_frame(&mut debug_frame).unwrap(); - - let mut read_debug_frame = - read::DebugFrame::new(debug_frame.slice(), LittleEndian); - read_debug_frame.set_address_size(address_size); - let convert_frames = FrameTable::from(&read_debug_frame, &|address| { - Some(Address::Constant(address)) - }) - .unwrap(); - assert_eq!(frames.cies, convert_frames.cies); - assert_eq!(frames.fdes.len(), convert_frames.fdes.len()); - for (a, b) in frames.fdes.iter().zip(convert_frames.fdes.iter()) { - assert_eq!(a.1, b.1); - } - - if version == 1 { - // Test writing `.eh_frame`. - let mut eh_frame = EhFrame::from(EndianVec::new(LittleEndian)); - frames.write_eh_frame(&mut eh_frame).unwrap(); - - let mut read_eh_frame = read::EhFrame::new(eh_frame.slice(), LittleEndian); - read_eh_frame.set_address_size(address_size); - let convert_frames = FrameTable::from(&read_eh_frame, &|address| { - Some(Address::Constant(address)) - }) - .unwrap(); - assert_eq!(frames.cies, convert_frames.cies); - assert_eq!(frames.fdes.len(), convert_frames.fdes.len()); - for (a, b) in frames.fdes.iter().zip(convert_frames.fdes.iter()) { - assert_eq!(a.1, b.1); - } - } - } - } - } - } - - #[test] - fn test_frame_instruction() { - let mut expression = Expression::new(); - expression.op_constu(0); - - let cie_instructions = [ - CallFrameInstruction::Cfa(X86_64::RSP, 8), - CallFrameInstruction::Offset(X86_64::RA, -8), - ]; - - let fde_instructions = [ - (0, CallFrameInstruction::Cfa(X86_64::RSP, 0)), - (0, CallFrameInstruction::Cfa(X86_64::RSP, -8)), - (2, CallFrameInstruction::CfaRegister(X86_64::RBP)), - (4, CallFrameInstruction::CfaOffset(8)), - 
(4, CallFrameInstruction::CfaOffset(0)), - (4, CallFrameInstruction::CfaOffset(-8)), - (6, CallFrameInstruction::CfaExpression(expression.clone())), - (8, CallFrameInstruction::Restore(Register(1))), - (8, CallFrameInstruction::Restore(Register(101))), - (10, CallFrameInstruction::Undefined(Register(2))), - (12, CallFrameInstruction::SameValue(Register(3))), - (14, CallFrameInstruction::Offset(Register(4), 16)), - (14, CallFrameInstruction::Offset(Register(104), 16)), - (16, CallFrameInstruction::ValOffset(Register(5), -24)), - (16, CallFrameInstruction::ValOffset(Register(5), 24)), - (18, CallFrameInstruction::Register(Register(6), Register(7))), - ( - 20, - CallFrameInstruction::Expression(Register(8), expression.clone()), - ), - ( - 22, - CallFrameInstruction::ValExpression(Register(9), expression.clone()), - ), - (24 + 0x80, CallFrameInstruction::RememberState), - (26 + 0x280, CallFrameInstruction::RestoreState), - (28 + 0x20280, CallFrameInstruction::ArgsSize(23)), - ]; - - let fde_instructions_aarch64 = [(0, CallFrameInstruction::NegateRaState)]; - - for &version in &[1, 3, 4] { - for &address_size in &[4, 8] { - for &vendor in &[Vendor::Default, Vendor::AArch64] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - let mut frames = FrameTable::default(); - - let mut cie = CommonInformationEntry::new(encoding, 2, 8, X86_64::RA); - for i in &cie_instructions { - cie.add_instruction(i.clone()); - } - let cie_id = frames.add_cie(cie); - - let mut fde = FrameDescriptionEntry::new(Address::Constant(0x1000), 0x10); - for (o, i) in &fde_instructions { - fde.add_instruction(*o, i.clone()); - } - frames.add_fde(cie_id, fde); - - if vendor == Vendor::AArch64 { - let mut fde = - FrameDescriptionEntry::new(Address::Constant(0x2000), 0x10); - for (o, i) in &fde_instructions_aarch64 { - fde.add_instruction(*o, i.clone()); - } - frames.add_fde(cie_id, fde); - } - - let mut debug_frame = 
DebugFrame::from(EndianVec::new(LittleEndian)); - frames.write_debug_frame(&mut debug_frame).unwrap(); - - let mut read_debug_frame = - read::DebugFrame::new(debug_frame.slice(), LittleEndian); - read_debug_frame.set_address_size(address_size); - read_debug_frame.set_vendor(vendor); - let frames = FrameTable::from(&read_debug_frame, &|address| { - Some(Address::Constant(address)) - }) - .unwrap(); - - assert_eq!( - &frames.cies.get_index(0).unwrap().instructions, - &cie_instructions - ); - assert_eq!(&frames.fdes[0].1.instructions, &fde_instructions); - if vendor == Vendor::AArch64 { - assert_eq!(&frames.fdes[1].1.instructions, &fde_instructions_aarch64); - } - } - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/dwarf.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/dwarf.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/dwarf.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/dwarf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,138 +0,0 @@ -use alloc::vec::Vec; - -use crate::common::Encoding; -use crate::write::{ - AbbreviationTable, LineProgram, LineStringTable, Result, Sections, StringTable, Unit, - UnitTable, Writer, -}; - -/// Writable DWARF information for more than one unit. -#[derive(Debug, Default)] -pub struct Dwarf { - /// A table of units. These are primarily stored in the `.debug_info` section, - /// but they also contain information that is stored in other sections. - pub units: UnitTable, - - /// Extra line number programs that are not associated with a unit. - /// - /// These should only be used when generating DWARF5 line-only debug - /// information. - pub line_programs: Vec, - - /// A table of strings that will be stored in the `.debug_line_str` section. - pub line_strings: LineStringTable, - - /// A table of strings that will be stored in the `.debug_str` section. - pub strings: StringTable, -} - -impl Dwarf { - /// Create a new `Dwarf` instance. 
- #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Write the DWARF information to the given sections. - pub fn write(&mut self, sections: &mut Sections) -> Result<()> { - let line_strings = self.line_strings.write(&mut sections.debug_line_str)?; - let strings = self.strings.write(&mut sections.debug_str)?; - self.units.write(sections, &line_strings, &strings)?; - for line_program in &self.line_programs { - line_program.write( - &mut sections.debug_line, - line_program.encoding(), - &line_strings, - &strings, - )?; - } - Ok(()) - } -} - -/// Writable DWARF information for a single unit. -#[derive(Debug)] -pub struct DwarfUnit { - /// A unit. This is primarily stored in the `.debug_info` section, - /// but also contains information that is stored in other sections. - pub unit: Unit, - - /// A table of strings that will be stored in the `.debug_line_str` section. - pub line_strings: LineStringTable, - - /// A table of strings that will be stored in the `.debug_str` section. - pub strings: StringTable, -} - -impl DwarfUnit { - /// Create a new `DwarfUnit`. - /// - /// Note: you should set `self.unit.line_program` after creation. - /// This cannot be done earlier because it may need to reference - /// `self.line_strings`. - pub fn new(encoding: Encoding) -> Self { - let unit = Unit::new(encoding, LineProgram::none()); - DwarfUnit { - unit, - line_strings: LineStringTable::default(), - strings: StringTable::default(), - } - } - - /// Write the DWARf information to the given sections. - pub fn write(&mut self, sections: &mut Sections) -> Result<()> { - let line_strings = self.line_strings.write(&mut sections.debug_line_str)?; - let strings = self.strings.write(&mut sections.debug_str)?; - - let abbrev_offset = sections.debug_abbrev.offset(); - let mut abbrevs = AbbreviationTable::default(); - - self.unit.write( - sections, - abbrev_offset, - &mut abbrevs, - &line_strings, - &strings, - )?; - // None should exist because we didn't give out any UnitId. 
- assert!(sections.debug_info_refs.is_empty()); - assert!(sections.debug_loc_refs.is_empty()); - assert!(sections.debug_loclists_refs.is_empty()); - - abbrevs.write(&mut sections.debug_abbrev)?; - Ok(()) - } -} - -#[cfg(feature = "read")] -pub(crate) mod convert { - use super::*; - use crate::read::{self, Reader}; - use crate::write::{Address, ConvertResult}; - - impl Dwarf { - /// Create a `write::Dwarf` by converting a `read::Dwarf`. - /// - /// `convert_address` is a function to convert read addresses into the `Address` - /// type. For non-relocatable addresses, this function may simply return - /// `Address::Constant(address)`. For relocatable addresses, it is the caller's - /// responsibility to determine the symbol and addend corresponding to the address - /// and return `Address::Symbol { symbol, addend }`. - pub fn from>( - dwarf: &read::Dwarf, - convert_address: &dyn Fn(u64) -> Option
, - ) -> ConvertResult { - let mut line_strings = LineStringTable::default(); - let mut strings = StringTable::default(); - let units = UnitTable::from(dwarf, &mut line_strings, &mut strings, convert_address)?; - // TODO: convert the line programs that were not referenced by a unit. - let line_programs = Vec::new(); - Ok(Dwarf { - units, - line_programs, - line_strings, - strings, - }) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/endian_vec.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/endian_vec.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/endian_vec.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/endian_vec.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,117 +0,0 @@ -use alloc::vec::Vec; -use std::mem; - -use crate::endianity::Endianity; -use crate::write::{Error, Result, Writer}; - -/// A `Vec` with endianity metadata. -/// -/// This implements the `Writer` trait, which is used for all writing of DWARF sections. -#[derive(Debug, Clone)] -pub struct EndianVec -where - Endian: Endianity, -{ - vec: Vec, - endian: Endian, -} - -impl EndianVec -where - Endian: Endianity, -{ - /// Construct an empty `EndianVec` with the given endianity. - pub fn new(endian: Endian) -> EndianVec { - EndianVec { - vec: Vec::new(), - endian, - } - } - - /// Return a reference to the raw slice. - pub fn slice(&self) -> &[u8] { - &self.vec - } - - /// Convert into a `Vec`. - pub fn into_vec(self) -> Vec { - self.vec - } - - /// Take any written data out of the `EndianVec`, leaving an empty `Vec` in its place. 
- pub fn take(&mut self) -> Vec { - let mut vec = Vec::new(); - mem::swap(&mut self.vec, &mut vec); - vec - } -} - -impl Writer for EndianVec -where - Endian: Endianity, -{ - type Endian = Endian; - - #[inline] - fn endian(&self) -> Self::Endian { - self.endian - } - - #[inline] - fn len(&self) -> usize { - self.vec.len() - } - - fn write(&mut self, bytes: &[u8]) -> Result<()> { - self.vec.extend(bytes); - Ok(()) - } - - fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> { - if offset > self.vec.len() { - return Err(Error::OffsetOutOfBounds); - } - let to = &mut self.vec[offset..]; - if bytes.len() > to.len() { - return Err(Error::LengthOutOfBounds); - } - let to = &mut to[..bytes.len()]; - to.copy_from_slice(bytes); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::LittleEndian; - - #[test] - fn test_endian_vec() { - let mut w = EndianVec::new(LittleEndian); - assert_eq!(w.endian(), LittleEndian); - assert_eq!(w.len(), 0); - - w.write(&[1, 2]).unwrap(); - assert_eq!(w.slice(), &[1, 2]); - assert_eq!(w.len(), 2); - - w.write(&[3, 4, 5]).unwrap(); - assert_eq!(w.slice(), &[1, 2, 3, 4, 5]); - assert_eq!(w.len(), 5); - - w.write_at(0, &[6, 7]).unwrap(); - assert_eq!(w.slice(), &[6, 7, 3, 4, 5]); - assert_eq!(w.len(), 5); - - w.write_at(3, &[8, 9]).unwrap(); - assert_eq!(w.slice(), &[6, 7, 3, 8, 9]); - assert_eq!(w.len(), 5); - - assert_eq!(w.write_at(4, &[6, 7]), Err(Error::LengthOutOfBounds)); - assert_eq!(w.write_at(5, &[6, 7]), Err(Error::LengthOutOfBounds)); - assert_eq!(w.write_at(6, &[6, 7]), Err(Error::OffsetOutOfBounds)); - - assert_eq!(w.into_vec(), vec![6, 7, 3, 8, 9]); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/line.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/line.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/line.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/line.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1957 +0,0 @@ -use alloc::vec::Vec; 
-use indexmap::{IndexMap, IndexSet}; -use std::ops::{Deref, DerefMut}; - -use crate::common::{DebugLineOffset, Encoding, Format, LineEncoding, SectionId}; -use crate::constants; -use crate::leb128; -use crate::write::{ - Address, DebugLineStrOffsets, DebugStrOffsets, Error, LineStringId, LineStringTable, Result, - Section, StringId, Writer, -}; - -/// The number assigned to the first special opcode. -// -// We output all instructions for all DWARF versions, since readers -// should be able to ignore instructions they don't support. -const OPCODE_BASE: u8 = 13; - -/// A line number program. -#[derive(Debug, Clone)] -pub struct LineProgram { - /// True if this line program was created with `LineProgram::none()`. - none: bool, - encoding: Encoding, - line_encoding: LineEncoding, - - /// A list of source directory path names. - /// - /// If a path is relative, then the directory is located relative to the working - /// directory of the compilation unit. - /// - /// The first entry is for the working directory of the compilation unit. - directories: IndexSet, - - /// A list of source file entries. - /// - /// Each entry has a path name and a directory. - /// - /// If a path is a relative, then the file is located relative to the - /// directory. Otherwise the directory is meaningless. - /// - /// Does not include comp_file, even for version >= 5. - files: IndexMap<(LineString, DirectoryId), FileInfo>, - - /// The primary source file of the compilation unit. - /// This is required for version >= 5, but we never reference it elsewhere - /// because DWARF defines DW_AT_decl_file=0 to mean not specified. - comp_file: (LineString, FileInfo), - - /// True if the file entries may have valid timestamps. - /// - /// Entries may still have a timestamp of 0 even if this is set. - /// For version <= 4, this is ignored. - /// For version 5, this controls whether to emit `DW_LNCT_timestamp`. - pub file_has_timestamp: bool, - - /// True if the file entries may have valid sizes. 
- /// - /// Entries may still have a size of 0 even if this is set. - /// For version <= 4, this is ignored. - /// For version 5, this controls whether to emit `DW_LNCT_size`. - pub file_has_size: bool, - - /// True if the file entries have valid MD5 checksums. - /// - /// For version <= 4, this is ignored. - /// For version 5, this controls whether to emit `DW_LNCT_MD5`. - pub file_has_md5: bool, - - prev_row: LineRow, - row: LineRow, - // TODO: this probably should be either rows or sequences instead - instructions: Vec, - in_sequence: bool, -} - -impl LineProgram { - /// Create a new `LineProgram`. - /// - /// `comp_dir` defines the working directory of the compilation unit, - /// and must be the same as the `DW_AT_comp_dir` attribute - /// of the compilation unit DIE. - /// - /// `comp_file` and `comp_file_info` define the primary source file - /// of the compilation unit and must be the same as the `DW_AT_name` - /// attribute of the compilation unit DIE. - /// - /// # Panics - /// - /// Panics if `line_encoding.line_base` > 0. - /// - /// Panics if `line_encoding.line_base` + `line_encoding.line_range` <= 0. - /// - /// Panics if `comp_dir` is empty or contains a null byte. - /// - /// Panics if `comp_file` is empty or contains a null byte. - pub fn new( - encoding: Encoding, - line_encoding: LineEncoding, - comp_dir: LineString, - comp_file: LineString, - comp_file_info: Option, - ) -> LineProgram { - // We require a special opcode for a line advance of 0. - // See the debug_asserts in generate_row(). 
- assert!(line_encoding.line_base <= 0); - assert!(line_encoding.line_base + line_encoding.line_range as i8 > 0); - let mut program = LineProgram { - none: false, - encoding, - line_encoding, - directories: IndexSet::new(), - files: IndexMap::new(), - comp_file: (comp_file, comp_file_info.unwrap_or_default()), - prev_row: LineRow::initial_state(line_encoding), - row: LineRow::initial_state(line_encoding), - instructions: Vec::new(), - in_sequence: false, - file_has_timestamp: false, - file_has_size: false, - file_has_md5: false, - }; - // For all DWARF versions, directory index 0 is comp_dir. - // For version <= 4, the entry is implicit. We still add - // it here so that we use it, but we don't emit it. - program.add_directory(comp_dir); - program - } - - /// Create a new `LineProgram` with no fields set. - /// - /// This can be used when the `LineProgram` will not be used. - /// - /// You should not attempt to add files or line instructions to - /// this line program, or write it to the `.debug_line` section. - pub fn none() -> Self { - let line_encoding = LineEncoding::default(); - LineProgram { - none: true, - encoding: Encoding { - format: Format::Dwarf32, - version: 2, - address_size: 0, - }, - line_encoding, - directories: IndexSet::new(), - files: IndexMap::new(), - comp_file: (LineString::String(Vec::new()), FileInfo::default()), - prev_row: LineRow::initial_state(line_encoding), - row: LineRow::initial_state(line_encoding), - instructions: Vec::new(), - in_sequence: false, - file_has_timestamp: false, - file_has_size: false, - file_has_md5: false, - } - } - - /// Return true if this line program was created with `LineProgram::none()`. - #[inline] - pub fn is_none(&self) -> bool { - self.none - } - - /// Return the encoding parameters for this line program. - #[inline] - pub fn encoding(&self) -> Encoding { - self.encoding - } - - /// Return the DWARF version for this line program. 
- #[inline] - pub fn version(&self) -> u16 { - self.encoding.version - } - - /// Return the address size in bytes for this line program. - #[inline] - pub fn address_size(&self) -> u8 { - self.encoding.address_size - } - - /// Return the DWARF format for this line program. - #[inline] - pub fn format(&self) -> Format { - self.encoding.format - } - - /// Return the id for the working directory of the compilation unit. - #[inline] - pub fn default_directory(&self) -> DirectoryId { - DirectoryId(0) - } - - /// Add a directory entry and return its id. - /// - /// If the directory already exists, then return the id of the existing entry. - /// - /// If the path is relative, then the directory is located relative to the working - /// directory of the compilation unit. - /// - /// # Panics - /// - /// Panics if `directory` is empty or contains a null byte. - pub fn add_directory(&mut self, directory: LineString) -> DirectoryId { - if let LineString::String(ref val) = directory { - // For DWARF version <= 4, directories must not be empty. - // The first directory isn't emitted so skip the check for it. - if self.encoding.version <= 4 && !self.directories.is_empty() { - assert!(!val.is_empty()); - } - assert!(!val.contains(&0)); - } - let (index, _) = self.directories.insert_full(directory); - DirectoryId(index) - } - - /// Get a reference to a directory entry. - /// - /// # Panics - /// - /// Panics if `id` is invalid. - pub fn get_directory(&self, id: DirectoryId) -> &LineString { - self.directories.get_index(id.0).unwrap() - } - - /// Add a file entry and return its id. - /// - /// If the file already exists, then return the id of the existing entry. - /// - /// If the file path is relative, then the file is located relative - /// to the directory. Otherwise the directory is meaningless, but it - /// is still used as a key for file entries. - /// - /// If `info` is `None`, then new entries are assigned - /// default information, and existing entries are unmodified. 
- /// - /// If `info` is not `None`, then it is always assigned to the - /// entry, even if the entry already exists. - /// - /// # Panics - /// - /// Panics if 'file' is empty or contains a null byte. - pub fn add_file( - &mut self, - file: LineString, - directory: DirectoryId, - info: Option, - ) -> FileId { - if let LineString::String(ref val) = file { - assert!(!val.is_empty()); - assert!(!val.contains(&0)); - } - - let key = (file, directory); - let index = if let Some(info) = info { - let (index, _) = self.files.insert_full(key, info); - index - } else { - let entry = self.files.entry(key); - let index = entry.index(); - entry.or_default(); - index - }; - FileId::new(index) - } - - /// Get a reference to a file entry. - /// - /// # Panics - /// - /// Panics if `id` is invalid. - pub fn get_file(&self, id: FileId) -> (&LineString, DirectoryId) { - match id.index() { - None => (&self.comp_file.0, DirectoryId(0)), - Some(index) => self - .files - .get_index(index) - .map(|entry| (&(entry.0).0, (entry.0).1)) - .unwrap(), - } - } - - /// Get a reference to the info for a file entry. - /// - /// # Panics - /// - /// Panics if `id` is invalid. - pub fn get_file_info(&self, id: FileId) -> &FileInfo { - match id.index() { - None => &self.comp_file.1, - Some(index) => self.files.get_index(index).map(|entry| entry.1).unwrap(), - } - } - - /// Get a mutable reference to the info for a file entry. - /// - /// # Panics - /// - /// Panics if `id` is invalid. - pub fn get_file_info_mut(&mut self, id: FileId) -> &mut FileInfo { - match id.index() { - None => &mut self.comp_file.1, - Some(index) => self - .files - .get_index_mut(index) - .map(|entry| entry.1) - .unwrap(), - } - } - - /// Begin a new sequence and set its base address. - /// - /// # Panics - /// - /// Panics if a sequence has already begun. - pub fn begin_sequence(&mut self, address: Option
) { - assert!(!self.in_sequence); - self.in_sequence = true; - if let Some(address) = address { - self.instructions.push(LineInstruction::SetAddress(address)); - } - } - - /// End the sequence, and reset the row to its default values. - /// - /// Only the `address_offset` and op_index` fields of the current row are used. - /// - /// # Panics - /// - /// Panics if a sequence has not begun. - pub fn end_sequence(&mut self, address_offset: u64) { - assert!(self.in_sequence); - self.in_sequence = false; - self.row.address_offset = address_offset; - let op_advance = self.op_advance(); - if op_advance != 0 { - self.instructions - .push(LineInstruction::AdvancePc(op_advance)); - } - self.instructions.push(LineInstruction::EndSequence); - self.prev_row = LineRow::initial_state(self.line_encoding); - self.row = LineRow::initial_state(self.line_encoding); - } - - /// Return true if a sequence has begun. - #[inline] - pub fn in_sequence(&self) -> bool { - self.in_sequence - } - - /// Returns a reference to the data for the current row. - #[inline] - pub fn row(&mut self) -> &mut LineRow { - &mut self.row - } - - /// Generates the line number information instructions for the current row. - /// - /// After the instructions are generated, it sets `discriminator` to 0, and sets - /// `basic_block`, `prologue_end`, and `epilogue_begin` to false. - /// - /// # Panics - /// - /// Panics if a sequence has not begun. - /// Panics if the address_offset decreases. - pub fn generate_row(&mut self) { - assert!(self.in_sequence); - - // Output fields that are reset on every row. 
- if self.row.discriminator != 0 { - self.instructions - .push(LineInstruction::SetDiscriminator(self.row.discriminator)); - self.row.discriminator = 0; - } - if self.row.basic_block { - self.instructions.push(LineInstruction::SetBasicBlock); - self.row.basic_block = false; - } - if self.row.prologue_end { - self.instructions.push(LineInstruction::SetPrologueEnd); - self.row.prologue_end = false; - } - if self.row.epilogue_begin { - self.instructions.push(LineInstruction::SetEpilogueBegin); - self.row.epilogue_begin = false; - } - - // Output fields that are not reset on every row. - if self.row.is_statement != self.prev_row.is_statement { - self.instructions.push(LineInstruction::NegateStatement); - } - if self.row.file != self.prev_row.file { - self.instructions - .push(LineInstruction::SetFile(self.row.file)); - } - if self.row.column != self.prev_row.column { - self.instructions - .push(LineInstruction::SetColumn(self.row.column)); - } - if self.row.isa != self.prev_row.isa { - self.instructions - .push(LineInstruction::SetIsa(self.row.isa)); - } - - // Advance the line, address, and operation index. - let line_base = i64::from(self.line_encoding.line_base) as u64; - let line_range = u64::from(self.line_encoding.line_range); - let line_advance = self.row.line as i64 - self.prev_row.line as i64; - let op_advance = self.op_advance(); - - // Default to special advances of 0. 
- let special_base = u64::from(OPCODE_BASE); - // TODO: handle lack of special opcodes for 0 line advance - debug_assert!(self.line_encoding.line_base <= 0); - debug_assert!(self.line_encoding.line_base + self.line_encoding.line_range as i8 >= 0); - let special_default = special_base.wrapping_sub(line_base); - let mut special = special_default; - let mut use_special = false; - - if line_advance != 0 { - let special_line = (line_advance as u64).wrapping_sub(line_base); - if special_line < line_range { - special = special_base + special_line; - use_special = true; - } else { - self.instructions - .push(LineInstruction::AdvanceLine(line_advance)); - } - } - - if op_advance != 0 { - // Using ConstAddPc can save a byte. - let (special_op_advance, const_add_pc) = if special + op_advance * line_range <= 255 { - (op_advance, false) - } else { - let op_range = (255 - special_base) / line_range; - (op_advance - op_range, true) - }; - - let special_op = special_op_advance * line_range; - if special + special_op <= 255 { - special += special_op; - use_special = true; - if const_add_pc { - self.instructions.push(LineInstruction::ConstAddPc); - } - } else { - self.instructions - .push(LineInstruction::AdvancePc(op_advance)); - } - } - - if use_special && special != special_default { - debug_assert!(special >= special_base); - debug_assert!(special <= 255); - self.instructions - .push(LineInstruction::Special(special as u8)); - } else { - self.instructions.push(LineInstruction::Copy); - } - - self.prev_row = self.row; - } - - fn op_advance(&self) -> u64 { - debug_assert!(self.row.address_offset >= self.prev_row.address_offset); - let mut address_advance = self.row.address_offset - self.prev_row.address_offset; - if self.line_encoding.minimum_instruction_length != 1 { - debug_assert_eq!( - self.row.address_offset % u64::from(self.line_encoding.minimum_instruction_length), - 0 - ); - address_advance /= u64::from(self.line_encoding.minimum_instruction_length); - } - address_advance 
* u64::from(self.line_encoding.maximum_operations_per_instruction) - + self.row.op_index - - self.prev_row.op_index - } - - /// Returns true if the line number program has no instructions. - /// - /// Does not check the file or directory entries. - #[inline] - pub fn is_empty(&self) -> bool { - self.instructions.is_empty() - } - - /// Write the line number program to the given section. - /// - /// # Panics - /// - /// Panics if `self.is_none()`. - pub fn write( - &self, - w: &mut DebugLine, - encoding: Encoding, - debug_line_str_offsets: &DebugLineStrOffsets, - debug_str_offsets: &DebugStrOffsets, - ) -> Result { - assert!(!self.is_none()); - - if encoding.version < self.version() - || encoding.format != self.format() - || encoding.address_size != self.address_size() - { - return Err(Error::IncompatibleLineProgramEncoding); - } - - let offset = w.offset(); - - let length_offset = w.write_initial_length(self.format())?; - let length_base = w.len(); - - if self.version() < 2 || self.version() > 5 { - return Err(Error::UnsupportedVersion(self.version())); - } - w.write_u16(self.version())?; - - if self.version() >= 5 { - w.write_u8(self.address_size())?; - // Segment selector size. - w.write_u8(0)?; - } - - let header_length_offset = w.len(); - w.write_udata(0, self.format().word_size())?; - let header_length_base = w.len(); - - w.write_u8(self.line_encoding.minimum_instruction_length)?; - if self.version() >= 4 { - w.write_u8(self.line_encoding.maximum_operations_per_instruction)?; - } else if self.line_encoding.maximum_operations_per_instruction != 1 { - return Err(Error::NeedVersion(4)); - }; - w.write_u8(if self.line_encoding.default_is_stmt { - 1 - } else { - 0 - })?; - w.write_u8(self.line_encoding.line_base as u8)?; - w.write_u8(self.line_encoding.line_range)?; - w.write_u8(OPCODE_BASE)?; - w.write(&[0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1])?; - - if self.version() <= 4 { - // The first directory is stored as DW_AT_comp_dir. 
- for dir in self.directories.iter().skip(1) { - dir.write( - w, - constants::DW_FORM_string, - self.encoding, - debug_line_str_offsets, - debug_str_offsets, - )?; - } - w.write_u8(0)?; - - for ((file, dir), info) in self.files.iter() { - file.write( - w, - constants::DW_FORM_string, - self.encoding, - debug_line_str_offsets, - debug_str_offsets, - )?; - w.write_uleb128(dir.0 as u64)?; - w.write_uleb128(info.timestamp)?; - w.write_uleb128(info.size)?; - } - w.write_u8(0)?; - } else { - // Directory entry formats (only ever 1). - w.write_u8(1)?; - w.write_uleb128(u64::from(constants::DW_LNCT_path.0))?; - let dir_form = self.directories.get_index(0).unwrap().form(); - w.write_uleb128(dir_form.0.into())?; - - // Directory entries. - w.write_uleb128(self.directories.len() as u64)?; - for dir in self.directories.iter() { - dir.write( - w, - dir_form, - self.encoding, - debug_line_str_offsets, - debug_str_offsets, - )?; - } - - // File name entry formats. - let count = 2 - + if self.file_has_timestamp { 1 } else { 0 } - + if self.file_has_size { 1 } else { 0 } - + if self.file_has_md5 { 1 } else { 0 }; - w.write_u8(count)?; - w.write_uleb128(u64::from(constants::DW_LNCT_path.0))?; - let file_form = self.comp_file.0.form(); - w.write_uleb128(file_form.0.into())?; - w.write_uleb128(u64::from(constants::DW_LNCT_directory_index.0))?; - w.write_uleb128(constants::DW_FORM_udata.0.into())?; - if self.file_has_timestamp { - w.write_uleb128(u64::from(constants::DW_LNCT_timestamp.0))?; - w.write_uleb128(constants::DW_FORM_udata.0.into())?; - } - if self.file_has_size { - w.write_uleb128(u64::from(constants::DW_LNCT_size.0))?; - w.write_uleb128(constants::DW_FORM_udata.0.into())?; - } - if self.file_has_md5 { - w.write_uleb128(u64::from(constants::DW_LNCT_MD5.0))?; - w.write_uleb128(constants::DW_FORM_data16.0.into())?; - } - - // File name entries. 
- w.write_uleb128(self.files.len() as u64 + 1)?; - let mut write_file = |file: &LineString, dir: DirectoryId, info: &FileInfo| { - file.write( - w, - file_form, - self.encoding, - debug_line_str_offsets, - debug_str_offsets, - )?; - w.write_uleb128(dir.0 as u64)?; - if self.file_has_timestamp { - w.write_uleb128(info.timestamp)?; - } - if self.file_has_size { - w.write_uleb128(info.size)?; - } - if self.file_has_md5 { - w.write(&info.md5)?; - } - Ok(()) - }; - write_file(&self.comp_file.0, DirectoryId(0), &self.comp_file.1)?; - for ((file, dir), info) in self.files.iter() { - write_file(file, *dir, info)?; - } - } - - let header_length = (w.len() - header_length_base) as u64; - w.write_udata_at( - header_length_offset, - header_length, - self.format().word_size(), - )?; - - for instruction in &self.instructions { - instruction.write(w, self.address_size())?; - } - - let length = (w.len() - length_base) as u64; - w.write_initial_length_at(length_offset, length, self.format())?; - - Ok(offset) - } -} - -/// A row in the line number table that corresponds to a machine instruction. -#[derive(Debug, Clone, Copy)] -pub struct LineRow { - /// The offset of the instruction from the start address of the sequence. - pub address_offset: u64, - /// The index of an operation within a VLIW instruction. - /// - /// The index of the first operation is 0. - /// Set to 0 for non-VLIW instructions. - pub op_index: u64, - - /// The source file corresponding to the instruction. - pub file: FileId, - /// The line number within the source file. - /// - /// Lines are numbered beginning at 1. Set to 0 if there is no source line. - pub line: u64, - /// The column number within the source line. - /// - /// Columns are numbered beginning at 1. Set to 0 for the "left edge" of the line. - pub column: u64, - /// An additional discriminator used to distinguish between source locations. - /// This value is assigned arbitrarily by the DWARF producer. 
- pub discriminator: u64, - - /// Set to true if the instruction is a recommended breakpoint for a statement. - pub is_statement: bool, - /// Set to true if the instruction is the beginning of a basic block. - pub basic_block: bool, - /// Set to true if the instruction is a recommended breakpoint at the entry of a - /// function. - pub prologue_end: bool, - /// Set to true if the instruction is a recommended breakpoint prior to the exit of - /// a function. - pub epilogue_begin: bool, - - /// The instruction set architecture of the instruction. - /// - /// Set to 0 for the default ISA. Other values are defined by the architecture ABI. - pub isa: u64, -} - -impl LineRow { - /// Return the initial state as specified in the DWARF standard. - fn initial_state(line_encoding: LineEncoding) -> Self { - LineRow { - address_offset: 0, - op_index: 0, - - file: FileId::initial_state(), - line: 1, - column: 0, - discriminator: 0, - - is_statement: line_encoding.default_is_stmt, - basic_block: false, - prologue_end: false, - epilogue_begin: false, - - isa: 0, - } - } -} - -/// An instruction in a line number program. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum LineInstruction { - // Special opcodes - Special(u8), - - // Standard opcodes - Copy, - AdvancePc(u64), - AdvanceLine(i64), - SetFile(FileId), - SetColumn(u64), - NegateStatement, - SetBasicBlock, - ConstAddPc, - // DW_LNS_fixed_advance_pc is not supported. - SetPrologueEnd, - SetEpilogueBegin, - SetIsa(u64), - - // Extended opcodes - EndSequence, - // TODO: this doubles the size of this enum. - SetAddress(Address), - // DW_LNE_define_file is not supported. - SetDiscriminator(u64), -} - -impl LineInstruction { - /// Write the line number instruction to the given section. 
- fn write(self, w: &mut DebugLine, address_size: u8) -> Result<()> { - use self::LineInstruction::*; - match self { - Special(val) => w.write_u8(val)?, - Copy => w.write_u8(constants::DW_LNS_copy.0)?, - AdvancePc(val) => { - w.write_u8(constants::DW_LNS_advance_pc.0)?; - w.write_uleb128(val)?; - } - AdvanceLine(val) => { - w.write_u8(constants::DW_LNS_advance_line.0)?; - w.write_sleb128(val)?; - } - SetFile(val) => { - w.write_u8(constants::DW_LNS_set_file.0)?; - w.write_uleb128(val.raw())?; - } - SetColumn(val) => { - w.write_u8(constants::DW_LNS_set_column.0)?; - w.write_uleb128(val)?; - } - NegateStatement => w.write_u8(constants::DW_LNS_negate_stmt.0)?, - SetBasicBlock => w.write_u8(constants::DW_LNS_set_basic_block.0)?, - ConstAddPc => w.write_u8(constants::DW_LNS_const_add_pc.0)?, - SetPrologueEnd => w.write_u8(constants::DW_LNS_set_prologue_end.0)?, - SetEpilogueBegin => w.write_u8(constants::DW_LNS_set_epilogue_begin.0)?, - SetIsa(val) => { - w.write_u8(constants::DW_LNS_set_isa.0)?; - w.write_uleb128(val)?; - } - EndSequence => { - w.write_u8(0)?; - w.write_uleb128(1)?; - w.write_u8(constants::DW_LNE_end_sequence.0)?; - } - SetAddress(address) => { - w.write_u8(0)?; - w.write_uleb128(1 + u64::from(address_size))?; - w.write_u8(constants::DW_LNE_set_address.0)?; - w.write_address(address, address_size)?; - } - SetDiscriminator(val) => { - let mut bytes = [0u8; 10]; - // bytes is long enough so this will never fail. - let len = leb128::write::unsigned(&mut { &mut bytes[..] }, val).unwrap(); - w.write_u8(0)?; - w.write_uleb128(1 + len as u64)?; - w.write_u8(constants::DW_LNE_set_discriminator.0)?; - w.write(&bytes[..len])?; - } - } - Ok(()) - } -} - -/// A string value for use in defining paths in line number programs. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum LineString { - /// A slice of bytes representing a string. Must not include null bytes. - /// Not guaranteed to be UTF-8 or anything like that. 
- String(Vec), - - /// A reference to a string in the `.debug_str` section. - StringRef(StringId), - - /// A reference to a string in the `.debug_line_str` section. - LineStringRef(LineStringId), -} - -impl LineString { - /// Create a `LineString` using the normal form for the given encoding. - pub fn new(val: T, encoding: Encoding, line_strings: &mut LineStringTable) -> Self - where - T: Into>, - { - let val = val.into(); - if encoding.version <= 4 { - LineString::String(val) - } else { - LineString::LineStringRef(line_strings.add(val)) - } - } - - fn form(&self) -> constants::DwForm { - match *self { - LineString::String(..) => constants::DW_FORM_string, - LineString::StringRef(..) => constants::DW_FORM_strp, - LineString::LineStringRef(..) => constants::DW_FORM_line_strp, - } - } - - fn write( - &self, - w: &mut DebugLine, - form: constants::DwForm, - encoding: Encoding, - debug_line_str_offsets: &DebugLineStrOffsets, - debug_str_offsets: &DebugStrOffsets, - ) -> Result<()> { - if form != self.form() { - return Err(Error::LineStringFormMismatch); - } - - match *self { - LineString::String(ref val) => { - if encoding.version <= 4 { - debug_assert!(!val.is_empty()); - } - w.write(val)?; - w.write_u8(0)?; - } - LineString::StringRef(val) => { - if encoding.version < 5 { - return Err(Error::NeedVersion(5)); - } - w.write_offset( - debug_str_offsets.get(val).0, - SectionId::DebugStr, - encoding.format.word_size(), - )?; - } - LineString::LineStringRef(val) => { - if encoding.version < 5 { - return Err(Error::NeedVersion(5)); - } - w.write_offset( - debug_line_str_offsets.get(val).0, - SectionId::DebugLineStr, - encoding.format.word_size(), - )?; - } - } - Ok(()) - } -} - -/// An identifier for a directory in a `LineProgram`. -/// -/// Defaults to the working directory of the compilation unit. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct DirectoryId(usize); - -// Force FileId access via the methods. 
-mod id { - /// An identifier for a file in a `LineProgram`. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] - pub struct FileId(usize); - - impl FileId { - /// Create a FileId given an index into `LineProgram::files`. - pub(crate) fn new(index: usize) -> Self { - FileId(index + 1) - } - - /// The index of the file in `LineProgram::files`. - pub(super) fn index(self) -> Option { - if self.0 == 0 { - None - } else { - Some(self.0 - 1) - } - } - - /// The initial state of the file register. - pub(super) fn initial_state() -> Self { - FileId(1) - } - - /// The raw value used when writing. - pub(crate) fn raw(self) -> u64 { - self.0 as u64 - } - - /// The id for file index 0 in DWARF version 5. - /// Only used when converting. - // Used for tests only. - #[allow(unused)] - pub(super) fn zero() -> Self { - FileId(0) - } - } -} -pub use self::id::*; - -/// Extra information for file in a `LineProgram`. -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] -pub struct FileInfo { - /// The implementation defined timestamp of the last modification of the file, - /// or 0 if not available. - pub timestamp: u64, - - /// The size of the file in bytes, or 0 if not available. - pub size: u64, - - /// A 16-byte MD5 digest of the file contents. - /// - /// Only used if version >= 5 and `LineProgram::file_has_md5` is `true`. - pub md5: [u8; 16], -} - -define_section!( - DebugLine, - DebugLineOffset, - "A writable `.debug_line` section." -); - -#[cfg(feature = "read")] -mod convert { - use super::*; - use crate::read::{self, Reader}; - use crate::write::{self, ConvertError, ConvertResult}; - - impl LineProgram { - /// Create a line number program by reading the data from the given program. - /// - /// Return the program and a mapping from file index to `FileId`. - pub fn from>( - mut from_program: read::IncompleteLineProgram, - dwarf: &read::Dwarf, - line_strings: &mut write::LineStringTable, - strings: &mut write::StringTable, - convert_address: &dyn Fn(u64) -> Option
, - ) -> ConvertResult<(LineProgram, Vec)> { - // Create mappings in case the source has duplicate files or directories. - let mut dirs = Vec::new(); - let mut files = Vec::new(); - - let mut program = { - let from_header = from_program.header(); - let encoding = from_header.encoding(); - - let comp_dir = match from_header.directory(0) { - Some(comp_dir) => LineString::from(comp_dir, dwarf, line_strings, strings)?, - None => LineString::new(&[][..], encoding, line_strings), - }; - - let (comp_name, comp_file_info) = match from_header.file(0) { - Some(comp_file) => { - if comp_file.directory_index() != 0 { - return Err(ConvertError::InvalidDirectoryIndex); - } - ( - LineString::from(comp_file.path_name(), dwarf, line_strings, strings)?, - Some(FileInfo { - timestamp: comp_file.timestamp(), - size: comp_file.size(), - md5: *comp_file.md5(), - }), - ) - } - None => (LineString::new(&[][..], encoding, line_strings), None), - }; - - if from_header.line_base() > 0 { - return Err(ConvertError::InvalidLineBase); - } - let mut program = LineProgram::new( - encoding, - from_header.line_encoding(), - comp_dir, - comp_name, - comp_file_info, - ); - - let file_skip; - if from_header.version() <= 4 { - // The first directory is implicit. - dirs.push(DirectoryId(0)); - // A file index of 0 is invalid for version <= 4, but putting - // something there makes the indexing easier. - file_skip = 0; - files.push(FileId::zero()); - } else { - // We don't add the first file to `files`, but still allow - // it to be referenced from converted instructions. 
- file_skip = 1; - files.push(FileId::zero()); - } - - for from_dir in from_header.include_directories() { - let from_dir = - LineString::from(from_dir.clone(), dwarf, line_strings, strings)?; - dirs.push(program.add_directory(from_dir)); - } - - program.file_has_timestamp = from_header.file_has_timestamp(); - program.file_has_size = from_header.file_has_size(); - program.file_has_md5 = from_header.file_has_md5(); - for from_file in from_header.file_names().iter().skip(file_skip) { - let from_name = - LineString::from(from_file.path_name(), dwarf, line_strings, strings)?; - let from_dir = from_file.directory_index(); - if from_dir >= dirs.len() as u64 { - return Err(ConvertError::InvalidDirectoryIndex); - } - let from_dir = dirs[from_dir as usize]; - let from_info = Some(FileInfo { - timestamp: from_file.timestamp(), - size: from_file.size(), - md5: *from_file.md5(), - }); - files.push(program.add_file(from_name, from_dir, from_info)); - } - - program - }; - - // We can't use the `from_program.rows()` because that wouldn't let - // us preserve address relocations. - let mut from_row = read::LineRow::new(from_program.header()); - let mut instructions = from_program.header().instructions(); - let mut address = None; - while let Some(instruction) = instructions.next_instruction(from_program.header())? 
{ - match instruction { - read::LineInstruction::SetAddress(val) => { - if program.in_sequence() { - return Err(ConvertError::UnsupportedLineInstruction); - } - match convert_address(val) { - Some(val) => address = Some(val), - None => return Err(ConvertError::InvalidAddress), - } - from_row.execute(read::LineInstruction::SetAddress(0), &mut from_program); - } - read::LineInstruction::DefineFile(_) => { - return Err(ConvertError::UnsupportedLineInstruction); - } - _ => { - if from_row.execute(instruction, &mut from_program) { - if !program.in_sequence() { - program.begin_sequence(address); - address = None; - } - if from_row.end_sequence() { - program.end_sequence(from_row.address()); - } else { - program.row().address_offset = from_row.address(); - program.row().op_index = from_row.op_index(); - program.row().file = { - let file = from_row.file_index(); - if file >= files.len() as u64 { - return Err(ConvertError::InvalidFileIndex); - } - if file == 0 && program.version() <= 4 { - return Err(ConvertError::InvalidFileIndex); - } - files[file as usize] - }; - program.row().line = match from_row.line() { - Some(line) => line.get(), - None => 0, - }; - program.row().column = match from_row.column() { - read::ColumnType::LeftEdge => 0, - read::ColumnType::Column(val) => val.get(), - }; - program.row().discriminator = from_row.discriminator(); - program.row().is_statement = from_row.is_stmt(); - program.row().basic_block = from_row.basic_block(); - program.row().prologue_end = from_row.prologue_end(); - program.row().epilogue_begin = from_row.epilogue_begin(); - program.row().isa = from_row.isa(); - program.generate_row(); - } - from_row.reset(from_program.header()); - } - } - }; - } - Ok((program, files)) - } - } - - impl LineString { - fn from>( - from_attr: read::AttributeValue, - dwarf: &read::Dwarf, - line_strings: &mut write::LineStringTable, - strings: &mut write::StringTable, - ) -> ConvertResult { - Ok(match from_attr { - read::AttributeValue::String(r) => 
LineString::String(r.to_slice()?.to_vec()), - read::AttributeValue::DebugStrRef(offset) => { - let r = dwarf.debug_str.get_str(offset)?; - let id = strings.add(r.to_slice()?); - LineString::StringRef(id) - } - read::AttributeValue::DebugLineStrRef(offset) => { - let r = dwarf.debug_line_str.get_str(offset)?; - let id = line_strings.add(r.to_slice()?); - LineString::LineStringRef(id) - } - _ => return Err(ConvertError::UnsupportedLineStringForm), - }) - } - } -} - -#[cfg(test)] -#[cfg(feature = "read")] -mod tests { - use super::*; - use crate::read; - use crate::write::{DebugLineStr, DebugStr, EndianVec, StringTable}; - use crate::LittleEndian; - - #[test] - fn test_line_program_table() { - let dir1 = LineString::String(b"dir1".to_vec()); - let file1 = LineString::String(b"file1".to_vec()); - let dir2 = LineString::String(b"dir2".to_vec()); - let file2 = LineString::String(b"file2".to_vec()); - - let mut programs = Vec::new(); - for &version in &[2, 3, 4, 5] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - let mut program = LineProgram::new( - encoding, - LineEncoding::default(), - dir1.clone(), - file1.clone(), - None, - ); - - { - assert_eq!(&dir1, program.get_directory(program.default_directory())); - program.file_has_timestamp = true; - program.file_has_size = true; - if encoding.version >= 5 { - program.file_has_md5 = true; - } - - let dir_id = program.add_directory(dir2.clone()); - assert_eq!(&dir2, program.get_directory(dir_id)); - assert_eq!(dir_id, program.add_directory(dir2.clone())); - - let file_info = FileInfo { - timestamp: 1, - size: 2, - md5: if encoding.version >= 5 { - [3; 16] - } else { - [0; 16] - }, - }; - let file_id = program.add_file(file2.clone(), dir_id, Some(file_info)); - assert_eq!((&file2, dir_id), program.get_file(file_id)); - assert_eq!(file_info, *program.get_file_info(file_id)); - - 
program.get_file_info_mut(file_id).size = 3; - assert_ne!(file_info, *program.get_file_info(file_id)); - assert_eq!(file_id, program.add_file(file2.clone(), dir_id, None)); - assert_ne!(file_info, *program.get_file_info(file_id)); - assert_eq!( - file_id, - program.add_file(file2.clone(), dir_id, Some(file_info)) - ); - assert_eq!(file_info, *program.get_file_info(file_id)); - - programs.push((program, file_id, encoding)); - } - } - } - } - - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - let mut debug_line = DebugLine::from(EndianVec::new(LittleEndian)); - let mut debug_line_offsets = Vec::new(); - for (program, _, encoding) in &programs { - debug_line_offsets.push( - program - .write( - &mut debug_line, - *encoding, - &debug_line_str_offsets, - &debug_str_offsets, - ) - .unwrap(), - ); - } - - let read_debug_line = read::DebugLine::new(debug_line.slice(), LittleEndian); - - let convert_address = &|address| Some(Address::Constant(address)); - for ((program, file_id, encoding), offset) in programs.iter().zip(debug_line_offsets.iter()) - { - let read_program = read_debug_line - .program( - *offset, - encoding.address_size, - Some(read::EndianSlice::new(b"dir1", LittleEndian)), - Some(read::EndianSlice::new(b"file1", LittleEndian)), - ) - .unwrap(); - - let dwarf = read::Dwarf::default(); - let mut convert_line_strings = LineStringTable::default(); - let mut convert_strings = StringTable::default(); - let (convert_program, convert_files) = LineProgram::from( - read_program, - &dwarf, - &mut convert_line_strings, - &mut convert_strings, - convert_address, - ) - .unwrap(); - assert_eq!(convert_program.version(), program.version()); - assert_eq!(convert_program.address_size(), program.address_size()); - assert_eq!(convert_program.format(), program.format()); - - let convert_file_id = convert_files[file_id.raw() as usize]; - let (file, dir) = program.get_file(*file_id); - let (convert_file, convert_dir) = 
convert_program.get_file(convert_file_id); - assert_eq!(file, convert_file); - assert_eq!( - program.get_directory(dir), - convert_program.get_directory(convert_dir) - ); - assert_eq!( - program.get_file_info(*file_id), - convert_program.get_file_info(convert_file_id) - ); - } - } - - #[test] - fn test_line_row() { - let dir1 = &b"dir1"[..]; - let file1 = &b"file1"[..]; - let file2 = &b"file2"[..]; - let convert_address = &|address| Some(Address::Constant(address)); - - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - - for &version in &[2, 3, 4, 5] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - let line_base = -5; - let line_range = 14; - let neg_line_base = (-line_base) as u8; - let mut program = LineProgram::new( - encoding, - LineEncoding { - line_base, - line_range, - ..Default::default() - }, - LineString::String(dir1.to_vec()), - LineString::String(file1.to_vec()), - None, - ); - let dir_id = program.default_directory(); - program.add_file(LineString::String(file1.to_vec()), dir_id, None); - let file_id = - program.add_file(LineString::String(file2.to_vec()), dir_id, None); - - // Test sequences. - { - let mut program = program.clone(); - let address = Address::Constant(0x12); - program.begin_sequence(Some(address)); - assert_eq!( - program.instructions, - vec![LineInstruction::SetAddress(address)] - ); - } - - { - let mut program = program.clone(); - program.begin_sequence(None); - assert_eq!(program.instructions, Vec::new()); - } - - { - let mut program = program.clone(); - program.begin_sequence(None); - program.end_sequence(0x1234); - assert_eq!( - program.instructions, - vec![ - LineInstruction::AdvancePc(0x1234), - LineInstruction::EndSequence - ] - ); - } - - // Create a base program. 
- program.begin_sequence(None); - program.row.line = 0x1000; - program.generate_row(); - let base_row = program.row; - let base_instructions = program.instructions.clone(); - - // Create test cases. - let mut tests = Vec::new(); - - let row = base_row; - tests.push((row, vec![LineInstruction::Copy])); - - let mut row = base_row; - row.line -= u64::from(neg_line_base); - tests.push((row, vec![LineInstruction::Special(OPCODE_BASE)])); - - let mut row = base_row; - row.line += u64::from(line_range) - 1; - row.line -= u64::from(neg_line_base); - tests.push(( - row, - vec![LineInstruction::Special(OPCODE_BASE + line_range - 1)], - )); - - let mut row = base_row; - row.line += u64::from(line_range); - row.line -= u64::from(neg_line_base); - tests.push(( - row, - vec![ - LineInstruction::AdvanceLine(i64::from(line_range - neg_line_base)), - LineInstruction::Copy, - ], - )); - - let mut row = base_row; - row.address_offset = 1; - row.line -= u64::from(neg_line_base); - tests.push(( - row, - vec![LineInstruction::Special(OPCODE_BASE + line_range)], - )); - - let op_range = (255 - OPCODE_BASE) / line_range; - let mut row = base_row; - row.address_offset = u64::from(op_range); - row.line -= u64::from(neg_line_base); - tests.push(( - row, - vec![LineInstruction::Special( - OPCODE_BASE + op_range * line_range, - )], - )); - - let mut row = base_row; - row.address_offset = u64::from(op_range); - row.line += u64::from(255 - OPCODE_BASE - op_range * line_range); - row.line -= u64::from(neg_line_base); - tests.push((row, vec![LineInstruction::Special(255)])); - - let mut row = base_row; - row.address_offset = u64::from(op_range); - row.line += u64::from(255 - OPCODE_BASE - op_range * line_range) + 1; - row.line -= u64::from(neg_line_base); - tests.push(( - row, - vec![LineInstruction::ConstAddPc, LineInstruction::Copy], - )); - - let mut row = base_row; - row.address_offset = u64::from(op_range); - row.line += u64::from(255 - OPCODE_BASE - op_range * line_range) + 2; - row.line -= 
u64::from(neg_line_base); - tests.push(( - row, - vec![ - LineInstruction::ConstAddPc, - LineInstruction::Special(OPCODE_BASE + 6), - ], - )); - - let mut row = base_row; - row.address_offset = u64::from(op_range) * 2; - row.line += u64::from(255 - OPCODE_BASE - op_range * line_range); - row.line -= u64::from(neg_line_base); - tests.push(( - row, - vec![LineInstruction::ConstAddPc, LineInstruction::Special(255)], - )); - - let mut row = base_row; - row.address_offset = u64::from(op_range) * 2; - row.line += u64::from(255 - OPCODE_BASE - op_range * line_range) + 1; - row.line -= u64::from(neg_line_base); - tests.push(( - row, - vec![ - LineInstruction::AdvancePc(row.address_offset), - LineInstruction::Copy, - ], - )); - - let mut row = base_row; - row.address_offset = u64::from(op_range) * 2; - row.line += u64::from(255 - OPCODE_BASE - op_range * line_range) + 2; - row.line -= u64::from(neg_line_base); - tests.push(( - row, - vec![ - LineInstruction::AdvancePc(row.address_offset), - LineInstruction::Special(OPCODE_BASE + 6), - ], - )); - - let mut row = base_row; - row.address_offset = 0x1234; - tests.push(( - row, - vec![LineInstruction::AdvancePc(0x1234), LineInstruction::Copy], - )); - - let mut row = base_row; - row.line += 0x1234; - tests.push(( - row, - vec![LineInstruction::AdvanceLine(0x1234), LineInstruction::Copy], - )); - - let mut row = base_row; - row.file = file_id; - tests.push(( - row, - vec![LineInstruction::SetFile(file_id), LineInstruction::Copy], - )); - - let mut row = base_row; - row.column = 0x1234; - tests.push(( - row, - vec![LineInstruction::SetColumn(0x1234), LineInstruction::Copy], - )); - - let mut row = base_row; - row.discriminator = 0x1234; - tests.push(( - row, - vec![ - LineInstruction::SetDiscriminator(0x1234), - LineInstruction::Copy, - ], - )); - - let mut row = base_row; - row.is_statement = !row.is_statement; - tests.push(( - row, - vec![LineInstruction::NegateStatement, LineInstruction::Copy], - )); - - let mut row = base_row; 
- row.basic_block = true; - tests.push(( - row, - vec![LineInstruction::SetBasicBlock, LineInstruction::Copy], - )); - - let mut row = base_row; - row.prologue_end = true; - tests.push(( - row, - vec![LineInstruction::SetPrologueEnd, LineInstruction::Copy], - )); - - let mut row = base_row; - row.epilogue_begin = true; - tests.push(( - row, - vec![LineInstruction::SetEpilogueBegin, LineInstruction::Copy], - )); - - let mut row = base_row; - row.isa = 0x1234; - tests.push(( - row, - vec![LineInstruction::SetIsa(0x1234), LineInstruction::Copy], - )); - - for test in tests { - // Test generate_row(). - let mut program = program.clone(); - program.row = test.0; - program.generate_row(); - assert_eq!( - &program.instructions[base_instructions.len()..], - &test.1[..] - ); - - // Test LineProgram::from(). - let mut debug_line = DebugLine::from(EndianVec::new(LittleEndian)); - let debug_line_offset = program - .write( - &mut debug_line, - encoding, - &debug_line_str_offsets, - &debug_str_offsets, - ) - .unwrap(); - - let read_debug_line = - read::DebugLine::new(debug_line.slice(), LittleEndian); - let read_program = read_debug_line - .program( - debug_line_offset, - address_size, - Some(read::EndianSlice::new(dir1, LittleEndian)), - Some(read::EndianSlice::new(file1, LittleEndian)), - ) - .unwrap(); - - let dwarf = read::Dwarf::default(); - let mut convert_line_strings = LineStringTable::default(); - let mut convert_strings = StringTable::default(); - let (convert_program, _convert_files) = LineProgram::from( - read_program, - &dwarf, - &mut convert_line_strings, - &mut convert_strings, - convert_address, - ) - .unwrap(); - assert_eq!( - &convert_program.instructions[base_instructions.len()..], - &test.1[..] 
- ); - } - } - } - } - } - - #[test] - fn test_line_instruction() { - let dir1 = &b"dir1"[..]; - let file1 = &b"file1"[..]; - - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - - for &version in &[2, 3, 4, 5] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - let mut program = LineProgram::new( - encoding, - LineEncoding::default(), - LineString::String(dir1.to_vec()), - LineString::String(file1.to_vec()), - None, - ); - let dir_id = program.default_directory(); - let file_id = - program.add_file(LineString::String(file1.to_vec()), dir_id, None); - - for &(ref inst, ref expect_inst) in &[ - ( - LineInstruction::Special(OPCODE_BASE), - read::LineInstruction::Special(OPCODE_BASE), - ), - ( - LineInstruction::Special(255), - read::LineInstruction::Special(255), - ), - (LineInstruction::Copy, read::LineInstruction::Copy), - ( - LineInstruction::AdvancePc(0x12), - read::LineInstruction::AdvancePc(0x12), - ), - ( - LineInstruction::AdvanceLine(0x12), - read::LineInstruction::AdvanceLine(0x12), - ), - ( - LineInstruction::SetFile(file_id), - read::LineInstruction::SetFile(file_id.raw()), - ), - ( - LineInstruction::SetColumn(0x12), - read::LineInstruction::SetColumn(0x12), - ), - ( - LineInstruction::NegateStatement, - read::LineInstruction::NegateStatement, - ), - ( - LineInstruction::SetBasicBlock, - read::LineInstruction::SetBasicBlock, - ), - ( - LineInstruction::ConstAddPc, - read::LineInstruction::ConstAddPc, - ), - ( - LineInstruction::SetPrologueEnd, - read::LineInstruction::SetPrologueEnd, - ), - ( - LineInstruction::SetEpilogueBegin, - read::LineInstruction::SetEpilogueBegin, - ), - ( - LineInstruction::SetIsa(0x12), - read::LineInstruction::SetIsa(0x12), - ), - ( - LineInstruction::EndSequence, - read::LineInstruction::EndSequence, - ), - ( - 
LineInstruction::SetAddress(Address::Constant(0x12)), - read::LineInstruction::SetAddress(0x12), - ), - ( - LineInstruction::SetDiscriminator(0x12), - read::LineInstruction::SetDiscriminator(0x12), - ), - ][..] - { - let mut program = program.clone(); - program.instructions.push(*inst); - - let mut debug_line = DebugLine::from(EndianVec::new(LittleEndian)); - let debug_line_offset = program - .write( - &mut debug_line, - encoding, - &debug_line_str_offsets, - &debug_str_offsets, - ) - .unwrap(); - - let read_debug_line = - read::DebugLine::new(debug_line.slice(), LittleEndian); - let read_program = read_debug_line - .program( - debug_line_offset, - address_size, - Some(read::EndianSlice::new(dir1, LittleEndian)), - Some(read::EndianSlice::new(file1, LittleEndian)), - ) - .unwrap(); - let read_header = read_program.header(); - let mut read_insts = read_header.instructions(); - assert_eq!( - *expect_inst, - read_insts.next_instruction(read_header).unwrap().unwrap() - ); - assert_eq!(None, read_insts.next_instruction(read_header).unwrap()); - } - } - } - } - } - - // Test that the address/line advance is correct. We don't test for optimality. 
- #[test] - fn test_advance() { - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 8, - }; - - let dir1 = &b"dir1"[..]; - let file1 = &b"file1"[..]; - - let addresses = 0..50; - let lines = -10..25i64; - - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - - for minimum_instruction_length in vec![1, 4] { - for maximum_operations_per_instruction in vec![1, 3] { - for line_base in vec![-5, 0] { - for line_range in vec![10, 20] { - let line_encoding = LineEncoding { - minimum_instruction_length, - maximum_operations_per_instruction, - line_base, - line_range, - default_is_stmt: true, - }; - let mut program = LineProgram::new( - encoding, - line_encoding, - LineString::String(dir1.to_vec()), - LineString::String(file1.to_vec()), - None, - ); - for address_advance in addresses.clone() { - program.begin_sequence(Some(Address::Constant(0x1000))); - program.row().line = 0x10000; - program.generate_row(); - for line_advance in lines.clone() { - { - let row = program.row(); - row.address_offset += - address_advance * u64::from(minimum_instruction_length); - row.line = row.line.wrapping_add(line_advance as u64); - } - program.generate_row(); - } - let address_offset = program.row().address_offset - + u64::from(minimum_instruction_length); - program.end_sequence(address_offset); - } - - let mut debug_line = DebugLine::from(EndianVec::new(LittleEndian)); - let debug_line_offset = program - .write( - &mut debug_line, - encoding, - &debug_line_str_offsets, - &debug_str_offsets, - ) - .unwrap(); - - let read_debug_line = - read::DebugLine::new(debug_line.slice(), LittleEndian); - let read_program = read_debug_line - .program( - debug_line_offset, - 8, - Some(read::EndianSlice::new(dir1, LittleEndian)), - Some(read::EndianSlice::new(file1, LittleEndian)), - ) - .unwrap(); - - let mut rows = read_program.rows(); - for address_advance in addresses.clone() { - let mut address; - let mut line; 
- { - let row = rows.next_row().unwrap().unwrap().1; - address = row.address(); - line = row.line().unwrap().get(); - } - assert_eq!(address, 0x1000); - assert_eq!(line, 0x10000); - for line_advance in lines.clone() { - let row = rows.next_row().unwrap().unwrap().1; - assert_eq!( - row.address() - address, - address_advance * u64::from(minimum_instruction_length) - ); - assert_eq!( - (row.line().unwrap().get() as i64) - (line as i64), - line_advance - ); - address = row.address(); - line = row.line().unwrap().get(); - } - let row = rows.next_row().unwrap().unwrap().1; - assert!(row.end_sequence()); - } - } - } - } - } - } - - #[test] - fn test_line_string() { - let version = 5; - - let file = b"file1"; - - let mut strings = StringTable::default(); - let string_id = strings.add("file2"); - let mut debug_str = DebugStr::from(EndianVec::new(LittleEndian)); - let debug_str_offsets = strings.write(&mut debug_str).unwrap(); - - let mut line_strings = LineStringTable::default(); - let line_string_id = line_strings.add("file3"); - let mut debug_line_str = DebugLineStr::from(EndianVec::new(LittleEndian)); - let debug_line_str_offsets = line_strings.write(&mut debug_line_str).unwrap(); - - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - - for (file, expect_file) in vec![ - ( - LineString::String(file.to_vec()), - read::AttributeValue::String(read::EndianSlice::new(file, LittleEndian)), - ), - ( - LineString::StringRef(string_id), - read::AttributeValue::DebugStrRef(debug_str_offsets.get(string_id)), - ), - ( - LineString::LineStringRef(line_string_id), - read::AttributeValue::DebugLineStrRef( - debug_line_str_offsets.get(line_string_id), - ), - ), - ] { - let program = LineProgram::new( - encoding, - LineEncoding::default(), - LineString::String(b"dir".to_vec()), - file, - None, - ); - - let mut debug_line = DebugLine::from(EndianVec::new(LittleEndian)); - let 
debug_line_offset = program - .write( - &mut debug_line, - encoding, - &debug_line_str_offsets, - &debug_str_offsets, - ) - .unwrap(); - - let read_debug_line = read::DebugLine::new(debug_line.slice(), LittleEndian); - let read_program = read_debug_line - .program(debug_line_offset, address_size, None, None) - .unwrap(); - let read_header = read_program.header(); - assert_eq!(read_header.file(0).unwrap().path_name(), expect_file); - } - } - } - } - - #[test] - fn test_missing_comp_dir() { - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - - for &version in &[2, 3, 4, 5] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - let program = LineProgram::new( - encoding, - LineEncoding::default(), - LineString::String(Vec::new()), - LineString::String(Vec::new()), - None, - ); - - let mut debug_line = DebugLine::from(EndianVec::new(LittleEndian)); - let debug_line_offset = program - .write( - &mut debug_line, - encoding, - &debug_line_str_offsets, - &debug_str_offsets, - ) - .unwrap(); - - let read_debug_line = read::DebugLine::new(debug_line.slice(), LittleEndian); - let read_program = read_debug_line - .program( - debug_line_offset, - address_size, - // Testing missing comp_dir/comp_name. 
- None, - None, - ) - .unwrap(); - - let dwarf = read::Dwarf::default(); - let mut convert_line_strings = LineStringTable::default(); - let mut convert_strings = StringTable::default(); - let convert_address = &|address| Some(Address::Constant(address)); - LineProgram::from( - read_program, - &dwarf, - &mut convert_line_strings, - &mut convert_strings, - convert_address, - ) - .unwrap(); - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/loc.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/loc.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/loc.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/loc.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,550 +0,0 @@ -use alloc::vec::Vec; -use indexmap::IndexSet; -use std::ops::{Deref, DerefMut}; - -use crate::common::{Encoding, LocationListsOffset, SectionId}; -use crate::write::{ - Address, BaseId, DebugInfoReference, Error, Expression, Result, Section, Sections, UnitOffsets, - Writer, -}; - -define_section!( - DebugLoc, - LocationListsOffset, - "A writable `.debug_loc` section." -); -define_section!( - DebugLocLists, - LocationListsOffset, - "A writable `.debug_loclists` section." -); - -define_offsets!( - LocationListOffsets: LocationListId => LocationListsOffset, - "The section offsets of a series of location lists within the `.debug_loc` or `.debug_loclists` sections." -); - -define_id!( - LocationListId, - "An identifier for a location list in a `LocationListTable`." -); - -/// A table of location lists that will be stored in a `.debug_loc` or `.debug_loclists` section. -#[derive(Debug, Default)] -pub struct LocationListTable { - base_id: BaseId, - locations: IndexSet, -} - -impl LocationListTable { - /// Add a location list to the table. 
- pub fn add(&mut self, loc_list: LocationList) -> LocationListId { - let (index, _) = self.locations.insert_full(loc_list); - LocationListId::new(self.base_id, index) - } - - /// Write the location list table to the appropriate section for the given DWARF version. - pub(crate) fn write( - &self, - sections: &mut Sections, - encoding: Encoding, - unit_offsets: Option<&UnitOffsets>, - ) -> Result { - if self.locations.is_empty() { - return Ok(LocationListOffsets::none()); - } - - match encoding.version { - 2..=4 => self.write_loc( - &mut sections.debug_loc, - &mut sections.debug_loc_refs, - encoding, - unit_offsets, - ), - 5 => self.write_loclists( - &mut sections.debug_loclists, - &mut sections.debug_loclists_refs, - encoding, - unit_offsets, - ), - _ => Err(Error::UnsupportedVersion(encoding.version)), - } - } - - /// Write the location list table to the `.debug_loc` section. - fn write_loc( - &self, - w: &mut DebugLoc, - refs: &mut Vec, - encoding: Encoding, - unit_offsets: Option<&UnitOffsets>, - ) -> Result { - let address_size = encoding.address_size; - let mut offsets = Vec::new(); - for loc_list in self.locations.iter() { - offsets.push(w.offset()); - for loc in &loc_list.0 { - // Note that we must ensure none of the ranges have both begin == 0 and end == 0. - // We do this by ensuring that begin != end, which is a bit more restrictive - // than required, but still seems reasonable. 
- match *loc { - Location::BaseAddress { address } => { - let marker = !0 >> (64 - address_size * 8); - w.write_udata(marker, address_size)?; - w.write_address(address, address_size)?; - } - Location::OffsetPair { - begin, - end, - ref data, - } => { - if begin == end { - return Err(Error::InvalidRange); - } - w.write_udata(begin, address_size)?; - w.write_udata(end, address_size)?; - write_expression(&mut w.0, refs, encoding, unit_offsets, data)?; - } - Location::StartEnd { - begin, - end, - ref data, - } => { - if begin == end { - return Err(Error::InvalidRange); - } - w.write_address(begin, address_size)?; - w.write_address(end, address_size)?; - write_expression(&mut w.0, refs, encoding, unit_offsets, data)?; - } - Location::StartLength { - begin, - length, - ref data, - } => { - let end = match begin { - Address::Constant(begin) => Address::Constant(begin + length), - Address::Symbol { symbol, addend } => Address::Symbol { - symbol, - addend: addend + length as i64, - }, - }; - if begin == end { - return Err(Error::InvalidRange); - } - w.write_address(begin, address_size)?; - w.write_address(end, address_size)?; - write_expression(&mut w.0, refs, encoding, unit_offsets, data)?; - } - Location::DefaultLocation { .. } => { - return Err(Error::InvalidRange); - } - } - } - w.write_udata(0, address_size)?; - w.write_udata(0, address_size)?; - } - Ok(LocationListOffsets { - base_id: self.base_id, - offsets, - }) - } - - /// Write the location list table to the `.debug_loclists` section. 
- fn write_loclists( - &self, - w: &mut DebugLocLists, - refs: &mut Vec, - encoding: Encoding, - unit_offsets: Option<&UnitOffsets>, - ) -> Result { - let mut offsets = Vec::new(); - - if encoding.version != 5 { - return Err(Error::NeedVersion(5)); - } - - let length_offset = w.write_initial_length(encoding.format)?; - let length_base = w.len(); - - w.write_u16(encoding.version)?; - w.write_u8(encoding.address_size)?; - w.write_u8(0)?; // segment_selector_size - w.write_u32(0)?; // offset_entry_count (when set to zero DW_FORM_rnglistx can't be used, see section 7.28) - // FIXME implement DW_FORM_rnglistx writing and implement the offset entry list - - for loc_list in self.locations.iter() { - offsets.push(w.offset()); - for loc in &loc_list.0 { - match *loc { - Location::BaseAddress { address } => { - w.write_u8(crate::constants::DW_LLE_base_address.0)?; - w.write_address(address, encoding.address_size)?; - } - Location::OffsetPair { - begin, - end, - ref data, - } => { - w.write_u8(crate::constants::DW_LLE_offset_pair.0)?; - w.write_uleb128(begin)?; - w.write_uleb128(end)?; - write_expression(&mut w.0, refs, encoding, unit_offsets, data)?; - } - Location::StartEnd { - begin, - end, - ref data, - } => { - w.write_u8(crate::constants::DW_LLE_start_end.0)?; - w.write_address(begin, encoding.address_size)?; - w.write_address(end, encoding.address_size)?; - write_expression(&mut w.0, refs, encoding, unit_offsets, data)?; - } - Location::StartLength { - begin, - length, - ref data, - } => { - w.write_u8(crate::constants::DW_LLE_start_length.0)?; - w.write_address(begin, encoding.address_size)?; - w.write_uleb128(length)?; - write_expression(&mut w.0, refs, encoding, unit_offsets, data)?; - } - Location::DefaultLocation { ref data } => { - w.write_u8(crate::constants::DW_LLE_default_location.0)?; - write_expression(&mut w.0, refs, encoding, unit_offsets, data)?; - } - } - } - - w.write_u8(crate::constants::DW_LLE_end_of_list.0)?; - } - - let length = (w.len() - 
length_base) as u64; - w.write_initial_length_at(length_offset, length, encoding.format)?; - - Ok(LocationListOffsets { - base_id: self.base_id, - offsets, - }) - } -} - -/// A locations list that will be stored in a `.debug_loc` or `.debug_loclists` section. -#[derive(Clone, Debug, Eq, PartialEq, Hash)] -pub struct LocationList(pub Vec); - -/// A single location. -#[derive(Clone, Debug, Eq, PartialEq, Hash)] -pub enum Location { - /// DW_LLE_base_address - BaseAddress { - /// Base address. - address: Address, - }, - /// DW_LLE_offset_pair - OffsetPair { - /// Start of range relative to base address. - begin: u64, - /// End of range relative to base address. - end: u64, - /// Location description. - data: Expression, - }, - /// DW_LLE_start_end - StartEnd { - /// Start of range. - begin: Address, - /// End of range. - end: Address, - /// Location description. - data: Expression, - }, - /// DW_LLE_start_length - StartLength { - /// Start of range. - begin: Address, - /// Length of range. - length: u64, - /// Location description. - data: Expression, - }, - /// DW_LLE_default_location - DefaultLocation { - /// Location description. - data: Expression, - }, -} - -fn write_expression( - w: &mut W, - refs: &mut Vec, - encoding: Encoding, - unit_offsets: Option<&UnitOffsets>, - val: &Expression, -) -> Result<()> { - let size = val.size(encoding, unit_offsets) as u64; - if encoding.version <= 4 { - w.write_udata(size, 2)?; - } else { - w.write_uleb128(size)?; - } - val.write(w, Some(refs), encoding, unit_offsets)?; - Ok(()) -} - -#[cfg(feature = "read")] -mod convert { - use super::*; - - use crate::read::{self, Reader}; - use crate::write::{ConvertError, ConvertResult, ConvertUnitContext}; - - impl LocationList { - /// Create a location list by reading the data from the give location list iter. 
- pub(crate) fn from>( - mut from: read::RawLocListIter, - context: &ConvertUnitContext, - ) -> ConvertResult { - let mut have_base_address = context.base_address != Address::Constant(0); - let convert_address = - |x| (context.convert_address)(x).ok_or(ConvertError::InvalidAddress); - let convert_expression = |x| { - Expression::from( - x, - context.unit.encoding(), - Some(context.dwarf), - Some(context.unit), - Some(context.entry_ids), - context.convert_address, - ) - }; - let mut loc_list = Vec::new(); - while let Some(from_loc) = from.next()? { - let loc = match from_loc { - read::RawLocListEntry::AddressOrOffsetPair { begin, end, data } => { - // These were parsed as addresses, even if they are offsets. - let begin = convert_address(begin)?; - let end = convert_address(end)?; - let data = convert_expression(data)?; - match (begin, end) { - (Address::Constant(begin_offset), Address::Constant(end_offset)) => { - if have_base_address { - Location::OffsetPair { - begin: begin_offset, - end: end_offset, - data, - } - } else { - Location::StartEnd { begin, end, data } - } - } - _ => { - if have_base_address { - // At least one of begin/end is an address, but we also have - // a base address. Adding addresses is undefined. 
- return Err(ConvertError::InvalidRangeRelativeAddress); - } - Location::StartEnd { begin, end, data } - } - } - } - read::RawLocListEntry::BaseAddress { addr } => { - have_base_address = true; - let address = convert_address(addr)?; - Location::BaseAddress { address } - } - read::RawLocListEntry::BaseAddressx { addr } => { - have_base_address = true; - let address = convert_address(context.dwarf.address(context.unit, addr)?)?; - Location::BaseAddress { address } - } - read::RawLocListEntry::StartxEndx { begin, end, data } => { - let begin = convert_address(context.dwarf.address(context.unit, begin)?)?; - let end = convert_address(context.dwarf.address(context.unit, end)?)?; - let data = convert_expression(data)?; - Location::StartEnd { begin, end, data } - } - read::RawLocListEntry::StartxLength { - begin, - length, - data, - } => { - let begin = convert_address(context.dwarf.address(context.unit, begin)?)?; - let data = convert_expression(data)?; - Location::StartLength { - begin, - length, - data, - } - } - read::RawLocListEntry::OffsetPair { begin, end, data } => { - let data = convert_expression(data)?; - Location::OffsetPair { begin, end, data } - } - read::RawLocListEntry::StartEnd { begin, end, data } => { - let begin = convert_address(begin)?; - let end = convert_address(end)?; - let data = convert_expression(data)?; - Location::StartEnd { begin, end, data } - } - read::RawLocListEntry::StartLength { - begin, - length, - data, - } => { - let begin = convert_address(begin)?; - let data = convert_expression(data)?; - Location::StartLength { - begin, - length, - data, - } - } - read::RawLocListEntry::DefaultLocation { data } => { - let data = convert_expression(data)?; - Location::DefaultLocation { data } - } - }; - // In some cases, existing data may contain begin == end, filtering - // these out. - match loc { - Location::StartLength { length, .. } if length == 0 => continue, - Location::StartEnd { begin, end, .. 
} if begin == end => continue, - Location::OffsetPair { begin, end, .. } if begin == end => continue, - _ => (), - } - loc_list.push(loc); - } - Ok(LocationList(loc_list)) - } - } -} - -#[cfg(test)] -#[cfg(feature = "read")] -mod tests { - use super::*; - use crate::common::{ - DebugAbbrevOffset, DebugAddrBase, DebugInfoOffset, DebugLocListsBase, DebugRngListsBase, - DebugStrOffsetsBase, Format, - }; - use crate::read; - use crate::write::{ - ConvertUnitContext, EndianVec, LineStringTable, RangeListTable, StringTable, - }; - use crate::LittleEndian; - use std::collections::HashMap; - use std::sync::Arc; - - #[test] - fn test_loc_list() { - let mut line_strings = LineStringTable::default(); - let mut strings = StringTable::default(); - let mut expression = Expression::new(); - expression.op_constu(0); - - for &version in &[2, 3, 4, 5] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - - let mut loc_list = LocationList(vec![ - Location::StartLength { - begin: Address::Constant(6666), - length: 7777, - data: expression.clone(), - }, - Location::StartEnd { - begin: Address::Constant(4444), - end: Address::Constant(5555), - data: expression.clone(), - }, - Location::BaseAddress { - address: Address::Constant(1111), - }, - Location::OffsetPair { - begin: 2222, - end: 3333, - data: expression.clone(), - }, - ]); - if version >= 5 { - loc_list.0.push(Location::DefaultLocation { - data: expression.clone(), - }); - } - - let mut locations = LocationListTable::default(); - let loc_list_id = locations.add(loc_list.clone()); - - let mut sections = Sections::new(EndianVec::new(LittleEndian)); - let loc_list_offsets = locations.write(&mut sections, encoding, None).unwrap(); - assert!(sections.debug_loc_refs.is_empty()); - assert!(sections.debug_loclists_refs.is_empty()); - - let read_debug_loc = - read::DebugLoc::new(sections.debug_loc.slice(), LittleEndian); - let 
read_debug_loclists = - read::DebugLocLists::new(sections.debug_loclists.slice(), LittleEndian); - let read_loc = read::LocationLists::new(read_debug_loc, read_debug_loclists); - let offset = loc_list_offsets.get(loc_list_id); - let read_loc_list = read_loc.raw_locations(offset, encoding).unwrap(); - - let dwarf = read::Dwarf { - locations: read_loc, - ..Default::default() - }; - let unit = read::Unit { - header: read::UnitHeader::new( - encoding, - 0, - read::UnitType::Compilation, - DebugAbbrevOffset(0), - DebugInfoOffset(0).into(), - read::EndianSlice::default(), - ), - abbreviations: Arc::new(read::Abbreviations::default()), - name: None, - comp_dir: None, - low_pc: 0, - str_offsets_base: DebugStrOffsetsBase(0), - addr_base: DebugAddrBase(0), - loclists_base: DebugLocListsBase(0), - rnglists_base: DebugRngListsBase(0), - line_program: None, - dwo_id: None, - }; - let context = ConvertUnitContext { - dwarf: &dwarf, - unit: &unit, - line_strings: &mut line_strings, - strings: &mut strings, - ranges: &mut RangeListTable::default(), - locations: &mut locations, - convert_address: &|address| Some(Address::Constant(address)), - base_address: Address::Constant(0), - line_program_offset: None, - line_program_files: Vec::new(), - entry_ids: &HashMap::new(), - }; - let convert_loc_list = LocationList::from(read_loc_list, &context).unwrap(); - - if version <= 4 { - loc_list.0[0] = Location::StartEnd { - begin: Address::Constant(6666), - end: Address::Constant(6666 + 7777), - data: expression.clone(), - }; - } - assert_eq!(loc_list, convert_loc_list); - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/mod.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/mod.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,425 +0,0 @@ -//! Write DWARF debugging information. -//! -//! ## API Structure -//! -//! 
This module works by building up a representation of the debugging information -//! in memory, and then writing it all at once. It supports two major use cases: -//! -//! * Use the [`DwarfUnit`](./struct.DwarfUnit.html) type when writing DWARF -//! for a single compilation unit. -//! -//! * Use the [`Dwarf`](./struct.Dwarf.html) type when writing DWARF for multiple -//! compilation units. -//! -//! The module also supports reading in DWARF debugging information and writing it out -//! again, possibly after modifying it. Create a [`read::Dwarf`](../read/struct.Dwarf.html) -//! instance, and then use [`Dwarf::from`](./struct.Dwarf.html#method.from) to convert -//! it to a writable instance. -//! -//! ## Example Usage -//! -//! Write a compilation unit containing only the top level DIE. -//! -//! ```rust -//! use gimli::write::{ -//! Address, AttributeValue, DwarfUnit, EndianVec, Error, Range, RangeList, Sections, -//! }; -//! -//! fn example() -> Result<(), Error> { -//! // Choose the encoding parameters. -//! let encoding = gimli::Encoding { -//! format: gimli::Format::Dwarf32, -//! version: 5, -//! address_size: 8, -//! }; -//! // Create a container for a single compilation unit. -//! let mut dwarf = DwarfUnit::new(encoding); -//! // Set a range attribute on the root DIE. -//! let range_list = RangeList(vec![Range::StartLength { -//! begin: Address::Constant(0x100), -//! length: 42, -//! }]); -//! let range_list_id = dwarf.unit.ranges.add(range_list); -//! let root = dwarf.unit.root(); -//! dwarf.unit.get_mut(root).set( -//! gimli::DW_AT_ranges, -//! AttributeValue::RangeListRef(range_list_id), -//! ); -//! // Create a `Vec` for each DWARF section. -//! let mut sections = Sections::new(EndianVec::new(gimli::LittleEndian)); -//! // Finally, write the DWARF data to the sections. -//! dwarf.write(&mut sections)?; -//! sections.for_each(|id, data| { -//! // Here you can add the data to the output object file. -//! Ok(()) -//! }) -//! } -//! # fn main() { -//! 
# example().unwrap(); -//! # } - -use std::error; -use std::fmt; -use std::result; - -use crate::constants; - -mod endian_vec; -pub use self::endian_vec::*; - -mod writer; -pub use self::writer::*; - -#[macro_use] -mod section; -pub use self::section::*; - -macro_rules! define_id { - ($name:ident, $docs:expr) => { - #[doc=$docs] - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] - pub struct $name { - base_id: BaseId, - index: usize, - } - - impl $name { - #[inline] - fn new(base_id: BaseId, index: usize) -> Self { - $name { base_id, index } - } - } - }; -} - -macro_rules! define_offsets { - ($offsets:ident: $id:ident => $offset:ident, $off_doc:expr) => { - #[doc=$off_doc] - #[derive(Debug)] - pub struct $offsets { - base_id: BaseId, - // We know ids start at 0. - offsets: Vec<$offset>, - } - - impl $offsets { - /// Return an empty list of offsets. - #[inline] - pub fn none() -> Self { - $offsets { - base_id: BaseId::default(), - offsets: Vec::new(), - } - } - - /// Get the offset - /// - /// # Panics - /// - /// Panics if `id` is invalid. - #[inline] - pub fn get(&self, id: $id) -> $offset { - debug_assert_eq!(self.base_id, id.base_id); - self.offsets[id.index] - } - - /// Return the number of offsets. - #[inline] - pub fn count(&self) -> usize { - self.offsets.len() - } - } - }; -} - -mod abbrev; -pub use self::abbrev::*; - -mod cfi; -pub use self::cfi::*; - -mod dwarf; -pub use self::dwarf::*; - -mod line; -pub use self::line::*; - -mod loc; -pub use self::loc::*; - -mod op; -pub use self::op::*; - -mod range; -pub use self::range::*; - -mod str; -pub use self::str::*; - -mod unit; -pub use self::unit::*; - -/// An error that occurred when writing. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Error { - /// The given offset is out of bounds. - OffsetOutOfBounds, - /// The given length is out of bounds. - LengthOutOfBounds, - /// The attribute value is an invalid for writing. - InvalidAttributeValue, - /// The value is too large for the encoding form. 
- ValueTooLarge, - /// Unsupported word size. - UnsupportedWordSize(u8), - /// Unsupported DWARF version. - UnsupportedVersion(u16), - /// The unit length is too large for the requested DWARF format. - InitialLengthOverflow, - /// The address is invalid. - InvalidAddress, - /// The reference is invalid. - InvalidReference, - /// A requested feature requires a different DWARF version. - NeedVersion(u16), - /// Strings in line number program have mismatched forms. - LineStringFormMismatch, - /// The range is empty or otherwise invalid. - InvalidRange, - /// The line number program encoding is incompatible with the unit encoding. - IncompatibleLineProgramEncoding, - /// Could not encode code offset for a frame instruction. - InvalidFrameCodeOffset(u32), - /// Could not encode data offset for a frame instruction. - InvalidFrameDataOffset(i32), - /// Unsupported eh_frame pointer encoding. - UnsupportedPointerEncoding(constants::DwEhPe), - /// Unsupported reference in CFI expression. - UnsupportedCfiExpressionReference, - /// Unsupported forward reference in expression. - UnsupportedExpressionForwardReference, -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - match *self { - Error::OffsetOutOfBounds => write!(f, "The given offset is out of bounds."), - Error::LengthOutOfBounds => write!(f, "The given length is out of bounds."), - Error::InvalidAttributeValue => { - write!(f, "The attribute value is an invalid for writing.") - } - Error::ValueTooLarge => write!(f, "The value is too large for the encoding form."), - Error::UnsupportedWordSize(size) => write!(f, "Unsupported word size: {}", size), - Error::UnsupportedVersion(version) => { - write!(f, "Unsupported DWARF version: {}", version) - } - Error::InitialLengthOverflow => write!( - f, - "The unit length is too large for the requested DWARF format." 
- ), - Error::InvalidAddress => write!(f, "The address is invalid."), - Error::InvalidReference => write!(f, "The reference is invalid."), - Error::NeedVersion(version) => write!( - f, - "A requested feature requires a DWARF version {}.", - version - ), - Error::LineStringFormMismatch => { - write!(f, "Strings in line number program have mismatched forms.") - } - Error::InvalidRange => write!(f, "The range is empty or otherwise invalid."), - Error::IncompatibleLineProgramEncoding => write!( - f, - "The line number program encoding is incompatible with the unit encoding." - ), - Error::InvalidFrameCodeOffset(offset) => write!( - f, - "Could not encode code offset ({}) for a frame instruction.", - offset, - ), - Error::InvalidFrameDataOffset(offset) => write!( - f, - "Could not encode data offset ({}) for a frame instruction.", - offset, - ), - Error::UnsupportedPointerEncoding(eh_pe) => { - write!(f, "Unsupported eh_frame pointer encoding ({}).", eh_pe) - } - Error::UnsupportedCfiExpressionReference => { - write!(f, "Unsupported reference in CFI expression.") - } - Error::UnsupportedExpressionForwardReference => { - write!(f, "Unsupported forward reference in expression.") - } - } - } -} - -impl error::Error for Error {} - -/// The result of a write. -pub type Result = result::Result; - -/// An address. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum Address { - /// A fixed address that does not require relocation. - Constant(u64), - /// An address that is relative to a symbol which may be relocated. - Symbol { - /// The symbol that the address is relative to. - /// - /// The meaning of this value is decided by the writer, but - /// will typically be an index into a symbol table. - symbol: usize, - /// The offset of the address relative to the symbol. - /// - /// This will typically be used as the addend in a relocation. - addend: i64, - }, -} - -/// A reference to a `.debug_info` entry. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum Reference { - /// An external symbol. - /// - /// The meaning of this value is decided by the writer, but - /// will typically be an index into a symbol table. - Symbol(usize), - /// An entry in the same section. - /// - /// This only supports references in units that are emitted together. - Entry(UnitId, UnitEntryId), -} - -// This type is only used in debug assertions. -#[cfg(not(debug_assertions))] -type BaseId = (); - -#[cfg(debug_assertions)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -struct BaseId(usize); - -#[cfg(debug_assertions)] -impl Default for BaseId { - fn default() -> Self { - use std::sync::atomic; - static BASE_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(0); - BaseId(BASE_ID.fetch_add(1, atomic::Ordering::Relaxed)) - } -} - -#[cfg(feature = "read")] -mod convert { - use super::*; - use crate::read; - - pub(crate) use super::unit::convert::*; - - /// An error that occurred when converting a read value into a write value. - #[derive(Debug, Clone, Copy, PartialEq, Eq)] - pub enum ConvertError { - /// An error occurred when reading. - Read(read::Error), - /// Writing of this attribute value is not implemented yet. - UnsupportedAttributeValue, - /// This attribute value is an invalid name/form combination. - InvalidAttributeValue, - /// A `.debug_info` reference does not refer to a valid entry. - InvalidDebugInfoOffset, - /// An address could not be converted. - InvalidAddress, - /// Writing this line number instruction is not implemented yet. - UnsupportedLineInstruction, - /// Writing this form of line string is not implemented yet. - UnsupportedLineStringForm, - /// A `.debug_line` file index is invalid. - InvalidFileIndex, - /// A `.debug_line` directory index is invalid. - InvalidDirectoryIndex, - /// A `.debug_line` line base is invalid. - InvalidLineBase, - /// A `.debug_line` reference is invalid. 
- InvalidLineRef, - /// A `.debug_info` unit entry reference is invalid. - InvalidUnitRef, - /// A `.debug_info` reference is invalid. - InvalidDebugInfoRef, - /// Invalid relative address in a range list. - InvalidRangeRelativeAddress, - /// Writing this CFI instruction is not implemented yet. - UnsupportedCfiInstruction, - /// Writing indirect pointers is not implemented yet. - UnsupportedIndirectAddress, - /// Writing this expression operation is not implemented yet. - UnsupportedOperation, - /// Operation branch target is invalid. - InvalidBranchTarget, - /// Writing this unit type is not supported yet. - UnsupportedUnitType, - } - - impl fmt::Display for ConvertError { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - use self::ConvertError::*; - match *self { - Read(ref e) => e.fmt(f), - UnsupportedAttributeValue => { - write!(f, "Writing of this attribute value is not implemented yet.") - } - InvalidAttributeValue => write!( - f, - "This attribute value is an invalid name/form combination." - ), - InvalidDebugInfoOffset => write!( - f, - "A `.debug_info` reference does not refer to a valid entry." - ), - InvalidAddress => write!(f, "An address could not be converted."), - UnsupportedLineInstruction => write!( - f, - "Writing this line number instruction is not implemented yet." - ), - UnsupportedLineStringForm => write!( - f, - "Writing this form of line string is not implemented yet." 
- ), - InvalidFileIndex => write!(f, "A `.debug_line` file index is invalid."), - InvalidDirectoryIndex => write!(f, "A `.debug_line` directory index is invalid."), - InvalidLineBase => write!(f, "A `.debug_line` line base is invalid."), - InvalidLineRef => write!(f, "A `.debug_line` reference is invalid."), - InvalidUnitRef => write!(f, "A `.debug_info` unit entry reference is invalid."), - InvalidDebugInfoRef => write!(f, "A `.debug_info` reference is invalid."), - InvalidRangeRelativeAddress => { - write!(f, "Invalid relative address in a range list.") - } - UnsupportedCfiInstruction => { - write!(f, "Writing this CFI instruction is not implemented yet.") - } - UnsupportedIndirectAddress => { - write!(f, "Writing indirect pointers is not implemented yet.") - } - UnsupportedOperation => write!( - f, - "Writing this expression operation is not implemented yet." - ), - InvalidBranchTarget => write!(f, "Operation branch target is invalid."), - UnsupportedUnitType => write!(f, "Writing this unit type is not supported yet."), - } - } - } - - impl error::Error for ConvertError {} - - impl From for ConvertError { - fn from(e: read::Error) -> Self { - ConvertError::Read(e) - } - } - - /// The result of a conversion. 
- pub type ConvertResult = result::Result; -} -#[cfg(feature = "read")] -pub use self::convert::*; diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/op.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/op.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/op.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/op.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1618 +0,0 @@ -use alloc::boxed::Box; -use alloc::vec::Vec; - -use crate::common::{Encoding, Register}; -use crate::constants::{self, DwOp}; -use crate::leb128::write::{sleb128_size, uleb128_size}; -use crate::write::{ - Address, DebugInfoReference, Error, Reference, Result, UnitEntryId, UnitOffsets, Writer, -}; - -/// The bytecode for a DWARF expression or location description. -#[derive(Debug, Default, Clone, PartialEq, Eq, Hash)] -pub struct Expression { - operations: Vec, -} - -impl Expression { - /// Create an empty expression. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Create an expression from raw bytecode. - /// - /// This does not support operations that require references, such as `DW_OP_addr`. - #[inline] - pub fn raw(bytecode: Vec) -> Self { - Expression { - operations: vec![Operation::Raw(bytecode)], - } - } - - /// Add an operation to the expression. - /// - /// This should only be used for operations that have no explicit operands. - pub fn op(&mut self, opcode: DwOp) { - self.operations.push(Operation::Simple(opcode)); - } - - /// Add a `DW_OP_addr` operation to the expression. - pub fn op_addr(&mut self, address: Address) { - self.operations.push(Operation::Address(address)); - } - - /// Add a `DW_OP_constu` operation to the expression. - /// - /// This may be emitted as a smaller equivalent operation. - pub fn op_constu(&mut self, value: u64) { - self.operations.push(Operation::UnsignedConstant(value)); - } - - /// Add a `DW_OP_consts` operation to the expression. - /// - /// This may be emitted as a smaller equivalent operation. 
- pub fn op_consts(&mut self, value: i64) { - self.operations.push(Operation::SignedConstant(value)); - } - - /// Add a `DW_OP_const_type` or `DW_OP_GNU_const_type` operation to the expression. - pub fn op_const_type(&mut self, base: UnitEntryId, value: Box<[u8]>) { - self.operations.push(Operation::ConstantType(base, value)); - } - - /// Add a `DW_OP_fbreg` operation to the expression. - pub fn op_fbreg(&mut self, offset: i64) { - self.operations.push(Operation::FrameOffset(offset)); - } - - /// Add a `DW_OP_bregx` operation to the expression. - /// - /// This may be emitted as a smaller equivalent operation. - pub fn op_breg(&mut self, register: Register, offset: i64) { - self.operations - .push(Operation::RegisterOffset(register, offset)); - } - - /// Add a `DW_OP_regval_type` or `DW_OP_GNU_regval_type` operation to the expression. - /// - /// This may be emitted as a smaller equivalent operation. - pub fn op_regval_type(&mut self, register: Register, base: UnitEntryId) { - self.operations - .push(Operation::RegisterType(register, base)); - } - - /// Add a `DW_OP_pick` operation to the expression. - /// - /// This may be emitted as a `DW_OP_dup` or `DW_OP_over` operation. - pub fn op_pick(&mut self, index: u8) { - self.operations.push(Operation::Pick(index)); - } - - /// Add a `DW_OP_deref` operation to the expression. - pub fn op_deref(&mut self) { - self.operations.push(Operation::Deref { space: false }); - } - - /// Add a `DW_OP_xderef` operation to the expression. - pub fn op_xderef(&mut self) { - self.operations.push(Operation::Deref { space: true }); - } - - /// Add a `DW_OP_deref_size` operation to the expression. - pub fn op_deref_size(&mut self, size: u8) { - self.operations - .push(Operation::DerefSize { size, space: false }); - } - - /// Add a `DW_OP_xderef_size` operation to the expression. 
- pub fn op_xderef_size(&mut self, size: u8) { - self.operations - .push(Operation::DerefSize { size, space: true }); - } - - /// Add a `DW_OP_deref_type` or `DW_OP_GNU_deref_type` operation to the expression. - pub fn op_deref_type(&mut self, size: u8, base: UnitEntryId) { - self.operations.push(Operation::DerefType { - size, - base, - space: false, - }); - } - - /// Add a `DW_OP_xderef_type` operation to the expression. - pub fn op_xderef_type(&mut self, size: u8, base: UnitEntryId) { - self.operations.push(Operation::DerefType { - size, - base, - space: true, - }); - } - - /// Add a `DW_OP_plus_uconst` operation to the expression. - pub fn op_plus_uconst(&mut self, value: u64) { - self.operations.push(Operation::PlusConstant(value)); - } - - /// Add a `DW_OP_skip` operation to the expression. - /// - /// Returns the index of the operation. The caller must call `set_target` with - /// this index to set the target of the branch. - pub fn op_skip(&mut self) -> usize { - let index = self.next_index(); - self.operations.push(Operation::Skip(!0)); - index - } - - /// Add a `DW_OP_bra` operation to the expression. - /// - /// Returns the index of the operation. The caller must call `set_target` with - /// this index to set the target of the branch. - pub fn op_bra(&mut self) -> usize { - let index = self.next_index(); - self.operations.push(Operation::Branch(!0)); - index - } - - /// Return the index that will be assigned to the next operation. - /// - /// This can be passed to `set_target`. - #[inline] - pub fn next_index(&self) -> usize { - self.operations.len() - } - - /// Set the target of a `DW_OP_skip` or `DW_OP_bra` operation . 
- pub fn set_target(&mut self, operation: usize, new_target: usize) { - debug_assert!(new_target <= self.next_index()); - debug_assert_ne!(operation, new_target); - match self.operations[operation] { - Operation::Skip(ref mut target) | Operation::Branch(ref mut target) => { - *target = new_target; - } - _ => unimplemented!(), - } - } - - /// Add a `DW_OP_call4` operation to the expression. - pub fn op_call(&mut self, entry: UnitEntryId) { - self.operations.push(Operation::Call(entry)); - } - - /// Add a `DW_OP_call_ref` operation to the expression. - pub fn op_call_ref(&mut self, entry: Reference) { - self.operations.push(Operation::CallRef(entry)); - } - - /// Add a `DW_OP_convert` or `DW_OP_GNU_convert` operation to the expression. - /// - /// `base` is the DIE of the base type, or `None` for the generic type. - pub fn op_convert(&mut self, base: Option) { - self.operations.push(Operation::Convert(base)); - } - - /// Add a `DW_OP_reinterpret` or `DW_OP_GNU_reinterpret` operation to the expression. - /// - /// `base` is the DIE of the base type, or `None` for the generic type. - pub fn op_reinterpret(&mut self, base: Option) { - self.operations.push(Operation::Reinterpret(base)); - } - - /// Add a `DW_OP_entry_value` or `DW_OP_GNU_entry_value` operation to the expression. - pub fn op_entry_value(&mut self, expression: Expression) { - self.operations.push(Operation::EntryValue(expression)); - } - - /// Add a `DW_OP_regx` operation to the expression. - /// - /// This may be emitted as a smaller equivalent operation. - pub fn op_reg(&mut self, register: Register) { - self.operations.push(Operation::Register(register)); - } - - /// Add a `DW_OP_implicit_value` operation to the expression. - pub fn op_implicit_value(&mut self, data: Box<[u8]>) { - self.operations.push(Operation::ImplicitValue(data)); - } - - /// Add a `DW_OP_implicit_pointer` or `DW_OP_GNU_implicit_pointer` operation to the expression. 
- pub fn op_implicit_pointer(&mut self, entry: Reference, byte_offset: i64) { - self.operations - .push(Operation::ImplicitPointer { entry, byte_offset }); - } - - /// Add a `DW_OP_piece` operation to the expression. - pub fn op_piece(&mut self, size_in_bytes: u64) { - self.operations.push(Operation::Piece { size_in_bytes }); - } - - /// Add a `DW_OP_bit_piece` operation to the expression. - pub fn op_bit_piece(&mut self, size_in_bits: u64, bit_offset: u64) { - self.operations.push(Operation::BitPiece { - size_in_bits, - bit_offset, - }); - } - - /// Add a `DW_OP_GNU_parameter_ref` operation to the expression. - pub fn op_gnu_parameter_ref(&mut self, entry: UnitEntryId) { - self.operations.push(Operation::ParameterRef(entry)); - } - - /// Add a `DW_OP_WASM_location 0x0` operation to the expression. - pub fn op_wasm_local(&mut self, index: u32) { - self.operations.push(Operation::WasmLocal(index)); - } - - /// Add a `DW_OP_WASM_location 0x1` operation to the expression. - pub fn op_wasm_global(&mut self, index: u32) { - self.operations.push(Operation::WasmGlobal(index)); - } - - /// Add a `DW_OP_WASM_location 0x2` operation to the expression. - pub fn op_wasm_stack(&mut self, index: u32) { - self.operations.push(Operation::WasmStack(index)); - } - - pub(crate) fn size(&self, encoding: Encoding, unit_offsets: Option<&UnitOffsets>) -> usize { - let mut size = 0; - for operation in &self.operations { - size += operation.size(encoding, unit_offsets); - } - size - } - - pub(crate) fn write( - &self, - w: &mut W, - mut refs: Option<&mut Vec>, - encoding: Encoding, - unit_offsets: Option<&UnitOffsets>, - ) -> Result<()> { - // TODO: only calculate offsets if needed? 
- let mut offsets = Vec::with_capacity(self.operations.len()); - let mut offset = w.len(); - for operation in &self.operations { - offsets.push(offset); - offset += operation.size(encoding, unit_offsets); - } - offsets.push(offset); - for (operation, offset) in self.operations.iter().zip(offsets.iter().copied()) { - debug_assert_eq!(w.len(), offset); - operation.write(w, refs.as_deref_mut(), encoding, unit_offsets, &offsets)?; - } - Ok(()) - } -} - -/// A single DWARF operation. -// -// This type is intentionally not public so that we can change the -// representation of expressions as needed. -// -// Variants are listed in the order they appear in Section 2.5. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -enum Operation { - /// Raw bytecode. - /// - /// Does not support references. - Raw(Vec), - /// An operation that has no explicit operands. - /// - /// Represents: - /// - `DW_OP_drop`, `DW_OP_swap`, `DW_OP_rot` - /// - `DW_OP_push_object_address`, `DW_OP_form_tls_address`, `DW_OP_call_frame_cfa` - /// - `DW_OP_abs`, `DW_OP_and`, `DW_OP_div`, `DW_OP_minus`, `DW_OP_mod`, `DW_OP_mul`, - /// `DW_OP_neg`, `DW_OP_not`, `DW_OP_or`, `DW_OP_plus`, `DW_OP_shl`, `DW_OP_shr`, - /// `DW_OP_shra`, `DW_OP_xor` - /// - `DW_OP_le`, `DW_OP_ge`, `DW_OP_eq`, `DW_OP_lt`, `DW_OP_gt`, `DW_OP_ne` - /// - `DW_OP_nop` - /// - `DW_OP_stack_value` - Simple(DwOp), - /// Relocate the address if needed, and push it on the stack. - /// - /// Represents `DW_OP_addr`. - Address(Address), - /// Push an unsigned constant value on the stack. - /// - /// Represents `DW_OP_constu`. - UnsignedConstant(u64), - /// Push a signed constant value on the stack. - /// - /// Represents `DW_OP_consts`. - SignedConstant(i64), - /* TODO: requires .debug_addr write support - /// Read the address at the given index in `.debug_addr, relocate the address if needed, - /// and push it on the stack. - /// - /// Represents `DW_OP_addrx`. 
- AddressIndex(DebugAddrIndex), - /// Read the address at the given index in `.debug_addr, and push it on the stack. - /// Do not relocate the address. - /// - /// Represents `DW_OP_constx`. - ConstantIndex(DebugAddrIndex), - */ - /// Interpret the value bytes as a constant of a given type, and push it on the stack. - /// - /// Represents `DW_OP_const_type`. - ConstantType(UnitEntryId, Box<[u8]>), - /// Compute the frame base (using `DW_AT_frame_base`), add the - /// given offset, and then push the resulting sum on the stack. - /// - /// Represents `DW_OP_fbreg`. - FrameOffset(i64), - /// Find the contents of the given register, add the offset, and then - /// push the resulting sum on the stack. - /// - /// Represents `DW_OP_bregx`. - RegisterOffset(Register, i64), - /// Interpret the contents of the given register as a value of the given type, - /// and push it on the stack. - /// - /// Represents `DW_OP_regval_type`. - RegisterType(Register, UnitEntryId), - /// Copy the item at a stack index and push it on top of the stack. - /// - /// Represents `DW_OP_pick`, `DW_OP_dup`, and `DW_OP_over`. - Pick(u8), - /// Pop the topmost value of the stack, dereference it, and push the - /// resulting value. - /// - /// Represents `DW_OP_deref` and `DW_OP_xderef`. - Deref { - /// True if the dereference operation takes an address space - /// argument from the stack; false otherwise. - space: bool, - }, - /// Pop the topmost value of the stack, dereference it to obtain a value - /// of the given size, and push the resulting value. - /// - /// Represents `DW_OP_deref_size` and `DW_OP_xderef_size`. - DerefSize { - /// True if the dereference operation takes an address space - /// argument from the stack; false otherwise. - space: bool, - /// The size of the data to dereference. - size: u8, - }, - /// Pop the topmost value of the stack, dereference it to obtain a value - /// of the given type, and push the resulting value. 
- /// - /// Represents `DW_OP_deref_type` and `DW_OP_xderef_type`. - DerefType { - /// True if the dereference operation takes an address space - /// argument from the stack; false otherwise. - space: bool, - /// The size of the data to dereference. - size: u8, - /// The DIE of the base type, or `None` for the generic type. - base: UnitEntryId, - }, - /// Add an unsigned constant to the topmost value on the stack. - /// - /// Represents `DW_OP_plus_uconst`. - PlusConstant(u64), - /// Unconditional branch to the target location. - /// - /// The value is the index within the expression of the operation to branch to. - /// This will be converted to a relative offset when writing. - /// - /// Represents `DW_OP_skip`. - Skip(usize), - /// Branch to the target location if the top of stack is nonzero. - /// - /// The value is the index within the expression of the operation to branch to. - /// This will be converted to a relative offset when writing. - /// - /// Represents `DW_OP_bra`. - Branch(usize), - /// Evaluate a DWARF expression as a subroutine. - /// - /// The expression comes from the `DW_AT_location` attribute of the indicated DIE. - /// - /// Represents `DW_OP_call4`. - Call(UnitEntryId), - /// Evaluate an external DWARF expression as a subroutine. - /// - /// The expression comes from the `DW_AT_location` attribute of the indicated DIE, - /// which may be in another compilation unit or shared object. - /// - /// Represents `DW_OP_call_ref`. - CallRef(Reference), - /// Pop the top stack entry, convert it to a different type, and push it on the stack. - /// - /// Represents `DW_OP_convert`. - Convert(Option), - /// Pop the top stack entry, reinterpret the bits in its value as a different type, - /// and push it on the stack. - /// - /// Represents `DW_OP_reinterpret`. - Reinterpret(Option), - /// Evaluate an expression at the entry to the current subprogram, and push it on the stack. - /// - /// Represents `DW_OP_entry_value`. 
- EntryValue(Expression), - // FIXME: EntryRegister - /// Indicate that this piece's location is in the given register. - /// - /// Completes the piece or expression. - /// - /// Represents `DW_OP_regx`. - Register(Register), - /// The object has no location, but has a known constant value. - /// - /// Completes the piece or expression. - /// - /// Represents `DW_OP_implicit_value`. - ImplicitValue(Box<[u8]>), - /// The object is a pointer to a value which has no actual location, such as - /// an implicit value or a stack value. - /// - /// Completes the piece or expression. - /// - /// Represents `DW_OP_implicit_pointer`. - ImplicitPointer { - /// The DIE of the value that this is an implicit pointer into. - entry: Reference, - /// The byte offset into the value that the implicit pointer points to. - byte_offset: i64, - }, - /// Terminate a piece. - /// - /// Represents `DW_OP_piece`. - Piece { - /// The size of this piece in bytes. - size_in_bytes: u64, - }, - /// Terminate a piece with a size in bits. - /// - /// Represents `DW_OP_bit_piece`. - BitPiece { - /// The size of this piece in bits. - size_in_bits: u64, - /// The bit offset of this piece. - bit_offset: u64, - }, - /// This represents a parameter that was optimized out. - /// - /// The entry is the definition of the parameter, and is matched to - /// the `DW_TAG_GNU_call_site_parameter` in the caller that also - /// points to the same definition of the parameter. - /// - /// Represents `DW_OP_GNU_parameter_ref`. - ParameterRef(UnitEntryId), - /// The index of a local in the currently executing function. - /// - /// Represents `DW_OP_WASM_location 0x00`. - WasmLocal(u32), - /// The index of a global. - /// - /// Represents `DW_OP_WASM_location 0x01`. - WasmGlobal(u32), - /// The index of an item on the operand stack. - /// - /// Represents `DW_OP_WASM_location 0x02`. 
- WasmStack(u32), -} - -impl Operation { - fn size(&self, encoding: Encoding, unit_offsets: Option<&UnitOffsets>) -> usize { - let base_size = |base| { - // Errors are handled during writes. - match unit_offsets { - Some(offsets) => uleb128_size(offsets.unit_offset(base)), - None => 0, - } - }; - 1 + match *self { - Operation::Raw(ref bytecode) => return bytecode.len(), - Operation::Simple(_) => 0, - Operation::Address(_) => encoding.address_size as usize, - Operation::UnsignedConstant(value) => { - if value < 32 { - 0 - } else { - uleb128_size(value) - } - } - Operation::SignedConstant(value) => sleb128_size(value), - Operation::ConstantType(base, ref value) => base_size(base) + 1 + value.len(), - Operation::FrameOffset(offset) => sleb128_size(offset), - Operation::RegisterOffset(register, offset) => { - if register.0 < 32 { - sleb128_size(offset) - } else { - uleb128_size(register.0.into()) + sleb128_size(offset) - } - } - Operation::RegisterType(register, base) => { - uleb128_size(register.0.into()) + base_size(base) - } - Operation::Pick(index) => { - if index > 1 { - 1 - } else { - 0 - } - } - Operation::Deref { .. } => 0, - Operation::DerefSize { .. } => 1, - Operation::DerefType { base, .. 
} => 1 + base_size(base), - Operation::PlusConstant(value) => uleb128_size(value), - Operation::Skip(_) => 2, - Operation::Branch(_) => 2, - Operation::Call(_) => 4, - Operation::CallRef(_) => encoding.format.word_size() as usize, - Operation::Convert(base) => match base { - Some(base) => base_size(base), - None => 1, - }, - Operation::Reinterpret(base) => match base { - Some(base) => base_size(base), - None => 1, - }, - Operation::EntryValue(ref expression) => { - let length = expression.size(encoding, unit_offsets); - uleb128_size(length as u64) + length - } - Operation::Register(register) => { - if register.0 < 32 { - 0 - } else { - uleb128_size(register.0.into()) - } - } - Operation::ImplicitValue(ref data) => uleb128_size(data.len() as u64) + data.len(), - Operation::ImplicitPointer { byte_offset, .. } => { - encoding.format.word_size() as usize + sleb128_size(byte_offset) - } - Operation::Piece { size_in_bytes } => uleb128_size(size_in_bytes), - Operation::BitPiece { - size_in_bits, - bit_offset, - } => uleb128_size(size_in_bits) + uleb128_size(bit_offset), - Operation::ParameterRef(_) => 4, - Operation::WasmLocal(index) - | Operation::WasmGlobal(index) - | Operation::WasmStack(index) => 1 + uleb128_size(index.into()), - } - } - - pub(crate) fn write( - &self, - w: &mut W, - refs: Option<&mut Vec>, - encoding: Encoding, - unit_offsets: Option<&UnitOffsets>, - offsets: &[usize], - ) -> Result<()> { - let entry_offset = |entry| match unit_offsets { - Some(offsets) => { - let offset = offsets.unit_offset(entry); - if offset == 0 { - Err(Error::UnsupportedExpressionForwardReference) - } else { - Ok(offset) - } - } - None => Err(Error::UnsupportedCfiExpressionReference), - }; - match *self { - Operation::Raw(ref bytecode) => w.write(bytecode)?, - Operation::Simple(opcode) => w.write_u8(opcode.0)?, - Operation::Address(address) => { - w.write_u8(constants::DW_OP_addr.0)?; - w.write_address(address, encoding.address_size)?; - } - Operation::UnsignedConstant(value) 
=> { - if value < 32 { - w.write_u8(constants::DW_OP_lit0.0 + value as u8)?; - } else { - w.write_u8(constants::DW_OP_constu.0)?; - w.write_uleb128(value)?; - } - } - Operation::SignedConstant(value) => { - w.write_u8(constants::DW_OP_consts.0)?; - w.write_sleb128(value)?; - } - Operation::ConstantType(base, ref value) => { - if encoding.version >= 5 { - w.write_u8(constants::DW_OP_const_type.0)?; - } else { - w.write_u8(constants::DW_OP_GNU_const_type.0)?; - } - w.write_uleb128(entry_offset(base)?)?; - w.write_udata(value.len() as u64, 1)?; - w.write(value)?; - } - Operation::FrameOffset(offset) => { - w.write_u8(constants::DW_OP_fbreg.0)?; - w.write_sleb128(offset)?; - } - Operation::RegisterOffset(register, offset) => { - if register.0 < 32 { - w.write_u8(constants::DW_OP_breg0.0 + register.0 as u8)?; - } else { - w.write_u8(constants::DW_OP_bregx.0)?; - w.write_uleb128(register.0.into())?; - } - w.write_sleb128(offset)?; - } - Operation::RegisterType(register, base) => { - if encoding.version >= 5 { - w.write_u8(constants::DW_OP_regval_type.0)?; - } else { - w.write_u8(constants::DW_OP_GNU_regval_type.0)?; - } - w.write_uleb128(register.0.into())?; - w.write_uleb128(entry_offset(base)?)?; - } - Operation::Pick(index) => match index { - 0 => w.write_u8(constants::DW_OP_dup.0)?, - 1 => w.write_u8(constants::DW_OP_over.0)?, - _ => { - w.write_u8(constants::DW_OP_pick.0)?; - w.write_u8(index)?; - } - }, - Operation::Deref { space } => { - if space { - w.write_u8(constants::DW_OP_xderef.0)?; - } else { - w.write_u8(constants::DW_OP_deref.0)?; - } - } - Operation::DerefSize { space, size } => { - if space { - w.write_u8(constants::DW_OP_xderef_size.0)?; - } else { - w.write_u8(constants::DW_OP_deref_size.0)?; - } - w.write_u8(size)?; - } - Operation::DerefType { space, size, base } => { - if space { - w.write_u8(constants::DW_OP_xderef_type.0)?; - } else { - if encoding.version >= 5 { - w.write_u8(constants::DW_OP_deref_type.0)?; - } else { - 
w.write_u8(constants::DW_OP_GNU_deref_type.0)?; - } - } - w.write_u8(size)?; - w.write_uleb128(entry_offset(base)?)?; - } - Operation::PlusConstant(value) => { - w.write_u8(constants::DW_OP_plus_uconst.0)?; - w.write_uleb128(value)?; - } - Operation::Skip(target) => { - w.write_u8(constants::DW_OP_skip.0)?; - let offset = offsets[target] as i64 - (w.len() as i64 + 2); - w.write_sdata(offset, 2)?; - } - Operation::Branch(target) => { - w.write_u8(constants::DW_OP_bra.0)?; - let offset = offsets[target] as i64 - (w.len() as i64 + 2); - w.write_sdata(offset, 2)?; - } - Operation::Call(entry) => { - w.write_u8(constants::DW_OP_call4.0)?; - // TODO: this probably won't work in practice, because we may - // only know the offsets of base type DIEs at this point. - w.write_udata(entry_offset(entry)?, 4)?; - } - Operation::CallRef(entry) => { - w.write_u8(constants::DW_OP_call_ref.0)?; - let size = encoding.format.word_size(); - match entry { - Reference::Symbol(symbol) => w.write_reference(symbol, size)?, - Reference::Entry(unit, entry) => { - let refs = refs.ok_or(Error::InvalidReference)?; - refs.push(DebugInfoReference { - offset: w.len(), - unit, - entry, - size, - }); - w.write_udata(0, size)?; - } - } - } - Operation::Convert(base) => { - if encoding.version >= 5 { - w.write_u8(constants::DW_OP_convert.0)?; - } else { - w.write_u8(constants::DW_OP_GNU_convert.0)?; - } - match base { - Some(base) => w.write_uleb128(entry_offset(base)?)?, - None => w.write_u8(0)?, - } - } - Operation::Reinterpret(base) => { - if encoding.version >= 5 { - w.write_u8(constants::DW_OP_reinterpret.0)?; - } else { - w.write_u8(constants::DW_OP_GNU_reinterpret.0)?; - } - match base { - Some(base) => w.write_uleb128(entry_offset(base)?)?, - None => w.write_u8(0)?, - } - } - Operation::EntryValue(ref expression) => { - if encoding.version >= 5 { - w.write_u8(constants::DW_OP_entry_value.0)?; - } else { - w.write_u8(constants::DW_OP_GNU_entry_value.0)?; - } - let length = 
expression.size(encoding, unit_offsets); - w.write_uleb128(length as u64)?; - expression.write(w, refs, encoding, unit_offsets)?; - } - Operation::Register(register) => { - if register.0 < 32 { - w.write_u8(constants::DW_OP_reg0.0 + register.0 as u8)?; - } else { - w.write_u8(constants::DW_OP_regx.0)?; - w.write_uleb128(register.0.into())?; - } - } - Operation::ImplicitValue(ref data) => { - w.write_u8(constants::DW_OP_implicit_value.0)?; - w.write_uleb128(data.len() as u64)?; - w.write(data)?; - } - Operation::ImplicitPointer { entry, byte_offset } => { - if encoding.version >= 5 { - w.write_u8(constants::DW_OP_implicit_pointer.0)?; - } else { - w.write_u8(constants::DW_OP_GNU_implicit_pointer.0)?; - } - let size = if encoding.version == 2 { - encoding.address_size - } else { - encoding.format.word_size() - }; - match entry { - Reference::Symbol(symbol) => { - w.write_reference(symbol, size)?; - } - Reference::Entry(unit, entry) => { - let refs = refs.ok_or(Error::InvalidReference)?; - refs.push(DebugInfoReference { - offset: w.len(), - unit, - entry, - size, - }); - w.write_udata(0, size)?; - } - } - w.write_sleb128(byte_offset)?; - } - Operation::Piece { size_in_bytes } => { - w.write_u8(constants::DW_OP_piece.0)?; - w.write_uleb128(size_in_bytes)?; - } - Operation::BitPiece { - size_in_bits, - bit_offset, - } => { - w.write_u8(constants::DW_OP_bit_piece.0)?; - w.write_uleb128(size_in_bits)?; - w.write_uleb128(bit_offset)?; - } - Operation::ParameterRef(entry) => { - w.write_u8(constants::DW_OP_GNU_parameter_ref.0)?; - w.write_udata(entry_offset(entry)?, 4)?; - } - Operation::WasmLocal(index) => { - w.write(&[constants::DW_OP_WASM_location.0, 0])?; - w.write_uleb128(index.into())?; - } - Operation::WasmGlobal(index) => { - w.write(&[constants::DW_OP_WASM_location.0, 1])?; - w.write_uleb128(index.into())?; - } - Operation::WasmStack(index) => { - w.write(&[constants::DW_OP_WASM_location.0, 2])?; - w.write_uleb128(index.into())?; - } - } - Ok(()) - } -} - 
-#[cfg(feature = "read")] -pub(crate) mod convert { - use super::*; - use crate::common::UnitSectionOffset; - use crate::read::{self, Reader}; - use crate::write::{ConvertError, ConvertResult, UnitEntryId, UnitId}; - use std::collections::HashMap; - - impl Expression { - /// Create an expression from the input expression. - pub fn from>( - from_expression: read::Expression, - encoding: Encoding, - dwarf: Option<&read::Dwarf>, - unit: Option<&read::Unit>, - entry_ids: Option<&HashMap>, - convert_address: &dyn Fn(u64) -> Option
, - ) -> ConvertResult { - let convert_unit_offset = |offset: read::UnitOffset| -> ConvertResult<_> { - let entry_ids = entry_ids.ok_or(ConvertError::UnsupportedOperation)?; - let unit = unit.ok_or(ConvertError::UnsupportedOperation)?; - let id = entry_ids - .get(&offset.to_unit_section_offset(unit)) - .ok_or(ConvertError::InvalidUnitRef)?; - Ok(id.1) - }; - let convert_debug_info_offset = |offset| -> ConvertResult<_> { - // TODO: support relocations - let entry_ids = entry_ids.ok_or(ConvertError::UnsupportedOperation)?; - let id = entry_ids - .get(&UnitSectionOffset::DebugInfoOffset(offset)) - .ok_or(ConvertError::InvalidDebugInfoRef)?; - Ok(Reference::Entry(id.0, id.1)) - }; - - // Calculate offsets for use in branch/skip operations. - let mut offsets = Vec::new(); - let mut offset = 0; - let mut from_operations = from_expression.clone().operations(encoding); - while from_operations.next()?.is_some() { - offsets.push(offset); - offset = from_operations.offset_from(&from_expression); - } - offsets.push(from_expression.0.len()); - - let mut from_operations = from_expression.clone().operations(encoding); - let mut operations = Vec::new(); - while let Some(from_operation) = from_operations.next()? 
{ - let operation = match from_operation { - read::Operation::Deref { - base_type, - size, - space, - } => { - if base_type.0 != 0 { - let base = convert_unit_offset(base_type)?; - Operation::DerefType { space, size, base } - } else if size != encoding.address_size { - Operation::DerefSize { space, size } - } else { - Operation::Deref { space } - } - } - read::Operation::Drop => Operation::Simple(constants::DW_OP_drop), - read::Operation::Pick { index } => Operation::Pick(index), - read::Operation::Swap => Operation::Simple(constants::DW_OP_swap), - read::Operation::Rot => Operation::Simple(constants::DW_OP_rot), - read::Operation::Abs => Operation::Simple(constants::DW_OP_abs), - read::Operation::And => Operation::Simple(constants::DW_OP_and), - read::Operation::Div => Operation::Simple(constants::DW_OP_div), - read::Operation::Minus => Operation::Simple(constants::DW_OP_minus), - read::Operation::Mod => Operation::Simple(constants::DW_OP_mod), - read::Operation::Mul => Operation::Simple(constants::DW_OP_mul), - read::Operation::Neg => Operation::Simple(constants::DW_OP_neg), - read::Operation::Not => Operation::Simple(constants::DW_OP_not), - read::Operation::Or => Operation::Simple(constants::DW_OP_or), - read::Operation::Plus => Operation::Simple(constants::DW_OP_plus), - read::Operation::PlusConstant { value } => Operation::PlusConstant(value), - read::Operation::Shl => Operation::Simple(constants::DW_OP_shl), - read::Operation::Shr => Operation::Simple(constants::DW_OP_shr), - read::Operation::Shra => Operation::Simple(constants::DW_OP_shra), - read::Operation::Xor => Operation::Simple(constants::DW_OP_xor), - read::Operation::Eq => Operation::Simple(constants::DW_OP_eq), - read::Operation::Ge => Operation::Simple(constants::DW_OP_ge), - read::Operation::Gt => Operation::Simple(constants::DW_OP_gt), - read::Operation::Le => Operation::Simple(constants::DW_OP_le), - read::Operation::Lt => Operation::Simple(constants::DW_OP_lt), - read::Operation::Ne => 
Operation::Simple(constants::DW_OP_ne), - read::Operation::Bra { target } => { - let offset = from_operations - .offset_from(&from_expression) - .wrapping_add(i64::from(target) as usize); - let index = offsets - .binary_search(&offset) - .map_err(|_| ConvertError::InvalidBranchTarget)?; - Operation::Branch(index) - } - read::Operation::Skip { target } => { - let offset = from_operations - .offset_from(&from_expression) - .wrapping_add(i64::from(target) as usize); - let index = offsets - .binary_search(&offset) - .map_err(|_| ConvertError::InvalidBranchTarget)?; - Operation::Skip(index) - } - read::Operation::UnsignedConstant { value } => { - Operation::UnsignedConstant(value) - } - read::Operation::SignedConstant { value } => Operation::SignedConstant(value), - read::Operation::Register { register } => Operation::Register(register), - read::Operation::RegisterOffset { - register, - offset, - base_type, - } => { - if base_type.0 != 0 { - Operation::RegisterType(register, convert_unit_offset(base_type)?) - } else { - Operation::RegisterOffset(register, offset) - } - } - read::Operation::FrameOffset { offset } => Operation::FrameOffset(offset), - read::Operation::Nop => Operation::Simple(constants::DW_OP_nop), - read::Operation::PushObjectAddress => { - Operation::Simple(constants::DW_OP_push_object_address) - } - read::Operation::Call { offset } => match offset { - read::DieReference::UnitRef(offset) => { - Operation::Call(convert_unit_offset(offset)?) - } - read::DieReference::DebugInfoRef(offset) => { - Operation::CallRef(convert_debug_info_offset(offset)?) 
- } - }, - read::Operation::TLS => Operation::Simple(constants::DW_OP_form_tls_address), - read::Operation::CallFrameCFA => { - Operation::Simple(constants::DW_OP_call_frame_cfa) - } - read::Operation::Piece { - size_in_bits, - bit_offset: None, - } => Operation::Piece { - size_in_bytes: size_in_bits / 8, - }, - read::Operation::Piece { - size_in_bits, - bit_offset: Some(bit_offset), - } => Operation::BitPiece { - size_in_bits, - bit_offset, - }, - read::Operation::ImplicitValue { data } => { - Operation::ImplicitValue(data.to_slice()?.into_owned().into()) - } - read::Operation::StackValue => Operation::Simple(constants::DW_OP_stack_value), - read::Operation::ImplicitPointer { value, byte_offset } => { - let entry = convert_debug_info_offset(value)?; - Operation::ImplicitPointer { entry, byte_offset } - } - read::Operation::EntryValue { expression } => { - let expression = Expression::from( - read::Expression(expression), - encoding, - dwarf, - unit, - entry_ids, - convert_address, - )?; - Operation::EntryValue(expression) - } - read::Operation::ParameterRef { offset } => { - let entry = convert_unit_offset(offset)?; - Operation::ParameterRef(entry) - } - read::Operation::Address { address } => { - let address = - convert_address(address).ok_or(ConvertError::InvalidAddress)?; - Operation::Address(address) - } - read::Operation::AddressIndex { index } => { - let dwarf = dwarf.ok_or(ConvertError::UnsupportedOperation)?; - let unit = unit.ok_or(ConvertError::UnsupportedOperation)?; - let val = dwarf.address(unit, index)?; - let address = convert_address(val).ok_or(ConvertError::InvalidAddress)?; - Operation::Address(address) - } - read::Operation::ConstantIndex { index } => { - let dwarf = dwarf.ok_or(ConvertError::UnsupportedOperation)?; - let unit = unit.ok_or(ConvertError::UnsupportedOperation)?; - let val = dwarf.address(unit, index)?; - Operation::UnsignedConstant(val) - } - read::Operation::TypedLiteral { base_type, value } => { - let entry = 
convert_unit_offset(base_type)?; - Operation::ConstantType(entry, value.to_slice()?.into_owned().into()) - } - read::Operation::Convert { base_type } => { - if base_type.0 == 0 { - Operation::Convert(None) - } else { - let entry = convert_unit_offset(base_type)?; - Operation::Convert(Some(entry)) - } - } - read::Operation::Reinterpret { base_type } => { - if base_type.0 == 0 { - Operation::Reinterpret(None) - } else { - let entry = convert_unit_offset(base_type)?; - Operation::Reinterpret(Some(entry)) - } - } - read::Operation::WasmLocal { index } => Operation::WasmLocal(index), - read::Operation::WasmGlobal { index } => Operation::WasmGlobal(index), - read::Operation::WasmStack { index } => Operation::WasmStack(index), - }; - operations.push(operation); - } - Ok(Expression { operations }) - } - } -} - -#[cfg(test)] -#[cfg(feature = "read")] -mod tests { - use super::*; - use crate::common::{ - DebugAbbrevOffset, DebugAddrBase, DebugInfoOffset, DebugLocListsBase, DebugRngListsBase, - DebugStrOffsetsBase, Format, SectionId, - }; - use crate::read; - use crate::write::{ - DebugLineStrOffsets, DebugStrOffsets, EndianVec, LineProgram, Sections, Unit, UnitTable, - }; - use crate::LittleEndian; - use std::collections::HashMap; - use std::sync::Arc; - - #[test] - fn test_operation() { - for &version in &[3, 4, 5] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - - let mut units = UnitTable::default(); - let unit_id = units.add(Unit::new(encoding, LineProgram::none())); - let unit = units.get_mut(unit_id); - let entry_id = unit.add(unit.root(), constants::DW_TAG_base_type); - let reference = Reference::Entry(unit_id, entry_id); - - let mut sections = Sections::new(EndianVec::new(LittleEndian)); - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - let debug_info_offsets = units - .write(&mut sections, 
&debug_line_str_offsets, &debug_str_offsets) - .unwrap(); - let unit_offsets = debug_info_offsets.unit_offsets(unit_id); - let debug_info_offset = unit_offsets.debug_info_offset(entry_id); - let entry_offset = - read::UnitOffset(unit_offsets.unit_offset(entry_id) as usize); - - let mut reg_expression = Expression::new(); - reg_expression.op_reg(Register(23)); - - let operations: &[(&dyn Fn(&mut Expression), Operation, read::Operation<_>)] = - &[ - ( - &|x| x.op_deref(), - Operation::Deref { space: false }, - read::Operation::Deref { - base_type: read::UnitOffset(0), - size: address_size, - space: false, - }, - ), - ( - &|x| x.op_xderef(), - Operation::Deref { space: true }, - read::Operation::Deref { - base_type: read::UnitOffset(0), - size: address_size, - space: true, - }, - ), - ( - &|x| x.op_deref_size(2), - Operation::DerefSize { - space: false, - size: 2, - }, - read::Operation::Deref { - base_type: read::UnitOffset(0), - size: 2, - space: false, - }, - ), - ( - &|x| x.op_xderef_size(2), - Operation::DerefSize { - space: true, - size: 2, - }, - read::Operation::Deref { - base_type: read::UnitOffset(0), - size: 2, - space: true, - }, - ), - ( - &|x| x.op_deref_type(2, entry_id), - Operation::DerefType { - space: false, - size: 2, - base: entry_id, - }, - read::Operation::Deref { - base_type: entry_offset, - size: 2, - space: false, - }, - ), - ( - &|x| x.op_xderef_type(2, entry_id), - Operation::DerefType { - space: true, - size: 2, - base: entry_id, - }, - read::Operation::Deref { - base_type: entry_offset, - size: 2, - space: true, - }, - ), - ( - &|x| x.op(constants::DW_OP_drop), - Operation::Simple(constants::DW_OP_drop), - read::Operation::Drop, - ), - ( - &|x| x.op_pick(0), - Operation::Pick(0), - read::Operation::Pick { index: 0 }, - ), - ( - &|x| x.op_pick(1), - Operation::Pick(1), - read::Operation::Pick { index: 1 }, - ), - ( - &|x| x.op_pick(2), - Operation::Pick(2), - read::Operation::Pick { index: 2 }, - ), - ( - &|x| x.op(constants::DW_OP_swap), 
- Operation::Simple(constants::DW_OP_swap), - read::Operation::Swap, - ), - ( - &|x| x.op(constants::DW_OP_rot), - Operation::Simple(constants::DW_OP_rot), - read::Operation::Rot, - ), - ( - &|x| x.op(constants::DW_OP_abs), - Operation::Simple(constants::DW_OP_abs), - read::Operation::Abs, - ), - ( - &|x| x.op(constants::DW_OP_and), - Operation::Simple(constants::DW_OP_and), - read::Operation::And, - ), - ( - &|x| x.op(constants::DW_OP_div), - Operation::Simple(constants::DW_OP_div), - read::Operation::Div, - ), - ( - &|x| x.op(constants::DW_OP_minus), - Operation::Simple(constants::DW_OP_minus), - read::Operation::Minus, - ), - ( - &|x| x.op(constants::DW_OP_mod), - Operation::Simple(constants::DW_OP_mod), - read::Operation::Mod, - ), - ( - &|x| x.op(constants::DW_OP_mul), - Operation::Simple(constants::DW_OP_mul), - read::Operation::Mul, - ), - ( - &|x| x.op(constants::DW_OP_neg), - Operation::Simple(constants::DW_OP_neg), - read::Operation::Neg, - ), - ( - &|x| x.op(constants::DW_OP_not), - Operation::Simple(constants::DW_OP_not), - read::Operation::Not, - ), - ( - &|x| x.op(constants::DW_OP_or), - Operation::Simple(constants::DW_OP_or), - read::Operation::Or, - ), - ( - &|x| x.op(constants::DW_OP_plus), - Operation::Simple(constants::DW_OP_plus), - read::Operation::Plus, - ), - ( - &|x| x.op_plus_uconst(23), - Operation::PlusConstant(23), - read::Operation::PlusConstant { value: 23 }, - ), - ( - &|x| x.op(constants::DW_OP_shl), - Operation::Simple(constants::DW_OP_shl), - read::Operation::Shl, - ), - ( - &|x| x.op(constants::DW_OP_shr), - Operation::Simple(constants::DW_OP_shr), - read::Operation::Shr, - ), - ( - &|x| x.op(constants::DW_OP_shra), - Operation::Simple(constants::DW_OP_shra), - read::Operation::Shra, - ), - ( - &|x| x.op(constants::DW_OP_xor), - Operation::Simple(constants::DW_OP_xor), - read::Operation::Xor, - ), - ( - &|x| x.op(constants::DW_OP_eq), - Operation::Simple(constants::DW_OP_eq), - read::Operation::Eq, - ), - ( - &|x| 
x.op(constants::DW_OP_ge), - Operation::Simple(constants::DW_OP_ge), - read::Operation::Ge, - ), - ( - &|x| x.op(constants::DW_OP_gt), - Operation::Simple(constants::DW_OP_gt), - read::Operation::Gt, - ), - ( - &|x| x.op(constants::DW_OP_le), - Operation::Simple(constants::DW_OP_le), - read::Operation::Le, - ), - ( - &|x| x.op(constants::DW_OP_lt), - Operation::Simple(constants::DW_OP_lt), - read::Operation::Lt, - ), - ( - &|x| x.op(constants::DW_OP_ne), - Operation::Simple(constants::DW_OP_ne), - read::Operation::Ne, - ), - ( - &|x| x.op_constu(23), - Operation::UnsignedConstant(23), - read::Operation::UnsignedConstant { value: 23 }, - ), - ( - &|x| x.op_consts(-23), - Operation::SignedConstant(-23), - read::Operation::SignedConstant { value: -23 }, - ), - ( - &|x| x.op_reg(Register(23)), - Operation::Register(Register(23)), - read::Operation::Register { - register: Register(23), - }, - ), - ( - &|x| x.op_reg(Register(123)), - Operation::Register(Register(123)), - read::Operation::Register { - register: Register(123), - }, - ), - ( - &|x| x.op_breg(Register(23), 34), - Operation::RegisterOffset(Register(23), 34), - read::Operation::RegisterOffset { - register: Register(23), - offset: 34, - base_type: read::UnitOffset(0), - }, - ), - ( - &|x| x.op_breg(Register(123), 34), - Operation::RegisterOffset(Register(123), 34), - read::Operation::RegisterOffset { - register: Register(123), - offset: 34, - base_type: read::UnitOffset(0), - }, - ), - ( - &|x| x.op_regval_type(Register(23), entry_id), - Operation::RegisterType(Register(23), entry_id), - read::Operation::RegisterOffset { - register: Register(23), - offset: 0, - base_type: entry_offset, - }, - ), - ( - &|x| x.op_fbreg(34), - Operation::FrameOffset(34), - read::Operation::FrameOffset { offset: 34 }, - ), - ( - &|x| x.op(constants::DW_OP_nop), - Operation::Simple(constants::DW_OP_nop), - read::Operation::Nop, - ), - ( - &|x| x.op(constants::DW_OP_push_object_address), - 
Operation::Simple(constants::DW_OP_push_object_address), - read::Operation::PushObjectAddress, - ), - ( - &|x| x.op_call(entry_id), - Operation::Call(entry_id), - read::Operation::Call { - offset: read::DieReference::UnitRef(entry_offset), - }, - ), - ( - &|x| x.op_call_ref(reference), - Operation::CallRef(reference), - read::Operation::Call { - offset: read::DieReference::DebugInfoRef(debug_info_offset), - }, - ), - ( - &|x| x.op(constants::DW_OP_form_tls_address), - Operation::Simple(constants::DW_OP_form_tls_address), - read::Operation::TLS, - ), - ( - &|x| x.op(constants::DW_OP_call_frame_cfa), - Operation::Simple(constants::DW_OP_call_frame_cfa), - read::Operation::CallFrameCFA, - ), - ( - &|x| x.op_piece(23), - Operation::Piece { size_in_bytes: 23 }, - read::Operation::Piece { - size_in_bits: 23 * 8, - bit_offset: None, - }, - ), - ( - &|x| x.op_bit_piece(23, 34), - Operation::BitPiece { - size_in_bits: 23, - bit_offset: 34, - }, - read::Operation::Piece { - size_in_bits: 23, - bit_offset: Some(34), - }, - ), - ( - &|x| x.op_implicit_value(vec![23].into()), - Operation::ImplicitValue(vec![23].into()), - read::Operation::ImplicitValue { - data: read::EndianSlice::new(&[23], LittleEndian), - }, - ), - ( - &|x| x.op(constants::DW_OP_stack_value), - Operation::Simple(constants::DW_OP_stack_value), - read::Operation::StackValue, - ), - ( - &|x| x.op_implicit_pointer(reference, 23), - Operation::ImplicitPointer { - entry: reference, - byte_offset: 23, - }, - read::Operation::ImplicitPointer { - value: debug_info_offset, - byte_offset: 23, - }, - ), - ( - &|x| x.op_entry_value(reg_expression.clone()), - Operation::EntryValue(reg_expression.clone()), - read::Operation::EntryValue { - expression: read::EndianSlice::new( - &[constants::DW_OP_reg23.0], - LittleEndian, - ), - }, - ), - ( - &|x| x.op_gnu_parameter_ref(entry_id), - Operation::ParameterRef(entry_id), - read::Operation::ParameterRef { - offset: entry_offset, - }, - ), - ( - &|x| 
x.op_addr(Address::Constant(23)), - Operation::Address(Address::Constant(23)), - read::Operation::Address { address: 23 }, - ), - ( - &|x| x.op_const_type(entry_id, vec![23].into()), - Operation::ConstantType(entry_id, vec![23].into()), - read::Operation::TypedLiteral { - base_type: entry_offset, - value: read::EndianSlice::new(&[23], LittleEndian), - }, - ), - ( - &|x| x.op_convert(None), - Operation::Convert(None), - read::Operation::Convert { - base_type: read::UnitOffset(0), - }, - ), - ( - &|x| x.op_convert(Some(entry_id)), - Operation::Convert(Some(entry_id)), - read::Operation::Convert { - base_type: entry_offset, - }, - ), - ( - &|x| x.op_reinterpret(None), - Operation::Reinterpret(None), - read::Operation::Reinterpret { - base_type: read::UnitOffset(0), - }, - ), - ( - &|x| x.op_reinterpret(Some(entry_id)), - Operation::Reinterpret(Some(entry_id)), - read::Operation::Reinterpret { - base_type: entry_offset, - }, - ), - ( - &|x| x.op_wasm_local(1000), - Operation::WasmLocal(1000), - read::Operation::WasmLocal { index: 1000 }, - ), - ( - &|x| x.op_wasm_global(1000), - Operation::WasmGlobal(1000), - read::Operation::WasmGlobal { index: 1000 }, - ), - ( - &|x| x.op_wasm_stack(1000), - Operation::WasmStack(1000), - read::Operation::WasmStack { index: 1000 }, - ), - ]; - - let mut expression = Expression::new(); - let start_index = expression.next_index(); - for (f, o, _) in operations { - f(&mut expression); - assert_eq!(expression.operations.last(), Some(o)); - } - - let bra_index = expression.op_bra(); - let skip_index = expression.op_skip(); - expression.op(constants::DW_OP_nop); - let end_index = expression.next_index(); - expression.set_target(bra_index, start_index); - expression.set_target(skip_index, end_index); - - let mut w = EndianVec::new(LittleEndian); - let mut refs = Vec::new(); - expression - .write(&mut w, Some(&mut refs), encoding, Some(&unit_offsets)) - .unwrap(); - for r in &refs { - assert_eq!(r.unit, unit_id); - assert_eq!(r.entry, 
entry_id); - w.write_offset_at( - r.offset, - debug_info_offset.0, - SectionId::DebugInfo, - r.size, - ) - .unwrap(); - } - - let read_expression = - read::Expression(read::EndianSlice::new(w.slice(), LittleEndian)); - let mut read_operations = read_expression.operations(encoding); - for (_, _, operation) in operations { - assert_eq!(read_operations.next(), Ok(Some(*operation))); - } - - // 4 = DW_OP_skip + i16 + DW_OP_nop - assert_eq!( - read_operations.next(), - Ok(Some(read::Operation::Bra { - target: -(w.len() as i16) + 4 - })) - ); - // 1 = DW_OP_nop - assert_eq!( - read_operations.next(), - Ok(Some(read::Operation::Skip { target: 1 })) - ); - assert_eq!(read_operations.next(), Ok(Some(read::Operation::Nop))); - assert_eq!(read_operations.next(), Ok(None)); - - // Fake the unit. - let unit = read::Unit { - header: read::UnitHeader::new( - encoding, - 0, - read::UnitType::Compilation, - DebugAbbrevOffset(0), - DebugInfoOffset(0).into(), - read::EndianSlice::new(&[], LittleEndian), - ), - abbreviations: Arc::new(read::Abbreviations::default()), - name: None, - comp_dir: None, - low_pc: 0, - str_offsets_base: DebugStrOffsetsBase(0), - addr_base: DebugAddrBase(0), - loclists_base: DebugLocListsBase(0), - rnglists_base: DebugRngListsBase(0), - line_program: None, - dwo_id: None, - }; - - let mut entry_ids = HashMap::new(); - entry_ids.insert(debug_info_offset.into(), (unit_id, entry_id)); - let convert_expression = Expression::from( - read_expression, - encoding, - None, /* dwarf */ - Some(&unit), - Some(&entry_ids), - &|address| Some(Address::Constant(address)), - ) - .unwrap(); - let mut convert_operations = convert_expression.operations.iter(); - for (_, operation, _) in operations { - assert_eq!(convert_operations.next(), Some(operation)); - } - assert_eq!( - convert_operations.next(), - Some(&Operation::Branch(start_index)) - ); - assert_eq!(convert_operations.next(), Some(&Operation::Skip(end_index))); - assert_eq!( - convert_operations.next(), - 
Some(&Operation::Simple(constants::DW_OP_nop)) - ); - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/range.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/range.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/range.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/range.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,416 +0,0 @@ -use alloc::vec::Vec; -use indexmap::IndexSet; -use std::ops::{Deref, DerefMut}; - -use crate::common::{Encoding, RangeListsOffset, SectionId}; -use crate::write::{Address, BaseId, Error, Result, Section, Sections, Writer}; - -define_section!( - DebugRanges, - RangeListsOffset, - "A writable `.debug_ranges` section." -); -define_section!( - DebugRngLists, - RangeListsOffset, - "A writable `.debug_rnglists` section." -); - -define_offsets!( - RangeListOffsets: RangeListId => RangeListsOffset, - "The section offsets of a series of range lists within the `.debug_ranges` or `.debug_rnglists` sections." -); - -define_id!( - RangeListId, - "An identifier for a range list in a `RangeListTable`." -); - -/// A table of range lists that will be stored in a `.debug_ranges` or `.debug_rnglists` section. -#[derive(Debug, Default)] -pub struct RangeListTable { - base_id: BaseId, - ranges: IndexSet, -} - -impl RangeListTable { - /// Add a range list to the table. - pub fn add(&mut self, range_list: RangeList) -> RangeListId { - let (index, _) = self.ranges.insert_full(range_list); - RangeListId::new(self.base_id, index) - } - - /// Write the range list table to the appropriate section for the given DWARF version. 
- pub(crate) fn write( - &self, - sections: &mut Sections, - encoding: Encoding, - ) -> Result { - if self.ranges.is_empty() { - return Ok(RangeListOffsets::none()); - } - - match encoding.version { - 2..=4 => self.write_ranges(&mut sections.debug_ranges, encoding.address_size), - 5 => self.write_rnglists(&mut sections.debug_rnglists, encoding), - _ => Err(Error::UnsupportedVersion(encoding.version)), - } - } - - /// Write the range list table to the `.debug_ranges` section. - fn write_ranges( - &self, - w: &mut DebugRanges, - address_size: u8, - ) -> Result { - let mut offsets = Vec::new(); - for range_list in self.ranges.iter() { - offsets.push(w.offset()); - for range in &range_list.0 { - // Note that we must ensure none of the ranges have both begin == 0 and end == 0. - // We do this by ensuring that begin != end, which is a bit more restrictive - // than required, but still seems reasonable. - match *range { - Range::BaseAddress { address } => { - let marker = !0 >> (64 - address_size * 8); - w.write_udata(marker, address_size)?; - w.write_address(address, address_size)?; - } - Range::OffsetPair { begin, end } => { - if begin == end { - return Err(Error::InvalidRange); - } - w.write_udata(begin, address_size)?; - w.write_udata(end, address_size)?; - } - Range::StartEnd { begin, end } => { - if begin == end { - return Err(Error::InvalidRange); - } - w.write_address(begin, address_size)?; - w.write_address(end, address_size)?; - } - Range::StartLength { begin, length } => { - let end = match begin { - Address::Constant(begin) => Address::Constant(begin + length), - Address::Symbol { symbol, addend } => Address::Symbol { - symbol, - addend: addend + length as i64, - }, - }; - if begin == end { - return Err(Error::InvalidRange); - } - w.write_address(begin, address_size)?; - w.write_address(end, address_size)?; - } - } - } - w.write_udata(0, address_size)?; - w.write_udata(0, address_size)?; - } - Ok(RangeListOffsets { - base_id: self.base_id, - offsets, - }) - } 
- - /// Write the range list table to the `.debug_rnglists` section. - fn write_rnglists( - &self, - w: &mut DebugRngLists, - encoding: Encoding, - ) -> Result { - let mut offsets = Vec::new(); - - if encoding.version != 5 { - return Err(Error::NeedVersion(5)); - } - - let length_offset = w.write_initial_length(encoding.format)?; - let length_base = w.len(); - - w.write_u16(encoding.version)?; - w.write_u8(encoding.address_size)?; - w.write_u8(0)?; // segment_selector_size - w.write_u32(0)?; // offset_entry_count (when set to zero DW_FORM_rnglistx can't be used, see section 7.28) - // FIXME implement DW_FORM_rnglistx writing and implement the offset entry list - - for range_list in self.ranges.iter() { - offsets.push(w.offset()); - for range in &range_list.0 { - match *range { - Range::BaseAddress { address } => { - w.write_u8(crate::constants::DW_RLE_base_address.0)?; - w.write_address(address, encoding.address_size)?; - } - Range::OffsetPair { begin, end } => { - w.write_u8(crate::constants::DW_RLE_offset_pair.0)?; - w.write_uleb128(begin)?; - w.write_uleb128(end)?; - } - Range::StartEnd { begin, end } => { - w.write_u8(crate::constants::DW_RLE_start_end.0)?; - w.write_address(begin, encoding.address_size)?; - w.write_address(end, encoding.address_size)?; - } - Range::StartLength { begin, length } => { - w.write_u8(crate::constants::DW_RLE_start_length.0)?; - w.write_address(begin, encoding.address_size)?; - w.write_uleb128(length)?; - } - } - } - - w.write_u8(crate::constants::DW_RLE_end_of_list.0)?; - } - - let length = (w.len() - length_base) as u64; - w.write_initial_length_at(length_offset, length, encoding.format)?; - - Ok(RangeListOffsets { - base_id: self.base_id, - offsets, - }) - } -} - -/// A range list that will be stored in a `.debug_ranges` or `.debug_rnglists` section. -#[derive(Clone, Debug, Eq, PartialEq, Hash)] -pub struct RangeList(pub Vec); - -/// A single range. 
-#[derive(Clone, Debug, Eq, PartialEq, Hash)] -pub enum Range { - /// DW_RLE_base_address - BaseAddress { - /// Base address. - address: Address, - }, - /// DW_RLE_offset_pair - OffsetPair { - /// Start of range relative to base address. - begin: u64, - /// End of range relative to base address. - end: u64, - }, - /// DW_RLE_start_end - StartEnd { - /// Start of range. - begin: Address, - /// End of range. - end: Address, - }, - /// DW_RLE_start_length - StartLength { - /// Start of range. - begin: Address, - /// Length of range. - length: u64, - }, -} - -#[cfg(feature = "read")] -mod convert { - use super::*; - - use crate::read::{self, Reader}; - use crate::write::{ConvertError, ConvertResult, ConvertUnitContext}; - - impl RangeList { - /// Create a range list by reading the data from the give range list iter. - pub(crate) fn from>( - mut from: read::RawRngListIter, - context: &ConvertUnitContext, - ) -> ConvertResult { - let mut have_base_address = context.base_address != Address::Constant(0); - let convert_address = - |x| (context.convert_address)(x).ok_or(ConvertError::InvalidAddress); - let mut ranges = Vec::new(); - while let Some(from_range) = from.next()? { - let range = match from_range { - read::RawRngListEntry::AddressOrOffsetPair { begin, end } => { - // These were parsed as addresses, even if they are offsets. - let begin = convert_address(begin)?; - let end = convert_address(end)?; - match (begin, end) { - (Address::Constant(begin_offset), Address::Constant(end_offset)) => { - if have_base_address { - Range::OffsetPair { - begin: begin_offset, - end: end_offset, - } - } else { - Range::StartEnd { begin, end } - } - } - _ => { - if have_base_address { - // At least one of begin/end is an address, but we also have - // a base address. Adding addresses is undefined. 
- return Err(ConvertError::InvalidRangeRelativeAddress); - } - Range::StartEnd { begin, end } - } - } - } - read::RawRngListEntry::BaseAddress { addr } => { - have_base_address = true; - let address = convert_address(addr)?; - Range::BaseAddress { address } - } - read::RawRngListEntry::BaseAddressx { addr } => { - have_base_address = true; - let address = convert_address(context.dwarf.address(context.unit, addr)?)?; - Range::BaseAddress { address } - } - read::RawRngListEntry::StartxEndx { begin, end } => { - let begin = convert_address(context.dwarf.address(context.unit, begin)?)?; - let end = convert_address(context.dwarf.address(context.unit, end)?)?; - Range::StartEnd { begin, end } - } - read::RawRngListEntry::StartxLength { begin, length } => { - let begin = convert_address(context.dwarf.address(context.unit, begin)?)?; - Range::StartLength { begin, length } - } - read::RawRngListEntry::OffsetPair { begin, end } => { - Range::OffsetPair { begin, end } - } - read::RawRngListEntry::StartEnd { begin, end } => { - let begin = convert_address(begin)?; - let end = convert_address(end)?; - Range::StartEnd { begin, end } - } - read::RawRngListEntry::StartLength { begin, length } => { - let begin = convert_address(begin)?; - Range::StartLength { begin, length } - } - }; - // Filtering empty ranges out. - match range { - Range::StartLength { length, .. } if length == 0 => continue, - Range::StartEnd { begin, end, .. } if begin == end => continue, - Range::OffsetPair { begin, end, .. 
} if begin == end => continue, - _ => (), - } - ranges.push(range); - } - Ok(RangeList(ranges)) - } - } -} - -#[cfg(test)] -#[cfg(feature = "read")] -mod tests { - use super::*; - use crate::common::{ - DebugAbbrevOffset, DebugAddrBase, DebugInfoOffset, DebugLocListsBase, DebugRngListsBase, - DebugStrOffsetsBase, Format, - }; - use crate::read; - use crate::write::{ - ConvertUnitContext, EndianVec, LineStringTable, LocationListTable, Range, RangeListTable, - StringTable, - }; - use crate::LittleEndian; - use std::collections::HashMap; - use std::sync::Arc; - - #[test] - fn test_range() { - let mut line_strings = LineStringTable::default(); - let mut strings = StringTable::default(); - - for &version in &[2, 3, 4, 5] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - - let mut range_list = RangeList(vec![ - Range::StartLength { - begin: Address::Constant(6666), - length: 7777, - }, - Range::StartEnd { - begin: Address::Constant(4444), - end: Address::Constant(5555), - }, - Range::BaseAddress { - address: Address::Constant(1111), - }, - Range::OffsetPair { - begin: 2222, - end: 3333, - }, - ]); - - let mut ranges = RangeListTable::default(); - let range_list_id = ranges.add(range_list.clone()); - - let mut sections = Sections::new(EndianVec::new(LittleEndian)); - let range_list_offsets = ranges.write(&mut sections, encoding).unwrap(); - - let read_debug_ranges = - read::DebugRanges::new(sections.debug_ranges.slice(), LittleEndian); - let read_debug_rnglists = - read::DebugRngLists::new(sections.debug_rnglists.slice(), LittleEndian); - let read_ranges = read::RangeLists::new(read_debug_ranges, read_debug_rnglists); - let offset = range_list_offsets.get(range_list_id); - let read_range_list = read_ranges.raw_ranges(offset, encoding).unwrap(); - - let dwarf = read::Dwarf { - ranges: read_ranges, - ..Default::default() - }; - let unit = read::Unit { - header: 
read::UnitHeader::new( - encoding, - 0, - read::UnitType::Compilation, - DebugAbbrevOffset(0), - DebugInfoOffset(0).into(), - read::EndianSlice::default(), - ), - abbreviations: Arc::new(read::Abbreviations::default()), - name: None, - comp_dir: None, - low_pc: 0, - str_offsets_base: DebugStrOffsetsBase(0), - addr_base: DebugAddrBase(0), - loclists_base: DebugLocListsBase(0), - rnglists_base: DebugRngListsBase(0), - line_program: None, - dwo_id: None, - }; - let context = ConvertUnitContext { - dwarf: &dwarf, - unit: &unit, - line_strings: &mut line_strings, - strings: &mut strings, - ranges: &mut ranges, - locations: &mut LocationListTable::default(), - convert_address: &|address| Some(Address::Constant(address)), - base_address: Address::Constant(0), - line_program_offset: None, - line_program_files: Vec::new(), - entry_ids: &HashMap::new(), - }; - let convert_range_list = RangeList::from(read_range_list, &context).unwrap(); - - if version <= 4 { - range_list.0[0] = Range::StartEnd { - begin: Address::Constant(6666), - end: Address::Constant(6666 + 7777), - }; - } - assert_eq!(range_list, convert_range_list); - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/section.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/section.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/section.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/section.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,172 +0,0 @@ -use std::ops::DerefMut; -use std::result; -use std::vec::Vec; - -use crate::common::SectionId; -use crate::write::{ - DebugAbbrev, DebugFrame, DebugInfo, DebugInfoReference, DebugLine, DebugLineStr, DebugLoc, - DebugLocLists, DebugRanges, DebugRngLists, DebugStr, EhFrame, Writer, -}; - -macro_rules! define_section { - ($name:ident, $offset:ident, $docs:expr) => { - #[doc=$docs] - #[derive(Debug, Default)] - pub struct $name(pub W); - - impl $name { - /// Return the offset of the next write. 
- pub fn offset(&self) -> $offset { - $offset(self.len()) - } - } - - impl From for $name { - #[inline] - fn from(w: W) -> Self { - $name(w) - } - } - - impl Deref for $name { - type Target = W; - - #[inline] - fn deref(&self) -> &W { - &self.0 - } - } - - impl DerefMut for $name { - #[inline] - fn deref_mut(&mut self) -> &mut W { - &mut self.0 - } - } - - impl Section for $name { - #[inline] - fn id(&self) -> SectionId { - SectionId::$name - } - } - }; -} - -/// Functionality common to all writable DWARF sections. -pub trait Section: DerefMut { - /// Returns the DWARF section kind for this type. - fn id(&self) -> SectionId; - - /// Returns the ELF section name for this type. - fn name(&self) -> &'static str { - self.id().name() - } -} - -/// All of the writable DWARF sections. -#[derive(Debug, Default)] -pub struct Sections { - /// The `.debug_abbrev` section. - pub debug_abbrev: DebugAbbrev, - /// The `.debug_info` section. - pub debug_info: DebugInfo, - /// The `.debug_line` section. - pub debug_line: DebugLine, - /// The `.debug_line_str` section. - pub debug_line_str: DebugLineStr, - /// The `.debug_ranges` section. - pub debug_ranges: DebugRanges, - /// The `.debug_rnglists` section. - pub debug_rnglists: DebugRngLists, - /// The `.debug_loc` section. - pub debug_loc: DebugLoc, - /// The `.debug_loclists` section. - pub debug_loclists: DebugLocLists, - /// The `.debug_str` section. - pub debug_str: DebugStr, - /// The `.debug_frame` section. - pub debug_frame: DebugFrame, - /// The `.eh_frame` section. - pub eh_frame: EhFrame, - /// Unresolved references in the `.debug_info` section. - pub(crate) debug_info_refs: Vec, - /// Unresolved references in the `.debug_loc` section. - pub(crate) debug_loc_refs: Vec, - /// Unresolved references in the `.debug_loclists` section. - pub(crate) debug_loclists_refs: Vec, -} - -impl Sections { - /// Create a new `Sections` using clones of the given `section`. 
- pub fn new(section: W) -> Self { - Sections { - debug_abbrev: DebugAbbrev(section.clone()), - debug_info: DebugInfo(section.clone()), - debug_line: DebugLine(section.clone()), - debug_line_str: DebugLineStr(section.clone()), - debug_ranges: DebugRanges(section.clone()), - debug_rnglists: DebugRngLists(section.clone()), - debug_loc: DebugLoc(section.clone()), - debug_loclists: DebugLocLists(section.clone()), - debug_str: DebugStr(section.clone()), - debug_frame: DebugFrame(section.clone()), - eh_frame: EhFrame(section), - debug_info_refs: Vec::new(), - debug_loc_refs: Vec::new(), - debug_loclists_refs: Vec::new(), - } - } -} - -impl Sections { - /// For each section, call `f` once with a shared reference. - pub fn for_each(&self, mut f: F) -> result::Result<(), E> - where - F: FnMut(SectionId, &W) -> result::Result<(), E>, - { - macro_rules! f { - ($s:expr) => { - f($s.id(), &$s) - }; - } - // Ordered so that earlier sections do not reference later sections. - f!(self.debug_abbrev)?; - f!(self.debug_str)?; - f!(self.debug_line_str)?; - f!(self.debug_line)?; - f!(self.debug_ranges)?; - f!(self.debug_rnglists)?; - f!(self.debug_loc)?; - f!(self.debug_loclists)?; - f!(self.debug_info)?; - f!(self.debug_frame)?; - f!(self.eh_frame)?; - Ok(()) - } - - /// For each section, call `f` once with a mutable reference. - pub fn for_each_mut(&mut self, mut f: F) -> result::Result<(), E> - where - F: FnMut(SectionId, &mut W) -> result::Result<(), E>, - { - macro_rules! f { - ($s:expr) => { - f($s.id(), &mut $s) - }; - } - // Ordered so that earlier sections do not reference later sections. 
- f!(self.debug_abbrev)?; - f!(self.debug_str)?; - f!(self.debug_line_str)?; - f!(self.debug_line)?; - f!(self.debug_ranges)?; - f!(self.debug_rnglists)?; - f!(self.debug_loc)?; - f!(self.debug_loclists)?; - f!(self.debug_info)?; - f!(self.debug_frame)?; - f!(self.eh_frame)?; - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/str.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/str.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/str.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/str.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,172 +0,0 @@ -use alloc::vec::Vec; -use indexmap::IndexSet; -use std::ops::{Deref, DerefMut}; - -use crate::common::{DebugLineStrOffset, DebugStrOffset, SectionId}; -use crate::write::{BaseId, Result, Section, Writer}; - -// Requirements: -// - values are `[u8]`, null bytes are not allowed -// - insertion returns a fixed id -// - inserting a duplicate returns the id of the existing value -// - able to convert an id to a section offset -// Optional? -// - able to get an existing value given an id -// -// Limitations of current implementation (using IndexSet): -// - inserting requires either an allocation for duplicates, -// or a double lookup for non-duplicates -// - doesn't preserve offsets when updating an existing `.debug_str` section -// -// Possible changes: -// - calculate offsets as we add values, and use that as the id. -// This would avoid the need for DebugStrOffsets but would make it -// hard to implement `get`. -macro_rules! define_string_table { - ($name:ident, $id:ident, $section:ident, $offsets:ident, $docs:expr) => { - #[doc=$docs] - #[derive(Debug, Default)] - pub struct $name { - base_id: BaseId, - strings: IndexSet>, - } - - impl $name { - /// Add a string to the string table and return its id. - /// - /// If the string already exists, then return the id of the existing string. - /// - /// # Panics - /// - /// Panics if `bytes` contains a null byte. 
- pub fn add(&mut self, bytes: T) -> $id - where - T: Into>, - { - let bytes = bytes.into(); - assert!(!bytes.contains(&0)); - let (index, _) = self.strings.insert_full(bytes); - $id::new(self.base_id, index) - } - - /// Return the number of strings in the table. - #[inline] - pub fn count(&self) -> usize { - self.strings.len() - } - - /// Get a reference to a string in the table. - /// - /// # Panics - /// - /// Panics if `id` is invalid. - pub fn get(&self, id: $id) -> &[u8] { - debug_assert_eq!(self.base_id, id.base_id); - self.strings.get_index(id.index).map(Vec::as_slice).unwrap() - } - - /// Write the string table to the `.debug_str` section. - /// - /// Returns the offsets at which the strings are written. - pub fn write(&self, w: &mut $section) -> Result<$offsets> { - let mut offsets = Vec::new(); - for bytes in self.strings.iter() { - offsets.push(w.offset()); - w.write(bytes)?; - w.write_u8(0)?; - } - - Ok($offsets { - base_id: self.base_id, - offsets, - }) - } - } - }; -} - -define_id!(StringId, "An identifier for a string in a `StringTable`."); - -define_string_table!( - StringTable, - StringId, - DebugStr, - DebugStrOffsets, - "A table of strings that will be stored in a `.debug_str` section." -); - -define_section!(DebugStr, DebugStrOffset, "A writable `.debug_str` section."); - -define_offsets!( - DebugStrOffsets: StringId => DebugStrOffset, - "The section offsets of all strings within a `.debug_str` section." -); - -define_id!( - LineStringId, - "An identifier for a string in a `LineStringTable`." -); - -define_string_table!( - LineStringTable, - LineStringId, - DebugLineStr, - DebugLineStrOffsets, - "A table of strings that will be stored in a `.debug_line_str` section." -); - -define_section!( - DebugLineStr, - DebugLineStrOffset, - "A writable `.debug_line_str` section." -); - -define_offsets!( - DebugLineStrOffsets: LineStringId => DebugLineStrOffset, - "The section offsets of all strings within a `.debug_line_str` section." 
-); - -#[cfg(test)] -#[cfg(feature = "read")] -mod tests { - use super::*; - use crate::read; - use crate::write::EndianVec; - use crate::LittleEndian; - - #[test] - fn test_string_table() { - let mut strings = StringTable::default(); - assert_eq!(strings.count(), 0); - let id1 = strings.add(&b"one"[..]); - let id2 = strings.add(&b"two"[..]); - assert_eq!(strings.add(&b"one"[..]), id1); - assert_eq!(strings.add(&b"two"[..]), id2); - assert_eq!(strings.get(id1), &b"one"[..]); - assert_eq!(strings.get(id2), &b"two"[..]); - assert_eq!(strings.count(), 2); - - let mut debug_str = DebugStr::from(EndianVec::new(LittleEndian)); - let offsets = strings.write(&mut debug_str).unwrap(); - assert_eq!(debug_str.slice(), b"one\0two\0"); - assert_eq!(offsets.get(id1), DebugStrOffset(0)); - assert_eq!(offsets.get(id2), DebugStrOffset(4)); - assert_eq!(offsets.count(), 2); - } - - #[test] - fn test_string_table_read() { - let mut strings = StringTable::default(); - let id1 = strings.add(&b"one"[..]); - let id2 = strings.add(&b"two"[..]); - - let mut debug_str = DebugStr::from(EndianVec::new(LittleEndian)); - let offsets = strings.write(&mut debug_str).unwrap(); - - let read_debug_str = read::DebugStr::new(debug_str.slice(), LittleEndian); - let str1 = read_debug_str.get_str(offsets.get(id1)).unwrap(); - let str2 = read_debug_str.get_str(offsets.get(id2)).unwrap(); - assert_eq!(str1.slice(), &b"one"[..]); - assert_eq!(str2.slice(), &b"two"[..]); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/unit.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/unit.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/unit.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/unit.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,3152 +0,0 @@ -use alloc::vec::Vec; -use std::ops::{Deref, DerefMut}; -use std::{slice, usize}; - -use crate::common::{ - DebugAbbrevOffset, DebugInfoOffset, DebugLineOffset, DebugMacinfoOffset, DebugMacroOffset, - 
DebugStrOffset, DebugTypeSignature, Encoding, Format, SectionId, -}; -use crate::constants; -use crate::leb128::write::{sleb128_size, uleb128_size}; -use crate::write::{ - Abbreviation, AbbreviationTable, Address, AttributeSpecification, BaseId, DebugLineStrOffsets, - DebugStrOffsets, Error, Expression, FileId, LineProgram, LineStringId, LocationListId, - LocationListOffsets, LocationListTable, RangeListId, RangeListOffsets, RangeListTable, - Reference, Result, Section, Sections, StringId, Writer, -}; - -define_id!(UnitId, "An identifier for a unit in a `UnitTable`."); - -define_id!(UnitEntryId, "An identifier for an entry in a `Unit`."); - -/// A table of units that will be stored in the `.debug_info` section. -#[derive(Debug, Default)] -pub struct UnitTable { - base_id: BaseId, - units: Vec, -} - -impl UnitTable { - /// Create a new unit and add it to the table. - /// - /// `address_size` must be in bytes. - /// - /// Returns the `UnitId` of the new unit. - #[inline] - pub fn add(&mut self, unit: Unit) -> UnitId { - let id = UnitId::new(self.base_id, self.units.len()); - self.units.push(unit); - id - } - - /// Return the number of units. - #[inline] - pub fn count(&self) -> usize { - self.units.len() - } - - /// Return the id of a unit. - /// - /// # Panics - /// - /// Panics if `index >= self.count()`. - #[inline] - pub fn id(&self, index: usize) -> UnitId { - assert!(index < self.count()); - UnitId::new(self.base_id, index) - } - - /// Get a reference to a unit. - /// - /// # Panics - /// - /// Panics if `id` is invalid. - #[inline] - pub fn get(&self, id: UnitId) -> &Unit { - debug_assert_eq!(self.base_id, id.base_id); - &self.units[id.index] - } - - /// Get a mutable reference to a unit. - /// - /// # Panics - /// - /// Panics if `id` is invalid. - #[inline] - pub fn get_mut(&mut self, id: UnitId) -> &mut Unit { - debug_assert_eq!(self.base_id, id.base_id); - &mut self.units[id.index] - } - - /// Write the units to the given sections. 
- /// - /// `strings` must contain the `.debug_str` offsets of the corresponding - /// `StringTable`. - pub fn write( - &mut self, - sections: &mut Sections, - line_strings: &DebugLineStrOffsets, - strings: &DebugStrOffsets, - ) -> Result { - let mut offsets = DebugInfoOffsets { - base_id: self.base_id, - units: Vec::new(), - }; - for unit in &mut self.units { - // TODO: maybe share abbreviation tables - let abbrev_offset = sections.debug_abbrev.offset(); - let mut abbrevs = AbbreviationTable::default(); - - offsets.units.push(unit.write( - sections, - abbrev_offset, - &mut abbrevs, - line_strings, - strings, - )?); - - abbrevs.write(&mut sections.debug_abbrev)?; - } - - write_section_refs( - &mut sections.debug_info_refs, - &mut sections.debug_info.0, - &offsets, - )?; - write_section_refs( - &mut sections.debug_loc_refs, - &mut sections.debug_loc.0, - &offsets, - )?; - write_section_refs( - &mut sections.debug_loclists_refs, - &mut sections.debug_loclists.0, - &offsets, - )?; - - Ok(offsets) - } -} - -fn write_section_refs( - references: &mut Vec, - w: &mut W, - offsets: &DebugInfoOffsets, -) -> Result<()> { - for r in references.drain(..) { - let entry_offset = offsets.entry(r.unit, r.entry).0; - debug_assert_ne!(entry_offset, 0); - w.write_offset_at(r.offset, entry_offset, SectionId::DebugInfo, r.size)?; - } - Ok(()) -} - -/// A unit's debugging information. -#[derive(Debug)] -pub struct Unit { - base_id: BaseId, - /// The encoding parameters for this unit. - encoding: Encoding, - /// The line number program for this unit. - pub line_program: LineProgram, - /// A table of range lists used by this unit. - pub ranges: RangeListTable, - /// A table of location lists used by this unit. - pub locations: LocationListTable, - /// All entries in this unit. The order is unrelated to the tree order. 
- // Requirements: - // - entries form a tree - // - entries can be added in any order - // - entries have a fixed id - // - able to quickly lookup an entry from its id - // Limitations of current implementation: - // - mutable iteration of children is messy due to borrow checker - entries: Vec, - /// The index of the root entry in entries. - root: UnitEntryId, -} - -impl Unit { - /// Create a new `Unit`. - pub fn new(encoding: Encoding, line_program: LineProgram) -> Self { - let base_id = BaseId::default(); - let ranges = RangeListTable::default(); - let locations = LocationListTable::default(); - let mut entries = Vec::new(); - let root = DebuggingInformationEntry::new( - base_id, - &mut entries, - None, - constants::DW_TAG_compile_unit, - ); - Unit { - base_id, - encoding, - line_program, - ranges, - locations, - entries, - root, - } - } - - /// Return the encoding parameters for this unit. - #[inline] - pub fn encoding(&self) -> Encoding { - self.encoding - } - - /// Return the DWARF version for this unit. - #[inline] - pub fn version(&self) -> u16 { - self.encoding.version - } - - /// Return the address size in bytes for this unit. - #[inline] - pub fn address_size(&self) -> u8 { - self.encoding.address_size - } - - /// Return the DWARF format for this unit. - #[inline] - pub fn format(&self) -> Format { - self.encoding.format - } - - /// Return the number of `DebuggingInformationEntry`s created for this unit. - /// - /// This includes entries that no longer have a parent. - #[inline] - pub fn count(&self) -> usize { - self.entries.len() - } - - /// Return the id of the root entry. - #[inline] - pub fn root(&self) -> UnitEntryId { - self.root - } - - /// Add a new `DebuggingInformationEntry` to this unit and return its id. - /// - /// The `parent` must be within the same unit. - /// - /// # Panics - /// - /// Panics if `parent` is invalid. 
- #[inline] - pub fn add(&mut self, parent: UnitEntryId, tag: constants::DwTag) -> UnitEntryId { - debug_assert_eq!(self.base_id, parent.base_id); - DebuggingInformationEntry::new(self.base_id, &mut self.entries, Some(parent), tag) - } - - /// Get a reference to an entry. - /// - /// # Panics - /// - /// Panics if `id` is invalid. - #[inline] - pub fn get(&self, id: UnitEntryId) -> &DebuggingInformationEntry { - debug_assert_eq!(self.base_id, id.base_id); - &self.entries[id.index] - } - - /// Get a mutable reference to an entry. - /// - /// # Panics - /// - /// Panics if `id` is invalid. - #[inline] - pub fn get_mut(&mut self, id: UnitEntryId) -> &mut DebuggingInformationEntry { - debug_assert_eq!(self.base_id, id.base_id); - &mut self.entries[id.index] - } - - /// Return true if `self.line_program` is used by a DIE. - fn line_program_in_use(&self) -> bool { - if self.line_program.is_none() { - return false; - } - if !self.line_program.is_empty() { - return true; - } - - for entry in &self.entries { - for attr in &entry.attrs { - if let AttributeValue::FileIndex(Some(_)) = attr.value { - return true; - } - } - } - - false - } - - /// Write the unit to the given sections. - pub(crate) fn write( - &mut self, - sections: &mut Sections, - abbrev_offset: DebugAbbrevOffset, - abbrevs: &mut AbbreviationTable, - line_strings: &DebugLineStrOffsets, - strings: &DebugStrOffsets, - ) -> Result { - let line_program = if self.line_program_in_use() { - self.entries[self.root.index] - .set(constants::DW_AT_stmt_list, AttributeValue::LineProgramRef); - Some(self.line_program.write( - &mut sections.debug_line, - self.encoding, - line_strings, - strings, - )?) - } else { - self.entries[self.root.index].delete(constants::DW_AT_stmt_list); - None - }; - - // TODO: use .debug_types for type units in DWARF v4. 
- let w = &mut sections.debug_info; - - let mut offsets = UnitOffsets { - base_id: self.base_id, - unit: w.offset(), - // Entries can be written in any order, so create the complete vec now. - entries: vec![EntryOffset::none(); self.entries.len()], - }; - - let length_offset = w.write_initial_length(self.format())?; - let length_base = w.len(); - - w.write_u16(self.version())?; - if 2 <= self.version() && self.version() <= 4 { - w.write_offset( - abbrev_offset.0, - SectionId::DebugAbbrev, - self.format().word_size(), - )?; - w.write_u8(self.address_size())?; - } else if self.version() == 5 { - w.write_u8(constants::DW_UT_compile.0)?; - w.write_u8(self.address_size())?; - w.write_offset( - abbrev_offset.0, - SectionId::DebugAbbrev, - self.format().word_size(), - )?; - } else { - return Err(Error::UnsupportedVersion(self.version())); - } - - // Calculate all DIE offsets, so that we are able to output references to them. - // However, references to base types in expressions use ULEB128, so base types - // must be moved to the front before we can calculate offsets. - self.reorder_base_types(); - let mut offset = w.len(); - self.entries[self.root.index].calculate_offsets( - self, - &mut offset, - &mut offsets, - abbrevs, - )?; - - let range_lists = self.ranges.write(sections, self.encoding)?; - // Location lists can't be written until we have DIE offsets. - let loc_lists = self - .locations - .write(sections, self.encoding, Some(&offsets))?; - - let w = &mut sections.debug_info; - let mut unit_refs = Vec::new(); - self.entries[self.root.index].write( - w, - &mut sections.debug_info_refs, - &mut unit_refs, - self, - &mut offsets, - line_program, - line_strings, - strings, - &range_lists, - &loc_lists, - )?; - - let length = (w.len() - length_base) as u64; - w.write_initial_length_at(length_offset, length, self.format())?; - - for (offset, entry) in unit_refs { - // This does not need relocation. 
- w.write_udata_at( - offset.0, - offsets.unit_offset(entry), - self.format().word_size(), - )?; - } - - Ok(offsets) - } - - /// Reorder base types to come first so that typed stack operations - /// can get their offset. - fn reorder_base_types(&mut self) { - let root = &self.entries[self.root.index]; - let mut root_children = Vec::with_capacity(root.children.len()); - for entry in &root.children { - if self.entries[entry.index].tag == constants::DW_TAG_base_type { - root_children.push(*entry); - } - } - for entry in &root.children { - if self.entries[entry.index].tag != constants::DW_TAG_base_type { - root_children.push(*entry); - } - } - self.entries[self.root.index].children = root_children; - } -} - -/// A Debugging Information Entry (DIE). -/// -/// DIEs have a set of attributes and optionally have children DIEs as well. -/// -/// DIEs form a tree without any cycles. This is enforced by specifying the -/// parent when creating a DIE, and disallowing changes of parent. -#[derive(Debug)] -pub struct DebuggingInformationEntry { - id: UnitEntryId, - parent: Option, - tag: constants::DwTag, - /// Whether to emit `DW_AT_sibling`. - sibling: bool, - attrs: Vec, - children: Vec, -} - -impl DebuggingInformationEntry { - /// Create a new `DebuggingInformationEntry`. - /// - /// # Panics - /// - /// Panics if `parent` is invalid. - #[allow(clippy::new_ret_no_self)] - fn new( - base_id: BaseId, - entries: &mut Vec, - parent: Option, - tag: constants::DwTag, - ) -> UnitEntryId { - let id = UnitEntryId::new(base_id, entries.len()); - entries.push(DebuggingInformationEntry { - id, - parent, - tag, - sibling: false, - attrs: Vec::new(), - children: Vec::new(), - }); - if let Some(parent) = parent { - debug_assert_eq!(base_id, parent.base_id); - assert_ne!(parent, id); - entries[parent.index].children.push(id); - } - id - } - - /// Return the id of this entry. - #[inline] - pub fn id(&self) -> UnitEntryId { - self.id - } - - /// Return the parent of this entry. 
- #[inline] - pub fn parent(&self) -> Option { - self.parent - } - - /// Return the tag of this entry. - #[inline] - pub fn tag(&self) -> constants::DwTag { - self.tag - } - - /// Return `true` if a `DW_AT_sibling` attribute will be emitted. - #[inline] - pub fn sibling(&self) -> bool { - self.sibling - } - - /// Set whether a `DW_AT_sibling` attribute will be emitted. - /// - /// The attribute will only be emitted if the DIE has children. - #[inline] - pub fn set_sibling(&mut self, sibling: bool) { - self.sibling = sibling; - } - - /// Iterate over the attributes of this entry. - #[inline] - pub fn attrs(&self) -> slice::Iter { - self.attrs.iter() - } - - /// Iterate over the attributes of this entry for modification. - #[inline] - pub fn attrs_mut(&mut self) -> slice::IterMut { - self.attrs.iter_mut() - } - - /// Get an attribute. - pub fn get(&self, name: constants::DwAt) -> Option<&AttributeValue> { - self.attrs - .iter() - .find(|attr| attr.name == name) - .map(|attr| &attr.value) - } - - /// Get an attribute for modification. - pub fn get_mut(&mut self, name: constants::DwAt) -> Option<&mut AttributeValue> { - self.attrs - .iter_mut() - .find(|attr| attr.name == name) - .map(|attr| &mut attr.value) - } - - /// Set an attribute. - /// - /// Replaces any existing attribute with the same name. - /// - /// # Panics - /// - /// Panics if `name` is `DW_AT_sibling`. Use `set_sibling` instead. - pub fn set(&mut self, name: constants::DwAt, value: AttributeValue) { - assert_ne!(name, constants::DW_AT_sibling); - if let Some(attr) = self.attrs.iter_mut().find(|attr| attr.name == name) { - attr.value = value; - return; - } - self.attrs.push(Attribute { name, value }); - } - - /// Delete an attribute. - /// - /// Replaces any existing attribute with the same name. - pub fn delete(&mut self, name: constants::DwAt) { - self.attrs.retain(|x| x.name != name); - } - - /// Iterate over the children of this entry. 
- /// - /// Note: use `Unit::add` to add a new child to this entry. - #[inline] - pub fn children(&self) -> slice::Iter { - self.children.iter() - } - - /// Delete a child entry and all of its children. - pub fn delete_child(&mut self, id: UnitEntryId) { - self.children.retain(|&child| child != id); - } - - /// Return the type abbreviation for this DIE. - fn abbreviation(&self, encoding: Encoding) -> Result { - let mut attrs = Vec::new(); - - if self.sibling && !self.children.is_empty() { - let form = match encoding.format { - Format::Dwarf32 => constants::DW_FORM_ref4, - Format::Dwarf64 => constants::DW_FORM_ref8, - }; - attrs.push(AttributeSpecification::new(constants::DW_AT_sibling, form)); - } - - for attr in &self.attrs { - attrs.push(attr.specification(encoding)?); - } - - Ok(Abbreviation::new( - self.tag, - !self.children.is_empty(), - attrs, - )) - } - - fn calculate_offsets( - &self, - unit: &Unit, - offset: &mut usize, - offsets: &mut UnitOffsets, - abbrevs: &mut AbbreviationTable, - ) -> Result<()> { - offsets.entries[self.id.index].offset = DebugInfoOffset(*offset); - offsets.entries[self.id.index].abbrev = abbrevs.add(self.abbreviation(unit.encoding())?); - *offset += self.size(unit, offsets); - if !self.children.is_empty() { - for child in &self.children { - unit.entries[child.index].calculate_offsets(unit, offset, offsets, abbrevs)?; - } - // Null child - *offset += 1; - } - Ok(()) - } - - fn size(&self, unit: &Unit, offsets: &UnitOffsets) -> usize { - let mut size = uleb128_size(offsets.abbrev(self.id)); - if self.sibling && !self.children.is_empty() { - size += unit.format().word_size() as usize; - } - for attr in &self.attrs { - size += attr.value.size(unit, offsets); - } - size - } - - /// Write the entry to the given sections. 
- fn write( - &self, - w: &mut DebugInfo, - debug_info_refs: &mut Vec, - unit_refs: &mut Vec<(DebugInfoOffset, UnitEntryId)>, - unit: &Unit, - offsets: &mut UnitOffsets, - line_program: Option, - line_strings: &DebugLineStrOffsets, - strings: &DebugStrOffsets, - range_lists: &RangeListOffsets, - loc_lists: &LocationListOffsets, - ) -> Result<()> { - debug_assert_eq!(offsets.debug_info_offset(self.id), w.offset()); - w.write_uleb128(offsets.abbrev(self.id))?; - - let sibling_offset = if self.sibling && !self.children.is_empty() { - let offset = w.offset(); - w.write_udata(0, unit.format().word_size())?; - Some(offset) - } else { - None - }; - - for attr in &self.attrs { - attr.value.write( - w, - debug_info_refs, - unit_refs, - unit, - offsets, - line_program, - line_strings, - strings, - range_lists, - loc_lists, - )?; - } - - if !self.children.is_empty() { - for child in &self.children { - unit.entries[child.index].write( - w, - debug_info_refs, - unit_refs, - unit, - offsets, - line_program, - line_strings, - strings, - range_lists, - loc_lists, - )?; - } - // Null child - w.write_u8(0)?; - } - - if let Some(offset) = sibling_offset { - let next_offset = (w.offset().0 - offsets.unit.0) as u64; - // This does not need relocation. - w.write_udata_at(offset.0, next_offset, unit.format().word_size())?; - } - Ok(()) - } -} - -/// An attribute in a `DebuggingInformationEntry`, consisting of a name and -/// associated value. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Attribute { - name: constants::DwAt, - value: AttributeValue, -} - -impl Attribute { - /// Get the name of this attribute. - #[inline] - pub fn name(&self) -> constants::DwAt { - self.name - } - - /// Get the value of this attribute. - #[inline] - pub fn get(&self) -> &AttributeValue { - &self.value - } - - /// Set the value of this attribute. - #[inline] - pub fn set(&mut self, value: AttributeValue) { - self.value = value; - } - - /// Return the type specification for this attribute. 
- fn specification(&self, encoding: Encoding) -> Result { - Ok(AttributeSpecification::new( - self.name, - self.value.form(encoding)?, - )) - } -} - -/// The value of an attribute in a `DebuggingInformationEntry`. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum AttributeValue { - /// "Refers to some location in the address space of the described program." - Address(Address), - - /// A slice of an arbitrary number of bytes. - Block(Vec), - - /// A one byte constant data value. How to interpret the byte depends on context. - /// - /// From section 7 of the standard: "Depending on context, it may be a - /// signed integer, an unsigned integer, a floating-point constant, or - /// anything else." - Data1(u8), - - /// A two byte constant data value. How to interpret the bytes depends on context. - /// - /// This value will be converted to the target endian before writing. - /// - /// From section 7 of the standard: "Depending on context, it may be a - /// signed integer, an unsigned integer, a floating-point constant, or - /// anything else." - Data2(u16), - - /// A four byte constant data value. How to interpret the bytes depends on context. - /// - /// This value will be converted to the target endian before writing. - /// - /// From section 7 of the standard: "Depending on context, it may be a - /// signed integer, an unsigned integer, a floating-point constant, or - /// anything else." - Data4(u32), - - /// An eight byte constant data value. How to interpret the bytes depends on context. - /// - /// This value will be converted to the target endian before writing. - /// - /// From section 7 of the standard: "Depending on context, it may be a - /// signed integer, an unsigned integer, a floating-point constant, or - /// anything else." - Data8(u64), - - /// A signed integer constant. - Sdata(i64), - - /// An unsigned integer constant. 
- Udata(u64), - - /// "The information bytes contain a DWARF expression (see Section 2.5) or - /// location description (see Section 2.6)." - Exprloc(Expression), - - /// A boolean that indicates presence or absence of the attribute. - Flag(bool), - - /// An attribute that is always present. - FlagPresent, - - /// A reference to a `DebuggingInformationEntry` in this unit. - UnitRef(UnitEntryId), - - /// A reference to a `DebuggingInformationEntry` in a potentially different unit. - DebugInfoRef(Reference), - - /// An offset into the `.debug_info` section of the supplementary object file. - /// - /// The API does not currently assist with generating this offset. - /// This variant will be removed from the API once support for writing - /// supplementary object files is implemented. - DebugInfoRefSup(DebugInfoOffset), - - /// A reference to a line number program. - LineProgramRef, - - /// A reference to a location list. - LocationListRef(LocationListId), - - /// An offset into the `.debug_macinfo` section. - /// - /// The API does not currently assist with generating this offset. - /// This variant will be removed from the API once support for writing - /// `.debug_macinfo` sections is implemented. - DebugMacinfoRef(DebugMacinfoOffset), - - /// An offset into the `.debug_macro` section. - /// - /// The API does not currently assist with generating this offset. - /// This variant will be removed from the API once support for writing - /// `.debug_macro` sections is implemented. - DebugMacroRef(DebugMacroOffset), - - /// A reference to a range list. - RangeListRef(RangeListId), - - /// A type signature. - /// - /// The API does not currently assist with generating this signature. - /// This variant will be removed from the API once support for writing - /// `.debug_types` sections is implemented. - DebugTypesRef(DebugTypeSignature), - - /// A reference to a string in the `.debug_str` section. 
- StringRef(StringId), - - /// An offset into the `.debug_str` section of the supplementary object file. - /// - /// The API does not currently assist with generating this offset. - /// This variant will be removed from the API once support for writing - /// supplementary object files is implemented. - DebugStrRefSup(DebugStrOffset), - - /// A reference to a string in the `.debug_line_str` section. - LineStringRef(LineStringId), - - /// A slice of bytes representing a string. Must not include null bytes. - /// Not guaranteed to be UTF-8 or anything like that. - String(Vec), - - /// The value of a `DW_AT_encoding` attribute. - Encoding(constants::DwAte), - - /// The value of a `DW_AT_decimal_sign` attribute. - DecimalSign(constants::DwDs), - - /// The value of a `DW_AT_endianity` attribute. - Endianity(constants::DwEnd), - - /// The value of a `DW_AT_accessibility` attribute. - Accessibility(constants::DwAccess), - - /// The value of a `DW_AT_visibility` attribute. - Visibility(constants::DwVis), - - /// The value of a `DW_AT_virtuality` attribute. - Virtuality(constants::DwVirtuality), - - /// The value of a `DW_AT_language` attribute. - Language(constants::DwLang), - - /// The value of a `DW_AT_address_class` attribute. - AddressClass(constants::DwAddr), - - /// The value of a `DW_AT_identifier_case` attribute. - IdentifierCase(constants::DwId), - - /// The value of a `DW_AT_calling_convention` attribute. - CallingConvention(constants::DwCc), - - /// The value of a `DW_AT_inline` attribute. - Inline(constants::DwInl), - - /// The value of a `DW_AT_ordering` attribute. - Ordering(constants::DwOrd), - - /// An index into the filename entries from the line number information - /// table for the unit containing this value. - FileIndex(Option), -} - -impl AttributeValue { - /// Return the form that will be used to encode this value. 
- pub fn form(&self, encoding: Encoding) -> Result { - // TODO: missing forms: - // - DW_FORM_indirect - // - DW_FORM_implicit_const - // - FW_FORM_block1/block2/block4 - // - DW_FORM_str/strx1/strx2/strx3/strx4 - // - DW_FORM_addrx/addrx1/addrx2/addrx3/addrx4 - // - DW_FORM_data16 - // - DW_FORM_line_strp - // - DW_FORM_loclistx - // - DW_FORM_rnglistx - let form = match *self { - AttributeValue::Address(_) => constants::DW_FORM_addr, - AttributeValue::Block(_) => constants::DW_FORM_block, - AttributeValue::Data1(_) => constants::DW_FORM_data1, - AttributeValue::Data2(_) => constants::DW_FORM_data2, - AttributeValue::Data4(_) => constants::DW_FORM_data4, - AttributeValue::Data8(_) => constants::DW_FORM_data8, - AttributeValue::Exprloc(_) => constants::DW_FORM_exprloc, - AttributeValue::Flag(_) => constants::DW_FORM_flag, - AttributeValue::FlagPresent => constants::DW_FORM_flag_present, - AttributeValue::UnitRef(_) => { - // Using a fixed size format lets us write a placeholder before we know - // the value. - match encoding.format { - Format::Dwarf32 => constants::DW_FORM_ref4, - Format::Dwarf64 => constants::DW_FORM_ref8, - } - } - AttributeValue::DebugInfoRef(_) => constants::DW_FORM_ref_addr, - AttributeValue::DebugInfoRefSup(_) => { - // TODO: should this depend on the size of supplementary section? 
- match encoding.format { - Format::Dwarf32 => constants::DW_FORM_ref_sup4, - Format::Dwarf64 => constants::DW_FORM_ref_sup8, - } - } - AttributeValue::LineProgramRef - | AttributeValue::LocationListRef(_) - | AttributeValue::DebugMacinfoRef(_) - | AttributeValue::DebugMacroRef(_) - | AttributeValue::RangeListRef(_) => { - if encoding.version == 2 || encoding.version == 3 { - match encoding.format { - Format::Dwarf32 => constants::DW_FORM_data4, - Format::Dwarf64 => constants::DW_FORM_data8, - } - } else { - constants::DW_FORM_sec_offset - } - } - AttributeValue::DebugTypesRef(_) => constants::DW_FORM_ref_sig8, - AttributeValue::StringRef(_) => constants::DW_FORM_strp, - AttributeValue::DebugStrRefSup(_) => constants::DW_FORM_strp_sup, - AttributeValue::LineStringRef(_) => constants::DW_FORM_line_strp, - AttributeValue::String(_) => constants::DW_FORM_string, - AttributeValue::Encoding(_) - | AttributeValue::DecimalSign(_) - | AttributeValue::Endianity(_) - | AttributeValue::Accessibility(_) - | AttributeValue::Visibility(_) - | AttributeValue::Virtuality(_) - | AttributeValue::Language(_) - | AttributeValue::AddressClass(_) - | AttributeValue::IdentifierCase(_) - | AttributeValue::CallingConvention(_) - | AttributeValue::Inline(_) - | AttributeValue::Ordering(_) - | AttributeValue::FileIndex(_) - | AttributeValue::Udata(_) => constants::DW_FORM_udata, - AttributeValue::Sdata(_) => constants::DW_FORM_sdata, - }; - Ok(form) - } - - fn size(&self, unit: &Unit, offsets: &UnitOffsets) -> usize { - macro_rules! 
debug_assert_form { - ($form:expr) => { - debug_assert_eq!(self.form(unit.encoding()).unwrap(), $form) - }; - } - match *self { - AttributeValue::Address(_) => { - debug_assert_form!(constants::DW_FORM_addr); - unit.address_size() as usize - } - AttributeValue::Block(ref val) => { - debug_assert_form!(constants::DW_FORM_block); - uleb128_size(val.len() as u64) + val.len() - } - AttributeValue::Data1(_) => { - debug_assert_form!(constants::DW_FORM_data1); - 1 - } - AttributeValue::Data2(_) => { - debug_assert_form!(constants::DW_FORM_data2); - 2 - } - AttributeValue::Data4(_) => { - debug_assert_form!(constants::DW_FORM_data4); - 4 - } - AttributeValue::Data8(_) => { - debug_assert_form!(constants::DW_FORM_data8); - 8 - } - AttributeValue::Sdata(val) => { - debug_assert_form!(constants::DW_FORM_sdata); - sleb128_size(val) - } - AttributeValue::Udata(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val) - } - AttributeValue::Exprloc(ref val) => { - debug_assert_form!(constants::DW_FORM_exprloc); - let size = val.size(unit.encoding(), Some(offsets)); - uleb128_size(size as u64) + size - } - AttributeValue::Flag(_) => { - debug_assert_form!(constants::DW_FORM_flag); - 1 - } - AttributeValue::FlagPresent => { - debug_assert_form!(constants::DW_FORM_flag_present); - 0 - } - AttributeValue::UnitRef(_) => { - match unit.format() { - Format::Dwarf32 => debug_assert_form!(constants::DW_FORM_ref4), - Format::Dwarf64 => debug_assert_form!(constants::DW_FORM_ref8), - } - unit.format().word_size() as usize - } - AttributeValue::DebugInfoRef(_) => { - debug_assert_form!(constants::DW_FORM_ref_addr); - if unit.version() == 2 { - unit.address_size() as usize - } else { - unit.format().word_size() as usize - } - } - AttributeValue::DebugInfoRefSup(_) => { - match unit.format() { - Format::Dwarf32 => debug_assert_form!(constants::DW_FORM_ref_sup4), - Format::Dwarf64 => debug_assert_form!(constants::DW_FORM_ref_sup8), - } - unit.format().word_size() as usize - 
} - AttributeValue::LineProgramRef => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - unit.format().word_size() as usize - } - AttributeValue::LocationListRef(_) => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - unit.format().word_size() as usize - } - AttributeValue::DebugMacinfoRef(_) => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - unit.format().word_size() as usize - } - AttributeValue::DebugMacroRef(_) => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - unit.format().word_size() as usize - } - AttributeValue::RangeListRef(_) => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - unit.format().word_size() as usize - } - AttributeValue::DebugTypesRef(_) => { - debug_assert_form!(constants::DW_FORM_ref_sig8); - 8 - } - AttributeValue::StringRef(_) => { - debug_assert_form!(constants::DW_FORM_strp); - unit.format().word_size() as usize - } - AttributeValue::DebugStrRefSup(_) => { - debug_assert_form!(constants::DW_FORM_strp_sup); - unit.format().word_size() as usize - } - AttributeValue::LineStringRef(_) => { - debug_assert_form!(constants::DW_FORM_line_strp); - unit.format().word_size() as usize - } - AttributeValue::String(ref val) => { - debug_assert_form!(constants::DW_FORM_string); - val.len() + 1 - } - AttributeValue::Encoding(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::DecimalSign(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::Endianity(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::Accessibility(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::Visibility(val) => { - debug_assert_form!(constants::DW_FORM_udata); - 
uleb128_size(val.0 as u64) - } - AttributeValue::Virtuality(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::Language(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::AddressClass(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::IdentifierCase(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::CallingConvention(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::Inline(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::Ordering(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.0 as u64) - } - AttributeValue::FileIndex(val) => { - debug_assert_form!(constants::DW_FORM_udata); - uleb128_size(val.map(FileId::raw).unwrap_or(0)) - } - } - } - - /// Write the attribute value to the given sections. - fn write( - &self, - w: &mut DebugInfo, - debug_info_refs: &mut Vec, - unit_refs: &mut Vec<(DebugInfoOffset, UnitEntryId)>, - unit: &Unit, - offsets: &UnitOffsets, - line_program: Option, - line_strings: &DebugLineStrOffsets, - strings: &DebugStrOffsets, - range_lists: &RangeListOffsets, - loc_lists: &LocationListOffsets, - ) -> Result<()> { - macro_rules! 
debug_assert_form { - ($form:expr) => { - debug_assert_eq!(self.form(unit.encoding()).unwrap(), $form) - }; - } - match *self { - AttributeValue::Address(val) => { - debug_assert_form!(constants::DW_FORM_addr); - w.write_address(val, unit.address_size())?; - } - AttributeValue::Block(ref val) => { - debug_assert_form!(constants::DW_FORM_block); - w.write_uleb128(val.len() as u64)?; - w.write(val)?; - } - AttributeValue::Data1(val) => { - debug_assert_form!(constants::DW_FORM_data1); - w.write_u8(val)?; - } - AttributeValue::Data2(val) => { - debug_assert_form!(constants::DW_FORM_data2); - w.write_u16(val)?; - } - AttributeValue::Data4(val) => { - debug_assert_form!(constants::DW_FORM_data4); - w.write_u32(val)?; - } - AttributeValue::Data8(val) => { - debug_assert_form!(constants::DW_FORM_data8); - w.write_u64(val)?; - } - AttributeValue::Sdata(val) => { - debug_assert_form!(constants::DW_FORM_sdata); - w.write_sleb128(val)?; - } - AttributeValue::Udata(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(val)?; - } - AttributeValue::Exprloc(ref val) => { - debug_assert_form!(constants::DW_FORM_exprloc); - w.write_uleb128(val.size(unit.encoding(), Some(offsets)) as u64)?; - val.write( - &mut w.0, - Some(debug_info_refs), - unit.encoding(), - Some(offsets), - )?; - } - AttributeValue::Flag(val) => { - debug_assert_form!(constants::DW_FORM_flag); - w.write_u8(val as u8)?; - } - AttributeValue::FlagPresent => { - debug_assert_form!(constants::DW_FORM_flag_present); - } - AttributeValue::UnitRef(id) => { - match unit.format() { - Format::Dwarf32 => debug_assert_form!(constants::DW_FORM_ref4), - Format::Dwarf64 => debug_assert_form!(constants::DW_FORM_ref8), - } - unit_refs.push((w.offset(), id)); - w.write_udata(0, unit.format().word_size())?; - } - AttributeValue::DebugInfoRef(reference) => { - debug_assert_form!(constants::DW_FORM_ref_addr); - let size = if unit.version() == 2 { - unit.address_size() - } else { - unit.format().word_size() - }; 
- match reference { - Reference::Symbol(symbol) => w.write_reference(symbol, size)?, - Reference::Entry(unit, entry) => { - debug_info_refs.push(DebugInfoReference { - offset: w.len(), - unit, - entry, - size, - }); - w.write_udata(0, size)?; - } - } - } - AttributeValue::DebugInfoRefSup(val) => { - match unit.format() { - Format::Dwarf32 => debug_assert_form!(constants::DW_FORM_ref_sup4), - Format::Dwarf64 => debug_assert_form!(constants::DW_FORM_ref_sup8), - } - w.write_udata(val.0 as u64, unit.format().word_size())?; - } - AttributeValue::LineProgramRef => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - match line_program { - Some(line_program) => { - w.write_offset( - line_program.0, - SectionId::DebugLine, - unit.format().word_size(), - )?; - } - None => return Err(Error::InvalidAttributeValue), - } - } - AttributeValue::LocationListRef(val) => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - let section = if unit.version() <= 4 { - SectionId::DebugLoc - } else { - SectionId::DebugLocLists - }; - w.write_offset(loc_lists.get(val).0, section, unit.format().word_size())?; - } - AttributeValue::DebugMacinfoRef(val) => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - w.write_offset(val.0, SectionId::DebugMacinfo, unit.format().word_size())?; - } - AttributeValue::DebugMacroRef(val) => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - w.write_offset(val.0, SectionId::DebugMacro, unit.format().word_size())?; - } - AttributeValue::RangeListRef(val) => { - if unit.version() >= 4 { - debug_assert_form!(constants::DW_FORM_sec_offset); - } - let section = if unit.version() <= 4 { - SectionId::DebugRanges - } else { - SectionId::DebugRngLists - }; - w.write_offset(range_lists.get(val).0, section, unit.format().word_size())?; - } - AttributeValue::DebugTypesRef(val) => { - 
debug_assert_form!(constants::DW_FORM_ref_sig8); - w.write_u64(val.0)?; - } - AttributeValue::StringRef(val) => { - debug_assert_form!(constants::DW_FORM_strp); - w.write_offset( - strings.get(val).0, - SectionId::DebugStr, - unit.format().word_size(), - )?; - } - AttributeValue::DebugStrRefSup(val) => { - debug_assert_form!(constants::DW_FORM_strp_sup); - w.write_udata(val.0 as u64, unit.format().word_size())?; - } - AttributeValue::LineStringRef(val) => { - debug_assert_form!(constants::DW_FORM_line_strp); - w.write_offset( - line_strings.get(val).0, - SectionId::DebugLineStr, - unit.format().word_size(), - )?; - } - AttributeValue::String(ref val) => { - debug_assert_form!(constants::DW_FORM_string); - w.write(val)?; - w.write_u8(0)?; - } - AttributeValue::Encoding(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::DecimalSign(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::Endianity(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::Accessibility(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::Visibility(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::Virtuality(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::Language(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::AddressClass(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(val.0)?; - } - AttributeValue::IdentifierCase(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::CallingConvention(val) => { - 
debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::Inline(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::Ordering(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(u64::from(val.0))?; - } - AttributeValue::FileIndex(val) => { - debug_assert_form!(constants::DW_FORM_udata); - w.write_uleb128(val.map(FileId::raw).unwrap_or(0))?; - } - } - Ok(()) - } -} - -define_section!( - DebugInfo, - DebugInfoOffset, - "A writable `.debug_info` section." -); - -/// The section offsets of all elements within a `.debug_info` section. -#[derive(Debug, Default)] -pub struct DebugInfoOffsets { - base_id: BaseId, - units: Vec, -} - -impl DebugInfoOffsets { - #[cfg(test)] - #[cfg(feature = "read")] - pub(crate) fn unit_offsets(&self, unit: UnitId) -> &UnitOffsets { - debug_assert_eq!(self.base_id, unit.base_id); - &self.units[unit.index] - } - - /// Get the `.debug_info` section offset for the given unit. - #[inline] - pub fn unit(&self, unit: UnitId) -> DebugInfoOffset { - debug_assert_eq!(self.base_id, unit.base_id); - self.units[unit.index].unit - } - - /// Get the `.debug_info` section offset for the given entry. - #[inline] - pub fn entry(&self, unit: UnitId, entry: UnitEntryId) -> DebugInfoOffset { - debug_assert_eq!(self.base_id, unit.base_id); - self.units[unit.index].debug_info_offset(entry) - } -} - -/// The section offsets of all elements of a unit within a `.debug_info` section. -#[derive(Debug)] -pub(crate) struct UnitOffsets { - base_id: BaseId, - unit: DebugInfoOffset, - entries: Vec, -} - -impl UnitOffsets { - #[cfg(test)] - #[cfg(feature = "read")] - fn none() -> Self { - UnitOffsets { - base_id: BaseId::default(), - unit: DebugInfoOffset(0), - entries: Vec::new(), - } - } - - /// Get the .debug_info offset for the given entry. 
- #[inline] - pub(crate) fn debug_info_offset(&self, entry: UnitEntryId) -> DebugInfoOffset { - debug_assert_eq!(self.base_id, entry.base_id); - let offset = self.entries[entry.index].offset; - debug_assert_ne!(offset.0, 0); - offset - } - - /// Get the unit offset for the given entry. - #[inline] - pub(crate) fn unit_offset(&self, entry: UnitEntryId) -> u64 { - let offset = self.debug_info_offset(entry); - (offset.0 - self.unit.0) as u64 - } - - /// Get the abbreviation code for the given entry. - #[inline] - pub(crate) fn abbrev(&self, entry: UnitEntryId) -> u64 { - debug_assert_eq!(self.base_id, entry.base_id); - self.entries[entry.index].abbrev - } -} - -#[derive(Debug, Clone, Copy)] -pub(crate) struct EntryOffset { - offset: DebugInfoOffset, - abbrev: u64, -} - -impl EntryOffset { - fn none() -> Self { - EntryOffset { - offset: DebugInfoOffset(0), - abbrev: 0, - } - } -} - -/// A reference to a `.debug_info` entry that has yet to be resolved. -#[derive(Debug, Clone, Copy)] -pub(crate) struct DebugInfoReference { - /// The offset within the section of the reference. - pub offset: usize, - /// The size of the reference. - pub size: u8, - /// The unit containing the entry. - pub unit: UnitId, - /// The entry being referenced. 
- pub entry: UnitEntryId, -} - -#[cfg(feature = "read")] -pub(crate) mod convert { - use super::*; - use crate::common::{DwoId, UnitSectionOffset}; - use crate::read::{self, Reader}; - use crate::write::{self, ConvertError, ConvertResult, LocationList, RangeList}; - use std::collections::HashMap; - - pub(crate) struct ConvertUnit> { - from_unit: read::Unit, - base_id: BaseId, - encoding: Encoding, - entries: Vec, - entry_offsets: Vec, - root: UnitEntryId, - } - - pub(crate) struct ConvertUnitContext<'a, R: Reader> { - pub dwarf: &'a read::Dwarf, - pub unit: &'a read::Unit, - pub line_strings: &'a mut write::LineStringTable, - pub strings: &'a mut write::StringTable, - pub ranges: &'a mut write::RangeListTable, - pub locations: &'a mut write::LocationListTable, - pub convert_address: &'a dyn Fn(u64) -> Option
, - pub base_address: Address, - pub line_program_offset: Option, - pub line_program_files: Vec, - pub entry_ids: &'a HashMap, - } - - impl UnitTable { - /// Create a unit table by reading the data in the given sections. - /// - /// This also updates the given tables with the values that are referenced from - /// attributes in this section. - /// - /// `convert_address` is a function to convert read addresses into the `Address` - /// type. For non-relocatable addresses, this function may simply return - /// `Address::Constant(address)`. For relocatable addresses, it is the caller's - /// responsibility to determine the symbol and addend corresponding to the address - /// and return `Address::Symbol { symbol, addend }`. - pub fn from>( - dwarf: &read::Dwarf, - line_strings: &mut write::LineStringTable, - strings: &mut write::StringTable, - convert_address: &dyn Fn(u64) -> Option
, - ) -> ConvertResult { - let base_id = BaseId::default(); - let mut unit_entries = Vec::new(); - let mut entry_ids = HashMap::new(); - - let mut from_units = dwarf.units(); - while let Some(from_unit) = from_units.next()? { - let unit_id = UnitId::new(base_id, unit_entries.len()); - unit_entries.push(Unit::convert_entries( - from_unit, - unit_id, - &mut entry_ids, - dwarf, - )?); - } - - // Attributes must be converted in a separate pass so that we can handle - // references to other compilation units. - let mut units = Vec::new(); - for unit_entries in unit_entries.drain(..) { - units.push(Unit::convert_attributes( - unit_entries, - &entry_ids, - dwarf, - line_strings, - strings, - convert_address, - )?); - } - - Ok(UnitTable { base_id, units }) - } - } - - impl Unit { - /// Create a unit by reading the data in the input sections. - /// - /// Does not add entry attributes. - pub(crate) fn convert_entries>( - from_header: read::UnitHeader, - unit_id: UnitId, - entry_ids: &mut HashMap, - dwarf: &read::Dwarf, - ) -> ConvertResult> { - match from_header.type_() { - read::UnitType::Compilation => (), - _ => return Err(ConvertError::UnsupportedUnitType), - } - let base_id = BaseId::default(); - - let from_unit = dwarf.unit(from_header)?; - let encoding = from_unit.encoding(); - - let mut entries = Vec::new(); - let mut entry_offsets = Vec::new(); - - let mut from_tree = from_unit.entries_tree(None)?; - let from_root = from_tree.root()?; - let root = DebuggingInformationEntry::convert_entry( - from_root, - &from_unit, - base_id, - &mut entries, - &mut entry_offsets, - entry_ids, - None, - unit_id, - )?; - - Ok(ConvertUnit { - from_unit, - base_id, - encoding, - entries, - entry_offsets, - root, - }) - } - - /// Create entry attributes by reading the data in the input sections. 
- fn convert_attributes>( - unit: ConvertUnit, - entry_ids: &HashMap, - dwarf: &read::Dwarf, - line_strings: &mut write::LineStringTable, - strings: &mut write::StringTable, - convert_address: &dyn Fn(u64) -> Option
, - ) -> ConvertResult { - let from_unit = unit.from_unit; - let base_address = - convert_address(from_unit.low_pc).ok_or(ConvertError::InvalidAddress)?; - - let (line_program_offset, line_program, line_program_files) = - match from_unit.line_program { - Some(ref from_program) => { - let from_program = from_program.clone(); - let line_program_offset = from_program.header().offset(); - let (line_program, line_program_files) = LineProgram::from( - from_program, - dwarf, - line_strings, - strings, - convert_address, - )?; - (Some(line_program_offset), line_program, line_program_files) - } - None => (None, LineProgram::none(), Vec::new()), - }; - - let mut ranges = RangeListTable::default(); - let mut locations = LocationListTable::default(); - - let mut context = ConvertUnitContext { - entry_ids, - dwarf, - unit: &from_unit, - line_strings, - strings, - ranges: &mut ranges, - locations: &mut locations, - convert_address, - base_address, - line_program_offset, - line_program_files, - }; - - let mut entries = unit.entries; - for entry in &mut entries { - entry.convert_attributes(&mut context, &unit.entry_offsets)?; - } - - Ok(Unit { - base_id: unit.base_id, - encoding: unit.encoding, - line_program, - ranges, - locations, - entries, - root: unit.root, - }) - } - } - - impl DebuggingInformationEntry { - /// Create an entry by reading the data in the input sections. - /// - /// Does not add the entry attributes. 
- fn convert_entry>( - from: read::EntriesTreeNode, - from_unit: &read::Unit, - base_id: BaseId, - entries: &mut Vec, - entry_offsets: &mut Vec, - entry_ids: &mut HashMap, - parent: Option, - unit_id: UnitId, - ) -> ConvertResult { - let from_entry = from.entry(); - let id = DebuggingInformationEntry::new(base_id, entries, parent, from_entry.tag()); - let offset = from_entry.offset(); - entry_offsets.push(offset); - entry_ids.insert(offset.to_unit_section_offset(from_unit), (unit_id, id)); - - let mut from_children = from.children(); - while let Some(from_child) = from_children.next()? { - DebuggingInformationEntry::convert_entry( - from_child, - from_unit, - base_id, - entries, - entry_offsets, - entry_ids, - Some(id), - unit_id, - )?; - } - Ok(id) - } - - /// Create an entry's attributes by reading the data in the input sections. - fn convert_attributes>( - &mut self, - context: &mut ConvertUnitContext, - entry_offsets: &[read::UnitOffset], - ) -> ConvertResult<()> { - let offset = entry_offsets[self.id.index]; - let from = context.unit.entry(offset)?; - let mut from_attrs = from.attrs(); - while let Some(from_attr) = from_attrs.next()? { - if from_attr.name() == constants::DW_AT_sibling { - // This may point to a null entry, so we have to treat it differently. - self.set_sibling(true); - } else if let Some(attr) = Attribute::from(context, &from_attr)? { - self.set(attr.name, attr.value); - } - } - Ok(()) - } - } - - impl Attribute { - /// Create an attribute by reading the data in the given sections. - pub(crate) fn from>( - context: &mut ConvertUnitContext, - from: &read::Attribute, - ) -> ConvertResult> { - let value = AttributeValue::from(context, from.value())?; - Ok(value.map(|value| Attribute { - name: from.name(), - value, - })) - } - } - - impl AttributeValue { - /// Create an attribute value by reading the data in the given sections. 
- pub(crate) fn from>( - context: &mut ConvertUnitContext, - from: read::AttributeValue, - ) -> ConvertResult> { - let to = match from { - read::AttributeValue::Addr(val) => match (context.convert_address)(val) { - Some(val) => AttributeValue::Address(val), - None => return Err(ConvertError::InvalidAddress), - }, - read::AttributeValue::Block(r) => AttributeValue::Block(r.to_slice()?.into()), - read::AttributeValue::Data1(val) => AttributeValue::Data1(val), - read::AttributeValue::Data2(val) => AttributeValue::Data2(val), - read::AttributeValue::Data4(val) => AttributeValue::Data4(val), - read::AttributeValue::Data8(val) => AttributeValue::Data8(val), - read::AttributeValue::Sdata(val) => AttributeValue::Sdata(val), - read::AttributeValue::Udata(val) => AttributeValue::Udata(val), - read::AttributeValue::Exprloc(expression) => { - let expression = Expression::from( - expression, - context.unit.encoding(), - Some(context.dwarf), - Some(context.unit), - Some(context.entry_ids), - context.convert_address, - )?; - AttributeValue::Exprloc(expression) - } - // TODO: it would be nice to preserve the flag form. - read::AttributeValue::Flag(val) => AttributeValue::Flag(val), - read::AttributeValue::DebugAddrBase(_base) => { - // We convert all address indices to addresses, - // so this is unneeded. 
- return Ok(None); - } - read::AttributeValue::DebugAddrIndex(index) => { - let val = context.dwarf.address(context.unit, index)?; - match (context.convert_address)(val) { - Some(val) => AttributeValue::Address(val), - None => return Err(ConvertError::InvalidAddress), - } - } - read::AttributeValue::UnitRef(val) => { - if !context.unit.header.is_valid_offset(val) { - return Err(ConvertError::InvalidUnitRef); - } - let id = context - .entry_ids - .get(&val.to_unit_section_offset(context.unit)) - .ok_or(ConvertError::InvalidUnitRef)?; - AttributeValue::UnitRef(id.1) - } - read::AttributeValue::DebugInfoRef(val) => { - // TODO: support relocation of this value - let id = context - .entry_ids - .get(&UnitSectionOffset::DebugInfoOffset(val)) - .ok_or(ConvertError::InvalidDebugInfoRef)?; - AttributeValue::DebugInfoRef(Reference::Entry(id.0, id.1)) - } - read::AttributeValue::DebugInfoRefSup(val) => AttributeValue::DebugInfoRefSup(val), - read::AttributeValue::DebugLineRef(val) => { - // There should only be the line program in the CU DIE which we've already - // converted, so check if it matches that. - if Some(val) == context.line_program_offset { - AttributeValue::LineProgramRef - } else { - return Err(ConvertError::InvalidLineRef); - } - } - read::AttributeValue::DebugMacinfoRef(val) => AttributeValue::DebugMacinfoRef(val), - read::AttributeValue::DebugMacroRef(val) => AttributeValue::DebugMacroRef(val), - read::AttributeValue::LocationListsRef(val) => { - let iter = context - .dwarf - .locations - .raw_locations(val, context.unit.encoding())?; - let loc_list = LocationList::from(iter, context)?; - let loc_id = context.locations.add(loc_list); - AttributeValue::LocationListRef(loc_id) - } - read::AttributeValue::DebugLocListsBase(_base) => { - // We convert all location list indices to offsets, - // so this is unneeded. 
- return Ok(None); - } - read::AttributeValue::DebugLocListsIndex(index) => { - let offset = context.dwarf.locations_offset(context.unit, index)?; - let iter = context - .dwarf - .locations - .raw_locations(offset, context.unit.encoding())?; - let loc_list = LocationList::from(iter, context)?; - let loc_id = context.locations.add(loc_list); - AttributeValue::LocationListRef(loc_id) - } - read::AttributeValue::RangeListsRef(offset) => { - let offset = context.dwarf.ranges_offset_from_raw(context.unit, offset); - let iter = context.dwarf.raw_ranges(context.unit, offset)?; - let range_list = RangeList::from(iter, context)?; - let range_id = context.ranges.add(range_list); - AttributeValue::RangeListRef(range_id) - } - read::AttributeValue::DebugRngListsBase(_base) => { - // We convert all range list indices to offsets, - // so this is unneeded. - return Ok(None); - } - read::AttributeValue::DebugRngListsIndex(index) => { - let offset = context.dwarf.ranges_offset(context.unit, index)?; - let iter = context - .dwarf - .ranges - .raw_ranges(offset, context.unit.encoding())?; - let range_list = RangeList::from(iter, context)?; - let range_id = context.ranges.add(range_list); - AttributeValue::RangeListRef(range_id) - } - read::AttributeValue::DebugTypesRef(val) => AttributeValue::DebugTypesRef(val), - read::AttributeValue::DebugStrRef(offset) => { - let r = context.dwarf.string(offset)?; - let id = context.strings.add(r.to_slice()?); - AttributeValue::StringRef(id) - } - read::AttributeValue::DebugStrRefSup(val) => AttributeValue::DebugStrRefSup(val), - read::AttributeValue::DebugStrOffsetsBase(_base) => { - // We convert all string offsets to `.debug_str` references, - // so this is unneeded. 
- return Ok(None); - } - read::AttributeValue::DebugStrOffsetsIndex(index) => { - let offset = context.dwarf.string_offset(context.unit, index)?; - let r = context.dwarf.string(offset)?; - let id = context.strings.add(r.to_slice()?); - AttributeValue::StringRef(id) - } - read::AttributeValue::DebugLineStrRef(offset) => { - let r = context.dwarf.line_string(offset)?; - let id = context.line_strings.add(r.to_slice()?); - AttributeValue::LineStringRef(id) - } - read::AttributeValue::String(r) => AttributeValue::String(r.to_slice()?.into()), - read::AttributeValue::Encoding(val) => AttributeValue::Encoding(val), - read::AttributeValue::DecimalSign(val) => AttributeValue::DecimalSign(val), - read::AttributeValue::Endianity(val) => AttributeValue::Endianity(val), - read::AttributeValue::Accessibility(val) => AttributeValue::Accessibility(val), - read::AttributeValue::Visibility(val) => AttributeValue::Visibility(val), - read::AttributeValue::Virtuality(val) => AttributeValue::Virtuality(val), - read::AttributeValue::Language(val) => AttributeValue::Language(val), - read::AttributeValue::AddressClass(val) => AttributeValue::AddressClass(val), - read::AttributeValue::IdentifierCase(val) => AttributeValue::IdentifierCase(val), - read::AttributeValue::CallingConvention(val) => { - AttributeValue::CallingConvention(val) - } - read::AttributeValue::Inline(val) => AttributeValue::Inline(val), - read::AttributeValue::Ordering(val) => AttributeValue::Ordering(val), - read::AttributeValue::FileIndex(val) => { - if val == 0 { - // 0 means not specified, even for version 5. - AttributeValue::FileIndex(None) - } else { - match context.line_program_files.get(val as usize) { - Some(id) => AttributeValue::FileIndex(Some(*id)), - None => return Err(ConvertError::InvalidFileIndex), - } - } - } - // Should always be a more specific section reference. 
- read::AttributeValue::SecOffset(_) => { - return Err(ConvertError::InvalidAttributeValue); - } - read::AttributeValue::DwoId(DwoId(val)) => AttributeValue::Udata(val), - }; - Ok(Some(to)) - } - } -} - -#[cfg(test)] -#[cfg(feature = "read")] -mod tests { - use super::*; - use crate::common::{ - DebugAddrBase, DebugLocListsBase, DebugRngListsBase, DebugStrOffsetsBase, LineEncoding, - }; - use crate::constants; - use crate::read; - use crate::write::{ - DebugLine, DebugLineStr, DebugStr, DwarfUnit, EndianVec, LineString, LineStringTable, - Location, LocationList, LocationListTable, Range, RangeList, RangeListOffsets, - RangeListTable, StringTable, - }; - use crate::LittleEndian; - use std::collections::HashMap; - use std::mem; - use std::sync::Arc; - - #[test] - fn test_unit_table() { - let mut strings = StringTable::default(); - - let mut units = UnitTable::default(); - let unit_id1 = units.add(Unit::new( - Encoding { - version: 4, - address_size: 8, - format: Format::Dwarf32, - }, - LineProgram::none(), - )); - let unit2 = units.add(Unit::new( - Encoding { - version: 2, - address_size: 4, - format: Format::Dwarf64, - }, - LineProgram::none(), - )); - let unit3 = units.add(Unit::new( - Encoding { - version: 5, - address_size: 4, - format: Format::Dwarf32, - }, - LineProgram::none(), - )); - assert_eq!(units.count(), 3); - { - let unit1 = units.get_mut(unit_id1); - assert_eq!(unit1.version(), 4); - assert_eq!(unit1.address_size(), 8); - assert_eq!(unit1.format(), Format::Dwarf32); - assert_eq!(unit1.count(), 1); - - let root_id = unit1.root(); - assert_eq!(root_id, UnitEntryId::new(unit1.base_id, 0)); - { - let root = unit1.get_mut(root_id); - assert_eq!(root.id(), root_id); - assert!(root.parent().is_none()); - assert_eq!(root.tag(), constants::DW_TAG_compile_unit); - - // Test get/get_mut - assert!(root.get(constants::DW_AT_producer).is_none()); - assert!(root.get_mut(constants::DW_AT_producer).is_none()); - let mut producer = 
AttributeValue::String(b"root"[..].into()); - root.set(constants::DW_AT_producer, producer.clone()); - assert_eq!(root.get(constants::DW_AT_producer), Some(&producer)); - assert_eq!(root.get_mut(constants::DW_AT_producer), Some(&mut producer)); - - // Test attrs - let mut attrs = root.attrs(); - let attr = attrs.next().unwrap(); - assert_eq!(attr.name(), constants::DW_AT_producer); - assert_eq!(attr.get(), &producer); - assert!(attrs.next().is_none()); - } - - let child1 = unit1.add(root_id, constants::DW_TAG_subprogram); - assert_eq!(child1, UnitEntryId::new(unit1.base_id, 1)); - { - let child1 = unit1.get_mut(child1); - assert_eq!(child1.parent(), Some(root_id)); - - let tmp = AttributeValue::String(b"tmp"[..].into()); - child1.set(constants::DW_AT_name, tmp.clone()); - assert_eq!(child1.get(constants::DW_AT_name), Some(&tmp)); - - // Test attrs_mut - let name = AttributeValue::StringRef(strings.add(&b"child1"[..])); - { - let attr = child1.attrs_mut().next().unwrap(); - assert_eq!(attr.name(), constants::DW_AT_name); - attr.set(name.clone()); - } - assert_eq!(child1.get(constants::DW_AT_name), Some(&name)); - } - - let child2 = unit1.add(root_id, constants::DW_TAG_subprogram); - assert_eq!(child2, UnitEntryId::new(unit1.base_id, 2)); - { - let child2 = unit1.get_mut(child2); - assert_eq!(child2.parent(), Some(root_id)); - - let tmp = AttributeValue::String(b"tmp"[..].into()); - child2.set(constants::DW_AT_name, tmp.clone()); - assert_eq!(child2.get(constants::DW_AT_name), Some(&tmp)); - - // Test replace - let name = AttributeValue::StringRef(strings.add(&b"child2"[..])); - child2.set(constants::DW_AT_name, name.clone()); - assert_eq!(child2.get(constants::DW_AT_name), Some(&name)); - } - - { - let root = unit1.get(root_id); - assert_eq!( - root.children().cloned().collect::>(), - vec![child1, child2] - ); - } - } - { - let unit2 = units.get(unit2); - assert_eq!(unit2.version(), 2); - assert_eq!(unit2.address_size(), 4); - assert_eq!(unit2.format(), 
Format::Dwarf64); - assert_eq!(unit2.count(), 1); - - let root = unit2.root(); - assert_eq!(root, UnitEntryId::new(unit2.base_id, 0)); - let root = unit2.get(root); - assert_eq!(root.id(), UnitEntryId::new(unit2.base_id, 0)); - assert!(root.parent().is_none()); - assert_eq!(root.tag(), constants::DW_TAG_compile_unit); - } - - let mut sections = Sections::new(EndianVec::new(LittleEndian)); - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = strings.write(&mut sections.debug_str).unwrap(); - units - .write(&mut sections, &debug_line_str_offsets, &debug_str_offsets) - .unwrap(); - - println!("{:?}", sections.debug_str); - println!("{:?}", sections.debug_info); - println!("{:?}", sections.debug_abbrev); - - let dwarf = read::Dwarf { - debug_abbrev: read::DebugAbbrev::new(sections.debug_abbrev.slice(), LittleEndian), - debug_info: read::DebugInfo::new(sections.debug_info.slice(), LittleEndian), - debug_str: read::DebugStr::new(sections.debug_str.slice(), LittleEndian), - ..Default::default() - }; - let mut read_units = dwarf.units(); - - { - let read_unit1 = read_units.next().unwrap().unwrap(); - let unit1 = units.get(unit_id1); - assert_eq!(unit1.version(), read_unit1.version()); - assert_eq!(unit1.address_size(), read_unit1.address_size()); - assert_eq!(unit1.format(), read_unit1.format()); - - let read_unit1 = dwarf.unit(read_unit1).unwrap(); - let mut read_entries = read_unit1.entries(); - - let root = unit1.get(unit1.root()); - { - let (depth, read_root) = read_entries.next_dfs().unwrap().unwrap(); - assert_eq!(depth, 0); - assert_eq!(root.tag(), read_root.tag()); - assert!(read_root.has_children()); - - let producer = match root.get(constants::DW_AT_producer).unwrap() { - AttributeValue::String(ref producer) => &**producer, - otherwise => panic!("unexpected {:?}", otherwise), - }; - assert_eq!(producer, b"root"); - let read_producer = read_root - .attr_value(constants::DW_AT_producer) - .unwrap() - .unwrap(); - assert_eq!( - dwarf 
- .attr_string(&read_unit1, read_producer) - .unwrap() - .slice(), - producer - ); - } - - let mut children = root.children().cloned(); - - { - let child = children.next().unwrap(); - assert_eq!(child, UnitEntryId::new(unit1.base_id, 1)); - let child = unit1.get(child); - let (depth, read_child) = read_entries.next_dfs().unwrap().unwrap(); - assert_eq!(depth, 1); - assert_eq!(child.tag(), read_child.tag()); - assert!(!read_child.has_children()); - - let name = match child.get(constants::DW_AT_name).unwrap() { - AttributeValue::StringRef(name) => *name, - otherwise => panic!("unexpected {:?}", otherwise), - }; - let name = strings.get(name); - assert_eq!(name, b"child1"); - let read_name = read_child - .attr_value(constants::DW_AT_name) - .unwrap() - .unwrap(); - assert_eq!( - dwarf.attr_string(&read_unit1, read_name).unwrap().slice(), - name - ); - } - - { - let child = children.next().unwrap(); - assert_eq!(child, UnitEntryId::new(unit1.base_id, 2)); - let child = unit1.get(child); - let (depth, read_child) = read_entries.next_dfs().unwrap().unwrap(); - assert_eq!(depth, 0); - assert_eq!(child.tag(), read_child.tag()); - assert!(!read_child.has_children()); - - let name = match child.get(constants::DW_AT_name).unwrap() { - AttributeValue::StringRef(name) => *name, - otherwise => panic!("unexpected {:?}", otherwise), - }; - let name = strings.get(name); - assert_eq!(name, b"child2"); - let read_name = read_child - .attr_value(constants::DW_AT_name) - .unwrap() - .unwrap(); - assert_eq!( - dwarf.attr_string(&read_unit1, read_name).unwrap().slice(), - name - ); - } - - assert!(read_entries.next_dfs().unwrap().is_none()); - } - - { - let read_unit2 = read_units.next().unwrap().unwrap(); - let unit2 = units.get(unit2); - assert_eq!(unit2.version(), read_unit2.version()); - assert_eq!(unit2.address_size(), read_unit2.address_size()); - assert_eq!(unit2.format(), read_unit2.format()); - - let abbrevs = dwarf.abbreviations(&read_unit2).unwrap(); - let mut read_entries = 
read_unit2.entries(&abbrevs); - - { - let root = unit2.get(unit2.root()); - let (depth, read_root) = read_entries.next_dfs().unwrap().unwrap(); - assert_eq!(depth, 0); - assert_eq!(root.tag(), read_root.tag()); - assert!(!read_root.has_children()); - } - - assert!(read_entries.next_dfs().unwrap().is_none()); - } - - { - let read_unit3 = read_units.next().unwrap().unwrap(); - let unit3 = units.get(unit3); - assert_eq!(unit3.version(), read_unit3.version()); - assert_eq!(unit3.address_size(), read_unit3.address_size()); - assert_eq!(unit3.format(), read_unit3.format()); - - let abbrevs = dwarf.abbreviations(&read_unit3).unwrap(); - let mut read_entries = read_unit3.entries(&abbrevs); - - { - let root = unit3.get(unit3.root()); - let (depth, read_root) = read_entries.next_dfs().unwrap().unwrap(); - assert_eq!(depth, 0); - assert_eq!(root.tag(), read_root.tag()); - assert!(!read_root.has_children()); - } - - assert!(read_entries.next_dfs().unwrap().is_none()); - } - - assert!(read_units.next().unwrap().is_none()); - - let mut convert_line_strings = LineStringTable::default(); - let mut convert_strings = StringTable::default(); - let convert_units = UnitTable::from( - &dwarf, - &mut convert_line_strings, - &mut convert_strings, - &|address| Some(Address::Constant(address)), - ) - .unwrap(); - assert_eq!(convert_units.count(), units.count()); - - for i in 0..convert_units.count() { - let unit_id = units.id(i); - let unit = units.get(unit_id); - let convert_unit_id = convert_units.id(i); - let convert_unit = convert_units.get(convert_unit_id); - assert_eq!(convert_unit.version(), unit.version()); - assert_eq!(convert_unit.address_size(), unit.address_size()); - assert_eq!(convert_unit.format(), unit.format()); - assert_eq!(convert_unit.count(), unit.count()); - - let root = unit.get(unit.root()); - let convert_root = convert_unit.get(convert_unit.root()); - assert_eq!(convert_root.tag(), root.tag()); - for (convert_attr, attr) in convert_root.attrs().zip(root.attrs()) { - 
assert_eq!(convert_attr, attr); - } - } - } - - #[test] - fn test_attribute_value() { - // Create a string table and a string with a non-zero id/offset. - let mut strings = StringTable::default(); - strings.add("string one"); - let string_id = strings.add("string two"); - let mut debug_str = DebugStr::from(EndianVec::new(LittleEndian)); - let debug_str_offsets = strings.write(&mut debug_str).unwrap(); - let read_debug_str = read::DebugStr::new(debug_str.slice(), LittleEndian); - - let mut line_strings = LineStringTable::default(); - line_strings.add("line string one"); - let line_string_id = line_strings.add("line string two"); - let mut debug_line_str = DebugLineStr::from(EndianVec::new(LittleEndian)); - let debug_line_str_offsets = line_strings.write(&mut debug_line_str).unwrap(); - let read_debug_line_str = - read::DebugLineStr::from(read::EndianSlice::new(debug_line_str.slice(), LittleEndian)); - - let data = vec![1, 2, 3, 4]; - let read_data = read::EndianSlice::new(&[1, 2, 3, 4], LittleEndian); - - let mut expression = Expression::new(); - expression.op_constu(57); - let read_expression = read::Expression(read::EndianSlice::new( - &[constants::DW_OP_constu.0, 57], - LittleEndian, - )); - - let mut ranges = RangeListTable::default(); - let range_id = ranges.add(RangeList(vec![Range::StartEnd { - begin: Address::Constant(0x1234), - end: Address::Constant(0x2345), - }])); - - let mut locations = LocationListTable::default(); - let loc_id = locations.add(LocationList(vec![Location::StartEnd { - begin: Address::Constant(0x1234), - end: Address::Constant(0x2345), - data: expression.clone(), - }])); - - for &version in &[2, 3, 4, 5] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - - let mut sections = Sections::new(EndianVec::new(LittleEndian)); - let range_list_offsets = ranges.write(&mut sections, encoding).unwrap(); - let loc_list_offsets = 
locations.write(&mut sections, encoding, None).unwrap(); - - let read_debug_ranges = - read::DebugRanges::new(sections.debug_ranges.slice(), LittleEndian); - let read_debug_rnglists = - read::DebugRngLists::new(sections.debug_rnglists.slice(), LittleEndian); - - let read_debug_loc = - read::DebugLoc::new(sections.debug_loc.slice(), LittleEndian); - let read_debug_loclists = - read::DebugLocLists::new(sections.debug_loclists.slice(), LittleEndian); - - let mut units = UnitTable::default(); - let unit = units.add(Unit::new(encoding, LineProgram::none())); - let unit = units.get(unit); - let encoding = Encoding { - format, - version, - address_size, - }; - let from_unit = read::UnitHeader::new( - encoding, - 0, - read::UnitType::Compilation, - DebugAbbrevOffset(0), - DebugInfoOffset(0).into(), - read::EndianSlice::new(&[], LittleEndian), - ); - - for &(ref name, ref value, ref expect_value) in &[ - ( - constants::DW_AT_name, - AttributeValue::Address(Address::Constant(0x1234)), - read::AttributeValue::Addr(0x1234), - ), - ( - constants::DW_AT_name, - AttributeValue::Block(data.clone()), - read::AttributeValue::Block(read_data), - ), - ( - constants::DW_AT_name, - AttributeValue::Data1(0x12), - read::AttributeValue::Data1(0x12), - ), - ( - constants::DW_AT_name, - AttributeValue::Data2(0x1234), - read::AttributeValue::Data2(0x1234), - ), - ( - constants::DW_AT_name, - AttributeValue::Data4(0x1234), - read::AttributeValue::Data4(0x1234), - ), - ( - constants::DW_AT_name, - AttributeValue::Data8(0x1234), - read::AttributeValue::Data8(0x1234), - ), - ( - constants::DW_AT_name, - AttributeValue::Sdata(0x1234), - read::AttributeValue::Sdata(0x1234), - ), - ( - constants::DW_AT_name, - AttributeValue::Udata(0x1234), - read::AttributeValue::Udata(0x1234), - ), - ( - constants::DW_AT_name, - AttributeValue::Exprloc(expression.clone()), - read::AttributeValue::Exprloc(read_expression), - ), - ( - constants::DW_AT_name, - AttributeValue::Flag(false), - 
read::AttributeValue::Flag(false), - ), - /* - ( - constants::DW_AT_name, - AttributeValue::FlagPresent, - read::AttributeValue::Flag(true), - ), - */ - ( - constants::DW_AT_name, - AttributeValue::DebugInfoRefSup(DebugInfoOffset(0x1234)), - read::AttributeValue::DebugInfoRefSup(DebugInfoOffset(0x1234)), - ), - ( - constants::DW_AT_location, - AttributeValue::LocationListRef(loc_id), - read::AttributeValue::SecOffset(loc_list_offsets.get(loc_id).0), - ), - ( - constants::DW_AT_macro_info, - AttributeValue::DebugMacinfoRef(DebugMacinfoOffset(0x1234)), - read::AttributeValue::SecOffset(0x1234), - ), - ( - constants::DW_AT_macros, - AttributeValue::DebugMacroRef(DebugMacroOffset(0x1234)), - read::AttributeValue::SecOffset(0x1234), - ), - ( - constants::DW_AT_ranges, - AttributeValue::RangeListRef(range_id), - read::AttributeValue::SecOffset(range_list_offsets.get(range_id).0), - ), - ( - constants::DW_AT_name, - AttributeValue::DebugTypesRef(DebugTypeSignature(0x1234)), - read::AttributeValue::DebugTypesRef(DebugTypeSignature(0x1234)), - ), - ( - constants::DW_AT_name, - AttributeValue::StringRef(string_id), - read::AttributeValue::DebugStrRef(debug_str_offsets.get(string_id)), - ), - ( - constants::DW_AT_name, - AttributeValue::DebugStrRefSup(DebugStrOffset(0x1234)), - read::AttributeValue::DebugStrRefSup(DebugStrOffset(0x1234)), - ), - ( - constants::DW_AT_name, - AttributeValue::LineStringRef(line_string_id), - read::AttributeValue::DebugLineStrRef( - debug_line_str_offsets.get(line_string_id), - ), - ), - ( - constants::DW_AT_name, - AttributeValue::String(data.clone()), - read::AttributeValue::String(read_data), - ), - ( - constants::DW_AT_encoding, - AttributeValue::Encoding(constants::DwAte(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_decimal_sign, - AttributeValue::DecimalSign(constants::DwDs(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_endianity, - AttributeValue::Endianity(constants::DwEnd(0x12)), - 
read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_accessibility, - AttributeValue::Accessibility(constants::DwAccess(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_visibility, - AttributeValue::Visibility(constants::DwVis(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_virtuality, - AttributeValue::Virtuality(constants::DwVirtuality(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_language, - AttributeValue::Language(constants::DwLang(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_address_class, - AttributeValue::AddressClass(constants::DwAddr(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_identifier_case, - AttributeValue::IdentifierCase(constants::DwId(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_calling_convention, - AttributeValue::CallingConvention(constants::DwCc(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_ordering, - AttributeValue::Ordering(constants::DwOrd(0x12)), - read::AttributeValue::Udata(0x12), - ), - ( - constants::DW_AT_inline, - AttributeValue::Inline(constants::DwInl(0x12)), - read::AttributeValue::Udata(0x12), - ), - ][..] 
- { - let form = value.form(encoding).unwrap(); - let attr = Attribute { - name: *name, - value: value.clone(), - }; - - let offsets = UnitOffsets::none(); - let line_program_offset = None; - let mut debug_info_refs = Vec::new(); - let mut unit_refs = Vec::new(); - let mut debug_info = DebugInfo::from(EndianVec::new(LittleEndian)); - attr.value - .write( - &mut debug_info, - &mut debug_info_refs, - &mut unit_refs, - &unit, - &offsets, - line_program_offset, - &debug_line_str_offsets, - &debug_str_offsets, - &range_list_offsets, - &loc_list_offsets, - ) - .unwrap(); - - let spec = read::AttributeSpecification::new(*name, form, None); - let mut r = read::EndianSlice::new(debug_info.slice(), LittleEndian); - let read_attr = read::parse_attribute(&mut r, encoding, spec).unwrap(); - let read_value = &read_attr.raw_value(); - // read::AttributeValue is invariant in the lifetime of R. - // The lifetimes here are all okay, so transmute it. - let read_value = unsafe { - mem::transmute::< - &read::AttributeValue>, - &read::AttributeValue>, - >(read_value) - }; - assert_eq!(read_value, expect_value); - - let dwarf = read::Dwarf { - debug_str: read_debug_str.clone(), - debug_line_str: read_debug_line_str.clone(), - ranges: read::RangeLists::new(read_debug_ranges, read_debug_rnglists), - locations: read::LocationLists::new( - read_debug_loc, - read_debug_loclists, - ), - ..Default::default() - }; - - let unit = read::Unit { - header: from_unit, - abbreviations: Arc::new(read::Abbreviations::default()), - name: None, - comp_dir: None, - low_pc: 0, - str_offsets_base: DebugStrOffsetsBase(0), - addr_base: DebugAddrBase(0), - loclists_base: DebugLocListsBase(0), - rnglists_base: DebugRngListsBase(0), - line_program: None, - dwo_id: None, - }; - - let mut context = convert::ConvertUnitContext { - dwarf: &dwarf, - unit: &unit, - line_strings: &mut line_strings, - strings: &mut strings, - ranges: &mut ranges, - locations: &mut locations, - convert_address: &|address| 
Some(Address::Constant(address)), - base_address: Address::Constant(0), - line_program_offset: None, - line_program_files: Vec::new(), - entry_ids: &HashMap::new(), - }; - - let convert_attr = - Attribute::from(&mut context, &read_attr).unwrap().unwrap(); - assert_eq!(convert_attr, attr); - } - } - } - } - } - - #[test] - fn test_unit_ref() { - let mut units = UnitTable::default(); - let unit_id1 = units.add(Unit::new( - Encoding { - version: 4, - address_size: 8, - format: Format::Dwarf32, - }, - LineProgram::none(), - )); - assert_eq!(unit_id1, units.id(0)); - let unit_id2 = units.add(Unit::new( - Encoding { - version: 2, - address_size: 4, - format: Format::Dwarf64, - }, - LineProgram::none(), - )); - assert_eq!(unit_id2, units.id(1)); - let unit1_child1 = UnitEntryId::new(units.get(unit_id1).base_id, 1); - let unit1_child2 = UnitEntryId::new(units.get(unit_id1).base_id, 2); - let unit2_child1 = UnitEntryId::new(units.get(unit_id2).base_id, 1); - let unit2_child2 = UnitEntryId::new(units.get(unit_id2).base_id, 2); - { - let unit1 = units.get_mut(unit_id1); - let root = unit1.root(); - let child_id1 = unit1.add(root, constants::DW_TAG_subprogram); - assert_eq!(child_id1, unit1_child1); - let child_id2 = unit1.add(root, constants::DW_TAG_subprogram); - assert_eq!(child_id2, unit1_child2); - { - let child1 = unit1.get_mut(child_id1); - child1.set(constants::DW_AT_type, AttributeValue::UnitRef(child_id2)); - } - { - let child2 = unit1.get_mut(child_id2); - child2.set( - constants::DW_AT_type, - AttributeValue::DebugInfoRef(Reference::Entry(unit_id2, unit2_child1)), - ); - } - } - { - let unit2 = units.get_mut(unit_id2); - let root = unit2.root(); - let child_id1 = unit2.add(root, constants::DW_TAG_subprogram); - assert_eq!(child_id1, unit2_child1); - let child_id2 = unit2.add(root, constants::DW_TAG_subprogram); - assert_eq!(child_id2, unit2_child2); - { - let child1 = unit2.get_mut(child_id1); - child1.set(constants::DW_AT_type, AttributeValue::UnitRef(child_id2)); 
- } - { - let child2 = unit2.get_mut(child_id2); - child2.set( - constants::DW_AT_type, - AttributeValue::DebugInfoRef(Reference::Entry(unit_id1, unit1_child1)), - ); - } - } - - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - let mut sections = Sections::new(EndianVec::new(LittleEndian)); - let debug_info_offsets = units - .write(&mut sections, &debug_line_str_offsets, &debug_str_offsets) - .unwrap(); - - println!("{:?}", sections.debug_info); - println!("{:?}", sections.debug_abbrev); - - let dwarf = read::Dwarf { - debug_abbrev: read::DebugAbbrev::new(sections.debug_abbrev.slice(), LittleEndian), - debug_info: read::DebugInfo::new(sections.debug_info.slice(), LittleEndian), - ..Default::default() - }; - - let mut read_units = dwarf.units(); - { - let read_unit1 = read_units.next().unwrap().unwrap(); - assert_eq!( - read_unit1.offset(), - debug_info_offsets.unit(unit_id1).into() - ); - - let abbrevs = dwarf.abbreviations(&read_unit1).unwrap(); - let mut read_entries = read_unit1.entries(&abbrevs); - { - let (_, _read_root) = read_entries.next_dfs().unwrap().unwrap(); - } - { - let (_, read_child1) = read_entries.next_dfs().unwrap().unwrap(); - let offset = debug_info_offsets - .entry(unit_id1, unit1_child2) - .to_unit_offset(&read_unit1) - .unwrap(); - assert_eq!( - read_child1.attr_value(constants::DW_AT_type).unwrap(), - Some(read::AttributeValue::UnitRef(offset)) - ); - } - { - let (_, read_child2) = read_entries.next_dfs().unwrap().unwrap(); - let offset = debug_info_offsets.entry(unit_id2, unit2_child1); - assert_eq!( - read_child2.attr_value(constants::DW_AT_type).unwrap(), - Some(read::AttributeValue::DebugInfoRef(offset)) - ); - } - } - { - let read_unit2 = read_units.next().unwrap().unwrap(); - assert_eq!( - read_unit2.offset(), - debug_info_offsets.unit(unit_id2).into() - ); - - let abbrevs = dwarf.abbreviations(&read_unit2).unwrap(); - let mut read_entries = read_unit2.entries(&abbrevs); - 
{ - let (_, _read_root) = read_entries.next_dfs().unwrap().unwrap(); - } - { - let (_, read_child1) = read_entries.next_dfs().unwrap().unwrap(); - let offset = debug_info_offsets - .entry(unit_id2, unit2_child2) - .to_unit_offset(&read_unit2) - .unwrap(); - assert_eq!( - read_child1.attr_value(constants::DW_AT_type).unwrap(), - Some(read::AttributeValue::UnitRef(offset)) - ); - } - { - let (_, read_child2) = read_entries.next_dfs().unwrap().unwrap(); - let offset = debug_info_offsets.entry(unit_id1, unit1_child1); - assert_eq!( - read_child2.attr_value(constants::DW_AT_type).unwrap(), - Some(read::AttributeValue::DebugInfoRef(offset)) - ); - } - } - - let mut convert_line_strings = LineStringTable::default(); - let mut convert_strings = StringTable::default(); - let convert_units = UnitTable::from( - &dwarf, - &mut convert_line_strings, - &mut convert_strings, - &|address| Some(Address::Constant(address)), - ) - .unwrap(); - assert_eq!(convert_units.count(), units.count()); - - for i in 0..convert_units.count() { - let unit = units.get(units.id(i)); - let convert_unit = convert_units.get(convert_units.id(i)); - assert_eq!(convert_unit.version(), unit.version()); - assert_eq!(convert_unit.address_size(), unit.address_size()); - assert_eq!(convert_unit.format(), unit.format()); - assert_eq!(convert_unit.count(), unit.count()); - - let root = unit.get(unit.root()); - let convert_root = convert_unit.get(convert_unit.root()); - assert_eq!(convert_root.tag(), root.tag()); - for (convert_attr, attr) in convert_root.attrs().zip(root.attrs()) { - assert_eq!(convert_attr, attr); - } - - let child1 = unit.get(UnitEntryId::new(unit.base_id, 1)); - let convert_child1 = convert_unit.get(UnitEntryId::new(convert_unit.base_id, 1)); - assert_eq!(convert_child1.tag(), child1.tag()); - for (convert_attr, attr) in convert_child1.attrs().zip(child1.attrs()) { - assert_eq!(convert_attr.name, attr.name); - match (convert_attr.value.clone(), attr.value.clone()) { - ( - 
AttributeValue::DebugInfoRef(Reference::Entry(convert_unit, convert_entry)), - AttributeValue::DebugInfoRef(Reference::Entry(unit, entry)), - ) => { - assert_eq!(convert_unit.index, unit.index); - assert_eq!(convert_entry.index, entry.index); - } - (AttributeValue::UnitRef(convert_id), AttributeValue::UnitRef(id)) => { - assert_eq!(convert_id.index, id.index); - } - (convert_value, value) => assert_eq!(convert_value, value), - } - } - - let child2 = unit.get(UnitEntryId::new(unit.base_id, 2)); - let convert_child2 = convert_unit.get(UnitEntryId::new(convert_unit.base_id, 2)); - assert_eq!(convert_child2.tag(), child2.tag()); - for (convert_attr, attr) in convert_child2.attrs().zip(child2.attrs()) { - assert_eq!(convert_attr.name, attr.name); - match (convert_attr.value.clone(), attr.value.clone()) { - ( - AttributeValue::DebugInfoRef(Reference::Entry(convert_unit, convert_entry)), - AttributeValue::DebugInfoRef(Reference::Entry(unit, entry)), - ) => { - assert_eq!(convert_unit.index, unit.index); - assert_eq!(convert_entry.index, entry.index); - } - (AttributeValue::UnitRef(convert_id), AttributeValue::UnitRef(id)) => { - assert_eq!(convert_id.index, id.index); - } - (convert_value, value) => assert_eq!(convert_value, value), - } - } - } - } - - #[test] - fn test_sibling() { - fn add_child( - unit: &mut Unit, - parent: UnitEntryId, - tag: constants::DwTag, - name: &str, - ) -> UnitEntryId { - let id = unit.add(parent, tag); - let child = unit.get_mut(id); - child.set(constants::DW_AT_name, AttributeValue::String(name.into())); - child.set_sibling(true); - id - } - - fn add_children(units: &mut UnitTable, unit_id: UnitId) { - let unit = units.get_mut(unit_id); - let root = unit.root(); - let child1 = add_child(unit, root, constants::DW_TAG_subprogram, "child1"); - add_child(unit, child1, constants::DW_TAG_variable, "grandchild1"); - add_child(unit, root, constants::DW_TAG_subprogram, "child2"); - add_child(unit, root, constants::DW_TAG_subprogram, "child3"); - } - - 
fn next_child>( - entries: &mut read::EntriesCursor, - ) -> (read::UnitOffset, Option) { - let (_, entry) = entries.next_dfs().unwrap().unwrap(); - let offset = entry.offset(); - let sibling = - entry - .attr_value(constants::DW_AT_sibling) - .unwrap() - .map(|attr| match attr { - read::AttributeValue::UnitRef(offset) => offset, - _ => panic!("bad sibling value"), - }); - (offset, sibling) - } - - fn check_sibling>( - unit: &read::UnitHeader, - debug_abbrev: &read::DebugAbbrev, - ) { - let abbrevs = unit.abbreviations(debug_abbrev).unwrap(); - let mut entries = unit.entries(&abbrevs); - // root - entries.next_dfs().unwrap().unwrap(); - // child1 - let (_, sibling1) = next_child(&mut entries); - // grandchild1 - entries.next_dfs().unwrap().unwrap(); - // child2 - let (offset2, sibling2) = next_child(&mut entries); - // child3 - let (_, _) = next_child(&mut entries); - assert_eq!(sibling1, Some(offset2)); - assert_eq!(sibling2, None); - } - - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 8, - }; - let mut units = UnitTable::default(); - let unit_id1 = units.add(Unit::new(encoding, LineProgram::none())); - add_children(&mut units, unit_id1); - let unit_id2 = units.add(Unit::new(encoding, LineProgram::none())); - add_children(&mut units, unit_id2); - - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - let mut sections = Sections::new(EndianVec::new(LittleEndian)); - units - .write(&mut sections, &debug_line_str_offsets, &debug_str_offsets) - .unwrap(); - - println!("{:?}", sections.debug_info); - println!("{:?}", sections.debug_abbrev); - - let read_debug_info = read::DebugInfo::new(sections.debug_info.slice(), LittleEndian); - let read_debug_abbrev = read::DebugAbbrev::new(sections.debug_abbrev.slice(), LittleEndian); - let mut read_units = read_debug_info.units(); - check_sibling(&read_units.next().unwrap().unwrap(), &read_debug_abbrev); - 
check_sibling(&read_units.next().unwrap().unwrap(), &read_debug_abbrev); - } - - #[test] - fn test_line_ref() { - for &version in &[2, 3, 4, 5] { - for &address_size in &[4, 8] { - for &format in &[Format::Dwarf32, Format::Dwarf64] { - let encoding = Encoding { - format, - version, - address_size, - }; - - // The line program we'll be referencing. - let mut line_program = LineProgram::new( - encoding, - LineEncoding::default(), - LineString::String(b"comp_dir".to_vec()), - LineString::String(b"comp_name".to_vec()), - None, - ); - let dir = line_program.default_directory(); - let file1 = - line_program.add_file(LineString::String(b"file1".to_vec()), dir, None); - let file2 = - line_program.add_file(LineString::String(b"file2".to_vec()), dir, None); - - // Write, read, and convert the line program, so that we have the info - // required to convert the attributes. - let line_strings = DebugLineStrOffsets::none(); - let strings = DebugStrOffsets::none(); - let mut debug_line = DebugLine::from(EndianVec::new(LittleEndian)); - let line_program_offset = line_program - .write(&mut debug_line, encoding, &line_strings, &strings) - .unwrap(); - let read_debug_line = read::DebugLine::new(debug_line.slice(), LittleEndian); - let read_line_program = read_debug_line - .program( - line_program_offset, - address_size, - Some(read::EndianSlice::new(b"comp_dir", LittleEndian)), - Some(read::EndianSlice::new(b"comp_name", LittleEndian)), - ) - .unwrap(); - let dwarf = read::Dwarf::default(); - let mut convert_line_strings = LineStringTable::default(); - let mut convert_strings = StringTable::default(); - let (_, line_program_files) = LineProgram::from( - read_line_program, - &dwarf, - &mut convert_line_strings, - &mut convert_strings, - &|address| Some(Address::Constant(address)), - ) - .unwrap(); - - // Fake the unit. 
- let mut units = UnitTable::default(); - let unit = units.add(Unit::new(encoding, LineProgram::none())); - let unit = units.get(unit); - let from_unit = read::UnitHeader::new( - encoding, - 0, - read::UnitType::Compilation, - DebugAbbrevOffset(0), - DebugInfoOffset(0).into(), - read::EndianSlice::new(&[], LittleEndian), - ); - - for &(ref name, ref value, ref expect_value) in &[ - ( - constants::DW_AT_stmt_list, - AttributeValue::LineProgramRef, - read::AttributeValue::SecOffset(line_program_offset.0), - ), - ( - constants::DW_AT_decl_file, - AttributeValue::FileIndex(Some(file1)), - read::AttributeValue::Udata(file1.raw()), - ), - ( - constants::DW_AT_decl_file, - AttributeValue::FileIndex(Some(file2)), - read::AttributeValue::Udata(file2.raw()), - ), - ][..] - { - let mut ranges = RangeListTable::default(); - let mut locations = LocationListTable::default(); - let mut strings = StringTable::default(); - let mut line_strings = LineStringTable::default(); - - let form = value.form(encoding).unwrap(); - let attr = Attribute { - name: *name, - value: value.clone(), - }; - - let mut debug_info_refs = Vec::new(); - let mut unit_refs = Vec::new(); - let mut debug_info = DebugInfo::from(EndianVec::new(LittleEndian)); - let offsets = UnitOffsets::none(); - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - let range_list_offsets = RangeListOffsets::none(); - let loc_list_offsets = LocationListOffsets::none(); - attr.value - .write( - &mut debug_info, - &mut debug_info_refs, - &mut unit_refs, - &unit, - &offsets, - Some(line_program_offset), - &debug_line_str_offsets, - &debug_str_offsets, - &range_list_offsets, - &loc_list_offsets, - ) - .unwrap(); - - let spec = read::AttributeSpecification::new(*name, form, None); - let mut r = read::EndianSlice::new(debug_info.slice(), LittleEndian); - let read_attr = read::parse_attribute(&mut r, encoding, spec).unwrap(); - let read_value = &read_attr.raw_value(); - // 
read::AttributeValue is invariant in the lifetime of R. - // The lifetimes here are all okay, so transmute it. - let read_value = unsafe { - mem::transmute::< - &read::AttributeValue>, - &read::AttributeValue>, - >(read_value) - }; - assert_eq!(read_value, expect_value); - - let unit = read::Unit { - header: from_unit, - abbreviations: Arc::new(read::Abbreviations::default()), - name: None, - comp_dir: None, - low_pc: 0, - str_offsets_base: DebugStrOffsetsBase(0), - addr_base: DebugAddrBase(0), - loclists_base: DebugLocListsBase(0), - rnglists_base: DebugRngListsBase(0), - line_program: None, - dwo_id: None, - }; - - let mut context = convert::ConvertUnitContext { - dwarf: &dwarf, - unit: &unit, - line_strings: &mut line_strings, - strings: &mut strings, - ranges: &mut ranges, - locations: &mut locations, - convert_address: &|address| Some(Address::Constant(address)), - base_address: Address::Constant(0), - line_program_offset: Some(line_program_offset), - line_program_files: line_program_files.clone(), - entry_ids: &HashMap::new(), - }; - - let convert_attr = - Attribute::from(&mut context, &read_attr).unwrap().unwrap(); - assert_eq!(convert_attr, attr); - } - } - } - } - } - - #[test] - fn test_line_program_used() { - for used in vec![false, true] { - let encoding = Encoding { - format: Format::Dwarf32, - version: 5, - address_size: 8, - }; - - let line_program = LineProgram::new( - encoding, - LineEncoding::default(), - LineString::String(b"comp_dir".to_vec()), - LineString::String(b"comp_name".to_vec()), - None, - ); - - let mut unit = Unit::new(encoding, line_program); - let file_id = if used { Some(FileId::new(0)) } else { None }; - let root = unit.root(); - unit.get_mut(root).set( - constants::DW_AT_decl_file, - AttributeValue::FileIndex(file_id), - ); - - let mut units = UnitTable::default(); - units.add(unit); - - let debug_line_str_offsets = DebugLineStrOffsets::none(); - let debug_str_offsets = DebugStrOffsets::none(); - let mut sections = 
Sections::new(EndianVec::new(LittleEndian)); - units - .write(&mut sections, &debug_line_str_offsets, &debug_str_offsets) - .unwrap(); - assert_eq!(!used, sections.debug_line.slice().is_empty()); - } - } - - #[test] - fn test_delete_child() { - fn set_name(unit: &mut Unit, id: UnitEntryId, name: &str) { - let entry = unit.get_mut(id); - entry.set(constants::DW_AT_name, AttributeValue::String(name.into())); - } - fn check_name( - entry: &read::DebuggingInformationEntry, - debug_str: &read::DebugStr, - name: &str, - ) { - let name_attr = entry.attr(constants::DW_AT_name).unwrap().unwrap(); - let entry_name = name_attr.string_value(debug_str).unwrap(); - let entry_name_str = entry_name.to_string().unwrap(); - assert_eq!(entry_name_str, name); - } - let encoding = Encoding { - format: Format::Dwarf32, - version: 4, - address_size: 8, - }; - let mut dwarf = DwarfUnit::new(encoding); - let root = dwarf.unit.root(); - - // Add and delete entries in the root unit - let child1 = dwarf.unit.add(root, constants::DW_TAG_subprogram); - set_name(&mut dwarf.unit, child1, "child1"); - let grandchild1 = dwarf.unit.add(child1, constants::DW_TAG_variable); - set_name(&mut dwarf.unit, grandchild1, "grandchild1"); - let child2 = dwarf.unit.add(root, constants::DW_TAG_subprogram); - set_name(&mut dwarf.unit, child2, "child2"); - // This deletes both `child1` and its child `grandchild1` - dwarf.unit.get_mut(root).delete_child(child1); - let child3 = dwarf.unit.add(root, constants::DW_TAG_subprogram); - set_name(&mut dwarf.unit, child3, "child3"); - let child4 = dwarf.unit.add(root, constants::DW_TAG_subprogram); - set_name(&mut dwarf.unit, child4, "child4"); - let grandchild4 = dwarf.unit.add(child4, constants::DW_TAG_variable); - set_name(&mut dwarf.unit, grandchild4, "grandchild4"); - dwarf.unit.get_mut(child4).delete_child(grandchild4); - - let mut sections = Sections::new(EndianVec::new(LittleEndian)); - - // Write DWARF data which should only include `child2`, `child3` and `child4` 
- dwarf.write(&mut sections).unwrap(); - - let read_debug_info = read::DebugInfo::new(sections.debug_info.slice(), LittleEndian); - let read_debug_abbrev = read::DebugAbbrev::new(sections.debug_abbrev.slice(), LittleEndian); - let read_debug_str = read::DebugStr::new(sections.debug_str.slice(), LittleEndian); - let read_unit = read_debug_info.units().next().unwrap().unwrap(); - let abbrevs = read_unit.abbreviations(&read_debug_abbrev).unwrap(); - let mut entries = read_unit.entries(&abbrevs); - // root - entries.next_dfs().unwrap().unwrap(); - // child2 - let (_, read_child2) = entries.next_dfs().unwrap().unwrap(); - check_name(read_child2, &read_debug_str, "child2"); - // child3 - let (_, read_child3) = entries.next_dfs().unwrap().unwrap(); - check_name(read_child3, &read_debug_str, "child3"); - // child4 - let (_, read_child4) = entries.next_dfs().unwrap().unwrap(); - check_name(read_child4, &read_debug_str, "child4"); - // There should be no more entries - assert!(entries.next_dfs().unwrap().is_none()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/gimli/src/write/writer.rs s390-tools-2.33.1/rust-vendor/gimli/src/write/writer.rs --- s390-tools-2.31.0/rust-vendor/gimli/src/write/writer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/gimli/src/write/writer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,494 +0,0 @@ -use crate::common::{Format, SectionId}; -use crate::constants; -use crate::endianity::Endianity; -use crate::leb128; -use crate::write::{Address, Error, Result}; - -/// A trait for writing the data to a DWARF section. -/// -/// All write operations append to the section unless otherwise specified. -#[allow(clippy::len_without_is_empty)] -pub trait Writer { - /// The endianity of bytes that are written. - type Endian: Endianity; - - /// Return the endianity of bytes that are written. - fn endian(&self) -> Self::Endian; - - /// Return the current section length. - /// - /// This may be used as an offset for future `write_at` calls. 
- fn len(&self) -> usize; - - /// Write a slice. - fn write(&mut self, bytes: &[u8]) -> Result<()>; - - /// Write a slice at a given offset. - /// - /// The write must not extend past the current section length. - fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()>; - - /// Write an address. - /// - /// If the writer supports relocations, then it must provide its own implementation - /// of this method. - // TODO: use write_reference instead? - fn write_address(&mut self, address: Address, size: u8) -> Result<()> { - match address { - Address::Constant(val) => self.write_udata(val, size), - Address::Symbol { .. } => Err(Error::InvalidAddress), - } - } - - /// Write an address with a `.eh_frame` pointer encoding. - /// - /// The given size is only used for `DW_EH_PE_absptr` formats. - /// - /// If the writer supports relocations, then it must provide its own implementation - /// of this method. - fn write_eh_pointer( - &mut self, - address: Address, - eh_pe: constants::DwEhPe, - size: u8, - ) -> Result<()> { - match address { - Address::Constant(val) => { - // Indirect doesn't matter here. - let val = match eh_pe.application() { - constants::DW_EH_PE_absptr => val, - constants::DW_EH_PE_pcrel => { - // TODO: better handling of sign - let offset = self.len() as u64; - val.wrapping_sub(offset) - } - _ => { - return Err(Error::UnsupportedPointerEncoding(eh_pe)); - } - }; - self.write_eh_pointer_data(val, eh_pe.format(), size) - } - Address::Symbol { .. } => Err(Error::InvalidAddress), - } - } - - /// Write a value with a `.eh_frame` pointer format. - /// - /// The given size is only used for `DW_EH_PE_absptr` formats. - /// - /// This must not be used directly for values that may require relocation. 
- fn write_eh_pointer_data( - &mut self, - val: u64, - format: constants::DwEhPe, - size: u8, - ) -> Result<()> { - match format { - constants::DW_EH_PE_absptr => self.write_udata(val, size), - constants::DW_EH_PE_uleb128 => self.write_uleb128(val), - constants::DW_EH_PE_udata2 => self.write_udata(val, 2), - constants::DW_EH_PE_udata4 => self.write_udata(val, 4), - constants::DW_EH_PE_udata8 => self.write_udata(val, 8), - constants::DW_EH_PE_sleb128 => self.write_sleb128(val as i64), - constants::DW_EH_PE_sdata2 => self.write_sdata(val as i64, 2), - constants::DW_EH_PE_sdata4 => self.write_sdata(val as i64, 4), - constants::DW_EH_PE_sdata8 => self.write_sdata(val as i64, 8), - _ => Err(Error::UnsupportedPointerEncoding(format)), - } - } - - /// Write an offset that is relative to the start of the given section. - /// - /// If the writer supports relocations, then it must provide its own implementation - /// of this method. - fn write_offset(&mut self, val: usize, _section: SectionId, size: u8) -> Result<()> { - self.write_udata(val as u64, size) - } - - /// Write an offset that is relative to the start of the given section. - /// - /// If the writer supports relocations, then it must provide its own implementation - /// of this method. - fn write_offset_at( - &mut self, - offset: usize, - val: usize, - _section: SectionId, - size: u8, - ) -> Result<()> { - self.write_udata_at(offset, val as u64, size) - } - - /// Write a reference to a symbol. - /// - /// If the writer supports symbols, then it must provide its own implementation - /// of this method. - fn write_reference(&mut self, _symbol: usize, _size: u8) -> Result<()> { - Err(Error::InvalidReference) - } - - /// Write a u8. - fn write_u8(&mut self, val: u8) -> Result<()> { - let bytes = [val]; - self.write(&bytes) - } - - /// Write a u16. - fn write_u16(&mut self, val: u16) -> Result<()> { - let mut bytes = [0; 2]; - self.endian().write_u16(&mut bytes, val); - self.write(&bytes) - } - - /// Write a u32. 
- fn write_u32(&mut self, val: u32) -> Result<()> { - let mut bytes = [0; 4]; - self.endian().write_u32(&mut bytes, val); - self.write(&bytes) - } - - /// Write a u64. - fn write_u64(&mut self, val: u64) -> Result<()> { - let mut bytes = [0; 8]; - self.endian().write_u64(&mut bytes, val); - self.write(&bytes) - } - - /// Write a u8 at the given offset. - fn write_u8_at(&mut self, offset: usize, val: u8) -> Result<()> { - let bytes = [val]; - self.write_at(offset, &bytes) - } - - /// Write a u16 at the given offset. - fn write_u16_at(&mut self, offset: usize, val: u16) -> Result<()> { - let mut bytes = [0; 2]; - self.endian().write_u16(&mut bytes, val); - self.write_at(offset, &bytes) - } - - /// Write a u32 at the given offset. - fn write_u32_at(&mut self, offset: usize, val: u32) -> Result<()> { - let mut bytes = [0; 4]; - self.endian().write_u32(&mut bytes, val); - self.write_at(offset, &bytes) - } - - /// Write a u64 at the given offset. - fn write_u64_at(&mut self, offset: usize, val: u64) -> Result<()> { - let mut bytes = [0; 8]; - self.endian().write_u64(&mut bytes, val); - self.write_at(offset, &bytes) - } - - /// Write unsigned data of the given size. - /// - /// Returns an error if the value is too large for the size. - /// This must not be used directly for values that may require relocation. - fn write_udata(&mut self, val: u64, size: u8) -> Result<()> { - match size { - 1 => { - let write_val = val as u8; - if val != u64::from(write_val) { - return Err(Error::ValueTooLarge); - } - self.write_u8(write_val) - } - 2 => { - let write_val = val as u16; - if val != u64::from(write_val) { - return Err(Error::ValueTooLarge); - } - self.write_u16(write_val) - } - 4 => { - let write_val = val as u32; - if val != u64::from(write_val) { - return Err(Error::ValueTooLarge); - } - self.write_u32(write_val) - } - 8 => self.write_u64(val), - otherwise => Err(Error::UnsupportedWordSize(otherwise)), - } - } - - /// Write signed data of the given size. 
- /// - /// Returns an error if the value is too large for the size. - /// This must not be used directly for values that may require relocation. - fn write_sdata(&mut self, val: i64, size: u8) -> Result<()> { - match size { - 1 => { - let write_val = val as i8; - if val != i64::from(write_val) { - return Err(Error::ValueTooLarge); - } - self.write_u8(write_val as u8) - } - 2 => { - let write_val = val as i16; - if val != i64::from(write_val) { - return Err(Error::ValueTooLarge); - } - self.write_u16(write_val as u16) - } - 4 => { - let write_val = val as i32; - if val != i64::from(write_val) { - return Err(Error::ValueTooLarge); - } - self.write_u32(write_val as u32) - } - 8 => self.write_u64(val as u64), - otherwise => Err(Error::UnsupportedWordSize(otherwise)), - } - } - - /// Write a word of the given size at the given offset. - /// - /// Returns an error if the value is too large for the size. - /// This must not be used directly for values that may require relocation. - fn write_udata_at(&mut self, offset: usize, val: u64, size: u8) -> Result<()> { - match size { - 1 => { - let write_val = val as u8; - if val != u64::from(write_val) { - return Err(Error::ValueTooLarge); - } - self.write_u8_at(offset, write_val) - } - 2 => { - let write_val = val as u16; - if val != u64::from(write_val) { - return Err(Error::ValueTooLarge); - } - self.write_u16_at(offset, write_val) - } - 4 => { - let write_val = val as u32; - if val != u64::from(write_val) { - return Err(Error::ValueTooLarge); - } - self.write_u32_at(offset, write_val) - } - 8 => self.write_u64_at(offset, val), - otherwise => Err(Error::UnsupportedWordSize(otherwise)), - } - } - - /// Write an unsigned LEB128 encoded integer. - fn write_uleb128(&mut self, val: u64) -> Result<()> { - let mut bytes = [0u8; 10]; - // bytes is long enough so this will never fail. - let len = leb128::write::unsigned(&mut { &mut bytes[..] 
}, val).unwrap(); - self.write(&bytes[..len]) - } - - /// Read an unsigned LEB128 encoded integer. - fn write_sleb128(&mut self, val: i64) -> Result<()> { - let mut bytes = [0u8; 10]; - // bytes is long enough so this will never fail. - let len = leb128::write::signed(&mut { &mut bytes[..] }, val).unwrap(); - self.write(&bytes[..len]) - } - - /// Write an initial length according to the given DWARF format. - /// - /// This will only write a length of zero, since the length isn't - /// known yet, and a subsequent call to `write_initial_length_at` - /// will write the actual length. - fn write_initial_length(&mut self, format: Format) -> Result { - if format == Format::Dwarf64 { - self.write_u32(0xffff_ffff)?; - } - let offset = InitialLengthOffset(self.len()); - self.write_udata(0, format.word_size())?; - Ok(offset) - } - - /// Write an initial length at the given offset according to the given DWARF format. - /// - /// `write_initial_length` must have previously returned the offset. - fn write_initial_length_at( - &mut self, - offset: InitialLengthOffset, - length: u64, - format: Format, - ) -> Result<()> { - self.write_udata_at(offset.0, length, format.word_size()) - } -} - -/// The offset at which an initial length should be written. 
-#[derive(Debug, Clone, Copy)] -pub struct InitialLengthOffset(usize); - -#[cfg(test)] -mod tests { - use super::*; - use crate::write; - use crate::{BigEndian, LittleEndian}; - use std::{i64, u64}; - - #[test] - fn test_writer() { - let mut w = write::EndianVec::new(LittleEndian); - w.write_address(Address::Constant(0x1122_3344), 4).unwrap(); - assert_eq!(w.slice(), &[0x44, 0x33, 0x22, 0x11]); - assert_eq!( - w.write_address( - Address::Symbol { - symbol: 0, - addend: 0 - }, - 4 - ), - Err(Error::InvalidAddress) - ); - - let mut w = write::EndianVec::new(LittleEndian); - w.write_offset(0x1122_3344, SectionId::DebugInfo, 4) - .unwrap(); - assert_eq!(w.slice(), &[0x44, 0x33, 0x22, 0x11]); - w.write_offset_at(1, 0x5566, SectionId::DebugInfo, 2) - .unwrap(); - assert_eq!(w.slice(), &[0x44, 0x66, 0x55, 0x11]); - - let mut w = write::EndianVec::new(LittleEndian); - w.write_u8(0x11).unwrap(); - w.write_u16(0x2233).unwrap(); - w.write_u32(0x4455_6677).unwrap(); - w.write_u64(0x8081_8283_8485_8687).unwrap(); - #[rustfmt::skip] - assert_eq!(w.slice(), &[ - 0x11, - 0x33, 0x22, - 0x77, 0x66, 0x55, 0x44, - 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, - ]); - w.write_u8_at(14, 0x11).unwrap(); - w.write_u16_at(12, 0x2233).unwrap(); - w.write_u32_at(8, 0x4455_6677).unwrap(); - w.write_u64_at(0, 0x8081_8283_8485_8687).unwrap(); - #[rustfmt::skip] - assert_eq!(w.slice(), &[ - 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, - 0x77, 0x66, 0x55, 0x44, - 0x33, 0x22, - 0x11, - ]); - - let mut w = write::EndianVec::new(BigEndian); - w.write_u8(0x11).unwrap(); - w.write_u16(0x2233).unwrap(); - w.write_u32(0x4455_6677).unwrap(); - w.write_u64(0x8081_8283_8485_8687).unwrap(); - #[rustfmt::skip] - assert_eq!(w.slice(), &[ - 0x11, - 0x22, 0x33, - 0x44, 0x55, 0x66, 0x77, - 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, - ]); - w.write_u8_at(14, 0x11).unwrap(); - w.write_u16_at(12, 0x2233).unwrap(); - w.write_u32_at(8, 0x4455_6677).unwrap(); - w.write_u64_at(0, 
0x8081_8283_8485_8687).unwrap(); - #[rustfmt::skip] - assert_eq!(w.slice(), &[ - 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, - 0x44, 0x55, 0x66, 0x77, - 0x22, 0x33, - 0x11, - ]); - - let mut w = write::EndianVec::new(LittleEndian); - w.write_udata(0x11, 1).unwrap(); - w.write_udata(0x2233, 2).unwrap(); - w.write_udata(0x4455_6677, 4).unwrap(); - w.write_udata(0x8081_8283_8485_8687, 8).unwrap(); - #[rustfmt::skip] - assert_eq!(w.slice(), &[ - 0x11, - 0x33, 0x22, - 0x77, 0x66, 0x55, 0x44, - 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, - ]); - assert_eq!(w.write_udata(0x100, 1), Err(Error::ValueTooLarge)); - assert_eq!(w.write_udata(0x1_0000, 2), Err(Error::ValueTooLarge)); - assert_eq!(w.write_udata(0x1_0000_0000, 4), Err(Error::ValueTooLarge)); - assert_eq!(w.write_udata(0x00, 3), Err(Error::UnsupportedWordSize(3))); - w.write_udata_at(14, 0x11, 1).unwrap(); - w.write_udata_at(12, 0x2233, 2).unwrap(); - w.write_udata_at(8, 0x4455_6677, 4).unwrap(); - w.write_udata_at(0, 0x8081_8283_8485_8687, 8).unwrap(); - #[rustfmt::skip] - assert_eq!(w.slice(), &[ - 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, - 0x77, 0x66, 0x55, 0x44, - 0x33, 0x22, - 0x11, - ]); - assert_eq!(w.write_udata_at(0, 0x100, 1), Err(Error::ValueTooLarge)); - assert_eq!(w.write_udata_at(0, 0x1_0000, 2), Err(Error::ValueTooLarge)); - assert_eq!( - w.write_udata_at(0, 0x1_0000_0000, 4), - Err(Error::ValueTooLarge) - ); - assert_eq!( - w.write_udata_at(0, 0x00, 3), - Err(Error::UnsupportedWordSize(3)) - ); - - let mut w = write::EndianVec::new(LittleEndian); - w.write_uleb128(0).unwrap(); - assert_eq!(w.slice(), &[0]); - - let mut w = write::EndianVec::new(LittleEndian); - w.write_uleb128(u64::MAX).unwrap(); - assert_eq!( - w.slice(), - &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 1] - ); - - let mut w = write::EndianVec::new(LittleEndian); - w.write_sleb128(0).unwrap(); - assert_eq!(w.slice(), &[0]); - - let mut w = write::EndianVec::new(LittleEndian); - 
w.write_sleb128(i64::MAX).unwrap(); - assert_eq!( - w.slice(), - &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0] - ); - - let mut w = write::EndianVec::new(LittleEndian); - w.write_sleb128(i64::MIN).unwrap(); - assert_eq!( - w.slice(), - &[0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7f] - ); - - let mut w = write::EndianVec::new(LittleEndian); - let offset = w.write_initial_length(Format::Dwarf32).unwrap(); - assert_eq!(w.slice(), &[0, 0, 0, 0]); - w.write_initial_length_at(offset, 0x1122_3344, Format::Dwarf32) - .unwrap(); - assert_eq!(w.slice(), &[0x44, 0x33, 0x22, 0x11]); - assert_eq!( - w.write_initial_length_at(offset, 0x1_0000_0000, Format::Dwarf32), - Err(Error::ValueTooLarge) - ); - - let mut w = write::EndianVec::new(LittleEndian); - let offset = w.write_initial_length(Format::Dwarf64).unwrap(); - assert_eq!(w.slice(), &[0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0]); - w.write_initial_length_at(offset, 0x1122_3344_5566_7788, Format::Dwarf64) - .unwrap(); - assert_eq!( - w.slice(), - &[0xff, 0xff, 0xff, 0xff, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11] - ); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/h2/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/h2/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/h2/Cargo.lock s390-tools-2.33.1/rust-vendor/h2/Cargo.lock --- s390-tools-2.31.0/rust-vendor/h2/Cargo.lock 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 @@ -1,673 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addr2line" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "backtrace" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "bytes" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" - -[[package]] -name = "cc" -version = "1.0.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "env_logger" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" -dependencies = [ - "log", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - 
-[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "futures-core" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" - -[[package]] -name = "futures-sink" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" - -[[package]] -name = "futures-task" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" - -[[package]] -name = "futures-util" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" -dependencies = [ - "futures-core", - "futures-task", - "pin-project-lite", - "pin-utils", -] - -[[package]] -name = "getrandom" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "gimli" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" - -[[package]] -name = "h2" -version = "0.3.22" -dependencies = [ - "bytes", - "env_logger", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "hex", - "http", - "indexmap", - "quickcheck", - "rand", - "serde", - "serde_json", - "slab", - "tokio", - "tokio-rustls", - "tokio-util", - "tracing", - "walkdir", - "webpki-roots", -] - -[[package]] -name = "hashbrown" -version = "0.14.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" - -[[package]] -name = "hermit-abi" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "http" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "indexmap" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "itoa" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" - -[[package]] -name = "libc" -version = "0.2.149" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" - -[[package]] -name = "log" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" - -[[package]] -name = "memchr" -version = "2.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" - -[[package]] -name = "miniz_oxide" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" -dependencies = [ - "libc", - "wasi", - "windows-sys", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "object" -version = "0.32.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" - -[[package]] -name = "pin-project-lite" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "proc-macro2" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quickcheck" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" -dependencies = [ - "rand", -] - -[[package]] -name = "quote" -version = "1.0.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "ring" -version = "0.17.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" -dependencies = [ - "cc", - "getrandom", - "libc", - "spin", - "untrusted", - "windows-sys", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] -name = "rustls" -version = "0.21.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" -dependencies = [ - "log", - "ring", - "rustls-webpki", - "sct", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "ryu" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "serde" -version = "1.0.190" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.190" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.107" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "socket2" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" -dependencies = [ - "libc", - 
"windows-sys", -] - -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - -[[package]] -name = "syn" -version = "2.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tokio" -version = "1.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" -dependencies = [ - "backtrace", - "bytes", - "libc", - "mio", - "num_cpus", - "pin-project-lite", - "socket2", - "tokio-macros", - "windows-sys", -] - -[[package]] -name = "tokio-macros" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", - "tracing", -] - -[[package]] -name = "tracing" -version = "0.1.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" -dependencies = [ - "pin-project-lite", - "tracing-core", -] - -[[package]] -name = "tracing-core" -version = "0.1.32" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" -dependencies = [ - "once_cell", -] - -[[package]] -name = "unicode-ident" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "untrusted" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" - -[[package]] -name = "walkdir" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "webpki-roots" -version = "0.25.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" -dependencies = [ - "winapi", -] - -[[package]] -name 
= "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" diff -Nru s390-tools-2.31.0/rust-vendor/h2/Cargo.toml s390-tools-2.33.1/rust-vendor/h2/Cargo.toml --- s390-tools-2.31.0/rust-vendor/h2/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,128 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -rust-version = "1.63" -name = "h2" -version = "0.3.22" -authors = [ - "Carl Lerche ", - "Sean McArthur ", -] -exclude = [ - "fixtures/**", - "ci/**", -] -description = "An HTTP/2 client and server" -documentation = "https://docs.rs/h2" -readme = "README.md" -keywords = [ - "http", - "async", - "non-blocking", -] -categories = [ - "asynchronous", - "web-programming", - "network-programming", -] -license = "MIT" -repository = "https://github.com/hyperium/h2" - -[package.metadata.docs.rs] -features = ["stream"] - -[dependencies.bytes] -version = "1" - -[dependencies.fnv] -version = "1.0.5" - -[dependencies.futures-core] -version = "0.3" -default-features = false - -[dependencies.futures-sink] -version = "0.3" -default-features = false - -[dependencies.futures-util] -version = "0.3" -default-features = false - -[dependencies.http] -version = "0.2" - -[dependencies.indexmap] -version = "2" -features = ["std"] - -[dependencies.slab] -version = "0.4.2" - -[dependencies.tokio] -version = "1" -features = ["io-util"] - -[dependencies.tokio-util] -version = "0.7.1" -features = [ - "codec", - "io", -] - -[dependencies.tracing] -version = "0.1.35" -features = ["std"] -default-features = false - -[dev-dependencies.env_logger] -version = "0.10" -default-features = false - -[dev-dependencies.hex] -version = "0.4.3" - -[dev-dependencies.quickcheck] -version = "1.0.3" -default-features = false - -[dev-dependencies.rand] -version = "0.8.4" - -[dev-dependencies.serde] -version = "1.0.0" - -[dev-dependencies.serde_json] -version = "1.0.0" - -[dev-dependencies.tokio] -version = "1" -features = [ - "rt-multi-thread", - "macros", - "sync", - "net", -] - -[dev-dependencies.tokio-rustls] -version = "0.24" - -[dev-dependencies.walkdir] -version = "2.3.2" - -[dev-dependencies.webpki-roots] -version = "0.25" - -[features] -stream = [] -unstable = [] diff -Nru s390-tools-2.31.0/rust-vendor/h2/CHANGELOG.md s390-tools-2.33.1/rust-vendor/h2/CHANGELOG.md --- 
s390-tools-2.31.0/rust-vendor/h2/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,316 +0,0 @@ -# 0.3.22 (November 15, 2023) - -* Add `header_table_size(usize)` option to client and server builders. -* Improve throughput when vectored IO is not available. -* Update indexmap to 2. - -# 0.3.21 (August 21, 2023) - -* Fix opening of new streams over peer's max concurrent limit. -* Fix `RecvStream` to return data even if it has received a `CANCEL` stream error. -* Update MSRV to 1.63. - -# 0.3.20 (June 26, 2023) - -* Fix panic if a server received a request with a `:status` pseudo header in the 1xx range. -* Fix panic if a reset stream had pending push promises that were more than allowed. -* Fix potential flow control overflow by subtraction, instead returning a connection error. - -# 0.3.19 (May 12, 2023) - -* Fix counting reset streams when triggered by a GOAWAY. -* Send `too_many_resets` in opaque debug data of GOAWAY when too many resets received. - -# 0.3.18 (April 17, 2023) - -* Fix panic because of opposite check in `is_remote_local()`. - -# 0.3.17 (April 13, 2023) - -* Add `Error::is_library()` method to check if the originated inside `h2`. -* Add `max_pending_accept_reset_streams(usize)` option to client and server - builders. -* Fix theoretical memory growth when receiving too many HEADERS and then - RST_STREAM frames faster than an application can accept them off the queue. - (CVE-2023-26964) - -# 0.3.16 (February 27, 2023) - -* Set `Protocol` extension on requests when received Extended CONNECT requests. -* Remove `B: Unpin + 'static` bound requiremented of bufs -* Fix releasing of frames when stream is finished, reducing memory usage. -* Fix panic when trying to send data and connection window is available, but stream window is not. -* Fix spurious wakeups when stream capacity is not available. 
- -# 0.3.15 (October 21, 2022) - -* Remove `B: Buf` bound on `SendStream`'s parameter -* add accessor for `StreamId` u32 - -# 0.3.14 (August 16, 2022) - -* Add `Error::is_reset` function. -* Bump MSRV to Rust 1.56. -* Return `RST_STREAM(NO_ERROR)` when the server early responds. - -# 0.3.13 (March 31, 2022) - -* Update private internal `tokio-util` dependency. - -# 0.3.12 (March 9, 2022) - -* Avoid time operations that can panic (#599) -* Bump MSRV to Rust 1.49 (#606) -* Fix header decoding error when a header name is contained at a continuation - header boundary (#589) -* Remove I/O type names from handshake `tracing` spans (#608) - -# 0.3.11 (January 26, 2022) - -* Make `SendStream::poll_capacity` never return `Ok(Some(0))` (#596) -* Fix panic when receiving already reset push promise (#597) - -# 0.3.10 (January 6, 2022) - -* Add `Error::is_go_away()` and `Error::is_remote()` methods. -* Fix panic if receiving malformed PUSH_PROMISE with stream ID of 0. - -# 0.3.9 (December 9, 2021) - -* Fix hang related to new `max_send_buffer_size`. - -# 0.3.8 (December 8, 2021) - -* Add "extended CONNECT support". Adds `h2::ext::Protocol`, which is used for request and response extensions to connect new protocols over an HTTP/2 stream. -* Add `max_send_buffer_size` options to client and server builders, and a default of ~400MB. This acts like a high-water mark for the `poll_capacity()` method. -* Fix panic if receiving malformed HEADERS with stream ID of 0. - -# 0.3.7 (October 22, 2021) - -* Fix panic if server sends a malformed frame on a stream client was about to open. -* Fix server to treat `:status` in a request as a stream error instead of connection error. - -# 0.3.6 (September 30, 2021) - -* Fix regression of `h2::Error` that were created via `From` not returning their reason code in `Error::reason()`. - -# 0.3.5 (September 29, 2021) - -* Fix sending of very large headers. 
Previously when a single header was too big to fit in a single `HEADERS` frame, an error was returned. Now it is broken up and sent correctly. -* Fix buffered data field to be a bigger integer size. -* Refactor error format to include what initiated the error (remote, local, or user), if it was a stream or connection-level error, and any received debug data. - -# 0.3.4 (August 20, 2021) - -* Fix panic when encoding header size update over a certain size. -* Fix `SendRequest` to wake up connection when dropped. -* Fix potential hang if `RecvStream` is placed in the request or response `extensions`. -* Stop calling `Instant::now` if zero reset streams are configured. - -# 0.3.3 (April 29, 2021) - -* Fix client being able to make `CONNECT` requests without a `:path`. -* Expose `RecvStream::poll_data`. -* Fix some docs. - -# 0.3.2 (March 24, 2021) - -* Fix incorrect handling of received 1xx responses on the client when the request body is still streaming. - -# 0.3.1 (February 26, 2021) - -* Add `Connection::max_concurrent_recv_streams()` getter. -* Add `Connection::max_concurrent_send_streams()` getter. -* Fix client to ignore receipt of 1xx headers frames. -* Fix incorrect calculation of pseudo header lengths when determining if a received header is too big. -* Reduce monomorphized code size of internal code. - -# 0.3.0 (December 23, 2020) - -* Update to Tokio v1 and Bytes v1. -* Disable `tracing`'s `log` feature. (It can still be enabled by a user in their own `Cargo.toml`.) - -# 0.2.7 (October 22, 2020) - -* Fix stream ref count when sending a push promise -* Fix receiving empty DATA frames in response to a HEAD request -* Fix handling of client disabling SERVER_PUSH - -# 0.2.6 (July 13, 2020) - -* Integrate `tracing` directly where `log` was used. (For 0.2.x, `log`s are still emitted by default.) - -# 0.2.5 (May 6, 2020) - -* Fix rare debug assert failure in store shutdown. - -# 0.2.4 (March 30, 2020) - -* Fix when receiving `SETTINGS_HEADER_TABLE_SIZE` setting. 
- -# 0.2.3 (March 25, 2020) - -* Fix server being able to accept `CONNECT` requests without `:scheme` or `:path`. -* Fix receiving a GOAWAY frame from updating the recv max ID, it should only update max send ID. - -# 0.2.2 (March 3, 2020) - -* Reduce size of `FlowControl` and `RecvStream`. - -# 0.2.1 (December 6, 2019) - -* Relax `Unpin` bounds on the send `Buf` generic. - -# 0.2.0 (December 3, 2019) - -* Add `server::Connection::set_initial_window_size` and `client::Connection::set_initial_window_size` which can adjust the `INITIAL_WINDOW_SIZE` setting on an existing connection (#421). -* Update to `http` v0.2. -* Update to `tokio` v0.2. -* Change `unstable-stream` feature to `stream`. -* Change `ReserveCapacity` to `FlowControl` (#423). -* Remove `From` for `Error`. - -# 0.2.0-alpha.3 (October 1, 2019) - -* Update to futures `0.3.0-alpha.19`. -* Update to tokio `0.2.0-alpha.6`. - -# 0.2.0-alpha.2 (September 20, 2019) - -* Add server support for `PUSH_PROMISE`s (#327). -* Update to tokio `0.2.0-alpha.5`. -* Change `stream` feature to `unstable-stream`. - -# 0.2.0-alpha.1 (August 30, 2019) - -* Update from `futures` 0.1 to `std::future::Future`. -* Update `AsyncRead`/`AsyncWrite` to `tokio-io` 0.2 alpha. -* Change `Stream` implementations to be optional, default disabled. Specific async and poll functions are now inherent, and `Stream` can be re-enabled with the `stream` cargo feature. - -# 0.1.25 (June 28, 2019) - -* Fix to send a `RST_STREAM` instead of `GOAWAY` if receiving a frame on a previously closed stream. -* Fix receiving trailers without an end-stream flag to be a stream error instead of connection error. - -# 0.1.24 (June 17, 2019) - -* Fix server wrongly rejecting requests that don't have an `:authority` header (#372). - -# 0.1.23 (June 4, 2019) - -* Fix leaking of received DATA frames if the `RecvStream` is never polled (#368). 
- -# 0.1.22 (June 3, 2019) - -* Fix rare panic when remote sends `RST_STREAM` or `GOAWAY` for a stream pending window capacity (#364). - -# 0.1.21 (May 30, 2019) - -* Fix write loop when a header didn't fit in write buffer. - -# 0.1.20 (May 16, 2019) - -* Fix lifetime conflict for older compilers. - -# 0.1.19 (May 15, 2019) - -* Fix rare crash if `CONTINUATION` frame resumed in the middle of headers with the same name. -* Fix HPACK encoder using an old evicted index for repeated header names. - -# 0.1.18 (April 9, 2019) - -* Fix `server::Connection::abrupt_shutdown` to no longer return the same error the user sent (#352). - -# 0.1.17 (March 12, 2019) - -* Add user PING support (#346). -* Fix notifying a `RecvStream` task if locally sending a reset. -* Fix connections "hanging" when all handles are dropped but some streams had been reset. - -# 0.1.16 (January 24, 2019) - -* Log header values when malformed (#342). - -# 0.1.15 (January 12, 2019) - -* Fix race condition bug related to shutting down the client (#338). - -# 0.1.14 (December 5, 2018) - -* Fix closed streams to always return window capacity to the connection (#334). -* Fix locking when `Debug` printing an `OpaqueStreamRef` (#333). -* Fix inverted split for DATA frame padding (#330). -* Reduce `Debug` noise for `Frame` (#329). - -# 0.1.13 (October 16, 2018) - -* Add client support for Push Promises (#314). -* Expose `io::Error` from `h2::Error` (#311) -* Misc bug fixes (#304, #309, #319, #313, #320). - -# 0.1.12 (August 8, 2018) - -* Fix initial send window size (#301). -* Fix panic when calling `reserve_capacity` after connection has been closed (#302). -* Fix handling of incoming `SETTINGS_INITIAL_WINDOW_SIZE`. (#299) - -# 0.1.11 (July 31, 2018) - -* Add `stream_id` accessors to public API types (#292). -* Fix potential panic when dropping clients (#295). -* Fix busy loop when shutting down server (#296). - -# 0.1.10 (June 15, 2018) - -* Fix potential panic in `SendRequest::poll_ready()` (#281). 
-* Fix infinite loop on reset connection during prefix (#285). - -# 0.1.9 (May 31, 2018) - -* Add `poll_reset` to `SendResponse` and `SendStream` (#279). - -# 0.1.8 (May 23, 2018) - -* Fix client bug when max streams is reached. (#277) - -# 0.1.7 (May 14, 2018) - -* Misc bug fixes (#266, #273, #261, #275). - -# 0.1.6 (April 24, 2018) - -* Misc bug fixes related to stream management (#258, #260, #262). - -# 0.1.5 (April 6, 2018) - -* Fix the `last_stream_id` sent during graceful GOAWAY (#254). - -# 0.1.4 (April 5, 2018) - -* Add `initial_connection_window_size` to client and server `Builder`s (#249). -* Add `graceful_shutdown` and `abrupt_shutdown` to `server::Connection`, - deprecating `close_connection` (#250). - -# 0.1.3 (March 28, 2018) - -* Allow configuring max streams before the peer's settings frame is - received (#242). -* Fix HPACK decoding bug with regards to large literals (#244). -* Fix state transition bug triggered by receiving a RST_STREAM frame (#247). - -# 0.1.2 (March 13, 2018) - -* Fix another bug relating to resetting connections and reaching - max concurrency (#238). - -# 0.1.1 (March 8, 2018) - -* When streams are dropped, close the connection (#222). -* Notify send tasks on connection error (#231). -* Fix bug relating to resetting connections and reaching max concurrency (#235). -* Normalize HTTP request path to satisfy HTTP/2.0 specification (#228). -* Update internal dependencies. - -# 0.1.0 (Jan 12, 2018) - -* Initial release diff -Nru s390-tools-2.31.0/rust-vendor/h2/CONTRIBUTING.md s390-tools-2.33.1/rust-vendor/h2/CONTRIBUTING.md --- s390-tools-2.31.0/rust-vendor/h2/CONTRIBUTING.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/CONTRIBUTING.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,84 +0,0 @@ -# Contributing to _h2_ # - -:balloon: Thanks for your help improving the project! 
- -## Getting Help ## - -If you have a question about the h2 library or have encountered problems using it, you may -[file an issue][issue] or ask a question on the [Tokio Gitter][gitter]. - -## Submitting a Pull Request ## - -Do you have an improvement? - -1. Submit an [issue][issue] describing your proposed change. -2. We will try to respond to your issue promptly. -3. Fork this repo, develop and test your code changes. See the project's [README](README.md) for further information about working in this repository. -4. Submit a pull request against this repo's `master` branch. -5. Your branch may be merged once all configured checks pass, including: - - Code review has been completed. - - The branch has passed tests in CI. - -## Committing ## - -When initially submitting a pull request, we prefer a single squashed commit. It -is preferable to split up contributions into multiple pull requests if the -changes are unrelated. All pull requests are squashed when merged, but -squashing yourself gives you better control over the commit message. - -After the pull request is submitted, all changes should be done in separate -commits. This makes reviewing the evolution of the pull request easier. We will -squash all the changes into a single commit when we merge the pull request. - -### Commit messages ### - -Finalized commit messages should be in the following format: - -``` -Subject - -Problem - -Solution - -Validation -``` - -#### Subject #### - -- one line, <= 50 characters -- describe what is done; not the result -- use the active voice -- capitalize first word and proper nouns -- do not end in a period — this is a title/subject -- reference the github issue by number - -##### Examples ##### - -``` -bad: server disconnects should cause dst client disconnects. 
-good: Propagate disconnects from source to destination -``` - -``` -bad: support tls servers -good: Introduce support for server-side TLS (#347) -``` - -#### Problem #### - -Explain the context and why you're making that change. What is the problem -you're trying to solve? In some cases there is not a problem and this can be -thought of as being the motivation for your change. - -#### Solution #### - -Describe the modifications you've made. - -#### Validation #### - -Describe the testing you've done to validate your change. Performance-related -changes should include before- and after- benchmark results. - -[issue]: https://github.com/hyperium/h2/issues/new -[gitter]: https://gitter.im/tokio-rs/tokio diff -Nru s390-tools-2.31.0/rust-vendor/h2/examples/akamai.rs s390-tools-2.33.1/rust-vendor/h2/examples/akamai.rs --- s390-tools-2.31.0/rust-vendor/h2/examples/akamai.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/examples/akamai.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,82 +0,0 @@ -use h2::client; -use http::{Method, Request}; -use tokio::net::TcpStream; -use tokio_rustls::TlsConnector; - -use tokio_rustls::rustls::{OwnedTrustAnchor, RootCertStore, ServerName}; - -use std::convert::TryFrom; -use std::error::Error; -use std::net::ToSocketAddrs; - -const ALPN_H2: &str = "h2"; - -#[tokio::main] -pub async fn main() -> Result<(), Box> { - let _ = env_logger::try_init(); - - let tls_client_config = std::sync::Arc::new({ - let mut root_store = RootCertStore::empty(); - root_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject, - ta.spki, - ta.name_constraints, - ) - })); - - let mut c = tokio_rustls::rustls::ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(root_store) - .with_no_client_auth(); - c.alpn_protocols.push(ALPN_H2.as_bytes().to_owned()); - c - }); - - // Sync DNS resolution. 
- let addr = "http2.akamai.com:443" - .to_socket_addrs() - .unwrap() - .next() - .unwrap(); - - println!("ADDR: {:?}", addr); - - let tcp = TcpStream::connect(&addr).await?; - let dns_name = ServerName::try_from("http2.akamai.com").unwrap(); - let connector = TlsConnector::from(tls_client_config); - let res = connector.connect(dns_name, tcp).await; - let tls = res.unwrap(); - { - let (_, session) = tls.get_ref(); - let negotiated_protocol = session.alpn_protocol(); - assert_eq!(Some(ALPN_H2.as_bytes()), negotiated_protocol); - } - - println!("Starting client handshake"); - let (mut client, h2) = client::handshake(tls).await?; - - println!("building request"); - let request = Request::builder() - .method(Method::GET) - .uri("https://http2.akamai.com/") - .body(()) - .unwrap(); - - println!("sending request"); - let (response, other) = client.send_request(request, true).unwrap(); - - tokio::spawn(async move { - if let Err(e) = h2.await { - println!("GOT ERR={:?}", e); - } - }); - - println!("waiting on response : {:?}", other); - let (_, mut body) = response.await?.into_parts(); - println!("processing body"); - while let Some(chunk) = body.data().await { - println!("RX: {:?}", chunk?); - } - Ok(()) -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/examples/client.rs s390-tools-2.33.1/rust-vendor/h2/examples/client.rs --- s390-tools-2.31.0/rust-vendor/h2/examples/client.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/examples/client.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,52 +0,0 @@ -use h2::client; -use http::{HeaderMap, Request}; - -use std::error::Error; - -use tokio::net::TcpStream; - -#[tokio::main] -pub async fn main() -> Result<(), Box> { - let _ = env_logger::try_init(); - - let tcp = TcpStream::connect("127.0.0.1:5928").await?; - let (mut client, h2) = client::handshake(tcp).await?; - - println!("sending request"); - - let request = Request::builder() - .uri("https://http2.akamai.com/") - .body(()) - .unwrap(); - - let mut 
trailers = HeaderMap::new(); - trailers.insert("zomg", "hello".parse().unwrap()); - - let (response, mut stream) = client.send_request(request, false).unwrap(); - - // send trailers - stream.send_trailers(trailers).unwrap(); - - // Spawn a task to run the conn... - tokio::spawn(async move { - if let Err(e) = h2.await { - println!("GOT ERR={:?}", e); - } - }); - - let response = response.await?; - println!("GOT RESPONSE: {:?}", response); - - // Get the body - let mut body = response.into_body(); - - while let Some(chunk) = body.data().await { - println!("GOT CHUNK = {:?}", chunk?); - } - - if let Some(trailers) = body.trailers().await? { - println!("GOT TRAILERS: {:?}", trailers); - } - - Ok(()) -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/examples/server.rs s390-tools-2.33.1/rust-vendor/h2/examples/server.rs --- s390-tools-2.31.0/rust-vendor/h2/examples/server.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/examples/server.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,65 +0,0 @@ -use std::error::Error; - -use bytes::Bytes; -use h2::server::{self, SendResponse}; -use h2::RecvStream; -use http::Request; -use tokio::net::{TcpListener, TcpStream}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - let _ = env_logger::try_init(); - - let listener = TcpListener::bind("127.0.0.1:5928").await?; - - println!("listening on {:?}", listener.local_addr()); - - loop { - if let Ok((socket, _peer_addr)) = listener.accept().await { - tokio::spawn(async move { - if let Err(e) = serve(socket).await { - println!(" -> err={:?}", e); - } - }); - } - } -} - -async fn serve(socket: TcpStream) -> Result<(), Box> { - let mut connection = server::handshake(socket).await?; - println!("H2 connection bound"); - - while let Some(result) = connection.accept().await { - let (request, respond) = result?; - tokio::spawn(async move { - if let Err(e) = handle_request(request, respond).await { - println!("error while handling request: {}", e); - } - }); - } - - 
println!("~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~"); - Ok(()) -} - -async fn handle_request( - mut request: Request, - mut respond: SendResponse, -) -> Result<(), Box> { - println!("GOT request: {:?}", request); - - let body = request.body_mut(); - while let Some(data) = body.data().await { - let data = data?; - println!("<<<< recv {:?}", data); - let _ = body.flow_control().release_capacity(data.len()); - } - - let response = http::Response::new(()); - let mut send = respond.send_response(response, false)?; - println!(">>>> send"); - send.send_data(Bytes::from_static(b"hello "), false)?; - send.send_data(Bytes::from_static(b"world\n"), true)?; - - Ok(()) -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/LICENSE s390-tools-2.33.1/rust-vendor/h2/LICENSE --- s390-tools-2.31.0/rust-vendor/h2/LICENSE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2017 h2 authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/h2/README.md s390-tools-2.33.1/rust-vendor/h2/README.md --- s390-tools-2.31.0/rust-vendor/h2/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,71 +0,0 @@ -# H2 - -A Tokio aware, HTTP/2 client & server implementation for Rust. - -[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) -[![Crates.io](https://img.shields.io/crates/v/h2.svg)](https://crates.io/crates/h2) -[![Documentation](https://docs.rs/h2/badge.svg)][dox] - -More information about this crate can be found in the [crate documentation][dox]. - -[dox]: https://docs.rs/h2 - -## Features - -* Client and server HTTP/2 implementation. -* Implements the full HTTP/2 specification. -* Passes [h2spec](https://github.com/summerwind/h2spec). -* Focus on performance and correctness. -* Built on [Tokio](https://tokio.rs). - -## Non goals - -This crate is intended to only be an implementation of the HTTP/2 -specification. It does not handle: - -* Managing TCP connections -* HTTP 1.0 upgrade -* TLS -* Any feature not described by the HTTP/2 specification. - -This crate is now used by [hyper](https://github.com/hyperium/hyper), which will provide all of these features. - -## Usage - -To use `h2`, first add this to your `Cargo.toml`: - -```toml -[dependencies] -h2 = "0.3" -``` - -Next, add this to your crate: - -```rust -extern crate h2; - -use h2::server::Connection; - -fn main() { - // ... -} -``` - -## FAQ - -**How does h2 compare to [solicit] or [rust-http2]?** - -The h2 library has implemented more of the details of the HTTP/2 specification -than any other Rust library. 
It also passes the [h2spec] set of tests. The h2 -library is rapidly approaching "production ready" quality. - -Besides the above, Solicit is built on blocking I/O and does not appear to be -actively maintained. - -**Is this an embedded Java SQL database engine?** - -[No](https://www.h2database.com). - -[solicit]: https://github.com/mlalic/solicit -[rust-http2]: https://github.com/stepancheg/rust-http2 -[h2spec]: https://github.com/summerwind/h2spec diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/client.rs s390-tools-2.33.1/rust-vendor/h2/src/client.rs --- s390-tools-2.31.0/rust-vendor/h2/src/client.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/client.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1639 +0,0 @@ -//! Client implementation of the HTTP/2 protocol. -//! -//! # Getting started -//! -//! Running an HTTP/2 client requires the caller to establish the underlying -//! connection as well as get the connection to a state that is ready to begin -//! the HTTP/2 handshake. See [here](../index.html#handshake) for more -//! details. -//! -//! This could be as basic as using Tokio's [`TcpStream`] to connect to a remote -//! host, but usually it means using either ALPN or HTTP/1.1 protocol upgrades. -//! -//! Once a connection is obtained, it is passed to [`handshake`], which will -//! begin the [HTTP/2 handshake]. This returns a future that completes once -//! the handshake process is performed and HTTP/2 streams may be initialized. -//! -//! [`handshake`] uses default configuration values. There are a number of -//! settings that can be changed by using [`Builder`] instead. -//! -//! Once the handshake future completes, the caller is provided with a -//! [`Connection`] instance and a [`SendRequest`] instance. The [`Connection`] -//! instance is used to drive the connection (see [Managing the connection]). -//! The [`SendRequest`] instance is used to initialize new streams (see [Making -//! requests]). -//! -//! # Making requests -//! 
-//! Requests are made using the [`SendRequest`] handle provided by the handshake -//! future. Once a request is submitted, an HTTP/2 stream is initialized and -//! the request is sent to the server. -//! -//! A request body and request trailers are sent using [`SendRequest`] and the -//! server's response is returned once the [`ResponseFuture`] future completes. -//! Both the [`SendStream`] and [`ResponseFuture`] instances are returned by -//! [`SendRequest::send_request`] and are tied to the HTTP/2 stream -//! initialized by the sent request. -//! -//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2 -//! stream can be created, i.e. as long as the current number of active streams -//! is below [`MAX_CONCURRENT_STREAMS`]. If a new stream cannot be created, the -//! caller will be notified once an existing stream closes, freeing capacity for -//! the caller. The caller should use [`SendRequest::poll_ready`] to check for -//! capacity before sending a request to the server. -//! -//! [`SendRequest`] enforces the [`MAX_CONCURRENT_STREAMS`] setting. The user -//! must not send a request if `poll_ready` does not return `Ready`. Attempting -//! to do so will result in an [`Error`] being returned. -//! -//! # Managing the connection -//! -//! The [`Connection`] instance is used to manage connection state. The caller -//! is required to call [`Connection::poll`] in order to advance state. -//! [`SendRequest::send_request`] and other functions have no effect unless -//! [`Connection::poll`] is called. -//! -//! The [`Connection`] instance should only be dropped once [`Connection::poll`] -//! returns `Ready`. At this point, the underlying socket has been closed and no -//! further work needs to be done. -//! -//! The easiest way to ensure that the [`Connection`] instance gets polled is to -//! submit the [`Connection`] instance to an [executor]. The executor will then -//! manage polling the connection until the connection is complete. -//! 
Alternatively, the caller can call `poll` manually. -//! -//! # Example -//! -//! ```rust, no_run -//! -//! use h2::client; -//! -//! use http::{Request, Method}; -//! use std::error::Error; -//! use tokio::net::TcpStream; -//! -//! #[tokio::main] -//! pub async fn main() -> Result<(), Box> { -//! // Establish TCP connection to the server. -//! let tcp = TcpStream::connect("127.0.0.1:5928").await?; -//! let (h2, connection) = client::handshake(tcp).await?; -//! tokio::spawn(async move { -//! connection.await.unwrap(); -//! }); -//! -//! let mut h2 = h2.ready().await?; -//! // Prepare the HTTP request to send to the server. -//! let request = Request::builder() -//! .method(Method::GET) -//! .uri("https://www.example.com/") -//! .body(()) -//! .unwrap(); -//! -//! // Send the request. The second tuple item allows the caller -//! // to stream a request body. -//! let (response, _) = h2.send_request(request, true).unwrap(); -//! -//! let (head, mut body) = response.await?.into_parts(); -//! -//! println!("Received response: {:?}", head); -//! -//! // The `flow_control` handle allows the caller to manage -//! // flow control. -//! // -//! // Whenever data is received, the caller is responsible for -//! // releasing capacity back to the server once it has freed -//! // the data from memory. -//! let mut flow_control = body.flow_control().clone(); -//! -//! while let Some(chunk) = body.data().await { -//! let chunk = chunk?; -//! println!("RX: {:?}", chunk); -//! -//! // Let the server send more data. -//! let _ = flow_control.release_capacity(chunk.len()); -//! } -//! -//! Ok(()) -//! } -//! ``` -//! -//! [`TcpStream`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpStream.html -//! [`handshake`]: fn.handshake.html -//! [executor]: https://docs.rs/futures/0.1/futures/future/trait.Executor.html -//! [`SendRequest`]: struct.SendRequest.html -//! [`SendStream`]: ../struct.SendStream.html -//! [Making requests]: #making-requests -//! 
[Managing the connection]: #managing-the-connection -//! [`Connection`]: struct.Connection.html -//! [`Connection::poll`]: struct.Connection.html#method.poll -//! [`SendRequest::send_request`]: struct.SendRequest.html#method.send_request -//! [`MAX_CONCURRENT_STREAMS`]: http://httpwg.org/specs/rfc7540.html#SettingValues -//! [`SendRequest`]: struct.SendRequest.html -//! [`ResponseFuture`]: struct.ResponseFuture.html -//! [`SendRequest::poll_ready`]: struct.SendRequest.html#method.poll_ready -//! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader -//! [`Builder`]: struct.Builder.html -//! [`Error`]: ../struct.Error.html - -use crate::codec::{Codec, SendError, UserError}; -use crate::ext::Protocol; -use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId}; -use crate::proto::{self, Error}; -use crate::{FlowControl, PingPong, RecvStream, SendStream}; - -use bytes::{Buf, Bytes}; -use http::{uri, HeaderMap, Method, Request, Response, Version}; -use std::fmt; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::usize; -use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use tracing::Instrument; - -/// Initializes new HTTP/2 streams on a connection by sending a request. -/// -/// This type does no work itself. Instead, it is a handle to the inner -/// connection state held by [`Connection`]. If the associated connection -/// instance is dropped, all `SendRequest` functions will return [`Error`]. -/// -/// [`SendRequest`] instances are able to move to and operate on separate tasks -/// / threads than their associated [`Connection`] instance. Internally, there -/// is a buffer used to stage requests before they get written to the -/// connection. There is no guarantee that requests get written to the -/// connection in FIFO order as HTTP/2 prioritization logic can play a role. 
-/// -/// [`SendRequest`] implements [`Clone`], enabling the creation of many -/// instances that are backed by a single connection. -/// -/// See [module] level documentation for more details. -/// -/// [module]: index.html -/// [`Connection`]: struct.Connection.html -/// [`Clone`]: https://doc.rust-lang.org/std/clone/trait.Clone.html -/// [`Error`]: ../struct.Error.html -pub struct SendRequest { - inner: proto::Streams, - pending: Option, -} - -/// Returns a `SendRequest` instance once it is ready to send at least one -/// request. -#[derive(Debug)] -pub struct ReadySendRequest { - inner: Option>, -} - -/// Manages all state associated with an HTTP/2 client connection. -/// -/// A `Connection` is backed by an I/O resource (usually a TCP socket) and -/// implements the HTTP/2 client logic for that connection. It is responsible -/// for driving the internal state forward, performing the work requested of the -/// associated handles ([`SendRequest`], [`ResponseFuture`], [`SendStream`], -/// [`RecvStream`]). -/// -/// `Connection` values are created by calling [`handshake`]. Once a -/// `Connection` value is obtained, the caller must repeatedly call [`poll`] -/// until `Ready` is returned. The easiest way to do this is to submit the -/// `Connection` instance to an [executor]. 
-/// -/// [module]: index.html -/// [`handshake`]: fn.handshake.html -/// [`SendRequest`]: struct.SendRequest.html -/// [`ResponseFuture`]: struct.ResponseFuture.html -/// [`SendStream`]: ../struct.SendStream.html -/// [`RecvStream`]: ../struct.RecvStream.html -/// [`poll`]: #method.poll -/// [executor]: https://docs.rs/futures/0.1/futures/future/trait.Executor.html -/// -/// # Examples -/// -/// ``` -/// # use tokio::io::{AsyncRead, AsyncWrite}; -/// # use h2::client; -/// # use h2::client::*; -/// # -/// # async fn doc(my_io: T) -> Result<(), h2::Error> -/// # where T: AsyncRead + AsyncWrite + Send + Unpin + 'static, -/// # { -/// let (send_request, connection) = client::handshake(my_io).await?; -/// // Submit the connection handle to an executor. -/// tokio::spawn(async { connection.await.expect("connection failed"); }); -/// -/// // Now, use `send_request` to initialize HTTP/2 streams. -/// // ... -/// # Ok(()) -/// # } -/// # -/// # pub fn main() {} -/// ``` -#[must_use = "futures do nothing unless polled"] -pub struct Connection { - inner: proto::Connection, -} - -/// A future of an HTTP response. -#[derive(Debug)] -#[must_use = "futures do nothing unless polled"] -pub struct ResponseFuture { - inner: proto::OpaqueStreamRef, - push_promise_consumed: bool, -} - -/// A future of a pushed HTTP response. -/// -/// We have to differentiate between pushed and non pushed because of the spec -/// -/// > PUSH_PROMISE frames MUST only be sent on a peer-initiated stream -/// > that is in either the "open" or "half-closed (remote)" state. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless polled"] -pub struct PushedResponseFuture { - inner: ResponseFuture, -} - -/// A pushed response and corresponding request headers -#[derive(Debug)] -pub struct PushPromise { - /// The request headers - request: Request<()>, - - /// The pushed response - response: PushedResponseFuture, -} - -/// A stream of pushed responses and corresponding promised requests -#[derive(Debug)] -#[must_use = "streams do nothing unless polled"] -pub struct PushPromises { - inner: proto::OpaqueStreamRef, -} - -/// Builds client connections with custom configuration values. -/// -/// Methods can be chained in order to set the configuration values. -/// -/// The client is constructed by calling [`handshake`] and passing the I/O -/// handle that will back the HTTP/2 server. -/// -/// New instances of `Builder` are obtained via [`Builder::new`]. -/// -/// See function level documentation for details on the various client -/// configuration settings. -/// -/// [`Builder::new`]: struct.Builder.html#method.new -/// [`handshake`]: struct.Builder.html#method.handshake -/// -/// # Examples -/// -/// ``` -/// # use tokio::io::{AsyncRead, AsyncWrite}; -/// # use h2::client::*; -/// # use bytes::Bytes; -/// # -/// # async fn doc(my_io: T) -/// -> Result<((SendRequest, Connection)), h2::Error> -/// # { -/// // `client_fut` is a future representing the completion of the HTTP/2 -/// // handshake. -/// let client_fut = Builder::new() -/// .initial_window_size(1_000_000) -/// .max_concurrent_streams(1000) -/// .handshake(my_io); -/// # client_fut.await -/// # } -/// # -/// # pub fn main() {} -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - /// Time to keep locally reset streams around before reaping. - reset_stream_duration: Duration, - - /// Initial maximum number of locally initiated (send) streams. 
- /// After receiving a Settings frame from the remote peer, - /// the connection will overwrite this value with the - /// MAX_CONCURRENT_STREAMS specified in the frame. - initial_max_send_streams: usize, - - /// Initial target window size for new connections. - initial_target_connection_window_size: Option, - - /// Maximum amount of bytes to "buffer" for writing per stream. - max_send_buffer_size: usize, - - /// Maximum number of locally reset streams to keep at a time. - reset_stream_max: usize, - - /// Maximum number of remotely reset streams to allow in the pending - /// accept queue. - pending_accept_reset_stream_max: usize, - - /// Initial `Settings` frame to send as part of the handshake. - settings: Settings, - - /// The stream ID of the first (lowest) stream. Subsequent streams will use - /// monotonically increasing stream IDs. - stream_id: StreamId, -} - -#[derive(Debug)] -pub(crate) struct Peer; - -// ===== impl SendRequest ===== - -impl SendRequest -where - B: Buf, -{ - /// Returns `Ready` when the connection can initialize a new HTTP/2 - /// stream. - /// - /// This function must return `Ready` before `send_request` is called. When - /// `Poll::Pending` is returned, the task will be notified once the readiness - /// state changes. - /// - /// See [module] level docs for more details. - /// - /// [module]: index.html - pub fn poll_ready(&mut self, cx: &mut Context) -> Poll> { - ready!(self.inner.poll_pending_open(cx, self.pending.as_ref()))?; - self.pending = None; - Poll::Ready(Ok(())) - } - - /// Consumes `self`, returning a future that returns `self` back once it is - /// ready to send a request. - /// - /// This function should be called before calling `send_request`. - /// - /// This is a functional combinator for [`poll_ready`]. The returned future - /// will call `SendStream::poll_ready` until `Ready`, then returns `self` to - /// the caller. 
- /// - /// # Examples - /// - /// ```rust - /// # use h2::client::*; - /// # use http::*; - /// # async fn doc(send_request: SendRequest<&'static [u8]>) - /// # { - /// // First, wait until the `send_request` handle is ready to send a new - /// // request - /// let mut send_request = send_request.ready().await.unwrap(); - /// // Use `send_request` here. - /// # } - /// # pub fn main() {} - /// ``` - /// - /// See [module] level docs for more details. - /// - /// [`poll_ready`]: #method.poll_ready - /// [module]: index.html - pub fn ready(self) -> ReadySendRequest { - ReadySendRequest { inner: Some(self) } - } - - /// Sends a HTTP/2 request to the server. - /// - /// `send_request` initializes a new HTTP/2 stream on the associated - /// connection, then sends the given request using this new stream. Only the - /// request head is sent. - /// - /// On success, a [`ResponseFuture`] instance and [`SendStream`] instance - /// are returned. The [`ResponseFuture`] instance is used to get the - /// server's response and the [`SendStream`] instance is used to send a - /// request body or trailers to the server over the same HTTP/2 stream. - /// - /// To send a request body or trailers, set `end_of_stream` to `false`. - /// Then, use the returned [`SendStream`] instance to stream request body - /// chunks or send trailers. If `end_of_stream` is **not** set to `false` - /// then attempting to call [`SendStream::send_data`] or - /// [`SendStream::send_trailers`] will result in an error. - /// - /// If no request body or trailers are to be sent, set `end_of_stream` to - /// `true` and drop the returned [`SendStream`] instance. - /// - /// # A note on HTTP versions - /// - /// The provided `Request` will be encoded differently depending on the - /// value of its version field. If the version is set to 2.0, then the - /// request is encoded as per the specification recommends. 
- /// - /// If the version is set to a lower value, then the request is encoded to - /// preserve the characteristics of HTTP 1.1 and lower. Specifically, host - /// headers are permitted and the `:authority` pseudo header is not - /// included. - /// - /// The caller should always set the request's version field to 2.0 unless - /// specifically transmitting an HTTP 1.1 request over 2.0. - /// - /// # Examples - /// - /// Sending a request with no body - /// - /// ```rust - /// # use h2::client::*; - /// # use http::*; - /// # async fn doc(send_request: SendRequest<&'static [u8]>) - /// # { - /// // First, wait until the `send_request` handle is ready to send a new - /// // request - /// let mut send_request = send_request.ready().await.unwrap(); - /// // Prepare the HTTP request to send to the server. - /// let request = Request::get("https://www.example.com/") - /// .body(()) - /// .unwrap(); - /// - /// // Send the request to the server. Since we are not sending a - /// // body or trailers, we can drop the `SendStream` instance. - /// let (response, _) = send_request.send_request(request, true).unwrap(); - /// let response = response.await.unwrap(); - /// // Process the response - /// # } - /// # pub fn main() {} - /// ``` - /// - /// Sending a request with a body and trailers - /// - /// ```rust - /// # use h2::client::*; - /// # use http::*; - /// # async fn doc(send_request: SendRequest<&'static [u8]>) - /// # { - /// // First, wait until the `send_request` handle is ready to send a new - /// // request - /// let mut send_request = send_request.ready().await.unwrap(); - /// - /// // Prepare the HTTP request to send to the server. - /// let request = Request::get("https://www.example.com/") - /// .body(()) - /// .unwrap(); - /// - /// // Send the request to the server. If we are not sending a - /// // body or trailers, we can drop the `SendStream` instance. 
- /// let (response, mut send_stream) = send_request - /// .send_request(request, false).unwrap(); - /// - /// // At this point, one option would be to wait for send capacity. - /// // Doing so would allow us to not hold data in memory that - /// // cannot be sent. However, this is not a requirement, so this - /// // example will skip that step. See `SendStream` documentation - /// // for more details. - /// send_stream.send_data(b"hello", false).unwrap(); - /// send_stream.send_data(b"world", false).unwrap(); - /// - /// // Send the trailers. - /// let mut trailers = HeaderMap::new(); - /// trailers.insert( - /// header::HeaderName::from_bytes(b"my-trailer").unwrap(), - /// header::HeaderValue::from_bytes(b"hello").unwrap()); - /// - /// send_stream.send_trailers(trailers).unwrap(); - /// - /// let response = response.await.unwrap(); - /// // Process the response - /// # } - /// # pub fn main() {} - /// ``` - /// - /// [`ResponseFuture`]: struct.ResponseFuture.html - /// [`SendStream`]: ../struct.SendStream.html - /// [`SendStream::send_data`]: ../struct.SendStream.html#method.send_data - /// [`SendStream::send_trailers`]: ../struct.SendStream.html#method.send_trailers - pub fn send_request( - &mut self, - request: Request<()>, - end_of_stream: bool, - ) -> Result<(ResponseFuture, SendStream), crate::Error> { - self.inner - .send_request(request, end_of_stream, self.pending.as_ref()) - .map_err(Into::into) - .map(|(stream, is_full)| { - if stream.is_pending_open() && is_full { - // Only prevent sending another request when the request queue - // is not full. - self.pending = Some(stream.clone_to_opaque()); - } - - let response = ResponseFuture { - inner: stream.clone_to_opaque(), - push_promise_consumed: false, - }; - - let stream = SendStream::new(stream); - - (response, stream) - }) - } - - /// Returns whether the [extended CONNECT protocol][1] is enabled or not. 
- /// - /// This setting is configured by the server peer by sending the - /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. - /// This method returns the currently acknowledged value received from the - /// remote. - /// - /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 - /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3 - pub fn is_extended_connect_protocol_enabled(&self) -> bool { - self.inner.is_extended_connect_protocol_enabled() - } -} - -impl fmt::Debug for SendRequest -where - B: Buf, -{ - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("SendRequest").finish() - } -} - -impl Clone for SendRequest -where - B: Buf, -{ - fn clone(&self) -> Self { - SendRequest { - inner: self.inner.clone(), - pending: None, - } - } -} - -#[cfg(feature = "unstable")] -impl SendRequest -where - B: Buf, -{ - /// Returns the number of active streams. - /// - /// An active stream is a stream that has not yet transitioned to a closed - /// state. - pub fn num_active_streams(&self) -> usize { - self.inner.num_active_streams() - } - - /// Returns the number of streams that are held in memory. - /// - /// A wired stream is a stream that is either active or is closed but must - /// stay in memory for some reason. For example, there are still outstanding - /// userspace handles pointing to the slot. 
- pub fn num_wired_streams(&self) -> usize { - self.inner.num_wired_streams() - } -} - -// ===== impl ReadySendRequest ===== - -impl Future for ReadySendRequest -where - B: Buf, -{ - type Output = Result, crate::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match &mut self.inner { - Some(send_request) => { - ready!(send_request.poll_ready(cx))?; - } - None => panic!("called `poll` after future completed"), - } - - Poll::Ready(Ok(self.inner.take().unwrap())) - } -} - -// ===== impl Builder ===== - -impl Builder { - /// Returns a new client builder instance initialized with default - /// configuration values. - /// - /// Configuration methods can be chained on the return value. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .initial_window_size(1_000_000) - /// .max_concurrent_streams(1000) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn new() -> Builder { - Builder { - max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, - reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), - reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, - pending_accept_reset_stream_max: proto::DEFAULT_REMOTE_RESET_STREAM_MAX, - initial_target_connection_window_size: None, - initial_max_send_streams: usize::MAX, - settings: Default::default(), - stream_id: 1.into(), - } - } - - /// Indicates the initial window size (in octets) for stream-level - /// flow control for received data. - /// - /// The initial window of a stream is used as part of flow control. For more - /// details, see [`FlowControl`]. 
- /// - /// The default value is 65,535. - /// - /// [`FlowControl`]: ../struct.FlowControl.html - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .initial_window_size(1_000_000) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn initial_window_size(&mut self, size: u32) -> &mut Self { - self.settings.set_initial_window_size(Some(size)); - self - } - - /// Indicates the initial window size (in octets) for connection-level flow control - /// for received data. - /// - /// The initial window of a connection is used as part of flow control. For more details, - /// see [`FlowControl`]. - /// - /// The default value is 65,535. - /// - /// [`FlowControl`]: ../struct.FlowControl.html - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .initial_connection_window_size(1_000_000) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn initial_connection_window_size(&mut self, size: u32) -> &mut Self { - self.initial_target_connection_window_size = Some(size); - self - } - - /// Indicates the size (in octets) of the largest HTTP/2 frame payload that the - /// configured client is able to accept. 
- /// - /// The sender may send data frames that are **smaller** than this value, - /// but any data larger than `max` will be broken up into multiple `DATA` - /// frames. - /// - /// The value **must** be between 16,384 and 16,777,215. The default value is 16,384. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .max_frame_size(1_000_000) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - /// - /// # Panics - /// - /// This function panics if `max` is not within the legal range specified - /// above. - pub fn max_frame_size(&mut self, max: u32) -> &mut Self { - self.settings.set_max_frame_size(Some(max)); - self - } - - /// Sets the max size of received header frames. - /// - /// This advisory setting informs a peer of the maximum size of header list - /// that the sender is prepared to accept, in octets. The value is based on - /// the uncompressed size of header fields, including the length of the name - /// and value in octets plus an overhead of 32 octets for each header field. - /// - /// This setting is also used to limit the maximum amount of data that is - /// buffered to decode HEADERS frames. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. 
- /// let client_fut = Builder::new() - /// .max_header_list_size(16 * 1024) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { - self.settings.set_max_header_list_size(Some(max)); - self - } - - /// Sets the maximum number of concurrent streams. - /// - /// The maximum concurrent streams setting only controls the maximum number - /// of streams that can be initiated by the remote peer. In other words, - /// when this setting is set to 100, this does not limit the number of - /// concurrent streams that can be created by the caller. - /// - /// It is recommended that this value be no smaller than 100, so as to not - /// unnecessarily limit parallelism. However, any value is legal, including - /// 0. If `max` is set to 0, then the remote will not be permitted to - /// initiate streams. - /// - /// Note that streams in the reserved state, i.e., push promises that have - /// been reserved but the stream has not started, do not count against this - /// setting. - /// - /// Also note that if the remote *does* exceed the value set here, it is not - /// a protocol level error. Instead, the `h2` library will immediately reset - /// the stream. - /// - /// See [Section 5.1.2] in the HTTP/2 spec for more details. - /// - /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. 
- /// let client_fut = Builder::new() - /// .max_concurrent_streams(1000) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn max_concurrent_streams(&mut self, max: u32) -> &mut Self { - self.settings.set_max_concurrent_streams(Some(max)); - self - } - - /// Sets the initial maximum of locally initiated (send) streams. - /// - /// The initial settings will be overwritten by the remote peer when - /// the Settings frame is received. The new value will be set to the - /// `max_concurrent_streams()` from the frame. - /// - /// This setting prevents the caller from exceeding this number of - /// streams that are counted towards the concurrency limit. - /// - /// Sending streams past the limit returned by the peer will be treated - /// as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM. - /// - /// See [Section 5.1.2] in the HTTP/2 spec for more details. - /// - /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .initial_max_send_streams(1000) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn initial_max_send_streams(&mut self, initial: usize) -> &mut Self { - self.initial_max_send_streams = initial; - self - } - - /// Sets the maximum number of concurrent locally reset streams. - /// - /// When a stream is explicitly reset, the HTTP/2 specification requires - /// that any further frames received for that stream must be ignored for - /// "some time". 
- /// - /// In order to satisfy the specification, internal state must be maintained - /// to implement the behavior. This state grows linearly with the number of - /// streams that are locally reset. - /// - /// The `max_concurrent_reset_streams` setting configures sets an upper - /// bound on the amount of state that is maintained. When this max value is - /// reached, the oldest reset stream is purged from memory. - /// - /// Once the stream has been fully purged from memory, any additional frames - /// received for that stream will result in a connection level protocol - /// error, forcing the connection to terminate. - /// - /// The default value is 10. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .max_concurrent_reset_streams(1000) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { - self.reset_stream_max = max; - self - } - - /// Sets the duration to remember locally reset streams. - /// - /// When a stream is explicitly reset, the HTTP/2 specification requires - /// that any further frames received for that stream must be ignored for - /// "some time". - /// - /// In order to satisfy the specification, internal state must be maintained - /// to implement the behavior. This state grows linearly with the number of - /// streams that are locally reset. - /// - /// The `reset_stream_duration` setting configures the max amount of time - /// this state will be maintained in memory. Once the duration elapses, the - /// stream state is purged from memory. 
- /// - /// Once the stream has been fully purged from memory, any additional frames - /// received for that stream will result in a connection level protocol - /// error, forcing the connection to terminate. - /// - /// The default value is 30 seconds. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use std::time::Duration; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .reset_stream_duration(Duration::from_secs(10)) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn reset_stream_duration(&mut self, dur: Duration) -> &mut Self { - self.reset_stream_duration = dur; - self - } - - /// Sets the maximum number of pending-accept remotely-reset streams. - /// - /// Streams that have been received by the peer, but not accepted by the - /// user, can also receive a RST_STREAM. This is a legitimate pattern: one - /// could send a request and then shortly after, realize it is not needed, - /// sending a CANCEL. - /// - /// However, since those streams are now "closed", they don't count towards - /// the max concurrent streams. So, they will sit in the accept queue, - /// using memory. - /// - /// When the number of remotely-reset streams sitting in the pending-accept - /// queue reaches this maximum value, a connection error with the code of - /// `ENHANCE_YOUR_CALM` will be sent to the peer, and returned by the - /// `Future`. - /// - /// The default value is currently 20, but could change. 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .max_pending_accept_reset_streams(100) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn max_pending_accept_reset_streams(&mut self, max: usize) -> &mut Self { - self.pending_accept_reset_stream_max = max; - self - } - - /// Sets the maximum send buffer size per stream. - /// - /// Once a stream has buffered up to (or over) the maximum, the stream's - /// flow control will not "poll" additional capacity. Once bytes for the - /// stream have been written to the connection, the send buffer capacity - /// will be freed up again. - /// - /// The default is currently ~400KB, but may change. - /// - /// # Panics - /// - /// This function panics if `max` is larger than `u32::MAX`. - pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self { - assert!(max <= std::u32::MAX as usize); - self.max_send_buffer_size = max; - self - } - - /// Enables or disables server push promises. - /// - /// This value is included in the initial SETTINGS handshake. - /// Setting this value to value to - /// false in the initial SETTINGS handshake guarantees that the remote server - /// will never send a push promise. - /// - /// This setting can be changed during the life of a single HTTP/2 - /// connection by sending another settings frame updating the value. - /// - /// Default value: `true`. 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use std::time::Duration; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .enable_push(false) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn enable_push(&mut self, enabled: bool) -> &mut Self { - self.settings.set_enable_push(enabled); - self - } - - /// Sets the header table size. - /// - /// This setting informs the peer of the maximum size of the header compression - /// table used to encode header blocks, in octets. The encoder may select any value - /// equal to or less than the header table size specified by the sender. - /// - /// The default value is 4,096. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .header_table_size(1_000_000) - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn header_table_size(&mut self, size: u32) -> &mut Self { - self.settings.set_header_table_size(Some(size)); - self - } - - /// Sets the first stream ID to something other than 1. - #[cfg(feature = "unstable")] - pub fn initial_stream_id(&mut self, stream_id: u32) -> &mut Self { - self.stream_id = stream_id.into(); - assert!( - self.stream_id.is_client_initiated(), - "stream id must be odd" - ); - self - } - - /// Creates a new configured HTTP/2 client backed by `io`. 
- /// - /// It is expected that `io` already be in an appropriate state to commence - /// the [HTTP/2 handshake]. The handshake is completed once both the connection - /// preface and the initial settings frame is sent by the client. - /// - /// The handshake future does not wait for the initial settings frame from the - /// server. - /// - /// Returns a future which resolves to the [`Connection`] / [`SendRequest`] - /// tuple once the HTTP/2 handshake has been completed. - /// - /// This function also allows the caller to configure the send payload data - /// type. See [Outbound data type] for more details. - /// - /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader - /// [`Connection`]: struct.Connection.html - /// [`SendRequest`]: struct.SendRequest.html - /// [Outbound data type]: ../index.html#outbound-data-type. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # use bytes::Bytes; - /// # - /// # async fn doc(my_io: T) - /// -> Result<((SendRequest, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let client_fut = Builder::new() - /// .handshake(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - /// - /// Configures the send-payload data type. In this case, the outbound data - /// type will be `&'static [u8]`. - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::client::*; - /// # - /// # async fn doc(my_io: T) - /// # -> Result<((SendRequest<&'static [u8]>, Connection)), h2::Error> - /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2 - /// // handshake. 
- /// let client_fut = Builder::new() - /// .handshake::<_, &'static [u8]>(my_io); - /// # client_fut.await - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn handshake( - &self, - io: T, - ) -> impl Future, Connection), crate::Error>> - where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, - { - Connection::handshake2(io, self.clone()) - } -} - -impl Default for Builder { - fn default() -> Builder { - Builder::new() - } -} - -/// Creates a new configured HTTP/2 client with default configuration -/// values backed by `io`. -/// -/// It is expected that `io` already be in an appropriate state to commence -/// the [HTTP/2 handshake]. See [Handshake] for more details. -/// -/// Returns a future which resolves to the [`Connection`] / [`SendRequest`] -/// tuple once the HTTP/2 handshake has been completed. The returned -/// [`Connection`] instance will be using default configuration values. Use -/// [`Builder`] to customize the configuration values used by a [`Connection`] -/// instance. -/// -/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader -/// [Handshake]: ../index.html#handshake -/// [`Connection`]: struct.Connection.html -/// [`SendRequest`]: struct.SendRequest.html -/// -/// # Examples -/// -/// ``` -/// # use tokio::io::{AsyncRead, AsyncWrite}; -/// # use h2::client; -/// # use h2::client::*; -/// # -/// # async fn doc(my_io: T) -> Result<(), h2::Error> -/// # { -/// let (send_request, connection) = client::handshake(my_io).await?; -/// // The HTTP/2 handshake has completed, now start polling -/// // `connection` and use `send_request` to send requests to the -/// // server. 
-/// # Ok(()) -/// # } -/// # -/// # pub fn main() {} -/// ``` -pub async fn handshake(io: T) -> Result<(SendRequest, Connection), crate::Error> -where - T: AsyncRead + AsyncWrite + Unpin, -{ - let builder = Builder::new(); - builder - .handshake(io) - .instrument(tracing::trace_span!("client_handshake")) - .await -} - -// ===== impl Connection ===== - -async fn bind_connection(io: &mut T) -> Result<(), crate::Error> -where - T: AsyncRead + AsyncWrite + Unpin, -{ - tracing::debug!("binding client connection"); - - let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; - io.write_all(msg).await.map_err(crate::Error::from_io)?; - - tracing::debug!("client connection bound"); - - Ok(()) -} - -impl Connection -where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, -{ - async fn handshake2( - mut io: T, - builder: Builder, - ) -> Result<(SendRequest, Connection), crate::Error> { - bind_connection(&mut io).await?; - - // Create the codec - let mut codec = Codec::new(io); - - if let Some(max) = builder.settings.max_frame_size() { - codec.set_max_recv_frame_size(max as usize); - } - - if let Some(max) = builder.settings.max_header_list_size() { - codec.set_max_recv_header_list_size(max as usize); - } - - // Send initial settings frame - codec - .buffer(builder.settings.clone().into()) - .expect("invalid SETTINGS frame"); - - let inner = proto::Connection::new( - codec, - proto::Config { - next_stream_id: builder.stream_id, - initial_max_send_streams: builder.initial_max_send_streams, - max_send_buffer_size: builder.max_send_buffer_size, - reset_stream_duration: builder.reset_stream_duration, - reset_stream_max: builder.reset_stream_max, - remote_reset_stream_max: builder.pending_accept_reset_stream_max, - settings: builder.settings.clone(), - }, - ); - let send_request = SendRequest { - inner: inner.streams().clone(), - pending: None, - }; - - let mut connection = Connection { inner }; - if let Some(sz) = builder.initial_target_connection_window_size { - 
connection.set_target_window_size(sz); - } - - Ok((send_request, connection)) - } - - /// Sets the target window size for the whole connection. - /// - /// If `size` is greater than the current value, then a `WINDOW_UPDATE` - /// frame will be immediately sent to the remote, increasing the connection - /// level window by `size - current_value`. - /// - /// If `size` is less than the current value, nothing will happen - /// immediately. However, as window capacity is released by - /// [`FlowControl`] instances, no `WINDOW_UPDATE` frames will be sent - /// out until the number of "in flight" bytes drops below `size`. - /// - /// The default value is 65,535. - /// - /// See [`FlowControl`] documentation for more details. - /// - /// [`FlowControl`]: ../struct.FlowControl.html - /// [library level]: ../index.html#flow-control - pub fn set_target_window_size(&mut self, size: u32) { - assert!(size <= proto::MAX_WINDOW_SIZE); - self.inner.set_target_window_size(size); - } - - /// Set a new `INITIAL_WINDOW_SIZE` setting (in octets) for stream-level - /// flow control for received data. - /// - /// The `SETTINGS` will be sent to the remote, and only applied once the - /// remote acknowledges the change. - /// - /// This can be used to increase or decrease the window size for existing - /// streams. - /// - /// # Errors - /// - /// Returns an error if a previous call is still pending acknowledgement - /// from the remote endpoint. - pub fn set_initial_window_size(&mut self, size: u32) -> Result<(), crate::Error> { - assert!(size <= proto::MAX_WINDOW_SIZE); - self.inner.set_initial_window_size(size)?; - Ok(()) - } - - /// Takes a `PingPong` instance from the connection. - /// - /// # Note - /// - /// This may only be called once. Calling multiple times will return `None`. - pub fn ping_pong(&mut self) -> Option { - self.inner.take_user_pings().map(PingPong::new) - } - - /// Returns the maximum number of concurrent streams that may be initiated - /// by this client. 
- /// - /// This limit is configured by the server peer by sending the - /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame. - /// This method returns the currently acknowledged value received from the - /// remote. - /// - /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 - pub fn max_concurrent_send_streams(&self) -> usize { - self.inner.max_send_streams() - } - /// Returns the maximum number of concurrent streams that may be initiated - /// by the server on this connection. - /// - /// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS` - /// parameter][1] sent in a `SETTINGS` frame that has been - /// acknowledged by the remote peer. The value to be sent is configured by - /// the [`Builder::max_concurrent_streams`][2] method before handshaking - /// with the remote peer. - /// - /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 - /// [2]: ../struct.Builder.html#method.max_concurrent_streams - pub fn max_concurrent_recv_streams(&self) -> usize { - self.inner.max_recv_streams() - } -} - -impl Future for Connection -where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, -{ - type Output = Result<(), crate::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.inner.maybe_close_connection_if_no_streams(); - self.inner.poll(cx).map_err(Into::into) - } -} - -impl fmt::Debug for Connection -where - T: AsyncRead + AsyncWrite, - T: fmt::Debug, - B: fmt::Debug + Buf, -{ - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self.inner, fmt) - } -} - -// ===== impl ResponseFuture ===== - -impl Future for ResponseFuture { - type Output = Result, crate::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let (parts, _) = ready!(self.inner.poll_response(cx))?.into_parts(); - let body = RecvStream::new(FlowControl::new(self.inner.clone())); - - Poll::Ready(Ok(Response::from_parts(parts, body))) - } -} - -impl ResponseFuture { - /// Returns 
the stream ID of the response stream. - /// - /// # Panics - /// - /// If the lock on the stream store has been poisoned. - pub fn stream_id(&self) -> crate::StreamId { - crate::StreamId::from_internal(self.inner.stream_id()) - } - /// Returns a stream of PushPromises - /// - /// # Panics - /// - /// If this method has been called before - /// or the stream was itself was pushed - pub fn push_promises(&mut self) -> PushPromises { - if self.push_promise_consumed { - panic!("Reference to push promises stream taken!"); - } - self.push_promise_consumed = true; - PushPromises { - inner: self.inner.clone(), - } - } -} - -// ===== impl PushPromises ===== - -impl PushPromises { - /// Get the next `PushPromise`. - pub async fn push_promise(&mut self) -> Option> { - futures_util::future::poll_fn(move |cx| self.poll_push_promise(cx)).await - } - - #[doc(hidden)] - pub fn poll_push_promise( - &mut self, - cx: &mut Context<'_>, - ) -> Poll>> { - match self.inner.poll_pushed(cx) { - Poll::Ready(Some(Ok((request, response)))) => { - let response = PushedResponseFuture { - inner: ResponseFuture { - inner: response, - push_promise_consumed: false, - }, - }; - Poll::Ready(Some(Ok(PushPromise { request, response }))) - } - Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.into()))), - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - } - } -} - -#[cfg(feature = "stream")] -impl futures_core::Stream for PushPromises { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_push_promise(cx) - } -} - -// ===== impl PushPromise ===== - -impl PushPromise { - /// Returns a reference to the push promise's request headers. - pub fn request(&self) -> &Request<()> { - &self.request - } - - /// Returns a mutable reference to the push promise's request headers. 
- pub fn request_mut(&mut self) -> &mut Request<()> { - &mut self.request - } - - /// Consumes `self`, returning the push promise's request headers and - /// response future. - pub fn into_parts(self) -> (Request<()>, PushedResponseFuture) { - (self.request, self.response) - } -} - -// ===== impl PushedResponseFuture ===== - -impl Future for PushedResponseFuture { - type Output = Result, crate::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.inner).poll(cx) - } -} - -impl PushedResponseFuture { - /// Returns the stream ID of the response stream. - /// - /// # Panics - /// - /// If the lock on the stream store has been poisoned. - pub fn stream_id(&self) -> crate::StreamId { - self.inner.stream_id() - } -} - -// ===== impl Peer ===== - -impl Peer { - pub fn convert_send_message( - id: StreamId, - request: Request<()>, - protocol: Option, - end_of_stream: bool, - ) -> Result { - use http::request::Parts; - - let ( - Parts { - method, - uri, - headers, - version, - .. - }, - _, - ) = request.into_parts(); - - let is_connect = method == Method::CONNECT; - - // Build the set pseudo header set. All requests will include `method` - // and `path`. - let mut pseudo = Pseudo::request(method, uri, protocol); - - if pseudo.scheme.is_none() { - // If the scheme is not set, then there are a two options. - // - // 1) Authority is not set. In this case, a request was issued with - // a relative URI. This is permitted **only** when forwarding - // HTTP 1.x requests. If the HTTP version is set to 2.0, then - // this is an error. - // - // 2) Authority is set, then the HTTP method *must* be CONNECT. - // - // It is not possible to have a scheme but not an authority set (the - // `http` crate does not allow it). - // - if pseudo.authority.is_none() { - if version == Version::HTTP_2 { - return Err(UserError::MissingUriSchemeAndAuthority.into()); - } else { - // This is acceptable as per the above comment. 
However, - // HTTP/2 requires that a scheme is set. Since we are - // forwarding an HTTP 1.1 request, the scheme is set to - // "http". - pseudo.set_scheme(uri::Scheme::HTTP); - } - } else if !is_connect { - // TODO: Error - } - } - - // Create the HEADERS frame - let mut frame = Headers::new(id, pseudo, headers); - - if end_of_stream { - frame.set_end_stream() - } - - Ok(frame) - } -} - -impl proto::Peer for Peer { - type Poll = Response<()>; - - const NAME: &'static str = "Client"; - - fn r#dyn() -> proto::DynPeer { - proto::DynPeer::Client - } - - fn is_server() -> bool { - false - } - - fn convert_poll_message( - pseudo: Pseudo, - fields: HeaderMap, - stream_id: StreamId, - ) -> Result { - let mut b = Response::builder(); - - b = b.version(Version::HTTP_2); - - if let Some(status) = pseudo.status { - b = b.status(status); - } - - let mut response = match b.body(()) { - Ok(response) => response, - Err(_) => { - // TODO: Should there be more specialized handling for different - // kinds of errors - return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR)); - } - }; - - *response.headers_mut() = fields; - - Ok(response) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/codec/error.rs s390-tools-2.33.1/rust-vendor/h2/src/codec/error.rs --- s390-tools-2.31.0/rust-vendor/h2/src/codec/error.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/codec/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,102 +0,0 @@ -use crate::proto::Error; - -use std::{error, fmt, io}; - -/// Errors caused by sending a message -#[derive(Debug)] -pub enum SendError { - Connection(Error), - User(UserError), -} - -/// Errors caused by users of the library -#[derive(Debug)] -pub enum UserError { - /// The stream ID is no longer accepting frames. - InactiveStreamId, - - /// The stream is not currently expecting a frame of this type. 
- UnexpectedFrameType, - - /// The payload size is too big - PayloadTooBig, - - /// The application attempted to initiate too many streams to remote. - Rejected, - - /// The released capacity is larger than claimed capacity. - ReleaseCapacityTooBig, - - /// The stream ID space is overflowed. - /// - /// A new connection is needed. - OverflowedStreamId, - - /// Illegal headers, such as connection-specific headers. - MalformedHeaders, - - /// Request submitted with relative URI. - MissingUriSchemeAndAuthority, - - /// Calls `SendResponse::poll_reset` after having called `send_response`. - PollResetAfterSendResponse, - - /// Calls `PingPong::send_ping` before receiving a pong. - SendPingWhilePending, - - /// Tries to update local SETTINGS while ACK has not been received. - SendSettingsWhilePending, - - /// Tries to send push promise to peer who has disabled server push - PeerDisabledServerPush, -} - -// ===== impl SendError ===== - -impl error::Error for SendError {} - -impl fmt::Display for SendError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - Self::Connection(ref e) => e.fmt(fmt), - Self::User(ref e) => e.fmt(fmt), - } - } -} - -impl From for SendError { - fn from(src: io::Error) -> Self { - Self::Connection(src.into()) - } -} - -impl From for SendError { - fn from(src: UserError) -> Self { - SendError::User(src) - } -} - -// ===== impl UserError ===== - -impl error::Error for UserError {} - -impl fmt::Display for UserError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::UserError::*; - - fmt.write_str(match *self { - InactiveStreamId => "inactive stream", - UnexpectedFrameType => "unexpected frame type", - PayloadTooBig => "payload too big", - Rejected => "rejected", - ReleaseCapacityTooBig => "release capacity too big", - OverflowedStreamId => "stream ID overflowed", - MalformedHeaders => "malformed headers", - MissingUriSchemeAndAuthority => "request URI missing scheme and authority", - 
PollResetAfterSendResponse => "poll_reset after send_response is illegal", - SendPingWhilePending => "send_ping before received previous pong", - SendSettingsWhilePending => "sending SETTINGS before received previous ACK", - PeerDisabledServerPush => "sending PUSH_PROMISE to peer who disabled server push", - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/codec/framed_read.rs s390-tools-2.33.1/rust-vendor/h2/src/codec/framed_read.rs --- s390-tools-2.31.0/rust-vendor/h2/src/codec/framed_read.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/codec/framed_read.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,421 +0,0 @@ -use crate::frame::{self, Frame, Kind, Reason}; -use crate::frame::{ - DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE, -}; -use crate::proto::Error; - -use crate::hpack; - -use futures_core::Stream; - -use bytes::BytesMut; - -use std::io; - -use std::pin::Pin; -use std::task::{Context, Poll}; -use tokio::io::AsyncRead; -use tokio_util::codec::FramedRead as InnerFramedRead; -use tokio_util::codec::{LengthDelimitedCodec, LengthDelimitedCodecError}; - -// 16 MB "sane default" taken from golang http2 -const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: usize = 16 << 20; - -#[derive(Debug)] -pub struct FramedRead { - inner: InnerFramedRead, - - // hpack decoder state - hpack: hpack::Decoder, - - max_header_list_size: usize, - - partial: Option, -} - -/// Partially loaded headers frame -#[derive(Debug)] -struct Partial { - /// Empty frame - frame: Continuable, - - /// Partial header payload - buf: BytesMut, -} - -#[derive(Debug)] -enum Continuable { - Headers(frame::Headers), - PushPromise(frame::PushPromise), -} - -impl FramedRead { - pub fn new(inner: InnerFramedRead) -> FramedRead { - FramedRead { - inner, - hpack: hpack::Decoder::new(DEFAULT_SETTINGS_HEADER_TABLE_SIZE), - max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE, - partial: None, - } - } - - pub fn get_ref(&self) -> &T { - 
self.inner.get_ref() - } - - pub fn get_mut(&mut self) -> &mut T { - self.inner.get_mut() - } - - /// Returns the current max frame size setting - #[cfg(feature = "unstable")] - #[inline] - pub fn max_frame_size(&self) -> usize { - self.inner.decoder().max_frame_length() - } - - /// Updates the max frame size setting. - /// - /// Must be within 16,384 and 16,777,215. - #[inline] - pub fn set_max_frame_size(&mut self, val: usize) { - assert!(DEFAULT_MAX_FRAME_SIZE as usize <= val && val <= MAX_MAX_FRAME_SIZE as usize); - self.inner.decoder_mut().set_max_frame_length(val) - } - - /// Update the max header list size setting. - #[inline] - pub fn set_max_header_list_size(&mut self, val: usize) { - self.max_header_list_size = val; - } - - /// Update the header table size setting. - #[inline] - pub fn set_header_table_size(&mut self, val: usize) { - self.hpack.queue_size_update(val); - } -} - -/// Decodes a frame. -/// -/// This method is intentionally de-generified and outlined because it is very large. -fn decode_frame( - hpack: &mut hpack::Decoder, - max_header_list_size: usize, - partial_inout: &mut Option, - mut bytes: BytesMut, -) -> Result, Error> { - let span = tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len()); - let _e = span.enter(); - - tracing::trace!("decoding frame from {}B", bytes.len()); - - // Parse the head - let head = frame::Head::parse(&bytes); - - if partial_inout.is_some() && head.kind() != Kind::Continuation { - proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind()); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - let kind = head.kind(); - - tracing::trace!(frame.kind = ?kind); - - macro_rules! 
header_block { - ($frame:ident, $head:ident, $bytes:ident) => ({ - // Drop the frame header - // TODO: Change to drain: carllerche/bytes#130 - let _ = $bytes.split_to(frame::HEADER_LEN); - - // Parse the header frame w/o parsing the payload - let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) { - Ok(res) => res, - Err(frame::Error::InvalidDependencyId) => { - proto_err!(stream: "invalid HEADERS dependency ID"); - // A stream cannot depend on itself. An endpoint MUST - // treat this as a stream error (Section 5.4.2) of type - // `PROTOCOL_ERROR`. - return Err(Error::library_reset($head.stream_id(), Reason::PROTOCOL_ERROR)); - }, - Err(e) => { - proto_err!(conn: "failed to load frame; err={:?}", e); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - }; - - let is_end_headers = frame.is_end_headers(); - - // Load the HPACK encoded headers - match frame.load_hpack(&mut payload, max_header_list_size, hpack) { - Ok(_) => {}, - Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, - Err(frame::Error::MalformedMessage) => { - let id = $head.stream_id(); - proto_err!(stream: "malformed header block; stream={:?}", id); - return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); - }, - Err(e) => { - proto_err!(conn: "failed HPACK decoding; err={:?}", e); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - } - - if is_end_headers { - frame.into() - } else { - tracing::trace!("loaded partial header block"); - // Defer returning the frame - *partial_inout = Some(Partial { - frame: Continuable::$frame(frame), - buf: payload, - }); - - return Ok(None); - } - }); - } - - let frame = match kind { - Kind::Settings => { - let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]); - - res.map_err(|e| { - proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e); - Error::library_go_away(Reason::PROTOCOL_ERROR) - })? 
- .into() - } - Kind::Ping => { - let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]); - - res.map_err(|e| { - proto_err!(conn: "failed to load PING frame; err={:?}", e); - Error::library_go_away(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::WindowUpdate => { - let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]); - - res.map_err(|e| { - proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e); - Error::library_go_away(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::Data => { - let _ = bytes.split_to(frame::HEADER_LEN); - let res = frame::Data::load(head, bytes.freeze()); - - // TODO: Should this always be connection level? Probably not... - res.map_err(|e| { - proto_err!(conn: "failed to load DATA frame; err={:?}", e); - Error::library_go_away(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::Headers => header_block!(Headers, head, bytes), - Kind::Reset => { - let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load RESET frame; err={:?}", e); - Error::library_go_away(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::GoAway => { - let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e); - Error::library_go_away(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::PushPromise => header_block!(PushPromise, head, bytes), - Kind::Priority => { - if head.stream_id() == 0 { - // Invalid stream identifier - proto_err!(conn: "invalid stream ID 0"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { - Ok(frame) => frame.into(), - Err(frame::Error::InvalidDependencyId) => { - // A stream cannot depend on itself. An endpoint MUST - // treat this as a stream error (Section 5.4.2) of type - // `PROTOCOL_ERROR`. 
- let id = head.stream_id(); - proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id); - return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); - } - Err(e) => { - proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - } - } - Kind::Continuation => { - let is_end_headers = (head.flag() & 0x4) == 0x4; - - let mut partial = match partial_inout.take() { - Some(partial) => partial, - None => { - proto_err!(conn: "received unexpected CONTINUATION frame"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - }; - - // The stream identifiers must match - if partial.frame.stream_id() != head.stream_id() { - proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - // Extend the buf - if partial.buf.is_empty() { - partial.buf = bytes.split_off(frame::HEADER_LEN); - } else { - if partial.frame.is_over_size() { - // If there was left over bytes previously, they may be - // needed to continue decoding, even though we will - // be ignoring this frame. This is done to keep the HPACK - // decoder state up-to-date. - // - // Still, we need to be careful, because if a malicious - // attacker were to try to send a gigantic string, such - // that it fits over multiple header blocks, we could - // grow memory uncontrollably again, and that'd be a shame. - // - // Instead, we use a simple heuristic to determine if - // we should continue to ignore decoding, or to tell - // the attacker to go away. 
- if partial.buf.len() + bytes.len() > max_header_list_size { - proto_err!(conn: "CONTINUATION frame header block size over ignorable limit"); - return Err(Error::library_go_away(Reason::COMPRESSION_ERROR)); - } - } - partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); - } - - match partial - .frame - .load_hpack(&mut partial.buf, max_header_list_size, hpack) - { - Ok(_) => {} - Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {} - Err(frame::Error::MalformedMessage) => { - let id = head.stream_id(); - proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id); - return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); - } - Err(e) => { - proto_err!(conn: "failed HPACK decoding; err={:?}", e); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - } - - if is_end_headers { - partial.frame.into() - } else { - *partial_inout = Some(partial); - return Ok(None); - } - } - Kind::Unknown => { - // Unknown frames are ignored - return Ok(None); - } - }; - - Ok(Some(frame)) -} - -impl Stream for FramedRead -where - T: AsyncRead + Unpin, -{ - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let span = tracing::trace_span!("FramedRead::poll_next"); - let _e = span.enter(); - loop { - tracing::trace!("poll"); - let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) { - Some(Ok(bytes)) => bytes, - Some(Err(e)) => return Poll::Ready(Some(Err(map_err(e)))), - None => return Poll::Ready(None), - }; - - tracing::trace!(read.bytes = bytes.len()); - let Self { - ref mut hpack, - max_header_list_size, - ref mut partial, - .. - } = *self; - if let Some(frame) = decode_frame(hpack, max_header_list_size, partial, bytes)? 
{ - tracing::debug!(?frame, "received"); - return Poll::Ready(Some(Ok(frame))); - } - } - } -} - -fn map_err(err: io::Error) -> Error { - if let io::ErrorKind::InvalidData = err.kind() { - if let Some(custom) = err.get_ref() { - if custom.is::() { - return Error::library_go_away(Reason::FRAME_SIZE_ERROR); - } - } - } - err.into() -} - -// ===== impl Continuable ===== - -impl Continuable { - fn stream_id(&self) -> frame::StreamId { - match *self { - Continuable::Headers(ref h) => h.stream_id(), - Continuable::PushPromise(ref p) => p.stream_id(), - } - } - - fn is_over_size(&self) -> bool { - match *self { - Continuable::Headers(ref h) => h.is_over_size(), - Continuable::PushPromise(ref p) => p.is_over_size(), - } - } - - fn load_hpack( - &mut self, - src: &mut BytesMut, - max_header_list_size: usize, - decoder: &mut hpack::Decoder, - ) -> Result<(), frame::Error> { - match *self { - Continuable::Headers(ref mut h) => h.load_hpack(src, max_header_list_size, decoder), - Continuable::PushPromise(ref mut p) => p.load_hpack(src, max_header_list_size, decoder), - } - } -} - -impl From for Frame { - fn from(cont: Continuable) -> Self { - match cont { - Continuable::Headers(mut headers) => { - headers.set_end_headers(); - headers.into() - } - Continuable::PushPromise(mut push) => { - push.set_end_headers(); - push.into() - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/codec/framed_write.rs s390-tools-2.33.1/rust-vendor/h2/src/codec/framed_write.rs --- s390-tools-2.31.0/rust-vendor/h2/src/codec/framed_write.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/codec/framed_write.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,362 +0,0 @@ -use crate::codec::UserError; -use crate::codec::UserError::*; -use crate::frame::{self, Frame, FrameSize}; -use crate::hpack; - -use bytes::{Buf, BufMut, BytesMut}; -use std::pin::Pin; -use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tokio_util::io::poll_write_buf; 
- -use std::io::{self, Cursor}; - -// A macro to get around a method needing to borrow &mut self -macro_rules! limited_write_buf { - ($self:expr) => {{ - let limit = $self.max_frame_size() + frame::HEADER_LEN; - $self.buf.get_mut().limit(limit) - }}; -} - -#[derive(Debug)] -pub struct FramedWrite { - /// Upstream `AsyncWrite` - inner: T, - - encoder: Encoder, -} - -#[derive(Debug)] -struct Encoder { - /// HPACK encoder - hpack: hpack::Encoder, - - /// Write buffer - /// - /// TODO: Should this be a ring buffer? - buf: Cursor, - - /// Next frame to encode - next: Option>, - - /// Last data frame - last_data_frame: Option>, - - /// Max frame size, this is specified by the peer - max_frame_size: FrameSize, - - /// Chain payloads bigger than this. - chain_threshold: usize, - - /// Min buffer required to attempt to write a frame - min_buffer_capacity: usize, -} - -#[derive(Debug)] -enum Next { - Data(frame::Data), - Continuation(frame::Continuation), -} - -/// Initialize the connection with this amount of write buffer. -/// -/// The minimum MAX_FRAME_SIZE is 16kb, so always be able to send a HEADERS -/// frame that big. -const DEFAULT_BUFFER_CAPACITY: usize = 16 * 1_024; - -/// Chain payloads bigger than this when vectored I/O is enabled. The remote -/// will never advertise a max frame size less than this (well, the spec says -/// the max frame size can't be less than 16kb, so not even close). -const CHAIN_THRESHOLD: usize = 256; - -/// Chain payloads bigger than this when vectored I/O is **not** enabled. -/// A larger value in this scenario will reduce the number of small and -/// fragmented data being sent, and hereby improve the throughput. 
-const CHAIN_THRESHOLD_WITHOUT_VECTORED_IO: usize = 1024; - -// TODO: Make generic -impl FramedWrite -where - T: AsyncWrite + Unpin, - B: Buf, -{ - pub fn new(inner: T) -> FramedWrite { - let chain_threshold = if inner.is_write_vectored() { - CHAIN_THRESHOLD - } else { - CHAIN_THRESHOLD_WITHOUT_VECTORED_IO - }; - FramedWrite { - inner, - encoder: Encoder { - hpack: hpack::Encoder::default(), - buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)), - next: None, - last_data_frame: None, - max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, - chain_threshold, - min_buffer_capacity: chain_threshold + frame::HEADER_LEN, - }, - } - } - - /// Returns `Ready` when `send` is able to accept a frame - /// - /// Calling this function may result in the current contents of the buffer - /// to be flushed to `T`. - pub fn poll_ready(&mut self, cx: &mut Context) -> Poll> { - if !self.encoder.has_capacity() { - // Try flushing - ready!(self.flush(cx))?; - - if !self.encoder.has_capacity() { - return Poll::Pending; - } - } - - Poll::Ready(Ok(())) - } - - /// Buffer a frame. - /// - /// `poll_ready` must be called first to ensure that a frame may be - /// accepted. - pub fn buffer(&mut self, item: Frame) -> Result<(), UserError> { - self.encoder.buffer(item) - } - - /// Flush buffered data to the wire - pub fn flush(&mut self, cx: &mut Context) -> Poll> { - let span = tracing::trace_span!("FramedWrite::flush"); - let _e = span.enter(); - - loop { - while !self.encoder.is_empty() { - match self.encoder.next { - Some(Next::Data(ref mut frame)) => { - tracing::trace!(queued_data_frame = true); - let mut buf = (&mut self.encoder.buf).chain(frame.payload_mut()); - ready!(poll_write_buf(Pin::new(&mut self.inner), cx, &mut buf))? - } - _ => { - tracing::trace!(queued_data_frame = false); - ready!(poll_write_buf( - Pin::new(&mut self.inner), - cx, - &mut self.encoder.buf - ))? 
- } - }; - } - - match self.encoder.unset_frame() { - ControlFlow::Continue => (), - ControlFlow::Break => break, - } - } - - tracing::trace!("flushing buffer"); - // Flush the upstream - ready!(Pin::new(&mut self.inner).poll_flush(cx))?; - - Poll::Ready(Ok(())) - } - - /// Close the codec - pub fn shutdown(&mut self, cx: &mut Context) -> Poll> { - ready!(self.flush(cx))?; - Pin::new(&mut self.inner).poll_shutdown(cx) - } -} - -#[must_use] -enum ControlFlow { - Continue, - Break, -} - -impl Encoder -where - B: Buf, -{ - fn unset_frame(&mut self) -> ControlFlow { - // Clear internal buffer - self.buf.set_position(0); - self.buf.get_mut().clear(); - - // The data frame has been written, so unset it - match self.next.take() { - Some(Next::Data(frame)) => { - self.last_data_frame = Some(frame); - debug_assert!(self.is_empty()); - ControlFlow::Break - } - Some(Next::Continuation(frame)) => { - // Buffer the continuation frame, then try to write again - let mut buf = limited_write_buf!(self); - if let Some(continuation) = frame.encode(&mut buf) { - self.next = Some(Next::Continuation(continuation)); - } - ControlFlow::Continue - } - None => ControlFlow::Break, - } - } - - fn buffer(&mut self, item: Frame) -> Result<(), UserError> { - // Ensure that we have enough capacity to accept the write. - assert!(self.has_capacity()); - let span = tracing::trace_span!("FramedWrite::buffer", frame = ?item); - let _e = span.enter(); - - tracing::debug!(frame = ?item, "send"); - - match item { - Frame::Data(mut v) => { - // Ensure that the payload is not greater than the max frame. 
- let len = v.payload().remaining(); - - if len > self.max_frame_size() { - return Err(PayloadTooBig); - } - - if len >= self.chain_threshold { - let head = v.head(); - - // Encode the frame head to the buffer - head.encode(len, self.buf.get_mut()); - - if self.buf.get_ref().remaining() < self.chain_threshold { - let extra_bytes = self.chain_threshold - self.buf.remaining(); - self.buf.get_mut().put(v.payload_mut().take(extra_bytes)); - } - - // Save the data frame - self.next = Some(Next::Data(v)); - } else { - v.encode_chunk(self.buf.get_mut()); - - // The chunk has been fully encoded, so there is no need to - // keep it around - assert_eq!(v.payload().remaining(), 0, "chunk not fully encoded"); - - // Save off the last frame... - self.last_data_frame = Some(v); - } - } - Frame::Headers(v) => { - let mut buf = limited_write_buf!(self); - if let Some(continuation) = v.encode(&mut self.hpack, &mut buf) { - self.next = Some(Next::Continuation(continuation)); - } - } - Frame::PushPromise(v) => { - let mut buf = limited_write_buf!(self); - if let Some(continuation) = v.encode(&mut self.hpack, &mut buf) { - self.next = Some(Next::Continuation(continuation)); - } - } - Frame::Settings(v) => { - v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded settings"); - } - Frame::GoAway(v) => { - v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded go_away"); - } - Frame::Ping(v) => { - v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded ping"); - } - Frame::WindowUpdate(v) => { - v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded window_update"); - } - - Frame::Priority(_) => { - /* - v.encode(self.buf.get_mut()); - tracing::trace!("encoded priority; rem={:?}", self.buf.remaining()); - */ - unimplemented!(); - } - Frame::Reset(v) => { - v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded reset"); - } - } - - Ok(()) - 
} - - fn has_capacity(&self) -> bool { - self.next.is_none() - && (self.buf.get_ref().capacity() - self.buf.get_ref().len() - >= self.min_buffer_capacity) - } - - fn is_empty(&self) -> bool { - match self.next { - Some(Next::Data(ref frame)) => !frame.payload().has_remaining(), - _ => !self.buf.has_remaining(), - } - } -} - -impl Encoder { - fn max_frame_size(&self) -> usize { - self.max_frame_size as usize - } -} - -impl FramedWrite { - /// Returns the max frame size that can be sent - pub fn max_frame_size(&self) -> usize { - self.encoder.max_frame_size() - } - - /// Set the peer's max frame size. - pub fn set_max_frame_size(&mut self, val: usize) { - assert!(val <= frame::MAX_MAX_FRAME_SIZE as usize); - self.encoder.max_frame_size = val as FrameSize; - } - - /// Set the peer's header table size. - pub fn set_header_table_size(&mut self, val: usize) { - self.encoder.hpack.update_max_size(val); - } - - /// Retrieve the last data frame that has been sent - pub fn take_last_data_frame(&mut self) -> Option> { - self.encoder.last_data_frame.take() - } - - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } -} - -impl AsyncRead for FramedWrite { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf, - ) -> Poll> { - Pin::new(&mut self.inner).poll_read(cx, buf) - } -} - -// We never project the Pin to `B`. 
-impl Unpin for FramedWrite {} - -#[cfg(feature = "unstable")] -mod unstable { - use super::*; - - impl FramedWrite { - pub fn get_ref(&self) -> &T { - &self.inner - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/codec/mod.rs s390-tools-2.33.1/rust-vendor/h2/src/codec/mod.rs --- s390-tools-2.31.0/rust-vendor/h2/src/codec/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/codec/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,206 +0,0 @@ -mod error; -mod framed_read; -mod framed_write; - -pub use self::error::{SendError, UserError}; - -use self::framed_read::FramedRead; -use self::framed_write::FramedWrite; - -use crate::frame::{self, Data, Frame}; -use crate::proto::Error; - -use bytes::Buf; -use futures_core::Stream; -use futures_sink::Sink; -use std::pin::Pin; -use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_util::codec::length_delimited; - -use std::io; - -#[derive(Debug)] -pub struct Codec { - inner: FramedRead>, -} - -impl Codec -where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, -{ - /// Returns a new `Codec` with the default max frame size - #[inline] - pub fn new(io: T) -> Self { - Self::with_max_recv_frame_size(io, frame::DEFAULT_MAX_FRAME_SIZE as usize) - } - - /// Returns a new `Codec` with the given maximum frame size - pub fn with_max_recv_frame_size(io: T, max_frame_size: usize) -> Self { - // Wrap with writer - let framed_write = FramedWrite::new(io); - - // Delimit the frames - let delimited = length_delimited::Builder::new() - .big_endian() - .length_field_length(3) - .length_adjustment(9) - .num_skip(0) // Don't skip the header - .new_read(framed_write); - - let mut inner = FramedRead::new(delimited); - - // Use FramedRead's method since it checks the value is within range. - inner.set_max_frame_size(max_frame_size); - - Codec { inner } - } -} - -impl Codec { - /// Updates the max received frame size. 
- /// - /// The change takes effect the next time a frame is decoded. In other - /// words, if a frame is currently in process of being decoded with a frame - /// size greater than `val` but less than the max frame size in effect - /// before calling this function, then the frame will be allowed. - #[inline] - pub fn set_max_recv_frame_size(&mut self, val: usize) { - self.inner.set_max_frame_size(val) - } - - /// Returns the current max received frame size setting. - /// - /// This is the largest size this codec will accept from the wire. Larger - /// frames will be rejected. - #[cfg(feature = "unstable")] - #[inline] - pub fn max_recv_frame_size(&self) -> usize { - self.inner.max_frame_size() - } - - /// Returns the max frame size that can be sent to the peer. - pub fn max_send_frame_size(&self) -> usize { - self.inner.get_ref().max_frame_size() - } - - /// Set the peer's max frame size. - pub fn set_max_send_frame_size(&mut self, val: usize) { - self.framed_write().set_max_frame_size(val) - } - - /// Set the peer's header table size size. - pub fn set_send_header_table_size(&mut self, val: usize) { - self.framed_write().set_header_table_size(val) - } - - /// Set the decoder header table size size. - pub fn set_recv_header_table_size(&mut self, val: usize) { - self.inner.set_header_table_size(val) - } - - /// Set the max header list size that can be received. - pub fn set_max_recv_header_list_size(&mut self, val: usize) { - self.inner.set_max_header_list_size(val); - } - - /// Get a reference to the inner stream. - #[cfg(feature = "unstable")] - pub fn get_ref(&self) -> &T { - self.inner.get_ref().get_ref() - } - - /// Get a mutable reference to the inner stream. 
- pub fn get_mut(&mut self) -> &mut T { - self.inner.get_mut().get_mut() - } - - /// Takes the data payload value that was fully written to the socket - pub(crate) fn take_last_data_frame(&mut self) -> Option> { - self.framed_write().take_last_data_frame() - } - - fn framed_write(&mut self) -> &mut FramedWrite { - self.inner.get_mut() - } -} - -impl Codec -where - T: AsyncWrite + Unpin, - B: Buf, -{ - /// Returns `Ready` when the codec can buffer a frame - pub fn poll_ready(&mut self, cx: &mut Context) -> Poll> { - self.framed_write().poll_ready(cx) - } - - /// Buffer a frame. - /// - /// `poll_ready` must be called first to ensure that a frame may be - /// accepted. - /// - /// TODO: Rename this to avoid conflicts with Sink::buffer - pub fn buffer(&mut self, item: Frame) -> Result<(), UserError> { - self.framed_write().buffer(item) - } - - /// Flush buffered data to the wire - pub fn flush(&mut self, cx: &mut Context) -> Poll> { - self.framed_write().flush(cx) - } - - /// Shutdown the send half - pub fn shutdown(&mut self, cx: &mut Context) -> Poll> { - self.framed_write().shutdown(cx) - } -} - -impl Stream for Codec -where - T: AsyncRead + Unpin, -{ - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_next(cx) - } -} - -impl Sink> for Codec -where - T: AsyncWrite + Unpin, - B: Buf, -{ - type Error = SendError; - - fn start_send(mut self: Pin<&mut Self>, item: Frame) -> Result<(), Self::Error> { - Codec::buffer(&mut self, item)?; - Ok(()) - } - /// Returns `Ready` when the codec can buffer a frame - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.framed_write().poll_ready(cx).map_err(Into::into) - } - - /// Flush buffered data to the wire - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.framed_write().flush(cx).map_err(Into::into) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - 
ready!(self.shutdown(cx))?; - Poll::Ready(Ok(())) - } -} - -// TODO: remove (or improve) this -impl From for Codec -where - T: AsyncRead + AsyncWrite + Unpin, -{ - fn from(src: T) -> Self { - Self::new(src) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/error.rs s390-tools-2.33.1/rust-vendor/h2/src/error.rs --- s390-tools-2.31.0/rust-vendor/h2/src/error.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,210 +0,0 @@ -use crate::codec::{SendError, UserError}; -use crate::frame::StreamId; -use crate::proto::{self, Initiator}; - -use bytes::Bytes; -use std::{error, fmt, io}; - -pub use crate::frame::Reason; - -/// Represents HTTP/2 operation errors. -/// -/// `Error` covers error cases raised by protocol errors caused by the -/// peer, I/O (transport) errors, and errors caused by the user of the library. -/// -/// If the error was caused by the remote peer, then it will contain a -/// [`Reason`] which can be obtained with the [`reason`] function. -/// -/// [`Reason`]: struct.Reason.html -/// [`reason`]: #method.reason -#[derive(Debug)] -pub struct Error { - kind: Kind, -} - -#[derive(Debug)] -enum Kind { - /// A RST_STREAM frame was received or sent. - Reset(StreamId, Reason, Initiator), - - /// A GO_AWAY frame was received or sent. - GoAway(Bytes, Reason, Initiator), - - /// The user created an error from a bare Reason. - Reason(Reason), - - /// An error resulting from an invalid action taken by the user of this - /// library. - User(UserError), - - /// An `io::Error` occurred while trying to read or write. - Io(io::Error), -} - -// ===== impl Error ===== - -impl Error { - /// If the error was caused by the remote peer, the error reason. - /// - /// This is either an error received by the peer or caused by an invalid - /// action taken by the peer (i.e. a protocol error). 
- pub fn reason(&self) -> Option { - match self.kind { - Kind::Reset(_, reason, _) | Kind::GoAway(_, reason, _) | Kind::Reason(reason) => { - Some(reason) - } - _ => None, - } - } - - /// Returns true if the error is an io::Error - pub fn is_io(&self) -> bool { - matches!(self.kind, Kind::Io(..)) - } - - /// Returns the error if the error is an io::Error - pub fn get_io(&self) -> Option<&io::Error> { - match self.kind { - Kind::Io(ref e) => Some(e), - _ => None, - } - } - - /// Returns the error if the error is an io::Error - pub fn into_io(self) -> Option { - match self.kind { - Kind::Io(e) => Some(e), - _ => None, - } - } - - pub(crate) fn from_io(err: io::Error) -> Self { - Error { - kind: Kind::Io(err), - } - } - - /// Returns true if the error is from a `GOAWAY`. - pub fn is_go_away(&self) -> bool { - matches!(self.kind, Kind::GoAway(..)) - } - - /// Returns true if the error is from a `RST_STREAM`. - pub fn is_reset(&self) -> bool { - matches!(self.kind, Kind::Reset(..)) - } - - /// Returns true if the error was received in a frame from the remote. - /// - /// Such as from a received `RST_STREAM` or `GOAWAY` frame. - pub fn is_remote(&self) -> bool { - matches!( - self.kind, - Kind::GoAway(_, _, Initiator::Remote) | Kind::Reset(_, _, Initiator::Remote) - ) - } - - /// Returns true if the error was created by `h2`. - /// - /// Such as noticing some protocol error and sending a GOAWAY or RST_STREAM. 
- pub fn is_library(&self) -> bool { - matches!( - self.kind, - Kind::GoAway(_, _, Initiator::Library) | Kind::Reset(_, _, Initiator::Library) - ) - } -} - -impl From for Error { - fn from(src: proto::Error) -> Error { - use crate::proto::Error::*; - - Error { - kind: match src { - Reset(stream_id, reason, initiator) => Kind::Reset(stream_id, reason, initiator), - GoAway(debug_data, reason, initiator) => { - Kind::GoAway(debug_data, reason, initiator) - } - Io(kind, inner) => { - Kind::Io(inner.map_or_else(|| kind.into(), |inner| io::Error::new(kind, inner))) - } - }, - } - } -} - -impl From for Error { - fn from(src: Reason) -> Error { - Error { - kind: Kind::Reason(src), - } - } -} - -impl From for Error { - fn from(src: SendError) -> Error { - match src { - SendError::User(e) => e.into(), - SendError::Connection(e) => e.into(), - } - } -} - -impl From for Error { - fn from(src: UserError) -> Error { - Error { - kind: Kind::User(src), - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let debug_data = match self.kind { - Kind::Reset(_, reason, Initiator::User) => { - return write!(fmt, "stream error sent by user: {}", reason) - } - Kind::Reset(_, reason, Initiator::Library) => { - return write!(fmt, "stream error detected: {}", reason) - } - Kind::Reset(_, reason, Initiator::Remote) => { - return write!(fmt, "stream error received: {}", reason) - } - Kind::GoAway(ref debug_data, reason, Initiator::User) => { - write!(fmt, "connection error sent by user: {}", reason)?; - debug_data - } - Kind::GoAway(ref debug_data, reason, Initiator::Library) => { - write!(fmt, "connection error detected: {}", reason)?; - debug_data - } - Kind::GoAway(ref debug_data, reason, Initiator::Remote) => { - write!(fmt, "connection error received: {}", reason)?; - debug_data - } - Kind::Reason(reason) => return write!(fmt, "protocol error: {}", reason), - Kind::User(ref e) => return write!(fmt, "user error: {}", e), - Kind::Io(ref e) 
=> return e.fmt(fmt), - }; - - if !debug_data.is_empty() { - write!(fmt, " ({:?})", debug_data)?; - } - - Ok(()) - } -} - -impl error::Error for Error {} - -#[cfg(test)] -mod tests { - use super::Error; - use crate::Reason; - - #[test] - fn error_from_reason() { - let err = Error::from(Reason::HTTP_1_1_REQUIRED); - assert_eq!(err.reason(), Some(Reason::HTTP_1_1_REQUIRED)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/ext.rs s390-tools-2.33.1/rust-vendor/h2/src/ext.rs --- s390-tools-2.31.0/rust-vendor/h2/src/ext.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/ext.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -//! Extensions specific to the HTTP/2 protocol. - -use crate::hpack::BytesStr; - -use bytes::Bytes; -use std::fmt; - -/// Represents the `:protocol` pseudo-header used by -/// the [Extended CONNECT Protocol]. -/// -/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 -#[derive(Clone, Eq, PartialEq)] -pub struct Protocol { - value: BytesStr, -} - -impl Protocol { - /// Converts a static string to a protocol name. - pub const fn from_static(value: &'static str) -> Self { - Self { - value: BytesStr::from_static(value), - } - } - - /// Returns a str representation of the header. 
- pub fn as_str(&self) -> &str { - self.value.as_str() - } - - pub(crate) fn try_from(bytes: Bytes) -> Result { - Ok(Self { - value: BytesStr::try_from(bytes)?, - }) - } -} - -impl<'a> From<&'a str> for Protocol { - fn from(value: &'a str) -> Self { - Self { - value: BytesStr::from(value), - } - } -} - -impl AsRef<[u8]> for Protocol { - fn as_ref(&self) -> &[u8] { - self.value.as_ref() - } -} - -impl fmt::Debug for Protocol { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.value.fmt(f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/data.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/data.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/data.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/data.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,227 +0,0 @@ -use crate::frame::{util, Error, Frame, Head, Kind, StreamId}; -use bytes::{Buf, BufMut, Bytes}; - -use std::fmt; - -/// Data frame -/// -/// Data frames convey arbitrary, variable-length sequences of octets associated -/// with a stream. One or more DATA frames are used, for instance, to carry HTTP -/// request or response payloads. -#[derive(Eq, PartialEq)] -pub struct Data { - stream_id: StreamId, - data: T, - flags: DataFlags, - pad_len: Option, -} - -#[derive(Copy, Clone, Default, Eq, PartialEq)] -struct DataFlags(u8); - -const END_STREAM: u8 = 0x1; -const PADDED: u8 = 0x8; -const ALL: u8 = END_STREAM | PADDED; - -impl Data { - /// Creates a new DATA frame. - pub fn new(stream_id: StreamId, payload: T) -> Self { - assert!(!stream_id.is_zero()); - - Data { - stream_id, - data: payload, - flags: DataFlags::default(), - pad_len: None, - } - } - - /// Returns the stream identifier that this frame is associated with. - /// - /// This cannot be a zero stream identifier. - pub fn stream_id(&self) -> StreamId { - self.stream_id - } - - /// Gets the value of the `END_STREAM` flag for this frame. 
- /// - /// If true, this frame is the last that the endpoint will send for the - /// identified stream. - /// - /// Setting this flag causes the stream to enter one of the "half-closed" - /// states or the "closed" state (Section 5.1). - pub fn is_end_stream(&self) -> bool { - self.flags.is_end_stream() - } - - /// Sets the value for the `END_STREAM` flag on this frame. - pub fn set_end_stream(&mut self, val: bool) { - if val { - self.flags.set_end_stream(); - } else { - self.flags.unset_end_stream(); - } - } - - /// Returns whether the `PADDED` flag is set on this frame. - #[cfg(feature = "unstable")] - pub fn is_padded(&self) -> bool { - self.flags.is_padded() - } - - /// Sets the value for the `PADDED` flag on this frame. - #[cfg(feature = "unstable")] - pub fn set_padded(&mut self) { - self.flags.set_padded(); - } - - /// Returns a reference to this frame's payload. - /// - /// This does **not** include any padding that might have been originally - /// included. - pub fn payload(&self) -> &T { - &self.data - } - - /// Returns a mutable reference to this frame's payload. - /// - /// This does **not** include any padding that might have been originally - /// included. - pub fn payload_mut(&mut self) -> &mut T { - &mut self.data - } - - /// Consumes `self` and returns the frame's payload. - /// - /// This does **not** include any padding that might have been originally - /// included. 
- pub fn into_payload(self) -> T { - self.data - } - - pub(crate) fn head(&self) -> Head { - Head::new(Kind::Data, self.flags.into(), self.stream_id) - } - - pub(crate) fn map(self, f: F) -> Data - where - F: FnOnce(T) -> U, - { - Data { - stream_id: self.stream_id, - data: f(self.data), - flags: self.flags, - pad_len: self.pad_len, - } - } -} - -impl Data { - pub(crate) fn load(head: Head, mut payload: Bytes) -> Result { - let flags = DataFlags::load(head.flag()); - - // The stream identifier must not be zero - if head.stream_id().is_zero() { - return Err(Error::InvalidStreamId); - } - - let pad_len = if flags.is_padded() { - let len = util::strip_padding(&mut payload)?; - Some(len) - } else { - None - }; - - Ok(Data { - stream_id: head.stream_id(), - data: payload, - flags, - pad_len, - }) - } -} - -impl Data { - /// Encode the data frame into the `dst` buffer. - /// - /// # Panics - /// - /// Panics if `dst` cannot contain the data frame. - pub(crate) fn encode_chunk(&mut self, dst: &mut U) { - let len = self.data.remaining(); - - assert!(dst.remaining_mut() >= len); - - self.head().encode(len, dst); - dst.put(&mut self.data); - } -} - -impl From> for Frame { - fn from(src: Data) -> Self { - Frame::Data(src) - } -} - -impl fmt::Debug for Data { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut f = fmt.debug_struct("Data"); - f.field("stream_id", &self.stream_id); - if !self.flags.is_empty() { - f.field("flags", &self.flags); - } - if let Some(ref pad_len) = self.pad_len { - f.field("pad_len", pad_len); - } - // `data` bytes purposefully excluded - f.finish() - } -} - -// ===== impl DataFlags ===== - -impl DataFlags { - fn load(bits: u8) -> DataFlags { - DataFlags(bits & ALL) - } - - fn is_empty(&self) -> bool { - self.0 == 0 - } - - fn is_end_stream(&self) -> bool { - self.0 & END_STREAM == END_STREAM - } - - fn set_end_stream(&mut self) { - self.0 |= END_STREAM - } - - fn unset_end_stream(&mut self) { - self.0 &= !END_STREAM - } - - fn 
is_padded(&self) -> bool { - self.0 & PADDED == PADDED - } - - #[cfg(feature = "unstable")] - fn set_padded(&mut self) { - self.0 |= PADDED - } -} - -impl From for u8 { - fn from(src: DataFlags) -> u8 { - src.0 - } -} - -impl fmt::Debug for DataFlags { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - util::debug_flags(fmt, self.0) - .flag_if(self.is_end_stream(), "END_STREAM") - .flag_if(self.is_padded(), "PADDED") - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/go_away.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/go_away.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/go_away.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/go_away.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,87 +0,0 @@ -use std::fmt; - -use bytes::{BufMut, Bytes}; - -use crate::frame::{self, Error, Head, Kind, Reason, StreamId}; - -#[derive(Clone, Eq, PartialEq)] -pub struct GoAway { - last_stream_id: StreamId, - error_code: Reason, - debug_data: Bytes, -} - -impl GoAway { - pub fn new(last_stream_id: StreamId, reason: Reason) -> Self { - GoAway { - last_stream_id, - error_code: reason, - debug_data: Bytes::new(), - } - } - - pub fn with_debug_data(last_stream_id: StreamId, reason: Reason, debug_data: Bytes) -> Self { - Self { - last_stream_id, - error_code: reason, - debug_data, - } - } - - pub fn last_stream_id(&self) -> StreamId { - self.last_stream_id - } - - pub fn reason(&self) -> Reason { - self.error_code - } - - pub fn debug_data(&self) -> &Bytes { - &self.debug_data - } - - pub fn load(payload: &[u8]) -> Result { - if payload.len() < 8 { - return Err(Error::BadFrameSize); - } - - let (last_stream_id, _) = StreamId::parse(&payload[..4]); - let error_code = unpack_octets_4!(payload, 4, u32); - let debug_data = Bytes::copy_from_slice(&payload[8..]); - - Ok(GoAway { - last_stream_id, - error_code: error_code.into(), - debug_data, - }) - } - - pub fn encode(&self, dst: &mut B) { - tracing::trace!("encoding GO_AWAY; 
code={:?}", self.error_code); - let head = Head::new(Kind::GoAway, 0, StreamId::zero()); - head.encode(8 + self.debug_data.len(), dst); - dst.put_u32(self.last_stream_id.into()); - dst.put_u32(self.error_code.into()); - dst.put(self.debug_data.slice(..)); - } -} - -impl From for frame::Frame { - fn from(src: GoAway) -> Self { - frame::Frame::GoAway(src) - } -} - -impl fmt::Debug for GoAway { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut builder = f.debug_struct("GoAway"); - builder.field("error_code", &self.error_code); - builder.field("last_stream_id", &self.last_stream_id); - - if !self.debug_data.is_empty() { - builder.field("debug_data", &self.debug_data); - } - - builder.finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/headers.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/headers.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/headers.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/headers.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1042 +0,0 @@ -use super::{util, StreamDependency, StreamId}; -use crate::ext::Protocol; -use crate::frame::{Error, Frame, Head, Kind}; -use crate::hpack::{self, BytesStr}; - -use http::header::{self, HeaderName, HeaderValue}; -use http::{uri, HeaderMap, Method, Request, StatusCode, Uri}; - -use bytes::{BufMut, Bytes, BytesMut}; - -use std::fmt; -use std::io::Cursor; - -type EncodeBuf<'a> = bytes::buf::Limit<&'a mut BytesMut>; -/// Header frame -/// -/// This could be either a request or a response. -#[derive(Eq, PartialEq)] -pub struct Headers { - /// The ID of the stream with which this frame is associated. - stream_id: StreamId, - - /// The stream dependency information, if any. 
- stream_dep: Option, - - /// The header block fragment - header_block: HeaderBlock, - - /// The associated flags - flags: HeadersFlag, -} - -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct HeadersFlag(u8); - -#[derive(Eq, PartialEq)] -pub struct PushPromise { - /// The ID of the stream with which this frame is associated. - stream_id: StreamId, - - /// The ID of the stream being reserved by this PushPromise. - promised_id: StreamId, - - /// The header block fragment - header_block: HeaderBlock, - - /// The associated flags - flags: PushPromiseFlag, -} - -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct PushPromiseFlag(u8); - -#[derive(Debug)] -pub struct Continuation { - /// Stream ID of continuation frame - stream_id: StreamId, - - header_block: EncodingHeaderBlock, -} - -// TODO: These fields shouldn't be `pub` -#[derive(Debug, Default, Eq, PartialEq)] -pub struct Pseudo { - // Request - pub method: Option, - pub scheme: Option, - pub authority: Option, - pub path: Option, - pub protocol: Option, - - // Response - pub status: Option, -} - -#[derive(Debug)] -pub struct Iter { - /// Pseudo headers - pseudo: Option, - - /// Header fields - fields: header::IntoIter, -} - -#[derive(Debug, PartialEq, Eq)] -struct HeaderBlock { - /// The decoded header fields - fields: HeaderMap, - - /// Set to true if decoding went over the max header list size. - is_over_size: bool, - - /// Pseudo headers, these are broken out as they must be sent as part of the - /// headers frame. 
- pseudo: Pseudo, -} - -#[derive(Debug)] -struct EncodingHeaderBlock { - hpack: Bytes, -} - -const END_STREAM: u8 = 0x1; -const END_HEADERS: u8 = 0x4; -const PADDED: u8 = 0x8; -const PRIORITY: u8 = 0x20; -const ALL: u8 = END_STREAM | END_HEADERS | PADDED | PRIORITY; - -// ===== impl Headers ===== - -impl Headers { - /// Create a new HEADERS frame - pub fn new(stream_id: StreamId, pseudo: Pseudo, fields: HeaderMap) -> Self { - Headers { - stream_id, - stream_dep: None, - header_block: HeaderBlock { - fields, - is_over_size: false, - pseudo, - }, - flags: HeadersFlag::default(), - } - } - - pub fn trailers(stream_id: StreamId, fields: HeaderMap) -> Self { - let mut flags = HeadersFlag::default(); - flags.set_end_stream(); - - Headers { - stream_id, - stream_dep: None, - header_block: HeaderBlock { - fields, - is_over_size: false, - pseudo: Pseudo::default(), - }, - flags, - } - } - - /// Loads the header frame but doesn't actually do HPACK decoding. - /// - /// HPACK decoding is done in the `load_hpack` step. 
- pub fn load(head: Head, mut src: BytesMut) -> Result<(Self, BytesMut), Error> { - let flags = HeadersFlag(head.flag()); - let mut pad = 0; - - tracing::trace!("loading headers; flags={:?}", flags); - - if head.stream_id().is_zero() { - return Err(Error::InvalidStreamId); - } - - // Read the padding length - if flags.is_padded() { - if src.is_empty() { - return Err(Error::MalformedMessage); - } - pad = src[0] as usize; - - // Drop the padding - let _ = src.split_to(1); - } - - // Read the stream dependency - let stream_dep = if flags.is_priority() { - if src.len() < 5 { - return Err(Error::MalformedMessage); - } - let stream_dep = StreamDependency::load(&src[..5])?; - - if stream_dep.dependency_id() == head.stream_id() { - return Err(Error::InvalidDependencyId); - } - - // Drop the next 5 bytes - let _ = src.split_to(5); - - Some(stream_dep) - } else { - None - }; - - if pad > 0 { - if pad > src.len() { - return Err(Error::TooMuchPadding); - } - - let len = src.len() - pad; - src.truncate(len); - } - - let headers = Headers { - stream_id: head.stream_id(), - stream_dep, - header_block: HeaderBlock { - fields: HeaderMap::new(), - is_over_size: false, - pseudo: Pseudo::default(), - }, - flags, - }; - - Ok((headers, src)) - } - - pub fn load_hpack( - &mut self, - src: &mut BytesMut, - max_header_list_size: usize, - decoder: &mut hpack::Decoder, - ) -> Result<(), Error> { - self.header_block.load(src, max_header_list_size, decoder) - } - - pub fn stream_id(&self) -> StreamId { - self.stream_id - } - - pub fn is_end_headers(&self) -> bool { - self.flags.is_end_headers() - } - - pub fn set_end_headers(&mut self) { - self.flags.set_end_headers(); - } - - pub fn is_end_stream(&self) -> bool { - self.flags.is_end_stream() - } - - pub fn set_end_stream(&mut self) { - self.flags.set_end_stream() - } - - pub fn is_over_size(&self) -> bool { - self.header_block.is_over_size - } - - pub fn into_parts(self) -> (Pseudo, HeaderMap) { - (self.header_block.pseudo, 
self.header_block.fields) - } - - #[cfg(feature = "unstable")] - pub fn pseudo_mut(&mut self) -> &mut Pseudo { - &mut self.header_block.pseudo - } - - /// Whether it has status 1xx - pub(crate) fn is_informational(&self) -> bool { - self.header_block.pseudo.is_informational() - } - - pub fn fields(&self) -> &HeaderMap { - &self.header_block.fields - } - - pub fn into_fields(self) -> HeaderMap { - self.header_block.fields - } - - pub fn encode( - self, - encoder: &mut hpack::Encoder, - dst: &mut EncodeBuf<'_>, - ) -> Option { - // At this point, the `is_end_headers` flag should always be set - debug_assert!(self.flags.is_end_headers()); - - // Get the HEADERS frame head - let head = self.head(); - - self.header_block - .into_encoding(encoder) - .encode(&head, dst, |_| {}) - } - - fn head(&self) -> Head { - Head::new(Kind::Headers, self.flags.into(), self.stream_id) - } -} - -impl From for Frame { - fn from(src: Headers) -> Self { - Frame::Headers(src) - } -} - -impl fmt::Debug for Headers { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut builder = f.debug_struct("Headers"); - builder - .field("stream_id", &self.stream_id) - .field("flags", &self.flags); - - if let Some(ref protocol) = self.header_block.pseudo.protocol { - builder.field("protocol", protocol); - } - - if let Some(ref dep) = self.stream_dep { - builder.field("stream_dep", dep); - } - - // `fields` and `pseudo` purposefully not included - builder.finish() - } -} - -// ===== util ===== - -#[derive(Debug, PartialEq, Eq)] -pub struct ParseU64Error; - -pub fn parse_u64(src: &[u8]) -> Result { - if src.len() > 19 { - // At danger for overflow... 
- return Err(ParseU64Error); - } - - let mut ret = 0; - - for &d in src { - if d < b'0' || d > b'9' { - return Err(ParseU64Error); - } - - ret *= 10; - ret += (d - b'0') as u64; - } - - Ok(ret) -} - -// ===== impl PushPromise ===== - -#[derive(Debug)] -pub enum PushPromiseHeaderError { - InvalidContentLength(Result), - NotSafeAndCacheable, -} - -impl PushPromise { - pub fn new( - stream_id: StreamId, - promised_id: StreamId, - pseudo: Pseudo, - fields: HeaderMap, - ) -> Self { - PushPromise { - flags: PushPromiseFlag::default(), - header_block: HeaderBlock { - fields, - is_over_size: false, - pseudo, - }, - promised_id, - stream_id, - } - } - - pub fn validate_request(req: &Request<()>) -> Result<(), PushPromiseHeaderError> { - use PushPromiseHeaderError::*; - // The spec has some requirements for promised request headers - // [https://httpwg.org/specs/rfc7540.html#PushRequests] - - // A promised request "that indicates the presence of a request body - // MUST reset the promised stream with a stream error" - if let Some(content_length) = req.headers().get(header::CONTENT_LENGTH) { - let parsed_length = parse_u64(content_length.as_bytes()); - if parsed_length != Ok(0) { - return Err(InvalidContentLength(parsed_length)); - } - } - // "The server MUST include a method in the :method pseudo-header field - // that is safe and cacheable" - if !Self::safe_and_cacheable(req.method()) { - return Err(NotSafeAndCacheable); - } - - Ok(()) - } - - fn safe_and_cacheable(method: &Method) -> bool { - // Cacheable: https://httpwg.org/specs/rfc7231.html#cacheable.methods - // Safe: https://httpwg.org/specs/rfc7231.html#safe.methods - method == Method::GET || method == Method::HEAD - } - - pub fn fields(&self) -> &HeaderMap { - &self.header_block.fields - } - - #[cfg(feature = "unstable")] - pub fn into_fields(self) -> HeaderMap { - self.header_block.fields - } - - /// Loads the push promise frame but doesn't actually do HPACK decoding. 
- /// - /// HPACK decoding is done in the `load_hpack` step. - pub fn load(head: Head, mut src: BytesMut) -> Result<(Self, BytesMut), Error> { - let flags = PushPromiseFlag(head.flag()); - let mut pad = 0; - - if head.stream_id().is_zero() { - return Err(Error::InvalidStreamId); - } - - // Read the padding length - if flags.is_padded() { - if src.is_empty() { - return Err(Error::MalformedMessage); - } - - // TODO: Ensure payload is sized correctly - pad = src[0] as usize; - - // Drop the padding - let _ = src.split_to(1); - } - - if src.len() < 5 { - return Err(Error::MalformedMessage); - } - - let (promised_id, _) = StreamId::parse(&src[..4]); - // Drop promised_id bytes - let _ = src.split_to(4); - - if pad > 0 { - if pad > src.len() { - return Err(Error::TooMuchPadding); - } - - let len = src.len() - pad; - src.truncate(len); - } - - let frame = PushPromise { - flags, - header_block: HeaderBlock { - fields: HeaderMap::new(), - is_over_size: false, - pseudo: Pseudo::default(), - }, - promised_id, - stream_id: head.stream_id(), - }; - Ok((frame, src)) - } - - pub fn load_hpack( - &mut self, - src: &mut BytesMut, - max_header_list_size: usize, - decoder: &mut hpack::Decoder, - ) -> Result<(), Error> { - self.header_block.load(src, max_header_list_size, decoder) - } - - pub fn stream_id(&self) -> StreamId { - self.stream_id - } - - pub fn promised_id(&self) -> StreamId { - self.promised_id - } - - pub fn is_end_headers(&self) -> bool { - self.flags.is_end_headers() - } - - pub fn set_end_headers(&mut self) { - self.flags.set_end_headers(); - } - - pub fn is_over_size(&self) -> bool { - self.header_block.is_over_size - } - - pub fn encode( - self, - encoder: &mut hpack::Encoder, - dst: &mut EncodeBuf<'_>, - ) -> Option { - // At this point, the `is_end_headers` flag should always be set - debug_assert!(self.flags.is_end_headers()); - - let head = self.head(); - let promised_id = self.promised_id; - - self.header_block - .into_encoding(encoder) - .encode(&head, dst, 
|dst| { - dst.put_u32(promised_id.into()); - }) - } - - fn head(&self) -> Head { - Head::new(Kind::PushPromise, self.flags.into(), self.stream_id) - } - - /// Consume `self`, returning the parts of the frame - pub fn into_parts(self) -> (Pseudo, HeaderMap) { - (self.header_block.pseudo, self.header_block.fields) - } -} - -impl From for Frame { - fn from(src: PushPromise) -> Self { - Frame::PushPromise(src) - } -} - -impl fmt::Debug for PushPromise { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("PushPromise") - .field("stream_id", &self.stream_id) - .field("promised_id", &self.promised_id) - .field("flags", &self.flags) - // `fields` and `pseudo` purposefully not included - .finish() - } -} - -// ===== impl Continuation ===== - -impl Continuation { - fn head(&self) -> Head { - Head::new(Kind::Continuation, END_HEADERS, self.stream_id) - } - - pub fn encode(self, dst: &mut EncodeBuf<'_>) -> Option { - // Get the CONTINUATION frame head - let head = self.head(); - - self.header_block.encode(&head, dst, |_| {}) - } -} - -// ===== impl Pseudo ===== - -impl Pseudo { - pub fn request(method: Method, uri: Uri, protocol: Option) -> Self { - let parts = uri::Parts::from(uri); - - let mut path = parts - .path_and_query - .map(|v| BytesStr::from(v.as_str())) - .unwrap_or(BytesStr::from_static("")); - - match method { - Method::OPTIONS | Method::CONNECT => {} - _ if path.is_empty() => { - path = BytesStr::from_static("/"); - } - _ => {} - } - - let mut pseudo = Pseudo { - method: Some(method), - scheme: None, - authority: None, - path: Some(path).filter(|p| !p.is_empty()), - protocol, - status: None, - }; - - // If the URI includes a scheme component, add it to the pseudo headers - // - // TODO: Scheme must be set... 
- if let Some(scheme) = parts.scheme { - pseudo.set_scheme(scheme); - } - - // If the URI includes an authority component, add it to the pseudo - // headers - if let Some(authority) = parts.authority { - pseudo.set_authority(BytesStr::from(authority.as_str())); - } - - pseudo - } - - pub fn response(status: StatusCode) -> Self { - Pseudo { - method: None, - scheme: None, - authority: None, - path: None, - protocol: None, - status: Some(status), - } - } - - #[cfg(feature = "unstable")] - pub fn set_status(&mut self, value: StatusCode) { - self.status = Some(value); - } - - pub fn set_scheme(&mut self, scheme: uri::Scheme) { - let bytes_str = match scheme.as_str() { - "http" => BytesStr::from_static("http"), - "https" => BytesStr::from_static("https"), - s => BytesStr::from(s), - }; - self.scheme = Some(bytes_str); - } - - #[cfg(feature = "unstable")] - pub fn set_protocol(&mut self, protocol: Protocol) { - self.protocol = Some(protocol); - } - - pub fn set_authority(&mut self, authority: BytesStr) { - self.authority = Some(authority); - } - - /// Whether it has status 1xx - pub(crate) fn is_informational(&self) -> bool { - self.status - .map_or(false, |status| status.is_informational()) - } -} - -// ===== impl EncodingHeaderBlock ===== - -impl EncodingHeaderBlock { - fn encode(mut self, head: &Head, dst: &mut EncodeBuf<'_>, f: F) -> Option - where - F: FnOnce(&mut EncodeBuf<'_>), - { - let head_pos = dst.get_ref().len(); - - // At this point, we don't know how big the h2 frame will be. - // So, we write the head with length 0, then write the body, and - // finally write the length once we know the size. 
- head.encode(0, dst); - - let payload_pos = dst.get_ref().len(); - - f(dst); - - // Now, encode the header payload - let continuation = if self.hpack.len() > dst.remaining_mut() { - dst.put_slice(&self.hpack.split_to(dst.remaining_mut())); - - Some(Continuation { - stream_id: head.stream_id(), - header_block: self, - }) - } else { - dst.put_slice(&self.hpack); - - None - }; - - // Compute the header block length - let payload_len = (dst.get_ref().len() - payload_pos) as u64; - - // Write the frame length - let payload_len_be = payload_len.to_be_bytes(); - assert!(payload_len_be[0..5].iter().all(|b| *b == 0)); - (dst.get_mut()[head_pos..head_pos + 3]).copy_from_slice(&payload_len_be[5..]); - - if continuation.is_some() { - // There will be continuation frames, so the `is_end_headers` flag - // must be unset - debug_assert!(dst.get_ref()[head_pos + 4] & END_HEADERS == END_HEADERS); - - dst.get_mut()[head_pos + 4] -= END_HEADERS; - } - - continuation - } -} - -// ===== impl Iter ===== - -impl Iterator for Iter { - type Item = hpack::Header>; - - fn next(&mut self) -> Option { - use crate::hpack::Header::*; - - if let Some(ref mut pseudo) = self.pseudo { - if let Some(method) = pseudo.method.take() { - return Some(Method(method)); - } - - if let Some(scheme) = pseudo.scheme.take() { - return Some(Scheme(scheme)); - } - - if let Some(authority) = pseudo.authority.take() { - return Some(Authority(authority)); - } - - if let Some(path) = pseudo.path.take() { - return Some(Path(path)); - } - - if let Some(protocol) = pseudo.protocol.take() { - return Some(Protocol(protocol)); - } - - if let Some(status) = pseudo.status.take() { - return Some(Status(status)); - } - } - - self.pseudo = None; - - self.fields - .next() - .map(|(name, value)| Field { name, value }) - } -} - -// ===== impl HeadersFlag ===== - -impl HeadersFlag { - pub fn empty() -> HeadersFlag { - HeadersFlag(0) - } - - pub fn load(bits: u8) -> HeadersFlag { - HeadersFlag(bits & ALL) - } - - pub fn 
is_end_stream(&self) -> bool { - self.0 & END_STREAM == END_STREAM - } - - pub fn set_end_stream(&mut self) { - self.0 |= END_STREAM; - } - - pub fn is_end_headers(&self) -> bool { - self.0 & END_HEADERS == END_HEADERS - } - - pub fn set_end_headers(&mut self) { - self.0 |= END_HEADERS; - } - - pub fn is_padded(&self) -> bool { - self.0 & PADDED == PADDED - } - - pub fn is_priority(&self) -> bool { - self.0 & PRIORITY == PRIORITY - } -} - -impl Default for HeadersFlag { - /// Returns a `HeadersFlag` value with `END_HEADERS` set. - fn default() -> Self { - HeadersFlag(END_HEADERS) - } -} - -impl From for u8 { - fn from(src: HeadersFlag) -> u8 { - src.0 - } -} - -impl fmt::Debug for HeadersFlag { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - util::debug_flags(fmt, self.0) - .flag_if(self.is_end_headers(), "END_HEADERS") - .flag_if(self.is_end_stream(), "END_STREAM") - .flag_if(self.is_padded(), "PADDED") - .flag_if(self.is_priority(), "PRIORITY") - .finish() - } -} - -// ===== impl PushPromiseFlag ===== - -impl PushPromiseFlag { - pub fn empty() -> PushPromiseFlag { - PushPromiseFlag(0) - } - - pub fn load(bits: u8) -> PushPromiseFlag { - PushPromiseFlag(bits & ALL) - } - - pub fn is_end_headers(&self) -> bool { - self.0 & END_HEADERS == END_HEADERS - } - - pub fn set_end_headers(&mut self) { - self.0 |= END_HEADERS; - } - - pub fn is_padded(&self) -> bool { - self.0 & PADDED == PADDED - } -} - -impl Default for PushPromiseFlag { - /// Returns a `PushPromiseFlag` value with `END_HEADERS` set. 
- fn default() -> Self { - PushPromiseFlag(END_HEADERS) - } -} - -impl From for u8 { - fn from(src: PushPromiseFlag) -> u8 { - src.0 - } -} - -impl fmt::Debug for PushPromiseFlag { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - util::debug_flags(fmt, self.0) - .flag_if(self.is_end_headers(), "END_HEADERS") - .flag_if(self.is_padded(), "PADDED") - .finish() - } -} - -// ===== HeaderBlock ===== - -impl HeaderBlock { - fn load( - &mut self, - src: &mut BytesMut, - max_header_list_size: usize, - decoder: &mut hpack::Decoder, - ) -> Result<(), Error> { - let mut reg = !self.fields.is_empty(); - let mut malformed = false; - let mut headers_size = self.calculate_header_list_size(); - - macro_rules! set_pseudo { - ($field:ident, $val:expr) => {{ - if reg { - tracing::trace!("load_hpack; header malformed -- pseudo not at head of block"); - malformed = true; - } else if self.pseudo.$field.is_some() { - tracing::trace!("load_hpack; header malformed -- repeated pseudo"); - malformed = true; - } else { - let __val = $val; - headers_size += - decoded_header_size(stringify!($field).len() + 1, __val.as_str().len()); - if headers_size < max_header_list_size { - self.pseudo.$field = Some(__val); - } else if !self.is_over_size { - tracing::trace!("load_hpack; header list size over max"); - self.is_over_size = true; - } - } - }}; - } - - let mut cursor = Cursor::new(src); - - // If the header frame is malformed, we still have to continue decoding - // the headers. A malformed header frame is a stream level error, but - // the hpack state is connection level. In order to maintain correct - // state for other streams, the hpack decoding process must complete. - let res = decoder.decode(&mut cursor, |header| { - use crate::hpack::Header::*; - - match header { - Field { name, value } => { - // Connection level header fields are not supported and must - // result in a protocol error. 
- - if name == header::CONNECTION - || name == header::TRANSFER_ENCODING - || name == header::UPGRADE - || name == "keep-alive" - || name == "proxy-connection" - { - tracing::trace!("load_hpack; connection level header"); - malformed = true; - } else if name == header::TE && value != "trailers" { - tracing::trace!( - "load_hpack; TE header not set to trailers; val={:?}", - value - ); - malformed = true; - } else { - reg = true; - - headers_size += decoded_header_size(name.as_str().len(), value.len()); - if headers_size < max_header_list_size { - self.fields.append(name, value); - } else if !self.is_over_size { - tracing::trace!("load_hpack; header list size over max"); - self.is_over_size = true; - } - } - } - Authority(v) => set_pseudo!(authority, v), - Method(v) => set_pseudo!(method, v), - Scheme(v) => set_pseudo!(scheme, v), - Path(v) => set_pseudo!(path, v), - Protocol(v) => set_pseudo!(protocol, v), - Status(v) => set_pseudo!(status, v), - } - }); - - if let Err(e) = res { - tracing::trace!("hpack decoding error; err={:?}", e); - return Err(e.into()); - } - - if malformed { - tracing::trace!("malformed message"); - return Err(Error::MalformedMessage); - } - - Ok(()) - } - - fn into_encoding(self, encoder: &mut hpack::Encoder) -> EncodingHeaderBlock { - let mut hpack = BytesMut::new(); - let headers = Iter { - pseudo: Some(self.pseudo), - fields: self.fields.into_iter(), - }; - - encoder.encode(headers, &mut hpack); - - EncodingHeaderBlock { - hpack: hpack.freeze(), - } - } - - /// Calculates the size of the currently decoded header list. - /// - /// According to http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE - /// - /// > The value is based on the uncompressed size of header fields, - /// > including the length of the name and value in octets plus an - /// > overhead of 32 octets for each header field. - fn calculate_header_list_size(&self) -> usize { - macro_rules! 
pseudo_size { - ($name:ident) => {{ - self.pseudo - .$name - .as_ref() - .map(|m| decoded_header_size(stringify!($name).len() + 1, m.as_str().len())) - .unwrap_or(0) - }}; - } - - pseudo_size!(method) - + pseudo_size!(scheme) - + pseudo_size!(status) - + pseudo_size!(authority) - + pseudo_size!(path) - + self - .fields - .iter() - .map(|(name, value)| decoded_header_size(name.as_str().len(), value.len())) - .sum::() - } -} - -fn decoded_header_size(name: usize, value: usize) -> usize { - name + value + 32 -} - -#[cfg(test)] -mod test { - use std::iter::FromIterator; - - use http::HeaderValue; - - use super::*; - use crate::frame; - use crate::hpack::{huffman, Encoder}; - - #[test] - fn test_nameless_header_at_resume() { - let mut encoder = Encoder::default(); - let mut dst = BytesMut::new(); - - let headers = Headers::new( - StreamId::ZERO, - Default::default(), - HeaderMap::from_iter(vec![ - ( - HeaderName::from_static("hello"), - HeaderValue::from_static("world"), - ), - ( - HeaderName::from_static("hello"), - HeaderValue::from_static("zomg"), - ), - ( - HeaderName::from_static("hello"), - HeaderValue::from_static("sup"), - ), - ]), - ); - - let continuation = headers - .encode(&mut encoder, &mut (&mut dst).limit(frame::HEADER_LEN + 8)) - .unwrap(); - - assert_eq!(17, dst.len()); - assert_eq!([0, 0, 8, 1, 0, 0, 0, 0, 0], &dst[0..9]); - assert_eq!(&[0x40, 0x80 | 4], &dst[9..11]); - assert_eq!("hello", huff_decode(&dst[11..15])); - assert_eq!(0x80 | 4, dst[15]); - - let mut world = dst[16..17].to_owned(); - - dst.clear(); - - assert!(continuation - .encode(&mut (&mut dst).limit(frame::HEADER_LEN + 16)) - .is_none()); - - world.extend_from_slice(&dst[9..12]); - assert_eq!("world", huff_decode(&world)); - - assert_eq!(24, dst.len()); - assert_eq!([0, 0, 15, 9, 4, 0, 0, 0, 0], &dst[0..9]); - - // // Next is not indexed - assert_eq!(&[15, 47, 0x80 | 3], &dst[12..15]); - assert_eq!("zomg", huff_decode(&dst[15..18])); - assert_eq!(&[15, 47, 0x80 | 3], &dst[18..21]); - 
assert_eq!("sup", huff_decode(&dst[21..])); - } - - fn huff_decode(src: &[u8]) -> BytesMut { - let mut buf = BytesMut::new(); - huffman::decode(src, &mut buf).unwrap() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/head.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/head.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/head.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/head.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,94 +0,0 @@ -use super::StreamId; - -use bytes::BufMut; - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct Head { - kind: Kind, - flag: u8, - stream_id: StreamId, -} - -#[repr(u8)] -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum Kind { - Data = 0, - Headers = 1, - Priority = 2, - Reset = 3, - Settings = 4, - PushPromise = 5, - Ping = 6, - GoAway = 7, - WindowUpdate = 8, - Continuation = 9, - Unknown, -} - -// ===== impl Head ===== - -impl Head { - pub fn new(kind: Kind, flag: u8, stream_id: StreamId) -> Head { - Head { - kind, - flag, - stream_id, - } - } - - /// Parse an HTTP/2 frame header - pub fn parse(header: &[u8]) -> Head { - let (stream_id, _) = StreamId::parse(&header[5..]); - - Head { - kind: Kind::new(header[3]), - flag: header[4], - stream_id, - } - } - - pub fn stream_id(&self) -> StreamId { - self.stream_id - } - - pub fn kind(&self) -> Kind { - self.kind - } - - pub fn flag(&self) -> u8 { - self.flag - } - - pub fn encode_len(&self) -> usize { - super::HEADER_LEN - } - - pub fn encode(&self, payload_len: usize, dst: &mut T) { - debug_assert!(self.encode_len() <= dst.remaining_mut()); - - dst.put_uint(payload_len as u64, 3); - dst.put_u8(self.kind as u8); - dst.put_u8(self.flag); - dst.put_u32(self.stream_id.into()); - } -} - -// ===== impl Kind ===== - -impl Kind { - pub fn new(byte: u8) -> Kind { - match byte { - 0 => Kind::Data, - 1 => Kind::Headers, - 2 => Kind::Priority, - 3 => Kind::Reset, - 4 => Kind::Settings, - 5 => Kind::PushPromise, - 6 => Kind::Ping, - 7 => 
Kind::GoAway, - 8 => Kind::WindowUpdate, - 9 => Kind::Continuation, - _ => Kind::Unknown, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/mod.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/mod.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,171 +0,0 @@ -use crate::hpack; - -use bytes::Bytes; - -use std::fmt; - -/// A helper macro that unpacks a sequence of 4 bytes found in the buffer with -/// the given identifier, starting at the given offset, into the given integer -/// type. Obviously, the integer type should be able to support at least 4 -/// bytes. -/// -/// # Examples -/// -/// ```ignore -/// # // We ignore this doctest because the macro is not exported. -/// let buf: [u8; 4] = [0, 0, 0, 1]; -/// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32)); -/// ``` -macro_rules! unpack_octets_4 { - // TODO: Get rid of this macro - ($buf:expr, $offset:expr, $tip:ty) => { - (($buf[$offset + 0] as $tip) << 24) - | (($buf[$offset + 1] as $tip) << 16) - | (($buf[$offset + 2] as $tip) << 8) - | (($buf[$offset + 3] as $tip) << 0) - }; -} - -#[cfg(test)] -mod tests { - #[test] - fn test_unpack_octets_4() { - let buf: [u8; 4] = [0, 0, 0, 1]; - assert_eq!(1u32, unpack_octets_4!(buf, 0, u32)); - } -} - -mod data; -mod go_away; -mod head; -mod headers; -mod ping; -mod priority; -mod reason; -mod reset; -mod settings; -mod stream_id; -mod util; -mod window_update; - -pub use self::data::Data; -pub use self::go_away::GoAway; -pub use self::head::{Head, Kind}; -pub use self::headers::{ - parse_u64, Continuation, Headers, Pseudo, PushPromise, PushPromiseHeaderError, -}; -pub use self::ping::Ping; -pub use self::priority::{Priority, StreamDependency}; -pub use self::reason::Reason; -pub use self::reset::Reset; -pub use self::settings::Settings; -pub use self::stream_id::{StreamId, StreamIdOverflow}; -pub use 
self::window_update::WindowUpdate; - -#[cfg(feature = "unstable")] -pub use crate::hpack::BytesStr; - -// Re-export some constants - -pub use self::settings::{ - DEFAULT_INITIAL_WINDOW_SIZE, DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, - MAX_MAX_FRAME_SIZE, -}; - -pub type FrameSize = u32; - -pub const HEADER_LEN: usize = 9; - -#[derive(Eq, PartialEq)] -pub enum Frame { - Data(Data), - Headers(Headers), - Priority(Priority), - PushPromise(PushPromise), - Settings(Settings), - Ping(Ping), - GoAway(GoAway), - WindowUpdate(WindowUpdate), - Reset(Reset), -} - -impl Frame { - pub fn map(self, f: F) -> Frame - where - F: FnOnce(T) -> U, - { - use self::Frame::*; - - match self { - Data(frame) => frame.map(f).into(), - Headers(frame) => frame.into(), - Priority(frame) => frame.into(), - PushPromise(frame) => frame.into(), - Settings(frame) => frame.into(), - Ping(frame) => frame.into(), - GoAway(frame) => frame.into(), - WindowUpdate(frame) => frame.into(), - Reset(frame) => frame.into(), - } - } -} - -impl fmt::Debug for Frame { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::Frame::*; - - match *self { - Data(ref frame) => fmt::Debug::fmt(frame, fmt), - Headers(ref frame) => fmt::Debug::fmt(frame, fmt), - Priority(ref frame) => fmt::Debug::fmt(frame, fmt), - PushPromise(ref frame) => fmt::Debug::fmt(frame, fmt), - Settings(ref frame) => fmt::Debug::fmt(frame, fmt), - Ping(ref frame) => fmt::Debug::fmt(frame, fmt), - GoAway(ref frame) => fmt::Debug::fmt(frame, fmt), - WindowUpdate(ref frame) => fmt::Debug::fmt(frame, fmt), - Reset(ref frame) => fmt::Debug::fmt(frame, fmt), - } - } -} - -/// Errors that can occur during parsing an HTTP/2 frame. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum Error { - /// A length value other than 8 was set on a PING message. - BadFrameSize, - - /// The padding length was larger than the frame-header-specified - /// length of the payload. 
- TooMuchPadding, - - /// An invalid setting value was provided - InvalidSettingValue, - - /// An invalid window update value - InvalidWindowUpdateValue, - - /// The payload length specified by the frame header was not the - /// value necessary for the specific frame type. - InvalidPayloadLength, - - /// Received a payload with an ACK settings frame - InvalidPayloadAckSettings, - - /// An invalid stream identifier was provided. - /// - /// This is returned if a SETTINGS or PING frame is received with a stream - /// identifier other than zero. - InvalidStreamId, - - /// A request or response is malformed. - MalformedMessage, - - /// An invalid stream dependency ID was provided - /// - /// This is returned if a HEADERS or PRIORITY frame is received with an - /// invalid stream identifier. - InvalidDependencyId, - - /// Failed to perform HPACK decoding - Hpack(hpack::DecoderError), -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/ping.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/ping.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/ping.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/ping.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,102 +0,0 @@ -use crate::frame::{Error, Frame, Head, Kind, StreamId}; -use bytes::BufMut; - -const ACK_FLAG: u8 = 0x1; - -pub type Payload = [u8; 8]; - -#[derive(Debug, Eq, PartialEq)] -pub struct Ping { - ack: bool, - payload: Payload, -} - -// This was just 8 randomly generated bytes. We use something besides just -// zeroes to distinguish this specific PING from any other. 
-const SHUTDOWN_PAYLOAD: Payload = [0x0b, 0x7b, 0xa2, 0xf0, 0x8b, 0x9b, 0xfe, 0x54]; -const USER_PAYLOAD: Payload = [0x3b, 0x7c, 0xdb, 0x7a, 0x0b, 0x87, 0x16, 0xb4]; - -impl Ping { - #[cfg(feature = "unstable")] - pub const SHUTDOWN: Payload = SHUTDOWN_PAYLOAD; - - #[cfg(not(feature = "unstable"))] - pub(crate) const SHUTDOWN: Payload = SHUTDOWN_PAYLOAD; - - #[cfg(feature = "unstable")] - pub const USER: Payload = USER_PAYLOAD; - - #[cfg(not(feature = "unstable"))] - pub(crate) const USER: Payload = USER_PAYLOAD; - - pub fn new(payload: Payload) -> Ping { - Ping { - ack: false, - payload, - } - } - - pub fn pong(payload: Payload) -> Ping { - Ping { ack: true, payload } - } - - pub fn is_ack(&self) -> bool { - self.ack - } - - pub fn payload(&self) -> &Payload { - &self.payload - } - - pub fn into_payload(self) -> Payload { - self.payload - } - - /// Builds a `Ping` frame from a raw frame. - pub fn load(head: Head, bytes: &[u8]) -> Result { - debug_assert_eq!(head.kind(), crate::frame::Kind::Ping); - - // PING frames are not associated with any individual stream. If a PING - // frame is received with a stream identifier field value other than - // 0x0, the recipient MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - if !head.stream_id().is_zero() { - return Err(Error::InvalidStreamId); - } - - // In addition to the frame header, PING frames MUST contain 8 octets of opaque - // data in the payload. - if bytes.len() != 8 { - return Err(Error::BadFrameSize); - } - - let mut payload = [0; 8]; - payload.copy_from_slice(bytes); - - // The PING frame defines the following flags: - // - // ACK (0x1): When set, bit 0 indicates that this PING frame is a PING - // response. An endpoint MUST set this flag in PING responses. An - // endpoint MUST NOT respond to PING frames containing this flag. 
- let ack = head.flag() & ACK_FLAG != 0; - - Ok(Ping { ack, payload }) - } - - pub fn encode(&self, dst: &mut B) { - let sz = self.payload.len(); - tracing::trace!("encoding PING; ack={} len={}", self.ack, sz); - - let flags = if self.ack { ACK_FLAG } else { 0 }; - let head = Head::new(Kind::Ping, flags, StreamId::zero()); - - head.encode(sz, dst); - dst.put_slice(&self.payload); - } -} - -impl From for Frame { - fn from(src: Ping) -> Frame { - Frame::Ping(src) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/priority.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/priority.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/priority.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/priority.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -use crate::frame::*; - -#[derive(Debug, Eq, PartialEq)] -pub struct Priority { - stream_id: StreamId, - dependency: StreamDependency, -} - -#[derive(Debug, Eq, PartialEq)] -pub struct StreamDependency { - /// The ID of the stream dependency target - dependency_id: StreamId, - - /// The weight for the stream. The value exposed (and set) here is always in - /// the range [0, 255], instead of [1, 256] (as defined in section 5.3.2.) - /// so that the value fits into a `u8`. - weight: u8, - - /// True if the stream dependency is exclusive. 
- is_exclusive: bool, -} - -impl Priority { - pub fn load(head: Head, payload: &[u8]) -> Result { - let dependency = StreamDependency::load(payload)?; - - if dependency.dependency_id() == head.stream_id() { - return Err(Error::InvalidDependencyId); - } - - Ok(Priority { - stream_id: head.stream_id(), - dependency, - }) - } -} - -impl From for Frame { - fn from(src: Priority) -> Self { - Frame::Priority(src) - } -} - -// ===== impl StreamDependency ===== - -impl StreamDependency { - pub fn new(dependency_id: StreamId, weight: u8, is_exclusive: bool) -> Self { - StreamDependency { - dependency_id, - weight, - is_exclusive, - } - } - - pub fn load(src: &[u8]) -> Result { - if src.len() != 5 { - return Err(Error::InvalidPayloadLength); - } - - // Parse the stream ID and exclusive flag - let (dependency_id, is_exclusive) = StreamId::parse(&src[..4]); - - // Read the weight - let weight = src[4]; - - Ok(StreamDependency::new(dependency_id, weight, is_exclusive)) - } - - pub fn dependency_id(&self) -> StreamId { - self.dependency_id - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/reason.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/reason.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/reason.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/reason.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,134 +0,0 @@ -use std::fmt; - -/// HTTP/2 error codes. -/// -/// Error codes are used in `RST_STREAM` and `GOAWAY` frames to convey the -/// reasons for the stream or connection error. For example, -/// [`SendStream::send_reset`] takes a `Reason` argument. Also, the `Error` type -/// may contain a `Reason`. -/// -/// Error codes share a common code space. Some error codes apply only to -/// streams, others apply only to connections, and others may apply to either. -/// See [RFC 7540] for more information. -/// -/// See [Error Codes in the spec][spec]. 
-/// -/// [spec]: http://httpwg.org/specs/rfc7540.html#ErrorCodes -/// [`SendStream::send_reset`]: struct.SendStream.html#method.send_reset -#[derive(PartialEq, Eq, Clone, Copy)] -pub struct Reason(u32); - -impl Reason { - /// The associated condition is not a result of an error. - /// - /// For example, a GOAWAY might include this code to indicate graceful - /// shutdown of a connection. - pub const NO_ERROR: Reason = Reason(0); - /// The endpoint detected an unspecific protocol error. - /// - /// This error is for use when a more specific error code is not available. - pub const PROTOCOL_ERROR: Reason = Reason(1); - /// The endpoint encountered an unexpected internal error. - pub const INTERNAL_ERROR: Reason = Reason(2); - /// The endpoint detected that its peer violated the flow-control protocol. - pub const FLOW_CONTROL_ERROR: Reason = Reason(3); - /// The endpoint sent a SETTINGS frame but did not receive a response in - /// a timely manner. - pub const SETTINGS_TIMEOUT: Reason = Reason(4); - /// The endpoint received a frame after a stream was half-closed. - pub const STREAM_CLOSED: Reason = Reason(5); - /// The endpoint received a frame with an invalid size. - pub const FRAME_SIZE_ERROR: Reason = Reason(6); - /// The endpoint refused the stream prior to performing any application - /// processing. - pub const REFUSED_STREAM: Reason = Reason(7); - /// Used by the endpoint to indicate that the stream is no longer needed. - pub const CANCEL: Reason = Reason(8); - /// The endpoint is unable to maintain the header compression context for - /// the connection. - pub const COMPRESSION_ERROR: Reason = Reason(9); - /// The connection established in response to a CONNECT request was reset - /// or abnormally closed. - pub const CONNECT_ERROR: Reason = Reason(10); - /// The endpoint detected that its peer is exhibiting a behavior that might - /// be generating excessive load. 
- pub const ENHANCE_YOUR_CALM: Reason = Reason(11); - /// The underlying transport has properties that do not meet minimum - /// security requirements. - pub const INADEQUATE_SECURITY: Reason = Reason(12); - /// The endpoint requires that HTTP/1.1 be used instead of HTTP/2. - pub const HTTP_1_1_REQUIRED: Reason = Reason(13); - - /// Get a string description of the error code. - pub fn description(&self) -> &str { - match self.0 { - 0 => "not a result of an error", - 1 => "unspecific protocol error detected", - 2 => "unexpected internal error encountered", - 3 => "flow-control protocol violated", - 4 => "settings ACK not received in timely manner", - 5 => "received frame when stream half-closed", - 6 => "frame with invalid size", - 7 => "refused stream before processing any application logic", - 8 => "stream no longer needed", - 9 => "unable to maintain the header compression context", - 10 => { - "connection established in response to a CONNECT request was reset or abnormally \ - closed" - } - 11 => "detected excessive load generating behavior", - 12 => "security properties do not meet minimum requirements", - 13 => "endpoint requires HTTP/1.1", - _ => "unknown reason", - } - } -} - -impl From for Reason { - fn from(src: u32) -> Reason { - Reason(src) - } -} - -impl From for u32 { - fn from(src: Reason) -> u32 { - src.0 - } -} - -impl fmt::Debug for Reason { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let name = match self.0 { - 0 => "NO_ERROR", - 1 => "PROTOCOL_ERROR", - 2 => "INTERNAL_ERROR", - 3 => "FLOW_CONTROL_ERROR", - 4 => "SETTINGS_TIMEOUT", - 5 => "STREAM_CLOSED", - 6 => "FRAME_SIZE_ERROR", - 7 => "REFUSED_STREAM", - 8 => "CANCEL", - 9 => "COMPRESSION_ERROR", - 10 => "CONNECT_ERROR", - 11 => "ENHANCE_YOUR_CALM", - 12 => "INADEQUATE_SECURITY", - 13 => "HTTP_1_1_REQUIRED", - other => return f.debug_tuple("Reason").field(&Hex(other)).finish(), - }; - f.write_str(name) - } -} - -struct Hex(u32); - -impl fmt::Debug for Hex { - fn fmt(&self, f: 
&mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(&self.0, f) - } -} - -impl fmt::Display for Reason { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{}", self.description()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/reset.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/reset.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/reset.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/reset.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -use crate::frame::{self, Error, Head, Kind, Reason, StreamId}; - -use bytes::BufMut; - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub struct Reset { - stream_id: StreamId, - error_code: Reason, -} - -impl Reset { - pub fn new(stream_id: StreamId, error: Reason) -> Reset { - Reset { - stream_id, - error_code: error, - } - } - - pub fn stream_id(&self) -> StreamId { - self.stream_id - } - - pub fn reason(&self) -> Reason { - self.error_code - } - - pub fn load(head: Head, payload: &[u8]) -> Result { - if payload.len() != 4 { - return Err(Error::InvalidPayloadLength); - } - - let error_code = unpack_octets_4!(payload, 0, u32); - - Ok(Reset { - stream_id: head.stream_id(), - error_code: error_code.into(), - }) - } - - pub fn encode(&self, dst: &mut B) { - tracing::trace!( - "encoding RESET; id={:?} code={:?}", - self.stream_id, - self.error_code - ); - let head = Head::new(Kind::Reset, 0, self.stream_id); - head.encode(4, dst); - dst.put_u32(self.error_code.into()); - } -} - -impl From for frame::Frame { - fn from(src: Reset) -> Self { - frame::Frame::Reset(src) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/settings.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/settings.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/settings.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/settings.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,389 +0,0 @@ -use std::fmt; - -use crate::frame::{util, Error, 
Frame, FrameSize, Head, Kind, StreamId}; -use bytes::{BufMut, BytesMut}; - -#[derive(Clone, Default, Eq, PartialEq)] -pub struct Settings { - flags: SettingsFlags, - // Fields - header_table_size: Option, - enable_push: Option, - max_concurrent_streams: Option, - initial_window_size: Option, - max_frame_size: Option, - max_header_list_size: Option, - enable_connect_protocol: Option, -} - -/// An enum that lists all valid settings that can be sent in a SETTINGS -/// frame. -/// -/// Each setting has a value that is a 32 bit unsigned integer (6.5.1.). -#[derive(Debug)] -pub enum Setting { - HeaderTableSize(u32), - EnablePush(u32), - MaxConcurrentStreams(u32), - InitialWindowSize(u32), - MaxFrameSize(u32), - MaxHeaderListSize(u32), - EnableConnectProtocol(u32), -} - -#[derive(Copy, Clone, Eq, PartialEq, Default)] -pub struct SettingsFlags(u8); - -const ACK: u8 = 0x1; -const ALL: u8 = ACK; - -/// The default value of SETTINGS_HEADER_TABLE_SIZE -pub const DEFAULT_SETTINGS_HEADER_TABLE_SIZE: usize = 4_096; - -/// The default value of SETTINGS_INITIAL_WINDOW_SIZE -pub const DEFAULT_INITIAL_WINDOW_SIZE: u32 = 65_535; - -/// The default value of MAX_FRAME_SIZE -pub const DEFAULT_MAX_FRAME_SIZE: FrameSize = 16_384; - -/// INITIAL_WINDOW_SIZE upper bound -pub const MAX_INITIAL_WINDOW_SIZE: usize = (1 << 31) - 1; - -/// MAX_FRAME_SIZE upper bound -pub const MAX_MAX_FRAME_SIZE: FrameSize = (1 << 24) - 1; - -// ===== impl Settings ===== - -impl Settings { - pub fn ack() -> Settings { - Settings { - flags: SettingsFlags::ack(), - ..Settings::default() - } - } - - pub fn is_ack(&self) -> bool { - self.flags.is_ack() - } - - pub fn initial_window_size(&self) -> Option { - self.initial_window_size - } - - pub fn set_initial_window_size(&mut self, size: Option) { - self.initial_window_size = size; - } - - pub fn max_concurrent_streams(&self) -> Option { - self.max_concurrent_streams - } - - pub fn set_max_concurrent_streams(&mut self, max: Option) { - self.max_concurrent_streams = 
max; - } - - pub fn max_frame_size(&self) -> Option { - self.max_frame_size - } - - pub fn set_max_frame_size(&mut self, size: Option) { - if let Some(val) = size { - assert!(DEFAULT_MAX_FRAME_SIZE <= val && val <= MAX_MAX_FRAME_SIZE); - } - self.max_frame_size = size; - } - - pub fn max_header_list_size(&self) -> Option { - self.max_header_list_size - } - - pub fn set_max_header_list_size(&mut self, size: Option) { - self.max_header_list_size = size; - } - - pub fn is_push_enabled(&self) -> Option { - self.enable_push.map(|val| val != 0) - } - - pub fn set_enable_push(&mut self, enable: bool) { - self.enable_push = Some(enable as u32); - } - - pub fn is_extended_connect_protocol_enabled(&self) -> Option { - self.enable_connect_protocol.map(|val| val != 0) - } - - pub fn set_enable_connect_protocol(&mut self, val: Option) { - self.enable_connect_protocol = val; - } - - pub fn header_table_size(&self) -> Option { - self.header_table_size - } - - pub fn set_header_table_size(&mut self, size: Option) { - self.header_table_size = size; - } - - pub fn load(head: Head, payload: &[u8]) -> Result { - use self::Setting::*; - - debug_assert_eq!(head.kind(), crate::frame::Kind::Settings); - - if !head.stream_id().is_zero() { - return Err(Error::InvalidStreamId); - } - - // Load the flag - let flag = SettingsFlags::load(head.flag()); - - if flag.is_ack() { - // Ensure that the payload is empty - if !payload.is_empty() { - return Err(Error::InvalidPayloadLength); - } - - // Return the ACK frame - return Ok(Settings::ack()); - } - - // Ensure the payload length is correct, each setting is 6 bytes long. 
- if payload.len() % 6 != 0 { - tracing::debug!("invalid settings payload length; len={:?}", payload.len()); - return Err(Error::InvalidPayloadAckSettings); - } - - let mut settings = Settings::default(); - debug_assert!(!settings.flags.is_ack()); - - for raw in payload.chunks(6) { - match Setting::load(raw) { - Some(HeaderTableSize(val)) => { - settings.header_table_size = Some(val); - } - Some(EnablePush(val)) => match val { - 0 | 1 => { - settings.enable_push = Some(val); - } - _ => { - return Err(Error::InvalidSettingValue); - } - }, - Some(MaxConcurrentStreams(val)) => { - settings.max_concurrent_streams = Some(val); - } - Some(InitialWindowSize(val)) => { - if val as usize > MAX_INITIAL_WINDOW_SIZE { - return Err(Error::InvalidSettingValue); - } else { - settings.initial_window_size = Some(val); - } - } - Some(MaxFrameSize(val)) => { - if DEFAULT_MAX_FRAME_SIZE <= val && val <= MAX_MAX_FRAME_SIZE { - settings.max_frame_size = Some(val); - } else { - return Err(Error::InvalidSettingValue); - } - } - Some(MaxHeaderListSize(val)) => { - settings.max_header_list_size = Some(val); - } - Some(EnableConnectProtocol(val)) => match val { - 0 | 1 => { - settings.enable_connect_protocol = Some(val); - } - _ => { - return Err(Error::InvalidSettingValue); - } - }, - None => {} - } - } - - Ok(settings) - } - - fn payload_len(&self) -> usize { - let mut len = 0; - self.for_each(|_| len += 6); - len - } - - pub fn encode(&self, dst: &mut BytesMut) { - // Create & encode an appropriate frame head - let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero()); - let payload_len = self.payload_len(); - - tracing::trace!("encoding SETTINGS; len={}", payload_len); - - head.encode(payload_len, dst); - - // Encode the settings - self.for_each(|setting| { - tracing::trace!("encoding setting; val={:?}", setting); - setting.encode(dst) - }); - } - - fn for_each(&self, mut f: F) { - use self::Setting::*; - - if let Some(v) = self.header_table_size { - 
f(HeaderTableSize(v)); - } - - if let Some(v) = self.enable_push { - f(EnablePush(v)); - } - - if let Some(v) = self.max_concurrent_streams { - f(MaxConcurrentStreams(v)); - } - - if let Some(v) = self.initial_window_size { - f(InitialWindowSize(v)); - } - - if let Some(v) = self.max_frame_size { - f(MaxFrameSize(v)); - } - - if let Some(v) = self.max_header_list_size { - f(MaxHeaderListSize(v)); - } - - if let Some(v) = self.enable_connect_protocol { - f(EnableConnectProtocol(v)); - } - } -} - -impl From for Frame { - fn from(src: Settings) -> Frame { - Frame::Settings(src) - } -} - -impl fmt::Debug for Settings { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut builder = f.debug_struct("Settings"); - builder.field("flags", &self.flags); - - self.for_each(|setting| match setting { - Setting::EnablePush(v) => { - builder.field("enable_push", &v); - } - Setting::HeaderTableSize(v) => { - builder.field("header_table_size", &v); - } - Setting::InitialWindowSize(v) => { - builder.field("initial_window_size", &v); - } - Setting::MaxConcurrentStreams(v) => { - builder.field("max_concurrent_streams", &v); - } - Setting::MaxFrameSize(v) => { - builder.field("max_frame_size", &v); - } - Setting::MaxHeaderListSize(v) => { - builder.field("max_header_list_size", &v); - } - Setting::EnableConnectProtocol(v) => { - builder.field("enable_connect_protocol", &v); - } - }); - - builder.finish() - } -} - -// ===== impl Setting ===== - -impl Setting { - /// Creates a new `Setting` with the correct variant corresponding to the - /// given setting id, based on the settings IDs defined in section - /// 6.5.2. 
- pub fn from_id(id: u16, val: u32) -> Option { - use self::Setting::*; - - match id { - 1 => Some(HeaderTableSize(val)), - 2 => Some(EnablePush(val)), - 3 => Some(MaxConcurrentStreams(val)), - 4 => Some(InitialWindowSize(val)), - 5 => Some(MaxFrameSize(val)), - 6 => Some(MaxHeaderListSize(val)), - 8 => Some(EnableConnectProtocol(val)), - _ => None, - } - } - - /// Creates a new `Setting` by parsing the given buffer of 6 bytes, which - /// contains the raw byte representation of the setting, according to the - /// "SETTINGS format" defined in section 6.5.1. - /// - /// The `raw` parameter should have length at least 6 bytes, since the - /// length of the raw setting is exactly 6 bytes. - /// - /// # Panics - /// - /// If given a buffer shorter than 6 bytes, the function will panic. - fn load(raw: &[u8]) -> Option { - let id: u16 = (u16::from(raw[0]) << 8) | u16::from(raw[1]); - let val: u32 = unpack_octets_4!(raw, 2, u32); - - Setting::from_id(id, val) - } - - fn encode(&self, dst: &mut BytesMut) { - use self::Setting::*; - - let (kind, val) = match *self { - HeaderTableSize(v) => (1, v), - EnablePush(v) => (2, v), - MaxConcurrentStreams(v) => (3, v), - InitialWindowSize(v) => (4, v), - MaxFrameSize(v) => (5, v), - MaxHeaderListSize(v) => (6, v), - EnableConnectProtocol(v) => (8, v), - }; - - dst.put_u16(kind); - dst.put_u32(val); - } -} - -// ===== impl SettingsFlags ===== - -impl SettingsFlags { - pub fn empty() -> SettingsFlags { - SettingsFlags(0) - } - - pub fn load(bits: u8) -> SettingsFlags { - SettingsFlags(bits & ALL) - } - - pub fn ack() -> SettingsFlags { - SettingsFlags(ACK) - } - - pub fn is_ack(&self) -> bool { - self.0 & ACK == ACK - } -} - -impl From for u8 { - fn from(src: SettingsFlags) -> u8 { - src.0 - } -} - -impl fmt::Debug for SettingsFlags { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - util::debug_flags(f, self.0) - .flag_if(self.is_ack(), "ACK") - .finish() - } -} diff -Nru 
s390-tools-2.31.0/rust-vendor/h2/src/frame/stream_id.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/stream_id.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/stream_id.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/stream_id.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,96 +0,0 @@ -use std::u32; - -/// A stream identifier, as described in [Section 5.1.1] of RFC 7540. -/// -/// Streams are identified with an unsigned 31-bit integer. Streams -/// initiated by a client MUST use odd-numbered stream identifiers; those -/// initiated by the server MUST use even-numbered stream identifiers. A -/// stream identifier of zero (0x0) is used for connection control -/// messages; the stream identifier of zero cannot be used to establish a -/// new stream. -/// -/// [Section 5.1.1]: https://tools.ietf.org/html/rfc7540#section-5.1.1 -#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct StreamId(u32); - -#[derive(Debug, Copy, Clone)] -pub struct StreamIdOverflow; - -const STREAM_ID_MASK: u32 = 1 << 31; - -impl StreamId { - /// Stream ID 0. - pub const ZERO: StreamId = StreamId(0); - - /// The maximum allowed stream ID. - pub const MAX: StreamId = StreamId(u32::MAX >> 1); - - /// Parse the stream ID - #[inline] - pub fn parse(buf: &[u8]) -> (StreamId, bool) { - let mut ubuf = [0; 4]; - ubuf.copy_from_slice(&buf[0..4]); - let unpacked = u32::from_be_bytes(ubuf); - let flag = unpacked & STREAM_ID_MASK == STREAM_ID_MASK; - - // Now clear the most significant bit, as that is reserved and MUST be - // ignored when received. - (StreamId(unpacked & !STREAM_ID_MASK), flag) - } - - /// Returns true if this stream ID corresponds to a stream that - /// was initiated by the client. - pub fn is_client_initiated(&self) -> bool { - let id = self.0; - id != 0 && id % 2 == 1 - } - - /// Returns true if this stream ID corresponds to a stream that - /// was initiated by the server. 
- pub fn is_server_initiated(&self) -> bool { - let id = self.0; - id != 0 && id % 2 == 0 - } - - /// Return a new `StreamId` for stream 0. - #[inline] - pub fn zero() -> StreamId { - StreamId::ZERO - } - - /// Returns true if this stream ID is zero. - pub fn is_zero(&self) -> bool { - self.0 == 0 - } - - /// Returns the next stream ID initiated by the same peer as this stream - /// ID, or an error if incrementing this stream ID would overflow the - /// maximum. - pub fn next_id(&self) -> Result { - let next = self.0 + 2; - if next > StreamId::MAX.0 { - Err(StreamIdOverflow) - } else { - Ok(StreamId(next)) - } - } -} - -impl From for StreamId { - fn from(src: u32) -> Self { - assert_eq!(src & STREAM_ID_MASK, 0, "invalid stream ID -- MSB is set"); - StreamId(src) - } -} - -impl From for u32 { - fn from(src: StreamId) -> Self { - src.0 - } -} - -impl PartialEq for StreamId { - fn eq(&self, other: &u32) -> bool { - self.0 == *other - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/util.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/util.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/util.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,79 +0,0 @@ -use std::fmt; - -use super::Error; -use bytes::Bytes; - -/// Strip padding from the given payload. -/// -/// It is assumed that the frame had the padded flag set. This means that the -/// first byte is the length of the padding with that many -/// 0 bytes expected to follow the actual payload. -/// -/// # Returns -/// -/// A slice of the given payload where the actual one is found and the length -/// of the padding. -/// -/// If the padded payload is invalid (e.g. the length of the padding is equal -/// to the total length), returns `None`. 
-pub fn strip_padding(payload: &mut Bytes) -> Result { - let payload_len = payload.len(); - if payload_len == 0 { - // If this is the case, the frame is invalid as no padding length can be - // extracted, even though the frame should be padded. - return Err(Error::TooMuchPadding); - } - - let pad_len = payload[0] as usize; - - if pad_len >= payload_len { - // This is invalid: the padding length MUST be less than the - // total frame size. - return Err(Error::TooMuchPadding); - } - - let _ = payload.split_to(1); - let _ = payload.split_off(payload_len - pad_len - 1); - - Ok(pad_len as u8) -} - -pub(super) fn debug_flags<'a, 'f: 'a>( - fmt: &'a mut fmt::Formatter<'f>, - bits: u8, -) -> DebugFlags<'a, 'f> { - let result = write!(fmt, "({:#x}", bits); - DebugFlags { - fmt, - result, - started: false, - } -} - -pub(super) struct DebugFlags<'a, 'f: 'a> { - fmt: &'a mut fmt::Formatter<'f>, - result: fmt::Result, - started: bool, -} - -impl<'a, 'f: 'a> DebugFlags<'a, 'f> { - pub(super) fn flag_if(&mut self, enabled: bool, name: &str) -> &mut Self { - if enabled { - self.result = self.result.and_then(|()| { - let prefix = if self.started { - " | " - } else { - self.started = true; - ": " - }; - - write!(self.fmt, "{}{}", prefix, name) - }); - } - self - } - - pub(super) fn finish(&mut self) -> fmt::Result { - self.result.and_then(|()| write!(self.fmt, ")")) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/frame/window_update.rs s390-tools-2.33.1/rust-vendor/h2/src/frame/window_update.rs --- s390-tools-2.31.0/rust-vendor/h2/src/frame/window_update.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/frame/window_update.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,62 +0,0 @@ -use crate::frame::{self, Error, Head, Kind, StreamId}; - -use bytes::BufMut; - -const SIZE_INCREMENT_MASK: u32 = 1 << 31; - -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub struct WindowUpdate { - stream_id: StreamId, - size_increment: u32, -} - -impl WindowUpdate { - 
pub fn new(stream_id: StreamId, size_increment: u32) -> WindowUpdate { - WindowUpdate { - stream_id, - size_increment, - } - } - - pub fn stream_id(&self) -> StreamId { - self.stream_id - } - - pub fn size_increment(&self) -> u32 { - self.size_increment - } - - /// Builds a `WindowUpdate` frame from a raw frame. - pub fn load(head: Head, payload: &[u8]) -> Result { - debug_assert_eq!(head.kind(), crate::frame::Kind::WindowUpdate); - if payload.len() != 4 { - return Err(Error::BadFrameSize); - } - - // Clear the most significant bit, as that is reserved and MUST be ignored - // when received. - let size_increment = unpack_octets_4!(payload, 0, u32) & !SIZE_INCREMENT_MASK; - - if size_increment == 0 { - return Err(Error::InvalidWindowUpdateValue); - } - - Ok(WindowUpdate { - stream_id: head.stream_id(), - size_increment, - }) - } - - pub fn encode(&self, dst: &mut B) { - tracing::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); - let head = Head::new(Kind::WindowUpdate, 0, self.stream_id); - head.encode(4, dst); - dst.put_u32(self.size_increment); - } -} - -impl From for frame::Frame { - fn from(src: WindowUpdate) -> Self { - frame::Frame::WindowUpdate(src) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/fuzz_bridge.rs s390-tools-2.33.1/rust-vendor/h2/src/fuzz_bridge.rs --- s390-tools-2.31.0/rust-vendor/h2/src/fuzz_bridge.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/fuzz_bridge.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,28 +0,0 @@ -#[cfg(fuzzing)] -pub mod fuzz_logic { - use crate::hpack; - use bytes::BytesMut; - use http::header::HeaderName; - use std::io::Cursor; - - pub fn fuzz_hpack(data_: &[u8]) { - let mut decoder_ = hpack::Decoder::new(0); - let mut buf = BytesMut::new(); - buf.extend(data_); - let _dec_res = decoder_.decode(&mut Cursor::new(&mut buf), |_h| {}); - - if let Ok(s) = std::str::from_utf8(data_) { - if let Ok(h) = http::Method::from_bytes(s.as_bytes()) { - let m_ = hpack::Header::Method(h); - let 
mut encoder = hpack::Encoder::new(0, 0); - let _res = encode(&mut encoder, vec![m_]); - } - } - } - - fn encode(e: &mut hpack::Encoder, hdrs: Vec>>) -> BytesMut { - let mut dst = BytesMut::with_capacity(1024); - e.encode(&mut hdrs.into_iter(), &mut dst); - dst - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/decoder.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/decoder.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/decoder.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/decoder.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,937 +0,0 @@ -use super::{header::BytesStr, huffman, Header}; -use crate::frame; - -use bytes::{Buf, Bytes, BytesMut}; -use http::header; -use http::method::{self, Method}; -use http::status::{self, StatusCode}; - -use std::cmp; -use std::collections::VecDeque; -use std::io::Cursor; -use std::str::Utf8Error; - -/// Decodes headers using HPACK -#[derive(Debug)] -pub struct Decoder { - // Protocol indicated that the max table size will update - max_size_update: Option, - last_max_update: usize, - table: Table, - buffer: BytesMut, -} - -/// Represents all errors that can be encountered while performing the decoding -/// of an HPACK header set. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum DecoderError { - InvalidRepresentation, - InvalidIntegerPrefix, - InvalidTableIndex, - InvalidHuffmanCode, - InvalidUtf8, - InvalidStatusCode, - InvalidPseudoheader, - InvalidMaxDynamicSize, - IntegerOverflow, - NeedMore(NeedMore), -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum NeedMore { - UnexpectedEndOfStream, - IntegerUnderflow, - StringUnderflow, -} - -enum Representation { - /// Indexed header field representation - /// - /// An indexed header field representation identifies an entry in either the - /// static table or the dynamic table (see Section 2.3). 
- /// - /// # Header encoding - /// - /// ```text - /// 0 1 2 3 4 5 6 7 - /// +---+---+---+---+---+---+---+---+ - /// | 1 | Index (7+) | - /// +---+---------------------------+ - /// ``` - Indexed, - - /// Literal Header Field with Incremental Indexing - /// - /// A literal header field with incremental indexing representation results - /// in appending a header field to the decoded header list and inserting it - /// as a new entry into the dynamic table. - /// - /// # Header encoding - /// - /// ```text - /// 0 1 2 3 4 5 6 7 - /// +---+---+---+---+---+---+---+---+ - /// | 0 | 1 | Index (6+) | - /// +---+---+-----------------------+ - /// | H | Value Length (7+) | - /// +---+---------------------------+ - /// | Value String (Length octets) | - /// +-------------------------------+ - /// ``` - LiteralWithIndexing, - - /// Literal Header Field without Indexing - /// - /// A literal header field without indexing representation results in - /// appending a header field to the decoded header list without altering the - /// dynamic table. - /// - /// # Header encoding - /// - /// ```text - /// 0 1 2 3 4 5 6 7 - /// +---+---+---+---+---+---+---+---+ - /// | 0 | 0 | 0 | 0 | Index (4+) | - /// +---+---+-----------------------+ - /// | H | Value Length (7+) | - /// +---+---------------------------+ - /// | Value String (Length octets) | - /// +-------------------------------+ - /// ``` - LiteralWithoutIndexing, - - /// Literal Header Field Never Indexed - /// - /// A literal header field never-indexed representation results in appending - /// a header field to the decoded header list without altering the dynamic - /// table. Intermediaries MUST use the same representation for encoding this - /// header field. 
- /// - /// ```text - /// 0 1 2 3 4 5 6 7 - /// +---+---+---+---+---+---+---+---+ - /// | 0 | 0 | 0 | 1 | Index (4+) | - /// +---+---+-----------------------+ - /// | H | Value Length (7+) | - /// +---+---------------------------+ - /// | Value String (Length octets) | - /// +-------------------------------+ - /// ``` - LiteralNeverIndexed, - - /// Dynamic Table Size Update - /// - /// A dynamic table size update signals a change to the size of the dynamic - /// table. - /// - /// # Header encoding - /// - /// ```text - /// 0 1 2 3 4 5 6 7 - /// +---+---+---+---+---+---+---+---+ - /// | 0 | 0 | 1 | Max size (5+) | - /// +---+---------------------------+ - /// ``` - SizeUpdate, -} - -#[derive(Debug)] -struct Table { - entries: VecDeque
, - size: usize, - max_size: usize, -} - -struct StringMarker { - offset: usize, - len: usize, - string: Option, -} - -// ===== impl Decoder ===== - -impl Decoder { - /// Creates a new `Decoder` with all settings set to default values. - pub fn new(size: usize) -> Decoder { - Decoder { - max_size_update: None, - last_max_update: size, - table: Table::new(size), - buffer: BytesMut::with_capacity(4096), - } - } - - /// Queues a potential size update - #[allow(dead_code)] - pub fn queue_size_update(&mut self, size: usize) { - let size = match self.max_size_update { - Some(v) => cmp::max(v, size), - None => size, - }; - - self.max_size_update = Some(size); - } - - /// Decodes the headers found in the given buffer. - pub fn decode( - &mut self, - src: &mut Cursor<&mut BytesMut>, - mut f: F, - ) -> Result<(), DecoderError> - where - F: FnMut(Header), - { - use self::Representation::*; - - let mut can_resize = true; - - if let Some(size) = self.max_size_update.take() { - self.last_max_update = size; - } - - let span = tracing::trace_span!("hpack::decode"); - let _e = span.enter(); - - tracing::trace!("decode"); - - while let Some(ty) = peek_u8(src) { - // At this point we are always at the beginning of the next block - // within the HPACK data. The type of the block can always be - // determined from the first byte. - match Representation::load(ty)? 
{ - Indexed => { - tracing::trace!(rem = src.remaining(), kind = %"Indexed"); - can_resize = false; - let entry = self.decode_indexed(src)?; - consume(src); - f(entry); - } - LiteralWithIndexing => { - tracing::trace!(rem = src.remaining(), kind = %"LiteralWithIndexing"); - can_resize = false; - let entry = self.decode_literal(src, true)?; - - // Insert the header into the table - self.table.insert(entry.clone()); - consume(src); - - f(entry); - } - LiteralWithoutIndexing => { - tracing::trace!(rem = src.remaining(), kind = %"LiteralWithoutIndexing"); - can_resize = false; - let entry = self.decode_literal(src, false)?; - consume(src); - f(entry); - } - LiteralNeverIndexed => { - tracing::trace!(rem = src.remaining(), kind = %"LiteralNeverIndexed"); - can_resize = false; - let entry = self.decode_literal(src, false)?; - consume(src); - - // TODO: Track that this should never be indexed - - f(entry); - } - SizeUpdate => { - tracing::trace!(rem = src.remaining(), kind = %"SizeUpdate"); - if !can_resize { - return Err(DecoderError::InvalidMaxDynamicSize); - } - - // Handle the dynamic table size update - self.process_size_update(src)?; - consume(src); - } - } - } - - Ok(()) - } - - fn process_size_update(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result<(), DecoderError> { - let new_size = decode_int(buf, 5)?; - - if new_size > self.last_max_update { - return Err(DecoderError::InvalidMaxDynamicSize); - } - - tracing::debug!( - from = self.table.size(), - to = new_size, - "Decoder changed max table size" - ); - - self.table.set_max_size(new_size); - - Ok(()) - } - - fn decode_indexed(&self, buf: &mut Cursor<&mut BytesMut>) -> Result { - let index = decode_int(buf, 7)?; - self.table.get(index) - } - - fn decode_literal( - &mut self, - buf: &mut Cursor<&mut BytesMut>, - index: bool, - ) -> Result { - let prefix = if index { 6 } else { 4 }; - - // Extract the table index for the name, or 0 if not indexed - let table_idx = decode_int(buf, prefix)?; - - // First, read 
the header name - if table_idx == 0 { - let old_pos = buf.position(); - let name_marker = self.try_decode_string(buf)?; - let value_marker = self.try_decode_string(buf)?; - buf.set_position(old_pos); - // Read the name as a literal - let name = name_marker.consume(buf); - let value = value_marker.consume(buf); - Header::new(name, value) - } else { - let e = self.table.get(table_idx)?; - let value = self.decode_string(buf)?; - - e.name().into_entry(value) - } - } - - fn try_decode_string( - &mut self, - buf: &mut Cursor<&mut BytesMut>, - ) -> Result { - let old_pos = buf.position(); - const HUFF_FLAG: u8 = 0b1000_0000; - - // The first bit in the first byte contains the huffman encoded flag. - let huff = match peek_u8(buf) { - Some(hdr) => (hdr & HUFF_FLAG) == HUFF_FLAG, - None => return Err(DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)), - }; - - // Decode the string length using 7 bit prefix - let len = decode_int(buf, 7)?; - - if len > buf.remaining() { - tracing::trace!(len, remaining = buf.remaining(), "decode_string underflow",); - return Err(DecoderError::NeedMore(NeedMore::StringUnderflow)); - } - - let offset = (buf.position() - old_pos) as usize; - if huff { - let ret = { - let raw = &buf.chunk()[..len]; - huffman::decode(raw, &mut self.buffer).map(|buf| StringMarker { - offset, - len, - string: Some(BytesMut::freeze(buf)), - }) - }; - - buf.advance(len); - ret - } else { - buf.advance(len); - Ok(StringMarker { - offset, - len, - string: None, - }) - } - } - - fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result { - let old_pos = buf.position(); - let marker = self.try_decode_string(buf)?; - buf.set_position(old_pos); - Ok(marker.consume(buf)) - } -} - -impl Default for Decoder { - fn default() -> Decoder { - Decoder::new(4096) - } -} - -// ===== impl Representation ===== - -impl Representation { - pub fn load(byte: u8) -> Result { - const INDEXED: u8 = 0b1000_0000; - const LITERAL_WITH_INDEXING: u8 = 0b0100_0000; - const 
LITERAL_WITHOUT_INDEXING: u8 = 0b1111_0000; - const LITERAL_NEVER_INDEXED: u8 = 0b0001_0000; - const SIZE_UPDATE_MASK: u8 = 0b1110_0000; - const SIZE_UPDATE: u8 = 0b0010_0000; - - // TODO: What did I even write here? - - if byte & INDEXED == INDEXED { - Ok(Representation::Indexed) - } else if byte & LITERAL_WITH_INDEXING == LITERAL_WITH_INDEXING { - Ok(Representation::LiteralWithIndexing) - } else if byte & LITERAL_WITHOUT_INDEXING == 0 { - Ok(Representation::LiteralWithoutIndexing) - } else if byte & LITERAL_WITHOUT_INDEXING == LITERAL_NEVER_INDEXED { - Ok(Representation::LiteralNeverIndexed) - } else if byte & SIZE_UPDATE_MASK == SIZE_UPDATE { - Ok(Representation::SizeUpdate) - } else { - Err(DecoderError::InvalidRepresentation) - } - } -} - -fn decode_int(buf: &mut B, prefix_size: u8) -> Result { - // The octet limit is chosen such that the maximum allowed *value* can - // never overflow an unsigned 32-bit integer. The maximum value of any - // integer that can be encoded with 5 octets is ~2^28 - const MAX_BYTES: usize = 5; - const VARINT_MASK: u8 = 0b0111_1111; - const VARINT_FLAG: u8 = 0b1000_0000; - - if prefix_size < 1 || prefix_size > 8 { - return Err(DecoderError::InvalidIntegerPrefix); - } - - if !buf.has_remaining() { - return Err(DecoderError::NeedMore(NeedMore::IntegerUnderflow)); - } - - let mask = if prefix_size == 8 { - 0xFF - } else { - (1u8 << prefix_size).wrapping_sub(1) - }; - - let mut ret = (buf.get_u8() & mask) as usize; - - if ret < mask as usize { - // Value fits in the prefix bits - return Ok(ret); - } - - // The int did not fit in the prefix bits, so continue reading. - // - // The total number of bytes used to represent the int. The first byte was - // the prefix, so start at 1. - let mut bytes = 1; - - // The rest of the int is stored as a varint -- 7 bits for the value and 1 - // bit to indicate if it is the last byte. 
- let mut shift = 0; - - while buf.has_remaining() { - let b = buf.get_u8(); - - bytes += 1; - ret += ((b & VARINT_MASK) as usize) << shift; - shift += 7; - - if b & VARINT_FLAG == 0 { - return Ok(ret); - } - - if bytes == MAX_BYTES { - // The spec requires that this situation is an error - return Err(DecoderError::IntegerOverflow); - } - } - - Err(DecoderError::NeedMore(NeedMore::IntegerUnderflow)) -} - -fn peek_u8(buf: &B) -> Option { - if buf.has_remaining() { - Some(buf.chunk()[0]) - } else { - None - } -} - -fn take(buf: &mut Cursor<&mut BytesMut>, n: usize) -> Bytes { - let pos = buf.position() as usize; - let mut head = buf.get_mut().split_to(pos + n); - buf.set_position(0); - head.advance(pos); - head.freeze() -} - -impl StringMarker { - fn consume(self, buf: &mut Cursor<&mut BytesMut>) -> Bytes { - buf.advance(self.offset); - match self.string { - Some(string) => { - buf.advance(self.len); - string - } - None => take(buf, self.len), - } - } -} - -fn consume(buf: &mut Cursor<&mut BytesMut>) { - // remove bytes from the internal BytesMut when they have been successfully - // decoded. This is a more permanent cursor position, which will be - // used to resume if decoding was only partial. - take(buf, 0); -} - -// ===== impl Table ===== - -impl Table { - fn new(max_size: usize) -> Table { - Table { - entries: VecDeque::new(), - size: 0, - max_size, - } - } - - fn size(&self) -> usize { - self.size - } - - /// Returns the entry located at the given index. - /// - /// The table is 1-indexed and constructed in such a way that the first - /// entries belong to the static table, followed by entries in the dynamic - /// table. They are merged into a single index address space, though. - /// - /// This is according to the [HPACK spec, section 2.3.3.] 
- /// (http://http2.github.io/http2-spec/compression.html#index.address.space) - pub fn get(&self, index: usize) -> Result { - if index == 0 { - return Err(DecoderError::InvalidTableIndex); - } - - if index <= 61 { - return Ok(get_static(index)); - } - - // Convert the index for lookup in the entries structure. - match self.entries.get(index - 62) { - Some(e) => Ok(e.clone()), - None => Err(DecoderError::InvalidTableIndex), - } - } - - fn insert(&mut self, entry: Header) { - let len = entry.len(); - - self.reserve(len); - - if self.size + len <= self.max_size { - self.size += len; - - // Track the entry - self.entries.push_front(entry); - } - } - - fn set_max_size(&mut self, size: usize) { - self.max_size = size; - // Make the table size fit within the new constraints. - self.consolidate(); - } - - fn reserve(&mut self, size: usize) { - while self.size + size > self.max_size { - match self.entries.pop_back() { - Some(last) => { - self.size -= last.len(); - } - None => return, - } - } - } - - fn consolidate(&mut self) { - while self.size > self.max_size { - { - let last = match self.entries.back() { - Some(x) => x, - None => { - // Can never happen as the size of the table must reach - // 0 by the time we've exhausted all elements. - panic!("Size of table != 0, but no headers left!"); - } - }; - - self.size -= last.len(); - } - - self.entries.pop_back(); - } - } -} - -// ===== impl DecoderError ===== - -impl From for DecoderError { - fn from(_: Utf8Error) -> DecoderError { - // TODO: Better error? - DecoderError::InvalidUtf8 - } -} - -impl From for DecoderError { - fn from(_: header::InvalidHeaderValue) -> DecoderError { - // TODO: Better error? 
- DecoderError::InvalidUtf8 - } -} - -impl From for DecoderError { - fn from(_: header::InvalidHeaderName) -> DecoderError { - // TODO: Better error - DecoderError::InvalidUtf8 - } -} - -impl From for DecoderError { - fn from(_: method::InvalidMethod) -> DecoderError { - // TODO: Better error - DecoderError::InvalidUtf8 - } -} - -impl From for DecoderError { - fn from(_: status::InvalidStatusCode) -> DecoderError { - // TODO: Better error - DecoderError::InvalidUtf8 - } -} - -impl From for frame::Error { - fn from(src: DecoderError) -> Self { - frame::Error::Hpack(src) - } -} - -/// Get an entry from the static table -pub fn get_static(idx: usize) -> Header { - use http::header::HeaderValue; - - match idx { - 1 => Header::Authority(BytesStr::from_static("")), - 2 => Header::Method(Method::GET), - 3 => Header::Method(Method::POST), - 4 => Header::Path(BytesStr::from_static("/")), - 5 => Header::Path(BytesStr::from_static("/index.html")), - 6 => Header::Scheme(BytesStr::from_static("http")), - 7 => Header::Scheme(BytesStr::from_static("https")), - 8 => Header::Status(StatusCode::OK), - 9 => Header::Status(StatusCode::NO_CONTENT), - 10 => Header::Status(StatusCode::PARTIAL_CONTENT), - 11 => Header::Status(StatusCode::NOT_MODIFIED), - 12 => Header::Status(StatusCode::BAD_REQUEST), - 13 => Header::Status(StatusCode::NOT_FOUND), - 14 => Header::Status(StatusCode::INTERNAL_SERVER_ERROR), - 15 => Header::Field { - name: header::ACCEPT_CHARSET, - value: HeaderValue::from_static(""), - }, - 16 => Header::Field { - name: header::ACCEPT_ENCODING, - value: HeaderValue::from_static("gzip, deflate"), - }, - 17 => Header::Field { - name: header::ACCEPT_LANGUAGE, - value: HeaderValue::from_static(""), - }, - 18 => Header::Field { - name: header::ACCEPT_RANGES, - value: HeaderValue::from_static(""), - }, - 19 => Header::Field { - name: header::ACCEPT, - value: HeaderValue::from_static(""), - }, - 20 => Header::Field { - name: header::ACCESS_CONTROL_ALLOW_ORIGIN, - value: 
HeaderValue::from_static(""), - }, - 21 => Header::Field { - name: header::AGE, - value: HeaderValue::from_static(""), - }, - 22 => Header::Field { - name: header::ALLOW, - value: HeaderValue::from_static(""), - }, - 23 => Header::Field { - name: header::AUTHORIZATION, - value: HeaderValue::from_static(""), - }, - 24 => Header::Field { - name: header::CACHE_CONTROL, - value: HeaderValue::from_static(""), - }, - 25 => Header::Field { - name: header::CONTENT_DISPOSITION, - value: HeaderValue::from_static(""), - }, - 26 => Header::Field { - name: header::CONTENT_ENCODING, - value: HeaderValue::from_static(""), - }, - 27 => Header::Field { - name: header::CONTENT_LANGUAGE, - value: HeaderValue::from_static(""), - }, - 28 => Header::Field { - name: header::CONTENT_LENGTH, - value: HeaderValue::from_static(""), - }, - 29 => Header::Field { - name: header::CONTENT_LOCATION, - value: HeaderValue::from_static(""), - }, - 30 => Header::Field { - name: header::CONTENT_RANGE, - value: HeaderValue::from_static(""), - }, - 31 => Header::Field { - name: header::CONTENT_TYPE, - value: HeaderValue::from_static(""), - }, - 32 => Header::Field { - name: header::COOKIE, - value: HeaderValue::from_static(""), - }, - 33 => Header::Field { - name: header::DATE, - value: HeaderValue::from_static(""), - }, - 34 => Header::Field { - name: header::ETAG, - value: HeaderValue::from_static(""), - }, - 35 => Header::Field { - name: header::EXPECT, - value: HeaderValue::from_static(""), - }, - 36 => Header::Field { - name: header::EXPIRES, - value: HeaderValue::from_static(""), - }, - 37 => Header::Field { - name: header::FROM, - value: HeaderValue::from_static(""), - }, - 38 => Header::Field { - name: header::HOST, - value: HeaderValue::from_static(""), - }, - 39 => Header::Field { - name: header::IF_MATCH, - value: HeaderValue::from_static(""), - }, - 40 => Header::Field { - name: header::IF_MODIFIED_SINCE, - value: HeaderValue::from_static(""), - }, - 41 => Header::Field { - name: 
header::IF_NONE_MATCH, - value: HeaderValue::from_static(""), - }, - 42 => Header::Field { - name: header::IF_RANGE, - value: HeaderValue::from_static(""), - }, - 43 => Header::Field { - name: header::IF_UNMODIFIED_SINCE, - value: HeaderValue::from_static(""), - }, - 44 => Header::Field { - name: header::LAST_MODIFIED, - value: HeaderValue::from_static(""), - }, - 45 => Header::Field { - name: header::LINK, - value: HeaderValue::from_static(""), - }, - 46 => Header::Field { - name: header::LOCATION, - value: HeaderValue::from_static(""), - }, - 47 => Header::Field { - name: header::MAX_FORWARDS, - value: HeaderValue::from_static(""), - }, - 48 => Header::Field { - name: header::PROXY_AUTHENTICATE, - value: HeaderValue::from_static(""), - }, - 49 => Header::Field { - name: header::PROXY_AUTHORIZATION, - value: HeaderValue::from_static(""), - }, - 50 => Header::Field { - name: header::RANGE, - value: HeaderValue::from_static(""), - }, - 51 => Header::Field { - name: header::REFERER, - value: HeaderValue::from_static(""), - }, - 52 => Header::Field { - name: header::REFRESH, - value: HeaderValue::from_static(""), - }, - 53 => Header::Field { - name: header::RETRY_AFTER, - value: HeaderValue::from_static(""), - }, - 54 => Header::Field { - name: header::SERVER, - value: HeaderValue::from_static(""), - }, - 55 => Header::Field { - name: header::SET_COOKIE, - value: HeaderValue::from_static(""), - }, - 56 => Header::Field { - name: header::STRICT_TRANSPORT_SECURITY, - value: HeaderValue::from_static(""), - }, - 57 => Header::Field { - name: header::TRANSFER_ENCODING, - value: HeaderValue::from_static(""), - }, - 58 => Header::Field { - name: header::USER_AGENT, - value: HeaderValue::from_static(""), - }, - 59 => Header::Field { - name: header::VARY, - value: HeaderValue::from_static(""), - }, - 60 => Header::Field { - name: header::VIA, - value: HeaderValue::from_static(""), - }, - 61 => Header::Field { - name: header::WWW_AUTHENTICATE, - value: 
HeaderValue::from_static(""), - }, - _ => unreachable!(), - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::hpack::Header; - - #[test] - fn test_peek_u8() { - let b = 0xff; - let mut buf = Cursor::new(vec![b]); - assert_eq!(peek_u8(&buf), Some(b)); - assert_eq!(buf.get_u8(), b); - assert_eq!(peek_u8(&buf), None); - } - - #[test] - fn test_decode_string_empty() { - let mut de = Decoder::new(0); - let mut buf = BytesMut::new(); - let err = de.decode_string(&mut Cursor::new(&mut buf)).unwrap_err(); - assert_eq!(err, DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)); - } - - #[test] - fn test_decode_empty() { - let mut de = Decoder::new(0); - let mut buf = BytesMut::new(); - let _: () = de.decode(&mut Cursor::new(&mut buf), |_| {}).unwrap(); - } - - #[test] - fn test_decode_indexed_larger_than_table() { - let mut de = Decoder::new(0); - - let mut buf = BytesMut::new(); - buf.extend([0b01000000, 0x80 | 2]); - buf.extend(huff_encode(b"foo")); - buf.extend([0x80 | 3]); - buf.extend(huff_encode(b"bar")); - - let mut res = vec![]; - de.decode(&mut Cursor::new(&mut buf), |h| { - res.push(h); - }) - .unwrap(); - - assert_eq!(res.len(), 1); - assert_eq!(de.table.size(), 0); - - match res[0] { - Header::Field { - ref name, - ref value, - } => { - assert_eq!(name, "foo"); - assert_eq!(value, "bar"); - } - _ => panic!(), - } - } - - fn huff_encode(src: &[u8]) -> BytesMut { - let mut buf = BytesMut::new(); - huffman::encode(src, &mut buf); - buf - } - - #[test] - fn test_decode_continuation_header_with_non_huff_encoded_name() { - let mut de = Decoder::new(0); - let value = huff_encode(b"bar"); - let mut buf = BytesMut::new(); - // header name is non_huff encoded - buf.extend([0b01000000, 3]); - buf.extend(b"foo"); - // header value is partial - buf.extend([0x80 | 3]); - buf.extend(&value[0..1]); - - let mut res = vec![]; - let e = de - .decode(&mut Cursor::new(&mut buf), |h| { - res.push(h); - }) - .unwrap_err(); - // decode error because the header value is 
partial - assert_eq!(e, DecoderError::NeedMore(NeedMore::StringUnderflow)); - - // extend buf with the remaining header value - buf.extend(&value[1..]); - de.decode(&mut Cursor::new(&mut buf), |h| { - res.push(h); - }) - .unwrap(); - - assert_eq!(res.len(), 1); - assert_eq!(de.table.size(), 0); - - match res[0] { - Header::Field { - ref name, - ref value, - } => { - assert_eq!(name, "foo"); - assert_eq!(value, "bar"); - } - _ => panic!(), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/encoder.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/encoder.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/encoder.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/encoder.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,721 +0,0 @@ -use super::table::{Index, Table}; -use super::{huffman, Header}; - -use bytes::{BufMut, BytesMut}; -use http::header::{HeaderName, HeaderValue}; - -#[derive(Debug)] -pub struct Encoder { - table: Table, - size_update: Option, -} - -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -enum SizeUpdate { - One(usize), - Two(usize, usize), // min, max -} - -impl Encoder { - pub fn new(max_size: usize, capacity: usize) -> Encoder { - Encoder { - table: Table::new(max_size, capacity), - size_update: None, - } - } - - /// Queues a max size update. - /// - /// The next call to `encode` will include a dynamic size update frame. 
- pub fn update_max_size(&mut self, val: usize) { - match self.size_update { - Some(SizeUpdate::One(old)) => { - if val > old { - if old > self.table.max_size() { - self.size_update = Some(SizeUpdate::One(val)); - } else { - self.size_update = Some(SizeUpdate::Two(old, val)); - } - } else { - self.size_update = Some(SizeUpdate::One(val)); - } - } - Some(SizeUpdate::Two(min, _)) => { - if val < min { - self.size_update = Some(SizeUpdate::One(val)); - } else { - self.size_update = Some(SizeUpdate::Two(min, val)); - } - } - None => { - if val != self.table.max_size() { - // Don't bother writing a frame if the value already matches - // the table's max size. - self.size_update = Some(SizeUpdate::One(val)); - } - } - } - } - - /// Encode a set of headers into the provide buffer - pub fn encode(&mut self, headers: I, dst: &mut BytesMut) - where - I: IntoIterator>>, - { - let span = tracing::trace_span!("hpack::encode"); - let _e = span.enter(); - - self.encode_size_updates(dst); - - let mut last_index = None; - - for header in headers { - match header.reify() { - // The header has an associated name. In which case, try to - // index it in the table. - Ok(header) => { - let index = self.table.index(header); - self.encode_header(&index, dst); - - last_index = Some(index); - } - // The header does not have an associated name. This means that - // the name is the same as the previously yielded header. In - // which case, we skip table lookup and just use the same index - // as the previous entry. 
- Err(value) => { - self.encode_header_without_name( - last_index.as_ref().unwrap_or_else(|| { - panic!("encoding header without name, but no previous index to use for name"); - }), - &value, - dst, - ); - } - } - } - } - - fn encode_size_updates(&mut self, dst: &mut BytesMut) { - match self.size_update.take() { - Some(SizeUpdate::One(val)) => { - self.table.resize(val); - encode_size_update(val, dst); - } - Some(SizeUpdate::Two(min, max)) => { - self.table.resize(min); - self.table.resize(max); - encode_size_update(min, dst); - encode_size_update(max, dst); - } - None => {} - } - } - - fn encode_header(&mut self, index: &Index, dst: &mut BytesMut) { - match *index { - Index::Indexed(idx, _) => { - encode_int(idx, 7, 0x80, dst); - } - Index::Name(idx, _) => { - let header = self.table.resolve(index); - - encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst); - } - Index::Inserted(_) => { - let header = self.table.resolve(index); - - assert!(!header.is_sensitive()); - - dst.put_u8(0b0100_0000); - - encode_str(header.name().as_slice(), dst); - encode_str(header.value_slice(), dst); - } - Index::InsertedValue(idx, _) => { - let header = self.table.resolve(index); - - assert!(!header.is_sensitive()); - - encode_int(idx, 6, 0b0100_0000, dst); - encode_str(header.value_slice(), dst); - } - Index::NotIndexed(_) => { - let header = self.table.resolve(index); - - encode_not_indexed2( - header.name().as_slice(), - header.value_slice(), - header.is_sensitive(), - dst, - ); - } - } - } - - fn encode_header_without_name( - &mut self, - last: &Index, - value: &HeaderValue, - dst: &mut BytesMut, - ) { - match *last { - Index::Indexed(..) - | Index::Name(..) - | Index::Inserted(..) - | Index::InsertedValue(..) 
=> { - let idx = self.table.resolve_idx(last); - - encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst); - } - Index::NotIndexed(_) => { - let last = self.table.resolve(last); - - encode_not_indexed2( - last.name().as_slice(), - value.as_ref(), - value.is_sensitive(), - dst, - ); - } - } - } -} - -impl Default for Encoder { - fn default() -> Encoder { - Encoder::new(4096, 0) - } -} - -fn encode_size_update(val: usize, dst: &mut BytesMut) { - encode_int(val, 5, 0b0010_0000, dst) -} - -fn encode_not_indexed(name: usize, value: &[u8], sensitive: bool, dst: &mut BytesMut) { - if sensitive { - encode_int(name, 4, 0b10000, dst); - } else { - encode_int(name, 4, 0, dst); - } - - encode_str(value, dst); -} - -fn encode_not_indexed2(name: &[u8], value: &[u8], sensitive: bool, dst: &mut BytesMut) { - if sensitive { - dst.put_u8(0b10000); - } else { - dst.put_u8(0); - } - - encode_str(name, dst); - encode_str(value, dst); -} - -fn encode_str(val: &[u8], dst: &mut BytesMut) { - if !val.is_empty() { - let idx = position(dst); - - // Push a placeholder byte for the length header - dst.put_u8(0); - - // Encode with huffman - huffman::encode(val, dst); - - let huff_len = position(dst) - (idx + 1); - - if encode_int_one_byte(huff_len, 7) { - // Write the string head - dst[idx] = 0x80 | huff_len as u8; - } else { - // Write the head to a placeholder - const PLACEHOLDER_LEN: usize = 8; - let mut buf = [0u8; PLACEHOLDER_LEN]; - - let head_len = { - let mut head_dst = &mut buf[..]; - encode_int(huff_len, 7, 0x80, &mut head_dst); - PLACEHOLDER_LEN - head_dst.remaining_mut() - }; - - // This is just done to reserve space in the destination - dst.put_slice(&buf[1..head_len]); - - // Shift the header forward - for i in 0..huff_len { - let src_i = idx + 1 + (huff_len - (i + 1)); - let dst_i = idx + head_len + (huff_len - (i + 1)); - dst[dst_i] = dst[src_i]; - } - - // Copy in the head - for i in 0..head_len { - dst[idx + i] = buf[i]; - } - } - } else { - // Write an empty 
string - dst.put_u8(0); - } -} - -/// Encode an integer into the given destination buffer -fn encode_int( - mut value: usize, // The integer to encode - prefix_bits: usize, // The number of bits in the prefix - first_byte: u8, // The base upon which to start encoding the int - dst: &mut B, -) { - if encode_int_one_byte(value, prefix_bits) { - dst.put_u8(first_byte | value as u8); - return; - } - - let low = (1 << prefix_bits) - 1; - - value -= low; - - dst.put_u8(first_byte | low as u8); - - while value >= 128 { - dst.put_u8(0b1000_0000 | value as u8); - - value >>= 7; - } - - dst.put_u8(value as u8); -} - -/// Returns true if the in the int can be fully encoded in the first byte. -fn encode_int_one_byte(value: usize, prefix_bits: usize) -> bool { - value < (1 << prefix_bits) - 1 -} - -fn position(buf: &BytesMut) -> usize { - buf.len() -} - -#[cfg(test)] -mod test { - use super::*; - use crate::hpack::Header; - use http::*; - - #[test] - fn test_encode_method_get() { - let mut encoder = Encoder::default(); - let res = encode(&mut encoder, vec![method("GET")]); - assert_eq!(*res, [0x80 | 2]); - assert_eq!(encoder.table.len(), 0); - } - - #[test] - fn test_encode_method_post() { - let mut encoder = Encoder::default(); - let res = encode(&mut encoder, vec![method("POST")]); - assert_eq!(*res, [0x80 | 3]); - assert_eq!(encoder.table.len(), 0); - } - - #[test] - fn test_encode_method_patch() { - let mut encoder = Encoder::default(); - let res = encode(&mut encoder, vec![method("PATCH")]); - - assert_eq!(res[0], 0b01000000 | 2); // Incremental indexing w/ name pulled from table - assert_eq!(res[1], 0x80 | 5); // header value w/ huffman coding - - assert_eq!("PATCH", huff_decode(&res[2..7])); - assert_eq!(encoder.table.len(), 1); - - let res = encode(&mut encoder, vec![method("PATCH")]); - - assert_eq!(1 << 7 | 62, res[0]); - assert_eq!(1, res.len()); - } - - #[test] - fn test_encode_indexed_name_literal_value() { - let mut encoder = Encoder::default(); - let res = 
encode(&mut encoder, vec![header("content-language", "foo")]); - - assert_eq!(res[0], 0b01000000 | 27); // Indexed name - assert_eq!(res[1], 0x80 | 2); // header value w/ huffman coding - - assert_eq!("foo", huff_decode(&res[2..4])); - - // Same name, new value should still use incremental - let res = encode(&mut encoder, vec![header("content-language", "bar")]); - assert_eq!(res[0], 0b01000000 | 27); // Indexed name - assert_eq!(res[1], 0x80 | 3); // header value w/ huffman coding - assert_eq!("bar", huff_decode(&res[2..5])); - } - - #[test] - fn test_repeated_headers_are_indexed() { - let mut encoder = Encoder::default(); - let res = encode(&mut encoder, vec![header("foo", "hello")]); - - assert_eq!(&[0b01000000, 0x80 | 2], &res[0..2]); - assert_eq!("foo", huff_decode(&res[2..4])); - assert_eq!(0x80 | 4, res[4]); - assert_eq!("hello", huff_decode(&res[5..])); - assert_eq!(9, res.len()); - - assert_eq!(1, encoder.table.len()); - - let res = encode(&mut encoder, vec![header("foo", "hello")]); - assert_eq!([0x80 | 62], *res); - - assert_eq!(encoder.table.len(), 1); - } - - #[test] - fn test_evicting_headers() { - let mut encoder = Encoder::default(); - - // Fill the table - for i in 0..64 { - let key = format!("x-hello-world-{:02}", i); - let res = encode(&mut encoder, vec![header(&key, &key)]); - - assert_eq!(&[0b01000000, 0x80 | 12], &res[0..2]); - assert_eq!(key, huff_decode(&res[2..14])); - assert_eq!(0x80 | 12, res[14]); - assert_eq!(key, huff_decode(&res[15..])); - assert_eq!(27, res.len()); - - // Make sure the header can be found... 
- let res = encode(&mut encoder, vec![header(&key, &key)]); - - // Only check that it is found - assert_eq!(0x80, res[0] & 0x80); - } - - assert_eq!(4096, encoder.table.size()); - assert_eq!(64, encoder.table.len()); - - // Find existing headers - for i in 0..64 { - let key = format!("x-hello-world-{:02}", i); - let res = encode(&mut encoder, vec![header(&key, &key)]); - assert_eq!(0x80, res[0] & 0x80); - } - - // Insert a new header - let key = "x-hello-world-64"; - let res = encode(&mut encoder, vec![header(key, key)]); - - assert_eq!(&[0b01000000, 0x80 | 12], &res[0..2]); - assert_eq!(key, huff_decode(&res[2..14])); - assert_eq!(0x80 | 12, res[14]); - assert_eq!(key, huff_decode(&res[15..])); - assert_eq!(27, res.len()); - - assert_eq!(64, encoder.table.len()); - - // Now try encoding entries that should exist in the table - for i in 1..65 { - let key = format!("x-hello-world-{:02}", i); - let res = encode(&mut encoder, vec![header(&key, &key)]); - assert_eq!(0x80 | (61 + (65 - i)), res[0]); - } - } - - #[test] - fn test_large_headers_are_not_indexed() { - let mut encoder = Encoder::new(128, 0); - let key = "hello-world-hello-world-HELLO-zzz"; - - let res = encode(&mut encoder, vec![header(key, key)]); - - assert_eq!(&[0, 0x80 | 25], &res[..2]); - - assert_eq!(0, encoder.table.len()); - assert_eq!(0, encoder.table.size()); - } - - #[test] - fn test_sensitive_headers_are_never_indexed() { - use http::header::HeaderValue; - - let name = "my-password".parse().unwrap(); - let mut value = HeaderValue::from_bytes(b"12345").unwrap(); - value.set_sensitive(true); - - let header = Header::Field { - name: Some(name), - value, - }; - - // Now, try to encode the sensitive header - - let mut encoder = Encoder::default(); - let res = encode(&mut encoder, vec![header]); - - assert_eq!(&[0b10000, 0x80 | 8], &res[..2]); - assert_eq!("my-password", huff_decode(&res[2..10])); - assert_eq!(0x80 | 4, res[10]); - assert_eq!("12345", huff_decode(&res[11..])); - - // Now, try to encode 
a sensitive header w/ a name in the static table - let name = "authorization".parse().unwrap(); - let mut value = HeaderValue::from_bytes(b"12345").unwrap(); - value.set_sensitive(true); - - let header = Header::Field { - name: Some(name), - value, - }; - - let mut encoder = Encoder::default(); - let res = encode(&mut encoder, vec![header]); - - assert_eq!(&[0b11111, 8], &res[..2]); - assert_eq!(0x80 | 4, res[2]); - assert_eq!("12345", huff_decode(&res[3..])); - - // Using the name component of a previously indexed header (without - // sensitive flag set) - - let _ = encode( - &mut encoder, - vec![self::header("my-password", "not-so-secret")], - ); - - let name = "my-password".parse().unwrap(); - let mut value = HeaderValue::from_bytes(b"12345").unwrap(); - value.set_sensitive(true); - - let header = Header::Field { - name: Some(name), - value, - }; - let res = encode(&mut encoder, vec![header]); - - assert_eq!(&[0b11111, 47], &res[..2]); - assert_eq!(0x80 | 4, res[2]); - assert_eq!("12345", huff_decode(&res[3..])); - } - - #[test] - fn test_content_length_value_not_indexed() { - let mut encoder = Encoder::default(); - let res = encode(&mut encoder, vec![header("content-length", "1234")]); - - assert_eq!(&[15, 13, 0x80 | 3], &res[0..3]); - assert_eq!("1234", huff_decode(&res[3..])); - assert_eq!(6, res.len()); - } - - #[test] - fn test_encoding_headers_with_same_name() { - let mut encoder = Encoder::default(); - let name = "hello"; - - // Encode first one - let _ = encode(&mut encoder, vec![header(name, "one")]); - - // Encode second one - let res = encode(&mut encoder, vec![header(name, "two")]); - assert_eq!(&[0x40 | 62, 0x80 | 3], &res[0..2]); - assert_eq!("two", huff_decode(&res[2..])); - assert_eq!(5, res.len()); - - // Encode the first one again - let res = encode(&mut encoder, vec![header(name, "one")]); - assert_eq!(&[0x80 | 63], &res[..]); - - // Now the second one - let res = encode(&mut encoder, vec![header(name, "two")]); - assert_eq!(&[0x80 | 62], 
&res[..]); - } - - #[test] - fn test_evicting_headers_when_multiple_of_same_name_are_in_table() { - // The encoder only has space for 2 headers - let mut encoder = Encoder::new(76, 0); - - let _ = encode(&mut encoder, vec![header("foo", "bar")]); - assert_eq!(1, encoder.table.len()); - - let _ = encode(&mut encoder, vec![header("bar", "foo")]); - assert_eq!(2, encoder.table.len()); - - // This will evict the first header, while still referencing the header - // name - let res = encode(&mut encoder, vec![header("foo", "baz")]); - assert_eq!(&[0x40 | 63, 0, 0x80 | 3], &res[..3]); - assert_eq!(2, encoder.table.len()); - - // Try adding the same header again - let res = encode(&mut encoder, vec![header("foo", "baz")]); - assert_eq!(&[0x80 | 62], &res[..]); - assert_eq!(2, encoder.table.len()); - } - - #[test] - fn test_max_size_zero() { - // Static table only - let mut encoder = Encoder::new(0, 0); - let res = encode(&mut encoder, vec![method("GET")]); - assert_eq!(*res, [0x80 | 2]); - assert_eq!(encoder.table.len(), 0); - - let res = encode(&mut encoder, vec![header("foo", "bar")]); - assert_eq!(&[0, 0x80 | 2], &res[..2]); - assert_eq!("foo", huff_decode(&res[2..4])); - assert_eq!(0x80 | 3, res[4]); - assert_eq!("bar", huff_decode(&res[5..8])); - assert_eq!(0, encoder.table.len()); - - // Encode a custom value - let res = encode(&mut encoder, vec![header("transfer-encoding", "chunked")]); - assert_eq!(&[15, 42, 0x80 | 6], &res[..3]); - assert_eq!("chunked", huff_decode(&res[3..])); - } - - #[test] - fn test_update_max_size_combos() { - let mut encoder = Encoder::default(); - assert!(encoder.size_update.is_none()); - assert_eq!(4096, encoder.table.max_size()); - - encoder.update_max_size(4096); // Default size - assert!(encoder.size_update.is_none()); - - encoder.update_max_size(0); - assert_eq!(Some(SizeUpdate::One(0)), encoder.size_update); - - encoder.update_max_size(100); - assert_eq!(Some(SizeUpdate::Two(0, 100)), encoder.size_update); - - let mut encoder = 
Encoder::default(); - encoder.update_max_size(8000); - assert_eq!(Some(SizeUpdate::One(8000)), encoder.size_update); - - encoder.update_max_size(100); - assert_eq!(Some(SizeUpdate::One(100)), encoder.size_update); - - encoder.update_max_size(8000); - assert_eq!(Some(SizeUpdate::Two(100, 8000)), encoder.size_update); - - encoder.update_max_size(4000); - assert_eq!(Some(SizeUpdate::Two(100, 4000)), encoder.size_update); - - encoder.update_max_size(50); - assert_eq!(Some(SizeUpdate::One(50)), encoder.size_update); - } - - #[test] - fn test_resizing_table() { - let mut encoder = Encoder::default(); - - // Add a header - let _ = encode(&mut encoder, vec![header("foo", "bar")]); - - encoder.update_max_size(1); - assert_eq!(1, encoder.table.len()); - - let res = encode(&mut encoder, vec![method("GET")]); - assert_eq!(&[32 | 1, 0x80 | 2], &res[..]); - assert_eq!(0, encoder.table.len()); - - let res = encode(&mut encoder, vec![header("foo", "bar")]); - assert_eq!(0, res[0]); - - encoder.update_max_size(100); - let res = encode(&mut encoder, vec![header("foo", "bar")]); - assert_eq!(&[32 | 31, 69, 64], &res[..3]); - - encoder.update_max_size(0); - let res = encode(&mut encoder, vec![header("foo", "bar")]); - assert_eq!(&[32, 0], &res[..2]); - } - - #[test] - fn test_decreasing_table_size_without_eviction() { - let mut encoder = Encoder::default(); - - // Add a header - let _ = encode(&mut encoder, vec![header("foo", "bar")]); - - encoder.update_max_size(100); - assert_eq!(1, encoder.table.len()); - - let res = encode(&mut encoder, vec![header("foo", "bar")]); - assert_eq!(&[32 | 31, 69, 0x80 | 62], &res[..]); - } - - #[test] - fn test_nameless_header() { - let mut encoder = Encoder::default(); - - let res = encode( - &mut encoder, - vec![ - Header::Field { - name: Some("hello".parse().unwrap()), - value: HeaderValue::from_bytes(b"world").unwrap(), - }, - Header::Field { - name: None, - value: HeaderValue::from_bytes(b"zomg").unwrap(), - }, - ], - ); - - assert_eq!(&[0x40, 
0x80 | 4], &res[0..2]); - assert_eq!("hello", huff_decode(&res[2..6])); - assert_eq!(0x80 | 4, res[6]); - assert_eq!("world", huff_decode(&res[7..11])); - - // Next is not indexed - assert_eq!(&[15, 47, 0x80 | 3], &res[11..14]); - assert_eq!("zomg", huff_decode(&res[14..])); - } - - #[test] - fn test_large_size_update() { - let mut encoder = Encoder::default(); - - encoder.update_max_size(1912930560); - assert_eq!(Some(SizeUpdate::One(1912930560)), encoder.size_update); - - let mut dst = BytesMut::with_capacity(6); - encoder.encode_size_updates(&mut dst); - assert_eq!([63, 225, 129, 148, 144, 7], &dst[..]); - } - - #[test] - #[ignore] - fn test_evicted_overflow() { - // Not sure what the best way to do this is. - } - - fn encode(e: &mut Encoder, hdrs: Vec>>) -> BytesMut { - let mut dst = BytesMut::with_capacity(1024); - e.encode(&mut hdrs.into_iter(), &mut dst); - dst - } - - fn method(s: &str) -> Header> { - Header::Method(Method::from_bytes(s.as_bytes()).unwrap()) - } - - fn header(name: &str, val: &str) -> Header> { - let name = HeaderName::from_bytes(name.as_bytes()).unwrap(); - let value = HeaderValue::from_bytes(val.as_bytes()).unwrap(); - - Header::Field { - name: Some(name), - value, - } - } - - fn huff_decode(src: &[u8]) -> BytesMut { - let mut buf = BytesMut::new(); - huffman::decode(src, &mut buf).unwrap() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/header.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/header.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/header.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/header.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,308 +0,0 @@ -use super::{DecoderError, NeedMore}; -use crate::ext::Protocol; - -use bytes::Bytes; -use http::header::{HeaderName, HeaderValue}; -use http::{Method, StatusCode}; -use std::fmt; - -/// HTTP/2 Header -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum Header { - Field { name: T, value: HeaderValue }, - // TODO: Change these types to 
`http::uri` types. - Authority(BytesStr), - Method(Method), - Scheme(BytesStr), - Path(BytesStr), - Protocol(Protocol), - Status(StatusCode), -} - -/// The header field name -#[derive(Debug, Clone, Eq, PartialEq, Hash)] -pub enum Name<'a> { - Field(&'a HeaderName), - Authority, - Method, - Scheme, - Path, - Protocol, - Status, -} - -#[doc(hidden)] -#[derive(Clone, Eq, PartialEq, Default)] -pub struct BytesStr(Bytes); - -pub fn len(name: &HeaderName, value: &HeaderValue) -> usize { - let n: &str = name.as_ref(); - 32 + n.len() + value.len() -} - -impl Header> { - pub fn reify(self) -> Result { - use self::Header::*; - - Ok(match self { - Field { - name: Some(n), - value, - } => Field { name: n, value }, - Field { name: None, value } => return Err(value), - Authority(v) => Authority(v), - Method(v) => Method(v), - Scheme(v) => Scheme(v), - Path(v) => Path(v), - Protocol(v) => Protocol(v), - Status(v) => Status(v), - }) - } -} - -impl Header { - pub fn new(name: Bytes, value: Bytes) -> Result { - if name.is_empty() { - return Err(DecoderError::NeedMore(NeedMore::UnexpectedEndOfStream)); - } - if name[0] == b':' { - match &name[1..] 
{ - b"authority" => { - let value = BytesStr::try_from(value)?; - Ok(Header::Authority(value)) - } - b"method" => { - let method = Method::from_bytes(&value)?; - Ok(Header::Method(method)) - } - b"scheme" => { - let value = BytesStr::try_from(value)?; - Ok(Header::Scheme(value)) - } - b"path" => { - let value = BytesStr::try_from(value)?; - Ok(Header::Path(value)) - } - b"protocol" => { - let value = Protocol::try_from(value)?; - Ok(Header::Protocol(value)) - } - b"status" => { - let status = StatusCode::from_bytes(&value)?; - Ok(Header::Status(status)) - } - _ => Err(DecoderError::InvalidPseudoheader), - } - } else { - // HTTP/2 requires lower case header names - let name = HeaderName::from_lowercase(&name)?; - let value = HeaderValue::from_bytes(&value)?; - - Ok(Header::Field { name, value }) - } - } - - pub fn len(&self) -> usize { - match *self { - Header::Field { - ref name, - ref value, - } => len(name, value), - Header::Authority(ref v) => 32 + 10 + v.len(), - Header::Method(ref v) => 32 + 7 + v.as_ref().len(), - Header::Scheme(ref v) => 32 + 7 + v.len(), - Header::Path(ref v) => 32 + 5 + v.len(), - Header::Protocol(ref v) => 32 + 9 + v.as_str().len(), - Header::Status(_) => 32 + 7 + 3, - } - } - - /// Returns the header name - pub fn name(&self) -> Name { - match *self { - Header::Field { ref name, .. } => Name::Field(name), - Header::Authority(..) => Name::Authority, - Header::Method(..) => Name::Method, - Header::Scheme(..) => Name::Scheme, - Header::Path(..) => Name::Path, - Header::Protocol(..) => Name::Protocol, - Header::Status(..) => Name::Status, - } - } - - pub fn value_slice(&self) -> &[u8] { - match *self { - Header::Field { ref value, .. 
} => value.as_ref(), - Header::Authority(ref v) => v.as_ref(), - Header::Method(ref v) => v.as_ref().as_ref(), - Header::Scheme(ref v) => v.as_ref(), - Header::Path(ref v) => v.as_ref(), - Header::Protocol(ref v) => v.as_ref(), - Header::Status(ref v) => v.as_str().as_ref(), - } - } - - pub fn value_eq(&self, other: &Header) -> bool { - match *self { - Header::Field { ref value, .. } => { - let a = value; - match *other { - Header::Field { ref value, .. } => a == value, - _ => false, - } - } - Header::Authority(ref a) => match *other { - Header::Authority(ref b) => a == b, - _ => false, - }, - Header::Method(ref a) => match *other { - Header::Method(ref b) => a == b, - _ => false, - }, - Header::Scheme(ref a) => match *other { - Header::Scheme(ref b) => a == b, - _ => false, - }, - Header::Path(ref a) => match *other { - Header::Path(ref b) => a == b, - _ => false, - }, - Header::Protocol(ref a) => match *other { - Header::Protocol(ref b) => a == b, - _ => false, - }, - Header::Status(ref a) => match *other { - Header::Status(ref b) => a == b, - _ => false, - }, - } - } - - pub fn is_sensitive(&self) -> bool { - match *self { - Header::Field { ref value, .. } => value.is_sensitive(), - // TODO: Technically these other header values can be sensitive too. - _ => false, - } - } - - pub fn skip_value_index(&self) -> bool { - use http::header; - - match *self { - Header::Field { ref name, .. } => matches!( - *name, - header::AGE - | header::AUTHORIZATION - | header::CONTENT_LENGTH - | header::ETAG - | header::IF_MODIFIED_SINCE - | header::IF_NONE_MATCH - | header::LOCATION - | header::COOKIE - | header::SET_COOKIE - ), - Header::Path(..) => true, - _ => false, - } - } -} - -// Mostly for tests -impl From
for Header> { - fn from(src: Header) -> Self { - match src { - Header::Field { name, value } => Header::Field { - name: Some(name), - value, - }, - Header::Authority(v) => Header::Authority(v), - Header::Method(v) => Header::Method(v), - Header::Scheme(v) => Header::Scheme(v), - Header::Path(v) => Header::Path(v), - Header::Protocol(v) => Header::Protocol(v), - Header::Status(v) => Header::Status(v), - } - } -} - -impl<'a> Name<'a> { - pub fn into_entry(self, value: Bytes) -> Result { - match self { - Name::Field(name) => Ok(Header::Field { - name: name.clone(), - value: HeaderValue::from_bytes(&value)?, - }), - Name::Authority => Ok(Header::Authority(BytesStr::try_from(value)?)), - Name::Method => Ok(Header::Method(Method::from_bytes(&value)?)), - Name::Scheme => Ok(Header::Scheme(BytesStr::try_from(value)?)), - Name::Path => Ok(Header::Path(BytesStr::try_from(value)?)), - Name::Protocol => Ok(Header::Protocol(Protocol::try_from(value)?)), - Name::Status => { - match StatusCode::from_bytes(&value) { - Ok(status) => Ok(Header::Status(status)), - // TODO: better error handling - Err(_) => Err(DecoderError::InvalidStatusCode), - } - } - } - } - - pub fn as_slice(&self) -> &[u8] { - match *self { - Name::Field(ref name) => name.as_ref(), - Name::Authority => b":authority", - Name::Method => b":method", - Name::Scheme => b":scheme", - Name::Path => b":path", - Name::Protocol => b":protocol", - Name::Status => b":status", - } - } -} - -// ===== impl BytesStr ===== - -impl BytesStr { - pub(crate) const fn from_static(value: &'static str) -> Self { - BytesStr(Bytes::from_static(value.as_bytes())) - } - - pub(crate) fn from(value: &str) -> Self { - BytesStr(Bytes::copy_from_slice(value.as_bytes())) - } - - #[doc(hidden)] - pub fn try_from(bytes: Bytes) -> Result { - std::str::from_utf8(bytes.as_ref())?; - Ok(BytesStr(bytes)) - } - - pub(crate) fn as_str(&self) -> &str { - // Safety: check valid utf-8 in constructor - unsafe { std::str::from_utf8_unchecked(self.0.as_ref()) 
} - } - - pub(crate) fn into_inner(self) -> Bytes { - self.0 - } -} - -impl std::ops::Deref for BytesStr { - type Target = str; - fn deref(&self) -> &str { - self.as_str() - } -} - -impl AsRef<[u8]> for BytesStr { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl fmt::Debug for BytesStr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/huffman/mod.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/huffman/mod.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/huffman/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/huffman/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,199 +0,0 @@ -mod table; - -use self::table::{DECODE_TABLE, ENCODE_TABLE}; -use crate::hpack::DecoderError; - -use bytes::{BufMut, BytesMut}; - -// Constructed in the generated `table.rs` file -struct Decoder { - state: usize, - maybe_eos: bool, -} - -// These flags must match the ones in genhuff.rs - -const MAYBE_EOS: u8 = 1; -const DECODED: u8 = 2; -const ERROR: u8 = 4; - -pub fn decode(src: &[u8], buf: &mut BytesMut) -> Result { - let mut decoder = Decoder::new(); - - // Max compression ratio is >= 0.5 - buf.reserve(src.len() << 1); - - for b in src { - if let Some(b) = decoder.decode4(b >> 4)? { - buf.put_u8(b); - } - - if let Some(b) = decoder.decode4(b & 0xf)? 
{ - buf.put_u8(b); - } - } - - if !decoder.is_final() { - return Err(DecoderError::InvalidHuffmanCode); - } - - Ok(buf.split()) -} - -pub fn encode(src: &[u8], dst: &mut BytesMut) { - let mut bits: u64 = 0; - let mut bits_left = 40; - - for &b in src { - let (nbits, code) = ENCODE_TABLE[b as usize]; - - bits |= code << (bits_left - nbits); - bits_left -= nbits; - - while bits_left <= 32 { - dst.put_u8((bits >> 32) as u8); - - bits <<= 8; - bits_left += 8; - } - } - - if bits_left != 40 { - // This writes the EOS token - bits |= (1 << bits_left) - 1; - dst.put_u8((bits >> 32) as u8); - } -} - -impl Decoder { - fn new() -> Decoder { - Decoder { - state: 0, - maybe_eos: false, - } - } - - // Decodes 4 bits - fn decode4(&mut self, input: u8) -> Result, DecoderError> { - // (next-state, byte, flags) - let (next, byte, flags) = DECODE_TABLE[self.state][input as usize]; - - if flags & ERROR == ERROR { - // Data followed the EOS marker - return Err(DecoderError::InvalidHuffmanCode); - } - - let mut ret = None; - - if flags & DECODED == DECODED { - ret = Some(byte); - } - - self.state = next; - self.maybe_eos = flags & MAYBE_EOS == MAYBE_EOS; - - Ok(ret) - } - - fn is_final(&self) -> bool { - self.state == 0 || self.maybe_eos - } -} - -#[cfg(test)] -mod test { - use super::*; - - fn decode(src: &[u8]) -> Result { - let mut buf = BytesMut::new(); - super::decode(src, &mut buf) - } - - #[test] - fn decode_single_byte() { - assert_eq!("o", decode(&[0b00111111]).unwrap()); - assert_eq!("0", decode(&[7]).unwrap()); - assert_eq!("A", decode(&[(0x21 << 2) + 3]).unwrap()); - } - - #[test] - fn single_char_multi_byte() { - assert_eq!("#", decode(&[255, 160 + 15]).unwrap()); - assert_eq!("$", decode(&[255, 200 + 7]).unwrap()); - assert_eq!("\x0a", decode(&[255, 255, 255, 240 + 3]).unwrap()); - } - - #[test] - fn multi_char() { - assert_eq!("!0", decode(&[254, 1]).unwrap()); - assert_eq!(" !", decode(&[0b01010011, 0b11111000]).unwrap()); - } - - #[test] - fn encode_single_byte() { - 
let mut dst = BytesMut::with_capacity(1); - - encode(b"o", &mut dst); - assert_eq!(&dst[..], &[0b00111111]); - - dst.clear(); - encode(b"0", &mut dst); - assert_eq!(&dst[..], &[7]); - - dst.clear(); - encode(b"A", &mut dst); - assert_eq!(&dst[..], &[(0x21 << 2) + 3]); - } - - #[test] - fn encode_decode_str() { - const DATA: &[&str] = &[ - "hello world", - ":method", - ":scheme", - ":authority", - "yahoo.co.jp", - "GET", - "http", - ":path", - "/images/top/sp2/cmn/logo-ns-130528.png", - "example.com", - "hpack-test", - "xxxxxxx1", - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20100101 Firefox/16.0", - "accept", - "Accept", - "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", - "cookie", - "B=76j09a189a6h4&b=3&s=0b", - "TE", - "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi non bibendum libero. \ - Etiam ultrices lorem ut.", - ]; - - for s in DATA { - let mut dst = BytesMut::with_capacity(s.len()); - - encode(s.as_bytes(), &mut dst); - - let decoded = decode(&dst).unwrap(); - - assert_eq!(&decoded[..], s.as_bytes()); - } - } - - #[test] - fn encode_decode_u8() { - const DATA: &[&[u8]] = &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; - - for s in DATA { - let mut dst = BytesMut::with_capacity(s.len()); - - encode(s, &mut dst); - - let decoded = decode(&dst).unwrap(); - - assert_eq!(&decoded[..], &s[..]); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/huffman/table.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/huffman/table.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/huffman/table.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/huffman/table.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,5130 +0,0 @@ -// !!! DO NOT EDIT !!! 
Generated by util/genhuff/src/main.rs - -// (num-bits, bits) -pub const ENCODE_TABLE: [(usize, u64); 257] = [ - (13, 0x1ff8), - (23, 0x007f_ffd8), - (28, 0x0fff_ffe2), - (28, 0x0fff_ffe3), - (28, 0x0fff_ffe4), - (28, 0x0fff_ffe5), - (28, 0x0fff_ffe6), - (28, 0x0fff_ffe7), - (28, 0x0fff_ffe8), - (24, 0x00ff_ffea), - (30, 0x3fff_fffc), - (28, 0x0fff_ffe9), - (28, 0x0fff_ffea), - (30, 0x3fff_fffd), - (28, 0x0fff_ffeb), - (28, 0x0fff_ffec), - (28, 0x0fff_ffed), - (28, 0x0fff_ffee), - (28, 0x0fff_ffef), - (28, 0x0fff_fff0), - (28, 0x0fff_fff1), - (28, 0x0fff_fff2), - (30, 0x3fff_fffe), - (28, 0x0fff_fff3), - (28, 0x0fff_fff4), - (28, 0x0fff_fff5), - (28, 0x0fff_fff6), - (28, 0x0fff_fff7), - (28, 0x0fff_fff8), - (28, 0x0fff_fff9), - (28, 0x0fff_fffa), - (28, 0x0fff_fffb), - (6, 0x14), - (10, 0x3f8), - (10, 0x3f9), - (12, 0xffa), - (13, 0x1ff9), - (6, 0x15), - (8, 0xf8), - (11, 0x7fa), - (10, 0x3fa), - (10, 0x3fb), - (8, 0xf9), - (11, 0x7fb), - (8, 0xfa), - (6, 0x16), - (6, 0x17), - (6, 0x18), - (5, 0x0), - (5, 0x1), - (5, 0x2), - (6, 0x19), - (6, 0x1a), - (6, 0x1b), - (6, 0x1c), - (6, 0x1d), - (6, 0x1e), - (6, 0x1f), - (7, 0x5c), - (8, 0xfb), - (15, 0x7ffc), - (6, 0x20), - (12, 0xffb), - (10, 0x3fc), - (13, 0x1ffa), - (6, 0x21), - (7, 0x5d), - (7, 0x5e), - (7, 0x5f), - (7, 0x60), - (7, 0x61), - (7, 0x62), - (7, 0x63), - (7, 0x64), - (7, 0x65), - (7, 0x66), - (7, 0x67), - (7, 0x68), - (7, 0x69), - (7, 0x6a), - (7, 0x6b), - (7, 0x6c), - (7, 0x6d), - (7, 0x6e), - (7, 0x6f), - (7, 0x70), - (7, 0x71), - (7, 0x72), - (8, 0xfc), - (7, 0x73), - (8, 0xfd), - (13, 0x1ffb), - (19, 0x7fff0), - (13, 0x1ffc), - (14, 0x3ffc), - (6, 0x22), - (15, 0x7ffd), - (5, 0x3), - (6, 0x23), - (5, 0x4), - (6, 0x24), - (5, 0x5), - (6, 0x25), - (6, 0x26), - (6, 0x27), - (5, 0x6), - (7, 0x74), - (7, 0x75), - (6, 0x28), - (6, 0x29), - (6, 0x2a), - (5, 0x7), - (6, 0x2b), - (7, 0x76), - (6, 0x2c), - (5, 0x8), - (5, 0x9), - (6, 0x2d), - (7, 0x77), - (7, 0x78), - (7, 0x79), - (7, 0x7a), - (7, 0x7b), - (15, 
0x7ffe), - (11, 0x7fc), - (14, 0x3ffd), - (13, 0x1ffd), - (28, 0x0fff_fffc), - (20, 0xfffe6), - (22, 0x003f_ffd2), - (20, 0xfffe7), - (20, 0xfffe8), - (22, 0x003f_ffd3), - (22, 0x003f_ffd4), - (22, 0x003f_ffd5), - (23, 0x007f_ffd9), - (22, 0x003f_ffd6), - (23, 0x007f_ffda), - (23, 0x007f_ffdb), - (23, 0x007f_ffdc), - (23, 0x007f_ffdd), - (23, 0x007f_ffde), - (24, 0x00ff_ffeb), - (23, 0x007f_ffdf), - (24, 0x00ff_ffec), - (24, 0x00ff_ffed), - (22, 0x003f_ffd7), - (23, 0x007f_ffe0), - (24, 0x00ff_ffee), - (23, 0x007f_ffe1), - (23, 0x007f_ffe2), - (23, 0x007f_ffe3), - (23, 0x007f_ffe4), - (21, 0x001f_ffdc), - (22, 0x003f_ffd8), - (23, 0x007f_ffe5), - (22, 0x003f_ffd9), - (23, 0x007f_ffe6), - (23, 0x007f_ffe7), - (24, 0x00ff_ffef), - (22, 0x003f_ffda), - (21, 0x001f_ffdd), - (20, 0xfffe9), - (22, 0x003f_ffdb), - (22, 0x003f_ffdc), - (23, 0x007f_ffe8), - (23, 0x007f_ffe9), - (21, 0x001f_ffde), - (23, 0x007f_ffea), - (22, 0x003f_ffdd), - (22, 0x003f_ffde), - (24, 0x00ff_fff0), - (21, 0x001f_ffdf), - (22, 0x003f_ffdf), - (23, 0x007f_ffeb), - (23, 0x007f_ffec), - (21, 0x001f_ffe0), - (21, 0x001f_ffe1), - (22, 0x003f_ffe0), - (21, 0x001f_ffe2), - (23, 0x007f_ffed), - (22, 0x003f_ffe1), - (23, 0x007f_ffee), - (23, 0x007f_ffef), - (20, 0xfffea), - (22, 0x003f_ffe2), - (22, 0x003f_ffe3), - (22, 0x003f_ffe4), - (23, 0x007f_fff0), - (22, 0x003f_ffe5), - (22, 0x003f_ffe6), - (23, 0x007f_fff1), - (26, 0x03ff_ffe0), - (26, 0x03ff_ffe1), - (20, 0xfffeb), - (19, 0x7fff1), - (22, 0x003f_ffe7), - (23, 0x007f_fff2), - (22, 0x003f_ffe8), - (25, 0x01ff_ffec), - (26, 0x03ff_ffe2), - (26, 0x03ff_ffe3), - (26, 0x03ff_ffe4), - (27, 0x07ff_ffde), - (27, 0x07ff_ffdf), - (26, 0x03ff_ffe5), - (24, 0x00ff_fff1), - (25, 0x01ff_ffed), - (19, 0x7fff2), - (21, 0x001f_ffe3), - (26, 0x03ff_ffe6), - (27, 0x07ff_ffe0), - (27, 0x07ff_ffe1), - (26, 0x03ff_ffe7), - (27, 0x07ff_ffe2), - (24, 0x00ff_fff2), - (21, 0x001f_ffe4), - (21, 0x001f_ffe5), - (26, 0x03ff_ffe8), - (26, 0x03ff_ffe9), - (28, 0x0fff_fffd), - 
(27, 0x07ff_ffe3), - (27, 0x07ff_ffe4), - (27, 0x07ff_ffe5), - (20, 0xfffec), - (24, 0x00ff_fff3), - (20, 0xfffed), - (21, 0x001f_ffe6), - (22, 0x003f_ffe9), - (21, 0x001f_ffe7), - (21, 0x001f_ffe8), - (23, 0x007f_fff3), - (22, 0x003f_ffea), - (22, 0x003f_ffeb), - (25, 0x01ff_ffee), - (25, 0x01ff_ffef), - (24, 0x00ff_fff4), - (24, 0x00ff_fff5), - (26, 0x03ff_ffea), - (23, 0x007f_fff4), - (26, 0x03ff_ffeb), - (27, 0x07ff_ffe6), - (26, 0x03ff_ffec), - (26, 0x03ff_ffed), - (27, 0x07ff_ffe7), - (27, 0x07ff_ffe8), - (27, 0x07ff_ffe9), - (27, 0x07ff_ffea), - (27, 0x07ff_ffeb), - (28, 0x0fff_fffe), - (27, 0x07ff_ffec), - (27, 0x07ff_ffed), - (27, 0x07ff_ffee), - (27, 0x07ff_ffef), - (27, 0x07ff_fff0), - (26, 0x03ff_ffee), - (30, 0x3fff_ffff), -]; - -// (next-state, byte, flags) -pub const DECODE_TABLE: [[(usize, u8, u8); 16]; 256] = [ - // 0 - [ - (4, 0, 0x00), - (5, 0, 0x00), - (7, 0, 0x00), - (8, 0, 0x00), - (11, 0, 0x00), - (12, 0, 0x00), - (16, 0, 0x00), - (19, 0, 0x00), - (25, 0, 0x00), - (28, 0, 0x00), - (32, 0, 0x00), - (35, 0, 0x00), - (42, 0, 0x00), - (49, 0, 0x00), - (57, 0, 0x00), - (64, 0, 0x01), - ], - // 1 - [ - (0, 48, 0x02), - (0, 49, 0x02), - (0, 50, 0x02), - (0, 97, 0x02), - (0, 99, 0x02), - (0, 101, 0x02), - (0, 105, 0x02), - (0, 111, 0x02), - (0, 115, 0x02), - (0, 116, 0x02), - (13, 0, 0x00), - (14, 0, 0x00), - (17, 0, 0x00), - (18, 0, 0x00), - (20, 0, 0x00), - (21, 0, 0x00), - ], - // 2 - [ - (1, 48, 0x02), - (22, 48, 0x03), - (1, 49, 0x02), - (22, 49, 0x03), - (1, 50, 0x02), - (22, 50, 0x03), - (1, 97, 0x02), - (22, 97, 0x03), - (1, 99, 0x02), - (22, 99, 0x03), - (1, 101, 0x02), - (22, 101, 0x03), - (1, 105, 0x02), - (22, 105, 0x03), - (1, 111, 0x02), - (22, 111, 0x03), - ], - // 3 - [ - (2, 48, 0x02), - (9, 48, 0x02), - (23, 48, 0x02), - (40, 48, 0x03), - (2, 49, 0x02), - (9, 49, 0x02), - (23, 49, 0x02), - (40, 49, 0x03), - (2, 50, 0x02), - (9, 50, 0x02), - (23, 50, 0x02), - (40, 50, 0x03), - (2, 97, 0x02), - (9, 97, 0x02), - (23, 97, 0x02), - (40, 
97, 0x03), - ], - // 4 - [ - (3, 48, 0x02), - (6, 48, 0x02), - (10, 48, 0x02), - (15, 48, 0x02), - (24, 48, 0x02), - (31, 48, 0x02), - (41, 48, 0x02), - (56, 48, 0x03), - (3, 49, 0x02), - (6, 49, 0x02), - (10, 49, 0x02), - (15, 49, 0x02), - (24, 49, 0x02), - (31, 49, 0x02), - (41, 49, 0x02), - (56, 49, 0x03), - ], - // 5 - [ - (3, 50, 0x02), - (6, 50, 0x02), - (10, 50, 0x02), - (15, 50, 0x02), - (24, 50, 0x02), - (31, 50, 0x02), - (41, 50, 0x02), - (56, 50, 0x03), - (3, 97, 0x02), - (6, 97, 0x02), - (10, 97, 0x02), - (15, 97, 0x02), - (24, 97, 0x02), - (31, 97, 0x02), - (41, 97, 0x02), - (56, 97, 0x03), - ], - // 6 - [ - (2, 99, 0x02), - (9, 99, 0x02), - (23, 99, 0x02), - (40, 99, 0x03), - (2, 101, 0x02), - (9, 101, 0x02), - (23, 101, 0x02), - (40, 101, 0x03), - (2, 105, 0x02), - (9, 105, 0x02), - (23, 105, 0x02), - (40, 105, 0x03), - (2, 111, 0x02), - (9, 111, 0x02), - (23, 111, 0x02), - (40, 111, 0x03), - ], - // 7 - [ - (3, 99, 0x02), - (6, 99, 0x02), - (10, 99, 0x02), - (15, 99, 0x02), - (24, 99, 0x02), - (31, 99, 0x02), - (41, 99, 0x02), - (56, 99, 0x03), - (3, 101, 0x02), - (6, 101, 0x02), - (10, 101, 0x02), - (15, 101, 0x02), - (24, 101, 0x02), - (31, 101, 0x02), - (41, 101, 0x02), - (56, 101, 0x03), - ], - // 8 - [ - (3, 105, 0x02), - (6, 105, 0x02), - (10, 105, 0x02), - (15, 105, 0x02), - (24, 105, 0x02), - (31, 105, 0x02), - (41, 105, 0x02), - (56, 105, 0x03), - (3, 111, 0x02), - (6, 111, 0x02), - (10, 111, 0x02), - (15, 111, 0x02), - (24, 111, 0x02), - (31, 111, 0x02), - (41, 111, 0x02), - (56, 111, 0x03), - ], - // 9 - [ - (1, 115, 0x02), - (22, 115, 0x03), - (1, 116, 0x02), - (22, 116, 0x03), - (0, 32, 0x02), - (0, 37, 0x02), - (0, 45, 0x02), - (0, 46, 0x02), - (0, 47, 0x02), - (0, 51, 0x02), - (0, 52, 0x02), - (0, 53, 0x02), - (0, 54, 0x02), - (0, 55, 0x02), - (0, 56, 0x02), - (0, 57, 0x02), - ], - // 10 - [ - (2, 115, 0x02), - (9, 115, 0x02), - (23, 115, 0x02), - (40, 115, 0x03), - (2, 116, 0x02), - (9, 116, 0x02), - (23, 116, 0x02), - (40, 116, 
0x03), - (1, 32, 0x02), - (22, 32, 0x03), - (1, 37, 0x02), - (22, 37, 0x03), - (1, 45, 0x02), - (22, 45, 0x03), - (1, 46, 0x02), - (22, 46, 0x03), - ], - // 11 - [ - (3, 115, 0x02), - (6, 115, 0x02), - (10, 115, 0x02), - (15, 115, 0x02), - (24, 115, 0x02), - (31, 115, 0x02), - (41, 115, 0x02), - (56, 115, 0x03), - (3, 116, 0x02), - (6, 116, 0x02), - (10, 116, 0x02), - (15, 116, 0x02), - (24, 116, 0x02), - (31, 116, 0x02), - (41, 116, 0x02), - (56, 116, 0x03), - ], - // 12 - [ - (2, 32, 0x02), - (9, 32, 0x02), - (23, 32, 0x02), - (40, 32, 0x03), - (2, 37, 0x02), - (9, 37, 0x02), - (23, 37, 0x02), - (40, 37, 0x03), - (2, 45, 0x02), - (9, 45, 0x02), - (23, 45, 0x02), - (40, 45, 0x03), - (2, 46, 0x02), - (9, 46, 0x02), - (23, 46, 0x02), - (40, 46, 0x03), - ], - // 13 - [ - (3, 32, 0x02), - (6, 32, 0x02), - (10, 32, 0x02), - (15, 32, 0x02), - (24, 32, 0x02), - (31, 32, 0x02), - (41, 32, 0x02), - (56, 32, 0x03), - (3, 37, 0x02), - (6, 37, 0x02), - (10, 37, 0x02), - (15, 37, 0x02), - (24, 37, 0x02), - (31, 37, 0x02), - (41, 37, 0x02), - (56, 37, 0x03), - ], - // 14 - [ - (3, 45, 0x02), - (6, 45, 0x02), - (10, 45, 0x02), - (15, 45, 0x02), - (24, 45, 0x02), - (31, 45, 0x02), - (41, 45, 0x02), - (56, 45, 0x03), - (3, 46, 0x02), - (6, 46, 0x02), - (10, 46, 0x02), - (15, 46, 0x02), - (24, 46, 0x02), - (31, 46, 0x02), - (41, 46, 0x02), - (56, 46, 0x03), - ], - // 15 - [ - (1, 47, 0x02), - (22, 47, 0x03), - (1, 51, 0x02), - (22, 51, 0x03), - (1, 52, 0x02), - (22, 52, 0x03), - (1, 53, 0x02), - (22, 53, 0x03), - (1, 54, 0x02), - (22, 54, 0x03), - (1, 55, 0x02), - (22, 55, 0x03), - (1, 56, 0x02), - (22, 56, 0x03), - (1, 57, 0x02), - (22, 57, 0x03), - ], - // 16 - [ - (2, 47, 0x02), - (9, 47, 0x02), - (23, 47, 0x02), - (40, 47, 0x03), - (2, 51, 0x02), - (9, 51, 0x02), - (23, 51, 0x02), - (40, 51, 0x03), - (2, 52, 0x02), - (9, 52, 0x02), - (23, 52, 0x02), - (40, 52, 0x03), - (2, 53, 0x02), - (9, 53, 0x02), - (23, 53, 0x02), - (40, 53, 0x03), - ], - // 17 - [ - (3, 47, 0x02), - (6, 
47, 0x02), - (10, 47, 0x02), - (15, 47, 0x02), - (24, 47, 0x02), - (31, 47, 0x02), - (41, 47, 0x02), - (56, 47, 0x03), - (3, 51, 0x02), - (6, 51, 0x02), - (10, 51, 0x02), - (15, 51, 0x02), - (24, 51, 0x02), - (31, 51, 0x02), - (41, 51, 0x02), - (56, 51, 0x03), - ], - // 18 - [ - (3, 52, 0x02), - (6, 52, 0x02), - (10, 52, 0x02), - (15, 52, 0x02), - (24, 52, 0x02), - (31, 52, 0x02), - (41, 52, 0x02), - (56, 52, 0x03), - (3, 53, 0x02), - (6, 53, 0x02), - (10, 53, 0x02), - (15, 53, 0x02), - (24, 53, 0x02), - (31, 53, 0x02), - (41, 53, 0x02), - (56, 53, 0x03), - ], - // 19 - [ - (2, 54, 0x02), - (9, 54, 0x02), - (23, 54, 0x02), - (40, 54, 0x03), - (2, 55, 0x02), - (9, 55, 0x02), - (23, 55, 0x02), - (40, 55, 0x03), - (2, 56, 0x02), - (9, 56, 0x02), - (23, 56, 0x02), - (40, 56, 0x03), - (2, 57, 0x02), - (9, 57, 0x02), - (23, 57, 0x02), - (40, 57, 0x03), - ], - // 20 - [ - (3, 54, 0x02), - (6, 54, 0x02), - (10, 54, 0x02), - (15, 54, 0x02), - (24, 54, 0x02), - (31, 54, 0x02), - (41, 54, 0x02), - (56, 54, 0x03), - (3, 55, 0x02), - (6, 55, 0x02), - (10, 55, 0x02), - (15, 55, 0x02), - (24, 55, 0x02), - (31, 55, 0x02), - (41, 55, 0x02), - (56, 55, 0x03), - ], - // 21 - [ - (3, 56, 0x02), - (6, 56, 0x02), - (10, 56, 0x02), - (15, 56, 0x02), - (24, 56, 0x02), - (31, 56, 0x02), - (41, 56, 0x02), - (56, 56, 0x03), - (3, 57, 0x02), - (6, 57, 0x02), - (10, 57, 0x02), - (15, 57, 0x02), - (24, 57, 0x02), - (31, 57, 0x02), - (41, 57, 0x02), - (56, 57, 0x03), - ], - // 22 - [ - (26, 0, 0x00), - (27, 0, 0x00), - (29, 0, 0x00), - (30, 0, 0x00), - (33, 0, 0x00), - (34, 0, 0x00), - (36, 0, 0x00), - (37, 0, 0x00), - (43, 0, 0x00), - (46, 0, 0x00), - (50, 0, 0x00), - (53, 0, 0x00), - (58, 0, 0x00), - (61, 0, 0x00), - (65, 0, 0x00), - (68, 0, 0x01), - ], - // 23 - [ - (0, 61, 0x02), - (0, 65, 0x02), - (0, 95, 0x02), - (0, 98, 0x02), - (0, 100, 0x02), - (0, 102, 0x02), - (0, 103, 0x02), - (0, 104, 0x02), - (0, 108, 0x02), - (0, 109, 0x02), - (0, 110, 0x02), - (0, 112, 0x02), - (0, 114, 0x02), - 
(0, 117, 0x02), - (38, 0, 0x00), - (39, 0, 0x00), - ], - // 24 - [ - (1, 61, 0x02), - (22, 61, 0x03), - (1, 65, 0x02), - (22, 65, 0x03), - (1, 95, 0x02), - (22, 95, 0x03), - (1, 98, 0x02), - (22, 98, 0x03), - (1, 100, 0x02), - (22, 100, 0x03), - (1, 102, 0x02), - (22, 102, 0x03), - (1, 103, 0x02), - (22, 103, 0x03), - (1, 104, 0x02), - (22, 104, 0x03), - ], - // 25 - [ - (2, 61, 0x02), - (9, 61, 0x02), - (23, 61, 0x02), - (40, 61, 0x03), - (2, 65, 0x02), - (9, 65, 0x02), - (23, 65, 0x02), - (40, 65, 0x03), - (2, 95, 0x02), - (9, 95, 0x02), - (23, 95, 0x02), - (40, 95, 0x03), - (2, 98, 0x02), - (9, 98, 0x02), - (23, 98, 0x02), - (40, 98, 0x03), - ], - // 26 - [ - (3, 61, 0x02), - (6, 61, 0x02), - (10, 61, 0x02), - (15, 61, 0x02), - (24, 61, 0x02), - (31, 61, 0x02), - (41, 61, 0x02), - (56, 61, 0x03), - (3, 65, 0x02), - (6, 65, 0x02), - (10, 65, 0x02), - (15, 65, 0x02), - (24, 65, 0x02), - (31, 65, 0x02), - (41, 65, 0x02), - (56, 65, 0x03), - ], - // 27 - [ - (3, 95, 0x02), - (6, 95, 0x02), - (10, 95, 0x02), - (15, 95, 0x02), - (24, 95, 0x02), - (31, 95, 0x02), - (41, 95, 0x02), - (56, 95, 0x03), - (3, 98, 0x02), - (6, 98, 0x02), - (10, 98, 0x02), - (15, 98, 0x02), - (24, 98, 0x02), - (31, 98, 0x02), - (41, 98, 0x02), - (56, 98, 0x03), - ], - // 28 - [ - (2, 100, 0x02), - (9, 100, 0x02), - (23, 100, 0x02), - (40, 100, 0x03), - (2, 102, 0x02), - (9, 102, 0x02), - (23, 102, 0x02), - (40, 102, 0x03), - (2, 103, 0x02), - (9, 103, 0x02), - (23, 103, 0x02), - (40, 103, 0x03), - (2, 104, 0x02), - (9, 104, 0x02), - (23, 104, 0x02), - (40, 104, 0x03), - ], - // 29 - [ - (3, 100, 0x02), - (6, 100, 0x02), - (10, 100, 0x02), - (15, 100, 0x02), - (24, 100, 0x02), - (31, 100, 0x02), - (41, 100, 0x02), - (56, 100, 0x03), - (3, 102, 0x02), - (6, 102, 0x02), - (10, 102, 0x02), - (15, 102, 0x02), - (24, 102, 0x02), - (31, 102, 0x02), - (41, 102, 0x02), - (56, 102, 0x03), - ], - // 30 - [ - (3, 103, 0x02), - (6, 103, 0x02), - (10, 103, 0x02), - (15, 103, 0x02), - (24, 103, 0x02), - 
(31, 103, 0x02), - (41, 103, 0x02), - (56, 103, 0x03), - (3, 104, 0x02), - (6, 104, 0x02), - (10, 104, 0x02), - (15, 104, 0x02), - (24, 104, 0x02), - (31, 104, 0x02), - (41, 104, 0x02), - (56, 104, 0x03), - ], - // 31 - [ - (1, 108, 0x02), - (22, 108, 0x03), - (1, 109, 0x02), - (22, 109, 0x03), - (1, 110, 0x02), - (22, 110, 0x03), - (1, 112, 0x02), - (22, 112, 0x03), - (1, 114, 0x02), - (22, 114, 0x03), - (1, 117, 0x02), - (22, 117, 0x03), - (0, 58, 0x02), - (0, 66, 0x02), - (0, 67, 0x02), - (0, 68, 0x02), - ], - // 32 - [ - (2, 108, 0x02), - (9, 108, 0x02), - (23, 108, 0x02), - (40, 108, 0x03), - (2, 109, 0x02), - (9, 109, 0x02), - (23, 109, 0x02), - (40, 109, 0x03), - (2, 110, 0x02), - (9, 110, 0x02), - (23, 110, 0x02), - (40, 110, 0x03), - (2, 112, 0x02), - (9, 112, 0x02), - (23, 112, 0x02), - (40, 112, 0x03), - ], - // 33 - [ - (3, 108, 0x02), - (6, 108, 0x02), - (10, 108, 0x02), - (15, 108, 0x02), - (24, 108, 0x02), - (31, 108, 0x02), - (41, 108, 0x02), - (56, 108, 0x03), - (3, 109, 0x02), - (6, 109, 0x02), - (10, 109, 0x02), - (15, 109, 0x02), - (24, 109, 0x02), - (31, 109, 0x02), - (41, 109, 0x02), - (56, 109, 0x03), - ], - // 34 - [ - (3, 110, 0x02), - (6, 110, 0x02), - (10, 110, 0x02), - (15, 110, 0x02), - (24, 110, 0x02), - (31, 110, 0x02), - (41, 110, 0x02), - (56, 110, 0x03), - (3, 112, 0x02), - (6, 112, 0x02), - (10, 112, 0x02), - (15, 112, 0x02), - (24, 112, 0x02), - (31, 112, 0x02), - (41, 112, 0x02), - (56, 112, 0x03), - ], - // 35 - [ - (2, 114, 0x02), - (9, 114, 0x02), - (23, 114, 0x02), - (40, 114, 0x03), - (2, 117, 0x02), - (9, 117, 0x02), - (23, 117, 0x02), - (40, 117, 0x03), - (1, 58, 0x02), - (22, 58, 0x03), - (1, 66, 0x02), - (22, 66, 0x03), - (1, 67, 0x02), - (22, 67, 0x03), - (1, 68, 0x02), - (22, 68, 0x03), - ], - // 36 - [ - (3, 114, 0x02), - (6, 114, 0x02), - (10, 114, 0x02), - (15, 114, 0x02), - (24, 114, 0x02), - (31, 114, 0x02), - (41, 114, 0x02), - (56, 114, 0x03), - (3, 117, 0x02), - (6, 117, 0x02), - (10, 117, 0x02), - (15, 117, 
0x02), - (24, 117, 0x02), - (31, 117, 0x02), - (41, 117, 0x02), - (56, 117, 0x03), - ], - // 37 - [ - (2, 58, 0x02), - (9, 58, 0x02), - (23, 58, 0x02), - (40, 58, 0x03), - (2, 66, 0x02), - (9, 66, 0x02), - (23, 66, 0x02), - (40, 66, 0x03), - (2, 67, 0x02), - (9, 67, 0x02), - (23, 67, 0x02), - (40, 67, 0x03), - (2, 68, 0x02), - (9, 68, 0x02), - (23, 68, 0x02), - (40, 68, 0x03), - ], - // 38 - [ - (3, 58, 0x02), - (6, 58, 0x02), - (10, 58, 0x02), - (15, 58, 0x02), - (24, 58, 0x02), - (31, 58, 0x02), - (41, 58, 0x02), - (56, 58, 0x03), - (3, 66, 0x02), - (6, 66, 0x02), - (10, 66, 0x02), - (15, 66, 0x02), - (24, 66, 0x02), - (31, 66, 0x02), - (41, 66, 0x02), - (56, 66, 0x03), - ], - // 39 - [ - (3, 67, 0x02), - (6, 67, 0x02), - (10, 67, 0x02), - (15, 67, 0x02), - (24, 67, 0x02), - (31, 67, 0x02), - (41, 67, 0x02), - (56, 67, 0x03), - (3, 68, 0x02), - (6, 68, 0x02), - (10, 68, 0x02), - (15, 68, 0x02), - (24, 68, 0x02), - (31, 68, 0x02), - (41, 68, 0x02), - (56, 68, 0x03), - ], - // 40 - [ - (44, 0, 0x00), - (45, 0, 0x00), - (47, 0, 0x00), - (48, 0, 0x00), - (51, 0, 0x00), - (52, 0, 0x00), - (54, 0, 0x00), - (55, 0, 0x00), - (59, 0, 0x00), - (60, 0, 0x00), - (62, 0, 0x00), - (63, 0, 0x00), - (66, 0, 0x00), - (67, 0, 0x00), - (69, 0, 0x00), - (72, 0, 0x01), - ], - // 41 - [ - (0, 69, 0x02), - (0, 70, 0x02), - (0, 71, 0x02), - (0, 72, 0x02), - (0, 73, 0x02), - (0, 74, 0x02), - (0, 75, 0x02), - (0, 76, 0x02), - (0, 77, 0x02), - (0, 78, 0x02), - (0, 79, 0x02), - (0, 80, 0x02), - (0, 81, 0x02), - (0, 82, 0x02), - (0, 83, 0x02), - (0, 84, 0x02), - ], - // 42 - [ - (1, 69, 0x02), - (22, 69, 0x03), - (1, 70, 0x02), - (22, 70, 0x03), - (1, 71, 0x02), - (22, 71, 0x03), - (1, 72, 0x02), - (22, 72, 0x03), - (1, 73, 0x02), - (22, 73, 0x03), - (1, 74, 0x02), - (22, 74, 0x03), - (1, 75, 0x02), - (22, 75, 0x03), - (1, 76, 0x02), - (22, 76, 0x03), - ], - // 43 - [ - (2, 69, 0x02), - (9, 69, 0x02), - (23, 69, 0x02), - (40, 69, 0x03), - (2, 70, 0x02), - (9, 70, 0x02), - (23, 70, 0x02), - 
(40, 70, 0x03), - (2, 71, 0x02), - (9, 71, 0x02), - (23, 71, 0x02), - (40, 71, 0x03), - (2, 72, 0x02), - (9, 72, 0x02), - (23, 72, 0x02), - (40, 72, 0x03), - ], - // 44 - [ - (3, 69, 0x02), - (6, 69, 0x02), - (10, 69, 0x02), - (15, 69, 0x02), - (24, 69, 0x02), - (31, 69, 0x02), - (41, 69, 0x02), - (56, 69, 0x03), - (3, 70, 0x02), - (6, 70, 0x02), - (10, 70, 0x02), - (15, 70, 0x02), - (24, 70, 0x02), - (31, 70, 0x02), - (41, 70, 0x02), - (56, 70, 0x03), - ], - // 45 - [ - (3, 71, 0x02), - (6, 71, 0x02), - (10, 71, 0x02), - (15, 71, 0x02), - (24, 71, 0x02), - (31, 71, 0x02), - (41, 71, 0x02), - (56, 71, 0x03), - (3, 72, 0x02), - (6, 72, 0x02), - (10, 72, 0x02), - (15, 72, 0x02), - (24, 72, 0x02), - (31, 72, 0x02), - (41, 72, 0x02), - (56, 72, 0x03), - ], - // 46 - [ - (2, 73, 0x02), - (9, 73, 0x02), - (23, 73, 0x02), - (40, 73, 0x03), - (2, 74, 0x02), - (9, 74, 0x02), - (23, 74, 0x02), - (40, 74, 0x03), - (2, 75, 0x02), - (9, 75, 0x02), - (23, 75, 0x02), - (40, 75, 0x03), - (2, 76, 0x02), - (9, 76, 0x02), - (23, 76, 0x02), - (40, 76, 0x03), - ], - // 47 - [ - (3, 73, 0x02), - (6, 73, 0x02), - (10, 73, 0x02), - (15, 73, 0x02), - (24, 73, 0x02), - (31, 73, 0x02), - (41, 73, 0x02), - (56, 73, 0x03), - (3, 74, 0x02), - (6, 74, 0x02), - (10, 74, 0x02), - (15, 74, 0x02), - (24, 74, 0x02), - (31, 74, 0x02), - (41, 74, 0x02), - (56, 74, 0x03), - ], - // 48 - [ - (3, 75, 0x02), - (6, 75, 0x02), - (10, 75, 0x02), - (15, 75, 0x02), - (24, 75, 0x02), - (31, 75, 0x02), - (41, 75, 0x02), - (56, 75, 0x03), - (3, 76, 0x02), - (6, 76, 0x02), - (10, 76, 0x02), - (15, 76, 0x02), - (24, 76, 0x02), - (31, 76, 0x02), - (41, 76, 0x02), - (56, 76, 0x03), - ], - // 49 - [ - (1, 77, 0x02), - (22, 77, 0x03), - (1, 78, 0x02), - (22, 78, 0x03), - (1, 79, 0x02), - (22, 79, 0x03), - (1, 80, 0x02), - (22, 80, 0x03), - (1, 81, 0x02), - (22, 81, 0x03), - (1, 82, 0x02), - (22, 82, 0x03), - (1, 83, 0x02), - (22, 83, 0x03), - (1, 84, 0x02), - (22, 84, 0x03), - ], - // 50 - [ - (2, 77, 0x02), - (9, 77, 
0x02), - (23, 77, 0x02), - (40, 77, 0x03), - (2, 78, 0x02), - (9, 78, 0x02), - (23, 78, 0x02), - (40, 78, 0x03), - (2, 79, 0x02), - (9, 79, 0x02), - (23, 79, 0x02), - (40, 79, 0x03), - (2, 80, 0x02), - (9, 80, 0x02), - (23, 80, 0x02), - (40, 80, 0x03), - ], - // 51 - [ - (3, 77, 0x02), - (6, 77, 0x02), - (10, 77, 0x02), - (15, 77, 0x02), - (24, 77, 0x02), - (31, 77, 0x02), - (41, 77, 0x02), - (56, 77, 0x03), - (3, 78, 0x02), - (6, 78, 0x02), - (10, 78, 0x02), - (15, 78, 0x02), - (24, 78, 0x02), - (31, 78, 0x02), - (41, 78, 0x02), - (56, 78, 0x03), - ], - // 52 - [ - (3, 79, 0x02), - (6, 79, 0x02), - (10, 79, 0x02), - (15, 79, 0x02), - (24, 79, 0x02), - (31, 79, 0x02), - (41, 79, 0x02), - (56, 79, 0x03), - (3, 80, 0x02), - (6, 80, 0x02), - (10, 80, 0x02), - (15, 80, 0x02), - (24, 80, 0x02), - (31, 80, 0x02), - (41, 80, 0x02), - (56, 80, 0x03), - ], - // 53 - [ - (2, 81, 0x02), - (9, 81, 0x02), - (23, 81, 0x02), - (40, 81, 0x03), - (2, 82, 0x02), - (9, 82, 0x02), - (23, 82, 0x02), - (40, 82, 0x03), - (2, 83, 0x02), - (9, 83, 0x02), - (23, 83, 0x02), - (40, 83, 0x03), - (2, 84, 0x02), - (9, 84, 0x02), - (23, 84, 0x02), - (40, 84, 0x03), - ], - // 54 - [ - (3, 81, 0x02), - (6, 81, 0x02), - (10, 81, 0x02), - (15, 81, 0x02), - (24, 81, 0x02), - (31, 81, 0x02), - (41, 81, 0x02), - (56, 81, 0x03), - (3, 82, 0x02), - (6, 82, 0x02), - (10, 82, 0x02), - (15, 82, 0x02), - (24, 82, 0x02), - (31, 82, 0x02), - (41, 82, 0x02), - (56, 82, 0x03), - ], - // 55 - [ - (3, 83, 0x02), - (6, 83, 0x02), - (10, 83, 0x02), - (15, 83, 0x02), - (24, 83, 0x02), - (31, 83, 0x02), - (41, 83, 0x02), - (56, 83, 0x03), - (3, 84, 0x02), - (6, 84, 0x02), - (10, 84, 0x02), - (15, 84, 0x02), - (24, 84, 0x02), - (31, 84, 0x02), - (41, 84, 0x02), - (56, 84, 0x03), - ], - // 56 - [ - (0, 85, 0x02), - (0, 86, 0x02), - (0, 87, 0x02), - (0, 89, 0x02), - (0, 106, 0x02), - (0, 107, 0x02), - (0, 113, 0x02), - (0, 118, 0x02), - (0, 119, 0x02), - (0, 120, 0x02), - (0, 121, 0x02), - (0, 122, 0x02), - (70, 0, 0x00), 
- (71, 0, 0x00), - (73, 0, 0x00), - (74, 0, 0x01), - ], - // 57 - [ - (1, 85, 0x02), - (22, 85, 0x03), - (1, 86, 0x02), - (22, 86, 0x03), - (1, 87, 0x02), - (22, 87, 0x03), - (1, 89, 0x02), - (22, 89, 0x03), - (1, 106, 0x02), - (22, 106, 0x03), - (1, 107, 0x02), - (22, 107, 0x03), - (1, 113, 0x02), - (22, 113, 0x03), - (1, 118, 0x02), - (22, 118, 0x03), - ], - // 58 - [ - (2, 85, 0x02), - (9, 85, 0x02), - (23, 85, 0x02), - (40, 85, 0x03), - (2, 86, 0x02), - (9, 86, 0x02), - (23, 86, 0x02), - (40, 86, 0x03), - (2, 87, 0x02), - (9, 87, 0x02), - (23, 87, 0x02), - (40, 87, 0x03), - (2, 89, 0x02), - (9, 89, 0x02), - (23, 89, 0x02), - (40, 89, 0x03), - ], - // 59 - [ - (3, 85, 0x02), - (6, 85, 0x02), - (10, 85, 0x02), - (15, 85, 0x02), - (24, 85, 0x02), - (31, 85, 0x02), - (41, 85, 0x02), - (56, 85, 0x03), - (3, 86, 0x02), - (6, 86, 0x02), - (10, 86, 0x02), - (15, 86, 0x02), - (24, 86, 0x02), - (31, 86, 0x02), - (41, 86, 0x02), - (56, 86, 0x03), - ], - // 60 - [ - (3, 87, 0x02), - (6, 87, 0x02), - (10, 87, 0x02), - (15, 87, 0x02), - (24, 87, 0x02), - (31, 87, 0x02), - (41, 87, 0x02), - (56, 87, 0x03), - (3, 89, 0x02), - (6, 89, 0x02), - (10, 89, 0x02), - (15, 89, 0x02), - (24, 89, 0x02), - (31, 89, 0x02), - (41, 89, 0x02), - (56, 89, 0x03), - ], - // 61 - [ - (2, 106, 0x02), - (9, 106, 0x02), - (23, 106, 0x02), - (40, 106, 0x03), - (2, 107, 0x02), - (9, 107, 0x02), - (23, 107, 0x02), - (40, 107, 0x03), - (2, 113, 0x02), - (9, 113, 0x02), - (23, 113, 0x02), - (40, 113, 0x03), - (2, 118, 0x02), - (9, 118, 0x02), - (23, 118, 0x02), - (40, 118, 0x03), - ], - // 62 - [ - (3, 106, 0x02), - (6, 106, 0x02), - (10, 106, 0x02), - (15, 106, 0x02), - (24, 106, 0x02), - (31, 106, 0x02), - (41, 106, 0x02), - (56, 106, 0x03), - (3, 107, 0x02), - (6, 107, 0x02), - (10, 107, 0x02), - (15, 107, 0x02), - (24, 107, 0x02), - (31, 107, 0x02), - (41, 107, 0x02), - (56, 107, 0x03), - ], - // 63 - [ - (3, 113, 0x02), - (6, 113, 0x02), - (10, 113, 0x02), - (15, 113, 0x02), - (24, 113, 0x02), - 
(31, 113, 0x02), - (41, 113, 0x02), - (56, 113, 0x03), - (3, 118, 0x02), - (6, 118, 0x02), - (10, 118, 0x02), - (15, 118, 0x02), - (24, 118, 0x02), - (31, 118, 0x02), - (41, 118, 0x02), - (56, 118, 0x03), - ], - // 64 - [ - (1, 119, 0x02), - (22, 119, 0x03), - (1, 120, 0x02), - (22, 120, 0x03), - (1, 121, 0x02), - (22, 121, 0x03), - (1, 122, 0x02), - (22, 122, 0x03), - (0, 38, 0x02), - (0, 42, 0x02), - (0, 44, 0x02), - (0, 59, 0x02), - (0, 88, 0x02), - (0, 90, 0x02), - (75, 0, 0x00), - (78, 0, 0x00), - ], - // 65 - [ - (2, 119, 0x02), - (9, 119, 0x02), - (23, 119, 0x02), - (40, 119, 0x03), - (2, 120, 0x02), - (9, 120, 0x02), - (23, 120, 0x02), - (40, 120, 0x03), - (2, 121, 0x02), - (9, 121, 0x02), - (23, 121, 0x02), - (40, 121, 0x03), - (2, 122, 0x02), - (9, 122, 0x02), - (23, 122, 0x02), - (40, 122, 0x03), - ], - // 66 - [ - (3, 119, 0x02), - (6, 119, 0x02), - (10, 119, 0x02), - (15, 119, 0x02), - (24, 119, 0x02), - (31, 119, 0x02), - (41, 119, 0x02), - (56, 119, 0x03), - (3, 120, 0x02), - (6, 120, 0x02), - (10, 120, 0x02), - (15, 120, 0x02), - (24, 120, 0x02), - (31, 120, 0x02), - (41, 120, 0x02), - (56, 120, 0x03), - ], - // 67 - [ - (3, 121, 0x02), - (6, 121, 0x02), - (10, 121, 0x02), - (15, 121, 0x02), - (24, 121, 0x02), - (31, 121, 0x02), - (41, 121, 0x02), - (56, 121, 0x03), - (3, 122, 0x02), - (6, 122, 0x02), - (10, 122, 0x02), - (15, 122, 0x02), - (24, 122, 0x02), - (31, 122, 0x02), - (41, 122, 0x02), - (56, 122, 0x03), - ], - // 68 - [ - (1, 38, 0x02), - (22, 38, 0x03), - (1, 42, 0x02), - (22, 42, 0x03), - (1, 44, 0x02), - (22, 44, 0x03), - (1, 59, 0x02), - (22, 59, 0x03), - (1, 88, 0x02), - (22, 88, 0x03), - (1, 90, 0x02), - (22, 90, 0x03), - (76, 0, 0x00), - (77, 0, 0x00), - (79, 0, 0x00), - (81, 0, 0x00), - ], - // 69 - [ - (2, 38, 0x02), - (9, 38, 0x02), - (23, 38, 0x02), - (40, 38, 0x03), - (2, 42, 0x02), - (9, 42, 0x02), - (23, 42, 0x02), - (40, 42, 0x03), - (2, 44, 0x02), - (9, 44, 0x02), - (23, 44, 0x02), - (40, 44, 0x03), - (2, 59, 0x02), - (9, 
59, 0x02), - (23, 59, 0x02), - (40, 59, 0x03), - ], - // 70 - [ - (3, 38, 0x02), - (6, 38, 0x02), - (10, 38, 0x02), - (15, 38, 0x02), - (24, 38, 0x02), - (31, 38, 0x02), - (41, 38, 0x02), - (56, 38, 0x03), - (3, 42, 0x02), - (6, 42, 0x02), - (10, 42, 0x02), - (15, 42, 0x02), - (24, 42, 0x02), - (31, 42, 0x02), - (41, 42, 0x02), - (56, 42, 0x03), - ], - // 71 - [ - (3, 44, 0x02), - (6, 44, 0x02), - (10, 44, 0x02), - (15, 44, 0x02), - (24, 44, 0x02), - (31, 44, 0x02), - (41, 44, 0x02), - (56, 44, 0x03), - (3, 59, 0x02), - (6, 59, 0x02), - (10, 59, 0x02), - (15, 59, 0x02), - (24, 59, 0x02), - (31, 59, 0x02), - (41, 59, 0x02), - (56, 59, 0x03), - ], - // 72 - [ - (2, 88, 0x02), - (9, 88, 0x02), - (23, 88, 0x02), - (40, 88, 0x03), - (2, 90, 0x02), - (9, 90, 0x02), - (23, 90, 0x02), - (40, 90, 0x03), - (0, 33, 0x02), - (0, 34, 0x02), - (0, 40, 0x02), - (0, 41, 0x02), - (0, 63, 0x02), - (80, 0, 0x00), - (82, 0, 0x00), - (84, 0, 0x00), - ], - // 73 - [ - (3, 88, 0x02), - (6, 88, 0x02), - (10, 88, 0x02), - (15, 88, 0x02), - (24, 88, 0x02), - (31, 88, 0x02), - (41, 88, 0x02), - (56, 88, 0x03), - (3, 90, 0x02), - (6, 90, 0x02), - (10, 90, 0x02), - (15, 90, 0x02), - (24, 90, 0x02), - (31, 90, 0x02), - (41, 90, 0x02), - (56, 90, 0x03), - ], - // 74 - [ - (1, 33, 0x02), - (22, 33, 0x03), - (1, 34, 0x02), - (22, 34, 0x03), - (1, 40, 0x02), - (22, 40, 0x03), - (1, 41, 0x02), - (22, 41, 0x03), - (1, 63, 0x02), - (22, 63, 0x03), - (0, 39, 0x02), - (0, 43, 0x02), - (0, 124, 0x02), - (83, 0, 0x00), - (85, 0, 0x00), - (88, 0, 0x00), - ], - // 75 - [ - (2, 33, 0x02), - (9, 33, 0x02), - (23, 33, 0x02), - (40, 33, 0x03), - (2, 34, 0x02), - (9, 34, 0x02), - (23, 34, 0x02), - (40, 34, 0x03), - (2, 40, 0x02), - (9, 40, 0x02), - (23, 40, 0x02), - (40, 40, 0x03), - (2, 41, 0x02), - (9, 41, 0x02), - (23, 41, 0x02), - (40, 41, 0x03), - ], - // 76 - [ - (3, 33, 0x02), - (6, 33, 0x02), - (10, 33, 0x02), - (15, 33, 0x02), - (24, 33, 0x02), - (31, 33, 0x02), - (41, 33, 0x02), - (56, 33, 0x03), - (3, 
34, 0x02), - (6, 34, 0x02), - (10, 34, 0x02), - (15, 34, 0x02), - (24, 34, 0x02), - (31, 34, 0x02), - (41, 34, 0x02), - (56, 34, 0x03), - ], - // 77 - [ - (3, 40, 0x02), - (6, 40, 0x02), - (10, 40, 0x02), - (15, 40, 0x02), - (24, 40, 0x02), - (31, 40, 0x02), - (41, 40, 0x02), - (56, 40, 0x03), - (3, 41, 0x02), - (6, 41, 0x02), - (10, 41, 0x02), - (15, 41, 0x02), - (24, 41, 0x02), - (31, 41, 0x02), - (41, 41, 0x02), - (56, 41, 0x03), - ], - // 78 - [ - (2, 63, 0x02), - (9, 63, 0x02), - (23, 63, 0x02), - (40, 63, 0x03), - (1, 39, 0x02), - (22, 39, 0x03), - (1, 43, 0x02), - (22, 43, 0x03), - (1, 124, 0x02), - (22, 124, 0x03), - (0, 35, 0x02), - (0, 62, 0x02), - (86, 0, 0x00), - (87, 0, 0x00), - (89, 0, 0x00), - (90, 0, 0x00), - ], - // 79 - [ - (3, 63, 0x02), - (6, 63, 0x02), - (10, 63, 0x02), - (15, 63, 0x02), - (24, 63, 0x02), - (31, 63, 0x02), - (41, 63, 0x02), - (56, 63, 0x03), - (2, 39, 0x02), - (9, 39, 0x02), - (23, 39, 0x02), - (40, 39, 0x03), - (2, 43, 0x02), - (9, 43, 0x02), - (23, 43, 0x02), - (40, 43, 0x03), - ], - // 80 - [ - (3, 39, 0x02), - (6, 39, 0x02), - (10, 39, 0x02), - (15, 39, 0x02), - (24, 39, 0x02), - (31, 39, 0x02), - (41, 39, 0x02), - (56, 39, 0x03), - (3, 43, 0x02), - (6, 43, 0x02), - (10, 43, 0x02), - (15, 43, 0x02), - (24, 43, 0x02), - (31, 43, 0x02), - (41, 43, 0x02), - (56, 43, 0x03), - ], - // 81 - [ - (2, 124, 0x02), - (9, 124, 0x02), - (23, 124, 0x02), - (40, 124, 0x03), - (1, 35, 0x02), - (22, 35, 0x03), - (1, 62, 0x02), - (22, 62, 0x03), - (0, 0, 0x02), - (0, 36, 0x02), - (0, 64, 0x02), - (0, 91, 0x02), - (0, 93, 0x02), - (0, 126, 0x02), - (91, 0, 0x00), - (92, 0, 0x00), - ], - // 82 - [ - (3, 124, 0x02), - (6, 124, 0x02), - (10, 124, 0x02), - (15, 124, 0x02), - (24, 124, 0x02), - (31, 124, 0x02), - (41, 124, 0x02), - (56, 124, 0x03), - (2, 35, 0x02), - (9, 35, 0x02), - (23, 35, 0x02), - (40, 35, 0x03), - (2, 62, 0x02), - (9, 62, 0x02), - (23, 62, 0x02), - (40, 62, 0x03), - ], - // 83 - [ - (3, 35, 0x02), - (6, 35, 0x02), - (10, 35, 
0x02), - (15, 35, 0x02), - (24, 35, 0x02), - (31, 35, 0x02), - (41, 35, 0x02), - (56, 35, 0x03), - (3, 62, 0x02), - (6, 62, 0x02), - (10, 62, 0x02), - (15, 62, 0x02), - (24, 62, 0x02), - (31, 62, 0x02), - (41, 62, 0x02), - (56, 62, 0x03), - ], - // 84 - [ - (1, 0, 0x02), - (22, 0, 0x03), - (1, 36, 0x02), - (22, 36, 0x03), - (1, 64, 0x02), - (22, 64, 0x03), - (1, 91, 0x02), - (22, 91, 0x03), - (1, 93, 0x02), - (22, 93, 0x03), - (1, 126, 0x02), - (22, 126, 0x03), - (0, 94, 0x02), - (0, 125, 0x02), - (93, 0, 0x00), - (94, 0, 0x00), - ], - // 85 - [ - (2, 0, 0x02), - (9, 0, 0x02), - (23, 0, 0x02), - (40, 0, 0x03), - (2, 36, 0x02), - (9, 36, 0x02), - (23, 36, 0x02), - (40, 36, 0x03), - (2, 64, 0x02), - (9, 64, 0x02), - (23, 64, 0x02), - (40, 64, 0x03), - (2, 91, 0x02), - (9, 91, 0x02), - (23, 91, 0x02), - (40, 91, 0x03), - ], - // 86 - [ - (3, 0, 0x02), - (6, 0, 0x02), - (10, 0, 0x02), - (15, 0, 0x02), - (24, 0, 0x02), - (31, 0, 0x02), - (41, 0, 0x02), - (56, 0, 0x03), - (3, 36, 0x02), - (6, 36, 0x02), - (10, 36, 0x02), - (15, 36, 0x02), - (24, 36, 0x02), - (31, 36, 0x02), - (41, 36, 0x02), - (56, 36, 0x03), - ], - // 87 - [ - (3, 64, 0x02), - (6, 64, 0x02), - (10, 64, 0x02), - (15, 64, 0x02), - (24, 64, 0x02), - (31, 64, 0x02), - (41, 64, 0x02), - (56, 64, 0x03), - (3, 91, 0x02), - (6, 91, 0x02), - (10, 91, 0x02), - (15, 91, 0x02), - (24, 91, 0x02), - (31, 91, 0x02), - (41, 91, 0x02), - (56, 91, 0x03), - ], - // 88 - [ - (2, 93, 0x02), - (9, 93, 0x02), - (23, 93, 0x02), - (40, 93, 0x03), - (2, 126, 0x02), - (9, 126, 0x02), - (23, 126, 0x02), - (40, 126, 0x03), - (1, 94, 0x02), - (22, 94, 0x03), - (1, 125, 0x02), - (22, 125, 0x03), - (0, 60, 0x02), - (0, 96, 0x02), - (0, 123, 0x02), - (95, 0, 0x00), - ], - // 89 - [ - (3, 93, 0x02), - (6, 93, 0x02), - (10, 93, 0x02), - (15, 93, 0x02), - (24, 93, 0x02), - (31, 93, 0x02), - (41, 93, 0x02), - (56, 93, 0x03), - (3, 126, 0x02), - (6, 126, 0x02), - (10, 126, 0x02), - (15, 126, 0x02), - (24, 126, 0x02), - (31, 126, 0x02), - 
(41, 126, 0x02), - (56, 126, 0x03), - ], - // 90 - [ - (2, 94, 0x02), - (9, 94, 0x02), - (23, 94, 0x02), - (40, 94, 0x03), - (2, 125, 0x02), - (9, 125, 0x02), - (23, 125, 0x02), - (40, 125, 0x03), - (1, 60, 0x02), - (22, 60, 0x03), - (1, 96, 0x02), - (22, 96, 0x03), - (1, 123, 0x02), - (22, 123, 0x03), - (96, 0, 0x00), - (110, 0, 0x00), - ], - // 91 - [ - (3, 94, 0x02), - (6, 94, 0x02), - (10, 94, 0x02), - (15, 94, 0x02), - (24, 94, 0x02), - (31, 94, 0x02), - (41, 94, 0x02), - (56, 94, 0x03), - (3, 125, 0x02), - (6, 125, 0x02), - (10, 125, 0x02), - (15, 125, 0x02), - (24, 125, 0x02), - (31, 125, 0x02), - (41, 125, 0x02), - (56, 125, 0x03), - ], - // 92 - [ - (2, 60, 0x02), - (9, 60, 0x02), - (23, 60, 0x02), - (40, 60, 0x03), - (2, 96, 0x02), - (9, 96, 0x02), - (23, 96, 0x02), - (40, 96, 0x03), - (2, 123, 0x02), - (9, 123, 0x02), - (23, 123, 0x02), - (40, 123, 0x03), - (97, 0, 0x00), - (101, 0, 0x00), - (111, 0, 0x00), - (133, 0, 0x00), - ], - // 93 - [ - (3, 60, 0x02), - (6, 60, 0x02), - (10, 60, 0x02), - (15, 60, 0x02), - (24, 60, 0x02), - (31, 60, 0x02), - (41, 60, 0x02), - (56, 60, 0x03), - (3, 96, 0x02), - (6, 96, 0x02), - (10, 96, 0x02), - (15, 96, 0x02), - (24, 96, 0x02), - (31, 96, 0x02), - (41, 96, 0x02), - (56, 96, 0x03), - ], - // 94 - [ - (3, 123, 0x02), - (6, 123, 0x02), - (10, 123, 0x02), - (15, 123, 0x02), - (24, 123, 0x02), - (31, 123, 0x02), - (41, 123, 0x02), - (56, 123, 0x03), - (98, 0, 0x00), - (99, 0, 0x00), - (102, 0, 0x00), - (105, 0, 0x00), - (112, 0, 0x00), - (119, 0, 0x00), - (134, 0, 0x00), - (153, 0, 0x00), - ], - // 95 - [ - (0, 92, 0x02), - (0, 195, 0x02), - (0, 208, 0x02), - (100, 0, 0x00), - (103, 0, 0x00), - (104, 0, 0x00), - (106, 0, 0x00), - (107, 0, 0x00), - (113, 0, 0x00), - (116, 0, 0x00), - (120, 0, 0x00), - (126, 0, 0x00), - (135, 0, 0x00), - (142, 0, 0x00), - (154, 0, 0x00), - (169, 0, 0x00), - ], - // 96 - [ - (1, 92, 0x02), - (22, 92, 0x03), - (1, 195, 0x02), - (22, 195, 0x03), - (1, 208, 0x02), - (22, 208, 0x03), - (0, 
128, 0x02), - (0, 130, 0x02), - (0, 131, 0x02), - (0, 162, 0x02), - (0, 184, 0x02), - (0, 194, 0x02), - (0, 224, 0x02), - (0, 226, 0x02), - (108, 0, 0x00), - (109, 0, 0x00), - ], - // 97 - [ - (2, 92, 0x02), - (9, 92, 0x02), - (23, 92, 0x02), - (40, 92, 0x03), - (2, 195, 0x02), - (9, 195, 0x02), - (23, 195, 0x02), - (40, 195, 0x03), - (2, 208, 0x02), - (9, 208, 0x02), - (23, 208, 0x02), - (40, 208, 0x03), - (1, 128, 0x02), - (22, 128, 0x03), - (1, 130, 0x02), - (22, 130, 0x03), - ], - // 98 - [ - (3, 92, 0x02), - (6, 92, 0x02), - (10, 92, 0x02), - (15, 92, 0x02), - (24, 92, 0x02), - (31, 92, 0x02), - (41, 92, 0x02), - (56, 92, 0x03), - (3, 195, 0x02), - (6, 195, 0x02), - (10, 195, 0x02), - (15, 195, 0x02), - (24, 195, 0x02), - (31, 195, 0x02), - (41, 195, 0x02), - (56, 195, 0x03), - ], - // 99 - [ - (3, 208, 0x02), - (6, 208, 0x02), - (10, 208, 0x02), - (15, 208, 0x02), - (24, 208, 0x02), - (31, 208, 0x02), - (41, 208, 0x02), - (56, 208, 0x03), - (2, 128, 0x02), - (9, 128, 0x02), - (23, 128, 0x02), - (40, 128, 0x03), - (2, 130, 0x02), - (9, 130, 0x02), - (23, 130, 0x02), - (40, 130, 0x03), - ], - // 100 - [ - (3, 128, 0x02), - (6, 128, 0x02), - (10, 128, 0x02), - (15, 128, 0x02), - (24, 128, 0x02), - (31, 128, 0x02), - (41, 128, 0x02), - (56, 128, 0x03), - (3, 130, 0x02), - (6, 130, 0x02), - (10, 130, 0x02), - (15, 130, 0x02), - (24, 130, 0x02), - (31, 130, 0x02), - (41, 130, 0x02), - (56, 130, 0x03), - ], - // 101 - [ - (1, 131, 0x02), - (22, 131, 0x03), - (1, 162, 0x02), - (22, 162, 0x03), - (1, 184, 0x02), - (22, 184, 0x03), - (1, 194, 0x02), - (22, 194, 0x03), - (1, 224, 0x02), - (22, 224, 0x03), - (1, 226, 0x02), - (22, 226, 0x03), - (0, 153, 0x02), - (0, 161, 0x02), - (0, 167, 0x02), - (0, 172, 0x02), - ], - // 102 - [ - (2, 131, 0x02), - (9, 131, 0x02), - (23, 131, 0x02), - (40, 131, 0x03), - (2, 162, 0x02), - (9, 162, 0x02), - (23, 162, 0x02), - (40, 162, 0x03), - (2, 184, 0x02), - (9, 184, 0x02), - (23, 184, 0x02), - (40, 184, 0x03), - (2, 194, 0x02), - 
(9, 194, 0x02), - (23, 194, 0x02), - (40, 194, 0x03), - ], - // 103 - [ - (3, 131, 0x02), - (6, 131, 0x02), - (10, 131, 0x02), - (15, 131, 0x02), - (24, 131, 0x02), - (31, 131, 0x02), - (41, 131, 0x02), - (56, 131, 0x03), - (3, 162, 0x02), - (6, 162, 0x02), - (10, 162, 0x02), - (15, 162, 0x02), - (24, 162, 0x02), - (31, 162, 0x02), - (41, 162, 0x02), - (56, 162, 0x03), - ], - // 104 - [ - (3, 184, 0x02), - (6, 184, 0x02), - (10, 184, 0x02), - (15, 184, 0x02), - (24, 184, 0x02), - (31, 184, 0x02), - (41, 184, 0x02), - (56, 184, 0x03), - (3, 194, 0x02), - (6, 194, 0x02), - (10, 194, 0x02), - (15, 194, 0x02), - (24, 194, 0x02), - (31, 194, 0x02), - (41, 194, 0x02), - (56, 194, 0x03), - ], - // 105 - [ - (2, 224, 0x02), - (9, 224, 0x02), - (23, 224, 0x02), - (40, 224, 0x03), - (2, 226, 0x02), - (9, 226, 0x02), - (23, 226, 0x02), - (40, 226, 0x03), - (1, 153, 0x02), - (22, 153, 0x03), - (1, 161, 0x02), - (22, 161, 0x03), - (1, 167, 0x02), - (22, 167, 0x03), - (1, 172, 0x02), - (22, 172, 0x03), - ], - // 106 - [ - (3, 224, 0x02), - (6, 224, 0x02), - (10, 224, 0x02), - (15, 224, 0x02), - (24, 224, 0x02), - (31, 224, 0x02), - (41, 224, 0x02), - (56, 224, 0x03), - (3, 226, 0x02), - (6, 226, 0x02), - (10, 226, 0x02), - (15, 226, 0x02), - (24, 226, 0x02), - (31, 226, 0x02), - (41, 226, 0x02), - (56, 226, 0x03), - ], - // 107 - [ - (2, 153, 0x02), - (9, 153, 0x02), - (23, 153, 0x02), - (40, 153, 0x03), - (2, 161, 0x02), - (9, 161, 0x02), - (23, 161, 0x02), - (40, 161, 0x03), - (2, 167, 0x02), - (9, 167, 0x02), - (23, 167, 0x02), - (40, 167, 0x03), - (2, 172, 0x02), - (9, 172, 0x02), - (23, 172, 0x02), - (40, 172, 0x03), - ], - // 108 - [ - (3, 153, 0x02), - (6, 153, 0x02), - (10, 153, 0x02), - (15, 153, 0x02), - (24, 153, 0x02), - (31, 153, 0x02), - (41, 153, 0x02), - (56, 153, 0x03), - (3, 161, 0x02), - (6, 161, 0x02), - (10, 161, 0x02), - (15, 161, 0x02), - (24, 161, 0x02), - (31, 161, 0x02), - (41, 161, 0x02), - (56, 161, 0x03), - ], - // 109 - [ - (3, 167, 0x02), - (6, 
167, 0x02), - (10, 167, 0x02), - (15, 167, 0x02), - (24, 167, 0x02), - (31, 167, 0x02), - (41, 167, 0x02), - (56, 167, 0x03), - (3, 172, 0x02), - (6, 172, 0x02), - (10, 172, 0x02), - (15, 172, 0x02), - (24, 172, 0x02), - (31, 172, 0x02), - (41, 172, 0x02), - (56, 172, 0x03), - ], - // 110 - [ - (114, 0, 0x00), - (115, 0, 0x00), - (117, 0, 0x00), - (118, 0, 0x00), - (121, 0, 0x00), - (123, 0, 0x00), - (127, 0, 0x00), - (130, 0, 0x00), - (136, 0, 0x00), - (139, 0, 0x00), - (143, 0, 0x00), - (146, 0, 0x00), - (155, 0, 0x00), - (162, 0, 0x00), - (170, 0, 0x00), - (180, 0, 0x00), - ], - // 111 - [ - (0, 176, 0x02), - (0, 177, 0x02), - (0, 179, 0x02), - (0, 209, 0x02), - (0, 216, 0x02), - (0, 217, 0x02), - (0, 227, 0x02), - (0, 229, 0x02), - (0, 230, 0x02), - (122, 0, 0x00), - (124, 0, 0x00), - (125, 0, 0x00), - (128, 0, 0x00), - (129, 0, 0x00), - (131, 0, 0x00), - (132, 0, 0x00), - ], - // 112 - [ - (1, 176, 0x02), - (22, 176, 0x03), - (1, 177, 0x02), - (22, 177, 0x03), - (1, 179, 0x02), - (22, 179, 0x03), - (1, 209, 0x02), - (22, 209, 0x03), - (1, 216, 0x02), - (22, 216, 0x03), - (1, 217, 0x02), - (22, 217, 0x03), - (1, 227, 0x02), - (22, 227, 0x03), - (1, 229, 0x02), - (22, 229, 0x03), - ], - // 113 - [ - (2, 176, 0x02), - (9, 176, 0x02), - (23, 176, 0x02), - (40, 176, 0x03), - (2, 177, 0x02), - (9, 177, 0x02), - (23, 177, 0x02), - (40, 177, 0x03), - (2, 179, 0x02), - (9, 179, 0x02), - (23, 179, 0x02), - (40, 179, 0x03), - (2, 209, 0x02), - (9, 209, 0x02), - (23, 209, 0x02), - (40, 209, 0x03), - ], - // 114 - [ - (3, 176, 0x02), - (6, 176, 0x02), - (10, 176, 0x02), - (15, 176, 0x02), - (24, 176, 0x02), - (31, 176, 0x02), - (41, 176, 0x02), - (56, 176, 0x03), - (3, 177, 0x02), - (6, 177, 0x02), - (10, 177, 0x02), - (15, 177, 0x02), - (24, 177, 0x02), - (31, 177, 0x02), - (41, 177, 0x02), - (56, 177, 0x03), - ], - // 115 - [ - (3, 179, 0x02), - (6, 179, 0x02), - (10, 179, 0x02), - (15, 179, 0x02), - (24, 179, 0x02), - (31, 179, 0x02), - (41, 179, 0x02), - (56, 179, 
0x03), - (3, 209, 0x02), - (6, 209, 0x02), - (10, 209, 0x02), - (15, 209, 0x02), - (24, 209, 0x02), - (31, 209, 0x02), - (41, 209, 0x02), - (56, 209, 0x03), - ], - // 116 - [ - (2, 216, 0x02), - (9, 216, 0x02), - (23, 216, 0x02), - (40, 216, 0x03), - (2, 217, 0x02), - (9, 217, 0x02), - (23, 217, 0x02), - (40, 217, 0x03), - (2, 227, 0x02), - (9, 227, 0x02), - (23, 227, 0x02), - (40, 227, 0x03), - (2, 229, 0x02), - (9, 229, 0x02), - (23, 229, 0x02), - (40, 229, 0x03), - ], - // 117 - [ - (3, 216, 0x02), - (6, 216, 0x02), - (10, 216, 0x02), - (15, 216, 0x02), - (24, 216, 0x02), - (31, 216, 0x02), - (41, 216, 0x02), - (56, 216, 0x03), - (3, 217, 0x02), - (6, 217, 0x02), - (10, 217, 0x02), - (15, 217, 0x02), - (24, 217, 0x02), - (31, 217, 0x02), - (41, 217, 0x02), - (56, 217, 0x03), - ], - // 118 - [ - (3, 227, 0x02), - (6, 227, 0x02), - (10, 227, 0x02), - (15, 227, 0x02), - (24, 227, 0x02), - (31, 227, 0x02), - (41, 227, 0x02), - (56, 227, 0x03), - (3, 229, 0x02), - (6, 229, 0x02), - (10, 229, 0x02), - (15, 229, 0x02), - (24, 229, 0x02), - (31, 229, 0x02), - (41, 229, 0x02), - (56, 229, 0x03), - ], - // 119 - [ - (1, 230, 0x02), - (22, 230, 0x03), - (0, 129, 0x02), - (0, 132, 0x02), - (0, 133, 0x02), - (0, 134, 0x02), - (0, 136, 0x02), - (0, 146, 0x02), - (0, 154, 0x02), - (0, 156, 0x02), - (0, 160, 0x02), - (0, 163, 0x02), - (0, 164, 0x02), - (0, 169, 0x02), - (0, 170, 0x02), - (0, 173, 0x02), - ], - // 120 - [ - (2, 230, 0x02), - (9, 230, 0x02), - (23, 230, 0x02), - (40, 230, 0x03), - (1, 129, 0x02), - (22, 129, 0x03), - (1, 132, 0x02), - (22, 132, 0x03), - (1, 133, 0x02), - (22, 133, 0x03), - (1, 134, 0x02), - (22, 134, 0x03), - (1, 136, 0x02), - (22, 136, 0x03), - (1, 146, 0x02), - (22, 146, 0x03), - ], - // 121 - [ - (3, 230, 0x02), - (6, 230, 0x02), - (10, 230, 0x02), - (15, 230, 0x02), - (24, 230, 0x02), - (31, 230, 0x02), - (41, 230, 0x02), - (56, 230, 0x03), - (2, 129, 0x02), - (9, 129, 0x02), - (23, 129, 0x02), - (40, 129, 0x03), - (2, 132, 0x02), - (9, 132, 
0x02), - (23, 132, 0x02), - (40, 132, 0x03), - ], - // 122 - [ - (3, 129, 0x02), - (6, 129, 0x02), - (10, 129, 0x02), - (15, 129, 0x02), - (24, 129, 0x02), - (31, 129, 0x02), - (41, 129, 0x02), - (56, 129, 0x03), - (3, 132, 0x02), - (6, 132, 0x02), - (10, 132, 0x02), - (15, 132, 0x02), - (24, 132, 0x02), - (31, 132, 0x02), - (41, 132, 0x02), - (56, 132, 0x03), - ], - // 123 - [ - (2, 133, 0x02), - (9, 133, 0x02), - (23, 133, 0x02), - (40, 133, 0x03), - (2, 134, 0x02), - (9, 134, 0x02), - (23, 134, 0x02), - (40, 134, 0x03), - (2, 136, 0x02), - (9, 136, 0x02), - (23, 136, 0x02), - (40, 136, 0x03), - (2, 146, 0x02), - (9, 146, 0x02), - (23, 146, 0x02), - (40, 146, 0x03), - ], - // 124 - [ - (3, 133, 0x02), - (6, 133, 0x02), - (10, 133, 0x02), - (15, 133, 0x02), - (24, 133, 0x02), - (31, 133, 0x02), - (41, 133, 0x02), - (56, 133, 0x03), - (3, 134, 0x02), - (6, 134, 0x02), - (10, 134, 0x02), - (15, 134, 0x02), - (24, 134, 0x02), - (31, 134, 0x02), - (41, 134, 0x02), - (56, 134, 0x03), - ], - // 125 - [ - (3, 136, 0x02), - (6, 136, 0x02), - (10, 136, 0x02), - (15, 136, 0x02), - (24, 136, 0x02), - (31, 136, 0x02), - (41, 136, 0x02), - (56, 136, 0x03), - (3, 146, 0x02), - (6, 146, 0x02), - (10, 146, 0x02), - (15, 146, 0x02), - (24, 146, 0x02), - (31, 146, 0x02), - (41, 146, 0x02), - (56, 146, 0x03), - ], - // 126 - [ - (1, 154, 0x02), - (22, 154, 0x03), - (1, 156, 0x02), - (22, 156, 0x03), - (1, 160, 0x02), - (22, 160, 0x03), - (1, 163, 0x02), - (22, 163, 0x03), - (1, 164, 0x02), - (22, 164, 0x03), - (1, 169, 0x02), - (22, 169, 0x03), - (1, 170, 0x02), - (22, 170, 0x03), - (1, 173, 0x02), - (22, 173, 0x03), - ], - // 127 - [ - (2, 154, 0x02), - (9, 154, 0x02), - (23, 154, 0x02), - (40, 154, 0x03), - (2, 156, 0x02), - (9, 156, 0x02), - (23, 156, 0x02), - (40, 156, 0x03), - (2, 160, 0x02), - (9, 160, 0x02), - (23, 160, 0x02), - (40, 160, 0x03), - (2, 163, 0x02), - (9, 163, 0x02), - (23, 163, 0x02), - (40, 163, 0x03), - ], - // 128 - [ - (3, 154, 0x02), - (6, 154, 0x02), - 
(10, 154, 0x02), - (15, 154, 0x02), - (24, 154, 0x02), - (31, 154, 0x02), - (41, 154, 0x02), - (56, 154, 0x03), - (3, 156, 0x02), - (6, 156, 0x02), - (10, 156, 0x02), - (15, 156, 0x02), - (24, 156, 0x02), - (31, 156, 0x02), - (41, 156, 0x02), - (56, 156, 0x03), - ], - // 129 - [ - (3, 160, 0x02), - (6, 160, 0x02), - (10, 160, 0x02), - (15, 160, 0x02), - (24, 160, 0x02), - (31, 160, 0x02), - (41, 160, 0x02), - (56, 160, 0x03), - (3, 163, 0x02), - (6, 163, 0x02), - (10, 163, 0x02), - (15, 163, 0x02), - (24, 163, 0x02), - (31, 163, 0x02), - (41, 163, 0x02), - (56, 163, 0x03), - ], - // 130 - [ - (2, 164, 0x02), - (9, 164, 0x02), - (23, 164, 0x02), - (40, 164, 0x03), - (2, 169, 0x02), - (9, 169, 0x02), - (23, 169, 0x02), - (40, 169, 0x03), - (2, 170, 0x02), - (9, 170, 0x02), - (23, 170, 0x02), - (40, 170, 0x03), - (2, 173, 0x02), - (9, 173, 0x02), - (23, 173, 0x02), - (40, 173, 0x03), - ], - // 131 - [ - (3, 164, 0x02), - (6, 164, 0x02), - (10, 164, 0x02), - (15, 164, 0x02), - (24, 164, 0x02), - (31, 164, 0x02), - (41, 164, 0x02), - (56, 164, 0x03), - (3, 169, 0x02), - (6, 169, 0x02), - (10, 169, 0x02), - (15, 169, 0x02), - (24, 169, 0x02), - (31, 169, 0x02), - (41, 169, 0x02), - (56, 169, 0x03), - ], - // 132 - [ - (3, 170, 0x02), - (6, 170, 0x02), - (10, 170, 0x02), - (15, 170, 0x02), - (24, 170, 0x02), - (31, 170, 0x02), - (41, 170, 0x02), - (56, 170, 0x03), - (3, 173, 0x02), - (6, 173, 0x02), - (10, 173, 0x02), - (15, 173, 0x02), - (24, 173, 0x02), - (31, 173, 0x02), - (41, 173, 0x02), - (56, 173, 0x03), - ], - // 133 - [ - (137, 0, 0x00), - (138, 0, 0x00), - (140, 0, 0x00), - (141, 0, 0x00), - (144, 0, 0x00), - (145, 0, 0x00), - (147, 0, 0x00), - (150, 0, 0x00), - (156, 0, 0x00), - (159, 0, 0x00), - (163, 0, 0x00), - (166, 0, 0x00), - (171, 0, 0x00), - (174, 0, 0x00), - (181, 0, 0x00), - (190, 0, 0x00), - ], - // 134 - [ - (0, 178, 0x02), - (0, 181, 0x02), - (0, 185, 0x02), - (0, 186, 0x02), - (0, 187, 0x02), - (0, 189, 0x02), - (0, 190, 0x02), - (0, 196, 0x02), - 
(0, 198, 0x02), - (0, 228, 0x02), - (0, 232, 0x02), - (0, 233, 0x02), - (148, 0, 0x00), - (149, 0, 0x00), - (151, 0, 0x00), - (152, 0, 0x00), - ], - // 135 - [ - (1, 178, 0x02), - (22, 178, 0x03), - (1, 181, 0x02), - (22, 181, 0x03), - (1, 185, 0x02), - (22, 185, 0x03), - (1, 186, 0x02), - (22, 186, 0x03), - (1, 187, 0x02), - (22, 187, 0x03), - (1, 189, 0x02), - (22, 189, 0x03), - (1, 190, 0x02), - (22, 190, 0x03), - (1, 196, 0x02), - (22, 196, 0x03), - ], - // 136 - [ - (2, 178, 0x02), - (9, 178, 0x02), - (23, 178, 0x02), - (40, 178, 0x03), - (2, 181, 0x02), - (9, 181, 0x02), - (23, 181, 0x02), - (40, 181, 0x03), - (2, 185, 0x02), - (9, 185, 0x02), - (23, 185, 0x02), - (40, 185, 0x03), - (2, 186, 0x02), - (9, 186, 0x02), - (23, 186, 0x02), - (40, 186, 0x03), - ], - // 137 - [ - (3, 178, 0x02), - (6, 178, 0x02), - (10, 178, 0x02), - (15, 178, 0x02), - (24, 178, 0x02), - (31, 178, 0x02), - (41, 178, 0x02), - (56, 178, 0x03), - (3, 181, 0x02), - (6, 181, 0x02), - (10, 181, 0x02), - (15, 181, 0x02), - (24, 181, 0x02), - (31, 181, 0x02), - (41, 181, 0x02), - (56, 181, 0x03), - ], - // 138 - [ - (3, 185, 0x02), - (6, 185, 0x02), - (10, 185, 0x02), - (15, 185, 0x02), - (24, 185, 0x02), - (31, 185, 0x02), - (41, 185, 0x02), - (56, 185, 0x03), - (3, 186, 0x02), - (6, 186, 0x02), - (10, 186, 0x02), - (15, 186, 0x02), - (24, 186, 0x02), - (31, 186, 0x02), - (41, 186, 0x02), - (56, 186, 0x03), - ], - // 139 - [ - (2, 187, 0x02), - (9, 187, 0x02), - (23, 187, 0x02), - (40, 187, 0x03), - (2, 189, 0x02), - (9, 189, 0x02), - (23, 189, 0x02), - (40, 189, 0x03), - (2, 190, 0x02), - (9, 190, 0x02), - (23, 190, 0x02), - (40, 190, 0x03), - (2, 196, 0x02), - (9, 196, 0x02), - (23, 196, 0x02), - (40, 196, 0x03), - ], - // 140 - [ - (3, 187, 0x02), - (6, 187, 0x02), - (10, 187, 0x02), - (15, 187, 0x02), - (24, 187, 0x02), - (31, 187, 0x02), - (41, 187, 0x02), - (56, 187, 0x03), - (3, 189, 0x02), - (6, 189, 0x02), - (10, 189, 0x02), - (15, 189, 0x02), - (24, 189, 0x02), - (31, 189, 0x02), 
- (41, 189, 0x02), - (56, 189, 0x03), - ], - // 141 - [ - (3, 190, 0x02), - (6, 190, 0x02), - (10, 190, 0x02), - (15, 190, 0x02), - (24, 190, 0x02), - (31, 190, 0x02), - (41, 190, 0x02), - (56, 190, 0x03), - (3, 196, 0x02), - (6, 196, 0x02), - (10, 196, 0x02), - (15, 196, 0x02), - (24, 196, 0x02), - (31, 196, 0x02), - (41, 196, 0x02), - (56, 196, 0x03), - ], - // 142 - [ - (1, 198, 0x02), - (22, 198, 0x03), - (1, 228, 0x02), - (22, 228, 0x03), - (1, 232, 0x02), - (22, 232, 0x03), - (1, 233, 0x02), - (22, 233, 0x03), - (0, 1, 0x02), - (0, 135, 0x02), - (0, 137, 0x02), - (0, 138, 0x02), - (0, 139, 0x02), - (0, 140, 0x02), - (0, 141, 0x02), - (0, 143, 0x02), - ], - // 143 - [ - (2, 198, 0x02), - (9, 198, 0x02), - (23, 198, 0x02), - (40, 198, 0x03), - (2, 228, 0x02), - (9, 228, 0x02), - (23, 228, 0x02), - (40, 228, 0x03), - (2, 232, 0x02), - (9, 232, 0x02), - (23, 232, 0x02), - (40, 232, 0x03), - (2, 233, 0x02), - (9, 233, 0x02), - (23, 233, 0x02), - (40, 233, 0x03), - ], - // 144 - [ - (3, 198, 0x02), - (6, 198, 0x02), - (10, 198, 0x02), - (15, 198, 0x02), - (24, 198, 0x02), - (31, 198, 0x02), - (41, 198, 0x02), - (56, 198, 0x03), - (3, 228, 0x02), - (6, 228, 0x02), - (10, 228, 0x02), - (15, 228, 0x02), - (24, 228, 0x02), - (31, 228, 0x02), - (41, 228, 0x02), - (56, 228, 0x03), - ], - // 145 - [ - (3, 232, 0x02), - (6, 232, 0x02), - (10, 232, 0x02), - (15, 232, 0x02), - (24, 232, 0x02), - (31, 232, 0x02), - (41, 232, 0x02), - (56, 232, 0x03), - (3, 233, 0x02), - (6, 233, 0x02), - (10, 233, 0x02), - (15, 233, 0x02), - (24, 233, 0x02), - (31, 233, 0x02), - (41, 233, 0x02), - (56, 233, 0x03), - ], - // 146 - [ - (1, 1, 0x02), - (22, 1, 0x03), - (1, 135, 0x02), - (22, 135, 0x03), - (1, 137, 0x02), - (22, 137, 0x03), - (1, 138, 0x02), - (22, 138, 0x03), - (1, 139, 0x02), - (22, 139, 0x03), - (1, 140, 0x02), - (22, 140, 0x03), - (1, 141, 0x02), - (22, 141, 0x03), - (1, 143, 0x02), - (22, 143, 0x03), - ], - // 147 - [ - (2, 1, 0x02), - (9, 1, 0x02), - (23, 1, 0x02), - (40, 
1, 0x03), - (2, 135, 0x02), - (9, 135, 0x02), - (23, 135, 0x02), - (40, 135, 0x03), - (2, 137, 0x02), - (9, 137, 0x02), - (23, 137, 0x02), - (40, 137, 0x03), - (2, 138, 0x02), - (9, 138, 0x02), - (23, 138, 0x02), - (40, 138, 0x03), - ], - // 148 - [ - (3, 1, 0x02), - (6, 1, 0x02), - (10, 1, 0x02), - (15, 1, 0x02), - (24, 1, 0x02), - (31, 1, 0x02), - (41, 1, 0x02), - (56, 1, 0x03), - (3, 135, 0x02), - (6, 135, 0x02), - (10, 135, 0x02), - (15, 135, 0x02), - (24, 135, 0x02), - (31, 135, 0x02), - (41, 135, 0x02), - (56, 135, 0x03), - ], - // 149 - [ - (3, 137, 0x02), - (6, 137, 0x02), - (10, 137, 0x02), - (15, 137, 0x02), - (24, 137, 0x02), - (31, 137, 0x02), - (41, 137, 0x02), - (56, 137, 0x03), - (3, 138, 0x02), - (6, 138, 0x02), - (10, 138, 0x02), - (15, 138, 0x02), - (24, 138, 0x02), - (31, 138, 0x02), - (41, 138, 0x02), - (56, 138, 0x03), - ], - // 150 - [ - (2, 139, 0x02), - (9, 139, 0x02), - (23, 139, 0x02), - (40, 139, 0x03), - (2, 140, 0x02), - (9, 140, 0x02), - (23, 140, 0x02), - (40, 140, 0x03), - (2, 141, 0x02), - (9, 141, 0x02), - (23, 141, 0x02), - (40, 141, 0x03), - (2, 143, 0x02), - (9, 143, 0x02), - (23, 143, 0x02), - (40, 143, 0x03), - ], - // 151 - [ - (3, 139, 0x02), - (6, 139, 0x02), - (10, 139, 0x02), - (15, 139, 0x02), - (24, 139, 0x02), - (31, 139, 0x02), - (41, 139, 0x02), - (56, 139, 0x03), - (3, 140, 0x02), - (6, 140, 0x02), - (10, 140, 0x02), - (15, 140, 0x02), - (24, 140, 0x02), - (31, 140, 0x02), - (41, 140, 0x02), - (56, 140, 0x03), - ], - // 152 - [ - (3, 141, 0x02), - (6, 141, 0x02), - (10, 141, 0x02), - (15, 141, 0x02), - (24, 141, 0x02), - (31, 141, 0x02), - (41, 141, 0x02), - (56, 141, 0x03), - (3, 143, 0x02), - (6, 143, 0x02), - (10, 143, 0x02), - (15, 143, 0x02), - (24, 143, 0x02), - (31, 143, 0x02), - (41, 143, 0x02), - (56, 143, 0x03), - ], - // 153 - [ - (157, 0, 0x00), - (158, 0, 0x00), - (160, 0, 0x00), - (161, 0, 0x00), - (164, 0, 0x00), - (165, 0, 0x00), - (167, 0, 0x00), - (168, 0, 0x00), - (172, 0, 0x00), - (173, 0, 0x00), 
- (175, 0, 0x00), - (177, 0, 0x00), - (182, 0, 0x00), - (185, 0, 0x00), - (191, 0, 0x00), - (207, 0, 0x00), - ], - // 154 - [ - (0, 147, 0x02), - (0, 149, 0x02), - (0, 150, 0x02), - (0, 151, 0x02), - (0, 152, 0x02), - (0, 155, 0x02), - (0, 157, 0x02), - (0, 158, 0x02), - (0, 165, 0x02), - (0, 166, 0x02), - (0, 168, 0x02), - (0, 174, 0x02), - (0, 175, 0x02), - (0, 180, 0x02), - (0, 182, 0x02), - (0, 183, 0x02), - ], - // 155 - [ - (1, 147, 0x02), - (22, 147, 0x03), - (1, 149, 0x02), - (22, 149, 0x03), - (1, 150, 0x02), - (22, 150, 0x03), - (1, 151, 0x02), - (22, 151, 0x03), - (1, 152, 0x02), - (22, 152, 0x03), - (1, 155, 0x02), - (22, 155, 0x03), - (1, 157, 0x02), - (22, 157, 0x03), - (1, 158, 0x02), - (22, 158, 0x03), - ], - // 156 - [ - (2, 147, 0x02), - (9, 147, 0x02), - (23, 147, 0x02), - (40, 147, 0x03), - (2, 149, 0x02), - (9, 149, 0x02), - (23, 149, 0x02), - (40, 149, 0x03), - (2, 150, 0x02), - (9, 150, 0x02), - (23, 150, 0x02), - (40, 150, 0x03), - (2, 151, 0x02), - (9, 151, 0x02), - (23, 151, 0x02), - (40, 151, 0x03), - ], - // 157 - [ - (3, 147, 0x02), - (6, 147, 0x02), - (10, 147, 0x02), - (15, 147, 0x02), - (24, 147, 0x02), - (31, 147, 0x02), - (41, 147, 0x02), - (56, 147, 0x03), - (3, 149, 0x02), - (6, 149, 0x02), - (10, 149, 0x02), - (15, 149, 0x02), - (24, 149, 0x02), - (31, 149, 0x02), - (41, 149, 0x02), - (56, 149, 0x03), - ], - // 158 - [ - (3, 150, 0x02), - (6, 150, 0x02), - (10, 150, 0x02), - (15, 150, 0x02), - (24, 150, 0x02), - (31, 150, 0x02), - (41, 150, 0x02), - (56, 150, 0x03), - (3, 151, 0x02), - (6, 151, 0x02), - (10, 151, 0x02), - (15, 151, 0x02), - (24, 151, 0x02), - (31, 151, 0x02), - (41, 151, 0x02), - (56, 151, 0x03), - ], - // 159 - [ - (2, 152, 0x02), - (9, 152, 0x02), - (23, 152, 0x02), - (40, 152, 0x03), - (2, 155, 0x02), - (9, 155, 0x02), - (23, 155, 0x02), - (40, 155, 0x03), - (2, 157, 0x02), - (9, 157, 0x02), - (23, 157, 0x02), - (40, 157, 0x03), - (2, 158, 0x02), - (9, 158, 0x02), - (23, 158, 0x02), - (40, 158, 0x03), - ], - 
// 160 - [ - (3, 152, 0x02), - (6, 152, 0x02), - (10, 152, 0x02), - (15, 152, 0x02), - (24, 152, 0x02), - (31, 152, 0x02), - (41, 152, 0x02), - (56, 152, 0x03), - (3, 155, 0x02), - (6, 155, 0x02), - (10, 155, 0x02), - (15, 155, 0x02), - (24, 155, 0x02), - (31, 155, 0x02), - (41, 155, 0x02), - (56, 155, 0x03), - ], - // 161 - [ - (3, 157, 0x02), - (6, 157, 0x02), - (10, 157, 0x02), - (15, 157, 0x02), - (24, 157, 0x02), - (31, 157, 0x02), - (41, 157, 0x02), - (56, 157, 0x03), - (3, 158, 0x02), - (6, 158, 0x02), - (10, 158, 0x02), - (15, 158, 0x02), - (24, 158, 0x02), - (31, 158, 0x02), - (41, 158, 0x02), - (56, 158, 0x03), - ], - // 162 - [ - (1, 165, 0x02), - (22, 165, 0x03), - (1, 166, 0x02), - (22, 166, 0x03), - (1, 168, 0x02), - (22, 168, 0x03), - (1, 174, 0x02), - (22, 174, 0x03), - (1, 175, 0x02), - (22, 175, 0x03), - (1, 180, 0x02), - (22, 180, 0x03), - (1, 182, 0x02), - (22, 182, 0x03), - (1, 183, 0x02), - (22, 183, 0x03), - ], - // 163 - [ - (2, 165, 0x02), - (9, 165, 0x02), - (23, 165, 0x02), - (40, 165, 0x03), - (2, 166, 0x02), - (9, 166, 0x02), - (23, 166, 0x02), - (40, 166, 0x03), - (2, 168, 0x02), - (9, 168, 0x02), - (23, 168, 0x02), - (40, 168, 0x03), - (2, 174, 0x02), - (9, 174, 0x02), - (23, 174, 0x02), - (40, 174, 0x03), - ], - // 164 - [ - (3, 165, 0x02), - (6, 165, 0x02), - (10, 165, 0x02), - (15, 165, 0x02), - (24, 165, 0x02), - (31, 165, 0x02), - (41, 165, 0x02), - (56, 165, 0x03), - (3, 166, 0x02), - (6, 166, 0x02), - (10, 166, 0x02), - (15, 166, 0x02), - (24, 166, 0x02), - (31, 166, 0x02), - (41, 166, 0x02), - (56, 166, 0x03), - ], - // 165 - [ - (3, 168, 0x02), - (6, 168, 0x02), - (10, 168, 0x02), - (15, 168, 0x02), - (24, 168, 0x02), - (31, 168, 0x02), - (41, 168, 0x02), - (56, 168, 0x03), - (3, 174, 0x02), - (6, 174, 0x02), - (10, 174, 0x02), - (15, 174, 0x02), - (24, 174, 0x02), - (31, 174, 0x02), - (41, 174, 0x02), - (56, 174, 0x03), - ], - // 166 - [ - (2, 175, 0x02), - (9, 175, 0x02), - (23, 175, 0x02), - (40, 175, 0x03), - (2, 180, 
0x02), - (9, 180, 0x02), - (23, 180, 0x02), - (40, 180, 0x03), - (2, 182, 0x02), - (9, 182, 0x02), - (23, 182, 0x02), - (40, 182, 0x03), - (2, 183, 0x02), - (9, 183, 0x02), - (23, 183, 0x02), - (40, 183, 0x03), - ], - // 167 - [ - (3, 175, 0x02), - (6, 175, 0x02), - (10, 175, 0x02), - (15, 175, 0x02), - (24, 175, 0x02), - (31, 175, 0x02), - (41, 175, 0x02), - (56, 175, 0x03), - (3, 180, 0x02), - (6, 180, 0x02), - (10, 180, 0x02), - (15, 180, 0x02), - (24, 180, 0x02), - (31, 180, 0x02), - (41, 180, 0x02), - (56, 180, 0x03), - ], - // 168 - [ - (3, 182, 0x02), - (6, 182, 0x02), - (10, 182, 0x02), - (15, 182, 0x02), - (24, 182, 0x02), - (31, 182, 0x02), - (41, 182, 0x02), - (56, 182, 0x03), - (3, 183, 0x02), - (6, 183, 0x02), - (10, 183, 0x02), - (15, 183, 0x02), - (24, 183, 0x02), - (31, 183, 0x02), - (41, 183, 0x02), - (56, 183, 0x03), - ], - // 169 - [ - (0, 188, 0x02), - (0, 191, 0x02), - (0, 197, 0x02), - (0, 231, 0x02), - (0, 239, 0x02), - (176, 0, 0x00), - (178, 0, 0x00), - (179, 0, 0x00), - (183, 0, 0x00), - (184, 0, 0x00), - (186, 0, 0x00), - (187, 0, 0x00), - (192, 0, 0x00), - (199, 0, 0x00), - (208, 0, 0x00), - (223, 0, 0x00), - ], - // 170 - [ - (1, 188, 0x02), - (22, 188, 0x03), - (1, 191, 0x02), - (22, 191, 0x03), - (1, 197, 0x02), - (22, 197, 0x03), - (1, 231, 0x02), - (22, 231, 0x03), - (1, 239, 0x02), - (22, 239, 0x03), - (0, 9, 0x02), - (0, 142, 0x02), - (0, 144, 0x02), - (0, 145, 0x02), - (0, 148, 0x02), - (0, 159, 0x02), - ], - // 171 - [ - (2, 188, 0x02), - (9, 188, 0x02), - (23, 188, 0x02), - (40, 188, 0x03), - (2, 191, 0x02), - (9, 191, 0x02), - (23, 191, 0x02), - (40, 191, 0x03), - (2, 197, 0x02), - (9, 197, 0x02), - (23, 197, 0x02), - (40, 197, 0x03), - (2, 231, 0x02), - (9, 231, 0x02), - (23, 231, 0x02), - (40, 231, 0x03), - ], - // 172 - [ - (3, 188, 0x02), - (6, 188, 0x02), - (10, 188, 0x02), - (15, 188, 0x02), - (24, 188, 0x02), - (31, 188, 0x02), - (41, 188, 0x02), - (56, 188, 0x03), - (3, 191, 0x02), - (6, 191, 0x02), - (10, 191, 0x02), 
- (15, 191, 0x02), - (24, 191, 0x02), - (31, 191, 0x02), - (41, 191, 0x02), - (56, 191, 0x03), - ], - // 173 - [ - (3, 197, 0x02), - (6, 197, 0x02), - (10, 197, 0x02), - (15, 197, 0x02), - (24, 197, 0x02), - (31, 197, 0x02), - (41, 197, 0x02), - (56, 197, 0x03), - (3, 231, 0x02), - (6, 231, 0x02), - (10, 231, 0x02), - (15, 231, 0x02), - (24, 231, 0x02), - (31, 231, 0x02), - (41, 231, 0x02), - (56, 231, 0x03), - ], - // 174 - [ - (2, 239, 0x02), - (9, 239, 0x02), - (23, 239, 0x02), - (40, 239, 0x03), - (1, 9, 0x02), - (22, 9, 0x03), - (1, 142, 0x02), - (22, 142, 0x03), - (1, 144, 0x02), - (22, 144, 0x03), - (1, 145, 0x02), - (22, 145, 0x03), - (1, 148, 0x02), - (22, 148, 0x03), - (1, 159, 0x02), - (22, 159, 0x03), - ], - // 175 - [ - (3, 239, 0x02), - (6, 239, 0x02), - (10, 239, 0x02), - (15, 239, 0x02), - (24, 239, 0x02), - (31, 239, 0x02), - (41, 239, 0x02), - (56, 239, 0x03), - (2, 9, 0x02), - (9, 9, 0x02), - (23, 9, 0x02), - (40, 9, 0x03), - (2, 142, 0x02), - (9, 142, 0x02), - (23, 142, 0x02), - (40, 142, 0x03), - ], - // 176 - [ - (3, 9, 0x02), - (6, 9, 0x02), - (10, 9, 0x02), - (15, 9, 0x02), - (24, 9, 0x02), - (31, 9, 0x02), - (41, 9, 0x02), - (56, 9, 0x03), - (3, 142, 0x02), - (6, 142, 0x02), - (10, 142, 0x02), - (15, 142, 0x02), - (24, 142, 0x02), - (31, 142, 0x02), - (41, 142, 0x02), - (56, 142, 0x03), - ], - // 177 - [ - (2, 144, 0x02), - (9, 144, 0x02), - (23, 144, 0x02), - (40, 144, 0x03), - (2, 145, 0x02), - (9, 145, 0x02), - (23, 145, 0x02), - (40, 145, 0x03), - (2, 148, 0x02), - (9, 148, 0x02), - (23, 148, 0x02), - (40, 148, 0x03), - (2, 159, 0x02), - (9, 159, 0x02), - (23, 159, 0x02), - (40, 159, 0x03), - ], - // 178 - [ - (3, 144, 0x02), - (6, 144, 0x02), - (10, 144, 0x02), - (15, 144, 0x02), - (24, 144, 0x02), - (31, 144, 0x02), - (41, 144, 0x02), - (56, 144, 0x03), - (3, 145, 0x02), - (6, 145, 0x02), - (10, 145, 0x02), - (15, 145, 0x02), - (24, 145, 0x02), - (31, 145, 0x02), - (41, 145, 0x02), - (56, 145, 0x03), - ], - // 179 - [ - (3, 148, 
0x02), - (6, 148, 0x02), - (10, 148, 0x02), - (15, 148, 0x02), - (24, 148, 0x02), - (31, 148, 0x02), - (41, 148, 0x02), - (56, 148, 0x03), - (3, 159, 0x02), - (6, 159, 0x02), - (10, 159, 0x02), - (15, 159, 0x02), - (24, 159, 0x02), - (31, 159, 0x02), - (41, 159, 0x02), - (56, 159, 0x03), - ], - // 180 - [ - (0, 171, 0x02), - (0, 206, 0x02), - (0, 215, 0x02), - (0, 225, 0x02), - (0, 236, 0x02), - (0, 237, 0x02), - (188, 0, 0x00), - (189, 0, 0x00), - (193, 0, 0x00), - (196, 0, 0x00), - (200, 0, 0x00), - (203, 0, 0x00), - (209, 0, 0x00), - (216, 0, 0x00), - (224, 0, 0x00), - (238, 0, 0x00), - ], - // 181 - [ - (1, 171, 0x02), - (22, 171, 0x03), - (1, 206, 0x02), - (22, 206, 0x03), - (1, 215, 0x02), - (22, 215, 0x03), - (1, 225, 0x02), - (22, 225, 0x03), - (1, 236, 0x02), - (22, 236, 0x03), - (1, 237, 0x02), - (22, 237, 0x03), - (0, 199, 0x02), - (0, 207, 0x02), - (0, 234, 0x02), - (0, 235, 0x02), - ], - // 182 - [ - (2, 171, 0x02), - (9, 171, 0x02), - (23, 171, 0x02), - (40, 171, 0x03), - (2, 206, 0x02), - (9, 206, 0x02), - (23, 206, 0x02), - (40, 206, 0x03), - (2, 215, 0x02), - (9, 215, 0x02), - (23, 215, 0x02), - (40, 215, 0x03), - (2, 225, 0x02), - (9, 225, 0x02), - (23, 225, 0x02), - (40, 225, 0x03), - ], - // 183 - [ - (3, 171, 0x02), - (6, 171, 0x02), - (10, 171, 0x02), - (15, 171, 0x02), - (24, 171, 0x02), - (31, 171, 0x02), - (41, 171, 0x02), - (56, 171, 0x03), - (3, 206, 0x02), - (6, 206, 0x02), - (10, 206, 0x02), - (15, 206, 0x02), - (24, 206, 0x02), - (31, 206, 0x02), - (41, 206, 0x02), - (56, 206, 0x03), - ], - // 184 - [ - (3, 215, 0x02), - (6, 215, 0x02), - (10, 215, 0x02), - (15, 215, 0x02), - (24, 215, 0x02), - (31, 215, 0x02), - (41, 215, 0x02), - (56, 215, 0x03), - (3, 225, 0x02), - (6, 225, 0x02), - (10, 225, 0x02), - (15, 225, 0x02), - (24, 225, 0x02), - (31, 225, 0x02), - (41, 225, 0x02), - (56, 225, 0x03), - ], - // 185 - [ - (2, 236, 0x02), - (9, 236, 0x02), - (23, 236, 0x02), - (40, 236, 0x03), - (2, 237, 0x02), - (9, 237, 0x02), - (23, 237, 
0x02), - (40, 237, 0x03), - (1, 199, 0x02), - (22, 199, 0x03), - (1, 207, 0x02), - (22, 207, 0x03), - (1, 234, 0x02), - (22, 234, 0x03), - (1, 235, 0x02), - (22, 235, 0x03), - ], - // 186 - [ - (3, 236, 0x02), - (6, 236, 0x02), - (10, 236, 0x02), - (15, 236, 0x02), - (24, 236, 0x02), - (31, 236, 0x02), - (41, 236, 0x02), - (56, 236, 0x03), - (3, 237, 0x02), - (6, 237, 0x02), - (10, 237, 0x02), - (15, 237, 0x02), - (24, 237, 0x02), - (31, 237, 0x02), - (41, 237, 0x02), - (56, 237, 0x03), - ], - // 187 - [ - (2, 199, 0x02), - (9, 199, 0x02), - (23, 199, 0x02), - (40, 199, 0x03), - (2, 207, 0x02), - (9, 207, 0x02), - (23, 207, 0x02), - (40, 207, 0x03), - (2, 234, 0x02), - (9, 234, 0x02), - (23, 234, 0x02), - (40, 234, 0x03), - (2, 235, 0x02), - (9, 235, 0x02), - (23, 235, 0x02), - (40, 235, 0x03), - ], - // 188 - [ - (3, 199, 0x02), - (6, 199, 0x02), - (10, 199, 0x02), - (15, 199, 0x02), - (24, 199, 0x02), - (31, 199, 0x02), - (41, 199, 0x02), - (56, 199, 0x03), - (3, 207, 0x02), - (6, 207, 0x02), - (10, 207, 0x02), - (15, 207, 0x02), - (24, 207, 0x02), - (31, 207, 0x02), - (41, 207, 0x02), - (56, 207, 0x03), - ], - // 189 - [ - (3, 234, 0x02), - (6, 234, 0x02), - (10, 234, 0x02), - (15, 234, 0x02), - (24, 234, 0x02), - (31, 234, 0x02), - (41, 234, 0x02), - (56, 234, 0x03), - (3, 235, 0x02), - (6, 235, 0x02), - (10, 235, 0x02), - (15, 235, 0x02), - (24, 235, 0x02), - (31, 235, 0x02), - (41, 235, 0x02), - (56, 235, 0x03), - ], - // 190 - [ - (194, 0, 0x00), - (195, 0, 0x00), - (197, 0, 0x00), - (198, 0, 0x00), - (201, 0, 0x00), - (202, 0, 0x00), - (204, 0, 0x00), - (205, 0, 0x00), - (210, 0, 0x00), - (213, 0, 0x00), - (217, 0, 0x00), - (220, 0, 0x00), - (225, 0, 0x00), - (231, 0, 0x00), - (239, 0, 0x00), - (246, 0, 0x00), - ], - // 191 - [ - (0, 192, 0x02), - (0, 193, 0x02), - (0, 200, 0x02), - (0, 201, 0x02), - (0, 202, 0x02), - (0, 205, 0x02), - (0, 210, 0x02), - (0, 213, 0x02), - (0, 218, 0x02), - (0, 219, 0x02), - (0, 238, 0x02), - (0, 240, 0x02), - (0, 242, 0x02), 
- (0, 243, 0x02), - (0, 255, 0x02), - (206, 0, 0x00), - ], - // 192 - [ - (1, 192, 0x02), - (22, 192, 0x03), - (1, 193, 0x02), - (22, 193, 0x03), - (1, 200, 0x02), - (22, 200, 0x03), - (1, 201, 0x02), - (22, 201, 0x03), - (1, 202, 0x02), - (22, 202, 0x03), - (1, 205, 0x02), - (22, 205, 0x03), - (1, 210, 0x02), - (22, 210, 0x03), - (1, 213, 0x02), - (22, 213, 0x03), - ], - // 193 - [ - (2, 192, 0x02), - (9, 192, 0x02), - (23, 192, 0x02), - (40, 192, 0x03), - (2, 193, 0x02), - (9, 193, 0x02), - (23, 193, 0x02), - (40, 193, 0x03), - (2, 200, 0x02), - (9, 200, 0x02), - (23, 200, 0x02), - (40, 200, 0x03), - (2, 201, 0x02), - (9, 201, 0x02), - (23, 201, 0x02), - (40, 201, 0x03), - ], - // 194 - [ - (3, 192, 0x02), - (6, 192, 0x02), - (10, 192, 0x02), - (15, 192, 0x02), - (24, 192, 0x02), - (31, 192, 0x02), - (41, 192, 0x02), - (56, 192, 0x03), - (3, 193, 0x02), - (6, 193, 0x02), - (10, 193, 0x02), - (15, 193, 0x02), - (24, 193, 0x02), - (31, 193, 0x02), - (41, 193, 0x02), - (56, 193, 0x03), - ], - // 195 - [ - (3, 200, 0x02), - (6, 200, 0x02), - (10, 200, 0x02), - (15, 200, 0x02), - (24, 200, 0x02), - (31, 200, 0x02), - (41, 200, 0x02), - (56, 200, 0x03), - (3, 201, 0x02), - (6, 201, 0x02), - (10, 201, 0x02), - (15, 201, 0x02), - (24, 201, 0x02), - (31, 201, 0x02), - (41, 201, 0x02), - (56, 201, 0x03), - ], - // 196 - [ - (2, 202, 0x02), - (9, 202, 0x02), - (23, 202, 0x02), - (40, 202, 0x03), - (2, 205, 0x02), - (9, 205, 0x02), - (23, 205, 0x02), - (40, 205, 0x03), - (2, 210, 0x02), - (9, 210, 0x02), - (23, 210, 0x02), - (40, 210, 0x03), - (2, 213, 0x02), - (9, 213, 0x02), - (23, 213, 0x02), - (40, 213, 0x03), - ], - // 197 - [ - (3, 202, 0x02), - (6, 202, 0x02), - (10, 202, 0x02), - (15, 202, 0x02), - (24, 202, 0x02), - (31, 202, 0x02), - (41, 202, 0x02), - (56, 202, 0x03), - (3, 205, 0x02), - (6, 205, 0x02), - (10, 205, 0x02), - (15, 205, 0x02), - (24, 205, 0x02), - (31, 205, 0x02), - (41, 205, 0x02), - (56, 205, 0x03), - ], - // 198 - [ - (3, 210, 0x02), - (6, 210, 
0x02), - (10, 210, 0x02), - (15, 210, 0x02), - (24, 210, 0x02), - (31, 210, 0x02), - (41, 210, 0x02), - (56, 210, 0x03), - (3, 213, 0x02), - (6, 213, 0x02), - (10, 213, 0x02), - (15, 213, 0x02), - (24, 213, 0x02), - (31, 213, 0x02), - (41, 213, 0x02), - (56, 213, 0x03), - ], - // 199 - [ - (1, 218, 0x02), - (22, 218, 0x03), - (1, 219, 0x02), - (22, 219, 0x03), - (1, 238, 0x02), - (22, 238, 0x03), - (1, 240, 0x02), - (22, 240, 0x03), - (1, 242, 0x02), - (22, 242, 0x03), - (1, 243, 0x02), - (22, 243, 0x03), - (1, 255, 0x02), - (22, 255, 0x03), - (0, 203, 0x02), - (0, 204, 0x02), - ], - // 200 - [ - (2, 218, 0x02), - (9, 218, 0x02), - (23, 218, 0x02), - (40, 218, 0x03), - (2, 219, 0x02), - (9, 219, 0x02), - (23, 219, 0x02), - (40, 219, 0x03), - (2, 238, 0x02), - (9, 238, 0x02), - (23, 238, 0x02), - (40, 238, 0x03), - (2, 240, 0x02), - (9, 240, 0x02), - (23, 240, 0x02), - (40, 240, 0x03), - ], - // 201 - [ - (3, 218, 0x02), - (6, 218, 0x02), - (10, 218, 0x02), - (15, 218, 0x02), - (24, 218, 0x02), - (31, 218, 0x02), - (41, 218, 0x02), - (56, 218, 0x03), - (3, 219, 0x02), - (6, 219, 0x02), - (10, 219, 0x02), - (15, 219, 0x02), - (24, 219, 0x02), - (31, 219, 0x02), - (41, 219, 0x02), - (56, 219, 0x03), - ], - // 202 - [ - (3, 238, 0x02), - (6, 238, 0x02), - (10, 238, 0x02), - (15, 238, 0x02), - (24, 238, 0x02), - (31, 238, 0x02), - (41, 238, 0x02), - (56, 238, 0x03), - (3, 240, 0x02), - (6, 240, 0x02), - (10, 240, 0x02), - (15, 240, 0x02), - (24, 240, 0x02), - (31, 240, 0x02), - (41, 240, 0x02), - (56, 240, 0x03), - ], - // 203 - [ - (2, 242, 0x02), - (9, 242, 0x02), - (23, 242, 0x02), - (40, 242, 0x03), - (2, 243, 0x02), - (9, 243, 0x02), - (23, 243, 0x02), - (40, 243, 0x03), - (2, 255, 0x02), - (9, 255, 0x02), - (23, 255, 0x02), - (40, 255, 0x03), - (1, 203, 0x02), - (22, 203, 0x03), - (1, 204, 0x02), - (22, 204, 0x03), - ], - // 204 - [ - (3, 242, 0x02), - (6, 242, 0x02), - (10, 242, 0x02), - (15, 242, 0x02), - (24, 242, 0x02), - (31, 242, 0x02), - (41, 242, 0x02), - 
(56, 242, 0x03), - (3, 243, 0x02), - (6, 243, 0x02), - (10, 243, 0x02), - (15, 243, 0x02), - (24, 243, 0x02), - (31, 243, 0x02), - (41, 243, 0x02), - (56, 243, 0x03), - ], - // 205 - [ - (3, 255, 0x02), - (6, 255, 0x02), - (10, 255, 0x02), - (15, 255, 0x02), - (24, 255, 0x02), - (31, 255, 0x02), - (41, 255, 0x02), - (56, 255, 0x03), - (2, 203, 0x02), - (9, 203, 0x02), - (23, 203, 0x02), - (40, 203, 0x03), - (2, 204, 0x02), - (9, 204, 0x02), - (23, 204, 0x02), - (40, 204, 0x03), - ], - // 206 - [ - (3, 203, 0x02), - (6, 203, 0x02), - (10, 203, 0x02), - (15, 203, 0x02), - (24, 203, 0x02), - (31, 203, 0x02), - (41, 203, 0x02), - (56, 203, 0x03), - (3, 204, 0x02), - (6, 204, 0x02), - (10, 204, 0x02), - (15, 204, 0x02), - (24, 204, 0x02), - (31, 204, 0x02), - (41, 204, 0x02), - (56, 204, 0x03), - ], - // 207 - [ - (211, 0, 0x00), - (212, 0, 0x00), - (214, 0, 0x00), - (215, 0, 0x00), - (218, 0, 0x00), - (219, 0, 0x00), - (221, 0, 0x00), - (222, 0, 0x00), - (226, 0, 0x00), - (228, 0, 0x00), - (232, 0, 0x00), - (235, 0, 0x00), - (240, 0, 0x00), - (243, 0, 0x00), - (247, 0, 0x00), - (250, 0, 0x00), - ], - // 208 - [ - (0, 211, 0x02), - (0, 212, 0x02), - (0, 214, 0x02), - (0, 221, 0x02), - (0, 222, 0x02), - (0, 223, 0x02), - (0, 241, 0x02), - (0, 244, 0x02), - (0, 245, 0x02), - (0, 246, 0x02), - (0, 247, 0x02), - (0, 248, 0x02), - (0, 250, 0x02), - (0, 251, 0x02), - (0, 252, 0x02), - (0, 253, 0x02), - ], - // 209 - [ - (1, 211, 0x02), - (22, 211, 0x03), - (1, 212, 0x02), - (22, 212, 0x03), - (1, 214, 0x02), - (22, 214, 0x03), - (1, 221, 0x02), - (22, 221, 0x03), - (1, 222, 0x02), - (22, 222, 0x03), - (1, 223, 0x02), - (22, 223, 0x03), - (1, 241, 0x02), - (22, 241, 0x03), - (1, 244, 0x02), - (22, 244, 0x03), - ], - // 210 - [ - (2, 211, 0x02), - (9, 211, 0x02), - (23, 211, 0x02), - (40, 211, 0x03), - (2, 212, 0x02), - (9, 212, 0x02), - (23, 212, 0x02), - (40, 212, 0x03), - (2, 214, 0x02), - (9, 214, 0x02), - (23, 214, 0x02), - (40, 214, 0x03), - (2, 221, 0x02), - (9, 221, 
0x02), - (23, 221, 0x02), - (40, 221, 0x03), - ], - // 211 - [ - (3, 211, 0x02), - (6, 211, 0x02), - (10, 211, 0x02), - (15, 211, 0x02), - (24, 211, 0x02), - (31, 211, 0x02), - (41, 211, 0x02), - (56, 211, 0x03), - (3, 212, 0x02), - (6, 212, 0x02), - (10, 212, 0x02), - (15, 212, 0x02), - (24, 212, 0x02), - (31, 212, 0x02), - (41, 212, 0x02), - (56, 212, 0x03), - ], - // 212 - [ - (3, 214, 0x02), - (6, 214, 0x02), - (10, 214, 0x02), - (15, 214, 0x02), - (24, 214, 0x02), - (31, 214, 0x02), - (41, 214, 0x02), - (56, 214, 0x03), - (3, 221, 0x02), - (6, 221, 0x02), - (10, 221, 0x02), - (15, 221, 0x02), - (24, 221, 0x02), - (31, 221, 0x02), - (41, 221, 0x02), - (56, 221, 0x03), - ], - // 213 - [ - (2, 222, 0x02), - (9, 222, 0x02), - (23, 222, 0x02), - (40, 222, 0x03), - (2, 223, 0x02), - (9, 223, 0x02), - (23, 223, 0x02), - (40, 223, 0x03), - (2, 241, 0x02), - (9, 241, 0x02), - (23, 241, 0x02), - (40, 241, 0x03), - (2, 244, 0x02), - (9, 244, 0x02), - (23, 244, 0x02), - (40, 244, 0x03), - ], - // 214 - [ - (3, 222, 0x02), - (6, 222, 0x02), - (10, 222, 0x02), - (15, 222, 0x02), - (24, 222, 0x02), - (31, 222, 0x02), - (41, 222, 0x02), - (56, 222, 0x03), - (3, 223, 0x02), - (6, 223, 0x02), - (10, 223, 0x02), - (15, 223, 0x02), - (24, 223, 0x02), - (31, 223, 0x02), - (41, 223, 0x02), - (56, 223, 0x03), - ], - // 215 - [ - (3, 241, 0x02), - (6, 241, 0x02), - (10, 241, 0x02), - (15, 241, 0x02), - (24, 241, 0x02), - (31, 241, 0x02), - (41, 241, 0x02), - (56, 241, 0x03), - (3, 244, 0x02), - (6, 244, 0x02), - (10, 244, 0x02), - (15, 244, 0x02), - (24, 244, 0x02), - (31, 244, 0x02), - (41, 244, 0x02), - (56, 244, 0x03), - ], - // 216 - [ - (1, 245, 0x02), - (22, 245, 0x03), - (1, 246, 0x02), - (22, 246, 0x03), - (1, 247, 0x02), - (22, 247, 0x03), - (1, 248, 0x02), - (22, 248, 0x03), - (1, 250, 0x02), - (22, 250, 0x03), - (1, 251, 0x02), - (22, 251, 0x03), - (1, 252, 0x02), - (22, 252, 0x03), - (1, 253, 0x02), - (22, 253, 0x03), - ], - // 217 - [ - (2, 245, 0x02), - (9, 245, 0x02), 
- (23, 245, 0x02), - (40, 245, 0x03), - (2, 246, 0x02), - (9, 246, 0x02), - (23, 246, 0x02), - (40, 246, 0x03), - (2, 247, 0x02), - (9, 247, 0x02), - (23, 247, 0x02), - (40, 247, 0x03), - (2, 248, 0x02), - (9, 248, 0x02), - (23, 248, 0x02), - (40, 248, 0x03), - ], - // 218 - [ - (3, 245, 0x02), - (6, 245, 0x02), - (10, 245, 0x02), - (15, 245, 0x02), - (24, 245, 0x02), - (31, 245, 0x02), - (41, 245, 0x02), - (56, 245, 0x03), - (3, 246, 0x02), - (6, 246, 0x02), - (10, 246, 0x02), - (15, 246, 0x02), - (24, 246, 0x02), - (31, 246, 0x02), - (41, 246, 0x02), - (56, 246, 0x03), - ], - // 219 - [ - (3, 247, 0x02), - (6, 247, 0x02), - (10, 247, 0x02), - (15, 247, 0x02), - (24, 247, 0x02), - (31, 247, 0x02), - (41, 247, 0x02), - (56, 247, 0x03), - (3, 248, 0x02), - (6, 248, 0x02), - (10, 248, 0x02), - (15, 248, 0x02), - (24, 248, 0x02), - (31, 248, 0x02), - (41, 248, 0x02), - (56, 248, 0x03), - ], - // 220 - [ - (2, 250, 0x02), - (9, 250, 0x02), - (23, 250, 0x02), - (40, 250, 0x03), - (2, 251, 0x02), - (9, 251, 0x02), - (23, 251, 0x02), - (40, 251, 0x03), - (2, 252, 0x02), - (9, 252, 0x02), - (23, 252, 0x02), - (40, 252, 0x03), - (2, 253, 0x02), - (9, 253, 0x02), - (23, 253, 0x02), - (40, 253, 0x03), - ], - // 221 - [ - (3, 250, 0x02), - (6, 250, 0x02), - (10, 250, 0x02), - (15, 250, 0x02), - (24, 250, 0x02), - (31, 250, 0x02), - (41, 250, 0x02), - (56, 250, 0x03), - (3, 251, 0x02), - (6, 251, 0x02), - (10, 251, 0x02), - (15, 251, 0x02), - (24, 251, 0x02), - (31, 251, 0x02), - (41, 251, 0x02), - (56, 251, 0x03), - ], - // 222 - [ - (3, 252, 0x02), - (6, 252, 0x02), - (10, 252, 0x02), - (15, 252, 0x02), - (24, 252, 0x02), - (31, 252, 0x02), - (41, 252, 0x02), - (56, 252, 0x03), - (3, 253, 0x02), - (6, 253, 0x02), - (10, 253, 0x02), - (15, 253, 0x02), - (24, 253, 0x02), - (31, 253, 0x02), - (41, 253, 0x02), - (56, 253, 0x03), - ], - // 223 - [ - (0, 254, 0x02), - (227, 0, 0x00), - (229, 0, 0x00), - (230, 0, 0x00), - (233, 0, 0x00), - (234, 0, 0x00), - (236, 0, 0x00), - (237, 
0, 0x00), - (241, 0, 0x00), - (242, 0, 0x00), - (244, 0, 0x00), - (245, 0, 0x00), - (248, 0, 0x00), - (249, 0, 0x00), - (251, 0, 0x00), - (252, 0, 0x00), - ], - // 224 - [ - (1, 254, 0x02), - (22, 254, 0x03), - (0, 2, 0x02), - (0, 3, 0x02), - (0, 4, 0x02), - (0, 5, 0x02), - (0, 6, 0x02), - (0, 7, 0x02), - (0, 8, 0x02), - (0, 11, 0x02), - (0, 12, 0x02), - (0, 14, 0x02), - (0, 15, 0x02), - (0, 16, 0x02), - (0, 17, 0x02), - (0, 18, 0x02), - ], - // 225 - [ - (2, 254, 0x02), - (9, 254, 0x02), - (23, 254, 0x02), - (40, 254, 0x03), - (1, 2, 0x02), - (22, 2, 0x03), - (1, 3, 0x02), - (22, 3, 0x03), - (1, 4, 0x02), - (22, 4, 0x03), - (1, 5, 0x02), - (22, 5, 0x03), - (1, 6, 0x02), - (22, 6, 0x03), - (1, 7, 0x02), - (22, 7, 0x03), - ], - // 226 - [ - (3, 254, 0x02), - (6, 254, 0x02), - (10, 254, 0x02), - (15, 254, 0x02), - (24, 254, 0x02), - (31, 254, 0x02), - (41, 254, 0x02), - (56, 254, 0x03), - (2, 2, 0x02), - (9, 2, 0x02), - (23, 2, 0x02), - (40, 2, 0x03), - (2, 3, 0x02), - (9, 3, 0x02), - (23, 3, 0x02), - (40, 3, 0x03), - ], - // 227 - [ - (3, 2, 0x02), - (6, 2, 0x02), - (10, 2, 0x02), - (15, 2, 0x02), - (24, 2, 0x02), - (31, 2, 0x02), - (41, 2, 0x02), - (56, 2, 0x03), - (3, 3, 0x02), - (6, 3, 0x02), - (10, 3, 0x02), - (15, 3, 0x02), - (24, 3, 0x02), - (31, 3, 0x02), - (41, 3, 0x02), - (56, 3, 0x03), - ], - // 228 - [ - (2, 4, 0x02), - (9, 4, 0x02), - (23, 4, 0x02), - (40, 4, 0x03), - (2, 5, 0x02), - (9, 5, 0x02), - (23, 5, 0x02), - (40, 5, 0x03), - (2, 6, 0x02), - (9, 6, 0x02), - (23, 6, 0x02), - (40, 6, 0x03), - (2, 7, 0x02), - (9, 7, 0x02), - (23, 7, 0x02), - (40, 7, 0x03), - ], - // 229 - [ - (3, 4, 0x02), - (6, 4, 0x02), - (10, 4, 0x02), - (15, 4, 0x02), - (24, 4, 0x02), - (31, 4, 0x02), - (41, 4, 0x02), - (56, 4, 0x03), - (3, 5, 0x02), - (6, 5, 0x02), - (10, 5, 0x02), - (15, 5, 0x02), - (24, 5, 0x02), - (31, 5, 0x02), - (41, 5, 0x02), - (56, 5, 0x03), - ], - // 230 - [ - (3, 6, 0x02), - (6, 6, 0x02), - (10, 6, 0x02), - (15, 6, 0x02), - (24, 6, 0x02), - (31, 6, 
0x02), - (41, 6, 0x02), - (56, 6, 0x03), - (3, 7, 0x02), - (6, 7, 0x02), - (10, 7, 0x02), - (15, 7, 0x02), - (24, 7, 0x02), - (31, 7, 0x02), - (41, 7, 0x02), - (56, 7, 0x03), - ], - // 231 - [ - (1, 8, 0x02), - (22, 8, 0x03), - (1, 11, 0x02), - (22, 11, 0x03), - (1, 12, 0x02), - (22, 12, 0x03), - (1, 14, 0x02), - (22, 14, 0x03), - (1, 15, 0x02), - (22, 15, 0x03), - (1, 16, 0x02), - (22, 16, 0x03), - (1, 17, 0x02), - (22, 17, 0x03), - (1, 18, 0x02), - (22, 18, 0x03), - ], - // 232 - [ - (2, 8, 0x02), - (9, 8, 0x02), - (23, 8, 0x02), - (40, 8, 0x03), - (2, 11, 0x02), - (9, 11, 0x02), - (23, 11, 0x02), - (40, 11, 0x03), - (2, 12, 0x02), - (9, 12, 0x02), - (23, 12, 0x02), - (40, 12, 0x03), - (2, 14, 0x02), - (9, 14, 0x02), - (23, 14, 0x02), - (40, 14, 0x03), - ], - // 233 - [ - (3, 8, 0x02), - (6, 8, 0x02), - (10, 8, 0x02), - (15, 8, 0x02), - (24, 8, 0x02), - (31, 8, 0x02), - (41, 8, 0x02), - (56, 8, 0x03), - (3, 11, 0x02), - (6, 11, 0x02), - (10, 11, 0x02), - (15, 11, 0x02), - (24, 11, 0x02), - (31, 11, 0x02), - (41, 11, 0x02), - (56, 11, 0x03), - ], - // 234 - [ - (3, 12, 0x02), - (6, 12, 0x02), - (10, 12, 0x02), - (15, 12, 0x02), - (24, 12, 0x02), - (31, 12, 0x02), - (41, 12, 0x02), - (56, 12, 0x03), - (3, 14, 0x02), - (6, 14, 0x02), - (10, 14, 0x02), - (15, 14, 0x02), - (24, 14, 0x02), - (31, 14, 0x02), - (41, 14, 0x02), - (56, 14, 0x03), - ], - // 235 - [ - (2, 15, 0x02), - (9, 15, 0x02), - (23, 15, 0x02), - (40, 15, 0x03), - (2, 16, 0x02), - (9, 16, 0x02), - (23, 16, 0x02), - (40, 16, 0x03), - (2, 17, 0x02), - (9, 17, 0x02), - (23, 17, 0x02), - (40, 17, 0x03), - (2, 18, 0x02), - (9, 18, 0x02), - (23, 18, 0x02), - (40, 18, 0x03), - ], - // 236 - [ - (3, 15, 0x02), - (6, 15, 0x02), - (10, 15, 0x02), - (15, 15, 0x02), - (24, 15, 0x02), - (31, 15, 0x02), - (41, 15, 0x02), - (56, 15, 0x03), - (3, 16, 0x02), - (6, 16, 0x02), - (10, 16, 0x02), - (15, 16, 0x02), - (24, 16, 0x02), - (31, 16, 0x02), - (41, 16, 0x02), - (56, 16, 0x03), - ], - // 237 - [ - (3, 17, 0x02), - 
(6, 17, 0x02), - (10, 17, 0x02), - (15, 17, 0x02), - (24, 17, 0x02), - (31, 17, 0x02), - (41, 17, 0x02), - (56, 17, 0x03), - (3, 18, 0x02), - (6, 18, 0x02), - (10, 18, 0x02), - (15, 18, 0x02), - (24, 18, 0x02), - (31, 18, 0x02), - (41, 18, 0x02), - (56, 18, 0x03), - ], - // 238 - [ - (0, 19, 0x02), - (0, 20, 0x02), - (0, 21, 0x02), - (0, 23, 0x02), - (0, 24, 0x02), - (0, 25, 0x02), - (0, 26, 0x02), - (0, 27, 0x02), - (0, 28, 0x02), - (0, 29, 0x02), - (0, 30, 0x02), - (0, 31, 0x02), - (0, 127, 0x02), - (0, 220, 0x02), - (0, 249, 0x02), - (253, 0, 0x00), - ], - // 239 - [ - (1, 19, 0x02), - (22, 19, 0x03), - (1, 20, 0x02), - (22, 20, 0x03), - (1, 21, 0x02), - (22, 21, 0x03), - (1, 23, 0x02), - (22, 23, 0x03), - (1, 24, 0x02), - (22, 24, 0x03), - (1, 25, 0x02), - (22, 25, 0x03), - (1, 26, 0x02), - (22, 26, 0x03), - (1, 27, 0x02), - (22, 27, 0x03), - ], - // 240 - [ - (2, 19, 0x02), - (9, 19, 0x02), - (23, 19, 0x02), - (40, 19, 0x03), - (2, 20, 0x02), - (9, 20, 0x02), - (23, 20, 0x02), - (40, 20, 0x03), - (2, 21, 0x02), - (9, 21, 0x02), - (23, 21, 0x02), - (40, 21, 0x03), - (2, 23, 0x02), - (9, 23, 0x02), - (23, 23, 0x02), - (40, 23, 0x03), - ], - // 241 - [ - (3, 19, 0x02), - (6, 19, 0x02), - (10, 19, 0x02), - (15, 19, 0x02), - (24, 19, 0x02), - (31, 19, 0x02), - (41, 19, 0x02), - (56, 19, 0x03), - (3, 20, 0x02), - (6, 20, 0x02), - (10, 20, 0x02), - (15, 20, 0x02), - (24, 20, 0x02), - (31, 20, 0x02), - (41, 20, 0x02), - (56, 20, 0x03), - ], - // 242 - [ - (3, 21, 0x02), - (6, 21, 0x02), - (10, 21, 0x02), - (15, 21, 0x02), - (24, 21, 0x02), - (31, 21, 0x02), - (41, 21, 0x02), - (56, 21, 0x03), - (3, 23, 0x02), - (6, 23, 0x02), - (10, 23, 0x02), - (15, 23, 0x02), - (24, 23, 0x02), - (31, 23, 0x02), - (41, 23, 0x02), - (56, 23, 0x03), - ], - // 243 - [ - (2, 24, 0x02), - (9, 24, 0x02), - (23, 24, 0x02), - (40, 24, 0x03), - (2, 25, 0x02), - (9, 25, 0x02), - (23, 25, 0x02), - (40, 25, 0x03), - (2, 26, 0x02), - (9, 26, 0x02), - (23, 26, 0x02), - (40, 26, 0x03), - (2, 27, 
0x02), - (9, 27, 0x02), - (23, 27, 0x02), - (40, 27, 0x03), - ], - // 244 - [ - (3, 24, 0x02), - (6, 24, 0x02), - (10, 24, 0x02), - (15, 24, 0x02), - (24, 24, 0x02), - (31, 24, 0x02), - (41, 24, 0x02), - (56, 24, 0x03), - (3, 25, 0x02), - (6, 25, 0x02), - (10, 25, 0x02), - (15, 25, 0x02), - (24, 25, 0x02), - (31, 25, 0x02), - (41, 25, 0x02), - (56, 25, 0x03), - ], - // 245 - [ - (3, 26, 0x02), - (6, 26, 0x02), - (10, 26, 0x02), - (15, 26, 0x02), - (24, 26, 0x02), - (31, 26, 0x02), - (41, 26, 0x02), - (56, 26, 0x03), - (3, 27, 0x02), - (6, 27, 0x02), - (10, 27, 0x02), - (15, 27, 0x02), - (24, 27, 0x02), - (31, 27, 0x02), - (41, 27, 0x02), - (56, 27, 0x03), - ], - // 246 - [ - (1, 28, 0x02), - (22, 28, 0x03), - (1, 29, 0x02), - (22, 29, 0x03), - (1, 30, 0x02), - (22, 30, 0x03), - (1, 31, 0x02), - (22, 31, 0x03), - (1, 127, 0x02), - (22, 127, 0x03), - (1, 220, 0x02), - (22, 220, 0x03), - (1, 249, 0x02), - (22, 249, 0x03), - (254, 0, 0x00), - (255, 0, 0x00), - ], - // 247 - [ - (2, 28, 0x02), - (9, 28, 0x02), - (23, 28, 0x02), - (40, 28, 0x03), - (2, 29, 0x02), - (9, 29, 0x02), - (23, 29, 0x02), - (40, 29, 0x03), - (2, 30, 0x02), - (9, 30, 0x02), - (23, 30, 0x02), - (40, 30, 0x03), - (2, 31, 0x02), - (9, 31, 0x02), - (23, 31, 0x02), - (40, 31, 0x03), - ], - // 248 - [ - (3, 28, 0x02), - (6, 28, 0x02), - (10, 28, 0x02), - (15, 28, 0x02), - (24, 28, 0x02), - (31, 28, 0x02), - (41, 28, 0x02), - (56, 28, 0x03), - (3, 29, 0x02), - (6, 29, 0x02), - (10, 29, 0x02), - (15, 29, 0x02), - (24, 29, 0x02), - (31, 29, 0x02), - (41, 29, 0x02), - (56, 29, 0x03), - ], - // 249 - [ - (3, 30, 0x02), - (6, 30, 0x02), - (10, 30, 0x02), - (15, 30, 0x02), - (24, 30, 0x02), - (31, 30, 0x02), - (41, 30, 0x02), - (56, 30, 0x03), - (3, 31, 0x02), - (6, 31, 0x02), - (10, 31, 0x02), - (15, 31, 0x02), - (24, 31, 0x02), - (31, 31, 0x02), - (41, 31, 0x02), - (56, 31, 0x03), - ], - // 250 - [ - (2, 127, 0x02), - (9, 127, 0x02), - (23, 127, 0x02), - (40, 127, 0x03), - (2, 220, 0x02), - (9, 220, 0x02), 
- (23, 220, 0x02), - (40, 220, 0x03), - (2, 249, 0x02), - (9, 249, 0x02), - (23, 249, 0x02), - (40, 249, 0x03), - (0, 10, 0x02), - (0, 13, 0x02), - (0, 22, 0x02), - (0, 0, 0x04), - ], - // 251 - [ - (3, 127, 0x02), - (6, 127, 0x02), - (10, 127, 0x02), - (15, 127, 0x02), - (24, 127, 0x02), - (31, 127, 0x02), - (41, 127, 0x02), - (56, 127, 0x03), - (3, 220, 0x02), - (6, 220, 0x02), - (10, 220, 0x02), - (15, 220, 0x02), - (24, 220, 0x02), - (31, 220, 0x02), - (41, 220, 0x02), - (56, 220, 0x03), - ], - // 252 - [ - (3, 249, 0x02), - (6, 249, 0x02), - (10, 249, 0x02), - (15, 249, 0x02), - (24, 249, 0x02), - (31, 249, 0x02), - (41, 249, 0x02), - (56, 249, 0x03), - (1, 10, 0x02), - (22, 10, 0x03), - (1, 13, 0x02), - (22, 13, 0x03), - (1, 22, 0x02), - (22, 22, 0x03), - (0, 0, 0x04), - (0, 0, 0x05), - ], - // 253 - [ - (2, 10, 0x02), - (9, 10, 0x02), - (23, 10, 0x02), - (40, 10, 0x03), - (2, 13, 0x02), - (9, 13, 0x02), - (23, 13, 0x02), - (40, 13, 0x03), - (2, 22, 0x02), - (9, 22, 0x02), - (23, 22, 0x02), - (40, 22, 0x03), - (0, 0, 0x04), - (0, 0, 0x04), - (0, 0, 0x04), - (0, 0, 0x05), - ], - // 254 - [ - (3, 10, 0x02), - (6, 10, 0x02), - (10, 10, 0x02), - (15, 10, 0x02), - (24, 10, 0x02), - (31, 10, 0x02), - (41, 10, 0x02), - (56, 10, 0x03), - (3, 13, 0x02), - (6, 13, 0x02), - (10, 13, 0x02), - (15, 13, 0x02), - (24, 13, 0x02), - (31, 13, 0x02), - (41, 13, 0x02), - (56, 13, 0x03), - ], - // 255 - [ - (3, 22, 0x02), - (6, 22, 0x02), - (10, 22, 0x02), - (15, 22, 0x02), - (24, 22, 0x02), - (31, 22, 0x02), - (41, 22, 0x02), - (56, 22, 0x03), - (0, 0, 0x04), - (0, 0, 0x04), - (0, 0, 0x04), - (0, 0, 0x04), - (0, 0, 0x04), - (0, 0, 0x04), - (0, 0, 0x04), - (0, 0, 0x05), - ], -]; diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/mod.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/mod.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ 
-mod decoder; -mod encoder; -pub(crate) mod header; -pub(crate) mod huffman; -mod table; - -#[cfg(test)] -mod test; - -pub use self::decoder::{Decoder, DecoderError, NeedMore}; -pub use self::encoder::Encoder; -pub use self::header::{BytesStr, Header}; diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/table.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/table.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/table.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/table.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,766 +0,0 @@ -use super::Header; - -use fnv::FnvHasher; -use http::header; -use http::method::Method; - -use std::collections::VecDeque; -use std::hash::{Hash, Hasher}; -use std::{cmp, mem, usize}; - -/// HPACK encoder table -#[derive(Debug)] -pub struct Table { - mask: usize, - indices: Vec>, - slots: VecDeque, - inserted: usize, - // Size is in bytes - size: usize, - max_size: usize, -} - -#[derive(Debug)] -pub enum Index { - // The header is already fully indexed - Indexed(usize, Header), - - // The name is indexed, but not the value - Name(usize, Header), - - // The full header has been inserted into the table. - Inserted(usize), - - // Only the value has been inserted (hpack table idx, slots idx) - InsertedValue(usize, usize), - - // The header is not indexed by this table - NotIndexed(Header), -} - -#[derive(Debug)] -struct Slot { - hash: HashValue, - header: Header, - next: Option, -} - -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -struct Pos { - index: usize, - hash: HashValue, -} - -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -struct HashValue(usize); - -const MAX_SIZE: usize = 1 << 16; -const DYN_OFFSET: usize = 62; - -macro_rules! 
probe_loop { - ($probe_var: ident < $len: expr, $body: expr) => { - debug_assert!($len > 0); - loop { - if $probe_var < $len { - $body - $probe_var += 1; - } else { - $probe_var = 0; - } - } - }; -} - -impl Table { - pub fn new(max_size: usize, capacity: usize) -> Table { - if capacity == 0 { - Table { - mask: 0, - indices: vec![], - slots: VecDeque::new(), - inserted: 0, - size: 0, - max_size, - } - } else { - let capacity = cmp::max(to_raw_capacity(capacity).next_power_of_two(), 8); - - Table { - mask: capacity.wrapping_sub(1), - indices: vec![None; capacity], - slots: VecDeque::with_capacity(usable_capacity(capacity)), - inserted: 0, - size: 0, - max_size, - } - } - } - - #[inline] - pub fn capacity(&self) -> usize { - usable_capacity(self.indices.len()) - } - - pub fn max_size(&self) -> usize { - self.max_size - } - - /// Gets the header stored in the table - pub fn resolve<'a>(&'a self, index: &'a Index) -> &'a Header { - use self::Index::*; - - match *index { - Indexed(_, ref h) => h, - Name(_, ref h) => h, - Inserted(idx) => &self.slots[idx].header, - InsertedValue(_, idx) => &self.slots[idx].header, - NotIndexed(ref h) => h, - } - } - - pub fn resolve_idx(&self, index: &Index) -> usize { - use self::Index::*; - - match *index { - Indexed(idx, ..) => idx, - Name(idx, ..) => idx, - Inserted(idx) => idx + DYN_OFFSET, - InsertedValue(_name_idx, slot_idx) => slot_idx + DYN_OFFSET, - NotIndexed(_) => panic!("cannot resolve index"), - } - } - - /// Index the header in the HPACK table. - pub fn index(&mut self, header: Header) -> Index { - // Check the static table - let statik = index_static(&header); - - // Don't index certain headers. This logic is borrowed from nghttp2. - if header.skip_value_index() { - // Right now, if this is true, the header name is always in the - // static table. At some point in the future, this might not be true - // and this logic will need to be updated. 
- debug_assert!(statik.is_some(), "skip_value_index requires a static name",); - return Index::new(statik, header); - } - - // If the header is already indexed by the static table, return that - if let Some((n, true)) = statik { - return Index::Indexed(n, header); - } - - // Don't index large headers - if header.len() * 4 > self.max_size * 3 { - return Index::new(statik, header); - } - - self.index_dynamic(header, statik) - } - - fn index_dynamic(&mut self, header: Header, statik: Option<(usize, bool)>) -> Index { - debug_assert!(self.assert_valid_state("one")); - - if header.len() + self.size < self.max_size || !header.is_sensitive() { - // Only grow internal storage if needed - self.reserve_one(); - } - - if self.indices.is_empty() { - // If `indices` is not empty, then it is impossible for all - // `indices` entries to be `Some`. So, we only need to check for the - // empty case. - return Index::new(statik, header); - } - - let hash = hash_header(&header); - - let desired_pos = desired_pos(self.mask, hash); - let mut probe = desired_pos; - let mut dist = 0; - - // Start at the ideal position, checking all slots - probe_loop!(probe < self.indices.len(), { - if let Some(pos) = self.indices[probe] { - // The slot is already occupied, but check if it has a lower - // displacement. 
- let their_dist = probe_distance(self.mask, pos.hash, probe); - - let slot_idx = pos.index.wrapping_add(self.inserted); - - if their_dist < dist { - // Index robinhood - return self.index_vacant(header, hash, dist, probe, statik); - } else if pos.hash == hash && self.slots[slot_idx].header.name() == header.name() { - // Matching name, check values - return self.index_occupied(header, hash, pos.index, statik.map(|(n, _)| n)); - } - } else { - return self.index_vacant(header, hash, dist, probe, statik); - } - - dist += 1; - }); - } - - fn index_occupied( - &mut self, - header: Header, - hash: HashValue, - mut index: usize, - statik: Option, - ) -> Index { - debug_assert!(self.assert_valid_state("top")); - - // There already is a match for the given header name. Check if a value - // matches. The header will also only be inserted if the table is not at - // capacity. - loop { - // Compute the real index into the VecDeque - let real_idx = index.wrapping_add(self.inserted); - - if self.slots[real_idx].header.value_eq(&header) { - // We have a full match! - return Index::Indexed(real_idx + DYN_OFFSET, header); - } - - if let Some(next) = self.slots[real_idx].next { - index = next; - continue; - } - - if header.is_sensitive() { - // Should we assert this? - // debug_assert!(statik.is_none()); - return Index::Name(real_idx + DYN_OFFSET, header); - } - - self.update_size(header.len(), Some(index)); - - // Insert the new header - self.insert(header, hash); - - // Recompute real_idx as it just changed. - let new_real_idx = index.wrapping_add(self.inserted); - - // The previous node in the linked list may have gotten evicted - // while making room for this header. - if new_real_idx < self.slots.len() { - let idx = 0usize.wrapping_sub(self.inserted); - - self.slots[new_real_idx].next = Some(idx); - } - - debug_assert!(self.assert_valid_state("bottom")); - - // Even if the previous header was evicted, we can still reference - // it when inserting the new one... 
- return if let Some(n) = statik { - // If name is in static table, use it instead - Index::InsertedValue(n, 0) - } else { - Index::InsertedValue(real_idx + DYN_OFFSET, 0) - }; - } - } - - fn index_vacant( - &mut self, - header: Header, - hash: HashValue, - mut dist: usize, - mut probe: usize, - statik: Option<(usize, bool)>, - ) -> Index { - if header.is_sensitive() { - return Index::new(statik, header); - } - - debug_assert!(self.assert_valid_state("top")); - debug_assert!(dist == 0 || self.indices[probe.wrapping_sub(1) & self.mask].is_some()); - - // Passing in `usize::MAX` for prev_idx since there is no previous - // header in this case. - if self.update_size(header.len(), None) { - while dist != 0 { - let back = probe.wrapping_sub(1) & self.mask; - - if let Some(pos) = self.indices[back] { - let their_dist = probe_distance(self.mask, pos.hash, back); - - if their_dist < (dist - 1) { - probe = back; - dist -= 1; - } else { - break; - } - } else { - probe = back; - dist -= 1; - } - } - } - - debug_assert!(self.assert_valid_state("after update")); - - self.insert(header, hash); - - let pos_idx = 0usize.wrapping_sub(self.inserted); - - let prev = mem::replace( - &mut self.indices[probe], - Some(Pos { - index: pos_idx, - hash, - }), - ); - - if let Some(mut prev) = prev { - // Shift forward - let mut probe = probe + 1; - - probe_loop!(probe < self.indices.len(), { - let pos = &mut self.indices[probe]; - - prev = match mem::replace(pos, Some(prev)) { - Some(p) => p, - None => break, - }; - }); - } - - debug_assert!(self.assert_valid_state("bottom")); - - if let Some((n, _)) = statik { - Index::InsertedValue(n, 0) - } else { - Index::Inserted(0) - } - } - - fn insert(&mut self, header: Header, hash: HashValue) { - self.inserted = self.inserted.wrapping_add(1); - - self.slots.push_front(Slot { - hash, - header, - next: None, - }); - } - - pub fn resize(&mut self, size: usize) { - self.max_size = size; - - if size == 0 { - self.size = 0; - - for i in &mut self.indices 
{ - *i = None; - } - - self.slots.clear(); - self.inserted = 0; - } else { - self.converge(None); - } - } - - fn update_size(&mut self, len: usize, prev_idx: Option) -> bool { - self.size += len; - self.converge(prev_idx) - } - - fn converge(&mut self, prev_idx: Option) -> bool { - let mut ret = false; - - while self.size > self.max_size { - ret = true; - self.evict(prev_idx); - } - - ret - } - - fn evict(&mut self, prev_idx: Option) { - let pos_idx = (self.slots.len() - 1).wrapping_sub(self.inserted); - - debug_assert!(!self.slots.is_empty()); - debug_assert!(self.assert_valid_state("one")); - - // Remove the header - let slot = self.slots.pop_back().unwrap(); - let mut probe = desired_pos(self.mask, slot.hash); - - // Update the size - self.size -= slot.header.len(); - - debug_assert_eq!( - self.indices - .iter() - .filter_map(|p| *p) - .filter(|p| p.index == pos_idx) - .count(), - 1 - ); - - // Find the associated position - probe_loop!(probe < self.indices.len(), { - debug_assert!(self.indices[probe].is_some()); - - let mut pos = self.indices[probe].unwrap(); - - if pos.index == pos_idx { - if let Some(idx) = slot.next { - pos.index = idx; - self.indices[probe] = Some(pos); - } else if Some(pos.index) == prev_idx { - pos.index = 0usize.wrapping_sub(self.inserted + 1); - self.indices[probe] = Some(pos); - } else { - self.indices[probe] = None; - self.remove_phase_two(probe); - } - - break; - } - }); - - debug_assert!(self.assert_valid_state("two")); - } - - // Shifts all indices that were displaced by the header that has just been - // removed. 
- fn remove_phase_two(&mut self, probe: usize) { - let mut last_probe = probe; - let mut probe = probe + 1; - - probe_loop!(probe < self.indices.len(), { - if let Some(pos) = self.indices[probe] { - if probe_distance(self.mask, pos.hash, probe) > 0 { - self.indices[last_probe] = self.indices[probe].take(); - } else { - break; - } - } else { - break; - } - - last_probe = probe; - }); - - debug_assert!(self.assert_valid_state("two")); - } - - fn reserve_one(&mut self) { - let len = self.slots.len(); - - if len == self.capacity() { - if len == 0 { - let new_raw_cap = 8; - self.mask = 8 - 1; - self.indices = vec![None; new_raw_cap]; - } else { - let raw_cap = self.indices.len(); - self.grow(raw_cap << 1); - } - } - } - - #[inline] - fn grow(&mut self, new_raw_cap: usize) { - // This path can never be reached when handling the first allocation in - // the map. - - debug_assert!(self.assert_valid_state("top")); - - // find first ideally placed element -- start of cluster - let mut first_ideal = 0; - - for (i, pos) in self.indices.iter().enumerate() { - if let Some(pos) = *pos { - if 0 == probe_distance(self.mask, pos.hash, i) { - first_ideal = i; - break; - } - } - } - - // visit the entries in an order where we can simply reinsert them - // into self.indices without any bucket stealing. - let old_indices = mem::replace(&mut self.indices, vec![None; new_raw_cap]); - self.mask = new_raw_cap.wrapping_sub(1); - - for &pos in &old_indices[first_ideal..] 
{ - self.reinsert_entry_in_order(pos); - } - - for &pos in &old_indices[..first_ideal] { - self.reinsert_entry_in_order(pos); - } - - debug_assert!(self.assert_valid_state("bottom")); - } - - fn reinsert_entry_in_order(&mut self, pos: Option) { - if let Some(pos) = pos { - // Find first empty bucket and insert there - let mut probe = desired_pos(self.mask, pos.hash); - - probe_loop!(probe < self.indices.len(), { - if self.indices[probe].is_none() { - // empty bucket, insert here - self.indices[probe] = Some(pos); - return; - } - - debug_assert!({ - let them = self.indices[probe].unwrap(); - let their_distance = probe_distance(self.mask, them.hash, probe); - let our_distance = probe_distance(self.mask, pos.hash, probe); - - their_distance >= our_distance - }); - }); - } - } - - #[cfg(not(test))] - fn assert_valid_state(&self, _: &'static str) -> bool { - true - } - - #[cfg(test)] - fn assert_valid_state(&self, _msg: &'static str) -> bool { - /* - // Checks that the internal map structure is valid - // - // Ensure all hash codes in indices match the associated slot - for pos in &self.indices { - if let Some(pos) = *pos { - let real_idx = pos.index.wrapping_add(self.inserted); - - if real_idx.wrapping_add(1) != 0 { - assert!(real_idx < self.slots.len(), - "out of index; real={}; len={}, msg={}", - real_idx, self.slots.len(), msg); - - assert_eq!(pos.hash, self.slots[real_idx].hash, - "index hash does not match slot; msg={}", msg); - } - } - } - - // Every index is only available once - for i in 0..self.indices.len() { - if self.indices[i].is_none() { - continue; - } - - for j in i+1..self.indices.len() { - assert_ne!(self.indices[i], self.indices[j], - "duplicate indices; msg={}", msg); - } - } - - for (index, slot) in self.slots.iter().enumerate() { - let mut indexed = None; - - // First, see if the slot is indexed - for (i, pos) in self.indices.iter().enumerate() { - if let Some(pos) = *pos { - let real_idx = pos.index.wrapping_add(self.inserted); - if real_idx == 
index { - indexed = Some(i); - // Already know that there is no dup, so break - break; - } - } - } - - if let Some(actual) = indexed { - // Ensure that it is accessible.. - let desired = desired_pos(self.mask, slot.hash); - let mut probe = desired; - let mut dist = 0; - - probe_loop!(probe < self.indices.len(), { - assert!(self.indices[probe].is_some(), - "unexpected empty slot; probe={}; hash={:?}; msg={}", - probe, slot.hash, msg); - - let pos = self.indices[probe].unwrap(); - - let their_dist = probe_distance(self.mask, pos.hash, probe); - let real_idx = pos.index.wrapping_add(self.inserted); - - if real_idx == index { - break; - } - - assert!(dist <= their_dist, - "could not find entry; actual={}; desired={}" + - "probe={}, dist={}; their_dist={}; index={}; msg={}", - actual, desired, probe, dist, their_dist, - index.wrapping_sub(self.inserted), msg); - - dist += 1; - }); - } else { - // There is exactly one next link - let cnt = self.slots.iter().map(|s| s.next) - .filter(|n| *n == Some(index.wrapping_sub(self.inserted))) - .count(); - - assert_eq!(1, cnt, "more than one node pointing here; msg={}", msg); - } - } - */ - - // TODO: Ensure linked lists are correct: no cycles, etc... 
- - true - } -} - -#[cfg(test)] -impl Table { - /// Returns the number of headers in the table - pub fn len(&self) -> usize { - self.slots.len() - } - - /// Returns the table size - pub fn size(&self) -> usize { - self.size - } -} - -impl Index { - fn new(v: Option<(usize, bool)>, e: Header) -> Index { - match v { - None => Index::NotIndexed(e), - Some((n, true)) => Index::Indexed(n, e), - Some((n, false)) => Index::Name(n, e), - } - } -} - -#[inline] -fn usable_capacity(cap: usize) -> usize { - cap - cap / 4 -} - -#[inline] -fn to_raw_capacity(n: usize) -> usize { - n + n / 3 -} - -#[inline] -fn desired_pos(mask: usize, hash: HashValue) -> usize { - hash.0 & mask -} - -#[inline] -fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize { - current.wrapping_sub(desired_pos(mask, hash)) & mask -} - -fn hash_header(header: &Header) -> HashValue { - const MASK: u64 = (MAX_SIZE as u64) - 1; - - let mut h = FnvHasher::default(); - header.name().hash(&mut h); - HashValue((h.finish() & MASK) as usize) -} - -/// Checks the static table for the header. If found, returns the index and a -/// boolean representing if the value matched as well. 
-fn index_static(header: &Header) -> Option<(usize, bool)> { - match *header { - Header::Field { - ref name, - ref value, - } => match *name { - header::ACCEPT_CHARSET => Some((15, false)), - header::ACCEPT_ENCODING => { - if value == "gzip, deflate" { - Some((16, true)) - } else { - Some((16, false)) - } - } - header::ACCEPT_LANGUAGE => Some((17, false)), - header::ACCEPT_RANGES => Some((18, false)), - header::ACCEPT => Some((19, false)), - header::ACCESS_CONTROL_ALLOW_ORIGIN => Some((20, false)), - header::AGE => Some((21, false)), - header::ALLOW => Some((22, false)), - header::AUTHORIZATION => Some((23, false)), - header::CACHE_CONTROL => Some((24, false)), - header::CONTENT_DISPOSITION => Some((25, false)), - header::CONTENT_ENCODING => Some((26, false)), - header::CONTENT_LANGUAGE => Some((27, false)), - header::CONTENT_LENGTH => Some((28, false)), - header::CONTENT_LOCATION => Some((29, false)), - header::CONTENT_RANGE => Some((30, false)), - header::CONTENT_TYPE => Some((31, false)), - header::COOKIE => Some((32, false)), - header::DATE => Some((33, false)), - header::ETAG => Some((34, false)), - header::EXPECT => Some((35, false)), - header::EXPIRES => Some((36, false)), - header::FROM => Some((37, false)), - header::HOST => Some((38, false)), - header::IF_MATCH => Some((39, false)), - header::IF_MODIFIED_SINCE => Some((40, false)), - header::IF_NONE_MATCH => Some((41, false)), - header::IF_RANGE => Some((42, false)), - header::IF_UNMODIFIED_SINCE => Some((43, false)), - header::LAST_MODIFIED => Some((44, false)), - header::LINK => Some((45, false)), - header::LOCATION => Some((46, false)), - header::MAX_FORWARDS => Some((47, false)), - header::PROXY_AUTHENTICATE => Some((48, false)), - header::PROXY_AUTHORIZATION => Some((49, false)), - header::RANGE => Some((50, false)), - header::REFERER => Some((51, false)), - header::REFRESH => Some((52, false)), - header::RETRY_AFTER => Some((53, false)), - header::SERVER => Some((54, false)), - header::SET_COOKIE => 
Some((55, false)), - header::STRICT_TRANSPORT_SECURITY => Some((56, false)), - header::TRANSFER_ENCODING => Some((57, false)), - header::USER_AGENT => Some((58, false)), - header::VARY => Some((59, false)), - header::VIA => Some((60, false)), - header::WWW_AUTHENTICATE => Some((61, false)), - _ => None, - }, - Header::Authority(_) => Some((1, false)), - Header::Method(ref v) => match *v { - Method::GET => Some((2, true)), - Method::POST => Some((3, true)), - _ => Some((2, false)), - }, - Header::Scheme(ref v) => match &**v { - "http" => Some((6, true)), - "https" => Some((7, true)), - _ => Some((6, false)), - }, - Header::Path(ref v) => match &**v { - "/" => Some((4, true)), - "/index.html" => Some((5, true)), - _ => Some((4, false)), - }, - Header::Protocol(..) => None, - Header::Status(ref v) => match u16::from(*v) { - 200 => Some((8, true)), - 204 => Some((9, true)), - 206 => Some((10, true)), - 304 => Some((11, true)), - 400 => Some((12, true)), - 404 => Some((13, true)), - 500 => Some((14, true)), - _ => Some((8, false)), - }, - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/test/fixture.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/test/fixture.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/test/fixture.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/test/fixture.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,615 +0,0 @@ -use crate::hpack::{Decoder, Encoder, Header}; - -use bytes::BytesMut; -use hex::FromHex; -use serde_json::Value; - -use std::fs::File; -use std::io::prelude::*; -use std::io::Cursor; -use std::path::Path; -use std::str; - -fn test_fixture(path: &Path) { - let mut file = File::open(path).unwrap(); - let mut data = String::new(); - file.read_to_string(&mut data).unwrap(); - - let story: Value = serde_json::from_str(&data).unwrap(); - test_story(story); -} - -fn test_story(story: Value) { - let story = story.as_object().unwrap(); - - if let Some(cases) = story.get("cases") { - let mut cases: 
Vec<_> = cases - .as_array() - .unwrap() - .iter() - .map(|case| { - let case = case.as_object().unwrap(); - - let size = case - .get("header_table_size") - .map(|v| v.as_u64().unwrap() as usize); - - let wire = case.get("wire").unwrap().as_str().unwrap(); - let wire: Vec = FromHex::from_hex(wire.as_bytes()).unwrap(); - - let expect: Vec<_> = case - .get("headers") - .unwrap() - .as_array() - .unwrap() - .iter() - .map(|h| { - let h = h.as_object().unwrap(); - let (name, val) = h.iter().next().unwrap(); - (name.clone(), val.as_str().unwrap().to_string()) - }) - .collect(); - - Case { - seqno: case.get("seqno").unwrap().as_u64().unwrap(), - wire, - expect, - header_table_size: size, - } - }) - .collect(); - - cases.sort_by_key(|c| c.seqno); - - let mut decoder = Decoder::default(); - - // First, check decoding against the fixtures - for case in &cases { - let mut expect = case.expect.clone(); - - if let Some(size) = case.header_table_size { - decoder.queue_size_update(size); - } - - let mut buf = BytesMut::with_capacity(case.wire.len()); - buf.extend_from_slice(&case.wire); - decoder - .decode(&mut Cursor::new(&mut buf), |e| { - let (name, value) = expect.remove(0); - assert_eq!(name, key_str(&e)); - assert_eq!(value, value_str(&e)); - }) - .unwrap(); - - assert_eq!(0, expect.len()); - } - - let mut encoder = Encoder::default(); - let mut decoder = Decoder::default(); - - // Now, encode the headers - for case in &cases { - let limit = 64 * 1024; - let mut buf = BytesMut::with_capacity(limit); - - if let Some(size) = case.header_table_size { - encoder.update_max_size(size); - decoder.queue_size_update(size); - } - - let mut input: Vec<_> = case - .expect - .iter() - .map(|(name, value)| { - Header::new(name.clone().into(), value.clone().into()) - .unwrap() - .into() - }) - .collect(); - - encoder.encode(&mut input.clone().into_iter(), &mut buf); - - decoder - .decode(&mut Cursor::new(&mut buf), |e| { - assert_eq!(e, input.remove(0).reify().unwrap()); - }) - 
.unwrap(); - - assert_eq!(0, input.len()); - } - } -} - -struct Case { - seqno: u64, - wire: Vec, - expect: Vec<(String, String)>, - header_table_size: Option, -} - -fn key_str(e: &Header) -> &str { - match *e { - Header::Field { ref name, .. } => name.as_str(), - Header::Authority(..) => ":authority", - Header::Method(..) => ":method", - Header::Scheme(..) => ":scheme", - Header::Path(..) => ":path", - Header::Protocol(..) => ":protocol", - Header::Status(..) => ":status", - } -} - -fn value_str(e: &Header) -> &str { - match *e { - Header::Field { ref value, .. } => value.to_str().unwrap(), - Header::Authority(ref v) => v, - Header::Method(ref m) => m.as_str(), - Header::Scheme(ref v) => v, - Header::Path(ref v) => v, - Header::Protocol(ref v) => v.as_str(), - Header::Status(ref v) => v.as_str(), - } -} - -macro_rules! fixture_mod { - ($module:ident => { - $( - ($fn:ident, $path:expr); - )+ - }) => { - mod $module { - $( - #[test] - fn $fn() { - let path = ::std::path::Path::new(env!("CARGO_MANIFEST_DIR")) - .join("fixtures/hpack") - .join($path); - - super::test_fixture(path.as_ref()); - } - )+ - } - } -} - -fixture_mod!( - haskell_http2_linear_huffman => { - (story_00, "haskell-http2-linear-huffman/story_00.json"); - (story_01, "haskell-http2-linear-huffman/story_01.json"); - (story_02, "haskell-http2-linear-huffman/story_02.json"); - (story_03, "haskell-http2-linear-huffman/story_03.json"); - (story_04, "haskell-http2-linear-huffman/story_04.json"); - (story_05, "haskell-http2-linear-huffman/story_05.json"); - (story_06, "haskell-http2-linear-huffman/story_06.json"); - (story_07, "haskell-http2-linear-huffman/story_07.json"); - (story_08, "haskell-http2-linear-huffman/story_08.json"); - (story_09, "haskell-http2-linear-huffman/story_09.json"); - (story_10, "haskell-http2-linear-huffman/story_10.json"); - (story_11, "haskell-http2-linear-huffman/story_11.json"); - (story_12, "haskell-http2-linear-huffman/story_12.json"); - (story_13, 
"haskell-http2-linear-huffman/story_13.json"); - (story_14, "haskell-http2-linear-huffman/story_14.json"); - (story_15, "haskell-http2-linear-huffman/story_15.json"); - (story_16, "haskell-http2-linear-huffman/story_16.json"); - (story_17, "haskell-http2-linear-huffman/story_17.json"); - (story_18, "haskell-http2-linear-huffman/story_18.json"); - (story_19, "haskell-http2-linear-huffman/story_19.json"); - (story_20, "haskell-http2-linear-huffman/story_20.json"); - (story_21, "haskell-http2-linear-huffman/story_21.json"); - (story_22, "haskell-http2-linear-huffman/story_22.json"); - (story_23, "haskell-http2-linear-huffman/story_23.json"); - (story_24, "haskell-http2-linear-huffman/story_24.json"); - (story_25, "haskell-http2-linear-huffman/story_25.json"); - (story_26, "haskell-http2-linear-huffman/story_26.json"); - (story_27, "haskell-http2-linear-huffman/story_27.json"); - (story_28, "haskell-http2-linear-huffman/story_28.json"); - (story_29, "haskell-http2-linear-huffman/story_29.json"); - (story_30, "haskell-http2-linear-huffman/story_30.json"); - (story_31, "haskell-http2-linear-huffman/story_31.json"); - } -); - -fixture_mod!( - python_hpack => { - (story_00, "python-hpack/story_00.json"); - (story_01, "python-hpack/story_01.json"); - (story_02, "python-hpack/story_02.json"); - (story_03, "python-hpack/story_03.json"); - (story_04, "python-hpack/story_04.json"); - (story_05, "python-hpack/story_05.json"); - (story_06, "python-hpack/story_06.json"); - (story_07, "python-hpack/story_07.json"); - (story_08, "python-hpack/story_08.json"); - (story_09, "python-hpack/story_09.json"); - (story_10, "python-hpack/story_10.json"); - (story_11, "python-hpack/story_11.json"); - (story_12, "python-hpack/story_12.json"); - (story_13, "python-hpack/story_13.json"); - (story_14, "python-hpack/story_14.json"); - (story_15, "python-hpack/story_15.json"); - (story_16, "python-hpack/story_16.json"); - (story_17, "python-hpack/story_17.json"); - (story_18, 
"python-hpack/story_18.json"); - (story_19, "python-hpack/story_19.json"); - (story_20, "python-hpack/story_20.json"); - (story_21, "python-hpack/story_21.json"); - (story_22, "python-hpack/story_22.json"); - (story_23, "python-hpack/story_23.json"); - (story_24, "python-hpack/story_24.json"); - (story_25, "python-hpack/story_25.json"); - (story_26, "python-hpack/story_26.json"); - (story_27, "python-hpack/story_27.json"); - (story_28, "python-hpack/story_28.json"); - (story_29, "python-hpack/story_29.json"); - (story_30, "python-hpack/story_30.json"); - (story_31, "python-hpack/story_31.json"); - } -); - -fixture_mod!( - nghttp2_16384_4096 => { - (story_00, "nghttp2-16384-4096/story_00.json"); - (story_01, "nghttp2-16384-4096/story_01.json"); - (story_02, "nghttp2-16384-4096/story_02.json"); - (story_03, "nghttp2-16384-4096/story_03.json"); - (story_04, "nghttp2-16384-4096/story_04.json"); - (story_05, "nghttp2-16384-4096/story_05.json"); - (story_06, "nghttp2-16384-4096/story_06.json"); - (story_07, "nghttp2-16384-4096/story_07.json"); - (story_08, "nghttp2-16384-4096/story_08.json"); - (story_09, "nghttp2-16384-4096/story_09.json"); - (story_10, "nghttp2-16384-4096/story_10.json"); - (story_11, "nghttp2-16384-4096/story_11.json"); - (story_12, "nghttp2-16384-4096/story_12.json"); - (story_13, "nghttp2-16384-4096/story_13.json"); - (story_14, "nghttp2-16384-4096/story_14.json"); - (story_15, "nghttp2-16384-4096/story_15.json"); - (story_16, "nghttp2-16384-4096/story_16.json"); - (story_17, "nghttp2-16384-4096/story_17.json"); - (story_18, "nghttp2-16384-4096/story_18.json"); - (story_19, "nghttp2-16384-4096/story_19.json"); - (story_20, "nghttp2-16384-4096/story_20.json"); - (story_21, "nghttp2-16384-4096/story_21.json"); - (story_22, "nghttp2-16384-4096/story_22.json"); - (story_23, "nghttp2-16384-4096/story_23.json"); - (story_24, "nghttp2-16384-4096/story_24.json"); - (story_25, "nghttp2-16384-4096/story_25.json"); - (story_26, 
"nghttp2-16384-4096/story_26.json"); - (story_27, "nghttp2-16384-4096/story_27.json"); - (story_28, "nghttp2-16384-4096/story_28.json"); - (story_29, "nghttp2-16384-4096/story_29.json"); - (story_30, "nghttp2-16384-4096/story_30.json"); - } -); - -fixture_mod!( - node_http2_hpack => { - (story_00, "node-http2-hpack/story_00.json"); - (story_01, "node-http2-hpack/story_01.json"); - (story_02, "node-http2-hpack/story_02.json"); - (story_03, "node-http2-hpack/story_03.json"); - (story_04, "node-http2-hpack/story_04.json"); - (story_05, "node-http2-hpack/story_05.json"); - (story_06, "node-http2-hpack/story_06.json"); - (story_07, "node-http2-hpack/story_07.json"); - (story_08, "node-http2-hpack/story_08.json"); - (story_09, "node-http2-hpack/story_09.json"); - (story_10, "node-http2-hpack/story_10.json"); - (story_11, "node-http2-hpack/story_11.json"); - (story_12, "node-http2-hpack/story_12.json"); - (story_13, "node-http2-hpack/story_13.json"); - (story_14, "node-http2-hpack/story_14.json"); - (story_15, "node-http2-hpack/story_15.json"); - (story_16, "node-http2-hpack/story_16.json"); - (story_17, "node-http2-hpack/story_17.json"); - (story_18, "node-http2-hpack/story_18.json"); - (story_19, "node-http2-hpack/story_19.json"); - (story_20, "node-http2-hpack/story_20.json"); - (story_21, "node-http2-hpack/story_21.json"); - (story_22, "node-http2-hpack/story_22.json"); - (story_23, "node-http2-hpack/story_23.json"); - (story_24, "node-http2-hpack/story_24.json"); - (story_25, "node-http2-hpack/story_25.json"); - (story_26, "node-http2-hpack/story_26.json"); - (story_27, "node-http2-hpack/story_27.json"); - (story_28, "node-http2-hpack/story_28.json"); - (story_29, "node-http2-hpack/story_29.json"); - (story_30, "node-http2-hpack/story_30.json"); - (story_31, "node-http2-hpack/story_31.json"); - } -); - -fixture_mod!( - nghttp2_change_table_size => { - (story_00, "nghttp2-change-table-size/story_00.json"); - (story_01, "nghttp2-change-table-size/story_01.json"); - 
(story_02, "nghttp2-change-table-size/story_02.json"); - (story_03, "nghttp2-change-table-size/story_03.json"); - (story_04, "nghttp2-change-table-size/story_04.json"); - (story_05, "nghttp2-change-table-size/story_05.json"); - (story_06, "nghttp2-change-table-size/story_06.json"); - (story_07, "nghttp2-change-table-size/story_07.json"); - (story_08, "nghttp2-change-table-size/story_08.json"); - (story_09, "nghttp2-change-table-size/story_09.json"); - (story_10, "nghttp2-change-table-size/story_10.json"); - (story_11, "nghttp2-change-table-size/story_11.json"); - (story_12, "nghttp2-change-table-size/story_12.json"); - (story_13, "nghttp2-change-table-size/story_13.json"); - (story_14, "nghttp2-change-table-size/story_14.json"); - (story_15, "nghttp2-change-table-size/story_15.json"); - (story_16, "nghttp2-change-table-size/story_16.json"); - (story_17, "nghttp2-change-table-size/story_17.json"); - (story_18, "nghttp2-change-table-size/story_18.json"); - (story_19, "nghttp2-change-table-size/story_19.json"); - (story_20, "nghttp2-change-table-size/story_20.json"); - (story_21, "nghttp2-change-table-size/story_21.json"); - (story_22, "nghttp2-change-table-size/story_22.json"); - (story_23, "nghttp2-change-table-size/story_23.json"); - (story_24, "nghttp2-change-table-size/story_24.json"); - (story_25, "nghttp2-change-table-size/story_25.json"); - (story_26, "nghttp2-change-table-size/story_26.json"); - (story_27, "nghttp2-change-table-size/story_27.json"); - (story_28, "nghttp2-change-table-size/story_28.json"); - (story_29, "nghttp2-change-table-size/story_29.json"); - (story_30, "nghttp2-change-table-size/story_30.json"); - } -); - -fixture_mod!( - haskell_http2_static_huffman => { - (story_00, "haskell-http2-static-huffman/story_00.json"); - (story_01, "haskell-http2-static-huffman/story_01.json"); - (story_02, "haskell-http2-static-huffman/story_02.json"); - (story_03, "haskell-http2-static-huffman/story_03.json"); - (story_04, 
"haskell-http2-static-huffman/story_04.json"); - (story_05, "haskell-http2-static-huffman/story_05.json"); - (story_06, "haskell-http2-static-huffman/story_06.json"); - (story_07, "haskell-http2-static-huffman/story_07.json"); - (story_08, "haskell-http2-static-huffman/story_08.json"); - (story_09, "haskell-http2-static-huffman/story_09.json"); - (story_10, "haskell-http2-static-huffman/story_10.json"); - (story_11, "haskell-http2-static-huffman/story_11.json"); - (story_12, "haskell-http2-static-huffman/story_12.json"); - (story_13, "haskell-http2-static-huffman/story_13.json"); - (story_14, "haskell-http2-static-huffman/story_14.json"); - (story_15, "haskell-http2-static-huffman/story_15.json"); - (story_16, "haskell-http2-static-huffman/story_16.json"); - (story_17, "haskell-http2-static-huffman/story_17.json"); - (story_18, "haskell-http2-static-huffman/story_18.json"); - (story_19, "haskell-http2-static-huffman/story_19.json"); - (story_20, "haskell-http2-static-huffman/story_20.json"); - (story_21, "haskell-http2-static-huffman/story_21.json"); - (story_22, "haskell-http2-static-huffman/story_22.json"); - (story_23, "haskell-http2-static-huffman/story_23.json"); - (story_24, "haskell-http2-static-huffman/story_24.json"); - (story_25, "haskell-http2-static-huffman/story_25.json"); - (story_26, "haskell-http2-static-huffman/story_26.json"); - (story_27, "haskell-http2-static-huffman/story_27.json"); - (story_28, "haskell-http2-static-huffman/story_28.json"); - (story_29, "haskell-http2-static-huffman/story_29.json"); - (story_30, "haskell-http2-static-huffman/story_30.json"); - (story_31, "haskell-http2-static-huffman/story_31.json"); - } -); - -fixture_mod!( - haskell_http2_naive_huffman => { - (story_00, "haskell-http2-naive-huffman/story_00.json"); - (story_01, "haskell-http2-naive-huffman/story_01.json"); - (story_02, "haskell-http2-naive-huffman/story_02.json"); - (story_03, "haskell-http2-naive-huffman/story_03.json"); - (story_04, 
"haskell-http2-naive-huffman/story_04.json"); - (story_05, "haskell-http2-naive-huffman/story_05.json"); - (story_06, "haskell-http2-naive-huffman/story_06.json"); - (story_07, "haskell-http2-naive-huffman/story_07.json"); - (story_08, "haskell-http2-naive-huffman/story_08.json"); - (story_09, "haskell-http2-naive-huffman/story_09.json"); - (story_10, "haskell-http2-naive-huffman/story_10.json"); - (story_11, "haskell-http2-naive-huffman/story_11.json"); - (story_12, "haskell-http2-naive-huffman/story_12.json"); - (story_13, "haskell-http2-naive-huffman/story_13.json"); - (story_14, "haskell-http2-naive-huffman/story_14.json"); - (story_15, "haskell-http2-naive-huffman/story_15.json"); - (story_16, "haskell-http2-naive-huffman/story_16.json"); - (story_17, "haskell-http2-naive-huffman/story_17.json"); - (story_18, "haskell-http2-naive-huffman/story_18.json"); - (story_19, "haskell-http2-naive-huffman/story_19.json"); - (story_20, "haskell-http2-naive-huffman/story_20.json"); - (story_21, "haskell-http2-naive-huffman/story_21.json"); - (story_22, "haskell-http2-naive-huffman/story_22.json"); - (story_23, "haskell-http2-naive-huffman/story_23.json"); - (story_24, "haskell-http2-naive-huffman/story_24.json"); - (story_25, "haskell-http2-naive-huffman/story_25.json"); - (story_26, "haskell-http2-naive-huffman/story_26.json"); - (story_27, "haskell-http2-naive-huffman/story_27.json"); - (story_28, "haskell-http2-naive-huffman/story_28.json"); - (story_29, "haskell-http2-naive-huffman/story_29.json"); - (story_30, "haskell-http2-naive-huffman/story_30.json"); - (story_31, "haskell-http2-naive-huffman/story_31.json"); - } -); - -fixture_mod!( - haskell_http2_naive => { - (story_00, "haskell-http2-naive/story_00.json"); - (story_01, "haskell-http2-naive/story_01.json"); - (story_02, "haskell-http2-naive/story_02.json"); - (story_03, "haskell-http2-naive/story_03.json"); - (story_04, "haskell-http2-naive/story_04.json"); - (story_05, "haskell-http2-naive/story_05.json"); - 
(story_06, "haskell-http2-naive/story_06.json"); - (story_07, "haskell-http2-naive/story_07.json"); - (story_08, "haskell-http2-naive/story_08.json"); - (story_09, "haskell-http2-naive/story_09.json"); - (story_10, "haskell-http2-naive/story_10.json"); - (story_11, "haskell-http2-naive/story_11.json"); - (story_12, "haskell-http2-naive/story_12.json"); - (story_13, "haskell-http2-naive/story_13.json"); - (story_14, "haskell-http2-naive/story_14.json"); - (story_15, "haskell-http2-naive/story_15.json"); - (story_16, "haskell-http2-naive/story_16.json"); - (story_17, "haskell-http2-naive/story_17.json"); - (story_18, "haskell-http2-naive/story_18.json"); - (story_19, "haskell-http2-naive/story_19.json"); - (story_20, "haskell-http2-naive/story_20.json"); - (story_21, "haskell-http2-naive/story_21.json"); - (story_22, "haskell-http2-naive/story_22.json"); - (story_23, "haskell-http2-naive/story_23.json"); - (story_24, "haskell-http2-naive/story_24.json"); - (story_25, "haskell-http2-naive/story_25.json"); - (story_26, "haskell-http2-naive/story_26.json"); - (story_27, "haskell-http2-naive/story_27.json"); - (story_28, "haskell-http2-naive/story_28.json"); - (story_29, "haskell-http2-naive/story_29.json"); - (story_30, "haskell-http2-naive/story_30.json"); - (story_31, "haskell-http2-naive/story_31.json"); - } -); - -fixture_mod!( - haskell_http2_static => { - (story_00, "haskell-http2-static/story_00.json"); - (story_01, "haskell-http2-static/story_01.json"); - (story_02, "haskell-http2-static/story_02.json"); - (story_03, "haskell-http2-static/story_03.json"); - (story_04, "haskell-http2-static/story_04.json"); - (story_05, "haskell-http2-static/story_05.json"); - (story_06, "haskell-http2-static/story_06.json"); - (story_07, "haskell-http2-static/story_07.json"); - (story_08, "haskell-http2-static/story_08.json"); - (story_09, "haskell-http2-static/story_09.json"); - (story_10, "haskell-http2-static/story_10.json"); - (story_11, 
"haskell-http2-static/story_11.json"); - (story_12, "haskell-http2-static/story_12.json"); - (story_13, "haskell-http2-static/story_13.json"); - (story_14, "haskell-http2-static/story_14.json"); - (story_15, "haskell-http2-static/story_15.json"); - (story_16, "haskell-http2-static/story_16.json"); - (story_17, "haskell-http2-static/story_17.json"); - (story_18, "haskell-http2-static/story_18.json"); - (story_19, "haskell-http2-static/story_19.json"); - (story_20, "haskell-http2-static/story_20.json"); - (story_21, "haskell-http2-static/story_21.json"); - (story_22, "haskell-http2-static/story_22.json"); - (story_23, "haskell-http2-static/story_23.json"); - (story_24, "haskell-http2-static/story_24.json"); - (story_25, "haskell-http2-static/story_25.json"); - (story_26, "haskell-http2-static/story_26.json"); - (story_27, "haskell-http2-static/story_27.json"); - (story_28, "haskell-http2-static/story_28.json"); - (story_29, "haskell-http2-static/story_29.json"); - (story_30, "haskell-http2-static/story_30.json"); - (story_31, "haskell-http2-static/story_31.json"); - } -); - -fixture_mod!( - nghttp2 => { - (story_00, "nghttp2/story_00.json"); - (story_01, "nghttp2/story_01.json"); - (story_02, "nghttp2/story_02.json"); - (story_03, "nghttp2/story_03.json"); - (story_04, "nghttp2/story_04.json"); - (story_05, "nghttp2/story_05.json"); - (story_06, "nghttp2/story_06.json"); - (story_07, "nghttp2/story_07.json"); - (story_08, "nghttp2/story_08.json"); - (story_09, "nghttp2/story_09.json"); - (story_10, "nghttp2/story_10.json"); - (story_11, "nghttp2/story_11.json"); - (story_12, "nghttp2/story_12.json"); - (story_13, "nghttp2/story_13.json"); - (story_14, "nghttp2/story_14.json"); - (story_15, "nghttp2/story_15.json"); - (story_16, "nghttp2/story_16.json"); - (story_17, "nghttp2/story_17.json"); - (story_18, "nghttp2/story_18.json"); - (story_19, "nghttp2/story_19.json"); - (story_20, "nghttp2/story_20.json"); - (story_21, "nghttp2/story_21.json"); - (story_22, 
"nghttp2/story_22.json"); - (story_23, "nghttp2/story_23.json"); - (story_24, "nghttp2/story_24.json"); - (story_25, "nghttp2/story_25.json"); - (story_26, "nghttp2/story_26.json"); - (story_27, "nghttp2/story_27.json"); - (story_28, "nghttp2/story_28.json"); - (story_29, "nghttp2/story_29.json"); - (story_30, "nghttp2/story_30.json"); - (story_31, "nghttp2/story_31.json"); - } -); - -fixture_mod!( - haskell_http2_linear => { - (story_00, "haskell-http2-linear/story_00.json"); - (story_01, "haskell-http2-linear/story_01.json"); - (story_02, "haskell-http2-linear/story_02.json"); - (story_03, "haskell-http2-linear/story_03.json"); - (story_04, "haskell-http2-linear/story_04.json"); - (story_05, "haskell-http2-linear/story_05.json"); - (story_06, "haskell-http2-linear/story_06.json"); - (story_07, "haskell-http2-linear/story_07.json"); - (story_08, "haskell-http2-linear/story_08.json"); - (story_09, "haskell-http2-linear/story_09.json"); - (story_10, "haskell-http2-linear/story_10.json"); - (story_11, "haskell-http2-linear/story_11.json"); - (story_12, "haskell-http2-linear/story_12.json"); - (story_13, "haskell-http2-linear/story_13.json"); - (story_14, "haskell-http2-linear/story_14.json"); - (story_15, "haskell-http2-linear/story_15.json"); - (story_16, "haskell-http2-linear/story_16.json"); - (story_17, "haskell-http2-linear/story_17.json"); - (story_18, "haskell-http2-linear/story_18.json"); - (story_19, "haskell-http2-linear/story_19.json"); - (story_20, "haskell-http2-linear/story_20.json"); - (story_21, "haskell-http2-linear/story_21.json"); - (story_22, "haskell-http2-linear/story_22.json"); - (story_23, "haskell-http2-linear/story_23.json"); - (story_24, "haskell-http2-linear/story_24.json"); - (story_25, "haskell-http2-linear/story_25.json"); - (story_26, "haskell-http2-linear/story_26.json"); - (story_27, "haskell-http2-linear/story_27.json"); - (story_28, "haskell-http2-linear/story_28.json"); - (story_29, "haskell-http2-linear/story_29.json"); - 
(story_30, "haskell-http2-linear/story_30.json"); - (story_31, "haskell-http2-linear/story_31.json"); - } -); - -fixture_mod!( - go_hpack => { - (story_00, "go-hpack/story_00.json"); - (story_01, "go-hpack/story_01.json"); - (story_02, "go-hpack/story_02.json"); - (story_03, "go-hpack/story_03.json"); - (story_04, "go-hpack/story_04.json"); - (story_05, "go-hpack/story_05.json"); - (story_06, "go-hpack/story_06.json"); - (story_07, "go-hpack/story_07.json"); - (story_08, "go-hpack/story_08.json"); - (story_09, "go-hpack/story_09.json"); - (story_10, "go-hpack/story_10.json"); - (story_11, "go-hpack/story_11.json"); - (story_12, "go-hpack/story_12.json"); - (story_13, "go-hpack/story_13.json"); - (story_14, "go-hpack/story_14.json"); - (story_15, "go-hpack/story_15.json"); - (story_16, "go-hpack/story_16.json"); - (story_17, "go-hpack/story_17.json"); - (story_18, "go-hpack/story_18.json"); - (story_19, "go-hpack/story_19.json"); - (story_20, "go-hpack/story_20.json"); - (story_21, "go-hpack/story_21.json"); - (story_22, "go-hpack/story_22.json"); - (story_23, "go-hpack/story_23.json"); - (story_24, "go-hpack/story_24.json"); - (story_25, "go-hpack/story_25.json"); - (story_26, "go-hpack/story_26.json"); - (story_27, "go-hpack/story_27.json"); - (story_28, "go-hpack/story_28.json"); - (story_29, "go-hpack/story_29.json"); - (story_30, "go-hpack/story_30.json"); - (story_31, "go-hpack/story_31.json"); - } -); diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/test/fuzz.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/test/fuzz.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/test/fuzz.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/test/fuzz.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,365 +0,0 @@ -use crate::hpack::{Decoder, Encoder, Header}; - -use http::header::{HeaderName, HeaderValue}; - -use bytes::BytesMut; -use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; -use rand::distributions::Slice; -use rand::rngs::StdRng; 
-use rand::{thread_rng, Rng, SeedableRng}; - -use std::io::Cursor; - -const MAX_CHUNK: usize = 2 * 1024; - -#[test] -fn hpack_fuzz() { - let _ = env_logger::try_init(); - fn prop(fuzz: FuzzHpack) -> TestResult { - fuzz.run(); - TestResult::from_bool(true) - } - - QuickCheck::new() - .tests(100) - .quickcheck(prop as fn(FuzzHpack) -> TestResult) -} - -/* -// If wanting to test with a specific feed, uncomment and fill in the seed. -#[test] -fn hpack_fuzz_seeded() { - let _ = env_logger::try_init(); - let seed = [/* fill me in*/]; - FuzzHpack::new(seed).run(); -} -*/ - -#[derive(Debug, Clone)] -struct FuzzHpack { - // The set of headers to encode / decode - frames: Vec, -} - -#[derive(Debug, Clone)] -struct HeaderFrame { - resizes: Vec, - headers: Vec>>, -} - -impl FuzzHpack { - fn new(seed: [u8; 32]) -> FuzzHpack { - // Seed the RNG - let mut rng = StdRng::from_seed(seed); - - // Generates a bunch of source headers - let mut source: Vec>> = vec![]; - - for _ in 0..2000 { - source.push(gen_header(&mut rng)); - } - - // Actual test run headers - let num: usize = rng.gen_range(40..500); - - let mut frames: Vec = vec![]; - let mut added = 0; - - let skew: i32 = rng.gen_range(1..5); - - // Rough number of headers to add - while added < num { - let mut frame = HeaderFrame { - resizes: vec![], - headers: vec![], - }; - - match rng.gen_range(0..20) { - 0 => { - // Two resizes - let high = rng.gen_range(128..MAX_CHUNK * 2); - let low = rng.gen_range(0..high); - - frame.resizes.extend([low, high]); - } - 1..=3 => { - frame.resizes.push(rng.gen_range(128..MAX_CHUNK * 2)); - } - _ => {} - } - - let mut is_name_required = true; - - for _ in 0..rng.gen_range(1..(num - added) + 1) { - let x: f64 = rng.gen_range(0.0..1.0); - let x = x.powi(skew); - - let i = (x * source.len() as f64) as usize; - - let header = &source[i]; - match header { - Header::Field { name: None, .. } => { - if is_name_required { - continue; - } - } - Header::Field { .. 
} => { - is_name_required = false; - } - _ => { - // pseudos can't be followed by a header with no name - is_name_required = true; - } - } - - frame.headers.push(header.clone()); - - added += 1; - } - - frames.push(frame); - } - - FuzzHpack { frames } - } - - fn run(self) { - let frames = self.frames; - let mut expect = vec![]; - - let mut encoder = Encoder::default(); - let mut decoder = Decoder::default(); - - for frame in frames { - // build "expected" frames, such that decoding headers always - // includes a name - let mut prev_name = None; - for header in &frame.headers { - match header.clone().reify() { - Ok(h) => { - prev_name = match h { - Header::Field { ref name, .. } => Some(name.clone()), - _ => None, - }; - expect.push(h); - } - Err(value) => { - expect.push(Header::Field { - name: prev_name.as_ref().cloned().expect("previous header name"), - value, - }); - } - } - } - - let mut buf = BytesMut::new(); - - if let Some(max) = frame.resizes.iter().max() { - decoder.queue_size_update(*max); - } - - // Apply resizes - for resize in &frame.resizes { - encoder.update_max_size(*resize); - } - - encoder.encode(frame.headers, &mut buf); - - // Decode the chunk! 
- decoder - .decode(&mut Cursor::new(&mut buf), |h| { - let e = expect.remove(0); - assert_eq!(h, e); - }) - .expect("full decode"); - } - - assert_eq!(0, expect.len()); - } -} - -impl Arbitrary for FuzzHpack { - fn arbitrary(_: &mut Gen) -> Self { - FuzzHpack::new(thread_rng().gen()) - } -} - -fn gen_header(g: &mut StdRng) -> Header> { - use http::{Method, StatusCode}; - - if g.gen_ratio(1, 10) { - match g.gen_range(0u32..5) { - 0 => { - let value = gen_string(g, 4, 20); - Header::Authority(to_shared(value)) - } - 1 => { - let method = match g.gen_range(0u32..6) { - 0 => Method::GET, - 1 => Method::POST, - 2 => Method::PUT, - 3 => Method::PATCH, - 4 => Method::DELETE, - 5 => { - let n: usize = g.gen_range(3..7); - let bytes: Vec = (0..n) - .map(|_| *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap())) - .collect(); - - Method::from_bytes(&bytes).unwrap() - } - _ => unreachable!(), - }; - - Header::Method(method) - } - 2 => { - let value = match g.gen_range(0u32..2) { - 0 => "http", - 1 => "https", - _ => unreachable!(), - }; - - Header::Scheme(to_shared(value.to_string())) - } - 3 => { - let value = match g.gen_range(0u32..100) { - 0 => "/".to_string(), - 1 => "/index.html".to_string(), - _ => gen_string(g, 2, 20), - }; - - Header::Path(to_shared(value)) - } - 4 => { - let status = (g.gen::() % 500) + 100; - - Header::Status(StatusCode::from_u16(status).unwrap()) - } - _ => unreachable!(), - } - } else { - let name = if g.gen_ratio(1, 10) { - None - } else { - Some(gen_header_name(g)) - }; - let mut value = gen_header_value(g); - - if g.gen_ratio(1, 30) { - value.set_sensitive(true); - } - - Header::Field { name, value } - } -} - -fn gen_header_name(g: &mut StdRng) -> HeaderName { - use http::header; - - if g.gen_ratio(1, 2) { - g.sample( - Slice::new(&[ - header::ACCEPT, - header::ACCEPT_CHARSET, - header::ACCEPT_ENCODING, - header::ACCEPT_LANGUAGE, - header::ACCEPT_RANGES, - header::ACCESS_CONTROL_ALLOW_CREDENTIALS, - 
header::ACCESS_CONTROL_ALLOW_HEADERS, - header::ACCESS_CONTROL_ALLOW_METHODS, - header::ACCESS_CONTROL_ALLOW_ORIGIN, - header::ACCESS_CONTROL_EXPOSE_HEADERS, - header::ACCESS_CONTROL_MAX_AGE, - header::ACCESS_CONTROL_REQUEST_HEADERS, - header::ACCESS_CONTROL_REQUEST_METHOD, - header::AGE, - header::ALLOW, - header::ALT_SVC, - header::AUTHORIZATION, - header::CACHE_CONTROL, - header::CONNECTION, - header::CONTENT_DISPOSITION, - header::CONTENT_ENCODING, - header::CONTENT_LANGUAGE, - header::CONTENT_LENGTH, - header::CONTENT_LOCATION, - header::CONTENT_RANGE, - header::CONTENT_SECURITY_POLICY, - header::CONTENT_SECURITY_POLICY_REPORT_ONLY, - header::CONTENT_TYPE, - header::COOKIE, - header::DNT, - header::DATE, - header::ETAG, - header::EXPECT, - header::EXPIRES, - header::FORWARDED, - header::FROM, - header::HOST, - header::IF_MATCH, - header::IF_MODIFIED_SINCE, - header::IF_NONE_MATCH, - header::IF_RANGE, - header::IF_UNMODIFIED_SINCE, - header::LAST_MODIFIED, - header::LINK, - header::LOCATION, - header::MAX_FORWARDS, - header::ORIGIN, - header::PRAGMA, - header::PROXY_AUTHENTICATE, - header::PROXY_AUTHORIZATION, - header::PUBLIC_KEY_PINS, - header::PUBLIC_KEY_PINS_REPORT_ONLY, - header::RANGE, - header::REFERER, - header::REFERRER_POLICY, - header::REFRESH, - header::RETRY_AFTER, - header::SERVER, - header::SET_COOKIE, - header::STRICT_TRANSPORT_SECURITY, - header::TE, - header::TRAILER, - header::TRANSFER_ENCODING, - header::USER_AGENT, - header::UPGRADE, - header::UPGRADE_INSECURE_REQUESTS, - header::VARY, - header::VIA, - header::WARNING, - header::WWW_AUTHENTICATE, - header::X_CONTENT_TYPE_OPTIONS, - header::X_DNS_PREFETCH_CONTROL, - header::X_FRAME_OPTIONS, - header::X_XSS_PROTECTION, - ]) - .unwrap(), - ) - .clone() - } else { - let value = gen_string(g, 1, 25); - HeaderName::from_bytes(value.as_bytes()).unwrap() - } -} - -fn gen_header_value(g: &mut StdRng) -> HeaderValue { - let value = gen_string(g, 0, 70); - 
HeaderValue::from_bytes(value.as_bytes()).unwrap() -} - -fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String { - let bytes: Vec<_> = (min..max) - .map(|_| { - // Chars to pick from - *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----").unwrap()) - }) - .collect(); - - String::from_utf8(bytes).unwrap() -} - -fn to_shared(src: String) -> crate::hpack::BytesStr { - crate::hpack::BytesStr::from(src.as_str()) -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/hpack/test/mod.rs s390-tools-2.33.1/rust-vendor/h2/src/hpack/test/mod.rs --- s390-tools-2.31.0/rust-vendor/h2/src/hpack/test/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/hpack/test/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -mod fixture; -mod fuzz; diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/lib.rs s390-tools-2.33.1/rust-vendor/h2/src/lib.rs --- s390-tools-2.31.0/rust-vendor/h2/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,141 +0,0 @@ -//! An asynchronous, HTTP/2 server and client implementation. -//! -//! This library implements the [HTTP/2] specification. The implementation is -//! asynchronous, using [futures] as the basis for the API. The implementation -//! is also decoupled from TCP or TLS details. The user must handle ALPN and -//! HTTP/1.1 upgrades themselves. -//! -//! # Getting started -//! -//! Add the following to your `Cargo.toml` file: -//! -//! ```toml -//! [dependencies] -//! h2 = "0.3" -//! ``` -//! -//! # Layout -//! -//! The crate is split into [`client`] and [`server`] modules. Types that are -//! common to both clients and servers are located at the root of the crate. -//! -//! See module level documentation for more details on how to use `h2`. -//! -//! # Handshake -//! -//! Both the client and the server require a connection to already be in a state -//! ready to start the HTTP/2 handshake. 
This library does not provide -//! facilities to do this. -//! -//! There are three ways to reach an appropriate state to start the HTTP/2 -//! handshake. -//! -//! * Opening an HTTP/1.1 connection and performing an [upgrade]. -//! * Opening a connection with TLS and use ALPN to negotiate the protocol. -//! * Open a connection with prior knowledge, i.e. both the client and the -//! server assume that the connection is immediately ready to start the -//! HTTP/2 handshake once opened. -//! -//! Once the connection is ready to start the HTTP/2 handshake, it can be -//! passed to [`server::handshake`] or [`client::handshake`]. At this point, the -//! library will start the handshake process, which consists of: -//! -//! * The client sends the connection preface (a predefined sequence of 24 -//! octets). -//! * Both the client and the server sending a SETTINGS frame. -//! -//! See the [Starting HTTP/2] in the specification for more details. -//! -//! # Flow control -//! -//! [Flow control] is a fundamental feature of HTTP/2. The `h2` library -//! exposes flow control to the user. -//! -//! An HTTP/2 client or server may not send unlimited data to the peer. When a -//! stream is initiated, both the client and the server are provided with an -//! initial window size for that stream. A window size is the number of bytes -//! the endpoint can send to the peer. At any point in time, the peer may -//! increase this window size by sending a `WINDOW_UPDATE` frame. Once a client -//! or server has sent data filling the window for a stream, no further data may -//! be sent on that stream until the peer increases the window. -//! -//! There is also a **connection level** window governing data sent across all -//! streams. -//! -//! Managing flow control for inbound data is done through [`FlowControl`]. -//! Managing flow control for outbound data is done through [`SendStream`]. See -//! the struct level documentation for those two types for more details. -//! -//! 
[HTTP/2]: https://http2.github.io/ -//! [futures]: https://docs.rs/futures/ -//! [`client`]: client/index.html -//! [`server`]: server/index.html -//! [Flow control]: http://httpwg.org/specs/rfc7540.html#FlowControl -//! [`FlowControl`]: struct.FlowControl.html -//! [`SendStream`]: struct.SendStream.html -//! [Starting HTTP/2]: http://httpwg.org/specs/rfc7540.html#starting -//! [upgrade]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism -//! [`server::handshake`]: server/fn.handshake.html -//! [`client::handshake`]: client/fn.handshake.html - -#![doc(html_root_url = "https://docs.rs/h2/0.3.22")] -#![deny( - missing_debug_implementations, - missing_docs, - clippy::missing_safety_doc, - clippy::undocumented_unsafe_blocks -)] -#![allow(clippy::type_complexity, clippy::manual_range_contains)] -#![cfg_attr(test, deny(warnings))] - -macro_rules! proto_err { - (conn: $($msg:tt)+) => { - tracing::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) - }; - (stream: $($msg:tt)+) => { - tracing::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) - }; -} - -macro_rules! 
ready { - ($e:expr) => { - match $e { - ::std::task::Poll::Ready(r) => r, - ::std::task::Poll::Pending => return ::std::task::Poll::Pending, - } - }; -} - -#[cfg_attr(feature = "unstable", allow(missing_docs))] -mod codec; -mod error; -mod hpack; - -#[cfg(not(feature = "unstable"))] -mod proto; - -#[cfg(feature = "unstable")] -#[allow(missing_docs)] -pub mod proto; - -#[cfg(not(feature = "unstable"))] -mod frame; - -#[cfg(feature = "unstable")] -#[allow(missing_docs)] -pub mod frame; - -pub mod client; -pub mod ext; -pub mod server; -mod share; - -#[cfg(fuzzing)] -#[cfg_attr(feature = "unstable", allow(missing_docs))] -pub mod fuzz_bridge; - -pub use crate::error::{Error, Reason}; -pub use crate::share::{FlowControl, Ping, PingPong, Pong, RecvStream, SendStream, StreamId}; - -#[cfg(feature = "unstable")] -pub use codec::{Codec, SendError, UserError}; diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/connection.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/connection.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/connection.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/connection.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,598 +0,0 @@ -use crate::codec::UserError; -use crate::frame::{Reason, StreamId}; -use crate::{client, frame, server}; - -use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE; -use crate::proto::*; - -use bytes::{Buf, Bytes}; -use futures_core::Stream; -use std::io; -use std::marker::PhantomData; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use tokio::io::{AsyncRead, AsyncWrite}; - -/// An H2 connection -#[derive(Debug)] -pub(crate) struct Connection -where - P: Peer, -{ - /// Read / write frame values - codec: Codec>, - - inner: ConnectionInner, -} - -// Extracted part of `Connection` which does not depend on `T`. Reduces the amount of duplicated -// method instantiations. 
-#[derive(Debug)] -struct ConnectionInner -where - P: Peer, -{ - /// Tracks the connection level state transitions. - state: State, - - /// An error to report back once complete. - /// - /// This exists separately from State in order to support - /// graceful shutdown. - error: Option, - - /// Pending GOAWAY frames to write. - go_away: GoAway, - - /// Ping/pong handler - ping_pong: PingPong, - - /// Connection settings - settings: Settings, - - /// Stream state handler - streams: Streams, - - /// A `tracing` span tracking the lifetime of the connection. - span: tracing::Span, - - /// Client or server - _phantom: PhantomData

, -} - -struct DynConnection<'a, B: Buf = Bytes> { - state: &'a mut State, - - go_away: &'a mut GoAway, - - streams: DynStreams<'a, B>, - - error: &'a mut Option, - - ping_pong: &'a mut PingPong, -} - -#[derive(Debug, Clone)] -pub(crate) struct Config { - pub next_stream_id: StreamId, - pub initial_max_send_streams: usize, - pub max_send_buffer_size: usize, - pub reset_stream_duration: Duration, - pub reset_stream_max: usize, - pub remote_reset_stream_max: usize, - pub settings: frame::Settings, -} - -#[derive(Debug)] -enum State { - /// Currently open in a sane state - Open, - - /// The codec must be flushed - Closing(Reason, Initiator), - - /// In a closed state - Closed(Reason, Initiator), -} - -impl Connection -where - T: AsyncRead + AsyncWrite + Unpin, - P: Peer, - B: Buf, -{ - pub fn new(codec: Codec>, config: Config) -> Connection { - fn streams_config(config: &Config) -> streams::Config { - streams::Config { - local_init_window_sz: config - .settings - .initial_window_size() - .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), - initial_max_send_streams: config.initial_max_send_streams, - local_max_buffer_size: config.max_send_buffer_size, - local_next_stream_id: config.next_stream_id, - local_push_enabled: config.settings.is_push_enabled().unwrap_or(true), - extended_connect_protocol_enabled: config - .settings - .is_extended_connect_protocol_enabled() - .unwrap_or(false), - local_reset_duration: config.reset_stream_duration, - local_reset_max: config.reset_stream_max, - remote_reset_max: config.remote_reset_stream_max, - remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, - remote_max_initiated: config - .settings - .max_concurrent_streams() - .map(|max| max as usize), - } - } - let streams = Streams::new(streams_config(&config)); - Connection { - codec, - inner: ConnectionInner { - state: State::Open, - error: None, - go_away: GoAway::new(), - ping_pong: PingPong::new(), - settings: Settings::new(config.settings), - streams, - span: 
tracing::debug_span!("Connection", peer = %P::NAME), - _phantom: PhantomData, - }, - } - } - - /// connection flow control - pub(crate) fn set_target_window_size(&mut self, size: WindowSize) { - let _res = self.inner.streams.set_target_connection_window_size(size); - // TODO: proper error handling - debug_assert!(_res.is_ok()); - } - - /// Send a new SETTINGS frame with an updated initial window size. - pub(crate) fn set_initial_window_size(&mut self, size: WindowSize) -> Result<(), UserError> { - let mut settings = frame::Settings::default(); - settings.set_initial_window_size(Some(size)); - self.inner.settings.send_settings(settings) - } - - /// Send a new SETTINGS frame with extended CONNECT protocol enabled. - pub(crate) fn set_enable_connect_protocol(&mut self) -> Result<(), UserError> { - let mut settings = frame::Settings::default(); - settings.set_enable_connect_protocol(Some(1)); - self.inner.settings.send_settings(settings) - } - - /// Returns the maximum number of concurrent streams that may be initiated - /// by this peer. - pub(crate) fn max_send_streams(&self) -> usize { - self.inner.streams.max_send_streams() - } - - /// Returns the maximum number of concurrent streams that may be initiated - /// by the remote peer. - pub(crate) fn max_recv_streams(&self) -> usize { - self.inner.streams.max_recv_streams() - } - - #[cfg(feature = "unstable")] - pub fn num_wired_streams(&self) -> usize { - self.inner.streams.num_wired_streams() - } - - /// Returns `Ready` when the connection is ready to receive a frame. - /// - /// Returns `Error` as this may raise errors that are caused by delayed - /// processing of received frames. 
- fn poll_ready(&mut self, cx: &mut Context) -> Poll> { - let _e = self.inner.span.enter(); - let span = tracing::trace_span!("poll_ready"); - let _e = span.enter(); - // The order of these calls don't really matter too much - ready!(self.inner.ping_pong.send_pending_pong(cx, &mut self.codec))?; - ready!(self.inner.ping_pong.send_pending_ping(cx, &mut self.codec))?; - ready!(self - .inner - .settings - .poll_send(cx, &mut self.codec, &mut self.inner.streams))?; - ready!(self.inner.streams.send_pending_refusal(cx, &mut self.codec))?; - - Poll::Ready(Ok(())) - } - - /// Send any pending GOAWAY frames. - /// - /// This will return `Some(reason)` if the connection should be closed - /// afterwards. If this is a graceful shutdown, this returns `None`. - fn poll_go_away(&mut self, cx: &mut Context) -> Poll>> { - self.inner.go_away.send_pending_go_away(cx, &mut self.codec) - } - - pub fn go_away_from_user(&mut self, e: Reason) { - self.inner.as_dyn().go_away_from_user(e) - } - - fn take_error(&mut self, ours: Reason, initiator: Initiator) -> Result<(), Error> { - let (debug_data, theirs) = self - .inner - .error - .take() - .as_ref() - .map_or((Bytes::new(), Reason::NO_ERROR), |frame| { - (frame.debug_data().clone(), frame.reason()) - }); - - match (ours, theirs) { - (Reason::NO_ERROR, Reason::NO_ERROR) => Ok(()), - (ours, Reason::NO_ERROR) => Err(Error::GoAway(Bytes::new(), ours, initiator)), - // If both sides reported an error, give their - // error back to th user. We assume our error - // was a consequence of their error, and less - // important. 
- (_, theirs) => Err(Error::remote_go_away(debug_data, theirs)), - } - } - - /// Closes the connection by transitioning to a GOAWAY state - /// iff there are no streams or references - pub fn maybe_close_connection_if_no_streams(&mut self) { - // If we poll() and realize that there are no streams or references - // then we can close the connection by transitioning to GOAWAY - if !self.inner.streams.has_streams_or_other_references() { - self.inner.as_dyn().go_away_now(Reason::NO_ERROR); - } - } - - pub(crate) fn take_user_pings(&mut self) -> Option { - self.inner.ping_pong.take_user_pings() - } - - /// Advances the internal state of the connection. - pub fn poll(&mut self, cx: &mut Context) -> Poll> { - // XXX(eliza): cloning the span is unfortunately necessary here in - // order to placate the borrow checker — `self` is mutably borrowed by - // `poll2`, which means that we can't borrow `self.span` to enter it. - // The clone is just an atomic ref bump. - let span = self.inner.span.clone(); - let _e = span.enter(); - let span = tracing::trace_span!("poll"); - let _e = span.enter(); - - loop { - tracing::trace!(connection.state = ?self.inner.state); - // TODO: probably clean up this glob of code - match self.inner.state { - // When open, continue to poll a frame - State::Open => { - let result = match self.poll2(cx) { - Poll::Ready(result) => result, - // The connection is not ready to make progress - Poll::Pending => { - // Ensure all window updates have been sent. - // - // This will also handle flushing `self.codec` - ready!(self.inner.streams.poll_complete(cx, &mut self.codec))?; - - if (self.inner.error.is_some() - || self.inner.go_away.should_close_on_idle()) - && !self.inner.streams.has_streams() - { - self.inner.as_dyn().go_away_now(Reason::NO_ERROR); - continue; - } - - return Poll::Pending; - } - }; - - self.inner.as_dyn().handle_poll2_result(result)? 
- } - State::Closing(reason, initiator) => { - tracing::trace!("connection closing after flush"); - // Flush/shutdown the codec - ready!(self.codec.shutdown(cx))?; - - // Transition the state to error - self.inner.state = State::Closed(reason, initiator); - } - State::Closed(reason, initiator) => { - return Poll::Ready(self.take_error(reason, initiator)); - } - } - } - } - - fn poll2(&mut self, cx: &mut Context) -> Poll> { - // This happens outside of the loop to prevent needing to do a clock - // check and then comparison of the queue possibly multiple times a - // second (and thus, the clock wouldn't have changed enough to matter). - self.clear_expired_reset_streams(); - - loop { - // First, ensure that the `Connection` is able to receive a frame - // - // The order here matters: - // - poll_go_away may buffer a graceful shutdown GOAWAY frame - // - If it has, we've also added a PING to be sent in poll_ready - if let Some(reason) = ready!(self.poll_go_away(cx)?) { - if self.inner.go_away.should_close_now() { - if self.inner.go_away.is_user_initiated() { - // A user initiated abrupt shutdown shouldn't return - // the same error back to the user. - return Poll::Ready(Ok(())); - } else { - return Poll::Ready(Err(Error::library_go_away(reason))); - } - } - // Only NO_ERROR should be waiting for idle - debug_assert_eq!( - reason, - Reason::NO_ERROR, - "graceful GOAWAY should be NO_ERROR" - ); - } - ready!(self.poll_ready(cx))?; - - match self - .inner - .as_dyn() - .recv_frame(ready!(Pin::new(&mut self.codec).poll_next(cx)?))? 
- { - ReceivedFrame::Settings(frame) => { - self.inner.settings.recv_settings( - frame, - &mut self.codec, - &mut self.inner.streams, - )?; - } - ReceivedFrame::Continue => (), - ReceivedFrame::Done => { - return Poll::Ready(Ok(())); - } - } - } - } - - fn clear_expired_reset_streams(&mut self) { - self.inner.streams.clear_expired_reset_streams(); - } -} - -impl ConnectionInner -where - P: Peer, - B: Buf, -{ - fn as_dyn(&mut self) -> DynConnection<'_, B> { - let ConnectionInner { - state, - go_away, - streams, - error, - ping_pong, - .. - } = self; - let streams = streams.as_dyn(); - DynConnection { - state, - go_away, - streams, - error, - ping_pong, - } - } -} - -impl DynConnection<'_, B> -where - B: Buf, -{ - fn go_away(&mut self, id: StreamId, e: Reason) { - let frame = frame::GoAway::new(id, e); - self.streams.send_go_away(id); - self.go_away.go_away(frame); - } - - fn go_away_now(&mut self, e: Reason) { - let last_processed_id = self.streams.last_processed_id(); - let frame = frame::GoAway::new(last_processed_id, e); - self.go_away.go_away_now(frame); - } - - fn go_away_now_data(&mut self, e: Reason, data: Bytes) { - let last_processed_id = self.streams.last_processed_id(); - let frame = frame::GoAway::with_debug_data(last_processed_id, e, data); - self.go_away.go_away_now(frame); - } - - fn go_away_from_user(&mut self, e: Reason) { - let last_processed_id = self.streams.last_processed_id(); - let frame = frame::GoAway::new(last_processed_id, e); - self.go_away.go_away_from_user(frame); - - // Notify all streams of reason we're abruptly closing. - self.streams.handle_error(Error::user_go_away(e)); - } - - fn handle_poll2_result(&mut self, result: Result<(), Error>) -> Result<(), Error> { - match result { - // The connection has shutdown normally - Ok(()) => { - *self.state = State::Closing(Reason::NO_ERROR, Initiator::Library); - Ok(()) - } - // Attempting to read a frame resulted in a connection level - // error. 
This is handled by setting a GOAWAY frame followed by - // terminating the connection. - Err(Error::GoAway(debug_data, reason, initiator)) => { - let e = Error::GoAway(debug_data.clone(), reason, initiator); - tracing::debug!(error = ?e, "Connection::poll; connection error"); - - // We may have already sent a GOAWAY for this error, - // if so, don't send another, just flush and close up. - if self - .go_away - .going_away() - .map_or(false, |frame| frame.reason() == reason) - { - tracing::trace!(" -> already going away"); - *self.state = State::Closing(reason, initiator); - return Ok(()); - } - - // Reset all active streams - self.streams.handle_error(e); - self.go_away_now_data(reason, debug_data); - Ok(()) - } - // Attempting to read a frame resulted in a stream level error. - // This is handled by resetting the frame then trying to read - // another frame. - Err(Error::Reset(id, reason, initiator)) => { - debug_assert_eq!(initiator, Initiator::Library); - tracing::trace!(?id, ?reason, "stream error"); - self.streams.send_reset(id, reason); - Ok(()) - } - // Attempting to read a frame resulted in an I/O error. All - // active streams must be reset. - // - // TODO: Are I/O errors recoverable? 
- Err(Error::Io(e, inner)) => { - tracing::debug!(error = ?e, "Connection::poll; IO error"); - let e = Error::Io(e, inner); - - // Reset all active streams - self.streams.handle_error(e.clone()); - - // Return the error - Err(e) - } - } - } - - fn recv_frame(&mut self, frame: Option) -> Result { - use crate::frame::Frame::*; - match frame { - Some(Headers(frame)) => { - tracing::trace!(?frame, "recv HEADERS"); - self.streams.recv_headers(frame)?; - } - Some(Data(frame)) => { - tracing::trace!(?frame, "recv DATA"); - self.streams.recv_data(frame)?; - } - Some(Reset(frame)) => { - tracing::trace!(?frame, "recv RST_STREAM"); - self.streams.recv_reset(frame)?; - } - Some(PushPromise(frame)) => { - tracing::trace!(?frame, "recv PUSH_PROMISE"); - self.streams.recv_push_promise(frame)?; - } - Some(Settings(frame)) => { - tracing::trace!(?frame, "recv SETTINGS"); - return Ok(ReceivedFrame::Settings(frame)); - } - Some(GoAway(frame)) => { - tracing::trace!(?frame, "recv GOAWAY"); - // This should prevent starting new streams, - // but should allow continuing to process current streams - // until they are all EOS. Once they are, State should - // transition to GoAway. 
- self.streams.recv_go_away(&frame)?; - *self.error = Some(frame); - } - Some(Ping(frame)) => { - tracing::trace!(?frame, "recv PING"); - let status = self.ping_pong.recv_ping(frame); - if status.is_shutdown() { - assert!( - self.go_away.is_going_away(), - "received unexpected shutdown ping" - ); - - let last_processed_id = self.streams.last_processed_id(); - self.go_away(last_processed_id, Reason::NO_ERROR); - } - } - Some(WindowUpdate(frame)) => { - tracing::trace!(?frame, "recv WINDOW_UPDATE"); - self.streams.recv_window_update(frame)?; - } - Some(Priority(frame)) => { - tracing::trace!(?frame, "recv PRIORITY"); - // TODO: handle - } - None => { - tracing::trace!("codec closed"); - self.streams.recv_eof(false).expect("mutex poisoned"); - return Ok(ReceivedFrame::Done); - } - } - Ok(ReceivedFrame::Continue) - } -} - -enum ReceivedFrame { - Settings(frame::Settings), - Continue, - Done, -} - -impl Connection -where - T: AsyncRead + AsyncWrite, - B: Buf, -{ - pub(crate) fn streams(&self) -> &Streams { - &self.inner.streams - } -} - -impl Connection -where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, -{ - pub fn next_incoming(&mut self) -> Option> { - self.inner.streams.next_incoming() - } - - // Graceful shutdown only makes sense for server peers. - pub fn go_away_gracefully(&mut self) { - if self.inner.go_away.is_going_away() { - // No reason to start a new one. - return; - } - - // According to http://httpwg.org/specs/rfc7540.html#GOAWAY: - // - // > A server that is attempting to gracefully shut down a connection - // > SHOULD send an initial GOAWAY frame with the last stream - // > identifier set to 2^31-1 and a NO_ERROR code. This signals to the - // > client that a shutdown is imminent and that initiating further - // > requests is prohibited. After allowing time for any in-flight - // > stream creation (at least one round-trip time), the server can - // > send another GOAWAY frame with an updated last stream identifier. 
- // > This ensures that a connection can be cleanly shut down without - // > losing requests. - self.inner.as_dyn().go_away(StreamId::MAX, Reason::NO_ERROR); - - // We take the advice of waiting 1 RTT literally, and wait - // for a pong before proceeding. - self.inner.ping_pong.ping_shutdown(); - } -} - -impl Drop for Connection -where - P: Peer, - B: Buf, -{ - fn drop(&mut self) { - // Ignore errors as this indicates that the mutex is poisoned. - let _ = self.inner.streams.recv_eof(true); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/error.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/error.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/error.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ -use crate::codec::SendError; -use crate::frame::{Reason, StreamId}; - -use bytes::Bytes; -use std::fmt; -use std::io; - -/// Either an H2 reason or an I/O error -#[derive(Clone, Debug)] -pub enum Error { - Reset(StreamId, Reason, Initiator), - GoAway(Bytes, Reason, Initiator), - Io(io::ErrorKind, Option), -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum Initiator { - User, - Library, - Remote, -} - -impl Error { - pub(crate) fn is_local(&self) -> bool { - match *self { - Self::Reset(_, _, initiator) | Self::GoAway(_, _, initiator) => initiator.is_local(), - Self::Io(..) 
=> true, - } - } - - pub(crate) fn user_go_away(reason: Reason) -> Self { - Self::GoAway(Bytes::new(), reason, Initiator::User) - } - - pub(crate) fn library_reset(stream_id: StreamId, reason: Reason) -> Self { - Self::Reset(stream_id, reason, Initiator::Library) - } - - pub(crate) fn library_go_away(reason: Reason) -> Self { - Self::GoAway(Bytes::new(), reason, Initiator::Library) - } - - pub(crate) fn library_go_away_data(reason: Reason, debug_data: impl Into) -> Self { - Self::GoAway(debug_data.into(), reason, Initiator::Library) - } - - pub(crate) fn remote_reset(stream_id: StreamId, reason: Reason) -> Self { - Self::Reset(stream_id, reason, Initiator::Remote) - } - - pub(crate) fn remote_go_away(debug_data: Bytes, reason: Reason) -> Self { - Self::GoAway(debug_data, reason, Initiator::Remote) - } -} - -impl Initiator { - fn is_local(&self) -> bool { - match *self { - Self::User | Self::Library => true, - Self::Remote => false, - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - Self::Reset(_, reason, _) | Self::GoAway(_, reason, _) => reason.fmt(fmt), - Self::Io(_, Some(ref inner)) => inner.fmt(fmt), - Self::Io(kind, None) => io::Error::from(kind).fmt(fmt), - } - } -} - -impl From for Error { - fn from(src: io::ErrorKind) -> Self { - Error::Io(src, None) - } -} - -impl From for Error { - fn from(src: io::Error) -> Self { - Error::Io(src.kind(), src.get_ref().map(|inner| inner.to_string())) - } -} - -impl From for SendError { - fn from(src: Error) -> Self { - Self::Connection(src) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/go_away.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/go_away.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/go_away.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/go_away.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,154 +0,0 @@ -use crate::codec::Codec; -use crate::frame::{self, Reason, StreamId}; - -use bytes::Buf; 
-use std::io; -use std::task::{Context, Poll}; -use tokio::io::AsyncWrite; - -/// Manages our sending of GOAWAY frames. -#[derive(Debug)] -pub(super) struct GoAway { - /// Whether the connection should close now, or wait until idle. - close_now: bool, - /// Records if we've sent any GOAWAY before. - going_away: Option, - /// Whether the user started the GOAWAY by calling `abrupt_shutdown`. - is_user_initiated: bool, - /// A GOAWAY frame that must be buffered in the Codec immediately. - pending: Option, -} - -/// Keeps a memory of any GOAWAY frames we've sent before. -/// -/// This looks very similar to a `frame::GoAway`, but is a separate type. Why? -/// Mostly for documentation purposes. This type is to record status. If it -/// were a `frame::GoAway`, it might appear like we eventually wanted to -/// serialize it. We **only** want to be able to look up these fields at a -/// later time. -#[derive(Debug)] -pub(crate) struct GoingAway { - /// Stores the highest stream ID of a GOAWAY that has been sent. - /// - /// It's illegal to send a subsequent GOAWAY with a higher ID. - last_processed_id: StreamId, - - /// Records the error code of any GOAWAY frame sent. - reason: Reason, -} - -impl GoAway { - pub fn new() -> Self { - GoAway { - close_now: false, - going_away: None, - is_user_initiated: false, - pending: None, - } - } - - /// Enqueue a GOAWAY frame to be written. - /// - /// The connection is expected to continue to run until idle. 
- pub fn go_away(&mut self, f: frame::GoAway) { - if let Some(ref going_away) = self.going_away { - assert!( - f.last_stream_id() <= going_away.last_processed_id, - "GOAWAY stream IDs shouldn't be higher; \ - last_processed_id = {:?}, f.last_stream_id() = {:?}", - going_away.last_processed_id, - f.last_stream_id(), - ); - } - - self.going_away = Some(GoingAway { - last_processed_id: f.last_stream_id(), - reason: f.reason(), - }); - self.pending = Some(f); - } - - pub fn go_away_now(&mut self, f: frame::GoAway) { - self.close_now = true; - if let Some(ref going_away) = self.going_away { - // Prevent sending the same GOAWAY twice. - if going_away.last_processed_id == f.last_stream_id() && going_away.reason == f.reason() - { - return; - } - } - self.go_away(f); - } - - pub fn go_away_from_user(&mut self, f: frame::GoAway) { - self.is_user_initiated = true; - self.go_away_now(f); - } - - /// Return if a GOAWAY has ever been scheduled. - pub fn is_going_away(&self) -> bool { - self.going_away.is_some() - } - - pub fn is_user_initiated(&self) -> bool { - self.is_user_initiated - } - - /// Returns the going away info, if any. - pub fn going_away(&self) -> Option<&GoingAway> { - self.going_away.as_ref() - } - - /// Returns if the connection should close now, or wait until idle. - pub fn should_close_now(&self) -> bool { - self.pending.is_none() && self.close_now - } - - /// Returns if the connection should be closed when idle. - pub fn should_close_on_idle(&self) -> bool { - !self.close_now - && self - .going_away - .as_ref() - .map(|g| g.last_processed_id != StreamId::MAX) - .unwrap_or(false) - } - - /// Try to write a pending GOAWAY frame to the buffer. - /// - /// If a frame is written, the `Reason` of the GOAWAY is returned. 
- pub fn send_pending_go_away( - &mut self, - cx: &mut Context, - dst: &mut Codec, - ) -> Poll>> - where - T: AsyncWrite + Unpin, - B: Buf, - { - if let Some(frame) = self.pending.take() { - if !dst.poll_ready(cx)?.is_ready() { - self.pending = Some(frame); - return Poll::Pending; - } - - let reason = frame.reason(); - dst.buffer(frame.into()).expect("invalid GOAWAY frame"); - - return Poll::Ready(Some(Ok(reason))); - } else if self.should_close_now() { - return match self.going_away().map(|going_away| going_away.reason) { - Some(reason) => Poll::Ready(Some(Ok(reason))), - None => Poll::Ready(None), - }; - } - - Poll::Ready(None) - } -} - -impl GoingAway { - pub(crate) fn reason(&self) -> Reason { - self.reason - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/mod.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/mod.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -mod connection; -mod error; -mod go_away; -mod peer; -mod ping_pong; -mod settings; -mod streams; - -pub(crate) use self::connection::{Config, Connection}; -pub use self::error::{Error, Initiator}; -pub(crate) use self::peer::{Dyn as DynPeer, Peer}; -pub(crate) use self::ping_pong::UserPings; -pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams}; -pub(crate) use self::streams::{Open, PollReset, Prioritized}; - -use crate::codec::Codec; - -use self::go_away::GoAway; -use self::ping_pong::PingPong; -use self::settings::Settings; - -use crate::frame::{self, Frame}; - -use bytes::Buf; - -use tokio::io::AsyncWrite; - -pub type PingPayload = [u8; 8]; - -pub type WindowSize = u32; - -// Constants -pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; // i32::MAX as u32 -pub const DEFAULT_REMOTE_RESET_STREAM_MAX: usize = 20; -pub const DEFAULT_RESET_STREAM_MAX: usize = 10; -pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; -pub 
const DEFAULT_MAX_SEND_BUFFER_SIZE: usize = 1024 * 400; diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/peer.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/peer.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/peer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/peer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,93 +0,0 @@ -use crate::error::Reason; -use crate::frame::{Pseudo, StreamId}; -use crate::proto::{Error, Open}; - -use http::{HeaderMap, Request, Response}; - -use std::fmt; - -/// Either a Client or a Server -pub(crate) trait Peer { - /// Message type polled from the transport - type Poll: fmt::Debug; - const NAME: &'static str; - - fn r#dyn() -> Dyn; - - fn is_server() -> bool; - - fn convert_poll_message( - pseudo: Pseudo, - fields: HeaderMap, - stream_id: StreamId, - ) -> Result; - - fn is_local_init(id: StreamId) -> bool { - assert!(!id.is_zero()); - Self::is_server() == id.is_server_initiated() - } -} - -/// A dynamic representation of `Peer`. -/// -/// This is used internally to avoid incurring a generic on all internal types. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub(crate) enum Dyn { - Client, - Server, -} - -#[derive(Debug)] -pub enum PollMessage { - Client(Response<()>), - Server(Request<()>), -} - -// ===== impl Dyn ===== - -impl Dyn { - pub fn is_server(&self) -> bool { - *self == Dyn::Server - } - - pub fn is_local_init(&self, id: StreamId) -> bool { - assert!(!id.is_zero()); - self.is_server() == id.is_server_initiated() - } - - pub fn convert_poll_message( - &self, - pseudo: Pseudo, - fields: HeaderMap, - stream_id: StreamId, - ) -> Result { - if self.is_server() { - crate::server::Peer::convert_poll_message(pseudo, fields, stream_id) - .map(PollMessage::Server) - } else { - crate::client::Peer::convert_poll_message(pseudo, fields, stream_id) - .map(PollMessage::Client) - } - } - - /// Returns true if the remote peer can initiate a stream with the given ID. 
- pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), Error> { - if self.is_server() { - // Ensure that the ID is a valid client initiated ID - if mode.is_push_promise() || !id.is_client_initiated() { - proto_err!(conn: "cannot open stream {:?} - not client initiated", id); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - Ok(()) - } else { - // Ensure that the ID is a valid server initiated ID - if !mode.is_push_promise() || !id.is_server_initiated() { - proto_err!(conn: "cannot open stream {:?} - not server initiated", id); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - Ok(()) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/ping_pong.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/ping_pong.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/ping_pong.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/ping_pong.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,291 +0,0 @@ -use crate::codec::Codec; -use crate::frame::Ping; -use crate::proto::{self, PingPayload}; - -use bytes::Buf; -use futures_util::task::AtomicWaker; -use std::io; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::task::{Context, Poll}; -use tokio::io::AsyncWrite; - -/// Acknowledges ping requests from the remote. -#[derive(Debug)] -pub(crate) struct PingPong { - pending_ping: Option, - pending_pong: Option, - user_pings: Option, -} - -#[derive(Debug)] -pub(crate) struct UserPings(Arc); - -#[derive(Debug)] -struct UserPingsRx(Arc); - -#[derive(Debug)] -struct UserPingsInner { - state: AtomicUsize, - /// Task to wake up the main `Connection`. - ping_task: AtomicWaker, - /// Task to wake up `share::PingPong::poll_pong`. - pong_task: AtomicWaker, -} - -#[derive(Debug)] -struct PendingPing { - payload: PingPayload, - sent: bool, -} - -/// Status returned from `PingPong::recv_ping`. 
-#[derive(Debug)] -pub(crate) enum ReceivedPing { - MustAck, - Unknown, - Shutdown, -} - -/// No user ping pending. -const USER_STATE_EMPTY: usize = 0; -/// User has called `send_ping`, but PING hasn't been written yet. -const USER_STATE_PENDING_PING: usize = 1; -/// User PING has been written, waiting for PONG. -const USER_STATE_PENDING_PONG: usize = 2; -/// We've received user PONG, waiting for user to `poll_pong`. -const USER_STATE_RECEIVED_PONG: usize = 3; -/// The connection is closed. -const USER_STATE_CLOSED: usize = 4; - -// ===== impl PingPong ===== - -impl PingPong { - pub(crate) fn new() -> Self { - PingPong { - pending_ping: None, - pending_pong: None, - user_pings: None, - } - } - - /// Can only be called once. If called a second time, returns `None`. - pub(crate) fn take_user_pings(&mut self) -> Option { - if self.user_pings.is_some() { - return None; - } - - let user_pings = Arc::new(UserPingsInner { - state: AtomicUsize::new(USER_STATE_EMPTY), - ping_task: AtomicWaker::new(), - pong_task: AtomicWaker::new(), - }); - self.user_pings = Some(UserPingsRx(user_pings.clone())); - Some(UserPings(user_pings)) - } - - pub(crate) fn ping_shutdown(&mut self) { - assert!(self.pending_ping.is_none()); - - self.pending_ping = Some(PendingPing { - payload: Ping::SHUTDOWN, - sent: false, - }); - } - - /// Process a ping - pub(crate) fn recv_ping(&mut self, ping: Ping) -> ReceivedPing { - // The caller should always check that `send_pongs` returns ready before - // calling `recv_ping`. - assert!(self.pending_pong.is_none()); - - if ping.is_ack() { - if let Some(pending) = self.pending_ping.take() { - if &pending.payload == ping.payload() { - assert_eq!( - &pending.payload, - &Ping::SHUTDOWN, - "pending_ping should be for shutdown", - ); - tracing::trace!("recv PING SHUTDOWN ack"); - return ReceivedPing::Shutdown; - } - - // if not the payload we expected, put it back. 
- self.pending_ping = Some(pending); - } - - if let Some(ref users) = self.user_pings { - if ping.payload() == &Ping::USER && users.receive_pong() { - tracing::trace!("recv PING USER ack"); - return ReceivedPing::Unknown; - } - } - - // else we were acked a ping we didn't send? - // The spec doesn't require us to do anything about this, - // so for resiliency, just ignore it for now. - tracing::warn!("recv PING ack that we never sent: {:?}", ping); - ReceivedPing::Unknown - } else { - // Save the ping's payload to be sent as an acknowledgement. - self.pending_pong = Some(ping.into_payload()); - ReceivedPing::MustAck - } - } - - /// Send any pending pongs. - pub(crate) fn send_pending_pong( - &mut self, - cx: &mut Context, - dst: &mut Codec, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, - { - if let Some(pong) = self.pending_pong.take() { - if !dst.poll_ready(cx)?.is_ready() { - self.pending_pong = Some(pong); - return Poll::Pending; - } - - dst.buffer(Ping::pong(pong).into()) - .expect("invalid pong frame"); - } - - Poll::Ready(Ok(())) - } - - /// Send any pending pings. 
- pub(crate) fn send_pending_ping( - &mut self, - cx: &mut Context, - dst: &mut Codec, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, - { - if let Some(ref mut ping) = self.pending_ping { - if !ping.sent { - if !dst.poll_ready(cx)?.is_ready() { - return Poll::Pending; - } - - dst.buffer(Ping::new(ping.payload).into()) - .expect("invalid ping frame"); - ping.sent = true; - } - } else if let Some(ref users) = self.user_pings { - if users.0.state.load(Ordering::Acquire) == USER_STATE_PENDING_PING { - if !dst.poll_ready(cx)?.is_ready() { - return Poll::Pending; - } - - dst.buffer(Ping::new(Ping::USER).into()) - .expect("invalid ping frame"); - users - .0 - .state - .store(USER_STATE_PENDING_PONG, Ordering::Release); - } else { - users.0.ping_task.register(cx.waker()); - } - } - - Poll::Ready(Ok(())) - } -} - -impl ReceivedPing { - pub(crate) fn is_shutdown(&self) -> bool { - matches!(*self, Self::Shutdown) - } -} - -// ===== impl UserPings ===== - -impl UserPings { - pub(crate) fn send_ping(&self) -> Result<(), Option> { - let prev = self - .0 - .state - .compare_exchange( - USER_STATE_EMPTY, // current - USER_STATE_PENDING_PING, // new - Ordering::AcqRel, - Ordering::Acquire, - ) - .unwrap_or_else(|v| v); - - match prev { - USER_STATE_EMPTY => { - self.0.ping_task.wake(); - Ok(()) - } - USER_STATE_CLOSED => Err(Some(broken_pipe().into())), - _ => { - // Was already pending, user error! - Err(None) - } - } - } - - pub(crate) fn poll_pong(&self, cx: &mut Context) -> Poll> { - // Must register before checking state, in case state were to change - // before we could register, and then the ping would just be lost. 
- self.0.pong_task.register(cx.waker()); - let prev = self - .0 - .state - .compare_exchange( - USER_STATE_RECEIVED_PONG, // current - USER_STATE_EMPTY, // new - Ordering::AcqRel, - Ordering::Acquire, - ) - .unwrap_or_else(|v| v); - - match prev { - USER_STATE_RECEIVED_PONG => Poll::Ready(Ok(())), - USER_STATE_CLOSED => Poll::Ready(Err(broken_pipe().into())), - _ => Poll::Pending, - } - } -} - -// ===== impl UserPingsRx ===== - -impl UserPingsRx { - fn receive_pong(&self) -> bool { - let prev = self - .0 - .state - .compare_exchange( - USER_STATE_PENDING_PONG, // current - USER_STATE_RECEIVED_PONG, // new - Ordering::AcqRel, - Ordering::Acquire, - ) - .unwrap_or_else(|v| v); - - if prev == USER_STATE_PENDING_PONG { - self.0.pong_task.wake(); - true - } else { - false - } - } -} - -impl Drop for UserPingsRx { - fn drop(&mut self) { - self.0.state.store(USER_STATE_CLOSED, Ordering::Release); - self.0.pong_task.wake(); - } -} - -fn broken_pipe() -> io::Error { - io::ErrorKind::BrokenPipe.into() -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/settings.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/settings.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/settings.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/settings.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,155 +0,0 @@ -use crate::codec::UserError; -use crate::error::Reason; -use crate::frame; -use crate::proto::*; -use std::task::{Context, Poll}; - -#[derive(Debug)] -pub(crate) struct Settings { - /// Our local SETTINGS sync state with the remote. - local: Local, - /// Received SETTINGS frame pending processing. The ACK must be written to - /// the socket first then the settings applied **before** receiving any - /// further frames. - remote: Option, -} - -#[derive(Debug)] -enum Local { - /// We want to send these SETTINGS to the remote when the socket is ready. 
- ToSend(frame::Settings), - /// We have sent these SETTINGS and are waiting for the remote to ACK - /// before we apply them. - WaitingAck(frame::Settings), - /// Our local settings are in sync with the remote. - Synced, -} - -impl Settings { - pub(crate) fn new(local: frame::Settings) -> Self { - Settings { - // We assume the initial local SETTINGS were flushed during - // the handshake process. - local: Local::WaitingAck(local), - remote: None, - } - } - - pub(crate) fn recv_settings( - &mut self, - frame: frame::Settings, - codec: &mut Codec, - streams: &mut Streams, - ) -> Result<(), Error> - where - T: AsyncWrite + Unpin, - B: Buf, - C: Buf, - P: Peer, - { - if frame.is_ack() { - match &self.local { - Local::WaitingAck(local) => { - tracing::debug!("received settings ACK; applying {:?}", local); - - if let Some(max) = local.max_frame_size() { - codec.set_max_recv_frame_size(max as usize); - } - - if let Some(max) = local.max_header_list_size() { - codec.set_max_recv_header_list_size(max as usize); - } - - if let Some(val) = local.header_table_size() { - codec.set_recv_header_table_size(val as usize); - } - - streams.apply_local_settings(local)?; - self.local = Local::Synced; - Ok(()) - } - Local::ToSend(..) | Local::Synced => { - // We haven't sent any SETTINGS frames to be ACKed, so - // this is very bizarre! Remote is either buggy or malicious. - proto_err!(conn: "received unexpected settings ack"); - Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) - } - } - } else { - // We always ACK before reading more frames, so `remote` should - // always be none! - assert!(self.remote.is_none()); - self.remote = Some(frame); - Ok(()) - } - } - - pub(crate) fn send_settings(&mut self, frame: frame::Settings) -> Result<(), UserError> { - assert!(!frame.is_ack()); - match &self.local { - Local::ToSend(..) | Local::WaitingAck(..) 
=> Err(UserError::SendSettingsWhilePending), - Local::Synced => { - tracing::trace!("queue to send local settings: {:?}", frame); - self.local = Local::ToSend(frame); - Ok(()) - } - } - } - - pub(crate) fn poll_send( - &mut self, - cx: &mut Context, - dst: &mut Codec, - streams: &mut Streams, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, - C: Buf, - P: Peer, - { - if let Some(settings) = &self.remote { - if !dst.poll_ready(cx)?.is_ready() { - return Poll::Pending; - } - - // Create an ACK settings frame - let frame = frame::Settings::ack(); - - // Buffer the settings frame - dst.buffer(frame.into()).expect("invalid settings frame"); - - tracing::trace!("ACK sent; applying settings"); - - streams.apply_remote_settings(settings)?; - - if let Some(val) = settings.header_table_size() { - dst.set_send_header_table_size(val as usize); - } - - if let Some(val) = settings.max_frame_size() { - dst.set_max_send_frame_size(val as usize); - } - } - - self.remote = None; - - match &self.local { - Local::ToSend(settings) => { - if !dst.poll_ready(cx)?.is_ready() { - return Poll::Pending; - } - - // Buffer the settings frame - dst.buffer(settings.clone().into()) - .expect("invalid settings frame"); - tracing::trace!("local settings sent; waiting for ack: {:?}", settings); - - self.local = Local::WaitingAck(settings.clone()); - } - Local::WaitingAck(..) | Local::Synced => {} - } - - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/buffer.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/buffer.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/buffer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/buffer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,95 +0,0 @@ -use slab::Slab; - -/// Buffers frames for multiple streams. 
-#[derive(Debug)] -pub struct Buffer { - slab: Slab>, -} - -/// A sequence of frames in a `Buffer` -#[derive(Debug)] -pub struct Deque { - indices: Option, -} - -/// Tracks the head & tail for a sequence of frames in a `Buffer`. -#[derive(Debug, Default, Copy, Clone)] -struct Indices { - head: usize, - tail: usize, -} - -#[derive(Debug)] -struct Slot { - value: T, - next: Option, -} - -impl Buffer { - pub fn new() -> Self { - Buffer { slab: Slab::new() } - } -} - -impl Deque { - pub fn new() -> Self { - Deque { indices: None } - } - - pub fn is_empty(&self) -> bool { - self.indices.is_none() - } - - pub fn push_back(&mut self, buf: &mut Buffer, value: T) { - let key = buf.slab.insert(Slot { value, next: None }); - - match self.indices { - Some(ref mut idxs) => { - buf.slab[idxs.tail].next = Some(key); - idxs.tail = key; - } - None => { - self.indices = Some(Indices { - head: key, - tail: key, - }); - } - } - } - - pub fn push_front(&mut self, buf: &mut Buffer, value: T) { - let key = buf.slab.insert(Slot { value, next: None }); - - match self.indices { - Some(ref mut idxs) => { - buf.slab[key].next = Some(idxs.head); - idxs.head = key; - } - None => { - self.indices = Some(Indices { - head: key, - tail: key, - }); - } - } - } - - pub fn pop_front(&mut self, buf: &mut Buffer) -> Option { - match self.indices { - Some(mut idxs) => { - let mut slot = buf.slab.remove(idxs.head); - - if idxs.head == idxs.tail { - assert!(slot.next.is_none()); - self.indices = None; - } else { - idxs.head = slot.next.take().unwrap(); - self.indices = Some(idxs); - } - - Some(slot.value) - } - None => None, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/counts.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/counts.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/counts.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/counts.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,253 +0,0 @@ -use super::*; - -use 
std::usize; - -#[derive(Debug)] -pub(super) struct Counts { - /// Acting as a client or server. This allows us to track which values to - /// inc / dec. - peer: peer::Dyn, - - /// Maximum number of locally initiated streams - max_send_streams: usize, - - /// Current number of remote initiated streams - num_send_streams: usize, - - /// Maximum number of remote initiated streams - max_recv_streams: usize, - - /// Current number of locally initiated streams - num_recv_streams: usize, - - /// Maximum number of pending locally reset streams - max_local_reset_streams: usize, - - /// Current number of pending locally reset streams - num_local_reset_streams: usize, - - /// Max number of "pending accept" streams that were remotely reset - max_remote_reset_streams: usize, - - /// Current number of "pending accept" streams that were remotely reset - num_remote_reset_streams: usize, -} - -impl Counts { - /// Create a new `Counts` using the provided configuration values. - pub fn new(peer: peer::Dyn, config: &Config) -> Self { - Counts { - peer, - max_send_streams: config.initial_max_send_streams, - num_send_streams: 0, - max_recv_streams: config.remote_max_initiated.unwrap_or(usize::MAX), - num_recv_streams: 0, - max_local_reset_streams: config.local_reset_max, - num_local_reset_streams: 0, - max_remote_reset_streams: config.remote_reset_max, - num_remote_reset_streams: 0, - } - } - - /// Returns true when the next opened stream will reach capacity of outbound streams - /// - /// The number of client send streams is incremented in prioritize; send_request has to guess if - /// it should wait before allowing another request to be sent. 
- pub fn next_send_stream_will_reach_capacity(&self) -> bool { - self.max_send_streams <= (self.num_send_streams + 1) - } - - /// Returns the current peer - pub fn peer(&self) -> peer::Dyn { - self.peer - } - - pub fn has_streams(&self) -> bool { - self.num_send_streams != 0 || self.num_recv_streams != 0 - } - - /// Returns true if the receive stream concurrency can be incremented - pub fn can_inc_num_recv_streams(&self) -> bool { - self.max_recv_streams > self.num_recv_streams - } - - /// Increments the number of concurrent receive streams. - /// - /// # Panics - /// - /// Panics on failure as this should have been validated before hand. - pub fn inc_num_recv_streams(&mut self, stream: &mut store::Ptr) { - assert!(self.can_inc_num_recv_streams()); - assert!(!stream.is_counted); - - // Increment the number of remote initiated streams - self.num_recv_streams += 1; - stream.is_counted = true; - } - - /// Returns true if the send stream concurrency can be incremented - pub fn can_inc_num_send_streams(&self) -> bool { - self.max_send_streams > self.num_send_streams - } - - /// Increments the number of concurrent send streams. - /// - /// # Panics - /// - /// Panics on failure as this should have been validated before hand. - pub fn inc_num_send_streams(&mut self, stream: &mut store::Ptr) { - assert!(self.can_inc_num_send_streams()); - assert!(!stream.is_counted); - - // Increment the number of remote initiated streams - self.num_send_streams += 1; - stream.is_counted = true; - } - - /// Returns true if the number of pending reset streams can be incremented. - pub fn can_inc_num_reset_streams(&self) -> bool { - self.max_local_reset_streams > self.num_local_reset_streams - } - - /// Increments the number of pending reset streams. - /// - /// # Panics - /// - /// Panics on failure as this should have been validated before hand. 
- pub fn inc_num_reset_streams(&mut self) { - assert!(self.can_inc_num_reset_streams()); - - self.num_local_reset_streams += 1; - } - - pub(crate) fn max_remote_reset_streams(&self) -> usize { - self.max_remote_reset_streams - } - - /// Returns true if the number of pending REMOTE reset streams can be - /// incremented. - pub(crate) fn can_inc_num_remote_reset_streams(&self) -> bool { - self.max_remote_reset_streams > self.num_remote_reset_streams - } - - /// Increments the number of pending REMOTE reset streams. - /// - /// # Panics - /// - /// Panics on failure as this should have been validated before hand. - pub(crate) fn inc_num_remote_reset_streams(&mut self) { - assert!(self.can_inc_num_remote_reset_streams()); - - self.num_remote_reset_streams += 1; - } - - pub(crate) fn dec_num_remote_reset_streams(&mut self) { - assert!(self.num_remote_reset_streams > 0); - - self.num_remote_reset_streams -= 1; - } - - pub fn apply_remote_settings(&mut self, settings: &frame::Settings) { - if let Some(val) = settings.max_concurrent_streams() { - self.max_send_streams = val as usize; - } - } - - /// Run a block of code that could potentially transition a stream's state. - /// - /// If the stream state transitions to closed, this function will perform - /// all necessary cleanup. - /// - /// TODO: Is this function still needed? - pub fn transition(&mut self, mut stream: store::Ptr, f: F) -> U - where - F: FnOnce(&mut Self, &mut store::Ptr) -> U, - { - // TODO: Does this need to be computed before performing the action? - let is_pending_reset = stream.is_pending_reset_expiration(); - - // Run the action - let ret = f(self, &mut stream); - - self.transition_after(stream, is_pending_reset); - - ret - } - - // TODO: move this to macro? 
- pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) { - tracing::trace!( - "transition_after; stream={:?}; state={:?}; is_closed={:?}; \ - pending_send_empty={:?}; buffered_send_data={}; \ - num_recv={}; num_send={}", - stream.id, - stream.state, - stream.is_closed(), - stream.pending_send.is_empty(), - stream.buffered_send_data, - self.num_recv_streams, - self.num_send_streams - ); - - if stream.is_closed() { - if !stream.is_pending_reset_expiration() { - stream.unlink(); - if is_reset_counted { - self.dec_num_reset_streams(); - } - } - - if stream.is_counted { - tracing::trace!("dec_num_streams; stream={:?}", stream.id); - // Decrement the number of active streams. - self.dec_num_streams(&mut stream); - } - } - - // Release the stream if it requires releasing - if stream.is_released() { - stream.remove(); - } - } - - /// Returns the maximum number of streams that can be initiated by this - /// peer. - pub(crate) fn max_send_streams(&self) -> usize { - self.max_send_streams - } - - /// Returns the maximum number of streams that can be initiated by the - /// remote peer. 
- pub(crate) fn max_recv_streams(&self) -> usize { - self.max_recv_streams - } - - fn dec_num_streams(&mut self, stream: &mut store::Ptr) { - assert!(stream.is_counted); - - if self.peer.is_local_init(stream.id) { - assert!(self.num_send_streams > 0); - self.num_send_streams -= 1; - stream.is_counted = false; - } else { - assert!(self.num_recv_streams > 0); - self.num_recv_streams -= 1; - stream.is_counted = false; - } - } - - fn dec_num_reset_streams(&mut self) { - assert!(self.num_local_reset_streams > 0); - self.num_local_reset_streams -= 1; - } -} - -impl Drop for Counts { - fn drop(&mut self) { - use std::thread; - - if !thread::panicking() { - debug_assert!(!self.has_streams()); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/flow_control.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/flow_control.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/flow_control.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/flow_control.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,269 +0,0 @@ -use crate::frame::Reason; -use crate::proto::{WindowSize, MAX_WINDOW_SIZE}; - -use std::fmt; - -// We don't want to send WINDOW_UPDATE frames for tiny changes, but instead -// aggregate them when the changes are significant. Many implementations do -// this by keeping a "ratio" of the update version the allowed window size. -// -// While some may wish to represent this ratio as percentage, using a f32, -// we skip having to deal with float math and stick to integers. To do so, -// the "ratio" is represented by 2 i32s, split into the numerator and -// denominator. For example, a 50% ratio is simply represented as 1/2. -// -// An example applying this ratio: If a stream has an allowed window size of -// 100 bytes, WINDOW_UPDATE frames are scheduled when the unclaimed change -// becomes greater than 1/2, or 50 bytes. 
-const UNCLAIMED_NUMERATOR: i32 = 1; -const UNCLAIMED_DENOMINATOR: i32 = 2; - -#[test] -#[allow(clippy::assertions_on_constants)] -fn sanity_unclaimed_ratio() { - assert!(UNCLAIMED_NUMERATOR < UNCLAIMED_DENOMINATOR); - assert!(UNCLAIMED_NUMERATOR >= 0); - assert!(UNCLAIMED_DENOMINATOR > 0); -} - -#[derive(Copy, Clone, Debug)] -pub struct FlowControl { - /// Window the peer knows about. - /// - /// This can go negative if a SETTINGS_INITIAL_WINDOW_SIZE is received. - /// - /// For example, say the peer sends a request and uses 32kb of the window. - /// We send a SETTINGS_INITIAL_WINDOW_SIZE of 16kb. The peer has to adjust - /// its understanding of the capacity of the window, and that would be: - /// - /// ```notrust - /// default (64kb) - used (32kb) - settings_diff (64kb - 16kb): -16kb - /// ``` - window_size: Window, - - /// Window that we know about. - /// - /// This can go negative if a user declares a smaller target window than - /// the peer knows about. - available: Window, -} - -impl FlowControl { - pub fn new() -> FlowControl { - FlowControl { - window_size: Window(0), - available: Window(0), - } - } - - /// Returns the window size as known by the peer - pub fn window_size(&self) -> WindowSize { - self.window_size.as_size() - } - - /// Returns the window size available to the consumer - pub fn available(&self) -> Window { - self.available - } - - /// Returns true if there is unavailable window capacity - pub fn has_unavailable(&self) -> bool { - if self.window_size < 0 { - return false; - } - - self.window_size > self.available - } - - pub fn claim_capacity(&mut self, capacity: WindowSize) -> Result<(), Reason> { - self.available.decrease_by(capacity) - } - - pub fn assign_capacity(&mut self, capacity: WindowSize) -> Result<(), Reason> { - self.available.increase_by(capacity) - } - - /// If a WINDOW_UPDATE frame should be sent, returns a positive number - /// representing the increment to be used. 
- /// - /// If there is no available bytes to be reclaimed, or the number of - /// available bytes does not reach the threshold, this returns `None`. - /// - /// This represents pending outbound WINDOW_UPDATE frames. - pub fn unclaimed_capacity(&self) -> Option { - let available = self.available; - - if self.window_size >= available { - return None; - } - - let unclaimed = available.0 - self.window_size.0; - let threshold = self.window_size.0 / UNCLAIMED_DENOMINATOR * UNCLAIMED_NUMERATOR; - - if unclaimed < threshold { - None - } else { - Some(unclaimed as WindowSize) - } - } - - /// Increase the window size. - /// - /// This is called after receiving a WINDOW_UPDATE frame - pub fn inc_window(&mut self, sz: WindowSize) -> Result<(), Reason> { - let (val, overflow) = self.window_size.0.overflowing_add(sz as i32); - - if overflow { - return Err(Reason::FLOW_CONTROL_ERROR); - } - - if val > MAX_WINDOW_SIZE as i32 { - return Err(Reason::FLOW_CONTROL_ERROR); - } - - tracing::trace!( - "inc_window; sz={}; old={}; new={}", - sz, - self.window_size, - val - ); - - self.window_size = Window(val); - Ok(()) - } - - /// Decrement the send-side window size. - /// - /// This is called after receiving a SETTINGS frame with a lower - /// INITIAL_WINDOW_SIZE value. - pub fn dec_send_window(&mut self, sz: WindowSize) -> Result<(), Reason> { - tracing::trace!( - "dec_window; sz={}; window={}, available={}", - sz, - self.window_size, - self.available - ); - // ~~This should not be able to overflow `window_size` from the bottom.~~ wrong. it can. - self.window_size.decrease_by(sz)?; - Ok(()) - } - - /// Decrement the recv-side window size. - /// - /// This is called after receiving a SETTINGS ACK frame with a lower - /// INITIAL_WINDOW_SIZE value. 
- pub fn dec_recv_window(&mut self, sz: WindowSize) -> Result<(), Reason> { - tracing::trace!( - "dec_recv_window; sz={}; window={}, available={}", - sz, - self.window_size, - self.available - ); - // This should not be able to overflow `window_size` from the bottom. - self.window_size.decrease_by(sz)?; - self.available.decrease_by(sz)?; - Ok(()) - } - - /// Decrements the window reflecting data has actually been sent. The caller - /// must ensure that the window has capacity. - pub fn send_data(&mut self, sz: WindowSize) -> Result<(), Reason> { - tracing::trace!( - "send_data; sz={}; window={}; available={}", - sz, - self.window_size, - self.available - ); - - // If send size is zero it's meaningless to update flow control window - if sz > 0 { - // Ensure that the argument is correct - assert!(self.window_size.0 >= sz as i32); - - // Update values - self.window_size.decrease_by(sz)?; - self.available.decrease_by(sz)?; - } - Ok(()) - } -} - -/// The current capacity of a flow-controlled Window. -/// -/// This number can go negative when either side has used a certain amount -/// of capacity when the other side advertises a reduction in size. -/// -/// This type tries to centralize the knowledge of addition and subtraction -/// to this capacity, instead of having integer casts throughout the source. 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd)] -pub struct Window(i32); - -impl Window { - pub fn as_size(&self) -> WindowSize { - if self.0 < 0 { - 0 - } else { - self.0 as WindowSize - } - } - - pub fn checked_size(&self) -> WindowSize { - assert!(self.0 >= 0, "negative Window"); - self.0 as WindowSize - } - - pub fn decrease_by(&mut self, other: WindowSize) -> Result<(), Reason> { - if let Some(v) = self.0.checked_sub(other as i32) { - self.0 = v; - Ok(()) - } else { - Err(Reason::FLOW_CONTROL_ERROR) - } - } - - pub fn increase_by(&mut self, other: WindowSize) -> Result<(), Reason> { - let other = self.add(other)?; - self.0 = other.0; - Ok(()) - } - - pub fn add(&self, other: WindowSize) -> Result { - if let Some(v) = self.0.checked_add(other as i32) { - Ok(Self(v)) - } else { - Err(Reason::FLOW_CONTROL_ERROR) - } - } -} - -impl PartialEq for Window { - fn eq(&self, other: &usize) -> bool { - if self.0 < 0 { - false - } else { - (self.0 as usize).eq(other) - } - } -} - -impl PartialOrd for Window { - fn partial_cmp(&self, other: &usize) -> Option<::std::cmp::Ordering> { - if self.0 < 0 { - Some(::std::cmp::Ordering::Less) - } else { - (self.0 as usize).partial_cmp(other) - } - } -} - -impl fmt::Display for Window { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.0, f) - } -} - -impl From for isize { - fn from(w: Window) -> isize { - w.0 as isize - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/mod.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/mod.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -mod buffer; -mod counts; -mod flow_control; -mod prioritize; -mod recv; -mod send; -mod state; -mod store; -mod stream; -#[allow(clippy::module_inception)] -mod streams; - -pub(crate) use self::prioritize::Prioritized; -pub(crate) use 
self::recv::Open; -pub(crate) use self::send::PollReset; -pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams}; - -use self::buffer::Buffer; -use self::counts::Counts; -use self::flow_control::FlowControl; -use self::prioritize::Prioritize; -use self::recv::Recv; -use self::send::Send; -use self::state::State; -use self::store::Store; -use self::stream::Stream; - -use crate::frame::{StreamId, StreamIdOverflow}; -use crate::proto::*; - -use bytes::Bytes; -use std::time::Duration; - -#[derive(Debug)] -pub struct Config { - /// Initial window size of locally initiated streams - pub local_init_window_sz: WindowSize, - - /// Initial maximum number of locally initiated streams. - /// After receiving a Settings frame from the remote peer, - /// the connection will overwrite this value with the - /// MAX_CONCURRENT_STREAMS specified in the frame. - pub initial_max_send_streams: usize, - - /// Max amount of DATA bytes to buffer per stream. - pub local_max_buffer_size: usize, - - /// The stream ID to start the next local stream with - pub local_next_stream_id: StreamId, - - /// If the local peer is willing to receive push promises - pub local_push_enabled: bool, - - /// If extended connect protocol is enabled. - pub extended_connect_protocol_enabled: bool, - - /// How long a locally reset stream should ignore frames - pub local_reset_duration: Duration, - - /// Maximum number of locally reset streams to keep at a time - pub local_reset_max: usize, - - /// Maximum number of remotely reset "pending accept" streams to keep at a - /// time. Going over this number results in a connection error. 
- pub remote_reset_max: usize, - - /// Initial window size of remote initiated streams - pub remote_init_window_sz: WindowSize, - - /// Maximum number of remote initiated streams - pub remote_max_initiated: Option, -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/prioritize.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/prioritize.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/prioritize.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/prioritize.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,931 +0,0 @@ -use super::store::Resolve; -use super::*; - -use crate::frame::{Reason, StreamId}; - -use crate::codec::UserError; -use crate::codec::UserError::*; - -use bytes::buf::{Buf, Take}; -use std::{ - cmp::{self, Ordering}, - fmt, io, mem, - task::{Context, Poll, Waker}, -}; - -/// # Warning -/// -/// Queued streams are ordered by stream ID, as we need to ensure that -/// lower-numbered streams are sent headers before higher-numbered ones. -/// This is because "idle" stream IDs – those which have been initiated but -/// have yet to receive frames – will be implicitly closed on receipt of a -/// frame on a higher stream ID. If these queues was not ordered by stream -/// IDs, some mechanism would be necessary to ensure that the lowest-numbered] -/// idle stream is opened first. -#[derive(Debug)] -pub(super) struct Prioritize { - /// Queue of streams waiting for socket capacity to send a frame. - pending_send: store::Queue, - - /// Queue of streams waiting for window capacity to produce data. - pending_capacity: store::Queue, - - /// Streams waiting for capacity due to max concurrency - /// - /// The `SendRequest` handle is `Clone`. This enables initiating requests - /// from many tasks. However, offering this capability while supporting - /// backpressure at some level is tricky. 
If there are many `SendRequest` - /// handles and a single stream becomes available, which handle gets - /// assigned that stream? Maybe that handle is no longer ready to send a - /// request. - /// - /// The strategy used is to allow each `SendRequest` handle one buffered - /// request. A `SendRequest` handle is ready to send a request if it has no - /// associated buffered requests. This is the same strategy as `mpsc` in the - /// futures library. - pending_open: store::Queue, - - /// Connection level flow control governing sent data - flow: FlowControl, - - /// Stream ID of the last stream opened. - last_opened_id: StreamId, - - /// What `DATA` frame is currently being sent in the codec. - in_flight_data_frame: InFlightData, - - /// The maximum amount of bytes a stream should buffer. - max_buffer_size: usize, -} - -#[derive(Debug, Eq, PartialEq)] -enum InFlightData { - /// There is no `DATA` frame in flight. - Nothing, - /// There is a `DATA` frame in flight belonging to the given stream. - DataFrame(store::Key), - /// There was a `DATA` frame, but the stream's queue was since cleared. 
- Drop, -} - -pub(crate) struct Prioritized { - // The buffer - inner: Take, - - end_of_stream: bool, - - // The stream that this is associated with - stream: store::Key, -} - -// ===== impl Prioritize ===== - -impl Prioritize { - pub fn new(config: &Config) -> Prioritize { - let mut flow = FlowControl::new(); - - flow.inc_window(config.remote_init_window_sz) - .expect("invalid initial window size"); - - // TODO: proper error handling - let _res = flow.assign_capacity(config.remote_init_window_sz); - debug_assert!(_res.is_ok()); - - tracing::trace!("Prioritize::new; flow={:?}", flow); - - Prioritize { - pending_send: store::Queue::new(), - pending_capacity: store::Queue::new(), - pending_open: store::Queue::new(), - flow, - last_opened_id: StreamId::ZERO, - in_flight_data_frame: InFlightData::Nothing, - max_buffer_size: config.local_max_buffer_size, - } - } - - pub(crate) fn max_buffer_size(&self) -> usize { - self.max_buffer_size - } - - /// Queue a frame to be sent to the remote - pub fn queue_frame( - &mut self, - frame: Frame, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - task: &mut Option, - ) { - let span = tracing::trace_span!("Prioritize::queue_frame", ?stream.id); - let _e = span.enter(); - // Queue the frame in the buffer - stream.pending_send.push_back(buffer, frame); - self.schedule_send(stream, task); - } - - pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option) { - // If the stream is waiting to be opened, nothing more to do. - if stream.is_send_ready() { - tracing::trace!(?stream.id, "schedule_send"); - // Queue the stream - self.pending_send.push(stream); - - // Notify the connection. 
- if let Some(task) = task.take() { - task.wake(); - } - } - } - - pub fn queue_open(&mut self, stream: &mut store::Ptr) { - self.pending_open.push(stream); - } - - /// Send a data frame - pub fn send_data( - &mut self, - frame: frame::Data, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - counts: &mut Counts, - task: &mut Option, - ) -> Result<(), UserError> - where - B: Buf, - { - let sz = frame.payload().remaining(); - - if sz > MAX_WINDOW_SIZE as usize { - return Err(UserError::PayloadTooBig); - } - - let sz = sz as WindowSize; - - if !stream.state.is_send_streaming() { - if stream.state.is_closed() { - return Err(InactiveStreamId); - } else { - return Err(UnexpectedFrameType); - } - } - - // Update the buffered data counter - stream.buffered_send_data += sz as usize; - - let span = - tracing::trace_span!("send_data", sz, requested = stream.requested_send_capacity); - let _e = span.enter(); - tracing::trace!(buffered = stream.buffered_send_data); - - // Implicitly request more send capacity if not enough has been - // requested yet. - if (stream.requested_send_capacity as usize) < stream.buffered_send_data { - // Update the target requested capacity - stream.requested_send_capacity = - cmp::min(stream.buffered_send_data, WindowSize::MAX as usize) as WindowSize; - - self.try_assign_capacity(stream); - } - - if frame.is_end_stream() { - stream.state.send_close(); - self.reserve_capacity(0, stream, counts); - } - - tracing::trace!( - available = %stream.send_flow.available(), - buffered = stream.buffered_send_data, - ); - - // The `stream.buffered_send_data == 0` check is here so that, if a zero - // length data frame is queued to the front (there is no previously - // queued data), it gets sent out immediately even if there is no - // available send window. - // - // Sending out zero length data frames can be done to signal - // end-of-stream. 
- // - if stream.send_flow.available() > 0 || stream.buffered_send_data == 0 { - // The stream currently has capacity to send the data frame, so - // queue it up and notify the connection task. - self.queue_frame(frame.into(), buffer, stream, task); - } else { - // The stream has no capacity to send the frame now, save it but - // don't notify the connection task. Once additional capacity - // becomes available, the frame will be flushed. - stream.pending_send.push_back(buffer, frame.into()); - } - - Ok(()) - } - - /// Request capacity to send data - pub fn reserve_capacity( - &mut self, - capacity: WindowSize, - stream: &mut store::Ptr, - counts: &mut Counts, - ) { - let span = tracing::trace_span!( - "reserve_capacity", - ?stream.id, - requested = capacity, - effective = (capacity as usize) + stream.buffered_send_data, - curr = stream.requested_send_capacity - ); - let _e = span.enter(); - - // Actual capacity is `capacity` + the current amount of buffered data. - // If it were less, then we could never send out the buffered data. - let capacity = (capacity as usize) + stream.buffered_send_data; - - match capacity.cmp(&(stream.requested_send_capacity as usize)) { - Ordering::Equal => { - // Nothing to do - } - Ordering::Less => { - // Update the target requested capacity - stream.requested_send_capacity = capacity as WindowSize; - - // Currently available capacity assigned to the stream - let available = stream.send_flow.available().as_size(); - - // If the stream has more assigned capacity than requested, reclaim - // some for the connection - if available as usize > capacity { - let diff = available - capacity as WindowSize; - - // TODO: proper error handling - let _res = stream.send_flow.claim_capacity(diff); - debug_assert!(_res.is_ok()); - - self.assign_connection_capacity(diff, stream, counts); - } - } - Ordering::Greater => { - // If trying to *add* capacity, but the stream send side is closed, - // there's nothing to be done. 
- if stream.state.is_send_closed() { - return; - } - - // Update the target requested capacity - stream.requested_send_capacity = - cmp::min(capacity, WindowSize::MAX as usize) as WindowSize; - - // Try to assign additional capacity to the stream. If none is - // currently available, the stream will be queued to receive some - // when more becomes available. - self.try_assign_capacity(stream); - } - } - } - - pub fn recv_stream_window_update( - &mut self, - inc: WindowSize, - stream: &mut store::Ptr, - ) -> Result<(), Reason> { - let span = tracing::trace_span!( - "recv_stream_window_update", - ?stream.id, - ?stream.state, - inc, - flow = ?stream.send_flow - ); - let _e = span.enter(); - - if stream.state.is_send_closed() && stream.buffered_send_data == 0 { - // We can't send any data, so don't bother doing anything else. - return Ok(()); - } - - // Update the stream level flow control. - stream.send_flow.inc_window(inc)?; - - // If the stream is waiting on additional capacity, then this will - // assign it (if available on the connection) and notify the producer - self.try_assign_capacity(stream); - - Ok(()) - } - - pub fn recv_connection_window_update( - &mut self, - inc: WindowSize, - store: &mut Store, - counts: &mut Counts, - ) -> Result<(), Reason> { - // Update the connection's window - self.flow.inc_window(inc)?; - - self.assign_connection_capacity(inc, store, counts); - Ok(()) - } - - /// Reclaim all capacity assigned to the stream and re-assign it to the - /// connection - pub fn reclaim_all_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { - let available = stream.send_flow.available().as_size(); - if available > 0 { - // TODO: proper error handling - let _res = stream.send_flow.claim_capacity(available); - debug_assert!(_res.is_ok()); - // Re-assign all capacity to the connection - self.assign_connection_capacity(available, stream, counts); - } - } - - /// Reclaim just reserved capacity, not buffered capacity, and re-assign - /// it to 
the connection - pub fn reclaim_reserved_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { - // only reclaim requested capacity that isn't already buffered - if stream.requested_send_capacity as usize > stream.buffered_send_data { - let reserved = stream.requested_send_capacity - stream.buffered_send_data as WindowSize; - - // TODO: proper error handling - let _res = stream.send_flow.claim_capacity(reserved); - debug_assert!(_res.is_ok()); - self.assign_connection_capacity(reserved, stream, counts); - } - } - - pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) { - let span = tracing::trace_span!("clear_pending_capacity"); - let _e = span.enter(); - while let Some(stream) = self.pending_capacity.pop(store) { - counts.transition(stream, |_, stream| { - tracing::trace!(?stream.id, "clear_pending_capacity"); - }) - } - } - - pub fn assign_connection_capacity( - &mut self, - inc: WindowSize, - store: &mut R, - counts: &mut Counts, - ) where - R: Resolve, - { - let span = tracing::trace_span!("assign_connection_capacity", inc); - let _e = span.enter(); - - // TODO: proper error handling - let _res = self.flow.assign_capacity(inc); - debug_assert!(_res.is_ok()); - - // Assign newly acquired capacity to streams pending capacity. - while self.flow.available() > 0 { - let stream = match self.pending_capacity.pop(store) { - Some(stream) => stream, - None => return, - }; - - // Streams pending capacity may have been reset before capacity - // became available. In that case, the stream won't want any - // capacity, and so we shouldn't "transition" on it, but just evict - // it and continue the loop. - if !(stream.state.is_send_streaming() || stream.buffered_send_data > 0) { - continue; - } - - counts.transition(stream, |_, stream| { - // Try to assign capacity to the stream. This will also re-queue the - // stream if there isn't enough connection level capacity to fulfill - // the capacity request. 
- self.try_assign_capacity(stream); - }) - } - } - - /// Request capacity to send data - fn try_assign_capacity(&mut self, stream: &mut store::Ptr) { - let total_requested = stream.requested_send_capacity; - - // Total requested should never go below actual assigned - // (Note: the window size can go lower than assigned) - debug_assert!(stream.send_flow.available() <= total_requested as usize); - - // The amount of additional capacity that the stream requests. - // Don't assign more than the window has available! - let additional = cmp::min( - total_requested - stream.send_flow.available().as_size(), - // Can't assign more than what is available - stream.send_flow.window_size() - stream.send_flow.available().as_size(), - ); - let span = tracing::trace_span!("try_assign_capacity", ?stream.id); - let _e = span.enter(); - tracing::trace!( - requested = total_requested, - additional, - buffered = stream.buffered_send_data, - window = stream.send_flow.window_size(), - conn = %self.flow.available() - ); - - if additional == 0 { - // Nothing more to do - return; - } - - // If the stream has requested capacity, then it must be in the - // streaming state (more data could be sent) or there is buffered data - // waiting to be sent. - debug_assert!( - stream.state.is_send_streaming() || stream.buffered_send_data > 0, - "state={:?}", - stream.state - ); - - // The amount of currently available capacity on the connection - let conn_available = self.flow.available().as_size(); - - // First check if capacity is immediately available - if conn_available > 0 { - // The amount of capacity to assign to the stream - // TODO: Should prioritization factor into this? 
- let assign = cmp::min(conn_available, additional); - - tracing::trace!(capacity = assign, "assigning"); - - // Assign the capacity to the stream - stream.assign_capacity(assign, self.max_buffer_size); - - // Claim the capacity from the connection - // TODO: proper error handling - let _res = self.flow.claim_capacity(assign); - debug_assert!(_res.is_ok()); - } - - tracing::trace!( - available = %stream.send_flow.available(), - requested = stream.requested_send_capacity, - buffered = stream.buffered_send_data, - has_unavailable = %stream.send_flow.has_unavailable() - ); - - if stream.send_flow.available() < stream.requested_send_capacity as usize - && stream.send_flow.has_unavailable() - { - // The stream requires additional capacity and the stream's - // window has available capacity, but the connection window - // does not. - // - // In this case, the stream needs to be queued up for when the - // connection has more capacity. - self.pending_capacity.push(stream); - } - - // If data is buffered and the stream is send ready, then - // schedule the stream for execution - if stream.buffered_send_data > 0 && stream.is_send_ready() { - // TODO: This assertion isn't *exactly* correct. There can still be - // buffered send data while the stream's pending send queue is - // empty. This can happen when a large data frame is in the process - // of being **partially** sent. Once the window has been sent, the - // data frame will be returned to the prioritization layer to be - // re-scheduled. - // - // That said, it would be nice to figure out how to make this - // assertion correctly. 
- // - // debug_assert!(!stream.pending_send.is_empty()); - - self.pending_send.push(stream); - } - } - - pub fn poll_complete( - &mut self, - cx: &mut Context, - buffer: &mut Buffer>, - store: &mut Store, - counts: &mut Counts, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, - { - // Ensure codec is ready - ready!(dst.poll_ready(cx))?; - - // Reclaim any frame that has previously been written - self.reclaim_frame(buffer, store, dst); - - // The max frame length - let max_frame_len = dst.max_send_frame_size(); - - tracing::trace!("poll_complete"); - - loop { - if let Some(mut stream) = self.pop_pending_open(store, counts) { - self.pending_send.push_front(&mut stream); - } - - match self.pop_frame(buffer, store, max_frame_len, counts) { - Some(frame) => { - tracing::trace!(?frame, "writing"); - - debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing); - if let Frame::Data(ref frame) = frame { - self.in_flight_data_frame = InFlightData::DataFrame(frame.payload().stream); - } - dst.buffer(frame).expect("invalid frame"); - - // Ensure the codec is ready to try the loop again. - ready!(dst.poll_ready(cx))?; - - // Because, always try to reclaim... - self.reclaim_frame(buffer, store, dst); - } - None => { - // Try to flush the codec. - ready!(dst.flush(cx))?; - - // This might release a data frame... - if !self.reclaim_frame(buffer, store, dst) { - return Poll::Ready(Ok(())); - } - - // No need to poll ready as poll_complete() does this for - // us... - } - } - } - } - - /// Tries to reclaim a pending data frame from the codec. - /// - /// Returns true if a frame was reclaimed. - /// - /// When a data frame is written to the codec, it may not be written in its - /// entirety (large chunks are split up into potentially many data frames). - /// In this case, the stream needs to be reprioritized. 
- fn reclaim_frame( - &mut self, - buffer: &mut Buffer>, - store: &mut Store, - dst: &mut Codec>, - ) -> bool - where - B: Buf, - { - let span = tracing::trace_span!("try_reclaim_frame"); - let _e = span.enter(); - - // First check if there are any data chunks to take back - if let Some(frame) = dst.take_last_data_frame() { - self.reclaim_frame_inner(buffer, store, frame) - } else { - false - } - } - - fn reclaim_frame_inner( - &mut self, - buffer: &mut Buffer>, - store: &mut Store, - frame: frame::Data>, - ) -> bool - where - B: Buf, - { - tracing::trace!( - ?frame, - sz = frame.payload().inner.get_ref().remaining(), - "reclaimed" - ); - - let mut eos = false; - let key = frame.payload().stream; - - match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { - InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), - InFlightData::Drop => { - tracing::trace!("not reclaiming frame for cancelled stream"); - return false; - } - InFlightData::DataFrame(k) => { - debug_assert_eq!(k, key); - } - } - - let mut frame = frame.map(|prioritized| { - // TODO: Ensure fully written - eos = prioritized.end_of_stream; - prioritized.inner.into_inner() - }); - - if frame.payload().has_remaining() { - let mut stream = store.resolve(key); - - if eos { - frame.set_end_stream(true); - } - - self.push_back_frame(frame.into(), buffer, &mut stream); - - return true; - } - - false - } - - /// Push the frame to the front of the stream's deque, scheduling the - /// stream if needed. 
- fn push_back_frame( - &mut self, - frame: Frame, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - ) { - // Push the frame to the front of the stream's deque - stream.pending_send.push_front(buffer, frame); - - // If needed, schedule the sender - if stream.send_flow.available() > 0 { - debug_assert!(!stream.pending_send.is_empty()); - self.pending_send.push(stream); - } - } - - pub fn clear_queue(&mut self, buffer: &mut Buffer>, stream: &mut store::Ptr) { - let span = tracing::trace_span!("clear_queue", ?stream.id); - let _e = span.enter(); - - // TODO: make this more efficient? - while let Some(frame) = stream.pending_send.pop_front(buffer) { - tracing::trace!(?frame, "dropping"); - } - - stream.buffered_send_data = 0; - stream.requested_send_capacity = 0; - if let InFlightData::DataFrame(key) = self.in_flight_data_frame { - if stream.key() == key { - // This stream could get cleaned up now - don't allow the buffered frame to get reclaimed. - self.in_flight_data_frame = InFlightData::Drop; - } - } - } - - pub fn clear_pending_send(&mut self, store: &mut Store, counts: &mut Counts) { - while let Some(stream) = self.pending_send.pop(store) { - let is_pending_reset = stream.is_pending_reset_expiration(); - counts.transition_after(stream, is_pending_reset); - } - } - - pub fn clear_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { - while let Some(stream) = self.pending_open.pop(store) { - let is_pending_reset = stream.is_pending_reset_expiration(); - counts.transition_after(stream, is_pending_reset); - } - } - - fn pop_frame( - &mut self, - buffer: &mut Buffer>, - store: &mut Store, - max_len: usize, - counts: &mut Counts, - ) -> Option>> - where - B: Buf, - { - let span = tracing::trace_span!("pop_frame"); - let _e = span.enter(); - - loop { - match self.pending_send.pop(store) { - Some(mut stream) => { - let span = tracing::trace_span!("popped", ?stream.id, ?stream.state); - let _e = span.enter(); - - // It's possible that this stream, besides 
having data to send, - // is also queued to send a reset, and thus is already in the queue - // to wait for "some time" after a reset. - // - // To be safe, we just always ask the stream. - let is_pending_reset = stream.is_pending_reset_expiration(); - - tracing::trace!(is_pending_reset); - - let frame = match stream.pending_send.pop_front(buffer) { - Some(Frame::Data(mut frame)) => { - // Get the amount of capacity remaining for stream's - // window. - let stream_capacity = stream.send_flow.available(); - let sz = frame.payload().remaining(); - - tracing::trace!( - sz, - eos = frame.is_end_stream(), - window = %stream_capacity, - available = %stream.send_flow.available(), - requested = stream.requested_send_capacity, - buffered = stream.buffered_send_data, - "data frame" - ); - - // Zero length data frames always have capacity to - // be sent. - if sz > 0 && stream_capacity == 0 { - tracing::trace!("stream capacity is 0"); - - // Ensure that the stream is waiting for - // connection level capacity - // - // TODO: uncomment - // debug_assert!(stream.is_pending_send_capacity); - - // The stream has no more capacity, this can - // happen if the remote reduced the stream - // window. In this case, we need to buffer the - // frame and wait for a window update... - stream.pending_send.push_front(buffer, frame.into()); - - continue; - } - - // Only send up to the max frame length - let len = cmp::min(sz, max_len); - - // Only send up to the stream's window capacity - let len = - cmp::min(len, stream_capacity.as_size() as usize) as WindowSize; - - // There *must* be be enough connection level - // capacity at this point. - debug_assert!(len <= self.flow.window_size()); - - // Check if the stream level window the peer knows is available. In some - // scenarios, maybe the window we know is available but the window which - // peer knows is not. 
- if len > 0 && len > stream.send_flow.window_size() { - stream.pending_send.push_front(buffer, frame.into()); - continue; - } - - tracing::trace!(len, "sending data frame"); - - // Update the flow control - tracing::trace_span!("updating stream flow").in_scope(|| { - stream.send_data(len, self.max_buffer_size); - - // Assign the capacity back to the connection that - // was just consumed from the stream in the previous - // line. - // TODO: proper error handling - let _res = self.flow.assign_capacity(len); - debug_assert!(_res.is_ok()); - }); - - let (eos, len) = tracing::trace_span!("updating connection flow") - .in_scope(|| { - // TODO: proper error handling - let _res = self.flow.send_data(len); - debug_assert!(_res.is_ok()); - - // Wrap the frame's data payload to ensure that the - // correct amount of data gets written. - - let eos = frame.is_end_stream(); - let len = len as usize; - - if frame.payload().remaining() > len { - frame.set_end_stream(false); - } - (eos, len) - }); - - Frame::Data(frame.map(|buf| Prioritized { - inner: buf.take(len), - end_of_stream: eos, - stream: stream.key(), - })) - } - Some(Frame::PushPromise(pp)) => { - let mut pushed = - stream.store_mut().find_mut(&pp.promised_id()).unwrap(); - pushed.is_pending_push = false; - // Transition stream from pending_push to pending_open - // if possible - if !pushed.pending_send.is_empty() { - if counts.can_inc_num_send_streams() { - counts.inc_num_send_streams(&mut pushed); - self.pending_send.push(&mut pushed); - } else { - self.queue_open(&mut pushed); - } - } - Frame::PushPromise(pp) - } - Some(frame) => frame.map(|_| { - unreachable!( - "Frame::map closure will only be called \ - on DATA frames." 
- ) - }), - None => { - if let Some(reason) = stream.state.get_scheduled_reset() { - let stream_id = stream.id; - stream - .state - .set_reset(stream_id, reason, Initiator::Library); - - let frame = frame::Reset::new(stream.id, reason); - Frame::Reset(frame) - } else { - // If the stream receives a RESET from the peer, it may have - // had data buffered to be sent, but all the frames are cleared - // in clear_queue(). Instead of doing O(N) traversal through queue - // to remove, lets just ignore the stream here. - tracing::trace!("removing dangling stream from pending_send"); - // Since this should only happen as a consequence of `clear_queue`, - // we must be in a closed state of some kind. - debug_assert!(stream.state.is_closed()); - counts.transition_after(stream, is_pending_reset); - continue; - } - } - }; - - tracing::trace!("pop_frame; frame={:?}", frame); - - if cfg!(debug_assertions) && stream.state.is_idle() { - debug_assert!(stream.id > self.last_opened_id); - self.last_opened_id = stream.id; - } - - if !stream.pending_send.is_empty() || stream.state.is_scheduled_reset() { - // TODO: Only requeue the sender IF it is ready to send - // the next frame. i.e. don't requeue it if the next - // frame is a data frame and the stream does not have - // any more capacity. 
- self.pending_send.push(&mut stream); - } - - counts.transition_after(stream, is_pending_reset); - - return Some(frame); - } - None => return None, - } - } - } - - fn pop_pending_open<'s>( - &mut self, - store: &'s mut Store, - counts: &mut Counts, - ) -> Option> { - tracing::trace!("schedule_pending_open"); - // check for any pending open streams - if counts.can_inc_num_send_streams() { - if let Some(mut stream) = self.pending_open.pop(store) { - tracing::trace!("schedule_pending_open; stream={:?}", stream.id); - - counts.inc_num_send_streams(&mut stream); - stream.notify_send(); - return Some(stream); - } - } - - None - } -} - -// ===== impl Prioritized ===== - -impl Buf for Prioritized -where - B: Buf, -{ - fn remaining(&self) -> usize { - self.inner.remaining() - } - - fn chunk(&self) -> &[u8] { - self.inner.chunk() - } - - fn chunks_vectored<'a>(&'a self, dst: &mut [std::io::IoSlice<'a>]) -> usize { - self.inner.chunks_vectored(dst) - } - - fn advance(&mut self, cnt: usize) { - self.inner.advance(cnt) - } -} - -impl fmt::Debug for Prioritized { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Prioritized") - .field("remaining", &self.inner.get_ref().remaining()) - .field("end_of_stream", &self.end_of_stream) - .field("stream", &self.stream) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/recv.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/recv.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/recv.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/recv.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1166 +0,0 @@ -use super::*; -use crate::codec::UserError; -use crate::frame::{self, PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE}; -use crate::proto::{self, Error}; - -use http::{HeaderMap, Request, Response}; - -use std::cmp::Ordering; -use std::io; -use std::task::{Context, Poll, Waker}; -use std::time::{Duration, Instant}; - 
-#[derive(Debug)] -pub(super) struct Recv { - /// Initial window size of remote initiated streams - init_window_sz: WindowSize, - - /// Connection level flow control governing received data - flow: FlowControl, - - /// Amount of connection window capacity currently used by outstanding streams. - in_flight_data: WindowSize, - - /// The lowest stream ID that is still idle - next_stream_id: Result, - - /// The stream ID of the last processed stream - last_processed_id: StreamId, - - /// Any streams with a higher ID are ignored. - /// - /// This starts as MAX, but is lowered when a GOAWAY is received. - /// - /// > After sending a GOAWAY frame, the sender can discard frames for - /// > streams initiated by the receiver with identifiers higher than - /// > the identified last stream. - max_stream_id: StreamId, - - /// Streams that have pending window updates - pending_window_updates: store::Queue, - - /// New streams to be accepted - pending_accept: store::Queue, - - /// Locally reset streams that should be reaped when they expire - pending_reset_expired: store::Queue, - - /// How long locally reset streams should ignore received frames - reset_duration: Duration, - - /// Holds frames that are waiting to be read - buffer: Buffer, - - /// Refused StreamId, this represents a frame that must be sent out. - refused: Option, - - /// If push promises are allowed to be received. - is_push_enabled: bool, - - /// If extended connect protocol is enabled. 
- is_extended_connect_protocol_enabled: bool, -} - -#[derive(Debug)] -pub(super) enum Event { - Headers(peer::PollMessage), - Data(Bytes), - Trailers(HeaderMap), -} - -#[derive(Debug)] -pub(super) enum RecvHeaderBlockError { - Oversize(T), - State(Error), -} - -#[derive(Debug)] -pub(crate) enum Open { - PushPromise, - Headers, -} - -impl Recv { - pub fn new(peer: peer::Dyn, config: &Config) -> Self { - let next_stream_id = if peer.is_server() { 1 } else { 2 }; - - let mut flow = FlowControl::new(); - - // connections always have the default window size, regardless of - // settings - flow.inc_window(DEFAULT_INITIAL_WINDOW_SIZE) - .expect("invalid initial remote window size"); - flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE).unwrap(); - - Recv { - init_window_sz: config.local_init_window_sz, - flow, - in_flight_data: 0 as WindowSize, - next_stream_id: Ok(next_stream_id.into()), - pending_window_updates: store::Queue::new(), - last_processed_id: StreamId::ZERO, - max_stream_id: StreamId::MAX, - pending_accept: store::Queue::new(), - pending_reset_expired: store::Queue::new(), - reset_duration: config.local_reset_duration, - buffer: Buffer::new(), - refused: None, - is_push_enabled: config.local_push_enabled, - is_extended_connect_protocol_enabled: config.extended_connect_protocol_enabled, - } - } - - /// Returns the initial receive window size - pub fn init_window_sz(&self) -> WindowSize { - self.init_window_sz - } - - /// Returns the ID of the last processed stream - pub fn last_processed_id(&self) -> StreamId { - self.last_processed_id - } - - /// Update state reflecting a new, remotely opened stream - /// - /// Returns the stream state if successful. 
`None` if refused - pub fn open( - &mut self, - id: StreamId, - mode: Open, - counts: &mut Counts, - ) -> Result, Error> { - assert!(self.refused.is_none()); - - counts.peer().ensure_can_open(id, mode)?; - - let next_id = self.next_stream_id()?; - if id < next_id { - proto_err!(conn: "id ({:?}) < next_id ({:?})", id, next_id); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - self.next_stream_id = id.next_id(); - - if !counts.can_inc_num_recv_streams() { - self.refused = Some(id); - return Ok(None); - } - - Ok(Some(id)) - } - - /// Transition the stream state based on receiving headers - /// - /// The caller ensures that the frame represents headers and not trailers. - pub fn recv_headers( - &mut self, - frame: frame::Headers, - stream: &mut store::Ptr, - counts: &mut Counts, - ) -> Result<(), RecvHeaderBlockError>> { - tracing::trace!("opening stream; init_window={}", self.init_window_sz); - let is_initial = stream.state.recv_open(&frame)?; - - if is_initial { - // TODO: be smarter about this logic - if frame.stream_id() > self.last_processed_id { - self.last_processed_id = frame.stream_id(); - } - - // Increment the number of concurrent streams - counts.inc_num_recv_streams(stream); - } - - if !stream.content_length.is_head() { - use super::stream::ContentLength; - use http::header; - - if let Some(content_length) = frame.fields().get(header::CONTENT_LENGTH) { - let content_length = match frame::parse_u64(content_length.as_bytes()) { - Ok(v) => v, - Err(_) => { - proto_err!(stream: "could not parse content-length; stream={:?}", stream.id); - return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); - } - }; - - stream.content_length = ContentLength::Remaining(content_length); - } - } - - if frame.is_over_size() { - // A frame is over size if the decoded header block was bigger than - // SETTINGS_MAX_HEADER_LIST_SIZE. 
- // - // > A server that receives a larger header block than it is willing - // > to handle can send an HTTP 431 (Request Header Fields Too - // > Large) status code [RFC6585]. A client can discard responses - // > that it cannot process. - // - // So, if peer is a server, we'll send a 431. In either case, - // an error is recorded, which will send a REFUSED_STREAM, - // since we don't want any of the data frames either. - tracing::debug!( - "stream error REQUEST_HEADER_FIELDS_TOO_LARGE -- \ - recv_headers: frame is over size; stream={:?}", - stream.id - ); - return if counts.peer().is_server() && is_initial { - let mut res = frame::Headers::new( - stream.id, - frame::Pseudo::response(::http::StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE), - HeaderMap::new(), - ); - res.set_end_stream(); - Err(RecvHeaderBlockError::Oversize(Some(res))) - } else { - Err(RecvHeaderBlockError::Oversize(None)) - }; - } - - let stream_id = frame.stream_id(); - let (pseudo, fields) = frame.into_parts(); - - if pseudo.protocol.is_some() - && counts.peer().is_server() - && !self.is_extended_connect_protocol_enabled - { - proto_err!(stream: "cannot use :protocol if extended connect protocol is disabled; stream={:?}", stream.id); - return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); - } - - if pseudo.status.is_some() && counts.peer().is_server() { - proto_err!(stream: "cannot use :status header for requests; stream={:?}", stream.id); - return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); - } - - if !pseudo.is_informational() { - let message = counts - .peer() - .convert_poll_message(pseudo, fields, stream_id)?; - - // Push the frame onto the stream's recv buffer - stream - .pending_recv - .push_back(&mut self.buffer, Event::Headers(message)); - stream.notify_recv(); - - // Only servers can receive a headers frame that initiates the stream. - // This is verified in `Streams` before calling this function. 
- if counts.peer().is_server() { - // Correctness: never push a stream to `pending_accept` without having the - // corresponding headers frame pushed to `stream.pending_recv`. - self.pending_accept.push(stream); - } - } - - Ok(()) - } - - /// Called by the server to get the request - /// - /// # Panics - /// - /// Panics if `stream.pending_recv` has no `Event::Headers` queued. - /// - pub fn take_request(&mut self, stream: &mut store::Ptr) -> Request<()> { - use super::peer::PollMessage::*; - - match stream.pending_recv.pop_front(&mut self.buffer) { - Some(Event::Headers(Server(request))) => request, - _ => unreachable!("server stream queue must start with Headers"), - } - } - - /// Called by the client to get pushed response - pub fn poll_pushed( - &mut self, - cx: &Context, - stream: &mut store::Ptr, - ) -> Poll, store::Key), proto::Error>>> { - use super::peer::PollMessage::*; - - let mut ppp = stream.pending_push_promises.take(); - let pushed = ppp.pop(stream.store_mut()).map(|mut pushed| { - match pushed.pending_recv.pop_front(&mut self.buffer) { - Some(Event::Headers(Server(headers))) => (headers, pushed.key()), - // When frames are pushed into the queue, it is verified that - // the first frame is a HEADERS frame. - _ => panic!("Headers not set on pushed stream"), - } - }); - stream.pending_push_promises = ppp; - if let Some(p) = pushed { - Poll::Ready(Some(Ok(p))) - } else { - let is_open = stream.state.ensure_recv_open()?; - - if is_open { - stream.recv_task = Some(cx.waker().clone()); - Poll::Pending - } else { - Poll::Ready(None) - } - } - } - - /// Called by the client to get the response - pub fn poll_response( - &mut self, - cx: &Context, - stream: &mut store::Ptr, - ) -> Poll, proto::Error>> { - use super::peer::PollMessage::*; - - // If the buffer is not empty, then the first frame must be a HEADERS - // frame or the user violated the contract. 
- match stream.pending_recv.pop_front(&mut self.buffer) { - Some(Event::Headers(Client(response))) => Poll::Ready(Ok(response)), - Some(_) => panic!("poll_response called after response returned"), - None => { - if !stream.state.ensure_recv_open()? { - proto_err!(stream: "poll_response: stream={:?} is not opened;", stream.id); - return Poll::Ready(Err(Error::library_reset( - stream.id, - Reason::PROTOCOL_ERROR, - ))); - } - - stream.recv_task = Some(cx.waker().clone()); - Poll::Pending - } - } - } - - /// Transition the stream based on receiving trailers - pub fn recv_trailers( - &mut self, - frame: frame::Headers, - stream: &mut store::Ptr, - ) -> Result<(), Error> { - // Transition the state - stream.state.recv_close()?; - - if stream.ensure_content_length_zero().is_err() { - proto_err!(stream: "recv_trailers: content-length is not zero; stream={:?};", stream.id); - return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); - } - - let trailers = frame.into_fields(); - - // Push the frame onto the stream's recv buffer - stream - .pending_recv - .push_back(&mut self.buffer, Event::Trailers(trailers)); - stream.notify_recv(); - - Ok(()) - } - - /// Releases capacity of the connection - pub fn release_connection_capacity(&mut self, capacity: WindowSize, task: &mut Option) { - tracing::trace!( - "release_connection_capacity; size={}, connection in_flight_data={}", - capacity, - self.in_flight_data, - ); - - // Decrement in-flight data - self.in_flight_data -= capacity; - - // Assign capacity to connection - // TODO: proper error handling - let _res = self.flow.assign_capacity(capacity); - debug_assert!(_res.is_ok()); - - if self.flow.unclaimed_capacity().is_some() { - if let Some(task) = task.take() { - task.wake(); - } - } - } - - /// Releases capacity back to the connection & stream - pub fn release_capacity( - &mut self, - capacity: WindowSize, - stream: &mut store::Ptr, - task: &mut Option, - ) -> Result<(), UserError> { - 
tracing::trace!("release_capacity; size={}", capacity); - - if capacity > stream.in_flight_recv_data { - return Err(UserError::ReleaseCapacityTooBig); - } - - self.release_connection_capacity(capacity, task); - - // Decrement in-flight data - stream.in_flight_recv_data -= capacity; - - // Assign capacity to stream - // TODO: proper error handling - let _res = stream.recv_flow.assign_capacity(capacity); - debug_assert!(_res.is_ok()); - - if stream.recv_flow.unclaimed_capacity().is_some() { - // Queue the stream for sending the WINDOW_UPDATE frame. - self.pending_window_updates.push(stream); - - if let Some(task) = task.take() { - task.wake(); - } - } - - Ok(()) - } - - /// Release any unclaimed capacity for a closed stream. - pub fn release_closed_capacity(&mut self, stream: &mut store::Ptr, task: &mut Option) { - debug_assert_eq!(stream.ref_count, 0); - - if stream.in_flight_recv_data == 0 { - return; - } - - tracing::trace!( - "auto-release closed stream ({:?}) capacity: {:?}", - stream.id, - stream.in_flight_recv_data, - ); - - self.release_connection_capacity(stream.in_flight_recv_data, task); - stream.in_flight_recv_data = 0; - - self.clear_recv_buffer(stream); - } - - /// Set the "target" connection window size. - /// - /// By default, all new connections start with 64kb of window size. As - /// streams used and release capacity, we will send WINDOW_UPDATEs for the - /// connection to bring it back up to the initial "target". - /// - /// Setting a target means that we will try to tell the peer about - /// WINDOW_UPDATEs so the peer knows it has about `target` window to use - /// for the whole connection. - /// - /// The `task` is an optional parked task for the `Connection` that might - /// be blocked on needing more window capacity. 
- pub fn set_target_connection_window( - &mut self, - target: WindowSize, - task: &mut Option, - ) -> Result<(), Reason> { - tracing::trace!( - "set_target_connection_window; target={}; available={}, reserved={}", - target, - self.flow.available(), - self.in_flight_data, - ); - - // The current target connection window is our `available` plus any - // in-flight data reserved by streams. - // - // Update the flow controller with the difference between the new - // target and the current target. - let current = self - .flow - .available() - .add(self.in_flight_data)? - .checked_size(); - if target > current { - self.flow.assign_capacity(target - current)?; - } else { - self.flow.claim_capacity(current - target)?; - } - - // If changing the target capacity means we gained a bunch of capacity, - // enough that we went over the update threshold, then schedule sending - // a connection WINDOW_UPDATE. - if self.flow.unclaimed_capacity().is_some() { - if let Some(task) = task.take() { - task.wake(); - } - } - Ok(()) - } - - pub(crate) fn apply_local_settings( - &mut self, - settings: &frame::Settings, - store: &mut Store, - ) -> Result<(), proto::Error> { - if let Some(val) = settings.is_extended_connect_protocol_enabled() { - self.is_extended_connect_protocol_enabled = val; - } - - if let Some(target) = settings.initial_window_size() { - let old_sz = self.init_window_sz; - self.init_window_sz = target; - - tracing::trace!("update_initial_window_size; new={}; old={}", target, old_sz,); - - // Per RFC 7540 §6.9.2: - // - // In addition to changing the flow-control window for streams that are - // not yet active, a SETTINGS frame can alter the initial flow-control - // window size for streams with active flow-control windows (that is, - // streams in the "open" or "half-closed (remote)" state). 
When the - // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust - // the size of all stream flow-control windows that it maintains by the - // difference between the new value and the old value. - // - // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available - // space in a flow-control window to become negative. A sender MUST - // track the negative flow-control window and MUST NOT send new - // flow-controlled frames until it receives WINDOW_UPDATE frames that - // cause the flow-control window to become positive. - - match target.cmp(&old_sz) { - Ordering::Less => { - // We must decrease the (local) window on every open stream. - let dec = old_sz - target; - tracing::trace!("decrementing all windows; dec={}", dec); - - store.try_for_each(|mut stream| { - stream - .recv_flow - .dec_recv_window(dec) - .map_err(proto::Error::library_go_away)?; - Ok::<_, proto::Error>(()) - })?; - } - Ordering::Greater => { - // We must increase the (local) window on every open stream. - let inc = target - old_sz; - tracing::trace!("incrementing all windows; inc={}", inc); - store.try_for_each(|mut stream| { - // XXX: Shouldn't the peer have already noticed our - // overflow and sent us a GOAWAY? - stream - .recv_flow - .inc_window(inc) - .map_err(proto::Error::library_go_away)?; - stream - .recv_flow - .assign_capacity(inc) - .map_err(proto::Error::library_go_away)?; - Ok::<_, proto::Error>(()) - })?; - } - Ordering::Equal => (), - } - } - - Ok(()) - } - - pub fn is_end_stream(&self, stream: &store::Ptr) -> bool { - if !stream.state.is_recv_closed() { - return false; - } - - stream.pending_recv.is_empty() - } - - pub fn recv_data(&mut self, frame: frame::Data, stream: &mut store::Ptr) -> Result<(), Error> { - let sz = frame.payload().len(); - - // This should have been enforced at the codec::FramedRead layer, so - // this is just a sanity check. 
- assert!(sz <= MAX_WINDOW_SIZE as usize); - - let sz = sz as WindowSize; - - let is_ignoring_frame = stream.state.is_local_error(); - - if !is_ignoring_frame && !stream.state.is_recv_streaming() { - // TODO: There are cases where this can be a stream error of - // STREAM_CLOSED instead... - - // Receiving a DATA frame when not expecting one is a protocol - // error. - proto_err!(conn: "unexpected DATA frame; stream={:?}", stream.id); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - tracing::trace!( - "recv_data; size={}; connection={}; stream={}", - sz, - self.flow.window_size(), - stream.recv_flow.window_size() - ); - - if is_ignoring_frame { - tracing::trace!( - "recv_data; frame ignored on locally reset {:?} for some time", - stream.id, - ); - return self.ignore_data(sz); - } - - // Ensure that there is enough capacity on the connection before acting - // on the stream. - self.consume_connection_window(sz)?; - - if stream.recv_flow.window_size() < sz { - // http://httpwg.org/specs/rfc7540.html#WINDOW_UPDATE - // > A receiver MAY respond with a stream error (Section 5.4.2) or - // > connection error (Section 5.4.1) of type FLOW_CONTROL_ERROR if - // > it is unable to accept a frame. - // - // So, for violating the **stream** window, we can send either a - // stream or connection error. We've opted to send a stream - // error. 
- return Err(Error::library_reset(stream.id, Reason::FLOW_CONTROL_ERROR)); - } - - if stream.dec_content_length(frame.payload().len()).is_err() { - proto_err!(stream: - "recv_data: content-length overflow; stream={:?}; len={:?}", - stream.id, - frame.payload().len(), - ); - return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); - } - - if frame.is_end_stream() { - if stream.ensure_content_length_zero().is_err() { - proto_err!(stream: - "recv_data: content-length underflow; stream={:?}; len={:?}", - stream.id, - frame.payload().len(), - ); - return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); - } - - if stream.state.recv_close().is_err() { - proto_err!(conn: "recv_data: failed to transition to closed state; stream={:?}", stream.id); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - } - - // Received a frame, but no one cared about it. fix issue#648 - if !stream.is_recv { - tracing::trace!( - "recv_data; frame ignored on stream release {:?} for some time", - stream.id, - ); - self.release_connection_capacity(sz, &mut None); - return Ok(()); - } - - // Update stream level flow control - stream - .recv_flow - .send_data(sz) - .map_err(proto::Error::library_go_away)?; - - // Track the data as in-flight - stream.in_flight_recv_data += sz; - - let event = Event::Data(frame.into_payload()); - - // Push the frame onto the recv buffer - stream.pending_recv.push_back(&mut self.buffer, event); - stream.notify_recv(); - - Ok(()) - } - - pub fn ignore_data(&mut self, sz: WindowSize) -> Result<(), Error> { - // Ensure that there is enough capacity on the connection... - self.consume_connection_window(sz)?; - - // Since we are ignoring this frame, - // we aren't returning the frame to the user. That means they - // have no way to release the capacity back to the connection. So - // we have to release it automatically. 
- // - // This call doesn't send a WINDOW_UPDATE immediately, just marks - // the capacity as available to be reclaimed. When the available - // capacity meets a threshold, a WINDOW_UPDATE is then sent. - self.release_connection_capacity(sz, &mut None); - Ok(()) - } - - pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), Error> { - if self.flow.window_size() < sz { - tracing::debug!( - "connection error FLOW_CONTROL_ERROR -- window_size ({:?}) < sz ({:?});", - self.flow.window_size(), - sz, - ); - return Err(Error::library_go_away(Reason::FLOW_CONTROL_ERROR)); - } - - // Update connection level flow control - self.flow.send_data(sz).map_err(Error::library_go_away)?; - - // Track the data as in-flight - self.in_flight_data += sz; - Ok(()) - } - - pub fn recv_push_promise( - &mut self, - frame: frame::PushPromise, - stream: &mut store::Ptr, - ) -> Result<(), Error> { - stream.state.reserve_remote()?; - if frame.is_over_size() { - // A frame is over size if the decoded header block was bigger than - // SETTINGS_MAX_HEADER_LIST_SIZE. - // - // > A server that receives a larger header block than it is willing - // > to handle can send an HTTP 431 (Request Header Fields Too - // > Large) status code [RFC6585]. A client can discard responses - // > that it cannot process. - // - // So, if peer is a server, we'll send a 431. In either case, - // an error is recorded, which will send a REFUSED_STREAM, - // since we don't want any of the data frames either. 
- tracing::debug!( - "stream error REFUSED_STREAM -- recv_push_promise: \ - headers frame is over size; promised_id={:?};", - frame.promised_id(), - ); - return Err(Error::library_reset( - frame.promised_id(), - Reason::REFUSED_STREAM, - )); - } - - let promised_id = frame.promised_id(); - let (pseudo, fields) = frame.into_parts(); - let req = crate::server::Peer::convert_poll_message(pseudo, fields, promised_id)?; - - if let Err(e) = frame::PushPromise::validate_request(&req) { - use PushPromiseHeaderError::*; - match e { - NotSafeAndCacheable => proto_err!( - stream: - "recv_push_promise: method {} is not safe and cacheable; promised_id={:?}", - req.method(), - promised_id, - ), - InvalidContentLength(e) => proto_err!( - stream: - "recv_push_promise; promised request has invalid content-length {:?}; promised_id={:?}", - e, - promised_id, - ), - } - return Err(Error::library_reset(promised_id, Reason::PROTOCOL_ERROR)); - } - - use super::peer::PollMessage::*; - stream - .pending_recv - .push_back(&mut self.buffer, Event::Headers(Server(req))); - stream.notify_recv(); - Ok(()) - } - - /// Ensures that `id` is not in the `Idle` state. - pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> { - if let Ok(next) = self.next_stream_id { - if id >= next { - tracing::debug!( - "stream ID implicitly closed, PROTOCOL_ERROR; stream={:?}", - id - ); - return Err(Reason::PROTOCOL_ERROR); - } - } - // if next_stream_id is overflowed, that's ok. - - Ok(()) - } - - /// Handle remote sending an explicit RST_STREAM. - pub fn recv_reset( - &mut self, - frame: frame::Reset, - stream: &mut Stream, - counts: &mut Counts, - ) -> Result<(), Error> { - // Reseting a stream that the user hasn't accepted is possible, - // but should be done with care. These streams will continue - // to take up memory in the accept queue, but will no longer be - // counted as "concurrent" streams. - // - // So, we have a separate limit for these. 
- // - // See https://github.com/hyperium/hyper/issues/2877 - if stream.is_pending_accept { - if counts.can_inc_num_remote_reset_streams() { - counts.inc_num_remote_reset_streams(); - } else { - tracing::warn!( - "recv_reset; remotely-reset pending-accept streams reached limit ({:?})", - counts.max_remote_reset_streams(), - ); - return Err(Error::library_go_away_data( - Reason::ENHANCE_YOUR_CALM, - "too_many_resets", - )); - } - } - - // Notify the stream - stream.state.recv_reset(frame, stream.is_pending_send); - - stream.notify_send(); - stream.notify_recv(); - - Ok(()) - } - - /// Handle a connection-level error - pub fn handle_error(&mut self, err: &proto::Error, stream: &mut Stream) { - // Receive an error - stream.state.handle_error(err); - - // If a receiver is waiting, notify it - stream.notify_send(); - stream.notify_recv(); - } - - pub fn go_away(&mut self, last_processed_id: StreamId) { - assert!(self.max_stream_id >= last_processed_id); - self.max_stream_id = last_processed_id; - } - - pub fn recv_eof(&mut self, stream: &mut Stream) { - stream.state.recv_eof(); - stream.notify_send(); - stream.notify_recv(); - } - - pub(super) fn clear_recv_buffer(&mut self, stream: &mut Stream) { - while stream.pending_recv.pop_front(&mut self.buffer).is_some() { - // drop it - } - } - - /// Get the max ID of streams we can receive. - /// - /// This gets lowered if we send a GOAWAY frame. 
- pub fn max_stream_id(&self) -> StreamId { - self.max_stream_id - } - - pub fn next_stream_id(&self) -> Result { - if let Ok(id) = self.next_stream_id { - Ok(id) - } else { - Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) - } - } - - pub fn may_have_created_stream(&self, id: StreamId) -> bool { - if let Ok(next_id) = self.next_stream_id { - // Peer::is_local_init should have been called beforehand - debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated(),); - id < next_id - } else { - true - } - } - - pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) { - if let Ok(next_id) = self.next_stream_id { - // !Peer::is_local_init should have been called beforehand - debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated()); - if id >= next_id { - self.next_stream_id = id.next_id(); - } - } - } - - /// Returns true if the remote peer can reserve a stream with the given ID. - pub fn ensure_can_reserve(&self) -> Result<(), Error> { - if !self.is_push_enabled { - proto_err!(conn: "recv_push_promise: push is disabled"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - Ok(()) - } - - /// Add a locally reset stream to queue to be eventually reaped. - pub fn enqueue_reset_expiration(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { - if !stream.state.is_local_error() || stream.is_pending_reset_expiration() { - return; - } - - tracing::trace!("enqueue_reset_expiration; {:?}", stream.id); - - if counts.can_inc_num_reset_streams() { - counts.inc_num_reset_streams(); - self.pending_reset_expired.push(stream); - } - } - - /// Send any pending refusals. 
- pub fn send_pending_refusal( - &mut self, - cx: &mut Context, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, - { - if let Some(stream_id) = self.refused { - ready!(dst.poll_ready(cx))?; - - // Create the RST_STREAM frame - let frame = frame::Reset::new(stream_id, Reason::REFUSED_STREAM); - - // Buffer the frame - dst.buffer(frame.into()).expect("invalid RST_STREAM frame"); - } - - self.refused = None; - - Poll::Ready(Ok(())) - } - - pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) { - if !self.pending_reset_expired.is_empty() { - let now = Instant::now(); - let reset_duration = self.reset_duration; - while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| { - let reset_at = stream.reset_at.expect("reset_at must be set if in queue"); - // rust-lang/rust#86470 tracks a bug in the standard library where `Instant` - // subtraction can panic (because, on some platforms, `Instant` isn't actually - // monotonic). We use a saturating operation to avoid this panic here. 
- now.saturating_duration_since(reset_at) > reset_duration - }) { - counts.transition_after(stream, true); - } - } - } - - pub fn clear_queues( - &mut self, - clear_pending_accept: bool, - store: &mut Store, - counts: &mut Counts, - ) { - self.clear_stream_window_update_queue(store, counts); - self.clear_all_reset_streams(store, counts); - - if clear_pending_accept { - self.clear_all_pending_accept(store, counts); - } - } - - fn clear_stream_window_update_queue(&mut self, store: &mut Store, counts: &mut Counts) { - while let Some(stream) = self.pending_window_updates.pop(store) { - counts.transition(stream, |_, stream| { - tracing::trace!("clear_stream_window_update_queue; stream={:?}", stream.id); - }) - } - } - - /// Called on EOF - fn clear_all_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) { - while let Some(stream) = self.pending_reset_expired.pop(store) { - counts.transition_after(stream, true); - } - } - - fn clear_all_pending_accept(&mut self, store: &mut Store, counts: &mut Counts) { - while let Some(stream) = self.pending_accept.pop(store) { - counts.transition_after(stream, false); - } - } - - pub fn poll_complete( - &mut self, - cx: &mut Context, - store: &mut Store, - counts: &mut Counts, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, - { - // Send any pending connection level window updates - ready!(self.send_connection_window_update(cx, dst))?; - - // Send any pending stream level window updates - ready!(self.send_stream_window_updates(cx, store, counts, dst))?; - - Poll::Ready(Ok(())) - } - - /// Send connection level window update - fn send_connection_window_update( - &mut self, - cx: &mut Context, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, - { - if let Some(incr) = self.flow.unclaimed_capacity() { - let frame = frame::WindowUpdate::new(StreamId::zero(), incr); - - // Ensure the codec has capacity - ready!(dst.poll_ready(cx))?; - - // Buffer the WINDOW_UPDATE frame - 
dst.buffer(frame.into()) - .expect("invalid WINDOW_UPDATE frame"); - - // Update flow control - self.flow - .inc_window(incr) - .expect("unexpected flow control state"); - } - - Poll::Ready(Ok(())) - } - - /// Send stream level window update - pub fn send_stream_window_updates( - &mut self, - cx: &mut Context, - store: &mut Store, - counts: &mut Counts, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, - { - loop { - // Ensure the codec has capacity - ready!(dst.poll_ready(cx))?; - - // Get the next stream - let stream = match self.pending_window_updates.pop(store) { - Some(stream) => stream, - None => return Poll::Ready(Ok(())), - }; - - counts.transition(stream, |_, stream| { - tracing::trace!("pending_window_updates -- pop; stream={:?}", stream.id); - debug_assert!(!stream.is_pending_window_update); - - if !stream.state.is_recv_streaming() { - // No need to send window updates on the stream if the stream is - // no longer receiving data. - // - // TODO: is this correct? We could possibly send a window - // update on a ReservedRemote stream if we already know - // we want to stream the data faster... 
- return; - } - - // TODO: de-dup - if let Some(incr) = stream.recv_flow.unclaimed_capacity() { - // Create the WINDOW_UPDATE frame - let frame = frame::WindowUpdate::new(stream.id, incr); - - // Buffer it - dst.buffer(frame.into()) - .expect("invalid WINDOW_UPDATE frame"); - - // Update flow control - stream - .recv_flow - .inc_window(incr) - .expect("unexpected flow control state"); - } - }) - } - } - - pub fn next_incoming(&mut self, store: &mut Store) -> Option { - self.pending_accept.pop(store).map(|ptr| ptr.key()) - } - - pub fn poll_data( - &mut self, - cx: &Context, - stream: &mut Stream, - ) -> Poll>> { - match stream.pending_recv.pop_front(&mut self.buffer) { - Some(Event::Data(payload)) => Poll::Ready(Some(Ok(payload))), - Some(event) => { - // Frame is trailer - stream.pending_recv.push_front(&mut self.buffer, event); - - // Notify the recv task. This is done just in case - // `poll_trailers` was called. - // - // It is very likely that `notify_recv` will just be a no-op (as - // the task will be None), so this isn't really much of a - // performance concern. It also means we don't have to track - // state to see if `poll_trailers` was called before `poll_data` - // returned `None`. - stream.notify_recv(); - - // No more data frames - Poll::Ready(None) - } - None => self.schedule_recv(cx, stream), - } - } - - pub fn poll_trailers( - &mut self, - cx: &Context, - stream: &mut Stream, - ) -> Poll>> { - match stream.pending_recv.pop_front(&mut self.buffer) { - Some(Event::Trailers(trailers)) => Poll::Ready(Some(Ok(trailers))), - Some(event) => { - // Frame is not trailers.. not ready to poll trailers yet. - stream.pending_recv.push_front(&mut self.buffer, event); - - Poll::Pending - } - None => self.schedule_recv(cx, stream), - } - } - - fn schedule_recv( - &mut self, - cx: &Context, - stream: &mut Stream, - ) -> Poll>> { - if stream.state.ensure_recv_open()? 
{ - // Request to get notified once more frames arrive - stream.recv_task = Some(cx.waker().clone()); - Poll::Pending - } else { - // No more frames will be received - Poll::Ready(None) - } - } -} - -// ===== impl Open ===== - -impl Open { - pub fn is_push_promise(&self) -> bool { - matches!(*self, Self::PushPromise) - } -} - -// ===== impl RecvHeaderBlockError ===== - -impl From for RecvHeaderBlockError { - fn from(err: Error) -> Self { - RecvHeaderBlockError::State(err) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/send.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/send.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/send.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/send.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,585 +0,0 @@ -use super::{ - store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId, - StreamIdOverflow, WindowSize, -}; -use crate::codec::UserError; -use crate::frame::{self, Reason}; -use crate::proto::{self, Error, Initiator}; - -use bytes::Buf; -use tokio::io::AsyncWrite; - -use std::cmp::Ordering; -use std::io; -use std::task::{Context, Poll, Waker}; - -/// Manages state transitions related to outbound frames. -#[derive(Debug)] -pub(super) struct Send { - /// Stream identifier to use for next initialized stream. - next_stream_id: Result, - - /// Any streams with a higher ID are ignored. - /// - /// This starts as MAX, but is lowered when a GOAWAY is received. - /// - /// > After sending a GOAWAY frame, the sender can discard frames for - /// > streams initiated by the receiver with identifiers higher than - /// > the identified last stream. - max_stream_id: StreamId, - - /// Initial window size of locally initiated streams - init_window_sz: WindowSize, - - /// Prioritization layer - prioritize: Prioritize, - - is_push_enabled: bool, - - /// If extended connect protocol is enabled. 
- is_extended_connect_protocol_enabled: bool, -} - -/// A value to detect which public API has called `poll_reset`. -#[derive(Debug)] -pub(crate) enum PollReset { - AwaitingHeaders, - Streaming, -} - -impl Send { - /// Create a new `Send` - pub fn new(config: &Config) -> Self { - Send { - init_window_sz: config.remote_init_window_sz, - max_stream_id: StreamId::MAX, - next_stream_id: Ok(config.local_next_stream_id), - prioritize: Prioritize::new(config), - is_push_enabled: true, - is_extended_connect_protocol_enabled: false, - } - } - - /// Returns the initial send window size - pub fn init_window_sz(&self) -> WindowSize { - self.init_window_sz - } - - pub fn open(&mut self) -> Result { - let stream_id = self.ensure_next_stream_id()?; - self.next_stream_id = stream_id.next_id(); - Ok(stream_id) - } - - pub fn reserve_local(&mut self) -> Result { - let stream_id = self.ensure_next_stream_id()?; - self.next_stream_id = stream_id.next_id(); - Ok(stream_id) - } - - fn check_headers(fields: &http::HeaderMap) -> Result<(), UserError> { - // 8.1.2.2. 
Connection-Specific Header Fields - if fields.contains_key(http::header::CONNECTION) - || fields.contains_key(http::header::TRANSFER_ENCODING) - || fields.contains_key(http::header::UPGRADE) - || fields.contains_key("keep-alive") - || fields.contains_key("proxy-connection") - { - tracing::debug!("illegal connection-specific headers found"); - return Err(UserError::MalformedHeaders); - } else if let Some(te) = fields.get(http::header::TE) { - if te != "trailers" { - tracing::debug!("illegal connection-specific headers found"); - return Err(UserError::MalformedHeaders); - } - } - Ok(()) - } - - pub fn send_push_promise( - &mut self, - frame: frame::PushPromise, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - task: &mut Option, - ) -> Result<(), UserError> { - if !self.is_push_enabled { - return Err(UserError::PeerDisabledServerPush); - } - - tracing::trace!( - "send_push_promise; frame={:?}; init_window={:?}", - frame, - self.init_window_sz - ); - - Self::check_headers(frame.fields())?; - - // Queue the frame for sending - self.prioritize - .queue_frame(frame.into(), buffer, stream, task); - - Ok(()) - } - - pub fn send_headers( - &mut self, - frame: frame::Headers, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - counts: &mut Counts, - task: &mut Option, - ) -> Result<(), UserError> { - tracing::trace!( - "send_headers; frame={:?}; init_window={:?}", - frame, - self.init_window_sz - ); - - Self::check_headers(frame.fields())?; - - let end_stream = frame.is_end_stream(); - - // Update the state - stream.state.send_open(end_stream)?; - - let mut pending_open = false; - if counts.peer().is_local_init(frame.stream_id()) && !stream.is_pending_push { - self.prioritize.queue_open(stream); - pending_open = true; - } - - // Queue the frame for sending - // - // This call expects that, since new streams are in the open queue, new - // streams won't be pushed on pending_send. 
- self.prioritize - .queue_frame(frame.into(), buffer, stream, task); - - // Need to notify the connection when pushing onto pending_open since - // queue_frame only notifies for pending_send. - if pending_open { - if let Some(task) = task.take() { - task.wake(); - } - } - - Ok(()) - } - - /// Send an explicit RST_STREAM frame - pub fn send_reset( - &mut self, - reason: Reason, - initiator: Initiator, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - counts: &mut Counts, - task: &mut Option, - ) { - let is_reset = stream.state.is_reset(); - let is_closed = stream.state.is_closed(); - let is_empty = stream.pending_send.is_empty(); - let stream_id = stream.id; - - tracing::trace!( - "send_reset(..., reason={:?}, initiator={:?}, stream={:?}, ..., \ - is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \ - state={:?} \ - ", - reason, - initiator, - stream_id, - is_reset, - is_closed, - is_empty, - stream.state - ); - - if is_reset { - // Don't double reset - tracing::trace!( - " -> not sending RST_STREAM ({:?} is already reset)", - stream_id - ); - return; - } - - // Transition the state to reset no matter what. - stream.state.set_reset(stream_id, reason, initiator); - - // If closed AND the send queue is flushed, then the stream cannot be - // reset explicitly, either. Implicit resets can still be queued. - if is_closed && is_empty { - tracing::trace!( - " -> not sending explicit RST_STREAM ({:?} was closed \ - and send queue was flushed)", - stream_id - ); - return; - } - - // Clear all pending outbound frames. - // Note that we don't call `self.recv_err` because we want to enqueue - // the reset frame before transitioning the stream inside - // `reclaim_all_capacity`. 
- self.prioritize.clear_queue(buffer, stream); - - let frame = frame::Reset::new(stream.id, reason); - - tracing::trace!("send_reset -- queueing; frame={:?}", frame); - self.prioritize - .queue_frame(frame.into(), buffer, stream, task); - self.prioritize.reclaim_all_capacity(stream, counts); - } - - pub fn schedule_implicit_reset( - &mut self, - stream: &mut store::Ptr, - reason: Reason, - counts: &mut Counts, - task: &mut Option, - ) { - if stream.state.is_closed() { - // Stream is already closed, nothing more to do - return; - } - - stream.state.set_scheduled_reset(reason); - - self.prioritize.reclaim_reserved_capacity(stream, counts); - self.prioritize.schedule_send(stream, task); - } - - pub fn send_data( - &mut self, - frame: frame::Data, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - counts: &mut Counts, - task: &mut Option, - ) -> Result<(), UserError> - where - B: Buf, - { - self.prioritize - .send_data(frame, buffer, stream, counts, task) - } - - pub fn send_trailers( - &mut self, - frame: frame::Headers, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - counts: &mut Counts, - task: &mut Option, - ) -> Result<(), UserError> { - // TODO: Should this logic be moved into state.rs? 
- if !stream.state.is_send_streaming() { - return Err(UserError::UnexpectedFrameType); - } - - stream.state.send_close(); - - tracing::trace!("send_trailers -- queuing; frame={:?}", frame); - self.prioritize - .queue_frame(frame.into(), buffer, stream, task); - - // Release any excess capacity - self.prioritize.reserve_capacity(0, stream, counts); - - Ok(()) - } - - pub fn poll_complete( - &mut self, - cx: &mut Context, - buffer: &mut Buffer>, - store: &mut Store, - counts: &mut Counts, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, - { - self.prioritize - .poll_complete(cx, buffer, store, counts, dst) - } - - /// Request capacity to send data - pub fn reserve_capacity( - &mut self, - capacity: WindowSize, - stream: &mut store::Ptr, - counts: &mut Counts, - ) { - self.prioritize.reserve_capacity(capacity, stream, counts) - } - - pub fn poll_capacity( - &mut self, - cx: &Context, - stream: &mut store::Ptr, - ) -> Poll>> { - if !stream.state.is_send_streaming() { - return Poll::Ready(None); - } - - if !stream.send_capacity_inc { - stream.wait_send(cx); - return Poll::Pending; - } - - stream.send_capacity_inc = false; - - Poll::Ready(Some(Ok(self.capacity(stream)))) - } - - /// Current available stream send capacity - pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize { - stream.capacity(self.prioritize.max_buffer_size()) - } - - pub fn poll_reset( - &self, - cx: &Context, - stream: &mut Stream, - mode: PollReset, - ) -> Poll> { - match stream.state.ensure_reason(mode)? 
{ - Some(reason) => Poll::Ready(Ok(reason)), - None => { - stream.wait_send(cx); - Poll::Pending - } - } - } - - pub fn recv_connection_window_update( - &mut self, - frame: frame::WindowUpdate, - store: &mut Store, - counts: &mut Counts, - ) -> Result<(), Reason> { - self.prioritize - .recv_connection_window_update(frame.size_increment(), store, counts) - } - - pub fn recv_stream_window_update( - &mut self, - sz: WindowSize, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - counts: &mut Counts, - task: &mut Option, - ) -> Result<(), Reason> { - if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) { - tracing::debug!("recv_stream_window_update !!; err={:?}", e); - - self.send_reset( - Reason::FLOW_CONTROL_ERROR, - Initiator::Library, - buffer, - stream, - counts, - task, - ); - - return Err(e); - } - - Ok(()) - } - - pub(super) fn recv_go_away(&mut self, last_stream_id: StreamId) -> Result<(), Error> { - if last_stream_id > self.max_stream_id { - // The remote endpoint sent a `GOAWAY` frame indicating a stream - // that we never sent, or that we have already terminated on account - // of previous `GOAWAY` frame. In either case, that is illegal. 
- // (When sending multiple `GOAWAY`s, "Endpoints MUST NOT increase - // the value they send in the last stream identifier, since the - // peers might already have retried unprocessed requests on another - // connection.") - proto_err!(conn: - "recv_go_away: last_stream_id ({:?}) > max_stream_id ({:?})", - last_stream_id, self.max_stream_id, - ); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - self.max_stream_id = last_stream_id; - Ok(()) - } - - pub fn handle_error( - &mut self, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - counts: &mut Counts, - ) { - // Clear all pending outbound frames - self.prioritize.clear_queue(buffer, stream); - self.prioritize.reclaim_all_capacity(stream, counts); - } - - pub fn apply_remote_settings( - &mut self, - settings: &frame::Settings, - buffer: &mut Buffer>, - store: &mut Store, - counts: &mut Counts, - task: &mut Option, - ) -> Result<(), Error> { - if let Some(val) = settings.is_extended_connect_protocol_enabled() { - self.is_extended_connect_protocol_enabled = val; - } - - // Applies an update to the remote endpoint's initial window size. - // - // Per RFC 7540 §6.9.2: - // - // In addition to changing the flow-control window for streams that are - // not yet active, a SETTINGS frame can alter the initial flow-control - // window size for streams with active flow-control windows (that is, - // streams in the "open" or "half-closed (remote)" state). When the - // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust - // the size of all stream flow-control windows that it maintains by the - // difference between the new value and the old value. - // - // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available - // space in a flow-control window to become negative. A sender MUST - // track the negative flow-control window and MUST NOT send new - // flow-controlled frames until it receives WINDOW_UPDATE frames that - // cause the flow-control window to become positive. 
- if let Some(val) = settings.initial_window_size() { - let old_val = self.init_window_sz; - self.init_window_sz = val; - - match val.cmp(&old_val) { - Ordering::Less => { - // We must decrease the (remote) window on every open stream. - let dec = old_val - val; - tracing::trace!("decrementing all windows; dec={}", dec); - - let mut total_reclaimed = 0; - store.try_for_each(|mut stream| { - let stream = &mut *stream; - - tracing::trace!( - "decrementing stream window; id={:?}; decr={}; flow={:?}", - stream.id, - dec, - stream.send_flow - ); - - // TODO: this decrement can underflow based on received frames! - stream - .send_flow - .dec_send_window(dec) - .map_err(proto::Error::library_go_away)?; - - // It's possible that decreasing the window causes - // `window_size` (the stream-specific window) to fall below - // `available` (the portion of the connection-level window - // that we have allocated to the stream). - // In this case, we should take that excess allocation away - // and reassign it to other streams. - let window_size = stream.send_flow.window_size(); - let available = stream.send_flow.available().as_size(); - let reclaimed = if available > window_size { - // Drop down to `window_size`. - let reclaim = available - window_size; - stream - .send_flow - .claim_capacity(reclaim) - .map_err(proto::Error::library_go_away)?; - total_reclaimed += reclaim; - reclaim - } else { - 0 - }; - - tracing::trace!( - "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", - stream.id, - dec, - reclaimed, - stream.send_flow - ); - - // TODO: Should this notify the producer when the capacity - // of a stream is reduced? Maybe it should if the capacity - // is reduced to zero, allowing the producer to stop work. 
- - Ok::<_, proto::Error>(()) - })?; - - self.prioritize - .assign_connection_capacity(total_reclaimed, store, counts); - } - Ordering::Greater => { - let inc = val - old_val; - - store.try_for_each(|mut stream| { - self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) - .map_err(Error::library_go_away) - })?; - } - Ordering::Equal => (), - } - } - - if let Some(val) = settings.is_push_enabled() { - self.is_push_enabled = val - } - - Ok(()) - } - - pub fn clear_queues(&mut self, store: &mut Store, counts: &mut Counts) { - self.prioritize.clear_pending_capacity(store, counts); - self.prioritize.clear_pending_send(store, counts); - self.prioritize.clear_pending_open(store, counts); - } - - pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> { - if let Ok(next) = self.next_stream_id { - if id >= next { - return Err(Reason::PROTOCOL_ERROR); - } - } - // if next_stream_id is overflowed, that's ok. - - Ok(()) - } - - pub fn ensure_next_stream_id(&self) -> Result { - self.next_stream_id - .map_err(|_| UserError::OverflowedStreamId) - } - - pub fn may_have_created_stream(&self, id: StreamId) -> bool { - if let Ok(next_id) = self.next_stream_id { - // Peer::is_local_init should have been called beforehand - debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated(),); - id < next_id - } else { - true - } - } - - pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) { - if let Ok(next_id) = self.next_stream_id { - // Peer::is_local_init should have been called beforehand - debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated()); - if id >= next_id { - self.next_stream_id = id.next_id(); - } - } - } - - pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { - self.is_extended_connect_protocol_enabled - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/state.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/state.rs --- 
s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/state.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/state.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,469 +0,0 @@ -use std::io; - -use crate::codec::UserError; -use crate::frame::{self, Reason, StreamId}; -use crate::proto::{self, Error, Initiator, PollReset}; - -use self::Inner::*; -use self::Peer::*; - -/// Represents the state of an H2 stream -/// -/// ```not_rust -/// +--------+ -/// send PP | | recv PP -/// ,--------| idle |--------. -/// / | | \ -/// v +--------+ v -/// +----------+ | +----------+ -/// | | | send H / | | -/// ,------| reserved | | recv H | reserved |------. -/// | | (local) | | | (remote) | | -/// | +----------+ v +----------+ | -/// | | +--------+ | | -/// | | recv ES | | send ES | | -/// | send H | ,-------| open |-------. | recv H | -/// | | / | | \ | | -/// | v v +--------+ v v | -/// | +----------+ | +----------+ | -/// | | half | | | half | | -/// | | closed | | send R / | closed | | -/// | | (remote) | | recv R | (local) | | -/// | +----------+ | +----------+ | -/// | | | | | -/// | | send ES / | recv ES / | | -/// | | send R / v send R / | | -/// | | recv R +--------+ recv R | | -/// | send R / `----------->| |<-----------' send R / | -/// | recv R | closed | recv R | -/// `----------------------->| |<----------------------' -/// +--------+ -/// -/// send: endpoint sends this frame -/// recv: endpoint receives this frame -/// -/// H: HEADERS frame (with implied CONTINUATIONs) -/// PP: PUSH_PROMISE frame (with implied CONTINUATIONs) -/// ES: END_STREAM flag -/// R: RST_STREAM frame -/// ``` -#[derive(Debug, Clone)] -pub struct State { - inner: Inner, -} - -#[derive(Debug, Clone)] -enum Inner { - Idle, - // TODO: these states shouldn't count against concurrency limits: - ReservedLocal, - ReservedRemote, - Open { local: Peer, remote: Peer }, - HalfClosedLocal(Peer), // TODO: explicitly name this value - HalfClosedRemote(Peer), - 
Closed(Cause), -} - -#[derive(Debug, Copy, Clone, Default)] -enum Peer { - #[default] - AwaitingHeaders, - Streaming, -} - -#[derive(Debug, Clone)] -enum Cause { - EndStream, - Error(Error), - - /// This indicates to the connection that a reset frame must be sent out - /// once the send queue has been flushed. - /// - /// Examples of when this could happen: - /// - User drops all references to a stream, so we want to CANCEL the it. - /// - Header block size was too large, so we want to REFUSE, possibly - /// after sending a 431 response frame. - ScheduledLibraryReset(Reason), -} - -impl State { - /// Opens the send-half of a stream if it is not already open. - pub fn send_open(&mut self, eos: bool) -> Result<(), UserError> { - let local = Streaming; - - self.inner = match self.inner { - Idle => { - if eos { - HalfClosedLocal(AwaitingHeaders) - } else { - Open { - local, - remote: AwaitingHeaders, - } - } - } - Open { - local: AwaitingHeaders, - remote, - } => { - if eos { - HalfClosedLocal(remote) - } else { - Open { local, remote } - } - } - HalfClosedRemote(AwaitingHeaders) | ReservedLocal => { - if eos { - Closed(Cause::EndStream) - } else { - HalfClosedRemote(local) - } - } - _ => { - // All other transitions result in a protocol error - return Err(UserError::UnexpectedFrameType); - } - }; - - Ok(()) - } - - /// Opens the receive-half of the stream when a HEADERS frame is received. - /// - /// Returns true if this transitions the state to Open. 
- pub fn recv_open(&mut self, frame: &frame::Headers) -> Result { - let mut initial = false; - let eos = frame.is_end_stream(); - - self.inner = match self.inner { - Idle => { - initial = true; - - if eos { - HalfClosedRemote(AwaitingHeaders) - } else { - Open { - local: AwaitingHeaders, - remote: if frame.is_informational() { - tracing::trace!("skipping 1xx response headers"); - AwaitingHeaders - } else { - Streaming - }, - } - } - } - ReservedRemote => { - initial = true; - - if eos { - Closed(Cause::EndStream) - } else if frame.is_informational() { - tracing::trace!("skipping 1xx response headers"); - ReservedRemote - } else { - HalfClosedLocal(Streaming) - } - } - Open { - local, - remote: AwaitingHeaders, - } => { - if eos { - HalfClosedRemote(local) - } else { - Open { - local, - remote: if frame.is_informational() { - tracing::trace!("skipping 1xx response headers"); - AwaitingHeaders - } else { - Streaming - }, - } - } - } - HalfClosedLocal(AwaitingHeaders) => { - if eos { - Closed(Cause::EndStream) - } else if frame.is_informational() { - tracing::trace!("skipping 1xx response headers"); - HalfClosedLocal(AwaitingHeaders) - } else { - HalfClosedLocal(Streaming) - } - } - ref state => { - // All other transitions result in a protocol error - proto_err!(conn: "recv_open: in unexpected state {:?}", state); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - }; - - Ok(initial) - } - - /// Transition from Idle -> ReservedRemote - pub fn reserve_remote(&mut self) -> Result<(), Error> { - match self.inner { - Idle => { - self.inner = ReservedRemote; - Ok(()) - } - ref state => { - proto_err!(conn: "reserve_remote: in unexpected state {:?}", state); - Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) - } - } - } - - /// Transition from Idle -> ReservedLocal - pub fn reserve_local(&mut self) -> Result<(), UserError> { - match self.inner { - Idle => { - self.inner = ReservedLocal; - Ok(()) - } - _ => Err(UserError::UnexpectedFrameType), - } - } - - 
/// Indicates that the remote side will not send more data to the local. - pub fn recv_close(&mut self) -> Result<(), Error> { - match self.inner { - Open { local, .. } => { - // The remote side will continue to receive data. - tracing::trace!("recv_close: Open => HalfClosedRemote({:?})", local); - self.inner = HalfClosedRemote(local); - Ok(()) - } - HalfClosedLocal(..) => { - tracing::trace!("recv_close: HalfClosedLocal => Closed"); - self.inner = Closed(Cause::EndStream); - Ok(()) - } - ref state => { - proto_err!(conn: "recv_close: in unexpected state {:?}", state); - Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) - } - } - } - - /// The remote explicitly sent a RST_STREAM. - /// - /// # Arguments - /// - `frame`: the received RST_STREAM frame. - /// - `queued`: true if this stream has frames in the pending send queue. - pub fn recv_reset(&mut self, frame: frame::Reset, queued: bool) { - match self.inner { - // If the stream is already in a `Closed` state, do nothing, - // provided that there are no frames still in the send queue. - Closed(..) if !queued => {} - // A notionally `Closed` stream may still have queued frames in - // the following cases: - // - // - if the cause is `Cause::Scheduled(..)` (i.e. we have not - // actually closed the stream yet). - // - if the cause is `Cause::EndStream`: we transition to this - // state when an EOS frame is *enqueued* (so that it's invalid - // to enqueue more frames), not when the EOS frame is *sent*; - // therefore, there may still be frames ahead of the EOS frame - // in the send queue. - // - // In either of these cases, we want to overwrite the stream's - // previous state with the received RST_STREAM, so that the queue - // will be cleared by `Prioritize::pop_frame`. 
- ref state => { - tracing::trace!( - "recv_reset; frame={:?}; state={:?}; queued={:?}", - frame, - state, - queued - ); - self.inner = Closed(Cause::Error(Error::remote_reset( - frame.stream_id(), - frame.reason(), - ))); - } - } - } - - /// Handle a connection-level error. - pub fn handle_error(&mut self, err: &proto::Error) { - match self.inner { - Closed(..) => {} - _ => { - tracing::trace!("handle_error; err={:?}", err); - self.inner = Closed(Cause::Error(err.clone())); - } - } - } - - pub fn recv_eof(&mut self) { - match self.inner { - Closed(..) => {} - ref state => { - tracing::trace!("recv_eof; state={:?}", state); - self.inner = Closed(Cause::Error( - io::Error::new( - io::ErrorKind::BrokenPipe, - "stream closed because of a broken pipe", - ) - .into(), - )); - } - } - } - - /// Indicates that the local side will not send more data to the local. - pub fn send_close(&mut self) { - match self.inner { - Open { remote, .. } => { - // The remote side will continue to receive data. - tracing::trace!("send_close: Open => HalfClosedLocal({:?})", remote); - self.inner = HalfClosedLocal(remote); - } - HalfClosedRemote(..) => { - tracing::trace!("send_close: HalfClosedRemote => Closed"); - self.inner = Closed(Cause::EndStream); - } - ref state => panic!("send_close: unexpected state {:?}", state), - } - } - - /// Set the stream state to reset locally. - pub fn set_reset(&mut self, stream_id: StreamId, reason: Reason, initiator: Initiator) { - self.inner = Closed(Cause::Error(Error::Reset(stream_id, reason, initiator))); - } - - /// Set the stream state to a scheduled reset. 
- pub fn set_scheduled_reset(&mut self, reason: Reason) { - debug_assert!(!self.is_closed()); - self.inner = Closed(Cause::ScheduledLibraryReset(reason)); - } - - pub fn get_scheduled_reset(&self) -> Option { - match self.inner { - Closed(Cause::ScheduledLibraryReset(reason)) => Some(reason), - _ => None, - } - } - - pub fn is_scheduled_reset(&self) -> bool { - matches!(self.inner, Closed(Cause::ScheduledLibraryReset(..))) - } - - pub fn is_local_error(&self) -> bool { - match self.inner { - Closed(Cause::Error(ref e)) => e.is_local(), - Closed(Cause::ScheduledLibraryReset(..)) => true, - _ => false, - } - } - - pub fn is_remote_reset(&self) -> bool { - matches!( - self.inner, - Closed(Cause::Error(Error::Reset(_, _, Initiator::Remote))) - ) - } - - /// Returns true if the stream is already reset. - pub fn is_reset(&self) -> bool { - match self.inner { - Closed(Cause::EndStream) => false, - Closed(_) => true, - _ => false, - } - } - - pub fn is_send_streaming(&self) -> bool { - matches!( - self.inner, - Open { - local: Streaming, - .. - } | HalfClosedRemote(Streaming) - ) - } - - /// Returns true when the stream is in a state to receive headers - pub fn is_recv_headers(&self) -> bool { - matches!( - self.inner, - Idle | Open { - remote: AwaitingHeaders, - .. - } | HalfClosedLocal(AwaitingHeaders) - | ReservedRemote - ) - } - - pub fn is_recv_streaming(&self) -> bool { - matches!( - self.inner, - Open { - remote: Streaming, - .. - } | HalfClosedLocal(Streaming) - ) - } - - pub fn is_closed(&self) -> bool { - matches!(self.inner, Closed(_)) - } - - pub fn is_recv_closed(&self) -> bool { - matches!( - self.inner, - Closed(..) | HalfClosedRemote(..) | ReservedLocal - ) - } - - pub fn is_send_closed(&self) -> bool { - matches!( - self.inner, - Closed(..) | HalfClosedLocal(..) | ReservedRemote - ) - } - - pub fn is_idle(&self) -> bool { - matches!(self.inner, Idle) - } - - pub fn ensure_recv_open(&self) -> Result { - // TODO: Is this correct? 
- match self.inner { - Closed(Cause::Error(ref e)) => Err(e.clone()), - Closed(Cause::ScheduledLibraryReset(reason)) => { - Err(proto::Error::library_go_away(reason)) - } - Closed(Cause::EndStream) | HalfClosedRemote(..) | ReservedLocal => Ok(false), - _ => Ok(true), - } - } - - /// Returns a reason if the stream has been reset. - pub(super) fn ensure_reason(&self, mode: PollReset) -> Result, crate::Error> { - match self.inner { - Closed(Cause::Error(Error::Reset(_, reason, _))) - | Closed(Cause::Error(Error::GoAway(_, reason, _))) - | Closed(Cause::ScheduledLibraryReset(reason)) => Ok(Some(reason)), - Closed(Cause::Error(ref e)) => Err(e.clone().into()), - Open { - local: Streaming, .. - } - | HalfClosedRemote(Streaming) => match mode { - PollReset::AwaitingHeaders => Err(UserError::PollResetAfterSendResponse.into()), - PollReset::Streaming => Ok(None), - }, - _ => Ok(None), - } - } -} - -impl Default for State { - fn default() -> State { - State { inner: Inner::Idle } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/store.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/store.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/store.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/store.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,464 +0,0 @@ -use super::*; - -use indexmap::{self, IndexMap}; - -use std::convert::Infallible; -use std::fmt; -use std::marker::PhantomData; -use std::ops; - -/// Storage for streams -#[derive(Debug)] -pub(super) struct Store { - slab: slab::Slab, - ids: IndexMap, -} - -/// "Pointer" to an entry in the store -pub(super) struct Ptr<'a> { - key: Key, - store: &'a mut Store, -} - -/// References an entry in the store. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(crate) struct Key { - index: SlabIndex, - /// Keep the stream ID in the key as an ABA guard, since slab indices - /// could be re-used with a new stream. 
- stream_id: StreamId, -} - -// We can never have more than `StreamId::MAX` streams in the store, -// so we can save a smaller index (u32 vs usize). -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -struct SlabIndex(u32); - -#[derive(Debug)] -pub(super) struct Queue { - indices: Option, - _p: PhantomData, -} - -pub(super) trait Next { - fn next(stream: &Stream) -> Option; - - fn set_next(stream: &mut Stream, key: Option); - - fn take_next(stream: &mut Stream) -> Option; - - fn is_queued(stream: &Stream) -> bool; - - fn set_queued(stream: &mut Stream, val: bool); -} - -/// A linked list -#[derive(Debug, Clone, Copy)] -struct Indices { - pub head: Key, - pub tail: Key, -} - -pub(super) enum Entry<'a> { - Occupied(OccupiedEntry<'a>), - Vacant(VacantEntry<'a>), -} - -pub(super) struct OccupiedEntry<'a> { - ids: indexmap::map::OccupiedEntry<'a, StreamId, SlabIndex>, -} - -pub(super) struct VacantEntry<'a> { - ids: indexmap::map::VacantEntry<'a, StreamId, SlabIndex>, - slab: &'a mut slab::Slab, -} - -pub(super) trait Resolve { - fn resolve(&mut self, key: Key) -> Ptr; -} - -// ===== impl Store ===== - -impl Store { - pub fn new() -> Self { - Store { - slab: slab::Slab::new(), - ids: IndexMap::new(), - } - } - - pub fn find_mut(&mut self, id: &StreamId) -> Option { - let index = match self.ids.get(id) { - Some(key) => *key, - None => return None, - }; - - Some(Ptr { - key: Key { - index, - stream_id: *id, - }, - store: self, - }) - } - - pub fn insert(&mut self, id: StreamId, val: Stream) -> Ptr { - let index = SlabIndex(self.slab.insert(val) as u32); - assert!(self.ids.insert(id, index).is_none()); - - Ptr { - key: Key { - index, - stream_id: id, - }, - store: self, - } - } - - pub fn find_entry(&mut self, id: StreamId) -> Entry { - use self::indexmap::map::Entry::*; - - match self.ids.entry(id) { - Occupied(e) => Entry::Occupied(OccupiedEntry { ids: e }), - Vacant(e) => Entry::Vacant(VacantEntry { - ids: e, - slab: &mut self.slab, - }), - } - } - - pub(crate) fn 
for_each(&mut self, mut f: F) - where - F: FnMut(Ptr), - { - match self.try_for_each(|ptr| { - f(ptr); - Ok::<_, Infallible>(()) - }) { - Ok(()) => (), - Err(infallible) => match infallible {}, - } - } - - pub fn try_for_each(&mut self, mut f: F) -> Result<(), E> - where - F: FnMut(Ptr) -> Result<(), E>, - { - let mut len = self.ids.len(); - let mut i = 0; - - while i < len { - // Get the key by index, this makes the borrow checker happy - let (stream_id, index) = { - let entry = self.ids.get_index(i).unwrap(); - (*entry.0, *entry.1) - }; - - f(Ptr { - key: Key { index, stream_id }, - store: self, - })?; - - // TODO: This logic probably could be better... - let new_len = self.ids.len(); - - if new_len < len { - debug_assert!(new_len == len - 1); - len -= 1; - } else { - i += 1; - } - } - - Ok(()) - } -} - -impl Resolve for Store { - fn resolve(&mut self, key: Key) -> Ptr { - Ptr { key, store: self } - } -} - -impl ops::Index for Store { - type Output = Stream; - - fn index(&self, key: Key) -> &Self::Output { - self.slab - .get(key.index.0 as usize) - .filter(|s| s.id == key.stream_id) - .unwrap_or_else(|| { - panic!("dangling store key for stream_id={:?}", key.stream_id); - }) - } -} - -impl ops::IndexMut for Store { - fn index_mut(&mut self, key: Key) -> &mut Self::Output { - self.slab - .get_mut(key.index.0 as usize) - .filter(|s| s.id == key.stream_id) - .unwrap_or_else(|| { - panic!("dangling store key for stream_id={:?}", key.stream_id); - }) - } -} - -impl Store { - #[cfg(feature = "unstable")] - pub fn num_active_streams(&self) -> usize { - self.ids.len() - } - - #[cfg(feature = "unstable")] - pub fn num_wired_streams(&self) -> usize { - self.slab.len() - } -} - -// While running h2 unit/integration tests, enable this debug assertion. -// -// In practice, we don't need to ensure this. But the integration tests -// help to make sure we've cleaned up in cases where we could (like, the -// runtime isn't suddenly dropping the task for unknown reasons). 
-#[cfg(feature = "unstable")] -impl Drop for Store { - fn drop(&mut self) { - use std::thread; - - if !thread::panicking() { - debug_assert!(self.slab.is_empty()); - } - } -} - -// ===== impl Queue ===== - -impl Queue -where - N: Next, -{ - pub fn new() -> Self { - Queue { - indices: None, - _p: PhantomData, - } - } - - pub fn take(&mut self) -> Self { - Queue { - indices: self.indices.take(), - _p: PhantomData, - } - } - - /// Queue the stream. - /// - /// If the stream is already contained by the list, return `false`. - pub fn push(&mut self, stream: &mut store::Ptr) -> bool { - tracing::trace!("Queue::push_back"); - - if N::is_queued(stream) { - tracing::trace!(" -> already queued"); - return false; - } - - N::set_queued(stream, true); - - // The next pointer shouldn't be set - debug_assert!(N::next(stream).is_none()); - - // Queue the stream - match self.indices { - Some(ref mut idxs) => { - tracing::trace!(" -> existing entries"); - - // Update the current tail node to point to `stream` - let key = stream.key(); - N::set_next(&mut stream.resolve(idxs.tail), Some(key)); - - // Update the tail pointer - idxs.tail = stream.key(); - } - None => { - tracing::trace!(" -> first entry"); - self.indices = Some(store::Indices { - head: stream.key(), - tail: stream.key(), - }); - } - } - - true - } - - /// Queue the stream - /// - /// If the stream is already contained by the list, return `false`. 
- pub fn push_front(&mut self, stream: &mut store::Ptr) -> bool { - tracing::trace!("Queue::push_front"); - - if N::is_queued(stream) { - tracing::trace!(" -> already queued"); - return false; - } - - N::set_queued(stream, true); - - // The next pointer shouldn't be set - debug_assert!(N::next(stream).is_none()); - - // Queue the stream - match self.indices { - Some(ref mut idxs) => { - tracing::trace!(" -> existing entries"); - - // Update the provided stream to point to the head node - let head_key = stream.resolve(idxs.head).key(); - N::set_next(stream, Some(head_key)); - - // Update the head pointer - idxs.head = stream.key(); - } - None => { - tracing::trace!(" -> first entry"); - self.indices = Some(store::Indices { - head: stream.key(), - tail: stream.key(), - }); - } - } - - true - } - - pub fn pop<'a, R>(&mut self, store: &'a mut R) -> Option> - where - R: Resolve, - { - if let Some(mut idxs) = self.indices { - let mut stream = store.resolve(idxs.head); - - if idxs.head == idxs.tail { - assert!(N::next(&stream).is_none()); - self.indices = None; - } else { - idxs.head = N::take_next(&mut stream).unwrap(); - self.indices = Some(idxs); - } - - debug_assert!(N::is_queued(&stream)); - N::set_queued(&mut stream, false); - - return Some(stream); - } - - None - } - - pub fn is_empty(&self) -> bool { - self.indices.is_none() - } - - pub fn pop_if<'a, R, F>(&mut self, store: &'a mut R, f: F) -> Option> - where - R: Resolve, - F: Fn(&Stream) -> bool, - { - if let Some(idxs) = self.indices { - let should_pop = f(&store.resolve(idxs.head)); - if should_pop { - return self.pop(store); - } - } - - None - } -} - -// ===== impl Ptr ===== - -impl<'a> Ptr<'a> { - /// Returns the Key associated with the stream - pub fn key(&self) -> Key { - self.key - } - - pub fn store_mut(&mut self) -> &mut Store { - self.store - } - - /// Remove the stream from the store - pub fn remove(self) -> StreamId { - // The stream must have been unlinked before this point - 
debug_assert!(!self.store.ids.contains_key(&self.key.stream_id)); - - // Remove the stream state - let stream = self.store.slab.remove(self.key.index.0 as usize); - assert_eq!(stream.id, self.key.stream_id); - stream.id - } - - /// Remove the StreamId -> stream state association. - /// - /// This will effectively remove the stream as far as the H2 protocol is - /// concerned. - pub fn unlink(&mut self) { - let id = self.key.stream_id; - self.store.ids.swap_remove(&id); - } -} - -impl<'a> Resolve for Ptr<'a> { - fn resolve(&mut self, key: Key) -> Ptr { - Ptr { - key, - store: &mut *self.store, - } - } -} - -impl<'a> ops::Deref for Ptr<'a> { - type Target = Stream; - - fn deref(&self) -> &Stream { - &self.store[self.key] - } -} - -impl<'a> ops::DerefMut for Ptr<'a> { - fn deref_mut(&mut self) -> &mut Stream { - &mut self.store[self.key] - } -} - -impl<'a> fmt::Debug for Ptr<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - (**self).fmt(fmt) - } -} - -// ===== impl OccupiedEntry ===== - -impl<'a> OccupiedEntry<'a> { - pub fn key(&self) -> Key { - let stream_id = *self.ids.key(); - let index = *self.ids.get(); - Key { index, stream_id } - } -} - -// ===== impl VacantEntry ===== - -impl<'a> VacantEntry<'a> { - pub fn insert(self, value: Stream) -> Key { - // Insert the value in the slab - let stream_id = value.id; - let index = SlabIndex(self.slab.insert(value) as u32); - - // Insert the handle in the ID map - self.ids.insert(index); - - Key { index, stream_id } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/stream.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/stream.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/stream.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,527 +0,0 @@ -use super::*; - -use std::task::{Context, Waker}; -use std::time::Instant; -use std::usize; - -/// Tracks Stream related state -/// -/// # 
Reference counting -/// -/// There can be a number of outstanding handles to a single Stream. These are -/// tracked using reference counting. The `ref_count` field represents the -/// number of outstanding userspace handles that can reach this stream. -/// -/// It's important to note that when the stream is placed in an internal queue -/// (such as an accept queue), this is **not** tracked by a reference count. -/// Thus, `ref_count` can be zero and the stream still has to be kept around. -#[derive(Debug)] -pub(super) struct Stream { - /// The h2 stream identifier - pub id: StreamId, - - /// Current state of the stream - pub state: State, - - /// Set to `true` when the stream is counted against the connection's max - /// concurrent streams. - pub is_counted: bool, - - /// Number of outstanding handles pointing to this stream - pub ref_count: usize, - - // ===== Fields related to sending ===== - /// Next node in the accept linked list - pub next_pending_send: Option, - - /// Set to true when the stream is pending accept - pub is_pending_send: bool, - - /// Send data flow control - pub send_flow: FlowControl, - - /// Amount of send capacity that has been requested, but not yet allocated. - pub requested_send_capacity: WindowSize, - - /// Amount of data buffered at the prioritization layer. - /// TODO: Technically this could be greater than the window size... - pub buffered_send_data: usize, - - /// Task tracking additional send capacity (i.e. window updates). - send_task: Option, - - /// Frames pending for this stream being sent to the socket - pub pending_send: buffer::Deque, - - /// Next node in the linked list of streams waiting for additional - /// connection level capacity. 
- pub next_pending_send_capacity: Option, - - /// True if the stream is waiting for outbound connection capacity - pub is_pending_send_capacity: bool, - - /// Set to true when the send capacity has been incremented - pub send_capacity_inc: bool, - - /// Next node in the open linked list - pub next_open: Option, - - /// Set to true when the stream is pending to be opened - pub is_pending_open: bool, - - /// Set to true when a push is pending for this stream - pub is_pending_push: bool, - - // ===== Fields related to receiving ===== - /// Next node in the accept linked list - pub next_pending_accept: Option, - - /// Set to true when the stream is pending accept - pub is_pending_accept: bool, - - /// Receive data flow control - pub recv_flow: FlowControl, - - pub in_flight_recv_data: WindowSize, - - /// Next node in the linked list of streams waiting to send window updates. - pub next_window_update: Option, - - /// True if the stream is waiting to send a window update - pub is_pending_window_update: bool, - - /// The time when this stream may have been locally reset. - pub reset_at: Option, - - /// Next node in list of reset streams that should expire eventually - pub next_reset_expire: Option, - - /// Frames pending for this stream to read - pub pending_recv: buffer::Deque, - - /// When the RecvStream drop occurs, no data should be received. 
- pub is_recv: bool, - - /// Task tracking receiving frames - pub recv_task: Option, - - /// The stream's pending push promises - pub pending_push_promises: store::Queue, - - /// Validate content-length headers - pub content_length: ContentLength, -} - -/// State related to validating a stream's content-length -#[derive(Debug)] -pub enum ContentLength { - Omitted, - Head, - Remaining(u64), -} - -#[derive(Debug)] -pub(super) struct NextAccept; - -#[derive(Debug)] -pub(super) struct NextSend; - -#[derive(Debug)] -pub(super) struct NextSendCapacity; - -#[derive(Debug)] -pub(super) struct NextWindowUpdate; - -#[derive(Debug)] -pub(super) struct NextOpen; - -#[derive(Debug)] -pub(super) struct NextResetExpire; - -impl Stream { - pub fn new(id: StreamId, init_send_window: WindowSize, init_recv_window: WindowSize) -> Stream { - let mut send_flow = FlowControl::new(); - let mut recv_flow = FlowControl::new(); - - recv_flow - .inc_window(init_recv_window) - .expect("invalid initial receive window"); - // TODO: proper error handling? 
- let _res = recv_flow.assign_capacity(init_recv_window); - debug_assert!(_res.is_ok()); - - send_flow - .inc_window(init_send_window) - .expect("invalid initial send window size"); - - Stream { - id, - state: State::default(), - ref_count: 0, - is_counted: false, - - // ===== Fields related to sending ===== - next_pending_send: None, - is_pending_send: false, - send_flow, - requested_send_capacity: 0, - buffered_send_data: 0, - send_task: None, - pending_send: buffer::Deque::new(), - is_pending_send_capacity: false, - next_pending_send_capacity: None, - send_capacity_inc: false, - is_pending_open: false, - next_open: None, - is_pending_push: false, - - // ===== Fields related to receiving ===== - next_pending_accept: None, - is_pending_accept: false, - recv_flow, - in_flight_recv_data: 0, - next_window_update: None, - is_pending_window_update: false, - reset_at: None, - next_reset_expire: None, - pending_recv: buffer::Deque::new(), - is_recv: true, - recv_task: None, - pending_push_promises: store::Queue::new(), - content_length: ContentLength::Omitted, - } - } - - /// Increment the stream's ref count - pub fn ref_inc(&mut self) { - assert!(self.ref_count < usize::MAX); - self.ref_count += 1; - } - - /// Decrements the stream's ref count - pub fn ref_dec(&mut self) { - assert!(self.ref_count > 0); - self.ref_count -= 1; - } - - /// Returns true if stream is currently being held for some time because of - /// a local reset. - pub fn is_pending_reset_expiration(&self) -> bool { - self.reset_at.is_some() - } - - /// Returns true if frames for this stream are ready to be sent over the wire - pub fn is_send_ready(&self) -> bool { - // Why do we check pending_open? 
- // - // We allow users to call send_request() which schedules a stream to be pending_open - // if there is no room according to the concurrency limit (max_send_streams), and we - // also allow data to be buffered for send with send_data() if there is no capacity for - // the stream to send the data, which attempts to place the stream in pending_send. - // If the stream is not open, we don't want the stream to be scheduled for - // execution (pending_send). Note that if the stream is in pending_open, it will be - // pushed to pending_send when there is room for an open stream. - // - // In pending_push we track whether a PushPromise still needs to be sent - // from a different stream before we can start sending frames on this one. - // This is different from the "open" check because reserved streams don't count - // toward the concurrency limit. - // See https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2 - !self.is_pending_open && !self.is_pending_push - } - - /// Returns true if the stream is closed - pub fn is_closed(&self) -> bool { - // The state has fully transitioned to closed. - self.state.is_closed() && - // Because outbound frames transition the stream state before being - // buffered, we have to ensure that all frames have been flushed. - self.pending_send.is_empty() && - // Sometimes large data frames are sent out in chunks. After a chunk - // of the frame is sent, the remainder is pushed back onto the send - // queue to be rescheduled. - // - // Checking for additional buffered data lets us catch this case. 
- self.buffered_send_data == 0 - } - - /// Returns true if the stream is no longer in use - pub fn is_released(&self) -> bool { - // The stream is closed and fully flushed - self.is_closed() && - // There are no more outstanding references to the stream - self.ref_count == 0 && - // The stream is not in any queue - !self.is_pending_send && !self.is_pending_send_capacity && - !self.is_pending_accept && !self.is_pending_window_update && - !self.is_pending_open && self.reset_at.is_none() - } - - /// Returns true when the consumer of the stream has dropped all handles - /// (indicating no further interest in the stream) and the stream state is - /// not actually closed. - /// - /// In this case, a reset should be sent. - pub fn is_canceled_interest(&self) -> bool { - self.ref_count == 0 && !self.state.is_closed() - } - - /// Current available stream send capacity - pub fn capacity(&self, max_buffer_size: usize) -> WindowSize { - let available = self.send_flow.available().as_size() as usize; - let buffered = self.buffered_send_data; - - available.min(max_buffer_size).saturating_sub(buffered) as WindowSize - } - - pub fn assign_capacity(&mut self, capacity: WindowSize, max_buffer_size: usize) { - let prev_capacity = self.capacity(max_buffer_size); - debug_assert!(capacity > 0); - // TODO: proper error handling - let _res = self.send_flow.assign_capacity(capacity); - debug_assert!(_res.is_ok()); - - tracing::trace!( - " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", - self.send_flow.available(), - self.buffered_send_data, - self.id, - max_buffer_size, - prev_capacity, - ); - - if prev_capacity < self.capacity(max_buffer_size) { - self.notify_capacity(); - } - } - - pub fn send_data(&mut self, len: WindowSize, max_buffer_size: usize) { - let prev_capacity = self.capacity(max_buffer_size); - - // TODO: proper error handling - let _res = self.send_flow.send_data(len); - debug_assert!(_res.is_ok()); - - // Decrement the stream's 
buffered data counter - debug_assert!(self.buffered_send_data >= len as usize); - self.buffered_send_data -= len as usize; - self.requested_send_capacity -= len; - - tracing::trace!( - " sent stream data; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", - self.send_flow.available(), - self.buffered_send_data, - self.id, - max_buffer_size, - prev_capacity, - ); - - if prev_capacity < self.capacity(max_buffer_size) { - self.notify_capacity(); - } - } - - /// If the capacity was limited because of the max_send_buffer_size, - /// then consider waking the send task again... - pub fn notify_capacity(&mut self) { - self.send_capacity_inc = true; - tracing::trace!(" notifying task"); - self.notify_send(); - } - - /// Returns `Err` when the decrement cannot be completed due to overflow. - pub fn dec_content_length(&mut self, len: usize) -> Result<(), ()> { - match self.content_length { - ContentLength::Remaining(ref mut rem) => match rem.checked_sub(len as u64) { - Some(val) => *rem = val, - None => return Err(()), - }, - ContentLength::Head => { - if len != 0 { - return Err(()); - } - } - _ => {} - } - - Ok(()) - } - - pub fn ensure_content_length_zero(&self) -> Result<(), ()> { - match self.content_length { - ContentLength::Remaining(0) => Ok(()), - ContentLength::Remaining(_) => Err(()), - _ => Ok(()), - } - } - - pub fn notify_send(&mut self) { - if let Some(task) = self.send_task.take() { - task.wake(); - } - } - - pub fn wait_send(&mut self, cx: &Context) { - self.send_task = Some(cx.waker().clone()); - } - - pub fn notify_recv(&mut self) { - if let Some(task) = self.recv_task.take() { - task.wake(); - } - } -} - -impl store::Next for NextAccept { - fn next(stream: &Stream) -> Option { - stream.next_pending_accept - } - - fn set_next(stream: &mut Stream, key: Option) { - stream.next_pending_accept = key; - } - - fn take_next(stream: &mut Stream) -> Option { - stream.next_pending_accept.take() - } - - fn is_queued(stream: &Stream) -> bool { - 
stream.is_pending_accept - } - - fn set_queued(stream: &mut Stream, val: bool) { - stream.is_pending_accept = val; - } -} - -impl store::Next for NextSend { - fn next(stream: &Stream) -> Option { - stream.next_pending_send - } - - fn set_next(stream: &mut Stream, key: Option) { - stream.next_pending_send = key; - } - - fn take_next(stream: &mut Stream) -> Option { - stream.next_pending_send.take() - } - - fn is_queued(stream: &Stream) -> bool { - stream.is_pending_send - } - - fn set_queued(stream: &mut Stream, val: bool) { - if val { - // ensure that stream is not queued for being opened - // if it's being put into queue for sending data - debug_assert!(!stream.is_pending_open); - } - stream.is_pending_send = val; - } -} - -impl store::Next for NextSendCapacity { - fn next(stream: &Stream) -> Option { - stream.next_pending_send_capacity - } - - fn set_next(stream: &mut Stream, key: Option) { - stream.next_pending_send_capacity = key; - } - - fn take_next(stream: &mut Stream) -> Option { - stream.next_pending_send_capacity.take() - } - - fn is_queued(stream: &Stream) -> bool { - stream.is_pending_send_capacity - } - - fn set_queued(stream: &mut Stream, val: bool) { - stream.is_pending_send_capacity = val; - } -} - -impl store::Next for NextWindowUpdate { - fn next(stream: &Stream) -> Option { - stream.next_window_update - } - - fn set_next(stream: &mut Stream, key: Option) { - stream.next_window_update = key; - } - - fn take_next(stream: &mut Stream) -> Option { - stream.next_window_update.take() - } - - fn is_queued(stream: &Stream) -> bool { - stream.is_pending_window_update - } - - fn set_queued(stream: &mut Stream, val: bool) { - stream.is_pending_window_update = val; - } -} - -impl store::Next for NextOpen { - fn next(stream: &Stream) -> Option { - stream.next_open - } - - fn set_next(stream: &mut Stream, key: Option) { - stream.next_open = key; - } - - fn take_next(stream: &mut Stream) -> Option { - stream.next_open.take() - } - - fn is_queued(stream: 
&Stream) -> bool { - stream.is_pending_open - } - - fn set_queued(stream: &mut Stream, val: bool) { - if val { - // ensure that stream is not queued for being sent - // if it's being put into queue for opening the stream - debug_assert!(!stream.is_pending_send); - } - stream.is_pending_open = val; - } -} - -impl store::Next for NextResetExpire { - fn next(stream: &Stream) -> Option { - stream.next_reset_expire - } - - fn set_next(stream: &mut Stream, key: Option) { - stream.next_reset_expire = key; - } - - fn take_next(stream: &mut Stream) -> Option { - stream.next_reset_expire.take() - } - - fn is_queued(stream: &Stream) -> bool { - stream.reset_at.is_some() - } - - fn set_queued(stream: &mut Stream, val: bool) { - if val { - stream.reset_at = Some(Instant::now()); - } else { - stream.reset_at = None; - } - } -} - -// ===== impl ContentLength ===== - -impl ContentLength { - pub fn is_head(&self) -> bool { - matches!(*self, Self::Head) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/streams.rs s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/streams.rs --- s390-tools-2.31.0/rust-vendor/h2/src/proto/streams/streams.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/proto/streams/streams.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1594 +0,0 @@ -use super::recv::RecvHeaderBlockError; -use super::store::{self, Entry, Resolve, Store}; -use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId}; -use crate::codec::{Codec, SendError, UserError}; -use crate::ext::Protocol; -use crate::frame::{self, Frame, Reason}; -use crate::proto::{peer, Error, Initiator, Open, Peer, WindowSize}; -use crate::{client, proto, server}; - -use bytes::{Buf, Bytes}; -use http::{HeaderMap, Request, Response}; -use std::task::{Context, Poll, Waker}; -use tokio::io::AsyncWrite; - -use std::sync::{Arc, Mutex}; -use std::{fmt, io}; - -#[derive(Debug)] -pub(crate) struct Streams -where - P: Peer, -{ - /// Holds most of the 
connection and stream related state for processing - /// HTTP/2 frames associated with streams. - inner: Arc>, - - /// This is the queue of frames to be written to the wire. This is split out - /// to avoid requiring a `B` generic on all public API types even if `B` is - /// not technically required. - /// - /// Currently, splitting this out requires a second `Arc` + `Mutex`. - /// However, it should be possible to avoid this duplication with a little - /// bit of unsafe code. This optimization has been postponed until it has - /// been shown to be necessary. - send_buffer: Arc>, - - _p: ::std::marker::PhantomData

, -} - -// Like `Streams` but with a `peer::Dyn` field instead of a static `P: Peer` type parameter. -// Ensures that the methods only get one instantiation, instead of two (client and server) -#[derive(Debug)] -pub(crate) struct DynStreams<'a, B> { - inner: &'a Mutex, - - send_buffer: &'a SendBuffer, - - peer: peer::Dyn, -} - -/// Reference to the stream state -#[derive(Debug)] -pub(crate) struct StreamRef { - opaque: OpaqueStreamRef, - send_buffer: Arc>, -} - -/// Reference to the stream state that hides the send data chunk generic -pub(crate) struct OpaqueStreamRef { - inner: Arc>, - key: store::Key, -} - -/// Fields needed to manage state related to managing the set of streams. This -/// is mostly split out to make ownership happy. -/// -/// TODO: better name -#[derive(Debug)] -struct Inner { - /// Tracks send & recv stream concurrency. - counts: Counts, - - /// Connection level state and performs actions on streams - actions: Actions, - - /// Stores stream state - store: Store, - - /// The number of stream refs to this shared state. - refs: usize, -} - -#[derive(Debug)] -struct Actions { - /// Manages state transitions initiated by receiving frames - recv: Recv, - - /// Manages state transitions initiated by sending frames - send: Send, - - /// Task that calls `poll_complete`. - task: Option, - - /// If the connection errors, a copy is kept for any StreamRefs. - conn_error: Option, -} - -/// Contains the buffer of frames to be written to the wire. 
-#[derive(Debug)] -struct SendBuffer { - inner: Mutex>>, -} - -// ===== impl Streams ===== - -impl Streams -where - B: Buf, - P: Peer, -{ - pub fn new(config: Config) -> Self { - let peer = P::r#dyn(); - - Streams { - inner: Inner::new(peer, config), - send_buffer: Arc::new(SendBuffer::new()), - _p: ::std::marker::PhantomData, - } - } - - pub fn set_target_connection_window_size(&mut self, size: WindowSize) -> Result<(), Reason> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - me.actions - .recv - .set_target_connection_window(size, &mut me.actions.task) - } - - pub fn next_incoming(&mut self) -> Option> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - me.actions.recv.next_incoming(&mut me.store).map(|key| { - let stream = &mut me.store.resolve(key); - tracing::trace!( - "next_incoming; id={:?}, state={:?}", - stream.id, - stream.state - ); - // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding - // the lock, so it can't. - me.refs += 1; - - // Pending-accepted remotely-reset streams are counted. 
- if stream.state.is_remote_reset() { - me.counts.dec_num_remote_reset_streams(); - } - - StreamRef { - opaque: OpaqueStreamRef::new(self.inner.clone(), stream), - send_buffer: self.send_buffer.clone(), - } - }) - } - - pub fn send_pending_refusal( - &mut self, - cx: &mut Context, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - me.actions.recv.send_pending_refusal(cx, dst) - } - - pub fn clear_expired_reset_streams(&mut self) { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - me.actions - .recv - .clear_expired_reset_streams(&mut me.store, &mut me.counts); - } - - pub fn poll_complete( - &mut self, - cx: &mut Context, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - { - let mut me = self.inner.lock().unwrap(); - me.poll_complete(&self.send_buffer, cx, dst) - } - - pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), Error> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.counts.apply_remote_settings(frame); - - me.actions.send.apply_remote_settings( - frame, - send_buffer, - &mut me.store, - &mut me.counts, - &mut me.actions.task, - ) - } - - pub fn apply_local_settings(&mut self, frame: &frame::Settings) -> Result<(), Error> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - me.actions.recv.apply_local_settings(frame, &mut me.store) - } - - pub fn send_request( - &mut self, - mut request: Request<()>, - end_of_stream: bool, - pending: Option<&OpaqueStreamRef>, - ) -> Result<(StreamRef, bool), SendError> { - use super::stream::ContentLength; - use http::Method; - - let protocol = request.extensions_mut().remove::(); - - // Clear before taking lock, incase extensions contain a StreamRef. 
- request.extensions_mut().clear(); - - // TODO: There is a hazard with assigning a stream ID before the - // prioritize layer. If prioritization reorders new streams, this - // implicitly closes the earlier stream IDs. - // - // See: hyperium/h2#11 - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.actions.ensure_no_conn_error()?; - me.actions.send.ensure_next_stream_id()?; - - // The `pending` argument is provided by the `Client`, and holds - // a store `Key` of a `Stream` that may have been not been opened - // yet. - // - // If that stream is still pending, the Client isn't allowed to - // queue up another pending stream. They should use `poll_ready`. - if let Some(stream) = pending { - if me.store.resolve(stream.key).is_pending_open { - return Err(UserError::Rejected.into()); - } - } - - if me.counts.peer().is_server() { - // Servers cannot open streams. PushPromise must first be reserved. - return Err(UserError::UnexpectedFrameType.into()); - } - - let stream_id = me.actions.send.open()?; - - let mut stream = Stream::new( - stream_id, - me.actions.send.init_window_sz(), - me.actions.recv.init_window_sz(), - ); - - if *request.method() == Method::HEAD { - stream.content_length = ContentLength::Head; - } - - // Convert the message - let headers = - client::Peer::convert_send_message(stream_id, request, protocol, end_of_stream)?; - - let mut stream = me.store.insert(stream.id, stream); - - let sent = me.actions.send.send_headers( - headers, - send_buffer, - &mut stream, - &mut me.counts, - &mut me.actions.task, - ); - - // send_headers can return a UserError, if it does, - // we should forget about this stream. - if let Err(err) = sent { - stream.unlink(); - stream.remove(); - return Err(err.into()); - } - - // Given that the stream has been initialized, it should not be in the - // closed state. 
- debug_assert!(!stream.state.is_closed()); - - // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding - // the lock, so it can't. - me.refs += 1; - - let is_full = me.counts.next_send_stream_will_reach_capacity(); - Ok(( - StreamRef { - opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream), - send_buffer: self.send_buffer.clone(), - }, - is_full, - )) - } - - pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { - self.inner - .lock() - .unwrap() - .actions - .send - .is_extended_connect_protocol_enabled() - } -} - -impl DynStreams<'_, B> { - pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), Error> { - let mut me = self.inner.lock().unwrap(); - - me.recv_headers(self.peer, self.send_buffer, frame) - } - - pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), Error> { - let mut me = self.inner.lock().unwrap(); - me.recv_data(self.peer, self.send_buffer, frame) - } - - pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), Error> { - let mut me = self.inner.lock().unwrap(); - - me.recv_reset(self.send_buffer, frame) - } - - /// Notify all streams that a connection-level error happened. 
- pub fn handle_error(&mut self, err: proto::Error) -> StreamId { - let mut me = self.inner.lock().unwrap(); - me.handle_error(self.send_buffer, err) - } - - pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), Error> { - let mut me = self.inner.lock().unwrap(); - me.recv_go_away(self.send_buffer, frame) - } - - pub fn last_processed_id(&self) -> StreamId { - self.inner.lock().unwrap().actions.recv.last_processed_id() - } - - pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), Error> { - let mut me = self.inner.lock().unwrap(); - me.recv_window_update(self.send_buffer, frame) - } - - pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), Error> { - let mut me = self.inner.lock().unwrap(); - me.recv_push_promise(self.send_buffer, frame) - } - - pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { - let mut me = self.inner.lock().map_err(|_| ())?; - me.recv_eof(self.send_buffer, clear_pending_accept) - } - - pub fn send_reset(&mut self, id: StreamId, reason: Reason) { - let mut me = self.inner.lock().unwrap(); - me.send_reset(self.send_buffer, id, reason) - } - - pub fn send_go_away(&mut self, last_processed_id: StreamId) { - let mut me = self.inner.lock().unwrap(); - me.actions.recv.go_away(last_processed_id); - } -} - -impl Inner { - fn new(peer: peer::Dyn, config: Config) -> Arc> { - Arc::new(Mutex::new(Inner { - counts: Counts::new(peer, &config), - actions: Actions { - recv: Recv::new(peer, &config), - send: Send::new(&config), - task: None, - conn_error: None, - }, - store: Store::new(), - refs: 1, - })) - } - - fn recv_headers( - &mut self, - peer: peer::Dyn, - send_buffer: &SendBuffer, - frame: frame::Headers, - ) -> Result<(), Error> { - let id = frame.stream_id(); - - // The GOAWAY process has begun. All streams with a greater ID than - // specified as part of GOAWAY should be ignored. 
- if id > self.actions.recv.max_stream_id() { - tracing::trace!( - "id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", - id, - self.actions.recv.max_stream_id() - ); - return Ok(()); - } - - let key = match self.store.find_entry(id) { - Entry::Occupied(e) => e.key(), - Entry::Vacant(e) => { - // Client: it's possible to send a request, and then send - // a RST_STREAM while the response HEADERS were in transit. - // - // Server: we can't reset a stream before having received - // the request headers, so don't allow. - if !peer.is_server() { - // This may be response headers for a stream we've already - // forgotten about... - if self.actions.may_have_forgotten_stream(peer, id) { - tracing::debug!( - "recv_headers for old stream={:?}, sending STREAM_CLOSED", - id, - ); - return Err(Error::library_reset(id, Reason::STREAM_CLOSED)); - } - } - - match self - .actions - .recv - .open(id, Open::Headers, &mut self.counts)? - { - Some(stream_id) => { - let stream = Stream::new( - stream_id, - self.actions.send.init_window_sz(), - self.actions.recv.init_window_sz(), - ); - - e.insert(stream) - } - None => return Ok(()), - } - } - }; - - let stream = self.store.resolve(key); - - if stream.state.is_local_error() { - // Locally reset streams must ignore frames "for some time". - // This is because the remote may have sent trailers before - // receiving the RST_STREAM frame. 
- tracing::trace!("recv_headers; ignoring trailers on {:?}", stream.id); - return Ok(()); - } - - let actions = &mut self.actions; - let mut send_buffer = send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - self.counts.transition(stream, |counts, stream| { - tracing::trace!( - "recv_headers; stream={:?}; state={:?}", - stream.id, - stream.state - ); - - let res = if stream.state.is_recv_headers() { - match actions.recv.recv_headers(frame, stream, counts) { - Ok(()) => Ok(()), - Err(RecvHeaderBlockError::Oversize(resp)) => { - if let Some(resp) = resp { - let sent = actions.send.send_headers( - resp, send_buffer, stream, counts, &mut actions.task); - debug_assert!(sent.is_ok(), "oversize response should not fail"); - - actions.send.schedule_implicit_reset( - stream, - Reason::REFUSED_STREAM, - counts, - &mut actions.task); - - actions.recv.enqueue_reset_expiration(stream, counts); - - Ok(()) - } else { - Err(Error::library_reset(stream.id, Reason::REFUSED_STREAM)) - } - }, - Err(RecvHeaderBlockError::State(err)) => Err(err), - } - } else { - if !frame.is_end_stream() { - // Receiving trailers that don't set EOS is a "malformed" - // message. Malformed messages are a stream error. - proto_err!(stream: "recv_headers: trailers frame was not EOS; stream={:?}", stream.id); - return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); - } - - actions.recv.recv_trailers(frame, stream) - }; - - actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) - }) - } - - fn recv_data( - &mut self, - peer: peer::Dyn, - send_buffer: &SendBuffer, - frame: frame::Data, - ) -> Result<(), Error> { - let id = frame.stream_id(); - - let stream = match self.store.find_mut(&id) { - Some(stream) => stream, - None => { - // The GOAWAY process has begun. All streams with a greater ID - // than specified as part of GOAWAY should be ignored. 
- if id > self.actions.recv.max_stream_id() { - tracing::trace!( - "id ({:?}) > max_stream_id ({:?}), ignoring DATA", - id, - self.actions.recv.max_stream_id() - ); - return Ok(()); - } - - if self.actions.may_have_forgotten_stream(peer, id) { - tracing::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,); - - let sz = frame.payload().len(); - // This should have been enforced at the codec::FramedRead layer, so - // this is just a sanity check. - assert!(sz <= super::MAX_WINDOW_SIZE as usize); - let sz = sz as WindowSize; - - self.actions.recv.ignore_data(sz)?; - return Err(Error::library_reset(id, Reason::STREAM_CLOSED)); - } - - proto_err!(conn: "recv_data: stream not found; id={:?}", id); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - }; - - let actions = &mut self.actions; - let mut send_buffer = send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - self.counts.transition(stream, |counts, stream| { - let sz = frame.payload().len(); - let res = actions.recv.recv_data(frame, stream); - - // Any stream error after receiving a DATA frame means - // we won't give the data to the user, and so they can't - // release the capacity. We do it automatically. - if let Err(Error::Reset(..)) = res { - actions - .recv - .release_connection_capacity(sz as WindowSize, &mut None); - } - actions.reset_on_recv_stream_err(send_buffer, stream, counts, res) - }) - } - - fn recv_reset( - &mut self, - send_buffer: &SendBuffer, - frame: frame::Reset, - ) -> Result<(), Error> { - let id = frame.stream_id(); - - if id.is_zero() { - proto_err!(conn: "recv_reset: invalid stream ID 0"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - // The GOAWAY process has begun. All streams with a greater ID than - // specified as part of GOAWAY should be ignored. 
- if id > self.actions.recv.max_stream_id() { - tracing::trace!( - "id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", - id, - self.actions.recv.max_stream_id() - ); - return Ok(()); - } - - let stream = match self.store.find_mut(&id) { - Some(stream) => stream, - None => { - // TODO: Are there other error cases? - self.actions - .ensure_not_idle(self.counts.peer(), id) - .map_err(Error::library_go_away)?; - - return Ok(()); - } - }; - - let mut send_buffer = send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - let actions = &mut self.actions; - - self.counts.transition(stream, |counts, stream| { - actions.recv.recv_reset(frame, stream, counts)?; - actions.send.handle_error(send_buffer, stream, counts); - assert!(stream.state.is_closed()); - Ok(()) - }) - } - - fn recv_window_update( - &mut self, - send_buffer: &SendBuffer, - frame: frame::WindowUpdate, - ) -> Result<(), Error> { - let id = frame.stream_id(); - - let mut send_buffer = send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - if id.is_zero() { - self.actions - .send - .recv_connection_window_update(frame, &mut self.store, &mut self.counts) - .map_err(Error::library_go_away)?; - } else { - // The remote may send window updates for streams that the local now - // considers closed. It's ok... - if let Some(mut stream) = self.store.find_mut(&id) { - // This result is ignored as there is nothing to do when there - // is an error. The stream is reset by the function on error and - // the error is informational. 
- let _ = self.actions.send.recv_stream_window_update( - frame.size_increment(), - send_buffer, - &mut stream, - &mut self.counts, - &mut self.actions.task, - ); - } else { - self.actions - .ensure_not_idle(self.counts.peer(), id) - .map_err(Error::library_go_away)?; - } - } - - Ok(()) - } - - fn handle_error(&mut self, send_buffer: &SendBuffer, err: proto::Error) -> StreamId { - let actions = &mut self.actions; - let counts = &mut self.counts; - let mut send_buffer = send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - let last_processed_id = actions.recv.last_processed_id(); - - self.store.for_each(|stream| { - counts.transition(stream, |counts, stream| { - actions.recv.handle_error(&err, &mut *stream); - actions.send.handle_error(send_buffer, stream, counts); - }) - }); - - actions.conn_error = Some(err); - - last_processed_id - } - - fn recv_go_away( - &mut self, - send_buffer: &SendBuffer, - frame: &frame::GoAway, - ) -> Result<(), Error> { - let actions = &mut self.actions; - let counts = &mut self.counts; - let mut send_buffer = send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - let last_stream_id = frame.last_stream_id(); - - actions.send.recv_go_away(last_stream_id)?; - - let err = Error::remote_go_away(frame.debug_data().clone(), frame.reason()); - - self.store.for_each(|stream| { - if stream.id > last_stream_id { - counts.transition(stream, |counts, stream| { - actions.recv.handle_error(&err, &mut *stream); - actions.send.handle_error(send_buffer, stream, counts); - }) - } - }); - - actions.conn_error = Some(err); - - Ok(()) - } - - fn recv_push_promise( - &mut self, - send_buffer: &SendBuffer, - frame: frame::PushPromise, - ) -> Result<(), Error> { - let id = frame.stream_id(); - let promised_id = frame.promised_id(); - - // First, ensure that the initiating stream is still in a valid state. - let parent_key = match self.store.find_mut(&id) { - Some(stream) => { - // The GOAWAY process has begun. 
All streams with a greater ID - // than specified as part of GOAWAY should be ignored. - if id > self.actions.recv.max_stream_id() { - tracing::trace!( - "id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", - id, - self.actions.recv.max_stream_id() - ); - return Ok(()); - } - - // The stream must be receive open - if !stream.state.ensure_recv_open()? { - proto_err!(conn: "recv_push_promise: initiating stream is not opened"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - - stream.key() - } - None => { - proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); - } - }; - - // TODO: Streams in the reserved states do not count towards the concurrency - // limit. However, it seems like there should be a cap otherwise this - // could grow in memory indefinitely. - - // Ensure that we can reserve streams - self.actions.recv.ensure_can_reserve()?; - - // Next, open the stream. - // - // If `None` is returned, then the stream is being refused. There is no - // further work to be done. - if self - .actions - .recv - .open(promised_id, Open::PushPromise, &mut self.counts)? - .is_none() - { - return Ok(()); - } - - // Try to handle the frame and create a corresponding key for the pushed stream - // this requires a bit of indirection to make the borrow checker happy. 
- let child_key: Option = { - // Create state for the stream - let stream = self.store.insert(promised_id, { - Stream::new( - promised_id, - self.actions.send.init_window_sz(), - self.actions.recv.init_window_sz(), - ) - }); - - let actions = &mut self.actions; - - self.counts.transition(stream, |counts, stream| { - let stream_valid = actions.recv.recv_push_promise(frame, stream); - - match stream_valid { - Ok(()) => Ok(Some(stream.key())), - _ => { - let mut send_buffer = send_buffer.inner.lock().unwrap(); - actions - .reset_on_recv_stream_err( - &mut *send_buffer, - stream, - counts, - stream_valid, - ) - .map(|()| None) - } - } - })? - }; - // If we're successful, push the headers and stream... - if let Some(child) = child_key { - let mut ppp = self.store[parent_key].pending_push_promises.take(); - ppp.push(&mut self.store.resolve(child)); - - let parent = &mut self.store.resolve(parent_key); - parent.pending_push_promises = ppp; - parent.notify_recv(); - }; - - Ok(()) - } - - fn recv_eof( - &mut self, - send_buffer: &SendBuffer, - clear_pending_accept: bool, - ) -> Result<(), ()> { - let actions = &mut self.actions; - let counts = &mut self.counts; - let mut send_buffer = send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - if actions.conn_error.is_none() { - actions.conn_error = Some( - io::Error::new( - io::ErrorKind::BrokenPipe, - "connection closed because of a broken pipe", - ) - .into(), - ); - } - - tracing::trace!("Streams::recv_eof"); - - self.store.for_each(|stream| { - counts.transition(stream, |counts, stream| { - actions.recv.recv_eof(stream); - - // This handles resetting send state associated with the - // stream - actions.send.handle_error(send_buffer, stream, counts); - }) - }); - - actions.clear_queues(clear_pending_accept, &mut self.store, counts); - Ok(()) - } - - fn poll_complete( - &mut self, - send_buffer: &SendBuffer, - cx: &mut Context, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - B: Buf, 
- { - let mut send_buffer = send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - // Send WINDOW_UPDATE frames first - // - // TODO: It would probably be better to interleave updates w/ data - // frames. - ready!(self - .actions - .recv - .poll_complete(cx, &mut self.store, &mut self.counts, dst))?; - - // Send any other pending frames - ready!(self.actions.send.poll_complete( - cx, - send_buffer, - &mut self.store, - &mut self.counts, - dst - ))?; - - // Nothing else to do, track the task - self.actions.task = Some(cx.waker().clone()); - - Poll::Ready(Ok(())) - } - - fn send_reset(&mut self, send_buffer: &SendBuffer, id: StreamId, reason: Reason) { - let key = match self.store.find_entry(id) { - Entry::Occupied(e) => e.key(), - Entry::Vacant(e) => { - // Resetting a stream we don't know about? That could be OK... - // - // 1. As a server, we just received a request, but that request - // was bad, so we're resetting before even accepting it. - // This is totally fine. - // - // 2. The remote may have sent us a frame on new stream that - // it's *not* supposed to have done, and thus, we don't know - // the stream. In that case, sending a reset will "open" the - // stream in our store. Maybe that should be a connection - // error instead? At least for now, we need to update what - // our vision of the next stream is. - if self.counts.peer().is_local_init(id) { - // We normally would open this stream, so update our - // next-send-id record. - self.actions.send.maybe_reset_next_stream_id(id); - } else { - // We normally would recv this stream, so update our - // next-recv-id record. 
- self.actions.recv.maybe_reset_next_stream_id(id); - } - - let stream = Stream::new(id, 0, 0); - - e.insert(stream) - } - }; - - let stream = self.store.resolve(key); - let mut send_buffer = send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - self.actions.send_reset( - stream, - reason, - Initiator::Library, - &mut self.counts, - send_buffer, - ); - } -} - -impl Streams -where - B: Buf, -{ - pub fn poll_pending_open( - &mut self, - cx: &Context, - pending: Option<&OpaqueStreamRef>, - ) -> Poll> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - me.actions.ensure_no_conn_error()?; - me.actions.send.ensure_next_stream_id()?; - - if let Some(pending) = pending { - let mut stream = me.store.resolve(pending.key); - tracing::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open); - if stream.is_pending_open { - stream.wait_send(cx); - return Poll::Pending; - } - } - Poll::Ready(Ok(())) - } -} - -impl Streams -where - P: Peer, -{ - pub fn as_dyn(&self) -> DynStreams { - let Self { - inner, - send_buffer, - _p, - } = self; - DynStreams { - inner, - send_buffer, - peer: P::r#dyn(), - } - } - - /// This function is safe to call multiple times. - /// - /// A `Result` is returned to avoid panicking if the mutex is poisoned. 
- pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { - self.as_dyn().recv_eof(clear_pending_accept) - } - - pub(crate) fn max_send_streams(&self) -> usize { - self.inner.lock().unwrap().counts.max_send_streams() - } - - pub(crate) fn max_recv_streams(&self) -> usize { - self.inner.lock().unwrap().counts.max_recv_streams() - } - - #[cfg(feature = "unstable")] - pub fn num_active_streams(&self) -> usize { - let me = self.inner.lock().unwrap(); - me.store.num_active_streams() - } - - pub fn has_streams(&self) -> bool { - let me = self.inner.lock().unwrap(); - me.counts.has_streams() - } - - pub fn has_streams_or_other_references(&self) -> bool { - let me = self.inner.lock().unwrap(); - me.counts.has_streams() || me.refs > 1 - } - - #[cfg(feature = "unstable")] - pub fn num_wired_streams(&self) -> usize { - let me = self.inner.lock().unwrap(); - me.store.num_wired_streams() - } -} - -// no derive because we don't need B and P to be Clone. -impl Clone for Streams -where - P: Peer, -{ - fn clone(&self) -> Self { - self.inner.lock().unwrap().refs += 1; - Streams { - inner: self.inner.clone(), - send_buffer: self.send_buffer.clone(), - _p: ::std::marker::PhantomData, - } - } -} - -impl Drop for Streams -where - P: Peer, -{ - fn drop(&mut self) { - if let Ok(mut inner) = self.inner.lock() { - inner.refs -= 1; - if inner.refs == 1 { - if let Some(task) = inner.actions.task.take() { - task.wake(); - } - } - } - } -} - -// ===== impl StreamRef ===== - -impl StreamRef { - pub fn send_data(&mut self, data: B, end_stream: bool) -> Result<(), UserError> - where - B: Buf, - { - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let stream = me.store.resolve(self.opaque.key); - let actions = &mut me.actions; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.counts.transition(stream, |counts, stream| { - // Create the data frame - let mut frame = frame::Data::new(stream.id, 
data); - frame.set_end_stream(end_stream); - - // Send the data frame - actions - .send - .send_data(frame, send_buffer, stream, counts, &mut actions.task) - }) - } - - pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), UserError> { - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let stream = me.store.resolve(self.opaque.key); - let actions = &mut me.actions; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.counts.transition(stream, |counts, stream| { - // Create the trailers frame - let frame = frame::Headers::trailers(stream.id, trailers); - - // Send the trailers frame - actions - .send - .send_trailers(frame, send_buffer, stream, counts, &mut actions.task) - }) - } - - pub fn send_reset(&mut self, reason: Reason) { - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let stream = me.store.resolve(self.opaque.key); - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.actions - .send_reset(stream, reason, Initiator::User, &mut me.counts, send_buffer); - } - - pub fn send_response( - &mut self, - mut response: Response<()>, - end_of_stream: bool, - ) -> Result<(), UserError> { - // Clear before taking lock, incase extensions contain a StreamRef. 
- response.extensions_mut().clear(); - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let stream = me.store.resolve(self.opaque.key); - let actions = &mut me.actions; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.counts.transition(stream, |counts, stream| { - let frame = server::Peer::convert_send_message(stream.id, response, end_of_stream); - - actions - .send - .send_headers(frame, send_buffer, stream, counts, &mut actions.task) - }) - } - - pub fn send_push_promise( - &mut self, - mut request: Request<()>, - ) -> Result, UserError> { - // Clear before taking lock, incase extensions contain a StreamRef. - request.extensions_mut().clear(); - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - let actions = &mut me.actions; - let promised_id = actions.send.reserve_local()?; - - let child_key = { - let mut child_stream = me.store.insert( - promised_id, - Stream::new( - promised_id, - actions.send.init_window_sz(), - actions.recv.init_window_sz(), - ), - ); - child_stream.state.reserve_local()?; - child_stream.is_pending_push = true; - child_stream.key() - }; - - let pushed = { - let mut stream = me.store.resolve(self.opaque.key); - - let frame = crate::server::Peer::convert_push_message(stream.id, promised_id, request)?; - - actions - .send - .send_push_promise(frame, send_buffer, &mut stream, &mut actions.task) - }; - - if let Err(err) = pushed { - let mut child_stream = me.store.resolve(child_key); - child_stream.unlink(); - child_stream.remove(); - return Err(err); - } - - me.refs += 1; - let opaque = - OpaqueStreamRef::new(self.opaque.inner.clone(), &mut me.store.resolve(child_key)); - - Ok(StreamRef { - opaque, - send_buffer: self.send_buffer.clone(), - }) - } - - /// Called by the server after the stream is accepted. 
Given that clients - /// initialize streams by sending HEADERS, the request will always be - /// available. - /// - /// # Panics - /// - /// This function panics if the request isn't present. - pub fn take_request(&self) -> Request<()> { - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.opaque.key); - me.actions.recv.take_request(&mut stream) - } - - /// Called by a client to see if the current stream is pending open - pub fn is_pending_open(&self) -> bool { - let mut me = self.opaque.inner.lock().unwrap(); - me.store.resolve(self.opaque.key).is_pending_open - } - - /// Request capacity to send data - pub fn reserve_capacity(&mut self, capacity: WindowSize) { - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.opaque.key); - - me.actions - .send - .reserve_capacity(capacity, &mut stream, &mut me.counts) - } - - /// Returns the stream's current send capacity. - pub fn capacity(&self) -> WindowSize { - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.opaque.key); - - me.actions.send.capacity(&mut stream) - } - - /// Request to be notified when the stream's capacity increases - pub fn poll_capacity(&mut self, cx: &Context) -> Poll>> { - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.opaque.key); - - me.actions.send.poll_capacity(cx, &mut stream) - } - - /// Request to be notified for if a `RST_STREAM` is received for this stream. 
- pub(crate) fn poll_reset( - &mut self, - cx: &Context, - mode: proto::PollReset, - ) -> Poll> { - let mut me = self.opaque.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.opaque.key); - - me.actions - .send - .poll_reset(cx, &mut stream, mode) - .map_err(From::from) - } - - pub fn clone_to_opaque(&self) -> OpaqueStreamRef { - self.opaque.clone() - } - - pub fn stream_id(&self) -> StreamId { - self.opaque.stream_id() - } -} - -impl Clone for StreamRef { - fn clone(&self) -> Self { - StreamRef { - opaque: self.opaque.clone(), - send_buffer: self.send_buffer.clone(), - } - } -} - -// ===== impl OpaqueStreamRef ===== - -impl OpaqueStreamRef { - fn new(inner: Arc>, stream: &mut store::Ptr) -> OpaqueStreamRef { - stream.ref_inc(); - OpaqueStreamRef { - inner, - key: stream.key(), - } - } - /// Called by a client to check for a received response. - pub fn poll_response(&mut self, cx: &Context) -> Poll, proto::Error>> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.key); - - me.actions.recv.poll_response(cx, &mut stream) - } - /// Called by a client to check for a pushed request. 
- pub fn poll_pushed( - &mut self, - cx: &Context, - ) -> Poll, OpaqueStreamRef), proto::Error>>> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.key); - me.actions - .recv - .poll_pushed(cx, &mut stream) - .map_ok(|(h, key)| { - me.refs += 1; - let opaque_ref = - OpaqueStreamRef::new(self.inner.clone(), &mut me.store.resolve(key)); - (h, opaque_ref) - }) - } - - pub fn is_end_stream(&self) -> bool { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let stream = me.store.resolve(self.key); - - me.actions.recv.is_end_stream(&stream) - } - - pub fn poll_data(&mut self, cx: &Context) -> Poll>> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.key); - - me.actions.recv.poll_data(cx, &mut stream) - } - - pub fn poll_trailers(&mut self, cx: &Context) -> Poll>> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.key); - - me.actions.recv.poll_trailers(cx, &mut stream) - } - - pub(crate) fn available_recv_capacity(&self) -> isize { - let me = self.inner.lock().unwrap(); - let me = &*me; - - let stream = &me.store[self.key]; - stream.recv_flow.available().into() - } - - pub(crate) fn used_recv_capacity(&self) -> WindowSize { - let me = self.inner.lock().unwrap(); - let me = &*me; - - let stream = &me.store[self.key]; - stream.in_flight_recv_data - } - - /// Releases recv capacity back to the peer. This may result in sending - /// WINDOW_UPDATE frames on both the stream and connection. - pub fn release_capacity(&mut self, capacity: WindowSize) -> Result<(), UserError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.key); - - me.actions - .recv - .release_capacity(capacity, &mut stream, &mut me.actions.task) - } - - /// Clear the receive queue and set the status to no longer receive data frames. 
- pub(crate) fn clear_recv_buffer(&mut self) { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut stream = me.store.resolve(self.key); - stream.is_recv = false; - me.actions.recv.clear_recv_buffer(&mut stream); - } - - pub fn stream_id(&self) -> StreamId { - self.inner.lock().unwrap().store[self.key].id - } -} - -impl fmt::Debug for OpaqueStreamRef { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use std::sync::TryLockError::*; - - match self.inner.try_lock() { - Ok(me) => { - let stream = &me.store[self.key]; - fmt.debug_struct("OpaqueStreamRef") - .field("stream_id", &stream.id) - .field("ref_count", &stream.ref_count) - .finish() - } - Err(Poisoned(_)) => fmt - .debug_struct("OpaqueStreamRef") - .field("inner", &"") - .finish(), - Err(WouldBlock) => fmt - .debug_struct("OpaqueStreamRef") - .field("inner", &"") - .finish(), - } - } -} - -impl Clone for OpaqueStreamRef { - fn clone(&self) -> Self { - // Increment the ref count - let mut inner = self.inner.lock().unwrap(); - inner.store.resolve(self.key).ref_inc(); - inner.refs += 1; - - OpaqueStreamRef { - inner: self.inner.clone(), - key: self.key, - } - } -} - -impl Drop for OpaqueStreamRef { - fn drop(&mut self) { - drop_stream_ref(&self.inner, self.key); - } -} - -// TODO: Move back in fn above -fn drop_stream_ref(inner: &Mutex, key: store::Key) { - let mut me = match inner.lock() { - Ok(inner) => inner, - Err(_) => { - if ::std::thread::panicking() { - tracing::trace!("StreamRef::drop; mutex poisoned"); - return; - } else { - panic!("StreamRef::drop; mutex poisoned"); - } - } - }; - - let me = &mut *me; - me.refs -= 1; - let mut stream = me.store.resolve(key); - - tracing::trace!("drop_stream_ref; stream={:?}", stream); - - // decrement the stream's ref count by 1. 
- stream.ref_dec(); - - let actions = &mut me.actions; - - // If the stream is not referenced and it is already - // closed (does not have to go through logic below - // of canceling the stream), we should notify the task - // (connection) so that it can close properly - if stream.ref_count == 0 && stream.is_closed() { - if let Some(task) = actions.task.take() { - task.wake(); - } - } - - me.counts.transition(stream, |counts, stream| { - maybe_cancel(stream, actions, counts); - - if stream.ref_count == 0 { - // Release any recv window back to connection, no one can access - // it anymore. - actions - .recv - .release_closed_capacity(stream, &mut actions.task); - - // We won't be able to reach our push promises anymore - let mut ppp = stream.pending_push_promises.take(); - while let Some(promise) = ppp.pop(stream.store_mut()) { - counts.transition(promise, |counts, stream| { - maybe_cancel(stream, actions, counts); - }); - } - } - }); -} - -fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) { - if stream.is_canceled_interest() { - // Server is allowed to early respond without fully consuming the client input stream - // But per the RFC, must send a RST_STREAM(NO_ERROR) in such cases. 
https://www.rfc-editor.org/rfc/rfc7540#section-8.1 - // Some other http2 implementation may interpret other error code as fatal if not respected (i.e: nginx https://trac.nginx.org/nginx/ticket/2376) - let reason = if counts.peer().is_server() - && stream.state.is_send_closed() - && stream.state.is_recv_streaming() - { - Reason::NO_ERROR - } else { - Reason::CANCEL - }; - - actions - .send - .schedule_implicit_reset(stream, reason, counts, &mut actions.task); - actions.recv.enqueue_reset_expiration(stream, counts); - } -} - -// ===== impl SendBuffer ===== - -impl SendBuffer { - fn new() -> Self { - let inner = Mutex::new(Buffer::new()); - SendBuffer { inner } - } -} - -// ===== impl Actions ===== - -impl Actions { - fn send_reset( - &mut self, - stream: store::Ptr, - reason: Reason, - initiator: Initiator, - counts: &mut Counts, - send_buffer: &mut Buffer>, - ) { - counts.transition(stream, |counts, stream| { - self.send.send_reset( - reason, - initiator, - send_buffer, - stream, - counts, - &mut self.task, - ); - self.recv.enqueue_reset_expiration(stream, counts); - // if a RecvStream is parked, ensure it's notified - stream.notify_recv(); - }); - } - - fn reset_on_recv_stream_err( - &mut self, - buffer: &mut Buffer>, - stream: &mut store::Ptr, - counts: &mut Counts, - res: Result<(), Error>, - ) -> Result<(), Error> { - if let Err(Error::Reset(stream_id, reason, initiator)) = res { - debug_assert_eq!(stream_id, stream.id); - // Reset the stream. 
- self.send - .send_reset(reason, initiator, buffer, stream, counts, &mut self.task); - Ok(()) - } else { - res - } - } - - fn ensure_not_idle(&mut self, peer: peer::Dyn, id: StreamId) -> Result<(), Reason> { - if peer.is_local_init(id) { - self.send.ensure_not_idle(id) - } else { - self.recv.ensure_not_idle(id) - } - } - - fn ensure_no_conn_error(&self) -> Result<(), proto::Error> { - if let Some(ref err) = self.conn_error { - Err(err.clone()) - } else { - Ok(()) - } - } - - /// Check if we possibly could have processed and since forgotten this stream. - /// - /// If we send a RST_STREAM for a stream, we will eventually "forget" about - /// the stream to free up memory. It's possible that the remote peer had - /// frames in-flight, and by the time we receive them, our own state is - /// gone. We *could* tear everything down by sending a GOAWAY, but it - /// is more likely to be latency/memory constraints that caused this, - /// and not a bad actor. So be less catastrophic, the spec allows - /// us to send another RST_STREAM of STREAM_CLOSED. - fn may_have_forgotten_stream(&self, peer: peer::Dyn, id: StreamId) -> bool { - if id.is_zero() { - return false; - } - if peer.is_local_init(id) { - self.send.may_have_created_stream(id) - } else { - self.recv.may_have_created_stream(id) - } - } - - fn clear_queues(&mut self, clear_pending_accept: bool, store: &mut Store, counts: &mut Counts) { - self.recv.clear_queues(clear_pending_accept, store, counts); - self.send.clear_queues(store, counts); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/server.rs s390-tools-2.33.1/rust-vendor/h2/src/server.rs --- s390-tools-2.31.0/rust-vendor/h2/src/server.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/server.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1613 +0,0 @@ -//! Server implementation of the HTTP/2 protocol. -//! -//! # Getting started -//! -//! Running an HTTP/2 server requires the caller to manage accepting the -//! 
connections as well as getting the connections to a state that is ready to -//! begin the HTTP/2 handshake. See [here](../index.html#handshake) for more -//! details. -//! -//! This could be as basic as using Tokio's [`TcpListener`] to accept -//! connections, but usually it means using either ALPN or HTTP/1.1 protocol -//! upgrades. -//! -//! Once a connection is obtained, it is passed to [`handshake`], -//! which will begin the [HTTP/2 handshake]. This returns a future that -//! completes once the handshake process is performed and HTTP/2 streams may -//! be received. -//! -//! [`handshake`] uses default configuration values. There are a number of -//! settings that can be changed by using [`Builder`] instead. -//! -//! # Inbound streams -//! -//! The [`Connection`] instance is used to accept inbound HTTP/2 streams. It -//! does this by implementing [`futures::Stream`]. When a new stream is -//! received, a call to [`Connection::accept`] will return `(request, response)`. -//! The `request` handle (of type [`http::Request`]) contains the -//! HTTP request head as well as provides a way to receive the inbound data -//! stream and the trailers. The `response` handle (of type [`SendResponse`]) -//! allows responding to the request, stream the response payload, send -//! trailers, and send push promises. -//! -//! The send ([`SendStream`]) and receive ([`RecvStream`]) halves of the stream -//! can be operated independently. -//! -//! # Managing the connection -//! -//! The [`Connection`] instance is used to manage connection state. The caller -//! is required to call either [`Connection::accept`] or -//! [`Connection::poll_close`] in order to advance the connection state. Simply -//! operating on [`SendStream`] or [`RecvStream`] will have no effect unless the -//! connection state is advanced. -//! -//! It is not required to call **both** [`Connection::accept`] and -//! [`Connection::poll_close`]. If the caller is ready to accept a new stream, -//! 
then only [`Connection::accept`] should be called. When the caller **does -//! not** want to accept a new stream, [`Connection::poll_close`] should be -//! called. -//! -//! The [`Connection`] instance should only be dropped once -//! [`Connection::poll_close`] returns `Ready`. Once [`Connection::accept`] -//! returns `Ready(None)`, there will no longer be any more inbound streams. At -//! this point, only [`Connection::poll_close`] should be called. -//! -//! # Shutting down the server -//! -//! Graceful shutdown of the server is [not yet -//! implemented](https://github.com/hyperium/h2/issues/69). -//! -//! # Example -//! -//! A basic HTTP/2 server example that runs over TCP and assumes [prior -//! knowledge], i.e. both the client and the server assume that the TCP socket -//! will use the HTTP/2 protocol without prior negotiation. -//! -//! ```no_run -//! use h2::server; -//! use http::{Response, StatusCode}; -//! use tokio::net::TcpListener; -//! -//! #[tokio::main] -//! pub async fn main() { -//! let mut listener = TcpListener::bind("127.0.0.1:5928").await.unwrap(); -//! -//! // Accept all incoming TCP connections. -//! loop { -//! if let Ok((socket, _peer_addr)) = listener.accept().await { -//! // Spawn a new task to process each connection. -//! tokio::spawn(async { -//! // Start the HTTP/2 connection handshake -//! let mut h2 = server::handshake(socket).await.unwrap(); -//! // Accept all inbound HTTP/2 streams sent over the -//! // connection. -//! while let Some(request) = h2.accept().await { -//! let (request, mut respond) = request.unwrap(); -//! println!("Received request: {:?}", request); -//! -//! // Build a response with no body -//! let response = Response::builder() -//! .status(StatusCode::OK) -//! .body(()) -//! .unwrap(); -//! -//! // Send the response back to the client -//! respond.send_response(response, true) -//! .unwrap(); -//! } -//! -//! }); -//! } -//! } -//! } -//! ``` -//! -//! 
[prior knowledge]: http://httpwg.org/specs/rfc7540.html#known-http -//! [`handshake`]: fn.handshake.html -//! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader -//! [`Builder`]: struct.Builder.html -//! [`Connection`]: struct.Connection.html -//! [`Connection::poll`]: struct.Connection.html#method.poll -//! [`Connection::poll_close`]: struct.Connection.html#method.poll_close -//! [`futures::Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html -//! [`http::Request`]: ../struct.RecvStream.html -//! [`RecvStream`]: ../struct.RecvStream.html -//! [`SendStream`]: ../struct.SendStream.html -//! [`TcpListener`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpListener.html - -use crate::codec::{Codec, UserError}; -use crate::frame::{self, Pseudo, PushPromiseHeaderError, Reason, Settings, StreamId}; -use crate::proto::{self, Config, Error, Prioritized}; -use crate::{FlowControl, PingPong, RecvStream, SendStream}; - -use bytes::{Buf, Bytes}; -use http::{HeaderMap, Method, Request, Response}; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, io}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tracing::instrument::{Instrument, Instrumented}; - -/// In progress HTTP/2 connection handshake future. -/// -/// This type implements `Future`, yielding a `Connection` instance once the -/// handshake has completed. -/// -/// The handshake is completed once the connection preface is fully received -/// from the client **and** the initial settings frame is sent to the client. -/// -/// The handshake future does not wait for the initial settings frame from the -/// client. -/// -/// See [module] level docs for more details. -/// -/// [module]: index.html -#[must_use = "futures do nothing unless polled"] -pub struct Handshake { - /// The config to pass to Connection::new after handshake succeeds. - builder: Builder, - /// The current state of the handshake. 
- state: Handshaking, - /// Span tracking the handshake - span: tracing::Span, -} - -/// Accepts inbound HTTP/2 streams on a connection. -/// -/// A `Connection` is backed by an I/O resource (usually a TCP socket) and -/// implements the HTTP/2 server logic for that connection. It is responsible -/// for receiving inbound streams initiated by the client as well as driving the -/// internal state forward. -/// -/// `Connection` values are created by calling [`handshake`]. Once a -/// `Connection` value is obtained, the caller must call [`poll`] or -/// [`poll_close`] in order to drive the internal connection state forward. -/// -/// See [module level] documentation for more details -/// -/// [module level]: index.html -/// [`handshake`]: struct.Connection.html#method.handshake -/// [`poll`]: struct.Connection.html#method.poll -/// [`poll_close`]: struct.Connection.html#method.poll_close -/// -/// # Examples -/// -/// ``` -/// # use tokio::io::{AsyncRead, AsyncWrite}; -/// # use h2::server; -/// # use h2::server::*; -/// # -/// # async fn doc(my_io: T) { -/// let mut server = server::handshake(my_io).await.unwrap(); -/// while let Some(request) = server.accept().await { -/// tokio::spawn(async move { -/// let (request, respond) = request.unwrap(); -/// // Process the request and send the response back to the client -/// // using `respond`. -/// }); -/// } -/// # } -/// # -/// # pub fn main() {} -/// ``` -#[must_use = "streams do nothing unless polled"] -pub struct Connection { - connection: proto::Connection, -} - -/// Builds server connections with custom configuration values. -/// -/// Methods can be chained in order to set the configuration values. -/// -/// The server is constructed by calling [`handshake`] and passing the I/O -/// handle that will back the HTTP/2 server. -/// -/// New instances of `Builder` are obtained via [`Builder::new`]. -/// -/// See function level documentation for details on the various server -/// configuration settings. 
-/// -/// [`Builder::new`]: struct.Builder.html#method.new -/// [`handshake`]: struct.Builder.html#method.handshake -/// -/// # Examples -/// -/// ``` -/// # use tokio::io::{AsyncRead, AsyncWrite}; -/// # use h2::server::*; -/// # -/// # fn doc(my_io: T) -/// # -> Handshake -/// # { -/// // `server_fut` is a future representing the completion of the HTTP/2 -/// // handshake. -/// let server_fut = Builder::new() -/// .initial_window_size(1_000_000) -/// .max_concurrent_streams(1000) -/// .handshake(my_io); -/// # server_fut -/// # } -/// # -/// # pub fn main() {} -/// ``` -#[derive(Clone, Debug)] -pub struct Builder { - /// Time to keep locally reset streams around before reaping. - reset_stream_duration: Duration, - - /// Maximum number of locally reset streams to keep at a time. - reset_stream_max: usize, - - /// Maximum number of remotely reset streams to allow in the pending - /// accept queue. - pending_accept_reset_stream_max: usize, - - /// Initial `Settings` frame to send as part of the handshake. - settings: Settings, - - /// Initial target window size for new connections. - initial_target_connection_window_size: Option, - - /// Maximum amount of bytes to "buffer" for writing per stream. - max_send_buffer_size: usize, -} - -/// Send a response back to the client -/// -/// A `SendResponse` instance is provided when receiving a request and is used -/// to send the associated response back to the client. It is also used to -/// explicitly reset the stream with a custom reason. -/// -/// It will also be used to initiate push promises linked with the associated -/// stream. -/// -/// If the `SendResponse` instance is dropped without sending a response, then -/// the HTTP/2 stream will be reset. -/// -/// See [module] level docs for more details. 
-/// -/// [module]: index.html -#[derive(Debug)] -pub struct SendResponse { - inner: proto::StreamRef, -} - -/// Send a response to a promised request -/// -/// A `SendPushedResponse` instance is provided when promising a request and is used -/// to send the associated response to the client. It is also used to -/// explicitly reset the stream with a custom reason. -/// -/// It can not be used to initiate push promises. -/// -/// If the `SendPushedResponse` instance is dropped without sending a response, then -/// the HTTP/2 stream will be reset. -/// -/// See [module] level docs for more details. -/// -/// [module]: index.html -pub struct SendPushedResponse { - inner: SendResponse, -} - -// Manual implementation necessary because of rust-lang/rust#26925 -impl fmt::Debug for SendPushedResponse { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "SendPushedResponse {{ {:?} }}", self.inner) - } -} - -/// Stages of an in-progress handshake. -enum Handshaking { - /// State 1. Connection is flushing pending SETTINGS frame. - Flushing(Instrumented>>), - /// State 2. Connection is waiting for the client preface. - ReadingPreface(Instrumented>>), - /// State 3. Handshake is done, polling again would panic. - Done, -} - -/// Flush a Sink -struct Flush { - codec: Option>, -} - -/// Read the client connection preface -struct ReadPreface { - codec: Option>, - pos: usize, -} - -#[derive(Debug)] -pub(crate) struct Peer; - -const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; - -/// Creates a new configured HTTP/2 server with default configuration -/// values backed by `io`. -/// -/// It is expected that `io` already be in an appropriate state to commence -/// the [HTTP/2 handshake]. See [Handshake] for more details. -/// -/// Returns a future which resolves to the [`Connection`] instance once the -/// HTTP/2 handshake has been completed. The returned [`Connection`] -/// instance will be using default configuration values. 
Use [`Builder`] to -/// customize the configuration values used by a [`Connection`] instance. -/// -/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader -/// [Handshake]: ../index.html#handshake -/// [`Connection`]: struct.Connection.html -/// -/// # Examples -/// -/// ``` -/// # use tokio::io::{AsyncRead, AsyncWrite}; -/// # use h2::server; -/// # use h2::server::*; -/// # -/// # async fn doc(my_io: T) -/// # { -/// let connection = server::handshake(my_io).await.unwrap(); -/// // The HTTP/2 handshake has completed, now use `connection` to -/// // accept inbound HTTP/2 streams. -/// # } -/// # -/// # pub fn main() {} -/// ``` -pub fn handshake(io: T) -> Handshake -where - T: AsyncRead + AsyncWrite + Unpin, -{ - Builder::new().handshake(io) -} - -// ===== impl Connection ===== - -impl Connection -where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, -{ - fn handshake2(io: T, builder: Builder) -> Handshake { - let span = tracing::trace_span!("server_handshake"); - let entered = span.enter(); - - // Create the codec. - let mut codec = Codec::new(io); - - if let Some(max) = builder.settings.max_frame_size() { - codec.set_max_recv_frame_size(max as usize); - } - - if let Some(max) = builder.settings.max_header_list_size() { - codec.set_max_recv_header_list_size(max as usize); - } - - // Send initial settings frame. - codec - .buffer(builder.settings.clone().into()) - .expect("invalid SETTINGS frame"); - - // Create the handshake future. - let state = - Handshaking::Flushing(Flush::new(codec).instrument(tracing::trace_span!("flush"))); - - drop(entered); - - Handshake { - builder, - state, - span, - } - } - - /// Accept the next incoming request on this connection. 
- pub async fn accept( - &mut self, - ) -> Option, SendResponse), crate::Error>> { - futures_util::future::poll_fn(move |cx| self.poll_accept(cx)).await - } - - #[doc(hidden)] - pub fn poll_accept( - &mut self, - cx: &mut Context<'_>, - ) -> Poll, SendResponse), crate::Error>>> { - // Always try to advance the internal state. Getting Pending also is - // needed to allow this function to return Pending. - if self.poll_closed(cx)?.is_ready() { - // If the socket is closed, don't return anything - // TODO: drop any pending streams - return Poll::Ready(None); - } - - if let Some(inner) = self.connection.next_incoming() { - tracing::trace!("received incoming"); - let (head, _) = inner.take_request().into_parts(); - let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); - - let request = Request::from_parts(head, body); - let respond = SendResponse { inner }; - - return Poll::Ready(Some(Ok((request, respond)))); - } - - Poll::Pending - } - - /// Sets the target window size for the whole connection. - /// - /// If `size` is greater than the current value, then a `WINDOW_UPDATE` - /// frame will be immediately sent to the remote, increasing the connection - /// level window by `size - current_value`. - /// - /// If `size` is less than the current value, nothing will happen - /// immediately. However, as window capacity is released by - /// [`FlowControl`] instances, no `WINDOW_UPDATE` frames will be sent - /// out until the number of "in flight" bytes drops below `size`. - /// - /// The default value is 65,535. - /// - /// See [`FlowControl`] documentation for more details. - /// - /// [`FlowControl`]: ../struct.FlowControl.html - /// [library level]: ../index.html#flow-control - pub fn set_target_window_size(&mut self, size: u32) { - assert!(size <= proto::MAX_WINDOW_SIZE); - self.connection.set_target_window_size(size); - } - - /// Set a new `INITIAL_WINDOW_SIZE` setting (in octets) for stream-level - /// flow control for received data. 
- /// - /// The `SETTINGS` will be sent to the remote, and only applied once the - /// remote acknowledges the change. - /// - /// This can be used to increase or decrease the window size for existing - /// streams. - /// - /// # Errors - /// - /// Returns an error if a previous call is still pending acknowledgement - /// from the remote endpoint. - pub fn set_initial_window_size(&mut self, size: u32) -> Result<(), crate::Error> { - assert!(size <= proto::MAX_WINDOW_SIZE); - self.connection.set_initial_window_size(size)?; - Ok(()) - } - - /// Enables the [extended CONNECT protocol]. - /// - /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 - /// - /// # Errors - /// - /// Returns an error if a previous call is still pending acknowledgement - /// from the remote endpoint. - pub fn enable_connect_protocol(&mut self) -> Result<(), crate::Error> { - self.connection.set_enable_connect_protocol()?; - Ok(()) - } - - /// Returns `Ready` when the underlying connection has closed. - /// - /// If any new inbound streams are received during a call to `poll_closed`, - /// they will be queued and returned on the next call to [`poll_accept`]. - /// - /// This function will advance the internal connection state, driving - /// progress on all the other handles (e.g. [`RecvStream`] and [`SendStream`]). - /// - /// See [here](index.html#managing-the-connection) for more details. - /// - /// [`poll_accept`]: struct.Connection.html#method.poll_accept - /// [`RecvStream`]: ../struct.RecvStream.html - /// [`SendStream`]: ../struct.SendStream.html - pub fn poll_closed(&mut self, cx: &mut Context) -> Poll> { - self.connection.poll(cx).map_err(Into::into) - } - - #[doc(hidden)] - #[deprecated(note = "renamed to poll_closed")] - pub fn poll_close(&mut self, cx: &mut Context) -> Poll> { - self.poll_closed(cx) - } - - /// Sets the connection to a GOAWAY state. - /// - /// Does not terminate the connection. 
Must continue being polled to close - /// connection. - /// - /// After flushing the GOAWAY frame, the connection is closed. Any - /// outstanding streams do not prevent the connection from closing. This - /// should usually be reserved for shutting down when something bad - /// external to `h2` has happened, and open streams cannot be properly - /// handled. - /// - /// For graceful shutdowns, see [`graceful_shutdown`](Connection::graceful_shutdown). - pub fn abrupt_shutdown(&mut self, reason: Reason) { - self.connection.go_away_from_user(reason); - } - - /// Starts a [graceful shutdown][1] process. - /// - /// Must continue being polled to close connection. - /// - /// It's possible to receive more requests after calling this method, since - /// they might have been in-flight from the client already. After about - /// 1 RTT, no new requests should be accepted. Once all active streams - /// have completed, the connection is closed. - /// - /// [1]: http://httpwg.org/specs/rfc7540.html#GOAWAY - pub fn graceful_shutdown(&mut self) { - self.connection.go_away_gracefully(); - } - - /// Takes a `PingPong` instance from the connection. - /// - /// # Note - /// - /// This may only be called once. Calling multiple times will return `None`. - pub fn ping_pong(&mut self) -> Option { - self.connection.take_user_pings().map(PingPong::new) - } - - /// Returns the maximum number of concurrent streams that may be initiated - /// by the server on this connection. - /// - /// This limit is configured by the client peer by sending the - /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame. - /// This method returns the currently acknowledged value received from the - /// remote. - /// - /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 - pub fn max_concurrent_send_streams(&self) -> usize { - self.connection.max_send_streams() - } - - /// Returns the maximum number of concurrent streams that may be initiated - /// by the client on this connection. 
- /// - /// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS` - /// parameter][1] sent in a `SETTINGS` frame that has been - /// acknowledged by the remote peer. The value to be sent is configured by - /// the [`Builder::max_concurrent_streams`][2] method before handshaking - /// with the remote peer. - /// - /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 - /// [2]: ../struct.Builder.html#method.max_concurrent_streams - pub fn max_concurrent_recv_streams(&self) -> usize { - self.connection.max_recv_streams() - } - - // Could disappear at anytime. - #[doc(hidden)] - #[cfg(feature = "unstable")] - pub fn num_wired_streams(&self) -> usize { - self.connection.num_wired_streams() - } -} - -#[cfg(feature = "stream")] -impl futures_core::Stream for Connection -where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, -{ - type Item = Result<(Request, SendResponse), crate::Error>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_accept(cx) - } -} - -impl fmt::Debug for Connection -where - T: fmt::Debug, - B: fmt::Debug + Buf, -{ - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Connection") - .field("connection", &self.connection) - .finish() - } -} - -// ===== impl Builder ===== - -impl Builder { - /// Returns a new server builder instance initialized with default - /// configuration values. - /// - /// Configuration methods can be chained on the return value. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. 
- /// let server_fut = Builder::new() - /// .initial_window_size(1_000_000) - /// .max_concurrent_streams(1000) - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn new() -> Builder { - Builder { - reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), - reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, - pending_accept_reset_stream_max: proto::DEFAULT_REMOTE_RESET_STREAM_MAX, - settings: Settings::default(), - initial_target_connection_window_size: None, - max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, - } - } - - /// Indicates the initial window size (in octets) for stream-level - /// flow control for received data. - /// - /// The initial window of a stream is used as part of flow control. For more - /// details, see [`FlowControl`]. - /// - /// The default value is 65,535. - /// - /// [`FlowControl`]: ../struct.FlowControl.html - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let server_fut = Builder::new() - /// .initial_window_size(1_000_000) - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn initial_window_size(&mut self, size: u32) -> &mut Self { - self.settings.set_initial_window_size(Some(size)); - self - } - - /// Indicates the initial window size (in octets) for connection-level flow control - /// for received data. - /// - /// The initial window of a connection is used as part of flow control. For more details, - /// see [`FlowControl`]. - /// - /// The default value is 65,535. 
- /// - /// [`FlowControl`]: ../struct.FlowControl.html - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let server_fut = Builder::new() - /// .initial_connection_window_size(1_000_000) - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn initial_connection_window_size(&mut self, size: u32) -> &mut Self { - self.initial_target_connection_window_size = Some(size); - self - } - - /// Indicates the size (in octets) of the largest HTTP/2 frame payload that the - /// configured server is able to accept. - /// - /// The sender may send data frames that are **smaller** than this value, - /// but any data larger than `max` will be broken up into multiple `DATA` - /// frames. - /// - /// The value **must** be between 16,384 and 16,777,215. The default value is 16,384. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let server_fut = Builder::new() - /// .max_frame_size(1_000_000) - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - /// - /// # Panics - /// - /// This function panics if `max` is not within the legal range specified - /// above. - pub fn max_frame_size(&mut self, max: u32) -> &mut Self { - self.settings.set_max_frame_size(Some(max)); - self - } - - /// Sets the max size of received header frames. - /// - /// This advisory setting informs a peer of the maximum size of header list - /// that the sender is prepared to accept, in octets. 
The value is based on - /// the uncompressed size of header fields, including the length of the name - /// and value in octets plus an overhead of 32 octets for each header field. - /// - /// This setting is also used to limit the maximum amount of data that is - /// buffered to decode HEADERS frames. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let server_fut = Builder::new() - /// .max_header_list_size(16 * 1024) - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { - self.settings.set_max_header_list_size(Some(max)); - self - } - - /// Sets the maximum number of concurrent streams. - /// - /// The maximum concurrent streams setting only controls the maximum number - /// of streams that can be initiated by the remote peer. In other words, - /// when this setting is set to 100, this does not limit the number of - /// concurrent streams that can be created by the caller. - /// - /// It is recommended that this value be no smaller than 100, so as to not - /// unnecessarily limit parallelism. However, any value is legal, including - /// 0. If `max` is set to 0, then the remote will not be permitted to - /// initiate streams. - /// - /// Note that streams in the reserved state, i.e., push promises that have - /// been reserved but the stream has not started, do not count against this - /// setting. - /// - /// Also note that if the remote *does* exceed the value set here, it is not - /// a protocol level error. Instead, the `h2` library will immediately reset - /// the stream. - /// - /// See [Section 5.1.2] in the HTTP/2 spec for more details. 
- /// - /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let server_fut = Builder::new() - /// .max_concurrent_streams(1000) - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn max_concurrent_streams(&mut self, max: u32) -> &mut Self { - self.settings.set_max_concurrent_streams(Some(max)); - self - } - - /// Sets the maximum number of concurrent locally reset streams. - /// - /// When a stream is explicitly reset by either calling - /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance - /// before completing the stream, the HTTP/2 specification requires that - /// any further frames received for that stream must be ignored for "some - /// time". - /// - /// In order to satisfy the specification, internal state must be maintained - /// to implement the behavior. This state grows linearly with the number of - /// streams that are locally reset. - /// - /// The `max_concurrent_reset_streams` setting configures sets an upper - /// bound on the amount of state that is maintained. When this max value is - /// reached, the oldest reset stream is purged from memory. - /// - /// Once the stream has been fully purged from memory, any additional frames - /// received for that stream will result in a connection level protocol - /// error, forcing the connection to terminate. - /// - /// The default value is 10. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. 
- /// let server_fut = Builder::new() - /// .max_concurrent_reset_streams(1000) - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { - self.reset_stream_max = max; - self - } - - /// Sets the maximum number of pending-accept remotely-reset streams. - /// - /// Streams that have been received by the peer, but not accepted by the - /// user, can also receive a RST_STREAM. This is a legitimate pattern: one - /// could send a request and then shortly after, realize it is not needed, - /// sending a CANCEL. - /// - /// However, since those streams are now "closed", they don't count towards - /// the max concurrent streams. So, they will sit in the accept queue, - /// using memory. - /// - /// When the number of remotely-reset streams sitting in the pending-accept - /// queue reaches this maximum value, a connection error with the code of - /// `ENHANCE_YOUR_CALM` will be sent to the peer, and returned by the - /// `Future`. - /// - /// The default value is currently 20, but could change. - /// - /// # Examples - /// - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let server_fut = Builder::new() - /// .max_pending_accept_reset_streams(100) - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn max_pending_accept_reset_streams(&mut self, max: usize) -> &mut Self { - self.pending_accept_reset_stream_max = max; - self - } - - /// Sets the maximum send buffer size per stream. - /// - /// Once a stream has buffered up to (or over) the maximum, the stream's - /// flow control will not "poll" additional capacity. 
Once bytes for the - /// stream have been written to the connection, the send buffer capacity - /// will be freed up again. - /// - /// The default is currently ~400KB, but may change. - /// - /// # Panics - /// - /// This function panics if `max` is larger than `u32::MAX`. - pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self { - assert!(max <= std::u32::MAX as usize); - self.max_send_buffer_size = max; - self - } - - /// Sets the maximum number of concurrent locally reset streams. - /// - /// When a stream is explicitly reset by either calling - /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance - /// before completing the stream, the HTTP/2 specification requires that - /// any further frames received for that stream must be ignored for "some - /// time". - /// - /// In order to satisfy the specification, internal state must be maintained - /// to implement the behavior. This state grows linearly with the number of - /// streams that are locally reset. - /// - /// The `reset_stream_duration` setting configures the max amount of time - /// this state will be maintained in memory. Once the duration elapses, the - /// stream state is purged from memory. - /// - /// Once the stream has been fully purged from memory, any additional frames - /// received for that stream will result in a connection level protocol - /// error, forcing the connection to terminate. - /// - /// The default value is 30 seconds. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # use std::time::Duration; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. 
- /// let server_fut = Builder::new() - /// .reset_stream_duration(Duration::from_secs(10)) - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn reset_stream_duration(&mut self, dur: Duration) -> &mut Self { - self.reset_stream_duration = dur; - self - } - - /// Enables the [extended CONNECT protocol]. - /// - /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 - pub fn enable_connect_protocol(&mut self) -> &mut Self { - self.settings.set_enable_connect_protocol(Some(1)); - self - } - - /// Creates a new configured HTTP/2 server backed by `io`. - /// - /// It is expected that `io` already be in an appropriate state to commence - /// the [HTTP/2 handshake]. See [Handshake] for more details. - /// - /// Returns a future which resolves to the [`Connection`] instance once the - /// HTTP/2 handshake has been completed. - /// - /// This function also allows the caller to configure the send payload data - /// type. See [Outbound data type] for more details. - /// - /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader - /// [Handshake]: ../index.html#handshake - /// [`Connection`]: struct.Connection.html - /// [Outbound data type]: ../index.html#outbound-data-type. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let server_fut = Builder::new() - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - /// - /// Configures the send-payload data type. In this case, the outbound data - /// type will be `&'static [u8]`. 
- /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use h2::server::*; - /// # - /// # fn doc(my_io: T) - /// # -> Handshake - /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2 - /// // handshake. - /// let server_fut: Handshake<_, &'static [u8]> = Builder::new() - /// .handshake(my_io); - /// # server_fut - /// # } - /// # - /// # pub fn main() {} - /// ``` - pub fn handshake(&self, io: T) -> Handshake - where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, - { - Connection::handshake2(io, self.clone()) - } -} - -impl Default for Builder { - fn default() -> Builder { - Builder::new() - } -} - -// ===== impl SendResponse ===== - -impl SendResponse { - /// Send a response to a client request. - /// - /// On success, a [`SendStream`] instance is returned. This instance can be - /// used to stream the response body and send trailers. - /// - /// If a body or trailers will be sent on the returned [`SendStream`] - /// instance, then `end_of_stream` must be set to `false` when calling this - /// function. - /// - /// The [`SendResponse`] instance is already associated with a received - /// request. This function may only be called once per instance and only if - /// [`send_reset`] has not been previously called. - /// - /// [`SendResponse`]: # - /// [`SendStream`]: ../struct.SendStream.html - /// [`send_reset`]: #method.send_reset - pub fn send_response( - &mut self, - response: Response<()>, - end_of_stream: bool, - ) -> Result, crate::Error> { - self.inner - .send_response(response, end_of_stream) - .map(|_| SendStream::new(self.inner.clone())) - .map_err(Into::into) - } - - /// Push a request and response to the client - /// - /// On success, a [`SendResponse`] instance is returned. 
- /// - /// [`SendResponse`]: # - pub fn push_request( - &mut self, - request: Request<()>, - ) -> Result, crate::Error> { - self.inner - .send_push_promise(request) - .map(|inner| SendPushedResponse { - inner: SendResponse { inner }, - }) - .map_err(Into::into) - } - - /// Send a stream reset to the peer. - /// - /// This essentially cancels the stream, including any inbound or outbound - /// data streams. - /// - /// If this function is called before [`send_response`], a call to - /// [`send_response`] will result in an error. - /// - /// If this function is called while a [`SendStream`] instance is active, - /// any further use of the instance will result in an error. - /// - /// This function should only be called once. - /// - /// [`send_response`]: #method.send_response - /// [`SendStream`]: ../struct.SendStream.html - pub fn send_reset(&mut self, reason: Reason) { - self.inner.send_reset(reason) - } - - /// Polls to be notified when the client resets this stream. - /// - /// If stream is still open, this returns `Poll::Pending`, and - /// registers the task to be notified if a `RST_STREAM` is received. - /// - /// If a `RST_STREAM` frame is received for this stream, calling this - /// method will yield the `Reason` for the reset. - /// - /// # Error - /// - /// Calling this method after having called `send_response` will return - /// a user error. - pub fn poll_reset(&mut self, cx: &mut Context) -> Poll> { - self.inner.poll_reset(cx, proto::PollReset::AwaitingHeaders) - } - - /// Returns the stream ID of the response stream. - /// - /// # Panics - /// - /// If the lock on the stream store has been poisoned. - pub fn stream_id(&self) -> crate::StreamId { - crate::StreamId::from_internal(self.inner.stream_id()) - } -} - -// ===== impl SendPushedResponse ===== - -impl SendPushedResponse { - /// Send a response to a promised request. - /// - /// On success, a [`SendStream`] instance is returned. 
This instance can be - /// used to stream the response body and send trailers. - /// - /// If a body or trailers will be sent on the returned [`SendStream`] - /// instance, then `end_of_stream` must be set to `false` when calling this - /// function. - /// - /// The [`SendPushedResponse`] instance is associated with a promised - /// request. This function may only be called once per instance and only if - /// [`send_reset`] has not been previously called. - /// - /// [`SendPushedResponse`]: # - /// [`SendStream`]: ../struct.SendStream.html - /// [`send_reset`]: #method.send_reset - pub fn send_response( - &mut self, - response: Response<()>, - end_of_stream: bool, - ) -> Result, crate::Error> { - self.inner.send_response(response, end_of_stream) - } - - /// Send a stream reset to the peer. - /// - /// This essentially cancels the stream, including any inbound or outbound - /// data streams. - /// - /// If this function is called before [`send_response`], a call to - /// [`send_response`] will result in an error. - /// - /// If this function is called while a [`SendStream`] instance is active, - /// any further use of the instance will result in an error. - /// - /// This function should only be called once. - /// - /// [`send_response`]: #method.send_response - /// [`SendStream`]: ../struct.SendStream.html - pub fn send_reset(&mut self, reason: Reason) { - self.inner.send_reset(reason) - } - - /// Polls to be notified when the client resets this stream. - /// - /// If stream is still open, this returns `Poll::Pending`, and - /// registers the task to be notified if a `RST_STREAM` is received. - /// - /// If a `RST_STREAM` frame is received for this stream, calling this - /// method will yield the `Reason` for the reset. - /// - /// # Error - /// - /// Calling this method after having called `send_response` will return - /// a user error. 
- pub fn poll_reset(&mut self, cx: &mut Context) -> Poll> { - self.inner.poll_reset(cx) - } - - /// Returns the stream ID of the response stream. - /// - /// # Panics - /// - /// If the lock on the stream store has been poisoned. - pub fn stream_id(&self) -> crate::StreamId { - self.inner.stream_id() - } -} - -// ===== impl Flush ===== - -impl Flush { - fn new(codec: Codec) -> Self { - Flush { codec: Some(codec) } - } -} - -impl Future for Flush -where - T: AsyncWrite + Unpin, - B: Buf, -{ - type Output = Result, crate::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // Flush the codec - ready!(self.codec.as_mut().unwrap().flush(cx)).map_err(crate::Error::from_io)?; - - // Return the codec - Poll::Ready(Ok(self.codec.take().unwrap())) - } -} - -impl ReadPreface { - fn new(codec: Codec) -> Self { - ReadPreface { - codec: Some(codec), - pos: 0, - } - } - - fn inner_mut(&mut self) -> &mut T { - self.codec.as_mut().unwrap().get_mut() - } -} - -impl Future for ReadPreface -where - T: AsyncRead + Unpin, - B: Buf, -{ - type Output = Result, crate::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut buf = [0; 24]; - let mut rem = PREFACE.len() - self.pos; - - while rem > 0 { - let mut buf = ReadBuf::new(&mut buf[..rem]); - ready!(Pin::new(self.inner_mut()).poll_read(cx, &mut buf)) - .map_err(crate::Error::from_io)?; - let n = buf.filled().len(); - if n == 0 { - return Poll::Ready(Err(crate::Error::from_io(io::Error::new( - io::ErrorKind::UnexpectedEof, - "connection closed before reading preface", - )))); - } - - if &PREFACE[self.pos..self.pos + n] != buf.filled() { - proto_err!(conn: "read_preface: invalid preface"); - // TODO: Should this just write the GO_AWAY frame directly? 
- return Poll::Ready(Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into())); - } - - self.pos += n; - rem -= n; // TODO test - } - - Poll::Ready(Ok(self.codec.take().unwrap())) - } -} - -// ===== impl Handshake ===== - -impl Future for Handshake -where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, -{ - type Output = Result, crate::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let span = self.span.clone(); // XXX(eliza): T_T - let _e = span.enter(); - tracing::trace!(state = ?self.state); - - loop { - match &mut self.state { - Handshaking::Flushing(flush) => { - // We're currently flushing a pending SETTINGS frame. Poll the - // flush future, and, if it's completed, advance our state to wait - // for the client preface. - let codec = match Pin::new(flush).poll(cx)? { - Poll::Pending => { - tracing::trace!(flush.poll = %"Pending"); - return Poll::Pending; - } - Poll::Ready(flushed) => { - tracing::trace!(flush.poll = %"Ready"); - flushed - } - }; - self.state = Handshaking::ReadingPreface( - ReadPreface::new(codec).instrument(tracing::trace_span!("read_preface")), - ); - } - Handshaking::ReadingPreface(read) => { - let codec = ready!(Pin::new(read).poll(cx)?); - - self.state = Handshaking::Done; - - let connection = proto::Connection::new( - codec, - Config { - next_stream_id: 2.into(), - // Server does not need to locally initiate any streams - initial_max_send_streams: 0, - max_send_buffer_size: self.builder.max_send_buffer_size, - reset_stream_duration: self.builder.reset_stream_duration, - reset_stream_max: self.builder.reset_stream_max, - remote_reset_stream_max: self.builder.pending_accept_reset_stream_max, - settings: self.builder.settings.clone(), - }, - ); - - tracing::trace!("connection established!"); - let mut c = Connection { connection }; - if let Some(sz) = self.builder.initial_target_connection_window_size { - c.set_target_window_size(sz); - } - - return Poll::Ready(Ok(c)); - } - Handshaking::Done => { - 
panic!("Handshaking::poll() called again after handshaking was complete") - } - } - } - } -} - -impl fmt::Debug for Handshake -where - T: AsyncRead + AsyncWrite + fmt::Debug, - B: fmt::Debug + Buf, -{ - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "server::Handshake") - } -} - -impl Peer { - pub fn convert_send_message( - id: StreamId, - response: Response<()>, - end_of_stream: bool, - ) -> frame::Headers { - use http::response::Parts; - - // Extract the components of the HTTP request - let ( - Parts { - status, headers, .. - }, - _, - ) = response.into_parts(); - - // Build the set pseudo header set. All requests will include `method` - // and `path`. - let pseudo = Pseudo::response(status); - - // Create the HEADERS frame - let mut frame = frame::Headers::new(id, pseudo, headers); - - if end_of_stream { - frame.set_end_stream() - } - - frame - } - - pub fn convert_push_message( - stream_id: StreamId, - promised_id: StreamId, - request: Request<()>, - ) -> Result { - use http::request::Parts; - - if let Err(e) = frame::PushPromise::validate_request(&request) { - use PushPromiseHeaderError::*; - match e { - NotSafeAndCacheable => tracing::debug!( - ?promised_id, - "convert_push_message: method {} is not safe and cacheable", - request.method(), - ), - InvalidContentLength(e) => tracing::debug!( - ?promised_id, - "convert_push_message; promised request has invalid content-length {:?}", - e, - ), - } - return Err(UserError::MalformedHeaders); - } - - // Extract the components of the HTTP request - let ( - Parts { - method, - uri, - headers, - .. 
- }, - _, - ) = request.into_parts(); - - let pseudo = Pseudo::request(method, uri, None); - - Ok(frame::PushPromise::new( - stream_id, - promised_id, - pseudo, - headers, - )) - } -} - -impl proto::Peer for Peer { - type Poll = Request<()>; - - const NAME: &'static str = "Server"; - - fn is_server() -> bool { - true - } - - fn r#dyn() -> proto::DynPeer { - proto::DynPeer::Server - } - - fn convert_poll_message( - pseudo: Pseudo, - fields: HeaderMap, - stream_id: StreamId, - ) -> Result { - use http::{uri, Version}; - - let mut b = Request::builder(); - - macro_rules! malformed { - ($($arg:tt)*) => {{ - tracing::debug!($($arg)*); - return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR)); - }} - } - - b = b.version(Version::HTTP_2); - - let is_connect; - if let Some(method) = pseudo.method { - is_connect = method == Method::CONNECT; - b = b.method(method); - } else { - malformed!("malformed headers: missing method"); - } - - let has_protocol = pseudo.protocol.is_some(); - if has_protocol { - if is_connect { - // Assert that we have the right type. - b = b.extension::(pseudo.protocol.unwrap()); - } else { - malformed!("malformed headers: :protocol on non-CONNECT request"); - } - } - - if pseudo.status.is_some() { - malformed!("malformed headers: :status field on request"); - } - - // Convert the URI - let mut parts = uri::Parts::default(); - - // A request translated from HTTP/1 must not include the :authority - // header - if let Some(authority) = pseudo.authority { - let maybe_authority = uri::Authority::from_maybe_shared(authority.clone().into_inner()); - parts.authority = Some(maybe_authority.or_else(|why| { - malformed!( - "malformed headers: malformed authority ({:?}): {}", - authority, - why, - ) - })?); - } - - // A :scheme is required, except CONNECT. 
- if let Some(scheme) = pseudo.scheme { - if is_connect && !has_protocol { - malformed!("malformed headers: :scheme in CONNECT"); - } - let maybe_scheme = scheme.parse(); - let scheme = maybe_scheme.or_else(|why| { - malformed!( - "malformed headers: malformed scheme ({:?}): {}", - scheme, - why, - ) - })?; - - // It's not possible to build an `Uri` from a scheme and path. So, - // after validating is was a valid scheme, we just have to drop it - // if there isn't an :authority. - if parts.authority.is_some() { - parts.scheme = Some(scheme); - } - } else if !is_connect || has_protocol { - malformed!("malformed headers: missing scheme"); - } - - if let Some(path) = pseudo.path { - if is_connect && !has_protocol { - malformed!("malformed headers: :path in CONNECT"); - } - - // This cannot be empty - if path.is_empty() { - malformed!("malformed headers: missing path"); - } - - let maybe_path = uri::PathAndQuery::from_maybe_shared(path.clone().into_inner()); - parts.path_and_query = Some(maybe_path.or_else(|why| { - malformed!("malformed headers: malformed path ({:?}): {}", path, why,) - })?); - } else if is_connect && has_protocol { - malformed!("malformed headers: missing path in extended CONNECT"); - } - - b = b.uri(parts); - - let mut request = match b.body(()) { - Ok(request) => request, - Err(e) => { - // TODO: Should there be more specialized handling for different - // kinds of errors - proto_err!(stream: "error building request: {}; stream={:?}", e, stream_id); - return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR)); - } - }; - - *request.headers_mut() = fields; - - Ok(request) - } -} - -// ===== impl Handshaking ===== - -impl fmt::Debug for Handshaking -where - B: Buf, -{ - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match *self { - Handshaking::Flushing(_) => f.write_str("Flushing(_)"), - Handshaking::ReadingPreface(_) => f.write_str("ReadingPreface(_)"), - Handshaking::Done => f.write_str("Done"), - } - } -} 
diff -Nru s390-tools-2.31.0/rust-vendor/h2/src/share.rs s390-tools-2.33.1/rust-vendor/h2/src/share.rs --- s390-tools-2.31.0/rust-vendor/h2/src/share.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/h2/src/share.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,606 +0,0 @@ -use crate::codec::UserError; -use crate::frame::Reason; -use crate::proto::{self, WindowSize}; - -use bytes::{Buf, Bytes}; -use http::HeaderMap; - -use std::fmt; -#[cfg(feature = "stream")] -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Sends the body stream and trailers to the remote peer. -/// -/// # Overview -/// -/// A `SendStream` is provided by [`SendRequest`] and [`SendResponse`] once the -/// HTTP/2 message header has been sent sent. It is used to stream the message -/// body and send the message trailers. See method level documentation for more -/// details. -/// -/// The `SendStream` instance is also used to manage outbound flow control. -/// -/// If a `SendStream` is dropped without explicitly closing the send stream, a -/// `RST_STREAM` frame will be sent. This essentially cancels the request / -/// response exchange. -/// -/// The ways to explicitly close the send stream are: -/// -/// * Set `end_of_stream` to true when calling [`send_request`], -/// [`send_response`], or [`send_data`]. -/// * Send trailers with [`send_trailers`]. -/// * Explicitly reset the stream with [`send_reset`]. -/// -/// # Flow control -/// -/// In HTTP/2, data cannot be sent to the remote peer unless there is -/// available window capacity on both the stream and the connection. When a data -/// frame is sent, both the stream window and the connection window are -/// decremented. When the stream level window reaches zero, no further data can -/// be sent on that stream. When the connection level window reaches zero, no -/// further data can be sent on any stream for that connection. 
-/// -/// When the remote peer is ready to receive more data, it sends `WINDOW_UPDATE` -/// frames. These frames increment the windows. See the [specification] for more -/// details on the principles of HTTP/2 flow control. -/// -/// The implications for sending data are that the caller **should** ensure that -/// both the stream and the connection has available window capacity before -/// loading the data to send into memory. The `SendStream` instance provides the -/// necessary APIs to perform this logic. This, however, is not an obligation. -/// If the caller attempts to send data on a stream when there is no available -/// window capacity, the library will buffer the data until capacity becomes -/// available, at which point the buffer will be flushed to the connection. -/// -/// **NOTE**: There is no bound on the amount of data that the library will -/// buffer. If you are sending large amounts of data, you really should hook -/// into the flow control lifecycle. Otherwise, you risk using up significant -/// amounts of memory. -/// -/// To hook into the flow control lifecycle, the caller signals to the library -/// that it intends to send data by calling [`reserve_capacity`], specifying the -/// amount of data, in octets, that the caller intends to send. After this, -/// `poll_capacity` is used to be notified when the requested capacity is -/// assigned to the stream. Once [`poll_capacity`] returns `Ready` with the number -/// of octets available to the stream, the caller is able to actually send the -/// data using [`send_data`]. -/// -/// Because there is also a connection level window that applies to **all** -/// streams on a connection, when capacity is assigned to a stream (indicated by -/// `poll_capacity` returning `Ready`), this capacity is reserved on the -/// connection and will **not** be assigned to any other stream. 
If data is -/// never written to the stream, that capacity is effectively lost to other -/// streams and this introduces the risk of deadlocking a connection. -/// -/// To avoid throttling data on a connection, the caller should not reserve -/// capacity until ready to send data and once any capacity is assigned to the -/// stream, the caller should immediately send data consuming this capacity. -/// There is no guarantee as to when the full capacity requested will become -/// available. For example, if the caller requests 64 KB of data and 512 bytes -/// become available, the caller should immediately send 512 bytes of data. -/// -/// See [`reserve_capacity`] documentation for more details. -/// -/// [`SendRequest`]: client/struct.SendRequest.html -/// [`SendResponse`]: server/struct.SendResponse.html -/// [specification]: http://httpwg.org/specs/rfc7540.html#FlowControl -/// [`reserve_capacity`]: #method.reserve_capacity -/// [`poll_capacity`]: #method.poll_capacity -/// [`send_data`]: #method.send_data -/// [`send_request`]: client/struct.SendRequest.html#method.send_request -/// [`send_response`]: server/struct.SendResponse.html#method.send_response -/// [`send_data`]: #method.send_data -/// [`send_trailers`]: #method.send_trailers -/// [`send_reset`]: #method.send_reset -#[derive(Debug)] -pub struct SendStream { - inner: proto::StreamRef, -} - -/// A stream identifier, as described in [Section 5.1.1] of RFC 7540. -/// -/// Streams are identified with an unsigned 31-bit integer. Streams -/// initiated by a client MUST use odd-numbered stream identifiers; those -/// initiated by the server MUST use even-numbered stream identifiers. A -/// stream identifier of zero (0x0) is used for connection control -/// messages; the stream identifier of zero cannot be used to establish a -/// new stream. 
-/// -/// [Section 5.1.1]: https://tools.ietf.org/html/rfc7540#section-5.1.1 -#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] -pub struct StreamId(u32); - -impl From for u32 { - fn from(src: StreamId) -> Self { - src.0 - } -} - -/// Receives the body stream and trailers from the remote peer. -/// -/// A `RecvStream` is provided by [`client::ResponseFuture`] and -/// [`server::Connection`] with the received HTTP/2 message head (the response -/// and request head respectively). -/// -/// A `RecvStream` instance is used to receive the streaming message body and -/// any trailers from the remote peer. It is also used to manage inbound flow -/// control. -/// -/// See method level documentation for more details on receiving data. See -/// [`FlowControl`] for more details on inbound flow control. -/// -/// [`client::ResponseFuture`]: client/struct.ResponseFuture.html -/// [`server::Connection`]: server/struct.Connection.html -/// [`FlowControl`]: struct.FlowControl.html -/// [`Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html -#[must_use = "streams do nothing unless polled"] -pub struct RecvStream { - inner: FlowControl, -} - -/// A handle to release window capacity to a remote stream. -/// -/// This type allows the caller to manage inbound data [flow control]. The -/// caller is expected to call [`release_capacity`] after dropping data frames. -/// -/// # Overview -/// -/// Each stream has a window size. This window size is the maximum amount of -/// inbound data that can be in-flight. In-flight data is defined as data that -/// has been received, but not yet released. -/// -/// When a stream is created, the window size is set to the connection's initial -/// window size value. When a data frame is received, the window size is then -/// decremented by size of the data frame before the data is provided to the -/// caller. As the caller finishes using the data, [`release_capacity`] must be -/// called. 
This will then increment the window size again, allowing the peer to -/// send more data. -/// -/// There is also a connection level window as well as the stream level window. -/// Received data counts against the connection level window as well and calls -/// to [`release_capacity`] will also increment the connection level window. -/// -/// # Sending `WINDOW_UPDATE` frames -/// -/// `WINDOW_UPDATE` frames will not be sent out for **every** call to -/// `release_capacity`, as this would end up slowing down the protocol. Instead, -/// `h2` waits until the window size is increased to a certain threshold and -/// then sends out a single `WINDOW_UPDATE` frame representing all the calls to -/// `release_capacity` since the last `WINDOW_UPDATE` frame. -/// -/// This essentially batches window updating. -/// -/// # Scenarios -/// -/// Following is a basic scenario with an HTTP/2 connection containing a -/// single active stream. -/// -/// * A new stream is activated. The receive window is initialized to 1024 (the -/// value of the initial window size for this connection). -/// * A `DATA` frame is received containing a payload of 600 bytes. -/// * The receive window size is reduced to 424 bytes. -/// * [`release_capacity`] is called with 200. -/// * The receive window size is now 624 bytes. The peer may send no more than -/// this. -/// * A `DATA` frame is received with a payload of 624 bytes. -/// * The window size is now 0 bytes. The peer may not send any more data. -/// * [`release_capacity`] is called with 1024. -/// * The receive window size is now 1024 bytes. The peer may now send more -/// data. -/// -/// [flow control]: ../index.html#flow-control -/// [`release_capacity`]: struct.FlowControl.html#method.release_capacity -#[derive(Clone, Debug)] -pub struct FlowControl { - inner: proto::OpaqueStreamRef, -} - -/// A handle to send and receive PING frames with the peer. 
-// NOT Clone on purpose -pub struct PingPong { - inner: proto::UserPings, -} - -/// Sent via [`PingPong`][] to send a PING frame to a peer. -/// -/// [`PingPong`]: struct.PingPong.html -pub struct Ping { - _p: (), -} - -/// Received via [`PingPong`][] when a peer acknowledges a [`Ping`][]. -/// -/// [`PingPong`]: struct.PingPong.html -/// [`Ping`]: struct.Ping.html -pub struct Pong { - _p: (), -} - -// ===== impl SendStream ===== - -impl SendStream { - pub(crate) fn new(inner: proto::StreamRef) -> Self { - SendStream { inner } - } - - /// Requests capacity to send data. - /// - /// This function is used to express intent to send data. This requests - /// connection level capacity. Once the capacity is available, it is - /// assigned to the stream and not reused by other streams. - /// - /// This function may be called repeatedly. The `capacity` argument is the - /// **total** amount of requested capacity. Sequential calls to - /// `reserve_capacity` are *not* additive. Given the following: - /// - /// ```rust - /// # use h2::*; - /// # fn doc(mut send_stream: SendStream<&'static [u8]>) { - /// send_stream.reserve_capacity(100); - /// send_stream.reserve_capacity(200); - /// # } - /// ``` - /// - /// After the second call to `reserve_capacity`, the *total* requested - /// capacity will be 200. - /// - /// `reserve_capacity` is also used to cancel previous capacity requests. - /// Given the following: - /// - /// ```rust - /// # use h2::*; - /// # fn doc(mut send_stream: SendStream<&'static [u8]>) { - /// send_stream.reserve_capacity(100); - /// send_stream.reserve_capacity(0); - /// # } - /// ``` - /// - /// After the second call to `reserve_capacity`, the *total* requested - /// capacity will be 0, i.e. there is no requested capacity for the stream. 
- /// - /// If `reserve_capacity` is called with a lower value than the amount of - /// capacity **currently** assigned to the stream, this capacity will be - /// returned to the connection to be re-assigned to other streams. - /// - /// Also, the amount of capacity that is reserved gets decremented as data - /// is sent. For example: - /// - /// ```rust - /// # use h2::*; - /// # async fn doc(mut send_stream: SendStream<&'static [u8]>) { - /// send_stream.reserve_capacity(100); - /// - /// send_stream.send_data(b"hello", false).unwrap(); - /// // At this point, the total amount of requested capacity is 95 bytes. - /// - /// // Calling `reserve_capacity` with `100` again essentially requests an - /// // additional 5 bytes. - /// send_stream.reserve_capacity(100); - /// # } - /// ``` - /// - /// See [Flow control](struct.SendStream.html#flow-control) for an overview - /// of how send flow control works. - pub fn reserve_capacity(&mut self, capacity: usize) { - // TODO: Check for overflow - self.inner.reserve_capacity(capacity as WindowSize) - } - - /// Returns the stream's current send capacity. - /// - /// This allows the caller to check the current amount of available capacity - /// before sending data. - pub fn capacity(&self) -> usize { - self.inner.capacity() as usize - } - - /// Requests to be notified when the stream's capacity increases. - /// - /// Before calling this, capacity should be requested with - /// `reserve_capacity`. Once capacity is requested, the connection will - /// assign capacity to the stream **as it becomes available**. There is no - /// guarantee as to when and in what increments capacity gets assigned to - /// the stream. - /// - /// To get notified when the available capacity increases, the caller calls - /// `poll_capacity`, which returns `Ready(Some(n))` when `n` has been - /// increased by the connection. Note that `n` here represents the **total** - /// amount of assigned capacity at that point in time. 
It is also possible - /// that `n` is lower than the previous call if, since then, the caller has - /// sent data. - pub fn poll_capacity(&mut self, cx: &mut Context) -> Poll>> { - self.inner - .poll_capacity(cx) - .map_ok(|w| w as usize) - .map_err(Into::into) - } - - /// Sends a single data frame to the remote peer. - /// - /// This function may be called repeatedly as long as `end_of_stream` is set - /// to `false`. Setting `end_of_stream` to `true` sets the end stream flag - /// on the data frame. Any further calls to `send_data` or `send_trailers` - /// will return an [`Error`]. - /// - /// `send_data` can be called without reserving capacity. In this case, the - /// data is buffered and the capacity is implicitly requested. Once the - /// capacity becomes available, the data is flushed to the connection. - /// However, this buffering is unbounded. As such, sending large amounts of - /// data without reserving capacity before hand could result in large - /// amounts of data being buffered in memory. - /// - /// [`Error`]: struct.Error.html - pub fn send_data(&mut self, data: B, end_of_stream: bool) -> Result<(), crate::Error> { - self.inner - .send_data(data, end_of_stream) - .map_err(Into::into) - } - - /// Sends trailers to the remote peer. - /// - /// Sending trailers implicitly closes the send stream. Once the send stream - /// is closed, no more data can be sent. - pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), crate::Error> { - self.inner.send_trailers(trailers).map_err(Into::into) - } - - /// Resets the stream. - /// - /// This cancels the request / response exchange. If the response has not - /// yet been received, the associated `ResponseFuture` will return an - /// [`Error`] to reflect the canceled exchange. - /// - /// [`Error`]: struct.Error.html - pub fn send_reset(&mut self, reason: Reason) { - self.inner.send_reset(reason) - } - - /// Polls to be notified when the client resets this stream. 
- /// - /// If stream is still open, this returns `Poll::Pending`, and - /// registers the task to be notified if a `RST_STREAM` is received. - /// - /// If a `RST_STREAM` frame is received for this stream, calling this - /// method will yield the `Reason` for the reset. - /// - /// # Error - /// - /// If connection sees an error, this returns that error instead of a - /// `Reason`. - pub fn poll_reset(&mut self, cx: &mut Context) -> Poll> { - self.inner.poll_reset(cx, proto::PollReset::Streaming) - } - - /// Returns the stream ID of this `SendStream`. - /// - /// # Panics - /// - /// If the lock on the stream store has been poisoned. - pub fn stream_id(&self) -> StreamId { - StreamId::from_internal(self.inner.stream_id()) - } -} - -// ===== impl StreamId ===== - -impl StreamId { - pub(crate) fn from_internal(id: crate::frame::StreamId) -> Self { - StreamId(id.into()) - } - - /// Returns the `u32` corresponding to this `StreamId` - /// - /// # Note - /// - /// This is the same as the `From` implementation, but - /// included as an inherent method because that implementation doesn't - /// appear in rustdocs, as well as a way to force the type instead of - /// relying on inference. - pub fn as_u32(&self) -> u32 { - (*self).into() - } -} -// ===== impl RecvStream ===== - -impl RecvStream { - pub(crate) fn new(inner: FlowControl) -> Self { - RecvStream { inner } - } - - /// Get the next data frame. - pub async fn data(&mut self) -> Option> { - futures_util::future::poll_fn(move |cx| self.poll_data(cx)).await - } - - /// Get optional trailers for this stream. - pub async fn trailers(&mut self) -> Result, crate::Error> { - futures_util::future::poll_fn(move |cx| self.poll_trailers(cx)).await - } - - /// Poll for the next data frame. 
- pub fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll>> { - self.inner.inner.poll_data(cx).map_err(Into::into) - } - - #[doc(hidden)] - pub fn poll_trailers( - &mut self, - cx: &mut Context, - ) -> Poll, crate::Error>> { - match ready!(self.inner.inner.poll_trailers(cx)) { - Some(Ok(map)) => Poll::Ready(Ok(Some(map))), - Some(Err(e)) => Poll::Ready(Err(e.into())), - None => Poll::Ready(Ok(None)), - } - } - - /// Returns true if the receive half has reached the end of stream. - /// - /// A return value of `true` means that calls to `poll` and `poll_trailers` - /// will both return `None`. - pub fn is_end_stream(&self) -> bool { - self.inner.inner.is_end_stream() - } - - /// Get a mutable reference to this stream's `FlowControl`. - /// - /// It can be used immediately, or cloned to be used later. - pub fn flow_control(&mut self) -> &mut FlowControl { - &mut self.inner - } - - /// Returns the stream ID of this stream. - /// - /// # Panics - /// - /// If the lock on the stream store has been poisoned. - pub fn stream_id(&self) -> StreamId { - self.inner.stream_id() - } -} - -#[cfg(feature = "stream")] -impl futures_core::Stream for RecvStream { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_data(cx) - } -} - -impl fmt::Debug for RecvStream { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("RecvStream") - .field("inner", &self.inner) - .finish() - } -} - -impl Drop for RecvStream { - fn drop(&mut self) { - // Eagerly clear any received DATA frames now, since its no longer - // possible to retrieve them. However, this will be called - // again once *all* stream refs have been dropped, since - // this won't send a RST_STREAM frame, in case the user wishes to - // still *send* DATA. 
- self.inner.inner.clear_recv_buffer(); - } -} - -// ===== impl FlowControl ===== - -impl FlowControl { - pub(crate) fn new(inner: proto::OpaqueStreamRef) -> Self { - FlowControl { inner } - } - - /// Returns the stream ID of the stream whose capacity will - /// be released by this `FlowControl`. - pub fn stream_id(&self) -> StreamId { - StreamId::from_internal(self.inner.stream_id()) - } - - /// Get the current available capacity of data this stream *could* receive. - pub fn available_capacity(&self) -> isize { - self.inner.available_recv_capacity() - } - - /// Get the currently *used* capacity for this stream. - /// - /// This is the amount of bytes that can be released back to the remote. - pub fn used_capacity(&self) -> usize { - self.inner.used_recv_capacity() as usize - } - - /// Release window capacity back to remote stream. - /// - /// This releases capacity back to the stream level and the connection level - /// windows. Both window sizes will be increased by `sz`. - /// - /// See [struct level] documentation for more details. - /// - /// # Errors - /// - /// This function errors if increasing the receive window size by `sz` would - /// result in a window size greater than the target window size. In other - /// words, the caller cannot release more capacity than data has been - /// received. If 1024 bytes of data have been received, at most 1024 bytes - /// can be released. - /// - /// [struct level]: # - pub fn release_capacity(&mut self, sz: usize) -> Result<(), crate::Error> { - if sz > proto::MAX_WINDOW_SIZE as usize { - return Err(UserError::ReleaseCapacityTooBig.into()); - } - self.inner - .release_capacity(sz as proto::WindowSize) - .map_err(Into::into) - } -} - -// ===== impl PingPong ===== - -impl PingPong { - pub(crate) fn new(inner: proto::UserPings) -> Self { - PingPong { inner } - } - - /// Send a PING frame and wait for the peer to send the pong. 
- pub async fn ping(&mut self, ping: Ping) -> Result { - self.send_ping(ping)?; - futures_util::future::poll_fn(|cx| self.poll_pong(cx)).await - } - - #[doc(hidden)] - pub fn send_ping(&mut self, ping: Ping) -> Result<(), crate::Error> { - // Passing a `Ping` here is just to be forwards-compatible with - // eventually allowing choosing a ping payload. For now, we can - // just ignore it. - let _ = ping; - - self.inner.send_ping().map_err(|err| match err { - Some(err) => err.into(), - None => UserError::SendPingWhilePending.into(), - }) - } - - #[doc(hidden)] - pub fn poll_pong(&mut self, cx: &mut Context) -> Poll> { - ready!(self.inner.poll_pong(cx))?; - Poll::Ready(Ok(Pong { _p: () })) - } -} - -impl fmt::Debug for PingPong { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("PingPong").finish() - } -} - -// ===== impl Ping ===== - -impl Ping { - /// Creates a new opaque `Ping` to be sent via a [`PingPong`][]. - /// - /// The payload is "opaque", such that it shouldn't be depended on. 
- /// - /// [`PingPong`]: struct.PingPong.html - pub fn opaque() -> Ping { - Ping { _p: () } - } -} - -impl fmt::Debug for Ping { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Ping").finish() - } -} - -// ===== impl Pong ===== - -impl fmt::Debug for Pong { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Pong").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/benches/bench.rs s390-tools-2.33.1/rust-vendor/hashbrown/benches/bench.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/benches/bench.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/benches/bench.rs 2024-05-28 11:57:36.000000000 +0200 @@ -311,7 +311,7 @@ // Each loop triggers one rehash for _ in 0..10 { - for i in 0..223 { + for i in 0..224 { set.insert(i); } diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/hashbrown/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/hashbrown/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/.cargo-checksum.json 2024-05-28 11:57:39.000000000 +0200 @@ -1 +1 @@ -{"files":{},"package":"290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"} \ No newline at end of file +{"files":{},"package":"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/Cargo.toml s390-tools-2.33.1/rust-vendor/hashbrown/Cargo.toml --- s390-tools-2.31.0/rust-vendor/hashbrown/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/Cargo.toml 2024-05-28 11:57:36.000000000 +0200 @@ -11,9 +11,9 @@ [package] edition = "2021" -rust-version = "1.63.0" +rust-version = "1.56.0" name = "hashbrown" -version = "0.14.3" +version = "0.12.3" authors = ["Amanieu d'Antras "] exclude = [ ".github", @@ -33,6 +33,7 @@ ] license = "MIT OR Apache-2.0" repository = 
"https://github.com/rust-lang/hashbrown" +resolver = "2" [package.metadata.docs.rs] features = [ @@ -41,10 +42,9 @@ "serde", "raw", ] -rustdoc-args = ["--generate-link-to-definition"] [dependencies.ahash] -version = "0.8.6" +version = "0.7.0" optional = true default-features = false @@ -53,11 +53,9 @@ optional = true package = "rustc-std-workspace-alloc" -[dependencies.allocator-api2] -version = "0.2.9" -features = ["alloc"] +[dependencies.bumpalo] +version = "3.5.0" optional = true -default-features = false [dependencies.compiler_builtins] version = "0.1.2" @@ -68,30 +66,15 @@ optional = true package = "rustc-std-workspace-core" -[dependencies.equivalent] -version = "1.0" -optional = true -default-features = false - [dependencies.rayon] version = "1.0" optional = true -[dependencies.rkyv] -version = "0.7.42" -features = ["alloc"] -optional = true -default-features = false - [dependencies.serde] version = "1.0.25" optional = true default-features = false -[dev-dependencies.bumpalo] -version = "3.13.0" -features = ["allocator-api2"] - [dev-dependencies.doc-comment] version = "0.3.1" @@ -108,24 +91,17 @@ [dev-dependencies.rayon] version = "1.0" -[dev-dependencies.rkyv] -version = "0.7.42" -features = ["validation"] - [dev-dependencies.serde_test] version = "1.0" [features] +ahash-compile-time-rng = ["ahash/compile-time-rng"] default = [ "ahash", "inline-more", - "allocator-api2", ] inline-more = [] -nightly = [ - "allocator-api2?/nightly", - "bumpalo/allocator_api", -] +nightly = [] raw = [] rustc-dep-of-std = [ "nightly", diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/CHANGELOG.md s390-tools-2.33.1/rust-vendor/hashbrown/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/hashbrown/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/CHANGELOG.md 2024-05-28 11:57:36.000000000 +0200 @@ -7,143 +7,35 @@ ## [Unreleased] -## [v0.14.3] - 2023-11-26 - -### Added - -- Specialized `fold` implementation of iterators. 
(#480) - -### Fixed - -- Avoid using unstable `ptr::invalid_mut` on nightly. (#481) - -## [v0.14.2] - 2023-10-19 - -### Added - -- `HashTable` type which provides a low-level but safe API with explicit hashing. (#466) - -### Fixed - -- Disabled the use of NEON instructions on big-endian ARM. (#475) -- Disabled the use of NEON instructions on Miri. (#476) - -## [v0.14.1] - 2023-09-28 - -### Added - -- Allow serializing `HashMap`s that use a custom allocator. (#449) - -### Changed - -- Use the `Equivalent` trait from the `equivalent` crate. (#442) -- Slightly improved performance of table resizing. (#451) -- Relaxed MSRV to 1.63.0. (#457) -- Removed `Clone` requirement from custom allocators. (#468) - -### Fixed - -- Fixed custom allocators being leaked in some situations. (#439, #465) - -## [v0.14.0] - 2023-06-01 - -### Added - -- Support for `allocator-api2` crate - for interfacing with custom allocators on stable. (#417) -- Optimized implementation for ARM using NEON instructions. (#430) -- Support for rkyv serialization. (#432) -- `Equivalent` trait to look up values without `Borrow`. (#345) -- `Hash{Map,Set}::raw_table_mut` is added whic returns a mutable reference. (#404) -- Fast path for `clear` on empty tables. (#428) - -### Changed - -- Optimized insertion to only perform a single lookup. (#277) -- `DrainFilter` (`drain_filter`) has been renamed to `ExtractIf` and no longer drops remaining - elements when the iterator is dropped. #(374) -- Bumped MSRV to 1.64.0. (#431) -- `{Map,Set}::raw_table` now returns an immutable reference. (#404) -- `VacantEntry` and `OccupiedEntry` now use the default hasher if none is - specified in generics. (#389) -- `RawTable::data_start` now returns a `NonNull` to match `RawTable::data_end`. (#387) -- `RawIter::{reflect_insert, reflect_remove}` are now unsafe. (#429) -- `RawTable::find_potential` is renamed to `find_or_find_insert_slot` and returns an `InsertSlot`. (#429) -- `RawTable::remove` now also returns an `InsertSlot`. 
(#429) -- `InsertSlot` can be used to insert an element with `RawTable::insert_in_slot`. (#429) -- `RawIterHash` no longer has a lifetime tied to that of the `RawTable`. (#427) -- The trait bounds of `HashSet::raw_table` have been relaxed to not require `Eq + Hash`. (#423) -- `EntryRef::and_replace_entry_with` and `OccupiedEntryRef::replace_entry_with` - were changed to give a `&K` instead of a `&Q` to the closure. - -### Removed - -- Support for `bumpalo` as an allocator with custom wrapper. - Use `allocator-api2` feature in `bumpalo` to use it as an allocator - for `hashbrown` collections. (#417) - -## [v0.13.2] - 2023-01-12 - -### Fixed - -- Added `#[inline(always)]` to `find_inner`. (#375) -- Fixed `RawTable::allocation_info` for empty tables. (#376) - -## [v0.13.1] - 2022-11-10 - -### Added - -- Added `Equivalent` trait to customize key lookups. (#350) -- Added support for 16-bit targets. (#368) -- Added `RawTable::allocation_info` which provides information about the memory - usage of a table. (#371) - -### Changed - -- Bumped MSRV to 1.61.0. -- Upgraded to `ahash` 0.8. (#357) -- Make `with_hasher_in` const. (#355) -- The following methods have been removed from the `RawTable` API in favor of - safer alternatives: - - `RawTable::erase_no_drop` => Use `RawTable::erase` or `RawTable::remove` instead. - - `Bucket::read` => Use `RawTable::remove` instead. - - `Bucket::drop` => Use `RawTable::erase` instead. - - `Bucket::write` => Use `Bucket::as_mut` instead. - -### Fixed - -- Ensure that `HashMap` allocations don't exceed `isize::MAX`. (#362) -- Fixed issue with field retagging in scopeguard. (#359) - ## [v0.12.3] - 2022-07-17 -### Fixed +## Fixed - Fixed double-drop in `RawTable::clone_from`. (#348) ## [v0.12.2] - 2022-07-09 -### Added +## Added - Added `Entry` API for `HashSet`. (#342) - Added `Extend<&'a (K, V)> for HashMap`. (#340) - Added length-based short-circuiting for hash table iteration. 
(#338) - Added a function to access the `RawTable` of a `HashMap`. (#335) -### Changed +## Changed - Edited `do_alloc` to reduce LLVM IR generated. (#341) ## [v0.12.1] - 2022-05-02 -### Fixed +## Fixed - Fixed underflow in `RawIterRange::size_hint`. (#325) - Fixed the implementation of `Debug` for `ValuesMut` and `IntoValues`. (#325) ## [v0.12.0] - 2022-01-17 -### Added +## Added - Added `From<[T; N]>` and `From<[(K, V); N]>` for `HashSet` and `HashMap` respectively. (#297) - Added an `allocator()` getter to HashMap and HashSet. (#257) @@ -152,7 +44,7 @@ - Implement `From` on `HashSet` and `HashMap`. (#298) - Added `entry_ref` API to `HashMap`. (#201) -### Changed +## Changed - Bumped minimum Rust version to 1.56.1 and edition to 2021. - Use u64 for the GroupWord on WebAssembly. (#271) @@ -164,7 +56,7 @@ - Rename `get_each_mut` to `get_many_mut` and align API with the stdlib. (#291) - Don't hash the key when searching in an empty table. (#305) -### Fixed +## Fixed - Guard against allocations exceeding isize::MAX. (#268) - Made `RawTable::insert_no_grow` unsafe. (#254) @@ -173,19 +65,19 @@ ## [v0.11.2] - 2021-03-25 -### Fixed +## Fixed - Added missing allocator type parameter to `HashMap`'s and `HashSet`'s `Clone` impls. (#252) ## [v0.11.1] - 2021-03-20 -### Fixed +## Fixed - Added missing `pub` modifier to `BumpWrapper`. (#251) ## [v0.11.0] - 2021-03-14 -### Added +## Added - Added safe `try_insert_no_grow` method to `RawTable`. (#229) - Added support for `bumpalo` as an allocator without the `nightly` feature. (#231) - Implemented `Default` for `RawTable`. (#237) @@ -194,22 +86,22 @@ - Added `From>` for `HashSet`. (#235) - Added `try_insert` method to `HashMap`. (#247) -### Changed +## Changed - The minimum Rust version has been bumped to 1.49.0. (#230) - Significantly improved compilation times by reducing the amount of generated IR. 
(#205) -### Removed +## Removed - We no longer re-export the unstable allocator items from the standard library, nor the stable shims approximating the same. (#227) - Removed hasher specialization support from `aHash`, which was resulting in inconsistent hashes being generated for a key. (#248) -### Fixed +## Fixed - Fixed union length comparison. (#228) ## ~~[v0.10.0] - 2021-01-16~~ This release was _yanked_ due to inconsistent hashes being generated with the `nightly` feature. (#248) -### Changed +## Changed - Parametrized `RawTable`, `HashSet` and `HashMap` over an allocator. (#133) - Improved branch prediction hints on stable. (#209) - Optimized hashing of primitive types with AHash using specialization. (#207) @@ -217,7 +109,7 @@ ## [v0.9.1] - 2020-09-28 -### Added +## Added - Added safe methods to `RawTable` (#202): - `get`: `find` and `as_ref` - `get_mut`: `find` and `as_mut` @@ -225,7 +117,7 @@ - `remove_entry`: `find` and `remove` - `erase_entry`: `find` and `erase` -### Changed +## Changed - Removed `from_key_hashed_nocheck`'s `Q: Hash`. (#200) - Made `RawTable::drain` safe. (#201) @@ -323,7 +215,7 @@ ## [v0.6.2] - 2019-10-23 ### Added -- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between +- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between runtime performance and compilation time. 
(#119) ## [v0.6.1] - 2019-10-04 @@ -471,13 +363,7 @@ - Initial release -[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.14.3...HEAD -[v0.14.3]: https://github.com/rust-lang/hashbrown/compare/v0.14.2...v0.14.3 -[v0.14.2]: https://github.com/rust-lang/hashbrown/compare/v0.14.1...v0.14.2 -[v0.14.1]: https://github.com/rust-lang/hashbrown/compare/v0.14.0...v0.14.1 -[v0.14.0]: https://github.com/rust-lang/hashbrown/compare/v0.13.2...v0.14.0 -[v0.13.2]: https://github.com/rust-lang/hashbrown/compare/v0.13.1...v0.13.2 -[v0.13.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...v0.13.1 +[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...HEAD [v0.12.3]: https://github.com/rust-lang/hashbrown/compare/v0.12.2...v0.12.3 [v0.12.2]: https://github.com/rust-lang/hashbrown/compare/v0.12.1...v0.12.2 [v0.12.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.0...v0.12.1 diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/README.md s390-tools-2.33.1/rust-vendor/hashbrown/README.md --- s390-tools-2.31.0/rust-vendor/hashbrown/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/README.md 2024-05-28 11:57:36.000000000 +0200 @@ -4,7 +4,7 @@ [![Build Status](https://github.com/rust-lang/hashbrown/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/hashbrown/actions) [![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown) [![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown) -[![Rust](https://img.shields.io/badge/rust-1.63.0%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) +[![Rust](https://img.shields.io/badge/rust-1.56.1%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) This crate is a Rust port of Google's high-performance [SwissTable] hash map, adapted to make it a drop-in replacement for Rust's standard `HashMap` @@ -40,44 +40,44 @@ With the hashbrown default AHash hasher: -| name | 
oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | -| :-------------------------- | :----------------: | ----------------: | :----------: | ------: | ------- | -| insert_ahash_highbits | 18,865 | 8,020 | -10,845 | -57.49% | x 2.35 | -| insert_ahash_random | 19,711 | 8,019 | -11,692 | -59.32% | x 2.46 | -| insert_ahash_serial | 19,365 | 6,463 | -12,902 | -66.63% | x 3.00 | -| insert_erase_ahash_highbits | 51,136 | 17,916 | -33,220 | -64.96% | x 2.85 | -| insert_erase_ahash_random | 51,157 | 17,688 | -33,469 | -65.42% | x 2.89 | -| insert_erase_ahash_serial | 45,479 | 14,895 | -30,584 | -67.25% | x 3.05 | -| iter_ahash_highbits | 1,399 | 1,092 | -307 | -21.94% | x 1.28 | -| iter_ahash_random | 1,586 | 1,059 | -527 | -33.23% | x 1.50 | -| iter_ahash_serial | 3,168 | 1,079 | -2,089 | -65.94% | x 2.94 | -| lookup_ahash_highbits | 32,351 | 4,792 | -27,559 | -85.19% | x 6.75 | -| lookup_ahash_random | 17,419 | 4,817 | -12,602 | -72.35% | x 3.62 | -| lookup_ahash_serial | 15,254 | 3,606 | -11,648 | -76.36% | x 4.23 | -| lookup_fail_ahash_highbits | 21,187 | 4,369 | -16,818 | -79.38% | x 4.85 | -| lookup_fail_ahash_random | 21,550 | 4,395 | -17,155 | -79.61% | x 4.90 | -| lookup_fail_ahash_serial | 19,450 | 3,176 | -16,274 | -83.67% | x 6.12 | +| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | +|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------| +| insert_ahash_highbits | 18,865 | 8,020 | -10,845 | -57.49% | x 2.35 | +| insert_ahash_random | 19,711 | 8,019 | -11,692 | -59.32% | x 2.46 | +| insert_ahash_serial | 19,365 | 6,463 | -12,902 | -66.63% | x 3.00 | +| insert_erase_ahash_highbits | 51,136 | 17,916 | -33,220 | -64.96% | x 2.85 | +| insert_erase_ahash_random | 51,157 | 17,688 | -33,469 | -65.42% | x 2.89 | +| insert_erase_ahash_serial | 45,479 | 14,895 | -30,584 | -67.25% | x 3.05 | +| iter_ahash_highbits | 1,399 | 1,092 | -307 | -21.94% | x 1.28 | +| 
iter_ahash_random | 1,586 | 1,059 | -527 | -33.23% | x 1.50 | +| iter_ahash_serial | 3,168 | 1,079 | -2,089 | -65.94% | x 2.94 | +| lookup_ahash_highbits | 32,351 | 4,792 | -27,559 | -85.19% | x 6.75 | +| lookup_ahash_random | 17,419 | 4,817 | -12,602 | -72.35% | x 3.62 | +| lookup_ahash_serial | 15,254 | 3,606 | -11,648 | -76.36% | x 4.23 | +| lookup_fail_ahash_highbits | 21,187 | 4,369 | -16,818 | -79.38% | x 4.85 | +| lookup_fail_ahash_random | 21,550 | 4,395 | -17,155 | -79.61% | x 4.90 | +| lookup_fail_ahash_serial | 19,450 | 3,176 | -16,274 | -83.67% | x 6.12 | With the libstd default SipHash hasher: -| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | -| :------------------------ | :----------------: | ----------------: | :----------: | ------: | ------- | -| insert_std_highbits | 19,216 | 16,885 | -2,331 | -12.13% | x 1.14 | -| insert_std_random | 19,179 | 17,034 | -2,145 | -11.18% | x 1.13 | -| insert_std_serial | 19,462 | 17,493 | -1,969 | -10.12% | x 1.11 | -| insert_erase_std_highbits | 50,825 | 35,847 | -14,978 | -29.47% | x 1.42 | -| insert_erase_std_random | 51,448 | 35,392 | -16,056 | -31.21% | x 1.45 | -| insert_erase_std_serial | 87,711 | 38,091 | -49,620 | -56.57% | x 2.30 | -| iter_std_highbits | 1,378 | 1,159 | -219 | -15.89% | x 1.19 | -| iter_std_random | 1,395 | 1,132 | -263 | -18.85% | x 1.23 | -| iter_std_serial | 1,704 | 1,105 | -599 | -35.15% | x 1.54 | -| lookup_std_highbits | 17,195 | 13,642 | -3,553 | -20.66% | x 1.26 | -| lookup_std_random | 17,181 | 13,773 | -3,408 | -19.84% | x 1.25 | -| lookup_std_serial | 15,483 | 13,651 | -1,832 | -11.83% | x 1.13 | -| lookup_fail_std_highbits | 20,926 | 13,474 | -7,452 | -35.61% | x 1.55 | -| lookup_fail_std_random | 21,766 | 13,505 | -8,261 | -37.95% | x 1.61 | -| lookup_fail_std_serial | 19,336 | 13,519 | -5,817 | -30.08% | x 1.43 | +|name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | 
+|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------| +|insert_std_highbits |19,216 |16,885 | -2,331 | -12.13% | x 1.14 | +|insert_std_random |19,179 |17,034 | -2,145 | -11.18% | x 1.13 | +|insert_std_serial |19,462 |17,493 | -1,969 | -10.12% | x 1.11 | +|insert_erase_std_highbits |50,825 |35,847 | -14,978 | -29.47% | x 1.42 | +|insert_erase_std_random |51,448 |35,392 | -16,056 | -31.21% | x 1.45 | +|insert_erase_std_serial |87,711 |38,091 | -49,620 | -56.57% | x 2.30 | +|iter_std_highbits |1,378 |1,159 | -219 | -15.89% | x 1.19 | +|iter_std_random |1,395 |1,132 | -263 | -18.85% | x 1.23 | +|iter_std_serial |1,704 |1,105 | -599 | -35.15% | x 1.54 | +|lookup_std_highbits |17,195 |13,642 | -3,553 | -20.66% | x 1.26 | +|lookup_std_random |17,181 |13,773 | -3,408 | -19.84% | x 1.25 | +|lookup_std_serial |15,483 |13,651 | -1,832 | -11.83% | x 1.13 | +|lookup_fail_std_highbits |20,926 |13,474 | -7,452 | -35.61% | x 1.55 | +|lookup_fail_std_random |21,766 |13,505 | -8,261 | -37.95% | x 1.61 | +|lookup_fail_std_serial |19,336 |13,519 | -5,817 | -30.08% | x 1.43 | ## Usage @@ -85,7 +85,7 @@ ```toml [dependencies] -hashbrown = "0.14" +hashbrown = "0.12" ``` Then: @@ -101,13 +101,14 @@ - `nightly`: Enables nightly-only features including: `#[may_dangle]`. - `serde`: Enables serde serialization support. -- `rkyv`: Enables rkyv serialization support. - `rayon`: Enables rayon parallel iterator support. - `raw`: Enables access to the experimental and unsafe `RawTable` API. - `inline-more`: Adds inline hints to most functions, improving run-time performance at the cost of compilation time. (enabled by default) +- `bumpalo`: Provides a `BumpWrapper` type which allows `bumpalo` to be used for memory allocation. - `ahash`: Compiles with ahash as default hasher. (enabled by default) -- `allocator-api2`: Enables support for allocators that support `allocator-api2`. 
(enabled by default) +- `ahash-compile-time-rng`: Activates the `compile-time-rng` feature of ahash. For targets with no random number generator +this pre-generates seeds at compile time and embeds them as constants. See [aHash's documentation](https://github.com/tkaitchuck/aHash#flags) (disabled by default) ## License diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/mod.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/mod.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/mod.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,6 +1,4 @@ #[cfg(feature = "rayon")] pub(crate) mod rayon; -#[cfg(feature = "rkyv")] -mod rkyv; #[cfg(feature = "serde")] mod serde; diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/map.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/map.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/map.rs 2024-05-28 11:57:36.000000000 +0200 @@ -232,11 +232,11 @@ /// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter /// [`HashMap`]: /hashbrown/struct.HashMap.html /// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter { +pub struct IntoParIter { inner: RawIntoParIter<(K, V), A>, } -impl ParallelIterator for IntoParIter { +impl ParallelIterator for IntoParIter { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -248,7 +248,9 @@ } } -impl fmt::Debug for IntoParIter { +impl fmt::Debug + for IntoParIter +{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ParIter { inner: unsafe { self.inner.par_iter() }, @@ -265,11 +267,11 @@ /// /// [`par_drain`]: 
/hashbrown/struct.HashMap.html#method.par_drain /// [`HashMap`]: /hashbrown/struct.HashMap.html -pub struct ParDrain<'a, K, V, A: Allocator = Global> { +pub struct ParDrain<'a, K, V, A: Allocator + Clone = Global> { inner: RawParDrain<'a, (K, V), A>, } -impl ParallelIterator for ParDrain<'_, K, V, A> { +impl ParallelIterator for ParDrain<'_, K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -281,7 +283,9 @@ } } -impl fmt::Debug for ParDrain<'_, K, V, A> { +impl fmt::Debug + for ParDrain<'_, K, V, A> +{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ParIter { inner: unsafe { self.inner.par_iter() }, @@ -291,7 +295,7 @@ } } -impl HashMap { +impl HashMap { /// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order. #[cfg_attr(feature = "inline-more", inline)] pub fn par_keys(&self) -> ParKeys<'_, K, V> { @@ -311,7 +315,7 @@ } } -impl HashMap { +impl HashMap { /// Visits (potentially in parallel) mutably borrowed values in an arbitrary order. #[cfg_attr(feature = "inline-more", inline)] pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { @@ -336,7 +340,7 @@ K: Eq + Hash + Sync, V: PartialEq + Sync, S: BuildHasher + Sync, - A: Allocator + Sync, + A: Allocator + Clone + Sync, { /// Returns `true` if the map is equal to another, /// i.e. both maps contain the same keys mapped to the same values. 
@@ -350,7 +354,9 @@ } } -impl IntoParallelIterator for HashMap { +impl IntoParallelIterator + for HashMap +{ type Item = (K, V); type Iter = IntoParIter; @@ -362,7 +368,9 @@ } } -impl<'a, K: Sync, V: Sync, S, A: Allocator> IntoParallelIterator for &'a HashMap { +impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator + for &'a HashMap +{ type Item = (&'a K, &'a V); type Iter = ParIter<'a, K, V>; @@ -375,7 +383,9 @@ } } -impl<'a, K: Sync, V: Send, S, A: Allocator> IntoParallelIterator for &'a mut HashMap { +impl<'a, K: Sync, V: Send, S, A: Allocator + Clone> IntoParallelIterator + for &'a mut HashMap +{ type Item = (&'a K, &'a mut V); type Iter = ParIterMut<'a, K, V>; @@ -414,7 +424,7 @@ K: Eq + Hash + Send, V: Send, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { fn par_extend(&mut self, par_iter: I) where @@ -430,7 +440,7 @@ K: Copy + Eq + Hash + Sync, V: Copy + Sync, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { fn par_extend(&mut self, par_iter: I) where @@ -446,7 +456,7 @@ K: Eq + Hash, S: BuildHasher, I: IntoParallelIterator, - A: Allocator, + A: Allocator + Clone, HashMap: Extend, { let (list, len) = super::helpers::collect(par_iter); @@ -551,7 +561,10 @@ assert_eq!(value.load(Ordering::Relaxed), 100); // retain only half - let _v: Vec<_> = hm.into_par_iter().filter(|(key, _)| key.k < 50).collect(); + let _v: Vec<_> = hm + .into_par_iter() + .filter(|&(ref key, _)| key.k < 50) + .collect(); assert_eq!(key.load(Ordering::Relaxed), 50); assert_eq!(value.load(Ordering::Relaxed), 50); @@ -598,7 +611,7 @@ assert_eq!(value.load(Ordering::Relaxed), 100); // retain only half - let _v: Vec<_> = hm.drain().filter(|(key, _)| key.k < 50).collect(); + let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect(); assert!(hm.is_empty()); assert_eq!(key.load(Ordering::Relaxed), 50); diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/mod.rs 
s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/mod.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/mod.rs 2024-05-28 11:57:36.000000000 +0200 @@ -2,4 +2,3 @@ pub(crate) mod map; pub(crate) mod raw; pub(crate) mod set; -pub(crate) mod table; diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/raw.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/raw.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/raw.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/raw.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,6 +1,7 @@ use crate::raw::Bucket; use crate::raw::{Allocator, Global, RawIter, RawIterRange, RawTable}; use crate::scopeguard::guard; +use alloc::alloc::dealloc; use core::marker::PhantomData; use core::mem; use core::ptr::NonNull; @@ -75,18 +76,18 @@ } /// Parallel iterator which consumes a table and returns elements. -pub struct RawIntoParIter { +pub struct RawIntoParIter { table: RawTable, } -impl RawIntoParIter { +impl RawIntoParIter { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter { self.table.par_iter() } } -impl ParallelIterator for RawIntoParIter { +impl ParallelIterator for RawIntoParIter { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -96,9 +97,9 @@ { let iter = unsafe { self.table.iter().iter }; let _guard = guard(self.table.into_allocation(), |alloc| { - if let Some((ptr, layout, ref alloc)) = *alloc { + if let Some((ptr, layout)) = *alloc { unsafe { - alloc.deallocate(ptr, layout); + dealloc(ptr.as_ptr(), layout); } } }); @@ -108,23 +109,23 @@ } /// Parallel iterator which consumes elements without freeing the table storage. 
-pub struct RawParDrain<'a, T, A: Allocator = Global> { +pub struct RawParDrain<'a, T, A: Allocator + Clone = Global> { // We don't use a &'a mut RawTable because we want RawParDrain to be // covariant over T. table: NonNull>, marker: PhantomData<&'a RawTable>, } -unsafe impl Send for RawParDrain<'_, T, A> {} +unsafe impl Send for RawParDrain<'_, T, A> {} -impl RawParDrain<'_, T, A> { +impl RawParDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter { self.table.as_ref().par_iter() } } -impl ParallelIterator for RawParDrain<'_, T, A> { +impl ParallelIterator for RawParDrain<'_, T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -142,7 +143,7 @@ } } -impl Drop for RawParDrain<'_, T, A> { +impl Drop for RawParDrain<'_, T, A> { fn drop(&mut self) { // If drive_unindexed is not called then simply clear the table. unsafe { @@ -203,7 +204,7 @@ } } -impl RawTable { +impl RawTable { /// Returns a parallel iterator over the elements in a `RawTable`. 
#[cfg_attr(feature = "inline-more", inline)] pub unsafe fn par_iter(&self) -> RawParIter { diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/set.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/set.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/set.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/set.rs 2024-05-28 11:57:36.000000000 +0200 @@ -16,11 +16,11 @@ /// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter /// [`HashSet`]: /hashbrown/struct.HashSet.html /// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter { +pub struct IntoParIter { inner: map::IntoParIter, } -impl ParallelIterator for IntoParIter { +impl ParallelIterator for IntoParIter { type Item = T; fn drive_unindexed(self, consumer: C) -> C::Result @@ -38,11 +38,11 @@ /// /// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParDrain<'a, T, A: Allocator = Global> { +pub struct ParDrain<'a, T, A: Allocator + Clone = Global> { inner: map::ParDrain<'a, T, (), A>, } -impl ParallelIterator for ParDrain<'_, T, A> { +impl ParallelIterator for ParDrain<'_, T, A> { type Item = T; fn drive_unindexed(self, consumer: C) -> C::Result @@ -85,7 +85,7 @@ /// /// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParDifference<'a, T, S, A: Allocator = Global> { +pub struct ParDifference<'a, T, S, A: Allocator + Clone = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -94,7 +94,7 @@ where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Sync, + A: Allocator + Clone + Sync, { type Item = &'a T; @@ -118,7 +118,7 @@ /// /// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference /// 
[`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParSymmetricDifference<'a, T, S, A: Allocator = Global> { +pub struct ParSymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -127,7 +127,7 @@ where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Sync, + A: Allocator + Clone + Sync, { type Item = &'a T; @@ -150,7 +150,7 @@ /// /// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParIntersection<'a, T, S, A: Allocator = Global> { +pub struct ParIntersection<'a, T, S, A: Allocator + Clone = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -159,7 +159,7 @@ where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Sync, + A: Allocator + Clone + Sync, { type Item = &'a T; @@ -181,7 +181,7 @@ /// /// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParUnion<'a, T, S, A: Allocator = Global> { +pub struct ParUnion<'a, T, S, A: Allocator + Clone = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -190,7 +190,7 @@ where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Sync, + A: Allocator + Clone + Sync, { type Item = &'a T; @@ -216,7 +216,7 @@ where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Sync, + A: Allocator + Clone + Sync, { /// Visits (potentially in parallel) the values representing the union, /// i.e. all the values in `self` or `other`, without duplicates. @@ -289,7 +289,7 @@ impl HashSet where T: Eq + Hash + Send, - A: Allocator + Send, + A: Allocator + Clone + Send, { /// Consumes (potentially in parallel) all values in an arbitrary order, /// while preserving the set's allocated memory for reuse. 
@@ -301,7 +301,7 @@ } } -impl IntoParallelIterator for HashSet { +impl IntoParallelIterator for HashSet { type Item = T; type Iter = IntoParIter; @@ -313,7 +313,7 @@ } } -impl<'a, T: Sync, S, A: Allocator> IntoParallelIterator for &'a HashSet { +impl<'a, T: Sync, S, A: Allocator + Clone> IntoParallelIterator for &'a HashSet { type Item = &'a T; type Iter = ParIter<'a, T>; @@ -374,7 +374,7 @@ where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, I: IntoParallelIterator, HashSet: Extend, { diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/table.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/table.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rayon/table.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rayon/table.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,252 +0,0 @@ -//! Rayon extensions for `HashTable`. - -use super::raw::{RawIntoParIter, RawParDrain, RawParIter}; -use crate::hash_table::HashTable; -use crate::raw::{Allocator, Global}; -use core::fmt; -use core::marker::PhantomData; -use rayon::iter::plumbing::UnindexedConsumer; -use rayon::iter::{IntoParallelIterator, ParallelIterator}; - -/// Parallel iterator over shared references to entries in a map. -/// -/// This iterator is created by the [`par_iter`] method on [`HashTable`] -/// (provided by the [`IntoParallelRefIterator`] trait). -/// See its documentation for more. 
-/// -/// [`par_iter`]: /hashbrown/struct.HashTable.html#method.par_iter -/// [`HashTable`]: /hashbrown/struct.HashTable.html -/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html -pub struct ParIter<'a, T> { - inner: RawParIter, - marker: PhantomData<&'a T>, -} - -impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { - type Item = &'a T; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner - .map(|x| unsafe { x.as_ref() }) - .drive_unindexed(consumer) - } -} - -impl Clone for ParIter<'_, T> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - marker: PhantomData, - } - } -} - -impl fmt::Debug for ParIter<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = unsafe { self.inner.iter() }.map(|x| unsafe { x.as_ref() }); - f.debug_list().entries(iter).finish() - } -} - -/// Parallel iterator over mutable references to entries in a map. -/// -/// This iterator is created by the [`par_iter_mut`] method on [`HashTable`] -/// (provided by the [`IntoParallelRefMutIterator`] trait). -/// See its documentation for more. 
-/// -/// [`par_iter_mut`]: /hashbrown/struct.HashTable.html#method.par_iter_mut -/// [`HashTable`]: /hashbrown/struct.HashTable.html -/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html -pub struct ParIterMut<'a, T> { - inner: RawParIter, - marker: PhantomData<&'a mut T>, -} - -impl<'a, T: Send> ParallelIterator for ParIterMut<'a, T> { - type Item = &'a mut T; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner - .map(|x| unsafe { x.as_mut() }) - .drive_unindexed(consumer) - } -} - -impl fmt::Debug for ParIterMut<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ParIter { - inner: self.inner.clone(), - marker: PhantomData, - } - .fmt(f) - } -} - -/// Parallel iterator over entries of a consumed map. -/// -/// This iterator is created by the [`into_par_iter`] method on [`HashTable`] -/// (provided by the [`IntoParallelIterator`] trait). -/// See its documentation for more. -/// -/// [`into_par_iter`]: /hashbrown/struct.HashTable.html#method.into_par_iter -/// [`HashTable`]: /hashbrown/struct.HashTable.html -/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter { - inner: RawIntoParIter, -} - -impl ParallelIterator for IntoParIter { - type Item = T; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner.drive_unindexed(consumer) - } -} - -impl fmt::Debug for IntoParIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ParIter { - inner: unsafe { self.inner.par_iter() }, - marker: PhantomData, - } - .fmt(f) - } -} - -/// Parallel draining iterator over entries of a map. -/// -/// This iterator is created by the [`par_drain`] method on [`HashTable`]. -/// See its documentation for more. 
-/// -/// [`par_drain`]: /hashbrown/struct.HashTable.html#method.par_drain -/// [`HashTable`]: /hashbrown/struct.HashTable.html -pub struct ParDrain<'a, T, A: Allocator = Global> { - inner: RawParDrain<'a, T, A>, -} - -impl ParallelIterator for ParDrain<'_, T, A> { - type Item = T; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner.drive_unindexed(consumer) - } -} - -impl fmt::Debug for ParDrain<'_, T, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ParIter { - inner: unsafe { self.inner.par_iter() }, - marker: PhantomData, - } - .fmt(f) - } -} - -impl HashTable { - /// Consumes (potentially in parallel) all values in an arbitrary order, - /// while preserving the map's allocated memory for reuse. - #[cfg_attr(feature = "inline-more", inline)] - pub fn par_drain(&mut self) -> ParDrain<'_, T, A> { - ParDrain { - inner: self.raw.par_drain(), - } - } -} - -impl IntoParallelIterator for HashTable { - type Item = T; - type Iter = IntoParIter; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_par_iter(self) -> Self::Iter { - IntoParIter { - inner: self.raw.into_par_iter(), - } - } -} - -impl<'a, T: Sync, A: Allocator> IntoParallelIterator for &'a HashTable { - type Item = &'a T; - type Iter = ParIter<'a, T>; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_par_iter(self) -> Self::Iter { - ParIter { - inner: unsafe { self.raw.par_iter() }, - marker: PhantomData, - } - } -} - -impl<'a, T: Send, A: Allocator> IntoParallelIterator for &'a mut HashTable { - type Item = &'a mut T; - type Iter = ParIterMut<'a, T>; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_par_iter(self) -> Self::Iter { - ParIterMut { - inner: unsafe { self.raw.par_iter() }, - marker: PhantomData, - } - } -} - -#[cfg(test)] -mod test_par_table { - use alloc::vec::Vec; - use core::sync::atomic::{AtomicUsize, Ordering}; - - use rayon::prelude::*; - - use 
crate::{ - hash_map::{make_hash, DefaultHashBuilder}, - hash_table::HashTable, - }; - - #[test] - fn test_iterate() { - let hasher = DefaultHashBuilder::default(); - let mut a = HashTable::new(); - for i in 0..32 { - a.insert_unique(make_hash(&hasher, &i), i, |x| make_hash(&hasher, x)); - } - let observed = AtomicUsize::new(0); - a.par_iter().for_each(|k| { - observed.fetch_or(1 << *k, Ordering::Relaxed); - }); - assert_eq!(observed.into_inner(), 0xFFFF_FFFF); - } - - #[test] - fn test_move_iter() { - let hasher = DefaultHashBuilder::default(); - let hs = { - let mut hs = HashTable::new(); - - hs.insert_unique(make_hash(&hasher, &'a'), 'a', |x| make_hash(&hasher, x)); - hs.insert_unique(make_hash(&hasher, &'b'), 'b', |x| make_hash(&hasher, x)); - - hs - }; - - let v = hs.into_par_iter().collect::>(); - assert!(v == ['a', 'b'] || v == ['b', 'a']); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,125 +0,0 @@ -use crate::HashMap; -use core::{ - borrow::Borrow, - hash::{BuildHasher, Hash}, -}; -use rkyv::{ - collections::hash_map::{ArchivedHashMap, HashMapResolver}, - ser::{ScratchSpace, Serializer}, - Archive, Deserialize, Fallible, Serialize, -}; - -impl Archive for HashMap -where - K::Archived: Hash + Eq, -{ - type Archived = ArchivedHashMap; - type Resolver = HashMapResolver; - - #[inline] - unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) { - ArchivedHashMap::resolve_from_len(self.len(), pos, resolver, out); - } -} - -impl Serialize for HashMap -where - K: Serialize + Hash + Eq, - K::Archived: Hash + Eq, - V: Serialize, - S: Serializer + ScratchSpace 
+ ?Sized, -{ - #[inline] - fn serialize(&self, serializer: &mut S) -> Result { - unsafe { ArchivedHashMap::serialize_from_iter(self.iter(), serializer) } - } -} - -impl - Deserialize, D> for ArchivedHashMap -where - K::Archived: Deserialize + Hash + Eq, - V::Archived: Deserialize, -{ - #[inline] - fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { - let mut result = HashMap::with_capacity_and_hasher(self.len(), S::default()); - for (k, v) in self.iter() { - result.insert(k.deserialize(deserializer)?, v.deserialize(deserializer)?); - } - Ok(result) - } -} - -impl, V, AK: Hash + Eq, AV: PartialEq, S: BuildHasher> - PartialEq> for ArchivedHashMap -{ - #[inline] - fn eq(&self, other: &HashMap) -> bool { - if self.len() != other.len() { - false - } else { - self.iter() - .all(|(key, value)| other.get(key).map_or(false, |v| value.eq(v))) - } - } -} - -impl, V, AK: Hash + Eq, AV: PartialEq> - PartialEq> for HashMap -{ - #[inline] - fn eq(&self, other: &ArchivedHashMap) -> bool { - other.eq(self) - } -} - -#[cfg(test)] -mod tests { - use crate::HashMap; - use alloc::string::String; - use rkyv::{ - archived_root, check_archived_root, - ser::{serializers::AllocSerializer, Serializer}, - Deserialize, Infallible, - }; - - #[test] - fn index_map() { - let mut value = HashMap::new(); - value.insert(String::from("foo"), 10); - value.insert(String::from("bar"), 20); - value.insert(String::from("baz"), 40); - value.insert(String::from("bat"), 80); - - let mut serializer = AllocSerializer::<4096>::default(); - serializer.serialize_value(&value).unwrap(); - let result = serializer.into_serializer().into_inner(); - let archived = unsafe { archived_root::>(result.as_ref()) }; - - assert_eq!(value.len(), archived.len()); - for (k, v) in value.iter() { - let (ak, av) = archived.get_key_value(k.as_str()).unwrap(); - assert_eq!(k, ak); - assert_eq!(v, av); - } - - let deserialized: HashMap = archived.deserialize(&mut Infallible).unwrap(); - assert_eq!(value, deserialized); 
- } - - #[test] - fn validate_index_map() { - let mut value = HashMap::new(); - value.insert(String::from("foo"), 10); - value.insert(String::from("bar"), 20); - value.insert(String::from("baz"), 40); - value.insert(String::from("bat"), 80); - - let mut serializer = AllocSerializer::<4096>::default(); - serializer.serialize_value(&value).unwrap(); - let result = serializer.into_serializer().into_inner(); - check_archived_root::>(result.as_ref()) - .expect("failed to validate archived index map"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,123 +0,0 @@ -use crate::HashSet; -use core::{ - borrow::Borrow, - hash::{BuildHasher, Hash}, -}; -use rkyv::{ - collections::hash_set::{ArchivedHashSet, HashSetResolver}, - ser::{ScratchSpace, Serializer}, - Archive, Deserialize, Fallible, Serialize, -}; - -impl Archive for HashSet -where - K::Archived: Hash + Eq, -{ - type Archived = ArchivedHashSet; - type Resolver = HashSetResolver; - - #[inline] - unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) { - ArchivedHashSet::::resolve_from_len(self.len(), pos, resolver, out); - } -} - -impl Serialize for HashSet -where - K::Archived: Hash + Eq, - K: Serialize + Hash + Eq, - S: ScratchSpace + Serializer + ?Sized, -{ - #[inline] - fn serialize(&self, serializer: &mut S) -> Result { - unsafe { ArchivedHashSet::serialize_from_iter(self.iter(), serializer) } - } -} - -impl Deserialize, D> for ArchivedHashSet -where - K: Archive + Hash + Eq, - K::Archived: Deserialize + Hash + Eq, - D: Fallible + ?Sized, - S: Default + BuildHasher, -{ - #[inline] - fn deserialize(&self, 
deserializer: &mut D) -> Result, D::Error> { - let mut result = HashSet::with_hasher(S::default()); - for k in self.iter() { - result.insert(k.deserialize(deserializer)?); - } - Ok(result) - } -} - -impl, AK: Hash + Eq, S: BuildHasher> PartialEq> - for ArchivedHashSet -{ - #[inline] - fn eq(&self, other: &HashSet) -> bool { - if self.len() != other.len() { - false - } else { - self.iter().all(|key| other.get(key).is_some()) - } - } -} - -impl, AK: Hash + Eq, S: BuildHasher> PartialEq> - for HashSet -{ - #[inline] - fn eq(&self, other: &ArchivedHashSet) -> bool { - other.eq(self) - } -} - -#[cfg(test)] -mod tests { - use crate::HashSet; - use alloc::string::String; - use rkyv::{ - archived_root, check_archived_root, - ser::{serializers::AllocSerializer, Serializer}, - Deserialize, Infallible, - }; - - #[test] - fn index_set() { - let mut value = HashSet::new(); - value.insert(String::from("foo")); - value.insert(String::from("bar")); - value.insert(String::from("baz")); - value.insert(String::from("bat")); - - let mut serializer = AllocSerializer::<4096>::default(); - serializer.serialize_value(&value).unwrap(); - let result = serializer.into_serializer().into_inner(); - let archived = unsafe { archived_root::>(result.as_ref()) }; - - assert_eq!(value.len(), archived.len()); - for k in value.iter() { - let ak = archived.get(k.as_str()).unwrap(); - assert_eq!(k, ak); - } - - let deserialized: HashSet = archived.deserialize(&mut Infallible).unwrap(); - assert_eq!(value, deserialized); - } - - #[test] - fn validate_index_set() { - let mut value = HashSet::new(); - value.insert(String::from("foo")); - value.insert(String::from("bar")); - value.insert(String::from("baz")); - value.insert(String::from("bat")); - - let mut serializer = AllocSerializer::<4096>::default(); - serializer.serialize_value(&value).unwrap(); - let result = serializer.into_serializer().into_inner(); - check_archived_root::>(result.as_ref()) - .expect("failed to validate archived index set"); - } -} 
diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -mod hash_map; -mod hash_set; diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/serde.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/serde.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/external_trait_impls/serde.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/external_trait_impls/serde.rs 2024-05-28 11:57:36.000000000 +0200 @@ -11,7 +11,6 @@ } mod map { - use crate::raw::Allocator; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::marker::PhantomData; @@ -22,12 +21,11 @@ use super::size_hint; - impl Serialize for HashMap + impl Serialize for HashMap where K: Serialize + Eq + Hash, V: Serialize, H: BuildHasher, - A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn serialize(&self, serializer: S) -> Result @@ -38,46 +36,40 @@ } } - impl<'de, K, V, S, A> Deserialize<'de> for HashMap + impl<'de, K, V, S> Deserialize<'de> for HashMap where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: BuildHasher + Default, - A: Allocator + Default, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct MapVisitor - where - A: Allocator, - { - marker: PhantomData>, + struct MapVisitor { + marker: PhantomData>, } - impl<'de, K, V, S, A> Visitor<'de> for MapVisitor + impl<'de, K, V, S> Visitor<'de> for MapVisitor where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: BuildHasher + Default, - A: Allocator + Default, { - type Value = HashMap; + type Value = HashMap; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> 
fmt::Result { formatter.write_str("a map") } #[cfg_attr(feature = "inline-more", inline)] - fn visit_map(self, mut map: M) -> Result + fn visit_map(self, mut map: A) -> Result where - M: MapAccess<'de>, + A: MapAccess<'de>, { - let mut values = HashMap::with_capacity_and_hasher_in( + let mut values = HashMap::with_capacity_and_hasher( size_hint::cautious(map.size_hint()), S::default(), - A::default(), ); while let Some((key, value)) = map.next_entry()? { @@ -97,7 +89,6 @@ } mod set { - use crate::raw::Allocator; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::marker::PhantomData; @@ -108,11 +99,10 @@ use super::size_hint; - impl Serialize for HashSet + impl Serialize for HashSet where T: Serialize + Eq + Hash, H: BuildHasher, - A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn serialize(&self, serializer: S) -> Result @@ -123,44 +113,38 @@ } } - impl<'de, T, S, A> Deserialize<'de> for HashSet + impl<'de, T, S> Deserialize<'de> for HashSet where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, - A: Allocator + Default, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct SeqVisitor - where - A: Allocator, - { - marker: PhantomData>, + struct SeqVisitor { + marker: PhantomData>, } - impl<'de, T, S, A> Visitor<'de> for SeqVisitor + impl<'de, T, S> Visitor<'de> for SeqVisitor where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, - A: Allocator + Default, { - type Value = HashSet; + type Value = HashSet; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } #[cfg_attr(feature = "inline-more", inline)] - fn visit_seq(self, mut seq: M) -> Result + fn visit_seq(self, mut seq: A) -> Result where - M: SeqAccess<'de>, + A: SeqAccess<'de>, { - let mut values = HashSet::with_capacity_and_hasher_in( + let mut values = HashSet::with_capacity_and_hasher( size_hint::cautious(seq.size_hint()), S::default(), - A::default(), ); while let Some(value) = 
seq.next_element()? { @@ -182,15 +166,12 @@ where D: Deserializer<'de>, { - struct SeqInPlaceVisitor<'a, T, S, A>(&'a mut HashSet) - where - A: Allocator; + struct SeqInPlaceVisitor<'a, T, S>(&'a mut HashSet); - impl<'a, 'de, T, S, A> Visitor<'de> for SeqInPlaceVisitor<'a, T, S, A> + impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S> where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, - A: Allocator, { type Value = (); @@ -199,9 +180,9 @@ } #[cfg_attr(feature = "inline-more", inline)] - fn visit_seq(self, mut seq: M) -> Result + fn visit_seq(self, mut seq: A) -> Result where - M: SeqAccess<'de>, + A: SeqAccess<'de>, { self.0.clear(); self.0.reserve(size_hint::cautious(seq.size_hint())); diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/lib.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/lib.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/lib.rs 2024-05-28 11:57:36.000000000 +0200 @@ -20,8 +20,9 @@ extend_one, allocator_api, slice_ptr_get, + nonnull_slice_from_raw_parts, maybe_uninit_array_assume_init, - strict_provenance + build_hasher_simple_hash_one ) )] #![allow( @@ -36,7 +37,6 @@ )] #![warn(missing_docs)] #![warn(rust_2018_idioms)] -#![cfg_attr(feature = "nightly", warn(fuzzy_provenance_casts))] #[cfg(test)] #[macro_use] @@ -81,7 +81,6 @@ mod rustc_entry; mod scopeguard; mod set; -mod table; pub mod hash_map { //! A hash map implemented with quadratic probing and SIMD lookup. @@ -114,63 +113,9 @@ pub use crate::external_trait_impls::rayon::set::*; } } -pub mod hash_table { - //! A hash table implemented with quadratic probing and SIMD lookup. - pub use crate::table::*; - - #[cfg(feature = "rayon")] - /// [rayon]-based parallel iterator types for hash tables. - /// You will rarely need to interact with it directly unless you have need - /// to name one of the iterator types. 
- /// - /// [rayon]: https://docs.rs/rayon/1.0/rayon - pub mod rayon { - pub use crate::external_trait_impls::rayon::table::*; - } -} pub use crate::map::HashMap; pub use crate::set::HashSet; -pub use crate::table::HashTable; - -#[cfg(feature = "equivalent")] -pub use equivalent::Equivalent; - -// This is only used as a fallback when building as part of `std`. -#[cfg(not(feature = "equivalent"))] -/// Key equivalence trait. -/// -/// This trait defines the function used to compare the input value with the -/// map keys (or set values) during a lookup operation such as [`HashMap::get`] -/// or [`HashSet::contains`]. -/// It is provided with a blanket implementation based on the -/// [`Borrow`](core::borrow::Borrow) trait. -/// -/// # Correctness -/// -/// Equivalent values must hash to the same value. -pub trait Equivalent { - /// Checks if this value is equivalent to the given key. - /// - /// Returns `true` if both values are equivalent, and `false` otherwise. - /// - /// # Correctness - /// - /// When this function returns `true`, both `self` and `key` must hash to - /// the same value. - fn equivalent(&self, key: &K) -> bool; -} - -#[cfg(not(feature = "equivalent"))] -impl Equivalent for Q -where - Q: Eq, - K: core::borrow::Borrow, -{ - fn equivalent(&self, key: &K) -> bool { - self == key.borrow() - } -} /// The error type for `try_reserve` methods. #[derive(Clone, PartialEq, Eq, Debug)] @@ -185,3 +130,21 @@ layout: alloc::alloc::Layout, }, } + +/// Wrapper around `Bump` which allows it to be used as an allocator for +/// `HashMap`, `HashSet` and `RawTable`. +/// +/// `Bump` can be used directly without this wrapper on nightly if you enable +/// the `allocator-api` feature of the `bumpalo` crate. 
+#[cfg(feature = "bumpalo")] +#[derive(Clone, Copy, Debug)] +pub struct BumpWrapper<'a>(pub &'a bumpalo::Bump); + +#[cfg(feature = "bumpalo")] +#[test] +fn test_bumpalo() { + use bumpalo::Bump; + let bump = Bump::new(); + let mut map = HashMap::new_in(BumpWrapper(&bump)); + map.insert(0, 1); +} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/macros.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/macros.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/macros.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/macros.rs 2024-05-28 11:57:36.000000000 +0200 @@ -37,7 +37,7 @@ // semicolon is all the remaining items (@__items ($($not:meta,)*) ; ) => {}; (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { - // Emit all items within one block, applying an appropriate #[cfg]. The + // Emit all items within one block, applying an approprate #[cfg]. The // #[cfg] will require all `$m` matchers specified and must also negate // all previous matchers. cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/map.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/map.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/map.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,7 +1,5 @@ -use crate::raw::{ - Allocator, Bucket, Global, RawDrain, RawExtractIf, RawIntoIter, RawIter, RawTable, -}; -use crate::{Equivalent, TryReserveError}; +use crate::raw::{Allocator, Bucket, Global, RawDrain, RawIntoIter, RawIter, RawTable}; +use crate::TryReserveError; use core::borrow::Borrow; use core::fmt::{self, Debug}; use core::hash::{BuildHasher, Hash}; @@ -12,7 +10,7 @@ /// Default hasher for `HashMap`. #[cfg(feature = "ahash")] -pub type DefaultHashBuilder = core::hash::BuildHasherDefault; +pub type DefaultHashBuilder = ahash::RandomState; /// Dummy default hasher for `HashMap`. 
#[cfg(not(feature = "ahash"))] @@ -187,7 +185,7 @@ /// .iter().cloned().collect(); /// // use the values stored in map /// ``` -pub struct HashMap { +pub struct HashMap { pub(crate) hash_builder: S, pub(crate) table: RawTable<(K, V), A>, } @@ -211,12 +209,13 @@ /// Ensures that a single closure type across uses of this which, in turn prevents multiple /// instances of any functions like RawTable::reserve from being generated #[cfg_attr(feature = "inline-more", inline)] -pub(crate) fn make_hasher(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_ +pub(crate) fn make_hasher(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_ where + K: Borrow, Q: Hash, S: BuildHasher, { - move |val| make_hash::(hash_builder, &val.0) + move |val| make_hash::(hash_builder, &val.0) } /// Ensures that a single closure type across uses of this which, in turn prevents multiple @@ -224,9 +223,10 @@ #[cfg_attr(feature = "inline-more", inline)] fn equivalent_key(k: &Q) -> impl Fn(&(K, V)) -> bool + '_ where - Q: ?Sized + Equivalent, + K: Borrow, + Q: ?Sized + Eq, { - move |x| k.equivalent(&x.0) + move |x| k.eq(x.0.borrow()) } /// Ensures that a single closure type across uses of this which, in turn prevents multiple @@ -234,15 +234,17 @@ #[cfg_attr(feature = "inline-more", inline)] fn equivalent(k: &Q) -> impl Fn(&K) -> bool + '_ where - Q: ?Sized + Equivalent, + K: Borrow, + Q: ?Sized + Eq, { - move |x| k.equivalent(x) + move |x| k.eq(x.borrow()) } #[cfg(not(feature = "nightly"))] #[cfg_attr(feature = "inline-more", inline)] -pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 +pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 where + K: Borrow, Q: Hash + ?Sized, S: BuildHasher, { @@ -254,14 +256,38 @@ #[cfg(feature = "nightly")] #[cfg_attr(feature = "inline-more", inline)] -pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 +pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 where + K: Borrow, Q: Hash + ?Sized, S: BuildHasher, { hash_builder.hash_one(val) } 
+#[cfg(not(feature = "nightly"))] +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_insert_hash(hash_builder: &S, val: &K) -> u64 +where + K: Hash, + S: BuildHasher, +{ + use core::hash::Hasher; + let mut state = hash_builder.build_hasher(); + val.hash(&mut state); + state.finish() +} + +#[cfg(feature = "nightly")] +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_insert_hash(hash_builder: &S, val: &K) -> u64 +where + K: Hash, + S: BuildHasher, +{ + hash_builder.hash_one(val) +} + #[cfg(feature = "ahash")] impl HashMap { /// Creates an empty `HashMap`. @@ -269,18 +295,6 @@ /// The hash map is initially created with a capacity of 0, so it will not allocate until it /// is first inserted into. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashMap`], for example with - /// [`with_hasher`](HashMap::with_hasher) method. - /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// /// # Examples /// /// ``` @@ -299,18 +313,6 @@ /// The hash map will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash map will not allocate. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. 
- /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashMap`], for example with - /// [`with_capacity_and_hasher`](HashMap::with_capacity_and_hasher) method. - /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// /// # Examples /// /// ``` @@ -326,46 +328,11 @@ } #[cfg(feature = "ahash")] -impl HashMap { +impl HashMap { /// Creates an empty `HashMap` using the given allocator. /// /// The hash map is initially created with a capacity of 0, so it will not allocate until it /// is first inserted into. - /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashMap`], for example with - /// [`with_hasher_in`](HashMap::with_hasher_in) method. 
- /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use bumpalo::Bump; - /// - /// let bump = Bump::new(); - /// let mut map = HashMap::new_in(&bump); - /// - /// // The created HashMap holds none elements - /// assert_eq!(map.len(), 0); - /// - /// // The created HashMap also doesn't allocate memory - /// assert_eq!(map.capacity(), 0); - /// - /// // Now we insert element inside created HashMap - /// map.insert("One", 1); - /// // We can see that the HashMap holds 1 element - /// assert_eq!(map.len(), 1); - /// // And it also allocates some capacity - /// assert!(map.capacity() > 1); - /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn new_in(alloc: A) -> Self { Self::with_hasher_in(DefaultHashBuilder::default(), alloc) @@ -375,46 +342,6 @@ /// /// The hash map will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash map will not allocate. - /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashMap`], for example with - /// [`with_capacity_and_hasher_in`](HashMap::with_capacity_and_hasher_in) method. 
- /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use bumpalo::Bump; - /// - /// let bump = Bump::new(); - /// let mut map = HashMap::with_capacity_in(5, &bump); - /// - /// // The created HashMap holds none elements - /// assert_eq!(map.len(), 0); - /// // But it can hold at least 5 elements without reallocating - /// let empty_map_capacity = map.capacity(); - /// assert!(empty_map_capacity >= 5); - /// - /// // Now we insert some 5 elements inside created HashMap - /// map.insert("One", 1); - /// map.insert("Two", 2); - /// map.insert("Three", 3); - /// map.insert("Four", 4); - /// map.insert("Five", 5); - /// - /// // We can see that the HashMap holds 5 elements - /// assert_eq!(map.len(), 5); - /// // But its capacity isn't changed - /// assert_eq!(map.capacity(), empty_map_capacity) - /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { Self::with_capacity_and_hasher_in(capacity, DefaultHashBuilder::default(), alloc) @@ -428,21 +355,14 @@ /// The hash map is initially created with a capacity of 0, so it will not /// allocate until it is first inserted into. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashMap`]. + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. 
Setting it + /// manually using this function can expose a DoS attack vector. /// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for /// the HashMap to be useful, see its documentation for details. /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html - /// /// # Examples /// /// ``` @@ -456,6 +376,8 @@ /// /// map.insert(1, 2); /// ``` + /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html #[cfg_attr(feature = "inline-more", inline)] pub const fn with_hasher(hash_builder: S) -> Self { Self { @@ -470,21 +392,14 @@ /// The hash map will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash map will not allocate. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashMap`]. + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. /// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for /// the HashMap to be useful, see its documentation for details. 
/// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html - /// /// # Examples /// /// ``` @@ -498,6 +413,8 @@ /// /// map.insert(1, 2); /// ``` + /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html #[cfg_attr(feature = "inline-more", inline)] pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { Self { @@ -507,7 +424,7 @@ } } -impl HashMap { +impl HashMap { /// Returns a reference to the underlying allocator. #[inline] pub fn allocator(&self) -> &A { @@ -517,19 +434,12 @@ /// Creates an empty `HashMap` which will use the given hash builder to hash /// keys. It will be allocated with the given allocator. /// - /// The hash map is initially created with a capacity of 0, so it will not allocate until it - /// is first inserted into. - /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashMap`]. + /// The created map has the default initial capacity. /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. 
/// /// # Examples /// @@ -542,7 +452,7 @@ /// map.insert(1, 2); /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub const fn with_hasher_in(hash_builder: S, alloc: A) -> Self { + pub fn with_hasher_in(hash_builder: S, alloc: A) -> Self { Self { hash_builder, table: RawTable::new_in(alloc), @@ -555,16 +465,10 @@ /// The hash map will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash map will not allocate. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashMap`]. - /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. /// /// # Examples /// @@ -906,11 +810,14 @@ /// /// let mut map: HashMap = (0..8).map(|x|(x, x*10)).collect(); /// assert_eq!(map.len(), 8); + /// let capacity_before_retain = map.capacity(); /// /// map.retain(|&k, _| k % 2 == 0); /// /// // We can see, that the number of elements inside map is changed. /// assert_eq!(map.len(), 4); + /// // But map capacity is equal to old one. 
+ /// assert_eq!(map.capacity(), capacity_before_retain); /// /// let mut vec: Vec<(i32, i32)> = map.iter().map(|(&k, &v)| (k, v)).collect(); /// vec.sort_unstable(); @@ -937,16 +844,17 @@ /// In other words, move all pairs `(k, v)` such that `f(&k, &mut v)` returns `true` out /// into another iterator. /// - /// Note that `extract_if` lets you mutate every value in the filter closure, regardless of + /// Note that `drain_filter` lets you mutate every value in the filter closure, regardless of /// whether you choose to keep or remove it. /// - /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating - /// or the iteration short-circuits, then the remaining elements will be retained. - /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. + /// When the returned DrainedFilter is dropped, any remaining elements that satisfy + /// the predicate are dropped from the table. /// - /// Keeps the allocated memory for reuse. + /// It is unspecified how many more elements will be subjected to the closure + /// if a panic occurs in the closure, or a panic occurs while dropping an element, + /// or if the `DrainFilter` value is leaked. /// - /// [`retain()`]: HashMap::retain + /// Keeps the allocated memory for reuse. /// /// # Examples /// @@ -954,8 +862,8 @@ /// use hashbrown::HashMap; /// /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); - /// - /// let drained: HashMap = map.extract_if(|k, _v| k % 2 == 0).collect(); + /// let capacity_before_drain_filter = map.capacity(); + /// let drained: HashMap = map.drain_filter(|k, _v| k % 2 == 0).collect(); /// /// let mut evens = drained.keys().cloned().collect::>(); /// let mut odds = map.keys().cloned().collect::>(); @@ -964,24 +872,27 @@ /// /// assert_eq!(evens, vec![0, 2, 4, 6]); /// assert_eq!(odds, vec![1, 3, 5, 7]); + /// // Map capacity is equal to old one. 
+ /// assert_eq!(map.capacity(), capacity_before_drain_filter); /// /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); /// /// { // Iterator is dropped without being consumed. - /// let d = map.extract_if(|k, _v| k % 2 != 0); + /// let d = map.drain_filter(|k, _v| k % 2 != 0); /// } /// - /// // ExtractIf was not exhausted, therefore no elements were drained. - /// assert_eq!(map.len(), 8); + /// // But the map lens have been reduced by half + /// // even if we do not use DrainFilter iterator. + /// assert_eq!(map.len(), 4); /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, K, V, F, A> + pub fn drain_filter(&mut self, f: F) -> DrainFilter<'_, K, V, F, A> where F: FnMut(&K, &mut V) -> bool, { - ExtractIf { + DrainFilter { f, - inner: RawExtractIf { + inner: DrainFilterInner { iter: unsafe { self.table.iter() }, table: &mut self.table, }, @@ -1073,7 +984,7 @@ where K: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the `HashMap`. The collection may reserve more space to avoid @@ -1081,12 +992,9 @@ /// /// # Panics /// - /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program - /// in case of allocation error. Use [`try_reserve`](HashMap::try_reserve) instead - /// if you want to handle memory allocation failure. + /// Panics if the new allocation size overflows [`usize`]. 
/// - /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html - /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html + /// [`usize`]: https://doc.rust-lang.org/std/primitive.usize.html /// /// # Examples /// @@ -1104,7 +1012,7 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn reserve(&mut self, additional: usize) { self.table - .reserve(additional, make_hasher::<_, V, S>(&self.hash_builder)); + .reserve(additional, make_hasher::(&self.hash_builder)); } /// Tries to reserve capacity for at least `additional` more elements to be inserted @@ -1154,7 +1062,7 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { self.table - .try_reserve(additional, make_hasher::<_, V, S>(&self.hash_builder)) + .try_reserve(additional, make_hasher::(&self.hash_builder)) } /// Shrinks the capacity of the map as much as possible. It will drop @@ -1176,7 +1084,7 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn shrink_to_fit(&mut self) { self.table - .shrink_to(0, make_hasher::<_, V, S>(&self.hash_builder)); + .shrink_to(0, make_hasher::(&self.hash_builder)); } /// Shrinks the capacity of the map with a lower limit. It will drop @@ -1205,7 +1113,7 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn shrink_to(&mut self, min_capacity: usize) { self.table - .shrink_to(min_capacity, make_hasher::<_, V, S>(&self.hash_builder)); + .shrink_to(min_capacity, make_hasher::(&self.hash_builder)); } /// Gets the given key's corresponding entry in the map for in-place manipulation. 
@@ -1229,7 +1137,7 @@ /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S, A> { - let hash = make_hash::(&self.hash_builder, &key); + let hash = make_insert_hash::(&self.hash_builder, &key); if let Some(elem) = self.table.find(hash, equivalent_key(&key)) { Entry::Occupied(OccupiedEntry { hash, @@ -1266,9 +1174,10 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn entry_ref<'a, 'b, Q: ?Sized>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S, A> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { - let hash = make_hash::(&self.hash_builder, key); + let hash = make_hash::(&self.hash_builder, key); if let Some(elem) = self.table.find(hash, equivalent_key(key)) { EntryRef::Occupied(OccupiedEntryRef { hash, @@ -1307,11 +1216,12 @@ #[inline] pub fn get(&self, k: &Q) -> Option<&V> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { // Avoid `Option::map` because it bloats LLVM IR. match self.get_inner(k) { - Some((_, v)) => Some(v), + Some(&(_, ref v)) => Some(v), None => None, } } @@ -1338,11 +1248,12 @@ #[inline] pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { // Avoid `Option::map` because it bloats LLVM IR. match self.get_inner(k) { - Some((key, value)) => Some((key, value)), + Some(&(ref key, ref value)) => Some((key, value)), None => None, } } @@ -1350,12 +1261,13 @@ #[inline] fn get_inner(&self, k: &Q) -> Option<&(K, V)> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { if self.table.is_empty() { None } else { - let hash = make_hash::(&self.hash_builder, k); + let hash = make_hash::(&self.hash_builder, k); self.table.get(hash, equivalent_key(k)) } } @@ -1386,7 +1298,8 @@ #[inline] pub fn get_key_value_mut(&mut self, k: &Q) -> Option<(&K, &mut V)> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { // Avoid `Option::map` because it bloats LLVM IR. 
match self.get_inner_mut(k) { @@ -1417,7 +1330,8 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn contains_key(&self, k: &Q) -> bool where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { self.get_inner(k).is_some() } @@ -1448,7 +1362,8 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { // Avoid `Option::map` because it bloats LLVM IR. match self.get_inner_mut(k) { @@ -1460,12 +1375,13 @@ #[inline] fn get_inner_mut(&mut self, k: &Q) -> Option<&mut (K, V)> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { if self.table.is_empty() { None } else { - let hash = make_hash::(&self.hash_builder, k); + let hash = make_hash::(&self.hash_builder, k); self.table.get_mut(hash, equivalent_key(k)) } } @@ -1515,7 +1431,8 @@ /// ``` pub fn get_many_mut(&mut self, ks: [&Q; N]) -> Option<[&'_ mut V; N]> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { self.get_many_mut_inner(ks).map(|res| res.map(|(_, v)| v)) } @@ -1570,7 +1487,8 @@ ks: [&Q; N], ) -> Option<[&'_ mut V; N]> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { self.get_many_unchecked_mut_inner(ks) .map(|res| res.map(|(_, v)| v)) @@ -1625,7 +1543,8 @@ ks: [&Q; N], ) -> Option<[(&'_ K, &'_ mut V); N]> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { self.get_many_mut_inner(ks) .map(|res| res.map(|(k, v)| (&*k, v))) @@ -1680,7 +1599,8 @@ ks: [&Q; N], ) -> Option<[(&'_ K, &'_ mut V); N]> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { self.get_many_unchecked_mut_inner(ks) .map(|res| res.map(|(k, v)| (&*k, v))) @@ -1691,11 +1611,12 @@ ks: [&Q; N], ) -> Option<[&'_ mut (K, V); N]> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { let hashes = self.build_hashes_inner(ks); self.table - .get_many_mut(hashes, |i, (k, _)| ks[i].equivalent(k)) + .get_many_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow())) } unsafe fn get_many_unchecked_mut_inner( @@ 
-1703,20 +1624,22 @@ ks: [&Q; N], ) -> Option<[&'_ mut (K, V); N]> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { let hashes = self.build_hashes_inner(ks); self.table - .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].equivalent(k)) + .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow())) } fn build_hashes_inner(&self, ks: [&Q; N]) -> [u64; N] where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { let mut hashes = [0_u64; N]; for i in 0..N { - hashes[i] = make_hash::(&self.hash_builder, ks[i]); + hashes[i] = make_hash::(&self.hash_builder, ks[i]); } hashes } @@ -1749,19 +1672,13 @@ /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn insert(&mut self, k: K, v: V) -> Option { - let hash = make_hash::(&self.hash_builder, &k); - let hasher = make_hasher::<_, V, S>(&self.hash_builder); - match self - .table - .find_or_find_insert_slot(hash, equivalent_key(&k), hasher) - { - Ok(bucket) => Some(mem::replace(unsafe { &mut bucket.as_mut().1 }, v)), - Err(slot) => { - unsafe { - self.table.insert_in_slot(hash, slot, (k, v)); - } - None - } + let hash = make_insert_hash::(&self.hash_builder, &k); + if let Some((_, item)) = self.table.get_mut(hash, equivalent_key(&k)) { + Some(mem::replace(item, v)) + } else { + self.table + .insert(hash, (k, v), make_hasher::(&self.hash_builder)); + None } } @@ -1816,10 +1733,10 @@ /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn insert_unique_unchecked(&mut self, k: K, v: V) -> (&K, &mut V) { - let hash = make_hash::(&self.hash_builder, &k); + let hash = make_insert_hash::(&self.hash_builder, &k); let bucket = self .table - .insert(hash, (k, v), make_hasher::<_, V, S>(&self.hash_builder)); + .insert(hash, (k, v), make_hasher::(&self.hash_builder)); let (k_ref, v_ref) = unsafe { bucket.as_mut() }; (k_ref, v_ref) } @@ -1884,17 +1801,19 @@ /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.insert(1, "a"); + /// let capacity_before_remove = map.capacity(); /// /// 
assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); /// - /// // Now map holds none elements - /// assert!(map.is_empty()); + /// // Now map holds none elements but capacity is equal to the old one + /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove(&mut self, k: &Q) -> Option where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { // Avoid `Option::map` because it bloats LLVM IR. match self.remove_entry(k) { @@ -1923,24 +1842,26 @@ /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.insert(1, "a"); + /// let capacity_before_remove = map.capacity(); /// /// assert_eq!(map.remove_entry(&1), Some((1, "a"))); /// assert_eq!(map.remove(&1), None); /// - /// // Now map hold none elements - /// assert!(map.is_empty()); + /// // Now map hold none elements but capacity is equal to the old one + /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(&mut self, k: &Q) -> Option<(K, V)> where - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { - let hash = make_hash::(&self.hash_builder, k); + let hash = make_hash::(&self.hash_builder, k); self.table.remove_entry(hash, equivalent_key(k)) } } -impl HashMap { +impl HashMap { /// Creates a raw entry builder for the HashMap. /// /// Raw entries provide the lowest level of control for searching and @@ -2092,31 +2013,19 @@ RawEntryBuilder { map: self } } - /// Returns a reference to the [`RawTable`] used underneath [`HashMap`]. - /// This function is only available if the `raw` feature of the crate is enabled. - /// - /// See [`raw_table_mut`] for more. 
- /// - /// [`raw_table_mut`]: Self::raw_table_mut - #[cfg(feature = "raw")] - #[cfg_attr(feature = "inline-more", inline)] - pub fn raw_table(&self) -> &RawTable<(K, V), A> { - &self.table - } - /// Returns a mutable reference to the [`RawTable`] used underneath [`HashMap`]. /// This function is only available if the `raw` feature of the crate is enabled. /// /// # Note /// - /// Calling this function is safe, but using the raw hash table API may require + /// Calling the function safe, but using raw hash table API's may require /// unsafe functions or blocks. /// /// `RawTable` API gives the lowest level of control under the map that can be useful /// for extending the HashMap's API, but may lead to *[undefined behavior]*. /// /// [`HashMap`]: struct.HashMap.html - /// [`RawTable`]: crate::raw::RawTable + /// [`RawTable`]: raw/struct.RawTable.html /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html /// /// # Examples @@ -2140,9 +2049,9 @@ /// where /// F: Fn(&(K, V)) -> bool, /// { - /// let raw_table = map.raw_table_mut(); + /// let raw_table = map.raw_table(); /// match raw_table.find(hash, is_match) { - /// Some(bucket) => Some(unsafe { raw_table.remove(bucket).0 }), + /// Some(bucket) => Some(unsafe { raw_table.remove(bucket) }), /// None => None, /// } /// } @@ -2161,7 +2070,7 @@ /// ``` #[cfg(feature = "raw")] #[cfg_attr(feature = "inline-more", inline)] - pub fn raw_table_mut(&mut self) -> &mut RawTable<(K, V), A> { + pub fn raw_table(&mut self) -> &mut RawTable<(K, V), A> { &mut self.table } } @@ -2171,7 +2080,7 @@ K: Eq + Hash, V: PartialEq, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { @@ -2188,7 +2097,7 @@ K: Eq + Hash, V: Eq, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { } @@ -2196,7 +2105,7 @@ where K: Debug, V: Debug, - A: Allocator, + A: Allocator + Clone, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result 
{ f.debug_map().entries(self.iter()).finish() @@ -2206,7 +2115,7 @@ impl Default for HashMap where S: Default, - A: Default + Allocator, + A: Default + Allocator + Clone, { /// Creates an empty `HashMap`, with the `Default` value for the hasher and allocator. /// @@ -2231,10 +2140,10 @@ impl Index<&Q> for HashMap where - K: Eq + Hash, - Q: Hash + Equivalent, + K: Eq + Hash + Borrow, + Q: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { type Output = V; @@ -2265,7 +2174,7 @@ impl From<[(K, V); N]> for HashMap where K: Eq + Hash, - A: Default + Allocator, + A: Default + Allocator + Clone, { /// # Examples /// @@ -2410,11 +2319,11 @@ /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); /// ``` -pub struct IntoIter { +pub struct IntoIter { inner: RawIntoIter<(K, V), A>, } -impl IntoIter { +impl IntoIter { /// Returns a iterator of references over the remaining items. #[cfg_attr(feature = "inline-more", inline)] pub(super) fn iter(&self) -> Iter<'_, K, V> { @@ -2454,11 +2363,11 @@ /// assert_eq!(keys.next(), None); /// assert_eq!(keys.next(), None); /// ``` -pub struct IntoKeys { +pub struct IntoKeys { inner: IntoIter, } -impl Iterator for IntoKeys { +impl Iterator for IntoKeys { type Item = K; #[inline] @@ -2469,26 +2378,18 @@ fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - #[inline] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner.fold(init, |acc, (k, _)| f(acc, k)) - } } -impl ExactSizeIterator for IntoKeys { +impl ExactSizeIterator for IntoKeys { #[inline] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for IntoKeys {} +impl FusedIterator for IntoKeys {} -impl fmt::Debug for IntoKeys { +impl fmt::Debug for IntoKeys { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.inner.iter().map(|(k, _)| k)) @@ -2524,11 +2425,11 @@ /// assert_eq!(values.next(), None); /// assert_eq!(values.next(), None); 
/// ``` -pub struct IntoValues { +pub struct IntoValues { inner: IntoIter, } -impl Iterator for IntoValues { +impl Iterator for IntoValues { type Item = V; #[inline] @@ -2539,26 +2440,18 @@ fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - #[inline] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner.fold(init, |acc, (_, v)| f(acc, v)) - } } -impl ExactSizeIterator for IntoValues { +impl ExactSizeIterator for IntoValues { #[inline] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for IntoValues {} +impl FusedIterator for IntoValues {} -impl fmt::Debug for IntoValues { +impl fmt::Debug for IntoValues { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.inner.iter().map(|(_, v)| v)) @@ -2690,11 +2583,11 @@ /// assert_eq!(drain_iter.next(), None); /// assert_eq!(drain_iter.next(), None); /// ``` -pub struct Drain<'a, K, V, A: Allocator = Global> { +pub struct Drain<'a, K, V, A: Allocator + Clone = Global> { inner: RawDrain<'a, (K, V), A>, } -impl Drain<'_, K, V, A> { +impl Drain<'_, K, V, A> { /// Returns a iterator of references over the remaining items. #[cfg_attr(feature = "inline-more", inline)] pub(super) fn iter(&self) -> Iter<'_, K, V> { @@ -2708,10 +2601,10 @@ /// A draining iterator over entries of a `HashMap` which don't satisfy the predicate /// `f(&k, &mut v)` in arbitrary order. The iterator element type is `(K, V)`. /// -/// This `struct` is created by the [`extract_if`] method on [`HashMap`]. See its +/// This `struct` is created by the [`drain_filter`] method on [`HashMap`]. See its /// documentation for more. 
/// -/// [`extract_if`]: struct.HashMap.html#method.extract_if +/// [`drain_filter`]: struct.HashMap.html#method.drain_filter /// [`HashMap`]: struct.HashMap.html /// /// # Examples @@ -2721,40 +2614,63 @@ /// /// let mut map: HashMap = [(1, "a"), (2, "b"), (3, "c")].into(); /// -/// let mut extract_if = map.extract_if(|k, _v| k % 2 != 0); -/// let mut vec = vec![extract_if.next(), extract_if.next()]; +/// let mut drain_filter = map.drain_filter(|k, _v| k % 2 != 0); +/// let mut vec = vec![drain_filter.next(), drain_filter.next()]; /// -/// // The `ExtractIf` iterator produces items in arbitrary order, so the +/// // The `DrainFilter` iterator produces items in arbitrary order, so the /// // items must be sorted to test them against a sorted array. /// vec.sort_unstable(); /// assert_eq!(vec, [Some((1, "a")),Some((3, "c"))]); /// /// // It is fused iterator -/// assert_eq!(extract_if.next(), None); -/// assert_eq!(extract_if.next(), None); -/// drop(extract_if); +/// assert_eq!(drain_filter.next(), None); +/// assert_eq!(drain_filter.next(), None); +/// drop(drain_filter); /// /// assert_eq!(map.len(), 1); /// ``` -#[must_use = "Iterators are lazy unless consumed"] -pub struct ExtractIf<'a, K, V, F, A: Allocator = Global> +pub struct DrainFilter<'a, K, V, F, A: Allocator + Clone = Global> where F: FnMut(&K, &mut V) -> bool, { f: F, - inner: RawExtractIf<'a, (K, V), A>, + inner: DrainFilterInner<'a, K, V, A>, } -impl Iterator for ExtractIf<'_, K, V, F, A> +impl<'a, K, V, F, A> Drop for DrainFilter<'a, K, V, F, A> where F: FnMut(&K, &mut V) -> bool, - A: Allocator, + A: Allocator + Clone, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + while let Some(item) = self.next() { + let guard = ConsumeAllOnDrop(self); + drop(item); + mem::forget(guard); + } + } +} + +pub(super) struct ConsumeAllOnDrop<'a, T: Iterator>(pub &'a mut T); + +impl Drop for ConsumeAllOnDrop<'_, T> { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + 
self.0.for_each(drop); + } +} + +impl Iterator for DrainFilter<'_, K, V, F, A> +where + F: FnMut(&K, &mut V) -> bool, + A: Allocator + Clone, { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] fn next(&mut self) -> Option { - self.inner.next(|&mut (ref k, ref mut v)| (self.f)(k, v)) + self.inner.next(&mut self.f) } #[inline] @@ -2763,7 +2679,31 @@ } } -impl FusedIterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} +impl FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} + +/// Portions of `DrainFilter` shared with `set::DrainFilter` +pub(super) struct DrainFilterInner<'a, K, V, A: Allocator + Clone> { + pub iter: RawIter<(K, V)>, + pub table: &'a mut RawTable<(K, V), A>, +} + +impl DrainFilterInner<'_, K, V, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn next(&mut self, f: &mut F) -> Option<(K, V)> + where + F: FnMut(&K, &mut V) -> bool, + { + unsafe { + for item in &mut self.iter { + let &mut (ref key, ref mut value) = item.as_mut(); + if f(key, value) { + return Some(self.table.remove(item)); + } + } + } + None + } +} /// A mutable iterator over the values of a `HashMap` in arbitrary order. /// The iterator element type is `&'a mut V`. @@ -2851,7 +2791,7 @@ /// /// assert_eq!(map.len(), 6); /// ``` -pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator = Global> { +pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> { map: &'a mut HashMap, } @@ -2939,7 +2879,7 @@ /// vec.sort_unstable(); /// assert_eq!(vec, [('a', 10), ('b', 20), ('c', 30), ('d', 40), ('e', 50), ('f', 60)]); /// ``` -pub enum RawEntryMut<'a, K, V, S, A: Allocator = Global> { +pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { /// An occupied entry. 
/// /// # Examples @@ -3030,7 +2970,7 @@ /// assert_eq!(map.get(&"b"), None); /// assert_eq!(map.len(), 1); /// ``` -pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator = Global> { +pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { elem: Bucket<(K, V)>, table: &'a mut RawTable<(K, V), A>, hash_builder: &'a S, @@ -3041,7 +2981,7 @@ K: Send, V: Send, S: Send, - A: Send + Allocator, + A: Send + Allocator + Clone, { } unsafe impl Sync for RawOccupiedEntryMut<'_, K, V, S, A> @@ -3049,7 +2989,7 @@ K: Sync, V: Sync, S: Sync, - A: Sync + Allocator, + A: Sync + Allocator + Clone, { } @@ -3101,7 +3041,7 @@ /// } /// assert!(map[&"c"] == 30 && map.len() == 3); /// ``` -pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator = Global> { +pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { table: &'a mut RawTable<(K, V), A>, hash_builder: &'a S, } @@ -3140,11 +3080,11 @@ /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); /// } /// ``` -pub struct RawEntryBuilder<'a, K, V, S, A: Allocator = Global> { +pub struct RawEntryBuilder<'a, K, V, S, A: Allocator + Clone = Global> { map: &'a HashMap, } -impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { /// Creates a `RawEntryMut` from the given key. 
/// /// # Examples @@ -3163,9 +3103,10 @@ pub fn from_key(self, k: &Q) -> RawEntryMut<'a, K, V, S, A> where S: BuildHasher, - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { - let hash = make_hash::(&self.map.hash_builder, k); + let hash = make_hash::(&self.map.hash_builder, k); self.from_key_hashed_nocheck(hash, k) } @@ -3195,13 +3136,14 @@ #[allow(clippy::wrong_self_convention)] pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S, A> where - Q: Equivalent, + K: Borrow, + Q: Eq, { self.from_hash(hash, equivalent(k)) } } -impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { /// Creates a `RawEntryMut` from the given hash and matching function. /// /// # Examples @@ -3252,7 +3194,7 @@ } } -impl<'a, K, V, S, A: Allocator> RawEntryBuilder<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { /// Access an immutable entry by key. /// /// # Examples @@ -3269,9 +3211,10 @@ pub fn from_key(self, k: &Q) -> Option<(&'a K, &'a V)> where S: BuildHasher, - Q: Hash + Equivalent, + K: Borrow, + Q: Hash + Eq, { - let hash = make_hash::(&self.map.hash_builder, k); + let hash = make_hash::(&self.map.hash_builder, k); self.from_key_hashed_nocheck(hash, k) } @@ -3299,7 +3242,8 @@ #[allow(clippy::wrong_self_convention)] pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)> where - Q: Equivalent, + K: Borrow, + Q: Eq, { self.from_hash(hash, equivalent(k)) } @@ -3310,7 +3254,7 @@ F: FnMut(&K) -> bool, { match self.map.table.get(hash, |(k, _)| is_match(k)) { - Some((key, value)) => Some((key, value)), + Some(&(ref key, ref value)) => Some((key, value)), None => None, } } @@ -3345,7 +3289,7 @@ } } -impl<'a, K, V, S, A: Allocator> RawEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> { /// Sets the value of the entry, and returns a RawOccupiedEntryMut. 
/// /// # Examples @@ -3539,7 +3483,7 @@ } } -impl<'a, K, V, S, A: Allocator> RawOccupiedEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -3706,7 +3650,7 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn get_key_value(&self) -> (&K, &V) { unsafe { - let (key, value) = self.elem.as_ref(); + let &(ref key, ref value) = self.elem.as_ref(); (key, value) } } @@ -3878,7 +3822,7 @@ /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.remove(self.elem).0 } + unsafe { self.table.remove(self.elem) } } /// Provides shared access to the key and owned access to the value of @@ -3938,7 +3882,7 @@ } } -impl<'a, K, V, S, A: Allocator> RawVacantEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { /// Sets the value of the entry with the VacantEntry's key, /// and returns a mutable reference to it. 
/// @@ -3962,7 +3906,7 @@ K: Hash, S: BuildHasher, { - let hash = make_hash::(self.hash_builder, &key); + let hash = make_insert_hash::(self.hash_builder, &key); self.insert_hashed_nocheck(hash, key, value) } @@ -4006,7 +3950,7 @@ let &mut (ref mut k, ref mut v) = self.table.insert_entry( hash, (key, value), - make_hasher::<_, V, S>(self.hash_builder), + make_hasher::(self.hash_builder), ); (k, v) } @@ -4070,11 +4014,11 @@ K: Hash, S: BuildHasher, { - let hash = make_hash::(self.hash_builder, &key); + let hash = make_insert_hash::(self.hash_builder, &key); let elem = self.table.insert( hash, (key, value), - make_hasher::<_, V, S>(self.hash_builder), + make_hasher::(self.hash_builder), ); RawOccupiedEntryMut { elem, @@ -4084,13 +4028,13 @@ } } -impl Debug for RawEntryBuilderMut<'_, K, V, S, A> { +impl Debug for RawEntryBuilderMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilder").finish() } } -impl Debug for RawEntryMut<'_, K, V, S, A> { +impl Debug for RawEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(), @@ -4099,7 +4043,7 @@ } } -impl Debug for RawOccupiedEntryMut<'_, K, V, S, A> { +impl Debug for RawOccupiedEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawOccupiedEntryMut") .field("key", self.key()) @@ -4108,13 +4052,13 @@ } } -impl Debug for RawVacantEntryMut<'_, K, V, S, A> { +impl Debug for RawVacantEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawVacantEntryMut").finish() } } -impl Debug for RawEntryBuilder<'_, K, V, S, A> { +impl Debug for RawEntryBuilder<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilder").finish() } @@ -4165,7 +4109,7 @@ /// ``` pub enum Entry<'a, K, V, S, A = Global> where - A: Allocator, + 
A: Allocator + Clone, { /// An occupied entry. /// @@ -4198,7 +4142,7 @@ Vacant(VacantEntry<'a, K, V, S, A>), } -impl Debug for Entry<'_, K, V, S, A> { +impl Debug for Entry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -4247,7 +4191,7 @@ /// assert_eq!(map.get(&"c"), None); /// assert_eq!(map.len(), 2); /// ``` -pub struct OccupiedEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { +pub struct OccupiedEntry<'a, K, V, S, A: Allocator + Clone = Global> { hash: u64, key: Option, elem: Bucket<(K, V)>, @@ -4259,7 +4203,7 @@ K: Send, V: Send, S: Send, - A: Send + Allocator, + A: Send + Allocator + Clone, { } unsafe impl Sync for OccupiedEntry<'_, K, V, S, A> @@ -4267,11 +4211,11 @@ K: Sync, V: Sync, S: Sync, - A: Sync + Allocator, + A: Sync + Allocator + Clone, { } -impl Debug for OccupiedEntry<'_, K, V, S, A> { +impl Debug for OccupiedEntry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) @@ -4310,13 +4254,13 @@ /// } /// assert!(map[&"b"] == 20 && map.len() == 2); /// ``` -pub struct VacantEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { +pub struct VacantEntry<'a, K, V, S, A: Allocator + Clone = Global> { hash: u64, key: K, table: &'a mut HashMap, } -impl Debug for VacantEntry<'_, K, V, S, A> { +impl Debug for VacantEntry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.key()).finish() } @@ -4376,7 +4320,7 @@ /// ``` pub enum EntryRef<'a, 'b, K, Q: ?Sized, V, S, A = Global> where - A: Allocator, + A: Allocator + Clone, { /// An occupied entry. 
/// @@ -4409,7 +4353,7 @@ Vacant(VacantEntryRef<'a, 'b, K, Q, V, S, A>), } -impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug +impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug for EntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4487,7 +4431,7 @@ /// assert_eq!(map.get("c"), None); /// assert_eq!(map.len(), 2); /// ``` -pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> { +pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { hash: u64, key: Option>, elem: Bucket<(K, V)>, @@ -4500,7 +4444,7 @@ Q: Sync + ?Sized, V: Send, S: Send, - A: Send + Allocator, + A: Send + Allocator + Clone, { } unsafe impl<'a, 'b, K, Q, V, S, A> Sync for OccupiedEntryRef<'a, 'b, K, Q, V, S, A> @@ -4509,16 +4453,16 @@ Q: Sync + ?Sized, V: Sync, S: Sync, - A: Sync + Allocator, + A: Sync + Allocator + Clone, { } -impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug +impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedEntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntryRef") - .field("key", &self.key().borrow()) + .field("key", &self.key()) .field("value", &self.get()) .finish() } @@ -4554,13 +4498,13 @@ /// } /// assert!(map["b"] == 20 && map.len() == 2); /// ``` -pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> { +pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { hash: u64, key: KeyOrRef<'b, K, Q>, table: &'a mut HashMap, } -impl, Q: ?Sized + Debug, V, S, A: Allocator> Debug +impl, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug for VacantEntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4592,14 +4536,14 @@ /// } /// assert_eq!(map[&"a"], 100); /// ``` -pub struct OccupiedError<'a, K, V, S, A: Allocator = Global> { +pub struct OccupiedError<'a, 
K, V, S, A: Allocator + Clone = Global> { /// The entry in the map that was already occupied. pub entry: OccupiedEntry<'a, K, V, S, A>, /// The value which was not inserted, because the entry was already occupied. pub value: V, } -impl Debug for OccupiedError<'_, K, V, S, A> { +impl Debug for OccupiedError<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedError") .field("key", self.entry.key()) @@ -4609,7 +4553,9 @@ } } -impl<'a, K: Debug, V: Debug, S, A: Allocator> fmt::Display for OccupiedError<'a, K, V, S, A> { +impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display + for OccupiedError<'a, K, V, S, A> +{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, @@ -4621,7 +4567,7 @@ } } -impl<'a, K, V, S, A: Allocator> IntoIterator for &'a HashMap { +impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; @@ -4653,7 +4599,7 @@ } } -impl<'a, K, V, S, A: Allocator> IntoIterator for &'a mut HashMap { +impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; @@ -4690,7 +4636,7 @@ } } -impl IntoIterator for HashMap { +impl IntoIterator for HashMap { type Item = (K, V); type IntoIter = IntoIter; @@ -4738,17 +4684,6 @@ fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner.fold(init, |acc, x| unsafe { - let (k, v) = x.as_ref(); - f(acc, (k, v)) - }) - } } impl ExactSizeIterator for Iter<'_, K, V> { #[cfg_attr(feature = "inline-more", inline)] @@ -4777,17 +4712,6 @@ fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - 
{ - self.inner.fold(init, |acc, x| unsafe { - let (k, v) = x.as_mut(); - f(acc, (k, v)) - }) - } } impl ExactSizeIterator for IterMut<'_, K, V> { #[cfg_attr(feature = "inline-more", inline)] @@ -4807,7 +4731,7 @@ } } -impl Iterator for IntoIter { +impl Iterator for IntoIter { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -4818,24 +4742,16 @@ fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner.fold(init, f) - } } -impl ExactSizeIterator for IntoIter { +impl ExactSizeIterator for IntoIter { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for IntoIter {} +impl FusedIterator for IntoIter {} -impl fmt::Debug for IntoIter { +impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } @@ -4856,14 +4772,6 @@ fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner.fold(init, |acc, (k, _)| f(acc, k)) - } } impl ExactSizeIterator for Keys<'_, K, V> { #[cfg_attr(feature = "inline-more", inline)] @@ -4888,14 +4796,6 @@ fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner.fold(init, |acc, (_, v)| f(acc, v)) - } } impl ExactSizeIterator for Values<'_, K, V> { #[cfg_attr(feature = "inline-more", inline)] @@ -4920,14 +4820,6 @@ fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, 
Self::Item) -> B, - { - self.inner.fold(init, |acc, (_, v)| f(acc, v)) - } } impl ExactSizeIterator for ValuesMut<'_, K, V> { #[cfg_attr(feature = "inline-more", inline)] @@ -4945,7 +4837,7 @@ } } -impl<'a, K, V, A: Allocator> Iterator for Drain<'a, K, V, A> { +impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -4956,35 +4848,27 @@ fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner.fold(init, f) - } } -impl ExactSizeIterator for Drain<'_, K, V, A> { +impl ExactSizeIterator for Drain<'_, K, V, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for Drain<'_, K, V, A> {} +impl FusedIterator for Drain<'_, K, V, A> {} impl fmt::Debug for Drain<'_, K, V, A> where K: fmt::Debug, V: fmt::Debug, - A: Allocator, + A: Allocator + Clone, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } -impl<'a, K, V, S, A: Allocator> Entry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { /// Sets the value of the entry, and returns an OccupiedEntry. /// /// # Examples @@ -5231,7 +5115,7 @@ } } -impl<'a, K, V: Default, S, A: Allocator> Entry<'a, K, V, S, A> { +impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -5264,7 +5148,7 @@ } } -impl<'a, K, V, S, A: Allocator> OccupiedEntry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { /// Gets a reference to the key in the entry. 
/// /// # Examples @@ -5299,6 +5183,7 @@ /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.entry("poneyland").or_insert(12); + /// let capacity_before_remove = map.capacity(); /// /// if let Entry::Occupied(o) = map.entry("poneyland") { /// // We delete the entry from the map. @@ -5306,12 +5191,12 @@ /// } /// /// assert_eq!(map.contains_key("poneyland"), false); - /// // Now map hold none elements - /// assert!(map.is_empty()); + /// // Now map hold none elements but capacity is equal to the old one + /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.table.remove(self.elem).0 } + unsafe { self.table.table.remove(self.elem) } } /// Gets a reference to the value in the entry. @@ -5434,14 +5319,15 @@ /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.entry("poneyland").or_insert(12); + /// let capacity_before_remove = map.capacity(); /// /// if let Entry::Occupied(o) = map.entry("poneyland") { /// assert_eq!(o.remove(), 12); /// } /// /// assert_eq!(map.contains_key("poneyland"), false); - /// // Now map hold none elements - /// assert!(map.is_empty()); + /// // Now map hold none elements but capacity is equal to the old one + /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove(self) -> V { @@ -5619,7 +5505,7 @@ } } -impl<'a, K, V, S, A: Allocator> VacantEntry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `VacantEntry`. 
/// @@ -5681,7 +5567,7 @@ let entry = table.insert_entry( self.hash, (self.key, value), - make_hasher::<_, V, S>(&self.table.hash_builder), + make_hasher::(&self.table.hash_builder), ); &mut entry.1 } @@ -5695,7 +5581,7 @@ let elem = self.table.table.insert( self.hash, (self.key, value), - make_hasher::<_, V, S>(&self.table.hash_builder), + make_hasher::(&self.table.hash_builder), ); OccupiedEntry { hash: self.hash, @@ -5706,7 +5592,7 @@ } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { /// Sets the value of the entry, and returns an OccupiedEntryRef. /// /// # Examples @@ -5796,7 +5682,10 @@ /// Ensures a value is in the entry by inserting, if empty, the result of the default function. /// This method allows for generating key-derived values for insertion by providing the default - /// function an access to the borrower form of the key. + /// function a reference to the key that was moved during the `.entry_ref(key)` method call. + /// + /// The reference to the moved key is provided so that cloning or copying the key is + /// unnecessary, unlike with `.or_insert_with(|| ... )`. 
/// /// # Examples /// @@ -5848,7 +5737,7 @@ K: Borrow, { match *self { - EntryRef::Occupied(ref entry) => entry.key().borrow(), + EntryRef::Occupied(ref entry) => entry.key(), EntryRef::Vacant(ref entry) => entry.key(), } } @@ -5944,7 +5833,8 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn and_replace_entry_with(self, f: F) -> Self where - F: FnOnce(&K, V) -> Option, + F: FnOnce(&Q, V) -> Option, + K: Borrow, { match self { EntryRef::Occupied(entry) => entry.replace_entry_with(f), @@ -5953,7 +5843,7 @@ } } -impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -5986,7 +5876,7 @@ } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -6003,8 +5893,11 @@ /// } /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &K { - unsafe { &self.elem.as_ref().0 } + pub fn key(&self) -> &Q + where + K: Borrow, + { + unsafe { &self.elem.as_ref().0 }.borrow() } /// Take the ownership of the key and value from the map. @@ -6021,6 +5914,7 @@ /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.entry_ref("poneyland").or_insert(12); + /// let capacity_before_remove = map.capacity(); /// /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") { /// // We delete the entry from the map. 
@@ -6029,11 +5923,11 @@ /// /// assert_eq!(map.contains_key("poneyland"), false); /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.is_empty()); + /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.table.remove(self.elem).0 } + unsafe { self.table.table.remove(self.elem) } } /// Gets a reference to the value in the entry. @@ -6154,6 +6048,7 @@ /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.entry_ref("poneyland").or_insert(12); + /// let capacity_before_remove = map.capacity(); /// /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") { /// assert_eq!(o.remove(), 12); @@ -6161,7 +6056,7 @@ /// /// assert_eq!(map.contains_key("poneyland"), false); /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.is_empty()); + /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove(self) -> V { @@ -6173,7 +6068,7 @@ /// /// # Panics /// - /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`]. + /// Will panic if this OccupiedEntry was created through [`EntryRef::insert`]. /// /// # Examples /// @@ -6215,7 +6110,7 @@ /// /// # Panics /// - /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`]. + /// Will panic if this OccupiedEntry was created through [`Entry::insert`]. /// /// # Examples /// @@ -6243,7 +6138,7 @@ /// fn reclaim_memory(map: &mut HashMap, usize>, keys: &[Rc]) { /// for key in keys { /// if let EntryRef::Occupied(entry) = map.entry_ref(key.as_ref()) { - /// // Replaces the entry's key with our version of it in `keys`. + /// /// Replaces the entry's key with our version of it in `keys`. 
/// entry.replace_key(); /// } /// } @@ -6309,7 +6204,8 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn replace_entry_with(self, f: F) -> EntryRef<'a, 'b, K, Q, V, S, A> where - F: FnOnce(&K, V) -> Option, + F: FnOnce(&Q, V) -> Option, + K: Borrow, { unsafe { let mut spare_key = None; @@ -6317,7 +6213,7 @@ self.table .table .replace_bucket_with(self.elem.clone(), |(key, value)| { - if let Some(new_value) = f(&key, value) { + if let Some(new_value) = f(key.borrow(), value) { Some((key, new_value)) } else { spare_key = Some(KeyOrRef::Owned(key)); @@ -6338,7 +6234,7 @@ } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> VacantEntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, Q, V, S, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `VacantEntryRef`. /// @@ -6409,7 +6305,7 @@ let entry = table.insert_entry( self.hash, (self.key.into_owned(), value), - make_hasher::<_, V, S>(&self.table.hash_builder), + make_hasher::(&self.table.hash_builder), ); &mut entry.1 } @@ -6423,7 +6319,7 @@ let elem = self.table.table.insert( self.hash, (self.key.into_owned(), value), - make_hasher::<_, V, S>(&self.table.hash_builder), + make_hasher::(&self.table.hash_builder), ); OccupiedEntryRef { hash: self.hash, @@ -6438,7 +6334,7 @@ where K: Eq + Hash, S: BuildHasher + Default, - A: Default + Allocator, + A: Default + Allocator + Clone, { #[cfg_attr(feature = "inline-more", inline)] fn from_iter>(iter: T) -> Self { @@ -6458,7 +6354,7 @@ where K: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { /// Inserts all new key-values from the iterator to existing `HashMap`. /// Replace values with existing keys with new values returned from the iterator. @@ -6542,7 +6438,7 @@ K: Eq + Hash + Copy, V: Copy, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { /// Inserts all new key-values from the iterator to existing `HashMap`. 
/// Replace values with existing keys with new values returned from the iterator. @@ -6559,17 +6455,17 @@ /// map.insert(1, 100); /// /// let arr = [(1, 1), (2, 2)]; - /// let some_iter = arr.iter().map(|(k, v)| (k, v)); + /// let some_iter = arr.iter().map(|&(k, v)| (k, v)); /// map.extend(some_iter); /// // Replace values with existing keys with new values returned from the iterator. /// // So that the map.get(&1) doesn't return Some(&100). /// assert_eq!(map.get(&1), Some(&1)); /// /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)]; - /// map.extend(some_vec.iter().map(|(k, v)| (k, v))); + /// map.extend(some_vec.iter().map(|&(k, v)| (k, v))); /// /// let some_arr = [(5, 5), (6, 6)]; - /// map.extend(some_arr.iter().map(|(k, v)| (k, v))); + /// map.extend(some_arr.iter().map(|&(k, v)| (k, v))); /// /// // You can also extend from another HashMap /// let mut new_map = HashMap::new(); @@ -6607,7 +6503,7 @@ K: Eq + Hash + Copy, V: Copy, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { /// Inserts all new key-values from the iterator to existing `HashMap`. /// Replace values with existing keys with new values returned from the iterator. 
@@ -6674,12 +6570,12 @@ fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { v } - fn into_iter_key<'new, A: Allocator>( + fn into_iter_key<'new, A: Allocator + Clone>( v: IntoIter<&'static str, u8, A>, ) -> IntoIter<&'new str, u8, A> { v } - fn into_iter_val<'new, A: Allocator>( + fn into_iter_val<'new, A: Allocator + Clone>( v: IntoIter, ) -> IntoIter { v @@ -6709,12 +6605,6 @@ use super::Entry::{Occupied, Vacant}; use super::EntryRef; use super::{HashMap, RawEntryMut}; - use alloc::string::{String, ToString}; - use alloc::sync::Arc; - use allocator_api2::alloc::{AllocError, Allocator, Global}; - use core::alloc::Layout; - use core::ptr::NonNull; - use core::sync::atomic::{AtomicI8, Ordering}; use rand::{rngs::SmallRng, Rng, SeedableRng}; use std::borrow::ToOwned; use std::cell::RefCell; @@ -6937,6 +6827,7 @@ } }); + #[allow(clippy::let_underscore_drop)] // kind-of a false positive for _ in half.by_ref() {} DROP_VECTOR.with(|v| { @@ -7264,10 +7155,10 @@ map.insert(1, 2); map.insert(3, 4); - let map_str = format!("{map:?}"); + let map_str = format!("{:?}", map); assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}"); - assert_eq!(format!("{empty:?}"), "{}"); + assert_eq!(format!("{:?}", empty), "{}"); } #[test] @@ -7583,7 +7474,7 @@ // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { - assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); } } @@ -7619,7 +7510,7 @@ // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { - assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); } } @@ -7668,7 +7559,6 @@ } #[test] - #[allow(clippy::needless_borrow)] fn test_extend_ref_kv_tuple() { use std::ops::AddAssign; let mut a = HashMap::new(); @@ -7690,7 +7580,7 @@ let vec: Vec<_> = (100..200).map(|i| (i, i)).collect(); a.extend(iter); a.extend(&vec); - 
a.extend(create_arr::(200, 1)); + a.extend(&create_arr::(200, 1)); assert_eq!(a.len(), 300); @@ -8091,7 +7981,7 @@ // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { - assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); } } @@ -8121,7 +8011,7 @@ // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { - assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); } } @@ -8159,10 +8049,10 @@ } #[test] - fn test_extract_if() { + fn test_drain_filter() { { let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); - let drained = map.extract_if(|&k, _| k % 2 == 0); + let drained = map.drain_filter(|&k, _| k % 2 == 0); let mut out = drained.collect::>(); out.sort_unstable(); assert_eq!(vec![(0, 0), (2, 20), (4, 40), (6, 60)], out); @@ -8170,7 +8060,7 @@ } { let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); - map.extract_if(|&k, _| k % 2 == 0).for_each(drop); + drop(map.drain_filter(|&k, _| k % 2 == 0)); assert_eq!(map.len(), 4); } } @@ -8180,32 +8070,27 @@ fn test_try_reserve() { use crate::TryReserveError::{AllocError, CapacityOverflow}; - const MAX_ISIZE: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; let mut empty_bytes: HashMap = HashMap::new(); - if let Err(CapacityOverflow) = empty_bytes.try_reserve(usize::MAX) { + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { } else { panic!("usize::MAX should trigger an overflow!"); } - if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_ISIZE) { - } else { - panic!("isize::MAX should trigger an overflow!"); - } - - if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_ISIZE / 5) { + if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 16) { } else { // This may succeed if there is enough free memory. 
Attempt to // allocate a few more hashmaps to ensure the allocation will fail. let mut empty_bytes2: HashMap = HashMap::new(); - let _ = empty_bytes2.try_reserve(MAX_ISIZE / 5); + let _ = empty_bytes2.try_reserve(MAX_USIZE / 16); let mut empty_bytes3: HashMap = HashMap::new(); - let _ = empty_bytes3.try_reserve(MAX_ISIZE / 5); + let _ = empty_bytes3.try_reserve(MAX_USIZE / 16); let mut empty_bytes4: HashMap = HashMap::new(); - if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_ISIZE / 5) { + if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_USIZE / 16) { } else { - panic!("isize::MAX / 5 should trigger an OOM!"); + panic!("usize::MAX / 8 should trigger an OOM!"); } } } @@ -8219,7 +8104,7 @@ let mut map: HashMap<_, _> = xs.iter().copied().collect(); let compute_hash = |map: &HashMap, k: i32| -> u64 { - super::make_hash::(map.hasher(), &k) + super::make_insert_hash::(map.hasher(), &k) }; // Existing key (insert) @@ -8381,21 +8266,21 @@ loop { // occasionally remove some elements if i < n && rng.gen_bool(0.1) { - let hash_value = super::make_hash(&hash_builder, &i); + let hash_value = super::make_insert_hash(&hash_builder, &i); unsafe { let e = map.table.find(hash_value, |q| q.0.eq(&i)); if let Some(e) = e { it.reflect_remove(&e); - let t = map.table.remove(e).0; + let t = map.table.remove(e); removed.push(t); left -= 1; } else { - assert!(removed.contains(&(i, 2 * i)), "{i} not in {removed:?}"); + assert!(removed.contains(&(i, 2 * i)), "{} not in {:?}", i, removed); let e = map.table.insert( hash_value, (i, 2 * i), - super::make_hasher::<_, usize, _>(&hash_builder), + super::make_hasher::(&hash_builder), ); it.reflect_insert(&e); if let Some(p) = removed.iter().position(|e| e == &(i, 2 * i)) { @@ -8520,441 +8405,4 @@ map2.clone_from(&map1); } - - #[test] - #[should_panic = "panic in clone"] - fn test_clone_from_memory_leaks() { - use ::alloc::vec::Vec; - - struct CheckedClone { - panic_in_clone: bool, - need_drop: Vec, - } - impl Clone for 
CheckedClone { - fn clone(&self) -> Self { - if self.panic_in_clone { - panic!("panic in clone") - } - Self { - panic_in_clone: self.panic_in_clone, - need_drop: self.need_drop.clone(), - } - } - } - let mut map1 = HashMap::new(); - map1.insert( - 1, - CheckedClone { - panic_in_clone: false, - need_drop: vec![0, 1, 2], - }, - ); - map1.insert( - 2, - CheckedClone { - panic_in_clone: false, - need_drop: vec![3, 4, 5], - }, - ); - map1.insert( - 3, - CheckedClone { - panic_in_clone: true, - need_drop: vec![6, 7, 8], - }, - ); - let _map2 = map1.clone(); - } - - struct MyAllocInner { - drop_count: Arc, - } - - #[derive(Clone)] - struct MyAlloc { - _inner: Arc, - } - - impl MyAlloc { - fn new(drop_count: Arc) -> Self { - MyAlloc { - _inner: Arc::new(MyAllocInner { drop_count }), - } - } - } - - impl Drop for MyAllocInner { - fn drop(&mut self) { - println!("MyAlloc freed."); - self.drop_count.fetch_sub(1, Ordering::SeqCst); - } - } - - unsafe impl Allocator for MyAlloc { - fn allocate(&self, layout: Layout) -> std::result::Result, AllocError> { - let g = Global; - g.allocate(layout) - } - - unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - let g = Global; - g.deallocate(ptr, layout) - } - } - - #[test] - fn test_hashmap_into_iter_bug() { - let dropped: Arc = Arc::new(AtomicI8::new(1)); - - { - let mut map = HashMap::with_capacity_in(10, MyAlloc::new(dropped.clone())); - for i in 0..10 { - map.entry(i).or_insert_with(|| "i".to_string()); - } - - for (k, v) in map { - println!("{}, {}", k, v); - } - } - - // All allocator clones should already be dropped. 
- assert_eq!(dropped.load(Ordering::SeqCst), 0); - } - - #[derive(Debug)] - struct CheckedCloneDrop { - panic_in_clone: bool, - panic_in_drop: bool, - dropped: bool, - data: T, - } - - impl CheckedCloneDrop { - fn new(panic_in_clone: bool, panic_in_drop: bool, data: T) -> Self { - CheckedCloneDrop { - panic_in_clone, - panic_in_drop, - dropped: false, - data, - } - } - } - - impl Clone for CheckedCloneDrop { - fn clone(&self) -> Self { - if self.panic_in_clone { - panic!("panic in clone") - } - Self { - panic_in_clone: self.panic_in_clone, - panic_in_drop: self.panic_in_drop, - dropped: self.dropped, - data: self.data.clone(), - } - } - } - - impl Drop for CheckedCloneDrop { - fn drop(&mut self) { - if self.panic_in_drop { - self.dropped = true; - panic!("panic in drop"); - } - if self.dropped { - panic!("double drop"); - } - self.dropped = true; - } - } - - /// Return hashmap with predefined distribution of elements. - /// All elements will be located in the same order as elements - /// returned by iterator. - /// - /// This function does not panic, but returns an error as a `String` - /// to distinguish between a test panic and an error in the input data. - fn get_test_map( - iter: I, - mut fun: impl FnMut(u64) -> T, - alloc: A, - ) -> Result, DefaultHashBuilder, A>, String> - where - I: Iterator + Clone + ExactSizeIterator, - A: Allocator, - T: PartialEq + core::fmt::Debug, - { - use crate::scopeguard::guard; - - let mut map: HashMap, _, A> = - HashMap::with_capacity_in(iter.size_hint().0, alloc); - { - let mut guard = guard(&mut map, |map| { - for (_, value) in map.iter_mut() { - value.panic_in_drop = false - } - }); - - let mut count = 0; - // Hash and Key must be equal to each other for controlling the elements placement. 
- for (panic_in_clone, panic_in_drop) in iter.clone() { - if core::mem::needs_drop::() && panic_in_drop { - return Err(String::from( - "panic_in_drop can be set with a type that doesn't need to be dropped", - )); - } - guard.table.insert( - count, - ( - count, - CheckedCloneDrop::new(panic_in_clone, panic_in_drop, fun(count)), - ), - |(k, _)| *k, - ); - count += 1; - } - - // Let's check that all elements are located as we wanted - let mut check_count = 0; - for ((key, value), (panic_in_clone, panic_in_drop)) in guard.iter().zip(iter) { - if *key != check_count { - return Err(format!( - "key != check_count,\nkey: `{}`,\ncheck_count: `{}`", - key, check_count - )); - } - if value.dropped - || value.panic_in_clone != panic_in_clone - || value.panic_in_drop != panic_in_drop - || value.data != fun(check_count) - { - return Err(format!( - "Value is not equal to expected,\nvalue: `{:?}`,\nexpected: \ - `CheckedCloneDrop {{ panic_in_clone: {}, panic_in_drop: {}, dropped: {}, data: {:?} }}`", - value, panic_in_clone, panic_in_drop, false, fun(check_count) - )); - } - check_count += 1; - } - - if guard.len() != check_count as usize { - return Err(format!( - "map.len() != check_count,\nmap.len(): `{}`,\ncheck_count: `{}`", - guard.len(), - check_count - )); - } - - if count != check_count { - return Err(format!( - "count != check_count,\ncount: `{}`,\ncheck_count: `{}`", - count, check_count - )); - } - core::mem::forget(guard); - } - Ok(map) - } - - const DISARMED: bool = false; - const ARMED: bool = true; - - const ARMED_FLAGS: [bool; 8] = [ - DISARMED, DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED, - ]; - - const DISARMED_FLAGS: [bool; 8] = [ - DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, - ]; - - #[test] - #[should_panic = "panic in clone"] - fn test_clone_memory_leaks_and_double_drop_one() { - let dropped: Arc = Arc::new(AtomicI8::new(2)); - - { - assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); - - let map: 
HashMap>, DefaultHashBuilder, MyAlloc> = - match get_test_map( - ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), - |n| vec![n], - MyAlloc::new(dropped.clone()), - ) { - Ok(map) => map, - Err(msg) => panic!("{msg}"), - }; - - // Clone should normally clone a few elements, and then (when the - // clone function panics), deallocate both its own memory, memory - // of `dropped: Arc` and the memory of already cloned - // elements (Vec memory inside CheckedCloneDrop). - let _map2 = map.clone(); - } - } - - #[test] - #[should_panic = "panic in drop"] - fn test_clone_memory_leaks_and_double_drop_two() { - let dropped: Arc = Arc::new(AtomicI8::new(2)); - - { - assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); - - let map: HashMap, DefaultHashBuilder, _> = match get_test_map( - DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), - |n| n, - MyAlloc::new(dropped.clone()), - ) { - Ok(map) => map, - Err(msg) => panic!("{msg}"), - }; - - let mut map2 = match get_test_map( - DISARMED_FLAGS.into_iter().zip(ARMED_FLAGS), - |n| n, - MyAlloc::new(dropped.clone()), - ) { - Ok(map) => map, - Err(msg) => panic!("{msg}"), - }; - - // The `clone_from` should try to drop the elements of `map2` without - // double drop and leaking the allocator. Elements that have not been - // dropped leak their memory. - map2.clone_from(&map); - } - } - - /// We check that we have a working table if the clone operation from another - /// thread ended in a panic (when buckets of maps are equal to each other). 
- #[test] - fn test_catch_panic_clone_from_when_len_is_equal() { - use std::thread; - - let dropped: Arc = Arc::new(AtomicI8::new(2)); - - { - assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); - - let mut map = match get_test_map( - DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), - |n| vec![n], - MyAlloc::new(dropped.clone()), - ) { - Ok(map) => map, - Err(msg) => panic!("{msg}"), - }; - - thread::scope(|s| { - let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| { - let scope_map = - match get_test_map(ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), |n| vec![n * 2], MyAlloc::new(dropped.clone())) { - Ok(map) => map, - Err(msg) => return msg, - }; - if map.table.buckets() != scope_map.table.buckets() { - return format!( - "map.table.buckets() != scope_map.table.buckets(),\nleft: `{}`,\nright: `{}`", - map.table.buckets(), scope_map.table.buckets() - ); - } - map.clone_from(&scope_map); - "We must fail the cloning!!!".to_owned() - }); - if let Ok(msg) = result.join() { - panic!("{msg}") - } - }); - - // Let's check that all iterators work fine and do not return elements - // (especially `RawIterRange`, which does not depend on the number of - // elements in the table, but looks directly at the control bytes) - // - // SAFETY: We know for sure that `RawTable` will outlive - // the returned `RawIter / RawIterRange` iterator. - assert_eq!(map.len(), 0); - assert_eq!(map.iter().count(), 0); - assert_eq!(unsafe { map.table.iter().count() }, 0); - assert_eq!(unsafe { map.table.iter().iter.count() }, 0); - - for idx in 0..map.table.buckets() { - let idx = idx as u64; - assert!( - map.table.find(idx, |(k, _)| *k == idx).is_none(), - "Index: {idx}" - ); - } - } - - // All allocator clones should already be dropped. - assert_eq!(dropped.load(Ordering::SeqCst), 0); - } - - /// We check that we have a working table if the clone operation from another - /// thread ended in a panic (when buckets of maps are not equal to each other). 
- #[test] - fn test_catch_panic_clone_from_when_len_is_not_equal() { - use std::thread; - - let dropped: Arc = Arc::new(AtomicI8::new(2)); - - { - assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); - - let mut map = match get_test_map( - [DISARMED].into_iter().zip([DISARMED]), - |n| vec![n], - MyAlloc::new(dropped.clone()), - ) { - Ok(map) => map, - Err(msg) => panic!("{msg}"), - }; - - thread::scope(|s| { - let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| { - let scope_map = match get_test_map( - ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), - |n| vec![n * 2], - MyAlloc::new(dropped.clone()), - ) { - Ok(map) => map, - Err(msg) => return msg, - }; - if map.table.buckets() == scope_map.table.buckets() { - return format!( - "map.table.buckets() == scope_map.table.buckets(): `{}`", - map.table.buckets() - ); - } - map.clone_from(&scope_map); - "We must fail the cloning!!!".to_owned() - }); - if let Ok(msg) = result.join() { - panic!("{msg}") - } - }); - - // Let's check that all iterators work fine and do not return elements - // (especially `RawIterRange`, which does not depend on the number of - // elements in the table, but looks directly at the control bytes) - // - // SAFETY: We know for sure that `RawTable` will outlive - // the returned `RawIter / RawIterRange` iterator. - assert_eq!(map.len(), 0); - assert_eq!(map.iter().count(), 0); - assert_eq!(unsafe { map.table.iter().count() }, 0); - assert_eq!(unsafe { map.table.iter().iter.count() }, 0); - - for idx in 0..map.table.buckets() { - let idx = idx as u64; - assert!( - map.table.find(idx, |(k, _)| *k == idx).is_none(), - "Index: {idx}" - ); - } - } - - // All allocator clones should already be dropped. 
- assert_eq!(dropped.load(Ordering::SeqCst), 0); - } } diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/alloc.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/alloc.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/alloc.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/alloc.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,9 +1,5 @@ pub(crate) use self::inner::{do_alloc, Allocator, Global}; -// Nightly-case. -// Use unstable `allocator_api` feature. -// This is compatible with `allocator-api2` which can be enabled or not. -// This is used when building for `std`. #[cfg(feature = "nightly")] mod inner { use crate::alloc::alloc::Layout; @@ -11,44 +7,28 @@ use core::ptr::NonNull; #[allow(clippy::map_err_ignore)] - pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + pub fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { match alloc.allocate(layout) { Ok(ptr) => Ok(ptr.as_non_null_ptr()), Err(_) => Err(()), } } -} - -// Basic non-nightly case. -// This uses `allocator-api2` enabled by default. -// If any crate enables "nightly" in `allocator-api2`, -// this will be equivalent to the nightly case, -// since `allocator_api2::alloc::Allocator` would be re-export of -// `core::alloc::Allocator`. 
-#[cfg(all(not(feature = "nightly"), feature = "allocator-api2"))] -mod inner { - use crate::alloc::alloc::Layout; - pub use allocator_api2::alloc::{Allocator, Global}; - use core::ptr::NonNull; - #[allow(clippy::map_err_ignore)] - pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { - match alloc.allocate(layout) { - Ok(ptr) => Ok(ptr.cast()), - Err(_) => Err(()), + #[cfg(feature = "bumpalo")] + unsafe impl Allocator for crate::BumpWrapper<'_> { + #[inline] + fn allocate(&self, layout: Layout) -> Result, core::alloc::AllocError> { + match self.0.try_alloc_layout(layout) { + Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())), + Err(_) => Err(core::alloc::AllocError), + } } + #[inline] + unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) {} } } -// No-defaults case. -// When building with default-features turned off and -// neither `nightly` nor `allocator-api2` is enabled, -// this will be used. -// Making it impossible to use any custom allocator with collections defined -// in this crate. -// Any crate in build-tree can enable `allocator-api2`, -// or `nightly` without disturbing users that don't want to use it. 
-#[cfg(not(any(feature = "nightly", feature = "allocator-api2")))] +#[cfg(not(feature = "nightly"))] mod inner { use crate::alloc::alloc::{alloc, dealloc, Layout}; use core::ptr::NonNull; @@ -61,7 +41,6 @@ #[derive(Copy, Clone)] pub struct Global; - unsafe impl Allocator for Global { #[inline] fn allocate(&self, layout: Layout) -> Result, ()> { @@ -72,7 +51,6 @@ dealloc(ptr.as_ptr(), layout); } } - impl Default for Global { #[inline] fn default() -> Self { @@ -80,7 +58,16 @@ } } - pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + pub fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { alloc.allocate(layout) } + + #[cfg(feature = "bumpalo")] + unsafe impl Allocator for crate::BumpWrapper<'_> { + #[allow(clippy::map_err_ignore)] + fn allocate(&self, layout: Layout) -> Result, ()> { + self.0.try_alloc_layout(layout).map_err(|_| ()) + } + unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) {} + } } diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/bitmask.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/bitmask.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/bitmask.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/bitmask.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,6 +1,6 @@ -use super::imp::{ - BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE, -}; +use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE}; +#[cfg(feature = "nightly")] +use core::intrinsics; /// A bit mask which contains the result of a `Match` operation on a `Group` and /// allows iterating through them. @@ -8,55 +8,75 @@ /// The bit mask is arranged so that low-order bits represent lower memory /// addresses for group match results. /// -/// For implementation reasons, the bits in the set may be sparsely packed with -/// groups of 8 bits representing one element. If any of these bits are non-zero -/// then this element is considered to true in the mask. 
If this is the +/// For implementation reasons, the bits in the set may be sparsely packed, so +/// that there is only one bit-per-byte used (the high bit, 7). If this is the /// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be /// performed on counts/indices to normalize this difference. `BITMASK_MASK` is /// similarly a mask of all the actually-used bits. -/// -/// To iterate over a bit mask, it must be converted to a form where only 1 bit -/// is set per element. This is done by applying `BITMASK_ITER_MASK` on the -/// mask bits. #[derive(Copy, Clone)] -pub(crate) struct BitMask(pub(crate) BitMaskWord); +pub struct BitMask(pub BitMaskWord); #[allow(clippy::use_self)] impl BitMask { /// Returns a new `BitMask` with all bits inverted. #[inline] #[must_use] - #[allow(dead_code)] - pub(crate) fn invert(self) -> Self { + pub fn invert(self) -> Self { BitMask(self.0 ^ BITMASK_MASK) } + /// Flip the bit in the mask for the entry at the given index. + /// + /// Returns the bit's previous state. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + #[cfg(feature = "raw")] + pub unsafe fn flip(&mut self, index: usize) -> bool { + // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit. + let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1); + self.0 ^= mask; + // The bit was set if the bit is now 0. + self.0 & mask == 0 + } + /// Returns a new `BitMask` with the lowest bit removed. #[inline] #[must_use] - fn remove_lowest_bit(self) -> Self { + pub fn remove_lowest_bit(self) -> Self { BitMask(self.0 & (self.0 - 1)) } - /// Returns whether the `BitMask` has at least one set bit. #[inline] - pub(crate) fn any_bit_set(self) -> bool { + pub fn any_bit_set(self) -> bool { self.0 != 0 } /// Returns the first set bit in the `BitMask`, if there is one. 
#[inline] - pub(crate) fn lowest_set_bit(self) -> Option { - if let Some(nonzero) = NonZeroBitMaskWord::new(self.0) { - Some(Self::nonzero_trailing_zeros(nonzero)) - } else { + pub fn lowest_set_bit(self) -> Option { + if self.0 == 0 { None + } else { + Some(unsafe { self.lowest_set_bit_nonzero() }) } } + /// Returns the first set bit in the `BitMask`, if there is one. The + /// bitmask must not be empty. + #[inline] + #[cfg(feature = "nightly")] + pub unsafe fn lowest_set_bit_nonzero(self) -> usize { + intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE + } + #[inline] + #[cfg(not(feature = "nightly"))] + pub unsafe fn lowest_set_bit_nonzero(self) -> usize { + self.trailing_zeros() + } + /// Returns the number of trailing zeroes in the `BitMask`. #[inline] - pub(crate) fn trailing_zeros(self) -> usize { + pub fn trailing_zeros(self) -> usize { // ARM doesn't have a trailing_zeroes instruction, and instead uses // reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM // versions (pre-ARMv7) don't have RBIT and need to emulate it @@ -69,21 +89,9 @@ } } - /// Same as above but takes a `NonZeroBitMaskWord`. - #[inline] - fn nonzero_trailing_zeros(nonzero: NonZeroBitMaskWord) -> usize { - if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 { - // SAFETY: A byte-swapped non-zero value is still non-zero. - let swapped = unsafe { NonZeroBitMaskWord::new_unchecked(nonzero.get().swap_bytes()) }; - swapped.leading_zeros() as usize / BITMASK_STRIDE - } else { - nonzero.trailing_zeros() as usize / BITMASK_STRIDE - } - } - /// Returns the number of leading zeroes in the `BitMask`. #[inline] - pub(crate) fn leading_zeros(self) -> usize { + pub fn leading_zeros(self) -> usize { self.0.leading_zeros() as usize / BITMASK_STRIDE } } @@ -94,32 +102,13 @@ #[inline] fn into_iter(self) -> BitMaskIter { - // A BitMask only requires each element (group of bits) to be non-zero. - // However for iteration we need each element to only contain 1 bit. 
- BitMaskIter(BitMask(self.0 & BITMASK_ITER_MASK)) + BitMaskIter(self) } } /// Iterator over the contents of a `BitMask`, returning the indices of set /// bits. -#[derive(Copy, Clone)] -pub(crate) struct BitMaskIter(pub(crate) BitMask); - -impl BitMaskIter { - /// Flip the bit in the mask for the entry at the given index. - /// - /// Returns the bit's previous state. - #[inline] - #[allow(clippy::cast_ptr_alignment)] - #[cfg(feature = "raw")] - pub(crate) unsafe fn flip(&mut self, index: usize) -> bool { - // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit. - let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1); - self.0 .0 ^= mask; - // The bit was set if the bit is now 0. - self.0 .0 & mask == 0 - } -} +pub struct BitMaskIter(BitMask); impl Iterator for BitMaskIter { type Item = usize; diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/generic.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/generic.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/generic.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/generic.rs 2024-05-28 11:57:36.000000000 +0200 @@ -5,29 +5,26 @@ // Use the native word size as the group size. Using a 64-bit group size on // a 32-bit architecture will just end up being more expensive because // shifts and multiplies will need to be emulated. +#[cfg(any( + target_pointer_width = "64", + target_arch = "aarch64", + target_arch = "x86_64", + target_arch = "wasm32", +))] +type GroupWord = u64; +#[cfg(all( + target_pointer_width = "32", + not(target_arch = "aarch64"), + not(target_arch = "x86_64"), + not(target_arch = "wasm32"), +))] +type GroupWord = u32; -cfg_if! 
{ - if #[cfg(any( - target_pointer_width = "64", - target_arch = "aarch64", - target_arch = "x86_64", - target_arch = "wasm32", - ))] { - type GroupWord = u64; - type NonZeroGroupWord = core::num::NonZeroU64; - } else { - type GroupWord = u32; - type NonZeroGroupWord = core::num::NonZeroU32; - } -} - -pub(crate) type BitMaskWord = GroupWord; -pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord; -pub(crate) const BITMASK_STRIDE: usize = 8; +pub type BitMaskWord = GroupWord; +pub const BITMASK_STRIDE: usize = 8; // We only care about the highest bit of each byte for the mask. #[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)] -pub(crate) const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord; -pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; +pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord; /// Helper function to replicate a byte across a `GroupWord`. #[inline] @@ -40,7 +37,7 @@ /// /// This implementation uses a word-sized integer. #[derive(Copy, Clone)] -pub(crate) struct Group(GroupWord); +pub struct Group(GroupWord); // We perform all operations in the native endianness, and convert to // little-endian just before creating a BitMask. The can potentially @@ -49,14 +46,14 @@ #[allow(clippy::use_self)] impl Group { /// Number of bytes in the group. - pub(crate) const WIDTH: usize = mem::size_of::(); + pub const WIDTH: usize = mem::size_of::(); /// Returns a full group of empty bytes, suitable for use as the initial /// value for an empty hash table. /// /// This is guaranteed to be aligned to the group size. #[inline] - pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] { + pub const fn static_empty() -> &'static [u8; Group::WIDTH] { #[repr(C)] struct AlignedBytes { _align: [Group; 0], @@ -72,7 +69,7 @@ /// Loads a group of bytes starting at the given address. 
#[inline] #[allow(clippy::cast_ptr_alignment)] // unaligned load - pub(crate) unsafe fn load(ptr: *const u8) -> Self { + pub unsafe fn load(ptr: *const u8) -> Self { Group(ptr::read_unaligned(ptr.cast())) } @@ -80,7 +77,7 @@ /// aligned to `mem::align_of::()`. #[inline] #[allow(clippy::cast_ptr_alignment)] - pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self { + pub unsafe fn load_aligned(ptr: *const u8) -> Self { // FIXME: use align_offset once it stabilizes debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); Group(ptr::read(ptr.cast())) @@ -90,7 +87,7 @@ /// aligned to `mem::align_of::()`. #[inline] #[allow(clippy::cast_ptr_alignment)] - pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) { + pub unsafe fn store_aligned(self, ptr: *mut u8) { // FIXME: use align_offset once it stabilizes debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); ptr::write(ptr.cast(), self.0); @@ -107,7 +104,7 @@ /// - This only happens if there is at least 1 true match. /// - The chance of this happening is very low (< 1% chance per byte). #[inline] - pub(crate) fn match_byte(self, byte: u8) -> BitMask { + pub fn match_byte(self, byte: u8) -> BitMask { // This algorithm is derived from // https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord let cmp = self.0 ^ repeat(byte); @@ -117,7 +114,7 @@ /// Returns a `BitMask` indicating all bytes in the group which are /// `EMPTY`. #[inline] - pub(crate) fn match_empty(self) -> BitMask { + pub fn match_empty(self) -> BitMask { // If the high bit is set, then the byte must be either: // 1111_1111 (EMPTY) or 1000_0000 (DELETED). // So we can just check if the top two bits are 1 by ANDing them. @@ -127,14 +124,14 @@ /// Returns a `BitMask` indicating all bytes in the group which are /// `EMPTY` or `DELETED`. 
#[inline] - pub(crate) fn match_empty_or_deleted(self) -> BitMask { + pub fn match_empty_or_deleted(self) -> BitMask { // A byte is EMPTY or DELETED iff the high bit is set BitMask((self.0 & repeat(0x80)).to_le()) } /// Returns a `BitMask` indicating all bytes in the group which are full. #[inline] - pub(crate) fn match_full(self) -> BitMask { + pub fn match_full(self) -> BitMask { self.match_empty_or_deleted().invert() } @@ -143,7 +140,7 @@ /// - `DELETED => EMPTY` /// - `FULL => DELETED` #[inline] - pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self { // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 // and high_bit = 0 (FULL) to 1000_0000 // diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/mod.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/mod.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/mod.rs 2024-05-28 11:57:36.000000000 +0200 @@ -4,6 +4,7 @@ use core::iter::FusedIterator; use core::marker::PhantomData; use core::mem; +use core::mem::ManuallyDrop; use core::mem::MaybeUninit; use core::ptr::NonNull; use core::{hint, ptr}; @@ -20,21 +21,12 @@ if #[cfg(all( target_feature = "sse2", any(target_arch = "x86", target_arch = "x86_64"), - not(miri), + not(miri) ))] { mod sse2; use sse2 as imp; - } else if #[cfg(all( - target_arch = "aarch64", - target_feature = "neon", - // NEON intrinsics are currently broken on big-endian targets. - // See https://github.com/rust-lang/stdarch/issues/1484. - target_endian = "little", - not(miri), - ))] { - mod neon; - use neon as imp; } else { + #[path = "generic.rs"] mod generic; use generic as imp; } @@ -45,24 +37,36 @@ mod bitmask; -use self::bitmask::BitMaskIter; +use self::bitmask::{BitMask, BitMaskIter}; use self::imp::Group; // Branch prediction hint. 
This is currently only available on nightly but it // consistently improves performance by 10-15%. -#[cfg(not(feature = "nightly"))] -use core::convert::identity as likely; -#[cfg(not(feature = "nightly"))] -use core::convert::identity as unlikely; #[cfg(feature = "nightly")] use core::intrinsics::{likely, unlikely}; -// FIXME: use strict provenance functions once they are stable. -// Implement it with a transmute for now. -#[inline(always)] -#[allow(clippy::useless_transmute)] // clippy is wrong, cast and transmute are different here -fn invalid_mut(addr: usize) -> *mut T { - unsafe { core::mem::transmute(addr) } +// On stable we can use #[cold] to get a equivalent effect: this attributes +// suggests that the function is unlikely to be called +#[cfg(not(feature = "nightly"))] +#[inline] +#[cold] +fn cold() {} + +#[cfg(not(feature = "nightly"))] +#[inline] +fn likely(b: bool) -> bool { + if !b { + cold(); + } + b +} +#[cfg(not(feature = "nightly"))] +#[inline] +fn unlikely(b: bool) -> bool { + if b { + cold(); + } + b } #[inline] @@ -97,13 +101,6 @@ } } -trait SizedTypeProperties: Sized { - const IS_ZERO_SIZED: bool = mem::size_of::() == 0; - const NEEDS_DROP: bool = mem::needs_drop::(); -} - -impl SizedTypeProperties for T {} - /// Control byte value for an empty bucket. const EMPTY: u8 = 0b1111_1111; @@ -137,13 +134,6 @@ hash as usize } -// Constant for h2 function that grabing the top 7 bits of the hash. -const MIN_HASH_LEN: usize = if mem::size_of::() < mem::size_of::() { - mem::size_of::() -} else { - mem::size_of::() -}; - /// Secondary hash function, saved in the low 7 bits of the control byte. #[inline] #[allow(clippy::cast_possible_truncation)] @@ -151,8 +141,8 @@ // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit // value, some hash functions (such as FxHash) produce a usize result // instead, which means that the top 32 bits are 0 on 32-bit platforms. - // So we use MIN_HASH_LEN constant to handle this. 
- let top7 = hash >> (MIN_HASH_LEN * 8 - 7); + let hash_len = usize::min(mem::size_of::(), mem::size_of::()); + let top7 = hash >> (hash_len * 8 - 7); (top7 & 0x7f) as u8 // truncation } @@ -240,15 +230,11 @@ impl TableLayout { #[inline] - const fn new() -> Self { + fn new() -> Self { let layout = Layout::new::(); Self { size: layout.size(), - ctrl_align: if layout.align() > Group::WIDTH { - layout.align() - } else { - Group::WIDTH - }, + ctrl_align: usize::max(layout.align(), Group::WIDTH), } } @@ -262,12 +248,6 @@ size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1); let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?; - // We need an additional check to ensure that the allocation doesn't - // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295). - if len > isize::MAX as usize - (ctrl_align - 1) { - return None; - } - Some(( unsafe { Layout::from_size_align_unchecked(len, ctrl_align) }, ctrl_offset, @@ -275,9 +255,14 @@ } } -/// A reference to an empty bucket into which an can be inserted. -pub struct InsertSlot { - index: usize, +/// Returns a Layout which describes the allocation required for a hash table, +/// and the offset of the control bytes in the allocation. +/// (the offset is also one past last element of buckets) +/// +/// Returns `None` if an overflow occurs. +#[cfg_attr(feature = "inline-more", inline)] +fn calculate_layout(buckets: usize) -> Option<(Layout, usize)> { + TableLayout::new::().calculate_layout_for(buckets) } /// A reference to a hash table bucket containing a `T`. @@ -305,79 +290,11 @@ } impl Bucket { - /// Creates a [`Bucket`] that contain pointer to the data. - /// The pointer calculation is performed by calculating the - /// offset from given `base` pointer (convenience for - /// `base.as_ptr().sub(index)`). - /// - /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer - /// offset of `3 * size_of::()` bytes. 
- /// - /// If the `T` is a ZST, then we instead track the index of the element - /// in the table so that `erase` works properly (return - /// `NonNull::new_unchecked((index + 1) as *mut T)`) - /// - /// # Safety - /// - /// If `mem::size_of::() != 0`, then the safety rules are directly derived - /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety - /// rules of [`NonNull::new_unchecked`] function. - /// - /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method - /// and [`NonNull::new_unchecked`] function, as well as for the correct - /// logic of the work of this crate, the following rules are necessary and - /// sufficient: - /// - /// * the `base` pointer must not be `dangling` and must points to the - /// end of the first `value element` from the `data part` of the table, i.e. - /// must be the pointer that returned by [`RawTable::data_end`] or by - /// [`RawTableInner::data_end`]; - /// - /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e. - /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` - /// must be no greater than the number returned by the function - /// [`RawTable::buckets`] or [`RawTableInner::buckets`]. - /// - /// If `mem::size_of::() == 0`, then the only requirement is that the - /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e. - /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` - /// must be no greater than the number returned by the function - /// [`RawTable::buckets`] or [`RawTableInner::buckets`]. 
- /// - /// [`Bucket`]: crate::raw::Bucket - /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1 - /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked - /// [`RawTable::data_end`]: crate::raw::RawTable::data_end - /// [`RawTableInner::data_end`]: RawTableInner::data_end - /// [`RawTable::buckets`]: crate::raw::RawTable::buckets - /// [`RawTableInner::buckets`]: RawTableInner::buckets #[inline] unsafe fn from_base_index(base: NonNull, index: usize) -> Self { - // If mem::size_of::() != 0 then return a pointer to an `element` in - // the data part of the table (we start counting from "0", so that - // in the expression T[last], the "last" index actually one less than the - // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"): - // - // `from_base_index(base, 1).as_ptr()` returns a pointer that - // points here in the data part of the table - // (to the start of T1) - // | - // | `base: NonNull` must point here - // | (to the end of T0 or to the start of C0) - // v v - // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast - // ^ - // `from_base_index(base, 1)` returns a pointer - // that points here in the data part of the table - // (to the end of T1) - // - // where: T0...Tlast - our stored data; C0...Clast - control bytes - // or metadata for data. 
- let ptr = if T::IS_ZERO_SIZED { - // won't overflow because index must be less than length (bucket_mask) - // and bucket_mask is guaranteed to be less than `isize::MAX` - // (see TableLayout::calculate_layout_for method) - invalid_mut(index + 1) + let ptr = if mem::size_of::() == 0 { + // won't overflow because index must be less than length + (index + 1) as *mut T } else { base.as_ptr().sub(index) }; @@ -385,183 +302,27 @@ ptr: NonNull::new_unchecked(ptr), } } - - /// Calculates the index of a [`Bucket`] as distance between two pointers - /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`). - /// The returned value is in units of T: the distance in bytes divided by - /// [`core::mem::size_of::()`]. - /// - /// If the `T` is a ZST, then we return the index of the element in - /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`). - /// - /// This function is the inverse of [`from_base_index`]. - /// - /// # Safety - /// - /// If `mem::size_of::() != 0`, then the safety rules are directly derived - /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`. - /// - /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`] - /// method, as well as for the correct logic of the work of this crate, the - /// following rules are necessary and sufficient: - /// - /// * `base` contained pointer must not be `dangling` and must point to the - /// end of the first `element` from the `data part` of the table, i.e. - /// must be a pointer that returns by [`RawTable::data_end`] or by - /// [`RawTableInner::data_end`]; - /// - /// * `self` also must not contain dangling pointer; - /// - /// * both `self` and `base` must be created from the same [`RawTable`] - /// (or [`RawTableInner`]). - /// - /// If `mem::size_of::() == 0`, this function is always safe. 
- /// - /// [`Bucket`]: crate::raw::Bucket - /// [`from_base_index`]: crate::raw::Bucket::from_base_index - /// [`RawTable::data_end`]: crate::raw::RawTable::data_end - /// [`RawTableInner::data_end`]: RawTableInner::data_end - /// [`RawTable`]: crate::raw::RawTable - /// [`RawTableInner`]: RawTableInner - /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from #[inline] unsafe fn to_base_index(&self, base: NonNull) -> usize { - // If mem::size_of::() != 0 then return an index under which we used to store the - // `element` in the data part of the table (we start counting from "0", so - // that in the expression T[last], the "last" index actually is one less than the - // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"). - // For example for 5th element in table calculation is performed like this: - // - // mem::size_of::() - // | - // | `self = from_base_index(base, 5)` that returns pointer - // | that points here in tha data part of the table - // | (to the end of T5) - // | | `base: NonNull` must point here - // v | (to the end of T0 or to the start of C0) - // /???\ v v - // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast - // \__________ __________/ - // \/ - // `bucket.to_base_index(base)` = 5 - // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::() - // - // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data. - if T::IS_ZERO_SIZED { - // this can not be UB + if mem::size_of::() == 0 { self.ptr.as_ptr() as usize - 1 } else { offset_from(base.as_ptr(), self.ptr.as_ptr()) } } - - /// Acquires the underlying raw pointer `*mut T` to `data`. 
- /// - /// # Note - /// - /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the - /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because - /// for properly dropping the data we also need to clear `data` control bytes. If we - /// drop data, but do not clear `data control byte` it leads to double drop when - /// [`RawTable`] goes out of scope. - /// - /// If you modify an already initialized `value`, so [`Hash`] and [`Eq`] on the new - /// `T` value and its borrowed form *must* match those for the old `T` value, as the map - /// will not re-evaluate where the new value should go, meaning the value may become - /// "lost" if their location does not reflect their state. - /// - /// [`RawTable`]: crate::raw::RawTable - /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place - /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html - /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "raw")] - /// # fn test() { - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::raw::{Bucket, RawTable}; - /// - /// type NewHashBuilder = core::hash::BuildHasherDefault; - /// - /// fn make_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// let hash_builder = NewHashBuilder::default(); - /// let mut table = RawTable::new(); - /// - /// let value = ("a", 100); - /// let hash = make_hash(&hash_builder, &value.0); - /// - /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0)); - /// - /// let bucket: Bucket<(&str, i32)> = table.find(hash, |(k1, _)| k1 == &value.0).unwrap(); - /// - /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100)); - /// # } - /// # fn main() { - /// # #[cfg(feature = "raw")] - /// # test() - /// # } - /// 
``` #[inline] pub fn as_ptr(&self) -> *mut T { - if T::IS_ZERO_SIZED { + if mem::size_of::() == 0 { // Just return an arbitrary ZST pointer which is properly aligned - // invalid pointer is good enough for ZST - invalid_mut(mem::align_of::()) + mem::align_of::() as *mut T } else { unsafe { self.ptr.as_ptr().sub(1) } } } - - /// Create a new [`Bucket`] that is offset from the `self` by the given - /// `offset`. The pointer calculation is performed by calculating the - /// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`). - /// This function is used for iterators. - /// - /// `offset` is in units of `T`; e.g., a `offset` of 3 represents a pointer - /// offset of `3 * size_of::()` bytes. - /// - /// # Safety - /// - /// If `mem::size_of::() != 0`, then the safety rules are directly derived - /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety - /// rules of [`NonNull::new_unchecked`] function. - /// - /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method - /// and [`NonNull::new_unchecked`] function, as well as for the correct - /// logic of the work of this crate, the following rules are necessary and - /// sufficient: - /// - /// * `self` contained pointer must not be `dangling`; - /// - /// * `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`, - /// i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other - /// words, `self.to_base_index() + ofset + 1` must be no greater than the number returned - /// by the function [`RawTable::buckets`] or [`RawTableInner::buckets`]. - /// - /// If `mem::size_of::() == 0`, then the only requirement is that the - /// `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`, - /// i.e. 
`(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other words, - /// `self.to_base_index() + ofset + 1` must be no greater than the number returned by the - /// function [`RawTable::buckets`] or [`RawTableInner::buckets`]. - /// - /// [`Bucket`]: crate::raw::Bucket - /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1 - /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked - /// [`RawTable::buckets`]: crate::raw::RawTable::buckets - /// [`RawTableInner::buckets`]: RawTableInner::buckets #[inline] unsafe fn next_n(&self, offset: usize) -> Self { - let ptr = if T::IS_ZERO_SIZED { - // invalid pointer is good enough for ZST - invalid_mut(self.ptr.as_ptr() as usize + offset) + let ptr = if mem::size_of::() == 0 { + (self.ptr.as_ptr() as usize + offset) as *mut T } else { self.ptr.as_ptr().sub(offset) }; @@ -569,212 +330,26 @@ ptr: NonNull::new_unchecked(ptr), } } - - /// Executes the destructor (if any) of the pointed-to `data`. - /// - /// # Safety - /// - /// See [`ptr::drop_in_place`] for safety concerns. - /// - /// You should use [`RawTable::erase`] instead of this function, - /// or be careful with calling this function directly, because for - /// properly dropping the data we need also clear `data` control bytes. - /// If we drop data, but do not erase `data control byte` it leads to - /// double drop when [`RawTable`] goes out of scope. - /// - /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html - /// [`RawTable`]: crate::raw::RawTable - /// [`RawTable::erase`]: crate::raw::RawTable::erase #[cfg_attr(feature = "inline-more", inline)] - pub(crate) unsafe fn drop(&self) { + pub unsafe fn drop(&self) { self.as_ptr().drop_in_place(); } - - /// Reads the `value` from `self` without moving it. This leaves the - /// memory in `self` unchanged. - /// - /// # Safety - /// - /// See [`ptr::read`] for safety concerns. 
- /// - /// You should use [`RawTable::remove`] instead of this function, - /// or be careful with calling this function directly, because compiler - /// calls its destructor when readed `value` goes out of scope. It - /// can cause double dropping when [`RawTable`] goes out of scope, - /// because of not erased `data control byte`. - /// - /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html - /// [`RawTable`]: crate::raw::RawTable - /// [`RawTable::remove`]: crate::raw::RawTable::remove #[inline] - pub(crate) unsafe fn read(&self) -> T { + pub unsafe fn read(&self) -> T { self.as_ptr().read() } - - /// Overwrites a memory location with the given `value` without reading - /// or dropping the old value (like [`ptr::write`] function). - /// - /// # Safety - /// - /// See [`ptr::write`] for safety concerns. - /// - /// # Note - /// - /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match - /// those for the old `T` value, as the map will not re-evaluate where the new - /// value should go, meaning the value may become "lost" if their location - /// does not reflect their state. - /// - /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html - /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html - /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html #[inline] - pub(crate) unsafe fn write(&self, val: T) { + pub unsafe fn write(&self, val: T) { self.as_ptr().write(val); } - - /// Returns a shared immutable reference to the `value`. - /// - /// # Safety - /// - /// See [`NonNull::as_ref`] for safety concerns. 
- /// - /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "raw")] - /// # fn test() { - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::raw::{Bucket, RawTable}; - /// - /// type NewHashBuilder = core::hash::BuildHasherDefault; - /// - /// fn make_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// let hash_builder = NewHashBuilder::default(); - /// let mut table = RawTable::new(); - /// - /// let value: (&str, String) = ("A pony", "is a small horse".to_owned()); - /// let hash = make_hash(&hash_builder, &value.0); - /// - /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0)); - /// - /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap(); - /// - /// assert_eq!( - /// unsafe { bucket.as_ref() }, - /// &("A pony", "is a small horse".to_owned()) - /// ); - /// # } - /// # fn main() { - /// # #[cfg(feature = "raw")] - /// # test() - /// # } - /// ``` #[inline] pub unsafe fn as_ref<'a>(&self) -> &'a T { &*self.as_ptr() } - - /// Returns a unique mutable reference to the `value`. - /// - /// # Safety - /// - /// See [`NonNull::as_mut`] for safety concerns. - /// - /// # Note - /// - /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match - /// those for the old `T` value, as the map will not re-evaluate where the new - /// value should go, meaning the value may become "lost" if their location - /// does not reflect their state. 
- /// - /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut - /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html - /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "raw")] - /// # fn test() { - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::raw::{Bucket, RawTable}; - /// - /// type NewHashBuilder = core::hash::BuildHasherDefault; - /// - /// fn make_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// let hash_builder = NewHashBuilder::default(); - /// let mut table = RawTable::new(); - /// - /// let value: (&str, String) = ("A pony", "is a small horse".to_owned()); - /// let hash = make_hash(&hash_builder, &value.0); - /// - /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0)); - /// - /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap(); - /// - /// unsafe { - /// bucket - /// .as_mut() - /// .1 - /// .push_str(" less than 147 cm at the withers") - /// }; - /// assert_eq!( - /// unsafe { bucket.as_ref() }, - /// &( - /// "A pony", - /// "is a small horse less than 147 cm at the withers".to_owned() - /// ) - /// ); - /// # } - /// # fn main() { - /// # #[cfg(feature = "raw")] - /// # test() - /// # } - /// ``` #[inline] pub unsafe fn as_mut<'a>(&self) -> &'a mut T { &mut *self.as_ptr() } - - /// Copies `size_of` bytes from `other` to `self`. The source - /// and destination may *not* overlap. - /// - /// # Safety - /// - /// See [`ptr::copy_nonoverlapping`] for safety concerns. - /// - /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of - /// whether `T` is [`Copy`]. 
If `T` is not [`Copy`], using *both* the values - /// in the region beginning at `*self` and the region beginning at `*other` can - /// [violate memory safety]. - /// - /// # Note - /// - /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match - /// those for the old `T` value, as the map will not re-evaluate where the new - /// value should go, meaning the value may become "lost" if their location - /// does not reflect their state. - /// - /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html - /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html - /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value - /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html - /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html #[cfg(feature = "raw")] #[inline] pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) { @@ -783,16 +358,15 @@ } /// A raw hash table with an unsafe API. -pub struct RawTable { - table: RawTableInner, - alloc: A, +pub struct RawTable { + table: RawTableInner, // Tell dropck that we own instances of T. marker: PhantomData, } /// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless /// of how many different key-value types are used. -struct RawTableInner { +struct RawTableInner { // Mask to get an index from a hash value. The value is one less than the // number of buckets in the table. 
bucket_mask: usize, @@ -806,6 +380,8 @@ // Number of elements in the table, only really used by len() items: usize, + + alloc: A, } impl RawTable { @@ -817,8 +393,7 @@ #[inline] pub const fn new() -> Self { Self { - table: RawTableInner::NEW, - alloc: Global, + table: RawTableInner::new_in(Global), marker: PhantomData, } } @@ -837,9 +412,7 @@ } } -impl RawTable { - const TABLE_LAYOUT: TableLayout = TableLayout::new::(); - +impl RawTable { /// Creates a new empty hash table without allocating any memory, using the /// given allocator. /// @@ -847,10 +420,9 @@ /// leave the data pointer dangling since that bucket is never written to /// due to our load factor forcing us to always have at least 1 free bucket. #[inline] - pub const fn new_in(alloc: A) -> Self { + pub fn new_in(alloc: A) -> Self { Self { - table: RawTableInner::NEW, - alloc, + table: RawTableInner::new_in(alloc), marker: PhantomData, } } @@ -868,99 +440,73 @@ Ok(Self { table: RawTableInner::new_uninitialized( - &alloc, - Self::TABLE_LAYOUT, + alloc, + TableLayout::new::(), buckets, fallibility, )?, - alloc, marker: PhantomData, }) } - /// Attempts to allocate a new hash table using the given allocator, with at least enough - /// capacity for inserting the given number of elements without reallocating. - #[cfg(feature = "raw")] - pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating. 
+ fn fallible_with_capacity( + alloc: A, + capacity: usize, + fallibility: Fallibility, + ) -> Result { Ok(Self { table: RawTableInner::fallible_with_capacity( - &alloc, - Self::TABLE_LAYOUT, + alloc, + TableLayout::new::(), capacity, - Fallibility::Fallible, + fallibility, )?, - alloc, marker: PhantomData, }) } + /// Attempts to allocate a new hash table using the given allocator, with at least enough + /// capacity for inserting the given number of elements without reallocating. + #[cfg(feature = "raw")] + pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { + Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible) + } + /// Allocates a new hash table using the given allocator, with at least enough capacity for /// inserting the given number of elements without reallocating. pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Self { - table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity), - alloc, - marker: PhantomData, + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) { + Ok(capacity) => capacity, + Err(_) => unsafe { hint::unreachable_unchecked() }, } } /// Returns a reference to the underlying allocator. #[inline] pub fn allocator(&self) -> &A { - &self.alloc + &self.table.alloc } - /// Returns pointer to one past last `data` element in the the table as viewed from - /// the start point of the allocation. - /// - /// The caller must ensure that the `RawTable` outlives the returned [`NonNull`], - /// otherwise using it may result in [`undefined behavior`]. 
- /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[inline] - pub fn data_end(&self) -> NonNull { - // SAFETY: `self.table.ctrl` is `NonNull`, so casting it is safe - // - // `self.table.ctrl.as_ptr().cast()` returns pointer that - // points here (to the end of `T0`) - // ∨ - // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m - // \________ ________/ - // \/ - // `n = buckets - 1`, i.e. `RawTable::buckets() - 1` - // - // where: T0...T_n - our stored data; - // CT0...CT_n - control bytes or metadata for `data`. - // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search - // with loading `Group` bytes from the heap works properly, even if the result - // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also - // `RawTableInner::set_ctrl` function. - // - // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number - // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. - unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) } + /// Deallocates the table without dropping any entries. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn free_buckets(&mut self) { + self.table.free_buckets(TableLayout::new::()); } - /// Returns pointer to start of data table. + /// Returns pointer to one past last element of data table. #[inline] - #[cfg(any(feature = "raw", feature = "nightly"))] - pub unsafe fn data_start(&self) -> NonNull { - NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets())) + pub unsafe fn data_end(&self) -> NonNull { + NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) } - /// Return the information about memory allocated by the table. - /// - /// `RawTable` allocates single memory block to store both data and metadata. - /// This function returns allocation size and alignment and the beginning of the area. 
- /// These are the arguments which will be passed to `dealloc` when the table is dropped. - /// - /// This function might be useful for memory profiling. + /// Returns pointer to start of data table. #[inline] - #[cfg(feature = "raw")] - pub fn allocation_info(&self) -> (NonNull, Layout) { - // SAFETY: We use the same `table_layout` that was used to allocate - // this table. - unsafe { self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) } + #[cfg(feature = "nightly")] + pub unsafe fn data_start(&self) -> *mut T { + self.data_end().as_ptr().wrapping_sub(self.buckets()) } /// Returns the index of a bucket from a `Bucket`. @@ -970,55 +516,8 @@ } /// Returns a pointer to an element in the table. - /// - /// The caller must ensure that the `RawTable` outlives the returned [`Bucket`], - /// otherwise using it may result in [`undefined behavior`]. - /// - /// # Safety - /// - /// If `mem::size_of::() != 0`, then the caller of this function must observe the - /// following safety rules: - /// - /// * The table must already be allocated; - /// - /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`] - /// function, i.e. `(index + 1) <= self.buckets()`. - /// - /// It is safe to call this function with index of zero (`index == 0`) on a table that has - /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`]. - /// - /// If `mem::size_of::() == 0`, then the only requirement is that the `index` must - /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e. - /// `(index + 1) <= self.buckets()`. 
- /// - /// [`RawTable::buckets`]: RawTable::buckets - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] pub unsafe fn bucket(&self, index: usize) -> Bucket { - // If mem::size_of::() != 0 then return a pointer to the `element` in the `data part` of the table - // (we start counting from "0", so that in the expression T[n], the "n" index actually one less than - // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"): - // - // `table.bucket(3).as_ptr()` returns a pointer that points here in the `data` - // part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`) - // | - // | `base = self.data_end()` points here - // | (to the start of CT0 or to the end of T0) - // v v - // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m - // ^ \__________ __________/ - // `table.bucket(3)` returns a pointer that points \/ - // here in the `data` part of the `RawTable` (to additional control bytes - // the end of T3) `m = Group::WIDTH - 1` - // - // where: T0...T_n - our stored data; - // CT0...CT_n - control bytes or metadata for `data`; - // CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from - // the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask` - // is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function. - // - // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number - // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`. debug_assert_ne!(self.table.bucket_mask, 0); debug_assert!(index < self.buckets()); Bucket::from_base_index(self.data_end(), index) @@ -1026,7 +525,8 @@ /// Erases an element from the table without dropping it. 
#[cfg_attr(feature = "inline-more", inline)] - unsafe fn erase_no_drop(&mut self, item: &Bucket) { + #[deprecated(since = "0.8.1", note = "use erase or remove instead")] + pub unsafe fn erase_no_drop(&mut self, item: &Bucket) { let index = self.bucket_index(item); self.table.erase(index); } @@ -1034,6 +534,7 @@ /// Erases an element from the table, dropping it in place. #[cfg_attr(feature = "inline-more", inline)] #[allow(clippy::needless_pass_by_value)] + #[allow(deprecated)] pub unsafe fn erase(&mut self, item: Bucket) { // Erase the element from the table first since drop might panic. self.erase_no_drop(&item); @@ -1057,18 +558,12 @@ } /// Removes an element from the table, returning it. - /// - /// This also returns an `InsertSlot` pointing to the newly free bucket. #[cfg_attr(feature = "inline-more", inline)] #[allow(clippy::needless_pass_by_value)] - pub unsafe fn remove(&mut self, item: Bucket) -> (T, InsertSlot) { + #[allow(deprecated)] + pub unsafe fn remove(&mut self, item: Bucket) -> T { self.erase_no_drop(&item); - ( - item.read(), - InsertSlot { - index: self.bucket_index(&item), - }, - ) + item.read() } /// Finds and removes an element from the table, returning it. @@ -1076,7 +571,7 @@ pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option { // Avoid `Option::map` because it bloats LLVM IR. match self.find(hash, eq) { - Some(bucket) => Some(unsafe { self.remove(bucket).0 }), + Some(bucket) => Some(unsafe { self.remove(bucket) }), None => None, } } @@ -1090,17 +585,18 @@ /// Removes all elements from the table without freeing the backing memory. #[cfg_attr(feature = "inline-more", inline)] pub fn clear(&mut self) { - if self.is_empty() { - // Special case empty table to avoid surprising O(capacity) time. 
- return; - } // Ensure that the table is reset even if one of the drops panic let mut self_ = guard(self, |self_| self_.clear_no_drop()); unsafe { - // SAFETY: ScopeGuard sets to zero the `items` field of the table - // even in case of panic during the dropping of the elements so - // that there will be no double drop of the elements. - self_.table.drop_elements::(); + self_.drop_elements(); + } + } + + unsafe fn drop_elements(&mut self) { + if mem::needs_drop::() && !self.is_empty() { + for item in self.iter() { + item.drop(); + } } } @@ -1111,16 +607,7 @@ // space for. let min_size = usize::max(self.table.items, min_size); if min_size == 0 { - let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW); - unsafe { - // SAFETY: - // 1. We call the function only once; - // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] - // and [`TableLayout`] that were used to allocate this table. - // 3. If any elements' drop function panics, then there will only be a memory leak, - // because we have replaced the inner table with a new one. - old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); - } + *self = Self::new_in(self.table.alloc.clone()); return; } @@ -1137,33 +624,14 @@ if min_buckets < self.buckets() { // Fast path if the table is empty if self.table.items == 0 { - let new_inner = - RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size); - let mut old_inner = mem::replace(&mut self.table, new_inner); - unsafe { - // SAFETY: - // 1. We call the function only once; - // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] - // and [`TableLayout`] that were used to allocate this table. - // 3. If any elements' drop function panics, then there will only be a memory leak, - // because we have replaced the inner table with a new one. 
- old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); - } + *self = Self::with_capacity_in(min_size, self.table.alloc.clone()); } else { // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - unsafe { - // SAFETY: - // 1. We know for sure that `min_size >= self.table.items`. - // 2. The [`RawTableInner`] must already have properly initialized control bytes since - // we will never expose RawTable::new_uninitialized in a public API. - if self - .resize(min_size, hasher, Fallibility::Infallible) - .is_err() - { - // SAFETY: The result of calling the `resize` function cannot be an error - // because `fallibility == Fallibility::Infallible. - hint::unreachable_unchecked() - } + if self + .resize(min_size, hasher, Fallibility::Infallible) + .is_err() + { + unsafe { hint::unreachable_unchecked() } } } } @@ -1173,18 +641,13 @@ /// without reallocation. #[cfg_attr(feature = "inline-more", inline)] pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { - if unlikely(additional > self.table.growth_left) { + if additional > self.table.growth_left { // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - unsafe { - // SAFETY: The [`RawTableInner`] must already have properly initialized control - // bytes since we will never expose RawTable::new_uninitialized in a public API. - if self - .reserve_rehash(additional, hasher, Fallibility::Infallible) - .is_err() - { - // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`. - hint::unreachable_unchecked() - } + if self + .reserve_rehash(additional, hasher, Fallibility::Infallible) + .is_err() + { + unsafe { hint::unreachable_unchecked() } } } } @@ -1198,45 +661,28 @@ hasher: impl Fn(&T) -> u64, ) -> Result<(), TryReserveError> { if additional > self.table.growth_left { - // SAFETY: The [`RawTableInner`] must already have properly initialized control - // bytes since we will never expose RawTable::new_uninitialized in a public API. 
- unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) } + self.reserve_rehash(additional, hasher, Fallibility::Fallible) } else { Ok(()) } } /// Out-of-line slow path for `reserve` and `try_reserve`. - /// - /// # Safety - /// - /// The [`RawTableInner`] must have properly initialized control bytes, - /// otherwise calling this function results in [`undefined behavior`] - /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[cold] #[inline(never)] - unsafe fn reserve_rehash( + fn reserve_rehash( &mut self, additional: usize, hasher: impl Fn(&T) -> u64, fallibility: Fallibility, ) -> Result<(), TryReserveError> { unsafe { - // SAFETY: - // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and - // [`TableLayout`] that were used to allocate this table. - // 2. The `drop` function is the actual drop function of the elements stored in - // the table. - // 3. The caller ensures that the control bytes of the `RawTableInner` - // are already initialized. self.table.reserve_rehash_inner( - &self.alloc, additional, &|table, index| hasher(table.bucket::(index).as_ref()), fallibility, - Self::TABLE_LAYOUT, - if T::NEEDS_DROP { + TableLayout::new::(), + if mem::needs_drop::() { Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T))) } else { None @@ -1247,50 +693,20 @@ /// Allocates a new table of a different size and moves the contents of the /// current table into it. - /// - /// # Safety - /// - /// The [`RawTableInner`] must have properly initialized control bytes, - /// otherwise calling this function results in [`undefined behavior`] - /// - /// The caller of this function must ensure that `capacity >= self.table.items` - /// otherwise: - /// - /// * If `self.table.items != 0`, calling of this function with `capacity` - /// equal to 0 (`capacity == 0`) results in [`undefined behavior`]. 
- /// - /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and - /// `self.table.items > capacity_to_buckets(capacity)` - /// calling this function results in [`undefined behavior`]. - /// - /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and - /// `self.table.items > capacity_to_buckets(capacity)` - /// calling this function are never return (will go into an - /// infinite loop). - /// - /// See [`RawTableInner::find_insert_slot`] for more information. - /// - /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - unsafe fn resize( + fn resize( &mut self, capacity: usize, hasher: impl Fn(&T) -> u64, fallibility: Fallibility, ) -> Result<(), TryReserveError> { - // SAFETY: - // 1. The caller of this function guarantees that `capacity >= self.table.items`. - // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and - // [`TableLayout`] that were used to allocate this table. - // 3. The caller ensures that the control bytes of the `RawTableInner` - // are already initialized. - self.table.resize_inner( - &self.alloc, - capacity, - &|table, index| hasher(table.bucket::(index).as_ref()), - fallibility, - Self::TABLE_LAYOUT, - ) + unsafe { + self.table.resize_inner( + capacity, + &|table, index| hasher(table.bucket::(index).as_ref()), + fallibility, + TableLayout::new::(), + ) + } } /// Inserts a new element into the table, and returns its raw bucket. @@ -1299,27 +715,22 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { unsafe { - // SAFETY: - // 1. The [`RawTableInner`] must already have properly initialized control bytes since - // we will never expose `RawTable::new_uninitialized` in a public API. - // - // 2. We reserve additional space (if necessary) right after calling this function. 
- let mut slot = self.table.find_insert_slot(hash); + let mut index = self.table.find_insert_slot(hash); - // We can avoid growing the table once we have reached our load factor if we are replacing - // a tombstone. This works since the number of EMPTY slots does not change in this case. - // - // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index - // in the range `0..=self.buckets()`. - let old_ctrl = *self.table.ctrl(slot.index); + // We can avoid growing the table once we have reached our load + // factor if we are replacing a tombstone. This works since the + // number of EMPTY slots does not change in this case. + let old_ctrl = *self.table.ctrl(index); if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) { self.reserve(1, hasher); - // SAFETY: We know for sure that `RawTableInner` has control bytes - // initialized and that there is extra space in the table. - slot = self.table.find_insert_slot(hash); + index = self.table.find_insert_slot(hash); } - self.insert_in_slot(hash, slot, value) + self.table.record_item_insert_at(index, old_ctrl, hash); + + let bucket = self.bucket(index); + bucket.write(value); + bucket } } @@ -1385,9 +796,9 @@ { let index = self.bucket_index(&bucket); let old_ctrl = *self.table.ctrl(index); - debug_assert!(self.is_bucket_full(index)); + debug_assert!(is_full(old_ctrl)); let old_growth_left = self.table.growth_left; - let item = self.remove(bucket).0; + let item = self.remove(bucket); if let Some(new_item) = f(item) { self.table.growth_left = old_growth_left; self.table.set_ctrl(index, old_ctrl); @@ -1399,78 +810,17 @@ } } - /// Searches for an element in the table. If the element is not found, - /// returns `Err` with the position of a slot where an element with the - /// same hash could be inserted. - /// - /// This function may resize the table if additional space is required for - /// inserting an element. 
- #[inline] - pub fn find_or_find_insert_slot( - &mut self, - hash: u64, - mut eq: impl FnMut(&T) -> bool, - hasher: impl Fn(&T) -> u64, - ) -> Result, InsertSlot> { - self.reserve(1, hasher); - - unsafe { - // SAFETY: - // 1. We know for sure that there is at least one empty `bucket` in the table. - // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will - // never expose `RawTable::new_uninitialized` in a public API. - // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket, - // which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in - // the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe. - match self - .table - .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref())) - { - // SAFETY: See explanation above. - Ok(index) => Ok(self.bucket(index)), - Err(slot) => Err(slot), - } - } - } - - /// Inserts a new element into the table in the given slot, and returns its - /// raw bucket. - /// - /// # Safety - /// - /// `slot` must point to a slot previously returned by - /// `find_or_find_insert_slot`, and no mutation of the table must have - /// occurred since that call. - #[inline] - pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket { - let old_ctrl = *self.table.ctrl(slot.index); - self.table.record_item_insert_at(slot.index, old_ctrl, hash); - - let bucket = self.bucket(slot.index); - bucket.write(value); - bucket - } - /// Searches for an element in the table. #[inline] pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> { - unsafe { - // SAFETY: - // 1. The [`RawTableInner`] must already have properly initialized control bytes since we - // will never expose `RawTable::new_uninitialized` in a public API. - // 1. 
The `find_inner` function returns the `index` of only the full bucket, which is in - // the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref` - // is safe. - let result = self - .table - .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref())); - - // Avoid `Option::map` because it bloats LLVM IR. - match result { - // SAFETY: See explanation above. - Some(index) => Some(self.bucket(index)), - None => None, - } + let result = self.table.find_inner(hash, &mut |index| unsafe { + eq(self.bucket(index).as_ref()) + }); + + // Avoid `Option::map` because it bloats LLVM IR. + match result { + Some(index) => Some(unsafe { self.bucket(index) }), + None => None, } } @@ -1578,27 +928,17 @@ self.table.bucket_mask + 1 } - /// Checks whether the bucket at `index` is full. - /// - /// # Safety - /// - /// The caller must ensure `index` is less than the number of buckets. - #[inline] - pub unsafe fn is_bucket_full(&self, index: usize) -> bool { - self.table.is_bucket_full(index) - } - /// Returns an iterator over every element in the table. It is up to /// the caller to ensure that the `RawTable` outlives the `RawIter`. /// Because we cannot make the `next` method unsafe on the `RawIter` /// struct, we have to make the `iter` method unsafe. #[inline] pub unsafe fn iter(&self) -> RawIter { - // SAFETY: - // 1. The caller must uphold the safety contract for `iter` method. - // 2. The [`RawTableInner`] must already have properly initialized control bytes since - // we will never expose RawTable::new_uninitialized in a public API. - self.table.iter() + let data = Bucket::from_base_index(self.data_end(), 0); + RawIter { + iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()), + items: self.table.items, + } } /// Returns an iterator over occupied buckets that could match a given hash. @@ -1612,7 +952,7 @@ /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe. 
#[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash { + pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> { RawIterHash::new(self, hash) } @@ -1638,8 +978,8 @@ debug_assert_eq!(iter.len(), self.len()); RawDrain { iter, - table: mem::replace(&mut self.table, RawTableInner::NEW), - orig_table: NonNull::from(&mut self.table), + table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))), + orig_table: NonNull::from(self), marker: PhantomData, } } @@ -1653,31 +993,31 @@ pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter { debug_assert_eq!(iter.len(), self.len()); + let alloc = self.table.alloc.clone(); let allocation = self.into_allocation(); RawIntoIter { iter, allocation, marker: PhantomData, + alloc, } } /// Converts the table into a raw allocation. The contents of the table /// should be dropped using a `RawIter` before freeing the allocation. #[cfg_attr(feature = "inline-more", inline)] - pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout, A)> { + pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout)> { let alloc = if self.table.is_empty_singleton() { None } else { // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. 
- let (layout, ctrl_offset) = - match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) { - Some(lco) => lco, - None => unsafe { hint::unreachable_unchecked() }, - }; + let (layout, ctrl_offset) = match calculate_layout::(self.table.buckets()) { + Some(lco) => lco, + None => unsafe { hint::unreachable_unchecked() }, + }; Some(( unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) }, layout, - unsafe { ptr::read(&self.alloc) }, )) }; mem::forget(self); @@ -1685,62 +1025,41 @@ } } -unsafe impl Send for RawTable +unsafe impl Send for RawTable where T: Send, A: Send, { } -unsafe impl Sync for RawTable +unsafe impl Sync for RawTable where T: Sync, A: Sync, { } -impl RawTableInner { - const NEW: Self = RawTableInner::new(); - - /// Creates a new empty hash table without allocating any memory. - /// - /// In effect this returns a table with exactly 1 bucket. However we can - /// leave the data pointer dangling since that bucket is never accessed - /// due to our load factor forcing us to always have at least 1 free bucket. +impl RawTableInner { #[inline] - const fn new() -> Self { + const fn new_in(alloc: A) -> Self { Self { // Be careful to cast the entire slice to a raw pointer. ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) }, bucket_mask: 0, items: 0, growth_left: 0, + alloc, } } } -impl RawTableInner { - /// Allocates a new [`RawTableInner`] with the given number of buckets. - /// The control bytes and buckets are left uninitialized. - /// - /// # Safety - /// - /// The caller of this function must ensure that the `buckets` is power of two - /// and also initialize all control bytes of the length `self.bucket_mask + 1 + - /// Group::WIDTH` with the [`EMPTY`] bytes. - /// - /// See also [`Allocator`] API for other safety concerns. 
- /// - /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html +impl RawTableInner { #[cfg_attr(feature = "inline-more", inline)] - unsafe fn new_uninitialized( - alloc: &A, + unsafe fn new_uninitialized( + alloc: A, table_layout: TableLayout, buckets: usize, fallibility: Fallibility, - ) -> Result - where - A: Allocator, - { + ) -> Result { debug_assert!(buckets.is_power_of_two()); // Avoid `Option::ok_or_else` because it bloats LLVM IR. @@ -1749,48 +1068,45 @@ None => return Err(fallibility.capacity_overflow()), }; - let ptr: NonNull = match do_alloc(alloc, layout) { + // We need an additional check to ensure that the allocation doesn't + // exceed `isize::MAX`. We can skip this check on 64-bit systems since + // such allocations will never succeed anyways. + // + // This mirrors what Vec does in the standard library. + if mem::size_of::() < 8 && layout.size() > isize::MAX as usize { + return Err(fallibility.capacity_overflow()); + } + + let ptr: NonNull = match do_alloc(&alloc, layout) { Ok(block) => block.cast(), Err(_) => return Err(fallibility.alloc_err(layout)), }; - // SAFETY: null pointer will be caught in above check let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset)); Ok(Self { ctrl, bucket_mask: buckets - 1, items: 0, growth_left: bucket_mask_to_capacity(buckets - 1), + alloc, }) } - /// Attempts to allocate a new [`RawTableInner`] with at least enough - /// capacity for inserting the given number of elements without reallocating. - /// - /// All the control bytes are initialized with the [`EMPTY`] bytes. 
#[inline] - fn fallible_with_capacity( - alloc: &A, + fn fallible_with_capacity( + alloc: A, table_layout: TableLayout, capacity: usize, fallibility: Fallibility, - ) -> Result - where - A: Allocator, - { + ) -> Result { if capacity == 0 { - Ok(Self::NEW) + Ok(Self::new_in(alloc)) } else { - // SAFETY: We checked that we could successfully allocate the new table, and then - // initialized all control bytes with the constant `EMPTY` byte. unsafe { let buckets = capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?; let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?; - // SAFETY: We checked that the table is allocated and therefore the table already has - // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for) - // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe. result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes()); Ok(result) @@ -1798,397 +1114,66 @@ } } - /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting - /// the given number of elements without reallocating. - /// - /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program - /// in case of allocation error. Use [`fallible_with_capacity`] instead if you want to - /// handle memory allocation failure. - /// - /// All the control bytes are initialized with the [`EMPTY`] bytes. - /// - /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity - /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html - fn with_capacity(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self - where - A: Allocator, - { - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. 
- match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) { - Ok(table_inner) => table_inner, - // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`. - Err(_) => unsafe { hint::unreachable_unchecked() }, - } - } - - /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method. - /// - /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control - /// bytes outside the range of the table are filled with [`EMPTY`] entries. These will unfortunately - /// trigger a match of [`RawTableInner::find_insert_slot_in_group`] function. This is because - /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking - /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied. - /// We detect this situation here and perform a second scan starting at the beginning of the table. - /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the - /// trailing control bytes (containing [`EMPTY`] bytes). - /// - /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an - /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and - /// `Safety`). - /// - /// # Warning - /// - /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than - /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the - /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that - /// index will cause immediate [`undefined behavior`]. - /// - /// # Safety - /// - /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method. 
- /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work - /// of this crate, the following rules are necessary and sufficient: - /// - /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this - /// function results in [`undefined behavior`]. - /// - /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`] - /// (after the `find_insert_slot_in_group` function, but before insertion into the table). - /// - /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()` - /// (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function). - /// - /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`] - /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the - /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`). - /// - /// [`RawTableInner::ctrl`]: RawTableInner::ctrl - /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[inline] - unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot { - // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`. 
- if unlikely(self.is_bucket_full(index)) { - debug_assert!(self.bucket_mask < Group::WIDTH); - // SAFETY: - // - // * Since the caller of this function ensures that the control bytes are properly - // initialized and `ptr = self.ctrl(0)` points to the start of the array of control - // bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH` - // and points to the properly initialized control bytes (see also - // `TableLayout::calculate_layout_for` and `ptr::read`); - // - // * Because the caller of this function ensures that the index was provided by the - // `self.find_insert_slot_in_group()` function, so for for tables larger than the - // group width (self.buckets() >= Group::WIDTH), we will never end up in the given - // branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group` - // cannot return a full bucket index. For tables smaller than the group width, calling - // the `unwrap_unchecked` function is also safe, as the trailing control bytes outside - // the range of the table are filled with EMPTY bytes (and we know for sure that there - // is at least one FULL bucket), so this second scan either finds an empty slot (due to - // the load factor) or hits the trailing control bytes (containing EMPTY). - index = Group::load_aligned(self.ctrl(0)) - .match_empty_or_deleted() - .lowest_set_bit() - .unwrap_unchecked(); - } - InsertSlot { index } - } - - /// Finds the position to insert something in a group. - /// - /// **This may have false positives and must be fixed up with `fix_insert_slot` - /// before it's used.** - /// - /// The function is guaranteed to return the index of an empty or deleted [`Bucket`] - /// in the range `0..self.buckets()` (`0..=self.bucket_mask`). 
- #[inline] - fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option { - let bit = group.match_empty_or_deleted().lowest_set_bit(); - - if likely(bit.is_some()) { - // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number - // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. - Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask) - } else { - None - } - } - - /// Searches for an element in the table, or a potential slot where that element could - /// be inserted (an empty or deleted [`Bucket`] index). - /// - /// This uses dynamic dispatch to reduce the amount of code generated, but that is - /// eliminated by LLVM optimizations. - /// - /// This function does not make any changes to the `data` part of the table, or any - /// changes to the `items` or `growth_left` field of the table. - /// - /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the - /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function - /// will never return (will go into an infinite loop) for tables larger than the group - /// width, or return an index outside of the table indices range if the table is less - /// than the group width. - /// - /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool` - /// function with only `FULL` buckets' indices and return the `index` of the found - /// element (as `Ok(index)`). If the element is not found and there is at least 1 - /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return - /// [InsertSlot] with an index in the range `0..self.buckets()`, but in any case, - /// if this function returns [`InsertSlot`], it will contain an index in the range - /// `0..=self.buckets()`. - /// - /// # Safety - /// - /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling - /// this function results in [`undefined behavior`]. 
- /// - /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is - /// less than the group width and if there was not at least one empty or deleted bucket in - /// the table will cause immediate [`undefined behavior`]. This is because in this case the - /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY] - /// control bytes outside the table range. + /// Searches for an empty or deleted bucket which is suitable for inserting + /// a new element and sets the hash for that slot. /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// There must be at least 1 empty bucket in the table. #[inline] - unsafe fn find_or_find_insert_slot_inner( - &self, - hash: u64, - eq: &mut dyn FnMut(usize) -> bool, - ) -> Result { - let mut insert_slot = None; - - let h2_hash = h2(hash); - let mut probe_seq = self.probe_seq(hash); - - loop { - // SAFETY: - // * Caller of this function ensures that the control bytes are properly initialized. - // - // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` - // of the table due to masking with `self.bucket_mask` and also because mumber of - // buckets is a power of two (see `self.probe_seq` function). - // - // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to - // call `Group::load` due to the extended control bytes range, which is - // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control - // byte will never be read for the allocated table); - // - // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will - // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` - // bytes, which is safe (see RawTableInner::new). 
- let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; - - for bit in group.match_byte(h2_hash) { - let index = (probe_seq.pos + bit) & self.bucket_mask; - - if likely(eq(index)) { - return Ok(index); - } - } - - // We didn't find the element we were looking for in the group, try to get an - // insertion slot from the group if we don't have one yet. - if likely(insert_slot.is_none()) { - insert_slot = self.find_insert_slot_in_group(&group, &probe_seq); - } - - // Only stop the search if the group contains at least one empty element. - // Otherwise, the element that we are looking for might be in a following group. - if likely(group.match_empty().any_bit_set()) { - // We must have found a insert slot by now, since the current group contains at - // least one. For tables smaller than the group width, there will still be an - // empty element in the current (and only) group due to the load factor. - unsafe { - // SAFETY: - // * Caller of this function ensures that the control bytes are properly initialized. - // - // * We use this function with the slot / index found by `self.find_insert_slot_in_group` - return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked())); - } - } - - probe_seq.move_next(self.bucket_mask); - } - } - - /// Searches for an empty or deleted bucket which is suitable for inserting a new - /// element and sets the hash for that slot. Returns an index of that slot and the - /// old control byte stored in the found index. - /// - /// This function does not check if the given element exists in the table. Also, - /// this function does not check if there is enough space in the table to insert - /// a new element. Caller of the funtion must make ensure that the table has at - /// least 1 empty or deleted `bucket`, otherwise this function will never return - /// (will go into an infinite loop) for tables larger than the group width, or - /// return an index outside of the table indices range if the table is less than - /// the group width. 
- /// - /// If there is at least 1 empty or deleted `bucket` in the table, the function is - /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case, - /// if this function returns an `index` it will be in the range `0..=self.buckets()`. - /// - /// This function does not make any changes to the `data` parts of the table, - /// or any changes to the the `items` or `growth_left` field of the table. - /// - /// # Safety - /// - /// The safety rules are directly derived from the safety rules for the - /// [`RawTableInner::set_ctrl_h2`] and [`RawTableInner::find_insert_slot`] methods. - /// Thus, in order to uphold the safety contracts for that methods, as well as for - /// the correct logic of the work of this crate, you must observe the following rules - /// when calling this function: - /// - /// * The [`RawTableInner`] has already been allocated and has properly initialized - /// control bytes otherwise calling this function results in [`undefined behavior`]. - /// - /// * The caller of this function must ensure that the "data" parts of the table - /// will have an entry in the returned index (matching the given hash) right - /// after calling this function. - /// - /// Attempt to write data at the `index` returned by this function when the table is - /// less than the group width and if there was not at least one empty or deleted bucket in - /// the table will cause immediate [`undefined behavior`]. This is because in this case the - /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY] - /// control bytes outside the table range. - /// - /// The caller must independently increase the `items` field of the table, and also, - /// if the old control byte was [`EMPTY`], then decrease the table's `growth_left` - /// field, and do not change it if the old control byte was [`DELETED`]. 
- /// - /// See also [`Bucket::as_ptr`] method, for more information about of properly removing - /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`]. - /// - /// [`Bucket::as_ptr`]: Bucket::as_ptr - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - /// [`RawTableInner::ctrl`]: RawTableInner::ctrl - /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2 - /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot - #[inline] - unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) { - // SAFETY: Caller of this function ensures that the control bytes are properly initialized. - let index: usize = self.find_insert_slot(hash).index; - // SAFETY: - // 1. The `find_insert_slot` function either returns an `index` less than or - // equal to `self.buckets() = self.bucket_mask + 1` of the table, or never - // returns if it cannot find an empty or deleted slot. - // 2. The caller of this function guarantees that the table has already been - // allocated + unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) { + let index = self.find_insert_slot(hash); let old_ctrl = *self.ctrl(index); self.set_ctrl_h2(index, hash); (index, old_ctrl) } /// Searches for an empty or deleted bucket which is suitable for inserting - /// a new element, returning the `index` for the new [`Bucket`]. - /// - /// This function does not make any changes to the `data` part of the table, or any - /// changes to the `items` or `growth_left` field of the table. - /// - /// The table must have at least 1 empty or deleted `bucket`, otherwise this function - /// will never return (will go into an infinite loop) for tables larger than the group - /// width, or return an index outside of the table indices range if the table is less - /// than the group width. 
- /// - /// If there is at least 1 empty or deleted `bucket` in the table, the function is - /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`, - /// but in any case, if this function returns [`InsertSlot`], it will contain an index - /// in the range `0..=self.buckets()`. - /// - /// # Safety + /// a new element. /// - /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling - /// this function results in [`undefined behavior`]. - /// - /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is - /// less than the group width and if there was not at least one empty or deleted bucket in - /// the table will cause immediate [`undefined behavior`]. This is because in this case the - /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY] - /// control bytes outside the table range. - /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// There must be at least 1 empty bucket in the table. #[inline] - unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot { + fn find_insert_slot(&self, hash: u64) -> usize { let mut probe_seq = self.probe_seq(hash); loop { - // SAFETY: - // * Caller of this function ensures that the control bytes are properly initialized. - // - // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` - // of the table due to masking with `self.bucket_mask` and also because mumber of - // buckets is a power of two (see `self.probe_seq` function). 
- // - // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to - // call `Group::load` due to the extended control bytes range, which is - // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control - // byte will never be read for the allocated table); - // - // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will - // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` - // bytes, which is safe (see RawTableInner::new). - let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; + unsafe { + let group = Group::load(self.ctrl(probe_seq.pos)); + if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() { + let result = (probe_seq.pos + bit) & self.bucket_mask; + + // In tables smaller than the group width, trailing control + // bytes outside the range of the table are filled with + // EMPTY entries. These will unfortunately trigger a + // match, but once masked may point to a full bucket that + // is already occupied. We detect this situation here and + // perform a second scan starting at the beginning of the + // table. This second scan is guaranteed to find an empty + // slot (due to the load factor) before hitting the trailing + // control bytes (containing EMPTY). + if unlikely(is_full(*self.ctrl(result))) { + debug_assert!(self.bucket_mask < Group::WIDTH); + debug_assert_ne!(probe_seq.pos, 0); + return Group::load_aligned(self.ctrl(0)) + .match_empty_or_deleted() + .lowest_set_bit_nonzero(); + } - let index = self.find_insert_slot_in_group(&group, &probe_seq); - if likely(index.is_some()) { - // SAFETY: - // * Caller of this function ensures that the control bytes are properly initialized. 
- // - // * We use this function with the slot / index found by `self.find_insert_slot_in_group` - unsafe { - return self.fix_insert_slot(index.unwrap_unchecked()); + return result; } } probe_seq.move_next(self.bucket_mask); } } - /// Searches for an element in a table, returning the `index` of the found element. - /// This uses dynamic dispatch to reduce the amount of code generated, but it is - /// eliminated by LLVM optimizations. - /// - /// This function does not make any changes to the `data` part of the table, or any - /// changes to the `items` or `growth_left` field of the table. - /// - /// The table must have at least 1 empty `bucket`, otherwise, if the - /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, - /// this function will also never return (will go into an infinite loop). - /// - /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool` - /// function with only `FULL` buckets' indices and return the `index` of the found - /// element as `Some(index)`, so the index will always be in the range - /// `0..self.buckets()`. - /// - /// # Safety - /// - /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling - /// this function results in [`undefined behavior`]. - /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[inline(always)] - unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option { + /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of + /// code generated, but it is eliminated by LLVM optimizations. + #[inline] + fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option { let h2_hash = h2(hash); let mut probe_seq = self.probe_seq(hash); loop { - // SAFETY: - // * Caller of this function ensures that the control bytes are properly initialized. 
- // - // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` - // of the table due to masking with `self.bucket_mask`. - // - // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to - // call `Group::load` due to the extended control bytes range, which is - // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control - // byte will never be read for the allocated table); - // - // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will - // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` - // bytes, which is safe (see RawTableInner::new_in). let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; for bit in group.match_byte(h2_hash) { - // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number - // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. let index = (probe_seq.pos + bit) & self.bucket_mask; if likely(eq(index)) { @@ -2204,52 +1189,12 @@ } } - /// Prepares for rehashing data in place (that is, without allocating new memory). - /// Converts all full index `control bytes` to `DELETED` and all `DELETED` control - /// bytes to `EMPTY`, i.e. performs the following conversion: - /// - /// - `EMPTY` control bytes -> `EMPTY`; - /// - `DELETED` control bytes -> `EMPTY`; - /// - `FULL` control bytes -> `DELETED`. - /// - /// This function does not make any changes to the `data` parts of the table, - /// or any changes to the the `items` or `growth_left` field of the table. - /// - /// # Safety - /// - /// You must observe the following safety rules when calling this function: - /// - /// * The [`RawTableInner`] has already been allocated; - /// - /// * The caller of this function must convert the `DELETED` bytes back to `FULL` - /// bytes when re-inserting them into their ideal position (which was impossible - /// to do during the first insert due to tombstones). 
If the caller does not do - /// this, then calling this function may result in a memory leak. - /// - /// * The [`RawTableInner`] must have properly initialized control bytes otherwise - /// calling this function results in [`undefined behavior`]. - /// - /// Calling this function on a table that has not been allocated results in - /// [`undefined behavior`]. - /// - /// See also [`Bucket::as_ptr`] method, for more information about of properly removing - /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. - /// - /// [`Bucket::as_ptr`]: Bucket::as_ptr - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::mut_mut)] #[inline] unsafe fn prepare_rehash_in_place(&mut self) { - // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY. - // This effectively frees up all buckets containing a DELETED entry. - // - // SAFETY: - // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`; - // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned` - // due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`; - // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated; - // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0 - // and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for). + // Bulk convert all full control bytes to DELETED, and all DELETED + // control bytes to EMPTY. This effectively frees up all buckets + // containing a DELETED entry. for i in (0..self.buckets()).step_by(Group::WIDTH) { let group = Group::load_aligned(self.ctrl(i)); let group = group.convert_special_to_empty_and_full_to_deleted(); @@ -2258,245 +1203,15 @@ // Fix up the trailing control bytes. 
See the comments in set_ctrl // for the handling of tables smaller than the group width. - // - // SAFETY: The caller of this function guarantees that [`RawTableInner`] - // has already been allocated - if unlikely(self.buckets() < Group::WIDTH) { - // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, - // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to - // `Group::WIDTH` is safe + if self.buckets() < Group::WIDTH { self.ctrl(0) .copy_to(self.ctrl(Group::WIDTH), self.buckets()); } else { - // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of - // control bytes,so copying `Group::WIDTH` bytes with offset equal - // to `self.buckets() == self.bucket_mask + 1` is safe self.ctrl(0) .copy_to(self.ctrl(self.buckets()), Group::WIDTH); } } - /// Returns an iterator over every element in the table. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result - /// is [`undefined behavior`]: - /// - /// * The caller has to ensure that the `RawTableInner` outlives the - /// `RawIter`. Because we cannot make the `next` method unsafe on - /// the `RawIter` struct, we have to make the `iter` method unsafe. - /// - /// * The [`RawTableInner`] must have properly initialized control bytes. - /// - /// The type `T` must be the actual type of the elements stored in the table, - /// otherwise using the returned [`RawIter`] results in [`undefined behavior`]. - /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[inline] - unsafe fn iter(&self) -> RawIter { - // SAFETY: - // 1. Since the caller of this function ensures that the control bytes - // are properly initialized and `self.data_end()` points to the start - // of the array of control bytes, therefore: `ctrl` is valid for reads, - // properly aligned to `Group::WIDTH` and points to the properly initialized - // control bytes. - // 2. 
`data` bucket index in the table is equal to the `ctrl` index (i.e. - // equal to zero). - // 3. We pass the exact value of buckets of the table to the function. - // - // `ctrl` points here (to the start - // of the first control byte `CT0`) - // ∨ - // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m - // \________ ________/ - // \/ - // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` - // - // where: T0...T_n - our stored data; - // CT0...CT_n - control bytes or metadata for `data`. - // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search - // with loading `Group` bytes from the heap works properly, even if the result - // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also - // `RawTableInner::set_ctrl` function. - // - // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number - // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. - let data = Bucket::from_base_index(self.data_end(), 0); - RawIter { - // SAFETY: See explanation above - iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()), - items: self.items, - } - } - - /// Executes the destructors (if any) of the values stored in the table. - /// - /// # Note - /// - /// This function does not erase the control bytes of the table and does - /// not make any changes to the `items` or `growth_left` fields of the - /// table. If necessary, the caller of this function must manually set - /// up these table fields, for example using the [`clear_no_drop`] function. - /// - /// Be careful during calling this function, because drop function of - /// the elements can panic, and this can leave table in an inconsistent - /// state. - /// - /// # Safety - /// - /// The type `T` must be the actual type of the elements stored in the table, - /// otherwise calling this function may result in [`undefined behavior`]. 
- /// - /// If `T` is a type that should be dropped and **the table is not empty**, - /// calling this function more than once results in [`undefined behavior`]. - /// - /// If `T` is not [`Copy`], attempting to use values stored in the table after - /// calling this function may result in [`undefined behavior`]. - /// - /// It is safe to call this function on a table that has not been allocated, - /// on a table with uninitialized control bytes, and on a table with no actual - /// data but with `Full` control bytes if `self.items == 0`. - /// - /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information - /// about of properly removing or saving `element` from / into the [`RawTable`] / - /// [`RawTableInner`]. - /// - /// [`Bucket::drop`]: Bucket::drop - /// [`Bucket::as_ptr`]: Bucket::as_ptr - /// [`clear_no_drop`]: RawTableInner::clear_no_drop - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - unsafe fn drop_elements(&mut self) { - // Check that `self.items != 0`. Protects against the possibility - // of creating an iterator on an table with uninitialized control bytes. - if T::NEEDS_DROP && self.items != 0 { - // SAFETY: We know for sure that RawTableInner will outlive the - // returned `RawIter` iterator, and the caller of this function - // must uphold the safety contract for `drop_elements` method. - for item in self.iter::() { - // SAFETY: The caller must uphold the safety contract for - // `drop_elements` method. - item.drop(); - } - } - } - - /// Executes the destructors (if any) of the values stored in the table and than - /// deallocates the table. - /// - /// # Note - /// - /// Calling this function automatically makes invalid (dangling) all instances of - /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table. - /// - /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left` - /// fields of the table. 
If necessary, the caller of this function must manually set - /// up these table fields. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is [`undefined behavior`]: - /// - /// * Calling this function more than once; - /// - /// * The type `T` must be the actual type of the elements stored in the table. - /// - /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used - /// to allocate this table. - /// - /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that - /// was used to allocate this table. - /// - /// The caller of this function should pay attention to the possibility of the - /// elements' drop function panicking, because this: - /// - /// * May leave the table in an inconsistent state; - /// - /// * Memory is never deallocated, so a memory leak may occur. - /// - /// Attempt to use the `ctrl` field of the table (dereference) after calling this - /// function results in [`undefined behavior`]. - /// - /// It is safe to call this function on a table that has not been allocated, - /// on a table with uninitialized control bytes, and on a table with no actual - /// data but with `Full` control bytes if `self.items == 0`. - /// - /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`] - /// for more information. - /// - /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements - /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - unsafe fn drop_inner_table(&mut self, alloc: &A, table_layout: TableLayout) { - if !self.is_empty_singleton() { - unsafe { - // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method. - self.drop_elements::(); - // SAFETY: - // 1. We have checked that our table is allocated. - // 2. The caller must uphold the safety contract for `drop_inner_table` method. 
- self.free_buckets(alloc, table_layout); - } - } - } - - /// Returns a pointer to an element in the table (convenience for - /// `Bucket::from_base_index(self.data_end::(), index)`). - /// - /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket`], - /// otherwise using it may result in [`undefined behavior`]. - /// - /// # Safety - /// - /// If `mem::size_of::() != 0`, then the safety rules are directly derived from the - /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling - /// this function, the following safety rules must be observed: - /// - /// * The table must already be allocated; - /// - /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`] - /// function, i.e. `(index + 1) <= self.buckets()`. - /// - /// * The type `T` must be the actual type of the elements stored in the table, otherwise - /// using the returned [`Bucket`] may result in [`undefined behavior`]. - /// - /// It is safe to call this function with index of zero (`index == 0`) on a table that has - /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`]. - /// - /// If `mem::size_of::() == 0`, then the only requirement is that the `index` must - /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e. - /// `(index + 1) <= self.buckets()`. - /// - /// ```none - /// If mem::size_of::() != 0 then return a pointer to the `element` in the `data part` of the table - /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than - /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"): - /// - /// `table.bucket(3).as_ptr()` returns a pointer that points here in the `data` - /// part of the `RawTableInner`, i.e. 
to the start of T3 (see [`Bucket::as_ptr`]) - /// | - /// | `base = table.data_end::()` points here - /// | (to the start of CT0 or to the end of T0) - /// v v - /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m - /// ^ \__________ __________/ - /// `table.bucket(3)` returns a pointer that points \/ - /// here in the `data` part of the `RawTableInner` additional control bytes - /// (to the end of T3) `m = Group::WIDTH - 1` - /// - /// where: T0...T_n - our stored data; - /// CT0...CT_n - control bytes or metadata for `data`; - /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from - /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask` - /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function. - /// - /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number - /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. - /// ``` - /// - /// [`Bucket::from_base_index`]: Bucket::from_base_index - /// [`RawTableInner::buckets`]: RawTableInner::buckets - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn bucket(&self, index: usize) -> Bucket { debug_assert_ne!(self.bucket_mask, 0); @@ -2504,52 +1219,6 @@ Bucket::from_base_index(self.data_end(), index) } - /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table - /// (convenience for `self.data_end::().as_ptr().sub((index + 1) * size_of)`). - /// - /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`, - /// otherwise using it may result in [`undefined behavior`]. 
- /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is [`undefined behavior`]: - /// - /// * The table must already be allocated; - /// - /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`] - /// function, i.e. `(index + 1) <= self.buckets()`; - /// - /// * The `size_of` must be equal to the size of the elements stored in the table; - /// - /// ```none - /// If mem::size_of::() != 0 then return a pointer to the `element` in the `data part` of the table - /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than - /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"): - /// - /// `table.bucket_ptr(3, mem::size_of::())` returns a pointer that points here in the - /// `data` part of the `RawTableInner`, i.e. to the start of T3 - /// | - /// | `base = table.data_end::()` points here - /// | (to the start of CT0 or to the end of T0) - /// v v - /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m - /// \__________ __________/ - /// \/ - /// additional control bytes - /// `m = Group::WIDTH - 1` - /// - /// where: T0...T_n - our stored data; - /// CT0...CT_n - control bytes or metadata for `data`; - /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from - /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask` - /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function. - /// - /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number - /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. 
- /// ``` - /// - /// [`RawTableInner::buckets`]: RawTableInner::buckets - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 { debug_assert_ne!(self.bucket_mask, 0); @@ -2558,47 +1227,9 @@ base.sub((index + 1) * size_of) } - /// Returns pointer to one past last `data` element in the the table as viewed from - /// the start point of the allocation (convenience for `self.ctrl.cast()`). - /// - /// This function actually returns a pointer to the end of the `data element` at - /// index "0" (zero). - /// - /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull`], - /// otherwise using it may result in [`undefined behavior`]. - /// - /// # Note - /// - /// The type `T` must be the actual type of the elements stored in the table, otherwise - /// using the returned [`NonNull`] may result in [`undefined behavior`]. - /// - /// ```none - /// `table.data_end::()` returns pointer that points here - /// (to the end of `T0`) - /// ∨ - /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m - /// \________ ________/ - /// \/ - /// `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` - /// - /// where: T0...T_n - our stored data; - /// CT0...CT_n - control bytes or metadata for `data`. - /// CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search - /// with loading `Group` bytes from the heap works properly, even if the result - /// of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also - /// `RawTableInner::set_ctrl` function. - /// - /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number - /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. 
- /// ``` - /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - fn data_end(&self) -> NonNull { - unsafe { - // SAFETY: `self.ctrl` is `NonNull`, so casting it is safe - NonNull::new_unchecked(self.ctrl.as_ptr().cast()) - } + unsafe fn data_end(&self) -> NonNull { + NonNull::new_unchecked(self.ctrl.as_ptr().cast()) } /// Returns an iterator-like object for a probe sequence on the table. @@ -2609,8 +1240,6 @@ #[inline] fn probe_seq(&self, hash: u64) -> ProbeSeq { ProbeSeq { - // This is the same as `hash as usize % self.buckets()` because the number - // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. pos: h1(hash) & self.bucket_mask, stride: 0, } @@ -2621,7 +1250,7 @@ #[cfg(feature = "raw")] #[inline] unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result { - let index = self.find_insert_slot(hash).index; + let index = self.find_insert_slot(hash); let old_ctrl = *self.ctrl(index); if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) { Err(()) @@ -2648,68 +1277,13 @@ /// Sets a control byte to the hash, and possibly also the replicated control byte at /// the end of the array. - /// - /// This function does not make any changes to the `data` parts of the table, - /// or any changes to the the `items` or `growth_left` field of the table. - /// - /// # Safety - /// - /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`] - /// method. Thus, in order to uphold the safety contracts for the method, you must observe the - /// following rules when calling this function: - /// - /// * The [`RawTableInner`] has already been allocated; - /// - /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. - /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must - /// be no greater than the number returned by the function [`RawTableInner::buckets`]. 
- /// - /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. - /// - /// See also [`Bucket::as_ptr`] method, for more information about of properly removing - /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. - /// - /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl - /// [`RawTableInner::buckets`]: RawTableInner::buckets - /// [`Bucket::as_ptr`]: Bucket::as_ptr - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - unsafe fn set_ctrl_h2(&mut self, index: usize, hash: u64) { - // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`] + unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) { self.set_ctrl(index, h2(hash)); } - /// Replaces the hash in the control byte at the given index with the provided one, - /// and possibly also replicates the new control byte at the end of the array of control - /// bytes, returning the old control byte. - /// - /// This function does not make any changes to the `data` parts of the table, - /// or any changes to the the `items` or `growth_left` field of the table. - /// - /// # Safety - /// - /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_h2`] - /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both - /// methods, you must observe the following rules when calling this function: - /// - /// * The [`RawTableInner`] has already been allocated; - /// - /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. - /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must - /// be no greater than the number returned by the function [`RawTableInner::buckets`]. - /// - /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. 
- /// - /// See also [`Bucket::as_ptr`] method, for more information about of properly removing - /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. - /// - /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2 - /// [`RawTableInner::buckets`]: RawTableInner::buckets - /// [`Bucket::as_ptr`]: Bucket::as_ptr - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - unsafe fn replace_ctrl_h2(&mut self, index: usize, hash: u64) -> u8 { - // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`] + unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 { let prev_ctrl = *self.ctrl(index); self.set_ctrl_h2(index, hash); prev_ctrl @@ -2717,35 +1291,10 @@ /// Sets a control byte, and possibly also the replicated control byte at /// the end of the array. - /// - /// This function does not make any changes to the `data` parts of the table, - /// or any changes to the the `items` or `growth_left` field of the table. - /// - /// # Safety - /// - /// You must observe the following safety rules when calling this function: - /// - /// * The [`RawTableInner`] has already been allocated; - /// - /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. - /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must - /// be no greater than the number returned by the function [`RawTableInner::buckets`]. - /// - /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. - /// - /// See also [`Bucket::as_ptr`] method, for more information about of properly removing - /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. 
- /// - /// [`RawTableInner::buckets`]: RawTableInner::buckets - /// [`Bucket::as_ptr`]: Bucket::as_ptr - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - unsafe fn set_ctrl(&mut self, index: usize, ctrl: u8) { + unsafe fn set_ctrl(&self, index: usize, ctrl: u8) { // Replicate the first Group::WIDTH control bytes at the end of - // the array without using a branch. If the tables smaller than - // the group width (self.buckets() < Group::WIDTH), - // `index2 = Group::WIDTH + index`, otherwise `index2` is: - // + // the array without using a branch: // - If index >= Group::WIDTH then index == index2. // - Otherwise index2 == self.bucket_mask + 1 + index. // @@ -2762,43 +1311,16 @@ // --------------------------------------------- // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] | // --------------------------------------------- - - // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH` - // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH; - // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`] *self.ctrl(index) = ctrl; *self.ctrl(index2) = ctrl; } /// Returns a pointer to a control byte. - /// - /// # Safety - /// - /// For the allocated [`RawTableInner`], the result is [`Undefined Behavior`], - /// if the `index` is greater than the `self.bucket_mask + 1 + Group::WIDTH`. - /// In that case, calling this function with `index == self.bucket_mask + 1 + Group::WIDTH` - /// will return a pointer to the end of the allocated table and it is useless on its own. - /// - /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a - /// table that has not been allocated results in [`Undefined Behavior`]. 
- /// - /// So to satisfy both requirements you should always follow the rule that - /// `index < self.bucket_mask + 1 + Group::WIDTH` - /// - /// Calling this function on [`RawTableInner`] that are not already allocated is safe - /// for read-only purpose. - /// - /// See also [`Bucket::as_ptr()`] method, for more information about of properly removing - /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. - /// - /// [`Bucket::as_ptr()`]: Bucket::as_ptr() - /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn ctrl(&self, index: usize) -> *mut u8 { debug_assert!(index < self.num_ctrl_bytes()); - // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`] self.ctrl.as_ptr().add(index) } @@ -2807,17 +1329,6 @@ self.bucket_mask + 1 } - /// Checks whether the bucket at `index` is full. - /// - /// # Safety - /// - /// The caller must ensure `index` is less than the number of buckets. - #[inline] - unsafe fn is_bucket_full(&self, index: usize) -> bool { - debug_assert!(index < self.buckets()); - is_full(*self.ctrl(index)) - } - #[inline] fn num_ctrl_bytes(&self) -> usize { self.bucket_mask + 1 + Group::WIDTH @@ -2828,45 +1339,25 @@ self.bucket_mask == 0 } - /// Attempts to allocate a new hash table with at least enough capacity - /// for inserting the given number of elements without reallocating, - /// and return it inside ScopeGuard to protect against panic in the hash - /// function. - /// - /// # Note - /// - /// It is recommended (but not required): - /// - /// * That the new table's `capacity` be greater than or equal to `self.items`. - /// - /// * The `alloc` is the same [`Allocator`] as the `Allocator` used - /// to allocate this table. - /// - /// * The `table_layout` is the same [`TableLayout`] as the `TableLayout` used - /// to allocate this table. 
- /// - /// If `table_layout` does not match the `TableLayout` that was used to allocate - /// this table, then using `mem::swap` with the `self` and the new table returned - /// by this function results in [`undefined behavior`]. - /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::mut_mut)] #[inline] - fn prepare_resize<'a, A>( + unsafe fn prepare_resize( &self, - alloc: &'a A, table_layout: TableLayout, capacity: usize, fallibility: Fallibility, - ) -> Result, TryReserveError> - where - A: Allocator, - { + ) -> Result, TryReserveError> { debug_assert!(self.items <= capacity); // Allocate and initialize the new table. - let new_table = - RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?; + let mut new_table = RawTableInner::fallible_with_capacity( + self.alloc.clone(), + table_layout, + capacity, + fallibility, + )?; + new_table.growth_left -= self.items; + new_table.items = self.items; // The hash function may panic, in which case we simply free the new // table without dropping any elements that may have been copied into @@ -2876,11 +1367,7 @@ // the comment at the bottom of this function. Ok(guard(new_table, move |self_| { if !self_.is_empty_singleton() { - // SAFETY: - // 1. We have checked that our table is allocated. - // 2. We know for sure that the `alloc` and `table_layout` matches the - // [`Allocator`] and [`TableLayout`] used to allocate this table. - unsafe { self_.free_buckets(alloc, table_layout) }; + self_.free_buckets(table_layout); } })) } @@ -2889,38 +1376,16 @@ /// /// This uses dynamic dispatch to reduce the amount of /// code generated, but it is eliminated by LLVM optimizations when inlined. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is - /// [`undefined behavior`]: - /// - /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used - /// to allocate this table. 
- /// - /// * The `layout` must be the same [`TableLayout`] as the `TableLayout` - /// used to allocate this table. - /// - /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of - /// the elements stored in the table. - /// - /// * The [`RawTableInner`] must have properly initialized control bytes. - /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::inline_always)] #[inline(always)] - unsafe fn reserve_rehash_inner( + unsafe fn reserve_rehash_inner( &mut self, - alloc: &A, additional: usize, hasher: &dyn Fn(&mut Self, usize) -> u64, fallibility: Fallibility, layout: TableLayout, drop: Option, - ) -> Result<(), TryReserveError> - where - A: Allocator, - { + ) -> Result<(), TryReserveError> { // Avoid `Option::ok_or_else` because it bloats LLVM IR. let new_items = match self.items.checked_add(additional) { Some(new_items) => new_items, @@ -2930,30 +1395,12 @@ if new_items <= full_capacity / 2 { // Rehash in-place without re-allocating if we have plenty of spare // capacity that is locked up due to DELETED entries. - - // SAFETY: - // 1. We know for sure that `[`RawTableInner`]` has already been allocated - // (since new_items <= full_capacity / 2); - // 2. The caller ensures that `drop` function is the actual drop function of - // the elements stored in the table. - // 3. The caller ensures that `layout` matches the [`TableLayout`] that was - // used to allocate this table. - // 4. The caller ensures that the control bytes of the `RawTableInner` - // are already initialized. self.rehash_in_place(hasher, layout.size, drop); Ok(()) } else { // Otherwise, conservatively resize to at least the next size up // to avoid churning deletes into frequent rehashes. - // - // SAFETY: - // 1. We know for sure that `capacity >= self.items`. - // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and - // [`TableLayout`] that were used to allocate this table. 
- // 3. The caller ensures that the control bytes of the `RawTableInner` - // are already initialized. self.resize_inner( - alloc, usize::max(new_items, full_capacity + 1), hasher, fallibility, @@ -2962,160 +1409,48 @@ } } - /// Returns an iterator over full buckets indices in the table. - /// - /// # Safety - /// - /// Behavior is undefined if any of the following conditions are violated: - /// - /// * The caller has to ensure that the `RawTableInner` outlives the - /// `FullBucketsIndices`. Because we cannot make the `next` method - /// unsafe on the `FullBucketsIndices` struct, we have to make the - /// `full_buckets_indices` method unsafe. - /// - /// * The [`RawTableInner`] must have properly initialized control bytes. - #[inline(always)] - unsafe fn full_buckets_indices(&self) -> FullBucketsIndices { - // SAFETY: - // 1. Since the caller of this function ensures that the control bytes - // are properly initialized and `self.ctrl(0)` points to the start - // of the array of control bytes, therefore: `ctrl` is valid for reads, - // properly aligned to `Group::WIDTH` and points to the properly initialized - // control bytes. - // 2. The value of `items` is equal to the amount of data (values) added - // to the table. - // - // `ctrl` points here (to the start - // of the first control byte `CT0`) - // ∨ - // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH - // \________ ________/ - // \/ - // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` - // - // where: T0...T_n - our stored data; - // CT0...CT_n - control bytes or metadata for `data`. - let ctrl = NonNull::new_unchecked(self.ctrl(0)); - - FullBucketsIndices { - // Load the first group - // SAFETY: See explanation above. - current_group: Group::load_aligned(ctrl.as_ptr()).match_full().into_iter(), - group_first_index: 0, - ctrl, - items: self.items, - } - } - /// Allocates a new table of a different size and moves the contents of the /// current table into it. 
/// /// This uses dynamic dispatch to reduce the amount of /// code generated, but it is eliminated by LLVM optimizations when inlined. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is - /// [`undefined behavior`]: - /// - /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used - /// to allocate this table; - /// - /// * The `layout` must be the same [`TableLayout`] as the `TableLayout` - /// used to allocate this table; - /// - /// * The [`RawTableInner`] must have properly initialized control bytes. - /// - /// The caller of this function must ensure that `capacity >= self.items` - /// otherwise: - /// - /// * If `self.items != 0`, calling of this function with `capacity == 0` - /// results in [`undefined behavior`]. - /// - /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and - /// `self.items > capacity_to_buckets(capacity)` calling this function - /// results in [`undefined behavior`]. - /// - /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and - /// `self.items > capacity_to_buckets(capacity)` calling this function - /// are never return (will go into an infinite loop). - /// - /// Note: It is recommended (but not required) that the new table's `capacity` - /// be greater than or equal to `self.items`. In case if `capacity <= self.items` - /// this function can never return. See [`RawTableInner::find_insert_slot`] for - /// more information. 
- /// - /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::inline_always)] #[inline(always)] - unsafe fn resize_inner( + unsafe fn resize_inner( &mut self, - alloc: &A, capacity: usize, hasher: &dyn Fn(&mut Self, usize) -> u64, fallibility: Fallibility, layout: TableLayout, - ) -> Result<(), TryReserveError> - where - A: Allocator, - { - // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`] - // that were used to allocate this table. - let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?; - - // SAFETY: We know for sure that RawTableInner will outlive the - // returned `FullBucketsIndices` iterator, and the caller of this - // function ensures that the control bytes are properly initialized. - for full_byte_index in self.full_buckets_indices() { + ) -> Result<(), TryReserveError> { + let mut new_table = self.prepare_resize(layout, capacity, fallibility)?; + + // Copy all elements to the new table. + for i in 0..self.buckets() { + if !is_full(*self.ctrl(i)) { + continue; + } + // This may panic. - let hash = hasher(self, full_byte_index); + let hash = hasher(self, i); - // SAFETY: // We can use a simpler version of insert() here since: - // 1. There are no DELETED entries. - // 2. We know there is enough space in the table. - // 3. All elements are unique. - // 4. The caller of this function guarantees that `capacity > 0` - // so `new_table` must already have some allocated memory. - // 5. We set `growth_left` and `items` fields of the new table - // after the loop. - // 6. We insert into the table, at the returned index, the data - // matching the given hash immediately after calling this function. - let (new_index, _) = new_table.prepare_insert_slot(hash); + // - there are no DELETED entries. + // - we know there is enough space in the table. 
+ // - all elements are unique. + let (index, _) = new_table.prepare_insert_slot(hash); - // SAFETY: - // - // * `src` is valid for reads of `layout.size` bytes, since the - // table is alive and the `full_byte_index` is guaranteed to be - // within bounds (see `FullBucketsIndices::next_impl`); - // - // * `dst` is valid for writes of `layout.size` bytes, since the - // caller ensures that `table_layout` matches the [`TableLayout`] - // that was used to allocate old table and we have the `new_index` - // returned by `prepare_insert_slot`. - // - // * Both `src` and `dst` are properly aligned. - // - // * Both `src` and `dst` point to different region of memory. ptr::copy_nonoverlapping( - self.bucket_ptr(full_byte_index, layout.size), - new_table.bucket_ptr(new_index, layout.size), + self.bucket_ptr(i, layout.size), + new_table.bucket_ptr(index, layout.size), layout.size, ); } - // The hash function didn't panic, so we can safely set the - // `growth_left` and `items` fields of the new table. - new_table.growth_left -= self.items; - new_table.items = self.items; - // We successfully copied all elements without panicking. Now replace // self with the new table. The old table will have its memory freed but // the items will not be dropped (since they have been moved into the // new table). - // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`] - // that was used to allocate this table. mem::swap(self, &mut new_table); Ok(()) @@ -3128,21 +1463,6 @@ /// /// This uses dynamic dispatch to reduce the amount of /// code generated, but it is eliminated by LLVM optimizations when inlined. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is [`undefined behavior`]: - /// - /// * The `size_of` must be equal to the size of the elements stored in the table; - /// - /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of - /// the elements stored in the table. 
- /// - /// * The [`RawTableInner`] has already been allocated; - /// - /// * The [`RawTableInner`] must have properly initialized control bytes. - /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::inline_always)] #[cfg_attr(feature = "inline-more", inline(always))] #[cfg_attr(not(feature = "inline-more"), inline)] @@ -3186,10 +1506,8 @@ let hash = hasher(*guard, i); // Search for a suitable place to put it - // - // SAFETY: Caller of this function ensures that the control bytes - // are properly initialized. - let new_i = guard.find_insert_slot(hash).index; + let new_i = guard.find_insert_slot(hash); + let new_i_p = guard.bucket_ptr(new_i, size_of); // Probing works by scanning through all of the control // bytes in groups, which may not be aligned to the group @@ -3201,8 +1519,6 @@ continue 'outer; } - let new_i_p = guard.bucket_ptr(new_i, size_of); - // We are moving the current item to a new position. Write // our H2 to the control byte of the new position. let prev_ctrl = guard.replace_ctrl_h2(new_i, hash); @@ -3229,107 +1545,17 @@ mem::forget(guard); } - /// Deallocates the table without dropping any entries. - /// - /// # Note - /// - /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements), - /// else it can lead to leaking of memory. Also calling this function automatically - /// makes invalid (dangling) all instances of buckets ([`Bucket`]) and makes invalid - /// (dangling) the `ctrl` field of the table. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is [`Undefined Behavior`]: - /// - /// * The [`RawTableInner`] has already been allocated; - /// - /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used - /// to allocate this table. - /// - /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used - /// to allocate this table. 
- /// - /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. - /// - /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc - /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate #[inline] - unsafe fn free_buckets(&mut self, alloc: &A, table_layout: TableLayout) - where - A: Allocator, - { - // SAFETY: The caller must uphold the safety contract for `free_buckets` - // method. - let (ptr, layout) = self.allocation_info(table_layout); - alloc.deallocate(ptr, layout); - } - - /// Returns a pointer to the allocated memory and the layout that was used to - /// allocate the table. - /// - /// # Safety - /// - /// Caller of this function must observe the following safety rules: - /// - /// * The [`RawTableInner`] has already been allocated, otherwise - /// calling this function results in [`undefined behavior`] - /// - /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` - /// that was used to allocate this table. Failure to comply with this condition - /// may result in [`undefined behavior`]. - /// - /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. 
- /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc - /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate - #[inline] - unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull, Layout) { - debug_assert!( - !self.is_empty_singleton(), - "this function can only be called on non-empty tables" - ); - + unsafe fn free_buckets(&mut self, table_layout: TableLayout) { // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) { Some(lco) => lco, - None => unsafe { hint::unreachable_unchecked() }, + None => hint::unreachable_unchecked(), }; - ( - // SAFETY: The caller must uphold the safety contract for `allocation_info` method. - unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) }, + self.alloc.deallocate( + NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)), layout, - ) - } - - /// Returns a pointer to the allocated memory and the layout that was used to - /// allocate the table. If [`RawTableInner`] has not been allocated, this - /// function return `dangling` pointer and `()` (unit) layout. - /// - /// # Safety - /// - /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout` - /// that was used to allocate this table. Failure to comply with this condition - /// may result in [`undefined behavior`]. - /// - /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. 
- /// - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc - /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate - #[cfg(feature = "raw")] - unsafe fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull, Layout) { - if self.is_empty_singleton() { - (NonNull::dangling(), Layout::new::<()>()) - } else { - // SAFETY: - // 1. We have checked that our table is allocated. - // 2. The caller ensures that `table_layout` matches the [`TableLayout`] - // that was used to allocate this table. - unsafe { self.allocation_info(table_layout) } - } + ); } /// Marks all table buckets as empty without dropping their contents. @@ -3344,95 +1570,27 @@ self.growth_left = bucket_mask_to_capacity(self.bucket_mask); } - /// Erases the [`Bucket`]'s control byte at the given index so that it does not - /// triggered as full, decreases the `items` of the table and, if it can be done, - /// increases `self.growth_left`. - /// - /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it - /// does not make any changes to the `data` parts of the table. The caller of this - /// function must take care to properly drop the `data`, otherwise calling this - /// function may result in a memory leak. - /// - /// # Safety - /// - /// You must observe the following safety rules when calling this function: - /// - /// * The [`RawTableInner`] has already been allocated; - /// - /// * It must be the full control byte at the given position; - /// - /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. - /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must - /// be no greater than the number returned by the function [`RawTableInner::buckets`]. 
- /// - /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. - /// - /// Calling this function on a table with no elements is unspecified, but calling subsequent - /// functions is likely to result in [`undefined behavior`] due to overflow subtraction - /// (`self.items -= 1 cause overflow when self.items == 0`). - /// - /// See also [`Bucket::as_ptr`] method, for more information about of properly removing - /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. - /// - /// [`RawTableInner::buckets`]: RawTableInner::buckets - /// [`Bucket::as_ptr`]: Bucket::as_ptr - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn erase(&mut self, index: usize) { - debug_assert!(self.is_bucket_full(index)); - - // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because - // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + debug_assert!(is_full(*self.ctrl(index))); let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask; - // SAFETY: - // - The caller must uphold the safety contract for `erase` method; - // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask` let empty_before = Group::load(self.ctrl(index_before)).match_empty(); let empty_after = Group::load(self.ctrl(index)).match_empty(); - // Inserting and searching in the map is performed by two key functions: - // - // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED` - // slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED` - // slot immediately in the first group, it jumps to the next `Group` looking for it, - // and so on until it has gone through all the groups in the control bytes. 
- // - // - The `find_inner` function that looks for the index of the desired element by looking - // at all the `FULL` bytes in the group. If it did not find the element right away, and - // there is no `EMPTY` byte in the group, then this means that the `find_insert_slot` - // function may have found a suitable slot in the next group. Therefore, `find_inner` - // jumps further, and if it does not find the desired element and again there is no `EMPTY` - // byte, then it jumps further, and so on. The search stops only if `find_inner` function - // finds the desired element or hits an `EMPTY` slot/byte. - // - // Accordingly, this leads to two consequences: - // - // - The map must have `EMPTY` slots (bytes); - // - // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner` - // function may stumble upon an `EMPTY` byte before finding the desired element and stop - // searching. - // - // Thus it is necessary to check all bytes after and before the erased element. If we are in - // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes - // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as - // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there - // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble - // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well. - // - // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index` - // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH) - // cannot have `DELETED` bytes. - // - // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while - // `trailing_zeros` refers to the bytes at the beginning of a group. 
+ // If we are inside a continuous block of Group::WIDTH full or deleted + // cells then a probe window may have seen a full block when trying to + // insert. We therefore need to keep that block non-empty so that + // lookups will continue searching to the next probe window. + // + // Note that in this context `leading_zeros` refers to the bytes at the + // end of a group, while `trailing_zeros` refers to the bytes at the + // beginning of a group. let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH { DELETED } else { self.growth_left += 1; EMPTY }; - // SAFETY: the caller must uphold the safety contract for `erase` method. self.set_ctrl(index, ctrl); self.items -= 1; } @@ -3441,16 +1599,12 @@ impl Clone for RawTable { fn clone(&self) -> Self { if self.table.is_empty_singleton() { - Self::new_in(self.alloc.clone()) + Self::new_in(self.table.alloc.clone()) } else { unsafe { // Avoid `Result::ok_or_else` because it bloats LLVM IR. - // - // SAFETY: This is safe as we are taking the size of an already allocated table - // and therefore Ñapacity overflow cannot occur, `self.table.buckets()` is power - // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`. - let mut new_table = match Self::new_uninitialized( - self.alloc.clone(), + let new_table = match Self::new_uninitialized( + self.table.alloc.clone(), self.table.buckets(), Fallibility::Infallible, ) { @@ -3458,32 +1612,24 @@ Err(_) => hint::unreachable_unchecked(), }; - // Cloning elements may fail (the clone function may panic). But we don't - // need to worry about uninitialized control bits, since: - // 1. The number of items (elements) in the table is zero, which means that - // the control bits will not be readed by Drop function. - // 2. The `clone_from_spec` method will first copy all control bits from - // `self` (thus initializing them). 
But this will not affect the `Drop` - // function, since the `clone_from_spec` function sets `items` only after - // successfully clonning all elements. - new_table.clone_from_spec(self); - new_table + // If cloning fails then we need to free the allocation for the + // new table. However we don't run its drop since its control + // bytes are not initialized yet. + let mut guard = guard(ManuallyDrop::new(new_table), |new_table| { + new_table.free_buckets(); + }); + + guard.clone_from_spec(self); + + // Disarm the scope guard and return the newly created table. + ManuallyDrop::into_inner(ScopeGuard::into_inner(guard)) } } } fn clone_from(&mut self, source: &Self) { if source.table.is_empty_singleton() { - let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW); - unsafe { - // SAFETY: - // 1. We call the function only once; - // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] - // and [`TableLayout`] that were used to allocate this table. - // 3. If any elements' drop function panics, then there will only be a memory leak, - // because we have replaced the inner table with a new one. - old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); - } + *self = Self::new_in(self.table.alloc.clone()); } else { unsafe { // Make sure that if any panics occurs, we clear the table and @@ -3498,38 +1644,27 @@ // // This leak is unavoidable: we can't try dropping more elements // since this could lead to another panic and abort the process. - // - // SAFETY: If something gets wrong we clear our table right after - // dropping the elements, so there is no double drop, since `items` - // will be equal to zero. - self_.table.drop_elements::(); + self_.drop_elements(); // If necessary, resize our table to match the source. 
if self_.buckets() != source.buckets() { - let new_inner = match RawTableInner::new_uninitialized( - &self_.alloc, - Self::TABLE_LAYOUT, - source.buckets(), - Fallibility::Infallible, - ) { - Ok(table) => table, - Err(_) => hint::unreachable_unchecked(), - }; - // Replace the old inner with new uninitialized one. It's ok, since if something gets - // wrong `ScopeGuard` will initialize all control bytes and leave empty table. - let mut old_inner = mem::replace(&mut self_.table, new_inner); - if !old_inner.is_empty_singleton() { - // SAFETY: - // 1. We have checked that our table is allocated. - // 2. We know for sure that `alloc` and `table_layout` matches - // the [`Allocator`] and [`TableLayout`] that were used to allocate this table. - old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT); + // Skip our drop by using ptr::write. + if !self_.table.is_empty_singleton() { + self_.free_buckets(); } + (&mut **self_ as *mut Self).write( + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + match Self::new_uninitialized( + self_.table.alloc.clone(), + source.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }, + ); } - // Cloning elements may fail (the clone function may panic), but the `ScopeGuard` - // inside the `clone_from_impl` function will take care of that, dropping all - // cloned elements if necessary. Our `ScopeGuard` will clear the table. self_.clone_from_spec(source); // Disarm the scope guard if cloning was successful. @@ -3561,8 +1696,7 @@ .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); source .data_start() - .as_ptr() - .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets()); + .copy_to_nonoverlapping(self.data_start(), self.table.buckets()); self.table.items = source.table.items; self.table.growth_left = source.table.growth_left; @@ -3586,9 +1720,9 @@ // to make sure we drop only the elements that have been // cloned so far. 
let mut guard = guard((0, &mut *self), |(index, self_)| { - if T::NEEDS_DROP { + if mem::needs_drop::() && !self_.is_empty() { for i in 0..=*index { - if self_.is_bucket_full(i) { + if is_full(*self_.table.ctrl(i)) { self_.bucket(i).drop(); } } @@ -3623,7 +1757,7 @@ { self.clear(); - let mut guard_self = guard(&mut *self, |self_| { + let guard_self = guard(&mut *self, |self_| { // Clear the partially copied table if a panic occurs, otherwise // items and growth_left will be out of sync with the contents // of the table. @@ -3656,7 +1790,7 @@ } } -impl Default for RawTable { +impl Default for RawTable { #[inline] fn default() -> Self { Self::new_in(Default::default()) @@ -3664,41 +1798,31 @@ } #[cfg(feature = "nightly")] -unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable { +unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { - unsafe { - // SAFETY: - // 1. We call the function only once; - // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] - // and [`TableLayout`] that were used to allocate this table. - // 3. If the drop function of any elements fails, then only a memory leak will occur, - // and we don't care because we are inside the `Drop` function of the `RawTable`, - // so there won't be any table left in an inconsistent state. - self.table - .drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + if !self.table.is_empty_singleton() { + unsafe { + self.drop_elements(); + self.free_buckets(); + } } } } #[cfg(not(feature = "nightly"))] -impl Drop for RawTable { +impl Drop for RawTable { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { - unsafe { - // SAFETY: - // 1. We call the function only once; - // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] - // and [`TableLayout`] that were used to allocate this table. - // 3. 
If the drop function of any elements fails, then only a memory leak will occur, - // and we don't care because we are inside the `Drop` function of the `RawTable`, - // so there won't be any table left in an inconsistent state. - self.table - .drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + if !self.table.is_empty_singleton() { + unsafe { + self.drop_elements(); + self.free_buckets(); + } } } } -impl IntoIterator for RawTable { +impl IntoIterator for RawTable { type Item = T; type IntoIter = RawIntoIter; @@ -3716,7 +1840,7 @@ pub(crate) struct RawIterRange { // Mask of full buckets in the current group. Bits are cleared from this // mask as each element is processed. - current_group: BitMaskIter, + current_group: BitMask, // Pointer to the buckets for the current group. data: Bucket, @@ -3732,44 +1856,19 @@ impl RawIterRange { /// Returns a `RawIterRange` covering a subset of a table. /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is - /// [`undefined behavior`]: - /// - /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`; - /// - /// * `ctrl` must be properly aligned to the group size (Group::WIDTH); - /// - /// * `ctrl` must point to the array of properly initialized control bytes; - /// - /// * `data` must be the [`Bucket`] at the `ctrl` index in the table; - /// - /// * the value of `len` must be less than or equal to the number of table buckets, - /// and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())` - /// must be positive. - /// - /// * The `ctrl.add(len)` pointer must be either in bounds or one - /// byte past the end of the same [allocated table]. - /// - /// * The `len` must be a power of two. - /// - /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety - /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// The control byte address must be aligned to the group size. 
#[cfg_attr(feature = "inline-more", inline)] unsafe fn new(ctrl: *const u8, data: Bucket, len: usize) -> Self { debug_assert_ne!(len, 0); debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); - // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] let end = ctrl.add(len); // Load the first group and advance ctrl to point to the next group - // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] let current_group = Group::load_aligned(ctrl).match_full(); let next_ctrl = ctrl.add(Group::WIDTH); Self { - current_group: current_group.into_iter(), + current_group, data, next_ctrl, end, @@ -3826,7 +1925,8 @@ #[cfg_attr(feature = "inline-more", inline)] unsafe fn next_impl(&mut self) -> Option> { loop { - if let Some(index) = self.current_group.next() { + if let Some(index) = self.current_group.lowest_set_bit() { + self.current_group = self.current_group.remove_lowest_bit(); return Some(self.data.next_n(index)); } @@ -3839,86 +1939,7 @@ // than the group size where the trailing control bytes are all // EMPTY. On larger tables self.end is guaranteed to be aligned // to the group size (since tables are power-of-two sized). - self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter(); - self.data = self.data.next_n(Group::WIDTH); - self.next_ctrl = self.next_ctrl.add(Group::WIDTH); - } - } - - /// Folds every element into an accumulator by applying an operation, - /// returning the final result. - /// - /// `fold_impl()` takes three arguments: the number of items remaining in - /// the iterator, an initial value, and a closure with two arguments: an - /// 'accumulator', and an element. The closure returns the value that the - /// accumulator should have for the next iteration. - /// - /// The initial value is the value the accumulator will have on the first call. - /// - /// After applying this closure to every element of the iterator, `fold_impl()` - /// returns the accumulator. 
- /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is - /// [`Undefined Behavior`]: - /// - /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved, - /// i.e. table outlives the `RawIterRange`; - /// - /// * The provided `n` value must match the actual number of items - /// in the table. - /// - /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[allow(clippy::while_let_on_iterator)] - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn fold_impl(mut self, mut n: usize, mut acc: B, mut f: F) -> B - where - F: FnMut(B, Bucket) -> B, - { - loop { - while let Some(index) = self.current_group.next() { - // The returned `index` will always be in the range `0..Group::WIDTH`, - // so that calling `self.data.next_n(index)` is safe (see detailed explanation below). - debug_assert!(n != 0); - let bucket = self.data.next_n(index); - acc = f(acc, bucket); - n -= 1; - } - - if n == 0 { - return acc; - } - - // SAFETY: The caller of this function ensures that: - // - // 1. The provided `n` value matches the actual number of items in the table; - // 2. The table is alive and did not moved. - // - // Taking the above into account, we always stay within the bounds, because: - // - // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH), - // we will never end up in the given branch, since we should have already - // yielded all the elements of the table. - // - // 2. For tables larger than the group width. The the number of buckets is a - // power of two (2 ^ n), Group::WIDTH is also power of two (2 ^ k). Sinse - // `(2 ^ n) > (2 ^ k)`, than `(2 ^ n) % (2 ^ k) = 0`. As we start from the - // the start of the array of control bytes, and never try to iterate after - // getting all the elements, the last `self.current_group` will read bytes - // from the `self.buckets() - Group::WIDTH` index. 
We know also that - // `self.current_group.next()` will always retun indices within the range - // `0..Group::WIDTH`. - // - // Knowing all of the above and taking into account that we are synchronizing - // the `self.data` index with the index we used to read the `self.current_group`, - // the subsequent `self.data.next_n(index)` will always return a bucket with - // an index number less than `self.buckets()`. - // - // The last `self.next_ctrl`, whose index would be `self.buckets()`, will never - // actually be read, since we should have already yielded all the elements of - // the table. - self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter(); + self.current_group = Group::load_aligned(self.next_ctrl).match_full(); self.data = self.data.next_n(Group::WIDTH); self.next_ctrl = self.next_ctrl.add(Group::WIDTH); } @@ -3995,7 +2016,7 @@ /// This method should be called _before_ the removal is made. It is not necessary to call this /// method if you are removing an item that this iterator yielded in the past. #[cfg(feature = "raw")] - pub unsafe fn reflect_remove(&mut self, b: &Bucket) { + pub fn reflect_remove(&mut self, b: &Bucket) { self.reflect_toggle_full(b, false); } @@ -4009,76 +2030,36 @@ /// /// This method should be called _after_ the given insert is made. #[cfg(feature = "raw")] - pub unsafe fn reflect_insert(&mut self, b: &Bucket) { + pub fn reflect_insert(&mut self, b: &Bucket) { self.reflect_toggle_full(b, true); } /// Refresh the iterator so that it reflects a change to the state of the given bucket. #[cfg(feature = "raw")] - unsafe fn reflect_toggle_full(&mut self, b: &Bucket, is_insert: bool) { - if b.as_ptr() > self.iter.data.as_ptr() { - // The iterator has already passed the bucket's group. - // So the toggle isn't relevant to this iterator. - return; - } - - if self.iter.next_ctrl < self.iter.end - && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr() - { - // The iterator has not yet reached the bucket's group. 
- // We don't need to reload anything, but we do need to adjust the item count. - - if cfg!(debug_assertions) { - // Double-check that the user isn't lying to us by checking the bucket state. - // To do that, we need to find its control byte. We know that self.iter.data is - // at self.iter.next_ctrl - Group::WIDTH, so we work from there: - let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr()); - let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset); - // This method should be called _before_ a removal, or _after_ an insert, - // so in both cases the ctrl byte should indicate that the bucket is full. - assert!(is_full(*ctrl)); - } - - if is_insert { - self.items += 1; - } else { - self.items -= 1; + fn reflect_toggle_full(&mut self, b: &Bucket, is_insert: bool) { + unsafe { + if b.as_ptr() > self.iter.data.as_ptr() { + // The iterator has already passed the bucket's group. + // So the toggle isn't relevant to this iterator. + return; } - return; - } + if self.iter.next_ctrl < self.iter.end + && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr() + { + // The iterator has not yet reached the bucket's group. + // We don't need to reload anything, but we do need to adjust the item count. - // The iterator is at the bucket group that the toggled bucket is in. - // We need to do two things: - // - // - Determine if the iterator already yielded the toggled bucket. - // If it did, we're done. - // - Otherwise, update the iterator cached group so that it won't - // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket. - // We'll also need to update the item count accordingly. - if let Some(index) = self.iter.current_group.0.lowest_set_bit() { - let next_bucket = self.iter.data.next_n(index); - if b.as_ptr() > next_bucket.as_ptr() { - // The toggled bucket is "before" the bucket the iterator would yield next. We - // therefore don't need to do anything --- the iterator has already passed the - // bucket in question. 
- // - // The item count must already be correct, since a removal or insert "prior" to - // the iterator's position wouldn't affect the item count. - } else { - // The removed bucket is an upcoming bucket. We need to make sure it does _not_ - // get yielded, and also that it's no longer included in the item count. - // - // NOTE: We can't just reload the group here, both since that might reflect - // inserts we've already passed, and because that might inadvertently unset the - // bits for _other_ removals. If we do that, we'd have to also decrement the - // item count for those other bits that we unset. But the presumably subsequent - // call to reflect for those buckets might _also_ decrement the item count. - // Instead, we _just_ flip the bit for the particular bucket the caller asked - // us to reflect. - let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr()); - let was_full = self.iter.current_group.flip(our_bit); - debug_assert_ne!(was_full, is_insert); + if cfg!(debug_assertions) { + // Double-check that the user isn't lying to us by checking the bucket state. + // To do that, we need to find its control byte. We know that self.iter.data is + // at self.iter.next_ctrl - Group::WIDTH, so we work from there: + let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset); + // This method should be called _before_ a removal, or _after_ an insert, + // so in both cases the ctrl byte should indicate that the bucket is full. + assert!(is_full(*ctrl)); + } if is_insert { self.items += 1; @@ -4086,23 +2067,65 @@ self.items -= 1; } - if cfg!(debug_assertions) { - if b.as_ptr() == next_bucket.as_ptr() { - // The removed bucket should no longer be next - debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index)); + return; + } + + // The iterator is at the bucket group that the toggled bucket is in. 
+ // We need to do two things: + // + // - Determine if the iterator already yielded the toggled bucket. + // If it did, we're done. + // - Otherwise, update the iterator cached group so that it won't + // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket. + // We'll also need to update the item count accordingly. + if let Some(index) = self.iter.current_group.lowest_set_bit() { + let next_bucket = self.iter.data.next_n(index); + if b.as_ptr() > next_bucket.as_ptr() { + // The toggled bucket is "before" the bucket the iterator would yield next. We + // therefore don't need to do anything --- the iterator has already passed the + // bucket in question. + // + // The item count must already be correct, since a removal or insert "prior" to + // the iterator's position wouldn't affect the item count. + } else { + // The removed bucket is an upcoming bucket. We need to make sure it does _not_ + // get yielded, and also that it's no longer included in the item count. + // + // NOTE: We can't just reload the group here, both since that might reflect + // inserts we've already passed, and because that might inadvertently unset the + // bits for _other_ removals. If we do that, we'd have to also decrement the + // item count for those other bits that we unset. But the presumably subsequent + // call to reflect for those buckets might _also_ decrement the item count. + // Instead, we _just_ flip the bit for the particular bucket the caller asked + // us to reflect. + let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let was_full = self.iter.current_group.flip(our_bit); + debug_assert_ne!(was_full, is_insert); + + if is_insert { + self.items += 1; } else { - // We should not have changed what bucket comes next. 
- debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index)); + self.items -= 1; + } + + if cfg!(debug_assertions) { + if b.as_ptr() == next_bucket.as_ptr() { + // The removed bucket should no longer be next + debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index)); + } else { + // We should not have changed what bucket comes next. + debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index)); + } } } + } else { + // We must have already iterated past the removed item. } - } else { - // We must have already iterated past the removed item. } } unsafe fn drop_elements(&mut self) { - if T::NEEDS_DROP && self.items != 0 { + if mem::needs_drop::() && self.len() != 0 { for item in self { item.drop(); } @@ -4136,8 +2159,9 @@ self.iter.next_impl::() }; - debug_assert!(nxt.is_some()); - self.items -= 1; + if nxt.is_some() { + self.items -= 1; + } nxt } @@ -4146,160 +2170,33 @@ fn size_hint(&self) -> (usize, Option) { (self.items, Some(self.items)) } - - #[inline] - fn fold(self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - unsafe { self.iter.fold_impl(self.items, init, f) } - } } impl ExactSizeIterator for RawIter {} impl FusedIterator for RawIter {} -/// Iterator which returns an index of every full bucket in the table. -/// -/// For maximum flexibility this iterator is not bound by a lifetime, but you -/// must observe several rules when using it: -/// - You must not free the hash table while iterating (including via growing/shrinking). -/// - It is fine to erase a bucket that has been yielded by the iterator. -/// - Erasing a bucket that has not yet been yielded by the iterator may still -/// result in the iterator yielding index of that bucket. -/// - It is unspecified whether an element inserted after the iterator was -/// created will be yielded by that iterator. -/// - The order in which the iterator yields indices of the buckets is unspecified -/// and may change in the future. 
-pub(crate) struct FullBucketsIndices { - // Mask of full buckets in the current group. Bits are cleared from this - // mask as each element is processed. - current_group: BitMaskIter, - - // Initial value of the bytes' indices of the current group (relative - // to the start of the control bytes). - group_first_index: usize, - - // Pointer to the current group of control bytes, - // Must be aligned to the group size (Group::WIDTH). - ctrl: NonNull, - - // Number of elements in the table. - items: usize, -} - -impl FullBucketsIndices { - /// Advances the iterator and returns the next value. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is - /// [`Undefined Behavior`]: - /// - /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved, - /// i.e. table outlives the `FullBucketsIndices`; - /// - /// * It never tries to iterate after getting all elements. - /// - /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[inline(always)] - unsafe fn next_impl(&mut self) -> Option { - loop { - if let Some(index) = self.current_group.next() { - // The returned `self.group_first_index + index` will always - // be in the range `0..self.buckets()`. See explanation below. - return Some(self.group_first_index + index); - } - - // SAFETY: The caller of this function ensures that: - // - // 1. It never tries to iterate after getting all the elements; - // 2. The table is alive and did not moved; - // 3. The first `self.ctrl` pointed to the start of the array of control bytes. - // - // Taking the above into account, we always stay within the bounds, because: - // - // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH), - // we will never end up in the given branch, since we should have already - // yielded all the elements of the table. - // - // 2. For tables larger than the group width. 
The the number of buckets is a - // power of two (2 ^ n), Group::WIDTH is also power of two (2 ^ k). Sinse - // `(2 ^ n) > (2 ^ k)`, than `(2 ^ n) % (2 ^ k) = 0`. As we start from the - // the start of the array of control bytes, and never try to iterate after - // getting all the elements, the last `self.ctrl` will be equal to - // the `self.buckets() - Group::WIDTH`, so `self.current_group.next()` - // will always contains indices within the range `0..Group::WIDTH`, - // and subsequent `self.group_first_index + index` will always return a - // number less than `self.buckets()`. - self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH)); - - // SAFETY: See explanation above. - self.current_group = Group::load_aligned(self.ctrl.as_ptr()) - .match_full() - .into_iter(); - self.group_first_index += Group::WIDTH; - } - } -} - -impl Iterator for FullBucketsIndices { - type Item = usize; - - /// Advances the iterator and returns the next value. It is up to - /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`, - /// because we cannot make the `next` method unsafe. - #[inline(always)] - fn next(&mut self) -> Option { - // Return if we already yielded all items. - if self.items == 0 { - return None; - } - - let nxt = unsafe { - // SAFETY: - // 1. We check number of items to yield using `items` field. - // 2. The caller ensures that the table is alive and has not moved. - self.next_impl() - }; - - debug_assert!(nxt.is_some()); - self.items -= 1; - - nxt - } - - #[inline(always)] - fn size_hint(&self) -> (usize, Option) { - (self.items, Some(self.items)) - } -} - -impl ExactSizeIterator for FullBucketsIndices {} -impl FusedIterator for FullBucketsIndices {} - /// Iterator which consumes a table and returns elements. 
-pub struct RawIntoIter { +pub struct RawIntoIter { iter: RawIter, - allocation: Option<(NonNull, Layout, A)>, + allocation: Option<(NonNull, Layout)>, marker: PhantomData, + alloc: A, } -impl RawIntoIter { +impl RawIntoIter { #[cfg_attr(feature = "inline-more", inline)] pub fn iter(&self) -> RawIter { self.iter.clone() } } -unsafe impl Send for RawIntoIter +unsafe impl Send for RawIntoIter where T: Send, A: Send, { } -unsafe impl Sync for RawIntoIter +unsafe impl Sync for RawIntoIter where T: Sync, A: Sync, @@ -4307,7 +2204,7 @@ } #[cfg(feature = "nightly")] -unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter { +unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -4315,14 +2212,14 @@ self.iter.drop_elements(); // Free the table - if let Some((ptr, layout, ref alloc)) = self.allocation { - alloc.deallocate(ptr, layout); + if let Some((ptr, layout)) = self.allocation { + self.alloc.deallocate(ptr, layout); } } } } #[cfg(not(feature = "nightly"))] -impl Drop for RawIntoIter { +impl Drop for RawIntoIter { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -4330,14 +2227,14 @@ self.iter.drop_elements(); // Free the table - if let Some((ptr, layout, ref alloc)) = self.allocation { - alloc.deallocate(ptr, layout); + if let Some((ptr, layout)) = self.allocation { + self.alloc.deallocate(ptr, layout); } } } } -impl Iterator for RawIntoIter { +impl Iterator for RawIntoIter { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -4351,45 +2248,45 @@ } } -impl ExactSizeIterator for RawIntoIter {} -impl FusedIterator for RawIntoIter {} +impl ExactSizeIterator for RawIntoIter {} +impl FusedIterator for RawIntoIter {} /// Iterator which consumes elements without freeing the table storage. 
-pub struct RawDrain<'a, T, A: Allocator = Global> { +pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { iter: RawIter, // The table is moved into the iterator for the duration of the drain. This // ensures that an empty table is left if the drain iterator is leaked // without dropping. - table: RawTableInner, - orig_table: NonNull, + table: ManuallyDrop>, + orig_table: NonNull>, // We don't use a &'a mut RawTable because we want RawDrain to be // covariant over T. marker: PhantomData<&'a RawTable>, } -impl RawDrain<'_, T, A> { +impl RawDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] pub fn iter(&self) -> RawIter { self.iter.clone() } } -unsafe impl Send for RawDrain<'_, T, A> +unsafe impl Send for RawDrain<'_, T, A> where T: Send, A: Send, { } -unsafe impl Sync for RawDrain<'_, T, A> +unsafe impl Sync for RawDrain<'_, T, A> where T: Sync, A: Sync, { } -impl Drop for RawDrain<'_, T, A> { +impl Drop for RawDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -4403,12 +2300,12 @@ // Move the now empty table back to its original location. self.orig_table .as_ptr() - .copy_from_nonoverlapping(&self.table, 1); + .copy_from_nonoverlapping(&*self.table, 1); } } } -impl Iterator for RawDrain<'_, T, A> { +impl Iterator for RawDrain<'_, T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -4425,36 +2322,21 @@ } } -impl ExactSizeIterator for RawDrain<'_, T, A> {} -impl FusedIterator for RawDrain<'_, T, A> {} +impl ExactSizeIterator for RawDrain<'_, T, A> {} +impl FusedIterator for RawDrain<'_, T, A> {} /// Iterator over occupied buckets that could match a given hash. /// /// `RawTable` only stores 7 bits of the hash value, so this iterator may return /// items that have a hash value different than the one provided. You should /// always validate the returned values before using them. 
-/// -/// For maximum flexibility this iterator is not bound by a lifetime, but you -/// must observe several rules when using it: -/// - You must not free the hash table while iterating (including via growing/shrinking). -/// - It is fine to erase a bucket that has been yielded by the iterator. -/// - Erasing a bucket that has not yet been yielded by the iterator may still -/// result in the iterator yielding that bucket. -/// - It is unspecified whether an element inserted after the iterator was -/// created will be yielded by that iterator. -/// - The order in which the iterator yields buckets is unspecified and may -/// change in the future. -pub struct RawIterHash { - inner: RawIterHashInner, +pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> { + inner: RawIterHashInner<'a, A>, _marker: PhantomData, } -struct RawIterHashInner { - // See `RawTableInner`'s corresponding fields for details. - // We can't store a `*const RawTableInner` as it would get - // invalidated by the user calling `&mut` methods on `RawTable`. - bucket_mask: usize, - ctrl: NonNull, +struct RawIterHashInner<'a, A: Allocator + Clone> { + table: &'a RawTableInner, // The top 7 bits of the hash. 
h2_hash: u8, @@ -4468,105 +2350,71 @@ bitmask: BitMaskIter, } -impl RawIterHash { +impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> { #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - unsafe fn new(table: &RawTable, hash: u64) -> Self { + fn new(table: &'a RawTable, hash: u64) -> Self { RawIterHash { inner: RawIterHashInner::new(&table.table, hash), _marker: PhantomData, } } } -impl RawIterHashInner { +impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> { #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - unsafe fn new(table: &RawTableInner, hash: u64) -> Self { - let h2_hash = h2(hash); - let probe_seq = table.probe_seq(hash); - let group = Group::load(table.ctrl(probe_seq.pos)); - let bitmask = group.match_byte(h2_hash).into_iter(); + fn new(table: &'a RawTableInner, hash: u64) -> Self { + unsafe { + let h2_hash = h2(hash); + let probe_seq = table.probe_seq(hash); + let group = Group::load(table.ctrl(probe_seq.pos)); + let bitmask = group.match_byte(h2_hash).into_iter(); - RawIterHashInner { - bucket_mask: table.bucket_mask, - ctrl: table.ctrl, - h2_hash, - probe_seq, - group, - bitmask, + RawIterHashInner { + table, + h2_hash, + probe_seq, + group, + bitmask, + } } } } -impl Iterator for RawIterHash { +impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> { type Item = Bucket; fn next(&mut self) -> Option> { unsafe { match self.inner.next() { - Some(index) => { - // Can't use `RawTable::bucket` here as we don't have - // an actual `RawTable` reference to use. 
- debug_assert!(index <= self.inner.bucket_mask); - let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index); - Some(bucket) - } + Some(index) => Some(self.inner.table.bucket(index)), None => None, } } } } -impl Iterator for RawIterHashInner { +impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> { type Item = usize; fn next(&mut self) -> Option { unsafe { loop { if let Some(bit) = self.bitmask.next() { - let index = (self.probe_seq.pos + bit) & self.bucket_mask; + let index = (self.probe_seq.pos + bit) & self.table.bucket_mask; return Some(index); } if likely(self.group.match_empty().any_bit_set()) { return None; } - self.probe_seq.move_next(self.bucket_mask); - - // Can't use `RawTableInner::ctrl` here as we don't have - // an actual `RawTableInner` reference to use. - let index = self.probe_seq.pos; - debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH); - let group_ctrl = self.ctrl.as_ptr().add(index); - - self.group = Group::load(group_ctrl); + self.probe_seq.move_next(self.table.bucket_mask); + self.group = Group::load(self.table.ctrl(self.probe_seq.pos)); self.bitmask = self.group.match_byte(self.h2_hash).into_iter(); } } } } -pub(crate) struct RawExtractIf<'a, T, A: Allocator> { - pub iter: RawIter, - pub table: &'a mut RawTable, -} - -impl RawExtractIf<'_, T, A> { - #[cfg_attr(feature = "inline-more", inline)] - pub(crate) fn next(&mut self, mut f: F) -> Option - where - F: FnMut(&mut T) -> bool, - { - unsafe { - for item in &mut self.iter { - if f(item.as_mut()) { - return Some(self.table.remove(item).0); - } - } - } - None - } -} - #[cfg(test)] mod test_map { use super::*; @@ -4609,214 +2457,4 @@ assert!(table.find(i + 100, |x| *x == i + 100).is_none()); } } - - /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF - /// AN UNINITIALIZED TABLE DURING THE DROP - #[test] - fn test_drop_uninitialized() { - use ::alloc::vec::Vec; - - let table = unsafe { - // SAFETY: The `buckets` is power of two and we're not - // trying 
to actually use the returned RawTable. - RawTable::<(u64, Vec)>::new_uninitialized(Global, 8, Fallibility::Infallible) - .unwrap() - }; - drop(table); - } - - /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS` - /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES. - #[test] - fn test_drop_zero_items() { - use ::alloc::vec::Vec; - unsafe { - // SAFETY: The `buckets` is power of two and we're not - // trying to actually use the returned RawTable. - let table = - RawTable::<(u64, Vec)>::new_uninitialized(Global, 8, Fallibility::Infallible) - .unwrap(); - - // WE SIMULATE, AS IT WERE, A FULL TABLE. - - // SAFETY: We checked that the table is allocated and therefore the table already has - // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for) - // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe. - table - .table - .ctrl(0) - .write_bytes(EMPTY, table.table.num_ctrl_bytes()); - - // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets() - table.table.ctrl(0).write_bytes(0, table.capacity()); - - // Fix up the trailing control bytes. See the comments in set_ctrl - // for the handling of tables smaller than the group width. 
- if table.buckets() < Group::WIDTH { - // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, - // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to - // `Group::WIDTH` is safe - table - .table - .ctrl(0) - .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets()); - } else { - // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of - // control bytes,so copying `Group::WIDTH` bytes with offset equal - // to `self.buckets() == self.bucket_mask + 1` is safe - table - .table - .ctrl(0) - .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH); - } - drop(table); - } - } - - /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS` - /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES. - #[test] - fn test_catch_panic_clone_from() { - use ::alloc::sync::Arc; - use ::alloc::vec::Vec; - use allocator_api2::alloc::{AllocError, Allocator, Global}; - use core::sync::atomic::{AtomicI8, Ordering}; - use std::thread; - - struct MyAllocInner { - drop_count: Arc, - } - - #[derive(Clone)] - struct MyAlloc { - _inner: Arc, - } - - impl Drop for MyAllocInner { - fn drop(&mut self) { - println!("MyAlloc freed."); - self.drop_count.fetch_sub(1, Ordering::SeqCst); - } - } - - unsafe impl Allocator for MyAlloc { - fn allocate(&self, layout: Layout) -> std::result::Result, AllocError> { - let g = Global; - g.allocate(layout) - } - - unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - let g = Global; - g.deallocate(ptr, layout) - } - } - - const DISARMED: bool = false; - const ARMED: bool = true; - - struct CheckedCloneDrop { - panic_in_clone: bool, - dropped: bool, - need_drop: Vec, - } - - impl Clone for CheckedCloneDrop { - fn clone(&self) -> Self { - if self.panic_in_clone { - panic!("panic in clone") - } - Self { - panic_in_clone: self.panic_in_clone, - dropped: self.dropped, - need_drop: self.need_drop.clone(), - } - } - } - - impl Drop for CheckedCloneDrop { - fn drop(&mut self) { - if 
self.dropped { - panic!("double drop"); - } - self.dropped = true; - } - } - - let dropped: Arc = Arc::new(AtomicI8::new(2)); - - let mut table = RawTable::new_in(MyAlloc { - _inner: Arc::new(MyAllocInner { - drop_count: dropped.clone(), - }), - }); - - for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() { - let idx = idx as u64; - table.insert( - idx, - ( - idx, - CheckedCloneDrop { - panic_in_clone, - dropped: false, - need_drop: vec![idx], - }, - ), - |(k, _)| *k, - ); - } - - assert_eq!(table.len(), 7); - - thread::scope(|s| { - let result = s.spawn(|| { - let armed_flags = [ - DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED, - ]; - let mut scope_table = RawTable::new_in(MyAlloc { - _inner: Arc::new(MyAllocInner { - drop_count: dropped.clone(), - }), - }); - for (idx, &panic_in_clone) in armed_flags.iter().enumerate() { - let idx = idx as u64; - scope_table.insert( - idx, - ( - idx, - CheckedCloneDrop { - panic_in_clone, - dropped: false, - need_drop: vec![idx + 100], - }, - ), - |(k, _)| *k, - ); - } - table.clone_from(&scope_table); - }); - assert!(result.join().is_err()); - }); - - // Let's check that all iterators work fine and do not return elements - // (especially `RawIterRange`, which does not depend on the number of - // elements in the table, but looks directly at the control bytes) - // - // SAFETY: We know for sure that `RawTable` will outlive - // the returned `RawIter / RawIterRange` iterator. - assert_eq!(table.len(), 0); - assert_eq!(unsafe { table.iter().count() }, 0); - assert_eq!(unsafe { table.iter().iter.count() }, 0); - - for idx in 0..table.buckets() { - let idx = idx as u64; - assert!( - table.find(idx, |(k, _)| *k == idx).is_none(), - "Index: {idx}" - ); - } - - // All allocator clones should already be dropped. 
- assert_eq!(dropped.load(Ordering::SeqCst), 1); - } } diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/neon.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/neon.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/neon.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/neon.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,124 +0,0 @@ -use super::bitmask::BitMask; -use super::EMPTY; -use core::arch::aarch64 as neon; -use core::mem; -use core::num::NonZeroU64; - -pub(crate) type BitMaskWord = u64; -pub(crate) type NonZeroBitMaskWord = NonZeroU64; -pub(crate) const BITMASK_STRIDE: usize = 8; -pub(crate) const BITMASK_MASK: BitMaskWord = !0; -pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080; - -/// Abstraction over a group of control bytes which can be scanned in -/// parallel. -/// -/// This implementation uses a 64-bit NEON value. -#[derive(Copy, Clone)] -pub(crate) struct Group(neon::uint8x8_t); - -#[allow(clippy::use_self)] -impl Group { - /// Number of bytes in the group. - pub(crate) const WIDTH: usize = mem::size_of::(); - - /// Returns a full group of empty bytes, suitable for use as the initial - /// value for an empty hash table. - /// - /// This is guaranteed to be aligned to the group size. - #[inline] - pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] { - #[repr(C)] - struct AlignedBytes { - _align: [Group; 0], - bytes: [u8; Group::WIDTH], - } - const ALIGNED_BYTES: AlignedBytes = AlignedBytes { - _align: [], - bytes: [EMPTY; Group::WIDTH], - }; - &ALIGNED_BYTES.bytes - } - - /// Loads a group of bytes starting at the given address. - #[inline] - #[allow(clippy::cast_ptr_alignment)] // unaligned load - pub(crate) unsafe fn load(ptr: *const u8) -> Self { - Group(neon::vld1_u8(ptr)) - } - - /// Loads a group of bytes starting at the given address, which must be - /// aligned to `mem::align_of::()`. 
- #[inline] - #[allow(clippy::cast_ptr_alignment)] - pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self { - // FIXME: use align_offset once it stabilizes - debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); - Group(neon::vld1_u8(ptr)) - } - - /// Stores the group of bytes to the given address, which must be - /// aligned to `mem::align_of::()`. - #[inline] - #[allow(clippy::cast_ptr_alignment)] - pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) { - // FIXME: use align_offset once it stabilizes - debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); - neon::vst1_u8(ptr, self.0); - } - - /// Returns a `BitMask` indicating all bytes in the group which *may* - /// have the given value. - #[inline] - pub(crate) fn match_byte(self, byte: u8) -> BitMask { - unsafe { - let cmp = neon::vceq_u8(self.0, neon::vdup_n_u8(byte)); - BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) - } - } - - /// Returns a `BitMask` indicating all bytes in the group which are - /// `EMPTY`. - #[inline] - pub(crate) fn match_empty(self) -> BitMask { - self.match_byte(EMPTY) - } - - /// Returns a `BitMask` indicating all bytes in the group which are - /// `EMPTY` or `DELETED`. - #[inline] - pub(crate) fn match_empty_or_deleted(self) -> BitMask { - unsafe { - let cmp = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0)); - BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) - } - } - - /// Returns a `BitMask` indicating all bytes in the group which are full. 
- #[inline] - pub(crate) fn match_full(self) -> BitMask { - unsafe { - let cmp = neon::vcgez_s8(neon::vreinterpret_s8_u8(self.0)); - BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) - } - } - - /// Performs the following transformation on all bytes in the group: - /// - `EMPTY => EMPTY` - /// - `DELETED => EMPTY` - /// - `FULL => DELETED` - #[inline] - pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { - // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 - // and high_bit = 0 (FULL) to 1000_0000 - // - // Here's this logic expanded to concrete values: - // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false) - // 1111_1111 | 1000_0000 = 1111_1111 - // 0000_0000 | 1000_0000 = 1000_0000 - unsafe { - let special = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0)); - Group(neon::vorr_u8(special, neon::vdup_n_u8(0x80))) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/sse2.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/sse2.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/raw/sse2.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/raw/sse2.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,31 +1,28 @@ use super::bitmask::BitMask; use super::EMPTY; use core::mem; -use core::num::NonZeroU16; #[cfg(target_arch = "x86")] use core::arch::x86; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as x86; -pub(crate) type BitMaskWord = u16; -pub(crate) type NonZeroBitMaskWord = NonZeroU16; -pub(crate) const BITMASK_STRIDE: usize = 1; -pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff; -pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; +pub type BitMaskWord = u16; +pub const BITMASK_STRIDE: usize = 1; +pub const BITMASK_MASK: BitMaskWord = 0xffff; /// Abstraction over a group of control bytes which can be scanned in /// parallel. /// /// This implementation uses a 128-bit SSE value. 
#[derive(Copy, Clone)] -pub(crate) struct Group(x86::__m128i); +pub struct Group(x86::__m128i); // FIXME: https://github.com/rust-lang/rust-clippy/issues/3859 #[allow(clippy::use_self)] impl Group { /// Number of bytes in the group. - pub(crate) const WIDTH: usize = mem::size_of::(); + pub const WIDTH: usize = mem::size_of::(); /// Returns a full group of empty bytes, suitable for use as the initial /// value for an empty hash table. @@ -33,7 +30,7 @@ /// This is guaranteed to be aligned to the group size. #[inline] #[allow(clippy::items_after_statements)] - pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] { + pub const fn static_empty() -> &'static [u8; Group::WIDTH] { #[repr(C)] struct AlignedBytes { _align: [Group; 0], @@ -49,7 +46,7 @@ /// Loads a group of bytes starting at the given address. #[inline] #[allow(clippy::cast_ptr_alignment)] // unaligned load - pub(crate) unsafe fn load(ptr: *const u8) -> Self { + pub unsafe fn load(ptr: *const u8) -> Self { Group(x86::_mm_loadu_si128(ptr.cast())) } @@ -57,7 +54,7 @@ /// aligned to `mem::align_of::()`. #[inline] #[allow(clippy::cast_ptr_alignment)] - pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self { + pub unsafe fn load_aligned(ptr: *const u8) -> Self { // FIXME: use align_offset once it stabilizes debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); Group(x86::_mm_load_si128(ptr.cast())) @@ -67,7 +64,7 @@ /// aligned to `mem::align_of::()`. #[inline] #[allow(clippy::cast_ptr_alignment)] - pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) { + pub unsafe fn store_aligned(self, ptr: *mut u8) { // FIXME: use align_offset once it stabilizes debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); x86::_mm_store_si128(ptr.cast(), self.0); @@ -76,7 +73,7 @@ /// Returns a `BitMask` indicating all bytes in the group which have /// the given value. 
#[inline] - pub(crate) fn match_byte(self, byte: u8) -> BitMask { + pub fn match_byte(self, byte: u8) -> BitMask { #[allow( clippy::cast_possible_wrap, // byte: u8 as i8 // byte: i32 as u16 @@ -94,14 +91,14 @@ /// Returns a `BitMask` indicating all bytes in the group which are /// `EMPTY`. #[inline] - pub(crate) fn match_empty(self) -> BitMask { + pub fn match_empty(self) -> BitMask { self.match_byte(EMPTY) } /// Returns a `BitMask` indicating all bytes in the group which are /// `EMPTY` or `DELETED`. #[inline] - pub(crate) fn match_empty_or_deleted(self) -> BitMask { + pub fn match_empty_or_deleted(self) -> BitMask { #[allow( // byte: i32 as u16 // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the @@ -117,7 +114,7 @@ /// Returns a `BitMask` indicating all bytes in the group which are full. #[inline] - pub(crate) fn match_full(&self) -> BitMask { + pub fn match_full(&self) -> BitMask { self.match_empty_or_deleted().invert() } @@ -126,7 +123,7 @@ /// - `DELETED => EMPTY` /// - `FULL => DELETED` #[inline] - pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self { // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 // and high_bit = 0 (FULL) to 1000_0000 // diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/rustc_entry.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/rustc_entry.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/rustc_entry.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/rustc_entry.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,5 +1,5 @@ use self::RustcEntry::*; -use crate::map::{make_hash, Drain, HashMap, IntoIter, Iter, IterMut}; +use crate::map::{make_insert_hash, Drain, HashMap, IntoIter, Iter, IterMut}; use crate::raw::{Allocator, Bucket, Global, RawTable}; use core::fmt::{self, Debug}; use core::hash::{BuildHasher, Hash}; @@ -9,7 +9,7 @@ where K: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + 
Clone, { /// Gets the given key's corresponding entry in the map for in-place manipulation. /// @@ -32,7 +32,7 @@ /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V, A> { - let hash = make_hash(&self.hash_builder, &key); + let hash = make_insert_hash(&self.hash_builder, &key); if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) { RustcEntry::Occupied(RustcOccupiedEntry { key: Some(key), @@ -62,7 +62,7 @@ /// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry pub enum RustcEntry<'a, K, V, A = Global> where - A: Allocator, + A: Allocator + Clone, { /// An occupied entry. Occupied(RustcOccupiedEntry<'a, K, V, A>), @@ -71,7 +71,7 @@ Vacant(RustcVacantEntry<'a, K, V, A>), } -impl Debug for RustcEntry<'_, K, V, A> { +impl Debug for RustcEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -86,7 +86,7 @@ /// [`RustcEntry`]: enum.RustcEntry.html pub struct RustcOccupiedEntry<'a, K, V, A = Global> where - A: Allocator, + A: Allocator + Clone, { key: Option, elem: Bucket<(K, V)>, @@ -97,18 +97,18 @@ where K: Send, V: Send, - A: Allocator + Send, + A: Allocator + Clone + Send, { } unsafe impl Sync for RustcOccupiedEntry<'_, K, V, A> where K: Sync, V: Sync, - A: Allocator + Sync, + A: Allocator + Clone + Sync, { } -impl Debug for RustcOccupiedEntry<'_, K, V, A> { +impl Debug for RustcOccupiedEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) @@ -123,20 +123,20 @@ /// [`RustcEntry`]: enum.RustcEntry.html pub struct RustcVacantEntry<'a, K, V, A = Global> where - A: Allocator, + A: Allocator + Clone, { hash: u64, key: K, table: &'a mut RawTable<(K, V), A>, } -impl Debug for RustcVacantEntry<'_, K, V, A> { +impl Debug for RustcVacantEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 
f.debug_tuple("VacantEntry").field(self.key()).finish() } } -impl<'a, K, V, A: Allocator> RustcEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> { /// Sets the value of the entry, and returns a RustcOccupiedEntry. /// /// # Examples @@ -265,7 +265,7 @@ } } -impl<'a, K, V: Default, A: Allocator> RustcEntry<'a, K, V, A> { +impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -293,7 +293,7 @@ } } -impl<'a, K, V, A: Allocator> RustcOccupiedEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -330,7 +330,7 @@ /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.remove(self.elem).0 } + unsafe { self.table.remove(self.elem) } } /// Gets a reference to the value in the entry. @@ -518,7 +518,7 @@ } } -impl<'a, K, V, A: Allocator> RustcVacantEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `RustcVacantEntry`. 
/// diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/scopeguard.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/scopeguard.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/scopeguard.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/scopeguard.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,6 +1,6 @@ // Extracted from the scopeguard crate use core::{ - mem::ManuallyDrop, + mem, ops::{Deref, DerefMut}, ptr, }; @@ -28,13 +28,15 @@ #[inline] pub fn into_inner(guard: Self) -> T { // Cannot move out of Drop-implementing types, so - // ptr::read the value out of a ManuallyDrop - // Don't use mem::forget as that might invalidate value - let guard = ManuallyDrop::new(guard); + // ptr::read the value and forget the guard. unsafe { let value = ptr::read(&guard.value); - // read the closure so that it is dropped - let _ = ptr::read(&guard.dropfn); + // read the closure so that it is dropped, and assign it to a local + // variable to ensure that it is only dropped after the guard has + // been forgotten. (In case the Drop impl of the closure, or that + // of any consumed captured variable, panics). 
+ let _dropfn = ptr::read(&guard.dropfn); + mem::forget(guard); value } } diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/set.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/set.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/set.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/set.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,14 +1,14 @@ -#[cfg(feature = "raw")] -use crate::raw::RawTable; -use crate::{Equivalent, TryReserveError}; +use crate::TryReserveError; use alloc::borrow::ToOwned; +use core::borrow::Borrow; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::iter::{Chain, FromIterator, FusedIterator}; +use core::mem; use core::ops::{BitAnd, BitOr, BitXor, Sub}; -use super::map::{self, DefaultHashBuilder, HashMap, Keys}; -use crate::raw::{Allocator, Global, RawExtractIf}; +use super::map::{self, ConsumeAllOnDrop, DefaultHashBuilder, DrainFilterInner, HashMap, Keys}; +use crate::raw::{Allocator, Global}; // Future Optimization (FIXME!) // ============================= @@ -112,7 +112,7 @@ /// [`HashMap`]: struct.HashMap.html /// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html /// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html -pub struct HashSet { +pub struct HashSet { pub(crate) map: HashMap, } @@ -135,18 +135,6 @@ /// The hash set is initially created with a capacity of 0, so it will not allocate until it /// is first inserted into. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashSet`], for example with - /// [`with_hasher`](HashSet::with_hasher) method. 
- /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// /// # Examples /// /// ``` @@ -165,18 +153,6 @@ /// The hash set will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash set will not allocate. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashSet`], for example with - /// [`with_capacity_and_hasher`](HashSet::with_capacity_and_hasher) method. - /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// /// # Examples /// /// ``` @@ -193,24 +169,12 @@ } #[cfg(feature = "ahash")] -impl HashSet { +impl HashSet { /// Creates an empty `HashSet`. /// /// The hash set is initially created with a capacity of 0, so it will not allocate until it /// is first inserted into. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashSet`], for example with - /// [`with_hasher_in`](HashSet::with_hasher_in) method. 
- /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// /// # Examples /// /// ``` @@ -229,18 +193,6 @@ /// The hash set will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash set will not allocate. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashSet`], for example with - /// [`with_capacity_and_hasher_in`](HashSet::with_capacity_and_hasher_in) method. - /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// /// # Examples /// /// ``` @@ -256,7 +208,7 @@ } } -impl HashSet { +impl HashSet { /// Returns the number of elements the set can hold without reallocating. /// /// # Examples @@ -379,11 +331,8 @@ /// In other words, move all elements `e` such that `f(&e)` returns `true` out /// into another iterator. /// - /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating - /// or the iteration short-circuits, then the remaining elements will be retained. - /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. - /// - /// [`retain()`]: HashSet::retain + /// When the returned DrainedFilter is dropped, any remaining elements that satisfy + /// the predicate are dropped from the set. 
/// /// # Examples /// @@ -391,7 +340,7 @@ /// use hashbrown::HashSet; /// /// let mut set: HashSet = (0..8).collect(); - /// let drained: HashSet = set.extract_if(|v| v % 2 == 0).collect(); + /// let drained: HashSet = set.drain_filter(|v| v % 2 == 0).collect(); /// /// let mut evens = drained.into_iter().collect::>(); /// let mut odds = set.into_iter().collect::>(); @@ -402,13 +351,13 @@ /// assert_eq!(odds, vec![1, 3, 5, 7]); /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, T, F, A> + pub fn drain_filter(&mut self, f: F) -> DrainFilter<'_, T, F, A> where F: FnMut(&T) -> bool, { - ExtractIf { + DrainFilter { f, - inner: RawExtractIf { + inner: DrainFilterInner { iter: unsafe { self.map.table.iter() }, table: &mut self.map.table, }, @@ -437,23 +386,16 @@ /// Creates a new empty hash set which will use the given hasher to hash /// keys. /// - /// The hash set is initially created with a capacity of 0, so it will not - /// allocate until it is first inserted into. - /// - /// # HashDoS resistance + /// The hash set is also created with the default initial capacity. /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashSet`]. + /// Warning: `hasher` is normally randomly generated, and + /// is designed to allow `HashSet`s to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. /// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashSet to be useful, see its documentation for details. + /// the HashMap to be useful, see its documentation for details. 
/// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html /// /// # Examples /// @@ -465,6 +407,8 @@ /// let mut set = HashSet::with_hasher(s); /// set.insert(2); /// ``` + /// + /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html #[cfg_attr(feature = "inline-more", inline)] pub const fn with_hasher(hasher: S) -> Self { Self { @@ -478,20 +422,13 @@ /// The hash set will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash set will not allocate. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashSet`]. + /// Warning: `hasher` is normally randomly generated, and + /// is designed to allow `HashSet`s to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. /// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashSet to be useful, see its documentation for details. - /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// the HashMap to be useful, see its documentation for details. 
/// /// # Examples /// @@ -503,6 +440,8 @@ /// let mut set = HashSet::with_capacity_and_hasher(10, s); /// set.insert(1); /// ``` + /// + /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html #[cfg_attr(feature = "inline-more", inline)] pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { Self { @@ -513,7 +452,7 @@ impl HashSet where - A: Allocator, + A: Allocator + Clone, { /// Returns a reference to the underlying allocator. #[inline] @@ -524,23 +463,12 @@ /// Creates a new empty hash set which will use the given hasher to hash /// keys. /// - /// The hash set is initially created with a capacity of 0, so it will not - /// allocate until it is first inserted into. - /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashSet`]. - /// - /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashSet to be useful, see its documentation for details. + /// The hash set is also created with the default initial capacity. /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// Warning: `hasher` is normally randomly generated, and + /// is designed to allow `HashSet`s to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. 
/// /// # Examples /// @@ -553,7 +481,7 @@ /// set.insert(2); /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub const fn with_hasher_in(hasher: S, alloc: A) -> Self { + pub fn with_hasher_in(hasher: S, alloc: A) -> Self { Self { map: HashMap::with_hasher_in(hasher, alloc), } @@ -565,20 +493,10 @@ /// The hash set will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash set will not allocate. /// - /// # HashDoS resistance - /// - /// The `hash_builder` normally use a fixed key by default and that does - /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. - /// Users who require HashDoS resistance should explicitly use - /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] - /// as the hasher when creating a [`HashSet`]. - /// - /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashSet to be useful, see its documentation for details. - /// - /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack - /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// Warning: `hasher` is normally randomly generated, and + /// is designed to allow `HashSet`s to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. /// /// # Examples /// @@ -621,7 +539,7 @@ where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the `HashSet`. The collection may reserve more space to avoid @@ -629,12 +547,7 @@ /// /// # Panics /// - /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program - /// in case of allocation error. 
Use [`try_reserve`](HashSet::try_reserve) instead - /// if you want to handle memory allocation failure. - /// - /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html - /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html + /// Panics if the new allocation size overflows `usize`. /// /// # Examples /// @@ -860,7 +773,8 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn contains(&self, value: &Q) -> bool where - Q: Hash + Equivalent, + T: Borrow, + Q: Hash + Eq, { self.map.contains_key(value) } @@ -886,7 +800,8 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn get(&self, value: &Q) -> Option<&T> where - Q: Hash + Equivalent, + T: Borrow, + Q: Hash + Eq, { // Avoid `Option::map` because it bloats LLVM IR. match self.map.get_key_value(value) { @@ -941,7 +856,8 @@ #[inline] pub fn get_or_insert_owned(&mut self, value: &Q) -> &T where - Q: Hash + Equivalent + ToOwned, + T: Borrow, + Q: Hash + Eq + ToOwned, { // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`. @@ -973,7 +889,8 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn get_or_insert_with(&mut self, value: &Q, f: F) -> &T where - Q: Hash + Equivalent, + T: Borrow, + Q: Hash + Eq, F: FnOnce(&Q) -> T, { // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with @@ -1189,7 +1106,8 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn remove(&mut self, value: &Q) -> bool where - Q: Hash + Equivalent, + T: Borrow, + Q: Hash + Eq, { self.map.remove(value).is_some() } @@ -1215,7 +1133,8 @@ #[cfg_attr(feature = "inline-more", inline)] pub fn take(&mut self, value: &Q) -> Option where - Q: Hash + Equivalent, + T: Borrow, + Q: Hash + Eq, { // Avoid `Option::map` because it bloats LLVM IR. 
match self.map.remove_entry(value) { @@ -1225,53 +1144,11 @@ } } -impl HashSet { - /// Returns a reference to the [`RawTable`] used underneath [`HashSet`]. - /// This function is only available if the `raw` feature of the crate is enabled. - /// - /// # Note - /// - /// Calling this function is safe, but using the raw hash table API may require - /// unsafe functions or blocks. - /// - /// `RawTable` API gives the lowest level of control under the set that can be useful - /// for extending the HashSet's API, but may lead to *[undefined behavior]*. - /// - /// [`HashSet`]: struct.HashSet.html - /// [`RawTable`]: crate::raw::RawTable - /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[cfg(feature = "raw")] - #[cfg_attr(feature = "inline-more", inline)] - pub fn raw_table(&self) -> &RawTable<(T, ()), A> { - self.map.raw_table() - } - - /// Returns a mutable reference to the [`RawTable`] used underneath [`HashSet`]. - /// This function is only available if the `raw` feature of the crate is enabled. - /// - /// # Note - /// - /// Calling this function is safe, but using the raw hash table API may require - /// unsafe functions or blocks. - /// - /// `RawTable` API gives the lowest level of control under the set that can be useful - /// for extending the HashSet's API, but may lead to *[undefined behavior]*. 
- /// - /// [`HashSet`]: struct.HashSet.html - /// [`RawTable`]: crate::raw::RawTable - /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - #[cfg(feature = "raw")] - #[cfg_attr(feature = "inline-more", inline)] - pub fn raw_table_mut(&mut self) -> &mut RawTable<(T, ()), A> { - self.map.raw_table_mut() - } -} - impl PartialEq for HashSet where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { @@ -1286,14 +1163,14 @@ where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { } impl fmt::Debug for HashSet where T: fmt::Debug, - A: Allocator, + A: Allocator + Clone, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() @@ -1302,7 +1179,7 @@ impl From> for HashSet where - A: Allocator, + A: Allocator + Clone, { fn from(map: HashMap) -> Self { Self { map } @@ -1313,7 +1190,7 @@ where T: Eq + Hash, S: BuildHasher + Default, - A: Default + Allocator, + A: Default + Allocator + Clone, { #[cfg_attr(feature = "inline-more", inline)] fn from_iter>(iter: I) -> Self { @@ -1328,7 +1205,7 @@ impl From<[T; N]> for HashSet where T: Eq + Hash, - A: Default + Allocator, + A: Default + Allocator + Clone, { /// # Examples /// @@ -1348,7 +1225,7 @@ where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { #[cfg_attr(feature = "inline-more", inline)] fn extend>(&mut self, iter: I) { @@ -1372,7 +1249,7 @@ where T: 'a + Eq + Hash + Copy, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { #[cfg_attr(feature = "inline-more", inline)] fn extend>(&mut self, iter: I) { @@ -1395,7 +1272,7 @@ impl Default for HashSet where S: Default, - A: Default + Allocator, + A: Default + Allocator + Clone, { /// Creates an empty `HashSet` with the `Default` value for the hasher. 
#[cfg_attr(feature = "inline-more", inline)] @@ -1410,7 +1287,7 @@ where T: Eq + Hash + Clone, S: BuildHasher + Default, - A: Allocator, + A: Allocator + Clone, { type Output = HashSet; @@ -1443,7 +1320,7 @@ where T: Eq + Hash + Clone, S: BuildHasher + Default, - A: Allocator, + A: Allocator + Clone, { type Output = HashSet; @@ -1554,7 +1431,7 @@ /// /// [`HashSet`]: struct.HashSet.html /// [`into_iter`]: struct.HashSet.html#method.into_iter -pub struct IntoIter { +pub struct IntoIter { iter: map::IntoIter, } @@ -1565,24 +1442,23 @@ /// /// [`HashSet`]: struct.HashSet.html /// [`drain`]: struct.HashSet.html#method.drain -pub struct Drain<'a, K, A: Allocator = Global> { +pub struct Drain<'a, K, A: Allocator + Clone = Global> { iter: map::Drain<'a, K, (), A>, } /// A draining iterator over entries of a `HashSet` which don't satisfy the predicate `f`. /// -/// This `struct` is created by the [`extract_if`] method on [`HashSet`]. See its +/// This `struct` is created by the [`drain_filter`] method on [`HashSet`]. See its /// documentation for more. /// -/// [`extract_if`]: struct.HashSet.html#method.extract_if +/// [`drain_filter`]: struct.HashSet.html#method.drain_filter /// [`HashSet`]: struct.HashSet.html -#[must_use = "Iterators are lazy unless consumed"] -pub struct ExtractIf<'a, K, F, A: Allocator = Global> +pub struct DrainFilter<'a, K, F, A: Allocator + Clone = Global> where F: FnMut(&K) -> bool, { f: F, - inner: RawExtractIf<'a, (K, ()), A>, + inner: DrainFilterInner<'a, K, (), A>, } /// A lazy iterator producing elements in the intersection of `HashSet`s. 
@@ -1592,7 +1468,7 @@ /// /// [`HashSet`]: struct.HashSet.html /// [`intersection`]: struct.HashSet.html#method.intersection -pub struct Intersection<'a, T, S, A: Allocator = Global> { +pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> { // iterator of the first set iter: Iter<'a, T>, // the second set @@ -1606,7 +1482,7 @@ /// /// [`HashSet`]: struct.HashSet.html /// [`difference`]: struct.HashSet.html#method.difference -pub struct Difference<'a, T, S, A: Allocator = Global> { +pub struct Difference<'a, T, S, A: Allocator + Clone = Global> { // iterator of the first set iter: Iter<'a, T>, // the second set @@ -1620,7 +1496,7 @@ /// /// [`HashSet`]: struct.HashSet.html /// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference -pub struct SymmetricDifference<'a, T, S, A: Allocator = Global> { +pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { iter: Chain, Difference<'a, T, S, A>>, } @@ -1631,11 +1507,11 @@ /// /// [`HashSet`]: struct.HashSet.html /// [`union`]: struct.HashSet.html#method.union -pub struct Union<'a, T, S, A: Allocator = Global> { +pub struct Union<'a, T, S, A: Allocator + Clone = Global> { iter: Chain, Difference<'a, T, S, A>>, } -impl<'a, T, S, A: Allocator> IntoIterator for &'a HashSet { +impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet { type Item = &'a T; type IntoIter = Iter<'a, T>; @@ -1645,7 +1521,7 @@ } } -impl IntoIterator for HashSet { +impl IntoIterator for HashSet { type Item = T; type IntoIter = IntoIter; @@ -1696,14 +1572,6 @@ fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, f) - } } impl<'a, K> ExactSizeIterator for Iter<'a, K> { #[cfg_attr(feature = "inline-more", inline)] @@ -1719,7 +1587,7 @@ } } -impl Iterator for IntoIter { +impl Iterator for IntoIter { type Item = K; 
#[cfg_attr(feature = "inline-more", inline)] @@ -1734,31 +1602,23 @@ fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, |acc, (k, ())| f(acc, k)) - } } -impl ExactSizeIterator for IntoIter { +impl ExactSizeIterator for IntoIter { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.iter.len() } } -impl FusedIterator for IntoIter {} +impl FusedIterator for IntoIter {} -impl fmt::Debug for IntoIter { +impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter.iter().map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() } } -impl Iterator for Drain<'_, K, A> { +impl Iterator for Drain<'_, K, A> { type Item = K; #[cfg_attr(feature = "inline-more", inline)] @@ -1773,31 +1633,37 @@ fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, |acc, (k, ())| f(acc, k)) - } } -impl ExactSizeIterator for Drain<'_, K, A> { +impl ExactSizeIterator for Drain<'_, K, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.iter.len() } } -impl FusedIterator for Drain<'_, K, A> {} +impl FusedIterator for Drain<'_, K, A> {} -impl fmt::Debug for Drain<'_, K, A> { +impl fmt::Debug for Drain<'_, K, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter.iter().map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() } } -impl Iterator for ExtractIf<'_, K, F, A> +impl<'a, K, F, A: Allocator + Clone> Drop for DrainFilter<'a, K, F, A> +where + F: FnMut(&K) -> bool, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + while let Some(item) = self.next() { + let 
guard = ConsumeAllOnDrop(self); + drop(item); + mem::forget(guard); + } + } +} + +impl Iterator for DrainFilter<'_, K, F, A> where F: FnMut(&K) -> bool, { @@ -1805,9 +1671,9 @@ #[cfg_attr(feature = "inline-more", inline)] fn next(&mut self) -> Option { - self.inner - .next(|&mut (ref k, ())| (self.f)(k)) - .map(|(k, ())| k) + let f = &mut self.f; + let (k, _) = self.inner.next(&mut |k, _| f(k))?; + Some(k) } #[inline] @@ -1816,9 +1682,12 @@ } } -impl FusedIterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool {} +impl FusedIterator for DrainFilter<'_, K, F, A> where + F: FnMut(&K) -> bool +{ +} -impl Clone for Intersection<'_, T, S, A> { +impl Clone for Intersection<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Intersection { @@ -1832,7 +1701,7 @@ where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { type Item = &'a T; @@ -1851,27 +1720,13 @@ let (_, upper) = self.iter.size_hint(); (0, upper) } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, |acc, elt| { - if self.other.contains(elt) { - f(acc, elt) - } else { - acc - } - }) - } } impl fmt::Debug for Intersection<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() @@ -1882,11 +1737,11 @@ where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { } -impl Clone for Difference<'_, T, S, A> { +impl Clone for Difference<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Difference { @@ -1900,7 +1755,7 @@ where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { type Item = &'a T; @@ -1919,27 +1774,13 @@ let (_, upper) = self.iter.size_hint(); (0, upper) } - #[cfg_attr(feature = "inline-more", inline)] - 
fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, |acc, elt| { - if self.other.contains(elt) { - acc - } else { - f(acc, elt) - } - }) - } } impl FusedIterator for Difference<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { } @@ -1947,14 +1788,14 @@ where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } -impl Clone for SymmetricDifference<'_, T, S, A> { +impl Clone for SymmetricDifference<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { SymmetricDifference { @@ -1967,7 +1808,7 @@ where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { type Item = &'a T; @@ -1979,21 +1820,13 @@ fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, f) - } } impl FusedIterator for SymmetricDifference<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { } @@ -2001,14 +1834,14 @@ where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } -impl Clone for Union<'_, T, S, A> { +impl Clone for Union<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Union { @@ -2021,7 +1854,7 @@ where T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { } @@ -2029,7 +1862,7 @@ where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() @@ -2040,7 +1873,7 @@ where 
T: Eq + Hash, S: BuildHasher, - A: Allocator, + A: Allocator + Clone, { type Item = &'a T; @@ -2052,14 +1885,6 @@ fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } - #[cfg_attr(feature = "inline-more", inline)] - fn fold(self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, f) - } } /// A view into a single entry in a set, which may either be vacant or occupied. @@ -2100,7 +1925,7 @@ /// ``` pub enum Entry<'a, T, S, A = Global> where - A: Allocator, + A: Allocator + Clone, { /// An occupied entry. /// @@ -2133,7 +1958,7 @@ Vacant(VacantEntry<'a, T, S, A>), } -impl fmt::Debug for Entry<'_, T, S, A> { +impl fmt::Debug for Entry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -2178,11 +2003,11 @@ /// assert_eq!(set.get(&"c"), None); /// assert_eq!(set.len(), 2); /// ``` -pub struct OccupiedEntry<'a, T, S, A: Allocator = Global> { +pub struct OccupiedEntry<'a, T, S, A: Allocator + Clone = Global> { inner: map::OccupiedEntry<'a, T, (), S, A>, } -impl fmt::Debug for OccupiedEntry<'_, T, S, A> { +impl fmt::Debug for OccupiedEntry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("value", self.get()) @@ -2216,17 +2041,17 @@ /// } /// assert!(set.contains("b") && set.len() == 2); /// ``` -pub struct VacantEntry<'a, T, S, A: Allocator = Global> { +pub struct VacantEntry<'a, T, S, A: Allocator + Clone = Global> { inner: map::VacantEntry<'a, T, (), S, A>, } -impl fmt::Debug for VacantEntry<'_, T, S, A> { +impl fmt::Debug for VacantEntry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.get()).finish() } } -impl<'a, T, S, A: Allocator> Entry<'a, T, S, A> { +impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> { /// Sets the value of the entry, and returns an OccupiedEntry. 
/// /// # Examples @@ -2303,7 +2128,7 @@ } } -impl OccupiedEntry<'_, T, S, A> { +impl OccupiedEntry<'_, T, S, A> { /// Gets a reference to the value in the entry. /// /// # Examples @@ -2390,7 +2215,7 @@ } } -impl<'a, T, S, A: Allocator> VacantEntry<'a, T, S, A> { +impl<'a, T, S, A: Allocator + Clone> VacantEntry<'a, T, S, A> { /// Gets a reference to the value that would be used when inserting /// through the `VacantEntry`. /// @@ -2470,30 +2295,34 @@ fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> { v } - fn into_iter<'new, A: Allocator>(v: IntoIter<&'static str, A>) -> IntoIter<&'new str, A> { + fn into_iter<'new, A: Allocator + Clone>( + v: IntoIter<&'static str, A>, + ) -> IntoIter<&'new str, A> { v } - fn difference<'a, 'new, A: Allocator>( + fn difference<'a, 'new, A: Allocator + Clone>( v: Difference<'a, &'static str, DefaultHashBuilder, A>, ) -> Difference<'a, &'new str, DefaultHashBuilder, A> { v } - fn symmetric_difference<'a, 'new, A: Allocator>( + fn symmetric_difference<'a, 'new, A: Allocator + Clone>( v: SymmetricDifference<'a, &'static str, DefaultHashBuilder, A>, ) -> SymmetricDifference<'a, &'new str, DefaultHashBuilder, A> { v } - fn intersection<'a, 'new, A: Allocator>( + fn intersection<'a, 'new, A: Allocator + Clone>( v: Intersection<'a, &'static str, DefaultHashBuilder, A>, ) -> Intersection<'a, &'new str, DefaultHashBuilder, A> { v } - fn union<'a, 'new, A: Allocator>( + fn union<'a, 'new, A: Allocator + Clone>( v: Union<'a, &'static str, DefaultHashBuilder, A>, ) -> Union<'a, &'new str, DefaultHashBuilder, A> { v } - fn drain<'new, A: Allocator>(d: Drain<'static, &'static str, A>) -> Drain<'new, &'new str, A> { + fn drain<'new, A: Allocator + Clone>( + d: Drain<'static, &'static str, A>, + ) -> Drain<'new, &'new str, A> { d } } @@ -2784,10 +2613,10 @@ set.insert(1); set.insert(2); - let set_str = format!("{set:?}"); + let set_str = format!("{:?}", set); assert!(set_str == "{1, 2}" || set_str == "{2, 1}"); - 
assert_eq!(format!("{empty:?}"), "{}"); + assert_eq!(format!("{:?}", empty), "{}"); } #[test] @@ -2862,12 +2691,11 @@ } #[test] - #[allow(clippy::needless_borrow)] fn test_extend_ref() { let mut a = HashSet::new(); a.insert(1); - a.extend([2, 3, 4]); + a.extend(&[2, 3, 4]); assert_eq!(a.len(), 4); assert!(a.contains(&1)); @@ -2902,10 +2730,10 @@ } #[test] - fn test_extract_if() { + fn test_drain_filter() { { let mut set: HashSet = (0..8).collect(); - let drained = set.extract_if(|&k| k % 2 == 0); + let drained = set.drain_filter(|&k| k % 2 == 0); let mut out = drained.collect::>(); out.sort_unstable(); assert_eq!(vec![0, 2, 4, 6], out); @@ -2913,7 +2741,7 @@ } { let mut set: HashSet = (0..8).collect(); - set.extract_if(|&k| k % 2 == 0).for_each(drop); + drop(set.drain_filter(|&k| k % 2 == 0)); assert_eq!(set.len(), 4, "Removes non-matching items on drop"); } } @@ -2959,11 +2787,4 @@ set.insert(i); } } - - #[test] - fn collect() { - // At the time of writing, this hits the ZST case in from_base_index - // (and without the `map`, it does not). - let mut _set: HashSet<_> = (0..3).map(|_| ()).collect(); - } } diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown/src/table.rs s390-tools-2.33.1/rust-vendor/hashbrown/src/table.rs --- s390-tools-2.31.0/rust-vendor/hashbrown/src/table.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown/src/table.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2064 +0,0 @@ -use core::{fmt, iter::FusedIterator, marker::PhantomData}; - -use crate::{ - raw::{ - Allocator, Bucket, Global, InsertSlot, RawDrain, RawExtractIf, RawIntoIter, RawIter, - RawTable, - }, - TryReserveError, -}; - -/// Low-level hash table with explicit hashing. -/// -/// The primary use case for this type over [`HashMap`] or [`HashSet`] is to -/// support types that do not implement the [`Hash`] and [`Eq`] traits, but -/// instead require additional data not contained in the key itself to compute a -/// hash and compare two elements for equality. 
-/// -/// Examples of when this can be useful include: -/// - An `IndexMap` implementation where indices into a `Vec` are stored as -/// elements in a `HashTable`. Hashing and comparing the elements -/// requires indexing the associated `Vec` to get the actual value referred to -/// by the index. -/// - Avoiding re-computing a hash when it is already known. -/// - Mutating the key of an element in a way that doesn't affect its hash. -/// -/// To achieve this, `HashTable` methods that search for an element in the table -/// require a hash value and equality function to be explicitly passed in as -/// arguments. The method will then iterate over the elements with the given -/// hash and call the equality function on each of them, until a match is found. -/// -/// In most cases, a `HashTable` will not be exposed directly in an API. It will -/// instead be wrapped in a helper type which handles the work of calculating -/// hash values and comparing elements. -/// -/// Due to its low-level nature, this type provides fewer guarantees than -/// [`HashMap`] and [`HashSet`]. Specifically, the API allows you to shoot -/// yourself in the foot by having multiple elements with identical keys in the -/// table. The table itself will still function correctly and lookups will -/// arbitrarily return one of the matching elements. However you should avoid -/// doing this because it changes the runtime of hash table operations from -/// `O(1)` to `O(k)` where `k` is the number of duplicate entries. -/// -/// [`HashMap`]: super::HashMap -/// [`HashSet`]: super::HashSet -pub struct HashTable -where - A: Allocator, -{ - pub(crate) raw: RawTable, -} - -impl HashTable { - /// Creates an empty `HashTable`. - /// - /// The hash table is initially created with a capacity of 0, so it will not allocate until it - /// is first inserted into. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashTable; - /// let mut table: HashTable<&str> = HashTable::new(); - /// assert_eq!(table.len(), 0); - /// assert_eq!(table.capacity(), 0); - /// ``` - pub const fn new() -> Self { - Self { - raw: RawTable::new(), - } - } - - /// Creates an empty `HashTable` with the specified capacity. - /// - /// The hash table will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash table will not allocate. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashTable; - /// let mut table: HashTable<&str> = HashTable::with_capacity(10); - /// assert_eq!(table.len(), 0); - /// assert!(table.capacity() >= 10); - /// ``` - pub fn with_capacity(capacity: usize) -> Self { - Self { - raw: RawTable::with_capacity(capacity), - } - } -} - -impl HashTable -where - A: Allocator, -{ - /// Creates an empty `HashTable` using the given allocator. - /// - /// The hash table is initially created with a capacity of 0, so it will not allocate until it - /// is first inserted into. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use bumpalo::Bump; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let bump = Bump::new(); - /// let mut table = HashTable::new_in(&bump); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// - /// // The created HashTable holds none elements - /// assert_eq!(table.len(), 0); - /// - /// // The created HashTable also doesn't allocate memory - /// assert_eq!(table.capacity(), 0); - /// - /// // Now we insert element inside created HashTable - /// table.insert_unique(hasher(&"One"), "One", hasher); - /// // We can see that the HashTable holds 1 element - /// assert_eq!(table.len(), 1); - /// // And it also allocates some capacity - /// assert!(table.capacity() > 1); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub const fn new_in(alloc: A) -> Self { - Self { - raw: RawTable::new_in(alloc), - } - } - - /// Creates an empty `HashTable` with the specified capacity using the given allocator. - /// - /// The hash table will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash table will not allocate. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use bumpalo::Bump; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let bump = Bump::new(); - /// let mut table = HashTable::with_capacity_in(5, &bump); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// - /// // The created HashTable holds none elements - /// assert_eq!(table.len(), 0); - /// // But it can hold at least 5 elements without reallocating - /// let empty_map_capacity = table.capacity(); - /// assert!(empty_map_capacity >= 5); - /// - /// // Now we insert some 5 elements inside created HashTable - /// table.insert_unique(hasher(&"One"), "One", hasher); - /// table.insert_unique(hasher(&"Two"), "Two", hasher); - /// table.insert_unique(hasher(&"Three"), "Three", hasher); - /// table.insert_unique(hasher(&"Four"), "Four", hasher); - /// table.insert_unique(hasher(&"Five"), "Five", hasher); - /// - /// // We can see that the HashTable holds 5 elements - /// assert_eq!(table.len(), 5); - /// // But its capacity isn't changed - /// assert_eq!(table.capacity(), empty_map_capacity) - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Self { - raw: RawTable::with_capacity_in(capacity, alloc), - } - } - - /// Returns a reference to the underlying allocator. - pub fn allocator(&self) -> &A { - self.raw.allocator() - } - - /// Returns a reference to an entry in the table with the given hash and - /// which satisfies the equality function passed. - /// - /// This method will call `eq` for all entries with the given hash, but may - /// also call it for entries with a different hash. `eq` should only return - /// true for the desired entry, at which point the search is stopped. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&1), 1, hasher); - /// table.insert_unique(hasher(&2), 2, hasher); - /// table.insert_unique(hasher(&3), 3, hasher); - /// assert_eq!(table.find(hasher(&2), |&val| val == 2), Some(&2)); - /// assert_eq!(table.find(hasher(&4), |&val| val == 4), None); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn find(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { - self.raw.get(hash, eq) - } - - /// Returns a mutable reference to an entry in the table with the given hash - /// and which satisfies the equality function passed. - /// - /// This method will call `eq` for all entries with the given hash, but may - /// also call it for entries with a different hash. `eq` should only return - /// true for the desired entry, at which point the search is stopped. - /// - /// When mutating an entry, you should ensure that it still retains the same - /// hash value as when it was inserted, otherwise lookups of that entry may - /// fail to find it. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); - /// if let Some(val) = table.find_mut(hasher(&1), |val| val.0 == 1) { - /// val.1 = "b"; - /// } - /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), Some(&(1, "b"))); - /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), None); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn find_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { - self.raw.get_mut(hash, eq) - } - - /// Returns an `OccupiedEntry` for an entry in the table with the given hash - /// and which satisfies the equality function passed. - /// - /// This can be used to remove the entry from the table. Call - /// [`HashTable::entry`] instead if you wish to insert an entry if the - /// lookup fails. - /// - /// This method will call `eq` for all entries with the given hash, but may - /// also call it for entries with a different hash. `eq` should only return - /// true for the desired entry, at which point the search is stopped. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); - /// if let Ok(entry) = table.find_entry(hasher(&1), |val| val.0 == 1) { - /// entry.remove(); - /// } - /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn find_entry( - &mut self, - hash: u64, - eq: impl FnMut(&T) -> bool, - ) -> Result, AbsentEntry<'_, T, A>> { - match self.raw.find(hash, eq) { - Some(bucket) => Ok(OccupiedEntry { - hash, - bucket, - table: self, - }), - None => Err(AbsentEntry { table: self }), - } - } - - /// Returns an `Entry` for an entry in the table with the given hash - /// and which satisfies the equality function passed. - /// - /// This can be used to remove the entry from the table, or insert a new - /// entry with the given hash if one doesn't already exist. - /// - /// This method will call `eq` for all entries with the given hash, but may - /// also call it for entries with a different hash. `eq` should only return - /// true for the desired entry, at which point the search is stopped. - /// - /// This method may grow the table in preparation for an insertion. Call - /// [`HashTable::find_entry`] if this is undesirable. - /// - /// `hasher` is called if entries need to be moved or copied to a new table. - /// This must return the same hash value that each entry was inserted with. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::Entry; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); - /// if let Entry::Occupied(entry) = table.entry(hasher(&1), |val| val.0 == 1, |val| hasher(&val.0)) - /// { - /// entry.remove(); - /// } - /// if let Entry::Vacant(entry) = table.entry(hasher(&2), |val| val.0 == 2, |val| hasher(&val.0)) { - /// entry.insert((2, "b")); - /// } - /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None); - /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), Some(&(2, "b"))); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn entry( - &mut self, - hash: u64, - eq: impl FnMut(&T) -> bool, - hasher: impl Fn(&T) -> u64, - ) -> Entry<'_, T, A> { - match self.raw.find_or_find_insert_slot(hash, eq, hasher) { - Ok(bucket) => Entry::Occupied(OccupiedEntry { - hash, - bucket, - table: self, - }), - Err(insert_slot) => Entry::Vacant(VacantEntry { - hash, - insert_slot, - table: self, - }), - } - } - - /// Inserts an element into the `HashTable` with the given hash value, but - /// without checking whether an equivalent element already exists within the - /// table. - /// - /// `hasher` is called if entries need to be moved or copied to a new table. - /// This must return the same hash value that each entry was inserted with. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut v = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// v.insert_unique(hasher(&1), 1, hasher); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn insert_unique( - &mut self, - hash: u64, - value: T, - hasher: impl Fn(&T) -> u64, - ) -> OccupiedEntry<'_, T, A> { - let bucket = self.raw.insert(hash, value, hasher); - OccupiedEntry { - hash, - bucket, - table: self, - } - } - - /// Clears the table, removing all values. - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut v = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// v.insert_unique(hasher(&1), 1, hasher); - /// v.clear(); - /// assert!(v.is_empty()); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn clear(&mut self) { - self.raw.clear(); - } - - /// Shrinks the capacity of the table as much as possible. It will drop - /// down as much as possible while maintaining the internal rules - /// and possibly leaving some space in accordance with the resize policy. - /// - /// `hasher` is called if entries need to be moved or copied to a new table. - /// This must return the same hash value that each entry was inserted with. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::with_capacity(100); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&1), 1, hasher); - /// table.insert_unique(hasher(&2), 2, hasher); - /// assert!(table.capacity() >= 100); - /// table.shrink_to_fit(hasher); - /// assert!(table.capacity() >= 2); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn shrink_to_fit(&mut self, hasher: impl Fn(&T) -> u64) { - self.raw.shrink_to(self.len(), hasher) - } - - /// Shrinks the capacity of the table with a lower limit. It will drop - /// down no lower than the supplied limit while maintaining the internal rules - /// and possibly leaving some space in accordance with the resize policy. - /// - /// `hasher` is called if entries need to be moved or copied to a new table. - /// This must return the same hash value that each entry was inserted with. - /// - /// Panics if the current capacity is smaller than the supplied - /// minimum capacity. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::with_capacity(100); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&1), 1, hasher); - /// table.insert_unique(hasher(&2), 2, hasher); - /// assert!(table.capacity() >= 100); - /// table.shrink_to(10, hasher); - /// assert!(table.capacity() >= 10); - /// table.shrink_to(0, hasher); - /// assert!(table.capacity() >= 2); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn shrink_to(&mut self, min_capacity: usize, hasher: impl Fn(&T) -> u64) { - self.raw.shrink_to(min_capacity, hasher); - } - - /// Reserves capacity for at least `additional` more elements to be inserted - /// in the `HashTable`. The collection may reserve more space to avoid - /// frequent reallocations. - /// - /// `hasher` is called if entries need to be moved or copied to a new table. - /// This must return the same hash value that each entry was inserted with. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program - /// in case of allocation error. Use [`try_reserve`](HashTable::try_reserve) instead - /// if you want to handle memory allocation failure. 
- /// - /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html - /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.reserve(10, hasher); - /// assert!(table.capacity() >= 10); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { - self.raw.reserve(additional, hasher) - } - - /// Tries to reserve capacity for at least `additional` more elements to be inserted - /// in the given `HashTable`. The collection may reserve more space to avoid - /// frequent reallocations. - /// - /// `hasher` is called if entries need to be moved or copied to a new table. - /// This must return the same hash value that each entry was inserted with. - /// - /// # Errors - /// - /// If the capacity overflows, or the allocator reports a failure, then an error - /// is returned. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table - /// .try_reserve(10, hasher) - /// .expect("why is the test harness OOMing on 10 bytes?"); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn try_reserve( - &mut self, - additional: usize, - hasher: impl Fn(&T) -> u64, - ) -> Result<(), TryReserveError> { - self.raw.try_reserve(additional, hasher) - } - - /// Returns the number of elements the table can hold without reallocating. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashTable; - /// let table: HashTable = HashTable::with_capacity(100); - /// assert!(table.capacity() >= 100); - /// ``` - pub fn capacity(&self) -> usize { - self.raw.capacity() - } - - /// Returns the number of elements in the table. - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// let mut v = HashTable::new(); - /// assert_eq!(v.len(), 0); - /// v.insert_unique(hasher(&1), 1, hasher); - /// assert_eq!(v.len(), 1); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn len(&self) -> usize { - self.raw.len() - } - - /// Returns `true` if the set contains no elements. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// let mut v = HashTable::new(); - /// assert!(v.is_empty()); - /// v.insert_unique(hasher(&1), 1, hasher); - /// assert!(!v.is_empty()); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn is_empty(&self) -> bool { - self.raw.is_empty() - } - - /// An iterator visiting all elements in arbitrary order. - /// The iterator element type is `&'a T`. - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&"a"), "b", hasher); - /// table.insert_unique(hasher(&"b"), "b", hasher); - /// - /// // Will print in an arbitrary order. - /// for x in table.iter() { - /// println!("{}", x); - /// } - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn iter(&self) -> Iter<'_, T> { - Iter { - inner: unsafe { self.raw.iter() }, - marker: PhantomData, - } - } - - /// An iterator visiting all elements in arbitrary order, - /// with mutable references to the elements. - /// The iterator element type is `&'a mut T`. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&1), 1, hasher); - /// table.insert_unique(hasher(&2), 2, hasher); - /// table.insert_unique(hasher(&3), 3, hasher); - /// - /// // Update all values - /// for val in table.iter_mut() { - /// *val *= 2; - /// } - /// - /// assert_eq!(table.len(), 3); - /// let mut vec: Vec = Vec::new(); - /// - /// for val in &table { - /// println!("val: {}", val); - /// vec.push(*val); - /// } - /// - /// // The `Iter` iterator produces items in arbitrary order, so the - /// // items must be sorted to test them against a sorted array. - /// vec.sort_unstable(); - /// assert_eq!(vec, [2, 4, 6]); - /// - /// assert_eq!(table.len(), 3); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn iter_mut(&mut self) -> IterMut<'_, T> { - IterMut { - inner: unsafe { self.raw.iter() }, - marker: PhantomData, - } - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all elements `e` such that `f(&e)` returns `false`. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// for x in 1..=6 { - /// table.insert_unique(hasher(&x), x, hasher); - /// } - /// table.retain(|&mut x| x % 2 == 0); - /// assert_eq!(table.len(), 3); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn retain(&mut self, mut f: impl FnMut(&mut T) -> bool) { - // Here we only use `iter` as a temporary, preventing use-after-free - unsafe { - for item in self.raw.iter() { - if !f(item.as_mut()) { - self.raw.erase(item); - } - } - } - } - - /// Clears the set, returning all elements in an iterator. - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// for x in 1..=3 { - /// table.insert_unique(hasher(&x), x, hasher); - /// } - /// assert!(!table.is_empty()); - /// - /// // print 1, 2, 3 in an arbitrary order - /// for i in table.drain() { - /// println!("{}", i); - /// } - /// - /// assert!(table.is_empty()); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn drain(&mut self) -> Drain<'_, T, A> { - Drain { - inner: self.raw.drain(), - } - } - - /// Drains elements which are true under the given predicate, - /// and returns an iterator over the removed items. - /// - /// In other words, move all elements `e` such that `f(&e)` returns `true` out - /// into another iterator. 
- /// - /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating - /// or the iteration short-circuits, then the remaining elements will be retained. - /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. - /// - /// [`retain()`]: HashTable::retain - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// for x in 0..8 { - /// table.insert_unique(hasher(&x), x, hasher); - /// } - /// let drained: Vec = table.extract_if(|&mut v| v % 2 == 0).collect(); - /// - /// let mut evens = drained.into_iter().collect::>(); - /// let mut odds = table.into_iter().collect::>(); - /// evens.sort(); - /// odds.sort(); - /// - /// assert_eq!(evens, vec![0, 2, 4, 6]); - /// assert_eq!(odds, vec![1, 3, 5, 7]); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, T, F, A> - where - F: FnMut(&mut T) -> bool, - { - ExtractIf { - f, - inner: RawExtractIf { - iter: unsafe { self.raw.iter() }, - table: &mut self.raw, - }, - } - } - - /// Attempts to get mutable references to `N` values in the map at once. - /// - /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to - /// the `i`th key to be looked up. - /// - /// Returns an array of length `N` with the results of each query. For soundness, at most one - /// mutable reference will be returned to any value. `None` will be returned if any of the - /// keys are duplicates or missing. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::Entry; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut libraries: HashTable<(&str, u32)> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// for (k, v) in [ - /// ("Bodleian Library", 1602), - /// ("Athenæum", 1807), - /// ("Herzogin-Anna-Amalia-Bibliothek", 1691), - /// ("Library of Congress", 1800), - /// ] { - /// libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k)); - /// } - /// - /// let keys = ["Athenæum", "Library of Congress"]; - /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); - /// assert_eq!( - /// got, - /// Some([&mut ("Athenæum", 1807), &mut ("Library of Congress", 1800),]), - /// ); - /// - /// // Missing keys result in None - /// let keys = ["Athenæum", "New York Public Library"]; - /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); - /// assert_eq!(got, None); - /// - /// // Duplicate keys result in None - /// let keys = ["Athenæum", "Athenæum"]; - /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); - /// assert_eq!(got, None); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn get_many_mut( - &mut self, - hashes: [u64; N], - eq: impl FnMut(usize, &T) -> bool, - ) -> Option<[&'_ mut T; N]> { - self.raw.get_many_mut(hashes, eq) - } - - /// Attempts to get mutable references to `N` values in the map at once, without validating that - /// the values are unique. - /// - /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to - /// the `i`th key to be looked up. - /// - /// Returns an array of length `N` with the results of each query. 
`None` will be returned if - /// any of the keys are missing. - /// - /// For a safe alternative see [`get_many_mut`](`HashTable::get_many_mut`). - /// - /// # Safety - /// - /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting - /// references are not used. - /// - /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::Entry; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut libraries: HashTable<(&str, u32)> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// for (k, v) in [ - /// ("Bodleian Library", 1602), - /// ("Athenæum", 1807), - /// ("Herzogin-Anna-Amalia-Bibliothek", 1691), - /// ("Library of Congress", 1800), - /// ] { - /// libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k)); - /// } - /// - /// let keys = ["Athenæum", "Library of Congress"]; - /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); - /// assert_eq!( - /// got, - /// Some([&mut ("Athenæum", 1807), &mut ("Library of Congress", 1800),]), - /// ); - /// - /// // Missing keys result in None - /// let keys = ["Athenæum", "New York Public Library"]; - /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); - /// assert_eq!(got, None); - /// - /// // Duplicate keys result in None - /// let keys = ["Athenæum", "Athenæum"]; - /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); - /// assert_eq!(got, None); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub unsafe fn get_many_unchecked_mut( - &mut self, - hashes: [u64; N], - eq: impl FnMut(usize, &T) -> bool, 
- ) -> Option<[&'_ mut T; N]> { - self.raw.get_many_unchecked_mut(hashes, eq) - } -} - -impl IntoIterator for HashTable -where - A: Allocator, -{ - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> IntoIter { - IntoIter { - inner: self.raw.into_iter(), - } - } -} - -impl<'a, T, A> IntoIterator for &'a HashTable -where - A: Allocator, -{ - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Iter<'a, T> { - self.iter() - } -} - -impl<'a, T, A> IntoIterator for &'a mut HashTable -where - A: Allocator, -{ - type Item = &'a mut T; - type IntoIter = IterMut<'a, T>; - - fn into_iter(self) -> IterMut<'a, T> { - self.iter_mut() - } -} - -impl Default for HashTable -where - A: Allocator + Default, -{ - fn default() -> Self { - Self { - raw: Default::default(), - } - } -} - -impl Clone for HashTable -where - T: Clone, - A: Allocator + Clone, -{ - fn clone(&self) -> Self { - Self { - raw: self.raw.clone(), - } - } -} - -impl fmt::Debug for HashTable -where - T: fmt::Debug, - A: Allocator, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_set().entries(self.iter()).finish() - } -} - -/// A view into a single entry in a table, which may either be vacant or occupied. -/// -/// This `enum` is constructed from the [`entry`] method on [`HashTable`]. 
-/// -/// [`HashTable`]: struct.HashTable.html -/// [`entry`]: struct.HashTable.html#method.entry -/// -/// # Examples -/// -/// ``` -/// # #[cfg(feature = "nightly")] -/// # fn test() { -/// use ahash::AHasher; -/// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry}; -/// use std::hash::{BuildHasher, BuildHasherDefault}; -/// -/// let mut table = HashTable::new(); -/// let hasher = BuildHasherDefault::::default(); -/// let hasher = |val: &_| hasher.hash_one(val); -/// for x in ["a", "b", "c"] { -/// table.insert_unique(hasher(&x), x, hasher); -/// } -/// assert_eq!(table.len(), 3); -/// -/// // Existing value (insert) -/// let entry: Entry<_> = table.entry(hasher(&"a"), |&x| x == "a", hasher); -/// let _raw_o: OccupiedEntry<_, _> = entry.insert("a"); -/// assert_eq!(table.len(), 3); -/// // Nonexistent value (insert) -/// table.entry(hasher(&"d"), |&x| x == "d", hasher).insert("d"); -/// -/// // Existing value (or_insert) -/// table -/// .entry(hasher(&"b"), |&x| x == "b", hasher) -/// .or_insert("b"); -/// // Nonexistent value (or_insert) -/// table -/// .entry(hasher(&"e"), |&x| x == "e", hasher) -/// .or_insert("e"); -/// -/// println!("Our HashTable: {:?}", table); -/// -/// let mut vec: Vec<_> = table.iter().copied().collect(); -/// // The `Iter` iterator produces items in arbitrary order, so the -/// // items must be sorted to test them against a sorted array. -/// vec.sort_unstable(); -/// assert_eq!(vec, ["a", "b", "c", "d", "e"]); -/// # } -/// # fn main() { -/// # #[cfg(feature = "nightly")] -/// # test() -/// # } -/// ``` -pub enum Entry<'a, T, A = Global> -where - A: Allocator, -{ - /// An occupied entry. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry}; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// for x in ["a", "b"] { - /// table.insert_unique(hasher(&x), x, hasher); - /// } - /// - /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) { - /// Entry::Vacant(_) => unreachable!(), - /// Entry::Occupied(_) => {} - /// } - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - Occupied(OccupiedEntry<'a, T, A>), - - /// A vacant entry. - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry}; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table = HashTable::<&str>::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// - /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) { - /// Entry::Vacant(_) => {} - /// Entry::Occupied(_) => unreachable!(), - /// } - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - Vacant(VacantEntry<'a, T, A>), -} - -impl fmt::Debug for Entry<'_, T, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), - Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), - } - } -} - -impl<'a, T, A> Entry<'a, T, A> -where - A: Allocator, -{ - /// Sets the value of the entry, replacing any existing value if there is - /// one, and returns an [`OccupiedEntry`]. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable<&str> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// - /// let entry = table - /// .entry(hasher(&"horseyland"), |&x| x == "horseyland", hasher) - /// .insert("horseyland"); - /// - /// assert_eq!(entry.get(), &"horseyland"); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> { - match self { - Entry::Occupied(mut entry) => { - *entry.get_mut() = value; - entry - } - Entry::Vacant(entry) => entry.insert(value), - } - } - - /// Ensures a value is in the entry by inserting if it was vacant. - /// - /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable<&str> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// - /// // nonexistent key - /// table - /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) - /// .or_insert("poneyland"); - /// assert!(table - /// .find(hasher(&"poneyland"), |&x| x == "poneyland") - /// .is_some()); - /// - /// // existing key - /// table - /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) - /// .or_insert("poneyland"); - /// assert!(table - /// .find(hasher(&"poneyland"), |&x| x == "poneyland") - /// .is_some()); - /// assert_eq!(table.len(), 1); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn or_insert(self, default: T) -> OccupiedEntry<'a, T, A> { - match self { - Entry::Occupied(entry) => entry, - Entry::Vacant(entry) => entry.insert(default), - } - } - - /// Ensures a value is in the entry by inserting the result of the default function if empty.. - /// - /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// - /// table - /// .entry(hasher("poneyland"), |x| x == "poneyland", |val| hasher(val)) - /// .or_insert_with(|| "poneyland".to_string()); - /// - /// assert!(table - /// .find(hasher(&"poneyland"), |x| x == "poneyland") - /// .is_some()); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn or_insert_with(self, default: impl FnOnce() -> T) -> OccupiedEntry<'a, T, A> { - match self { - Entry::Occupied(entry) => entry, - Entry::Vacant(entry) => entry.insert(default()), - } - } - - /// Provides in-place mutable access to an occupied entry before any - /// potential inserts into the table. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable<(&str, u32)> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// - /// table - /// .entry( - /// hasher(&"poneyland"), - /// |&(x, _)| x == "poneyland", - /// |(k, _)| hasher(&k), - /// ) - /// .and_modify(|(_, v)| *v += 1) - /// .or_insert(("poneyland", 42)); - /// assert_eq!( - /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"), - /// Some(&("poneyland", 42)) - /// ); - /// - /// table - /// .entry( - /// hasher(&"poneyland"), - /// |&(x, _)| x == "poneyland", - /// |(k, _)| hasher(&k), - /// ) - /// .and_modify(|(_, v)| *v += 1) - /// .or_insert(("poneyland", 42)); - /// assert_eq!( - /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"), - /// Some(&("poneyland", 43)) - /// ); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn and_modify(self, f: impl FnOnce(&mut T)) -> Self { - match self { - Entry::Occupied(mut entry) => { - f(entry.get_mut()); - Entry::Occupied(entry) - } - Entry::Vacant(entry) => Entry::Vacant(entry), - } - } -} - -/// A view into an occupied entry in a `HashTable`. -/// It is part of the [`Entry`] enum. 
-/// -/// [`Entry`]: enum.Entry.html -/// -/// # Examples -/// -/// ``` -/// # #[cfg(feature = "nightly")] -/// # fn test() { -/// use ahash::AHasher; -/// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry}; -/// use std::hash::{BuildHasher, BuildHasherDefault}; -/// -/// let mut table = HashTable::new(); -/// let hasher = BuildHasherDefault::::default(); -/// let hasher = |val: &_| hasher.hash_one(val); -/// for x in ["a", "b", "c"] { -/// table.insert_unique(hasher(&x), x, hasher); -/// } -/// assert_eq!(table.len(), 3); -/// -/// let _entry_o: OccupiedEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap(); -/// assert_eq!(table.len(), 3); -/// -/// // Existing key -/// match table.entry(hasher(&"a"), |&x| x == "a", hasher) { -/// Entry::Vacant(_) => unreachable!(), -/// Entry::Occupied(view) => { -/// assert_eq!(view.get(), &"a"); -/// } -/// } -/// -/// assert_eq!(table.len(), 3); -/// -/// // Existing key (take) -/// match table.entry(hasher(&"c"), |&x| x == "c", hasher) { -/// Entry::Vacant(_) => unreachable!(), -/// Entry::Occupied(view) => { -/// assert_eq!(view.remove().0, "c"); -/// } -/// } -/// assert_eq!(table.find(hasher(&"c"), |&x| x == "c"), None); -/// assert_eq!(table.len(), 2); -/// # } -/// # fn main() { -/// # #[cfg(feature = "nightly")] -/// # test() -/// # } -/// ``` -pub struct OccupiedEntry<'a, T, A = Global> -where - A: Allocator, -{ - hash: u64, - bucket: Bucket, - table: &'a mut HashTable, -} - -unsafe impl Send for OccupiedEntry<'_, T, A> -where - T: Send, - A: Send + Allocator, -{ -} -unsafe impl Sync for OccupiedEntry<'_, T, A> -where - T: Sync, - A: Sync + Allocator, -{ -} - -impl fmt::Debug for OccupiedEntry<'_, T, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OccupiedEntry") - .field("value", self.get()) - .finish() - } -} - -impl<'a, T, A> OccupiedEntry<'a, T, A> -where - A: Allocator, -{ - /// Takes the value out of the entry, and returns it along with a - /// 
`VacantEntry` that can be used to insert another value with the same - /// hash as the one that was just removed. - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::Entry; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable<&str> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// // The table is empty - /// assert!(table.is_empty() && table.capacity() == 0); - /// - /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher); - /// let capacity_before_remove = table.capacity(); - /// - /// if let Entry::Occupied(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) { - /// assert_eq!(o.remove().0, "poneyland"); - /// } - /// - /// assert!(table - /// .find(hasher(&"poneyland"), |&x| x == "poneyland") - /// .is_none()); - /// // Now table hold none elements but capacity is equal to the old one - /// assert!(table.len() == 0 && table.capacity() == capacity_before_remove); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn remove(self) -> (T, VacantEntry<'a, T, A>) { - let (val, slot) = unsafe { self.table.raw.remove(self.bucket) }; - ( - val, - VacantEntry { - hash: self.hash, - insert_slot: slot, - table: self.table, - }, - ) - } - - /// Gets a reference to the value in the entry. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::Entry; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable<&str> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher); - /// - /// match table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) { - /// Entry::Vacant(_) => panic!(), - /// Entry::Occupied(entry) => assert_eq!(entry.get(), &"poneyland"), - /// } - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn get(&self) -> &T { - unsafe { self.bucket.as_ref() } - } - - /// Gets a mutable reference to the value in the entry. - /// - /// If you need a reference to the `OccupiedEntry` which may outlive the - /// destruction of the `Entry` value, see [`into_mut`]. 
- /// - /// [`into_mut`]: #method.into_mut - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::Entry; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable<(&str, u32)> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k)); - /// - /// assert_eq!( - /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), - /// Some(&("poneyland", 12)) - /// ); - /// - /// if let Entry::Occupied(mut o) = table.entry( - /// hasher(&"poneyland"), - /// |&(x, _)| x == "poneyland", - /// |(k, _)| hasher(&k), - /// ) { - /// o.get_mut().1 += 10; - /// assert_eq!(o.get().1, 22); - /// - /// // We can use the same Entry multiple times. - /// o.get_mut().1 += 2; - /// } - /// - /// assert_eq!( - /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), - /// Some(&("poneyland", 24)) - /// ); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn get_mut(&mut self) -> &mut T { - unsafe { self.bucket.as_mut() } - } - - /// Converts the OccupiedEntry into a mutable reference to the value in the entry - /// with a lifetime bound to the table itself. - /// - /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`]. 
- /// - /// [`get_mut`]: #method.get_mut - /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::Entry; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable<(&str, u32)> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k)); - /// - /// assert_eq!( - /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), - /// Some(&("poneyland", 12)) - /// ); - /// - /// let value: &mut (&str, u32); - /// match table.entry( - /// hasher(&"poneyland"), - /// |&(x, _)| x == "poneyland", - /// |(k, _)| hasher(&k), - /// ) { - /// Entry::Occupied(entry) => value = entry.into_mut(), - /// Entry::Vacant(_) => panic!(), - /// } - /// value.1 += 10; - /// - /// assert_eq!( - /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), - /// Some(&("poneyland", 22)) - /// ); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn into_mut(self) -> &'a mut T { - unsafe { self.bucket.as_mut() } - } - - /// Converts the OccupiedEntry into a mutable reference to the underlying - /// table. - pub fn into_table(self) -> &'a mut HashTable { - self.table - } -} - -/// A view into a vacant entry in a `HashTable`. -/// It is part of the [`Entry`] enum. 
-/// -/// [`Entry`]: enum.Entry.html -/// -/// # Examples -/// -/// ``` -/// # #[cfg(feature = "nightly")] -/// # fn test() { -/// use ahash::AHasher; -/// use hashbrown::hash_table::{Entry, HashTable, VacantEntry}; -/// use std::hash::{BuildHasher, BuildHasherDefault}; -/// -/// let mut table: HashTable<&str> = HashTable::new(); -/// let hasher = BuildHasherDefault::::default(); -/// let hasher = |val: &_| hasher.hash_one(val); -/// -/// let entry_v: VacantEntry<_, _> = match table.entry(hasher(&"a"), |&x| x == "a", hasher) { -/// Entry::Vacant(view) => view, -/// Entry::Occupied(_) => unreachable!(), -/// }; -/// entry_v.insert("a"); -/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1); -/// -/// // Nonexistent key (insert) -/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) { -/// Entry::Vacant(view) => { -/// view.insert("b"); -/// } -/// Entry::Occupied(_) => unreachable!(), -/// } -/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2); -/// # } -/// # fn main() { -/// # #[cfg(feature = "nightly")] -/// # test() -/// # } -/// ``` -pub struct VacantEntry<'a, T, A = Global> -where - A: Allocator, -{ - hash: u64, - insert_slot: InsertSlot, - table: &'a mut HashTable, -} - -impl fmt::Debug for VacantEntry<'_, T, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("VacantEntry") - } -} - -impl<'a, T, A> VacantEntry<'a, T, A> -where - A: Allocator, -{ - /// Inserts a new element into the table with the hash that was used to - /// obtain the `VacantEntry`. - /// - /// An `OccupiedEntry` is returned for the newly inserted element. 
- /// - /// # Examples - /// - /// ``` - /// # #[cfg(feature = "nightly")] - /// # fn test() { - /// use ahash::AHasher; - /// use hashbrown::hash_table::Entry; - /// use hashbrown::HashTable; - /// use std::hash::{BuildHasher, BuildHasherDefault}; - /// - /// let mut table: HashTable<&str> = HashTable::new(); - /// let hasher = BuildHasherDefault::::default(); - /// let hasher = |val: &_| hasher.hash_one(val); - /// - /// if let Entry::Vacant(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) { - /// o.insert("poneyland"); - /// } - /// assert_eq!( - /// table.find(hasher(&"poneyland"), |&x| x == "poneyland"), - /// Some(&"poneyland") - /// ); - /// # } - /// # fn main() { - /// # #[cfg(feature = "nightly")] - /// # test() - /// # } - /// ``` - pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> { - let bucket = unsafe { - self.table - .raw - .insert_in_slot(self.hash, self.insert_slot, value) - }; - OccupiedEntry { - hash: self.hash, - bucket, - table: self.table, - } - } - - /// Converts the VacantEntry into a mutable reference to the underlying - /// table. - pub fn into_table(self) -> &'a mut HashTable { - self.table - } -} - -/// Type representing the absence of an entry, as returned by [`HashTable::find_entry`]. -/// -/// This type only exists due to [limitations] in Rust's NLL borrow checker. In -/// the future, `find_entry` will return an `Option` and this -/// type will be removed. 
-/// -/// [limitations]: https://smallcultfollowing.com/babysteps/blog/2018/06/15/mir-based-borrow-check-nll-status-update/#polonius -/// -/// # Examples -/// -/// ``` -/// # #[cfg(feature = "nightly")] -/// # fn test() { -/// use ahash::AHasher; -/// use hashbrown::hash_table::{AbsentEntry, Entry, HashTable}; -/// use std::hash::{BuildHasher, BuildHasherDefault}; -/// -/// let mut table: HashTable<&str> = HashTable::new(); -/// let hasher = BuildHasherDefault::::default(); -/// let hasher = |val: &_| hasher.hash_one(val); -/// -/// let entry_v: AbsentEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap_err(); -/// entry_v -/// .into_table() -/// .insert_unique(hasher(&"a"), "a", hasher); -/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1); -/// -/// // Nonexistent key (insert) -/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) { -/// Entry::Vacant(view) => { -/// view.insert("b"); -/// } -/// Entry::Occupied(_) => unreachable!(), -/// } -/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2); -/// # } -/// # fn main() { -/// # #[cfg(feature = "nightly")] -/// # test() -/// # } -/// ``` -pub struct AbsentEntry<'a, T, A = Global> -where - A: Allocator, -{ - table: &'a mut HashTable, -} - -impl fmt::Debug for AbsentEntry<'_, T, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("AbsentEntry") - } -} - -impl<'a, T, A> AbsentEntry<'a, T, A> -where - A: Allocator, -{ - /// Converts the AbsentEntry into a mutable reference to the underlying - /// table. - pub fn into_table(self) -> &'a mut HashTable { - self.table - } -} - -/// An iterator over the entries of a `HashTable` in arbitrary order. -/// The iterator element type is `&'a T`. -/// -/// This `struct` is created by the [`iter`] method on [`HashTable`]. See its -/// documentation for more. 
-/// -/// [`iter`]: struct.HashTable.html#method.iter -/// [`HashTable`]: struct.HashTable.html -pub struct Iter<'a, T> { - inner: RawIter, - marker: PhantomData<&'a T>, -} - -impl<'a, T> Iterator for Iter<'a, T> { - type Item = &'a T; - - fn next(&mut self) -> Option { - // Avoid `Option::map` because it bloats LLVM IR. - match self.inner.next() { - Some(bucket) => Some(unsafe { bucket.as_ref() }), - None => None, - } - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } - - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner - .fold(init, |acc, bucket| unsafe { f(acc, bucket.as_ref()) }) - } -} - -impl ExactSizeIterator for Iter<'_, T> { - fn len(&self) -> usize { - self.inner.len() - } -} - -impl FusedIterator for Iter<'_, T> {} - -/// A mutable iterator over the entries of a `HashTable` in arbitrary order. -/// The iterator element type is `&'a mut T`. -/// -/// This `struct` is created by the [`iter_mut`] method on [`HashTable`]. See its -/// documentation for more. -/// -/// [`iter_mut`]: struct.HashTable.html#method.iter_mut -/// [`HashTable`]: struct.HashTable.html -pub struct IterMut<'a, T> { - inner: RawIter, - marker: PhantomData<&'a mut T>, -} - -impl<'a, T> Iterator for IterMut<'a, T> { - type Item = &'a mut T; - - fn next(&mut self) -> Option { - // Avoid `Option::map` because it bloats LLVM IR. - match self.inner.next() { - Some(bucket) => Some(unsafe { bucket.as_mut() }), - None => None, - } - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } - - fn fold(self, init: B, mut f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner - .fold(init, |acc, bucket| unsafe { f(acc, bucket.as_mut()) }) - } -} - -impl ExactSizeIterator for IterMut<'_, T> { - fn len(&self) -> usize { - self.inner.len() - } -} - -impl FusedIterator for IterMut<'_, T> {} - -/// An owning iterator over the entries of a `HashTable` in arbitrary order. 
-/// The iterator element type is `T`. -/// -/// This `struct` is created by the [`into_iter`] method on [`HashTable`] -/// (provided by the [`IntoIterator`] trait). See its documentation for more. -/// The table cannot be used after calling that method. -/// -/// [`into_iter`]: struct.HashTable.html#method.into_iter -/// [`HashTable`]: struct.HashTable.html -/// [`IntoIterator`]: https://doc.rust-lang.org/core/iter/trait.IntoIterator.html -pub struct IntoIter -where - A: Allocator, -{ - inner: RawIntoIter, -} - -impl Iterator for IntoIter -where - A: Allocator, -{ - type Item = T; - - fn next(&mut self) -> Option { - self.inner.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } - - fn fold(self, init: B, f: F) -> B - where - Self: Sized, - F: FnMut(B, Self::Item) -> B, - { - self.inner.fold(init, f) - } -} - -impl ExactSizeIterator for IntoIter -where - A: Allocator, -{ - fn len(&self) -> usize { - self.inner.len() - } -} - -impl FusedIterator for IntoIter where A: Allocator {} - -/// A draining iterator over the items of a `HashTable`. -/// -/// This `struct` is created by the [`drain`] method on [`HashTable`]. -/// See its documentation for more. -/// -/// [`HashTable`]: struct.HashTable.html -/// [`drain`]: struct.HashTable.html#method.drain -pub struct Drain<'a, T, A: Allocator = Global> { - inner: RawDrain<'a, T, A>, -} - -impl Drain<'_, T, A> { - /// Returns a iterator of references over the remaining items. 
- fn iter(&self) -> Iter<'_, T> { - Iter { - inner: self.inner.iter(), - marker: PhantomData, - } - } -} - -impl Iterator for Drain<'_, T, A> { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.next() - } - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} -impl ExactSizeIterator for Drain<'_, T, A> { - fn len(&self) -> usize { - self.inner.len() - } -} -impl FusedIterator for Drain<'_, T, A> {} - -impl fmt::Debug for Drain<'_, T, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.iter()).finish() - } -} - -/// A draining iterator over entries of a `HashTable` which don't satisfy the predicate `f`. -/// -/// This `struct` is created by [`HashTable::extract_if`]. See its -/// documentation for more. -#[must_use = "Iterators are lazy unless consumed"] -pub struct ExtractIf<'a, T, F, A: Allocator = Global> -where - F: FnMut(&mut T) -> bool, -{ - f: F, - inner: RawExtractIf<'a, T, A>, -} - -impl Iterator for ExtractIf<'_, T, F, A> -where - F: FnMut(&mut T) -> bool, -{ - type Item = T; - - #[inline] - fn next(&mut self) -> Option { - self.inner.next(|val| (self.f)(val)) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - (0, self.inner.iter.size_hint().1) - } -} - -impl FusedIterator for ExtractIf<'_, T, F, A> where F: FnMut(&mut T) -> bool {} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/benches/bench.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/benches/bench.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/benches/bench.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/benches/bench.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,331 +0,0 @@ -// This benchmark suite contains some benchmarks along a set of dimensions: -// Hasher: std default (SipHash) and crate default (AHash). -// Int key distribution: low bit heavy, top bit heavy, and random. 
-// Task: basic functionality: insert, insert_erase, lookup, lookup_fail, iter -#![feature(test)] - -extern crate test; - -use test::{black_box, Bencher}; - -use hashbrown::hash_map::DefaultHashBuilder; -use hashbrown::{HashMap, HashSet}; -use std::{ - collections::hash_map::RandomState, - sync::atomic::{self, AtomicUsize}, -}; - -const SIZE: usize = 1000; - -// The default hashmap when using this crate directly. -type AHashMap = HashMap; -// This uses the hashmap from this crate with the default hasher of the stdlib. -type StdHashMap = HashMap; - -// A random key iterator. -#[derive(Clone, Copy)] -struct RandomKeys { - state: usize, -} - -impl RandomKeys { - fn new() -> Self { - RandomKeys { state: 0 } - } -} - -impl Iterator for RandomKeys { - type Item = usize; - fn next(&mut self) -> Option { - // Add 1 then multiply by some 32 bit prime. - self.state = self.state.wrapping_add(1).wrapping_mul(3_787_392_781); - Some(self.state) - } -} - -// Just an arbitrary side effect to make the maps not shortcircuit to the non-dropping path -// when dropping maps/entries (most real world usages likely have drop in the key or value) -lazy_static::lazy_static! { - static ref SIDE_EFFECT: AtomicUsize = AtomicUsize::new(0); -} - -#[derive(Clone)] -struct DropType(usize); -impl Drop for DropType { - fn drop(&mut self) { - SIDE_EFFECT.fetch_add(self.0, atomic::Ordering::SeqCst); - } -} - -macro_rules! 
bench_suite { - ($bench_macro:ident, $bench_ahash_serial:ident, $bench_std_serial:ident, - $bench_ahash_highbits:ident, $bench_std_highbits:ident, - $bench_ahash_random:ident, $bench_std_random:ident) => { - $bench_macro!($bench_ahash_serial, AHashMap, 0..); - $bench_macro!($bench_std_serial, StdHashMap, 0..); - $bench_macro!( - $bench_ahash_highbits, - AHashMap, - (0..).map(usize::swap_bytes) - ); - $bench_macro!( - $bench_std_highbits, - StdHashMap, - (0..).map(usize::swap_bytes) - ); - $bench_macro!($bench_ahash_random, AHashMap, RandomKeys::new()); - $bench_macro!($bench_std_random, StdHashMap, RandomKeys::new()); - }; -} - -macro_rules! bench_insert { - ($name:ident, $maptype:ident, $keydist:expr) => { - #[bench] - fn $name(b: &mut Bencher) { - let mut m = $maptype::with_capacity_and_hasher(SIZE, Default::default()); - b.iter(|| { - m.clear(); - for i in ($keydist).take(SIZE) { - m.insert(i, (DropType(i), [i; 20])); - } - black_box(&mut m); - }); - eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst)); - } - }; -} - -bench_suite!( - bench_insert, - insert_ahash_serial, - insert_std_serial, - insert_ahash_highbits, - insert_std_highbits, - insert_ahash_random, - insert_std_random -); - -macro_rules! bench_grow_insert { - ($name:ident, $maptype:ident, $keydist:expr) => { - #[bench] - fn $name(b: &mut Bencher) { - b.iter(|| { - let mut m = $maptype::default(); - for i in ($keydist).take(SIZE) { - m.insert(i, DropType(i)); - } - black_box(&mut m); - }) - } - }; -} - -bench_suite!( - bench_grow_insert, - grow_insert_ahash_serial, - grow_insert_std_serial, - grow_insert_ahash_highbits, - grow_insert_std_highbits, - grow_insert_ahash_random, - grow_insert_std_random -); - -macro_rules! 
bench_insert_erase { - ($name:ident, $maptype:ident, $keydist:expr) => { - #[bench] - fn $name(b: &mut Bencher) { - let mut base = $maptype::default(); - for i in ($keydist).take(SIZE) { - base.insert(i, DropType(i)); - } - let skip = $keydist.skip(SIZE); - b.iter(|| { - let mut m = base.clone(); - let mut add_iter = skip.clone(); - let mut remove_iter = $keydist; - // While keeping the size constant, - // replace the first keydist with the second. - for (add, remove) in (&mut add_iter).zip(&mut remove_iter).take(SIZE) { - m.insert(add, DropType(add)); - black_box(m.remove(&remove)); - } - black_box(m); - }); - eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst)); - } - }; -} - -bench_suite!( - bench_insert_erase, - insert_erase_ahash_serial, - insert_erase_std_serial, - insert_erase_ahash_highbits, - insert_erase_std_highbits, - insert_erase_ahash_random, - insert_erase_std_random -); - -macro_rules! bench_lookup { - ($name:ident, $maptype:ident, $keydist:expr) => { - #[bench] - fn $name(b: &mut Bencher) { - let mut m = $maptype::default(); - for i in $keydist.take(SIZE) { - m.insert(i, DropType(i)); - } - - b.iter(|| { - for i in $keydist.take(SIZE) { - black_box(m.get(&i)); - } - }); - eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst)); - } - }; -} - -bench_suite!( - bench_lookup, - lookup_ahash_serial, - lookup_std_serial, - lookup_ahash_highbits, - lookup_std_highbits, - lookup_ahash_random, - lookup_std_random -); - -macro_rules! 
bench_lookup_fail { - ($name:ident, $maptype:ident, $keydist:expr) => { - #[bench] - fn $name(b: &mut Bencher) { - let mut m = $maptype::default(); - let mut iter = $keydist; - for i in (&mut iter).take(SIZE) { - m.insert(i, DropType(i)); - } - - b.iter(|| { - for i in (&mut iter).take(SIZE) { - black_box(m.get(&i)); - } - }) - } - }; -} - -bench_suite!( - bench_lookup_fail, - lookup_fail_ahash_serial, - lookup_fail_std_serial, - lookup_fail_ahash_highbits, - lookup_fail_std_highbits, - lookup_fail_ahash_random, - lookup_fail_std_random -); - -macro_rules! bench_iter { - ($name:ident, $maptype:ident, $keydist:expr) => { - #[bench] - fn $name(b: &mut Bencher) { - let mut m = $maptype::default(); - for i in ($keydist).take(SIZE) { - m.insert(i, DropType(i)); - } - - b.iter(|| { - for i in &m { - black_box(i); - } - }) - } - }; -} - -bench_suite!( - bench_iter, - iter_ahash_serial, - iter_std_serial, - iter_ahash_highbits, - iter_std_highbits, - iter_ahash_random, - iter_std_random -); - -#[bench] -fn clone_small(b: &mut Bencher) { - let mut m = HashMap::new(); - for i in 0..10 { - m.insert(i, DropType(i)); - } - - b.iter(|| { - black_box(m.clone()); - }) -} - -#[bench] -fn clone_from_small(b: &mut Bencher) { - let mut m = HashMap::new(); - let mut m2 = HashMap::new(); - for i in 0..10 { - m.insert(i, DropType(i)); - } - - b.iter(|| { - m2.clone_from(&m); - black_box(&mut m2); - }) -} - -#[bench] -fn clone_large(b: &mut Bencher) { - let mut m = HashMap::new(); - for i in 0..1000 { - m.insert(i, DropType(i)); - } - - b.iter(|| { - black_box(m.clone()); - }) -} - -#[bench] -fn clone_from_large(b: &mut Bencher) { - let mut m = HashMap::new(); - let mut m2 = HashMap::new(); - for i in 0..1000 { - m.insert(i, DropType(i)); - } - - b.iter(|| { - m2.clone_from(&m); - black_box(&mut m2); - }) -} - -#[bench] -fn rehash_in_place(b: &mut Bencher) { - b.iter(|| { - let mut set = HashSet::new(); - - // Each loop triggers one rehash - for _ in 0..10 { - for i in 0..224 { - 
set.insert(i); - } - - assert_eq!( - set.capacity(), - 224, - "The set must be at or close to capacity to trigger a re hashing" - ); - - for i in 100..1400 { - set.remove(&(i - 100)); - set.insert(i); - } - set.clear(); - } - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/benches/insert_unique_unchecked.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/benches/insert_unique_unchecked.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/benches/insert_unique_unchecked.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/benches/insert_unique_unchecked.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,32 +0,0 @@ -//! Compare `insert` and `insert_unique_unchecked` operations performance. - -#![feature(test)] - -extern crate test; - -use hashbrown::HashMap; -use test::Bencher; - -#[bench] -fn insert(b: &mut Bencher) { - let keys: Vec = (0..1000).map(|i| format!("xxxx{}yyyy", i)).collect(); - b.iter(|| { - let mut m = HashMap::with_capacity(1000); - for k in &keys { - m.insert(k, k); - } - m - }); -} - -#[bench] -fn insert_unique_unchecked(b: &mut Bencher) { - let keys: Vec = (0..1000).map(|i| format!("xxxx{}yyyy", i)).collect(); - b.iter(|| { - let mut m = HashMap::with_capacity(1000); - for k in &keys { - m.insert_unique_unchecked(k, k); - } - m - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/Cargo.toml s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/Cargo.toml --- 
s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,113 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.56.0" -name = "hashbrown" -version = "0.12.3" -authors = ["Amanieu d'Antras "] -exclude = [ - ".github", - "/ci/*", -] -description = "A Rust port of Google's SwissTable hash map" -readme = "README.md" -keywords = [ - "hash", - "no_std", - "hashmap", - "swisstable", -] -categories = [ - "data-structures", - "no-std", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/hashbrown" -resolver = "2" - -[package.metadata.docs.rs] -features = [ - "nightly", - "rayon", - "serde", - "raw", -] - -[dependencies.ahash] -version = "0.7.0" -optional = true -default-features = false - -[dependencies.alloc] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-alloc" - -[dependencies.bumpalo] -version = "3.5.0" -optional = true - -[dependencies.compiler_builtins] -version = "0.1.2" -optional = true - -[dependencies.core] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-core" - -[dependencies.rayon] -version = "1.0" -optional = true - -[dependencies.serde] -version = "1.0.25" -optional = true -default-features = false - -[dev-dependencies.doc-comment] -version = "0.3.1" - -[dev-dependencies.fnv] -version = "1.0.7" - -[dev-dependencies.lazy_static] -version = "1.4" - -[dev-dependencies.rand] -version = 
"0.8.3" -features = ["small_rng"] - -[dev-dependencies.rayon] -version = "1.0" - -[dev-dependencies.serde_test] -version = "1.0" - -[features] -ahash-compile-time-rng = ["ahash/compile-time-rng"] -default = [ - "ahash", - "inline-more", -] -inline-more = [] -nightly = [] -raw = [] -rustc-dep-of-std = [ - "nightly", - "core", - "compiler_builtins", - "alloc", - "rustc-internal-api", -] -rustc-internal-api = [] diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/CHANGELOG.md s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,402 +0,0 @@ -# Change Log - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/) -and this project adheres to [Semantic Versioning](https://semver.org/). - -## [Unreleased] - -## [v0.12.3] - 2022-07-17 - -## Fixed - -- Fixed double-drop in `RawTable::clone_from`. (#348) - -## [v0.12.2] - 2022-07-09 - -## Added - -- Added `Entry` API for `HashSet`. (#342) -- Added `Extend<&'a (K, V)> for HashMap`. (#340) -- Added length-based short-circuiting for hash table iteration. (#338) -- Added a function to access the `RawTable` of a `HashMap`. (#335) - -## Changed - -- Edited `do_alloc` to reduce LLVM IR generated. (#341) - -## [v0.12.1] - 2022-05-02 - -## Fixed - -- Fixed underflow in `RawIterRange::size_hint`. (#325) -- Fixed the implementation of `Debug` for `ValuesMut` and `IntoValues`. (#325) - -## [v0.12.0] - 2022-01-17 - -## Added - -- Added `From<[T; N]>` and `From<[(K, V); N]>` for `HashSet` and `HashMap` respectively. (#297) -- Added an `allocator()` getter to HashMap and HashSet. (#257) -- Added `insert_unique_unchecked` to `HashMap` and `HashSet`. (#293) -- Added `into_keys` and `into_values` to HashMap. 
(#295) -- Implement `From` on `HashSet` and `HashMap`. (#298) -- Added `entry_ref` API to `HashMap`. (#201) - -## Changed - -- Bumped minimum Rust version to 1.56.1 and edition to 2021. -- Use u64 for the GroupWord on WebAssembly. (#271) -- Optimized `find`. (#279) -- Made rehashing and resizing less generic to reduce compilation time. (#282) -- Inlined small functions. (#283) -- Use `BuildHasher::hash_one` when `feature = "nightly"` is enabled. (#292) -- Relaxed the bounds on `Debug` for `HashSet`. (#296) -- Rename `get_each_mut` to `get_many_mut` and align API with the stdlib. (#291) -- Don't hash the key when searching in an empty table. (#305) - -## Fixed - -- Guard against allocations exceeding isize::MAX. (#268) -- Made `RawTable::insert_no_grow` unsafe. (#254) -- Inline `static_empty`. (#280) -- Fixed trait bounds on Send/Sync impls. (#303) - -## [v0.11.2] - 2021-03-25 - -## Fixed - -- Added missing allocator type parameter to `HashMap`'s and `HashSet`'s `Clone` impls. (#252) - -## [v0.11.1] - 2021-03-20 - -## Fixed - -- Added missing `pub` modifier to `BumpWrapper`. (#251) - -## [v0.11.0] - 2021-03-14 - -## Added -- Added safe `try_insert_no_grow` method to `RawTable`. (#229) -- Added support for `bumpalo` as an allocator without the `nightly` feature. (#231) -- Implemented `Default` for `RawTable`. (#237) -- Added new safe methods `RawTable::get_each_mut`, `HashMap::get_each_mut`, and - `HashMap::get_each_key_value_mut`. (#239) -- Added `From>` for `HashSet`. (#235) -- Added `try_insert` method to `HashMap`. (#247) - -## Changed -- The minimum Rust version has been bumped to 1.49.0. (#230) -- Significantly improved compilation times by reducing the amount of generated IR. (#205) - -## Removed -- We no longer re-export the unstable allocator items from the standard library, nor the stable shims approximating the same. (#227) -- Removed hasher specialization support from `aHash`, which was resulting in inconsistent hashes being generated for a key. 
(#248) - -## Fixed -- Fixed union length comparison. (#228) - -## ~~[v0.10.0] - 2021-01-16~~ - -This release was _yanked_ due to inconsistent hashes being generated with the `nightly` feature. (#248) - -## Changed -- Parametrized `RawTable`, `HashSet` and `HashMap` over an allocator. (#133) -- Improved branch prediction hints on stable. (#209) -- Optimized hashing of primitive types with AHash using specialization. (#207) -- Only instantiate `RawTable`'s reserve functions once per key-value. (#204) - -## [v0.9.1] - 2020-09-28 - -## Added -- Added safe methods to `RawTable` (#202): - - `get`: `find` and `as_ref` - - `get_mut`: `find` and `as_mut` - - `insert_entry`: `insert` and `as_mut` - - `remove_entry`: `find` and `remove` - - `erase_entry`: `find` and `erase` - -## Changed -- Removed `from_key_hashed_nocheck`'s `Q: Hash`. (#200) -- Made `RawTable::drain` safe. (#201) - -## [v0.9.0] - 2020-09-03 - -### Fixed -- `drain_filter` now removes and yields items that do match the predicate, - rather than items that don't. This is a **breaking change** to match the - behavior of the `drain_filter` methods in `std`. (#187) - -### Added -- Added `replace_entry_with` to `OccupiedEntry`, and `and_replace_entry_with` to `Entry`. (#190) -- Implemented `FusedIterator` and `size_hint` for `DrainFilter`. (#188) - -### Changed -- The minimum Rust version has been bumped to 1.36 (due to `crossbeam` dependency). (#193) -- Updated `ahash` dependency to 0.4. (#198) -- `HashMap::with_hasher` and `HashSet::with_hasher` are now `const fn`. (#195) -- Removed `T: Hash + Eq` and `S: BuildHasher` bounds on `HashSet::new`, - `with_capacity`, `with_hasher`, and `with_capacity_and_hasher`. (#185) - -## [v0.8.2] - 2020-08-08 - -### Changed -- Avoid closures to improve compile times. (#183) -- Do not iterate to drop if empty. (#182) - -## [v0.8.1] - 2020-07-16 - -### Added -- Added `erase` and `remove` to `RawTable`. (#171) -- Added `try_with_capacity` to `RawTable`. 
(#174) -- Added methods that allow re-using a `RawIter` for `RawDrain`, - `RawIntoIter`, and `RawParIter`. (#175) -- Added `reflect_remove` and `reflect_insert` to `RawIter`. (#175) -- Added a `drain_filter` function to `HashSet`. (#179) - -### Changed -- Deprecated `RawTable::erase_no_drop` in favor of `erase` and `remove`. (#176) -- `insert_no_grow` is now exposed under the `"raw"` feature. (#180) - -## [v0.8.0] - 2020-06-18 - -### Fixed -- Marked `RawTable::par_iter` as `unsafe`. (#157) - -### Changed -- Reduced the size of `HashMap`. (#159) -- No longer create tables with a capacity of 1 element. (#162) -- Removed `K: Eq + Hash` bounds on `retain`. (#163) -- Pulled in `HashMap` changes from rust-lang/rust (#164): - - `extend_one` support on nightly. - - `CollectionAllocErr` renamed to `TryReserveError`. - - Added `HashSet::get_or_insert_owned`. - - `Default` for `HashSet` no longer requires `T: Eq + Hash` and `S: BuildHasher`. - -## [v0.7.2] - 2020-04-27 - -### Added -- Added `or_insert_with_key` to `Entry`. (#152) - -### Fixed -- Partially reverted `Clone` optimization which was unsound. (#154) - -### Changed -- Disabled use of `const-random` by default, which prevented reproducible builds. (#155) -- Optimized `repeat` function. (#150) -- Use `NonNull` for buckets, which improves codegen for iterators. (#148) - -## [v0.7.1] - 2020-03-16 - -### Added -- Added `HashMap::get_key_value_mut`. (#145) - -### Changed -- Optimized `Clone` implementation. (#146) - -## [v0.7.0] - 2020-01-31 - -### Added -- Added a `drain_filter` function to `HashMap`. (#135) - -### Changed -- Updated `ahash` dependency to 0.3. (#141) -- Optimized set union and intersection. (#130) -- `raw_entry` can now be used without requiring `S: BuildHasher`. (#123) -- `RawTable::bucket_index` can now be used under the `raw` feature. 
(#128) - -## [v0.6.3] - 2019-10-31 - -### Added -- Added an `ahash-compile-time-rng` feature (enabled by default) which allows disabling the - `compile-time-rng` feature in `ahash` to work around a Cargo bug. (#125) - -## [v0.6.2] - 2019-10-23 - -### Added -- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between - runtime performance and compilation time. (#119) - -## [v0.6.1] - 2019-10-04 - -### Added -- Added `Entry::insert` and `RawEntryMut::insert`. (#118) - -### Changed -- `Group::static_empty` was changed from a `const` to a `static` (#116). - -## [v0.6.0] - 2019-08-13 - -### Fixed -- Fixed AHash accidentally depending on `std`. (#110) - -### Changed -- The minimum Rust version has been bumped to 1.32 (due to `rand` dependency). - -## ~~[v0.5.1] - 2019-08-04~~ - -This release was _yanked_ due to a breaking change for users of `no-default-features`. - -### Added -- The experimental and unsafe `RawTable` API is available under the "raw" feature. (#108) -- Added entry-like methods for `HashSet`. (#98) - -### Changed -- Changed the default hasher from FxHash to AHash. (#97) -- `hashbrown` is now fully `no_std` on recent Rust versions (1.36+). (#96) - -### Fixed -- We now avoid growing the table during insertions when it wasn't necessary. (#106) -- `RawOccupiedEntryMut` now properly implements `Send` and `Sync`. (#100) -- Relaxed `lazy_static` version. (#92) - -## [v0.5.0] - 2019-06-12 - -### Fixed -- Resize with a more conservative amount of space after deletions. (#86) - -### Changed -- Exposed the Layout of the failed allocation in CollectionAllocErr::AllocErr. (#89) - -## [v0.4.0] - 2019-05-30 - -### Fixed -- Fixed `Send` trait bounds on `IterMut` not matching the libstd one. (#82) - -## [v0.3.1] - 2019-05-30 - -### Fixed -- Fixed incorrect use of slice in unsafe code. (#80) - -## [v0.3.0] - 2019-04-23 - -### Changed -- Changed shrink_to to not panic if min_capacity < capacity. 
(#67) - -### Fixed -- Worked around emscripten bug emscripten-core/emscripten-fastcomp#258. (#66) - -## [v0.2.2] - 2019-04-16 - -### Fixed -- Inlined non-nightly lowest_set_bit_nonzero. (#64) -- Fixed build on latest nightly. (#65) - -## [v0.2.1] - 2019-04-14 - -### Changed -- Use for_each in map Extend and FromIterator. (#58) -- Improved worst-case performance of HashSet.is_subset. (#61) - -### Fixed -- Removed incorrect debug_assert. (#60) - -## [v0.2.0] - 2019-03-31 - -### Changed -- The code has been updated to Rust 2018 edition. This means that the minimum - Rust version has been bumped to 1.31 (2018 edition). - -### Added -- Added `insert_with_hasher` to the raw_entry API to allow `K: !(Hash + Eq)`. (#54) -- Added support for using hashbrown as the hash table implementation in libstd. (#46) - -### Fixed -- Fixed cargo build with minimal-versions. (#45) -- Fixed `#[may_dangle]` attributes to match the libstd `HashMap`. (#46) -- ZST keys and values are now handled properly. (#46) - -## [v0.1.8] - 2019-01-14 - -### Added -- Rayon parallel iterator support (#37) -- `raw_entry` support (#31) -- `#[may_dangle]` on nightly (#31) -- `try_reserve` support (#31) - -### Fixed -- Fixed variance on `IterMut`. (#31) - -## [v0.1.7] - 2018-12-05 - -### Fixed -- Fixed non-SSE version of convert_special_to_empty_and_full_to_deleted. (#32) -- Fixed overflow in rehash_in_place. (#33) - -## [v0.1.6] - 2018-11-17 - -### Fixed -- Fixed compile error on nightly. (#29) - -## [v0.1.5] - 2018-11-08 - -### Fixed -- Fixed subtraction overflow in generic::Group::match_byte. (#28) - -## [v0.1.4] - 2018-11-04 - -### Fixed -- Fixed a bug in the `erase_no_drop` implementation. (#26) - -## [v0.1.3] - 2018-11-01 - -### Added -- Serde support. (#14) - -### Fixed -- Make the compiler inline functions more aggressively. (#20) - -## [v0.1.2] - 2018-10-31 - -### Fixed -- `clear` segfaults when called on an empty table. 
(#13) - -## [v0.1.1] - 2018-10-30 - -### Fixed -- `erase_no_drop` optimization not triggering in the SSE2 implementation. (#3) -- Missing `Send` and `Sync` for hash map and iterator types. (#7) -- Bug when inserting into a table smaller than the group width. (#5) - -## v0.1.0 - 2018-10-29 - -- Initial release - -[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...HEAD -[v0.12.3]: https://github.com/rust-lang/hashbrown/compare/v0.12.2...v0.12.3 -[v0.12.2]: https://github.com/rust-lang/hashbrown/compare/v0.12.1...v0.12.2 -[v0.12.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.0...v0.12.1 -[v0.12.0]: https://github.com/rust-lang/hashbrown/compare/v0.11.2...v0.12.0 -[v0.11.2]: https://github.com/rust-lang/hashbrown/compare/v0.11.1...v0.11.2 -[v0.11.1]: https://github.com/rust-lang/hashbrown/compare/v0.11.0...v0.11.1 -[v0.11.0]: https://github.com/rust-lang/hashbrown/compare/v0.10.0...v0.11.0 -[v0.10.0]: https://github.com/rust-lang/hashbrown/compare/v0.9.1...v0.10.0 -[v0.9.1]: https://github.com/rust-lang/hashbrown/compare/v0.9.0...v0.9.1 -[v0.9.0]: https://github.com/rust-lang/hashbrown/compare/v0.8.2...v0.9.0 -[v0.8.2]: https://github.com/rust-lang/hashbrown/compare/v0.8.1...v0.8.2 -[v0.8.1]: https://github.com/rust-lang/hashbrown/compare/v0.8.0...v0.8.1 -[v0.8.0]: https://github.com/rust-lang/hashbrown/compare/v0.7.2...v0.8.0 -[v0.7.2]: https://github.com/rust-lang/hashbrown/compare/v0.7.1...v0.7.2 -[v0.7.1]: https://github.com/rust-lang/hashbrown/compare/v0.7.0...v0.7.1 -[v0.7.0]: https://github.com/rust-lang/hashbrown/compare/v0.6.3...v0.7.0 -[v0.6.3]: https://github.com/rust-lang/hashbrown/compare/v0.6.2...v0.6.3 -[v0.6.2]: https://github.com/rust-lang/hashbrown/compare/v0.6.1...v0.6.2 -[v0.6.1]: https://github.com/rust-lang/hashbrown/compare/v0.6.0...v0.6.1 -[v0.6.0]: https://github.com/rust-lang/hashbrown/compare/v0.5.1...v0.6.0 -[v0.5.1]: https://github.com/rust-lang/hashbrown/compare/v0.5.0...v0.5.1 -[v0.5.0]: 
https://github.com/rust-lang/hashbrown/compare/v0.4.0...v0.5.0 -[v0.4.0]: https://github.com/rust-lang/hashbrown/compare/v0.3.1...v0.4.0 -[v0.3.1]: https://github.com/rust-lang/hashbrown/compare/v0.3.0...v0.3.1 -[v0.3.0]: https://github.com/rust-lang/hashbrown/compare/v0.2.2...v0.3.0 -[v0.2.2]: https://github.com/rust-lang/hashbrown/compare/v0.2.1...v0.2.2 -[v0.2.1]: https://github.com/rust-lang/hashbrown/compare/v0.2.0...v0.2.1 -[v0.2.0]: https://github.com/rust-lang/hashbrown/compare/v0.1.8...v0.2.0 -[v0.1.8]: https://github.com/rust-lang/hashbrown/compare/v0.1.7...v0.1.8 -[v0.1.7]: https://github.com/rust-lang/hashbrown/compare/v0.1.6...v0.1.7 -[v0.1.6]: https://github.com/rust-lang/hashbrown/compare/v0.1.5...v0.1.6 -[v0.1.5]: https://github.com/rust-lang/hashbrown/compare/v0.1.4...v0.1.5 -[v0.1.4]: https://github.com/rust-lang/hashbrown/compare/v0.1.3...v0.1.4 -[v0.1.3]: https://github.com/rust-lang/hashbrown/compare/v0.1.2...v0.1.3 -[v0.1.2]: https://github.com/rust-lang/hashbrown/compare/v0.1.1...v0.1.2 -[v0.1.1]: https://github.com/rust-lang/hashbrown/compare/v0.1.0...v0.1.1 diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/clippy.toml s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/clippy.toml --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/clippy.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/clippy.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -doc-valid-idents = [ "CppCon", "SwissTable", "SipHash", "HashDoS" ] diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/LICENSE-MIT s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2016 Amanieu d'Antras - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/README.md s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/README.md --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,126 +0,0 @@ -hashbrown -========= - -[![Build Status](https://github.com/rust-lang/hashbrown/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/hashbrown/actions) -[![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown) -[![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown) -[![Rust](https://img.shields.io/badge/rust-1.56.1%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) - -This crate is a Rust port of Google's high-performance [SwissTable] hash -map, adapted to make it a drop-in replacement for Rust's standard `HashMap` -and `HashSet` types. - -The original C++ version of SwissTable can be found [here], and this -[CppCon talk] gives an overview of how the algorithm works. - -Since Rust 1.36, this is now the `HashMap` implementation for the Rust standard -library. However you may still want to use this crate instead since it works -in environments without `std`, such as embedded systems and kernels. - -[SwissTable]: https://abseil.io/blog/20180927-swisstables -[here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h -[CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 - -## [Change log](CHANGELOG.md) - -## Features - -- Drop-in replacement for the standard library `HashMap` and `HashSet` types. -- Uses [AHash](https://github.com/tkaitchuck/aHash) as the default hasher, which is much faster than SipHash. - However, AHash does *not provide the same level of HashDoS resistance* as SipHash, so if that is important to you, you might want to consider using a different hasher. 
-- Around 2x faster than the previous standard library `HashMap`. -- Lower memory usage: only 1 byte of overhead per entry instead of 8. -- Compatible with `#[no_std]` (but requires a global allocator with the `alloc` crate). -- Empty hash maps do not allocate any memory. -- SIMD lookups to scan multiple hash entries in parallel. - -## Performance - -Compared to the previous implementation of `std::collections::HashMap` (Rust 1.35). - -With the hashbrown default AHash hasher: - -| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | -|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------| -| insert_ahash_highbits | 18,865 | 8,020 | -10,845 | -57.49% | x 2.35 | -| insert_ahash_random | 19,711 | 8,019 | -11,692 | -59.32% | x 2.46 | -| insert_ahash_serial | 19,365 | 6,463 | -12,902 | -66.63% | x 3.00 | -| insert_erase_ahash_highbits | 51,136 | 17,916 | -33,220 | -64.96% | x 2.85 | -| insert_erase_ahash_random | 51,157 | 17,688 | -33,469 | -65.42% | x 2.89 | -| insert_erase_ahash_serial | 45,479 | 14,895 | -30,584 | -67.25% | x 3.05 | -| iter_ahash_highbits | 1,399 | 1,092 | -307 | -21.94% | x 1.28 | -| iter_ahash_random | 1,586 | 1,059 | -527 | -33.23% | x 1.50 | -| iter_ahash_serial | 3,168 | 1,079 | -2,089 | -65.94% | x 2.94 | -| lookup_ahash_highbits | 32,351 | 4,792 | -27,559 | -85.19% | x 6.75 | -| lookup_ahash_random | 17,419 | 4,817 | -12,602 | -72.35% | x 3.62 | -| lookup_ahash_serial | 15,254 | 3,606 | -11,648 | -76.36% | x 4.23 | -| lookup_fail_ahash_highbits | 21,187 | 4,369 | -16,818 | -79.38% | x 4.85 | -| lookup_fail_ahash_random | 21,550 | 4,395 | -17,155 | -79.61% | x 4.90 | -| lookup_fail_ahash_serial | 19,450 | 3,176 | -16,274 | -83.67% | x 6.12 | - - -With the libstd default SipHash hasher: - -|name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | 
-|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------| -|insert_std_highbits |19,216 |16,885 | -2,331 | -12.13% | x 1.14 | -|insert_std_random |19,179 |17,034 | -2,145 | -11.18% | x 1.13 | -|insert_std_serial |19,462 |17,493 | -1,969 | -10.12% | x 1.11 | -|insert_erase_std_highbits |50,825 |35,847 | -14,978 | -29.47% | x 1.42 | -|insert_erase_std_random |51,448 |35,392 | -16,056 | -31.21% | x 1.45 | -|insert_erase_std_serial |87,711 |38,091 | -49,620 | -56.57% | x 2.30 | -|iter_std_highbits |1,378 |1,159 | -219 | -15.89% | x 1.19 | -|iter_std_random |1,395 |1,132 | -263 | -18.85% | x 1.23 | -|iter_std_serial |1,704 |1,105 | -599 | -35.15% | x 1.54 | -|lookup_std_highbits |17,195 |13,642 | -3,553 | -20.66% | x 1.26 | -|lookup_std_random |17,181 |13,773 | -3,408 | -19.84% | x 1.25 | -|lookup_std_serial |15,483 |13,651 | -1,832 | -11.83% | x 1.13 | -|lookup_fail_std_highbits |20,926 |13,474 | -7,452 | -35.61% | x 1.55 | -|lookup_fail_std_random |21,766 |13,505 | -8,261 | -37.95% | x 1.61 | -|lookup_fail_std_serial |19,336 |13,519 | -5,817 | -30.08% | x 1.43 | - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -hashbrown = "0.12" -``` - -Then: - -```rust -use hashbrown::HashMap; - -let mut map = HashMap::new(); -map.insert(1, "one"); -``` -## Flags -This crate has the following Cargo features: - -- `nightly`: Enables nightly-only features including: `#[may_dangle]`. -- `serde`: Enables serde serialization support. -- `rayon`: Enables rayon parallel iterator support. -- `raw`: Enables access to the experimental and unsafe `RawTable` API. -- `inline-more`: Adds inline hints to most functions, improving run-time performance at the cost - of compilation time. (enabled by default) -- `bumpalo`: Provides a `BumpWrapper` type which allows `bumpalo` to be used for memory allocation. -- `ahash`: Compiles with ahash as default hasher. 
(enabled by default) -- `ahash-compile-time-rng`: Activates the `compile-time-rng` feature of ahash. For targets with no random number generator -this pre-generates seeds at compile time and embeds them as constants. See [aHash's documentation](https://github.com/tkaitchuck/aHash#flags) (disabled by default) - -## License - -Licensed under either of: - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any -additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/mod.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/mod.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,4 +0,0 @@ -#[cfg(feature = "rayon")] -pub(crate) mod rayon; -#[cfg(feature = "serde")] -mod serde; diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/helpers.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/helpers.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/helpers.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/helpers.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -use alloc::collections::LinkedList; -use alloc::vec::Vec; - -use rayon::iter::{IntoParallelIterator, ParallelIterator}; - -/// Helper for collecting parallel iterators to an intermediary 
-#[allow(clippy::linkedlist)] // yes, we need linked list here for efficient appending! -pub(super) fn collect(iter: I) -> (LinkedList>, usize) { - let list = iter - .into_par_iter() - .fold(Vec::new, |mut vec, elem| { - vec.push(elem); - vec - }) - .map(|vec| { - let mut list = LinkedList::new(); - list.push_back(vec); - list - }) - .reduce(LinkedList::new, |mut list1, mut list2| { - list1.append(&mut list2); - list1 - }); - - let len = list.iter().map(Vec::len).sum(); - (list, len) -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/map.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/map.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,734 +0,0 @@ -//! Rayon extensions for `HashMap`. - -use super::raw::{RawIntoParIter, RawParDrain, RawParIter}; -use crate::hash_map::HashMap; -use crate::raw::{Allocator, Global}; -use core::fmt; -use core::hash::{BuildHasher, Hash}; -use core::marker::PhantomData; -use rayon::iter::plumbing::UnindexedConsumer; -use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; - -/// Parallel iterator over shared references to entries in a map. -/// -/// This iterator is created by the [`par_iter`] method on [`HashMap`] -/// (provided by the [`IntoParallelRefIterator`] trait). -/// See its documentation for more. 
-/// -/// [`par_iter`]: /hashbrown/struct.HashMap.html#method.par_iter -/// [`HashMap`]: /hashbrown/struct.HashMap.html -/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html -pub struct ParIter<'a, K, V> { - inner: RawParIter<(K, V)>, - marker: PhantomData<(&'a K, &'a V)>, -} - -impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { - type Item = (&'a K, &'a V); - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner - .map(|x| unsafe { - let r = x.as_ref(); - (&r.0, &r.1) - }) - .drive_unindexed(consumer) - } -} - -impl Clone for ParIter<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - marker: PhantomData, - } - } -} - -impl fmt::Debug for ParIter<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = unsafe { self.inner.iter() }.map(|x| unsafe { - let r = x.as_ref(); - (&r.0, &r.1) - }); - f.debug_list().entries(iter).finish() - } -} - -/// Parallel iterator over shared references to keys in a map. -/// -/// This iterator is created by the [`par_keys`] method on [`HashMap`]. -/// See its documentation for more. 
-/// -/// [`par_keys`]: /hashbrown/struct.HashMap.html#method.par_keys -/// [`HashMap`]: /hashbrown/struct.HashMap.html -pub struct ParKeys<'a, K, V> { - inner: RawParIter<(K, V)>, - marker: PhantomData<(&'a K, &'a V)>, -} - -impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> { - type Item = &'a K; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner - .map(|x| unsafe { &x.as_ref().0 }) - .drive_unindexed(consumer) - } -} - -impl Clone for ParKeys<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - marker: PhantomData, - } - } -} - -impl fmt::Debug for ParKeys<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().0 }); - f.debug_list().entries(iter).finish() - } -} - -/// Parallel iterator over shared references to values in a map. -/// -/// This iterator is created by the [`par_values`] method on [`HashMap`]. -/// See its documentation for more. 
-/// -/// [`par_values`]: /hashbrown/struct.HashMap.html#method.par_values -/// [`HashMap`]: /hashbrown/struct.HashMap.html -pub struct ParValues<'a, K, V> { - inner: RawParIter<(K, V)>, - marker: PhantomData<(&'a K, &'a V)>, -} - -impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> { - type Item = &'a V; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner - .map(|x| unsafe { &x.as_ref().1 }) - .drive_unindexed(consumer) - } -} - -impl Clone for ParValues<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - marker: PhantomData, - } - } -} - -impl fmt::Debug for ParValues<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().1 }); - f.debug_list().entries(iter).finish() - } -} - -/// Parallel iterator over mutable references to entries in a map. -/// -/// This iterator is created by the [`par_iter_mut`] method on [`HashMap`] -/// (provided by the [`IntoParallelRefMutIterator`] trait). -/// See its documentation for more. 
-/// -/// [`par_iter_mut`]: /hashbrown/struct.HashMap.html#method.par_iter_mut -/// [`HashMap`]: /hashbrown/struct.HashMap.html -/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html -pub struct ParIterMut<'a, K, V> { - inner: RawParIter<(K, V)>, - marker: PhantomData<(&'a K, &'a mut V)>, -} - -impl<'a, K: Sync, V: Send> ParallelIterator for ParIterMut<'a, K, V> { - type Item = (&'a K, &'a mut V); - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner - .map(|x| unsafe { - let r = x.as_mut(); - (&r.0, &mut r.1) - }) - .drive_unindexed(consumer) - } -} - -impl fmt::Debug for ParIterMut<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ParIter { - inner: self.inner.clone(), - marker: PhantomData, - } - .fmt(f) - } -} - -/// Parallel iterator over mutable references to values in a map. -/// -/// This iterator is created by the [`par_values_mut`] method on [`HashMap`]. -/// See its documentation for more. -/// -/// [`par_values_mut`]: /hashbrown/struct.HashMap.html#method.par_values_mut -/// [`HashMap`]: /hashbrown/struct.HashMap.html -pub struct ParValuesMut<'a, K, V> { - inner: RawParIter<(K, V)>, - marker: PhantomData<(&'a K, &'a mut V)>, -} - -impl<'a, K: Sync, V: Send> ParallelIterator for ParValuesMut<'a, K, V> { - type Item = &'a mut V; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner - .map(|x| unsafe { &mut x.as_mut().1 }) - .drive_unindexed(consumer) - } -} - -impl fmt::Debug for ParValuesMut<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ParValues { - inner: self.inner.clone(), - marker: PhantomData, - } - .fmt(f) - } -} - -/// Parallel iterator over entries of a consumed map. 
-/// -/// This iterator is created by the [`into_par_iter`] method on [`HashMap`] -/// (provided by the [`IntoParallelIterator`] trait). -/// See its documentation for more. -/// -/// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter -/// [`HashMap`]: /hashbrown/struct.HashMap.html -/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter { - inner: RawIntoParIter<(K, V), A>, -} - -impl ParallelIterator for IntoParIter { - type Item = (K, V); - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner.drive_unindexed(consumer) - } -} - -impl fmt::Debug - for IntoParIter -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ParIter { - inner: unsafe { self.inner.par_iter() }, - marker: PhantomData, - } - .fmt(f) - } -} - -/// Parallel draining iterator over entries of a map. -/// -/// This iterator is created by the [`par_drain`] method on [`HashMap`]. -/// See its documentation for more. -/// -/// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain -/// [`HashMap`]: /hashbrown/struct.HashMap.html -pub struct ParDrain<'a, K, V, A: Allocator + Clone = Global> { - inner: RawParDrain<'a, (K, V), A>, -} - -impl ParallelIterator for ParDrain<'_, K, V, A> { - type Item = (K, V); - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner.drive_unindexed(consumer) - } -} - -impl fmt::Debug - for ParDrain<'_, K, V, A> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ParIter { - inner: unsafe { self.inner.par_iter() }, - marker: PhantomData, - } - .fmt(f) - } -} - -impl HashMap { - /// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order. 
- #[cfg_attr(feature = "inline-more", inline)] - pub fn par_keys(&self) -> ParKeys<'_, K, V> { - ParKeys { - inner: unsafe { self.table.par_iter() }, - marker: PhantomData, - } - } - - /// Visits (potentially in parallel) immutably borrowed values in an arbitrary order. - #[cfg_attr(feature = "inline-more", inline)] - pub fn par_values(&self) -> ParValues<'_, K, V> { - ParValues { - inner: unsafe { self.table.par_iter() }, - marker: PhantomData, - } - } -} - -impl HashMap { - /// Visits (potentially in parallel) mutably borrowed values in an arbitrary order. - #[cfg_attr(feature = "inline-more", inline)] - pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { - ParValuesMut { - inner: unsafe { self.table.par_iter() }, - marker: PhantomData, - } - } - - /// Consumes (potentially in parallel) all values in an arbitrary order, - /// while preserving the map's allocated memory for reuse. - #[cfg_attr(feature = "inline-more", inline)] - pub fn par_drain(&mut self) -> ParDrain<'_, K, V, A> { - ParDrain { - inner: self.table.par_drain(), - } - } -} - -impl HashMap -where - K: Eq + Hash + Sync, - V: PartialEq + Sync, - S: BuildHasher + Sync, - A: Allocator + Clone + Sync, -{ - /// Returns `true` if the map is equal to another, - /// i.e. both maps contain the same keys mapped to the same values. - /// - /// This method runs in a potentially parallel fashion. 
- pub fn par_eq(&self, other: &Self) -> bool { - self.len() == other.len() - && self - .into_par_iter() - .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) - } -} - -impl IntoParallelIterator - for HashMap -{ - type Item = (K, V); - type Iter = IntoParIter; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_par_iter(self) -> Self::Iter { - IntoParIter { - inner: self.table.into_par_iter(), - } - } -} - -impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator - for &'a HashMap -{ - type Item = (&'a K, &'a V); - type Iter = ParIter<'a, K, V>; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_par_iter(self) -> Self::Iter { - ParIter { - inner: unsafe { self.table.par_iter() }, - marker: PhantomData, - } - } -} - -impl<'a, K: Sync, V: Send, S, A: Allocator + Clone> IntoParallelIterator - for &'a mut HashMap -{ - type Item = (&'a K, &'a mut V); - type Iter = ParIterMut<'a, K, V>; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_par_iter(self) -> Self::Iter { - ParIterMut { - inner: unsafe { self.table.par_iter() }, - marker: PhantomData, - } - } -} - -/// Collect (key, value) pairs from a parallel iterator into a -/// hashmap. If multiple pairs correspond to the same key, then the -/// ones produced earlier in the parallel iterator will be -/// overwritten, just as with a sequential iterator. -impl FromParallelIterator<(K, V)> for HashMap -where - K: Eq + Hash + Send, - V: Send, - S: BuildHasher + Default, -{ - fn from_par_iter

(par_iter: P) -> Self - where - P: IntoParallelIterator, - { - let mut map = HashMap::default(); - map.par_extend(par_iter); - map - } -} - -/// Extend a hash map with items from a parallel iterator. -impl ParallelExtend<(K, V)> for HashMap -where - K: Eq + Hash + Send, - V: Send, - S: BuildHasher, - A: Allocator + Clone, -{ - fn par_extend(&mut self, par_iter: I) - where - I: IntoParallelIterator, - { - extend(self, par_iter); - } -} - -/// Extend a hash map with copied items from a parallel iterator. -impl<'a, K, V, S, A> ParallelExtend<(&'a K, &'a V)> for HashMap -where - K: Copy + Eq + Hash + Sync, - V: Copy + Sync, - S: BuildHasher, - A: Allocator + Clone, -{ - fn par_extend(&mut self, par_iter: I) - where - I: IntoParallelIterator, - { - extend(self, par_iter); - } -} - -// This is equal to the normal `HashMap` -- no custom advantage. -fn extend(map: &mut HashMap, par_iter: I) -where - K: Eq + Hash, - S: BuildHasher, - I: IntoParallelIterator, - A: Allocator + Clone, - HashMap: Extend, -{ - let (list, len) = super::helpers::collect(par_iter); - - // Keys may be already present or show multiple times in the iterator. - // Reserve the entire length if the map is empty. - // Otherwise reserve half the length (rounded up), so the map - // will only resize twice in the worst case. 
- let reserve = if map.is_empty() { len } else { (len + 1) / 2 }; - map.reserve(reserve); - for vec in list { - map.extend(vec); - } -} - -#[cfg(test)] -mod test_par_map { - use alloc::vec::Vec; - use core::hash::{Hash, Hasher}; - use core::sync::atomic::{AtomicUsize, Ordering}; - - use rayon::prelude::*; - - use crate::hash_map::HashMap; - - struct Dropable<'a> { - k: usize, - counter: &'a AtomicUsize, - } - - impl Dropable<'_> { - fn new(k: usize, counter: &AtomicUsize) -> Dropable<'_> { - counter.fetch_add(1, Ordering::Relaxed); - - Dropable { k, counter } - } - } - - impl Drop for Dropable<'_> { - fn drop(&mut self) { - self.counter.fetch_sub(1, Ordering::Relaxed); - } - } - - impl Clone for Dropable<'_> { - fn clone(&self) -> Self { - Dropable::new(self.k, self.counter) - } - } - - impl Hash for Dropable<'_> { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - self.k.hash(state); - } - } - - impl PartialEq for Dropable<'_> { - fn eq(&self, other: &Self) -> bool { - self.k == other.k - } - } - - impl Eq for Dropable<'_> {} - - #[test] - fn test_into_iter_drops() { - let key = AtomicUsize::new(0); - let value = AtomicUsize::new(0); - - let hm = { - let mut hm = HashMap::new(); - - assert_eq!(key.load(Ordering::Relaxed), 0); - assert_eq!(value.load(Ordering::Relaxed), 0); - - for i in 0..100 { - let d1 = Dropable::new(i, &key); - let d2 = Dropable::new(i + 100, &value); - hm.insert(d1, d2); - } - - assert_eq!(key.load(Ordering::Relaxed), 100); - assert_eq!(value.load(Ordering::Relaxed), 100); - - hm - }; - - // By the way, ensure that cloning doesn't screw up the dropping. - drop(hm.clone()); - - assert_eq!(key.load(Ordering::Relaxed), 100); - assert_eq!(value.load(Ordering::Relaxed), 100); - - // Ensure that dropping the iterator does not leak anything. 
- drop(hm.clone().into_par_iter()); - - { - assert_eq!(key.load(Ordering::Relaxed), 100); - assert_eq!(value.load(Ordering::Relaxed), 100); - - // retain only half - let _v: Vec<_> = hm - .into_par_iter() - .filter(|&(ref key, _)| key.k < 50) - .collect(); - - assert_eq!(key.load(Ordering::Relaxed), 50); - assert_eq!(value.load(Ordering::Relaxed), 50); - }; - - assert_eq!(key.load(Ordering::Relaxed), 0); - assert_eq!(value.load(Ordering::Relaxed), 0); - } - - #[test] - fn test_drain_drops() { - let key = AtomicUsize::new(0); - let value = AtomicUsize::new(0); - - let mut hm = { - let mut hm = HashMap::new(); - - assert_eq!(key.load(Ordering::Relaxed), 0); - assert_eq!(value.load(Ordering::Relaxed), 0); - - for i in 0..100 { - let d1 = Dropable::new(i, &key); - let d2 = Dropable::new(i + 100, &value); - hm.insert(d1, d2); - } - - assert_eq!(key.load(Ordering::Relaxed), 100); - assert_eq!(value.load(Ordering::Relaxed), 100); - - hm - }; - - // By the way, ensure that cloning doesn't screw up the dropping. - drop(hm.clone()); - - assert_eq!(key.load(Ordering::Relaxed), 100); - assert_eq!(value.load(Ordering::Relaxed), 100); - - // Ensure that dropping the drain iterator does not leak anything. 
- drop(hm.clone().par_drain()); - - { - assert_eq!(key.load(Ordering::Relaxed), 100); - assert_eq!(value.load(Ordering::Relaxed), 100); - - // retain only half - let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect(); - assert!(hm.is_empty()); - - assert_eq!(key.load(Ordering::Relaxed), 50); - assert_eq!(value.load(Ordering::Relaxed), 50); - }; - - assert_eq!(key.load(Ordering::Relaxed), 0); - assert_eq!(value.load(Ordering::Relaxed), 0); - } - - #[test] - fn test_empty_iter() { - let mut m: HashMap = HashMap::new(); - assert_eq!(m.par_drain().count(), 0); - assert_eq!(m.par_keys().count(), 0); - assert_eq!(m.par_values().count(), 0); - assert_eq!(m.par_values_mut().count(), 0); - assert_eq!(m.par_iter().count(), 0); - assert_eq!(m.par_iter_mut().count(), 0); - assert_eq!(m.len(), 0); - assert!(m.is_empty()); - assert_eq!(m.into_par_iter().count(), 0); - } - - #[test] - fn test_iterate() { - let mut m = HashMap::with_capacity(4); - for i in 0..32 { - assert!(m.insert(i, i * 2).is_none()); - } - assert_eq!(m.len(), 32); - - let observed = AtomicUsize::new(0); - - m.par_iter().for_each(|(k, v)| { - assert_eq!(*v, *k * 2); - observed.fetch_or(1 << *k, Ordering::Relaxed); - }); - assert_eq!(observed.into_inner(), 0xFFFF_FFFF); - } - - #[test] - fn test_keys() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: HashMap<_, _> = vec.into_par_iter().collect(); - let keys: Vec<_> = map.par_keys().cloned().collect(); - assert_eq!(keys.len(), 3); - assert!(keys.contains(&1)); - assert!(keys.contains(&2)); - assert!(keys.contains(&3)); - } - - #[test] - fn test_values() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: HashMap<_, _> = vec.into_par_iter().collect(); - let values: Vec<_> = map.par_values().cloned().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&'a')); - assert!(values.contains(&'b')); - assert!(values.contains(&'c')); - } - - #[test] - fn test_values_mut() { - let vec = vec![(1, 1), (2, 2), (3, 3)]; - 
let mut map: HashMap<_, _> = vec.into_par_iter().collect(); - map.par_values_mut().for_each(|value| *value *= 2); - let values: Vec<_> = map.par_values().cloned().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&2)); - assert!(values.contains(&4)); - assert!(values.contains(&6)); - } - - #[test] - fn test_eq() { - let mut m1 = HashMap::new(); - m1.insert(1, 2); - m1.insert(2, 3); - m1.insert(3, 4); - - let mut m2 = HashMap::new(); - m2.insert(1, 2); - m2.insert(2, 3); - - assert!(!m1.par_eq(&m2)); - - m2.insert(3, 4); - - assert!(m1.par_eq(&m2)); - } - - #[test] - fn test_from_iter() { - let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - - let map: HashMap<_, _> = xs.par_iter().cloned().collect(); - - for &(k, v) in &xs { - assert_eq!(map.get(&k), Some(&v)); - } - } - - #[test] - fn test_extend_ref() { - let mut a = HashMap::new(); - a.insert(1, "one"); - let mut b = HashMap::new(); - b.insert(2, "two"); - b.insert(3, "three"); - - a.par_extend(&b); - - assert_eq!(a.len(), 3); - assert_eq!(a[&1], "one"); - assert_eq!(a[&2], "two"); - assert_eq!(a[&3], "three"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/mod.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/mod.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,4 +0,0 @@ -mod helpers; -pub(crate) mod map; -pub(crate) mod raw; -pub(crate) mod set; diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/raw.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/raw.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/raw.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/raw.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,231 +0,0 @@ -use crate::raw::Bucket; -use crate::raw::{Allocator, Global, RawIter, RawIterRange, RawTable}; -use crate::scopeguard::guard; -use alloc::alloc::dealloc; -use core::marker::PhantomData; -use core::mem; -use core::ptr::NonNull; -use rayon::iter::{ - plumbing::{self, Folder, UnindexedConsumer, UnindexedProducer}, - ParallelIterator, -}; - -/// Parallel iterator which returns a raw pointer to every full bucket in the table. -pub struct RawParIter { - iter: RawIterRange, -} - -impl RawParIter { - #[cfg_attr(feature = "inline-more", inline)] - pub(super) unsafe fn iter(&self) -> RawIterRange { - self.iter.clone() - } -} - -impl Clone for RawParIter { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Self { - iter: self.iter.clone(), - } - } -} - -impl From> for RawParIter { - fn from(it: RawIter) -> Self { - RawParIter { iter: it.iter } - } -} - -impl ParallelIterator for RawParIter { - type Item = Bucket; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - let producer = ParIterProducer { iter: self.iter }; - plumbing::bridge_unindexed(producer, consumer) - } -} - -/// Producer which returns a `Bucket` for every element. -struct ParIterProducer { - iter: RawIterRange, -} - -impl UnindexedProducer for ParIterProducer { - type Item = Bucket; - - #[cfg_attr(feature = "inline-more", inline)] - fn split(self) -> (Self, Option) { - let (left, right) = self.iter.split(); - let left = ParIterProducer { iter: left }; - let right = right.map(|right| ParIterProducer { iter: right }); - (left, right) - } - - #[cfg_attr(feature = "inline-more", inline)] - fn fold_with(self, folder: F) -> F - where - F: Folder, - { - folder.consume_iter(self.iter) - } -} - -/// Parallel iterator which consumes a table and returns elements. 
-pub struct RawIntoParIter { - table: RawTable, -} - -impl RawIntoParIter { - #[cfg_attr(feature = "inline-more", inline)] - pub(super) unsafe fn par_iter(&self) -> RawParIter { - self.table.par_iter() - } -} - -impl ParallelIterator for RawIntoParIter { - type Item = T; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - let iter = unsafe { self.table.iter().iter }; - let _guard = guard(self.table.into_allocation(), |alloc| { - if let Some((ptr, layout)) = *alloc { - unsafe { - dealloc(ptr.as_ptr(), layout); - } - } - }); - let producer = ParDrainProducer { iter }; - plumbing::bridge_unindexed(producer, consumer) - } -} - -/// Parallel iterator which consumes elements without freeing the table storage. -pub struct RawParDrain<'a, T, A: Allocator + Clone = Global> { - // We don't use a &'a mut RawTable because we want RawParDrain to be - // covariant over T. - table: NonNull>, - marker: PhantomData<&'a RawTable>, -} - -unsafe impl Send for RawParDrain<'_, T, A> {} - -impl RawParDrain<'_, T, A> { - #[cfg_attr(feature = "inline-more", inline)] - pub(super) unsafe fn par_iter(&self) -> RawParIter { - self.table.as_ref().par_iter() - } -} - -impl ParallelIterator for RawParDrain<'_, T, A> { - type Item = T; - - #[cfg_attr(feature = "inline-more", inline)] - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - let _guard = guard(self.table, |table| unsafe { - table.as_mut().clear_no_drop(); - }); - let iter = unsafe { self.table.as_ref().iter().iter }; - mem::forget(self); - let producer = ParDrainProducer { iter }; - plumbing::bridge_unindexed(producer, consumer) - } -} - -impl Drop for RawParDrain<'_, T, A> { - fn drop(&mut self) { - // If drive_unindexed is not called then simply clear the table. 
- unsafe { - self.table.as_mut().clear(); - } - } -} - -/// Producer which will consume all elements in the range, even if it is dropped -/// halfway through. -struct ParDrainProducer { - iter: RawIterRange, -} - -impl UnindexedProducer for ParDrainProducer { - type Item = T; - - #[cfg_attr(feature = "inline-more", inline)] - fn split(self) -> (Self, Option) { - let (left, right) = self.iter.clone().split(); - mem::forget(self); - let left = ParDrainProducer { iter: left }; - let right = right.map(|right| ParDrainProducer { iter: right }); - (left, right) - } - - #[cfg_attr(feature = "inline-more", inline)] - fn fold_with(mut self, mut folder: F) -> F - where - F: Folder, - { - // Make sure to modify the iterator in-place so that any remaining - // elements are processed in our Drop impl. - for item in &mut self.iter { - folder = folder.consume(unsafe { item.read() }); - if folder.full() { - return folder; - } - } - - // If we processed all elements then we don't need to run the drop. - mem::forget(self); - folder - } -} - -impl Drop for ParDrainProducer { - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - // Drop all remaining elements - if mem::needs_drop::() { - for item in &mut self.iter { - unsafe { - item.drop(); - } - } - } - } -} - -impl RawTable { - /// Returns a parallel iterator over the elements in a `RawTable`. - #[cfg_attr(feature = "inline-more", inline)] - pub unsafe fn par_iter(&self) -> RawParIter { - RawParIter { - iter: self.iter().iter, - } - } - - /// Returns a parallel iterator over the elements in a `RawTable`. - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_par_iter(self) -> RawIntoParIter { - RawIntoParIter { table: self } - } - - /// Returns a parallel iterator which consumes all elements of a `RawTable` - /// without freeing its memory allocation. 
- #[cfg_attr(feature = "inline-more", inline)] - pub fn par_drain(&mut self) -> RawParDrain<'_, T, A> { - RawParDrain { - table: NonNull::from(self), - marker: PhantomData, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/set.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/set.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/set.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/set.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,659 +0,0 @@ -//! Rayon extensions for `HashSet`. - -use super::map; -use crate::hash_set::HashSet; -use crate::raw::{Allocator, Global}; -use core::hash::{BuildHasher, Hash}; -use rayon::iter::plumbing::UnindexedConsumer; -use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; - -/// Parallel iterator over elements of a consumed set. -/// -/// This iterator is created by the [`into_par_iter`] method on [`HashSet`] -/// (provided by the [`IntoParallelIterator`] trait). -/// See its documentation for more. -/// -/// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter -/// [`HashSet`]: /hashbrown/struct.HashSet.html -/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter { - inner: map::IntoParIter, -} - -impl ParallelIterator for IntoParIter { - type Item = T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner.map(|(k, _)| k).drive_unindexed(consumer) - } -} - -/// Parallel draining iterator over entries of a set. -/// -/// This iterator is created by the [`par_drain`] method on [`HashSet`]. -/// See its documentation for more. 
-/// -/// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain -/// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParDrain<'a, T, A: Allocator + Clone = Global> { - inner: map::ParDrain<'a, T, (), A>, -} - -impl ParallelIterator for ParDrain<'_, T, A> { - type Item = T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner.map(|(k, _)| k).drive_unindexed(consumer) - } -} - -/// Parallel iterator over shared references to elements in a set. -/// -/// This iterator is created by the [`par_iter`] method on [`HashSet`] -/// (provided by the [`IntoParallelRefIterator`] trait). -/// See its documentation for more. -/// -/// [`par_iter`]: /hashbrown/struct.HashSet.html#method.par_iter -/// [`HashSet`]: /hashbrown/struct.HashSet.html -/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html -pub struct ParIter<'a, T> { - inner: map::ParKeys<'a, T, ()>, -} - -impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { - type Item = &'a T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.inner.drive_unindexed(consumer) - } -} - -/// Parallel iterator over shared references to elements in the difference of -/// sets. -/// -/// This iterator is created by the [`par_difference`] method on [`HashSet`]. -/// See its documentation for more. 
-/// -/// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference -/// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParDifference<'a, T, S, A: Allocator + Clone = Global> { - a: &'a HashSet, - b: &'a HashSet, -} - -impl<'a, T, S, A> ParallelIterator for ParDifference<'a, T, S, A> -where - T: Eq + Hash + Sync, - S: BuildHasher + Sync, - A: Allocator + Clone + Sync, -{ - type Item = &'a T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.a - .into_par_iter() - .filter(|&x| !self.b.contains(x)) - .drive_unindexed(consumer) - } -} - -/// Parallel iterator over shared references to elements in the symmetric -/// difference of sets. -/// -/// This iterator is created by the [`par_symmetric_difference`] method on -/// [`HashSet`]. -/// See its documentation for more. -/// -/// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference -/// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParSymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { - a: &'a HashSet, - b: &'a HashSet, -} - -impl<'a, T, S, A> ParallelIterator for ParSymmetricDifference<'a, T, S, A> -where - T: Eq + Hash + Sync, - S: BuildHasher + Sync, - A: Allocator + Clone + Sync, -{ - type Item = &'a T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.a - .par_difference(self.b) - .chain(self.b.par_difference(self.a)) - .drive_unindexed(consumer) - } -} - -/// Parallel iterator over shared references to elements in the intersection of -/// sets. -/// -/// This iterator is created by the [`par_intersection`] method on [`HashSet`]. -/// See its documentation for more. 
-/// -/// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection -/// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParIntersection<'a, T, S, A: Allocator + Clone = Global> { - a: &'a HashSet, - b: &'a HashSet, -} - -impl<'a, T, S, A> ParallelIterator for ParIntersection<'a, T, S, A> -where - T: Eq + Hash + Sync, - S: BuildHasher + Sync, - A: Allocator + Clone + Sync, -{ - type Item = &'a T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.a - .into_par_iter() - .filter(|&x| self.b.contains(x)) - .drive_unindexed(consumer) - } -} - -/// Parallel iterator over shared references to elements in the union of sets. -/// -/// This iterator is created by the [`par_union`] method on [`HashSet`]. -/// See its documentation for more. -/// -/// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union -/// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParUnion<'a, T, S, A: Allocator + Clone = Global> { - a: &'a HashSet, - b: &'a HashSet, -} - -impl<'a, T, S, A> ParallelIterator for ParUnion<'a, T, S, A> -where - T: Eq + Hash + Sync, - S: BuildHasher + Sync, - A: Allocator + Clone + Sync, -{ - type Item = &'a T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - // We'll iterate one set in full, and only the remaining difference from the other. - // Use the smaller set for the difference in order to reduce hash lookups. - let (smaller, larger) = if self.a.len() <= self.b.len() { - (self.a, self.b) - } else { - (self.b, self.a) - }; - larger - .into_par_iter() - .chain(smaller.par_difference(larger)) - .drive_unindexed(consumer) - } -} - -impl HashSet -where - T: Eq + Hash + Sync, - S: BuildHasher + Sync, - A: Allocator + Clone + Sync, -{ - /// Visits (potentially in parallel) the values representing the union, - /// i.e. all the values in `self` or `other`, without duplicates. 
- #[cfg_attr(feature = "inline-more", inline)] - pub fn par_union<'a>(&'a self, other: &'a Self) -> ParUnion<'a, T, S, A> { - ParUnion { a: self, b: other } - } - - /// Visits (potentially in parallel) the values representing the difference, - /// i.e. the values that are in `self` but not in `other`. - #[cfg_attr(feature = "inline-more", inline)] - pub fn par_difference<'a>(&'a self, other: &'a Self) -> ParDifference<'a, T, S, A> { - ParDifference { a: self, b: other } - } - - /// Visits (potentially in parallel) the values representing the symmetric - /// difference, i.e. the values that are in `self` or in `other` but not in both. - #[cfg_attr(feature = "inline-more", inline)] - pub fn par_symmetric_difference<'a>( - &'a self, - other: &'a Self, - ) -> ParSymmetricDifference<'a, T, S, A> { - ParSymmetricDifference { a: self, b: other } - } - - /// Visits (potentially in parallel) the values representing the - /// intersection, i.e. the values that are both in `self` and `other`. - #[cfg_attr(feature = "inline-more", inline)] - pub fn par_intersection<'a>(&'a self, other: &'a Self) -> ParIntersection<'a, T, S, A> { - ParIntersection { a: self, b: other } - } - - /// Returns `true` if `self` has no elements in common with `other`. - /// This is equivalent to checking for an empty intersection. - /// - /// This method runs in a potentially parallel fashion. - pub fn par_is_disjoint(&self, other: &Self) -> bool { - self.into_par_iter().all(|x| !other.contains(x)) - } - - /// Returns `true` if the set is a subset of another, - /// i.e. `other` contains at least all the values in `self`. - /// - /// This method runs in a potentially parallel fashion. - pub fn par_is_subset(&self, other: &Self) -> bool { - if self.len() <= other.len() { - self.into_par_iter().all(|x| other.contains(x)) - } else { - false - } - } - - /// Returns `true` if the set is a superset of another, - /// i.e. `self` contains at least all the values in `other`. 
- /// - /// This method runs in a potentially parallel fashion. - pub fn par_is_superset(&self, other: &Self) -> bool { - other.par_is_subset(self) - } - - /// Returns `true` if the set is equal to another, - /// i.e. both sets contain the same values. - /// - /// This method runs in a potentially parallel fashion. - pub fn par_eq(&self, other: &Self) -> bool { - self.len() == other.len() && self.par_is_subset(other) - } -} - -impl HashSet -where - T: Eq + Hash + Send, - A: Allocator + Clone + Send, -{ - /// Consumes (potentially in parallel) all values in an arbitrary order, - /// while preserving the set's allocated memory for reuse. - #[cfg_attr(feature = "inline-more", inline)] - pub fn par_drain(&mut self) -> ParDrain<'_, T, A> { - ParDrain { - inner: self.map.par_drain(), - } - } -} - -impl IntoParallelIterator for HashSet { - type Item = T; - type Iter = IntoParIter; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_par_iter(self) -> Self::Iter { - IntoParIter { - inner: self.map.into_par_iter(), - } - } -} - -impl<'a, T: Sync, S, A: Allocator + Clone> IntoParallelIterator for &'a HashSet { - type Item = &'a T; - type Iter = ParIter<'a, T>; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_par_iter(self) -> Self::Iter { - ParIter { - inner: self.map.par_keys(), - } - } -} - -/// Collect values from a parallel iterator into a hashset. -impl FromParallelIterator for HashSet -where - T: Eq + Hash + Send, - S: BuildHasher + Default, -{ - fn from_par_iter

(par_iter: P) -> Self - where - P: IntoParallelIterator, - { - let mut set = HashSet::default(); - set.par_extend(par_iter); - set - } -} - -/// Extend a hash set with items from a parallel iterator. -impl ParallelExtend for HashSet -where - T: Eq + Hash + Send, - S: BuildHasher, -{ - fn par_extend(&mut self, par_iter: I) - where - I: IntoParallelIterator, - { - extend(self, par_iter); - } -} - -/// Extend a hash set with copied items from a parallel iterator. -impl<'a, T, S> ParallelExtend<&'a T> for HashSet -where - T: 'a + Copy + Eq + Hash + Sync, - S: BuildHasher, -{ - fn par_extend(&mut self, par_iter: I) - where - I: IntoParallelIterator, - { - extend(self, par_iter); - } -} - -// This is equal to the normal `HashSet` -- no custom advantage. -fn extend(set: &mut HashSet, par_iter: I) -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, - I: IntoParallelIterator, - HashSet: Extend, -{ - let (list, len) = super::helpers::collect(par_iter); - - // Values may be already present or show multiple times in the iterator. - // Reserve the entire length if the set is empty. - // Otherwise reserve half the length (rounded up), so the set - // will only resize twice in the worst case. 
- let reserve = if set.is_empty() { len } else { (len + 1) / 2 }; - set.reserve(reserve); - for vec in list { - set.extend(vec); - } -} - -#[cfg(test)] -mod test_par_set { - use alloc::vec::Vec; - use core::sync::atomic::{AtomicUsize, Ordering}; - - use rayon::prelude::*; - - use crate::hash_set::HashSet; - - #[test] - fn test_disjoint() { - let mut xs = HashSet::new(); - let mut ys = HashSet::new(); - assert!(xs.par_is_disjoint(&ys)); - assert!(ys.par_is_disjoint(&xs)); - assert!(xs.insert(5)); - assert!(ys.insert(11)); - assert!(xs.par_is_disjoint(&ys)); - assert!(ys.par_is_disjoint(&xs)); - assert!(xs.insert(7)); - assert!(xs.insert(19)); - assert!(xs.insert(4)); - assert!(ys.insert(2)); - assert!(ys.insert(-11)); - assert!(xs.par_is_disjoint(&ys)); - assert!(ys.par_is_disjoint(&xs)); - assert!(ys.insert(7)); - assert!(!xs.par_is_disjoint(&ys)); - assert!(!ys.par_is_disjoint(&xs)); - } - - #[test] - fn test_subset_and_superset() { - let mut a = HashSet::new(); - assert!(a.insert(0)); - assert!(a.insert(5)); - assert!(a.insert(11)); - assert!(a.insert(7)); - - let mut b = HashSet::new(); - assert!(b.insert(0)); - assert!(b.insert(7)); - assert!(b.insert(19)); - assert!(b.insert(250)); - assert!(b.insert(11)); - assert!(b.insert(200)); - - assert!(!a.par_is_subset(&b)); - assert!(!a.par_is_superset(&b)); - assert!(!b.par_is_subset(&a)); - assert!(!b.par_is_superset(&a)); - - assert!(b.insert(5)); - - assert!(a.par_is_subset(&b)); - assert!(!a.par_is_superset(&b)); - assert!(!b.par_is_subset(&a)); - assert!(b.par_is_superset(&a)); - } - - #[test] - fn test_iterate() { - let mut a = HashSet::new(); - for i in 0..32 { - assert!(a.insert(i)); - } - let observed = AtomicUsize::new(0); - a.par_iter().for_each(|k| { - observed.fetch_or(1 << *k, Ordering::Relaxed); - }); - assert_eq!(observed.into_inner(), 0xFFFF_FFFF); - } - - #[test] - fn test_intersection() { - let mut a = HashSet::new(); - let mut b = HashSet::new(); - - assert!(a.insert(11)); - assert!(a.insert(1)); 
- assert!(a.insert(3)); - assert!(a.insert(77)); - assert!(a.insert(103)); - assert!(a.insert(5)); - assert!(a.insert(-5)); - - assert!(b.insert(2)); - assert!(b.insert(11)); - assert!(b.insert(77)); - assert!(b.insert(-9)); - assert!(b.insert(-42)); - assert!(b.insert(5)); - assert!(b.insert(3)); - - let expected = [3, 5, 11, 77]; - let i = a - .par_intersection(&b) - .map(|x| { - assert!(expected.contains(x)); - 1 - }) - .sum::(); - assert_eq!(i, expected.len()); - } - - #[test] - fn test_difference() { - let mut a = HashSet::new(); - let mut b = HashSet::new(); - - assert!(a.insert(1)); - assert!(a.insert(3)); - assert!(a.insert(5)); - assert!(a.insert(9)); - assert!(a.insert(11)); - - assert!(b.insert(3)); - assert!(b.insert(9)); - - let expected = [1, 5, 11]; - let i = a - .par_difference(&b) - .map(|x| { - assert!(expected.contains(x)); - 1 - }) - .sum::(); - assert_eq!(i, expected.len()); - } - - #[test] - fn test_symmetric_difference() { - let mut a = HashSet::new(); - let mut b = HashSet::new(); - - assert!(a.insert(1)); - assert!(a.insert(3)); - assert!(a.insert(5)); - assert!(a.insert(9)); - assert!(a.insert(11)); - - assert!(b.insert(-2)); - assert!(b.insert(3)); - assert!(b.insert(9)); - assert!(b.insert(14)); - assert!(b.insert(22)); - - let expected = [-2, 1, 5, 11, 14, 22]; - let i = a - .par_symmetric_difference(&b) - .map(|x| { - assert!(expected.contains(x)); - 1 - }) - .sum::(); - assert_eq!(i, expected.len()); - } - - #[test] - fn test_union() { - let mut a = HashSet::new(); - let mut b = HashSet::new(); - - assert!(a.insert(1)); - assert!(a.insert(3)); - assert!(a.insert(5)); - assert!(a.insert(9)); - assert!(a.insert(11)); - assert!(a.insert(16)); - assert!(a.insert(19)); - assert!(a.insert(24)); - - assert!(b.insert(-2)); - assert!(b.insert(1)); - assert!(b.insert(5)); - assert!(b.insert(9)); - assert!(b.insert(13)); - assert!(b.insert(19)); - - let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24]; - let i = a - .par_union(&b) - .map(|x| { - 
assert!(expected.contains(x)); - 1 - }) - .sum::(); - assert_eq!(i, expected.len()); - } - - #[test] - fn test_from_iter() { - let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9]; - - let set: HashSet<_> = xs.par_iter().cloned().collect(); - - for x in &xs { - assert!(set.contains(x)); - } - } - - #[test] - fn test_move_iter() { - let hs = { - let mut hs = HashSet::new(); - - hs.insert('a'); - hs.insert('b'); - - hs - }; - - let v = hs.into_par_iter().collect::>(); - assert!(v == ['a', 'b'] || v == ['b', 'a']); - } - - #[test] - fn test_eq() { - // These constants once happened to expose a bug in insert(). - // I'm keeping them around to prevent a regression. - let mut s1 = HashSet::new(); - - s1.insert(1); - s1.insert(2); - s1.insert(3); - - let mut s2 = HashSet::new(); - - s2.insert(1); - s2.insert(2); - - assert!(!s1.par_eq(&s2)); - - s2.insert(3); - - assert!(s1.par_eq(&s2)); - } - - #[test] - fn test_extend_ref() { - let mut a = HashSet::new(); - a.insert(1); - - a.par_extend(&[2, 3, 4][..]); - - assert_eq!(a.len(), 4); - assert!(a.contains(&1)); - assert!(a.contains(&2)); - assert!(a.contains(&3)); - assert!(a.contains(&4)); - - let mut b = HashSet::new(); - b.insert(5); - b.insert(6); - - a.par_extend(&b); - - assert_eq!(a.len(), 6); - assert!(a.contains(&1)); - assert!(a.contains(&2)); - assert!(a.contains(&3)); - assert!(a.contains(&4)); - assert!(a.contains(&5)); - assert!(a.contains(&6)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/serde.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/serde.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/serde.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/external_trait_impls/serde.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ -mod size_hint { - use core::cmp; - - /// This presumably exists to prevent denial of service attacks. 
- /// - /// Original discussion: https://github.com/serde-rs/serde/issues/1114. - #[cfg_attr(feature = "inline-more", inline)] - pub(super) fn cautious(hint: Option) -> usize { - cmp::min(hint.unwrap_or(0), 4096) - } -} - -mod map { - use core::fmt; - use core::hash::{BuildHasher, Hash}; - use core::marker::PhantomData; - use serde::de::{Deserialize, Deserializer, MapAccess, Visitor}; - use serde::ser::{Serialize, Serializer}; - - use crate::hash_map::HashMap; - - use super::size_hint; - - impl Serialize for HashMap - where - K: Serialize + Eq + Hash, - V: Serialize, - H: BuildHasher, - { - #[cfg_attr(feature = "inline-more", inline)] - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.collect_map(self) - } - } - - impl<'de, K, V, S> Deserialize<'de> for HashMap - where - K: Deserialize<'de> + Eq + Hash, - V: Deserialize<'de>, - S: BuildHasher + Default, - { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct MapVisitor { - marker: PhantomData>, - } - - impl<'de, K, V, S> Visitor<'de> for MapVisitor - where - K: Deserialize<'de> + Eq + Hash, - V: Deserialize<'de>, - S: BuildHasher + Default, - { - type Value = HashMap; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a map") - } - - #[cfg_attr(feature = "inline-more", inline)] - fn visit_map(self, mut map: A) -> Result - where - A: MapAccess<'de>, - { - let mut values = HashMap::with_capacity_and_hasher( - size_hint::cautious(map.size_hint()), - S::default(), - ); - - while let Some((key, value)) = map.next_entry()? 
{ - values.insert(key, value); - } - - Ok(values) - } - } - - let visitor = MapVisitor { - marker: PhantomData, - }; - deserializer.deserialize_map(visitor) - } - } -} - -mod set { - use core::fmt; - use core::hash::{BuildHasher, Hash}; - use core::marker::PhantomData; - use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; - use serde::ser::{Serialize, Serializer}; - - use crate::hash_set::HashSet; - - use super::size_hint; - - impl Serialize for HashSet - where - T: Serialize + Eq + Hash, - H: BuildHasher, - { - #[cfg_attr(feature = "inline-more", inline)] - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.collect_seq(self) - } - } - - impl<'de, T, S> Deserialize<'de> for HashSet - where - T: Deserialize<'de> + Eq + Hash, - S: BuildHasher + Default, - { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct SeqVisitor { - marker: PhantomData>, - } - - impl<'de, T, S> Visitor<'de> for SeqVisitor - where - T: Deserialize<'de> + Eq + Hash, - S: BuildHasher + Default, - { - type Value = HashSet; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a sequence") - } - - #[cfg_attr(feature = "inline-more", inline)] - fn visit_seq(self, mut seq: A) -> Result - where - A: SeqAccess<'de>, - { - let mut values = HashSet::with_capacity_and_hasher( - size_hint::cautious(seq.size_hint()), - S::default(), - ); - - while let Some(value) = seq.next_element()? 
{ - values.insert(value); - } - - Ok(values) - } - } - - let visitor = SeqVisitor { - marker: PhantomData, - }; - deserializer.deserialize_seq(visitor) - } - - #[allow(clippy::missing_errors_doc)] - fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> - where - D: Deserializer<'de>, - { - struct SeqInPlaceVisitor<'a, T, S>(&'a mut HashSet); - - impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S> - where - T: Deserialize<'de> + Eq + Hash, - S: BuildHasher + Default, - { - type Value = (); - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a sequence") - } - - #[cfg_attr(feature = "inline-more", inline)] - fn visit_seq(self, mut seq: A) -> Result - where - A: SeqAccess<'de>, - { - self.0.clear(); - self.0.reserve(size_hint::cautious(seq.size_hint())); - - while let Some(value) = seq.next_element()? { - self.0.insert(value); - } - - Ok(()) - } - } - - deserializer.deserialize_seq(SeqInPlaceVisitor(place)) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/lib.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/lib.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,150 +0,0 @@ -//! This crate is a Rust port of Google's high-performance [SwissTable] hash -//! map, adapted to make it a drop-in replacement for Rust's standard `HashMap` -//! and `HashSet` types. -//! -//! The original C++ version of [SwissTable] can be found [here], and this -//! [CppCon talk] gives an overview of how the algorithm works. -//! -//! [SwissTable]: https://abseil.io/blog/20180927-swisstables -//! [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h -//! 
[CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 - -#![no_std] -#![cfg_attr( - feature = "nightly", - feature( - test, - core_intrinsics, - dropck_eyepatch, - min_specialization, - extend_one, - allocator_api, - slice_ptr_get, - nonnull_slice_from_raw_parts, - maybe_uninit_array_assume_init, - build_hasher_simple_hash_one - ) -)] -#![allow( - clippy::doc_markdown, - clippy::module_name_repetitions, - clippy::must_use_candidate, - clippy::option_if_let_else, - clippy::redundant_else, - clippy::manual_map, - clippy::missing_safety_doc, - clippy::missing_errors_doc -)] -#![warn(missing_docs)] -#![warn(rust_2018_idioms)] - -#[cfg(test)] -#[macro_use] -extern crate std; - -#[cfg_attr(test, macro_use)] -extern crate alloc; - -#[cfg(feature = "nightly")] -#[cfg(doctest)] -doc_comment::doctest!("../README.md"); - -#[macro_use] -mod macros; - -#[cfg(feature = "raw")] -/// Experimental and unsafe `RawTable` API. This module is only available if the -/// `raw` feature is enabled. -pub mod raw { - // The RawTable API is still experimental and is not properly documented yet. - #[allow(missing_docs)] - #[path = "mod.rs"] - mod inner; - pub use inner::*; - - #[cfg(feature = "rayon")] - /// [rayon]-based parallel iterator types for hash maps. - /// You will rarely need to interact with it directly unless you have need - /// to name one of the iterator types. - /// - /// [rayon]: https://docs.rs/rayon/1.0/rayon - pub mod rayon { - pub use crate::external_trait_impls::rayon::raw::*; - } -} -#[cfg(not(feature = "raw"))] -mod raw; - -mod external_trait_impls; -mod map; -#[cfg(feature = "rustc-internal-api")] -mod rustc_entry; -mod scopeguard; -mod set; - -pub mod hash_map { - //! A hash map implemented with quadratic probing and SIMD lookup. - pub use crate::map::*; - - #[cfg(feature = "rustc-internal-api")] - pub use crate::rustc_entry::*; - - #[cfg(feature = "rayon")] - /// [rayon]-based parallel iterator types for hash maps. 
- /// You will rarely need to interact with it directly unless you have need - /// to name one of the iterator types. - /// - /// [rayon]: https://docs.rs/rayon/1.0/rayon - pub mod rayon { - pub use crate::external_trait_impls::rayon::map::*; - } -} -pub mod hash_set { - //! A hash set implemented as a `HashMap` where the value is `()`. - pub use crate::set::*; - - #[cfg(feature = "rayon")] - /// [rayon]-based parallel iterator types for hash sets. - /// You will rarely need to interact with it directly unless you have need - /// to name one of the iterator types. - /// - /// [rayon]: https://docs.rs/rayon/1.0/rayon - pub mod rayon { - pub use crate::external_trait_impls::rayon::set::*; - } -} - -pub use crate::map::HashMap; -pub use crate::set::HashSet; - -/// The error type for `try_reserve` methods. -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum TryReserveError { - /// Error due to the computed capacity exceeding the collection's maximum - /// (usually `isize::MAX` bytes). - CapacityOverflow, - - /// The memory allocator returned an error - AllocError { - /// The layout of the allocation request that failed. - layout: alloc::alloc::Layout, - }, -} - -/// Wrapper around `Bump` which allows it to be used as an allocator for -/// `HashMap`, `HashSet` and `RawTable`. -/// -/// `Bump` can be used directly without this wrapper on nightly if you enable -/// the `allocator-api` feature of the `bumpalo` crate. 
-#[cfg(feature = "bumpalo")] -#[derive(Clone, Copy, Debug)] -pub struct BumpWrapper<'a>(pub &'a bumpalo::Bump); - -#[cfg(feature = "bumpalo")] -#[test] -fn test_bumpalo() { - use bumpalo::Bump; - let bump = Bump::new(); - let mut map = HashMap::new_in(BumpWrapper(&bump)); - map.insert(0, 1); -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/macros.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/macros.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/macros.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/macros.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,70 +0,0 @@ -// See the cfg-if crate. -#[allow(unused_macro_rules)] -macro_rules! cfg_if { - // match if/else chains with a final `else` - ($( - if #[cfg($($meta:meta),*)] { $($it:item)* } - ) else * else { - $($it2:item)* - }) => { - cfg_if! { - @__items - () ; - $( ( ($($meta),*) ($($it)*) ), )* - ( () ($($it2)*) ), - } - }; - - // match if/else chains lacking a final `else` - ( - if #[cfg($($i_met:meta),*)] { $($i_it:item)* } - $( - else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } - )* - ) => { - cfg_if! { - @__items - () ; - ( ($($i_met),*) ($($i_it)*) ), - $( ( ($($e_met),*) ($($e_it)*) ), )* - ( () () ), - } - }; - - // Internal and recursive macro to emit all the items - // - // Collects all the negated cfgs in a list at the beginning and after the - // semicolon is all the remaining items - (@__items ($($not:meta,)*) ; ) => {}; - (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { - // Emit all items within one block, applying an approprate #[cfg]. The - // #[cfg] will require all `$m` matchers specified and must also negate - // all previous matchers. - cfg_if! 
{ @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } - - // Recurse to emit all other items in `$rest`, and when we do so add all - // our `$m` matchers to the list of `$not` matchers as future emissions - // will have to negate everything we just matched as well. - cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } - }; - - // Internal macro to Apply a cfg attribute to a list of items - (@__apply $m:meta, $($it:item)*) => { - $(#[$m] $it)* - }; -} - -// Helper macro for specialization. This also helps avoid parse errors if the -// default fn syntax for specialization changes in the future. -#[cfg(feature = "nightly")] -macro_rules! default_fn { - (#[$($a:tt)*] $($tt:tt)*) => { - #[$($a)*] default $($tt)* - } -} -#[cfg(not(feature = "nightly"))] -macro_rules! default_fn { - ($($tt:tt)*) => { - $($tt)* - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/map.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/map.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,8408 +0,0 @@ -use crate::raw::{Allocator, Bucket, Global, RawDrain, RawIntoIter, RawIter, RawTable}; -use crate::TryReserveError; -use core::borrow::Borrow; -use core::fmt::{self, Debug}; -use core::hash::{BuildHasher, Hash}; -use core::iter::{FromIterator, FusedIterator}; -use core::marker::PhantomData; -use core::mem; -use core::ops::Index; - -/// Default hasher for `HashMap`. -#[cfg(feature = "ahash")] -pub type DefaultHashBuilder = ahash::RandomState; - -/// Dummy default hasher for `HashMap`. -#[cfg(not(feature = "ahash"))] -pub enum DefaultHashBuilder {} - -/// A hash map implemented with quadratic probing and SIMD lookup. -/// -/// The default hashing algorithm is currently [`AHash`], though this is -/// subject to change at any point in the future. 
This hash function is very -/// fast for all types of keys, but this algorithm will typically *not* protect -/// against attacks such as HashDoS. -/// -/// The hashing algorithm can be replaced on a per-`HashMap` basis using the -/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many -/// alternative algorithms are available on crates.io, such as the [`fnv`] crate. -/// -/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although -/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`. -/// If you implement these yourself, it is important that the following -/// property holds: -/// -/// ```text -/// k1 == k2 -> hash(k1) == hash(k2) -/// ``` -/// -/// In other words, if two keys are equal, their hashes must be equal. -/// -/// It is a logic error for a key to be modified in such a way that the key's -/// hash, as determined by the [`Hash`] trait, or its equality, as determined by -/// the [`Eq`] trait, changes while it is in the map. This is normally only -/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. -/// -/// It is also a logic error for the [`Hash`] implementation of a key to panic. -/// This is generally only possible if the trait is implemented manually. If a -/// panic does occur then the contents of the `HashMap` may become corrupted and -/// some items may be dropped from the table. -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// // Type inference lets us omit an explicit type signature (which -/// // would be `HashMap` in this example). -/// let mut book_reviews = HashMap::new(); -/// -/// // Review some books. 
-/// book_reviews.insert( -/// "Adventures of Huckleberry Finn".to_string(), -/// "My favorite book.".to_string(), -/// ); -/// book_reviews.insert( -/// "Grimms' Fairy Tales".to_string(), -/// "Masterpiece.".to_string(), -/// ); -/// book_reviews.insert( -/// "Pride and Prejudice".to_string(), -/// "Very enjoyable.".to_string(), -/// ); -/// book_reviews.insert( -/// "The Adventures of Sherlock Holmes".to_string(), -/// "Eye lyked it alot.".to_string(), -/// ); -/// -/// // Check for a specific one. -/// // When collections store owned values (String), they can still be -/// // queried using references (&str). -/// if !book_reviews.contains_key("Les Misérables") { -/// println!("We've got {} reviews, but Les Misérables ain't one.", -/// book_reviews.len()); -/// } -/// -/// // oops, this review has a lot of spelling mistakes, let's delete it. -/// book_reviews.remove("The Adventures of Sherlock Holmes"); -/// -/// // Look up the values associated with some keys. -/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"]; -/// for &book in &to_find { -/// match book_reviews.get(book) { -/// Some(review) => println!("{}: {}", book, review), -/// None => println!("{} is unreviewed.", book) -/// } -/// } -/// -/// // Look up the value for a key (will panic if the key is not found). -/// println!("Review for Jane: {}", book_reviews["Pride and Prejudice"]); -/// -/// // Iterate over everything. -/// for (book, review) in &book_reviews { -/// println!("{}: \"{}\"", book, review); -/// } -/// ``` -/// -/// `HashMap` also implements an [`Entry API`](#method.entry), which allows -/// for more complex methods of getting, setting, updating and removing keys and -/// their values: -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// // type inference lets us omit an explicit type signature (which -/// // would be `HashMap<&str, u8>` in this example). 
-/// let mut player_stats = HashMap::new(); -/// -/// fn random_stat_buff() -> u8 { -/// // could actually return some random value here - let's just return -/// // some fixed value for now -/// 42 -/// } -/// -/// // insert a key only if it doesn't already exist -/// player_stats.entry("health").or_insert(100); -/// -/// // insert a key using a function that provides a new value only if it -/// // doesn't already exist -/// player_stats.entry("defence").or_insert_with(random_stat_buff); -/// -/// // update a key, guarding against the key possibly not being set -/// let stat = player_stats.entry("attack").or_insert(100); -/// *stat += random_stat_buff(); -/// ``` -/// -/// The easiest way to use `HashMap` with a custom key type is to derive [`Eq`] and [`Hash`]. -/// We must also derive [`PartialEq`]. -/// -/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html -/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html -/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html -/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html -/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html -/// [`default`]: #method.default -/// [`with_hasher`]: #method.with_hasher -/// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher -/// [`fnv`]: https://crates.io/crates/fnv -/// [`AHash`]: https://crates.io/crates/ahash -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// #[derive(Hash, Eq, PartialEq, Debug)] -/// struct Viking { -/// name: String, -/// country: String, -/// } -/// -/// impl Viking { -/// /// Creates a new Viking. -/// fn new(name: &str, country: &str) -> Viking { -/// Viking { name: name.to_string(), country: country.to_string() } -/// } -/// } -/// -/// // Use a HashMap to store the vikings' health points. 
-/// let mut vikings = HashMap::new(); -/// -/// vikings.insert(Viking::new("Einar", "Norway"), 25); -/// vikings.insert(Viking::new("Olaf", "Denmark"), 24); -/// vikings.insert(Viking::new("Harald", "Iceland"), 12); -/// -/// // Use derived implementation to print the status of the vikings. -/// for (viking, health) in &vikings { -/// println!("{:?} has {} hp", viking, health); -/// } -/// ``` -/// -/// A `HashMap` with fixed list of elements can be initialized from an array: -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let timber_resources: HashMap<&str, i32> = [("Norway", 100), ("Denmark", 50), ("Iceland", 10)] -/// .iter().cloned().collect(); -/// // use the values stored in map -/// ``` -pub struct HashMap { - pub(crate) hash_builder: S, - pub(crate) table: RawTable<(K, V), A>, -} - -impl Clone for HashMap { - fn clone(&self) -> Self { - HashMap { - hash_builder: self.hash_builder.clone(), - table: self.table.clone(), - } - } - - fn clone_from(&mut self, source: &Self) { - self.table.clone_from(&source.table); - - // Update hash_builder only if we successfully cloned all elements. 
- self.hash_builder.clone_from(&source.hash_builder); - } -} - -/// Ensures that a single closure type across uses of this which, in turn prevents multiple -/// instances of any functions like RawTable::reserve from being generated -#[cfg_attr(feature = "inline-more", inline)] -pub(crate) fn make_hasher(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_ -where - K: Borrow, - Q: Hash, - S: BuildHasher, -{ - move |val| make_hash::(hash_builder, &val.0) -} - -/// Ensures that a single closure type across uses of this which, in turn prevents multiple -/// instances of any functions like RawTable::reserve from being generated -#[cfg_attr(feature = "inline-more", inline)] -fn equivalent_key(k: &Q) -> impl Fn(&(K, V)) -> bool + '_ -where - K: Borrow, - Q: ?Sized + Eq, -{ - move |x| k.eq(x.0.borrow()) -} - -/// Ensures that a single closure type across uses of this which, in turn prevents multiple -/// instances of any functions like RawTable::reserve from being generated -#[cfg_attr(feature = "inline-more", inline)] -fn equivalent(k: &Q) -> impl Fn(&K) -> bool + '_ -where - K: Borrow, - Q: ?Sized + Eq, -{ - move |x| k.eq(x.borrow()) -} - -#[cfg(not(feature = "nightly"))] -#[cfg_attr(feature = "inline-more", inline)] -pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 -where - K: Borrow, - Q: Hash + ?Sized, - S: BuildHasher, -{ - use core::hash::Hasher; - let mut state = hash_builder.build_hasher(); - val.hash(&mut state); - state.finish() -} - -#[cfg(feature = "nightly")] -#[cfg_attr(feature = "inline-more", inline)] -pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 -where - K: Borrow, - Q: Hash + ?Sized, - S: BuildHasher, -{ - hash_builder.hash_one(val) -} - -#[cfg(not(feature = "nightly"))] -#[cfg_attr(feature = "inline-more", inline)] -pub(crate) fn make_insert_hash(hash_builder: &S, val: &K) -> u64 -where - K: Hash, - S: BuildHasher, -{ - use core::hash::Hasher; - let mut state = hash_builder.build_hasher(); - val.hash(&mut state); - state.finish() -} - 
-#[cfg(feature = "nightly")] -#[cfg_attr(feature = "inline-more", inline)] -pub(crate) fn make_insert_hash(hash_builder: &S, val: &K) -> u64 -where - K: Hash, - S: BuildHasher, -{ - hash_builder.hash_one(val) -} - -#[cfg(feature = "ahash")] -impl HashMap { - /// Creates an empty `HashMap`. - /// - /// The hash map is initially created with a capacity of 0, so it will not allocate until it - /// is first inserted into. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// let mut map: HashMap<&str, i32> = HashMap::new(); - /// assert_eq!(map.len(), 0); - /// assert_eq!(map.capacity(), 0); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn new() -> Self { - Self::default() - } - - /// Creates an empty `HashMap` with the specified capacity. - /// - /// The hash map will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash map will not allocate. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// let mut map: HashMap<&str, i32> = HashMap::with_capacity(10); - /// assert_eq!(map.len(), 0); - /// assert!(map.capacity() >= 10); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn with_capacity(capacity: usize) -> Self { - Self::with_capacity_and_hasher(capacity, DefaultHashBuilder::default()) - } -} - -#[cfg(feature = "ahash")] -impl HashMap { - /// Creates an empty `HashMap` using the given allocator. - /// - /// The hash map is initially created with a capacity of 0, so it will not allocate until it - /// is first inserted into. - #[cfg_attr(feature = "inline-more", inline)] - pub fn new_in(alloc: A) -> Self { - Self::with_hasher_in(DefaultHashBuilder::default(), alloc) - } - - /// Creates an empty `HashMap` with the specified capacity using the given allocator. - /// - /// The hash map will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash map will not allocate. 
- #[cfg_attr(feature = "inline-more", inline)] - pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Self::with_capacity_and_hasher_in(capacity, DefaultHashBuilder::default(), alloc) - } -} - -impl HashMap { - /// Creates an empty `HashMap` which will use the given hash builder to hash - /// keys. - /// - /// The hash map is initially created with a capacity of 0, so it will not - /// allocate until it is first inserted into. - /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow HashMaps to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. - /// - /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashMap to be useful, see its documentation for details. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let s = DefaultHashBuilder::default(); - /// let mut map = HashMap::with_hasher(s); - /// assert_eq!(map.len(), 0); - /// assert_eq!(map.capacity(), 0); - /// - /// map.insert(1, 2); - /// ``` - /// - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html - #[cfg_attr(feature = "inline-more", inline)] - pub const fn with_hasher(hash_builder: S) -> Self { - Self { - hash_builder, - table: RawTable::new(), - } - } - - /// Creates an empty `HashMap` with the specified capacity, using `hash_builder` - /// to hash the keys. - /// - /// The hash map will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash map will not allocate. - /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow HashMaps to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. 
- /// - /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashMap to be useful, see its documentation for details. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let s = DefaultHashBuilder::default(); - /// let mut map = HashMap::with_capacity_and_hasher(10, s); - /// assert_eq!(map.len(), 0); - /// assert!(map.capacity() >= 10); - /// - /// map.insert(1, 2); - /// ``` - /// - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html - #[cfg_attr(feature = "inline-more", inline)] - pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { - Self { - hash_builder, - table: RawTable::with_capacity(capacity), - } - } -} - -impl HashMap { - /// Returns a reference to the underlying allocator. - #[inline] - pub fn allocator(&self) -> &A { - self.table.allocator() - } - - /// Creates an empty `HashMap` which will use the given hash builder to hash - /// keys. It will be allocated with the given allocator. - /// - /// The created map has the default initial capacity. - /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow HashMaps to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let s = DefaultHashBuilder::default(); - /// let mut map = HashMap::with_hasher(s); - /// map.insert(1, 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn with_hasher_in(hash_builder: S, alloc: A) -> Self { - Self { - hash_builder, - table: RawTable::new_in(alloc), - } - } - - /// Creates an empty `HashMap` with the specified capacity, using `hash_builder` - /// to hash the keys. It will be allocated with the given allocator. 
- /// - /// The hash map will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash map will not allocate. - /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow HashMaps to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let s = DefaultHashBuilder::default(); - /// let mut map = HashMap::with_capacity_and_hasher(10, s); - /// map.insert(1, 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn with_capacity_and_hasher_in(capacity: usize, hash_builder: S, alloc: A) -> Self { - Self { - hash_builder, - table: RawTable::with_capacity_in(capacity, alloc), - } - } - - /// Returns a reference to the map's [`BuildHasher`]. - /// - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let hasher = DefaultHashBuilder::default(); - /// let map: HashMap = HashMap::with_hasher(hasher); - /// let hasher: &DefaultHashBuilder = map.hasher(); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn hasher(&self) -> &S { - &self.hash_builder - } - - /// Returns the number of elements the map can hold without reallocating. - /// - /// This number is a lower bound; the `HashMap` might be able to hold - /// more, but is guaranteed to be able to hold at least this many. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// let map: HashMap = HashMap::with_capacity(100); - /// assert_eq!(map.len(), 0); - /// assert!(map.capacity() >= 100); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn capacity(&self) -> usize { - self.table.capacity() - } - - /// An iterator visiting all keys in arbitrary order. - /// The iterator element type is `&'a K`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert("a", 1); - /// map.insert("b", 2); - /// map.insert("c", 3); - /// assert_eq!(map.len(), 3); - /// let mut vec: Vec<&str> = Vec::new(); - /// - /// for key in map.keys() { - /// println!("{}", key); - /// vec.push(*key); - /// } - /// - /// // The `Keys` iterator produces keys in arbitrary order, so the - /// // keys must be sorted to test them against a sorted array. - /// vec.sort_unstable(); - /// assert_eq!(vec, ["a", "b", "c"]); - /// - /// assert_eq!(map.len(), 3); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn keys(&self) -> Keys<'_, K, V> { - Keys { inner: self.iter() } - } - - /// An iterator visiting all values in arbitrary order. - /// The iterator element type is `&'a V`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert("a", 1); - /// map.insert("b", 2); - /// map.insert("c", 3); - /// assert_eq!(map.len(), 3); - /// let mut vec: Vec = Vec::new(); - /// - /// for val in map.values() { - /// println!("{}", val); - /// vec.push(*val); - /// } - /// - /// // The `Values` iterator produces values in arbitrary order, so the - /// // values must be sorted to test them against a sorted array. 
- /// vec.sort_unstable(); - /// assert_eq!(vec, [1, 2, 3]); - /// - /// assert_eq!(map.len(), 3); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn values(&self) -> Values<'_, K, V> { - Values { inner: self.iter() } - } - - /// An iterator visiting all values mutably in arbitrary order. - /// The iterator element type is `&'a mut V`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// - /// map.insert("a", 1); - /// map.insert("b", 2); - /// map.insert("c", 3); - /// - /// for val in map.values_mut() { - /// *val = *val + 10; - /// } - /// - /// assert_eq!(map.len(), 3); - /// let mut vec: Vec = Vec::new(); - /// - /// for val in map.values() { - /// println!("{}", val); - /// vec.push(*val); - /// } - /// - /// // The `Values` iterator produces values in arbitrary order, so the - /// // values must be sorted to test them against a sorted array. - /// vec.sort_unstable(); - /// assert_eq!(vec, [11, 12, 13]); - /// - /// assert_eq!(map.len(), 3); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { - ValuesMut { - inner: self.iter_mut(), - } - } - - /// An iterator visiting all key-value pairs in arbitrary order. - /// The iterator element type is `(&'a K, &'a V)`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert("a", 1); - /// map.insert("b", 2); - /// map.insert("c", 3); - /// assert_eq!(map.len(), 3); - /// let mut vec: Vec<(&str, i32)> = Vec::new(); - /// - /// for (key, val) in map.iter() { - /// println!("key: {} val: {}", key, val); - /// vec.push((*key, *val)); - /// } - /// - /// // The `Iter` iterator produces items in arbitrary order, so the - /// // items must be sorted to test them against a sorted array. 
- /// vec.sort_unstable(); - /// assert_eq!(vec, [("a", 1), ("b", 2), ("c", 3)]); - /// - /// assert_eq!(map.len(), 3); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn iter(&self) -> Iter<'_, K, V> { - // Here we tie the lifetime of self to the iter. - unsafe { - Iter { - inner: self.table.iter(), - marker: PhantomData, - } - } - } - - /// An iterator visiting all key-value pairs in arbitrary order, - /// with mutable references to the values. - /// The iterator element type is `(&'a K, &'a mut V)`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert("a", 1); - /// map.insert("b", 2); - /// map.insert("c", 3); - /// - /// // Update all values - /// for (_, val) in map.iter_mut() { - /// *val *= 2; - /// } - /// - /// assert_eq!(map.len(), 3); - /// let mut vec: Vec<(&str, i32)> = Vec::new(); - /// - /// for (key, val) in &map { - /// println!("key: {} val: {}", key, val); - /// vec.push((*key, *val)); - /// } - /// - /// // The `Iter` iterator produces items in arbitrary order, so the - /// // items must be sorted to test them against a sorted array. - /// vec.sort_unstable(); - /// assert_eq!(vec, [("a", 2), ("b", 4), ("c", 6)]); - /// - /// assert_eq!(map.len(), 3); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { - // Here we tie the lifetime of self to the iter. - unsafe { - IterMut { - inner: self.table.iter(), - marker: PhantomData, - } - } - } - - #[cfg(test)] - #[cfg_attr(feature = "inline-more", inline)] - fn raw_capacity(&self) -> usize { - self.table.buckets() - } - - /// Returns the number of elements in the map. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut a = HashMap::new(); - /// assert_eq!(a.len(), 0); - /// a.insert(1, "a"); - /// assert_eq!(a.len(), 1); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn len(&self) -> usize { - self.table.len() - } - - /// Returns `true` if the map contains no elements. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut a = HashMap::new(); - /// assert!(a.is_empty()); - /// a.insert(1, "a"); - /// assert!(!a.is_empty()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Clears the map, returning all key-value pairs as an iterator. Keeps the - /// allocated memory for reuse. - /// - /// If the returned iterator is dropped before being fully consumed, it - /// drops the remaining key-value pairs. The returned iterator keeps a - /// mutable borrow on the vector to optimize its implementation. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut a = HashMap::new(); - /// a.insert(1, "a"); - /// a.insert(2, "b"); - /// let capacity_before_drain = a.capacity(); - /// - /// for (k, v) in a.drain().take(1) { - /// assert!(k == 1 || k == 2); - /// assert!(v == "a" || v == "b"); - /// } - /// - /// // As we can see, the map is empty and contains no element. - /// assert!(a.is_empty() && a.len() == 0); - /// // But map capacity is equal to old one. - /// assert_eq!(a.capacity(), capacity_before_drain); - /// - /// let mut a = HashMap::new(); - /// a.insert(1, "a"); - /// a.insert(2, "b"); - /// - /// { // Iterator is dropped without being consumed. - /// let d = a.drain(); - /// } - /// - /// // But the map is empty even if we do not use Drain iterator. 
- /// assert!(a.is_empty()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn drain(&mut self) -> Drain<'_, K, V, A> { - Drain { - inner: self.table.drain(), - } - } - - /// Retains only the elements specified by the predicate. Keeps the - /// allocated memory for reuse. - /// - /// In other words, remove all pairs `(k, v)` such that `f(&k, &mut v)` returns `false`. - /// The elements are visited in unsorted (and unspecified) order. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = (0..8).map(|x|(x, x*10)).collect(); - /// assert_eq!(map.len(), 8); - /// let capacity_before_retain = map.capacity(); - /// - /// map.retain(|&k, _| k % 2 == 0); - /// - /// // We can see, that the number of elements inside map is changed. - /// assert_eq!(map.len(), 4); - /// // But map capacity is equal to old one. - /// assert_eq!(map.capacity(), capacity_before_retain); - /// - /// let mut vec: Vec<(i32, i32)> = map.iter().map(|(&k, &v)| (k, v)).collect(); - /// vec.sort_unstable(); - /// assert_eq!(vec, [(0, 0), (2, 20), (4, 40), (6, 60)]); - /// ``` - pub fn retain(&mut self, mut f: F) - where - F: FnMut(&K, &mut V) -> bool, - { - // Here we only use `iter` as a temporary, preventing use-after-free - unsafe { - for item in self.table.iter() { - let &mut (ref key, ref mut value) = item.as_mut(); - if !f(key, value) { - self.table.erase(item); - } - } - } - } - - /// Drains elements which are true under the given predicate, - /// and returns an iterator over the removed items. - /// - /// In other words, move all pairs `(k, v)` such that `f(&k, &mut v)` returns `true` out - /// into another iterator. - /// - /// Note that `drain_filter` lets you mutate every value in the filter closure, regardless of - /// whether you choose to keep or remove it. - /// - /// When the returned DrainedFilter is dropped, any remaining elements that satisfy - /// the predicate are dropped from the table. 
- /// - /// It is unspecified how many more elements will be subjected to the closure - /// if a panic occurs in the closure, or a panic occurs while dropping an element, - /// or if the `DrainFilter` value is leaked. - /// - /// Keeps the allocated memory for reuse. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); - /// let capacity_before_drain_filter = map.capacity(); - /// let drained: HashMap = map.drain_filter(|k, _v| k % 2 == 0).collect(); - /// - /// let mut evens = drained.keys().cloned().collect::>(); - /// let mut odds = map.keys().cloned().collect::>(); - /// evens.sort(); - /// odds.sort(); - /// - /// assert_eq!(evens, vec![0, 2, 4, 6]); - /// assert_eq!(odds, vec![1, 3, 5, 7]); - /// // Map capacity is equal to old one. - /// assert_eq!(map.capacity(), capacity_before_drain_filter); - /// - /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); - /// - /// { // Iterator is dropped without being consumed. - /// let d = map.drain_filter(|k, _v| k % 2 != 0); - /// } - /// - /// // But the map lens have been reduced by half - /// // even if we do not use DrainFilter iterator. - /// assert_eq!(map.len(), 4); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn drain_filter(&mut self, f: F) -> DrainFilter<'_, K, V, F, A> - where - F: FnMut(&K, &mut V) -> bool, - { - DrainFilter { - f, - inner: DrainFilterInner { - iter: unsafe { self.table.iter() }, - table: &mut self.table, - }, - } - } - - /// Clears the map, removing all key-value pairs. Keeps the allocated memory - /// for reuse. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut a = HashMap::new(); - /// a.insert(1, "a"); - /// let capacity_before_clear = a.capacity(); - /// - /// a.clear(); - /// - /// // Map is empty. - /// assert!(a.is_empty()); - /// // But map capacity is equal to old one. 
- /// assert_eq!(a.capacity(), capacity_before_clear); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn clear(&mut self) { - self.table.clear(); - } - - /// Creates a consuming iterator visiting all the keys in arbitrary order. - /// The map cannot be used after calling this. - /// The iterator element type is `K`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert("a", 1); - /// map.insert("b", 2); - /// map.insert("c", 3); - /// - /// let mut vec: Vec<&str> = map.into_keys().collect(); - /// - /// // The `IntoKeys` iterator produces keys in arbitrary order, so the - /// // keys must be sorted to test them against a sorted array. - /// vec.sort_unstable(); - /// assert_eq!(vec, ["a", "b", "c"]); - /// ``` - #[inline] - pub fn into_keys(self) -> IntoKeys { - IntoKeys { - inner: self.into_iter(), - } - } - - /// Creates a consuming iterator visiting all the values in arbitrary order. - /// The map cannot be used after calling this. - /// The iterator element type is `V`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert("a", 1); - /// map.insert("b", 2); - /// map.insert("c", 3); - /// - /// let mut vec: Vec = map.into_values().collect(); - /// - /// // The `IntoValues` iterator produces values in arbitrary order, so - /// // the values must be sorted to test them against a sorted array. - /// vec.sort_unstable(); - /// assert_eq!(vec, [1, 2, 3]); - /// ``` - #[inline] - pub fn into_values(self) -> IntoValues { - IntoValues { - inner: self.into_iter(), - } - } -} - -impl HashMap -where - K: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - /// Reserves capacity for at least `additional` more elements to be inserted - /// in the `HashMap`. The collection may reserve more space to avoid - /// frequent reallocations. 
- /// - /// # Panics - /// - /// Panics if the new allocation size overflows [`usize`]. - /// - /// [`usize`]: https://doc.rust-lang.org/std/primitive.usize.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// let mut map: HashMap<&str, i32> = HashMap::new(); - /// // Map is empty and doesn't allocate memory - /// assert_eq!(map.capacity(), 0); - /// - /// map.reserve(10); - /// - /// // And now map can hold at least 10 elements - /// assert!(map.capacity() >= 10); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn reserve(&mut self, additional: usize) { - self.table - .reserve(additional, make_hasher::(&self.hash_builder)); - } - - /// Tries to reserve capacity for at least `additional` more elements to be inserted - /// in the given `HashMap`. The collection may reserve more space to avoid - /// frequent reallocations. - /// - /// # Errors - /// - /// If the capacity overflows, or the allocator reports a failure, then an error - /// is returned. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, isize> = HashMap::new(); - /// // Map is empty and doesn't allocate memory - /// assert_eq!(map.capacity(), 0); - /// - /// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?"); - /// - /// // And now map can hold at least 10 elements - /// assert!(map.capacity() >= 10); - /// ``` - /// If the capacity overflows, or the allocator reports a failure, then an error - /// is returned: - /// ``` - /// # fn test() { - /// use hashbrown::HashMap; - /// use hashbrown::TryReserveError; - /// let mut map: HashMap = HashMap::new(); - /// - /// match map.try_reserve(usize::MAX) { - /// Err(error) => match error { - /// TryReserveError::CapacityOverflow => {} - /// _ => panic!("TryReserveError::AllocError ?"), - /// }, - /// _ => panic!(), - /// } - /// # } - /// # fn main() { - /// # #[cfg(not(miri))] - /// # test() - /// # } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.table - .try_reserve(additional, make_hasher::(&self.hash_builder)) - } - - /// Shrinks the capacity of the map as much as possible. It will drop - /// down as much as possible while maintaining the internal rules - /// and possibly leaving some space in accordance with the resize policy. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = HashMap::with_capacity(100); - /// map.insert(1, 2); - /// map.insert(3, 4); - /// assert!(map.capacity() >= 100); - /// map.shrink_to_fit(); - /// assert!(map.capacity() >= 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn shrink_to_fit(&mut self) { - self.table - .shrink_to(0, make_hasher::(&self.hash_builder)); - } - - /// Shrinks the capacity of the map with a lower limit. 
It will drop - /// down no lower than the supplied limit while maintaining the internal rules - /// and possibly leaving some space in accordance with the resize policy. - /// - /// This function does nothing if the current capacity is smaller than the - /// supplied minimum capacity. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = HashMap::with_capacity(100); - /// map.insert(1, 2); - /// map.insert(3, 4); - /// assert!(map.capacity() >= 100); - /// map.shrink_to(10); - /// assert!(map.capacity() >= 10); - /// map.shrink_to(0); - /// assert!(map.capacity() >= 2); - /// map.shrink_to(10); - /// assert!(map.capacity() >= 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn shrink_to(&mut self, min_capacity: usize) { - self.table - .shrink_to(min_capacity, make_hasher::(&self.hash_builder)); - } - - /// Gets the given key's corresponding entry in the map for in-place manipulation. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut letters = HashMap::new(); - /// - /// for ch in "a short treatise on fungi".chars() { - /// let counter = letters.entry(ch).or_insert(0); - /// *counter += 1; - /// } - /// - /// assert_eq!(letters[&'s'], 2); - /// assert_eq!(letters[&'t'], 3); - /// assert_eq!(letters[&'u'], 1); - /// assert_eq!(letters.get(&'y'), None); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S, A> { - let hash = make_insert_hash::(&self.hash_builder, &key); - if let Some(elem) = self.table.find(hash, equivalent_key(&key)) { - Entry::Occupied(OccupiedEntry { - hash, - key: Some(key), - elem, - table: self, - }) - } else { - Entry::Vacant(VacantEntry { - hash, - key, - table: self, - }) - } - } - - /// Gets the given key's corresponding entry by reference in the map for in-place manipulation. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut words: HashMap = HashMap::new(); - /// let source = ["poneyland", "horseyland", "poneyland", "poneyland"]; - /// for (i, &s) in source.iter().enumerate() { - /// let counter = words.entry_ref(s).or_insert(0); - /// *counter += 1; - /// } - /// - /// assert_eq!(words["poneyland"], 3); - /// assert_eq!(words["horseyland"], 1); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn entry_ref<'a, 'b, Q: ?Sized>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S, A> - where - K: Borrow, - Q: Hash + Eq, - { - let hash = make_hash::(&self.hash_builder, key); - if let Some(elem) = self.table.find(hash, equivalent_key(key)) { - EntryRef::Occupied(OccupiedEntryRef { - hash, - key: Some(KeyOrRef::Borrowed(key)), - elem, - table: self, - }) - } else { - EntryRef::Vacant(VacantEntryRef { - hash, - key: KeyOrRef::Borrowed(key), - table: self, - }) - } - } - - /// Returns a reference to the value corresponding to the key. - /// - /// The key may be any borrowed form of the map's key type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the key type. - /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert(1, "a"); - /// assert_eq!(map.get(&1), Some(&"a")); - /// assert_eq!(map.get(&2), None); - /// ``` - #[inline] - pub fn get(&self, k: &Q) -> Option<&V> - where - K: Borrow, - Q: Hash + Eq, - { - // Avoid `Option::map` because it bloats LLVM IR. - match self.get_inner(k) { - Some(&(_, ref v)) => Some(v), - None => None, - } - } - - /// Returns the key-value pair corresponding to the supplied key. 
- /// - /// The supplied key may be any borrowed form of the map's key type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the key type. - /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert(1, "a"); - /// assert_eq!(map.get_key_value(&1), Some((&1, &"a"))); - /// assert_eq!(map.get_key_value(&2), None); - /// ``` - #[inline] - pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> - where - K: Borrow, - Q: Hash + Eq, - { - // Avoid `Option::map` because it bloats LLVM IR. - match self.get_inner(k) { - Some(&(ref key, ref value)) => Some((key, value)), - None => None, - } - } - - #[inline] - fn get_inner(&self, k: &Q) -> Option<&(K, V)> - where - K: Borrow, - Q: Hash + Eq, - { - if self.table.is_empty() { - None - } else { - let hash = make_hash::(&self.hash_builder, k); - self.table.get(hash, equivalent_key(k)) - } - } - - /// Returns the key-value pair corresponding to the supplied key, with a mutable reference to value. - /// - /// The supplied key may be any borrowed form of the map's key type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the key type. 
- /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert(1, "a"); - /// let (k, v) = map.get_key_value_mut(&1).unwrap(); - /// assert_eq!(k, &1); - /// assert_eq!(v, &mut "a"); - /// *v = "b"; - /// assert_eq!(map.get_key_value_mut(&1), Some((&1, &mut "b"))); - /// assert_eq!(map.get_key_value_mut(&2), None); - /// ``` - #[inline] - pub fn get_key_value_mut(&mut self, k: &Q) -> Option<(&K, &mut V)> - where - K: Borrow, - Q: Hash + Eq, - { - // Avoid `Option::map` because it bloats LLVM IR. - match self.get_inner_mut(k) { - Some(&mut (ref key, ref mut value)) => Some((key, value)), - None => None, - } - } - - /// Returns `true` if the map contains a value for the specified key. - /// - /// The key may be any borrowed form of the map's key type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the key type. - /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert(1, "a"); - /// assert_eq!(map.contains_key(&1), true); - /// assert_eq!(map.contains_key(&2), false); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn contains_key(&self, k: &Q) -> bool - where - K: Borrow, - Q: Hash + Eq, - { - self.get_inner(k).is_some() - } - - /// Returns a mutable reference to the value corresponding to the key. - /// - /// The key may be any borrowed form of the map's key type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the key type. 
- /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert(1, "a"); - /// if let Some(x) = map.get_mut(&1) { - /// *x = "b"; - /// } - /// assert_eq!(map[&1], "b"); - /// - /// assert_eq!(map.get_mut(&2), None); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> - where - K: Borrow, - Q: Hash + Eq, - { - // Avoid `Option::map` because it bloats LLVM IR. - match self.get_inner_mut(k) { - Some(&mut (_, ref mut v)) => Some(v), - None => None, - } - } - - #[inline] - fn get_inner_mut(&mut self, k: &Q) -> Option<&mut (K, V)> - where - K: Borrow, - Q: Hash + Eq, - { - if self.table.is_empty() { - None - } else { - let hash = make_hash::(&self.hash_builder, k); - self.table.get_mut(hash, equivalent_key(k)) - } - } - - /// Attempts to get mutable references to `N` values in the map at once. - /// - /// Returns an array of length `N` with the results of each query. For soundness, at most one - /// mutable reference will be returned to any value. `None` will be returned if any of the - /// keys are duplicates or missing. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut libraries = HashMap::new(); - /// libraries.insert("Bodleian Library".to_string(), 1602); - /// libraries.insert("Athenæum".to_string(), 1807); - /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); - /// libraries.insert("Library of Congress".to_string(), 1800); - /// - /// let got = libraries.get_many_mut([ - /// "Athenæum", - /// "Library of Congress", - /// ]); - /// assert_eq!( - /// got, - /// Some([ - /// &mut 1807, - /// &mut 1800, - /// ]), - /// ); - /// - /// // Missing keys result in None - /// let got = libraries.get_many_mut([ - /// "Athenæum", - /// "New York Public Library", - /// ]); - /// assert_eq!(got, None); - /// - /// // Duplicate keys result in None - /// let got = libraries.get_many_mut([ - /// "Athenæum", - /// "Athenæum", - /// ]); - /// assert_eq!(got, None); - /// ``` - pub fn get_many_mut(&mut self, ks: [&Q; N]) -> Option<[&'_ mut V; N]> - where - K: Borrow, - Q: Hash + Eq, - { - self.get_many_mut_inner(ks).map(|res| res.map(|(_, v)| v)) - } - - /// Attempts to get mutable references to `N` values in the map at once, without validating that - /// the values are unique. - /// - /// Returns an array of length `N` with the results of each query. `None` will be returned if - /// any of the keys are missing. - /// - /// For a safe alternative see [`get_many_mut`](`HashMap::get_many_mut`). - /// - /// # Safety - /// - /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting - /// references are not used. 
- /// - /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut libraries = HashMap::new(); - /// libraries.insert("Bodleian Library".to_string(), 1602); - /// libraries.insert("Athenæum".to_string(), 1807); - /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); - /// libraries.insert("Library of Congress".to_string(), 1800); - /// - /// let got = libraries.get_many_mut([ - /// "Athenæum", - /// "Library of Congress", - /// ]); - /// assert_eq!( - /// got, - /// Some([ - /// &mut 1807, - /// &mut 1800, - /// ]), - /// ); - /// - /// // Missing keys result in None - /// let got = libraries.get_many_mut([ - /// "Athenæum", - /// "New York Public Library", - /// ]); - /// assert_eq!(got, None); - /// ``` - pub unsafe fn get_many_unchecked_mut( - &mut self, - ks: [&Q; N], - ) -> Option<[&'_ mut V; N]> - where - K: Borrow, - Q: Hash + Eq, - { - self.get_many_unchecked_mut_inner(ks) - .map(|res| res.map(|(_, v)| v)) - } - - /// Attempts to get mutable references to `N` values in the map at once, with immutable - /// references to the corresponding keys. - /// - /// Returns an array of length `N` with the results of each query. For soundness, at most one - /// mutable reference will be returned to any value. `None` will be returned if any of the keys - /// are duplicates or missing. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut libraries = HashMap::new(); - /// libraries.insert("Bodleian Library".to_string(), 1602); - /// libraries.insert("Athenæum".to_string(), 1807); - /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); - /// libraries.insert("Library of Congress".to_string(), 1800); - /// - /// let got = libraries.get_many_key_value_mut([ - /// "Bodleian Library", - /// "Herzogin-Anna-Amalia-Bibliothek", - /// ]); - /// assert_eq!( - /// got, - /// Some([ - /// (&"Bodleian Library".to_string(), &mut 1602), - /// (&"Herzogin-Anna-Amalia-Bibliothek".to_string(), &mut 1691), - /// ]), - /// ); - /// // Missing keys result in None - /// let got = libraries.get_many_key_value_mut([ - /// "Bodleian Library", - /// "Gewandhaus", - /// ]); - /// assert_eq!(got, None); - /// - /// // Duplicate keys result in None - /// let got = libraries.get_many_key_value_mut([ - /// "Bodleian Library", - /// "Herzogin-Anna-Amalia-Bibliothek", - /// "Herzogin-Anna-Amalia-Bibliothek", - /// ]); - /// assert_eq!(got, None); - /// ``` - pub fn get_many_key_value_mut( - &mut self, - ks: [&Q; N], - ) -> Option<[(&'_ K, &'_ mut V); N]> - where - K: Borrow, - Q: Hash + Eq, - { - self.get_many_mut_inner(ks) - .map(|res| res.map(|(k, v)| (&*k, v))) - } - - /// Attempts to get mutable references to `N` values in the map at once, with immutable - /// references to the corresponding keys, without validating that the values are unique. - /// - /// Returns an array of length `N` with the results of each query. `None` will be returned if - /// any of the keys are missing. - /// - /// For a safe alternative see [`get_many_key_value_mut`](`HashMap::get_many_key_value_mut`). - /// - /// # Safety - /// - /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting - /// references are not used. 
- /// - /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut libraries = HashMap::new(); - /// libraries.insert("Bodleian Library".to_string(), 1602); - /// libraries.insert("Athenæum".to_string(), 1807); - /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); - /// libraries.insert("Library of Congress".to_string(), 1800); - /// - /// let got = libraries.get_many_key_value_mut([ - /// "Bodleian Library", - /// "Herzogin-Anna-Amalia-Bibliothek", - /// ]); - /// assert_eq!( - /// got, - /// Some([ - /// (&"Bodleian Library".to_string(), &mut 1602), - /// (&"Herzogin-Anna-Amalia-Bibliothek".to_string(), &mut 1691), - /// ]), - /// ); - /// // Missing keys result in None - /// let got = libraries.get_many_key_value_mut([ - /// "Bodleian Library", - /// "Gewandhaus", - /// ]); - /// assert_eq!(got, None); - /// ``` - pub unsafe fn get_many_key_value_unchecked_mut( - &mut self, - ks: [&Q; N], - ) -> Option<[(&'_ K, &'_ mut V); N]> - where - K: Borrow, - Q: Hash + Eq, - { - self.get_many_unchecked_mut_inner(ks) - .map(|res| res.map(|(k, v)| (&*k, v))) - } - - fn get_many_mut_inner( - &mut self, - ks: [&Q; N], - ) -> Option<[&'_ mut (K, V); N]> - where - K: Borrow, - Q: Hash + Eq, - { - let hashes = self.build_hashes_inner(ks); - self.table - .get_many_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow())) - } - - unsafe fn get_many_unchecked_mut_inner( - &mut self, - ks: [&Q; N], - ) -> Option<[&'_ mut (K, V); N]> - where - K: Borrow, - Q: Hash + Eq, - { - let hashes = self.build_hashes_inner(ks); - self.table - .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow())) - } - - fn build_hashes_inner(&self, ks: [&Q; N]) -> [u64; N] - where - K: Borrow, - Q: Hash + Eq, - { - let mut hashes = [0_u64; N]; - for i in 0..N { - hashes[i] = make_hash::(&self.hash_builder, ks[i]); - } - hashes - } - - /// Inserts a key-value pair 
into the map. - /// - /// If the map did not have this key present, [`None`] is returned. - /// - /// If the map did have this key present, the value is updated, and the old - /// value is returned. The key is not updated, though; this matters for - /// types that can be `==` without being identical. See the [`std::collections`] - /// [module-level documentation] for more. - /// - /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None - /// [`std::collections`]: https://doc.rust-lang.org/std/collections/index.html - /// [module-level documentation]: https://doc.rust-lang.org/std/collections/index.html#insert-and-complex-keys - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// assert_eq!(map.insert(37, "a"), None); - /// assert_eq!(map.is_empty(), false); - /// - /// map.insert(37, "b"); - /// assert_eq!(map.insert(37, "c"), Some("b")); - /// assert_eq!(map[&37], "c"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(&mut self, k: K, v: V) -> Option { - let hash = make_insert_hash::(&self.hash_builder, &k); - if let Some((_, item)) = self.table.get_mut(hash, equivalent_key(&k)) { - Some(mem::replace(item, v)) - } else { - self.table - .insert(hash, (k, v), make_hasher::(&self.hash_builder)); - None - } - } - - /// Insert a key-value pair into the map without checking - /// if the key already exists in the map. - /// - /// Returns a reference to the key and value just inserted. - /// - /// This operation is safe if a key does not exist in the map. - /// - /// However, if a key exists in the map already, the behavior is unspecified: - /// this operation may panic, loop forever, or any following operation with the map - /// may panic, loop forever or return arbitrary result. - /// - /// That said, this operation (and following operations) are guaranteed to - /// not violate memory safety. 
- /// - /// This operation is faster than regular insert, because it does not perform - /// lookup before insertion. - /// - /// This operation is useful during initial population of the map. - /// For example, when constructing a map from another map, we know - /// that keys are unique. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map1 = HashMap::new(); - /// assert_eq!(map1.insert(1, "a"), None); - /// assert_eq!(map1.insert(2, "b"), None); - /// assert_eq!(map1.insert(3, "c"), None); - /// assert_eq!(map1.len(), 3); - /// - /// let mut map2 = HashMap::new(); - /// - /// for (key, value) in map1.into_iter() { - /// map2.insert_unique_unchecked(key, value); - /// } - /// - /// let (key, value) = map2.insert_unique_unchecked(4, "d"); - /// assert_eq!(key, &4); - /// assert_eq!(value, &mut "d"); - /// *value = "e"; - /// - /// assert_eq!(map2[&1], "a"); - /// assert_eq!(map2[&2], "b"); - /// assert_eq!(map2[&3], "c"); - /// assert_eq!(map2[&4], "e"); - /// assert_eq!(map2.len(), 4); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert_unique_unchecked(&mut self, k: K, v: V) -> (&K, &mut V) { - let hash = make_insert_hash::(&self.hash_builder, &k); - let bucket = self - .table - .insert(hash, (k, v), make_hasher::(&self.hash_builder)); - let (k_ref, v_ref) = unsafe { bucket.as_mut() }; - (k_ref, v_ref) - } - - /// Tries to insert a key-value pair into the map, and returns - /// a mutable reference to the value in the entry. - /// - /// # Errors - /// - /// If the map already had this key present, nothing is updated, and - /// an error containing the occupied entry and the value is returned. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::OccupiedError; - /// - /// let mut map = HashMap::new(); - /// assert_eq!(map.try_insert(37, "a").unwrap(), &"a"); - /// - /// match map.try_insert(37, "b") { - /// Err(OccupiedError { entry, value }) => { - /// assert_eq!(entry.key(), &37); - /// assert_eq!(entry.get(), &"a"); - /// assert_eq!(value, "b"); - /// } - /// _ => panic!() - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn try_insert( - &mut self, - key: K, - value: V, - ) -> Result<&mut V, OccupiedError<'_, K, V, S, A>> { - match self.entry(key) { - Entry::Occupied(entry) => Err(OccupiedError { entry, value }), - Entry::Vacant(entry) => Ok(entry.insert(value)), - } - } - - /// Removes a key from the map, returning the value at the key if the key - /// was previously in the map. Keeps the allocated memory for reuse. - /// - /// The key may be any borrowed form of the map's key type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the key type. - /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// // The map is empty - /// assert!(map.is_empty() && map.capacity() == 0); - /// - /// map.insert(1, "a"); - /// let capacity_before_remove = map.capacity(); - /// - /// assert_eq!(map.remove(&1), Some("a")); - /// assert_eq!(map.remove(&1), None); - /// - /// // Now map holds none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove(&mut self, k: &Q) -> Option - where - K: Borrow, - Q: Hash + Eq, - { - // Avoid `Option::map` because it bloats LLVM IR. 
- match self.remove_entry(k) { - Some((_, v)) => Some(v), - None => None, - } - } - - /// Removes a key from the map, returning the stored key and value if the - /// key was previously in the map. Keeps the allocated memory for reuse. - /// - /// The key may be any borrowed form of the map's key type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the key type. - /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// // The map is empty - /// assert!(map.is_empty() && map.capacity() == 0); - /// - /// map.insert(1, "a"); - /// let capacity_before_remove = map.capacity(); - /// - /// assert_eq!(map.remove_entry(&1), Some((1, "a"))); - /// assert_eq!(map.remove(&1), None); - /// - /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove_entry(&mut self, k: &Q) -> Option<(K, V)> - where - K: Borrow, - Q: Hash + Eq, - { - let hash = make_hash::(&self.hash_builder, k); - self.table.remove_entry(hash, equivalent_key(k)) - } -} - -impl HashMap { - /// Creates a raw entry builder for the HashMap. - /// - /// Raw entries provide the lowest level of control for searching and - /// manipulating a map. They must be manually initialized with a hash and - /// then manually searched. After this, insertions into a vacant entry - /// still require an owned key to be provided. 
- /// - /// Raw entries are useful for such exotic situations as: - /// - /// * Hash memoization - /// * Deferring the creation of an owned key until it is known to be required - /// * Using a search key that doesn't work with the Borrow trait - /// * Using custom comparison logic without newtype wrappers - /// - /// Because raw entries provide much more low-level control, it's much easier - /// to put the HashMap into an inconsistent state which, while memory-safe, - /// will cause the map to produce seemingly random results. Higher-level and - /// more foolproof APIs like `entry` should be preferred when possible. - /// - /// In particular, the hash used to initialized the raw entry must still be - /// consistent with the hash of the key that is ultimately stored in the entry. - /// This is because implementations of HashMap may need to recompute hashes - /// when resizing, at which point only the keys are available. - /// - /// Raw entries give mutable access to the keys. This must not be used - /// to modify how the key would compare or hash, as the map will not re-evaluate - /// where the key should go, meaning the keys may become "lost" if their - /// location does not reflect their state. For instance, if you change a key - /// so that the map now contains keys which compare equal, search may start - /// acting erratically, with two keys randomly masking each other. Implementations - /// are free to assume this doesn't happen (within the limits of memory-safety). 
- /// - /// # Examples - /// - /// ``` - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map = HashMap::new(); - /// map.extend([("a", 100), ("b", 200), ("c", 300)]); - /// - /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// // Existing key (insert and update) - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => unreachable!(), - /// RawEntryMut::Occupied(mut view) => { - /// assert_eq!(view.get(), &100); - /// let v = view.get_mut(); - /// let new_v = (*v) * 10; - /// *v = new_v; - /// assert_eq!(view.insert(1111), 1000); - /// } - /// } - /// - /// assert_eq!(map[&"a"], 1111); - /// assert_eq!(map.len(), 3); - /// - /// // Existing key (take) - /// let hash = compute_hash(map.hasher(), &"c"); - /// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &"c") { - /// RawEntryMut::Vacant(_) => unreachable!(), - /// RawEntryMut::Occupied(view) => { - /// assert_eq!(view.remove_entry(), ("c", 300)); - /// } - /// } - /// assert_eq!(map.raw_entry().from_key(&"c"), None); - /// assert_eq!(map.len(), 2); - /// - /// // Nonexistent key (insert and update) - /// let key = "d"; - /// let hash = compute_hash(map.hasher(), &key); - /// match map.raw_entry_mut().from_hash(hash, |q| *q == key) { - /// RawEntryMut::Occupied(_) => unreachable!(), - /// RawEntryMut::Vacant(view) => { - /// let (k, value) = view.insert("d", 4000); - /// assert_eq!((*k, *value), ("d", 4000)); - /// *value = 40000; - /// } - /// } - /// assert_eq!(map[&"d"], 40000); - /// assert_eq!(map.len(), 3); - /// - /// match map.raw_entry_mut().from_hash(hash, |q| *q == key) { - /// RawEntryMut::Vacant(_) => unreachable!(), - /// RawEntryMut::Occupied(view) => { - /// assert_eq!(view.remove_entry(), ("d", 40000)); - /// } - /// } - /// assert_eq!(map.get(&"d"), 
None); - /// assert_eq!(map.len(), 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, S, A> { - RawEntryBuilderMut { map: self } - } - - /// Creates a raw immutable entry builder for the HashMap. - /// - /// Raw entries provide the lowest level of control for searching and - /// manipulating a map. They must be manually initialized with a hash and - /// then manually searched. - /// - /// This is useful for - /// * Hash memoization - /// * Using a search key that doesn't work with the Borrow trait - /// * Using custom comparison logic without newtype wrappers - /// - /// Unless you are in such a situation, higher-level and more foolproof APIs like - /// `get` should be preferred. - /// - /// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`. - /// - /// # Examples - /// - /// ``` - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.extend([("a", 100), ("b", 200), ("c", 300)]); - /// - /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// for k in ["a", "b", "c", "d", "e", "f"] { - /// let hash = compute_hash(map.hasher(), k); - /// let v = map.get(&k).cloned(); - /// let kv = v.as_ref().map(|v| (&k, v)); - /// - /// println!("Key: {} and value: {:?}", k, v); - /// - /// assert_eq!(map.raw_entry().from_key(&k), kv); - /// assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); - /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, S, A> { - RawEntryBuilder { map: self } - } - - /// Returns a mutable reference to the [`RawTable`] used underneath [`HashMap`]. 
- /// This function is only available if the `raw` feature of the crate is enabled. - /// - /// # Note - /// - /// Calling the function safe, but using raw hash table API's may require - /// unsafe functions or blocks. - /// - /// `RawTable` API gives the lowest level of control under the map that can be useful - /// for extending the HashMap's API, but may lead to *[undefined behavior]*. - /// - /// [`HashMap`]: struct.HashMap.html - /// [`RawTable`]: raw/struct.RawTable.html - /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html - /// - /// # Examples - /// - /// ``` - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.extend([("a", 10), ("b", 20), ("c", 30)]); - /// assert_eq!(map.len(), 3); - /// - /// // Let's imagine that we have a value and a hash of the key, but not the key itself. - /// // However, if you want to remove the value from the map by hash and value, and you - /// // know exactly that the value is unique, then you can create a function like this: - /// fn remove_by_hash( - /// map: &mut HashMap, - /// hash: u64, - /// is_match: F, - /// ) -> Option<(K, V)> - /// where - /// F: Fn(&(K, V)) -> bool, - /// { - /// let raw_table = map.raw_table(); - /// match raw_table.find(hash, is_match) { - /// Some(bucket) => Some(unsafe { raw_table.remove(bucket) }), - /// None => None, - /// } - /// } - /// - /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// let hash = compute_hash(map.hasher(), "a"); - /// assert_eq!(remove_by_hash(&mut map, hash, |(_, v)| *v == 10), Some(("a", 10))); - /// assert_eq!(map.get(&"a"), None); - /// assert_eq!(map.len(), 2); - /// ``` - #[cfg(feature = "raw")] - #[cfg_attr(feature = "inline-more", inline)] - pub fn raw_table(&mut self) -> &mut RawTable<(K, V), A> 
{ - &mut self.table - } -} - -impl PartialEq for HashMap -where - K: Eq + Hash, - V: PartialEq, - S: BuildHasher, - A: Allocator + Clone, -{ - fn eq(&self, other: &Self) -> bool { - if self.len() != other.len() { - return false; - } - - self.iter() - .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) - } -} - -impl Eq for HashMap -where - K: Eq + Hash, - V: Eq, - S: BuildHasher, - A: Allocator + Clone, -{ -} - -impl Debug for HashMap -where - K: Debug, - V: Debug, - A: Allocator + Clone, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_map().entries(self.iter()).finish() - } -} - -impl Default for HashMap -where - S: Default, - A: Default + Allocator + Clone, -{ - /// Creates an empty `HashMap`, with the `Default` value for the hasher and allocator. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use std::collections::hash_map::RandomState; - /// - /// // You can specify all types of HashMap, including hasher and allocator. - /// // Created map is empty and don't allocate memory - /// let map: HashMap = Default::default(); - /// assert_eq!(map.capacity(), 0); - /// let map: HashMap = HashMap::default(); - /// assert_eq!(map.capacity(), 0); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - fn default() -> Self { - Self::with_hasher_in(Default::default(), Default::default()) - } -} - -impl Index<&Q> for HashMap -where - K: Eq + Hash + Borrow, - Q: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - type Output = V; - - /// Returns a reference to the value corresponding to the supplied key. - /// - /// # Panics - /// - /// Panics if the key is not present in the `HashMap`. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let map: HashMap<_, _> = [("a", "One"), ("b", "Two")].into(); - /// - /// assert_eq!(map[&"a"], "One"); - /// assert_eq!(map[&"b"], "Two"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - fn index(&self, key: &Q) -> &V { - self.get(key).expect("no entry found for key") - } -} - -// The default hasher is used to match the std implementation signature -#[cfg(feature = "ahash")] -impl From<[(K, V); N]> for HashMap -where - K: Eq + Hash, - A: Default + Allocator + Clone, -{ - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let map1 = HashMap::from([(1, 2), (3, 4)]); - /// let map2: HashMap<_, _> = [(1, 2), (3, 4)].into(); - /// assert_eq!(map1, map2); - /// ``` - fn from(arr: [(K, V); N]) -> Self { - arr.into_iter().collect() - } -} - -/// An iterator over the entries of a `HashMap` in arbitrary order. -/// The iterator element type is `(&'a K, &'a V)`. -/// -/// This `struct` is created by the [`iter`] method on [`HashMap`]. See its -/// documentation for more. -/// -/// [`iter`]: struct.HashMap.html#method.iter -/// [`HashMap`]: struct.HashMap.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); -/// -/// let mut iter = map.iter(); -/// let mut vec = vec![iter.next(), iter.next(), iter.next()]; -/// -/// // The `Iter` iterator produces items in arbitrary order, so the -/// // items must be sorted to test them against a sorted array. 
-/// vec.sort_unstable(); -/// assert_eq!(vec, [Some((&1, &"a")), Some((&2, &"b")), Some((&3, &"c"))]); -/// -/// // It is fused iterator -/// assert_eq!(iter.next(), None); -/// assert_eq!(iter.next(), None); -/// ``` -pub struct Iter<'a, K, V> { - inner: RawIter<(K, V)>, - marker: PhantomData<(&'a K, &'a V)>, -} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -impl Clone for Iter<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Iter { - inner: self.inner.clone(), - marker: PhantomData, - } - } -} - -impl fmt::Debug for Iter<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A mutable iterator over the entries of a `HashMap` in arbitrary order. -/// The iterator element type is `(&'a K, &'a mut V)`. -/// -/// This `struct` is created by the [`iter_mut`] method on [`HashMap`]. See its -/// documentation for more. -/// -/// [`iter_mut`]: struct.HashMap.html#method.iter_mut -/// [`HashMap`]: struct.HashMap.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let mut map: HashMap<_, _> = [(1, "One".to_owned()), (2, "Two".into())].into(); -/// -/// let mut iter = map.iter_mut(); -/// iter.next().map(|(_, v)| v.push_str(" Mississippi")); -/// iter.next().map(|(_, v)| v.push_str(" Mississippi")); -/// -/// // It is fused iterator -/// assert_eq!(iter.next(), None); -/// assert_eq!(iter.next(), None); -/// -/// assert_eq!(map.get(&1).unwrap(), &"One Mississippi".to_owned()); -/// assert_eq!(map.get(&2).unwrap(), &"Two Mississippi".to_owned()); -/// ``` -pub struct IterMut<'a, K, V> { - inner: RawIter<(K, V)>, - // To ensure invariance with respect to V - marker: PhantomData<(&'a K, &'a mut V)>, -} - -// We override the default Send impl which has K: Sync instead of K: Send. Both -// are correct, but this one is more general since it allows keys which -// implement Send but not Sync. 
-unsafe impl Send for IterMut<'_, K, V> {} - -impl IterMut<'_, K, V> { - /// Returns a iterator of references over the remaining items. - #[cfg_attr(feature = "inline-more", inline)] - pub(super) fn iter(&self) -> Iter<'_, K, V> { - Iter { - inner: self.inner.clone(), - marker: PhantomData, - } - } -} - -/// An owning iterator over the entries of a `HashMap` in arbitrary order. -/// The iterator element type is `(K, V)`. -/// -/// This `struct` is created by the [`into_iter`] method on [`HashMap`] -/// (provided by the [`IntoIterator`] trait). See its documentation for more. -/// The map cannot be used after calling that method. -/// -/// [`into_iter`]: struct.HashMap.html#method.into_iter -/// [`HashMap`]: struct.HashMap.html -/// [`IntoIterator`]: https://doc.rust-lang.org/core/iter/trait.IntoIterator.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); -/// -/// let mut iter = map.into_iter(); -/// let mut vec = vec![iter.next(), iter.next(), iter.next()]; -/// -/// // The `IntoIter` iterator produces items in arbitrary order, so the -/// // items must be sorted to test them against a sorted array. -/// vec.sort_unstable(); -/// assert_eq!(vec, [Some((1, "a")), Some((2, "b")), Some((3, "c"))]); -/// -/// // It is fused iterator -/// assert_eq!(iter.next(), None); -/// assert_eq!(iter.next(), None); -/// ``` -pub struct IntoIter { - inner: RawIntoIter<(K, V), A>, -} - -impl IntoIter { - /// Returns a iterator of references over the remaining items. - #[cfg_attr(feature = "inline-more", inline)] - pub(super) fn iter(&self) -> Iter<'_, K, V> { - Iter { - inner: self.inner.iter(), - marker: PhantomData, - } - } -} - -/// An owning iterator over the keys of a `HashMap` in arbitrary order. -/// The iterator element type is `K`. -/// -/// This `struct` is created by the [`into_keys`] method on [`HashMap`]. -/// See its documentation for more. 
-/// The map cannot be used after calling that method. -/// -/// [`into_keys`]: struct.HashMap.html#method.into_keys -/// [`HashMap`]: struct.HashMap.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); -/// -/// let mut keys = map.into_keys(); -/// let mut vec = vec![keys.next(), keys.next(), keys.next()]; -/// -/// // The `IntoKeys` iterator produces keys in arbitrary order, so the -/// // keys must be sorted to test them against a sorted array. -/// vec.sort_unstable(); -/// assert_eq!(vec, [Some(1), Some(2), Some(3)]); -/// -/// // It is fused iterator -/// assert_eq!(keys.next(), None); -/// assert_eq!(keys.next(), None); -/// ``` -pub struct IntoKeys { - inner: IntoIter, -} - -impl Iterator for IntoKeys { - type Item = K; - - #[inline] - fn next(&mut self) -> Option { - self.inner.next().map(|(k, _)| k) - } - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -impl ExactSizeIterator for IntoKeys { - #[inline] - fn len(&self) -> usize { - self.inner.len() - } -} - -impl FusedIterator for IntoKeys {} - -impl fmt::Debug for IntoKeys { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.inner.iter().map(|(k, _)| k)) - .finish() - } -} - -/// An owning iterator over the values of a `HashMap` in arbitrary order. -/// The iterator element type is `V`. -/// -/// This `struct` is created by the [`into_values`] method on [`HashMap`]. -/// See its documentation for more. The map cannot be used after calling that method. 
-/// -/// [`into_values`]: struct.HashMap.html#method.into_values -/// [`HashMap`]: struct.HashMap.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); -/// -/// let mut values = map.into_values(); -/// let mut vec = vec![values.next(), values.next(), values.next()]; -/// -/// // The `IntoValues` iterator produces values in arbitrary order, so -/// // the values must be sorted to test them against a sorted array. -/// vec.sort_unstable(); -/// assert_eq!(vec, [Some("a"), Some("b"), Some("c")]); -/// -/// // It is fused iterator -/// assert_eq!(values.next(), None); -/// assert_eq!(values.next(), None); -/// ``` -pub struct IntoValues { - inner: IntoIter, -} - -impl Iterator for IntoValues { - type Item = V; - - #[inline] - fn next(&mut self) -> Option { - self.inner.next().map(|(_, v)| v) - } - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -impl ExactSizeIterator for IntoValues { - #[inline] - fn len(&self) -> usize { - self.inner.len() - } -} - -impl FusedIterator for IntoValues {} - -impl fmt::Debug for IntoValues { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.inner.iter().map(|(_, v)| v)) - .finish() - } -} - -/// An iterator over the keys of a `HashMap` in arbitrary order. -/// The iterator element type is `&'a K`. -/// -/// This `struct` is created by the [`keys`] method on [`HashMap`]. See its -/// documentation for more. -/// -/// [`keys`]: struct.HashMap.html#method.keys -/// [`HashMap`]: struct.HashMap.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); -/// -/// let mut keys = map.keys(); -/// let mut vec = vec![keys.next(), keys.next(), keys.next()]; -/// -/// // The `Keys` iterator produces keys in arbitrary order, so the -/// // keys must be sorted to test them against a sorted array. 
-/// vec.sort_unstable(); -/// assert_eq!(vec, [Some(&1), Some(&2), Some(&3)]); -/// -/// // It is fused iterator -/// assert_eq!(keys.next(), None); -/// assert_eq!(keys.next(), None); -/// ``` -pub struct Keys<'a, K, V> { - inner: Iter<'a, K, V>, -} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -impl Clone for Keys<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Keys { - inner: self.inner.clone(), - } - } -} - -impl fmt::Debug for Keys<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// An iterator over the values of a `HashMap` in arbitrary order. -/// The iterator element type is `&'a V`. -/// -/// This `struct` is created by the [`values`] method on [`HashMap`]. See its -/// documentation for more. -/// -/// [`values`]: struct.HashMap.html#method.values -/// [`HashMap`]: struct.HashMap.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); -/// -/// let mut values = map.values(); -/// let mut vec = vec![values.next(), values.next(), values.next()]; -/// -/// // The `Values` iterator produces values in arbitrary order, so the -/// // values must be sorted to test them against a sorted array. 
-/// vec.sort_unstable(); -/// assert_eq!(vec, [Some(&"a"), Some(&"b"), Some(&"c")]); -/// -/// // It is fused iterator -/// assert_eq!(values.next(), None); -/// assert_eq!(values.next(), None); -/// ``` -pub struct Values<'a, K, V> { - inner: Iter<'a, K, V>, -} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -impl Clone for Values<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Values { - inner: self.inner.clone(), - } - } -} - -impl fmt::Debug for Values<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A draining iterator over the entries of a `HashMap` in arbitrary -/// order. The iterator element type is `(K, V)`. -/// -/// This `struct` is created by the [`drain`] method on [`HashMap`]. See its -/// documentation for more. -/// -/// [`drain`]: struct.HashMap.html#method.drain -/// [`HashMap`]: struct.HashMap.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let mut map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); -/// -/// let mut drain_iter = map.drain(); -/// let mut vec = vec![drain_iter.next(), drain_iter.next(), drain_iter.next()]; -/// -/// // The `Drain` iterator produces items in arbitrary order, so the -/// // items must be sorted to test them against a sorted array. -/// vec.sort_unstable(); -/// assert_eq!(vec, [Some((1, "a")), Some((2, "b")), Some((3, "c"))]); -/// -/// // It is fused iterator -/// assert_eq!(drain_iter.next(), None); -/// assert_eq!(drain_iter.next(), None); -/// ``` -pub struct Drain<'a, K, V, A: Allocator + Clone = Global> { - inner: RawDrain<'a, (K, V), A>, -} - -impl Drain<'_, K, V, A> { - /// Returns a iterator of references over the remaining items. 
- #[cfg_attr(feature = "inline-more", inline)] - pub(super) fn iter(&self) -> Iter<'_, K, V> { - Iter { - inner: self.inner.iter(), - marker: PhantomData, - } - } -} - -/// A draining iterator over entries of a `HashMap` which don't satisfy the predicate -/// `f(&k, &mut v)` in arbitrary order. The iterator element type is `(K, V)`. -/// -/// This `struct` is created by the [`drain_filter`] method on [`HashMap`]. See its -/// documentation for more. -/// -/// [`drain_filter`]: struct.HashMap.html#method.drain_filter -/// [`HashMap`]: struct.HashMap.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let mut map: HashMap = [(1, "a"), (2, "b"), (3, "c")].into(); -/// -/// let mut drain_filter = map.drain_filter(|k, _v| k % 2 != 0); -/// let mut vec = vec![drain_filter.next(), drain_filter.next()]; -/// -/// // The `DrainFilter` iterator produces items in arbitrary order, so the -/// // items must be sorted to test them against a sorted array. -/// vec.sort_unstable(); -/// assert_eq!(vec, [Some((1, "a")),Some((3, "c"))]); -/// -/// // It is fused iterator -/// assert_eq!(drain_filter.next(), None); -/// assert_eq!(drain_filter.next(), None); -/// drop(drain_filter); -/// -/// assert_eq!(map.len(), 1); -/// ``` -pub struct DrainFilter<'a, K, V, F, A: Allocator + Clone = Global> -where - F: FnMut(&K, &mut V) -> bool, -{ - f: F, - inner: DrainFilterInner<'a, K, V, A>, -} - -impl<'a, K, V, F, A> Drop for DrainFilter<'a, K, V, F, A> -where - F: FnMut(&K, &mut V) -> bool, - A: Allocator + Clone, -{ - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - while let Some(item) = self.next() { - let guard = ConsumeAllOnDrop(self); - drop(item); - mem::forget(guard); - } - } -} - -pub(super) struct ConsumeAllOnDrop<'a, T: Iterator>(pub &'a mut T); - -impl Drop for ConsumeAllOnDrop<'_, T> { - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - self.0.for_each(drop); - } -} - -impl Iterator for DrainFilter<'_, K, V, F, 
A> -where - F: FnMut(&K, &mut V) -> bool, - A: Allocator + Clone, -{ - type Item = (K, V); - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option { - self.inner.next(&mut self.f) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - (0, self.inner.iter.size_hint().1) - } -} - -impl FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} - -/// Portions of `DrainFilter` shared with `set::DrainFilter` -pub(super) struct DrainFilterInner<'a, K, V, A: Allocator + Clone> { - pub iter: RawIter<(K, V)>, - pub table: &'a mut RawTable<(K, V), A>, -} - -impl DrainFilterInner<'_, K, V, A> { - #[cfg_attr(feature = "inline-more", inline)] - pub(super) fn next(&mut self, f: &mut F) -> Option<(K, V)> - where - F: FnMut(&K, &mut V) -> bool, - { - unsafe { - for item in &mut self.iter { - let &mut (ref key, ref mut value) = item.as_mut(); - if f(key, value) { - return Some(self.table.remove(item)); - } - } - } - None - } -} - -/// A mutable iterator over the values of a `HashMap` in arbitrary order. -/// The iterator element type is `&'a mut V`. -/// -/// This `struct` is created by the [`values_mut`] method on [`HashMap`]. See its -/// documentation for more. 
-/// -/// [`values_mut`]: struct.HashMap.html#method.values_mut -/// [`HashMap`]: struct.HashMap.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashMap; -/// -/// let mut map: HashMap<_, _> = [(1, "One".to_owned()), (2, "Two".into())].into(); -/// -/// let mut values = map.values_mut(); -/// values.next().map(|v| v.push_str(" Mississippi")); -/// values.next().map(|v| v.push_str(" Mississippi")); -/// -/// // It is fused iterator -/// assert_eq!(values.next(), None); -/// assert_eq!(values.next(), None); -/// -/// assert_eq!(map.get(&1).unwrap(), &"One Mississippi".to_owned()); -/// assert_eq!(map.get(&2).unwrap(), &"Two Mississippi".to_owned()); -/// ``` -pub struct ValuesMut<'a, K, V> { - inner: IterMut<'a, K, V>, -} - -/// A builder for computing where in a [`HashMap`] a key-value pair would be stored. -/// -/// See the [`HashMap::raw_entry_mut`] docs for usage examples. -/// -/// [`HashMap::raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_map::{RawEntryBuilderMut, RawEntryMut::Vacant, RawEntryMut::Occupied}; -/// use hashbrown::HashMap; -/// use core::hash::{BuildHasher, Hash}; -/// -/// let mut map = HashMap::new(); -/// map.extend([(1, 11), (2, 12), (3, 13), (4, 14), (5, 15), (6, 16)]); -/// assert_eq!(map.len(), 6); -/// -/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { -/// use core::hash::Hasher; -/// let mut state = hash_builder.build_hasher(); -/// key.hash(&mut state); -/// state.finish() -/// } -/// -/// let builder: RawEntryBuilderMut<_, _, _> = map.raw_entry_mut(); -/// -/// // Existing key -/// match builder.from_key(&6) { -/// Vacant(_) => unreachable!(), -/// Occupied(view) => assert_eq!(view.get(), &16), -/// } -/// -/// for key in 0..12 { -/// let hash = compute_hash(map.hasher(), &key); -/// let value = map.get(&key).cloned(); -/// let key_value = value.as_ref().map(|v| (&key, v)); -/// -/// println!("Key: {} and value: {:?}", key, value); -/// -/// match 
map.raw_entry_mut().from_key(&key) { -/// Occupied(mut o) => assert_eq!(Some(o.get_key_value()), key_value), -/// Vacant(_) => assert_eq!(value, None), -/// } -/// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &key) { -/// Occupied(mut o) => assert_eq!(Some(o.get_key_value()), key_value), -/// Vacant(_) => assert_eq!(value, None), -/// } -/// match map.raw_entry_mut().from_hash(hash, |q| *q == key) { -/// Occupied(mut o) => assert_eq!(Some(o.get_key_value()), key_value), -/// Vacant(_) => assert_eq!(value, None), -/// } -/// } -/// -/// assert_eq!(map.len(), 6); -/// ``` -pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> { - map: &'a mut HashMap, -} - -/// A view into a single entry in a map, which may either be vacant or occupied. -/// -/// This is a lower-level version of [`Entry`]. -/// -/// This `enum` is constructed through the [`raw_entry_mut`] method on [`HashMap`], -/// then calling one of the methods of that [`RawEntryBuilderMut`]. -/// -/// [`HashMap`]: struct.HashMap.html -/// [`Entry`]: enum.Entry.html -/// [`raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut -/// [`RawEntryBuilderMut`]: struct.RawEntryBuilderMut.html -/// -/// # Examples -/// -/// ``` -/// use core::hash::{BuildHasher, Hash}; -/// use hashbrown::hash_map::{HashMap, RawEntryMut, RawOccupiedEntryMut}; -/// -/// let mut map = HashMap::new(); -/// map.extend([('a', 1), ('b', 2), ('c', 3)]); -/// assert_eq!(map.len(), 3); -/// -/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { -/// use core::hash::Hasher; -/// let mut state = hash_builder.build_hasher(); -/// key.hash(&mut state); -/// state.finish() -/// } -/// -/// // Existing key (insert) -/// let raw: RawEntryMut<_, _, _> = map.raw_entry_mut().from_key(&'a'); -/// let _raw_o: RawOccupiedEntryMut<_, _, _> = raw.insert('a', 10); -/// assert_eq!(map.len(), 3); -/// -/// // Nonexistent key (insert) -/// map.raw_entry_mut().from_key(&'d').insert('d', 40); -/// assert_eq!(map.len(), 4); -/// -/// 
// Existing key (or_insert) -/// let hash = compute_hash(map.hasher(), &'b'); -/// let kv = map -/// .raw_entry_mut() -/// .from_key_hashed_nocheck(hash, &'b') -/// .or_insert('b', 20); -/// assert_eq!(kv, (&mut 'b', &mut 2)); -/// *kv.1 = 20; -/// assert_eq!(map.len(), 4); -/// -/// // Nonexistent key (or_insert) -/// let hash = compute_hash(map.hasher(), &'e'); -/// let kv = map -/// .raw_entry_mut() -/// .from_key_hashed_nocheck(hash, &'e') -/// .or_insert('e', 50); -/// assert_eq!(kv, (&mut 'e', &mut 50)); -/// assert_eq!(map.len(), 5); -/// -/// // Existing key (or_insert_with) -/// let hash = compute_hash(map.hasher(), &'c'); -/// let kv = map -/// .raw_entry_mut() -/// .from_hash(hash, |q| q == &'c') -/// .or_insert_with(|| ('c', 30)); -/// assert_eq!(kv, (&mut 'c', &mut 3)); -/// *kv.1 = 30; -/// assert_eq!(map.len(), 5); -/// -/// // Nonexistent key (or_insert_with) -/// let hash = compute_hash(map.hasher(), &'f'); -/// let kv = map -/// .raw_entry_mut() -/// .from_hash(hash, |q| q == &'f') -/// .or_insert_with(|| ('f', 60)); -/// assert_eq!(kv, (&mut 'f', &mut 60)); -/// assert_eq!(map.len(), 6); -/// -/// println!("Our HashMap: {:?}", map); -/// -/// let mut vec: Vec<_> = map.iter().map(|(&k, &v)| (k, v)).collect(); -/// // The `Iter` iterator produces items in arbitrary order, so the -/// // items must be sorted to test them against a sorted array. -/// vec.sort_unstable(); -/// assert_eq!(vec, [('a', 10), ('b', 20), ('c', 30), ('d', 40), ('e', 50), ('f', 60)]); -/// ``` -pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { - /// An occupied entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::{hash_map::RawEntryMut, HashMap}; - /// let mut map: HashMap<_, _> = [("a", 100), ("b", 200)].into(); - /// - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => unreachable!(), - /// RawEntryMut::Occupied(_) => { } - /// } - /// ``` - Occupied(RawOccupiedEntryMut<'a, K, V, S, A>), - /// A vacant entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::{hash_map::RawEntryMut, HashMap}; - /// let mut map: HashMap<&str, i32> = HashMap::new(); - /// - /// match map.raw_entry_mut().from_key("a") { - /// RawEntryMut::Occupied(_) => unreachable!(), - /// RawEntryMut::Vacant(_) => { } - /// } - /// ``` - Vacant(RawVacantEntryMut<'a, K, V, S, A>), -} - -/// A view into an occupied entry in a `HashMap`. -/// It is part of the [`RawEntryMut`] enum. -/// -/// [`RawEntryMut`]: enum.RawEntryMut.html -/// -/// # Examples -/// -/// ``` -/// use core::hash::{BuildHasher, Hash}; -/// use hashbrown::hash_map::{HashMap, RawEntryMut, RawOccupiedEntryMut}; -/// -/// let mut map = HashMap::new(); -/// map.extend([("a", 10), ("b", 20), ("c", 30)]); -/// -/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { -/// use core::hash::Hasher; -/// let mut state = hash_builder.build_hasher(); -/// key.hash(&mut state); -/// state.finish() -/// } -/// -/// let _raw_o: RawOccupiedEntryMut<_, _, _> = map.raw_entry_mut().from_key(&"a").insert("a", 100); -/// assert_eq!(map.len(), 3); -/// -/// // Existing key (insert and update) -/// match map.raw_entry_mut().from_key(&"a") { -/// RawEntryMut::Vacant(_) => unreachable!(), -/// RawEntryMut::Occupied(mut view) => { -/// assert_eq!(view.get(), &100); -/// let v = view.get_mut(); -/// let new_v = (*v) * 10; -/// *v = new_v; -/// assert_eq!(view.insert(1111), 1000); -/// } -/// } -/// -/// assert_eq!(map[&"a"], 1111); -/// assert_eq!(map.len(), 3); -/// -/// // Existing key (take) -/// let hash = compute_hash(map.hasher(), &"c"); -/// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &"c") { -/// RawEntryMut::Vacant(_) => unreachable!(), -/// RawEntryMut::Occupied(view) => { -/// assert_eq!(view.remove_entry(), ("c", 30)); -/// } -/// } -/// assert_eq!(map.raw_entry().from_key(&"c"), None); -/// assert_eq!(map.len(), 2); -/// -/// let hash = compute_hash(map.hasher(), &"b"); -/// match map.raw_entry_mut().from_hash(hash, |q| *q == "b") 
{ -/// RawEntryMut::Vacant(_) => unreachable!(), -/// RawEntryMut::Occupied(view) => { -/// assert_eq!(view.remove_entry(), ("b", 20)); -/// } -/// } -/// assert_eq!(map.get(&"b"), None); -/// assert_eq!(map.len(), 1); -/// ``` -pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { - elem: Bucket<(K, V)>, - table: &'a mut RawTable<(K, V), A>, - hash_builder: &'a S, -} - -unsafe impl Send for RawOccupiedEntryMut<'_, K, V, S, A> -where - K: Send, - V: Send, - S: Send, - A: Send + Allocator + Clone, -{ -} -unsafe impl Sync for RawOccupiedEntryMut<'_, K, V, S, A> -where - K: Sync, - V: Sync, - S: Sync, - A: Sync + Allocator + Clone, -{ -} - -/// A view into a vacant entry in a `HashMap`. -/// It is part of the [`RawEntryMut`] enum. -/// -/// [`RawEntryMut`]: enum.RawEntryMut.html -/// -/// # Examples -/// -/// ``` -/// use core::hash::{BuildHasher, Hash}; -/// use hashbrown::hash_map::{HashMap, RawEntryMut, RawVacantEntryMut}; -/// -/// let mut map = HashMap::<&str, i32>::new(); -/// -/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { -/// use core::hash::Hasher; -/// let mut state = hash_builder.build_hasher(); -/// key.hash(&mut state); -/// state.finish() -/// } -/// -/// let raw_v: RawVacantEntryMut<_, _, _> = match map.raw_entry_mut().from_key(&"a") { -/// RawEntryMut::Vacant(view) => view, -/// RawEntryMut::Occupied(_) => unreachable!(), -/// }; -/// raw_v.insert("a", 10); -/// assert!(map[&"a"] == 10 && map.len() == 1); -/// -/// // Nonexistent key (insert and update) -/// let hash = compute_hash(map.hasher(), &"b"); -/// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &"b") { -/// RawEntryMut::Occupied(_) => unreachable!(), -/// RawEntryMut::Vacant(view) => { -/// let (k, value) = view.insert("b", 2); -/// assert_eq!((*k, *value), ("b", 2)); -/// *value = 20; -/// } -/// } -/// assert!(map[&"b"] == 20 && map.len() == 2); -/// -/// let hash = compute_hash(map.hasher(), &"c"); -/// match map.raw_entry_mut().from_hash(hash, |q| 
*q == "c") { -/// RawEntryMut::Occupied(_) => unreachable!(), -/// RawEntryMut::Vacant(view) => { -/// assert_eq!(view.insert("c", 30), (&mut "c", &mut 30)); -/// } -/// } -/// assert!(map[&"c"] == 30 && map.len() == 3); -/// ``` -pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { - table: &'a mut RawTable<(K, V), A>, - hash_builder: &'a S, -} - -/// A builder for computing where in a [`HashMap`] a key-value pair would be stored. -/// -/// See the [`HashMap::raw_entry`] docs for usage examples. -/// -/// [`HashMap::raw_entry`]: struct.HashMap.html#method.raw_entry -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_map::{HashMap, RawEntryBuilder}; -/// use core::hash::{BuildHasher, Hash}; -/// -/// let mut map = HashMap::new(); -/// map.extend([(1, 10), (2, 20), (3, 30)]); -/// -/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { -/// use core::hash::Hasher; -/// let mut state = hash_builder.build_hasher(); -/// key.hash(&mut state); -/// state.finish() -/// } -/// -/// for k in 0..6 { -/// let hash = compute_hash(map.hasher(), &k); -/// let v = map.get(&k).cloned(); -/// let kv = v.as_ref().map(|v| (&k, v)); -/// -/// println!("Key: {} and value: {:?}", k, v); -/// let builder: RawEntryBuilder<_, _, _> = map.raw_entry(); -/// assert_eq!(builder.from_key(&k), kv); -/// assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); -/// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); -/// } -/// ``` -pub struct RawEntryBuilder<'a, K, V, S, A: Allocator + Clone = Global> { - map: &'a HashMap, -} - -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { - /// Creates a `RawEntryMut` from the given key. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// let key = "a"; - /// let entry: RawEntryMut<&str, u32, _> = map.raw_entry_mut().from_key(&key); - /// entry.insert(key, 100); - /// assert_eq!(map[&"a"], 100); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] - pub fn from_key(self, k: &Q) -> RawEntryMut<'a, K, V, S, A> - where - S: BuildHasher, - K: Borrow, - Q: Hash + Eq, - { - let hash = make_hash::(&self.map.hash_builder, k); - self.from_key_hashed_nocheck(hash, k) - } - - /// Creates a `RawEntryMut` from the given key and its hash. - /// - /// # Examples - /// - /// ``` - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// let key = "a"; - /// let hash = compute_hash(map.hasher(), &key); - /// let entry: RawEntryMut<&str, u32, _> = map.raw_entry_mut().from_key_hashed_nocheck(hash, &key); - /// entry.insert(key, 100); - /// assert_eq!(map[&"a"], 100); - /// ``` - #[inline] - #[allow(clippy::wrong_self_convention)] - pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S, A> - where - K: Borrow, - Q: Eq, - { - self.from_hash(hash, equivalent(k)) - } -} - -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { - /// Creates a `RawEntryMut` from the given hash and matching function. 
- /// - /// # Examples - /// - /// ``` - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// let key = "a"; - /// let hash = compute_hash(map.hasher(), &key); - /// let entry: RawEntryMut<&str, u32, _> = map.raw_entry_mut().from_hash(hash, |k| k == &key); - /// entry.insert(key, 100); - /// assert_eq!(map[&"a"], 100); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] - pub fn from_hash(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S, A> - where - for<'b> F: FnMut(&'b K) -> bool, - { - self.search(hash, is_match) - } - - #[cfg_attr(feature = "inline-more", inline)] - fn search(self, hash: u64, mut is_match: F) -> RawEntryMut<'a, K, V, S, A> - where - for<'b> F: FnMut(&'b K) -> bool, - { - match self.map.table.find(hash, |(k, _)| is_match(k)) { - Some(elem) => RawEntryMut::Occupied(RawOccupiedEntryMut { - elem, - table: &mut self.map.table, - hash_builder: &self.map.hash_builder, - }), - None => RawEntryMut::Vacant(RawVacantEntryMut { - table: &mut self.map.table, - hash_builder: &self.map.hash_builder, - }), - } - } -} - -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { - /// Access an immutable entry by key. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// let key = "a"; - /// assert_eq!(map.raw_entry().from_key(&key), Some((&"a", &100))); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] - pub fn from_key(self, k: &Q) -> Option<(&'a K, &'a V)> - where - S: BuildHasher, - K: Borrow, - Q: Hash + Eq, - { - let hash = make_hash::(&self.map.hash_builder, k); - self.from_key_hashed_nocheck(hash, k) - } - - /// Access an immutable entry by a key and its hash. - /// - /// # Examples - /// - /// ``` - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::HashMap; - /// - /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// let map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// let key = "a"; - /// let hash = compute_hash(map.hasher(), &key); - /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &key), Some((&"a", &100))); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] - pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)> - where - K: Borrow, - Q: Eq, - { - self.from_hash(hash, equivalent(k)) - } - - #[cfg_attr(feature = "inline-more", inline)] - fn search(self, hash: u64, mut is_match: F) -> Option<(&'a K, &'a V)> - where - F: FnMut(&K) -> bool, - { - match self.map.table.get(hash, |(k, _)| is_match(k)) { - Some(&(ref key, ref value)) => Some((key, value)), - None => None, - } - } - - /// Access an immutable entry by hash and matching function. 
- /// - /// # Examples - /// - /// ``` - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::HashMap; - /// - /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// let map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// let key = "a"; - /// let hash = compute_hash(map.hasher(), &key); - /// assert_eq!(map.raw_entry().from_hash(hash, |k| k == &key), Some((&"a", &100))); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::wrong_self_convention)] - pub fn from_hash(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)> - where - F: FnMut(&K) -> bool, - { - self.search(hash, is_match) - } -} - -impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> { - /// Sets the value of the entry, and returns a RawOccupiedEntryMut. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// let entry = map.raw_entry_mut().from_key("horseyland").insert("horseyland", 37); - /// - /// assert_eq!(entry.remove_entry(), ("horseyland", 37)); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(self, key: K, value: V) -> RawOccupiedEntryMut<'a, K, V, S, A> - where - K: Hash, - S: BuildHasher, - { - match self { - RawEntryMut::Occupied(mut entry) => { - entry.insert(value); - entry - } - RawEntryMut::Vacant(entry) => entry.insert_entry(key, value), - } - } - - /// Ensures a value is in the entry by inserting the default if empty, and returns - /// mutable references to the key and value in the entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 3); - /// assert_eq!(map["poneyland"], 3); - /// - /// *map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 10).1 *= 2; - /// assert_eq!(map["poneyland"], 6); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert(self, default_key: K, default_val: V) -> (&'a mut K, &'a mut V) - where - K: Hash, - S: BuildHasher, - { - match self { - RawEntryMut::Occupied(entry) => entry.into_key_value(), - RawEntryMut::Vacant(entry) => entry.insert(default_key, default_val), - } - } - - /// Ensures a value is in the entry by inserting the result of the default function if empty, - /// and returns mutable references to the key and value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, String> = HashMap::new(); - /// - /// map.raw_entry_mut().from_key("poneyland").or_insert_with(|| { - /// ("poneyland", "hoho".to_string()) - /// }); - /// - /// assert_eq!(map["poneyland"], "hoho".to_string()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert_with(self, default: F) -> (&'a mut K, &'a mut V) - where - F: FnOnce() -> (K, V), - K: Hash, - S: BuildHasher, - { - match self { - RawEntryMut::Occupied(entry) => entry.into_key_value(), - RawEntryMut::Vacant(entry) => { - let (k, v) = default(); - entry.insert(k, v) - } - } - } - - /// Provides in-place mutable access to an occupied entry before any - /// potential inserts into the map. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// map.raw_entry_mut() - /// .from_key("poneyland") - /// .and_modify(|_k, v| { *v += 1 }) - /// .or_insert("poneyland", 42); - /// assert_eq!(map["poneyland"], 42); - /// - /// map.raw_entry_mut() - /// .from_key("poneyland") - /// .and_modify(|_k, v| { *v += 1 }) - /// .or_insert("poneyland", 0); - /// assert_eq!(map["poneyland"], 43); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn and_modify(self, f: F) -> Self - where - F: FnOnce(&mut K, &mut V), - { - match self { - RawEntryMut::Occupied(mut entry) => { - { - let (k, v) = entry.get_key_value_mut(); - f(k, v); - } - RawEntryMut::Occupied(entry) - } - RawEntryMut::Vacant(entry) => RawEntryMut::Vacant(entry), - } - } - - /// Provides shared access to the key and owned access to the value of - /// an occupied entry and allows to replace or remove it based on the - /// value of the returned option. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RawEntryMut; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// let entry = map - /// .raw_entry_mut() - /// .from_key("poneyland") - /// .and_replace_entry_with(|_k, _v| panic!()); - /// - /// match entry { - /// RawEntryMut::Vacant(_) => {}, - /// RawEntryMut::Occupied(_) => panic!(), - /// } - /// - /// map.insert("poneyland", 42); - /// - /// let entry = map - /// .raw_entry_mut() - /// .from_key("poneyland") - /// .and_replace_entry_with(|k, v| { - /// assert_eq!(k, &"poneyland"); - /// assert_eq!(v, 42); - /// Some(v + 1) - /// }); - /// - /// match entry { - /// RawEntryMut::Occupied(e) => { - /// assert_eq!(e.key(), &"poneyland"); - /// assert_eq!(e.get(), &43); - /// }, - /// RawEntryMut::Vacant(_) => panic!(), - /// } - /// - /// assert_eq!(map["poneyland"], 43); - /// - /// let entry = map - /// .raw_entry_mut() - /// .from_key("poneyland") - /// .and_replace_entry_with(|_k, _v| None); - /// - /// match entry { - /// RawEntryMut::Vacant(_) => {}, - /// RawEntryMut::Occupied(_) => panic!(), - /// } - /// - /// assert!(!map.contains_key("poneyland")); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn and_replace_entry_with(self, f: F) -> Self - where - F: FnOnce(&K, V) -> Option, - { - match self { - RawEntryMut::Occupied(entry) => entry.replace_entry_with(f), - RawEntryMut::Vacant(_) => self, - } - } -} - -impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { - /// Gets a reference to the key in the entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => assert_eq!(o.key(), &"a") - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &K { - unsafe { &self.elem.as_ref().0 } - } - - /// Gets a mutable reference to the key in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// use std::rc::Rc; - /// - /// let key_one = Rc::new("a"); - /// let key_two = Rc::new("a"); - /// - /// let mut map: HashMap, u32> = HashMap::new(); - /// map.insert(key_one.clone(), 10); - /// - /// assert_eq!(map[&key_one], 10); - /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); - /// - /// match map.raw_entry_mut().from_key(&key_one) { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(mut o) => { - /// *o.key_mut() = key_two.clone(); - /// } - /// } - /// assert_eq!(map[&key_two], 10); - /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key_mut(&mut self) -> &mut K { - unsafe { &mut self.elem.as_mut().0 } - } - - /// Converts the entry into a mutable reference to the key in the entry - /// with a lifetime bound to the map itself. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// use std::rc::Rc; - /// - /// let key_one = Rc::new("a"); - /// let key_two = Rc::new("a"); - /// - /// let mut map: HashMap, u32> = HashMap::new(); - /// map.insert(key_one.clone(), 10); - /// - /// assert_eq!(map[&key_one], 10); - /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); - /// - /// let inside_key: &mut Rc<&str>; - /// - /// match map.raw_entry_mut().from_key(&key_one) { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => inside_key = o.into_key(), - /// } - /// *inside_key = key_two.clone(); - /// - /// assert_eq!(map[&key_two], 10); - /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_key(self) -> &'a mut K { - unsafe { &mut self.elem.as_mut().0 } - } - - /// Gets a reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => assert_eq!(o.get(), &100), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get(&self) -> &V { - unsafe { &self.elem.as_ref().1 } - } - - /// Converts the OccupiedEntry into a mutable reference to the value in the entry - /// with a lifetime bound to the map itself. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// let value: &mut u32; - /// - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => value = o.into_mut(), - /// } - /// *value += 900; - /// - /// assert_eq!(map[&"a"], 1000); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_mut(self) -> &'a mut V { - unsafe { &mut self.elem.as_mut().1 } - } - - /// Gets a mutable reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(mut o) => *o.get_mut() += 900, - /// } - /// - /// assert_eq!(map[&"a"], 1000); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get_mut(&mut self) -> &mut V { - unsafe { &mut self.elem.as_mut().1 } - } - - /// Gets a reference to the key and value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => assert_eq!(o.get_key_value(), (&"a", &100)), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get_key_value(&self) -> (&K, &V) { - unsafe { - let &(ref key, ref value) = self.elem.as_ref(); - (key, value) - } - } - - /// Gets a mutable reference to the key and value in the entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// use std::rc::Rc; - /// - /// let key_one = Rc::new("a"); - /// let key_two = Rc::new("a"); - /// - /// let mut map: HashMap, u32> = HashMap::new(); - /// map.insert(key_one.clone(), 10); - /// - /// assert_eq!(map[&key_one], 10); - /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); - /// - /// match map.raw_entry_mut().from_key(&key_one) { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(mut o) => { - /// let (inside_key, inside_value) = o.get_key_value_mut(); - /// *inside_key = key_two.clone(); - /// *inside_value = 100; - /// } - /// } - /// assert_eq!(map[&key_two], 100); - /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) { - unsafe { - let &mut (ref mut key, ref mut value) = self.elem.as_mut(); - (key, value) - } - } - - /// Converts the OccupiedEntry into a mutable reference to the key and value in the entry - /// with a lifetime bound to the map itself. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// use std::rc::Rc; - /// - /// let key_one = Rc::new("a"); - /// let key_two = Rc::new("a"); - /// - /// let mut map: HashMap, u32> = HashMap::new(); - /// map.insert(key_one.clone(), 10); - /// - /// assert_eq!(map[&key_one], 10); - /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); - /// - /// let inside_key: &mut Rc<&str>; - /// let inside_value: &mut u32; - /// match map.raw_entry_mut().from_key(&key_one) { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => { - /// let tuple = o.into_key_value(); - /// inside_key = tuple.0; - /// inside_value = tuple.1; - /// } - /// } - /// *inside_key = key_two.clone(); - /// *inside_value = 100; - /// assert_eq!(map[&key_two], 100); - /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_key_value(self) -> (&'a mut K, &'a mut V) { - unsafe { - let &mut (ref mut key, ref mut value) = self.elem.as_mut(); - (key, value) - } - } - - /// Sets the value of the entry, and returns the entry's old value. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(mut o) => assert_eq!(o.insert(1000), 100), - /// } - /// - /// assert_eq!(map[&"a"], 1000); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(&mut self, value: V) -> V { - mem::replace(self.get_mut(), value) - } - - /// Sets the value of the entry, and returns the entry's old value. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// use std::rc::Rc; - /// - /// let key_one = Rc::new("a"); - /// let key_two = Rc::new("a"); - /// - /// let mut map: HashMap, u32> = HashMap::new(); - /// map.insert(key_one.clone(), 10); - /// - /// assert_eq!(map[&key_one], 10); - /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); - /// - /// match map.raw_entry_mut().from_key(&key_one) { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(mut o) => { - /// let old_key = o.insert_key(key_two.clone()); - /// assert!(Rc::ptr_eq(&old_key, &key_one)); - /// } - /// } - /// assert_eq!(map[&key_two], 10); - /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert_key(&mut self, key: K) -> K { - mem::replace(self.key_mut(), key) - } - - /// Takes the value out of the entry, and returns it. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => assert_eq!(o.remove(), 100), - /// } - /// assert_eq!(map.get(&"a"), None); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove(self) -> V { - self.remove_entry().1 - } - - /// Take the ownership of the key and value from the map. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => assert_eq!(o.remove_entry(), ("a", 100)), - /// } - /// assert_eq!(map.get(&"a"), None); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.remove(self.elem) } - } - - /// Provides shared access to the key and owned access to the value of - /// the entry and allows to replace or remove it based on the - /// value of the returned option. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// let raw_entry = match map.raw_entry_mut().from_key(&"a") { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => o.replace_entry_with(|k, v| { - /// assert_eq!(k, &"a"); - /// assert_eq!(v, 100); - /// Some(v + 900) - /// }), - /// }; - /// let raw_entry = match raw_entry { - /// RawEntryMut::Vacant(_) => panic!(), - /// RawEntryMut::Occupied(o) => o.replace_entry_with(|k, v| { - /// assert_eq!(k, &"a"); - /// assert_eq!(v, 1000); - /// None - /// }), - /// }; - /// match raw_entry { - /// RawEntryMut::Vacant(_) => { }, - /// RawEntryMut::Occupied(_) => panic!(), - /// }; - /// assert_eq!(map.get(&"a"), None); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace_entry_with(self, f: F) -> RawEntryMut<'a, K, V, S, A> - where - F: FnOnce(&K, V) -> Option, - { - unsafe { - let still_occupied = self - .table - .replace_bucket_with(self.elem.clone(), |(key, value)| { - f(&key, value).map(|new_value| (key, new_value)) - }); - - if still_occupied { - RawEntryMut::Occupied(self) - } else { - RawEntryMut::Vacant(RawVacantEntryMut { - table: self.table, - 
hash_builder: self.hash_builder, - }) - } - } - } -} - -impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { - /// Sets the value of the entry with the VacantEntry's key, - /// and returns a mutable reference to it. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// - /// match map.raw_entry_mut().from_key(&"c") { - /// RawEntryMut::Occupied(_) => panic!(), - /// RawEntryMut::Vacant(v) => assert_eq!(v.insert("c", 300), (&mut "c", &mut 300)), - /// } - /// - /// assert_eq!(map[&"c"], 300); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V) - where - K: Hash, - S: BuildHasher, - { - let hash = make_insert_hash::(self.hash_builder, &key); - self.insert_hashed_nocheck(hash, key, value) - } - - /// Sets the value of the entry with the VacantEntry's key, - /// and returns a mutable reference to it. 
- /// - /// # Examples - /// - /// ``` - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// - /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); - /// let key = "c"; - /// let hash = compute_hash(map.hasher(), &key); - /// - /// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &key) { - /// RawEntryMut::Occupied(_) => panic!(), - /// RawEntryMut::Vacant(v) => assert_eq!( - /// v.insert_hashed_nocheck(hash, key, 300), - /// (&mut "c", &mut 300) - /// ), - /// } - /// - /// assert_eq!(map[&"c"], 300); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::shadow_unrelated)] - pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) - where - K: Hash, - S: BuildHasher, - { - let &mut (ref mut k, ref mut v) = self.table.insert_entry( - hash, - (key, value), - make_hasher::(self.hash_builder), - ); - (k, v) - } - - /// Set the value of an entry with a custom hasher function. 
- /// - /// # Examples - /// - /// ``` - /// use core::hash::{BuildHasher, Hash}; - /// use hashbrown::hash_map::{HashMap, RawEntryMut}; - /// - /// fn make_hasher(hash_builder: &S) -> impl Fn(&K) -> u64 + '_ - /// where - /// K: Hash + ?Sized, - /// S: BuildHasher, - /// { - /// move |key: &K| { - /// use core::hash::Hasher; - /// let mut state = hash_builder.build_hasher(); - /// key.hash(&mut state); - /// state.finish() - /// } - /// } - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// let key = "a"; - /// let hash_builder = map.hasher().clone(); - /// let hash = make_hasher(&hash_builder)(&key); - /// - /// match map.raw_entry_mut().from_hash(hash, |q| q == &key) { - /// RawEntryMut::Occupied(_) => panic!(), - /// RawEntryMut::Vacant(v) => assert_eq!( - /// v.insert_with_hasher(hash, key, 100, make_hasher(&hash_builder)), - /// (&mut "a", &mut 100) - /// ), - /// } - /// map.extend([("b", 200), ("c", 300), ("d", 400), ("e", 500), ("f", 600)]); - /// assert_eq!(map[&"a"], 100); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert_with_hasher( - self, - hash: u64, - key: K, - value: V, - hasher: H, - ) -> (&'a mut K, &'a mut V) - where - H: Fn(&K) -> u64, - { - let &mut (ref mut k, ref mut v) = self - .table - .insert_entry(hash, (key, value), |x| hasher(&x.0)); - (k, v) - } - - #[cfg_attr(feature = "inline-more", inline)] - fn insert_entry(self, key: K, value: V) -> RawOccupiedEntryMut<'a, K, V, S, A> - where - K: Hash, - S: BuildHasher, - { - let hash = make_insert_hash::(self.hash_builder, &key); - let elem = self.table.insert( - hash, - (key, value), - make_hasher::(self.hash_builder), - ); - RawOccupiedEntryMut { - elem, - table: self.table, - hash_builder: self.hash_builder, - } - } -} - -impl Debug for RawEntryBuilderMut<'_, K, V, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RawEntryBuilder").finish() - } -} - -impl Debug for RawEntryMut<'_, K, V, S, A> { - fn fmt(&self, f: 
&mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(), - RawEntryMut::Occupied(ref o) => f.debug_tuple("RawEntry").field(o).finish(), - } - } -} - -impl Debug for RawOccupiedEntryMut<'_, K, V, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RawOccupiedEntryMut") - .field("key", self.key()) - .field("value", self.get()) - .finish() - } -} - -impl Debug for RawVacantEntryMut<'_, K, V, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RawVacantEntryMut").finish() - } -} - -impl Debug for RawEntryBuilder<'_, K, V, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RawEntryBuilder").finish() - } -} - -/// A view into a single entry in a map, which may either be vacant or occupied. -/// -/// This `enum` is constructed from the [`entry`] method on [`HashMap`]. -/// -/// [`HashMap`]: struct.HashMap.html -/// [`entry`]: struct.HashMap.html#method.entry -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_map::{Entry, HashMap, OccupiedEntry}; -/// -/// let mut map = HashMap::new(); -/// map.extend([("a", 10), ("b", 20), ("c", 30)]); -/// assert_eq!(map.len(), 3); -/// -/// // Existing key (insert) -/// let entry: Entry<_, _, _> = map.entry("a"); -/// let _raw_o: OccupiedEntry<_, _, _> = entry.insert(1); -/// assert_eq!(map.len(), 3); -/// // Nonexistent key (insert) -/// map.entry("d").insert(4); -/// -/// // Existing key (or_insert) -/// let v = map.entry("b").or_insert(2); -/// assert_eq!(std::mem::replace(v, 2), 20); -/// // Nonexistent key (or_insert) -/// map.entry("e").or_insert(5); -/// -/// // Existing key (or_insert_with) -/// let v = map.entry("c").or_insert_with(|| 3); -/// assert_eq!(std::mem::replace(v, 3), 30); -/// // Nonexistent key (or_insert_with) -/// map.entry("f").or_insert_with(|| 6); -/// -/// println!("Our HashMap: {:?}", map); -/// -/// let mut vec: 
Vec<_> = map.iter().map(|(&k, &v)| (k, v)).collect(); -/// // The `Iter` iterator produces items in arbitrary order, so the -/// // items must be sorted to test them against a sorted array. -/// vec.sort_unstable(); -/// assert_eq!(vec, [("a", 1), ("b", 2), ("c", 3), ("d", 4), ("e", 5), ("f", 6)]); -/// ``` -pub enum Entry<'a, K, V, S, A = Global> -where - A: Allocator + Clone, -{ - /// An occupied entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{Entry, HashMap}; - /// let mut map: HashMap<_, _> = [("a", 100), ("b", 200)].into(); - /// - /// match map.entry("a") { - /// Entry::Vacant(_) => unreachable!(), - /// Entry::Occupied(_) => { } - /// } - /// ``` - Occupied(OccupiedEntry<'a, K, V, S, A>), - - /// A vacant entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{Entry, HashMap}; - /// let mut map: HashMap<&str, i32> = HashMap::new(); - /// - /// match map.entry("a") { - /// Entry::Occupied(_) => unreachable!(), - /// Entry::Vacant(_) => { } - /// } - /// ``` - Vacant(VacantEntry<'a, K, V, S, A>), -} - -impl Debug for Entry<'_, K, V, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), - Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), - } - } -} - -/// A view into an occupied entry in a `HashMap`. -/// It is part of the [`Entry`] enum. 
-/// -/// [`Entry`]: enum.Entry.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_map::{Entry, HashMap, OccupiedEntry}; -/// -/// let mut map = HashMap::new(); -/// map.extend([("a", 10), ("b", 20), ("c", 30)]); -/// -/// let _entry_o: OccupiedEntry<_, _, _> = map.entry("a").insert(100); -/// assert_eq!(map.len(), 3); -/// -/// // Existing key (insert and update) -/// match map.entry("a") { -/// Entry::Vacant(_) => unreachable!(), -/// Entry::Occupied(mut view) => { -/// assert_eq!(view.get(), &100); -/// let v = view.get_mut(); -/// *v *= 10; -/// assert_eq!(view.insert(1111), 1000); -/// } -/// } -/// -/// assert_eq!(map[&"a"], 1111); -/// assert_eq!(map.len(), 3); -/// -/// // Existing key (take) -/// match map.entry("c") { -/// Entry::Vacant(_) => unreachable!(), -/// Entry::Occupied(view) => { -/// assert_eq!(view.remove_entry(), ("c", 30)); -/// } -/// } -/// assert_eq!(map.get(&"c"), None); -/// assert_eq!(map.len(), 2); -/// ``` -pub struct OccupiedEntry<'a, K, V, S, A: Allocator + Clone = Global> { - hash: u64, - key: Option, - elem: Bucket<(K, V)>, - table: &'a mut HashMap, -} - -unsafe impl Send for OccupiedEntry<'_, K, V, S, A> -where - K: Send, - V: Send, - S: Send, - A: Send + Allocator + Clone, -{ -} -unsafe impl Sync for OccupiedEntry<'_, K, V, S, A> -where - K: Sync, - V: Sync, - S: Sync, - A: Sync + Allocator + Clone, -{ -} - -impl Debug for OccupiedEntry<'_, K, V, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OccupiedEntry") - .field("key", self.key()) - .field("value", self.get()) - .finish() - } -} - -/// A view into a vacant entry in a `HashMap`. -/// It is part of the [`Entry`] enum. 
-/// -/// [`Entry`]: enum.Entry.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_map::{Entry, HashMap, VacantEntry}; -/// -/// let mut map = HashMap::<&str, i32>::new(); -/// -/// let entry_v: VacantEntry<_, _, _> = match map.entry("a") { -/// Entry::Vacant(view) => view, -/// Entry::Occupied(_) => unreachable!(), -/// }; -/// entry_v.insert(10); -/// assert!(map[&"a"] == 10 && map.len() == 1); -/// -/// // Nonexistent key (insert and update) -/// match map.entry("b") { -/// Entry::Occupied(_) => unreachable!(), -/// Entry::Vacant(view) => { -/// let value = view.insert(2); -/// assert_eq!(*value, 2); -/// *value = 20; -/// } -/// } -/// assert!(map[&"b"] == 20 && map.len() == 2); -/// ``` -pub struct VacantEntry<'a, K, V, S, A: Allocator + Clone = Global> { - hash: u64, - key: K, - table: &'a mut HashMap, -} - -impl Debug for VacantEntry<'_, K, V, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("VacantEntry").field(self.key()).finish() - } -} - -/// A view into a single entry in a map, which may either be vacant or occupied, -/// with any borrowed form of the map's key type. -/// -/// -/// This `enum` is constructed from the [`entry_ref`] method on [`HashMap`]. -/// -/// [`Hash`] and [`Eq`] on the borrowed form of the map's key type *must* match those -/// for the key type. It also require that key may be constructed from the borrowed -/// form through the [`From`] trait. 
-/// -/// [`HashMap`]: struct.HashMap.html -/// [`entry_ref`]: struct.HashMap.html#method.entry_ref -/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html -/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html -/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_map::{EntryRef, HashMap, OccupiedEntryRef}; -/// -/// let mut map = HashMap::new(); -/// map.extend([("a".to_owned(), 10), ("b".into(), 20), ("c".into(), 30)]); -/// assert_eq!(map.len(), 3); -/// -/// // Existing key (insert) -/// let key = String::from("a"); -/// let entry: EntryRef<_, _, _, _> = map.entry_ref(&key); -/// let _raw_o: OccupiedEntryRef<_, _, _, _> = entry.insert(1); -/// assert_eq!(map.len(), 3); -/// // Nonexistent key (insert) -/// map.entry_ref("d").insert(4); -/// -/// // Existing key (or_insert) -/// let v = map.entry_ref("b").or_insert(2); -/// assert_eq!(std::mem::replace(v, 2), 20); -/// // Nonexistent key (or_insert) -/// map.entry_ref("e").or_insert(5); -/// -/// // Existing key (or_insert_with) -/// let v = map.entry_ref("c").or_insert_with(|| 3); -/// assert_eq!(std::mem::replace(v, 3), 30); -/// // Nonexistent key (or_insert_with) -/// map.entry_ref("f").or_insert_with(|| 6); -/// -/// println!("Our HashMap: {:?}", map); -/// -/// for (key, value) in ["a", "b", "c", "d", "e", "f"].into_iter().zip(1..=6) { -/// assert_eq!(map[key], value) -/// } -/// assert_eq!(map.len(), 6); -/// ``` -pub enum EntryRef<'a, 'b, K, Q: ?Sized, V, S, A = Global> -where - A: Allocator + Clone, -{ - /// An occupied entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{EntryRef, HashMap}; - /// let mut map: HashMap<_, _> = [("a".to_owned(), 100), ("b".into(), 200)].into(); - /// - /// match map.entry_ref("a") { - /// EntryRef::Vacant(_) => unreachable!(), - /// EntryRef::Occupied(_) => { } - /// } - /// ``` - Occupied(OccupiedEntryRef<'a, 'b, K, Q, V, S, A>), - - /// A vacant entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{EntryRef, HashMap}; - /// let mut map: HashMap = HashMap::new(); - /// - /// match map.entry_ref("a") { - /// EntryRef::Occupied(_) => unreachable!(), - /// EntryRef::Vacant(_) => { } - /// } - /// ``` - Vacant(VacantEntryRef<'a, 'b, K, Q, V, S, A>), -} - -impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug - for EntryRef<'_, '_, K, Q, V, S, A> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - EntryRef::Vacant(ref v) => f.debug_tuple("EntryRef").field(v).finish(), - EntryRef::Occupied(ref o) => f.debug_tuple("EntryRef").field(o).finish(), - } - } -} - -enum KeyOrRef<'a, K, Q: ?Sized> { - Borrowed(&'a Q), - Owned(K), -} - -impl<'a, K, Q: ?Sized> KeyOrRef<'a, K, Q> { - fn into_owned(self) -> K - where - K: From<&'a Q>, - { - match self { - Self::Borrowed(borrowed) => borrowed.into(), - Self::Owned(owned) => owned, - } - } -} - -impl<'a, K: Borrow, Q: ?Sized> AsRef for KeyOrRef<'a, K, Q> { - fn as_ref(&self) -> &Q { - match self { - Self::Borrowed(borrowed) => borrowed, - Self::Owned(owned) => owned.borrow(), - } - } -} - -/// A view into an occupied entry in a `HashMap`. -/// It is part of the [`EntryRef`] enum. 
-/// -/// [`EntryRef`]: enum.EntryRef.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_map::{EntryRef, HashMap, OccupiedEntryRef}; -/// -/// let mut map = HashMap::new(); -/// map.extend([("a".to_owned(), 10), ("b".into(), 20), ("c".into(), 30)]); -/// -/// let key = String::from("a"); -/// let _entry_o: OccupiedEntryRef<_, _, _, _> = map.entry_ref(&key).insert(100); -/// assert_eq!(map.len(), 3); -/// -/// // Existing key (insert and update) -/// match map.entry_ref("a") { -/// EntryRef::Vacant(_) => unreachable!(), -/// EntryRef::Occupied(mut view) => { -/// assert_eq!(view.get(), &100); -/// let v = view.get_mut(); -/// *v *= 10; -/// assert_eq!(view.insert(1111), 1000); -/// } -/// } -/// -/// assert_eq!(map["a"], 1111); -/// assert_eq!(map.len(), 3); -/// -/// // Existing key (take) -/// match map.entry_ref("c") { -/// EntryRef::Vacant(_) => unreachable!(), -/// EntryRef::Occupied(view) => { -/// assert_eq!(view.remove_entry(), ("c".to_owned(), 30)); -/// } -/// } -/// assert_eq!(map.get("c"), None); -/// assert_eq!(map.len(), 2); -/// ``` -pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { - hash: u64, - key: Option>, - elem: Bucket<(K, V)>, - table: &'a mut HashMap, -} - -unsafe impl<'a, 'b, K, Q, V, S, A> Send for OccupiedEntryRef<'a, 'b, K, Q, V, S, A> -where - K: Send, - Q: Sync + ?Sized, - V: Send, - S: Send, - A: Send + Allocator + Clone, -{ -} -unsafe impl<'a, 'b, K, Q, V, S, A> Sync for OccupiedEntryRef<'a, 'b, K, Q, V, S, A> -where - K: Sync, - Q: Sync + ?Sized, - V: Sync, - S: Sync, - A: Sync + Allocator + Clone, -{ -} - -impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug - for OccupiedEntryRef<'_, '_, K, Q, V, S, A> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OccupiedEntryRef") - .field("key", &self.key()) - .field("value", &self.get()) - .finish() - } -} - -/// A view into a vacant entry in a `HashMap`. 
-/// It is part of the [`EntryRef`] enum. -/// -/// [`EntryRef`]: enum.EntryRef.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_map::{EntryRef, HashMap, VacantEntryRef}; -/// -/// let mut map = HashMap::::new(); -/// -/// let entry_v: VacantEntryRef<_, _, _, _> = match map.entry_ref("a") { -/// EntryRef::Vacant(view) => view, -/// EntryRef::Occupied(_) => unreachable!(), -/// }; -/// entry_v.insert(10); -/// assert!(map["a"] == 10 && map.len() == 1); -/// -/// // Nonexistent key (insert and update) -/// match map.entry_ref("b") { -/// EntryRef::Occupied(_) => unreachable!(), -/// EntryRef::Vacant(view) => { -/// let value = view.insert(2); -/// assert_eq!(*value, 2); -/// *value = 20; -/// } -/// } -/// assert!(map["b"] == 20 && map.len() == 2); -/// ``` -pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { - hash: u64, - key: KeyOrRef<'b, K, Q>, - table: &'a mut HashMap, -} - -impl, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug - for VacantEntryRef<'_, '_, K, Q, V, S, A> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("VacantEntryRef").field(&self.key()).finish() - } -} - -/// The error returned by [`try_insert`](HashMap::try_insert) when the key already exists. -/// -/// Contains the occupied entry, and the value that was not inserted. 
-/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_map::{HashMap, OccupiedError}; -/// -/// let mut map: HashMap<_, _> = [("a", 10), ("b", 20)].into(); -/// -/// // try_insert method returns mutable reference to the value if keys are vacant, -/// // but if the map did have key present, nothing is updated, and the provided -/// // value is returned inside `Err(_)` variant -/// match map.try_insert("a", 100) { -/// Err(OccupiedError { mut entry, value }) => { -/// assert_eq!(entry.key(), &"a"); -/// assert_eq!(value, 100); -/// assert_eq!(entry.insert(100), 10) -/// } -/// _ => unreachable!(), -/// } -/// assert_eq!(map[&"a"], 100); -/// ``` -pub struct OccupiedError<'a, K, V, S, A: Allocator + Clone = Global> { - /// The entry in the map that was already occupied. - pub entry: OccupiedEntry<'a, K, V, S, A>, - /// The value which was not inserted, because the entry was already occupied. - pub value: V, -} - -impl Debug for OccupiedError<'_, K, V, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OccupiedError") - .field("key", self.entry.key()) - .field("old_value", self.entry.get()) - .field("new_value", &self.value) - .finish() - } -} - -impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display - for OccupiedError<'a, K, V, S, A> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "failed to insert {:?}, key {:?} already exists with value {:?}", - self.value, - self.entry.key(), - self.entry.get(), - ) - } -} - -impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap { - type Item = (&'a K, &'a V); - type IntoIter = Iter<'a, K, V>; - - /// Creates an iterator over the entries of a `HashMap` in arbitrary order. - /// The iterator element type is `(&'a K, &'a V)`. - /// - /// Return the same `Iter` struct as by the [`iter`] method on [`HashMap`]. 
- /// - /// [`iter`]: struct.HashMap.html#method.iter - /// [`HashMap`]: struct.HashMap.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// let map_one: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); - /// let mut map_two = HashMap::new(); - /// - /// for (key, value) in &map_one { - /// println!("Key: {}, Value: {}", key, value); - /// map_two.insert_unique_unchecked(*key, *value); - /// } - /// - /// assert_eq!(map_one, map_two); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - fn into_iter(self) -> Iter<'a, K, V> { - self.iter() - } -} - -impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap { - type Item = (&'a K, &'a mut V); - type IntoIter = IterMut<'a, K, V>; - - /// Creates an iterator over the entries of a `HashMap` in arbitrary order - /// with mutable references to the values. The iterator element type is - /// `(&'a K, &'a mut V)`. - /// - /// Return the same `IterMut` struct as by the [`iter_mut`] method on - /// [`HashMap`]. - /// - /// [`iter_mut`]: struct.HashMap.html#method.iter_mut - /// [`HashMap`]: struct.HashMap.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// let mut map: HashMap<_, _> = [("a", 1), ("b", 2), ("c", 3)].into(); - /// - /// for (key, value) in &mut map { - /// println!("Key: {}, Value: {}", key, value); - /// *value *= 2; - /// } - /// - /// let mut vec = map.iter().collect::>(); - /// // The `Iter` iterator produces items in arbitrary order, so the - /// // items must be sorted to test them against a sorted array. - /// vec.sort_unstable(); - /// assert_eq!(vec, [(&"a", &2), (&"b", &4), (&"c", &6)]); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - fn into_iter(self) -> IterMut<'a, K, V> { - self.iter_mut() - } -} - -impl IntoIterator for HashMap { - type Item = (K, V); - type IntoIter = IntoIter; - - /// Creates a consuming iterator, that is, one that moves each key-value - /// pair out of the map in arbitrary order. 
The map cannot be used after - /// calling this. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let map: HashMap<_, _> = [("a", 1), ("b", 2), ("c", 3)].into(); - /// - /// // Not possible with .iter() - /// let mut vec: Vec<(&str, i32)> = map.into_iter().collect(); - /// // The `IntoIter` iterator produces items in arbitrary order, so - /// // the items must be sorted to test them against a sorted array. - /// vec.sort_unstable(); - /// assert_eq!(vec, [("a", 1), ("b", 2), ("c", 3)]); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - fn into_iter(self) -> IntoIter { - IntoIter { - inner: self.table.into_iter(), - } - } -} - -impl<'a, K, V> Iterator for Iter<'a, K, V> { - type Item = (&'a K, &'a V); - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<(&'a K, &'a V)> { - // Avoid `Option::map` because it bloats LLVM IR. - match self.inner.next() { - Some(x) => unsafe { - let r = x.as_ref(); - Some((&r.0, &r.1)) - }, - None => None, - } - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} -impl ExactSizeIterator for Iter<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.inner.len() - } -} - -impl FusedIterator for Iter<'_, K, V> {} - -impl<'a, K, V> Iterator for IterMut<'a, K, V> { - type Item = (&'a K, &'a mut V); - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<(&'a K, &'a mut V)> { - // Avoid `Option::map` because it bloats LLVM IR. 
- match self.inner.next() { - Some(x) => unsafe { - let r = x.as_mut(); - Some((&r.0, &mut r.1)) - }, - None => None, - } - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} -impl ExactSizeIterator for IterMut<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.inner.len() - } -} -impl FusedIterator for IterMut<'_, K, V> {} - -impl fmt::Debug for IterMut<'_, K, V> -where - K: fmt::Debug, - V: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.iter()).finish() - } -} - -impl Iterator for IntoIter { - type Item = (K, V); - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<(K, V)> { - self.inner.next() - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} -impl ExactSizeIterator for IntoIter { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.inner.len() - } -} -impl FusedIterator for IntoIter {} - -impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.iter()).finish() - } -} - -impl<'a, K, V> Iterator for Keys<'a, K, V> { - type Item = &'a K; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<&'a K> { - // Avoid `Option::map` because it bloats LLVM IR. 
- match self.inner.next() { - Some((k, _)) => Some(k), - None => None, - } - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} -impl ExactSizeIterator for Keys<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.inner.len() - } -} -impl FusedIterator for Keys<'_, K, V> {} - -impl<'a, K, V> Iterator for Values<'a, K, V> { - type Item = &'a V; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<&'a V> { - // Avoid `Option::map` because it bloats LLVM IR. - match self.inner.next() { - Some((_, v)) => Some(v), - None => None, - } - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} -impl ExactSizeIterator for Values<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.inner.len() - } -} -impl FusedIterator for Values<'_, K, V> {} - -impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { - type Item = &'a mut V; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<&'a mut V> { - // Avoid `Option::map` because it bloats LLVM IR. 
- match self.inner.next() { - Some((_, v)) => Some(v), - None => None, - } - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} -impl ExactSizeIterator for ValuesMut<'_, K, V> { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.inner.len() - } -} -impl FusedIterator for ValuesMut<'_, K, V> {} - -impl fmt::Debug for ValuesMut<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.inner.iter().map(|(_, val)| val)) - .finish() - } -} - -impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> { - type Item = (K, V); - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<(K, V)> { - self.inner.next() - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} -impl ExactSizeIterator for Drain<'_, K, V, A> { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.inner.len() - } -} -impl FusedIterator for Drain<'_, K, V, A> {} - -impl fmt::Debug for Drain<'_, K, V, A> -where - K: fmt::Debug, - V: fmt::Debug, - A: Allocator + Clone, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.iter()).finish() - } -} - -impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { - /// Sets the value of the entry, and returns an OccupiedEntry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// let entry = map.entry("horseyland").insert(37); - /// - /// assert_eq!(entry.key(), &"horseyland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(self, value: V) -> OccupiedEntry<'a, K, V, S, A> - where - K: Hash, - S: BuildHasher, - { - match self { - Entry::Occupied(mut entry) => { - entry.insert(value); - entry - } - Entry::Vacant(entry) => entry.insert_entry(value), - } - } - - /// Ensures a value is in the entry by inserting the default if empty, and returns - /// a mutable reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// // nonexistent key - /// map.entry("poneyland").or_insert(3); - /// assert_eq!(map["poneyland"], 3); - /// - /// // existing key - /// *map.entry("poneyland").or_insert(10) *= 2; - /// assert_eq!(map["poneyland"], 6); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert(self, default: V) -> &'a mut V - where - K: Hash, - S: BuildHasher, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(default), - } - } - - /// Ensures a value is in the entry by inserting the result of the default function if empty, - /// and returns a mutable reference to the value in the entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// // nonexistent key - /// map.entry("poneyland").or_insert_with(|| 3); - /// assert_eq!(map["poneyland"], 3); - /// - /// // existing key - /// *map.entry("poneyland").or_insert_with(|| 10) *= 2; - /// assert_eq!(map["poneyland"], 6); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert_with V>(self, default: F) -> &'a mut V - where - K: Hash, - S: BuildHasher, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(default()), - } - } - - /// Ensures a value is in the entry by inserting, if empty, the result of the default function. - /// This method allows for generating key-derived values for insertion by providing the default - /// function a reference to the key that was moved during the `.entry(key)` method call. - /// - /// The reference to the moved key is provided so that cloning or copying the key is - /// unnecessary, unlike with `.or_insert_with(|| ... )`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, usize> = HashMap::new(); - /// - /// // nonexistent key - /// map.entry("poneyland").or_insert_with_key(|key| key.chars().count()); - /// assert_eq!(map["poneyland"], 9); - /// - /// // existing key - /// *map.entry("poneyland").or_insert_with_key(|key| key.chars().count() * 10) *= 2; - /// assert_eq!(map["poneyland"], 18); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert_with_key V>(self, default: F) -> &'a mut V - where - K: Hash, - S: BuildHasher, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => { - let value = default(entry.key()); - entry.insert(value) - } - } - } - - /// Returns a reference to this entry's key. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.entry("poneyland").or_insert(3); - /// // existing key - /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); - /// // nonexistent key - /// assert_eq!(map.entry("horseland").key(), &"horseland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &K { - match *self { - Entry::Occupied(ref entry) => entry.key(), - Entry::Vacant(ref entry) => entry.key(), - } - } - - /// Provides in-place mutable access to an occupied entry before any - /// potential inserts into the map. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// map.entry("poneyland") - /// .and_modify(|e| { *e += 1 }) - /// .or_insert(42); - /// assert_eq!(map["poneyland"], 42); - /// - /// map.entry("poneyland") - /// .and_modify(|e| { *e += 1 }) - /// .or_insert(42); - /// assert_eq!(map["poneyland"], 43); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn and_modify(self, f: F) -> Self - where - F: FnOnce(&mut V), - { - match self { - Entry::Occupied(mut entry) => { - f(entry.get_mut()); - Entry::Occupied(entry) - } - Entry::Vacant(entry) => Entry::Vacant(entry), - } - } - - /// Provides shared access to the key and owned access to the value of - /// an occupied entry and allows to replace or remove it based on the - /// value of the returned option. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::Entry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// let entry = map - /// .entry("poneyland") - /// .and_replace_entry_with(|_k, _v| panic!()); - /// - /// match entry { - /// Entry::Vacant(e) => { - /// assert_eq!(e.key(), &"poneyland"); - /// } - /// Entry::Occupied(_) => panic!(), - /// } - /// - /// map.insert("poneyland", 42); - /// - /// let entry = map - /// .entry("poneyland") - /// .and_replace_entry_with(|k, v| { - /// assert_eq!(k, &"poneyland"); - /// assert_eq!(v, 42); - /// Some(v + 1) - /// }); - /// - /// match entry { - /// Entry::Occupied(e) => { - /// assert_eq!(e.key(), &"poneyland"); - /// assert_eq!(e.get(), &43); - /// } - /// Entry::Vacant(_) => panic!(), - /// } - /// - /// assert_eq!(map["poneyland"], 43); - /// - /// let entry = map - /// .entry("poneyland") - /// .and_replace_entry_with(|_k, _v| None); - /// - /// match entry { - /// Entry::Vacant(e) => assert_eq!(e.key(), &"poneyland"), - /// Entry::Occupied(_) => panic!(), - /// } - /// - /// assert!(!map.contains_key("poneyland")); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn and_replace_entry_with(self, f: F) -> Self - where - F: FnOnce(&K, V) -> Option, - { - match self { - Entry::Occupied(entry) => entry.replace_entry_with(f), - Entry::Vacant(_) => self, - } - } -} - -impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { - /// Ensures a value is in the entry by inserting the default value if empty, - /// and returns a mutable reference to the value in the entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, Option> = HashMap::new(); - /// - /// // nonexistent key - /// map.entry("poneyland").or_default(); - /// assert_eq!(map["poneyland"], None); - /// - /// map.insert("horseland", Some(3)); - /// - /// // existing key - /// assert_eq!(map.entry("horseland").or_default(), &mut Some(3)); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_default(self) -> &'a mut V - where - K: Hash, - S: BuildHasher, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(Default::default()), - } - } -} - -impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { - /// Gets a reference to the key in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{Entry, HashMap}; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.entry("poneyland").or_insert(12); - /// - /// match map.entry("poneyland") { - /// Entry::Vacant(_) => panic!(), - /// Entry::Occupied(entry) => assert_eq!(entry.key(), &"poneyland"), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &K { - unsafe { &self.elem.as_ref().0 } - } - - /// Take the ownership of the key and value from the map. - /// Keeps the allocated memory for reuse. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::Entry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// // The map is empty - /// assert!(map.is_empty() && map.capacity() == 0); - /// - /// map.entry("poneyland").or_insert(12); - /// let capacity_before_remove = map.capacity(); - /// - /// if let Entry::Occupied(o) = map.entry("poneyland") { - /// // We delete the entry from the map. 
- /// assert_eq!(o.remove_entry(), ("poneyland", 12)); - /// } - /// - /// assert_eq!(map.contains_key("poneyland"), false); - /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.table.remove(self.elem) } - } - - /// Gets a reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::Entry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.entry("poneyland").or_insert(12); - /// - /// match map.entry("poneyland") { - /// Entry::Vacant(_) => panic!(), - /// Entry::Occupied(entry) => assert_eq!(entry.get(), &12), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get(&self) -> &V { - unsafe { &self.elem.as_ref().1 } - } - - /// Gets a mutable reference to the value in the entry. - /// - /// If you need a reference to the `OccupiedEntry` which may outlive the - /// destruction of the `Entry` value, see [`into_mut`]. - /// - /// [`into_mut`]: #method.into_mut - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::Entry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.entry("poneyland").or_insert(12); - /// - /// assert_eq!(map["poneyland"], 12); - /// if let Entry::Occupied(mut o) = map.entry("poneyland") { - /// *o.get_mut() += 10; - /// assert_eq!(*o.get(), 22); - /// - /// // We can use the same Entry multiple times. - /// *o.get_mut() += 2; - /// } - /// - /// assert_eq!(map["poneyland"], 24); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get_mut(&mut self) -> &mut V { - unsafe { &mut self.elem.as_mut().1 } - } - - /// Converts the OccupiedEntry into a mutable reference to the value in the entry - /// with a lifetime bound to the map itself. 
- /// - /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`]. - /// - /// [`get_mut`]: #method.get_mut - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{Entry, HashMap}; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.entry("poneyland").or_insert(12); - /// - /// assert_eq!(map["poneyland"], 12); - /// - /// let value: &mut u32; - /// match map.entry("poneyland") { - /// Entry::Occupied(entry) => value = entry.into_mut(), - /// Entry::Vacant(_) => panic!(), - /// } - /// *value += 10; - /// - /// assert_eq!(map["poneyland"], 22); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_mut(self) -> &'a mut V { - unsafe { &mut self.elem.as_mut().1 } - } - - /// Sets the value of the entry, and returns the entry's old value. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::Entry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.entry("poneyland").or_insert(12); - /// - /// if let Entry::Occupied(mut o) = map.entry("poneyland") { - /// assert_eq!(o.insert(15), 12); - /// } - /// - /// assert_eq!(map["poneyland"], 15); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(&mut self, value: V) -> V { - mem::replace(self.get_mut(), value) - } - - /// Takes the value out of the entry, and returns it. - /// Keeps the allocated memory for reuse. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::Entry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// // The map is empty - /// assert!(map.is_empty() && map.capacity() == 0); - /// - /// map.entry("poneyland").or_insert(12); - /// let capacity_before_remove = map.capacity(); - /// - /// if let Entry::Occupied(o) = map.entry("poneyland") { - /// assert_eq!(o.remove(), 12); - /// } - /// - /// assert_eq!(map.contains_key("poneyland"), false); - /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove(self) -> V { - self.remove_entry().1 - } - - /// Replaces the entry, returning the old key and value. The new key in the hash map will be - /// the key used to create this entry. - /// - /// # Panics - /// - /// Will panic if this OccupiedEntry was created through [`Entry::insert`]. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{Entry, HashMap}; - /// use std::rc::Rc; - /// - /// let mut map: HashMap, u32> = HashMap::new(); - /// let key_one = Rc::new("Stringthing".to_string()); - /// let key_two = Rc::new("Stringthing".to_string()); - /// - /// map.insert(key_one.clone(), 15); - /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); - /// - /// match map.entry(key_two.clone()) { - /// Entry::Occupied(entry) => { - /// let (old_key, old_value): (Rc, u32) = entry.replace_entry(16); - /// assert!(Rc::ptr_eq(&key_one, &old_key) && old_value == 15); - /// } - /// Entry::Vacant(_) => panic!(), - /// } - /// - /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); - /// assert_eq!(map[&"Stringthing".to_owned()], 16); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace_entry(self, value: V) -> (K, V) { - let entry = unsafe { self.elem.as_mut() }; - - let old_key = mem::replace(&mut entry.0, self.key.unwrap()); - let old_value = mem::replace(&mut entry.1, value); - - (old_key, old_value) - } - - /// Replaces the key in the hash map with the key used to create this entry. - /// - /// # Panics - /// - /// Will panic if this OccupiedEntry was created through [`Entry::insert`]. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{Entry, HashMap}; - /// use std::rc::Rc; - /// - /// let mut map: HashMap, usize> = HashMap::with_capacity(6); - /// let mut keys_one: Vec> = Vec::with_capacity(6); - /// let mut keys_two: Vec> = Vec::with_capacity(6); - /// - /// for (value, key) in ["a", "b", "c", "d", "e", "f"].into_iter().enumerate() { - /// let rc_key = Rc::new(key.to_owned()); - /// keys_one.push(rc_key.clone()); - /// map.insert(rc_key.clone(), value); - /// keys_two.push(Rc::new(key.to_owned())); - /// } - /// - /// assert!( - /// keys_one.iter().all(|key| Rc::strong_count(key) == 2) - /// && keys_two.iter().all(|key| Rc::strong_count(key) == 1) - /// ); - /// - /// reclaim_memory(&mut map, &keys_two); - /// - /// assert!( - /// keys_one.iter().all(|key| Rc::strong_count(key) == 1) - /// && keys_two.iter().all(|key| Rc::strong_count(key) == 2) - /// ); - /// - /// fn reclaim_memory(map: &mut HashMap, usize>, keys: &[Rc]) { - /// for key in keys { - /// if let Entry::Occupied(entry) = map.entry(key.clone()) { - /// // Replaces the entry's key with our version of it in `keys`. - /// entry.replace_key(); - /// } - /// } - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace_key(self) -> K { - let entry = unsafe { self.elem.as_mut() }; - mem::replace(&mut entry.0, self.key.unwrap()) - } - - /// Provides shared access to the key and owned access to the value of - /// the entry and allows to replace or remove it based on the - /// value of the returned option. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::Entry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.insert("poneyland", 42); - /// - /// let entry = match map.entry("poneyland") { - /// Entry::Occupied(e) => { - /// e.replace_entry_with(|k, v| { - /// assert_eq!(k, &"poneyland"); - /// assert_eq!(v, 42); - /// Some(v + 1) - /// }) - /// } - /// Entry::Vacant(_) => panic!(), - /// }; - /// - /// match entry { - /// Entry::Occupied(e) => { - /// assert_eq!(e.key(), &"poneyland"); - /// assert_eq!(e.get(), &43); - /// } - /// Entry::Vacant(_) => panic!(), - /// } - /// - /// assert_eq!(map["poneyland"], 43); - /// - /// let entry = match map.entry("poneyland") { - /// Entry::Occupied(e) => e.replace_entry_with(|_k, _v| None), - /// Entry::Vacant(_) => panic!(), - /// }; - /// - /// match entry { - /// Entry::Vacant(e) => { - /// assert_eq!(e.key(), &"poneyland"); - /// } - /// Entry::Occupied(_) => panic!(), - /// } - /// - /// assert!(!map.contains_key("poneyland")); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace_entry_with(self, f: F) -> Entry<'a, K, V, S, A> - where - F: FnOnce(&K, V) -> Option, - { - unsafe { - let mut spare_key = None; - - self.table - .table - .replace_bucket_with(self.elem.clone(), |(key, value)| { - if let Some(new_value) = f(&key, value) { - Some((key, new_value)) - } else { - spare_key = Some(key); - None - } - }); - - if let Some(key) = spare_key { - Entry::Vacant(VacantEntry { - hash: self.hash, - key, - table: self.table, - }) - } else { - Entry::Occupied(self) - } - } - } -} - -impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { - /// Gets a reference to the key that would be used when inserting a value - /// through the `VacantEntry`. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &K { - &self.key - } - - /// Take ownership of the key. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{Entry, HashMap}; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// match map.entry("poneyland") { - /// Entry::Occupied(_) => panic!(), - /// Entry::Vacant(v) => assert_eq!(v.into_key(), "poneyland"), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_key(self) -> K { - self.key - } - - /// Sets the value of the entry with the VacantEntry's key, - /// and returns a mutable reference to it. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::Entry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// if let Entry::Vacant(o) = map.entry("poneyland") { - /// o.insert(37); - /// } - /// assert_eq!(map["poneyland"], 37); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(self, value: V) -> &'a mut V - where - K: Hash, - S: BuildHasher, - { - let table = &mut self.table.table; - let entry = table.insert_entry( - self.hash, - (self.key, value), - make_hasher::(&self.table.hash_builder), - ); - &mut entry.1 - } - - #[cfg_attr(feature = "inline-more", inline)] - pub(crate) fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V, S, A> - where - K: Hash, - S: BuildHasher, - { - let elem = self.table.table.insert( - self.hash, - (self.key, value), - make_hasher::(&self.table.hash_builder), - ); - OccupiedEntry { - hash: self.hash, - key: None, - elem, - table: self.table, - } - } -} - -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { - /// Sets the value of the entry, and returns an OccupiedEntryRef. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = HashMap::new(); - /// let entry = map.entry_ref("horseyland").insert(37); - /// - /// assert_eq!(entry.key(), "horseyland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(self, value: V) -> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> - where - K: Hash + From<&'b Q>, - S: BuildHasher, - { - match self { - EntryRef::Occupied(mut entry) => { - entry.insert(value); - entry - } - EntryRef::Vacant(entry) => entry.insert_entry(value), - } - } - - /// Ensures a value is in the entry by inserting the default if empty, and returns - /// a mutable reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = HashMap::new(); - /// - /// // nonexistent key - /// map.entry_ref("poneyland").or_insert(3); - /// assert_eq!(map["poneyland"], 3); - /// - /// // existing key - /// *map.entry_ref("poneyland").or_insert(10) *= 2; - /// assert_eq!(map["poneyland"], 6); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert(self, default: V) -> &'a mut V - where - K: Hash + From<&'b Q>, - S: BuildHasher, - { - match self { - EntryRef::Occupied(entry) => entry.into_mut(), - EntryRef::Vacant(entry) => entry.insert(default), - } - } - - /// Ensures a value is in the entry by inserting the result of the default function if empty, - /// and returns a mutable reference to the value in the entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = HashMap::new(); - /// - /// // nonexistent key - /// map.entry_ref("poneyland").or_insert_with(|| 3); - /// assert_eq!(map["poneyland"], 3); - /// - /// // existing key - /// *map.entry_ref("poneyland").or_insert_with(|| 10) *= 2; - /// assert_eq!(map["poneyland"], 6); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert_with V>(self, default: F) -> &'a mut V - where - K: Hash + From<&'b Q>, - S: BuildHasher, - { - match self { - EntryRef::Occupied(entry) => entry.into_mut(), - EntryRef::Vacant(entry) => entry.insert(default()), - } - } - - /// Ensures a value is in the entry by inserting, if empty, the result of the default function. - /// This method allows for generating key-derived values for insertion by providing the default - /// function a reference to the key that was moved during the `.entry_ref(key)` method call. - /// - /// The reference to the moved key is provided so that cloning or copying the key is - /// unnecessary, unlike with `.or_insert_with(|| ... )`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = HashMap::new(); - /// - /// // nonexistent key - /// map.entry_ref("poneyland").or_insert_with_key(|key| key.chars().count()); - /// assert_eq!(map["poneyland"], 9); - /// - /// // existing key - /// *map.entry_ref("poneyland").or_insert_with_key(|key| key.chars().count() * 10) *= 2; - /// assert_eq!(map["poneyland"], 18); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert_with_key V>(self, default: F) -> &'a mut V - where - K: Hash + Borrow + From<&'b Q>, - S: BuildHasher, - { - match self { - EntryRef::Occupied(entry) => entry.into_mut(), - EntryRef::Vacant(entry) => { - let value = default(entry.key.as_ref()); - entry.insert(value) - } - } - } - - /// Returns a reference to this entry's key. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = HashMap::new(); - /// map.entry_ref("poneyland").or_insert(3); - /// // existing key - /// assert_eq!(map.entry_ref("poneyland").key(), "poneyland"); - /// // nonexistent key - /// assert_eq!(map.entry_ref("horseland").key(), "horseland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &Q - where - K: Borrow, - { - match *self { - EntryRef::Occupied(ref entry) => entry.key(), - EntryRef::Vacant(ref entry) => entry.key(), - } - } - - /// Provides in-place mutable access to an occupied entry before any - /// potential inserts into the map. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = HashMap::new(); - /// - /// map.entry_ref("poneyland") - /// .and_modify(|e| { *e += 1 }) - /// .or_insert(42); - /// assert_eq!(map["poneyland"], 42); - /// - /// map.entry_ref("poneyland") - /// .and_modify(|e| { *e += 1 }) - /// .or_insert(42); - /// assert_eq!(map["poneyland"], 43); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn and_modify(self, f: F) -> Self - where - F: FnOnce(&mut V), - { - match self { - EntryRef::Occupied(mut entry) => { - f(entry.get_mut()); - EntryRef::Occupied(entry) - } - EntryRef::Vacant(entry) => EntryRef::Vacant(entry), - } - } - - /// Provides shared access to the key and owned access to the value of - /// an occupied entry and allows to replace or remove it based on the - /// value of the returned option. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::EntryRef; - /// - /// let mut map: HashMap = HashMap::new(); - /// - /// let entry = map - /// .entry_ref("poneyland") - /// .and_replace_entry_with(|_k, _v| panic!()); - /// - /// match entry { - /// EntryRef::Vacant(e) => { - /// assert_eq!(e.key(), "poneyland"); - /// } - /// EntryRef::Occupied(_) => panic!(), - /// } - /// - /// map.insert("poneyland".to_string(), 42); - /// - /// let entry = map - /// .entry_ref("poneyland") - /// .and_replace_entry_with(|k, v| { - /// assert_eq!(k, "poneyland"); - /// assert_eq!(v, 42); - /// Some(v + 1) - /// }); - /// - /// match entry { - /// EntryRef::Occupied(e) => { - /// assert_eq!(e.key(), "poneyland"); - /// assert_eq!(e.get(), &43); - /// } - /// EntryRef::Vacant(_) => panic!(), - /// } - /// - /// assert_eq!(map["poneyland"], 43); - /// - /// let entry = map - /// .entry_ref("poneyland") - /// .and_replace_entry_with(|_k, _v| None); - /// - /// match entry { - /// EntryRef::Vacant(e) => assert_eq!(e.key(), "poneyland"), - /// EntryRef::Occupied(_) => panic!(), - /// } - /// - /// assert!(!map.contains_key("poneyland")); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn and_replace_entry_with(self, f: F) -> Self - where - F: FnOnce(&Q, V) -> Option, - K: Borrow, - { - match self { - EntryRef::Occupied(entry) => entry.replace_entry_with(f), - EntryRef::Vacant(_) => self, - } - } -} - -impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { - /// Ensures a value is in the entry by inserting the default value if empty, - /// and returns a mutable reference to the value in the entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap> = HashMap::new(); - /// - /// // nonexistent key - /// map.entry_ref("poneyland").or_default(); - /// assert_eq!(map["poneyland"], None); - /// - /// map.insert("horseland".to_string(), Some(3)); - /// - /// // existing key - /// assert_eq!(map.entry_ref("horseland").or_default(), &mut Some(3)); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_default(self) -> &'a mut V - where - K: Hash + From<&'b Q>, - S: BuildHasher, - { - match self { - EntryRef::Occupied(entry) => entry.into_mut(), - EntryRef::Vacant(entry) => entry.insert(Default::default()), - } - } -} - -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> { - /// Gets a reference to the key in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{EntryRef, HashMap}; - /// - /// let mut map: HashMap = HashMap::new(); - /// map.entry_ref("poneyland").or_insert(12); - /// - /// match map.entry_ref("poneyland") { - /// EntryRef::Vacant(_) => panic!(), - /// EntryRef::Occupied(entry) => assert_eq!(entry.key(), "poneyland"), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &Q - where - K: Borrow, - { - unsafe { &self.elem.as_ref().0 }.borrow() - } - - /// Take the ownership of the key and value from the map. - /// Keeps the allocated memory for reuse. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::EntryRef; - /// - /// let mut map: HashMap = HashMap::new(); - /// // The map is empty - /// assert!(map.is_empty() && map.capacity() == 0); - /// - /// map.entry_ref("poneyland").or_insert(12); - /// let capacity_before_remove = map.capacity(); - /// - /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") { - /// // We delete the entry from the map. 
- /// assert_eq!(o.remove_entry(), ("poneyland".to_owned(), 12)); - /// } - /// - /// assert_eq!(map.contains_key("poneyland"), false); - /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.table.remove(self.elem) } - } - - /// Gets a reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::EntryRef; - /// - /// let mut map: HashMap = HashMap::new(); - /// map.entry_ref("poneyland").or_insert(12); - /// - /// match map.entry_ref("poneyland") { - /// EntryRef::Vacant(_) => panic!(), - /// EntryRef::Occupied(entry) => assert_eq!(entry.get(), &12), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get(&self) -> &V { - unsafe { &self.elem.as_ref().1 } - } - - /// Gets a mutable reference to the value in the entry. - /// - /// If you need a reference to the `OccupiedEntryRef` which may outlive the - /// destruction of the `EntryRef` value, see [`into_mut`]. - /// - /// [`into_mut`]: #method.into_mut - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::EntryRef; - /// - /// let mut map: HashMap = HashMap::new(); - /// map.entry_ref("poneyland").or_insert(12); - /// - /// assert_eq!(map["poneyland"], 12); - /// if let EntryRef::Occupied(mut o) = map.entry_ref("poneyland") { - /// *o.get_mut() += 10; - /// assert_eq!(*o.get(), 22); - /// - /// // We can use the same Entry multiple times. 
- /// *o.get_mut() += 2; - /// } - /// - /// assert_eq!(map["poneyland"], 24); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get_mut(&mut self) -> &mut V { - unsafe { &mut self.elem.as_mut().1 } - } - - /// Converts the OccupiedEntryRef into a mutable reference to the value in the entry - /// with a lifetime bound to the map itself. - /// - /// If you need multiple references to the `OccupiedEntryRef`, see [`get_mut`]. - /// - /// [`get_mut`]: #method.get_mut - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{EntryRef, HashMap}; - /// - /// let mut map: HashMap = HashMap::new(); - /// map.entry_ref("poneyland").or_insert(12); - /// - /// let value: &mut u32; - /// match map.entry_ref("poneyland") { - /// EntryRef::Occupied(entry) => value = entry.into_mut(), - /// EntryRef::Vacant(_) => panic!(), - /// } - /// *value += 10; - /// - /// assert_eq!(map["poneyland"], 22); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_mut(self) -> &'a mut V { - unsafe { &mut self.elem.as_mut().1 } - } - - /// Sets the value of the entry, and returns the entry's old value. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::EntryRef; - /// - /// let mut map: HashMap = HashMap::new(); - /// map.entry_ref("poneyland").or_insert(12); - /// - /// if let EntryRef::Occupied(mut o) = map.entry_ref("poneyland") { - /// assert_eq!(o.insert(15), 12); - /// } - /// - /// assert_eq!(map["poneyland"], 15); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(&mut self, value: V) -> V { - mem::replace(self.get_mut(), value) - } - - /// Takes the value out of the entry, and returns it. - /// Keeps the allocated memory for reuse. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::EntryRef; - /// - /// let mut map: HashMap = HashMap::new(); - /// // The map is empty - /// assert!(map.is_empty() && map.capacity() == 0); - /// - /// map.entry_ref("poneyland").or_insert(12); - /// let capacity_before_remove = map.capacity(); - /// - /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") { - /// assert_eq!(o.remove(), 12); - /// } - /// - /// assert_eq!(map.contains_key("poneyland"), false); - /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove(self) -> V { - self.remove_entry().1 - } - - /// Replaces the entry, returning the old key and value. The new key in the hash map will be - /// the key used to create this entry. - /// - /// # Panics - /// - /// Will panic if this OccupiedEntry was created through [`EntryRef::insert`]. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{EntryRef, HashMap}; - /// use std::rc::Rc; - /// - /// let mut map: HashMap, u32> = HashMap::new(); - /// let key: Rc = Rc::from("Stringthing"); - /// - /// map.insert(key.clone(), 15); - /// assert_eq!(Rc::strong_count(&key), 2); - /// - /// match map.entry_ref("Stringthing") { - /// EntryRef::Occupied(entry) => { - /// let (old_key, old_value): (Rc, u32) = entry.replace_entry(16); - /// assert!(Rc::ptr_eq(&key, &old_key) && old_value == 15); - /// } - /// EntryRef::Vacant(_) => panic!(), - /// } - /// - /// assert_eq!(Rc::strong_count(&key), 1); - /// assert_eq!(map["Stringthing"], 16); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace_entry(self, value: V) -> (K, V) - where - K: From<&'b Q>, - { - let entry = unsafe { self.elem.as_mut() }; - - let old_key = mem::replace(&mut entry.0, self.key.unwrap().into_owned()); - let old_value = mem::replace(&mut entry.1, value); - - (old_key, old_value) - } - - /// Replaces the key in the hash map with the key used to create this entry. - /// - /// # Panics - /// - /// Will panic if this OccupiedEntry was created through [`Entry::insert`]. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{EntryRef, HashMap}; - /// use std::rc::Rc; - /// - /// let mut map: HashMap, usize> = HashMap::with_capacity(6); - /// let mut keys: Vec> = Vec::with_capacity(6); - /// - /// for (value, key) in ["a", "b", "c", "d", "e", "f"].into_iter().enumerate() { - /// let rc_key: Rc = Rc::from(key); - /// keys.push(rc_key.clone()); - /// map.insert(rc_key.clone(), value); - /// } - /// - /// assert!(keys.iter().all(|key| Rc::strong_count(key) == 2)); - /// - /// // It doesn't matter that we kind of use a vector with the same keys, - /// // because all keys will be newly created from the references - /// reclaim_memory(&mut map, &keys); - /// - /// assert!(keys.iter().all(|key| Rc::strong_count(key) == 1)); - /// - /// fn reclaim_memory(map: &mut HashMap, usize>, keys: &[Rc]) { - /// for key in keys { - /// if let EntryRef::Occupied(entry) = map.entry_ref(key.as_ref()) { - /// /// Replaces the entry's key with our version of it in `keys`. - /// entry.replace_key(); - /// } - /// } - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace_key(self) -> K - where - K: From<&'b Q>, - { - let entry = unsafe { self.elem.as_mut() }; - mem::replace(&mut entry.0, self.key.unwrap().into_owned()) - } - - /// Provides shared access to the key and owned access to the value of - /// the entry and allows to replace or remove it based on the - /// value of the returned option. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::EntryRef; - /// - /// let mut map: HashMap = HashMap::new(); - /// map.insert("poneyland".to_string(), 42); - /// - /// let entry = match map.entry_ref("poneyland") { - /// EntryRef::Occupied(e) => { - /// e.replace_entry_with(|k, v| { - /// assert_eq!(k, "poneyland"); - /// assert_eq!(v, 42); - /// Some(v + 1) - /// }) - /// } - /// EntryRef::Vacant(_) => panic!(), - /// }; - /// - /// match entry { - /// EntryRef::Occupied(e) => { - /// assert_eq!(e.key(), "poneyland"); - /// assert_eq!(e.get(), &43); - /// } - /// EntryRef::Vacant(_) => panic!(), - /// } - /// - /// assert_eq!(map["poneyland"], 43); - /// - /// let entry = match map.entry_ref("poneyland") { - /// EntryRef::Occupied(e) => e.replace_entry_with(|_k, _v| None), - /// EntryRef::Vacant(_) => panic!(), - /// }; - /// - /// match entry { - /// EntryRef::Vacant(e) => { - /// assert_eq!(e.key(), "poneyland"); - /// } - /// EntryRef::Occupied(_) => panic!(), - /// } - /// - /// assert!(!map.contains_key("poneyland")); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace_entry_with(self, f: F) -> EntryRef<'a, 'b, K, Q, V, S, A> - where - F: FnOnce(&Q, V) -> Option, - K: Borrow, - { - unsafe { - let mut spare_key = None; - - self.table - .table - .replace_bucket_with(self.elem.clone(), |(key, value)| { - if let Some(new_value) = f(key.borrow(), value) { - Some((key, new_value)) - } else { - spare_key = Some(KeyOrRef::Owned(key)); - None - } - }); - - if let Some(key) = spare_key { - EntryRef::Vacant(VacantEntryRef { - hash: self.hash, - key, - table: self.table, - }) - } else { - EntryRef::Occupied(self) - } - } - } -} - -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, Q, V, S, A> { - /// Gets a reference to the key that would be used when inserting a value - /// through the `VacantEntryRef`. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap = HashMap::new(); - /// let key: &str = "poneyland"; - /// assert_eq!(map.entry_ref(key).key(), "poneyland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &Q - where - K: Borrow, - { - self.key.as_ref() - } - - /// Take ownership of the key. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{EntryRef, HashMap}; - /// - /// let mut map: HashMap = HashMap::new(); - /// let key: &str = "poneyland"; - /// - /// match map.entry_ref(key) { - /// EntryRef::Occupied(_) => panic!(), - /// EntryRef::Vacant(v) => assert_eq!(v.into_key(), "poneyland".to_owned()), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_key(self) -> K - where - K: From<&'b Q>, - { - self.key.into_owned() - } - - /// Sets the value of the entry with the VacantEntryRef's key, - /// and returns a mutable reference to it. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::EntryRef; - /// - /// let mut map: HashMap = HashMap::new(); - /// let key: &str = "poneyland"; - /// - /// if let EntryRef::Vacant(o) = map.entry_ref(key) { - /// o.insert(37); - /// } - /// assert_eq!(map["poneyland"], 37); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(self, value: V) -> &'a mut V - where - K: Hash + From<&'b Q>, - S: BuildHasher, - { - let table = &mut self.table.table; - let entry = table.insert_entry( - self.hash, - (self.key.into_owned(), value), - make_hasher::(&self.table.hash_builder), - ); - &mut entry.1 - } - - #[cfg_attr(feature = "inline-more", inline)] - fn insert_entry(self, value: V) -> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> - where - K: Hash + From<&'b Q>, - S: BuildHasher, - { - let elem = self.table.table.insert( - self.hash, - (self.key.into_owned(), value), - make_hasher::(&self.table.hash_builder), - ); - OccupiedEntryRef { - hash: 
self.hash, - key: None, - elem, - table: self.table, - } - } -} - -impl FromIterator<(K, V)> for HashMap -where - K: Eq + Hash, - S: BuildHasher + Default, - A: Default + Allocator + Clone, -{ - #[cfg_attr(feature = "inline-more", inline)] - fn from_iter>(iter: T) -> Self { - let iter = iter.into_iter(); - let mut map = - Self::with_capacity_and_hasher_in(iter.size_hint().0, S::default(), A::default()); - iter.for_each(|(k, v)| { - map.insert(k, v); - }); - map - } -} - -/// Inserts all new key-values from the iterator and replaces values with existing -/// keys with new values returned from the iterator. -impl Extend<(K, V)> for HashMap -where - K: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - /// Inserts all new key-values from the iterator to existing `HashMap`. - /// Replace values with existing keys with new values returned from the iterator. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert(1, 100); - /// - /// let some_iter = [(1, 1), (2, 2)].into_iter(); - /// map.extend(some_iter); - /// // Replace values with existing keys with new values returned from the iterator. - /// // So that the map.get(&1) doesn't return Some(&100). - /// assert_eq!(map.get(&1), Some(&1)); - /// - /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)]; - /// map.extend(some_vec); - /// - /// let some_arr = [(5, 5), (6, 6)]; - /// map.extend(some_arr); - /// let old_map_len = map.len(); - /// - /// // You can also extend from another HashMap - /// let mut new_map = HashMap::new(); - /// new_map.extend(map); - /// assert_eq!(new_map.len(), old_map_len); - /// - /// let mut vec: Vec<_> = new_map.into_iter().collect(); - /// // The `IntoIter` iterator produces items in arbitrary order, so the - /// // items must be sorted to test them against a sorted array. 
- /// vec.sort_unstable(); - /// assert_eq!(vec, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - fn extend>(&mut self, iter: T) { - // Keys may be already present or show multiple times in the iterator. - // Reserve the entire hint lower bound if the map is empty. - // Otherwise reserve half the hint (rounded up), so the map - // will only resize twice in the worst case. - let iter = iter.into_iter(); - let reserve = if self.is_empty() { - iter.size_hint().0 - } else { - (iter.size_hint().0 + 1) / 2 - }; - self.reserve(reserve); - iter.for_each(move |(k, v)| { - self.insert(k, v); - }); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_one(&mut self, (k, v): (K, V)) { - self.insert(k, v); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_reserve(&mut self, additional: usize) { - // Keys may be already present or show multiple times in the iterator. - // Reserve the entire hint lower bound if the map is empty. - // Otherwise reserve half the hint (rounded up), so the map - // will only resize twice in the worst case. - let reserve = if self.is_empty() { - additional - } else { - (additional + 1) / 2 - }; - self.reserve(reserve); - } -} - -/// Inserts all new key-values from the iterator and replaces values with existing -/// keys with new values returned from the iterator. -impl<'a, K, V, S, A> Extend<(&'a K, &'a V)> for HashMap -where - K: Eq + Hash + Copy, - V: Copy, - S: BuildHasher, - A: Allocator + Clone, -{ - /// Inserts all new key-values from the iterator to existing `HashMap`. - /// Replace values with existing keys with new values returned from the iterator. - /// The keys and values must implement [`Copy`] trait. 
- /// - /// [`Copy`]: https://doc.rust-lang.org/core/marker/trait.Copy.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert(1, 100); - /// - /// let arr = [(1, 1), (2, 2)]; - /// let some_iter = arr.iter().map(|&(k, v)| (k, v)); - /// map.extend(some_iter); - /// // Replace values with existing keys with new values returned from the iterator. - /// // So that the map.get(&1) doesn't return Some(&100). - /// assert_eq!(map.get(&1), Some(&1)); - /// - /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)]; - /// map.extend(some_vec.iter().map(|&(k, v)| (k, v))); - /// - /// let some_arr = [(5, 5), (6, 6)]; - /// map.extend(some_arr.iter().map(|&(k, v)| (k, v))); - /// - /// // You can also extend from another HashMap - /// let mut new_map = HashMap::new(); - /// new_map.extend(&map); - /// assert_eq!(new_map, map); - /// - /// let mut vec: Vec<_> = new_map.into_iter().collect(); - /// // The `IntoIter` iterator produces items in arbitrary order, so the - /// // items must be sorted to test them against a sorted array. - /// vec.sort_unstable(); - /// assert_eq!(vec, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - fn extend>(&mut self, iter: T) { - self.extend(iter.into_iter().map(|(&key, &value)| (key, value))); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_one(&mut self, (k, v): (&'a K, &'a V)) { - self.insert(*k, *v); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_reserve(&mut self, additional: usize) { - Extend::<(K, V)>::extend_reserve(self, additional); - } -} - -/// Inserts all new key-values from the iterator and replaces values with existing -/// keys with new values returned from the iterator. 
-impl<'a, K, V, S, A> Extend<&'a (K, V)> for HashMap -where - K: Eq + Hash + Copy, - V: Copy, - S: BuildHasher, - A: Allocator + Clone, -{ - /// Inserts all new key-values from the iterator to existing `HashMap`. - /// Replace values with existing keys with new values returned from the iterator. - /// The keys and values must implement [`Copy`] trait. - /// - /// [`Copy`]: https://doc.rust-lang.org/core/marker/trait.Copy.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::HashMap; - /// - /// let mut map = HashMap::new(); - /// map.insert(1, 100); - /// - /// let arr = [(1, 1), (2, 2)]; - /// let some_iter = arr.iter(); - /// map.extend(some_iter); - /// // Replace values with existing keys with new values returned from the iterator. - /// // So that the map.get(&1) doesn't return Some(&100). - /// assert_eq!(map.get(&1), Some(&1)); - /// - /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)]; - /// map.extend(&some_vec); - /// - /// let some_arr = [(5, 5), (6, 6)]; - /// map.extend(&some_arr); - /// - /// let mut vec: Vec<_> = map.into_iter().collect(); - /// // The `IntoIter` iterator produces items in arbitrary order, so the - /// // items must be sorted to test them against a sorted array. 
- /// vec.sort_unstable(); - /// assert_eq!(vec, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - fn extend>(&mut self, iter: T) { - self.extend(iter.into_iter().map(|&(key, value)| (key, value))); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_one(&mut self, &(k, v): &'a (K, V)) { - self.insert(k, v); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_reserve(&mut self, additional: usize) { - Extend::<(K, V)>::extend_reserve(self, additional); - } -} - -#[allow(dead_code)] -fn assert_covariance() { - fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> { - v - } - fn map_val<'new>(v: HashMap) -> HashMap { - v - } - fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> { - v - } - fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { - v - } - fn into_iter_key<'new, A: Allocator + Clone>( - v: IntoIter<&'static str, u8, A>, - ) -> IntoIter<&'new str, u8, A> { - v - } - fn into_iter_val<'new, A: Allocator + Clone>( - v: IntoIter, - ) -> IntoIter { - v - } - fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> { - v - } - fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> { - v - } - fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> { - v - } - fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> { - v - } - fn drain<'new>( - d: Drain<'static, &'static str, &'static str>, - ) -> Drain<'new, &'new str, &'new str> { - d - } -} - -#[cfg(test)] -mod test_map { - use super::DefaultHashBuilder; - use super::Entry::{Occupied, Vacant}; - use super::EntryRef; - use super::{HashMap, RawEntryMut}; - use rand::{rngs::SmallRng, Rng, SeedableRng}; - use std::borrow::ToOwned; - use std::cell::RefCell; - use std::usize; - use std::vec::Vec; - - #[test] - fn test_zero_capacities() { - type HM = HashMap; - - 
let m = HM::new(); - assert_eq!(m.capacity(), 0); - - let m = HM::default(); - assert_eq!(m.capacity(), 0); - - let m = HM::with_hasher(DefaultHashBuilder::default()); - assert_eq!(m.capacity(), 0); - - let m = HM::with_capacity(0); - assert_eq!(m.capacity(), 0); - - let m = HM::with_capacity_and_hasher(0, DefaultHashBuilder::default()); - assert_eq!(m.capacity(), 0); - - let mut m = HM::new(); - m.insert(1, 1); - m.insert(2, 2); - m.remove(&1); - m.remove(&2); - m.shrink_to_fit(); - assert_eq!(m.capacity(), 0); - - let mut m = HM::new(); - m.reserve(0); - assert_eq!(m.capacity(), 0); - } - - #[test] - fn test_create_capacity_zero() { - let mut m = HashMap::with_capacity(0); - - assert!(m.insert(1, 1).is_none()); - - assert!(m.contains_key(&1)); - assert!(!m.contains_key(&0)); - } - - #[test] - fn test_insert() { - let mut m = HashMap::new(); - assert_eq!(m.len(), 0); - assert!(m.insert(1, 2).is_none()); - assert_eq!(m.len(), 1); - assert!(m.insert(2, 4).is_none()); - assert_eq!(m.len(), 2); - assert_eq!(*m.get(&1).unwrap(), 2); - assert_eq!(*m.get(&2).unwrap(), 4); - } - - #[test] - fn test_clone() { - let mut m = HashMap::new(); - assert_eq!(m.len(), 0); - assert!(m.insert(1, 2).is_none()); - assert_eq!(m.len(), 1); - assert!(m.insert(2, 4).is_none()); - assert_eq!(m.len(), 2); - #[allow(clippy::redundant_clone)] - let m2 = m.clone(); - assert_eq!(*m2.get(&1).unwrap(), 2); - assert_eq!(*m2.get(&2).unwrap(), 4); - assert_eq!(m2.len(), 2); - } - - #[test] - fn test_clone_from() { - let mut m = HashMap::new(); - let mut m2 = HashMap::new(); - assert_eq!(m.len(), 0); - assert!(m.insert(1, 2).is_none()); - assert_eq!(m.len(), 1); - assert!(m.insert(2, 4).is_none()); - assert_eq!(m.len(), 2); - m2.clone_from(&m); - assert_eq!(*m2.get(&1).unwrap(), 2); - assert_eq!(*m2.get(&2).unwrap(), 4); - assert_eq!(m2.len(), 2); - } - - thread_local! 
{ static DROP_VECTOR: RefCell> = RefCell::new(Vec::new()) } - - #[derive(Hash, PartialEq, Eq)] - struct Droppable { - k: usize, - } - - impl Droppable { - fn new(k: usize) -> Droppable { - DROP_VECTOR.with(|slot| { - slot.borrow_mut()[k] += 1; - }); - - Droppable { k } - } - } - - impl Drop for Droppable { - fn drop(&mut self) { - DROP_VECTOR.with(|slot| { - slot.borrow_mut()[self.k] -= 1; - }); - } - } - - impl Clone for Droppable { - fn clone(&self) -> Self { - Droppable::new(self.k) - } - } - - #[test] - fn test_drops() { - DROP_VECTOR.with(|slot| { - *slot.borrow_mut() = vec![0; 200]; - }); - - { - let mut m = HashMap::new(); - - DROP_VECTOR.with(|v| { - for i in 0..200 { - assert_eq!(v.borrow()[i], 0); - } - }); - - for i in 0..100 { - let d1 = Droppable::new(i); - let d2 = Droppable::new(i + 100); - m.insert(d1, d2); - } - - DROP_VECTOR.with(|v| { - for i in 0..200 { - assert_eq!(v.borrow()[i], 1); - } - }); - - for i in 0..50 { - let k = Droppable::new(i); - let v = m.remove(&k); - - assert!(v.is_some()); - - DROP_VECTOR.with(|v| { - assert_eq!(v.borrow()[i], 1); - assert_eq!(v.borrow()[i + 100], 1); - }); - } - - DROP_VECTOR.with(|v| { - for i in 0..50 { - assert_eq!(v.borrow()[i], 0); - assert_eq!(v.borrow()[i + 100], 0); - } - - for i in 50..100 { - assert_eq!(v.borrow()[i], 1); - assert_eq!(v.borrow()[i + 100], 1); - } - }); - } - - DROP_VECTOR.with(|v| { - for i in 0..200 { - assert_eq!(v.borrow()[i], 0); - } - }); - } - - #[test] - fn test_into_iter_drops() { - DROP_VECTOR.with(|v| { - *v.borrow_mut() = vec![0; 200]; - }); - - let hm = { - let mut hm = HashMap::new(); - - DROP_VECTOR.with(|v| { - for i in 0..200 { - assert_eq!(v.borrow()[i], 0); - } - }); - - for i in 0..100 { - let d1 = Droppable::new(i); - let d2 = Droppable::new(i + 100); - hm.insert(d1, d2); - } - - DROP_VECTOR.with(|v| { - for i in 0..200 { - assert_eq!(v.borrow()[i], 1); - } - }); - - hm - }; - - // By the way, ensure that cloning doesn't screw up the dropping. 
- drop(hm.clone()); - - { - let mut half = hm.into_iter().take(50); - - DROP_VECTOR.with(|v| { - for i in 0..200 { - assert_eq!(v.borrow()[i], 1); - } - }); - - #[allow(clippy::let_underscore_drop)] // kind-of a false positive - for _ in half.by_ref() {} - - DROP_VECTOR.with(|v| { - let nk = (0..100).filter(|&i| v.borrow()[i] == 1).count(); - - let nv = (0..100).filter(|&i| v.borrow()[i + 100] == 1).count(); - - assert_eq!(nk, 50); - assert_eq!(nv, 50); - }); - }; - - DROP_VECTOR.with(|v| { - for i in 0..200 { - assert_eq!(v.borrow()[i], 0); - } - }); - } - - #[test] - fn test_empty_remove() { - let mut m: HashMap = HashMap::new(); - assert_eq!(m.remove(&0), None); - } - - #[test] - fn test_empty_entry() { - let mut m: HashMap = HashMap::new(); - match m.entry(0) { - Occupied(_) => panic!(), - Vacant(_) => {} - } - assert!(*m.entry(0).or_insert(true)); - assert_eq!(m.len(), 1); - } - - #[test] - fn test_empty_entry_ref() { - let mut m: HashMap = HashMap::new(); - match m.entry_ref("poneyland") { - EntryRef::Occupied(_) => panic!(), - EntryRef::Vacant(_) => {} - } - assert!(*m.entry_ref("poneyland").or_insert(true)); - assert_eq!(m.len(), 1); - } - - #[test] - fn test_empty_iter() { - let mut m: HashMap = HashMap::new(); - assert_eq!(m.drain().next(), None); - assert_eq!(m.keys().next(), None); - assert_eq!(m.values().next(), None); - assert_eq!(m.values_mut().next(), None); - assert_eq!(m.iter().next(), None); - assert_eq!(m.iter_mut().next(), None); - assert_eq!(m.len(), 0); - assert!(m.is_empty()); - assert_eq!(m.into_iter().next(), None); - } - - #[test] - #[cfg_attr(miri, ignore)] // FIXME: takes too long - fn test_lots_of_insertions() { - let mut m = HashMap::new(); - - // Try this a few times to make sure we never screw up the hashmap's - // internal state. 
- for _ in 0..10 { - assert!(m.is_empty()); - - for i in 1..1001 { - assert!(m.insert(i, i).is_none()); - - for j in 1..=i { - let r = m.get(&j); - assert_eq!(r, Some(&j)); - } - - for j in i + 1..1001 { - let r = m.get(&j); - assert_eq!(r, None); - } - } - - for i in 1001..2001 { - assert!(!m.contains_key(&i)); - } - - // remove forwards - for i in 1..1001 { - assert!(m.remove(&i).is_some()); - - for j in 1..=i { - assert!(!m.contains_key(&j)); - } - - for j in i + 1..1001 { - assert!(m.contains_key(&j)); - } - } - - for i in 1..1001 { - assert!(!m.contains_key(&i)); - } - - for i in 1..1001 { - assert!(m.insert(i, i).is_none()); - } - - // remove backwards - for i in (1..1001).rev() { - assert!(m.remove(&i).is_some()); - - for j in i..1001 { - assert!(!m.contains_key(&j)); - } - - for j in 1..i { - assert!(m.contains_key(&j)); - } - } - } - } - - #[test] - fn test_find_mut() { - let mut m = HashMap::new(); - assert!(m.insert(1, 12).is_none()); - assert!(m.insert(2, 8).is_none()); - assert!(m.insert(5, 14).is_none()); - let new = 100; - match m.get_mut(&5) { - None => panic!(), - Some(x) => *x = new, - } - assert_eq!(m.get(&5), Some(&new)); - } - - #[test] - fn test_insert_overwrite() { - let mut m = HashMap::new(); - assert!(m.insert(1, 2).is_none()); - assert_eq!(*m.get(&1).unwrap(), 2); - assert!(m.insert(1, 3).is_some()); - assert_eq!(*m.get(&1).unwrap(), 3); - } - - #[test] - fn test_insert_conflicts() { - let mut m = HashMap::with_capacity(4); - assert!(m.insert(1, 2).is_none()); - assert!(m.insert(5, 3).is_none()); - assert!(m.insert(9, 4).is_none()); - assert_eq!(*m.get(&9).unwrap(), 4); - assert_eq!(*m.get(&5).unwrap(), 3); - assert_eq!(*m.get(&1).unwrap(), 2); - } - - #[test] - fn test_conflict_remove() { - let mut m = HashMap::with_capacity(4); - assert!(m.insert(1, 2).is_none()); - assert_eq!(*m.get(&1).unwrap(), 2); - assert!(m.insert(5, 3).is_none()); - assert_eq!(*m.get(&1).unwrap(), 2); - assert_eq!(*m.get(&5).unwrap(), 3); - assert!(m.insert(9, 
4).is_none()); - assert_eq!(*m.get(&1).unwrap(), 2); - assert_eq!(*m.get(&5).unwrap(), 3); - assert_eq!(*m.get(&9).unwrap(), 4); - assert!(m.remove(&1).is_some()); - assert_eq!(*m.get(&9).unwrap(), 4); - assert_eq!(*m.get(&5).unwrap(), 3); - } - - #[test] - fn test_insert_unique_unchecked() { - let mut map = HashMap::new(); - let (k1, v1) = map.insert_unique_unchecked(10, 11); - assert_eq!((&10, &mut 11), (k1, v1)); - let (k2, v2) = map.insert_unique_unchecked(20, 21); - assert_eq!((&20, &mut 21), (k2, v2)); - assert_eq!(Some(&11), map.get(&10)); - assert_eq!(Some(&21), map.get(&20)); - assert_eq!(None, map.get(&30)); - } - - #[test] - fn test_is_empty() { - let mut m = HashMap::with_capacity(4); - assert!(m.insert(1, 2).is_none()); - assert!(!m.is_empty()); - assert!(m.remove(&1).is_some()); - assert!(m.is_empty()); - } - - #[test] - fn test_remove() { - let mut m = HashMap::new(); - m.insert(1, 2); - assert_eq!(m.remove(&1), Some(2)); - assert_eq!(m.remove(&1), None); - } - - #[test] - fn test_remove_entry() { - let mut m = HashMap::new(); - m.insert(1, 2); - assert_eq!(m.remove_entry(&1), Some((1, 2))); - assert_eq!(m.remove(&1), None); - } - - #[test] - fn test_iterate() { - let mut m = HashMap::with_capacity(4); - for i in 0..32 { - assert!(m.insert(i, i * 2).is_none()); - } - assert_eq!(m.len(), 32); - - let mut observed: u32 = 0; - - for (k, v) in &m { - assert_eq!(*v, *k * 2); - observed |= 1 << *k; - } - assert_eq!(observed, 0xFFFF_FFFF); - } - - #[test] - fn test_keys() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: HashMap<_, _> = vec.into_iter().collect(); - let keys: Vec<_> = map.keys().copied().collect(); - assert_eq!(keys.len(), 3); - assert!(keys.contains(&1)); - assert!(keys.contains(&2)); - assert!(keys.contains(&3)); - } - - #[test] - fn test_values() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: HashMap<_, _> = vec.into_iter().collect(); - let values: Vec<_> = map.values().copied().collect(); - 
assert_eq!(values.len(), 3); - assert!(values.contains(&'a')); - assert!(values.contains(&'b')); - assert!(values.contains(&'c')); - } - - #[test] - fn test_values_mut() { - let vec = vec![(1, 1), (2, 2), (3, 3)]; - let mut map: HashMap<_, _> = vec.into_iter().collect(); - for value in map.values_mut() { - *value *= 2; - } - let values: Vec<_> = map.values().copied().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&2)); - assert!(values.contains(&4)); - assert!(values.contains(&6)); - } - - #[test] - fn test_into_keys() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: HashMap<_, _> = vec.into_iter().collect(); - let keys: Vec<_> = map.into_keys().collect(); - - assert_eq!(keys.len(), 3); - assert!(keys.contains(&1)); - assert!(keys.contains(&2)); - assert!(keys.contains(&3)); - } - - #[test] - fn test_into_values() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: HashMap<_, _> = vec.into_iter().collect(); - let values: Vec<_> = map.into_values().collect(); - - assert_eq!(values.len(), 3); - assert!(values.contains(&'a')); - assert!(values.contains(&'b')); - assert!(values.contains(&'c')); - } - - #[test] - fn test_find() { - let mut m = HashMap::new(); - assert!(m.get(&1).is_none()); - m.insert(1, 2); - match m.get(&1) { - None => panic!(), - Some(v) => assert_eq!(*v, 2), - } - } - - #[test] - fn test_eq() { - let mut m1 = HashMap::new(); - m1.insert(1, 2); - m1.insert(2, 3); - m1.insert(3, 4); - - let mut m2 = HashMap::new(); - m2.insert(1, 2); - m2.insert(2, 3); - - assert!(m1 != m2); - - m2.insert(3, 4); - - assert_eq!(m1, m2); - } - - #[test] - fn test_show() { - let mut map = HashMap::new(); - let empty: HashMap = HashMap::new(); - - map.insert(1, 2); - map.insert(3, 4); - - let map_str = format!("{:?}", map); - - assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}"); - assert_eq!(format!("{:?}", empty), "{}"); - } - - #[test] - fn test_expand() { - let mut m = HashMap::new(); - - assert_eq!(m.len(), 0); - 
assert!(m.is_empty()); - - let mut i = 0; - let old_raw_cap = m.raw_capacity(); - while old_raw_cap == m.raw_capacity() { - m.insert(i, i); - i += 1; - } - - assert_eq!(m.len(), i); - assert!(!m.is_empty()); - } - - #[test] - fn test_behavior_resize_policy() { - let mut m = HashMap::new(); - - assert_eq!(m.len(), 0); - assert_eq!(m.raw_capacity(), 1); - assert!(m.is_empty()); - - m.insert(0, 0); - m.remove(&0); - assert!(m.is_empty()); - let initial_raw_cap = m.raw_capacity(); - m.reserve(initial_raw_cap); - let raw_cap = m.raw_capacity(); - - assert_eq!(raw_cap, initial_raw_cap * 2); - - let mut i = 0; - for _ in 0..raw_cap * 3 / 4 { - m.insert(i, i); - i += 1; - } - // three quarters full - - assert_eq!(m.len(), i); - assert_eq!(m.raw_capacity(), raw_cap); - - for _ in 0..raw_cap / 4 { - m.insert(i, i); - i += 1; - } - // half full - - let new_raw_cap = m.raw_capacity(); - assert_eq!(new_raw_cap, raw_cap * 2); - - for _ in 0..raw_cap / 2 - 1 { - i -= 1; - m.remove(&i); - assert_eq!(m.raw_capacity(), new_raw_cap); - } - // A little more than one quarter full. 
- m.shrink_to_fit(); - assert_eq!(m.raw_capacity(), raw_cap); - // again, a little more than half full - for _ in 0..raw_cap / 2 { - i -= 1; - m.remove(&i); - } - m.shrink_to_fit(); - - assert_eq!(m.len(), i); - assert!(!m.is_empty()); - assert_eq!(m.raw_capacity(), initial_raw_cap); - } - - #[test] - fn test_reserve_shrink_to_fit() { - let mut m = HashMap::new(); - m.insert(0, 0); - m.remove(&0); - assert!(m.capacity() >= m.len()); - for i in 0..128 { - m.insert(i, i); - } - m.reserve(256); - - let usable_cap = m.capacity(); - for i in 128..(128 + 256) { - m.insert(i, i); - assert_eq!(m.capacity(), usable_cap); - } - - for i in 100..(128 + 256) { - assert_eq!(m.remove(&i), Some(i)); - } - m.shrink_to_fit(); - - assert_eq!(m.len(), 100); - assert!(!m.is_empty()); - assert!(m.capacity() >= m.len()); - - for i in 0..100 { - assert_eq!(m.remove(&i), Some(i)); - } - m.shrink_to_fit(); - m.insert(0, 0); - - assert_eq!(m.len(), 1); - assert!(m.capacity() >= m.len()); - assert_eq!(m.remove(&0), Some(0)); - } - - #[test] - fn test_from_iter() { - let xs = [(1, 1), (2, 2), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - - let map: HashMap<_, _> = xs.iter().copied().collect(); - - for &(k, v) in &xs { - assert_eq!(map.get(&k), Some(&v)); - } - - assert_eq!(map.iter().len(), xs.len() - 1); - } - - #[test] - fn test_size_hint() { - let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - - let map: HashMap<_, _> = xs.iter().copied().collect(); - - let mut iter = map.iter(); - - for _ in iter.by_ref().take(3) {} - - assert_eq!(iter.size_hint(), (3, Some(3))); - } - - #[test] - fn test_iter_len() { - let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - - let map: HashMap<_, _> = xs.iter().copied().collect(); - - let mut iter = map.iter(); - - for _ in iter.by_ref().take(3) {} - - assert_eq!(iter.len(), 3); - } - - #[test] - fn test_mut_size_hint() { - let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - - let mut map: HashMap<_, _> = xs.iter().copied().collect(); 
- - let mut iter = map.iter_mut(); - - for _ in iter.by_ref().take(3) {} - - assert_eq!(iter.size_hint(), (3, Some(3))); - } - - #[test] - fn test_iter_mut_len() { - let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; - - let mut map: HashMap<_, _> = xs.iter().copied().collect(); - - let mut iter = map.iter_mut(); - - for _ in iter.by_ref().take(3) {} - - assert_eq!(iter.len(), 3); - } - - #[test] - fn test_index() { - let mut map = HashMap::new(); - - map.insert(1, 2); - map.insert(2, 1); - map.insert(3, 4); - - assert_eq!(map[&2], 1); - } - - #[test] - #[should_panic] - fn test_index_nonexistent() { - let mut map = HashMap::new(); - - map.insert(1, 2); - map.insert(2, 1); - map.insert(3, 4); - - #[allow(clippy::no_effect)] // false positive lint - map[&4]; - } - - #[test] - fn test_entry() { - let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; - - let mut map: HashMap<_, _> = xs.iter().copied().collect(); - - // Existing key (insert) - match map.entry(1) { - Vacant(_) => unreachable!(), - Occupied(mut view) => { - assert_eq!(view.get(), &10); - assert_eq!(view.insert(100), 10); - } - } - assert_eq!(map.get(&1).unwrap(), &100); - assert_eq!(map.len(), 6); - - // Existing key (update) - match map.entry(2) { - Vacant(_) => unreachable!(), - Occupied(mut view) => { - let v = view.get_mut(); - let new_v = (*v) * 10; - *v = new_v; - } - } - assert_eq!(map.get(&2).unwrap(), &200); - assert_eq!(map.len(), 6); - - // Existing key (take) - match map.entry(3) { - Vacant(_) => unreachable!(), - Occupied(view) => { - assert_eq!(view.remove(), 30); - } - } - assert_eq!(map.get(&3), None); - assert_eq!(map.len(), 5); - - // Inexistent key (insert) - match map.entry(10) { - Occupied(_) => unreachable!(), - Vacant(view) => { - assert_eq!(*view.insert(1000), 1000); - } - } - assert_eq!(map.get(&10).unwrap(), &1000); - assert_eq!(map.len(), 6); - } - - #[test] - fn test_entry_ref() { - let xs = [ - ("One".to_owned(), 10), - ("Two".to_owned(), 20), - 
("Three".to_owned(), 30), - ("Four".to_owned(), 40), - ("Five".to_owned(), 50), - ("Six".to_owned(), 60), - ]; - - let mut map: HashMap<_, _> = xs.iter().cloned().collect(); - - // Existing key (insert) - match map.entry_ref("One") { - EntryRef::Vacant(_) => unreachable!(), - EntryRef::Occupied(mut view) => { - assert_eq!(view.get(), &10); - assert_eq!(view.insert(100), 10); - } - } - assert_eq!(map.get("One").unwrap(), &100); - assert_eq!(map.len(), 6); - - // Existing key (update) - match map.entry_ref("Two") { - EntryRef::Vacant(_) => unreachable!(), - EntryRef::Occupied(mut view) => { - let v = view.get_mut(); - let new_v = (*v) * 10; - *v = new_v; - } - } - assert_eq!(map.get("Two").unwrap(), &200); - assert_eq!(map.len(), 6); - - // Existing key (take) - match map.entry_ref("Three") { - EntryRef::Vacant(_) => unreachable!(), - EntryRef::Occupied(view) => { - assert_eq!(view.remove(), 30); - } - } - assert_eq!(map.get("Three"), None); - assert_eq!(map.len(), 5); - - // Inexistent key (insert) - match map.entry_ref("Ten") { - EntryRef::Occupied(_) => unreachable!(), - EntryRef::Vacant(view) => { - assert_eq!(*view.insert(1000), 1000); - } - } - assert_eq!(map.get("Ten").unwrap(), &1000); - assert_eq!(map.len(), 6); - } - - #[test] - fn test_entry_take_doesnt_corrupt() { - #![allow(deprecated)] //rand - // Test for #19292 - fn check(m: &HashMap) { - for k in m.keys() { - assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); - } - } - - let mut m = HashMap::new(); - - let mut rng = { - let seed = u64::from_le_bytes(*b"testseed"); - SmallRng::seed_from_u64(seed) - }; - - // Populate the map with some items. 
- for _ in 0..50 { - let x = rng.gen_range(-10..10); - m.insert(x, ()); - } - - for _ in 0..1000 { - let x = rng.gen_range(-10..10); - match m.entry(x) { - Vacant(_) => {} - Occupied(e) => { - e.remove(); - } - } - - check(&m); - } - } - - #[test] - fn test_entry_ref_take_doesnt_corrupt() { - #![allow(deprecated)] //rand - // Test for #19292 - fn check(m: &HashMap) { - for k in m.keys() { - assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); - } - } - - let mut m = HashMap::new(); - - let mut rng = { - let seed = u64::from_le_bytes(*b"testseed"); - SmallRng::seed_from_u64(seed) - }; - - // Populate the map with some items. - for _ in 0..50 { - let mut x = std::string::String::with_capacity(1); - x.push(rng.gen_range('a'..='z')); - m.insert(x, ()); - } - - for _ in 0..1000 { - let mut x = std::string::String::with_capacity(1); - x.push(rng.gen_range('a'..='z')); - match m.entry_ref(x.as_str()) { - EntryRef::Vacant(_) => {} - EntryRef::Occupied(e) => { - e.remove(); - } - } - - check(&m); - } - } - - #[test] - fn test_extend_ref_k_ref_v() { - let mut a = HashMap::new(); - a.insert(1, "one"); - let mut b = HashMap::new(); - b.insert(2, "two"); - b.insert(3, "three"); - - a.extend(&b); - - assert_eq!(a.len(), 3); - assert_eq!(a[&1], "one"); - assert_eq!(a[&2], "two"); - assert_eq!(a[&3], "three"); - } - - #[test] - fn test_extend_ref_kv_tuple() { - use std::ops::AddAssign; - let mut a = HashMap::new(); - a.insert(0, 0); - - fn create_arr + Copy, const N: usize>(start: T, step: T) -> [(T, T); N] { - let mut outs: [(T, T); N] = [(start, start); N]; - let mut element = step; - outs.iter_mut().skip(1).for_each(|(k, v)| { - *k += element; - *v += element; - element += step; - }); - outs - } - - let for_iter: Vec<_> = (0..100).map(|i| (i, i)).collect(); - let iter = for_iter.iter(); - let vec: Vec<_> = (100..200).map(|i| (i, i)).collect(); - a.extend(iter); - a.extend(&vec); - a.extend(&create_arr::(200, 1)); - - assert_eq!(a.len(), 300); - - for item in 
0..300 { - assert_eq!(a[&item], item); - } - } - - #[test] - fn test_capacity_not_less_than_len() { - let mut a = HashMap::new(); - let mut item = 0; - - for _ in 0..116 { - a.insert(item, 0); - item += 1; - } - - assert!(a.capacity() > a.len()); - - let free = a.capacity() - a.len(); - for _ in 0..free { - a.insert(item, 0); - item += 1; - } - - assert_eq!(a.len(), a.capacity()); - - // Insert at capacity should cause allocation. - a.insert(item, 0); - assert!(a.capacity() > a.len()); - } - - #[test] - fn test_occupied_entry_key() { - let mut a = HashMap::new(); - let key = "hello there"; - let value = "value goes here"; - assert!(a.is_empty()); - a.insert(key, value); - assert_eq!(a.len(), 1); - assert_eq!(a[key], value); - - match a.entry(key) { - Vacant(_) => panic!(), - Occupied(e) => assert_eq!(key, *e.key()), - } - assert_eq!(a.len(), 1); - assert_eq!(a[key], value); - } - - #[test] - fn test_occupied_entry_ref_key() { - let mut a = HashMap::new(); - let key = "hello there"; - let value = "value goes here"; - assert!(a.is_empty()); - a.insert(key.to_owned(), value); - assert_eq!(a.len(), 1); - assert_eq!(a[key], value); - - match a.entry_ref(key) { - EntryRef::Vacant(_) => panic!(), - EntryRef::Occupied(e) => assert_eq!(key, e.key()), - } - assert_eq!(a.len(), 1); - assert_eq!(a[key], value); - } - - #[test] - fn test_vacant_entry_key() { - let mut a = HashMap::new(); - let key = "hello there"; - let value = "value goes here"; - - assert!(a.is_empty()); - match a.entry(key) { - Occupied(_) => panic!(), - Vacant(e) => { - assert_eq!(key, *e.key()); - e.insert(value); - } - } - assert_eq!(a.len(), 1); - assert_eq!(a[key], value); - } - - #[test] - fn test_vacant_entry_ref_key() { - let mut a: HashMap = HashMap::new(); - let key = "hello there"; - let value = "value goes here"; - - assert!(a.is_empty()); - match a.entry_ref(key) { - EntryRef::Occupied(_) => panic!(), - EntryRef::Vacant(e) => { - assert_eq!(key, e.key()); - e.insert(value); - } - } - 
assert_eq!(a.len(), 1); - assert_eq!(a[key], value); - } - - #[test] - fn test_occupied_entry_replace_entry_with() { - let mut a = HashMap::new(); - - let key = "a key"; - let value = "an initial value"; - let new_value = "a new value"; - - let entry = a.entry(key).insert(value).replace_entry_with(|k, v| { - assert_eq!(k, &key); - assert_eq!(v, value); - Some(new_value) - }); - - match entry { - Occupied(e) => { - assert_eq!(e.key(), &key); - assert_eq!(e.get(), &new_value); - } - Vacant(_) => panic!(), - } - - assert_eq!(a[key], new_value); - assert_eq!(a.len(), 1); - - let entry = match a.entry(key) { - Occupied(e) => e.replace_entry_with(|k, v| { - assert_eq!(k, &key); - assert_eq!(v, new_value); - None - }), - Vacant(_) => panic!(), - }; - - match entry { - Vacant(e) => assert_eq!(e.key(), &key), - Occupied(_) => panic!(), - } - - assert!(!a.contains_key(key)); - assert_eq!(a.len(), 0); - } - - #[test] - fn test_occupied_entry_ref_replace_entry_with() { - let mut a: HashMap = HashMap::new(); - - let key = "a key"; - let value = "an initial value"; - let new_value = "a new value"; - - let entry = a.entry_ref(key).insert(value).replace_entry_with(|k, v| { - assert_eq!(k, key); - assert_eq!(v, value); - Some(new_value) - }); - - match entry { - EntryRef::Occupied(e) => { - assert_eq!(e.key(), key); - assert_eq!(e.get(), &new_value); - } - EntryRef::Vacant(_) => panic!(), - } - - assert_eq!(a[key], new_value); - assert_eq!(a.len(), 1); - - let entry = match a.entry_ref(key) { - EntryRef::Occupied(e) => e.replace_entry_with(|k, v| { - assert_eq!(k, key); - assert_eq!(v, new_value); - None - }), - EntryRef::Vacant(_) => panic!(), - }; - - match entry { - EntryRef::Vacant(e) => assert_eq!(e.key(), key), - EntryRef::Occupied(_) => panic!(), - } - - assert!(!a.contains_key(key)); - assert_eq!(a.len(), 0); - } - - #[test] - fn test_entry_and_replace_entry_with() { - let mut a = HashMap::new(); - - let key = "a key"; - let value = "an initial value"; - let new_value = "a 
new value"; - - let entry = a.entry(key).and_replace_entry_with(|_, _| panic!()); - - match entry { - Vacant(e) => assert_eq!(e.key(), &key), - Occupied(_) => panic!(), - } - - a.insert(key, value); - - let entry = a.entry(key).and_replace_entry_with(|k, v| { - assert_eq!(k, &key); - assert_eq!(v, value); - Some(new_value) - }); - - match entry { - Occupied(e) => { - assert_eq!(e.key(), &key); - assert_eq!(e.get(), &new_value); - } - Vacant(_) => panic!(), - } - - assert_eq!(a[key], new_value); - assert_eq!(a.len(), 1); - - let entry = a.entry(key).and_replace_entry_with(|k, v| { - assert_eq!(k, &key); - assert_eq!(v, new_value); - None - }); - - match entry { - Vacant(e) => assert_eq!(e.key(), &key), - Occupied(_) => panic!(), - } - - assert!(!a.contains_key(key)); - assert_eq!(a.len(), 0); - } - - #[test] - fn test_entry_ref_and_replace_entry_with() { - let mut a = HashMap::new(); - - let key = "a key"; - let value = "an initial value"; - let new_value = "a new value"; - - let entry = a.entry_ref(key).and_replace_entry_with(|_, _| panic!()); - - match entry { - EntryRef::Vacant(e) => assert_eq!(e.key(), key), - EntryRef::Occupied(_) => panic!(), - } - - a.insert(key.to_owned(), value); - - let entry = a.entry_ref(key).and_replace_entry_with(|k, v| { - assert_eq!(k, key); - assert_eq!(v, value); - Some(new_value) - }); - - match entry { - EntryRef::Occupied(e) => { - assert_eq!(e.key(), key); - assert_eq!(e.get(), &new_value); - } - EntryRef::Vacant(_) => panic!(), - } - - assert_eq!(a[key], new_value); - assert_eq!(a.len(), 1); - - let entry = a.entry_ref(key).and_replace_entry_with(|k, v| { - assert_eq!(k, key); - assert_eq!(v, new_value); - None - }); - - match entry { - EntryRef::Vacant(e) => assert_eq!(e.key(), key), - EntryRef::Occupied(_) => panic!(), - } - - assert!(!a.contains_key(key)); - assert_eq!(a.len(), 0); - } - - #[test] - fn test_raw_occupied_entry_replace_entry_with() { - let mut a = HashMap::new(); - - let key = "a key"; - let value = "an 
initial value"; - let new_value = "a new value"; - - let entry = a - .raw_entry_mut() - .from_key(&key) - .insert(key, value) - .replace_entry_with(|k, v| { - assert_eq!(k, &key); - assert_eq!(v, value); - Some(new_value) - }); - - match entry { - RawEntryMut::Occupied(e) => { - assert_eq!(e.key(), &key); - assert_eq!(e.get(), &new_value); - } - RawEntryMut::Vacant(_) => panic!(), - } - - assert_eq!(a[key], new_value); - assert_eq!(a.len(), 1); - - let entry = match a.raw_entry_mut().from_key(&key) { - RawEntryMut::Occupied(e) => e.replace_entry_with(|k, v| { - assert_eq!(k, &key); - assert_eq!(v, new_value); - None - }), - RawEntryMut::Vacant(_) => panic!(), - }; - - match entry { - RawEntryMut::Vacant(_) => {} - RawEntryMut::Occupied(_) => panic!(), - } - - assert!(!a.contains_key(key)); - assert_eq!(a.len(), 0); - } - - #[test] - fn test_raw_entry_and_replace_entry_with() { - let mut a = HashMap::new(); - - let key = "a key"; - let value = "an initial value"; - let new_value = "a new value"; - - let entry = a - .raw_entry_mut() - .from_key(&key) - .and_replace_entry_with(|_, _| panic!()); - - match entry { - RawEntryMut::Vacant(_) => {} - RawEntryMut::Occupied(_) => panic!(), - } - - a.insert(key, value); - - let entry = a - .raw_entry_mut() - .from_key(&key) - .and_replace_entry_with(|k, v| { - assert_eq!(k, &key); - assert_eq!(v, value); - Some(new_value) - }); - - match entry { - RawEntryMut::Occupied(e) => { - assert_eq!(e.key(), &key); - assert_eq!(e.get(), &new_value); - } - RawEntryMut::Vacant(_) => panic!(), - } - - assert_eq!(a[key], new_value); - assert_eq!(a.len(), 1); - - let entry = a - .raw_entry_mut() - .from_key(&key) - .and_replace_entry_with(|k, v| { - assert_eq!(k, &key); - assert_eq!(v, new_value); - None - }); - - match entry { - RawEntryMut::Vacant(_) => {} - RawEntryMut::Occupied(_) => panic!(), - } - - assert!(!a.contains_key(key)); - assert_eq!(a.len(), 0); - } - - #[test] - fn test_replace_entry_with_doesnt_corrupt() { - 
#![allow(deprecated)] //rand - // Test for #19292 - fn check(m: &HashMap) { - for k in m.keys() { - assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); - } - } - - let mut m = HashMap::new(); - - let mut rng = { - let seed = u64::from_le_bytes(*b"testseed"); - SmallRng::seed_from_u64(seed) - }; - - // Populate the map with some items. - for _ in 0..50 { - let x = rng.gen_range(-10..10); - m.insert(x, ()); - } - - for _ in 0..1000 { - let x = rng.gen_range(-10..10); - m.entry(x).and_replace_entry_with(|_, _| None); - check(&m); - } - } - - #[test] - fn test_replace_entry_ref_with_doesnt_corrupt() { - #![allow(deprecated)] //rand - // Test for #19292 - fn check(m: &HashMap) { - for k in m.keys() { - assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); - } - } - - let mut m = HashMap::new(); - - let mut rng = { - let seed = u64::from_le_bytes(*b"testseed"); - SmallRng::seed_from_u64(seed) - }; - - // Populate the map with some items. - for _ in 0..50 { - let mut x = std::string::String::with_capacity(1); - x.push(rng.gen_range('a'..='z')); - m.insert(x, ()); - } - - for _ in 0..1000 { - let mut x = std::string::String::with_capacity(1); - x.push(rng.gen_range('a'..='z')); - m.entry_ref(x.as_str()).and_replace_entry_with(|_, _| None); - check(&m); - } - } - - #[test] - fn test_retain() { - let mut map: HashMap = (0..100).map(|x| (x, x * 10)).collect(); - - map.retain(|&k, _| k % 2 == 0); - assert_eq!(map.len(), 50); - assert_eq!(map[&2], 20); - assert_eq!(map[&4], 40); - assert_eq!(map[&6], 60); - } - - #[test] - fn test_drain_filter() { - { - let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); - let drained = map.drain_filter(|&k, _| k % 2 == 0); - let mut out = drained.collect::>(); - out.sort_unstable(); - assert_eq!(vec![(0, 0), (2, 20), (4, 40), (6, 60)], out); - assert_eq!(map.len(), 4); - } - { - let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); - drop(map.drain_filter(|&k, _| k % 2 == 0)); - 
assert_eq!(map.len(), 4); - } - } - - #[test] - #[cfg_attr(miri, ignore)] // FIXME: no OOM signalling (https://github.com/rust-lang/miri/issues/613) - fn test_try_reserve() { - use crate::TryReserveError::{AllocError, CapacityOverflow}; - - const MAX_USIZE: usize = usize::MAX; - - let mut empty_bytes: HashMap = HashMap::new(); - - if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { - } else { - panic!("usize::MAX should trigger an overflow!"); - } - - if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 16) { - } else { - // This may succeed if there is enough free memory. Attempt to - // allocate a few more hashmaps to ensure the allocation will fail. - let mut empty_bytes2: HashMap = HashMap::new(); - let _ = empty_bytes2.try_reserve(MAX_USIZE / 16); - let mut empty_bytes3: HashMap = HashMap::new(); - let _ = empty_bytes3.try_reserve(MAX_USIZE / 16); - let mut empty_bytes4: HashMap = HashMap::new(); - if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_USIZE / 16) { - } else { - panic!("usize::MAX / 8 should trigger an OOM!"); - } - } - } - - #[test] - fn test_raw_entry() { - use super::RawEntryMut::{Occupied, Vacant}; - - let xs = [(1_i32, 10_i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; - - let mut map: HashMap<_, _> = xs.iter().copied().collect(); - - let compute_hash = |map: &HashMap, k: i32| -> u64 { - super::make_insert_hash::(map.hasher(), &k) - }; - - // Existing key (insert) - match map.raw_entry_mut().from_key(&1) { - Vacant(_) => unreachable!(), - Occupied(mut view) => { - assert_eq!(view.get(), &10); - assert_eq!(view.insert(100), 10); - } - } - let hash1 = compute_hash(&map, 1); - assert_eq!(map.raw_entry().from_key(&1).unwrap(), (&1, &100)); - assert_eq!( - map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), - (&1, &100) - ); - assert_eq!( - map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), - (&1, &100) - ); - assert_eq!(map.len(), 6); - - // Existing key (update) - match 
map.raw_entry_mut().from_key(&2) { - Vacant(_) => unreachable!(), - Occupied(mut view) => { - let v = view.get_mut(); - let new_v = (*v) * 10; - *v = new_v; - } - } - let hash2 = compute_hash(&map, 2); - assert_eq!(map.raw_entry().from_key(&2).unwrap(), (&2, &200)); - assert_eq!( - map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), - (&2, &200) - ); - assert_eq!( - map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), - (&2, &200) - ); - assert_eq!(map.len(), 6); - - // Existing key (take) - let hash3 = compute_hash(&map, 3); - match map.raw_entry_mut().from_key_hashed_nocheck(hash3, &3) { - Vacant(_) => unreachable!(), - Occupied(view) => { - assert_eq!(view.remove_entry(), (3, 30)); - } - } - assert_eq!(map.raw_entry().from_key(&3), None); - assert_eq!(map.raw_entry().from_hash(hash3, |k| *k == 3), None); - assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash3, &3), None); - assert_eq!(map.len(), 5); - - // Nonexistent key (insert) - match map.raw_entry_mut().from_key(&10) { - Occupied(_) => unreachable!(), - Vacant(view) => { - assert_eq!(view.insert(10, 1000), (&mut 10, &mut 1000)); - } - } - assert_eq!(map.raw_entry().from_key(&10).unwrap(), (&10, &1000)); - assert_eq!(map.len(), 6); - - // Ensure all lookup methods produce equivalent results. 
- for k in 0..12 { - let hash = compute_hash(&map, k); - let v = map.get(&k).copied(); - let kv = v.as_ref().map(|v| (&k, v)); - - assert_eq!(map.raw_entry().from_key(&k), kv); - assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); - assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); - - match map.raw_entry_mut().from_key(&k) { - Occupied(o) => assert_eq!(Some(o.get_key_value()), kv), - Vacant(_) => assert_eq!(v, None), - } - match map.raw_entry_mut().from_key_hashed_nocheck(hash, &k) { - Occupied(o) => assert_eq!(Some(o.get_key_value()), kv), - Vacant(_) => assert_eq!(v, None), - } - match map.raw_entry_mut().from_hash(hash, |q| *q == k) { - Occupied(o) => assert_eq!(Some(o.get_key_value()), kv), - Vacant(_) => assert_eq!(v, None), - } - } - } - - #[test] - fn test_key_without_hash_impl() { - #[derive(Debug)] - struct IntWrapper(u64); - - let mut m: HashMap = HashMap::default(); - { - assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_none()); - } - { - let vacant_entry = match m.raw_entry_mut().from_hash(0, |k| k.0 == 0) { - RawEntryMut::Occupied(..) => panic!("Found entry for key 0"), - RawEntryMut::Vacant(e) => e, - }; - vacant_entry.insert_with_hasher(0, IntWrapper(0), (), |k| k.0); - } - { - assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_some()); - assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_none()); - assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); - } - { - let vacant_entry = match m.raw_entry_mut().from_hash(1, |k| k.0 == 1) { - RawEntryMut::Occupied(..) 
=> panic!("Found entry for key 1"), - RawEntryMut::Vacant(e) => e, - }; - vacant_entry.insert_with_hasher(1, IntWrapper(1), (), |k| k.0); - } - { - assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_some()); - assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_some()); - assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); - } - { - let occupied_entry = match m.raw_entry_mut().from_hash(0, |k| k.0 == 0) { - RawEntryMut::Occupied(e) => e, - RawEntryMut::Vacant(..) => panic!("Couldn't find entry for key 0"), - }; - occupied_entry.remove(); - } - assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_none()); - assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_some()); - assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); - } - - #[test] - #[cfg(feature = "raw")] - fn test_into_iter_refresh() { - #[cfg(miri)] - const N: usize = 32; - #[cfg(not(miri))] - const N: usize = 128; - - let mut rng = rand::thread_rng(); - for n in 0..N { - let mut map = HashMap::new(); - for i in 0..n { - assert!(map.insert(i, 2 * i).is_none()); - } - let hash_builder = map.hasher().clone(); - - let mut it = unsafe { map.table.iter() }; - assert_eq!(it.len(), n); - - let mut i = 0; - let mut left = n; - let mut removed = Vec::new(); - loop { - // occasionally remove some elements - if i < n && rng.gen_bool(0.1) { - let hash_value = super::make_insert_hash(&hash_builder, &i); - - unsafe { - let e = map.table.find(hash_value, |q| q.0.eq(&i)); - if let Some(e) = e { - it.reflect_remove(&e); - let t = map.table.remove(e); - removed.push(t); - left -= 1; - } else { - assert!(removed.contains(&(i, 2 * i)), "{} not in {:?}", i, removed); - let e = map.table.insert( - hash_value, - (i, 2 * i), - super::make_hasher::(&hash_builder), - ); - it.reflect_insert(&e); - if let Some(p) = removed.iter().position(|e| e == &(i, 2 * i)) { - removed.swap_remove(p); - } - left += 1; - } - } - } - - let e = it.next(); - if e.is_none() { - break; - } - assert!(i < n); - let t = unsafe { 
e.unwrap().as_ref() }; - assert!(!removed.contains(t)); - let (key, value) = t; - assert_eq!(*value, 2 * key); - i += 1; - } - assert!(i <= n); - - // just for safety: - assert_eq!(map.table.len(), left); - } - } - - #[test] - fn test_const_with_hasher() { - use core::hash::BuildHasher; - use std::collections::hash_map::DefaultHasher; - - #[derive(Clone)] - struct MyHasher; - impl BuildHasher for MyHasher { - type Hasher = DefaultHasher; - - fn build_hasher(&self) -> DefaultHasher { - DefaultHasher::new() - } - } - - const EMPTY_MAP: HashMap = - HashMap::with_hasher(MyHasher); - - let mut map = EMPTY_MAP; - map.insert(17, "seventeen".to_owned()); - assert_eq!("seventeen", map[&17]); - } - - #[test] - fn test_get_each_mut() { - let mut map = HashMap::new(); - map.insert("foo".to_owned(), 0); - map.insert("bar".to_owned(), 10); - map.insert("baz".to_owned(), 20); - map.insert("qux".to_owned(), 30); - - let xs = map.get_many_mut(["foo", "qux"]); - assert_eq!(xs, Some([&mut 0, &mut 30])); - - let xs = map.get_many_mut(["foo", "dud"]); - assert_eq!(xs, None); - - let xs = map.get_many_mut(["foo", "foo"]); - assert_eq!(xs, None); - - let ys = map.get_many_key_value_mut(["bar", "baz"]); - assert_eq!( - ys, - Some([(&"bar".to_owned(), &mut 10), (&"baz".to_owned(), &mut 20),]), - ); - - let ys = map.get_many_key_value_mut(["bar", "dip"]); - assert_eq!(ys, None); - - let ys = map.get_many_key_value_mut(["baz", "baz"]); - assert_eq!(ys, None); - } - - #[test] - #[should_panic = "panic in drop"] - fn test_clone_from_double_drop() { - #[derive(Clone)] - struct CheckedDrop { - panic_in_drop: bool, - dropped: bool, - } - impl Drop for CheckedDrop { - fn drop(&mut self) { - if self.panic_in_drop { - self.dropped = true; - panic!("panic in drop"); - } - if self.dropped { - panic!("double drop"); - } - self.dropped = true; - } - } - const DISARMED: CheckedDrop = CheckedDrop { - panic_in_drop: false, - dropped: false, - }; - const ARMED: CheckedDrop = CheckedDrop { - panic_in_drop: 
true, - dropped: false, - }; - - let mut map1 = HashMap::new(); - map1.insert(1, DISARMED); - map1.insert(2, DISARMED); - map1.insert(3, DISARMED); - map1.insert(4, DISARMED); - - let mut map2 = HashMap::new(); - map2.insert(1, DISARMED); - map2.insert(2, ARMED); - map2.insert(3, DISARMED); - map2.insert(4, DISARMED); - - map2.clone_from(&map1); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/alloc.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/alloc.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/alloc.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/alloc.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,73 +0,0 @@ -pub(crate) use self::inner::{do_alloc, Allocator, Global}; - -#[cfg(feature = "nightly")] -mod inner { - use crate::alloc::alloc::Layout; - pub use crate::alloc::alloc::{Allocator, Global}; - use core::ptr::NonNull; - - #[allow(clippy::map_err_ignore)] - pub fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { - match alloc.allocate(layout) { - Ok(ptr) => Ok(ptr.as_non_null_ptr()), - Err(_) => Err(()), - } - } - - #[cfg(feature = "bumpalo")] - unsafe impl Allocator for crate::BumpWrapper<'_> { - #[inline] - fn allocate(&self, layout: Layout) -> Result, core::alloc::AllocError> { - match self.0.try_alloc_layout(layout) { - Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())), - Err(_) => Err(core::alloc::AllocError), - } - } - #[inline] - unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) {} - } -} - -#[cfg(not(feature = "nightly"))] -mod inner { - use crate::alloc::alloc::{alloc, dealloc, Layout}; - use core::ptr::NonNull; - - #[allow(clippy::missing_safety_doc)] // not exposed outside of this crate - pub unsafe trait Allocator { - fn allocate(&self, layout: Layout) -> Result, ()>; - unsafe fn deallocate(&self, ptr: NonNull, layout: Layout); - } - - #[derive(Copy, Clone)] - pub struct Global; - unsafe impl Allocator for Global { - 
#[inline] - fn allocate(&self, layout: Layout) -> Result, ()> { - unsafe { NonNull::new(alloc(layout)).ok_or(()) } - } - #[inline] - unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - dealloc(ptr.as_ptr(), layout); - } - } - impl Default for Global { - #[inline] - fn default() -> Self { - Global - } - } - - pub fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { - alloc.allocate(layout) - } - - #[cfg(feature = "bumpalo")] - unsafe impl Allocator for crate::BumpWrapper<'_> { - #[allow(clippy::map_err_ignore)] - fn allocate(&self, layout: Layout) -> Result, ()> { - self.0.try_alloc_layout(layout).map_err(|_| ()) - } - unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) {} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/bitmask.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/bitmask.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/bitmask.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/bitmask.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,122 +0,0 @@ -use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE}; -#[cfg(feature = "nightly")] -use core::intrinsics; - -/// A bit mask which contains the result of a `Match` operation on a `Group` and -/// allows iterating through them. -/// -/// The bit mask is arranged so that low-order bits represent lower memory -/// addresses for group match results. -/// -/// For implementation reasons, the bits in the set may be sparsely packed, so -/// that there is only one bit-per-byte used (the high bit, 7). If this is the -/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be -/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is -/// similarly a mask of all the actually-used bits. -#[derive(Copy, Clone)] -pub struct BitMask(pub BitMaskWord); - -#[allow(clippy::use_self)] -impl BitMask { - /// Returns a new `BitMask` with all bits inverted. 
- #[inline] - #[must_use] - pub fn invert(self) -> Self { - BitMask(self.0 ^ BITMASK_MASK) - } - - /// Flip the bit in the mask for the entry at the given index. - /// - /// Returns the bit's previous state. - #[inline] - #[allow(clippy::cast_ptr_alignment)] - #[cfg(feature = "raw")] - pub unsafe fn flip(&mut self, index: usize) -> bool { - // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit. - let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1); - self.0 ^= mask; - // The bit was set if the bit is now 0. - self.0 & mask == 0 - } - - /// Returns a new `BitMask` with the lowest bit removed. - #[inline] - #[must_use] - pub fn remove_lowest_bit(self) -> Self { - BitMask(self.0 & (self.0 - 1)) - } - /// Returns whether the `BitMask` has at least one set bit. - #[inline] - pub fn any_bit_set(self) -> bool { - self.0 != 0 - } - - /// Returns the first set bit in the `BitMask`, if there is one. - #[inline] - pub fn lowest_set_bit(self) -> Option { - if self.0 == 0 { - None - } else { - Some(unsafe { self.lowest_set_bit_nonzero() }) - } - } - - /// Returns the first set bit in the `BitMask`, if there is one. The - /// bitmask must not be empty. - #[inline] - #[cfg(feature = "nightly")] - pub unsafe fn lowest_set_bit_nonzero(self) -> usize { - intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE - } - #[inline] - #[cfg(not(feature = "nightly"))] - pub unsafe fn lowest_set_bit_nonzero(self) -> usize { - self.trailing_zeros() - } - - /// Returns the number of trailing zeroes in the `BitMask`. - #[inline] - pub fn trailing_zeros(self) -> usize { - // ARM doesn't have a trailing_zeroes instruction, and instead uses - // reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM - // versions (pre-ARMv7) don't have RBIT and need to emulate it - // instead. Since we only have 1 bit set in each byte on ARM, we can - // use swap_bytes (REV) + leading_zeroes instead. 
- if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 { - self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE - } else { - self.0.trailing_zeros() as usize / BITMASK_STRIDE - } - } - - /// Returns the number of leading zeroes in the `BitMask`. - #[inline] - pub fn leading_zeros(self) -> usize { - self.0.leading_zeros() as usize / BITMASK_STRIDE - } -} - -impl IntoIterator for BitMask { - type Item = usize; - type IntoIter = BitMaskIter; - - #[inline] - fn into_iter(self) -> BitMaskIter { - BitMaskIter(self) - } -} - -/// Iterator over the contents of a `BitMask`, returning the indices of set -/// bits. -pub struct BitMaskIter(BitMask); - -impl Iterator for BitMaskIter { - type Item = usize; - - #[inline] - fn next(&mut self) -> Option { - let bit = self.0.lowest_set_bit()?; - self.0 = self.0.remove_lowest_bit(); - Some(bit) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/generic.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/generic.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/generic.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/generic.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,154 +0,0 @@ -use super::bitmask::BitMask; -use super::EMPTY; -use core::{mem, ptr}; - -// Use the native word size as the group size. Using a 64-bit group size on -// a 32-bit architecture will just end up being more expensive because -// shifts and multiplies will need to be emulated. -#[cfg(any( - target_pointer_width = "64", - target_arch = "aarch64", - target_arch = "x86_64", - target_arch = "wasm32", -))] -type GroupWord = u64; -#[cfg(all( - target_pointer_width = "32", - not(target_arch = "aarch64"), - not(target_arch = "x86_64"), - not(target_arch = "wasm32"), -))] -type GroupWord = u32; - -pub type BitMaskWord = GroupWord; -pub const BITMASK_STRIDE: usize = 8; -// We only care about the highest bit of each byte for the mask. 
-#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)] -pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord; - -/// Helper function to replicate a byte across a `GroupWord`. -#[inline] -fn repeat(byte: u8) -> GroupWord { - GroupWord::from_ne_bytes([byte; Group::WIDTH]) -} - -/// Abstraction over a group of control bytes which can be scanned in -/// parallel. -/// -/// This implementation uses a word-sized integer. -#[derive(Copy, Clone)] -pub struct Group(GroupWord); - -// We perform all operations in the native endianness, and convert to -// little-endian just before creating a BitMask. The can potentially -// enable the compiler to eliminate unnecessary byte swaps if we are -// only checking whether a BitMask is empty. -#[allow(clippy::use_self)] -impl Group { - /// Number of bytes in the group. - pub const WIDTH: usize = mem::size_of::(); - - /// Returns a full group of empty bytes, suitable for use as the initial - /// value for an empty hash table. - /// - /// This is guaranteed to be aligned to the group size. - #[inline] - pub const fn static_empty() -> &'static [u8; Group::WIDTH] { - #[repr(C)] - struct AlignedBytes { - _align: [Group; 0], - bytes: [u8; Group::WIDTH], - } - const ALIGNED_BYTES: AlignedBytes = AlignedBytes { - _align: [], - bytes: [EMPTY; Group::WIDTH], - }; - &ALIGNED_BYTES.bytes - } - - /// Loads a group of bytes starting at the given address. - #[inline] - #[allow(clippy::cast_ptr_alignment)] // unaligned load - pub unsafe fn load(ptr: *const u8) -> Self { - Group(ptr::read_unaligned(ptr.cast())) - } - - /// Loads a group of bytes starting at the given address, which must be - /// aligned to `mem::align_of::()`. 
- #[inline] - #[allow(clippy::cast_ptr_alignment)] - pub unsafe fn load_aligned(ptr: *const u8) -> Self { - // FIXME: use align_offset once it stabilizes - debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); - Group(ptr::read(ptr.cast())) - } - - /// Stores the group of bytes to the given address, which must be - /// aligned to `mem::align_of::()`. - #[inline] - #[allow(clippy::cast_ptr_alignment)] - pub unsafe fn store_aligned(self, ptr: *mut u8) { - // FIXME: use align_offset once it stabilizes - debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); - ptr::write(ptr.cast(), self.0); - } - - /// Returns a `BitMask` indicating all bytes in the group which *may* - /// have the given value. - /// - /// This function may return a false positive in certain cases where - /// the byte in the group differs from the searched value only in its - /// lowest bit. This is fine because: - /// - This never happens for `EMPTY` and `DELETED`, only full entries. - /// - The check for key equality will catch these. - /// - This only happens if there is at least 1 true match. - /// - The chance of this happening is very low (< 1% chance per byte). - #[inline] - pub fn match_byte(self, byte: u8) -> BitMask { - // This algorithm is derived from - // https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord - let cmp = self.0 ^ repeat(byte); - BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le()) - } - - /// Returns a `BitMask` indicating all bytes in the group which are - /// `EMPTY`. - #[inline] - pub fn match_empty(self) -> BitMask { - // If the high bit is set, then the byte must be either: - // 1111_1111 (EMPTY) or 1000_0000 (DELETED). - // So we can just check if the top two bits are 1 by ANDing them. - BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le()) - } - - /// Returns a `BitMask` indicating all bytes in the group which are - /// `EMPTY` or `DELETED`. 
- #[inline] - pub fn match_empty_or_deleted(self) -> BitMask { - // A byte is EMPTY or DELETED iff the high bit is set - BitMask((self.0 & repeat(0x80)).to_le()) - } - - /// Returns a `BitMask` indicating all bytes in the group which are full. - #[inline] - pub fn match_full(self) -> BitMask { - self.match_empty_or_deleted().invert() - } - - /// Performs the following transformation on all bytes in the group: - /// - `EMPTY => EMPTY` - /// - `DELETED => EMPTY` - /// - `FULL => DELETED` - #[inline] - pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self { - // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 - // and high_bit = 0 (FULL) to 1000_0000 - // - // Here's this logic expanded to concrete values: - // let full = 1000_0000 (true) or 0000_0000 (false) - // !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry) - // !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry) - let full = !self.0 & repeat(0x80); - Group(!full + (full >> 7)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/mod.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/mod.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2460 +0,0 @@ -use crate::alloc::alloc::{handle_alloc_error, Layout}; -use crate::scopeguard::{guard, ScopeGuard}; -use crate::TryReserveError; -use core::iter::FusedIterator; -use core::marker::PhantomData; -use core::mem; -use core::mem::ManuallyDrop; -use core::mem::MaybeUninit; -use core::ptr::NonNull; -use core::{hint, ptr}; - -cfg_if! { - // Use the SSE2 implementation if possible: it allows us to scan 16 buckets - // at once instead of 8. We don't bother with AVX since it would require - // runtime dispatch and wouldn't gain us much anyways: the probability of - // finding a match drops off drastically after the first few buckets. 
- // - // I attempted an implementation on ARM using NEON instructions, but it - // turns out that most NEON instructions have multi-cycle latency, which in - // the end outweighs any gains over the generic implementation. - if #[cfg(all( - target_feature = "sse2", - any(target_arch = "x86", target_arch = "x86_64"), - not(miri) - ))] { - mod sse2; - use sse2 as imp; - } else { - #[path = "generic.rs"] - mod generic; - use generic as imp; - } -} - -mod alloc; -pub(crate) use self::alloc::{do_alloc, Allocator, Global}; - -mod bitmask; - -use self::bitmask::{BitMask, BitMaskIter}; -use self::imp::Group; - -// Branch prediction hint. This is currently only available on nightly but it -// consistently improves performance by 10-15%. -#[cfg(feature = "nightly")] -use core::intrinsics::{likely, unlikely}; - -// On stable we can use #[cold] to get a equivalent effect: this attributes -// suggests that the function is unlikely to be called -#[cfg(not(feature = "nightly"))] -#[inline] -#[cold] -fn cold() {} - -#[cfg(not(feature = "nightly"))] -#[inline] -fn likely(b: bool) -> bool { - if !b { - cold(); - } - b -} -#[cfg(not(feature = "nightly"))] -#[inline] -fn unlikely(b: bool) -> bool { - if b { - cold(); - } - b -} - -#[inline] -unsafe fn offset_from(to: *const T, from: *const T) -> usize { - to.offset_from(from) as usize -} - -/// Whether memory allocation errors should return an error or abort. -#[derive(Copy, Clone)] -enum Fallibility { - Fallible, - Infallible, -} - -impl Fallibility { - /// Error to return on capacity overflow. - #[cfg_attr(feature = "inline-more", inline)] - fn capacity_overflow(self) -> TryReserveError { - match self { - Fallibility::Fallible => TryReserveError::CapacityOverflow, - Fallibility::Infallible => panic!("Hash table capacity overflow"), - } - } - - /// Error to return on allocation error. 
- #[cfg_attr(feature = "inline-more", inline)] - fn alloc_err(self, layout: Layout) -> TryReserveError { - match self { - Fallibility::Fallible => TryReserveError::AllocError { layout }, - Fallibility::Infallible => handle_alloc_error(layout), - } - } -} - -/// Control byte value for an empty bucket. -const EMPTY: u8 = 0b1111_1111; - -/// Control byte value for a deleted bucket. -const DELETED: u8 = 0b1000_0000; - -/// Checks whether a control byte represents a full bucket (top bit is clear). -#[inline] -fn is_full(ctrl: u8) -> bool { - ctrl & 0x80 == 0 -} - -/// Checks whether a control byte represents a special value (top bit is set). -#[inline] -fn is_special(ctrl: u8) -> bool { - ctrl & 0x80 != 0 -} - -/// Checks whether a special control value is EMPTY (just check 1 bit). -#[inline] -fn special_is_empty(ctrl: u8) -> bool { - debug_assert!(is_special(ctrl)); - ctrl & 0x01 != 0 -} - -/// Primary hash function, used to select the initial bucket to probe from. -#[inline] -#[allow(clippy::cast_possible_truncation)] -fn h1(hash: u64) -> usize { - // On 32-bit platforms we simply ignore the higher hash bits. - hash as usize -} - -/// Secondary hash function, saved in the low 7 bits of the control byte. -#[inline] -#[allow(clippy::cast_possible_truncation)] -fn h2(hash: u64) -> u8 { - // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit - // value, some hash functions (such as FxHash) produce a usize result - // instead, which means that the top 32 bits are 0 on 32-bit platforms. - let hash_len = usize::min(mem::size_of::(), mem::size_of::()); - let top7 = hash >> (hash_len * 8 - 7); - (top7 & 0x7f) as u8 // truncation -} - -/// Probe sequence based on triangular numbers, which is guaranteed (since our -/// table size is a power of two) to visit every group of elements exactly once. -/// -/// A triangular probe has us jump by 1 more group every time. 
So first we -/// jump by 1 group (meaning we just continue our linear scan), then 2 groups -/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on. -/// -/// Proof that the probe will visit every group in the table: -/// -struct ProbeSeq { - pos: usize, - stride: usize, -} - -impl ProbeSeq { - #[inline] - fn move_next(&mut self, bucket_mask: usize) { - // We should have found an empty bucket by now and ended the probe. - debug_assert!( - self.stride <= bucket_mask, - "Went past end of probe sequence" - ); - - self.stride += Group::WIDTH; - self.pos += self.stride; - self.pos &= bucket_mask; - } -} - -/// Returns the number of buckets needed to hold the given number of items, -/// taking the maximum load factor into account. -/// -/// Returns `None` if an overflow occurs. -// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258 -#[cfg_attr(target_os = "emscripten", inline(never))] -#[cfg_attr(not(target_os = "emscripten"), inline)] -fn capacity_to_buckets(cap: usize) -> Option { - debug_assert_ne!(cap, 0); - - // For small tables we require at least 1 empty bucket so that lookups are - // guaranteed to terminate if an element doesn't exist in the table. - if cap < 8 { - // We don't bother with a table size of 2 buckets since that can only - // hold a single element. Instead we skip directly to a 4 bucket table - // which can hold 3 elements. - return Some(if cap < 4 { 4 } else { 8 }); - } - - // Otherwise require 1/8 buckets to be empty (87.5% load) - // - // Be careful when modifying this, calculate_layout relies on the - // overflow check here. - let adjusted_cap = cap.checked_mul(8)? / 7; - - // Any overflows will have been caught by the checked_mul. Also, any - // rounding errors from the division above will be cleaned up by - // next_power_of_two (which can't overflow because of the previous division). 
- Some(adjusted_cap.next_power_of_two()) -} - -/// Returns the maximum effective capacity for the given bucket mask, taking -/// the maximum load factor into account. -#[inline] -fn bucket_mask_to_capacity(bucket_mask: usize) -> usize { - if bucket_mask < 8 { - // For tables with 1/2/4/8 buckets, we always reserve one empty slot. - // Keep in mind that the bucket mask is one less than the bucket count. - bucket_mask - } else { - // For larger tables we reserve 12.5% of the slots as empty. - ((bucket_mask + 1) / 8) * 7 - } -} - -/// Helper which allows the max calculation for ctrl_align to be statically computed for each T -/// while keeping the rest of `calculate_layout_for` independent of `T` -#[derive(Copy, Clone)] -struct TableLayout { - size: usize, - ctrl_align: usize, -} - -impl TableLayout { - #[inline] - fn new() -> Self { - let layout = Layout::new::(); - Self { - size: layout.size(), - ctrl_align: usize::max(layout.align(), Group::WIDTH), - } - } - - #[inline] - fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> { - debug_assert!(buckets.is_power_of_two()); - - let TableLayout { size, ctrl_align } = self; - // Manual layout calculation since Layout methods are not yet stable. - let ctrl_offset = - size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1); - let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?; - - Some(( - unsafe { Layout::from_size_align_unchecked(len, ctrl_align) }, - ctrl_offset, - )) - } -} - -/// Returns a Layout which describes the allocation required for a hash table, -/// and the offset of the control bytes in the allocation. -/// (the offset is also one past last element of buckets) -/// -/// Returns `None` if an overflow occurs. -#[cfg_attr(feature = "inline-more", inline)] -fn calculate_layout(buckets: usize) -> Option<(Layout, usize)> { - TableLayout::new::().calculate_layout_for(buckets) -} - -/// A reference to a hash table bucket containing a `T`. 
-/// -/// This is usually just a pointer to the element itself. However if the element -/// is a ZST, then we instead track the index of the element in the table so -/// that `erase` works properly. -pub struct Bucket { - // Actually it is pointer to next element than element itself - // this is needed to maintain pointer arithmetic invariants - // keeping direct pointer to element introduces difficulty. - // Using `NonNull` for variance and niche layout - ptr: NonNull, -} - -// This Send impl is needed for rayon support. This is safe since Bucket is -// never exposed in a public API. -unsafe impl Send for Bucket {} - -impl Clone for Bucket { - #[inline] - fn clone(&self) -> Self { - Self { ptr: self.ptr } - } -} - -impl Bucket { - #[inline] - unsafe fn from_base_index(base: NonNull, index: usize) -> Self { - let ptr = if mem::size_of::() == 0 { - // won't overflow because index must be less than length - (index + 1) as *mut T - } else { - base.as_ptr().sub(index) - }; - Self { - ptr: NonNull::new_unchecked(ptr), - } - } - #[inline] - unsafe fn to_base_index(&self, base: NonNull) -> usize { - if mem::size_of::() == 0 { - self.ptr.as_ptr() as usize - 1 - } else { - offset_from(base.as_ptr(), self.ptr.as_ptr()) - } - } - #[inline] - pub fn as_ptr(&self) -> *mut T { - if mem::size_of::() == 0 { - // Just return an arbitrary ZST pointer which is properly aligned - mem::align_of::() as *mut T - } else { - unsafe { self.ptr.as_ptr().sub(1) } - } - } - #[inline] - unsafe fn next_n(&self, offset: usize) -> Self { - let ptr = if mem::size_of::() == 0 { - (self.ptr.as_ptr() as usize + offset) as *mut T - } else { - self.ptr.as_ptr().sub(offset) - }; - Self { - ptr: NonNull::new_unchecked(ptr), - } - } - #[cfg_attr(feature = "inline-more", inline)] - pub unsafe fn drop(&self) { - self.as_ptr().drop_in_place(); - } - #[inline] - pub unsafe fn read(&self) -> T { - self.as_ptr().read() - } - #[inline] - pub unsafe fn write(&self, val: T) { - self.as_ptr().write(val); - } - 
#[inline] - pub unsafe fn as_ref<'a>(&self) -> &'a T { - &*self.as_ptr() - } - #[inline] - pub unsafe fn as_mut<'a>(&self) -> &'a mut T { - &mut *self.as_ptr() - } - #[cfg(feature = "raw")] - #[inline] - pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) { - self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1); - } -} - -/// A raw hash table with an unsafe API. -pub struct RawTable { - table: RawTableInner, - // Tell dropck that we own instances of T. - marker: PhantomData, -} - -/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless -/// of how many different key-value types are used. -struct RawTableInner { - // Mask to get an index from a hash value. The value is one less than the - // number of buckets in the table. - bucket_mask: usize, - - // [Padding], T1, T2, ..., Tlast, C1, C2, ... - // ^ points here - ctrl: NonNull, - - // Number of elements that can be inserted before we need to grow the table - growth_left: usize, - - // Number of elements in the table, only really used by len() - items: usize, - - alloc: A, -} - -impl RawTable { - /// Creates a new empty hash table without allocating any memory. - /// - /// In effect this returns a table with exactly 1 bucket. However we can - /// leave the data pointer dangling since that bucket is never written to - /// due to our load factor forcing us to always have at least 1 free bucket. - #[inline] - pub const fn new() -> Self { - Self { - table: RawTableInner::new_in(Global), - marker: PhantomData, - } - } - - /// Attempts to allocate a new hash table with at least enough capacity - /// for inserting the given number of elements without reallocating. - #[cfg(feature = "raw")] - pub fn try_with_capacity(capacity: usize) -> Result { - Self::try_with_capacity_in(capacity, Global) - } - - /// Allocates a new hash table with at least enough capacity for inserting - /// the given number of elements without reallocating. 
- pub fn with_capacity(capacity: usize) -> Self { - Self::with_capacity_in(capacity, Global) - } -} - -impl RawTable { - /// Creates a new empty hash table without allocating any memory, using the - /// given allocator. - /// - /// In effect this returns a table with exactly 1 bucket. However we can - /// leave the data pointer dangling since that bucket is never written to - /// due to our load factor forcing us to always have at least 1 free bucket. - #[inline] - pub fn new_in(alloc: A) -> Self { - Self { - table: RawTableInner::new_in(alloc), - marker: PhantomData, - } - } - - /// Allocates a new hash table with the given number of buckets. - /// - /// The control bytes are left uninitialized. - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn new_uninitialized( - alloc: A, - buckets: usize, - fallibility: Fallibility, - ) -> Result { - debug_assert!(buckets.is_power_of_two()); - - Ok(Self { - table: RawTableInner::new_uninitialized( - alloc, - TableLayout::new::(), - buckets, - fallibility, - )?, - marker: PhantomData, - }) - } - - /// Attempts to allocate a new hash table with at least enough capacity - /// for inserting the given number of elements without reallocating. - fn fallible_with_capacity( - alloc: A, - capacity: usize, - fallibility: Fallibility, - ) -> Result { - Ok(Self { - table: RawTableInner::fallible_with_capacity( - alloc, - TableLayout::new::(), - capacity, - fallibility, - )?, - marker: PhantomData, - }) - } - - /// Attempts to allocate a new hash table using the given allocator, with at least enough - /// capacity for inserting the given number of elements without reallocating. - #[cfg(feature = "raw")] - pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { - Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible) - } - - /// Allocates a new hash table using the given allocator, with at least enough capacity for - /// inserting the given number of elements without reallocating. 
- pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) { - Ok(capacity) => capacity, - Err(_) => unsafe { hint::unreachable_unchecked() }, - } - } - - /// Returns a reference to the underlying allocator. - #[inline] - pub fn allocator(&self) -> &A { - &self.table.alloc - } - - /// Deallocates the table without dropping any entries. - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn free_buckets(&mut self) { - self.table.free_buckets(TableLayout::new::()); - } - - /// Returns pointer to one past last element of data table. - #[inline] - pub unsafe fn data_end(&self) -> NonNull { - NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) - } - - /// Returns pointer to start of data table. - #[inline] - #[cfg(feature = "nightly")] - pub unsafe fn data_start(&self) -> *mut T { - self.data_end().as_ptr().wrapping_sub(self.buckets()) - } - - /// Returns the index of a bucket from a `Bucket`. - #[inline] - pub unsafe fn bucket_index(&self, bucket: &Bucket) -> usize { - bucket.to_base_index(self.data_end()) - } - - /// Returns a pointer to an element in the table. - #[inline] - pub unsafe fn bucket(&self, index: usize) -> Bucket { - debug_assert_ne!(self.table.bucket_mask, 0); - debug_assert!(index < self.buckets()); - Bucket::from_base_index(self.data_end(), index) - } - - /// Erases an element from the table without dropping it. - #[cfg_attr(feature = "inline-more", inline)] - #[deprecated(since = "0.8.1", note = "use erase or remove instead")] - pub unsafe fn erase_no_drop(&mut self, item: &Bucket) { - let index = self.bucket_index(item); - self.table.erase(index); - } - - /// Erases an element from the table, dropping it in place. 
- #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::needless_pass_by_value)] - #[allow(deprecated)] - pub unsafe fn erase(&mut self, item: Bucket) { - // Erase the element from the table first since drop might panic. - self.erase_no_drop(&item); - item.drop(); - } - - /// Finds and erases an element from the table, dropping it in place. - /// Returns true if an element was found. - #[cfg(feature = "raw")] - #[cfg_attr(feature = "inline-more", inline)] - pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool { - // Avoid `Option::map` because it bloats LLVM IR. - if let Some(bucket) = self.find(hash, eq) { - unsafe { - self.erase(bucket); - } - true - } else { - false - } - } - - /// Removes an element from the table, returning it. - #[cfg_attr(feature = "inline-more", inline)] - #[allow(clippy::needless_pass_by_value)] - #[allow(deprecated)] - pub unsafe fn remove(&mut self, item: Bucket) -> T { - self.erase_no_drop(&item); - item.read() - } - - /// Finds and removes an element from the table, returning it. - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option { - // Avoid `Option::map` because it bloats LLVM IR. - match self.find(hash, eq) { - Some(bucket) => Some(unsafe { self.remove(bucket) }), - None => None, - } - } - - /// Marks all table buckets as empty without dropping their contents. - #[cfg_attr(feature = "inline-more", inline)] - pub fn clear_no_drop(&mut self) { - self.table.clear_no_drop(); - } - - /// Removes all elements from the table without freeing the backing memory. 
- #[cfg_attr(feature = "inline-more", inline)] - pub fn clear(&mut self) { - // Ensure that the table is reset even if one of the drops panic - let mut self_ = guard(self, |self_| self_.clear_no_drop()); - unsafe { - self_.drop_elements(); - } - } - - unsafe fn drop_elements(&mut self) { - if mem::needs_drop::() && !self.is_empty() { - for item in self.iter() { - item.drop(); - } - } - } - - /// Shrinks the table to fit `max(self.len(), min_size)` elements. - #[cfg_attr(feature = "inline-more", inline)] - pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) { - // Calculate the minimal number of elements that we need to reserve - // space for. - let min_size = usize::max(self.table.items, min_size); - if min_size == 0 { - *self = Self::new_in(self.table.alloc.clone()); - return; - } - - // Calculate the number of buckets that we need for this number of - // elements. If the calculation overflows then the requested bucket - // count must be larger than what we have right and nothing needs to be - // done. - let min_buckets = match capacity_to_buckets(min_size) { - Some(buckets) => buckets, - None => return, - }; - - // If we have more buckets than we need, shrink the table. - if min_buckets < self.buckets() { - // Fast path if the table is empty - if self.table.items == 0 { - *self = Self::with_capacity_in(min_size, self.table.alloc.clone()); - } else { - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - if self - .resize(min_size, hasher, Fallibility::Infallible) - .is_err() - { - unsafe { hint::unreachable_unchecked() } - } - } - } - } - - /// Ensures that at least `additional` items can be inserted into the table - /// without reallocation. - #[cfg_attr(feature = "inline-more", inline)] - pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { - if additional > self.table.growth_left { - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. 
- if self - .reserve_rehash(additional, hasher, Fallibility::Infallible) - .is_err() - { - unsafe { hint::unreachable_unchecked() } - } - } - } - - /// Tries to ensure that at least `additional` items can be inserted into - /// the table without reallocation. - #[cfg_attr(feature = "inline-more", inline)] - pub fn try_reserve( - &mut self, - additional: usize, - hasher: impl Fn(&T) -> u64, - ) -> Result<(), TryReserveError> { - if additional > self.table.growth_left { - self.reserve_rehash(additional, hasher, Fallibility::Fallible) - } else { - Ok(()) - } - } - - /// Out-of-line slow path for `reserve` and `try_reserve`. - #[cold] - #[inline(never)] - fn reserve_rehash( - &mut self, - additional: usize, - hasher: impl Fn(&T) -> u64, - fallibility: Fallibility, - ) -> Result<(), TryReserveError> { - unsafe { - self.table.reserve_rehash_inner( - additional, - &|table, index| hasher(table.bucket::(index).as_ref()), - fallibility, - TableLayout::new::(), - if mem::needs_drop::() { - Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T))) - } else { - None - }, - ) - } - } - - /// Allocates a new table of a different size and moves the contents of the - /// current table into it. - fn resize( - &mut self, - capacity: usize, - hasher: impl Fn(&T) -> u64, - fallibility: Fallibility, - ) -> Result<(), TryReserveError> { - unsafe { - self.table.resize_inner( - capacity, - &|table, index| hasher(table.bucket::(index).as_ref()), - fallibility, - TableLayout::new::(), - ) - } - } - - /// Inserts a new element into the table, and returns its raw bucket. - /// - /// This does not check if the given element already exists in the table. - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { - unsafe { - let mut index = self.table.find_insert_slot(hash); - - // We can avoid growing the table once we have reached our load - // factor if we are replacing a tombstone. 
This works since the - // number of EMPTY slots does not change in this case. - let old_ctrl = *self.table.ctrl(index); - if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) { - self.reserve(1, hasher); - index = self.table.find_insert_slot(hash); - } - - self.table.record_item_insert_at(index, old_ctrl, hash); - - let bucket = self.bucket(index); - bucket.write(value); - bucket - } - } - - /// Attempts to insert a new element without growing the table and return its raw bucket. - /// - /// Returns an `Err` containing the given element if inserting it would require growing the - /// table. - /// - /// This does not check if the given element already exists in the table. - #[cfg(feature = "raw")] - #[cfg_attr(feature = "inline-more", inline)] - pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result, T> { - unsafe { - match self.table.prepare_insert_no_grow(hash) { - Ok(index) => { - let bucket = self.bucket(index); - bucket.write(value); - Ok(bucket) - } - Err(()) => Err(value), - } - } - } - - /// Inserts a new element into the table, and returns a mutable reference to it. - /// - /// This does not check if the given element already exists in the table. - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T { - unsafe { self.insert(hash, value, hasher).as_mut() } - } - - /// Inserts a new element into the table, without growing the table. - /// - /// There must be enough space in the table to insert the new element. - /// - /// This does not check if the given element already exists in the table. 
- #[cfg_attr(feature = "inline-more", inline)] - #[cfg(any(feature = "raw", feature = "rustc-internal-api"))] - pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket { - let (index, old_ctrl) = self.table.prepare_insert_slot(hash); - let bucket = self.table.bucket(index); - - // If we are replacing a DELETED entry then we don't need to update - // the load counter. - self.table.growth_left -= special_is_empty(old_ctrl) as usize; - - bucket.write(value); - self.table.items += 1; - bucket - } - - /// Temporary removes a bucket, applying the given function to the removed - /// element and optionally put back the returned value in the same bucket. - /// - /// Returns `true` if the bucket still contains an element - /// - /// This does not check if the given bucket is actually occupied. - #[cfg_attr(feature = "inline-more", inline)] - pub unsafe fn replace_bucket_with(&mut self, bucket: Bucket, f: F) -> bool - where - F: FnOnce(T) -> Option, - { - let index = self.bucket_index(&bucket); - let old_ctrl = *self.table.ctrl(index); - debug_assert!(is_full(old_ctrl)); - let old_growth_left = self.table.growth_left; - let item = self.remove(bucket); - if let Some(new_item) = f(item) { - self.table.growth_left = old_growth_left; - self.table.set_ctrl(index, old_ctrl); - self.table.items += 1; - self.bucket(index).write(new_item); - true - } else { - false - } - } - - /// Searches for an element in the table. - #[inline] - pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> { - let result = self.table.find_inner(hash, &mut |index| unsafe { - eq(self.bucket(index).as_ref()) - }); - - // Avoid `Option::map` because it bloats LLVM IR. - match result { - Some(index) => Some(unsafe { self.bucket(index) }), - None => None, - } - } - - /// Gets a reference to an element in the table. - #[inline] - pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { - // Avoid `Option::map` because it bloats LLVM IR. 
- match self.find(hash, eq) { - Some(bucket) => Some(unsafe { bucket.as_ref() }), - None => None, - } - } - - /// Gets a mutable reference to an element in the table. - #[inline] - pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { - // Avoid `Option::map` because it bloats LLVM IR. - match self.find(hash, eq) { - Some(bucket) => Some(unsafe { bucket.as_mut() }), - None => None, - } - } - - /// Attempts to get mutable references to `N` entries in the table at once. - /// - /// Returns an array of length `N` with the results of each query. - /// - /// At most one mutable reference will be returned to any entry. `None` will be returned if any - /// of the hashes are duplicates. `None` will be returned if the hash is not found. - /// - /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to - /// the `i`th key to be looked up. - pub fn get_many_mut( - &mut self, - hashes: [u64; N], - eq: impl FnMut(usize, &T) -> bool, - ) -> Option<[&'_ mut T; N]> { - unsafe { - let ptrs = self.get_many_mut_pointers(hashes, eq)?; - - for (i, &cur) in ptrs.iter().enumerate() { - if ptrs[..i].iter().any(|&prev| ptr::eq::(prev, cur)) { - return None; - } - } - // All bucket are distinct from all previous buckets so we're clear to return the result - // of the lookup. - - // TODO use `MaybeUninit::array_assume_init` here instead once that's stable. - Some(mem::transmute_copy(&ptrs)) - } - } - - pub unsafe fn get_many_unchecked_mut( - &mut self, - hashes: [u64; N], - eq: impl FnMut(usize, &T) -> bool, - ) -> Option<[&'_ mut T; N]> { - let ptrs = self.get_many_mut_pointers(hashes, eq)?; - Some(mem::transmute_copy(&ptrs)) - } - - unsafe fn get_many_mut_pointers( - &mut self, - hashes: [u64; N], - mut eq: impl FnMut(usize, &T) -> bool, - ) -> Option<[*mut T; N]> { - // TODO use `MaybeUninit::uninit_array` here instead once that's stable. 
- let mut outs: MaybeUninit<[*mut T; N]> = MaybeUninit::uninit(); - let outs_ptr = outs.as_mut_ptr(); - - for (i, &hash) in hashes.iter().enumerate() { - let cur = self.find(hash, |k| eq(i, k))?; - *(*outs_ptr).get_unchecked_mut(i) = cur.as_mut(); - } - - // TODO use `MaybeUninit::array_assume_init` here instead once that's stable. - Some(outs.assume_init()) - } - - /// Returns the number of elements the map can hold without reallocating. - /// - /// This number is a lower bound; the table might be able to hold - /// more, but is guaranteed to be able to hold at least this many. - #[inline] - pub fn capacity(&self) -> usize { - self.table.items + self.table.growth_left - } - - /// Returns the number of elements in the table. - #[inline] - pub fn len(&self) -> usize { - self.table.items - } - - /// Returns `true` if the table contains no elements. - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the number of buckets in the table. - #[inline] - pub fn buckets(&self) -> usize { - self.table.bucket_mask + 1 - } - - /// Returns an iterator over every element in the table. It is up to - /// the caller to ensure that the `RawTable` outlives the `RawIter`. - /// Because we cannot make the `next` method unsafe on the `RawIter` - /// struct, we have to make the `iter` method unsafe. - #[inline] - pub unsafe fn iter(&self) -> RawIter { - let data = Bucket::from_base_index(self.data_end(), 0); - RawIter { - iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()), - items: self.table.items, - } - } - - /// Returns an iterator over occupied buckets that could match a given hash. - /// - /// `RawTable` only stores 7 bits of the hash value, so this iterator may - /// return items that have a hash value different than the one provided. You - /// should always validate the returned values before using them. - /// - /// It is up to the caller to ensure that the `RawTable` outlives the - /// `RawIterHash`. 
Because we cannot make the `next` method unsafe on the - /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe. - #[cfg_attr(feature = "inline-more", inline)] - #[cfg(feature = "raw")] - pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> { - RawIterHash::new(self, hash) - } - - /// Returns an iterator which removes all elements from the table without - /// freeing the memory. - #[cfg_attr(feature = "inline-more", inline)] - pub fn drain(&mut self) -> RawDrain<'_, T, A> { - unsafe { - let iter = self.iter(); - self.drain_iter_from(iter) - } - } - - /// Returns an iterator which removes all elements from the table without - /// freeing the memory. - /// - /// Iteration starts at the provided iterator's current location. - /// - /// It is up to the caller to ensure that the iterator is valid for this - /// `RawTable` and covers all items that remain in the table. - #[cfg_attr(feature = "inline-more", inline)] - pub unsafe fn drain_iter_from(&mut self, iter: RawIter) -> RawDrain<'_, T, A> { - debug_assert_eq!(iter.len(), self.len()); - RawDrain { - iter, - table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))), - orig_table: NonNull::from(self), - marker: PhantomData, - } - } - - /// Returns an iterator which consumes all elements from the table. - /// - /// Iteration starts at the provided iterator's current location. - /// - /// It is up to the caller to ensure that the iterator is valid for this - /// `RawTable` and covers all items that remain in the table. - pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter { - debug_assert_eq!(iter.len(), self.len()); - - let alloc = self.table.alloc.clone(); - let allocation = self.into_allocation(); - RawIntoIter { - iter, - allocation, - marker: PhantomData, - alloc, - } - } - - /// Converts the table into a raw allocation. The contents of the table - /// should be dropped using a `RawIter` before freeing the allocation. 
- #[cfg_attr(feature = "inline-more", inline)] - pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout)> { - let alloc = if self.table.is_empty_singleton() { - None - } else { - // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. - let (layout, ctrl_offset) = match calculate_layout::(self.table.buckets()) { - Some(lco) => lco, - None => unsafe { hint::unreachable_unchecked() }, - }; - Some(( - unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) }, - layout, - )) - }; - mem::forget(self); - alloc - } -} - -unsafe impl Send for RawTable -where - T: Send, - A: Send, -{ -} -unsafe impl Sync for RawTable -where - T: Sync, - A: Sync, -{ -} - -impl RawTableInner { - #[inline] - const fn new_in(alloc: A) -> Self { - Self { - // Be careful to cast the entire slice to a raw pointer. - ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) }, - bucket_mask: 0, - items: 0, - growth_left: 0, - alloc, - } - } -} - -impl RawTableInner { - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn new_uninitialized( - alloc: A, - table_layout: TableLayout, - buckets: usize, - fallibility: Fallibility, - ) -> Result { - debug_assert!(buckets.is_power_of_two()); - - // Avoid `Option::ok_or_else` because it bloats LLVM IR. - let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) { - Some(lco) => lco, - None => return Err(fallibility.capacity_overflow()), - }; - - // We need an additional check to ensure that the allocation doesn't - // exceed `isize::MAX`. We can skip this check on 64-bit systems since - // such allocations will never succeed anyways. - // - // This mirrors what Vec does in the standard library. 
- if mem::size_of::() < 8 && layout.size() > isize::MAX as usize { - return Err(fallibility.capacity_overflow()); - } - - let ptr: NonNull = match do_alloc(&alloc, layout) { - Ok(block) => block.cast(), - Err(_) => return Err(fallibility.alloc_err(layout)), - }; - - let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset)); - Ok(Self { - ctrl, - bucket_mask: buckets - 1, - items: 0, - growth_left: bucket_mask_to_capacity(buckets - 1), - alloc, - }) - } - - #[inline] - fn fallible_with_capacity( - alloc: A, - table_layout: TableLayout, - capacity: usize, - fallibility: Fallibility, - ) -> Result { - if capacity == 0 { - Ok(Self::new_in(alloc)) - } else { - unsafe { - let buckets = - capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?; - - let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?; - result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes()); - - Ok(result) - } - } - } - - /// Searches for an empty or deleted bucket which is suitable for inserting - /// a new element and sets the hash for that slot. - /// - /// There must be at least 1 empty bucket in the table. - #[inline] - unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) { - let index = self.find_insert_slot(hash); - let old_ctrl = *self.ctrl(index); - self.set_ctrl_h2(index, hash); - (index, old_ctrl) - } - - /// Searches for an empty or deleted bucket which is suitable for inserting - /// a new element. - /// - /// There must be at least 1 empty bucket in the table. - #[inline] - fn find_insert_slot(&self, hash: u64) -> usize { - let mut probe_seq = self.probe_seq(hash); - loop { - unsafe { - let group = Group::load(self.ctrl(probe_seq.pos)); - if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() { - let result = (probe_seq.pos + bit) & self.bucket_mask; - - // In tables smaller than the group width, trailing control - // bytes outside the range of the table are filled with - // EMPTY entries. 
These will unfortunately trigger a - // match, but once masked may point to a full bucket that - // is already occupied. We detect this situation here and - // perform a second scan starting at the beginning of the - // table. This second scan is guaranteed to find an empty - // slot (due to the load factor) before hitting the trailing - // control bytes (containing EMPTY). - if unlikely(is_full(*self.ctrl(result))) { - debug_assert!(self.bucket_mask < Group::WIDTH); - debug_assert_ne!(probe_seq.pos, 0); - return Group::load_aligned(self.ctrl(0)) - .match_empty_or_deleted() - .lowest_set_bit_nonzero(); - } - - return result; - } - } - probe_seq.move_next(self.bucket_mask); - } - } - - /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of - /// code generated, but it is eliminated by LLVM optimizations. - #[inline] - fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option { - let h2_hash = h2(hash); - let mut probe_seq = self.probe_seq(hash); - - loop { - let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; - - for bit in group.match_byte(h2_hash) { - let index = (probe_seq.pos + bit) & self.bucket_mask; - - if likely(eq(index)) { - return Some(index); - } - } - - if likely(group.match_empty().any_bit_set()) { - return None; - } - - probe_seq.move_next(self.bucket_mask); - } - } - - #[allow(clippy::mut_mut)] - #[inline] - unsafe fn prepare_rehash_in_place(&mut self) { - // Bulk convert all full control bytes to DELETED, and all DELETED - // control bytes to EMPTY. This effectively frees up all buckets - // containing a DELETED entry. - for i in (0..self.buckets()).step_by(Group::WIDTH) { - let group = Group::load_aligned(self.ctrl(i)); - let group = group.convert_special_to_empty_and_full_to_deleted(); - group.store_aligned(self.ctrl(i)); - } - - // Fix up the trailing control bytes. See the comments in set_ctrl - // for the handling of tables smaller than the group width. 
- if self.buckets() < Group::WIDTH { - self.ctrl(0) - .copy_to(self.ctrl(Group::WIDTH), self.buckets()); - } else { - self.ctrl(0) - .copy_to(self.ctrl(self.buckets()), Group::WIDTH); - } - } - - #[inline] - unsafe fn bucket(&self, index: usize) -> Bucket { - debug_assert_ne!(self.bucket_mask, 0); - debug_assert!(index < self.buckets()); - Bucket::from_base_index(self.data_end(), index) - } - - #[inline] - unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 { - debug_assert_ne!(self.bucket_mask, 0); - debug_assert!(index < self.buckets()); - let base: *mut u8 = self.data_end().as_ptr(); - base.sub((index + 1) * size_of) - } - - #[inline] - unsafe fn data_end(&self) -> NonNull { - NonNull::new_unchecked(self.ctrl.as_ptr().cast()) - } - - /// Returns an iterator-like object for a probe sequence on the table. - /// - /// This iterator never terminates, but is guaranteed to visit each bucket - /// group exactly once. The loop using `probe_seq` must terminate upon - /// reaching a group containing an empty bucket. 
- #[inline] - fn probe_seq(&self, hash: u64) -> ProbeSeq { - ProbeSeq { - pos: h1(hash) & self.bucket_mask, - stride: 0, - } - } - - /// Returns the index of a bucket for which a value must be inserted if there is enough rooom - /// in the table, otherwise returns error - #[cfg(feature = "raw")] - #[inline] - unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result { - let index = self.find_insert_slot(hash); - let old_ctrl = *self.ctrl(index); - if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) { - Err(()) - } else { - self.record_item_insert_at(index, old_ctrl, hash); - Ok(index) - } - } - - #[inline] - unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) { - self.growth_left -= usize::from(special_is_empty(old_ctrl)); - self.set_ctrl_h2(index, hash); - self.items += 1; - } - - #[inline] - fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool { - let probe_seq_pos = self.probe_seq(hash).pos; - let probe_index = - |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH; - probe_index(i) == probe_index(new_i) - } - - /// Sets a control byte to the hash, and possibly also the replicated control byte at - /// the end of the array. - #[inline] - unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) { - self.set_ctrl(index, h2(hash)); - } - - #[inline] - unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 { - let prev_ctrl = *self.ctrl(index); - self.set_ctrl_h2(index, hash); - prev_ctrl - } - - /// Sets a control byte, and possibly also the replicated control byte at - /// the end of the array. - #[inline] - unsafe fn set_ctrl(&self, index: usize, ctrl: u8) { - // Replicate the first Group::WIDTH control bytes at the end of - // the array without using a branch: - // - If index >= Group::WIDTH then index == index2. - // - Otherwise index2 == self.bucket_mask + 1 + index. 
- // - // The very last replicated control byte is never actually read because - // we mask the initial index for unaligned loads, but we write it - // anyways because it makes the set_ctrl implementation simpler. - // - // If there are fewer buckets than Group::WIDTH then this code will - // replicate the buckets at the end of the trailing group. For example - // with 2 buckets and a group size of 4, the control bytes will look - // like this: - // - // Real | Replicated - // --------------------------------------------- - // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] | - // --------------------------------------------- - let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH; - - *self.ctrl(index) = ctrl; - *self.ctrl(index2) = ctrl; - } - - /// Returns a pointer to a control byte. - #[inline] - unsafe fn ctrl(&self, index: usize) -> *mut u8 { - debug_assert!(index < self.num_ctrl_bytes()); - self.ctrl.as_ptr().add(index) - } - - #[inline] - fn buckets(&self) -> usize { - self.bucket_mask + 1 - } - - #[inline] - fn num_ctrl_bytes(&self) -> usize { - self.bucket_mask + 1 + Group::WIDTH - } - - #[inline] - fn is_empty_singleton(&self) -> bool { - self.bucket_mask == 0 - } - - #[allow(clippy::mut_mut)] - #[inline] - unsafe fn prepare_resize( - &self, - table_layout: TableLayout, - capacity: usize, - fallibility: Fallibility, - ) -> Result, TryReserveError> { - debug_assert!(self.items <= capacity); - - // Allocate and initialize the new table. - let mut new_table = RawTableInner::fallible_with_capacity( - self.alloc.clone(), - table_layout, - capacity, - fallibility, - )?; - new_table.growth_left -= self.items; - new_table.items = self.items; - - // The hash function may panic, in which case we simply free the new - // table without dropping any elements that may have been copied into - // it. - // - // This guard is also used to free the old table on success, see - // the comment at the bottom of this function. 
- Ok(guard(new_table, move |self_| { - if !self_.is_empty_singleton() { - self_.free_buckets(table_layout); - } - })) - } - - /// Reserves or rehashes to make room for `additional` more elements. - /// - /// This uses dynamic dispatch to reduce the amount of - /// code generated, but it is eliminated by LLVM optimizations when inlined. - #[allow(clippy::inline_always)] - #[inline(always)] - unsafe fn reserve_rehash_inner( - &mut self, - additional: usize, - hasher: &dyn Fn(&mut Self, usize) -> u64, - fallibility: Fallibility, - layout: TableLayout, - drop: Option, - ) -> Result<(), TryReserveError> { - // Avoid `Option::ok_or_else` because it bloats LLVM IR. - let new_items = match self.items.checked_add(additional) { - Some(new_items) => new_items, - None => return Err(fallibility.capacity_overflow()), - }; - let full_capacity = bucket_mask_to_capacity(self.bucket_mask); - if new_items <= full_capacity / 2 { - // Rehash in-place without re-allocating if we have plenty of spare - // capacity that is locked up due to DELETED entries. - self.rehash_in_place(hasher, layout.size, drop); - Ok(()) - } else { - // Otherwise, conservatively resize to at least the next size up - // to avoid churning deletes into frequent rehashes. - self.resize_inner( - usize::max(new_items, full_capacity + 1), - hasher, - fallibility, - layout, - ) - } - } - - /// Allocates a new table of a different size and moves the contents of the - /// current table into it. - /// - /// This uses dynamic dispatch to reduce the amount of - /// code generated, but it is eliminated by LLVM optimizations when inlined. - #[allow(clippy::inline_always)] - #[inline(always)] - unsafe fn resize_inner( - &mut self, - capacity: usize, - hasher: &dyn Fn(&mut Self, usize) -> u64, - fallibility: Fallibility, - layout: TableLayout, - ) -> Result<(), TryReserveError> { - let mut new_table = self.prepare_resize(layout, capacity, fallibility)?; - - // Copy all elements to the new table. 
- for i in 0..self.buckets() { - if !is_full(*self.ctrl(i)) { - continue; - } - - // This may panic. - let hash = hasher(self, i); - - // We can use a simpler version of insert() here since: - // - there are no DELETED entries. - // - we know there is enough space in the table. - // - all elements are unique. - let (index, _) = new_table.prepare_insert_slot(hash); - - ptr::copy_nonoverlapping( - self.bucket_ptr(i, layout.size), - new_table.bucket_ptr(index, layout.size), - layout.size, - ); - } - - // We successfully copied all elements without panicking. Now replace - // self with the new table. The old table will have its memory freed but - // the items will not be dropped (since they have been moved into the - // new table). - mem::swap(self, &mut new_table); - - Ok(()) - } - - /// Rehashes the contents of the table in place (i.e. without changing the - /// allocation). - /// - /// If `hasher` panics then some the table's contents may be lost. - /// - /// This uses dynamic dispatch to reduce the amount of - /// code generated, but it is eliminated by LLVM optimizations when inlined. - #[allow(clippy::inline_always)] - #[cfg_attr(feature = "inline-more", inline(always))] - #[cfg_attr(not(feature = "inline-more"), inline)] - unsafe fn rehash_in_place( - &mut self, - hasher: &dyn Fn(&mut Self, usize) -> u64, - size_of: usize, - drop: Option, - ) { - // If the hash function panics then properly clean up any elements - // that we haven't rehashed yet. We unfortunately can't preserve the - // element since we lost their hash and have no way of recovering it - // without risking another panic. 
- self.prepare_rehash_in_place(); - - let mut guard = guard(self, move |self_| { - if let Some(drop) = drop { - for i in 0..self_.buckets() { - if *self_.ctrl(i) == DELETED { - self_.set_ctrl(i, EMPTY); - drop(self_.bucket_ptr(i, size_of)); - self_.items -= 1; - } - } - } - self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items; - }); - - // At this point, DELETED elements are elements that we haven't - // rehashed yet. Find them and re-insert them at their ideal - // position. - 'outer: for i in 0..guard.buckets() { - if *guard.ctrl(i) != DELETED { - continue; - } - - let i_p = guard.bucket_ptr(i, size_of); - - 'inner: loop { - // Hash the current item - let hash = hasher(*guard, i); - - // Search for a suitable place to put it - let new_i = guard.find_insert_slot(hash); - let new_i_p = guard.bucket_ptr(new_i, size_of); - - // Probing works by scanning through all of the control - // bytes in groups, which may not be aligned to the group - // size. If both the new and old position fall within the - // same unaligned group, then there is no benefit in moving - // it and we can just continue to the next item. - if likely(guard.is_in_same_group(i, new_i, hash)) { - guard.set_ctrl_h2(i, hash); - continue 'outer; - } - - // We are moving the current item to a new position. Write - // our H2 to the control byte of the new position. - let prev_ctrl = guard.replace_ctrl_h2(new_i, hash); - if prev_ctrl == EMPTY { - guard.set_ctrl(i, EMPTY); - // If the target slot is empty, simply move the current - // element into the new slot and clear the old control - // byte. - ptr::copy_nonoverlapping(i_p, new_i_p, size_of); - continue 'outer; - } else { - // If the target slot is occupied, swap the two elements - // and then continue processing the element that we just - // swapped into the old slot. 
- debug_assert_eq!(prev_ctrl, DELETED); - ptr::swap_nonoverlapping(i_p, new_i_p, size_of); - continue 'inner; - } - } - } - - guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items; - - mem::forget(guard); - } - - #[inline] - unsafe fn free_buckets(&mut self, table_layout: TableLayout) { - // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. - let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) { - Some(lco) => lco, - None => hint::unreachable_unchecked(), - }; - self.alloc.deallocate( - NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)), - layout, - ); - } - - /// Marks all table buckets as empty without dropping their contents. - #[inline] - fn clear_no_drop(&mut self) { - if !self.is_empty_singleton() { - unsafe { - self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes()); - } - } - self.items = 0; - self.growth_left = bucket_mask_to_capacity(self.bucket_mask); - } - - #[inline] - unsafe fn erase(&mut self, index: usize) { - debug_assert!(is_full(*self.ctrl(index))); - let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask; - let empty_before = Group::load(self.ctrl(index_before)).match_empty(); - let empty_after = Group::load(self.ctrl(index)).match_empty(); - - // If we are inside a continuous block of Group::WIDTH full or deleted - // cells then a probe window may have seen a full block when trying to - // insert. We therefore need to keep that block non-empty so that - // lookups will continue searching to the next probe window. - // - // Note that in this context `leading_zeros` refers to the bytes at the - // end of a group, while `trailing_zeros` refers to the bytes at the - // beginning of a group. 
- let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH { - DELETED - } else { - self.growth_left += 1; - EMPTY - }; - self.set_ctrl(index, ctrl); - self.items -= 1; - } -} - -impl Clone for RawTable { - fn clone(&self) -> Self { - if self.table.is_empty_singleton() { - Self::new_in(self.table.alloc.clone()) - } else { - unsafe { - // Avoid `Result::ok_or_else` because it bloats LLVM IR. - let new_table = match Self::new_uninitialized( - self.table.alloc.clone(), - self.table.buckets(), - Fallibility::Infallible, - ) { - Ok(table) => table, - Err(_) => hint::unreachable_unchecked(), - }; - - // If cloning fails then we need to free the allocation for the - // new table. However we don't run its drop since its control - // bytes are not initialized yet. - let mut guard = guard(ManuallyDrop::new(new_table), |new_table| { - new_table.free_buckets(); - }); - - guard.clone_from_spec(self); - - // Disarm the scope guard and return the newly created table. - ManuallyDrop::into_inner(ScopeGuard::into_inner(guard)) - } - } - } - - fn clone_from(&mut self, source: &Self) { - if source.table.is_empty_singleton() { - *self = Self::new_in(self.table.alloc.clone()); - } else { - unsafe { - // Make sure that if any panics occurs, we clear the table and - // leave it in an empty state. - let mut self_ = guard(self, |self_| { - self_.clear_no_drop(); - }); - - // First, drop all our elements without clearing the control - // bytes. If this panics then the scope guard will clear the - // table, leaking any elements that were not dropped yet. - // - // This leak is unavoidable: we can't try dropping more elements - // since this could lead to another panic and abort the process. - self_.drop_elements(); - - // If necessary, resize our table to match the source. - if self_.buckets() != source.buckets() { - // Skip our drop by using ptr::write. 
- if !self_.table.is_empty_singleton() { - self_.free_buckets(); - } - (&mut **self_ as *mut Self).write( - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - match Self::new_uninitialized( - self_.table.alloc.clone(), - source.buckets(), - Fallibility::Infallible, - ) { - Ok(table) => table, - Err(_) => hint::unreachable_unchecked(), - }, - ); - } - - self_.clone_from_spec(source); - - // Disarm the scope guard if cloning was successful. - ScopeGuard::into_inner(self_); - } - } - } -} - -/// Specialization of `clone_from` for `Copy` types -trait RawTableClone { - unsafe fn clone_from_spec(&mut self, source: &Self); -} -impl RawTableClone for RawTable { - default_fn! { - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn clone_from_spec(&mut self, source: &Self) { - self.clone_from_impl(source); - } - } -} -#[cfg(feature = "nightly")] -impl RawTableClone for RawTable { - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn clone_from_spec(&mut self, source: &Self) { - source - .table - .ctrl(0) - .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); - source - .data_start() - .copy_to_nonoverlapping(self.data_start(), self.table.buckets()); - - self.table.items = source.table.items; - self.table.growth_left = source.table.growth_left; - } -} - -impl RawTable { - /// Common code for clone and clone_from. Assumes: - /// - `self.buckets() == source.buckets()`. - /// - Any existing elements have been dropped. - /// - The control bytes are not initialized yet. - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn clone_from_impl(&mut self, source: &Self) { - // Copy the control bytes unchanged. We do this in a single pass - source - .table - .ctrl(0) - .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); - - // The cloning of elements may panic, in which case we need - // to make sure we drop only the elements that have been - // cloned so far. 
- let mut guard = guard((0, &mut *self), |(index, self_)| { - if mem::needs_drop::() && !self_.is_empty() { - for i in 0..=*index { - if is_full(*self_.table.ctrl(i)) { - self_.bucket(i).drop(); - } - } - } - }); - - for from in source.iter() { - let index = source.bucket_index(&from); - let to = guard.1.bucket(index); - to.write(from.as_ref().clone()); - - // Update the index in case we need to unwind. - guard.0 = index; - } - - // Successfully cloned all items, no need to clean up. - mem::forget(guard); - - self.table.items = source.table.items; - self.table.growth_left = source.table.growth_left; - } - - /// Variant of `clone_from` to use when a hasher is available. - #[cfg(feature = "raw")] - pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) { - // If we have enough capacity in the table, just clear it and insert - // elements one by one. We don't do this if we have the same number of - // buckets as the source since we can just copy the contents directly - // in that case. - if self.table.buckets() != source.table.buckets() - && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len() - { - self.clear(); - - let guard_self = guard(&mut *self, |self_| { - // Clear the partially copied table if a panic occurs, otherwise - // items and growth_left will be out of sync with the contents - // of the table. - self_.clear(); - }); - - unsafe { - for item in source.iter() { - // This may panic. - let item = item.as_ref().clone(); - let hash = hasher(&item); - - // We can use a simpler version of insert() here since: - // - there are no DELETED entries. - // - we know there is enough space in the table. - // - all elements are unique. - let (index, _) = guard_self.table.prepare_insert_slot(hash); - guard_self.bucket(index).write(item); - } - } - - // Successfully cloned all items, no need to clean up. 
- mem::forget(guard_self); - - self.table.items = source.table.items; - self.table.growth_left -= source.table.items; - } else { - self.clone_from(source); - } - } -} - -impl Default for RawTable { - #[inline] - fn default() -> Self { - Self::new_in(Default::default()) - } -} - -#[cfg(feature = "nightly")] -unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable { - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - if !self.table.is_empty_singleton() { - unsafe { - self.drop_elements(); - self.free_buckets(); - } - } - } -} -#[cfg(not(feature = "nightly"))] -impl Drop for RawTable { - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - if !self.table.is_empty_singleton() { - unsafe { - self.drop_elements(); - self.free_buckets(); - } - } - } -} - -impl IntoIterator for RawTable { - type Item = T; - type IntoIter = RawIntoIter; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_iter(self) -> RawIntoIter { - unsafe { - let iter = self.iter(); - self.into_iter_from(iter) - } - } -} - -/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does -/// not track an item count. -pub(crate) struct RawIterRange { - // Mask of full buckets in the current group. Bits are cleared from this - // mask as each element is processed. - current_group: BitMask, - - // Pointer to the buckets for the current group. - data: Bucket, - - // Pointer to the next group of control bytes, - // Must be aligned to the group size. - next_ctrl: *const u8, - - // Pointer one past the last control byte of this range. - end: *const u8, -} - -impl RawIterRange { - /// Returns a `RawIterRange` covering a subset of a table. - /// - /// The control byte address must be aligned to the group size. 
- #[cfg_attr(feature = "inline-more", inline)] - unsafe fn new(ctrl: *const u8, data: Bucket, len: usize) -> Self { - debug_assert_ne!(len, 0); - debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); - let end = ctrl.add(len); - - // Load the first group and advance ctrl to point to the next group - let current_group = Group::load_aligned(ctrl).match_full(); - let next_ctrl = ctrl.add(Group::WIDTH); - - Self { - current_group, - data, - next_ctrl, - end, - } - } - - /// Splits a `RawIterRange` into two halves. - /// - /// Returns `None` if the remaining range is smaller than or equal to the - /// group width. - #[cfg_attr(feature = "inline-more", inline)] - #[cfg(feature = "rayon")] - pub(crate) fn split(mut self) -> (Self, Option>) { - unsafe { - if self.end <= self.next_ctrl { - // Nothing to split if the group that we are current processing - // is the last one. - (self, None) - } else { - // len is the remaining number of elements after the group that - // we are currently processing. It must be a multiple of the - // group size (small tables are caught by the check above). - let len = offset_from(self.end, self.next_ctrl); - debug_assert_eq!(len % Group::WIDTH, 0); - - // Split the remaining elements into two halves, but round the - // midpoint down in case there is an odd number of groups - // remaining. This ensures that: - // - The tail is at least 1 group long. - // - The split is roughly even considering we still have the - // current group to process. 
- let mid = (len / 2) & !(Group::WIDTH - 1); - - let tail = Self::new( - self.next_ctrl.add(mid), - self.data.next_n(Group::WIDTH).next_n(mid), - len - mid, - ); - debug_assert_eq!( - self.data.next_n(Group::WIDTH).next_n(mid).ptr, - tail.data.ptr - ); - debug_assert_eq!(self.end, tail.end); - self.end = self.next_ctrl.add(mid); - debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl); - (self, Some(tail)) - } - } - } - - /// # Safety - /// If DO_CHECK_PTR_RANGE is false, caller must ensure that we never try to iterate - /// after yielding all elements. - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn next_impl(&mut self) -> Option> { - loop { - if let Some(index) = self.current_group.lowest_set_bit() { - self.current_group = self.current_group.remove_lowest_bit(); - return Some(self.data.next_n(index)); - } - - if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end { - return None; - } - - // We might read past self.end up to the next group boundary, - // but this is fine because it only occurs on tables smaller - // than the group size where the trailing control bytes are all - // EMPTY. On larger tables self.end is guaranteed to be aligned - // to the group size (since tables are power-of-two sized). - self.current_group = Group::load_aligned(self.next_ctrl).match_full(); - self.data = self.data.next_n(Group::WIDTH); - self.next_ctrl = self.next_ctrl.add(Group::WIDTH); - } - } -} - -// We make raw iterators unconditionally Send and Sync, and let the PhantomData -// in the actual iterator implementations determine the real Send/Sync bounds. 
-unsafe impl Send for RawIterRange {} -unsafe impl Sync for RawIterRange {} - -impl Clone for RawIterRange { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Self { - data: self.data.clone(), - next_ctrl: self.next_ctrl, - current_group: self.current_group, - end: self.end, - } - } -} - -impl Iterator for RawIterRange { - type Item = Bucket; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option> { - unsafe { - // SAFETY: We set checker flag to true. - self.next_impl::() - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - // We don't have an item count, so just guess based on the range size. - let remaining_buckets = if self.end > self.next_ctrl { - unsafe { offset_from(self.end, self.next_ctrl) } - } else { - 0 - }; - - // Add a group width to include the group we are currently processing. - (0, Some(Group::WIDTH + remaining_buckets)) - } -} - -impl FusedIterator for RawIterRange {} - -/// Iterator which returns a raw pointer to every full bucket in the table. -/// -/// For maximum flexibility this iterator is not bound by a lifetime, but you -/// must observe several rules when using it: -/// - You must not free the hash table while iterating (including via growing/shrinking). -/// - It is fine to erase a bucket that has been yielded by the iterator. -/// - Erasing a bucket that has not yet been yielded by the iterator may still -/// result in the iterator yielding that bucket (unless `reflect_remove` is called). -/// - It is unspecified whether an element inserted after the iterator was -/// created will be yielded by that iterator (unless `reflect_insert` is called). -/// - The order in which the iterator yields bucket is unspecified and may -/// change in the future. -pub struct RawIter { - pub(crate) iter: RawIterRange, - items: usize, -} - -impl RawIter { - /// Refresh the iterator so that it reflects a removal from the given bucket. 
- /// - /// For the iterator to remain valid, this method must be called once - /// for each removed bucket before `next` is called again. - /// - /// This method should be called _before_ the removal is made. It is not necessary to call this - /// method if you are removing an item that this iterator yielded in the past. - #[cfg(feature = "raw")] - pub fn reflect_remove(&mut self, b: &Bucket) { - self.reflect_toggle_full(b, false); - } - - /// Refresh the iterator so that it reflects an insertion into the given bucket. - /// - /// For the iterator to remain valid, this method must be called once - /// for each insert before `next` is called again. - /// - /// This method does not guarantee that an insertion of a bucket with a greater - /// index than the last one yielded will be reflected in the iterator. - /// - /// This method should be called _after_ the given insert is made. - #[cfg(feature = "raw")] - pub fn reflect_insert(&mut self, b: &Bucket) { - self.reflect_toggle_full(b, true); - } - - /// Refresh the iterator so that it reflects a change to the state of the given bucket. - #[cfg(feature = "raw")] - fn reflect_toggle_full(&mut self, b: &Bucket, is_insert: bool) { - unsafe { - if b.as_ptr() > self.iter.data.as_ptr() { - // The iterator has already passed the bucket's group. - // So the toggle isn't relevant to this iterator. - return; - } - - if self.iter.next_ctrl < self.iter.end - && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr() - { - // The iterator has not yet reached the bucket's group. - // We don't need to reload anything, but we do need to adjust the item count. - - if cfg!(debug_assertions) { - // Double-check that the user isn't lying to us by checking the bucket state. - // To do that, we need to find its control byte. 
We know that self.iter.data is - // at self.iter.next_ctrl - Group::WIDTH, so we work from there: - let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr()); - let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset); - // This method should be called _before_ a removal, or _after_ an insert, - // so in both cases the ctrl byte should indicate that the bucket is full. - assert!(is_full(*ctrl)); - } - - if is_insert { - self.items += 1; - } else { - self.items -= 1; - } - - return; - } - - // The iterator is at the bucket group that the toggled bucket is in. - // We need to do two things: - // - // - Determine if the iterator already yielded the toggled bucket. - // If it did, we're done. - // - Otherwise, update the iterator cached group so that it won't - // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket. - // We'll also need to update the item count accordingly. - if let Some(index) = self.iter.current_group.lowest_set_bit() { - let next_bucket = self.iter.data.next_n(index); - if b.as_ptr() > next_bucket.as_ptr() { - // The toggled bucket is "before" the bucket the iterator would yield next. We - // therefore don't need to do anything --- the iterator has already passed the - // bucket in question. - // - // The item count must already be correct, since a removal or insert "prior" to - // the iterator's position wouldn't affect the item count. - } else { - // The removed bucket is an upcoming bucket. We need to make sure it does _not_ - // get yielded, and also that it's no longer included in the item count. - // - // NOTE: We can't just reload the group here, both since that might reflect - // inserts we've already passed, and because that might inadvertently unset the - // bits for _other_ removals. If we do that, we'd have to also decrement the - // item count for those other bits that we unset. But the presumably subsequent - // call to reflect for those buckets might _also_ decrement the item count. 
- // Instead, we _just_ flip the bit for the particular bucket the caller asked - // us to reflect. - let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr()); - let was_full = self.iter.current_group.flip(our_bit); - debug_assert_ne!(was_full, is_insert); - - if is_insert { - self.items += 1; - } else { - self.items -= 1; - } - - if cfg!(debug_assertions) { - if b.as_ptr() == next_bucket.as_ptr() { - // The removed bucket should no longer be next - debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index)); - } else { - // We should not have changed what bucket comes next. - debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index)); - } - } - } - } else { - // We must have already iterated past the removed item. - } - } - } - - unsafe fn drop_elements(&mut self) { - if mem::needs_drop::() && self.len() != 0 { - for item in self { - item.drop(); - } - } - } -} - -impl Clone for RawIter { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Self { - iter: self.iter.clone(), - items: self.items, - } - } -} - -impl Iterator for RawIter { - type Item = Bucket; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option> { - // Inner iterator iterates over buckets - // so it can do unnecessary work if we already yielded all items. - if self.items == 0 { - return None; - } - - let nxt = unsafe { - // SAFETY: We check number of items to yield using `items` field. - self.iter.next_impl::() - }; - - if nxt.is_some() { - self.items -= 1; - } - - nxt - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - (self.items, Some(self.items)) - } -} - -impl ExactSizeIterator for RawIter {} -impl FusedIterator for RawIter {} - -/// Iterator which consumes a table and returns elements. 
-pub struct RawIntoIter { - iter: RawIter, - allocation: Option<(NonNull, Layout)>, - marker: PhantomData, - alloc: A, -} - -impl RawIntoIter { - #[cfg_attr(feature = "inline-more", inline)] - pub fn iter(&self) -> RawIter { - self.iter.clone() - } -} - -unsafe impl Send for RawIntoIter -where - T: Send, - A: Send, -{ -} -unsafe impl Sync for RawIntoIter -where - T: Sync, - A: Sync, -{ -} - -#[cfg(feature = "nightly")] -unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter { - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - unsafe { - // Drop all remaining elements - self.iter.drop_elements(); - - // Free the table - if let Some((ptr, layout)) = self.allocation { - self.alloc.deallocate(ptr, layout); - } - } - } -} -#[cfg(not(feature = "nightly"))] -impl Drop for RawIntoIter { - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - unsafe { - // Drop all remaining elements - self.iter.drop_elements(); - - // Free the table - if let Some((ptr, layout)) = self.allocation { - self.alloc.deallocate(ptr, layout); - } - } - } -} - -impl Iterator for RawIntoIter { - type Item = T; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option { - unsafe { Some(self.iter.next()?.read()) } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl ExactSizeIterator for RawIntoIter {} -impl FusedIterator for RawIntoIter {} - -/// Iterator which consumes elements without freeing the table storage. -pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { - iter: RawIter, - - // The table is moved into the iterator for the duration of the drain. This - // ensures that an empty table is left if the drain iterator is leaked - // without dropping. - table: ManuallyDrop>, - orig_table: NonNull>, - - // We don't use a &'a mut RawTable because we want RawDrain to be - // covariant over T. 
- marker: PhantomData<&'a RawTable>, -} - -impl RawDrain<'_, T, A> { - #[cfg_attr(feature = "inline-more", inline)] - pub fn iter(&self) -> RawIter { - self.iter.clone() - } -} - -unsafe impl Send for RawDrain<'_, T, A> -where - T: Send, - A: Send, -{ -} -unsafe impl Sync for RawDrain<'_, T, A> -where - T: Sync, - A: Sync, -{ -} - -impl Drop for RawDrain<'_, T, A> { - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - unsafe { - // Drop all remaining elements. Note that this may panic. - self.iter.drop_elements(); - - // Reset the contents of the table now that all elements have been - // dropped. - self.table.clear_no_drop(); - - // Move the now empty table back to its original location. - self.orig_table - .as_ptr() - .copy_from_nonoverlapping(&*self.table, 1); - } - } -} - -impl Iterator for RawDrain<'_, T, A> { - type Item = T; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option { - unsafe { - let item = self.iter.next()?; - Some(item.read()) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl ExactSizeIterator for RawDrain<'_, T, A> {} -impl FusedIterator for RawDrain<'_, T, A> {} - -/// Iterator over occupied buckets that could match a given hash. -/// -/// `RawTable` only stores 7 bits of the hash value, so this iterator may return -/// items that have a hash value different than the one provided. You should -/// always validate the returned values before using them. -pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> { - inner: RawIterHashInner<'a, A>, - _marker: PhantomData, -} - -struct RawIterHashInner<'a, A: Allocator + Clone> { - table: &'a RawTableInner, - - // The top 7 bits of the hash. - h2_hash: u8, - - // The sequence of groups to probe in the search. - probe_seq: ProbeSeq, - - group: Group, - - // The elements within the group with a matching h2-hash. 
- bitmask: BitMaskIter, -} - -impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> { - #[cfg_attr(feature = "inline-more", inline)] - #[cfg(feature = "raw")] - fn new(table: &'a RawTable, hash: u64) -> Self { - RawIterHash { - inner: RawIterHashInner::new(&table.table, hash), - _marker: PhantomData, - } - } -} -impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> { - #[cfg_attr(feature = "inline-more", inline)] - #[cfg(feature = "raw")] - fn new(table: &'a RawTableInner, hash: u64) -> Self { - unsafe { - let h2_hash = h2(hash); - let probe_seq = table.probe_seq(hash); - let group = Group::load(table.ctrl(probe_seq.pos)); - let bitmask = group.match_byte(h2_hash).into_iter(); - - RawIterHashInner { - table, - h2_hash, - probe_seq, - group, - bitmask, - } - } - } -} - -impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> { - type Item = Bucket; - - fn next(&mut self) -> Option> { - unsafe { - match self.inner.next() { - Some(index) => Some(self.inner.table.bucket(index)), - None => None, - } - } - } -} - -impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> { - type Item = usize; - - fn next(&mut self) -> Option { - unsafe { - loop { - if let Some(bit) = self.bitmask.next() { - let index = (self.probe_seq.pos + bit) & self.table.bucket_mask; - return Some(index); - } - if likely(self.group.match_empty().any_bit_set()) { - return None; - } - self.probe_seq.move_next(self.table.bucket_mask); - self.group = Group::load(self.table.ctrl(self.probe_seq.pos)); - self.bitmask = self.group.match_byte(self.h2_hash).into_iter(); - } - } - } -} - -#[cfg(test)] -mod test_map { - use super::*; - - fn rehash_in_place(table: &mut RawTable, hasher: impl Fn(&T) -> u64) { - unsafe { - table.table.rehash_in_place( - &|table, index| hasher(table.bucket::(index).as_ref()), - mem::size_of::(), - if mem::needs_drop::() { - Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T))) - } else { - None - }, - ); - } - } - - #[test] - fn rehash() { - let 
mut table = RawTable::new(); - let hasher = |i: &u64| *i; - for i in 0..100 { - table.insert(i, i, hasher); - } - - for i in 0..100 { - unsafe { - assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); - } - assert!(table.find(i + 100, |x| *x == i + 100).is_none()); - } - - rehash_in_place(&mut table, hasher); - - for i in 0..100 { - unsafe { - assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); - } - assert!(table.find(i + 100, |x| *x == i + 100).is_none()); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/sse2.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/sse2.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/raw/sse2.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/raw/sse2.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,146 +0,0 @@ -use super::bitmask::BitMask; -use super::EMPTY; -use core::mem; - -#[cfg(target_arch = "x86")] -use core::arch::x86; -#[cfg(target_arch = "x86_64")] -use core::arch::x86_64 as x86; - -pub type BitMaskWord = u16; -pub const BITMASK_STRIDE: usize = 1; -pub const BITMASK_MASK: BitMaskWord = 0xffff; - -/// Abstraction over a group of control bytes which can be scanned in -/// parallel. -/// -/// This implementation uses a 128-bit SSE value. -#[derive(Copy, Clone)] -pub struct Group(x86::__m128i); - -// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859 -#[allow(clippy::use_self)] -impl Group { - /// Number of bytes in the group. - pub const WIDTH: usize = mem::size_of::(); - - /// Returns a full group of empty bytes, suitable for use as the initial - /// value for an empty hash table. - /// - /// This is guaranteed to be aligned to the group size. 
- #[inline] - #[allow(clippy::items_after_statements)] - pub const fn static_empty() -> &'static [u8; Group::WIDTH] { - #[repr(C)] - struct AlignedBytes { - _align: [Group; 0], - bytes: [u8; Group::WIDTH], - } - const ALIGNED_BYTES: AlignedBytes = AlignedBytes { - _align: [], - bytes: [EMPTY; Group::WIDTH], - }; - &ALIGNED_BYTES.bytes - } - - /// Loads a group of bytes starting at the given address. - #[inline] - #[allow(clippy::cast_ptr_alignment)] // unaligned load - pub unsafe fn load(ptr: *const u8) -> Self { - Group(x86::_mm_loadu_si128(ptr.cast())) - } - - /// Loads a group of bytes starting at the given address, which must be - /// aligned to `mem::align_of::()`. - #[inline] - #[allow(clippy::cast_ptr_alignment)] - pub unsafe fn load_aligned(ptr: *const u8) -> Self { - // FIXME: use align_offset once it stabilizes - debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); - Group(x86::_mm_load_si128(ptr.cast())) - } - - /// Stores the group of bytes to the given address, which must be - /// aligned to `mem::align_of::()`. - #[inline] - #[allow(clippy::cast_ptr_alignment)] - pub unsafe fn store_aligned(self, ptr: *mut u8) { - // FIXME: use align_offset once it stabilizes - debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); - x86::_mm_store_si128(ptr.cast(), self.0); - } - - /// Returns a `BitMask` indicating all bytes in the group which have - /// the given value. - #[inline] - pub fn match_byte(self, byte: u8) -> BitMask { - #[allow( - clippy::cast_possible_wrap, // byte: u8 as i8 - // byte: i32 as u16 - // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the - // upper 16-bits of the i32 are zeroed: - clippy::cast_sign_loss, - clippy::cast_possible_truncation - )] - unsafe { - let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(byte as i8)); - BitMask(x86::_mm_movemask_epi8(cmp) as u16) - } - } - - /// Returns a `BitMask` indicating all bytes in the group which are - /// `EMPTY`. 
- #[inline] - pub fn match_empty(self) -> BitMask { - self.match_byte(EMPTY) - } - - /// Returns a `BitMask` indicating all bytes in the group which are - /// `EMPTY` or `DELETED`. - #[inline] - pub fn match_empty_or_deleted(self) -> BitMask { - #[allow( - // byte: i32 as u16 - // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the - // upper 16-bits of the i32 are zeroed: - clippy::cast_sign_loss, - clippy::cast_possible_truncation - )] - unsafe { - // A byte is EMPTY or DELETED iff the high bit is set - BitMask(x86::_mm_movemask_epi8(self.0) as u16) - } - } - - /// Returns a `BitMask` indicating all bytes in the group which are full. - #[inline] - pub fn match_full(&self) -> BitMask { - self.match_empty_or_deleted().invert() - } - - /// Performs the following transformation on all bytes in the group: - /// - `EMPTY => EMPTY` - /// - `DELETED => EMPTY` - /// - `FULL => DELETED` - #[inline] - pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self { - // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 - // and high_bit = 0 (FULL) to 1000_0000 - // - // Here's this logic expanded to concrete values: - // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false) - // 1111_1111 | 1000_0000 = 1111_1111 - // 0000_0000 | 1000_0000 = 1000_0000 - #[allow( - clippy::cast_possible_wrap, // byte: 0x80_u8 as i8 - )] - unsafe { - let zero = x86::_mm_setzero_si128(); - let special = x86::_mm_cmpgt_epi8(zero, self.0); - Group(x86::_mm_or_si128( - special, - x86::_mm_set1_epi8(0x80_u8 as i8), - )) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/rustc_entry.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/rustc_entry.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/rustc_entry.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/rustc_entry.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,630 +0,0 @@ -use self::RustcEntry::*; -use crate::map::{make_insert_hash, Drain, HashMap, 
IntoIter, Iter, IterMut}; -use crate::raw::{Allocator, Bucket, Global, RawTable}; -use core::fmt::{self, Debug}; -use core::hash::{BuildHasher, Hash}; -use core::mem; - -impl HashMap -where - K: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - /// Gets the given key's corresponding entry in the map for in-place manipulation. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut letters = HashMap::new(); - /// - /// for ch in "a short treatise on fungi".chars() { - /// let counter = letters.rustc_entry(ch).or_insert(0); - /// *counter += 1; - /// } - /// - /// assert_eq!(letters[&'s'], 2); - /// assert_eq!(letters[&'t'], 3); - /// assert_eq!(letters[&'u'], 1); - /// assert_eq!(letters.get(&'y'), None); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V, A> { - let hash = make_insert_hash(&self.hash_builder, &key); - if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) { - RustcEntry::Occupied(RustcOccupiedEntry { - key: Some(key), - elem, - table: &mut self.table, - }) - } else { - // Ideally we would put this in VacantEntry::insert, but Entry is not - // generic over the BuildHasher and adding a generic parameter would be - // a breaking change. - self.reserve(1); - - RustcEntry::Vacant(RustcVacantEntry { - hash, - key, - table: &mut self.table, - }) - } - } -} - -/// A view into a single entry in a map, which may either be vacant or occupied. -/// -/// This `enum` is constructed from the [`rustc_entry`] method on [`HashMap`]. -/// -/// [`HashMap`]: struct.HashMap.html -/// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry -pub enum RustcEntry<'a, K, V, A = Global> -where - A: Allocator + Clone, -{ - /// An occupied entry. - Occupied(RustcOccupiedEntry<'a, K, V, A>), - - /// A vacant entry. 
- Vacant(RustcVacantEntry<'a, K, V, A>), -} - -impl Debug for RustcEntry<'_, K, V, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), - Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), - } - } -} - -/// A view into an occupied entry in a `HashMap`. -/// It is part of the [`RustcEntry`] enum. -/// -/// [`RustcEntry`]: enum.RustcEntry.html -pub struct RustcOccupiedEntry<'a, K, V, A = Global> -where - A: Allocator + Clone, -{ - key: Option, - elem: Bucket<(K, V)>, - table: &'a mut RawTable<(K, V), A>, -} - -unsafe impl Send for RustcOccupiedEntry<'_, K, V, A> -where - K: Send, - V: Send, - A: Allocator + Clone + Send, -{ -} -unsafe impl Sync for RustcOccupiedEntry<'_, K, V, A> -where - K: Sync, - V: Sync, - A: Allocator + Clone + Sync, -{ -} - -impl Debug for RustcOccupiedEntry<'_, K, V, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OccupiedEntry") - .field("key", self.key()) - .field("value", self.get()) - .finish() - } -} - -/// A view into a vacant entry in a `HashMap`. -/// It is part of the [`RustcEntry`] enum. -/// -/// [`RustcEntry`]: enum.RustcEntry.html -pub struct RustcVacantEntry<'a, K, V, A = Global> -where - A: Allocator + Clone, -{ - hash: u64, - key: K, - table: &'a mut RawTable<(K, V), A>, -} - -impl Debug for RustcVacantEntry<'_, K, V, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("VacantEntry").field(self.key()).finish() - } -} - -impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> { - /// Sets the value of the entry, and returns a RustcOccupiedEntry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// let entry = map.rustc_entry("horseyland").insert(37); - /// - /// assert_eq!(entry.key(), &"horseyland"); - /// ``` - pub fn insert(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> { - match self { - Vacant(entry) => entry.insert_entry(value), - Occupied(mut entry) => { - entry.insert(value); - entry - } - } - } - - /// Ensures a value is in the entry by inserting the default if empty, and returns - /// a mutable reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// map.rustc_entry("poneyland").or_insert(3); - /// assert_eq!(map["poneyland"], 3); - /// - /// *map.rustc_entry("poneyland").or_insert(10) *= 2; - /// assert_eq!(map["poneyland"], 6); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert(self, default: V) -> &'a mut V - where - K: Hash, - { - match self { - Occupied(entry) => entry.into_mut(), - Vacant(entry) => entry.insert(default), - } - } - - /// Ensures a value is in the entry by inserting the result of the default function if empty, - /// and returns a mutable reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, String> = HashMap::new(); - /// let s = "hoho".to_string(); - /// - /// map.rustc_entry("poneyland").or_insert_with(|| s); - /// - /// assert_eq!(map["poneyland"], "hoho".to_string()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert_with V>(self, default: F) -> &'a mut V - where - K: Hash, - { - match self { - Occupied(entry) => entry.into_mut(), - Vacant(entry) => entry.insert(default()), - } - } - - /// Returns a reference to this entry's key. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &K { - match *self { - Occupied(ref entry) => entry.key(), - Vacant(ref entry) => entry.key(), - } - } - - /// Provides in-place mutable access to an occupied entry before any - /// potential inserts into the map. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// map.rustc_entry("poneyland") - /// .and_modify(|e| { *e += 1 }) - /// .or_insert(42); - /// assert_eq!(map["poneyland"], 42); - /// - /// map.rustc_entry("poneyland") - /// .and_modify(|e| { *e += 1 }) - /// .or_insert(42); - /// assert_eq!(map["poneyland"], 43); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn and_modify(self, f: F) -> Self - where - F: FnOnce(&mut V), - { - match self { - Occupied(mut entry) => { - f(entry.get_mut()); - Occupied(entry) - } - Vacant(entry) => Vacant(entry), - } - } -} - -impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> { - /// Ensures a value is in the entry by inserting the default value if empty, - /// and returns a mutable reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// # fn main() { - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, Option> = HashMap::new(); - /// map.rustc_entry("poneyland").or_default(); - /// - /// assert_eq!(map["poneyland"], None); - /// # } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_default(self) -> &'a mut V - where - K: Hash, - { - match self { - Occupied(entry) => entry.into_mut(), - Vacant(entry) => entry.insert(Default::default()), - } - } -} - -impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { - /// Gets a reference to the key in the entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.rustc_entry("poneyland").or_insert(12); - /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &K { - unsafe { &self.elem.as_ref().0 } - } - - /// Take the ownership of the key and value from the map. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RustcEntry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.rustc_entry("poneyland").or_insert(12); - /// - /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { - /// // We delete the entry from the map. - /// o.remove_entry(); - /// } - /// - /// assert_eq!(map.contains_key("poneyland"), false); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.remove(self.elem) } - } - - /// Gets a reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RustcEntry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.rustc_entry("poneyland").or_insert(12); - /// - /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { - /// assert_eq!(o.get(), &12); - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get(&self) -> &V { - unsafe { &self.elem.as_ref().1 } - } - - /// Gets a mutable reference to the value in the entry. - /// - /// If you need a reference to the `RustcOccupiedEntry` which may outlive the - /// destruction of the `RustcEntry` value, see [`into_mut`]. 
- /// - /// [`into_mut`]: #method.into_mut - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RustcEntry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.rustc_entry("poneyland").or_insert(12); - /// - /// assert_eq!(map["poneyland"], 12); - /// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") { - /// *o.get_mut() += 10; - /// assert_eq!(*o.get(), 22); - /// - /// // We can use the same RustcEntry multiple times. - /// *o.get_mut() += 2; - /// } - /// - /// assert_eq!(map["poneyland"], 24); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get_mut(&mut self) -> &mut V { - unsafe { &mut self.elem.as_mut().1 } - } - - /// Converts the RustcOccupiedEntry into a mutable reference to the value in the entry - /// with a lifetime bound to the map itself. - /// - /// If you need multiple references to the `RustcOccupiedEntry`, see [`get_mut`]. - /// - /// [`get_mut`]: #method.get_mut - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RustcEntry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.rustc_entry("poneyland").or_insert(12); - /// - /// assert_eq!(map["poneyland"], 12); - /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { - /// *o.into_mut() += 10; - /// } - /// - /// assert_eq!(map["poneyland"], 22); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_mut(self) -> &'a mut V { - unsafe { &mut self.elem.as_mut().1 } - } - - /// Sets the value of the entry, and returns the entry's old value. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RustcEntry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.rustc_entry("poneyland").or_insert(12); - /// - /// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") { - /// assert_eq!(o.insert(15), 12); - /// } - /// - /// assert_eq!(map["poneyland"], 15); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(&mut self, value: V) -> V { - mem::replace(self.get_mut(), value) - } - - /// Takes the value out of the entry, and returns it. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RustcEntry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// map.rustc_entry("poneyland").or_insert(12); - /// - /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { - /// assert_eq!(o.remove(), 12); - /// } - /// - /// assert_eq!(map.contains_key("poneyland"), false); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove(self) -> V { - self.remove_entry().1 - } - - /// Replaces the entry, returning the old key and value. The new key in the hash map will be - /// the key used to create this entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{RustcEntry, HashMap}; - /// use std::rc::Rc; - /// - /// let mut map: HashMap, u32> = HashMap::new(); - /// map.insert(Rc::new("Stringthing".to_string()), 15); - /// - /// let my_key = Rc::new("Stringthing".to_string()); - /// - /// if let RustcEntry::Occupied(entry) = map.rustc_entry(my_key) { - /// // Also replace the key with a handle to our other key. 
- /// let (old_key, old_value): (Rc, u32) = entry.replace_entry(16); - /// } - /// - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace_entry(self, value: V) -> (K, V) { - let entry = unsafe { self.elem.as_mut() }; - - let old_key = mem::replace(&mut entry.0, self.key.unwrap()); - let old_value = mem::replace(&mut entry.1, value); - - (old_key, old_value) - } - - /// Replaces the key in the hash map with the key used to create this entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_map::{RustcEntry, HashMap}; - /// use std::rc::Rc; - /// - /// let mut map: HashMap, u32> = HashMap::new(); - /// let mut known_strings: Vec> = Vec::new(); - /// - /// // Initialise known strings, run program, etc. - /// - /// reclaim_memory(&mut map, &known_strings); - /// - /// fn reclaim_memory(map: &mut HashMap, u32>, known_strings: &[Rc] ) { - /// for s in known_strings { - /// if let RustcEntry::Occupied(entry) = map.rustc_entry(s.clone()) { - /// // Replaces the entry's key with our version of it in `known_strings`. - /// entry.replace_key(); - /// } - /// } - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace_key(self) -> K { - let entry = unsafe { self.elem.as_mut() }; - mem::replace(&mut entry.0, self.key.unwrap()) - } -} - -impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> { - /// Gets a reference to the key that would be used when inserting a value - /// through the `RustcVacantEntry`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &K { - &self.key - } - - /// Take ownership of the key. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RustcEntry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") { - /// v.into_key(); - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_key(self) -> K { - self.key - } - - /// Sets the value of the entry with the RustcVacantEntry's key, - /// and returns a mutable reference to it. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RustcEntry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// if let RustcEntry::Vacant(o) = map.rustc_entry("poneyland") { - /// o.insert(37); - /// } - /// assert_eq!(map["poneyland"], 37); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(self, value: V) -> &'a mut V { - unsafe { - let bucket = self.table.insert_no_grow(self.hash, (self.key, value)); - &mut bucket.as_mut().1 - } - } - - /// Sets the value of the entry with the RustcVacantEntry's key, - /// and returns a RustcOccupiedEntry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashMap; - /// use hashbrown::hash_map::RustcEntry; - /// - /// let mut map: HashMap<&str, u32> = HashMap::new(); - /// - /// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") { - /// let o = v.insert_entry(37); - /// assert_eq!(o.get(), &37); - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert_entry(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> { - let bucket = unsafe { self.table.insert_no_grow(self.hash, (self.key, value)) }; - RustcOccupiedEntry { - key: None, - elem: bucket, - table: self.table, - } - } -} - -impl IterMut<'_, K, V> { - /// Returns a iterator of references over the remaining items. 
- #[cfg_attr(feature = "inline-more", inline)] - pub fn rustc_iter(&self) -> Iter<'_, K, V> { - self.iter() - } -} - -impl IntoIter { - /// Returns a iterator of references over the remaining items. - #[cfg_attr(feature = "inline-more", inline)] - pub fn rustc_iter(&self) -> Iter<'_, K, V> { - self.iter() - } -} - -impl Drain<'_, K, V> { - /// Returns a iterator of references over the remaining items. - #[cfg_attr(feature = "inline-more", inline)] - pub fn rustc_iter(&self) -> Iter<'_, K, V> { - self.iter() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/scopeguard.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/scopeguard.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/scopeguard.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/scopeguard.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,74 +0,0 @@ -// Extracted from the scopeguard crate -use core::{ - mem, - ops::{Deref, DerefMut}, - ptr, -}; - -pub struct ScopeGuard -where - F: FnMut(&mut T), -{ - dropfn: F, - value: T, -} - -#[inline] -pub fn guard(value: T, dropfn: F) -> ScopeGuard -where - F: FnMut(&mut T), -{ - ScopeGuard { dropfn, value } -} - -impl ScopeGuard -where - F: FnMut(&mut T), -{ - #[inline] - pub fn into_inner(guard: Self) -> T { - // Cannot move out of Drop-implementing types, so - // ptr::read the value and forget the guard. - unsafe { - let value = ptr::read(&guard.value); - // read the closure so that it is dropped, and assign it to a local - // variable to ensure that it is only dropped after the guard has - // been forgotten. (In case the Drop impl of the closure, or that - // of any consumed captured variable, panics). 
- let _dropfn = ptr::read(&guard.dropfn); - mem::forget(guard); - value - } - } -} - -impl Deref for ScopeGuard -where - F: FnMut(&mut T), -{ - type Target = T; - #[inline] - fn deref(&self) -> &T { - &self.value - } -} - -impl DerefMut for ScopeGuard -where - F: FnMut(&mut T), -{ - #[inline] - fn deref_mut(&mut self) -> &mut T { - &mut self.value - } -} - -impl Drop for ScopeGuard -where - F: FnMut(&mut T), -{ - #[inline] - fn drop(&mut self) { - (self.dropfn)(&mut self.value); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/set.rs s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/set.rs --- s390-tools-2.31.0/rust-vendor/hashbrown-0.12.3/src/set.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hashbrown-0.12.3/src/set.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2790 +0,0 @@ -use crate::TryReserveError; -use alloc::borrow::ToOwned; -use core::borrow::Borrow; -use core::fmt; -use core::hash::{BuildHasher, Hash}; -use core::iter::{Chain, FromIterator, FusedIterator}; -use core::mem; -use core::ops::{BitAnd, BitOr, BitXor, Sub}; - -use super::map::{self, ConsumeAllOnDrop, DefaultHashBuilder, DrainFilterInner, HashMap, Keys}; -use crate::raw::{Allocator, Global}; - -// Future Optimization (FIXME!) -// ============================= -// -// Iteration over zero sized values is a noop. There is no need -// for `bucket.val` in the case of HashSet. I suppose we would need HKT -// to get rid of it properly. - -/// A hash set implemented as a `HashMap` where the value is `()`. -/// -/// As with the [`HashMap`] type, a `HashSet` requires that the elements -/// implement the [`Eq`] and [`Hash`] traits. This can frequently be achieved by -/// using `#[derive(PartialEq, Eq, Hash)]`. If you implement these yourself, -/// it is important that the following property holds: -/// -/// ```text -/// k1 == k2 -> hash(k1) == hash(k2) -/// ``` -/// -/// In other words, if two keys are equal, their hashes must be equal. 
-/// -/// -/// It is a logic error for an item to be modified in such a way that the -/// item's hash, as determined by the [`Hash`] trait, or its equality, as -/// determined by the [`Eq`] trait, changes while it is in the set. This is -/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or -/// unsafe code. -/// -/// It is also a logic error for the [`Hash`] implementation of a key to panic. -/// This is generally only possible if the trait is implemented manually. If a -/// panic does occur then the contents of the `HashSet` may become corrupted and -/// some items may be dropped from the table. -/// -/// # Examples -/// -/// ``` -/// use hashbrown::HashSet; -/// // Type inference lets us omit an explicit type signature (which -/// // would be `HashSet` in this example). -/// let mut books = HashSet::new(); -/// -/// // Add some books. -/// books.insert("A Dance With Dragons".to_string()); -/// books.insert("To Kill a Mockingbird".to_string()); -/// books.insert("The Odyssey".to_string()); -/// books.insert("The Great Gatsby".to_string()); -/// -/// // Check for a specific one. -/// if !books.contains("The Winds of Winter") { -/// println!("We have {} books, but The Winds of Winter ain't one.", -/// books.len()); -/// } -/// -/// // Remove a book. -/// books.remove("The Odyssey"); -/// -/// // Iterate over everything. -/// for book in &books { -/// println!("{}", book); -/// } -/// ``` -/// -/// The easiest way to use `HashSet` with a custom type is to derive -/// [`Eq`] and [`Hash`]. We must also derive [`PartialEq`]. This will in the -/// future be implied by [`Eq`]. 
-/// -/// ``` -/// use hashbrown::HashSet; -/// #[derive(Hash, Eq, PartialEq, Debug)] -/// struct Viking { -/// name: String, -/// power: usize, -/// } -/// -/// let mut vikings = HashSet::new(); -/// -/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 }); -/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 }); -/// vikings.insert(Viking { name: "Olaf".to_string(), power: 4 }); -/// vikings.insert(Viking { name: "Harald".to_string(), power: 8 }); -/// -/// // Use derived implementation to print the vikings. -/// for x in &vikings { -/// println!("{:?}", x); -/// } -/// ``` -/// -/// A `HashSet` with fixed list of elements can be initialized from an array: -/// -/// ``` -/// use hashbrown::HashSet; -/// -/// let viking_names: HashSet<&'static str> = -/// [ "Einar", "Olaf", "Harald" ].iter().cloned().collect(); -/// // use the values stored in the set -/// ``` -/// -/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html -/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html -/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html -/// [`HashMap`]: struct.HashMap.html -/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html -/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html -pub struct HashSet { - pub(crate) map: HashMap, -} - -impl Clone for HashSet { - fn clone(&self) -> Self { - HashSet { - map: self.map.clone(), - } - } - - fn clone_from(&mut self, source: &Self) { - self.map.clone_from(&source.map); - } -} - -#[cfg(feature = "ahash")] -impl HashSet { - /// Creates an empty `HashSet`. - /// - /// The hash set is initially created with a capacity of 0, so it will not allocate until it - /// is first inserted into. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let set: HashSet = HashSet::new(); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn new() -> Self { - Self { - map: HashMap::new(), - } - } - - /// Creates an empty `HashSet` with the specified capacity. - /// - /// The hash set will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash set will not allocate. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let set: HashSet = HashSet::with_capacity(10); - /// assert!(set.capacity() >= 10); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn with_capacity(capacity: usize) -> Self { - Self { - map: HashMap::with_capacity(capacity), - } - } -} - -#[cfg(feature = "ahash")] -impl HashSet { - /// Creates an empty `HashSet`. - /// - /// The hash set is initially created with a capacity of 0, so it will not allocate until it - /// is first inserted into. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let set: HashSet = HashSet::new(); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn new_in(alloc: A) -> Self { - Self { - map: HashMap::new_in(alloc), - } - } - - /// Creates an empty `HashSet` with the specified capacity. - /// - /// The hash set will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash set will not allocate. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let set: HashSet = HashSet::with_capacity(10); - /// assert!(set.capacity() >= 10); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Self { - map: HashMap::with_capacity_in(capacity, alloc), - } - } -} - -impl HashSet { - /// Returns the number of elements the set can hold without reallocating. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let set: HashSet = HashSet::with_capacity(100); - /// assert!(set.capacity() >= 100); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn capacity(&self) -> usize { - self.map.capacity() - } - - /// An iterator visiting all elements in arbitrary order. - /// The iterator element type is `&'a T`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let mut set = HashSet::new(); - /// set.insert("a"); - /// set.insert("b"); - /// - /// // Will print in an arbitrary order. - /// for x in set.iter() { - /// println!("{}", x); - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn iter(&self) -> Iter<'_, T> { - Iter { - iter: self.map.keys(), - } - } - - /// Returns the number of elements in the set. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut v = HashSet::new(); - /// assert_eq!(v.len(), 0); - /// v.insert(1); - /// assert_eq!(v.len(), 1); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn len(&self) -> usize { - self.map.len() - } - - /// Returns `true` if the set contains no elements. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut v = HashSet::new(); - /// assert!(v.is_empty()); - /// v.insert(1); - /// assert!(!v.is_empty()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn is_empty(&self) -> bool { - self.map.is_empty() - } - - /// Clears the set, returning all elements in an iterator. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// assert!(!set.is_empty()); - /// - /// // print 1, 2, 3 in an arbitrary order - /// for i in set.drain() { - /// println!("{}", i); - /// } - /// - /// assert!(set.is_empty()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn drain(&mut self) -> Drain<'_, T, A> { - Drain { - iter: self.map.drain(), - } - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all elements `e` such that `f(&e)` returns `false`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let xs = [1,2,3,4,5,6]; - /// let mut set: HashSet = xs.iter().cloned().collect(); - /// set.retain(|&k| k % 2 == 0); - /// assert_eq!(set.len(), 3); - /// ``` - pub fn retain(&mut self, mut f: F) - where - F: FnMut(&T) -> bool, - { - self.map.retain(|k, _| f(k)); - } - - /// Drains elements which are true under the given predicate, - /// and returns an iterator over the removed items. - /// - /// In other words, move all elements `e` such that `f(&e)` returns `true` out - /// into another iterator. - /// - /// When the returned DrainedFilter is dropped, any remaining elements that satisfy - /// the predicate are dropped from the set. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet = (0..8).collect(); - /// let drained: HashSet = set.drain_filter(|v| v % 2 == 0).collect(); - /// - /// let mut evens = drained.into_iter().collect::>(); - /// let mut odds = set.into_iter().collect::>(); - /// evens.sort(); - /// odds.sort(); - /// - /// assert_eq!(evens, vec![0, 2, 4, 6]); - /// assert_eq!(odds, vec![1, 3, 5, 7]); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn drain_filter(&mut self, f: F) -> DrainFilter<'_, T, F, A> - where - F: FnMut(&T) -> bool, - { - DrainFilter { - f, - inner: DrainFilterInner { - iter: unsafe { self.map.table.iter() }, - table: &mut self.map.table, - }, - } - } - - /// Clears the set, removing all values. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut v = HashSet::new(); - /// v.insert(1); - /// v.clear(); - /// assert!(v.is_empty()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn clear(&mut self) { - self.map.clear(); - } -} - -impl HashSet { - /// Creates a new empty hash set which will use the given hasher to hash - /// keys. - /// - /// The hash set is also created with the default initial capacity. - /// - /// Warning: `hasher` is normally randomly generated, and - /// is designed to allow `HashSet`s to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. - /// - /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashMap to be useful, see its documentation for details. 
- /// - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let s = DefaultHashBuilder::default(); - /// let mut set = HashSet::with_hasher(s); - /// set.insert(2); - /// ``` - /// - /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html - #[cfg_attr(feature = "inline-more", inline)] - pub const fn with_hasher(hasher: S) -> Self { - Self { - map: HashMap::with_hasher(hasher), - } - } - - /// Creates an empty `HashSet` with the specified capacity, using - /// `hasher` to hash the keys. - /// - /// The hash set will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash set will not allocate. - /// - /// Warning: `hasher` is normally randomly generated, and - /// is designed to allow `HashSet`s to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. - /// - /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashMap to be useful, see its documentation for details. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let s = DefaultHashBuilder::default(); - /// let mut set = HashSet::with_capacity_and_hasher(10, s); - /// set.insert(1); - /// ``` - /// - /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html - #[cfg_attr(feature = "inline-more", inline)] - pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { - Self { - map: HashMap::with_capacity_and_hasher(capacity, hasher), - } - } -} - -impl HashSet -where - A: Allocator + Clone, -{ - /// Returns a reference to the underlying allocator. - #[inline] - pub fn allocator(&self) -> &A { - self.map.allocator() - } - - /// Creates a new empty hash set which will use the given hasher to hash - /// keys. 
- /// - /// The hash set is also created with the default initial capacity. - /// - /// Warning: `hasher` is normally randomly generated, and - /// is designed to allow `HashSet`s to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let s = DefaultHashBuilder::default(); - /// let mut set = HashSet::with_hasher(s); - /// set.insert(2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn with_hasher_in(hasher: S, alloc: A) -> Self { - Self { - map: HashMap::with_hasher_in(hasher, alloc), - } - } - - /// Creates an empty `HashSet` with the specified capacity, using - /// `hasher` to hash the keys. - /// - /// The hash set will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the hash set will not allocate. - /// - /// Warning: `hasher` is normally randomly generated, and - /// is designed to allow `HashSet`s to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let s = DefaultHashBuilder::default(); - /// let mut set = HashSet::with_capacity_and_hasher(10, s); - /// set.insert(1); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn with_capacity_and_hasher_in(capacity: usize, hasher: S, alloc: A) -> Self { - Self { - map: HashMap::with_capacity_and_hasher_in(capacity, hasher, alloc), - } - } - - /// Returns a reference to the set's [`BuildHasher`]. 
- /// - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// use hashbrown::hash_map::DefaultHashBuilder; - /// - /// let hasher = DefaultHashBuilder::default(); - /// let set: HashSet = HashSet::with_hasher(hasher); - /// let hasher: &DefaultHashBuilder = set.hasher(); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn hasher(&self) -> &S { - self.map.hasher() - } -} - -impl HashSet -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - /// Reserves capacity for at least `additional` more elements to be inserted - /// in the `HashSet`. The collection may reserve more space to avoid - /// frequent reallocations. - /// - /// # Panics - /// - /// Panics if the new allocation size overflows `usize`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let mut set: HashSet = HashSet::new(); - /// set.reserve(10); - /// assert!(set.capacity() >= 10); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn reserve(&mut self, additional: usize) { - self.map.reserve(additional); - } - - /// Tries to reserve capacity for at least `additional` more elements to be inserted - /// in the given `HashSet`. The collection may reserve more space to avoid - /// frequent reallocations. - /// - /// # Errors - /// - /// If the capacity overflows, or the allocator reports a failure, then an error - /// is returned. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let mut set: HashSet = HashSet::new(); - /// set.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.map.try_reserve(additional) - } - - /// Shrinks the capacity of the set as much as possible. 
It will drop - /// down as much as possible while maintaining the internal rules - /// and possibly leaving some space in accordance with the resize policy. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set = HashSet::with_capacity(100); - /// set.insert(1); - /// set.insert(2); - /// assert!(set.capacity() >= 100); - /// set.shrink_to_fit(); - /// assert!(set.capacity() >= 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn shrink_to_fit(&mut self) { - self.map.shrink_to_fit(); - } - - /// Shrinks the capacity of the set with a lower limit. It will drop - /// down no lower than the supplied limit while maintaining the internal rules - /// and possibly leaving some space in accordance with the resize policy. - /// - /// Panics if the current capacity is smaller than the supplied - /// minimum capacity. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set = HashSet::with_capacity(100); - /// set.insert(1); - /// set.insert(2); - /// assert!(set.capacity() >= 100); - /// set.shrink_to(10); - /// assert!(set.capacity() >= 10); - /// set.shrink_to(0); - /// assert!(set.capacity() >= 2); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn shrink_to(&mut self, min_capacity: usize) { - self.map.shrink_to(min_capacity); - } - - /// Visits the values representing the difference, - /// i.e., the values that are in `self` but not in `other`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect(); - /// - /// // Can be seen as `a - b`. 
- /// for x in a.difference(&b) { - /// println!("{}", x); // Print 1 - /// } - /// - /// let diff: HashSet<_> = a.difference(&b).collect(); - /// assert_eq!(diff, [1].iter().collect()); - /// - /// // Note that difference is not symmetric, - /// // and `b - a` means something else: - /// let diff: HashSet<_> = b.difference(&a).collect(); - /// assert_eq!(diff, [4].iter().collect()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn difference<'a>(&'a self, other: &'a Self) -> Difference<'a, T, S, A> { - Difference { - iter: self.iter(), - other, - } - } - - /// Visits the values representing the symmetric difference, - /// i.e., the values that are in `self` or in `other` but not in both. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect(); - /// - /// // Print 1, 4 in arbitrary order. - /// for x in a.symmetric_difference(&b) { - /// println!("{}", x); - /// } - /// - /// let diff1: HashSet<_> = a.symmetric_difference(&b).collect(); - /// let diff2: HashSet<_> = b.symmetric_difference(&a).collect(); - /// - /// assert_eq!(diff1, diff2); - /// assert_eq!(diff1, [1, 4].iter().collect()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn symmetric_difference<'a>(&'a self, other: &'a Self) -> SymmetricDifference<'a, T, S, A> { - SymmetricDifference { - iter: self.difference(other).chain(other.difference(self)), - } - } - - /// Visits the values representing the intersection, - /// i.e., the values that are both in `self` and `other`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect(); - /// - /// // Print 2, 3 in arbitrary order. 
- /// for x in a.intersection(&b) { - /// println!("{}", x); - /// } - /// - /// let intersection: HashSet<_> = a.intersection(&b).collect(); - /// assert_eq!(intersection, [2, 3].iter().collect()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn intersection<'a>(&'a self, other: &'a Self) -> Intersection<'a, T, S, A> { - let (smaller, larger) = if self.len() <= other.len() { - (self, other) - } else { - (other, self) - }; - Intersection { - iter: smaller.iter(), - other: larger, - } - } - - /// Visits the values representing the union, - /// i.e., all the values in `self` or `other`, without duplicates. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect(); - /// - /// // Print 1, 2, 3, 4 in arbitrary order. - /// for x in a.union(&b) { - /// println!("{}", x); - /// } - /// - /// let union: HashSet<_> = a.union(&b).collect(); - /// assert_eq!(union, [1, 2, 3, 4].iter().collect()); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn union<'a>(&'a self, other: &'a Self) -> Union<'a, T, S, A> { - // We'll iterate one set in full, and only the remaining difference from the other. - // Use the smaller set for the difference in order to reduce hash lookups. - let (smaller, larger) = if self.len() <= other.len() { - (self, other) - } else { - (other, self) - }; - Union { - iter: larger.iter().chain(smaller.difference(larger)), - } - } - - /// Returns `true` if the set contains a value. - /// - /// The value may be any borrowed form of the set's value type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the value type. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// assert_eq!(set.contains(&1), true); - /// assert_eq!(set.contains(&4), false); - /// ``` - /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - #[cfg_attr(feature = "inline-more", inline)] - pub fn contains(&self, value: &Q) -> bool - where - T: Borrow, - Q: Hash + Eq, - { - self.map.contains_key(value) - } - - /// Returns a reference to the value in the set, if any, that is equal to the given value. - /// - /// The value may be any borrowed form of the set's value type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the value type. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// assert_eq!(set.get(&2), Some(&2)); - /// assert_eq!(set.get(&4), None); - /// ``` - /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - #[cfg_attr(feature = "inline-more", inline)] - pub fn get(&self, value: &Q) -> Option<&T> - where - T: Borrow, - Q: Hash + Eq, - { - // Avoid `Option::map` because it bloats LLVM IR. - match self.map.get_key_value(value) { - Some((k, _)) => Some(k), - None => None, - } - } - - /// Inserts the given `value` into the set if it is not present, then - /// returns a reference to the value in the set. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// assert_eq!(set.len(), 3); - /// assert_eq!(set.get_or_insert(2), &2); - /// assert_eq!(set.get_or_insert(100), &100); - /// assert_eq!(set.len(), 4); // 100 was inserted - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get_or_insert(&mut self, value: T) -> &T { - // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with - // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`. - self.map - .raw_entry_mut() - .from_key(&value) - .or_insert(value, ()) - .0 - } - - /// Inserts an owned copy of the given `value` into the set if it is not - /// present, then returns a reference to the value in the set. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet = ["cat", "dog", "horse"] - /// .iter().map(|&pet| pet.to_owned()).collect(); - /// - /// assert_eq!(set.len(), 3); - /// for &pet in &["cat", "dog", "fish"] { - /// let value = set.get_or_insert_owned(pet); - /// assert_eq!(value, pet); - /// } - /// assert_eq!(set.len(), 4); // a new "fish" was inserted - /// ``` - #[inline] - pub fn get_or_insert_owned(&mut self, value: &Q) -> &T - where - T: Borrow, - Q: Hash + Eq + ToOwned, - { - // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with - // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`. - self.map - .raw_entry_mut() - .from_key(value) - .or_insert_with(|| (value.to_owned(), ())) - .0 - } - - /// Inserts a value computed from `f` into the set if the given `value` is - /// not present, then returns a reference to the value in the set. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet = ["cat", "dog", "horse"] - /// .iter().map(|&pet| pet.to_owned()).collect(); - /// - /// assert_eq!(set.len(), 3); - /// for &pet in &["cat", "dog", "fish"] { - /// let value = set.get_or_insert_with(pet, str::to_owned); - /// assert_eq!(value, pet); - /// } - /// assert_eq!(set.len(), 4); // a new "fish" was inserted - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get_or_insert_with(&mut self, value: &Q, f: F) -> &T - where - T: Borrow, - Q: Hash + Eq, - F: FnOnce(&Q) -> T, - { - // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with - // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`. - self.map - .raw_entry_mut() - .from_key(value) - .or_insert_with(|| (f(value), ())) - .0 - } - - /// Gets the given value's corresponding entry in the set for in-place manipulation. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// use hashbrown::hash_set::Entry::*; - /// - /// let mut singles = HashSet::new(); - /// let mut dupes = HashSet::new(); - /// - /// for ch in "a short treatise on fungi".chars() { - /// if let Vacant(dupe_entry) = dupes.entry(ch) { - /// // We haven't already seen a duplicate, so - /// // check if we've at least seen it once. - /// match singles.entry(ch) { - /// Vacant(single_entry) => { - /// // We found a new character for the first time. - /// single_entry.insert() - /// } - /// Occupied(single_entry) => { - /// // We've already seen this once, "move" it to dupes. 
- /// single_entry.remove(); - /// dupe_entry.insert(); - /// } - /// } - /// } - /// } - /// - /// assert!(!singles.contains(&'t') && dupes.contains(&'t')); - /// assert!(singles.contains(&'u') && !dupes.contains(&'u')); - /// assert!(!singles.contains(&'v') && !dupes.contains(&'v')); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn entry(&mut self, value: T) -> Entry<'_, T, S, A> { - match self.map.entry(value) { - map::Entry::Occupied(entry) => Entry::Occupied(OccupiedEntry { inner: entry }), - map::Entry::Vacant(entry) => Entry::Vacant(VacantEntry { inner: entry }), - } - } - - /// Returns `true` if `self` has no elements in common with `other`. - /// This is equivalent to checking for an empty intersection. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// let mut b = HashSet::new(); - /// - /// assert_eq!(a.is_disjoint(&b), true); - /// b.insert(4); - /// assert_eq!(a.is_disjoint(&b), true); - /// b.insert(1); - /// assert_eq!(a.is_disjoint(&b), false); - /// ``` - pub fn is_disjoint(&self, other: &Self) -> bool { - self.iter().all(|v| !other.contains(v)) - } - - /// Returns `true` if the set is a subset of another, - /// i.e., `other` contains at least all the values in `self`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let sup: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// let mut set = HashSet::new(); - /// - /// assert_eq!(set.is_subset(&sup), true); - /// set.insert(2); - /// assert_eq!(set.is_subset(&sup), true); - /// set.insert(4); - /// assert_eq!(set.is_subset(&sup), false); - /// ``` - pub fn is_subset(&self, other: &Self) -> bool { - self.len() <= other.len() && self.iter().all(|v| other.contains(v)) - } - - /// Returns `true` if the set is a superset of another, - /// i.e., `self` contains at least all the values in `other`. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let sub: HashSet<_> = [1, 2].iter().cloned().collect(); - /// let mut set = HashSet::new(); - /// - /// assert_eq!(set.is_superset(&sub), false); - /// - /// set.insert(0); - /// set.insert(1); - /// assert_eq!(set.is_superset(&sub), false); - /// - /// set.insert(2); - /// assert_eq!(set.is_superset(&sub), true); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn is_superset(&self, other: &Self) -> bool { - other.is_subset(self) - } - - /// Adds a value to the set. - /// - /// If the set did not have this value present, `true` is returned. - /// - /// If the set did have this value present, `false` is returned. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set = HashSet::new(); - /// - /// assert_eq!(set.insert(2), true); - /// assert_eq!(set.insert(2), false); - /// assert_eq!(set.len(), 1); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(&mut self, value: T) -> bool { - self.map.insert(value, ()).is_none() - } - - /// Insert a value the set without checking if the value already exists in the set. - /// - /// Returns a reference to the value just inserted. - /// - /// This operation is safe if a value does not exist in the set. - /// - /// However, if a value exists in the set already, the behavior is unspecified: - /// this operation may panic, loop forever, or any following operation with the set - /// may panic, loop forever or return arbitrary result. - /// - /// That said, this operation (and following operations) are guaranteed to - /// not violate memory safety. - /// - /// This operation is faster than regular insert, because it does not perform - /// lookup before insertion. - /// - /// This operation is useful during initial population of the set. - /// For example, when constructing a set from another set, we know - /// that values are unique. 
- #[cfg_attr(feature = "inline-more", inline)] - pub fn insert_unique_unchecked(&mut self, value: T) -> &T { - self.map.insert_unique_unchecked(value, ()).0 - } - - /// Adds a value to the set, replacing the existing value, if any, that is equal to the given - /// one. Returns the replaced value. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set = HashSet::new(); - /// set.insert(Vec::::new()); - /// - /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 0); - /// set.replace(Vec::with_capacity(10)); - /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 10); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace(&mut self, value: T) -> Option { - match self.map.entry(value) { - map::Entry::Occupied(occupied) => Some(occupied.replace_key()), - map::Entry::Vacant(vacant) => { - vacant.insert(()); - None - } - } - } - - /// Removes a value from the set. Returns whether the value was - /// present in the set. - /// - /// The value may be any borrowed form of the set's value type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the value type. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set = HashSet::new(); - /// - /// set.insert(2); - /// assert_eq!(set.remove(&2), true); - /// assert_eq!(set.remove(&2), false); - /// ``` - /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove(&mut self, value: &Q) -> bool - where - T: Borrow, - Q: Hash + Eq, - { - self.map.remove(value).is_some() - } - - /// Removes and returns the value in the set, if any, that is equal to the given one. - /// - /// The value may be any borrowed form of the set's value type, but - /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for - /// the value type. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); - /// assert_eq!(set.take(&2), Some(2)); - /// assert_eq!(set.take(&2), None); - /// ``` - /// - /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html - /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html - #[cfg_attr(feature = "inline-more", inline)] - pub fn take(&mut self, value: &Q) -> Option - where - T: Borrow, - Q: Hash + Eq, - { - // Avoid `Option::map` because it bloats LLVM IR. - match self.map.remove_entry(value) { - Some((k, _)) => Some(k), - None => None, - } - } -} - -impl PartialEq for HashSet -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - fn eq(&self, other: &Self) -> bool { - if self.len() != other.len() { - return false; - } - - self.iter().all(|key| other.contains(key)) - } -} - -impl Eq for HashSet -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ -} - -impl fmt::Debug for HashSet -where - T: fmt::Debug, - A: Allocator + Clone, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_set().entries(self.iter()).finish() - } -} - -impl From> for HashSet -where - A: Allocator + Clone, -{ - fn from(map: HashMap) -> Self { - Self { map } - } -} - -impl FromIterator for HashSet -where - T: Eq + Hash, - S: BuildHasher + Default, - A: Default + Allocator + Clone, -{ - #[cfg_attr(feature = "inline-more", inline)] - fn from_iter>(iter: I) -> Self { - let mut set = Self::with_hasher_in(Default::default(), Default::default()); - set.extend(iter); - set - } -} - -// The default hasher is used to match the std implementation signature -#[cfg(feature = "ahash")] -impl From<[T; N]> for HashSet -where - T: Eq + Hash, - A: Default + Allocator + Clone, -{ - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let set1 = HashSet::from([1, 2, 3, 4]); - /// let set2: HashSet<_> = [1, 2, 3, 4].into(); - /// assert_eq!(set1, set2); 
- /// ``` - fn from(arr: [T; N]) -> Self { - arr.into_iter().collect() - } -} - -impl Extend for HashSet -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - #[cfg_attr(feature = "inline-more", inline)] - fn extend>(&mut self, iter: I) { - self.map.extend(iter.into_iter().map(|k| (k, ()))); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_one(&mut self, k: T) { - self.map.insert(k, ()); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_reserve(&mut self, additional: usize) { - Extend::<(T, ())>::extend_reserve(&mut self.map, additional); - } -} - -impl<'a, T, S, A> Extend<&'a T> for HashSet -where - T: 'a + Eq + Hash + Copy, - S: BuildHasher, - A: Allocator + Clone, -{ - #[cfg_attr(feature = "inline-more", inline)] - fn extend>(&mut self, iter: I) { - self.extend(iter.into_iter().copied()); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_one(&mut self, k: &'a T) { - self.map.insert(*k, ()); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_reserve(&mut self, additional: usize) { - Extend::<(T, ())>::extend_reserve(&mut self.map, additional); - } -} - -impl Default for HashSet -where - S: Default, - A: Default + Allocator + Clone, -{ - /// Creates an empty `HashSet` with the `Default` value for the hasher. - #[cfg_attr(feature = "inline-more", inline)] - fn default() -> Self { - Self { - map: HashMap::default(), - } - } -} - -impl BitOr<&HashSet> for &HashSet -where - T: Eq + Hash + Clone, - S: BuildHasher + Default, - A: Allocator + Clone, -{ - type Output = HashSet; - - /// Returns the union of `self` and `rhs` as a new `HashSet`. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); - /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); - /// - /// let set = &a | &b; - /// - /// let mut i = 0; - /// let expected = [1, 2, 3, 4, 5]; - /// for x in &set { - /// assert!(expected.contains(x)); - /// i += 1; - /// } - /// assert_eq!(i, expected.len()); - /// ``` - fn bitor(self, rhs: &HashSet) -> HashSet { - self.union(rhs).cloned().collect() - } -} - -impl BitAnd<&HashSet> for &HashSet -where - T: Eq + Hash + Clone, - S: BuildHasher + Default, - A: Allocator + Clone, -{ - type Output = HashSet; - - /// Returns the intersection of `self` and `rhs` as a new `HashSet`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); - /// let b: HashSet<_> = vec![2, 3, 4].into_iter().collect(); - /// - /// let set = &a & &b; - /// - /// let mut i = 0; - /// let expected = [2, 3]; - /// for x in &set { - /// assert!(expected.contains(x)); - /// i += 1; - /// } - /// assert_eq!(i, expected.len()); - /// ``` - fn bitand(self, rhs: &HashSet) -> HashSet { - self.intersection(rhs).cloned().collect() - } -} - -impl BitXor<&HashSet> for &HashSet -where - T: Eq + Hash + Clone, - S: BuildHasher + Default, -{ - type Output = HashSet; - - /// Returns the symmetric difference of `self` and `rhs` as a new `HashSet`. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); - /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); - /// - /// let set = &a ^ &b; - /// - /// let mut i = 0; - /// let expected = [1, 2, 4, 5]; - /// for x in &set { - /// assert!(expected.contains(x)); - /// i += 1; - /// } - /// assert_eq!(i, expected.len()); - /// ``` - fn bitxor(self, rhs: &HashSet) -> HashSet { - self.symmetric_difference(rhs).cloned().collect() - } -} - -impl Sub<&HashSet> for &HashSet -where - T: Eq + Hash + Clone, - S: BuildHasher + Default, -{ - type Output = HashSet; - - /// Returns the difference of `self` and `rhs` as a new `HashSet`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); - /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); - /// - /// let set = &a - &b; - /// - /// let mut i = 0; - /// let expected = [1, 2]; - /// for x in &set { - /// assert!(expected.contains(x)); - /// i += 1; - /// } - /// assert_eq!(i, expected.len()); - /// ``` - fn sub(self, rhs: &HashSet) -> HashSet { - self.difference(rhs).cloned().collect() - } -} - -/// An iterator over the items of a `HashSet`. -/// -/// This `struct` is created by the [`iter`] method on [`HashSet`]. -/// See its documentation for more. -/// -/// [`HashSet`]: struct.HashSet.html -/// [`iter`]: struct.HashSet.html#method.iter -pub struct Iter<'a, K> { - iter: Keys<'a, K, ()>, -} - -/// An owning iterator over the items of a `HashSet`. -/// -/// This `struct` is created by the [`into_iter`] method on [`HashSet`] -/// (provided by the `IntoIterator` trait). See its documentation for more. -/// -/// [`HashSet`]: struct.HashSet.html -/// [`into_iter`]: struct.HashSet.html#method.into_iter -pub struct IntoIter { - iter: map::IntoIter, -} - -/// A draining iterator over the items of a `HashSet`. 
-/// -/// This `struct` is created by the [`drain`] method on [`HashSet`]. -/// See its documentation for more. -/// -/// [`HashSet`]: struct.HashSet.html -/// [`drain`]: struct.HashSet.html#method.drain -pub struct Drain<'a, K, A: Allocator + Clone = Global> { - iter: map::Drain<'a, K, (), A>, -} - -/// A draining iterator over entries of a `HashSet` which don't satisfy the predicate `f`. -/// -/// This `struct` is created by the [`drain_filter`] method on [`HashSet`]. See its -/// documentation for more. -/// -/// [`drain_filter`]: struct.HashSet.html#method.drain_filter -/// [`HashSet`]: struct.HashSet.html -pub struct DrainFilter<'a, K, F, A: Allocator + Clone = Global> -where - F: FnMut(&K) -> bool, -{ - f: F, - inner: DrainFilterInner<'a, K, (), A>, -} - -/// A lazy iterator producing elements in the intersection of `HashSet`s. -/// -/// This `struct` is created by the [`intersection`] method on [`HashSet`]. -/// See its documentation for more. -/// -/// [`HashSet`]: struct.HashSet.html -/// [`intersection`]: struct.HashSet.html#method.intersection -pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> { - // iterator of the first set - iter: Iter<'a, T>, - // the second set - other: &'a HashSet, -} - -/// A lazy iterator producing elements in the difference of `HashSet`s. -/// -/// This `struct` is created by the [`difference`] method on [`HashSet`]. -/// See its documentation for more. -/// -/// [`HashSet`]: struct.HashSet.html -/// [`difference`]: struct.HashSet.html#method.difference -pub struct Difference<'a, T, S, A: Allocator + Clone = Global> { - // iterator of the first set - iter: Iter<'a, T>, - // the second set - other: &'a HashSet, -} - -/// A lazy iterator producing elements in the symmetric difference of `HashSet`s. -/// -/// This `struct` is created by the [`symmetric_difference`] method on -/// [`HashSet`]. See its documentation for more. 
-/// -/// [`HashSet`]: struct.HashSet.html -/// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference -pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { - iter: Chain, Difference<'a, T, S, A>>, -} - -/// A lazy iterator producing elements in the union of `HashSet`s. -/// -/// This `struct` is created by the [`union`] method on [`HashSet`]. -/// See its documentation for more. -/// -/// [`HashSet`]: struct.HashSet.html -/// [`union`]: struct.HashSet.html#method.union -pub struct Union<'a, T, S, A: Allocator + Clone = Global> { - iter: Chain, Difference<'a, T, S, A>>, -} - -impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet { - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - #[cfg_attr(feature = "inline-more", inline)] - fn into_iter(self) -> Iter<'a, T> { - self.iter() - } -} - -impl IntoIterator for HashSet { - type Item = T; - type IntoIter = IntoIter; - - /// Creates a consuming iterator, that is, one that moves each value out - /// of the set in arbitrary order. The set cannot be used after calling - /// this. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// let mut set = HashSet::new(); - /// set.insert("a".to_string()); - /// set.insert("b".to_string()); - /// - /// // Not possible to collect to a Vec with a regular `.iter()`. - /// let v: Vec = set.into_iter().collect(); - /// - /// // Will print in an arbitrary order. 
- /// for x in &v { - /// println!("{}", x); - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - fn into_iter(self) -> IntoIter { - IntoIter { - iter: self.map.into_iter(), - } - } -} - -impl Clone for Iter<'_, K> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Iter { - iter: self.iter.clone(), - } - } -} -impl<'a, K> Iterator for Iter<'a, K> { - type Item = &'a K; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<&'a K> { - self.iter.next() - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} -impl<'a, K> ExactSizeIterator for Iter<'a, K> { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.iter.len() - } -} -impl FusedIterator for Iter<'_, K> {} - -impl fmt::Debug for Iter<'_, K> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl Iterator for IntoIter { - type Item = K; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option { - // Avoid `Option::map` because it bloats LLVM IR. - match self.iter.next() { - Some((k, _)) => Some(k), - None => None, - } - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} -impl ExactSizeIterator for IntoIter { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.iter.len() - } -} -impl FusedIterator for IntoIter {} - -impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let entries_iter = self.iter.iter().map(|(k, _)| k); - f.debug_list().entries(entries_iter).finish() - } -} - -impl Iterator for Drain<'_, K, A> { - type Item = K; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option { - // Avoid `Option::map` because it bloats LLVM IR. 
- match self.iter.next() { - Some((k, _)) => Some(k), - None => None, - } - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} -impl ExactSizeIterator for Drain<'_, K, A> { - #[cfg_attr(feature = "inline-more", inline)] - fn len(&self) -> usize { - self.iter.len() - } -} -impl FusedIterator for Drain<'_, K, A> {} - -impl fmt::Debug for Drain<'_, K, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let entries_iter = self.iter.iter().map(|(k, _)| k); - f.debug_list().entries(entries_iter).finish() - } -} - -impl<'a, K, F, A: Allocator + Clone> Drop for DrainFilter<'a, K, F, A> -where - F: FnMut(&K) -> bool, -{ - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - while let Some(item) = self.next() { - let guard = ConsumeAllOnDrop(self); - drop(item); - mem::forget(guard); - } - } -} - -impl Iterator for DrainFilter<'_, K, F, A> -where - F: FnMut(&K) -> bool, -{ - type Item = K; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option { - let f = &mut self.f; - let (k, _) = self.inner.next(&mut |k, _| f(k))?; - Some(k) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - (0, self.inner.iter.size_hint().1) - } -} - -impl FusedIterator for DrainFilter<'_, K, F, A> where - F: FnMut(&K) -> bool -{ -} - -impl Clone for Intersection<'_, T, S, A> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Intersection { - iter: self.iter.clone(), - ..*self - } - } -} - -impl<'a, T, S, A> Iterator for Intersection<'a, T, S, A> -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - type Item = &'a T; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<&'a T> { - loop { - let elt = self.iter.next()?; - if self.other.contains(elt) { - return Some(elt); - } - } - } - - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = 
self.iter.size_hint(); - (0, upper) - } -} - -impl fmt::Debug for Intersection<'_, T, S, A> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl FusedIterator for Intersection<'_, T, S, A> -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ -} - -impl Clone for Difference<'_, T, S, A> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Difference { - iter: self.iter.clone(), - ..*self - } - } -} - -impl<'a, T, S, A> Iterator for Difference<'a, T, S, A> -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - type Item = &'a T; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<&'a T> { - loop { - let elt = self.iter.next()?; - if !self.other.contains(elt) { - return Some(elt); - } - } - } - - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) - } -} - -impl FusedIterator for Difference<'_, T, S, A> -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ -} - -impl fmt::Debug for Difference<'_, T, S, A> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl Clone for SymmetricDifference<'_, T, S, A> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - SymmetricDifference { - iter: self.iter.clone(), - } - } -} - -impl<'a, T, S, A> Iterator for SymmetricDifference<'a, T, S, A> -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - type Item = &'a T; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<&'a T> { - self.iter.next() - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) 
{ - self.iter.size_hint() - } -} - -impl FusedIterator for SymmetricDifference<'_, T, S, A> -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ -} - -impl fmt::Debug for SymmetricDifference<'_, T, S, A> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl Clone for Union<'_, T, S, A> { - #[cfg_attr(feature = "inline-more", inline)] - fn clone(&self) -> Self { - Union { - iter: self.iter.clone(), - } - } -} - -impl FusedIterator for Union<'_, T, S, A> -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ -} - -impl fmt::Debug for Union<'_, T, S, A> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl<'a, T, S, A> Iterator for Union<'a, T, S, A> -where - T: Eq + Hash, - S: BuildHasher, - A: Allocator + Clone, -{ - type Item = &'a T; - - #[cfg_attr(feature = "inline-more", inline)] - fn next(&mut self) -> Option<&'a T> { - self.iter.next() - } - #[cfg_attr(feature = "inline-more", inline)] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -/// A view into a single entry in a set, which may either be vacant or occupied. -/// -/// This `enum` is constructed from the [`entry`] method on [`HashSet`]. 
-/// -/// [`HashSet`]: struct.HashSet.html -/// [`entry`]: struct.HashSet.html#method.entry -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_set::{Entry, HashSet, OccupiedEntry}; -/// -/// let mut set = HashSet::new(); -/// set.extend(["a", "b", "c"]); -/// assert_eq!(set.len(), 3); -/// -/// // Existing value (insert) -/// let entry: Entry<_, _> = set.entry("a"); -/// let _raw_o: OccupiedEntry<_, _> = entry.insert(); -/// assert_eq!(set.len(), 3); -/// // Nonexistent value (insert) -/// set.entry("d").insert(); -/// -/// // Existing value (or_insert) -/// set.entry("b").or_insert(); -/// // Nonexistent value (or_insert) -/// set.entry("e").or_insert(); -/// -/// println!("Our HashSet: {:?}", set); -/// -/// let mut vec: Vec<_> = set.iter().copied().collect(); -/// // The `Iter` iterator produces items in arbitrary order, so the -/// // items must be sorted to test them against a sorted array. -/// vec.sort_unstable(); -/// assert_eq!(vec, ["a", "b", "c", "d", "e"]); -/// ``` -pub enum Entry<'a, T, S, A = Global> -where - A: Allocator + Clone, -{ - /// An occupied entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_set::{Entry, HashSet}; - /// let mut set: HashSet<_> = ["a", "b"].into(); - /// - /// match set.entry("a") { - /// Entry::Vacant(_) => unreachable!(), - /// Entry::Occupied(_) => { } - /// } - /// ``` - Occupied(OccupiedEntry<'a, T, S, A>), - - /// A vacant entry. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_set::{Entry, HashSet}; - /// let mut set: HashSet<&str> = HashSet::new(); - /// - /// match set.entry("a") { - /// Entry::Occupied(_) => unreachable!(), - /// Entry::Vacant(_) => { } - /// } - /// ``` - Vacant(VacantEntry<'a, T, S, A>), -} - -impl fmt::Debug for Entry<'_, T, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), - Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), - } - } -} - -/// A view into an occupied entry in a `HashSet`. -/// It is part of the [`Entry`] enum. -/// -/// [`Entry`]: enum.Entry.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_set::{Entry, HashSet, OccupiedEntry}; -/// -/// let mut set = HashSet::new(); -/// set.extend(["a", "b", "c"]); -/// -/// let _entry_o: OccupiedEntry<_, _> = set.entry("a").insert(); -/// assert_eq!(set.len(), 3); -/// -/// // Existing key -/// match set.entry("a") { -/// Entry::Vacant(_) => unreachable!(), -/// Entry::Occupied(view) => { -/// assert_eq!(view.get(), &"a"); -/// } -/// } -/// -/// assert_eq!(set.len(), 3); -/// -/// // Existing key (take) -/// match set.entry("c") { -/// Entry::Vacant(_) => unreachable!(), -/// Entry::Occupied(view) => { -/// assert_eq!(view.remove(), "c"); -/// } -/// } -/// assert_eq!(set.get(&"c"), None); -/// assert_eq!(set.len(), 2); -/// ``` -pub struct OccupiedEntry<'a, T, S, A: Allocator + Clone = Global> { - inner: map::OccupiedEntry<'a, T, (), S, A>, -} - -impl fmt::Debug for OccupiedEntry<'_, T, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OccupiedEntry") - .field("value", self.get()) - .finish() - } -} - -/// A view into a vacant entry in a `HashSet`. -/// It is part of the [`Entry`] enum. 
-/// -/// [`Entry`]: enum.Entry.html -/// -/// # Examples -/// -/// ``` -/// use hashbrown::hash_set::{Entry, HashSet, VacantEntry}; -/// -/// let mut set = HashSet::<&str>::new(); -/// -/// let entry_v: VacantEntry<_, _> = match set.entry("a") { -/// Entry::Vacant(view) => view, -/// Entry::Occupied(_) => unreachable!(), -/// }; -/// entry_v.insert(); -/// assert!(set.contains("a") && set.len() == 1); -/// -/// // Nonexistent key (insert) -/// match set.entry("b") { -/// Entry::Vacant(view) => view.insert(), -/// Entry::Occupied(_) => unreachable!(), -/// } -/// assert!(set.contains("b") && set.len() == 2); -/// ``` -pub struct VacantEntry<'a, T, S, A: Allocator + Clone = Global> { - inner: map::VacantEntry<'a, T, (), S, A>, -} - -impl fmt::Debug for VacantEntry<'_, T, S, A> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("VacantEntry").field(self.get()).finish() - } -} - -impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> { - /// Sets the value of the entry, and returns an OccupiedEntry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet<&str> = HashSet::new(); - /// let entry = set.entry("horseyland").insert(); - /// - /// assert_eq!(entry.get(), &"horseyland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(self) -> OccupiedEntry<'a, T, S, A> - where - T: Hash, - S: BuildHasher, - { - match self { - Entry::Occupied(entry) => entry, - Entry::Vacant(entry) => entry.insert_entry(), - } - } - - /// Ensures a value is in the entry by inserting if it was vacant. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet<&str> = HashSet::new(); - /// - /// // nonexistent key - /// set.entry("poneyland").or_insert(); - /// assert!(set.contains("poneyland")); - /// - /// // existing key - /// set.entry("poneyland").or_insert(); - /// assert!(set.contains("poneyland")); - /// assert_eq!(set.len(), 1); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn or_insert(self) - where - T: Hash, - S: BuildHasher, - { - if let Entry::Vacant(entry) = self { - entry.insert(); - } - } - - /// Returns a reference to this entry's value. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet<&str> = HashSet::new(); - /// set.entry("poneyland").or_insert(); - /// // existing key - /// assert_eq!(set.entry("poneyland").get(), &"poneyland"); - /// // nonexistent key - /// assert_eq!(set.entry("horseland").get(), &"horseland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get(&self) -> &T { - match *self { - Entry::Occupied(ref entry) => entry.get(), - Entry::Vacant(ref entry) => entry.get(), - } - } -} - -impl OccupiedEntry<'_, T, S, A> { - /// Gets a reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_set::{Entry, HashSet}; - /// - /// let mut set: HashSet<&str> = HashSet::new(); - /// set.entry("poneyland").or_insert(); - /// - /// match set.entry("poneyland") { - /// Entry::Vacant(_) => panic!(), - /// Entry::Occupied(entry) => assert_eq!(entry.get(), &"poneyland"), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get(&self) -> &T { - self.inner.key() - } - - /// Takes the value out of the entry, and returns it. - /// Keeps the allocated memory for reuse. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// use hashbrown::hash_set::Entry; - /// - /// let mut set: HashSet<&str> = HashSet::new(); - /// // The set is empty - /// assert!(set.is_empty() && set.capacity() == 0); - /// - /// set.entry("poneyland").or_insert(); - /// let capacity_before_remove = set.capacity(); - /// - /// if let Entry::Occupied(o) = set.entry("poneyland") { - /// assert_eq!(o.remove(), "poneyland"); - /// } - /// - /// assert_eq!(set.contains("poneyland"), false); - /// // Now set hold none elements but capacity is equal to the old one - /// assert!(set.len() == 0 && set.capacity() == capacity_before_remove); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn remove(self) -> T { - self.inner.remove_entry().0 - } - - /// Replaces the entry, returning the old value. The new value in the hash map will be - /// the value used to create this entry. - /// - /// # Panics - /// - /// Will panic if this OccupiedEntry was created through [`Entry::insert`]. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_set::{Entry, HashSet}; - /// use std::rc::Rc; - /// - /// let mut set: HashSet> = HashSet::new(); - /// let key_one = Rc::new("Stringthing".to_string()); - /// let key_two = Rc::new("Stringthing".to_string()); - /// - /// set.insert(key_one.clone()); - /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); - /// - /// match set.entry(key_two.clone()) { - /// Entry::Occupied(entry) => { - /// let old_key: Rc = entry.replace(); - /// assert!(Rc::ptr_eq(&key_one, &old_key)); - /// } - /// Entry::Vacant(_) => panic!(), - /// } - /// - /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); - /// assert!(set.contains(&"Stringthing".to_owned())); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn replace(self) -> T { - self.inner.replace_key() - } -} - -impl<'a, T, S, A: Allocator + Clone> VacantEntry<'a, T, S, A> { - /// Gets a reference to the value that would be used when inserting - /// through the `VacantEntry`. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// - /// let mut set: HashSet<&str> = HashSet::new(); - /// assert_eq!(set.entry("poneyland").get(), &"poneyland"); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn get(&self) -> &T { - self.inner.key() - } - - /// Take ownership of the value. - /// - /// # Examples - /// - /// ``` - /// use hashbrown::hash_set::{Entry, HashSet}; - /// - /// let mut set: HashSet<&str> = HashSet::new(); - /// - /// match set.entry("poneyland") { - /// Entry::Occupied(_) => panic!(), - /// Entry::Vacant(v) => assert_eq!(v.into_value(), "poneyland"), - /// } - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn into_value(self) -> T { - self.inner.into_key() - } - - /// Sets the value of the entry with the VacantEntry's value. 
- /// - /// # Examples - /// - /// ``` - /// use hashbrown::HashSet; - /// use hashbrown::hash_set::Entry; - /// - /// let mut set: HashSet<&str> = HashSet::new(); - /// - /// if let Entry::Vacant(o) = set.entry("poneyland") { - /// o.insert(); - /// } - /// assert!(set.contains("poneyland")); - /// ``` - #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(self) - where - T: Hash, - S: BuildHasher, - { - self.inner.insert(()); - } - - #[cfg_attr(feature = "inline-more", inline)] - fn insert_entry(self) -> OccupiedEntry<'a, T, S, A> - where - T: Hash, - S: BuildHasher, - { - OccupiedEntry { - inner: self.inner.insert_entry(()), - } - } -} - -#[allow(dead_code)] -fn assert_covariance() { - fn set<'new>(v: HashSet<&'static str>) -> HashSet<&'new str> { - v - } - fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> { - v - } - fn into_iter<'new, A: Allocator + Clone>( - v: IntoIter<&'static str, A>, - ) -> IntoIter<&'new str, A> { - v - } - fn difference<'a, 'new, A: Allocator + Clone>( - v: Difference<'a, &'static str, DefaultHashBuilder, A>, - ) -> Difference<'a, &'new str, DefaultHashBuilder, A> { - v - } - fn symmetric_difference<'a, 'new, A: Allocator + Clone>( - v: SymmetricDifference<'a, &'static str, DefaultHashBuilder, A>, - ) -> SymmetricDifference<'a, &'new str, DefaultHashBuilder, A> { - v - } - fn intersection<'a, 'new, A: Allocator + Clone>( - v: Intersection<'a, &'static str, DefaultHashBuilder, A>, - ) -> Intersection<'a, &'new str, DefaultHashBuilder, A> { - v - } - fn union<'a, 'new, A: Allocator + Clone>( - v: Union<'a, &'static str, DefaultHashBuilder, A>, - ) -> Union<'a, &'new str, DefaultHashBuilder, A> { - v - } - fn drain<'new, A: Allocator + Clone>( - d: Drain<'static, &'static str, A>, - ) -> Drain<'new, &'new str, A> { - d - } -} - -#[cfg(test)] -mod test_set { - use super::super::map::DefaultHashBuilder; - use super::HashSet; - use std::vec::Vec; - - #[test] - fn test_zero_capacities() { - type HS = HashSet; - - let 
s = HS::new(); - assert_eq!(s.capacity(), 0); - - let s = HS::default(); - assert_eq!(s.capacity(), 0); - - let s = HS::with_hasher(DefaultHashBuilder::default()); - assert_eq!(s.capacity(), 0); - - let s = HS::with_capacity(0); - assert_eq!(s.capacity(), 0); - - let s = HS::with_capacity_and_hasher(0, DefaultHashBuilder::default()); - assert_eq!(s.capacity(), 0); - - let mut s = HS::new(); - s.insert(1); - s.insert(2); - s.remove(&1); - s.remove(&2); - s.shrink_to_fit(); - assert_eq!(s.capacity(), 0); - - let mut s = HS::new(); - s.reserve(0); - assert_eq!(s.capacity(), 0); - } - - #[test] - fn test_disjoint() { - let mut xs = HashSet::new(); - let mut ys = HashSet::new(); - assert!(xs.is_disjoint(&ys)); - assert!(ys.is_disjoint(&xs)); - assert!(xs.insert(5)); - assert!(ys.insert(11)); - assert!(xs.is_disjoint(&ys)); - assert!(ys.is_disjoint(&xs)); - assert!(xs.insert(7)); - assert!(xs.insert(19)); - assert!(xs.insert(4)); - assert!(ys.insert(2)); - assert!(ys.insert(-11)); - assert!(xs.is_disjoint(&ys)); - assert!(ys.is_disjoint(&xs)); - assert!(ys.insert(7)); - assert!(!xs.is_disjoint(&ys)); - assert!(!ys.is_disjoint(&xs)); - } - - #[test] - fn test_subset_and_superset() { - let mut a = HashSet::new(); - assert!(a.insert(0)); - assert!(a.insert(5)); - assert!(a.insert(11)); - assert!(a.insert(7)); - - let mut b = HashSet::new(); - assert!(b.insert(0)); - assert!(b.insert(7)); - assert!(b.insert(19)); - assert!(b.insert(250)); - assert!(b.insert(11)); - assert!(b.insert(200)); - - assert!(!a.is_subset(&b)); - assert!(!a.is_superset(&b)); - assert!(!b.is_subset(&a)); - assert!(!b.is_superset(&a)); - - assert!(b.insert(5)); - - assert!(a.is_subset(&b)); - assert!(!a.is_superset(&b)); - assert!(!b.is_subset(&a)); - assert!(b.is_superset(&a)); - } - - #[test] - fn test_iterate() { - let mut a = HashSet::new(); - for i in 0..32 { - assert!(a.insert(i)); - } - let mut observed: u32 = 0; - for k in &a { - observed |= 1 << *k; - } - assert_eq!(observed, 0xFFFF_FFFF); - } 
- - #[test] - fn test_intersection() { - let mut a = HashSet::new(); - let mut b = HashSet::new(); - - assert!(a.insert(11)); - assert!(a.insert(1)); - assert!(a.insert(3)); - assert!(a.insert(77)); - assert!(a.insert(103)); - assert!(a.insert(5)); - assert!(a.insert(-5)); - - assert!(b.insert(2)); - assert!(b.insert(11)); - assert!(b.insert(77)); - assert!(b.insert(-9)); - assert!(b.insert(-42)); - assert!(b.insert(5)); - assert!(b.insert(3)); - - let mut i = 0; - let expected = [3, 5, 11, 77]; - for x in a.intersection(&b) { - assert!(expected.contains(x)); - i += 1; - } - assert_eq!(i, expected.len()); - } - - #[test] - fn test_difference() { - let mut a = HashSet::new(); - let mut b = HashSet::new(); - - assert!(a.insert(1)); - assert!(a.insert(3)); - assert!(a.insert(5)); - assert!(a.insert(9)); - assert!(a.insert(11)); - - assert!(b.insert(3)); - assert!(b.insert(9)); - - let mut i = 0; - let expected = [1, 5, 11]; - for x in a.difference(&b) { - assert!(expected.contains(x)); - i += 1; - } - assert_eq!(i, expected.len()); - } - - #[test] - fn test_symmetric_difference() { - let mut a = HashSet::new(); - let mut b = HashSet::new(); - - assert!(a.insert(1)); - assert!(a.insert(3)); - assert!(a.insert(5)); - assert!(a.insert(9)); - assert!(a.insert(11)); - - assert!(b.insert(-2)); - assert!(b.insert(3)); - assert!(b.insert(9)); - assert!(b.insert(14)); - assert!(b.insert(22)); - - let mut i = 0; - let expected = [-2, 1, 5, 11, 14, 22]; - for x in a.symmetric_difference(&b) { - assert!(expected.contains(x)); - i += 1; - } - assert_eq!(i, expected.len()); - } - - #[test] - fn test_union() { - let mut a = HashSet::new(); - let mut b = HashSet::new(); - - assert!(a.insert(1)); - assert!(a.insert(3)); - assert!(a.insert(5)); - assert!(a.insert(9)); - assert!(a.insert(11)); - assert!(a.insert(16)); - assert!(a.insert(19)); - assert!(a.insert(24)); - - assert!(b.insert(-2)); - assert!(b.insert(1)); - assert!(b.insert(5)); - assert!(b.insert(9)); - 
assert!(b.insert(13)); - assert!(b.insert(19)); - - let mut i = 0; - let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24]; - for x in a.union(&b) { - assert!(expected.contains(x)); - i += 1; - } - assert_eq!(i, expected.len()); - } - - #[test] - fn test_from_map() { - let mut a = crate::HashMap::new(); - a.insert(1, ()); - a.insert(2, ()); - a.insert(3, ()); - a.insert(4, ()); - - let a: HashSet<_> = a.into(); - - assert_eq!(a.len(), 4); - assert!(a.contains(&1)); - assert!(a.contains(&2)); - assert!(a.contains(&3)); - assert!(a.contains(&4)); - } - - #[test] - fn test_from_iter() { - let xs = [1, 2, 2, 3, 4, 5, 6, 7, 8, 9]; - - let set: HashSet<_> = xs.iter().copied().collect(); - - for x in &xs { - assert!(set.contains(x)); - } - - assert_eq!(set.iter().len(), xs.len() - 1); - } - - #[test] - fn test_move_iter() { - let hs = { - let mut hs = HashSet::new(); - - hs.insert('a'); - hs.insert('b'); - - hs - }; - - let v = hs.into_iter().collect::>(); - assert!(v == ['a', 'b'] || v == ['b', 'a']); - } - - #[test] - fn test_eq() { - // These constants once happened to expose a bug in insert(). - // I'm keeping them around to prevent a regression. 
- let mut s1 = HashSet::new(); - - s1.insert(1); - s1.insert(2); - s1.insert(3); - - let mut s2 = HashSet::new(); - - s2.insert(1); - s2.insert(2); - - assert!(s1 != s2); - - s2.insert(3); - - assert_eq!(s1, s2); - } - - #[test] - fn test_show() { - let mut set = HashSet::new(); - let empty = HashSet::::new(); - - set.insert(1); - set.insert(2); - - let set_str = format!("{:?}", set); - - assert!(set_str == "{1, 2}" || set_str == "{2, 1}"); - assert_eq!(format!("{:?}", empty), "{}"); - } - - #[test] - fn test_trivial_drain() { - let mut s = HashSet::::new(); - for _ in s.drain() {} - assert!(s.is_empty()); - drop(s); - - let mut s = HashSet::::new(); - drop(s.drain()); - assert!(s.is_empty()); - } - - #[test] - fn test_drain() { - let mut s: HashSet<_> = (1..100).collect(); - - // try this a bunch of times to make sure we don't screw up internal state. - for _ in 0..20 { - assert_eq!(s.len(), 99); - - { - let mut last_i = 0; - let mut d = s.drain(); - for (i, x) in d.by_ref().take(50).enumerate() { - last_i = i; - assert!(x != 0); - } - assert_eq!(last_i, 49); - } - - for _ in &s { - panic!("s should be empty!"); - } - - // reset to try again. 
- s.extend(1..100); - } - } - - #[test] - fn test_replace() { - use core::hash; - - #[derive(Debug)] - struct Foo(&'static str, i32); - - impl PartialEq for Foo { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } - } - - impl Eq for Foo {} - - impl hash::Hash for Foo { - fn hash(&self, h: &mut H) { - self.0.hash(h); - } - } - - let mut s = HashSet::new(); - assert_eq!(s.replace(Foo("a", 1)), None); - assert_eq!(s.len(), 1); - assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1))); - assert_eq!(s.len(), 1); - - let mut it = s.iter(); - assert_eq!(it.next(), Some(&Foo("a", 2))); - assert_eq!(it.next(), None); - } - - #[test] - fn test_extend_ref() { - let mut a = HashSet::new(); - a.insert(1); - - a.extend(&[2, 3, 4]); - - assert_eq!(a.len(), 4); - assert!(a.contains(&1)); - assert!(a.contains(&2)); - assert!(a.contains(&3)); - assert!(a.contains(&4)); - - let mut b = HashSet::new(); - b.insert(5); - b.insert(6); - - a.extend(&b); - - assert_eq!(a.len(), 6); - assert!(a.contains(&1)); - assert!(a.contains(&2)); - assert!(a.contains(&3)); - assert!(a.contains(&4)); - assert!(a.contains(&5)); - assert!(a.contains(&6)); - } - - #[test] - fn test_retain() { - let xs = [1, 2, 3, 4, 5, 6]; - let mut set: HashSet = xs.iter().copied().collect(); - set.retain(|&k| k % 2 == 0); - assert_eq!(set.len(), 3); - assert!(set.contains(&2)); - assert!(set.contains(&4)); - assert!(set.contains(&6)); - } - - #[test] - fn test_drain_filter() { - { - let mut set: HashSet = (0..8).collect(); - let drained = set.drain_filter(|&k| k % 2 == 0); - let mut out = drained.collect::>(); - out.sort_unstable(); - assert_eq!(vec![0, 2, 4, 6], out); - assert_eq!(set.len(), 4); - } - { - let mut set: HashSet = (0..8).collect(); - drop(set.drain_filter(|&k| k % 2 == 0)); - assert_eq!(set.len(), 4, "Removes non-matching items on drop"); - } - } - - #[test] - fn test_const_with_hasher() { - use core::hash::BuildHasher; - use std::collections::hash_map::DefaultHasher; - - #[derive(Clone)] - 
struct MyHasher; - impl BuildHasher for MyHasher { - type Hasher = DefaultHasher; - - fn build_hasher(&self) -> DefaultHasher { - DefaultHasher::new() - } - } - - const EMPTY_SET: HashSet = HashSet::with_hasher(MyHasher); - - let mut set = EMPTY_SET; - set.insert(19); - assert!(set.contains(&19)); - } - - #[test] - fn rehash_in_place() { - let mut set = HashSet::new(); - - for i in 0..224 { - set.insert(i); - } - - assert_eq!( - set.capacity(), - 224, - "The set must be at or close to capacity to trigger a re hashing" - ); - - for i in 100..1400 { - set.remove(&(i - 100)); - set.insert(i); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/http/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/http/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/http/Cargo.toml s390-tools-2.33.1/rust-vendor/http/Cargo.toml --- s390-tools-2.31.0/rust-vendor/http/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -rust-version = "1.49.0" -name = "http" -version = "0.2.11" -authors = [ - "Alex Crichton ", - "Carl Lerche ", - "Sean McArthur ", -] -description = """ -A set of types for representing HTTP requests and responses. -""" -documentation = "https://docs.rs/http" -readme = "README.md" -keywords = ["http"] -categories = ["web-programming"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/hyperium/http" - -[dependencies.bytes] -version = "1" - -[dependencies.fnv] -version = "1.0.5" - -[dependencies.itoa] -version = "1" - -[dev-dependencies.doc-comment] -version = "0.3" - -[dev-dependencies.indexmap] -version = "<=1.8" - -[dev-dependencies.quickcheck] -version = "0.9.0" - -[dev-dependencies.rand] -version = "0.7.0" - -[dev-dependencies.seahash] -version = "3.0.5" - -[dev-dependencies.serde] -version = "1.0" - -[dev-dependencies.serde_json] -version = "1.0" diff -Nru s390-tools-2.31.0/rust-vendor/http/CHANGELOG.md s390-tools-2.33.1/rust-vendor/http/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/http/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,214 +0,0 @@ -# 0.2.11 (November 13, 2023) - -* Fix MIRI error in `header::Iter`. - -# 0.2.10 (November 10, 2023) - -* Fix parsing of `Authority` to handle square brackets in incorrect order. -* Fix `HeaderMap::with_capacity()` to handle arithmetic overflow. - -# 0.2.9 (February 17, 2023) - -* Add `HeaderName` constants for `cache-status` and `cdn-cache-control`. -* Implement `Hash` for `PathAndQuery`. -* Re-export `HeaderName` at crate root. - -# 0.2.8 (June 6, 2022) - -* Fix internal usage of uninitialized memory to use `MaybeUninit` inside `HeaderName`. - -# 0.2.7 (April 28, 2022) - -* MSRV bumped to `1.49`. -* Add `extend()` method to `Extensions`. -* Add `From` and `From` impls for `Uri`. -* Make `HeaderName::from_static` a `const fn`. 
- -# 0.2.6 (December 30, 2021) - -* Upgrade internal `itoa` dependency to 1.0. - -# 0.2.5 (September 21, 2021) - -* Add `is_empty()` and `len()` methods to `Extensions`. -* Add `version_ref()` method to `request::Builder`. -* Implement `TryFrom>` and `TryFrom` for `Authority`, `Uri`, `PathAndQuery`, and `HeaderName`. -* Make `HeaderValue::from_static` a `const fn`. - -# 0.2.4 (April 4, 2021) - -* Fix `Uri` parsing to allow `{`, `"`, and `}` in paths. - -# 0.2.3 (January 7, 2021) - -* Upgrade internal (private) `bytes` dependency to 1.0. - -# 0.2.2 (December 14, 2020) - -* Fix (potential double) panic of (`HeaderMap`) `OccupiedEntry::remove_entry` and - `remove_entry_mult` when multiple values are present. ([#446], [#449] dekellum) -* Safety audits of (priv) `ByteStr` and refactor of `Authority` ([#408], [#414] sbosnick) -* Fix `HeaderName` to error instead of panic when input is too long ([#432] [#433] acfoltzer) -* Allow `StatusCode` to encode values 100-999 without error. Use of the - unclassified range 600-999 remains discouraged. ([#144], [#438], [#443] quininer dekellum) -* Add `String` and `&String` fallible conversions to `PathAndQuery` ([#450] mkindahl) -* Fix `Authority` (and `Uri`) to error instead of panic on unbalanced brackets - ([#435], [#445] aeryz) - -# 0.2.1 (March 25, 2020) - -* Add `extensions_ref` and `extensions_mut` to `request::Builder` and `response::Builder`. - -# 0.2.0 (December 2, 2019) - -* Add `Version::HTTP_3` constant. -* Add `HeaderValue::from_maybe_shared`, `HeaderValue::from_maybe_shared_unchecked`, `Uri::from_maybe_shared`, `Authority::from_maybe_shared`, and `PathAndQuery::from_maybe_shared`. -* Change `request::Builder`, `response::Builder`, and `uri::Builder` to use by-value methods instead of by-ref. -* Change from `HttpTryFrom` trait to `std::convert::TryFrom`. -* Change `HeaderMap::entry` to no longer return a `Result`. -* Change `HeaderMap::drain` iterator to match the behavior of `IntoIter`. 
-* Change `Authority::port` to return an `Option` instead of `Option`. -* Change `Uri::scheme` to return `Option<&Scheme>` instead of `Option<&str>`. -* Change `Uri::authority` to return `Option<&Authority>` instead of `Option<&str>`. -* Remove `InvalidUriBytes`, `InvalidHeaderNameBytes`, and `InvalidHeaderValueBytes` error types. -* Remove `HeaderValue::from_shared`, `HeaderValue::from_shared_unchecked`, `Uri::from_shared`, `Authority::from_shared`, `Scheme::from_shared`, and `PathAndQuery::from_shared`. -* Remove `Authority::port_part`. -* Remove `Uri::scheme_part` and `Uri::authority_part`. - -# 0.1.20 (November 26, 2019) - -* Fix possible double-free if `header::Drain` iterator is `std::mem::forgot`en (#357). -* Fix possible data race if multiple `header::ValueDrain`s are iterated on different threads (#362). -* Fix `HeaderMap::reserve` capacity overflows (#360). -* Fix parsing long authority-form `Uri`s (#351). - -# 0.1.19 (October 15, 2019) - -* Allow `%` in IPv6 addresses in `Uri` (#343). - -# 0.1.18 (July 26, 2019) - -* Fix compilation of `HeaderName` parsing on WASM targets (#324). -* Implement `HttpTryFrom` for `HeaderMap` (#326). -* Export `http::header::HeaderValue` as `http::HeaderValue`. 
- -# 0.1.17 (April 5, 2019) - -* Add `Error::inner_ref()` to view the kind of error (#303) -* Add `headers_ref()` and `headers_mut()` methods to `request::Builder` and `response::Builder` (#293) - -# 0.1.16 (February 19, 2019) - -* Fix `Uri` to permit more characters in the `path` (#296) - -# 0.1.15 (January 22, 2019) - -* Fix `Uri::host()` to include brackets of IPv6 literals (#292) -* Add `scheme_str` and `port_u16` methods to `Uri` (#287) -* Add `method_ref`, `uri_ref`, and `headers_ref` to `request::Builder` (#284) - -# 0.1.14 (November 21, 2018) - -* Add `Port` struct (#252, #255, #265) -* Introduce `Uri` builder (#219) -* Empty `Method` no longer considered valid (#262) -* Fix `Uri` equality when terminating question mark is present (#270) -* Allow % character in userinfo (#269) -* Support additional tokens for header names (#271) -* Export `http::headers::{IterMut, ValuesMut}` (#278) - -# 0.1.13 (September 14, 2018) - -* impl `fmt::Display` for `HeaderName` (#249) -* Fix `uri::Authority` parsing when there is no host after an `@` (#248) -* Fix `Uri` parsing to allow more characters in query strings (#247) - -# 0.1.12 (September 7, 2018) - -* Fix `HeaderValue` parsing to allow HTABs (#244) - -# 0.1.11 (September 5, 2018) - -* Add `From<&Self>` for `HeaderValue`, `Method`, and `StatusCode` (#238) -* Add `Uri::from_static` (#240) - -# 0.1.10 (August 8, 2018) - -* `impl HttpTryFrom` for HeaderValue (#236) - -# 0.1.9 (August 7, 2018) - -* Fix double percent encoding (#233) -* Add additional HttpTryFrom impls (#234) - -# 0.1.8 (July 23, 2018) - -* Add fuller set of `PartialEq` for `Method` (#221) -* Reduce size of `HeaderMap` by using `Box<[Entry]>` instea of `Vec` (#224) -* Reduce size of `Extensions` by storing as `Option>` (#227) -* Implement `Iterator::size_hint` for most iterators in `header` (#226) - -# 0.1.7 (June 22, 2018) - -* Add `From for HeaderValue` for most integer types (#218). 
-* Add `Uri::into_parts()` inherent method (same as `Parts::from(uri)`) (#214). -* Fix converting `Uri`s in authority-form to `Parts` and then back into `Uri` (#216). -* Fix `Authority` parsing to reject multiple port sections (#215). -* Fix parsing 1 character authority-form `Uri`s into illegal forms (#220). - -# 0.1.6 (June 13, 2018) - -* Add `HeaderName::from_static()` constructor (#195). -* Add `Authority::from_static()` constructor (#186). -* Implement `From` for `HeaderValue` (#184). -* Fix duplicate keys when iterating over `header::Keys` (#201). - -# 0.1.5 (February 28, 2018) - -* Add websocket handshake related header constants (#162). -* Parsing `Authority` with an empty string now returns an error (#164). -* Implement `PartialEq` for `StatusCode` (#153). -* Implement `HttpTryFrom<&Uri>` for `Uri` (#165). -* Implement `FromStr` for `Method` (#167). -* Implement `HttpTryFrom` for `Uri` (#171). -* Add `into_body` fns to `Request` and `Response` (#172). -* Fix `Request::options` (#177). - -# 0.1.4 (January 4, 2018) - -* Add PathAndQuery::from_static (#148). -* Impl PartialOrd / PartialEq for Authority and PathAndQuery (#150). -* Add `map` fn to `Request` and `Response` (#151). - -# 0.1.3 (December 11, 2017) - -* Add `Scheme` associated consts for common protos. - -# 0.1.2 (November 29, 2017) - -* Add Uri accessor for scheme part. -* Fix Uri parsing bug (#134) - -# 0.1.1 (October 9, 2017) - -* Provide Uri accessors for parts (#129) -* Add Request builder helpers. (#123) -* Misc performance improvements (#126) - -# 0.1.0 (September 8, 2017) - -* Initial release. 
- -[#144]: https://github.com/hyperium/http/issues/144 -[#408]: https://github.com/hyperium/http/pull/408 -[#414]: https://github.com/hyperium/http/pull/414 -[#432]: https://github.com/hyperium/http/issues/432 -[#433]: https://github.com/hyperium/http/pull/433 -[#438]: https://github.com/hyperium/http/pull/438 -[#443]: https://github.com/hyperium/http/pull/443 -[#446]: https://github.com/hyperium/http/issues/446 -[#449]: https://github.com/hyperium/http/pull/449 -[#450]: https://github.com/hyperium/http/pull/450 -[#435]: https://github.com/hyperium/http/issues/435 -[#445]: https://github.com/hyperium/http/pull/445 - diff -Nru s390-tools-2.31.0/rust-vendor/http/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/http/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/http/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright 2017 http-rs authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/http/LICENSE-MIT s390-tools-2.33.1/rust-vendor/http/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/http/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2017 http-rs authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/http/README.md s390-tools-2.33.1/rust-vendor/http/README.md --- s390-tools-2.31.0/rust-vendor/http/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,80 +0,0 @@ -# HTTP - -A general purpose library of common HTTP types - -[![CI](https://github.com/hyperium/http/workflows/CI/badge.svg)](https://github.com/hyperium/http/actions?query=workflow%3ACI) -[![Crates.io](https://img.shields.io/crates/v/http.svg)](https://crates.io/crates/http) -[![Documentation](https://docs.rs/http/badge.svg)][dox] - -More information about this crate can be found in the [crate -documentation][dox]. - -[dox]: https://docs.rs/http - -## Usage - -To use `http`, first add this to your `Cargo.toml`: - -```toml -[dependencies] -http = "0.2" -``` - -Next, add this to your crate: - -```rust -use http::{Request, Response}; - -fn main() { - // ... -} -``` - -## Examples - -Create an HTTP request: - -```rust -use http::Request; - -fn main() { - let request = Request::builder() - .uri("https://www.rust-lang.org/") - .header("User-Agent", "awesome/1.0") - .body(()) - .unwrap(); -} -``` - -Create an HTTP response: - -```rust -use http::{Response, StatusCode}; - -fn main() { - let response = Response::builder() - .status(StatusCode::MOVED_PERMANENTLY) - .header("Location", "https://www.rust-lang.org/install.html") - .body(()) - .unwrap(); -} -``` - -# Supported Rust Versions - -This project follows the [Tokio MSRV][msrv] and is currently set to `1.49`. 
- -[msrv]: https://github.com/tokio-rs/tokio/#supported-rust-versions - -# License - -Licensed under either of - -- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) - -# Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/http/src/byte_str.rs s390-tools-2.33.1/rust-vendor/http/src/byte_str.rs --- s390-tools-2.31.0/rust-vendor/http/src/byte_str.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/byte_str.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,85 +0,0 @@ -use bytes::Bytes; - -use std::{ops, str}; - -#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub(crate) struct ByteStr { - // Invariant: bytes contains valid UTF-8 - bytes: Bytes, -} - -impl ByteStr { - #[inline] - pub fn new() -> ByteStr { - ByteStr { - // Invariant: the empty slice is trivially valid UTF-8. - bytes: Bytes::new(), - } - } - - #[inline] - pub const fn from_static(val: &'static str) -> ByteStr { - ByteStr { - // Invariant: val is a str so contains valid UTF-8. - bytes: Bytes::from_static(val.as_bytes()), - } - } - - #[inline] - /// ## Panics - /// In a debug build this will panic if `bytes` is not valid UTF-8. - /// - /// ## Safety - /// `bytes` must contain valid UTF-8. In a release build it is undefined - /// behaviour to call this with `bytes` that is not valid UTF-8. 
- pub unsafe fn from_utf8_unchecked(bytes: Bytes) -> ByteStr { - if cfg!(debug_assertions) { - match str::from_utf8(&bytes) { - Ok(_) => (), - Err(err) => panic!( - "ByteStr::from_utf8_unchecked() with invalid bytes; error = {}, bytes = {:?}", - err, bytes - ), - } - } - // Invariant: assumed by the safety requirements of this function. - ByteStr { bytes: bytes } - } -} - -impl ops::Deref for ByteStr { - type Target = str; - - #[inline] - fn deref(&self) -> &str { - let b: &[u8] = self.bytes.as_ref(); - // Safety: the invariant of `bytes` is that it contains valid UTF-8. - unsafe { str::from_utf8_unchecked(b) } - } -} - -impl From for ByteStr { - #[inline] - fn from(src: String) -> ByteStr { - ByteStr { - // Invariant: src is a String so contains valid UTF-8. - bytes: Bytes::from(src), - } - } -} - -impl<'a> From<&'a str> for ByteStr { - #[inline] - fn from(src: &'a str) -> ByteStr { - ByteStr { - // Invariant: src is a str so contains valid UTF-8. - bytes: Bytes::copy_from_slice(src.as_bytes()), - } - } -} - -impl From for Bytes { - fn from(src: ByteStr) -> Self { - src.bytes - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/convert.rs s390-tools-2.33.1/rust-vendor/http/src/convert.rs --- s390-tools-2.31.0/rust-vendor/http/src/convert.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/convert.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,17 +0,0 @@ -macro_rules! if_downcast_into { - ($in_ty:ty, $out_ty:ty, $val:ident, $body:expr) => ({ - if std::any::TypeId::of::<$in_ty>() == std::any::TypeId::of::<$out_ty>() { - // Store the value in an `Option` so we can `take` - // it after casting to `&mut dyn Any`. - let mut slot = Some($val); - // Re-write the `$val` ident with the downcasted value. - let $val = (&mut slot as &mut dyn std::any::Any) - .downcast_mut::>() - .unwrap() - .take() - .unwrap(); - // Run the $body in scope of the replaced val. 
- $body - } - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/error.rs s390-tools-2.33.1/rust-vendor/http/src/error.rs --- s390-tools-2.31.0/rust-vendor/http/src/error.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,149 +0,0 @@ -use std::error; -use std::fmt; -use std::result; - -use crate::header; -use crate::method; -use crate::status; -use crate::uri; - -/// A generic "error" for HTTP connections -/// -/// This error type is less specific than the error returned from other -/// functions in this crate, but all other errors can be converted to this -/// error. Consumers of this crate can typically consume and work with this form -/// of error for conversions with the `?` operator. -pub struct Error { - inner: ErrorKind, -} - -/// A `Result` typedef to use with the `http::Error` type -pub type Result = result::Result; - -enum ErrorKind { - StatusCode(status::InvalidStatusCode), - Method(method::InvalidMethod), - Uri(uri::InvalidUri), - UriParts(uri::InvalidUriParts), - HeaderName(header::InvalidHeaderName), - HeaderValue(header::InvalidHeaderValue), -} - -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("http::Error") - // Skip the noise of the ErrorKind enum - .field(&self.get_ref()) - .finish() - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self.get_ref(), f) - } -} - -impl Error { - /// Return true if the underlying error has the same type as T. - pub fn is(&self) -> bool { - self.get_ref().is::() - } - - /// Return a reference to the lower level, inner error. 
- pub fn get_ref(&self) -> &(dyn error::Error + 'static) { - use self::ErrorKind::*; - - match self.inner { - StatusCode(ref e) => e, - Method(ref e) => e, - Uri(ref e) => e, - UriParts(ref e) => e, - HeaderName(ref e) => e, - HeaderValue(ref e) => e, - } - } -} - -impl error::Error for Error { - // Return any available cause from the inner error. Note the inner error is - // not itself the cause. - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - self.get_ref().source() - } -} - -impl From for Error { - fn from(err: status::InvalidStatusCode) -> Error { - Error { - inner: ErrorKind::StatusCode(err), - } - } -} - -impl From for Error { - fn from(err: method::InvalidMethod) -> Error { - Error { - inner: ErrorKind::Method(err), - } - } -} - -impl From for Error { - fn from(err: uri::InvalidUri) -> Error { - Error { - inner: ErrorKind::Uri(err), - } - } -} - -impl From for Error { - fn from(err: uri::InvalidUriParts) -> Error { - Error { - inner: ErrorKind::UriParts(err), - } - } -} - -impl From for Error { - fn from(err: header::InvalidHeaderName) -> Error { - Error { - inner: ErrorKind::HeaderName(err), - } - } -} - -impl From for Error { - fn from(err: header::InvalidHeaderValue) -> Error { - Error { - inner: ErrorKind::HeaderValue(err), - } - } -} - -impl From for Error { - fn from(err: std::convert::Infallible) -> Error { - match err {} - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn inner_error_is_invalid_status_code() { - if let Err(e) = status::StatusCode::from_u16(6666) { - let err: Error = e.into(); - let ie = err.get_ref(); - assert!(!ie.is::()); - assert!(ie.is::()); - ie.downcast_ref::().unwrap(); - - assert!(!err.is::()); - assert!(err.is::()); - } else { - panic!("Bad status allowed!"); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/extensions.rs s390-tools-2.33.1/rust-vendor/http/src/extensions.rs --- s390-tools-2.31.0/rust-vendor/http/src/extensions.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/http/src/extensions.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,250 +0,0 @@ -use std::any::{Any, TypeId}; -use std::collections::HashMap; -use std::fmt; -use std::hash::{BuildHasherDefault, Hasher}; - -type AnyMap = HashMap, BuildHasherDefault>; - -// With TypeIds as keys, there's no need to hash them. They are already hashes -// themselves, coming from the compiler. The IdHasher just holds the u64 of -// the TypeId, and then returns it, instead of doing any bit fiddling. -#[derive(Default)] -struct IdHasher(u64); - -impl Hasher for IdHasher { - fn write(&mut self, _: &[u8]) { - unreachable!("TypeId calls write_u64"); - } - - #[inline] - fn write_u64(&mut self, id: u64) { - self.0 = id; - } - - #[inline] - fn finish(&self) -> u64 { - self.0 - } -} - -/// A type map of protocol extensions. -/// -/// `Extensions` can be used by `Request` and `Response` to store -/// extra data derived from the underlying protocol. -#[derive(Default)] -pub struct Extensions { - // If extensions are never used, no need to carry around an empty HashMap. - // That's 3 words. Instead, this is only 1 word. - map: Option>, -} - -impl Extensions { - /// Create an empty `Extensions`. - #[inline] - pub fn new() -> Extensions { - Extensions { map: None } - } - - /// Insert a type into this `Extensions`. - /// - /// If a extension of this type already existed, it will - /// be returned. - /// - /// # Example - /// - /// ``` - /// # use http::Extensions; - /// let mut ext = Extensions::new(); - /// assert!(ext.insert(5i32).is_none()); - /// assert!(ext.insert(4u8).is_none()); - /// assert_eq!(ext.insert(9i32), Some(5i32)); - /// ``` - pub fn insert(&mut self, val: T) -> Option { - self.map - .get_or_insert_with(|| Box::new(HashMap::default())) - .insert(TypeId::of::(), Box::new(val)) - .and_then(|boxed| { - (boxed as Box) - .downcast() - .ok() - .map(|boxed| *boxed) - }) - } - - /// Get a reference to a type previously inserted on this `Extensions`. 
- /// - /// # Example - /// - /// ``` - /// # use http::Extensions; - /// let mut ext = Extensions::new(); - /// assert!(ext.get::().is_none()); - /// ext.insert(5i32); - /// - /// assert_eq!(ext.get::(), Some(&5i32)); - /// ``` - pub fn get(&self) -> Option<&T> { - self.map - .as_ref() - .and_then(|map| map.get(&TypeId::of::())) - .and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref()) - } - - /// Get a mutable reference to a type previously inserted on this `Extensions`. - /// - /// # Example - /// - /// ``` - /// # use http::Extensions; - /// let mut ext = Extensions::new(); - /// ext.insert(String::from("Hello")); - /// ext.get_mut::().unwrap().push_str(" World"); - /// - /// assert_eq!(ext.get::().unwrap(), "Hello World"); - /// ``` - pub fn get_mut(&mut self) -> Option<&mut T> { - self.map - .as_mut() - .and_then(|map| map.get_mut(&TypeId::of::())) - .and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut()) - } - - /// Remove a type from this `Extensions`. - /// - /// If a extension of this type existed, it will be returned. - /// - /// # Example - /// - /// ``` - /// # use http::Extensions; - /// let mut ext = Extensions::new(); - /// ext.insert(5i32); - /// assert_eq!(ext.remove::(), Some(5i32)); - /// assert!(ext.get::().is_none()); - /// ``` - pub fn remove(&mut self) -> Option { - self.map - .as_mut() - .and_then(|map| map.remove(&TypeId::of::())) - .and_then(|boxed| { - (boxed as Box) - .downcast() - .ok() - .map(|boxed| *boxed) - }) - } - - /// Clear the `Extensions` of all inserted extensions. - /// - /// # Example - /// - /// ``` - /// # use http::Extensions; - /// let mut ext = Extensions::new(); - /// ext.insert(5i32); - /// ext.clear(); - /// - /// assert!(ext.get::().is_none()); - /// ``` - #[inline] - pub fn clear(&mut self) { - if let Some(ref mut map) = self.map { - map.clear(); - } - } - - /// Check whether the extension set is empty or not. 
- /// - /// # Example - /// - /// ``` - /// # use http::Extensions; - /// let mut ext = Extensions::new(); - /// assert!(ext.is_empty()); - /// ext.insert(5i32); - /// assert!(!ext.is_empty()); - /// ``` - #[inline] - pub fn is_empty(&self) -> bool { - self.map - .as_ref() - .map_or(true, |map| map.is_empty()) - } - - /// Get the numer of extensions available. - /// - /// # Example - /// - /// ``` - /// # use http::Extensions; - /// let mut ext = Extensions::new(); - /// assert_eq!(ext.len(), 0); - /// ext.insert(5i32); - /// assert_eq!(ext.len(), 1); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.map - .as_ref() - .map_or(0, |map| map.len()) - } - - /// Extends `self` with another `Extensions`. - /// - /// If an instance of a specific type exists in both, the one in `self` is overwritten with the - /// one from `other`. - /// - /// # Example - /// - /// ``` - /// # use http::Extensions; - /// let mut ext_a = Extensions::new(); - /// ext_a.insert(8u8); - /// ext_a.insert(16u16); - /// - /// let mut ext_b = Extensions::new(); - /// ext_b.insert(4u8); - /// ext_b.insert("hello"); - /// - /// ext_a.extend(ext_b); - /// assert_eq!(ext_a.len(), 3); - /// assert_eq!(ext_a.get::(), Some(&4u8)); - /// assert_eq!(ext_a.get::(), Some(&16u16)); - /// assert_eq!(ext_a.get::<&'static str>().copied(), Some("hello")); - /// ``` - pub fn extend(&mut self, other: Self) { - if let Some(other) = other.map { - if let Some(map) = &mut self.map { - map.extend(*other); - } else { - self.map = Some(other); - } - } - } -} - -impl fmt::Debug for Extensions { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Extensions").finish() - } -} - -#[test] -fn test_extensions() { - #[derive(Debug, PartialEq)] - struct MyType(i32); - - let mut extensions = Extensions::new(); - - extensions.insert(5i32); - extensions.insert(MyType(10)); - - assert_eq!(extensions.get(), Some(&5i32)); - assert_eq!(extensions.get_mut(), Some(&mut 5i32)); - - 
assert_eq!(extensions.remove::(), Some(5i32)); - assert!(extensions.get::().is_none()); - - assert_eq!(extensions.get::(), None); - assert_eq!(extensions.get(), Some(&MyType(10))); -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/header/map.rs s390-tools-2.33.1/rust-vendor/http/src/header/map.rs --- s390-tools-2.31.0/rust-vendor/http/src/header/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/header/map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,3545 +0,0 @@ -use std::collections::HashMap; -use std::collections::hash_map::RandomState; -use std::convert::TryFrom; -use std::hash::{BuildHasher, Hash, Hasher}; -use std::iter::{FromIterator, FusedIterator}; -use std::marker::PhantomData; -use std::{fmt, mem, ops, ptr, vec}; - -use crate::Error; - -use super::HeaderValue; -use super::name::{HdrName, HeaderName, InvalidHeaderName}; - -pub use self::as_header_name::AsHeaderName; -pub use self::into_header_name::IntoHeaderName; - -/// A set of HTTP headers -/// -/// `HeaderMap` is an multimap of [`HeaderName`] to values. -/// -/// [`HeaderName`]: struct.HeaderName.html -/// -/// # Examples -/// -/// Basic usage -/// -/// ``` -/// # use http::HeaderMap; -/// # use http::header::{CONTENT_LENGTH, HOST, LOCATION}; -/// let mut headers = HeaderMap::new(); -/// -/// headers.insert(HOST, "example.com".parse().unwrap()); -/// headers.insert(CONTENT_LENGTH, "123".parse().unwrap()); -/// -/// assert!(headers.contains_key(HOST)); -/// assert!(!headers.contains_key(LOCATION)); -/// -/// assert_eq!(headers[HOST], "example.com"); -/// -/// headers.remove(HOST); -/// -/// assert!(!headers.contains_key(HOST)); -/// ``` -#[derive(Clone)] -pub struct HeaderMap { - // Used to mask values to get an index - mask: Size, - indices: Box<[Pos]>, - entries: Vec>, - extra_values: Vec>, - danger: Danger, -} - -// # Implementation notes -// -// Below, you will find a fairly large amount of code. 
Most of this is to -// provide the necessary functions to efficiently manipulate the header -// multimap. The core hashing table is based on robin hood hashing [1]. While -// this is the same hashing algorithm used as part of Rust's `HashMap` in -// stdlib, many implementation details are different. The two primary reasons -// for this divergence are that `HeaderMap` is a multimap and the structure has -// been optimized to take advantage of the characteristics of HTTP headers. -// -// ## Structure Layout -// -// Most of the data contained by `HeaderMap` is *not* stored in the hash table. -// Instead, pairs of header name and *first* associated header value are stored -// in the `entries` vector. If the header name has more than one associated -// header value, then additional values are stored in `extra_values`. The actual -// hash table (`indices`) only maps hash codes to indices in `entries`. This -// means that, when an eviction happens, the actual header name and value stay -// put and only a tiny amount of memory has to be copied. -// -// Extra values associated with a header name are tracked using a linked list. -// Links are formed with offsets into `extra_values` and not pointers. -// -// [1]: https://en.wikipedia.org/wiki/Hash_table#Robin_Hood_hashing - -/// `HeaderMap` entry iterator. -/// -/// Yields `(&HeaderName, &value)` tuples. The same header name may be yielded -/// more than once if it has more than one associated value. -#[derive(Debug)] -pub struct Iter<'a, T> { - map: &'a HeaderMap, - entry: usize, - cursor: Option, -} - -/// `HeaderMap` mutable entry iterator -/// -/// Yields `(&HeaderName, &mut value)` tuples. The same header name may be -/// yielded more than once if it has more than one associated value. -#[derive(Debug)] -pub struct IterMut<'a, T> { - map: *mut HeaderMap, - entry: usize, - cursor: Option, - lt: PhantomData<&'a mut HeaderMap>, -} - -/// An owning iterator over the entries of a `HeaderMap`. 
-/// -/// This struct is created by the `into_iter` method on `HeaderMap`. -#[derive(Debug)] -pub struct IntoIter { - // If None, pull from `entries` - next: Option, - entries: vec::IntoIter>, - extra_values: Vec>, -} - -/// An iterator over `HeaderMap` keys. -/// -/// Each header name is yielded only once, even if it has more than one -/// associated value. -#[derive(Debug)] -pub struct Keys<'a, T> { - inner: ::std::slice::Iter<'a, Bucket>, -} - -/// `HeaderMap` value iterator. -/// -/// Each value contained in the `HeaderMap` will be yielded. -#[derive(Debug)] -pub struct Values<'a, T> { - inner: Iter<'a, T>, -} - -/// `HeaderMap` mutable value iterator -#[derive(Debug)] -pub struct ValuesMut<'a, T> { - inner: IterMut<'a, T>, -} - -/// A drain iterator for `HeaderMap`. -#[derive(Debug)] -pub struct Drain<'a, T> { - idx: usize, - len: usize, - entries: *mut [Bucket], - // If None, pull from `entries` - next: Option, - extra_values: *mut Vec>, - lt: PhantomData<&'a mut HeaderMap>, -} - -/// A view to all values stored in a single entry. -/// -/// This struct is returned by `HeaderMap::get_all`. -#[derive(Debug)] -pub struct GetAll<'a, T> { - map: &'a HeaderMap, - index: Option, -} - -/// A view into a single location in a `HeaderMap`, which may be vacant or occupied. -#[derive(Debug)] -pub enum Entry<'a, T: 'a> { - /// An occupied entry - Occupied(OccupiedEntry<'a, T>), - - /// A vacant entry - Vacant(VacantEntry<'a, T>), -} - -/// A view into a single empty location in a `HeaderMap`. -/// -/// This struct is returned as part of the `Entry` enum. -#[derive(Debug)] -pub struct VacantEntry<'a, T> { - map: &'a mut HeaderMap, - key: HeaderName, - hash: HashValue, - probe: usize, - danger: bool, -} - -/// A view into a single occupied location in a `HeaderMap`. -/// -/// This struct is returned as part of the `Entry` enum. 
-#[derive(Debug)] -pub struct OccupiedEntry<'a, T> { - map: &'a mut HeaderMap, - probe: usize, - index: usize, -} - -/// An iterator of all values associated with a single header name. -#[derive(Debug)] -pub struct ValueIter<'a, T> { - map: &'a HeaderMap, - index: usize, - front: Option, - back: Option, -} - -/// A mutable iterator of all values associated with a single header name. -#[derive(Debug)] -pub struct ValueIterMut<'a, T> { - map: *mut HeaderMap, - index: usize, - front: Option, - back: Option, - lt: PhantomData<&'a mut HeaderMap>, -} - -/// An drain iterator of all values associated with a single header name. -#[derive(Debug)] -pub struct ValueDrain<'a, T> { - first: Option, - next: Option<::std::vec::IntoIter>, - lt: PhantomData<&'a mut HeaderMap>, -} - -/// Tracks the value iterator state -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -enum Cursor { - Head, - Values(usize), -} - -/// Type used for representing the size of a HeaderMap value. -/// -/// 32,768 is more than enough entries for a single header map. Setting this -/// limit enables using `u16` to represent all offsets, which takes 2 bytes -/// instead of 8 on 64 bit processors. -/// -/// Setting this limit is especially beneficial for `indices`, making it more -/// cache friendly. More hash codes can fit in a cache line. -/// -/// You may notice that `u16` may represent more than 32,768 values. This is -/// true, but 32,768 should be plenty and it allows us to reserve the top bit -/// for future usage. -type Size = u16; - -/// This limit falls out from above. -const MAX_SIZE: usize = 1 << 15; - -/// An entry in the hash table. This represents the full hash code for an entry -/// as well as the position of the entry in the `entries` vector. -#[derive(Copy, Clone)] -struct Pos { - // Index in the `entries` vec - index: Size, - // Full hash value for the entry. - hash: HashValue, -} - -/// Hash values are limited to u16 as well. 
While `fast_hash` and `Hasher` -/// return `usize` hash codes, limiting the effective hash code to the lower 16 -/// bits is fine since we know that the `indices` vector will never grow beyond -/// that size. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -struct HashValue(u16); - -/// Stores the data associated with a `HeaderMap` entry. Only the first value is -/// included in this struct. If a header name has more than one associated -/// value, all extra values are stored in the `extra_values` vector. A doubly -/// linked list of entries is maintained. The doubly linked list is used so that -/// removing a value is constant time. This also has the nice property of -/// enabling double ended iteration. -#[derive(Debug, Clone)] -struct Bucket { - hash: HashValue, - key: HeaderName, - value: T, - links: Option, -} - -/// The head and tail of the value linked list. -#[derive(Debug, Copy, Clone)] -struct Links { - next: usize, - tail: usize, -} - -/// Access to the `links` value in a slice of buckets. -/// -/// It's important that no other field is accessed, since it may have been -/// freed in a `Drain` iterator. -#[derive(Debug)] -struct RawLinks(*mut [Bucket]); - -/// Node in doubly-linked list of header value entries -#[derive(Debug, Clone)] -struct ExtraValue { - value: T, - prev: Link, - next: Link, -} - -/// A header value node is either linked to another node in the `extra_values` -/// list or it points to an entry in `entries`. The entry in `entries` is the -/// start of the list and holds the associated header name. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -enum Link { - Entry(usize), - Extra(usize), -} - -/// Tracks the header map danger level! This relates to the adaptive hashing -/// algorithm. A HeaderMap starts in the "green" state, when a large number of -/// collisions are detected, it transitions to the yellow state. 
At this point, -/// the header map will either grow and switch back to the green state OR it -/// will transition to the red state. -/// -/// When in the red state, a safe hashing algorithm is used and all values in -/// the header map have to be rehashed. -#[derive(Clone)] -enum Danger { - Green, - Yellow, - Red(RandomState), -} - -// Constants related to detecting DOS attacks. -// -// Displacement is the number of entries that get shifted when inserting a new -// value. Forward shift is how far the entry gets stored from the ideal -// position. -// -// The current constant values were picked from another implementation. It could -// be that there are different values better suited to the header map case. -const DISPLACEMENT_THRESHOLD: usize = 128; -const FORWARD_SHIFT_THRESHOLD: usize = 512; - -// The default strategy for handling the yellow danger state is to increase the -// header map capacity in order to (hopefully) reduce the number of collisions. -// If growing the hash map would cause the load factor to drop bellow this -// threshold, then instead of growing, the headermap is switched to the red -// danger state and safe hashing is used instead. -const LOAD_FACTOR_THRESHOLD: f32 = 0.2; - -// Macro used to iterate the hash table starting at a given point, looping when -// the end is hit. -macro_rules! probe_loop { - ($label:tt: $probe_var: ident < $len: expr, $body: expr) => { - debug_assert!($len > 0); - $label: - loop { - if $probe_var < $len { - $body - $probe_var += 1; - } else { - $probe_var = 0; - } - } - }; - ($probe_var: ident < $len: expr, $body: expr) => { - debug_assert!($len > 0); - loop { - if $probe_var < $len { - $body - $probe_var += 1; - } else { - $probe_var = 0; - } - } - }; -} - -// First part of the robinhood algorithm. Given a key, find the slot in which it -// will be inserted. This is done by starting at the "ideal" spot. Then scanning -// until the destination slot is found. 
A destination slot is either the next -// empty slot or the next slot that is occupied by an entry that has a lower -// displacement (displacement is the distance from the ideal spot). -// -// This is implemented as a macro instead of a function that takes a closure in -// order to guarantee that it is "inlined". There is no way to annotate closures -// to guarantee inlining. -macro_rules! insert_phase_one { - ($map:ident, - $key:expr, - $probe:ident, - $pos:ident, - $hash:ident, - $danger:ident, - $vacant:expr, - $occupied:expr, - $robinhood:expr) => - {{ - let $hash = hash_elem_using(&$map.danger, &$key); - let mut $probe = desired_pos($map.mask, $hash); - let mut dist = 0; - let ret; - - // Start at the ideal position, checking all slots - probe_loop!('probe: $probe < $map.indices.len(), { - if let Some(($pos, entry_hash)) = $map.indices[$probe].resolve() { - // The slot is already occupied, but check if it has a lower - // displacement. - let their_dist = probe_distance($map.mask, entry_hash, $probe); - - if their_dist < dist { - // The new key's distance is larger, so claim this spot and - // displace the current entry. - // - // Check if this insertion is above the danger threshold. - let $danger = - dist >= FORWARD_SHIFT_THRESHOLD && !$map.danger.is_red(); - - ret = $robinhood; - break 'probe; - } else if entry_hash == $hash && $map.entries[$pos].key == $key { - // There already is an entry with the same key. - ret = $occupied; - break 'probe; - } - } else { - // The entry is vacant, use it for this key. - let $danger = - dist >= FORWARD_SHIFT_THRESHOLD && !$map.danger.is_red(); - - ret = $vacant; - break 'probe; - } - - dist += 1; - }); - - ret - }} -} - -// ===== impl HeaderMap ===== - -impl HeaderMap { - /// Create an empty `HeaderMap`. - /// - /// The map will be created without any capacity. This function will not - /// allocate. 
- /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// let map = HeaderMap::new(); - /// - /// assert!(map.is_empty()); - /// assert_eq!(0, map.capacity()); - /// ``` - pub fn new() -> Self { - HeaderMap::with_capacity(0) - } -} - -impl HeaderMap { - /// Create an empty `HeaderMap` with the specified capacity. - /// - /// The returned map will allocate internal storage in order to hold about - /// `capacity` elements without reallocating. However, this is a "best - /// effort" as there are usage patterns that could cause additional - /// allocations before `capacity` headers are stored in the map. - /// - /// More capacity than requested may be allocated. - /// - /// # Panics - /// - /// Requested capacity too large: would overflow `usize`. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// let map: HeaderMap = HeaderMap::with_capacity(10); - /// - /// assert!(map.is_empty()); - /// assert_eq!(12, map.capacity()); - /// ``` - pub fn with_capacity(capacity: usize) -> HeaderMap { - if capacity == 0 { - HeaderMap { - mask: 0, - indices: Box::new([]), // as a ZST, this doesn't actually allocate anything - entries: Vec::new(), - extra_values: Vec::new(), - danger: Danger::Green, - } - } else { - let raw_cap = match to_raw_capacity(capacity).checked_next_power_of_two() { - Some(c) => c, - None => panic!( - "requested capacity {} too large: next power of two would overflow `usize`", - capacity - ), - }; - assert!(raw_cap <= MAX_SIZE, "requested capacity too large"); - debug_assert!(raw_cap > 0); - - HeaderMap { - mask: (raw_cap - 1) as Size, - indices: vec![Pos::none(); raw_cap].into_boxed_slice(), - entries: Vec::with_capacity(raw_cap), - extra_values: Vec::new(), - danger: Danger::Green, - } - } - } - - /// Returns the number of headers stored in the map. - /// - /// This number represents the total number of **values** stored in the map. 
- /// This number can be greater than or equal to the number of **keys** - /// stored given that a single key may have more than one associated value. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::{ACCEPT, HOST}; - /// let mut map = HeaderMap::new(); - /// - /// assert_eq!(0, map.len()); - /// - /// map.insert(ACCEPT, "text/plain".parse().unwrap()); - /// map.insert(HOST, "localhost".parse().unwrap()); - /// - /// assert_eq!(2, map.len()); - /// - /// map.append(ACCEPT, "text/html".parse().unwrap()); - /// - /// assert_eq!(3, map.len()); - /// ``` - pub fn len(&self) -> usize { - self.entries.len() + self.extra_values.len() - } - - /// Returns the number of keys stored in the map. - /// - /// This number will be less than or equal to `len()` as each key may have - /// more than one associated value. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::{ACCEPT, HOST}; - /// let mut map = HeaderMap::new(); - /// - /// assert_eq!(0, map.keys_len()); - /// - /// map.insert(ACCEPT, "text/plain".parse().unwrap()); - /// map.insert(HOST, "localhost".parse().unwrap()); - /// - /// assert_eq!(2, map.keys_len()); - /// - /// map.insert(ACCEPT, "text/html".parse().unwrap()); - /// - /// assert_eq!(2, map.keys_len()); - /// ``` - pub fn keys_len(&self) -> usize { - self.entries.len() - } - - /// Returns true if the map contains no elements. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// - /// assert!(map.is_empty()); - /// - /// map.insert(HOST, "hello.world".parse().unwrap()); - /// - /// assert!(!map.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - self.entries.len() == 0 - } - - /// Clears the map, removing all key-value pairs. Keeps the allocated memory - /// for reuse. 
- /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "hello.world".parse().unwrap()); - /// - /// map.clear(); - /// assert!(map.is_empty()); - /// assert!(map.capacity() > 0); - /// ``` - pub fn clear(&mut self) { - self.entries.clear(); - self.extra_values.clear(); - self.danger = Danger::Green; - - for e in self.indices.iter_mut() { - *e = Pos::none(); - } - } - - /// Returns the number of headers the map can hold without reallocating. - /// - /// This number is an approximation as certain usage patterns could cause - /// additional allocations before the returned capacity is filled. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// - /// assert_eq!(0, map.capacity()); - /// - /// map.insert(HOST, "hello.world".parse().unwrap()); - /// assert_eq!(6, map.capacity()); - /// ``` - pub fn capacity(&self) -> usize { - usable_capacity(self.indices.len()) - } - - /// Reserves capacity for at least `additional` more headers to be inserted - /// into the `HeaderMap`. - /// - /// The header map may reserve more space to avoid frequent reallocations. - /// Like with `with_capacity`, this will be a "best effort" to avoid - /// allocations until `additional` more headers are inserted. Certain usage - /// patterns could cause additional allocations before the number is - /// reached. - /// - /// # Panics - /// - /// Panics if the new allocation size overflows `usize`. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// map.reserve(10); - /// # map.insert(HOST, "bar".parse().unwrap()); - /// ``` - pub fn reserve(&mut self, additional: usize) { - // TODO: This can't overflow if done properly... since the max # of - // elements is u16::MAX. 
- let cap = self - .entries - .len() - .checked_add(additional) - .expect("reserve overflow"); - - if cap > self.indices.len() { - let cap = cap.next_power_of_two(); - assert!(cap <= MAX_SIZE, "header map reserve over max capacity"); - assert!(cap != 0, "header map reserve overflowed"); - - if self.entries.len() == 0 { - self.mask = cap as Size - 1; - self.indices = vec![Pos::none(); cap].into_boxed_slice(); - self.entries = Vec::with_capacity(usable_capacity(cap)); - } else { - self.grow(cap); - } - } - } - - /// Returns a reference to the value associated with the key. - /// - /// If there are multiple values associated with the key, then the first one - /// is returned. Use `get_all` to get all values associated with a given - /// key. Returns `None` if there are no values associated with the key. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// assert!(map.get("host").is_none()); - /// - /// map.insert(HOST, "hello".parse().unwrap()); - /// assert_eq!(map.get(HOST).unwrap(), &"hello"); - /// assert_eq!(map.get("host").unwrap(), &"hello"); - /// - /// map.append(HOST, "world".parse().unwrap()); - /// assert_eq!(map.get("host").unwrap(), &"hello"); - /// ``` - pub fn get(&self, key: K) -> Option<&T> - where - K: AsHeaderName, - { - self.get2(&key) - } - - fn get2(&self, key: &K) -> Option<&T> - where - K: AsHeaderName, - { - match key.find(self) { - Some((_, found)) => { - let entry = &self.entries[found]; - Some(&entry.value) - } - None => None, - } - } - - /// Returns a mutable reference to the value associated with the key. - /// - /// If there are multiple values associated with the key, then the first one - /// is returned. Use `entry` to get all values associated with a given - /// key. Returns `None` if there are no values associated with the key. 
- /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::default(); - /// map.insert(HOST, "hello".to_string()); - /// map.get_mut("host").unwrap().push_str("-world"); - /// - /// assert_eq!(map.get(HOST).unwrap(), &"hello-world"); - /// ``` - pub fn get_mut(&mut self, key: K) -> Option<&mut T> - where - K: AsHeaderName, - { - match key.find(self) { - Some((_, found)) => { - let entry = &mut self.entries[found]; - Some(&mut entry.value) - } - None => None, - } - } - - /// Returns a view of all values associated with a key. - /// - /// The returned view does not incur any allocations and allows iterating - /// the values associated with the key. See [`GetAll`] for more details. - /// Returns `None` if there are no values associated with the key. - /// - /// [`GetAll`]: struct.GetAll.html - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// - /// map.insert(HOST, "hello".parse().unwrap()); - /// map.append(HOST, "goodbye".parse().unwrap()); - /// - /// let view = map.get_all("host"); - /// - /// let mut iter = view.iter(); - /// assert_eq!(&"hello", iter.next().unwrap()); - /// assert_eq!(&"goodbye", iter.next().unwrap()); - /// assert!(iter.next().is_none()); - /// ``` - pub fn get_all(&self, key: K) -> GetAll<'_, T> - where - K: AsHeaderName, - { - GetAll { - map: self, - index: key.find(self).map(|(_, i)| i), - } - } - - /// Returns true if the map contains a value for the specified key. 
- /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// assert!(!map.contains_key(HOST)); - /// - /// map.insert(HOST, "world".parse().unwrap()); - /// assert!(map.contains_key("host")); - /// ``` - pub fn contains_key(&self, key: K) -> bool - where - K: AsHeaderName, - { - key.find(self).is_some() - } - - /// An iterator visiting all key-value pairs. - /// - /// The iteration order is arbitrary, but consistent across platforms for - /// the same crate version. Each key will be yielded once per associated - /// value. So, if a key has 3 associated values, it will be yielded 3 times. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::{CONTENT_LENGTH, HOST}; - /// let mut map = HeaderMap::new(); - /// - /// map.insert(HOST, "hello".parse().unwrap()); - /// map.append(HOST, "goodbye".parse().unwrap()); - /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); - /// - /// for (key, value) in map.iter() { - /// println!("{:?}: {:?}", key, value); - /// } - /// ``` - pub fn iter(&self) -> Iter<'_, T> { - Iter { - map: self, - entry: 0, - cursor: self.entries.first().map(|_| Cursor::Head), - } - } - - /// An iterator visiting all key-value pairs, with mutable value references. - /// - /// The iterator order is arbitrary, but consistent across platforms for the - /// same crate version. Each key will be yielded once per associated value, - /// so if a key has 3 associated values, it will be yielded 3 times. 
- /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::{CONTENT_LENGTH, HOST}; - /// let mut map = HeaderMap::default(); - /// - /// map.insert(HOST, "hello".to_string()); - /// map.append(HOST, "goodbye".to_string()); - /// map.insert(CONTENT_LENGTH, "123".to_string()); - /// - /// for (key, value) in map.iter_mut() { - /// value.push_str("-boop"); - /// } - /// ``` - pub fn iter_mut(&mut self) -> IterMut<'_, T> { - IterMut { - map: self as *mut _, - entry: 0, - cursor: self.entries.first().map(|_| Cursor::Head), - lt: PhantomData, - } - } - - /// An iterator visiting all keys. - /// - /// The iteration order is arbitrary, but consistent across platforms for - /// the same crate version. Each key will be yielded only once even if it - /// has multiple associated values. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::{CONTENT_LENGTH, HOST}; - /// let mut map = HeaderMap::new(); - /// - /// map.insert(HOST, "hello".parse().unwrap()); - /// map.append(HOST, "goodbye".parse().unwrap()); - /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); - /// - /// for key in map.keys() { - /// println!("{:?}", key); - /// } - /// ``` - pub fn keys(&self) -> Keys<'_, T> { - Keys { - inner: self.entries.iter(), - } - } - - /// An iterator visiting all values. - /// - /// The iteration order is arbitrary, but consistent across platforms for - /// the same crate version. 
- /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::{CONTENT_LENGTH, HOST}; - /// let mut map = HeaderMap::new(); - /// - /// map.insert(HOST, "hello".parse().unwrap()); - /// map.append(HOST, "goodbye".parse().unwrap()); - /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); - /// - /// for value in map.values() { - /// println!("{:?}", value); - /// } - /// ``` - pub fn values(&self) -> Values<'_, T> { - Values { inner: self.iter() } - } - - /// An iterator visiting all values mutably. - /// - /// The iteration order is arbitrary, but consistent across platforms for - /// the same crate version. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::{CONTENT_LENGTH, HOST}; - /// let mut map = HeaderMap::default(); - /// - /// map.insert(HOST, "hello".to_string()); - /// map.append(HOST, "goodbye".to_string()); - /// map.insert(CONTENT_LENGTH, "123".to_string()); - /// - /// for value in map.values_mut() { - /// value.push_str("-boop"); - /// } - /// ``` - pub fn values_mut(&mut self) -> ValuesMut<'_, T> { - ValuesMut { - inner: self.iter_mut(), - } - } - - /// Clears the map, returning all entries as an iterator. - /// - /// The internal memory is kept for reuse. - /// - /// For each yielded item that has `None` provided for the `HeaderName`, - /// then the associated header name is the same as that of the previously - /// yielded item. The first yielded item will have `HeaderName` set. 
- /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::{CONTENT_LENGTH, HOST}; - /// let mut map = HeaderMap::new(); - /// - /// map.insert(HOST, "hello".parse().unwrap()); - /// map.append(HOST, "goodbye".parse().unwrap()); - /// map.insert(CONTENT_LENGTH, "123".parse().unwrap()); - /// - /// let mut drain = map.drain(); - /// - /// - /// assert_eq!(drain.next(), Some((Some(HOST), "hello".parse().unwrap()))); - /// assert_eq!(drain.next(), Some((None, "goodbye".parse().unwrap()))); - /// - /// assert_eq!(drain.next(), Some((Some(CONTENT_LENGTH), "123".parse().unwrap()))); - /// - /// assert_eq!(drain.next(), None); - /// ``` - pub fn drain(&mut self) -> Drain<'_, T> { - for i in self.indices.iter_mut() { - *i = Pos::none(); - } - - // Memory safety - // - // When the Drain is first created, it shortens the length of - // the source vector to make sure no uninitialized or moved-from - // elements are accessible at all if the Drain's destructor never - // gets to run. - - let entries = &mut self.entries[..] 
as *mut _; - let extra_values = &mut self.extra_values as *mut _; - let len = self.entries.len(); - unsafe { self.entries.set_len(0); } - - Drain { - idx: 0, - len, - entries, - extra_values, - next: None, - lt: PhantomData, - } - } - - fn value_iter(&self, idx: Option) -> ValueIter<'_, T> { - use self::Cursor::*; - - if let Some(idx) = idx { - let back = { - let entry = &self.entries[idx]; - - entry.links.map(|l| Values(l.tail)).unwrap_or(Head) - }; - - ValueIter { - map: self, - index: idx, - front: Some(Head), - back: Some(back), - } - } else { - ValueIter { - map: self, - index: ::std::usize::MAX, - front: None, - back: None, - } - } - } - - fn value_iter_mut(&mut self, idx: usize) -> ValueIterMut<'_, T> { - use self::Cursor::*; - - let back = { - let entry = &self.entries[idx]; - - entry.links.map(|l| Values(l.tail)).unwrap_or(Head) - }; - - ValueIterMut { - map: self as *mut _, - index: idx, - front: Some(Head), - back: Some(back), - lt: PhantomData, - } - } - - /// Gets the given key's corresponding entry in the map for in-place - /// manipulation. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// let mut map: HeaderMap = HeaderMap::default(); - /// - /// let headers = &[ - /// "content-length", - /// "x-hello", - /// "Content-Length", - /// "x-world", - /// ]; - /// - /// for &header in headers { - /// let counter = map.entry(header).or_insert(0); - /// *counter += 1; - /// } - /// - /// assert_eq!(map["content-length"], 2); - /// assert_eq!(map["x-hello"], 1); - /// ``` - pub fn entry(&mut self, key: K) -> Entry<'_, T> - where - K: IntoHeaderName, - { - key.entry(self) - } - - /// Gets the given key's corresponding entry in the map for in-place - /// manipulation. - /// - /// # Errors - /// - /// This method differs from `entry` by allowing types that may not be - /// valid `HeaderName`s to passed as the key (such as `String`). If they - /// do not parse as a valid `HeaderName`, this returns an - /// `InvalidHeaderName` error. 
- pub fn try_entry(&mut self, key: K) -> Result, InvalidHeaderName> - where - K: AsHeaderName, - { - key.try_entry(self) - } - - fn entry2(&mut self, key: K) -> Entry<'_, T> - where - K: Hash + Into, - HeaderName: PartialEq, - { - // Ensure that there is space in the map - self.reserve_one(); - - insert_phase_one!( - self, - key, - probe, - pos, - hash, - danger, - Entry::Vacant(VacantEntry { - map: self, - hash: hash, - key: key.into(), - probe: probe, - danger: danger, - }), - Entry::Occupied(OccupiedEntry { - map: self, - index: pos, - probe: probe, - }), - Entry::Vacant(VacantEntry { - map: self, - hash: hash, - key: key.into(), - probe: probe, - danger: danger, - }) - ) - } - - /// Inserts a key-value pair into the map. - /// - /// If the map did not previously have this key present, then `None` is - /// returned. - /// - /// If the map did have this key present, the new value is associated with - /// the key and all previous values are removed. **Note** that only a single - /// one of the previous values is returned. If there are multiple values - /// that have been previously associated with the key, then the first one is - /// returned. See `insert_mult` on `OccupiedEntry` for an API that returns - /// all values. - /// - /// The key is not updated, though; this matters for types that can be `==` - /// without being identical. 
- /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// assert!(map.insert(HOST, "world".parse().unwrap()).is_none()); - /// assert!(!map.is_empty()); - /// - /// let mut prev = map.insert(HOST, "earth".parse().unwrap()).unwrap(); - /// assert_eq!("world", prev); - /// ``` - pub fn insert(&mut self, key: K, val: T) -> Option - where - K: IntoHeaderName, - { - key.insert(self, val) - } - - #[inline] - fn insert2(&mut self, key: K, value: T) -> Option - where - K: Hash + Into, - HeaderName: PartialEq, - { - self.reserve_one(); - - insert_phase_one!( - self, - key, - probe, - pos, - hash, - danger, - // Vacant - { - let _ = danger; // Make lint happy - let index = self.entries.len(); - self.insert_entry(hash, key.into(), value); - self.indices[probe] = Pos::new(index, hash); - None - }, - // Occupied - Some(self.insert_occupied(pos, value)), - // Robinhood - { - self.insert_phase_two(key.into(), value, hash, probe, danger); - None - } - ) - } - - /// Set an occupied bucket to the given value - #[inline] - fn insert_occupied(&mut self, index: usize, value: T) -> T { - if let Some(links) = self.entries[index].links { - self.remove_all_extra_values(links.next); - } - - let entry = &mut self.entries[index]; - mem::replace(&mut entry.value, value) - } - - fn insert_occupied_mult(&mut self, index: usize, value: T) -> ValueDrain<'_, T> { - let old; - let links; - - { - let entry = &mut self.entries[index]; - - old = mem::replace(&mut entry.value, value); - links = entry.links.take(); - } - - let raw_links = self.raw_links(); - let extra_values = &mut self.extra_values; - - let next = links.map(|l| { - drain_all_extra_values(raw_links, extra_values, l.next) - .into_iter() - }); - - ValueDrain { - first: Some(old), - next: next, - lt: PhantomData, - } - } - - /// Inserts a key-value pair into the map. 
- /// - /// If the map did not previously have this key present, then `false` is - /// returned. - /// - /// If the map did have this key present, the new value is pushed to the end - /// of the list of values currently associated with the key. The key is not - /// updated, though; this matters for types that can be `==` without being - /// identical. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// assert!(map.insert(HOST, "world".parse().unwrap()).is_none()); - /// assert!(!map.is_empty()); - /// - /// map.append(HOST, "earth".parse().unwrap()); - /// - /// let values = map.get_all("host"); - /// let mut i = values.iter(); - /// assert_eq!("world", *i.next().unwrap()); - /// assert_eq!("earth", *i.next().unwrap()); - /// ``` - pub fn append(&mut self, key: K, value: T) -> bool - where - K: IntoHeaderName, - { - key.append(self, value) - } - - #[inline] - fn append2(&mut self, key: K, value: T) -> bool - where - K: Hash + Into, - HeaderName: PartialEq, - { - self.reserve_one(); - - insert_phase_one!( - self, - key, - probe, - pos, - hash, - danger, - // Vacant - { - let _ = danger; - let index = self.entries.len(); - self.insert_entry(hash, key.into(), value); - self.indices[probe] = Pos::new(index, hash); - false - }, - // Occupied - { - append_value(pos, &mut self.entries[pos], &mut self.extra_values, value); - true - }, - // Robinhood - { - self.insert_phase_two(key.into(), value, hash, probe, danger); - - false - } - ) - } - - #[inline] - fn find(&self, key: &K) -> Option<(usize, usize)> - where - K: Hash + Into, - HeaderName: PartialEq, - { - if self.entries.is_empty() { - return None; - } - - let hash = hash_elem_using(&self.danger, key); - let mask = self.mask; - let mut probe = desired_pos(mask, hash); - let mut dist = 0; - - probe_loop!(probe < self.indices.len(), { - if let Some((i, entry_hash)) = self.indices[probe].resolve() { - if dist > probe_distance(mask, 
entry_hash, probe) { - // give up when probe distance is too long - return None; - } else if entry_hash == hash && self.entries[i].key == *key { - return Some((probe, i)); - } - } else { - return None; - } - - dist += 1; - }); - } - - /// phase 2 is post-insert where we forward-shift `Pos` in the indices. - #[inline] - fn insert_phase_two( - &mut self, - key: HeaderName, - value: T, - hash: HashValue, - probe: usize, - danger: bool, - ) -> usize { - // Push the value and get the index - let index = self.entries.len(); - self.insert_entry(hash, key, value); - - let num_displaced = do_insert_phase_two(&mut self.indices, probe, Pos::new(index, hash)); - - if danger || num_displaced >= DISPLACEMENT_THRESHOLD { - // Increase danger level - self.danger.to_yellow(); - } - - index - } - - /// Removes a key from the map, returning the value associated with the key. - /// - /// Returns `None` if the map does not contain the key. If there are - /// multiple values associated with the key, then the first one is returned. - /// See `remove_entry_mult` on `OccupiedEntry` for an API that yields all - /// values. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "hello.world".parse().unwrap()); - /// - /// let prev = map.remove(HOST).unwrap(); - /// assert_eq!("hello.world", prev); - /// - /// assert!(map.remove(HOST).is_none()); - /// ``` - pub fn remove(&mut self, key: K) -> Option - where - K: AsHeaderName, - { - match key.find(self) { - Some((probe, idx)) => { - if let Some(links) = self.entries[idx].links { - self.remove_all_extra_values(links.next); - } - - let entry = self.remove_found(probe, idx); - - Some(entry.value) - } - None => None, - } - } - - /// Remove an entry from the map. 
- /// - /// Warning: To avoid inconsistent state, extra values _must_ be removed - /// for the `found` index (via `remove_all_extra_values` or similar) - /// _before_ this method is called. - #[inline] - fn remove_found(&mut self, probe: usize, found: usize) -> Bucket { - // index `probe` and entry `found` is to be removed - // use swap_remove, but then we need to update the index that points - // to the other entry that has to move - self.indices[probe] = Pos::none(); - let entry = self.entries.swap_remove(found); - - // correct index that points to the entry that had to swap places - if let Some(entry) = self.entries.get(found) { - // was not last element - // examine new element in `found` and find it in indices - let mut probe = desired_pos(self.mask, entry.hash); - - probe_loop!(probe < self.indices.len(), { - if let Some((i, _)) = self.indices[probe].resolve() { - if i >= self.entries.len() { - // found it - self.indices[probe] = Pos::new(found, entry.hash); - break; - } - } - }); - - // Update links - if let Some(links) = entry.links { - self.extra_values[links.next].prev = Link::Entry(found); - self.extra_values[links.tail].next = Link::Entry(found); - } - } - - // backward shift deletion in self.indices - // after probe, shift all non-ideally placed indices backward - if self.entries.len() > 0 { - let mut last_probe = probe; - let mut probe = probe + 1; - - probe_loop!(probe < self.indices.len(), { - if let Some((_, entry_hash)) = self.indices[probe].resolve() { - if probe_distance(self.mask, entry_hash, probe) > 0 { - self.indices[last_probe] = self.indices[probe]; - self.indices[probe] = Pos::none(); - } else { - break; - } - } else { - break; - } - - last_probe = probe; - }); - } - - entry - } - - /// Removes the `ExtraValue` at the given index. 
- #[inline] - fn remove_extra_value(&mut self, idx: usize) -> ExtraValue { - let raw_links = self.raw_links(); - remove_extra_value(raw_links, &mut self.extra_values, idx) - } - - fn remove_all_extra_values(&mut self, mut head: usize) { - loop { - let extra = self.remove_extra_value(head); - - if let Link::Extra(idx) = extra.next { - head = idx; - } else { - break; - } - } - } - - #[inline] - fn insert_entry(&mut self, hash: HashValue, key: HeaderName, value: T) { - assert!(self.entries.len() < MAX_SIZE, "header map at capacity"); - - self.entries.push(Bucket { - hash: hash, - key: key, - value: value, - links: None, - }); - } - - fn rebuild(&mut self) { - // Loop over all entries and re-insert them into the map - 'outer: for (index, entry) in self.entries.iter_mut().enumerate() { - let hash = hash_elem_using(&self.danger, &entry.key); - let mut probe = desired_pos(self.mask, hash); - let mut dist = 0; - - // Update the entry's hash code - entry.hash = hash; - - probe_loop!(probe < self.indices.len(), { - if let Some((_, entry_hash)) = self.indices[probe].resolve() { - // if existing element probed less than us, swap - let their_dist = probe_distance(self.mask, entry_hash, probe); - - if their_dist < dist { - // Robinhood - break; - } - } else { - // Vacant slot - self.indices[probe] = Pos::new(index, hash); - continue 'outer; - } - - dist += 1; - }); - - do_insert_phase_two(&mut self.indices, probe, Pos::new(index, hash)); - } - } - - fn reinsert_entry_in_order(&mut self, pos: Pos) { - if let Some((_, entry_hash)) = pos.resolve() { - // Find first empty bucket and insert there - let mut probe = desired_pos(self.mask, entry_hash); - - probe_loop!(probe < self.indices.len(), { - if self.indices[probe].resolve().is_none() { - // empty bucket, insert here - self.indices[probe] = pos; - return; - } - }); - } - } - - fn reserve_one(&mut self) { - let len = self.entries.len(); - - if self.danger.is_yellow() { - let load_factor = self.entries.len() as f32 / 
self.indices.len() as f32; - - if load_factor >= LOAD_FACTOR_THRESHOLD { - // Transition back to green danger level - self.danger.to_green(); - - // Double the capacity - let new_cap = self.indices.len() * 2; - - // Grow the capacity - self.grow(new_cap); - } else { - self.danger.to_red(); - - // Rebuild hash table - for index in self.indices.iter_mut() { - *index = Pos::none(); - } - - self.rebuild(); - } - } else if len == self.capacity() { - if len == 0 { - let new_raw_cap = 8; - self.mask = 8 - 1; - self.indices = vec![Pos::none(); new_raw_cap].into_boxed_slice(); - self.entries = Vec::with_capacity(usable_capacity(new_raw_cap)); - } else { - let raw_cap = self.indices.len(); - self.grow(raw_cap << 1); - } - } - } - - #[inline] - fn grow(&mut self, new_raw_cap: usize) { - assert!(new_raw_cap <= MAX_SIZE, "requested capacity too large"); - // This path can never be reached when handling the first allocation in - // the map. - - // find first ideally placed element -- start of cluster - let mut first_ideal = 0; - - for (i, pos) in self.indices.iter().enumerate() { - if let Some((_, entry_hash)) = pos.resolve() { - if 0 == probe_distance(self.mask, entry_hash, i) { - first_ideal = i; - break; - } - } - } - - // visit the entries in an order where we can simply reinsert them - // into self.indices without any bucket stealing. - let old_indices = mem::replace( - &mut self.indices, - vec![Pos::none(); new_raw_cap].into_boxed_slice(), - ); - self.mask = new_raw_cap.wrapping_sub(1) as Size; - - for &pos in &old_indices[first_ideal..] { - self.reinsert_entry_in_order(pos); - } - - for &pos in &old_indices[..first_ideal] { - self.reinsert_entry_in_order(pos); - } - - // Reserve additional entry slots - let more = self.capacity() - self.entries.len(); - self.entries.reserve_exact(more); - } - - #[inline] - fn raw_links(&mut self) -> RawLinks { - RawLinks(&mut self.entries[..] as *mut _) - } -} - -/// Removes the `ExtraValue` at the given index. 
-#[inline] -fn remove_extra_value( - mut raw_links: RawLinks, - extra_values: &mut Vec>, - idx: usize) - -> ExtraValue -{ - let prev; - let next; - - { - debug_assert!(extra_values.len() > idx); - let extra = &extra_values[idx]; - prev = extra.prev; - next = extra.next; - } - - // First unlink the extra value - match (prev, next) { - (Link::Entry(prev), Link::Entry(next)) => { - debug_assert_eq!(prev, next); - - raw_links[prev] = None; - } - (Link::Entry(prev), Link::Extra(next)) => { - debug_assert!(raw_links[prev].is_some()); - - raw_links[prev].as_mut().unwrap() - .next = next; - - debug_assert!(extra_values.len() > next); - extra_values[next].prev = Link::Entry(prev); - } - (Link::Extra(prev), Link::Entry(next)) => { - debug_assert!(raw_links[next].is_some()); - - raw_links[next].as_mut().unwrap() - .tail = prev; - - debug_assert!(extra_values.len() > prev); - extra_values[prev].next = Link::Entry(next); - } - (Link::Extra(prev), Link::Extra(next)) => { - debug_assert!(extra_values.len() > next); - debug_assert!(extra_values.len() > prev); - - extra_values[prev].next = Link::Extra(next); - extra_values[next].prev = Link::Extra(prev); - } - } - - // Remove the extra value - let mut extra = extra_values.swap_remove(idx); - - // This is the index of the value that was moved (possibly `extra`) - let old_idx = extra_values.len(); - - // Update the links - if extra.prev == Link::Extra(old_idx) { - extra.prev = Link::Extra(idx); - } - - if extra.next == Link::Extra(old_idx) { - extra.next = Link::Extra(idx); - } - - // Check if another entry was displaced. If it was, then the links - // need to be fixed. 
- if idx != old_idx { - let next; - let prev; - - { - debug_assert!(extra_values.len() > idx); - let moved = &extra_values[idx]; - next = moved.next; - prev = moved.prev; - } - - // An entry was moved, we have to the links - match prev { - Link::Entry(entry_idx) => { - // It is critical that we do not attempt to read the - // header name or value as that memory may have been - // "released" already. - debug_assert!(raw_links[entry_idx].is_some()); - - let links = raw_links[entry_idx].as_mut().unwrap(); - links.next = idx; - } - Link::Extra(extra_idx) => { - debug_assert!(extra_values.len() > extra_idx); - extra_values[extra_idx].next = Link::Extra(idx); - } - } - - match next { - Link::Entry(entry_idx) => { - debug_assert!(raw_links[entry_idx].is_some()); - - let links = raw_links[entry_idx].as_mut().unwrap(); - links.tail = idx; - } - Link::Extra(extra_idx) => { - debug_assert!(extra_values.len() > extra_idx); - extra_values[extra_idx].prev = Link::Extra(idx); - } - } - } - - debug_assert!({ - for v in &*extra_values { - assert!(v.next != Link::Extra(old_idx)); - assert!(v.prev != Link::Extra(old_idx)); - } - - true - }); - - extra -} - -fn drain_all_extra_values( - raw_links: RawLinks, - extra_values: &mut Vec>, - mut head: usize) - -> Vec -{ - let mut vec = Vec::new(); - loop { - let extra = remove_extra_value(raw_links, extra_values, head); - vec.push(extra.value); - - if let Link::Extra(idx) = extra.next { - head = idx; - } else { - break; - } - } - vec -} - -impl<'a, T> IntoIterator for &'a HeaderMap { - type Item = (&'a HeaderName, &'a T); - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Iter<'a, T> { - self.iter() - } -} - -impl<'a, T> IntoIterator for &'a mut HeaderMap { - type Item = (&'a HeaderName, &'a mut T); - type IntoIter = IterMut<'a, T>; - - fn into_iter(self) -> IterMut<'a, T> { - self.iter_mut() - } -} - -impl IntoIterator for HeaderMap { - type Item = (Option, T); - type IntoIter = IntoIter; - - /// Creates a consuming iterator, that 
is, one that moves keys and values - /// out of the map in arbitrary order. The map cannot be used after calling - /// this. - /// - /// For each yielded item that has `None` provided for the `HeaderName`, - /// then the associated header name is the same as that of the previously - /// yielded item. The first yielded item will have `HeaderName` set. - /// - /// # Examples - /// - /// Basic usage. - /// - /// ``` - /// # use http::header; - /// # use http::header::*; - /// let mut map = HeaderMap::new(); - /// map.insert(header::CONTENT_LENGTH, "123".parse().unwrap()); - /// map.insert(header::CONTENT_TYPE, "json".parse().unwrap()); - /// - /// let mut iter = map.into_iter(); - /// assert_eq!(iter.next(), Some((Some(header::CONTENT_LENGTH), "123".parse().unwrap()))); - /// assert_eq!(iter.next(), Some((Some(header::CONTENT_TYPE), "json".parse().unwrap()))); - /// assert!(iter.next().is_none()); - /// ``` - /// - /// Multiple values per key. - /// - /// ``` - /// # use http::header; - /// # use http::header::*; - /// let mut map = HeaderMap::new(); - /// - /// map.append(header::CONTENT_LENGTH, "123".parse().unwrap()); - /// map.append(header::CONTENT_LENGTH, "456".parse().unwrap()); - /// - /// map.append(header::CONTENT_TYPE, "json".parse().unwrap()); - /// map.append(header::CONTENT_TYPE, "html".parse().unwrap()); - /// map.append(header::CONTENT_TYPE, "xml".parse().unwrap()); - /// - /// let mut iter = map.into_iter(); - /// - /// assert_eq!(iter.next(), Some((Some(header::CONTENT_LENGTH), "123".parse().unwrap()))); - /// assert_eq!(iter.next(), Some((None, "456".parse().unwrap()))); - /// - /// assert_eq!(iter.next(), Some((Some(header::CONTENT_TYPE), "json".parse().unwrap()))); - /// assert_eq!(iter.next(), Some((None, "html".parse().unwrap()))); - /// assert_eq!(iter.next(), Some((None, "xml".parse().unwrap()))); - /// assert!(iter.next().is_none()); - /// ``` - fn into_iter(self) -> IntoIter { - IntoIter { - next: None, - entries: self.entries.into_iter(), - 
extra_values: self.extra_values, - } - } -} - -impl FromIterator<(HeaderName, T)> for HeaderMap { - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - let mut map = HeaderMap::default(); - map.extend(iter); - map - } -} - -/// Try to convert a `HashMap` into a `HeaderMap`. -/// -/// # Examples -/// -/// ``` -/// use std::collections::HashMap; -/// use std::convert::TryInto; -/// use http::HeaderMap; -/// -/// let mut map = HashMap::new(); -/// map.insert("X-Custom-Header".to_string(), "my value".to_string()); -/// -/// let headers: HeaderMap = (&map).try_into().expect("valid headers"); -/// assert_eq!(headers["X-Custom-Header"], "my value"); -/// ``` -impl<'a, K, V, T> TryFrom<&'a HashMap> for HeaderMap - where - K: Eq + Hash, - HeaderName: TryFrom<&'a K>, - >::Error: Into, - T: TryFrom<&'a V>, - T::Error: Into, -{ - type Error = Error; - - fn try_from(c: &'a HashMap) -> Result { - c.into_iter() - .map(|(k, v)| -> crate::Result<(HeaderName, T)> { - let name = TryFrom::try_from(k).map_err(Into::into)?; - let value = TryFrom::try_from(v).map_err(Into::into)?; - Ok((name, value)) - }) - .collect() - } -} - -impl Extend<(Option, T)> for HeaderMap { - /// Extend a `HeaderMap` with the contents of another `HeaderMap`. - /// - /// This function expects the yielded items to follow the same structure as - /// `IntoIter`. - /// - /// # Panics - /// - /// This panics if the first yielded item does not have a `HeaderName`. 
- /// - /// # Examples - /// - /// ``` - /// # use http::header::*; - /// let mut map = HeaderMap::new(); - /// - /// map.insert(ACCEPT, "text/plain".parse().unwrap()); - /// map.insert(HOST, "hello.world".parse().unwrap()); - /// - /// let mut extra = HeaderMap::new(); - /// - /// extra.insert(HOST, "foo.bar".parse().unwrap()); - /// extra.insert(COOKIE, "hello".parse().unwrap()); - /// extra.append(COOKIE, "world".parse().unwrap()); - /// - /// map.extend(extra); - /// - /// assert_eq!(map["host"], "foo.bar"); - /// assert_eq!(map["accept"], "text/plain"); - /// assert_eq!(map["cookie"], "hello"); - /// - /// let v = map.get_all("host"); - /// assert_eq!(1, v.iter().count()); - /// - /// let v = map.get_all("cookie"); - /// assert_eq!(2, v.iter().count()); - /// ``` - fn extend, T)>>(&mut self, iter: I) { - let mut iter = iter.into_iter(); - - // The structure of this is a bit weird, but it is mostly to make the - // borrow checker happy. - let (mut key, mut val) = match iter.next() { - Some((Some(key), val)) => (key, val), - Some((None, _)) => panic!("expected a header name, but got None"), - None => return, - }; - - 'outer: loop { - let mut entry = match self.entry2(key) { - Entry::Occupied(mut e) => { - // Replace all previous values while maintaining a handle to - // the entry. - e.insert(val); - e - } - Entry::Vacant(e) => e.insert_entry(val), - }; - - // As long as `HeaderName` is none, keep inserting the value into - // the current entry - loop { - match iter.next() { - Some((Some(k), v)) => { - key = k; - val = v; - continue 'outer; - } - Some((None, v)) => { - entry.append(v); - } - None => { - return; - } - } - } - } - } -} - -impl Extend<(HeaderName, T)> for HeaderMap { - fn extend>(&mut self, iter: I) { - // Keys may be already present or show multiple times in the iterator. - // Reserve the entire hint lower bound if the map is empty. - // Otherwise reserve half the hint (rounded up), so the map - // will only resize twice in the worst case. 
- let iter = iter.into_iter(); - - let reserve = if self.is_empty() { - iter.size_hint().0 - } else { - (iter.size_hint().0 + 1) / 2 - }; - - self.reserve(reserve); - - for (k, v) in iter { - self.append(k, v); - } - } -} - -impl PartialEq for HeaderMap { - fn eq(&self, other: &HeaderMap) -> bool { - if self.len() != other.len() { - return false; - } - - self.keys() - .all(|key| self.get_all(key) == other.get_all(key)) - } -} - -impl Eq for HeaderMap {} - -impl fmt::Debug for HeaderMap { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_map().entries(self.iter()).finish() - } -} - -impl Default for HeaderMap { - fn default() -> Self { - HeaderMap::with_capacity(0) - } -} - -impl<'a, K, T> ops::Index for HeaderMap -where - K: AsHeaderName, -{ - type Output = T; - - /// # Panics - /// Using the index operator will cause a panic if the header you're querying isn't set. - #[inline] - fn index(&self, index: K) -> &T { - match self.get2(&index) { - Some(val) => val, - None => panic!("no entry found for key {:?}", index.as_str()), - } - } -} - -/// phase 2 is post-insert where we forward-shift `Pos` in the indices. 
-/// -/// returns the number of displaced elements -#[inline] -fn do_insert_phase_two(indices: &mut [Pos], mut probe: usize, mut old_pos: Pos) -> usize { - let mut num_displaced = 0; - - probe_loop!(probe < indices.len(), { - let pos = &mut indices[probe]; - - if pos.is_none() { - *pos = old_pos; - break; - } else { - num_displaced += 1; - old_pos = mem::replace(pos, old_pos); - } - }); - - num_displaced -} - -#[inline] -fn append_value( - entry_idx: usize, - entry: &mut Bucket, - extra: &mut Vec>, - value: T, -) { - match entry.links { - Some(links) => { - let idx = extra.len(); - extra.push(ExtraValue { - value: value, - prev: Link::Extra(links.tail), - next: Link::Entry(entry_idx), - }); - - extra[links.tail].next = Link::Extra(idx); - - entry.links = Some(Links { tail: idx, ..links }); - } - None => { - let idx = extra.len(); - extra.push(ExtraValue { - value: value, - prev: Link::Entry(entry_idx), - next: Link::Entry(entry_idx), - }); - - entry.links = Some(Links { - next: idx, - tail: idx, - }); - } - } -} - -// ===== impl Iter ===== - -impl<'a, T> Iterator for Iter<'a, T> { - type Item = (&'a HeaderName, &'a T); - - fn next(&mut self) -> Option { - use self::Cursor::*; - - if self.cursor.is_none() { - if (self.entry + 1) >= self.map.entries.len() { - return None; - } - - self.entry += 1; - self.cursor = Some(Cursor::Head); - } - - let entry = &self.map.entries[self.entry]; - - match self.cursor.unwrap() { - Head => { - self.cursor = entry.links.map(|l| Values(l.next)); - Some((&entry.key, &entry.value)) - } - Values(idx) => { - let extra = &self.map.extra_values[idx]; - - match extra.next { - Link::Entry(_) => self.cursor = None, - Link::Extra(i) => self.cursor = Some(Values(i)), - } - - Some((&entry.key, &extra.value)) - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let map = self.map; - debug_assert!(map.entries.len() >= self.entry); - - let lower = map.entries.len() - self.entry; - // We could pessimistically guess at the upper bound, saying - 
// that its lower + map.extra_values.len(). That could be - // way over though, such as if we're near the end, and have - // already gone through several extra values... - (lower, None) - } -} - -impl<'a, T> FusedIterator for Iter<'a, T> {} - -unsafe impl<'a, T: Sync> Sync for Iter<'a, T> {} -unsafe impl<'a, T: Sync> Send for Iter<'a, T> {} - -// ===== impl IterMut ===== - -impl<'a, T> IterMut<'a, T> { - fn next_unsafe(&mut self) -> Option<(&'a HeaderName, *mut T)> { - use self::Cursor::*; - - if self.cursor.is_none() { - if (self.entry + 1) >= unsafe { &*self.map }.entries.len() { - return None; - } - - self.entry += 1; - self.cursor = Some(Cursor::Head); - } - - let entry = unsafe { &mut (*self.map).entries[self.entry] }; - - match self.cursor.unwrap() { - Head => { - self.cursor = entry.links.map(|l| Values(l.next)); - Some((&entry.key, &mut entry.value as *mut _)) - } - Values(idx) => { - let extra = unsafe { &mut (*self.map).extra_values[idx] }; - - match extra.next { - Link::Entry(_) => self.cursor = None, - Link::Extra(i) => self.cursor = Some(Values(i)), - } - - Some((&entry.key, &mut extra.value as *mut _)) - } - } - } -} - -impl<'a, T> Iterator for IterMut<'a, T> { - type Item = (&'a HeaderName, &'a mut T); - - fn next(&mut self) -> Option { - self.next_unsafe() - .map(|(key, ptr)| (key, unsafe { &mut *ptr })) - } - - fn size_hint(&self) -> (usize, Option) { - let map = unsafe { &*self.map }; - debug_assert!(map.entries.len() >= self.entry); - - let lower = map.entries.len() - self.entry; - // We could pessimistically guess at the upper bound, saying - // that its lower + map.extra_values.len(). That could be - // way over though, such as if we're near the end, and have - // already gone through several extra values... 
- (lower, None) - } -} - -impl<'a, T> FusedIterator for IterMut<'a, T> {} - -unsafe impl<'a, T: Sync> Sync for IterMut<'a, T> {} -unsafe impl<'a, T: Send> Send for IterMut<'a, T> {} - -// ===== impl Keys ===== - -impl<'a, T> Iterator for Keys<'a, T> { - type Item = &'a HeaderName; - - fn next(&mut self) -> Option { - self.inner.next().map(|b| &b.key) - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -impl<'a, T> ExactSizeIterator for Keys<'a, T> {} -impl<'a, T> FusedIterator for Keys<'a, T> {} - -// ===== impl Values ==== - -impl<'a, T> Iterator for Values<'a, T> { - type Item = &'a T; - - fn next(&mut self) -> Option { - self.inner.next().map(|(_, v)| v) - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -impl<'a, T> FusedIterator for Values<'a, T> {} - -// ===== impl ValuesMut ==== - -impl<'a, T> Iterator for ValuesMut<'a, T> { - type Item = &'a mut T; - - fn next(&mut self) -> Option { - self.inner.next().map(|(_, v)| v) - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -impl<'a, T> FusedIterator for ValuesMut<'a, T> {} - -// ===== impl Drain ===== - -impl<'a, T> Iterator for Drain<'a, T> { - type Item = (Option, T); - - fn next(&mut self) -> Option { - if let Some(next) = self.next { - // Remove the extra value - - let raw_links = RawLinks(self.entries); - let extra = unsafe { - remove_extra_value(raw_links, &mut *self.extra_values, next) - }; - - match extra.next { - Link::Extra(idx) => self.next = Some(idx), - Link::Entry(_) => self.next = None, - } - - return Some((None, extra.value)); - } - - let idx = self.idx; - - if idx == self.len { - return None; - } - - self.idx += 1; - - unsafe { - let entry = &(*self.entries)[idx]; - - // Read the header name - let key = ptr::read(&entry.key as *const _); - let value = ptr::read(&entry.value as *const _); - self.next = entry.links.map(|l| l.next); - - Some((Some(key), value)) - } - } - - fn size_hint(&self) -> (usize, 
Option) { - // At least this many names... It's unknown if the user wants - // to count the extra_values on top. - // - // For instance, extending a new `HeaderMap` wouldn't need to - // reserve the upper-bound in `entries`, only the lower-bound. - let lower = self.len - self.idx; - let upper = unsafe { (*self.extra_values).len() } + lower; - (lower, Some(upper)) - } -} - -impl<'a, T> FusedIterator for Drain<'a, T> {} - -impl<'a, T> Drop for Drain<'a, T> { - fn drop(&mut self) { - for _ in self {} - } -} - -unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {} -unsafe impl<'a, T: Send> Send for Drain<'a, T> {} - -// ===== impl Entry ===== - -impl<'a, T> Entry<'a, T> { - /// Ensures a value is in the entry by inserting the default if empty. - /// - /// Returns a mutable reference to the **first** value in the entry. - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// let mut map: HeaderMap = HeaderMap::default(); - /// - /// let headers = &[ - /// "content-length", - /// "x-hello", - /// "Content-Length", - /// "x-world", - /// ]; - /// - /// for &header in headers { - /// let counter = map.entry(header) - /// .or_insert(0); - /// *counter += 1; - /// } - /// - /// assert_eq!(map["content-length"], 2); - /// assert_eq!(map["x-hello"], 1); - /// ``` - pub fn or_insert(self, default: T) -> &'a mut T { - use self::Entry::*; - - match self { - Occupied(e) => e.into_mut(), - Vacant(e) => e.insert(default), - } - } - - /// Ensures a value is in the entry by inserting the result of the default - /// function if empty. - /// - /// The default function is not called if the entry exists in the map. - /// Returns a mutable reference to the **first** value in the entry. - /// - /// # Examples - /// - /// Basic usage. 
- /// - /// ``` - /// # use http::HeaderMap; - /// let mut map = HeaderMap::new(); - /// - /// let res = map.entry("x-hello") - /// .or_insert_with(|| "world".parse().unwrap()); - /// - /// assert_eq!(res, "world"); - /// ``` - /// - /// The default function is not called if the entry exists in the map. - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "world".parse().unwrap()); - /// - /// let res = map.entry("host") - /// .or_insert_with(|| unreachable!()); - /// - /// - /// assert_eq!(res, "world"); - /// ``` - pub fn or_insert_with T>(self, default: F) -> &'a mut T { - use self::Entry::*; - - match self { - Occupied(e) => e.into_mut(), - Vacant(e) => e.insert(default()), - } - } - - /// Returns a reference to the entry's key - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// let mut map = HeaderMap::new(); - /// - /// assert_eq!(map.entry("x-hello").key(), "x-hello"); - /// ``` - pub fn key(&self) -> &HeaderName { - use self::Entry::*; - - match *self { - Vacant(ref e) => e.key(), - Occupied(ref e) => e.key(), - } - } -} - -// ===== impl VacantEntry ===== - -impl<'a, T> VacantEntry<'a, T> { - /// Returns a reference to the entry's key - /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// let mut map = HeaderMap::new(); - /// - /// assert_eq!(map.entry("x-hello").key().as_str(), "x-hello"); - /// ``` - pub fn key(&self) -> &HeaderName { - &self.key - } - - /// Take ownership of the key - /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry}; - /// let mut map = HeaderMap::new(); - /// - /// if let Entry::Vacant(v) = map.entry("x-hello") { - /// assert_eq!(v.into_key().as_str(), "x-hello"); - /// } - /// ``` - pub fn into_key(self) -> HeaderName { - self.key - } - - /// Insert the value into the entry. - /// - /// The value will be associated with this entry's key. 
A mutable reference - /// to the inserted value will be returned. - /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry}; - /// let mut map = HeaderMap::new(); - /// - /// if let Entry::Vacant(v) = map.entry("x-hello") { - /// v.insert("world".parse().unwrap()); - /// } - /// - /// assert_eq!(map["x-hello"], "world"); - /// ``` - pub fn insert(self, value: T) -> &'a mut T { - // Ensure that there is space in the map - let index = - self.map - .insert_phase_two(self.key, value.into(), self.hash, self.probe, self.danger); - - &mut self.map.entries[index].value - } - - /// Insert the value into the entry. - /// - /// The value will be associated with this entry's key. The new - /// `OccupiedEntry` is returned, allowing for further manipulation. - /// - /// # Examples - /// - /// ``` - /// # use http::header::*; - /// let mut map = HeaderMap::new(); - /// - /// if let Entry::Vacant(v) = map.entry("x-hello") { - /// let mut e = v.insert_entry("world".parse().unwrap()); - /// e.insert("world2".parse().unwrap()); - /// } - /// - /// assert_eq!(map["x-hello"], "world2"); - /// ``` - pub fn insert_entry(self, value: T) -> OccupiedEntry<'a, T> { - // Ensure that there is space in the map - let index = - self.map - .insert_phase_two(self.key, value.into(), self.hash, self.probe, self.danger); - - OccupiedEntry { - map: self.map, - index: index, - probe: self.probe, - } - } -} - -// ===== impl GetAll ===== - -impl<'a, T: 'a> GetAll<'a, T> { - /// Returns an iterator visiting all values associated with the entry. - /// - /// Values are iterated in insertion order. 
- /// - /// # Examples - /// - /// ``` - /// # use http::HeaderMap; - /// # use http::header::HOST; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "hello.world".parse().unwrap()); - /// map.append(HOST, "hello.earth".parse().unwrap()); - /// - /// let values = map.get_all("host"); - /// let mut iter = values.iter(); - /// assert_eq!(&"hello.world", iter.next().unwrap()); - /// assert_eq!(&"hello.earth", iter.next().unwrap()); - /// assert!(iter.next().is_none()); - /// ``` - pub fn iter(&self) -> ValueIter<'a, T> { - // This creates a new GetAll struct so that the lifetime - // isn't bound to &self. - GetAll { - map: self.map, - index: self.index, - } - .into_iter() - } -} - -impl<'a, T: PartialEq> PartialEq for GetAll<'a, T> { - fn eq(&self, other: &Self) -> bool { - self.iter().eq(other.iter()) - } -} - -impl<'a, T> IntoIterator for GetAll<'a, T> { - type Item = &'a T; - type IntoIter = ValueIter<'a, T>; - - fn into_iter(self) -> ValueIter<'a, T> { - self.map.value_iter(self.index) - } -} - -impl<'a, 'b: 'a, T> IntoIterator for &'b GetAll<'a, T> { - type Item = &'a T; - type IntoIter = ValueIter<'a, T>; - - fn into_iter(self) -> ValueIter<'a, T> { - self.map.value_iter(self.index) - } -} - -// ===== impl ValueIter ===== - -impl<'a, T: 'a> Iterator for ValueIter<'a, T> { - type Item = &'a T; - - fn next(&mut self) -> Option { - use self::Cursor::*; - - match self.front { - Some(Head) => { - let entry = &self.map.entries[self.index]; - - if self.back == Some(Head) { - self.front = None; - self.back = None; - } else { - // Update the iterator state - match entry.links { - Some(links) => { - self.front = Some(Values(links.next)); - } - None => unreachable!(), - } - } - - Some(&entry.value) - } - Some(Values(idx)) => { - let extra = &self.map.extra_values[idx]; - - if self.front == self.back { - self.front = None; - self.back = None; - } else { - match extra.next { - Link::Entry(_) => self.front = None, - Link::Extra(i) => self.front = Some(Values(i)), - 
} - } - - Some(&extra.value) - } - None => None, - } - } - - fn size_hint(&self) -> (usize, Option) { - match (self.front, self.back) { - // Exactly 1 value... - (Some(Cursor::Head), Some(Cursor::Head)) => (1, Some(1)), - // At least 1... - (Some(_), _) => (1, None), - // No more values... - (None, _) => (0, Some(0)), - } - } -} - -impl<'a, T: 'a> DoubleEndedIterator for ValueIter<'a, T> { - fn next_back(&mut self) -> Option { - use self::Cursor::*; - - match self.back { - Some(Head) => { - self.front = None; - self.back = None; - Some(&self.map.entries[self.index].value) - } - Some(Values(idx)) => { - let extra = &self.map.extra_values[idx]; - - if self.front == self.back { - self.front = None; - self.back = None; - } else { - match extra.prev { - Link::Entry(_) => self.back = Some(Head), - Link::Extra(idx) => self.back = Some(Values(idx)), - } - } - - Some(&extra.value) - } - None => None, - } - } -} - -impl<'a, T> FusedIterator for ValueIter<'a, T> {} - -// ===== impl ValueIterMut ===== - -impl<'a, T: 'a> Iterator for ValueIterMut<'a, T> { - type Item = &'a mut T; - - fn next(&mut self) -> Option { - use self::Cursor::*; - - let entry = unsafe { &mut (*self.map).entries[self.index] }; - - match self.front { - Some(Head) => { - if self.back == Some(Head) { - self.front = None; - self.back = None; - } else { - // Update the iterator state - match entry.links { - Some(links) => { - self.front = Some(Values(links.next)); - } - None => unreachable!(), - } - } - - Some(&mut entry.value) - } - Some(Values(idx)) => { - let extra = unsafe { &mut (*self.map).extra_values[idx] }; - - if self.front == self.back { - self.front = None; - self.back = None; - } else { - match extra.next { - Link::Entry(_) => self.front = None, - Link::Extra(i) => self.front = Some(Values(i)), - } - } - - Some(&mut extra.value) - } - None => None, - } - } -} - -impl<'a, T: 'a> DoubleEndedIterator for ValueIterMut<'a, T> { - fn next_back(&mut self) -> Option { - use self::Cursor::*; - - let entry 
= unsafe { &mut (*self.map).entries[self.index] }; - - match self.back { - Some(Head) => { - self.front = None; - self.back = None; - Some(&mut entry.value) - } - Some(Values(idx)) => { - let extra = unsafe { &mut (*self.map).extra_values[idx] }; - - if self.front == self.back { - self.front = None; - self.back = None; - } else { - match extra.prev { - Link::Entry(_) => self.back = Some(Head), - Link::Extra(idx) => self.back = Some(Values(idx)), - } - } - - Some(&mut extra.value) - } - None => None, - } - } -} - -impl<'a, T> FusedIterator for ValueIterMut<'a, T> {} - -unsafe impl<'a, T: Sync> Sync for ValueIterMut<'a, T> {} -unsafe impl<'a, T: Send> Send for ValueIterMut<'a, T> {} - -// ===== impl IntoIter ===== - -impl Iterator for IntoIter { - type Item = (Option, T); - - fn next(&mut self) -> Option { - if let Some(next) = self.next { - self.next = match self.extra_values[next].next { - Link::Entry(_) => None, - Link::Extra(v) => Some(v), - }; - - let value = unsafe { ptr::read(&self.extra_values[next].value) }; - - return Some((None, value)); - } - - if let Some(bucket) = self.entries.next() { - self.next = bucket.links.map(|l| l.next); - let name = Some(bucket.key); - let value = bucket.value; - - return Some((name, value)); - } - - None - } - - fn size_hint(&self) -> (usize, Option) { - let (lower, _) = self.entries.size_hint(); - // There could be more than just the entries upper, as there - // could be items in the `extra_values`. We could guess, saying - // `upper + extra_values.len()`, but that could overestimate by a lot. - (lower, None) - } -} - -impl FusedIterator for IntoIter {} - -impl Drop for IntoIter { - fn drop(&mut self) { - // Ensure the iterator is consumed - for _ in self.by_ref() {} - - // All the values have already been yielded out. - unsafe { - self.extra_values.set_len(0); - } - } -} - -// ===== impl OccupiedEntry ===== - -impl<'a, T> OccupiedEntry<'a, T> { - /// Returns a reference to the entry's key. 
- /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "world".parse().unwrap()); - /// - /// if let Entry::Occupied(e) = map.entry("host") { - /// assert_eq!("host", e.key()); - /// } - /// ``` - pub fn key(&self) -> &HeaderName { - &self.map.entries[self.index].key - } - - /// Get a reference to the first value in the entry. - /// - /// Values are stored in insertion order. - /// - /// # Panics - /// - /// `get` panics if there are no values associated with the entry. - /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "hello.world".parse().unwrap()); - /// - /// if let Entry::Occupied(mut e) = map.entry("host") { - /// assert_eq!(e.get(), &"hello.world"); - /// - /// e.append("hello.earth".parse().unwrap()); - /// - /// assert_eq!(e.get(), &"hello.world"); - /// } - /// ``` - pub fn get(&self) -> &T { - &self.map.entries[self.index].value - } - - /// Get a mutable reference to the first value in the entry. - /// - /// Values are stored in insertion order. - /// - /// # Panics - /// - /// `get_mut` panics if there are no values associated with the entry. - /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::default(); - /// map.insert(HOST, "hello.world".to_string()); - /// - /// if let Entry::Occupied(mut e) = map.entry("host") { - /// e.get_mut().push_str("-2"); - /// assert_eq!(e.get(), &"hello.world-2"); - /// } - /// ``` - pub fn get_mut(&mut self) -> &mut T { - &mut self.map.entries[self.index].value - } - - /// Converts the `OccupiedEntry` into a mutable reference to the **first** - /// value. - /// - /// The lifetime of the returned reference is bound to the original map. - /// - /// # Panics - /// - /// `into_mut` panics if there are no values associated with the entry. 
- /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::default(); - /// map.insert(HOST, "hello.world".to_string()); - /// map.append(HOST, "hello.earth".to_string()); - /// - /// if let Entry::Occupied(e) = map.entry("host") { - /// e.into_mut().push_str("-2"); - /// } - /// - /// assert_eq!("hello.world-2", map["host"]); - /// ``` - pub fn into_mut(self) -> &'a mut T { - &mut self.map.entries[self.index].value - } - - /// Sets the value of the entry. - /// - /// All previous values associated with the entry are removed and the first - /// one is returned. See `insert_mult` for an API that returns all values. - /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "hello.world".parse().unwrap()); - /// - /// if let Entry::Occupied(mut e) = map.entry("host") { - /// let mut prev = e.insert("earth".parse().unwrap()); - /// assert_eq!("hello.world", prev); - /// } - /// - /// assert_eq!("earth", map["host"]); - /// ``` - pub fn insert(&mut self, value: T) -> T { - self.map.insert_occupied(self.index, value.into()) - } - - /// Sets the value of the entry. - /// - /// This function does the same as `insert` except it returns an iterator - /// that yields all values previously associated with the key. 
- /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "world".parse().unwrap()); - /// map.append(HOST, "world2".parse().unwrap()); - /// - /// if let Entry::Occupied(mut e) = map.entry("host") { - /// let mut prev = e.insert_mult("earth".parse().unwrap()); - /// assert_eq!("world", prev.next().unwrap()); - /// assert_eq!("world2", prev.next().unwrap()); - /// assert!(prev.next().is_none()); - /// } - /// - /// assert_eq!("earth", map["host"]); - /// ``` - pub fn insert_mult(&mut self, value: T) -> ValueDrain<'_, T> { - self.map.insert_occupied_mult(self.index, value.into()) - } - - /// Insert the value into the entry. - /// - /// The new value is appended to the end of the entry's value list. All - /// previous values associated with the entry are retained. - /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "world".parse().unwrap()); - /// - /// if let Entry::Occupied(mut e) = map.entry("host") { - /// e.append("earth".parse().unwrap()); - /// } - /// - /// let values = map.get_all("host"); - /// let mut i = values.iter(); - /// assert_eq!("world", *i.next().unwrap()); - /// assert_eq!("earth", *i.next().unwrap()); - /// ``` - pub fn append(&mut self, value: T) { - let idx = self.index; - let entry = &mut self.map.entries[idx]; - append_value(idx, entry, &mut self.map.extra_values, value.into()); - } - - /// Remove the entry from the map. - /// - /// All values associated with the entry are removed and the first one is - /// returned. See `remove_entry_mult` for an API that returns all values. 
- /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "world".parse().unwrap()); - /// - /// if let Entry::Occupied(e) = map.entry("host") { - /// let mut prev = e.remove(); - /// assert_eq!("world", prev); - /// } - /// - /// assert!(!map.contains_key("host")); - /// ``` - pub fn remove(self) -> T { - self.remove_entry().1 - } - - /// Remove the entry from the map. - /// - /// The key and all values associated with the entry are removed and the - /// first one is returned. See `remove_entry_mult` for an API that returns - /// all values. - /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "world".parse().unwrap()); - /// - /// if let Entry::Occupied(e) = map.entry("host") { - /// let (key, mut prev) = e.remove_entry(); - /// assert_eq!("host", key.as_str()); - /// assert_eq!("world", prev); - /// } - /// - /// assert!(!map.contains_key("host")); - /// ``` - pub fn remove_entry(self) -> (HeaderName, T) { - if let Some(links) = self.map.entries[self.index].links { - self.map.remove_all_extra_values(links.next); - } - - let entry = self.map.remove_found(self.probe, self.index); - - (entry.key, entry.value) - } - - /// Remove the entry from the map. - /// - /// The key and all values associated with the entry are removed and - /// returned. 
- pub fn remove_entry_mult(self) -> (HeaderName, ValueDrain<'a, T>) { - let raw_links = self.map.raw_links(); - let extra_values = &mut self.map.extra_values; - - let next = self.map.entries[self.index].links.map(|l| { - drain_all_extra_values(raw_links, extra_values, l.next) - .into_iter() - }); - - let entry = self.map.remove_found(self.probe, self.index); - - let drain = ValueDrain { - first: Some(entry.value), - next, - lt: PhantomData, - }; - (entry.key, drain) - } - - /// Returns an iterator visiting all values associated with the entry. - /// - /// Values are iterated in insertion order. - /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::new(); - /// map.insert(HOST, "world".parse().unwrap()); - /// map.append(HOST, "earth".parse().unwrap()); - /// - /// if let Entry::Occupied(e) = map.entry("host") { - /// let mut iter = e.iter(); - /// assert_eq!(&"world", iter.next().unwrap()); - /// assert_eq!(&"earth", iter.next().unwrap()); - /// assert!(iter.next().is_none()); - /// } - /// ``` - pub fn iter(&self) -> ValueIter<'_, T> { - self.map.value_iter(Some(self.index)) - } - - /// Returns an iterator mutably visiting all values associated with the - /// entry. - /// - /// Values are iterated in insertion order. 
- /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderMap, Entry, HOST}; - /// let mut map = HeaderMap::default(); - /// map.insert(HOST, "world".to_string()); - /// map.append(HOST, "earth".to_string()); - /// - /// if let Entry::Occupied(mut e) = map.entry("host") { - /// for e in e.iter_mut() { - /// e.push_str("-boop"); - /// } - /// } - /// - /// let mut values = map.get_all("host"); - /// let mut i = values.iter(); - /// assert_eq!(&"world-boop", i.next().unwrap()); - /// assert_eq!(&"earth-boop", i.next().unwrap()); - /// ``` - pub fn iter_mut(&mut self) -> ValueIterMut<'_, T> { - self.map.value_iter_mut(self.index) - } -} - -impl<'a, T> IntoIterator for OccupiedEntry<'a, T> { - type Item = &'a mut T; - type IntoIter = ValueIterMut<'a, T>; - - fn into_iter(self) -> ValueIterMut<'a, T> { - self.map.value_iter_mut(self.index) - } -} - -impl<'a, 'b: 'a, T> IntoIterator for &'b OccupiedEntry<'a, T> { - type Item = &'a T; - type IntoIter = ValueIter<'a, T>; - - fn into_iter(self) -> ValueIter<'a, T> { - self.iter() - } -} - -impl<'a, 'b: 'a, T> IntoIterator for &'b mut OccupiedEntry<'a, T> { - type Item = &'a mut T; - type IntoIter = ValueIterMut<'a, T>; - - fn into_iter(self) -> ValueIterMut<'a, T> { - self.iter_mut() - } -} - -// ===== impl ValueDrain ===== - -impl<'a, T> Iterator for ValueDrain<'a, T> { - type Item = T; - - fn next(&mut self) -> Option { - if self.first.is_some() { - self.first.take() - } else if let Some(ref mut extras) = self.next { - extras.next() - } else { - None - } - } - - fn size_hint(&self) -> (usize, Option) { - match (&self.first, &self.next) { - // Exactly 1 - (&Some(_), &None) => (1, Some(1)), - // 1 + extras - (&Some(_), &Some(ref extras)) => { - let (l, u) = extras.size_hint(); - (l + 1, u.map(|u| u + 1)) - }, - // Extras only - (&None, &Some(ref extras)) => extras.size_hint(), - // No more - (&None, &None) => (0, Some(0)), - } - } -} - -impl<'a, T> FusedIterator for ValueDrain<'a, T> {} - -impl<'a, T> Drop for 
ValueDrain<'a, T> { - fn drop(&mut self) { - while let Some(_) = self.next() {} - } -} - -unsafe impl<'a, T: Sync> Sync for ValueDrain<'a, T> {} -unsafe impl<'a, T: Send> Send for ValueDrain<'a, T> {} - -// ===== impl RawLinks ===== - -impl Clone for RawLinks { - fn clone(&self) -> RawLinks { - *self - } -} - -impl Copy for RawLinks {} - -impl ops::Index for RawLinks { - type Output = Option; - - fn index(&self, idx: usize) -> &Self::Output { - unsafe { - &(*self.0)[idx].links - } - } -} - -impl ops::IndexMut for RawLinks { - fn index_mut(&mut self, idx: usize) -> &mut Self::Output { - unsafe { - &mut (*self.0)[idx].links - } - } -} - -// ===== impl Pos ===== - -impl Pos { - #[inline] - fn new(index: usize, hash: HashValue) -> Self { - debug_assert!(index < MAX_SIZE); - Pos { - index: index as Size, - hash: hash, - } - } - - #[inline] - fn none() -> Self { - Pos { - index: !0, - hash: HashValue(0), - } - } - - #[inline] - fn is_some(&self) -> bool { - !self.is_none() - } - - #[inline] - fn is_none(&self) -> bool { - self.index == !0 - } - - #[inline] - fn resolve(&self) -> Option<(usize, HashValue)> { - if self.is_some() { - Some((self.index as usize, self.hash)) - } else { - None - } - } -} - -impl Danger { - fn is_red(&self) -> bool { - match *self { - Danger::Red(_) => true, - _ => false, - } - } - - fn to_red(&mut self) { - debug_assert!(self.is_yellow()); - *self = Danger::Red(RandomState::new()); - } - - fn is_yellow(&self) -> bool { - match *self { - Danger::Yellow => true, - _ => false, - } - } - - fn to_yellow(&mut self) { - match *self { - Danger::Green => { - *self = Danger::Yellow; - } - _ => {} - } - } - - fn to_green(&mut self) { - debug_assert!(self.is_yellow()); - *self = Danger::Green; - } -} - -// ===== impl Utils ===== - -#[inline] -fn usable_capacity(cap: usize) -> usize { - cap - cap / 4 -} - -#[inline] -fn to_raw_capacity(n: usize) -> usize { - match n.checked_add(n / 3) { - Some(n) => n, - None => panic!( - "requested capacity {} too large: 
overflow while converting to raw capacity", - n - ), - } -} - -#[inline] -fn desired_pos(mask: Size, hash: HashValue) -> usize { - (hash.0 & mask) as usize -} - -/// The number of steps that `current` is forward of the desired position for hash -#[inline] -fn probe_distance(mask: Size, hash: HashValue, current: usize) -> usize { - current.wrapping_sub(desired_pos(mask, hash)) & mask as usize -} - -fn hash_elem_using(danger: &Danger, k: &K) -> HashValue -where - K: Hash, -{ - use fnv::FnvHasher; - - const MASK: u64 = (MAX_SIZE as u64) - 1; - - let hash = match *danger { - // Safe hash - Danger::Red(ref hasher) => { - let mut h = hasher.build_hasher(); - k.hash(&mut h); - h.finish() - } - // Fast hash - _ => { - let mut h = FnvHasher::default(); - k.hash(&mut h); - h.finish() - } - }; - - HashValue((hash & MASK) as u16) -} - -/* - * - * ===== impl IntoHeaderName / AsHeaderName ===== - * - */ - -mod into_header_name { - use super::{Entry, HdrName, HeaderMap, HeaderName}; - - /// A marker trait used to identify values that can be used as insert keys - /// to a `HeaderMap`. - pub trait IntoHeaderName: Sealed {} - - // All methods are on this pub(super) trait, instead of `IntoHeaderName`, - // so that they aren't publicly exposed to the world. - // - // Being on the `IntoHeaderName` trait would mean users could call - // `"host".insert(&mut map, "localhost")`. - // - // Ultimately, this allows us to adjust the signatures of these methods - // without breaking any external crate. 
- pub trait Sealed { - #[doc(hidden)] - fn insert(self, map: &mut HeaderMap, val: T) -> Option; - - #[doc(hidden)] - fn append(self, map: &mut HeaderMap, val: T) -> bool; - - #[doc(hidden)] - fn entry(self, map: &mut HeaderMap) -> Entry<'_, T>; - } - - // ==== impls ==== - - impl Sealed for HeaderName { - #[inline] - fn insert(self, map: &mut HeaderMap, val: T) -> Option { - map.insert2(self, val) - } - - #[inline] - fn append(self, map: &mut HeaderMap, val: T) -> bool { - map.append2(self, val) - } - - #[inline] - fn entry(self, map: &mut HeaderMap) -> Entry<'_, T> { - map.entry2(self) - } - } - - impl IntoHeaderName for HeaderName {} - - impl<'a> Sealed for &'a HeaderName { - #[inline] - fn insert(self, map: &mut HeaderMap, val: T) -> Option { - map.insert2(self, val) - } - #[inline] - fn append(self, map: &mut HeaderMap, val: T) -> bool { - map.append2(self, val) - } - - #[inline] - fn entry(self, map: &mut HeaderMap) -> Entry<'_, T> { - map.entry2(self) - } - } - - impl<'a> IntoHeaderName for &'a HeaderName {} - - impl Sealed for &'static str { - #[inline] - fn insert(self, map: &mut HeaderMap, val: T) -> Option { - HdrName::from_static(self, move |hdr| map.insert2(hdr, val)) - } - #[inline] - fn append(self, map: &mut HeaderMap, val: T) -> bool { - HdrName::from_static(self, move |hdr| map.append2(hdr, val)) - } - - #[inline] - fn entry(self, map: &mut HeaderMap) -> Entry<'_, T> { - HdrName::from_static(self, move |hdr| map.entry2(hdr)) - } - } - - impl IntoHeaderName for &'static str {} -} - -mod as_header_name { - use super::{Entry, HdrName, HeaderMap, HeaderName, InvalidHeaderName}; - - /// A marker trait used to identify values that can be used as search keys - /// to a `HeaderMap`. - pub trait AsHeaderName: Sealed {} - - // All methods are on this pub(super) trait, instead of `AsHeaderName`, - // so that they aren't publicly exposed to the world. - // - // Being on the `AsHeaderName` trait would mean users could call - // `"host".find(&map)`. 
- // - // Ultimately, this allows us to adjust the signatures of these methods - // without breaking any external crate. - pub trait Sealed { - #[doc(hidden)] - fn try_entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName>; - - #[doc(hidden)] - fn find(&self, map: &HeaderMap) -> Option<(usize, usize)>; - - #[doc(hidden)] - fn as_str(&self) -> &str; - } - - // ==== impls ==== - - impl Sealed for HeaderName { - #[inline] - fn try_entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { - Ok(map.entry2(self)) - } - - #[inline] - fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { - map.find(self) - } - - fn as_str(&self) -> &str { - ::as_str(self) - } - } - - impl AsHeaderName for HeaderName {} - - impl<'a> Sealed for &'a HeaderName { - #[inline] - fn try_entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { - Ok(map.entry2(self)) - } - - #[inline] - fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { - map.find(*self) - } - - fn as_str(&self) -> &str { - ::as_str(*self) - } - } - - impl<'a> AsHeaderName for &'a HeaderName {} - - impl<'a> Sealed for &'a str { - #[inline] - fn try_entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { - HdrName::from_bytes(self.as_bytes(), move |hdr| map.entry2(hdr)) - } - - #[inline] - fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { - HdrName::from_bytes(self.as_bytes(), move |hdr| map.find(&hdr)).unwrap_or(None) - } - - fn as_str(&self) -> &str { - self - } - } - - impl<'a> AsHeaderName for &'a str {} - - impl Sealed for String { - #[inline] - fn try_entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { - self.as_str().try_entry(map) - } - - #[inline] - fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { - Sealed::find(&self.as_str(), map) - } - - fn as_str(&self) -> &str { - self - } - } - - impl AsHeaderName for String {} - - impl<'a> Sealed for &'a String { - #[inline] - fn try_entry(self, map: &mut HeaderMap) -> Result, InvalidHeaderName> { - 
self.as_str().try_entry(map) - } - - #[inline] - fn find(&self, map: &HeaderMap) -> Option<(usize, usize)> { - Sealed::find(*self, map) - } - - fn as_str(&self) -> &str { - *self - } - } - - impl<'a> AsHeaderName for &'a String {} -} - -#[test] -fn test_bounds() { - fn check_bounds() {} - - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); - check_bounds::>(); -} - -#[test] -fn skip_duplicates_during_key_iteration() { - let mut map = HeaderMap::new(); - map.append("a", HeaderValue::from_static("a")); - map.append("a", HeaderValue::from_static("b")); - assert_eq!(map.keys().count(), map.keys_len()); -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/header/mod.rs s390-tools-2.33.1/rust-vendor/http/src/header/mod.rs --- s390-tools-2.31.0/rust-vendor/http/src/header/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/header/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,174 +0,0 @@ -//! HTTP header types -//! -//! The module provides [`HeaderName`], [`HeaderMap`], and a number of types -//! used for interacting with `HeaderMap`. These types allow representing both -//! HTTP/1 and HTTP/2 headers. -//! -//! # `HeaderName` -//! -//! The `HeaderName` type represents both standard header names as well as -//! custom header names. The type handles the case insensitive nature of header -//! names and is used as the key portion of `HeaderMap`. Header names are -//! normalized to lower case. In other words, when creating a `HeaderName` with -//! a string, even if upper case characters are included, when getting a string -//! representation of the `HeaderName`, it will be all lower case. This allows -//! for faster `HeaderMap` comparison operations. -//! -//! 
The internal representation is optimized to efficiently handle the cases -//! most commonly encountered when working with HTTP. Standard header names are -//! special cased and are represented internally as an enum. Short custom -//! headers will be stored directly in the `HeaderName` struct and will not -//! incur any allocation overhead, however longer strings will require an -//! allocation for storage. -//! -//! ## Limitations -//! -//! `HeaderName` has a max length of 32,768 for header names. Attempting to -//! parse longer names will result in a panic. -//! -//! # `HeaderMap` -//! -//! `HeaderMap` is a map structure of header names highly optimized for use -//! cases common with HTTP. It is a [multimap] structure, where each header name -//! may have multiple associated header values. Given this, some of the APIs -//! diverge from [`HashMap`]. -//! -//! ## Overview -//! -//! Just like `HashMap` in Rust's stdlib, `HeaderMap` is based on [Robin Hood -//! hashing]. This algorithm tends to reduce the worst case search times in the -//! table and enables high load factors without seriously affecting performance. -//! Internally, keys and values are stored in vectors. As such, each insertion -//! will not incur allocation overhead. However, once the underlying vector -//! storage is full, a larger vector must be allocated and all values copied. -//! -//! ## Deterministic ordering -//! -//! Unlike Rust's `HashMap`, values in `HeaderMap` are deterministically -//! ordered. Roughly, values are ordered by insertion. This means that a -//! function that deterministically operates on a header map can rely on the -//! iteration order to remain consistent across processes and platforms. -//! -//! ## Adaptive hashing -//! -//! `HeaderMap` uses an adaptive hashing strategy in order to efficiently handle -//! most common cases. All standard headers have statically computed hash values -//! which removes the need to perform any hashing of these headers at runtime. -//! 
The default hash function emphasizes performance over robustness. However, -//! `HeaderMap` detects high collision rates and switches to a secure hash -//! function in those events. The threshold is set such that only denial of -//! service attacks should trigger it. -//! -//! ## Limitations -//! -//! `HeaderMap` can store a maximum of 32,768 headers (header name / value -//! pairs). Attempting to insert more will result in a panic. -//! -//! [`HeaderName`]: struct.HeaderName.html -//! [`HeaderMap`]: struct.HeaderMap.html -//! [multimap]: https://en.wikipedia.org/wiki/Multimap -//! [`HashMap`]: https://doc.rust-lang.org/std/collections/struct.HashMap.html -//! [Robin Hood hashing]: https://en.wikipedia.org/wiki/Hash_table#Robin_Hood_hashing - -mod map; -mod name; -mod value; - -pub use self::map::{ - AsHeaderName, Drain, Entry, GetAll, HeaderMap, IntoHeaderName, IntoIter, Iter, IterMut, Keys, - OccupiedEntry, VacantEntry, ValueDrain, ValueIter, ValueIterMut, Values, ValuesMut, -}; -pub use self::name::{HeaderName, InvalidHeaderName}; -pub use self::value::{HeaderValue, InvalidHeaderValue, ToStrError}; - -// Use header name constants -pub use self::name::{ - ACCEPT, - ACCEPT_CHARSET, - ACCEPT_ENCODING, - ACCEPT_LANGUAGE, - ACCEPT_RANGES, - ACCESS_CONTROL_ALLOW_CREDENTIALS, - ACCESS_CONTROL_ALLOW_HEADERS, - ACCESS_CONTROL_ALLOW_METHODS, - ACCESS_CONTROL_ALLOW_ORIGIN, - ACCESS_CONTROL_EXPOSE_HEADERS, - ACCESS_CONTROL_MAX_AGE, - ACCESS_CONTROL_REQUEST_HEADERS, - ACCESS_CONTROL_REQUEST_METHOD, - AGE, - ALLOW, - ALT_SVC, - AUTHORIZATION, - CACHE_CONTROL, - CACHE_STATUS, - CDN_CACHE_CONTROL, - CONNECTION, - CONTENT_DISPOSITION, - CONTENT_ENCODING, - CONTENT_LANGUAGE, - CONTENT_LENGTH, - CONTENT_LOCATION, - CONTENT_RANGE, - CONTENT_SECURITY_POLICY, - CONTENT_SECURITY_POLICY_REPORT_ONLY, - CONTENT_TYPE, - COOKIE, - DNT, - DATE, - ETAG, - EXPECT, - EXPIRES, - FORWARDED, - FROM, - HOST, - IF_MATCH, - IF_MODIFIED_SINCE, - IF_NONE_MATCH, - IF_RANGE, - IF_UNMODIFIED_SINCE, - 
LAST_MODIFIED, - LINK, - LOCATION, - MAX_FORWARDS, - ORIGIN, - PRAGMA, - PROXY_AUTHENTICATE, - PROXY_AUTHORIZATION, - PUBLIC_KEY_PINS, - PUBLIC_KEY_PINS_REPORT_ONLY, - RANGE, - REFERER, - REFERRER_POLICY, - REFRESH, - RETRY_AFTER, - SEC_WEBSOCKET_ACCEPT, - SEC_WEBSOCKET_EXTENSIONS, - SEC_WEBSOCKET_KEY, - SEC_WEBSOCKET_PROTOCOL, - SEC_WEBSOCKET_VERSION, - SERVER, - SET_COOKIE, - STRICT_TRANSPORT_SECURITY, - TE, - TRAILER, - TRANSFER_ENCODING, - UPGRADE, - UPGRADE_INSECURE_REQUESTS, - USER_AGENT, - VARY, - VIA, - WARNING, - WWW_AUTHENTICATE, - X_CONTENT_TYPE_OPTIONS, - X_DNS_PREFETCH_CONTROL, - X_FRAME_OPTIONS, - X_XSS_PROTECTION, -}; - -/// Maximum length of a header name -/// -/// Generally, 64kb for a header name is WAY too much than would ever be needed -/// in practice. Restricting it to this size enables using `u16` values to -/// represent offsets when dealing with header names. -const MAX_HEADER_NAME_LEN: usize = (1 << 16) - 1; diff -Nru s390-tools-2.31.0/rust-vendor/http/src/header/name.rs s390-tools-2.33.1/rust-vendor/http/src/header/name.rs --- s390-tools-2.31.0/rust-vendor/http/src/header/name.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/header/name.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1868 +0,0 @@ -use crate::byte_str::ByteStr; -use bytes::{Bytes, BytesMut}; - -use std::borrow::Borrow; -use std::error::Error; -use std::convert::{TryFrom}; -use std::hash::{Hash, Hasher}; -use std::mem::MaybeUninit; -use std::str::FromStr; -use std::fmt; - -/// Represents an HTTP header field name -/// -/// Header field names identify the header. Header sets may include multiple -/// headers with the same name. The HTTP specification defines a number of -/// standard headers, but HTTP messages may include non-standard header names as -/// well as long as they adhere to the specification. -/// -/// `HeaderName` is used as the [`HeaderMap`] key. Constants are available for -/// all standard header names in the [`header`] module. 
-/// -/// # Representation -/// -/// `HeaderName` represents standard header names using an `enum`, as such they -/// will not require an allocation for storage. All custom header names are -/// lower cased upon conversion to a `HeaderName` value. This avoids the -/// overhead of dynamically doing lower case conversion during the hash code -/// computation and the comparison operation. -/// -/// [`HeaderMap`]: struct.HeaderMap.html -/// [`header`]: index.html -#[derive(Clone, Eq, PartialEq, Hash)] -pub struct HeaderName { - inner: Repr, -} - -// Almost a full `HeaderName` -#[derive(Debug, Hash)] -pub struct HdrName<'a> { - inner: Repr>, -} - -#[derive(Debug, Clone, Eq, PartialEq, Hash)] -enum Repr { - Standard(StandardHeader), - Custom(T), -} - -// Used to hijack the Hash impl -#[derive(Debug, Clone, Eq, PartialEq)] -struct Custom(ByteStr); - -#[derive(Debug, Clone)] -// Invariant: If lower then buf is valid UTF-8. -struct MaybeLower<'a> { - buf: &'a [u8], - lower: bool, -} - -/// A possible error when converting a `HeaderName` from another type. -pub struct InvalidHeaderName { - _priv: (), -} - -macro_rules! standard_headers { - ( - $( - $(#[$docs:meta])* - ($konst:ident, $upcase:ident, $name_bytes:literal); - )+ - ) => { - #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] - enum StandardHeader { - $( - $konst, - )+ - } - - $( - $(#[$docs])* - pub const $upcase: HeaderName = HeaderName { - inner: Repr::Standard(StandardHeader::$konst), - }; - )+ - - impl StandardHeader { - #[inline] - fn as_str(&self) -> &'static str { - match *self { - // Safety: test_parse_standard_headers ensures these &[u8]s are &str-safe. 
- $( - StandardHeader::$konst => unsafe { std::str::from_utf8_unchecked( $name_bytes ) }, - )+ - } - } - - const fn from_bytes(name_bytes: &[u8]) -> Option { - match name_bytes { - $( - $name_bytes => Some(StandardHeader::$konst), - )+ - _ => None, - } - } - } - - #[cfg(test)] - const TEST_HEADERS: &'static [(StandardHeader, &'static [u8])] = &[ - $( - (StandardHeader::$konst, $name_bytes), - )+ - ]; - - #[test] - fn test_parse_standard_headers() { - for &(std, name_bytes) in TEST_HEADERS { - // Test lower case - assert_eq!(HeaderName::from_bytes(name_bytes).unwrap(), HeaderName::from(std)); - - // Test upper case - let upper = std::str::from_utf8(name_bytes).expect("byte string constants are all utf-8").to_uppercase(); - assert_eq!(HeaderName::from_bytes(upper.as_bytes()).unwrap(), HeaderName::from(std)); - } - } - - #[test] - fn test_standard_headers_into_bytes() { - for &(std, name_bytes) in TEST_HEADERS { - let name = std::str::from_utf8(name_bytes).unwrap(); - let std = HeaderName::from(std); - // Test lower case - let bytes: Bytes = - HeaderName::from_bytes(name_bytes).unwrap().inner.into(); - assert_eq!(bytes, name); - assert_eq!(HeaderName::from_bytes(name_bytes).unwrap(), std); - - // Test upper case - let upper = name.to_uppercase(); - let bytes: Bytes = - HeaderName::from_bytes(upper.as_bytes()).unwrap().inner.into(); - assert_eq!(bytes, name_bytes); - assert_eq!(HeaderName::from_bytes(upper.as_bytes()).unwrap(), - std); - } - - } - } -} - -// Generate constants for all standard HTTP headers. This includes a static hash -// code for the "fast hash" path. The hash code for static headers *do not* have -// to match the text representation of those headers. This is because header -// strings are always converted to the static values (when they match) before -// being hashed. This means that it is impossible to compare the static hash -// code of CONTENT_LENGTH with "content-length". -standard_headers! 
{ - /// Advertises which content types the client is able to understand. - /// - /// The Accept request HTTP header advertises which content types, expressed - /// as MIME types, the client is able to understand. Using content - /// negotiation, the server then selects one of the proposals, uses it and - /// informs the client of its choice with the Content-Type response header. - /// Browsers set adequate values for this header depending of the context - /// where the request is done: when fetching a CSS stylesheet a different - /// value is set for the request than when fetching an image, video or a - /// script. - (Accept, ACCEPT, b"accept"); - - /// Advertises which character set the client is able to understand. - /// - /// The Accept-Charset request HTTP header advertises which character set - /// the client is able to understand. Using content negotiation, the server - /// then selects one of the proposals, uses it and informs the client of its - /// choice within the Content-Type response header. Browsers usually don't - /// set this header as the default value for each content type is usually - /// correct and transmitting it would allow easier fingerprinting. - /// - /// If the server cannot serve any matching character set, it can - /// theoretically send back a 406 (Not Acceptable) error code. But, for a - /// better user experience, this is rarely done and the more common way is - /// to ignore the Accept-Charset header in this case. - (AcceptCharset, ACCEPT_CHARSET, b"accept-charset"); - - /// Advertises which content encoding the client is able to understand. - /// - /// The Accept-Encoding request HTTP header advertises which content - /// encoding, usually a compression algorithm, the client is able to - /// understand. Using content negotiation, the server selects one of the - /// proposals, uses it and informs the client of its choice with the - /// Content-Encoding response header. 
- /// - /// Even if both the client and the server supports the same compression - /// algorithms, the server may choose not to compress the body of a - /// response, if the identity value is also acceptable. Two common cases - /// lead to this: - /// - /// * The data to be sent is already compressed and a second compression - /// won't lead to smaller data to be transmitted. This may the case with - /// some image formats; - /// - /// * The server is overloaded and cannot afford the computational overhead - /// induced by the compression requirement. Typically, Microsoft recommends - /// not to compress if a server use more than 80 % of its computational - /// power. - /// - /// As long as the identity value, meaning no encryption, is not explicitly - /// forbidden, by an identity;q=0 or a *;q=0 without another explicitly set - /// value for identity, the server must never send back a 406 Not Acceptable - /// error. - (AcceptEncoding, ACCEPT_ENCODING, b"accept-encoding"); - - /// Advertises which languages the client is able to understand. - /// - /// The Accept-Language request HTTP header advertises which languages the - /// client is able to understand, and which locale variant is preferred. - /// Using content negotiation, the server then selects one of the proposals, - /// uses it and informs the client of its choice with the Content-Language - /// response header. Browsers set adequate values for this header according - /// their user interface language and even if a user can change it, this - /// happens rarely (and is frown upon as it leads to fingerprinting). - /// - /// This header is a hint to be used when the server has no way of - /// determining the language via another way, like a specific URL, that is - /// controlled by an explicit user decision. It is recommended that the - /// server never overrides an explicit decision. 
The content of the - /// Accept-Language is often out of the control of the user (like when - /// traveling and using an Internet Cafe in a different country); the user - /// may also want to visit a page in another language than the locale of - /// their user interface. - /// - /// If the server cannot serve any matching language, it can theoretically - /// send back a 406 (Not Acceptable) error code. But, for a better user - /// experience, this is rarely done and more common way is to ignore the - /// Accept-Language header in this case. - (AcceptLanguage, ACCEPT_LANGUAGE, b"accept-language"); - - /// Marker used by the server to advertise partial request support. - /// - /// The Accept-Ranges response HTTP header is a marker used by the server to - /// advertise its support of partial requests. The value of this field - /// indicates the unit that can be used to define a range. - /// - /// In presence of an Accept-Ranges header, the browser may try to resume an - /// interrupted download, rather than to start it from the start again. - (AcceptRanges, ACCEPT_RANGES, b"accept-ranges"); - - /// Preflight response indicating if the response to the request can be - /// exposed to the page. - /// - /// The Access-Control-Allow-Credentials response header indicates whether - /// or not the response to the request can be exposed to the page. It can be - /// exposed when the true value is returned; it can't in other cases. - /// - /// Credentials are cookies, authorization headers or TLS client - /// certificates. - /// - /// When used as part of a response to a preflight request, this indicates - /// whether or not the actual request can be made using credentials. Note - /// that simple GET requests are not preflighted, and so if a request is - /// made for a resource with credentials, if this header is not returned - /// with the resource, the response is ignored by the browser and not - /// returned to web content. 
- /// - /// The Access-Control-Allow-Credentials header works in conjunction with - /// the XMLHttpRequest.withCredentials property or with the credentials - /// option in the Request() constructor of the Fetch API. Credentials must - /// be set on both sides (the Access-Control-Allow-Credentials header and in - /// the XHR or Fetch request) in order for the CORS request with credentials - /// to succeed. - (AccessControlAllowCredentials, ACCESS_CONTROL_ALLOW_CREDENTIALS, b"access-control-allow-credentials"); - - /// Preflight response indicating permitted HTTP headers. - /// - /// The Access-Control-Allow-Headers response header is used in response to - /// a preflight request to indicate which HTTP headers will be available via - /// Access-Control-Expose-Headers when making the actual request. - /// - /// The simple headers, Accept, Accept-Language, Content-Language, - /// Content-Type (but only with a MIME type of its parsed value (ignoring - /// parameters) of either application/x-www-form-urlencoded, - /// multipart/form-data, or text/plain), are always available and don't need - /// to be listed by this header. - /// - /// This header is required if the request has an - /// Access-Control-Request-Headers header. - (AccessControlAllowHeaders, ACCESS_CONTROL_ALLOW_HEADERS, b"access-control-allow-headers"); - - /// Preflight header response indicating permitted access methods. - /// - /// The Access-Control-Allow-Methods response header specifies the method or - /// methods allowed when accessing the resource in response to a preflight - /// request. - (AccessControlAllowMethods, ACCESS_CONTROL_ALLOW_METHODS, b"access-control-allow-methods"); - - /// Indicates whether the response can be shared with resources with the - /// given origin. - (AccessControlAllowOrigin, ACCESS_CONTROL_ALLOW_ORIGIN, b"access-control-allow-origin"); - - /// Indicates which headers can be exposed as part of the response by - /// listing their names. 
- (AccessControlExposeHeaders, ACCESS_CONTROL_EXPOSE_HEADERS, b"access-control-expose-headers"); - - /// Indicates how long the results of a preflight request can be cached. - (AccessControlMaxAge, ACCESS_CONTROL_MAX_AGE, b"access-control-max-age"); - - /// Informs the server which HTTP headers will be used when an actual - /// request is made. - (AccessControlRequestHeaders, ACCESS_CONTROL_REQUEST_HEADERS, b"access-control-request-headers"); - - /// Informs the server know which HTTP method will be used when the actual - /// request is made. - (AccessControlRequestMethod, ACCESS_CONTROL_REQUEST_METHOD, b"access-control-request-method"); - - /// Indicates the time in seconds the object has been in a proxy cache. - /// - /// The Age header is usually close to zero. If it is Age: 0, it was - /// probably just fetched from the origin server; otherwise It is usually - /// calculated as a difference between the proxy's current date and the Date - /// general header included in the HTTP response. - (Age, AGE, b"age"); - - /// Lists the set of methods support by a resource. - /// - /// This header must be sent if the server responds with a 405 Method Not - /// Allowed status code to indicate which request methods can be used. An - /// empty Allow header indicates that the resource allows no request - /// methods, which might occur temporarily for a given resource, for - /// example. - (Allow, ALLOW, b"allow"); - - /// Advertises the availability of alternate services to clients. - (AltSvc, ALT_SVC, b"alt-svc"); - - /// Contains the credentials to authenticate a user agent with a server. - /// - /// Usually this header is included after the server has responded with a - /// 401 Unauthorized status and the WWW-Authenticate header. - (Authorization, AUTHORIZATION, b"authorization"); - - /// Specifies directives for caching mechanisms in both requests and - /// responses. 
- /// - /// Caching directives are unidirectional, meaning that a given directive in - /// a request is not implying that the same directive is to be given in the - /// response. - (CacheControl, CACHE_CONTROL, b"cache-control"); - - /// Indicates how caches have handled a response and its corresponding request. - /// - /// See [RFC 9211](https://www.rfc-editor.org/rfc/rfc9211.html). - (CacheStatus, CACHE_STATUS, b"cache-status"); - - /// Specifies directives that allow origin servers to control the behavior of CDN caches - /// interposed between them and clients separately from other caches that might handle the - /// response. - /// - /// See [RFC 9213](https://www.rfc-editor.org/rfc/rfc9213.html). - (CdnCacheControl, CDN_CACHE_CONTROL, b"cdn-cache-control"); - - /// Controls whether or not the network connection stays open after the - /// current transaction finishes. - /// - /// If the value sent is keep-alive, the connection is persistent and not - /// closed, allowing for subsequent requests to the same server to be done. - /// - /// Except for the standard hop-by-hop headers (Keep-Alive, - /// Transfer-Encoding, TE, Connection, Trailer, Upgrade, Proxy-Authorization - /// and Proxy-Authenticate), any hop-by-hop headers used by the message must - /// be listed in the Connection header, so that the first proxy knows he has - /// to consume them and not to forward them further. Standard hop-by-hop - /// headers can be listed too (it is often the case of Keep-Alive, but this - /// is not mandatory. - (Connection, CONNECTION, b"connection"); - - /// Indicates if the content is expected to be displayed inline. - /// - /// In a regular HTTP response, the Content-Disposition response header is a - /// header indicating if the content is expected to be displayed inline in - /// the browser, that is, as a Web page or as part of a Web page, or as an - /// attachment, that is downloaded and saved locally. 
- /// - /// In a multipart/form-data body, the HTTP Content-Disposition general - /// header is a header that can be used on the subpart of a multipart body - /// to give information about the field it applies to. The subpart is - /// delimited by the boundary defined in the Content-Type header. Used on - /// the body itself, Content-Disposition has no effect. - /// - /// The Content-Disposition header is defined in the larger context of MIME - /// messages for e-mail, but only a subset of the possible parameters apply - /// to HTTP forms and POST requests. Only the value form-data, as well as - /// the optional directive name and filename, can be used in the HTTP - /// context. - (ContentDisposition, CONTENT_DISPOSITION, b"content-disposition"); - - /// Used to compress the media-type. - /// - /// When present, its value indicates what additional content encoding has - /// been applied to the entity-body. It lets the client know, how to decode - /// in order to obtain the media-type referenced by the Content-Type header. - /// - /// It is recommended to compress data as much as possible and therefore to - /// use this field, but some types of resources, like jpeg images, are - /// already compressed. Sometimes using additional compression doesn't - /// reduce payload size and can even make the payload longer. - (ContentEncoding, CONTENT_ENCODING, b"content-encoding"); - - /// Used to describe the languages intended for the audience. - /// - /// This header allows a user to differentiate according to the users' own - /// preferred language. For example, if "Content-Language: de-DE" is set, it - /// says that the document is intended for German language speakers - /// (however, it doesn't indicate the document is written in German. For - /// example, it might be written in English as part of a language course for - /// German speakers). - /// - /// If no Content-Language is specified, the default is that the content is - /// intended for all language audiences. 
Multiple language tags are also - /// possible, as well as applying the Content-Language header to various - /// media types and not only to textual documents. - (ContentLanguage, CONTENT_LANGUAGE, b"content-language"); - - /// Indicates the size of the entity-body. - /// - /// The header value must be a decimal indicating the number of octets sent - /// to the recipient. - (ContentLength, CONTENT_LENGTH, b"content-length"); - - /// Indicates an alternate location for the returned data. - /// - /// The principal use case is to indicate the URL of the resource - /// transmitted as the result of content negotiation. - /// - /// Location and Content-Location are different: Location indicates the - /// target of a redirection (or the URL of a newly created document), while - /// Content-Location indicates the direct URL to use to access the resource, - /// without the need of further content negotiation. Location is a header - /// associated with the response, while Content-Location is associated with - /// the entity returned. - (ContentLocation, CONTENT_LOCATION, b"content-location"); - - /// Indicates where in a full body message a partial message belongs. - (ContentRange, CONTENT_RANGE, b"content-range"); - - /// Allows controlling resources the user agent is allowed to load for a - /// given page. - /// - /// With a few exceptions, policies mostly involve specifying server origins - /// and script endpoints. This helps guard against cross-site scripting - /// attacks (XSS). - (ContentSecurityPolicy, CONTENT_SECURITY_POLICY, b"content-security-policy"); - - /// Allows experimenting with policies by monitoring their effects. - /// - /// The HTTP Content-Security-Policy-Report-Only response header allows web - /// developers to experiment with policies by monitoring (but not enforcing) - /// their effects. These violation reports consist of JSON documents sent - /// via an HTTP POST request to the specified URI. 
- (ContentSecurityPolicyReportOnly, CONTENT_SECURITY_POLICY_REPORT_ONLY, b"content-security-policy-report-only"); - - /// Used to indicate the media type of the resource. - /// - /// In responses, a Content-Type header tells the client what the content - /// type of the returned content actually is. Browsers will do MIME sniffing - /// in some cases and will not necessarily follow the value of this header; - /// to prevent this behavior, the header X-Content-Type-Options can be set - /// to nosniff. - /// - /// In requests, (such as POST or PUT), the client tells the server what - /// type of data is actually sent. - (ContentType, CONTENT_TYPE, b"content-type"); - - /// Contains stored HTTP cookies previously sent by the server with the - /// Set-Cookie header. - /// - /// The Cookie header might be omitted entirely, if the privacy setting of - /// the browser are set to block them, for example. - (Cookie, COOKIE, b"cookie"); - - /// Indicates the client's tracking preference. - /// - /// This header lets users indicate whether they would prefer privacy rather - /// than personalized content. - (Dnt, DNT, b"dnt"); - - /// Contains the date and time at which the message was originated. - (Date, DATE, b"date"); - - /// Identifier for a specific version of a resource. - /// - /// This header allows caches to be more efficient, and saves bandwidth, as - /// a web server does not need to send a full response if the content has - /// not changed. On the other side, if the content has changed, etags are - /// useful to help prevent simultaneous updates of a resource from - /// overwriting each other ("mid-air collisions"). - /// - /// If the resource at a given URL changes, a new Etag value must be - /// generated. Etags are therefore similar to fingerprints and might also be - /// used for tracking purposes by some servers. 
A comparison of them allows - /// to quickly determine whether two representations of a resource are the - /// same, but they might also be set to persist indefinitely by a tracking - /// server. - (Etag, ETAG, b"etag"); - - /// Indicates expectations that need to be fulfilled by the server in order - /// to properly handle the request. - /// - /// The only expectation defined in the specification is Expect: - /// 100-continue, to which the server shall respond with: - /// - /// * 100 if the information contained in the header is sufficient to cause - /// an immediate success, - /// - /// * 417 (Expectation Failed) if it cannot meet the expectation; or any - /// other 4xx status otherwise. - /// - /// For example, the server may reject a request if its Content-Length is - /// too large. - /// - /// No common browsers send the Expect header, but some other clients such - /// as cURL do so by default. - (Expect, EXPECT, b"expect"); - - /// Contains the date/time after which the response is considered stale. - /// - /// Invalid dates, like the value 0, represent a date in the past and mean - /// that the resource is already expired. - /// - /// If there is a Cache-Control header with the "max-age" or "s-max-age" - /// directive in the response, the Expires header is ignored. - (Expires, EXPIRES, b"expires"); - - /// Contains information from the client-facing side of proxy servers that - /// is altered or lost when a proxy is involved in the path of the request. - /// - /// The alternative and de-facto standard versions of this header are the - /// X-Forwarded-For, X-Forwarded-Host and X-Forwarded-Proto headers. - /// - /// This header is used for debugging, statistics, and generating - /// location-dependent content and by design it exposes privacy sensitive - /// information, such as the IP address of the client. Therefore the user's - /// privacy must be kept in mind when deploying this header. 
- (Forwarded, FORWARDED, b"forwarded"); - - /// Contains an Internet email address for a human user who controls the - /// requesting user agent. - /// - /// If you are running a robotic user agent (e.g. a crawler), the From - /// header should be sent, so you can be contacted if problems occur on - /// servers, such as if the robot is sending excessive, unwanted, or invalid - /// requests. - (From, FROM, b"from"); - - /// Specifies the domain name of the server and (optionally) the TCP port - /// number on which the server is listening. - /// - /// If no port is given, the default port for the service requested (e.g., - /// "80" for an HTTP URL) is implied. - /// - /// A Host header field must be sent in all HTTP/1.1 request messages. A 400 - /// (Bad Request) status code will be sent to any HTTP/1.1 request message - /// that lacks a Host header field or contains more than one. - (Host, HOST, b"host"); - - /// Makes a request conditional based on the E-Tag. - /// - /// For GET and HEAD methods, the server will send back the requested - /// resource only if it matches one of the listed ETags. For PUT and other - /// non-safe methods, it will only upload the resource in this case. - /// - /// The comparison with the stored ETag uses the strong comparison - /// algorithm, meaning two files are considered identical byte to byte only. - /// This is weakened when the W/ prefix is used in front of the ETag. - /// - /// There are two common use cases: - /// - /// * For GET and HEAD methods, used in combination with an Range header, it - /// can guarantee that the new ranges requested comes from the same resource - /// than the previous one. If it doesn't match, then a 416 (Range Not - /// Satisfiable) response is returned. - /// - /// * For other methods, and in particular for PUT, If-Match can be used to - /// prevent the lost update problem. 
It can check if the modification of a - /// resource that the user wants to upload will not override another change - /// that has been done since the original resource was fetched. If the - /// request cannot be fulfilled, the 412 (Precondition Failed) response is - /// returned. - (IfMatch, IF_MATCH, b"if-match"); - - /// Makes a request conditional based on the modification date. - /// - /// The If-Modified-Since request HTTP header makes the request conditional: - /// the server will send back the requested resource, with a 200 status, - /// only if it has been last modified after the given date. If the request - /// has not been modified since, the response will be a 304 without any - /// body; the Last-Modified header will contain the date of last - /// modification. Unlike If-Unmodified-Since, If-Modified-Since can only be - /// used with a GET or HEAD. - /// - /// When used in combination with If-None-Match, it is ignored, unless the - /// server doesn't support If-None-Match. - /// - /// The most common use case is to update a cached entity that has no - /// associated ETag. - (IfModifiedSince, IF_MODIFIED_SINCE, b"if-modified-since"); - - /// Makes a request conditional based on the E-Tag. - /// - /// The If-None-Match HTTP request header makes the request conditional. For - /// GET and HEAD methods, the server will send back the requested resource, - /// with a 200 status, only if it doesn't have an ETag matching the given - /// ones. For other methods, the request will be processed only if the - /// eventually existing resource's ETag doesn't match any of the values - /// listed. - /// - /// When the condition fails for GET and HEAD methods, then the server must - /// return HTTP status code 304 (Not Modified). For methods that apply - /// server-side changes, the status code 412 (Precondition Failed) is used. 
- /// Note that the server generating a 304 response MUST generate any of the - /// following header fields that would have been sent in a 200 (OK) response - /// to the same request: Cache-Control, Content-Location, Date, ETag, - /// Expires, and Vary. - /// - /// The comparison with the stored ETag uses the weak comparison algorithm, - /// meaning two files are considered identical not only if they are - /// identical byte to byte, but if the content is equivalent. For example, - /// two pages that would differ only by the date of generation in the footer - /// would be considered as identical. - /// - /// When used in combination with If-Modified-Since, it has precedence (if - /// the server supports it). - /// - /// There are two common use cases: - /// - /// * For `GET` and `HEAD` methods, to update a cached entity that has an associated ETag. - /// * For other methods, and in particular for `PUT`, `If-None-Match` used with - /// the `*` value can be used to save a file not known to exist, - /// guaranteeing that another upload didn't happen before, losing the data - /// of the previous put; this problems is the variation of the lost update - /// problem. - (IfNoneMatch, IF_NONE_MATCH, b"if-none-match"); - - /// Makes a request conditional based on range. - /// - /// The If-Range HTTP request header makes a range request conditional: if - /// the condition is fulfilled, the range request will be issued and the - /// server sends back a 206 Partial Content answer with the appropriate - /// body. If the condition is not fulfilled, the full resource is sent back, - /// with a 200 OK status. - /// - /// This header can be used either with a Last-Modified validator, or with - /// an ETag, but not with both. - /// - /// The most common use case is to resume a download, to guarantee that the - /// stored resource has not been modified since the last fragment has been - /// received. 
- (IfRange, IF_RANGE, b"if-range"); - - /// Makes the request conditional based on the last modification date. - /// - /// The If-Unmodified-Since request HTTP header makes the request - /// conditional: the server will send back the requested resource, or accept - /// it in the case of a POST or another non-safe method, only if it has not - /// been last modified after the given date. If the request has been - /// modified after the given date, the response will be a 412 (Precondition - /// Failed) error. - /// - /// There are two common use cases: - /// - /// * In conjunction non-safe methods, like POST, it can be used to - /// implement an optimistic concurrency control, like done by some wikis: - /// editions are rejected if the stored document has been modified since the - /// original has been retrieved. - /// - /// * In conjunction with a range request with a If-Range header, it can be - /// used to ensure that the new fragment requested comes from an unmodified - /// document. - (IfUnmodifiedSince, IF_UNMODIFIED_SINCE, b"if-unmodified-since"); - - /// Content-Types that are acceptable for the response. - (LastModified, LAST_MODIFIED, b"last-modified"); - - /// Allows the server to point an interested client to another resource - /// containing metadata about the requested resource. - (Link, LINK, b"link"); - - /// Indicates the URL to redirect a page to. - /// - /// The Location response header indicates the URL to redirect a page to. It - /// only provides a meaning when served with a 3xx status response. 
- /// - /// The HTTP method used to make the new request to fetch the page pointed - /// to by Location depends of the original method and of the kind of - /// redirection: - /// - /// * If 303 (See Also) responses always lead to the use of a GET method, - /// 307 (Temporary Redirect) and 308 (Permanent Redirect) don't change the - /// method used in the original request; - /// - /// * 301 (Permanent Redirect) and 302 (Found) doesn't change the method - /// most of the time, though older user-agents may (so you basically don't - /// know). - /// - /// All responses with one of these status codes send a Location header. - /// - /// Beside redirect response, messages with 201 (Created) status also - /// include the Location header. It indicates the URL to the newly created - /// resource. - /// - /// Location and Content-Location are different: Location indicates the - /// target of a redirection (or the URL of a newly created resource), while - /// Content-Location indicates the direct URL to use to access the resource - /// when content negotiation happened, without the need of further content - /// negotiation. Location is a header associated with the response, while - /// Content-Location is associated with the entity returned. - (Location, LOCATION, b"location"); - - /// Indicates the max number of intermediaries the request should be sent - /// through. - (MaxForwards, MAX_FORWARDS, b"max-forwards"); - - /// Indicates where a fetch originates from. - /// - /// It doesn't include any path information, but only the server name. It is - /// sent with CORS requests, as well as with POST requests. It is similar to - /// the Referer header, but, unlike this header, it doesn't disclose the - /// whole path. - (Origin, ORIGIN, b"origin"); - - /// HTTP/1.0 header usually used for backwards compatibility. - /// - /// The Pragma HTTP/1.0 general header is an implementation-specific header - /// that may have various effects along the request-response chain. 
It is - /// used for backwards compatibility with HTTP/1.0 caches where the - /// Cache-Control HTTP/1.1 header is not yet present. - (Pragma, PRAGMA, b"pragma"); - - /// Defines the authentication method that should be used to gain access to - /// a proxy. - /// - /// Unlike `www-authenticate`, the `proxy-authenticate` header field applies - /// only to the next outbound client on the response chain. This is because - /// only the client that chose a given proxy is likely to have the - /// credentials necessary for authentication. However, when multiple proxies - /// are used within the same administrative domain, such as office and - /// regional caching proxies within a large corporate network, it is common - /// for credentials to be generated by the user agent and passed through the - /// hierarchy until consumed. Hence, in such a configuration, it will appear - /// as if Proxy-Authenticate is being forwarded because each proxy will send - /// the same challenge set. - /// - /// The `proxy-authenticate` header is sent along with a `407 Proxy - /// Authentication Required`. - (ProxyAuthenticate, PROXY_AUTHENTICATE, b"proxy-authenticate"); - - /// Contains the credentials to authenticate a user agent to a proxy server. - /// - /// This header is usually included after the server has responded with a - /// 407 Proxy Authentication Required status and the Proxy-Authenticate - /// header. - (ProxyAuthorization, PROXY_AUTHORIZATION, b"proxy-authorization"); - - /// Associates a specific cryptographic public key with a certain server. - /// - /// This decreases the risk of MITM attacks with forged certificates. If one - /// or several keys are pinned and none of them are used by the server, the - /// browser will not accept the response as legitimate, and will not display - /// it. - (PublicKeyPins, PUBLIC_KEY_PINS, b"public-key-pins"); - - /// Sends reports of pinning violation to the report-uri specified in the - /// header. 
- /// - /// Unlike `Public-Key-Pins`, this header still allows browsers to connect - /// to the server if the pinning is violated. - (PublicKeyPinsReportOnly, PUBLIC_KEY_PINS_REPORT_ONLY, b"public-key-pins-report-only"); - - /// Indicates the part of a document that the server should return. - /// - /// Several parts can be requested with one Range header at once, and the - /// server may send back these ranges in a multipart document. If the server - /// sends back ranges, it uses the 206 Partial Content for the response. If - /// the ranges are invalid, the server returns the 416 Range Not Satisfiable - /// error. The server can also ignore the Range header and return the whole - /// document with a 200 status code. - (Range, RANGE, b"range"); - - /// Contains the address of the previous web page from which a link to the - /// currently requested page was followed. - /// - /// The Referer header allows servers to identify where people are visiting - /// them from and may use that data for analytics, logging, or optimized - /// caching, for example. - (Referer, REFERER, b"referer"); - - /// Governs which referrer information should be included with requests - /// made. - (ReferrerPolicy, REFERRER_POLICY, b"referrer-policy"); - - /// Informs the web browser that the current page or frame should be - /// refreshed. - (Refresh, REFRESH, b"refresh"); - - /// The Retry-After response HTTP header indicates how long the user agent - /// should wait before making a follow-up request. There are two main cases - /// this header is used: - /// - /// * When sent with a 503 (Service Unavailable) response, it indicates how - /// long the service is expected to be unavailable. - /// - /// * When sent with a redirect response, such as 301 (Moved Permanently), - /// it indicates the minimum time that the user agent is asked to wait - /// before issuing the redirected request. 
- (RetryAfter, RETRY_AFTER, b"retry-after"); - - /// The |Sec-WebSocket-Accept| header field is used in the WebSocket - /// opening handshake. It is sent from the server to the client to - /// confirm that the server is willing to initiate the WebSocket - /// connection. - (SecWebSocketAccept, SEC_WEBSOCKET_ACCEPT, b"sec-websocket-accept"); - - /// The |Sec-WebSocket-Extensions| header field is used in the WebSocket - /// opening handshake. It is initially sent from the client to the - /// server, and then subsequently sent from the server to the client, to - /// agree on a set of protocol-level extensions to use for the duration - /// of the connection. - (SecWebSocketExtensions, SEC_WEBSOCKET_EXTENSIONS, b"sec-websocket-extensions"); - - /// The |Sec-WebSocket-Key| header field is used in the WebSocket opening - /// handshake. It is sent from the client to the server to provide part - /// of the information used by the server to prove that it received a - /// valid WebSocket opening handshake. This helps ensure that the server - /// does not accept connections from non-WebSocket clients (e.g., HTTP - /// clients) that are being abused to send data to unsuspecting WebSocket - /// servers. - (SecWebSocketKey, SEC_WEBSOCKET_KEY, b"sec-websocket-key"); - - /// The |Sec-WebSocket-Protocol| header field is used in the WebSocket - /// opening handshake. It is sent from the client to the server and back - /// from the server to the client to confirm the subprotocol of the - /// connection. This enables scripts to both select a subprotocol and be - /// sure that the server agreed to serve that subprotocol. - (SecWebSocketProtocol, SEC_WEBSOCKET_PROTOCOL, b"sec-websocket-protocol"); - - /// The |Sec-WebSocket-Version| header field is used in the WebSocket - /// opening handshake. It is sent from the client to the server to - /// indicate the protocol version of the connection. 
This enables - /// servers to correctly interpret the opening handshake and subsequent - /// data being sent from the data, and close the connection if the server - /// cannot interpret that data in a safe manner. - (SecWebSocketVersion, SEC_WEBSOCKET_VERSION, b"sec-websocket-version"); - - /// Contains information about the software used by the origin server to - /// handle the request. - /// - /// Overly long and detailed Server values should be avoided as they - /// potentially reveal internal implementation details that might make it - /// (slightly) easier for attackers to find and exploit known security - /// holes. - (Server, SERVER, b"server"); - - /// Used to send cookies from the server to the user agent. - (SetCookie, SET_COOKIE, b"set-cookie"); - - /// Tells the client to communicate with HTTPS instead of using HTTP. - (StrictTransportSecurity, STRICT_TRANSPORT_SECURITY, b"strict-transport-security"); - - /// Informs the server of transfer encodings willing to be accepted as part - /// of the response. - /// - /// See also the Transfer-Encoding response header for more details on - /// transfer encodings. Note that chunked is always acceptable for HTTP/1.1 - /// recipients and you that don't have to specify "chunked" using the TE - /// header. However, it is useful for setting if the client is accepting - /// trailer fields in a chunked transfer coding using the "trailers" value. - (Te, TE, b"te"); - - /// Allows the sender to include additional fields at the end of chunked - /// messages. - (Trailer, TRAILER, b"trailer"); - - /// Specifies the form of encoding used to safely transfer the entity to the - /// client. - /// - /// `transfer-encoding` is a hop-by-hop header, that is applying to a - /// message between two nodes, not to a resource itself. Each segment of a - /// multi-node connection can use different `transfer-encoding` values. 
If - /// you want to compress data over the whole connection, use the end-to-end - /// header `content-encoding` header instead. - /// - /// When present on a response to a `HEAD` request that has no body, it - /// indicates the value that would have applied to the corresponding `GET` - /// message. - (TransferEncoding, TRANSFER_ENCODING, b"transfer-encoding"); - - /// Contains a string that allows identifying the requesting client's - /// software. - (UserAgent, USER_AGENT, b"user-agent"); - - /// Used as part of the exchange to upgrade the protocol. - (Upgrade, UPGRADE, b"upgrade"); - - /// Sends a signal to the server expressing the client’s preference for an - /// encrypted and authenticated response. - (UpgradeInsecureRequests, UPGRADE_INSECURE_REQUESTS, b"upgrade-insecure-requests"); - - /// Determines how to match future requests with cached responses. - /// - /// The `vary` HTTP response header determines how to match future request - /// headers to decide whether a cached response can be used rather than - /// requesting a fresh one from the origin server. It is used by the server - /// to indicate which headers it used when selecting a representation of a - /// resource in a content negotiation algorithm. - /// - /// The `vary` header should be set on a 304 Not Modified response exactly - /// like it would have been set on an equivalent 200 OK response. - (Vary, VARY, b"vary"); - - /// Added by proxies to track routing. - /// - /// The `via` general header is added by proxies, both forward and reverse - /// proxies, and can appear in the request headers and the response headers. - /// It is used for tracking message forwards, avoiding request loops, and - /// identifying the protocol capabilities of senders along the - /// request/response chain. - (Via, VIA, b"via"); - - /// General HTTP header contains information about possible problems with - /// the status of the message. - /// - /// More than one `warning` header may appear in a response. 
Warning header - /// fields can in general be applied to any message, however some warn-codes - /// are specific to caches and can only be applied to response messages. - (Warning, WARNING, b"warning"); - - /// Defines the authentication method that should be used to gain access to - /// a resource. - (WwwAuthenticate, WWW_AUTHENTICATE, b"www-authenticate"); - - /// Marker used by the server to indicate that the MIME types advertised in - /// the `content-type` headers should not be changed and be followed. - /// - /// This allows to opt-out of MIME type sniffing, or, in other words, it is - /// a way to say that the webmasters knew what they were doing. - /// - /// This header was introduced by Microsoft in IE 8 as a way for webmasters - /// to block content sniffing that was happening and could transform - /// non-executable MIME types into executable MIME types. Since then, other - /// browsers have introduced it, even if their MIME sniffing algorithms were - /// less aggressive. - /// - /// Site security testers usually expect this header to be set. - (XContentTypeOptions, X_CONTENT_TYPE_OPTIONS, b"x-content-type-options"); - - /// Controls DNS prefetching. - /// - /// The `x-dns-prefetch-control` HTTP response header controls DNS - /// prefetching, a feature by which browsers proactively perform domain name - /// resolution on both links that the user may choose to follow as well as - /// URLs for items referenced by the document, including images, CSS, - /// JavaScript, and so forth. - /// - /// This prefetching is performed in the background, so that the DNS is - /// likely to have been resolved by the time the referenced items are - /// needed. This reduces latency when the user clicks a link. - (XDnsPrefetchControl, X_DNS_PREFETCH_CONTROL, b"x-dns-prefetch-control"); - - /// Indicates whether or not a browser should be allowed to render a page in - /// a frame. 
- /// - /// Sites can use this to avoid clickjacking attacks, by ensuring that their - /// content is not embedded into other sites. - /// - /// The added security is only provided if the user accessing the document - /// is using a browser supporting `x-frame-options`. - (XFrameOptions, X_FRAME_OPTIONS, b"x-frame-options"); - - /// Stop pages from loading when an XSS attack is detected. - /// - /// The HTTP X-XSS-Protection response header is a feature of Internet - /// Explorer, Chrome and Safari that stops pages from loading when they - /// detect reflected cross-site scripting (XSS) attacks. Although these - /// protections are largely unnecessary in modern browsers when sites - /// implement a strong Content-Security-Policy that disables the use of - /// inline JavaScript ('unsafe-inline'), they can still provide protections - /// for users of older web browsers that don't yet support CSP. - (XXssProtection, X_XSS_PROTECTION, b"x-xss-protection"); -} - -/// Valid header name characters -/// -/// ```not_rust -/// field-name = token -/// separators = "(" | ")" | "<" | ">" | "@" -/// | "," | ";" | ":" | "\" | <"> -/// | "/" | "[" | "]" | "?" | "=" -/// | "{" | "}" | SP | HT -/// token = 1*tchar -/// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" -/// / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" -/// / DIGIT / ALPHA -/// ; any VCHAR, except delimiters -/// ``` -// HEADER_CHARS maps every byte that is 128 or larger to 0 so everything that is -// mapped by HEADER_CHARS, maps to a valid single-byte UTF-8 codepoint. 
-const HEADER_CHARS: [u8; 256] = [ - // 0 1 2 3 4 5 6 7 8 9 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x - 0, 0, 0, b'!', b'"', b'#', b'$', b'%', b'&', b'\'', // 3x - 0, 0, b'*', b'+', 0, b'-', b'.', 0, b'0', b'1', // 4x - b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', 0, 0, // 5x - 0, 0, 0, 0, 0, b'a', b'b', b'c', b'd', b'e', // 6x - b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', // 7x - b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', // 8x - b'z', 0, 0, 0, b'^', b'_', b'`', b'a', b'b', b'c', // 9x - b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x - b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x - b'x', b'y', b'z', 0, b'|', 0, b'~', 0, 0, 0, // 12x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x - 0, 0, 0, 0, 0, 0 // 25x -]; - -/// Valid header name characters for HTTP/2.0 and HTTP/3.0 -// HEADER_CHARS_H2 maps every byte that is 128 or larger to 0 so everything that is -// mapped by HEADER_CHARS_H2, maps to a valid single-byte UTF-8 codepoint. 
-const HEADER_CHARS_H2: [u8; 256] = [ - // 0 1 2 3 4 5 6 7 8 9 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x - 0, 0, 0, b'!', b'"', b'#', b'$', b'%', b'&', b'\'', // 3x - 0, 0, b'*', b'+', 0, b'-', b'.', 0, b'0', b'1', // 4x - b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', 0, 0, // 5x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 6x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 7x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 8x - 0, 0, 0, 0, b'^', b'_', b'`', b'a', b'b', b'c', // 9x - b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x - b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x - b'x', b'y', b'z', 0, b'|', 0, b'~', 0, 0, 0, // 12x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x - 0, 0, 0, 0, 0, 0 // 25x -]; - -fn parse_hdr<'a>( - data: &'a [u8], - b: &'a mut [MaybeUninit; SCRATCH_BUF_SIZE], - table: &[u8; 256], -) -> Result, InvalidHeaderName> { - match data.len() { - 0 => Err(InvalidHeaderName::new()), - len @ 1..=SCRATCH_BUF_SIZE => { - // Read from data into the buffer - transforming using `table` as we go - data.iter() - .zip(b.iter_mut()) - .for_each(|(index, out)| *out = MaybeUninit::new(table[*index as usize])); - // Safety: len bytes of b were just initialized. 
- let name: &'a [u8] = unsafe { slice_assume_init(&b[0..len]) }; - match StandardHeader::from_bytes(name) { - Some(sh) => Ok(sh.into()), - None => { - if name.contains(&0) { - Err(InvalidHeaderName::new()) - } else { - Ok(HdrName::custom(name, true)) - } - } - } - } - SCRATCH_BUF_OVERFLOW..=super::MAX_HEADER_NAME_LEN => Ok(HdrName::custom(data, false)), - _ => Err(InvalidHeaderName::new()), - } -} - - - -impl<'a> From for HdrName<'a> { - fn from(hdr: StandardHeader) -> HdrName<'a> { - HdrName { inner: Repr::Standard(hdr) } - } -} - -impl HeaderName { - /// Converts a slice of bytes to an HTTP header name. - /// - /// This function normalizes the input. - pub fn from_bytes(src: &[u8]) -> Result { - let mut buf = uninit_u8_array(); - // Precondition: HEADER_CHARS is a valid table for parse_hdr(). - match parse_hdr(src, &mut buf, &HEADER_CHARS)?.inner { - Repr::Standard(std) => Ok(std.into()), - Repr::Custom(MaybeLower { buf, lower: true }) => { - let buf = Bytes::copy_from_slice(buf); - // Safety: the invariant on MaybeLower ensures buf is valid UTF-8. - let val = unsafe { ByteStr::from_utf8_unchecked(buf) }; - Ok(Custom(val).into()) - } - Repr::Custom(MaybeLower { buf, lower: false }) => { - use bytes::{BufMut}; - let mut dst = BytesMut::with_capacity(buf.len()); - - for b in buf.iter() { - // HEADER_CHARS maps all bytes to valid single-byte UTF-8 - let b = HEADER_CHARS[*b as usize]; - - if b == 0 { - return Err(InvalidHeaderName::new()); - } - - dst.put_u8(b); - } - - // Safety: the loop above maps all bytes in buf to valid single byte - // UTF-8 before copying them into dst. This means that dst (and hence - // dst.freeze()) is valid UTF-8. - let val = unsafe { ByteStr::from_utf8_unchecked(dst.freeze()) }; - - Ok(Custom(val).into()) - } - } - } - - /// Converts a slice of bytes to an HTTP header name. - /// - /// This function expects the input to only contain lowercase characters. - /// This is useful when decoding HTTP/2.0 or HTTP/3.0 headers. 
Both - /// require that all headers be represented in lower case. - /// - /// # Examples - /// - /// ``` - /// # use http::header::*; - /// - /// // Parsing a lower case header - /// let hdr = HeaderName::from_lowercase(b"content-length").unwrap(); - /// assert_eq!(CONTENT_LENGTH, hdr); - /// - /// // Parsing a header that contains uppercase characters - /// assert!(HeaderName::from_lowercase(b"Content-Length").is_err()); - /// ``` - pub fn from_lowercase(src: &[u8]) -> Result { - let mut buf = uninit_u8_array(); - // Precondition: HEADER_CHARS_H2 is a valid table for parse_hdr() - match parse_hdr(src, &mut buf, &HEADER_CHARS_H2)?.inner { - Repr::Standard(std) => Ok(std.into()), - Repr::Custom(MaybeLower { buf, lower: true }) => { - let buf = Bytes::copy_from_slice(buf); - // Safety: the invariant on MaybeLower ensures buf is valid UTF-8. - let val = unsafe { ByteStr::from_utf8_unchecked(buf) }; - Ok(Custom(val).into()) - } - Repr::Custom(MaybeLower { buf, lower: false }) => { - for &b in buf.iter() { - // HEADER_CHARS maps all bytes that are not valid single-byte - // UTF-8 to 0 so this check returns an error for invalid UTF-8. - if b != HEADER_CHARS[b as usize] { - return Err(InvalidHeaderName::new()); - } - } - - let buf = Bytes::copy_from_slice(buf); - // Safety: the loop above checks that each byte of buf (either - // version) is valid UTF-8. - let val = unsafe { ByteStr::from_utf8_unchecked(buf) }; - Ok(Custom(val).into()) - } - } - } - - /// Converts a static string to a HTTP header name. - /// - /// This function requires the static string to only contain lowercase - /// characters, numerals and symbols, as per the HTTP/2.0 specification - /// and header names internal representation within this library. - /// - /// # Panics - /// - /// This function panics when the static string is a invalid header. 
- /// - /// Until [Allow panicking in constants](https://github.com/rust-lang/rfcs/pull/2345) - /// makes its way into stable, the panic message at compile-time is - /// going to look cryptic, but should at least point at your header value: - /// - /// ```text - /// error: any use of this value will cause an error - /// --> http/src/header/name.rs:1241:13 - /// | - /// 1241 | ([] as [u8; 0])[0]; // Invalid header name - /// | ^^^^^^^^^^^^^^^^^^ - /// | | - /// | index out of bounds: the length is 0 but the index is 0 - /// | inside `http::HeaderName::from_static` at http/src/header/name.rs:1241:13 - /// | inside `INVALID_NAME` at src/main.rs:3:34 - /// | - /// ::: src/main.rs:3:1 - /// | - /// 3 | const INVALID_NAME: HeaderName = HeaderName::from_static("Capitalized"); - /// | ------------------------------------------------------------------------ - /// ``` - /// - /// # Examples - /// - /// ``` - /// # use http::header::*; - /// // Parsing a standard header - /// let hdr = HeaderName::from_static("content-length"); - /// assert_eq!(CONTENT_LENGTH, hdr); - /// - /// // Parsing a custom header - /// let CUSTOM_HEADER: &'static str = "custom-header"; - /// - /// let a = HeaderName::from_lowercase(b"custom-header").unwrap(); - /// let b = HeaderName::from_static(CUSTOM_HEADER); - /// assert_eq!(a, b); - /// ``` - /// - /// ```should_panic - /// # use http::header::*; - /// # - /// // Parsing a header that contains invalid symbols(s): - /// HeaderName::from_static("content{}{}length"); // This line panics! - /// - /// // Parsing a header that contains invalid uppercase characters. - /// let a = HeaderName::from_static("foobar"); - /// let b = HeaderName::from_static("FOOBAR"); // This line panics! 
- /// ``` - #[allow(unconditional_panic)] // required for the panic circumvention - pub const fn from_static(src: &'static str) -> HeaderName { - let name_bytes = src.as_bytes(); - if let Some(standard) = StandardHeader::from_bytes(name_bytes) { - return HeaderName{ - inner: Repr::Standard(standard), - }; - } - - if name_bytes.len() == 0 || name_bytes.len() > super::MAX_HEADER_NAME_LEN || { - let mut i = 0; - loop { - if i >= name_bytes.len() { - break false; - } else if HEADER_CHARS_H2[name_bytes[i] as usize] == 0 { - break true; - } - i += 1; - } - } { - ([] as [u8; 0])[0]; // Invalid header name - } - - HeaderName { - inner: Repr::Custom(Custom(ByteStr::from_static(src))) - } - } - - /// Returns a `str` representation of the header. - /// - /// The returned string will always be lower case. - #[inline] - pub fn as_str(&self) -> &str { - match self.inner { - Repr::Standard(v) => v.as_str(), - Repr::Custom(ref v) => &*v.0, - } - } - - pub(super) fn into_bytes(self) -> Bytes { - self.inner.into() - } -} - -impl FromStr for HeaderName { - type Err = InvalidHeaderName; - - fn from_str(s: &str) -> Result { - HeaderName::from_bytes(s.as_bytes()).map_err(|_| InvalidHeaderName { _priv: () }) - } -} - -impl AsRef for HeaderName { - fn as_ref(&self) -> &str { - self.as_str() - } -} - -impl AsRef<[u8]> for HeaderName { - fn as_ref(&self) -> &[u8] { - self.as_str().as_bytes() - } -} - -impl Borrow for HeaderName { - fn borrow(&self) -> &str { - self.as_str() - } -} - -impl fmt::Debug for HeaderName { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self.as_str(), fmt) - } -} - -impl fmt::Display for HeaderName { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self.as_str(), fmt) - } -} - -impl InvalidHeaderName { - fn new() -> InvalidHeaderName { - InvalidHeaderName { _priv: () } - } -} - -impl<'a> From<&'a HeaderName> for HeaderName { - fn from(src: &'a HeaderName) -> HeaderName { - src.clone() - } -} - 
-#[doc(hidden)] -impl From> for Bytes -where - T: Into, -{ - fn from(repr: Repr) -> Bytes { - match repr { - Repr::Standard(header) => Bytes::from_static(header.as_str().as_bytes()), - Repr::Custom(header) => header.into(), - } - } -} - -impl From for Bytes { - #[inline] - fn from(Custom(inner): Custom) -> Bytes { - Bytes::from(inner) - } -} - -impl<'a> TryFrom<&'a str> for HeaderName { - type Error = InvalidHeaderName; - #[inline] - fn try_from(s: &'a str) -> Result { - Self::from_bytes(s.as_bytes()) - } -} - -impl<'a> TryFrom<&'a String> for HeaderName { - type Error = InvalidHeaderName; - #[inline] - fn try_from(s: &'a String) -> Result { - Self::from_bytes(s.as_bytes()) - } -} - -impl<'a> TryFrom<&'a [u8]> for HeaderName { - type Error = InvalidHeaderName; - #[inline] - fn try_from(s: &'a [u8]) -> Result { - Self::from_bytes(s) - } -} - -impl TryFrom for HeaderName { - type Error = InvalidHeaderName; - - #[inline] - fn try_from(s: String) -> Result { - Self::from_bytes(s.as_bytes()) - } -} - -impl TryFrom> for HeaderName { - type Error = InvalidHeaderName; - - #[inline] - fn try_from(vec: Vec) -> Result { - Self::from_bytes(&vec) - } -} - -#[doc(hidden)] -impl From for HeaderName { - fn from(src: StandardHeader) -> HeaderName { - HeaderName { - inner: Repr::Standard(src), - } - } -} - -#[doc(hidden)] -impl From for HeaderName { - fn from(src: Custom) -> HeaderName { - HeaderName { - inner: Repr::Custom(src), - } - } -} - -impl<'a> PartialEq<&'a HeaderName> for HeaderName { - #[inline] - fn eq(&self, other: &&'a HeaderName) -> bool { - *self == **other - } -} - -impl<'a> PartialEq for &'a HeaderName { - #[inline] - fn eq(&self, other: &HeaderName) -> bool { - *other == *self - } -} - -impl PartialEq for HeaderName { - /// Performs a case-insensitive comparison of the string against the header - /// name - /// - /// # Examples - /// - /// ``` - /// use http::header::CONTENT_LENGTH; - /// - /// assert_eq!(CONTENT_LENGTH, "content-length"); - /// 
assert_eq!(CONTENT_LENGTH, "Content-Length"); - /// assert_ne!(CONTENT_LENGTH, "content length"); - /// ``` - #[inline] - fn eq(&self, other: &str) -> bool { - eq_ignore_ascii_case(self.as_ref(), other.as_bytes()) - } -} - -impl PartialEq for str { - /// Performs a case-insensitive comparison of the string against the header - /// name - /// - /// # Examples - /// - /// ``` - /// use http::header::CONTENT_LENGTH; - /// - /// assert_eq!(CONTENT_LENGTH, "content-length"); - /// assert_eq!(CONTENT_LENGTH, "Content-Length"); - /// assert_ne!(CONTENT_LENGTH, "content length"); - /// ``` - #[inline] - fn eq(&self, other: &HeaderName) -> bool { - *other == *self - } -} - -impl<'a> PartialEq<&'a str> for HeaderName { - /// Performs a case-insensitive comparison of the string against the header - /// name - #[inline] - fn eq(&self, other: &&'a str) -> bool { - *self == **other - } -} - -impl<'a> PartialEq for &'a str { - /// Performs a case-insensitive comparison of the string against the header - /// name - #[inline] - fn eq(&self, other: &HeaderName) -> bool { - *other == *self - } -} - -impl fmt::Debug for InvalidHeaderName { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("InvalidHeaderName") - // skip _priv noise - .finish() - } -} - -impl fmt::Display for InvalidHeaderName { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("invalid HTTP header name") - } -} - -impl Error for InvalidHeaderName {} - -// ===== HdrName ===== - -impl<'a> HdrName<'a> { - // Precondition: if lower then buf is valid UTF-8 - fn custom(buf: &'a [u8], lower: bool) -> HdrName<'a> { - HdrName { - // Invariant (on MaybeLower): follows from the precondition - inner: Repr::Custom(MaybeLower { - buf: buf, - lower: lower, - }), - } - } - - pub fn from_bytes(hdr: &[u8], f: F) -> Result - where F: FnOnce(HdrName<'_>) -> U, - { - let mut buf = uninit_u8_array(); - // Precondition: HEADER_CHARS is a valid table for parse_hdr(). 
- let hdr = parse_hdr(hdr, &mut buf, &HEADER_CHARS)?; - Ok(f(hdr)) - } - - pub fn from_static(hdr: &'static str, f: F) -> U - where - F: FnOnce(HdrName<'_>) -> U, - { - let mut buf = uninit_u8_array(); - let hdr = - // Precondition: HEADER_CHARS is a valid table for parse_hdr(). - parse_hdr(hdr.as_bytes(), &mut buf, &HEADER_CHARS).expect("static str is invalid name"); - f(hdr) - } -} - -#[doc(hidden)] -impl<'a> From> for HeaderName { - fn from(src: HdrName<'a>) -> HeaderName { - match src.inner { - Repr::Standard(s) => HeaderName { - inner: Repr::Standard(s), - }, - Repr::Custom(maybe_lower) => { - if maybe_lower.lower { - let buf = Bytes::copy_from_slice(&maybe_lower.buf[..]); - // Safety: the invariant on MaybeLower ensures buf is valid UTF-8. - let byte_str = unsafe { ByteStr::from_utf8_unchecked(buf) }; - - HeaderName { - inner: Repr::Custom(Custom(byte_str)), - } - } else { - use bytes::BufMut; - let mut dst = BytesMut::with_capacity(maybe_lower.buf.len()); - - for b in maybe_lower.buf.iter() { - // HEADER_CHARS maps each byte to a valid single-byte UTF-8 - // codepoint. - dst.put_u8(HEADER_CHARS[*b as usize]); - } - - // Safety: the loop above maps each byte of maybe_lower.buf to a - // valid single-byte UTF-8 codepoint before copying it into dst. - // dst (and hence dst.freeze()) is thus valid UTF-8. 
- let buf = unsafe { ByteStr::from_utf8_unchecked(dst.freeze()) }; - - HeaderName { - inner: Repr::Custom(Custom(buf)), - } - } - } - } - } -} - -#[doc(hidden)] -impl<'a> PartialEq> for HeaderName { - #[inline] - fn eq(&self, other: &HdrName<'a>) -> bool { - match self.inner { - Repr::Standard(a) => match other.inner { - Repr::Standard(b) => a == b, - _ => false, - }, - Repr::Custom(Custom(ref a)) => match other.inner { - Repr::Custom(ref b) => { - if b.lower { - a.as_bytes() == b.buf - } else { - eq_ignore_ascii_case(a.as_bytes(), b.buf) - } - } - _ => false, - }, - } - } -} - -// ===== Custom ===== - -impl Hash for Custom { - #[inline] - fn hash(&self, hasher: &mut H) { - hasher.write(self.0.as_bytes()) - } -} - -// ===== MaybeLower ===== - -impl<'a> Hash for MaybeLower<'a> { - #[inline] - fn hash(&self, hasher: &mut H) { - if self.lower { - hasher.write(self.buf); - } else { - for &b in self.buf { - hasher.write(&[HEADER_CHARS[b as usize]]); - } - } - } -} - -// Assumes that the left hand side is already lower case -#[inline] -fn eq_ignore_ascii_case(lower: &[u8], s: &[u8]) -> bool { - if lower.len() != s.len() { - return false; - } - - lower.iter().zip(s).all(|(a, b)| { - *a == HEADER_CHARS[*b as usize] - }) -} - -// Utility functions for MaybeUninit<>. These are drawn from unstable API's on -// MaybeUninit<> itself. -const SCRATCH_BUF_SIZE: usize = 64; -const SCRATCH_BUF_OVERFLOW: usize = SCRATCH_BUF_SIZE + 1; - -fn uninit_u8_array() -> [MaybeUninit; SCRATCH_BUF_SIZE] { - let arr = MaybeUninit::<[MaybeUninit; SCRATCH_BUF_SIZE]>::uninit(); - // Safety: assume_init() is claiming that an array of MaybeUninit<> - // has been initilized, but MaybeUninit<>'s do not require initilizaton. - unsafe { arr.assume_init() } -} - -// Assuming all the elements are initilized, get a slice of them. -// -// Safety: All elements of `slice` must be initilized to prevent -// undefined behavior. 
-unsafe fn slice_assume_init(slice: &[MaybeUninit]) -> &[T] { - &*(slice as *const [MaybeUninit] as *const [T]) -} - -#[cfg(test)] -mod tests { - use super::*; - use self::StandardHeader::Vary; - - #[test] - fn test_bounds() { - fn check_bounds() {} - check_bounds::(); - } - - #[test] - fn test_parse_invalid_headers() { - for i in 0..128 { - let hdr = vec![1u8; i]; - assert!(HeaderName::from_bytes(&hdr).is_err(), "{} invalid header chars did not fail", i); - } - } - - const ONE_TOO_LONG: &[u8] = &[b'a'; super::super::MAX_HEADER_NAME_LEN+1]; - - #[test] - fn test_invalid_name_lengths() { - assert!( - HeaderName::from_bytes(&[]).is_err(), - "zero-length header name is an error", - ); - - let long = &ONE_TOO_LONG[0..super::super::MAX_HEADER_NAME_LEN]; - - let long_str = std::str::from_utf8(long).unwrap(); - assert_eq!(HeaderName::from_static(long_str), long_str); // shouldn't panic! - - assert!( - HeaderName::from_bytes(long).is_ok(), - "max header name length is ok", - ); - assert!( - HeaderName::from_bytes(ONE_TOO_LONG).is_err(), - "longer than max header name length is an error", - ); - } - - #[test] - #[should_panic] - fn test_static_invalid_name_lengths() { - // Safety: ONE_TOO_LONG contains only the UTF-8 safe, single-byte codepoint b'a'. 
- let _ = HeaderName::from_static(unsafe { std::str::from_utf8_unchecked(ONE_TOO_LONG) }); - } - - #[test] - fn test_from_hdr_name() { - use self::StandardHeader::Vary; - - let name = HeaderName::from(HdrName { - inner: Repr::Standard(Vary), - }); - - assert_eq!(name.inner, Repr::Standard(Vary)); - - let name = HeaderName::from(HdrName { - inner: Repr::Custom(MaybeLower { - buf: b"hello-world", - lower: true, - }), - }); - - assert_eq!(name.inner, Repr::Custom(Custom(ByteStr::from_static("hello-world")))); - - let name = HeaderName::from(HdrName { - inner: Repr::Custom(MaybeLower { - buf: b"Hello-World", - lower: false, - }), - }); - - assert_eq!(name.inner, Repr::Custom(Custom(ByteStr::from_static("hello-world")))); - } - - #[test] - fn test_eq_hdr_name() { - use self::StandardHeader::Vary; - - let a = HeaderName { inner: Repr::Standard(Vary) }; - let b = HdrName { inner: Repr::Standard(Vary) }; - - assert_eq!(a, b); - - let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("vaary"))) }; - assert_ne!(a, b); - - let b = HdrName { inner: Repr::Custom(MaybeLower { - buf: b"vaary", - lower: true, - })}; - - assert_eq!(a, b); - - let b = HdrName { inner: Repr::Custom(MaybeLower { - buf: b"vaary", - lower: false, - })}; - - assert_eq!(a, b); - - let b = HdrName { inner: Repr::Custom(MaybeLower { - buf: b"VAARY", - lower: false, - })}; - - assert_eq!(a, b); - - let a = HeaderName { inner: Repr::Standard(Vary) }; - assert_ne!(a, b); - } - - #[test] - fn test_from_static_std() { - let a = HeaderName { inner: Repr::Standard(Vary) }; - - let b = HeaderName::from_static("vary"); - assert_eq!(a, b); - - let b = HeaderName::from_static("vaary"); - assert_ne!(a, b); - } - - #[test] - #[should_panic] - fn test_from_static_std_uppercase() { - HeaderName::from_static("Vary"); - } - - #[test] - #[should_panic] - fn test_from_static_std_symbol() { - HeaderName::from_static("vary{}"); - } - - // MaybeLower { lower: true } - #[test] - fn test_from_static_custom_short() { 
- let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("customheader"))) }; - let b = HeaderName::from_static("customheader"); - assert_eq!(a, b); - } - - #[test] - #[should_panic] - fn test_from_static_custom_short_uppercase() { - HeaderName::from_static("custom header"); - } - - #[test] - #[should_panic] - fn test_from_static_custom_short_symbol() { - HeaderName::from_static("CustomHeader"); - } - - // MaybeLower { lower: false } - #[test] - fn test_from_static_custom_long() { - let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static( - "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent" - ))) }; - let b = HeaderName::from_static( - "longer-than-63--thisheaderislongerthansixtythreecharactersandthushandleddifferent" - ); - assert_eq!(a, b); - } - - #[test] - #[should_panic] - fn test_from_static_custom_long_uppercase() { - HeaderName::from_static( - "Longer-Than-63--ThisHeaderIsLongerThanSixtyThreeCharactersAndThusHandledDifferent" - ); - } - - #[test] - #[should_panic] - fn test_from_static_custom_long_symbol() { - HeaderName::from_static( - "longer-than-63--thisheader{}{}{}{}islongerthansixtythreecharactersandthushandleddifferent" - ); - } - - #[test] - fn test_from_static_custom_single_char() { - let a = HeaderName { inner: Repr::Custom(Custom(ByteStr::from_static("a"))) }; - let b = HeaderName::from_static("a"); - assert_eq!(a, b); - } - - #[test] - #[should_panic] - fn test_from_static_empty() { - HeaderName::from_static(""); - } - - #[test] - fn test_all_tokens() { - HeaderName::from_static("!#$%&'*+-.^_`|~0123456789abcdefghijklmnopqrstuvwxyz"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/header/value.rs s390-tools-2.33.1/rust-vendor/http/src/header/value.rs --- s390-tools-2.31.0/rust-vendor/http/src/header/value.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/header/value.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,795 +0,0 @@ -use bytes::{Bytes, 
BytesMut}; - -use std::convert::TryFrom; -use std::error::Error; -use std::fmt::Write; -use std::str::FromStr; -use std::{cmp, fmt, mem, str}; - -use crate::header::name::HeaderName; - -/// Represents an HTTP header field value. -/// -/// In practice, HTTP header field values are usually valid ASCII. However, the -/// HTTP spec allows for a header value to contain opaque bytes as well. In this -/// case, the header field value is not able to be represented as a string. -/// -/// To handle this, the `HeaderValue` is useable as a type and can be compared -/// with strings and implements `Debug`. A `to_str` fn is provided that returns -/// an `Err` if the header value contains non visible ascii characters. -#[derive(Clone, Hash)] -pub struct HeaderValue { - inner: Bytes, - is_sensitive: bool, -} - -/// A possible error when converting a `HeaderValue` from a string or byte -/// slice. -pub struct InvalidHeaderValue { - _priv: (), -} - -/// A possible error when converting a `HeaderValue` to a string representation. -/// -/// Header field values may contain opaque bytes, in which case it is not -/// possible to represent the value as a string. -#[derive(Debug)] -pub struct ToStrError { - _priv: (), -} - -impl HeaderValue { - /// Convert a static string to a `HeaderValue`. - /// - /// This function will not perform any copying, however the string is - /// checked to ensure that no invalid characters are present. Only visible - /// ASCII characters (32-127) are permitted. - /// - /// # Panics - /// - /// This function panics if the argument contains invalid header value - /// characters. 
- /// - /// Until [Allow panicking in constants](https://github.com/rust-lang/rfcs/pull/2345) - /// makes its way into stable, the panic message at compile-time is - /// going to look cryptic, but should at least point at your header value: - /// - /// ```text - /// error: any use of this value will cause an error - /// --> http/src/header/value.rs:67:17 - /// | - /// 67 | ([] as [u8; 0])[0]; // Invalid header value - /// | ^^^^^^^^^^^^^^^^^^ - /// | | - /// | index out of bounds: the length is 0 but the index is 0 - /// | inside `HeaderValue::from_static` at http/src/header/value.rs:67:17 - /// | inside `INVALID_HEADER` at src/main.rs:73:33 - /// | - /// ::: src/main.rs:73:1 - /// | - /// 73 | const INVALID_HEADER: HeaderValue = HeaderValue::from_static("жsome value"); - /// | ---------------------------------------------------------------------------- - /// ``` - /// - /// # Examples - /// - /// ``` - /// # use http::header::HeaderValue; - /// let val = HeaderValue::from_static("hello"); - /// assert_eq!(val, "hello"); - /// ``` - #[inline] - #[allow(unconditional_panic)] // required for the panic circumvention - pub const fn from_static(src: &'static str) -> HeaderValue { - let bytes = src.as_bytes(); - let mut i = 0; - while i < bytes.len() { - if !is_visible_ascii(bytes[i]) { - ([] as [u8; 0])[0]; // Invalid header value - } - i += 1; - } - - HeaderValue { - inner: Bytes::from_static(bytes), - is_sensitive: false, - } - } - - /// Attempt to convert a string to a `HeaderValue`. - /// - /// If the argument contains invalid header value characters, an error is - /// returned. Only visible ASCII characters (32-127) are permitted. Use - /// `from_bytes` to create a `HeaderValue` that includes opaque octets - /// (128-255). - /// - /// This function is intended to be replaced in the future by a `TryFrom` - /// implementation once the trait is stabilized in std. 
- /// - /// # Examples - /// - /// ``` - /// # use http::header::HeaderValue; - /// let val = HeaderValue::from_str("hello").unwrap(); - /// assert_eq!(val, "hello"); - /// ``` - /// - /// An invalid value - /// - /// ``` - /// # use http::header::HeaderValue; - /// let val = HeaderValue::from_str("\n"); - /// assert!(val.is_err()); - /// ``` - #[inline] - pub fn from_str(src: &str) -> Result { - HeaderValue::try_from_generic(src, |s| Bytes::copy_from_slice(s.as_bytes())) - } - - /// Converts a HeaderName into a HeaderValue - /// - /// Since every valid HeaderName is a valid HeaderValue this is done infallibly. - /// - /// # Examples - /// - /// ``` - /// # use http::header::{HeaderValue, HeaderName}; - /// # use http::header::ACCEPT; - /// let val = HeaderValue::from_name(ACCEPT); - /// assert_eq!(val, HeaderValue::from_bytes(b"accept").unwrap()); - /// ``` - #[inline] - pub fn from_name(name: HeaderName) -> HeaderValue { - name.into() - } - - /// Attempt to convert a byte slice to a `HeaderValue`. - /// - /// If the argument contains invalid header value bytes, an error is - /// returned. Only byte values between 32 and 255 (inclusive) are permitted, - /// excluding byte 127 (DEL). - /// - /// This function is intended to be replaced in the future by a `TryFrom` - /// implementation once the trait is stabilized in std. - /// - /// # Examples - /// - /// ``` - /// # use http::header::HeaderValue; - /// let val = HeaderValue::from_bytes(b"hello\xfa").unwrap(); - /// assert_eq!(val, &b"hello\xfa"[..]); - /// ``` - /// - /// An invalid value - /// - /// ``` - /// # use http::header::HeaderValue; - /// let val = HeaderValue::from_bytes(b"\n"); - /// assert!(val.is_err()); - /// ``` - #[inline] - pub fn from_bytes(src: &[u8]) -> Result { - HeaderValue::try_from_generic(src, Bytes::copy_from_slice) - } - - /// Attempt to convert a `Bytes` buffer to a `HeaderValue`. 
- /// - /// This will try to prevent a copy if the type passed is the type used - /// internally, and will copy the data if it is not. - pub fn from_maybe_shared(src: T) -> Result - where - T: AsRef<[u8]> + 'static, - { - if_downcast_into!(T, Bytes, src, { - return HeaderValue::from_shared(src); - }); - - HeaderValue::from_bytes(src.as_ref()) - } - - /// Convert a `Bytes` directly into a `HeaderValue` without validating. - /// - /// This function does NOT validate that illegal bytes are not contained - /// within the buffer. - pub unsafe fn from_maybe_shared_unchecked(src: T) -> HeaderValue - where - T: AsRef<[u8]> + 'static, - { - if cfg!(debug_assertions) { - match HeaderValue::from_maybe_shared(src) { - Ok(val) => val, - Err(_err) => { - panic!("HeaderValue::from_maybe_shared_unchecked() with invalid bytes"); - } - } - } else { - - if_downcast_into!(T, Bytes, src, { - return HeaderValue { - inner: src, - is_sensitive: false, - }; - }); - - let src = Bytes::copy_from_slice(src.as_ref()); - HeaderValue { - inner: src, - is_sensitive: false, - } - } - } - - fn from_shared(src: Bytes) -> Result { - HeaderValue::try_from_generic(src, std::convert::identity) - } - - fn try_from_generic, F: FnOnce(T) -> Bytes>(src: T, into: F) -> Result { - for &b in src.as_ref() { - if !is_valid(b) { - return Err(InvalidHeaderValue { _priv: () }); - } - } - Ok(HeaderValue { - inner: into(src), - is_sensitive: false, - }) - } - - /// Yields a `&str` slice if the `HeaderValue` only contains visible ASCII - /// chars. - /// - /// This function will perform a scan of the header value, checking all the - /// characters. 
- /// - /// # Examples - /// - /// ``` - /// # use http::header::HeaderValue; - /// let val = HeaderValue::from_static("hello"); - /// assert_eq!(val.to_str().unwrap(), "hello"); - /// ``` - pub fn to_str(&self) -> Result<&str, ToStrError> { - let bytes = self.as_ref(); - - for &b in bytes { - if !is_visible_ascii(b) { - return Err(ToStrError { _priv: () }); - } - } - - unsafe { Ok(str::from_utf8_unchecked(bytes)) } - } - - /// Returns the length of `self`. - /// - /// This length is in bytes. - /// - /// # Examples - /// - /// ``` - /// # use http::header::HeaderValue; - /// let val = HeaderValue::from_static("hello"); - /// assert_eq!(val.len(), 5); - /// ``` - #[inline] - pub fn len(&self) -> usize { - self.as_ref().len() - } - - /// Returns true if the `HeaderValue` has a length of zero bytes. - /// - /// # Examples - /// - /// ``` - /// # use http::header::HeaderValue; - /// let val = HeaderValue::from_static(""); - /// assert!(val.is_empty()); - /// - /// let val = HeaderValue::from_static("hello"); - /// assert!(!val.is_empty()); - /// ``` - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Converts a `HeaderValue` to a byte slice. - /// - /// # Examples - /// - /// ``` - /// # use http::header::HeaderValue; - /// let val = HeaderValue::from_static("hello"); - /// assert_eq!(val.as_bytes(), b"hello"); - /// ``` - #[inline] - pub fn as_bytes(&self) -> &[u8] { - self.as_ref() - } - - /// Mark that the header value represents sensitive information. - /// - /// # Examples - /// - /// ``` - /// # use http::header::HeaderValue; - /// let mut val = HeaderValue::from_static("my secret"); - /// - /// val.set_sensitive(true); - /// assert!(val.is_sensitive()); - /// - /// val.set_sensitive(false); - /// assert!(!val.is_sensitive()); - /// ``` - #[inline] - pub fn set_sensitive(&mut self, val: bool) { - self.is_sensitive = val; - } - - /// Returns `true` if the value represents sensitive data. 
- /// - /// Sensitive data could represent passwords or other data that should not - /// be stored on disk or in memory. By marking header values as sensitive, - /// components using this crate can be instructed to treat them with special - /// care for security reasons. For example, caches can avoid storing - /// sensitive values, and HPACK encoders used by HTTP/2.0 implementations - /// can choose not to compress them. - /// - /// Additionally, sensitive values will be masked by the `Debug` - /// implementation of `HeaderValue`. - /// - /// Note that sensitivity is not factored into equality or ordering. - /// - /// # Examples - /// - /// ``` - /// # use http::header::HeaderValue; - /// let mut val = HeaderValue::from_static("my secret"); - /// - /// val.set_sensitive(true); - /// assert!(val.is_sensitive()); - /// - /// val.set_sensitive(false); - /// assert!(!val.is_sensitive()); - /// ``` - #[inline] - pub fn is_sensitive(&self) -> bool { - self.is_sensitive - } -} - -impl AsRef<[u8]> for HeaderValue { - #[inline] - fn as_ref(&self) -> &[u8] { - self.inner.as_ref() - } -} - -impl fmt::Debug for HeaderValue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.is_sensitive { - f.write_str("Sensitive") - } else { - f.write_str("\"")?; - let mut from = 0; - let bytes = self.as_bytes(); - for (i, &b) in bytes.iter().enumerate() { - if !is_visible_ascii(b) || b == b'"' { - if from != i { - f.write_str(unsafe { str::from_utf8_unchecked(&bytes[from..i]) })?; - } - if b == b'"' { - f.write_str("\\\"")?; - } else { - write!(f, "\\x{:x}", b)?; - } - from = i + 1; - } - } - - f.write_str(unsafe { str::from_utf8_unchecked(&bytes[from..]) })?; - f.write_str("\"") - } - } -} - -impl From for HeaderValue { - #[inline] - fn from(h: HeaderName) -> HeaderValue { - HeaderValue { - inner: h.into_bytes(), - is_sensitive: false, - } - } -} - -macro_rules! 
from_integers { - ($($name:ident: $t:ident => $max_len:expr),*) => {$( - impl From<$t> for HeaderValue { - fn from(num: $t) -> HeaderValue { - let mut buf = if mem::size_of::() - 1 < $max_len { - // On 32bit platforms, BytesMut max inline size - // is 15 bytes, but the $max_len could be bigger. - // - // The likelihood of the number *actually* being - // that big is very small, so only allocate - // if the number needs that space. - // - // The largest decimal number in 15 digits: - // It wold be 10.pow(15) - 1, but this is a constant - // version. - if num as u64 > 999_999_999_999_999_999 { - BytesMut::with_capacity($max_len) - } else { - // fits inline... - BytesMut::new() - } - } else { - // full value fits inline, so don't allocate! - BytesMut::new() - }; - let _ = buf.write_str(::itoa::Buffer::new().format(num)); - HeaderValue { - inner: buf.freeze(), - is_sensitive: false, - } - } - } - - #[test] - fn $name() { - let n: $t = 55; - let val = HeaderValue::from(n); - assert_eq!(val, &n.to_string()); - - let n = ::std::$t::MAX; - let val = HeaderValue::from(n); - assert_eq!(val, &n.to_string()); - } - )*}; -} - -from_integers! { - // integer type => maximum decimal length - - // u8 purposely left off... HeaderValue::from(b'3') could be confusing - from_u16: u16 => 5, - from_i16: i16 => 6, - from_u32: u32 => 10, - from_i32: i32 => 11, - from_u64: u64 => 20, - from_i64: i64 => 20 -} - -#[cfg(target_pointer_width = "16")] -from_integers! { - from_usize: usize => 5, - from_isize: isize => 6 -} - -#[cfg(target_pointer_width = "32")] -from_integers! { - from_usize: usize => 10, - from_isize: isize => 11 -} - -#[cfg(target_pointer_width = "64")] -from_integers! 
{ - from_usize: usize => 20, - from_isize: isize => 20 -} - -#[cfg(test)] -mod from_header_name_tests { - use super::*; - use crate::header::map::HeaderMap; - use crate::header::name; - - #[test] - fn it_can_insert_header_name_as_header_value() { - let mut map = HeaderMap::new(); - map.insert(name::UPGRADE, name::SEC_WEBSOCKET_PROTOCOL.into()); - map.insert( - name::ACCEPT, - name::HeaderName::from_bytes(b"hello-world").unwrap().into(), - ); - - assert_eq!( - map.get(name::UPGRADE).unwrap(), - HeaderValue::from_bytes(b"sec-websocket-protocol").unwrap() - ); - - assert_eq!( - map.get(name::ACCEPT).unwrap(), - HeaderValue::from_bytes(b"hello-world").unwrap() - ); - } -} - -impl FromStr for HeaderValue { - type Err = InvalidHeaderValue; - - #[inline] - fn from_str(s: &str) -> Result { - HeaderValue::from_str(s) - } -} - -impl<'a> From<&'a HeaderValue> for HeaderValue { - #[inline] - fn from(t: &'a HeaderValue) -> Self { - t.clone() - } -} - -impl<'a> TryFrom<&'a str> for HeaderValue { - type Error = InvalidHeaderValue; - - #[inline] - fn try_from(t: &'a str) -> Result { - t.parse() - } -} - -impl<'a> TryFrom<&'a String> for HeaderValue { - type Error = InvalidHeaderValue; - #[inline] - fn try_from(s: &'a String) -> Result { - Self::from_bytes(s.as_bytes()) - } -} - -impl<'a> TryFrom<&'a [u8]> for HeaderValue { - type Error = InvalidHeaderValue; - - #[inline] - fn try_from(t: &'a [u8]) -> Result { - HeaderValue::from_bytes(t) - } -} - -impl TryFrom for HeaderValue { - type Error = InvalidHeaderValue; - - #[inline] - fn try_from(t: String) -> Result { - HeaderValue::from_shared(t.into()) - } -} - -impl TryFrom> for HeaderValue { - type Error = InvalidHeaderValue; - - #[inline] - fn try_from(vec: Vec) -> Result { - HeaderValue::from_shared(vec.into()) - } -} - -#[cfg(test)] -mod try_from_header_name_tests { - use super::*; - use crate::header::name; - - #[test] - fn it_converts_using_try_from() { - assert_eq!( - HeaderValue::try_from(name::UPGRADE).unwrap(), - 
HeaderValue::from_bytes(b"upgrade").unwrap() - ); - } -} - -const fn is_visible_ascii(b: u8) -> bool { - b >= 32 && b < 127 || b == b'\t' -} - -#[inline] -fn is_valid(b: u8) -> bool { - b >= 32 && b != 127 || b == b'\t' -} - -impl fmt::Debug for InvalidHeaderValue { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("InvalidHeaderValue") - // skip _priv noise - .finish() - } -} - -impl fmt::Display for InvalidHeaderValue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("failed to parse header value") - } -} - -impl Error for InvalidHeaderValue {} - -impl fmt::Display for ToStrError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("failed to convert header to a str") - } -} - -impl Error for ToStrError {} - -// ===== PartialEq / PartialOrd ===== - -impl PartialEq for HeaderValue { - #[inline] - fn eq(&self, other: &HeaderValue) -> bool { - self.inner == other.inner - } -} - -impl Eq for HeaderValue {} - -impl PartialOrd for HeaderValue { - #[inline] - fn partial_cmp(&self, other: &HeaderValue) -> Option { - self.inner.partial_cmp(&other.inner) - } -} - -impl Ord for HeaderValue { - #[inline] - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.inner.cmp(&other.inner) - } -} - -impl PartialEq for HeaderValue { - #[inline] - fn eq(&self, other: &str) -> bool { - self.inner == other.as_bytes() - } -} - -impl PartialEq<[u8]> for HeaderValue { - #[inline] - fn eq(&self, other: &[u8]) -> bool { - self.inner == other - } -} - -impl PartialOrd for HeaderValue { - #[inline] - fn partial_cmp(&self, other: &str) -> Option { - (*self.inner).partial_cmp(other.as_bytes()) - } -} - -impl PartialOrd<[u8]> for HeaderValue { - #[inline] - fn partial_cmp(&self, other: &[u8]) -> Option { - (*self.inner).partial_cmp(other) - } -} - -impl PartialEq for str { - #[inline] - fn eq(&self, other: &HeaderValue) -> bool { - *other == *self - } -} - -impl PartialEq for [u8] { - #[inline] - fn eq(&self, other: 
&HeaderValue) -> bool { - *other == *self - } -} - -impl PartialOrd for str { - #[inline] - fn partial_cmp(&self, other: &HeaderValue) -> Option { - self.as_bytes().partial_cmp(other.as_bytes()) - } -} - -impl PartialOrd for [u8] { - #[inline] - fn partial_cmp(&self, other: &HeaderValue) -> Option { - self.partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for HeaderValue { - #[inline] - fn eq(&self, other: &String) -> bool { - *self == &other[..] - } -} - -impl PartialOrd for HeaderValue { - #[inline] - fn partial_cmp(&self, other: &String) -> Option { - self.inner.partial_cmp(other.as_bytes()) - } -} - -impl PartialEq for String { - #[inline] - fn eq(&self, other: &HeaderValue) -> bool { - *other == *self - } -} - -impl PartialOrd for String { - #[inline] - fn partial_cmp(&self, other: &HeaderValue) -> Option { - self.as_bytes().partial_cmp(other.as_bytes()) - } -} - -impl<'a> PartialEq for &'a HeaderValue { - #[inline] - fn eq(&self, other: &HeaderValue) -> bool { - **self == *other - } -} - -impl<'a> PartialOrd for &'a HeaderValue { - #[inline] - fn partial_cmp(&self, other: &HeaderValue) -> Option { - (**self).partial_cmp(other) - } -} - -impl<'a, T: ?Sized> PartialEq<&'a T> for HeaderValue -where - HeaderValue: PartialEq, -{ - #[inline] - fn eq(&self, other: &&'a T) -> bool { - *self == **other - } -} - -impl<'a, T: ?Sized> PartialOrd<&'a T> for HeaderValue -where - HeaderValue: PartialOrd, -{ - #[inline] - fn partial_cmp(&self, other: &&'a T) -> Option { - self.partial_cmp(*other) - } -} - -impl<'a> PartialEq for &'a str { - #[inline] - fn eq(&self, other: &HeaderValue) -> bool { - *other == *self - } -} - -impl<'a> PartialOrd for &'a str { - #[inline] - fn partial_cmp(&self, other: &HeaderValue) -> Option { - self.as_bytes().partial_cmp(other.as_bytes()) - } -} - -#[test] -fn test_try_from() { - HeaderValue::try_from(vec![127]).unwrap_err(); -} - -#[test] -fn test_debug() { - let cases = &[ - ("hello", "\"hello\""), - ("hello \"world\"", "\"hello 
\\\"world\\\"\""), - ("\u{7FFF}hello", "\"\\xe7\\xbf\\xbfhello\""), - ]; - - for &(value, expected) in cases { - let val = HeaderValue::from_bytes(value.as_bytes()).unwrap(); - let actual = format!("{:?}", val); - assert_eq!(expected, actual); - } - - let mut sensitive = HeaderValue::from_static("password"); - sensitive.set_sensitive(true); - assert_eq!("Sensitive", format!("{:?}", sensitive)); -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/lib.rs s390-tools-2.33.1/rust-vendor/http/src/lib.rs --- s390-tools-2.31.0/rust-vendor/http/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,211 +0,0 @@ -#![doc(html_root_url = "https://docs.rs/http/0.2.11")] - -//! A general purpose library of common HTTP types -//! -//! This crate is a general purpose library for common types found when working -//! with the HTTP protocol. You'll find `Request` and `Response` types for -//! working as either a client or a server as well as all of their components. -//! Notably you'll find `Uri` for what a `Request` is requesting, a `Method` -//! for how it's being requested, a `StatusCode` for what sort of response came -//! back, a `Version` for how this was communicated, and -//! `HeaderName`/`HeaderValue` definitions to get grouped in a `HeaderMap` to -//! work with request/response headers. -//! -//! You will notably *not* find an implementation of sending requests or -//! spinning up a server in this crate. It's intended that this crate is the -//! "standard library" for HTTP clients and servers without dictating any -//! particular implementation. Note that this crate is still early on in its -//! lifecycle so the support libraries that integrate with the `http` crate are -//! a work in progress! Stay tuned and we'll be sure to highlight crates here -//! in the future. -//! -//! ## Requests and Responses -//! -//! Perhaps the main two types in this crate are the `Request` and `Response` -//! 
types. A `Request` could either be constructed to get sent off as a client -//! or it can also be received to generate a `Response` for a server. Similarly -//! as a client a `Response` is what you get after sending a `Request`, whereas -//! on a server you'll be manufacturing a `Response` to send back to the client. -//! -//! Each type has a number of accessors for the component fields. For as a -//! server you might want to inspect a requests URI to dispatch it: -//! -//! ``` -//! use http::{Request, Response}; -//! -//! fn response(req: Request<()>) -> http::Result> { -//! match req.uri().path() { -//! "/" => index(req), -//! "/foo" => foo(req), -//! "/bar" => bar(req), -//! _ => not_found(req), -//! } -//! } -//! # fn index(_req: Request<()>) -> http::Result> { panic!() } -//! # fn foo(_req: Request<()>) -> http::Result> { panic!() } -//! # fn bar(_req: Request<()>) -> http::Result> { panic!() } -//! # fn not_found(_req: Request<()>) -> http::Result> { panic!() } -//! ``` -//! -//! On a `Request` you'll also find accessors like `method` to return a -//! `Method` and `headers` to inspect the various headers. A `Response` -//! has similar methods for headers, the status code, etc. -//! -//! In addition to getters, request/response types also have mutable accessors -//! to edit the request/response: -//! -//! ``` -//! use http::{HeaderValue, Response, StatusCode}; -//! use http::header::CONTENT_TYPE; -//! -//! fn add_server_headers(response: &mut Response) { -//! response.headers_mut() -//! .insert(CONTENT_TYPE, HeaderValue::from_static("text/html")); -//! *response.status_mut() = StatusCode::OK; -//! } -//! ``` -//! -//! And finally, one of the most important aspects of requests/responses, the -//! body! The `Request` and `Response` types in this crate are *generic* in -//! what their body is. This allows downstream libraries to use different -//! representations such as `Request>`, `Response`, -//! `Request, Error = _>>`, or even -//! 
`Response` where the custom type was deserialized from JSON. -//! -//! The body representation is intentionally flexible to give downstream -//! libraries maximal flexibility in implementing the body as appropriate. -//! -//! ## HTTP Headers -//! -//! Another major piece of functionality in this library is HTTP header -//! interpretation and generation. The `HeaderName` type serves as a way to -//! define header *names*, or what's to the left of the colon. A `HeaderValue` -//! conversely is the header *value*, or what's to the right of a colon. -//! -//! For example, if you have an HTTP request that looks like: -//! -//! ```http -//! GET /foo HTTP/1.1 -//! Accept: text/html -//! ``` -//! -//! Then `"Accept"` is a `HeaderName` while `"text/html"` is a `HeaderValue`. -//! Each of these is a dedicated type to allow for a number of interesting -//! optimizations and to also encode the static guarantees of each type. For -//! example a `HeaderName` is always a valid `&str`, but a `HeaderValue` may -//! not be valid UTF-8. -//! -//! The most common header names are already defined for you as constant values -//! in the `header` module of this crate. For example: -//! -//! ``` -//! use http::header::{self, HeaderName}; -//! -//! let name: HeaderName = header::ACCEPT; -//! assert_eq!(name.as_str(), "accept"); -//! ``` -//! -//! You can, however, also parse header names from strings: -//! -//! ``` -//! use http::header::{self, HeaderName}; -//! -//! let name = "Accept".parse::().unwrap(); -//! assert_eq!(name, header::ACCEPT); -//! ``` -//! -//! Header values can be created from string literals through the `from_static` -//! function: -//! -//! ``` -//! use http::HeaderValue; -//! -//! let value = HeaderValue::from_static("text/html"); -//! assert_eq!(value.as_bytes(), b"text/html"); -//! ``` -//! -//! And header values can also be parsed like names: -//! -//! ``` -//! use http::HeaderValue; -//! -//! let value = "text/html"; -//! let value = value.parse::().unwrap(); -//! 
``` -//! -//! Most HTTP requests and responses tend to come with more than one header, so -//! it's not too useful to just work with names and values only! This crate also -//! provides a `HeaderMap` type which is a specialized hash map for keys as -//! `HeaderName` and generic values. This type, like header names, is optimized -//! for common usage but should continue to scale with your needs over time. -//! -//! # URIs -//! -//! Each HTTP `Request` has an associated URI with it. This may just be a path -//! like `/index.html` but it could also be an absolute URL such as -//! `https://www.rust-lang.org/index.html`. A `URI` has a number of accessors to -//! interpret it: -//! -//! ``` -//! use http::Uri; -//! use http::uri::Scheme; -//! -//! let uri = "https://www.rust-lang.org/index.html".parse::().unwrap(); -//! -//! assert_eq!(uri.scheme(), Some(&Scheme::HTTPS)); -//! assert_eq!(uri.host(), Some("www.rust-lang.org")); -//! assert_eq!(uri.path(), "/index.html"); -//! assert_eq!(uri.query(), None); -//! 
``` - -#![deny(warnings, missing_docs, missing_debug_implementations)] - -#[cfg(test)] -#[macro_use] -extern crate doc_comment; - -#[cfg(test)] -doctest!("../README.md"); - -#[macro_use] -mod convert; - -pub mod header; -pub mod method; -pub mod request; -pub mod response; -pub mod status; -pub mod uri; -pub mod version; - -mod byte_str; -mod error; -mod extensions; - -pub use crate::error::{Error, Result}; -pub use crate::extensions::Extensions; -#[doc(no_inline)] -pub use crate::header::{HeaderMap, HeaderName, HeaderValue}; -pub use crate::method::Method; -pub use crate::request::Request; -pub use crate::response::Response; -pub use crate::status::StatusCode; -pub use crate::uri::Uri; -pub use crate::version::Version; - -fn _assert_types() { - fn assert_send() {} - fn assert_sync() {} - - assert_send::>(); - assert_send::>(); - - assert_sync::>(); - assert_sync::>(); -} - -mod sealed { - /// Private trait to this crate to prevent traits from being implemented in - /// downstream crates. - pub trait Sealed {} -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/method.rs s390-tools-2.33.1/rust-vendor/http/src/method.rs --- s390-tools-2.31.0/rust-vendor/http/src/method.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/method.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,473 +0,0 @@ -//! The HTTP request method -//! -//! This module contains HTTP-method related structs and errors and such. The -//! main type of this module, `Method`, is also reexported at the root of the -//! crate as `http::Method` and is intended for import through that location -//! primarily. -//! -//! # Examples -//! -//! ``` -//! use http::Method; -//! -//! assert_eq!(Method::GET, Method::from_bytes(b"GET").unwrap()); -//! assert!(Method::GET.is_idempotent()); -//! assert_eq!(Method::POST.as_str(), "POST"); -//! 
``` - -use self::Inner::*; -use self::extension::{InlineExtension, AllocatedExtension}; - -use std::convert::AsRef; -use std::error::Error; -use std::str::FromStr; -use std::convert::TryFrom; -use std::{fmt, str}; - -/// The Request Method (VERB) -/// -/// This type also contains constants for a number of common HTTP methods such -/// as GET, POST, etc. -/// -/// Currently includes 8 variants representing the 8 methods defined in -/// [RFC 7230](https://tools.ietf.org/html/rfc7231#section-4.1), plus PATCH, -/// and an Extension variant for all extensions. -/// -/// # Examples -/// -/// ``` -/// use http::Method; -/// -/// assert_eq!(Method::GET, Method::from_bytes(b"GET").unwrap()); -/// assert!(Method::GET.is_idempotent()); -/// assert_eq!(Method::POST.as_str(), "POST"); -/// ``` -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct Method(Inner); - -/// A possible error value when converting `Method` from bytes. -pub struct InvalidMethod { - _priv: (), -} - -#[derive(Clone, PartialEq, Eq, Hash)] -enum Inner { - Options, - Get, - Post, - Put, - Delete, - Head, - Trace, - Connect, - Patch, - // If the extension is short enough, store it inline - ExtensionInline(InlineExtension), - // Otherwise, allocate it - ExtensionAllocated(AllocatedExtension), -} - - -impl Method { - /// GET - pub const GET: Method = Method(Get); - - /// POST - pub const POST: Method = Method(Post); - - /// PUT - pub const PUT: Method = Method(Put); - - /// DELETE - pub const DELETE: Method = Method(Delete); - - /// HEAD - pub const HEAD: Method = Method(Head); - - /// OPTIONS - pub const OPTIONS: Method = Method(Options); - - /// CONNECT - pub const CONNECT: Method = Method(Connect); - - /// PATCH - pub const PATCH: Method = Method(Patch); - - /// TRACE - pub const TRACE: Method = Method(Trace); - - /// Converts a slice of bytes to an HTTP method. 
- pub fn from_bytes(src: &[u8]) -> Result { - match src.len() { - 0 => Err(InvalidMethod::new()), - 3 => match src { - b"GET" => Ok(Method(Get)), - b"PUT" => Ok(Method(Put)), - _ => Method::extension_inline(src), - }, - 4 => match src { - b"POST" => Ok(Method(Post)), - b"HEAD" => Ok(Method(Head)), - _ => Method::extension_inline(src), - }, - 5 => match src { - b"PATCH" => Ok(Method(Patch)), - b"TRACE" => Ok(Method(Trace)), - _ => Method::extension_inline(src), - }, - 6 => match src { - b"DELETE" => Ok(Method(Delete)), - _ => Method::extension_inline(src), - }, - 7 => match src { - b"OPTIONS" => Ok(Method(Options)), - b"CONNECT" => Ok(Method(Connect)), - _ => Method::extension_inline(src), - }, - _ => { - if src.len() < InlineExtension::MAX { - Method::extension_inline(src) - } else { - let allocated = AllocatedExtension::new(src)?; - - Ok(Method(ExtensionAllocated(allocated))) - } - } - } - } - - fn extension_inline(src: &[u8]) -> Result { - let inline = InlineExtension::new(src)?; - - Ok(Method(ExtensionInline(inline))) - } - - /// Whether a method is considered "safe", meaning the request is - /// essentially read-only. - /// - /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.1) - /// for more words. - pub fn is_safe(&self) -> bool { - match self.0 { - Get | Head | Options | Trace => true, - _ => false, - } - } - - /// Whether a method is considered "idempotent", meaning the request has - /// the same result if executed multiple times. - /// - /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.2) for - /// more words. 
- pub fn is_idempotent(&self) -> bool { - match self.0 { - Put | Delete => true, - _ => self.is_safe(), - } - } - - /// Return a &str representation of the HTTP method - #[inline] - pub fn as_str(&self) -> &str { - match self.0 { - Options => "OPTIONS", - Get => "GET", - Post => "POST", - Put => "PUT", - Delete => "DELETE", - Head => "HEAD", - Trace => "TRACE", - Connect => "CONNECT", - Patch => "PATCH", - ExtensionInline(ref inline) => inline.as_str(), - ExtensionAllocated(ref allocated) => allocated.as_str(), - } - } -} - -impl AsRef for Method { - #[inline] - fn as_ref(&self) -> &str { - self.as_str() - } -} - -impl<'a> PartialEq<&'a Method> for Method { - #[inline] - fn eq(&self, other: &&'a Method) -> bool { - self == *other - } -} - -impl<'a> PartialEq for &'a Method { - #[inline] - fn eq(&self, other: &Method) -> bool { - *self == other - } -} - -impl PartialEq for Method { - #[inline] - fn eq(&self, other: &str) -> bool { - self.as_ref() == other - } -} - -impl PartialEq for str { - #[inline] - fn eq(&self, other: &Method) -> bool { - self == other.as_ref() - } -} - -impl<'a> PartialEq<&'a str> for Method { - #[inline] - fn eq(&self, other: &&'a str) -> bool { - self.as_ref() == *other - } -} - -impl<'a> PartialEq for &'a str { - #[inline] - fn eq(&self, other: &Method) -> bool { - *self == other.as_ref() - } -} - -impl fmt::Debug for Method { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_ref()) - } -} - -impl fmt::Display for Method { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.write_str(self.as_ref()) - } -} - -impl Default for Method { - #[inline] - fn default() -> Method { - Method::GET - } -} - -impl<'a> From<&'a Method> for Method { - #[inline] - fn from(t: &'a Method) -> Self { - t.clone() - } -} - -impl<'a> TryFrom<&'a [u8]> for Method { - type Error = InvalidMethod; - - #[inline] - fn try_from(t: &'a [u8]) -> Result { - Method::from_bytes(t) - } -} - -impl<'a> TryFrom<&'a str> for Method 
{ - type Error = InvalidMethod; - - #[inline] - fn try_from(t: &'a str) -> Result { - TryFrom::try_from(t.as_bytes()) - } -} - -impl FromStr for Method { - type Err = InvalidMethod; - - #[inline] - fn from_str(t: &str) -> Result { - TryFrom::try_from(t) - } -} - -impl InvalidMethod { - fn new() -> InvalidMethod { - InvalidMethod { _priv: () } - } -} - -impl fmt::Debug for InvalidMethod { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("InvalidMethod") - // skip _priv noise - .finish() - } -} - -impl fmt::Display for InvalidMethod { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("invalid HTTP method") - } -} - -impl Error for InvalidMethod {} - -mod extension { - use super::InvalidMethod; - use std::str; - - #[derive(Clone, PartialEq, Eq, Hash)] - // Invariant: the first self.1 bytes of self.0 are valid UTF-8. - pub struct InlineExtension([u8; InlineExtension::MAX], u8); - - #[derive(Clone, PartialEq, Eq, Hash)] - // Invariant: self.0 contains valid UTF-8. - pub struct AllocatedExtension(Box<[u8]>); - - impl InlineExtension { - // Method::from_bytes() assumes this is at least 7 - pub const MAX: usize = 15; - - pub fn new(src: &[u8]) -> Result { - let mut data: [u8; InlineExtension::MAX] = Default::default(); - - write_checked(src, &mut data)?; - - // Invariant: write_checked ensures that the first src.len() bytes - // of data are valid UTF-8. - Ok(InlineExtension(data, src.len() as u8)) - } - - pub fn as_str(&self) -> &str { - let InlineExtension(ref data, len) = self; - // Safety: the invariant of InlineExtension ensures that the first - // len bytes of data contain valid UTF-8. 
- unsafe {str::from_utf8_unchecked(&data[..*len as usize])} - } - } - - impl AllocatedExtension { - pub fn new(src: &[u8]) -> Result { - let mut data: Vec = vec![0; src.len()]; - - write_checked(src, &mut data)?; - - // Invariant: data is exactly src.len() long and write_checked - // ensures that the first src.len() bytes of data are valid UTF-8. - Ok(AllocatedExtension(data.into_boxed_slice())) - } - - pub fn as_str(&self) -> &str { - // Safety: the invariant of AllocatedExtension ensures that self.0 - // contains valid UTF-8. - unsafe {str::from_utf8_unchecked(&self.0)} - } - } - - // From the HTTP spec section 5.1.1, the HTTP method is case-sensitive and can - // contain the following characters: - // - // ``` - // method = token - // token = 1*tchar - // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / - // "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA - // ``` - // - // https://www.w3.org/Protocols/HTTP/1.1/draft-ietf-http-v11-spec-01#Method - // - // Note that this definition means that any &[u8] that consists solely of valid - // characters is also valid UTF-8 because the valid method characters are a - // subset of the valid 1 byte UTF-8 encoding. 
- const METHOD_CHARS: [u8; 256] = [ - // 0 1 2 3 4 5 6 7 8 9 - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 1x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 2x - b'\0', b'\0', b'\0', b'!', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 3x - b'\0', b'\0', b'*', b'+', b'\0', b'-', b'.', b'\0', b'0', b'1', // 4x - b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'\0', b'\0', // 5x - b'\0', b'\0', b'\0', b'\0', b'\0', b'A', b'B', b'C', b'D', b'E', // 6x - b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x - b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x - b'Z', b'\0', b'\0', b'\0', b'^', b'_', b'`', b'a', b'b', b'c', // 9x - b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x - b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x - b'x', b'y', b'z', b'\0', b'|', b'\0', b'~', b'\0', b'\0', b'\0', // 12x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 13x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 14x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 15x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 16x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 17x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 18x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 19x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 20x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 21x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 22x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 23x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', b'\0', // 24x - b'\0', b'\0', b'\0', b'\0', b'\0', b'\0' // 
25x - ]; - - // write_checked ensures (among other things) that the first src.len() bytes - // of dst are valid UTF-8 - fn write_checked(src: &[u8], dst: &mut [u8]) -> Result<(), InvalidMethod> { - for (i, &b) in src.iter().enumerate() { - let b = METHOD_CHARS[b as usize]; - - if b == 0 { - return Err(InvalidMethod::new()); - } - - dst[i] = b; - } - - Ok(()) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_method_eq() { - assert_eq!(Method::GET, Method::GET); - assert_eq!(Method::GET, "GET"); - assert_eq!(&Method::GET, "GET"); - - assert_eq!("GET", Method::GET); - assert_eq!("GET", &Method::GET); - - assert_eq!(&Method::GET, Method::GET); - assert_eq!(Method::GET, &Method::GET); - } - - #[test] - fn test_invalid_method() { - assert!(Method::from_str("").is_err()); - assert!(Method::from_bytes(b"").is_err()); - assert!(Method::from_bytes(&[0xC0]).is_err()); // invalid utf-8 - assert!(Method::from_bytes(&[0x10]).is_err()); // invalid method characters - } - - #[test] - fn test_is_idempotent() { - assert!(Method::OPTIONS.is_idempotent()); - assert!(Method::GET.is_idempotent()); - assert!(Method::PUT.is_idempotent()); - assert!(Method::DELETE.is_idempotent()); - assert!(Method::HEAD.is_idempotent()); - assert!(Method::TRACE.is_idempotent()); - - assert!(!Method::POST.is_idempotent()); - assert!(!Method::CONNECT.is_idempotent()); - assert!(!Method::PATCH.is_idempotent()); - } - - #[test] - fn test_extension_method() { - assert_eq!(Method::from_str("WOW").unwrap(), "WOW"); - assert_eq!(Method::from_str("wOw!!").unwrap(), "wOw!!"); - - let long_method = "This_is_a_very_long_method.It_is_valid_but_unlikely."; - assert_eq!(Method::from_str(&long_method).unwrap(), long_method); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/request.rs s390-tools-2.33.1/rust-vendor/http/src/request.rs --- s390-tools-2.31.0/rust-vendor/http/src/request.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/request.rs 1970-01-01 
01:00:00.000000000 +0100 @@ -1,1087 +0,0 @@ -//! HTTP request types. -//! -//! This module contains structs related to HTTP requests, notably the -//! `Request` type itself as well as a builder to create requests. Typically -//! you'll import the `http::Request` type rather than reaching into this -//! module itself. -//! -//! # Examples -//! -//! Creating a `Request` to send -//! -//! ```no_run -//! use http::{Request, Response}; -//! -//! let mut request = Request::builder() -//! .uri("https://www.rust-lang.org/") -//! .header("User-Agent", "my-awesome-agent/1.0"); -//! -//! if needs_awesome_header() { -//! request = request.header("Awesome", "yes"); -//! } -//! -//! let response = send(request.body(()).unwrap()); -//! -//! # fn needs_awesome_header() -> bool { -//! # true -//! # } -//! # -//! fn send(req: Request<()>) -> Response<()> { -//! // ... -//! # panic!() -//! } -//! ``` -//! -//! Inspecting a request to see what was sent. -//! -//! ``` -//! use http::{Request, Response, StatusCode}; -//! -//! fn respond_to(req: Request<()>) -> http::Result> { -//! if req.uri() != "/awesome-url" { -//! return Response::builder() -//! .status(StatusCode::NOT_FOUND) -//! .body(()) -//! } -//! -//! let has_awesome_header = req.headers().contains_key("Awesome"); -//! let body = req.body(); -//! -//! // ... -//! # panic!() -//! } -//! ``` - -use std::any::Any; -use std::convert::{TryFrom}; -use std::fmt; - -use crate::header::{HeaderMap, HeaderName, HeaderValue}; -use crate::method::Method; -use crate::version::Version; -use crate::{Extensions, Result, Uri}; - -/// Represents an HTTP request. -/// -/// An HTTP request consists of a head and a potentially optional body. The body -/// component is generic, enabling arbitrary types to represent the HTTP body. -/// For example, the body could be `Vec`, a `Stream` of byte chunks, or a -/// value that has been deserialized. 
-/// -/// # Examples -/// -/// Creating a `Request` to send -/// -/// ```no_run -/// use http::{Request, Response}; -/// -/// let mut request = Request::builder() -/// .uri("https://www.rust-lang.org/") -/// .header("User-Agent", "my-awesome-agent/1.0"); -/// -/// if needs_awesome_header() { -/// request = request.header("Awesome", "yes"); -/// } -/// -/// let response = send(request.body(()).unwrap()); -/// -/// # fn needs_awesome_header() -> bool { -/// # true -/// # } -/// # -/// fn send(req: Request<()>) -> Response<()> { -/// // ... -/// # panic!() -/// } -/// ``` -/// -/// Inspecting a request to see what was sent. -/// -/// ``` -/// use http::{Request, Response, StatusCode}; -/// -/// fn respond_to(req: Request<()>) -> http::Result> { -/// if req.uri() != "/awesome-url" { -/// return Response::builder() -/// .status(StatusCode::NOT_FOUND) -/// .body(()) -/// } -/// -/// let has_awesome_header = req.headers().contains_key("Awesome"); -/// let body = req.body(); -/// -/// // ... -/// # panic!() -/// } -/// ``` -/// -/// Deserialize a request of bytes via json: -/// -/// ``` -/// # extern crate serde; -/// # extern crate serde_json; -/// # extern crate http; -/// use http::Request; -/// use serde::de; -/// -/// fn deserialize(req: Request>) -> serde_json::Result> -/// where for<'de> T: de::Deserialize<'de>, -/// { -/// let (parts, body) = req.into_parts(); -/// let body = serde_json::from_slice(&body)?; -/// Ok(Request::from_parts(parts, body)) -/// } -/// # -/// # fn main() {} -/// ``` -/// -/// Or alternatively, serialize the body of a request to json -/// -/// ``` -/// # extern crate serde; -/// # extern crate serde_json; -/// # extern crate http; -/// use http::Request; -/// use serde::ser; -/// -/// fn serialize(req: Request) -> serde_json::Result>> -/// where T: ser::Serialize, -/// { -/// let (parts, body) = req.into_parts(); -/// let body = serde_json::to_vec(&body)?; -/// Ok(Request::from_parts(parts, body)) -/// } -/// # -/// # fn main() {} -/// ``` 
-pub struct Request { - head: Parts, - body: T, -} - -/// Component parts of an HTTP `Request` -/// -/// The HTTP request head consists of a method, uri, version, and a set of -/// header fields. -pub struct Parts { - /// The request's method - pub method: Method, - - /// The request's URI - pub uri: Uri, - - /// The request's version - pub version: Version, - - /// The request's headers - pub headers: HeaderMap, - - /// The request's extensions - pub extensions: Extensions, - - _priv: (), -} - -/// An HTTP request builder -/// -/// This type can be used to construct an instance or `Request` -/// through a builder-like pattern. -#[derive(Debug)] -pub struct Builder { - inner: Result, -} - -impl Request<()> { - /// Creates a new builder-style object to manufacture a `Request` - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request = Request::builder() - /// .method("GET") - /// .uri("https://www.rust-lang.org/") - /// .header("X-Custom-Foo", "Bar") - /// .body(()) - /// .unwrap(); - /// ``` - #[inline] - pub fn builder() -> Builder { - Builder::new() - } - - /// Creates a new `Builder` initialized with a GET method and the given URI. - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. - /// - /// # Example - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::get("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn get(uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - - { - Builder::new().method(Method::GET).uri(uri) - } - - /// Creates a new `Builder` initialized with a PUT method and the given URI. - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. 
- /// - /// # Example - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::put("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn put(uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - - { - Builder::new().method(Method::PUT).uri(uri) - } - - /// Creates a new `Builder` initialized with a POST method and the given URI. - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. - /// - /// # Example - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::post("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn post(uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - - { - Builder::new().method(Method::POST).uri(uri) - } - - /// Creates a new `Builder` initialized with a DELETE method and the given URI. - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. - /// - /// # Example - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::delete("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn delete(uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - - { - Builder::new().method(Method::DELETE).uri(uri) - } - - /// Creates a new `Builder` initialized with an OPTIONS method and the given URI. - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. - /// - /// # Example - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::options("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// # assert_eq!(*request.method(), Method::OPTIONS); - /// ``` - pub fn options(uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - - { - Builder::new().method(Method::OPTIONS).uri(uri) - } - - /// Creates a new `Builder` initialized with a HEAD method and the given URI. 
- /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. - /// - /// # Example - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::head("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn head(uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - - { - Builder::new().method(Method::HEAD).uri(uri) - } - - /// Creates a new `Builder` initialized with a CONNECT method and the given URI. - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. - /// - /// # Example - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::connect("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn connect(uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - - { - Builder::new().method(Method::CONNECT).uri(uri) - } - - /// Creates a new `Builder` initialized with a PATCH method and the given URI. - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. - /// - /// # Example - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::patch("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn patch(uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - { - Builder::new().method(Method::PATCH).uri(uri) - } - - /// Creates a new `Builder` initialized with a TRACE method and the given URI. - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Request`. 
- /// - /// # Example - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::trace("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn trace(uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - { - Builder::new().method(Method::TRACE).uri(uri) - } -} - -impl Request { - /// Creates a new blank `Request` with the body - /// - /// The component parts of this request will be set to their default, e.g. - /// the GET method, no headers, etc. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request = Request::new("hello world"); - /// - /// assert_eq!(*request.method(), Method::GET); - /// assert_eq!(*request.body(), "hello world"); - /// ``` - #[inline] - pub fn new(body: T) -> Request { - Request { - head: Parts::new(), - body: body, - } - } - - /// Creates a new `Request` with the given components parts and body. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request = Request::new("hello world"); - /// let (mut parts, body) = request.into_parts(); - /// parts.method = Method::POST; - /// - /// let request = Request::from_parts(parts, body); - /// ``` - #[inline] - pub fn from_parts(parts: Parts, body: T) -> Request { - Request { - head: parts, - body: body, - } - } - - /// Returns a reference to the associated HTTP method. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request: Request<()> = Request::default(); - /// assert_eq!(*request.method(), Method::GET); - /// ``` - #[inline] - pub fn method(&self) -> &Method { - &self.head.method - } - - /// Returns a mutable reference to the associated HTTP method. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let mut request: Request<()> = Request::default(); - /// *request.method_mut() = Method::PUT; - /// assert_eq!(*request.method(), Method::PUT); - /// ``` - #[inline] - pub fn method_mut(&mut self) -> &mut Method { - &mut self.head.method - } - - /// Returns a reference to the associated URI. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request: Request<()> = Request::default(); - /// assert_eq!(*request.uri(), *"/"); - /// ``` - #[inline] - pub fn uri(&self) -> &Uri { - &self.head.uri - } - - /// Returns a mutable reference to the associated URI. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let mut request: Request<()> = Request::default(); - /// *request.uri_mut() = "/hello".parse().unwrap(); - /// assert_eq!(*request.uri(), *"/hello"); - /// ``` - #[inline] - pub fn uri_mut(&mut self) -> &mut Uri { - &mut self.head.uri - } - - /// Returns the associated version. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request: Request<()> = Request::default(); - /// assert_eq!(request.version(), Version::HTTP_11); - /// ``` - #[inline] - pub fn version(&self) -> Version { - self.head.version - } - - /// Returns a mutable reference to the associated version. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let mut request: Request<()> = Request::default(); - /// *request.version_mut() = Version::HTTP_2; - /// assert_eq!(request.version(), Version::HTTP_2); - /// ``` - #[inline] - pub fn version_mut(&mut self) -> &mut Version { - &mut self.head.version - } - - /// Returns a reference to the associated header field map. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request: Request<()> = Request::default(); - /// assert!(request.headers().is_empty()); - /// ``` - #[inline] - pub fn headers(&self) -> &HeaderMap { - &self.head.headers - } - - /// Returns a mutable reference to the associated header field map. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// # use http::header::*; - /// let mut request: Request<()> = Request::default(); - /// request.headers_mut().insert(HOST, HeaderValue::from_static("world")); - /// assert!(!request.headers().is_empty()); - /// ``` - #[inline] - pub fn headers_mut(&mut self) -> &mut HeaderMap { - &mut self.head.headers - } - - /// Returns a reference to the associated extensions. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request: Request<()> = Request::default(); - /// assert!(request.extensions().get::().is_none()); - /// ``` - #[inline] - pub fn extensions(&self) -> &Extensions { - &self.head.extensions - } - - /// Returns a mutable reference to the associated extensions. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// # use http::header::*; - /// let mut request: Request<()> = Request::default(); - /// request.extensions_mut().insert("hello"); - /// assert_eq!(request.extensions().get(), Some(&"hello")); - /// ``` - #[inline] - pub fn extensions_mut(&mut self) -> &mut Extensions { - &mut self.head.extensions - } - - /// Returns a reference to the associated HTTP body. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request: Request = Request::default(); - /// assert!(request.body().is_empty()); - /// ``` - #[inline] - pub fn body(&self) -> &T { - &self.body - } - - /// Returns a mutable reference to the associated HTTP body. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let mut request: Request = Request::default(); - /// request.body_mut().push_str("hello world"); - /// assert!(!request.body().is_empty()); - /// ``` - #[inline] - pub fn body_mut(&mut self) -> &mut T { - &mut self.body - } - - /// Consumes the request, returning just the body. - /// - /// # Examples - /// - /// ``` - /// # use http::Request; - /// let request = Request::new(10); - /// let body = request.into_body(); - /// assert_eq!(body, 10); - /// ``` - #[inline] - pub fn into_body(self) -> T { - self.body - } - - /// Consumes the request returning the head and body parts. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request = Request::new(()); - /// let (parts, body) = request.into_parts(); - /// assert_eq!(parts.method, Method::GET); - /// ``` - #[inline] - pub fn into_parts(self) -> (Parts, T) { - (self.head, self.body) - } - - /// Consumes the request returning a new request with body mapped to the - /// return type of the passed in function. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let request = Request::builder().body("some string").unwrap(); - /// let mapped_request: Request<&[u8]> = request.map(|b| { - /// assert_eq!(b, "some string"); - /// b.as_bytes() - /// }); - /// assert_eq!(mapped_request.body(), &"some string".as_bytes()); - /// ``` - #[inline] - pub fn map(self, f: F) -> Request - where - F: FnOnce(T) -> U, - { - Request { - body: f(self.body), - head: self.head, - } - } -} - -impl Default for Request { - fn default() -> Request { - Request::new(T::default()) - } -} - -impl fmt::Debug for Request { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Request") - .field("method", self.method()) - .field("uri", self.uri()) - .field("version", &self.version()) - .field("headers", self.headers()) - // omits Extensions because not useful - .field("body", self.body()) - .finish() - } -} - -impl Parts { - /// Creates a new default instance of `Parts` - fn new() -> Parts { - Parts { - method: Method::default(), - uri: Uri::default(), - version: Version::default(), - headers: HeaderMap::default(), - extensions: Extensions::default(), - _priv: (), - } - } -} - -impl fmt::Debug for Parts { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Parts") - .field("method", &self.method) - .field("uri", &self.uri) - .field("version", &self.version) - .field("headers", &self.headers) - // omits Extensions because not useful - // omits _priv because not useful - .finish() - } -} - -impl Builder { - /// Creates a new default instance of `Builder` to construct a `Request`. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let req = request::Builder::new() - /// .method("POST") - /// .body(()) - /// .unwrap(); - /// ``` - #[inline] - pub fn new() -> Builder { - Builder::default() - } - - /// Set the HTTP method for this request. - /// - /// By default this is `GET`. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let req = Request::builder() - /// .method("POST") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn method(self, method: T) -> Builder - where - Method: TryFrom, - >::Error: Into, - { - self.and_then(move |mut head| { - let method = TryFrom::try_from(method).map_err(Into::into)?; - head.method = method; - Ok(head) - }) - } - - /// Get the HTTP Method for this request. - /// - /// By default this is `GET`. If builder has error, returns None. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let mut req = Request::builder(); - /// assert_eq!(req.method_ref(),Some(&Method::GET)); - /// - /// req = req.method("POST"); - /// assert_eq!(req.method_ref(),Some(&Method::POST)); - /// ``` - pub fn method_ref(&self) -> Option<&Method> { - self.inner.as_ref().ok().map(|h| &h.method) - } - - /// Set the URI for this request. - /// - /// By default this is `/`. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let req = Request::builder() - /// .uri("https://www.rust-lang.org/") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn uri(self, uri: T) -> Builder - where - Uri: TryFrom, - >::Error: Into, - { - self.and_then(move |mut head| { - head.uri = TryFrom::try_from(uri).map_err(Into::into)?; - Ok(head) - }) - } - - /// Get the URI for this request - /// - /// By default this is `/`. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let mut req = Request::builder(); - /// assert_eq!(req.uri_ref().unwrap(), "/" ); - /// - /// req = req.uri("https://www.rust-lang.org/"); - /// assert_eq!(req.uri_ref().unwrap(), "https://www.rust-lang.org/" ); - /// ``` - pub fn uri_ref(&self) -> Option<&Uri> { - self.inner.as_ref().ok().map(|h| &h.uri) - } - - /// Set the HTTP version for this request. 
- /// - /// By default this is HTTP/1.1 - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let req = Request::builder() - /// .version(Version::HTTP_2) - /// .body(()) - /// .unwrap(); - /// ``` - pub fn version(self, version: Version) -> Builder { - self.and_then(move |mut head| { - head.version = version; - Ok(head) - }) - } - - /// Get the HTTP version for this request - /// - /// By default this is HTTP/1.1. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let mut req = Request::builder(); - /// assert_eq!(req.version_ref().unwrap(), &Version::HTTP_11 ); - /// - /// req = req.version(Version::HTTP_2); - /// assert_eq!(req.version_ref().unwrap(), &Version::HTTP_2 ); - /// ``` - pub fn version_ref(&self) -> Option<&Version> { - self.inner.as_ref().ok().map(|h| &h.version) - } - - /// Appends a header to this request builder. - /// - /// This function will append the provided key/value as a header to the - /// internal `HeaderMap` being constructed. Essentially this is equivalent - /// to calling `HeaderMap::append`. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// # use http::header::HeaderValue; - /// - /// let req = Request::builder() - /// .header("Accept", "text/html") - /// .header("X-Custom-Foo", "bar") - /// .body(()) - /// .unwrap(); - /// ``` - pub fn header(self, key: K, value: V) -> Builder - where - HeaderName: TryFrom, - >::Error: Into, - HeaderValue: TryFrom, - >::Error: Into, - { - self.and_then(move |mut head| { - let name = >::try_from(key).map_err(Into::into)?; - let value = >::try_from(value).map_err(Into::into)?; - head.headers.append(name, value); - Ok(head) - }) - } - - /// Get header on this request builder. 
- /// when builder has error returns None - /// - /// # Example - /// - /// ``` - /// # use http::Request; - /// let req = Request::builder() - /// .header("Accept", "text/html") - /// .header("X-Custom-Foo", "bar"); - /// let headers = req.headers_ref().unwrap(); - /// assert_eq!( headers["Accept"], "text/html" ); - /// assert_eq!( headers["X-Custom-Foo"], "bar" ); - /// ``` - pub fn headers_ref(&self) -> Option<&HeaderMap> { - self.inner.as_ref().ok().map(|h| &h.headers) - } - - /// Get headers on this request builder. - /// - /// When builder has error returns None. - /// - /// # Example - /// - /// ``` - /// # use http::{header::HeaderValue, Request}; - /// let mut req = Request::builder(); - /// { - /// let headers = req.headers_mut().unwrap(); - /// headers.insert("Accept", HeaderValue::from_static("text/html")); - /// headers.insert("X-Custom-Foo", HeaderValue::from_static("bar")); - /// } - /// let headers = req.headers_ref().unwrap(); - /// assert_eq!( headers["Accept"], "text/html" ); - /// assert_eq!( headers["X-Custom-Foo"], "bar" ); - /// ``` - pub fn headers_mut(&mut self) -> Option<&mut HeaderMap> { - self.inner.as_mut().ok().map(|h| &mut h.headers) - } - - /// Adds an extension to this builder - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let req = Request::builder() - /// .extension("My Extension") - /// .body(()) - /// .unwrap(); - /// - /// assert_eq!(req.extensions().get::<&'static str>(), - /// Some(&"My Extension")); - /// ``` - pub fn extension(self, extension: T) -> Builder - where - T: Any + Send + Sync + 'static, - { - self.and_then(move |mut head| { - head.extensions.insert(extension); - Ok(head) - }) - } - - /// Get a reference to the extensions for this request builder. - /// - /// If the builder has an error, this returns `None`. 
- /// - /// # Example - /// - /// ``` - /// # use http::Request; - /// let req = Request::builder().extension("My Extension").extension(5u32); - /// let extensions = req.extensions_ref().unwrap(); - /// assert_eq!(extensions.get::<&'static str>(), Some(&"My Extension")); - /// assert_eq!(extensions.get::(), Some(&5u32)); - /// ``` - pub fn extensions_ref(&self) -> Option<&Extensions> { - self.inner.as_ref().ok().map(|h| &h.extensions) - } - - /// Get a mutable reference to the extensions for this request builder. - /// - /// If the builder has an error, this returns `None`. - /// - /// # Example - /// - /// ``` - /// # use http::Request; - /// let mut req = Request::builder().extension("My Extension"); - /// let mut extensions = req.extensions_mut().unwrap(); - /// assert_eq!(extensions.get::<&'static str>(), Some(&"My Extension")); - /// extensions.insert(5u32); - /// assert_eq!(extensions.get::(), Some(&5u32)); - /// ``` - pub fn extensions_mut(&mut self) -> Option<&mut Extensions> { - self.inner.as_mut().ok().map(|h| &mut h.extensions) - } - - /// "Consumes" this builder, using the provided `body` to return a - /// constructed `Request`. - /// - /// # Errors - /// - /// This function may return an error if any previously configured argument - /// failed to parse or get converted to the internal representation. For - /// example if an invalid `head` was specified via `header("Foo", - /// "Bar\r\n")` the error will be returned when this function is called - /// rather than when `header` was called. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let request = Request::builder() - /// .body(()) - /// .unwrap(); - /// ``` - pub fn body(self, body: T) -> Result> { - self.inner.map(move |head| { - Request { - head, - body, - } - }) - } - - // private - - fn and_then(self, func: F) -> Self - where - F: FnOnce(Parts) -> Result - { - Builder { - inner: self.inner.and_then(func), - } - } -} - -impl Default for Builder { - #[inline] - fn default() -> Builder { - Builder { - inner: Ok(Parts::new()), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_can_map_a_body_from_one_type_to_another() { - let request = Request::builder().body("some string").unwrap(); - let mapped_request = request.map(|s| { - assert_eq!(s, "some string"); - 123u32 - }); - assert_eq!(mapped_request.body(), &123u32); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/response.rs s390-tools-2.33.1/rust-vendor/http/src/response.rs --- s390-tools-2.31.0/rust-vendor/http/src/response.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/response.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,793 +0,0 @@ -//! HTTP response types. -//! -//! This module contains structs related to HTTP responses, notably the -//! `Response` type itself as well as a builder to create responses. Typically -//! you'll import the `http::Response` type rather than reaching into this -//! module itself. -//! -//! # Examples -//! -//! Creating a `Response` to return -//! -//! ``` -//! use http::{Request, Response, StatusCode}; -//! -//! fn respond_to(req: Request<()>) -> http::Result> { -//! let mut builder = Response::builder() -//! .header("Foo", "Bar") -//! .status(StatusCode::OK); -//! -//! if req.headers().contains_key("Another-Header") { -//! builder = builder.header("Another-Header", "Ack"); -//! } -//! -//! builder.body(()) -//! } -//! ``` -//! -//! A simple 404 handler -//! -//! ``` -//! use http::{Request, Response, StatusCode}; -//! -//! 
fn not_found(_req: Request<()>) -> http::Result> { -//! Response::builder() -//! .status(StatusCode::NOT_FOUND) -//! .body(()) -//! } -//! ``` -//! -//! Or otherwise inspecting the result of a request: -//! -//! ```no_run -//! use http::{Request, Response}; -//! -//! fn get(url: &str) -> http::Result> { -//! // ... -//! # panic!() -//! } -//! -//! let response = get("https://www.rust-lang.org/").unwrap(); -//! -//! if !response.status().is_success() { -//! panic!("failed to get a successful response status!"); -//! } -//! -//! if let Some(date) = response.headers().get("Date") { -//! // we've got a `Date` header! -//! } -//! -//! let body = response.body(); -//! // ... -//! ``` - -use std::any::Any; -use std::convert::TryFrom; -use std::fmt; - -use crate::header::{HeaderMap, HeaderName, HeaderValue}; -use crate::status::StatusCode; -use crate::version::Version; -use crate::{Extensions, Result}; - -/// Represents an HTTP response -/// -/// An HTTP response consists of a head and a potentially optional body. The body -/// component is generic, enabling arbitrary types to represent the HTTP body. -/// For example, the body could be `Vec`, a `Stream` of byte chunks, or a -/// value that has been deserialized. -/// -/// Typically you'll work with responses on the client side as the result of -/// sending a `Request` and on the server you'll be generating a `Response` to -/// send back to the client. 
-/// -/// # Examples -/// -/// Creating a `Response` to return -/// -/// ``` -/// use http::{Request, Response, StatusCode}; -/// -/// fn respond_to(req: Request<()>) -> http::Result> { -/// let mut builder = Response::builder() -/// .header("Foo", "Bar") -/// .status(StatusCode::OK); -/// -/// if req.headers().contains_key("Another-Header") { -/// builder = builder.header("Another-Header", "Ack"); -/// } -/// -/// builder.body(()) -/// } -/// ``` -/// -/// A simple 404 handler -/// -/// ``` -/// use http::{Request, Response, StatusCode}; -/// -/// fn not_found(_req: Request<()>) -> http::Result> { -/// Response::builder() -/// .status(StatusCode::NOT_FOUND) -/// .body(()) -/// } -/// ``` -/// -/// Or otherwise inspecting the result of a request: -/// -/// ```no_run -/// use http::{Request, Response}; -/// -/// fn get(url: &str) -> http::Result> { -/// // ... -/// # panic!() -/// } -/// -/// let response = get("https://www.rust-lang.org/").unwrap(); -/// -/// if !response.status().is_success() { -/// panic!("failed to get a successful response status!"); -/// } -/// -/// if let Some(date) = response.headers().get("Date") { -/// // we've got a `Date` header! -/// } -/// -/// let body = response.body(); -/// // ... 
-/// ``` -/// -/// Deserialize a response of bytes via json: -/// -/// ``` -/// # extern crate serde; -/// # extern crate serde_json; -/// # extern crate http; -/// use http::Response; -/// use serde::de; -/// -/// fn deserialize(res: Response>) -> serde_json::Result> -/// where for<'de> T: de::Deserialize<'de>, -/// { -/// let (parts, body) = res.into_parts(); -/// let body = serde_json::from_slice(&body)?; -/// Ok(Response::from_parts(parts, body)) -/// } -/// # -/// # fn main() {} -/// ``` -/// -/// Or alternatively, serialize the body of a response to json -/// -/// ``` -/// # extern crate serde; -/// # extern crate serde_json; -/// # extern crate http; -/// use http::Response; -/// use serde::ser; -/// -/// fn serialize(res: Response) -> serde_json::Result>> -/// where T: ser::Serialize, -/// { -/// let (parts, body) = res.into_parts(); -/// let body = serde_json::to_vec(&body)?; -/// Ok(Response::from_parts(parts, body)) -/// } -/// # -/// # fn main() {} -/// ``` -pub struct Response { - head: Parts, - body: T, -} - -/// Component parts of an HTTP `Response` -/// -/// The HTTP response head consists of a status, version, and a set of -/// header fields. -pub struct Parts { - /// The response's status - pub status: StatusCode, - - /// The response's version - pub version: Version, - - /// The response's headers - pub headers: HeaderMap, - - /// The response's extensions - pub extensions: Extensions, - - _priv: (), -} - -/// An HTTP response builder -/// -/// This type can be used to construct an instance of `Response` through a -/// builder-like pattern. -#[derive(Debug)] -pub struct Builder { - inner: Result, -} - -impl Response<()> { - /// Creates a new builder-style object to manufacture a `Response` - /// - /// This method returns an instance of `Builder` which can be used to - /// create a `Response`. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response = Response::builder() - /// .status(200) - /// .header("X-Custom-Foo", "Bar") - /// .body(()) - /// .unwrap(); - /// ``` - #[inline] - pub fn builder() -> Builder { - Builder::new() - } -} - -impl Response { - /// Creates a new blank `Response` with the body - /// - /// The component ports of this response will be set to their default, e.g. - /// the ok status, no headers, etc. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response = Response::new("hello world"); - /// - /// assert_eq!(response.status(), StatusCode::OK); - /// assert_eq!(*response.body(), "hello world"); - /// ``` - #[inline] - pub fn new(body: T) -> Response { - Response { - head: Parts::new(), - body: body, - } - } - - /// Creates a new `Response` with the given head and body - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response = Response::new("hello world"); - /// let (mut parts, body) = response.into_parts(); - /// - /// parts.status = StatusCode::BAD_REQUEST; - /// let response = Response::from_parts(parts, body); - /// - /// assert_eq!(response.status(), StatusCode::BAD_REQUEST); - /// assert_eq!(*response.body(), "hello world"); - /// ``` - #[inline] - pub fn from_parts(parts: Parts, body: T) -> Response { - Response { - head: parts, - body: body, - } - } - - /// Returns the `StatusCode`. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response: Response<()> = Response::default(); - /// assert_eq!(response.status(), StatusCode::OK); - /// ``` - #[inline] - pub fn status(&self) -> StatusCode { - self.head.status - } - - /// Returns a mutable reference to the associated `StatusCode`. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let mut response: Response<()> = Response::default(); - /// *response.status_mut() = StatusCode::CREATED; - /// assert_eq!(response.status(), StatusCode::CREATED); - /// ``` - #[inline] - pub fn status_mut(&mut self) -> &mut StatusCode { - &mut self.head.status - } - - /// Returns a reference to the associated version. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response: Response<()> = Response::default(); - /// assert_eq!(response.version(), Version::HTTP_11); - /// ``` - #[inline] - pub fn version(&self) -> Version { - self.head.version - } - - /// Returns a mutable reference to the associated version. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let mut response: Response<()> = Response::default(); - /// *response.version_mut() = Version::HTTP_2; - /// assert_eq!(response.version(), Version::HTTP_2); - /// ``` - #[inline] - pub fn version_mut(&mut self) -> &mut Version { - &mut self.head.version - } - - /// Returns a reference to the associated header field map. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response: Response<()> = Response::default(); - /// assert!(response.headers().is_empty()); - /// ``` - #[inline] - pub fn headers(&self) -> &HeaderMap { - &self.head.headers - } - - /// Returns a mutable reference to the associated header field map. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// # use http::header::*; - /// let mut response: Response<()> = Response::default(); - /// response.headers_mut().insert(HOST, HeaderValue::from_static("world")); - /// assert!(!response.headers().is_empty()); - /// ``` - #[inline] - pub fn headers_mut(&mut self) -> &mut HeaderMap { - &mut self.head.headers - } - - /// Returns a reference to the associated extensions. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response: Response<()> = Response::default(); - /// assert!(response.extensions().get::().is_none()); - /// ``` - #[inline] - pub fn extensions(&self) -> &Extensions { - &self.head.extensions - } - - /// Returns a mutable reference to the associated extensions. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// # use http::header::*; - /// let mut response: Response<()> = Response::default(); - /// response.extensions_mut().insert("hello"); - /// assert_eq!(response.extensions().get(), Some(&"hello")); - /// ``` - #[inline] - pub fn extensions_mut(&mut self) -> &mut Extensions { - &mut self.head.extensions - } - - /// Returns a reference to the associated HTTP body. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response: Response = Response::default(); - /// assert!(response.body().is_empty()); - /// ``` - #[inline] - pub fn body(&self) -> &T { - &self.body - } - - /// Returns a mutable reference to the associated HTTP body. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let mut response: Response = Response::default(); - /// response.body_mut().push_str("hello world"); - /// assert!(!response.body().is_empty()); - /// ``` - #[inline] - pub fn body_mut(&mut self) -> &mut T { - &mut self.body - } - - /// Consumes the response, returning just the body. - /// - /// # Examples - /// - /// ``` - /// # use http::Response; - /// let response = Response::new(10); - /// let body = response.into_body(); - /// assert_eq!(body, 10); - /// ``` - #[inline] - pub fn into_body(self) -> T { - self.body - } - - /// Consumes the response returning the head and body parts. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response: Response<()> = Response::default(); - /// let (parts, body) = response.into_parts(); - /// assert_eq!(parts.status, StatusCode::OK); - /// ``` - #[inline] - pub fn into_parts(self) -> (Parts, T) { - (self.head, self.body) - } - - /// Consumes the response returning a new response with body mapped to the - /// return type of the passed in function. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// let response = Response::builder().body("some string").unwrap(); - /// let mapped_response: Response<&[u8]> = response.map(|b| { - /// assert_eq!(b, "some string"); - /// b.as_bytes() - /// }); - /// assert_eq!(mapped_response.body(), &"some string".as_bytes()); - /// ``` - #[inline] - pub fn map(self, f: F) -> Response - where - F: FnOnce(T) -> U, - { - Response { - body: f(self.body), - head: self.head, - } - } -} - -impl Default for Response { - #[inline] - fn default() -> Response { - Response::new(T::default()) - } -} - -impl fmt::Debug for Response { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Response") - .field("status", &self.status()) - .field("version", &self.version()) - .field("headers", self.headers()) - // omits Extensions because not useful - .field("body", self.body()) - .finish() - } -} - -impl Parts { - /// Creates a new default instance of `Parts` - fn new() -> Parts { - Parts { - status: StatusCode::default(), - version: Version::default(), - headers: HeaderMap::default(), - extensions: Extensions::default(), - _priv: (), - } - } -} - -impl fmt::Debug for Parts { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Parts") - .field("status", &self.status) - .field("version", &self.version) - .field("headers", &self.headers) - // omits Extensions because not useful - // omits _priv because not useful - .finish() - } -} - -impl Builder { - /// Creates a new default instance of `Builder` to construct 
either a - /// `Head` or a `Response`. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let response = response::Builder::new() - /// .status(200) - /// .body(()) - /// .unwrap(); - /// ``` - #[inline] - pub fn new() -> Builder { - Builder::default() - } - - /// Set the HTTP status for this response. - /// - /// By default this is `200`. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let response = Response::builder() - /// .status(200) - /// .body(()) - /// .unwrap(); - /// ``` - pub fn status(self, status: T) -> Builder - where - StatusCode: TryFrom, - >::Error: Into, - { - self.and_then(move |mut head| { - head.status = TryFrom::try_from(status).map_err(Into::into)?; - Ok(head) - }) - } - - /// Set the HTTP version for this response. - /// - /// By default this is HTTP/1.1 - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let response = Response::builder() - /// .version(Version::HTTP_2) - /// .body(()) - /// .unwrap(); - /// ``` - pub fn version(self, version: Version) -> Builder { - self.and_then(move |mut head| { - head.version = version; - Ok(head) - }) - } - - /// Appends a header to this response builder. - /// - /// This function will append the provided key/value as a header to the - /// internal `HeaderMap` being constructed. Essentially this is equivalent - /// to calling `HeaderMap::append`. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// # use http::header::HeaderValue; - /// - /// let response = Response::builder() - /// .header("Content-Type", "text/html") - /// .header("X-Custom-Foo", "bar") - /// .header("content-length", 0) - /// .body(()) - /// .unwrap(); - /// ``` - pub fn header(self, key: K, value: V) -> Builder - where - HeaderName: TryFrom, - >::Error: Into, - HeaderValue: TryFrom, - >::Error: Into, - { - self.and_then(move |mut head| { - let name = >::try_from(key).map_err(Into::into)?; - let value = >::try_from(value).map_err(Into::into)?; - head.headers.append(name, value); - Ok(head) - }) - } - - /// Get header on this response builder. - /// - /// When builder has error returns None. - /// - /// # Example - /// - /// ``` - /// # use http::Response; - /// # use http::header::HeaderValue; - /// let res = Response::builder() - /// .header("Accept", "text/html") - /// .header("X-Custom-Foo", "bar"); - /// let headers = res.headers_ref().unwrap(); - /// assert_eq!( headers["Accept"], "text/html" ); - /// assert_eq!( headers["X-Custom-Foo"], "bar" ); - /// ``` - pub fn headers_ref(&self) -> Option<&HeaderMap> { - self.inner.as_ref().ok().map(|h| &h.headers) - } - - /// Get header on this response builder. 
- /// when builder has error returns None - /// - /// # Example - /// - /// ``` - /// # use http::*; - /// # use http::header::HeaderValue; - /// # use http::response::Builder; - /// let mut res = Response::builder(); - /// { - /// let headers = res.headers_mut().unwrap(); - /// headers.insert("Accept", HeaderValue::from_static("text/html")); - /// headers.insert("X-Custom-Foo", HeaderValue::from_static("bar")); - /// } - /// let headers = res.headers_ref().unwrap(); - /// assert_eq!( headers["Accept"], "text/html" ); - /// assert_eq!( headers["X-Custom-Foo"], "bar" ); - /// ``` - pub fn headers_mut(&mut self) -> Option<&mut HeaderMap> { - self.inner.as_mut().ok().map(|h| &mut h.headers) - } - - /// Adds an extension to this builder - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let response = Response::builder() - /// .extension("My Extension") - /// .body(()) - /// .unwrap(); - /// - /// assert_eq!(response.extensions().get::<&'static str>(), - /// Some(&"My Extension")); - /// ``` - pub fn extension(self, extension: T) -> Builder - where - T: Any + Send + Sync + 'static, - { - self.and_then(move |mut head| { - head.extensions.insert(extension); - Ok(head) - }) - } - - /// Get a reference to the extensions for this response builder. - /// - /// If the builder has an error, this returns `None`. - /// - /// # Example - /// - /// ``` - /// # use http::Response; - /// let res = Response::builder().extension("My Extension").extension(5u32); - /// let extensions = res.extensions_ref().unwrap(); - /// assert_eq!(extensions.get::<&'static str>(), Some(&"My Extension")); - /// assert_eq!(extensions.get::(), Some(&5u32)); - /// ``` - pub fn extensions_ref(&self) -> Option<&Extensions> { - self.inner.as_ref().ok().map(|h| &h.extensions) - } - - /// Get a mutable reference to the extensions for this response builder. - /// - /// If the builder has an error, this returns `None`. 
- /// - /// # Example - /// - /// ``` - /// # use http::Response; - /// let mut res = Response::builder().extension("My Extension"); - /// let mut extensions = res.extensions_mut().unwrap(); - /// assert_eq!(extensions.get::<&'static str>(), Some(&"My Extension")); - /// extensions.insert(5u32); - /// assert_eq!(extensions.get::(), Some(&5u32)); - /// ``` - pub fn extensions_mut(&mut self) -> Option<&mut Extensions> { - self.inner.as_mut().ok().map(|h| &mut h.extensions) - } - - /// "Consumes" this builder, using the provided `body` to return a - /// constructed `Response`. - /// - /// # Errors - /// - /// This function may return an error if any previously configured argument - /// failed to parse or get converted to the internal representation. For - /// example if an invalid `head` was specified via `header("Foo", - /// "Bar\r\n")` the error will be returned when this function is called - /// rather than when `header` was called. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let response = Response::builder() - /// .body(()) - /// .unwrap(); - /// ``` - pub fn body(self, body: T) -> Result> { - self.inner.map(move |head| { - Response { - head, - body, - } - }) - } - - // private - - fn and_then(self, func: F) -> Self - where - F: FnOnce(Parts) -> Result - { - Builder { - inner: self.inner.and_then(func), - } - } -} - -impl Default for Builder { - #[inline] - fn default() -> Builder { - Builder { - inner: Ok(Parts::new()), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_can_map_a_body_from_one_type_to_another() { - let response = Response::builder().body("some string").unwrap(); - let mapped_response = response.map(|s| { - assert_eq!(s, "some string"); - 123u32 - }); - assert_eq!(mapped_response.body(), &123u32); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/status.rs s390-tools-2.33.1/rust-vendor/http/src/status.rs --- s390-tools-2.31.0/rust-vendor/http/src/status.rs 2024-02-06 12:28:08.000000000 
+0100 +++ s390-tools-2.33.1/rust-vendor/http/src/status.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,588 +0,0 @@ -//! HTTP status codes -//! -//! This module contains HTTP-status code related structs an errors. The main -//! type in this module is `StatusCode` which is not intended to be used through -//! this module but rather the `http::StatusCode` type. -//! -//! # Examples -//! -//! ``` -//! use http::StatusCode; -//! -//! assert_eq!(StatusCode::from_u16(200).unwrap(), StatusCode::OK); -//! assert_eq!(StatusCode::NOT_FOUND, 404); -//! assert!(StatusCode::OK.is_success()); -//! ``` - -use std::convert::TryFrom; -use std::num::NonZeroU16; -use std::error::Error; -use std::fmt; -use std::str::FromStr; - -/// An HTTP status code (`status-code` in RFC 7230 et al.). -/// -/// Constants are provided for known status codes, including those in the IANA -/// [HTTP Status Code Registry]( -/// https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml). -/// -/// Status code values in the range 100-999 (inclusive) are supported by this -/// type. Values in the range 100-599 are semantically classified by the most -/// significant digit. See [`StatusCode::is_success`], etc. Values above 599 -/// are unclassified but allowed for legacy compatibility, though their use is -/// discouraged. Applications may interpret such values as protocol errors. -/// -/// # Examples -/// -/// ``` -/// use http::StatusCode; -/// -/// assert_eq!(StatusCode::from_u16(200).unwrap(), StatusCode::OK); -/// assert_eq!(StatusCode::NOT_FOUND.as_u16(), 404); -/// assert!(StatusCode::OK.is_success()); -/// ``` -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct StatusCode(NonZeroU16); - -/// A possible error value when converting a `StatusCode` from a `u16` or `&str` -/// -/// This error indicates that the supplied input was not a valid number, was less -/// than 100, or was greater than 999. 
-pub struct InvalidStatusCode { - _priv: (), -} - -impl StatusCode { - /// Converts a u16 to a status code. - /// - /// The function validates the correctness of the supplied u16. It must be - /// greater or equal to 100 and less than 1000. - /// - /// # Example - /// - /// ``` - /// use http::StatusCode; - /// - /// let ok = StatusCode::from_u16(200).unwrap(); - /// assert_eq!(ok, StatusCode::OK); - /// - /// let err = StatusCode::from_u16(99); - /// assert!(err.is_err()); - /// ``` - #[inline] - pub fn from_u16(src: u16) -> Result { - if src < 100 || src >= 1000 { - return Err(InvalidStatusCode::new()); - } - - NonZeroU16::new(src) - .map(StatusCode) - .ok_or_else(InvalidStatusCode::new) - } - - /// Converts a &[u8] to a status code - pub fn from_bytes(src: &[u8]) -> Result { - if src.len() != 3 { - return Err(InvalidStatusCode::new()); - } - - let a = src[0].wrapping_sub(b'0') as u16; - let b = src[1].wrapping_sub(b'0') as u16; - let c = src[2].wrapping_sub(b'0') as u16; - - if a == 0 || a > 9 || b > 9 || c > 9 { - return Err(InvalidStatusCode::new()); - } - - let status = (a * 100) + (b * 10) + c; - NonZeroU16::new(status) - .map(StatusCode) - .ok_or_else(InvalidStatusCode::new) - } - - /// Returns the `u16` corresponding to this `StatusCode`. - /// - /// # Note - /// - /// This is the same as the `From` implementation, but - /// included as an inherent method because that implementation doesn't - /// appear in rustdocs, as well as a way to force the type instead of - /// relying on inference. - /// - /// # Example - /// - /// ``` - /// let status = http::StatusCode::OK; - /// assert_eq!(status.as_u16(), 200); - /// ``` - #[inline] - pub fn as_u16(&self) -> u16 { - (*self).into() - } - - /// Returns a &str representation of the `StatusCode` - /// - /// The return value only includes a numerical representation of the - /// status code. The canonical reason is not included. 
- /// - /// # Example - /// - /// ``` - /// let status = http::StatusCode::OK; - /// assert_eq!(status.as_str(), "200"); - /// ``` - #[inline] - pub fn as_str(&self) -> &str { - let offset = (self.0.get() - 100) as usize; - let offset = offset * 3; - - // Invariant: self has checked range [100, 999] and CODE_DIGITS is - // ASCII-only, of length 900 * 3 = 2700 bytes - - #[cfg(debug_assertions)] - { &CODE_DIGITS[offset..offset+3] } - - #[cfg(not(debug_assertions))] - unsafe { CODE_DIGITS.get_unchecked(offset..offset+3) } - } - - /// Get the standardised `reason-phrase` for this status code. - /// - /// This is mostly here for servers writing responses, but could potentially have application - /// at other times. - /// - /// The reason phrase is defined as being exclusively for human readers. You should avoid - /// deriving any meaning from it at all costs. - /// - /// Bear in mind also that in HTTP/2.0 and HTTP/3.0 the reason phrase is abolished from - /// transmission, and so this canonical reason phrase really is the only reason phrase you’ll - /// find. - /// - /// # Example - /// - /// ``` - /// let status = http::StatusCode::OK; - /// assert_eq!(status.canonical_reason(), Some("OK")); - /// ``` - pub fn canonical_reason(&self) -> Option<&'static str> { - canonical_reason(self.0.get()) - } - - /// Check if status is within 100-199. - #[inline] - pub fn is_informational(&self) -> bool { - 200 > self.0.get() && self.0.get() >= 100 - } - - /// Check if status is within 200-299. - #[inline] - pub fn is_success(&self) -> bool { - 300 > self.0.get() && self.0.get() >= 200 - } - - /// Check if status is within 300-399. - #[inline] - pub fn is_redirection(&self) -> bool { - 400 > self.0.get() && self.0.get() >= 300 - } - - /// Check if status is within 400-499. - #[inline] - pub fn is_client_error(&self) -> bool { - 500 > self.0.get() && self.0.get() >= 400 - } - - /// Check if status is within 500-599. 
- #[inline] - pub fn is_server_error(&self) -> bool { - 600 > self.0.get() && self.0.get() >= 500 - } -} - -impl fmt::Debug for StatusCode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&self.0, f) - } -} - -/// Formats the status code, *including* the canonical reason. -/// -/// # Example -/// -/// ``` -/// # use http::StatusCode; -/// assert_eq!(format!("{}", StatusCode::OK), "200 OK"); -/// ``` -impl fmt::Display for StatusCode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{} {}", - u16::from(*self), - self.canonical_reason().unwrap_or("") - ) - } -} - -impl Default for StatusCode { - #[inline] - fn default() -> StatusCode { - StatusCode::OK - } -} - -impl PartialEq for StatusCode { - #[inline] - fn eq(&self, other: &u16) -> bool { - self.as_u16() == *other - } -} - -impl PartialEq for u16 { - #[inline] - fn eq(&self, other: &StatusCode) -> bool { - *self == other.as_u16() - } -} - -impl From for u16 { - #[inline] - fn from(status: StatusCode) -> u16 { - status.0.get() - } -} - -impl FromStr for StatusCode { - type Err = InvalidStatusCode; - - fn from_str(s: &str) -> Result { - StatusCode::from_bytes(s.as_ref()) - } -} - -impl<'a> From<&'a StatusCode> for StatusCode { - #[inline] - fn from(t: &'a StatusCode) -> Self { - t.clone() - } -} - -impl<'a> TryFrom<&'a [u8]> for StatusCode { - type Error = InvalidStatusCode; - - #[inline] - fn try_from(t: &'a [u8]) -> Result { - StatusCode::from_bytes(t) - } -} - -impl<'a> TryFrom<&'a str> for StatusCode { - type Error = InvalidStatusCode; - - #[inline] - fn try_from(t: &'a str) -> Result { - t.parse() - } -} - -impl TryFrom for StatusCode { - type Error = InvalidStatusCode; - - #[inline] - fn try_from(t: u16) -> Result { - StatusCode::from_u16(t) - } -} - -macro_rules! 
status_codes { - ( - $( - $(#[$docs:meta])* - ($num:expr, $konst:ident, $phrase:expr); - )+ - ) => { - impl StatusCode { - $( - $(#[$docs])* - pub const $konst: StatusCode = StatusCode(unsafe { NonZeroU16::new_unchecked($num) }); - )+ - - } - - fn canonical_reason(num: u16) -> Option<&'static str> { - match num { - $( - $num => Some($phrase), - )+ - _ => None - } - } - } -} - -status_codes! { - /// 100 Continue - /// [[RFC7231, Section 6.2.1](https://tools.ietf.org/html/rfc7231#section-6.2.1)] - (100, CONTINUE, "Continue"); - /// 101 Switching Protocols - /// [[RFC7231, Section 6.2.2](https://tools.ietf.org/html/rfc7231#section-6.2.2)] - (101, SWITCHING_PROTOCOLS, "Switching Protocols"); - /// 102 Processing - /// [[RFC2518](https://tools.ietf.org/html/rfc2518)] - (102, PROCESSING, "Processing"); - - /// 200 OK - /// [[RFC7231, Section 6.3.1](https://tools.ietf.org/html/rfc7231#section-6.3.1)] - (200, OK, "OK"); - /// 201 Created - /// [[RFC7231, Section 6.3.2](https://tools.ietf.org/html/rfc7231#section-6.3.2)] - (201, CREATED, "Created"); - /// 202 Accepted - /// [[RFC7231, Section 6.3.3](https://tools.ietf.org/html/rfc7231#section-6.3.3)] - (202, ACCEPTED, "Accepted"); - /// 203 Non-Authoritative Information - /// [[RFC7231, Section 6.3.4](https://tools.ietf.org/html/rfc7231#section-6.3.4)] - (203, NON_AUTHORITATIVE_INFORMATION, "Non Authoritative Information"); - /// 204 No Content - /// [[RFC7231, Section 6.3.5](https://tools.ietf.org/html/rfc7231#section-6.3.5)] - (204, NO_CONTENT, "No Content"); - /// 205 Reset Content - /// [[RFC7231, Section 6.3.6](https://tools.ietf.org/html/rfc7231#section-6.3.6)] - (205, RESET_CONTENT, "Reset Content"); - /// 206 Partial Content - /// [[RFC7233, Section 4.1](https://tools.ietf.org/html/rfc7233#section-4.1)] - (206, PARTIAL_CONTENT, "Partial Content"); - /// 207 Multi-Status - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - (207, MULTI_STATUS, "Multi-Status"); - /// 208 Already Reported - /// 
[[RFC5842](https://tools.ietf.org/html/rfc5842)] - (208, ALREADY_REPORTED, "Already Reported"); - - /// 226 IM Used - /// [[RFC3229](https://tools.ietf.org/html/rfc3229)] - (226, IM_USED, "IM Used"); - - /// 300 Multiple Choices - /// [[RFC7231, Section 6.4.1](https://tools.ietf.org/html/rfc7231#section-6.4.1)] - (300, MULTIPLE_CHOICES, "Multiple Choices"); - /// 301 Moved Permanently - /// [[RFC7231, Section 6.4.2](https://tools.ietf.org/html/rfc7231#section-6.4.2)] - (301, MOVED_PERMANENTLY, "Moved Permanently"); - /// 302 Found - /// [[RFC7231, Section 6.4.3](https://tools.ietf.org/html/rfc7231#section-6.4.3)] - (302, FOUND, "Found"); - /// 303 See Other - /// [[RFC7231, Section 6.4.4](https://tools.ietf.org/html/rfc7231#section-6.4.4)] - (303, SEE_OTHER, "See Other"); - /// 304 Not Modified - /// [[RFC7232, Section 4.1](https://tools.ietf.org/html/rfc7232#section-4.1)] - (304, NOT_MODIFIED, "Not Modified"); - /// 305 Use Proxy - /// [[RFC7231, Section 6.4.5](https://tools.ietf.org/html/rfc7231#section-6.4.5)] - (305, USE_PROXY, "Use Proxy"); - /// 307 Temporary Redirect - /// [[RFC7231, Section 6.4.7](https://tools.ietf.org/html/rfc7231#section-6.4.7)] - (307, TEMPORARY_REDIRECT, "Temporary Redirect"); - /// 308 Permanent Redirect - /// [[RFC7238](https://tools.ietf.org/html/rfc7238)] - (308, PERMANENT_REDIRECT, "Permanent Redirect"); - - /// 400 Bad Request - /// [[RFC7231, Section 6.5.1](https://tools.ietf.org/html/rfc7231#section-6.5.1)] - (400, BAD_REQUEST, "Bad Request"); - /// 401 Unauthorized - /// [[RFC7235, Section 3.1](https://tools.ietf.org/html/rfc7235#section-3.1)] - (401, UNAUTHORIZED, "Unauthorized"); - /// 402 Payment Required - /// [[RFC7231, Section 6.5.2](https://tools.ietf.org/html/rfc7231#section-6.5.2)] - (402, PAYMENT_REQUIRED, "Payment Required"); - /// 403 Forbidden - /// [[RFC7231, Section 6.5.3](https://tools.ietf.org/html/rfc7231#section-6.5.3)] - (403, FORBIDDEN, "Forbidden"); - /// 404 Not Found - /// [[RFC7231, Section 
6.5.4](https://tools.ietf.org/html/rfc7231#section-6.5.4)] - (404, NOT_FOUND, "Not Found"); - /// 405 Method Not Allowed - /// [[RFC7231, Section 6.5.5](https://tools.ietf.org/html/rfc7231#section-6.5.5)] - (405, METHOD_NOT_ALLOWED, "Method Not Allowed"); - /// 406 Not Acceptable - /// [[RFC7231, Section 6.5.6](https://tools.ietf.org/html/rfc7231#section-6.5.6)] - (406, NOT_ACCEPTABLE, "Not Acceptable"); - /// 407 Proxy Authentication Required - /// [[RFC7235, Section 3.2](https://tools.ietf.org/html/rfc7235#section-3.2)] - (407, PROXY_AUTHENTICATION_REQUIRED, "Proxy Authentication Required"); - /// 408 Request Timeout - /// [[RFC7231, Section 6.5.7](https://tools.ietf.org/html/rfc7231#section-6.5.7)] - (408, REQUEST_TIMEOUT, "Request Timeout"); - /// 409 Conflict - /// [[RFC7231, Section 6.5.8](https://tools.ietf.org/html/rfc7231#section-6.5.8)] - (409, CONFLICT, "Conflict"); - /// 410 Gone - /// [[RFC7231, Section 6.5.9](https://tools.ietf.org/html/rfc7231#section-6.5.9)] - (410, GONE, "Gone"); - /// 411 Length Required - /// [[RFC7231, Section 6.5.10](https://tools.ietf.org/html/rfc7231#section-6.5.10)] - (411, LENGTH_REQUIRED, "Length Required"); - /// 412 Precondition Failed - /// [[RFC7232, Section 4.2](https://tools.ietf.org/html/rfc7232#section-4.2)] - (412, PRECONDITION_FAILED, "Precondition Failed"); - /// 413 Payload Too Large - /// [[RFC7231, Section 6.5.11](https://tools.ietf.org/html/rfc7231#section-6.5.11)] - (413, PAYLOAD_TOO_LARGE, "Payload Too Large"); - /// 414 URI Too Long - /// [[RFC7231, Section 6.5.12](https://tools.ietf.org/html/rfc7231#section-6.5.12)] - (414, URI_TOO_LONG, "URI Too Long"); - /// 415 Unsupported Media Type - /// [[RFC7231, Section 6.5.13](https://tools.ietf.org/html/rfc7231#section-6.5.13)] - (415, UNSUPPORTED_MEDIA_TYPE, "Unsupported Media Type"); - /// 416 Range Not Satisfiable - /// [[RFC7233, Section 4.4](https://tools.ietf.org/html/rfc7233#section-4.4)] - (416, RANGE_NOT_SATISFIABLE, "Range Not Satisfiable"); - /// 417 
Expectation Failed - /// [[RFC7231, Section 6.5.14](https://tools.ietf.org/html/rfc7231#section-6.5.14)] - (417, EXPECTATION_FAILED, "Expectation Failed"); - /// 418 I'm a teapot - /// [curiously not registered by IANA but [RFC2324](https://tools.ietf.org/html/rfc2324)] - (418, IM_A_TEAPOT, "I'm a teapot"); - - /// 421 Misdirected Request - /// [RFC7540, Section 9.1.2](http://tools.ietf.org/html/rfc7540#section-9.1.2) - (421, MISDIRECTED_REQUEST, "Misdirected Request"); - /// 422 Unprocessable Entity - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - (422, UNPROCESSABLE_ENTITY, "Unprocessable Entity"); - /// 423 Locked - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - (423, LOCKED, "Locked"); - /// 424 Failed Dependency - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - (424, FAILED_DEPENDENCY, "Failed Dependency"); - - /// 426 Upgrade Required - /// [[RFC7231, Section 6.5.15](https://tools.ietf.org/html/rfc7231#section-6.5.15)] - (426, UPGRADE_REQUIRED, "Upgrade Required"); - - /// 428 Precondition Required - /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] - (428, PRECONDITION_REQUIRED, "Precondition Required"); - /// 429 Too Many Requests - /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] - (429, TOO_MANY_REQUESTS, "Too Many Requests"); - - /// 431 Request Header Fields Too Large - /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] - (431, REQUEST_HEADER_FIELDS_TOO_LARGE, "Request Header Fields Too Large"); - - /// 451 Unavailable For Legal Reasons - /// [[RFC7725](http://tools.ietf.org/html/rfc7725)] - (451, UNAVAILABLE_FOR_LEGAL_REASONS, "Unavailable For Legal Reasons"); - - /// 500 Internal Server Error - /// [[RFC7231, Section 6.6.1](https://tools.ietf.org/html/rfc7231#section-6.6.1)] - (500, INTERNAL_SERVER_ERROR, "Internal Server Error"); - /// 501 Not Implemented - /// [[RFC7231, Section 6.6.2](https://tools.ietf.org/html/rfc7231#section-6.6.2)] - (501, NOT_IMPLEMENTED, "Not Implemented"); - /// 502 Bad Gateway - /// 
[[RFC7231, Section 6.6.3](https://tools.ietf.org/html/rfc7231#section-6.6.3)] - (502, BAD_GATEWAY, "Bad Gateway"); - /// 503 Service Unavailable - /// [[RFC7231, Section 6.6.4](https://tools.ietf.org/html/rfc7231#section-6.6.4)] - (503, SERVICE_UNAVAILABLE, "Service Unavailable"); - /// 504 Gateway Timeout - /// [[RFC7231, Section 6.6.5](https://tools.ietf.org/html/rfc7231#section-6.6.5)] - (504, GATEWAY_TIMEOUT, "Gateway Timeout"); - /// 505 HTTP Version Not Supported - /// [[RFC7231, Section 6.6.6](https://tools.ietf.org/html/rfc7231#section-6.6.6)] - (505, HTTP_VERSION_NOT_SUPPORTED, "HTTP Version Not Supported"); - /// 506 Variant Also Negotiates - /// [[RFC2295](https://tools.ietf.org/html/rfc2295)] - (506, VARIANT_ALSO_NEGOTIATES, "Variant Also Negotiates"); - /// 507 Insufficient Storage - /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] - (507, INSUFFICIENT_STORAGE, "Insufficient Storage"); - /// 508 Loop Detected - /// [[RFC5842](https://tools.ietf.org/html/rfc5842)] - (508, LOOP_DETECTED, "Loop Detected"); - - /// 510 Not Extended - /// [[RFC2774](https://tools.ietf.org/html/rfc2774)] - (510, NOT_EXTENDED, "Not Extended"); - /// 511 Network Authentication Required - /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] - (511, NETWORK_AUTHENTICATION_REQUIRED, "Network Authentication Required"); -} - -impl InvalidStatusCode { - fn new() -> InvalidStatusCode { - InvalidStatusCode { - _priv: (), - } - } -} - -impl fmt::Debug for InvalidStatusCode { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("InvalidStatusCode") - // skip _priv noise - .finish() - } -} - -impl fmt::Display for InvalidStatusCode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("invalid status code") - } -} - -impl Error for InvalidStatusCode {} - -// A string of packed 3-ASCII-digit status code values for the supported range -// of [100, 999] (900 codes, 2700 bytes). 
-const CODE_DIGITS: &'static str = "\ -100101102103104105106107108109110111112113114115116117118119\ -120121122123124125126127128129130131132133134135136137138139\ -140141142143144145146147148149150151152153154155156157158159\ -160161162163164165166167168169170171172173174175176177178179\ -180181182183184185186187188189190191192193194195196197198199\ -200201202203204205206207208209210211212213214215216217218219\ -220221222223224225226227228229230231232233234235236237238239\ -240241242243244245246247248249250251252253254255256257258259\ -260261262263264265266267268269270271272273274275276277278279\ -280281282283284285286287288289290291292293294295296297298299\ -300301302303304305306307308309310311312313314315316317318319\ -320321322323324325326327328329330331332333334335336337338339\ -340341342343344345346347348349350351352353354355356357358359\ -360361362363364365366367368369370371372373374375376377378379\ -380381382383384385386387388389390391392393394395396397398399\ -400401402403404405406407408409410411412413414415416417418419\ -420421422423424425426427428429430431432433434435436437438439\ -440441442443444445446447448449450451452453454455456457458459\ -460461462463464465466467468469470471472473474475476477478479\ -480481482483484485486487488489490491492493494495496497498499\ -500501502503504505506507508509510511512513514515516517518519\ -520521522523524525526527528529530531532533534535536537538539\ -540541542543544545546547548549550551552553554555556557558559\ -560561562563564565566567568569570571572573574575576577578579\ -580581582583584585586587588589590591592593594595596597598599\ -600601602603604605606607608609610611612613614615616617618619\ -620621622623624625626627628629630631632633634635636637638639\ -640641642643644645646647648649650651652653654655656657658659\ -660661662663664665666667668669670671672673674675676677678679\ -680681682683684685686687688689690691692693694695696697698699\ -700701702703704705706707708709710711712713714715716717718719\ 
-720721722723724725726727728729730731732733734735736737738739\ -740741742743744745746747748749750751752753754755756757758759\ -760761762763764765766767768769770771772773774775776777778779\ -780781782783784785786787788789790791792793794795796797798799\ -800801802803804805806807808809810811812813814815816817818819\ -820821822823824825826827828829830831832833834835836837838839\ -840841842843844845846847848849850851852853854855856857858859\ -860861862863864865866867868869870871872873874875876877878879\ -880881882883884885886887888889890891892893894895896897898899\ -900901902903904905906907908909910911912913914915916917918919\ -920921922923924925926927928929930931932933934935936937938939\ -940941942943944945946947948949950951952953954955956957958959\ -960961962963964965966967968969970971972973974975976977978979\ -980981982983984985986987988989990991992993994995996997998999"; diff -Nru s390-tools-2.31.0/rust-vendor/http/src/uri/authority.rs s390-tools-2.33.1/rust-vendor/http/src/uri/authority.rs --- s390-tools-2.31.0/rust-vendor/http/src/uri/authority.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/uri/authority.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,684 +0,0 @@ -use std::convert::TryFrom; -use std::hash::{Hash, Hasher}; -use std::str::FromStr; -use std::{cmp, fmt, str}; - -use bytes::Bytes; - -use super::{ErrorKind, InvalidUri, Port, URI_CHARS}; -use crate::byte_str::ByteStr; - -/// Represents the authority component of a URI. -#[derive(Clone)] -pub struct Authority { - pub(super) data: ByteStr, -} - -impl Authority { - pub(super) fn empty() -> Self { - Authority { - data: ByteStr::new(), - } - } - - // Not public while `bytes` is unstable. - pub(super) fn from_shared(s: Bytes) -> Result { - // Precondition on create_authority: trivially satisfied by the - // identity clousre - create_authority(s, |s| s) - } - - /// Attempt to convert an `Authority` from a static string. 
- /// - /// This function will not perform any copying, and the string will be - /// checked if it is empty or contains an invalid character. - /// - /// # Panics - /// - /// This function panics if the argument contains invalid characters or - /// is empty. - /// - /// # Examples - /// - /// ``` - /// # use http::uri::Authority; - /// let authority = Authority::from_static("example.com"); - /// assert_eq!(authority.host(), "example.com"); - /// ``` - pub fn from_static(src: &'static str) -> Self { - Authority::from_shared(Bytes::from_static(src.as_bytes())) - .expect("static str is not valid authority") - } - - /// Attempt to convert a `Bytes` buffer to a `Authority`. - /// - /// This will try to prevent a copy if the type passed is the type used - /// internally, and will copy the data if it is not. - pub fn from_maybe_shared(src: T) -> Result - where - T: AsRef<[u8]> + 'static, - { - if_downcast_into!(T, Bytes, src, { - return Authority::from_shared(src); - }); - - Authority::try_from(src.as_ref()) - } - - // Note: this may return an *empty* Authority. You might want `parse_non_empty`. - // Postcondition: for all Ok() returns, s[..ret.unwrap()] is valid UTF-8 where - // ret is the return value. - pub(super) fn parse(s: &[u8]) -> Result { - let mut colon_cnt = 0u32; - let mut start_bracket = false; - let mut end_bracket = false; - let mut has_percent = false; - let mut end = s.len(); - let mut at_sign_pos = None; - const MAX_COLONS: u32 = 8; // e.g., [FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80 - - // Among other things, this loop checks that every byte in s up to the - // first '/', '?', or '#' is a valid URI character (or in some contexts, - // a '%'). This means that each such byte is a valid single-byte UTF-8 - // code point. - for (i, &b) in s.iter().enumerate() { - match URI_CHARS[b as usize] { - b'/' | b'?' 
| b'#' => { - end = i; - break; - } - b':' => { - if colon_cnt >= MAX_COLONS { - return Err(ErrorKind::InvalidAuthority.into()); - } - colon_cnt += 1; - } - b'[' => { - if has_percent || start_bracket { - // Something other than the userinfo has a `%`, so reject it. - return Err(ErrorKind::InvalidAuthority.into()); - } - start_bracket = true; - } - b']' => { - if (!start_bracket) || end_bracket { - return Err(ErrorKind::InvalidAuthority.into()); - } - end_bracket = true; - - // Those were part of an IPv6 hostname, so forget them... - colon_cnt = 0; - has_percent = false; - } - b'@' => { - at_sign_pos = Some(i); - - // Those weren't a port colon, but part of the - // userinfo, so it needs to be forgotten. - colon_cnt = 0; - has_percent = false; - } - 0 if b == b'%' => { - // Per https://tools.ietf.org/html/rfc3986#section-3.2.1 and - // https://url.spec.whatwg.org/#authority-state - // the userinfo can have a percent-encoded username and password, - // so record that a `%` was found. If this turns out to be - // part of the userinfo, this flag will be cleared. - // Also per https://tools.ietf.org/html/rfc6874, percent-encoding can - // be used to indicate a zone identifier. - // If the flag hasn't been cleared at the end, that means this - // was part of the hostname (and not part of an IPv6 address), and - // will fail with an error. - has_percent = true; - } - 0 => { - return Err(ErrorKind::InvalidUriChar.into()); - } - _ => {} - } - } - - if start_bracket ^ end_bracket { - return Err(ErrorKind::InvalidAuthority.into()); - } - - if colon_cnt > 1 { - // Things like 'localhost:8080:3030' are rejected. - return Err(ErrorKind::InvalidAuthority.into()); - } - - if end > 0 && at_sign_pos == Some(end - 1) { - // If there's nothing after an `@`, this is bonkers. - return Err(ErrorKind::InvalidAuthority.into()); - } - - if has_percent { - // Something after the userinfo has a `%`, so reject it. 
- return Err(ErrorKind::InvalidAuthority.into()); - } - - Ok(end) - } - - // Parse bytes as an Authority, not allowing an empty string. - // - // This should be used by functions that allow a user to parse - // an `Authority` by itself. - // - // Postcondition: for all Ok() returns, s[..ret.unwrap()] is valid UTF-8 where - // ret is the return value. - fn parse_non_empty(s: &[u8]) -> Result { - if s.is_empty() { - return Err(ErrorKind::Empty.into()); - } - Authority::parse(s) - } - - /// Get the host of this `Authority`. - /// - /// The host subcomponent of authority is identified by an IP literal - /// encapsulated within square brackets, an IPv4 address in dotted- decimal - /// form, or a registered name. The host subcomponent is **case-insensitive**. - /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |---------| - /// | - /// host - /// ``` - /// - /// # Examples - /// - /// ``` - /// # use http::uri::*; - /// let authority: Authority = "example.org:80".parse().unwrap(); - /// - /// assert_eq!(authority.host(), "example.org"); - /// ``` - #[inline] - pub fn host(&self) -> &str { - host(self.as_str()) - } - - /// Get the port part of this `Authority`. - /// - /// The port subcomponent of authority is designated by an optional port - /// number following the host and delimited from it by a single colon (":") - /// character. It can be turned into a decimal port number with the `as_u16` - /// method or as a `str` with the `as_str` method. 
- /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |-| - /// | - /// port - /// ``` - /// - /// # Examples - /// - /// Authority with port - /// - /// ``` - /// # use http::uri::Authority; - /// let authority: Authority = "example.org:80".parse().unwrap(); - /// - /// let port = authority.port().unwrap(); - /// assert_eq!(port.as_u16(), 80); - /// assert_eq!(port.as_str(), "80"); - /// ``` - /// - /// Authority without port - /// - /// ``` - /// # use http::uri::Authority; - /// let authority: Authority = "example.org".parse().unwrap(); - /// - /// assert!(authority.port().is_none()); - /// ``` - pub fn port(&self) -> Option> { - let bytes = self.as_str(); - bytes - .rfind(":") - .and_then(|i| Port::from_str(&bytes[i + 1..]).ok()) - } - - /// Get the port of this `Authority` as a `u16`. - /// - /// # Example - /// - /// ``` - /// # use http::uri::Authority; - /// let authority: Authority = "example.org:80".parse().unwrap(); - /// - /// assert_eq!(authority.port_u16(), Some(80)); - /// ``` - pub fn port_u16(&self) -> Option { - self.port().and_then(|p| Some(p.as_u16())) - } - - /// Return a str representation of the authority - #[inline] - pub fn as_str(&self) -> &str { - &self.data[..] - } -} - -// Purposefully not public while `bytes` is unstable. 
-// impl TryFrom for Authority - -impl AsRef for Authority { - fn as_ref(&self) -> &str { - self.as_str() - } -} - -impl PartialEq for Authority { - fn eq(&self, other: &Authority) -> bool { - self.data.eq_ignore_ascii_case(&other.data) - } -} - -impl Eq for Authority {} - -/// Case-insensitive equality -/// -/// # Examples -/// -/// ``` -/// # use http::uri::Authority; -/// let authority: Authority = "HELLO.com".parse().unwrap(); -/// assert_eq!(authority, "hello.coM"); -/// assert_eq!("hello.com", authority); -/// ``` -impl PartialEq for Authority { - fn eq(&self, other: &str) -> bool { - self.data.eq_ignore_ascii_case(other) - } -} - -impl PartialEq for str { - fn eq(&self, other: &Authority) -> bool { - self.eq_ignore_ascii_case(other.as_str()) - } -} - -impl<'a> PartialEq for &'a str { - fn eq(&self, other: &Authority) -> bool { - self.eq_ignore_ascii_case(other.as_str()) - } -} - -impl<'a> PartialEq<&'a str> for Authority { - fn eq(&self, other: &&'a str) -> bool { - self.data.eq_ignore_ascii_case(other) - } -} - -impl PartialEq for Authority { - fn eq(&self, other: &String) -> bool { - self.data.eq_ignore_ascii_case(other.as_str()) - } -} - -impl PartialEq for String { - fn eq(&self, other: &Authority) -> bool { - self.as_str().eq_ignore_ascii_case(other.as_str()) - } -} - -/// Case-insensitive ordering -/// -/// # Examples -/// -/// ``` -/// # use http::uri::Authority; -/// let authority: Authority = "DEF.com".parse().unwrap(); -/// assert!(authority < "ghi.com"); -/// assert!(authority > "abc.com"); -/// ``` -impl PartialOrd for Authority { - fn partial_cmp(&self, other: &Authority) -> Option { - let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - left.partial_cmp(right) - } -} - -impl PartialOrd for Authority { - fn partial_cmp(&self, other: &str) -> Option { - let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - let right = 
other.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - left.partial_cmp(right) - } -} - -impl PartialOrd for str { - fn partial_cmp(&self, other: &Authority) -> Option { - let left = self.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - left.partial_cmp(right) - } -} - -impl<'a> PartialOrd for &'a str { - fn partial_cmp(&self, other: &Authority) -> Option { - let left = self.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - left.partial_cmp(right) - } -} - -impl<'a> PartialOrd<&'a str> for Authority { - fn partial_cmp(&self, other: &&'a str) -> Option { - let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - let right = other.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - left.partial_cmp(right) - } -} - -impl PartialOrd for Authority { - fn partial_cmp(&self, other: &String) -> Option { - let left = self.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - let right = other.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - left.partial_cmp(right) - } -} - -impl PartialOrd for String { - fn partial_cmp(&self, other: &Authority) -> Option { - let left = self.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - let right = other.data.as_bytes().iter().map(|b| b.to_ascii_lowercase()); - left.partial_cmp(right) - } -} - -/// Case-insensitive hashing -/// -/// # Examples -/// -/// ``` -/// # use http::uri::Authority; -/// # use std::hash::{Hash, Hasher}; -/// # use std::collections::hash_map::DefaultHasher; -/// -/// let a: Authority = "HELLO.com".parse().unwrap(); -/// let b: Authority = "hello.coM".parse().unwrap(); -/// -/// let mut s = DefaultHasher::new(); -/// a.hash(&mut s); -/// let a = s.finish(); -/// -/// let mut s = DefaultHasher::new(); -/// b.hash(&mut s); -/// let b = s.finish(); -/// -/// assert_eq!(a, b); -/// ``` -impl Hash for Authority { - fn hash(&self, 
state: &mut H) - where - H: Hasher, - { - self.data.len().hash(state); - for &b in self.data.as_bytes() { - state.write_u8(b.to_ascii_lowercase()); - } - } -} - -impl<'a> TryFrom<&'a [u8]> for Authority { - type Error = InvalidUri; - #[inline] - fn try_from(s: &'a [u8]) -> Result { - // parse first, and only turn into Bytes if valid - - // Preconditon on create_authority: copy_from_slice() copies all of - // bytes from the [u8] parameter into a new Bytes - create_authority(s, |s| Bytes::copy_from_slice(s)) - } -} - -impl<'a> TryFrom<&'a str> for Authority { - type Error = InvalidUri; - #[inline] - fn try_from(s: &'a str) -> Result { - TryFrom::try_from(s.as_bytes()) - } -} - -impl TryFrom> for Authority { - type Error = InvalidUri; - - #[inline] - fn try_from(vec: Vec) -> Result { - Authority::from_shared(vec.into()) - } -} - -impl TryFrom for Authority { - type Error = InvalidUri; - - #[inline] - fn try_from(t: String) -> Result { - Authority::from_shared(t.into()) - } -} - -impl FromStr for Authority { - type Err = InvalidUri; - - fn from_str(s: &str) -> Result { - TryFrom::try_from(s) - } -} - -impl fmt::Debug for Authority { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_str()) - } -} - -impl fmt::Display for Authority { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_str()) - } -} - -fn host(auth: &str) -> &str { - let host_port = auth - .rsplitn(2, '@') - .next() - .expect("split always has at least 1 item"); - - if host_port.as_bytes()[0] == b'[' { - let i = host_port - .find(']') - .expect("parsing should validate brackets"); - // ..= ranges aren't available in 1.20, our minimum Rust version... - &host_port[0..i + 1] - } else { - host_port - .split(':') - .next() - .expect("split always has at least 1 item") - } -} - -// Precondition: f converts all of the bytes in the passed in B into the -// returned Bytes. 
-fn create_authority(b: B, f: F) -> Result -where - B: AsRef<[u8]>, - F: FnOnce(B) -> Bytes, -{ - let s = b.as_ref(); - let authority_end = Authority::parse_non_empty(s)?; - - if authority_end != s.len() { - return Err(ErrorKind::InvalidUriChar.into()); - } - - let bytes = f(b); - - Ok(Authority { - // Safety: the postcondition on parse_non_empty() and the check against - // s.len() ensure that b is valid UTF-8. The precondition on f ensures - // that this is carried through to bytes. - data: unsafe { ByteStr::from_utf8_unchecked(bytes) }, - }) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_empty_string_is_error() { - let err = Authority::parse_non_empty(b"").unwrap_err(); - assert_eq!(err.0, ErrorKind::Empty); - } - - #[test] - fn equal_to_self_of_same_authority() { - let authority1: Authority = "example.com".parse().unwrap(); - let authority2: Authority = "EXAMPLE.COM".parse().unwrap(); - assert_eq!(authority1, authority2); - assert_eq!(authority2, authority1); - } - - #[test] - fn not_equal_to_self_of_different_authority() { - let authority1: Authority = "example.com".parse().unwrap(); - let authority2: Authority = "test.com".parse().unwrap(); - assert_ne!(authority1, authority2); - assert_ne!(authority2, authority1); - } - - #[test] - fn equates_with_a_str() { - let authority: Authority = "example.com".parse().unwrap(); - assert_eq!(&authority, "EXAMPLE.com"); - assert_eq!("EXAMPLE.com", &authority); - assert_eq!(authority, "EXAMPLE.com"); - assert_eq!("EXAMPLE.com", authority); - } - - #[test] - fn from_static_equates_with_a_str() { - let authority = Authority::from_static("example.com"); - assert_eq!(authority, "example.com"); - } - - #[test] - fn not_equal_with_a_str_of_a_different_authority() { - let authority: Authority = "example.com".parse().unwrap(); - assert_ne!(&authority, "test.com"); - assert_ne!("test.com", &authority); - assert_ne!(authority, "test.com"); - assert_ne!("test.com", authority); - } - - #[test] - fn 
equates_with_a_string() { - let authority: Authority = "example.com".parse().unwrap(); - assert_eq!(authority, "EXAMPLE.com".to_string()); - assert_eq!("EXAMPLE.com".to_string(), authority); - } - - #[test] - fn equates_with_a_string_of_a_different_authority() { - let authority: Authority = "example.com".parse().unwrap(); - assert_ne!(authority, "test.com".to_string()); - assert_ne!("test.com".to_string(), authority); - } - - #[test] - fn compares_to_self() { - let authority1: Authority = "abc.com".parse().unwrap(); - let authority2: Authority = "def.com".parse().unwrap(); - assert!(authority1 < authority2); - assert!(authority2 > authority1); - } - - #[test] - fn compares_with_a_str() { - let authority: Authority = "def.com".parse().unwrap(); - // with ref - assert!(&authority < "ghi.com"); - assert!("ghi.com" > &authority); - assert!(&authority > "abc.com"); - assert!("abc.com" < &authority); - - // no ref - assert!(authority < "ghi.com"); - assert!("ghi.com" > authority); - assert!(authority > "abc.com"); - assert!("abc.com" < authority); - } - - #[test] - fn compares_with_a_string() { - let authority: Authority = "def.com".parse().unwrap(); - assert!(authority < "ghi.com".to_string()); - assert!("ghi.com".to_string() > authority); - assert!(authority > "abc.com".to_string()); - assert!("abc.com".to_string() < authority); - } - - #[test] - fn allows_percent_in_userinfo() { - let authority_str = "a%2f:b%2f@example.com"; - let authority: Authority = authority_str.parse().unwrap(); - assert_eq!(authority, authority_str); - } - - #[test] - fn rejects_percent_in_hostname() { - let err = Authority::parse_non_empty(b"example%2f.com").unwrap_err(); - assert_eq!(err.0, ErrorKind::InvalidAuthority); - - let err = Authority::parse_non_empty(b"a%2f:b%2f@example%2f.com").unwrap_err(); - assert_eq!(err.0, ErrorKind::InvalidAuthority); - } - - #[test] - fn allows_percent_in_ipv6_address() { - let authority_str = "[fe80::1:2:3:4%25eth0]"; - let result: Authority = 
authority_str.parse().unwrap(); - assert_eq!(result, authority_str); - } - - #[test] - fn reject_obviously_invalid_ipv6_address() { - let err = Authority::parse_non_empty(b"[0:1:2:3:4:5:6:7:8:9:10:11:12:13:14]").unwrap_err(); - assert_eq!(err.0, ErrorKind::InvalidAuthority); - } - - #[test] - fn rejects_percent_outside_ipv6_address() { - let err = Authority::parse_non_empty(b"1234%20[fe80::1:2:3:4]").unwrap_err(); - assert_eq!(err.0, ErrorKind::InvalidAuthority); - - let err = Authority::parse_non_empty(b"[fe80::1:2:3:4]%20").unwrap_err(); - assert_eq!(err.0, ErrorKind::InvalidAuthority); - } - - #[test] - fn rejects_invalid_utf8() { - let err = Authority::try_from([0xc0u8].as_ref()).unwrap_err(); - assert_eq!(err.0, ErrorKind::InvalidUriChar); - - let err = Authority::from_shared(Bytes::from_static([0xc0u8].as_ref())).unwrap_err(); - assert_eq!(err.0, ErrorKind::InvalidUriChar); - } - - #[test] - fn rejects_invalid_use_of_brackets() { - let err = Authority::parse_non_empty(b"[]@[").unwrap_err(); - assert_eq!(err.0, ErrorKind::InvalidAuthority); - - // reject tie-fighter - let err = Authority::parse_non_empty(b"]o[").unwrap_err(); - assert_eq!(err.0, ErrorKind::InvalidAuthority); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/uri/builder.rs s390-tools-2.33.1/rust-vendor/http/src/uri/builder.rs --- s390-tools-2.31.0/rust-vendor/http/src/uri/builder.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/uri/builder.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,197 +0,0 @@ -use std::convert::{TryFrom, TryInto}; - -use super::{Authority, Parts, PathAndQuery, Scheme}; -use crate::Uri; - -/// A builder for `Uri`s. -/// -/// This type can be used to construct an instance of `Uri` -/// through a builder pattern. -#[derive(Debug)] -pub struct Builder { - parts: Result, -} - -impl Builder { - /// Creates a new default instance of `Builder` to construct a `Uri`. 
- /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let uri = uri::Builder::new() - /// .scheme("https") - /// .authority("hyper.rs") - /// .path_and_query("/") - /// .build() - /// .unwrap(); - /// ``` - #[inline] - pub fn new() -> Builder { - Builder::default() - } - - /// Set the `Scheme` for this URI. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let mut builder = uri::Builder::new(); - /// builder.scheme("https"); - /// ``` - pub fn scheme(self, scheme: T) -> Self - where - Scheme: TryFrom, - >::Error: Into, - { - self.map(move |mut parts| { - let scheme = scheme.try_into().map_err(Into::into)?; - parts.scheme = Some(scheme); - Ok(parts) - }) - } - - /// Set the `Authority` for this URI. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let uri = uri::Builder::new() - /// .authority("tokio.rs") - /// .build() - /// .unwrap(); - /// ``` - pub fn authority(self, auth: T) -> Self - where - Authority: TryFrom, - >::Error: Into, - { - self.map(move |mut parts| { - let auth = auth.try_into().map_err(Into::into)?; - parts.authority = Some(auth); - Ok(parts) - }) - } - - /// Set the `PathAndQuery` for this URI. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let uri = uri::Builder::new() - /// .path_and_query("/hello?foo=bar") - /// .build() - /// .unwrap(); - /// ``` - pub fn path_and_query(self, p_and_q: T) -> Self - where - PathAndQuery: TryFrom, - >::Error: Into, - { - self.map(move |mut parts| { - let p_and_q = p_and_q.try_into().map_err(Into::into)?; - parts.path_and_query = Some(p_and_q); - Ok(parts) - }) - } - - /// Consumes this builder, and tries to construct a valid `Uri` from - /// the configured pieces. - /// - /// # Errors - /// - /// This function may return an error if any previously configured argument - /// failed to parse or get converted to the internal representation. 
For - /// example if an invalid `scheme` was specified via `scheme("!@#%/^")` - /// the error will be returned when this function is called rather than - /// when `scheme` was called. - /// - /// Additionally, the various forms of URI require certain combinations of - /// parts to be set to be valid. If the parts don't fit into any of the - /// valid forms of URI, a new error is returned. - /// - /// # Examples - /// - /// ``` - /// # use http::*; - /// - /// let uri = Uri::builder() - /// .build() - /// .unwrap(); - /// ``` - pub fn build(self) -> Result { - let parts = self.parts?; - Uri::from_parts(parts).map_err(Into::into) - } - - // private - - fn map(self, func: F) -> Self - where - F: FnOnce(Parts) -> Result, - { - - Builder { - parts: self.parts.and_then(func), - } - } -} - -impl Default for Builder { - #[inline] - fn default() -> Builder { - Builder { - parts: Ok(Parts::default()), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn build_from_str() { - let uri = Builder::new() - .scheme(Scheme::HTTP) - .authority("hyper.rs") - .path_and_query("/foo?a=1") - .build() - .unwrap(); - assert_eq!(uri.scheme_str(), Some("http")); - assert_eq!(uri.authority().unwrap().host(), "hyper.rs"); - assert_eq!(uri.path(), "/foo"); - assert_eq!(uri.query(), Some("a=1")); - } - - #[test] - fn build_from_string() { - for i in 1..10 { - let uri = Builder::new() - .path_and_query(format!("/foo?a={}", i)) - .build() - .unwrap(); - let expected_query = format!("a={}", i); - assert_eq!(uri.path(), "/foo"); - assert_eq!(uri.query(), Some(expected_query.as_str())); - } - } - - #[test] - fn build_from_string_ref() { - for i in 1..10 { - let p_a_q = format!("/foo?a={}", i); - let uri = Builder::new().path_and_query(&p_a_q).build().unwrap(); - let expected_query = format!("a={}", i); - assert_eq!(uri.path(), "/foo"); - assert_eq!(uri.query(), Some(expected_query.as_str())); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/uri/mod.rs 
s390-tools-2.33.1/rust-vendor/http/src/uri/mod.rs --- s390-tools-2.31.0/rust-vendor/http/src/uri/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/uri/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1118 +0,0 @@ -//! URI component of request and response lines -//! -//! This module primarily contains the `Uri` type which is a component of all -//! HTTP requests and also reexports this type at the root of the crate. A URI -//! is not always a "full URL" in the sense of something you'd type into a web -//! browser, but HTTP requests may only have paths on servers but may have full -//! schemes and hostnames on clients. -//! -//! # Examples -//! -//! ``` -//! use http::Uri; -//! -//! let uri = "/foo/bar?baz".parse::().unwrap(); -//! assert_eq!(uri.path(), "/foo/bar"); -//! assert_eq!(uri.query(), Some("baz")); -//! assert_eq!(uri.host(), None); -//! -//! let uri = "https://www.rust-lang.org/install.html".parse::().unwrap(); -//! assert_eq!(uri.scheme_str(), Some("https")); -//! assert_eq!(uri.host(), Some("www.rust-lang.org")); -//! assert_eq!(uri.path(), "/install.html"); -//! ``` - -use crate::byte_str::ByteStr; -use std::convert::TryFrom; - -use bytes::Bytes; - -use std::error::Error; -use std::hash::{Hash, Hasher}; -use std::str::{self, FromStr}; -use std::{fmt, u16, u8}; - -use self::scheme::Scheme2; - -pub use self::authority::Authority; -pub use self::builder::Builder; -pub use self::path::PathAndQuery; -pub use self::port::Port; -pub use self::scheme::Scheme; - -mod authority; -mod builder; -mod path; -mod port; -mod scheme; -#[cfg(test)] -mod tests; - -/// The URI component of a request. -/// -/// For HTTP 1, this is included as part of the request line. From Section 5.3, -/// Request Target: -/// -/// > Once an inbound connection is obtained, the client sends an HTTP -/// > request message (Section 3) with a request-target derived from the -/// > target URI. 
There are four distinct formats for the request-target, -/// > depending on both the method being requested and whether the request -/// > is to a proxy. -/// > -/// > ```notrust -/// > request-target = origin-form -/// > / absolute-form -/// > / authority-form -/// > / asterisk-form -/// > ``` -/// -/// The URI is structured as follows: -/// -/// ```notrust -/// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 -/// |-| |-------------------------------||--------| |-------------------| |-----| -/// | | | | | -/// scheme authority path query fragment -/// ``` -/// -/// For HTTP 2.0, the URI is encoded using pseudoheaders. -/// -/// # Examples -/// -/// ``` -/// use http::Uri; -/// -/// let uri = "/foo/bar?baz".parse::().unwrap(); -/// assert_eq!(uri.path(), "/foo/bar"); -/// assert_eq!(uri.query(), Some("baz")); -/// assert_eq!(uri.host(), None); -/// -/// let uri = "https://www.rust-lang.org/install.html".parse::().unwrap(); -/// assert_eq!(uri.scheme_str(), Some("https")); -/// assert_eq!(uri.host(), Some("www.rust-lang.org")); -/// assert_eq!(uri.path(), "/install.html"); -/// ``` -#[derive(Clone)] -pub struct Uri { - scheme: Scheme, - authority: Authority, - path_and_query: PathAndQuery, -} - -/// The various parts of a URI. -/// -/// This struct is used to provide to and retrieve from a URI. -#[derive(Debug, Default)] -pub struct Parts { - /// The scheme component of a URI - pub scheme: Option, - - /// The authority component of a URI - pub authority: Option, - - /// The origin-form component of a URI - pub path_and_query: Option, - - /// Allow extending in the future - _priv: (), -} - -/// An error resulting from a failed attempt to construct a URI. -#[derive(Debug)] -pub struct InvalidUri(ErrorKind); - -/// An error resulting from a failed attempt to construct a URI. 
-#[derive(Debug)] -pub struct InvalidUriParts(InvalidUri); - -#[derive(Debug, Eq, PartialEq)] -enum ErrorKind { - InvalidUriChar, - InvalidScheme, - InvalidAuthority, - InvalidPort, - InvalidFormat, - SchemeMissing, - AuthorityMissing, - PathAndQueryMissing, - TooLong, - Empty, - SchemeTooLong, -} - -// u16::MAX is reserved for None -const MAX_LEN: usize = (u16::MAX - 1) as usize; - -// URI_CHARS is a table of valid characters in a URI. An entry in the table is -// 0 for invalid characters. For valid characters the entry is itself (i.e. -// the entry for 33 is b'!' because b'!' == 33u8). An important characteristic -// of this table is that all entries above 127 are invalid. This makes all of the -// valid entries a valid single-byte UTF-8 code point. This means that a slice -// of such valid entries is valid UTF-8. -const URI_CHARS: [u8; 256] = [ - // 0 1 2 3 4 5 6 7 8 9 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x - 0, 0, 0, b'!', 0, b'#', b'$', 0, b'&', b'\'', // 3x - b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', // 4x - b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', // 5x - 0, b'=', 0, b'?', b'@', b'A', b'B', b'C', b'D', b'E', // 6x - b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x - b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x - b'Z', b'[', 0, b']', 0, b'_', 0, b'a', b'b', b'c', // 9x - b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x - b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x - b'x', b'y', b'z', 0, 0, 0, b'~', 0, 0, 0, // 12x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x - 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, // 22x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 23x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x - 0, 0, 0, 0, 0, 0 // 25x -]; - -impl Uri { - /// Creates a new builder-style object to manufacture a `Uri`. - /// - /// This method returns an instance of `Builder` which can be usd to - /// create a `Uri`. - /// - /// # Examples - /// - /// ``` - /// use http::Uri; - /// - /// let uri = Uri::builder() - /// .scheme("https") - /// .authority("hyper.rs") - /// .path_and_query("/") - /// .build() - /// .unwrap(); - /// ``` - pub fn builder() -> Builder { - Builder::new() - } - - /// Attempt to convert a `Parts` into a `Uri`. - /// - /// # Examples - /// - /// Relative URI - /// - /// ``` - /// # use http::uri::*; - /// let mut parts = Parts::default(); - /// parts.path_and_query = Some("/foo".parse().unwrap()); - /// - /// let uri = Uri::from_parts(parts).unwrap(); - /// - /// assert_eq!(uri.path(), "/foo"); - /// - /// assert!(uri.scheme().is_none()); - /// assert!(uri.authority().is_none()); - /// ``` - /// - /// Absolute URI - /// - /// ``` - /// # use http::uri::*; - /// let mut parts = Parts::default(); - /// parts.scheme = Some("http".parse().unwrap()); - /// parts.authority = Some("foo.com".parse().unwrap()); - /// parts.path_and_query = Some("/foo".parse().unwrap()); - /// - /// let uri = Uri::from_parts(parts).unwrap(); - /// - /// assert_eq!(uri.scheme().unwrap().as_str(), "http"); - /// assert_eq!(uri.authority().unwrap(), "foo.com"); - /// assert_eq!(uri.path(), "/foo"); - /// ``` - pub fn from_parts(src: Parts) -> Result { - if src.scheme.is_some() { - if src.authority.is_none() { - return Err(ErrorKind::AuthorityMissing.into()); - } - - if src.path_and_query.is_none() { - return Err(ErrorKind::PathAndQueryMissing.into()); - } - } else { - if src.authority.is_some() && src.path_and_query.is_some() { - return Err(ErrorKind::SchemeMissing.into()); - } - } - - let scheme = match src.scheme { - Some(scheme) => scheme, - None => Scheme { - inner: 
Scheme2::None, - }, - }; - - let authority = match src.authority { - Some(authority) => authority, - None => Authority::empty(), - }; - - let path_and_query = match src.path_and_query { - Some(path_and_query) => path_and_query, - None => PathAndQuery::empty(), - }; - - Ok(Uri { - scheme: scheme, - authority: authority, - path_and_query: path_and_query, - }) - } - - /// Attempt to convert a `Bytes` buffer to a `Uri`. - /// - /// This will try to prevent a copy if the type passed is the type used - /// internally, and will copy the data if it is not. - pub fn from_maybe_shared(src: T) -> Result - where - T: AsRef<[u8]> + 'static, - { - if_downcast_into!(T, Bytes, src, { - return Uri::from_shared(src); - }); - - Uri::try_from(src.as_ref()) - } - - // Not public while `bytes` is unstable. - fn from_shared(s: Bytes) -> Result { - use self::ErrorKind::*; - - if s.len() > MAX_LEN { - return Err(TooLong.into()); - } - - match s.len() { - 0 => { - return Err(Empty.into()); - } - 1 => match s[0] { - b'/' => { - return Ok(Uri { - scheme: Scheme::empty(), - authority: Authority::empty(), - path_and_query: PathAndQuery::slash(), - }); - } - b'*' => { - return Ok(Uri { - scheme: Scheme::empty(), - authority: Authority::empty(), - path_and_query: PathAndQuery::star(), - }); - } - _ => { - let authority = Authority::from_shared(s)?; - - return Ok(Uri { - scheme: Scheme::empty(), - authority: authority, - path_and_query: PathAndQuery::empty(), - }); - } - }, - _ => {} - } - - if s[0] == b'/' { - return Ok(Uri { - scheme: Scheme::empty(), - authority: Authority::empty(), - path_and_query: PathAndQuery::from_shared(s)?, - }); - } - - parse_full(s) - } - - /// Convert a `Uri` from a static string. - /// - /// This function will not perform any copying, however the string is - /// checked to ensure that it is valid. - /// - /// # Panics - /// - /// This function panics if the argument is an invalid URI. 
- /// - /// # Examples - /// - /// ``` - /// # use http::uri::Uri; - /// let uri = Uri::from_static("http://example.com/foo"); - /// - /// assert_eq!(uri.host().unwrap(), "example.com"); - /// assert_eq!(uri.path(), "/foo"); - /// ``` - pub fn from_static(src: &'static str) -> Self { - let s = Bytes::from_static(src.as_bytes()); - match Uri::from_shared(s) { - Ok(uri) => uri, - Err(e) => panic!("static str is not valid URI: {}", e), - } - } - - /// Convert a `Uri` into `Parts`. - /// - /// # Note - /// - /// This is just an inherent method providing the same functionality as - /// `let parts: Parts = uri.into()` - /// - /// # Examples - /// - /// ``` - /// # use http::uri::*; - /// let uri: Uri = "/foo".parse().unwrap(); - /// - /// let parts = uri.into_parts(); - /// - /// assert_eq!(parts.path_and_query.unwrap(), "/foo"); - /// - /// assert!(parts.scheme.is_none()); - /// assert!(parts.authority.is_none()); - /// ``` - #[inline] - pub fn into_parts(self) -> Parts { - self.into() - } - - /// Returns the path & query components of the Uri - #[inline] - pub fn path_and_query(&self) -> Option<&PathAndQuery> { - if !self.scheme.inner.is_none() || self.authority.data.is_empty() { - Some(&self.path_and_query) - } else { - None - } - } - - /// Get the path of this `Uri`. - /// - /// Both relative and absolute URIs contain a path component, though it - /// might be the empty string. The path component is **case sensitive**. - /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |--------| - /// | - /// path - /// ``` - /// - /// If the URI is `*` then the path component is equal to `*`. 
- /// - /// # Examples - /// - /// A relative URI - /// - /// ``` - /// # use http::Uri; - /// - /// let uri: Uri = "/hello/world".parse().unwrap(); - /// - /// assert_eq!(uri.path(), "/hello/world"); - /// ``` - /// - /// An absolute URI - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); - /// - /// assert_eq!(uri.path(), "/hello/world"); - /// ``` - #[inline] - pub fn path(&self) -> &str { - if self.has_path() { - self.path_and_query.path() - } else { - "" - } - } - - /// Get the scheme of this `Uri`. - /// - /// The URI scheme refers to a specification for assigning identifiers - /// within that scheme. Only absolute URIs contain a scheme component, but - /// not all absolute URIs will contain a scheme component. Although scheme - /// names are case-insensitive, the canonical form is lowercase. - /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |-| - /// | - /// scheme - /// ``` - /// - /// # Examples - /// - /// Absolute URI - /// - /// ``` - /// use http::uri::{Scheme, Uri}; - /// - /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); - /// - /// assert_eq!(uri.scheme(), Some(&Scheme::HTTP)); - /// ``` - /// - /// - /// Relative URI - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "/hello/world".parse().unwrap(); - /// - /// assert!(uri.scheme().is_none()); - /// ``` - #[inline] - pub fn scheme(&self) -> Option<&Scheme> { - if self.scheme.inner.is_none() { - None - } else { - Some(&self.scheme) - } - } - - /// Get the scheme of this `Uri` as a `&str`. 
- /// - /// # Example - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); - /// - /// assert_eq!(uri.scheme_str(), Some("http")); - /// ``` - #[inline] - pub fn scheme_str(&self) -> Option<&str> { - if self.scheme.inner.is_none() { - None - } else { - Some(self.scheme.as_str()) - } - } - - /// Get the authority of this `Uri`. - /// - /// The authority is a hierarchical element for naming authority such that - /// the remainder of the URI is delegated to that authority. For HTTP, the - /// authority consists of the host and port. The host portion of the - /// authority is **case-insensitive**. - /// - /// The authority also includes a `username:password` component, however - /// the use of this is deprecated and should be avoided. - /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |-------------------------------| - /// | - /// authority - /// ``` - /// - /// # Examples - /// - /// Absolute URI - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); - /// - /// assert_eq!(uri.authority().map(|a| a.as_str()), Some("example.org:80")); - /// ``` - /// - /// - /// Relative URI - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "/hello/world".parse().unwrap(); - /// - /// assert!(uri.authority().is_none()); - /// ``` - #[inline] - pub fn authority(&self) -> Option<&Authority> { - if self.authority.data.is_empty() { - None - } else { - Some(&self.authority) - } - } - - /// Get the host of this `Uri`. - /// - /// The host subcomponent of authority is identified by an IP literal - /// encapsulated within square brackets, an IPv4 address in dotted- decimal - /// form, or a registered name. The host subcomponent is **case-insensitive**. 
- /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |---------| - /// | - /// host - /// ``` - /// - /// # Examples - /// - /// Absolute URI - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); - /// - /// assert_eq!(uri.host(), Some("example.org")); - /// ``` - /// - /// - /// Relative URI - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "/hello/world".parse().unwrap(); - /// - /// assert!(uri.host().is_none()); - /// ``` - #[inline] - pub fn host(&self) -> Option<&str> { - self.authority().map(|a| a.host()) - } - - /// Get the port part of this `Uri`. - /// - /// The port subcomponent of authority is designated by an optional port - /// number following the host and delimited from it by a single colon (":") - /// character. It can be turned into a decimal port number with the `as_u16` - /// method or as a `str` with the `as_str` method. - /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |-| - /// | - /// port - /// ``` - /// - /// # Examples - /// - /// Absolute URI with port - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); - /// - /// let port = uri.port().unwrap(); - /// assert_eq!(port.as_u16(), 80); - /// ``` - /// - /// Absolute URI without port - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "http://example.org/hello/world".parse().unwrap(); - /// - /// assert!(uri.port().is_none()); - /// ``` - /// - /// Relative URI - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "/hello/world".parse().unwrap(); - /// - /// assert!(uri.port().is_none()); - /// ``` - pub fn port(&self) -> Option> { - self.authority().and_then(|a| a.port()) - } - - /// Get the port of this `Uri` as a `u16`. 
- /// - /// - /// # Example - /// - /// ``` - /// # use http::{Uri, uri::Port}; - /// let uri: Uri = "http://example.org:80/hello/world".parse().unwrap(); - /// - /// assert_eq!(uri.port_u16(), Some(80)); - /// ``` - pub fn port_u16(&self) -> Option { - self.port().and_then(|p| Some(p.as_u16())) - } - - /// Get the query string of this `Uri`, starting after the `?`. - /// - /// The query component contains non-hierarchical data that, along with data - /// in the path component, serves to identify a resource within the scope of - /// the URI's scheme and naming authority (if any). The query component is - /// indicated by the first question mark ("?") character and terminated by a - /// number sign ("#") character or by the end of the URI. - /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |-------------------| - /// | - /// query - /// ``` - /// - /// # Examples - /// - /// Absolute URI - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "http://example.org/hello/world?key=value".parse().unwrap(); - /// - /// assert_eq!(uri.query(), Some("key=value")); - /// ``` - /// - /// Relative URI with a query string component - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "/hello/world?key=value&foo=bar".parse().unwrap(); - /// - /// assert_eq!(uri.query(), Some("key=value&foo=bar")); - /// ``` - /// - /// Relative URI without a query string component - /// - /// ``` - /// # use http::Uri; - /// let uri: Uri = "/hello/world".parse().unwrap(); - /// - /// assert!(uri.query().is_none()); - /// ``` - #[inline] - pub fn query(&self) -> Option<&str> { - self.path_and_query.query() - } - - fn has_path(&self) -> bool { - !self.path_and_query.data.is_empty() || !self.scheme.inner.is_none() - } -} - -impl<'a> TryFrom<&'a [u8]> for Uri { - type Error = InvalidUri; - - #[inline] - fn try_from(t: &'a [u8]) -> Result { - Uri::from_shared(Bytes::copy_from_slice(t)) - } -} - -impl<'a> TryFrom<&'a str> for 
Uri { - type Error = InvalidUri; - - #[inline] - fn try_from(t: &'a str) -> Result { - t.parse() - } -} - -impl<'a> TryFrom<&'a String> for Uri { - type Error = InvalidUri; - - #[inline] - fn try_from(t: &'a String) -> Result { - t.parse() - } -} - -impl TryFrom for Uri { - type Error = InvalidUri; - - #[inline] - fn try_from(t: String) -> Result { - Uri::from_shared(Bytes::from(t)) - } -} - -impl<'a> TryFrom> for Uri { - type Error = InvalidUri; - - #[inline] - fn try_from(vec: Vec) -> Result { - Uri::from_shared(Bytes::from(vec)) - } -} - -impl TryFrom for Uri { - type Error = InvalidUriParts; - - #[inline] - fn try_from(src: Parts) -> Result { - Uri::from_parts(src) - } -} - -impl<'a> TryFrom<&'a Uri> for Uri { - type Error = crate::Error; - - #[inline] - fn try_from(src: &'a Uri) -> Result { - Ok(src.clone()) - } -} - -/// Convert an `Authority` into a `Uri`. -impl From for Uri { - fn from(authority: Authority) -> Self { - Self { - scheme: Scheme::empty(), - authority, - path_and_query: PathAndQuery::empty(), - } - } -} - -/// Convert a `PathAndQuery` into a `Uri`. -impl From for Uri { - fn from(path_and_query: PathAndQuery) -> Self { - Self { - scheme: Scheme::empty(), - authority: Authority::empty(), - path_and_query, - } - } -} - -/// Convert a `Uri` into `Parts` -impl From for Parts { - fn from(src: Uri) -> Self { - let path_and_query = if src.has_path() { - Some(src.path_and_query) - } else { - None - }; - - let scheme = match src.scheme.inner { - Scheme2::None => None, - _ => Some(src.scheme), - }; - - let authority = if src.authority.data.is_empty() { - None - } else { - Some(src.authority) - }; - - Parts { - scheme: scheme, - authority: authority, - path_and_query: path_and_query, - _priv: (), - } - } -} - -fn parse_full(mut s: Bytes) -> Result { - // Parse the scheme - let scheme = match Scheme2::parse(&s[..])? 
{ - Scheme2::None => Scheme2::None, - Scheme2::Standard(p) => { - // TODO: use truncate - let _ = s.split_to(p.len() + 3); - Scheme2::Standard(p) - } - Scheme2::Other(n) => { - // Grab the protocol - let mut scheme = s.split_to(n + 3); - - // Strip ://, TODO: truncate - let _ = scheme.split_off(n); - - // Allocate the ByteStr - let val = unsafe { ByteStr::from_utf8_unchecked(scheme) }; - - Scheme2::Other(Box::new(val)) - } - }; - - // Find the end of the authority. The scheme will already have been - // extracted. - let authority_end = Authority::parse(&s[..])?; - - if scheme.is_none() { - if authority_end != s.len() { - return Err(ErrorKind::InvalidFormat.into()); - } - - let authority = Authority { - data: unsafe { ByteStr::from_utf8_unchecked(s) }, - }; - - return Ok(Uri { - scheme: scheme.into(), - authority: authority, - path_and_query: PathAndQuery::empty(), - }); - } - - // Authority is required when absolute - if authority_end == 0 { - return Err(ErrorKind::InvalidFormat.into()); - } - - let authority = s.split_to(authority_end); - let authority = Authority { - data: unsafe { ByteStr::from_utf8_unchecked(authority) }, - }; - - Ok(Uri { - scheme: scheme.into(), - authority: authority, - path_and_query: PathAndQuery::from_shared(s)?, - }) -} - -impl FromStr for Uri { - type Err = InvalidUri; - - #[inline] - fn from_str(s: &str) -> Result { - Uri::try_from(s.as_bytes()) - } -} - -impl PartialEq for Uri { - fn eq(&self, other: &Uri) -> bool { - if self.scheme() != other.scheme() { - return false; - } - - if self.authority() != other.authority() { - return false; - } - - if self.path() != other.path() { - return false; - } - - if self.query() != other.query() { - return false; - } - - true - } -} - -impl PartialEq for Uri { - fn eq(&self, other: &str) -> bool { - let mut other = other.as_bytes(); - let mut absolute = false; - - if let Some(scheme) = self.scheme() { - let scheme = scheme.as_str().as_bytes(); - absolute = true; - - if other.len() < scheme.len() + 
3 { - return false; - } - - if !scheme.eq_ignore_ascii_case(&other[..scheme.len()]) { - return false; - } - - other = &other[scheme.len()..]; - - if &other[..3] != b"://" { - return false; - } - - other = &other[3..]; - } - - if let Some(auth) = self.authority() { - let len = auth.data.len(); - absolute = true; - - if other.len() < len { - return false; - } - - if !auth.data.as_bytes().eq_ignore_ascii_case(&other[..len]) { - return false; - } - - other = &other[len..]; - } - - let path = self.path(); - - if other.len() < path.len() || path.as_bytes() != &other[..path.len()] { - if absolute && path == "/" { - // PathAndQuery can be omitted, fall through - } else { - return false; - } - } else { - other = &other[path.len()..]; - } - - if let Some(query) = self.query() { - if other.len() == 0 { - return query.len() == 0; - } - - if other[0] != b'?' { - return false; - } - - other = &other[1..]; - - if other.len() < query.len() { - return false; - } - - if query.as_bytes() != &other[..query.len()] { - return false; - } - - other = &other[query.len()..]; - } - - other.is_empty() || other[0] == b'#' - } -} - -impl PartialEq for str { - fn eq(&self, uri: &Uri) -> bool { - uri == self - } -} - -impl<'a> PartialEq<&'a str> for Uri { - fn eq(&self, other: &&'a str) -> bool { - self == *other - } -} - -impl<'a> PartialEq for &'a str { - fn eq(&self, uri: &Uri) -> bool { - uri == *self - } -} - -impl Eq for Uri {} - -/// Returns a `Uri` representing `/` -impl Default for Uri { - #[inline] - fn default() -> Uri { - Uri { - scheme: Scheme::empty(), - authority: Authority::empty(), - path_and_query: PathAndQuery::slash(), - } - } -} - -impl fmt::Display for Uri { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(scheme) = self.scheme() { - write!(f, "{}://", scheme)?; - } - - if let Some(authority) = self.authority() { - write!(f, "{}", authority)?; - } - - write!(f, "{}", self.path())?; - - if let Some(query) = self.query() { - write!(f, "?{}", query)?; 
- } - - Ok(()) - } -} - -impl fmt::Debug for Uri { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl From for InvalidUri { - fn from(src: ErrorKind) -> InvalidUri { - InvalidUri(src) - } -} - -impl From for InvalidUriParts { - fn from(src: ErrorKind) -> InvalidUriParts { - InvalidUriParts(src.into()) - } -} - -impl InvalidUri { - fn s(&self) -> &str { - match self.0 { - ErrorKind::InvalidUriChar => "invalid uri character", - ErrorKind::InvalidScheme => "invalid scheme", - ErrorKind::InvalidAuthority => "invalid authority", - ErrorKind::InvalidPort => "invalid port", - ErrorKind::InvalidFormat => "invalid format", - ErrorKind::SchemeMissing => "scheme missing", - ErrorKind::AuthorityMissing => "authority missing", - ErrorKind::PathAndQueryMissing => "path missing", - ErrorKind::TooLong => "uri too long", - ErrorKind::Empty => "empty string", - ErrorKind::SchemeTooLong => "scheme too long", - } - } -} - -impl fmt::Display for InvalidUri { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.s().fmt(f) - } -} - -impl Error for InvalidUri {} - -impl fmt::Display for InvalidUriParts { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -impl Error for InvalidUriParts {} - -impl Hash for Uri { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - if !self.scheme.inner.is_none() { - self.scheme.hash(state); - state.write_u8(0xff); - } - - if let Some(auth) = self.authority() { - auth.hash(state); - } - - Hash::hash_slice(self.path().as_bytes(), state); - - if let Some(query) = self.query() { - b'?'.hash(state); - Hash::hash_slice(query.as_bytes(), state); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/uri/path.rs s390-tools-2.33.1/rust-vendor/http/src/uri/path.rs --- s390-tools-2.31.0/rust-vendor/http/src/uri/path.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/uri/path.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,564 
+0,0 @@ -use std::convert::TryFrom; -use std::str::FromStr; -use std::{cmp, fmt, hash, str}; - -use bytes::Bytes; - -use super::{ErrorKind, InvalidUri}; -use crate::byte_str::ByteStr; - -/// Represents the path component of a URI -#[derive(Clone)] -pub struct PathAndQuery { - pub(super) data: ByteStr, - pub(super) query: u16, -} - -const NONE: u16 = ::std::u16::MAX; - -impl PathAndQuery { - // Not public while `bytes` is unstable. - pub(super) fn from_shared(mut src: Bytes) -> Result { - let mut query = NONE; - let mut fragment = None; - - // block for iterator borrow - { - let mut iter = src.as_ref().iter().enumerate(); - - // path ... - for (i, &b) in &mut iter { - // See https://url.spec.whatwg.org/#path-state - match b { - b'?' => { - debug_assert_eq!(query, NONE); - query = i as u16; - break; - } - b'#' => { - fragment = Some(i); - break; - } - - // This is the range of bytes that don't need to be - // percent-encoded in the path. If it should have been - // percent-encoded, then error. - 0x21 | - 0x24..=0x3B | - 0x3D | - 0x40..=0x5F | - 0x61..=0x7A | - 0x7C | - 0x7E => {}, - - // These are code points that are supposed to be - // percent-encoded in the path but there are clients - // out there sending them as is and httparse accepts - // to parse those requests, so they are allowed here - // for parity. - // - // For reference, those are code points that are used - // to send requests with JSON directly embedded in - // the URI path. Yes, those things happen for real. - b'"' | - b'{' | b'}' => {}, - - _ => return Err(ErrorKind::InvalidUriChar.into()), - } - } - - // query ... - if query != NONE { - for (i, &b) in iter { - match b { - // While queries *should* be percent-encoded, most - // bytes are actually allowed... 
- // See https://url.spec.whatwg.org/#query-state - // - // Allowed: 0x21 / 0x24 - 0x3B / 0x3D / 0x3F - 0x7E - 0x21 | - 0x24..=0x3B | - 0x3D | - 0x3F..=0x7E => {}, - - b'#' => { - fragment = Some(i); - break; - } - - _ => return Err(ErrorKind::InvalidUriChar.into()), - } - } - } - } - - if let Some(i) = fragment { - src.truncate(i); - } - - Ok(PathAndQuery { - data: unsafe { ByteStr::from_utf8_unchecked(src) }, - query: query, - }) - } - - /// Convert a `PathAndQuery` from a static string. - /// - /// This function will not perform any copying, however the string is - /// checked to ensure that it is valid. - /// - /// # Panics - /// - /// This function panics if the argument is an invalid path and query. - /// - /// # Examples - /// - /// ``` - /// # use http::uri::*; - /// let v = PathAndQuery::from_static("/hello?world"); - /// - /// assert_eq!(v.path(), "/hello"); - /// assert_eq!(v.query(), Some("world")); - /// ``` - #[inline] - pub fn from_static(src: &'static str) -> Self { - let src = Bytes::from_static(src.as_bytes()); - - PathAndQuery::from_shared(src).unwrap() - } - - /// Attempt to convert a `Bytes` buffer to a `PathAndQuery`. - /// - /// This will try to prevent a copy if the type passed is the type used - /// internally, and will copy the data if it is not. - pub fn from_maybe_shared(src: T) -> Result - where - T: AsRef<[u8]> + 'static, - { - if_downcast_into!(T, Bytes, src, { - return PathAndQuery::from_shared(src); - }); - - PathAndQuery::try_from(src.as_ref()) - } - - pub(super) fn empty() -> Self { - PathAndQuery { - data: ByteStr::new(), - query: NONE, - } - } - - pub(super) fn slash() -> Self { - PathAndQuery { - data: ByteStr::from_static("/"), - query: NONE, - } - } - - pub(super) fn star() -> Self { - PathAndQuery { - data: ByteStr::from_static("*"), - query: NONE, - } - } - - /// Returns the path component - /// - /// The path component is **case sensitive**. 
- /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |--------| - /// | - /// path - /// ``` - /// - /// If the URI is `*` then the path component is equal to `*`. - /// - /// # Examples - /// - /// ``` - /// # use http::uri::*; - /// - /// let path_and_query: PathAndQuery = "/hello/world".parse().unwrap(); - /// - /// assert_eq!(path_and_query.path(), "/hello/world"); - /// ``` - #[inline] - pub fn path(&self) -> &str { - let ret = if self.query == NONE { - &self.data[..] - } else { - &self.data[..self.query as usize] - }; - - if ret.is_empty() { - return "/"; - } - - ret - } - - /// Returns the query string component - /// - /// The query component contains non-hierarchical data that, along with data - /// in the path component, serves to identify a resource within the scope of - /// the URI's scheme and naming authority (if any). The query component is - /// indicated by the first question mark ("?") character and terminated by a - /// number sign ("#") character or by the end of the URI. - /// - /// ```notrust - /// abc://username:password@example.com:123/path/data?key=value&key2=value2#fragid1 - /// |-------------------| - /// | - /// query - /// ``` - /// - /// # Examples - /// - /// With a query string component - /// - /// ``` - /// # use http::uri::*; - /// let path_and_query: PathAndQuery = "/hello/world?key=value&foo=bar".parse().unwrap(); - /// - /// assert_eq!(path_and_query.query(), Some("key=value&foo=bar")); - /// ``` - /// - /// Without a query string component - /// - /// ``` - /// # use http::uri::*; - /// let path_and_query: PathAndQuery = "/hello/world".parse().unwrap(); - /// - /// assert!(path_and_query.query().is_none()); - /// ``` - #[inline] - pub fn query(&self) -> Option<&str> { - if self.query == NONE { - None - } else { - let i = self.query + 1; - Some(&self.data[i as usize..]) - } - } - - /// Returns the path and query as a string component. 
- /// - /// # Examples - /// - /// With a query string component - /// - /// ``` - /// # use http::uri::*; - /// let path_and_query: PathAndQuery = "/hello/world?key=value&foo=bar".parse().unwrap(); - /// - /// assert_eq!(path_and_query.as_str(), "/hello/world?key=value&foo=bar"); - /// ``` - /// - /// Without a query string component - /// - /// ``` - /// # use http::uri::*; - /// let path_and_query: PathAndQuery = "/hello/world".parse().unwrap(); - /// - /// assert_eq!(path_and_query.as_str(), "/hello/world"); - /// ``` - #[inline] - pub fn as_str(&self) -> &str { - let ret = &self.data[..]; - if ret.is_empty() { - return "/"; - } - ret - } -} - -impl<'a> TryFrom<&'a [u8]> for PathAndQuery { - type Error = InvalidUri; - #[inline] - fn try_from(s: &'a [u8]) -> Result { - PathAndQuery::from_shared(Bytes::copy_from_slice(s)) - } -} - -impl<'a> TryFrom<&'a str> for PathAndQuery { - type Error = InvalidUri; - #[inline] - fn try_from(s: &'a str) -> Result { - TryFrom::try_from(s.as_bytes()) - } -} - -impl<'a> TryFrom> for PathAndQuery { - type Error = InvalidUri; - #[inline] - fn try_from(vec: Vec) -> Result { - PathAndQuery::from_shared(vec.into()) - } -} - -impl TryFrom for PathAndQuery { - type Error = InvalidUri; - #[inline] - fn try_from(s: String) -> Result { - PathAndQuery::from_shared(s.into()) - } -} - -impl TryFrom<&String> for PathAndQuery { - type Error = InvalidUri; - #[inline] - fn try_from(s: &String) -> Result { - TryFrom::try_from(s.as_bytes()) - } -} - -impl FromStr for PathAndQuery { - type Err = InvalidUri; - #[inline] - fn from_str(s: &str) -> Result { - TryFrom::try_from(s) - } -} - -impl fmt::Debug for PathAndQuery { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for PathAndQuery { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - if !self.data.is_empty() { - match self.data.as_bytes()[0] { - b'/' | b'*' => write!(fmt, "{}", &self.data[..]), - _ => write!(fmt, 
"/{}", &self.data[..]), - } - } else { - write!(fmt, "/") - } - } -} - -impl hash::Hash for PathAndQuery { - fn hash(&self, state: &mut H) { - self.data.hash(state); - } -} - -// ===== PartialEq / PartialOrd ===== - -impl PartialEq for PathAndQuery { - #[inline] - fn eq(&self, other: &PathAndQuery) -> bool { - self.data == other.data - } -} - -impl Eq for PathAndQuery {} - -impl PartialEq for PathAndQuery { - #[inline] - fn eq(&self, other: &str) -> bool { - self.as_str() == other - } -} - -impl<'a> PartialEq for &'a str { - #[inline] - fn eq(&self, other: &PathAndQuery) -> bool { - self == &other.as_str() - } -} - -impl<'a> PartialEq<&'a str> for PathAndQuery { - #[inline] - fn eq(&self, other: &&'a str) -> bool { - self.as_str() == *other - } -} - -impl PartialEq for str { - #[inline] - fn eq(&self, other: &PathAndQuery) -> bool { - self == other.as_str() - } -} - -impl PartialEq for PathAndQuery { - #[inline] - fn eq(&self, other: &String) -> bool { - self.as_str() == other.as_str() - } -} - -impl PartialEq for String { - #[inline] - fn eq(&self, other: &PathAndQuery) -> bool { - self.as_str() == other.as_str() - } -} - -impl PartialOrd for PathAndQuery { - #[inline] - fn partial_cmp(&self, other: &PathAndQuery) -> Option { - self.as_str().partial_cmp(other.as_str()) - } -} - -impl PartialOrd for PathAndQuery { - #[inline] - fn partial_cmp(&self, other: &str) -> Option { - self.as_str().partial_cmp(other) - } -} - -impl PartialOrd for str { - #[inline] - fn partial_cmp(&self, other: &PathAndQuery) -> Option { - self.partial_cmp(other.as_str()) - } -} - -impl<'a> PartialOrd<&'a str> for PathAndQuery { - #[inline] - fn partial_cmp(&self, other: &&'a str) -> Option { - self.as_str().partial_cmp(*other) - } -} - -impl<'a> PartialOrd for &'a str { - #[inline] - fn partial_cmp(&self, other: &PathAndQuery) -> Option { - self.partial_cmp(&other.as_str()) - } -} - -impl PartialOrd for PathAndQuery { - #[inline] - fn partial_cmp(&self, other: &String) -> Option { - 
self.as_str().partial_cmp(other.as_str()) - } -} - -impl PartialOrd for String { - #[inline] - fn partial_cmp(&self, other: &PathAndQuery) -> Option { - self.as_str().partial_cmp(other.as_str()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn equal_to_self_of_same_path() { - let p1: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); - let p2: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); - assert_eq!(p1, p2); - assert_eq!(p2, p1); - } - - #[test] - fn not_equal_to_self_of_different_path() { - let p1: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); - let p2: PathAndQuery = "/world&foo=bar".parse().unwrap(); - assert_ne!(p1, p2); - assert_ne!(p2, p1); - } - - #[test] - fn equates_with_a_str() { - let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); - assert_eq!(&path_and_query, "/hello/world&foo=bar"); - assert_eq!("/hello/world&foo=bar", &path_and_query); - assert_eq!(path_and_query, "/hello/world&foo=bar"); - assert_eq!("/hello/world&foo=bar", path_and_query); - } - - #[test] - fn not_equal_with_a_str_of_a_different_path() { - let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); - // as a reference - assert_ne!(&path_and_query, "/hello&foo=bar"); - assert_ne!("/hello&foo=bar", &path_and_query); - // without reference - assert_ne!(path_and_query, "/hello&foo=bar"); - assert_ne!("/hello&foo=bar", path_and_query); - } - - #[test] - fn equates_with_a_string() { - let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); - assert_eq!(path_and_query, "/hello/world&foo=bar".to_string()); - assert_eq!("/hello/world&foo=bar".to_string(), path_and_query); - } - - #[test] - fn not_equal_with_a_string_of_a_different_path() { - let path_and_query: PathAndQuery = "/hello/world&foo=bar".parse().unwrap(); - assert_ne!(path_and_query, "/hello&foo=bar".to_string()); - assert_ne!("/hello&foo=bar".to_string(), path_and_query); - } - - #[test] - fn compares_to_self() { - let p1: 
PathAndQuery = "/a/world&foo=bar".parse().unwrap(); - let p2: PathAndQuery = "/b/world&foo=bar".parse().unwrap(); - assert!(p1 < p2); - assert!(p2 > p1); - } - - #[test] - fn compares_with_a_str() { - let path_and_query: PathAndQuery = "/b/world&foo=bar".parse().unwrap(); - // by ref - assert!(&path_and_query < "/c/world&foo=bar"); - assert!("/c/world&foo=bar" > &path_and_query); - assert!(&path_and_query > "/a/world&foo=bar"); - assert!("/a/world&foo=bar" < &path_and_query); - - // by val - assert!(path_and_query < "/c/world&foo=bar"); - assert!("/c/world&foo=bar" > path_and_query); - assert!(path_and_query > "/a/world&foo=bar"); - assert!("/a/world&foo=bar" < path_and_query); - } - - #[test] - fn compares_with_a_string() { - let path_and_query: PathAndQuery = "/b/world&foo=bar".parse().unwrap(); - assert!(path_and_query < "/c/world&foo=bar".to_string()); - assert!("/c/world&foo=bar".to_string() > path_and_query); - assert!(path_and_query > "/a/world&foo=bar".to_string()); - assert!("/a/world&foo=bar".to_string() < path_and_query); - } - - #[test] - fn ignores_valid_percent_encodings() { - assert_eq!("/a%20b", pq("/a%20b?r=1").path()); - assert_eq!("qr=%31", pq("/a/b?qr=%31").query().unwrap()); - } - - #[test] - fn ignores_invalid_percent_encodings() { - assert_eq!("/a%%b", pq("/a%%b?r=1").path()); - assert_eq!("/aaa%", pq("/aaa%").path()); - assert_eq!("/aaa%", pq("/aaa%?r=1").path()); - assert_eq!("/aa%2", pq("/aa%2").path()); - assert_eq!("/aa%2", pq("/aa%2?r=1").path()); - assert_eq!("qr=%3", pq("/a/b?qr=%3").query().unwrap()); - } - - #[test] - fn json_is_fine() { - assert_eq!(r#"/{"bread":"baguette"}"#, pq(r#"/{"bread":"baguette"}"#).path()); - } - - fn pq(s: &str) -> PathAndQuery { - s.parse().expect(&format!("parsing {}", s)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/uri/port.rs s390-tools-2.33.1/rust-vendor/http/src/uri/port.rs --- s390-tools-2.31.0/rust-vendor/http/src/uri/port.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/http/src/uri/port.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,151 +0,0 @@ -use std::fmt; - -use super::{ErrorKind, InvalidUri}; - -/// The port component of a URI. -pub struct Port { - port: u16, - repr: T, -} - -impl Port { - /// Returns the port number as a `u16`. - /// - /// # Examples - /// - /// Port as `u16`. - /// - /// ``` - /// # use http::uri::Authority; - /// let authority: Authority = "example.org:80".parse().unwrap(); - /// - /// let port = authority.port().unwrap(); - /// assert_eq!(port.as_u16(), 80); - /// ``` - pub fn as_u16(&self) -> u16 { - self.port - } -} - -impl Port -where - T: AsRef, -{ - /// Converts a `str` to a port number. - /// - /// The supplied `str` must be a valid u16. - pub(crate) fn from_str(bytes: T) -> Result { - bytes - .as_ref() - .parse::() - .map(|port| Port { port, repr: bytes }) - .map_err(|_| ErrorKind::InvalidPort.into()) - } - - /// Returns the port number as a `str`. - /// - /// # Examples - /// - /// Port as `str`. - /// - /// ``` - /// # use http::uri::Authority; - /// let authority: Authority = "example.org:80".parse().unwrap(); - /// - /// let port = authority.port().unwrap(); - /// assert_eq!(port.as_str(), "80"); - /// ``` - pub fn as_str(&self) -> &str { - self.repr.as_ref() - } -} - -impl fmt::Debug for Port -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Port").field(&self.port).finish() - } -} - -impl fmt::Display for Port { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Use `u16::fmt` so that it respects any formatting flags that - // may have been set (like padding, align, etc). 
- fmt::Display::fmt(&self.port, f) - } -} - -impl From> for u16 { - fn from(port: Port) -> Self { - port.as_u16() - } -} - -impl AsRef for Port -where - T: AsRef, -{ - fn as_ref(&self) -> &str { - self.as_str() - } -} - -impl PartialEq> for Port { - fn eq(&self, other: &Port) -> bool { - self.port == other.port - } -} - -impl PartialEq for Port { - fn eq(&self, other: &u16) -> bool { - self.port == *other - } -} - -impl PartialEq> for u16 { - fn eq(&self, other: &Port) -> bool { - other.port == *self - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn partialeq_port() { - let port_a = Port::from_str("8080").unwrap(); - let port_b = Port::from_str("8080").unwrap(); - assert_eq!(port_a, port_b); - } - - #[test] - fn partialeq_port_different_reprs() { - let port_a = Port { - repr: "8081", - port: 8081, - }; - let port_b = Port { - repr: String::from("8081"), - port: 8081, - }; - assert_eq!(port_a, port_b); - assert_eq!(port_b, port_a); - } - - #[test] - fn partialeq_u16() { - let port = Port::from_str("8080").unwrap(); - // test equals in both directions - assert_eq!(port, 8080); - assert_eq!(8080, port); - } - - #[test] - fn u16_from_port() { - let port = Port::from_str("8080").unwrap(); - assert_eq!(8080, u16::from(port)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/uri/scheme.rs s390-tools-2.33.1/rust-vendor/http/src/uri/scheme.rs --- s390-tools-2.31.0/rust-vendor/http/src/uri/scheme.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/uri/scheme.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,363 +0,0 @@ -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::str::FromStr; - -use bytes::Bytes; - -use super::{ErrorKind, InvalidUri}; -use crate::byte_str::ByteStr; - -/// Represents the scheme component of a URI -#[derive(Clone)] -pub struct Scheme { - pub(super) inner: Scheme2, -} - -#[derive(Clone, Debug)] -pub(super) enum Scheme2> { - None, - Standard(Protocol), - Other(T), -} - 
-#[derive(Copy, Clone, Debug)] -pub(super) enum Protocol { - Http, - Https, -} - -impl Scheme { - /// HTTP protocol scheme - pub const HTTP: Scheme = Scheme { - inner: Scheme2::Standard(Protocol::Http), - }; - - /// HTTP protocol over TLS. - pub const HTTPS: Scheme = Scheme { - inner: Scheme2::Standard(Protocol::Https), - }; - - pub(super) fn empty() -> Self { - Scheme { - inner: Scheme2::None, - } - } - - /// Return a str representation of the scheme - /// - /// # Examples - /// - /// ``` - /// # use http::uri::*; - /// let scheme: Scheme = "http".parse().unwrap(); - /// assert_eq!(scheme.as_str(), "http"); - /// ``` - #[inline] - pub fn as_str(&self) -> &str { - use self::Protocol::*; - use self::Scheme2::*; - - match self.inner { - Standard(Http) => "http", - Standard(Https) => "https", - Other(ref v) => &v[..], - None => unreachable!(), - } - } -} - -impl<'a> TryFrom<&'a [u8]> for Scheme { - type Error = InvalidUri; - #[inline] - fn try_from(s: &'a [u8]) -> Result { - use self::Scheme2::*; - - match Scheme2::parse_exact(s)? { - None => Err(ErrorKind::InvalidScheme.into()), - Standard(p) => Ok(Standard(p).into()), - Other(_) => { - let bytes = Bytes::copy_from_slice(s); - - // Safety: postcondition on parse_exact() means that s and - // hence bytes are valid UTF-8. 
- let string = unsafe { ByteStr::from_utf8_unchecked(bytes) }; - - Ok(Other(Box::new(string)).into()) - } - } - } -} - -impl<'a> TryFrom<&'a str> for Scheme { - type Error = InvalidUri; - #[inline] - fn try_from(s: &'a str) -> Result { - TryFrom::try_from(s.as_bytes()) - } -} - -impl FromStr for Scheme { - type Err = InvalidUri; - - fn from_str(s: &str) -> Result { - TryFrom::try_from(s) - } -} - -impl fmt::Debug for Scheme { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self.as_str(), f) - } -} - -impl fmt::Display for Scheme { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_str()) - } -} - -impl AsRef for Scheme { - #[inline] - fn as_ref(&self) -> &str { - self.as_str() - } -} - -impl PartialEq for Scheme { - fn eq(&self, other: &Scheme) -> bool { - use self::Protocol::*; - use self::Scheme2::*; - - match (&self.inner, &other.inner) { - (&Standard(Http), &Standard(Http)) => true, - (&Standard(Https), &Standard(Https)) => true, - (&Other(ref a), &Other(ref b)) => a.eq_ignore_ascii_case(b), - (&None, _) | (_, &None) => unreachable!(), - _ => false, - } - } -} - -impl Eq for Scheme {} - -/// Case-insensitive equality -/// -/// # Examples -/// -/// ``` -/// # use http::uri::Scheme; -/// let scheme: Scheme = "HTTP".parse().unwrap(); -/// assert_eq!(scheme, *"http"); -/// ``` -impl PartialEq for Scheme { - fn eq(&self, other: &str) -> bool { - self.as_str().eq_ignore_ascii_case(other) - } -} - -/// Case-insensitive equality -impl PartialEq for str { - fn eq(&self, other: &Scheme) -> bool { - other == self - } -} - -/// Case-insensitive hashing -impl Hash for Scheme { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - match self.inner { - Scheme2::None => (), - Scheme2::Standard(Protocol::Http) => state.write_u8(1), - Scheme2::Standard(Protocol::Https) => state.write_u8(2), - Scheme2::Other(ref other) => { - other.len().hash(state); - for &b in other.as_bytes() { - 
state.write_u8(b.to_ascii_lowercase()); - } - } - } - } -} - -impl Scheme2 { - pub(super) fn is_none(&self) -> bool { - match *self { - Scheme2::None => true, - _ => false, - } - } -} - -// Require the scheme to not be too long in order to enable further -// optimizations later. -const MAX_SCHEME_LEN: usize = 64; - -// scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) -// -// SCHEME_CHARS is a table of valid characters in the scheme part of a URI. An -// entry in the table is 0 for invalid characters. For valid characters the -// entry is itself (i.e. the entry for 43 is b'+' because b'+' == 43u8). An -// important characteristic of this table is that all entries above 127 are -// invalid. This makes all of the valid entries a valid single-byte UTF-8 code -// point. This means that a slice of such valid entries is valid UTF-8. -const SCHEME_CHARS: [u8; 256] = [ - // 0 1 2 3 4 5 6 7 8 9 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3x - 0, 0, 0, b'+', 0, b'-', b'.', 0, b'0', b'1', // 4x - b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', 0, // 5x - 0, 0, 0, 0, 0, b'A', b'B', b'C', b'D', b'E', // 6x - b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', // 7x - b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', // 8x - b'Z', 0, 0, 0, 0, 0, 0, b'a', b'b', b'c', // 9x - b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', // 10x - b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', // 11x - b'x', b'y', b'z', 0, 0, 0, b'~', 0, 0, 0, // 12x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 13x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 14x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 15x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 18x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 19x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 21x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 22x - 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, // 23x - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 24x - 0, 0, 0, 0, 0, 0 // 25x -]; - -impl Scheme2 { - // Postcondition: On all Ok() returns, s is valid UTF-8 - fn parse_exact(s: &[u8]) -> Result, InvalidUri> { - match s { - b"http" => Ok(Protocol::Http.into()), - b"https" => Ok(Protocol::Https.into()), - _ => { - if s.len() > MAX_SCHEME_LEN { - return Err(ErrorKind::SchemeTooLong.into()); - } - - // check that each byte in s is a SCHEME_CHARS which implies - // that it is a valid single byte UTF-8 code point. - for &b in s { - match SCHEME_CHARS[b as usize] { - b':' => { - // Don't want :// here - return Err(ErrorKind::InvalidScheme.into()); - } - 0 => { - return Err(ErrorKind::InvalidScheme.into()); - } - _ => {} - } - } - - Ok(Scheme2::Other(())) - } - } - } - - pub(super) fn parse(s: &[u8]) -> Result, InvalidUri> { - if s.len() >= 7 { - // Check for HTTP - if s[..7].eq_ignore_ascii_case(b"http://") { - // Prefix will be striped - return Ok(Protocol::Http.into()); - } - } - - if s.len() >= 8 { - // Check for HTTPs - if s[..8].eq_ignore_ascii_case(b"https://") { - return Ok(Protocol::Https.into()); - } - } - - if s.len() > 3 { - for i in 0..s.len() { - let b = s[i]; - - match SCHEME_CHARS[b as usize] { - b':' => { - // Not enough data remaining - if s.len() < i + 3 { - break; - } - - // Not a scheme - if &s[i + 1..i + 3] != b"//" { - break; - } - - if i > MAX_SCHEME_LEN { - return Err(ErrorKind::SchemeTooLong.into()); - } - - // Return scheme - return Ok(Scheme2::Other(i)); - } - // Invald scheme character, abort - 0 => break, - _ => {} - } - } - } - - Ok(Scheme2::None) - } -} - -impl Protocol { - pub(super) fn len(&self) -> usize { - match *self { - Protocol::Http => 4, - Protocol::Https => 5, - } - } -} - -impl From for Scheme2 { - fn from(src: Protocol) -> Self { - Scheme2::Standard(src) - } -} - -#[doc(hidden)] -impl From for Scheme { - fn from(src: Scheme2) -> Self { - Scheme { inner: src } - } -} - -#[cfg(test)] -mod test { - use super::*; - - 
#[test] - fn scheme_eq_to_str() { - assert_eq!(&scheme("http"), "http"); - assert_eq!(&scheme("https"), "https"); - assert_eq!(&scheme("ftp"), "ftp"); - assert_eq!(&scheme("my+funky+scheme"), "my+funky+scheme"); - } - - #[test] - fn invalid_scheme_is_error() { - Scheme::try_from("my_funky_scheme").expect_err("Unexpectly valid Scheme"); - - // Invalid UTF-8 - Scheme::try_from([0xC0].as_ref()).expect_err("Unexpectly valid Scheme"); - } - - fn scheme(s: &str) -> Scheme { - s.parse().expect(&format!("Invalid scheme: {}", s)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/uri/tests.rs s390-tools-2.33.1/rust-vendor/http/src/uri/tests.rs --- s390-tools-2.31.0/rust-vendor/http/src/uri/tests.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/uri/tests.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,519 +0,0 @@ -use std::str::FromStr; - -use super::{ErrorKind, InvalidUri, Port, Uri, URI_CHARS}; - -#[test] -fn test_char_table() { - for (i, &v) in URI_CHARS.iter().enumerate() { - if v != 0 { - assert_eq!(i, v as usize); - } - } -} - -macro_rules! part { - ($s:expr) => { - Some(&$s.parse().unwrap()) - }; -} - -macro_rules! 
test_parse { - ( - $test_name:ident, - $str:expr, - $alt:expr, - $($method:ident = $value:expr,)* - ) => ( - #[test] - fn $test_name() { - let orig_str = $str; - let uri = match Uri::from_str(orig_str) { - Ok(uri) => uri, - Err(err) => { - panic!("parse error {:?} from {:?}", err, orig_str); - }, - }; - $( - assert_eq!(uri.$method(), $value, "{}: uri = {:?}", stringify!($method), uri); - )+ - assert_eq!(uri, orig_str, "partial eq to original str"); - assert_eq!(uri, uri.clone(), "clones are equal"); - - let new_str = uri.to_string(); - let new_uri = Uri::from_str(&new_str).expect("to_string output parses again as a Uri"); - assert_eq!(new_uri, orig_str, "round trip still equals original str"); - - const ALT: &'static [&'static str] = &$alt; - - for &alt in ALT.iter() { - let other: Uri = alt.parse().unwrap(); - assert_eq!(uri, *alt); - assert_eq!(uri, other); - } - } - ); -} - -test_parse! { - test_uri_parse_path_and_query, - "/some/path/here?and=then&hello#and-bye", - [], - - scheme = None, - authority = None, - path = "/some/path/here", - query = Some("and=then&hello"), - host = None, -} - -test_parse! { - test_uri_parse_absolute_form, - "http://127.0.0.1:61761/chunks", - [], - - scheme = part!("http"), - authority = part!("127.0.0.1:61761"), - path = "/chunks", - query = None, - host = Some("127.0.0.1"), - port = Port::from_str("61761").ok(), -} - -test_parse! { - test_uri_parse_absolute_form_without_path, - "https://127.0.0.1:61761", - ["https://127.0.0.1:61761/"], - - scheme = part!("https"), - authority = part!("127.0.0.1:61761"), - path = "/", - query = None, - host = Some("127.0.0.1"), - port = Port::from_str("61761").ok(), -} - -test_parse! { - test_uri_parse_asterisk_form, - "*", - [], - - scheme = None, - authority = None, - path = "*", - query = None, - host = None, -} - -test_parse! 
{ - test_uri_parse_authority_no_port, - "localhost", - ["LOCALHOST", "LocaLHOSt"], - - scheme = None, - authority = part!("localhost"), - path = "", - query = None, - port = None, - host = Some("localhost"), -} - -test_parse! { - test_uri_authority_only_one_character_issue_197, - "S", - [], - - scheme = None, - authority = part!("S"), - path = "", - query = None, - port = None, - host = Some("S"), -} - -test_parse! { - test_uri_parse_authority_form, - "localhost:3000", - ["localhosT:3000"], - - scheme = None, - authority = part!("localhost:3000"), - path = "", - query = None, - host = Some("localhost"), - port = Port::from_str("3000").ok(), -} - -test_parse! { - test_uri_parse_absolute_with_default_port_http, - "http://127.0.0.1:80", - ["http://127.0.0.1:80/"], - - scheme = part!("http"), - authority = part!("127.0.0.1:80"), - host = Some("127.0.0.1"), - path = "/", - query = None, - port = Port::from_str("80").ok(), -} - -test_parse! { - test_uri_parse_absolute_with_default_port_https, - "https://127.0.0.1:443", - ["https://127.0.0.1:443/"], - - scheme = part!("https"), - authority = part!("127.0.0.1:443"), - host = Some("127.0.0.1"), - path = "/", - query = None, - port = Port::from_str("443").ok(), -} - -test_parse! { - test_uri_parse_fragment_questionmark, - "http://127.0.0.1/#?", - [], - - scheme = part!("http"), - authority = part!("127.0.0.1"), - host = Some("127.0.0.1"), - path = "/", - query = None, - port = None, -} - -test_parse! { - test_uri_parse_path_with_terminating_questionmark, - "http://127.0.0.1/path?", - [], - - scheme = part!("http"), - authority = part!("127.0.0.1"), - path = "/path", - query = Some(""), - port = None, -} - -test_parse! { - test_uri_parse_absolute_form_with_empty_path_and_nonempty_query, - "http://127.0.0.1?foo=bar", - [], - - scheme = part!("http"), - authority = part!("127.0.0.1"), - path = "/", - query = Some("foo=bar"), - port = None, -} - -test_parse! 
{ - test_uri_parse_absolute_form_with_empty_path_and_fragment_with_slash, - "http://127.0.0.1#foo/bar", - [], - - scheme = part!("http"), - authority = part!("127.0.0.1"), - path = "/", - query = None, - port = None, -} - -test_parse! { - test_uri_parse_absolute_form_with_empty_path_and_fragment_with_questionmark, - "http://127.0.0.1#foo?bar", - [], - - scheme = part!("http"), - authority = part!("127.0.0.1"), - path = "/", - query = None, - port = None, -} - -test_parse! { - test_uri_parse_long_host_with_no_scheme, - "thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost", - [], - - scheme = None, - authority = part!("thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost"), - path = "", - query = None, - port = None, -} - -test_parse! { - test_uri_parse_long_host_with_port_and_no_scheme, - "thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost:1234", - [], - - scheme = None, - authority = part!("thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost:1234"), - path = "", - query = None, - port = Port::from_str("1234").ok(), -} - -test_parse! { - test_userinfo1, - "http://a:b@127.0.0.1:1234/", - [], - - scheme = part!("http"), - authority = part!("a:b@127.0.0.1:1234"), - host = Some("127.0.0.1"), - path = "/", - query = None, - port = Port::from_str("1234").ok(), -} - -test_parse! { - test_userinfo2, - "http://a:b@127.0.0.1/", - [], - - scheme = part!("http"), - authority = part!("a:b@127.0.0.1"), - host = Some("127.0.0.1"), - path = "/", - query = None, - port = None, -} - -test_parse! { - test_userinfo3, - "http://a@127.0.0.1/", - [], - - scheme = part!("http"), - authority = part!("a@127.0.0.1"), - host = Some("127.0.0.1"), - path = "/", - query = None, - port = None, -} - -test_parse! 
{ - test_userinfo_with_port, - "user@localhost:3000", - [], - - scheme = None, - authority = part!("user@localhost:3000"), - path = "", - query = None, - host = Some("localhost"), - port = Port::from_str("3000").ok(), -} - -test_parse! { - test_userinfo_pass_with_port, - "user:pass@localhost:3000", - [], - - scheme = None, - authority = part!("user:pass@localhost:3000"), - path = "", - query = None, - host = Some("localhost"), - port = Port::from_str("3000").ok(), -} - -test_parse! { - test_ipv6, - "http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]/", - [], - - scheme = part!("http"), - authority = part!("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]"), - host = Some("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]"), - path = "/", - query = None, - port = None, -} - -test_parse! { - test_ipv6_shorthand, - "http://[::1]/", - [], - - scheme = part!("http"), - authority = part!("[::1]"), - host = Some("[::1]"), - path = "/", - query = None, - port = None, -} - -test_parse! { - test_ipv6_shorthand2, - "http://[::]/", - [], - - scheme = part!("http"), - authority = part!("[::]"), - host = Some("[::]"), - path = "/", - query = None, - port = None, -} - -test_parse! { - test_ipv6_shorthand3, - "http://[2001:db8::2:1]/", - [], - - scheme = part!("http"), - authority = part!("[2001:db8::2:1]"), - host = Some("[2001:db8::2:1]"), - path = "/", - query = None, - port = None, -} - -test_parse! { - test_ipv6_with_port, - "http://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8008/", - [], - - scheme = part!("http"), - authority = part!("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8008"), - host = Some("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]"), - path = "/", - query = None, - port = Port::from_str("8008").ok(), -} - -test_parse! { - test_percentage_encoded_path, - "/echo/abcdefgh_i-j%20/abcdefg_i-j%20478", - [], - - scheme = None, - authority = None, - host = None, - path = "/echo/abcdefgh_i-j%20/abcdefg_i-j%20478", - query = None, - port = None, -} - -test_parse! 
{ - test_path_permissive, - "/foo=bar|baz\\^~%", - [], - - path = "/foo=bar|baz\\^~%", -} - -test_parse! { - test_query_permissive, - "/?foo={bar|baz}\\^`", - [], - - query = Some("foo={bar|baz}\\^`"), -} - -#[test] -fn test_uri_parse_error() { - fn err(s: &str) { - Uri::from_str(s).unwrap_err(); - } - - err("http://"); - err("htt:p//host"); - err("hyper.rs/"); - err("hyper.rs?key=val"); - err("?key=val"); - err("localhost/"); - err("localhost?key=val"); - err("\0"); - err("http://[::1"); - err("http://::1]"); - err("localhost:8080:3030"); - err("@"); - err("http://username:password@/wut"); - - // illegal queries - err("/?foo\rbar"); - err("/?foo\nbar"); - err("/?<"); - err("/?>"); -} - -#[test] -fn test_max_uri_len() { - let mut uri = vec![]; - uri.extend(b"http://localhost/"); - uri.extend(vec![b'a'; 70 * 1024]); - - let uri = String::from_utf8(uri).unwrap(); - let res: Result = uri.parse(); - - assert_eq!(res.unwrap_err().0, ErrorKind::TooLong); -} - -#[test] -fn test_overflowing_scheme() { - let mut uri = vec![]; - uri.extend(vec![b'a'; 256]); - uri.extend(b"://localhost/"); - - let uri = String::from_utf8(uri).unwrap(); - let res: Result = uri.parse(); - - assert_eq!(res.unwrap_err().0, ErrorKind::SchemeTooLong); -} - -#[test] -fn test_max_length_scheme() { - let mut uri = vec![]; - uri.extend(vec![b'a'; 64]); - uri.extend(b"://localhost/"); - - let uri = String::from_utf8(uri).unwrap(); - let uri: Uri = uri.parse().unwrap(); - - assert_eq!(uri.scheme_str().unwrap().len(), 64); -} - -#[test] -fn test_uri_to_path_and_query() { - let cases = vec![ - ("/", "/"), - ("/foo?bar", "/foo?bar"), - ("/foo?bar#nope", "/foo?bar"), - ("http://hyper.rs", "/"), - ("http://hyper.rs/", "/"), - ("http://hyper.rs/path", "/path"), - ("http://hyper.rs?query", "/?query"), - ("*", "*"), - ]; - - for case in cases { - let uri = Uri::from_str(case.0).unwrap(); - let s = uri.path_and_query().unwrap().to_string(); - - assert_eq!(s, case.1); - } -} - -#[test] -fn 
test_authority_uri_parts_round_trip() { - let s = "hyper.rs"; - let uri = Uri::from_str(s).expect("first parse"); - assert_eq!(uri, s); - assert_eq!(uri.to_string(), s); - - let parts = uri.into_parts(); - let uri2 = Uri::from_parts(parts).expect("from_parts"); - assert_eq!(uri2, s); - assert_eq!(uri2.to_string(), s); -} - -#[test] -fn test_partial_eq_path_with_terminating_questionmark() { - let a = "/path"; - let uri = Uri::from_str("/path?").expect("first parse"); - - assert_eq!(uri, a); -} diff -Nru s390-tools-2.31.0/rust-vendor/http/src/version.rs s390-tools-2.33.1/rust-vendor/http/src/version.rs --- s390-tools-2.31.0/rust-vendor/http/src/version.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http/src/version.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -//! HTTP version -//! -//! This module contains a definition of the `Version` type. The `Version` -//! type is intended to be accessed through the root of the crate -//! (`http::Version`) rather than this module. -//! -//! The `Version` type contains constants that represent the various versions -//! of the HTTP protocol. -//! -//! # Examples -//! -//! ``` -//! use http::Version; -//! -//! let http11 = Version::HTTP_11; -//! let http2 = Version::HTTP_2; -//! assert!(http11 != http2); -//! -//! println!("{:?}", http2); -//! ``` - -use std::fmt; - -/// Represents a version of the HTTP spec. 
-#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash)] -pub struct Version(Http); - -impl Version { - /// `HTTP/0.9` - pub const HTTP_09: Version = Version(Http::Http09); - - /// `HTTP/1.0` - pub const HTTP_10: Version = Version(Http::Http10); - - /// `HTTP/1.1` - pub const HTTP_11: Version = Version(Http::Http11); - - /// `HTTP/2.0` - pub const HTTP_2: Version = Version(Http::H2); - - /// `HTTP/3.0` - pub const HTTP_3: Version = Version(Http::H3); -} - -#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash)] -enum Http { - Http09, - Http10, - Http11, - H2, - H3, - __NonExhaustive, -} - -impl Default for Version { - #[inline] - fn default() -> Version { - Version::HTTP_11 - } -} - -impl fmt::Debug for Version { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use self::Http::*; - - f.write_str(match self.0 { - Http09 => "HTTP/0.9", - Http10 => "HTTP/1.0", - Http11 => "HTTP/1.1", - H2 => "HTTP/2.0", - H3 => "HTTP/3.0", - __NonExhaustive => unreachable!(), - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/httparse/benches/parse.rs s390-tools-2.33.1/rust-vendor/httparse/benches/parse.rs --- s390-tools-2.31.0/rust-vendor/httparse/benches/parse.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/benches/parse.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ - -use std::time::Duration; - -use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; - -const REQ_SHORT: &[u8] = b"\ -GET / HTTP/1.0\r\n\ -Host: example.com\r\n\ -Cookie: session=60; user_id=1\r\n\r\n"; - -const REQ: &[u8] = b"\ -GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\ -Host: www.kittyhell.com\r\n\ -User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\ -Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\ -Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\ -Accept-Encoding: gzip,deflate\r\n\ 
-Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\ -Keep-Alive: 115\r\n\ -Connection: keep-alive\r\n\ -Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n"; - -fn req(c: &mut Criterion) { - let mut headers = [httparse::Header{ name: "", value: &[] }; 16]; - let mut req = httparse::Request::new(&mut headers); - - c.benchmark_group("req") - .throughput(Throughput::Bytes(REQ.len() as u64)) - .bench_function("req", |b| b.iter(|| { - assert_eq!(black_box(req.parse(REQ).unwrap()), httparse::Status::Complete(REQ.len())); - })); -} - -fn req_short(c: &mut Criterion) { - let mut headers = [httparse::Header{ name: "", value: &[] }; 16]; - let mut req = httparse::Request::new(&mut headers); - - c.benchmark_group("req_short") - .throughput(Throughput::Bytes(REQ_SHORT.len() as u64)) - .bench_function("req_short", |b| b.iter(|| { - assert_eq!(black_box(req.parse(REQ_SHORT).unwrap()), httparse::Status::Complete(REQ_SHORT.len())); - })); -} - -const RESP_SHORT: &[u8] = b"\ -HTTP/1.0 200 OK\r\n\ -Date: Wed, 21 Oct 2015 07:28:00 GMT\r\n\ -Set-Cookie: session=60; user_id=1\r\n\r\n"; - -// These particular headers don't all make semantic sense for a response, but they're syntactically valid. 
-const RESP: &[u8] = b"\ -HTTP/1.1 200 OK\r\n\ -Date: Wed, 21 Oct 2015 07:28:00 GMT\r\n\ -Host: www.kittyhell.com\r\n\ -User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\ -Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\ -Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\ -Accept-Encoding: gzip,deflate\r\n\ -Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\ -Keep-Alive: 115\r\n\ -Connection: keep-alive\r\n\ -Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n"; - -fn resp(c: &mut Criterion) { - let mut headers = [httparse::Header{ name: "", value: &[] }; 16]; - let mut resp = httparse::Response::new(&mut headers); - - c.benchmark_group("resp") - .throughput(Throughput::Bytes(RESP.len() as u64)) - .bench_function("resp", |b| b.iter(|| { - assert_eq!(black_box(resp.parse(RESP).unwrap()), httparse::Status::Complete(RESP.len())); - })); -} - -fn resp_short(c: &mut Criterion) { - let mut headers = [httparse::Header{ name: "", value: &[] }; 16]; - let mut resp = httparse::Response::new(&mut headers); - - c.benchmark_group("resp_short") - .throughput(Throughput::Bytes(RESP_SHORT.len() as u64)) - .bench_function("resp_short", |b| b.iter(|| { - assert_eq!(black_box(resp.parse(RESP_SHORT).unwrap()), httparse::Status::Complete(RESP_SHORT.len())); - })); -} - -criterion_group!{ - name = benches; - config = Criterion::default().sample_size(100).measurement_time(Duration::from_secs(10)); - targets = req, req_short, resp, resp_short -} -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/httparse/build.rs s390-tools-2.33.1/rust-vendor/httparse/build.rs --- s390-tools-2.31.0/rust-vendor/httparse/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/httparse/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,165 +0,0 @@ -use std::env; -//use std::ffi::OsString; -//use std::process::Command; - -fn main() { - // We don't currently need to check the Version anymore... - // But leaving this in place in case we need to in the future. - /* - let rustc = env::var_os("RUSTC").unwrap_or(OsString::from("rustc")); - let output = Command::new(&rustc) - .arg("--version") - .output() - .expect("failed to check 'rustc --version'") - .stdout; - - let version = String::from_utf8(output) - .expect("rustc version output should be utf-8"); - */ - - enable_new_features(/*&version*/); -} - -fn enable_new_features(/*raw_version: &str*/) { - /* - let version = match Version::parse(raw_version) { - Ok(version) => version, - Err(err) => { - println!("cargo:warning=failed to parse `rustc --version`: {}", err); - return; - } - }; - */ - - enable_simd(/*version*/); -} - -fn enable_simd(/*version: Version*/) { - if env::var_os("CARGO_FEATURE_STD").is_none() { - println!("cargo:warning=building for no_std disables httparse SIMD"); - return; - } - if env::var_os("CARGO_CFG_MIRI").is_some() { - println!("cargo:warning=building for Miri disables httparse SIMD"); - return; - } - - let env_disable = "CARGO_CFG_HTTPARSE_DISABLE_SIMD"; - if var_is(env_disable, "1") { - println!("cargo:warning=detected {} environment variable, disabling SIMD", env_disable); - return; - } - - println!("cargo:rustc-cfg=httparse_simd"); - - // cfg(target_feature) isn't stable yet, but CARGO_CFG_TARGET_FEATURE has - // a list... We aren't doing anything unsafe, since the is_x86_feature_detected - // macro still checks in the actual lib, BUT! - // - // By peeking at the list here, we can change up slightly how we do feature - // detection in the lib. If our features aren't in the feature list, we - // stick with a cached runtime detection strategy. 
- // - // But if the features *are* in the list, we benefit from removing our cache, - // since the compiler will eliminate several branches with its internal - // cfg(target_feature) usage. - - - let env_runtime_only = "CARGO_CFG_HTTPARSE_DISABLE_SIMD_COMPILETIME"; - if var_is(env_runtime_only, "1") { - println!("cargo:warning=detected {} environment variable, using runtime SIMD detection only", env_runtime_only); - return; - } - let feature_list = match env::var_os("CARGO_CFG_TARGET_FEATURE") { - Some(var) => match var.into_string() { - Ok(s) => s, - Err(_) => { - println!("cargo:warning=CARGO_CFG_TARGET_FEATURE was not valid utf-8"); - return; - }, - }, - None => { - println!("cargo:warning=CARGO_CFG_TARGET_FEATURE was not set"); - return - }, - }; - - let mut saw_sse42 = false; - let mut saw_avx2 = false; - - for feature in feature_list.split(',') { - let feature = feature.trim(); - if !saw_sse42 && feature == "sse4.2" { - saw_sse42 = true; - println!("cargo:rustc-cfg=httparse_simd_target_feature_sse42"); - } - - if !saw_avx2 && feature == "avx2" { - saw_avx2 = true; - println!("cargo:rustc-cfg=httparse_simd_target_feature_avx2"); - } - } -} - -/* -#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)] -struct Version { - major: u32, - minor: u32, - patch: u32, -} - -impl Version { - fn parse(mut s: &str) -> Result { - if !s.starts_with("rustc ") { - return Err(format!("unrecognized version string: {}", s)); - } - s = &s["rustc ".len()..]; - - let parts: Vec<&str> = s.split(".").collect(); - if parts.len() < 3 { - return Err(format!("not enough version parts: {:?}", parts)); - } - - let mut num = String::new(); - for c in parts[0].chars() { - if !c.is_digit(10) { - break; - } - num.push(c); - } - let major = num.parse::().map_err(|e| e.to_string())?; - - num.clear(); - for c in parts[1].chars() { - if !c.is_digit(10) { - break; - } - num.push(c); - } - let minor = num.parse::().map_err(|e| e.to_string())?; - - num.clear(); - for c in parts[2].chars() { - if 
!c.is_digit(10) { - break; - } - num.push(c); - } - let patch = num.parse::().map_err(|e| e.to_string())?; - - Ok(Version { - major: major, - minor: minor, - patch: patch, - }) - } -} -*/ - -fn var_is(key: &str, val: &str) -> bool { - match env::var(key) { - Ok(v) => v == val, - Err(_) => false, - } -} diff -Nru s390-tools-2.31.0/rust-vendor/httparse/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/httparse/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/httparse/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/httparse/Cargo.toml s390-tools-2.33.1/rust-vendor/httparse/Cargo.toml --- s390-tools-2.31.0/rust-vendor/httparse/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,52 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "httparse" -version = "1.8.0" -authors = ["Sean McArthur "] -build = "build.rs" -description = "A tiny, safe, speedy, zero-copy HTTP/1.x parser." 
-documentation = "https://docs.rs/httparse" -readme = "README.md" -keywords = [ - "http", - "parser", - "no_std", -] -categories = [ - "network-programming", - "no-std", - "parser-implementations", - "web-programming", -] -license = "MIT/Apache-2.0" -repository = "https://github.com/seanmonstar/httparse" - -[profile.bench] -opt-level = 3 -lto = true -codegen-units = 1 - -[lib] -bench = false - -[[bench]] -name = "parse" -harness = false - -[dev-dependencies.criterion] -version = "0.3.5" - -[features] -default = ["std"] -std = [] diff -Nru s390-tools-2.31.0/rust-vendor/httparse/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/httparse/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/httparse/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/httparse/LICENSE-MIT s390-tools-2.33.1/rust-vendor/httparse/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/httparse/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,20 +0,0 @@ -Copyright (c) 2015-2021 Sean McArthur - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- diff -Nru s390-tools-2.31.0/rust-vendor/httparse/README.md s390-tools-2.33.1/rust-vendor/httparse/README.md --- s390-tools-2.31.0/rust-vendor/httparse/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -# httparse - -[![crates.io](https://img.shields.io/crates/v/httparse.svg)](https://crates.io/crates/httparse) -[![Released API docs](https://docs.rs/httparse/badge.svg)](https://docs.rs/httparse) -[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE-MIT) -[![CI](https://github.com/seanmonstar/httparse/workflows/CI/badge.svg)](https://github.com/seanmonstar/httparse/actions?query=workflow%3ACI) -[![Discord chat][discord-badge]][discord-url] - -A push parser for the HTTP 1.x protocol. Avoids allocations. No copy. **Fast.** - -Works with `no_std`, simply disable the `std` Cargo feature. - -[Changelog](https://github.com/seanmonstar/httparse/releases) - - -[discord-badge]: https://img.shields.io/discord/500028886025895936.svg?logo=discord -[discord-url]: https://discord.gg/kkwpueZ - -## Usage - -```rust -let mut headers = [httparse::EMPTY_HEADER; 64]; -let mut req = httparse::Request::new(&mut headers); - -let buf = b"GET /index.html HTTP/1.1\r\nHost"; -assert!(req.parse(buf)?.is_partial()); - -// a partial request, so we try again once we have more data - -let buf = b"GET /index.html HTTP/1.1\r\nHost: example.domain\r\n\r\n"; -assert!(req.parse(buf)?.is_complete()); -``` - -## License - -Licensed under either of - -- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or 
conditions. diff -Nru s390-tools-2.31.0/rust-vendor/httparse/src/iter.rs s390-tools-2.33.1/rust-vendor/httparse/src/iter.rs --- s390-tools-2.31.0/rust-vendor/httparse/src/iter.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/src/iter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,108 +0,0 @@ -use core::slice; -use core::convert::TryInto; -use core::convert::TryFrom; - -pub struct Bytes<'a> { - slice: &'a [u8], - pos: usize -} - -impl<'a> Bytes<'a> { - #[inline] - pub fn new(slice: &'a [u8]) -> Bytes<'a> { - Bytes { - slice, - pos: 0 - } - } - - #[inline] - pub fn pos(&self) -> usize { - self.pos - } - - #[inline] - pub fn peek(&self) -> Option { - self.peek_ahead(0) - } - - #[inline] - pub fn peek_ahead(&self, n: usize) -> Option { - self.slice.get(self.pos + n).copied() - } - - #[inline] - pub fn peek_n>(&self, n: usize) -> Option { - self.slice.get(self.pos..self.pos + n)?.try_into().ok() - } - - #[inline] - pub unsafe fn bump(&mut self) { - debug_assert!(self.pos < self.slice.len(), "overflow"); - self.pos += 1; - } - - #[allow(unused)] - #[inline] - pub unsafe fn advance(&mut self, n: usize) { - debug_assert!(self.pos + n <= self.slice.len(), "overflow"); - self.pos += n; - } - - #[inline] - pub fn len(&self) -> usize { - self.slice.len() - } - - #[inline] - pub fn slice(&mut self) -> &'a [u8] { - // not moving position at all, so it's safe - unsafe { - self.slice_skip(0) - } - } - - #[inline] - pub unsafe fn slice_skip(&mut self, skip: usize) -> &'a [u8] { - debug_assert!(self.pos >= skip); - let head_pos = self.pos - skip; - let ptr = self.slice.as_ptr(); - let head = slice::from_raw_parts(ptr, head_pos); - let tail = slice::from_raw_parts(ptr.add(self.pos), self.slice.len() - self.pos); - self.pos = 0; - self.slice = tail; - head - } - - #[inline] - pub unsafe fn advance_and_commit(&mut self, n: usize) { - debug_assert!(self.pos + n <= self.slice.len(), "overflow"); - self.pos += n; - let ptr = self.slice.as_ptr(); - let tail = 
slice::from_raw_parts(ptr.add(n), self.slice.len() - n); - self.pos = 0; - self.slice = tail; - } -} - -impl<'a> AsRef<[u8]> for Bytes<'a> { - #[inline] - fn as_ref(&self) -> &[u8] { - &self.slice[self.pos..] - } -} - -impl<'a> Iterator for Bytes<'a> { - type Item = u8; - - #[inline] - fn next(&mut self) -> Option { - if self.slice.len() > self.pos { - let b = unsafe { *self.slice.get_unchecked(self.pos) }; - self.pos += 1; - Some(b) - } else { - None - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/httparse/src/lib.rs s390-tools-2.33.1/rust-vendor/httparse/src/lib.rs --- s390-tools-2.31.0/rust-vendor/httparse/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2236 +0,0 @@ -#![cfg_attr(not(feature = "std"), no_std)] -#![deny(missing_docs)] -#![cfg_attr(test, deny(warnings))] - -//! # httparse -//! -//! A push library for parsing HTTP/1.x requests and responses. -//! -//! The focus is on speed and safety. Unsafe code is used to keep parsing fast, -//! but unsafety is contained in a submodule, with invariants enforced. The -//! parsing internals use an `Iterator` instead of direct indexing, while -//! skipping bounds checks. -//! -//! With Rust 1.27.0 or later, support for SIMD is enabled automatically. -//! If building an executable to be run on multiple platforms, and thus -//! not passing `target_feature` or `target_cpu` flags to the compiler, -//! runtime detection can still detect SSE4.2 or AVX2 support to provide -//! massive wins. -//! -//! If compiling for a specific target, remembering to include -//! `-C target_cpu=native` allows the detection to become compile time checks, -//! making it *even* faster. - -use core::{fmt, result, str}; -use core::mem::{self, MaybeUninit}; - -use crate::iter::Bytes; - -mod iter; -#[macro_use] mod macros; -mod simd; - -/// Determines if byte is a token char. -/// -/// > ```notrust -/// > token = 1*tchar -/// > -/// > tchar = "!" 
/ "#" / "$" / "%" / "&" / "'" / "*" -/// > / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" -/// > / DIGIT / ALPHA -/// > ; any VCHAR, except delimiters -/// > ``` -#[inline] -fn is_token(b: u8) -> bool { - b > 0x1F && b < 0x7F -} - -// ASCII codes to accept URI string. -// i.e. A-Z a-z 0-9 !#$%&'*+-._();:@=,/?[]~^ -// TODO: Make a stricter checking for URI string? -static URI_MAP: [bool; 256] = byte_map![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// \0 \n - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -// commands - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -// \w ! " # $ % & ' ( ) * + , - . / - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, -// 0 1 2 3 4 5 6 7 8 9 : ; < = > ? - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -// @ A B C D E F G H I J K L M N O - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -// P Q R S T U V W X Y Z [ \ ] ^ _ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -// ` a b c d e f g h i j k l m n o - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, -// p q r s t u v w x y z { | } ~ del -// ====== Extended ASCII (aka. 
obs-text) ====== - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -]; - -#[inline] -fn is_uri_token(b: u8) -> bool { - URI_MAP[b as usize] -} - -static HEADER_NAME_MAP: [bool; 256] = byte_map![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -]; - -#[inline] -fn is_header_name_token(b: u8) -> bool { - HEADER_NAME_MAP[b as usize] -} - -static HEADER_VALUE_MAP: [bool; 256] = byte_map![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -]; - - -#[inline] -fn is_header_value_token(b: u8) -> bool { - HEADER_VALUE_MAP[b as usize] -} - -/// An error in parsing. -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum Error { - /// Invalid byte in header name. - HeaderName, - /// Invalid byte in header value. - HeaderValue, - /// Invalid byte in new line. - NewLine, - /// Invalid byte in Response status. - Status, - /// Invalid byte where token is required. - Token, - /// Parsed more headers than provided buffer can contain. - TooManyHeaders, - /// Invalid byte in HTTP version. - Version, -} - -impl Error { - #[inline] - fn description_str(&self) -> &'static str { - match *self { - Error::HeaderName => "invalid header name", - Error::HeaderValue => "invalid header value", - Error::NewLine => "invalid new line", - Error::Status => "invalid response status", - Error::Token => "invalid token", - Error::TooManyHeaders => "too many headers", - Error::Version => "invalid HTTP version", - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.description_str()) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for Error { - fn description(&self) -> &str { - self.description_str() - } -} - -/// An error in parsing a chunk size. -// Note: Move this into the error enum once v2.0 is released. -#[derive(Debug, PartialEq, Eq)] -pub struct InvalidChunkSize; - -impl fmt::Display for InvalidChunkSize { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("invalid chunk size") - } -} - -/// A Result of any parsing action. -/// -/// If the input is invalid, an `Error` will be returned. 
Note that incomplete -/// data is not considered invalid, and so will not return an error, but rather -/// a `Ok(Status::Partial)`. -pub type Result = result::Result, Error>; - -/// The result of a successful parse pass. -/// -/// `Complete` is used when the buffer contained the complete value. -/// `Partial` is used when parsing did not reach the end of the expected value, -/// but no invalid data was found. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum Status { - /// The completed result. - Complete(T), - /// A partial result. - Partial -} - -impl Status { - /// Convenience method to check if status is complete. - #[inline] - pub fn is_complete(&self) -> bool { - match *self { - Status::Complete(..) => true, - Status::Partial => false - } - } - - /// Convenience method to check if status is partial. - #[inline] - pub fn is_partial(&self) -> bool { - match *self { - Status::Complete(..) => false, - Status::Partial => true - } - } - - /// Convenience method to unwrap a Complete value. Panics if the status is - /// `Partial`. - #[inline] - pub fn unwrap(self) -> T { - match self { - Status::Complete(t) => t, - Status::Partial => panic!("Tried to unwrap Status::Partial") - } - } -} - -/// Parser configuration. -#[derive(Clone, Debug, Default)] -pub struct ParserConfig { - allow_spaces_after_header_name_in_responses: bool, - allow_obsolete_multiline_headers_in_responses: bool, - allow_multiple_spaces_in_request_line_delimiters: bool, - allow_multiple_spaces_in_response_status_delimiters: bool, - ignore_invalid_headers_in_responses: bool, -} - -impl ParserConfig { - /// Sets whether spaces and tabs should be allowed after header names in responses. - pub fn allow_spaces_after_header_name_in_responses( - &mut self, - value: bool, - ) -> &mut Self { - self.allow_spaces_after_header_name_in_responses = value; - self - } - - /// Sets whether multiple spaces are allowed as delimiters in request lines. 
- /// - /// # Background - /// - /// The [latest version of the HTTP/1.1 spec][spec] allows implementations to parse multiple - /// whitespace characters in place of the `SP` delimiters in the request line, including: - /// - /// > SP, HTAB, VT (%x0B), FF (%x0C), or bare CR - /// - /// This option relaxes the parser to allow for multiple spaces, but does *not* allow the - /// request line to contain the other mentioned whitespace characters. - /// - /// [spec]: https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.3.p.3 - pub fn allow_multiple_spaces_in_request_line_delimiters(&mut self, value: bool) -> &mut Self { - self.allow_multiple_spaces_in_request_line_delimiters = value; - self - } - - /// Whether multiple spaces are allowed as delimiters in request lines. - pub fn multiple_spaces_in_request_line_delimiters_are_allowed(&self) -> bool { - self.allow_multiple_spaces_in_request_line_delimiters - } - - /// Sets whether multiple spaces are allowed as delimiters in response status lines. - /// - /// # Background - /// - /// The [latest version of the HTTP/1.1 spec][spec] allows implementations to parse multiple - /// whitespace characters in place of the `SP` delimiters in the response status line, - /// including: - /// - /// > SP, HTAB, VT (%x0B), FF (%x0C), or bare CR - /// - /// This option relaxes the parser to allow for multiple spaces, but does *not* allow the status - /// line to contain the other mentioned whitespace characters. - /// - /// [spec]: https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.3 - pub fn allow_multiple_spaces_in_response_status_delimiters(&mut self, value: bool) -> &mut Self { - self.allow_multiple_spaces_in_response_status_delimiters = value; - self - } - - /// Whether multiple spaces are allowed as delimiters in response status lines. 
- pub fn multiple_spaces_in_response_status_delimiters_are_allowed(&self) -> bool { - self.allow_multiple_spaces_in_response_status_delimiters - } - - /// Sets whether obsolete multiline headers should be allowed. - /// - /// This is an obsolete part of HTTP/1. Use at your own risk. If you are - /// building an HTTP library, the newlines (`\r` and `\n`) should be - /// replaced by spaces before handing the header value to the user. - /// - /// # Example - /// - /// ```rust - /// let buf = b"HTTP/1.1 200 OK\r\nFolded-Header: hello\r\n there \r\n\r\n"; - /// let mut headers = [httparse::EMPTY_HEADER; 16]; - /// let mut response = httparse::Response::new(&mut headers); - /// - /// let res = httparse::ParserConfig::default() - /// .allow_obsolete_multiline_headers_in_responses(true) - /// .parse_response(&mut response, buf); - /// - /// assert_eq!(res, Ok(httparse::Status::Complete(buf.len()))); - /// - /// assert_eq!(response.headers.len(), 1); - /// assert_eq!(response.headers[0].name, "Folded-Header"); - /// assert_eq!(response.headers[0].value, b"hello\r\n there"); - /// ``` - pub fn allow_obsolete_multiline_headers_in_responses( - &mut self, - value: bool, - ) -> &mut Self { - self.allow_obsolete_multiline_headers_in_responses = value; - self - } - - /// Whether obsolete multiline headers should be allowed. - pub fn obsolete_multiline_headers_in_responses_are_allowed(&self) -> bool { - self.allow_obsolete_multiline_headers_in_responses - } - - /// Parses a request with the given config. 
- pub fn parse_request<'headers, 'buf>( - &self, - request: &mut Request<'headers, 'buf>, - buf: &'buf [u8], - ) -> Result { - request.parse_with_config(buf, self) - } - - /// Parses a request with the given config and buffer for headers - pub fn parse_request_with_uninit_headers<'headers, 'buf>( - &self, - request: &mut Request<'headers, 'buf>, - buf: &'buf [u8], - headers: &'headers mut [MaybeUninit>], - ) -> Result { - request.parse_with_config_and_uninit_headers(buf, self, headers) - } - - /// Sets whether invalid header lines should be silently ignored in responses. - /// - /// This mimicks the behaviour of major browsers. You probably don't want this. - /// You should only want this if you are implementing a proxy whose main - /// purpose is to sit in front of browsers whose users access arbitrary content - /// which may be malformed, and they expect everything that works without - /// the proxy to keep working with the proxy. - /// - /// This option will prevent `ParserConfig::parse_response` from returning - /// an error encountered when parsing a header, except if the error was caused - /// by the character NUL (ASCII code 0), as Chrome specifically always reject - /// those, or if the error was caused by a lone character `\r`, as Firefox and - /// Chrome behave differently in that case. - /// - /// The ignorable errors are: - /// * empty header names; - /// * characters that are not allowed in header names, except for `\0` and `\r`; - /// * when `allow_spaces_after_header_name_in_responses` is not enabled, - /// spaces and tabs between the header name and the colon; - /// * missing colon between header name and value; - /// * when `allow_obsolete_multiline_headers_in_responses` is not enabled, - /// headers using obsolete line folding. - /// * characters that are not allowed in header values except for `\0` and `\r`. 
- /// - /// If an ignorable error is encountered, the parser tries to find the next - /// line in the input to resume parsing the rest of the headers. As lines - /// contributing to a header using obsolete line folding always start - /// with whitespace, those will be ignored too. An error will be emitted - /// nonetheless if it finds `\0` or a lone `\r` while looking for the - /// next line. - pub fn ignore_invalid_headers_in_responses( - &mut self, - value: bool, - ) -> &mut Self { - self.ignore_invalid_headers_in_responses = value; - self - } - - /// Parses a response with the given config. - pub fn parse_response<'headers, 'buf>( - &self, - response: &mut Response<'headers, 'buf>, - buf: &'buf [u8], - ) -> Result { - response.parse_with_config(buf, self) - } - - /// Parses a response with the given config and buffer for headers - pub fn parse_response_with_uninit_headers<'headers, 'buf>( - &self, - response: &mut Response<'headers, 'buf>, - buf: &'buf [u8], - headers: &'headers mut [MaybeUninit>], - ) -> Result { - response.parse_with_config_and_uninit_headers(buf, self, headers) - } -} - -/// A parsed Request. -/// -/// The optional values will be `None` if a parse was not complete, and did not -/// parse the associated property. This allows you to inspect the parts that -/// could be parsed, before reading more, in case you wish to exit early. -/// -/// # Example -/// -/// ```no_run -/// let buf = b"GET /404 HTTP/1.1\r\nHost:"; -/// let mut headers = [httparse::EMPTY_HEADER; 16]; -/// let mut req = httparse::Request::new(&mut headers); -/// let res = req.parse(buf).unwrap(); -/// if res.is_partial() { -/// match req.path { -/// Some(ref path) => { -/// // check router for path. -/// // /404 doesn't exist? we could stop parsing -/// }, -/// None => { -/// // must read more and parse again -/// } -/// } -/// } -/// ``` -#[derive(Debug, Eq, PartialEq)] -pub struct Request<'headers, 'buf> { - /// The request method, such as `GET`. 
- pub method: Option<&'buf str>, - /// The request path, such as `/about-us`. - pub path: Option<&'buf str>, - /// The request minor version, such as `1` for `HTTP/1.1`. - pub version: Option, - /// The request headers. - pub headers: &'headers mut [Header<'buf>] -} - -impl<'h, 'b> Request<'h, 'b> { - /// Creates a new Request, using a slice of headers you allocate. - #[inline] - pub fn new(headers: &'h mut [Header<'b>]) -> Request<'h, 'b> { - Request { - method: None, - path: None, - version: None, - headers, - } - } - - fn parse_with_config_and_uninit_headers( - &mut self, - buf: &'b [u8], - config: &ParserConfig, - mut headers: &'h mut [MaybeUninit>], - ) -> Result { - let orig_len = buf.len(); - let mut bytes = Bytes::new(buf); - complete!(skip_empty_lines(&mut bytes)); - const GET: [u8; 4] = *b"GET "; - const POST: [u8; 4] = *b"POST"; - let method = match bytes.peek_n::<[u8; 4]>(4) { - Some(GET) => { - unsafe { - bytes.advance_and_commit(4); - } - "GET" - } - Some(POST) if bytes.peek_ahead(4) == Some(b' ') => { - unsafe { - bytes.advance_and_commit(5); - } - "POST" - } - _ => complete!(parse_token(&mut bytes)), - }; - self.method = Some(method); - if config.allow_multiple_spaces_in_request_line_delimiters { - complete!(skip_spaces(&mut bytes)); - } - self.path = Some(complete!(parse_uri(&mut bytes))); - if config.allow_multiple_spaces_in_request_line_delimiters { - complete!(skip_spaces(&mut bytes)); - } - self.version = Some(complete!(parse_version(&mut bytes))); - newline!(bytes); - - let len = orig_len - bytes.len(); - let headers_len = complete!(parse_headers_iter_uninit( - &mut headers, - &mut bytes, - &ParserConfig::default(), - )); - /* SAFETY: see `parse_headers_iter_uninit` guarantees */ - self.headers = unsafe { assume_init_slice(headers) }; - - Ok(Status::Complete(len + headers_len)) - } - - /// Try to parse a buffer of bytes into the Request, - /// except use an uninitialized slice of `Header`s. 
- /// - /// For more information, see `parse` - pub fn parse_with_uninit_headers( - &mut self, - buf: &'b [u8], - headers: &'h mut [MaybeUninit>], - ) -> Result { - self.parse_with_config_and_uninit_headers(buf, &Default::default(), headers) - } - - fn parse_with_config(&mut self, buf: &'b [u8], config: &ParserConfig) -> Result { - let headers = mem::replace(&mut self.headers, &mut []); - - /* SAFETY: see `parse_headers_iter_uninit` guarantees */ - unsafe { - let headers: *mut [Header<'_>] = headers; - let headers = headers as *mut [MaybeUninit>]; - match self.parse_with_config_and_uninit_headers(buf, config, &mut *headers) { - Ok(Status::Complete(idx)) => Ok(Status::Complete(idx)), - other => { - // put the original headers back - self.headers = &mut *(headers as *mut [Header<'_>]); - other - }, - } - } - } - - /// Try to parse a buffer of bytes into the Request. - /// - /// Returns byte offset in `buf` to start of HTTP body. - pub fn parse(&mut self, buf: &'b [u8]) -> Result { - self.parse_with_config(buf, &Default::default()) - } -} - -#[inline] -fn skip_empty_lines(bytes: &mut Bytes<'_>) -> Result<()> { - loop { - let b = bytes.peek(); - match b { - Some(b'\r') => { - // there's `\r`, so it's safe to bump 1 pos - unsafe { bytes.bump() }; - expect!(bytes.next() == b'\n' => Err(Error::NewLine)); - }, - Some(b'\n') => { - // there's `\n`, so it's safe to bump 1 pos - unsafe { bytes.bump(); } - }, - Some(..) => { - bytes.slice(); - return Ok(Status::Complete(())); - }, - None => return Ok(Status::Partial) - } - } -} - -#[inline] -fn skip_spaces(bytes: &mut Bytes<'_>) -> Result<()> { - loop { - let b = bytes.peek(); - match b { - Some(b' ') => { - // there's ` `, so it's safe to bump 1 pos - unsafe { bytes.bump() }; - } - Some(..) => { - bytes.slice(); - return Ok(Status::Complete(())); - } - None => return Ok(Status::Partial), - } - } -} - -/// A parsed Response. -/// -/// See `Request` docs for explanation of optional values. 
-#[derive(Debug, Eq, PartialEq)] -pub struct Response<'headers, 'buf> { - /// The response minor version, such as `1` for `HTTP/1.1`. - pub version: Option, - /// The response code, such as `200`. - pub code: Option, - /// The response reason-phrase, such as `OK`. - /// - /// Contains an empty string if the reason-phrase was missing or contained invalid characters. - pub reason: Option<&'buf str>, - /// The response headers. - pub headers: &'headers mut [Header<'buf>] -} - -impl<'h, 'b> Response<'h, 'b> { - /// Creates a new `Response` using a slice of `Header`s you have allocated. - #[inline] - pub fn new(headers: &'h mut [Header<'b>]) -> Response<'h, 'b> { - Response { - version: None, - code: None, - reason: None, - headers, - } - } - - /// Try to parse a buffer of bytes into this `Response`. - pub fn parse(&mut self, buf: &'b [u8]) -> Result { - self.parse_with_config(buf, &ParserConfig::default()) - } - - fn parse_with_config(&mut self, buf: &'b [u8], config: &ParserConfig) -> Result { - let headers = mem::replace(&mut self.headers, &mut []); - - unsafe { - let headers: *mut [Header<'_>] = headers; - let headers = headers as *mut [MaybeUninit>]; - match self.parse_with_config_and_uninit_headers(buf, config, &mut *headers) { - Ok(Status::Complete(idx)) => Ok(Status::Complete(idx)), - other => { - // put the original headers back - self.headers = &mut *(headers as *mut [Header<'_>]); - other - }, - } - } - } - - fn parse_with_config_and_uninit_headers( - &mut self, - buf: &'b [u8], - config: &ParserConfig, - mut headers: &'h mut [MaybeUninit>], - ) -> Result { - let orig_len = buf.len(); - let mut bytes = Bytes::new(buf); - - complete!(skip_empty_lines(&mut bytes)); - self.version = Some(complete!(parse_version(&mut bytes))); - space!(bytes or Error::Version); - if config.allow_multiple_spaces_in_response_status_delimiters { - complete!(skip_spaces(&mut bytes)); - } - self.code = Some(complete!(parse_code(&mut bytes))); - - // RFC7230 says there must be 'SP' and 
then reason-phrase, but admits - // its only for legacy reasons. With the reason-phrase completely - // optional (and preferred to be omitted) in HTTP2, we'll just - // handle any response that doesn't include a reason-phrase, because - // it's more lenient, and we don't care anyways. - // - // So, a SP means parse a reason-phrase. - // A newline means go to headers. - // Anything else we'll say is a malformed status. - match next!(bytes) { - b' ' => { - if config.allow_multiple_spaces_in_response_status_delimiters { - complete!(skip_spaces(&mut bytes)); - } - bytes.slice(); - self.reason = Some(complete!(parse_reason(&mut bytes))); - }, - b'\r' => { - expect!(bytes.next() == b'\n' => Err(Error::Status)); - bytes.slice(); - self.reason = Some(""); - }, - b'\n' => { - bytes.slice(); - self.reason = Some(""); - } - _ => return Err(Error::Status), - } - - - let len = orig_len - bytes.len(); - let headers_len = complete!(parse_headers_iter_uninit( - &mut headers, - &mut bytes, - config - )); - /* SAFETY: see `parse_headers_iter_uninit` guarantees */ - self.headers = unsafe { assume_init_slice(headers) }; - Ok(Status::Complete(len + headers_len)) - } -} - -/// Represents a parsed header. -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct Header<'a> { - /// The name portion of a header. - /// - /// A header name must be valid ASCII-US, so it's safe to store as a `&str`. - pub name: &'a str, - /// The value portion of a header. - /// - /// While headers **should** be ASCII-US, the specification allows for - /// values that may not be, and so the value is stored as bytes. 
- pub value: &'a [u8], -} - -impl<'a> fmt::Debug for Header<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut f = f.debug_struct("Header"); - f.field("name", &self.name); - if let Ok(value) = str::from_utf8(self.value) { - f.field("value", &value); - } else { - f.field("value", &self.value); - } - f.finish() - } -} - -/// An empty header, useful for constructing a `Header` array to pass in for -/// parsing. -/// -/// # Example -/// -/// ``` -/// let headers = [httparse::EMPTY_HEADER; 64]; -/// ``` -pub const EMPTY_HEADER: Header<'static> = Header { name: "", value: b"" }; - -#[inline] -fn parse_version(bytes: &mut Bytes<'_>) -> Result { - if let Some(eight) = bytes.peek_n::<[u8; 8]>(8) { - unsafe { bytes.advance(8); } - return match &eight { - b"HTTP/1.0" => Ok(Status::Complete(0)), - b"HTTP/1.1" => Ok(Status::Complete(1)), - _ => Err(Error::Version), - } - } - - // else (but not in `else` because of borrow checker) - - // If there aren't at least 8 bytes, we still want to detect early - // if this is a valid version or not. If it is, we'll return Partial. - expect!(bytes.next() == b'H' => Err(Error::Version)); - expect!(bytes.next() == b'T' => Err(Error::Version)); - expect!(bytes.next() == b'T' => Err(Error::Version)); - expect!(bytes.next() == b'P' => Err(Error::Version)); - expect!(bytes.next() == b'/' => Err(Error::Version)); - expect!(bytes.next() == b'1' => Err(Error::Version)); - expect!(bytes.next() == b'.' => Err(Error::Version)); - Ok(Status::Partial) -} - -/// From [RFC 7230](https://tools.ietf.org/html/rfc7230): -/// -/// > ```notrust -/// > reason-phrase = *( HTAB / SP / VCHAR / obs-text ) -/// > HTAB = %x09 ; horizontal tab -/// > VCHAR = %x21-7E ; visible (printing) characters -/// > obs-text = %x80-FF -/// > ``` -/// -/// > A.2. Changes from RFC 2616 -/// > -/// > Non-US-ASCII content in header fields and the reason phrase -/// > has been obsoleted and made opaque (the TEXT rule was removed). 
-#[inline] -fn parse_reason<'a>(bytes: &mut Bytes<'a>) -> Result<&'a str> { - let mut seen_obs_text = false; - loop { - let b = next!(bytes); - if b == b'\r' { - expect!(bytes.next() == b'\n' => Err(Error::Status)); - return Ok(Status::Complete(unsafe { - let bytes = bytes.slice_skip(2); - if !seen_obs_text { - // all bytes up till `i` must have been HTAB / SP / VCHAR - str::from_utf8_unchecked(bytes) - } else { - // obs-text characters were found, so return the fallback empty string - "" - } - })); - } else if b == b'\n' { - return Ok(Status::Complete(unsafe { - let bytes = bytes.slice_skip(1); - if !seen_obs_text { - // all bytes up till `i` must have been HTAB / SP / VCHAR - str::from_utf8_unchecked(bytes) - } else { - // obs-text characters were found, so return the fallback empty string - "" - } - })); - } else if !(b == 0x09 || b == b' ' || (0x21..=0x7E).contains(&b) || b >= 0x80) { - return Err(Error::Status); - } else if b >= 0x80 { - seen_obs_text = true; - } - } -} - -#[inline] -fn parse_token<'a>(bytes: &mut Bytes<'a>) -> Result<&'a str> { - let b = next!(bytes); - if !is_token(b) { - // First char must be a token char, it can't be a space which would indicate an empty token. - return Err(Error::Token); - } - - loop { - let b = next!(bytes); - if b == b' ' { - return Ok(Status::Complete(unsafe { - // all bytes up till `i` must have been `is_token`. - str::from_utf8_unchecked(bytes.slice_skip(1)) - })); - } else if !is_token(b) { - return Err(Error::Token); - } - } -} - -#[inline] -fn parse_uri<'a>(bytes: &mut Bytes<'a>) -> Result<&'a str> { - let b = next!(bytes); - if !is_uri_token(b) { - // First char must be a URI char, it can't be a space which would indicate an empty path. - return Err(Error::Token); - } - - simd::match_uri_vectored(bytes); - - loop { - let b = next!(bytes); - if b == b' ' { - return Ok(Status::Complete(unsafe { - // all bytes up till `i` must have been `is_token`. 
- str::from_utf8_unchecked(bytes.slice_skip(1)) - })); - } else if !is_uri_token(b) { - return Err(Error::Token); - } - } -} - - -#[inline] -fn parse_code(bytes: &mut Bytes<'_>) -> Result { - let hundreds = expect!(bytes.next() == b'0'..=b'9' => Err(Error::Status)); - let tens = expect!(bytes.next() == b'0'..=b'9' => Err(Error::Status)); - let ones = expect!(bytes.next() == b'0'..=b'9' => Err(Error::Status)); - - Ok(Status::Complete((hundreds - b'0') as u16 * 100 + - (tens - b'0') as u16 * 10 + - (ones - b'0') as u16)) -} - -/// Parse a buffer of bytes as headers. -/// -/// The return value, if complete and successful, includes the index of the -/// buffer that parsing stopped at, and a sliced reference to the parsed -/// headers. The length of the slice will be equal to the number of properly -/// parsed headers. -/// -/// # Example -/// -/// ``` -/// let buf = b"Host: foo.bar\nAccept: */*\n\nblah blah"; -/// let mut headers = [httparse::EMPTY_HEADER; 4]; -/// assert_eq!(httparse::parse_headers(buf, &mut headers), -/// Ok(httparse::Status::Complete((27, &[ -/// httparse::Header { name: "Host", value: b"foo.bar" }, -/// httparse::Header { name: "Accept", value: b"*/*" } -/// ][..])))); -/// ``` -pub fn parse_headers<'b: 'h, 'h>( - src: &'b [u8], - mut dst: &'h mut [Header<'b>], -) -> Result<(usize, &'h [Header<'b>])> { - let mut iter = Bytes::new(src); - let pos = complete!(parse_headers_iter(&mut dst, &mut iter, &ParserConfig::default())); - Ok(Status::Complete((pos, dst))) -} - -#[inline] -fn parse_headers_iter<'a, 'b>( - headers: &mut &mut [Header<'a>], - bytes: &'b mut Bytes<'a>, - config: &ParserConfig, -) -> Result { - parse_headers_iter_uninit( - /* SAFETY: see `parse_headers_iter_uninit` guarantees */ - unsafe { deinit_slice_mut(headers) }, - bytes, - config, - ) -} - -unsafe fn deinit_slice_mut<'a, 'b, T>(s: &'a mut &'b mut [T]) -> &'a mut &'b mut [MaybeUninit] { - let s: *mut &mut [T] = s; - let s = s as *mut &mut [MaybeUninit]; - &mut *s -} -unsafe fn 
assume_init_slice(s: &mut [MaybeUninit]) -> &mut [T] { - let s: *mut [MaybeUninit] = s; - let s = s as *mut [T]; - &mut *s -} - -/* Function which parsers headers into uninitialized buffer. - * - * Guarantees that it doesn't write garbage, so casting - * &mut &mut [Header] -> &mut &mut [MaybeUninit

] - * is safe here. - * - * Also it promises `headers` get shrunk to number of initialized headers, - * so casting the other way around after calling this function is safe - */ -fn parse_headers_iter_uninit<'a, 'b>( - headers: &mut &mut [MaybeUninit>], - bytes: &'b mut Bytes<'a>, - config: &ParserConfig, -) -> Result { - - /* Flow of this function is pretty complex, especially with macros, - * so this struct makes sure we shrink `headers` to only parsed ones. - * Comparing to previous code, this only may introduce some additional - * instructions in case of early return */ - struct ShrinkOnDrop<'r1, 'r2, 'a> { - headers: &'r1 mut &'r2 mut [MaybeUninit>], - num_headers: usize, - } - - impl<'r1, 'r2, 'a> Drop for ShrinkOnDrop<'r1, 'r2, 'a> { - fn drop(&mut self) { - let headers = mem::replace(self.headers, &mut []); - - /* SAFETY: num_headers is the number of initialized headers */ - let headers = unsafe { headers.get_unchecked_mut(..self.num_headers) }; - - *self.headers = headers; - } - } - - let mut autoshrink = ShrinkOnDrop { - headers, - num_headers: 0, - }; - let mut count: usize = 0; - let mut result = Err(Error::TooManyHeaders); - - let mut iter = autoshrink.headers.iter_mut(); - - macro_rules! maybe_continue_after_obsolete_line_folding { - ($bytes:ident, $label:lifetime) => { - if config.allow_obsolete_multiline_headers_in_responses { - match $bytes.peek() { - None => { - // Next byte may be a space, in which case that header - // is using obsolete line folding, so we may have more - // whitespace to skip after colon. - return Ok(Status::Partial); - } - Some(b' ') | Some(b'\t') => { - // The space will be consumed next iteration. - continue $label; - } - _ => { - // There is another byte after the end of the line, - // but it's not whitespace, so it's probably another - // header or the final line return. This header is thus - // empty. 
- }, - } - } - } - } - - 'headers: loop { - // Return the error `$err` if `ignore_invalid_headers_in_responses` - // is false, otherwise find the end of the current line and resume - // parsing on the next one. - macro_rules! handle_invalid_char { - ($bytes:ident, $b:ident, $err:ident) => { - if !config.ignore_invalid_headers_in_responses { - return Err(Error::$err); - } - - let mut b = $b; - - loop { - if b == b'\r' { - expect!(bytes.next() == b'\n' => Err(Error::$err)); - break; - } - if b == b'\n' { - break; - } - if b == b'\0' { - return Err(Error::$err); - } - b = next!($bytes); - } - - count += $bytes.pos(); - $bytes.slice(); - - continue 'headers; - }; - } - - // a newline here means the head is over! - let b = next!(bytes); - if b == b'\r' { - expect!(bytes.next() == b'\n' => Err(Error::NewLine)); - result = Ok(Status::Complete(count + bytes.pos())); - break; - } - if b == b'\n' { - result = Ok(Status::Complete(count + bytes.pos())); - break; - } - if !is_header_name_token(b) { - handle_invalid_char!(bytes, b, HeaderName); - } - - // parse header name until colon - let header_name: &str = 'name: loop { - let mut b = next!(bytes); - - if is_header_name_token(b) { - continue 'name; - } - - count += bytes.pos(); - let name = unsafe { - str::from_utf8_unchecked(bytes.slice_skip(1)) - }; - - if b == b':' { - break 'name name; - } - - if config.allow_spaces_after_header_name_in_responses { - while b == b' ' || b == b'\t' { - b = next!(bytes); - - if b == b':' { - count += bytes.pos(); - bytes.slice(); - break 'name name; - } - } - } - - handle_invalid_char!(bytes, b, HeaderName); - }; - - let mut b; - - let value_slice = 'value: loop { - // eat white space between colon and value - 'whitespace_after_colon: loop { - b = next!(bytes); - if b == b' ' || b == b'\t' { - count += bytes.pos(); - bytes.slice(); - continue 'whitespace_after_colon; - } - if is_header_value_token(b) { - break 'whitespace_after_colon; - } - - if b == b'\r' { - expect!(bytes.next() == b'\n' 
=> Err(Error::HeaderValue)); - } else if b != b'\n' { - handle_invalid_char!(bytes, b, HeaderValue); - } - - maybe_continue_after_obsolete_line_folding!(bytes, 'whitespace_after_colon); - - count += bytes.pos(); - let whitespace_slice = bytes.slice(); - - // This produces an empty slice that points to the beginning - // of the whitespace. - break 'value &whitespace_slice[0..0]; - } - - 'value_lines: loop { - // parse value till EOL - - simd::match_header_value_vectored(bytes); - - 'value_line: loop { - if let Some(bytes8) = bytes.peek_n::<[u8; 8]>(8) { - macro_rules! check { - ($bytes:ident, $i:literal) => ({ - b = $bytes[$i]; - if !is_header_value_token(b) { - unsafe { bytes.advance($i + 1); } - break 'value_line; - } - }); - } - - check!(bytes8, 0); - check!(bytes8, 1); - check!(bytes8, 2); - check!(bytes8, 3); - check!(bytes8, 4); - check!(bytes8, 5); - check!(bytes8, 6); - check!(bytes8, 7); - unsafe { bytes.advance(8); } - - continue 'value_line; - } - - b = next!(bytes); - if !is_header_value_token(b) { - break 'value_line; - } - } - - //found_ctl - let skip = if b == b'\r' { - expect!(bytes.next() == b'\n' => Err(Error::HeaderValue)); - 2 - } else if b == b'\n' { - 1 - } else { - handle_invalid_char!(bytes, b, HeaderValue); - }; - - maybe_continue_after_obsolete_line_folding!(bytes, 'value_lines); - - count += bytes.pos(); - // having just checked that a newline exists, it's safe to skip it. - unsafe { - break 'value bytes.slice_skip(skip); - } - } - }; - - let uninit_header = match iter.next() { - Some(header) => header, - None => break 'headers - }; - - // trim trailing whitespace in the header - let header_value = if let Some(last_visible) = value_slice - .iter() - .rposition(|b| *b != b' ' && *b != b'\t' && *b != b'\r' && *b != b'\n') - { - // There is at least one non-whitespace character. - &value_slice[0..last_visible+1] - } else { - // There is no non-whitespace character. This can only happen when value_slice is - // empty. 
- value_slice - }; - - *uninit_header = MaybeUninit::new(Header { - name: header_name, - value: header_value, - }); - autoshrink.num_headers += 1; - } - - result -} - -/// Parse a buffer of bytes as a chunk size. -/// -/// The return value, if complete and successful, includes the index of the -/// buffer that parsing stopped at, and the size of the following chunk. -/// -/// # Example -/// -/// ``` -/// let buf = b"4\r\nRust\r\n0\r\n\r\n"; -/// assert_eq!(httparse::parse_chunk_size(buf), -/// Ok(httparse::Status::Complete((3, 4)))); -/// ``` -pub fn parse_chunk_size(buf: &[u8]) - -> result::Result, InvalidChunkSize> { - const RADIX: u64 = 16; - let mut bytes = Bytes::new(buf); - let mut size = 0; - let mut in_chunk_size = true; - let mut in_ext = false; - let mut count = 0; - loop { - let b = next!(bytes); - match b { - b'0' ..= b'9' if in_chunk_size => { - if count > 15 { - return Err(InvalidChunkSize); - } - count += 1; - size *= RADIX; - size += (b - b'0') as u64; - }, - b'a' ..= b'f' if in_chunk_size => { - if count > 15 { - return Err(InvalidChunkSize); - } - count += 1; - size *= RADIX; - size += (b + 10 - b'a') as u64; - } - b'A' ..= b'F' if in_chunk_size => { - if count > 15 { - return Err(InvalidChunkSize); - } - count += 1; - size *= RADIX; - size += (b + 10 - b'A') as u64; - } - b'\r' => { - match next!(bytes) { - b'\n' => break, - _ => return Err(InvalidChunkSize), - } - } - // If we weren't in the extension yet, the ";" signals its start - b';' if !in_ext => { - in_ext = true; - in_chunk_size = false; - } - // "Linear white space" is ignored between the chunk size and the - // extension separator token (";") due to the "implied *LWS rule". - b'\t' | b' ' if !in_ext && !in_chunk_size => {} - // LWS can follow the chunk size, but no more digits can come - b'\t' | b' ' if in_chunk_size => in_chunk_size = false, - // We allow any arbitrary octet once we are in the extension, since - // they all get ignored anyway. 
According to the HTTP spec, valid - // extensions would have a more strict syntax: - // (token ["=" (token | quoted-string)]) - // but we gain nothing by rejecting an otherwise valid chunk size. - _ if in_ext => {} - // Finally, if we aren't in the extension and we're reading any - // other octet, the chunk size line is invalid! - _ => return Err(InvalidChunkSize), - } - } - Ok(Status::Complete((bytes.pos(), size))) -} - -#[cfg(test)] -mod tests { - use super::{Request, Response, Status, EMPTY_HEADER, parse_chunk_size}; - - const NUM_OF_HEADERS: usize = 4; - - macro_rules! req { - ($name:ident, $buf:expr, |$arg:ident| $body:expr) => ( - req! {$name, $buf, Ok(Status::Complete($buf.len())), |$arg| $body } - ); - ($name:ident, $buf:expr, $len:expr, |$arg:ident| $body:expr) => ( - #[test] - fn $name() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut req = Request::new(&mut headers[..]); - let status = req.parse($buf.as_ref()); - assert_eq!(status, $len); - closure(req); - - fn closure($arg: Request) { - $body - } - } - ) - } - - req! { - test_request_simple, - b"GET / HTTP/1.1\r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 0); - } - } - - req! { - test_request_simple_with_query_params, - b"GET /thing?data=a HTTP/1.1\r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/thing?data=a"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 0); - } - } - - req! { - test_request_simple_with_whatwg_query_params, - b"GET /thing?data=a^ HTTP/1.1\r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/thing?data=a^"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 0); - } - } - - req! 
{ - test_request_headers, - b"GET / HTTP/1.1\r\nHost: foo.com\r\nCookie: \r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 2); - assert_eq!(req.headers[0].name, "Host"); - assert_eq!(req.headers[0].value, b"foo.com"); - assert_eq!(req.headers[1].name, "Cookie"); - assert_eq!(req.headers[1].value, b""); - } - } - - req! { - test_request_headers_optional_whitespace, - b"GET / HTTP/1.1\r\nHost: \tfoo.com\t \r\nCookie: \t \r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 2); - assert_eq!(req.headers[0].name, "Host"); - assert_eq!(req.headers[0].value, b"foo.com"); - assert_eq!(req.headers[1].name, "Cookie"); - assert_eq!(req.headers[1].value, b""); - } - } - - req! { - // test the scalar parsing - test_request_header_value_htab_short, - b"GET / HTTP/1.1\r\nUser-Agent: some\tagent\r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 1); - assert_eq!(req.headers[0].name, "User-Agent"); - assert_eq!(req.headers[0].value, b"some\tagent"); - } - } - - req! { - // test the sse42 parsing - test_request_header_value_htab_med, - b"GET / HTTP/1.1\r\nUser-Agent: 1234567890some\tagent\r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 1); - assert_eq!(req.headers[0].name, "User-Agent"); - assert_eq!(req.headers[0].value, b"1234567890some\tagent"); - } - } - - req! 
{ - // test the avx2 parsing - test_request_header_value_htab_long, - b"GET / HTTP/1.1\r\nUser-Agent: 1234567890some\t1234567890agent1234567890\r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 1); - assert_eq!(req.headers[0].name, "User-Agent"); - assert_eq!(req.headers[0].value, &b"1234567890some\t1234567890agent1234567890"[..]); - } - } - - req! { - test_request_headers_max, - b"GET / HTTP/1.1\r\nA: A\r\nB: B\r\nC: C\r\nD: D\r\n\r\n", - |req| { - assert_eq!(req.headers.len(), NUM_OF_HEADERS); - } - } - - req! { - test_request_multibyte, - b"GET / HTTP/1.1\r\nHost: foo.com\r\nUser-Agent: \xe3\x81\xb2\xe3/1.0\r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 2); - assert_eq!(req.headers[0].name, "Host"); - assert_eq!(req.headers[0].value, b"foo.com"); - assert_eq!(req.headers[1].name, "User-Agent"); - assert_eq!(req.headers[1].value, b"\xe3\x81\xb2\xe3/1.0"); - } - } - - - req! { - test_request_partial, - b"GET / HTTP/1.1\r\n\r", Ok(Status::Partial), - |_req| {} - } - - req! { - test_request_partial_version, - b"GET / HTTP/1.", Ok(Status::Partial), - |_req| {} - } - - req! { - test_request_partial_parses_headers_as_much_as_it_can, - b"GET / HTTP/1.1\r\nHost: yolo\r\n", - Ok(crate::Status::Partial), - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), NUM_OF_HEADERS); // doesn't slice since not Complete - assert_eq!(req.headers[0].name, "Host"); - assert_eq!(req.headers[0].value, b"yolo"); - } - } - - req! { - test_request_newlines, - b"GET / HTTP/1.1\nHost: foo.bar\n\n", - |_r| {} - } - - req! 
{ - test_request_empty_lines_prefix, - b"\r\n\r\nGET / HTTP/1.1\r\n\r\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 0); - } - } - - req! { - test_request_empty_lines_prefix_lf_only, - b"\n\nGET / HTTP/1.1\n\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 0); - } - } - - req! { - test_request_path_backslash, - b"\n\nGET /\\?wayne\\=5 HTTP/1.1\n\n", - |req| { - assert_eq!(req.method.unwrap(), "GET"); - assert_eq!(req.path.unwrap(), "/\\?wayne\\=5"); - assert_eq!(req.version.unwrap(), 1); - assert_eq!(req.headers.len(), 0); - } - } - - req! { - test_request_with_invalid_token_delimiter, - b"GET\n/ HTTP/1.1\r\nHost: foo.bar\r\n\r\n", - Err(crate::Error::Token), - |_r| {} - } - - - req! { - test_request_with_invalid_but_short_version, - b"GET / HTTP/1!", - Err(crate::Error::Version), - |_r| {} - } - - req! { - test_request_with_empty_method, - b" / HTTP/1.1\r\n\r\n", - Err(crate::Error::Token), - |_r| {} - } - - req! { - test_request_with_empty_path, - b"GET HTTP/1.1\r\n\r\n", - Err(crate::Error::Token), - |_r| {} - } - - req! { - test_request_with_empty_method_and_path, - b" HTTP/1.1\r\n\r\n", - Err(crate::Error::Token), - |_r| {} - } - - macro_rules! res { - ($name:ident, $buf:expr, |$arg:ident| $body:expr) => ( - res! {$name, $buf, Ok(Status::Complete($buf.len())), |$arg| $body } - ); - ($name:ident, $buf:expr, $len:expr, |$arg:ident| $body:expr) => ( - #[test] - fn $name() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut res = Response::new(&mut headers[..]); - let status = res.parse($buf.as_ref()); - assert_eq!(status, $len); - closure(res); - - fn closure($arg: Response) { - $body - } - } - ) - } - - res! 
{ - test_response_simple, - b"HTTP/1.1 200 OK\r\n\r\n", - |res| { - assert_eq!(res.version.unwrap(), 1); - assert_eq!(res.code.unwrap(), 200); - assert_eq!(res.reason.unwrap(), "OK"); - } - } - - res! { - test_response_newlines, - b"HTTP/1.0 403 Forbidden\nServer: foo.bar\n\n", - |_r| {} - } - - res! { - test_response_reason_missing, - b"HTTP/1.1 200 \r\n\r\n", - |res| { - assert_eq!(res.version.unwrap(), 1); - assert_eq!(res.code.unwrap(), 200); - assert_eq!(res.reason.unwrap(), ""); - } - } - - res! { - test_response_reason_missing_no_space, - b"HTTP/1.1 200\r\n\r\n", - |res| { - assert_eq!(res.version.unwrap(), 1); - assert_eq!(res.code.unwrap(), 200); - assert_eq!(res.reason.unwrap(), ""); - } - } - - res! { - test_response_reason_missing_no_space_with_headers, - b"HTTP/1.1 200\r\nFoo: bar\r\n\r\n", - |res| { - assert_eq!(res.version.unwrap(), 1); - assert_eq!(res.code.unwrap(), 200); - assert_eq!(res.reason.unwrap(), ""); - assert_eq!(res.headers.len(), 1); - assert_eq!(res.headers[0].name, "Foo"); - assert_eq!(res.headers[0].value, b"bar"); - } - } - - res! { - test_response_reason_with_space_and_tab, - b"HTTP/1.1 101 Switching Protocols\t\r\n\r\n", - |res| { - assert_eq!(res.version.unwrap(), 1); - assert_eq!(res.code.unwrap(), 101); - assert_eq!(res.reason.unwrap(), "Switching Protocols\t"); - } - } - - static RESPONSE_REASON_WITH_OBS_TEXT_BYTE: &[u8] = b"HTTP/1.1 200 X\xFFZ\r\n\r\n"; - res! { - test_response_reason_with_obsolete_text_byte, - RESPONSE_REASON_WITH_OBS_TEXT_BYTE, - |res| { - assert_eq!(res.version.unwrap(), 1); - assert_eq!(res.code.unwrap(), 200); - // Empty string fallback in case of obs-text - assert_eq!(res.reason.unwrap(), ""); - } - } - - res! { - test_response_reason_with_nul_byte, - b"HTTP/1.1 200 \x00\r\n\r\n", - Err(crate::Error::Status), - |_res| {} - } - - res! { - test_response_version_missing_space, - b"HTTP/1.1", - Ok(Status::Partial), - |_res| {} - } - - res! 
{ - test_response_code_missing_space, - b"HTTP/1.1 200", - Ok(Status::Partial), - |_res| {} - } - - res! { - test_response_partial_parses_headers_as_much_as_it_can, - b"HTTP/1.1 200 OK\r\nServer: yolo\r\n", - Ok(crate::Status::Partial), - |res| { - assert_eq!(res.version.unwrap(), 1); - assert_eq!(res.code.unwrap(), 200); - assert_eq!(res.reason.unwrap(), "OK"); - assert_eq!(res.headers.len(), NUM_OF_HEADERS); // doesn't slice since not Complete - assert_eq!(res.headers[0].name, "Server"); - assert_eq!(res.headers[0].value, b"yolo"); - } - } - - res! { - test_response_empty_lines_prefix_lf_only, - b"\n\nHTTP/1.1 200 OK\n\n", - |_res| {} - } - - res! { - test_response_no_cr, - b"HTTP/1.0 200\nContent-type: text/html\n\n", - |res| { - assert_eq!(res.version.unwrap(), 0); - assert_eq!(res.code.unwrap(), 200); - assert_eq!(res.reason.unwrap(), ""); - assert_eq!(res.headers.len(), 1); - assert_eq!(res.headers[0].name, "Content-type"); - assert_eq!(res.headers[0].value, b"text/html"); - } - } - - static RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials : true\r\nBread: baguette\r\n\r\n"; - - #[test] - fn test_forbid_response_with_whitespace_between_header_name_and_colon() { - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - let result = response.parse(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); - - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - #[test] - fn test_allow_response_with_whitespace_between_header_name_and_colon() { - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_spaces_after_header_name_in_responses(true) - .parse_response(&mut response, RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); - - assert_eq!(result, Ok(Status::Complete(77))); - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 
200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 2); - assert_eq!(response.headers[0].name, "Access-Control-Allow-Credentials"); - assert_eq!(response.headers[0].value, &b"true"[..]); - assert_eq!(response.headers[1].name, "Bread"); - assert_eq!(response.headers[1].value, &b"baguette"[..]); - } - - #[test] - fn test_ignore_header_line_with_whitespaces_after_header_name() { - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); - - assert_eq!(result, Ok(Status::Complete(77))); - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Bread"); - assert_eq!(response.headers[0].value, &b"baguette"[..]); - } - - static REQUEST_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON: &[u8] = - b"GET / HTTP/1.1\r\nHost : localhost\r\n\r\n"; - - #[test] - fn test_forbid_request_with_whitespace_between_header_name_and_colon() { - let mut headers = [EMPTY_HEADER; 1]; - let mut request = Request::new(&mut headers[..]); - let result = request.parse(REQUEST_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); - - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - static RESPONSE_WITH_OBSOLETE_LINE_FOLDING_AT_START: &[u8] = - b"HTTP/1.1 200 OK\r\nLine-Folded-Header: \r\n \r\n hello there\r\n\r\n"; - - #[test] - fn test_forbid_response_with_obsolete_line_folding_at_start() { - let mut headers = [EMPTY_HEADER; 1]; - let mut response = Response::new(&mut headers[..]); - let result = response.parse(RESPONSE_WITH_OBSOLETE_LINE_FOLDING_AT_START); - - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - #[test] - fn test_allow_response_with_obsolete_line_folding_at_start() { - 
let mut headers = [EMPTY_HEADER; 1]; - let mut response = Response::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_obsolete_multiline_headers_in_responses(true) - .parse_response(&mut response, RESPONSE_WITH_OBSOLETE_LINE_FOLDING_AT_START); - - assert_eq!(result, Ok(Status::Complete(RESPONSE_WITH_OBSOLETE_LINE_FOLDING_AT_START.len()))); - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Line-Folded-Header"); - assert_eq!(response.headers[0].value, &b"hello there"[..]); - } - - static RESPONSE_WITH_OBSOLETE_LINE_FOLDING_AT_END: &[u8] = - b"HTTP/1.1 200 OK\r\nLine-Folded-Header: hello there\r\n \r\n \r\n\r\n"; - - #[test] - fn test_forbid_response_with_obsolete_line_folding_at_end() { - let mut headers = [EMPTY_HEADER; 1]; - let mut response = Response::new(&mut headers[..]); - let result = response.parse(RESPONSE_WITH_OBSOLETE_LINE_FOLDING_AT_END); - - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - #[test] - fn test_allow_response_with_obsolete_line_folding_at_end() { - let mut headers = [EMPTY_HEADER; 1]; - let mut response = Response::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_obsolete_multiline_headers_in_responses(true) - .parse_response(&mut response, RESPONSE_WITH_OBSOLETE_LINE_FOLDING_AT_END); - - assert_eq!(result, Ok(Status::Complete(RESPONSE_WITH_OBSOLETE_LINE_FOLDING_AT_END.len()))); - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Line-Folded-Header"); - assert_eq!(response.headers[0].value, &b"hello there"[..]); - } - - static RESPONSE_WITH_OBSOLETE_LINE_FOLDING_IN_MIDDLE: &[u8] = - b"HTTP/1.1 200 OK\r\nLine-Folded-Header: hello \r\n \r\n there\r\n\r\n"; 
- - #[test] - fn test_forbid_response_with_obsolete_line_folding_in_middle() { - let mut headers = [EMPTY_HEADER; 1]; - let mut response = Response::new(&mut headers[..]); - let result = response.parse(RESPONSE_WITH_OBSOLETE_LINE_FOLDING_IN_MIDDLE); - - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - #[test] - fn test_allow_response_with_obsolete_line_folding_in_middle() { - let mut headers = [EMPTY_HEADER; 1]; - let mut response = Response::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_obsolete_multiline_headers_in_responses(true) - .parse_response(&mut response, RESPONSE_WITH_OBSOLETE_LINE_FOLDING_IN_MIDDLE); - - assert_eq!(result, Ok(Status::Complete(RESPONSE_WITH_OBSOLETE_LINE_FOLDING_IN_MIDDLE.len()))); - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Line-Folded-Header"); - assert_eq!(response.headers[0].value, &b"hello \r\n \r\n there"[..]); - } - - static RESPONSE_WITH_OBSOLETE_LINE_FOLDING_IN_EMPTY_HEADER: &[u8] = - b"HTTP/1.1 200 OK\r\nLine-Folded-Header: \r\n \r\n \r\n\r\n"; - - #[test] - fn test_forbid_response_with_obsolete_line_folding_in_empty_header() { - let mut headers = [EMPTY_HEADER; 1]; - let mut response = Response::new(&mut headers[..]); - let result = response.parse(RESPONSE_WITH_OBSOLETE_LINE_FOLDING_IN_EMPTY_HEADER); - - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - #[test] - fn test_allow_response_with_obsolete_line_folding_in_empty_header() { - let mut headers = [EMPTY_HEADER; 1]; - let mut response = Response::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_obsolete_multiline_headers_in_responses(true) - .parse_response(&mut response, RESPONSE_WITH_OBSOLETE_LINE_FOLDING_IN_EMPTY_HEADER); - - assert_eq!(result, Ok(Status::Complete(RESPONSE_WITH_OBSOLETE_LINE_FOLDING_IN_EMPTY_HEADER.len()))); 
- assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Line-Folded-Header"); - assert_eq!(response.headers[0].value, &b""[..]); - } - - #[test] - fn test_chunk_size() { - assert_eq!(parse_chunk_size(b"0\r\n"), Ok(Status::Complete((3, 0)))); - assert_eq!(parse_chunk_size(b"12\r\nchunk"), Ok(Status::Complete((4, 18)))); - assert_eq!(parse_chunk_size(b"3086d\r\n"), Ok(Status::Complete((7, 198765)))); - assert_eq!(parse_chunk_size(b"3735AB1;foo bar*\r\n"), Ok(Status::Complete((18, 57891505)))); - assert_eq!(parse_chunk_size(b"3735ab1 ; baz \r\n"), Ok(Status::Complete((16, 57891505)))); - assert_eq!(parse_chunk_size(b"77a65\r"), Ok(Status::Partial)); - assert_eq!(parse_chunk_size(b"ab"), Ok(Status::Partial)); - assert_eq!(parse_chunk_size(b"567f8a\rfoo"), Err(crate::InvalidChunkSize)); - assert_eq!(parse_chunk_size(b"567f8a\rfoo"), Err(crate::InvalidChunkSize)); - assert_eq!(parse_chunk_size(b"567xf8a\r\n"), Err(crate::InvalidChunkSize)); - assert_eq!(parse_chunk_size(b"ffffffffffffffff\r\n"), Ok(Status::Complete((18, std::u64::MAX)))); - assert_eq!(parse_chunk_size(b"1ffffffffffffffff\r\n"), Err(crate::InvalidChunkSize)); - assert_eq!(parse_chunk_size(b"Affffffffffffffff\r\n"), Err(crate::InvalidChunkSize)); - assert_eq!(parse_chunk_size(b"fffffffffffffffff\r\n"), Err(crate::InvalidChunkSize)); - } - - static RESPONSE_WITH_MULTIPLE_SPACE_DELIMITERS: &[u8] = - b"HTTP/1.1 200 OK\r\n\r\n"; - - #[test] - fn test_forbid_response_with_multiple_space_delimiters() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut response = Response::new(&mut headers[..]); - let result = response.parse(RESPONSE_WITH_MULTIPLE_SPACE_DELIMITERS); - - assert_eq!(result, Err(crate::Error::Status)); - } - - #[test] - fn test_allow_response_with_multiple_space_delimiters() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - 
let mut response = Response::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_multiple_spaces_in_response_status_delimiters(true) - .parse_response(&mut response, RESPONSE_WITH_MULTIPLE_SPACE_DELIMITERS); - - assert_eq!(result, Ok(Status::Complete(RESPONSE_WITH_MULTIPLE_SPACE_DELIMITERS.len()))); - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 0); - } - - /// This is technically allowed by the spec, but we only support multiple spaces as an option, - /// not stray `\r`s. - static RESPONSE_WITH_WEIRD_WHITESPACE_DELIMITERS: &[u8] = - b"HTTP/1.1 200\rOK\r\n\r\n"; - - #[test] - fn test_forbid_response_with_weird_whitespace_delimiters() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut response = Response::new(&mut headers[..]); - let result = response.parse(RESPONSE_WITH_WEIRD_WHITESPACE_DELIMITERS); - - assert_eq!(result, Err(crate::Error::Status)); - } - - #[test] - fn test_still_forbid_response_with_weird_whitespace_delimiters() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut response = Response::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_multiple_spaces_in_response_status_delimiters(true) - .parse_response(&mut response, RESPONSE_WITH_WEIRD_WHITESPACE_DELIMITERS); - assert_eq!(result, Err(crate::Error::Status)); - } - - static REQUEST_WITH_MULTIPLE_SPACE_DELIMITERS: &[u8] = - b"GET / HTTP/1.1\r\n\r\n"; - - #[test] - fn test_forbid_request_with_multiple_space_delimiters() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut request = Request::new(&mut headers[..]); - let result = request.parse(REQUEST_WITH_MULTIPLE_SPACE_DELIMITERS); - - assert_eq!(result, Err(crate::Error::Token)); - } - - #[test] - fn test_allow_request_with_multiple_space_delimiters() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut request = 
Request::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_multiple_spaces_in_request_line_delimiters(true) - .parse_request(&mut request, REQUEST_WITH_MULTIPLE_SPACE_DELIMITERS); - - assert_eq!(result, Ok(Status::Complete(REQUEST_WITH_MULTIPLE_SPACE_DELIMITERS.len()))); - assert_eq!(request.method.unwrap(), "GET"); - assert_eq!(request.path.unwrap(), "/"); - assert_eq!(request.version.unwrap(), 1); - assert_eq!(request.headers.len(), 0); - } - - /// This is technically allowed by the spec, but we only support multiple spaces as an option, - /// not stray `\r`s. - static REQUEST_WITH_WEIRD_WHITESPACE_DELIMITERS: &[u8] = - b"GET\r/\rHTTP/1.1\r\n\r\n"; - - #[test] - fn test_forbid_request_with_weird_whitespace_delimiters() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut request = Request::new(&mut headers[..]); - let result = request.parse(REQUEST_WITH_WEIRD_WHITESPACE_DELIMITERS); - - assert_eq!(result, Err(crate::Error::Token)); - } - - #[test] - fn test_still_forbid_request_with_weird_whitespace_delimiters() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut request = Request::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_multiple_spaces_in_request_line_delimiters(true) - .parse_request(&mut request, REQUEST_WITH_WEIRD_WHITESPACE_DELIMITERS); - assert_eq!(result, Err(crate::Error::Token)); - } - - static REQUEST_WITH_MULTIPLE_SPACES_AND_BAD_PATH: &[u8] = b"GET /foo>ohno HTTP/1.1\r\n\r\n"; - - #[test] - fn test_request_with_multiple_spaces_and_bad_path() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut request = Request::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_multiple_spaces_in_request_line_delimiters(true) - .parse_request(&mut request, REQUEST_WITH_MULTIPLE_SPACES_AND_BAD_PATH); - assert_eq!(result, Err(crate::Error::Token)); - } - - static RESPONSE_WITH_SPACES_IN_CODE: &[u8] = b"HTTP/1.1 99 200 OK\r\n\r\n"; - - #[test] - 
fn test_response_with_spaces_in_code() { - let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; - let mut response = Response::new(&mut headers[..]); - let result = crate::ParserConfig::default() - .allow_multiple_spaces_in_response_status_delimiters(true) - .parse_response(&mut response, RESPONSE_WITH_SPACES_IN_CODE); - assert_eq!(result, Err(crate::Error::Status)); - } - - #[test] - fn test_response_with_empty_header_name() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\n: hello\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .allow_spaces_after_header_name_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Ok(Status::Complete(45))); - - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Bread"); - assert_eq!(response.headers[0].value, &b"baguette"[..]); - } - - #[test] - fn test_request_with_whitespace_between_header_name_and_colon() { - const REQUEST: &[u8] = - b"GET / HTTP/1.1\r\nAccess-Control-Allow-Credentials : true\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut request = Request::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .allow_spaces_after_header_name_in_responses(true) - .parse_request(&mut request, REQUEST); - assert_eq!(result, Err(crate::Error::HeaderName)); - - let result = crate::ParserConfig::default() - - .ignore_invalid_headers_in_responses(true) - .parse_request(&mut request, REQUEST); - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - #[test] - fn 
test_response_with_invalid_char_between_header_name_and_colon() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials\xFF : true\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .allow_spaces_after_header_name_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE); - - assert_eq!(result, Ok(Status::Complete(79))); - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Bread"); - assert_eq!(response.headers[0].value, &b"baguette"[..]); - } - - #[test] - fn test_ignore_header_line_with_missing_colon() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Ok(Status::Complete(70))); - - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Bread"); - assert_eq!(response.headers[0].value, &b"baguette"[..]); - } - - #[test] - fn test_header_with_missing_colon_with_folding() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials \r\n 
hello\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .allow_obsolete_multiline_headers_in_responses(true) - .allow_spaces_after_header_name_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Ok(Status::Complete(81))); - - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Bread"); - assert_eq!(response.headers[0].value, &b"baguette"[..]); - } - - #[test] - fn test_header_with_nul_in_header_name() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Cred\0entials: hello\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - #[test] - fn test_header_with_cr_in_header_name() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Cred\rentials: hello\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut 
response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - #[test] - fn test_header_with_nul_in_whitespace_before_colon() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials \0: hello\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .allow_spaces_after_header_name_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - - let result = crate::ParserConfig::default() - .allow_spaces_after_header_name_in_responses(true) - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderName)); - } - - #[test] - fn test_header_with_nul_in_value() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials: hell\0o\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderValue)); - - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderValue)); - } - - #[test] - fn test_header_with_invalid_char_in_value() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials: hell\x01o\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderValue)); - - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, 
Ok(Status::Complete(78))); - - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Bread"); - assert_eq!(response.headers[0].value, &b"baguette"[..]); - } - - #[test] - fn test_header_with_invalid_char_in_value_with_folding() { - const RESPONSE: &[u8] = - b"HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials: hell\x01o \n world!\r\nBread: baguette\r\n\r\n"; - - let mut headers = [EMPTY_HEADER; 2]; - let mut response = Response::new(&mut headers[..]); - - let result = crate::ParserConfig::default() - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Err(crate::Error::HeaderValue)); - - let result = crate::ParserConfig::default() - .ignore_invalid_headers_in_responses(true) - .parse_response(&mut response, RESPONSE); - assert_eq!(result, Ok(Status::Complete(88))); - - assert_eq!(response.version.unwrap(), 1); - assert_eq!(response.code.unwrap(), 200); - assert_eq!(response.reason.unwrap(), "OK"); - assert_eq!(response.headers.len(), 1); - assert_eq!(response.headers[0].name, "Bread"); - assert_eq!(response.headers[0].value, &b"baguette"[..]); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/httparse/src/macros.rs s390-tools-2.33.1/rust-vendor/httparse/src/macros.rs --- s390-tools-2.31.0/rust-vendor/httparse/src/macros.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/src/macros.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,59 +0,0 @@ -///! Utility macros - -macro_rules! next { - ($bytes:ident) => ({ - match $bytes.next() { - Some(b) => b, - None => return Ok(Status::Partial) - } - }) -} - -macro_rules! expect { - ($bytes:ident.next() == $pat:pat => $ret:expr) => { - expect!(next!($bytes) => $pat |? $ret) - }; - ($e:expr => $pat:pat |? $ret:expr) => { - match $e { - v@$pat => v, - _ => return $ret - } - }; -} - -macro_rules! complete { - ($e:expr) => { - match $e? 
{ - Status::Complete(v) => v, - Status::Partial => return Ok(Status::Partial) - } - } -} - -macro_rules! byte_map { - ($($flag:expr,)*) => ([ - $($flag != 0,)* - ]) -} - -macro_rules! space { - ($bytes:ident or $err:expr) => ({ - expect!($bytes.next() == b' ' => Err($err)); - $bytes.slice(); - }) -} - -macro_rules! newline { - ($bytes:ident) => ({ - match next!($bytes) { - b'\r' => { - expect!($bytes.next() == b'\n' => Err(Error::NewLine)); - $bytes.slice(); - }, - b'\n' => { - $bytes.slice(); - }, - _ => return Err(Error::NewLine) - } - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/httparse/src/simd/avx2.rs s390-tools-2.33.1/rust-vendor/httparse/src/simd/avx2.rs --- s390-tools-2.31.0/rust-vendor/httparse/src/simd/avx2.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/src/simd/avx2.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,181 +0,0 @@ -use crate::iter::Bytes; - -pub enum Scan { - /// Returned when an implementation finds a noteworthy token. - Found, - /// Returned when an implementation couldn't keep running because the input was too short. - TooShort, -} - - -pub unsafe fn parse_uri_batch_32(bytes: &mut Bytes) -> Scan { - while bytes.as_ref().len() >= 32 { - let advance = match_url_char_32_avx(bytes.as_ref()); - bytes.advance(advance); - - if advance != 32 { - return Scan::Found; - } - } - Scan::TooShort -} - -#[cfg(target_arch = "x86_64")] -#[target_feature(enable = "avx2")] -#[inline] -#[allow(non_snake_case, overflowing_literals)] -unsafe fn match_url_char_32_avx(buf: &[u8]) -> usize { - debug_assert!(buf.len() >= 32); - - /* - #[cfg(target_arch = "x86")] - use core::arch::x86::*; - #[cfg(target_arch = "x86_64")] - */ - use core::arch::x86_64::*; - - let ptr = buf.as_ptr(); - - let LSH: __m256i = _mm256_set1_epi8(0x0f); - - // See comment in sse42::match_url_char_16_sse. 
- - let URI: __m256i = _mm256_setr_epi8( - 0xf8, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, - 0xfc, 0xfc, 0xfc, 0xfc, 0xf4, 0xfc, 0xf4, 0x7c, - 0xf8, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, - 0xfc, 0xfc, 0xfc, 0xfc, 0xf4, 0xfc, 0xf4, 0x7c, - ); - let ARF: __m256i = _mm256_setr_epi8( - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - ); - - let data = _mm256_lddqu_si256(ptr as *const _); - let rbms = _mm256_shuffle_epi8(URI, data); - let cols = _mm256_and_si256(LSH, _mm256_srli_epi16(data, 4)); - let bits = _mm256_and_si256(_mm256_shuffle_epi8(ARF, cols), rbms); - - let v = _mm256_cmpeq_epi8(bits, _mm256_setzero_si256()); - let r = 0xffff_ffff_0000_0000 | _mm256_movemask_epi8(v) as u64; - - _tzcnt_u64(r) as usize -} - -#[cfg(target_arch = "x86")] -unsafe fn match_url_char_32_avx(_: &[u8]) -> usize { - unreachable!("AVX2 detection should be disabled for x86"); -} - -pub unsafe fn match_header_value_batch_32(bytes: &mut Bytes) -> Scan { - while bytes.as_ref().len() >= 32 { - let advance = match_header_value_char_32_avx(bytes.as_ref()); - bytes.advance(advance); - - if advance != 32 { - return Scan::Found; - } - } - Scan::TooShort -} - -#[cfg(target_arch = "x86_64")] -#[target_feature(enable = "avx2")] -#[inline] -#[allow(non_snake_case)] -unsafe fn match_header_value_char_32_avx(buf: &[u8]) -> usize { - debug_assert!(buf.len() >= 32); - - /* - #[cfg(target_arch = "x86")] - use core::arch::x86::*; - #[cfg(target_arch = "x86_64")] - */ - use core::arch::x86_64::*; - - let ptr = buf.as_ptr(); - - // %x09 %x20-%x7e %x80-%xff - let TAB: __m256i = _mm256_set1_epi8(0x09); - let DEL: __m256i = _mm256_set1_epi8(0x7f); - let LOW: __m256i = _mm256_set1_epi8(0x20); - - let dat = _mm256_lddqu_si256(ptr as *const _); - // unsigned comparison dat >= LOW - let low = _mm256_cmpeq_epi8(_mm256_max_epu8(dat, LOW), dat); - let tab = 
_mm256_cmpeq_epi8(dat, TAB); - let del = _mm256_cmpeq_epi8(dat, DEL); - let bit = _mm256_andnot_si256(del, _mm256_or_si256(low, tab)); - let rev = _mm256_cmpeq_epi8(bit, _mm256_setzero_si256()); - let res = 0xffff_ffff_0000_0000 | _mm256_movemask_epi8(rev) as u64; - - _tzcnt_u64(res) as usize -} - -#[cfg(target_arch = "x86")] -unsafe fn match_header_value_char_32_avx(_: &[u8]) -> usize { - unreachable!("AVX2 detection should be disabled for x86"); -} - -#[test] -fn avx2_code_matches_uri_chars_table() { - match super::detect() { - super::AVX_2 | super::AVX_2_AND_SSE_42 => {}, - _ => return, - } - - unsafe { - assert!(byte_is_allowed(b'_', parse_uri_batch_32)); - - for (b, allowed) in crate::URI_MAP.iter().cloned().enumerate() { - assert_eq!( - byte_is_allowed(b as u8, parse_uri_batch_32), allowed, - "byte_is_allowed({:?}) should be {:?}", b, allowed, - ); - } - } -} - -#[test] -fn avx2_code_matches_header_value_chars_table() { - match super::detect() { - super::AVX_2 | super::AVX_2_AND_SSE_42 => {}, - _ => return, - } - - unsafe { - assert!(byte_is_allowed(b'_', match_header_value_batch_32)); - - for (b, allowed) in crate::HEADER_VALUE_MAP.iter().cloned().enumerate() { - assert_eq!( - byte_is_allowed(b as u8, match_header_value_batch_32), allowed, - "byte_is_allowed({:?}) should be {:?}", b, allowed, - ); - } - } -} - -#[cfg(test)] -unsafe fn byte_is_allowed(byte: u8, f: unsafe fn(bytes: &mut Bytes<'_>) -> Scan) -> bool { - let slice = [ - b'_', b'_', b'_', b'_', - b'_', b'_', b'_', b'_', - b'_', b'_', b'_', b'_', - b'_', b'_', b'_', b'_', - b'_', b'_', b'_', b'_', - b'_', b'_', b'_', b'_', - b'_', b'_', byte, b'_', - b'_', b'_', b'_', b'_', - ]; - let mut bytes = Bytes::new(&slice); - - f(&mut bytes); - - match bytes.pos() { - 32 => true, - 26 => false, - _ => unreachable!(), - } -} diff -Nru s390-tools-2.31.0/rust-vendor/httparse/src/simd/fallback.rs s390-tools-2.33.1/rust-vendor/httparse/src/simd/fallback.rs --- 
s390-tools-2.31.0/rust-vendor/httparse/src/simd/fallback.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/src/simd/fallback.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -use crate::iter::Bytes; - -// Fallbacks that do nothing... - -#[inline(always)] -pub fn match_uri_vectored(_: &mut Bytes<'_>) {} -#[inline(always)] -pub fn match_header_value_vectored(_: &mut Bytes<'_>) {} diff -Nru s390-tools-2.31.0/rust-vendor/httparse/src/simd/mod.rs s390-tools-2.33.1/rust-vendor/httparse/src/simd/mod.rs --- s390-tools-2.31.0/rust-vendor/httparse/src/simd/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/src/simd/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,291 +0,0 @@ -#[cfg(not(all( - httparse_simd, - any( - target_arch = "x86", - target_arch = "x86_64", - ), -)))] -mod fallback; - -#[cfg(not(all( - httparse_simd, - any( - target_arch = "x86", - target_arch = "x86_64", - ), -)))] -pub use self::fallback::*; - -#[cfg(all( - httparse_simd, - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -mod sse42; - -#[cfg(all( - httparse_simd, - any( - httparse_simd_target_feature_avx2, - not(httparse_simd_target_feature_sse42), - ), - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -mod avx2; - -#[cfg(all( - httparse_simd, - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -pub const SSE_42: usize = 1; -#[cfg(all( - httparse_simd, - any(not(httparse_simd_target_feature_sse42), httparse_simd_target_feature_avx2), - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -pub const AVX_2: usize = 2; -#[cfg(all( - httparse_simd, - any( - not(httparse_simd_target_feature_sse42), - httparse_simd_target_feature_avx2, - test, - ), - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -pub const AVX_2_AND_SSE_42: usize = 3; - -#[cfg(all( - httparse_simd, - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -const NONE: usize = 
std::usize::MAX; -#[cfg(all( - httparse_simd, - not(any( - httparse_simd_target_feature_sse42, - httparse_simd_target_feature_avx2, - )), - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -mod runtime { - //! Runtime detection of simd features. Used when the build script - //! doesn't notice any target features at build time. - //! - //! While `is_x86_feature_detected!` has it's own caching built-in, - //! at least in 1.27.0, the functions don't inline, leaving using it - //! actually *slower* than just using the scalar fallback. - - use core::sync::atomic::{AtomicUsize, Ordering}; - - static FEATURE: AtomicUsize = AtomicUsize::new(0); - - const INIT: usize = 0; - - pub fn detect() -> usize { - let feat = FEATURE.load(Ordering::Relaxed); - if feat == INIT { - if cfg!(target_arch = "x86_64") && is_x86_feature_detected!("avx2") { - if is_x86_feature_detected!("sse4.2") { - FEATURE.store(super::AVX_2_AND_SSE_42, Ordering::Relaxed); - return super::AVX_2_AND_SSE_42; - } else { - FEATURE.store(super::AVX_2, Ordering::Relaxed); - return super::AVX_2; - } - } else if is_x86_feature_detected!("sse4.2") { - FEATURE.store(super::SSE_42, Ordering::Relaxed); - return super::SSE_42; - } else { - FEATURE.store(super::NONE, Ordering::Relaxed); - } - } - feat - } - - pub fn match_uri_vectored(bytes: &mut crate::iter::Bytes) { - unsafe { - match detect() { - super::SSE_42 => super::sse42::parse_uri_batch_16(bytes), - super::AVX_2 => { super::avx2::parse_uri_batch_32(bytes); }, - super::AVX_2_AND_SSE_42 => { - if let super::avx2::Scan::Found = super::avx2::parse_uri_batch_32(bytes) { - return; - } - super::sse42::parse_uri_batch_16(bytes) - }, - _ => () - } - } - - // else do nothing - } - - pub fn match_header_value_vectored(bytes: &mut crate::iter::Bytes) { - unsafe { - match detect() { - super::SSE_42 => super::sse42::match_header_value_batch_16(bytes), - super::AVX_2 => { super::avx2::match_header_value_batch_32(bytes); }, - super::AVX_2_AND_SSE_42 => { - if let 
super::avx2::Scan::Found = super::avx2::match_header_value_batch_32(bytes) { - return; - } - super::sse42::match_header_value_batch_16(bytes) - }, - _ => () - } - } - - // else do nothing - } -} - -#[cfg(all( - httparse_simd, - not(any( - httparse_simd_target_feature_sse42, - httparse_simd_target_feature_avx2, - )), - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -pub use self::runtime::*; - -#[cfg(all( - httparse_simd, - httparse_simd_target_feature_sse42, - not(httparse_simd_target_feature_avx2), - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -mod sse42_compile_time { - pub fn match_uri_vectored(bytes: &mut crate::iter::Bytes) { - if detect() == super::SSE_42 { - unsafe { - super::sse42::parse_uri_batch_16(bytes); - } - } - - // else do nothing - } - - pub fn match_header_value_vectored(bytes: &mut crate::iter::Bytes) { - if detect() == super::SSE_42 { - unsafe { - super::sse42::match_header_value_batch_16(bytes); - } - } - - // else do nothing - } - - pub fn detect() -> usize { - if is_x86_feature_detected!("sse4.2") { - super::SSE_42 - } else { - super::NONE - } - } -} - -#[cfg(all( - httparse_simd, - httparse_simd_target_feature_sse42, - not(httparse_simd_target_feature_avx2), - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -pub use self::sse42_compile_time::*; - -#[cfg(all( - httparse_simd, - httparse_simd_target_feature_avx2, - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -mod avx2_compile_time { - pub fn match_uri_vectored(bytes: &mut crate::iter::Bytes) { - // do both, since avx2 only works when bytes.len() >= 32 - if detect() == super::AVX_2_AND_SSE_42 { - unsafe { - super::avx2::parse_uri_batch_32(bytes); - } - - } - if detect() == super::SSE_42 { - unsafe { - super::sse42::parse_uri_batch_16(bytes); - } - } - - // else do nothing - } - - pub fn match_header_value_vectored(bytes: &mut crate::iter::Bytes) { - // do both, since avx2 only works when bytes.len() >= 32 - if detect() == 
super::AVX_2_AND_SSE_42 { - let scanned = unsafe { - super::avx2::match_header_value_batch_32(bytes) - }; - - if let super::avx2::Scan::Found = scanned { - return; - } - } - if detect() == super::SSE_42 { - unsafe { - super::sse42::match_header_value_batch_16(bytes); - } - } - - // else do nothing - } - - pub fn detect() -> usize { - if cfg!(target_arch = "x86_64") && is_x86_feature_detected!("avx2") { - super::AVX_2_AND_SSE_42 - } else if is_x86_feature_detected!("sse4.2") { - super::SSE_42 - } else { - super::NONE - } - } -} - -#[cfg(all( - httparse_simd, - httparse_simd_target_feature_avx2, - any( - target_arch = "x86", - target_arch = "x86_64", - ), -))] -pub use self::avx2_compile_time::*; diff -Nru s390-tools-2.31.0/rust-vendor/httparse/src/simd/sse42.rs s390-tools-2.33.1/rust-vendor/httparse/src/simd/sse42.rs --- s390-tools-2.31.0/rust-vendor/httparse/src/simd/sse42.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httparse/src/simd/sse42.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,157 +0,0 @@ -use crate::iter::Bytes; - -pub unsafe fn parse_uri_batch_16(bytes: &mut Bytes) { - while bytes.as_ref().len() >= 16 { - let advance = match_url_char_16_sse(bytes.as_ref()); - bytes.advance(advance); - - if advance != 16 { - break; - } - } -} - -#[target_feature(enable = "sse4.2")] -#[allow(non_snake_case, overflowing_literals)] -unsafe fn match_url_char_16_sse(buf: &[u8]) -> usize { - debug_assert!(buf.len() >= 16); - - #[cfg(target_arch = "x86")] - use core::arch::x86::*; - #[cfg(target_arch = "x86_64")] - use core::arch::x86_64::*; - - let ptr = buf.as_ptr(); - - let LSH: __m128i = _mm_set1_epi8(0x0f); - - // The first 0xf8 corresponds to the 8 first rows of the first column - // of URI_MAP in the crate's root, with the first row corresponding to bit 0 - // and the 8th row corresponding to bit 7. - // The 8 first rows give 0 0 0 1 1 1 1 1, which is 0xf8 (with least - // significant digit on the left). 
- // - // Another example just to drive the point home: in column 15, '>' is - // rejected, so the values are 0 0 1 0 1 1 1 1, which gives us 0xf4. - // - // Thanks to Vlad Krasnov for explaining this stuff to us mere mortals in - // a GitHub comment! - // - // https://github.com/seanmonstar/httparse/pull/89#issuecomment-807039219 - - let URI: __m128i = _mm_setr_epi8( - 0xf8, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, - 0xfc, 0xfc, 0xfc, 0xfc, 0xf4, 0xfc, 0xf4, 0x7c, - ); - let ARF: __m128i = _mm_setr_epi8( - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - ); - - let data = _mm_lddqu_si128(ptr as *const _); - let rbms = _mm_shuffle_epi8(URI, data); - let cols = _mm_and_si128(LSH, _mm_srli_epi16(data, 4)); - let bits = _mm_and_si128(_mm_shuffle_epi8(ARF, cols), rbms); - - let v = _mm_cmpeq_epi8(bits, _mm_setzero_si128()); - let r = 0xffff_0000 | _mm_movemask_epi8(v) as u32; - - _tzcnt_u32(r) as usize -} - -pub unsafe fn match_header_value_batch_16(bytes: &mut Bytes) { - while bytes.as_ref().len() >= 16 { - let advance = match_header_value_char_16_sse(bytes.as_ref()); - bytes.advance(advance); - - if advance != 16 { - break; - } - } -} - -#[target_feature(enable = "sse4.2")] -#[allow(non_snake_case)] -unsafe fn match_header_value_char_16_sse(buf: &[u8]) -> usize { - debug_assert!(buf.len() >= 16); - - #[cfg(target_arch = "x86")] - use core::arch::x86::*; - #[cfg(target_arch = "x86_64")] - use core::arch::x86_64::*; - - let ptr = buf.as_ptr(); - - // %x09 %x20-%x7e %x80-%xff - let TAB: __m128i = _mm_set1_epi8(0x09); - let DEL: __m128i = _mm_set1_epi8(0x7f); - let LOW: __m128i = _mm_set1_epi8(0x20); - - let dat = _mm_lddqu_si128(ptr as *const _); - // unsigned comparison dat >= LOW - let low = _mm_cmpeq_epi8(_mm_max_epu8(dat, LOW), dat); - let tab = _mm_cmpeq_epi8(dat, TAB); - let del = _mm_cmpeq_epi8(dat, DEL); - let bit = _mm_andnot_si128(del, _mm_or_si128(low, tab)); - let rev = _mm_cmpeq_epi8(bit, 
_mm_setzero_si128()); - let res = 0xffff_0000 | _mm_movemask_epi8(rev) as u32; - - _tzcnt_u32(res) as usize -} - -#[test] -fn sse_code_matches_uri_chars_table() { - match super::detect() { - super::SSE_42 | super::AVX_2_AND_SSE_42 => {}, - _ => return, - } - - unsafe { - assert!(byte_is_allowed(b'_', parse_uri_batch_16)); - - for (b, allowed) in crate::URI_MAP.iter().cloned().enumerate() { - assert_eq!( - byte_is_allowed(b as u8, parse_uri_batch_16), allowed, - "byte_is_allowed({:?}) should be {:?}", b, allowed, - ); - } - } -} - -#[test] -fn sse_code_matches_header_value_chars_table() { - match super::detect() { - super::SSE_42 | super::AVX_2_AND_SSE_42 => {}, - _ => return, - } - - unsafe { - assert!(byte_is_allowed(b'_', match_header_value_batch_16)); - - for (b, allowed) in crate::HEADER_VALUE_MAP.iter().cloned().enumerate() { - assert_eq!( - byte_is_allowed(b as u8, match_header_value_batch_16), allowed, - "byte_is_allowed({:?}) should be {:?}", b, allowed, - ); - } - } -} - -#[cfg(test)] -unsafe fn byte_is_allowed(byte: u8, f: unsafe fn(bytes: &mut Bytes<'_>)) -> bool { - let slice = [ - b'_', b'_', b'_', b'_', - b'_', b'_', b'_', b'_', - b'_', b'_', byte, b'_', - b'_', b'_', b'_', b'_', - ]; - let mut bytes = Bytes::new(&slice); - - f(&mut bytes); - - match bytes.pos() { - 16 => true, - 10 => false, - _ => unreachable!(), - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http-body/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/http-body/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/http-body/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/http-body/Cargo.toml s390-tools-2.33.1/rust-vendor/http-body/Cargo.toml --- s390-tools-2.31.0/rust-vendor/http-body/Cargo.toml 
2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,45 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "http-body" -version = "0.4.5" -authors = [ - "Carl Lerche ", - "Lucio Franco ", - "Sean McArthur ", -] -description = """ -Trait representing an asynchronous, streaming, HTTP request or response body. -""" -documentation = "https://docs.rs/http-body" -readme = "README.md" -keywords = ["http"] -categories = ["web-programming"] -license = "MIT" -repository = "https://github.com/hyperium/http-body" - -[dependencies.bytes] -version = "1" - -[dependencies.http] -version = "0.2" - -[dependencies.pin-project-lite] -version = "0.2" - -[dev-dependencies.tokio] -version = "1" -features = [ - "macros", - "rt", -] diff -Nru s390-tools-2.31.0/rust-vendor/http-body/CHANGELOG.md s390-tools-2.33.1/rust-vendor/http-body/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/http-body/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,62 +0,0 @@ -# Unreleased - -None. - -# 0.4.5 (May 20, 2022) - -- Add `String` impl for `Body`. -- Add `Limited` body implementation. - -# 0.4.4 (October 22, 2021) - -- Add `UnsyncBoxBody` and `Body::boxed_unsync`. - -# 0.4.3 (August 8, 2021) - -- Implement `Default` for `BoxBody`. - -# 0.4.2 (May 8, 2021) - -- Correctly override `Body::size_hint` and `Body::is_end_stream` for `Empty`. 
-- Add `Full` which is a body that consists of a single chunk. - -# 0.4.1 (March 18, 2021) - -- Add combinators to `Body`: - - `map_data`: Change the `Data` chunks produced by the body. - - `map_err`: Change the `Error`s produced by the body. - - `boxed`: Convert the `Body` into a boxed trait object. -- Add `Empty`. - -# 0.4.0 (December 23, 2020) - -- Update `bytes` to v1.0. - -# 0.3.1 (December 13, 2019) - -- Implement `Body` for `http::Request` and `http::Response`. - -# 0.3.0 (December 4, 2019) - -- Rename `next` combinator to `data`. - -# 0.2.0 (December 3, 2019) - -- Update `http` to v0.2. -- Update `bytes` to v0.5. - -# 0.2.0-alpha.3 (October 1, 2019) - -- Fix `Body` to be object-safe. - -# 0.2.0-alpha.2 (October 1, 2019) - -- Add `next` and `trailers` combinator methods. - -# 0.2.0-alpha.1 (August 20, 2019) - -- Update to use `Pin` in `poll_data` and `poll_trailers`. - -# 0.1.0 (May 7, 2019) - -- Initial release diff -Nru s390-tools-2.31.0/rust-vendor/http-body/LICENSE s390-tools-2.33.1/rust-vendor/http-body/LICENSE --- s390-tools-2.31.0/rust-vendor/http-body/LICENSE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2019 Hyper Contributors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/http-body/README.md s390-tools-2.33.1/rust-vendor/http-body/README.md --- s390-tools-2.31.0/rust-vendor/http-body/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -# HTTP Body - -A trait representing asynchronous operations on an HTTP body. - -[![crates.io][crates-badge]][crates-url] -[![documentation][docs-badge]][docs-url] -[![MIT License][mit-badge]][mit-url] -[![CI Status][ci-badge]][ci-url] - -[crates-badge]: https://img.shields.io/crates/v/http-body.svg -[crates-url]: https://crates.io/crates/http-body -[docs-badge]: https://docs.rs/http-body/badge.svg -[docs-url]: https://docs.rs/http-body -[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg -[mit-url]: LICENSE -[ci-badge]: https://github.com/hyperium/http-body/workflows/CI/badge.svg -[ci-url]: https://github.com/hyperium/http-body/actions?query=workflow%3ACI - -## License - -This project is licensed under the [MIT license](LICENSE). - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in `http-body` by you, shall be licensed as MIT, without any additional -terms or conditions. 
diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/combinators/box_body.rs s390-tools-2.33.1/rust-vendor/http-body/src/combinators/box_body.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/combinators/box_body.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/combinators/box_body.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,134 +0,0 @@ -use crate::Body; -use bytes::Buf; -use std::{ - fmt, - pin::Pin, - task::{Context, Poll}, -}; - -/// A boxed [`Body`] trait object. -pub struct BoxBody { - inner: Pin + Send + Sync + 'static>>, -} - -/// A boxed [`Body`] trait object that is !Sync. -pub struct UnsyncBoxBody { - inner: Pin + Send + 'static>>, -} - -impl BoxBody { - /// Create a new `BoxBody`. - pub fn new(body: B) -> Self - where - B: Body + Send + Sync + 'static, - D: Buf, - { - Self { - inner: Box::pin(body), - } - } -} - -impl fmt::Debug for BoxBody { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BoxBody").finish() - } -} - -impl Body for BoxBody -where - D: Buf, -{ - type Data = D; - type Error = E; - - fn poll_data( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.inner.as_mut().poll_data(cx) - } - - fn poll_trailers( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - self.inner.as_mut().poll_trailers(cx) - } - - fn is_end_stream(&self) -> bool { - self.inner.is_end_stream() - } - - fn size_hint(&self) -> crate::SizeHint { - self.inner.size_hint() - } -} - -impl Default for BoxBody -where - D: Buf + 'static, -{ - fn default() -> Self { - BoxBody::new(crate::Empty::new().map_err(|err| match err {})) - } -} - -// === UnsyncBoxBody === -impl UnsyncBoxBody { - /// Create a new `BoxBody`. 
- pub fn new(body: B) -> Self - where - B: Body + Send + 'static, - D: Buf, - { - Self { - inner: Box::pin(body), - } - } -} - -impl fmt::Debug for UnsyncBoxBody { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("UnsyncBoxBody").finish() - } -} - -impl Body for UnsyncBoxBody -where - D: Buf, -{ - type Data = D; - type Error = E; - - fn poll_data( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.inner.as_mut().poll_data(cx) - } - - fn poll_trailers( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - self.inner.as_mut().poll_trailers(cx) - } - - fn is_end_stream(&self) -> bool { - self.inner.is_end_stream() - } - - fn size_hint(&self) -> crate::SizeHint { - self.inner.size_hint() - } -} - -impl Default for UnsyncBoxBody -where - D: Buf + 'static, -{ - fn default() -> Self { - UnsyncBoxBody::new(crate::Empty::new().map_err(|err| match err {})) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/combinators/map_data.rs s390-tools-2.33.1/rust-vendor/http-body/src/combinators/map_data.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/combinators/map_data.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/combinators/map_data.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,94 +0,0 @@ -use crate::Body; -use bytes::Buf; -use pin_project_lite::pin_project; -use std::{ - any::type_name, - fmt, - pin::Pin, - task::{Context, Poll}, -}; - -pin_project! { - /// Body returned by the [`map_data`] combinator. 
- /// - /// [`map_data`]: crate::util::BodyExt::map_data - #[derive(Clone, Copy)] - pub struct MapData { - #[pin] - inner: B, - f: F - } -} - -impl MapData { - #[inline] - pub(crate) fn new(body: B, f: F) -> Self { - Self { inner: body, f } - } - - /// Get a reference to the inner body - pub fn get_ref(&self) -> &B { - &self.inner - } - - /// Get a mutable reference to the inner body - pub fn get_mut(&mut self) -> &mut B { - &mut self.inner - } - - /// Get a pinned mutable reference to the inner body - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { - self.project().inner - } - - /// Consume `self`, returning the inner body - pub fn into_inner(self) -> B { - self.inner - } -} - -impl Body for MapData -where - B: Body, - F: FnMut(B::Data) -> B2, - B2: Buf, -{ - type Data = B2; - type Error = B::Error; - - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - let this = self.project(); - match this.inner.poll_data(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(None) => Poll::Ready(None), - Poll::Ready(Some(Ok(data))) => Poll::Ready(Some(Ok((this.f)(data)))), - Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err))), - } - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - self.project().inner.poll_trailers(cx) - } - - fn is_end_stream(&self) -> bool { - self.inner.is_end_stream() - } -} - -impl fmt::Debug for MapData -where - B: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("MapData") - .field("inner", &self.inner) - .field("f", &type_name::()) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/combinators/map_err.rs s390-tools-2.33.1/rust-vendor/http-body/src/combinators/map_err.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/combinators/map_err.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/combinators/map_err.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,97 +0,0 
@@ -use crate::Body; -use pin_project_lite::pin_project; -use std::{ - any::type_name, - fmt, - pin::Pin, - task::{Context, Poll}, -}; - -pin_project! { - /// Body returned by the [`map_err`] combinator. - /// - /// [`map_err`]: crate::util::BodyExt::map_err - #[derive(Clone, Copy)] - pub struct MapErr { - #[pin] - inner: B, - f: F - } -} - -impl MapErr { - #[inline] - pub(crate) fn new(body: B, f: F) -> Self { - Self { inner: body, f } - } - - /// Get a reference to the inner body - pub fn get_ref(&self) -> &B { - &self.inner - } - - /// Get a mutable reference to the inner body - pub fn get_mut(&mut self) -> &mut B { - &mut self.inner - } - - /// Get a pinned mutable reference to the inner body - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { - self.project().inner - } - - /// Consume `self`, returning the inner body - pub fn into_inner(self) -> B { - self.inner - } -} - -impl Body for MapErr -where - B: Body, - F: FnMut(B::Error) -> E, -{ - type Data = B::Data; - type Error = E; - - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - let this = self.project(); - match this.inner.poll_data(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(None) => Poll::Ready(None), - Poll::Ready(Some(Ok(data))) => Poll::Ready(Some(Ok(data))), - Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err((this.f)(err)))), - } - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let this = self.project(); - this.inner.poll_trailers(cx).map_err(this.f) - } - - fn is_end_stream(&self) -> bool { - self.inner.is_end_stream() - } - - fn size_hint(&self) -> crate::SizeHint { - self.inner.size_hint() - } -} - -impl fmt::Debug for MapErr -where - B: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("MapErr") - .field("inner", &self.inner) - .field("f", &type_name::()) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/combinators/mod.rs 
s390-tools-2.33.1/rust-vendor/http-body/src/combinators/mod.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/combinators/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/combinators/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -//! Combinators for the `Body` trait. - -mod box_body; -mod map_data; -mod map_err; - -pub use self::{ - box_body::{BoxBody, UnsyncBoxBody}, - map_data::MapData, - map_err::MapErr, -}; diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/empty.rs s390-tools-2.33.1/rust-vendor/http-body/src/empty.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/empty.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/empty.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -use super::{Body, SizeHint}; -use bytes::Buf; -use http::HeaderMap; -use std::{ - convert::Infallible, - fmt, - marker::PhantomData, - pin::Pin, - task::{Context, Poll}, -}; - -/// A body that is always empty. -pub struct Empty { - _marker: PhantomData D>, -} - -impl Empty { - /// Create a new `Empty`. 
- pub fn new() -> Self { - Self::default() - } -} - -impl Body for Empty { - type Data = D; - type Error = Infallible; - - #[inline] - fn poll_data( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll>> { - Poll::Ready(None) - } - - #[inline] - fn poll_trailers( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Ok(None)) - } - - fn is_end_stream(&self) -> bool { - true - } - - fn size_hint(&self) -> SizeHint { - SizeHint::with_exact(0) - } -} - -impl fmt::Debug for Empty { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Empty").finish() - } -} - -impl Default for Empty { - fn default() -> Self { - Self { - _marker: PhantomData, - } - } -} - -impl Clone for Empty { - fn clone(&self) -> Self { - Self { - _marker: PhantomData, - } - } -} - -impl Copy for Empty {} diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/full.rs s390-tools-2.33.1/rust-vendor/http-body/src/full.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/full.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/full.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,151 +0,0 @@ -use crate::{Body, SizeHint}; -use bytes::{Buf, Bytes}; -use http::HeaderMap; -use pin_project_lite::pin_project; -use std::borrow::Cow; -use std::convert::{Infallible, TryFrom}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A body that consists of a single chunk. - #[derive(Clone, Copy, Debug)] - pub struct Full { - data: Option, - } -} - -impl Full -where - D: Buf, -{ - /// Create a new `Full`. 
- pub fn new(data: D) -> Self { - let data = if data.has_remaining() { - Some(data) - } else { - None - }; - Full { data } - } -} - -impl Body for Full -where - D: Buf, -{ - type Data = D; - type Error = Infallible; - - fn poll_data( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll>> { - Poll::Ready(self.data.take().map(Ok)) - } - - fn poll_trailers( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Ok(None)) - } - - fn is_end_stream(&self) -> bool { - self.data.is_none() - } - - fn size_hint(&self) -> SizeHint { - self.data - .as_ref() - .map(|data| SizeHint::with_exact(u64::try_from(data.remaining()).unwrap())) - .unwrap_or_else(|| SizeHint::with_exact(0)) - } -} - -impl Default for Full -where - D: Buf, -{ - /// Create an empty `Full`. - fn default() -> Self { - Full { data: None } - } -} - -impl From for Full -where - D: Buf + From, -{ - fn from(bytes: Bytes) -> Self { - Full::new(D::from(bytes)) - } -} - -impl From> for Full -where - D: Buf + From>, -{ - fn from(vec: Vec) -> Self { - Full::new(D::from(vec)) - } -} - -impl From<&'static [u8]> for Full -where - D: Buf + From<&'static [u8]>, -{ - fn from(slice: &'static [u8]) -> Self { - Full::new(D::from(slice)) - } -} - -impl From> for Full -where - D: Buf + From<&'static B> + From, - B: ToOwned + ?Sized, -{ - fn from(cow: Cow<'static, B>) -> Self { - match cow { - Cow::Borrowed(b) => Full::new(D::from(b)), - Cow::Owned(o) => Full::new(D::from(o)), - } - } -} - -impl From for Full -where - D: Buf + From, -{ - fn from(s: String) -> Self { - Full::new(D::from(s)) - } -} - -impl From<&'static str> for Full -where - D: Buf + From<&'static str>, -{ - fn from(slice: &'static str) -> Self { - Full::new(D::from(slice)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn full_returns_some() { - let mut full = Full::new(&b"hello"[..]); - assert_eq!(full.size_hint().exact(), Some(b"hello".len() as u64)); - assert_eq!(full.data().await, 
Some(Ok(&b"hello"[..]))); - assert!(full.data().await.is_none()); - } - - #[tokio::test] - async fn empty_full_returns_none() { - assert!(Full::<&[u8]>::default().data().await.is_none()); - assert!(Full::new(&b""[..]).data().await.is_none()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/lib.rs s390-tools-2.33.1/rust-vendor/http-body/src/lib.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,324 +0,0 @@ -#![doc(html_root_url = "https://docs.rs/http-body/0.4.5")] -#![deny( - missing_debug_implementations, - missing_docs, - unreachable_pub, - broken_intra_doc_links -)] -#![cfg_attr(test, deny(warnings))] - -//! Asynchronous HTTP request or response body. -//! -//! See [`Body`] for more details. -//! -//! [`Body`]: trait.Body.html - -mod empty; -mod full; -mod limited; -mod next; -mod size_hint; - -pub mod combinators; - -pub use self::empty::Empty; -pub use self::full::Full; -pub use self::limited::{LengthLimitError, Limited}; -pub use self::next::{Data, Trailers}; -pub use self::size_hint::SizeHint; - -use self::combinators::{BoxBody, MapData, MapErr, UnsyncBoxBody}; -use bytes::{Buf, Bytes}; -use http::HeaderMap; -use std::convert::Infallible; -use std::ops; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Trait representing a streaming body of a Request or Response. -/// -/// Data is streamed via the `poll_data` function, which asynchronously yields `T: Buf` values. The -/// `size_hint` function provides insight into the total number of bytes that will be streamed. -/// -/// The `poll_trailers` function returns an optional set of trailers used to finalize the request / -/// response exchange. This is mostly used when using the HTTP/2.0 protocol. -/// -pub trait Body { - /// Values yielded by the `Body`. - type Data: Buf; - - /// The error type this `Body` might generate. 
- type Error; - - /// Attempt to pull out the next data buffer of this stream. - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>; - - /// Poll for an optional **single** `HeaderMap` of trailers. - /// - /// This function should only be called once `poll_data` returns `None`. - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>>; - - /// Returns `true` when the end of stream has been reached. - /// - /// An end of stream means that both `poll_data` and `poll_trailers` will - /// return `None`. - /// - /// A return value of `false` **does not** guarantee that a value will be - /// returned from `poll_stream` or `poll_trailers`. - fn is_end_stream(&self) -> bool { - false - } - - /// Returns the bounds on the remaining length of the stream. - /// - /// When the **exact** remaining length of the stream is known, the upper bound will be set and - /// will equal the lower bound. - fn size_hint(&self) -> SizeHint { - SizeHint::default() - } - - /// Returns future that resolves to next data chunk, if any. - fn data(&mut self) -> Data<'_, Self> - where - Self: Unpin + Sized, - { - Data(self) - } - - /// Returns future that resolves to trailers, if any. - fn trailers(&mut self) -> Trailers<'_, Self> - where - Self: Unpin + Sized, - { - Trailers(self) - } - - /// Maps this body's data value to a different value. - fn map_data(self, f: F) -> MapData - where - Self: Sized, - F: FnMut(Self::Data) -> B, - B: Buf, - { - MapData::new(self, f) - } - - /// Maps this body's error value to a different value. - fn map_err(self, f: F) -> MapErr - where - Self: Sized, - F: FnMut(Self::Error) -> E, - { - MapErr::new(self, f) - } - - /// Turn this body into a boxed trait object. - fn boxed(self) -> BoxBody - where - Self: Sized + Send + Sync + 'static, - { - BoxBody::new(self) - } - - /// Turn this body into a boxed trait object that is !Sync. 
- fn boxed_unsync(self) -> UnsyncBoxBody - where - Self: Sized + Send + 'static, - { - UnsyncBoxBody::new(self) - } -} - -impl Body for &mut T { - type Data = T::Data; - type Error = T::Error; - - fn poll_data( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - Pin::new(&mut **self).poll_data(cx) - } - - fn poll_trailers( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Pin::new(&mut **self).poll_trailers(cx) - } - - fn is_end_stream(&self) -> bool { - Pin::new(&**self).is_end_stream() - } - - fn size_hint(&self) -> SizeHint { - Pin::new(&**self).size_hint() - } -} - -impl

Body for Pin

-where - P: Unpin + ops::DerefMut, - P::Target: Body, -{ - type Data = <

::Target as Body>::Data; - type Error = <

(&self, mut pred: P) -> usize - where - P: FnMut(&K, &V) -> bool, - { - self.entries - .partition_point(move |a| pred(&a.key, &a.value)) - } -} - -impl<'a, K, V> IntoIterator for &'a Slice { - type IntoIter = Iter<'a, K, V>; - type Item = (&'a K, &'a V); - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, K, V> IntoIterator for &'a mut Slice { - type IntoIter = IterMut<'a, K, V>; - type Item = (&'a K, &'a mut V); - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -impl IntoIterator for Box> { - type IntoIter = IntoIter; - type Item = (K, V); - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self.into_entries()) - } -} - -impl Default for &'_ Slice { - fn default() -> Self { - Slice::from_slice(&[]) - } -} - -impl Default for &'_ mut Slice { - fn default() -> Self { - Slice::from_mut_slice(&mut []) - } -} - -impl Default for Box> { - fn default() -> Self { - Slice::from_boxed(Box::default()) - } -} - -impl Clone for Box> { - fn clone(&self) -> Self { - Slice::from_boxed(self.entries.to_vec().into_boxed_slice()) - } -} - -impl From<&Slice> for Box> { - fn from(slice: &Slice) -> Self { - Slice::from_boxed(Box::from(&slice.entries)) - } -} - -impl fmt::Debug for Slice { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self).finish() - } -} - -impl PartialEq for Slice { - fn eq(&self, other: &Self) -> bool { - self.len() == other.len() && self.iter().eq(other) - } -} - -impl Eq for Slice {} - -impl PartialOrd for Slice { - fn partial_cmp(&self, other: &Self) -> Option { - self.iter().partial_cmp(other) - } -} - -impl Ord for Slice { - fn cmp(&self, other: &Self) -> Ordering { - self.iter().cmp(other) - } -} - -impl Hash for Slice { - fn hash(&self, state: &mut H) { - self.len().hash(state); - for (key, value) in self { - key.hash(state); - value.hash(state); - } - } -} - -impl Index for Slice { - type Output = V; - - fn index(&self, index: usize) -> &V { - &self.entries[index].value 
- } -} - -impl IndexMut for Slice { - fn index_mut(&mut self, index: usize) -> &mut V { - &mut self.entries[index].value - } -} - -// We can't have `impl> Index` because that conflicts -// both upstream with `Index` and downstream with `Index<&Q>`. -// Instead, we repeat the implementations for all the core range types. -macro_rules! impl_index { - ($($range:ty),*) => {$( - impl Index<$range> for IndexMap { - type Output = Slice; - - fn index(&self, range: $range) -> &Self::Output { - Slice::from_slice(&self.as_entries()[range]) - } - } - - impl IndexMut<$range> for IndexMap { - fn index_mut(&mut self, range: $range) -> &mut Self::Output { - Slice::from_mut_slice(&mut self.as_entries_mut()[range]) - } - } - - impl Index<$range> for Slice { - type Output = Slice; - - fn index(&self, range: $range) -> &Self { - Self::from_slice(&self.entries[range]) - } - } - - impl IndexMut<$range> for Slice { - fn index_mut(&mut self, range: $range) -> &mut Self { - Self::from_mut_slice(&mut self.entries[range]) - } - } - )*} -} -impl_index!( - ops::Range, - ops::RangeFrom, - ops::RangeFull, - ops::RangeInclusive, - ops::RangeTo, - ops::RangeToInclusive, - (Bound, Bound) -); - -#[cfg(test)] -mod tests { - use super::*; - use alloc::vec::Vec; - - #[test] - fn slice_index() { - fn check( - vec_slice: &[(i32, i32)], - map_slice: &Slice, - sub_slice: &Slice, - ) { - assert_eq!(map_slice as *const _, sub_slice as *const _); - itertools::assert_equal( - vec_slice.iter().copied(), - map_slice.iter().map(|(&k, &v)| (k, v)), - ); - itertools::assert_equal(vec_slice.iter().map(|(k, _)| k), map_slice.keys()); - itertools::assert_equal(vec_slice.iter().map(|(_, v)| v), map_slice.values()); - } - - let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect(); - let map: IndexMap = vec.iter().cloned().collect(); - let slice = map.as_slice(); - - // RangeFull - check(&vec[..], &map[..], &slice[..]); - - for i in 0usize..10 { - // Index - assert_eq!(vec[i].1, map[i]); - assert_eq!(vec[i].1, 
slice[i]); - assert_eq!(map[&(i as i32)], map[i]); - assert_eq!(map[&(i as i32)], slice[i]); - - // RangeFrom - check(&vec[i..], &map[i..], &slice[i..]); - - // RangeTo - check(&vec[..i], &map[..i], &slice[..i]); - - // RangeToInclusive - check(&vec[..=i], &map[..=i], &slice[..=i]); - - // (Bound, Bound) - let bounds = (Bound::Excluded(i), Bound::Unbounded); - check(&vec[i + 1..], &map[bounds], &slice[bounds]); - - for j in i..=10 { - // Range - check(&vec[i..j], &map[i..j], &slice[i..j]); - } - - for j in i..10 { - // RangeInclusive - check(&vec[i..=j], &map[i..=j], &slice[i..=j]); - } - } - } - - #[test] - fn slice_index_mut() { - fn check_mut( - vec_slice: &[(i32, i32)], - map_slice: &mut Slice, - sub_slice: &mut Slice, - ) { - assert_eq!(map_slice, sub_slice); - itertools::assert_equal( - vec_slice.iter().copied(), - map_slice.iter_mut().map(|(&k, &mut v)| (k, v)), - ); - itertools::assert_equal( - vec_slice.iter().map(|&(_, v)| v), - map_slice.values_mut().map(|&mut v| v), - ); - } - - let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect(); - let mut map: IndexMap = vec.iter().cloned().collect(); - let mut map2 = map.clone(); - let slice = map2.as_mut_slice(); - - // RangeFull - check_mut(&vec[..], &mut map[..], &mut slice[..]); - - for i in 0usize..10 { - // IndexMut - assert_eq!(&mut map[i], &mut slice[i]); - - // RangeFrom - check_mut(&vec[i..], &mut map[i..], &mut slice[i..]); - - // RangeTo - check_mut(&vec[..i], &mut map[..i], &mut slice[..i]); - - // RangeToInclusive - check_mut(&vec[..=i], &mut map[..=i], &mut slice[..=i]); - - // (Bound, Bound) - let bounds = (Bound::Excluded(i), Bound::Unbounded); - check_mut(&vec[i + 1..], &mut map[bounds], &mut slice[bounds]); - - for j in i..=10 { - // Range - check_mut(&vec[i..j], &mut map[i..j], &mut slice[i..j]); - } - - for j in i..10 { - // RangeInclusive - check_mut(&vec[i..=j], &mut map[i..=j], &mut slice[i..=j]); - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/map/tests.rs 
s390-tools-2.33.1/rust-vendor/indexmap/src/map/tests.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/map/tests.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/map/tests.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,670 +0,0 @@ -use super::*; -use std::string::String; - -#[test] -fn it_works() { - let mut map = IndexMap::new(); - assert_eq!(map.is_empty(), true); - map.insert(1, ()); - map.insert(1, ()); - assert_eq!(map.len(), 1); - assert!(map.get(&1).is_some()); - assert_eq!(map.is_empty(), false); -} - -#[test] -fn new() { - let map = IndexMap::::new(); - println!("{:?}", map); - assert_eq!(map.capacity(), 0); - assert_eq!(map.len(), 0); - assert_eq!(map.is_empty(), true); -} - -#[test] -fn insert() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5]; - let not_present = [1, 3, 6, 9, 10]; - let mut map = IndexMap::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(map.len(), i); - map.insert(elt, elt); - assert_eq!(map.len(), i + 1); - assert_eq!(map.get(&elt), Some(&elt)); - assert_eq!(map[&elt], elt); - } - println!("{:?}", map); - - for &elt in ¬_present { - assert!(map.get(&elt).is_none()); - } -} - -#[test] -fn insert_full() { - let insert = vec![9, 2, 7, 1, 4, 6, 13]; - let present = vec![1, 6, 2]; - let mut map = IndexMap::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(map.len(), i); - let (index, existing) = map.insert_full(elt, elt); - assert_eq!(existing, None); - assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); - assert_eq!(map.len(), i + 1); - } - - let len = map.len(); - for &elt in &present { - let (index, existing) = map.insert_full(elt, elt); - assert_eq!(existing, Some(elt)); - assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); - assert_eq!(map.len(), len); - } -} - -#[test] -fn insert_2() { - let mut map = IndexMap::with_capacity(16); - - let mut keys = vec![]; - keys.extend(0..16); - keys.extend(if cfg!(miri) { 
32..64 } else { 128..267 }); - - for &i in &keys { - let old_map = map.clone(); - map.insert(i, ()); - for key in old_map.keys() { - if map.get(key).is_none() { - println!("old_map: {:?}", old_map); - println!("map: {:?}", map); - panic!("did not find {} in map", key); - } - } - } - - for &i in &keys { - assert!(map.get(&i).is_some(), "did not find {}", i); - } -} - -#[test] -fn insert_order() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut map = IndexMap::new(); - - for &elt in &insert { - map.insert(elt, ()); - } - - assert_eq!(map.keys().count(), map.len()); - assert_eq!(map.keys().count(), insert.len()); - for (a, b) in insert.iter().zip(map.keys()) { - assert_eq!(a, b); - } - for (i, k) in (0..insert.len()).zip(map.keys()) { - assert_eq!(map.get_index(i).unwrap().0, k); - } -} - -#[test] -fn grow() { - let insert = [0, 4, 2, 12, 8, 7, 11]; - let not_present = [1, 3, 6, 9, 10]; - let mut map = IndexMap::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(map.len(), i); - map.insert(elt, elt); - assert_eq!(map.len(), i + 1); - assert_eq!(map.get(&elt), Some(&elt)); - assert_eq!(map[&elt], elt); - } - - println!("{:?}", map); - for &elt in &insert { - map.insert(elt * 10, elt); - } - for &elt in &insert { - map.insert(elt * 100, elt); - } - for (i, &elt) in insert.iter().cycle().enumerate().take(100) { - map.insert(elt * 100 + i as i32, elt); - } - println!("{:?}", map); - for &elt in ¬_present { - assert!(map.get(&elt).is_none()); - } -} - -#[test] -fn reserve() { - let mut map = IndexMap::::new(); - assert_eq!(map.capacity(), 0); - map.reserve(100); - let capacity = map.capacity(); - assert!(capacity >= 100); - for i in 0..capacity { - assert_eq!(map.len(), i); - map.insert(i, i * i); - assert_eq!(map.len(), i + 1); - assert_eq!(map.capacity(), capacity); - assert_eq!(map.get(&i), Some(&(i * i))); - } - map.insert(capacity, std::usize::MAX); - assert_eq!(map.len(), capacity + 1); - 
assert!(map.capacity() > capacity); - assert_eq!(map.get(&capacity), Some(&std::usize::MAX)); -} - -#[test] -fn try_reserve() { - let mut map = IndexMap::::new(); - assert_eq!(map.capacity(), 0); - assert_eq!(map.try_reserve(100), Ok(())); - assert!(map.capacity() >= 100); - assert!(map.try_reserve(usize::MAX).is_err()); -} - -#[test] -fn shrink_to_fit() { - let mut map = IndexMap::::new(); - assert_eq!(map.capacity(), 0); - for i in 0..100 { - assert_eq!(map.len(), i); - map.insert(i, i * i); - assert_eq!(map.len(), i + 1); - assert!(map.capacity() >= i + 1); - assert_eq!(map.get(&i), Some(&(i * i))); - map.shrink_to_fit(); - assert_eq!(map.len(), i + 1); - assert_eq!(map.capacity(), i + 1); - assert_eq!(map.get(&i), Some(&(i * i))); - } -} - -#[test] -fn remove() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut map = IndexMap::new(); - - for &elt in &insert { - map.insert(elt, elt); - } - - assert_eq!(map.keys().count(), map.len()); - assert_eq!(map.keys().count(), insert.len()); - for (a, b) in insert.iter().zip(map.keys()) { - assert_eq!(a, b); - } - - let remove_fail = [99, 77]; - let remove = [4, 12, 8, 7]; - - for &key in &remove_fail { - assert!(map.swap_remove_full(&key).is_none()); - } - println!("{:?}", map); - for &key in &remove { - //println!("{:?}", map); - let index = map.get_full(&key).unwrap().0; - assert_eq!(map.swap_remove_full(&key), Some((index, key, key))); - } - println!("{:?}", map); - - for key in &insert { - assert_eq!(map.get(key).is_some(), !remove.contains(key)); - } - assert_eq!(map.len(), insert.len() - remove.len()); - assert_eq!(map.keys().count(), insert.len() - remove.len()); -} - -#[test] -fn remove_to_empty() { - let mut map = indexmap! 
{ 0 => 0, 4 => 4, 5 => 5 }; - map.swap_remove(&5).unwrap(); - map.swap_remove(&4).unwrap(); - map.swap_remove(&0).unwrap(); - assert!(map.is_empty()); -} - -#[test] -fn swap_remove_index() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut map = IndexMap::new(); - - for &elt in &insert { - map.insert(elt, elt * 2); - } - - let mut vector = insert.to_vec(); - let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; - - // check that the same swap remove sequence on vec and map - // have the same result. - for &rm in remove_sequence { - let out_vec = vector.swap_remove(rm); - let (out_map, _) = map.swap_remove_index(rm).unwrap(); - assert_eq!(out_vec, out_map); - } - assert_eq!(vector.len(), map.len()); - for (a, b) in vector.iter().zip(map.keys()) { - assert_eq!(a, b); - } -} - -#[test] -fn partial_eq_and_eq() { - let mut map_a = IndexMap::new(); - map_a.insert(1, "1"); - map_a.insert(2, "2"); - let mut map_b = map_a.clone(); - assert_eq!(map_a, map_b); - map_b.swap_remove(&1); - assert_ne!(map_a, map_b); - - let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.into())).collect(); - assert_ne!(map_a, map_c); - assert_ne!(map_c, map_a); -} - -#[test] -fn extend() { - let mut map = IndexMap::new(); - map.extend(vec![(&1, &2), (&3, &4)]); - map.extend(vec![(5, 6)]); - assert_eq!( - map.into_iter().collect::>(), - vec![(1, 2), (3, 4), (5, 6)] - ); -} - -#[test] -fn entry() { - let mut map = IndexMap::new(); - - map.insert(1, "1"); - map.insert(2, "2"); - { - let e = map.entry(3); - assert_eq!(e.index(), 2); - let e = e.or_insert("3"); - assert_eq!(e, &"3"); - } - - let e = map.entry(2); - assert_eq!(e.index(), 1); - assert_eq!(e.key(), &2); - match e { - Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"), - Entry::Vacant(_) => panic!(), - } - assert_eq!(e.or_insert("4"), &"2"); -} - -#[test] -fn entry_and_modify() { - let mut map = IndexMap::new(); - - map.insert(1, "1"); - map.entry(1).and_modify(|x| *x = "2"); - 
assert_eq!(Some(&"2"), map.get(&1)); - - map.entry(2).and_modify(|x| *x = "doesn't exist"); - assert_eq!(None, map.get(&2)); -} - -#[test] -fn entry_or_default() { - let mut map = IndexMap::new(); - - #[derive(Debug, PartialEq)] - enum TestEnum { - DefaultValue, - NonDefaultValue, - } - - impl Default for TestEnum { - fn default() -> Self { - TestEnum::DefaultValue - } - } - - map.insert(1, TestEnum::NonDefaultValue); - assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default()); - - assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default()); -} - -#[test] -fn occupied_entry_key() { - // These keys match hash and equality, but their addresses are distinct. - let (k1, k2) = (&mut 1, &mut 1); - let k1_ptr = k1 as *const i32; - let k2_ptr = k2 as *const i32; - assert_ne!(k1_ptr, k2_ptr); - - let mut map = IndexMap::new(); - map.insert(k1, "value"); - match map.entry(k2) { - Entry::Occupied(ref e) => { - // `OccupiedEntry::key` should reference the key in the map, - // not the key that was used to find the entry. 
- let ptr = *e.key() as *const i32; - assert_eq!(ptr, k1_ptr); - assert_ne!(ptr, k2_ptr); - } - Entry::Vacant(_) => panic!(), - } -} - -#[test] -fn keys() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_iter().collect(); - let keys: Vec<_> = map.keys().copied().collect(); - assert_eq!(keys.len(), 3); - assert!(keys.contains(&1)); - assert!(keys.contains(&2)); - assert!(keys.contains(&3)); -} - -#[test] -fn into_keys() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_iter().collect(); - let keys: Vec = map.into_keys().collect(); - assert_eq!(keys.len(), 3); - assert!(keys.contains(&1)); - assert!(keys.contains(&2)); - assert!(keys.contains(&3)); -} - -#[test] -fn values() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_iter().collect(); - let values: Vec<_> = map.values().copied().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&'a')); - assert!(values.contains(&'b')); - assert!(values.contains(&'c')); -} - -#[test] -fn values_mut() { - let vec = vec![(1, 1), (2, 2), (3, 3)]; - let mut map: IndexMap<_, _> = vec.into_iter().collect(); - for value in map.values_mut() { - *value *= 2 - } - let values: Vec<_> = map.values().copied().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&2)); - assert!(values.contains(&4)); - assert!(values.contains(&6)); -} - -#[test] -fn into_values() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_iter().collect(); - let values: Vec = map.into_values().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&'a')); - assert!(values.contains(&'b')); - assert!(values.contains(&'c')); -} - -#[test] -#[cfg(feature = "std")] -fn from_array() { - let map = IndexMap::from([(1, 2), (3, 4)]); - let mut expected = IndexMap::new(); - expected.insert(1, 2); - expected.insert(3, 4); - - assert_eq!(map, expected) -} - -#[test] -fn iter_default() { - 
struct K; - struct V; - fn assert_default() - where - T: Default + Iterator, - { - assert!(T::default().next().is_none()); - } - assert_default::>(); - assert_default::>(); - assert_default::>(); - assert_default::>(); - assert_default::>(); - assert_default::>(); - assert_default::>(); - assert_default::>(); -} - -#[test] -fn test_binary_search_by() { - // adapted from std's test for binary_search - let b: IndexMap<_, i32> = [] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(0)); - - let b: IndexMap<_, i32> = [4] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&3)), Err(0)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&4)), Ok(0)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(1)); - - let b: IndexMap<_, i32> = [1, 2, 4, 6, 8, 9] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(3)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Ok(3)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Err(4)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Ok(4)); - - let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&9)), Err(6)); - - let b: IndexMap<_, i32> = [1, 2, 4, 6, 7, 8, 9] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Ok(3)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(3)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Ok(5)); - - let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8, 9] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Err(5)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&0)), Err(0)); - - let b: IndexMap<_, i32> = [1, 3, 3, 3, 7] - 
.into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&0)), Err(0)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&1)), Ok(0)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&2)), Err(1)); - assert!(match b.binary_search_by(|_, x| x.cmp(&3)) { - Ok(1..=3) => true, - _ => false, - }); - assert!(match b.binary_search_by(|_, x| x.cmp(&3)) { - Ok(1..=3) => true, - _ => false, - }); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&4)), Err(4)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(4)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Err(4)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Ok(4)); - assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Err(5)); -} - -#[test] -fn test_binary_search_by_key() { - // adapted from std's test for binary_search - let b: IndexMap<_, i32> = [] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(0)); - - let b: IndexMap<_, i32> = [4] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by_key(&3, |_, &x| x), Err(0)); - assert_eq!(b.binary_search_by_key(&4, |_, &x| x), Ok(0)); - assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(1)); - - let b: IndexMap<_, i32> = [1, 2, 4, 6, 8, 9] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(3)); - assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Ok(3)); - assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Err(4)); - assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Ok(4)); - - let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by_key(&9, |_, &x| x), Err(6)); - - let b: IndexMap<_, i32> = [1, 2, 4, 6, 7, 8, 9] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - 
assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Ok(3)); - assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(3)); - assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Ok(5)); - - let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8, 9] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Err(5)); - assert_eq!(b.binary_search_by_key(&0, |_, &x| x), Err(0)); - - let b: IndexMap<_, i32> = [1, 3, 3, 3, 7] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.binary_search_by_key(&0, |_, &x| x), Err(0)); - assert_eq!(b.binary_search_by_key(&1, |_, &x| x), Ok(0)); - assert_eq!(b.binary_search_by_key(&2, |_, &x| x), Err(1)); - assert!(match b.binary_search_by_key(&3, |_, &x| x) { - Ok(1..=3) => true, - _ => false, - }); - assert!(match b.binary_search_by_key(&3, |_, &x| x) { - Ok(1..=3) => true, - _ => false, - }); - assert_eq!(b.binary_search_by_key(&4, |_, &x| x), Err(4)); - assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(4)); - assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Err(4)); - assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Ok(4)); - assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Err(5)); -} - -#[test] -fn test_partition_point() { - // adapted from std's test for partition_point - let b: IndexMap<_, i32> = [] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.partition_point(|_, &x| x < 5), 0); - - let b: IndexMap<_, i32> = [4] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.partition_point(|_, &x| x < 3), 0); - assert_eq!(b.partition_point(|_, &x| x < 4), 0); - assert_eq!(b.partition_point(|_, &x| x < 5), 1); - - let b: IndexMap<_, i32> = [1, 2, 4, 6, 8, 9] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.partition_point(|_, &x| x < 5), 3); - assert_eq!(b.partition_point(|_, &x| x < 6), 3); - assert_eq!(b.partition_point(|_, &x| 
x < 7), 4); - assert_eq!(b.partition_point(|_, &x| x < 8), 4); - - let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.partition_point(|_, &x| x < 9), 6); - - let b: IndexMap<_, i32> = [1, 2, 4, 6, 7, 8, 9] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.partition_point(|_, &x| x < 6), 3); - assert_eq!(b.partition_point(|_, &x| x < 5), 3); - assert_eq!(b.partition_point(|_, &x| x < 8), 5); - - let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8, 9] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.partition_point(|_, &x| x < 7), 5); - assert_eq!(b.partition_point(|_, &x| x < 0), 0); - - let b: IndexMap<_, i32> = [1, 3, 3, 3, 7] - .into_iter() - .enumerate() - .map(|(i, x)| (i + 100, x)) - .collect(); - assert_eq!(b.partition_point(|_, &x| x < 0), 0); - assert_eq!(b.partition_point(|_, &x| x < 1), 0); - assert_eq!(b.partition_point(|_, &x| x < 2), 1); - assert_eq!(b.partition_point(|_, &x| x < 3), 1); - assert_eq!(b.partition_point(|_, &x| x < 4), 4); - assert_eq!(b.partition_point(|_, &x| x < 5), 4); - assert_eq!(b.partition_point(|_, &x| x < 6), 4); - assert_eq!(b.partition_point(|_, &x| x < 7), 4); - assert_eq!(b.partition_point(|_, &x| x < 8), 5); -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/map.rs s390-tools-2.33.1/rust-vendor/indexmap/src/map.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/map.rs 2024-05-28 11:57:36.000000000 +0200 @@ -2,39 +2,29 @@ //! pairs is independent of the hash values of the keys. 
mod core; -mod iter; -mod slice; -#[cfg(feature = "serde")] -#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] -pub mod serde_seq; - -#[cfg(test)] -mod tests; - -pub use self::core::{Entry, OccupiedEntry, VacantEntry}; -pub use self::iter::{ - Drain, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values, ValuesMut, -}; -pub use self::slice::Slice; pub use crate::mutable_keys::MutableKeys; #[cfg(feature = "rayon")] pub use crate::rayon::map as rayon; +use crate::vec::{self, Vec}; use ::core::cmp::Ordering; use ::core::fmt; use ::core::hash::{BuildHasher, Hash, Hasher}; +use ::core::iter::FusedIterator; use ::core::ops::{Index, IndexMut, RangeBounds}; -use alloc::boxed::Box; -use alloc::vec::Vec; +use ::core::slice::{Iter as SliceIter, IterMut as SliceIterMut}; -#[cfg(feature = "std")] +#[cfg(has_std)] use std::collections::hash_map::RandomState; use self::core::IndexMapCore; -use crate::util::{third, try_simplify_range}; -use crate::{Bucket, Entries, Equivalent, HashValue, TryReserveError}; +use crate::equivalent::Equivalent; +use crate::util::third; +use crate::{Bucket, Entries, HashValue}; + +pub use self::core::{Entry, OccupiedEntry, VacantEntry}; /// A hash table where the iteration order of the key-value pairs is independent /// of the hash values of the keys. @@ -77,12 +67,12 @@ /// assert_eq!(letters[&'u'], 1); /// assert_eq!(letters.get(&'y'), None); /// ``` -#[cfg(feature = "std")] +#[cfg(has_std)] pub struct IndexMap { pub(crate) core: IndexMapCore, hash_builder: S, } -#[cfg(not(feature = "std"))] +#[cfg(not(has_std))] pub struct IndexMap { pub(crate) core: IndexMapCore, hash_builder: S, @@ -150,8 +140,7 @@ } } -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[cfg(has_std)] impl IndexMap { /// Create a new map. (Does not allocate.) #[inline] @@ -197,11 +186,6 @@ } } - /// Return the number of elements the map can hold without reallocating. 
- /// - /// This number is a lower bound; the map might be able to hold more, - /// but is guaranteed to be able to hold at least this many. - /// /// Computes in **O(1)** time. pub fn capacity(&self) -> usize { self.core.capacity() @@ -230,38 +214,52 @@ /// Return an iterator over the key-value pairs of the map, in their order pub fn iter(&self) -> Iter<'_, K, V> { - Iter::new(self.as_entries()) + Iter { + iter: self.as_entries().iter(), + } } /// Return an iterator over the key-value pairs of the map, in their order pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { - IterMut::new(self.as_entries_mut()) + IterMut { + iter: self.as_entries_mut().iter_mut(), + } } /// Return an iterator over the keys of the map, in their order pub fn keys(&self) -> Keys<'_, K, V> { - Keys::new(self.as_entries()) + Keys { + iter: self.as_entries().iter(), + } } /// Return an owning iterator over the keys of the map, in their order pub fn into_keys(self) -> IntoKeys { - IntoKeys::new(self.into_entries()) + IntoKeys { + iter: self.into_entries().into_iter(), + } } /// Return an iterator over the values of the map, in their order pub fn values(&self) -> Values<'_, K, V> { - Values::new(self.as_entries()) + Values { + iter: self.as_entries().iter(), + } } /// Return an iterator over mutable references to the values of the map, /// in their order pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { - ValuesMut::new(self.as_entries_mut()) + ValuesMut { + iter: self.as_entries_mut().iter_mut(), + } } /// Return an owning iterator over the values of the map, in their order pub fn into_values(self) -> IntoValues { - IntoValues::new(self.into_entries()) + IntoValues { + iter: self.into_entries().into_iter(), + } } /// Remove all key-value pairs in the map, while preserving its capacity. @@ -295,7 +293,9 @@ where R: RangeBounds, { - Drain::new(self.core.drain(range)) + Drain { + iter: self.core.drain(range), + } } /// Splits the collection into two at the given index. 
@@ -328,37 +328,6 @@ self.core.reserve(additional); } - /// Reserve capacity for `additional` more key-value pairs, without over-allocating. - /// - /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid - /// frequent re-allocations. However, the underlying data structures may still have internal - /// capacity requirements, and the allocator itself may give more space than requested, so this - /// cannot be relied upon to be precisely minimal. - /// - /// Computes in **O(n)** time. - pub fn reserve_exact(&mut self, additional: usize) { - self.core.reserve_exact(additional); - } - - /// Try to reserve capacity for `additional` more key-value pairs. - /// - /// Computes in **O(n)** time. - pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.core.try_reserve(additional) - } - - /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating. - /// - /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid - /// frequent re-allocations. However, the underlying data structures may still have internal - /// capacity requirements, and the allocator itself may give more space than requested, so this - /// cannot be relied upon to be precisely minimal. - /// - /// Computes in **O(n)** time. - pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.core.try_reserve_exact(additional) - } - /// Shrink the capacity of the map as much as possible. /// /// Computes in **O(n)** time. @@ -517,6 +486,21 @@ } } + pub(crate) fn get_full_mut2_impl( + &mut self, + key: &Q, + ) -> Option<(usize, &mut K, &mut V)> + where + Q: Hash + Equivalent, + { + if let Some(i) = self.get_index_of(key) { + let entry = &mut self.as_entries_mut()[i]; + Some((i, &mut entry.key, &mut entry.value)) + } else { + None + } + } + /// Remove the key-value pair equivalent to `key` and return /// its value. 
/// @@ -729,7 +713,9 @@ { let mut entries = self.into_entries(); entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - IntoIter::new(entries) + IntoIter { + iter: entries.into_iter(), + } } /// Sort the map's key-value pairs by the default ordering of the keys, but @@ -773,82 +759,9 @@ { let mut entries = self.into_entries(); entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - IntoIter::new(entries) - } - - /// Sort the map’s key-value pairs in place using a sort-key extraction function. - /// - /// During sorting, the function is called at most once per entry, by using temporary storage - /// to remember the results of its evaluation. The order of calls to the function is - /// unspecified and may change between versions of `indexmap` or the standard library. - /// - /// Computes in **O(m n + n log n + c)** time () and **O(n)** space, where the function is - /// **O(m)**, *n* is the length of the map, and *c* the capacity. The sort is stable. - pub fn sort_by_cached_key(&mut self, mut sort_key: F) - where - T: Ord, - F: FnMut(&K, &V) -> T, - { - self.with_entries(move |entries| { - entries.sort_by_cached_key(move |a| sort_key(&a.key, &a.value)); - }); - } - - /// Search over a sorted map for a key. - /// - /// Returns the position where that key is present, or the position where it can be inserted to - /// maintain the sort. See [`slice::binary_search`] for more details. - /// - /// Computes in **O(log(n))** time, which is notably less scalable than looking the key up - /// using [`get_index_of`][IndexMap::get_index_of], but this can also position missing keys. - pub fn binary_search_keys(&self, x: &K) -> Result - where - K: Ord, - { - self.as_slice().binary_search_keys(x) - } - - /// Search over a sorted map with a comparator function. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search_by`] for more details. 
- /// - /// Computes in **O(log(n))** time. - #[inline] - pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result - where - F: FnMut(&'a K, &'a V) -> Ordering, - { - self.as_slice().binary_search_by(f) - } - - /// Search over a sorted map with an extraction function. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. - /// - /// Computes in **O(log(n))** time. - #[inline] - pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result - where - F: FnMut(&'a K, &'a V) -> B, - B: Ord, - { - self.as_slice().binary_search_by_key(b, f) - } - - /// Returns the index of the partition point of a sorted map according to the given predicate - /// (the index of the first element of the second partition). - /// - /// See [`slice::partition_point`] for more details. - /// - /// Computes in **O(log(n))** time. - #[must_use] - pub fn partition_point

(&self, pred: P) -> usize - where - P: FnMut(&K, &V) -> bool, - { - self.as_slice().partition_point(pred) + IntoIter { + iter: entries.into_iter(), + } } /// Reverses the order of the map’s key-value pairs in place. @@ -860,27 +773,6 @@ } impl IndexMap { - /// Returns a slice of all the key-value pairs in the map. - /// - /// Computes in **O(1)** time. - pub fn as_slice(&self) -> &Slice { - Slice::from_slice(self.as_entries()) - } - - /// Returns a mutable slice of all the key-value pairs in the map. - /// - /// Computes in **O(1)** time. - pub fn as_mut_slice(&mut self) -> &mut Slice { - Slice::from_mut_slice(self.as_entries_mut()) - } - - /// Converts into a boxed slice of all the key-value pairs in the map. - /// - /// Note that this will drop the inner hash table and any excess capacity. - pub fn into_boxed_slice(self) -> Box> { - Slice::from_boxed(self.into_entries().into_boxed_slice()) - } - /// Get a key-value pair by index /// /// Valid indices are *0 <= index < self.len()* @@ -895,30 +787,8 @@ /// Valid indices are *0 <= index < self.len()* /// /// Computes in **O(1)** time. - pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> { - self.as_entries_mut().get_mut(index).map(Bucket::ref_mut) - } - - /// Returns a slice of key-value pairs in the given range of indices. - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Computes in **O(1)** time. - pub fn get_range>(&self, range: R) -> Option<&Slice> { - let entries = self.as_entries(); - let range = try_simplify_range(range, entries.len())?; - entries.get(range).map(Slice::from_slice) - } - - /// Returns a mutable slice of key-value pairs in the given range of indices. - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Computes in **O(1)** time. 
- pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Slice> { - let entries = self.as_entries_mut(); - let range = try_simplify_range(range, entries.len())?; - entries.get_mut(range).map(Slice::from_mut_slice) + pub fn get_index_mut(&mut self, index: usize) -> Option<(&mut K, &mut V)> { + self.as_entries_mut().get_mut(index).map(Bucket::muts) } /// Get the first key-value pair @@ -996,6 +866,380 @@ } } +/// An iterator over the keys of a `IndexMap`. +/// +/// This `struct` is created by the [`keys`] method on [`IndexMap`]. See its +/// documentation for more. +/// +/// [`keys`]: struct.IndexMap.html#method.keys +/// [`IndexMap`]: struct.IndexMap.html +pub struct Keys<'a, K, V> { + iter: SliceIter<'a, Bucket>, +} + +impl<'a, K, V> Iterator for Keys<'a, K, V> { + type Item = &'a K; + + iterator_methods!(Bucket::key_ref); +} + +impl DoubleEndedIterator for Keys<'_, K, V> { + double_ended_iterator_methods!(Bucket::key_ref); +} + +impl ExactSizeIterator for Keys<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Keys<'_, K, V> {} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Keys<'_, K, V> { + fn clone(&self) -> Self { + Keys { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Keys<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// An owning iterator over the keys of a `IndexMap`. +/// +/// This `struct` is created by the [`into_keys`] method on [`IndexMap`]. +/// See its documentation for more. 
+/// +/// [`IndexMap`]: struct.IndexMap.html +/// [`into_keys`]: struct.IndexMap.html#method.into_keys +pub struct IntoKeys { + iter: vec::IntoIter>, +} + +impl Iterator for IntoKeys { + type Item = K; + + iterator_methods!(Bucket::key); +} + +impl DoubleEndedIterator for IntoKeys { + double_ended_iterator_methods!(Bucket::key); +} + +impl ExactSizeIterator for IntoKeys { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IntoKeys {} + +impl fmt::Debug for IntoKeys { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::key_ref); + f.debug_list().entries(iter).finish() + } +} + +/// An iterator over the values of a `IndexMap`. +/// +/// This `struct` is created by the [`values`] method on [`IndexMap`]. See its +/// documentation for more. +/// +/// [`values`]: struct.IndexMap.html#method.values +/// [`IndexMap`]: struct.IndexMap.html +pub struct Values<'a, K, V> { + iter: SliceIter<'a, Bucket>, +} + +impl<'a, K, V> Iterator for Values<'a, K, V> { + type Item = &'a V; + + iterator_methods!(Bucket::value_ref); +} + +impl DoubleEndedIterator for Values<'_, K, V> { + double_ended_iterator_methods!(Bucket::value_ref); +} + +impl ExactSizeIterator for Values<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Values<'_, K, V> {} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Values<'_, K, V> { + fn clone(&self) -> Self { + Values { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Values<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A mutable iterator over the values of a `IndexMap`. +/// +/// This `struct` is created by the [`values_mut`] method on [`IndexMap`]. See its +/// documentation for more. 
+/// +/// [`values_mut`]: struct.IndexMap.html#method.values_mut +/// [`IndexMap`]: struct.IndexMap.html +pub struct ValuesMut<'a, K, V> { + iter: SliceIterMut<'a, Bucket>, +} + +impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { + type Item = &'a mut V; + + iterator_methods!(Bucket::value_mut); +} + +impl DoubleEndedIterator for ValuesMut<'_, K, V> { + double_ended_iterator_methods!(Bucket::value_mut); +} + +impl ExactSizeIterator for ValuesMut<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for ValuesMut<'_, K, V> {} + +impl fmt::Debug for ValuesMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::value_ref); + f.debug_list().entries(iter).finish() + } +} + +/// An owning iterator over the values of a `IndexMap`. +/// +/// This `struct` is created by the [`into_values`] method on [`IndexMap`]. +/// See its documentation for more. +/// +/// [`IndexMap`]: struct.IndexMap.html +/// [`into_values`]: struct.IndexMap.html#method.into_values +pub struct IntoValues { + iter: vec::IntoIter>, +} + +impl Iterator for IntoValues { + type Item = V; + + iterator_methods!(Bucket::value); +} + +impl DoubleEndedIterator for IntoValues { + double_ended_iterator_methods!(Bucket::value); +} + +impl ExactSizeIterator for IntoValues { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IntoValues {} + +impl fmt::Debug for IntoValues { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::value_ref); + f.debug_list().entries(iter).finish() + } +} + +/// An iterator over the entries of a `IndexMap`. +/// +/// This `struct` is created by the [`iter`] method on [`IndexMap`]. See its +/// documentation for more. 
+/// +/// [`iter`]: struct.IndexMap.html#method.iter +/// [`IndexMap`]: struct.IndexMap.html +pub struct Iter<'a, K, V> { + iter: SliceIter<'a, Bucket>, +} + +impl<'a, K, V> Iterator for Iter<'a, K, V> { + type Item = (&'a K, &'a V); + + iterator_methods!(Bucket::refs); +} + +impl DoubleEndedIterator for Iter<'_, K, V> { + double_ended_iterator_methods!(Bucket::refs); +} + +impl ExactSizeIterator for Iter<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Iter<'_, K, V> {} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Iter<'_, K, V> { + fn clone(&self) -> Self { + Iter { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Iter<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A mutable iterator over the entries of a `IndexMap`. +/// +/// This `struct` is created by the [`iter_mut`] method on [`IndexMap`]. See its +/// documentation for more. +/// +/// [`iter_mut`]: struct.IndexMap.html#method.iter_mut +/// [`IndexMap`]: struct.IndexMap.html +pub struct IterMut<'a, K, V> { + iter: SliceIterMut<'a, Bucket>, +} + +impl<'a, K, V> Iterator for IterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + iterator_methods!(Bucket::ref_mut); +} + +impl DoubleEndedIterator for IterMut<'_, K, V> { + double_ended_iterator_methods!(Bucket::ref_mut); +} + +impl ExactSizeIterator for IterMut<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IterMut<'_, K, V> {} + +impl fmt::Debug for IterMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +/// An owning iterator over the entries of a `IndexMap`. +/// +/// This `struct` is created by the [`into_iter`] method on [`IndexMap`] +/// (provided by the `IntoIterator` trait). See its documentation for more. 
+/// +/// [`into_iter`]: struct.IndexMap.html#method.into_iter +/// [`IndexMap`]: struct.IndexMap.html +pub struct IntoIter { + iter: vec::IntoIter>, +} + +impl Iterator for IntoIter { + type Item = (K, V); + + iterator_methods!(Bucket::key_value); +} + +impl DoubleEndedIterator for IntoIter { + double_ended_iterator_methods!(Bucket::key_value); +} + +impl ExactSizeIterator for IntoIter { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IntoIter {} + +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +/// A draining iterator over the entries of a `IndexMap`. +/// +/// This `struct` is created by the [`drain`] method on [`IndexMap`]. See its +/// documentation for more. +/// +/// [`drain`]: struct.IndexMap.html#method.drain +/// [`IndexMap`]: struct.IndexMap.html +pub struct Drain<'a, K, V> { + pub(crate) iter: vec::Drain<'a, Bucket>, +} + +impl Iterator for Drain<'_, K, V> { + type Item = (K, V); + + iterator_methods!(Bucket::key_value); +} + +impl DoubleEndedIterator for Drain<'_, K, V> { + double_ended_iterator_methods!(Bucket::key_value); +} + +impl ExactSizeIterator for Drain<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Drain<'_, K, V> {} + +impl fmt::Debug for Drain<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +impl<'a, K, V, S> IntoIterator for &'a IndexMap { + type Item = (&'a K, &'a V); + type IntoIter = Iter<'a, K, V>; + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut IndexMap { + type Item = (&'a K, &'a mut V); + type IntoIter = IterMut<'a, K, V>; + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl IntoIterator for 
IndexMap { + type Item = (K, V); + type IntoIter = IntoIter; + fn into_iter(self) -> Self::IntoIter { + IntoIter { + iter: self.into_entries().into_iter(), + } + } +} + /// Access `IndexMap` values corresponding to a key. /// /// # Examples @@ -1177,8 +1421,7 @@ } } -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[cfg(has_std)] impl From<[(K, V); N]> for IndexMap where K: Hash + Eq, @@ -1278,3 +1521,427 @@ S: BuildHasher, { } + +#[cfg(test)] +mod tests { + use super::*; + use std::string::String; + + #[test] + fn it_works() { + let mut map = IndexMap::new(); + assert_eq!(map.is_empty(), true); + map.insert(1, ()); + map.insert(1, ()); + assert_eq!(map.len(), 1); + assert!(map.get(&1).is_some()); + assert_eq!(map.is_empty(), false); + } + + #[test] + fn new() { + let map = IndexMap::::new(); + println!("{:?}", map); + assert_eq!(map.capacity(), 0); + assert_eq!(map.len(), 0); + assert_eq!(map.is_empty(), true); + } + + #[test] + fn insert() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5]; + let not_present = [1, 3, 6, 9, 10]; + let mut map = IndexMap::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(map.len(), i); + map.insert(elt, elt); + assert_eq!(map.len(), i + 1); + assert_eq!(map.get(&elt), Some(&elt)); + assert_eq!(map[&elt], elt); + } + println!("{:?}", map); + + for &elt in ¬_present { + assert!(map.get(&elt).is_none()); + } + } + + #[test] + fn insert_full() { + let insert = vec![9, 2, 7, 1, 4, 6, 13]; + let present = vec![1, 6, 2]; + let mut map = IndexMap::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(map.len(), i); + let (index, existing) = map.insert_full(elt, elt); + assert_eq!(existing, None); + assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); + assert_eq!(map.len(), i + 1); + } + + let len = map.len(); + for &elt in &present { + let (index, existing) = map.insert_full(elt, elt); + assert_eq!(existing, Some(elt)); + 
assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); + assert_eq!(map.len(), len); + } + } + + #[test] + fn insert_2() { + let mut map = IndexMap::with_capacity(16); + + let mut keys = vec![]; + keys.extend(0..16); + keys.extend(if cfg!(miri) { 32..64 } else { 128..267 }); + + for &i in &keys { + let old_map = map.clone(); + map.insert(i, ()); + for key in old_map.keys() { + if map.get(key).is_none() { + println!("old_map: {:?}", old_map); + println!("map: {:?}", map); + panic!("did not find {} in map", key); + } + } + } + + for &i in &keys { + assert!(map.get(&i).is_some(), "did not find {}", i); + } + } + + #[test] + fn insert_order() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, ()); + } + + assert_eq!(map.keys().count(), map.len()); + assert_eq!(map.keys().count(), insert.len()); + for (a, b) in insert.iter().zip(map.keys()) { + assert_eq!(a, b); + } + for (i, k) in (0..insert.len()).zip(map.keys()) { + assert_eq!(map.get_index(i).unwrap().0, k); + } + } + + #[test] + fn grow() { + let insert = [0, 4, 2, 12, 8, 7, 11]; + let not_present = [1, 3, 6, 9, 10]; + let mut map = IndexMap::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(map.len(), i); + map.insert(elt, elt); + assert_eq!(map.len(), i + 1); + assert_eq!(map.get(&elt), Some(&elt)); + assert_eq!(map[&elt], elt); + } + + println!("{:?}", map); + for &elt in &insert { + map.insert(elt * 10, elt); + } + for &elt in &insert { + map.insert(elt * 100, elt); + } + for (i, &elt) in insert.iter().cycle().enumerate().take(100) { + map.insert(elt * 100 + i as i32, elt); + } + println!("{:?}", map); + for &elt in ¬_present { + assert!(map.get(&elt).is_none()); + } + } + + #[test] + fn reserve() { + let mut map = IndexMap::::new(); + assert_eq!(map.capacity(), 0); + map.reserve(100); + let capacity = map.capacity(); + assert!(capacity >= 100); + for i in 0..capacity { + 
assert_eq!(map.len(), i); + map.insert(i, i * i); + assert_eq!(map.len(), i + 1); + assert_eq!(map.capacity(), capacity); + assert_eq!(map.get(&i), Some(&(i * i))); + } + map.insert(capacity, std::usize::MAX); + assert_eq!(map.len(), capacity + 1); + assert!(map.capacity() > capacity); + assert_eq!(map.get(&capacity), Some(&std::usize::MAX)); + } + + #[test] + fn shrink_to_fit() { + let mut map = IndexMap::::new(); + assert_eq!(map.capacity(), 0); + for i in 0..100 { + assert_eq!(map.len(), i); + map.insert(i, i * i); + assert_eq!(map.len(), i + 1); + assert!(map.capacity() >= i + 1); + assert_eq!(map.get(&i), Some(&(i * i))); + map.shrink_to_fit(); + assert_eq!(map.len(), i + 1); + assert_eq!(map.capacity(), i + 1); + assert_eq!(map.get(&i), Some(&(i * i))); + } + } + + #[test] + fn remove() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, elt); + } + + assert_eq!(map.keys().count(), map.len()); + assert_eq!(map.keys().count(), insert.len()); + for (a, b) in insert.iter().zip(map.keys()) { + assert_eq!(a, b); + } + + let remove_fail = [99, 77]; + let remove = [4, 12, 8, 7]; + + for &key in &remove_fail { + assert!(map.swap_remove_full(&key).is_none()); + } + println!("{:?}", map); + for &key in &remove { + //println!("{:?}", map); + let index = map.get_full(&key).unwrap().0; + assert_eq!(map.swap_remove_full(&key), Some((index, key, key))); + } + println!("{:?}", map); + + for key in &insert { + assert_eq!(map.get(key).is_some(), !remove.contains(key)); + } + assert_eq!(map.len(), insert.len() - remove.len()); + assert_eq!(map.keys().count(), insert.len() - remove.len()); + } + + #[test] + fn remove_to_empty() { + let mut map = indexmap! 
{ 0 => 0, 4 => 4, 5 => 5 }; + map.swap_remove(&5).unwrap(); + map.swap_remove(&4).unwrap(); + map.swap_remove(&0).unwrap(); + assert!(map.is_empty()); + } + + #[test] + fn swap_remove_index() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, elt * 2); + } + + let mut vector = insert.to_vec(); + let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; + + // check that the same swap remove sequence on vec and map + // have the same result. + for &rm in remove_sequence { + let out_vec = vector.swap_remove(rm); + let (out_map, _) = map.swap_remove_index(rm).unwrap(); + assert_eq!(out_vec, out_map); + } + assert_eq!(vector.len(), map.len()); + for (a, b) in vector.iter().zip(map.keys()) { + assert_eq!(a, b); + } + } + + #[test] + fn partial_eq_and_eq() { + let mut map_a = IndexMap::new(); + map_a.insert(1, "1"); + map_a.insert(2, "2"); + let mut map_b = map_a.clone(); + assert_eq!(map_a, map_b); + map_b.swap_remove(&1); + assert_ne!(map_a, map_b); + + let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.into())).collect(); + assert_ne!(map_a, map_c); + assert_ne!(map_c, map_a); + } + + #[test] + fn extend() { + let mut map = IndexMap::new(); + map.extend(vec![(&1, &2), (&3, &4)]); + map.extend(vec![(5, 6)]); + assert_eq!( + map.into_iter().collect::>(), + vec![(1, 2), (3, 4), (5, 6)] + ); + } + + #[test] + fn entry() { + let mut map = IndexMap::new(); + + map.insert(1, "1"); + map.insert(2, "2"); + { + let e = map.entry(3); + assert_eq!(e.index(), 2); + let e = e.or_insert("3"); + assert_eq!(e, &"3"); + } + + let e = map.entry(2); + assert_eq!(e.index(), 1); + assert_eq!(e.key(), &2); + match e { + Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"), + Entry::Vacant(_) => panic!(), + } + assert_eq!(e.or_insert("4"), &"2"); + } + + #[test] + fn entry_and_modify() { + let mut map = IndexMap::new(); + + map.insert(1, "1"); + map.entry(1).and_modify(|x| *x = "2"); + 
assert_eq!(Some(&"2"), map.get(&1)); + + map.entry(2).and_modify(|x| *x = "doesn't exist"); + assert_eq!(None, map.get(&2)); + } + + #[test] + fn entry_or_default() { + let mut map = IndexMap::new(); + + #[derive(Debug, PartialEq)] + enum TestEnum { + DefaultValue, + NonDefaultValue, + } + + impl Default for TestEnum { + fn default() -> Self { + TestEnum::DefaultValue + } + } + + map.insert(1, TestEnum::NonDefaultValue); + assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default()); + + assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default()); + } + + #[test] + fn occupied_entry_key() { + // These keys match hash and equality, but their addresses are distinct. + let (k1, k2) = (&mut 1, &mut 1); + let k1_ptr = k1 as *const i32; + let k2_ptr = k2 as *const i32; + assert_ne!(k1_ptr, k2_ptr); + + let mut map = IndexMap::new(); + map.insert(k1, "value"); + match map.entry(k2) { + Entry::Occupied(ref e) => { + // `OccupiedEntry::key` should reference the key in the map, + // not the key that was used to find the entry. 
+ let ptr = *e.key() as *const i32; + assert_eq!(ptr, k1_ptr); + assert_ne!(ptr, k2_ptr); + } + Entry::Vacant(_) => panic!(), + } + } + + #[test] + fn keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_iter().collect(); + let keys: Vec<_> = map.keys().copied().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn into_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_iter().collect(); + let keys: Vec = map.into_keys().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_iter().collect(); + let values: Vec<_> = map.values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn values_mut() { + let vec = vec![(1, 1), (2, 2), (3, 3)]; + let mut map: IndexMap<_, _> = vec.into_iter().collect(); + for value in map.values_mut() { + *value *= 2 + } + let values: Vec<_> = map.values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&2)); + assert!(values.contains(&4)); + assert!(values.contains(&6)); + } + + #[test] + fn into_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_iter().collect(); + let values: Vec = map.into_values().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + #[cfg(has_std)] + fn from_array() { + let map = IndexMap::from([(1, 2), (3, 4)]); + let mut expected = IndexMap::new(); + expected.insert(1, 2); + expected.insert(3, 4); + + assert_eq!(map, expected) + } +} diff -Nru 
s390-tools-2.31.0/rust-vendor/indexmap/src/mutable_keys.rs s390-tools-2.33.1/rust-vendor/indexmap/src/mutable_keys.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/mutable_keys.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/mutable_keys.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,6 +1,8 @@ use core::hash::{BuildHasher, Hash}; -use super::{Bucket, Entries, Equivalent, IndexMap}; +use super::{Equivalent, IndexMap}; + +pub struct PrivateMarker {} /// Opt-in mutable access to keys. /// @@ -14,15 +16,11 @@ /// implementing PartialEq, Eq, or Hash incorrectly would be). /// /// `use` this trait to enable its methods for `IndexMap`. -/// -/// This trait is sealed and cannot be implemented for types outside this crate. -pub trait MutableKeys: private::Sealed { +pub trait MutableKeys { type Key; type Value; /// Return item index, mutable reference to key and value - /// - /// Computes in **O(1)** time (average). fn get_full_mut2( &mut self, key: &Q, @@ -30,13 +28,6 @@ where Q: Hash + Equivalent; - /// Return mutable reference to key and value at an index. - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Computes in **O(1)** time. - fn get_index_mut2(&mut self, index: usize) -> Option<(&mut Self::Key, &mut Self::Value)>; - /// Scan through each key-value pair in the map and keep those where the /// closure `keep` returns `true`. /// @@ -47,6 +38,11 @@ fn retain2(&mut self, keep: F) where F: FnMut(&mut Self::Key, &mut Self::Value) -> bool; + + /// This method is not useful in itself – it is there to “seal†the trait + /// for external implementation, so that we can add methods without + /// causing breaking changes. + fn __private_marker(&self) -> PrivateMarker; } /// Opt-in mutable access to keys. 
@@ -59,21 +55,11 @@ { type Key = K; type Value = V; - fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)> where Q: Hash + Equivalent, { - if let Some(i) = self.get_index_of(key) { - let entry = &mut self.as_entries_mut()[i]; - Some((i, &mut entry.key, &mut entry.value)) - } else { - None - } - } - - fn get_index_mut2(&mut self, index: usize) -> Option<(&mut K, &mut V)> { - self.as_entries_mut().get_mut(index).map(Bucket::muts) + self.get_full_mut2_impl(key) } fn retain2(&mut self, keep: F) @@ -82,10 +68,8 @@ { self.retain_mut(keep) } -} - -mod private { - pub trait Sealed {} - impl Sealed for super::IndexMap {} + fn __private_marker(&self) -> PrivateMarker { + PrivateMarker {} + } } diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/rayon/map.rs s390-tools-2.33.1/rust-vendor/indexmap/src/rayon/map.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/rayon/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/rayon/map.rs 2024-05-28 11:57:36.000000000 +0200 @@ -2,23 +2,24 @@ //! //! You will rarely need to interact with this module directly unless you need to name one of the //! iterator types. +//! +//! Requires crate feature `"rayon"` use super::collect; use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; use rayon::prelude::*; use crate::vec::Vec; -use alloc::boxed::Box; use core::cmp::Ordering; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::ops::RangeBounds; -use crate::map::Slice; use crate::Bucket; use crate::Entries; use crate::IndexMap; +/// Requires crate feature `"rayon"`. impl IntoParallelIterator for IndexMap where K: Send, @@ -34,21 +35,6 @@ } } -impl IntoParallelIterator for Box> -where - K: Send, - V: Send, -{ - type Item = (K, V); - type Iter = IntoParIter; - - fn into_par_iter(self) -> Self::Iter { - IntoParIter { - entries: self.into_entries(), - } - } -} - /// A parallel owning iterator over the entries of a `IndexMap`. 
/// /// This `struct` is created by the [`into_par_iter`] method on [`IndexMap`] @@ -77,6 +63,7 @@ indexed_parallel_iterator_methods!(Bucket::key_value); } +/// Requires crate feature `"rayon"`. impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap where K: Sync, @@ -92,21 +79,6 @@ } } -impl<'a, K, V> IntoParallelIterator for &'a Slice -where - K: Sync, - V: Sync, -{ - type Item = (&'a K, &'a V); - type Iter = ParIter<'a, K, V>; - - fn into_par_iter(self) -> Self::Iter { - ParIter { - entries: &self.entries, - } - } -} - /// A parallel iterator over the entries of a `IndexMap`. /// /// This `struct` is created by the [`par_iter`] method on [`IndexMap`] @@ -141,6 +113,7 @@ indexed_parallel_iterator_methods!(Bucket::refs); } +/// Requires crate feature `"rayon"`. impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap where K: Sync + Send, @@ -156,21 +129,6 @@ } } -impl<'a, K, V> IntoParallelIterator for &'a mut Slice -where - K: Sync + Send, - V: Send, -{ - type Item = (&'a K, &'a mut V); - type Iter = ParIterMut<'a, K, V>; - - fn into_par_iter(self) -> Self::Iter { - ParIterMut { - entries: &mut self.entries, - } - } -} - /// A parallel mutable iterator over the entries of a `IndexMap`. /// /// This `struct` is created by the [`par_iter_mut`] method on [`IndexMap`] @@ -199,6 +157,7 @@ indexed_parallel_iterator_methods!(Bucket::ref_mut); } +/// Requires crate feature `"rayon"`. impl<'a, K, V, S> ParallelDrainRange for &'a mut IndexMap where K: Send, @@ -266,37 +225,6 @@ } } -/// Parallel iterator methods and other parallel methods. -/// -/// The following methods **require crate feature `"rayon"`**. -/// -/// See also the `IntoParallelIterator` implementations. -impl Slice -where - K: Sync, - V: Sync, -{ - /// Return a parallel iterator over the keys of the map slice. - /// - /// While parallel iterators can process items in any order, their relative order - /// in the slice is still preserved for operations like `reduce` and `collect`. 
- pub fn par_keys(&self) -> ParKeys<'_, K, V> { - ParKeys { - entries: &self.entries, - } - } - - /// Return a parallel iterator over the values of the map slice. - /// - /// While parallel iterators can process items in any order, their relative order - /// in the slice is still preserved for operations like `reduce` and `collect`. - pub fn par_values(&self) -> ParValues<'_, K, V> { - ParValues { - entries: &self.entries, - } - } -} - impl IndexMap where K: Hash + Eq + Sync, @@ -386,6 +314,7 @@ indexed_parallel_iterator_methods!(Bucket::value_ref); } +/// Requires crate feature `"rayon"`. impl IndexMap where K: Send, @@ -402,22 +331,6 @@ } } -impl Slice -where - K: Send, - V: Send, -{ - /// Return a parallel iterator over mutable references to the the values of the map slice. - /// - /// While parallel iterators can process items in any order, their relative order - /// in the slice is still preserved for operations like `reduce` and `collect`. - pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { - ParValuesMut { - entries: &mut self.entries, - } - } -} - impl IndexMap where K: Hash + Eq + Send, @@ -493,18 +406,6 @@ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); IntoParIter { entries } } - - /// Sort the map’s key-value pairs in place and in parallel, using a sort-key extraction - /// function. - pub fn par_sort_by_cached_key(&mut self, sort_key: F) - where - T: Ord + Send, - F: Fn(&K, &V) -> T + Sync, - { - self.with_entries(move |entries| { - entries.par_sort_by_cached_key(move |a| sort_key(&a.key, &a.value)); - }); - } } /// A parallel mutable iterator over the values of a `IndexMap`. @@ -535,6 +436,7 @@ indexed_parallel_iterator_methods!(Bucket::value_mut); } +/// Requires crate feature `"rayon"`. impl FromParallelIterator<(K, V)> for IndexMap where K: Eq + Hash + Send, @@ -555,6 +457,7 @@ } } +/// Requires crate feature `"rayon"`. 
impl ParallelExtend<(K, V)> for IndexMap where K: Eq + Hash + Send, @@ -571,6 +474,7 @@ } } +/// Requires crate feature `"rayon"`. impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for IndexMap where K: Copy + Eq + Hash + Send + Sync, diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/rayon/set.rs s390-tools-2.33.1/rust-vendor/indexmap/src/rayon/set.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/rayon/set.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/rayon/set.rs 2024-05-28 11:57:36.000000000 +0200 @@ -2,24 +2,25 @@ //! //! You will rarely need to interact with this module directly unless you need to name one of the //! iterator types. +//! +//! Requires crate feature `"rayon"`. use super::collect; use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; use rayon::prelude::*; use crate::vec::Vec; -use alloc::boxed::Box; use core::cmp::Ordering; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::ops::RangeBounds; -use crate::set::Slice; use crate::Entries; use crate::IndexSet; type Bucket = crate::Bucket; +/// Requires crate feature `"rayon"`. impl IntoParallelIterator for IndexSet where T: Send, @@ -34,20 +35,6 @@ } } -impl IntoParallelIterator for Box> -where - T: Send, -{ - type Item = T; - type Iter = IntoParIter; - - fn into_par_iter(self) -> Self::Iter { - IntoParIter { - entries: self.into_entries(), - } - } -} - /// A parallel owning iterator over the items of a `IndexSet`. /// /// This `struct` is created by the [`into_par_iter`] method on [`IndexSet`] @@ -76,6 +63,7 @@ indexed_parallel_iterator_methods!(Bucket::key); } +/// Requires crate feature `"rayon"`. 
impl<'a, T, S> IntoParallelIterator for &'a IndexSet where T: Sync, @@ -90,20 +78,6 @@ } } -impl<'a, T> IntoParallelIterator for &'a Slice -where - T: Sync, -{ - type Item = &'a T; - type Iter = ParIter<'a, T>; - - fn into_par_iter(self) -> Self::Iter { - ParIter { - entries: &self.entries, - } - } -} - /// A parallel iterator over the items of a `IndexSet`. /// /// This `struct` is created by the [`par_iter`] method on [`IndexSet`] @@ -138,6 +112,7 @@ indexed_parallel_iterator_methods!(Bucket::key_ref); } +/// Requires crate feature `"rayon"`. impl<'a, T, S> ParallelDrainRange for &'a mut IndexSet where T: Send, @@ -565,19 +540,9 @@ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); IntoParIter { entries } } - - /// Sort the set’s values in place and in parallel, using a key extraction function. - pub fn par_sort_by_cached_key(&mut self, sort_key: F) - where - K: Ord + Send, - F: Fn(&T) -> K + Sync, - { - self.with_entries(move |entries| { - entries.par_sort_by_cached_key(move |a| sort_key(&a.key)); - }); - } } +/// Requires crate feature `"rayon"`. impl FromParallelIterator for IndexSet where T: Eq + Hash + Send, @@ -597,6 +562,7 @@ } } +/// Requires crate feature `"rayon"`. impl ParallelExtend for IndexSet where T: Eq + Hash + Send, @@ -612,6 +578,7 @@ } } +/// Requires crate feature `"rayon"`. 
impl<'a, T: 'a, S> ParallelExtend<&'a T> for IndexSet where T: Copy + Eq + Hash + Send + Sync, diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/serde.rs s390-tools-2.33.1/rust-vendor/indexmap/src/serde.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/serde.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/serde.rs 2024-05-28 11:57:36.000000000 +0200 @@ -10,6 +10,7 @@ use crate::IndexMap; +/// Requires crate feature `"serde"` or `"serde-1"` impl Serialize for IndexMap where K: Serialize + Hash + Eq, @@ -53,6 +54,7 @@ } } +/// Requires crate feature `"serde"` or `"serde-1"` impl<'de, K, V, S> Deserialize<'de> for IndexMap where K: Deserialize<'de> + Eq + Hash, @@ -83,6 +85,7 @@ use crate::IndexSet; +/// Requires crate feature `"serde"` or `"serde-1"` impl Serialize for IndexSet where T: Serialize + Hash + Eq, @@ -124,6 +127,7 @@ } } +/// Requires crate feature `"serde"` or `"serde-1"` impl<'de, T, S> Deserialize<'de> for IndexSet where T: Deserialize<'de> + Eq + Hash, diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/serde_seq.rs s390-tools-2.33.1/rust-vendor/indexmap/src/serde_seq.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/serde_seq.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/serde_seq.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,112 @@ +//! Functions to serialize and deserialize an `IndexMap` as an ordered sequence. +//! +//! The default `serde` implementation serializes `IndexMap` as a normal map, +//! but there is no guarantee that serialization formats will preserve the order +//! of the key-value pairs. This module serializes `IndexMap` as a sequence of +//! `(key, value)` elements instead, in order. +//! +//! This module may be used in a field attribute for derived implementations: +//! +//! ``` +//! # use indexmap::IndexMap; +//! # use serde_derive::{Deserialize, Serialize}; +//! #[derive(Deserialize, Serialize)] +//! struct Data { +//! 
#[serde(with = "indexmap::serde_seq")] +//! map: IndexMap, +//! // ... +//! } +//! ``` +//! +//! Requires crate feature `"serde"` or `"serde-1"` + +use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; +use serde::ser::{Serialize, Serializer}; + +use core::fmt::{self, Formatter}; +use core::hash::{BuildHasher, Hash}; +use core::marker::PhantomData; + +use crate::IndexMap; + +/// Serializes an `IndexMap` as an ordered sequence. +/// +/// This function may be used in a field attribute for deriving `Serialize`: +/// +/// ``` +/// # use indexmap::IndexMap; +/// # use serde_derive::Serialize; +/// #[derive(Serialize)] +/// struct Data { +/// #[serde(serialize_with = "indexmap::serde_seq::serialize")] +/// map: IndexMap, +/// // ... +/// } +/// ``` +/// +/// Requires crate feature `"serde"` or `"serde-1"` +pub fn serialize(map: &IndexMap, serializer: T) -> Result +where + K: Serialize + Hash + Eq, + V: Serialize, + S: BuildHasher, + T: Serializer, +{ + serializer.collect_seq(map) +} + +/// Visitor to deserialize a *sequenced* `IndexMap` +struct SeqVisitor(PhantomData<(K, V, S)>); + +impl<'de, K, V, S> Visitor<'de> for SeqVisitor +where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: Default + BuildHasher, +{ + type Value = IndexMap; + + fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "a sequenced map") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let capacity = seq.size_hint().unwrap_or(0); + let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default()); + + while let Some((key, value)) = seq.next_element()? { + map.insert(key, value); + } + + Ok(map) + } +} + +/// Deserializes an `IndexMap` from an ordered sequence. 
+/// +/// This function may be used in a field attribute for deriving `Deserialize`: +/// +/// ``` +/// # use indexmap::IndexMap; +/// # use serde_derive::Deserialize; +/// #[derive(Deserialize)] +/// struct Data { +/// #[serde(deserialize_with = "indexmap::serde_seq::deserialize")] +/// map: IndexMap, +/// // ... +/// } +/// ``` +/// +/// Requires crate feature `"serde"` or `"serde-1"` +pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: Default + BuildHasher, +{ + deserializer.deserialize_seq(SeqVisitor(PhantomData)) +} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/set/iter.rs s390-tools-2.33.1/rust-vendor/indexmap/src/set/iter.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/set/iter.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/set/iter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,543 +0,0 @@ -use super::{Bucket, Entries, IndexSet, Slice}; - -use alloc::vec::{self, Vec}; -use core::fmt; -use core::hash::{BuildHasher, Hash}; -use core::iter::{Chain, FusedIterator}; -use core::slice::Iter as SliceIter; - -impl<'a, T, S> IntoIterator for &'a IndexSet { - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for IndexSet { - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self.into_entries()) - } -} - -/// An iterator over the items of a `IndexSet`. -/// -/// This `struct` is created by the [`iter`] method on [`IndexSet`]. -/// See its documentation for more. 
-/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`iter`]: struct.IndexSet.html#method.iter -pub struct Iter<'a, T> { - iter: SliceIter<'a, Bucket>, -} - -impl<'a, T> Iter<'a, T> { - pub(super) fn new(entries: &'a [Bucket]) -> Self { - Self { - iter: entries.iter(), - } - } - - /// Returns a slice of the remaining entries in the iterator. - pub fn as_slice(&self) -> &'a Slice { - Slice::from_slice(self.iter.as_slice()) - } -} - -impl<'a, T> Iterator for Iter<'a, T> { - type Item = &'a T; - - iterator_methods!(Bucket::key_ref); -} - -impl DoubleEndedIterator for Iter<'_, T> { - double_ended_iterator_methods!(Bucket::key_ref); -} - -impl ExactSizeIterator for Iter<'_, T> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Iter<'_, T> {} - -impl Clone for Iter<'_, T> { - fn clone(&self) -> Self { - Iter { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Iter<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl Default for Iter<'_, T> { - fn default() -> Self { - Self { iter: [].iter() } - } -} - -/// An owning iterator over the items of a `IndexSet`. -/// -/// This `struct` is created by the [`into_iter`] method on [`IndexSet`] -/// (provided by the `IntoIterator` trait). See its documentation for more. -/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`into_iter`]: struct.IndexSet.html#method.into_iter -pub struct IntoIter { - iter: vec::IntoIter>, -} - -impl IntoIter { - pub(super) fn new(entries: Vec>) -> Self { - Self { - iter: entries.into_iter(), - } - } - - /// Returns a slice of the remaining entries in the iterator. 
- pub fn as_slice(&self) -> &Slice { - Slice::from_slice(self.iter.as_slice()) - } -} - -impl Iterator for IntoIter { - type Item = T; - - iterator_methods!(Bucket::key); -} - -impl DoubleEndedIterator for IntoIter { - double_ended_iterator_methods!(Bucket::key); -} - -impl ExactSizeIterator for IntoIter { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IntoIter {} - -impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::key_ref); - f.debug_list().entries(iter).finish() - } -} - -impl Default for IntoIter { - fn default() -> Self { - Self { - iter: Vec::new().into_iter(), - } - } -} - -/// A draining iterator over the items of a `IndexSet`. -/// -/// This `struct` is created by the [`drain`] method on [`IndexSet`]. -/// See its documentation for more. -/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`drain`]: struct.IndexSet.html#method.drain -pub struct Drain<'a, T> { - iter: vec::Drain<'a, Bucket>, -} - -impl<'a, T> Drain<'a, T> { - pub(super) fn new(iter: vec::Drain<'a, Bucket>) -> Self { - Self { iter } - } - - /// Returns a slice of the remaining entries in the iterator. - pub fn as_slice(&self) -> &Slice { - Slice::from_slice(self.iter.as_slice()) - } -} - -impl Iterator for Drain<'_, T> { - type Item = T; - - iterator_methods!(Bucket::key); -} - -impl DoubleEndedIterator for Drain<'_, T> { - double_ended_iterator_methods!(Bucket::key); -} - -impl ExactSizeIterator for Drain<'_, T> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Drain<'_, T> {} - -impl fmt::Debug for Drain<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::key_ref); - f.debug_list().entries(iter).finish() - } -} - -/// A lazy iterator producing elements in the difference of `IndexSet`s. -/// -/// This `struct` is created by the [`difference`] method on [`IndexSet`]. 
-/// See its documentation for more. -/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`difference`]: struct.IndexSet.html#method.difference -pub struct Difference<'a, T, S> { - iter: Iter<'a, T>, - other: &'a IndexSet, -} - -impl<'a, T, S> Difference<'a, T, S> { - pub(super) fn new(set: &'a IndexSet, other: &'a IndexSet) -> Self { - Self { - iter: set.iter(), - other, - } - } -} - -impl<'a, T, S> Iterator for Difference<'a, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - while let Some(item) = self.iter.next() { - if !self.other.contains(item) { - return Some(item); - } - } - None - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } -} - -impl DoubleEndedIterator for Difference<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - fn next_back(&mut self) -> Option { - while let Some(item) = self.iter.next_back() { - if !self.other.contains(item) { - return Some(item); - } - } - None - } -} - -impl FusedIterator for Difference<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ -} - -impl Clone for Difference<'_, T, S> { - fn clone(&self) -> Self { - Difference { - iter: self.iter.clone(), - ..*self - } - } -} - -impl fmt::Debug for Difference<'_, T, S> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A lazy iterator producing elements in the intersection of `IndexSet`s. -/// -/// This `struct` is created by the [`intersection`] method on [`IndexSet`]. -/// See its documentation for more. 
-/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`intersection`]: struct.IndexSet.html#method.intersection -pub struct Intersection<'a, T, S> { - iter: Iter<'a, T>, - other: &'a IndexSet, -} - -impl<'a, T, S> Intersection<'a, T, S> { - pub(super) fn new(set: &'a IndexSet, other: &'a IndexSet) -> Self { - Self { - iter: set.iter(), - other, - } - } -} - -impl<'a, T, S> Iterator for Intersection<'a, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - while let Some(item) = self.iter.next() { - if self.other.contains(item) { - return Some(item); - } - } - None - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } -} - -impl DoubleEndedIterator for Intersection<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - fn next_back(&mut self) -> Option { - while let Some(item) = self.iter.next_back() { - if self.other.contains(item) { - return Some(item); - } - } - None - } -} - -impl FusedIterator for Intersection<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ -} - -impl Clone for Intersection<'_, T, S> { - fn clone(&self) -> Self { - Intersection { - iter: self.iter.clone(), - ..*self - } - } -} - -impl fmt::Debug for Intersection<'_, T, S> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A lazy iterator producing elements in the symmetric difference of `IndexSet`s. -/// -/// This `struct` is created by the [`symmetric_difference`] method on -/// [`IndexSet`]. See its documentation for more. 
-/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`symmetric_difference`]: struct.IndexSet.html#method.symmetric_difference -pub struct SymmetricDifference<'a, T, S1, S2> { - iter: Chain, Difference<'a, T, S1>>, -} - -impl<'a, T, S1, S2> SymmetricDifference<'a, T, S1, S2> -where - T: Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - pub(super) fn new(set1: &'a IndexSet, set2: &'a IndexSet) -> Self { - let diff1 = set1.difference(set2); - let diff2 = set2.difference(set1); - Self { - iter: diff1.chain(diff2), - } - } -} - -impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> -where - T: Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - self.iter.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn fold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, f) - } -} - -impl DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2> -where - T: Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - fn next_back(&mut self) -> Option { - self.iter.next_back() - } - - fn rfold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.rfold(init, f) - } -} - -impl FusedIterator for SymmetricDifference<'_, T, S1, S2> -where - T: Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ -} - -impl Clone for SymmetricDifference<'_, T, S1, S2> { - fn clone(&self) -> Self { - SymmetricDifference { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for SymmetricDifference<'_, T, S1, S2> -where - T: fmt::Debug + Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A lazy iterator producing elements in the union of `IndexSet`s. -/// -/// This `struct` is created by the [`union`] method on [`IndexSet`]. -/// See its documentation for more. 
-/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`union`]: struct.IndexSet.html#method.union -pub struct Union<'a, T, S> { - iter: Chain, Difference<'a, T, S>>, -} - -impl<'a, T, S> Union<'a, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - pub(super) fn new(set1: &'a IndexSet, set2: &'a IndexSet) -> Self - where - S2: BuildHasher, - { - Self { - iter: set1.iter().chain(set2.difference(set1)), - } - } -} - -impl<'a, T, S> Iterator for Union<'a, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - self.iter.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn fold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, f) - } -} - -impl DoubleEndedIterator for Union<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - fn next_back(&mut self) -> Option { - self.iter.next_back() - } - - fn rfold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.rfold(init, f) - } -} - -impl FusedIterator for Union<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ -} - -impl Clone for Union<'_, T, S> { - fn clone(&self) -> Self { - Union { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Union<'_, T, S> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/set/slice.rs s390-tools-2.33.1/rust-vendor/indexmap/src/set/slice.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/set/slice.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/set/slice.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,341 +0,0 @@ -use super::{Bucket, Entries, IndexSet, IntoIter, Iter}; -use crate::util::try_simplify_range; - -use alloc::boxed::Box; -use alloc::vec::Vec; -use core::cmp::Ordering; -use core::fmt; -use core::hash::{Hash, 
Hasher}; -use core::ops::{self, Bound, Index, RangeBounds}; - -/// A dynamically-sized slice of values in an `IndexSet`. -/// -/// This supports indexed operations much like a `[T]` slice, -/// but not any hashed operations on the values. -/// -/// Unlike `IndexSet`, `Slice` does consider the order for `PartialEq` -/// and `Eq`, and it also implements `PartialOrd`, `Ord`, and `Hash`. -#[repr(transparent)] -pub struct Slice { - pub(crate) entries: [Bucket], -} - -// SAFETY: `Slice` is a transparent wrapper around `[Bucket]`, -// and reference lifetimes are bound together in function signatures. -#[allow(unsafe_code)] -impl Slice { - pub(super) const fn from_slice(entries: &[Bucket]) -> &Self { - unsafe { &*(entries as *const [Bucket] as *const Self) } - } - - pub(super) fn from_boxed(entries: Box<[Bucket]>) -> Box { - unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) } - } - - fn into_boxed(self: Box) -> Box<[Bucket]> { - unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket]) } - } -} - -impl Slice { - pub(crate) fn into_entries(self: Box) -> Vec> { - self.into_boxed().into_vec() - } - - /// Returns an empty slice. - pub const fn new<'a>() -> &'a Self { - Self::from_slice(&[]) - } - - /// Return the number of elements in the set slice. - pub const fn len(&self) -> usize { - self.entries.len() - } - - /// Returns true if the set slice contains no elements. - pub const fn is_empty(&self) -> bool { - self.entries.is_empty() - } - - /// Get a value by index. - /// - /// Valid indices are *0 <= index < self.len()* - pub fn get_index(&self, index: usize) -> Option<&T> { - self.entries.get(index).map(Bucket::key_ref) - } - - /// Returns a slice of values in the given range of indices. - /// - /// Valid indices are *0 <= index < self.len()* - pub fn get_range>(&self, range: R) -> Option<&Self> { - let range = try_simplify_range(range, self.entries.len())?; - self.entries.get(range).map(Self::from_slice) - } - - /// Get the first value. 
- pub fn first(&self) -> Option<&T> { - self.entries.first().map(Bucket::key_ref) - } - - /// Get the last value. - pub fn last(&self) -> Option<&T> { - self.entries.last().map(Bucket::key_ref) - } - - /// Divides one slice into two at an index. - /// - /// ***Panics*** if `index > len`. - pub fn split_at(&self, index: usize) -> (&Self, &Self) { - let (first, second) = self.entries.split_at(index); - (Self::from_slice(first), Self::from_slice(second)) - } - - /// Returns the first value and the rest of the slice, - /// or `None` if it is empty. - pub fn split_first(&self) -> Option<(&T, &Self)> { - if let [first, rest @ ..] = &self.entries { - Some((&first.key, Self::from_slice(rest))) - } else { - None - } - } - - /// Returns the last value and the rest of the slice, - /// or `None` if it is empty. - pub fn split_last(&self) -> Option<(&T, &Self)> { - if let [rest @ .., last] = &self.entries { - Some((&last.key, Self::from_slice(rest))) - } else { - None - } - } - - /// Return an iterator over the values of the set slice. - pub fn iter(&self) -> Iter<'_, T> { - Iter::new(&self.entries) - } - - /// Search over a sorted set for a value. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search`] for more details. - /// - /// Computes in **O(log(n))** time, which is notably less scalable than looking the value up in - /// the set this is a slice from using [`IndexSet::get_index_of`], but this can also position - /// missing values. - pub fn binary_search(&self, x: &T) -> Result - where - T: Ord, - { - self.binary_search_by(|p| p.cmp(x)) - } - - /// Search over a sorted set with a comparator function. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search_by`] for more details. - /// - /// Computes in **O(log(n))** time. 
- #[inline] - pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result - where - F: FnMut(&'a T) -> Ordering, - { - self.entries.binary_search_by(move |a| f(&a.key)) - } - - /// Search over a sorted set with an extraction function. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. - /// - /// Computes in **O(log(n))** time. - #[inline] - pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result - where - F: FnMut(&'a T) -> B, - B: Ord, - { - self.binary_search_by(|k| f(k).cmp(b)) - } - - /// Returns the index of the partition point of a sorted set according to the given predicate - /// (the index of the first element of the second partition). - /// - /// See [`slice::partition_point`] for more details. - /// - /// Computes in **O(log(n))** time. - #[must_use] - pub fn partition_point

(&self, mut pred: P) -> usize - where - P: FnMut(&T) -> bool, - { - self.entries.partition_point(move |a| pred(&a.key)) - } -} - -impl<'a, T> IntoIterator for &'a Slice { - type IntoIter = Iter<'a, T>; - type Item = &'a T; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for Box> { - type IntoIter = IntoIter; - type Item = T; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self.into_entries()) - } -} - -impl Default for &'_ Slice { - fn default() -> Self { - Slice::from_slice(&[]) - } -} - -impl Default for Box> { - fn default() -> Self { - Slice::from_boxed(Box::default()) - } -} - -impl Clone for Box> { - fn clone(&self) -> Self { - Slice::from_boxed(self.entries.to_vec().into_boxed_slice()) - } -} - -impl From<&Slice> for Box> { - fn from(slice: &Slice) -> Self { - Slice::from_boxed(Box::from(&slice.entries)) - } -} - -impl fmt::Debug for Slice { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self).finish() - } -} - -impl PartialEq for Slice { - fn eq(&self, other: &Self) -> bool { - self.len() == other.len() && self.iter().eq(other) - } -} - -impl Eq for Slice {} - -impl PartialOrd for Slice { - fn partial_cmp(&self, other: &Self) -> Option { - self.iter().partial_cmp(other) - } -} - -impl Ord for Slice { - fn cmp(&self, other: &Self) -> Ordering { - self.iter().cmp(other) - } -} - -impl Hash for Slice { - fn hash(&self, state: &mut H) { - self.len().hash(state); - for value in self { - value.hash(state); - } - } -} - -impl Index for Slice { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self.entries[index].key - } -} - -// We can't have `impl> Index` because that conflicts with `Index`. -// Instead, we repeat the implementations for all the core range types. -macro_rules! 
impl_index { - ($($range:ty),*) => {$( - impl Index<$range> for IndexSet { - type Output = Slice; - - fn index(&self, range: $range) -> &Self::Output { - Slice::from_slice(&self.as_entries()[range]) - } - } - - impl Index<$range> for Slice { - type Output = Self; - - fn index(&self, range: $range) -> &Self::Output { - Slice::from_slice(&self.entries[range]) - } - } - )*} -} -impl_index!( - ops::Range, - ops::RangeFrom, - ops::RangeFull, - ops::RangeInclusive, - ops::RangeTo, - ops::RangeToInclusive, - (Bound, Bound) -); - -#[cfg(test)] -mod tests { - use super::*; - use alloc::vec::Vec; - - #[test] - fn slice_index() { - fn check(vec_slice: &[i32], set_slice: &Slice, sub_slice: &Slice) { - assert_eq!(set_slice as *const _, sub_slice as *const _); - itertools::assert_equal(vec_slice, set_slice); - } - - let vec: Vec = (0..10).map(|i| i * i).collect(); - let set: IndexSet = vec.iter().cloned().collect(); - let slice = set.as_slice(); - - // RangeFull - check(&vec[..], &set[..], &slice[..]); - - for i in 0usize..10 { - // Index - assert_eq!(vec[i], set[i]); - assert_eq!(vec[i], slice[i]); - - // RangeFrom - check(&vec[i..], &set[i..], &slice[i..]); - - // RangeTo - check(&vec[..i], &set[..i], &slice[..i]); - - // RangeToInclusive - check(&vec[..=i], &set[..=i], &slice[..=i]); - - // (Bound, Bound) - let bounds = (Bound::Excluded(i), Bound::Unbounded); - check(&vec[i + 1..], &set[bounds], &slice[bounds]); - - for j in i..=10 { - // Range - check(&vec[i..j], &set[i..j], &slice[i..j]); - } - - for j in i..10 { - // RangeInclusive - check(&vec[i..=j], &set[i..=j], &slice[i..=j]); - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/set/tests.rs s390-tools-2.33.1/rust-vendor/indexmap/src/set/tests.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/set/tests.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/set/tests.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,684 +0,0 @@ -use super::*; -use std::string::String; - -#[test] 
-fn it_works() { - let mut set = IndexSet::new(); - assert_eq!(set.is_empty(), true); - set.insert(1); - set.insert(1); - assert_eq!(set.len(), 1); - assert!(set.get(&1).is_some()); - assert_eq!(set.is_empty(), false); -} - -#[test] -fn new() { - let set = IndexSet::::new(); - println!("{:?}", set); - assert_eq!(set.capacity(), 0); - assert_eq!(set.len(), 0); - assert_eq!(set.is_empty(), true); -} - -#[test] -fn insert() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5]; - let not_present = [1, 3, 6, 9, 10]; - let mut set = IndexSet::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(set.len(), i); - set.insert(elt); - assert_eq!(set.len(), i + 1); - assert_eq!(set.get(&elt), Some(&elt)); - } - println!("{:?}", set); - - for &elt in ¬_present { - assert!(set.get(&elt).is_none()); - } -} - -#[test] -fn insert_full() { - let insert = vec![9, 2, 7, 1, 4, 6, 13]; - let present = vec![1, 6, 2]; - let mut set = IndexSet::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(set.len(), i); - let (index, success) = set.insert_full(elt); - assert!(success); - assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); - assert_eq!(set.len(), i + 1); - } - - let len = set.len(); - for &elt in &present { - let (index, success) = set.insert_full(elt); - assert!(!success); - assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); - assert_eq!(set.len(), len); - } -} - -#[test] -fn insert_2() { - let mut set = IndexSet::with_capacity(16); - - let mut values = vec![]; - values.extend(0..16); - values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); - - for &i in &values { - let old_set = set.clone(); - set.insert(i); - for value in old_set.iter() { - if set.get(value).is_none() { - println!("old_set: {:?}", old_set); - println!("set: {:?}", set); - panic!("did not find {} in set", value); - } - } - } - - for &i in &values { - assert!(set.get(&i).is_some(), "did not find {}", i); - } -} - -#[test] -fn 
insert_dup() { - let mut elements = vec![0, 2, 4, 6, 8]; - let mut set: IndexSet = elements.drain(..).collect(); - { - let (i, v) = set.get_full(&0).unwrap(); - assert_eq!(set.len(), 5); - assert_eq!(i, 0); - assert_eq!(*v, 0); - } - { - let inserted = set.insert(0); - let (i, v) = set.get_full(&0).unwrap(); - assert_eq!(set.len(), 5); - assert_eq!(inserted, false); - assert_eq!(i, 0); - assert_eq!(*v, 0); - } -} - -#[test] -fn insert_order() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut set = IndexSet::new(); - - for &elt in &insert { - set.insert(elt); - } - - assert_eq!(set.iter().count(), set.len()); - assert_eq!(set.iter().count(), insert.len()); - for (a, b) in insert.iter().zip(set.iter()) { - assert_eq!(a, b); - } - for (i, v) in (0..insert.len()).zip(set.iter()) { - assert_eq!(set.get_index(i).unwrap(), v); - } -} - -#[test] -fn replace() { - let replace = [0, 4, 2, 12, 8, 7, 11, 5]; - let not_present = [1, 3, 6, 9, 10]; - let mut set = IndexSet::with_capacity(replace.len()); - - for (i, &elt) in replace.iter().enumerate() { - assert_eq!(set.len(), i); - set.replace(elt); - assert_eq!(set.len(), i + 1); - assert_eq!(set.get(&elt), Some(&elt)); - } - println!("{:?}", set); - - for &elt in ¬_present { - assert!(set.get(&elt).is_none()); - } -} - -#[test] -fn replace_full() { - let replace = vec![9, 2, 7, 1, 4, 6, 13]; - let present = vec![1, 6, 2]; - let mut set = IndexSet::with_capacity(replace.len()); - - for (i, &elt) in replace.iter().enumerate() { - assert_eq!(set.len(), i); - let (index, replaced) = set.replace_full(elt); - assert!(replaced.is_none()); - assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); - assert_eq!(set.len(), i + 1); - } - - let len = set.len(); - for &elt in &present { - let (index, replaced) = set.replace_full(elt); - assert_eq!(Some(elt), replaced); - assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); - assert_eq!(set.len(), len); - } -} - -#[test] -fn replace_2() { - let mut set = 
IndexSet::with_capacity(16); - - let mut values = vec![]; - values.extend(0..16); - values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); - - for &i in &values { - let old_set = set.clone(); - set.replace(i); - for value in old_set.iter() { - if set.get(value).is_none() { - println!("old_set: {:?}", old_set); - println!("set: {:?}", set); - panic!("did not find {} in set", value); - } - } - } - - for &i in &values { - assert!(set.get(&i).is_some(), "did not find {}", i); - } -} - -#[test] -fn replace_dup() { - let mut elements = vec![0, 2, 4, 6, 8]; - let mut set: IndexSet = elements.drain(..).collect(); - { - let (i, v) = set.get_full(&0).unwrap(); - assert_eq!(set.len(), 5); - assert_eq!(i, 0); - assert_eq!(*v, 0); - } - { - let replaced = set.replace(0); - let (i, v) = set.get_full(&0).unwrap(); - assert_eq!(set.len(), 5); - assert_eq!(replaced, Some(0)); - assert_eq!(i, 0); - assert_eq!(*v, 0); - } -} - -#[test] -fn replace_order() { - let replace = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut set = IndexSet::new(); - - for &elt in &replace { - set.replace(elt); - } - - assert_eq!(set.iter().count(), set.len()); - assert_eq!(set.iter().count(), replace.len()); - for (a, b) in replace.iter().zip(set.iter()) { - assert_eq!(a, b); - } - for (i, v) in (0..replace.len()).zip(set.iter()) { - assert_eq!(set.get_index(i).unwrap(), v); - } -} - -#[test] -fn grow() { - let insert = [0, 4, 2, 12, 8, 7, 11]; - let not_present = [1, 3, 6, 9, 10]; - let mut set = IndexSet::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(set.len(), i); - set.insert(elt); - assert_eq!(set.len(), i + 1); - assert_eq!(set.get(&elt), Some(&elt)); - } - - println!("{:?}", set); - for &elt in &insert { - set.insert(elt * 10); - } - for &elt in &insert { - set.insert(elt * 100); - } - for (i, &elt) in insert.iter().cycle().enumerate().take(100) { - set.insert(elt * 100 + i as i32); - } - println!("{:?}", set); - for &elt in ¬_present { - 
assert!(set.get(&elt).is_none()); - } -} - -#[test] -fn reserve() { - let mut set = IndexSet::::new(); - assert_eq!(set.capacity(), 0); - set.reserve(100); - let capacity = set.capacity(); - assert!(capacity >= 100); - for i in 0..capacity { - assert_eq!(set.len(), i); - set.insert(i); - assert_eq!(set.len(), i + 1); - assert_eq!(set.capacity(), capacity); - assert_eq!(set.get(&i), Some(&i)); - } - set.insert(capacity); - assert_eq!(set.len(), capacity + 1); - assert!(set.capacity() > capacity); - assert_eq!(set.get(&capacity), Some(&capacity)); -} - -#[test] -fn try_reserve() { - let mut set = IndexSet::::new(); - assert_eq!(set.capacity(), 0); - assert_eq!(set.try_reserve(100), Ok(())); - assert!(set.capacity() >= 100); - assert!(set.try_reserve(usize::MAX).is_err()); -} - -#[test] -fn shrink_to_fit() { - let mut set = IndexSet::::new(); - assert_eq!(set.capacity(), 0); - for i in 0..100 { - assert_eq!(set.len(), i); - set.insert(i); - assert_eq!(set.len(), i + 1); - assert!(set.capacity() >= i + 1); - assert_eq!(set.get(&i), Some(&i)); - set.shrink_to_fit(); - assert_eq!(set.len(), i + 1); - assert_eq!(set.capacity(), i + 1); - assert_eq!(set.get(&i), Some(&i)); - } -} - -#[test] -fn remove() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut set = IndexSet::new(); - - for &elt in &insert { - set.insert(elt); - } - - assert_eq!(set.iter().count(), set.len()); - assert_eq!(set.iter().count(), insert.len()); - for (a, b) in insert.iter().zip(set.iter()) { - assert_eq!(a, b); - } - - let remove_fail = [99, 77]; - let remove = [4, 12, 8, 7]; - - for &value in &remove_fail { - assert!(set.swap_remove_full(&value).is_none()); - } - println!("{:?}", set); - for &value in &remove { - //println!("{:?}", set); - let index = set.get_full(&value).unwrap().0; - assert_eq!(set.swap_remove_full(&value), Some((index, value))); - } - println!("{:?}", set); - - for value in &insert { - assert_eq!(set.get(value).is_some(), !remove.contains(value)); - } - 
assert_eq!(set.len(), insert.len() - remove.len()); - assert_eq!(set.iter().count(), insert.len() - remove.len()); -} - -#[test] -fn swap_remove_index() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut set = IndexSet::new(); - - for &elt in &insert { - set.insert(elt); - } - - let mut vector = insert.to_vec(); - let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; - - // check that the same swap remove sequence on vec and set - // have the same result. - for &rm in remove_sequence { - let out_vec = vector.swap_remove(rm); - let out_set = set.swap_remove_index(rm).unwrap(); - assert_eq!(out_vec, out_set); - } - assert_eq!(vector.len(), set.len()); - for (a, b) in vector.iter().zip(set.iter()) { - assert_eq!(a, b); - } -} - -#[test] -fn partial_eq_and_eq() { - let mut set_a = IndexSet::new(); - set_a.insert(1); - set_a.insert(2); - let mut set_b = set_a.clone(); - assert_eq!(set_a, set_b); - set_b.swap_remove(&1); - assert_ne!(set_a, set_b); - - let set_c: IndexSet<_> = set_b.into_iter().collect(); - assert_ne!(set_a, set_c); - assert_ne!(set_c, set_a); -} - -#[test] -fn extend() { - let mut set = IndexSet::new(); - set.extend(vec![&1, &2, &3, &4]); - set.extend(vec![5, 6]); - assert_eq!(set.into_iter().collect::>(), vec![1, 2, 3, 4, 5, 6]); -} - -#[test] -fn comparisons() { - let set_a: IndexSet<_> = (0..3).collect(); - let set_b: IndexSet<_> = (3..6).collect(); - let set_c: IndexSet<_> = (0..6).collect(); - let set_d: IndexSet<_> = (3..9).collect(); - - assert!(!set_a.is_disjoint(&set_a)); - assert!(set_a.is_subset(&set_a)); - assert!(set_a.is_superset(&set_a)); - - assert!(set_a.is_disjoint(&set_b)); - assert!(set_b.is_disjoint(&set_a)); - assert!(!set_a.is_subset(&set_b)); - assert!(!set_b.is_subset(&set_a)); - assert!(!set_a.is_superset(&set_b)); - assert!(!set_b.is_superset(&set_a)); - - assert!(!set_a.is_disjoint(&set_c)); - assert!(!set_c.is_disjoint(&set_a)); - assert!(set_a.is_subset(&set_c)); - assert!(!set_c.is_subset(&set_a)); - 
assert!(!set_a.is_superset(&set_c)); - assert!(set_c.is_superset(&set_a)); - - assert!(!set_c.is_disjoint(&set_d)); - assert!(!set_d.is_disjoint(&set_c)); - assert!(!set_c.is_subset(&set_d)); - assert!(!set_d.is_subset(&set_c)); - assert!(!set_c.is_superset(&set_d)); - assert!(!set_d.is_superset(&set_c)); -} - -#[test] -fn iter_comparisons() { - use std::iter::empty; - - fn check<'a, I1, I2>(iter1: I1, iter2: I2) - where - I1: Iterator, - I2: Iterator, - { - assert!(iter1.copied().eq(iter2)); - } - - let set_a: IndexSet<_> = (0..3).collect(); - let set_b: IndexSet<_> = (3..6).collect(); - let set_c: IndexSet<_> = (0..6).collect(); - let set_d: IndexSet<_> = (3..9).rev().collect(); - - check(set_a.difference(&set_a), empty()); - check(set_a.symmetric_difference(&set_a), empty()); - check(set_a.intersection(&set_a), 0..3); - check(set_a.union(&set_a), 0..3); - - check(set_a.difference(&set_b), 0..3); - check(set_b.difference(&set_a), 3..6); - check(set_a.symmetric_difference(&set_b), 0..6); - check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3)); - check(set_a.intersection(&set_b), empty()); - check(set_b.intersection(&set_a), empty()); - check(set_a.union(&set_b), 0..6); - check(set_b.union(&set_a), (3..6).chain(0..3)); - - check(set_a.difference(&set_c), empty()); - check(set_c.difference(&set_a), 3..6); - check(set_a.symmetric_difference(&set_c), 3..6); - check(set_c.symmetric_difference(&set_a), 3..6); - check(set_a.intersection(&set_c), 0..3); - check(set_c.intersection(&set_a), 0..3); - check(set_a.union(&set_c), 0..6); - check(set_c.union(&set_a), 0..6); - - check(set_c.difference(&set_d), 0..3); - check(set_d.difference(&set_c), (6..9).rev()); - check( - set_c.symmetric_difference(&set_d), - (0..3).chain((6..9).rev()), - ); - check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3)); - check(set_c.intersection(&set_d), 3..6); - check(set_d.intersection(&set_c), (3..6).rev()); - check(set_c.union(&set_d), (0..6).chain((6..9).rev())); - 
check(set_d.union(&set_c), (3..9).rev().chain(0..3)); -} - -#[test] -fn ops() { - let empty = IndexSet::::new(); - let set_a: IndexSet<_> = (0..3).collect(); - let set_b: IndexSet<_> = (3..6).collect(); - let set_c: IndexSet<_> = (0..6).collect(); - let set_d: IndexSet<_> = (3..9).rev().collect(); - - #[allow(clippy::eq_op)] - { - assert_eq!(&set_a & &set_a, set_a); - assert_eq!(&set_a | &set_a, set_a); - assert_eq!(&set_a ^ &set_a, empty); - assert_eq!(&set_a - &set_a, empty); - } - - assert_eq!(&set_a & &set_b, empty); - assert_eq!(&set_b & &set_a, empty); - assert_eq!(&set_a | &set_b, set_c); - assert_eq!(&set_b | &set_a, set_c); - assert_eq!(&set_a ^ &set_b, set_c); - assert_eq!(&set_b ^ &set_a, set_c); - assert_eq!(&set_a - &set_b, set_a); - assert_eq!(&set_b - &set_a, set_b); - - assert_eq!(&set_a & &set_c, set_a); - assert_eq!(&set_c & &set_a, set_a); - assert_eq!(&set_a | &set_c, set_c); - assert_eq!(&set_c | &set_a, set_c); - assert_eq!(&set_a ^ &set_c, set_b); - assert_eq!(&set_c ^ &set_a, set_b); - assert_eq!(&set_a - &set_c, empty); - assert_eq!(&set_c - &set_a, set_b); - - assert_eq!(&set_c & &set_d, set_b); - assert_eq!(&set_d & &set_c, set_b); - assert_eq!(&set_c | &set_d, &set_a | &set_d); - assert_eq!(&set_d | &set_c, &set_a | &set_d); - assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b)); - assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b)); - assert_eq!(&set_c - &set_d, set_a); - assert_eq!(&set_d - &set_c, &set_d - &set_b); -} - -#[test] -#[cfg(feature = "std")] -fn from_array() { - let set1 = IndexSet::from([1, 2, 3, 4]); - let set2: IndexSet<_> = [1, 2, 3, 4].into(); - - assert_eq!(set1, set2); -} - -#[test] -fn iter_default() { - struct Item; - fn assert_default() - where - T: Default + Iterator, - { - assert!(T::default().next().is_none()); - } - assert_default::>(); - assert_default::>(); -} - -#[test] -fn test_binary_search_by() { - // adapted from std's test for binary_search - let b: IndexSet = [].into(); - 
assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(0)); - - let b: IndexSet = [4].into(); - assert_eq!(b.binary_search_by(|x| x.cmp(&3)), Err(0)); - assert_eq!(b.binary_search_by(|x| x.cmp(&4)), Ok(0)); - assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(1)); - - let b: IndexSet = [1, 2, 4, 6, 8, 9].into(); - assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(3)); - assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Ok(3)); - assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Err(4)); - assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Ok(4)); - - let b: IndexSet = [1, 2, 4, 5, 6, 8].into(); - assert_eq!(b.binary_search_by(|x| x.cmp(&9)), Err(6)); - - let b: IndexSet = [1, 2, 4, 6, 7, 8, 9].into(); - assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Ok(3)); - assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(3)); - assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Ok(5)); - - let b: IndexSet = [1, 2, 4, 5, 6, 8, 9].into(); - assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Err(5)); - assert_eq!(b.binary_search_by(|x| x.cmp(&0)), Err(0)); - - let b: IndexSet = [1, 3, 3, 3, 7].into(); - assert_eq!(b.binary_search_by(|x| x.cmp(&0)), Err(0)); - assert_eq!(b.binary_search_by(|x| x.cmp(&1)), Ok(0)); - assert_eq!(b.binary_search_by(|x| x.cmp(&2)), Err(1)); - // diff from std as set merges the duplicate keys - assert!(match b.binary_search_by(|x| x.cmp(&3)) { - Ok(1..=2) => true, - _ => false, - }); - assert!(match b.binary_search_by(|x| x.cmp(&3)) { - Ok(1..=2) => true, - _ => false, - }); - assert_eq!(b.binary_search_by(|x| x.cmp(&4)), Err(2)); - assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(2)); - assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Err(2)); - assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Ok(2)); - assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Err(3)); -} - -#[test] -fn test_binary_search_by_key() { - // adapted from std's test for binary_search - let b: IndexSet = [].into(); - assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(0)); - - let b: IndexSet = [4].into(); - 
assert_eq!(b.binary_search_by_key(&3, |&x| x), Err(0)); - assert_eq!(b.binary_search_by_key(&4, |&x| x), Ok(0)); - assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(1)); - - let b: IndexSet = [1, 2, 4, 6, 8, 9].into(); - assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(3)); - assert_eq!(b.binary_search_by_key(&6, |&x| x), Ok(3)); - assert_eq!(b.binary_search_by_key(&7, |&x| x), Err(4)); - assert_eq!(b.binary_search_by_key(&8, |&x| x), Ok(4)); - - let b: IndexSet = [1, 2, 4, 5, 6, 8].into(); - assert_eq!(b.binary_search_by_key(&9, |&x| x), Err(6)); - - let b: IndexSet = [1, 2, 4, 6, 7, 8, 9].into(); - assert_eq!(b.binary_search_by_key(&6, |&x| x), Ok(3)); - assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(3)); - assert_eq!(b.binary_search_by_key(&8, |&x| x), Ok(5)); - - let b: IndexSet = [1, 2, 4, 5, 6, 8, 9].into(); - assert_eq!(b.binary_search_by_key(&7, |&x| x), Err(5)); - assert_eq!(b.binary_search_by_key(&0, |&x| x), Err(0)); - - let b: IndexSet = [1, 3, 3, 3, 7].into(); - assert_eq!(b.binary_search_by_key(&0, |&x| x), Err(0)); - assert_eq!(b.binary_search_by_key(&1, |&x| x), Ok(0)); - assert_eq!(b.binary_search_by_key(&2, |&x| x), Err(1)); - // diff from std as set merges the duplicate keys - assert!(match b.binary_search_by_key(&3, |&x| x) { - Ok(1..=2) => true, - _ => false, - }); - assert!(match b.binary_search_by_key(&3, |&x| x) { - Ok(1..=2) => true, - _ => false, - }); - assert_eq!(b.binary_search_by_key(&4, |&x| x), Err(2)); - assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(2)); - assert_eq!(b.binary_search_by_key(&6, |&x| x), Err(2)); - assert_eq!(b.binary_search_by_key(&7, |&x| x), Ok(2)); - assert_eq!(b.binary_search_by_key(&8, |&x| x), Err(3)); -} - -#[test] -fn test_partition_point() { - // adapted from std's test for partition_point - let b: IndexSet = [].into(); - assert_eq!(b.partition_point(|&x| x < 5), 0); - - let b: IndexSet<_> = [4].into(); - assert_eq!(b.partition_point(|&x| x < 3), 0); - assert_eq!(b.partition_point(|&x| x < 4), 
0); - assert_eq!(b.partition_point(|&x| x < 5), 1); - - let b: IndexSet<_> = [1, 2, 4, 6, 8, 9].into(); - assert_eq!(b.partition_point(|&x| x < 5), 3); - assert_eq!(b.partition_point(|&x| x < 6), 3); - assert_eq!(b.partition_point(|&x| x < 7), 4); - assert_eq!(b.partition_point(|&x| x < 8), 4); - - let b: IndexSet<_> = [1, 2, 4, 5, 6, 8].into(); - assert_eq!(b.partition_point(|&x| x < 9), 6); - - let b: IndexSet<_> = [1, 2, 4, 6, 7, 8, 9].into(); - assert_eq!(b.partition_point(|&x| x < 6), 3); - assert_eq!(b.partition_point(|&x| x < 5), 3); - assert_eq!(b.partition_point(|&x| x < 8), 5); - - let b: IndexSet<_> = [1, 2, 4, 5, 6, 8, 9].into(); - assert_eq!(b.partition_point(|&x| x < 7), 5); - assert_eq!(b.partition_point(|&x| x < 0), 0); - - let b: IndexSet<_> = [1, 3, 3, 3, 7].into(); - assert_eq!(b.partition_point(|&x| x < 0), 0); - assert_eq!(b.partition_point(|&x| x < 1), 0); - assert_eq!(b.partition_point(|&x| x < 2), 1); - assert_eq!(b.partition_point(|&x| x < 3), 1); - assert_eq!(b.partition_point(|&x| x < 4), 2); // diff from std as set merges the duplicate keys - assert_eq!(b.partition_point(|&x| x < 5), 2); - assert_eq!(b.partition_point(|&x| x < 6), 2); - assert_eq!(b.partition_point(|&x| x < 7), 2); - assert_eq!(b.partition_point(|&x| x < 8), 3); -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/set.rs s390-tools-2.33.1/rust-vendor/indexmap/src/set.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/set.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/set.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,28 +1,18 @@ //! 
A hash set implemented using `IndexMap` -mod iter; -mod slice; - -#[cfg(test)] -mod tests; - -pub use self::iter::{Difference, Drain, Intersection, IntoIter, Iter, SymmetricDifference, Union}; -pub use self::slice::Slice; - #[cfg(feature = "rayon")] pub use crate::rayon::set as rayon; -use crate::TryReserveError; -#[cfg(feature = "std")] +#[cfg(has_std)] use std::collections::hash_map::RandomState; -use crate::util::try_simplify_range; -use alloc::boxed::Box; -use alloc::vec::Vec; +use crate::vec::{self, Vec}; use core::cmp::Ordering; use core::fmt; use core::hash::{BuildHasher, Hash}; +use core::iter::{Chain, FusedIterator}; use core::ops::{BitAnd, BitOr, BitXor, Index, RangeBounds, Sub}; +use core::slice; use super::{Entries, Equivalent, IndexMap}; @@ -56,11 +46,6 @@ /// `0..self.len()`. For example, the method `.get_full` looks up the index for /// a value, and the method `.get_index` looks up the value by index. /// -/// # Complexity -/// -/// Internally, `IndexSet` just holds an [`IndexMap`](IndexMap). Thus the complexity -/// of the two are the same for most methods. -/// /// # Examples /// /// ``` @@ -74,11 +59,11 @@ /// assert!(letters.contains(&'u')); /// assert!(!letters.contains(&'y')); /// ``` -#[cfg(feature = "std")] +#[cfg(has_std)] pub struct IndexSet { pub(crate) map: IndexMap, } -#[cfg(not(feature = "std"))] +#[cfg(not(has_std))] pub struct IndexSet { pub(crate) map: IndexMap, } @@ -139,8 +124,7 @@ } } -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[cfg(has_std)] impl IndexSet { /// Create a new set. (Does not allocate.) pub fn new() -> Self { @@ -181,11 +165,6 @@ } } - /// Return the number of elements the set can hold without reallocating. - /// - /// This number is a lower bound; the set might be able to hold more, - /// but is guaranteed to be able to hold at least this many. - /// /// Computes in **O(1)** time. 
pub fn capacity(&self) -> usize { self.map.capacity() @@ -212,7 +191,9 @@ /// Return an iterator over the values of the set, in their order pub fn iter(&self) -> Iter<'_, T> { - Iter::new(self.as_entries()) + Iter { + iter: self.map.as_entries().iter(), + } } /// Remove all elements in the set, while preserving its capacity. @@ -246,7 +227,9 @@ where R: RangeBounds, { - Drain::new(self.map.core.drain(range)) + Drain { + iter: self.map.drain(range).iter, + } } /// Splits the collection into two at the given index. @@ -278,37 +261,6 @@ self.map.reserve(additional); } - /// Reserve capacity for `additional` more values, without over-allocating. - /// - /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid - /// frequent re-allocations. However, the underlying data structures may still have internal - /// capacity requirements, and the allocator itself may give more space than requested, so this - /// cannot be relied upon to be precisely minimal. - /// - /// Computes in **O(n)** time. - pub fn reserve_exact(&mut self, additional: usize) { - self.map.reserve_exact(additional); - } - - /// Try to reserve capacity for `additional` more values. - /// - /// Computes in **O(n)** time. - pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.map.try_reserve(additional) - } - - /// Try to reserve capacity for `additional` more values, without over-allocating. - /// - /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid - /// frequent re-allocations. However, the underlying data structures may still have internal - /// capacity requirements, and the allocator itself may give more space than requested, so this - /// cannot be relied upon to be precisely minimal. - /// - /// Computes in **O(n)** time. 
- pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.map.try_reserve_exact(additional) - } - /// Shrink the capacity of the set as much as possible. /// /// Computes in **O(n)** time. @@ -345,8 +297,16 @@ /// /// Computes in **O(1)** time (amortized average). pub fn insert_full(&mut self, value: T) -> (usize, bool) { - let (index, existing) = self.map.insert_full(value, ()); - (index, existing.is_none()) + use super::map::Entry::*; + + match self.map.entry(value) { + Occupied(e) => (e.index(), false), + Vacant(e) => { + let index = e.index(); + e.insert(()); + (index, true) + } + } } /// Return an iterator over the values that are in `self` but not `other`. @@ -356,7 +316,10 @@ where S2: BuildHasher, { - Difference::new(self, other) + Difference { + iter: self.iter(), + other, + } } /// Return an iterator over the values that are in `self` or `other`, @@ -371,7 +334,9 @@ where S2: BuildHasher, { - SymmetricDifference::new(self, other) + SymmetricDifference { + iter: self.difference(other).chain(other.difference(self)), + } } /// Return an iterator over the values that are in both `self` and `other`. @@ -381,7 +346,10 @@ where S2: BuildHasher, { - Intersection::new(self, other) + Intersection { + iter: self.iter(), + other, + } } /// Return an iterator over all values that are in `self` or `other`. @@ -392,7 +360,9 @@ where S2: BuildHasher, { - Union::new(self, other) + Union { + iter: self.iter().chain(other.difference(self)), + } } /// Return `true` if an equivalent to `value` exists in the set. @@ -425,8 +395,6 @@ } /// Return item index, if it exists in the set - /// - /// Computes in **O(1)** time (average). pub fn get_index_of(&self, value: &Q) -> Option where Q: Hash + Equivalent, @@ -636,7 +604,9 @@ { let mut entries = self.into_entries(); entries.sort_by(move |a, b| cmp(&a.key, &b.key)); - IntoIter::new(entries) + IntoIter { + iter: entries.into_iter(), + } } /// Sort the set's values by their default ordering. 
@@ -649,7 +619,7 @@ self.map.sort_unstable_keys() } - /// Sort the set's values in place using the comparison function `cmp`. + /// Sort the set's values in place using the comparison funtion `cmp`. /// /// Computes in **O(n log n)** time. The sort is unstable. pub fn sort_unstable_by(&mut self, mut cmp: F) @@ -667,82 +637,9 @@ { let mut entries = self.into_entries(); entries.sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); - IntoIter::new(entries) - } - - /// Sort the set’s values in place using a key extraction function. - /// - /// During sorting, the function is called at most once per entry, by using temporary storage - /// to remember the results of its evaluation. The order of calls to the function is - /// unspecified and may change between versions of `indexmap` or the standard library. - /// - /// Computes in **O(m n + n log n + c)** time () and **O(n)** space, where the function is - /// **O(m)**, *n* is the length of the map, and *c* the capacity. The sort is stable. - pub fn sort_by_cached_key(&mut self, mut sort_key: F) - where - K: Ord, - F: FnMut(&T) -> K, - { - self.with_entries(move |entries| { - entries.sort_by_cached_key(move |a| sort_key(&a.key)); - }); - } - - /// Search over a sorted set for a value. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search`] for more details. - /// - /// Computes in **O(log(n))** time, which is notably less scalable than looking the value up - /// using [`get_index_of`][IndexSet::get_index_of], but this can also position missing values. - pub fn binary_search(&self, x: &T) -> Result - where - T: Ord, - { - self.as_slice().binary_search(x) - } - - /// Search over a sorted set with a comparator function. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search_by`] for more details. 
- /// - /// Computes in **O(log(n))** time. - #[inline] - pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result - where - F: FnMut(&'a T) -> Ordering, - { - self.as_slice().binary_search_by(f) - } - - /// Search over a sorted set with an extraction function. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. - /// - /// Computes in **O(log(n))** time. - #[inline] - pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result - where - F: FnMut(&'a T) -> B, - B: Ord, - { - self.as_slice().binary_search_by_key(b, f) - } - - /// Returns the index of the partition point of a sorted set according to the given predicate - /// (the index of the first element of the second partition). - /// - /// See [`slice::partition_point`] for more details. - /// - /// Computes in **O(log(n))** time. - #[must_use] - pub fn partition_point

(&self, pred: P) -> usize - where - P: FnMut(&T) -> bool, - { - self.as_slice().partition_point(pred) + IntoIter { + iter: entries.into_iter(), + } } /// Reverses the order of the set’s values in place. @@ -754,20 +651,6 @@ } impl IndexSet { - /// Returns a slice of all the values in the set. - /// - /// Computes in **O(1)** time. - pub fn as_slice(&self) -> &Slice { - Slice::from_slice(self.as_entries()) - } - - /// Converts into a boxed slice of all the values in the set. - /// - /// Note that this will drop the inner hash table and any excess capacity. - pub fn into_boxed_slice(self) -> Box> { - Slice::from_boxed(self.into_entries().into_boxed_slice()) - } - /// Get a value by index /// /// Valid indices are *0 <= index < self.len()* @@ -777,17 +660,6 @@ self.as_entries().get(index).map(Bucket::key_ref) } - /// Returns a slice of values in the given range of indices. - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Computes in **O(1)** time. - pub fn get_range>(&self, range: R) -> Option<&Slice> { - let entries = self.as_entries(); - let range = try_simplify_range(range, entries.len())?; - entries.get(range).map(Slice::from_slice) - } - /// Get the first value /// /// Computes in **O(1)** time. @@ -889,6 +761,141 @@ } } +/// An owning iterator over the items of a `IndexSet`. +/// +/// This `struct` is created by the [`into_iter`] method on [`IndexSet`] +/// (provided by the `IntoIterator` trait). See its documentation for more. 
+/// +/// [`IndexSet`]: struct.IndexSet.html +/// [`into_iter`]: struct.IndexSet.html#method.into_iter +pub struct IntoIter { + iter: vec::IntoIter>, +} + +impl Iterator for IntoIter { + type Item = T; + + iterator_methods!(Bucket::key); +} + +impl DoubleEndedIterator for IntoIter { + double_ended_iterator_methods!(Bucket::key); +} + +impl ExactSizeIterator for IntoIter { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IntoIter {} + +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::key_ref); + f.debug_list().entries(iter).finish() + } +} + +/// An iterator over the items of a `IndexSet`. +/// +/// This `struct` is created by the [`iter`] method on [`IndexSet`]. +/// See its documentation for more. +/// +/// [`IndexSet`]: struct.IndexSet.html +/// [`iter`]: struct.IndexSet.html#method.iter +pub struct Iter<'a, T> { + iter: slice::Iter<'a, Bucket>, +} + +impl<'a, T> Iterator for Iter<'a, T> { + type Item = &'a T; + + iterator_methods!(Bucket::key_ref); +} + +impl DoubleEndedIterator for Iter<'_, T> { + double_ended_iterator_methods!(Bucket::key_ref); +} + +impl ExactSizeIterator for Iter<'_, T> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Iter<'_, T> {} + +impl Clone for Iter<'_, T> { + fn clone(&self) -> Self { + Iter { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Iter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A draining iterator over the items of a `IndexSet`. +/// +/// This `struct` is created by the [`drain`] method on [`IndexSet`]. +/// See its documentation for more. 
+/// +/// [`IndexSet`]: struct.IndexSet.html +/// [`drain`]: struct.IndexSet.html#method.drain +pub struct Drain<'a, T> { + iter: vec::Drain<'a, Bucket>, +} + +impl Iterator for Drain<'_, T> { + type Item = T; + + iterator_methods!(Bucket::key); +} + +impl DoubleEndedIterator for Drain<'_, T> { + double_ended_iterator_methods!(Bucket::key); +} + +impl ExactSizeIterator for Drain<'_, T> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Drain<'_, T> {} + +impl fmt::Debug for Drain<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::key_ref); + f.debug_list().entries(iter).finish() + } +} + +impl<'a, T, S> IntoIterator for &'a IndexSet { + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for IndexSet { + type Item = T; + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter { + iter: self.into_entries().into_iter(), + } + } +} + impl FromIterator for IndexSet where T: Hash + Eq, @@ -902,8 +909,7 @@ } } -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[cfg(has_std)] impl From<[T; N]> for IndexSet where T: Eq + Hash, @@ -1008,6 +1014,310 @@ } } +/// A lazy iterator producing elements in the difference of `IndexSet`s. +/// +/// This `struct` is created by the [`difference`] method on [`IndexSet`]. +/// See its documentation for more. 
+/// +/// [`IndexSet`]: struct.IndexSet.html +/// [`difference`]: struct.IndexSet.html#method.difference +pub struct Difference<'a, T, S> { + iter: Iter<'a, T>, + other: &'a IndexSet, +} + +impl<'a, T, S> Iterator for Difference<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + while let Some(item) = self.iter.next() { + if !self.other.contains(item) { + return Some(item); + } + } + None + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } +} + +impl DoubleEndedIterator for Difference<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + fn next_back(&mut self) -> Option { + while let Some(item) = self.iter.next_back() { + if !self.other.contains(item) { + return Some(item); + } + } + None + } +} + +impl FusedIterator for Difference<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl Clone for Difference<'_, T, S> { + fn clone(&self) -> Self { + Difference { + iter: self.iter.clone(), + ..*self + } + } +} + +impl fmt::Debug for Difference<'_, T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A lazy iterator producing elements in the intersection of `IndexSet`s. +/// +/// This `struct` is created by the [`intersection`] method on [`IndexSet`]. +/// See its documentation for more. 
+/// +/// [`IndexSet`]: struct.IndexSet.html +/// [`intersection`]: struct.IndexSet.html#method.intersection +pub struct Intersection<'a, T, S> { + iter: Iter<'a, T>, + other: &'a IndexSet, +} + +impl<'a, T, S> Iterator for Intersection<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + while let Some(item) = self.iter.next() { + if self.other.contains(item) { + return Some(item); + } + } + None + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } +} + +impl DoubleEndedIterator for Intersection<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + fn next_back(&mut self) -> Option { + while let Some(item) = self.iter.next_back() { + if self.other.contains(item) { + return Some(item); + } + } + None + } +} + +impl FusedIterator for Intersection<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl Clone for Intersection<'_, T, S> { + fn clone(&self) -> Self { + Intersection { + iter: self.iter.clone(), + ..*self + } + } +} + +impl fmt::Debug for Intersection<'_, T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A lazy iterator producing elements in the symmetric difference of `IndexSet`s. +/// +/// This `struct` is created by the [`symmetric_difference`] method on +/// [`IndexSet`]. See its documentation for more. 
+/// +/// [`IndexSet`]: struct.IndexSet.html +/// [`symmetric_difference`]: struct.IndexSet.html#method.symmetric_difference +pub struct SymmetricDifference<'a, T, S1, S2> { + iter: Chain, Difference<'a, T, S1>>, +} + +impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> +where + T: Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn fold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } +} + +impl DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2> +where + T: Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back() + } + + fn rfold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.rfold(init, f) + } +} + +impl FusedIterator for SymmetricDifference<'_, T, S1, S2> +where + T: Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ +} + +impl Clone for SymmetricDifference<'_, T, S1, S2> { + fn clone(&self) -> Self { + SymmetricDifference { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for SymmetricDifference<'_, T, S1, S2> +where + T: fmt::Debug + Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A lazy iterator producing elements in the union of `IndexSet`s. +/// +/// This `struct` is created by the [`union`] method on [`IndexSet`]. +/// See its documentation for more. 
+/// +/// [`IndexSet`]: struct.IndexSet.html +/// [`union`]: struct.IndexSet.html#method.union +pub struct Union<'a, T, S> { + iter: Chain, Difference<'a, T, S>>, +} + +impl<'a, T, S> Iterator for Union<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn fold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } +} + +impl DoubleEndedIterator for Union<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back() + } + + fn rfold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.rfold(init, f) + } +} + +impl FusedIterator for Union<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl Clone for Union<'_, T, S> { + fn clone(&self) -> Self { + Union { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Union<'_, T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + impl BitAnd<&IndexSet> for &IndexSet where T: Eq + Hash + Clone, @@ -1073,3 +1383,530 @@ self.difference(other).cloned().collect() } } + +#[cfg(test)] +mod tests { + use super::*; + use std::string::String; + + #[test] + fn it_works() { + let mut set = IndexSet::new(); + assert_eq!(set.is_empty(), true); + set.insert(1); + set.insert(1); + assert_eq!(set.len(), 1); + assert!(set.get(&1).is_some()); + assert_eq!(set.is_empty(), false); + } + + #[test] + fn new() { + let set = IndexSet::::new(); + println!("{:?}", set); + assert_eq!(set.capacity(), 0); + assert_eq!(set.len(), 0); + assert_eq!(set.is_empty(), true); + } + + #[test] + fn insert() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5]; + let not_present = [1, 3, 6, 9, 10]; + let mut set = 
IndexSet::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(set.len(), i); + set.insert(elt); + assert_eq!(set.len(), i + 1); + assert_eq!(set.get(&elt), Some(&elt)); + } + println!("{:?}", set); + + for &elt in ¬_present { + assert!(set.get(&elt).is_none()); + } + } + + #[test] + fn insert_full() { + let insert = vec![9, 2, 7, 1, 4, 6, 13]; + let present = vec![1, 6, 2]; + let mut set = IndexSet::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(set.len(), i); + let (index, success) = set.insert_full(elt); + assert!(success); + assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); + assert_eq!(set.len(), i + 1); + } + + let len = set.len(); + for &elt in &present { + let (index, success) = set.insert_full(elt); + assert!(!success); + assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); + assert_eq!(set.len(), len); + } + } + + #[test] + fn insert_2() { + let mut set = IndexSet::with_capacity(16); + + let mut values = vec![]; + values.extend(0..16); + values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); + + for &i in &values { + let old_set = set.clone(); + set.insert(i); + for value in old_set.iter() { + if set.get(value).is_none() { + println!("old_set: {:?}", old_set); + println!("set: {:?}", set); + panic!("did not find {} in set", value); + } + } + } + + for &i in &values { + assert!(set.get(&i).is_some(), "did not find {}", i); + } + } + + #[test] + fn insert_dup() { + let mut elements = vec![0, 2, 4, 6, 8]; + let mut set: IndexSet = elements.drain(..).collect(); + { + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } + { + let inserted = set.insert(0); + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(inserted, false); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } + } + + #[test] + fn insert_order() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let 
mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + assert_eq!(set.iter().count(), set.len()); + assert_eq!(set.iter().count(), insert.len()); + for (a, b) in insert.iter().zip(set.iter()) { + assert_eq!(a, b); + } + for (i, v) in (0..insert.len()).zip(set.iter()) { + assert_eq!(set.get_index(i).unwrap(), v); + } + } + + #[test] + fn replace() { + let replace = [0, 4, 2, 12, 8, 7, 11, 5]; + let not_present = [1, 3, 6, 9, 10]; + let mut set = IndexSet::with_capacity(replace.len()); + + for (i, &elt) in replace.iter().enumerate() { + assert_eq!(set.len(), i); + set.replace(elt); + assert_eq!(set.len(), i + 1); + assert_eq!(set.get(&elt), Some(&elt)); + } + println!("{:?}", set); + + for &elt in ¬_present { + assert!(set.get(&elt).is_none()); + } + } + + #[test] + fn replace_full() { + let replace = vec![9, 2, 7, 1, 4, 6, 13]; + let present = vec![1, 6, 2]; + let mut set = IndexSet::with_capacity(replace.len()); + + for (i, &elt) in replace.iter().enumerate() { + assert_eq!(set.len(), i); + let (index, replaced) = set.replace_full(elt); + assert!(replaced.is_none()); + assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); + assert_eq!(set.len(), i + 1); + } + + let len = set.len(); + for &elt in &present { + let (index, replaced) = set.replace_full(elt); + assert_eq!(Some(elt), replaced); + assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); + assert_eq!(set.len(), len); + } + } + + #[test] + fn replace_2() { + let mut set = IndexSet::with_capacity(16); + + let mut values = vec![]; + values.extend(0..16); + values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); + + for &i in &values { + let old_set = set.clone(); + set.replace(i); + for value in old_set.iter() { + if set.get(value).is_none() { + println!("old_set: {:?}", old_set); + println!("set: {:?}", set); + panic!("did not find {} in set", value); + } + } + } + + for &i in &values { + assert!(set.get(&i).is_some(), "did not find {}", i); + } + } + + #[test] + fn 
replace_dup() { + let mut elements = vec![0, 2, 4, 6, 8]; + let mut set: IndexSet = elements.drain(..).collect(); + { + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } + { + let replaced = set.replace(0); + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(replaced, Some(0)); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } + } + + #[test] + fn replace_order() { + let replace = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &replace { + set.replace(elt); + } + + assert_eq!(set.iter().count(), set.len()); + assert_eq!(set.iter().count(), replace.len()); + for (a, b) in replace.iter().zip(set.iter()) { + assert_eq!(a, b); + } + for (i, v) in (0..replace.len()).zip(set.iter()) { + assert_eq!(set.get_index(i).unwrap(), v); + } + } + + #[test] + fn grow() { + let insert = [0, 4, 2, 12, 8, 7, 11]; + let not_present = [1, 3, 6, 9, 10]; + let mut set = IndexSet::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(set.len(), i); + set.insert(elt); + assert_eq!(set.len(), i + 1); + assert_eq!(set.get(&elt), Some(&elt)); + } + + println!("{:?}", set); + for &elt in &insert { + set.insert(elt * 10); + } + for &elt in &insert { + set.insert(elt * 100); + } + for (i, &elt) in insert.iter().cycle().enumerate().take(100) { + set.insert(elt * 100 + i as i32); + } + println!("{:?}", set); + for &elt in ¬_present { + assert!(set.get(&elt).is_none()); + } + } + + #[test] + fn reserve() { + let mut set = IndexSet::::new(); + assert_eq!(set.capacity(), 0); + set.reserve(100); + let capacity = set.capacity(); + assert!(capacity >= 100); + for i in 0..capacity { + assert_eq!(set.len(), i); + set.insert(i); + assert_eq!(set.len(), i + 1); + assert_eq!(set.capacity(), capacity); + assert_eq!(set.get(&i), Some(&i)); + } + set.insert(capacity); + assert_eq!(set.len(), capacity + 1); + assert!(set.capacity() > 
capacity); + assert_eq!(set.get(&capacity), Some(&capacity)); + } + + #[test] + fn shrink_to_fit() { + let mut set = IndexSet::::new(); + assert_eq!(set.capacity(), 0); + for i in 0..100 { + assert_eq!(set.len(), i); + set.insert(i); + assert_eq!(set.len(), i + 1); + assert!(set.capacity() >= i + 1); + assert_eq!(set.get(&i), Some(&i)); + set.shrink_to_fit(); + assert_eq!(set.len(), i + 1); + assert_eq!(set.capacity(), i + 1); + assert_eq!(set.get(&i), Some(&i)); + } + } + + #[test] + fn remove() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + assert_eq!(set.iter().count(), set.len()); + assert_eq!(set.iter().count(), insert.len()); + for (a, b) in insert.iter().zip(set.iter()) { + assert_eq!(a, b); + } + + let remove_fail = [99, 77]; + let remove = [4, 12, 8, 7]; + + for &value in &remove_fail { + assert!(set.swap_remove_full(&value).is_none()); + } + println!("{:?}", set); + for &value in &remove { + //println!("{:?}", set); + let index = set.get_full(&value).unwrap().0; + assert_eq!(set.swap_remove_full(&value), Some((index, value))); + } + println!("{:?}", set); + + for value in &insert { + assert_eq!(set.get(value).is_some(), !remove.contains(value)); + } + assert_eq!(set.len(), insert.len() - remove.len()); + assert_eq!(set.iter().count(), insert.len() - remove.len()); + } + + #[test] + fn swap_remove_index() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + let mut vector = insert.to_vec(); + let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; + + // check that the same swap remove sequence on vec and set + // have the same result. 
+ for &rm in remove_sequence { + let out_vec = vector.swap_remove(rm); + let out_set = set.swap_remove_index(rm).unwrap(); + assert_eq!(out_vec, out_set); + } + assert_eq!(vector.len(), set.len()); + for (a, b) in vector.iter().zip(set.iter()) { + assert_eq!(a, b); + } + } + + #[test] + fn partial_eq_and_eq() { + let mut set_a = IndexSet::new(); + set_a.insert(1); + set_a.insert(2); + let mut set_b = set_a.clone(); + assert_eq!(set_a, set_b); + set_b.swap_remove(&1); + assert_ne!(set_a, set_b); + + let set_c: IndexSet<_> = set_b.into_iter().collect(); + assert_ne!(set_a, set_c); + assert_ne!(set_c, set_a); + } + + #[test] + fn extend() { + let mut set = IndexSet::new(); + set.extend(vec![&1, &2, &3, &4]); + set.extend(vec![5, 6]); + assert_eq!(set.into_iter().collect::>(), vec![1, 2, 3, 4, 5, 6]); + } + + #[test] + fn comparisons() { + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).collect(); + + assert!(!set_a.is_disjoint(&set_a)); + assert!(set_a.is_subset(&set_a)); + assert!(set_a.is_superset(&set_a)); + + assert!(set_a.is_disjoint(&set_b)); + assert!(set_b.is_disjoint(&set_a)); + assert!(!set_a.is_subset(&set_b)); + assert!(!set_b.is_subset(&set_a)); + assert!(!set_a.is_superset(&set_b)); + assert!(!set_b.is_superset(&set_a)); + + assert!(!set_a.is_disjoint(&set_c)); + assert!(!set_c.is_disjoint(&set_a)); + assert!(set_a.is_subset(&set_c)); + assert!(!set_c.is_subset(&set_a)); + assert!(!set_a.is_superset(&set_c)); + assert!(set_c.is_superset(&set_a)); + + assert!(!set_c.is_disjoint(&set_d)); + assert!(!set_d.is_disjoint(&set_c)); + assert!(!set_c.is_subset(&set_d)); + assert!(!set_d.is_subset(&set_c)); + assert!(!set_c.is_superset(&set_d)); + assert!(!set_d.is_superset(&set_c)); + } + + #[test] + fn iter_comparisons() { + use std::iter::empty; + + fn check<'a, I1, I2>(iter1: I1, iter2: I2) + where + I1: Iterator, + I2: Iterator, + { + 
assert!(iter1.copied().eq(iter2)); + } + + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).rev().collect(); + + check(set_a.difference(&set_a), empty()); + check(set_a.symmetric_difference(&set_a), empty()); + check(set_a.intersection(&set_a), 0..3); + check(set_a.union(&set_a), 0..3); + + check(set_a.difference(&set_b), 0..3); + check(set_b.difference(&set_a), 3..6); + check(set_a.symmetric_difference(&set_b), 0..6); + check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3)); + check(set_a.intersection(&set_b), empty()); + check(set_b.intersection(&set_a), empty()); + check(set_a.union(&set_b), 0..6); + check(set_b.union(&set_a), (3..6).chain(0..3)); + + check(set_a.difference(&set_c), empty()); + check(set_c.difference(&set_a), 3..6); + check(set_a.symmetric_difference(&set_c), 3..6); + check(set_c.symmetric_difference(&set_a), 3..6); + check(set_a.intersection(&set_c), 0..3); + check(set_c.intersection(&set_a), 0..3); + check(set_a.union(&set_c), 0..6); + check(set_c.union(&set_a), 0..6); + + check(set_c.difference(&set_d), 0..3); + check(set_d.difference(&set_c), (6..9).rev()); + check( + set_c.symmetric_difference(&set_d), + (0..3).chain((6..9).rev()), + ); + check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3)); + check(set_c.intersection(&set_d), 3..6); + check(set_d.intersection(&set_c), (3..6).rev()); + check(set_c.union(&set_d), (0..6).chain((6..9).rev())); + check(set_d.union(&set_c), (3..9).rev().chain(0..3)); + } + + #[test] + fn ops() { + let empty = IndexSet::::new(); + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).rev().collect(); + + #[allow(clippy::eq_op)] + { + assert_eq!(&set_a & &set_a, set_a); + assert_eq!(&set_a | &set_a, set_a); + assert_eq!(&set_a ^ &set_a, empty); + assert_eq!(&set_a - 
&set_a, empty); + } + + assert_eq!(&set_a & &set_b, empty); + assert_eq!(&set_b & &set_a, empty); + assert_eq!(&set_a | &set_b, set_c); + assert_eq!(&set_b | &set_a, set_c); + assert_eq!(&set_a ^ &set_b, set_c); + assert_eq!(&set_b ^ &set_a, set_c); + assert_eq!(&set_a - &set_b, set_a); + assert_eq!(&set_b - &set_a, set_b); + + assert_eq!(&set_a & &set_c, set_a); + assert_eq!(&set_c & &set_a, set_a); + assert_eq!(&set_a | &set_c, set_c); + assert_eq!(&set_c | &set_a, set_c); + assert_eq!(&set_a ^ &set_c, set_b); + assert_eq!(&set_c ^ &set_a, set_b); + assert_eq!(&set_a - &set_c, empty); + assert_eq!(&set_c - &set_a, set_b); + + assert_eq!(&set_c & &set_d, set_b); + assert_eq!(&set_d & &set_c, set_b); + assert_eq!(&set_c | &set_d, &set_a | &set_d); + assert_eq!(&set_d | &set_c, &set_a | &set_d); + assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b)); + assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b)); + assert_eq!(&set_c - &set_d, set_a); + assert_eq!(&set_d - &set_c, &set_d - &set_b); + } + + #[test] + #[cfg(has_std)] + fn from_array() { + let set1 = IndexSet::from([1, 2, 3, 4]); + let set2: IndexSet<_> = [1, 2, 3, 4].into(); + + assert_eq!(set1, set2); + } +} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/util.rs s390-tools-2.33.1/rust-vendor/indexmap/src/util.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/util.rs 2024-05-28 11:57:36.000000000 +0200 @@ -29,25 +29,3 @@ } start..end } - -pub(crate) fn try_simplify_range(range: R, len: usize) -> Option> -where - R: RangeBounds, -{ - let start = match range.start_bound() { - Bound::Unbounded => 0, - Bound::Included(&i) if i <= len => i, - Bound::Excluded(&i) if i < len => i + 1, - _ => return None, - }; - let end = match range.end_bound() { - Bound::Unbounded => len, - Bound::Excluded(&i) if i <= len => i, - Bound::Included(&i) if i < len => i + 1, - _ => return None, - }; - if start > end { - return None; - 
} - Some(start..end) -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/benches/bench.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/benches/bench.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/benches/bench.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/benches/bench.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,763 +0,0 @@ -#![feature(test)] - -extern crate test; -#[macro_use] -extern crate lazy_static; - -use fnv::FnvHasher; -use std::hash::BuildHasherDefault; -use std::hash::Hash; -type FnvBuilder = BuildHasherDefault; - -use test::black_box; -use test::Bencher; - -use indexmap::IndexMap; - -use std::collections::HashMap; - -use rand::rngs::SmallRng; -use rand::seq::SliceRandom; -use rand::SeedableRng; - -/// Use a consistently seeded Rng for benchmark stability -fn small_rng() -> SmallRng { - let seed = u64::from_le_bytes(*b"indexmap"); - SmallRng::seed_from_u64(seed) -} - -#[bench] -fn new_hashmap(b: &mut Bencher) { - b.iter(|| HashMap::::new()); -} - -#[bench] -fn new_indexmap(b: &mut Bencher) { - b.iter(|| IndexMap::::new()); -} - -#[bench] -fn with_capacity_10e5_hashmap(b: &mut Bencher) { - b.iter(|| HashMap::::with_capacity(10_000)); -} - -#[bench] -fn with_capacity_10e5_indexmap(b: &mut Bencher) { - b.iter(|| IndexMap::::with_capacity(10_000)); -} - -#[bench] -fn insert_hashmap_10_000(b: &mut Bencher) { - let c = 10_000; - b.iter(|| { - let mut map = HashMap::with_capacity(c); - for x in 0..c { - map.insert(x, ()); - } - map - }); -} - -#[bench] -fn insert_indexmap_10_000(b: &mut Bencher) { - let c = 10_000; - b.iter(|| { - let mut map = IndexMap::with_capacity(c); - for x in 0..c { - map.insert(x, ()); - } - map - }); -} - -#[bench] -fn insert_hashmap_string_10_000(b: &mut Bencher) { - let c = 10_000; - b.iter(|| { - let mut map = HashMap::with_capacity(c); - for x in 0..c { - map.insert(x.to_string(), ()); - } - map - }); -} - -#[bench] -fn insert_indexmap_string_10_000(b: &mut Bencher) { - let c = 
10_000; - b.iter(|| { - let mut map = IndexMap::with_capacity(c); - for x in 0..c { - map.insert(x.to_string(), ()); - } - map - }); -} - -#[bench] -fn insert_hashmap_str_10_000(b: &mut Bencher) { - let c = 10_000; - let ss = Vec::from_iter((0..c).map(|x| x.to_string())); - b.iter(|| { - let mut map = HashMap::with_capacity(c); - for key in &ss { - map.insert(&key[..], ()); - } - map - }); -} - -#[bench] -fn insert_indexmap_str_10_000(b: &mut Bencher) { - let c = 10_000; - let ss = Vec::from_iter((0..c).map(|x| x.to_string())); - b.iter(|| { - let mut map = IndexMap::with_capacity(c); - for key in &ss { - map.insert(&key[..], ()); - } - map - }); -} - -#[bench] -fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) { - let c = 10_000; - let value = [0u64; 10]; - b.iter(|| { - let mut map = HashMap::with_capacity(c); - for i in 0..c { - map.insert(i, value); - } - map - }); -} - -#[bench] -fn insert_indexmap_int_bigvalue_10_000(b: &mut Bencher) { - let c = 10_000; - let value = [0u64; 10]; - b.iter(|| { - let mut map = IndexMap::with_capacity(c); - for i in 0..c { - map.insert(i, value); - } - map - }); -} - -#[bench] -fn insert_hashmap_100_000(b: &mut Bencher) { - let c = 100_000; - b.iter(|| { - let mut map = HashMap::with_capacity(c); - for x in 0..c { - map.insert(x, ()); - } - map - }); -} - -#[bench] -fn insert_indexmap_100_000(b: &mut Bencher) { - let c = 100_000; - b.iter(|| { - let mut map = IndexMap::with_capacity(c); - for x in 0..c { - map.insert(x, ()); - } - map - }); -} - -#[bench] -fn insert_hashmap_150(b: &mut Bencher) { - let c = 150; - b.iter(|| { - let mut map = HashMap::with_capacity(c); - for x in 0..c { - map.insert(x, ()); - } - map - }); -} - -#[bench] -fn insert_indexmap_150(b: &mut Bencher) { - let c = 150; - b.iter(|| { - let mut map = IndexMap::with_capacity(c); - for x in 0..c { - map.insert(x, ()); - } - map - }); -} - -#[bench] -fn entry_hashmap_150(b: &mut Bencher) { - let c = 150; - b.iter(|| { - let mut map = 
HashMap::with_capacity(c); - for x in 0..c { - map.entry(x).or_insert(()); - } - map - }); -} - -#[bench] -fn entry_indexmap_150(b: &mut Bencher) { - let c = 150; - b.iter(|| { - let mut map = IndexMap::with_capacity(c); - for x in 0..c { - map.entry(x).or_insert(()); - } - map - }); -} - -#[bench] -fn iter_sum_hashmap_10_000(b: &mut Bencher) { - let c = 10_000; - let mut map = HashMap::with_capacity(c); - let len = c - c / 10; - for x in 0..len { - map.insert(x, ()); - } - assert_eq!(map.len(), len); - b.iter(|| map.keys().sum::()); -} - -#[bench] -fn iter_sum_indexmap_10_000(b: &mut Bencher) { - let c = 10_000; - let mut map = IndexMap::with_capacity(c); - let len = c - c / 10; - for x in 0..len { - map.insert(x, ()); - } - assert_eq!(map.len(), len); - b.iter(|| map.keys().sum::()); -} - -#[bench] -fn iter_black_box_hashmap_10_000(b: &mut Bencher) { - let c = 10_000; - let mut map = HashMap::with_capacity(c); - let len = c - c / 10; - for x in 0..len { - map.insert(x, ()); - } - assert_eq!(map.len(), len); - b.iter(|| { - for &key in map.keys() { - black_box(key); - } - }); -} - -#[bench] -fn iter_black_box_indexmap_10_000(b: &mut Bencher) { - let c = 10_000; - let mut map = IndexMap::with_capacity(c); - let len = c - c / 10; - for x in 0..len { - map.insert(x, ()); - } - assert_eq!(map.len(), len); - b.iter(|| { - for &key in map.keys() { - black_box(key); - } - }); -} - -fn shuffled_keys(iter: I) -> Vec -where - I: IntoIterator, -{ - let mut v = Vec::from_iter(iter); - let mut rng = small_rng(); - v.shuffle(&mut rng); - v -} - -#[bench] -fn lookup_hashmap_10_000_exist(b: &mut Bencher) { - let c = 10_000; - let mut map = HashMap::with_capacity(c); - let keys = shuffled_keys(0..c); - for &key in &keys { - map.insert(key, 1); - } - b.iter(|| { - let mut found = 0; - for key in 5000..c { - found += map.get(&key).is_some() as i32; - } - found - }); -} - -#[bench] -fn lookup_hashmap_10_000_noexist(b: &mut Bencher) { - let c = 10_000; - let mut map = 
HashMap::with_capacity(c); - let keys = shuffled_keys(0..c); - for &key in &keys { - map.insert(key, 1); - } - b.iter(|| { - let mut found = 0; - for key in c..15000 { - found += map.get(&key).is_some() as i32; - } - found - }); -} - -#[bench] -fn lookup_indexmap_10_000_exist(b: &mut Bencher) { - let c = 10_000; - let mut map = IndexMap::with_capacity(c); - let keys = shuffled_keys(0..c); - for &key in &keys { - map.insert(key, 1); - } - b.iter(|| { - let mut found = 0; - for key in 5000..c { - found += map.get(&key).is_some() as i32; - } - found - }); -} - -#[bench] -fn lookup_indexmap_10_000_noexist(b: &mut Bencher) { - let c = 10_000; - let mut map = IndexMap::with_capacity(c); - let keys = shuffled_keys(0..c); - for &key in &keys { - map.insert(key, 1); - } - b.iter(|| { - let mut found = 0; - for key in c..15000 { - found += map.get(&key).is_some() as i32; - } - found - }); -} - -// number of items to look up -const LOOKUP_MAP_SIZE: u32 = 100_000_u32; -const LOOKUP_SAMPLE_SIZE: u32 = 5000; -const SORT_MAP_SIZE: usize = 10_000; - -// use lazy_static so that comparison benchmarks use the exact same inputs -lazy_static! { - static ref KEYS: Vec = shuffled_keys(0..LOOKUP_MAP_SIZE); -} - -lazy_static! { - static ref HMAP_100K: HashMap = { - let c = LOOKUP_MAP_SIZE; - let mut map = HashMap::with_capacity(c as usize); - let keys = &*KEYS; - for &key in keys { - map.insert(key, key); - } - map - }; -} - -lazy_static! { - static ref IMAP_100K: IndexMap = { - let c = LOOKUP_MAP_SIZE; - let mut map = IndexMap::with_capacity(c as usize); - let keys = &*KEYS; - for &key in keys { - map.insert(key, key); - } - map - }; -} - -lazy_static! { - static ref IMAP_SORT_U32: IndexMap = { - let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); - for &key in &KEYS[..SORT_MAP_SIZE] { - map.insert(key, key); - } - map - }; -} -lazy_static! 
{ - static ref IMAP_SORT_S: IndexMap = { - let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); - for &key in &KEYS[..SORT_MAP_SIZE] { - map.insert(format!("{:^16x}", &key), String::new()); - } - map - }; -} - -#[bench] -fn lookup_hashmap_100_000_multi(b: &mut Bencher) { - let map = &*HMAP_100K; - b.iter(|| { - let mut found = 0; - for key in 0..LOOKUP_SAMPLE_SIZE { - found += map.get(&key).is_some() as u32; - } - found - }); -} - -#[bench] -fn lookup_indexmap_100_000_multi(b: &mut Bencher) { - let map = &*IMAP_100K; - b.iter(|| { - let mut found = 0; - for key in 0..LOOKUP_SAMPLE_SIZE { - found += map.get(&key).is_some() as u32; - } - found - }); -} - -// inorder: Test looking up keys in the same order as they were inserted -#[bench] -fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) { - let map = &*HMAP_100K; - let keys = &*KEYS; - b.iter(|| { - let mut found = 0; - for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { - found += map.get(key).is_some() as u32; - } - found - }); -} - -#[bench] -fn lookup_indexmap_100_000_inorder_multi(b: &mut Bencher) { - let map = &*IMAP_100K; - let keys = &*KEYS; - b.iter(|| { - let mut found = 0; - for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { - found += map.get(key).is_some() as u32; - } - found - }); -} - -#[bench] -fn lookup_hashmap_100_000_single(b: &mut Bencher) { - let map = &*HMAP_100K; - let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); - b.iter(|| { - let key = iter.next().unwrap(); - map.get(&key).is_some() - }); -} - -#[bench] -fn lookup_indexmap_100_000_single(b: &mut Bencher) { - let map = &*IMAP_100K; - let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); - b.iter(|| { - let key = iter.next().unwrap(); - map.get(&key).is_some() - }); -} - -const GROW_SIZE: usize = 100_000; -type GrowKey = u32; - -// Test grow/resize without preallocation -#[bench] -fn grow_fnv_hashmap_100_000(b: &mut Bencher) { - b.iter(|| { - let mut map: HashMap<_, _, FnvBuilder> = HashMap::default(); - for 
x in 0..GROW_SIZE { - map.insert(x as GrowKey, x as GrowKey); - } - map - }); -} - -#[bench] -fn grow_fnv_indexmap_100_000(b: &mut Bencher) { - b.iter(|| { - let mut map: IndexMap<_, _, FnvBuilder> = IndexMap::default(); - for x in 0..GROW_SIZE { - map.insert(x as GrowKey, x as GrowKey); - } - map - }); -} - -const MERGE: u64 = 10_000; -#[bench] -fn hashmap_merge_simple(b: &mut Bencher) { - let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); - let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); - b.iter(|| { - let mut merged = first_map.clone(); - merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); - merged - }); -} - -#[bench] -fn hashmap_merge_shuffle(b: &mut Bencher) { - let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); - let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); - let mut v = Vec::new(); - let mut rng = small_rng(); - b.iter(|| { - let mut merged = first_map.clone(); - v.extend(second_map.iter().map(|(&k, &v)| (k, v))); - v.shuffle(&mut rng); - merged.extend(v.drain(..)); - - merged - }); -} - -#[bench] -fn indexmap_merge_simple(b: &mut Bencher) { - let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); - let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); - b.iter(|| { - let mut merged = first_map.clone(); - merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); - merged - }); -} - -#[bench] -fn indexmap_merge_shuffle(b: &mut Bencher) { - let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); - let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); - let mut v = Vec::new(); - let mut rng = small_rng(); - b.iter(|| { - let mut merged = first_map.clone(); - v.extend(second_map.iter().map(|(&k, &v)| (k, v))); - v.shuffle(&mut rng); - merged.extend(v.drain(..)); - - merged - }); -} - -#[bench] -fn swap_remove_indexmap_100_000(b: &mut Bencher) { - let map = IMAP_100K.clone(); - let mut keys = 
Vec::from_iter(map.keys().copied()); - let mut rng = small_rng(); - keys.shuffle(&mut rng); - - b.iter(|| { - let mut map = map.clone(); - for key in &keys { - map.swap_remove(key); - } - assert_eq!(map.len(), 0); - map - }); -} - -#[bench] -fn shift_remove_indexmap_100_000_few(b: &mut Bencher) { - let map = IMAP_100K.clone(); - let mut keys = Vec::from_iter(map.keys().copied()); - let mut rng = small_rng(); - keys.shuffle(&mut rng); - keys.truncate(50); - - b.iter(|| { - let mut map = map.clone(); - for key in &keys { - map.shift_remove(key); - } - assert_eq!(map.len(), IMAP_100K.len() - keys.len()); - map - }); -} - -#[bench] -fn shift_remove_indexmap_2_000_full(b: &mut Bencher) { - let mut keys = KEYS[..2_000].to_vec(); - let mut map = IndexMap::with_capacity(keys.len()); - for &key in &keys { - map.insert(key, key); - } - let mut rng = small_rng(); - keys.shuffle(&mut rng); - - b.iter(|| { - let mut map = map.clone(); - for key in &keys { - map.shift_remove(key); - } - assert_eq!(map.len(), 0); - map - }); -} - -#[bench] -fn pop_indexmap_100_000(b: &mut Bencher) { - let map = IMAP_100K.clone(); - - b.iter(|| { - let mut map = map.clone(); - while !map.is_empty() { - map.pop(); - } - assert_eq!(map.len(), 0); - map - }); -} - -#[bench] -fn few_retain_indexmap_100_000(b: &mut Bencher) { - let map = IMAP_100K.clone(); - - b.iter(|| { - let mut map = map.clone(); - map.retain(|k, _| *k % 7 == 0); - map - }); -} - -#[bench] -fn few_retain_hashmap_100_000(b: &mut Bencher) { - let map = HMAP_100K.clone(); - - b.iter(|| { - let mut map = map.clone(); - map.retain(|k, _| *k % 7 == 0); - map - }); -} - -#[bench] -fn half_retain_indexmap_100_000(b: &mut Bencher) { - let map = IMAP_100K.clone(); - - b.iter(|| { - let mut map = map.clone(); - map.retain(|k, _| *k % 2 == 0); - map - }); -} - -#[bench] -fn half_retain_hashmap_100_000(b: &mut Bencher) { - let map = HMAP_100K.clone(); - - b.iter(|| { - let mut map = map.clone(); - map.retain(|k, _| *k % 2 == 0); - map - }); -} 
- -#[bench] -fn many_retain_indexmap_100_000(b: &mut Bencher) { - let map = IMAP_100K.clone(); - - b.iter(|| { - let mut map = map.clone(); - map.retain(|k, _| *k % 100 != 0); - map - }); -} - -#[bench] -fn many_retain_hashmap_100_000(b: &mut Bencher) { - let map = HMAP_100K.clone(); - - b.iter(|| { - let mut map = map.clone(); - map.retain(|k, _| *k % 100 != 0); - map - }); -} - -// simple sort impl for comparison -pub fn simple_sort(m: &mut IndexMap) { - let mut ordered: Vec<_> = m.drain(..).collect(); - ordered.sort_by(|left, right| left.0.cmp(&right.0)); - m.extend(ordered); -} - -#[bench] -fn indexmap_sort_s(b: &mut Bencher) { - let map = IMAP_SORT_S.clone(); - - // there's a map clone there, but it's still useful to profile this - b.iter(|| { - let mut map = map.clone(); - map.sort_keys(); - map - }); -} - -#[bench] -fn indexmap_simple_sort_s(b: &mut Bencher) { - let map = IMAP_SORT_S.clone(); - - // there's a map clone there, but it's still useful to profile this - b.iter(|| { - let mut map = map.clone(); - simple_sort(&mut map); - map - }); -} - -#[bench] -fn indexmap_sort_u32(b: &mut Bencher) { - let map = IMAP_SORT_U32.clone(); - - // there's a map clone there, but it's still useful to profile this - b.iter(|| { - let mut map = map.clone(); - map.sort_keys(); - map - }); -} - -#[bench] -fn indexmap_simple_sort_u32(b: &mut Bencher) { - let map = IMAP_SORT_U32.clone(); - - // there's a map clone there, but it's still useful to profile this - b.iter(|| { - let mut map = map.clone(); - simple_sort(&mut map); - map - }); -} - -// measure the fixed overhead of cloning in sort benchmarks -#[bench] -fn indexmap_clone_for_sort_s(b: &mut Bencher) { - let map = IMAP_SORT_S.clone(); - - b.iter(|| map.clone()); -} - -#[bench] -fn indexmap_clone_for_sort_u32(b: &mut Bencher) { - let map = IMAP_SORT_U32.clone(); - - b.iter(|| map.clone()); -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/benches/faststring.rs 
s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/benches/faststring.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/benches/faststring.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/benches/faststring.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,185 +0,0 @@ -#![feature(test)] - -extern crate test; - -use test::Bencher; - -use indexmap::IndexMap; - -use std::collections::HashMap; - -use rand::rngs::SmallRng; -use rand::seq::SliceRandom; -use rand::SeedableRng; - -use std::hash::{Hash, Hasher}; - -use std::borrow::Borrow; -use std::ops::Deref; - -/// Use a consistently seeded Rng for benchmark stability -fn small_rng() -> SmallRng { - let seed = u64::from_le_bytes(*b"indexmap"); - SmallRng::seed_from_u64(seed) -} - -#[derive(PartialEq, Eq, Copy, Clone)] -#[repr(transparent)] -pub struct OneShot(pub T); - -impl Hash for OneShot { - fn hash(&self, h: &mut H) { - h.write(self.0.as_bytes()) - } -} - -impl<'a, S> From<&'a S> for &'a OneShot -where - S: AsRef, -{ - fn from(s: &'a S) -> Self { - let s: &str = s.as_ref(); - unsafe { &*(s as *const str as *const OneShot) } - } -} - -impl Hash for OneShot { - fn hash(&self, h: &mut H) { - h.write(self.0.as_bytes()) - } -} - -impl Borrow> for OneShot { - fn borrow(&self) -> &OneShot { - <&OneShot>::from(&self.0) - } -} - -impl Deref for OneShot { - type Target = T; - fn deref(&self) -> &T { - &self.0 - } -} - -fn shuffled_keys(iter: I) -> Vec -where - I: IntoIterator, -{ - let mut v = Vec::from_iter(iter); - let mut rng = small_rng(); - v.shuffle(&mut rng); - v -} - -#[bench] -fn insert_hashmap_string_10_000(b: &mut Bencher) { - let c = 10_000; - b.iter(|| { - let mut map = HashMap::with_capacity(c); - for x in 0..c { - map.insert(x.to_string(), ()); - } - map - }); -} - -#[bench] -fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) { - let c = 10_000; - b.iter(|| { - let mut map = HashMap::with_capacity(c); - for x in 0..c { - map.insert(OneShot(x.to_string()), ()); - } - map - }); 
-} - -#[bench] -fn insert_indexmap_string_10_000(b: &mut Bencher) { - let c = 10_000; - b.iter(|| { - let mut map = IndexMap::with_capacity(c); - for x in 0..c { - map.insert(x.to_string(), ()); - } - map - }); -} - -#[bench] -fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) { - let c = 10_000; - let mut map = HashMap::with_capacity(c); - let keys = shuffled_keys(0..c); - for &key in &keys { - map.insert(key.to_string(), 1); - } - let lookups = (5000..c).map(|x| x.to_string()).collect::>(); - b.iter(|| { - let mut found = 0; - for key in &lookups { - found += map.get(key).is_some() as i32; - } - found - }); -} - -#[bench] -fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) { - let c = 10_000; - let mut map = HashMap::with_capacity(c); - let keys = shuffled_keys(0..c); - for &key in &keys { - map.insert(OneShot(key.to_string()), 1); - } - let lookups = (5000..c) - .map(|x| OneShot(x.to_string())) - .collect::>(); - b.iter(|| { - let mut found = 0; - for key in &lookups { - found += map.get(key).is_some() as i32; - } - found - }); -} - -#[bench] -fn lookup_indexmap_10_000_exist_string(b: &mut Bencher) { - let c = 10_000; - let mut map = IndexMap::with_capacity(c); - let keys = shuffled_keys(0..c); - for &key in &keys { - map.insert(key.to_string(), 1); - } - let lookups = (5000..c).map(|x| x.to_string()).collect::>(); - b.iter(|| { - let mut found = 0; - for key in &lookups { - found += map.get(key).is_some() as i32; - } - found - }); -} - -#[bench] -fn lookup_indexmap_10_000_exist_string_oneshot(b: &mut Bencher) { - let c = 10_000; - let mut map = IndexMap::with_capacity(c); - let keys = shuffled_keys(0..c); - for &key in &keys { - map.insert(OneShot(key.to_string()), 1); - } - let lookups = (5000..c) - .map(|x| OneShot(x.to_string())) - .collect::>(); - b.iter(|| { - let mut found = 0; - for key in &lookups { - found += map.get(key).is_some() as i32; - } - found - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/build.rs 
s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/build.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -fn main() { - // If "std" is explicitly requested, don't bother probing the target for it. - match std::env::var_os("CARGO_FEATURE_STD") { - Some(_) => autocfg::emit("has_std"), - None => autocfg::new().emit_sysroot_crate("std"), - } - autocfg::rerun_path("build.rs"); -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/Cargo.toml s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/Cargo.toml --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,108 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.56" -name = "indexmap" -version = "1.9.3" -description = "A hash table with consistent order and fast iteration." 
-documentation = "https://docs.rs/indexmap/" -readme = "README.md" -keywords = [ - "hashmap", - "no_std", -] -categories = [ - "data-structures", - "no-std", -] -license = "Apache-2.0 OR MIT" -repository = "https://github.com/bluss/indexmap" - -[package.metadata.release] -no-dev-version = true -tag-name = "{{version}}" - -[package.metadata.docs.rs] -features = [ - "arbitrary", - "quickcheck", - "serde-1", - "rayon", -] - -[profile.bench] -debug = true - -[lib] -bench = false - -[dependencies.arbitrary] -version = "1.0" -optional = true -default-features = false - -[dependencies.hashbrown] -version = "0.12" -features = ["raw"] -default-features = false - -[dependencies.quickcheck] -version = "1.0" -optional = true -default-features = false - -[dependencies.rayon] -version = "1.4.1" -optional = true - -[dependencies.rustc-rayon] -version = "0.5" -optional = true -package = "rustc-rayon" - -[dependencies.serde] -version = "1.0" -optional = true -default-features = false - -[dev-dependencies.fnv] -version = "1.0" - -[dev-dependencies.fxhash] -version = "0.2.1" - -[dev-dependencies.itertools] -version = "0.10" - -[dev-dependencies.lazy_static] -version = "1.3" - -[dev-dependencies.quickcheck] -version = "1.0" -default-features = false - -[dev-dependencies.rand] -version = "0.8" -features = ["small_rng"] - -[dev-dependencies.serde_derive] -version = "1.0" - -[build-dependencies.autocfg] -version = "1" - -[features] -serde-1 = ["serde"] -std = [] -test_debug = [] -test_low_transition_point = [] diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/LICENSE-MIT s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2016--2017 - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/README.md s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/README.md --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -# indexmap - -[![build status](https://github.com/bluss/indexmap/workflows/Continuous%20integration/badge.svg?branch=master)](https://github.com/bluss/indexmap/actions) -[![crates.io](https://img.shields.io/crates/v/indexmap.svg)](https://crates.io/crates/indexmap) -[![docs](https://docs.rs/indexmap/badge.svg)](https://docs.rs/indexmap) -[![rustc](https://img.shields.io/badge/rust-1.56%2B-orange.svg)](https://img.shields.io/badge/rust-1.56%2B-orange.svg) - -A pure-Rust hash table which preserves (in a limited sense) insertion order. - -This crate implements compact map and set data-structures, -where the iteration order of the keys is independent from their hash or -value. It preserves insertion order (except after removals), and it -allows lookup of entries by either hash table key or numerical index. - -Note: this crate was originally released under the name `ordermap`, -but it was renamed to `indexmap` to better reflect its features. - -# Background - -This was inspired by Python 3.6's new dict implementation (which remembers -the insertion order and is fast to iterate, and is compact in memory). - -Some of those features were translated to Rust, and some were not. The result -was indexmap, a hash table that has following properties: - -- Order is **independent of hash function** and hash values of keys. -- Fast to iterate. -- Indexed in compact space. -- Preserves insertion order **as long** as you don't call `.remove()`. -- Uses hashbrown for the inner table, just like Rust's libstd `HashMap` does. 
- -## Performance - -`IndexMap` derives a couple of performance facts directly from how it is constructed, -which is roughly: - -> A raw hash table of key-value indices, and a vector of key-value pairs. - -- Iteration is very fast since it is on the dense key-values. -- Removal is fast since it moves memory areas only in the table, - and uses a single swap in the vector. -- Lookup is fast-ish because the initial 7-bit hash lookup uses SIMD, and indices are - densely stored. Lookup also is slow-ish since the actual key-value pairs are stored - separately. (Visible when cpu caches size is limiting.) - -- In practice, `IndexMap` has been tested out as the hashmap in rustc in [PR45282] and - the performance was roughly on par across the whole workload. -- If you want the properties of `IndexMap`, or its strongest performance points - fits your workload, it might be the best hash table implementation. - -[PR45282]: https://github.com/rust-lang/rust/pull/45282 - -# Recent Changes - -See [RELEASES.md](https://github.com/bluss/indexmap/blob/master/RELEASES.md). diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/RELEASES.md s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/RELEASES.md --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/RELEASES.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/RELEASES.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,388 +0,0 @@ -- 1.9.3 - - - Bump the `rustc-rayon` dependency, for compiler use only. - -- 1.9.2 - - - `IndexMap` and `IndexSet` both implement `arbitrary::Arbitrary<'_>` and - `quickcheck::Arbitrary` if those optional dependency features are enabled. - -- 1.9.1 - - - The MSRV now allows Rust 1.56.0 as well. However, currently `hashbrown` - 0.12.1 requires 1.56.1, so users on 1.56.0 should downgrade that to 0.12.0 - until there is a later published version relaxing its requirement. - -- 1.9.0 - - - **MSRV**: Rust 1.56.1 or later is now required. 
- - - The `hashbrown` dependency has been updated to version 0.12. - - - `IterMut` and `ValuesMut` now implement `Debug`. - - - The new `IndexMap::shrink_to` and `IndexSet::shrink_to` methods shrink - the capacity with a lower bound. - - - The new `IndexMap::move_index` and `IndexSet::move_index` methods change - the position of an item from one index to another, shifting the items - between to accommodate the move. - -- 1.8.2 - - - Bump the `rustc-rayon` dependency, for compiler use only. - -- 1.8.1 - - - The new `IndexSet::replace_full` will return the index of the item along - with the replaced value, if any, by @zakcutner in PR [222]. - -[222]: https://github.com/bluss/indexmap/pull/222 - -- 1.8.0 - - - The new `IndexMap::into_keys` and `IndexMap::into_values` will consume - the map into keys or values, respectively, matching Rust 1.54's `HashMap` - methods, by @taiki-e in PR [195]. - - - More of the iterator types implement `Debug`, `ExactSizeIterator`, and - `FusedIterator`, by @cuviper in PR [196]. - - - `IndexMap` and `IndexSet` now implement rayon's `ParallelDrainRange`, - by @cuviper in PR [197]. - - - `IndexMap::with_hasher` and `IndexSet::with_hasher` are now `const` - functions, allowing static maps and sets, by @mwillsey in PR [203]. - - - `IndexMap` and `IndexSet` now implement `From` for arrays, matching - Rust 1.56's implementation for `HashMap`, by @rouge8 in PR [205]. - - - `IndexMap` and `IndexSet` now have methods `sort_unstable_keys`, - `sort_unstable_by`, `sorted_unstable_by`, and `par_*` equivalents, - which sort in-place without preserving the order of equal items, by - @bhgomes in PR [211]. 
- -[195]: https://github.com/bluss/indexmap/pull/195 -[196]: https://github.com/bluss/indexmap/pull/196 -[197]: https://github.com/bluss/indexmap/pull/197 -[203]: https://github.com/bluss/indexmap/pull/203 -[205]: https://github.com/bluss/indexmap/pull/205 -[211]: https://github.com/bluss/indexmap/pull/211 - -- 1.7.0 - - - **MSRV**: Rust 1.49 or later is now required. - - - The `hashbrown` dependency has been updated to version 0.11. - -- 1.6.2 - - - Fixed to match `std` behavior, `OccupiedEntry::key` now references the - existing key in the map instead of the lookup key, by @cuviper in PR [170]. - - - The new `Entry::or_insert_with_key` matches Rust 1.50's `Entry` method, - passing `&K` to the callback to create a value, by @cuviper in PR [175]. - -[170]: https://github.com/bluss/indexmap/pull/170 -[175]: https://github.com/bluss/indexmap/pull/175 - -- 1.6.1 - - - The new `serde_seq` module implements `IndexMap` serialization as a - sequence to ensure order is preserved, by @cuviper in PR [158]. - - - New methods on maps and sets work like the `Vec`/slice methods by the same name: - `truncate`, `split_off`, `first`, `first_mut`, `last`, `last_mut`, and - `swap_indices`, by @cuviper in PR [160]. - -[158]: https://github.com/bluss/indexmap/pull/158 -[160]: https://github.com/bluss/indexmap/pull/160 - -- 1.6.0 - - - **MSRV**: Rust 1.36 or later is now required. - - - The `hashbrown` dependency has been updated to version 0.9. - -- 1.5.2 - - - The new "std" feature will force the use of `std` for users that explicitly - want the default `S = RandomState`, bypassing the autodetection added in 1.3.0, - by @cuviper in PR [145]. - -[145]: https://github.com/bluss/indexmap/pull/145 - -- 1.5.1 - - - Values can now be indexed by their `usize` position by @cuviper in PR [132]. - - - Some of the generic bounds have been relaxed to match `std` by @cuviper in PR [141]. - - - `drain` now accepts any `R: RangeBounds` by @cuviper in PR [142]. 
- -[132]: https://github.com/bluss/indexmap/pull/132 -[141]: https://github.com/bluss/indexmap/pull/141 -[142]: https://github.com/bluss/indexmap/pull/142 - -- 1.5.0 - - - **MSRV**: Rust 1.32 or later is now required. - - - The inner hash table is now based on `hashbrown` by @cuviper in PR [131]. - This also completes the method `reserve` and adds `shrink_to_fit`. - - - Add new methods `get_key_value`, `remove_entry`, `swap_remove_entry`, - and `shift_remove_entry`, by @cuviper in PR [136] - - - `Clone::clone_from` reuses allocations by @cuviper in PR [125] - - - Add new method `reverse` by @linclelinkpart5 in PR [128] - -[125]: https://github.com/bluss/indexmap/pull/125 -[128]: https://github.com/bluss/indexmap/pull/128 -[131]: https://github.com/bluss/indexmap/pull/131 -[136]: https://github.com/bluss/indexmap/pull/136 - -- 1.4.0 - - - Add new method `get_index_of` by @Thermatrix in PR [115] and [120] - - - Fix build script rebuild-if-changed configuration to use "build.rs"; - fixes issue [123]. Fix by @cuviper. - - - Dev-dependencies (rand and quickcheck) have been updated. The crate's tests - now run using Rust 1.32 or later (MSRV for building the crate has not changed). - by @kjeremy and @bluss - -[123]: https://github.com/bluss/indexmap/issues/123 -[115]: https://github.com/bluss/indexmap/pull/115 -[120]: https://github.com/bluss/indexmap/pull/120 - -- 1.3.2 - - - Maintenance update to regenerate the published `Cargo.toml`. - -- 1.3.1 - - - Maintenance update for formatting and `autocfg` 1.0. - -- 1.3.0 - - - The deprecation messages in the previous version have been removed. - (The methods have not otherwise changed.) Docs for removal methods have been - improved. - - From Rust 1.36, this crate supports being built **without std**, requiring - `alloc` instead. This is enabled automatically when it is detected that - `std` is not available. There is no crate feature to enable/disable to - trigger this. The new build-dep `autocfg` enables this. 
- -- 1.2.0 - - - Plain `.remove()` now has a deprecation message, it informs the user - about picking one of the removal functions `swap_remove` and `shift_remove` - which have different performance and order semantics. - Plain `.remove()` will not be removed, the warning message and method - will remain until further. - - - Add new method `shift_remove` for order preserving removal on the map, - and `shift_take` for the corresponding operation on the set. - - - Add methods `swap_remove`, `swap_remove_entry` to `Entry`. - - - Fix indexset/indexmap to support full paths, like `indexmap::indexmap!()` - - - Internal improvements: fix warnings, deprecations and style lints - -- 1.1.0 - - - Added optional feature `"rayon"` that adds parallel iterator support - to `IndexMap` and `IndexSet` using Rayon. This includes all the regular - iterators in parallel versions, and parallel sort. - - - Implemented `Clone` for `map::{Iter, Keys, Values}` and - `set::{Difference, Intersection, Iter, SymmetricDifference, Union}` - - - Implemented `Debug` for `map::{Entry, IntoIter, Iter, Keys, Values}` and - `set::{Difference, Intersection, IntoIter, Iter, SymmetricDifference, Union}` - - - Serde trait `IntoDeserializer` are implemented for `IndexMap` and `IndexSet`. - - - Minimum Rust version requirement increased to Rust 1.30 for development builds. - -- 1.0.2 - - - The new methods `IndexMap::insert_full` and `IndexSet::insert_full` are - both like `insert` with the index included in the return value. - - - The new method `Entry::and_modify` can be used to modify occupied - entries, matching the new methods of `std` maps in Rust 1.26. - - - The new method `Entry::or_default` inserts a default value in unoccupied - entries, matching the new methods of `std` maps in Rust 1.28. - -- 1.0.1 - - - Document Rust version policy for the crate (see rustdoc) - -- 1.0.0 - - - This is the 1.0 release for `indexmap`! 
(the crate and datastructure - formerly known as “ordermapâ€) - - `OccupiedEntry::insert` changed its signature, to use `&mut self` for - the method receiver, matching the equivalent method for a standard - `HashMap`. Thanks to @dtolnay for finding this bug. - - The deprecated old names from ordermap were removed: `OrderMap`, - `OrderSet`, `ordermap!{}`, `orderset!{}`. Use the new `IndexMap` - etc names instead. - -- 0.4.1 - - - Renamed crate to `indexmap`; the `ordermap` crate is now deprecated - and the types `OrderMap/Set` now have a deprecation notice. - -- 0.4.0 - - - This is the last release series for this `ordermap` under that name, - because the crate is **going to be renamed** to `indexmap` (with types - `IndexMap`, `IndexSet`) and no change in functionality! - - The map and its associated structs moved into the `map` submodule of the - crate, so that the map and set are symmetric - - + The iterators, `Entry` and other structs are now under `ordermap::map::` - - - Internally refactored `OrderMap` so that all the main algorithms - (insertion, lookup, removal etc) that don't use the `S` parameter (the - hasher) are compiled without depending on `S`, which reduces generics bloat. - - - `Entry` no longer has a type parameter `S`, which is just like - the standard `HashMap`'s entry. - - - Minimum Rust version requirement increased to Rust 1.18 - -- 0.3.5 - - - Documentation improvements - -- 0.3.4 - - - The `.retain()` methods for `OrderMap` and `OrderSet` now - traverse the elements in order, and the retained elements **keep their order** - - Added new methods `.sort_by()`, `.sort_keys()` to `OrderMap` and - `.sort_by()`, `.sort()` to `OrderSet`. These methods allow you to - sort the maps in place efficiently. - -- 0.3.3 - - - Document insertion behaviour better by @lucab - - Updated dependences (no feature changes) by @ignatenkobrain - -- 0.3.2 - - - Add `OrderSet` by @cuviper! - - `OrderMap::drain` is now (too) a double ended iterator. 
- -- 0.3.1 - - - In all ordermap iterators, forward the `collect` method to the underlying - iterator as well. - - Add crates.io categories. - -- 0.3.0 - - - The methods `get_pair`, `get_pair_index` were both replaced by - `get_full` (and the same for the mutable case). - - Method `swap_remove_pair` replaced by `swap_remove_full`. - - Add trait `MutableKeys` for opt-in mutable key access. Mutable key access - is only possible through the methods of this extension trait. - - Add new trait `Equivalent` for key equivalence. This extends the - `Borrow` trait mechanism for `OrderMap::get` in a backwards compatible - way, just some minor type inference related issues may become apparent. - See [#10] for more information. - - Implement `Extend<(&K, &V)>` by @xfix. - -[#10]: https://github.com/bluss/ordermap/pull/10 - -- 0.2.13 - - - Fix deserialization to support custom hashers by @Techcable. - - Add methods `.index()` on the entry types by @garro95. - -- 0.2.12 - - - Add methods `.with_hasher()`, `.hasher()`. - -- 0.2.11 - - - Support `ExactSizeIterator` for the iterators. By @Binero. - - Use `Box<[Pos]>` internally, saving a word in the `OrderMap` struct. - - Serde support, with crate feature `"serde-1"`. By @xfix. - -- 0.2.10 - - - Add iterator `.drain(..)` by @stevej. - -- 0.2.9 - - - Add method `.is_empty()` by @overvenus. - - Implement `PartialEq, Eq` by @overvenus. - - Add method `.sorted_by()`. - -- 0.2.8 - - - Add iterators `.values()` and `.values_mut()`. - - Fix compatibility with 32-bit platforms. - -- 0.2.7 - - - Add `.retain()`. - -- 0.2.6 - - - Add `OccupiedEntry::remove_entry` and other minor entry methods, - so that it now has all the features of `HashMap`'s entries. - -- 0.2.5 - - - Improved `.pop()` slightly. - -- 0.2.4 - - - Improved performance of `.insert()` ([#3]) by @pczarn. - -[#3]: https://github.com/bluss/ordermap/pull/3 - -- 0.2.3 - - - Generalize `Entry` for now, so that it works on hashmaps with non-default - hasher. 
However, there's a lingering compat issue since libstd `HashMap` - does not parameterize its entries by the hasher (`S` typarm). - - Special case some iterator methods like `.nth()`. - -- 0.2.2 - - - Disable the verbose `Debug` impl by default. - -- 0.2.1 - - - Fix doc links and clarify docs. - -- 0.2.0 - - - Add more `HashMap` methods & compat with its API. - - Experimental support for `.entry()` (the simplest parts of the API). - - Add `.reserve()` (placeholder impl). - - Add `.remove()` as synonym for `.swap_remove()`. - - Changed `.insert()` to swap value if the entry already exists, and - return `Option`. - - Experimental support as an *indexed* hash map! Added methods - `.get_index()`, `.get_index_mut()`, `.swap_remove_index()`, - `.get_pair_index()`, `.get_pair_index_mut()`. - -- 0.1.2 - - - Implement the 32/32 split idea for `Pos` which improves cache utilization - and lookup performance. - -- 0.1.1 - - - Initial release. diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/arbitrary.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/arbitrary.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/arbitrary.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/arbitrary.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -#[cfg(feature = "arbitrary")] -mod impl_arbitrary { - use crate::{IndexMap, IndexSet}; - use arbitrary::{Arbitrary, Result, Unstructured}; - use core::hash::{BuildHasher, Hash}; - - impl<'a, K, V, S> Arbitrary<'a> for IndexMap - where - K: Arbitrary<'a> + Hash + Eq, - V: Arbitrary<'a>, - S: BuildHasher + Default, - { - fn arbitrary(u: &mut Unstructured<'a>) -> Result { - u.arbitrary_iter()?.collect() - } - - fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { - u.arbitrary_take_rest_iter()?.collect() - } - } - - impl<'a, T, S> Arbitrary<'a> for IndexSet - where - T: Arbitrary<'a> + Hash + Eq, - S: BuildHasher + Default, - { - fn arbitrary(u: &mut Unstructured<'a>) -> Result { - 
u.arbitrary_iter()?.collect() - } - - fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { - u.arbitrary_take_rest_iter()?.collect() - } - } -} - -#[cfg(feature = "quickcheck")] -mod impl_quickcheck { - use crate::{IndexMap, IndexSet}; - use alloc::boxed::Box; - use alloc::vec::Vec; - use core::hash::{BuildHasher, Hash}; - use quickcheck::{Arbitrary, Gen}; - - impl Arbitrary for IndexMap - where - K: Arbitrary + Hash + Eq, - V: Arbitrary, - S: BuildHasher + Default + Clone + 'static, - { - fn arbitrary(g: &mut Gen) -> Self { - Self::from_iter(Vec::arbitrary(g)) - } - - fn shrink(&self) -> Box> { - let vec = Vec::from_iter(self.clone()); - Box::new(vec.shrink().map(Self::from_iter)) - } - } - - impl Arbitrary for IndexSet - where - T: Arbitrary + Hash + Eq, - S: BuildHasher + Default + Clone + 'static, - { - fn arbitrary(g: &mut Gen) -> Self { - Self::from_iter(Vec::arbitrary(g)) - } - - fn shrink(&self) -> Box> { - let vec = Vec::from_iter(self.clone()); - Box::new(vec.shrink().map(Self::from_iter)) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/equivalent.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/equivalent.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/equivalent.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/equivalent.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -use core::borrow::Borrow; - -/// Key equivalence trait. -/// -/// This trait allows hash table lookup to be customized. -/// It has one blanket implementation that uses the regular `Borrow` solution, -/// just like `HashMap` and `BTreeMap` do, so that you can pass `&str` to lookup -/// into a map with `String` keys and so on. -/// -/// # Contract -/// -/// The implementor **must** hash like `K`, if it is hashable. -pub trait Equivalent { - /// Compare self to `key` and return `true` if they are equal. 
- fn equivalent(&self, key: &K) -> bool; -} - -impl Equivalent for Q -where - Q: Eq, - K: Borrow, -{ - #[inline] - fn equivalent(&self, key: &K) -> bool { - *self == *key.borrow() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/lib.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/lib.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,194 +0,0 @@ -// We *mostly* avoid unsafe code, but `map::core::raw` allows it to use `RawTable` buckets. -#![deny(unsafe_code)] -#![warn(rust_2018_idioms)] -#![doc(html_root_url = "https://docs.rs/indexmap/1/")] -#![no_std] - -//! [`IndexMap`] is a hash table where the iteration order of the key-value -//! pairs is independent of the hash values of the keys. -//! -//! [`IndexSet`] is a corresponding hash set using the same implementation and -//! with similar properties. -//! -//! [`IndexMap`]: map/struct.IndexMap.html -//! [`IndexSet`]: set/struct.IndexSet.html -//! -//! -//! ### Feature Highlights -//! -//! [`IndexMap`] and [`IndexSet`] are drop-in compatible with the std `HashMap` -//! and `HashSet`, but they also have some features of note: -//! -//! - The ordering semantics (see their documentation for details) -//! - Sorting methods and the [`.pop()`][IndexMap::pop] methods. -//! - The [`Equivalent`] trait, which offers more flexible equality definitions -//! between borrowed and owned versions of keys. -//! - The [`MutableKeys`][map::MutableKeys] trait, which gives opt-in mutable -//! access to hash map keys. -//! -//! ### Alternate Hashers -//! -//! [`IndexMap`] and [`IndexSet`] have a default hasher type `S = RandomState`, -//! just like the standard `HashMap` and `HashSet`, which is resistant to -//! HashDoS attacks but not the most performant. Type aliases can make it easier -//! to use alternate hashers: -//! -//! ``` -//! use fnv::FnvBuildHasher; -//! 
use fxhash::FxBuildHasher; -//! use indexmap::{IndexMap, IndexSet}; -//! -//! type FnvIndexMap = IndexMap; -//! type FnvIndexSet = IndexSet; -//! -//! type FxIndexMap = IndexMap; -//! type FxIndexSet = IndexSet; -//! -//! let std: IndexSet = (0..100).collect(); -//! let fnv: FnvIndexSet = (0..100).collect(); -//! let fx: FxIndexSet = (0..100).collect(); -//! assert_eq!(std, fnv); -//! assert_eq!(std, fx); -//! ``` -//! -//! ### Rust Version -//! -//! This version of indexmap requires Rust 1.56 or later. -//! -//! The indexmap 1.x release series will use a carefully considered version -//! upgrade policy, where in a later 1.x version, we will raise the minimum -//! required Rust version. -//! -//! ## No Standard Library Targets -//! -//! This crate supports being built without `std`, requiring -//! `alloc` instead. This is enabled automatically when it is detected that -//! `std` is not available. There is no crate feature to enable/disable to -//! trigger this. It can be tested by building for a std-less target. -//! -//! - Creating maps and sets using [`new`][IndexMap::new] and -//! [`with_capacity`][IndexMap::with_capacity] is unavailable without `std`. -//! Use methods [`IndexMap::default`][def], -//! [`with_hasher`][IndexMap::with_hasher], -//! [`with_capacity_and_hasher`][IndexMap::with_capacity_and_hasher] instead. -//! A no-std compatible hasher will be needed as well, for example -//! from the crate `twox-hash`. -//! - Macros [`indexmap!`] and [`indexset!`] are unavailable without `std`. -//! -//! 
[def]: map/struct.IndexMap.html#impl-Default - -extern crate alloc; - -#[cfg(has_std)] -#[macro_use] -extern crate std; - -use alloc::vec::{self, Vec}; - -mod arbitrary; -#[macro_use] -mod macros; -mod equivalent; -mod mutable_keys; -#[cfg(feature = "serde")] -mod serde; -#[cfg(feature = "serde")] -pub mod serde_seq; -mod util; - -pub mod map; -pub mod set; - -// Placed after `map` and `set` so new `rayon` methods on the types -// are documented after the "normal" methods. -#[cfg(feature = "rayon")] -mod rayon; - -#[cfg(feature = "rustc-rayon")] -mod rustc; - -pub use crate::equivalent::Equivalent; -pub use crate::map::IndexMap; -pub use crate::set::IndexSet; - -// shared private items - -/// Hash value newtype. Not larger than usize, since anything larger -/// isn't used for selecting position anyway. -#[derive(Clone, Copy, Debug, PartialEq)] -struct HashValue(usize); - -impl HashValue { - #[inline(always)] - fn get(self) -> u64 { - self.0 as u64 - } -} - -#[derive(Copy, Debug)] -struct Bucket { - hash: HashValue, - key: K, - value: V, -} - -impl Clone for Bucket -where - K: Clone, - V: Clone, -{ - fn clone(&self) -> Self { - Bucket { - hash: self.hash, - key: self.key.clone(), - value: self.value.clone(), - } - } - - fn clone_from(&mut self, other: &Self) { - self.hash = other.hash; - self.key.clone_from(&other.key); - self.value.clone_from(&other.value); - } -} - -impl Bucket { - // field accessors -- used for `f` instead of closures in `.map(f)` - fn key_ref(&self) -> &K { - &self.key - } - fn value_ref(&self) -> &V { - &self.value - } - fn value_mut(&mut self) -> &mut V { - &mut self.value - } - fn key(self) -> K { - self.key - } - fn value(self) -> V { - self.value - } - fn key_value(self) -> (K, V) { - (self.key, self.value) - } - fn refs(&self) -> (&K, &V) { - (&self.key, &self.value) - } - fn ref_mut(&mut self) -> (&K, &mut V) { - (&self.key, &mut self.value) - } - fn muts(&mut self) -> (&mut K, &mut V) { - (&mut self.key, &mut self.value) - } -} - -trait 
Entries { - type Entry; - fn into_entries(self) -> Vec; - fn as_entries(&self) -> &[Self::Entry]; - fn as_entries_mut(&mut self) -> &mut [Self::Entry]; - fn with_entries(&mut self, f: F) - where - F: FnOnce(&mut [Self::Entry]); -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/macros.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/macros.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/macros.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/macros.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,178 +0,0 @@ -#[cfg(has_std)] -#[macro_export] -/// Create an `IndexMap` from a list of key-value pairs -/// -/// ## Example -/// -/// ``` -/// use indexmap::indexmap; -/// -/// let map = indexmap!{ -/// "a" => 1, -/// "b" => 2, -/// }; -/// assert_eq!(map["a"], 1); -/// assert_eq!(map["b"], 2); -/// assert_eq!(map.get("c"), None); -/// -/// // "a" is the first key -/// assert_eq!(map.keys().next(), Some(&"a")); -/// ``` -macro_rules! indexmap { - (@single $($x:tt)*) => (()); - (@count $($rest:expr),*) => (<[()]>::len(&[$($crate::indexmap!(@single $rest)),*])); - - ($($key:expr => $value:expr,)+) => { $crate::indexmap!($($key => $value),+) }; - ($($key:expr => $value:expr),*) => { - { - let _cap = $crate::indexmap!(@count $($key),*); - let mut _map = $crate::IndexMap::with_capacity(_cap); - $( - _map.insert($key, $value); - )* - _map - } - }; -} - -#[cfg(has_std)] -#[macro_export] -/// Create an `IndexSet` from a list of values -/// -/// ## Example -/// -/// ``` -/// use indexmap::indexset; -/// -/// let set = indexset!{ -/// "a", -/// "b", -/// }; -/// assert!(set.contains("a")); -/// assert!(set.contains("b")); -/// assert!(!set.contains("c")); -/// -/// // "a" is the first value -/// assert_eq!(set.iter().next(), Some(&"a")); -/// ``` -macro_rules! 
indexset { - (@single $($x:tt)*) => (()); - (@count $($rest:expr),*) => (<[()]>::len(&[$($crate::indexset!(@single $rest)),*])); - - ($($value:expr,)+) => { $crate::indexset!($($value),+) }; - ($($value:expr),*) => { - { - let _cap = $crate::indexset!(@count $($value),*); - let mut _set = $crate::IndexSet::with_capacity(_cap); - $( - _set.insert($value); - )* - _set - } - }; -} - -// generate all the Iterator methods by just forwarding to the underlying -// self.iter and mapping its element. -macro_rules! iterator_methods { - // $map_elt is the mapping function from the underlying iterator's element - // same mapping function for both options and iterators - ($map_elt:expr) => { - fn next(&mut self) -> Option { - self.iter.next().map($map_elt) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn count(self) -> usize { - self.iter.len() - } - - fn nth(&mut self, n: usize) -> Option { - self.iter.nth(n).map($map_elt) - } - - fn last(mut self) -> Option { - self.next_back() - } - - fn collect(self) -> C - where - C: FromIterator, - { - // NB: forwarding this directly to standard iterators will - // allow it to leverage unstable traits like `TrustedLen`. - self.iter.map($map_elt).collect() - } - }; -} - -macro_rules! double_ended_iterator_methods { - // $map_elt is the mapping function from the underlying iterator's element - // same mapping function for both options and iterators - ($map_elt:expr) => { - fn next_back(&mut self) -> Option { - self.iter.next_back().map($map_elt) - } - - fn nth_back(&mut self, n: usize) -> Option { - self.iter.nth_back(n).map($map_elt) - } - }; -} - -// generate `ParallelIterator` methods by just forwarding to the underlying -// self.entries and mapping its elements. -#[cfg(any(feature = "rayon", feature = "rustc-rayon"))] -macro_rules! 
parallel_iterator_methods { - // $map_elt is the mapping function from the underlying iterator's element - ($map_elt:expr) => { - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - self.entries - .into_par_iter() - .map($map_elt) - .drive_unindexed(consumer) - } - - // NB: This allows indexed collection, e.g. directly into a `Vec`, but the - // underlying iterator must really be indexed. We should remove this if we - // start having tombstones that must be filtered out. - fn opt_len(&self) -> Option { - Some(self.entries.len()) - } - }; -} - -// generate `IndexedParallelIterator` methods by just forwarding to the underlying -// self.entries and mapping its elements. -#[cfg(any(feature = "rayon", feature = "rustc-rayon"))] -macro_rules! indexed_parallel_iterator_methods { - // $map_elt is the mapping function from the underlying iterator's element - ($map_elt:expr) => { - fn drive(self, consumer: C) -> C::Result - where - C: Consumer, - { - self.entries.into_par_iter().map($map_elt).drive(consumer) - } - - fn len(&self) -> usize { - self.entries.len() - } - - fn with_producer(self, callback: CB) -> CB::Output - where - CB: ProducerCallback, - { - self.entries - .into_par_iter() - .map($map_elt) - .with_producer(callback) - } - }; -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/map/core/raw.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/map/core/raw.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/map/core/raw.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/map/core/raw.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,191 +0,0 @@ -#![allow(unsafe_code)] -//! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`, -//! mostly in dealing with its bucket "pointers". 
- -use super::{equivalent, Bucket, Entry, HashValue, IndexMapCore, VacantEntry}; -use core::fmt; -use core::mem::replace; -use hashbrown::raw::RawTable; - -type RawBucket = hashbrown::raw::Bucket; - -/// Inserts many entries into a raw table without reallocating. -/// -/// ***Panics*** if there is not sufficient capacity already. -pub(super) fn insert_bulk_no_grow(indices: &mut RawTable, entries: &[Bucket]) { - assert!(indices.capacity() - indices.len() >= entries.len()); - for entry in entries { - // SAFETY: we asserted that sufficient capacity exists for all entries. - unsafe { - indices.insert_no_grow(entry.hash.get(), indices.len()); - } - } -} - -pub(super) struct DebugIndices<'a>(pub &'a RawTable); -impl fmt::Debug for DebugIndices<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // SAFETY: we're not letting any of the buckets escape this function - let indices = unsafe { self.0.iter().map(|raw_bucket| raw_bucket.read()) }; - f.debug_list().entries(indices).finish() - } -} - -impl IndexMapCore { - /// Sweep the whole table to erase indices start..end - pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) { - // SAFETY: we're not letting any of the buckets escape this function - unsafe { - let offset = end - start; - for bucket in self.indices.iter() { - let i = bucket.read(); - if i >= end { - bucket.write(i - offset); - } else if i >= start { - self.indices.erase(bucket); - } - } - } - } - - pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V> - where - K: Eq, - { - let eq = equivalent(&key, &self.entries); - match self.indices.find(hash.get(), eq) { - // SAFETY: The entry is created with a live raw bucket, at the same time - // we have a &mut reference to the map, so it can not be modified further. 
- Some(raw_bucket) => Entry::Occupied(OccupiedEntry { - map: self, - raw_bucket, - key, - }), - None => Entry::Vacant(VacantEntry { - map: self, - hash, - key, - }), - } - } - - pub(super) fn indices_mut(&mut self) -> impl Iterator { - // SAFETY: we're not letting any of the buckets escape this function, - // only the item references that are appropriately bound to `&mut self`. - unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) } - } - - /// Return the raw bucket for the given index - fn find_index(&self, index: usize) -> RawBucket { - // We'll get a "nice" bounds-check from indexing `self.entries`, - // and then we expect to find it in the table as well. - let hash = self.entries[index].hash.get(); - self.indices - .find(hash, move |&i| i == index) - .expect("index not found") - } - - pub(crate) fn swap_indices(&mut self, a: usize, b: usize) { - // SAFETY: Can't take two `get_mut` references from one table, so we - // must use raw buckets to do the swap. This is still safe because we - // are locally sure they won't dangle, and we write them individually. - unsafe { - let raw_bucket_a = self.find_index(a); - let raw_bucket_b = self.find_index(b); - raw_bucket_a.write(b); - raw_bucket_b.write(a); - } - self.entries.swap(a, b); - } -} - -/// A view into an occupied entry in a `IndexMap`. -/// It is part of the [`Entry`] enum. -/// -/// [`Entry`]: enum.Entry.html -// SAFETY: The lifetime of the map reference also constrains the raw bucket, -// which is essentially a raw pointer into the map indices. -pub struct OccupiedEntry<'a, K, V> { - map: &'a mut IndexMapCore, - raw_bucket: RawBucket, - key: K, -} - -// `hashbrown::raw::Bucket` is only `Send`, not `Sync`. -// SAFETY: `&self` only accesses the bucket to read it. -unsafe impl Sync for OccupiedEntry<'_, K, V> {} - -// The parent module also adds methods that don't threaten the unsafe encapsulation. -impl<'a, K, V> OccupiedEntry<'a, K, V> { - /// Gets a reference to the entry's key in the map. 
- /// - /// Note that this is not the key that was used to find the entry. There may be an observable - /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like - /// extra fields or the memory address of an allocation. - pub fn key(&self) -> &K { - &self.map.entries[self.index()].key - } - - /// Gets a reference to the entry's value in the map. - pub fn get(&self) -> &V { - &self.map.entries[self.index()].value - } - - /// Gets a mutable reference to the entry's value in the map. - /// - /// If you need a reference which may outlive the destruction of the - /// `Entry` value, see `into_mut`. - pub fn get_mut(&mut self) -> &mut V { - let index = self.index(); - &mut self.map.entries[index].value - } - - /// Put the new key in the occupied entry's key slot - pub(crate) fn replace_key(self) -> K { - let index = self.index(); - let old_key = &mut self.map.entries[index].key; - replace(old_key, self.key) - } - - /// Return the index of the key-value pair - #[inline] - pub fn index(&self) -> usize { - // SAFETY: we have &mut map keep keeping the bucket stable - unsafe { self.raw_bucket.read() } - } - - /// Converts into a mutable reference to the entry's value in the map, - /// with a lifetime bound to the map itself. - pub fn into_mut(self) -> &'a mut V { - let index = self.index(); - &mut self.map.entries[index].value - } - - /// Remove and return the key, value pair stored in the map for this entry - /// - /// Like `Vec::swap_remove`, the pair is removed by swapping it with the - /// last element of the map and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Computes in **O(1)** time (average). 
- pub fn swap_remove_entry(self) -> (K, V) { - // SAFETY: This is safe because it can only happen once (self is consumed) - // and map.indices have not been modified since entry construction - let index = unsafe { self.map.indices.remove(self.raw_bucket) }; - self.map.swap_remove_finish(index) - } - - /// Remove and return the key, value pair stored in the map for this entry - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove_entry(self) -> (K, V) { - // SAFETY: This is safe because it can only happen once (self is consumed) - // and map.indices have not been modified since entry construction - let index = unsafe { self.map.indices.remove(self.raw_bucket) }; - self.map.shift_remove_finish(index) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/map/core.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/map/core.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/map/core.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/map/core.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,700 +0,0 @@ -//! This is the core implementation that doesn't depend on the hasher at all. -//! -//! The methods of `IndexMapCore` don't use any Hash properties of K. -//! -//! It's cleaner to separate them out, then the compiler checks that we are not -//! using Hash at all in these methods. -//! -//! However, we should probably not let this show in the public API or docs. 
- -mod raw; - -use hashbrown::raw::RawTable; - -use crate::vec::{Drain, Vec}; -use core::cmp; -use core::fmt; -use core::mem::replace; -use core::ops::RangeBounds; - -use crate::equivalent::Equivalent; -use crate::util::simplify_range; -use crate::{Bucket, Entries, HashValue}; - -/// Core of the map that does not depend on S -pub(crate) struct IndexMapCore { - /// indices mapping from the entry hash to its index. - indices: RawTable, - /// entries is a dense vec of entries in their order. - entries: Vec>, -} - -#[inline(always)] -fn get_hash(entries: &[Bucket]) -> impl Fn(&usize) -> u64 + '_ { - move |&i| entries[i].hash.get() -} - -#[inline] -fn equivalent<'a, K, V, Q: ?Sized + Equivalent>( - key: &'a Q, - entries: &'a [Bucket], -) -> impl Fn(&usize) -> bool + 'a { - move |&i| Q::equivalent(key, &entries[i].key) -} - -#[inline] -fn erase_index(table: &mut RawTable, hash: HashValue, index: usize) { - let erased = table.erase_entry(hash.get(), move |&i| i == index); - debug_assert!(erased); -} - -#[inline] -fn update_index(table: &mut RawTable, hash: HashValue, old: usize, new: usize) { - let index = table - .get_mut(hash.get(), move |&i| i == old) - .expect("index not found"); - *index = new; -} - -impl Clone for IndexMapCore -where - K: Clone, - V: Clone, -{ - fn clone(&self) -> Self { - let indices = self.indices.clone(); - let mut entries = Vec::with_capacity(indices.capacity()); - entries.clone_from(&self.entries); - IndexMapCore { indices, entries } - } - - fn clone_from(&mut self, other: &Self) { - let hasher = get_hash(&other.entries); - self.indices.clone_from_with_hasher(&other.indices, hasher); - if self.entries.capacity() < other.entries.len() { - // If we must resize, match the indices capacity - self.reserve_entries(); - } - self.entries.clone_from(&other.entries); - } -} - -impl fmt::Debug for IndexMapCore -where - K: fmt::Debug, - V: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("IndexMapCore") - 
.field("indices", &raw::DebugIndices(&self.indices)) - .field("entries", &self.entries) - .finish() - } -} - -impl Entries for IndexMapCore { - type Entry = Bucket; - - #[inline] - fn into_entries(self) -> Vec { - self.entries - } - - #[inline] - fn as_entries(&self) -> &[Self::Entry] { - &self.entries - } - - #[inline] - fn as_entries_mut(&mut self) -> &mut [Self::Entry] { - &mut self.entries - } - - fn with_entries(&mut self, f: F) - where - F: FnOnce(&mut [Self::Entry]), - { - f(&mut self.entries); - self.rebuild_hash_table(); - } -} - -impl IndexMapCore { - #[inline] - pub(crate) const fn new() -> Self { - IndexMapCore { - indices: RawTable::new(), - entries: Vec::new(), - } - } - - #[inline] - pub(crate) fn with_capacity(n: usize) -> Self { - IndexMapCore { - indices: RawTable::with_capacity(n), - entries: Vec::with_capacity(n), - } - } - - #[inline] - pub(crate) fn len(&self) -> usize { - self.indices.len() - } - - #[inline] - pub(crate) fn capacity(&self) -> usize { - cmp::min(self.indices.capacity(), self.entries.capacity()) - } - - pub(crate) fn clear(&mut self) { - self.indices.clear(); - self.entries.clear(); - } - - pub(crate) fn truncate(&mut self, len: usize) { - if len < self.len() { - self.erase_indices(len, self.entries.len()); - self.entries.truncate(len); - } - } - - pub(crate) fn drain(&mut self, range: R) -> Drain<'_, Bucket> - where - R: RangeBounds, - { - let range = simplify_range(range, self.entries.len()); - self.erase_indices(range.start, range.end); - self.entries.drain(range) - } - - #[cfg(feature = "rayon")] - pub(crate) fn par_drain(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket> - where - K: Send, - V: Send, - R: RangeBounds, - { - use rayon::iter::ParallelDrainRange; - let range = simplify_range(range, self.entries.len()); - self.erase_indices(range.start, range.end); - self.entries.par_drain(range) - } - - pub(crate) fn split_off(&mut self, at: usize) -> Self { - assert!(at <= self.entries.len()); - self.erase_indices(at, 
self.entries.len()); - let entries = self.entries.split_off(at); - - let mut indices = RawTable::with_capacity(entries.len()); - raw::insert_bulk_no_grow(&mut indices, &entries); - Self { indices, entries } - } - - /// Reserve capacity for `additional` more key-value pairs. - pub(crate) fn reserve(&mut self, additional: usize) { - self.indices.reserve(additional, get_hash(&self.entries)); - self.reserve_entries(); - } - - /// Reserve entries capacity to match the indices - fn reserve_entries(&mut self) { - let additional = self.indices.capacity() - self.entries.len(); - self.entries.reserve_exact(additional); - } - - /// Shrink the capacity of the map with a lower bound - pub(crate) fn shrink_to(&mut self, min_capacity: usize) { - self.indices - .shrink_to(min_capacity, get_hash(&self.entries)); - self.entries.shrink_to(min_capacity); - } - - /// Remove the last key-value pair - pub(crate) fn pop(&mut self) -> Option<(K, V)> { - if let Some(entry) = self.entries.pop() { - let last = self.entries.len(); - erase_index(&mut self.indices, entry.hash, last); - Some((entry.key, entry.value)) - } else { - None - } - } - - /// Append a key-value pair, *without* checking whether it already exists, - /// and return the pair's new index. - fn push(&mut self, hash: HashValue, key: K, value: V) -> usize { - let i = self.entries.len(); - self.indices.insert(hash.get(), i, get_hash(&self.entries)); - if i == self.entries.capacity() { - // Reserve our own capacity synced to the indices, - // rather than letting `Vec::push` just double it. 
- self.reserve_entries(); - } - self.entries.push(Bucket { hash, key, value }); - i - } - - /// Return the index in `entries` where an equivalent key can be found - pub(crate) fn get_index_of(&self, hash: HashValue, key: &Q) -> Option - where - Q: ?Sized + Equivalent, - { - let eq = equivalent(key, &self.entries); - self.indices.get(hash.get(), eq).copied() - } - - pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option) - where - K: Eq, - { - match self.get_index_of(hash, &key) { - Some(i) => (i, Some(replace(&mut self.entries[i].value, value))), - None => (self.push(hash, key, value), None), - } - } - - /// Remove an entry by shifting all entries that follow it - pub(crate) fn shift_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> - where - Q: ?Sized + Equivalent, - { - let eq = equivalent(key, &self.entries); - match self.indices.remove_entry(hash.get(), eq) { - Some(index) => { - let (key, value) = self.shift_remove_finish(index); - Some((index, key, value)) - } - None => None, - } - } - - /// Remove an entry by shifting all entries that follow it - pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { - match self.entries.get(index) { - Some(entry) => { - erase_index(&mut self.indices, entry.hash, index); - Some(self.shift_remove_finish(index)) - } - None => None, - } - } - - /// Remove an entry by shifting all entries that follow it - /// - /// The index should already be removed from `self.indices`. - fn shift_remove_finish(&mut self, index: usize) -> (K, V) { - // Correct indices that point to the entries that followed the removed entry. - self.decrement_indices(index + 1, self.entries.len()); - - // Use Vec::remove to actually remove the entry. - let entry = self.entries.remove(index); - (entry.key, entry.value) - } - - /// Decrement all indices in the range `start..end`. - /// - /// The index `start - 1` should not exist in `self.indices`. 
- /// All entries should still be in their original positions. - fn decrement_indices(&mut self, start: usize, end: usize) { - // Use a heuristic between a full sweep vs. a `find()` for every shifted item. - let shifted_entries = &self.entries[start..end]; - if shifted_entries.len() > self.indices.buckets() / 2 { - // Shift all indices in range. - for i in self.indices_mut() { - if start <= *i && *i < end { - *i -= 1; - } - } - } else { - // Find each entry in range to shift its index. - for (i, entry) in (start..end).zip(shifted_entries) { - update_index(&mut self.indices, entry.hash, i, i - 1); - } - } - } - - /// Increment all indices in the range `start..end`. - /// - /// The index `end` should not exist in `self.indices`. - /// All entries should still be in their original positions. - fn increment_indices(&mut self, start: usize, end: usize) { - // Use a heuristic between a full sweep vs. a `find()` for every shifted item. - let shifted_entries = &self.entries[start..end]; - if shifted_entries.len() > self.indices.buckets() / 2 { - // Shift all indices in range. - for i in self.indices_mut() { - if start <= *i && *i < end { - *i += 1; - } - } - } else { - // Find each entry in range to shift its index, updated in reverse so - // we never have duplicated indices that might have a hash collision. - for (i, entry) in (start..end).zip(shifted_entries).rev() { - update_index(&mut self.indices, entry.hash, i, i + 1); - } - } - } - - pub(super) fn move_index(&mut self, from: usize, to: usize) { - let from_hash = self.entries[from].hash; - if from != to { - // Use a sentinal index so other indices don't collide. - update_index(&mut self.indices, from_hash, from, usize::MAX); - - // Update all other indices and rotate the entry positions. 
- if from < to { - self.decrement_indices(from + 1, to + 1); - self.entries[from..=to].rotate_left(1); - } else if to < from { - self.increment_indices(to, from); - self.entries[to..=from].rotate_right(1); - } - - // Change the sentinal index to its final position. - update_index(&mut self.indices, from_hash, usize::MAX, to); - } - } - - /// Remove an entry by swapping it with the last - pub(crate) fn swap_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> - where - Q: ?Sized + Equivalent, - { - let eq = equivalent(key, &self.entries); - match self.indices.remove_entry(hash.get(), eq) { - Some(index) => { - let (key, value) = self.swap_remove_finish(index); - Some((index, key, value)) - } - None => None, - } - } - - /// Remove an entry by swapping it with the last - pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { - match self.entries.get(index) { - Some(entry) => { - erase_index(&mut self.indices, entry.hash, index); - Some(self.swap_remove_finish(index)) - } - None => None, - } - } - - /// Finish removing an entry by swapping it with the last - /// - /// The index should already be removed from `self.indices`. - fn swap_remove_finish(&mut self, index: usize) -> (K, V) { - // use swap_remove, but then we need to update the index that points - // to the other entry that has to move - let entry = self.entries.swap_remove(index); - - // correct index that points to the entry that had to swap places - if let Some(entry) = self.entries.get(index) { - // was not last element - // examine new element in `index` and find it in indices - let last = self.entries.len(); - update_index(&mut self.indices, entry.hash, last, index); - } - - (entry.key, entry.value) - } - - /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..` - /// - /// All of these items should still be at their original location in `entries`. - /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`. 
- fn erase_indices(&mut self, start: usize, end: usize) { - let (init, shifted_entries) = self.entries.split_at(end); - let (start_entries, erased_entries) = init.split_at(start); - - let erased = erased_entries.len(); - let shifted = shifted_entries.len(); - let half_capacity = self.indices.buckets() / 2; - - // Use a heuristic between different strategies - if erased == 0 { - // Degenerate case, nothing to do - } else if start + shifted < half_capacity && start < erased { - // Reinsert everything, as there are few kept indices - self.indices.clear(); - - // Reinsert stable indices, then shifted indices - raw::insert_bulk_no_grow(&mut self.indices, start_entries); - raw::insert_bulk_no_grow(&mut self.indices, shifted_entries); - } else if erased + shifted < half_capacity { - // Find each affected index, as there are few to adjust - - // Find erased indices - for (i, entry) in (start..).zip(erased_entries) { - erase_index(&mut self.indices, entry.hash, i); - } - - // Find shifted indices - for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) { - update_index(&mut self.indices, entry.hash, old, new); - } - } else { - // Sweep the whole table for adjustments - self.erase_indices_sweep(start, end); - } - - debug_assert_eq!(self.indices.len(), start + shifted); - } - - pub(crate) fn retain_in_order(&mut self, mut keep: F) - where - F: FnMut(&mut K, &mut V) -> bool, - { - // FIXME: This could use Vec::retain_mut with MSRV 1.61. - // Like Vec::retain in self.entries, but with mutable K and V. - // We swap-shift all the items we want to keep, truncate the rest, - // then rebuild the raw hash table with the new indexes. 
- let len = self.entries.len(); - let mut n_deleted = 0; - for i in 0..len { - let will_keep = { - let entry = &mut self.entries[i]; - keep(&mut entry.key, &mut entry.value) - }; - if !will_keep { - n_deleted += 1; - } else if n_deleted > 0 { - self.entries.swap(i - n_deleted, i); - } - } - if n_deleted > 0 { - self.entries.truncate(len - n_deleted); - self.rebuild_hash_table(); - } - } - - fn rebuild_hash_table(&mut self) { - self.indices.clear(); - raw::insert_bulk_no_grow(&mut self.indices, &self.entries); - } - - pub(crate) fn reverse(&mut self) { - self.entries.reverse(); - - // No need to save hash indices, can easily calculate what they should - // be, given that this is an in-place reversal. - let len = self.entries.len(); - for i in self.indices_mut() { - *i = len - *i - 1; - } - } -} - -/// Entry for an existing key-value pair or a vacant location to -/// insert one. -pub enum Entry<'a, K, V> { - /// Existing slot with equivalent key. - Occupied(OccupiedEntry<'a, K, V>), - /// Vacant slot (no equivalent key in the map). - Vacant(VacantEntry<'a, K, V>), -} - -impl<'a, K, V> Entry<'a, K, V> { - /// Inserts the given default value in the entry if it is vacant and returns a mutable - /// reference to it. Otherwise a mutable reference to an already existent value is returned. - /// - /// Computes in **O(1)** time (amortized average). - pub fn or_insert(self, default: V) -> &'a mut V { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(default), - } - } - - /// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable - /// reference to it. Otherwise a mutable reference to an already existent value is returned. - /// - /// Computes in **O(1)** time (amortized average). 
- pub fn or_insert_with(self, call: F) -> &'a mut V - where - F: FnOnce() -> V, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(call()), - } - } - - /// Inserts the result of the `call` function with a reference to the entry's key if it is - /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to - /// an already existent value is returned. - /// - /// Computes in **O(1)** time (amortized average). - pub fn or_insert_with_key(self, call: F) -> &'a mut V - where - F: FnOnce(&K) -> V, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => { - let value = call(&entry.key); - entry.insert(value) - } - } - } - - /// Gets a reference to the entry's key, either within the map if occupied, - /// or else the new key that was used to find the entry. - pub fn key(&self) -> &K { - match *self { - Entry::Occupied(ref entry) => entry.key(), - Entry::Vacant(ref entry) => entry.key(), - } - } - - /// Return the index where the key-value pair exists or will be inserted. - pub fn index(&self) -> usize { - match *self { - Entry::Occupied(ref entry) => entry.index(), - Entry::Vacant(ref entry) => entry.index(), - } - } - - /// Modifies the entry if it is occupied. - pub fn and_modify(self, f: F) -> Self - where - F: FnOnce(&mut V), - { - match self { - Entry::Occupied(mut o) => { - f(o.get_mut()); - Entry::Occupied(o) - } - x => x, - } - } - - /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable - /// reference to it. Otherwise a mutable reference to an already existent value is returned. - /// - /// Computes in **O(1)** time (amortized average). 
- pub fn or_default(self) -> &'a mut V - where - V: Default, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(V::default()), - } - } -} - -impl fmt::Debug for Entry<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(), - Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(), - } - } -} - -pub use self::raw::OccupiedEntry; - -// Extra methods that don't threaten the unsafe encapsulation. -impl OccupiedEntry<'_, K, V> { - /// Sets the value of the entry to `value`, and returns the entry's old value. - pub fn insert(&mut self, value: V) -> V { - replace(self.get_mut(), value) - } - - /// Remove the key, value pair stored in the map for this entry, and return the value. - /// - /// **NOTE:** This is equivalent to `.swap_remove()`. - pub fn remove(self) -> V { - self.swap_remove() - } - - /// Remove the key, value pair stored in the map for this entry, and return the value. - /// - /// Like `Vec::swap_remove`, the pair is removed by swapping it with the - /// last element of the map and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Computes in **O(1)** time (average). - pub fn swap_remove(self) -> V { - self.swap_remove_entry().1 - } - - /// Remove the key, value pair stored in the map for this entry, and return the value. - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove(self) -> V { - self.shift_remove_entry().1 - } - - /// Remove and return the key, value pair stored in the map for this entry - /// - /// **NOTE:** This is equivalent to `.swap_remove_entry()`. 
- pub fn remove_entry(self) -> (K, V) { - self.swap_remove_entry() - } -} - -impl fmt::Debug for OccupiedEntry<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct(stringify!(OccupiedEntry)) - .field("key", self.key()) - .field("value", self.get()) - .finish() - } -} - -/// A view into a vacant entry in a `IndexMap`. -/// It is part of the [`Entry`] enum. -/// -/// [`Entry`]: enum.Entry.html -pub struct VacantEntry<'a, K, V> { - map: &'a mut IndexMapCore, - hash: HashValue, - key: K, -} - -impl<'a, K, V> VacantEntry<'a, K, V> { - /// Gets a reference to the key that was used to find the entry. - pub fn key(&self) -> &K { - &self.key - } - - /// Takes ownership of the key, leaving the entry vacant. - pub fn into_key(self) -> K { - self.key - } - - /// Return the index where the key-value pair will be inserted. - pub fn index(&self) -> usize { - self.map.len() - } - - /// Inserts the entry's key and the given value into the map, and returns a mutable reference - /// to the value. - pub fn insert(self, value: V) -> &'a mut V { - let i = self.map.push(self.hash, self.key, value); - &mut self.map.entries[i].value - } -} - -impl fmt::Debug for VacantEntry<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple(stringify!(VacantEntry)) - .field(self.key()) - .finish() - } -} - -#[test] -fn assert_send_sync() { - fn assert_send_sync() {} - assert_send_sync::>(); - assert_send_sync::>(); -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/map.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/map.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1947 +0,0 @@ -//! `IndexMap` is a hash table where the iteration order of the key-value -//! pairs is independent of the hash values of the keys. 
- -mod core; - -pub use crate::mutable_keys::MutableKeys; - -#[cfg(feature = "rayon")] -pub use crate::rayon::map as rayon; - -use crate::vec::{self, Vec}; -use ::core::cmp::Ordering; -use ::core::fmt; -use ::core::hash::{BuildHasher, Hash, Hasher}; -use ::core::iter::FusedIterator; -use ::core::ops::{Index, IndexMut, RangeBounds}; -use ::core::slice::{Iter as SliceIter, IterMut as SliceIterMut}; - -#[cfg(has_std)] -use std::collections::hash_map::RandomState; - -use self::core::IndexMapCore; -use crate::equivalent::Equivalent; -use crate::util::third; -use crate::{Bucket, Entries, HashValue}; - -pub use self::core::{Entry, OccupiedEntry, VacantEntry}; - -/// A hash table where the iteration order of the key-value pairs is independent -/// of the hash values of the keys. -/// -/// The interface is closely compatible with the standard `HashMap`, but also -/// has additional features. -/// -/// # Order -/// -/// The key-value pairs have a consistent order that is determined by -/// the sequence of insertion and removal calls on the map. The order does -/// not depend on the keys or the hash function at all. -/// -/// All iterators traverse the map in *the order*. -/// -/// The insertion order is preserved, with **notable exceptions** like the -/// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of -/// course result in a new order, depending on the sorting order. -/// -/// # Indices -/// -/// The key-value pairs are indexed in a compact range without holes in the -/// range `0..self.len()`. For example, the method `.get_full` looks up the -/// index for a key, and the method `.get_index` looks up the key-value pair by -/// index. -/// -/// # Examples -/// -/// ``` -/// use indexmap::IndexMap; -/// -/// // count the frequency of each letter in a sentence. 
-/// let mut letters = IndexMap::new(); -/// for ch in "a short treatise on fungi".chars() { -/// *letters.entry(ch).or_insert(0) += 1; -/// } -/// -/// assert_eq!(letters[&'s'], 2); -/// assert_eq!(letters[&'t'], 3); -/// assert_eq!(letters[&'u'], 1); -/// assert_eq!(letters.get(&'y'), None); -/// ``` -#[cfg(has_std)] -pub struct IndexMap { - pub(crate) core: IndexMapCore, - hash_builder: S, -} -#[cfg(not(has_std))] -pub struct IndexMap { - pub(crate) core: IndexMapCore, - hash_builder: S, -} - -impl Clone for IndexMap -where - K: Clone, - V: Clone, - S: Clone, -{ - fn clone(&self) -> Self { - IndexMap { - core: self.core.clone(), - hash_builder: self.hash_builder.clone(), - } - } - - fn clone_from(&mut self, other: &Self) { - self.core.clone_from(&other.core); - self.hash_builder.clone_from(&other.hash_builder); - } -} - -impl Entries for IndexMap { - type Entry = Bucket; - - #[inline] - fn into_entries(self) -> Vec { - self.core.into_entries() - } - - #[inline] - fn as_entries(&self) -> &[Self::Entry] { - self.core.as_entries() - } - - #[inline] - fn as_entries_mut(&mut self) -> &mut [Self::Entry] { - self.core.as_entries_mut() - } - - fn with_entries(&mut self, f: F) - where - F: FnOnce(&mut [Self::Entry]), - { - self.core.with_entries(f); - } -} - -impl fmt::Debug for IndexMap -where - K: fmt::Debug, - V: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if cfg!(not(feature = "test_debug")) { - f.debug_map().entries(self.iter()).finish() - } else { - // Let the inner `IndexMapCore` print all of its details - f.debug_struct("IndexMap") - .field("core", &self.core) - .finish() - } - } -} - -#[cfg(has_std)] -impl IndexMap { - /// Create a new map. (Does not allocate.) - #[inline] - pub fn new() -> Self { - Self::with_capacity(0) - } - - /// Create a new map with capacity for `n` key-value pairs. (Does not - /// allocate if `n` is zero.) - /// - /// Computes in **O(n)** time. 
- #[inline] - pub fn with_capacity(n: usize) -> Self { - Self::with_capacity_and_hasher(n, <_>::default()) - } -} - -impl IndexMap { - /// Create a new map with capacity for `n` key-value pairs. (Does not - /// allocate if `n` is zero.) - /// - /// Computes in **O(n)** time. - #[inline] - pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self { - if n == 0 { - Self::with_hasher(hash_builder) - } else { - IndexMap { - core: IndexMapCore::with_capacity(n), - hash_builder, - } - } - } - - /// Create a new map with `hash_builder`. - /// - /// This function is `const`, so it - /// can be called in `static` contexts. - pub const fn with_hasher(hash_builder: S) -> Self { - IndexMap { - core: IndexMapCore::new(), - hash_builder, - } - } - - /// Computes in **O(1)** time. - pub fn capacity(&self) -> usize { - self.core.capacity() - } - - /// Return a reference to the map's `BuildHasher`. - pub fn hasher(&self) -> &S { - &self.hash_builder - } - - /// Return the number of key-value pairs in the map. - /// - /// Computes in **O(1)** time. - #[inline] - pub fn len(&self) -> usize { - self.core.len() - } - - /// Returns true if the map contains no elements. - /// - /// Computes in **O(1)** time. 
- #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Return an iterator over the key-value pairs of the map, in their order - pub fn iter(&self) -> Iter<'_, K, V> { - Iter { - iter: self.as_entries().iter(), - } - } - - /// Return an iterator over the key-value pairs of the map, in their order - pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { - IterMut { - iter: self.as_entries_mut().iter_mut(), - } - } - - /// Return an iterator over the keys of the map, in their order - pub fn keys(&self) -> Keys<'_, K, V> { - Keys { - iter: self.as_entries().iter(), - } - } - - /// Return an owning iterator over the keys of the map, in their order - pub fn into_keys(self) -> IntoKeys { - IntoKeys { - iter: self.into_entries().into_iter(), - } - } - - /// Return an iterator over the values of the map, in their order - pub fn values(&self) -> Values<'_, K, V> { - Values { - iter: self.as_entries().iter(), - } - } - - /// Return an iterator over mutable references to the values of the map, - /// in their order - pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { - ValuesMut { - iter: self.as_entries_mut().iter_mut(), - } - } - - /// Return an owning iterator over the values of the map, in their order - pub fn into_values(self) -> IntoValues { - IntoValues { - iter: self.into_entries().into_iter(), - } - } - - /// Remove all key-value pairs in the map, while preserving its capacity. - /// - /// Computes in **O(n)** time. - pub fn clear(&mut self) { - self.core.clear(); - } - - /// Shortens the map, keeping the first `len` elements and dropping the rest. - /// - /// If `len` is greater than the map's current length, this has no effect. - pub fn truncate(&mut self, len: usize) { - self.core.truncate(len); - } - - /// Clears the `IndexMap` in the given index range, returning those - /// key-value pairs as a drain iterator. 
- /// - /// The range may be any type that implements `RangeBounds`, - /// including all of the `std::ops::Range*` types, or even a tuple pair of - /// `Bound` start and end values. To drain the map entirely, use `RangeFull` - /// like `map.drain(..)`. - /// - /// This shifts down all entries following the drained range to fill the - /// gap, and keeps the allocated memory for reuse. - /// - /// ***Panics*** if the starting point is greater than the end point or if - /// the end point is greater than the length of the map. - pub fn drain(&mut self, range: R) -> Drain<'_, K, V> - where - R: RangeBounds, - { - Drain { - iter: self.core.drain(range), - } - } - - /// Splits the collection into two at the given index. - /// - /// Returns a newly allocated map containing the elements in the range - /// `[at, len)`. After the call, the original map will be left containing - /// the elements `[0, at)` with its previous capacity unchanged. - /// - /// ***Panics*** if `at > len`. - pub fn split_off(&mut self, at: usize) -> Self - where - S: Clone, - { - Self { - core: self.core.split_off(at), - hash_builder: self.hash_builder.clone(), - } - } -} - -impl IndexMap -where - K: Hash + Eq, - S: BuildHasher, -{ - /// Reserve capacity for `additional` more key-value pairs. - /// - /// Computes in **O(n)** time. - pub fn reserve(&mut self, additional: usize) { - self.core.reserve(additional); - } - - /// Shrink the capacity of the map as much as possible. - /// - /// Computes in **O(n)** time. - pub fn shrink_to_fit(&mut self) { - self.core.shrink_to(0); - } - - /// Shrink the capacity of the map with a lower limit. - /// - /// Computes in **O(n)** time. - pub fn shrink_to(&mut self, min_capacity: usize) { - self.core.shrink_to(min_capacity); - } - - fn hash(&self, key: &Q) -> HashValue { - let mut h = self.hash_builder.build_hasher(); - key.hash(&mut h); - HashValue(h.finish() as usize) - } - - /// Insert a key-value pair in the map. 
- /// - /// If an equivalent key already exists in the map: the key remains and - /// retains in its place in the order, its corresponding value is updated - /// with `value` and the older value is returned inside `Some(_)`. - /// - /// If no equivalent key existed in the map: the new key-value pair is - /// inserted, last in order, and `None` is returned. - /// - /// Computes in **O(1)** time (amortized average). - /// - /// See also [`entry`](#method.entry) if you you want to insert *or* modify - /// or if you need to get the index of the corresponding key-value pair. - pub fn insert(&mut self, key: K, value: V) -> Option { - self.insert_full(key, value).1 - } - - /// Insert a key-value pair in the map, and get their index. - /// - /// If an equivalent key already exists in the map: the key remains and - /// retains in its place in the order, its corresponding value is updated - /// with `value` and the older value is returned inside `(index, Some(_))`. - /// - /// If no equivalent key existed in the map: the new key-value pair is - /// inserted, last in order, and `(index, None)` is returned. - /// - /// Computes in **O(1)** time (amortized average). - /// - /// See also [`entry`](#method.entry) if you you want to insert *or* modify - /// or if you need to get the index of the corresponding key-value pair. - pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option) { - let hash = self.hash(&key); - self.core.insert_full(hash, key, value) - } - - /// Get the given key’s corresponding entry in the map for insertion and/or - /// in-place manipulation. - /// - /// Computes in **O(1)** time (amortized average). - pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { - let hash = self.hash(&key); - self.core.entry(hash, key) - } - - /// Return `true` if an equivalent to `key` exists in the map. - /// - /// Computes in **O(1)** time (average). 
- pub fn contains_key(&self, key: &Q) -> bool - where - Q: Hash + Equivalent, - { - self.get_index_of(key).is_some() - } - - /// Return a reference to the value stored for `key`, if it is present, - /// else `None`. - /// - /// Computes in **O(1)** time (average). - pub fn get(&self, key: &Q) -> Option<&V> - where - Q: Hash + Equivalent, - { - if let Some(i) = self.get_index_of(key) { - let entry = &self.as_entries()[i]; - Some(&entry.value) - } else { - None - } - } - - /// Return references to the key-value pair stored for `key`, - /// if it is present, else `None`. - /// - /// Computes in **O(1)** time (average). - pub fn get_key_value(&self, key: &Q) -> Option<(&K, &V)> - where - Q: Hash + Equivalent, - { - if let Some(i) = self.get_index_of(key) { - let entry = &self.as_entries()[i]; - Some((&entry.key, &entry.value)) - } else { - None - } - } - - /// Return item index, key and value - pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> - where - Q: Hash + Equivalent, - { - if let Some(i) = self.get_index_of(key) { - let entry = &self.as_entries()[i]; - Some((i, &entry.key, &entry.value)) - } else { - None - } - } - - /// Return item index, if it exists in the map - /// - /// Computes in **O(1)** time (average). 
- pub fn get_index_of(&self, key: &Q) -> Option - where - Q: Hash + Equivalent, - { - if self.is_empty() { - None - } else { - let hash = self.hash(key); - self.core.get_index_of(hash, key) - } - } - - pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> - where - Q: Hash + Equivalent, - { - if let Some(i) = self.get_index_of(key) { - let entry = &mut self.as_entries_mut()[i]; - Some(&mut entry.value) - } else { - None - } - } - - pub fn get_full_mut(&mut self, key: &Q) -> Option<(usize, &K, &mut V)> - where - Q: Hash + Equivalent, - { - if let Some(i) = self.get_index_of(key) { - let entry = &mut self.as_entries_mut()[i]; - Some((i, &entry.key, &mut entry.value)) - } else { - None - } - } - - pub(crate) fn get_full_mut2_impl( - &mut self, - key: &Q, - ) -> Option<(usize, &mut K, &mut V)> - where - Q: Hash + Equivalent, - { - if let Some(i) = self.get_index_of(key) { - let entry = &mut self.as_entries_mut()[i]; - Some((i, &mut entry.key, &mut entry.value)) - } else { - None - } - } - - /// Remove the key-value pair equivalent to `key` and return - /// its value. - /// - /// **NOTE:** This is equivalent to `.swap_remove(key)`, if you need to - /// preserve the order of the keys in the map, use `.shift_remove(key)` - /// instead. - /// - /// Computes in **O(1)** time (average). - pub fn remove(&mut self, key: &Q) -> Option - where - Q: Hash + Equivalent, - { - self.swap_remove(key) - } - - /// Remove and return the key-value pair equivalent to `key`. - /// - /// **NOTE:** This is equivalent to `.swap_remove_entry(key)`, if you need to - /// preserve the order of the keys in the map, use `.shift_remove_entry(key)` - /// instead. - /// - /// Computes in **O(1)** time (average). - pub fn remove_entry(&mut self, key: &Q) -> Option<(K, V)> - where - Q: Hash + Equivalent, - { - self.swap_remove_entry(key) - } - - /// Remove the key-value pair equivalent to `key` and return - /// its value. 
- /// - /// Like `Vec::swap_remove`, the pair is removed by swapping it with the - /// last element of the map and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Return `None` if `key` is not in map. - /// - /// Computes in **O(1)** time (average). - pub fn swap_remove(&mut self, key: &Q) -> Option - where - Q: Hash + Equivalent, - { - self.swap_remove_full(key).map(third) - } - - /// Remove and return the key-value pair equivalent to `key`. - /// - /// Like `Vec::swap_remove`, the pair is removed by swapping it with the - /// last element of the map and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Return `None` if `key` is not in map. - /// - /// Computes in **O(1)** time (average). - pub fn swap_remove_entry(&mut self, key: &Q) -> Option<(K, V)> - where - Q: Hash + Equivalent, - { - match self.swap_remove_full(key) { - Some((_, key, value)) => Some((key, value)), - None => None, - } - } - - /// Remove the key-value pair equivalent to `key` and return it and - /// the index it had. - /// - /// Like `Vec::swap_remove`, the pair is removed by swapping it with the - /// last element of the map and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Return `None` if `key` is not in map. - /// - /// Computes in **O(1)** time (average). - pub fn swap_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> - where - Q: Hash + Equivalent, - { - if self.is_empty() { - return None; - } - let hash = self.hash(key); - self.core.swap_remove_full(hash, key) - } - - /// Remove the key-value pair equivalent to `key` and return - /// its value. - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Return `None` if `key` is not in map. 
- /// - /// Computes in **O(n)** time (average). - pub fn shift_remove(&mut self, key: &Q) -> Option - where - Q: Hash + Equivalent, - { - self.shift_remove_full(key).map(third) - } - - /// Remove and return the key-value pair equivalent to `key`. - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Return `None` if `key` is not in map. - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove_entry(&mut self, key: &Q) -> Option<(K, V)> - where - Q: Hash + Equivalent, - { - match self.shift_remove_full(key) { - Some((_, key, value)) => Some((key, value)), - None => None, - } - } - - /// Remove the key-value pair equivalent to `key` and return it and - /// the index it had. - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Return `None` if `key` is not in map. - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> - where - Q: Hash + Equivalent, - { - if self.is_empty() { - return None; - } - let hash = self.hash(key); - self.core.shift_remove_full(hash, key) - } - - /// Remove the last key-value pair - /// - /// This preserves the order of the remaining elements. - /// - /// Computes in **O(1)** time (average). - pub fn pop(&mut self) -> Option<(K, V)> { - self.core.pop() - } - - /// Scan through each key-value pair in the map and keep those where the - /// closure `keep` returns `true`. - /// - /// The elements are visited in order, and remaining elements keep their - /// order. - /// - /// Computes in **O(n)** time (average). 
- pub fn retain(&mut self, mut keep: F) - where - F: FnMut(&K, &mut V) -> bool, - { - self.core.retain_in_order(move |k, v| keep(k, v)); - } - - pub(crate) fn retain_mut(&mut self, keep: F) - where - F: FnMut(&mut K, &mut V) -> bool, - { - self.core.retain_in_order(keep); - } - - /// Sort the map’s key-value pairs by the default ordering of the keys. - /// - /// See [`sort_by`](Self::sort_by) for details. - pub fn sort_keys(&mut self) - where - K: Ord, - { - self.with_entries(move |entries| { - entries.sort_by(move |a, b| K::cmp(&a.key, &b.key)); - }); - } - - /// Sort the map’s key-value pairs in place using the comparison - /// function `cmp`. - /// - /// The comparison function receives two key and value pairs to compare (you - /// can sort by keys or values or their combination as needed). - /// - /// Computes in **O(n log n + c)** time and **O(n)** space where *n* is - /// the length of the map and *c* the capacity. The sort is stable. - pub fn sort_by(&mut self, mut cmp: F) - where - F: FnMut(&K, &V, &K, &V) -> Ordering, - { - self.with_entries(move |entries| { - entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - }); - } - - /// Sort the key-value pairs of the map and return a by-value iterator of - /// the key-value pairs with the result. - /// - /// The sort is stable. - pub fn sorted_by(self, mut cmp: F) -> IntoIter - where - F: FnMut(&K, &V, &K, &V) -> Ordering, - { - let mut entries = self.into_entries(); - entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - IntoIter { - iter: entries.into_iter(), - } - } - - /// Sort the map's key-value pairs by the default ordering of the keys, but - /// may not preserve the order of equal elements. - /// - /// See [`sort_unstable_by`](Self::sort_unstable_by) for details. 
- pub fn sort_unstable_keys(&mut self) - where - K: Ord, - { - self.with_entries(move |entries| { - entries.sort_unstable_by(move |a, b| K::cmp(&a.key, &b.key)); - }); - } - - /// Sort the map's key-value pairs in place using the comparison function `cmp`, but - /// may not preserve the order of equal elements. - /// - /// The comparison function receives two key and value pairs to compare (you - /// can sort by keys or values or their combination as needed). - /// - /// Computes in **O(n log n + c)** time where *n* is - /// the length of the map and *c* is the capacity. The sort is unstable. - pub fn sort_unstable_by(&mut self, mut cmp: F) - where - F: FnMut(&K, &V, &K, &V) -> Ordering, - { - self.with_entries(move |entries| { - entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - }); - } - - /// Sort the key-value pairs of the map and return a by-value iterator of - /// the key-value pairs with the result. - /// - /// The sort is unstable. - #[inline] - pub fn sorted_unstable_by(self, mut cmp: F) -> IntoIter - where - F: FnMut(&K, &V, &K, &V) -> Ordering, - { - let mut entries = self.into_entries(); - entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - IntoIter { - iter: entries.into_iter(), - } - } - - /// Reverses the order of the map’s key-value pairs in place. - /// - /// Computes in **O(n)** time and **O(1)** space. - pub fn reverse(&mut self) { - self.core.reverse() - } -} - -impl IndexMap { - /// Get a key-value pair by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Computes in **O(1)** time. - pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { - self.as_entries().get(index).map(Bucket::refs) - } - - /// Get a key-value pair by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Computes in **O(1)** time. 
- pub fn get_index_mut(&mut self, index: usize) -> Option<(&mut K, &mut V)> { - self.as_entries_mut().get_mut(index).map(Bucket::muts) - } - - /// Get the first key-value pair - /// - /// Computes in **O(1)** time. - pub fn first(&self) -> Option<(&K, &V)> { - self.as_entries().first().map(Bucket::refs) - } - - /// Get the first key-value pair, with mutable access to the value - /// - /// Computes in **O(1)** time. - pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { - self.as_entries_mut().first_mut().map(Bucket::ref_mut) - } - - /// Get the last key-value pair - /// - /// Computes in **O(1)** time. - pub fn last(&self) -> Option<(&K, &V)> { - self.as_entries().last().map(Bucket::refs) - } - - /// Get the last key-value pair, with mutable access to the value - /// - /// Computes in **O(1)** time. - pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { - self.as_entries_mut().last_mut().map(Bucket::ref_mut) - } - - /// Remove the key-value pair by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Like `Vec::swap_remove`, the pair is removed by swapping it with the - /// last element of the map and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Computes in **O(1)** time (average). - pub fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { - self.core.swap_remove_index(index) - } - - /// Remove the key-value pair by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { - self.core.shift_remove_index(index) - } - - /// Moves the position of a key-value pair from one index to another - /// by shifting all other pairs in-between. 
- /// - /// * If `from < to`, the other pairs will shift down while the targeted pair moves up. - /// * If `from > to`, the other pairs will shift up while the targeted pair moves down. - /// - /// ***Panics*** if `from` or `to` are out of bounds. - /// - /// Computes in **O(n)** time (average). - pub fn move_index(&mut self, from: usize, to: usize) { - self.core.move_index(from, to) - } - - /// Swaps the position of two key-value pairs in the map. - /// - /// ***Panics*** if `a` or `b` are out of bounds. - pub fn swap_indices(&mut self, a: usize, b: usize) { - self.core.swap_indices(a, b) - } -} - -/// An iterator over the keys of a `IndexMap`. -/// -/// This `struct` is created by the [`keys`] method on [`IndexMap`]. See its -/// documentation for more. -/// -/// [`keys`]: struct.IndexMap.html#method.keys -/// [`IndexMap`]: struct.IndexMap.html -pub struct Keys<'a, K, V> { - iter: SliceIter<'a, Bucket>, -} - -impl<'a, K, V> Iterator for Keys<'a, K, V> { - type Item = &'a K; - - iterator_methods!(Bucket::key_ref); -} - -impl DoubleEndedIterator for Keys<'_, K, V> { - double_ended_iterator_methods!(Bucket::key_ref); -} - -impl ExactSizeIterator for Keys<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Keys<'_, K, V> {} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -impl Clone for Keys<'_, K, V> { - fn clone(&self) -> Self { - Keys { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Keys<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// An owning iterator over the keys of a `IndexMap`. -/// -/// This `struct` is created by the [`into_keys`] method on [`IndexMap`]. -/// See its documentation for more. 
-/// -/// [`IndexMap`]: struct.IndexMap.html -/// [`into_keys`]: struct.IndexMap.html#method.into_keys -pub struct IntoKeys { - iter: vec::IntoIter>, -} - -impl Iterator for IntoKeys { - type Item = K; - - iterator_methods!(Bucket::key); -} - -impl DoubleEndedIterator for IntoKeys { - double_ended_iterator_methods!(Bucket::key); -} - -impl ExactSizeIterator for IntoKeys { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IntoKeys {} - -impl fmt::Debug for IntoKeys { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::key_ref); - f.debug_list().entries(iter).finish() - } -} - -/// An iterator over the values of a `IndexMap`. -/// -/// This `struct` is created by the [`values`] method on [`IndexMap`]. See its -/// documentation for more. -/// -/// [`values`]: struct.IndexMap.html#method.values -/// [`IndexMap`]: struct.IndexMap.html -pub struct Values<'a, K, V> { - iter: SliceIter<'a, Bucket>, -} - -impl<'a, K, V> Iterator for Values<'a, K, V> { - type Item = &'a V; - - iterator_methods!(Bucket::value_ref); -} - -impl DoubleEndedIterator for Values<'_, K, V> { - double_ended_iterator_methods!(Bucket::value_ref); -} - -impl ExactSizeIterator for Values<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Values<'_, K, V> {} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -impl Clone for Values<'_, K, V> { - fn clone(&self) -> Self { - Values { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Values<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A mutable iterator over the values of a `IndexMap`. -/// -/// This `struct` is created by the [`values_mut`] method on [`IndexMap`]. See its -/// documentation for more. 
-/// -/// [`values_mut`]: struct.IndexMap.html#method.values_mut -/// [`IndexMap`]: struct.IndexMap.html -pub struct ValuesMut<'a, K, V> { - iter: SliceIterMut<'a, Bucket>, -} - -impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { - type Item = &'a mut V; - - iterator_methods!(Bucket::value_mut); -} - -impl DoubleEndedIterator for ValuesMut<'_, K, V> { - double_ended_iterator_methods!(Bucket::value_mut); -} - -impl ExactSizeIterator for ValuesMut<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for ValuesMut<'_, K, V> {} - -impl fmt::Debug for ValuesMut<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::value_ref); - f.debug_list().entries(iter).finish() - } -} - -/// An owning iterator over the values of a `IndexMap`. -/// -/// This `struct` is created by the [`into_values`] method on [`IndexMap`]. -/// See its documentation for more. -/// -/// [`IndexMap`]: struct.IndexMap.html -/// [`into_values`]: struct.IndexMap.html#method.into_values -pub struct IntoValues { - iter: vec::IntoIter>, -} - -impl Iterator for IntoValues { - type Item = V; - - iterator_methods!(Bucket::value); -} - -impl DoubleEndedIterator for IntoValues { - double_ended_iterator_methods!(Bucket::value); -} - -impl ExactSizeIterator for IntoValues { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IntoValues {} - -impl fmt::Debug for IntoValues { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::value_ref); - f.debug_list().entries(iter).finish() - } -} - -/// An iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`iter`] method on [`IndexMap`]. See its -/// documentation for more. 
-/// -/// [`iter`]: struct.IndexMap.html#method.iter -/// [`IndexMap`]: struct.IndexMap.html -pub struct Iter<'a, K, V> { - iter: SliceIter<'a, Bucket>, -} - -impl<'a, K, V> Iterator for Iter<'a, K, V> { - type Item = (&'a K, &'a V); - - iterator_methods!(Bucket::refs); -} - -impl DoubleEndedIterator for Iter<'_, K, V> { - double_ended_iterator_methods!(Bucket::refs); -} - -impl ExactSizeIterator for Iter<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Iter<'_, K, V> {} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -impl Clone for Iter<'_, K, V> { - fn clone(&self) -> Self { - Iter { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Iter<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A mutable iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`iter_mut`] method on [`IndexMap`]. See its -/// documentation for more. -/// -/// [`iter_mut`]: struct.IndexMap.html#method.iter_mut -/// [`IndexMap`]: struct.IndexMap.html -pub struct IterMut<'a, K, V> { - iter: SliceIterMut<'a, Bucket>, -} - -impl<'a, K, V> Iterator for IterMut<'a, K, V> { - type Item = (&'a K, &'a mut V); - - iterator_methods!(Bucket::ref_mut); -} - -impl DoubleEndedIterator for IterMut<'_, K, V> { - double_ended_iterator_methods!(Bucket::ref_mut); -} - -impl ExactSizeIterator for IterMut<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IterMut<'_, K, V> {} - -impl fmt::Debug for IterMut<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::refs); - f.debug_list().entries(iter).finish() - } -} - -/// An owning iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`into_iter`] method on [`IndexMap`] -/// (provided by the `IntoIterator` trait). See its documentation for more. 
-/// -/// [`into_iter`]: struct.IndexMap.html#method.into_iter -/// [`IndexMap`]: struct.IndexMap.html -pub struct IntoIter { - iter: vec::IntoIter>, -} - -impl Iterator for IntoIter { - type Item = (K, V); - - iterator_methods!(Bucket::key_value); -} - -impl DoubleEndedIterator for IntoIter { - double_ended_iterator_methods!(Bucket::key_value); -} - -impl ExactSizeIterator for IntoIter { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IntoIter {} - -impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::refs); - f.debug_list().entries(iter).finish() - } -} - -/// A draining iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`drain`] method on [`IndexMap`]. See its -/// documentation for more. -/// -/// [`drain`]: struct.IndexMap.html#method.drain -/// [`IndexMap`]: struct.IndexMap.html -pub struct Drain<'a, K, V> { - pub(crate) iter: vec::Drain<'a, Bucket>, -} - -impl Iterator for Drain<'_, K, V> { - type Item = (K, V); - - iterator_methods!(Bucket::key_value); -} - -impl DoubleEndedIterator for Drain<'_, K, V> { - double_ended_iterator_methods!(Bucket::key_value); -} - -impl ExactSizeIterator for Drain<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Drain<'_, K, V> {} - -impl fmt::Debug for Drain<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::refs); - f.debug_list().entries(iter).finish() - } -} - -impl<'a, K, V, S> IntoIterator for &'a IndexMap { - type Item = (&'a K, &'a V); - type IntoIter = Iter<'a, K, V>; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, K, V, S> IntoIterator for &'a mut IndexMap { - type Item = (&'a K, &'a mut V); - type IntoIter = IterMut<'a, K, V>; - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -impl IntoIterator for 
IndexMap { - type Item = (K, V); - type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { - IntoIter { - iter: self.into_entries().into_iter(), - } - } -} - -/// Access `IndexMap` values corresponding to a key. -/// -/// # Examples -/// -/// ``` -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { -/// map.insert(word.to_lowercase(), word.to_uppercase()); -/// } -/// assert_eq!(map["lorem"], "LOREM"); -/// assert_eq!(map["ipsum"], "IPSUM"); -/// ``` -/// -/// ```should_panic -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// map.insert("foo", 1); -/// println!("{:?}", map["bar"]); // panics! -/// ``` -impl Index<&Q> for IndexMap -where - Q: Hash + Equivalent, - K: Hash + Eq, - S: BuildHasher, -{ - type Output = V; - - /// Returns a reference to the value corresponding to the supplied `key`. - /// - /// ***Panics*** if `key` is not present in the map. - fn index(&self, key: &Q) -> &V { - self.get(key).expect("IndexMap: key not found") - } -} - -/// Access `IndexMap` values corresponding to a key. -/// -/// Mutable indexing allows changing / updating values of key-value -/// pairs that are already present. -/// -/// You can **not** insert new pairs with index syntax, use `.insert()`. -/// -/// # Examples -/// -/// ``` -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { -/// map.insert(word.to_lowercase(), word.to_string()); -/// } -/// let lorem = &mut map["lorem"]; -/// assert_eq!(lorem, "Lorem"); -/// lorem.retain(char::is_lowercase); -/// assert_eq!(map["lorem"], "orem"); -/// ``` -/// -/// ```should_panic -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// map.insert("foo", 1); -/// map["bar"] = 1; // panics! 
-/// ``` -impl IndexMut<&Q> for IndexMap -where - Q: Hash + Equivalent, - K: Hash + Eq, - S: BuildHasher, -{ - /// Returns a mutable reference to the value corresponding to the supplied `key`. - /// - /// ***Panics*** if `key` is not present in the map. - fn index_mut(&mut self, key: &Q) -> &mut V { - self.get_mut(key).expect("IndexMap: key not found") - } -} - -/// Access `IndexMap` values at indexed positions. -/// -/// # Examples -/// -/// ``` -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { -/// map.insert(word.to_lowercase(), word.to_uppercase()); -/// } -/// assert_eq!(map[0], "LOREM"); -/// assert_eq!(map[1], "IPSUM"); -/// map.reverse(); -/// assert_eq!(map[0], "AMET"); -/// assert_eq!(map[1], "SIT"); -/// map.sort_keys(); -/// assert_eq!(map[0], "AMET"); -/// assert_eq!(map[1], "DOLOR"); -/// ``` -/// -/// ```should_panic -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// map.insert("foo", 1); -/// println!("{:?}", map[10]); // panics! -/// ``` -impl Index for IndexMap { - type Output = V; - - /// Returns a reference to the value at the supplied `index`. - /// - /// ***Panics*** if `index` is out of bounds. - fn index(&self, index: usize) -> &V { - self.get_index(index) - .expect("IndexMap: index out of bounds") - .1 - } -} - -/// Access `IndexMap` values at indexed positions. -/// -/// Mutable indexing allows changing / updating indexed values -/// that are already present. -/// -/// You can **not** insert new values with index syntax, use `.insert()`. 
-/// -/// # Examples -/// -/// ``` -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { -/// map.insert(word.to_lowercase(), word.to_string()); -/// } -/// let lorem = &mut map[0]; -/// assert_eq!(lorem, "Lorem"); -/// lorem.retain(char::is_lowercase); -/// assert_eq!(map["lorem"], "orem"); -/// ``` -/// -/// ```should_panic -/// use indexmap::IndexMap; -/// -/// let mut map = IndexMap::new(); -/// map.insert("foo", 1); -/// map[10] = 1; // panics! -/// ``` -impl IndexMut for IndexMap { - /// Returns a mutable reference to the value at the supplied `index`. - /// - /// ***Panics*** if `index` is out of bounds. - fn index_mut(&mut self, index: usize) -> &mut V { - self.get_index_mut(index) - .expect("IndexMap: index out of bounds") - .1 - } -} - -impl FromIterator<(K, V)> for IndexMap -where - K: Hash + Eq, - S: BuildHasher + Default, -{ - /// Create an `IndexMap` from the sequence of key-value pairs in the - /// iterable. - /// - /// `from_iter` uses the same logic as `extend`. See - /// [`extend`](#method.extend) for more details. - fn from_iter>(iterable: I) -> Self { - let iter = iterable.into_iter(); - let (low, _) = iter.size_hint(); - let mut map = Self::with_capacity_and_hasher(low, <_>::default()); - map.extend(iter); - map - } -} - -#[cfg(has_std)] -impl From<[(K, V); N]> for IndexMap -where - K: Hash + Eq, -{ - /// # Examples - /// - /// ``` - /// use indexmap::IndexMap; - /// - /// let map1 = IndexMap::from([(1, 2), (3, 4)]); - /// let map2: IndexMap<_, _> = [(1, 2), (3, 4)].into(); - /// assert_eq!(map1, map2); - /// ``` - fn from(arr: [(K, V); N]) -> Self { - Self::from_iter(arr) - } -} - -impl Extend<(K, V)> for IndexMap -where - K: Hash + Eq, - S: BuildHasher, -{ - /// Extend the map with all key-value pairs in the iterable. 
- /// - /// This is equivalent to calling [`insert`](#method.insert) for each of - /// them in order, which means that for keys that already existed - /// in the map, their value is updated but it keeps the existing order. - /// - /// New keys are inserted in the order they appear in the sequence. If - /// equivalents of a key occur more than once, the last corresponding value - /// prevails. - fn extend>(&mut self, iterable: I) { - // (Note: this is a copy of `std`/`hashbrown`'s reservation logic.) - // Keys may be already present or show multiple times in the iterator. - // Reserve the entire hint lower bound if the map is empty. - // Otherwise reserve half the hint (rounded up), so the map - // will only resize twice in the worst case. - let iter = iterable.into_iter(); - let reserve = if self.is_empty() { - iter.size_hint().0 - } else { - (iter.size_hint().0 + 1) / 2 - }; - self.reserve(reserve); - iter.for_each(move |(k, v)| { - self.insert(k, v); - }); - } -} - -impl<'a, K, V, S> Extend<(&'a K, &'a V)> for IndexMap -where - K: Hash + Eq + Copy, - V: Copy, - S: BuildHasher, -{ - /// Extend the map with all key-value pairs in the iterable. - /// - /// See the first extend method for more details. 
- fn extend>(&mut self, iterable: I) { - self.extend(iterable.into_iter().map(|(&key, &value)| (key, value))); - } -} - -impl Default for IndexMap -where - S: Default, -{ - /// Return an empty `IndexMap` - fn default() -> Self { - Self::with_capacity_and_hasher(0, S::default()) - } -} - -impl PartialEq> for IndexMap -where - K: Hash + Eq, - V1: PartialEq, - S1: BuildHasher, - S2: BuildHasher, -{ - fn eq(&self, other: &IndexMap) -> bool { - if self.len() != other.len() { - return false; - } - - self.iter() - .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) - } -} - -impl Eq for IndexMap -where - K: Eq + Hash, - V: Eq, - S: BuildHasher, -{ -} - -#[cfg(test)] -mod tests { - use super::*; - use std::string::String; - - #[test] - fn it_works() { - let mut map = IndexMap::new(); - assert_eq!(map.is_empty(), true); - map.insert(1, ()); - map.insert(1, ()); - assert_eq!(map.len(), 1); - assert!(map.get(&1).is_some()); - assert_eq!(map.is_empty(), false); - } - - #[test] - fn new() { - let map = IndexMap::::new(); - println!("{:?}", map); - assert_eq!(map.capacity(), 0); - assert_eq!(map.len(), 0); - assert_eq!(map.is_empty(), true); - } - - #[test] - fn insert() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5]; - let not_present = [1, 3, 6, 9, 10]; - let mut map = IndexMap::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(map.len(), i); - map.insert(elt, elt); - assert_eq!(map.len(), i + 1); - assert_eq!(map.get(&elt), Some(&elt)); - assert_eq!(map[&elt], elt); - } - println!("{:?}", map); - - for &elt in ¬_present { - assert!(map.get(&elt).is_none()); - } - } - - #[test] - fn insert_full() { - let insert = vec![9, 2, 7, 1, 4, 6, 13]; - let present = vec![1, 6, 2]; - let mut map = IndexMap::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(map.len(), i); - let (index, existing) = map.insert_full(elt, elt); - assert_eq!(existing, None); - assert_eq!(Some(index), 
map.get_full(&elt).map(|x| x.0)); - assert_eq!(map.len(), i + 1); - } - - let len = map.len(); - for &elt in &present { - let (index, existing) = map.insert_full(elt, elt); - assert_eq!(existing, Some(elt)); - assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); - assert_eq!(map.len(), len); - } - } - - #[test] - fn insert_2() { - let mut map = IndexMap::with_capacity(16); - - let mut keys = vec![]; - keys.extend(0..16); - keys.extend(if cfg!(miri) { 32..64 } else { 128..267 }); - - for &i in &keys { - let old_map = map.clone(); - map.insert(i, ()); - for key in old_map.keys() { - if map.get(key).is_none() { - println!("old_map: {:?}", old_map); - println!("map: {:?}", map); - panic!("did not find {} in map", key); - } - } - } - - for &i in &keys { - assert!(map.get(&i).is_some(), "did not find {}", i); - } - } - - #[test] - fn insert_order() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut map = IndexMap::new(); - - for &elt in &insert { - map.insert(elt, ()); - } - - assert_eq!(map.keys().count(), map.len()); - assert_eq!(map.keys().count(), insert.len()); - for (a, b) in insert.iter().zip(map.keys()) { - assert_eq!(a, b); - } - for (i, k) in (0..insert.len()).zip(map.keys()) { - assert_eq!(map.get_index(i).unwrap().0, k); - } - } - - #[test] - fn grow() { - let insert = [0, 4, 2, 12, 8, 7, 11]; - let not_present = [1, 3, 6, 9, 10]; - let mut map = IndexMap::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(map.len(), i); - map.insert(elt, elt); - assert_eq!(map.len(), i + 1); - assert_eq!(map.get(&elt), Some(&elt)); - assert_eq!(map[&elt], elt); - } - - println!("{:?}", map); - for &elt in &insert { - map.insert(elt * 10, elt); - } - for &elt in &insert { - map.insert(elt * 100, elt); - } - for (i, &elt) in insert.iter().cycle().enumerate().take(100) { - map.insert(elt * 100 + i as i32, elt); - } - println!("{:?}", map); - for &elt in ¬_present { - assert!(map.get(&elt).is_none()); - } - } - 
- #[test] - fn reserve() { - let mut map = IndexMap::::new(); - assert_eq!(map.capacity(), 0); - map.reserve(100); - let capacity = map.capacity(); - assert!(capacity >= 100); - for i in 0..capacity { - assert_eq!(map.len(), i); - map.insert(i, i * i); - assert_eq!(map.len(), i + 1); - assert_eq!(map.capacity(), capacity); - assert_eq!(map.get(&i), Some(&(i * i))); - } - map.insert(capacity, std::usize::MAX); - assert_eq!(map.len(), capacity + 1); - assert!(map.capacity() > capacity); - assert_eq!(map.get(&capacity), Some(&std::usize::MAX)); - } - - #[test] - fn shrink_to_fit() { - let mut map = IndexMap::::new(); - assert_eq!(map.capacity(), 0); - for i in 0..100 { - assert_eq!(map.len(), i); - map.insert(i, i * i); - assert_eq!(map.len(), i + 1); - assert!(map.capacity() >= i + 1); - assert_eq!(map.get(&i), Some(&(i * i))); - map.shrink_to_fit(); - assert_eq!(map.len(), i + 1); - assert_eq!(map.capacity(), i + 1); - assert_eq!(map.get(&i), Some(&(i * i))); - } - } - - #[test] - fn remove() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut map = IndexMap::new(); - - for &elt in &insert { - map.insert(elt, elt); - } - - assert_eq!(map.keys().count(), map.len()); - assert_eq!(map.keys().count(), insert.len()); - for (a, b) in insert.iter().zip(map.keys()) { - assert_eq!(a, b); - } - - let remove_fail = [99, 77]; - let remove = [4, 12, 8, 7]; - - for &key in &remove_fail { - assert!(map.swap_remove_full(&key).is_none()); - } - println!("{:?}", map); - for &key in &remove { - //println!("{:?}", map); - let index = map.get_full(&key).unwrap().0; - assert_eq!(map.swap_remove_full(&key), Some((index, key, key))); - } - println!("{:?}", map); - - for key in &insert { - assert_eq!(map.get(key).is_some(), !remove.contains(key)); - } - assert_eq!(map.len(), insert.len() - remove.len()); - assert_eq!(map.keys().count(), insert.len() - remove.len()); - } - - #[test] - fn remove_to_empty() { - let mut map = indexmap! 
{ 0 => 0, 4 => 4, 5 => 5 }; - map.swap_remove(&5).unwrap(); - map.swap_remove(&4).unwrap(); - map.swap_remove(&0).unwrap(); - assert!(map.is_empty()); - } - - #[test] - fn swap_remove_index() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut map = IndexMap::new(); - - for &elt in &insert { - map.insert(elt, elt * 2); - } - - let mut vector = insert.to_vec(); - let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; - - // check that the same swap remove sequence on vec and map - // have the same result. - for &rm in remove_sequence { - let out_vec = vector.swap_remove(rm); - let (out_map, _) = map.swap_remove_index(rm).unwrap(); - assert_eq!(out_vec, out_map); - } - assert_eq!(vector.len(), map.len()); - for (a, b) in vector.iter().zip(map.keys()) { - assert_eq!(a, b); - } - } - - #[test] - fn partial_eq_and_eq() { - let mut map_a = IndexMap::new(); - map_a.insert(1, "1"); - map_a.insert(2, "2"); - let mut map_b = map_a.clone(); - assert_eq!(map_a, map_b); - map_b.swap_remove(&1); - assert_ne!(map_a, map_b); - - let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.into())).collect(); - assert_ne!(map_a, map_c); - assert_ne!(map_c, map_a); - } - - #[test] - fn extend() { - let mut map = IndexMap::new(); - map.extend(vec![(&1, &2), (&3, &4)]); - map.extend(vec![(5, 6)]); - assert_eq!( - map.into_iter().collect::>(), - vec![(1, 2), (3, 4), (5, 6)] - ); - } - - #[test] - fn entry() { - let mut map = IndexMap::new(); - - map.insert(1, "1"); - map.insert(2, "2"); - { - let e = map.entry(3); - assert_eq!(e.index(), 2); - let e = e.or_insert("3"); - assert_eq!(e, &"3"); - } - - let e = map.entry(2); - assert_eq!(e.index(), 1); - assert_eq!(e.key(), &2); - match e { - Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"), - Entry::Vacant(_) => panic!(), - } - assert_eq!(e.or_insert("4"), &"2"); - } - - #[test] - fn entry_and_modify() { - let mut map = IndexMap::new(); - - map.insert(1, "1"); - map.entry(1).and_modify(|x| *x = "2"); - 
assert_eq!(Some(&"2"), map.get(&1)); - - map.entry(2).and_modify(|x| *x = "doesn't exist"); - assert_eq!(None, map.get(&2)); - } - - #[test] - fn entry_or_default() { - let mut map = IndexMap::new(); - - #[derive(Debug, PartialEq)] - enum TestEnum { - DefaultValue, - NonDefaultValue, - } - - impl Default for TestEnum { - fn default() -> Self { - TestEnum::DefaultValue - } - } - - map.insert(1, TestEnum::NonDefaultValue); - assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default()); - - assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default()); - } - - #[test] - fn occupied_entry_key() { - // These keys match hash and equality, but their addresses are distinct. - let (k1, k2) = (&mut 1, &mut 1); - let k1_ptr = k1 as *const i32; - let k2_ptr = k2 as *const i32; - assert_ne!(k1_ptr, k2_ptr); - - let mut map = IndexMap::new(); - map.insert(k1, "value"); - match map.entry(k2) { - Entry::Occupied(ref e) => { - // `OccupiedEntry::key` should reference the key in the map, - // not the key that was used to find the entry. 
- let ptr = *e.key() as *const i32; - assert_eq!(ptr, k1_ptr); - assert_ne!(ptr, k2_ptr); - } - Entry::Vacant(_) => panic!(), - } - } - - #[test] - fn keys() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_iter().collect(); - let keys: Vec<_> = map.keys().copied().collect(); - assert_eq!(keys.len(), 3); - assert!(keys.contains(&1)); - assert!(keys.contains(&2)); - assert!(keys.contains(&3)); - } - - #[test] - fn into_keys() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_iter().collect(); - let keys: Vec = map.into_keys().collect(); - assert_eq!(keys.len(), 3); - assert!(keys.contains(&1)); - assert!(keys.contains(&2)); - assert!(keys.contains(&3)); - } - - #[test] - fn values() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_iter().collect(); - let values: Vec<_> = map.values().copied().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&'a')); - assert!(values.contains(&'b')); - assert!(values.contains(&'c')); - } - - #[test] - fn values_mut() { - let vec = vec![(1, 1), (2, 2), (3, 3)]; - let mut map: IndexMap<_, _> = vec.into_iter().collect(); - for value in map.values_mut() { - *value *= 2 - } - let values: Vec<_> = map.values().copied().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&2)); - assert!(values.contains(&4)); - assert!(values.contains(&6)); - } - - #[test] - fn into_values() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_iter().collect(); - let values: Vec = map.into_values().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&'a')); - assert!(values.contains(&'b')); - assert!(values.contains(&'c')); - } - - #[test] - #[cfg(has_std)] - fn from_array() { - let map = IndexMap::from([(1, 2), (3, 4)]); - let mut expected = IndexMap::new(); - expected.insert(1, 2); - expected.insert(3, 4); - - assert_eq!(map, expected) - } -} diff -Nru 
s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/mutable_keys.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/mutable_keys.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/mutable_keys.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/mutable_keys.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -use core::hash::{BuildHasher, Hash}; - -use super::{Equivalent, IndexMap}; - -pub struct PrivateMarker {} - -/// Opt-in mutable access to keys. -/// -/// These methods expose `&mut K`, mutable references to the key as it is stored -/// in the map. -/// You are allowed to modify the keys in the hashmap **if the modification -/// does not change the key’s hash and equality**. -/// -/// If keys are modified erroneously, you can no longer look them up. -/// This is sound (memory safe) but a logical error hazard (just like -/// implementing PartialEq, Eq, or Hash incorrectly would be). -/// -/// `use` this trait to enable its methods for `IndexMap`. -pub trait MutableKeys { - type Key; - type Value; - - /// Return item index, mutable reference to key and value - fn get_full_mut2( - &mut self, - key: &Q, - ) -> Option<(usize, &mut Self::Key, &mut Self::Value)> - where - Q: Hash + Equivalent; - - /// Scan through each key-value pair in the map and keep those where the - /// closure `keep` returns `true`. - /// - /// The elements are visited in order, and remaining elements keep their - /// order. - /// - /// Computes in **O(n)** time (average). - fn retain2(&mut self, keep: F) - where - F: FnMut(&mut Self::Key, &mut Self::Value) -> bool; - - /// This method is not useful in itself – it is there to “seal†the trait - /// for external implementation, so that we can add methods without - /// causing breaking changes. - fn __private_marker(&self) -> PrivateMarker; -} - -/// Opt-in mutable access to keys. -/// -/// See [`MutableKeys`](trait.MutableKeys.html) for more information. 
-impl MutableKeys for IndexMap -where - K: Eq + Hash, - S: BuildHasher, -{ - type Key = K; - type Value = V; - fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)> - where - Q: Hash + Equivalent, - { - self.get_full_mut2_impl(key) - } - - fn retain2(&mut self, keep: F) - where - F: FnMut(&mut K, &mut V) -> bool, - { - self.retain_mut(keep) - } - - fn __private_marker(&self) -> PrivateMarker { - PrivateMarker {} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/rayon/map.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/rayon/map.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/rayon/map.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/rayon/map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,583 +0,0 @@ -//! Parallel iterator types for `IndexMap` with [rayon](https://docs.rs/rayon/1.0/rayon). -//! -//! You will rarely need to interact with this module directly unless you need to name one of the -//! iterator types. -//! -//! Requires crate feature `"rayon"` - -use super::collect; -use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; -use rayon::prelude::*; - -use crate::vec::Vec; -use core::cmp::Ordering; -use core::fmt; -use core::hash::{BuildHasher, Hash}; -use core::ops::RangeBounds; - -use crate::Bucket; -use crate::Entries; -use crate::IndexMap; - -/// Requires crate feature `"rayon"`. -impl IntoParallelIterator for IndexMap -where - K: Send, - V: Send, -{ - type Item = (K, V); - type Iter = IntoParIter; - - fn into_par_iter(self) -> Self::Iter { - IntoParIter { - entries: self.into_entries(), - } - } -} - -/// A parallel owning iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`into_par_iter`] method on [`IndexMap`] -/// (provided by rayon's `IntoParallelIterator` trait). See its documentation for more. 
-/// -/// [`into_par_iter`]: ../struct.IndexMap.html#method.into_par_iter -/// [`IndexMap`]: ../struct.IndexMap.html -pub struct IntoParIter { - entries: Vec>, -} - -impl fmt::Debug for IntoParIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.entries.iter().map(Bucket::refs); - f.debug_list().entries(iter).finish() - } -} - -impl ParallelIterator for IntoParIter { - type Item = (K, V); - - parallel_iterator_methods!(Bucket::key_value); -} - -impl IndexedParallelIterator for IntoParIter { - indexed_parallel_iterator_methods!(Bucket::key_value); -} - -/// Requires crate feature `"rayon"`. -impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap -where - K: Sync, - V: Sync, -{ - type Item = (&'a K, &'a V); - type Iter = ParIter<'a, K, V>; - - fn into_par_iter(self) -> Self::Iter { - ParIter { - entries: self.as_entries(), - } - } -} - -/// A parallel iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`par_iter`] method on [`IndexMap`] -/// (provided by rayon's `IntoParallelRefIterator` trait). See its documentation for more. -/// -/// [`par_iter`]: ../struct.IndexMap.html#method.par_iter -/// [`IndexMap`]: ../struct.IndexMap.html -pub struct ParIter<'a, K, V> { - entries: &'a [Bucket], -} - -impl Clone for ParIter<'_, K, V> { - fn clone(&self) -> Self { - ParIter { ..*self } - } -} - -impl fmt::Debug for ParIter<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.entries.iter().map(Bucket::refs); - f.debug_list().entries(iter).finish() - } -} - -impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { - type Item = (&'a K, &'a V); - - parallel_iterator_methods!(Bucket::refs); -} - -impl IndexedParallelIterator for ParIter<'_, K, V> { - indexed_parallel_iterator_methods!(Bucket::refs); -} - -/// Requires crate feature `"rayon"`. 
-impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap -where - K: Sync + Send, - V: Send, -{ - type Item = (&'a K, &'a mut V); - type Iter = ParIterMut<'a, K, V>; - - fn into_par_iter(self) -> Self::Iter { - ParIterMut { - entries: self.as_entries_mut(), - } - } -} - -/// A parallel mutable iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`par_iter_mut`] method on [`IndexMap`] -/// (provided by rayon's `IntoParallelRefMutIterator` trait). See its documentation for more. -/// -/// [`par_iter_mut`]: ../struct.IndexMap.html#method.par_iter_mut -/// [`IndexMap`]: ../struct.IndexMap.html -pub struct ParIterMut<'a, K, V> { - entries: &'a mut [Bucket], -} - -impl fmt::Debug for ParIterMut<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.entries.iter().map(Bucket::refs); - f.debug_list().entries(iter).finish() - } -} - -impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> { - type Item = (&'a K, &'a mut V); - - parallel_iterator_methods!(Bucket::ref_mut); -} - -impl IndexedParallelIterator for ParIterMut<'_, K, V> { - indexed_parallel_iterator_methods!(Bucket::ref_mut); -} - -/// Requires crate feature `"rayon"`. -impl<'a, K, V, S> ParallelDrainRange for &'a mut IndexMap -where - K: Send, - V: Send, -{ - type Item = (K, V); - type Iter = ParDrain<'a, K, V>; - - fn par_drain>(self, range: R) -> Self::Iter { - ParDrain { - entries: self.core.par_drain(range), - } - } -} - -/// A parallel draining iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`par_drain`] method on [`IndexMap`] -/// (provided by rayon's `ParallelDrainRange` trait). See its documentation for more. 
-/// -/// [`par_drain`]: ../struct.IndexMap.html#method.par_drain -/// [`IndexMap`]: ../struct.IndexMap.html -pub struct ParDrain<'a, K: Send, V: Send> { - entries: rayon::vec::Drain<'a, Bucket>, -} - -impl ParallelIterator for ParDrain<'_, K, V> { - type Item = (K, V); - - parallel_iterator_methods!(Bucket::key_value); -} - -impl IndexedParallelIterator for ParDrain<'_, K, V> { - indexed_parallel_iterator_methods!(Bucket::key_value); -} - -/// Parallel iterator methods and other parallel methods. -/// -/// The following methods **require crate feature `"rayon"`**. -/// -/// See also the `IntoParallelIterator` implementations. -impl IndexMap -where - K: Sync, - V: Sync, -{ - /// Return a parallel iterator over the keys of the map. - /// - /// While parallel iterators can process items in any order, their relative order - /// in the map is still preserved for operations like `reduce` and `collect`. - pub fn par_keys(&self) -> ParKeys<'_, K, V> { - ParKeys { - entries: self.as_entries(), - } - } - - /// Return a parallel iterator over the values of the map. - /// - /// While parallel iterators can process items in any order, their relative order - /// in the map is still preserved for operations like `reduce` and `collect`. - pub fn par_values(&self) -> ParValues<'_, K, V> { - ParValues { - entries: self.as_entries(), - } - } -} - -impl IndexMap -where - K: Hash + Eq + Sync, - V: Sync, - S: BuildHasher, -{ - /// Returns `true` if `self` contains all of the same key-value pairs as `other`, - /// regardless of each map's indexed order, determined in parallel. - pub fn par_eq(&self, other: &IndexMap) -> bool - where - V: PartialEq, - V2: Sync, - S2: BuildHasher + Sync, - { - self.len() == other.len() - && self - .par_iter() - .all(move |(key, value)| other.get(key).map_or(false, |v| *value == *v)) - } -} - -/// A parallel iterator over the keys of a `IndexMap`. -/// -/// This `struct` is created by the [`par_keys`] method on [`IndexMap`]. 
See its -/// documentation for more. -/// -/// [`par_keys`]: ../struct.IndexMap.html#method.par_keys -/// [`IndexMap`]: ../struct.IndexMap.html -pub struct ParKeys<'a, K, V> { - entries: &'a [Bucket], -} - -impl Clone for ParKeys<'_, K, V> { - fn clone(&self) -> Self { - ParKeys { ..*self } - } -} - -impl fmt::Debug for ParKeys<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.entries.iter().map(Bucket::key_ref); - f.debug_list().entries(iter).finish() - } -} - -impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> { - type Item = &'a K; - - parallel_iterator_methods!(Bucket::key_ref); -} - -impl IndexedParallelIterator for ParKeys<'_, K, V> { - indexed_parallel_iterator_methods!(Bucket::key_ref); -} - -/// A parallel iterator over the values of a `IndexMap`. -/// -/// This `struct` is created by the [`par_values`] method on [`IndexMap`]. See its -/// documentation for more. -/// -/// [`par_values`]: ../struct.IndexMap.html#method.par_values -/// [`IndexMap`]: ../struct.IndexMap.html -pub struct ParValues<'a, K, V> { - entries: &'a [Bucket], -} - -impl Clone for ParValues<'_, K, V> { - fn clone(&self) -> Self { - ParValues { ..*self } - } -} - -impl fmt::Debug for ParValues<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.entries.iter().map(Bucket::value_ref); - f.debug_list().entries(iter).finish() - } -} - -impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> { - type Item = &'a V; - - parallel_iterator_methods!(Bucket::value_ref); -} - -impl IndexedParallelIterator for ParValues<'_, K, V> { - indexed_parallel_iterator_methods!(Bucket::value_ref); -} - -/// Requires crate feature `"rayon"`. 
-impl IndexMap -where - K: Send, - V: Send, -{ - /// Return a parallel iterator over mutable references to the values of the map - /// - /// While parallel iterators can process items in any order, their relative order - /// in the map is still preserved for operations like `reduce` and `collect`. - pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { - ParValuesMut { - entries: self.as_entries_mut(), - } - } -} - -impl IndexMap -where - K: Hash + Eq + Send, - V: Send, - S: BuildHasher, -{ - /// Sort the map’s key-value pairs in parallel, by the default ordering of the keys. - pub fn par_sort_keys(&mut self) - where - K: Ord, - { - self.with_entries(|entries| { - entries.par_sort_by(|a, b| K::cmp(&a.key, &b.key)); - }); - } - - /// Sort the map’s key-value pairs in place and in parallel, using the comparison - /// function `cmp`. - /// - /// The comparison function receives two key and value pairs to compare (you - /// can sort by keys or values or their combination as needed). - pub fn par_sort_by(&mut self, cmp: F) - where - F: Fn(&K, &V, &K, &V) -> Ordering + Sync, - { - self.with_entries(|entries| { - entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - }); - } - - /// Sort the key-value pairs of the map in parallel and return a by-value parallel - /// iterator of the key-value pairs with the result. - pub fn par_sorted_by(self, cmp: F) -> IntoParIter - where - F: Fn(&K, &V, &K, &V) -> Ordering + Sync, - { - let mut entries = self.into_entries(); - entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - IntoParIter { entries } - } - - /// Sort the map's key-value pairs in parallel, by the default ordering of the keys. - pub fn par_sort_unstable_keys(&mut self) - where - K: Ord, - { - self.with_entries(|entries| { - entries.par_sort_unstable_by(|a, b| K::cmp(&a.key, &b.key)); - }); - } - - /// Sort the map's key-value pairs in place and in parallel, using the comparison - /// function `cmp`. 
- /// - /// The comparison function receives two key and value pairs to compare (you - /// can sort by keys or values or their combination as needed). - pub fn par_sort_unstable_by(&mut self, cmp: F) - where - F: Fn(&K, &V, &K, &V) -> Ordering + Sync, - { - self.with_entries(|entries| { - entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - }); - } - - /// Sort the key-value pairs of the map in parallel and return a by-value parallel - /// iterator of the key-value pairs with the result. - pub fn par_sorted_unstable_by(self, cmp: F) -> IntoParIter - where - F: Fn(&K, &V, &K, &V) -> Ordering + Sync, - { - let mut entries = self.into_entries(); - entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - IntoParIter { entries } - } -} - -/// A parallel mutable iterator over the values of a `IndexMap`. -/// -/// This `struct` is created by the [`par_values_mut`] method on [`IndexMap`]. See its -/// documentation for more. -/// -/// [`par_values_mut`]: ../struct.IndexMap.html#method.par_values_mut -/// [`IndexMap`]: ../struct.IndexMap.html -pub struct ParValuesMut<'a, K, V> { - entries: &'a mut [Bucket], -} - -impl fmt::Debug for ParValuesMut<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.entries.iter().map(Bucket::value_ref); - f.debug_list().entries(iter).finish() - } -} - -impl<'a, K: Send, V: Send> ParallelIterator for ParValuesMut<'a, K, V> { - type Item = &'a mut V; - - parallel_iterator_methods!(Bucket::value_mut); -} - -impl IndexedParallelIterator for ParValuesMut<'_, K, V> { - indexed_parallel_iterator_methods!(Bucket::value_mut); -} - -/// Requires crate feature `"rayon"`. 
-impl FromParallelIterator<(K, V)> for IndexMap -where - K: Eq + Hash + Send, - V: Send, - S: BuildHasher + Default + Send, -{ - fn from_par_iter(iter: I) -> Self - where - I: IntoParallelIterator, - { - let list = collect(iter); - let len = list.iter().map(Vec::len).sum(); - let mut map = Self::with_capacity_and_hasher(len, S::default()); - for vec in list { - map.extend(vec); - } - map - } -} - -/// Requires crate feature `"rayon"`. -impl ParallelExtend<(K, V)> for IndexMap -where - K: Eq + Hash + Send, - V: Send, - S: BuildHasher + Send, -{ - fn par_extend(&mut self, iter: I) - where - I: IntoParallelIterator, - { - for vec in collect(iter) { - self.extend(vec); - } - } -} - -/// Requires crate feature `"rayon"`. -impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for IndexMap -where - K: Copy + Eq + Hash + Send + Sync, - V: Copy + Send + Sync, - S: BuildHasher + Send, -{ - fn par_extend(&mut self, iter: I) - where - I: IntoParallelIterator, - { - for vec in collect(iter) { - self.extend(vec); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::string::String; - - #[test] - fn insert_order() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut map = IndexMap::new(); - - for &elt in &insert { - map.insert(elt, ()); - } - - assert_eq!(map.par_keys().count(), map.len()); - assert_eq!(map.par_keys().count(), insert.len()); - insert.par_iter().zip(map.par_keys()).for_each(|(a, b)| { - assert_eq!(a, b); - }); - (0..insert.len()) - .into_par_iter() - .zip(map.par_keys()) - .for_each(|(i, k)| { - assert_eq!(map.get_index(i).unwrap().0, k); - }); - } - - #[test] - fn partial_eq_and_eq() { - let mut map_a = IndexMap::new(); - map_a.insert(1, "1"); - map_a.insert(2, "2"); - let mut map_b = map_a.clone(); - assert!(map_a.par_eq(&map_b)); - map_b.swap_remove(&1); - assert!(!map_a.par_eq(&map_b)); - map_b.insert(3, "3"); - assert!(!map_a.par_eq(&map_b)); - - let map_c: IndexMap<_, String> = - map_b.into_par_iter().map(|(k, v)| (k, 
v.into())).collect(); - assert!(!map_a.par_eq(&map_c)); - assert!(!map_c.par_eq(&map_a)); - } - - #[test] - fn extend() { - let mut map = IndexMap::new(); - map.par_extend(vec![(&1, &2), (&3, &4)]); - map.par_extend(vec![(5, 6)]); - assert_eq!( - map.into_par_iter().collect::>(), - vec![(1, 2), (3, 4), (5, 6)] - ); - } - - #[test] - fn keys() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_par_iter().collect(); - let keys: Vec<_> = map.par_keys().copied().collect(); - assert_eq!(keys.len(), 3); - assert!(keys.contains(&1)); - assert!(keys.contains(&2)); - assert!(keys.contains(&3)); - } - - #[test] - fn values() { - let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; - let map: IndexMap<_, _> = vec.into_par_iter().collect(); - let values: Vec<_> = map.par_values().copied().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&'a')); - assert!(values.contains(&'b')); - assert!(values.contains(&'c')); - } - - #[test] - fn values_mut() { - let vec = vec![(1, 1), (2, 2), (3, 3)]; - let mut map: IndexMap<_, _> = vec.into_par_iter().collect(); - map.par_values_mut().for_each(|value| *value *= 2); - let values: Vec<_> = map.par_values().copied().collect(); - assert_eq!(values.len(), 3); - assert!(values.contains(&2)); - assert!(values.contains(&4)); - assert!(values.contains(&6)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/rayon/mod.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/rayon/mod.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/rayon/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/rayon/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -use rayon::prelude::*; - -use alloc::collections::LinkedList; - -use crate::vec::Vec; - -pub mod map; -pub mod set; - -// This form of intermediate collection is also how Rayon collects `HashMap`. -// Note that the order will also be preserved! 
-fn collect(iter: I) -> LinkedList> { - iter.into_par_iter() - .fold(Vec::new, |mut vec, elem| { - vec.push(elem); - vec - }) - .map(|vec| { - let mut list = LinkedList::new(); - list.push_back(vec); - list - }) - .reduce(LinkedList::new, |mut list1, mut list2| { - list1.append(&mut list2); - list1 - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/rayon/set.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/rayon/set.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/rayon/set.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/rayon/set.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,741 +0,0 @@ -//! Parallel iterator types for `IndexSet` with [rayon](https://docs.rs/rayon/1.0/rayon). -//! -//! You will rarely need to interact with this module directly unless you need to name one of the -//! iterator types. -//! -//! Requires crate feature `"rayon"`. - -use super::collect; -use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; -use rayon::prelude::*; - -use crate::vec::Vec; -use core::cmp::Ordering; -use core::fmt; -use core::hash::{BuildHasher, Hash}; -use core::ops::RangeBounds; - -use crate::Entries; -use crate::IndexSet; - -type Bucket = crate::Bucket; - -/// Requires crate feature `"rayon"`. -impl IntoParallelIterator for IndexSet -where - T: Send, -{ - type Item = T; - type Iter = IntoParIter; - - fn into_par_iter(self) -> Self::Iter { - IntoParIter { - entries: self.into_entries(), - } - } -} - -/// A parallel owning iterator over the items of a `IndexSet`. -/// -/// This `struct` is created by the [`into_par_iter`] method on [`IndexSet`] -/// (provided by rayon's `IntoParallelIterator` trait). See its documentation for more. 
-/// -/// [`IndexSet`]: ../struct.IndexSet.html -/// [`into_par_iter`]: ../struct.IndexSet.html#method.into_par_iter -pub struct IntoParIter { - entries: Vec>, -} - -impl fmt::Debug for IntoParIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.entries.iter().map(Bucket::key_ref); - f.debug_list().entries(iter).finish() - } -} - -impl ParallelIterator for IntoParIter { - type Item = T; - - parallel_iterator_methods!(Bucket::key); -} - -impl IndexedParallelIterator for IntoParIter { - indexed_parallel_iterator_methods!(Bucket::key); -} - -/// Requires crate feature `"rayon"`. -impl<'a, T, S> IntoParallelIterator for &'a IndexSet -where - T: Sync, -{ - type Item = &'a T; - type Iter = ParIter<'a, T>; - - fn into_par_iter(self) -> Self::Iter { - ParIter { - entries: self.as_entries(), - } - } -} - -/// A parallel iterator over the items of a `IndexSet`. -/// -/// This `struct` is created by the [`par_iter`] method on [`IndexSet`] -/// (provided by rayon's `IntoParallelRefIterator` trait). See its documentation for more. -/// -/// [`IndexSet`]: ../struct.IndexSet.html -/// [`par_iter`]: ../struct.IndexSet.html#method.par_iter -pub struct ParIter<'a, T> { - entries: &'a [Bucket], -} - -impl Clone for ParIter<'_, T> { - fn clone(&self) -> Self { - ParIter { ..*self } - } -} - -impl fmt::Debug for ParIter<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.entries.iter().map(Bucket::key_ref); - f.debug_list().entries(iter).finish() - } -} - -impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { - type Item = &'a T; - - parallel_iterator_methods!(Bucket::key_ref); -} - -impl IndexedParallelIterator for ParIter<'_, T> { - indexed_parallel_iterator_methods!(Bucket::key_ref); -} - -/// Requires crate feature `"rayon"`. 
-impl<'a, T, S> ParallelDrainRange for &'a mut IndexSet -where - T: Send, -{ - type Item = T; - type Iter = ParDrain<'a, T>; - - fn par_drain>(self, range: R) -> Self::Iter { - ParDrain { - entries: self.map.core.par_drain(range), - } - } -} - -/// A parallel draining iterator over the items of a `IndexSet`. -/// -/// This `struct` is created by the [`par_drain`] method on [`IndexSet`] -/// (provided by rayon's `ParallelDrainRange` trait). See its documentation for more. -/// -/// [`par_drain`]: ../struct.IndexSet.html#method.par_drain -/// [`IndexSet`]: ../struct.IndexSet.html -pub struct ParDrain<'a, T: Send> { - entries: rayon::vec::Drain<'a, Bucket>, -} - -impl ParallelIterator for ParDrain<'_, T> { - type Item = T; - - parallel_iterator_methods!(Bucket::key); -} - -impl IndexedParallelIterator for ParDrain<'_, T> { - indexed_parallel_iterator_methods!(Bucket::key); -} - -/// Parallel iterator methods and other parallel methods. -/// -/// The following methods **require crate feature `"rayon"`**. -/// -/// See also the `IntoParallelIterator` implementations. -impl IndexSet -where - T: Hash + Eq + Sync, - S: BuildHasher + Sync, -{ - /// Return a parallel iterator over the values that are in `self` but not `other`. - /// - /// While parallel iterators can process items in any order, their relative order - /// in the `self` set is still preserved for operations like `reduce` and `collect`. - pub fn par_difference<'a, S2>( - &'a self, - other: &'a IndexSet, - ) -> ParDifference<'a, T, S, S2> - where - S2: BuildHasher + Sync, - { - ParDifference { - set1: self, - set2: other, - } - } - - /// Return a parallel iterator over the values that are in `self` or `other`, - /// but not in both. - /// - /// While parallel iterators can process items in any order, their relative order - /// in the sets is still preserved for operations like `reduce` and `collect`. 
- /// Values from `self` are produced in their original order, followed by - /// values from `other` in their original order. - pub fn par_symmetric_difference<'a, S2>( - &'a self, - other: &'a IndexSet, - ) -> ParSymmetricDifference<'a, T, S, S2> - where - S2: BuildHasher + Sync, - { - ParSymmetricDifference { - set1: self, - set2: other, - } - } - - /// Return a parallel iterator over the values that are in both `self` and `other`. - /// - /// While parallel iterators can process items in any order, their relative order - /// in the `self` set is still preserved for operations like `reduce` and `collect`. - pub fn par_intersection<'a, S2>( - &'a self, - other: &'a IndexSet, - ) -> ParIntersection<'a, T, S, S2> - where - S2: BuildHasher + Sync, - { - ParIntersection { - set1: self, - set2: other, - } - } - - /// Return a parallel iterator over all values that are in `self` or `other`. - /// - /// While parallel iterators can process items in any order, their relative order - /// in the sets is still preserved for operations like `reduce` and `collect`. - /// Values from `self` are produced in their original order, followed by - /// values that are unique to `other` in their original order. - pub fn par_union<'a, S2>(&'a self, other: &'a IndexSet) -> ParUnion<'a, T, S, S2> - where - S2: BuildHasher + Sync, - { - ParUnion { - set1: self, - set2: other, - } - } - - /// Returns `true` if `self` contains all of the same values as `other`, - /// regardless of each set's indexed order, determined in parallel. - pub fn par_eq(&self, other: &IndexSet) -> bool - where - S2: BuildHasher + Sync, - { - self.len() == other.len() && self.par_is_subset(other) - } - - /// Returns `true` if `self` has no elements in common with `other`, - /// determined in parallel. 
- pub fn par_is_disjoint(&self, other: &IndexSet) -> bool - where - S2: BuildHasher + Sync, - { - if self.len() <= other.len() { - self.par_iter().all(move |value| !other.contains(value)) - } else { - other.par_iter().all(move |value| !self.contains(value)) - } - } - - /// Returns `true` if all elements of `other` are contained in `self`, - /// determined in parallel. - pub fn par_is_superset(&self, other: &IndexSet) -> bool - where - S2: BuildHasher + Sync, - { - other.par_is_subset(self) - } - - /// Returns `true` if all elements of `self` are contained in `other`, - /// determined in parallel. - pub fn par_is_subset(&self, other: &IndexSet) -> bool - where - S2: BuildHasher + Sync, - { - self.len() <= other.len() && self.par_iter().all(move |value| other.contains(value)) - } -} - -/// A parallel iterator producing elements in the difference of `IndexSet`s. -/// -/// This `struct` is created by the [`par_difference`] method on [`IndexSet`]. -/// See its documentation for more. -/// -/// [`IndexSet`]: ../struct.IndexSet.html -/// [`par_difference`]: ../struct.IndexSet.html#method.par_difference -pub struct ParDifference<'a, T, S1, S2> { - set1: &'a IndexSet, - set2: &'a IndexSet, -} - -impl Clone for ParDifference<'_, T, S1, S2> { - fn clone(&self) -> Self { - ParDifference { ..*self } - } -} - -impl fmt::Debug for ParDifference<'_, T, S1, S2> -where - T: fmt::Debug + Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.set1.difference(self.set2)) - .finish() - } -} - -impl<'a, T, S1, S2> ParallelIterator for ParDifference<'a, T, S1, S2> -where - T: Hash + Eq + Sync, - S1: BuildHasher + Sync, - S2: BuildHasher + Sync, -{ - type Item = &'a T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - let Self { set1, set2 } = self; - - set1.par_iter() - .filter(move |&item| !set2.contains(item)) - .drive_unindexed(consumer) - } -} - -/// A 
parallel iterator producing elements in the intersection of `IndexSet`s. -/// -/// This `struct` is created by the [`par_intersection`] method on [`IndexSet`]. -/// See its documentation for more. -/// -/// [`IndexSet`]: ../struct.IndexSet.html -/// [`par_intersection`]: ../struct.IndexSet.html#method.par_intersection -pub struct ParIntersection<'a, T, S1, S2> { - set1: &'a IndexSet, - set2: &'a IndexSet, -} - -impl Clone for ParIntersection<'_, T, S1, S2> { - fn clone(&self) -> Self { - ParIntersection { ..*self } - } -} - -impl fmt::Debug for ParIntersection<'_, T, S1, S2> -where - T: fmt::Debug + Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.set1.intersection(self.set2)) - .finish() - } -} - -impl<'a, T, S1, S2> ParallelIterator for ParIntersection<'a, T, S1, S2> -where - T: Hash + Eq + Sync, - S1: BuildHasher + Sync, - S2: BuildHasher + Sync, -{ - type Item = &'a T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - let Self { set1, set2 } = self; - - set1.par_iter() - .filter(move |&item| set2.contains(item)) - .drive_unindexed(consumer) - } -} - -/// A parallel iterator producing elements in the symmetric difference of `IndexSet`s. -/// -/// This `struct` is created by the [`par_symmetric_difference`] method on -/// [`IndexSet`]. See its documentation for more. 
-/// -/// [`IndexSet`]: ../struct.IndexSet.html -/// [`par_symmetric_difference`]: ../struct.IndexSet.html#method.par_symmetric_difference -pub struct ParSymmetricDifference<'a, T, S1, S2> { - set1: &'a IndexSet, - set2: &'a IndexSet, -} - -impl Clone for ParSymmetricDifference<'_, T, S1, S2> { - fn clone(&self) -> Self { - ParSymmetricDifference { ..*self } - } -} - -impl fmt::Debug for ParSymmetricDifference<'_, T, S1, S2> -where - T: fmt::Debug + Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list() - .entries(self.set1.symmetric_difference(self.set2)) - .finish() - } -} - -impl<'a, T, S1, S2> ParallelIterator for ParSymmetricDifference<'a, T, S1, S2> -where - T: Hash + Eq + Sync, - S1: BuildHasher + Sync, - S2: BuildHasher + Sync, -{ - type Item = &'a T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - let Self { set1, set2 } = self; - - set1.par_difference(set2) - .chain(set2.par_difference(set1)) - .drive_unindexed(consumer) - } -} - -/// A parallel iterator producing elements in the union of `IndexSet`s. -/// -/// This `struct` is created by the [`par_union`] method on [`IndexSet`]. -/// See its documentation for more. 
-/// -/// [`IndexSet`]: ../struct.IndexSet.html -/// [`par_union`]: ../struct.IndexSet.html#method.par_union -pub struct ParUnion<'a, T, S1, S2> { - set1: &'a IndexSet, - set2: &'a IndexSet, -} - -impl Clone for ParUnion<'_, T, S1, S2> { - fn clone(&self) -> Self { - ParUnion { ..*self } - } -} - -impl fmt::Debug for ParUnion<'_, T, S1, S2> -where - T: fmt::Debug + Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.set1.union(self.set2)).finish() - } -} - -impl<'a, T, S1, S2> ParallelIterator for ParUnion<'a, T, S1, S2> -where - T: Hash + Eq + Sync, - S1: BuildHasher + Sync, - S2: BuildHasher + Sync, -{ - type Item = &'a T; - - fn drive_unindexed(self, consumer: C) -> C::Result - where - C: UnindexedConsumer, - { - let Self { set1, set2 } = self; - - set1.par_iter() - .chain(set2.par_difference(set1)) - .drive_unindexed(consumer) - } -} - -/// Parallel sorting methods. -/// -/// The following methods **require crate feature `"rayon"`**. -impl IndexSet -where - T: Hash + Eq + Send, - S: BuildHasher + Send, -{ - /// Sort the set’s values in parallel by their default ordering. - pub fn par_sort(&mut self) - where - T: Ord, - { - self.with_entries(|entries| { - entries.par_sort_by(|a, b| T::cmp(&a.key, &b.key)); - }); - } - - /// Sort the set’s values in place and in parallel, using the comparison function `cmp`. - pub fn par_sort_by(&mut self, cmp: F) - where - F: Fn(&T, &T) -> Ordering + Sync, - { - self.with_entries(|entries| { - entries.par_sort_by(move |a, b| cmp(&a.key, &b.key)); - }); - } - - /// Sort the values of the set in parallel and return a by-value parallel iterator of - /// the values with the result. 
- pub fn par_sorted_by(self, cmp: F) -> IntoParIter - where - F: Fn(&T, &T) -> Ordering + Sync, - { - let mut entries = self.into_entries(); - entries.par_sort_by(move |a, b| cmp(&a.key, &b.key)); - IntoParIter { entries } - } - - /// Sort the set's values in parallel by their default ordering. - pub fn par_sort_unstable(&mut self) - where - T: Ord, - { - self.with_entries(|entries| { - entries.par_sort_unstable_by(|a, b| T::cmp(&a.key, &b.key)); - }); - } - - /// Sort the set’s values in place and in parallel, using the comparison function `cmp`. - pub fn par_sort_unstable_by(&mut self, cmp: F) - where - F: Fn(&T, &T) -> Ordering + Sync, - { - self.with_entries(|entries| { - entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); - }); - } - - /// Sort the values of the set in parallel and return a by-value parallel iterator of - /// the values with the result. - pub fn par_sorted_unstable_by(self, cmp: F) -> IntoParIter - where - F: Fn(&T, &T) -> Ordering + Sync, - { - let mut entries = self.into_entries(); - entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); - IntoParIter { entries } - } -} - -/// Requires crate feature `"rayon"`. -impl FromParallelIterator for IndexSet -where - T: Eq + Hash + Send, - S: BuildHasher + Default + Send, -{ - fn from_par_iter(iter: I) -> Self - where - I: IntoParallelIterator, - { - let list = collect(iter); - let len = list.iter().map(Vec::len).sum(); - let mut set = Self::with_capacity_and_hasher(len, S::default()); - for vec in list { - set.extend(vec); - } - set - } -} - -/// Requires crate feature `"rayon"`. -impl ParallelExtend for IndexSet -where - T: Eq + Hash + Send, - S: BuildHasher + Send, -{ - fn par_extend(&mut self, iter: I) - where - I: IntoParallelIterator, - { - for vec in collect(iter) { - self.extend(vec); - } - } -} - -/// Requires crate feature `"rayon"`. 
-impl<'a, T: 'a, S> ParallelExtend<&'a T> for IndexSet -where - T: Copy + Eq + Hash + Send + Sync, - S: BuildHasher + Send, -{ - fn par_extend(&mut self, iter: I) - where - I: IntoParallelIterator, - { - for vec in collect(iter) { - self.extend(vec); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn insert_order() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut set = IndexSet::new(); - - for &elt in &insert { - set.insert(elt); - } - - assert_eq!(set.par_iter().count(), set.len()); - assert_eq!(set.par_iter().count(), insert.len()); - insert.par_iter().zip(&set).for_each(|(a, b)| { - assert_eq!(a, b); - }); - (0..insert.len()) - .into_par_iter() - .zip(&set) - .for_each(|(i, v)| { - assert_eq!(set.get_index(i).unwrap(), v); - }); - } - - #[test] - fn partial_eq_and_eq() { - let mut set_a = IndexSet::new(); - set_a.insert(1); - set_a.insert(2); - let mut set_b = set_a.clone(); - assert!(set_a.par_eq(&set_b)); - set_b.swap_remove(&1); - assert!(!set_a.par_eq(&set_b)); - set_b.insert(3); - assert!(!set_a.par_eq(&set_b)); - - let set_c: IndexSet<_> = set_b.into_par_iter().collect(); - assert!(!set_a.par_eq(&set_c)); - assert!(!set_c.par_eq(&set_a)); - } - - #[test] - fn extend() { - let mut set = IndexSet::new(); - set.par_extend(vec![&1, &2, &3, &4]); - set.par_extend(vec![5, 6]); - assert_eq!( - set.into_par_iter().collect::>(), - vec![1, 2, 3, 4, 5, 6] - ); - } - - #[test] - fn comparisons() { - let set_a: IndexSet<_> = (0..3).collect(); - let set_b: IndexSet<_> = (3..6).collect(); - let set_c: IndexSet<_> = (0..6).collect(); - let set_d: IndexSet<_> = (3..9).collect(); - - assert!(!set_a.par_is_disjoint(&set_a)); - assert!(set_a.par_is_subset(&set_a)); - assert!(set_a.par_is_superset(&set_a)); - - assert!(set_a.par_is_disjoint(&set_b)); - assert!(set_b.par_is_disjoint(&set_a)); - assert!(!set_a.par_is_subset(&set_b)); - assert!(!set_b.par_is_subset(&set_a)); - assert!(!set_a.par_is_superset(&set_b)); - 
assert!(!set_b.par_is_superset(&set_a)); - - assert!(!set_a.par_is_disjoint(&set_c)); - assert!(!set_c.par_is_disjoint(&set_a)); - assert!(set_a.par_is_subset(&set_c)); - assert!(!set_c.par_is_subset(&set_a)); - assert!(!set_a.par_is_superset(&set_c)); - assert!(set_c.par_is_superset(&set_a)); - - assert!(!set_c.par_is_disjoint(&set_d)); - assert!(!set_d.par_is_disjoint(&set_c)); - assert!(!set_c.par_is_subset(&set_d)); - assert!(!set_d.par_is_subset(&set_c)); - assert!(!set_c.par_is_superset(&set_d)); - assert!(!set_d.par_is_superset(&set_c)); - } - - #[test] - fn iter_comparisons() { - use std::iter::empty; - - fn check<'a, I1, I2>(iter1: I1, iter2: I2) - where - I1: ParallelIterator, - I2: Iterator, - { - let v1: Vec<_> = iter1.copied().collect(); - let v2: Vec<_> = iter2.collect(); - assert_eq!(v1, v2); - } - - let set_a: IndexSet<_> = (0..3).collect(); - let set_b: IndexSet<_> = (3..6).collect(); - let set_c: IndexSet<_> = (0..6).collect(); - let set_d: IndexSet<_> = (3..9).rev().collect(); - - check(set_a.par_difference(&set_a), empty()); - check(set_a.par_symmetric_difference(&set_a), empty()); - check(set_a.par_intersection(&set_a), 0..3); - check(set_a.par_union(&set_a), 0..3); - - check(set_a.par_difference(&set_b), 0..3); - check(set_b.par_difference(&set_a), 3..6); - check(set_a.par_symmetric_difference(&set_b), 0..6); - check(set_b.par_symmetric_difference(&set_a), (3..6).chain(0..3)); - check(set_a.par_intersection(&set_b), empty()); - check(set_b.par_intersection(&set_a), empty()); - check(set_a.par_union(&set_b), 0..6); - check(set_b.par_union(&set_a), (3..6).chain(0..3)); - - check(set_a.par_difference(&set_c), empty()); - check(set_c.par_difference(&set_a), 3..6); - check(set_a.par_symmetric_difference(&set_c), 3..6); - check(set_c.par_symmetric_difference(&set_a), 3..6); - check(set_a.par_intersection(&set_c), 0..3); - check(set_c.par_intersection(&set_a), 0..3); - check(set_a.par_union(&set_c), 0..6); - check(set_c.par_union(&set_a), 0..6); - - 
check(set_c.par_difference(&set_d), 0..3); - check(set_d.par_difference(&set_c), (6..9).rev()); - check( - set_c.par_symmetric_difference(&set_d), - (0..3).chain((6..9).rev()), - ); - check( - set_d.par_symmetric_difference(&set_c), - (6..9).rev().chain(0..3), - ); - check(set_c.par_intersection(&set_d), 3..6); - check(set_d.par_intersection(&set_c), (3..6).rev()); - check(set_c.par_union(&set_d), (0..6).chain((6..9).rev())); - check(set_d.par_union(&set_c), (3..9).rev().chain(0..3)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/rustc.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/rustc.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/rustc.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/rustc.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,158 +0,0 @@ -//! Minimal support for `rustc-rayon`, not intended for general use. - -use crate::vec::Vec; -use crate::{Bucket, Entries, IndexMap, IndexSet}; - -use rustc_rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; -use rustc_rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; - -mod map { - use super::*; - - impl IntoParallelIterator for IndexMap - where - K: Send, - V: Send, - { - type Item = (K, V); - type Iter = IntoParIter; - - fn into_par_iter(self) -> Self::Iter { - IntoParIter { - entries: self.into_entries(), - } - } - } - - pub struct IntoParIter { - entries: Vec>, - } - - impl ParallelIterator for IntoParIter { - type Item = (K, V); - - parallel_iterator_methods!(Bucket::key_value); - } - - impl IndexedParallelIterator for IntoParIter { - indexed_parallel_iterator_methods!(Bucket::key_value); - } - - impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap - where - K: Sync, - V: Sync, - { - type Item = (&'a K, &'a V); - type Iter = ParIter<'a, K, V>; - - fn into_par_iter(self) -> Self::Iter { - ParIter { - entries: self.as_entries(), - } - } - } - - pub struct ParIter<'a, K, V> { - entries: 
&'a [Bucket], - } - - impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { - type Item = (&'a K, &'a V); - - parallel_iterator_methods!(Bucket::refs); - } - - impl IndexedParallelIterator for ParIter<'_, K, V> { - indexed_parallel_iterator_methods!(Bucket::refs); - } - - impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap - where - K: Sync + Send, - V: Send, - { - type Item = (&'a K, &'a mut V); - type Iter = ParIterMut<'a, K, V>; - - fn into_par_iter(self) -> Self::Iter { - ParIterMut { - entries: self.as_entries_mut(), - } - } - } - - pub struct ParIterMut<'a, K, V> { - entries: &'a mut [Bucket], - } - - impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> { - type Item = (&'a K, &'a mut V); - - parallel_iterator_methods!(Bucket::ref_mut); - } - - impl IndexedParallelIterator for ParIterMut<'_, K, V> { - indexed_parallel_iterator_methods!(Bucket::ref_mut); - } -} - -mod set { - use super::*; - - impl IntoParallelIterator for IndexSet - where - T: Send, - { - type Item = T; - type Iter = IntoParIter; - - fn into_par_iter(self) -> Self::Iter { - IntoParIter { - entries: self.into_entries(), - } - } - } - - pub struct IntoParIter { - entries: Vec>, - } - - impl ParallelIterator for IntoParIter { - type Item = T; - - parallel_iterator_methods!(Bucket::key); - } - - impl IndexedParallelIterator for IntoParIter { - indexed_parallel_iterator_methods!(Bucket::key); - } - - impl<'a, T, S> IntoParallelIterator for &'a IndexSet - where - T: Sync, - { - type Item = &'a T; - type Iter = ParIter<'a, T>; - - fn into_par_iter(self) -> Self::Iter { - ParIter { - entries: self.as_entries(), - } - } - } - - pub struct ParIter<'a, T> { - entries: &'a [Bucket], - } - - impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { - type Item = &'a T; - - parallel_iterator_methods!(Bucket::key_ref); - } - - impl IndexedParallelIterator for ParIter<'_, T> { - indexed_parallel_iterator_methods!(Bucket::key_ref); - } -} diff -Nru 
s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/serde.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/serde.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/serde.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/serde.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,155 +0,0 @@ -use serde::de::value::{MapDeserializer, SeqDeserializer}; -use serde::de::{ - Deserialize, Deserializer, Error, IntoDeserializer, MapAccess, SeqAccess, Visitor, -}; -use serde::ser::{Serialize, Serializer}; - -use core::fmt::{self, Formatter}; -use core::hash::{BuildHasher, Hash}; -use core::marker::PhantomData; - -use crate::IndexMap; - -/// Requires crate feature `"serde"` or `"serde-1"` -impl Serialize for IndexMap -where - K: Serialize + Hash + Eq, - V: Serialize, - S: BuildHasher, -{ - fn serialize(&self, serializer: T) -> Result - where - T: Serializer, - { - serializer.collect_map(self) - } -} - -struct IndexMapVisitor(PhantomData<(K, V, S)>); - -impl<'de, K, V, S> Visitor<'de> for IndexMapVisitor -where - K: Deserialize<'de> + Eq + Hash, - V: Deserialize<'de>, - S: Default + BuildHasher, -{ - type Value = IndexMap; - - fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!(formatter, "a map") - } - - fn visit_map(self, mut map: A) -> Result - where - A: MapAccess<'de>, - { - let mut values = - IndexMap::with_capacity_and_hasher(map.size_hint().unwrap_or(0), S::default()); - - while let Some((key, value)) = map.next_entry()? 
{ - values.insert(key, value); - } - - Ok(values) - } -} - -/// Requires crate feature `"serde"` or `"serde-1"` -impl<'de, K, V, S> Deserialize<'de> for IndexMap -where - K: Deserialize<'de> + Eq + Hash, - V: Deserialize<'de>, - S: Default + BuildHasher, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_map(IndexMapVisitor(PhantomData)) - } -} - -impl<'de, K, V, S, E> IntoDeserializer<'de, E> for IndexMap -where - K: IntoDeserializer<'de, E> + Eq + Hash, - V: IntoDeserializer<'de, E>, - S: BuildHasher, - E: Error, -{ - type Deserializer = MapDeserializer<'de, ::IntoIter, E>; - - fn into_deserializer(self) -> Self::Deserializer { - MapDeserializer::new(self.into_iter()) - } -} - -use crate::IndexSet; - -/// Requires crate feature `"serde"` or `"serde-1"` -impl Serialize for IndexSet -where - T: Serialize + Hash + Eq, - S: BuildHasher, -{ - fn serialize(&self, serializer: Se) -> Result - where - Se: Serializer, - { - serializer.collect_seq(self) - } -} - -struct IndexSetVisitor(PhantomData<(T, S)>); - -impl<'de, T, S> Visitor<'de> for IndexSetVisitor -where - T: Deserialize<'de> + Eq + Hash, - S: Default + BuildHasher, -{ - type Value = IndexSet; - - fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!(formatter, "a set") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: SeqAccess<'de>, - { - let mut values = - IndexSet::with_capacity_and_hasher(seq.size_hint().unwrap_or(0), S::default()); - - while let Some(value) = seq.next_element()? 
{ - values.insert(value); - } - - Ok(values) - } -} - -/// Requires crate feature `"serde"` or `"serde-1"` -impl<'de, T, S> Deserialize<'de> for IndexSet -where - T: Deserialize<'de> + Eq + Hash, - S: Default + BuildHasher, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_seq(IndexSetVisitor(PhantomData)) - } -} - -impl<'de, T, S, E> IntoDeserializer<'de, E> for IndexSet -where - T: IntoDeserializer<'de, E> + Eq + Hash, - S: BuildHasher, - E: Error, -{ - type Deserializer = SeqDeserializer<::IntoIter, E>; - - fn into_deserializer(self) -> Self::Deserializer { - SeqDeserializer::new(self.into_iter()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/serde_seq.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/serde_seq.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/serde_seq.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/serde_seq.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,112 +0,0 @@ -//! Functions to serialize and deserialize an `IndexMap` as an ordered sequence. -//! -//! The default `serde` implementation serializes `IndexMap` as a normal map, -//! but there is no guarantee that serialization formats will preserve the order -//! of the key-value pairs. This module serializes `IndexMap` as a sequence of -//! `(key, value)` elements instead, in order. -//! -//! This module may be used in a field attribute for derived implementations: -//! -//! ``` -//! # use indexmap::IndexMap; -//! # use serde_derive::{Deserialize, Serialize}; -//! #[derive(Deserialize, Serialize)] -//! struct Data { -//! #[serde(with = "indexmap::serde_seq")] -//! map: IndexMap, -//! // ... -//! } -//! ``` -//! -//! 
Requires crate feature `"serde"` or `"serde-1"` - -use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; -use serde::ser::{Serialize, Serializer}; - -use core::fmt::{self, Formatter}; -use core::hash::{BuildHasher, Hash}; -use core::marker::PhantomData; - -use crate::IndexMap; - -/// Serializes an `IndexMap` as an ordered sequence. -/// -/// This function may be used in a field attribute for deriving `Serialize`: -/// -/// ``` -/// # use indexmap::IndexMap; -/// # use serde_derive::Serialize; -/// #[derive(Serialize)] -/// struct Data { -/// #[serde(serialize_with = "indexmap::serde_seq::serialize")] -/// map: IndexMap, -/// // ... -/// } -/// ``` -/// -/// Requires crate feature `"serde"` or `"serde-1"` -pub fn serialize(map: &IndexMap, serializer: T) -> Result -where - K: Serialize + Hash + Eq, - V: Serialize, - S: BuildHasher, - T: Serializer, -{ - serializer.collect_seq(map) -} - -/// Visitor to deserialize a *sequenced* `IndexMap` -struct SeqVisitor(PhantomData<(K, V, S)>); - -impl<'de, K, V, S> Visitor<'de> for SeqVisitor -where - K: Deserialize<'de> + Eq + Hash, - V: Deserialize<'de>, - S: Default + BuildHasher, -{ - type Value = IndexMap; - - fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!(formatter, "a sequenced map") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: SeqAccess<'de>, - { - let capacity = seq.size_hint().unwrap_or(0); - let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default()); - - while let Some((key, value)) = seq.next_element()? { - map.insert(key, value); - } - - Ok(map) - } -} - -/// Deserializes an `IndexMap` from an ordered sequence. -/// -/// This function may be used in a field attribute for deriving `Deserialize`: -/// -/// ``` -/// # use indexmap::IndexMap; -/// # use serde_derive::Deserialize; -/// #[derive(Deserialize)] -/// struct Data { -/// #[serde(deserialize_with = "indexmap::serde_seq::deserialize")] -/// map: IndexMap, -/// // ... 
-/// } -/// ``` -/// -/// Requires crate feature `"serde"` or `"serde-1"` -pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - K: Deserialize<'de> + Eq + Hash, - V: Deserialize<'de>, - S: Default + BuildHasher, -{ - deserializer.deserialize_seq(SeqVisitor(PhantomData)) -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/set.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/set.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/set.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/set.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1912 +0,0 @@ -//! A hash set implemented using `IndexMap` - -#[cfg(feature = "rayon")] -pub use crate::rayon::set as rayon; - -#[cfg(has_std)] -use std::collections::hash_map::RandomState; - -use crate::vec::{self, Vec}; -use core::cmp::Ordering; -use core::fmt; -use core::hash::{BuildHasher, Hash}; -use core::iter::{Chain, FusedIterator}; -use core::ops::{BitAnd, BitOr, BitXor, Index, RangeBounds, Sub}; -use core::slice; - -use super::{Entries, Equivalent, IndexMap}; - -type Bucket = super::Bucket; - -/// A hash set where the iteration order of the values is independent of their -/// hash values. -/// -/// The interface is closely compatible with the standard `HashSet`, but also -/// has additional features. -/// -/// # Order -/// -/// The values have a consistent order that is determined by the sequence of -/// insertion and removal calls on the set. The order does not depend on the -/// values or the hash function at all. Note that insertion order and value -/// are not affected if a re-insertion is attempted once an element is -/// already present. -/// -/// All iterators traverse the set *in order*. Set operation iterators like -/// `union` produce a concatenated order, as do their matching "bitwise" -/// operators. See their documentation for specifics. 
-/// -/// The insertion order is preserved, with **notable exceptions** like the -/// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of -/// course result in a new order, depending on the sorting order. -/// -/// # Indices -/// -/// The values are indexed in a compact range without holes in the range -/// `0..self.len()`. For example, the method `.get_full` looks up the index for -/// a value, and the method `.get_index` looks up the value by index. -/// -/// # Examples -/// -/// ``` -/// use indexmap::IndexSet; -/// -/// // Collects which letters appear in a sentence. -/// let letters: IndexSet<_> = "a short treatise on fungi".chars().collect(); -/// -/// assert!(letters.contains(&'s')); -/// assert!(letters.contains(&'t')); -/// assert!(letters.contains(&'u')); -/// assert!(!letters.contains(&'y')); -/// ``` -#[cfg(has_std)] -pub struct IndexSet { - pub(crate) map: IndexMap, -} -#[cfg(not(has_std))] -pub struct IndexSet { - pub(crate) map: IndexMap, -} - -impl Clone for IndexSet -where - T: Clone, - S: Clone, -{ - fn clone(&self) -> Self { - IndexSet { - map: self.map.clone(), - } - } - - fn clone_from(&mut self, other: &Self) { - self.map.clone_from(&other.map); - } -} - -impl Entries for IndexSet { - type Entry = Bucket; - - #[inline] - fn into_entries(self) -> Vec { - self.map.into_entries() - } - - #[inline] - fn as_entries(&self) -> &[Self::Entry] { - self.map.as_entries() - } - - #[inline] - fn as_entries_mut(&mut self) -> &mut [Self::Entry] { - self.map.as_entries_mut() - } - - fn with_entries(&mut self, f: F) - where - F: FnOnce(&mut [Self::Entry]), - { - self.map.with_entries(f); - } -} - -impl fmt::Debug for IndexSet -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if cfg!(not(feature = "test_debug")) { - f.debug_set().entries(self.iter()).finish() - } else { - // Let the inner `IndexMap` print all of its details - f.debug_struct("IndexSet").field("map", &self.map).finish() - } - } -} - 
-#[cfg(has_std)] -impl IndexSet { - /// Create a new set. (Does not allocate.) - pub fn new() -> Self { - IndexSet { - map: IndexMap::new(), - } - } - - /// Create a new set with capacity for `n` elements. - /// (Does not allocate if `n` is zero.) - /// - /// Computes in **O(n)** time. - pub fn with_capacity(n: usize) -> Self { - IndexSet { - map: IndexMap::with_capacity(n), - } - } -} - -impl IndexSet { - /// Create a new set with capacity for `n` elements. - /// (Does not allocate if `n` is zero.) - /// - /// Computes in **O(n)** time. - pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self { - IndexSet { - map: IndexMap::with_capacity_and_hasher(n, hash_builder), - } - } - - /// Create a new set with `hash_builder`. - /// - /// This function is `const`, so it - /// can be called in `static` contexts. - pub const fn with_hasher(hash_builder: S) -> Self { - IndexSet { - map: IndexMap::with_hasher(hash_builder), - } - } - - /// Computes in **O(1)** time. - pub fn capacity(&self) -> usize { - self.map.capacity() - } - - /// Return a reference to the set's `BuildHasher`. - pub fn hasher(&self) -> &S { - self.map.hasher() - } - - /// Return the number of elements in the set. - /// - /// Computes in **O(1)** time. - pub fn len(&self) -> usize { - self.map.len() - } - - /// Returns true if the set contains no elements. - /// - /// Computes in **O(1)** time. - pub fn is_empty(&self) -> bool { - self.map.is_empty() - } - - /// Return an iterator over the values of the set, in their order - pub fn iter(&self) -> Iter<'_, T> { - Iter { - iter: self.map.as_entries().iter(), - } - } - - /// Remove all elements in the set, while preserving its capacity. - /// - /// Computes in **O(n)** time. - pub fn clear(&mut self) { - self.map.clear(); - } - - /// Shortens the set, keeping the first `len` elements and dropping the rest. - /// - /// If `len` is greater than the set's current length, this has no effect. 
- pub fn truncate(&mut self, len: usize) { - self.map.truncate(len); - } - - /// Clears the `IndexSet` in the given index range, returning those values - /// as a drain iterator. - /// - /// The range may be any type that implements `RangeBounds`, - /// including all of the `std::ops::Range*` types, or even a tuple pair of - /// `Bound` start and end values. To drain the set entirely, use `RangeFull` - /// like `set.drain(..)`. - /// - /// This shifts down all entries following the drained range to fill the - /// gap, and keeps the allocated memory for reuse. - /// - /// ***Panics*** if the starting point is greater than the end point or if - /// the end point is greater than the length of the set. - pub fn drain(&mut self, range: R) -> Drain<'_, T> - where - R: RangeBounds, - { - Drain { - iter: self.map.drain(range).iter, - } - } - - /// Splits the collection into two at the given index. - /// - /// Returns a newly allocated set containing the elements in the range - /// `[at, len)`. After the call, the original set will be left containing - /// the elements `[0, at)` with its previous capacity unchanged. - /// - /// ***Panics*** if `at > len`. - pub fn split_off(&mut self, at: usize) -> Self - where - S: Clone, - { - Self { - map: self.map.split_off(at), - } - } -} - -impl IndexSet -where - T: Hash + Eq, - S: BuildHasher, -{ - /// Reserve capacity for `additional` more values. - /// - /// Computes in **O(n)** time. - pub fn reserve(&mut self, additional: usize) { - self.map.reserve(additional); - } - - /// Shrink the capacity of the set as much as possible. - /// - /// Computes in **O(n)** time. - pub fn shrink_to_fit(&mut self) { - self.map.shrink_to_fit(); - } - - /// Shrink the capacity of the set with a lower limit. - /// - /// Computes in **O(n)** time. - pub fn shrink_to(&mut self, min_capacity: usize) { - self.map.shrink_to(min_capacity); - } - - /// Insert the value into the set. 
- /// - /// If an equivalent item already exists in the set, it returns - /// `false` leaving the original value in the set and without - /// altering its insertion order. Otherwise, it inserts the new - /// item and returns `true`. - /// - /// Computes in **O(1)** time (amortized average). - pub fn insert(&mut self, value: T) -> bool { - self.map.insert(value, ()).is_none() - } - - /// Insert the value into the set, and get its index. - /// - /// If an equivalent item already exists in the set, it returns - /// the index of the existing item and `false`, leaving the - /// original value in the set and without altering its insertion - /// order. Otherwise, it inserts the new item and returns the index - /// of the inserted item and `true`. - /// - /// Computes in **O(1)** time (amortized average). - pub fn insert_full(&mut self, value: T) -> (usize, bool) { - use super::map::Entry::*; - - match self.map.entry(value) { - Occupied(e) => (e.index(), false), - Vacant(e) => { - let index = e.index(); - e.insert(()); - (index, true) - } - } - } - - /// Return an iterator over the values that are in `self` but not `other`. - /// - /// Values are produced in the same order that they appear in `self`. - pub fn difference<'a, S2>(&'a self, other: &'a IndexSet) -> Difference<'a, T, S2> - where - S2: BuildHasher, - { - Difference { - iter: self.iter(), - other, - } - } - - /// Return an iterator over the values that are in `self` or `other`, - /// but not in both. - /// - /// Values from `self` are produced in their original order, followed by - /// values from `other` in their original order. - pub fn symmetric_difference<'a, S2>( - &'a self, - other: &'a IndexSet, - ) -> SymmetricDifference<'a, T, S, S2> - where - S2: BuildHasher, - { - SymmetricDifference { - iter: self.difference(other).chain(other.difference(self)), - } - } - - /// Return an iterator over the values that are in both `self` and `other`. 
- /// - /// Values are produced in the same order that they appear in `self`. - pub fn intersection<'a, S2>(&'a self, other: &'a IndexSet) -> Intersection<'a, T, S2> - where - S2: BuildHasher, - { - Intersection { - iter: self.iter(), - other, - } - } - - /// Return an iterator over all values that are in `self` or `other`. - /// - /// Values from `self` are produced in their original order, followed by - /// values that are unique to `other` in their original order. - pub fn union<'a, S2>(&'a self, other: &'a IndexSet) -> Union<'a, T, S> - where - S2: BuildHasher, - { - Union { - iter: self.iter().chain(other.difference(self)), - } - } - - /// Return `true` if an equivalent to `value` exists in the set. - /// - /// Computes in **O(1)** time (average). - pub fn contains(&self, value: &Q) -> bool - where - Q: Hash + Equivalent, - { - self.map.contains_key(value) - } - - /// Return a reference to the value stored in the set, if it is present, - /// else `None`. - /// - /// Computes in **O(1)** time (average). - pub fn get(&self, value: &Q) -> Option<&T> - where - Q: Hash + Equivalent, - { - self.map.get_key_value(value).map(|(x, &())| x) - } - - /// Return item index and value - pub fn get_full(&self, value: &Q) -> Option<(usize, &T)> - where - Q: Hash + Equivalent, - { - self.map.get_full(value).map(|(i, x, &())| (i, x)) - } - - /// Return item index, if it exists in the set - pub fn get_index_of(&self, value: &Q) -> Option - where - Q: Hash + Equivalent, - { - self.map.get_index_of(value) - } - - /// Adds a value to the set, replacing the existing value, if any, that is - /// equal to the given one, without altering its insertion order. Returns - /// the replaced value. - /// - /// Computes in **O(1)** time (average). - pub fn replace(&mut self, value: T) -> Option { - self.replace_full(value).1 - } - - /// Adds a value to the set, replacing the existing value, if any, that is - /// equal to the given one, without altering its insertion order. 
Returns - /// the index of the item and its replaced value. - /// - /// Computes in **O(1)** time (average). - pub fn replace_full(&mut self, value: T) -> (usize, Option) { - use super::map::Entry::*; - - match self.map.entry(value) { - Vacant(e) => { - let index = e.index(); - e.insert(()); - (index, None) - } - Occupied(e) => (e.index(), Some(e.replace_key())), - } - } - - /// Remove the value from the set, and return `true` if it was present. - /// - /// **NOTE:** This is equivalent to `.swap_remove(value)`, if you want - /// to preserve the order of the values in the set, use `.shift_remove(value)`. - /// - /// Computes in **O(1)** time (average). - pub fn remove(&mut self, value: &Q) -> bool - where - Q: Hash + Equivalent, - { - self.swap_remove(value) - } - - /// Remove the value from the set, and return `true` if it was present. - /// - /// Like `Vec::swap_remove`, the value is removed by swapping it with the - /// last element of the set and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Return `false` if `value` was not in the set. - /// - /// Computes in **O(1)** time (average). - pub fn swap_remove(&mut self, value: &Q) -> bool - where - Q: Hash + Equivalent, - { - self.map.swap_remove(value).is_some() - } - - /// Remove the value from the set, and return `true` if it was present. - /// - /// Like `Vec::remove`, the value is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Return `false` if `value` was not in the set. - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove(&mut self, value: &Q) -> bool - where - Q: Hash + Equivalent, - { - self.map.shift_remove(value).is_some() - } - - /// Removes and returns the value in the set, if any, that is equal to the - /// given one. 
- /// - /// **NOTE:** This is equivalent to `.swap_take(value)`, if you need to - /// preserve the order of the values in the set, use `.shift_take(value)` - /// instead. - /// - /// Computes in **O(1)** time (average). - pub fn take(&mut self, value: &Q) -> Option - where - Q: Hash + Equivalent, - { - self.swap_take(value) - } - - /// Removes and returns the value in the set, if any, that is equal to the - /// given one. - /// - /// Like `Vec::swap_remove`, the value is removed by swapping it with the - /// last element of the set and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Return `None` if `value` was not in the set. - /// - /// Computes in **O(1)** time (average). - pub fn swap_take(&mut self, value: &Q) -> Option - where - Q: Hash + Equivalent, - { - self.map.swap_remove_entry(value).map(|(x, ())| x) - } - - /// Removes and returns the value in the set, if any, that is equal to the - /// given one. - /// - /// Like `Vec::remove`, the value is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Return `None` if `value` was not in the set. - /// - /// Computes in **O(n)** time (average). - pub fn shift_take(&mut self, value: &Q) -> Option - where - Q: Hash + Equivalent, - { - self.map.shift_remove_entry(value).map(|(x, ())| x) - } - - /// Remove the value from the set return it and the index it had. - /// - /// Like `Vec::swap_remove`, the value is removed by swapping it with the - /// last element of the set and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Return `None` if `value` was not in the set. 
- pub fn swap_remove_full(&mut self, value: &Q) -> Option<(usize, T)> - where - Q: Hash + Equivalent, - { - self.map.swap_remove_full(value).map(|(i, x, ())| (i, x)) - } - - /// Remove the value from the set return it and the index it had. - /// - /// Like `Vec::remove`, the value is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Return `None` if `value` was not in the set. - pub fn shift_remove_full(&mut self, value: &Q) -> Option<(usize, T)> - where - Q: Hash + Equivalent, - { - self.map.shift_remove_full(value).map(|(i, x, ())| (i, x)) - } - - /// Remove the last value - /// - /// This preserves the order of the remaining elements. - /// - /// Computes in **O(1)** time (average). - pub fn pop(&mut self) -> Option { - self.map.pop().map(|(x, ())| x) - } - - /// Scan through each value in the set and keep those where the - /// closure `keep` returns `true`. - /// - /// The elements are visited in order, and remaining elements keep their - /// order. - /// - /// Computes in **O(n)** time (average). - pub fn retain(&mut self, mut keep: F) - where - F: FnMut(&T) -> bool, - { - self.map.retain(move |x, &mut ()| keep(x)) - } - - /// Sort the set’s values by their default ordering. - /// - /// See [`sort_by`](Self::sort_by) for details. - pub fn sort(&mut self) - where - T: Ord, - { - self.map.sort_keys() - } - - /// Sort the set’s values in place using the comparison function `cmp`. - /// - /// Computes in **O(n log n)** time and **O(n)** space. The sort is stable. - pub fn sort_by(&mut self, mut cmp: F) - where - F: FnMut(&T, &T) -> Ordering, - { - self.map.sort_by(move |a, _, b, _| cmp(a, b)); - } - - /// Sort the values of the set and return a by-value iterator of - /// the values with the result. - /// - /// The sort is stable. 
- pub fn sorted_by(self, mut cmp: F) -> IntoIter - where - F: FnMut(&T, &T) -> Ordering, - { - let mut entries = self.into_entries(); - entries.sort_by(move |a, b| cmp(&a.key, &b.key)); - IntoIter { - iter: entries.into_iter(), - } - } - - /// Sort the set's values by their default ordering. - /// - /// See [`sort_unstable_by`](Self::sort_unstable_by) for details. - pub fn sort_unstable(&mut self) - where - T: Ord, - { - self.map.sort_unstable_keys() - } - - /// Sort the set's values in place using the comparison funtion `cmp`. - /// - /// Computes in **O(n log n)** time. The sort is unstable. - pub fn sort_unstable_by(&mut self, mut cmp: F) - where - F: FnMut(&T, &T) -> Ordering, - { - self.map.sort_unstable_by(move |a, _, b, _| cmp(a, b)) - } - - /// Sort the values of the set and return a by-value iterator of - /// the values with the result. - pub fn sorted_unstable_by(self, mut cmp: F) -> IntoIter - where - F: FnMut(&T, &T) -> Ordering, - { - let mut entries = self.into_entries(); - entries.sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); - IntoIter { - iter: entries.into_iter(), - } - } - - /// Reverses the order of the set’s values in place. - /// - /// Computes in **O(n)** time and **O(1)** space. - pub fn reverse(&mut self) { - self.map.reverse() - } -} - -impl IndexSet { - /// Get a value by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Computes in **O(1)** time. - pub fn get_index(&self, index: usize) -> Option<&T> { - self.as_entries().get(index).map(Bucket::key_ref) - } - - /// Get the first value - /// - /// Computes in **O(1)** time. - pub fn first(&self) -> Option<&T> { - self.as_entries().first().map(Bucket::key_ref) - } - - /// Get the last value - /// - /// Computes in **O(1)** time. 
- pub fn last(&self) -> Option<&T> { - self.as_entries().last().map(Bucket::key_ref) - } - - /// Remove the value by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Like `Vec::swap_remove`, the value is removed by swapping it with the - /// last element of the set and popping it off. **This perturbs - /// the position of what used to be the last element!** - /// - /// Computes in **O(1)** time (average). - pub fn swap_remove_index(&mut self, index: usize) -> Option { - self.map.swap_remove_index(index).map(|(x, ())| x) - } - - /// Remove the value by index - /// - /// Valid indices are *0 <= index < self.len()* - /// - /// Like `Vec::remove`, the value is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove_index(&mut self, index: usize) -> Option { - self.map.shift_remove_index(index).map(|(x, ())| x) - } - - /// Moves the position of a value from one index to another - /// by shifting all other values in-between. - /// - /// * If `from < to`, the other values will shift down while the targeted value moves up. - /// * If `from > to`, the other values will shift up while the targeted value moves down. - /// - /// ***Panics*** if `from` or `to` are out of bounds. - /// - /// Computes in **O(n)** time (average). - pub fn move_index(&mut self, from: usize, to: usize) { - self.map.move_index(from, to) - } - - /// Swaps the position of two values in the set. - /// - /// ***Panics*** if `a` or `b` are out of bounds. - pub fn swap_indices(&mut self, a: usize, b: usize) { - self.map.swap_indices(a, b) - } -} - -/// Access `IndexSet` values at indexed positions. 
-/// -/// # Examples -/// -/// ``` -/// use indexmap::IndexSet; -/// -/// let mut set = IndexSet::new(); -/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { -/// set.insert(word.to_string()); -/// } -/// assert_eq!(set[0], "Lorem"); -/// assert_eq!(set[1], "ipsum"); -/// set.reverse(); -/// assert_eq!(set[0], "amet"); -/// assert_eq!(set[1], "sit"); -/// set.sort(); -/// assert_eq!(set[0], "Lorem"); -/// assert_eq!(set[1], "amet"); -/// ``` -/// -/// ```should_panic -/// use indexmap::IndexSet; -/// -/// let mut set = IndexSet::new(); -/// set.insert("foo"); -/// println!("{:?}", set[10]); // panics! -/// ``` -impl Index for IndexSet { - type Output = T; - - /// Returns a reference to the value at the supplied `index`. - /// - /// ***Panics*** if `index` is out of bounds. - fn index(&self, index: usize) -> &T { - self.get_index(index) - .expect("IndexSet: index out of bounds") - } -} - -/// An owning iterator over the items of a `IndexSet`. -/// -/// This `struct` is created by the [`into_iter`] method on [`IndexSet`] -/// (provided by the `IntoIterator` trait). See its documentation for more. -/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`into_iter`]: struct.IndexSet.html#method.into_iter -pub struct IntoIter { - iter: vec::IntoIter>, -} - -impl Iterator for IntoIter { - type Item = T; - - iterator_methods!(Bucket::key); -} - -impl DoubleEndedIterator for IntoIter { - double_ended_iterator_methods!(Bucket::key); -} - -impl ExactSizeIterator for IntoIter { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IntoIter {} - -impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::key_ref); - f.debug_list().entries(iter).finish() - } -} - -/// An iterator over the items of a `IndexSet`. -/// -/// This `struct` is created by the [`iter`] method on [`IndexSet`]. -/// See its documentation for more. 
-/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`iter`]: struct.IndexSet.html#method.iter -pub struct Iter<'a, T> { - iter: slice::Iter<'a, Bucket>, -} - -impl<'a, T> Iterator for Iter<'a, T> { - type Item = &'a T; - - iterator_methods!(Bucket::key_ref); -} - -impl DoubleEndedIterator for Iter<'_, T> { - double_ended_iterator_methods!(Bucket::key_ref); -} - -impl ExactSizeIterator for Iter<'_, T> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Iter<'_, T> {} - -impl Clone for Iter<'_, T> { - fn clone(&self) -> Self { - Iter { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Iter<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A draining iterator over the items of a `IndexSet`. -/// -/// This `struct` is created by the [`drain`] method on [`IndexSet`]. -/// See its documentation for more. -/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`drain`]: struct.IndexSet.html#method.drain -pub struct Drain<'a, T> { - iter: vec::Drain<'a, Bucket>, -} - -impl Iterator for Drain<'_, T> { - type Item = T; - - iterator_methods!(Bucket::key); -} - -impl DoubleEndedIterator for Drain<'_, T> { - double_ended_iterator_methods!(Bucket::key); -} - -impl ExactSizeIterator for Drain<'_, T> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Drain<'_, T> {} - -impl fmt::Debug for Drain<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::key_ref); - f.debug_list().entries(iter).finish() - } -} - -impl<'a, T, S> IntoIterator for &'a IndexSet { - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for IndexSet { - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter { - iter: self.into_entries().into_iter(), - } - } -} - -impl FromIterator 
for IndexSet -where - T: Hash + Eq, - S: BuildHasher + Default, -{ - fn from_iter>(iterable: I) -> Self { - let iter = iterable.into_iter().map(|x| (x, ())); - IndexSet { - map: IndexMap::from_iter(iter), - } - } -} - -#[cfg(has_std)] -impl From<[T; N]> for IndexSet -where - T: Eq + Hash, -{ - /// # Examples - /// - /// ``` - /// use indexmap::IndexSet; - /// - /// let set1 = IndexSet::from([1, 2, 3, 4]); - /// let set2: IndexSet<_> = [1, 2, 3, 4].into(); - /// assert_eq!(set1, set2); - /// ``` - fn from(arr: [T; N]) -> Self { - Self::from_iter(arr) - } -} - -impl Extend for IndexSet -where - T: Hash + Eq, - S: BuildHasher, -{ - fn extend>(&mut self, iterable: I) { - let iter = iterable.into_iter().map(|x| (x, ())); - self.map.extend(iter); - } -} - -impl<'a, T, S> Extend<&'a T> for IndexSet -where - T: Hash + Eq + Copy + 'a, - S: BuildHasher, -{ - fn extend>(&mut self, iterable: I) { - let iter = iterable.into_iter().copied(); - self.extend(iter); - } -} - -impl Default for IndexSet -where - S: Default, -{ - /// Return an empty `IndexSet` - fn default() -> Self { - IndexSet { - map: IndexMap::default(), - } - } -} - -impl PartialEq> for IndexSet -where - T: Hash + Eq, - S1: BuildHasher, - S2: BuildHasher, -{ - fn eq(&self, other: &IndexSet) -> bool { - self.len() == other.len() && self.is_subset(other) - } -} - -impl Eq for IndexSet -where - T: Eq + Hash, - S: BuildHasher, -{ -} - -impl IndexSet -where - T: Eq + Hash, - S: BuildHasher, -{ - /// Returns `true` if `self` has no elements in common with `other`. - pub fn is_disjoint(&self, other: &IndexSet) -> bool - where - S2: BuildHasher, - { - if self.len() <= other.len() { - self.iter().all(move |value| !other.contains(value)) - } else { - other.iter().all(move |value| !self.contains(value)) - } - } - - /// Returns `true` if all elements of `self` are contained in `other`. 
- pub fn is_subset(&self, other: &IndexSet) -> bool - where - S2: BuildHasher, - { - self.len() <= other.len() && self.iter().all(move |value| other.contains(value)) - } - - /// Returns `true` if all elements of `other` are contained in `self`. - pub fn is_superset(&self, other: &IndexSet) -> bool - where - S2: BuildHasher, - { - other.is_subset(self) - } -} - -/// A lazy iterator producing elements in the difference of `IndexSet`s. -/// -/// This `struct` is created by the [`difference`] method on [`IndexSet`]. -/// See its documentation for more. -/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`difference`]: struct.IndexSet.html#method.difference -pub struct Difference<'a, T, S> { - iter: Iter<'a, T>, - other: &'a IndexSet, -} - -impl<'a, T, S> Iterator for Difference<'a, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - while let Some(item) = self.iter.next() { - if !self.other.contains(item) { - return Some(item); - } - } - None - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } -} - -impl DoubleEndedIterator for Difference<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - fn next_back(&mut self) -> Option { - while let Some(item) = self.iter.next_back() { - if !self.other.contains(item) { - return Some(item); - } - } - None - } -} - -impl FusedIterator for Difference<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ -} - -impl Clone for Difference<'_, T, S> { - fn clone(&self) -> Self { - Difference { - iter: self.iter.clone(), - ..*self - } - } -} - -impl fmt::Debug for Difference<'_, T, S> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A lazy iterator producing elements in the intersection of `IndexSet`s. -/// -/// This `struct` is created by the [`intersection`] method on [`IndexSet`]. -/// See its documentation for more. 
-/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`intersection`]: struct.IndexSet.html#method.intersection -pub struct Intersection<'a, T, S> { - iter: Iter<'a, T>, - other: &'a IndexSet, -} - -impl<'a, T, S> Iterator for Intersection<'a, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - while let Some(item) = self.iter.next() { - if self.other.contains(item) { - return Some(item); - } - } - None - } - - fn size_hint(&self) -> (usize, Option) { - (0, self.iter.size_hint().1) - } -} - -impl DoubleEndedIterator for Intersection<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - fn next_back(&mut self) -> Option { - while let Some(item) = self.iter.next_back() { - if self.other.contains(item) { - return Some(item); - } - } - None - } -} - -impl FusedIterator for Intersection<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ -} - -impl Clone for Intersection<'_, T, S> { - fn clone(&self) -> Self { - Intersection { - iter: self.iter.clone(), - ..*self - } - } -} - -impl fmt::Debug for Intersection<'_, T, S> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A lazy iterator producing elements in the symmetric difference of `IndexSet`s. -/// -/// This `struct` is created by the [`symmetric_difference`] method on -/// [`IndexSet`]. See its documentation for more. 
-/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`symmetric_difference`]: struct.IndexSet.html#method.symmetric_difference -pub struct SymmetricDifference<'a, T, S1, S2> { - iter: Chain, Difference<'a, T, S1>>, -} - -impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> -where - T: Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - self.iter.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn fold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, f) - } -} - -impl DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2> -where - T: Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - fn next_back(&mut self) -> Option { - self.iter.next_back() - } - - fn rfold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.rfold(init, f) - } -} - -impl FusedIterator for SymmetricDifference<'_, T, S1, S2> -where - T: Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ -} - -impl Clone for SymmetricDifference<'_, T, S1, S2> { - fn clone(&self) -> Self { - SymmetricDifference { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for SymmetricDifference<'_, T, S1, S2> -where - T: fmt::Debug + Eq + Hash, - S1: BuildHasher, - S2: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -/// A lazy iterator producing elements in the union of `IndexSet`s. -/// -/// This `struct` is created by the [`union`] method on [`IndexSet`]. -/// See its documentation for more. 
-/// -/// [`IndexSet`]: struct.IndexSet.html -/// [`union`]: struct.IndexSet.html#method.union -pub struct Union<'a, T, S> { - iter: Chain, Difference<'a, T, S>>, -} - -impl<'a, T, S> Iterator for Union<'a, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - self.iter.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn fold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.fold(init, f) - } -} - -impl DoubleEndedIterator for Union<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ - fn next_back(&mut self) -> Option { - self.iter.next_back() - } - - fn rfold(self, init: B, f: F) -> B - where - F: FnMut(B, Self::Item) -> B, - { - self.iter.rfold(init, f) - } -} - -impl FusedIterator for Union<'_, T, S> -where - T: Eq + Hash, - S: BuildHasher, -{ -} - -impl Clone for Union<'_, T, S> { - fn clone(&self) -> Self { - Union { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Union<'_, T, S> -where - T: fmt::Debug + Eq + Hash, - S: BuildHasher, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl BitAnd<&IndexSet> for &IndexSet -where - T: Eq + Hash + Clone, - S1: BuildHasher + Default, - S2: BuildHasher, -{ - type Output = IndexSet; - - /// Returns the set intersection, cloned into a new set. - /// - /// Values are collected in the same order that they appear in `self`. - fn bitand(self, other: &IndexSet) -> Self::Output { - self.intersection(other).cloned().collect() - } -} - -impl BitOr<&IndexSet> for &IndexSet -where - T: Eq + Hash + Clone, - S1: BuildHasher + Default, - S2: BuildHasher, -{ - type Output = IndexSet; - - /// Returns the set union, cloned into a new set. - /// - /// Values from `self` are collected in their original order, followed by - /// values that are unique to `other` in their original order. 
- fn bitor(self, other: &IndexSet) -> Self::Output { - self.union(other).cloned().collect() - } -} - -impl BitXor<&IndexSet> for &IndexSet -where - T: Eq + Hash + Clone, - S1: BuildHasher + Default, - S2: BuildHasher, -{ - type Output = IndexSet; - - /// Returns the set symmetric-difference, cloned into a new set. - /// - /// Values from `self` are collected in their original order, followed by - /// values from `other` in their original order. - fn bitxor(self, other: &IndexSet) -> Self::Output { - self.symmetric_difference(other).cloned().collect() - } -} - -impl Sub<&IndexSet> for &IndexSet -where - T: Eq + Hash + Clone, - S1: BuildHasher + Default, - S2: BuildHasher, -{ - type Output = IndexSet; - - /// Returns the set difference, cloned into a new set. - /// - /// Values are collected in the same order that they appear in `self`. - fn sub(self, other: &IndexSet) -> Self::Output { - self.difference(other).cloned().collect() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::string::String; - - #[test] - fn it_works() { - let mut set = IndexSet::new(); - assert_eq!(set.is_empty(), true); - set.insert(1); - set.insert(1); - assert_eq!(set.len(), 1); - assert!(set.get(&1).is_some()); - assert_eq!(set.is_empty(), false); - } - - #[test] - fn new() { - let set = IndexSet::::new(); - println!("{:?}", set); - assert_eq!(set.capacity(), 0); - assert_eq!(set.len(), 0); - assert_eq!(set.is_empty(), true); - } - - #[test] - fn insert() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5]; - let not_present = [1, 3, 6, 9, 10]; - let mut set = IndexSet::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(set.len(), i); - set.insert(elt); - assert_eq!(set.len(), i + 1); - assert_eq!(set.get(&elt), Some(&elt)); - } - println!("{:?}", set); - - for &elt in ¬_present { - assert!(set.get(&elt).is_none()); - } - } - - #[test] - fn insert_full() { - let insert = vec![9, 2, 7, 1, 4, 6, 13]; - let present = vec![1, 6, 2]; - let mut set = 
IndexSet::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(set.len(), i); - let (index, success) = set.insert_full(elt); - assert!(success); - assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); - assert_eq!(set.len(), i + 1); - } - - let len = set.len(); - for &elt in &present { - let (index, success) = set.insert_full(elt); - assert!(!success); - assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); - assert_eq!(set.len(), len); - } - } - - #[test] - fn insert_2() { - let mut set = IndexSet::with_capacity(16); - - let mut values = vec![]; - values.extend(0..16); - values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); - - for &i in &values { - let old_set = set.clone(); - set.insert(i); - for value in old_set.iter() { - if set.get(value).is_none() { - println!("old_set: {:?}", old_set); - println!("set: {:?}", set); - panic!("did not find {} in set", value); - } - } - } - - for &i in &values { - assert!(set.get(&i).is_some(), "did not find {}", i); - } - } - - #[test] - fn insert_dup() { - let mut elements = vec![0, 2, 4, 6, 8]; - let mut set: IndexSet = elements.drain(..).collect(); - { - let (i, v) = set.get_full(&0).unwrap(); - assert_eq!(set.len(), 5); - assert_eq!(i, 0); - assert_eq!(*v, 0); - } - { - let inserted = set.insert(0); - let (i, v) = set.get_full(&0).unwrap(); - assert_eq!(set.len(), 5); - assert_eq!(inserted, false); - assert_eq!(i, 0); - assert_eq!(*v, 0); - } - } - - #[test] - fn insert_order() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut set = IndexSet::new(); - - for &elt in &insert { - set.insert(elt); - } - - assert_eq!(set.iter().count(), set.len()); - assert_eq!(set.iter().count(), insert.len()); - for (a, b) in insert.iter().zip(set.iter()) { - assert_eq!(a, b); - } - for (i, v) in (0..insert.len()).zip(set.iter()) { - assert_eq!(set.get_index(i).unwrap(), v); - } - } - - #[test] - fn replace() { - let replace = [0, 4, 2, 12, 8, 7, 11, 5]; - let 
not_present = [1, 3, 6, 9, 10]; - let mut set = IndexSet::with_capacity(replace.len()); - - for (i, &elt) in replace.iter().enumerate() { - assert_eq!(set.len(), i); - set.replace(elt); - assert_eq!(set.len(), i + 1); - assert_eq!(set.get(&elt), Some(&elt)); - } - println!("{:?}", set); - - for &elt in ¬_present { - assert!(set.get(&elt).is_none()); - } - } - - #[test] - fn replace_full() { - let replace = vec![9, 2, 7, 1, 4, 6, 13]; - let present = vec![1, 6, 2]; - let mut set = IndexSet::with_capacity(replace.len()); - - for (i, &elt) in replace.iter().enumerate() { - assert_eq!(set.len(), i); - let (index, replaced) = set.replace_full(elt); - assert!(replaced.is_none()); - assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); - assert_eq!(set.len(), i + 1); - } - - let len = set.len(); - for &elt in &present { - let (index, replaced) = set.replace_full(elt); - assert_eq!(Some(elt), replaced); - assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); - assert_eq!(set.len(), len); - } - } - - #[test] - fn replace_2() { - let mut set = IndexSet::with_capacity(16); - - let mut values = vec![]; - values.extend(0..16); - values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); - - for &i in &values { - let old_set = set.clone(); - set.replace(i); - for value in old_set.iter() { - if set.get(value).is_none() { - println!("old_set: {:?}", old_set); - println!("set: {:?}", set); - panic!("did not find {} in set", value); - } - } - } - - for &i in &values { - assert!(set.get(&i).is_some(), "did not find {}", i); - } - } - - #[test] - fn replace_dup() { - let mut elements = vec![0, 2, 4, 6, 8]; - let mut set: IndexSet = elements.drain(..).collect(); - { - let (i, v) = set.get_full(&0).unwrap(); - assert_eq!(set.len(), 5); - assert_eq!(i, 0); - assert_eq!(*v, 0); - } - { - let replaced = set.replace(0); - let (i, v) = set.get_full(&0).unwrap(); - assert_eq!(set.len(), 5); - assert_eq!(replaced, Some(0)); - assert_eq!(i, 0); - assert_eq!(*v, 0); - } - } - - #[test] - 
fn replace_order() { - let replace = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut set = IndexSet::new(); - - for &elt in &replace { - set.replace(elt); - } - - assert_eq!(set.iter().count(), set.len()); - assert_eq!(set.iter().count(), replace.len()); - for (a, b) in replace.iter().zip(set.iter()) { - assert_eq!(a, b); - } - for (i, v) in (0..replace.len()).zip(set.iter()) { - assert_eq!(set.get_index(i).unwrap(), v); - } - } - - #[test] - fn grow() { - let insert = [0, 4, 2, 12, 8, 7, 11]; - let not_present = [1, 3, 6, 9, 10]; - let mut set = IndexSet::with_capacity(insert.len()); - - for (i, &elt) in insert.iter().enumerate() { - assert_eq!(set.len(), i); - set.insert(elt); - assert_eq!(set.len(), i + 1); - assert_eq!(set.get(&elt), Some(&elt)); - } - - println!("{:?}", set); - for &elt in &insert { - set.insert(elt * 10); - } - for &elt in &insert { - set.insert(elt * 100); - } - for (i, &elt) in insert.iter().cycle().enumerate().take(100) { - set.insert(elt * 100 + i as i32); - } - println!("{:?}", set); - for &elt in ¬_present { - assert!(set.get(&elt).is_none()); - } - } - - #[test] - fn reserve() { - let mut set = IndexSet::::new(); - assert_eq!(set.capacity(), 0); - set.reserve(100); - let capacity = set.capacity(); - assert!(capacity >= 100); - for i in 0..capacity { - assert_eq!(set.len(), i); - set.insert(i); - assert_eq!(set.len(), i + 1); - assert_eq!(set.capacity(), capacity); - assert_eq!(set.get(&i), Some(&i)); - } - set.insert(capacity); - assert_eq!(set.len(), capacity + 1); - assert!(set.capacity() > capacity); - assert_eq!(set.get(&capacity), Some(&capacity)); - } - - #[test] - fn shrink_to_fit() { - let mut set = IndexSet::::new(); - assert_eq!(set.capacity(), 0); - for i in 0..100 { - assert_eq!(set.len(), i); - set.insert(i); - assert_eq!(set.len(), i + 1); - assert!(set.capacity() >= i + 1); - assert_eq!(set.get(&i), Some(&i)); - set.shrink_to_fit(); - assert_eq!(set.len(), i + 1); - assert_eq!(set.capacity(), i + 1); - 
assert_eq!(set.get(&i), Some(&i)); - } - } - - #[test] - fn remove() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut set = IndexSet::new(); - - for &elt in &insert { - set.insert(elt); - } - - assert_eq!(set.iter().count(), set.len()); - assert_eq!(set.iter().count(), insert.len()); - for (a, b) in insert.iter().zip(set.iter()) { - assert_eq!(a, b); - } - - let remove_fail = [99, 77]; - let remove = [4, 12, 8, 7]; - - for &value in &remove_fail { - assert!(set.swap_remove_full(&value).is_none()); - } - println!("{:?}", set); - for &value in &remove { - //println!("{:?}", set); - let index = set.get_full(&value).unwrap().0; - assert_eq!(set.swap_remove_full(&value), Some((index, value))); - } - println!("{:?}", set); - - for value in &insert { - assert_eq!(set.get(value).is_some(), !remove.contains(value)); - } - assert_eq!(set.len(), insert.len() - remove.len()); - assert_eq!(set.iter().count(), insert.len() - remove.len()); - } - - #[test] - fn swap_remove_index() { - let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; - let mut set = IndexSet::new(); - - for &elt in &insert { - set.insert(elt); - } - - let mut vector = insert.to_vec(); - let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; - - // check that the same swap remove sequence on vec and set - // have the same result. 
- for &rm in remove_sequence { - let out_vec = vector.swap_remove(rm); - let out_set = set.swap_remove_index(rm).unwrap(); - assert_eq!(out_vec, out_set); - } - assert_eq!(vector.len(), set.len()); - for (a, b) in vector.iter().zip(set.iter()) { - assert_eq!(a, b); - } - } - - #[test] - fn partial_eq_and_eq() { - let mut set_a = IndexSet::new(); - set_a.insert(1); - set_a.insert(2); - let mut set_b = set_a.clone(); - assert_eq!(set_a, set_b); - set_b.swap_remove(&1); - assert_ne!(set_a, set_b); - - let set_c: IndexSet<_> = set_b.into_iter().collect(); - assert_ne!(set_a, set_c); - assert_ne!(set_c, set_a); - } - - #[test] - fn extend() { - let mut set = IndexSet::new(); - set.extend(vec![&1, &2, &3, &4]); - set.extend(vec![5, 6]); - assert_eq!(set.into_iter().collect::>(), vec![1, 2, 3, 4, 5, 6]); - } - - #[test] - fn comparisons() { - let set_a: IndexSet<_> = (0..3).collect(); - let set_b: IndexSet<_> = (3..6).collect(); - let set_c: IndexSet<_> = (0..6).collect(); - let set_d: IndexSet<_> = (3..9).collect(); - - assert!(!set_a.is_disjoint(&set_a)); - assert!(set_a.is_subset(&set_a)); - assert!(set_a.is_superset(&set_a)); - - assert!(set_a.is_disjoint(&set_b)); - assert!(set_b.is_disjoint(&set_a)); - assert!(!set_a.is_subset(&set_b)); - assert!(!set_b.is_subset(&set_a)); - assert!(!set_a.is_superset(&set_b)); - assert!(!set_b.is_superset(&set_a)); - - assert!(!set_a.is_disjoint(&set_c)); - assert!(!set_c.is_disjoint(&set_a)); - assert!(set_a.is_subset(&set_c)); - assert!(!set_c.is_subset(&set_a)); - assert!(!set_a.is_superset(&set_c)); - assert!(set_c.is_superset(&set_a)); - - assert!(!set_c.is_disjoint(&set_d)); - assert!(!set_d.is_disjoint(&set_c)); - assert!(!set_c.is_subset(&set_d)); - assert!(!set_d.is_subset(&set_c)); - assert!(!set_c.is_superset(&set_d)); - assert!(!set_d.is_superset(&set_c)); - } - - #[test] - fn iter_comparisons() { - use std::iter::empty; - - fn check<'a, I1, I2>(iter1: I1, iter2: I2) - where - I1: Iterator, - I2: Iterator, - { - 
assert!(iter1.copied().eq(iter2)); - } - - let set_a: IndexSet<_> = (0..3).collect(); - let set_b: IndexSet<_> = (3..6).collect(); - let set_c: IndexSet<_> = (0..6).collect(); - let set_d: IndexSet<_> = (3..9).rev().collect(); - - check(set_a.difference(&set_a), empty()); - check(set_a.symmetric_difference(&set_a), empty()); - check(set_a.intersection(&set_a), 0..3); - check(set_a.union(&set_a), 0..3); - - check(set_a.difference(&set_b), 0..3); - check(set_b.difference(&set_a), 3..6); - check(set_a.symmetric_difference(&set_b), 0..6); - check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3)); - check(set_a.intersection(&set_b), empty()); - check(set_b.intersection(&set_a), empty()); - check(set_a.union(&set_b), 0..6); - check(set_b.union(&set_a), (3..6).chain(0..3)); - - check(set_a.difference(&set_c), empty()); - check(set_c.difference(&set_a), 3..6); - check(set_a.symmetric_difference(&set_c), 3..6); - check(set_c.symmetric_difference(&set_a), 3..6); - check(set_a.intersection(&set_c), 0..3); - check(set_c.intersection(&set_a), 0..3); - check(set_a.union(&set_c), 0..6); - check(set_c.union(&set_a), 0..6); - - check(set_c.difference(&set_d), 0..3); - check(set_d.difference(&set_c), (6..9).rev()); - check( - set_c.symmetric_difference(&set_d), - (0..3).chain((6..9).rev()), - ); - check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3)); - check(set_c.intersection(&set_d), 3..6); - check(set_d.intersection(&set_c), (3..6).rev()); - check(set_c.union(&set_d), (0..6).chain((6..9).rev())); - check(set_d.union(&set_c), (3..9).rev().chain(0..3)); - } - - #[test] - fn ops() { - let empty = IndexSet::::new(); - let set_a: IndexSet<_> = (0..3).collect(); - let set_b: IndexSet<_> = (3..6).collect(); - let set_c: IndexSet<_> = (0..6).collect(); - let set_d: IndexSet<_> = (3..9).rev().collect(); - - #[allow(clippy::eq_op)] - { - assert_eq!(&set_a & &set_a, set_a); - assert_eq!(&set_a | &set_a, set_a); - assert_eq!(&set_a ^ &set_a, empty); - assert_eq!(&set_a - 
&set_a, empty); - } - - assert_eq!(&set_a & &set_b, empty); - assert_eq!(&set_b & &set_a, empty); - assert_eq!(&set_a | &set_b, set_c); - assert_eq!(&set_b | &set_a, set_c); - assert_eq!(&set_a ^ &set_b, set_c); - assert_eq!(&set_b ^ &set_a, set_c); - assert_eq!(&set_a - &set_b, set_a); - assert_eq!(&set_b - &set_a, set_b); - - assert_eq!(&set_a & &set_c, set_a); - assert_eq!(&set_c & &set_a, set_a); - assert_eq!(&set_a | &set_c, set_c); - assert_eq!(&set_c | &set_a, set_c); - assert_eq!(&set_a ^ &set_c, set_b); - assert_eq!(&set_c ^ &set_a, set_b); - assert_eq!(&set_a - &set_c, empty); - assert_eq!(&set_c - &set_a, set_b); - - assert_eq!(&set_c & &set_d, set_b); - assert_eq!(&set_d & &set_c, set_b); - assert_eq!(&set_c | &set_d, &set_a | &set_d); - assert_eq!(&set_d | &set_c, &set_a | &set_d); - assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b)); - assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b)); - assert_eq!(&set_c - &set_d, set_a); - assert_eq!(&set_d - &set_c, &set_d - &set_b); - } - - #[test] - #[cfg(has_std)] - fn from_array() { - let set1 = IndexSet::from([1, 2, 3, 4]); - let set2: IndexSet<_> = [1, 2, 3, 4].into(); - - assert_eq!(set1, set2); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/util.rs s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/util.rs --- s390-tools-2.31.0/rust-vendor/indexmap-1.9.3/src/util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap-1.9.3/src/util.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use core::ops::{Bound, Range, RangeBounds}; - -pub(crate) fn third(t: (A, B, C)) -> C { - t.2 -} - -pub(crate) fn simplify_range(range: R, len: usize) -> Range -where - R: RangeBounds, -{ - let start = match range.start_bound() { - Bound::Unbounded => 0, - Bound::Included(&i) if i <= len => i, - Bound::Excluded(&i) if i < len => i + 1, - bound => panic!("range start {:?} should be <= length {}", bound, len), - }; - let end = match range.end_bound() { - 
Bound::Unbounded => len, - Bound::Excluded(&i) if i <= len => i, - Bound::Included(&i) if i < len => i + 1, - bound => panic!("range end {:?} should be <= length {}", bound, len), - }; - if start > end { - panic!( - "range start {:?} should be <= range end {:?}", - range.start_bound(), - range.end_bound() - ); - } - start..end -} diff -Nru s390-tools-2.31.0/rust-vendor/lock_api/build.rs s390-tools-2.33.1/rust-vendor/lock_api/build.rs --- s390-tools-2.31.0/rust-vendor/lock_api/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/lock_api/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,7 +0,0 @@ -fn main() { - let cfg = autocfg::new(); - - if cfg.probe_rustc_version(1, 61) { - println!("cargo:rustc-cfg=has_const_fn_trait_bound"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/lock_api/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/lock_api/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/lock_api/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/lock_api/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/lock_api/Cargo.toml s390-tools-2.33.1/rust-vendor/lock_api/Cargo.toml --- s390-tools-2.31.0/rust-vendor/lock_api/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/lock_api/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). 
-# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.49.0" -name = "lock_api" -version = "0.4.11" -authors = ["Amanieu d'Antras "] -description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std." -keywords = [ - "mutex", - "rwlock", - "lock", - "no_std", -] -categories = [ - "concurrency", - "no-std", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/Amanieu/parking_lot" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", - "--generate-link-to-definition", -] - -[dependencies.owning_ref] -version = "0.4.1" -optional = true - -[dependencies.scopeguard] -version = "1.1.0" -default-features = false - -[dependencies.serde] -version = "1.0.126" -optional = true -default-features = false - -[build-dependencies.autocfg] -version = "1.1.0" - -[features] -arc_lock = [] -atomic_usize = [] -default = ["atomic_usize"] -nightly = [] diff -Nru s390-tools-2.31.0/rust-vendor/lock_api/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/lock_api/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/lock_api/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/lock_api/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/lock_api/LICENSE-MIT s390-tools-2.33.1/rust-vendor/lock_api/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/lock_api/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/lock_api/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2016 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/lock_api/src/lib.rs s390-tools-2.33.1/rust-vendor/lock_api/src/lib.rs --- s390-tools-2.31.0/rust-vendor/lock_api/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/lock_api/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,116 +0,0 @@ -// Copyright 2018 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -//! This library provides type-safe and fully-featured `Mutex` and `RwLock` -//! types which wrap a simple raw mutex or rwlock type. This has several -//! benefits: not only does it eliminate a large portion of the work in -//! implementing custom lock types, it also allows users to write code which is -//! generic with regards to different lock implementations. -//! -//! Basic usage of this crate is very straightforward: -//! -//! 1. Create a raw lock type. This should only contain the lock state, not any -//! data protected by the lock. -//! 2. Implement the `RawMutex` trait for your custom lock type. -//! 3. Export your mutex as a type alias for `lock_api::Mutex`, and -//! your mutex guard as a type alias for `lock_api::MutexGuard`. -//! See the [example](#example) below for details. -//! -//! This process is similar for RwLocks, except that two guards need to be -//! exported instead of one. (Or 3 guards if your type supports upgradable read -//! locks, see [extension traits](#extension-traits) below for details) -//! -//! # Example -//! -//! ``` -//! use lock_api::{RawMutex, Mutex, GuardSend}; -//! use std::sync::atomic::{AtomicBool, Ordering}; -//! -//! // 1. Define our raw lock type -//! pub struct RawSpinlock(AtomicBool); -//! -//! // 2. Implement RawMutex for this type -//! unsafe impl RawMutex for RawSpinlock { -//! const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false)); -//! -//! 
// A spinlock guard can be sent to another thread and unlocked there -//! type GuardMarker = GuardSend; -//! -//! fn lock(&self) { -//! // Note: This isn't the best way of implementing a spinlock, but it -//! // suffices for the sake of this example. -//! while !self.try_lock() {} -//! } -//! -//! fn try_lock(&self) -> bool { -//! self.0 -//! .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) -//! .is_ok() -//! } -//! -//! unsafe fn unlock(&self) { -//! self.0.store(false, Ordering::Release); -//! } -//! } -//! -//! // 3. Export the wrappers. This are the types that your users will actually use. -//! pub type Spinlock = lock_api::Mutex; -//! pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>; -//! ``` -//! -//! # Extension traits -//! -//! In addition to basic locking & unlocking functionality, you have the option -//! of exposing additional functionality in your lock types by implementing -//! additional traits for it. Examples of extension features include: -//! -//! - Fair unlocking (`RawMutexFair`, `RawRwLockFair`) -//! - Lock timeouts (`RawMutexTimed`, `RawRwLockTimed`) -//! - Downgradable write locks (`RawRwLockDowngradable`) -//! - Recursive read locks (`RawRwLockRecursive`) -//! - Upgradable read locks (`RawRwLockUpgrade`) -//! -//! The `Mutex` and `RwLock` wrappers will automatically expose this additional -//! functionality if the raw lock type implements these extension traits. -//! -//! # Cargo features -//! -//! This crate supports three cargo features: -//! -//! - `owning_ref`: Allows your lock types to be used with the `owning_ref` crate. -//! - `arc_lock`: Enables locking from an `Arc`. This enables types such as `ArcMutexGuard`. Note that this -//! requires the `alloc` crate to be present. 
- -#![no_std] -#![cfg_attr(docsrs, feature(doc_auto_cfg))] -#![warn(missing_docs)] -#![warn(rust_2018_idioms)] - -#[macro_use] -extern crate scopeguard; - -#[cfg(feature = "arc_lock")] -extern crate alloc; - -/// Marker type which indicates that the Guard type for a lock is `Send`. -pub struct GuardSend(()); - -/// Marker type which indicates that the Guard type for a lock is not `Send`. -pub struct GuardNoSend(*mut ()); - -unsafe impl Sync for GuardNoSend {} - -mod mutex; -pub use crate::mutex::*; - -#[cfg(feature = "atomic_usize")] -mod remutex; -#[cfg(feature = "atomic_usize")] -pub use crate::remutex::*; - -mod rwlock; -pub use crate::rwlock::*; diff -Nru s390-tools-2.31.0/rust-vendor/lock_api/src/mutex.rs s390-tools-2.33.1/rust-vendor/lock_api/src/mutex.rs --- s390-tools-2.31.0/rust-vendor/lock_api/src/mutex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/lock_api/src/mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,960 +0,0 @@ -// Copyright 2018 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use core::cell::UnsafeCell; -use core::fmt; -use core::marker::PhantomData; -use core::mem; -use core::ops::{Deref, DerefMut}; - -#[cfg(feature = "arc_lock")] -use alloc::sync::Arc; -#[cfg(feature = "arc_lock")] -use core::mem::ManuallyDrop; -#[cfg(feature = "arc_lock")] -use core::ptr; - -#[cfg(feature = "owning_ref")] -use owning_ref::StableAddress; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -/// Basic operations for a mutex. -/// -/// Types implementing this trait can be used by `Mutex` to form a safe and -/// fully-functioning mutex type. -/// -/// # Safety -/// -/// Implementations of this trait must ensure that the mutex is actually -/// exclusive: a lock can't be acquired while the mutex is already locked. 
-pub unsafe trait RawMutex { - /// Initial value for an unlocked mutex. - // A “non-constant†const item is a legacy way to supply an initialized value to downstream - // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. - #[allow(clippy::declare_interior_mutable_const)] - const INIT: Self; - - /// Marker type which determines whether a lock guard should be `Send`. Use - /// one of the `GuardSend` or `GuardNoSend` helper types here. - type GuardMarker; - - /// Acquires this mutex, blocking the current thread until it is able to do so. - fn lock(&self); - - /// Attempts to acquire this mutex without blocking. Returns `true` - /// if the lock was successfully acquired and `false` otherwise. - fn try_lock(&self) -> bool; - - /// Unlocks this mutex. - /// - /// # Safety - /// - /// This method may only be called if the mutex is held in the current context, i.e. it must - /// be paired with a successful call to [`lock`], [`try_lock`], [`try_lock_for`] or [`try_lock_until`]. - /// - /// [`lock`]: #tymethod.lock - /// [`try_lock`]: #tymethod.try_lock - /// [`try_lock_for`]: trait.RawMutexTimed.html#tymethod.try_lock_for - /// [`try_lock_until`]: trait.RawMutexTimed.html#tymethod.try_lock_until - unsafe fn unlock(&self); - - /// Checks whether the mutex is currently locked. - #[inline] - fn is_locked(&self) -> bool { - let acquired_lock = self.try_lock(); - if acquired_lock { - // Safety: The lock has been successfully acquired above. - unsafe { - self.unlock(); - } - } - !acquired_lock - } -} - -/// Additional methods for mutexes which support fair unlocking. -/// -/// Fair unlocking means that a lock is handed directly over to the next waiting -/// thread if there is one, without giving other threads the opportunity to -/// "steal" the lock in the meantime. This is typically slower than unfair -/// unlocking, but may be necessary in certain circumstances. 
-pub unsafe trait RawMutexFair: RawMutex { - /// Unlocks this mutex using a fair unlock protocol. - /// - /// # Safety - /// - /// This method may only be called if the mutex is held in the current context, see - /// the documentation of [`unlock`]. - /// - /// [`unlock`]: trait.RawMutex.html#tymethod.unlock - unsafe fn unlock_fair(&self); - - /// Temporarily yields the mutex to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `unlock_fair` followed - /// by `lock`, however it can be much more efficient in the case where there - /// are no waiting threads. - /// - /// # Safety - /// - /// This method may only be called if the mutex is held in the current context, see - /// the documentation of [`unlock`]. - /// - /// [`unlock`]: trait.RawMutex.html#tymethod.unlock - unsafe fn bump(&self) { - self.unlock_fair(); - self.lock(); - } -} - -/// Additional methods for mutexes which support locking with timeouts. -/// -/// The `Duration` and `Instant` types are specified as associated types so that -/// this trait is usable even in `no_std` environments. -pub unsafe trait RawMutexTimed: RawMutex { - /// Duration type used for `try_lock_for`. - type Duration; - - /// Instant type used for `try_lock_until`. - type Instant; - - /// Attempts to acquire this lock until a timeout is reached. - fn try_lock_for(&self, timeout: Self::Duration) -> bool; - - /// Attempts to acquire this lock until a timeout is reached. - fn try_lock_until(&self, timeout: Self::Instant) -> bool; -} - -/// A mutual exclusion primitive useful for protecting shared data -/// -/// This mutex will block threads waiting for the lock to become available. The -/// mutex can also be statically initialized or created via a `new` -/// constructor. Each mutex has a type parameter which represents the data that -/// it is protecting. 
The data can only be accessed through the RAII guards -/// returned from `lock` and `try_lock`, which guarantees that the data is only -/// ever accessed when the mutex is locked. -pub struct Mutex { - raw: R, - data: UnsafeCell, -} - -unsafe impl Send for Mutex {} -unsafe impl Sync for Mutex {} - -impl Mutex { - /// Creates a new mutex in an unlocked state ready for use. - #[cfg(has_const_fn_trait_bound)] - #[inline] - pub const fn new(val: T) -> Mutex { - Mutex { - raw: R::INIT, - data: UnsafeCell::new(val), - } - } - - /// Creates a new mutex in an unlocked state ready for use. - #[cfg(not(has_const_fn_trait_bound))] - #[inline] - pub fn new(val: T) -> Mutex { - Mutex { - raw: R::INIT, - data: UnsafeCell::new(val), - } - } - - /// Consumes this mutex, returning the underlying data. - #[inline] - pub fn into_inner(self) -> T { - self.data.into_inner() - } -} - -impl Mutex { - /// Creates a new mutex based on a pre-existing raw mutex. - /// - /// This allows creating a mutex in a constant context on stable Rust. - #[inline] - pub const fn const_new(raw_mutex: R, val: T) -> Mutex { - Mutex { - raw: raw_mutex, - data: UnsafeCell::new(val), - } - } -} - -impl Mutex { - /// Creates a new `MutexGuard` without checking if the mutex is locked. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds the lock. - /// - /// Calling this function when a guard has already been produced is undefined behaviour unless - /// the guard was forgotten with `mem::forget`. - #[inline] - pub unsafe fn make_guard_unchecked(&self) -> MutexGuard<'_, R, T> { - MutexGuard { - mutex: self, - marker: PhantomData, - } - } - - /// Acquires a mutex, blocking the current thread until it is able to do so. - /// - /// This function will block the local thread until it is available to acquire - /// the mutex. Upon returning, the thread is the only thread with the mutex - /// held. An RAII guard is returned to allow scoped unlock of the lock. 
When - /// the guard goes out of scope, the mutex will be unlocked. - /// - /// Attempts to lock a mutex in the thread which already holds the lock will - /// result in a deadlock. - #[inline] - pub fn lock(&self) -> MutexGuard<'_, R, T> { - self.raw.lock(); - // SAFETY: The lock is held, as required. - unsafe { self.make_guard_unchecked() } - } - - /// Attempts to acquire this lock. - /// - /// If the lock could not be acquired at this time, then `None` is returned. - /// Otherwise, an RAII guard is returned. The lock will be unlocked when the - /// guard is dropped. - /// - /// This function does not block. - #[inline] - pub fn try_lock(&self) -> Option> { - if self.raw.try_lock() { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_guard_unchecked() }) - } else { - None - } - } - - /// Returns a mutable reference to the underlying data. - /// - /// Since this call borrows the `Mutex` mutably, no actual locking needs to - /// take place---the mutable borrow statically guarantees no locks exist. - #[inline] - pub fn get_mut(&mut self) -> &mut T { - unsafe { &mut *self.data.get() } - } - - /// Checks whether the mutex is currently locked. - #[inline] - pub fn is_locked(&self) -> bool { - self.raw.is_locked() - } - - /// Forcibly unlocks the mutex. - /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `MutexGuard` object alive, for example when - /// dealing with FFI. - /// - /// # Safety - /// - /// This method must only be called if the current thread logically owns a - /// `MutexGuard` but that guard has been discarded using `mem::forget`. - /// Behavior is undefined if a mutex is unlocked when not locked. - #[inline] - pub unsafe fn force_unlock(&self) { - self.raw.unlock(); - } - - /// Returns the underlying raw mutex object. - /// - /// Note that you will most likely need to import the `RawMutex` trait from - /// `lock_api` to be able to call functions on the raw mutex. 
- /// - /// # Safety - /// - /// This method is unsafe because it allows unlocking a mutex while - /// still holding a reference to a `MutexGuard`. - #[inline] - pub unsafe fn raw(&self) -> &R { - &self.raw - } - - /// Returns a raw pointer to the underlying data. - /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `MutexGuard` object alive, for example when - /// dealing with FFI. - /// - /// # Safety - /// - /// You must ensure that there are no data races when dereferencing the - /// returned pointer, for example if the current thread logically owns - /// a `MutexGuard` but that guard has been discarded using `mem::forget`. - #[inline] - pub fn data_ptr(&self) -> *mut T { - self.data.get() - } - - /// Creates a new `ArcMutexGuard` without checking if the mutex is locked. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds the lock. - /// - /// Calling this function when a guard has already been produced is undefined behaviour unless - /// the guard was forgotten with `mem::forget`. - #[cfg(feature = "arc_lock")] - #[inline] - unsafe fn make_arc_guard_unchecked(self: &Arc) -> ArcMutexGuard { - ArcMutexGuard { - mutex: self.clone(), - marker: PhantomData, - } - } - - /// Acquires a lock through an `Arc`. - /// - /// This method is similar to the `lock` method; however, it requires the `Mutex` to be inside of an `Arc` - /// and the resulting mutex guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn lock_arc(self: &Arc) -> ArcMutexGuard { - self.raw.lock(); - // SAFETY: the locking guarantee is upheld - unsafe { self.make_arc_guard_unchecked() } - } - - /// Attempts to acquire a lock through an `Arc`. - /// - /// This method is similar to the `try_lock` method; however, it requires the `Mutex` to be inside of an - /// `Arc` and the resulting mutex guard has no lifetime requirements. 
- #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_lock_arc(self: &Arc) -> Option> { - if self.raw.try_lock() { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_guard_unchecked() }) - } else { - None - } - } -} - -impl Mutex { - /// Forcibly unlocks the mutex using a fair unlock procotol. - /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `MutexGuard` object alive, for example when - /// dealing with FFI. - /// - /// # Safety - /// - /// This method must only be called if the current thread logically owns a - /// `MutexGuard` but that guard has been discarded using `mem::forget`. - /// Behavior is undefined if a mutex is unlocked when not locked. - #[inline] - pub unsafe fn force_unlock_fair(&self) { - self.raw.unlock_fair(); - } -} - -impl Mutex { - /// Attempts to acquire this lock until a timeout is reached. - /// - /// If the lock could not be acquired before the timeout expired, then - /// `None` is returned. Otherwise, an RAII guard is returned. The lock will - /// be unlocked when the guard is dropped. - #[inline] - pub fn try_lock_for(&self, timeout: R::Duration) -> Option> { - if self.raw.try_lock_for(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this lock until a timeout is reached. - /// - /// If the lock could not be acquired before the timeout expired, then - /// `None` is returned. Otherwise, an RAII guard is returned. The lock will - /// be unlocked when the guard is dropped. - #[inline] - pub fn try_lock_until(&self, timeout: R::Instant) -> Option> { - if self.raw.try_lock_until(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this lock through an `Arc` until a timeout is reached. 
- /// - /// This method is similar to the `try_lock_for` method; however, it requires the `Mutex` to be inside of an - /// `Arc` and the resulting mutex guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_lock_arc_for(self: &Arc, timeout: R::Duration) -> Option> { - if self.raw.try_lock_for(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this lock through an `Arc` until a timeout is reached. - /// - /// This method is similar to the `try_lock_until` method; however, it requires the `Mutex` to be inside of - /// an `Arc` and the resulting mutex guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_lock_arc_until( - self: &Arc, - timeout: R::Instant, - ) -> Option> { - if self.raw.try_lock_until(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_guard_unchecked() }) - } else { - None - } - } -} - -impl Default for Mutex { - #[inline] - fn default() -> Mutex { - Mutex::new(Default::default()) - } -} - -impl From for Mutex { - #[inline] - fn from(t: T) -> Mutex { - Mutex::new(t) - } -} - -impl fmt::Debug for Mutex { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.try_lock() { - Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(), - None => { - struct LockedPlaceholder; - impl fmt::Debug for LockedPlaceholder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("") - } - } - - f.debug_struct("Mutex") - .field("data", &LockedPlaceholder) - .finish() - } - } - } -} - -// Copied and modified from serde -#[cfg(feature = "serde")] -impl Serialize for Mutex -where - R: RawMutex, - T: Serialize + ?Sized, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.lock().serialize(serializer) - } -} - -#[cfg(feature = "serde")] -impl<'de, R, T> Deserialize<'de> for 
Mutex -where - R: RawMutex, - T: Deserialize<'de> + ?Sized, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Deserialize::deserialize(deserializer).map(Mutex::new) - } -} - -/// An RAII implementation of a "scoped lock" of a mutex. When this structure is -/// dropped (falls out of scope), the lock will be unlocked. -/// -/// The data protected by the mutex can be accessed through this guard via its -/// `Deref` and `DerefMut` implementations. -#[clippy::has_significant_drop] -#[must_use = "if unused the Mutex will immediately unlock"] -pub struct MutexGuard<'a, R: RawMutex, T: ?Sized> { - mutex: &'a Mutex, - marker: PhantomData<(&'a mut T, R::GuardMarker)>, -} - -unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, R, T> {} - -impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> { - /// Returns a reference to the original `Mutex` object. - pub fn mutex(s: &Self) -> &'a Mutex { - s.mutex - } - - /// Makes a new `MappedMutexGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `MutexGuard` passed - /// in already locked the mutex. - /// - /// This is an associated function that needs to be - /// used as `MutexGuard::map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn map(s: Self, f: F) -> MappedMutexGuard<'a, R, U> - where - F: FnOnce(&mut T) -> &mut U, - { - let raw = &s.mutex.raw; - let data = f(unsafe { &mut *s.mutex.data.get() }); - mem::forget(s); - MappedMutexGuard { - raw, - data, - marker: PhantomData, - } - } - - /// Attempts to make a new `MappedMutexGuard` for a component of the - /// locked data. The original guard is returned if the closure returns `None`. - /// - /// This operation cannot fail as the `MutexGuard` passed - /// in already locked the mutex. - /// - /// This is an associated function that needs to be - /// used as `MutexGuard::try_map(...)`. 
A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn try_map(s: Self, f: F) -> Result, Self> - where - F: FnOnce(&mut T) -> Option<&mut U>, - { - let raw = &s.mutex.raw; - let data = match f(unsafe { &mut *s.mutex.data.get() }) { - Some(data) => data, - None => return Err(s), - }; - mem::forget(s); - Ok(MappedMutexGuard { - raw, - data, - marker: PhantomData, - }) - } - - /// Temporarily unlocks the mutex to execute the given function. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the mutex. - #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: A MutexGuard always holds the lock. - unsafe { - s.mutex.raw.unlock(); - } - defer!(s.mutex.raw.lock()); - f() - } - - /// Leaks the mutex guard and returns a mutable reference to the data - /// protected by the mutex. - /// - /// This will leave the `Mutex` in a locked state. - #[inline] - pub fn leak(s: Self) -> &'a mut T { - let r = unsafe { &mut *s.mutex.data.get() }; - mem::forget(s); - r - } -} - -impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> { - /// Unlocks the mutex using a fair unlock protocol. - /// - /// By default, mutexes are unfair and allow the current thread to re-lock - /// the mutex before another has the chance to acquire the lock, even if - /// that thread has been blocked on the mutex for a long time. This is the - /// default because it allows much higher throughput as it avoids forcing a - /// context switch on every mutex unlock. This can result in one thread - /// acquiring a mutex many more times than other threads. - /// - /// However in some cases it can be beneficial to ensure fairness by forcing - /// the lock to pass on to a waiting thread if there is one. This is done by - /// using this method instead of dropping the `MutexGuard` normally. 
- #[inline] - pub fn unlock_fair(s: Self) { - // Safety: A MutexGuard always holds the lock. - unsafe { - s.mutex.raw.unlock_fair(); - } - mem::forget(s); - } - - /// Temporarily unlocks the mutex to execute the given function. - /// - /// The mutex is unlocked using a fair unlock protocol. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the mutex. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: A MutexGuard always holds the lock. - unsafe { - s.mutex.raw.unlock_fair(); - } - defer!(s.mutex.raw.lock()); - f() - } - - /// Temporarily yields the mutex to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `unlock_fair` followed - /// by `lock`, however it can be much more efficient in the case where there - /// are no waiting threads. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: A MutexGuard always holds the lock. - unsafe { - s.mutex.raw.bump(); - } - } -} - -impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MutexGuard<'a, R, T> { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.mutex.data.get() } - } -} - -impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, R, T> { - #[inline] - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.mutex.data.get() } - } -} - -impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T> { - #[inline] - fn drop(&mut self) { - // Safety: A MutexGuard always holds the lock. 
- unsafe { - self.mutex.raw.unlock(); - } - } -} - -impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MutexGuard<'a, R, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MutexGuard<'a, R, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[cfg(feature = "owning_ref")] -unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'a, R, T> {} - -/// An RAII mutex guard returned by the `Arc` locking operations on `Mutex`. -/// -/// This is similar to the `MutexGuard` struct, except instead of using a reference to unlock the `Mutex` it -/// uses an `Arc`. This has several advantages, most notably that it has an `'static` lifetime. -#[cfg(feature = "arc_lock")] -#[clippy::has_significant_drop] -#[must_use = "if unused the Mutex will immediately unlock"] -pub struct ArcMutexGuard { - mutex: Arc>, - marker: PhantomData<*const ()>, -} - -#[cfg(feature = "arc_lock")] -unsafe impl Send for ArcMutexGuard where - R::GuardMarker: Send -{ -} -#[cfg(feature = "arc_lock")] -unsafe impl Sync for ArcMutexGuard where - R::GuardMarker: Sync -{ -} - -#[cfg(feature = "arc_lock")] -impl ArcMutexGuard { - /// Returns a reference to the `Mutex` this is guarding, contained in its `Arc`. - #[inline] - pub fn mutex(s: &Self) -> &Arc> { - &s.mutex - } - - /// Unlocks the mutex and returns the `Arc` that was held by the [`ArcMutexGuard`]. - #[inline] - pub fn into_arc(s: Self) -> Arc> { - // Safety: Skip our Drop impl and manually unlock the mutex. - let arc = unsafe { ptr::read(&s.mutex) }; - mem::forget(s); - unsafe { - arc.raw.unlock(); - } - arc - } - - /// Temporarily unlocks the mutex to execute the given function. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the mutex. 
- #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: A MutexGuard always holds the lock. - unsafe { - s.mutex.raw.unlock(); - } - defer!(s.mutex.raw.lock()); - f() - } -} - -#[cfg(feature = "arc_lock")] -impl ArcMutexGuard { - /// Unlocks the mutex using a fair unlock protocol. - /// - /// This is functionally identical to the `unlock_fair` method on [`MutexGuard`]. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: A MutexGuard always holds the lock. - unsafe { - s.mutex.raw.unlock_fair(); - } - - // SAFETY: make sure the Arc gets it reference decremented - let mut s = ManuallyDrop::new(s); - unsafe { ptr::drop_in_place(&mut s.mutex) }; - } - - /// Temporarily unlocks the mutex to execute the given function. - /// - /// This is functionally identical to the `unlocked_fair` method on [`MutexGuard`]. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: A MutexGuard always holds the lock. - unsafe { - s.mutex.raw.unlock_fair(); - } - defer!(s.mutex.raw.lock()); - f() - } - - /// Temporarily yields the mutex to a waiting thread if there is one. - /// - /// This is functionally identical to the `bump` method on [`MutexGuard`]. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: A MutexGuard always holds the lock. - unsafe { - s.mutex.raw.bump(); - } - } -} - -#[cfg(feature = "arc_lock")] -impl Deref for ArcMutexGuard { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.mutex.data.get() } - } -} - -#[cfg(feature = "arc_lock")] -impl DerefMut for ArcMutexGuard { - #[inline] - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.mutex.data.get() } - } -} - -#[cfg(feature = "arc_lock")] -impl Drop for ArcMutexGuard { - #[inline] - fn drop(&mut self) { - // Safety: A MutexGuard always holds the lock. 
- unsafe { - self.mutex.raw.unlock(); - } - } -} - -/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a -/// subfield of the protected data. -/// -/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the -/// former doesn't support temporarily unlocking and re-locking, since that -/// could introduce soundness issues if the locked object is modified by another -/// thread. -#[clippy::has_significant_drop] -#[must_use = "if unused the Mutex will immediately unlock"] -pub struct MappedMutexGuard<'a, R: RawMutex, T: ?Sized> { - raw: &'a R, - data: *mut T, - marker: PhantomData<&'a mut T>, -} - -unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync - for MappedMutexGuard<'a, R, T> -{ -} -unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + Send + 'a> Send for MappedMutexGuard<'a, R, T> where - R::GuardMarker: Send -{ -} - -impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> { - /// Makes a new `MappedMutexGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `MappedMutexGuard` passed - /// in already locked the mutex. - /// - /// This is an associated function that needs to be - /// used as `MappedMutexGuard::map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn map(s: Self, f: F) -> MappedMutexGuard<'a, R, U> - where - F: FnOnce(&mut T) -> &mut U, - { - let raw = s.raw; - let data = f(unsafe { &mut *s.data }); - mem::forget(s); - MappedMutexGuard { - raw, - data, - marker: PhantomData, - } - } - - /// Attempts to make a new `MappedMutexGuard` for a component of the - /// locked data. The original guard is returned if the closure returns `None`. - /// - /// This operation cannot fail as the `MappedMutexGuard` passed - /// in already locked the mutex. - /// - /// This is an associated function that needs to be - /// used as `MappedMutexGuard::try_map(...)`. 
A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn try_map(s: Self, f: F) -> Result, Self> - where - F: FnOnce(&mut T) -> Option<&mut U>, - { - let raw = s.raw; - let data = match f(unsafe { &mut *s.data }) { - Some(data) => data, - None => return Err(s), - }; - mem::forget(s); - Ok(MappedMutexGuard { - raw, - data, - marker: PhantomData, - }) - } -} - -impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> { - /// Unlocks the mutex using a fair unlock protocol. - /// - /// By default, mutexes are unfair and allow the current thread to re-lock - /// the mutex before another has the chance to acquire the lock, even if - /// that thread has been blocked on the mutex for a long time. This is the - /// default because it allows much higher throughput as it avoids forcing a - /// context switch on every mutex unlock. This can result in one thread - /// acquiring a mutex many more times than other threads. - /// - /// However in some cases it can be beneficial to ensure fairness by forcing - /// the lock to pass on to a waiting thread if there is one. This is done by - /// using this method instead of dropping the `MutexGuard` normally. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: A MutexGuard always holds the lock. - unsafe { - s.raw.unlock_fair(); - } - mem::forget(s); - } -} - -impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MappedMutexGuard<'a, R, T> { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MappedMutexGuard<'a, R, T> { - #[inline] - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.data } - } -} - -impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T> { - #[inline] - fn drop(&mut self) { - // Safety: A MappedMutexGuard always holds the lock. 
- unsafe { - self.raw.unlock(); - } - } -} - -impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MappedMutexGuard<'a, R, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display - for MappedMutexGuard<'a, R, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[cfg(feature = "owning_ref")] -unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MappedMutexGuard<'a, R, T> {} diff -Nru s390-tools-2.31.0/rust-vendor/lock_api/src/remutex.rs s390-tools-2.33.1/rust-vendor/lock_api/src/remutex.rs --- s390-tools-2.31.0/rust-vendor/lock_api/src/remutex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/lock_api/src/remutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1051 +0,0 @@ -// Copyright 2018 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::{ - mutex::{RawMutex, RawMutexFair, RawMutexTimed}, - GuardNoSend, -}; -use core::{ - cell::{Cell, UnsafeCell}, - fmt, - marker::PhantomData, - mem, - num::NonZeroUsize, - ops::Deref, - sync::atomic::{AtomicUsize, Ordering}, -}; - -#[cfg(feature = "arc_lock")] -use alloc::sync::Arc; -#[cfg(feature = "arc_lock")] -use core::mem::ManuallyDrop; -#[cfg(feature = "arc_lock")] -use core::ptr; - -#[cfg(feature = "owning_ref")] -use owning_ref::StableAddress; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -/// Helper trait which returns a non-zero thread ID. -/// -/// The simplest way to implement this trait is to return the address of a -/// thread-local variable. -/// -/// # Safety -/// -/// Implementations of this trait must ensure that no two active threads share -/// the same thread ID. 
However the ID of a thread that has exited can be -/// re-used since that thread is no longer active. -pub unsafe trait GetThreadId { - /// Initial value. - // A “non-constant†const item is a legacy way to supply an initialized value to downstream - // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. - #[allow(clippy::declare_interior_mutable_const)] - const INIT: Self; - - /// Returns a non-zero thread ID which identifies the current thread of - /// execution. - fn nonzero_thread_id(&self) -> NonZeroUsize; -} - -/// A raw mutex type that wraps another raw mutex to provide reentrancy. -/// -/// Although this has the same methods as the [`RawMutex`] trait, it does -/// not implement it, and should not be used in the same way, since this -/// mutex can successfully acquire a lock multiple times in the same thread. -/// Only use this when you know you want a raw mutex that can be locked -/// reentrantly; you probably want [`ReentrantMutex`] instead. -/// -/// [`RawMutex`]: trait.RawMutex.html -/// [`ReentrantMutex`]: struct.ReentrantMutex.html -pub struct RawReentrantMutex { - owner: AtomicUsize, - lock_count: Cell, - mutex: R, - get_thread_id: G, -} - -unsafe impl Send for RawReentrantMutex {} -unsafe impl Sync for RawReentrantMutex {} - -impl RawReentrantMutex { - /// Initial value for an unlocked mutex. 
- #[allow(clippy::declare_interior_mutable_const)] - pub const INIT: Self = RawReentrantMutex { - owner: AtomicUsize::new(0), - lock_count: Cell::new(0), - mutex: R::INIT, - get_thread_id: G::INIT, - }; - - #[inline] - fn lock_internal bool>(&self, try_lock: F) -> bool { - let id = self.get_thread_id.nonzero_thread_id().get(); - if self.owner.load(Ordering::Relaxed) == id { - self.lock_count.set( - self.lock_count - .get() - .checked_add(1) - .expect("ReentrantMutex lock count overflow"), - ); - } else { - if !try_lock() { - return false; - } - self.owner.store(id, Ordering::Relaxed); - debug_assert_eq!(self.lock_count.get(), 0); - self.lock_count.set(1); - } - true - } - - /// Acquires this mutex, blocking if it's held by another thread. - #[inline] - pub fn lock(&self) { - self.lock_internal(|| { - self.mutex.lock(); - true - }); - } - - /// Attempts to acquire this mutex without blocking. Returns `true` - /// if the lock was successfully acquired and `false` otherwise. - #[inline] - pub fn try_lock(&self) -> bool { - self.lock_internal(|| self.mutex.try_lock()) - } - - /// Unlocks this mutex. The inner mutex may not be unlocked if - /// this mutex was acquired previously in the current thread. - /// - /// # Safety - /// - /// This method may only be called if the mutex is held by the current thread. - #[inline] - pub unsafe fn unlock(&self) { - let lock_count = self.lock_count.get() - 1; - self.lock_count.set(lock_count); - if lock_count == 0 { - self.owner.store(0, Ordering::Relaxed); - self.mutex.unlock(); - } - } - - /// Checks whether the mutex is currently locked. - #[inline] - pub fn is_locked(&self) -> bool { - self.mutex.is_locked() - } - - /// Checks whether the mutex is currently held by the current thread. 
- #[inline] - pub fn is_owned_by_current_thread(&self) -> bool { - let id = self.get_thread_id.nonzero_thread_id().get(); - self.owner.load(Ordering::Relaxed) == id - } -} - -impl RawReentrantMutex { - /// Unlocks this mutex using a fair unlock protocol. The inner mutex - /// may not be unlocked if this mutex was acquired previously in the - /// current thread. - /// - /// # Safety - /// - /// This method may only be called if the mutex is held by the current thread. - #[inline] - pub unsafe fn unlock_fair(&self) { - let lock_count = self.lock_count.get() - 1; - self.lock_count.set(lock_count); - if lock_count == 0 { - self.owner.store(0, Ordering::Relaxed); - self.mutex.unlock_fair(); - } - } - - /// Temporarily yields the mutex to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `unlock_fair` followed - /// by `lock`, however it can be much more efficient in the case where there - /// are no waiting threads. - /// - /// # Safety - /// - /// This method may only be called if the mutex is held by the current thread. - #[inline] - pub unsafe fn bump(&self) { - if self.lock_count.get() == 1 { - let id = self.owner.load(Ordering::Relaxed); - self.owner.store(0, Ordering::Relaxed); - self.lock_count.set(0); - self.mutex.bump(); - self.owner.store(id, Ordering::Relaxed); - self.lock_count.set(1); - } - } -} - -impl RawReentrantMutex { - /// Attempts to acquire this lock until a timeout is reached. - #[inline] - pub fn try_lock_until(&self, timeout: R::Instant) -> bool { - self.lock_internal(|| self.mutex.try_lock_until(timeout)) - } - - /// Attempts to acquire this lock until a timeout is reached. - #[inline] - pub fn try_lock_for(&self, timeout: R::Duration) -> bool { - self.lock_internal(|| self.mutex.try_lock_for(timeout)) - } -} - -/// A mutex which can be recursively locked by a single thread. 
-/// -/// This type is identical to `Mutex` except for the following points: -/// -/// - Locking multiple times from the same thread will work correctly instead of -/// deadlocking. -/// - `ReentrantMutexGuard` does not give mutable references to the locked data. -/// Use a `RefCell` if you need this. -/// -/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex -/// primitive. -pub struct ReentrantMutex { - raw: RawReentrantMutex, - data: UnsafeCell, -} - -unsafe impl Send - for ReentrantMutex -{ -} -unsafe impl Sync - for ReentrantMutex -{ -} - -impl ReentrantMutex { - /// Creates a new reentrant mutex in an unlocked state ready for use. - #[cfg(has_const_fn_trait_bound)] - #[inline] - pub const fn new(val: T) -> ReentrantMutex { - ReentrantMutex { - data: UnsafeCell::new(val), - raw: RawReentrantMutex { - owner: AtomicUsize::new(0), - lock_count: Cell::new(0), - mutex: R::INIT, - get_thread_id: G::INIT, - }, - } - } - - /// Creates a new reentrant mutex in an unlocked state ready for use. - #[cfg(not(has_const_fn_trait_bound))] - #[inline] - pub fn new(val: T) -> ReentrantMutex { - ReentrantMutex { - data: UnsafeCell::new(val), - raw: RawReentrantMutex { - owner: AtomicUsize::new(0), - lock_count: Cell::new(0), - mutex: R::INIT, - get_thread_id: G::INIT, - }, - } - } - - /// Consumes this mutex, returning the underlying data. - #[inline] - pub fn into_inner(self) -> T { - self.data.into_inner() - } -} - -impl ReentrantMutex { - /// Creates a new reentrant mutex based on a pre-existing raw mutex and a - /// helper to get the thread ID. - /// - /// This allows creating a reentrant mutex in a constant context on stable - /// Rust. 
- #[inline] - pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex { - ReentrantMutex { - data: UnsafeCell::new(val), - raw: RawReentrantMutex { - owner: AtomicUsize::new(0), - lock_count: Cell::new(0), - mutex: raw_mutex, - get_thread_id, - }, - } - } -} - -impl ReentrantMutex { - /// Creates a new `ReentrantMutexGuard` without checking if the lock is held. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds the lock. - /// - /// Calling this function when a guard has already been produced is undefined behaviour unless - /// the guard was forgotten with `mem::forget`. - #[inline] - pub unsafe fn make_guard_unchecked(&self) -> ReentrantMutexGuard<'_, R, G, T> { - ReentrantMutexGuard { - remutex: &self, - marker: PhantomData, - } - } - - /// Acquires a reentrant mutex, blocking the current thread until it is able - /// to do so. - /// - /// If the mutex is held by another thread then this function will block the - /// local thread until it is available to acquire the mutex. If the mutex is - /// already held by the current thread then this function will increment the - /// lock reference count and return immediately. Upon returning, - /// the thread is the only thread with the mutex held. An RAII guard is - /// returned to allow scoped unlock of the lock. When the guard goes out of - /// scope, the mutex will be unlocked. - #[inline] - pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> { - self.raw.lock(); - // SAFETY: The lock is held, as required. - unsafe { self.make_guard_unchecked() } - } - - /// Attempts to acquire this lock. - /// - /// If the lock could not be acquired at this time, then `None` is returned. - /// Otherwise, an RAII guard is returned. The lock will be unlocked when the - /// guard is dropped. - /// - /// This function does not block. - #[inline] - pub fn try_lock(&self) -> Option> { - if self.raw.try_lock() { - // SAFETY: The lock is held, as required. 
- Some(unsafe { self.make_guard_unchecked() }) - } else { - None - } - } - - /// Returns a mutable reference to the underlying data. - /// - /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to - /// take place---the mutable borrow statically guarantees no locks exist. - #[inline] - pub fn get_mut(&mut self) -> &mut T { - unsafe { &mut *self.data.get() } - } - - /// Checks whether the mutex is currently locked. - #[inline] - pub fn is_locked(&self) -> bool { - self.raw.is_locked() - } - - /// Checks whether the mutex is currently held by the current thread. - #[inline] - pub fn is_owned_by_current_thread(&self) -> bool { - self.raw.is_owned_by_current_thread() - } - - /// Forcibly unlocks the mutex. - /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `ReentrantMutexGuard` object alive, for example when - /// dealing with FFI. - /// - /// # Safety - /// - /// This method must only be called if the current thread logically owns a - /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`. - /// Behavior is undefined if a mutex is unlocked when not locked. - #[inline] - pub unsafe fn force_unlock(&self) { - self.raw.unlock(); - } - - /// Returns the underlying raw mutex object. - /// - /// Note that you will most likely need to import the `RawMutex` trait from - /// `lock_api` to be able to call functions on the raw mutex. - /// - /// # Safety - /// - /// This method is unsafe because it allows unlocking a mutex while - /// still holding a reference to a `ReentrantMutexGuard`. - #[inline] - pub unsafe fn raw(&self) -> &R { - &self.raw.mutex - } - - /// Returns a raw pointer to the underlying data. - /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `ReentrantMutexGuard` object alive, for example - /// when dealing with FFI. 
- /// - /// # Safety - /// - /// You must ensure that there are no data races when dereferencing the - /// returned pointer, for example if the current thread logically owns a - /// `ReentrantMutexGuard` but that guard has been discarded using - /// `mem::forget`. - #[inline] - pub fn data_ptr(&self) -> *mut T { - self.data.get() - } - - /// Creates a new `ArcReentrantMutexGuard` without checking if the lock is held. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds the lock. - /// - /// Calling this function when a guard has already been produced is undefined behaviour unless - /// the guard was forgotten with `mem::forget`. - #[cfg(feature = "arc_lock")] - #[inline] - pub unsafe fn make_arc_guard_unchecked(self: &Arc) -> ArcReentrantMutexGuard { - ArcReentrantMutexGuard { - remutex: self.clone(), - marker: PhantomData, - } - } - - /// Acquires a reentrant mutex through an `Arc`. - /// - /// This method is similar to the `lock` method; however, it requires the `ReentrantMutex` to be inside of an - /// `Arc` and the resulting mutex guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn lock_arc(self: &Arc) -> ArcReentrantMutexGuard { - self.raw.lock(); - // SAFETY: locking guarantee is upheld - unsafe { self.make_arc_guard_unchecked() } - } - - /// Attempts to acquire a reentrant mutex through an `Arc`. - /// - /// This method is similar to the `try_lock` method; however, it requires the `ReentrantMutex` to be inside - /// of an `Arc` and the resulting mutex guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_lock_arc(self: &Arc) -> Option> { - if self.raw.try_lock() { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_guard_unchecked() }) - } else { - None - } - } -} - -impl ReentrantMutex { - /// Forcibly unlocks the mutex using a fair unlock protocol. 
- /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `ReentrantMutexGuard` object alive, for example when - /// dealing with FFI. - /// - /// # Safety - /// - /// This method must only be called if the current thread logically owns a - /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`. - /// Behavior is undefined if a mutex is unlocked when not locked. - #[inline] - pub unsafe fn force_unlock_fair(&self) { - self.raw.unlock_fair(); - } -} - -impl ReentrantMutex { - /// Attempts to acquire this lock until a timeout is reached. - /// - /// If the lock could not be acquired before the timeout expired, then - /// `None` is returned. Otherwise, an RAII guard is returned. The lock will - /// be unlocked when the guard is dropped. - #[inline] - pub fn try_lock_for(&self, timeout: R::Duration) -> Option> { - if self.raw.try_lock_for(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this lock until a timeout is reached. - /// - /// If the lock could not be acquired before the timeout expired, then - /// `None` is returned. Otherwise, an RAII guard is returned. The lock will - /// be unlocked when the guard is dropped. - #[inline] - pub fn try_lock_until(&self, timeout: R::Instant) -> Option> { - if self.raw.try_lock_until(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this lock until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_lock_for` method; however, it requires the `ReentrantMutex` to be - /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements. 
- #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_lock_arc_for( - self: &Arc, - timeout: R::Duration, - ) -> Option> { - if self.raw.try_lock_for(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this lock until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_lock_until` method; however, it requires the `ReentrantMutex` to be - /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_lock_arc_until( - self: &Arc, - timeout: R::Instant, - ) -> Option> { - if self.raw.try_lock_until(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_guard_unchecked() }) - } else { - None - } - } -} - -impl Default for ReentrantMutex { - #[inline] - fn default() -> ReentrantMutex { - ReentrantMutex::new(Default::default()) - } -} - -impl From for ReentrantMutex { - #[inline] - fn from(t: T) -> ReentrantMutex { - ReentrantMutex::new(t) - } -} - -impl fmt::Debug for ReentrantMutex { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.try_lock() { - Some(guard) => f - .debug_struct("ReentrantMutex") - .field("data", &&*guard) - .finish(), - None => { - struct LockedPlaceholder; - impl fmt::Debug for LockedPlaceholder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("") - } - } - - f.debug_struct("ReentrantMutex") - .field("data", &LockedPlaceholder) - .finish() - } - } - } -} - -// Copied and modified from serde -#[cfg(feature = "serde")] -impl Serialize for ReentrantMutex -where - R: RawMutex, - G: GetThreadId, - T: Serialize + ?Sized, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.lock().serialize(serializer) - } -} - -#[cfg(feature = "serde")] -impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex -where - R: RawMutex, - G: GetThreadId, 
- T: Deserialize<'de> + ?Sized, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Deserialize::deserialize(deserializer).map(ReentrantMutex::new) - } -} - -/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure -/// is dropped (falls out of scope), the lock will be unlocked. -/// -/// The data protected by the mutex can be accessed through this guard via its -/// `Deref` implementation. -#[clippy::has_significant_drop] -#[must_use = "if unused the ReentrantMutex will immediately unlock"] -pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> { - remutex: &'a ReentrantMutex, - marker: PhantomData<(&'a T, GuardNoSend)>, -} - -unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync - for ReentrantMutexGuard<'a, R, G, T> -{ -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> { - /// Returns a reference to the original `ReentrantMutex` object. - pub fn remutex(s: &Self) -> &'a ReentrantMutex { - s.remutex - } - - /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `ReentrantMutexGuard` passed - /// in already locked the mutex. - /// - /// This is an associated function that needs to be - /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn map(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U> - where - F: FnOnce(&T) -> &U, - { - let raw = &s.remutex.raw; - let data = f(unsafe { &*s.remutex.data.get() }); - mem::forget(s); - MappedReentrantMutexGuard { - raw, - data, - marker: PhantomData, - } - } - - /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the - /// locked data. The original guard is return if the closure returns `None`. 
- /// - /// This operation cannot fail as the `ReentrantMutexGuard` passed - /// in already locked the mutex. - /// - /// This is an associated function that needs to be - /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn try_map( - s: Self, - f: F, - ) -> Result, Self> - where - F: FnOnce(&T) -> Option<&U>, - { - let raw = &s.remutex.raw; - let data = match f(unsafe { &*s.remutex.data.get() }) { - Some(data) => data, - None => return Err(s), - }; - mem::forget(s); - Ok(MappedReentrantMutexGuard { - raw, - data, - marker: PhantomData, - }) - } - - /// Temporarily unlocks the mutex to execute the given function. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the mutex. - #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: A ReentrantMutexGuard always holds the lock. - unsafe { - s.remutex.raw.unlock(); - } - defer!(s.remutex.raw.lock()); - f() - } -} - -impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> - ReentrantMutexGuard<'a, R, G, T> -{ - /// Unlocks the mutex using a fair unlock protocol. - /// - /// By default, mutexes are unfair and allow the current thread to re-lock - /// the mutex before another has the chance to acquire the lock, even if - /// that thread has been blocked on the mutex for a long time. This is the - /// default because it allows much higher throughput as it avoids forcing a - /// context switch on every mutex unlock. This can result in one thread - /// acquiring a mutex many more times than other threads. - /// - /// However in some cases it can be beneficial to ensure fairness by forcing - /// the lock to pass on to a waiting thread if there is one. This is done by - /// using this method instead of dropping the `ReentrantMutexGuard` normally. 
- #[inline] - pub fn unlock_fair(s: Self) { - // Safety: A ReentrantMutexGuard always holds the lock - unsafe { - s.remutex.raw.unlock_fair(); - } - mem::forget(s); - } - - /// Temporarily unlocks the mutex to execute the given function. - /// - /// The mutex is unlocked a fair unlock protocol. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the mutex. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: A ReentrantMutexGuard always holds the lock - unsafe { - s.remutex.raw.unlock_fair(); - } - defer!(s.remutex.raw.lock()); - f() - } - - /// Temporarily yields the mutex to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `unlock_fair` followed - /// by `lock`, however it can be much more efficient in the case where there - /// are no waiting threads. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: A ReentrantMutexGuard always holds the lock - unsafe { - s.remutex.raw.bump(); - } - } -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref - for ReentrantMutexGuard<'a, R, G, T> -{ - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.remutex.data.get() } - } -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop - for ReentrantMutexGuard<'a, R, G, T> -{ - #[inline] - fn drop(&mut self) { - // Safety: A ReentrantMutexGuard always holds the lock. 
- unsafe { - self.remutex.raw.unlock(); - } - } -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug - for ReentrantMutexGuard<'a, R, G, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display - for ReentrantMutexGuard<'a, R, G, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[cfg(feature = "owning_ref")] -unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress - for ReentrantMutexGuard<'a, R, G, T> -{ -} - -/// An RAII mutex guard returned by the `Arc` locking operations on `ReentrantMutex`. -/// -/// This is similar to the `ReentrantMutexGuard` struct, except instead of using a reference to unlock the -/// `Mutex` it uses an `Arc`. This has several advantages, most notably that it has an `'static` -/// lifetime. -#[cfg(feature = "arc_lock")] -#[clippy::has_significant_drop] -#[must_use = "if unused the ReentrantMutex will immediately unlock"] -pub struct ArcReentrantMutexGuard { - remutex: Arc>, - marker: PhantomData, -} - -#[cfg(feature = "arc_lock")] -impl ArcReentrantMutexGuard { - /// Returns a reference to the `ReentrantMutex` this object is guarding, contained in its `Arc`. - pub fn remutex(s: &Self) -> &Arc> { - &s.remutex - } - - /// Temporarily unlocks the mutex to execute the given function. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the mutex. - #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: A ReentrantMutexGuard always holds the lock. - unsafe { - s.remutex.raw.unlock(); - } - defer!(s.remutex.raw.lock()); - f() - } -} - -#[cfg(feature = "arc_lock")] -impl ArcReentrantMutexGuard { - /// Unlocks the mutex using a fair unlock protocol. 
- /// - /// This is functionally identical to the `unlock_fair` method on [`ReentrantMutexGuard`]. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: A ReentrantMutexGuard always holds the lock - unsafe { - s.remutex.raw.unlock_fair(); - } - - // SAFETY: ensure that the Arc's refcount is decremented - let mut s = ManuallyDrop::new(s); - unsafe { ptr::drop_in_place(&mut s.remutex) }; - } - - /// Temporarily unlocks the mutex to execute the given function. - /// - /// This is functionally identical to the `unlocked_fair` method on [`ReentrantMutexGuard`]. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: A ReentrantMutexGuard always holds the lock - unsafe { - s.remutex.raw.unlock_fair(); - } - defer!(s.remutex.raw.lock()); - f() - } - - /// Temporarily yields the mutex to a waiting thread if there is one. - /// - /// This is functionally equivalent to the `bump` method on [`ReentrantMutexGuard`]. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: A ReentrantMutexGuard always holds the lock - unsafe { - s.remutex.raw.bump(); - } - } -} - -#[cfg(feature = "arc_lock")] -impl Deref for ArcReentrantMutexGuard { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.remutex.data.get() } - } -} - -#[cfg(feature = "arc_lock")] -impl Drop for ArcReentrantMutexGuard { - #[inline] - fn drop(&mut self) { - // Safety: A ReentrantMutexGuard always holds the lock. - unsafe { - self.remutex.raw.unlock(); - } - } -} - -/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a -/// subfield of the protected data. -/// -/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the -/// former doesn't support temporarily unlocking and re-locking, since that -/// could introduce soundness issues if the locked object is modified by another -/// thread. 
-#[clippy::has_significant_drop] -#[must_use = "if unused the ReentrantMutex will immediately unlock"] -pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> { - raw: &'a RawReentrantMutex, - data: *const T, - marker: PhantomData<&'a T>, -} - -unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync - for MappedReentrantMutexGuard<'a, R, G, T> -{ -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> - MappedReentrantMutexGuard<'a, R, G, T> -{ - /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `MappedReentrantMutexGuard` passed - /// in already locked the mutex. - /// - /// This is an associated function that needs to be - /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn map(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U> - where - F: FnOnce(&T) -> &U, - { - let raw = s.raw; - let data = f(unsafe { &*s.data }); - mem::forget(s); - MappedReentrantMutexGuard { - raw, - data, - marker: PhantomData, - } - } - - /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the - /// locked data. The original guard is return if the closure returns `None`. - /// - /// This operation cannot fail as the `MappedReentrantMutexGuard` passed - /// in already locked the mutex. - /// - /// This is an associated function that needs to be - /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. 
- #[inline] - pub fn try_map( - s: Self, - f: F, - ) -> Result, Self> - where - F: FnOnce(&T) -> Option<&U>, - { - let raw = s.raw; - let data = match f(unsafe { &*s.data }) { - Some(data) => data, - None => return Err(s), - }; - mem::forget(s); - Ok(MappedReentrantMutexGuard { - raw, - data, - marker: PhantomData, - }) - } -} - -impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> - MappedReentrantMutexGuard<'a, R, G, T> -{ - /// Unlocks the mutex using a fair unlock protocol. - /// - /// By default, mutexes are unfair and allow the current thread to re-lock - /// the mutex before another has the chance to acquire the lock, even if - /// that thread has been blocked on the mutex for a long time. This is the - /// default because it allows much higher throughput as it avoids forcing a - /// context switch on every mutex unlock. This can result in one thread - /// acquiring a mutex many more times than other threads. - /// - /// However in some cases it can be beneficial to ensure fairness by forcing - /// the lock to pass on to a waiting thread if there is one. This is done by - /// using this method instead of dropping the `ReentrantMutexGuard` normally. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: A MappedReentrantMutexGuard always holds the lock - unsafe { - s.raw.unlock_fair(); - } - mem::forget(s); - } -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref - for MappedReentrantMutexGuard<'a, R, G, T> -{ - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop - for MappedReentrantMutexGuard<'a, R, G, T> -{ - #[inline] - fn drop(&mut self) { - // Safety: A MappedReentrantMutexGuard always holds the lock. 
- unsafe { - self.raw.unlock(); - } - } -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug - for MappedReentrantMutexGuard<'a, R, G, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display - for MappedReentrantMutexGuard<'a, R, G, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[cfg(feature = "owning_ref")] -unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress - for MappedReentrantMutexGuard<'a, R, G, T> -{ -} diff -Nru s390-tools-2.31.0/rust-vendor/lock_api/src/rwlock.rs s390-tools-2.33.1/rust-vendor/lock_api/src/rwlock.rs --- s390-tools-2.31.0/rust-vendor/lock_api/src/rwlock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/lock_api/src/rwlock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2883 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use core::cell::UnsafeCell; -use core::fmt; -use core::marker::PhantomData; -use core::mem; -use core::ops::{Deref, DerefMut}; - -#[cfg(feature = "arc_lock")] -use alloc::sync::Arc; -#[cfg(feature = "arc_lock")] -use core::mem::ManuallyDrop; -#[cfg(feature = "arc_lock")] -use core::ptr; - -#[cfg(feature = "owning_ref")] -use owning_ref::StableAddress; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -/// Basic operations for a reader-writer lock. -/// -/// Types implementing this trait can be used by `RwLock` to form a safe and -/// fully-functioning `RwLock` type. 
-/// -/// # Safety -/// -/// Implementations of this trait must ensure that the `RwLock` is actually -/// exclusive: an exclusive lock can't be acquired while an exclusive or shared -/// lock exists, and a shared lock can't be acquire while an exclusive lock -/// exists. -pub unsafe trait RawRwLock { - /// Initial value for an unlocked `RwLock`. - // A “non-constant†const item is a legacy way to supply an initialized value to downstream - // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. - #[allow(clippy::declare_interior_mutable_const)] - const INIT: Self; - - /// Marker type which determines whether a lock guard should be `Send`. Use - /// one of the `GuardSend` or `GuardNoSend` helper types here. - type GuardMarker; - - /// Acquires a shared lock, blocking the current thread until it is able to do so. - fn lock_shared(&self); - - /// Attempts to acquire a shared lock without blocking. - fn try_lock_shared(&self) -> bool; - - /// Releases a shared lock. - /// - /// # Safety - /// - /// This method may only be called if a shared lock is held in the current context. - unsafe fn unlock_shared(&self); - - /// Acquires an exclusive lock, blocking the current thread until it is able to do so. - fn lock_exclusive(&self); - - /// Attempts to acquire an exclusive lock without blocking. - fn try_lock_exclusive(&self) -> bool; - - /// Releases an exclusive lock. - /// - /// # Safety - /// - /// This method may only be called if an exclusive lock is held in the current context. - unsafe fn unlock_exclusive(&self); - - /// Checks if this `RwLock` is currently locked in any way. - #[inline] - fn is_locked(&self) -> bool { - let acquired_lock = self.try_lock_exclusive(); - if acquired_lock { - // Safety: A lock was successfully acquired above. - unsafe { - self.unlock_exclusive(); - } - } - !acquired_lock - } - - /// Check if this `RwLock` is currently exclusively locked. 
- fn is_locked_exclusive(&self) -> bool { - let acquired_lock = self.try_lock_shared(); - if acquired_lock { - // Safety: A shared lock was successfully acquired above. - unsafe { - self.unlock_shared(); - } - } - !acquired_lock - } -} - -/// Additional methods for RwLocks which support fair unlocking. -/// -/// Fair unlocking means that a lock is handed directly over to the next waiting -/// thread if there is one, without giving other threads the opportunity to -/// "steal" the lock in the meantime. This is typically slower than unfair -/// unlocking, but may be necessary in certain circumstances. -pub unsafe trait RawRwLockFair: RawRwLock { - /// Releases a shared lock using a fair unlock protocol. - /// - /// # Safety - /// - /// This method may only be called if a shared lock is held in the current context. - unsafe fn unlock_shared_fair(&self); - - /// Releases an exclusive lock using a fair unlock protocol. - /// - /// # Safety - /// - /// This method may only be called if an exclusive lock is held in the current context. - unsafe fn unlock_exclusive_fair(&self); - - /// Temporarily yields a shared lock to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `unlock_shared_fair` followed - /// by `lock_shared`, however it can be much more efficient in the case where there - /// are no waiting threads. - /// - /// # Safety - /// - /// This method may only be called if a shared lock is held in the current context. - unsafe fn bump_shared(&self) { - self.unlock_shared_fair(); - self.lock_shared(); - } - - /// Temporarily yields an exclusive lock to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed - /// by `lock_exclusive`, however it can be much more efficient in the case where there - /// are no waiting threads. - /// - /// # Safety - /// - /// This method may only be called if an exclusive lock is held in the current context. 
- unsafe fn bump_exclusive(&self) { - self.unlock_exclusive_fair(); - self.lock_exclusive(); - } -} - -/// Additional methods for RwLocks which support atomically downgrading an -/// exclusive lock to a shared lock. -pub unsafe trait RawRwLockDowngrade: RawRwLock { - /// Atomically downgrades an exclusive lock into a shared lock without - /// allowing any thread to take an exclusive lock in the meantime. - /// - /// # Safety - /// - /// This method may only be called if an exclusive lock is held in the current context. - unsafe fn downgrade(&self); -} - -/// Additional methods for RwLocks which support locking with timeouts. -/// -/// The `Duration` and `Instant` types are specified as associated types so that -/// this trait is usable even in `no_std` environments. -pub unsafe trait RawRwLockTimed: RawRwLock { - /// Duration type used for `try_lock_for`. - type Duration; - - /// Instant type used for `try_lock_until`. - type Instant; - - /// Attempts to acquire a shared lock until a timeout is reached. - fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool; - - /// Attempts to acquire a shared lock until a timeout is reached. - fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool; - - /// Attempts to acquire an exclusive lock until a timeout is reached. - fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool; - - /// Attempts to acquire an exclusive lock until a timeout is reached. - fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool; -} - -/// Additional methods for RwLocks which support recursive read locks. -/// -/// These are guaranteed to succeed without blocking if -/// another read lock is held at the time of the call. This allows a thread -/// to recursively lock a `RwLock`. However using this method can cause -/// writers to starve since readers no longer block if a writer is waiting -/// for the lock. 
-pub unsafe trait RawRwLockRecursive: RawRwLock { - /// Acquires a shared lock without deadlocking in case of a recursive lock. - fn lock_shared_recursive(&self); - - /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock. - fn try_lock_shared_recursive(&self) -> bool; -} - -/// Additional methods for RwLocks which support recursive read locks and timeouts. -pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed { - /// Attempts to acquire a shared lock until a timeout is reached, without - /// deadlocking in case of a recursive lock. - fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool; - - /// Attempts to acquire a shared lock until a timeout is reached, without - /// deadlocking in case of a recursive lock. - fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool; -} - -/// Additional methods for RwLocks which support atomically upgrading a shared -/// lock to an exclusive lock. -/// -/// This requires acquiring a special "upgradable read lock" instead of a -/// normal shared lock. There may only be one upgradable lock at any time, -/// otherwise deadlocks could occur when upgrading. -pub unsafe trait RawRwLockUpgrade: RawRwLock { - /// Acquires an upgradable lock, blocking the current thread until it is able to do so. - fn lock_upgradable(&self); - - /// Attempts to acquire an upgradable lock without blocking. - fn try_lock_upgradable(&self) -> bool; - - /// Releases an upgradable lock. - /// - /// # Safety - /// - /// This method may only be called if an upgradable lock is held in the current context. - unsafe fn unlock_upgradable(&self); - - /// Upgrades an upgradable lock to an exclusive lock. - /// - /// # Safety - /// - /// This method may only be called if an upgradable lock is held in the current context. - unsafe fn upgrade(&self); - - /// Attempts to upgrade an upgradable lock to an exclusive lock without - /// blocking. 
- /// - /// # Safety - /// - /// This method may only be called if an upgradable lock is held in the current context. - unsafe fn try_upgrade(&self) -> bool; -} - -/// Additional methods for RwLocks which support upgradable locks and fair -/// unlocking. -pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair { - /// Releases an upgradable lock using a fair unlock protocol. - /// - /// # Safety - /// - /// This method may only be called if an upgradable lock is held in the current context. - unsafe fn unlock_upgradable_fair(&self); - - /// Temporarily yields an upgradable lock to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed - /// by `lock_upgradable`, however it can be much more efficient in the case where there - /// are no waiting threads. - /// - /// # Safety - /// - /// This method may only be called if an upgradable lock is held in the current context. - unsafe fn bump_upgradable(&self) { - self.unlock_upgradable_fair(); - self.lock_upgradable(); - } -} - -/// Additional methods for RwLocks which support upgradable locks and lock -/// downgrading. -pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade { - /// Downgrades an upgradable lock to a shared lock. - /// - /// # Safety - /// - /// This method may only be called if an upgradable lock is held in the current context. - unsafe fn downgrade_upgradable(&self); - - /// Downgrades an exclusive lock to an upgradable lock. - /// - /// # Safety - /// - /// This method may only be called if an exclusive lock is held in the current context. - unsafe fn downgrade_to_upgradable(&self); -} - -/// Additional methods for RwLocks which support upgradable locks and locking -/// with timeouts. -pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed { - /// Attempts to acquire an upgradable lock until a timeout is reached. 
- fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool; - - /// Attempts to acquire an upgradable lock until a timeout is reached. - fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool; - - /// Attempts to upgrade an upgradable lock to an exclusive lock until a - /// timeout is reached. - /// - /// # Safety - /// - /// This method may only be called if an upgradable lock is held in the current context. - unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool; - - /// Attempts to upgrade an upgradable lock to an exclusive lock until a - /// timeout is reached. - /// - /// # Safety - /// - /// This method may only be called if an upgradable lock is held in the current context. - unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool; -} - -/// A reader-writer lock -/// -/// This type of lock allows a number of readers or at most one writer at any -/// point in time. The write portion of this lock typically allows modification -/// of the underlying data (exclusive access) and the read portion of this lock -/// typically allows for read-only access (shared access). -/// -/// The type parameter `T` represents the data that this lock protects. It is -/// required that `T` satisfies `Send` to be shared across threads and `Sync` to -/// allow concurrent access through readers. The RAII guards returned from the -/// locking methods implement `Deref` (and `DerefMut` for the `write` methods) -/// to allow access to the contained of the lock. 
-pub struct RwLock { - raw: R, - data: UnsafeCell, -} - -// Copied and modified from serde -#[cfg(feature = "serde")] -impl Serialize for RwLock -where - R: RawRwLock, - T: Serialize + ?Sized, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.read().serialize(serializer) - } -} - -#[cfg(feature = "serde")] -impl<'de, R, T> Deserialize<'de> for RwLock -where - R: RawRwLock, - T: Deserialize<'de> + ?Sized, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Deserialize::deserialize(deserializer).map(RwLock::new) - } -} - -unsafe impl Send for RwLock {} -unsafe impl Sync for RwLock {} - -impl RwLock { - /// Creates a new instance of an `RwLock` which is unlocked. - #[cfg(has_const_fn_trait_bound)] - #[inline] - pub const fn new(val: T) -> RwLock { - RwLock { - data: UnsafeCell::new(val), - raw: R::INIT, - } - } - - /// Creates a new instance of an `RwLock` which is unlocked. - #[cfg(not(has_const_fn_trait_bound))] - #[inline] - pub fn new(val: T) -> RwLock { - RwLock { - data: UnsafeCell::new(val), - raw: R::INIT, - } - } - - /// Consumes this `RwLock`, returning the underlying data. - #[inline] - #[allow(unused_unsafe)] - pub fn into_inner(self) -> T { - unsafe { self.data.into_inner() } - } -} - -impl RwLock { - /// Creates a new new instance of an `RwLock` based on a pre-existing - /// `RawRwLock`. - /// - /// This allows creating a `RwLock` in a constant context on stable - /// Rust. - #[inline] - pub const fn const_new(raw_rwlock: R, val: T) -> RwLock { - RwLock { - data: UnsafeCell::new(val), - raw: raw_rwlock, - } - } -} - -impl RwLock { - /// Creates a new `RwLockReadGuard` without checking if the lock is held. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds a read lock. - /// - /// This function does not increment the read count of the lock. 
Calling this function when a - /// guard has already been produced is undefined behaviour unless the guard was forgotten - /// with `mem::forget`.` - #[inline] - pub unsafe fn make_read_guard_unchecked(&self) -> RwLockReadGuard<'_, R, T> { - RwLockReadGuard { - rwlock: self, - marker: PhantomData, - } - } - - /// Creates a new `RwLockReadGuard` without checking if the lock is held. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds a write lock. - /// - /// Calling this function when a guard has already been produced is undefined behaviour unless - /// the guard was forgotten with `mem::forget`. - #[inline] - pub unsafe fn make_write_guard_unchecked(&self) -> RwLockWriteGuard<'_, R, T> { - RwLockWriteGuard { - rwlock: self, - marker: PhantomData, - } - } - - /// Locks this `RwLock` with shared read access, blocking the current thread - /// until it can be acquired. - /// - /// The calling thread will be blocked until there are no more writers which - /// hold the lock. There may be other readers currently inside the lock when - /// this method returns. - /// - /// Note that attempts to recursively acquire a read lock on a `RwLock` when - /// the current thread already holds one may result in a deadlock. - /// - /// Returns an RAII guard which will release this thread's shared access - /// once it is dropped. - #[inline] - pub fn read(&self) -> RwLockReadGuard<'_, R, T> { - self.raw.lock_shared(); - // SAFETY: The lock is held, as required. - unsafe { self.make_read_guard_unchecked() } - } - - /// Attempts to acquire this `RwLock` with shared read access. - /// - /// If the access could not be granted at this time, then `None` is returned. - /// Otherwise, an RAII guard is returned which will release the shared access - /// when it is dropped. - /// - /// This function does not block. - #[inline] - pub fn try_read(&self) -> Option> { - if self.raw.try_lock_shared() { - // SAFETY: The lock is held, as required. 
- Some(unsafe { self.make_read_guard_unchecked() }) - } else { - None - } - } - - /// Locks this `RwLock` with exclusive write access, blocking the current - /// thread until it can be acquired. - /// - /// This function will not return while other writers or other readers - /// currently have access to the lock. - /// - /// Returns an RAII guard which will drop the write access of this `RwLock` - /// when dropped. - #[inline] - pub fn write(&self) -> RwLockWriteGuard<'_, R, T> { - self.raw.lock_exclusive(); - // SAFETY: The lock is held, as required. - unsafe { self.make_write_guard_unchecked() } - } - - /// Attempts to lock this `RwLock` with exclusive write access. - /// - /// If the lock could not be acquired at this time, then `None` is returned. - /// Otherwise, an RAII guard is returned which will release the lock when - /// it is dropped. - /// - /// This function does not block. - #[inline] - pub fn try_write(&self) -> Option> { - if self.raw.try_lock_exclusive() { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_write_guard_unchecked() }) - } else { - None - } - } - - /// Returns a mutable reference to the underlying data. - /// - /// Since this call borrows the `RwLock` mutably, no actual locking needs to - /// take place---the mutable borrow statically guarantees no locks exist. - #[inline] - pub fn get_mut(&mut self) -> &mut T { - unsafe { &mut *self.data.get() } - } - - /// Checks whether this `RwLock` is currently locked in any way. - #[inline] - pub fn is_locked(&self) -> bool { - self.raw.is_locked() - } - - /// Check if this `RwLock` is currently exclusively locked. - #[inline] - pub fn is_locked_exclusive(&self) -> bool { - self.raw.is_locked_exclusive() - } - - /// Forcibly unlocks a read lock. - /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `RwLockReadGuard` object alive, for example when - /// dealing with FFI. 
- /// - /// # Safety - /// - /// This method must only be called if the current thread logically owns a - /// `RwLockReadGuard` but that guard has be discarded using `mem::forget`. - /// Behavior is undefined if a rwlock is read-unlocked when not read-locked. - #[inline] - pub unsafe fn force_unlock_read(&self) { - self.raw.unlock_shared(); - } - - /// Forcibly unlocks a write lock. - /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `RwLockWriteGuard` object alive, for example when - /// dealing with FFI. - /// - /// # Safety - /// - /// This method must only be called if the current thread logically owns a - /// `RwLockWriteGuard` but that guard has be discarded using `mem::forget`. - /// Behavior is undefined if a rwlock is write-unlocked when not write-locked. - #[inline] - pub unsafe fn force_unlock_write(&self) { - self.raw.unlock_exclusive(); - } - - /// Returns the underlying raw reader-writer lock object. - /// - /// Note that you will most likely need to import the `RawRwLock` trait from - /// `lock_api` to be able to call functions on the raw - /// reader-writer lock. - /// - /// # Safety - /// - /// This method is unsafe because it allows unlocking a mutex while - /// still holding a reference to a lock guard. - pub unsafe fn raw(&self) -> &R { - &self.raw - } - - /// Returns a raw pointer to the underlying data. - /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object - /// alive, for example when dealing with FFI. - /// - /// # Safety - /// - /// You must ensure that there are no data races when dereferencing the - /// returned pointer, for example if the current thread logically owns a - /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded - /// using `mem::forget`. 
- #[inline] - pub fn data_ptr(&self) -> *mut T { - self.data.get() - } - - /// Creates a new `RwLockReadGuard` without checking if the lock is held. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds a read lock. - /// - /// This function does not increment the read count of the lock. Calling this function when a - /// guard has already been produced is undefined behaviour unless the guard was forgotten - /// with `mem::forget`.` - #[cfg(feature = "arc_lock")] - #[inline] - pub unsafe fn make_arc_read_guard_unchecked(self: &Arc) -> ArcRwLockReadGuard { - ArcRwLockReadGuard { - rwlock: self.clone(), - marker: PhantomData, - } - } - - /// Creates a new `RwLockWriteGuard` without checking if the lock is held. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds a write lock. - /// - /// Calling this function when a guard has already been produced is undefined behaviour unless - /// the guard was forgotten with `mem::forget`. - #[cfg(feature = "arc_lock")] - #[inline] - pub unsafe fn make_arc_write_guard_unchecked(self: &Arc) -> ArcRwLockWriteGuard { - ArcRwLockWriteGuard { - rwlock: self.clone(), - marker: PhantomData, - } - } - - /// Locks this `RwLock` with read access, through an `Arc`. - /// - /// This method is similar to the `read` method; however, it requires the `RwLock` to be inside of an `Arc` - /// and the resulting read guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn read_arc(self: &Arc) -> ArcRwLockReadGuard { - self.raw.lock_shared(); - // SAFETY: locking guarantee is upheld - unsafe { self.make_arc_read_guard_unchecked() } - } - - /// Attempts to lock this `RwLock` with read access, through an `Arc`. - /// - /// This method is similar to the `try_read` method; however, it requires the `RwLock` to be inside of an - /// `Arc` and the resulting read guard has no lifetime requirements. 
- #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_read_arc(self: &Arc) -> Option> { - if self.raw.try_lock_shared() { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_read_guard_unchecked() }) - } else { - None - } - } - - /// Locks this `RwLock` with write access, through an `Arc`. - /// - /// This method is similar to the `write` method; however, it requires the `RwLock` to be inside of an `Arc` - /// and the resulting write guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn write_arc(self: &Arc) -> ArcRwLockWriteGuard { - self.raw.lock_exclusive(); - // SAFETY: locking guarantee is upheld - unsafe { self.make_arc_write_guard_unchecked() } - } - - /// Attempts to lock this `RwLock` with writ access, through an `Arc`. - /// - /// This method is similar to the `try_write` method; however, it requires the `RwLock` to be inside of an - /// `Arc` and the resulting write guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_write_arc(self: &Arc) -> Option> { - if self.raw.try_lock_exclusive() { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_write_guard_unchecked() }) - } else { - None - } - } -} - -impl RwLock { - /// Forcibly unlocks a read lock using a fair unlock procotol. - /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `RwLockReadGuard` object alive, for example when - /// dealing with FFI. - /// - /// # Safety - /// - /// This method must only be called if the current thread logically owns a - /// `RwLockReadGuard` but that guard has be discarded using `mem::forget`. - /// Behavior is undefined if a rwlock is read-unlocked when not read-locked. - #[inline] - pub unsafe fn force_unlock_read_fair(&self) { - self.raw.unlock_shared_fair(); - } - - /// Forcibly unlocks a write lock using a fair unlock procotol. 
- /// - /// This is useful when combined with `mem::forget` to hold a lock without - /// the need to maintain a `RwLockWriteGuard` object alive, for example when - /// dealing with FFI. - /// - /// # Safety - /// - /// This method must only be called if the current thread logically owns a - /// `RwLockWriteGuard` but that guard has be discarded using `mem::forget`. - /// Behavior is undefined if a rwlock is write-unlocked when not write-locked. - #[inline] - pub unsafe fn force_unlock_write_fair(&self) { - self.raw.unlock_exclusive_fair(); - } -} - -impl RwLock { - /// Attempts to acquire this `RwLock` with shared read access until a timeout - /// is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. Otherwise, an RAII guard is returned which will - /// release the shared access when it is dropped. - #[inline] - pub fn try_read_for(&self, timeout: R::Duration) -> Option> { - if self.raw.try_lock_shared_for(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_read_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this `RwLock` with shared read access until a timeout - /// is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. Otherwise, an RAII guard is returned which will - /// release the shared access when it is dropped. - #[inline] - pub fn try_read_until(&self, timeout: R::Instant) -> Option> { - if self.raw.try_lock_shared_until(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_read_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this `RwLock` with exclusive write access until a - /// timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. Otherwise, an RAII guard is returned which will - /// release the exclusive access when it is dropped. 
- #[inline] - pub fn try_write_for(&self, timeout: R::Duration) -> Option> { - if self.raw.try_lock_exclusive_for(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_write_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this `RwLock` with exclusive write access until a - /// timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. Otherwise, an RAII guard is returned which will - /// release the exclusive access when it is dropped. - #[inline] - pub fn try_write_until(&self, timeout: R::Instant) -> Option> { - if self.raw.try_lock_exclusive_until(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_write_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_read_for` method; however, it requires the `RwLock` to be inside of an - /// `Arc` and the resulting read guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_read_arc_for( - self: &Arc, - timeout: R::Duration, - ) -> Option> { - if self.raw.try_lock_shared_for(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_read_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_read_until` method; however, it requires the `RwLock` to be inside of - /// an `Arc` and the resulting read guard has no lifetime requirements. 
- #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_read_arc_until( - self: &Arc, - timeout: R::Instant, - ) -> Option> { - if self.raw.try_lock_shared_until(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_read_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_write_for` method; however, it requires the `RwLock` to be inside of - /// an `Arc` and the resulting write guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_write_arc_for( - self: &Arc, - timeout: R::Duration, - ) -> Option> { - if self.raw.try_lock_exclusive_for(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_write_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_write_until` method; however, it requires the `RwLock` to be inside of - /// an `Arc` and the resulting read guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_write_arc_until( - self: &Arc, - timeout: R::Instant, - ) -> Option> { - if self.raw.try_lock_exclusive_until(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_write_guard_unchecked() }) - } else { - None - } - } -} - -impl RwLock { - /// Locks this `RwLock` with shared read access, blocking the current thread - /// until it can be acquired. - /// - /// The calling thread will be blocked until there are no more writers which - /// hold the lock. There may be other readers currently inside the lock when - /// this method returns. - /// - /// Unlike `read`, this method is guaranteed to succeed without blocking if - /// another read lock is held at the time of the call. 
This allows a thread - /// to recursively lock a `RwLock`. However using this method can cause - /// writers to starve since readers no longer block if a writer is waiting - /// for the lock. - /// - /// Returns an RAII guard which will release this thread's shared access - /// once it is dropped. - #[inline] - pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> { - self.raw.lock_shared_recursive(); - // SAFETY: The lock is held, as required. - unsafe { self.make_read_guard_unchecked() } - } - - /// Attempts to acquire this `RwLock` with shared read access. - /// - /// If the access could not be granted at this time, then `None` is returned. - /// Otherwise, an RAII guard is returned which will release the shared access - /// when it is dropped. - /// - /// This method is guaranteed to succeed if another read lock is held at the - /// time of the call. See the documentation for `read_recursive` for details. - /// - /// This function does not block. - #[inline] - pub fn try_read_recursive(&self) -> Option> { - if self.raw.try_lock_shared_recursive() { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_read_guard_unchecked() }) - } else { - None - } - } - - /// Locks this `RwLock` with shared read access, through an `Arc`. - /// - /// This method is similar to the `read_recursive` method; however, it requires the `RwLock` to be inside of - /// an `Arc` and the resulting read guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn read_arc_recursive(self: &Arc) -> ArcRwLockReadGuard { - self.raw.lock_shared_recursive(); - // SAFETY: locking guarantee is upheld - unsafe { self.make_arc_read_guard_unchecked() } - } - - /// Attempts to lock this `RwLock` with shared read access, through an `Arc`. - /// - /// This method is similar to the `try_read_recursive` method; however, it requires the `RwLock` to be inside - /// of an `Arc` and the resulting read guard has no lifetime requirements. 
- #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_read_recursive_arc(self: &Arc) -> Option> { - if self.raw.try_lock_shared_recursive() { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_read_guard_unchecked() }) - } else { - None - } - } -} - -impl RwLock { - /// Attempts to acquire this `RwLock` with shared read access until a timeout - /// is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. Otherwise, an RAII guard is returned which will - /// release the shared access when it is dropped. - /// - /// This method is guaranteed to succeed without blocking if another read - /// lock is held at the time of the call. See the documentation for - /// `read_recursive` for details. - #[inline] - pub fn try_read_recursive_for( - &self, - timeout: R::Duration, - ) -> Option> { - if self.raw.try_lock_shared_recursive_for(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_read_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this `RwLock` with shared read access until a timeout - /// is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. Otherwise, an RAII guard is returned which will - /// release the shared access when it is dropped. - #[inline] - pub fn try_read_recursive_until( - &self, - timeout: R::Instant, - ) -> Option> { - if self.raw.try_lock_shared_recursive_until(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_read_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_read_recursive_for` method; however, it requires the `RwLock` to be - /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 
- #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_read_arc_recursive_for( - self: &Arc, - timeout: R::Duration, - ) -> Option> { - if self.raw.try_lock_shared_recursive_for(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_read_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_read_recursive_until` method; however, it requires the `RwLock` to be - /// inside of an `Arc` and the resulting read guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_read_arc_recursive_until( - self: &Arc, - timeout: R::Instant, - ) -> Option> { - if self.raw.try_lock_shared_recursive_until(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_arc_read_guard_unchecked() }) - } else { - None - } - } -} - -impl RwLock { - /// Creates a new `RwLockUpgradableReadGuard` without checking if the lock is held. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds an upgradable read lock. - /// - /// This function does not increment the read count of the lock. Calling this function when a - /// guard has already been produced is undefined behaviour unless the guard was forgotten - /// with `mem::forget`.` - #[inline] - pub unsafe fn make_upgradable_guard_unchecked(&self) -> RwLockUpgradableReadGuard<'_, R, T> { - RwLockUpgradableReadGuard { - rwlock: self, - marker: PhantomData, - } - } - - /// Locks this `RwLock` with upgradable read access, blocking the current thread - /// until it can be acquired. - /// - /// The calling thread will be blocked until there are no more writers or other - /// upgradable reads which hold the lock. There may be other readers currently - /// inside the lock when this method returns. 
- /// - /// Returns an RAII guard which will release this thread's shared access - /// once it is dropped. - #[inline] - pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> { - self.raw.lock_upgradable(); - // SAFETY: The lock is held, as required. - unsafe { self.make_upgradable_guard_unchecked() } - } - - /// Attempts to acquire this `RwLock` with upgradable read access. - /// - /// If the access could not be granted at this time, then `None` is returned. - /// Otherwise, an RAII guard is returned which will release the shared access - /// when it is dropped. - /// - /// This function does not block. - #[inline] - pub fn try_upgradable_read(&self) -> Option> { - if self.raw.try_lock_upgradable() { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_upgradable_guard_unchecked() }) - } else { - None - } - } - - /// Creates a new `ArcRwLockUpgradableReadGuard` without checking if the lock is held. - /// - /// # Safety - /// - /// This method must only be called if the thread logically holds an upgradable read lock. - /// - /// This function does not increment the read count of the lock. Calling this function when a - /// guard has already been produced is undefined behaviour unless the guard was forgotten - /// with `mem::forget`.` - #[cfg(feature = "arc_lock")] - #[inline] - pub unsafe fn make_upgradable_arc_guard_unchecked( - self: &Arc, - ) -> ArcRwLockUpgradableReadGuard { - ArcRwLockUpgradableReadGuard { - rwlock: self.clone(), - marker: PhantomData, - } - } - - /// Locks this `RwLock` with upgradable read access, through an `Arc`. - /// - /// This method is similar to the `upgradable_read` method; however, it requires the `RwLock` to be - /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 
- #[cfg(feature = "arc_lock")] - #[inline] - pub fn upgradable_read_arc(self: &Arc) -> ArcRwLockUpgradableReadGuard { - self.raw.lock_upgradable(); - // SAFETY: locking guarantee is upheld - unsafe { self.make_upgradable_arc_guard_unchecked() } - } - - /// Attempts to lock this `RwLock` with upgradable read access, through an `Arc`. - /// - /// This method is similar to the `try_upgradable_read` method; however, it requires the `RwLock` to be - /// inside of an `Arc` and the resulting read guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_upgradable_read_arc(self: &Arc) -> Option> { - if self.raw.try_lock_upgradable() { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_upgradable_arc_guard_unchecked() }) - } else { - None - } - } -} - -impl RwLock { - /// Attempts to acquire this `RwLock` with upgradable read access until a timeout - /// is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. Otherwise, an RAII guard is returned which will - /// release the shared access when it is dropped. - #[inline] - pub fn try_upgradable_read_for( - &self, - timeout: R::Duration, - ) -> Option> { - if self.raw.try_lock_upgradable_for(timeout) { - // SAFETY: The lock is held, as required. - Some(unsafe { self.make_upgradable_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to acquire this `RwLock` with upgradable read access until a timeout - /// is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. Otherwise, an RAII guard is returned which will - /// release the shared access when it is dropped. - #[inline] - pub fn try_upgradable_read_until( - &self, - timeout: R::Instant, - ) -> Option> { - if self.raw.try_lock_upgradable_until(timeout) { - // SAFETY: The lock is held, as required. 
- Some(unsafe { self.make_upgradable_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_upgradable_read_for` method; however, it requires the `RwLock` to be - /// inside of an `Arc` and the resulting read guard has no lifetime requirements. - #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_upgradable_read_arc_for( - self: &Arc, - timeout: R::Duration, - ) -> Option> { - if self.raw.try_lock_upgradable_for(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_upgradable_arc_guard_unchecked() }) - } else { - None - } - } - - /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`. - /// - /// This method is similar to the `try_upgradable_read_until` method; however, it requires the `RwLock` to be - /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 
- #[cfg(feature = "arc_lock")] - #[inline] - pub fn try_upgradable_read_arc_until( - self: &Arc, - timeout: R::Instant, - ) -> Option> { - if self.raw.try_lock_upgradable_until(timeout) { - // SAFETY: locking guarantee is upheld - Some(unsafe { self.make_upgradable_arc_guard_unchecked() }) - } else { - None - } - } -} - -impl Default for RwLock { - #[inline] - fn default() -> RwLock { - RwLock::new(Default::default()) - } -} - -impl From for RwLock { - #[inline] - fn from(t: T) -> RwLock { - RwLock::new(t) - } -} - -impl fmt::Debug for RwLock { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.try_read() { - Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(), - None => { - struct LockedPlaceholder; - impl fmt::Debug for LockedPlaceholder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("") - } - } - - f.debug_struct("RwLock") - .field("data", &LockedPlaceholder) - .finish() - } - } - } -} - -/// RAII structure used to release the shared read access of a lock when -/// dropped. -#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> { - rwlock: &'a RwLock, - marker: PhantomData<(&'a T, R::GuardMarker)>, -} - -unsafe impl Sync for RwLockReadGuard<'_, R, T> {} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> { - /// Returns a reference to the original reader-writer lock object. - pub fn rwlock(s: &Self) -> &'a RwLock { - s.rwlock - } - - /// Make a new `MappedRwLockReadGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `RwLockReadGuard` passed - /// in already locked the data. - /// - /// This is an associated function that needs to be - /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. 
- #[inline] - pub fn map(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> - where - F: FnOnce(&T) -> &U, - { - let raw = &s.rwlock.raw; - let data = f(unsafe { &*s.rwlock.data.get() }); - mem::forget(s); - MappedRwLockReadGuard { - raw, - data, - marker: PhantomData, - } - } - - /// Attempts to make a new `MappedRwLockReadGuard` for a component of the - /// locked data. Returns the original guard if the closure returns `None`. - /// - /// This operation cannot fail as the `RwLockReadGuard` passed - /// in already locked the data. - /// - /// This is an associated function that needs to be - /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn try_map(s: Self, f: F) -> Result, Self> - where - F: FnOnce(&T) -> Option<&U>, - { - let raw = &s.rwlock.raw; - let data = match f(unsafe { &*s.rwlock.data.get() }) { - Some(data) => data, - None => return Err(s), - }; - mem::forget(s); - Ok(MappedRwLockReadGuard { - raw, - data, - marker: PhantomData, - }) - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the `RwLock`. - #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockReadGuard always holds a shared lock. - unsafe { - s.rwlock.raw.unlock_shared(); - } - defer!(s.rwlock.raw.lock_shared()); - f() - } -} - -impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> { - /// Unlocks the `RwLock` using a fair unlock protocol. - /// - /// By default, `RwLock` is unfair and allow the current thread to re-lock - /// the `RwLock` before another has the chance to acquire the lock, even if - /// that thread has been blocked on the `RwLock` for a long time. 
This is - /// the default because it allows much higher throughput as it avoids - /// forcing a context switch on every `RwLock` unlock. This can result in one - /// thread acquiring a `RwLock` many more times than other threads. - /// - /// However in some cases it can be beneficial to ensure fairness by forcing - /// the lock to pass on to a waiting thread if there is one. This is done by - /// using this method instead of dropping the `RwLockReadGuard` normally. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: An RwLockReadGuard always holds a shared lock. - unsafe { - s.rwlock.raw.unlock_shared_fair(); - } - mem::forget(s); - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// The `RwLock` is unlocked a fair unlock protocol. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the `RwLock`. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockReadGuard always holds a shared lock. - unsafe { - s.rwlock.raw.unlock_shared_fair(); - } - defer!(s.rwlock.raw.lock_shared()); - f() - } - - /// Temporarily yields the `RwLock` to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `unlock_fair` followed - /// by `read`, however it can be much more efficient in the case where there - /// are no waiting threads. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: An RwLockReadGuard always holds a shared lock. - unsafe { - s.rwlock.raw.bump_shared(); - } - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.rwlock.data.get() } - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> { - #[inline] - fn drop(&mut self) { - // Safety: An RwLockReadGuard always holds a shared lock. 
- unsafe { - self.rwlock.raw.unlock_shared(); - } - } -} - -impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display - for RwLockReadGuard<'a, R, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[cfg(feature = "owning_ref")] -unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {} - -/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`. -/// -/// This is similar to the `RwLockReadGuard` struct, except instead of using a reference to unlock the `RwLock` -/// it uses an `Arc`. This has several advantages, most notably that it has an `'static` lifetime. -#[cfg(feature = "arc_lock")] -#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct ArcRwLockReadGuard { - rwlock: Arc>, - marker: PhantomData, -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockReadGuard { - /// Returns a reference to the rwlock, contained in its `Arc`. - pub fn rwlock(s: &Self) -> &Arc> { - &s.rwlock - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// This is functionally identical to the `unlocked` method on [`RwLockReadGuard`]. - #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockReadGuard always holds a shared lock. - unsafe { - s.rwlock.raw.unlock_shared(); - } - defer!(s.rwlock.raw.lock_shared()); - f() - } -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockReadGuard { - /// Unlocks the `RwLock` using a fair unlock protocol. - /// - /// This is functionally identical to the `unlock_fair` method on [`RwLockReadGuard`]. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: An RwLockReadGuard always holds a shared lock. 
- unsafe { - s.rwlock.raw.unlock_shared_fair(); - } - - // SAFETY: ensure the Arc has its refcount decremented - let mut s = ManuallyDrop::new(s); - unsafe { ptr::drop_in_place(&mut s.rwlock) }; - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// This is functionally identical to the `unlocked_fair` method on [`RwLockReadGuard`]. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockReadGuard always holds a shared lock. - unsafe { - s.rwlock.raw.unlock_shared_fair(); - } - defer!(s.rwlock.raw.lock_shared()); - f() - } - - /// Temporarily yields the `RwLock` to a waiting thread if there is one. - /// - /// This is functionally identical to the `bump` method on [`RwLockReadGuard`]. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: An RwLockReadGuard always holds a shared lock. - unsafe { - s.rwlock.raw.bump_shared(); - } - } -} - -#[cfg(feature = "arc_lock")] -impl Deref for ArcRwLockReadGuard { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.rwlock.data.get() } - } -} - -#[cfg(feature = "arc_lock")] -impl Drop for ArcRwLockReadGuard { - #[inline] - fn drop(&mut self) { - // Safety: An RwLockReadGuard always holds a shared lock. - unsafe { - self.rwlock.raw.unlock_shared(); - } - } -} - -#[cfg(feature = "arc_lock")] -impl fmt::Debug for ArcRwLockReadGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -#[cfg(feature = "arc_lock")] -impl fmt::Display for ArcRwLockReadGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -/// RAII structure used to release the exclusive write access of a lock when -/// dropped. 
-#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> { - rwlock: &'a RwLock, - marker: PhantomData<(&'a mut T, R::GuardMarker)>, -} - -unsafe impl Sync for RwLockWriteGuard<'_, R, T> {} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { - /// Returns a reference to the original reader-writer lock object. - pub fn rwlock(s: &Self) -> &'a RwLock { - s.rwlock - } - - /// Make a new `MappedRwLockWriteGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `RwLockWriteGuard` passed - /// in already locked the data. - /// - /// This is an associated function that needs to be - /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn map(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U> - where - F: FnOnce(&mut T) -> &mut U, - { - let raw = &s.rwlock.raw; - let data = f(unsafe { &mut *s.rwlock.data.get() }); - mem::forget(s); - MappedRwLockWriteGuard { - raw, - data, - marker: PhantomData, - } - } - - /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the - /// locked data. The original guard is return if the closure returns `None`. - /// - /// This operation cannot fail as the `RwLockWriteGuard` passed - /// in already locked the data. - /// - /// This is an associated function that needs to be - /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. 
- #[inline] - pub fn try_map(s: Self, f: F) -> Result, Self> - where - F: FnOnce(&mut T) -> Option<&mut U>, - { - let raw = &s.rwlock.raw; - let data = match f(unsafe { &mut *s.rwlock.data.get() }) { - Some(data) => data, - None => return Err(s), - }; - mem::forget(s); - Ok(MappedRwLockWriteGuard { - raw, - data, - marker: PhantomData, - }) - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the `RwLock`. - #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockReadGuard always holds a shared lock. - unsafe { - s.rwlock.raw.unlock_exclusive(); - } - defer!(s.rwlock.raw.lock_exclusive()); - f() - } -} - -impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { - /// Atomically downgrades a write lock into a read lock without allowing any - /// writers to take exclusive access of the lock in the meantime. - /// - /// Note that if there are any writers currently waiting to take the lock - /// then other readers may not be able to acquire the lock even if it was - /// downgraded. - pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - s.rwlock.raw.downgrade(); - } - let rwlock = s.rwlock; - mem::forget(s); - RwLockReadGuard { - rwlock, - marker: PhantomData, - } - } -} - -impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { - /// Atomically downgrades a write lock into an upgradable read lock without allowing any - /// writers to take exclusive access of the lock in the meantime. - /// - /// Note that if there are any writers currently waiting to take the lock - /// then other readers may not be able to acquire the lock even if it was - /// downgraded. 
- pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - s.rwlock.raw.downgrade_to_upgradable(); - } - let rwlock = s.rwlock; - mem::forget(s); - RwLockUpgradableReadGuard { - rwlock, - marker: PhantomData, - } - } -} - -impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { - /// Unlocks the `RwLock` using a fair unlock protocol. - /// - /// By default, `RwLock` is unfair and allow the current thread to re-lock - /// the `RwLock` before another has the chance to acquire the lock, even if - /// that thread has been blocked on the `RwLock` for a long time. This is - /// the default because it allows much higher throughput as it avoids - /// forcing a context switch on every `RwLock` unlock. This can result in one - /// thread acquiring a `RwLock` many more times than other threads. - /// - /// However in some cases it can be beneficial to ensure fairness by forcing - /// the lock to pass on to a waiting thread if there is one. This is done by - /// using this method instead of dropping the `RwLockWriteGuard` normally. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - s.rwlock.raw.unlock_exclusive_fair(); - } - mem::forget(s); - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// The `RwLock` is unlocked a fair unlock protocol. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the `RwLock`. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - s.rwlock.raw.unlock_exclusive_fair(); - } - defer!(s.rwlock.raw.lock_exclusive()); - f() - } - - /// Temporarily yields the `RwLock` to a waiting thread if there is one. 
- /// - /// This method is functionally equivalent to calling `unlock_fair` followed - /// by `write`, however it can be much more efficient in the case where there - /// are no waiting threads. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - s.rwlock.raw.bump_exclusive(); - } - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.rwlock.data.get() } - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> { - #[inline] - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.rwlock.data.get() } - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> { - #[inline] - fn drop(&mut self) { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - self.rwlock.raw.unlock_exclusive(); - } - } -} - -impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display - for RwLockWriteGuard<'a, R, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[cfg(feature = "owning_ref")] -unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {} - -/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`. -/// This is similar to the `RwLockWriteGuard` struct, except instead of using a reference to unlock the `RwLock` -/// it uses an `Arc`. This has several advantages, most notably that it has an `'static` lifetime. 
-#[cfg(feature = "arc_lock")] -#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct ArcRwLockWriteGuard { - rwlock: Arc>, - marker: PhantomData, -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockWriteGuard { - /// Returns a reference to the rwlock, contained in its `Arc`. - pub fn rwlock(s: &Self) -> &Arc> { - &s.rwlock - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// This is functionally equivalent to the `unlocked` method on [`RwLockWriteGuard`]. - #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockWriteGuard always holds a shared lock. - unsafe { - s.rwlock.raw.unlock_exclusive(); - } - defer!(s.rwlock.raw.lock_exclusive()); - f() - } -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockWriteGuard { - /// Atomically downgrades a write lock into a read lock without allowing any - /// writers to take exclusive access of the lock in the meantime. - /// - /// This is functionally equivalent to the `downgrade` method on [`RwLockWriteGuard`]. - pub fn downgrade(s: Self) -> ArcRwLockReadGuard { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - s.rwlock.raw.downgrade(); - } - - // SAFETY: prevent the arc's refcount from changing using ManuallyDrop and ptr::read - let s = ManuallyDrop::new(s); - let rwlock = unsafe { ptr::read(&s.rwlock) }; - - ArcRwLockReadGuard { - rwlock, - marker: PhantomData, - } - } -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockWriteGuard { - /// Atomically downgrades a write lock into an upgradable read lock without allowing any - /// writers to take exclusive access of the lock in the meantime. - /// - /// This is functionally identical to the `downgrade_to_upgradable` method on [`RwLockWriteGuard`]. - pub fn downgrade_to_upgradable(s: Self) -> ArcRwLockUpgradableReadGuard { - // Safety: An RwLockWriteGuard always holds an exclusive lock. 
- unsafe { - s.rwlock.raw.downgrade_to_upgradable(); - } - - // SAFETY: same as above - let s = ManuallyDrop::new(s); - let rwlock = unsafe { ptr::read(&s.rwlock) }; - - ArcRwLockUpgradableReadGuard { - rwlock, - marker: PhantomData, - } - } -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockWriteGuard { - /// Unlocks the `RwLock` using a fair unlock protocol. - /// - /// This is functionally equivalent to the `unlock_fair` method on [`RwLockWriteGuard`]. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - s.rwlock.raw.unlock_exclusive_fair(); - } - - // SAFETY: prevent the Arc from leaking memory - let mut s = ManuallyDrop::new(s); - unsafe { ptr::drop_in_place(&mut s.rwlock) }; - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockWriteGuard`]. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - s.rwlock.raw.unlock_exclusive_fair(); - } - defer!(s.rwlock.raw.lock_exclusive()); - f() - } - - /// Temporarily yields the `RwLock` to a waiting thread if there is one. - /// - /// This method is functionally equivalent to the `bump` method on [`RwLockWriteGuard`]. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: An RwLockWriteGuard always holds an exclusive lock. 
- unsafe { - s.rwlock.raw.bump_exclusive(); - } - } -} - -#[cfg(feature = "arc_lock")] -impl Deref for ArcRwLockWriteGuard { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.rwlock.data.get() } - } -} - -#[cfg(feature = "arc_lock")] -impl DerefMut for ArcRwLockWriteGuard { - #[inline] - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.rwlock.data.get() } - } -} - -#[cfg(feature = "arc_lock")] -impl Drop for ArcRwLockWriteGuard { - #[inline] - fn drop(&mut self) { - // Safety: An RwLockWriteGuard always holds an exclusive lock. - unsafe { - self.rwlock.raw.unlock_exclusive(); - } - } -} - -#[cfg(feature = "arc_lock")] -impl fmt::Debug for ArcRwLockWriteGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -#[cfg(feature = "arc_lock")] -impl fmt::Display for ArcRwLockWriteGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -/// RAII structure used to release the upgradable read access of a lock when -/// dropped. -#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> { - rwlock: &'a RwLock, - marker: PhantomData<(&'a T, R::GuardMarker)>, -} - -unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync - for RwLockUpgradableReadGuard<'a, R, T> -{ -} - -impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { - /// Returns a reference to the original reader-writer lock object. - pub fn rwlock(s: &Self) -> &'a RwLock { - s.rwlock - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the `RwLock`. 
- #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.unlock_upgradable(); - } - defer!(s.rwlock.raw.lock_upgradable()); - f() - } - - /// Atomically upgrades an upgradable read lock lock into an exclusive write lock, - /// blocking the current thread until it can be acquired. - pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.upgrade(); - } - let rwlock = s.rwlock; - mem::forget(s); - RwLockWriteGuard { - rwlock, - marker: PhantomData, - } - } - - /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock. - /// - /// If the access could not be granted at this time, then the current guard is returned. - pub fn try_upgrade(s: Self) -> Result, Self> { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - if unsafe { s.rwlock.raw.try_upgrade() } { - let rwlock = s.rwlock; - mem::forget(s); - Ok(RwLockWriteGuard { - rwlock, - marker: PhantomData, - }) - } else { - Err(s) - } - } -} - -impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { - /// Unlocks the `RwLock` using a fair unlock protocol. - /// - /// By default, `RwLock` is unfair and allow the current thread to re-lock - /// the `RwLock` before another has the chance to acquire the lock, even if - /// that thread has been blocked on the `RwLock` for a long time. This is - /// the default because it allows much higher throughput as it avoids - /// forcing a context switch on every `RwLock` unlock. This can result in one - /// thread acquiring a `RwLock` many more times than other threads. - /// - /// However in some cases it can be beneficial to ensure fairness by forcing - /// the lock to pass on to a waiting thread if there is one. 
This is done by - /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.unlock_upgradable_fair(); - } - mem::forget(s); - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// The `RwLock` is unlocked a fair unlock protocol. - /// - /// This is safe because `&mut` guarantees that there exist no other - /// references to the data protected by the `RwLock`. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.unlock_upgradable_fair(); - } - defer!(s.rwlock.raw.lock_upgradable()); - f() - } - - /// Temporarily yields the `RwLock` to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `unlock_fair` followed - /// by `upgradable_read`, however it can be much more efficient in the case where there - /// are no waiting threads. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.bump_upgradable(); - } - } -} - -impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { - /// Atomically downgrades an upgradable read lock lock into a shared read lock - /// without allowing any writers to take exclusive access of the lock in the - /// meantime. - /// - /// Note that if there are any writers currently waiting to take the lock - /// then other readers may not be able to acquire the lock even if it was - /// downgraded. - pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 
- unsafe { - s.rwlock.raw.downgrade_upgradable(); - } - let rwlock = s.rwlock; - mem::forget(s); - RwLockReadGuard { - rwlock, - marker: PhantomData, - } - } - - /// First, atomically upgrades an upgradable read lock lock into an exclusive write lock, - /// blocking the current thread until it can be acquired. - /// - /// Then, calls the provided closure with an exclusive reference to the lock's data. - /// - /// Finally, atomically downgrades the lock back to an upgradable read lock. - /// The closure's return value is wrapped in `Some` and returned. - /// - /// This function only requires a mutable reference to the guard, unlike - /// `upgrade` which takes the guard by value. - pub fn with_upgraded Ret>(&mut self, f: F) -> Ret { - unsafe { - self.rwlock.raw.upgrade(); - } - - // Safety: We just upgraded the lock, so we have mutable access to the data. - // This will restore the state the lock was in at the start of the function. - defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); - - // Safety: We upgraded the lock, so we have mutable access to the data. - // When this function returns, whether by drop or panic, - // the drop guard will downgrade it back to an upgradeable lock. - f(unsafe { &mut *self.rwlock.data.get() }) - } - - /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock. - /// - /// If the access could not be granted at this time, then `None` is returned. - /// - /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, - /// and finally downgrades the lock back to an upgradable read lock. - /// The closure's return value is wrapped in `Some` and returned. - /// - /// This function only requires a mutable reference to the guard, unlike - /// `try_upgrade` which takes the guard by value. 
- pub fn try_with_upgraded Ret>(&mut self, f: F) -> Option { - if unsafe { self.rwlock.raw.try_upgrade() } { - // Safety: We just upgraded the lock, so we have mutable access to the data. - // This will restore the state the lock was in at the start of the function. - defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); - - // Safety: We upgraded the lock, so we have mutable access to the data. - // When this function returns, whether by drop or panic, - // the drop guard will downgrade it back to an upgradeable lock. - Some(f(unsafe { &mut *self.rwlock.data.get() })) - } else { - None - } - } -} - -impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { - /// Tries to atomically upgrade an upgradable read lock into an exclusive - /// write lock, until a timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// the current guard is returned. - pub fn try_upgrade_for( - s: Self, - timeout: R::Duration, - ) -> Result, Self> { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } { - let rwlock = s.rwlock; - mem::forget(s); - Ok(RwLockWriteGuard { - rwlock, - marker: PhantomData, - }) - } else { - Err(s) - } - } - - /// Tries to atomically upgrade an upgradable read lock into an exclusive - /// write lock, until a timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// the current guard is returned. - #[inline] - pub fn try_upgrade_until( - s: Self, - timeout: R::Instant, - ) -> Result, Self> { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 
- if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } { - let rwlock = s.rwlock; - mem::forget(s); - Ok(RwLockWriteGuard { - rwlock, - marker: PhantomData, - }) - } else { - Err(s) - } - } -} - -impl<'a, R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> - RwLockUpgradableReadGuard<'a, R, T> -{ - /// Tries to atomically upgrade an upgradable read lock into an exclusive - /// write lock, until a timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. - /// - /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, - /// and finally downgrades the lock back to an upgradable read lock. - /// The closure's return value is wrapped in `Some` and returned. - /// - /// This function only requires a mutable reference to the guard, unlike - /// `try_upgrade_for` which takes the guard by value. - pub fn try_with_upgraded_for Ret>( - &mut self, - timeout: R::Duration, - f: F, - ) -> Option { - if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } { - // Safety: We just upgraded the lock, so we have mutable access to the data. - // This will restore the state the lock was in at the start of the function. - defer!(unsafe { self.rwlock.raw.downgrade_upgradable() }); - - // Safety: We upgraded the lock, so we have mutable access to the data. - // When this function returns, whether by drop or panic, - // the drop guard will downgrade it back to an upgradeable lock. - Some(f(unsafe { &mut *self.rwlock.data.get() })) - } else { - None - } - } - - /// Tries to atomically upgrade an upgradable read lock into an exclusive - /// write lock, until a timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. - /// - /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, - /// and finally downgrades the lock back to an upgradable read lock. 
- /// The closure's return value is wrapped in `Some` and returned. - /// - /// This function only requires a mutable reference to the guard, unlike - /// `try_upgrade_until` which takes the guard by value. - pub fn try_with_upgraded_until Ret>( - &mut self, - timeout: R::Instant, - f: F, - ) -> Option { - if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } { - // Safety: We just upgraded the lock, so we have mutable access to the data. - // This will restore the state the lock was in at the start of the function. - defer!(unsafe { self.rwlock.raw.downgrade_upgradable() }); - - // Safety: We upgraded the lock, so we have mutable access to the data. - // When this function returns, whether by drop or panic, - // the drop guard will downgrade it back to an upgradeable lock. - Some(f(unsafe { &mut *self.rwlock.data.get() })) - } else { - None - } - } -} - -impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.rwlock.data.get() } - } -} - -impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> { - #[inline] - fn drop(&mut self) { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 
- unsafe { - self.rwlock.raw.unlock_upgradable(); - } - } -} - -impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug - for RwLockUpgradableReadGuard<'a, R, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display - for RwLockUpgradableReadGuard<'a, R, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[cfg(feature = "owning_ref")] -unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress - for RwLockUpgradableReadGuard<'a, R, T> -{ -} - -/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`. -/// This is similar to the `RwLockUpgradableReadGuard` struct, except instead of using a reference to unlock the -/// `RwLock` it uses an `Arc`. This has several advantages, most notably that it has an `'static` -/// lifetime. -#[cfg(feature = "arc_lock")] -#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct ArcRwLockUpgradableReadGuard { - rwlock: Arc>, - marker: PhantomData, -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockUpgradableReadGuard { - /// Returns a reference to the rwlock, contained in its original `Arc`. - pub fn rwlock(s: &Self) -> &Arc> { - &s.rwlock - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// This is functionally identical to the `unlocked` method on [`RwLockUpgradableReadGuard`]. - #[inline] - pub fn unlocked(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.unlock_upgradable(); - } - defer!(s.rwlock.raw.lock_upgradable()); - f() - } - - /// Atomically upgrades an upgradable read lock lock into an exclusive write lock, - /// blocking the current thread until it can be acquired. 
- pub fn upgrade(s: Self) -> ArcRwLockWriteGuard { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.upgrade(); - } - - // SAFETY: avoid incrementing or decrementing the refcount using ManuallyDrop and reading the Arc out - // of the struct - let s = ManuallyDrop::new(s); - let rwlock = unsafe { ptr::read(&s.rwlock) }; - - ArcRwLockWriteGuard { - rwlock, - marker: PhantomData, - } - } - - /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock. - /// - /// If the access could not be granted at this time, then the current guard is returned. - pub fn try_upgrade(s: Self) -> Result, Self> { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - if unsafe { s.rwlock.raw.try_upgrade() } { - // SAFETY: same as above - let s = ManuallyDrop::new(s); - let rwlock = unsafe { ptr::read(&s.rwlock) }; - - Ok(ArcRwLockWriteGuard { - rwlock, - marker: PhantomData, - }) - } else { - Err(s) - } - } -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockUpgradableReadGuard { - /// Unlocks the `RwLock` using a fair unlock protocol. - /// - /// This is functionally identical to the `unlock_fair` method on [`RwLockUpgradableReadGuard`]. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.unlock_upgradable_fair(); - } - - // SAFETY: make sure we decrement the refcount properly - let mut s = ManuallyDrop::new(s); - unsafe { ptr::drop_in_place(&mut s.rwlock) }; - } - - /// Temporarily unlocks the `RwLock` to execute the given function. - /// - /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockUpgradableReadGuard`]. - #[inline] - pub fn unlocked_fair(s: &mut Self, f: F) -> U - where - F: FnOnce() -> U, - { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 
- unsafe { - s.rwlock.raw.unlock_upgradable_fair(); - } - defer!(s.rwlock.raw.lock_upgradable()); - f() - } - - /// Temporarily yields the `RwLock` to a waiting thread if there is one. - /// - /// This method is functionally equivalent to calling `bump` on [`RwLockUpgradableReadGuard`]. - #[inline] - pub fn bump(s: &mut Self) { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.bump_upgradable(); - } - } -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockUpgradableReadGuard { - /// Atomically downgrades an upgradable read lock lock into a shared read lock - /// without allowing any writers to take exclusive access of the lock in the - /// meantime. - /// - /// Note that if there are any writers currently waiting to take the lock - /// then other readers may not be able to acquire the lock even if it was - /// downgraded. - pub fn downgrade(s: Self) -> ArcRwLockReadGuard { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - s.rwlock.raw.downgrade_upgradable(); - } - - // SAFETY: use ManuallyDrop and ptr::read to ensure the refcount is not changed - let s = ManuallyDrop::new(s); - let rwlock = unsafe { ptr::read(&s.rwlock) }; - - ArcRwLockReadGuard { - rwlock, - marker: PhantomData, - } - } - - /// First, atomically upgrades an upgradable read lock lock into an exclusive write lock, - /// blocking the current thread until it can be acquired. - /// - /// Then, calls the provided closure with an exclusive reference to the lock's data. - /// - /// Finally, atomically downgrades the lock back to an upgradable read lock. - /// The closure's return value is returned. - /// - /// This function only requires a mutable reference to the guard, unlike - /// `upgrade` which takes the guard by value. - pub fn with_upgraded Ret>(&mut self, f: F) -> Ret { - unsafe { - self.rwlock.raw.upgrade(); - } - - // Safety: We just upgraded the lock, so we have mutable access to the data. 
- // This will restore the state the lock was in at the start of the function. - defer!(unsafe { self.rwlock.raw.downgrade_upgradable() }); - - // Safety: We upgraded the lock, so we have mutable access to the data. - // When this function returns, whether by drop or panic, - // the drop guard will downgrade it back to an upgradeable lock. - f(unsafe { &mut *self.rwlock.data.get() }) - } - - /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock. - /// - /// If the access could not be granted at this time, then `None` is returned. - /// - /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, - /// and finally downgrades the lock back to an upgradable read lock. - /// The closure's return value is wrapped in `Some` and returned. - /// - /// This function only requires a mutable reference to the guard, unlike - /// `try_upgrade` which takes the guard by value. - pub fn try_with_upgraded Ret>(&mut self, f: F) -> Option { - if unsafe { self.rwlock.raw.try_upgrade() } { - // Safety: We just upgraded the lock, so we have mutable access to the data. - // This will restore the state the lock was in at the start of the function. - defer!(unsafe { self.rwlock.raw.downgrade_upgradable() }); - - // Safety: We upgraded the lock, so we have mutable access to the data. - // When this function returns, whether by drop or panic, - // the drop guard will downgrade it back to an upgradeable lock. - Some(f(unsafe { &mut *self.rwlock.data.get() })) - } else { - None - } - } -} - -#[cfg(feature = "arc_lock")] -impl ArcRwLockUpgradableReadGuard { - /// Tries to atomically upgrade an upgradable read lock into an exclusive - /// write lock, until a timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// the current guard is returned. 
- pub fn try_upgrade_for( - s: Self, - timeout: R::Duration, - ) -> Result, Self> { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } { - // SAFETY: same as above - let s = ManuallyDrop::new(s); - let rwlock = unsafe { ptr::read(&s.rwlock) }; - - Ok(ArcRwLockWriteGuard { - rwlock, - marker: PhantomData, - }) - } else { - Err(s) - } - } - - /// Tries to atomically upgrade an upgradable read lock into an exclusive - /// write lock, until a timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// the current guard is returned. - #[inline] - pub fn try_upgrade_until( - s: Self, - timeout: R::Instant, - ) -> Result, Self> { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } { - // SAFETY: same as above - let s = ManuallyDrop::new(s); - let rwlock = unsafe { ptr::read(&s.rwlock) }; - - Ok(ArcRwLockWriteGuard { - rwlock, - marker: PhantomData, - }) - } else { - Err(s) - } - } -} - -#[cfg(feature = "arc_lock")] -impl - ArcRwLockUpgradableReadGuard -{ - /// Tries to atomically upgrade an upgradable read lock into an exclusive - /// write lock, until a timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. - /// - /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, - /// and finally downgrades the lock back to an upgradable read lock. - /// The closure's return value is wrapped in `Some` and returned. - /// - /// This function only requires a mutable reference to the guard, unlike - /// `try_upgrade_for` which takes the guard by value. 
- pub fn try_with_upgraded_for Ret>( - &mut self, - timeout: R::Duration, - f: F, - ) -> Option { - if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } { - // Safety: We just upgraded the lock, so we have mutable access to the data. - // This will restore the state the lock was in at the start of the function. - defer!(unsafe { self.rwlock.raw.downgrade_upgradable() }); - - // Safety: We upgraded the lock, so we have mutable access to the data. - // When this function returns, whether by drop or panic, - // the drop guard will downgrade it back to an upgradeable lock. - Some(f(unsafe { &mut *self.rwlock.data.get() })) - } else { - None - } - } - - /// Tries to atomically upgrade an upgradable read lock into an exclusive - /// write lock, until a timeout is reached. - /// - /// If the access could not be granted before the timeout expires, then - /// `None` is returned. - /// - /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, - /// and finally downgrades the lock back to an upgradable read lock. - /// The closure's return value is wrapped in `Some` and returned. - /// - /// This function only requires a mutable reference to the guard, unlike - /// `try_upgrade_until` which takes the guard by value. - pub fn try_with_upgraded_until Ret>( - &mut self, - timeout: R::Instant, - f: F, - ) -> Option { - if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } { - // Safety: We just upgraded the lock, so we have mutable access to the data. - // This will restore the state the lock was in at the start of the function. - defer!(unsafe { self.rwlock.raw.downgrade_upgradable() }); - - // Safety: We upgraded the lock, so we have mutable access to the data. - // When this function returns, whether by drop or panic, - // the drop guard will downgrade it back to an upgradeable lock. 
- Some(f(unsafe { &mut *self.rwlock.data.get() })) - } else { - None - } - } -} - -#[cfg(feature = "arc_lock")] -impl Deref for ArcRwLockUpgradableReadGuard { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.rwlock.data.get() } - } -} - -#[cfg(feature = "arc_lock")] -impl Drop for ArcRwLockUpgradableReadGuard { - #[inline] - fn drop(&mut self) { - // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. - unsafe { - self.rwlock.raw.unlock_upgradable(); - } - } -} - -#[cfg(feature = "arc_lock")] -impl fmt::Debug - for ArcRwLockUpgradableReadGuard -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -#[cfg(feature = "arc_lock")] -impl fmt::Display - for ArcRwLockUpgradableReadGuard -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a -/// subfield of the protected data. -/// -/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the -/// former doesn't support temporarily unlocking and re-locking, since that -/// could introduce soundness issues if the locked object is modified by another -/// thread. -#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> { - raw: &'a R, - data: *const T, - marker: PhantomData<&'a T>, -} - -unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {} -unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Send for MappedRwLockReadGuard<'a, R, T> where - R::GuardMarker: Send -{ -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { - /// Make a new `MappedRwLockReadGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `MappedRwLockReadGuard` passed - /// in already locked the data. 
- /// - /// This is an associated function that needs to be - /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn map(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> - where - F: FnOnce(&T) -> &U, - { - let raw = s.raw; - let data = f(unsafe { &*s.data }); - mem::forget(s); - MappedRwLockReadGuard { - raw, - data, - marker: PhantomData, - } - } - - /// Attempts to make a new `MappedRwLockReadGuard` for a component of the - /// locked data. The original guard is return if the closure returns `None`. - /// - /// This operation cannot fail as the `MappedRwLockReadGuard` passed - /// in already locked the data. - /// - /// This is an associated function that needs to be - /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn try_map(s: Self, f: F) -> Result, Self> - where - F: FnOnce(&T) -> Option<&U>, - { - let raw = s.raw; - let data = match f(unsafe { &*s.data }) { - Some(data) => data, - None => return Err(s), - }; - mem::forget(s); - Ok(MappedRwLockReadGuard { - raw, - data, - marker: PhantomData, - }) - } -} - -impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { - /// Unlocks the `RwLock` using a fair unlock protocol. - /// - /// By default, `RwLock` is unfair and allow the current thread to re-lock - /// the `RwLock` before another has the chance to acquire the lock, even if - /// that thread has been blocked on the `RwLock` for a long time. This is - /// the default because it allows much higher throughput as it avoids - /// forcing a context switch on every `RwLock` unlock. This can result in one - /// thread acquiring a `RwLock` many more times than other threads. 
- /// - /// However in some cases it can be beneficial to ensure fairness by forcing - /// the lock to pass on to a waiting thread if there is one. This is done by - /// using this method instead of dropping the `MappedRwLockReadGuard` normally. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: A MappedRwLockReadGuard always holds a shared lock. - unsafe { - s.raw.unlock_shared_fair(); - } - mem::forget(s); - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> { - #[inline] - fn drop(&mut self) { - // Safety: A MappedRwLockReadGuard always holds a shared lock. - unsafe { - self.raw.unlock_shared(); - } - } -} - -impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug - for MappedRwLockReadGuard<'a, R, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display - for MappedRwLockReadGuard<'a, R, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[cfg(feature = "owning_ref")] -unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress - for MappedRwLockReadGuard<'a, R, T> -{ -} - -/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a -/// subfield of the protected data. -/// -/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the -/// former doesn't support temporarily unlocking and re-locking, since that -/// could introduce soundness issues if the locked object is modified by another -/// thread. 
-#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> { - raw: &'a R, - data: *mut T, - marker: PhantomData<&'a mut T>, -} - -unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync - for MappedRwLockWriteGuard<'a, R, T> -{ -} -unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Send + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where - R::GuardMarker: Send -{ -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> { - /// Make a new `MappedRwLockWriteGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `MappedRwLockWriteGuard` passed - /// in already locked the data. - /// - /// This is an associated function that needs to be - /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - #[inline] - pub fn map(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U> - where - F: FnOnce(&mut T) -> &mut U, - { - let raw = s.raw; - let data = f(unsafe { &mut *s.data }); - mem::forget(s); - MappedRwLockWriteGuard { - raw, - data, - marker: PhantomData, - } - } - - /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the - /// locked data. The original guard is return if the closure returns `None`. - /// - /// This operation cannot fail as the `MappedRwLockWriteGuard` passed - /// in already locked the data. - /// - /// This is an associated function that needs to be - /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of - /// the same name on the contents of the locked data. 
- #[inline] - pub fn try_map(s: Self, f: F) -> Result, Self> - where - F: FnOnce(&mut T) -> Option<&mut U>, - { - let raw = s.raw; - let data = match f(unsafe { &mut *s.data }) { - Some(data) => data, - None => return Err(s), - }; - mem::forget(s); - Ok(MappedRwLockWriteGuard { - raw, - data, - marker: PhantomData, - }) - } -} - -impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> { - /// Unlocks the `RwLock` using a fair unlock protocol. - /// - /// By default, `RwLock` is unfair and allow the current thread to re-lock - /// the `RwLock` before another has the chance to acquire the lock, even if - /// that thread has been blocked on the `RwLock` for a long time. This is - /// the default because it allows much higher throughput as it avoids - /// forcing a context switch on every `RwLock` unlock. This can result in one - /// thread acquiring a `RwLock` many more times than other threads. - /// - /// However in some cases it can be beneficial to ensure fairness by forcing - /// the lock to pass on to a waiting thread if there is one. This is done by - /// using this method instead of dropping the `MappedRwLockWriteGuard` normally. - #[inline] - pub fn unlock_fair(s: Self) { - // Safety: A MappedRwLockWriteGuard always holds an exclusive lock. - unsafe { - s.raw.unlock_exclusive_fair(); - } - mem::forget(s); - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> { - #[inline] - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.data } - } -} - -impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> { - #[inline] - fn drop(&mut self) { - // Safety: A MappedRwLockWriteGuard always holds an exclusive lock. 
- unsafe { - self.raw.unlock_exclusive(); - } - } -} - -impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug - for MappedRwLockWriteGuard<'a, R, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display - for MappedRwLockWriteGuard<'a, R, T> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -#[cfg(feature = "owning_ref")] -unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress - for MappedRwLockWriteGuard<'a, R, T> -{ -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/miniz_oxide/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/miniz_oxide/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/Cargo.toml s390-tools-2.33.1/rust-vendor/miniz_oxide/Cargo.toml --- s390-tools-2.31.0/rust-vendor/miniz_oxide/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,74 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -name = "miniz_oxide" -version = "0.7.1" -authors = [ - "Frommi ", - "oyvindln ", -] -exclude = [ - "benches/*", - "tests/*", -] -description = "DEFLATE compression and decompression library rewritten in Rust based on miniz" -homepage = "https://github.com/Frommi/miniz_oxide/tree/master/miniz_oxide" -documentation = "https://docs.rs/miniz_oxide" -readme = "Readme.md" -keywords = [ - "zlib", - "miniz", - "deflate", - "encoding", -] -categories = ["compression"] -license = "MIT OR Zlib OR Apache-2.0" -repository = "https://github.com/Frommi/miniz_oxide/tree/master/miniz_oxide" - -[lib] -name = "miniz_oxide" - -[dependencies.adler] -version = "1.0" -default-features = false - -[dependencies.alloc] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-alloc" - -[dependencies.compiler_builtins] -version = "0.1.2" -optional = true - -[dependencies.core] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-core" - -[dependencies.simd-adler32] -version = "0.3" -optional = true -default-features = false - -[features] -default = ["with-alloc"] -rustc-dep-of-std = [ - "core", - "alloc", - "compiler_builtins", - "adler/rustc-dep-of-std", -] -simd = ["simd-adler32"] -std = [] -with-alloc = [] diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/LICENSE s390-tools-2.33.1/rust-vendor/miniz_oxide/LICENSE --- s390-tools-2.31.0/rust-vendor/miniz_oxide/LICENSE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 Frommi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is 
-furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/LICENSE-APACHE.md s390-tools-2.33.1/rust-vendor/miniz_oxide/LICENSE-APACHE.md --- s390-tools-2.31.0/rust-vendor/miniz_oxide/LICENSE-APACHE.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/LICENSE-APACHE.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,177 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/LICENSE-MIT.md s390-tools-2.33.1/rust-vendor/miniz_oxide/LICENSE-MIT.md --- s390-tools-2.31.0/rust-vendor/miniz_oxide/LICENSE-MIT.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/LICENSE-MIT.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 Frommi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/LICENSE-ZLIB.md s390-tools-2.33.1/rust-vendor/miniz_oxide/LICENSE-ZLIB.md --- s390-tools-2.31.0/rust-vendor/miniz_oxide/LICENSE-ZLIB.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/LICENSE-ZLIB.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -Copyright (c) 2020 Frommi - -This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. 
- -Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. - -2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. - -3. This notice may not be removed or altered from any source distribution. diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/Readme.md s390-tools-2.33.1/rust-vendor/miniz_oxide/Readme.md --- s390-tools-2.31.0/rust-vendor/miniz_oxide/Readme.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/Readme.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,44 +0,0 @@ -# miniz_oxide - -A fully safe, pure rust replacement for the [miniz](https://github.com/richgel999/miniz) DEFLATE/zlib encoder/decoder. -The main intention of this crate is to be used as a back-end for the [flate2](https://github.com/alexcrichton/flate2-rs), but it can also be used on it's own. Using flate2 with the ```rust_backend``` feature provides an easy to use streaming API for miniz_oxide. - -The library is fully [no_std](https://docs.rust-embedded.org/book/intro/no-std.html). By default, the `with-alloc` feature is enabled, which requires the use of the `alloc` and `collection` crates as it allocates memory. - -The `std` feature additionally turns on things only available if `no_std` is not used. Currently this only means implementing [Error](https://doc.rust-lang.org/stable/std/error/trait.Error.html) for the `DecompressError` error struct returned by the simple decompression functions if enabled together with `with-alloc`. 
- -Using the library with `default-features = false` removes the dependency on `alloc` -and `collection` crates, making it suitable for systems without an allocator. -Running without allocation reduces crate functionality: - -- The `deflate` module is removed completely -- Some `inflate` functions which return a `Vec` are removed - -miniz_oxide 0.5.x and 0.6.x Requires at least rust 1.40.0 0.3.x requires at least rust 0.36.0. - -miniz_oxide features no use of unsafe code. - -miniz_oxide can optionally be made to use a simd-accelerated version of adler32 via the [simd-adler32](https://crates.io/crates/simd-adler32) crate by enabling the 'simd' feature. This is not enabled by default as due to the use of simd intrinsics, the simd-adler32 has to use unsafe. The default setup uses the [adler](https://crates.io/crates/adler) crate which features no unsafe code. - -## Usage -Simple compression/decompression: -```rust - -use miniz_oxide::deflate::compress_to_vec; -use miniz_oxide::inflate::decompress_to_vec; - -fn roundtrip(data: &[u8]) { - // Compress the input - let compressed = compress_to_vec(data, 6); - // Decompress the compressed input and limit max output size to avoid going out of memory on large/malformed input. - let decompressed = decompress_to_vec_with_limit(compressed.as_slice(), 60000).expect("Failed to decompress!"); - // Check roundtrip succeeded - assert_eq!(data, decompressed); -} - -fn main() { - roundtrip("Hello, world!".as_bytes()); -} - -``` -These simple functions will do everything in one go and are thus not recommended for use cases outside of prototyping/testing as real world data can have any size and thus result in very large memory allocations for the output Vector. Consider using miniz_oxide via [flate2](https://github.com/alexcrichton/flate2-rs) which makes it easy to do streaming (de)compression or the low-level streaming functions instead. 
diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/deflate/buffer.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/deflate/buffer.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/deflate/buffer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/deflate/buffer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -//! Buffer wrappers implementing default so we can allocate the buffers with `Box::default()` -//! to avoid stack copies. Box::new() doesn't at the moment, and using a vec means we would lose -//! static length info. - -use crate::deflate::core::{LZ_DICT_SIZE, MAX_MATCH_LEN}; - -/// Size of the buffer of lz77 encoded data. -pub const LZ_CODE_BUF_SIZE: usize = 64 * 1024; -/// Size of the output buffer. -pub const OUT_BUF_SIZE: usize = (LZ_CODE_BUF_SIZE * 13) / 10; -pub const LZ_DICT_FULL_SIZE: usize = LZ_DICT_SIZE + MAX_MATCH_LEN - 1 + 1; - -/// Size of hash values in the hash chains. -pub const LZ_HASH_BITS: i32 = 15; -/// How many bits to shift when updating the current hash value. -pub const LZ_HASH_SHIFT: i32 = (LZ_HASH_BITS + 2) / 3; -/// Size of the chained hash tables. 
-pub const LZ_HASH_SIZE: usize = 1 << LZ_HASH_BITS; - -#[inline] -pub fn update_hash(current_hash: u16, byte: u8) -> u16 { - ((current_hash << LZ_HASH_SHIFT) ^ u16::from(byte)) & (LZ_HASH_SIZE as u16 - 1) -} - -pub struct HashBuffers { - pub dict: [u8; LZ_DICT_FULL_SIZE], - pub next: [u16; LZ_DICT_SIZE], - pub hash: [u16; LZ_DICT_SIZE], -} - -impl HashBuffers { - #[inline] - pub fn reset(&mut self) { - *self = HashBuffers::default(); - } -} - -impl Default for HashBuffers { - fn default() -> HashBuffers { - HashBuffers { - dict: [0; LZ_DICT_FULL_SIZE], - next: [0; LZ_DICT_SIZE], - hash: [0; LZ_DICT_SIZE], - } - } -} - -pub struct LocalBuf { - pub b: [u8; OUT_BUF_SIZE], -} - -impl Default for LocalBuf { - fn default() -> LocalBuf { - LocalBuf { - b: [0; OUT_BUF_SIZE], - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/deflate/core.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/deflate/core.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/deflate/core.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/deflate/core.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2462 +0,0 @@ -//! Streaming compression functionality. - -use alloc::boxed::Box; -use core::convert::TryInto; -use core::{cmp, mem}; - -use super::super::*; -use super::deflate_flags::*; -use super::CompressionLevel; -use crate::deflate::buffer::{ - update_hash, HashBuffers, LocalBuf, LZ_CODE_BUF_SIZE, LZ_DICT_FULL_SIZE, LZ_HASH_BITS, - LZ_HASH_SHIFT, LZ_HASH_SIZE, OUT_BUF_SIZE, -}; -use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER, MZ_ADLER32_INIT}; -use crate::DataFormat; - -// Currently not bubbled up outside this module, so can fill in with more -// context eventually if needed. -type Result = core::result::Result; -struct Error {} - -const MAX_PROBES_MASK: i32 = 0xFFF; - -const MAX_SUPPORTED_HUFF_CODESIZE: usize = 32; - -/// Length code for length values. 
-#[rustfmt::skip] -const LEN_SYM: [u16; 256] = [ - 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, - 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, - 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, - 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, - 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, - 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, - 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, - 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, - 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, - 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, - 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, - 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, - 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, - 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, - 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285 -]; - -/// Number of extra bits for length values. 
-#[rustfmt::skip] -const LEN_EXTRA: [u8; 256] = [ - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0 -]; - -/// Distance codes for distances smaller than 512. -#[rustfmt::skip] -const SMALL_DIST_SYM: [u8; 512] = [ - 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, - 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, - 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, - 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, - 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, - 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, - 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, - 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, - 16, 16, 16, 
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17 -]; - -/// Number of extra bits for distances smaller than 512. 
-#[rustfmt::skip] -const SMALL_DIST_EXTRA: [u8; 512] = [ - 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 -]; - -/// Base values to calculate distances above 512. 
-#[rustfmt::skip] -const LARGE_DIST_SYM: [u8; 128] = [ - 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, - 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, - 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, - 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, - 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 -]; - -/// Number of extra bits distances above 512. -#[rustfmt::skip] -const LARGE_DIST_EXTRA: [u8; 128] = [ - 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, - 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13 -]; - -#[rustfmt::skip] -const BITMASKS: [u32; 17] = [ - 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, - 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF -]; - -/// The maximum number of checks for matches in the hash table the compressor will make for each -/// compression level. -const NUM_PROBES: [u32; 11] = [0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500]; - -#[derive(Copy, Clone)] -struct SymFreq { - key: u16, - sym_index: u16, -} - -pub mod deflate_flags { - /// Whether to use a zlib wrapper. - pub const TDEFL_WRITE_ZLIB_HEADER: u32 = 0x0000_1000; - /// Should we compute the adler32 checksum. 
- pub const TDEFL_COMPUTE_ADLER32: u32 = 0x0000_2000; - /// Should we use greedy parsing (as opposed to lazy parsing where look ahead one or more - /// bytes to check for better matches.) - pub const TDEFL_GREEDY_PARSING_FLAG: u32 = 0x0000_4000; - /// Used in miniz to skip zero-initializing hash and dict. We don't do this here, so - /// this flag is ignored. - pub const TDEFL_NONDETERMINISTIC_PARSING_FLAG: u32 = 0x0000_8000; - /// Only look for matches with a distance of 0. - pub const TDEFL_RLE_MATCHES: u32 = 0x0001_0000; - /// Only use matches that are at least 6 bytes long. - pub const TDEFL_FILTER_MATCHES: u32 = 0x0002_0000; - /// Force the compressor to only output static blocks. (Blocks using the default huffman codes - /// specified in the deflate specification.) - pub const TDEFL_FORCE_ALL_STATIC_BLOCKS: u32 = 0x0004_0000; - /// Force the compressor to only output raw/uncompressed blocks. - pub const TDEFL_FORCE_ALL_RAW_BLOCKS: u32 = 0x0008_0000; -} - -/// Strategy setting for compression. -/// -/// The non-default settings offer some special-case compression variants. -#[repr(i32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum CompressionStrategy { - /// Don't use any of the special strategies. - Default = 0, - /// Only use matches that are at least 5 bytes long. - Filtered = 1, - /// Don't look for matches, only huffman encode the literals. - HuffmanOnly = 2, - /// Only look for matches with a distance of 1, i.e do run-length encoding only. - RLE = 3, - /// Only use static/fixed blocks. (Blocks using the default huffman codes - /// specified in the deflate specification.) - Fixed = 4, -} - -/// A list of deflate flush types. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum TDEFLFlush { - /// Normal operation. - /// - /// Compress as much as there is space for, and then return waiting for more input. - None = 0, - - /// Try to flush all the current data and output an empty raw block. 
- Sync = 2, - - /// Same as [`Sync`][Self::Sync], but reset the dictionary so that the following data does not - /// depend on previous data. - Full = 3, - - /// Try to flush everything and end the deflate stream. - /// - /// On success this will yield a [`TDEFLStatus::Done`] return status. - Finish = 4, -} - -impl From for TDEFLFlush { - fn from(flush: MZFlush) -> Self { - match flush { - MZFlush::None => TDEFLFlush::None, - MZFlush::Sync => TDEFLFlush::Sync, - MZFlush::Full => TDEFLFlush::Full, - MZFlush::Finish => TDEFLFlush::Finish, - _ => TDEFLFlush::None, // TODO: ??? What to do ??? - } - } -} - -impl TDEFLFlush { - pub fn new(flush: i32) -> Result { - match flush { - 0 => Ok(TDEFLFlush::None), - 2 => Ok(TDEFLFlush::Sync), - 3 => Ok(TDEFLFlush::Full), - 4 => Ok(TDEFLFlush::Finish), - _ => Err(MZError::Param), - } - } -} - -/// Return status of compression. -#[repr(i32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum TDEFLStatus { - /// Usage error. - /// - /// This indicates that either the [`CompressorOxide`] experienced a previous error, or the - /// stream has already been [`TDEFLFlush::Finish`]'d. - BadParam = -2, - - /// Error putting data into output buffer. - /// - /// This usually indicates a too-small buffer. - PutBufFailed = -1, - - /// Compression succeeded normally. - Okay = 0, - - /// Compression succeeded and the deflate stream was ended. - /// - /// This is the result of calling compression with [`TDEFLFlush::Finish`]. - Done = 1, -} - -const MAX_HUFF_SYMBOLS: usize = 288; -/// Size of hash chain for fast compression mode. -const LEVEL1_HASH_SIZE_MASK: u32 = 4095; -/// The number of huffman tables used by the compressor. -/// Literal/length, Distances and Length of the huffman codes for the other two tables. -const MAX_HUFF_TABLES: usize = 3; -/// Literal/length codes -const MAX_HUFF_SYMBOLS_0: usize = 288; -/// Distance codes. -const MAX_HUFF_SYMBOLS_1: usize = 32; -/// Huffman length values. 
-const MAX_HUFF_SYMBOLS_2: usize = 19; -/// Size of the chained hash table. -pub(crate) const LZ_DICT_SIZE: usize = 32_768; -/// Mask used when stepping through the hash chains. -const LZ_DICT_SIZE_MASK: usize = (LZ_DICT_SIZE as u32 - 1) as usize; -/// The minimum length of a match. -const MIN_MATCH_LEN: u8 = 3; -/// The maximum length of a match. -pub(crate) const MAX_MATCH_LEN: usize = 258; - -const DEFAULT_FLAGS: u32 = NUM_PROBES[4] | TDEFL_WRITE_ZLIB_HEADER; - -mod zlib { - const DEFAULT_CM: u8 = 8; - const DEFAULT_CINFO: u8 = 7 << 4; - const _DEFAULT_FDICT: u8 = 0; - const DEFAULT_CMF: u8 = DEFAULT_CM | DEFAULT_CINFO; - /// The 16-bit value consisting of CMF and FLG must be divisible by this to be valid. - const FCHECK_DIVISOR: u8 = 31; - - /// Generate FCHECK from CMF and FLG (without FCKECH )so that they are correct according to the - /// specification, i.e (CMF*256 + FCHK) % 31 = 0. - /// Returns flg with the FCHKECK bits added (any existing FCHECK bits are ignored). - fn add_fcheck(cmf: u8, flg: u8) -> u8 { - let rem = ((usize::from(cmf) * 256) + usize::from(flg)) % usize::from(FCHECK_DIVISOR); - - // Clear existing FCHECK if any - let flg = flg & 0b11100000; - - // Casting is safe as rem can't overflow since it is a value mod 31 - // We can simply add the value to flg as (31 - rem) will never be above 2^5 - flg + (FCHECK_DIVISOR - rem as u8) - } - - fn zlib_level_from_flags(flags: u32) -> u8 { - use super::NUM_PROBES; - - let num_probes = flags & (super::MAX_PROBES_MASK as u32); - if flags & super::TDEFL_GREEDY_PARSING_FLAG != 0 { - if num_probes <= 1 { - 0 - } else { - 1 - } - } else if num_probes >= NUM_PROBES[9] { - 3 - } else { - 2 - } - } - - /// Get the zlib header for the level using the default window size and no - /// dictionary. - fn header_from_level(level: u8) -> [u8; 2] { - let cmf = DEFAULT_CMF; - [cmf, add_fcheck(cmf, (level as u8) << 6)] - } - - /// Create a zlib header from the given compression flags. - /// Only level is considered. 
- pub fn header_from_flags(flags: u32) -> [u8; 2] { - let level = zlib_level_from_flags(flags); - header_from_level(level) - } - - #[cfg(test)] - mod test { - #[test] - fn zlib() { - use super::super::*; - use super::*; - - let test_level = |level, expected| { - let flags = create_comp_flags_from_zip_params( - level, - MZ_DEFAULT_WINDOW_BITS, - CompressionStrategy::Default as i32, - ); - assert_eq!(zlib_level_from_flags(flags), expected); - }; - - assert_eq!(zlib_level_from_flags(DEFAULT_FLAGS), 2); - test_level(0, 0); - test_level(1, 0); - test_level(2, 1); - test_level(3, 1); - for i in 4..=8 { - test_level(i, 2) - } - test_level(9, 3); - test_level(10, 3); - } - - #[test] - fn test_header() { - let header = super::header_from_level(3); - assert_eq!( - ((usize::from(header[0]) * 256) + usize::from(header[1])) % 31, - 0 - ); - } - } -} - -fn memset(slice: &mut [T], val: T) { - for x in slice { - *x = val - } -} - -#[cfg(test)] -#[inline] -fn write_u16_le(val: u16, slice: &mut [u8], pos: usize) { - slice[pos] = val as u8; - slice[pos + 1] = (val >> 8) as u8; -} - -// Read the two bytes starting at pos and interpret them as an u16. -#[inline] -const fn read_u16_le(slice: &[u8], pos: usize) -> u16 { - // The compiler is smart enough to optimize this into an unaligned load. - slice[pos] as u16 | ((slice[pos + 1] as u16) << 8) -} - -/// Main compression struct. -pub struct CompressorOxide { - lz: LZOxide, - params: ParamsOxide, - huff: Box, - dict: DictOxide, -} - -impl CompressorOxide { - /// Create a new `CompressorOxide` with the given flags. - /// - /// # Notes - /// This function may be changed to take different parameters in the future. - pub fn new(flags: u32) -> Self { - CompressorOxide { - lz: LZOxide::new(), - params: ParamsOxide::new(flags), - /// Put HuffmanOxide on the heap with default trick to avoid - /// excessive stack copies. - huff: Box::default(), - dict: DictOxide::new(flags), - } - } - - /// Get the adler32 checksum of the currently encoded data. 
- pub const fn adler32(&self) -> u32 { - self.params.adler32 - } - - /// Get the return status of the previous [`compress`](fn.compress.html) - /// call with this compressor. - pub const fn prev_return_status(&self) -> TDEFLStatus { - self.params.prev_return_status - } - - /// Get the raw compressor flags. - /// - /// # Notes - /// This function may be deprecated or changed in the future to use more rust-style flags. - pub const fn flags(&self) -> i32 { - self.params.flags as i32 - } - - /// Returns whether the compressor is wrapping the data in a zlib format or not. - pub fn data_format(&self) -> DataFormat { - if (self.params.flags & TDEFL_WRITE_ZLIB_HEADER) != 0 { - DataFormat::Zlib - } else { - DataFormat::Raw - } - } - - /// Reset the state of the compressor, keeping the same parameters. - /// - /// This avoids re-allocating data. - pub fn reset(&mut self) { - // LZ buf and huffman has no settings or dynamic memory - // that needs to be saved, so we simply replace them. - self.lz = LZOxide::new(); - self.params.reset(); - *self.huff = HuffmanOxide::default(); - self.dict.reset(); - } - - /// Set the compression level of the compressor. - /// - /// Using this to change level after compression has started is supported. - /// # Notes - /// The compression strategy will be reset to the default one when this is called. - pub fn set_compression_level(&mut self, level: CompressionLevel) { - let format = self.data_format(); - self.set_format_and_level(format, level as u8); - } - - /// Set the compression level of the compressor using an integer value. - /// - /// Using this to change level after compression has started is supported. - /// # Notes - /// The compression strategy will be reset to the default one when this is called. - pub fn set_compression_level_raw(&mut self, level: u8) { - let format = self.data_format(); - self.set_format_and_level(format, level); - } - - /// Update the compression settings of the compressor. 
- /// - /// Changing the `DataFormat` after compression has started will result in - /// a corrupted stream. - /// - /// # Notes - /// This function mainly intended for setting the initial settings after e.g creating with - /// `default` or after calling `CompressorOxide::reset()`, and behaviour may be changed - /// to disallow calling it after starting compression in the future. - pub fn set_format_and_level(&mut self, data_format: DataFormat, level: u8) { - let flags = create_comp_flags_from_zip_params( - level.into(), - data_format.to_window_bits(), - CompressionStrategy::Default as i32, - ); - self.params.update_flags(flags); - self.dict.update_flags(flags); - } -} - -impl Default for CompressorOxide { - /// Initialize the compressor with a level of 4, zlib wrapper and - /// the default strategy. - fn default() -> Self { - CompressorOxide { - lz: LZOxide::new(), - params: ParamsOxide::new(DEFAULT_FLAGS), - /// Put HuffmanOxide on the heap with default trick to avoid - /// excessive stack copies. - huff: Box::default(), - dict: DictOxide::new(DEFAULT_FLAGS), - } - } -} - -/// Callback function and user used in `compress_to_output`. -pub struct CallbackFunc<'a> { - pub put_buf_func: &'a mut dyn FnMut(&[u8]) -> bool, -} - -impl<'a> CallbackFunc<'a> { - fn flush_output( - &mut self, - saved_output: SavedOutputBufferOxide, - params: &mut ParamsOxide, - ) -> i32 { - // TODO: As this could be unsafe since - // we can't verify the function pointer - // this whole function should maybe be unsafe as well. 
- let call_success = (self.put_buf_func)(¶ms.local_buf.b[0..saved_output.pos as usize]); - - if !call_success { - params.prev_return_status = TDEFLStatus::PutBufFailed; - return params.prev_return_status as i32; - } - - params.flush_remaining as i32 - } -} - -struct CallbackBuf<'a> { - pub out_buf: &'a mut [u8], -} - -impl<'a> CallbackBuf<'a> { - fn flush_output( - &mut self, - saved_output: SavedOutputBufferOxide, - params: &mut ParamsOxide, - ) -> i32 { - if saved_output.local { - let n = cmp::min( - saved_output.pos as usize, - self.out_buf.len() - params.out_buf_ofs, - ); - (&mut self.out_buf[params.out_buf_ofs..params.out_buf_ofs + n]) - .copy_from_slice(¶ms.local_buf.b[..n]); - - params.out_buf_ofs += n; - if saved_output.pos != n { - params.flush_ofs = n as u32; - params.flush_remaining = (saved_output.pos - n) as u32; - } - } else { - params.out_buf_ofs += saved_output.pos; - } - - params.flush_remaining as i32 - } -} - -enum CallbackOut<'a> { - Func(CallbackFunc<'a>), - Buf(CallbackBuf<'a>), -} - -impl<'a> CallbackOut<'a> { - fn new_output_buffer<'b>( - &'b mut self, - local_buf: &'b mut [u8], - out_buf_ofs: usize, - ) -> OutputBufferOxide<'b> { - let is_local; - let buf_len = OUT_BUF_SIZE - 16; - let chosen_buffer = match *self { - CallbackOut::Buf(ref mut cb) if cb.out_buf.len() - out_buf_ofs >= OUT_BUF_SIZE => { - is_local = false; - &mut cb.out_buf[out_buf_ofs..out_buf_ofs + buf_len] - } - _ => { - is_local = true; - &mut local_buf[..buf_len] - } - }; - - OutputBufferOxide { - inner: chosen_buffer, - inner_pos: 0, - local: is_local, - bit_buffer: 0, - bits_in: 0, - } - } -} - -struct CallbackOxide<'a> { - in_buf: Option<&'a [u8]>, - in_buf_size: Option<&'a mut usize>, - out_buf_size: Option<&'a mut usize>, - out: CallbackOut<'a>, -} - -impl<'a> CallbackOxide<'a> { - fn new_callback_buf(in_buf: &'a [u8], out_buf: &'a mut [u8]) -> Self { - CallbackOxide { - in_buf: Some(in_buf), - in_buf_size: None, - out_buf_size: None, - out: 
CallbackOut::Buf(CallbackBuf { out_buf }), - } - } - - fn new_callback_func(in_buf: &'a [u8], callback_func: CallbackFunc<'a>) -> Self { - CallbackOxide { - in_buf: Some(in_buf), - in_buf_size: None, - out_buf_size: None, - out: CallbackOut::Func(callback_func), - } - } - - fn update_size(&mut self, in_size: Option, out_size: Option) { - if let (Some(in_size), Some(size)) = (in_size, self.in_buf_size.as_mut()) { - **size = in_size; - } - - if let (Some(out_size), Some(size)) = (out_size, self.out_buf_size.as_mut()) { - **size = out_size - } - } - - fn flush_output( - &mut self, - saved_output: SavedOutputBufferOxide, - params: &mut ParamsOxide, - ) -> i32 { - if saved_output.pos == 0 { - return params.flush_remaining as i32; - } - - self.update_size(Some(params.src_pos), None); - match self.out { - CallbackOut::Func(ref mut cf) => cf.flush_output(saved_output, params), - CallbackOut::Buf(ref mut cb) => cb.flush_output(saved_output, params), - } - } -} - -struct OutputBufferOxide<'a> { - pub inner: &'a mut [u8], - pub inner_pos: usize, - pub local: bool, - - pub bit_buffer: u32, - pub bits_in: u32, -} - -impl<'a> OutputBufferOxide<'a> { - fn put_bits(&mut self, bits: u32, len: u32) { - assert!(bits <= ((1u32 << len) - 1u32)); - self.bit_buffer |= bits << self.bits_in; - self.bits_in += len; - while self.bits_in >= 8 { - self.inner[self.inner_pos] = self.bit_buffer as u8; - self.inner_pos += 1; - self.bit_buffer >>= 8; - self.bits_in -= 8; - } - } - - const fn save(&self) -> SavedOutputBufferOxide { - SavedOutputBufferOxide { - pos: self.inner_pos, - bit_buffer: self.bit_buffer, - bits_in: self.bits_in, - local: self.local, - } - } - - fn load(&mut self, saved: SavedOutputBufferOxide) { - self.inner_pos = saved.pos; - self.bit_buffer = saved.bit_buffer; - self.bits_in = saved.bits_in; - self.local = saved.local; - } - - fn pad_to_bytes(&mut self) { - if self.bits_in != 0 { - let len = 8 - self.bits_in; - self.put_bits(0, len); - } - } -} - -struct 
SavedOutputBufferOxide { - pub pos: usize, - pub bit_buffer: u32, - pub bits_in: u32, - pub local: bool, -} - -struct BitBuffer { - pub bit_buffer: u64, - pub bits_in: u32, -} - -impl BitBuffer { - fn put_fast(&mut self, bits: u64, len: u32) { - self.bit_buffer |= bits << self.bits_in; - self.bits_in += len; - } - - fn flush(&mut self, output: &mut OutputBufferOxide) -> Result<()> { - let pos = output.inner_pos; - { - // isolation to please borrow checker - let inner = &mut output.inner[pos..pos + 8]; - let bytes = u64::to_le_bytes(self.bit_buffer); - inner.copy_from_slice(&bytes); - } - match output.inner_pos.checked_add((self.bits_in >> 3) as usize) { - Some(n) if n <= output.inner.len() => output.inner_pos = n, - _ => return Err(Error {}), - } - self.bit_buffer >>= self.bits_in & !7; - self.bits_in &= 7; - Ok(()) - } -} - -/// A struct containing data about huffman codes and symbol frequencies. -/// -/// NOTE: Only the literal/lengths have enough symbols to actually use -/// the full array. It's unclear why it's defined like this in miniz, -/// it could be for cache/alignment reasons. -struct HuffmanOxide { - /// Number of occurrences of each symbol. - pub count: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], - /// The bits of the huffman code assigned to the symbol - pub codes: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], - /// The length of the huffman code assigned to the symbol. - pub code_sizes: [[u8; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], -} - -/// Tables used for literal/lengths in `HuffmanOxide`. -const LITLEN_TABLE: usize = 0; -/// Tables for distances. -const DIST_TABLE: usize = 1; -/// Tables for the run-length encoded huffman lengths for literals/lengths/distances. -const HUFF_CODES_TABLE: usize = 2; - -/// Status of RLE encoding of huffman code lengths. 
-struct Rle { - pub z_count: u32, - pub repeat_count: u32, - pub prev_code_size: u8, -} - -impl Rle { - fn prev_code_size( - &mut self, - packed_code_sizes: &mut [u8], - packed_pos: &mut usize, - h: &mut HuffmanOxide, - ) -> Result<()> { - let mut write = |buf| write(buf, packed_code_sizes, packed_pos); - let counts = &mut h.count[HUFF_CODES_TABLE]; - if self.repeat_count != 0 { - if self.repeat_count < 3 { - counts[self.prev_code_size as usize] = - counts[self.prev_code_size as usize].wrapping_add(self.repeat_count as u16); - let code = self.prev_code_size; - write(&[code, code, code][..self.repeat_count as usize])?; - } else { - counts[16] = counts[16].wrapping_add(1); - write(&[16, (self.repeat_count - 3) as u8][..])?; - } - self.repeat_count = 0; - } - - Ok(()) - } - - fn zero_code_size( - &mut self, - packed_code_sizes: &mut [u8], - packed_pos: &mut usize, - h: &mut HuffmanOxide, - ) -> Result<()> { - let mut write = |buf| write(buf, packed_code_sizes, packed_pos); - let counts = &mut h.count[HUFF_CODES_TABLE]; - if self.z_count != 0 { - if self.z_count < 3 { - counts[0] = counts[0].wrapping_add(self.z_count as u16); - write(&[0, 0, 0][..self.z_count as usize])?; - } else if self.z_count <= 10 { - counts[17] = counts[17].wrapping_add(1); - write(&[17, (self.z_count - 3) as u8][..])?; - } else { - counts[18] = counts[18].wrapping_add(1); - write(&[18, (self.z_count - 11) as u8][..])?; - } - self.z_count = 0; - } - - Ok(()) - } -} - -fn write(src: &[u8], dst: &mut [u8], dst_pos: &mut usize) -> Result<()> { - match dst.get_mut(*dst_pos..*dst_pos + src.len()) { - Some(s) => s.copy_from_slice(src), - None => return Err(Error {}), - } - *dst_pos += src.len(); - Ok(()) -} - -impl Default for HuffmanOxide { - fn default() -> Self { - HuffmanOxide { - count: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], - codes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], - code_sizes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], - } - } -} - -impl HuffmanOxide { - fn radix_sort_symbols<'a>( - 
symbols0: &'a mut [SymFreq], - symbols1: &'a mut [SymFreq], - ) -> &'a mut [SymFreq] { - let mut hist = [[0; 256]; 2]; - - for freq in symbols0.iter() { - hist[0][(freq.key & 0xFF) as usize] += 1; - hist[1][((freq.key >> 8) & 0xFF) as usize] += 1; - } - - let mut n_passes = 2; - if symbols0.len() == hist[1][0] { - n_passes -= 1; - } - - let mut current_symbols = symbols0; - let mut new_symbols = symbols1; - - for (pass, hist_item) in hist.iter().enumerate().take(n_passes) { - let mut offsets = [0; 256]; - let mut offset = 0; - for i in 0..256 { - offsets[i] = offset; - offset += hist_item[i]; - } - - for sym in current_symbols.iter() { - let j = ((sym.key >> (pass * 8)) & 0xFF) as usize; - new_symbols[offsets[j]] = *sym; - offsets[j] += 1; - } - - mem::swap(&mut current_symbols, &mut new_symbols); - } - - current_symbols - } - - fn calculate_minimum_redundancy(symbols: &mut [SymFreq]) { - match symbols.len() { - 0 => (), - 1 => symbols[0].key = 1, - n => { - symbols[0].key += symbols[1].key; - let mut root = 0; - let mut leaf = 2; - for next in 1..n - 1 { - if (leaf >= n) || (symbols[root].key < symbols[leaf].key) { - symbols[next].key = symbols[root].key; - symbols[root].key = next as u16; - root += 1; - } else { - symbols[next].key = symbols[leaf].key; - leaf += 1; - } - - if (leaf >= n) || (root < next && symbols[root].key < symbols[leaf].key) { - symbols[next].key = symbols[next].key.wrapping_add(symbols[root].key); - symbols[root].key = next as u16; - root += 1; - } else { - symbols[next].key = symbols[next].key.wrapping_add(symbols[leaf].key); - leaf += 1; - } - } - - symbols[n - 2].key = 0; - for next in (0..n - 2).rev() { - symbols[next].key = symbols[symbols[next].key as usize].key + 1; - } - - let mut avbl = 1; - let mut used = 0; - let mut dpth = 0; - let mut root = (n - 2) as i32; - let mut next = (n - 1) as i32; - while avbl > 0 { - while (root >= 0) && (symbols[root as usize].key == dpth) { - used += 1; - root -= 1; - } - while avbl > used { - 
symbols[next as usize].key = dpth; - next -= 1; - avbl -= 1; - } - avbl = 2 * used; - dpth += 1; - used = 0; - } - } - } - } - - fn enforce_max_code_size(num_codes: &mut [i32], code_list_len: usize, max_code_size: usize) { - if code_list_len <= 1 { - return; - } - - num_codes[max_code_size] += num_codes[max_code_size + 1..].iter().sum::(); - let total = num_codes[1..=max_code_size] - .iter() - .rev() - .enumerate() - .fold(0u32, |total, (i, &x)| total + ((x as u32) << i)); - - for _ in (1 << max_code_size)..total { - num_codes[max_code_size] -= 1; - for i in (1..max_code_size).rev() { - if num_codes[i] != 0 { - num_codes[i] -= 1; - num_codes[i + 1] += 2; - break; - } - } - } - } - - fn optimize_table( - &mut self, - table_num: usize, - table_len: usize, - code_size_limit: usize, - static_table: bool, - ) { - let mut num_codes = [0i32; MAX_SUPPORTED_HUFF_CODESIZE + 1]; - let mut next_code = [0u32; MAX_SUPPORTED_HUFF_CODESIZE + 1]; - - if static_table { - for &code_size in &self.code_sizes[table_num][..table_len] { - num_codes[code_size as usize] += 1; - } - } else { - let mut symbols0 = [SymFreq { - key: 0, - sym_index: 0, - }; MAX_HUFF_SYMBOLS]; - let mut symbols1 = [SymFreq { - key: 0, - sym_index: 0, - }; MAX_HUFF_SYMBOLS]; - - let mut num_used_symbols = 0; - for i in 0..table_len { - if self.count[table_num][i] != 0 { - symbols0[num_used_symbols] = SymFreq { - key: self.count[table_num][i], - sym_index: i as u16, - }; - num_used_symbols += 1; - } - } - - let symbols = Self::radix_sort_symbols( - &mut symbols0[..num_used_symbols], - &mut symbols1[..num_used_symbols], - ); - Self::calculate_minimum_redundancy(symbols); - - for symbol in symbols.iter() { - num_codes[symbol.key as usize] += 1; - } - - Self::enforce_max_code_size(&mut num_codes, num_used_symbols, code_size_limit); - - memset(&mut self.code_sizes[table_num][..], 0); - memset(&mut self.codes[table_num][..], 0); - - let mut last = num_used_symbols; - for (i, &num_item) in num_codes - .iter() - 
.enumerate() - .take(code_size_limit + 1) - .skip(1) - { - let first = last - num_item as usize; - for symbol in &symbols[first..last] { - self.code_sizes[table_num][symbol.sym_index as usize] = i as u8; - } - last = first; - } - } - - let mut j = 0; - next_code[1] = 0; - for i in 2..=code_size_limit { - j = (j + num_codes[i - 1]) << 1; - next_code[i] = j as u32; - } - - for (&code_size, huff_code) in self.code_sizes[table_num] - .iter() - .take(table_len) - .zip(self.codes[table_num].iter_mut().take(table_len)) - { - if code_size == 0 { - continue; - } - - let mut code = next_code[code_size as usize]; - next_code[code_size as usize] += 1; - - let mut rev_code = 0; - for _ in 0..code_size { - rev_code = (rev_code << 1) | (code & 1); - code >>= 1; - } - *huff_code = rev_code as u16; - } - } - - fn start_static_block(&mut self, output: &mut OutputBufferOxide) { - memset(&mut self.code_sizes[LITLEN_TABLE][0..144], 8); - memset(&mut self.code_sizes[LITLEN_TABLE][144..256], 9); - memset(&mut self.code_sizes[LITLEN_TABLE][256..280], 7); - memset(&mut self.code_sizes[LITLEN_TABLE][280..288], 8); - - memset(&mut self.code_sizes[DIST_TABLE][..32], 5); - - self.optimize_table(LITLEN_TABLE, 288, 15, true); - self.optimize_table(DIST_TABLE, 32, 15, true); - - output.put_bits(0b01, 2) - } - - fn start_dynamic_block(&mut self, output: &mut OutputBufferOxide) -> Result<()> { - // There will always be one, and only one end of block code. 
- self.count[0][256] = 1; - - self.optimize_table(0, MAX_HUFF_SYMBOLS_0, 15, false); - self.optimize_table(1, MAX_HUFF_SYMBOLS_1, 15, false); - - let num_lit_codes = 286 - - &self.code_sizes[0][257..286] - .iter() - .rev() - .take_while(|&x| *x == 0) - .count(); - - let num_dist_codes = 30 - - &self.code_sizes[1][1..30] - .iter() - .rev() - .take_while(|&x| *x == 0) - .count(); - - let mut code_sizes_to_pack = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1]; - let mut packed_code_sizes = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1]; - - let total_code_sizes_to_pack = num_lit_codes + num_dist_codes; - - code_sizes_to_pack[..num_lit_codes].copy_from_slice(&self.code_sizes[0][..num_lit_codes]); - - code_sizes_to_pack[num_lit_codes..total_code_sizes_to_pack] - .copy_from_slice(&self.code_sizes[1][..num_dist_codes]); - - let mut rle = Rle { - z_count: 0, - repeat_count: 0, - prev_code_size: 0xFF, - }; - - memset(&mut self.count[HUFF_CODES_TABLE][..MAX_HUFF_SYMBOLS_2], 0); - - let mut packed_pos = 0; - for &code_size in &code_sizes_to_pack[..total_code_sizes_to_pack] { - if code_size == 0 { - rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; - rle.z_count += 1; - if rle.z_count == 138 { - rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; - } - } else { - rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; - if code_size != rle.prev_code_size { - rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; - self.count[HUFF_CODES_TABLE][code_size as usize] = - self.count[HUFF_CODES_TABLE][code_size as usize].wrapping_add(1); - write(&[code_size], &mut packed_code_sizes, &mut packed_pos)?; - } else { - rle.repeat_count += 1; - if rle.repeat_count == 6 { - rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; - } - } - } - rle.prev_code_size = code_size; - } - - if rle.repeat_count != 0 { - rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; - } else { - rle.zero_code_size(&mut 
packed_code_sizes, &mut packed_pos, self)?; - } - - self.optimize_table(2, MAX_HUFF_SYMBOLS_2, 7, false); - - output.put_bits(2, 2); - - output.put_bits((num_lit_codes - 257) as u32, 5); - output.put_bits((num_dist_codes - 1) as u32, 5); - - let mut num_bit_lengths = 18 - - HUFFMAN_LENGTH_ORDER - .iter() - .rev() - .take_while(|&swizzle| self.code_sizes[HUFF_CODES_TABLE][*swizzle as usize] == 0) - .count(); - - num_bit_lengths = cmp::max(4, num_bit_lengths + 1); - output.put_bits(num_bit_lengths as u32 - 4, 4); - for &swizzle in &HUFFMAN_LENGTH_ORDER[..num_bit_lengths] { - output.put_bits( - u32::from(self.code_sizes[HUFF_CODES_TABLE][swizzle as usize]), - 3, - ); - } - - let mut packed_code_size_index = 0; - while packed_code_size_index < packed_pos { - let code = packed_code_sizes[packed_code_size_index] as usize; - packed_code_size_index += 1; - assert!(code < MAX_HUFF_SYMBOLS_2); - output.put_bits( - u32::from(self.codes[HUFF_CODES_TABLE][code]), - u32::from(self.code_sizes[HUFF_CODES_TABLE][code]), - ); - if code >= 16 { - output.put_bits( - u32::from(packed_code_sizes[packed_code_size_index]), - [2, 3, 7][code - 16], - ); - packed_code_size_index += 1; - } - } - - Ok(()) - } -} - -struct DictOxide { - /// The maximum number of checks in the hash chain, for the initial, - /// and the lazy match respectively. - pub max_probes: [u32; 2], - /// Buffer of input data. - /// Padded with 1 byte to simplify matching code in `compress_fast`. 
- pub b: Box, - - pub code_buf_dict_pos: usize, - pub lookahead_size: usize, - pub lookahead_pos: usize, - pub size: usize, -} - -const fn probes_from_flags(flags: u32) -> [u32; 2] { - [ - 1 + ((flags & 0xFFF) + 2) / 3, - 1 + (((flags & 0xFFF) >> 2) + 2) / 3, - ] -} - -impl DictOxide { - fn new(flags: u32) -> Self { - DictOxide { - max_probes: probes_from_flags(flags), - b: Box::default(), - code_buf_dict_pos: 0, - lookahead_size: 0, - lookahead_pos: 0, - size: 0, - } - } - - fn update_flags(&mut self, flags: u32) { - self.max_probes = probes_from_flags(flags); - } - - fn reset(&mut self) { - self.b.reset(); - self.code_buf_dict_pos = 0; - self.lookahead_size = 0; - self.lookahead_pos = 0; - self.size = 0; - } - - /// Do an unaligned read of the data at `pos` in the dictionary and treat it as if it was of - /// type T. - #[inline] - fn read_unaligned_u32(&self, pos: usize) -> u32 { - // Masking the value here helps avoid bounds checks. - let pos = (pos & LZ_DICT_SIZE_MASK) as usize; - let end = pos + 4; - // Somehow this assertion makes things faster. - assert!(end < LZ_DICT_FULL_SIZE); - - let bytes: [u8; 4] = self.b.dict[pos..end].try_into().unwrap(); - u32::from_le_bytes(bytes) - } - - /// Do an unaligned read of the data at `pos` in the dictionary and treat it as if it was of - /// type T. - #[inline] - fn read_unaligned_u64(&self, pos: usize) -> u64 { - let pos = pos as usize; - let bytes: [u8; 8] = self.b.dict[pos..pos + 8].try_into().unwrap(); - u64::from_le_bytes(bytes) - } - - /// Do an unaligned read of the data at `pos` in the dictionary and treat it as if it was of - /// type T. - #[inline] - fn read_as_u16(&self, pos: usize) -> u16 { - read_u16_le(&self.b.dict[..], pos) - } - - /// Try to find a match for the data at lookahead_pos in the dictionary that is - /// longer than `match_len`. - /// Returns a tuple containing (match_distance, match_length). Will be equal to the input - /// values if no better matches were found. 
- fn find_match( - &self, - lookahead_pos: usize, - max_dist: usize, - max_match_len: u32, - mut match_dist: u32, - mut match_len: u32, - ) -> (u32, u32) { - // Clamp the match len and max_match_len to be valid. (It should be when this is called, but - // do it for now just in case for safety reasons.) - // This should normally end up as at worst conditional moves, - // so it shouldn't slow us down much. - // TODO: Statically verify these so we don't need to do this. - let max_match_len = cmp::min(MAX_MATCH_LEN as u32, max_match_len); - match_len = cmp::max(match_len, 1); - - let pos = lookahead_pos as usize & LZ_DICT_SIZE_MASK; - let mut probe_pos = pos; - // Number of probes into the hash chains. - let mut num_probes_left = self.max_probes[(match_len >= 32) as usize]; - - // If we already have a match of the full length don't bother searching for another one. - if max_match_len <= match_len { - return (match_dist, match_len); - } - - // Read the last byte of the current match, and the next one, used to compare matches. - let mut c01: u16 = self.read_as_u16(pos as usize + match_len as usize - 1); - // Read the two bytes at the end position of the current match. - let s01: u16 = self.read_as_u16(pos as usize); - - 'outer: loop { - let mut dist; - 'found: loop { - num_probes_left -= 1; - if num_probes_left == 0 { - // We have done as many probes in the hash chain as the current compression - // settings allow, so return the best match we found, if any. - return (match_dist, match_len); - } - - for _ in 0..3 { - let next_probe_pos = self.b.next[probe_pos as usize] as usize; - - dist = (lookahead_pos - next_probe_pos) & 0xFFFF; - if next_probe_pos == 0 || dist > max_dist { - // We reached the end of the hash chain, or the next value is further away - // than the maximum allowed distance, so return the best match we found, if - // any. 
- return (match_dist, match_len); - } - - // Mask the position value to get the position in the hash chain of the next - // position to match against. - probe_pos = next_probe_pos & LZ_DICT_SIZE_MASK; - - if self.read_as_u16((probe_pos + match_len as usize - 1) as usize) == c01 { - break 'found; - } - } - } - - if dist == 0 { - // We've looked through the whole match range, so return the best match we - // found. - return (match_dist, match_len); - } - - // Check if the two first bytes match. - if self.read_as_u16(probe_pos as usize) != s01 { - continue; - } - - let mut p = pos + 2; - let mut q = probe_pos + 2; - // The first two bytes matched, so check the full length of the match. - for _ in 0..32 { - let p_data: u64 = self.read_unaligned_u64(p); - let q_data: u64 = self.read_unaligned_u64(q); - // Compare of 8 bytes at a time by using unaligned loads of 64-bit integers. - let xor_data = p_data ^ q_data; - if xor_data == 0 { - p += 8; - q += 8; - } else { - // If not all of the last 8 bytes matched, check how may of them did. - let trailing = xor_data.trailing_zeros(); - - let probe_len = p - pos + (trailing as usize >> 3); - if probe_len > match_len as usize { - match_dist = dist as u32; - match_len = cmp::min(max_match_len, probe_len as u32); - if match_len == max_match_len { - // We found a match that had the maximum allowed length, - // so there is now point searching further. - return (match_dist, match_len); - } - // We found a better match, so save the last two bytes for further match - // comparisons. 
- c01 = self.read_as_u16(pos + match_len as usize - 1) - } - continue 'outer; - } - } - - return (dist as u32, cmp::min(max_match_len, MAX_MATCH_LEN as u32)); - } - } -} - -struct ParamsOxide { - pub flags: u32, - pub greedy_parsing: bool, - pub block_index: u32, - - pub saved_match_dist: u32, - pub saved_match_len: u32, - pub saved_lit: u8, - - pub flush: TDEFLFlush, - pub flush_ofs: u32, - pub flush_remaining: u32, - pub finished: bool, - - pub adler32: u32, - - pub src_pos: usize, - - pub out_buf_ofs: usize, - pub prev_return_status: TDEFLStatus, - - pub saved_bit_buffer: u32, - pub saved_bits_in: u32, - - pub local_buf: Box, -} - -impl ParamsOxide { - fn new(flags: u32) -> Self { - ParamsOxide { - flags, - greedy_parsing: flags & TDEFL_GREEDY_PARSING_FLAG != 0, - block_index: 0, - saved_match_dist: 0, - saved_match_len: 0, - saved_lit: 0, - flush: TDEFLFlush::None, - flush_ofs: 0, - flush_remaining: 0, - finished: false, - adler32: MZ_ADLER32_INIT, - src_pos: 0, - out_buf_ofs: 0, - prev_return_status: TDEFLStatus::Okay, - saved_bit_buffer: 0, - saved_bits_in: 0, - local_buf: Box::default(), - } - } - - fn update_flags(&mut self, flags: u32) { - self.flags = flags; - self.greedy_parsing = self.flags & TDEFL_GREEDY_PARSING_FLAG != 0; - } - - /// Reset state, saving settings. - fn reset(&mut self) { - self.block_index = 0; - self.saved_match_len = 0; - self.saved_match_dist = 0; - self.saved_lit = 0; - self.flush = TDEFLFlush::None; - self.flush_ofs = 0; - self.flush_remaining = 0; - self.finished = false; - self.adler32 = MZ_ADLER32_INIT; - self.src_pos = 0; - self.out_buf_ofs = 0; - self.prev_return_status = TDEFLStatus::Okay; - self.saved_bit_buffer = 0; - self.saved_bits_in = 0; - self.local_buf.b = [0; OUT_BUF_SIZE]; - } -} - -struct LZOxide { - pub codes: [u8; LZ_CODE_BUF_SIZE], - pub code_position: usize, - pub flag_position: usize, - - // The total number of bytes in the current block. 
- // (Could maybe use usize, but it's not possible to exceed a block size of ) - pub total_bytes: u32, - pub num_flags_left: u32, -} - -impl LZOxide { - const fn new() -> Self { - LZOxide { - codes: [0; LZ_CODE_BUF_SIZE], - code_position: 1, - flag_position: 0, - total_bytes: 0, - num_flags_left: 8, - } - } - - fn write_code(&mut self, val: u8) { - self.codes[self.code_position] = val; - self.code_position += 1; - } - - fn init_flag(&mut self) { - if self.num_flags_left == 8 { - *self.get_flag() = 0; - self.code_position -= 1; - } else { - *self.get_flag() >>= self.num_flags_left; - } - } - - fn get_flag(&mut self) -> &mut u8 { - &mut self.codes[self.flag_position] - } - - fn plant_flag(&mut self) { - self.flag_position = self.code_position; - self.code_position += 1; - } - - fn consume_flag(&mut self) { - self.num_flags_left -= 1; - if self.num_flags_left == 0 { - self.num_flags_left = 8; - self.plant_flag(); - } - } -} - -fn compress_lz_codes( - huff: &HuffmanOxide, - output: &mut OutputBufferOxide, - lz_code_buf: &[u8], -) -> Result { - let mut flags = 1; - let mut bb = BitBuffer { - bit_buffer: u64::from(output.bit_buffer), - bits_in: output.bits_in, - }; - - let mut i: usize = 0; - while i < lz_code_buf.len() { - if flags == 1 { - flags = u32::from(lz_code_buf[i]) | 0x100; - i += 1; - } - - // The lz code was a length code - if flags & 1 == 1 { - flags >>= 1; - - let sym; - let num_extra_bits; - - let match_len = lz_code_buf[i] as usize; - - let match_dist = read_u16_le(lz_code_buf, i + 1); - - i += 3; - - debug_assert!(huff.code_sizes[0][LEN_SYM[match_len] as usize] != 0); - bb.put_fast( - u64::from(huff.codes[0][LEN_SYM[match_len] as usize]), - u32::from(huff.code_sizes[0][LEN_SYM[match_len] as usize]), - ); - bb.put_fast( - match_len as u64 & u64::from(BITMASKS[LEN_EXTRA[match_len] as usize]), - u32::from(LEN_EXTRA[match_len]), - ); - - if match_dist < 512 { - sym = SMALL_DIST_SYM[match_dist as usize] as usize; - num_extra_bits = SMALL_DIST_EXTRA[match_dist 
as usize] as usize; - } else { - sym = LARGE_DIST_SYM[(match_dist >> 8) as usize] as usize; - num_extra_bits = LARGE_DIST_EXTRA[(match_dist >> 8) as usize] as usize; - } - - debug_assert!(huff.code_sizes[1][sym] != 0); - bb.put_fast( - u64::from(huff.codes[1][sym]), - u32::from(huff.code_sizes[1][sym]), - ); - bb.put_fast( - u64::from(match_dist) & u64::from(BITMASKS[num_extra_bits as usize]), - num_extra_bits as u32, - ); - } else { - // The lz code was a literal - for _ in 0..3 { - flags >>= 1; - let lit = lz_code_buf[i]; - i += 1; - - debug_assert!(huff.code_sizes[0][lit as usize] != 0); - bb.put_fast( - u64::from(huff.codes[0][lit as usize]), - u32::from(huff.code_sizes[0][lit as usize]), - ); - - if flags & 1 == 1 || i >= lz_code_buf.len() { - break; - } - } - } - - bb.flush(output)?; - } - - output.bits_in = 0; - output.bit_buffer = 0; - while bb.bits_in != 0 { - let n = cmp::min(bb.bits_in, 16); - output.put_bits(bb.bit_buffer as u32 & BITMASKS[n as usize], n); - bb.bit_buffer >>= n; - bb.bits_in -= n; - } - - // Output the end of block symbol. 
- output.put_bits( - u32::from(huff.codes[0][256]), - u32::from(huff.code_sizes[0][256]), - ); - - Ok(true) -} - -fn compress_block( - huff: &mut HuffmanOxide, - output: &mut OutputBufferOxide, - lz: &LZOxide, - static_block: bool, -) -> Result { - if static_block { - huff.start_static_block(output); - } else { - huff.start_dynamic_block(output)?; - } - - compress_lz_codes(huff, output, &lz.codes[..lz.code_position]) -} - -fn flush_block( - d: &mut CompressorOxide, - callback: &mut CallbackOxide, - flush: TDEFLFlush, -) -> Result { - let mut saved_buffer; - { - let mut output = callback - .out - .new_output_buffer(&mut d.params.local_buf.b, d.params.out_buf_ofs); - output.bit_buffer = d.params.saved_bit_buffer; - output.bits_in = d.params.saved_bits_in; - - let use_raw_block = (d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0) - && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos) <= d.dict.size; - - assert!(d.params.flush_remaining == 0); - d.params.flush_ofs = 0; - d.params.flush_remaining = 0; - - d.lz.init_flag(); - - // If we are at the start of the stream, write the zlib header if requested. - if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 && d.params.block_index == 0 { - let header = zlib::header_from_flags(d.params.flags as u32); - output.put_bits(header[0].into(), 8); - output.put_bits(header[1].into(), 8); - } - - // Output the block header. - output.put_bits((flush == TDEFLFlush::Finish) as u32, 1); - - saved_buffer = output.save(); - - let comp_success = if !use_raw_block { - let use_static = - (d.params.flags & TDEFL_FORCE_ALL_STATIC_BLOCKS != 0) || (d.lz.total_bytes < 48); - compress_block(&mut d.huff, &mut output, &d.lz, use_static)? - } else { - false - }; - - // If we failed to compress anything and the output would take up more space than the output - // data, output a stored block instead, which has at most 5 bytes of overhead. - // We only use some simple heuristics for now. 
- // A stored block will have an overhead of at least 4 bytes containing the block length - // but usually more due to the length parameters having to start at a byte boundary and thus - // requiring up to 5 bytes of padding. - // As a static block will have an overhead of at most 1 bit per byte - // (as literals are either 8 or 9 bytes), a raw block will - // never take up less space if the number of input bytes are less than 32. - let expanded = (d.lz.total_bytes > 32) - && (output.inner_pos - saved_buffer.pos + 1 >= (d.lz.total_bytes as usize)) - && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos <= d.dict.size); - - if use_raw_block || expanded { - output.load(saved_buffer); - - // Block header. - output.put_bits(0, 2); - - // Block length has to start on a byte boundary, s opad. - output.pad_to_bytes(); - - // Block length and ones complement of block length. - output.put_bits(d.lz.total_bytes & 0xFFFF, 16); - output.put_bits(!d.lz.total_bytes & 0xFFFF, 16); - - // Write the actual bytes. - for i in 0..d.lz.total_bytes { - let pos = (d.dict.code_buf_dict_pos + i as usize) & LZ_DICT_SIZE_MASK; - output.put_bits(u32::from(d.dict.b.dict[pos as usize]), 8); - } - } else if !comp_success { - output.load(saved_buffer); - compress_block(&mut d.huff, &mut output, &d.lz, true)?; - } - - if flush != TDEFLFlush::None { - if flush == TDEFLFlush::Finish { - output.pad_to_bytes(); - if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 { - let mut adler = d.params.adler32; - for _ in 0..4 { - output.put_bits((adler >> 24) & 0xFF, 8); - adler <<= 8; - } - } - } else { - // Sync or Full flush. - // Output an empty raw block. 
- output.put_bits(0, 3); - output.pad_to_bytes(); - output.put_bits(0, 16); - output.put_bits(0xFFFF, 16); - } - } - - memset(&mut d.huff.count[0][..MAX_HUFF_SYMBOLS_0], 0); - memset(&mut d.huff.count[1][..MAX_HUFF_SYMBOLS_1], 0); - - d.lz.code_position = 1; - d.lz.flag_position = 0; - d.lz.num_flags_left = 8; - d.dict.code_buf_dict_pos += d.lz.total_bytes as usize; - d.lz.total_bytes = 0; - d.params.block_index += 1; - - saved_buffer = output.save(); - - d.params.saved_bit_buffer = saved_buffer.bit_buffer; - d.params.saved_bits_in = saved_buffer.bits_in; - } - - Ok(callback.flush_output(saved_buffer, &mut d.params)) -} - -fn record_literal(h: &mut HuffmanOxide, lz: &mut LZOxide, lit: u8) { - lz.total_bytes += 1; - lz.write_code(lit); - - *lz.get_flag() >>= 1; - lz.consume_flag(); - - h.count[0][lit as usize] += 1; -} - -fn record_match(h: &mut HuffmanOxide, lz: &mut LZOxide, mut match_len: u32, mut match_dist: u32) { - assert!(match_len >= MIN_MATCH_LEN.into()); - assert!(match_dist >= 1); - assert!(match_dist as usize <= LZ_DICT_SIZE); - - lz.total_bytes += match_len; - match_dist -= 1; - match_len -= u32::from(MIN_MATCH_LEN); - lz.write_code(match_len as u8); - lz.write_code(match_dist as u8); - lz.write_code((match_dist >> 8) as u8); - - *lz.get_flag() >>= 1; - *lz.get_flag() |= 0x80; - lz.consume_flag(); - - let symbol = if match_dist < 512 { - SMALL_DIST_SYM[match_dist as usize] - } else { - LARGE_DIST_SYM[((match_dist >> 8) & 127) as usize] - } as usize; - h.count[1][symbol] += 1; - h.count[0][LEN_SYM[match_len as usize] as usize] += 1; -} - -fn compress_normal(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool { - let mut src_pos = d.params.src_pos; - let in_buf = match callback.in_buf { - None => return true, - Some(in_buf) => in_buf, - }; - - let mut lookahead_size = d.dict.lookahead_size; - let mut lookahead_pos = d.dict.lookahead_pos; - let mut saved_lit = d.params.saved_lit; - let mut saved_match_dist = d.params.saved_match_dist; - let mut 
saved_match_len = d.params.saved_match_len; - - while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size != 0) { - let src_buf_left = in_buf.len() - src_pos; - let num_bytes_to_process = cmp::min(src_buf_left, MAX_MATCH_LEN - lookahead_size as usize); - - if lookahead_size + d.dict.size >= usize::from(MIN_MATCH_LEN) - 1 - && num_bytes_to_process > 0 - { - let dictb = &mut d.dict.b; - - let mut dst_pos = (lookahead_pos + lookahead_size as usize) & LZ_DICT_SIZE_MASK; - let mut ins_pos = lookahead_pos + lookahead_size as usize - 2; - // Start the hash value from the first two bytes - let mut hash = update_hash( - u16::from(dictb.dict[(ins_pos & LZ_DICT_SIZE_MASK) as usize]), - dictb.dict[((ins_pos + 1) & LZ_DICT_SIZE_MASK) as usize], - ); - - lookahead_size += num_bytes_to_process; - - for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] { - // Add byte to input buffer. - dictb.dict[dst_pos as usize] = c; - if (dst_pos as usize) < MAX_MATCH_LEN - 1 { - dictb.dict[LZ_DICT_SIZE + dst_pos as usize] = c; - } - - // Generate hash from the current byte, - hash = update_hash(hash, c); - dictb.next[(ins_pos & LZ_DICT_SIZE_MASK) as usize] = dictb.hash[hash as usize]; - // and insert it into the hash chain. 
- dictb.hash[hash as usize] = ins_pos as u16; - dst_pos = (dst_pos + 1) & LZ_DICT_SIZE_MASK; - ins_pos += 1; - } - src_pos += num_bytes_to_process; - } else { - let dictb = &mut d.dict.b; - for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] { - let dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK; - dictb.dict[dst_pos as usize] = c; - if (dst_pos as usize) < MAX_MATCH_LEN - 1 { - dictb.dict[LZ_DICT_SIZE + dst_pos as usize] = c; - } - - lookahead_size += 1; - if lookahead_size + d.dict.size >= MIN_MATCH_LEN.into() { - let ins_pos = lookahead_pos + lookahead_size - 3; - let hash = ((u32::from(dictb.dict[(ins_pos & LZ_DICT_SIZE_MASK) as usize]) - << (LZ_HASH_SHIFT * 2)) - ^ ((u32::from(dictb.dict[((ins_pos + 1) & LZ_DICT_SIZE_MASK) as usize]) - << LZ_HASH_SHIFT) - ^ u32::from(c))) - & (LZ_HASH_SIZE as u32 - 1); - - dictb.next[(ins_pos & LZ_DICT_SIZE_MASK) as usize] = dictb.hash[hash as usize]; - dictb.hash[hash as usize] = ins_pos as u16; - } - } - - src_pos += num_bytes_to_process; - } - - d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size); - if d.params.flush == TDEFLFlush::None && (lookahead_size as usize) < MAX_MATCH_LEN { - break; - } - - let mut len_to_move = 1; - let mut cur_match_dist = 0; - let mut cur_match_len = if saved_match_len != 0 { - saved_match_len - } else { - u32::from(MIN_MATCH_LEN) - 1 - }; - let cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK; - if d.params.flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS) != 0 { - // If TDEFL_RLE_MATCHES is set, we only look for repeating sequences of the current byte. 
- if d.dict.size != 0 && d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS == 0 { - let c = d.dict.b.dict[((cur_pos.wrapping_sub(1)) & LZ_DICT_SIZE_MASK) as usize]; - cur_match_len = d.dict.b.dict[cur_pos as usize..(cur_pos + lookahead_size) as usize] - .iter() - .take_while(|&x| *x == c) - .count() as u32; - if cur_match_len < MIN_MATCH_LEN.into() { - cur_match_len = 0 - } else { - cur_match_dist = 1 - } - } - } else { - // Try to find a match for the bytes at the current position. - let dist_len = d.dict.find_match( - lookahead_pos, - d.dict.size, - lookahead_size as u32, - cur_match_dist, - cur_match_len, - ); - cur_match_dist = dist_len.0; - cur_match_len = dist_len.1; - } - - let far_and_small = cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024; - let filter_small = d.params.flags & TDEFL_FILTER_MATCHES != 0 && cur_match_len <= 5; - if far_and_small || filter_small || cur_pos == cur_match_dist as usize { - cur_match_dist = 0; - cur_match_len = 0; - } - - if saved_match_len != 0 { - if cur_match_len > saved_match_len { - record_literal(&mut d.huff, &mut d.lz, saved_lit); - if cur_match_len >= 128 { - record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist); - saved_match_len = 0; - len_to_move = cur_match_len as usize; - } else { - saved_lit = d.dict.b.dict[cur_pos as usize]; - saved_match_dist = cur_match_dist; - saved_match_len = cur_match_len; - } - } else { - record_match(&mut d.huff, &mut d.lz, saved_match_len, saved_match_dist); - len_to_move = (saved_match_len - 1) as usize; - saved_match_len = 0; - } - } else if cur_match_dist == 0 { - record_literal( - &mut d.huff, - &mut d.lz, - d.dict.b.dict[cmp::min(cur_pos as usize, d.dict.b.dict.len() - 1)], - ); - } else if d.params.greedy_parsing - || (d.params.flags & TDEFL_RLE_MATCHES != 0) - || cur_match_len >= 128 - { - // If we are using lazy matching, check for matches at the next byte if the current - // match was shorter than 128 bytes. 
- record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist); - len_to_move = cur_match_len as usize; - } else { - saved_lit = d.dict.b.dict[cmp::min(cur_pos as usize, d.dict.b.dict.len() - 1)]; - saved_match_dist = cur_match_dist; - saved_match_len = cur_match_len; - } - - lookahead_pos += len_to_move; - assert!(lookahead_size >= len_to_move); - lookahead_size -= len_to_move; - d.dict.size = cmp::min(d.dict.size + len_to_move, LZ_DICT_SIZE); - - let lz_buf_tight = d.lz.code_position > LZ_CODE_BUF_SIZE - 8; - let raw = d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0; - let fat = ((d.lz.code_position * 115) >> 7) >= d.lz.total_bytes as usize; - let fat_or_raw = (d.lz.total_bytes > 31 * 1024) && (fat || raw); - - if lz_buf_tight || fat_or_raw { - d.params.src_pos = src_pos; - // These values are used in flush_block, so we need to write them back here. - d.dict.lookahead_size = lookahead_size; - d.dict.lookahead_pos = lookahead_pos; - - let n = flush_block(d, callback, TDEFLFlush::None) - .unwrap_or(TDEFLStatus::PutBufFailed as i32); - if n != 0 { - d.params.saved_lit = saved_lit; - d.params.saved_match_dist = saved_match_dist; - d.params.saved_match_len = saved_match_len; - return n > 0; - } - } - } - - d.params.src_pos = src_pos; - d.dict.lookahead_size = lookahead_size; - d.dict.lookahead_pos = lookahead_pos; - d.params.saved_lit = saved_lit; - d.params.saved_match_dist = saved_match_dist; - d.params.saved_match_len = saved_match_len; - true -} - -const COMP_FAST_LOOKAHEAD_SIZE: usize = 4096; - -fn compress_fast(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool { - let mut src_pos = d.params.src_pos; - let mut lookahead_size = d.dict.lookahead_size; - let mut lookahead_pos = d.dict.lookahead_pos; - - let mut cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK; - let in_buf = match callback.in_buf { - None => return true, - Some(in_buf) => in_buf, - }; - - debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2); - - while src_pos < in_buf.len() || 
(d.params.flush != TDEFLFlush::None && lookahead_size > 0) { - let mut dst_pos = ((lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK) as usize; - let mut num_bytes_to_process = cmp::min( - in_buf.len() - src_pos, - (COMP_FAST_LOOKAHEAD_SIZE - lookahead_size) as usize, - ); - lookahead_size += num_bytes_to_process; - - while num_bytes_to_process != 0 { - let n = cmp::min(LZ_DICT_SIZE - dst_pos, num_bytes_to_process); - d.dict.b.dict[dst_pos..dst_pos + n].copy_from_slice(&in_buf[src_pos..src_pos + n]); - - if dst_pos < MAX_MATCH_LEN - 1 { - let m = cmp::min(n, MAX_MATCH_LEN - 1 - dst_pos); - d.dict.b.dict[dst_pos + LZ_DICT_SIZE..dst_pos + LZ_DICT_SIZE + m] - .copy_from_slice(&in_buf[src_pos..src_pos + m]); - } - - src_pos += n; - dst_pos = (dst_pos + n) & LZ_DICT_SIZE_MASK as usize; - num_bytes_to_process -= n; - } - - d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size); - if d.params.flush == TDEFLFlush::None && lookahead_size < COMP_FAST_LOOKAHEAD_SIZE { - break; - } - - while lookahead_size >= 4 { - let mut cur_match_len = 1; - - let first_trigram = d.dict.read_unaligned_u32(cur_pos) & 0xFF_FFFF; - - let hash = (first_trigram ^ (first_trigram >> (24 - (LZ_HASH_BITS - 8)))) - & LEVEL1_HASH_SIZE_MASK; - - let mut probe_pos = usize::from(d.dict.b.hash[hash as usize]); - d.dict.b.hash[hash as usize] = lookahead_pos as u16; - - let mut cur_match_dist = (lookahead_pos - probe_pos as usize) as u16; - if cur_match_dist as usize <= d.dict.size { - probe_pos &= LZ_DICT_SIZE_MASK; - - let trigram = d.dict.read_unaligned_u32(probe_pos) & 0xFF_FFFF; - - if first_trigram == trigram { - // Trigram was tested, so we can start with "+ 3" displacement. 
- let mut p = cur_pos + 3; - let mut q = probe_pos + 3; - cur_match_len = (|| { - for _ in 0..32 { - let p_data: u64 = d.dict.read_unaligned_u64(p); - let q_data: u64 = d.dict.read_unaligned_u64(q); - let xor_data = p_data ^ q_data; - if xor_data == 0 { - p += 8; - q += 8; - } else { - let trailing = xor_data.trailing_zeros(); - return p as u32 - cur_pos as u32 + (trailing >> 3); - } - } - - if cur_match_dist == 0 { - 0 - } else { - MAX_MATCH_LEN as u32 - } - })(); - - if cur_match_len < MIN_MATCH_LEN.into() - || (cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024) - { - let lit = first_trigram as u8; - cur_match_len = 1; - d.lz.write_code(lit); - *d.lz.get_flag() >>= 1; - d.huff.count[0][lit as usize] += 1; - } else { - // Limit the match to the length of the lookahead so we don't create a match - // that ends after the end of the input data. - cur_match_len = cmp::min(cur_match_len, lookahead_size as u32); - debug_assert!(cur_match_len >= MIN_MATCH_LEN.into()); - debug_assert!(cur_match_dist >= 1); - debug_assert!(cur_match_dist as usize <= LZ_DICT_SIZE); - cur_match_dist -= 1; - - d.lz.write_code((cur_match_len - u32::from(MIN_MATCH_LEN)) as u8); - d.lz.write_code(cur_match_dist as u8); - d.lz.write_code((cur_match_dist >> 8) as u8); - - *d.lz.get_flag() >>= 1; - *d.lz.get_flag() |= 0x80; - if cur_match_dist < 512 { - d.huff.count[1][SMALL_DIST_SYM[cur_match_dist as usize] as usize] += 1; - } else { - d.huff.count[1] - [LARGE_DIST_SYM[(cur_match_dist >> 8) as usize] as usize] += 1; - } - - d.huff.count[0][LEN_SYM[(cur_match_len - u32::from(MIN_MATCH_LEN)) as usize] - as usize] += 1; - } - } else { - d.lz.write_code(first_trigram as u8); - *d.lz.get_flag() >>= 1; - d.huff.count[0][first_trigram as u8 as usize] += 1; - } - - d.lz.consume_flag(); - d.lz.total_bytes += cur_match_len; - lookahead_pos += cur_match_len as usize; - d.dict.size = cmp::min(d.dict.size + cur_match_len as usize, LZ_DICT_SIZE); - cur_pos = (cur_pos + cur_match_len as usize) 
& LZ_DICT_SIZE_MASK; - lookahead_size -= cur_match_len as usize; - - if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 { - // These values are used in flush_block, so we need to write them back here. - d.dict.lookahead_size = lookahead_size; - d.dict.lookahead_pos = lookahead_pos; - - let n = match flush_block(d, callback, TDEFLFlush::None) { - Err(_) => { - d.params.src_pos = src_pos; - d.params.prev_return_status = TDEFLStatus::PutBufFailed; - return false; - } - Ok(status) => status, - }; - if n != 0 { - d.params.src_pos = src_pos; - return n > 0; - } - debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2); - - lookahead_size = d.dict.lookahead_size; - lookahead_pos = d.dict.lookahead_pos; - } - } - } - - while lookahead_size != 0 { - let lit = d.dict.b.dict[cur_pos as usize]; - d.lz.total_bytes += 1; - d.lz.write_code(lit); - *d.lz.get_flag() >>= 1; - d.lz.consume_flag(); - - d.huff.count[0][lit as usize] += 1; - lookahead_pos += 1; - d.dict.size = cmp::min(d.dict.size + 1, LZ_DICT_SIZE); - cur_pos = (cur_pos + 1) & LZ_DICT_SIZE_MASK; - lookahead_size -= 1; - - if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 { - // These values are used in flush_block, so we need to write them back here. 
- d.dict.lookahead_size = lookahead_size; - d.dict.lookahead_pos = lookahead_pos; - - let n = match flush_block(d, callback, TDEFLFlush::None) { - Err(_) => { - d.params.prev_return_status = TDEFLStatus::PutBufFailed; - d.params.src_pos = src_pos; - return false; - } - Ok(status) => status, - }; - if n != 0 { - d.params.src_pos = src_pos; - return n > 0; - } - - lookahead_size = d.dict.lookahead_size; - lookahead_pos = d.dict.lookahead_pos; - } - } - } - - d.params.src_pos = src_pos; - d.dict.lookahead_size = lookahead_size; - d.dict.lookahead_pos = lookahead_pos; - true -} - -fn flush_output_buffer(c: &mut CallbackOxide, p: &mut ParamsOxide) -> (TDEFLStatus, usize, usize) { - let mut res = (TDEFLStatus::Okay, p.src_pos, 0); - if let CallbackOut::Buf(ref mut cb) = c.out { - let n = cmp::min(cb.out_buf.len() - p.out_buf_ofs, p.flush_remaining as usize); - if n != 0 { - (&mut cb.out_buf[p.out_buf_ofs..p.out_buf_ofs + n]) - .copy_from_slice(&p.local_buf.b[p.flush_ofs as usize..p.flush_ofs as usize + n]); - } - p.flush_ofs += n as u32; - p.flush_remaining -= n as u32; - p.out_buf_ofs += n; - res.2 = p.out_buf_ofs; - } - - if p.finished && p.flush_remaining == 0 { - res.0 = TDEFLStatus::Done - } - res -} - -/// Main compression function. Tries to compress as much as possible from `in_buf` and -/// puts compressed output into `out_buf`. -/// -/// The value of `flush` determines if the compressor should attempt to flush all output -/// and alternatively try to finish the stream. -/// -/// Use [`TDEFLFlush::Finish`] on the final call to signal that the stream is finishing. -/// -/// Note that this function does not keep track of whether a flush marker has been output, so -/// if called using [`TDEFLFlush::Sync`], the caller needs to ensure there is enough space in the -/// output buffer if they want to avoid repeated flush markers. -/// See #105 for details. 
-/// -/// # Returns -/// Returns a tuple containing the current status of the compressor, the current position -/// in the input buffer and the current position in the output buffer. -pub fn compress( - d: &mut CompressorOxide, - in_buf: &[u8], - out_buf: &mut [u8], - flush: TDEFLFlush, -) -> (TDEFLStatus, usize, usize) { - compress_inner( - d, - &mut CallbackOxide::new_callback_buf(in_buf, out_buf), - flush, - ) -} - -/// Main compression function. Callbacks output. -/// -/// # Returns -/// Returns a tuple containing the current status of the compressor, the current position -/// in the input buffer. -/// -/// The caller is responsible for ensuring the `CallbackFunc` struct will not cause undefined -/// behaviour. -pub fn compress_to_output( - d: &mut CompressorOxide, - in_buf: &[u8], - flush: TDEFLFlush, - mut callback_func: impl FnMut(&[u8]) -> bool, -) -> (TDEFLStatus, usize) { - let res = compress_inner( - d, - &mut CallbackOxide::new_callback_func( - in_buf, - CallbackFunc { - put_buf_func: &mut callback_func, - }, - ), - flush, - ); - - (res.0, res.1) -} - -fn compress_inner( - d: &mut CompressorOxide, - callback: &mut CallbackOxide, - flush: TDEFLFlush, -) -> (TDEFLStatus, usize, usize) { - d.params.out_buf_ofs = 0; - d.params.src_pos = 0; - - let prev_ok = d.params.prev_return_status == TDEFLStatus::Okay; - let flush_finish_once = d.params.flush != TDEFLFlush::Finish || flush == TDEFLFlush::Finish; - - d.params.flush = flush; - if !prev_ok || !flush_finish_once { - d.params.prev_return_status = TDEFLStatus::BadParam; - return (d.params.prev_return_status, 0, 0); - } - - if d.params.flush_remaining != 0 || d.params.finished { - let res = flush_output_buffer(callback, &mut d.params); - d.params.prev_return_status = res.0; - return res; - } - - let one_probe = d.params.flags & MAX_PROBES_MASK as u32 == 1; - let greedy = d.params.flags & TDEFL_GREEDY_PARSING_FLAG != 0; - let filter_or_rle_or_raw = d.params.flags - & (TDEFL_FILTER_MATCHES | 
TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES) - != 0; - - let compress_success = if one_probe && greedy && !filter_or_rle_or_raw { - compress_fast(d, callback) - } else { - compress_normal(d, callback) - }; - - if !compress_success { - return ( - d.params.prev_return_status, - d.params.src_pos, - d.params.out_buf_ofs, - ); - } - - if let Some(in_buf) = callback.in_buf { - if d.params.flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32) != 0 { - d.params.adler32 = update_adler32(d.params.adler32, &in_buf[..d.params.src_pos]); - } - } - - let flush_none = d.params.flush == TDEFLFlush::None; - let in_left = callback.in_buf.map_or(0, |buf| buf.len()) - d.params.src_pos; - let remaining = in_left != 0 || d.params.flush_remaining != 0; - if !flush_none && d.dict.lookahead_size == 0 && !remaining { - let flush = d.params.flush; - match flush_block(d, callback, flush) { - Err(_) => { - d.params.prev_return_status = TDEFLStatus::PutBufFailed; - return ( - d.params.prev_return_status, - d.params.src_pos, - d.params.out_buf_ofs, - ); - } - Ok(x) if x < 0 => { - return ( - d.params.prev_return_status, - d.params.src_pos, - d.params.out_buf_ofs, - ) - } - _ => { - d.params.finished = d.params.flush == TDEFLFlush::Finish; - if d.params.flush == TDEFLFlush::Full { - memset(&mut d.dict.b.hash[..], 0); - memset(&mut d.dict.b.next[..], 0); - d.dict.size = 0; - } - } - } - } - - let res = flush_output_buffer(callback, &mut d.params); - d.params.prev_return_status = res.0; - - res -} - -/// Create a set of compression flags using parameters used by zlib and other compressors. -/// Mainly intended for use with transition from c libraries as it deals with raw integers. -/// -/// # Parameters -/// `level` determines compression level. Clamped to maximum of 10. Negative values result in -/// `CompressionLevel::DefaultLevel`. -/// `window_bits`: Above 0, wraps the stream in a zlib wrapper, 0 or negative for a raw deflate -/// stream. 
-/// `strategy`: Sets the strategy if this conforms to any of the values in `CompressionStrategy`. -/// -/// # Notes -/// This function may be removed or moved to the `miniz_oxide_c_api` in the future. -pub fn create_comp_flags_from_zip_params(level: i32, window_bits: i32, strategy: i32) -> u32 { - let num_probes = (if level >= 0 { - cmp::min(10, level) - } else { - CompressionLevel::DefaultLevel as i32 - }) as usize; - let greedy = if level <= 3 { - TDEFL_GREEDY_PARSING_FLAG - } else { - 0 - }; - let mut comp_flags = NUM_PROBES[num_probes] | greedy; - - if window_bits > 0 { - comp_flags |= TDEFL_WRITE_ZLIB_HEADER; - } - - if level == 0 { - comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; - } else if strategy == CompressionStrategy::Filtered as i32 { - comp_flags |= TDEFL_FILTER_MATCHES; - } else if strategy == CompressionStrategy::HuffmanOnly as i32 { - comp_flags &= !MAX_PROBES_MASK as u32; - } else if strategy == CompressionStrategy::Fixed as i32 { - comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; - } else if strategy == CompressionStrategy::RLE as i32 { - comp_flags |= TDEFL_RLE_MATCHES; - } - - comp_flags -} - -#[cfg(test)] -mod test { - use super::{ - compress_to_output, create_comp_flags_from_zip_params, read_u16_le, write_u16_le, - CompressionStrategy, CompressorOxide, TDEFLFlush, TDEFLStatus, DEFAULT_FLAGS, - MZ_DEFAULT_WINDOW_BITS, - }; - use crate::inflate::decompress_to_vec; - use alloc::vec; - - #[test] - fn u16_to_slice() { - let mut slice = [0, 0]; - write_u16_le(2000, &mut slice, 0); - assert_eq!(slice, [208, 7]); - } - - #[test] - fn u16_from_slice() { - let mut slice = [208, 7]; - assert_eq!(read_u16_le(&mut slice, 0), 2000); - } - - #[test] - fn compress_output() { - assert_eq!( - DEFAULT_FLAGS, - create_comp_flags_from_zip_params( - 4, - MZ_DEFAULT_WINDOW_BITS, - CompressionStrategy::Default as i32 - ) - ); - - let slice = [ - 1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3, - ]; - let mut encoded = vec![]; - let flags = 
create_comp_flags_from_zip_params(6, 0, 0); - let mut d = CompressorOxide::new(flags); - let (status, in_consumed) = - compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| { - encoded.extend_from_slice(out); - true - }); - - assert_eq!(status, TDEFLStatus::Done); - assert_eq!(in_consumed, slice.len()); - - let decoded = decompress_to_vec(&encoded[..]).unwrap(); - assert_eq!(&decoded[..], &slice[..]); - } - - #[test] - /// Check fast compress mode - fn compress_fast() { - let slice = [ - 1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3, - ]; - let mut encoded = vec![]; - let flags = create_comp_flags_from_zip_params(1, 0, 0); - let mut d = CompressorOxide::new(flags); - let (status, in_consumed) = - compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| { - encoded.extend_from_slice(out); - true - }); - - assert_eq!(status, TDEFLStatus::Done); - assert_eq!(in_consumed, slice.len()); - - // Needs to be altered if algorithm improves. - assert_eq!( - &encoded[..], - [99, 100, 98, 102, 1, 98, 48, 98, 3, 147, 204, 76, 204, 140, 76, 204, 0] - ); - - let decoded = decompress_to_vec(&encoded[..]).unwrap(); - assert_eq!(&decoded[..], &slice[..]); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/deflate/mod.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/deflate/mod.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/deflate/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/deflate/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,227 +0,0 @@ -//! This module contains functionality for compression. - -use crate::alloc::vec; -use crate::alloc::vec::Vec; - -mod buffer; -pub mod core; -pub mod stream; -use self::core::*; - -/// How much processing the compressor should do to compress the data. 
-/// `NoCompression` and `Bestspeed` have special meanings, the other levels determine the number -/// of checks for matches in the hash chains and whether to use lazy or greedy parsing. -#[repr(i32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum CompressionLevel { - /// Don't do any compression, only output uncompressed blocks. - NoCompression = 0, - /// Fast compression. Uses a special compression routine that is optimized for speed. - BestSpeed = 1, - /// Slow/high compression. Do a lot of checks to try to find good matches. - BestCompression = 9, - /// Even more checks, can be very slow. - UberCompression = 10, - /// Default compromise between speed and compression. - DefaultLevel = 6, - /// Use the default compression level. - DefaultCompression = -1, -} - -// Missing safe rust analogue (this and mem-to-mem are quite similar) -/* -fn tdefl_compress( - d: Option<&mut CompressorOxide>, - in_buf: *const c_void, - in_size: Option<&mut usize>, - out_buf: *mut c_void, - out_size: Option<&mut usize>, - flush: TDEFLFlush, -) -> TDEFLStatus { - let res = match d { - None => { - in_size.map(|size| *size = 0); - out_size.map(|size| *size = 0); - (TDEFLStatus::BadParam, 0, 0) - }, - Some(compressor) => { - let callback_res = CallbackOxide::new( - compressor.callback_func.clone(), - in_buf, - in_size, - out_buf, - out_size, - ); - - if let Ok(mut callback) = callback_res { - let res = compress(compressor, &mut callback, flush); - callback.update_size(Some(res.1), Some(res.2)); - res - } else { - (TDEFLStatus::BadParam, 0, 0) - } - } - }; - res.0 -}*/ - -// Missing safe rust analogue -/* -fn tdefl_init( - d: Option<&mut CompressorOxide>, - put_buf_func: PutBufFuncPtr, - put_buf_user: *mut c_void, - flags: c_int, -) -> TDEFLStatus { - if let Some(d) = d { - *d = CompressorOxide::new( - put_buf_func.map(|func| - CallbackFunc { put_buf_func: func, put_buf_user: put_buf_user } - ), - flags as u32, - ); - TDEFLStatus::Okay - } else { - TDEFLStatus::BadParam - } 
-}*/ - -// Missing safe rust analogue (though maybe best served by flate2 front-end instead) -/* -fn tdefl_compress_mem_to_output( - buf: *const c_void, - buf_len: usize, - put_buf_func: PutBufFuncPtr, - put_buf_user: *mut c_void, - flags: c_int, -) -> bool*/ - -// Missing safe Rust analogue -/* -fn tdefl_compress_mem_to_mem( - out_buf: *mut c_void, - out_buf_len: usize, - src_buf: *const c_void, - src_buf_len: usize, - flags: c_int, -) -> usize*/ - -/// Compress the input data to a vector, using the specified compression level (0-10). -pub fn compress_to_vec(input: &[u8], level: u8) -> Vec { - compress_to_vec_inner(input, level, 0, 0) -} - -/// Compress the input data to a vector, using the specified compression level (0-10), and with a -/// zlib wrapper. -pub fn compress_to_vec_zlib(input: &[u8], level: u8) -> Vec { - compress_to_vec_inner(input, level, 1, 0) -} - -/// Simple function to compress data to a vec. -fn compress_to_vec_inner(input: &[u8], level: u8, window_bits: i32, strategy: i32) -> Vec { - // The comp flags function sets the zlib flag if the window_bits parameter is > 0. - let flags = create_comp_flags_from_zip_params(level.into(), window_bits, strategy); - let mut compressor = CompressorOxide::new(flags); - let mut output = vec![0; ::core::cmp::max(input.len() / 2, 2)]; - - let mut in_pos = 0; - let mut out_pos = 0; - loop { - let (status, bytes_in, bytes_out) = compress( - &mut compressor, - &input[in_pos..], - &mut output[out_pos..], - TDEFLFlush::Finish, - ); - - out_pos += bytes_out; - in_pos += bytes_in; - - match status { - TDEFLStatus::Done => { - output.truncate(out_pos); - break; - } - TDEFLStatus::Okay => { - // We need more space, so resize the vector. - if output.len().saturating_sub(out_pos) < 30 { - output.resize(output.len() * 2, 0) - } - } - // Not supposed to happen unless there is a bug. - _ => panic!("Bug! 
Unexpectedly failed to compress!"), - } - } - - output -} - -#[cfg(test)] -mod test { - use super::{compress_to_vec, compress_to_vec_inner, CompressionStrategy}; - use crate::inflate::decompress_to_vec; - use alloc::vec; - - /// Test deflate example. - /// - /// Check if the encoder produces the same code as the example given by Mark Adler here: - /// https://stackoverflow.com/questions/17398931/deflate-encoding-with-static-huffman-codes/17415203 - #[test] - fn compress_small() { - let test_data = b"Deflate late"; - let check = [ - 0x73, 0x49, 0x4d, 0xcb, 0x49, 0x2c, 0x49, 0x55, 0x00, 0x11, 0x00, - ]; - - let res = compress_to_vec(test_data, 1); - assert_eq!(&check[..], res.as_slice()); - - let res = compress_to_vec(test_data, 9); - assert_eq!(&check[..], res.as_slice()); - } - - #[test] - fn compress_huff_only() { - let test_data = b"Deflate late"; - - let res = compress_to_vec_inner(test_data, 1, 0, CompressionStrategy::HuffmanOnly as i32); - let d = decompress_to_vec(res.as_slice()).expect("Failed to decompress!"); - assert_eq!(test_data, d.as_slice()); - } - - /// Test that a raw block compresses fine. - #[test] - fn compress_raw() { - let text = b"Hello, zlib!"; - let encoded = { - let len = text.len(); - let notlen = !len; - let mut encoded = vec![ - 1, - len as u8, - (len >> 8) as u8, - notlen as u8, - (notlen >> 8) as u8, - ]; - encoded.extend_from_slice(&text[..]); - encoded - }; - - let res = compress_to_vec(text, 0); - assert_eq!(encoded, res.as_slice()); - } - - #[test] - fn short() { - let test_data = [10, 10, 10, 10, 10, 55]; - let c = compress_to_vec(&test_data, 9); - - let d = decompress_to_vec(c.as_slice()).expect("Failed to decompress!"); - assert_eq!(&test_data, d.as_slice()); - // Check that a static block is used here, rather than a raw block - // , so the data is actually compressed. - // (The optimal compressed length would be 5, but neither miniz nor zlib manages that either - // as neither checks matches against the byte at index 0.) 
- assert!(c.len() <= 6); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/deflate/stream.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/deflate/stream.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/deflate/stream.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/deflate/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,121 +0,0 @@ -//! Extra streaming compression functionality. -//! -//! As of now this is mainly intended for use to build a higher-level wrapper. -//! -//! There is no DeflateState as the needed state is contained in the compressor struct itself. - -use crate::deflate::core::{compress, CompressorOxide, TDEFLFlush, TDEFLStatus}; -use crate::{MZError, MZFlush, MZStatus, StreamResult}; - -/// Try to compress from input to output with the given [`CompressorOxide`]. -/// -/// # Errors -/// -/// Returns [`MZError::Buf`] If the size of the `output` slice is empty or no progress was made due -/// to lack of expected input data, or if called without [`MZFlush::Finish`] after the compression -/// was already finished. -/// -/// Returns [`MZError::Param`] if the compressor parameters are set wrong. -/// -/// Returns [`MZError::Stream`] when lower-level decompressor returns a -/// [`TDEFLStatus::PutBufFailed`]; may not actually be possible. 
-pub fn deflate( - compressor: &mut CompressorOxide, - input: &[u8], - output: &mut [u8], - flush: MZFlush, -) -> StreamResult { - if output.is_empty() { - return StreamResult::error(MZError::Buf); - } - - if compressor.prev_return_status() == TDEFLStatus::Done { - return if flush == MZFlush::Finish { - StreamResult { - bytes_written: 0, - bytes_consumed: 0, - status: Ok(MZStatus::StreamEnd), - } - } else { - StreamResult::error(MZError::Buf) - }; - } - - let mut bytes_written = 0; - let mut bytes_consumed = 0; - - let mut next_in = input; - let mut next_out = output; - - let status = loop { - let in_bytes; - let out_bytes; - let defl_status = { - let res = compress(compressor, next_in, next_out, TDEFLFlush::from(flush)); - in_bytes = res.1; - out_bytes = res.2; - res.0 - }; - - next_in = &next_in[in_bytes..]; - next_out = &mut next_out[out_bytes..]; - bytes_consumed += in_bytes; - bytes_written += out_bytes; - - // Check if we are done, or compression failed. - match defl_status { - TDEFLStatus::BadParam => break Err(MZError::Param), - // Don't think this can happen as we're not using a custom callback. - TDEFLStatus::PutBufFailed => break Err(MZError::Stream), - TDEFLStatus::Done => break Ok(MZStatus::StreamEnd), - _ => (), - }; - - // All the output space was used, so wait for more. - if next_out.is_empty() { - break Ok(MZStatus::Ok); - } - - if next_in.is_empty() && (flush != MZFlush::Finish) { - let total_changed = bytes_written > 0 || bytes_consumed > 0; - - break if (flush != MZFlush::None) || total_changed { - // We wrote or consumed something, and/or did a flush (sync/partial etc.). - Ok(MZStatus::Ok) - } else { - // No more input data, not flushing, and nothing was consumed or written, - // so couldn't make any progress. 
- Err(MZError::Buf) - }; - } - }; - StreamResult { - bytes_consumed, - bytes_written, - status, - } -} - -#[cfg(test)] -mod test { - use super::deflate; - use crate::deflate::CompressorOxide; - use crate::inflate::decompress_to_vec_zlib; - use crate::{MZFlush, MZStatus}; - use alloc::boxed::Box; - use alloc::vec; - - #[test] - fn test_state() { - let data = b"Hello zlib!"; - let mut compressed = vec![0; 50]; - let mut compressor = Box::::default(); - let res = deflate(&mut compressor, data, &mut compressed, MZFlush::Finish); - let status = res.status.expect("Failed to compress!"); - let decomp = - decompress_to_vec_zlib(&compressed).expect("Failed to decompress compressed data"); - assert_eq!(status, MZStatus::StreamEnd); - assert_eq!(decomp[..], data[..]); - assert_eq!(res.bytes_consumed, data.len()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/inflate/core.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/inflate/core.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/inflate/core.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/inflate/core.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1992 +0,0 @@ -//! Streaming decompression functionality. - -use super::*; -use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER}; - -use ::core::convert::TryInto; -use ::core::{cmp, slice}; - -use self::output_buffer::OutputBuffer; - -pub const TINFL_LZ_DICT_SIZE: usize = 32_768; - -/// A struct containing huffman code lengths and the huffman code tree used by the decompressor. -struct HuffmanTable { - /// Length of the code at each index. - pub code_size: [u8; MAX_HUFF_SYMBOLS_0], - /// Fast lookup table for shorter huffman codes. - /// - /// See `HuffmanTable::fast_lookup`. - pub look_up: [i16; FAST_LOOKUP_SIZE as usize], - /// Full huffman tree. - /// - /// Positive values are edge nodes/symbols, negative values are - /// parent nodes/references to other nodes. 
- pub tree: [i16; MAX_HUFF_TREE_SIZE], -} - -impl HuffmanTable { - const fn new() -> HuffmanTable { - HuffmanTable { - code_size: [0; MAX_HUFF_SYMBOLS_0], - look_up: [0; FAST_LOOKUP_SIZE as usize], - tree: [0; MAX_HUFF_TREE_SIZE], - } - } - - /// Look for a symbol in the fast lookup table. - /// The symbol is stored in the lower 9 bits, the length in the next 6. - /// If the returned value is negative, the code wasn't found in the - /// fast lookup table and the full tree has to be traversed to find the code. - #[inline] - fn fast_lookup(&self, bit_buf: BitBuffer) -> i16 { - self.look_up[(bit_buf & BitBuffer::from(FAST_LOOKUP_SIZE - 1)) as usize] - } - - /// Get the symbol and the code length from the huffman tree. - #[inline] - fn tree_lookup(&self, fast_symbol: i32, bit_buf: BitBuffer, mut code_len: u32) -> (i32, u32) { - let mut symbol = fast_symbol; - // We step through the tree until we encounter a positive value, which indicates a - // symbol. - loop { - // symbol here indicates the position of the left (0) node, if the next bit is 1 - // we add 1 to the lookup position to get the right node. - symbol = i32::from(self.tree[(!symbol + ((bit_buf >> code_len) & 1) as i32) as usize]); - code_len += 1; - if symbol >= 0 { - break; - } - } - (symbol, code_len) - } - - #[inline] - /// Look up a symbol and code length from the bits in the provided bit buffer. - /// - /// Returns Some(symbol, length) on success, - /// None if the length is 0. - /// - /// It's possible we could avoid checking for 0 if we can guarantee a sane table. - /// TODO: Check if a smaller type for code_len helps performance. - fn lookup(&self, bit_buf: BitBuffer) -> Option<(i32, u32)> { - let symbol = self.fast_lookup(bit_buf).into(); - if symbol >= 0 { - if (symbol >> 9) as u32 != 0 { - Some((symbol, (symbol >> 9) as u32)) - } else { - // Zero-length code. - None - } - } else { - // We didn't get a symbol from the fast lookup table, so check the tree instead. 
- Some(self.tree_lookup(symbol, bit_buf, FAST_LOOKUP_BITS.into())) - } - } -} - -/// The number of huffman tables used. -const MAX_HUFF_TABLES: usize = 3; -/// The length of the first (literal/length) huffman table. -const MAX_HUFF_SYMBOLS_0: usize = 288; -/// The length of the second (distance) huffman table. -const MAX_HUFF_SYMBOLS_1: usize = 32; -/// The length of the last (huffman code length) huffman table. -const _MAX_HUFF_SYMBOLS_2: usize = 19; -/// The maximum length of a code that can be looked up in the fast lookup table. -const FAST_LOOKUP_BITS: u8 = 10; -/// The size of the fast lookup table. -const FAST_LOOKUP_SIZE: u32 = 1 << FAST_LOOKUP_BITS; -const MAX_HUFF_TREE_SIZE: usize = MAX_HUFF_SYMBOLS_0 * 2; -const LITLEN_TABLE: usize = 0; -const DIST_TABLE: usize = 1; -const HUFFLEN_TABLE: usize = 2; - -/// Flags to [`decompress()`] to control how inflation works. -/// -/// These define bits for a bitmask argument. -pub mod inflate_flags { - /// Should we try to parse a zlib header? - /// - /// If unset, the function will expect an RFC1951 deflate stream. If set, it will expect a - /// RFC1950 zlib wrapper around the deflate stream. - pub const TINFL_FLAG_PARSE_ZLIB_HEADER: u32 = 1; - - /// There will be more input that hasn't been given to the decompressor yet. - /// - /// This is useful when you want to decompress what you have so far, - /// even if you know there is probably more input that hasn't gotten here yet (_e.g._, over a - /// network connection). When [`decompress()`][super::decompress] reaches the end of the input - /// without finding the end of the compressed stream, it will return - /// [`TINFLStatus::NeedsMoreInput`][super::TINFLStatus::NeedsMoreInput] if this is set, - /// indicating that you should get more data before calling again. If not set, it will return - /// [`TINFLStatus::FailedCannotMakeProgress`][super::TINFLStatus::FailedCannotMakeProgress] - /// suggesting the stream is corrupt, since you claimed it was all there. 
- pub const TINFL_FLAG_HAS_MORE_INPUT: u32 = 2; - - /// The output buffer should not wrap around. - pub const TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: u32 = 4; - - /// Calculate the adler32 checksum of the output data even if we're not inflating a zlib stream. - /// - /// If [`TINFL_FLAG_IGNORE_ADLER32`] is specified, it will override this. - /// - /// NOTE: Enabling/disabling this between calls to decompress will result in an incorrect - /// checksum. - pub const TINFL_FLAG_COMPUTE_ADLER32: u32 = 8; - - /// Ignore adler32 checksum even if we are inflating a zlib stream. - /// - /// Overrides [`TINFL_FLAG_COMPUTE_ADLER32`] if both are enabled. - /// - /// NOTE: This flag does not exist in miniz as it does not support this and is a - /// custom addition for miniz_oxide. - /// - /// NOTE: Should not be changed from enabled to disabled after decompression has started, - /// this will result in checksum failure (outside the unlikely event where the checksum happens - /// to match anyway). - pub const TINFL_FLAG_IGNORE_ADLER32: u32 = 64; -} - -use self::inflate_flags::*; - -const MIN_TABLE_SIZES: [u16; 3] = [257, 1, 4]; - -#[cfg(target_pointer_width = "64")] -type BitBuffer = u64; - -#[cfg(not(target_pointer_width = "64"))] -type BitBuffer = u32; - -/// Main decompression struct. -/// -pub struct DecompressorOxide { - /// Current state of the decompressor. - state: core::State, - /// Number of bits in the bit buffer. - num_bits: u32, - /// Zlib CMF - z_header0: u32, - /// Zlib FLG - z_header1: u32, - /// Adler32 checksum from the zlib header. - z_adler32: u32, - /// 1 if the current block is the last block, 0 otherwise. - finish: u32, - /// The type of the current block. - block_type: u32, - /// 1 if the adler32 value should be checked. - check_adler32: u32, - /// Last match distance. - dist: u32, - /// Variable used for match length, symbols, and a number of other things. - counter: u32, - /// Number of extra bits for the last length or distance code. 
- num_extra: u32, - /// Number of entries in each huffman table. - table_sizes: [u32; MAX_HUFF_TABLES], - /// Buffer of input data. - bit_buf: BitBuffer, - /// Huffman tables. - tables: [HuffmanTable; MAX_HUFF_TABLES], - /// Raw block header. - raw_header: [u8; 4], - /// Huffman length codes. - len_codes: [u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1 + 137], -} - -impl DecompressorOxide { - /// Create a new tinfl_decompressor with all fields set to 0. - pub fn new() -> DecompressorOxide { - DecompressorOxide::default() - } - - /// Set the current state to `Start`. - #[inline] - pub fn init(&mut self) { - // The rest of the data is reset or overwritten when used. - self.state = core::State::Start; - } - - /// Returns the adler32 checksum of the currently decompressed data. - /// Note: Will return Some(1) if decompressing zlib but ignoring adler32. - #[inline] - pub fn adler32(&self) -> Option { - if self.state != State::Start && !self.state.is_failure() && self.z_header0 != 0 { - Some(self.check_adler32) - } else { - None - } - } - - /// Returns the adler32 that was read from the zlib header if it exists. - #[inline] - pub fn adler32_header(&self) -> Option { - if self.state != State::Start && self.state != State::BadZlibHeader && self.z_header0 != 0 { - Some(self.z_adler32) - } else { - None - } - } -} - -impl Default for DecompressorOxide { - /// Create a new tinfl_decompressor with all fields set to 0. - #[inline(always)] - fn default() -> Self { - DecompressorOxide { - state: core::State::Start, - num_bits: 0, - z_header0: 0, - z_header1: 0, - z_adler32: 0, - finish: 0, - block_type: 0, - check_adler32: 0, - dist: 0, - counter: 0, - num_extra: 0, - table_sizes: [0; MAX_HUFF_TABLES], - bit_buf: 0, - // TODO:(oyvindln) Check that copies here are optimized out in release mode. 
- tables: [ - HuffmanTable::new(), - HuffmanTable::new(), - HuffmanTable::new(), - ], - raw_header: [0; 4], - len_codes: [0; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1 + 137], - } - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -#[non_exhaustive] -enum State { - Start = 0, - ReadZlibCmf, - ReadZlibFlg, - ReadBlockHeader, - BlockTypeNoCompression, - RawHeader, - RawMemcpy1, - RawMemcpy2, - ReadTableSizes, - ReadHufflenTableCodeSize, - ReadLitlenDistTablesCodeSize, - ReadExtraBitsCodeSize, - DecodeLitlen, - WriteSymbol, - ReadExtraBitsLitlen, - DecodeDistance, - ReadExtraBitsDistance, - RawReadFirstByte, - RawStoreFirstByte, - WriteLenBytesToEnd, - BlockDone, - HuffDecodeOuterLoop1, - HuffDecodeOuterLoop2, - ReadAdler32, - - DoneForever, - - // Failure states. - BlockTypeUnexpected, - BadCodeSizeSum, - BadDistOrLiteralTableLength, - BadTotalSymbols, - BadZlibHeader, - DistanceOutOfBounds, - BadRawLength, - BadCodeSizeDistPrevLookup, - InvalidLitlen, - InvalidDist, - InvalidCodeLen, -} - -impl State { - fn is_failure(self) -> bool { - match self { - BlockTypeUnexpected => true, - BadCodeSizeSum => true, - BadDistOrLiteralTableLength => true, - BadTotalSymbols => true, - BadZlibHeader => true, - DistanceOutOfBounds => true, - BadRawLength => true, - BadCodeSizeDistPrevLookup => true, - InvalidLitlen => true, - InvalidDist => true, - _ => false, - } - } - - #[inline] - fn begin(&mut self, new_state: State) { - *self = new_state; - } -} - -use self::State::*; - -// Not sure why miniz uses 32-bit values for these, maybe alignment/cache again? -// # Optimization -// We add a extra value at the end and make the tables 32 elements long -// so we can use a mask to avoid bounds checks. -// The invalid values are set to something high enough to avoid underflowing -// the match length. -/// Base length for each length code. -/// -/// The base is used together with the value of the extra bits to decode the actual -/// length/distance values in a match. 
-#[rustfmt::skip] -const LENGTH_BASE: [u16; 32] = [ - 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, - 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 512, 512, 512 -]; - -/// Number of extra bits for each length code. -#[rustfmt::skip] -const LENGTH_EXTRA: [u8; 32] = [ - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, - 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0, 0 -]; - -/// Base length for each distance code. -#[rustfmt::skip] -const DIST_BASE: [u16; 32] = [ - 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, - 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, - 2049, 3073, 4097, 6145, 8193, 12_289, 16_385, 24_577, 32_768, 32_768 -]; - -/// Number of extra bits for each distance code. -#[rustfmt::skip] -const DIST_EXTRA: [u8; 32] = [ - 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, - 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 13, 13 -]; - -/// The mask used when indexing the base/extra arrays. -const BASE_EXTRA_MASK: usize = 32 - 1; - -/// Sets the value of all the elements of the slice to `val`. -#[inline] -fn memset(slice: &mut [T], val: T) { - for x in slice { - *x = val - } -} - -/// Read an le u16 value from the slice iterator. -/// -/// # Panics -/// Panics if there are less than two bytes left. -#[inline] -fn read_u16_le(iter: &mut slice::Iter) -> u16 { - let ret = { - let two_bytes = iter.as_ref()[..2].try_into().unwrap(); - u16::from_le_bytes(two_bytes) - }; - iter.nth(1); - ret -} - -/// Read an le u32 value from the slice iterator. -/// -/// # Panics -/// Panics if there are less than four bytes left. -#[inline(always)] -#[cfg(target_pointer_width = "64")] -fn read_u32_le(iter: &mut slice::Iter) -> u32 { - let ret = { - let four_bytes: [u8; 4] = iter.as_ref()[..4].try_into().unwrap(); - u32::from_le_bytes(four_bytes) - }; - iter.nth(3); - ret -} - -/// Ensure that there is data in the bit buffer. -/// -/// On 64-bit platform, we use a 64-bit value so this will -/// result in there being at least 32 bits in the bit buffer. 
-/// This function assumes that there is at least 4 bytes left in the input buffer. -#[inline(always)] -#[cfg(target_pointer_width = "64")] -fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut slice::Iter) { - // Read four bytes into the buffer at once. - if l.num_bits < 30 { - l.bit_buf |= BitBuffer::from(read_u32_le(in_iter)) << l.num_bits; - l.num_bits += 32; - } -} - -/// Same as previous, but for non-64-bit platforms. -/// Ensures at least 16 bits are present, requires at least 2 bytes in the in buffer. -#[inline(always)] -#[cfg(not(target_pointer_width = "64"))] -fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut slice::Iter) { - // If the buffer is 32-bit wide, read 2 bytes instead. - if l.num_bits < 15 { - l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits; - l.num_bits += 16; - } -} - -/// Check that the zlib header is correct and that there is enough space in the buffer -/// for the window size specified in the header. -/// -/// See https://tools.ietf.org/html/rfc1950 -#[inline] -fn validate_zlib_header(cmf: u32, flg: u32, flags: u32, mask: usize) -> Action { - let mut failed = - // cmf + flg should be divisible by 31. - (((cmf * 256) + flg) % 31 != 0) || - // If this flag is set, a dictionary was used for this zlib compressed data. - // This is currently not supported by miniz or miniz-oxide - ((flg & 0b0010_0000) != 0) || - // Compression method. Only 8(DEFLATE) is defined by the standard. - ((cmf & 15) != 8); - - let window_size = 1 << ((cmf >> 4) + 8); - if (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) == 0 { - // Bail if the buffer is wrapping and the window size is larger than the buffer. - failed |= (mask + 1) < window_size; - } - - // Zlib doesn't allow window sizes above 32 * 1024. 
- failed |= window_size > 32_768; - - if failed { - Action::Jump(BadZlibHeader) - } else { - Action::Jump(ReadBlockHeader) - } -} - -enum Action { - None, - Jump(State), - End(TINFLStatus), -} - -/// Try to decode the next huffman code, and puts it in the counter field of the decompressor -/// if successful. -/// -/// # Returns -/// The specified action returned from `f` on success, -/// `Action::End` if there are not enough data left to decode a symbol. -fn decode_huffman_code( - r: &mut DecompressorOxide, - l: &mut LocalVars, - table: usize, - flags: u32, - in_iter: &mut slice::Iter, - f: F, -) -> Action -where - F: FnOnce(&mut DecompressorOxide, &mut LocalVars, i32) -> Action, -{ - // As the huffman codes can be up to 15 bits long we need at least 15 bits - // ready in the bit buffer to start decoding the next huffman code. - if l.num_bits < 15 { - // First, make sure there is enough data in the bit buffer to decode a huffman code. - if in_iter.len() < 2 { - // If there is less than 2 bytes left in the input buffer, we try to look up - // the huffman code with what's available, and return if that doesn't succeed. - // Original explanation in miniz: - // /* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes - // * remaining in the input buffer falls below 2. */ - // /* It reads just enough bytes from the input stream that are needed to decode - // * the next Huffman code (and absolutely no more). It works by trying to fully - // * decode a */ - // /* Huffman code by using whatever bits are currently present in the bit buffer. - // * If this fails, it reads another byte, and tries again until it succeeds or - // * until the */ - // /* bit buffer contains >=15 bits (deflate's max. Huffman code size). 
*/ - loop { - let mut temp = i32::from(r.tables[table].fast_lookup(l.bit_buf)); - - if temp >= 0 { - let code_len = (temp >> 9) as u32; - if (code_len != 0) && (l.num_bits >= code_len) { - break; - } - } else if l.num_bits > FAST_LOOKUP_BITS.into() { - let mut code_len = u32::from(FAST_LOOKUP_BITS); - loop { - temp = i32::from( - r.tables[table].tree - [(!temp + ((l.bit_buf >> code_len) & 1) as i32) as usize], - ); - code_len += 1; - if temp >= 0 || l.num_bits < code_len + 1 { - break; - } - } - if temp >= 0 { - break; - } - } - - // TODO: miniz jumps straight to here after getting here again after failing to read - // a byte. - // Doing that lets miniz avoid re-doing the lookup that that was done in the - // previous call. - let mut byte = 0; - if let a @ Action::End(_) = read_byte(in_iter, flags, |b| { - byte = b; - Action::None - }) { - return a; - }; - - // Do this outside closure for now to avoid borrowing r. - l.bit_buf |= BitBuffer::from(byte) << l.num_bits; - l.num_bits += 8; - - if l.num_bits >= 15 { - break; - } - } - } else { - // There is enough data in the input buffer, so read the next two bytes - // and add them to the bit buffer. - // Unwrapping here is fine since we just checked that there are at least two - // bytes left. - l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits; - l.num_bits += 16; - } - } - - // We now have at least 15 bits in the input buffer. - let mut symbol = i32::from(r.tables[table].fast_lookup(l.bit_buf)); - let code_len; - // If the symbol was found in the fast lookup table. - if symbol >= 0 { - // Get the length value from the top bits. - // As we shift down the sign bit, converting to an unsigned value - // shouldn't overflow. - code_len = (symbol >> 9) as u32; - // Mask out the length value. 
- symbol &= 511; - } else { - let res = r.tables[table].tree_lookup(symbol, l.bit_buf, u32::from(FAST_LOOKUP_BITS)); - symbol = res.0; - code_len = res.1 as u32; - }; - - if code_len == 0 { - return Action::Jump(InvalidCodeLen); - } - - l.bit_buf >>= code_len as u32; - l.num_bits -= code_len; - f(r, l, symbol) -} - -/// Try to read one byte from `in_iter` and call `f` with the read byte as an argument, -/// returning the result. -/// If reading fails, `Action::End is returned` -#[inline] -fn read_byte(in_iter: &mut slice::Iter, flags: u32, f: F) -> Action -where - F: FnOnce(u8) -> Action, -{ - match in_iter.next() { - None => end_of_input(flags), - Some(&byte) => f(byte), - } -} - -// TODO: `l: &mut LocalVars` may be slow similar to decompress_fast (even with inline(always)) -/// Try to read `amount` number of bits from `in_iter` and call the function `f` with the bits as an -/// an argument after reading, returning the result of that function, or `Action::End` if there are -/// not enough bytes left. -#[inline] -#[allow(clippy::while_immutable_condition)] -fn read_bits( - l: &mut LocalVars, - amount: u32, - in_iter: &mut slice::Iter, - flags: u32, - f: F, -) -> Action -where - F: FnOnce(&mut LocalVars, BitBuffer) -> Action, -{ - // Clippy gives a false positive warning here due to the closure. - // Read enough bytes from the input iterator to cover the number of bits we want. - while l.num_bits < amount { - match read_byte(in_iter, flags, |byte| { - l.bit_buf |= BitBuffer::from(byte) << l.num_bits; - l.num_bits += 8; - Action::None - }) { - Action::None => (), - // If there are not enough bytes in the input iterator, return and signal that we need - // more. 
- action => return action, - } - } - - let bits = l.bit_buf & ((1 << amount) - 1); - l.bit_buf >>= amount; - l.num_bits -= amount; - f(l, bits) -} - -#[inline] -fn pad_to_bytes(l: &mut LocalVars, in_iter: &mut slice::Iter, flags: u32, f: F) -> Action -where - F: FnOnce(&mut LocalVars) -> Action, -{ - let num_bits = l.num_bits & 7; - read_bits(l, num_bits, in_iter, flags, |l, _| f(l)) -} - -#[inline] -fn end_of_input(flags: u32) -> Action { - Action::End(if flags & TINFL_FLAG_HAS_MORE_INPUT != 0 { - TINFLStatus::NeedsMoreInput - } else { - TINFLStatus::FailedCannotMakeProgress - }) -} - -#[inline] -fn undo_bytes(l: &mut LocalVars, max: u32) -> u32 { - let res = cmp::min(l.num_bits >> 3, max); - l.num_bits -= res << 3; - res -} - -fn start_static_table(r: &mut DecompressorOxide) { - r.table_sizes[LITLEN_TABLE] = 288; - r.table_sizes[DIST_TABLE] = 32; - memset(&mut r.tables[LITLEN_TABLE].code_size[0..144], 8); - memset(&mut r.tables[LITLEN_TABLE].code_size[144..256], 9); - memset(&mut r.tables[LITLEN_TABLE].code_size[256..280], 7); - memset(&mut r.tables[LITLEN_TABLE].code_size[280..288], 8); - memset(&mut r.tables[DIST_TABLE].code_size[0..32], 5); -} - -static REVERSED_BITS_LOOKUP: [u32; 1024] = { - let mut table = [0; 1024]; - - let mut i = 0; - while i < 1024 { - table[i] = (i as u32).reverse_bits(); - i += 1; - } - - table -}; - -fn init_tree(r: &mut DecompressorOxide, l: &mut LocalVars) -> Action { - loop { - let table = &mut r.tables[r.block_type as usize]; - let table_size = r.table_sizes[r.block_type as usize] as usize; - let mut total_symbols = [0u32; 16]; - let mut next_code = [0u32; 17]; - memset(&mut table.look_up[..], 0); - memset(&mut table.tree[..], 0); - - for &code_size in &table.code_size[..table_size] { - total_symbols[code_size as usize] += 1; - } - - let mut used_symbols = 0; - let mut total = 0; - for i in 1..16 { - used_symbols += total_symbols[i]; - total += total_symbols[i]; - total <<= 1; - next_code[i + 1] = total; - } - - if total != 65_536 
&& used_symbols > 1 { - return Action::Jump(BadTotalSymbols); - } - - let mut tree_next = -1; - for symbol_index in 0..table_size { - let mut rev_code = 0; - let code_size = table.code_size[symbol_index]; - if code_size == 0 { - continue; - } - - let mut cur_code = next_code[code_size as usize]; - next_code[code_size as usize] += 1; - - let n = cur_code & (u32::MAX >> (32 - code_size)); - - let mut rev_code = if n < 1024 { - REVERSED_BITS_LOOKUP[n as usize] >> (32 - code_size) - } else { - for _ in 0..code_size { - rev_code = (rev_code << 1) | (cur_code & 1); - cur_code >>= 1; - } - rev_code - }; - - if code_size <= FAST_LOOKUP_BITS { - let k = (i16::from(code_size) << 9) | symbol_index as i16; - while rev_code < FAST_LOOKUP_SIZE { - table.look_up[rev_code as usize] = k; - rev_code += 1 << code_size; - } - continue; - } - - let mut tree_cur = table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize]; - if tree_cur == 0 { - table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize] = tree_next as i16; - tree_cur = tree_next; - tree_next -= 2; - } - - rev_code >>= FAST_LOOKUP_BITS - 1; - for _ in FAST_LOOKUP_BITS + 1..code_size { - rev_code >>= 1; - tree_cur -= (rev_code & 1) as i16; - if table.tree[(-tree_cur - 1) as usize] == 0 { - table.tree[(-tree_cur - 1) as usize] = tree_next as i16; - tree_cur = tree_next; - tree_next -= 2; - } else { - tree_cur = table.tree[(-tree_cur - 1) as usize]; - } - } - - rev_code >>= 1; - tree_cur -= (rev_code & 1) as i16; - table.tree[(-tree_cur - 1) as usize] = symbol_index as i16; - } - - if r.block_type == 2 { - l.counter = 0; - return Action::Jump(ReadLitlenDistTablesCodeSize); - } - - if r.block_type == 0 { - break; - } - r.block_type -= 1; - } - - l.counter = 0; - Action::Jump(DecodeLitlen) -} - -// A helper macro for generating the state machine. -// -// As Rust doesn't have fallthrough on matches, we have to return to the match statement -// and jump for each state change. 
(Which would ideally be optimized away, but often isn't.) -macro_rules! generate_state { - ($state: ident, $state_machine: tt, $f: expr) => { - loop { - match $f { - Action::None => continue, - Action::Jump(new_state) => { - $state = new_state; - continue $state_machine; - }, - Action::End(result) => break $state_machine result, - } - } - }; -} - -#[derive(Copy, Clone)] -struct LocalVars { - pub bit_buf: BitBuffer, - pub num_bits: u32, - pub dist: u32, - pub counter: u32, - pub num_extra: u32, -} - -#[inline] -fn transfer( - out_slice: &mut [u8], - mut source_pos: usize, - mut out_pos: usize, - match_len: usize, - out_buf_size_mask: usize, -) { - // special case that comes up surprisingly often. in the case that `source_pos` - // is 1 less than `out_pos`, we can say that the entire range will be the same - // value and optimize this to be a simple `memset` - let source_diff = if source_pos > out_pos { - source_pos - out_pos - } else { - out_pos - source_pos - }; - if out_buf_size_mask == usize::MAX && source_diff == 1 && out_pos > source_pos { - let init = out_slice[out_pos - 1]; - let end = (match_len >> 2) * 4 + out_pos; - - out_slice[out_pos..end].fill(init); - out_pos = end; - source_pos = end - 1; - // if the difference between `source_pos` and `out_pos` is greater than 3, we - // can do slightly better than the naive case by copying everything at once - } else if out_buf_size_mask == usize::MAX && source_diff >= 4 && out_pos > source_pos { - for _ in 0..match_len >> 2 { - out_slice.copy_within(source_pos..=source_pos + 3, out_pos); - source_pos += 4; - out_pos += 4; - } - } else { - for _ in 0..match_len >> 2 { - out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask]; - out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask]; - out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask]; - out_slice[out_pos + 3] = out_slice[(source_pos + 3) & out_buf_size_mask]; - source_pos += 4; - out_pos += 4; - } - } - - match 
match_len & 3 { - 0 => (), - 1 => out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask], - 2 => { - out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask]; - out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask]; - } - 3 => { - out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask]; - out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask]; - out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask]; - } - _ => unreachable!(), - } -} - -/// Presumes that there is at least match_len bytes in output left. -#[inline] -fn apply_match( - out_slice: &mut [u8], - out_pos: usize, - dist: usize, - match_len: usize, - out_buf_size_mask: usize, -) { - debug_assert!(out_pos + match_len <= out_slice.len()); - - let source_pos = out_pos.wrapping_sub(dist) & out_buf_size_mask; - - if match_len == 3 { - // Fast path for match len 3. - out_slice[out_pos] = out_slice[source_pos]; - out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask]; - out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask]; - return; - } - - if cfg!(not(any(target_arch = "x86", target_arch = "x86_64"))) { - // We are not on x86 so copy manually. - transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask); - return; - } - - if source_pos >= out_pos && (source_pos - out_pos) < match_len { - transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask); - } else if match_len <= dist && source_pos + match_len < out_slice.len() { - // Destination and source segments does not intersect and source does not wrap. 
- if source_pos < out_pos { - let (from_slice, to_slice) = out_slice.split_at_mut(out_pos); - to_slice[..match_len].copy_from_slice(&from_slice[source_pos..source_pos + match_len]); - } else { - let (to_slice, from_slice) = out_slice.split_at_mut(source_pos); - to_slice[out_pos..out_pos + match_len].copy_from_slice(&from_slice[..match_len]); - } - } else { - transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask); - } -} - -/// Fast inner decompression loop which is run while there is at least -/// 259 bytes left in the output buffer, and at least 6 bytes left in the input buffer -/// (The maximum one match would need + 1). -/// -/// This was inspired by a similar optimization in zlib, which uses this info to do -/// faster unchecked copies of multiple bytes at a time. -/// Currently we don't do this here, but this function does avoid having to jump through the -/// big match loop on each state change(as rust does not have fallthrough or gotos at the moment), -/// and already improves decompression speed a fair bit. -fn decompress_fast( - r: &mut DecompressorOxide, - in_iter: &mut slice::Iter, - out_buf: &mut OutputBuffer, - flags: u32, - local_vars: &mut LocalVars, - out_buf_size_mask: usize, -) -> (TINFLStatus, State) { - // Make a local copy of the most used variables, to avoid having to update and read from values - // in a random memory location and to encourage more register use. - let mut l = *local_vars; - let mut state; - - let status: TINFLStatus = 'o: loop { - state = State::DecodeLitlen; - loop { - // This function assumes that there is at least 259 bytes left in the output buffer, - // and that there is at least 14 bytes left in the input buffer. 14 input bytes: - // 15 (prev lit) + 15 (length) + 5 (length extra) + 15 (dist) - // + 29 + 32 (left in bit buf, including last 13 dist extra) = 111 bits < 14 bytes - // We need the one extra byte as we may write one length and one full match - // before checking again. 
- if out_buf.bytes_left() < 259 || in_iter.len() < 14 { - state = State::DecodeLitlen; - break 'o TINFLStatus::Done; - } - - fill_bit_buffer(&mut l, in_iter); - - if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) { - l.counter = symbol as u32; - l.bit_buf >>= code_len; - l.num_bits -= code_len; - - if (l.counter & 256) != 0 { - // The symbol is not a literal. - break; - } else { - // If we have a 32-bit buffer we need to read another two bytes now - // to have enough bits to keep going. - if cfg!(not(target_pointer_width = "64")) { - fill_bit_buffer(&mut l, in_iter); - } - - if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) { - l.bit_buf >>= code_len; - l.num_bits -= code_len; - // The previous symbol was a literal, so write it directly and check - // the next one. - out_buf.write_byte(l.counter as u8); - if (symbol & 256) != 0 { - l.counter = symbol as u32; - // The symbol is a length value. - break; - } else { - // The symbol is a literal, so write it directly and continue. - out_buf.write_byte(symbol as u8); - } - } else { - state.begin(InvalidCodeLen); - break 'o TINFLStatus::Failed; - } - } - } else { - state.begin(InvalidCodeLen); - break 'o TINFLStatus::Failed; - } - } - - // Mask the top bits since they may contain length info. - l.counter &= 511; - if l.counter == 256 { - // We hit the end of block symbol. - state.begin(BlockDone); - break 'o TINFLStatus::Done; - } else if l.counter > 285 { - // Invalid code. - // We already verified earlier that the code is > 256. - state.begin(InvalidLitlen); - break 'o TINFLStatus::Failed; - } else { - // The symbol was a length code. - // # Optimization - // Mask the value to avoid bounds checks - // We could use get_unchecked later if can statically verify that - // this will never go out of bounds. 
- l.num_extra = u32::from(LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK]); - l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]); - // Length and distance codes have a number of extra bits depending on - // the base, which together with the base gives us the exact value. - - fill_bit_buffer(&mut l, in_iter); - if l.num_extra != 0 { - let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1); - l.bit_buf >>= l.num_extra; - l.num_bits -= l.num_extra; - l.counter += extra_bits as u32; - } - - // We found a length code, so a distance code should follow. - - if cfg!(not(target_pointer_width = "64")) { - fill_bit_buffer(&mut l, in_iter); - } - - if let Some((mut symbol, code_len)) = r.tables[DIST_TABLE].lookup(l.bit_buf) { - symbol &= 511; - l.bit_buf >>= code_len; - l.num_bits -= code_len; - if symbol > 29 { - state.begin(InvalidDist); - break 'o TINFLStatus::Failed; - } - - l.num_extra = u32::from(DIST_EXTRA[symbol as usize]); - l.dist = u32::from(DIST_BASE[symbol as usize]); - } else { - state.begin(InvalidCodeLen); - break 'o TINFLStatus::Failed; - } - - if l.num_extra != 0 { - fill_bit_buffer(&mut l, in_iter); - let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1); - l.bit_buf >>= l.num_extra; - l.num_bits -= l.num_extra; - l.dist += extra_bits as u32; - } - - let position = out_buf.position(); - if l.dist as usize > out_buf.position() - && (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0) - { - // We encountered a distance that refers a position before - // the start of the decoded data, so we can't continue. - state.begin(DistanceOutOfBounds); - break TINFLStatus::Failed; - } - - apply_match( - out_buf.get_mut(), - position, - l.dist as usize, - l.counter as usize, - out_buf_size_mask, - ); - - out_buf.set_position(position + l.counter as usize); - } - }; - - *local_vars = l; - (status, state) -} - -/// Main decompression function. 
Keeps decompressing data from `in_buf` until the `in_buf` is -/// empty, `out` is full, the end of the deflate stream is hit, or there is an error in the -/// deflate stream. -/// -/// # Arguments -/// -/// `r` is a [`DecompressorOxide`] struct with the state of this stream. -/// -/// `in_buf` is a reference to the compressed data that is to be decompressed. The decompressor will -/// start at the first byte of this buffer. -/// -/// `out` is a reference to the buffer that will store the decompressed data, and that -/// stores previously decompressed data if any. -/// -/// * The offset given by `out_pos` indicates where in the output buffer slice writing should start. -/// * If [`TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF`] is not set, the output buffer is used in a -/// wrapping manner, and it's size is required to be a power of 2. -/// * The decompression function normally needs access to 32KiB of the previously decompressed data -///(or to the beginning of the decompressed data if less than 32KiB has been decompressed.) -/// - If this data is not available, decompression may fail. -/// - Some deflate compressors allow specifying a window size which limits match distances to -/// less than this, or alternatively an RLE mode where matches will only refer to the previous byte -/// and thus allows a smaller output buffer. The window size can be specified in the zlib -/// header structure, however, the header data should not be relied on to be correct. -/// -/// `flags` indicates settings and status to the decompression function. -/// * The [`TINFL_FLAG_HAS_MORE_INPUT`] has to be specified if more compressed data is to be provided -/// in a subsequent call to this function. -/// * See the the [`inflate_flags`] module for details on other flags. -/// -/// # Returns -/// -/// Returns a tuple containing the status of the compressor, the number of input bytes read, and the -/// number of bytes output to `out`. -/// -/// This function shouldn't panic pending any bugs. 
-pub fn decompress( - r: &mut DecompressorOxide, - in_buf: &[u8], - out: &mut [u8], - out_pos: usize, - flags: u32, -) -> (TINFLStatus, usize, usize) { - let out_buf_size_mask = if flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0 { - usize::max_value() - } else { - // In the case of zero len, any attempt to write would produce HasMoreOutput, - // so to gracefully process the case of there really being no output, - // set the mask to all zeros. - out.len().saturating_sub(1) - }; - - // Ensure the output buffer's size is a power of 2, unless the output buffer - // is large enough to hold the entire output file (in which case it doesn't - // matter). - // Also make sure that the output buffer position is not past the end of the output buffer. - if (out_buf_size_mask.wrapping_add(1) & out_buf_size_mask) != 0 || out_pos > out.len() { - return (TINFLStatus::BadParam, 0, 0); - } - - let mut in_iter = in_buf.iter(); - - let mut state = r.state; - - let mut out_buf = OutputBuffer::from_slice_and_pos(out, out_pos); - - // Make a local copy of the important variables here so we can work with them on the stack. 
- let mut l = LocalVars { - bit_buf: r.bit_buf, - num_bits: r.num_bits, - dist: r.dist, - counter: r.counter, - num_extra: r.num_extra, - }; - - let mut status = 'state_machine: loop { - match state { - Start => generate_state!(state, 'state_machine, { - l.bit_buf = 0; - l.num_bits = 0; - l.dist = 0; - l.counter = 0; - l.num_extra = 0; - r.z_header0 = 0; - r.z_header1 = 0; - r.z_adler32 = 1; - r.check_adler32 = 1; - if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 { - Action::Jump(State::ReadZlibCmf) - } else { - Action::Jump(State::ReadBlockHeader) - } - }), - - ReadZlibCmf => generate_state!(state, 'state_machine, { - read_byte(&mut in_iter, flags, |cmf| { - r.z_header0 = u32::from(cmf); - Action::Jump(State::ReadZlibFlg) - }) - }), - - ReadZlibFlg => generate_state!(state, 'state_machine, { - read_byte(&mut in_iter, flags, |flg| { - r.z_header1 = u32::from(flg); - validate_zlib_header(r.z_header0, r.z_header1, flags, out_buf_size_mask) - }) - }), - - // Read the block header and jump to the relevant section depending on the block type. - ReadBlockHeader => generate_state!(state, 'state_machine, { - read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| { - r.finish = (bits & 1) as u32; - r.block_type = (bits >> 1) as u32 & 3; - match r.block_type { - 0 => Action::Jump(BlockTypeNoCompression), - 1 => { - start_static_table(r); - init_tree(r, l) - }, - 2 => { - l.counter = 0; - Action::Jump(ReadTableSizes) - }, - 3 => Action::Jump(BlockTypeUnexpected), - _ => unreachable!() - } - }) - }), - - // Raw/Stored/uncompressed block. - BlockTypeNoCompression => generate_state!(state, 'state_machine, { - pad_to_bytes(&mut l, &mut in_iter, flags, |l| { - l.counter = 0; - Action::Jump(RawHeader) - }) - }), - - // Check that the raw block header is correct. - RawHeader => generate_state!(state, 'state_machine, { - if l.counter < 4 { - // Read block length and block length check. 
- if l.num_bits != 0 { - read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| { - r.raw_header[l.counter as usize] = bits as u8; - l.counter += 1; - Action::None - }) - } else { - read_byte(&mut in_iter, flags, |byte| { - r.raw_header[l.counter as usize] = byte; - l.counter += 1; - Action::None - }) - } - } else { - // Check if the length value of a raw block is correct. - // The 2 first (2-byte) words in a raw header are the length and the - // ones complement of the length. - let length = u16::from(r.raw_header[0]) | (u16::from(r.raw_header[1]) << 8); - let check = u16::from(r.raw_header[2]) | (u16::from(r.raw_header[3]) << 8); - let valid = length == !check; - l.counter = length.into(); - - if !valid { - Action::Jump(BadRawLength) - } else if l.counter == 0 { - // Empty raw block. Sometimes used for synchronization. - Action::Jump(BlockDone) - } else if l.num_bits != 0 { - // There is some data in the bit buffer, so we need to write that first. - Action::Jump(RawReadFirstByte) - } else { - // The bit buffer is empty, so memcpy the rest of the uncompressed data from - // the block. - Action::Jump(RawMemcpy1) - } - } - }), - - // Read the byte from the bit buffer. - RawReadFirstByte => generate_state!(state, 'state_machine, { - read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| { - l.dist = bits as u32; - Action::Jump(RawStoreFirstByte) - }) - }), - - // Write the byte we just read to the output buffer. - RawStoreFirstByte => generate_state!(state, 'state_machine, { - if out_buf.bytes_left() == 0 { - Action::End(TINFLStatus::HasMoreOutput) - } else { - out_buf.write_byte(l.dist as u8); - l.counter -= 1; - if l.counter == 0 || l.num_bits == 0 { - Action::Jump(RawMemcpy1) - } else { - // There is still some data left in the bit buffer that needs to be output. - // TODO: Changed this to jump to `RawReadfirstbyte` rather than - // `RawStoreFirstByte` as that seemed to be the correct path, but this - // needs testing. 
- Action::Jump(RawReadFirstByte) - } - } - }), - - RawMemcpy1 => generate_state!(state, 'state_machine, { - if l.counter == 0 { - Action::Jump(BlockDone) - } else if out_buf.bytes_left() == 0 { - Action::End(TINFLStatus::HasMoreOutput) - } else { - Action::Jump(RawMemcpy2) - } - }), - - RawMemcpy2 => generate_state!(state, 'state_machine, { - if in_iter.len() > 0 { - // Copy as many raw bytes as possible from the input to the output using memcpy. - // Raw block lengths are limited to 64 * 1024, so casting through usize and u32 - // is not an issue. - let space_left = out_buf.bytes_left(); - let bytes_to_copy = cmp::min(cmp::min( - space_left, - in_iter.len()), - l.counter as usize - ); - - out_buf.write_slice(&in_iter.as_slice()[..bytes_to_copy]); - - (&mut in_iter).nth(bytes_to_copy - 1); - l.counter -= bytes_to_copy as u32; - Action::Jump(RawMemcpy1) - } else { - end_of_input(flags) - } - }), - - // Read how many huffman codes/symbols are used for each table. - ReadTableSizes => generate_state!(state, 'state_machine, { - if l.counter < 3 { - let num_bits = [5, 5, 4][l.counter as usize]; - read_bits(&mut l, num_bits, &mut in_iter, flags, |l, bits| { - r.table_sizes[l.counter as usize] = - bits as u32 + u32::from(MIN_TABLE_SIZES[l.counter as usize]); - l.counter += 1; - Action::None - }) - } else { - memset(&mut r.tables[HUFFLEN_TABLE].code_size[..], 0); - l.counter = 0; - // Check that the litlen and distance are within spec. - // litlen table should be <=286 acc to the RFC and - // additionally zlib rejects dist table sizes larger than 30. - // NOTE this the final sizes after adding back predefined values, not - // raw value in the data. - // See miniz_oxide issue #130 and https://github.com/madler/zlib/issues/82. 
- if r.table_sizes[LITLEN_TABLE] <= 286 && r.table_sizes[DIST_TABLE] <= 30 { - Action::Jump(ReadHufflenTableCodeSize) - } - else { - Action::Jump(BadDistOrLiteralTableLength) - } - } - }), - - // Read the 3-bit lengths of the huffman codes describing the huffman code lengths used - // to decode the lengths of the main tables. - ReadHufflenTableCodeSize => generate_state!(state, 'state_machine, { - if l.counter < r.table_sizes[HUFFLEN_TABLE] { - read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| { - // These lengths are not stored in a normal ascending order, but rather one - // specified by the deflate specification intended to put the most used - // values at the front as trailing zero lengths do not have to be stored. - r.tables[HUFFLEN_TABLE] - .code_size[HUFFMAN_LENGTH_ORDER[l.counter as usize] as usize] = - bits as u8; - l.counter += 1; - Action::None - }) - } else { - r.table_sizes[HUFFLEN_TABLE] = 19; - init_tree(r, &mut l) - } - }), - - ReadLitlenDistTablesCodeSize => generate_state!(state, 'state_machine, { - if l.counter < r.table_sizes[LITLEN_TABLE] + r.table_sizes[DIST_TABLE] { - decode_huffman_code( - r, &mut l, HUFFLEN_TABLE, - flags, &mut in_iter, |r, l, symbol| { - l.dist = symbol as u32; - if l.dist < 16 { - r.len_codes[l.counter as usize] = l.dist as u8; - l.counter += 1; - Action::None - } else if l.dist == 16 && l.counter == 0 { - Action::Jump(BadCodeSizeDistPrevLookup) - } else { - l.num_extra = [2, 3, 7][l.dist as usize - 16]; - Action::Jump(ReadExtraBitsCodeSize) - } - } - ) - } else if l.counter != r.table_sizes[LITLEN_TABLE] + r.table_sizes[DIST_TABLE] { - Action::Jump(BadCodeSizeSum) - } else { - r.tables[LITLEN_TABLE].code_size[..r.table_sizes[LITLEN_TABLE] as usize] - .copy_from_slice(&r.len_codes[..r.table_sizes[LITLEN_TABLE] as usize]); - - let dist_table_start = r.table_sizes[LITLEN_TABLE] as usize; - let dist_table_end = (r.table_sizes[LITLEN_TABLE] + - r.table_sizes[DIST_TABLE]) as usize; - 
r.tables[DIST_TABLE].code_size[..r.table_sizes[DIST_TABLE] as usize] - .copy_from_slice(&r.len_codes[dist_table_start..dist_table_end]); - - r.block_type -= 1; - init_tree(r, &mut l) - } - }), - - ReadExtraBitsCodeSize => generate_state!(state, 'state_machine, { - let num_extra = l.num_extra; - read_bits(&mut l, num_extra, &mut in_iter, flags, |l, mut extra_bits| { - // Mask to avoid a bounds check. - extra_bits += [3, 3, 11][(l.dist as usize - 16) & 3]; - let val = if l.dist == 16 { - r.len_codes[l.counter as usize - 1] - } else { - 0 - }; - - memset( - &mut r.len_codes[ - l.counter as usize..l.counter as usize + extra_bits as usize - ], - val, - ); - l.counter += extra_bits as u32; - Action::Jump(ReadLitlenDistTablesCodeSize) - }) - }), - - DecodeLitlen => generate_state!(state, 'state_machine, { - if in_iter.len() < 4 || out_buf.bytes_left() < 2 { - // See if we can decode a literal with the data we have left. - // Jumps to next state (WriteSymbol) if successful. - decode_huffman_code( - r, - &mut l, - LITLEN_TABLE, - flags, - &mut in_iter, - |_r, l, symbol| { - l.counter = symbol as u32; - Action::Jump(WriteSymbol) - }, - ) - } else if - // If there is enough space, use the fast inner decompression - // function. - out_buf.bytes_left() >= 259 && - in_iter.len() >= 14 - { - let (status, new_state) = decompress_fast( - r, - &mut in_iter, - &mut out_buf, - flags, - &mut l, - out_buf_size_mask, - ); - - state = new_state; - if status == TINFLStatus::Done { - Action::Jump(new_state) - } else { - Action::End(status) - } - } else { - fill_bit_buffer(&mut l, &mut in_iter); - - if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) { - - l.counter = symbol as u32; - l.bit_buf >>= code_len; - l.num_bits -= code_len; - - if (l.counter & 256) != 0 { - // The symbol is not a literal. - Action::Jump(HuffDecodeOuterLoop1) - } else { - // If we have a 32-bit buffer we need to read another two bytes now - // to have enough bits to keep going. 
- if cfg!(not(target_pointer_width = "64")) { - fill_bit_buffer(&mut l, &mut in_iter); - } - - if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) { - - l.bit_buf >>= code_len; - l.num_bits -= code_len; - // The previous symbol was a literal, so write it directly and check - // the next one. - out_buf.write_byte(l.counter as u8); - if (symbol & 256) != 0 { - l.counter = symbol as u32; - // The symbol is a length value. - Action::Jump(HuffDecodeOuterLoop1) - } else { - // The symbol is a literal, so write it directly and continue. - out_buf.write_byte(symbol as u8); - Action::None - } - } else { - Action::Jump(InvalidCodeLen) - } - } - } else { - Action::Jump(InvalidCodeLen) - } - } - }), - - WriteSymbol => generate_state!(state, 'state_machine, { - if l.counter >= 256 { - Action::Jump(HuffDecodeOuterLoop1) - } else if out_buf.bytes_left() > 0 { - out_buf.write_byte(l.counter as u8); - Action::Jump(DecodeLitlen) - } else { - Action::End(TINFLStatus::HasMoreOutput) - } - }), - - HuffDecodeOuterLoop1 => generate_state!(state, 'state_machine, { - // Mask the top bits since they may contain length info. - l.counter &= 511; - - if l.counter - == 256 { - // We hit the end of block symbol. - Action::Jump(BlockDone) - } else if l.counter > 285 { - // Invalid code. - // We already verified earlier that the code is > 256. - Action::Jump(InvalidLitlen) - } else { - // # Optimization - // Mask the value to avoid bounds checks - // We could use get_unchecked later if can statically verify that - // this will never go out of bounds. - l.num_extra = - u32::from(LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK]); - l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]); - // Length and distance codes have a number of extra bits depending on - // the base, which together with the base gives us the exact value. 
- if l.num_extra != 0 { - Action::Jump(ReadExtraBitsLitlen) - } else { - Action::Jump(DecodeDistance) - } - } - }), - - ReadExtraBitsLitlen => generate_state!(state, 'state_machine, { - let num_extra = l.num_extra; - read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| { - l.counter += extra_bits as u32; - Action::Jump(DecodeDistance) - }) - }), - - DecodeDistance => generate_state!(state, 'state_machine, { - // Try to read a huffman code from the input buffer and look up what - // length code the decoded symbol refers to. - decode_huffman_code(r, &mut l, DIST_TABLE, flags, &mut in_iter, |_r, l, symbol| { - if symbol > 29 { - // Invalid distance code. - return Action::Jump(InvalidDist) - } - // # Optimization - // Mask the value to avoid bounds checks - // We could use get_unchecked later if can statically verify that - // this will never go out of bounds. - l.num_extra = u32::from(DIST_EXTRA[symbol as usize & BASE_EXTRA_MASK]); - l.dist = u32::from(DIST_BASE[symbol as usize & BASE_EXTRA_MASK]); - if l.num_extra != 0 { - // ReadEXTRA_BITS_DISTACNE - Action::Jump(ReadExtraBitsDistance) - } else { - Action::Jump(HuffDecodeOuterLoop2) - } - }) - }), - - ReadExtraBitsDistance => generate_state!(state, 'state_machine, { - let num_extra = l.num_extra; - read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| { - l.dist += extra_bits as u32; - Action::Jump(HuffDecodeOuterLoop2) - }) - }), - - HuffDecodeOuterLoop2 => generate_state!(state, 'state_machine, { - if l.dist as usize > out_buf.position() && - (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0) - { - // We encountered a distance that refers a position before - // the start of the decoded data, so we can't continue. 
- Action::Jump(DistanceOutOfBounds) - } else { - let out_pos = out_buf.position(); - let source_pos = out_buf.position() - .wrapping_sub(l.dist as usize) & out_buf_size_mask; - - let out_len = out_buf.get_ref().len() as usize; - let match_end_pos = out_buf.position() + l.counter as usize; - - if match_end_pos > out_len || - // miniz doesn't do this check here. Not sure how it makes sure - // that this case doesn't happen. - (source_pos >= out_pos && (source_pos - out_pos) < l.counter as usize) - { - // Not enough space for all of the data in the output buffer, - // so copy what we have space for. - if l.counter == 0 { - Action::Jump(DecodeLitlen) - } else { - Action::Jump(WriteLenBytesToEnd) - } - } else { - apply_match( - out_buf.get_mut(), - out_pos, - l.dist as usize, - l.counter as usize, - out_buf_size_mask - ); - out_buf.set_position(out_pos + l.counter as usize); - Action::Jump(DecodeLitlen) - } - } - }), - - WriteLenBytesToEnd => generate_state!(state, 'state_machine, { - if out_buf.bytes_left() > 0 { - let out_pos = out_buf.position(); - let source_pos = out_buf.position() - .wrapping_sub(l.dist as usize) & out_buf_size_mask; - - - let len = cmp::min(out_buf.bytes_left(), l.counter as usize); - - transfer(out_buf.get_mut(), source_pos, out_pos, len, out_buf_size_mask); - - out_buf.set_position(out_pos + len); - l.counter -= len as u32; - if l.counter == 0 { - Action::Jump(DecodeLitlen) - } else { - Action::None - } - } else { - Action::End(TINFLStatus::HasMoreOutput) - } - }), - - BlockDone => generate_state!(state, 'state_machine, { - // End once we've read the last block. 
- if r.finish != 0 { - pad_to_bytes(&mut l, &mut in_iter, flags, |_| Action::None); - - let in_consumed = in_buf.len() - in_iter.len(); - let undo = undo_bytes(&mut l, in_consumed as u32) as usize; - in_iter = in_buf[in_consumed - undo..].iter(); - - l.bit_buf &= ((1 as BitBuffer) << l.num_bits) - 1; - debug_assert_eq!(l.num_bits, 0); - - if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 { - l.counter = 0; - Action::Jump(ReadAdler32) - } else { - Action::Jump(DoneForever) - } - } else { - Action::Jump(ReadBlockHeader) - } - }), - - ReadAdler32 => generate_state!(state, 'state_machine, { - if l.counter < 4 { - if l.num_bits != 0 { - read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| { - r.z_adler32 <<= 8; - r.z_adler32 |= bits as u32; - l.counter += 1; - Action::None - }) - } else { - read_byte(&mut in_iter, flags, |byte| { - r.z_adler32 <<= 8; - r.z_adler32 |= u32::from(byte); - l.counter += 1; - Action::None - }) - } - } else { - Action::Jump(DoneForever) - } - }), - - // We are done. - DoneForever => break TINFLStatus::Done, - - // Anything else indicates failure. - // BadZlibHeader | BadRawLength | BadDistOrLiteralTableLength | BlockTypeUnexpected | - // DistanceOutOfBounds | - // BadTotalSymbols | BadCodeSizeDistPrevLookup | BadCodeSizeSum | InvalidLitlen | - // InvalidDist | InvalidCodeLen - _ => break TINFLStatus::Failed, - }; - }; - - let in_undo = if status != TINFLStatus::NeedsMoreInput - && status != TINFLStatus::FailedCannotMakeProgress - { - undo_bytes(&mut l, (in_buf.len() - in_iter.len()) as u32) as usize - } else { - 0 - }; - - // Make sure HasMoreOutput overrides NeedsMoreInput if the output buffer is full. - // (Unless the missing input is the adler32 value in which case we don't need to write anything.) - // TODO: May want to see if we can do this in a better way. 
- if status == TINFLStatus::NeedsMoreInput - && out_buf.bytes_left() == 0 - && state != State::ReadAdler32 - { - status = TINFLStatus::HasMoreOutput - } - - r.state = state; - r.bit_buf = l.bit_buf; - r.num_bits = l.num_bits; - r.dist = l.dist; - r.counter = l.counter; - r.num_extra = l.num_extra; - - r.bit_buf &= ((1 as BitBuffer) << r.num_bits) - 1; - - // If this is a zlib stream, and update the adler32 checksum with the decompressed bytes if - // requested. - let need_adler = if (flags & TINFL_FLAG_IGNORE_ADLER32) == 0 { - flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32) != 0 - } else { - // If TINFL_FLAG_IGNORE_ADLER32 is enabled, ignore the checksum. - false - }; - if need_adler && status as i32 >= 0 { - let out_buf_pos = out_buf.position(); - r.check_adler32 = update_adler32(r.check_adler32, &out_buf.get_ref()[out_pos..out_buf_pos]); - - // disabled so that random input from fuzzer would not be rejected early, - // before it has a chance to reach interesting parts of code - if !cfg!(fuzzing) { - // Once we are done, check if the checksum matches with the one provided in the zlib header. - if status == TINFLStatus::Done - && flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 - && r.check_adler32 != r.z_adler32 - { - status = TINFLStatus::Adler32Mismatch; - } - } - } - - ( - status, - in_buf.len() - in_iter.len() - in_undo, - out_buf.position() - out_pos, - ) -} - -#[cfg(test)] -mod test { - use super::*; - - //TODO: Fix these. 
- - fn tinfl_decompress_oxide<'i>( - r: &mut DecompressorOxide, - input_buffer: &'i [u8], - output_buffer: &mut [u8], - flags: u32, - ) -> (TINFLStatus, &'i [u8], usize) { - let (status, in_pos, out_pos) = decompress(r, input_buffer, output_buffer, 0, flags); - (status, &input_buffer[in_pos..], out_pos) - } - - #[test] - fn decompress_zlib() { - let encoded = [ - 120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19, - ]; - let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER; - - let mut b = DecompressorOxide::new(); - const LEN: usize = 32; - let mut b_buf = vec![0; LEN]; - - // This should fail with the out buffer being to small. - let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], b_buf.as_mut_slice(), flags); - - assert_eq!(b_status.0, TINFLStatus::Failed); - - let flags = flags | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; - - b = DecompressorOxide::new(); - - // With TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF set this should no longer fail. 
- let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], b_buf.as_mut_slice(), flags); - - assert_eq!(b_buf[..b_status.2], b"Hello, zlib!"[..]); - assert_eq!(b_status.0, TINFLStatus::Done); - } - - #[test] - fn raw_block() { - const LEN: usize = 64; - - let text = b"Hello, zlib!"; - let encoded = { - let len = text.len(); - let notlen = !len; - let mut encoded = vec![ - 1, - len as u8, - (len >> 8) as u8, - notlen as u8, - (notlen >> 8) as u8, - ]; - encoded.extend_from_slice(&text[..]); - encoded - }; - - //let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER | - let flags = TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; - - let mut b = DecompressorOxide::new(); - - let mut b_buf = vec![0; LEN]; - - let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], b_buf.as_mut_slice(), flags); - assert_eq!(b_buf[..b_status.2], text[..]); - assert_eq!(b_status.0, TINFLStatus::Done); - } - - fn masked_lookup(table: &HuffmanTable, bit_buf: BitBuffer) -> (i32, u32) { - let ret = table.lookup(bit_buf).unwrap(); - (ret.0 & 511, ret.1) - } - - #[test] - fn fixed_table_lookup() { - let mut d = DecompressorOxide::new(); - d.block_type = 1; - start_static_table(&mut d); - let mut l = LocalVars { - bit_buf: d.bit_buf, - num_bits: d.num_bits, - dist: d.dist, - counter: d.counter, - num_extra: d.num_extra, - }; - init_tree(&mut d, &mut l); - let llt = &d.tables[LITLEN_TABLE]; - let dt = &d.tables[DIST_TABLE]; - assert_eq!(masked_lookup(llt, 0b00001100), (0, 8)); - assert_eq!(masked_lookup(llt, 0b00011110), (72, 8)); - assert_eq!(masked_lookup(llt, 0b01011110), (74, 8)); - assert_eq!(masked_lookup(llt, 0b11111101), (143, 8)); - assert_eq!(masked_lookup(llt, 0b000010011), (144, 9)); - assert_eq!(masked_lookup(llt, 0b111111111), (255, 9)); - assert_eq!(masked_lookup(llt, 0b00000000), (256, 7)); - assert_eq!(masked_lookup(llt, 0b1110100), (279, 7)); - assert_eq!(masked_lookup(llt, 0b00000011), (280, 8)); - assert_eq!(masked_lookup(llt, 0b11100011), (287, 8)); - - 
assert_eq!(masked_lookup(dt, 0), (0, 5)); - assert_eq!(masked_lookup(dt, 20), (5, 5)); - } - - fn check_result(input: &[u8], expected_status: TINFLStatus, expected_state: State, zlib: bool) { - let mut r = DecompressorOxide::default(); - let mut output_buf = vec![0; 1024 * 32]; - let flags = if zlib { - inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER - } else { - 0 - } | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF - | TINFL_FLAG_HAS_MORE_INPUT; - let (d_status, _in_bytes, _out_bytes) = - decompress(&mut r, input, &mut output_buf, 0, flags); - assert_eq!(expected_status, d_status); - assert_eq!(expected_state, r.state); - } - - #[test] - fn bogus_input() { - use self::check_result as cr; - const F: TINFLStatus = TINFLStatus::Failed; - const OK: TINFLStatus = TINFLStatus::Done; - // Bad CM. - cr(&[0x77, 0x85], F, State::BadZlibHeader, true); - // Bad window size (but check is correct). - cr(&[0x88, 0x98], F, State::BadZlibHeader, true); - // Bad check bits. - cr(&[0x78, 0x98], F, State::BadZlibHeader, true); - - // Too many code lengths. (From inflate library issues) - cr( - b"M\xff\xffM*\xad\xad\xad\xad\xad\xad\xad\xcd\xcd\xcdM", - F, - State::BadDistOrLiteralTableLength, - false, - ); - - // Bad CLEN (also from inflate library issues) - cr( - b"\xdd\xff\xff*M\x94ffffffffff", - F, - State::BadDistOrLiteralTableLength, - false, - ); - - // Port of inflate coverage tests from zlib-ng - // https://github.com/Dead2/zlib-ng/blob/develop/test/infcover.c - let c = |a, b, c| cr(a, b, c, false); - - // Invalid uncompressed/raw block length. - c(&[0, 0, 0, 0, 0], F, State::BadRawLength); - // Ok empty uncompressed block. - c(&[3, 0], OK, State::DoneForever); - // Invalid block type. - c(&[6], F, State::BlockTypeUnexpected); - // Ok uncompressed block. - c(&[1, 1, 0, 0xfe, 0xff, 0], OK, State::DoneForever); - // Too many litlens, we handle this later than zlib, so this test won't - // give the same result. 
- // c(&[0xfc, 0, 0], F, State::BadTotalSymbols); - // Invalid set of code lengths - TODO Check if this is the correct error for this. - c(&[4, 0, 0xfe, 0xff], F, State::BadTotalSymbols); - // Invalid repeat in list of code lengths. - // (Try to repeat a non-existent code.) - c(&[4, 0, 0x24, 0x49, 0], F, State::BadCodeSizeDistPrevLookup); - // Missing end of block code (should we have a separate error for this?) - fails on further input - // c(&[4, 0, 0x24, 0xe9, 0xff, 0x6d], F, State::BadTotalSymbols); - // Invalid set of literals/lengths - c( - &[ - 4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x71, 0xff, 0xff, 0x93, 0x11, 0, - ], - F, - State::BadTotalSymbols, - ); - // Invalid set of distances _ needsmoreinput - // c(&[4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x0f, 0xb4, 0xff, 0xff, 0xc3, 0x84], F, State::BadTotalSymbols); - // Invalid distance code - c(&[2, 0x7e, 0xff, 0xff], F, State::InvalidDist); - - // Distance refers to position before the start - c( - &[0x0c, 0xc0, 0x81, 0, 0, 0, 0, 0, 0x90, 0xff, 0x6b, 0x4, 0], - F, - State::DistanceOutOfBounds, - ); - - // Trailer - // Bad gzip trailer checksum GZip header not handled by miniz_oxide - //cr(&[0x1f, 0x8b, 0x08 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0x03, 0, 0, 0, 0, 0x01], F, State::BadCRC, false) - // Bad gzip trailer length - //cr(&[0x1f, 0x8b, 0x08 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0x01], F, State::BadCRC, false) - } - - #[test] - fn empty_output_buffer_non_wrapping() { - let encoded = [ - 120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19, - ]; - let flags = TINFL_FLAG_COMPUTE_ADLER32 - | TINFL_FLAG_PARSE_ZLIB_HEADER - | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; - let mut r = DecompressorOxide::new(); - let mut output_buf = vec![]; - // Check that we handle an empty buffer properly and not panicking. 
- // https://github.com/Frommi/miniz_oxide/issues/23 - let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags); - assert_eq!(res, (TINFLStatus::HasMoreOutput, 4, 0)); - } - - #[test] - fn empty_output_buffer_wrapping() { - let encoded = [ - 0x73, 0x49, 0x4d, 0xcb, 0x49, 0x2c, 0x49, 0x55, 0x00, 0x11, 0x00, - ]; - let flags = TINFL_FLAG_COMPUTE_ADLER32; - let mut r = DecompressorOxide::new(); - let mut output_buf = vec![]; - // Check that we handle an empty buffer properly and not panicking. - // https://github.com/Frommi/miniz_oxide/issues/23 - let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags); - assert_eq!(res, (TINFLStatus::HasMoreOutput, 2, 0)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/inflate/mod.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/inflate/mod.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/inflate/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/inflate/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,337 +0,0 @@ -//! This module contains functionality for decompression. - -#[cfg(feature = "with-alloc")] -use crate::alloc::{boxed::Box, vec, vec::Vec}; -use ::core::usize; -#[cfg(all(feature = "std", feature = "with-alloc"))] -use std::error::Error; - -pub mod core; -mod output_buffer; -pub mod stream; -use self::core::*; - -const TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS: i32 = -4; -const TINFL_STATUS_BAD_PARAM: i32 = -3; -const TINFL_STATUS_ADLER32_MISMATCH: i32 = -2; -const TINFL_STATUS_FAILED: i32 = -1; -const TINFL_STATUS_DONE: i32 = 0; -const TINFL_STATUS_NEEDS_MORE_INPUT: i32 = 1; -const TINFL_STATUS_HAS_MORE_OUTPUT: i32 = 2; - -/// Return status codes. -#[repr(i8)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum TINFLStatus { - /// More input data was expected, but the caller indicated that there was no more data, so the - /// input stream is likely truncated. 
- /// - /// This can't happen if you have provided the - /// [`TINFL_FLAG_HAS_MORE_INPUT`][core::inflate_flags::TINFL_FLAG_HAS_MORE_INPUT] flag to the - /// decompression. By setting that flag, you indicate more input exists but is not provided, - /// and so reaching the end of the input data without finding the end of the compressed stream - /// would instead return a [`NeedsMoreInput`][Self::NeedsMoreInput] status. - FailedCannotMakeProgress = TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS as i8, - - /// The output buffer is an invalid size; consider the `flags` parameter. - BadParam = TINFL_STATUS_BAD_PARAM as i8, - - /// The decompression went fine, but the adler32 checksum did not match the one - /// provided in the header. - Adler32Mismatch = TINFL_STATUS_ADLER32_MISMATCH as i8, - - /// Failed to decompress due to invalid data. - Failed = TINFL_STATUS_FAILED as i8, - - /// Finished decompression without issues. - /// - /// This indicates the end of the compressed stream has been reached. - Done = TINFL_STATUS_DONE as i8, - - /// The decompressor needs more input data to continue decompressing. - /// - /// This occurs when there's no more consumable input, but the end of the stream hasn't been - /// reached, and you have supplied the - /// [`TINFL_FLAG_HAS_MORE_INPUT`][core::inflate_flags::TINFL_FLAG_HAS_MORE_INPUT] flag to the - /// decompressor. Had you not supplied that flag (which would mean you were asserting that you - /// believed all the data was available) you would have gotten a - /// [`FailedCannotMakeProcess`][Self::FailedCannotMakeProgress] instead. - NeedsMoreInput = TINFL_STATUS_NEEDS_MORE_INPUT as i8, - - /// There is still pending data that didn't fit in the output buffer. 
- HasMoreOutput = TINFL_STATUS_HAS_MORE_OUTPUT as i8, -} - -impl TINFLStatus { - pub fn from_i32(value: i32) -> Option { - use self::TINFLStatus::*; - match value { - TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS => Some(FailedCannotMakeProgress), - TINFL_STATUS_BAD_PARAM => Some(BadParam), - TINFL_STATUS_ADLER32_MISMATCH => Some(Adler32Mismatch), - TINFL_STATUS_FAILED => Some(Failed), - TINFL_STATUS_DONE => Some(Done), - TINFL_STATUS_NEEDS_MORE_INPUT => Some(NeedsMoreInput), - TINFL_STATUS_HAS_MORE_OUTPUT => Some(HasMoreOutput), - _ => None, - } - } -} - -/// Struct return when decompress_to_vec functions fail. -#[cfg(feature = "with-alloc")] -#[derive(Debug)] -pub struct DecompressError { - /// Decompressor status on failure. See [TINFLStatus] for details. - pub status: TINFLStatus, - /// The currently decompressed data if any. - pub output: Vec, -} - -#[cfg(feature = "with-alloc")] -impl alloc::fmt::Display for DecompressError { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - f.write_str(match self.status { - TINFLStatus::FailedCannotMakeProgress => "Truncated input stream", - TINFLStatus::BadParam => "Invalid output buffer size", - TINFLStatus::Adler32Mismatch => "Adler32 checksum mismatch", - TINFLStatus::Failed => "Invalid input data", - TINFLStatus::Done => unreachable!(), - TINFLStatus::NeedsMoreInput => "Truncated input stream", - TINFLStatus::HasMoreOutput => "Output size exceeded the specified limit", - }) - } -} - -/// Implement Error trait only if std feature is requested as it requires std. -#[cfg(all(feature = "std", feature = "with-alloc"))] -impl Error for DecompressError {} - -#[cfg(feature = "with-alloc")] -fn decompress_error(status: TINFLStatus, output: Vec) -> Result, DecompressError> { - Err(DecompressError { status, output }) -} - -/// Decompress the deflate-encoded data in `input` to a vector. 
-/// -/// NOTE: This function will not bound the output, so if the output is large enough it can result in an out of memory error. -/// It is therefore suggested to not use this for anything other than test programs, use the functions with a specified limit, or -/// ideally streaming decompression via the [flate2](https://github.com/alexcrichton/flate2-rs) library instead. -/// -/// Returns a [`Result`] containing the [`Vec`] of decompressed data on success, and a [struct][DecompressError] containing the status and so far decompressed data if any on failure. -#[inline] -#[cfg(feature = "with-alloc")] -pub fn decompress_to_vec(input: &[u8]) -> Result, DecompressError> { - decompress_to_vec_inner(input, 0, usize::max_value()) -} - -/// Decompress the deflate-encoded data (with a zlib wrapper) in `input` to a vector. -/// -/// NOTE: This function will not bound the output, so if the output is large enough it can result in an out of memory error. -/// It is therefore suggested to not use this for anything other than test programs, use the functions with a specified limit, or -/// ideally streaming decompression via the [flate2](https://github.com/alexcrichton/flate2-rs) library instead. -/// -/// Returns a [`Result`] containing the [`Vec`] of decompressed data on success, and a [struct][DecompressError] containing the status and so far decompressed data if any on failure. -#[inline] -#[cfg(feature = "with-alloc")] -pub fn decompress_to_vec_zlib(input: &[u8]) -> Result, DecompressError> { - decompress_to_vec_inner( - input, - inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER, - usize::max_value(), - ) -} - -/// Decompress the deflate-encoded data in `input` to a vector. -/// -/// The vector is grown to at most `max_size` bytes; if the data does not fit in that size, -/// the error [struct][DecompressError] will contain the status [`TINFLStatus::HasMoreOutput`] and the data that was decompressed on failure. 
-/// -/// As this function tries to decompress everything in one go, it's not ideal for general use outside of tests or where the output size is expected to be small. -/// It is suggested to use streaming decompression via the [flate2](https://github.com/alexcrichton/flate2-rs) library instead. -/// -/// Returns a [`Result`] containing the [`Vec`] of decompressed data on success, and a [struct][DecompressError] on failure. -#[inline] -#[cfg(feature = "with-alloc")] -pub fn decompress_to_vec_with_limit( - input: &[u8], - max_size: usize, -) -> Result, DecompressError> { - decompress_to_vec_inner(input, 0, max_size) -} - -/// Decompress the deflate-encoded data (with a zlib wrapper) in `input` to a vector. -/// The vector is grown to at most `max_size` bytes; if the data does not fit in that size, -/// the error [struct][DecompressError] will contain the status [`TINFLStatus::HasMoreOutput`] and the data that was decompressed on failure. -/// -/// As this function tries to decompress everything in one go, it's not ideal for general use outside of tests or where the output size is expected to be small. -/// It is suggested to use streaming decompression via the [flate2](https://github.com/alexcrichton/flate2-rs) library instead. -/// -/// Returns a [`Result`] containing the [`Vec`] of decompressed data on success, and a [struct][DecompressError] on failure. -#[inline] -#[cfg(feature = "with-alloc")] -pub fn decompress_to_vec_zlib_with_limit( - input: &[u8], - max_size: usize, -) -> Result, DecompressError> { - decompress_to_vec_inner(input, inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER, max_size) -} - -/// Backend of various to-[`Vec`] decompressions. -/// -/// Returns [`Vec`] of decompressed data on success and the [error struct][DecompressError] with details on failure. 
-#[cfg(feature = "with-alloc")] -fn decompress_to_vec_inner( - input: &[u8], - flags: u32, - max_output_size: usize, -) -> Result, DecompressError> { - let flags = flags | inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; - let mut ret: Vec = vec![0; input.len().saturating_mul(2).min(max_output_size)]; - - let mut decomp = Box::::default(); - - let mut in_pos = 0; - let mut out_pos = 0; - loop { - // Wrap the whole output slice so we know we have enough of the - // decompressed data for matches. - let (status, in_consumed, out_consumed) = - decompress(&mut decomp, &input[in_pos..], &mut ret, out_pos, flags); - in_pos += in_consumed; - out_pos += out_consumed; - - match status { - TINFLStatus::Done => { - ret.truncate(out_pos); - return Ok(ret); - } - - TINFLStatus::HasMoreOutput => { - // if the buffer has already reached the size limit, return an error - if ret.len() >= max_output_size { - return decompress_error(TINFLStatus::HasMoreOutput, ret); - } - // calculate the new length, capped at `max_output_size` - let new_len = ret.len().saturating_mul(2).min(max_output_size); - ret.resize(new_len, 0); - } - - _ => return decompress_error(status, ret), - } - } -} - -/// Decompress one or more source slices from an iterator into the output slice. -/// -/// * On success, returns the number of bytes that were written. -/// * On failure, returns the failure status code. -/// -/// This will fail if the output buffer is not large enough, but in that case -/// the output buffer will still contain the partial decompression. -/// -/// * `out` the output buffer. -/// * `it` the iterator of input slices. -/// * `zlib_header` if the first slice out of the iterator is expected to have a -/// Zlib header. Otherwise the slices are assumed to be the deflate data only. -/// * `ignore_adler32` if the adler32 checksum should be calculated or not. 
-pub fn decompress_slice_iter_to_slice<'out, 'inp>( - out: &'out mut [u8], - it: impl Iterator, - zlib_header: bool, - ignore_adler32: bool, -) -> Result { - use self::core::inflate_flags::*; - - let mut it = it.peekable(); - let r = &mut DecompressorOxide::new(); - let mut out_pos = 0; - while let Some(in_buf) = it.next() { - let has_more = it.peek().is_some(); - let flags = { - let mut f = TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; - if zlib_header { - f |= TINFL_FLAG_PARSE_ZLIB_HEADER; - } - if ignore_adler32 { - f |= TINFL_FLAG_IGNORE_ADLER32; - } - if has_more { - f |= TINFL_FLAG_HAS_MORE_INPUT; - } - f - }; - let (status, _input_read, bytes_written) = decompress(r, in_buf, out, out_pos, flags); - out_pos += bytes_written; - match status { - TINFLStatus::NeedsMoreInput => continue, - TINFLStatus::Done => return Ok(out_pos), - e => return Err(e), - } - } - // If we ran out of source slices without getting a `Done` from the - // decompression we can call it a failure. - Err(TINFLStatus::FailedCannotMakeProgress) -} - -#[cfg(test)] -mod test { - use super::{ - decompress_slice_iter_to_slice, decompress_to_vec_zlib, decompress_to_vec_zlib_with_limit, - DecompressError, TINFLStatus, - }; - const ENCODED: [u8; 20] = [ - 120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19, - ]; - - #[test] - fn decompress_vec() { - let res = decompress_to_vec_zlib(&ENCODED[..]).unwrap(); - assert_eq!(res.as_slice(), &b"Hello, zlib!"[..]); - } - - #[test] - fn decompress_vec_with_high_limit() { - let res = decompress_to_vec_zlib_with_limit(&ENCODED[..], 100_000).unwrap(); - assert_eq!(res.as_slice(), &b"Hello, zlib!"[..]); - } - - #[test] - fn fail_to_decompress_with_limit() { - let res = decompress_to_vec_zlib_with_limit(&ENCODED[..], 8); - match res { - Err(DecompressError { - status: TINFLStatus::HasMoreOutput, - .. 
- }) => (), // expected result - _ => panic!("Decompression output size limit was not enforced"), - } - } - - #[test] - fn test_decompress_slice_iter_to_slice() { - // one slice - let mut out = [0_u8; 12_usize]; - let r = - decompress_slice_iter_to_slice(&mut out, Some(&ENCODED[..]).into_iter(), true, false); - assert_eq!(r, Ok(12)); - assert_eq!(&out[..12], &b"Hello, zlib!"[..]); - - // some chunks at a time - for chunk_size in 1..13 { - // Note: because of https://github.com/Frommi/miniz_oxide/issues/110 our - // out buffer needs to have +1 byte available when the chunk size cuts - // the adler32 data off from the last actual data. - let mut out = [0_u8; 12_usize + 1]; - let r = - decompress_slice_iter_to_slice(&mut out, ENCODED.chunks(chunk_size), true, false); - assert_eq!(r, Ok(12)); - assert_eq!(&out[..12], &b"Hello, zlib!"[..]); - } - - // output buffer too small - let mut out = [0_u8; 3_usize]; - let r = decompress_slice_iter_to_slice(&mut out, ENCODED.chunks(7), true, false); - assert!(r.is_err()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/inflate/output_buffer.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/inflate/output_buffer.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/inflate/output_buffer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/inflate/output_buffer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -/// A wrapper for the output slice used when decompressing. -/// -/// Using this rather than `Cursor` lets us implement the writing methods directly on -/// the buffer and lets us use a usize rather than u64 for the position which helps with -/// performance on 32-bit systems. 
-pub struct OutputBuffer<'a> { - slice: &'a mut [u8], - position: usize, -} - -impl<'a> OutputBuffer<'a> { - #[inline] - pub fn from_slice_and_pos(slice: &'a mut [u8], position: usize) -> OutputBuffer<'a> { - OutputBuffer { slice, position } - } - - #[inline] - pub const fn position(&self) -> usize { - self.position - } - - #[inline] - pub fn set_position(&mut self, position: usize) { - self.position = position; - } - - /// Write a byte to the current position and increment - /// - /// Assumes that there is space. - #[inline] - pub fn write_byte(&mut self, byte: u8) { - self.slice[self.position] = byte; - self.position += 1; - } - - /// Write a slice to the current position and increment - /// - /// Assumes that there is space. - #[inline] - pub fn write_slice(&mut self, data: &[u8]) { - let len = data.len(); - self.slice[self.position..self.position + len].copy_from_slice(data); - self.position += data.len(); - } - - #[inline] - pub const fn bytes_left(&self) -> usize { - self.slice.len() - self.position - } - - #[inline] - pub const fn get_ref(&self) -> &[u8] { - self.slice - } - - #[inline] - pub fn get_mut(&mut self) -> &mut [u8] { - self.slice - } -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/inflate/stream.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/inflate/stream.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/inflate/stream.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/inflate/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,418 +0,0 @@ -//! Extra streaming decompression functionality. -//! -//! As of now this is mainly intended for use to build a higher-level wrapper. 
-#[cfg(feature = "with-alloc")] -use crate::alloc::boxed::Box; -use core::{cmp, mem}; - -use crate::inflate::core::{decompress, inflate_flags, DecompressorOxide, TINFL_LZ_DICT_SIZE}; -use crate::inflate::TINFLStatus; -use crate::{DataFormat, MZError, MZFlush, MZResult, MZStatus, StreamResult}; - -/// Tag that determines reset policy of [InflateState](struct.InflateState.html) -pub trait ResetPolicy { - /// Performs reset - fn reset(&self, state: &mut InflateState); -} - -/// Resets state, without performing expensive ops (e.g. zeroing buffer) -/// -/// Note that not zeroing buffer can lead to security issues when dealing with untrusted input. -pub struct MinReset; - -impl ResetPolicy for MinReset { - fn reset(&self, state: &mut InflateState) { - state.decompressor().init(); - state.dict_ofs = 0; - state.dict_avail = 0; - state.first_call = true; - state.has_flushed = false; - state.last_status = TINFLStatus::NeedsMoreInput; - } -} - -/// Resets state and zero memory, continuing to use the same data format. -pub struct ZeroReset; - -impl ResetPolicy for ZeroReset { - #[inline] - fn reset(&self, state: &mut InflateState) { - MinReset.reset(state); - state.dict = [0; TINFL_LZ_DICT_SIZE]; - } -} - -/// Full reset of the state, including zeroing memory. -/// -/// Requires to provide new data format. -pub struct FullReset(pub DataFormat); - -impl ResetPolicy for FullReset { - #[inline] - fn reset(&self, state: &mut InflateState) { - ZeroReset.reset(state); - state.data_format = self.0; - } -} - -/// A struct that compbines a decompressor with extra data for streaming decompression. -/// -pub struct InflateState { - /// Inner decompressor struct - decomp: DecompressorOxide, - - /// Buffer of input bytes for matches. - /// TODO: Could probably do this a bit cleaner with some - /// Cursor-like class. - /// We may also look into whether we need to keep a buffer here, or just one in the - /// decompressor struct. 
- dict: [u8; TINFL_LZ_DICT_SIZE], - /// Where in the buffer are we currently at? - dict_ofs: usize, - /// How many bytes of data to be flushed is there currently in the buffer? - dict_avail: usize, - - first_call: bool, - has_flushed: bool, - - /// Whether the input data is wrapped in a zlib header and checksum. - /// TODO: This should be stored in the decompressor. - data_format: DataFormat, - last_status: TINFLStatus, -} - -impl Default for InflateState { - fn default() -> Self { - InflateState { - decomp: DecompressorOxide::default(), - dict: [0; TINFL_LZ_DICT_SIZE], - dict_ofs: 0, - dict_avail: 0, - first_call: true, - has_flushed: false, - data_format: DataFormat::Raw, - last_status: TINFLStatus::NeedsMoreInput, - } - } -} -impl InflateState { - /// Create a new state. - /// - /// Note that this struct is quite large due to internal buffers, and as such storing it on - /// the stack is not recommended. - /// - /// # Parameters - /// `data_format`: Determines whether the compressed data is assumed to wrapped with zlib - /// metadata. - pub fn new(data_format: DataFormat) -> InflateState { - InflateState { - data_format, - ..Default::default() - } - } - - /// Create a new state on the heap. - /// - /// # Parameters - /// `data_format`: Determines whether the compressed data is assumed to wrapped with zlib - /// metadata. - #[cfg(feature = "with-alloc")] - pub fn new_boxed(data_format: DataFormat) -> Box { - let mut b: Box = Box::default(); - b.data_format = data_format; - b - } - - /// Access the innner decompressor. - pub fn decompressor(&mut self) -> &mut DecompressorOxide { - &mut self.decomp - } - - /// Return the status of the last call to `inflate` with this `InflateState`. - pub const fn last_status(&self) -> TINFLStatus { - self.last_status - } - - /// Create a new state using miniz/zlib style window bits parameter. - /// - /// The decompressor does not support different window sizes. 
As such, - /// any positive (>0) value will set the zlib header flag, while a negative one - /// will not. - #[cfg(feature = "with-alloc")] - pub fn new_boxed_with_window_bits(window_bits: i32) -> Box { - let mut b: Box = Box::default(); - b.data_format = DataFormat::from_window_bits(window_bits); - b - } - - #[inline] - /// Reset the decompressor without re-allocating memory, using the given - /// data format. - pub fn reset(&mut self, data_format: DataFormat) { - self.reset_as(FullReset(data_format)); - } - - #[inline] - /// Resets the state according to specified policy. - pub fn reset_as(&mut self, policy: T) { - policy.reset(self) - } -} - -/// Try to decompress from `input` to `output` with the given [`InflateState`] -/// -/// # `flush` -/// -/// Generally, the various [`MZFlush`] flags have meaning only on the compression side. They can be -/// supplied here, but the only one that has any semantic meaning is [`MZFlush::Finish`], which is a -/// signal that the stream is expected to finish, and failing to do so is an error. It isn't -/// necessary to specify it when the stream ends; you'll still get returned a -/// [`MZStatus::StreamEnd`] anyway. Other values either have no effect or cause errors. It's -/// likely that you'll almost always just want to use [`MZFlush::None`]. -/// -/// # Errors -/// -/// Returns [`MZError::Buf`] if the size of the `output` slice is empty or no progress was made due -/// to lack of expected input data, or if called with [`MZFlush::Finish`] and input wasn't all -/// consumed. -/// -/// Returns [`MZError::Data`] if this or a a previous call failed with an error return from -/// [`TINFLStatus`]; probably indicates corrupted data. -/// -/// Returns [`MZError::Stream`] when called with [`MZFlush::Full`] (meaningless on -/// decompression), or when called without [`MZFlush::Finish`] after an earlier call with -/// [`MZFlush::Finish`] has been made. 
-pub fn inflate( - state: &mut InflateState, - input: &[u8], - output: &mut [u8], - flush: MZFlush, -) -> StreamResult { - let mut bytes_consumed = 0; - let mut bytes_written = 0; - let mut next_in = input; - let mut next_out = output; - - if flush == MZFlush::Full { - return StreamResult::error(MZError::Stream); - } - - let mut decomp_flags = if state.data_format == DataFormat::Zlib { - inflate_flags::TINFL_FLAG_COMPUTE_ADLER32 - } else { - inflate_flags::TINFL_FLAG_IGNORE_ADLER32 - }; - - if (state.data_format == DataFormat::Zlib) - | (state.data_format == DataFormat::ZLibIgnoreChecksum) - { - decomp_flags |= inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER; - } - - let first_call = state.first_call; - state.first_call = false; - if (state.last_status as i32) < 0 { - return StreamResult::error(MZError::Data); - } - - if state.has_flushed && (flush != MZFlush::Finish) { - return StreamResult::error(MZError::Stream); - } - state.has_flushed |= flush == MZFlush::Finish; - - if (flush == MZFlush::Finish) && first_call { - decomp_flags |= inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; - - let status = decompress(&mut state.decomp, next_in, next_out, 0, decomp_flags); - let in_bytes = status.1; - let out_bytes = status.2; - let status = status.0; - - state.last_status = status; - - bytes_consumed += in_bytes; - bytes_written += out_bytes; - - let ret_status = { - if (status as i32) < 0 { - Err(MZError::Data) - } else if status != TINFLStatus::Done { - state.last_status = TINFLStatus::Failed; - Err(MZError::Buf) - } else { - Ok(MZStatus::StreamEnd) - } - }; - return StreamResult { - bytes_consumed, - bytes_written, - status: ret_status, - }; - } - - if flush != MZFlush::Finish { - decomp_flags |= inflate_flags::TINFL_FLAG_HAS_MORE_INPUT; - } - - if state.dict_avail != 0 { - bytes_written += push_dict_out(state, &mut next_out); - return StreamResult { - bytes_consumed, - bytes_written, - status: Ok( - if (state.last_status == TINFLStatus::Done) && (state.dict_avail 
== 0) { - MZStatus::StreamEnd - } else { - MZStatus::Ok - }, - ), - }; - } - - let status = inflate_loop( - state, - &mut next_in, - &mut next_out, - &mut bytes_consumed, - &mut bytes_written, - decomp_flags, - flush, - ); - StreamResult { - bytes_consumed, - bytes_written, - status, - } -} - -fn inflate_loop( - state: &mut InflateState, - next_in: &mut &[u8], - next_out: &mut &mut [u8], - total_in: &mut usize, - total_out: &mut usize, - decomp_flags: u32, - flush: MZFlush, -) -> MZResult { - let orig_in_len = next_in.len(); - loop { - let status = decompress( - &mut state.decomp, - *next_in, - &mut state.dict, - state.dict_ofs, - decomp_flags, - ); - - let in_bytes = status.1; - let out_bytes = status.2; - let status = status.0; - - state.last_status = status; - - *next_in = &next_in[in_bytes..]; - *total_in += in_bytes; - - state.dict_avail = out_bytes; - *total_out += push_dict_out(state, next_out); - - // The stream was corrupted, and decompression failed. - if (status as i32) < 0 { - return Err(MZError::Data); - } - - // The decompressor has flushed all it's data and is waiting for more input, but - // there was no more input provided. - if (status == TINFLStatus::NeedsMoreInput) && orig_in_len == 0 { - return Err(MZError::Buf); - } - - if flush == MZFlush::Finish { - if status == TINFLStatus::Done { - // There is not enough space in the output buffer to flush the remaining - // decompressed data in the internal buffer. - return if state.dict_avail != 0 { - Err(MZError::Buf) - } else { - Ok(MZStatus::StreamEnd) - }; - // No more space in the output buffer, but we're not done. - } else if next_out.is_empty() { - return Err(MZError::Buf); - } - } else { - // We're not expected to finish, so it's fine if we can't flush everything yet. 
- let empty_buf = next_in.is_empty() || next_out.is_empty(); - if (status == TINFLStatus::Done) || empty_buf || (state.dict_avail != 0) { - return if (status == TINFLStatus::Done) && (state.dict_avail == 0) { - // No more data left, we're done. - Ok(MZStatus::StreamEnd) - } else { - // Ok for now, still waiting for more input data or output space. - Ok(MZStatus::Ok) - }; - } - } - } -} - -fn push_dict_out(state: &mut InflateState, next_out: &mut &mut [u8]) -> usize { - let n = cmp::min(state.dict_avail as usize, next_out.len()); - (next_out[..n]).copy_from_slice(&state.dict[state.dict_ofs..state.dict_ofs + n]); - *next_out = &mut mem::take(next_out)[n..]; - state.dict_avail -= n; - state.dict_ofs = (state.dict_ofs + (n)) & (TINFL_LZ_DICT_SIZE - 1); - n -} - -#[cfg(test)] -mod test { - use super::{inflate, InflateState}; - use crate::{DataFormat, MZFlush, MZStatus}; - use alloc::vec; - - #[test] - fn test_state() { - let encoded = [ - 120u8, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, - 19, - ]; - let mut out = vec![0; 50]; - let mut state = InflateState::new_boxed(DataFormat::Zlib); - let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish); - let status = res.status.expect("Failed to decompress!"); - assert_eq!(status, MZStatus::StreamEnd); - assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]); - assert_eq!(res.bytes_consumed, encoded.len()); - - state.reset_as(super::ZeroReset); - out.iter_mut().map(|x| *x = 0).count(); - let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish); - let status = res.status.expect("Failed to decompress!"); - assert_eq!(status, MZStatus::StreamEnd); - assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]); - assert_eq!(res.bytes_consumed, encoded.len()); - - state.reset_as(super::MinReset); - out.iter_mut().map(|x| *x = 0).count(); - let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish); - let status = res.status.expect("Failed to 
decompress!"); - assert_eq!(status, MZStatus::StreamEnd); - assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]); - assert_eq!(res.bytes_consumed, encoded.len()); - assert_eq!(state.decompressor().adler32(), Some(459605011)); - - // Test state when not computing adler. - state = InflateState::new_boxed(DataFormat::ZLibIgnoreChecksum); - out.iter_mut().map(|x| *x = 0).count(); - let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish); - let status = res.status.expect("Failed to decompress!"); - assert_eq!(status, MZStatus::StreamEnd); - assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]); - assert_eq!(res.bytes_consumed, encoded.len()); - // Not computed, so should be Some(1) - assert_eq!(state.decompressor().adler32(), Some(1)); - // Should still have the checksum read from the header file. - assert_eq!(state.decompressor().adler32_header(), Some(459605011)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/lib.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/lib.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,209 +0,0 @@ -//! A pure rust replacement for the [miniz](https://github.com/richgel999/miniz) -//! DEFLATE/zlib encoder/decoder. -//! Used a rust back-end for the -//! [flate2](https://github.com/alexcrichton/flate2-rs) crate. -//! -//! # Usage -//! ## Simple compression/decompression: -//! ``` rust -//! -//! use miniz_oxide::inflate::decompress_to_vec; -//! use miniz_oxide::deflate::compress_to_vec; -//! -//! fn roundtrip(data: &[u8]) { -//! let compressed = compress_to_vec(data, 6); -//! let decompressed = decompress_to_vec(compressed.as_slice()).expect("Failed to decompress!"); -//! # let _ = decompressed; -//! } -//! -//! # roundtrip(b"Test_data test data lalalal blabla"); -//! -//! 
``` - -#![forbid(unsafe_code)] -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(feature = "with-alloc")] -extern crate alloc; - -#[cfg(feature = "with-alloc")] -pub mod deflate; -pub mod inflate; -mod shared; - -pub use crate::shared::update_adler32 as mz_adler32_oxide; -pub use crate::shared::{MZ_ADLER32_INIT, MZ_DEFAULT_WINDOW_BITS}; - -/// A list of flush types. -/// -/// See for more in-depth info. -#[repr(i32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum MZFlush { - /// Don't force any flushing. - /// Used when more input data is expected. - None = 0, - /// Zlib partial flush. - /// Currently treated as [`Sync`]. - Partial = 1, - /// Finish compressing the currently buffered data, and output an empty raw block. - /// Has no use in decompression. - Sync = 2, - /// Same as [`Sync`], but resets the compression dictionary so that further compressed - /// data does not depend on data compressed before the flush. - /// - /// Has no use in decompression, and is an error to supply in that case. - Full = 3, - /// Attempt to flush the remaining data and end the stream. - Finish = 4, - /// Not implemented. - Block = 5, -} - -impl MZFlush { - /// Create an MZFlush value from an integer value. - /// - /// Returns `MZError::Param` on invalid values. - pub fn new(flush: i32) -> Result { - match flush { - 0 => Ok(MZFlush::None), - 1 | 2 => Ok(MZFlush::Sync), - 3 => Ok(MZFlush::Full), - 4 => Ok(MZFlush::Finish), - _ => Err(MZError::Param), - } - } -} - -/// A list of miniz successful status codes. -/// -/// These are emitted as the [`Ok`] side of a [`MZResult`] in the [`StreamResult`] returned from -/// [`deflate::stream::deflate()`] or [`inflate::stream::inflate()`]. -#[repr(i32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum MZStatus { - /// Operation succeeded. - /// - /// Some data was decompressed or compressed; see the byte counters in the [`StreamResult`] for - /// details. 
- Ok = 0, - - /// Operation succeeded and end of deflate stream was found. - /// - /// X-ref [`TINFLStatus::Done`][inflate::TINFLStatus::Done] or - /// [`TDEFLStatus::Done`][deflate::core::TDEFLStatus::Done] for `inflate` or `deflate` - /// respectively. - StreamEnd = 1, - - /// Unused - NeedDict = 2, -} - -/// A list of miniz failed status codes. -/// -/// These are emitted as the [`Err`] side of a [`MZResult`] in the [`StreamResult`] returned from -/// [`deflate::stream::deflate()`] or [`inflate::stream::inflate()`]. -#[repr(i32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum MZError { - /// Unused - ErrNo = -1, - - /// General stream error. - /// - /// See [`inflate::stream::inflate()`] docs for details of how it can occur there. - /// - /// See [`deflate::stream::deflate()`] docs for how it can in principle occur there, though it's - /// believed impossible in practice. - Stream = -2, - - /// Error in inflation; see [`inflate::stream::inflate()`] for details. - /// - /// Not returned from [`deflate::stream::deflate()`]. - Data = -3, - - /// Unused - Mem = -4, - - /// Buffer-related error. - /// - /// See the docs of [`deflate::stream::deflate()`] or [`inflate::stream::inflate()`] for details - /// of when it would trigger in the one you're using. - Buf = -5, - - /// Unused - Version = -6, - - /// Bad parameters. - /// - /// This can be returned from [`deflate::stream::deflate()`] in the case of bad parameters. See - /// [`TDEFLStatus::BadParam`][deflate::core::TDEFLStatus::BadParam]. - Param = -10_000, -} - -/// How compressed data is wrapped. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum DataFormat { - /// Wrapped using the [zlib](http://www.zlib.org/rfc-zlib.html) format. - Zlib, - /// Zlib wrapped but ignore and don't compute the adler32 checksum. - /// Currently only used for inflate, behaves the same as Zlib for compression. - ZLibIgnoreChecksum, - /// Raw DEFLATE. 
- Raw, -} - -impl DataFormat { - pub fn from_window_bits(window_bits: i32) -> DataFormat { - if window_bits > 0 { - DataFormat::Zlib - } else { - DataFormat::Raw - } - } - - pub fn to_window_bits(self) -> i32 { - match self { - DataFormat::Zlib | DataFormat::ZLibIgnoreChecksum => shared::MZ_DEFAULT_WINDOW_BITS, - DataFormat::Raw => -shared::MZ_DEFAULT_WINDOW_BITS, - } - } -} - -/// `Result` alias for all miniz status codes both successful and failed. -pub type MZResult = Result; - -/// A structure containing the result of a call to the inflate or deflate streaming functions. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct StreamResult { - /// The number of bytes consumed from the input slice. - pub bytes_consumed: usize, - /// The number of bytes written to the output slice. - pub bytes_written: usize, - /// The return status of the call. - pub status: MZResult, -} - -impl StreamResult { - #[inline] - pub const fn error(error: MZError) -> StreamResult { - StreamResult { - bytes_consumed: 0, - bytes_written: 0, - status: Err(error), - } - } -} - -impl core::convert::From for MZResult { - fn from(res: StreamResult) -> Self { - res.status - } -} - -impl core::convert::From<&StreamResult> for MZResult { - fn from(res: &StreamResult) -> Self { - res.status - } -} diff -Nru s390-tools-2.31.0/rust-vendor/miniz_oxide/src/shared.rs s390-tools-2.33.1/rust-vendor/miniz_oxide/src/shared.rs --- s390-tools-2.31.0/rust-vendor/miniz_oxide/src/shared.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/miniz_oxide/src/shared.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -#[doc(hidden)] -pub const MZ_ADLER32_INIT: u32 = 1; - -#[doc(hidden)] -pub const MZ_DEFAULT_WINDOW_BITS: i32 = 15; - -pub const HUFFMAN_LENGTH_ORDER: [u8; 19] = [ - 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, -]; - -#[doc(hidden)] -#[cfg(not(feature = "simd"))] -pub fn update_adler32(adler: u32, data: &[u8]) -> u32 { - let mut hash = 
adler::Adler32::from_checksum(adler); - hash.write_slice(data); - hash.checksum() -} - -#[doc(hidden)] -#[cfg(feature = "simd")] -pub fn update_adler32(adler: u32, data: &[u8]) -> u32 { - let mut hash = simd_adler32::Adler32::from_checksum(adler); - hash.write(data); - hash.finish() -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/mio/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/mio/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/mio/Cargo.lock s390-tools-2.33.1/rust-vendor/mio/Cargo.lock --- s390-tools-2.31.0/rust-vendor/mio/Cargo.lock 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 @@ -1,162 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "log", -] - -[[package]] -name = "getrandom" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "libc" -version = "0.2.139" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" - -[[package]] -name = "log" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "mio" -version = "0.8.8" -dependencies = [ - "env_logger", - "libc", - "log", - "rand", - "wasi", - "windows-sys", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = 
"0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" diff -Nru s390-tools-2.31.0/rust-vendor/mio/Cargo.toml s390-tools-2.33.1/rust-vendor/mio/Cargo.toml --- s390-tools-2.31.0/rust-vendor/mio/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,127 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "mio" -version = "0.8.8" -authors = [ - "Carl Lerche ", - "Thomas de Zeeuw ", - "Tokio Contributors ", -] -include = [ - "Cargo.toml", - "LICENSE", - "README.md", - "CHANGELOG.md", - "src/**/*.rs", - "examples/**/*.rs", -] -description = "Lightweight non-blocking I/O." 
-homepage = "https://github.com/tokio-rs/mio" -readme = "README.md" -keywords = [ - "io", - "async", - "non-blocking", -] -categories = ["asynchronous"] -license = "MIT" -repository = "https://github.com/tokio-rs/mio" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", -] -targets = [ - "aarch64-apple-ios", - "aarch64-linux-android", - "wasm32-wasi", - "x86_64-apple-darwin", - "x86_64-pc-windows-msvc", - "x86_64-unknown-dragonfly", - "x86_64-unknown-freebsd", - "x86_64-unknown-illumos", - "x86_64-unknown-linux-gnu", - "x86_64-unknown-netbsd", - "x86_64-unknown-openbsd", -] - -[package.metadata.playground] -features = [ - "os-poll", - "os-ext", - "net", -] - -[[example]] -name = "tcp_server" -required-features = [ - "os-poll", - "net", -] - -[[example]] -name = "tcp_listenfd_server" -required-features = [ - "os-poll", - "net", -] - -[[example]] -name = "udp_server" -required-features = [ - "os-poll", - "net", -] - -[dependencies.log] -version = "0.4.8" -optional = true - -[dev-dependencies.env_logger] -version = "0.9.3" -default-features = false - -[dev-dependencies.rand] -version = "0.8" - -[features] -default = ["log"] -net = [] -os-ext = [ - "os-poll", - "windows-sys/Win32_System_Pipes", - "windows-sys/Win32_Security", -] -os-poll = [] - -[target."cfg(target_os = \"wasi\")".dependencies.libc] -version = "0.2.121" - -[target."cfg(target_os = \"wasi\")".dependencies.wasi] -version = "0.11.0" - -[target."cfg(unix)".dependencies.libc] -version = "0.2.121" - -[target."cfg(windows)".dependencies.windows-sys] -version = "0.48" -features = [ - "Win32_Foundation", - "Win32_Networking_WinSock", - "Win32_Storage_FileSystem", - "Win32_System_IO", - "Win32_System_WindowsProgramming", -] diff -Nru s390-tools-2.31.0/rust-vendor/mio/CHANGELOG.md s390-tools-2.33.1/rust-vendor/mio/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/mio/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/CHANGELOG.md 1970-01-01 
01:00:00.000000000 +0100 @@ -1,650 +0,0 @@ -# 0.8.8 - -## Fixed - -* Fix compilation on WASI (https://github.com/tokio-rs/mio/pull/1676). - -# 0.8.7 - -## Added - -* Add/fix support for tvOS and watchOS, Mio should now build for tvOS and - watchOS, but we don't have a CI setup yet - (https://github.com/tokio-rs/mio/pull/1658). - -## Changed - -* Made the `log` dependency optional behind the `log` feature flag (enabled by - default). Users that disabled Mio's default features will now not see any - logging from Mio, enabling the `log` feature will fix that. This was done in - response to the `log` crate increasing it's MSRV to v1.60, see - https://github.com/rust-lang/log/pull/552 - (https://github.com/tokio-rs/mio/pull/1673). -* Update windows-sys dependency to v0.48 - (https://github.com/tokio-rs/mio/pull/1663). - -## Fixed - -* Fix overflow in `Poll::poll` when using `Duration::MAX` as timeout - (https://github.com/tokio-rs/mio/pull/1657). - -# 0.8.6 - -## Added - -* `Interest::PRIORITY` on Linux and Android, to trigger `Event::is_priority` - (https://github.com/tokio-rs/mio/pull/1647). - -## Changed - -* Updated windows-sys to 0.45 - (https://github.com/tokio-rs/mio/pull/1644). -* We started testing with sanitizers on the CI - (https://github.com/tokio-rs/mio/pull/1648). - -## Fixed - -* A number of potential fd leaks when setup resulted in an error right after - creation (https://github.com/tokio-rs/mio/pull/1636). -* Less truncating for timeout values in `Poll::poll` - (https://github.com/tokio-rs/mio/pull/1642). - -# 0.8.5 - -## Changed - -* Updated `windows-sys` to 0.42.0 - (https://github.com/tokio-rs/mio/pull/1624). -* Officially document Wine as not supported, some people claimed it worked, - other claims it doesn't, but nobody stepped up to fix the problem - (https://github.com/tokio-rs/mio/pull/1596). -* Switch to GitHub Actions - (https://github.com/tokio-rs/mio/pull/1598, https://github.com/tokio-rs/mio/pull/1601). 
-* Documented the current Poll::poll time behaviour - (https://github.com/tokio-rs/mio/pull/1603). - -## Fixed - -* Timeout less than one millisecond becoming zero millsiconds - (https://github.com/tokio-rs/mio/pull/1615, https://github.com/tokio-rs/mio/pull/1616) -* Undefined reference to `epoll\_create1` on Android API level < 21. - (https://github.com/tokio-rs/mio/pull/1590). - -# 0.8.4 - -## Added - -* Support `Registery::try_clone` on `wasm32-wasi` - (https://github.com/tokio-rs/mio/pull/1576). -* Add docs about polling without registering event sources - (https://github.com/tokio-rs/mio/pull/1585). - -# 0.8.3 - -## Changed - -* Replace `winapi` dependency with `windows-sys`. - (https://github.com/tokio-rs/mio/pull/1556). -* Future proofed the kevent ABI for FreeBSD - (https://github.com/tokio-rs/mio/pull/1572). - -## Fixed - -* Improved support for Redox, making it possible to run on stable Rust - (https://github.com/tokio-rs/mio/pull/1555). -* Don't ignore EAGAIN in UDS connect call - (https://github.com/tokio-rs/mio/pull/1564). -* Documentation of `TcpStream::connect` - (https://github.com/tokio-rs/mio/pull/1565). - -# 0.8.2 - -## Added - -* Experimental support for Redox. - -# 0.8.1 - -## Added - -* Add `try_io` method to all I/O types (#1551). This execute a user defined I/O - closure while updating Mio's internal state ensuring that the I/O type - receives more events if it hits a WouldBlock error. This is added to the - following types: - * `TcpStream` - * `UdpSocket` - * `UnixDatagram` - * `UnixStream` - * `unix::pipe::Sender` - * `unix::pipe::Receiver` -* Basic, experimental support for `wasm32-wasi` target (#1549). Note that a lot - of time type are still missing, e.g. the `Waker`, and may never be possible to - implement. - -# 0.8.0 - -## Removed - -* Deprecated features (https://github.com/tokio-rs/mio/commit/105f8f2afb57b01ddea716a0aa9720f226c520e3): - * extra-docs (always enabled) - * tcp (replaced with "net" feature). 
- * udp (replaced with "net" feature). - * uds (replaced with "net" feature). - * pipe (replaced with "os-ext" feature). - * os-util (replaced with "os-ext" feature). -* `TcpSocket` type - (https://github.com/tokio-rs/mio/commit/02e9be41f27daf822575444fdd2b3067433a5996). - The socket2 crate provides all the functionality and more. -* Support for Solaris, it never really worked anyway - (https://github.com/tokio-rs/mio/pull/1528). - -## Changes - -* Update minimum Rustc version (MSVR) to 1.46.0 - (https://github.com/tokio-rs/mio/commit/5c577efecd23750a9a3e0f6ad080ab98f14a255d). - -## Added - -* `UdpSocket::peer_addr` - (https://github.com/tokio-rs/mio/commit/5fc104d08e0e74c8a19247f7cba0f058699fc438). - -# 0.7.14 - -## Fixes - -* Remove use unsound internal macro (#1519). - -## Added - -* `sys::unix::SocketAddr::as_abstract_namespace()` (#1520). - -# 0.7.13 - -## Fixes - -* Fix `Registry::try_clone` invalid usage of `F_DUPFD_CLOEXEC` (#1497, - https://github.com/tokio-rs/mio/commit/2883f5c1f35bf1a59682c5ffc4afe6b97d7d6e68). - -# 0.7.12 (yanked) - -## Fixes - -* Set `FD_CLOEXEC` when calling `Registry::try_clone` - (https://github.com/tokio-rs/mio/commit/d1617b567ff6bc669d71e367d22e0e93ff7e2e24 for epoll and - (https://github.com/tokio-rs/mio/commit/b367a05e408ca90a26383c3aa16d8a16f019dc59 for kqueue). - -# 0.7.11 - -## Fixes - -* Fix missing feature of winapi. - (https://github.com/tokio-rs/mio/commit/a7e61db9e3c2b929ef1a33532bfcc22045d163ce). - -# 0.7.10 - -## Fixes - -* Fix an instance of not doc(cfg(.*)) - (https://github.com/tokio-rs/mio/commit/25e8f911357c740034f10a170dfa4ea1b28234ce). - -# 0.7.9 - -## Fixes - -* Fix error handling in `NamedPipe::write` - (https://github.com/tokio-rs/mio/commit/aec872be9732e5c6685100674278be27f54a271b). -* Use `accept(2)` on x86 Android instead of `accept4(2)` - (https://github.com/tokio-rs/mio/commit/6f86b925d3e48f30905d5cfa54348acf3f1fa036, - https://github.com/tokio-rs/mio/commit/8d5414880ab82178305ac1d2c16d715e58633d3e). 
-* Improve error message when opening AFD device - (https://github.com/tokio-rs/mio/commit/139f7c4422321eb4a17b14ae2c296fddd19a8804). - -# 0.7.8 - -## Fixes - -* Fix `TcpStream::set_linger` on macOS - (https://github.com/tokio-rs/mio/commit/175773ce02e85977db81224c782c8d140aba8543). -* Fix compilation on DragonFlyBSD - (https://github.com/tokio-rs/mio/commit/b51af46b28871f8dd3233b490ee62237ffed6a26). - -# 0.7.7 - -## Added - -* `UdpSocket::only_v6` - (https://github.com/tokio-rs/mio/commit/0101e05a800f17fb88f4315d9b9fe0f08cca6e57). -* `Clone` implementation for `Event` - (https://github.com/tokio-rs/mio/commit/26540ebbae89df6d4d08465c56f715d8f2addfc3). -* `AsRawFd` implementation for `Registry` - (https://github.com/tokio-rs/mio/commit/f70daa72da0042b1880256164774c3286d315a02). -* `Read` and `Write` implementation for `&unix::pipe::Sender` and `Receiver`, - that is on the reference to them, an implementation existed on the types - themselves already - (https://github.com/tokio-rs/mio/commit/1be481dcbbcb6906364008b5d61e7f53cddc3eb3). - -## Fixes - -* Underflow in `SocketAddr::address` - (https://github.com/tokio-rs/mio/commit/6d3fa69240cd4bb95e9d34605c660c30245a18bd). -* Android build with the net feature enabled, but with os-poll disabled - (https://github.com/tokio-rs/mio/commit/49d8fd33e026ad6e2c055d05d6667180ba2af7be). -* Solaris build with the net feature enabled, but with os-poll disabled - (https://github.com/tokio-rs/mio/commit/a6e025e9d9511639ec106ebedc0dd312bdc9be12). -* Ensure that `Waker::wake` works on illumos systems with poor `pipe(2)` and - `epoll(2)` interaction using `EPOLLET` - (https://github.com/tokio-rs/mio/commit/943d4249dcc17cd8b4d2250c4fa19116097248fa). -* Fix `unix::pipe` on illumos - (https://github.com/tokio-rs/mio/commit/0db49f6d5caf54b12176821363d154384357e70a). - -# 0.7.6 - -## Added - -* `net` feature, replaces `tcp`, `udp` and `uds` features - (https://github.com/tokio-rs/mio/commit/a301ba520a8479b459c4acdcefa4a7c5eea818c7). 
-* `os-ext` feature, replaces `os-util` and `pipe` features - (https://github.com/tokio-rs/mio/commit/f5017fae8a3d3bb4b4cada25b01a2d76a406badc). -* Added keepalive support to `TcpSocket` - (https://github.com/tokio-rs/mio/commit/290c43a96662d54ab7c4b8814e5a9f9a9e523fda). -* `TcpSocket::set_{send, recv}_buffer_size` - (https://github.com/tokio-rs/mio/commit/40c4af79bf5b32b8fbdbf6f2e5c16290e1d3d406). -* `TcpSocket::get_linger` - (https://github.com/tokio-rs/mio/commit/13e82ced655bbb6e2729226e485a7de9f2c2ccd9). -* Implement `IntoRawFd` for `TcpSocket` - (https://github.com/tokio-rs/mio/commit/50548ed45d0b2c98f1f2e003e210d14195284ef4). - -## Deprecated - -* The `tcp`, `udp` and `uds` features, replaced by a new `net` feature. - (https://github.com/tokio-rs/mio/commit/a301ba520a8479b459c4acdcefa4a7c5eea818c7). -* The `extra-docs` feature, now enabled by default. - (https://github.com/tokio-rs/mio/commit/25731e8688a2d91c5c700674a2c2d3841240ece1). -* The `os-util` and `pipe` features, replaced by a new `os-ext` feature. - (https://github.com/tokio-rs/mio/commit/f5017fae8a3d3bb4b4cada25b01a2d76a406badc). - -## Fixes - -* Incorrect assumption of the layout of `std::net::SocketAddr`. Previously Mio - would assume that `SocketAddrV{4,6}` had the same layout as - `libc::sockaddr_in(6)`, however this is not guaranteed by the standard - library. - (https://github.com/tokio-rs/mio/commit/152e0751f0be1c9b0cbd6778645b76bcb0eba93c). -* Also bumped the miow dependency to version 0.3.6 to solve the same problem as - above. - -# 0.7.5 - -## Added - -* `TcpSocket::get_localaddr()` retrieves local address - (https://github.com/tokio-rs/mio/commit/b41a022b2242eef1969c70c8ba93e04c528dba47). -* `TcpSocket::set_reuseport()` & `TcpSocket::get_reuseport()` configures and reads `SO_REUSEPORT` - (https://github.com/tokio-rs/mio/commit/183bbe409ab69cbf9db41d0263b41ec86202d9a0). 
-* `unix:pipe()` a wrapper around pipe(2) sys call - (https://github.com/tokio-rs/mio/commit/2b7c0967a7362303946deb3d4ca2ae507af6c72d). -* Add a check that a single Waker is active per Poll instance (only in debug mode) - (https://github.com/tokio-rs/mio/commit/f4874f28b32efcf4841691884c65a89734d96a56). -* Added `Interest:remove()` - (https://github.com/tokio-rs/mio/commit/b8639c3d9ac07bb7e2e27685680c8a6510fa1357). - -# 0.7.4 - -## Fixes - -* lost "socket closed" events on windows - (https://github.com/tokio-rs/mio/commit/50c299aca56c4a26e5ed20c283007239fbe6a7a7). - -## Added - -* `TcpSocket::set_linger()` configures SO_LINGER - (https://github.com/tokio-rs/mio/commit/3b4096565c1a879f651b8f8282ecdcbdbd5c92d3). - -# 0.7.3 - -## Added - -* `TcpSocket` for configuring a TCP socket before connecting or listening - (https://github.com/tokio-rs/mio/commit/5b09e60d0f64419b989bda88c86a3147334a03b3). - -# 0.7.2 - -## Added - -* Windows named pipe support. - (https://github.com/tokio-rs/mio/commit/52e8c2220e87696d20f13561402bcaabba4136ed). - -# 0.7.1 - -## Reduced support for 32-bit Apple targets - -In January 2020 Rust reduced its support for 32-bit Apple targets -(https://blog.rust-lang.org/2020/01/03/reducing-support-for-32-bit-apple-targets.html). -Starting with v0.7.1 Mio will do the same as we're no longer checking 32 bit -iOS/macOS on our CI. - -## Added - -* Support for illumos - (https://github.com/tokio-rs/mio/commit/976f2354d0e8fbbb64fba3bf017d7131f9c369a0). -* Report `epoll(2)`'s `EPOLLERR` event as `Event::is_write_closed` if it's the - only event - (https://github.com/tokio-rs/mio/commit/0c77b5712d675eeb9bd43928b5dd7d22b2c7ac0c). -* Optimised event::Iter::{size_hint, count} - (https://github.com/tokio-rs/mio/commit/40df934a11b05233a7796c4de19a4ee06bc4e03e). 
- -## Fixed - -* Work around Linux kernel < 2.6.37 bug on 32-bits making timeouts longer then - ~30 minutes effectively infinite - (https://github.com/tokio-rs/mio/commit/d555991f5ee81f6c1eec0fe481557d3d5b8d5ff4). -* Set `SO_NOSIGPIPE` on all sockets (not just UDP) on for Apple targets - (https://github.com/tokio-rs/mio/commit/b8bbdcb0d3236f4c4acb257996d42a88dc9987d9). -* Properly handle `POLL_ABORT` on Windows - (https://github.com/tokio-rs/mio/commit/a98da62b3ed1eeed1770aaca12f46d647e4fa749). -* Improved error handling around failing `SIO_BASE_HANDLE` calls on Windows - (https://github.com/tokio-rs/mio/commit/b15fc18458a79ef8a51f73effa92548650f4e5dc). - -## Changed - -* On NetBSD we now use `accept4(2)` - (https://github.com/tokio-rs/mio/commit/4e306addc7144f2e02a7e8397c220b179a006a19). -* The package uploaded to crates.io should be slightly smaller - (https://github.com/tokio-rs/mio/commit/eef8d3b9500bc0db957cd1ac68ee128ebc68351f). - -## Removed - -* Dependency on `lazy_static` on Windows - (https://github.com/tokio-rs/mio/commit/57e4c2a8ac153bc7bb87829e22cf0a21e3927e8a). - -# 0.7.0 - -Version 0.7 of Mio contains various major changes compared to version 0.6. -Overall a large number of API changes have been made to reduce the complexity of -the implementation and remove overhead where possible. - -Please refer to the [blog post about -0.7-alpha.1](https://tokio.rs/blog/2019-12-mio-v0.7-alpha.1/) for additional -information. - -## Added - -* `Interest` structure that replaces `Ready` in registering event sources. -* `Registry` structure that separates the registering and polling functionality. -* `Waker` structure that allows another thread to wake a thread polling `Poll`. -* Unix Domain Socket (UDS) types: `UnixDatagram`, `UnixListener` and - `UnixStream`. - -## Removed - -* All code deprecated in 0.6 was removed in 0.7. -* Support for Fuchsia was removed as the code was unmaintained. -* Support for Bitrig was removed, rustc dropped support for it also. 
-* `UnixReady` was merged into `Ready`. -* Custom user-space readiness queue was removed, this includes the public - `Registration` and `SetReadiness` types. -* `PollOpt` was removed and all registrations use edge-triggers. See the upgrade - guide on how to process event using edge-triggers. -* The network types (types in the `net` module) now support only the same API as - found in the standard library, various methods on the types were removed. -* `TcpStream` now supports vectored I/O. -* `Poll::poll_interruptible` was removed. Instead `Poll::poll` will now return - an error if one occurs. -* `From` is removed from `Token`, the internal field is still public, so - `Token(my_token)` can still be used. - -## Changed - -* Various documentation improvements were made around correct usage of `Poll` - and registered event sources. It is recommended to reread the documentation of - at least `event::Source` and `Poll`. -* Mio now uses Rust 2018 and rustfmt for all code. -* `Event` was changed to be a wrapper around the OS event. This means it can be - significantly larger on some OSes. -* `Ready` was removed and replaced with various `is_*` methods on `Event`. For - example instead checking for readable readiness using - `Event::ready().is_readable()`, you would call `Event::is_readable()`. -* `Ready::is_hup` was removed in favour of `Event::is_read_closed` and - `Event::is_write_closed`. -* The Iterator implementation of `Events` was changed to return `&Event`. -* `Evented` was renamed to `event::Source` and now takes mutable reference to - the source. -* Minimum supported Rust version was increased to 1.39. -* By default Mio now uses a shim implementation. To enable the full - implementation, that uses the OS, enable the `os-oll` feature. To enable the - network types use `tcp`, `udp` and/or `uds`. For more documentation on the - features see the `feature` module in the API documentation (requires the - `extra-docs` feature). 
-* The entire Windows implementation was rewritten. -* Various optimisation were made to reduce the number of system calls in - creating and using sockets, e.g. making use of `accept4(2)`. -* The `fmt::Debug` implementation of `Events` is now actually useful as it - prints all `Event`s. - -# 0.6.23 (Dec 01, 2020) - -### Changed -- **MSRV**: Increased the MSRV from 1.18.0 (Jun 8, 2017) to 1.31.0 (Dec 6, - 2018) - (https://github.com/tokio-rs/mio/commit/4879e0d32ddfd98e762fc87240e594a3ad8fca30). - -### Fixed -- Work around Linux kernel < 2.6.37 bug on 32-bits making timeouts longer then - ~30 minutes effectively infinite - (https://github.com/tokio-rs/mio/commit/e7cba59950e9c9fa6194e29b5b1e72029e3df455). -- Update miow and net2 depedencies to get rid of invalid memory layout assumption - (https://github.com/tokio-rs/mio/commit/13f02ac0a86d7c0c0001e5ff8960a0b4340d075c). - -# 0.6.22 (May 01, 2020) - -### Added -- Add support for illumos target (#1294) - -# 0.6.21 (November 27, 2019) - -### Fixed -- remove `=` dependency on `cfg-if`. - -# 0.6.20 (November 21, 2019) - -### Fixed -- Use default IOCP concurrency value (#1161). -- setting FD_CLOEXEC in pipe (#1095). - -# 0.6.19 (May 28, 2018) - -### Fixed -- Do not trigger HUP events on kqueue platforms (#958). - -# 0.6.18 (May 24, 2018) - -### Fixed -- Fix compilation on kqueue platforms with 32bit C long (#948). - -# 0.6.17 (May 15, 2018) - -### Fixed -- Don't report `RDHUP` as `HUP` (#939) -- Fix lazycell related compilation issues. -- Fix EPOLLPRI conflicting with READABLE -- Abort process on ref count overflows - -### Added -- Define PRI on all targets - -# 0.6.16 (September 5, 2018) - -* Add EPOLLPRI readiness to UnixReady on supported platforms (#867) -* Reduce spurious awaken calls (#875) - -# 0.6.15 (July 3, 2018) - -* Implement `Evented` for containers (#840). -* Fix android-aarch64 build (#850). 
- -# 0.6.14 (March 8, 2018) - -* Add `Poll::poll_interruptible` (#811) -* Add `Ready::all` and `usize` conversions (#825) - -# 0.6.13 (February 5, 2018) - -* Fix build on DragonFlyBSD. -* Add `TcpListener::from_std` that does not require the socket addr. -* Deprecate `TcpListener::from_listener` in favor of from_std. - -# 0.6.12 (January 5, 2018) - -* Add `TcpStream::peek` function (#773). -* Raise minimum Rust version to 1.18.0. -* `Poll`: retry select() when interrupted by a signal (#742). -* Deprecate `Events` index access (#713). -* Add `Events::clear` (#782). -* Add support for `lio_listio` (#780). - -# 0.6.11 (October 25, 2017) - -* Allow register to take empty interest (#640). -* Fix bug with TCP errors on windows (#725). -* Add TcpListener::accept_std (#733). -* Update IoVec to fix soundness bug -- includes behavior change. (#747). -* Minimum Rust version is now 1.14.0. -* Fix Android x86_64 build. -* Misc API & doc polish. - -# 0.6.10 (July 27, 2017) - -* Experimental support for Fuchsia -* Add `only_v6` option for UDP sockets -* Fix build on NetBSD -* Minimum Rust version is now 1.13.0 -* Assignment operators (e.g. `|=`) are now implemented for `Ready` - -# 0.6.9 (June 7, 2017) - -* More socket options are exposed through the TCP types, brought in through the - `net2` crate. - -# 0.6.8 (May 26, 2017) - -* Support Fuchia -* POSIX AIO support -* Fix memory leak caused by Register::new2 -* Windows: fix handling failed TCP connections -* Fix build on aarch64-linux-android -* Fix usage of `O_CLOEXEC` with `SETFL` - -# 0.6.7 (April 27, 2017) - -* Ignore EPIPE coming out of `kevent` -* Timer thread should exit when timer is dropped. - -# 0.6.6 (March 22, 2017) - -* Add send(), recv() and connect() to UDPSocket. 
-* Fix bug in custom readiness queue -* Move net types into `net` module - -# 0.6.5 (March 14, 2017) - -* Misc improvements to kqueue bindings -* Add official support for iOS, Android, BSD -* Reimplement custom readiness queue -* `Poll` is now `Sync` -* Officially deprecate non-core functionality (timers, channel, etc...) -* `Registration` now implements `Evented` -* Fix bug around error conditions with `connect` on windows. -* Use iovec crate for scatter / gather operations -* Only support readable and writable readiness on all platforms -* Expose additional readiness in a platform specific capacity - -# 0.6.4 (January 24, 2017) - -* Fix compilation on musl -* Add `TcpStream::from_stream` which converts a std TCP stream to Mio. - -# 0.6.3 (January 22, 2017) - -* Implement readv/writev for `TcpStream`, allowing vectored reads/writes to - work across platforms -* Remove `nix` dependency -* Implement `Display` and `Error` for some channel error types. -* Optimize TCP on Windows through `SetFileCompletionNotificationModes` - -# 0.6.2 (December 18, 2016) - -* Allow registration of custom handles on Windows (like `EventedFd` on Unix) -* Send only one byte for the awakener on Unix instead of four -* Fix a bug in the timer implementation which caused an infinite loop - -# 0.6.1 (October 30, 2016) - -* Update dependency of `libc` to 0.2.16 -* Fix channel `dec` logic -* Fix a timer bug around timeout cancellation -* Don't allocate buffers for TCP reads on Windows -* Touched up documentation in a few places -* Fix an infinite looping timer thread on OSX -* Fix compile on 32-bit OSX -* Fix compile on FreeBSD - -# 0.6.0 (September 2, 2016) - -* Shift primary API towards `Poll` -* `EventLoop` and types to `deprecated` mod. All contents of the - `deprecated` mod will be removed by Mio 1.0. -* Increase minimum supported Rust version to 1.9.0 -* Deprecate unix domain socket implementation in favor of using a - version external to Mio. 
For example: https://github.com/alexcrichton/mio-uds. -* Remove various types now included in `std` -* Updated TCP & UDP APIs to match the versions in `std` -* Enable implementing `Evented` for any type via `Registration` -* Rename `IoEvent` -> `Event` -* Access `Event` data via functions vs. public fields. -* Expose `Events` as a public type that is passed into `Poll` -* Use `std::time::Duration` for all APIs that require a time duration. -* Polled events are now retrieved via `Events` type. -* Implement `std::error::Error` for `TimerError` -* Relax `Send` bound on notify messages. -* Remove `Clone` impl for `Timeout` (future proof) -* Remove `mio::prelude` -* Remove `mio::util` -* Remove dependency on bytes - -# 0.5.0 (December 3, 2015) - -* Windows support (#239) -* NetBSD support (#306) -* Android support (#295) -* Don't re-export bytes types -* Renamed `EventLoop::register_opt` to `EventLoop::register` (#257) -* `EventLoopConfig` is now a builder instead of having public struct fields. It - is also no longer `Copy`. (#259) -* `TcpSocket` is no longer exported in the public API (#262) -* Integrate with net2. (#262) -* `TcpListener` now returns the remote peer address from `accept` as well (#275) -* The `UdpSocket::{send_to, recv_from}` methods are no longer generic over `Buf` - or `MutBuf` but instead take slices directly. The return types have also been - updated to return the number of bytes transferred. (#260) -* Fix bug with kqueue where an error on registration prevented the - changelist from getting flushed (#276) -* Support sending/receiving FDs over UNIX sockets (#291) -* Mio's socket types are permanently associated with an EventLoop (#308) -* Reduce unnecessary poll wakeups (#314) - - -# 0.4.1 (July 21, 2015) - -* [BUGFIX] Fix notify channel concurrency bug (#216) - -# 0.4.0 (July 16, 2015) - -* [BUGFIX] EventLoop::register requests all events, not just readable. -* [BUGFIX] Attempting to send a message to a shutdown event loop fails correctly. 
-* [FEATURE] Expose TCP shutdown -* [IMPROVEMENT] Coalesce readable & writable into `ready` event (#184) -* [IMPROVEMENT] Rename TryRead & TryWrite function names to avoid conflict with std. -* [IMPROVEMENT] Provide TCP and UDP types in Mio (path to windows #155) -* [IMPROVEMENT] Use clock_ticks crate instead of time (path to windows #155) -* [IMPROVEMENT] Move unix specific features into mio::unix module -* [IMPROVEMENT] TcpListener sets SO_REUSEADDR by default diff -Nru s390-tools-2.31.0/rust-vendor/mio/examples/tcp_listenfd_server.rs s390-tools-2.33.1/rust-vendor/mio/examples/tcp_listenfd_server.rs --- s390-tools-2.31.0/rust-vendor/mio/examples/tcp_listenfd_server.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/examples/tcp_listenfd_server.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,209 +0,0 @@ -// You can run this example from the root of the mio repo: -// cargo run --example tcp_listenfd_server --features="os-poll net" -// or with wasi: -// cargo +nightly build --target wasm32-wasi --example tcp_listenfd_server --features="os-poll net" -// wasmtime run --tcplisten 127.0.0.1:9000 --env 'LISTEN_FDS=1' target/wasm32-wasi/debug/examples/tcp_listenfd_server.wasm - -use mio::event::Event; -use mio::net::{TcpListener, TcpStream}; -use mio::{Events, Interest, Poll, Registry, Token}; -use std::collections::HashMap; -use std::io::{self, Read, Write}; -use std::str::from_utf8; - -// Setup some tokens to allow us to identify which event is for which socket. -const SERVER: Token = Token(0); - -// Some data we'll send over the connection. 
-const DATA: &[u8] = b"Hello world!\n"; - -#[cfg(not(windows))] -fn get_first_listen_fd_listener() -> Option { - #[cfg(unix)] - use std::os::unix::io::FromRawFd; - #[cfg(target_os = "wasi")] - use std::os::wasi::io::FromRawFd; - - let stdlistener = unsafe { std::net::TcpListener::from_raw_fd(3) }; - stdlistener.set_nonblocking(true).unwrap(); - Some(stdlistener) -} - -#[cfg(windows)] -fn get_first_listen_fd_listener() -> Option { - // Windows does not support `LISTEN_FDS` - None -} - -fn main() -> io::Result<()> { - env_logger::init(); - - std::env::var("LISTEN_FDS").expect("LISTEN_FDS environment variable unset"); - - // Create a poll instance. - let mut poll = Poll::new()?; - // Create storage for events. - let mut events = Events::with_capacity(128); - - // Setup the TCP server socket. - let mut server = { - let stdlistener = get_first_listen_fd_listener().unwrap(); - println!("Using preopened socket FD 3"); - println!("You can connect to the server using `nc`:"); - match stdlistener.local_addr() { - Ok(a) => println!(" $ nc {} {}", a.ip(), a.port()), - Err(_) => println!(" $ nc "), - } - println!("You'll see our welcome message and anything you type will be printed here."); - TcpListener::from_std(stdlistener) - }; - - // Register the server with poll we can receive events for it. - poll.registry() - .register(&mut server, SERVER, Interest::READABLE)?; - - // Map of `Token` -> `TcpStream`. - let mut connections = HashMap::new(); - // Unique token for each incoming connection. - let mut unique_token = Token(SERVER.0 + 1); - - loop { - poll.poll(&mut events, None)?; - - for event in events.iter() { - match event.token() { - SERVER => loop { - // Received an event for the TCP server socket, which - // indicates we can accept an connection. 
- let (mut connection, address) = match server.accept() { - Ok((connection, address)) => (connection, address), - Err(ref e) if would_block(e) => { - // If we get a `WouldBlock` error we know our - // listener has no more incoming connections queued, - // so we can return to polling and wait for some - // more. - break; - } - Err(e) => { - // If it was any other kind of error, something went - // wrong and we terminate with an error. - return Err(e); - } - }; - - println!("Accepted connection from: {}", address); - - let token = next(&mut unique_token); - poll.registry() - .register(&mut connection, token, Interest::WRITABLE)?; - - connections.insert(token, connection); - }, - token => { - // Maybe received an event for a TCP connection. - let done = if let Some(connection) = connections.get_mut(&token) { - handle_connection_event(poll.registry(), connection, event)? - } else { - // Sporadic events happen, we can safely ignore them. - false - }; - if done { - if let Some(mut connection) = connections.remove(&token) { - poll.registry().deregister(&mut connection)?; - } - } - } - } - } - } -} - -fn next(current: &mut Token) -> Token { - let next = current.0; - current.0 += 1; - Token(next) -} - -/// Returns `true` if the connection is done. -fn handle_connection_event( - registry: &Registry, - connection: &mut TcpStream, - event: &Event, -) -> io::Result { - if event.is_writable() { - // We can (maybe) write to the connection. - match connection.write(DATA) { - // We want to write the entire `DATA` buffer in a single go. If we - // write less we'll return a short write error (same as - // `io::Write::write_all` does). - Ok(n) if n < DATA.len() => return Err(io::ErrorKind::WriteZero.into()), - Ok(_) => { - // After we've written something we'll reregister the connection - // to only respond to readable events. - registry.reregister(connection, event.token(), Interest::READABLE)? 
- } - // Would block "errors" are the OS's way of saying that the - // connection is not actually ready to perform this I/O operation. - Err(ref err) if would_block(err) => {} - // Got interrupted (how rude!), we'll try again. - Err(ref err) if interrupted(err) => { - return handle_connection_event(registry, connection, event) - } - // Other errors we'll consider fatal. - Err(err) => return Err(err), - } - } - - if event.is_readable() { - let mut connection_closed = false; - let mut received_data = vec![0; 4096]; - let mut bytes_read = 0; - // We can (maybe) read from the connection. - loop { - match connection.read(&mut received_data[bytes_read..]) { - Ok(0) => { - // Reading 0 bytes means the other side has closed the - // connection or is done writing, then so are we. - connection_closed = true; - break; - } - Ok(n) => { - bytes_read += n; - if bytes_read == received_data.len() { - received_data.resize(received_data.len() + 1024, 0); - } - } - // Would block "errors" are the OS's way of saying that the - // connection is not actually ready to perform this I/O operation. - Err(ref err) if would_block(err) => break, - Err(ref err) if interrupted(err) => continue, - // Other errors we'll consider fatal. 
- Err(err) => return Err(err), - } - } - - if bytes_read != 0 { - let received_data = &received_data[..bytes_read]; - if let Ok(str_buf) = from_utf8(received_data) { - println!("Received data: {}", str_buf.trim_end()); - } else { - println!("Received (none UTF-8) data: {:?}", received_data); - } - } - - if connection_closed { - println!("Connection closed"); - return Ok(true); - } - } - - Ok(false) -} - -fn would_block(err: &io::Error) -> bool { - err.kind() == io::ErrorKind::WouldBlock -} - -fn interrupted(err: &io::Error) -> bool { - err.kind() == io::ErrorKind::Interrupted -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/examples/tcp_server.rs s390-tools-2.33.1/rust-vendor/mio/examples/tcp_server.rs --- s390-tools-2.31.0/rust-vendor/mio/examples/tcp_server.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/examples/tcp_server.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,189 +0,0 @@ -// You can run this example from the root of the mio repo: -// cargo run --example tcp_server --features="os-poll net" -use mio::event::Event; -use mio::net::{TcpListener, TcpStream}; -use mio::{Events, Interest, Poll, Registry, Token}; -use std::collections::HashMap; -use std::io::{self, Read, Write}; -use std::str::from_utf8; - -// Setup some tokens to allow us to identify which event is for which socket. -const SERVER: Token = Token(0); - -// Some data we'll send over the connection. -const DATA: &[u8] = b"Hello world!\n"; - -#[cfg(not(target_os = "wasi"))] -fn main() -> io::Result<()> { - env_logger::init(); - - // Create a poll instance. - let mut poll = Poll::new()?; - // Create storage for events. - let mut events = Events::with_capacity(128); - - // Setup the TCP server socket. - let addr = "127.0.0.1:9000".parse().unwrap(); - let mut server = TcpListener::bind(addr)?; - - // Register the server with poll we can receive events for it. - poll.registry() - .register(&mut server, SERVER, Interest::READABLE)?; - - // Map of `Token` -> `TcpStream`. 
- let mut connections = HashMap::new(); - // Unique token for each incoming connection. - let mut unique_token = Token(SERVER.0 + 1); - - println!("You can connect to the server using `nc`:"); - println!(" $ nc 127.0.0.1 9000"); - println!("You'll see our welcome message and anything you type will be printed here."); - - loop { - poll.poll(&mut events, None)?; - - for event in events.iter() { - match event.token() { - SERVER => loop { - // Received an event for the TCP server socket, which - // indicates we can accept an connection. - let (mut connection, address) = match server.accept() { - Ok((connection, address)) => (connection, address), - Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - // If we get a `WouldBlock` error we know our - // listener has no more incoming connections queued, - // so we can return to polling and wait for some - // more. - break; - } - Err(e) => { - // If it was any other kind of error, something went - // wrong and we terminate with an error. - return Err(e); - } - }; - - println!("Accepted connection from: {}", address); - - let token = next(&mut unique_token); - poll.registry().register( - &mut connection, - token, - Interest::READABLE.add(Interest::WRITABLE), - )?; - - connections.insert(token, connection); - }, - token => { - // Maybe received an event for a TCP connection. - let done = if let Some(connection) = connections.get_mut(&token) { - handle_connection_event(poll.registry(), connection, event)? - } else { - // Sporadic events happen, we can safely ignore them. - false - }; - if done { - if let Some(mut connection) = connections.remove(&token) { - poll.registry().deregister(&mut connection)?; - } - } - } - } - } - } -} - -fn next(current: &mut Token) -> Token { - let next = current.0; - current.0 += 1; - Token(next) -} - -/// Returns `true` if the connection is done. 
-fn handle_connection_event( - registry: &Registry, - connection: &mut TcpStream, - event: &Event, -) -> io::Result { - if event.is_writable() { - // We can (maybe) write to the connection. - match connection.write(DATA) { - // We want to write the entire `DATA` buffer in a single go. If we - // write less we'll return a short write error (same as - // `io::Write::write_all` does). - Ok(n) if n < DATA.len() => return Err(io::ErrorKind::WriteZero.into()), - Ok(_) => { - // After we've written something we'll reregister the connection - // to only respond to readable events. - registry.reregister(connection, event.token(), Interest::READABLE)? - } - // Would block "errors" are the OS's way of saying that the - // connection is not actually ready to perform this I/O operation. - Err(ref err) if would_block(err) => {} - // Got interrupted (how rude!), we'll try again. - Err(ref err) if interrupted(err) => { - return handle_connection_event(registry, connection, event) - } - // Other errors we'll consider fatal. - Err(err) => return Err(err), - } - } - - if event.is_readable() { - let mut connection_closed = false; - let mut received_data = vec![0; 4096]; - let mut bytes_read = 0; - // We can (maybe) read from the connection. - loop { - match connection.read(&mut received_data[bytes_read..]) { - Ok(0) => { - // Reading 0 bytes means the other side has closed the - // connection or is done writing, then so are we. - connection_closed = true; - break; - } - Ok(n) => { - bytes_read += n; - if bytes_read == received_data.len() { - received_data.resize(received_data.len() + 1024, 0); - } - } - // Would block "errors" are the OS's way of saying that the - // connection is not actually ready to perform this I/O operation. - Err(ref err) if would_block(err) => break, - Err(ref err) if interrupted(err) => continue, - // Other errors we'll consider fatal. 
- Err(err) => return Err(err), - } - } - - if bytes_read != 0 { - let received_data = &received_data[..bytes_read]; - if let Ok(str_buf) = from_utf8(received_data) { - println!("Received data: {}", str_buf.trim_end()); - } else { - println!("Received (none UTF-8) data: {:?}", received_data); - } - } - - if connection_closed { - println!("Connection closed"); - return Ok(true); - } - } - - Ok(false) -} - -fn would_block(err: &io::Error) -> bool { - err.kind() == io::ErrorKind::WouldBlock -} - -fn interrupted(err: &io::Error) -> bool { - err.kind() == io::ErrorKind::Interrupted -} - -#[cfg(target_os = "wasi")] -fn main() { - panic!("can't bind to an address with wasi") -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/examples/udp_server.rs s390-tools-2.33.1/rust-vendor/mio/examples/udp_server.rs --- s390-tools-2.31.0/rust-vendor/mio/examples/udp_server.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/examples/udp_server.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,85 +0,0 @@ -// You can run this example from the root of the mio repo: -// cargo run --example udp_server --features="os-poll net" -use log::warn; -use mio::{Events, Interest, Poll, Token}; -use std::io; - -// A token to allow us to identify which event is for the `UdpSocket`. -const UDP_SOCKET: Token = Token(0); - -#[cfg(not(target_os = "wasi"))] -fn main() -> io::Result<()> { - use mio::net::UdpSocket; - - env_logger::init(); - - // Create a poll instance. - let mut poll = Poll::new()?; - // Create storage for events. Since we will only register a single socket, a - // capacity of 1 will do. - let mut events = Events::with_capacity(1); - - // Setup the UDP socket. - let addr = "127.0.0.1:9000".parse().unwrap(); - - let mut socket = UdpSocket::bind(addr)?; - - // Register our socket with the token defined above and an interest in being - // `READABLE`. 
- poll.registry() - .register(&mut socket, UDP_SOCKET, Interest::READABLE)?; - - println!("You can connect to the server using `nc`:"); - println!(" $ nc -u 127.0.0.1 9000"); - println!("Anything you type will be echoed back to you."); - - // Initialize a buffer for the UDP packet. We use the maximum size of a UDP - // packet, which is the maximum value of 16 a bit integer. - let mut buf = [0; 1 << 16]; - - // Our event loop. - loop { - // Poll to check if we have events waiting for us. - poll.poll(&mut events, None)?; - - // Process each event. - for event in events.iter() { - // Validate the token we registered our socket with, - // in this example it will only ever be one but we - // make sure it's valid none the less. - match event.token() { - UDP_SOCKET => loop { - // In this loop we receive all packets queued for the socket. - match socket.recv_from(&mut buf) { - Ok((packet_size, source_address)) => { - // Echo the data. - socket.send_to(&buf[..packet_size], source_address)?; - } - Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - // If we get a `WouldBlock` error we know our socket - // has no more packets queued, so we can return to - // polling and wait for some more. - break; - } - Err(e) => { - // If it was any other kind of error, something went - // wrong and we terminate with an error. - return Err(e); - } - } - }, - _ => { - // This should never happen as we only registered our - // `UdpSocket` using the `UDP_SOCKET` token, but if it ever - // does we'll log it. 
- warn!("Got event for unexpected token: {:?}", event); - } - } - } - } -} - -#[cfg(target_os = "wasi")] -fn main() { - panic!("can't bind to an address with wasi") -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/LICENSE s390-tools-2.33.1/rust-vendor/mio/LICENSE --- s390-tools-2.31.0/rust-vendor/mio/LICENSE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -Copyright (c) 2014 Carl Lerche and other MIO contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/mio/README.md s390-tools-2.33.1/rust-vendor/mio/README.md --- s390-tools-2.31.0/rust-vendor/mio/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,179 +0,0 @@ -# Mio – Metal I/O - -Mio is a fast, low-level I/O library for Rust focusing on non-blocking APIs and -event notification for building high performance I/O apps with as little -overhead as possible over the OS abstractions. - -[![Crates.io][crates-badge]][crates-url] -[![MIT licensed][mit-badge]][mit-url] -[![Build Status][actions-badge]][actions-url] -[![Build Status][cirrus-badge]][cirrus-url] - -[crates-badge]: https://img.shields.io/crates/v/mio.svg -[crates-url]: https://crates.io/crates/mio -[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg -[mit-url]: LICENSE -[actions-badge]: https://github.com/tokio-rs/mio/workflows/CI/badge.svg -[actions-url]: https://github.com/tokio-rs/mio/actions?query=workflow%3ACI+branch%3Amaster -[cirrus-badge]: https://api.cirrus-ci.com/github/tokio-rs/mio.svg -[cirrus-url]: https://cirrus-ci.com/github/tokio-rs/mio - -**API documentation** - -* [v0.8](https://docs.rs/mio/^0.8) -* [v0.7](https://docs.rs/mio/^0.7) - -This is a low level library, if you are looking for something easier to get -started with, see [Tokio](https://tokio.rs). - -## Usage - -To use `mio`, first add this to your `Cargo.toml`: - -```toml -[dependencies] -mio = "0.8" -``` - -Next we can start using Mio. The following is quick introduction using -`TcpListener` and `TcpStream`. Note that `features = ["os-poll", "net"]` must be -specified for this example. - -```rust -use std::error::Error; - -use mio::net::{TcpListener, TcpStream}; -use mio::{Events, Interest, Poll, Token}; - -// Some tokens to allow us to identify which event is for which socket. 
-const SERVER: Token = Token(0); -const CLIENT: Token = Token(1); - -fn main() -> Result<(), Box> { - // Create a poll instance. - let mut poll = Poll::new()?; - // Create storage for events. - let mut events = Events::with_capacity(128); - - // Setup the server socket. - let addr = "127.0.0.1:13265".parse()?; - let mut server = TcpListener::bind(addr)?; - // Start listening for incoming connections. - poll.registry() - .register(&mut server, SERVER, Interest::READABLE)?; - - // Setup the client socket. - let mut client = TcpStream::connect(addr)?; - // Register the socket. - poll.registry() - .register(&mut client, CLIENT, Interest::READABLE | Interest::WRITABLE)?; - - // Start an event loop. - loop { - // Poll Mio for events, blocking until we get an event. - poll.poll(&mut events, None)?; - - // Process each event. - for event in events.iter() { - // We can use the token we previously provided to `register` to - // determine for which socket the event is. - match event.token() { - SERVER => { - // If this is an event for the server, it means a connection - // is ready to be accepted. - // - // Accept the connection and drop it immediately. This will - // close the socket and notify the client of the EOF. - let connection = server.accept(); - drop(connection); - } - CLIENT => { - if event.is_writable() { - // We can (likely) write to the socket without blocking. - } - - if event.is_readable() { - // We can (likely) read from the socket without blocking. - } - - // Since the server just shuts down the connection, let's - // just exit from our event loop. - return Ok(()); - } - // We don't expect any events with tokens other than those we provided. - _ => unreachable!(), - } - } - } -} -``` - -## Features - -* Non-blocking TCP, UDP -* I/O event queue backed by epoll, kqueue, and IOCP -* Zero allocations at runtime -* Platform specific extensions - -## Non-goals - -The following are specifically omitted from Mio and are left to the user -or higher-level libraries. 
- -* File operations -* Thread pools / multi-threaded event loop -* Timers - -## Platforms - -Currently supported platforms: - -* Android (API level 21) -* DragonFly BSD -* FreeBSD -* Linux -* NetBSD -* OpenBSD -* Windows -* iOS -* macOS - -There are potentially others. If you find that Mio works on another -platform, submit a PR to update the list! - -Mio can handle interfacing with each of the event systems of the aforementioned -platforms. The details of their implementation are further discussed in the -`Poll` type of the API documentation (see above). - -The Windows implementation for polling sockets is using the [wepoll] strategy. -This uses the Windows AFD system to access socket readiness events. - -[wepoll]: https://github.com/piscisaureus/wepoll - -### Unsupported - -* Haiku, see [issue #1472] -* Solaris, see [issue #1152] -* Wine, see [issue #1444] - -[issue #1472]: https://github.com/tokio-rs/mio/issues/1472 -[issue #1152]: https://github.com/tokio-rs/mio/issues/1152 -[issue #1444]: https://github.com/tokio-rs/mio/issues/1444 - -## Community - -A group of Mio users hang out on [Discord], this can be a good place to go for -questions. - -[Discord]: https://discord.gg/tokio - -## Contributing - -Interested in getting involved? We would love to help you! For simple -bug fixes, just submit a PR with the fix and we can discuss the fix -directly in the PR. If the fix is more complex, start with an issue. - -If you want to propose an API change, create an issue to start a -discussion with the community. Also, feel free to talk with us in Discord. - -Finally, be kind. We support the [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct). 
diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/event/event.rs s390-tools-2.33.1/rust-vendor/mio/src/event/event.rs --- s390-tools-2.31.0/rust-vendor/mio/src/event/event.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/event/event.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,230 +0,0 @@ -use crate::{sys, Token}; - -use std::fmt; - -/// A readiness event. -/// -/// `Event` is a readiness state paired with a [`Token`]. It is returned by -/// [`Poll::poll`]. -/// -/// For more documentation on polling and events, see [`Poll`]. -/// -/// [`Poll::poll`]: ../struct.Poll.html#method.poll -/// [`Poll`]: ../struct.Poll.html -/// [`Token`]: ../struct.Token.html -#[derive(Clone)] -#[repr(transparent)] -pub struct Event { - inner: sys::Event, -} - -impl Event { - /// Returns the event's token. - pub fn token(&self) -> Token { - sys::event::token(&self.inner) - } - - /// Returns true if the event contains readable readiness. - /// - /// # Notes - /// - /// Out-of-band (OOB) data also triggers readable events. But must - /// application don't actually read OOB data, this could leave an - /// application open to a Denial-of-Service (Dos) attack, see - /// . - /// However because Mio uses edge-triggers it will not result in an infinite - /// loop as described in the article above. - pub fn is_readable(&self) -> bool { - sys::event::is_readable(&self.inner) - } - - /// Returns true if the event contains writable readiness. - pub fn is_writable(&self) -> bool { - sys::event::is_writable(&self.inner) - } - - /// Returns true if the event contains error readiness. - /// - /// Error events occur when the socket enters an error state. In this case, - /// the socket will also receive a readable or writable event. Reading or - /// writing to the socket will result in an error. - /// - /// # Notes - /// - /// Method is available on all platforms, but not all platforms trigger the - /// error event. 
- /// - /// The table below shows what flags are checked on what OS. - /// - /// | [OS selector] | Flag(s) checked | - /// |---------------|-----------------| - /// | [epoll] | `EPOLLERR` | - /// | [kqueue] | `EV_ERROR` and `EV_EOF` with `fflags` set to `0`. | - /// - /// [OS selector]: ../struct.Poll.html#implementation-notes - /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html - /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 - pub fn is_error(&self) -> bool { - sys::event::is_error(&self.inner) - } - - /// Returns true if the event contains read closed readiness. - /// - /// # Notes - /// - /// Read closed readiness can be expected after any of the following have - /// occurred: - /// * The local stream has shutdown the read half of its socket - /// * The local stream has shutdown both the read half and the write half - /// of its socket - /// * The peer stream has shutdown the write half its socket; this sends a - /// `FIN` packet that has been received by the local stream - /// - /// Method is a best effort implementation. While some platforms may not - /// return readiness when read half is closed, it is guaranteed that - /// false-positives will not occur. - /// - /// The table below shows what flags are checked on what OS. - /// - /// | [OS selector] | Flag(s) checked | - /// |---------------|-----------------| - /// | [epoll] | `EPOLLHUP`, or | - /// | | `EPOLLIN` and `EPOLLRDHUP` | - /// | [kqueue] | `EV_EOF` | - /// - /// [OS selector]: ../struct.Poll.html#implementation-notes - /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html - /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 - pub fn is_read_closed(&self) -> bool { - sys::event::is_read_closed(&self.inner) - } - - /// Returns true if the event contains write closed readiness. 
- /// - /// # Notes - /// - /// On [epoll] this is essentially a check for `EPOLLHUP` flag as the - /// local stream shutting down its write half does not trigger this event. - /// - /// On [kqueue] the local stream shutting down the write half of its - /// socket will trigger this event. - /// - /// Method is a best effort implementation. While some platforms may not - /// return readiness when write half is closed, it is guaranteed that - /// false-positives will not occur. - /// - /// The table below shows what flags are checked on what OS. - /// - /// | [OS selector] | Flag(s) checked | - /// |---------------|-----------------| - /// | [epoll] | `EPOLLHUP`, or | - /// | | only `EPOLLERR`, or | - /// | | `EPOLLOUT` and `EPOLLERR` | - /// | [kqueue] | `EV_EOF` | - /// - /// [OS selector]: ../struct.Poll.html#implementation-notes - /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html - /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 - pub fn is_write_closed(&self) -> bool { - sys::event::is_write_closed(&self.inner) - } - - /// Returns true if the event contains priority readiness. - /// - /// # Notes - /// - /// Method is available on all platforms, but not all platforms trigger the - /// priority event. - /// - /// The table below shows what flags are checked on what OS. - /// - /// | [OS selector] | Flag(s) checked | - /// |---------------|-----------------| - /// | [epoll] | `EPOLLPRI` | - /// | [kqueue] | *Not supported* | - /// - /// [OS selector]: ../struct.Poll.html#implementation-notes - /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html - /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 - #[inline] - pub fn is_priority(&self) -> bool { - sys::event::is_priority(&self.inner) - } - - /// Returns true if the event contains AIO readiness. - /// - /// # Notes - /// - /// Method is available on all platforms, but not all platforms support AIO. 
- /// - /// The table below shows what flags are checked on what OS. - /// - /// | [OS selector] | Flag(s) checked | - /// |---------------|-----------------| - /// | [epoll] | *Not supported* | - /// | [kqueue]1 | `EVFILT_AIO` | - /// - /// 1: Only supported on DragonFly BSD, FreeBSD, iOS and macOS. - /// - /// [OS selector]: ../struct.Poll.html#implementation-notes - /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html - /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 - pub fn is_aio(&self) -> bool { - sys::event::is_aio(&self.inner) - } - - /// Returns true if the event contains LIO readiness. - /// - /// # Notes - /// - /// Method is available on all platforms, but only FreeBSD supports LIO. On - /// FreeBSD this method checks the `EVFILT_LIO` flag. - pub fn is_lio(&self) -> bool { - sys::event::is_lio(&self.inner) - } - - /// Create a reference to an `Event` from a platform specific event. - pub(crate) fn from_sys_event_ref(sys_event: &sys::Event) -> &Event { - unsafe { - // This is safe because the memory layout of `Event` is - // the same as `sys::Event` due to the `repr(transparent)` attribute. - &*(sys_event as *const sys::Event as *const Event) - } - } -} - -/// When the [alternate] flag is enabled this will print platform specific -/// details, for example the fields of the `kevent` structure on platforms that -/// use `kqueue(2)`. Note however that the output of this implementation is -/// **not** consider a part of the stable API. 
-/// -/// [alternate]: fmt::Formatter::alternate -impl fmt::Debug for Event { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let alternate = f.alternate(); - let mut d = f.debug_struct("Event"); - d.field("token", &self.token()) - .field("readable", &self.is_readable()) - .field("writable", &self.is_writable()) - .field("error", &self.is_error()) - .field("read_closed", &self.is_read_closed()) - .field("write_closed", &self.is_write_closed()) - .field("priority", &self.is_priority()) - .field("aio", &self.is_aio()) - .field("lio", &self.is_lio()); - - if alternate { - struct EventDetails<'a>(&'a sys::Event); - - impl<'a> fmt::Debug for EventDetails<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - sys::event::debug_details(f, self.0) - } - } - - d.field("details", &EventDetails(&self.inner)).finish() - } else { - d.finish() - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/event/events.rs s390-tools-2.33.1/rust-vendor/mio/src/event/events.rs --- s390-tools-2.31.0/rust-vendor/mio/src/event/events.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/event/events.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,230 +0,0 @@ -use crate::event::Event; -use crate::sys; - -use std::fmt; - -/// A collection of readiness events. -/// -/// `Events` is passed as an argument to [`Poll::poll`] and will be used to -/// receive any new readiness events received since the last poll. Usually, a -/// single `Events` instance is created at the same time as a [`Poll`] and -/// reused on each call to [`Poll::poll`]. -/// -/// See [`Poll`] for more documentation on polling. 
-/// -/// [`Poll::poll`]: ../struct.Poll.html#method.poll -/// [`Poll`]: ../struct.Poll.html -/// -/// # Examples -/// -#[cfg_attr(feature = "os-poll", doc = "```")] -#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] -/// # use std::error::Error; -/// # fn main() -> Result<(), Box> { -/// use mio::{Events, Poll}; -/// use std::time::Duration; -/// -/// let mut events = Events::with_capacity(1024); -/// let mut poll = Poll::new()?; -/// # -/// # assert!(events.is_empty()); -/// -/// // Register `event::Source`s with `poll`. -/// -/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; -/// -/// for event in events.iter() { -/// println!("Got an event for {:?}", event.token()); -/// } -/// # Ok(()) -/// # } -/// ``` -pub struct Events { - inner: sys::Events, -} - -/// [`Events`] iterator. -/// -/// This struct is created by the [`iter`] method on [`Events`]. -/// -/// [`Events`]: struct.Events.html -/// [`iter`]: struct.Events.html#method.iter -/// -/// # Examples -/// -#[cfg_attr(feature = "os-poll", doc = "```")] -#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] -/// # use std::error::Error; -/// # fn main() -> Result<(), Box> { -/// use mio::{Events, Poll}; -/// use std::time::Duration; -/// -/// let mut events = Events::with_capacity(1024); -/// let mut poll = Poll::new()?; -/// -/// // Register handles with `poll`. -/// -/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; -/// -/// for event in events.iter() { -/// println!("Got an event for {:?}", event.token()); -/// } -/// # Ok(()) -/// # } -/// ``` -#[derive(Debug, Clone)] -pub struct Iter<'a> { - inner: &'a Events, - pos: usize, -} - -impl Events { - /// Return a new `Events` capable of holding up to `capacity` events. 
- /// - /// # Examples - /// - /// ``` - /// use mio::Events; - /// - /// let events = Events::with_capacity(1024); - /// assert_eq!(1024, events.capacity()); - /// ``` - pub fn with_capacity(capacity: usize) -> Events { - Events { - inner: sys::Events::with_capacity(capacity), - } - } - - /// Returns the number of `Event` values that `self` can hold. - /// - /// ``` - /// use mio::Events; - /// - /// let events = Events::with_capacity(1024); - /// assert_eq!(1024, events.capacity()); - /// ``` - pub fn capacity(&self) -> usize { - self.inner.capacity() - } - - /// Returns `true` if `self` contains no `Event` values. - /// - /// # Examples - /// - /// ``` - /// use mio::Events; - /// - /// let events = Events::with_capacity(1024); - /// assert!(events.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - /// Returns an iterator over the `Event` values. - /// - /// # Examples - /// - #[cfg_attr(feature = "os-poll", doc = "```")] - #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] - /// # use std::error::Error; - /// # fn main() -> Result<(), Box> { - /// use mio::{Events, Poll}; - /// use std::time::Duration; - /// - /// let mut events = Events::with_capacity(1024); - /// let mut poll = Poll::new()?; - /// - /// // Register handles with `poll`. - /// - /// poll.poll(&mut events, Some(Duration::from_millis(100)))?; - /// - /// for event in events.iter() { - /// println!("Got an event for {:?}", event.token()); - /// } - /// # Ok(()) - /// # } - /// ``` - pub fn iter(&self) -> Iter<'_> { - Iter { - inner: self, - pos: 0, - } - } - - /// Clearing all `Event` values from container explicitly. - /// - /// # Notes - /// - /// Events are cleared before every `poll`, so it is not required to call - /// this manually. 
- /// - /// # Examples - /// - #[cfg_attr(feature = "os-poll", doc = "```")] - #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] - /// # use std::error::Error; - /// # fn main() -> Result<(), Box> { - /// use mio::{Events, Poll}; - /// use std::time::Duration; - /// - /// let mut events = Events::with_capacity(1024); - /// let mut poll = Poll::new()?; - /// - /// // Register handles with `poll`. - /// - /// poll.poll(&mut events, Some(Duration::from_millis(100)))?; - /// - /// // Clear all events. - /// events.clear(); - /// assert!(events.is_empty()); - /// # Ok(()) - /// # } - /// ``` - pub fn clear(&mut self) { - self.inner.clear(); - } - - /// Returns the inner `sys::Events`. - pub(crate) fn sys(&mut self) -> &mut sys::Events { - &mut self.inner - } -} - -impl<'a> IntoIterator for &'a Events { - type Item = &'a Event; - type IntoIter = Iter<'a>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a> Iterator for Iter<'a> { - type Item = &'a Event; - - fn next(&mut self) -> Option { - let ret = self - .inner - .inner - .get(self.pos) - .map(Event::from_sys_event_ref); - self.pos += 1; - ret - } - - fn size_hint(&self) -> (usize, Option) { - let size = self.inner.inner.len(); - (size, Some(size)) - } - - fn count(self) -> usize { - self.inner.inner.len() - } -} - -impl fmt::Debug for Events { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self).finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/event/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/event/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/event/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/event/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,10 +0,0 @@ -//! Readiness event types and utilities. 
- -#[allow(clippy::module_inception)] -mod event; -mod events; -mod source; - -pub use self::event::Event; -pub use self::events::{Events, Iter}; -pub use self::source::Source; diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/event/source.rs s390-tools-2.33.1/rust-vendor/mio/src/event/source.rs --- s390-tools-2.31.0/rust-vendor/mio/src/event/source.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/event/source.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,139 +0,0 @@ -use crate::{Interest, Registry, Token}; - -use std::io; - -/// An event source that may be registered with [`Registry`]. -/// -/// Types that implement `event::Source` can be registered with -/// `Registry`. Users of Mio **should not** use the `event::Source` trait -/// functions directly. Instead, the equivalent functions on `Registry` should -/// be used. -/// -/// See [`Registry`] for more details. -/// -/// [`Registry`]: ../struct.Registry.html -/// -/// # Implementing `event::Source` -/// -/// Event sources are always backed by system handles, such as sockets or other -/// system handles. These `event::Source`s will be monitored by the system -/// selector. An implementation of `Source` will almost always delegates to a -/// lower level handle. Examples of this are [`TcpStream`]s, or the *unix only* -/// [`SourceFd`]. -/// -/// [`TcpStream`]: ../net/struct.TcpStream.html -/// [`SourceFd`]: ../unix/struct.SourceFd.html -/// -/// # Dropping `event::Source`s -/// -/// All `event::Source`s, unless otherwise specified, need to be [deregistered] -/// before being dropped for them to not leak resources. This goes against the -/// normal drop behaviour of types in Rust which cleanup after themselves, e.g. -/// a `File` will close itself. However since deregistering needs access to -/// [`Registry`] this cannot be done while being dropped. 
-/// -/// [deregistered]: ../struct.Registry.html#method.deregister -/// -/// # Examples -/// -/// Implementing `Source` on a struct containing a socket: -/// -#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] -#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] -/// use mio::{Interest, Registry, Token}; -/// use mio::event::Source; -/// use mio::net::TcpStream; -/// -/// use std::io; -/// -/// # #[allow(dead_code)] -/// pub struct MySource { -/// socket: TcpStream, -/// } -/// -/// impl Source for MySource { -/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest) -/// -> io::Result<()> -/// { -/// // Delegate the `register` call to `socket` -/// self.socket.register(registry, token, interests) -/// } -/// -/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest) -/// -> io::Result<()> -/// { -/// // Delegate the `reregister` call to `socket` -/// self.socket.reregister(registry, token, interests) -/// } -/// -/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> { -/// // Delegate the `deregister` call to `socket` -/// self.socket.deregister(registry) -/// } -/// } -/// ``` -pub trait Source { - /// Register `self` with the given `Registry` instance. - /// - /// This function should not be called directly. Use [`Registry::register`] - /// instead. Implementors should handle registration by delegating the call - /// to another `Source` type. - /// - /// [`Registry::register`]: ../struct.Registry.html#method.register - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()>; - - /// Re-register `self` with the given `Registry` instance. - /// - /// This function should not be called directly. Use - /// [`Registry::reregister`] instead. Implementors should handle - /// re-registration by either delegating the call to another `Source` type. 
- /// - /// [`Registry::reregister`]: ../struct.Registry.html#method.reregister - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()>; - - /// Deregister `self` from the given `Registry` instance. - /// - /// This function should not be called directly. Use - /// [`Registry::deregister`] instead. Implementors should handle - /// deregistration by delegating the call to another `Source` type. - /// - /// [`Registry::deregister`]: ../struct.Registry.html#method.deregister - fn deregister(&mut self, registry: &Registry) -> io::Result<()>; -} - -impl Source for Box -where - T: Source + ?Sized, -{ - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - (**self).register(registry, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - (**self).reregister(registry, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - (**self).deregister(registry) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/interest.rs s390-tools-2.33.1/rust-vendor/mio/src/interest.rs --- s390-tools-2.31.0/rust-vendor/mio/src/interest.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/interest.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,193 +0,0 @@ -use std::num::NonZeroU8; -use std::{fmt, ops}; - -/// Interest used in registering. -/// -/// Interest are used in [registering] [`event::Source`]s with [`Poll`], they -/// indicate what readiness should be monitored for. For example if a socket is -/// registered with [readable] interests and the socket becomes writable, no -/// event will be returned from a call to [`poll`]. 
-/// -/// [registering]: struct.Registry.html#method.register -/// [`event::Source`]: ./event/trait.Source.html -/// [`Poll`]: struct.Poll.html -/// [readable]: struct.Interest.html#associatedconstant.READABLE -/// [`poll`]: struct.Poll.html#method.poll -#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)] -pub struct Interest(NonZeroU8); - -// These must be unique. -const READABLE: u8 = 0b0001; -const WRITABLE: u8 = 0b0010; -// The following are not available on all platforms. -const AIO: u8 = 0b0100; -const LIO: u8 = 0b1000; -const PRIORITY: u8 = 0b10000; - -impl Interest { - /// Returns a `Interest` set representing readable interests. - pub const READABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(READABLE) }); - - /// Returns a `Interest` set representing writable interests. - pub const WRITABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(WRITABLE) }); - - /// Returns a `Interest` set representing AIO completion interests. - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - pub const AIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(AIO) }); - - /// Returns a `Interest` set representing LIO completion interests. - #[cfg(target_os = "freebsd")] - pub const LIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(LIO) }); - - /// Returns a `Interest` set representing priority completion interests. - #[cfg(any(target_os = "linux", target_os = "android"))] - pub const PRIORITY: Interest = Interest(unsafe { NonZeroU8::new_unchecked(PRIORITY) }); - - /// Add together two `Interest`. - /// - /// This does the same thing as the `BitOr` implementation, but is a - /// constant function. 
- /// - /// ``` - /// use mio::Interest; - /// - /// const INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE); - /// # fn silent_dead_code_warning(_: Interest) { } - /// # silent_dead_code_warning(INTERESTS) - /// ``` - #[allow(clippy::should_implement_trait)] - pub const fn add(self, other: Interest) -> Interest { - Interest(unsafe { NonZeroU8::new_unchecked(self.0.get() | other.0.get()) }) - } - - /// Removes `other` `Interest` from `self`. - /// - /// Returns `None` if the set would be empty after removing `other`. - /// - /// ``` - /// use mio::Interest; - /// - /// const RW_INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE); - /// - /// // As long a one interest remain this will return `Some`. - /// let w_interest = RW_INTERESTS.remove(Interest::READABLE).unwrap(); - /// assert!(!w_interest.is_readable()); - /// assert!(w_interest.is_writable()); - /// - /// // Removing all interests from the set will return `None`. - /// assert_eq!(w_interest.remove(Interest::WRITABLE), None); - /// - /// // Its also possible to remove multiple interests at once. - /// assert_eq!(RW_INTERESTS.remove(RW_INTERESTS), None); - /// ``` - pub fn remove(self, other: Interest) -> Option { - NonZeroU8::new(self.0.get() & !other.0.get()).map(Interest) - } - - /// Returns true if the value includes readable readiness. - pub const fn is_readable(self) -> bool { - (self.0.get() & READABLE) != 0 - } - - /// Returns true if the value includes writable readiness. - pub const fn is_writable(self) -> bool { - (self.0.get() & WRITABLE) != 0 - } - - /// Returns true if `Interest` contains AIO readiness. - pub const fn is_aio(self) -> bool { - (self.0.get() & AIO) != 0 - } - - /// Returns true if `Interest` contains LIO readiness. - pub const fn is_lio(self) -> bool { - (self.0.get() & LIO) != 0 - } - - /// Returns true if `Interest` contains priority readiness. 
- pub const fn is_priority(self) -> bool { - (self.0.get() & PRIORITY) != 0 - } -} - -impl ops::BitOr for Interest { - type Output = Self; - - #[inline] - fn bitor(self, other: Self) -> Self { - self.add(other) - } -} - -impl ops::BitOrAssign for Interest { - #[inline] - fn bitor_assign(&mut self, other: Self) { - self.0 = (*self | other).0; - } -} - -impl fmt::Debug for Interest { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut one = false; - if self.is_readable() { - if one { - write!(fmt, " | ")? - } - write!(fmt, "READABLE")?; - one = true - } - if self.is_writable() { - if one { - write!(fmt, " | ")? - } - write!(fmt, "WRITABLE")?; - one = true - } - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - { - if self.is_aio() { - if one { - write!(fmt, " | ")? - } - write!(fmt, "AIO")?; - one = true - } - } - #[cfg(any(target_os = "freebsd"))] - { - if self.is_lio() { - if one { - write!(fmt, " | ")? - } - write!(fmt, "LIO")?; - one = true - } - } - #[cfg(any(target_os = "linux", target_os = "android"))] - { - if self.is_priority() { - if one { - write!(fmt, " | ")? 
- } - write!(fmt, "PRIORITY")?; - one = true - } - } - debug_assert!(one, "printing empty interests"); - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/io_source.rs s390-tools-2.33.1/rust-vendor/mio/src/io_source.rs --- s390-tools-2.31.0/rust-vendor/mio/src/io_source.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/io_source.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,334 +0,0 @@ -use std::ops::{Deref, DerefMut}; -#[cfg(unix)] -use std::os::unix::io::AsRawFd; -#[cfg(target_os = "wasi")] -use std::os::wasi::io::AsRawFd; -#[cfg(windows)] -use std::os::windows::io::AsRawSocket; -#[cfg(debug_assertions)] -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::{fmt, io}; - -use crate::sys::IoSourceState; -use crate::{event, Interest, Registry, Token}; - -/// Adapter for a [`RawFd`] or [`RawSocket`] providing an [`event::Source`] -/// implementation. -/// -/// `IoSource` enables registering any FD or socket wrapper with [`Poll`]. -/// -/// While only implementations for TCP, UDP, and UDS (Unix only) are provided, -/// Mio supports registering any FD or socket that can be registered with the -/// underlying OS selector. `IoSource` provides the necessary bridge. -/// -/// [`RawFd`]: std::os::unix::io::RawFd -/// [`RawSocket`]: std::os::windows::io::RawSocket -/// -/// # Notes -/// -/// To handle the registrations and events properly **all** I/O operations (such -/// as `read`, `write`, etc.) must go through the [`do_io`] method to ensure the -/// internal state is updated accordingly. -/// -/// [`Poll`]: crate::Poll -/// [`do_io`]: IoSource::do_io -/* -/// -/// # Examples -/// -/// Basic usage. -/// -/// ``` -/// # use std::error::Error; -/// # fn main() -> Result<(), Box> { -/// use mio::{Interest, Poll, Token}; -/// use mio::IoSource; -/// -/// use std::net; -/// -/// let poll = Poll::new()?; -/// -/// // Bind a std TCP listener. 
-/// let listener = net::TcpListener::bind("127.0.0.1:0")?; -/// // Wrap it in the `IoSource` type. -/// let mut listener = IoSource::new(listener); -/// -/// // Register the listener. -/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?; -/// # Ok(()) -/// # } -/// ``` -*/ -pub struct IoSource { - state: IoSourceState, - inner: T, - #[cfg(debug_assertions)] - selector_id: SelectorId, -} - -impl IoSource { - /// Create a new `IoSource`. - pub fn new(io: T) -> IoSource { - IoSource { - state: IoSourceState::new(), - inner: io, - #[cfg(debug_assertions)] - selector_id: SelectorId::new(), - } - } - - /// Execute an I/O operations ensuring that the socket receives more events - /// if it hits a [`WouldBlock`] error. - /// - /// # Notes - /// - /// This method is required to be called for **all** I/O operations to - /// ensure the user will receive events once the socket is ready again after - /// returning a [`WouldBlock`] error. - /// - /// [`WouldBlock`]: io::ErrorKind::WouldBlock - pub fn do_io(&self, f: F) -> io::Result - where - F: FnOnce(&T) -> io::Result, - { - self.state.do_io(f, &self.inner) - } - - /// Returns the I/O source, dropping the state. - /// - /// # Notes - /// - /// To ensure no more events are to be received for this I/O source first - /// [`deregister`] it. - /// - /// [`deregister`]: Registry::deregister - pub fn into_inner(self) -> T { - self.inner - } -} - -/// Be careful when using this method. All I/O operations that may block must go -/// through the [`do_io`] method. -/// -/// [`do_io`]: IoSource::do_io -impl Deref for IoSource { - type Target = T; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -/// Be careful when using this method. All I/O operations that may block must go -/// through the [`do_io`] method. 
-/// -/// [`do_io`]: IoSource::do_io -impl DerefMut for IoSource { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} - -#[cfg(unix)] -impl event::Source for IoSource -where - T: AsRawFd, -{ - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - #[cfg(debug_assertions)] - self.selector_id.associate(registry)?; - registry - .selector() - .register(self.inner.as_raw_fd(), token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - #[cfg(debug_assertions)] - self.selector_id.check_association(registry)?; - registry - .selector() - .reregister(self.inner.as_raw_fd(), token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - #[cfg(debug_assertions)] - self.selector_id.remove_association(registry)?; - registry.selector().deregister(self.inner.as_raw_fd()) - } -} - -#[cfg(windows)] -impl event::Source for IoSource -where - T: AsRawSocket, -{ - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - #[cfg(debug_assertions)] - self.selector_id.associate(registry)?; - self.state - .register(registry, token, interests, self.inner.as_raw_socket()) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - #[cfg(debug_assertions)] - self.selector_id.check_association(registry)?; - self.state.reregister(registry, token, interests) - } - - fn deregister(&mut self, _registry: &Registry) -> io::Result<()> { - #[cfg(debug_assertions)] - self.selector_id.remove_association(_registry)?; - self.state.deregister() - } -} - -#[cfg(target_os = "wasi")] -impl event::Source for IoSource -where - T: AsRawFd, -{ - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - #[cfg(debug_assertions)] - 
self.selector_id.associate(registry)?; - registry - .selector() - .register(self.inner.as_raw_fd() as _, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - #[cfg(debug_assertions)] - self.selector_id.check_association(registry)?; - registry - .selector() - .reregister(self.inner.as_raw_fd() as _, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - #[cfg(debug_assertions)] - self.selector_id.remove_association(registry)?; - registry.selector().deregister(self.inner.as_raw_fd() as _) - } -} - -impl fmt::Debug for IoSource -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) - } -} - -/// Used to associate an `IoSource` with a `sys::Selector`. -#[cfg(debug_assertions)] -#[derive(Debug)] -struct SelectorId { - id: AtomicUsize, -} - -#[cfg(debug_assertions)] -impl SelectorId { - /// Value of `id` if `SelectorId` is not associated with any - /// `sys::Selector`. Valid selector ids start at 1. - const UNASSOCIATED: usize = 0; - - /// Create a new `SelectorId`. - const fn new() -> SelectorId { - SelectorId { - id: AtomicUsize::new(Self::UNASSOCIATED), - } - } - - /// Associate an I/O source with `registry`, returning an error if its - /// already registered. - fn associate(&self, registry: &Registry) -> io::Result<()> { - let registry_id = registry.selector().id(); - let previous_id = self.id.swap(registry_id, Ordering::AcqRel); - - if previous_id == Self::UNASSOCIATED { - Ok(()) - } else { - Err(io::Error::new( - io::ErrorKind::AlreadyExists, - "I/O source already registered with a `Registry`", - )) - } - } - - /// Check the association of an I/O source with `registry`, returning an - /// error if its registered with a different `Registry` or not registered at - /// all. 
- fn check_association(&self, registry: &Registry) -> io::Result<()> { - let registry_id = registry.selector().id(); - let id = self.id.load(Ordering::Acquire); - - if id == registry_id { - Ok(()) - } else if id == Self::UNASSOCIATED { - Err(io::Error::new( - io::ErrorKind::NotFound, - "I/O source not registered with `Registry`", - )) - } else { - Err(io::Error::new( - io::ErrorKind::AlreadyExists, - "I/O source already registered with a different `Registry`", - )) - } - } - - /// Remove a previously made association from `registry`, returns an error - /// if it was not previously associated with `registry`. - fn remove_association(&self, registry: &Registry) -> io::Result<()> { - let registry_id = registry.selector().id(); - let previous_id = self.id.swap(Self::UNASSOCIATED, Ordering::AcqRel); - - if previous_id == registry_id { - Ok(()) - } else { - Err(io::Error::new( - io::ErrorKind::NotFound, - "I/O source not registered with `Registry`", - )) - } - } -} - -#[cfg(debug_assertions)] -impl Clone for SelectorId { - fn clone(&self) -> SelectorId { - SelectorId { - id: AtomicUsize::new(self.id.load(Ordering::Acquire)), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/lib.rs s390-tools-2.33.1/rust-vendor/mio/src/lib.rs --- s390-tools-2.31.0/rust-vendor/mio/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,266 +0,0 @@ -#![deny( - missing_docs, - missing_debug_implementations, - rust_2018_idioms, - unused_imports, - dead_code -)] -#![cfg_attr(docsrs, feature(doc_cfg))] -// Disallow warnings when running tests. -#![cfg_attr(test, deny(warnings))] -// Disallow warnings in examples. -#![doc(test(attr(deny(warnings))))] - -//! Mio is a fast, low-level I/O library for Rust focusing on non-blocking APIs -//! and event notification for building high performance I/O apps with as little -//! overhead as possible over the OS abstractions. -//! -//! # Usage -//! -//! 
Using Mio starts by creating a [`Poll`], which reads events from the OS and -//! puts them into [`Events`]. You can handle I/O events from the OS with it. -//! -//! For more detail, see [`Poll`]. -//! -//! [`Poll`]: ../mio/struct.Poll.html -//! [`Events`]: ../mio/event/struct.Events.html -//! -//! ## Examples -//! -//! Examples can found in the `examples` directory of the source code, or [on -//! GitHub]. -//! -//! [on GitHub]: https://github.com/tokio-rs/mio/tree/master/examples -//! -//! ## Guide -//! -//! A getting started guide is available in the [`guide`] module. -//! -//! ## Available features -//! -//! The available features are described in the [`features`] module. - -// macros used internally -#[macro_use] -mod macros; - -mod interest; -mod poll; -mod sys; -mod token; -#[cfg(not(target_os = "wasi"))] -mod waker; - -pub mod event; - -cfg_io_source! { - mod io_source; -} - -cfg_net! { - pub mod net; -} - -#[doc(no_inline)] -pub use event::Events; -pub use interest::Interest; -pub use poll::{Poll, Registry}; -pub use token::Token; -#[cfg(not(target_os = "wasi"))] -pub use waker::Waker; - -#[cfg(all(unix, feature = "os-ext"))] -#[cfg_attr(docsrs, doc(cfg(all(unix, feature = "os-ext"))))] -pub mod unix { - //! Unix only extensions. - - pub mod pipe { - //! Unix pipe. - //! - //! See the [`new`] function for documentation. - - pub use crate::sys::pipe::{new, Receiver, Sender}; - } - - pub use crate::sys::SourceFd; -} - -#[cfg(all(windows, feature = "os-ext"))] -#[cfg_attr(docsrs, doc(cfg(all(windows, feature = "os-ext"))))] -pub mod windows { - //! Windows only extensions. - - pub use crate::sys::named_pipe::NamedPipe; -} - -pub mod features { - //! # Mio's optional features. - //! - //! This document describes the available features in Mio. - //! - #![cfg_attr(feature = "os-poll", doc = "## `os-poll` (enabled)")] - #![cfg_attr(not(feature = "os-poll"), doc = "## `os-poll` (disabled)")] - //! - //! 
Mio by default provides only a shell implementation that `panic!`s the - //! moment it is actually run. To run it requires OS support, this is - //! enabled by activating the `os-poll` feature. - //! - //! This makes `Poll`, `Registry` and `Waker` functional. - //! - #![cfg_attr(feature = "os-ext", doc = "## `os-ext` (enabled)")] - #![cfg_attr(not(feature = "os-ext"), doc = "## `os-ext` (disabled)")] - //! - //! `os-ext` enables additional OS specific facilities. These facilities can - //! be found in the `unix` and `windows` module. - //! - #![cfg_attr(feature = "net", doc = "## Network types (enabled)")] - #![cfg_attr(not(feature = "net"), doc = "## Network types (disabled)")] - //! - //! The `net` feature enables networking primitives in the `net` module. -} - -pub mod guide { - //! # Getting started guide. - //! - //! In this guide we'll do the following: - //! - //! 1. Create a [`Poll`] instance (and learn what it is). - //! 2. Register an [event source]. - //! 3. Create an event loop. - //! - //! At the end you'll have a very small (but quick) TCP server that accepts - //! connections and then drops (disconnects) them. - //! - //! ## 1. Creating a `Poll` instance - //! - //! Using Mio starts by creating a [`Poll`] instance, which monitors events - //! from the OS and puts them into [`Events`]. This allows us to execute I/O - //! operations based on what operations are ready. - //! - //! [`Poll`]: ../struct.Poll.html - //! [`Events`]: ../event/struct.Events.html - //! - #![cfg_attr(feature = "os-poll", doc = "```")] - #![cfg_attr(not(feature = "os-poll"), doc = "```ignore")] - //! # use mio::{Poll, Events}; - //! # fn main() -> std::io::Result<()> { - //! // `Poll` allows for polling of readiness events. - //! let poll = Poll::new()?; - //! // `Events` is collection of readiness `Event`s and can be filled by - //! // calling `Poll::poll`. - //! let events = Events::with_capacity(128); - //! # drop((poll, events)); - //! # Ok(()) - //! # } - //! ``` - //! - //! 
For example if we're using a [`TcpListener`], we'll only want to - //! attempt to accept an incoming connection *iff* any connections are - //! queued and ready to be accepted. We don't want to waste our time if no - //! connections are ready. - //! - //! [`TcpListener`]: ../net/struct.TcpListener.html - //! - //! ## 2. Registering event source - //! - //! After we've created a [`Poll`] instance that monitors events from the OS - //! for us, we need to provide it with a source of events. This is done by - //! registering an [event source]. As the name “event source†suggests it is - //! a source of events which can be polled using a `Poll` instance. On Unix - //! systems this is usually a file descriptor, or a socket/handle on - //! Windows. - //! - //! In the example below we'll use a [`TcpListener`] for which we'll receive - //! an event (from [`Poll`]) once a connection is ready to be accepted. - //! - //! [event source]: ../event/trait.Source.html - //! - #![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] - #![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] - //! # use mio::net::TcpListener; - //! # use mio::{Poll, Token, Interest}; - //! # fn main() -> std::io::Result<()> { - //! # let poll = Poll::new()?; - //! # let address = "127.0.0.1:0".parse().unwrap(); - //! // Create a `TcpListener`, binding it to `address`. - //! let mut listener = TcpListener::bind(address)?; - //! - //! // Next we register it with `Poll` to receive events for it. The `SERVER` - //! // `Token` is used to determine that we received an event for the listener - //! // later on. - //! const SERVER: Token = Token(0); - //! poll.registry().register(&mut listener, SERVER, Interest::READABLE)?; - //! # Ok(()) - //! # } - //! ``` - //! - //! Multiple event sources can be [registered] (concurrently), so we can - //! monitor multiple sources at a time. - //! - //! [registered]: ../struct.Registry.html#method.register - //! - //! ## 3. 
Creating the event loop - //! - //! After we've created a [`Poll`] instance and registered one or more - //! [event sources] with it, we can [poll] it for events. Polling for events - //! is simple, we need a container to store the events: [`Events`] and need - //! to do something based on the polled events (this part is up to you, we - //! can't do it all!). If we do this in a loop we've got ourselves an event - //! loop. - //! - //! The example below shows the event loop in action, completing our small - //! TCP server. - //! - //! [poll]: ../struct.Poll.html#method.poll - //! [event sources]: ../event/trait.Source.html - //! - #![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] - #![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] - //! # use std::io; - //! # use std::time::Duration; - //! # use mio::net::TcpListener; - //! # use mio::{Poll, Token, Interest, Events}; - //! # fn main() -> io::Result<()> { - //! # let mut poll = Poll::new()?; - //! # let mut events = Events::with_capacity(128); - //! # let address = "127.0.0.1:0".parse().unwrap(); - //! # let mut listener = TcpListener::bind(address)?; - //! # const SERVER: Token = Token(0); - //! # poll.registry().register(&mut listener, SERVER, Interest::READABLE)?; - //! // Start our event loop. - //! loop { - //! // Poll the OS for events, waiting at most 100 milliseconds. - //! poll.poll(&mut events, Some(Duration::from_millis(100)))?; - //! - //! // Process each event. - //! for event in events.iter() { - //! // We can use the token we previously provided to `register` to - //! // determine for which type the event is. - //! match event.token() { - //! SERVER => loop { - //! // One or more connections are ready, so we'll attempt to - //! // accept them (in a loop). - //! match listener.accept() { - //! Ok((connection, address)) => { - //! println!("Got a connection from: {}", address); - //! # drop(connection); - //! }, - //! 
// A "would block error" is returned if the operation - //! // is not ready, so we'll stop trying to accept - //! // connections. - //! Err(ref err) if would_block(err) => break, - //! Err(err) => return Err(err), - //! } - //! } - //! # _ => unreachable!(), - //! } - //! } - //! # return Ok(()); - //! } - //! - //! fn would_block(err: &io::Error) -> bool { - //! err.kind() == io::ErrorKind::WouldBlock - //! } - //! # } - //! ``` -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/macros.rs s390-tools-2.33.1/rust-vendor/mio/src/macros.rs --- s390-tools-2.31.0/rust-vendor/mio/src/macros.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/macros.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,98 +0,0 @@ -//! Macros to ease conditional code based on enabled features. - -// Depending on the features not all macros are used. -#![allow(unused_macros)] - -/// The `os-poll` feature is enabled. -macro_rules! cfg_os_poll { - ($($item:item)*) => { - $( - #[cfg(feature = "os-poll")] - #[cfg_attr(docsrs, doc(cfg(feature = "os-poll")))] - $item - )* - } -} - -/// The `os-poll` feature is disabled. -macro_rules! cfg_not_os_poll { - ($($item:item)*) => { - $( - #[cfg(not(feature = "os-poll"))] - $item - )* - } -} - -/// The `os-ext` feature is enabled. -macro_rules! cfg_os_ext { - ($($item:item)*) => { - $( - #[cfg(feature = "os-ext")] - #[cfg_attr(docsrs, doc(cfg(feature = "os-ext")))] - $item - )* - } -} - -/// The `net` feature is enabled. -macro_rules! cfg_net { - ($($item:item)*) => { - $( - #[cfg(feature = "net")] - #[cfg_attr(docsrs, doc(cfg(feature = "net")))] - $item - )* - } -} - -/// One of the features enabled that needs `IoSource`. That is `net` or `os-ext` -/// on Unix (for `pipe`). -macro_rules! 
cfg_io_source { - ($($item:item)*) => { - $( - #[cfg(any(feature = "net", all(unix, feature = "os-ext")))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "net", all(unix, feature = "os-ext")))))] - $item - )* - } -} - -/// The `os-ext` feature is enabled, or one of the features that need `os-ext`. -macro_rules! cfg_any_os_ext { - ($($item:item)*) => { - $( - #[cfg(any(feature = "os-ext", feature = "net"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "os-ext", feature = "net"))))] - $item - )* - } -} - -macro_rules! trace { - ($($t:tt)*) => { - log!(trace, $($t)*) - } -} - -macro_rules! warn { - ($($t:tt)*) => { - log!(warn, $($t)*) - } -} - -macro_rules! error { - ($($t:tt)*) => { - log!(error, $($t)*) - } -} - -macro_rules! log { - ($level: ident, $($t:tt)*) => { - #[cfg(feature = "log")] - { log::$level!($($t)*) } - // Silence unused variables warnings. - #[cfg(not(feature = "log"))] - { if false { let _ = ( $($t)* ); } } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/net/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/net/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/net/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/net/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ -//! Networking primitives. -//! -//! The types provided in this module are non-blocking by default and are -//! designed to be portable across all supported Mio platforms. As long as the -//! [portability guidelines] are followed, the behavior should be identical no -//! matter the target platform. -//! -//! [portability guidelines]: ../struct.Poll.html#portability -//! -//! # Notes -//! -//! When using a datagram based socket, i.e. [`UdpSocket`] or [`UnixDatagram`], -//! its only possible to receive a packet once. This means that if you provide a -//! buffer that is too small you won't be able to receive the data anymore. How -//! OSs deal with this situation is different for each OS: -//! 
* Unixes, such as Linux, FreeBSD and macOS, will simply fill the buffer and -//! return the amount of bytes written. This means that if the returned value -//! is equal to the size of the buffer it may have only written a part of the -//! packet (or the packet has the same size as the buffer). -//! * Windows returns an `WSAEMSGSIZE` error. -//! -//! Mio does not change the value (either ok or error) returned by the OS, it's -//! up to the user handle this. How to deal with these difference is still up -//! for debate, specifically in -//! . The best advice we can -//! give is to always call receive with a large enough buffer. - -mod tcp; -pub use self::tcp::{TcpListener, TcpStream}; - -#[cfg(not(target_os = "wasi"))] -mod udp; -#[cfg(not(target_os = "wasi"))] -pub use self::udp::UdpSocket; - -#[cfg(unix)] -mod uds; -#[cfg(unix)] -pub use self::uds::{SocketAddr, UnixDatagram, UnixListener, UnixStream}; diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/net/tcp/listener.rs s390-tools-2.33.1/rust-vendor/mio/src/net/tcp/listener.rs --- s390-tools-2.31.0/rust-vendor/mio/src/net/tcp/listener.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/net/tcp/listener.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,248 +0,0 @@ -use std::net::{self, SocketAddr}; -#[cfg(unix)] -use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; -#[cfg(target_os = "wasi")] -use std::os::wasi::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; -#[cfg(windows)] -use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket}; -use std::{fmt, io}; - -use crate::io_source::IoSource; -use crate::net::TcpStream; -#[cfg(unix)] -use crate::sys::tcp::set_reuseaddr; -#[cfg(not(target_os = "wasi"))] -use crate::sys::tcp::{bind, listen, new_for_addr}; -use crate::{event, sys, Interest, Registry, Token}; - -/// A structure representing a socket server -/// -/// # Examples -/// -#[cfg_attr(feature = "os-poll", doc = "```")] -#[cfg_attr(not(feature = "os-poll"), doc = 
"```ignore")] -/// # use std::error::Error; -/// # fn main() -> Result<(), Box> { -/// use mio::{Events, Interest, Poll, Token}; -/// use mio::net::TcpListener; -/// use std::time::Duration; -/// -/// let mut listener = TcpListener::bind("127.0.0.1:34255".parse()?)?; -/// -/// let mut poll = Poll::new()?; -/// let mut events = Events::with_capacity(128); -/// -/// // Register the socket with `Poll` -/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?; -/// -/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; -/// -/// // There may be a socket ready to be accepted -/// # Ok(()) -/// # } -/// ``` -pub struct TcpListener { - inner: IoSource, -} - -impl TcpListener { - /// Convenience method to bind a new TCP listener to the specified address - /// to receive new connections. - /// - /// This function will take the following steps: - /// - /// 1. Create a new TCP socket. - /// 2. Set the `SO_REUSEADDR` option on the socket on Unix. - /// 3. Bind the socket to the specified address. - /// 4. Calls `listen` on the socket to prepare it to receive new connections. - #[cfg(not(target_os = "wasi"))] - pub fn bind(addr: SocketAddr) -> io::Result { - let socket = new_for_addr(addr)?; - #[cfg(unix)] - let listener = unsafe { TcpListener::from_raw_fd(socket) }; - #[cfg(windows)] - let listener = unsafe { TcpListener::from_raw_socket(socket as _) }; - - // On platforms with Berkeley-derived sockets, this allows to quickly - // rebind a socket, without needing to wait for the OS to clean up the - // previous one. - // - // On Windows, this allows rebinding sockets which are actively in use, - // which allows “socket hijackingâ€, so we explicitly don't set it here. 
- // https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse - #[cfg(not(windows))] - set_reuseaddr(&listener.inner, true)?; - - bind(&listener.inner, addr)?; - listen(&listener.inner, 1024)?; - Ok(listener) - } - - /// Creates a new `TcpListener` from a standard `net::TcpListener`. - /// - /// This function is intended to be used to wrap a TCP listener from the - /// standard library in the Mio equivalent. The conversion assumes nothing - /// about the underlying listener; ; it is left up to the user to set it - /// in non-blocking mode. - pub fn from_std(listener: net::TcpListener) -> TcpListener { - TcpListener { - inner: IoSource::new(listener), - } - } - - /// Accepts a new `TcpStream`. - /// - /// This may return an `Err(e)` where `e.kind()` is - /// `io::ErrorKind::WouldBlock`. This means a stream may be ready at a later - /// point and one should wait for an event before calling `accept` again. - /// - /// If an accepted stream is returned, the remote address of the peer is - /// returned along with it. - pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { - self.inner.do_io(|inner| { - sys::tcp::accept(inner).map(|(stream, addr)| (TcpStream::from_std(stream), addr)) - }) - } - - /// Returns the local socket address of this listener. - pub fn local_addr(&self) -> io::Result { - self.inner.local_addr() - } - - /// Sets the value for the `IP_TTL` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.inner.set_ttl(ttl) - } - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`][link]. - /// - /// [link]: #method.set_ttl - pub fn ttl(&self) -> io::Result { - self.inner.ttl() - } - - /// Get the value of the `SO_ERROR` option on this socket. 
- /// - /// This will retrieve the stored error in the underlying socket, clearing - /// the field in the process. This can be useful for checking errors between - /// calls. - pub fn take_error(&self) -> io::Result> { - self.inner.take_error() - } -} - -impl event::Source for TcpListener { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.register(registry, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.reregister(registry, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - self.inner.deregister(registry) - } -} - -impl fmt::Debug for TcpListener { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) - } -} - -#[cfg(unix)] -impl IntoRawFd for TcpListener { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -#[cfg(unix)] -impl AsRawFd for TcpListener { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -#[cfg(unix)] -impl FromRawFd for TcpListener { - /// Converts a `RawFd` to a `TcpListener`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. - unsafe fn from_raw_fd(fd: RawFd) -> TcpListener { - TcpListener::from_std(FromRawFd::from_raw_fd(fd)) - } -} - -#[cfg(windows)] -impl IntoRawSocket for TcpListener { - fn into_raw_socket(self) -> RawSocket { - self.inner.into_inner().into_raw_socket() - } -} - -#[cfg(windows)] -impl AsRawSocket for TcpListener { - fn as_raw_socket(&self) -> RawSocket { - self.inner.as_raw_socket() - } -} - -#[cfg(windows)] -impl FromRawSocket for TcpListener { - /// Converts a `RawSocket` to a `TcpListener`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. 
- unsafe fn from_raw_socket(socket: RawSocket) -> TcpListener { - TcpListener::from_std(FromRawSocket::from_raw_socket(socket)) - } -} - -#[cfg(target_os = "wasi")] -impl IntoRawFd for TcpListener { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -#[cfg(target_os = "wasi")] -impl AsRawFd for TcpListener { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -#[cfg(target_os = "wasi")] -impl FromRawFd for TcpListener { - /// Converts a `RawFd` to a `TcpListener`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. - unsafe fn from_raw_fd(fd: RawFd) -> TcpListener { - TcpListener::from_std(FromRawFd::from_raw_fd(fd)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/net/tcp/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/net/tcp/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/net/tcp/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/net/tcp/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,5 +0,0 @@ -mod listener; -pub use self::listener::TcpListener; - -mod stream; -pub use self::stream::TcpStream; diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/net/tcp/stream.rs s390-tools-2.33.1/rust-vendor/mio/src/net/tcp/stream.rs --- s390-tools-2.31.0/rust-vendor/mio/src/net/tcp/stream.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/net/tcp/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,427 +0,0 @@ -use std::fmt; -use std::io::{self, IoSlice, IoSliceMut, Read, Write}; -use std::net::{self, Shutdown, SocketAddr}; -#[cfg(unix)] -use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; -#[cfg(target_os = "wasi")] -use std::os::wasi::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; -#[cfg(windows)] -use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket}; - -use crate::io_source::IoSource; -#[cfg(not(target_os = "wasi"))] -use crate::sys::tcp::{connect, new_for_addr}; 
-use crate::{event, Interest, Registry, Token}; - -/// A non-blocking TCP stream between a local socket and a remote socket. -/// -/// The socket will be closed when the value is dropped. -/// -/// # Examples -/// -#[cfg_attr(feature = "os-poll", doc = "```")] -#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] -/// # use std::net::{TcpListener, SocketAddr}; -/// # use std::error::Error; -/// # -/// # fn main() -> Result<(), Box> { -/// let address: SocketAddr = "127.0.0.1:0".parse()?; -/// let listener = TcpListener::bind(address)?; -/// use mio::{Events, Interest, Poll, Token}; -/// use mio::net::TcpStream; -/// use std::time::Duration; -/// -/// let mut stream = TcpStream::connect(listener.local_addr()?)?; -/// -/// let mut poll = Poll::new()?; -/// let mut events = Events::with_capacity(128); -/// -/// // Register the socket with `Poll` -/// poll.registry().register(&mut stream, Token(0), Interest::WRITABLE)?; -/// -/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; -/// -/// // The socket might be ready at this point -/// # Ok(()) -/// # } -/// ``` -pub struct TcpStream { - inner: IoSource, -} - -impl TcpStream { - /// Create a new TCP stream and issue a non-blocking connect to the - /// specified address. - /// - /// # Notes - /// - /// The returned `TcpStream` may not be connected (and thus usable), unlike - /// the API found in `std::net::TcpStream`. Because Mio issues a - /// *non-blocking* connect it will not block the thread and instead return - /// an unconnected `TcpStream`. - /// - /// Ensuring the returned stream is connected is surprisingly complex when - /// considering cross-platform support. Doing this properly should follow - /// the steps below, an example implementation can be found - /// [here](https://github.com/Thomasdezeeuw/heph/blob/0c4f1ab3eaf08bea1d65776528bfd6114c9f8374/src/net/tcp/stream.rs#L560-L622). - /// - /// 1. Call `TcpStream::connect` - /// 2. Register the returned stream with at least [write interest]. 
- /// 3. Wait for a (writable) event. - /// 4. Check `TcpStream::peer_addr`. If it returns `libc::EINPROGRESS` or - /// `ErrorKind::NotConnected` it means the stream is not yet connected, - /// go back to step 3. If it returns an address it means the stream is - /// connected, go to step 5. If another error is returned something - /// went wrong. - /// 5. Now the stream can be used. - /// - /// This may return a `WouldBlock` in which case the socket connection - /// cannot be completed immediately, it usually means there are insufficient - /// entries in the routing cache. - /// - /// [write interest]: Interest::WRITABLE - #[cfg(not(target_os = "wasi"))] - pub fn connect(addr: SocketAddr) -> io::Result { - let socket = new_for_addr(addr)?; - #[cfg(unix)] - let stream = unsafe { TcpStream::from_raw_fd(socket) }; - #[cfg(windows)] - let stream = unsafe { TcpStream::from_raw_socket(socket as _) }; - connect(&stream.inner, addr)?; - Ok(stream) - } - - /// Creates a new `TcpStream` from a standard `net::TcpStream`. - /// - /// This function is intended to be used to wrap a TCP stream from the - /// standard library in the Mio equivalent. The conversion assumes nothing - /// about the underlying stream; it is left up to the user to set it in - /// non-blocking mode. - /// - /// # Note - /// - /// The TCP stream here will not have `connect` called on it, so it - /// should already be connected via some other means (be it manually, or - /// the standard library). - pub fn from_std(stream: net::TcpStream) -> TcpStream { - TcpStream { - inner: IoSource::new(stream), - } - } - - /// Returns the socket address of the remote peer of this TCP connection. - pub fn peer_addr(&self) -> io::Result { - self.inner.peer_addr() - } - - /// Returns the socket address of the local half of this TCP connection. - pub fn local_addr(&self) -> io::Result { - self.inner.local_addr() - } - - /// Shuts down the read, write, or both halves of this connection. 
- /// - /// This function will cause all pending and future I/O on the specified - /// portions to return immediately with an appropriate value (see the - /// documentation of `Shutdown`). - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.inner.shutdown(how) - } - - /// Sets the value of the `TCP_NODELAY` option on this socket. - /// - /// If set, this option disables the Nagle algorithm. This means that - /// segments are always sent as soon as possible, even if there is only a - /// small amount of data. When not set, data is buffered until there is a - /// sufficient amount to send out, thereby avoiding the frequent sending of - /// small packets. - /// - /// # Notes - /// - /// On Windows make sure the stream is connected before calling this method, - /// by receiving an (writable) event. Trying to set `nodelay` on an - /// unconnected `TcpStream` is unspecified behavior. - pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { - self.inner.set_nodelay(nodelay) - } - - /// Gets the value of the `TCP_NODELAY` option on this socket. - /// - /// For more information about this option, see [`set_nodelay`][link]. - /// - /// [link]: #method.set_nodelay - /// - /// # Notes - /// - /// On Windows make sure the stream is connected before calling this method, - /// by receiving an (writable) event. Trying to get `nodelay` on an - /// unconnected `TcpStream` is unspecified behavior. - pub fn nodelay(&self) -> io::Result { - self.inner.nodelay() - } - - /// Sets the value for the `IP_TTL` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - /// - /// # Notes - /// - /// On Windows make sure the stream is connected before calling this method, - /// by receiving an (writable) event. Trying to set `ttl` on an - /// unconnected `TcpStream` is unspecified behavior. 
- pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.inner.set_ttl(ttl) - } - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`][link]. - /// - /// # Notes - /// - /// On Windows make sure the stream is connected before calling this method, - /// by receiving an (writable) event. Trying to get `ttl` on an - /// unconnected `TcpStream` is unspecified behavior. - /// - /// [link]: #method.set_ttl - pub fn ttl(&self) -> io::Result { - self.inner.ttl() - } - - /// Get the value of the `SO_ERROR` option on this socket. - /// - /// This will retrieve the stored error in the underlying socket, clearing - /// the field in the process. This can be useful for checking errors between - /// calls. - pub fn take_error(&self) -> io::Result> { - self.inner.take_error() - } - - /// Receives data on the socket from the remote address to which it is - /// connected, without removing that data from the queue. On success, - /// returns the number of bytes peeked. - /// - /// Successive calls return the same data. This is accomplished by passing - /// `MSG_PEEK` as a flag to the underlying recv system call. - pub fn peek(&self, buf: &mut [u8]) -> io::Result { - self.inner.peek(buf) - } - - /// Execute an I/O operation ensuring that the socket receives more events - /// if it hits a [`WouldBlock`] error. - /// - /// # Notes - /// - /// This method is required to be called for **all** I/O operations to - /// ensure the user will receive events once the socket is ready again after - /// returning a [`WouldBlock`] error. 
- /// - /// [`WouldBlock`]: io::ErrorKind::WouldBlock - /// - /// # Examples - /// - #[cfg_attr(unix, doc = "```no_run")] - #[cfg_attr(windows, doc = "```ignore")] - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use std::io; - /// #[cfg(unix)] - /// use std::os::unix::io::AsRawFd; - /// #[cfg(windows)] - /// use std::os::windows::io::AsRawSocket; - /// use mio::net::TcpStream; - /// - /// let address = "127.0.0.1:8080".parse().unwrap(); - /// let stream = TcpStream::connect(address)?; - /// - /// // Wait until the stream is readable... - /// - /// // Read from the stream using a direct libc call, of course the - /// // `io::Read` implementation would be easier to use. - /// let mut buf = [0; 512]; - /// let n = stream.try_io(|| { - /// let buf_ptr = &mut buf as *mut _ as *mut _; - /// #[cfg(unix)] - /// let res = unsafe { libc::recv(stream.as_raw_fd(), buf_ptr, buf.len(), 0) }; - /// #[cfg(windows)] - /// let res = unsafe { libc::recvfrom(stream.as_raw_socket() as usize, buf_ptr, buf.len() as i32, 0, std::ptr::null_mut(), std::ptr::null_mut()) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure - /// // should return `WouldBlock` error. 
- /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("read {} bytes", n); - /// # Ok(()) - /// # } - /// ``` - pub fn try_io(&self, f: F) -> io::Result - where - F: FnOnce() -> io::Result, - { - self.inner.do_io(|_| f()) - } -} - -impl Read for TcpStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.do_io(|mut inner| inner.read(buf)) - } - - fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { - self.inner.do_io(|mut inner| inner.read_vectored(bufs)) - } -} - -impl<'a> Read for &'a TcpStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.do_io(|mut inner| inner.read(buf)) - } - - fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { - self.inner.do_io(|mut inner| inner.read_vectored(bufs)) - } -} - -impl Write for TcpStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.do_io(|mut inner| inner.write(buf)) - } - - fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - self.inner.do_io(|mut inner| inner.write_vectored(bufs)) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.do_io(|mut inner| inner.flush()) - } -} - -impl<'a> Write for &'a TcpStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.do_io(|mut inner| inner.write(buf)) - } - - fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - self.inner.do_io(|mut inner| inner.write_vectored(bufs)) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.do_io(|mut inner| inner.flush()) - } -} - -impl event::Source for TcpStream { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.register(registry, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.reregister(registry, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - 
self.inner.deregister(registry) - } -} - -impl fmt::Debug for TcpStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) - } -} - -#[cfg(unix)] -impl IntoRawFd for TcpStream { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -#[cfg(unix)] -impl AsRawFd for TcpStream { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -#[cfg(unix)] -impl FromRawFd for TcpStream { - /// Converts a `RawFd` to a `TcpStream`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. - unsafe fn from_raw_fd(fd: RawFd) -> TcpStream { - TcpStream::from_std(FromRawFd::from_raw_fd(fd)) - } -} - -#[cfg(windows)] -impl IntoRawSocket for TcpStream { - fn into_raw_socket(self) -> RawSocket { - self.inner.into_inner().into_raw_socket() - } -} - -#[cfg(windows)] -impl AsRawSocket for TcpStream { - fn as_raw_socket(&self) -> RawSocket { - self.inner.as_raw_socket() - } -} - -#[cfg(windows)] -impl FromRawSocket for TcpStream { - /// Converts a `RawSocket` to a `TcpStream`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. - unsafe fn from_raw_socket(socket: RawSocket) -> TcpStream { - TcpStream::from_std(FromRawSocket::from_raw_socket(socket)) - } -} - -#[cfg(target_os = "wasi")] -impl IntoRawFd for TcpStream { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -#[cfg(target_os = "wasi")] -impl AsRawFd for TcpStream { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -#[cfg(target_os = "wasi")] -impl FromRawFd for TcpStream { - /// Converts a `RawFd` to a `TcpStream`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. 
- unsafe fn from_raw_fd(fd: RawFd) -> TcpStream { - TcpStream::from_std(FromRawFd::from_raw_fd(fd)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/net/udp.rs s390-tools-2.33.1/rust-vendor/mio/src/net/udp.rs --- s390-tools-2.31.0/rust-vendor/mio/src/net/udp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/net/udp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,697 +0,0 @@ -//! Primitives for working with UDP. -//! -//! The types provided in this module are non-blocking by default and are -//! designed to be portable across all supported Mio platforms. As long as the -//! [portability guidelines] are followed, the behavior should be identical no -//! matter the target platform. -//! -//! [portability guidelines]: ../struct.Poll.html#portability - -use crate::io_source::IoSource; -use crate::{event, sys, Interest, Registry, Token}; - -use std::fmt; -use std::io; -use std::net; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -#[cfg(unix)] -use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; -#[cfg(windows)] -use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket}; - -/// A User Datagram Protocol socket. -/// -/// This is an implementation of a bound UDP socket. This supports both IPv4 and -/// IPv6 addresses, and there is no corresponding notion of a server because UDP -/// is a datagram protocol. -/// -/// # Examples -/// -#[cfg_attr(feature = "os-poll", doc = "```")] -#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] -/// # use std::error::Error; -/// # -/// # fn main() -> Result<(), Box> { -/// // An Echo program: -/// // SENDER -> sends a message. -/// // ECHOER -> listens and prints the message received. 
-/// -/// use mio::net::UdpSocket; -/// use mio::{Events, Interest, Poll, Token}; -/// use std::time::Duration; -/// -/// const SENDER: Token = Token(0); -/// const ECHOER: Token = Token(1); -/// -/// // This operation will fail if the address is in use, so we select different ports for each -/// // socket. -/// let mut sender_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; -/// let mut echoer_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; -/// -/// // If we do not use connect here, SENDER and ECHOER would need to call send_to and recv_from -/// // respectively. -/// sender_socket.connect(echoer_socket.local_addr()?)?; -/// -/// // We need a Poll to check if SENDER is ready to be written into, and if ECHOER is ready to be -/// // read from. -/// let mut poll = Poll::new()?; -/// -/// // We register our sockets here so that we can check if they are ready to be written/read. -/// poll.registry().register(&mut sender_socket, SENDER, Interest::WRITABLE)?; -/// poll.registry().register(&mut echoer_socket, ECHOER, Interest::READABLE)?; -/// -/// let msg_to_send = [9; 9]; -/// let mut buffer = [0; 9]; -/// -/// let mut events = Events::with_capacity(128); -/// loop { -/// poll.poll(&mut events, Some(Duration::from_millis(100)))?; -/// for event in events.iter() { -/// match event.token() { -/// // Our SENDER is ready to be written into. -/// SENDER => { -/// let bytes_sent = sender_socket.send(&msg_to_send)?; -/// assert_eq!(bytes_sent, 9); -/// println!("sent {:?} -> {:?} bytes", msg_to_send, bytes_sent); -/// }, -/// // Our ECHOER is ready to be read from. -/// ECHOER => { -/// let num_recv = echoer_socket.recv(&mut buffer)?; -/// println!("echo {:?} -> {:?}", buffer, num_recv); -/// buffer = [0; 9]; -/// # _ = buffer; // Silence unused assignment warning. -/// # return Ok(()); -/// } -/// _ => unreachable!() -/// } -/// } -/// } -/// # } -/// ``` -pub struct UdpSocket { - inner: IoSource, -} - -impl UdpSocket { - /// Creates a UDP socket from the given address. 
- /// - /// # Examples - /// - #[cfg_attr(feature = "os-poll", doc = "```")] - #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// // We must bind it to an open address. - /// let socket = match UdpSocket::bind("127.0.0.1:0".parse()?) { - /// Ok(new_socket) => new_socket, - /// Err(fail) => { - /// // We panic! here, but you could try to bind it again on another address. - /// panic!("Failed to bind socket. {:?}", fail); - /// } - /// }; - /// - /// // Our socket was created, but we should not use it before checking it's readiness. - /// # drop(socket); // Silence unused variable warning. - /// # Ok(()) - /// # } - /// ``` - pub fn bind(addr: SocketAddr) -> io::Result { - sys::udp::bind(addr).map(UdpSocket::from_std) - } - - /// Creates a new `UdpSocket` from a standard `net::UdpSocket`. - /// - /// This function is intended to be used to wrap a UDP socket from the - /// standard library in the Mio equivalent. The conversion assumes nothing - /// about the underlying socket; it is left up to the user to set it in - /// non-blocking mode. - pub fn from_std(socket: net::UdpSocket) -> UdpSocket { - UdpSocket { - inner: IoSource::new(socket), - } - } - - /// Returns the socket address that this socket was created from. - /// - /// # Examples - /// - // This assertion is almost, but not quite, universal. It fails on - // shared-IP FreeBSD jails. It's hard for mio to know whether we're jailed, - // so simply disable the test on FreeBSD. 
- #[cfg_attr(all(feature = "os-poll", not(target_os = "freebsd")), doc = "```")] - #[cfg_attr( - any(not(feature = "os-poll"), target_os = "freebsd"), - doc = "```ignore" - )] - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// let addr = "127.0.0.1:0".parse()?; - /// let socket = UdpSocket::bind(addr)?; - /// assert_eq!(socket.local_addr()?.ip(), addr.ip()); - /// # Ok(()) - /// # } - /// ``` - pub fn local_addr(&self) -> io::Result { - self.inner.local_addr() - } - - /// Returns the socket address of the remote peer this socket was connected to. - /// - /// # Examples - /// - #[cfg_attr(feature = "os-poll", doc = "```")] - #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// let addr = "127.0.0.1:0".parse()?; - /// let peer_addr = "127.0.0.1:11100".parse()?; - /// let socket = UdpSocket::bind(addr)?; - /// socket.connect(peer_addr)?; - /// assert_eq!(socket.peer_addr()?.ip(), peer_addr.ip()); - /// # Ok(()) - /// # } - /// ``` - pub fn peer_addr(&self) -> io::Result { - self.inner.peer_addr() - } - - /// Sends data on the socket to the given address. On success, returns the - /// number of bytes written. - /// - /// Address type can be any implementor of `ToSocketAddrs` trait. See its - /// documentation for concrete examples. - /// - /// # Examples - /// - /// ```no_run - /// # use std::error::Error; - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; - /// - /// // We must check if the socket is writable before calling send_to, - /// // or we could run into a WouldBlock error. 
- /// - /// let bytes_sent = socket.send_to(&[9; 9], "127.0.0.1:11100".parse()?)?; - /// assert_eq!(bytes_sent, 9); - /// # - /// # Ok(()) - /// # } - /// ``` - pub fn send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result { - self.inner.do_io(|inner| inner.send_to(buf, target)) - } - - /// Receives data from the socket. On success, returns the number of bytes - /// read and the address from whence the data came. - /// - /// # Notes - /// - /// On Windows, if the data is larger than the buffer specified, the buffer - /// is filled with the first part of the data, and recv_from returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. - /// Make sure to always use a sufficiently large buffer to hold the - /// maximum UDP packet size, which can be up to 65536 bytes in size. - /// - /// # Examples - /// - /// ```no_run - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; - /// - /// // We must check if the socket is readable before calling recv_from, - /// // or we could run into a WouldBlock error. - /// - /// let mut buf = [0; 9]; - /// let (num_recv, from_addr) = socket.recv_from(&mut buf)?; - /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr); - /// # - /// # Ok(()) - /// # } - /// ``` - pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - self.inner.do_io(|inner| inner.recv_from(buf)) - } - - /// Receives data from the socket, without removing it from the input queue. - /// On success, returns the number of bytes read and the address from whence - /// the data came. - /// - /// # Notes - /// - /// On Windows, if the data is larger than the buffer specified, the buffer - /// is filled with the first part of the data, and peek_from returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. 
- /// Make sure to always use a sufficiently large buffer to hold the - /// maximum UDP packet size, which can be up to 65536 bytes in size. - /// - /// # Examples - /// - /// ```no_run - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; - /// - /// // We must check if the socket is readable before calling recv_from, - /// // or we could run into a WouldBlock error. - /// - /// let mut buf = [0; 9]; - /// let (num_recv, from_addr) = socket.peek_from(&mut buf)?; - /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr); - /// # - /// # Ok(()) - /// # } - /// ``` - pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - self.inner.do_io(|inner| inner.peek_from(buf)) - } - - /// Sends data on the socket to the address previously bound via connect(). On success, - /// returns the number of bytes written. - pub fn send(&self, buf: &[u8]) -> io::Result { - self.inner.do_io(|inner| inner.send(buf)) - } - - /// Receives data from the socket previously bound with connect(). On success, returns - /// the number of bytes read. - /// - /// # Notes - /// - /// On Windows, if the data is larger than the buffer specified, the buffer - /// is filled with the first part of the data, and recv returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. - /// Make sure to always use a sufficiently large buffer to hold the - /// maximum UDP packet size, which can be up to 65536 bytes in size. - pub fn recv(&self, buf: &mut [u8]) -> io::Result { - self.inner.do_io(|inner| inner.recv(buf)) - } - - /// Receives data from the socket, without removing it from the input queue. - /// On success, returns the number of bytes read. 
- /// - /// # Notes - /// - /// On Windows, if the data is larger than the buffer specified, the buffer - /// is filled with the first part of the data, and peek returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. - /// Make sure to always use a sufficiently large buffer to hold the - /// maximum UDP packet size, which can be up to 65536 bytes in size. - pub fn peek(&self, buf: &mut [u8]) -> io::Result { - self.inner.do_io(|inner| inner.peek(buf)) - } - - /// Connects the UDP socket setting the default destination for `send()` - /// and limiting packets that are read via `recv` from the address specified - /// in `addr`. - /// - /// This may return a `WouldBlock` in which case the socket connection - /// cannot be completed immediately, it usually means there are insufficient - /// entries in the routing cache. - pub fn connect(&self, addr: SocketAddr) -> io::Result<()> { - self.inner.connect(addr) - } - - /// Sets the value of the `SO_BROADCAST` option for this socket. - /// - /// When enabled, this socket is allowed to send packets to a broadcast - /// address. - /// - /// # Examples - /// - #[cfg_attr(feature = "os-poll", doc = "```")] - #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; - /// if broadcast_socket.broadcast()? == false { - /// broadcast_socket.set_broadcast(true)?; - /// } - /// - /// assert_eq!(broadcast_socket.broadcast()?, true); - /// # - /// # Ok(()) - /// # } - /// ``` - pub fn set_broadcast(&self, on: bool) -> io::Result<()> { - self.inner.set_broadcast(on) - } - - /// Gets the value of the `SO_BROADCAST` option for this socket. - /// - /// For more information about this option, see - /// [`set_broadcast`][link]. 
- /// - /// [link]: #method.set_broadcast - /// - /// # Examples - /// - #[cfg_attr(feature = "os-poll", doc = "```")] - #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; - /// assert_eq!(broadcast_socket.broadcast()?, false); - /// # - /// # Ok(()) - /// # } - /// ``` - pub fn broadcast(&self) -> io::Result { - self.inner.broadcast() - } - - /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket. - /// - /// If enabled, multicast packets will be looped back to the local socket. - /// Note that this may not have any affect on IPv6 sockets. - pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> { - self.inner.set_multicast_loop_v4(on) - } - - /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see - /// [`set_multicast_loop_v4`][link]. - /// - /// [link]: #method.set_multicast_loop_v4 - pub fn multicast_loop_v4(&self) -> io::Result { - self.inner.multicast_loop_v4() - } - - /// Sets the value of the `IP_MULTICAST_TTL` option for this socket. - /// - /// Indicates the time-to-live value of outgoing multicast packets for - /// this socket. The default value is 1 which means that multicast packets - /// don't leave the local network unless explicitly requested. - /// - /// Note that this may not have any affect on IPv6 sockets. - pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> { - self.inner.set_multicast_ttl_v4(ttl) - } - - /// Gets the value of the `IP_MULTICAST_TTL` option for this socket. - /// - /// For more information about this option, see - /// [`set_multicast_ttl_v4`][link]. 
- /// - /// [link]: #method.set_multicast_ttl_v4 - pub fn multicast_ttl_v4(&self) -> io::Result { - self.inner.multicast_ttl_v4() - } - - /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// Controls whether this socket sees the multicast packets it sends itself. - /// Note that this may not have any affect on IPv4 sockets. - pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> { - self.inner.set_multicast_loop_v6(on) - } - - /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see - /// [`set_multicast_loop_v6`][link]. - /// - /// [link]: #method.set_multicast_loop_v6 - pub fn multicast_loop_v6(&self) -> io::Result { - self.inner.multicast_loop_v6() - } - - /// Sets the value for the `IP_TTL` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - /// - /// # Examples - /// - #[cfg_attr(feature = "os-poll", doc = "```")] - #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; - /// if socket.ttl()? < 255 { - /// socket.set_ttl(255)?; - /// } - /// - /// assert_eq!(socket.ttl()?, 255); - /// # - /// # Ok(()) - /// # } - /// ``` - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.inner.set_ttl(ttl) - } - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`][link]. 
- /// - /// [link]: #method.set_ttl - /// - /// # Examples - /// - #[cfg_attr(feature = "os-poll", doc = "```")] - #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use mio::net::UdpSocket; - /// - /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?; - /// socket.set_ttl(255)?; - /// - /// assert_eq!(socket.ttl()?, 255); - /// # - /// # Ok(()) - /// # } - /// ``` - pub fn ttl(&self) -> io::Result { - self.inner.ttl() - } - - /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// address of the local interface with which the system should join the - /// multicast group. If it's equal to `INADDR_ANY` then an appropriate - /// interface is chosen by the system. - #[allow(clippy::trivially_copy_pass_by_ref)] - pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - self.inner.join_multicast_v4(multiaddr, interface) - } - - /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// index of the interface to join/leave (or 0 to indicate any interface). - #[allow(clippy::trivially_copy_pass_by_ref)] - pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - self.inner.join_multicast_v6(multiaddr, interface) - } - - /// Executes an operation of the `IP_DROP_MEMBERSHIP` type. - /// - /// For more information about this option, see - /// [`join_multicast_v4`][link]. 
- /// - /// [link]: #method.join_multicast_v4 - #[allow(clippy::trivially_copy_pass_by_ref)] - pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - self.inner.leave_multicast_v4(multiaddr, interface) - } - - /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type. - /// - /// For more information about this option, see - /// [`join_multicast_v6`][link]. - /// - /// [link]: #method.join_multicast_v6 - #[allow(clippy::trivially_copy_pass_by_ref)] - pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - self.inner.leave_multicast_v6(multiaddr, interface) - } - - /// Get the value of the `IPV6_V6ONLY` option on this socket. - #[allow(clippy::trivially_copy_pass_by_ref)] - pub fn only_v6(&self) -> io::Result { - sys::udp::only_v6(&self.inner) - } - - /// Get the value of the `SO_ERROR` option on this socket. - /// - /// This will retrieve the stored error in the underlying socket, clearing - /// the field in the process. This can be useful for checking errors between - /// calls. - pub fn take_error(&self) -> io::Result> { - self.inner.take_error() - } - - /// Execute an I/O operation ensuring that the socket receives more events - /// if it hits a [`WouldBlock`] error. - /// - /// # Notes - /// - /// This method is required to be called for **all** I/O operations to - /// ensure the user will receive events once the socket is ready again after - /// returning a [`WouldBlock`] error. 
- /// - /// [`WouldBlock`]: io::ErrorKind::WouldBlock - /// - /// # Examples - /// - #[cfg_attr(unix, doc = "```no_run")] - #[cfg_attr(windows, doc = "```ignore")] - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use std::io; - /// #[cfg(unix)] - /// use std::os::unix::io::AsRawFd; - /// #[cfg(windows)] - /// use std::os::windows::io::AsRawSocket; - /// use mio::net::UdpSocket; - /// - /// let address = "127.0.0.1:8080".parse().unwrap(); - /// let dgram = UdpSocket::bind(address)?; - /// - /// // Wait until the dgram is readable... - /// - /// // Read from the dgram using a direct libc call, of course the - /// // `io::Read` implementation would be easier to use. - /// let mut buf = [0; 512]; - /// let n = dgram.try_io(|| { - /// let buf_ptr = &mut buf as *mut _ as *mut _; - /// #[cfg(unix)] - /// let res = unsafe { libc::recv(dgram.as_raw_fd(), buf_ptr, buf.len(), 0) }; - /// #[cfg(windows)] - /// let res = unsafe { libc::recvfrom(dgram.as_raw_socket() as usize, buf_ptr, buf.len() as i32, 0, std::ptr::null_mut(), std::ptr::null_mut()) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure - /// // should return `WouldBlock` error. 
- /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("read {} bytes", n); - /// # Ok(()) - /// # } - /// ``` - pub fn try_io(&self, f: F) -> io::Result - where - F: FnOnce() -> io::Result, - { - self.inner.do_io(|_| f()) - } -} - -impl event::Source for UdpSocket { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.register(registry, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.reregister(registry, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - self.inner.deregister(registry) - } -} - -impl fmt::Debug for UdpSocket { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) - } -} - -#[cfg(unix)] -impl IntoRawFd for UdpSocket { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -#[cfg(unix)] -impl AsRawFd for UdpSocket { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -#[cfg(unix)] -impl FromRawFd for UdpSocket { - /// Converts a `RawFd` to a `UdpSocket`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. - unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket { - UdpSocket::from_std(FromRawFd::from_raw_fd(fd)) - } -} - -#[cfg(windows)] -impl IntoRawSocket for UdpSocket { - fn into_raw_socket(self) -> RawSocket { - self.inner.into_inner().into_raw_socket() - } -} - -#[cfg(windows)] -impl AsRawSocket for UdpSocket { - fn as_raw_socket(&self) -> RawSocket { - self.inner.as_raw_socket() - } -} - -#[cfg(windows)] -impl FromRawSocket for UdpSocket { - /// Converts a `RawSocket` to a `UdpSocket`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. 
- unsafe fn from_raw_socket(socket: RawSocket) -> UdpSocket { - UdpSocket::from_std(FromRawSocket::from_raw_socket(socket)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/net/uds/datagram.rs s390-tools-2.33.1/rust-vendor/mio/src/net/uds/datagram.rs --- s390-tools-2.31.0/rust-vendor/mio/src/net/uds/datagram.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/net/uds/datagram.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,236 +0,0 @@ -use crate::io_source::IoSource; -use crate::{event, sys, Interest, Registry, Token}; - -use std::net::Shutdown; -use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; -use std::os::unix::net; -use std::path::Path; -use std::{fmt, io}; - -/// A Unix datagram socket. -pub struct UnixDatagram { - inner: IoSource, -} - -impl UnixDatagram { - /// Creates a Unix datagram socket bound to the given path. - pub fn bind>(path: P) -> io::Result { - sys::uds::datagram::bind(path.as_ref()).map(UnixDatagram::from_std) - } - - /// Creates a new `UnixDatagram` from a standard `net::UnixDatagram`. - /// - /// This function is intended to be used to wrap a Unix datagram from the - /// standard library in the Mio equivalent. The conversion assumes nothing - /// about the underlying datagram; it is left up to the user to set it in - /// non-blocking mode. - pub fn from_std(socket: net::UnixDatagram) -> UnixDatagram { - UnixDatagram { - inner: IoSource::new(socket), - } - } - - /// Connects the socket to the specified address. - /// - /// This may return a `WouldBlock` in which case the socket connection - /// cannot be completed immediately. - pub fn connect>(&self, path: P) -> io::Result<()> { - self.inner.connect(path) - } - - /// Creates a Unix Datagram socket which is not bound to any address. - pub fn unbound() -> io::Result { - sys::uds::datagram::unbound().map(UnixDatagram::from_std) - } - - /// Create an unnamed pair of connected sockets. 
- pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> { - sys::uds::datagram::pair().map(|(socket1, socket2)| { - ( - UnixDatagram::from_std(socket1), - UnixDatagram::from_std(socket2), - ) - }) - } - - /// Returns the address of this socket. - pub fn local_addr(&self) -> io::Result { - sys::uds::datagram::local_addr(&self.inner) - } - - /// Returns the address of this socket's peer. - /// - /// The `connect` method will connect the socket to a peer. - pub fn peer_addr(&self) -> io::Result { - sys::uds::datagram::peer_addr(&self.inner) - } - - /// Receives data from the socket. - /// - /// On success, returns the number of bytes read and the address from - /// whence the data came. - pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, sys::SocketAddr)> { - self.inner - .do_io(|inner| sys::uds::datagram::recv_from(inner, buf)) - } - - /// Receives data from the socket. - /// - /// On success, returns the number of bytes read. - pub fn recv(&self, buf: &mut [u8]) -> io::Result { - self.inner.do_io(|inner| inner.recv(buf)) - } - - /// Sends data on the socket to the specified address. - /// - /// On success, returns the number of bytes written. - pub fn send_to>(&self, buf: &[u8], path: P) -> io::Result { - self.inner.do_io(|inner| inner.send_to(buf, path)) - } - - /// Sends data on the socket to the socket's peer. - /// - /// The peer address may be set by the `connect` method, and this method - /// will return an error if the socket has not already been connected. - /// - /// On success, returns the number of bytes written. - pub fn send(&self, buf: &[u8]) -> io::Result { - self.inner.do_io(|inner| inner.send(buf)) - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.inner.take_error() - } - - /// Shut down the read, write, or both halves of this connection. 
- /// - /// This function will cause all pending and future I/O calls on the - /// specified portions to immediately return with an appropriate value - /// (see the documentation of `Shutdown`). - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.inner.shutdown(how) - } - - /// Execute an I/O operation ensuring that the socket receives more events - /// if it hits a [`WouldBlock`] error. - /// - /// # Notes - /// - /// This method is required to be called for **all** I/O operations to - /// ensure the user will receive events once the socket is ready again after - /// returning a [`WouldBlock`] error. - /// - /// [`WouldBlock`]: io::ErrorKind::WouldBlock - /// - /// # Examples - /// - /// ``` - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use std::io; - /// use std::os::unix::io::AsRawFd; - /// use mio::net::UnixDatagram; - /// - /// let (dgram1, dgram2) = UnixDatagram::pair()?; - /// - /// // Wait until the dgram is writable... - /// - /// // Write to the dgram using a direct libc call, of course the - /// // `io::Write` implementation would be easier to use. - /// let buf = b"hello"; - /// let n = dgram1.try_io(|| { - /// let buf_ptr = &buf as *const _ as *const _; - /// let res = unsafe { libc::send(dgram1.as_raw_fd(), buf_ptr, buf.len(), 0) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::send, the closure - /// // should return `WouldBlock` error. - /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("write {} bytes", n); - /// - /// // Wait until the dgram is readable... - /// - /// // Read from the dgram using a direct libc call, of course the - /// // `io::Read` implementation would be easier to use. 
- /// let mut buf = [0; 512]; - /// let n = dgram2.try_io(|| { - /// let buf_ptr = &mut buf as *mut _ as *mut _; - /// let res = unsafe { libc::recv(dgram2.as_raw_fd(), buf_ptr, buf.len(), 0) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure - /// // should return `WouldBlock` error. - /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("read {} bytes", n); - /// # Ok(()) - /// # } - /// ``` - pub fn try_io(&self, f: F) -> io::Result - where - F: FnOnce() -> io::Result, - { - self.inner.do_io(|_| f()) - } -} - -impl event::Source for UnixDatagram { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.register(registry, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.reregister(registry, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - self.inner.deregister(registry) - } -} - -impl fmt::Debug for UnixDatagram { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) - } -} - -impl IntoRawFd for UnixDatagram { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -impl AsRawFd for UnixDatagram { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -impl FromRawFd for UnixDatagram { - /// Converts a `RawFd` to a `UnixDatagram`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. 
- unsafe fn from_raw_fd(fd: RawFd) -> UnixDatagram { - UnixDatagram::from_std(FromRawFd::from_raw_fd(fd)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/net/uds/listener.rs s390-tools-2.33.1/rust-vendor/mio/src/net/uds/listener.rs --- s390-tools-2.31.0/rust-vendor/mio/src/net/uds/listener.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/net/uds/listener.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,104 +0,0 @@ -use crate::io_source::IoSource; -use crate::net::{SocketAddr, UnixStream}; -use crate::{event, sys, Interest, Registry, Token}; - -use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; -use std::os::unix::net; -use std::path::Path; -use std::{fmt, io}; - -/// A non-blocking Unix domain socket server. -pub struct UnixListener { - inner: IoSource, -} - -impl UnixListener { - /// Creates a new `UnixListener` bound to the specified socket. - pub fn bind>(path: P) -> io::Result { - sys::uds::listener::bind(path.as_ref()).map(UnixListener::from_std) - } - - /// Creates a new `UnixListener` from a standard `net::UnixListener`. - /// - /// This function is intended to be used to wrap a Unix listener from the - /// standard library in the Mio equivalent. The conversion assumes nothing - /// about the underlying listener; it is left up to the user to set it in - /// non-blocking mode. - pub fn from_std(listener: net::UnixListener) -> UnixListener { - UnixListener { - inner: IoSource::new(listener), - } - } - - /// Accepts a new incoming connection to this listener. - /// - /// The call is responsible for ensuring that the listening socket is in - /// non-blocking mode. - pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> { - sys::uds::listener::accept(&self.inner) - } - - /// Returns the local socket address of this listener. - pub fn local_addr(&self) -> io::Result { - sys::uds::listener::local_addr(&self.inner) - } - - /// Returns the value of the `SO_ERROR` option. 
- pub fn take_error(&self) -> io::Result> { - self.inner.take_error() - } -} - -impl event::Source for UnixListener { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.register(registry, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.reregister(registry, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - self.inner.deregister(registry) - } -} - -impl fmt::Debug for UnixListener { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) - } -} - -impl IntoRawFd for UnixListener { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -impl AsRawFd for UnixListener { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -impl FromRawFd for UnixListener { - /// Converts a `RawFd` to a `UnixListener`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. 
- unsafe fn from_raw_fd(fd: RawFd) -> UnixListener { - UnixListener::from_std(FromRawFd::from_raw_fd(fd)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/net/uds/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/net/uds/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/net/uds/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/net/uds/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,10 +0,0 @@ -mod datagram; -pub use self::datagram::UnixDatagram; - -mod listener; -pub use self::listener::UnixListener; - -mod stream; -pub use self::stream::UnixStream; - -pub use crate::sys::SocketAddr; diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/net/uds/stream.rs s390-tools-2.33.1/rust-vendor/mio/src/net/uds/stream.rs --- s390-tools-2.31.0/rust-vendor/mio/src/net/uds/stream.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/net/uds/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,245 +0,0 @@ -use crate::io_source::IoSource; -use crate::{event, sys, Interest, Registry, Token}; - -use std::fmt; -use std::io::{self, IoSlice, IoSliceMut, Read, Write}; -use std::net::Shutdown; -use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; -use std::os::unix::net; -use std::path::Path; - -/// A non-blocking Unix stream socket. -pub struct UnixStream { - inner: IoSource, -} - -impl UnixStream { - /// Connects to the socket named by `path`. - /// - /// This may return a `WouldBlock` in which case the socket connection - /// cannot be completed immediately. Usually it means the backlog is full. - pub fn connect>(path: P) -> io::Result { - sys::uds::stream::connect(path.as_ref()).map(UnixStream::from_std) - } - - /// Creates a new `UnixStream` from a standard `net::UnixStream`. - /// - /// This function is intended to be used to wrap a Unix stream from the - /// standard library in the Mio equivalent. 
The conversion assumes nothing - /// about the underlying stream; it is left up to the user to set it in - /// non-blocking mode. - /// - /// # Note - /// - /// The Unix stream here will not have `connect` called on it, so it - /// should already be connected via some other means (be it manually, or - /// the standard library). - pub fn from_std(stream: net::UnixStream) -> UnixStream { - UnixStream { - inner: IoSource::new(stream), - } - } - - /// Creates an unnamed pair of connected sockets. - /// - /// Returns two `UnixStream`s which are connected to each other. - pub fn pair() -> io::Result<(UnixStream, UnixStream)> { - sys::uds::stream::pair().map(|(stream1, stream2)| { - (UnixStream::from_std(stream1), UnixStream::from_std(stream2)) - }) - } - - /// Returns the socket address of the local half of this connection. - pub fn local_addr(&self) -> io::Result { - sys::uds::stream::local_addr(&self.inner) - } - - /// Returns the socket address of the remote half of this connection. - pub fn peer_addr(&self) -> io::Result { - sys::uds::stream::peer_addr(&self.inner) - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.inner.take_error() - } - - /// Shuts down the read, write, or both halves of this connection. - /// - /// This function will cause all pending and future I/O calls on the - /// specified portions to immediately return with an appropriate value - /// (see the documentation of `Shutdown`). - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.inner.shutdown(how) - } - - /// Execute an I/O operation ensuring that the socket receives more events - /// if it hits a [`WouldBlock`] error. - /// - /// # Notes - /// - /// This method is required to be called for **all** I/O operations to - /// ensure the user will receive events once the socket is ready again after - /// returning a [`WouldBlock`] error. 
- /// - /// [`WouldBlock`]: io::ErrorKind::WouldBlock - /// - /// # Examples - /// - /// ``` - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use std::io; - /// use std::os::unix::io::AsRawFd; - /// use mio::net::UnixStream; - /// - /// let (stream1, stream2) = UnixStream::pair()?; - /// - /// // Wait until the stream is writable... - /// - /// // Write to the stream using a direct libc call, of course the - /// // `io::Write` implementation would be easier to use. - /// let buf = b"hello"; - /// let n = stream1.try_io(|| { - /// let buf_ptr = &buf as *const _ as *const _; - /// let res = unsafe { libc::send(stream1.as_raw_fd(), buf_ptr, buf.len(), 0) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::send, the closure - /// // should return `WouldBlock` error. - /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("write {} bytes", n); - /// - /// // Wait until the stream is readable... - /// - /// // Read from the stream using a direct libc call, of course the - /// // `io::Read` implementation would be easier to use. - /// let mut buf = [0; 512]; - /// let n = stream2.try_io(|| { - /// let buf_ptr = &mut buf as *mut _ as *mut _; - /// let res = unsafe { libc::recv(stream2.as_raw_fd(), buf_ptr, buf.len(), 0) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure - /// // should return `WouldBlock` error. 
- /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("read {} bytes", n); - /// # Ok(()) - /// # } - /// ``` - pub fn try_io(&self, f: F) -> io::Result - where - F: FnOnce() -> io::Result, - { - self.inner.do_io(|_| f()) - } -} - -impl Read for UnixStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.do_io(|mut inner| inner.read(buf)) - } - - fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { - self.inner.do_io(|mut inner| inner.read_vectored(bufs)) - } -} - -impl<'a> Read for &'a UnixStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.do_io(|mut inner| inner.read(buf)) - } - - fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { - self.inner.do_io(|mut inner| inner.read_vectored(bufs)) - } -} - -impl Write for UnixStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.do_io(|mut inner| inner.write(buf)) - } - - fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - self.inner.do_io(|mut inner| inner.write_vectored(bufs)) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.do_io(|mut inner| inner.flush()) - } -} - -impl<'a> Write for &'a UnixStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.do_io(|mut inner| inner.write(buf)) - } - - fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - self.inner.do_io(|mut inner| inner.write_vectored(bufs)) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.do_io(|mut inner| inner.flush()) - } -} - -impl event::Source for UnixStream { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.register(registry, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.reregister(registry, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { 
- self.inner.deregister(registry) - } -} - -impl fmt::Debug for UnixStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) - } -} - -impl IntoRawFd for UnixStream { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -impl AsRawFd for UnixStream { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -impl FromRawFd for UnixStream { - /// Converts a `RawFd` to a `UnixStream`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. - unsafe fn from_raw_fd(fd: RawFd) -> UnixStream { - UnixStream::from_std(FromRawFd::from_raw_fd(fd)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/poll.rs s390-tools-2.33.1/rust-vendor/mio/src/poll.rs --- s390-tools-2.31.0/rust-vendor/mio/src/poll.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/poll.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,713 +0,0 @@ -use crate::{event, sys, Events, Interest, Token}; -#[cfg(unix)] -use std::os::unix::io::{AsRawFd, RawFd}; -use std::time::Duration; -use std::{fmt, io}; - -/// Polls for readiness events on all registered values. -/// -/// `Poll` allows a program to monitor a large number of [`event::Source`]s, -/// waiting until one or more become "ready" for some class of operations; e.g. -/// reading and writing. An event source is considered ready if it is possible -/// to immediately perform a corresponding operation; e.g. [`read`] or -/// [`write`]. -/// -/// To use `Poll`, an `event::Source` must first be registered with the `Poll` -/// instance using the [`register`] method on its associated `Register`, -/// supplying readiness interest. The readiness interest tells `Poll` which -/// specific operations on the handle to monitor for readiness. A `Token` is -/// also passed to the [`register`] function. When `Poll` returns a readiness -/// event, it will include this token. 
This associates the event with the -/// event source that generated the event. -/// -/// [`event::Source`]: ./event/trait.Source.html -/// [`read`]: ./net/struct.TcpStream.html#method.read -/// [`write`]: ./net/struct.TcpStream.html#method.write -/// [`register`]: struct.Registry.html#method.register -/// -/// # Examples -/// -/// A basic example -- establishing a `TcpStream` connection. -/// -#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] -#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] -/// # use std::error::Error; -/// # fn main() -> Result<(), Box> { -/// use mio::{Events, Poll, Interest, Token}; -/// use mio::net::TcpStream; -/// -/// use std::net::{self, SocketAddr}; -/// -/// // Bind a server socket to connect to. -/// let addr: SocketAddr = "127.0.0.1:0".parse()?; -/// let server = net::TcpListener::bind(addr)?; -/// -/// // Construct a new `Poll` handle as well as the `Events` we'll store into -/// let mut poll = Poll::new()?; -/// let mut events = Events::with_capacity(1024); -/// -/// // Connect the stream -/// let mut stream = TcpStream::connect(server.local_addr()?)?; -/// -/// // Register the stream with `Poll` -/// poll.registry().register(&mut stream, Token(0), Interest::READABLE | Interest::WRITABLE)?; -/// -/// // Wait for the socket to become ready. This has to happens in a loop to -/// // handle spurious wakeups. 
-/// loop { -/// poll.poll(&mut events, None)?; -/// -/// for event in &events { -/// if event.token() == Token(0) && event.is_writable() { -/// // The socket connected (probably, it could still be a spurious -/// // wakeup) -/// return Ok(()); -/// } -/// } -/// } -/// # } -/// ``` -/// -/// # Portability -/// -/// Using `Poll` provides a portable interface across supported platforms as -/// long as the caller takes the following into consideration: -/// -/// ### Spurious events -/// -/// [`Poll::poll`] may return readiness events even if the associated -/// event source is not actually ready. Given the same code, this may -/// happen more on some platforms than others. It is important to never assume -/// that, just because a readiness event was received, that the associated -/// operation will succeed as well. -/// -/// If operation fails with [`WouldBlock`], then the caller should not treat -/// this as an error, but instead should wait until another readiness event is -/// received. -/// -/// ### Draining readiness -/// -/// Once a readiness event is received, the corresponding operation must be -/// performed repeatedly until it returns [`WouldBlock`]. Unless this is done, -/// there is no guarantee that another readiness event will be delivered, even -/// if further data is received for the event source. -/// -/// [`WouldBlock`]: std::io::ErrorKind::WouldBlock -/// -/// ### Readiness operations -/// -/// The only readiness operations that are guaranteed to be present on all -/// supported platforms are [`readable`] and [`writable`]. All other readiness -/// operations may have false negatives and as such should be considered -/// **hints**. This means that if a socket is registered with [`readable`] -/// interest and either an error or close is received, a readiness event will -/// be generated for the socket, but it **may** only include `readable` -/// readiness. 
Also note that, given the potential for spurious events, -/// receiving a readiness event with `read_closed`, `write_closed`, or `error` -/// doesn't actually mean that a `read` on the socket will return a result -/// matching the readiness event. -/// -/// In other words, portable programs that explicitly check for [`read_closed`], -/// [`write_closed`], or [`error`] readiness should be doing so as an -/// **optimization** and always be able to handle an error or close situation -/// when performing the actual read operation. -/// -/// [`readable`]: ./event/struct.Event.html#method.is_readable -/// [`writable`]: ./event/struct.Event.html#method.is_writable -/// [`error`]: ./event/struct.Event.html#method.is_error -/// [`read_closed`]: ./event/struct.Event.html#method.is_read_closed -/// [`write_closed`]: ./event/struct.Event.html#method.is_write_closed -/// -/// ### Registering handles -/// -/// Unless otherwise noted, it should be assumed that types implementing -/// [`event::Source`] will never become ready unless they are registered with -/// `Poll`. 
-/// -/// For example: -/// -#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] -#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] -/// # use std::error::Error; -/// # use std::net; -/// # fn main() -> Result<(), Box> { -/// use mio::{Poll, Interest, Token}; -/// use mio::net::TcpStream; -/// use std::net::SocketAddr; -/// use std::time::Duration; -/// use std::thread; -/// -/// let address: SocketAddr = "127.0.0.1:0".parse()?; -/// let listener = net::TcpListener::bind(address)?; -/// let mut sock = TcpStream::connect(listener.local_addr()?)?; -/// -/// thread::sleep(Duration::from_secs(1)); -/// -/// let poll = Poll::new()?; -/// -/// // The connect is not guaranteed to have started until it is registered at -/// // this point -/// poll.registry().register(&mut sock, Token(0), Interest::READABLE | Interest::WRITABLE)?; -/// # Ok(()) -/// # } -/// ``` -/// -/// ### Dropping `Poll` -/// -/// When the `Poll` instance is dropped it may cancel in-flight operations for -/// the registered [event sources], meaning that no further events for them may -/// be received. It also means operations on the registered event sources may no -/// longer work. It is up to the user to keep the `Poll` instance alive while -/// registered event sources are being used. -/// -/// [event sources]: ./event/trait.Source.html -/// -/// ### Accessing raw fd/socket/handle -/// -/// Mio makes it possible for many types to be converted into a raw file -/// descriptor (fd, Unix), socket (Windows) or handle (Windows). This makes it -/// possible to support more operations on the type than Mio supports, for -/// example it makes [mio-aio] possible. However accessing the raw fd is not -/// without it's pitfalls. -/// -/// Specifically performing I/O operations outside of Mio on these types (via -/// the raw fd) has unspecified behaviour. 
It could cause no more events to be -/// generated for the type even though it returned `WouldBlock` (in an operation -/// directly accessing the fd). The behaviour is OS specific and Mio can only -/// guarantee cross-platform behaviour if it can control the I/O. -/// -/// [mio-aio]: https://github.com/asomers/mio-aio -/// -/// *The following is **not** guaranteed, just a description of the current -/// situation!* Mio is allowed to change the following without it being considered -/// a breaking change, don't depend on this, it's just here to inform the user. -/// Currently the kqueue and epoll implementation support direct I/O operations -/// on the fd without Mio's knowledge. Windows however needs **all** I/O -/// operations to go through Mio otherwise it is not able to update it's -/// internal state properly and won't generate events. -/// -/// ### Polling without registering event sources -/// -/// -/// *The following is **not** guaranteed, just a description of the current -/// situation!* Mio is allowed to change the following without it being -/// considered a breaking change, don't depend on this, it's just here to inform -/// the user. On platforms that use epoll, kqueue or IOCP (see implementation -/// notes below) polling without previously registering [event sources] will -/// result in sleeping forever, only a process signal will be able to wake up -/// the thread. -/// -/// On WASM/WASI this is different as it doesn't support process signals, -/// furthermore the WASI specification doesn't specify a behaviour in this -/// situation, thus it's up to the implementation what to do here. As an -/// example, the wasmtime runtime will return `EINVAL` in this situation, but -/// different runtimes may return different results. If you have further -/// insights or thoughts about this situation (and/or how Mio should handle it) -/// please add you comment to [pull request#1580]. 
-/// -/// [event sources]: crate::event::Source -/// [pull request#1580]: https://github.com/tokio-rs/mio/pull/1580 -/// -/// # Implementation notes -/// -/// `Poll` is backed by the selector provided by the operating system. -/// -/// | OS | Selector | -/// |---------------|-----------| -/// | Android | [epoll] | -/// | DragonFly BSD | [kqueue] | -/// | FreeBSD | [kqueue] | -/// | iOS | [kqueue] | -/// | illumos | [epoll] | -/// | Linux | [epoll] | -/// | NetBSD | [kqueue] | -/// | OpenBSD | [kqueue] | -/// | Windows | [IOCP] | -/// | macOS | [kqueue] | -/// -/// On all supported platforms, socket operations are handled by using the -/// system selector. Platform specific extensions (e.g. [`SourceFd`]) allow -/// accessing other features provided by individual system selectors. For -/// example, Linux's [`signalfd`] feature can be used by registering the FD with -/// `Poll` via [`SourceFd`]. -/// -/// On all platforms except windows, a call to [`Poll::poll`] is mostly just a -/// direct call to the system selector. However, [IOCP] uses a completion model -/// instead of a readiness model. In this case, `Poll` must adapt the completion -/// model Mio's API. While non-trivial, the bridge layer is still quite -/// efficient. The most expensive part being calls to `read` and `write` require -/// data to be copied into an intermediate buffer before it is passed to the -/// kernel. -/// -/// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html -/// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 -/// [IOCP]: https://docs.microsoft.com/en-us/windows/win32/fileio/i-o-completion-ports -/// [`signalfd`]: https://man7.org/linux/man-pages/man2/signalfd.2.html -/// [`SourceFd`]: unix/struct.SourceFd.html -/// [`Poll::poll`]: struct.Poll.html#method.poll -pub struct Poll { - registry: Registry, -} - -/// Registers I/O resources. -pub struct Registry { - selector: sys::Selector, -} - -impl Poll { - cfg_os_poll! { - /// Return a new `Poll` handle. 
- /// - /// This function will make a syscall to the operating system to create - /// the system selector. If this syscall fails, `Poll::new` will return - /// with the error. - /// - /// close-on-exec flag is set on the file descriptors used by the selector to prevent - /// leaking it to executed processes. However, on some systems such as - /// old Linux systems that don't support `epoll_create1` syscall it is done - /// non-atomically, so a separate thread executing in parallel to this - /// function may accidentally leak the file descriptor if it executes a - /// new process before this function returns. - /// - /// See [struct] level docs for more details. - /// - /// [struct]: struct.Poll.html - /// - /// # Examples - /// - /// ``` - /// # use std::error::Error; - /// # fn main() -> Result<(), Box> { - /// use mio::{Poll, Events}; - /// use std::time::Duration; - /// - /// let mut poll = match Poll::new() { - /// Ok(poll) => poll, - /// Err(e) => panic!("failed to create Poll instance; err={:?}", e), - /// }; - /// - /// // Create a structure to receive polled events - /// let mut events = Events::with_capacity(1024); - /// - /// // Wait for events, but none will be received because no - /// // `event::Source`s have been registered with this `Poll` instance. - /// poll.poll(&mut events, Some(Duration::from_millis(500)))?; - /// assert!(events.is_empty()); - /// # Ok(()) - /// # } - /// ``` - pub fn new() -> io::Result { - sys::Selector::new().map(|selector| Poll { - registry: Registry { selector }, - }) - } - } - - /// Create a separate `Registry` which can be used to register - /// `event::Source`s. - pub fn registry(&self) -> &Registry { - &self.registry - } - - /// Wait for readiness events - /// - /// Blocks the current thread and waits for readiness events for any of the - /// [`event::Source`]s that have been registered with this `Poll` instance. 
- /// The function will block until either at least one readiness event has - /// been received or `timeout` has elapsed. A `timeout` of `None` means that - /// `poll` will block until a readiness event has been received. - /// - /// The supplied `events` will be cleared and newly received readiness events - /// will be pushed onto the end. At most `events.capacity()` events will be - /// returned. If there are further pending readiness events, they will be - /// returned on the next call to `poll`. - /// - /// A single call to `poll` may result in multiple readiness events being - /// returned for a single event source. For example, if a TCP socket becomes - /// both readable and writable, it may be possible for a single readiness - /// event to be returned with both [`readable`] and [`writable`] readiness - /// **OR** two separate events may be returned, one with [`readable`] set - /// and one with [`writable`] set. - /// - /// Note that the `timeout` will be rounded up to the system clock - /// granularity (usually 1ms), and kernel scheduling delays mean that - /// the blocking interval may be overrun by a small amount. - /// - /// See the [struct] level documentation for a higher level discussion of - /// polling. - /// - /// [`event::Source`]: ./event/trait.Source.html - /// [`readable`]: struct.Interest.html#associatedconstant.READABLE - /// [`writable`]: struct.Interest.html#associatedconstant.WRITABLE - /// [struct]: struct.Poll.html - /// [`iter`]: ./event/struct.Events.html#method.iter - /// - /// # Notes - /// - /// This returns any errors without attempting to retry, previous versions - /// of Mio would automatically retry the poll call if it was interrupted - /// (if `EINTR` was returned). - /// - /// Currently if the `timeout` elapses without any readiness events - /// triggering this will return `Ok(())`. However we're not guaranteeing - /// this behaviour as this depends on the OS. 
- /// - /// # Examples - /// - /// A basic example -- establishing a `TcpStream` connection. - /// - #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] - #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] - /// # use std::error::Error; - /// # fn main() -> Result<(), Box> { - /// use mio::{Events, Poll, Interest, Token}; - /// use mio::net::TcpStream; - /// - /// use std::net::{TcpListener, SocketAddr}; - /// use std::thread; - /// - /// // Bind a server socket to connect to. - /// let addr: SocketAddr = "127.0.0.1:0".parse()?; - /// let server = TcpListener::bind(addr)?; - /// let addr = server.local_addr()?.clone(); - /// - /// // Spawn a thread to accept the socket - /// thread::spawn(move || { - /// let _ = server.accept(); - /// }); - /// - /// // Construct a new `Poll` handle as well as the `Events` we'll store into - /// let mut poll = Poll::new()?; - /// let mut events = Events::with_capacity(1024); - /// - /// // Connect the stream - /// let mut stream = TcpStream::connect(addr)?; - /// - /// // Register the stream with `Poll` - /// poll.registry().register( - /// &mut stream, - /// Token(0), - /// Interest::READABLE | Interest::WRITABLE)?; - /// - /// // Wait for the socket to become ready. This has to happens in a loop to - /// // handle spurious wakeups. 
- /// loop { - /// poll.poll(&mut events, None)?; - /// - /// for event in &events { - /// if event.token() == Token(0) && event.is_writable() { - /// // The socket connected (probably, it could still be a spurious - /// // wakeup) - /// return Ok(()); - /// } - /// } - /// } - /// # } - /// ``` - /// - /// [struct]: # - pub fn poll(&mut self, events: &mut Events, timeout: Option) -> io::Result<()> { - self.registry.selector.select(events.sys(), timeout) - } -} - -#[cfg(unix)] -impl AsRawFd for Poll { - fn as_raw_fd(&self) -> RawFd { - self.registry.as_raw_fd() - } -} - -impl fmt::Debug for Poll { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Poll").finish() - } -} - -impl Registry { - /// Register an [`event::Source`] with the `Poll` instance. - /// - /// Once registered, the `Poll` instance will monitor the event source for - /// readiness state changes. When it notices a state change, it will return - /// a readiness event for the handle the next time [`poll`] is called. - /// - /// See [`Poll`] docs for a high level overview. - /// - /// # Arguments - /// - /// `source: &mut S: event::Source`: This is the source of events that the - /// `Poll` instance should monitor for readiness state changes. - /// - /// `token: Token`: The caller picks a token to associate with the socket. - /// When [`poll`] returns an event for the handle, this token is included. - /// This allows the caller to map the event to its source. The token - /// associated with the `event::Source` can be changed at any time by - /// calling [`reregister`]. - /// - /// See documentation on [`Token`] for an example showing how to pick - /// [`Token`] values. - /// - /// `interest: Interest`: Specifies which operations `Poll` should monitor - /// for readiness. `Poll` will only return readiness events for operations - /// specified by this argument. 
- /// - /// If a socket is registered with readable interest and the socket becomes - /// writable, no event will be returned from [`poll`]. - /// - /// The readiness interest for an `event::Source` can be changed at any time - /// by calling [`reregister`]. - /// - /// # Notes - /// - /// Callers must ensure that if a source being registered with a `Poll` - /// instance was previously registered with that `Poll` instance, then a - /// call to [`deregister`] has already occurred. Consecutive calls to - /// `register` is unspecified behavior. - /// - /// Unless otherwise specified, the caller should assume that once an event - /// source is registered with a `Poll` instance, it is bound to that `Poll` - /// instance for the lifetime of the event source. This remains true even - /// if the event source is deregistered from the poll instance using - /// [`deregister`]. - /// - /// [`event::Source`]: ./event/trait.Source.html - /// [`poll`]: struct.Poll.html#method.poll - /// [`reregister`]: struct.Registry.html#method.reregister - /// [`deregister`]: struct.Registry.html#method.deregister - /// [`Token`]: struct.Token.html - /// - /// # Examples - /// - #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] - #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] - /// # use std::error::Error; - /// # use std::net; - /// # fn main() -> Result<(), Box> { - /// use mio::{Events, Poll, Interest, Token}; - /// use mio::net::TcpStream; - /// use std::net::SocketAddr; - /// use std::time::{Duration, Instant}; - /// - /// let mut poll = Poll::new()?; - /// - /// let address: SocketAddr = "127.0.0.1:0".parse()?; - /// let listener = net::TcpListener::bind(address)?; - /// let mut socket = TcpStream::connect(listener.local_addr()?)?; - /// - /// // Register the socket with `poll` - /// poll.registry().register( - /// &mut socket, - /// Token(0), - /// Interest::READABLE | Interest::WRITABLE)?; - /// - /// let mut events = 
Events::with_capacity(1024); - /// let start = Instant::now(); - /// let timeout = Duration::from_millis(500); - /// - /// loop { - /// let elapsed = start.elapsed(); - /// - /// if elapsed >= timeout { - /// // Connection timed out - /// return Ok(()); - /// } - /// - /// let remaining = timeout - elapsed; - /// poll.poll(&mut events, Some(remaining))?; - /// - /// for event in &events { - /// if event.token() == Token(0) { - /// // Something (probably) happened on the socket. - /// return Ok(()); - /// } - /// } - /// } - /// # } - /// ``` - pub fn register(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()> - where - S: event::Source + ?Sized, - { - trace!( - "registering event source with poller: token={:?}, interests={:?}", - token, - interests - ); - source.register(self, token, interests) - } - - /// Re-register an [`event::Source`] with the `Poll` instance. - /// - /// Re-registering an event source allows changing the details of the - /// registration. Specifically, it allows updating the associated `token` - /// and `interests` specified in previous `register` and `reregister` calls. - /// - /// The `reregister` arguments fully override the previous values. In other - /// words, if a socket is registered with [`readable`] interest and the call - /// to `reregister` specifies [`writable`], then read interest is no longer - /// requested for the handle. - /// - /// The event source must have previously been registered with this instance - /// of `Poll`, otherwise the behavior is unspecified. - /// - /// See the [`register`] documentation for details about the function - /// arguments and see the [`struct`] docs for a high level overview of - /// polling. 
- /// - /// # Examples - /// - #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] - #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] - /// # use std::error::Error; - /// # use std::net; - /// # fn main() -> Result<(), Box> { - /// use mio::{Poll, Interest, Token}; - /// use mio::net::TcpStream; - /// use std::net::SocketAddr; - /// - /// let poll = Poll::new()?; - /// - /// let address: SocketAddr = "127.0.0.1:0".parse()?; - /// let listener = net::TcpListener::bind(address)?; - /// let mut socket = TcpStream::connect(listener.local_addr()?)?; - /// - /// // Register the socket with `poll`, requesting readable - /// poll.registry().register( - /// &mut socket, - /// Token(0), - /// Interest::READABLE)?; - /// - /// // Reregister the socket specifying write interest instead. Even though - /// // the token is the same it must be specified. - /// poll.registry().reregister( - /// &mut socket, - /// Token(0), - /// Interest::WRITABLE)?; - /// # Ok(()) - /// # } - /// ``` - /// - /// [`event::Source`]: ./event/trait.Source.html - /// [`struct`]: struct.Poll.html - /// [`register`]: struct.Registry.html#method.register - /// [`readable`]: ./event/struct.Event.html#is_readable - /// [`writable`]: ./event/struct.Event.html#is_writable - pub fn reregister(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()> - where - S: event::Source + ?Sized, - { - trace!( - "reregistering event source with poller: token={:?}, interests={:?}", - token, - interests - ); - source.reregister(self, token, interests) - } - - /// Deregister an [`event::Source`] with the `Poll` instance. - /// - /// When an event source is deregistered, the `Poll` instance will no longer - /// monitor it for readiness state changes. Deregistering clears up any - /// internal resources needed to track the handle. 
After an explicit call - /// to this method completes, it is guaranteed that the token previously - /// registered to this handle will not be returned by a future poll, so long - /// as a happens-before relationship is established between this call and - /// the poll. - /// - /// The event source must have previously been registered with this instance - /// of `Poll`, otherwise the behavior is unspecified. - /// - /// A handle can be passed back to `register` after it has been - /// deregistered; however, it must be passed back to the **same** `Poll` - /// instance, otherwise the behavior is unspecified. - /// - /// # Examples - /// - #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] - #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] - /// # use std::error::Error; - /// # use std::net; - /// # fn main() -> Result<(), Box> { - /// use mio::{Events, Poll, Interest, Token}; - /// use mio::net::TcpStream; - /// use std::net::SocketAddr; - /// use std::time::Duration; - /// - /// let mut poll = Poll::new()?; - /// - /// let address: SocketAddr = "127.0.0.1:0".parse()?; - /// let listener = net::TcpListener::bind(address)?; - /// let mut socket = TcpStream::connect(listener.local_addr()?)?; - /// - /// // Register the socket with `poll` - /// poll.registry().register( - /// &mut socket, - /// Token(0), - /// Interest::READABLE)?; - /// - /// poll.registry().deregister(&mut socket)?; - /// - /// let mut events = Events::with_capacity(1024); - /// - /// // Set a timeout because this poll should never receive any events. - /// poll.poll(&mut events, Some(Duration::from_secs(1)))?; - /// assert!(events.is_empty()); - /// # Ok(()) - /// # } - /// ``` - pub fn deregister(&self, source: &mut S) -> io::Result<()> - where - S: event::Source + ?Sized, - { - trace!("deregistering event source from poller"); - source.deregister(self) - } - - /// Creates a new independently owned `Registry`. 
- /// - /// Event sources registered with this `Registry` will be registered with - /// the original `Registry` and `Poll` instance. - pub fn try_clone(&self) -> io::Result { - self.selector - .try_clone() - .map(|selector| Registry { selector }) - } - - /// Internal check to ensure only a single `Waker` is active per [`Poll`] - /// instance. - #[cfg(all(debug_assertions, not(target_os = "wasi")))] - pub(crate) fn register_waker(&self) { - assert!( - !self.selector.register_waker(), - "Only a single `Waker` can be active per `Poll` instance" - ); - } - - /// Get access to the `sys::Selector`. - #[cfg(any(not(target_os = "wasi"), feature = "net"))] - pub(crate) fn selector(&self) -> &sys::Selector { - &self.selector - } -} - -impl fmt::Debug for Registry { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Registry").finish() - } -} - -#[cfg(unix)] -impl AsRawFd for Registry { - fn as_raw_fd(&self) -> RawFd { - self.selector.as_raw_fd() - } -} - -cfg_os_poll! { - #[cfg(unix)] - #[test] - pub fn as_raw_fd() { - let poll = Poll::new().unwrap(); - assert!(poll.as_raw_fd() > 0); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,86 +0,0 @@ -//! Module with system specific types. -//! -//! Required types: -//! -//! * `Event`: a type alias for the system specific event, e.g. `kevent` or -//! `epoll_event`. -//! * `event`: a module with various helper functions for `Event`, see -//! [`crate::event::Event`] for the required functions. -//! * `Events`: collection of `Event`s, see [`crate::Events`]. -//! * `IoSourceState`: state for the `IoSource` type. -//! * `Selector`: selector used to register event sources and poll for events, -//! see [`crate::Poll`] and [`crate::Registry`] for required -//! 
methods. -//! * `tcp` and `udp` modules: see the [`crate::net`] module. -//! * `Waker`: see [`crate::Waker`]. - -cfg_os_poll! { - macro_rules! debug_detail { - ( - $type: ident ($event_type: ty), $test: path, - $($(#[$target: meta])* $libc: ident :: $flag: ident),+ $(,)* - ) => { - struct $type($event_type); - - impl fmt::Debug for $type { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut written_one = false; - $( - $(#[$target])* - #[allow(clippy::bad_bit_mask)] // Apparently some flags are zero. - { - // Windows doesn't use `libc` but the `afd` module. - if $test(&self.0, &$libc :: $flag) { - if !written_one { - write!(f, "{}", stringify!($flag))?; - written_one = true; - } else { - write!(f, "|{}", stringify!($flag))?; - } - } - } - )+ - if !written_one { - write!(f, "(empty)") - } else { - Ok(()) - } - } - } - }; - } -} - -#[cfg(unix)] -cfg_os_poll! { - mod unix; - pub use self::unix::*; -} - -#[cfg(windows)] -cfg_os_poll! { - mod windows; - pub use self::windows::*; -} - -#[cfg(target_os = "wasi")] -cfg_os_poll! { - mod wasi; - pub(crate) use self::wasi::*; -} - -cfg_not_os_poll! { - mod shell; - pub(crate) use self::shell::*; - - #[cfg(unix)] - cfg_any_os_ext! { - mod unix; - pub use self::unix::SourceFd; - } - - #[cfg(unix)] - cfg_net! { - pub use self::unix::SocketAddr; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -macro_rules! os_required { - () => { - panic!("mio must be compiled with `os-poll` to run.") - }; -} - -mod selector; -pub(crate) use self::selector::{event, Event, Events, Selector}; - -#[cfg(not(target_os = "wasi"))] -mod waker; -#[cfg(not(target_os = "wasi"))] -pub(crate) use self::waker::Waker; - -cfg_net! 
{ - pub(crate) mod tcp; - pub(crate) mod udp; - #[cfg(unix)] - pub(crate) mod uds; -} - -cfg_io_source! { - use std::io; - #[cfg(windows)] - use std::os::windows::io::RawSocket; - - #[cfg(windows)] - use crate::{Registry, Token, Interest}; - - pub(crate) struct IoSourceState; - - impl IoSourceState { - pub fn new() -> IoSourceState { - IoSourceState - } - - pub fn do_io(&self, f: F, io: &T) -> io::Result - where - F: FnOnce(&T) -> io::Result, - { - // We don't hold state, so we can just call the function and - // return. - f(io) - } - } - - #[cfg(windows)] - impl IoSourceState { - pub fn register( - &mut self, - _: &Registry, - _: Token, - _: Interest, - _: RawSocket, - ) -> io::Result<()> { - os_required!() - } - - pub fn reregister( - &mut self, - _: &Registry, - _: Token, - _: Interest, - ) -> io::Result<()> { - os_required!() - } - - pub fn deregister(&mut self) -> io::Result<()> { - os_required!() - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/selector.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/selector.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/selector.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/selector.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,127 +0,0 @@ -use std::io; -#[cfg(unix)] -use std::os::unix::io::{AsRawFd, RawFd}; -use std::time::Duration; - -pub type Event = usize; - -pub type Events = Vec; - -#[derive(Debug)] -pub struct Selector {} - -impl Selector { - pub fn try_clone(&self) -> io::Result { - os_required!(); - } - - pub fn select(&self, _: &mut Events, _: Option) -> io::Result<()> { - os_required!(); - } - - #[cfg(all(debug_assertions, not(target_os = "wasi")))] - pub fn register_waker(&self) -> bool { - os_required!(); - } -} - -#[cfg(unix)] -cfg_any_os_ext! 
{ - use crate::{Interest, Token}; - - impl Selector { - pub fn register(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> { - os_required!(); - } - - pub fn reregister(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> { - os_required!(); - } - - pub fn deregister(&self, _: RawFd) -> io::Result<()> { - os_required!(); - } - } -} - -#[cfg(target_os = "wasi")] -cfg_any_os_ext! { - use crate::{Interest, Token}; - - impl Selector { - pub fn register(&self, _: wasi::Fd, _: Token, _: Interest) -> io::Result<()> { - os_required!(); - } - - pub fn reregister(&self, _: wasi::Fd, _: Token, _: Interest) -> io::Result<()> { - os_required!(); - } - - pub fn deregister(&self, _: wasi::Fd) -> io::Result<()> { - os_required!(); - } - } -} - -cfg_io_source! { - #[cfg(debug_assertions)] - impl Selector { - pub fn id(&self) -> usize { - os_required!(); - } - } -} - -#[cfg(unix)] -impl AsRawFd for Selector { - fn as_raw_fd(&self) -> RawFd { - os_required!() - } -} - -#[allow(clippy::trivially_copy_pass_by_ref)] -pub mod event { - use crate::sys::Event; - use crate::Token; - use std::fmt; - - pub fn token(_: &Event) -> Token { - os_required!(); - } - - pub fn is_readable(_: &Event) -> bool { - os_required!(); - } - - pub fn is_writable(_: &Event) -> bool { - os_required!(); - } - - pub fn is_error(_: &Event) -> bool { - os_required!(); - } - - pub fn is_read_closed(_: &Event) -> bool { - os_required!(); - } - - pub fn is_write_closed(_: &Event) -> bool { - os_required!(); - } - - pub fn is_priority(_: &Event) -> bool { - os_required!(); - } - - pub fn is_aio(_: &Event) -> bool { - os_required!(); - } - - pub fn is_lio(_: &Event) -> bool { - os_required!(); - } - - pub fn debug_details(_: &mut fmt::Formatter<'_>, _: &Event) -> fmt::Result { - os_required!(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/tcp.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/tcp.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/tcp.rs 2024-02-06 12:28:08.000000000 
+0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/tcp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use std::io; -use std::net::{self, SocketAddr}; - -#[cfg(not(target_os = "wasi"))] -pub(crate) fn new_for_addr(_: SocketAddr) -> io::Result { - os_required!(); -} - -#[cfg(not(target_os = "wasi"))] -pub(crate) fn bind(_: &net::TcpListener, _: SocketAddr) -> io::Result<()> { - os_required!(); -} - -#[cfg(not(target_os = "wasi"))] -pub(crate) fn connect(_: &net::TcpStream, _: SocketAddr) -> io::Result<()> { - os_required!(); -} - -#[cfg(not(target_os = "wasi"))] -pub(crate) fn listen(_: &net::TcpListener, _: u32) -> io::Result<()> { - os_required!(); -} - -#[cfg(unix)] -pub(crate) fn set_reuseaddr(_: &net::TcpListener, _: bool) -> io::Result<()> { - os_required!(); -} - -pub(crate) fn accept(_: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> { - os_required!(); -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/udp.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/udp.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/udp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/udp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -#![cfg(not(target_os = "wasi"))] -use std::io; -use std::net::{self, SocketAddr}; - -pub fn bind(_: SocketAddr) -> io::Result { - os_required!() -} - -pub(crate) fn only_v6(_: &net::UdpSocket) -> io::Result { - os_required!() -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/uds.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/uds.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/uds.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/uds.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -pub(crate) mod datagram { - use crate::net::SocketAddr; - use std::io; - use std::os::unix::net; - use std::path::Path; - - pub(crate) fn bind(_: &Path) -> io::Result { - os_required!() - } - - pub(crate) fn 
unbound() -> io::Result { - os_required!() - } - - pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> { - os_required!() - } - - pub(crate) fn local_addr(_: &net::UnixDatagram) -> io::Result { - os_required!() - } - - pub(crate) fn peer_addr(_: &net::UnixDatagram) -> io::Result { - os_required!() - } - - pub(crate) fn recv_from( - _: &net::UnixDatagram, - _: &mut [u8], - ) -> io::Result<(usize, SocketAddr)> { - os_required!() - } -} - -pub(crate) mod listener { - use crate::net::{SocketAddr, UnixStream}; - use std::io; - use std::os::unix::net; - use std::path::Path; - - pub(crate) fn bind(_: &Path) -> io::Result { - os_required!() - } - - pub(crate) fn accept(_: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> { - os_required!() - } - - pub(crate) fn local_addr(_: &net::UnixListener) -> io::Result { - os_required!() - } -} - -pub(crate) mod stream { - use crate::net::SocketAddr; - use std::io; - use std::os::unix::net; - use std::path::Path; - - pub(crate) fn connect(_: &Path) -> io::Result { - os_required!() - } - - pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> { - os_required!() - } - - pub(crate) fn local_addr(_: &net::UnixStream) -> io::Result { - os_required!() - } - - pub(crate) fn peer_addr(_: &net::UnixStream) -> io::Result { - os_required!() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/waker.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/waker.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/shell/waker.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/shell/waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,16 +0,0 @@ -use crate::sys::Selector; -use crate::Token; -use std::io; - -#[derive(Debug)] -pub struct Waker {} - -impl Waker { - pub fn new(_: &Selector, _: Token) -> io::Result { - os_required!(); - } - - pub fn wake(&self) -> io::Result<()> { - os_required!(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/mod.rs 
s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -/// Helper macro to execute a system call that returns an `io::Result`. -// -// Macro must be defined before any modules that uses them. -#[allow(unused_macros)] -macro_rules! syscall { - ($fn: ident ( $($arg: expr),* $(,)* ) ) => {{ - let res = unsafe { libc::$fn($($arg, )*) }; - if res == -1 { - Err(std::io::Error::last_os_error()) - } else { - Ok(res) - } - }}; -} - -cfg_os_poll! { - mod selector; - pub(crate) use self::selector::{event, Event, Events, Selector}; - - mod sourcefd; - pub use self::sourcefd::SourceFd; - - mod waker; - pub(crate) use self::waker::Waker; - - cfg_net! { - mod net; - - pub(crate) mod tcp; - pub(crate) mod udp; - pub(crate) mod uds; - pub use self::uds::SocketAddr; - } - - cfg_io_source! { - use std::io; - - // Both `kqueue` and `epoll` don't need to hold any user space state. - pub(crate) struct IoSourceState; - - impl IoSourceState { - pub fn new() -> IoSourceState { - IoSourceState - } - - pub fn do_io(&self, f: F, io: &T) -> io::Result - where - F: FnOnce(&T) -> io::Result, - { - // We don't hold state, so we can just call the function and - // return. - f(io) - } - } - } - - cfg_os_ext! { - pub(crate) mod pipe; - } -} - -cfg_not_os_poll! { - cfg_net! { - mod uds; - pub use self::uds::SocketAddr; - } - - cfg_any_os_ext! 
{ - mod sourcefd; - pub use self::sourcefd::SourceFd; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/net.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/net.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/net.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/net.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,178 +0,0 @@ -use std::io; -use std::mem::size_of; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; - -pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: libc::c_int) -> io::Result { - let domain = match addr { - SocketAddr::V4(..) => libc::AF_INET, - SocketAddr::V6(..) => libc::AF_INET6, - }; - - new_socket(domain, socket_type) -} - -/// Create a new non-blocking socket. -pub(crate) fn new_socket(domain: libc::c_int, socket_type: libc::c_int) -> io::Result { - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd", - ))] - let socket_type = socket_type | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC; - - let socket = syscall!(socket(domain, socket_type, 0))?; - - // Mimick `libstd` and set `SO_NOSIGPIPE` on apple systems. - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - if let Err(err) = syscall!(setsockopt( - socket, - libc::SOL_SOCKET, - libc::SO_NOSIGPIPE, - &1 as *const libc::c_int as *const libc::c_void, - size_of::() as libc::socklen_t - )) { - let _ = syscall!(close(socket)); - return Err(err); - } - - // Darwin doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC. 
- #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - { - if let Err(err) = syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK)) { - let _ = syscall!(close(socket)); - return Err(err); - } - if let Err(err) = syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC)) { - let _ = syscall!(close(socket)); - return Err(err); - } - } - - Ok(socket) -} - -/// A type with the same memory layout as `libc::sockaddr`. Used in converting Rust level -/// SocketAddr* types into their system representation. The benefit of this specific -/// type over using `libc::sockaddr_storage` is that this type is exactly as large as it -/// needs to be and not a lot larger. And it can be initialized cleaner from Rust. -#[repr(C)] -pub(crate) union SocketAddrCRepr { - v4: libc::sockaddr_in, - v6: libc::sockaddr_in6, -} - -impl SocketAddrCRepr { - pub(crate) fn as_ptr(&self) -> *const libc::sockaddr { - self as *const _ as *const libc::sockaddr - } -} - -/// Converts a Rust `SocketAddr` into the system representation. -pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, libc::socklen_t) { - match addr { - SocketAddr::V4(ref addr) => { - // `s_addr` is stored as BE on all machine and the array is in BE order. - // So the native endian conversion method is used so that it's never swapped. 
- let sin_addr = libc::in_addr { - s_addr: u32::from_ne_bytes(addr.ip().octets()), - }; - - let sockaddr_in = libc::sockaddr_in { - sin_family: libc::AF_INET as libc::sa_family_t, - sin_port: addr.port().to_be(), - sin_addr, - sin_zero: [0; 8], - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "tvos", - target_os = "watchos", - ))] - sin_len: 0, - }; - - let sockaddr = SocketAddrCRepr { v4: sockaddr_in }; - let socklen = size_of::() as libc::socklen_t; - (sockaddr, socklen) - } - SocketAddr::V6(ref addr) => { - let sockaddr_in6 = libc::sockaddr_in6 { - sin6_family: libc::AF_INET6 as libc::sa_family_t, - sin6_port: addr.port().to_be(), - sin6_addr: libc::in6_addr { - s6_addr: addr.ip().octets(), - }, - sin6_flowinfo: addr.flowinfo(), - sin6_scope_id: addr.scope_id(), - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "tvos", - target_os = "watchos", - ))] - sin6_len: 0, - #[cfg(target_os = "illumos")] - __sin6_src_id: 0, - }; - - let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 }; - let socklen = size_of::() as libc::socklen_t; - (sockaddr, socklen) - } - } -} - -/// Converts a `libc::sockaddr` compatible struct into a native Rust `SocketAddr`. -/// -/// # Safety -/// -/// `storage` must have the `ss_family` field correctly initialized. -/// `storage` must be initialised to a `sockaddr_in` or `sockaddr_in6`. -pub(crate) unsafe fn to_socket_addr( - storage: *const libc::sockaddr_storage, -) -> io::Result { - match (*storage).ss_family as libc::c_int { - libc::AF_INET => { - // Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in. 
- let addr: &libc::sockaddr_in = &*(storage as *const libc::sockaddr_in); - let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes()); - let port = u16::from_be(addr.sin_port); - Ok(SocketAddr::V4(SocketAddrV4::new(ip, port))) - } - libc::AF_INET6 => { - // Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6. - let addr: &libc::sockaddr_in6 = &*(storage as *const libc::sockaddr_in6); - let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr); - let port = u16::from_be(addr.sin6_port); - Ok(SocketAddr::V6(SocketAddrV6::new( - ip, - port, - addr.sin6_flowinfo, - addr.sin6_scope_id, - ))) - } - _ => Err(io::ErrorKind::InvalidInput.into()), - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/pipe.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/pipe.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/pipe.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/pipe.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,577 +0,0 @@ -//! Unix pipe. -//! -//! See the [`new`] function for documentation. - -use std::fs::File; -use std::io::{self, IoSlice, IoSliceMut, Read, Write}; -use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; -use std::process::{ChildStderr, ChildStdin, ChildStdout}; - -use crate::io_source::IoSource; -use crate::{event, Interest, Registry, Token}; - -/// Create a new non-blocking Unix pipe. -/// -/// This is a wrapper around Unix's [`pipe(2)`] system call and can be used as -/// inter-process or thread communication channel. -/// -/// This channel may be created before forking the process and then one end used -/// in each process, e.g. the parent process has the sending end to send command -/// to the child process. -/// -/// [`pipe(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pipe.html -/// -/// # Events -/// -/// The [`Sender`] can be registered with [`WRITABLE`] interest to receive -/// [writable events], the [`Receiver`] with [`READABLE`] interest. 
Once data is -/// written to the `Sender` the `Receiver` will receive an [readable event]. -/// -/// In addition to those events, events will also be generated if the other side -/// is dropped. To check if the `Sender` is dropped you'll need to check -/// [`is_read_closed`] on events for the `Receiver`, if it returns true the -/// `Sender` is dropped. On the `Sender` end check [`is_write_closed`], if it -/// returns true the `Receiver` was dropped. Also see the second example below. -/// -/// [`WRITABLE`]: Interest::WRITABLE -/// [writable events]: event::Event::is_writable -/// [`READABLE`]: Interest::READABLE -/// [readable event]: event::Event::is_readable -/// [`is_read_closed`]: event::Event::is_read_closed -/// [`is_write_closed`]: event::Event::is_write_closed -/// -/// # Deregistering -/// -/// Both `Sender` and `Receiver` will deregister themselves when dropped, -/// **iff** the file descriptors are not duplicated (via [`dup(2)`]). -/// -/// [`dup(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/dup.html -/// -/// # Examples -/// -/// Simple example that writes data into the sending end and read it from the -/// receiving end. -/// -/// ``` -/// use std::io::{self, Read, Write}; -/// -/// use mio::{Poll, Events, Interest, Token}; -/// use mio::unix::pipe; -/// -/// // Unique tokens for the two ends of the channel. -/// const PIPE_RECV: Token = Token(0); -/// const PIPE_SEND: Token = Token(1); -/// -/// # fn main() -> io::Result<()> { -/// // Create our `Poll` instance and the `Events` container. -/// let mut poll = Poll::new()?; -/// let mut events = Events::with_capacity(8); -/// -/// // Create a new pipe. -/// let (mut sender, mut receiver) = pipe::new()?; -/// -/// // Register both ends of the channel. 
-/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?; -/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?; -/// -/// const MSG: &[u8; 11] = b"Hello world"; -/// -/// loop { -/// poll.poll(&mut events, None)?; -/// -/// for event in events.iter() { -/// match event.token() { -/// PIPE_SEND => sender.write(MSG) -/// .and_then(|n| if n != MSG.len() { -/// // We'll consider a short write an error in this -/// // example. NOTE: we can't use `write_all` with -/// // non-blocking I/O. -/// Err(io::ErrorKind::WriteZero.into()) -/// } else { -/// Ok(()) -/// })?, -/// PIPE_RECV => { -/// let mut buf = [0; 11]; -/// let n = receiver.read(&mut buf)?; -/// println!("received: {:?}", &buf[0..n]); -/// assert_eq!(n, MSG.len()); -/// assert_eq!(&buf, &*MSG); -/// return Ok(()); -/// }, -/// _ => unreachable!(), -/// } -/// } -/// } -/// # } -/// ``` -/// -/// Example that receives an event once the `Sender` is dropped. -/// -/// ``` -/// # use std::io; -/// # -/// # use mio::{Poll, Events, Interest, Token}; -/// # use mio::unix::pipe; -/// # -/// # const PIPE_RECV: Token = Token(0); -/// # const PIPE_SEND: Token = Token(1); -/// # -/// # fn main() -> io::Result<()> { -/// // Same setup as in the example above. -/// let mut poll = Poll::new()?; -/// let mut events = Events::with_capacity(8); -/// -/// let (mut sender, mut receiver) = pipe::new()?; -/// -/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?; -/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?; -/// -/// // Drop the sender. -/// drop(sender); -/// -/// poll.poll(&mut events, None)?; -/// -/// for event in events.iter() { -/// match event.token() { -/// PIPE_RECV if event.is_read_closed() => { -/// // Detected that the sender was dropped. 
-/// println!("Sender dropped!"); -/// return Ok(()); -/// }, -/// _ => unreachable!(), -/// } -/// } -/// # unreachable!(); -/// # } -/// ``` -pub fn new() -> io::Result<(Sender, Receiver)> { - let mut fds: [RawFd; 2] = [-1, -1]; - - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd", - target_os = "illumos", - target_os = "redox", - ))] - unsafe { - if libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC | libc::O_NONBLOCK) != 0 { - return Err(io::Error::last_os_error()); - } - } - - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - unsafe { - // For platforms that don't have `pipe2(2)` we need to manually set the - // correct flags on the file descriptor. - if libc::pipe(fds.as_mut_ptr()) != 0 { - return Err(io::Error::last_os_error()); - } - - for fd in &fds { - if libc::fcntl(*fd, libc::F_SETFL, libc::O_NONBLOCK) != 0 - || libc::fcntl(*fd, libc::F_SETFD, libc::FD_CLOEXEC) != 0 - { - let err = io::Error::last_os_error(); - // Don't leak file descriptors. Can't handle closing error though. - let _ = libc::close(fds[0]); - let _ = libc::close(fds[1]); - return Err(err); - } - } - } - - #[cfg(not(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "illumos", - target_os = "ios", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "tvos", - target_os = "watchos", - )))] - compile_error!("unsupported target for `mio::unix::pipe`"); - - // SAFETY: we just initialised the `fds` above. - let r = unsafe { Receiver::from_raw_fd(fds[0]) }; - let w = unsafe { Sender::from_raw_fd(fds[1]) }; - - Ok((w, r)) -} - -/// Sending end of an Unix pipe. -/// -/// See [`new`] for documentation, including examples. 
-#[derive(Debug)] -pub struct Sender { - inner: IoSource, -} - -impl Sender { - /// Set the `Sender` into or out of non-blocking mode. - pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { - set_nonblocking(self.inner.as_raw_fd(), nonblocking) - } - - /// Execute an I/O operation ensuring that the socket receives more events - /// if it hits a [`WouldBlock`] error. - /// - /// # Notes - /// - /// This method is required to be called for **all** I/O operations to - /// ensure the user will receive events once the socket is ready again after - /// returning a [`WouldBlock`] error. - /// - /// [`WouldBlock`]: io::ErrorKind::WouldBlock - /// - /// # Examples - /// - /// ``` - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use std::io; - /// use std::os::unix::io::AsRawFd; - /// use mio::unix::pipe; - /// - /// let (sender, receiver) = pipe::new()?; - /// - /// // Wait until the sender is writable... - /// - /// // Write to the sender using a direct libc call, of course the - /// // `io::Write` implementation would be easier to use. - /// let buf = b"hello"; - /// let n = sender.try_io(|| { - /// let buf_ptr = &buf as *const _ as *const _; - /// let res = unsafe { libc::write(sender.as_raw_fd(), buf_ptr, buf.len()) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::write, the closure - /// // should return `WouldBlock` error. - /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("write {} bytes", n); - /// - /// // Wait until the receiver is readable... - /// - /// // Read from the receiver using a direct libc call, of course the - /// // `io::Read` implementation would be easier to use. 
- /// let mut buf = [0; 512]; - /// let n = receiver.try_io(|| { - /// let buf_ptr = &mut buf as *mut _ as *mut _; - /// let res = unsafe { libc::read(receiver.as_raw_fd(), buf_ptr, buf.len()) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::read, the closure - /// // should return `WouldBlock` error. - /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("read {} bytes", n); - /// # Ok(()) - /// # } - /// ``` - pub fn try_io(&self, f: F) -> io::Result - where - F: FnOnce() -> io::Result, - { - self.inner.do_io(|_| f()) - } -} - -impl event::Source for Sender { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.register(registry, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.reregister(registry, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - self.inner.deregister(registry) - } -} - -impl Write for Sender { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.do_io(|mut sender| sender.write(buf)) - } - - fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - self.inner.do_io(|mut sender| sender.write_vectored(bufs)) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.do_io(|mut sender| sender.flush()) - } -} - -impl Write for &Sender { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.do_io(|mut sender| sender.write(buf)) - } - - fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - self.inner.do_io(|mut sender| sender.write_vectored(bufs)) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.do_io(|mut sender| sender.flush()) - } -} - -/// # Notes -/// -/// The underlying pipe is **not** set to non-blocking. 
-impl From for Sender { - fn from(stdin: ChildStdin) -> Sender { - // Safety: `ChildStdin` is guaranteed to be a valid file descriptor. - unsafe { Sender::from_raw_fd(stdin.into_raw_fd()) } - } -} - -impl FromRawFd for Sender { - unsafe fn from_raw_fd(fd: RawFd) -> Sender { - Sender { - inner: IoSource::new(File::from_raw_fd(fd)), - } - } -} - -impl AsRawFd for Sender { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -impl IntoRawFd for Sender { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -/// Receiving end of an Unix pipe. -/// -/// See [`new`] for documentation, including examples. -#[derive(Debug)] -pub struct Receiver { - inner: IoSource, -} - -impl Receiver { - /// Set the `Receiver` into or out of non-blocking mode. - pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { - set_nonblocking(self.inner.as_raw_fd(), nonblocking) - } - - /// Execute an I/O operation ensuring that the socket receives more events - /// if it hits a [`WouldBlock`] error. - /// - /// # Notes - /// - /// This method is required to be called for **all** I/O operations to - /// ensure the user will receive events once the socket is ready again after - /// returning a [`WouldBlock`] error. - /// - /// [`WouldBlock`]: io::ErrorKind::WouldBlock - /// - /// # Examples - /// - /// ``` - /// # use std::error::Error; - /// # - /// # fn main() -> Result<(), Box> { - /// use std::io; - /// use std::os::unix::io::AsRawFd; - /// use mio::unix::pipe; - /// - /// let (sender, receiver) = pipe::new()?; - /// - /// // Wait until the sender is writable... - /// - /// // Write to the sender using a direct libc call, of course the - /// // `io::Write` implementation would be easier to use. 
- /// let buf = b"hello"; - /// let n = sender.try_io(|| { - /// let buf_ptr = &buf as *const _ as *const _; - /// let res = unsafe { libc::write(sender.as_raw_fd(), buf_ptr, buf.len()) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::write, the closure - /// // should return `WouldBlock` error. - /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("write {} bytes", n); - /// - /// // Wait until the receiver is readable... - /// - /// // Read from the receiver using a direct libc call, of course the - /// // `io::Read` implementation would be easier to use. - /// let mut buf = [0; 512]; - /// let n = receiver.try_io(|| { - /// let buf_ptr = &mut buf as *mut _ as *mut _; - /// let res = unsafe { libc::read(receiver.as_raw_fd(), buf_ptr, buf.len()) }; - /// if res != -1 { - /// Ok(res as usize) - /// } else { - /// // If EAGAIN or EWOULDBLOCK is set by libc::read, the closure - /// // should return `WouldBlock` error. 
- /// Err(io::Error::last_os_error()) - /// } - /// })?; - /// eprintln!("read {} bytes", n); - /// # Ok(()) - /// # } - /// ``` - pub fn try_io(&self, f: F) -> io::Result - where - F: FnOnce() -> io::Result, - { - self.inner.do_io(|_| f()) - } -} - -impl event::Source for Receiver { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.register(registry, token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.reregister(registry, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - self.inner.deregister(registry) - } -} - -impl Read for Receiver { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.do_io(|mut sender| sender.read(buf)) - } - - fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { - self.inner.do_io(|mut sender| sender.read_vectored(bufs)) - } -} - -impl Read for &Receiver { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.do_io(|mut sender| sender.read(buf)) - } - - fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { - self.inner.do_io(|mut sender| sender.read_vectored(bufs)) - } -} - -/// # Notes -/// -/// The underlying pipe is **not** set to non-blocking. -impl From for Receiver { - fn from(stdout: ChildStdout) -> Receiver { - // Safety: `ChildStdout` is guaranteed to be a valid file descriptor. - unsafe { Receiver::from_raw_fd(stdout.into_raw_fd()) } - } -} - -/// # Notes -/// -/// The underlying pipe is **not** set to non-blocking. -impl From for Receiver { - fn from(stderr: ChildStderr) -> Receiver { - // Safety: `ChildStderr` is guaranteed to be a valid file descriptor. 
- unsafe { Receiver::from_raw_fd(stderr.into_raw_fd()) } - } -} - -impl FromRawFd for Receiver { - unsafe fn from_raw_fd(fd: RawFd) -> Receiver { - Receiver { - inner: IoSource::new(File::from_raw_fd(fd)), - } - } -} - -impl AsRawFd for Receiver { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -impl IntoRawFd for Receiver { - fn into_raw_fd(self) -> RawFd { - self.inner.into_inner().into_raw_fd() - } -} - -#[cfg(not(target_os = "illumos"))] -fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> { - let value = nonblocking as libc::c_int; - if unsafe { libc::ioctl(fd, libc::FIONBIO, &value) } == -1 { - Err(io::Error::last_os_error()) - } else { - Ok(()) - } -} - -#[cfg(target_os = "illumos")] -fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> { - let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) }; - if flags < 0 { - return Err(io::Error::last_os_error()); - } - - let nflags = if nonblocking { - flags | libc::O_NONBLOCK - } else { - flags & !libc::O_NONBLOCK - }; - - if flags != nflags { - if unsafe { libc::fcntl(fd, libc::F_SETFL, nflags) } < 0 { - return Err(io::Error::last_os_error()); - } - } - - Ok(()) -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/selector/epoll.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/selector/epoll.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/selector/epoll.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/selector/epoll.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,289 +0,0 @@ -use crate::{Interest, Token}; - -use libc::{EPOLLET, EPOLLIN, EPOLLOUT, EPOLLPRI, EPOLLRDHUP}; -use std::os::unix::io::{AsRawFd, RawFd}; -#[cfg(debug_assertions)] -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use std::time::Duration; -use std::{cmp, i32, io, ptr}; - -/// Unique id for use as `SelectorId`. 
-#[cfg(debug_assertions)] -static NEXT_ID: AtomicUsize = AtomicUsize::new(1); - -#[derive(Debug)] -pub struct Selector { - #[cfg(debug_assertions)] - id: usize, - ep: RawFd, - #[cfg(debug_assertions)] - has_waker: AtomicBool, -} - -impl Selector { - pub fn new() -> io::Result { - #[cfg(not(target_os = "android"))] - let res = syscall!(epoll_create1(libc::EPOLL_CLOEXEC)); - - // On Android < API level 16 `epoll_create1` is not defined, so use a - // raw system call. - // According to libuv, `EPOLL_CLOEXEC` is not defined on Android API < - // 21. But `EPOLL_CLOEXEC` is an alias for `O_CLOEXEC` on that platform, - // so we use it instead. - #[cfg(target_os = "android")] - let res = syscall!(syscall(libc::SYS_epoll_create1, libc::O_CLOEXEC)); - - let ep = match res { - Ok(ep) => ep as RawFd, - Err(err) => { - // When `epoll_create1` is not available fall back to use - // `epoll_create` followed by `fcntl`. - if let Some(libc::ENOSYS) = err.raw_os_error() { - match syscall!(epoll_create(1024)) { - Ok(ep) => match syscall!(fcntl(ep, libc::F_SETFD, libc::FD_CLOEXEC)) { - Ok(ep) => ep as RawFd, - Err(err) => { - // `fcntl` failed, cleanup `ep`. - let _ = unsafe { libc::close(ep) }; - return Err(err); - } - }, - Err(err) => return Err(err), - } - } else { - return Err(err); - } - } - }; - - Ok(Selector { - #[cfg(debug_assertions)] - id: NEXT_ID.fetch_add(1, Ordering::Relaxed), - ep, - #[cfg(debug_assertions)] - has_waker: AtomicBool::new(false), - }) - } - - pub fn try_clone(&self) -> io::Result { - syscall!(fcntl(self.ep, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|ep| Selector { - // It's the same selector, so we use the same id. - #[cfg(debug_assertions)] - id: self.id, - ep, - #[cfg(debug_assertions)] - has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)), - }) - } - - pub fn select(&self, events: &mut Events, timeout: Option) -> io::Result<()> { - // A bug in kernels < 2.6.37 makes timeouts larger than LONG_MAX / CONFIG_HZ - // (approx. 
30 minutes with CONFIG_HZ=1200) effectively infinite on 32 bits - // architectures. The magic number is the same constant used by libuv. - #[cfg(target_pointer_width = "32")] - const MAX_SAFE_TIMEOUT: u128 = 1789569; - #[cfg(not(target_pointer_width = "32"))] - const MAX_SAFE_TIMEOUT: u128 = libc::c_int::max_value() as u128; - - let timeout = timeout - .map(|to| { - // `Duration::as_millis` truncates, so round up. This avoids - // turning sub-millisecond timeouts into a zero timeout, unless - // the caller explicitly requests that by specifying a zero - // timeout. - let to_ms = to - .checked_add(Duration::from_nanos(999_999)) - .unwrap_or(to) - .as_millis(); - cmp::min(MAX_SAFE_TIMEOUT, to_ms) as libc::c_int - }) - .unwrap_or(-1); - - events.clear(); - syscall!(epoll_wait( - self.ep, - events.as_mut_ptr(), - events.capacity() as i32, - timeout, - )) - .map(|n_events| { - // This is safe because `epoll_wait` ensures that `n_events` are - // assigned. - unsafe { events.set_len(n_events as usize) }; - }) - } - - pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> { - let mut event = libc::epoll_event { - events: interests_to_epoll(interests), - u64: usize::from(token) as u64, - #[cfg(target_os = "redox")] - _pad: 0, - }; - - syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_ADD, fd, &mut event)).map(|_| ()) - } - - pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> { - let mut event = libc::epoll_event { - events: interests_to_epoll(interests), - u64: usize::from(token) as u64, - #[cfg(target_os = "redox")] - _pad: 0, - }; - - syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_MOD, fd, &mut event)).map(|_| ()) - } - - pub fn deregister(&self, fd: RawFd) -> io::Result<()> { - syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_DEL, fd, ptr::null_mut())).map(|_| ()) - } - - #[cfg(debug_assertions)] - pub fn register_waker(&self) -> bool { - self.has_waker.swap(true, Ordering::AcqRel) - } -} - -cfg_io_source! 
{ - impl Selector { - #[cfg(debug_assertions)] - pub fn id(&self) -> usize { - self.id - } - } -} - -impl AsRawFd for Selector { - fn as_raw_fd(&self) -> RawFd { - self.ep - } -} - -impl Drop for Selector { - fn drop(&mut self) { - if let Err(err) = syscall!(close(self.ep)) { - error!("error closing epoll: {}", err); - } - } -} - -fn interests_to_epoll(interests: Interest) -> u32 { - let mut kind = EPOLLET; - - if interests.is_readable() { - kind = kind | EPOLLIN | EPOLLRDHUP; - } - - if interests.is_writable() { - kind |= EPOLLOUT; - } - - if interests.is_priority() { - kind |= EPOLLPRI; - } - - kind as u32 -} - -pub type Event = libc::epoll_event; -pub type Events = Vec; - -pub mod event { - use std::fmt; - - use crate::sys::Event; - use crate::Token; - - pub fn token(event: &Event) -> Token { - Token(event.u64 as usize) - } - - pub fn is_readable(event: &Event) -> bool { - (event.events as libc::c_int & libc::EPOLLIN) != 0 - || (event.events as libc::c_int & libc::EPOLLPRI) != 0 - } - - pub fn is_writable(event: &Event) -> bool { - (event.events as libc::c_int & libc::EPOLLOUT) != 0 - } - - pub fn is_error(event: &Event) -> bool { - (event.events as libc::c_int & libc::EPOLLERR) != 0 - } - - pub fn is_read_closed(event: &Event) -> bool { - // Both halves of the socket have closed - event.events as libc::c_int & libc::EPOLLHUP != 0 - // Socket has received FIN or called shutdown(SHUT_RD) - || (event.events as libc::c_int & libc::EPOLLIN != 0 - && event.events as libc::c_int & libc::EPOLLRDHUP != 0) - } - - pub fn is_write_closed(event: &Event) -> bool { - // Both halves of the socket have closed - event.events as libc::c_int & libc::EPOLLHUP != 0 - // Unix pipe write end has closed - || (event.events as libc::c_int & libc::EPOLLOUT != 0 - && event.events as libc::c_int & libc::EPOLLERR != 0) - // The other side (read end) of a Unix pipe has closed. 
- || event.events as libc::c_int == libc::EPOLLERR - } - - pub fn is_priority(event: &Event) -> bool { - (event.events as libc::c_int & libc::EPOLLPRI) != 0 - } - - pub fn is_aio(_: &Event) -> bool { - // Not supported in the kernel, only in libc. - false - } - - pub fn is_lio(_: &Event) -> bool { - // Not supported. - false - } - - pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result { - #[allow(clippy::trivially_copy_pass_by_ref)] - fn check_events(got: &u32, want: &libc::c_int) -> bool { - (*got as libc::c_int & want) != 0 - } - debug_detail!( - EventsDetails(u32), - check_events, - libc::EPOLLIN, - libc::EPOLLPRI, - libc::EPOLLOUT, - libc::EPOLLRDNORM, - libc::EPOLLRDBAND, - libc::EPOLLWRNORM, - libc::EPOLLWRBAND, - libc::EPOLLMSG, - libc::EPOLLERR, - libc::EPOLLHUP, - libc::EPOLLET, - libc::EPOLLRDHUP, - libc::EPOLLONESHOT, - #[cfg(target_os = "linux")] - libc::EPOLLEXCLUSIVE, - #[cfg(any(target_os = "android", target_os = "linux"))] - libc::EPOLLWAKEUP, - libc::EPOLL_CLOEXEC, - ); - - // Can't reference fields in packed structures. - let e_u64 = event.u64; - f.debug_struct("epoll_event") - .field("events", &EventsDetails(event.events)) - .field("u64", &e_u64) - .finish() - } -} - -#[cfg(target_os = "android")] -#[test] -fn assert_close_on_exec_flag() { - // This assertion need to be true for Selector::new. 
- assert_eq!(libc::O_CLOEXEC, libc::EPOLL_CLOEXEC); -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/selector/kqueue.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/selector/kqueue.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/selector/kqueue.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/selector/kqueue.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,860 +0,0 @@ -use crate::{Interest, Token}; -use std::mem::{self, MaybeUninit}; -use std::ops::{Deref, DerefMut}; -use std::os::unix::io::{AsRawFd, RawFd}; -#[cfg(debug_assertions)] -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use std::time::Duration; -use std::{cmp, io, ptr, slice}; - -/// Unique id for use as `SelectorId`. -#[cfg(debug_assertions)] -static NEXT_ID: AtomicUsize = AtomicUsize::new(1); - -// Type of the `nchanges` and `nevents` parameters in the `kevent` function. -#[cfg(not(target_os = "netbsd"))] -type Count = libc::c_int; -#[cfg(target_os = "netbsd")] -type Count = libc::size_t; - -// Type of the `filter` field in the `kevent` structure. -#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))] -type Filter = libc::c_short; -#[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" -))] -type Filter = i16; -#[cfg(target_os = "netbsd")] -type Filter = u32; - -// Type of the `flags` field in the `kevent` structure. -#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))] -type Flags = libc::c_ushort; -#[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" -))] -type Flags = u16; -#[cfg(target_os = "netbsd")] -type Flags = u32; - -// Type of the `udata` field in the `kevent` structure. -#[cfg(not(target_os = "netbsd"))] -type UData = *mut libc::c_void; -#[cfg(target_os = "netbsd")] -type UData = libc::intptr_t; - -macro_rules! 
kevent { - ($id: expr, $filter: expr, $flags: expr, $data: expr) => { - libc::kevent { - ident: $id as libc::uintptr_t, - filter: $filter as Filter, - flags: $flags, - udata: $data as UData, - ..unsafe { mem::zeroed() } - } - }; -} - -#[derive(Debug)] -pub struct Selector { - #[cfg(debug_assertions)] - id: usize, - kq: RawFd, - #[cfg(debug_assertions)] - has_waker: AtomicBool, -} - -impl Selector { - pub fn new() -> io::Result { - let kq = syscall!(kqueue())?; - let selector = Selector { - #[cfg(debug_assertions)] - id: NEXT_ID.fetch_add(1, Ordering::Relaxed), - kq, - #[cfg(debug_assertions)] - has_waker: AtomicBool::new(false), - }; - - syscall!(fcntl(kq, libc::F_SETFD, libc::FD_CLOEXEC))?; - Ok(selector) - } - - pub fn try_clone(&self) -> io::Result { - syscall!(fcntl(self.kq, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|kq| Selector { - // It's the same selector, so we use the same id. - #[cfg(debug_assertions)] - id: self.id, - kq, - #[cfg(debug_assertions)] - has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)), - }) - } - - pub fn select(&self, events: &mut Events, timeout: Option) -> io::Result<()> { - let timeout = timeout.map(|to| libc::timespec { - tv_sec: cmp::min(to.as_secs(), libc::time_t::max_value() as u64) as libc::time_t, - // `Duration::subsec_nanos` is guaranteed to be less than one - // billion (the number of nanoseconds in a second), making the - // cast to i32 safe. The cast itself is needed for platforms - // where C's long is only 32 bits. - tv_nsec: libc::c_long::from(to.subsec_nanos() as i32), - }); - let timeout = timeout - .as_ref() - .map(|s| s as *const _) - .unwrap_or(ptr::null_mut()); - - events.clear(); - syscall!(kevent( - self.kq, - ptr::null(), - 0, - events.as_mut_ptr(), - events.capacity() as Count, - timeout, - )) - .map(|n_events| { - // This is safe because `kevent` ensures that `n_events` are - // assigned. 
- unsafe { events.set_len(n_events as usize) }; - }) - } - - pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> { - let flags = libc::EV_CLEAR | libc::EV_RECEIPT | libc::EV_ADD; - // At most we need two changes, but maybe we only need 1. - let mut changes: [MaybeUninit; 2] = - [MaybeUninit::uninit(), MaybeUninit::uninit()]; - let mut n_changes = 0; - - if interests.is_writable() { - let kevent = kevent!(fd, libc::EVFILT_WRITE, flags, token.0); - changes[n_changes] = MaybeUninit::new(kevent); - n_changes += 1; - } - - if interests.is_readable() { - let kevent = kevent!(fd, libc::EVFILT_READ, flags, token.0); - changes[n_changes] = MaybeUninit::new(kevent); - n_changes += 1; - } - - // Older versions of macOS (OS X 10.11 and 10.10 have been witnessed) - // can return EPIPE when registering a pipe file descriptor where the - // other end has already disappeared. For example code that creates a - // pipe, closes a file descriptor, and then registers the other end will - // see an EPIPE returned from `register`. - // - // It also turns out that kevent will still report events on the file - // descriptor, telling us that it's readable/hup at least after we've - // done this registration. As a result we just ignore `EPIPE` here - // instead of propagating it. - // - // More info can be found at tokio-rs/mio#582. - let changes = unsafe { - // This is safe because we ensure that at least `n_changes` are in - // the array. 
- slice::from_raw_parts_mut(changes[0].as_mut_ptr(), n_changes) - }; - kevent_register(self.kq, changes, &[libc::EPIPE as i64]) - } - - pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> { - let flags = libc::EV_CLEAR | libc::EV_RECEIPT; - let write_flags = if interests.is_writable() { - flags | libc::EV_ADD - } else { - flags | libc::EV_DELETE - }; - let read_flags = if interests.is_readable() { - flags | libc::EV_ADD - } else { - flags | libc::EV_DELETE - }; - - let mut changes: [libc::kevent; 2] = [ - kevent!(fd, libc::EVFILT_WRITE, write_flags, token.0), - kevent!(fd, libc::EVFILT_READ, read_flags, token.0), - ]; - - // Since there is no way to check with which interests the fd was - // registered we modify both readable and write, adding it when required - // and removing it otherwise, ignoring the ENOENT error when it comes - // up. The ENOENT error informs us that a filter we're trying to remove - // wasn't there in first place, but we don't really care since our goal - // is accomplished. - // - // For the explanation of ignoring `EPIPE` see `register`. - kevent_register( - self.kq, - &mut changes, - &[libc::ENOENT as i64, libc::EPIPE as i64], - ) - } - - pub fn deregister(&self, fd: RawFd) -> io::Result<()> { - let flags = libc::EV_DELETE | libc::EV_RECEIPT; - let mut changes: [libc::kevent; 2] = [ - kevent!(fd, libc::EVFILT_WRITE, flags, 0), - kevent!(fd, libc::EVFILT_READ, flags, 0), - ]; - - // Since there is no way to check with which interests the fd was - // registered we remove both filters (readable and writeable) and ignore - // the ENOENT error when it comes up. The ENOENT error informs us that - // the filter wasn't there in first place, but we don't really care - // about that since our goal is to remove it. 
- kevent_register(self.kq, &mut changes, &[libc::ENOENT as i64]) - } - - #[cfg(debug_assertions)] - pub fn register_waker(&self) -> bool { - self.has_waker.swap(true, Ordering::AcqRel) - } - - // Used by `Waker`. - #[cfg(any( - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - pub fn setup_waker(&self, token: Token) -> io::Result<()> { - // First attempt to accept user space notifications. - let mut kevent = kevent!( - 0, - libc::EVFILT_USER, - libc::EV_ADD | libc::EV_CLEAR | libc::EV_RECEIPT, - token.0 - ); - - syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| { - if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 { - Err(io::Error::from_raw_os_error(kevent.data as i32)) - } else { - Ok(()) - } - }) - } - - // Used by `Waker`. - #[cfg(any( - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - pub fn wake(&self, token: Token) -> io::Result<()> { - let mut kevent = kevent!( - 0, - libc::EVFILT_USER, - libc::EV_ADD | libc::EV_RECEIPT, - token.0 - ); - kevent.fflags = libc::NOTE_TRIGGER; - - syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| { - if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 { - Err(io::Error::from_raw_os_error(kevent.data as i32)) - } else { - Ok(()) - } - }) - } -} - -/// Register `changes` with `kq`ueue. -fn kevent_register( - kq: RawFd, - changes: &mut [libc::kevent], - ignored_errors: &[i64], -) -> io::Result<()> { - syscall!(kevent( - kq, - changes.as_ptr(), - changes.len() as Count, - changes.as_mut_ptr(), - changes.len() as Count, - ptr::null(), - )) - .map(|_| ()) - .or_else(|err| { - // According to the manual page of FreeBSD: "When kevent() call fails - // with EINTR error, all changes in the changelist have been applied", - // so we can safely ignore it. 
- if err.raw_os_error() == Some(libc::EINTR) { - Ok(()) - } else { - Err(err) - } - }) - .and_then(|()| check_errors(changes, ignored_errors)) -} - -/// Check all events for possible errors, it returns the first error found. -fn check_errors(events: &[libc::kevent], ignored_errors: &[i64]) -> io::Result<()> { - for event in events { - // We can't use references to packed structures (in checking the ignored - // errors), so we need copy the data out before use. - let data = event.data as _; - // Check for the error flag, the actual error will be in the `data` - // field. - if (event.flags & libc::EV_ERROR != 0) && data != 0 && !ignored_errors.contains(&data) { - return Err(io::Error::from_raw_os_error(data as i32)); - } - } - Ok(()) -} - -cfg_io_source! { - #[cfg(debug_assertions)] - impl Selector { - pub fn id(&self) -> usize { - self.id - } - } -} - -impl AsRawFd for Selector { - fn as_raw_fd(&self) -> RawFd { - self.kq - } -} - -impl Drop for Selector { - fn drop(&mut self) { - if let Err(err) = syscall!(close(self.kq)) { - error!("error closing kqueue: {}", err); - } - } -} - -pub type Event = libc::kevent; -pub struct Events(Vec); - -impl Events { - pub fn with_capacity(capacity: usize) -> Events { - Events(Vec::with_capacity(capacity)) - } -} - -impl Deref for Events { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for Events { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -// `Events` cannot derive `Send` or `Sync` because of the -// `udata: *mut ::c_void` field in `libc::kevent`. However, `Events`'s public -// API treats the `udata` field as a `uintptr_t` which is `Send`. `Sync` is -// safe because with a `events: &Events` value, the only access to the `udata` -// field is through `fn token(event: &Event)` which cannot mutate the field. 
-unsafe impl Send for Events {} -unsafe impl Sync for Events {} - -pub mod event { - use std::fmt; - - use crate::sys::Event; - use crate::Token; - - use super::{Filter, Flags}; - - pub fn token(event: &Event) -> Token { - Token(event.udata as usize) - } - - pub fn is_readable(event: &Event) -> bool { - event.filter == libc::EVFILT_READ || { - #[cfg(any( - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - // Used by the `Awakener`. On platforms that use `eventfd` or a unix - // pipe it will emit a readable event so we'll fake that here as - // well. - { - event.filter == libc::EVFILT_USER - } - #[cfg(not(any( - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - )))] - { - false - } - } - } - - pub fn is_writable(event: &Event) -> bool { - event.filter == libc::EVFILT_WRITE - } - - pub fn is_error(event: &Event) -> bool { - (event.flags & libc::EV_ERROR) != 0 || - // When the read end of the socket is closed, EV_EOF is set on - // flags, and fflags contains the error if there is one. - (event.flags & libc::EV_EOF) != 0 && event.fflags != 0 - } - - pub fn is_read_closed(event: &Event) -> bool { - event.filter == libc::EVFILT_READ && event.flags & libc::EV_EOF != 0 - } - - pub fn is_write_closed(event: &Event) -> bool { - event.filter == libc::EVFILT_WRITE && event.flags & libc::EV_EOF != 0 - } - - pub fn is_priority(_: &Event) -> bool { - // kqueue doesn't have priority indicators. - false - } - - #[allow(unused_variables)] // `event` is not used on some platforms. 
- pub fn is_aio(event: &Event) -> bool { - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - { - event.filter == libc::EVFILT_AIO - } - #[cfg(not(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - )))] - { - false - } - } - - #[allow(unused_variables)] // `event` is only used on FreeBSD. - pub fn is_lio(event: &Event) -> bool { - #[cfg(target_os = "freebsd")] - { - event.filter == libc::EVFILT_LIO - } - #[cfg(not(target_os = "freebsd"))] - { - false - } - } - - pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result { - debug_detail!( - FilterDetails(Filter), - PartialEq::eq, - libc::EVFILT_READ, - libc::EVFILT_WRITE, - libc::EVFILT_AIO, - libc::EVFILT_VNODE, - libc::EVFILT_PROC, - libc::EVFILT_SIGNAL, - libc::EVFILT_TIMER, - #[cfg(target_os = "freebsd")] - libc::EVFILT_PROCDESC, - #[cfg(any( - target_os = "freebsd", - target_os = "dragonfly", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - libc::EVFILT_FS, - #[cfg(target_os = "freebsd")] - libc::EVFILT_LIO, - #[cfg(any( - target_os = "freebsd", - target_os = "dragonfly", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - libc::EVFILT_USER, - #[cfg(target_os = "freebsd")] - libc::EVFILT_SENDFILE, - #[cfg(target_os = "freebsd")] - libc::EVFILT_EMPTY, - #[cfg(target_os = "dragonfly")] - libc::EVFILT_EXCEPT, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::EVFILT_MACHPORT, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::EVFILT_VM, - ); - - #[allow(clippy::trivially_copy_pass_by_ref)] - fn check_flag(got: &Flags, want: &Flags) -> bool { - (got & want) != 0 - 
} - debug_detail!( - FlagsDetails(Flags), - check_flag, - libc::EV_ADD, - libc::EV_DELETE, - libc::EV_ENABLE, - libc::EV_DISABLE, - libc::EV_ONESHOT, - libc::EV_CLEAR, - libc::EV_RECEIPT, - libc::EV_DISPATCH, - #[cfg(target_os = "freebsd")] - libc::EV_DROP, - libc::EV_FLAG1, - libc::EV_ERROR, - libc::EV_EOF, - libc::EV_SYSFLAGS, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::EV_FLAG0, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::EV_POLL, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::EV_OOBAND, - #[cfg(target_os = "dragonfly")] - libc::EV_NODATA, - ); - - #[allow(clippy::trivially_copy_pass_by_ref)] - fn check_fflag(got: &u32, want: &u32) -> bool { - (got & want) != 0 - } - debug_detail!( - FflagsDetails(u32), - check_fflag, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - libc::NOTE_TRIGGER, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - libc::NOTE_FFNOP, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - libc::NOTE_FFAND, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - libc::NOTE_FFOR, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - libc::NOTE_FFCOPY, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - 
target_os = "watchos", - ))] - libc::NOTE_FFCTRLMASK, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - libc::NOTE_FFLAGSMASK, - libc::NOTE_LOWAT, - libc::NOTE_DELETE, - libc::NOTE_WRITE, - #[cfg(target_os = "dragonfly")] - libc::NOTE_OOB, - #[cfg(target_os = "openbsd")] - libc::NOTE_EOF, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_EXTEND, - libc::NOTE_ATTRIB, - libc::NOTE_LINK, - libc::NOTE_RENAME, - libc::NOTE_REVOKE, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_NONE, - #[cfg(any(target_os = "openbsd"))] - libc::NOTE_TRUNCATE, - libc::NOTE_EXIT, - libc::NOTE_FORK, - libc::NOTE_EXEC, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_SIGNAL, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_EXITSTATUS, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_EXIT_DETAIL, - libc::NOTE_PDATAMASK, - libc::NOTE_PCTRLMASK, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "netbsd", - target_os = "openbsd", - ))] - libc::NOTE_TRACK, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "netbsd", - target_os = "openbsd", - ))] - libc::NOTE_TRACKERR, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "netbsd", - target_os = "openbsd", - ))] - libc::NOTE_CHILD, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_EXIT_DETAIL_MASK, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - 
libc::NOTE_EXIT_DECRYPTFAIL, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_EXIT_MEMORY, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_EXIT_CSERROR, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_VM_PRESSURE, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_VM_PRESSURE_TERMINATE, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_VM_PRESSURE_SUDDEN_TERMINATE, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_VM_ERROR, - #[cfg(any( - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_SECONDS, - #[cfg(any(target_os = "freebsd"))] - libc::NOTE_MSECONDS, - #[cfg(any( - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_USECONDS, - #[cfg(any( - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_NSECONDS, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_ABSOLUTE, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_LEEWAY, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_CRITICAL, - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ))] - libc::NOTE_BACKGROUND, - ); - - // Can't reference fields in packed structures. 
- let ident = event.ident; - let data = event.data; - let udata = event.udata; - f.debug_struct("kevent") - .field("ident", &ident) - .field("filter", &FilterDetails(event.filter)) - .field("flags", &FlagsDetails(event.flags)) - .field("fflags", &FflagsDetails(event.fflags)) - .field("data", &data) - .field("udata", &udata) - .finish() - } -} - -#[test] -#[cfg(feature = "os-ext")] -fn does_not_register_rw() { - use crate::unix::SourceFd; - use crate::{Poll, Token}; - - let kq = unsafe { libc::kqueue() }; - let mut kqf = SourceFd(&kq); - let poll = Poll::new().unwrap(); - - // Registering kqueue fd will fail if write is requested (On anything but - // some versions of macOS). - poll.registry() - .register(&mut kqf, Token(1234), Interest::READABLE) - .unwrap(); -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/selector/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/selector/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/selector/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/selector/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,49 +0,0 @@ -#[cfg(any( - target_os = "android", - target_os = "illumos", - target_os = "linux", - target_os = "redox", -))] -mod epoll; - -#[cfg(any( - target_os = "android", - target_os = "illumos", - target_os = "linux", - target_os = "redox", -))] -pub(crate) use self::epoll::{event, Event, Events, Selector}; - -#[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "tvos", - target_os = "watchos", -))] -mod kqueue; - -#[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "tvos", - target_os = "watchos", -))] -pub(crate) use self::kqueue::{event, Event, Events, Selector}; - -/// Lowest file descriptor used in `Selector::try_clone`. 
-/// -/// # Notes -/// -/// Usually fds 0, 1 and 2 are standard in, out and error. Some application -/// blindly assume this to be true, which means using any one of those a select -/// could result in some interesting and unexpected errors. Avoid that by using -/// an fd that doesn't have a pre-determined usage. -const LOWEST_FD: libc::c_int = 3; diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/sourcefd.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/sourcefd.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/sourcefd.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/sourcefd.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,116 +0,0 @@ -use crate::{event, Interest, Registry, Token}; - -use std::io; -use std::os::unix::io::RawFd; - -/// Adapter for [`RawFd`] providing an [`event::Source`] implementation. -/// -/// `SourceFd` enables registering any type with an FD with [`Poll`]. -/// -/// While only implementations for TCP and UDP are provided, Mio supports -/// registering any FD that can be registered with the underlying OS selector. -/// `SourceFd` provides the necessary bridge. -/// -/// Note that `SourceFd` takes a `&RawFd`. This is because `SourceFd` **does -/// not** take ownership of the FD. Specifically, it will not manage any -/// lifecycle related operations, such as closing the FD on drop. It is expected -/// that the `SourceFd` is constructed right before a call to -/// [`Registry::register`]. See the examples for more detail. -/// -/// [`event::Source`]: ../event/trait.Source.html -/// [`Poll`]: ../struct.Poll.html -/// [`Registry::register`]: ../struct.Registry.html#method.register -/// -/// # Examples -/// -/// Basic usage. 
-/// -#[cfg_attr( - all(feature = "os-poll", feature = "net", feature = "os-ext"), - doc = "```" -)] -#[cfg_attr( - not(all(feature = "os-poll", feature = "net", feature = "os-ext")), - doc = "```ignore" -)] -/// # use std::error::Error; -/// # fn main() -> Result<(), Box> { -/// use mio::{Interest, Poll, Token}; -/// use mio::unix::SourceFd; -/// -/// use std::os::unix::io::AsRawFd; -/// use std::net::TcpListener; -/// -/// // Bind a std listener -/// let listener = TcpListener::bind("127.0.0.1:0")?; -/// -/// let poll = Poll::new()?; -/// -/// // Register the listener -/// poll.registry().register( -/// &mut SourceFd(&listener.as_raw_fd()), -/// Token(0), -/// Interest::READABLE)?; -/// # Ok(()) -/// # } -/// ``` -/// -/// Implementing [`event::Source`] for a custom type backed by a [`RawFd`]. -/// -#[cfg_attr(all(feature = "os-poll", feature = "os-ext"), doc = "```")] -#[cfg_attr(not(all(feature = "os-poll", feature = "os-ext")), doc = "```ignore")] -/// use mio::{event, Interest, Registry, Token}; -/// use mio::unix::SourceFd; -/// -/// use std::os::unix::io::RawFd; -/// use std::io; -/// -/// # #[allow(dead_code)] -/// pub struct MyIo { -/// fd: RawFd, -/// } -/// -/// impl event::Source for MyIo { -/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest) -/// -> io::Result<()> -/// { -/// SourceFd(&self.fd).register(registry, token, interests) -/// } -/// -/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest) -/// -> io::Result<()> -/// { -/// SourceFd(&self.fd).reregister(registry, token, interests) -/// } -/// -/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> { -/// SourceFd(&self.fd).deregister(registry) -/// } -/// } -/// ``` -#[derive(Debug)] -pub struct SourceFd<'a>(pub &'a RawFd); - -impl<'a> event::Source for SourceFd<'a> { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - registry.selector().register(*self.0, 
token, interests) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - registry.selector().reregister(*self.0, token, interests) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - registry.selector().deregister(*self.0) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/tcp.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/tcp.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/tcp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/tcp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,113 +0,0 @@ -use std::convert::TryInto; -use std::io; -use std::mem::{size_of, MaybeUninit}; -use std::net::{self, SocketAddr}; -use std::os::unix::io::{AsRawFd, FromRawFd}; - -use crate::sys::unix::net::{new_socket, socket_addr, to_socket_addr}; - -pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result { - let domain = match address { - SocketAddr::V4(_) => libc::AF_INET, - SocketAddr::V6(_) => libc::AF_INET6, - }; - new_socket(domain, libc::SOCK_STREAM) -} - -pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> { - let (raw_addr, raw_addr_length) = socket_addr(&addr); - syscall!(bind(socket.as_raw_fd(), raw_addr.as_ptr(), raw_addr_length))?; - Ok(()) -} - -pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> { - let (raw_addr, raw_addr_length) = socket_addr(&addr); - - match syscall!(connect( - socket.as_raw_fd(), - raw_addr.as_ptr(), - raw_addr_length - )) { - Err(err) if err.raw_os_error() != Some(libc::EINPROGRESS) => Err(err), - _ => Ok(()), - } -} - -pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> { - let backlog = backlog.try_into().unwrap_or(i32::max_value()); - syscall!(listen(socket.as_raw_fd(), backlog))?; - Ok(()) -} - -pub(crate) fn set_reuseaddr(socket: &net::TcpListener, reuseaddr: bool) -> io::Result<()> { - let val: libc::c_int 
= i32::from(reuseaddr); - syscall!(setsockopt( - socket.as_raw_fd(), - libc::SOL_SOCKET, - libc::SO_REUSEADDR, - &val as *const libc::c_int as *const libc::c_void, - size_of::() as libc::socklen_t, - ))?; - Ok(()) -} - -pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> { - let mut addr: MaybeUninit = MaybeUninit::uninit(); - let mut length = size_of::() as libc::socklen_t; - - // On platforms that support it we can use `accept4(2)` to set `NONBLOCK` - // and `CLOEXEC` in the call to accept the connection. - #[cfg(any( - // Android x86's seccomp profile forbids calls to `accept4(2)` - // See https://github.com/tokio-rs/mio/issues/1445 for details - all(not(target_arch="x86"), target_os = "android"), - target_os = "dragonfly", - target_os = "freebsd", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd", - ))] - let stream = { - syscall!(accept4( - listener.as_raw_fd(), - addr.as_mut_ptr() as *mut _, - &mut length, - libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK, - )) - .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) }) - }?; - - // But not all platforms have the `accept4(2)` call. Luckily BSD (derived) - // OSes inherit the non-blocking flag from the listener, so we just have to - // set `CLOEXEC`. 
- #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "redox", - target_os = "tvos", - target_os = "watchos", - all(target_arch = "x86", target_os = "android"), - ))] - let stream = { - syscall!(accept( - listener.as_raw_fd(), - addr.as_mut_ptr() as *mut _, - &mut length - )) - .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) }) - .and_then(|s| { - syscall!(fcntl(s.as_raw_fd(), libc::F_SETFD, libc::FD_CLOEXEC))?; - - // See https://github.com/tokio-rs/mio/issues/1450 - #[cfg(all(target_arch = "x86", target_os = "android"))] - syscall!(fcntl(s.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK))?; - - Ok(s) - }) - }?; - - // This is safe because `accept` calls above ensures the address - // initialised. - unsafe { to_socket_addr(addr.as_ptr()) }.map(|addr| (stream, addr)) -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/udp.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/udp.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/udp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/udp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use crate::sys::unix::net::{new_ip_socket, socket_addr}; - -use std::io; -use std::mem; -use std::net::{self, SocketAddr}; -use std::os::unix::io::{AsRawFd, FromRawFd}; - -pub fn bind(addr: SocketAddr) -> io::Result { - let fd = new_ip_socket(addr, libc::SOCK_DGRAM)?; - let socket = unsafe { net::UdpSocket::from_raw_fd(fd) }; - - let (raw_addr, raw_addr_length) = socket_addr(&addr); - syscall!(bind(fd, raw_addr.as_ptr(), raw_addr_length))?; - - Ok(socket) -} - -pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result { - let mut optval: libc::c_int = 0; - let mut optlen = mem::size_of::() as libc::socklen_t; - - syscall!(getsockopt( - socket.as_raw_fd(), - libc::IPPROTO_IPV6, - libc::IPV6_V6ONLY, - &mut optval as *mut _ as *mut _, - &mut optlen, - ))?; - - Ok(optval != 0) -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/datagram.rs 
s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/datagram.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/datagram.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/datagram.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -use super::{socket_addr, SocketAddr}; -use crate::sys::unix::net::new_socket; - -use std::io; -use std::os::unix::io::{AsRawFd, FromRawFd}; -use std::os::unix::net; -use std::path::Path; - -pub(crate) fn bind(path: &Path) -> io::Result { - let (sockaddr, socklen) = socket_addr(path)?; - let sockaddr = &sockaddr as *const libc::sockaddr_un as *const _; - - let socket = unbound()?; - syscall!(bind(socket.as_raw_fd(), sockaddr, socklen))?; - - Ok(socket) -} - -pub(crate) fn unbound() -> io::Result { - let fd = new_socket(libc::AF_UNIX, libc::SOCK_DGRAM)?; - Ok(unsafe { net::UnixDatagram::from_raw_fd(fd) }) -} - -pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> { - super::pair(libc::SOCK_DGRAM) -} - -pub(crate) fn local_addr(socket: &net::UnixDatagram) -> io::Result { - super::local_addr(socket.as_raw_fd()) -} - -pub(crate) fn peer_addr(socket: &net::UnixDatagram) -> io::Result { - super::peer_addr(socket.as_raw_fd()) -} - -pub(crate) fn recv_from( - socket: &net::UnixDatagram, - dst: &mut [u8], -) -> io::Result<(usize, SocketAddr)> { - let mut count = 0; - let socketaddr = SocketAddr::new(|sockaddr, socklen| { - syscall!(recvfrom( - socket.as_raw_fd(), - dst.as_mut_ptr() as *mut _, - dst.len(), - 0, - sockaddr, - socklen, - )) - .map(|c| { - count = c; - c as libc::c_int - }) - })?; - Ok((count as usize, socketaddr)) -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/listener.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/listener.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/listener.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/listener.rs 1970-01-01 01:00:00.000000000 +0100 @@ 
-1,93 +0,0 @@ -use super::socket_addr; -use crate::net::{SocketAddr, UnixStream}; -use crate::sys::unix::net::new_socket; -use std::os::unix::io::{AsRawFd, FromRawFd}; -use std::os::unix::net; -use std::path::Path; -use std::{io, mem}; - -pub(crate) fn bind(path: &Path) -> io::Result { - let (sockaddr, socklen) = socket_addr(path)?; - let sockaddr = &sockaddr as *const libc::sockaddr_un as *const libc::sockaddr; - - let fd = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?; - let socket = unsafe { net::UnixListener::from_raw_fd(fd) }; - syscall!(bind(fd, sockaddr, socklen))?; - syscall!(listen(fd, 1024))?; - - Ok(socket) -} - -pub(crate) fn accept(listener: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> { - let sockaddr = mem::MaybeUninit::::zeroed(); - - // This is safe to assume because a `libc::sockaddr_un` filled with `0` - // bytes is properly initialized. - // - // `0` is a valid value for `sockaddr_un::sun_family`; it is - // `libc::AF_UNSPEC`. - // - // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an - // abstract path. 
- let mut sockaddr = unsafe { sockaddr.assume_init() }; - - sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t; - let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t; - - #[cfg(not(any( - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "redox", - target_os = "tvos", - target_os = "watchos", - // Android x86's seccomp profile forbids calls to `accept4(2)` - // See https://github.com/tokio-rs/mio/issues/1445 for details - all(target_arch = "x86", target_os = "android"), - )))] - let socket = { - let flags = libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC; - syscall!(accept4( - listener.as_raw_fd(), - &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr, - &mut socklen, - flags - )) - .map(|socket| unsafe { net::UnixStream::from_raw_fd(socket) }) - }; - - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "redox", - target_os = "tvos", - target_os = "watchos", - all(target_arch = "x86", target_os = "android") - ))] - let socket = syscall!(accept( - listener.as_raw_fd(), - &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr, - &mut socklen, - )) - .and_then(|socket| { - // Ensure the socket is closed if either of the `fcntl` calls - // error below. 
- let s = unsafe { net::UnixStream::from_raw_fd(socket) }; - syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC))?; - - // See https://github.com/tokio-rs/mio/issues/1450 - #[cfg(all(target_arch = "x86", target_os = "android"))] - syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK))?; - - Ok(s) - }); - - socket - .map(UnixStream::from_std) - .map(|stream| (stream, SocketAddr::from_parts(sockaddr, socklen))) -} - -pub(crate) fn local_addr(listener: &net::UnixListener) -> io::Result { - super::local_addr(listener.as_raw_fd()) -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,159 +0,0 @@ -mod socketaddr; -pub use self::socketaddr::SocketAddr; - -/// Get the `sun_path` field offset of `sockaddr_un` for the target OS. -/// -/// On Linux, this function equates to the same value as -/// `size_of::()`, but some other implementations include -/// other fields before `sun_path`, so the expression more portably -/// describes the size of the address structure. -pub(in crate::sys) fn path_offset(sockaddr: &libc::sockaddr_un) -> usize { - let base = sockaddr as *const _ as usize; - let path = &sockaddr.sun_path as *const _ as usize; - path - base -} - -cfg_os_poll! { - use std::cmp::Ordering; - use std::os::unix::ffi::OsStrExt; - use std::os::unix::io::{RawFd, FromRawFd}; - use std::path::Path; - use std::{io, mem}; - - pub(crate) mod datagram; - pub(crate) mod listener; - pub(crate) mod stream; - - pub(in crate::sys) fn socket_addr(path: &Path) -> io::Result<(libc::sockaddr_un, libc::socklen_t)> { - let sockaddr = mem::MaybeUninit::::zeroed(); - - // This is safe to assume because a `libc::sockaddr_un` filled with `0` - // bytes is properly initialized. 
- // - // `0` is a valid value for `sockaddr_un::sun_family`; it is - // `libc::AF_UNSPEC`. - // - // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an - // abstract path. - let mut sockaddr = unsafe { sockaddr.assume_init() }; - - sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t; - - let bytes = path.as_os_str().as_bytes(); - match (bytes.first(), bytes.len().cmp(&sockaddr.sun_path.len())) { - // Abstract paths don't need a null terminator - (Some(&0), Ordering::Greater) => { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "path must be no longer than libc::sockaddr_un.sun_path", - )); - } - (_, Ordering::Greater) | (_, Ordering::Equal) => { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "path must be shorter than libc::sockaddr_un.sun_path", - )); - } - _ => {} - } - - for (dst, src) in sockaddr.sun_path.iter_mut().zip(bytes.iter()) { - *dst = *src as libc::c_char; - } - - let offset = path_offset(&sockaddr); - let mut socklen = offset + bytes.len(); - - match bytes.first() { - // The struct has already been zeroes so the null byte for pathname - // addresses is already there. - Some(&0) | None => {} - Some(_) => socklen += 1, - } - - Ok((sockaddr, socklen as libc::socklen_t)) - } - - fn pair(flags: libc::c_int) -> io::Result<(T, T)> - where T: FromRawFd, - { - #[cfg(not(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - )))] - let flags = flags | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC; - - let mut fds = [-1; 2]; - syscall!(socketpair(libc::AF_UNIX, flags, 0, fds.as_mut_ptr()))?; - let pair = unsafe { (T::from_raw_fd(fds[0]), T::from_raw_fd(fds[1])) }; - - // Darwin doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC. - // - // In order to set those flags, additional `fcntl` sys calls must be - // performed. If a `fnctl` fails after the sockets have been created, - // the file descriptors will leak. 
Creating `pair` above ensures that if - // there is an error, the file descriptors are closed. - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] - { - syscall!(fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK))?; - syscall!(fcntl(fds[0], libc::F_SETFD, libc::FD_CLOEXEC))?; - syscall!(fcntl(fds[1], libc::F_SETFL, libc::O_NONBLOCK))?; - syscall!(fcntl(fds[1], libc::F_SETFD, libc::FD_CLOEXEC))?; - } - Ok(pair) - } - - // The following functions can't simply be replaced with a call to - // `net::UnixDatagram` because of our `SocketAddr` type. - - fn local_addr(socket: RawFd) -> io::Result { - SocketAddr::new(|sockaddr, socklen| syscall!(getsockname(socket, sockaddr, socklen))) - } - - fn peer_addr(socket: RawFd) -> io::Result { - SocketAddr::new(|sockaddr, socklen| syscall!(getpeername(socket, sockaddr, socklen))) - } - - #[cfg(test)] - mod tests { - use super::{path_offset, socket_addr}; - use std::path::Path; - use std::str; - - #[test] - fn pathname_address() { - const PATH: &str = "./foo/bar.txt"; - const PATH_LEN: usize = 13; - - // Pathname addresses do have a null terminator, so `socklen` is - // expected to be `PATH_LEN` + `offset` + 1. - let path = Path::new(PATH); - let (sockaddr, actual) = socket_addr(path).unwrap(); - let offset = path_offset(&sockaddr); - let expected = PATH_LEN + offset + 1; - assert_eq!(expected as libc::socklen_t, actual) - } - - #[test] - fn abstract_address() { - const PATH: &[u8] = &[0, 116, 111, 107, 105, 111]; - const PATH_LEN: usize = 6; - - // Abstract addresses do not have a null terminator, so `socklen` is - // expected to be `PATH_LEN` + `offset`. 
- let abstract_path = str::from_utf8(PATH).unwrap(); - let path = Path::new(abstract_path); - let (sockaddr, actual) = socket_addr(path).unwrap(); - let offset = path_offset(&sockaddr); - let expected = PATH_LEN + offset; - assert_eq!(expected as libc::socklen_t, actual) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/socketaddr.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/socketaddr.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/socketaddr.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/socketaddr.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,130 +0,0 @@ -use super::path_offset; -use std::ffi::OsStr; -use std::os::unix::ffi::OsStrExt; -use std::path::Path; -use std::{ascii, fmt}; - -/// An address associated with a `mio` specific Unix socket. -/// -/// This is implemented instead of imported from [`net::SocketAddr`] because -/// there is no way to create a [`net::SocketAddr`]. One must be returned by -/// [`accept`], so this is returned instead. -/// -/// [`net::SocketAddr`]: std::os::unix::net::SocketAddr -/// [`accept`]: #method.accept -pub struct SocketAddr { - sockaddr: libc::sockaddr_un, - socklen: libc::socklen_t, -} - -struct AsciiEscaped<'a>(&'a [u8]); - -enum AddressKind<'a> { - Unnamed, - Pathname(&'a Path), - Abstract(&'a [u8]), -} - -impl SocketAddr { - fn address(&self) -> AddressKind<'_> { - let offset = path_offset(&self.sockaddr); - // Don't underflow in `len` below. 
- if (self.socklen as usize) < offset { - return AddressKind::Unnamed; - } - let len = self.socklen as usize - offset; - let path = unsafe { &*(&self.sockaddr.sun_path as *const [libc::c_char] as *const [u8]) }; - - // macOS seems to return a len of 16 and a zeroed sun_path for unnamed addresses - if len == 0 - || (cfg!(not(any(target_os = "linux", target_os = "android"))) - && self.sockaddr.sun_path[0] == 0) - { - AddressKind::Unnamed - } else if self.sockaddr.sun_path[0] == 0 { - AddressKind::Abstract(&path[1..len]) - } else { - AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref()) - } - } -} - -cfg_os_poll! { - use std::{io, mem}; - - impl SocketAddr { - pub(crate) fn new(f: F) -> io::Result - where - F: FnOnce(*mut libc::sockaddr, &mut libc::socklen_t) -> io::Result, - { - let mut sockaddr = { - let sockaddr = mem::MaybeUninit::::zeroed(); - unsafe { sockaddr.assume_init() } - }; - - let raw_sockaddr = &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr; - let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t; - - f(raw_sockaddr, &mut socklen)?; - Ok(SocketAddr::from_parts(sockaddr, socklen)) - } - - pub(crate) fn from_parts(sockaddr: libc::sockaddr_un, socklen: libc::socklen_t) -> SocketAddr { - SocketAddr { sockaddr, socklen } - } - - /// Returns `true` if the address is unnamed. - /// - /// Documentation reflected in [`SocketAddr`] - /// - /// [`SocketAddr`]: std::os::unix::net::SocketAddr - pub fn is_unnamed(&self) -> bool { - matches!(self.address(), AddressKind::Unnamed) - } - - /// Returns the contents of this address if it is a `pathname` address. - /// - /// Documentation reflected in [`SocketAddr`] - /// - /// [`SocketAddr`]: std::os::unix::net::SocketAddr - pub fn as_pathname(&self) -> Option<&Path> { - if let AddressKind::Pathname(path) = self.address() { - Some(path) - } else { - None - } - } - - /// Returns the contents of this address if it is an abstract namespace - /// without the leading null byte. 
- // Link to std::os::unix::net::SocketAddr pending - // https://github.com/rust-lang/rust/issues/85410. - pub fn as_abstract_namespace(&self) -> Option<&[u8]> { - if let AddressKind::Abstract(path) = self.address() { - Some(path) - } else { - None - } - } - } -} - -impl fmt::Debug for SocketAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.address() { - AddressKind::Unnamed => write!(fmt, "(unnamed)"), - AddressKind::Abstract(name) => write!(fmt, "{} (abstract)", AsciiEscaped(name)), - AddressKind::Pathname(path) => write!(fmt, "{:?} (pathname)", path), - } - } -} - -impl<'a> fmt::Display for AsciiEscaped<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "\"")?; - for byte in self.0.iter().cloned().flat_map(ascii::escape_default) { - write!(fmt, "{}", byte as char)?; - } - write!(fmt, "\"") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/stream.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/stream.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/uds/stream.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/uds/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,34 +0,0 @@ -use super::{socket_addr, SocketAddr}; -use crate::sys::unix::net::new_socket; - -use std::io; -use std::os::unix::io::{AsRawFd, FromRawFd}; -use std::os::unix::net; -use std::path::Path; - -pub(crate) fn connect(path: &Path) -> io::Result { - let (sockaddr, socklen) = socket_addr(path)?; - let sockaddr = &sockaddr as *const libc::sockaddr_un as *const libc::sockaddr; - - let fd = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?; - let socket = unsafe { net::UnixStream::from_raw_fd(fd) }; - match syscall!(connect(fd, sockaddr, socklen)) { - Ok(_) => {} - Err(ref err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {} - Err(e) => return Err(e), - } - - Ok(socket) -} - -pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> { - 
super::pair(libc::SOCK_STREAM) -} - -pub(crate) fn local_addr(socket: &net::UnixStream) -> io::Result { - super::local_addr(socket.as_raw_fd()) -} - -pub(crate) fn peer_addr(socket: &net::UnixStream) -> io::Result { - super::peer_addr(socket.as_raw_fd()) -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/waker.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/waker.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/unix/waker.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/unix/waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,185 +0,0 @@ -#[cfg(any(target_os = "linux", target_os = "android"))] -mod eventfd { - use crate::sys::Selector; - use crate::{Interest, Token}; - - use std::fs::File; - use std::io::{self, Read, Write}; - use std::os::unix::io::FromRawFd; - - /// Waker backed by `eventfd`. - /// - /// `eventfd` is effectively an 64 bit counter. All writes must be of 8 - /// bytes (64 bits) and are converted (native endian) into an 64 bit - /// unsigned integer and added to the count. Reads must also be 8 bytes and - /// reset the count to 0, returning the count. - #[derive(Debug)] - pub struct Waker { - fd: File, - } - - impl Waker { - pub fn new(selector: &Selector, token: Token) -> io::Result { - let fd = syscall!(eventfd(0, libc::EFD_CLOEXEC | libc::EFD_NONBLOCK))?; - let file = unsafe { File::from_raw_fd(fd) }; - - selector.register(fd, token, Interest::READABLE)?; - Ok(Waker { fd: file }) - } - - pub fn wake(&self) -> io::Result<()> { - let buf: [u8; 8] = 1u64.to_ne_bytes(); - match (&self.fd).write(&buf) { - Ok(_) => Ok(()), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - // Writing only blocks if the counter is going to overflow. - // So we'll reset the counter to 0 and wake it again. - self.reset()?; - self.wake() - } - Err(err) => Err(err), - } - } - - /// Reset the eventfd object, only need to call this if `wake` fails. 
- fn reset(&self) -> io::Result<()> { - let mut buf: [u8; 8] = 0u64.to_ne_bytes(); - match (&self.fd).read(&mut buf) { - Ok(_) => Ok(()), - // If the `Waker` hasn't been awoken yet this will return a - // `WouldBlock` error which we can safely ignore. - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Ok(()), - Err(err) => Err(err), - } - } - } -} - -#[cfg(any(target_os = "linux", target_os = "android"))] -pub use self::eventfd::Waker; - -#[cfg(any( - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", -))] -mod kqueue { - use crate::sys::Selector; - use crate::Token; - - use std::io; - - /// Waker backed by kqueue user space notifications (`EVFILT_USER`). - /// - /// The implementation is fairly simple, first the kqueue must be setup to - /// receive waker events this done by calling `Selector.setup_waker`. Next - /// we need access to kqueue, thus we need to duplicate the file descriptor. - /// Now waking is as simple as adding an event to the kqueue. - #[derive(Debug)] - pub struct Waker { - selector: Selector, - token: Token, - } - - impl Waker { - pub fn new(selector: &Selector, token: Token) -> io::Result { - let selector = selector.try_clone()?; - selector.setup_waker(token)?; - Ok(Waker { selector, token }) - } - - pub fn wake(&self) -> io::Result<()> { - self.selector.wake(self.token) - } - } -} - -#[cfg(any( - target_os = "freebsd", - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", -))] -pub use self::kqueue::Waker; - -#[cfg(any( - target_os = "dragonfly", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", -))] -mod pipe { - use crate::sys::unix::Selector; - use crate::{Interest, Token}; - - use std::fs::File; - use std::io::{self, Read, Write}; - use std::os::unix::io::FromRawFd; - - /// Waker backed by a unix pipe. 
- /// - /// Waker controls both the sending and receiving ends and empties the pipe - /// if writing to it (waking) fails. - #[derive(Debug)] - pub struct Waker { - sender: File, - receiver: File, - } - - impl Waker { - pub fn new(selector: &Selector, token: Token) -> io::Result { - let mut fds = [-1; 2]; - syscall!(pipe2(fds.as_mut_ptr(), libc::O_NONBLOCK | libc::O_CLOEXEC))?; - let sender = unsafe { File::from_raw_fd(fds[1]) }; - let receiver = unsafe { File::from_raw_fd(fds[0]) }; - - selector.register(fds[0], token, Interest::READABLE)?; - Ok(Waker { sender, receiver }) - } - - pub fn wake(&self) -> io::Result<()> { - // The epoll emulation on some illumos systems currently requires - // the pipe buffer to be completely empty for an edge-triggered - // wakeup on the pipe read side. - #[cfg(target_os = "illumos")] - self.empty(); - - match (&self.sender).write(&[1]) { - Ok(_) => Ok(()), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - // The reading end is full so we'll empty the buffer and try - // again. - self.empty(); - self.wake() - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => self.wake(), - Err(err) => Err(err), - } - } - - /// Empty the pipe's buffer, only need to call this if `wake` fails. - /// This ignores any errors. - fn empty(&self) { - let mut buf = [0; 4096]; - loop { - match (&self.receiver).read(&mut buf) { - Ok(n) if n > 0 => continue, - _ => return, - } - } - } - } -} - -#[cfg(any( - target_os = "dragonfly", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", -))] -pub use self::pipe::Waker; diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/wasi/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/wasi/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/wasi/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/wasi/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,370 +0,0 @@ -//! # Notes -//! -//! 
The current implementation is somewhat limited. The `Waker` is not -//! implemented, as at the time of writing there is no way to support to wake-up -//! a thread from calling `poll_oneoff`. -//! -//! Furthermore the (re/de)register functions also don't work while concurrently -//! polling as both registering and polling requires a lock on the -//! `subscriptions`. -//! -//! Finally `Selector::try_clone`, required by `Registry::try_clone`, doesn't -//! work. However this could be implemented by use of an `Arc`. -//! -//! In summary, this only (barely) works using a single thread. - -use std::cmp::min; -use std::io; -#[cfg(all(feature = "net", debug_assertions))] -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::{Arc, Mutex}; -use std::time::Duration; - -#[cfg(feature = "net")] -use crate::{Interest, Token}; - -cfg_net! { - pub(crate) mod tcp { - use std::io; - use std::net::{self, SocketAddr}; - - pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> { - let (stream, addr) = listener.accept()?; - stream.set_nonblocking(true)?; - Ok((stream, addr)) - } - } -} - -/// Unique id for use as `SelectorId`. -#[cfg(all(debug_assertions, feature = "net"))] -static NEXT_ID: AtomicUsize = AtomicUsize::new(1); - -pub(crate) struct Selector { - #[cfg(all(debug_assertions, feature = "net"))] - id: usize, - /// Subscriptions (reads events) we're interested in. 
- subscriptions: Arc>>, -} - -impl Selector { - pub(crate) fn new() -> io::Result { - Ok(Selector { - #[cfg(all(debug_assertions, feature = "net"))] - id: NEXT_ID.fetch_add(1, Ordering::Relaxed), - subscriptions: Arc::new(Mutex::new(Vec::new())), - }) - } - - #[cfg(all(debug_assertions, feature = "net"))] - pub(crate) fn id(&self) -> usize { - self.id - } - - pub(crate) fn select(&self, events: &mut Events, timeout: Option) -> io::Result<()> { - events.clear(); - - let mut subscriptions = self.subscriptions.lock().unwrap(); - - // If we want to a use a timeout in the `wasi_poll_oneoff()` function - // we need another subscription to the list. - if let Some(timeout) = timeout { - subscriptions.push(timeout_subscription(timeout)); - } - - // `poll_oneoff` needs the same number of events as subscriptions. - let length = subscriptions.len(); - events.reserve(length); - - debug_assert!(events.capacity() >= length); - #[cfg(debug_assertions)] - if length == 0 { - warn!( - "calling mio::Poll::poll with empty subscriptions, this likely not what you want" - ); - } - - let res = unsafe { wasi::poll_oneoff(subscriptions.as_ptr(), events.as_mut_ptr(), length) }; - - // Remove the timeout subscription we possibly added above. - if timeout.is_some() { - let timeout_sub = subscriptions.pop(); - debug_assert_eq!( - timeout_sub.unwrap().u.tag, - wasi::EVENTTYPE_CLOCK.raw(), - "failed to remove timeout subscription" - ); - } - - drop(subscriptions); // Unlock. - - match res { - Ok(n_events) => { - // Safety: `poll_oneoff` initialises the `events` for us. - unsafe { events.set_len(n_events) }; - - // Remove the timeout event. 
- if timeout.is_some() { - if let Some(index) = events.iter().position(is_timeout_event) { - events.swap_remove(index); - } - } - - check_errors(&events) - } - Err(err) => Err(io_err(err)), - } - } - - pub(crate) fn try_clone(&self) -> io::Result { - Ok(Selector { - #[cfg(all(debug_assertions, feature = "net"))] - id: self.id, - subscriptions: self.subscriptions.clone(), - }) - } - - #[cfg(feature = "net")] - pub(crate) fn register( - &self, - fd: wasi::Fd, - token: Token, - interests: Interest, - ) -> io::Result<()> { - let mut subscriptions = self.subscriptions.lock().unwrap(); - - if interests.is_writable() { - let subscription = wasi::Subscription { - userdata: token.0 as wasi::Userdata, - u: wasi::SubscriptionU { - tag: wasi::EVENTTYPE_FD_WRITE.raw(), - u: wasi::SubscriptionUU { - fd_write: wasi::SubscriptionFdReadwrite { - file_descriptor: fd, - }, - }, - }, - }; - subscriptions.push(subscription); - } - - if interests.is_readable() { - let subscription = wasi::Subscription { - userdata: token.0 as wasi::Userdata, - u: wasi::SubscriptionU { - tag: wasi::EVENTTYPE_FD_READ.raw(), - u: wasi::SubscriptionUU { - fd_read: wasi::SubscriptionFdReadwrite { - file_descriptor: fd, - }, - }, - }, - }; - subscriptions.push(subscription); - } - - Ok(()) - } - - #[cfg(feature = "net")] - pub(crate) fn reregister( - &self, - fd: wasi::Fd, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.deregister(fd) - .and_then(|()| self.register(fd, token, interests)) - } - - #[cfg(feature = "net")] - pub(crate) fn deregister(&self, fd: wasi::Fd) -> io::Result<()> { - let mut subscriptions = self.subscriptions.lock().unwrap(); - - let predicate = |subscription: &wasi::Subscription| { - // Safety: `subscription.u.tag` defines the type of the union in - // `subscription.u.u`. 
- match subscription.u.tag { - t if t == wasi::EVENTTYPE_FD_WRITE.raw() => unsafe { - subscription.u.u.fd_write.file_descriptor == fd - }, - t if t == wasi::EVENTTYPE_FD_READ.raw() => unsafe { - subscription.u.u.fd_read.file_descriptor == fd - }, - _ => false, - } - }; - - let mut ret = Err(io::ErrorKind::NotFound.into()); - - while let Some(index) = subscriptions.iter().position(predicate) { - subscriptions.swap_remove(index); - ret = Ok(()) - } - - ret - } -} - -/// Token used to a add a timeout subscription, also used in removing it again. -const TIMEOUT_TOKEN: wasi::Userdata = wasi::Userdata::max_value(); - -/// Returns a `wasi::Subscription` for `timeout`. -fn timeout_subscription(timeout: Duration) -> wasi::Subscription { - wasi::Subscription { - userdata: TIMEOUT_TOKEN, - u: wasi::SubscriptionU { - tag: wasi::EVENTTYPE_CLOCK.raw(), - u: wasi::SubscriptionUU { - clock: wasi::SubscriptionClock { - id: wasi::CLOCKID_MONOTONIC, - // Timestamp is in nanoseconds. - timeout: min(wasi::Timestamp::MAX as u128, timeout.as_nanos()) - as wasi::Timestamp, - // Give the implementation another millisecond to coalesce - // events. - precision: Duration::from_millis(1).as_nanos() as wasi::Timestamp, - // Zero means the `timeout` is considered relative to the - // current time. - flags: 0, - }, - }, - }, - } -} - -fn is_timeout_event(event: &wasi::Event) -> bool { - event.type_ == wasi::EVENTTYPE_CLOCK && event.userdata == TIMEOUT_TOKEN -} - -/// Check all events for possible errors, it returns the first error found. -fn check_errors(events: &[Event]) -> io::Result<()> { - for event in events { - if event.error != wasi::ERRNO_SUCCESS { - return Err(io_err(event.error)); - } - } - Ok(()) -} - -/// Convert `wasi::Errno` into an `io::Error`. -fn io_err(errno: wasi::Errno) -> io::Error { - // TODO: check if this is valid. 
- io::Error::from_raw_os_error(errno.raw() as i32) -} - -pub(crate) type Events = Vec; - -pub(crate) type Event = wasi::Event; - -pub(crate) mod event { - use std::fmt; - - use crate::sys::Event; - use crate::Token; - - pub(crate) fn token(event: &Event) -> Token { - Token(event.userdata as usize) - } - - pub(crate) fn is_readable(event: &Event) -> bool { - event.type_ == wasi::EVENTTYPE_FD_READ - } - - pub(crate) fn is_writable(event: &Event) -> bool { - event.type_ == wasi::EVENTTYPE_FD_WRITE - } - - pub(crate) fn is_error(_: &Event) -> bool { - // Not supported? It could be that `wasi::Event.error` could be used for - // this, but the docs say `error that occurred while processing the - // subscription request`, so it's checked in `Select::select` already. - false - } - - pub(crate) fn is_read_closed(event: &Event) -> bool { - event.type_ == wasi::EVENTTYPE_FD_READ - // Safety: checked the type of the union above. - && (event.fd_readwrite.flags & wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP) != 0 - } - - pub(crate) fn is_write_closed(event: &Event) -> bool { - event.type_ == wasi::EVENTTYPE_FD_WRITE - // Safety: checked the type of the union above. - && (event.fd_readwrite.flags & wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP) != 0 - } - - pub(crate) fn is_priority(_: &Event) -> bool { - // Not supported. - false - } - - pub(crate) fn is_aio(_: &Event) -> bool { - // Not supported. - false - } - - pub(crate) fn is_lio(_: &Event) -> bool { - // Not supported. 
- false - } - - pub(crate) fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result { - debug_detail!( - TypeDetails(wasi::Eventtype), - PartialEq::eq, - wasi::EVENTTYPE_CLOCK, - wasi::EVENTTYPE_FD_READ, - wasi::EVENTTYPE_FD_WRITE, - ); - - #[allow(clippy::trivially_copy_pass_by_ref)] - fn check_flag(got: &wasi::Eventrwflags, want: &wasi::Eventrwflags) -> bool { - (got & want) != 0 - } - debug_detail!( - EventrwflagsDetails(wasi::Eventrwflags), - check_flag, - wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP, - ); - - struct EventFdReadwriteDetails(wasi::EventFdReadwrite); - - impl fmt::Debug for EventFdReadwriteDetails { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("EventFdReadwrite") - .field("nbytes", &self.0.nbytes) - .field("flags", &self.0.flags) - .finish() - } - } - - f.debug_struct("Event") - .field("userdata", &event.userdata) - .field("error", &event.error) - .field("type", &TypeDetails(event.type_)) - .field("fd_readwrite", &EventFdReadwriteDetails(event.fd_readwrite)) - .finish() - } -} - -cfg_os_poll! { - cfg_io_source! { - pub(crate) struct IoSourceState; - - impl IoSourceState { - pub(crate) fn new() -> IoSourceState { - IoSourceState - } - - pub(crate) fn do_io(&self, f: F, io: &T) -> io::Result - where - F: FnOnce(&T) -> io::Result, - { - // We don't hold state, so we can just call the function and - // return. 
- f(io) - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/afd.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/afd.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/afd.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/afd.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,255 +0,0 @@ -use std::ffi::c_void; -use std::fmt; -use std::fs::File; -use std::io; -use std::mem::size_of; -use std::os::windows::io::AsRawHandle; - -use windows_sys::Win32::Foundation::{ - RtlNtStatusToDosError, HANDLE, NTSTATUS, STATUS_NOT_FOUND, STATUS_PENDING, STATUS_SUCCESS, -}; -use windows_sys::Win32::System::WindowsProgramming::{ - NtDeviceIoControlFile, IO_STATUS_BLOCK, IO_STATUS_BLOCK_0, -}; - -const IOCTL_AFD_POLL: u32 = 0x00012024; - -#[link(name = "ntdll")] -extern "system" { - /// See - /// - /// This is an undocumented API and as such not part of - /// from which `windows-sys` is generated, and also unlikely to be added, so - /// we manually declare it here - fn NtCancelIoFileEx( - FileHandle: HANDLE, - IoRequestToCancel: *mut IO_STATUS_BLOCK, - IoStatusBlock: *mut IO_STATUS_BLOCK, - ) -> NTSTATUS; -} -/// Winsock2 AFD driver instance. -/// -/// All operations are unsafe due to IO_STATUS_BLOCK parameter are being used by Afd driver during STATUS_PENDING before I/O Completion Port returns its result. -#[derive(Debug)] -pub struct Afd { - fd: File, -} - -#[repr(C)] -#[derive(Debug)] -pub struct AfdPollHandleInfo { - pub handle: HANDLE, - pub events: u32, - pub status: NTSTATUS, -} - -unsafe impl Send for AfdPollHandleInfo {} - -#[repr(C)] -pub struct AfdPollInfo { - pub timeout: i64, - // Can have only value 1. - pub number_of_handles: u32, - pub exclusive: u32, - pub handles: [AfdPollHandleInfo; 1], -} - -impl fmt::Debug for AfdPollInfo { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AfdPollInfo").finish() - } -} - -impl Afd { - /// Poll `Afd` instance with `AfdPollInfo`. 
- /// - /// # Unsafety - /// - /// This function is unsafe due to memory of `IO_STATUS_BLOCK` still being used by `Afd` instance while `Ok(false)` (`STATUS_PENDING`). - /// `iosb` needs to be untouched after the call while operation is in effective at ALL TIME except for `cancel` method. - /// So be careful not to `poll` twice while polling. - /// User should deallocate there overlapped value when error to prevent memory leak. - pub unsafe fn poll( - &self, - info: &mut AfdPollInfo, - iosb: *mut IO_STATUS_BLOCK, - overlapped: *mut c_void, - ) -> io::Result { - let info_ptr = info as *mut _ as *mut c_void; - (*iosb).Anonymous.Status = STATUS_PENDING; - let status = NtDeviceIoControlFile( - self.fd.as_raw_handle() as HANDLE, - 0, - None, - overlapped, - iosb, - IOCTL_AFD_POLL, - info_ptr, - size_of::() as u32, - info_ptr, - size_of::() as u32, - ); - match status { - STATUS_SUCCESS => Ok(true), - STATUS_PENDING => Ok(false), - _ => Err(io::Error::from_raw_os_error( - RtlNtStatusToDosError(status) as i32 - )), - } - } - - /// Cancel previous polled request of `Afd`. - /// - /// iosb needs to be used by `poll` first for valid `cancel`. - /// - /// # Unsafety - /// - /// This function is unsafe due to memory of `IO_STATUS_BLOCK` still being used by `Afd` instance while `Ok(false)` (`STATUS_PENDING`). - /// Use it only with request is still being polled so that you have valid `IO_STATUS_BLOCK` to use. - /// User should NOT deallocate there overlapped value after the `cancel` to prevent double free. 
- pub unsafe fn cancel(&self, iosb: *mut IO_STATUS_BLOCK) -> io::Result<()> { - if (*iosb).Anonymous.Status != STATUS_PENDING { - return Ok(()); - } - - let mut cancel_iosb = IO_STATUS_BLOCK { - Anonymous: IO_STATUS_BLOCK_0 { Status: 0 }, - Information: 0, - }; - let status = NtCancelIoFileEx(self.fd.as_raw_handle() as HANDLE, iosb, &mut cancel_iosb); - if status == STATUS_SUCCESS || status == STATUS_NOT_FOUND { - return Ok(()); - } - Err(io::Error::from_raw_os_error( - RtlNtStatusToDosError(status) as i32 - )) - } -} - -cfg_io_source! { - use std::mem::zeroed; - use std::os::windows::io::{FromRawHandle, RawHandle}; - use std::ptr::null_mut; - use std::sync::atomic::{AtomicUsize, Ordering}; - - use super::iocp::CompletionPort; - use windows_sys::Win32::{ - Foundation::{UNICODE_STRING, INVALID_HANDLE_VALUE}, - System::WindowsProgramming::{ - OBJECT_ATTRIBUTES, FILE_SKIP_SET_EVENT_ON_HANDLE, - }, - Storage::FileSystem::{FILE_OPEN, NtCreateFile, SetFileCompletionNotificationModes, SYNCHRONIZE, FILE_SHARE_READ, FILE_SHARE_WRITE}, - }; - - const AFD_HELPER_ATTRIBUTES: OBJECT_ATTRIBUTES = OBJECT_ATTRIBUTES { - Length: size_of::() as u32, - RootDirectory: 0, - ObjectName: &AFD_OBJ_NAME as *const _ as *mut _, - Attributes: 0, - SecurityDescriptor: null_mut(), - SecurityQualityOfService: null_mut(), - }; - - const AFD_OBJ_NAME: UNICODE_STRING = UNICODE_STRING { - Length: (AFD_HELPER_NAME.len() * size_of::()) as u16, - MaximumLength: (AFD_HELPER_NAME.len() * size_of::()) as u16, - Buffer: AFD_HELPER_NAME.as_ptr() as *mut _, - }; - - const AFD_HELPER_NAME: &[u16] = &[ - '\\' as _, - 'D' as _, - 'e' as _, - 'v' as _, - 'i' as _, - 'c' as _, - 'e' as _, - '\\' as _, - 'A' as _, - 'f' as _, - 'd' as _, - '\\' as _, - 'M' as _, - 'i' as _, - 'o' as _ - ]; - - static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(0); - - impl AfdPollInfo { - pub fn zeroed() -> AfdPollInfo { - unsafe { zeroed() } - } - } - - impl Afd { - /// Create new Afd instance. 
- pub(crate) fn new(cp: &CompletionPort) -> io::Result { - let mut afd_helper_handle: HANDLE = INVALID_HANDLE_VALUE; - let mut iosb = IO_STATUS_BLOCK { - Anonymous: IO_STATUS_BLOCK_0 { Status: 0 }, - Information: 0, - }; - - unsafe { - let status = NtCreateFile( - &mut afd_helper_handle as *mut _, - SYNCHRONIZE, - &AFD_HELPER_ATTRIBUTES as *const _ as *mut _, - &mut iosb, - null_mut(), - 0, - FILE_SHARE_READ | FILE_SHARE_WRITE, - FILE_OPEN, - 0, - null_mut(), - 0, - ); - if status != STATUS_SUCCESS { - let raw_err = io::Error::from_raw_os_error( - RtlNtStatusToDosError(status) as i32 - ); - let msg = format!("Failed to open \\Device\\Afd\\Mio: {}", raw_err); - return Err(io::Error::new(raw_err.kind(), msg)); - } - let fd = File::from_raw_handle(afd_helper_handle as RawHandle); - // Increment by 2 to reserve space for other types of handles. - // Non-AFD types (currently only NamedPipe), use odd numbered - // tokens. This allows the selector to differentiate between them - // and dispatch events accordingly. - let token = NEXT_TOKEN.fetch_add(2, Ordering::Relaxed) + 2; - let afd = Afd { fd }; - cp.add_handle(token, &afd.fd)?; - match SetFileCompletionNotificationModes( - afd_helper_handle, - FILE_SKIP_SET_EVENT_ON_HANDLE as u8 // This is just 2, so fits in u8 - ) { - 0 => Err(io::Error::last_os_error()), - _ => Ok(afd), - } - } - } - } -} - -pub const POLL_RECEIVE: u32 = 0b0_0000_0001; -pub const POLL_RECEIVE_EXPEDITED: u32 = 0b0_0000_0010; -pub const POLL_SEND: u32 = 0b0_0000_0100; -pub const POLL_DISCONNECT: u32 = 0b0_0000_1000; -pub const POLL_ABORT: u32 = 0b0_0001_0000; -pub const POLL_LOCAL_CLOSE: u32 = 0b0_0010_0000; -// Not used as it indicated in each event where a connection is connected, not -// just the first time a connection is established. -// Also see https://github.com/piscisaureus/wepoll/commit/8b7b340610f88af3d83f40fb728e7b850b090ece. 
-pub const POLL_CONNECT: u32 = 0b0_0100_0000; -pub const POLL_ACCEPT: u32 = 0b0_1000_0000; -pub const POLL_CONNECT_FAIL: u32 = 0b1_0000_0000; - -pub const KNOWN_EVENTS: u32 = POLL_RECEIVE - | POLL_RECEIVE_EXPEDITED - | POLL_SEND - | POLL_DISCONNECT - | POLL_ABORT - | POLL_LOCAL_CLOSE - | POLL_ACCEPT - | POLL_CONNECT_FAIL; diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/event.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/event.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/event.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/event.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,161 +0,0 @@ -use std::fmt; - -use super::afd; -use super::iocp::CompletionStatus; -use crate::Token; - -#[derive(Clone)] -pub struct Event { - pub flags: u32, - pub data: u64, -} - -pub fn token(event: &Event) -> Token { - Token(event.data as usize) -} - -impl Event { - pub(super) fn new(token: Token) -> Event { - Event { - flags: 0, - data: usize::from(token) as u64, - } - } - - pub(super) fn set_readable(&mut self) { - self.flags |= afd::POLL_RECEIVE - } - - #[cfg(feature = "os-ext")] - pub(super) fn set_writable(&mut self) { - self.flags |= afd::POLL_SEND; - } - - pub(super) fn from_completion_status(status: &CompletionStatus) -> Event { - Event { - flags: status.bytes_transferred(), - data: status.token() as u64, - } - } - - pub(super) fn to_completion_status(&self) -> CompletionStatus { - CompletionStatus::new(self.flags, self.data as usize, std::ptr::null_mut()) - } -} - -pub(crate) const READABLE_FLAGS: u32 = afd::POLL_RECEIVE - | afd::POLL_DISCONNECT - | afd::POLL_ACCEPT - | afd::POLL_ABORT - | afd::POLL_CONNECT_FAIL; -pub(crate) const WRITABLE_FLAGS: u32 = afd::POLL_SEND | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL; -pub(crate) const ERROR_FLAGS: u32 = afd::POLL_CONNECT_FAIL; -pub(crate) const READ_CLOSED_FLAGS: u32 = - afd::POLL_DISCONNECT | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL; -pub(crate) const 
WRITE_CLOSED_FLAGS: u32 = afd::POLL_ABORT | afd::POLL_CONNECT_FAIL; - -pub fn is_readable(event: &Event) -> bool { - event.flags & READABLE_FLAGS != 0 -} - -pub fn is_writable(event: &Event) -> bool { - event.flags & WRITABLE_FLAGS != 0 -} - -pub fn is_error(event: &Event) -> bool { - event.flags & ERROR_FLAGS != 0 -} - -pub fn is_read_closed(event: &Event) -> bool { - event.flags & READ_CLOSED_FLAGS != 0 -} - -pub fn is_write_closed(event: &Event) -> bool { - event.flags & WRITE_CLOSED_FLAGS != 0 -} - -pub fn is_priority(event: &Event) -> bool { - event.flags & afd::POLL_RECEIVE_EXPEDITED != 0 -} - -pub fn is_aio(_: &Event) -> bool { - // Not supported. - false -} - -pub fn is_lio(_: &Event) -> bool { - // Not supported. - false -} - -pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result { - #[allow(clippy::trivially_copy_pass_by_ref)] - fn check_flags(got: &u32, want: &u32) -> bool { - (got & want) != 0 - } - debug_detail!( - FlagsDetails(u32), - check_flags, - afd::POLL_RECEIVE, - afd::POLL_RECEIVE_EXPEDITED, - afd::POLL_SEND, - afd::POLL_DISCONNECT, - afd::POLL_ABORT, - afd::POLL_LOCAL_CLOSE, - afd::POLL_CONNECT, - afd::POLL_ACCEPT, - afd::POLL_CONNECT_FAIL, - ); - - f.debug_struct("event") - .field("flags", &FlagsDetails(event.flags)) - .field("data", &event.data) - .finish() -} - -pub struct Events { - /// Raw I/O event completions are filled in here by the call to `get_many` - /// on the completion port above. These are then processed to run callbacks - /// which figure out what to do after the event is done. - pub statuses: Box<[CompletionStatus]>, - - /// Literal events returned by `get` to the upwards `EventLoop`. This file - /// doesn't really modify this (except for the waker), instead almost all - /// events are filled in by the `ReadinessQueue` from the `poll` module. 
- pub events: Vec, -} - -impl Events { - pub fn with_capacity(cap: usize) -> Events { - // Note that it's possible for the output `events` to grow beyond the - // capacity as it can also include deferred events, but that's certainly - // not the end of the world! - Events { - statuses: vec![CompletionStatus::zero(); cap].into_boxed_slice(), - events: Vec::with_capacity(cap), - } - } - - pub fn is_empty(&self) -> bool { - self.events.is_empty() - } - - pub fn capacity(&self) -> usize { - self.events.capacity() - } - - pub fn len(&self) -> usize { - self.events.len() - } - - pub fn get(&self, idx: usize) -> Option<&Event> { - self.events.get(idx) - } - - pub fn clear(&mut self) { - self.events.clear(); - for status in self.statuses.iter_mut() { - *status = CompletionStatus::zero(); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/handle.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/handle.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/handle.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/handle.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ -use std::os::windows::io::RawHandle; -use windows_sys::Win32::Foundation::{CloseHandle, HANDLE}; - -/// Wrapper around a Windows HANDLE so that we close it upon drop in all scenarios -#[derive(Debug)] -pub struct Handle(HANDLE); - -impl Handle { - #[inline] - pub fn new(handle: HANDLE) -> Self { - Self(handle) - } - - pub fn raw(&self) -> HANDLE { - self.0 - } - - pub fn into_raw(self) -> RawHandle { - let ret = self.0; - // This is super important so that drop is not called! 
- std::mem::forget(self); - ret as RawHandle - } -} - -impl Drop for Handle { - fn drop(&mut self) { - unsafe { CloseHandle(self.0) }; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/iocp.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/iocp.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/iocp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/iocp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,273 +0,0 @@ -//! Bindings to IOCP, I/O Completion Ports - -use super::{Handle, Overlapped}; -use std::cmp; -use std::fmt; -use std::io; -use std::mem; -use std::os::windows::io::*; -use std::time::Duration; - -use windows_sys::Win32::Foundation::{HANDLE, INVALID_HANDLE_VALUE}; -use windows_sys::Win32::System::IO::{ - CreateIoCompletionPort, GetQueuedCompletionStatusEx, PostQueuedCompletionStatus, OVERLAPPED, - OVERLAPPED_ENTRY, -}; - -/// A handle to an Windows I/O Completion Port. -#[derive(Debug)] -pub(crate) struct CompletionPort { - handle: Handle, -} - -/// A status message received from an I/O completion port. -/// -/// These statuses can be created via the `new` or `empty` constructors and then -/// provided to a completion port, or they are read out of a completion port. -/// The fields of each status are read through its accessor methods. -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct CompletionStatus(OVERLAPPED_ENTRY); - -impl fmt::Debug for CompletionStatus { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "CompletionStatus(OVERLAPPED_ENTRY)") - } -} - -unsafe impl Send for CompletionStatus {} -unsafe impl Sync for CompletionStatus {} - -impl CompletionPort { - /// Creates a new I/O completion port with the specified concurrency value. - /// - /// The number of threads given corresponds to the level of concurrency - /// allowed for threads associated with this port. Consult the Windows - /// documentation for more information about this value. 
- pub fn new(threads: u32) -> io::Result { - let ret = unsafe { CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, threads) }; - if ret == 0 { - Err(io::Error::last_os_error()) - } else { - Ok(CompletionPort { - handle: Handle::new(ret), - }) - } - } - - /// Associates a new `HANDLE` to this I/O completion port. - /// - /// This function will associate the given handle to this port with the - /// given `token` to be returned in status messages whenever it receives a - /// notification. - /// - /// Any object which is convertible to a `HANDLE` via the `AsRawHandle` - /// trait can be provided to this function, such as `std::fs::File` and - /// friends. - #[cfg(any(feature = "net", feature = "os-ext"))] - pub fn add_handle(&self, token: usize, t: &T) -> io::Result<()> { - let ret = unsafe { - CreateIoCompletionPort(t.as_raw_handle() as HANDLE, self.handle.raw(), token, 0) - }; - if ret == 0 { - Err(io::Error::last_os_error()) - } else { - Ok(()) - } - } - - /// Dequeues a number of completion statuses from this I/O completion port. - /// - /// This function is the same as `get` except that it may return more than - /// one status. A buffer of "zero" statuses is provided (the contents are - /// not read) and then on success this function will return a sub-slice of - /// statuses which represent those which were dequeued from this port. This - /// function does not wait to fill up the entire list of statuses provided. - /// - /// Like with `get`, a timeout may be specified for this operation. 
- pub fn get_many<'a>( - &self, - list: &'a mut [CompletionStatus], - timeout: Option, - ) -> io::Result<&'a mut [CompletionStatus]> { - debug_assert_eq!( - mem::size_of::(), - mem::size_of::() - ); - let mut removed = 0; - let timeout = duration_millis(timeout); - let len = cmp::min(list.len(), ::max_value() as usize) as u32; - let ret = unsafe { - GetQueuedCompletionStatusEx( - self.handle.raw(), - list.as_ptr() as *mut _, - len, - &mut removed, - timeout, - 0, - ) - }; - - if ret == 0 { - Err(io::Error::last_os_error()) - } else { - Ok(&mut list[..removed as usize]) - } - } - - /// Posts a new completion status onto this I/O completion port. - /// - /// This function will post the given status, with custom parameters, to the - /// port. Threads blocked in `get` or `get_many` will eventually receive - /// this status. - pub fn post(&self, status: CompletionStatus) -> io::Result<()> { - let ret = unsafe { - PostQueuedCompletionStatus( - self.handle.raw(), - status.0.dwNumberOfBytesTransferred, - status.0.lpCompletionKey, - status.0.lpOverlapped, - ) - }; - - if ret == 0 { - Err(io::Error::last_os_error()) - } else { - Ok(()) - } - } -} - -impl AsRawHandle for CompletionPort { - fn as_raw_handle(&self) -> RawHandle { - self.handle.raw() as RawHandle - } -} - -impl FromRawHandle for CompletionPort { - unsafe fn from_raw_handle(handle: RawHandle) -> CompletionPort { - CompletionPort { - handle: Handle::new(handle as HANDLE), - } - } -} - -impl IntoRawHandle for CompletionPort { - fn into_raw_handle(self) -> RawHandle { - self.handle.into_raw() - } -} - -impl CompletionStatus { - /// Creates a new completion status with the provided parameters. - /// - /// This function is useful when creating a status to send to a port with - /// the `post` method. The parameters are opaquely passed through and not - /// interpreted by the system at all. 
- pub(crate) fn new(bytes: u32, token: usize, overlapped: *mut Overlapped) -> Self { - CompletionStatus(OVERLAPPED_ENTRY { - dwNumberOfBytesTransferred: bytes, - lpCompletionKey: token, - lpOverlapped: overlapped as *mut _, - Internal: 0, - }) - } - - /// Creates a new borrowed completion status from the borrowed - /// `OVERLAPPED_ENTRY` argument provided. - /// - /// This method will wrap the `OVERLAPPED_ENTRY` in a `CompletionStatus`, - /// returning the wrapped structure. - #[cfg(feature = "os-ext")] - pub fn from_entry(entry: &OVERLAPPED_ENTRY) -> &Self { - // Safety: CompletionStatus is repr(transparent) w/ OVERLAPPED_ENTRY, so - // a reference to one is guaranteed to be layout compatible with the - // reference to another. - unsafe { &*(entry as *const _ as *const _) } - } - - /// Creates a new "zero" completion status. - /// - /// This function is useful when creating a stack buffer or vector of - /// completion statuses to be passed to the `get_many` function. - pub fn zero() -> Self { - Self::new(0, 0, std::ptr::null_mut()) - } - - /// Returns the number of bytes that were transferred for the I/O operation - /// associated with this completion status. - pub fn bytes_transferred(&self) -> u32 { - self.0.dwNumberOfBytesTransferred - } - - /// Returns the completion key value associated with the file handle whose - /// I/O operation has completed. - /// - /// A completion key is a per-handle key that is specified when it is added - /// to an I/O completion port via `add_handle` or `add_socket`. - pub fn token(&self) -> usize { - self.0.lpCompletionKey as usize - } - - /// Returns a pointer to the `Overlapped` structure that was specified when - /// the I/O operation was started. - pub fn overlapped(&self) -> *mut OVERLAPPED { - self.0.lpOverlapped - } - - /// Returns a pointer to the internal `OVERLAPPED_ENTRY` object. 
- pub fn entry(&self) -> &OVERLAPPED_ENTRY { - &self.0 - } -} - -#[inline] -fn duration_millis(dur: Option) -> u32 { - if let Some(dur) = dur { - // `Duration::as_millis` truncates, so round up. This avoids - // turning sub-millisecond timeouts into a zero timeout, unless - // the caller explicitly requests that by specifying a zero - // timeout. - let dur_ms = dur - .checked_add(Duration::from_nanos(999_999)) - .unwrap_or(dur) - .as_millis(); - cmp::min(dur_ms, u32::MAX as u128) as u32 - } else { - u32::MAX - } -} - -#[cfg(test)] -mod tests { - use super::{CompletionPort, CompletionStatus}; - - #[test] - fn is_send_sync() { - fn is_send_sync() {} - is_send_sync::(); - } - - #[test] - fn get_many() { - let c = CompletionPort::new(1).unwrap(); - - c.post(CompletionStatus::new(1, 2, 3 as *mut _)).unwrap(); - c.post(CompletionStatus::new(4, 5, 6 as *mut _)).unwrap(); - - let mut s = vec![CompletionStatus::zero(); 4]; - { - let s = c.get_many(&mut s, None).unwrap(); - assert_eq!(s.len(), 2); - assert_eq!(s[0].bytes_transferred(), 1); - assert_eq!(s[0].token(), 2); - assert_eq!(s[0].overlapped(), 3 as *mut _); - assert_eq!(s[1].bytes_transferred(), 4); - assert_eq!(s[1].token(), 5); - assert_eq!(s[1].overlapped(), 6 as *mut _); - } - assert_eq!(s[2].bytes_transferred(), 0); - assert_eq!(s[2].token(), 0); - assert_eq!(s[2].overlapped(), 0 as *mut _); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/io_status_block.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/io_status_block.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/io_status_block.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/io_status_block.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,40 +0,0 @@ -use std::fmt; -use std::ops::{Deref, DerefMut}; - -use windows_sys::Win32::System::WindowsProgramming::IO_STATUS_BLOCK; - -pub struct IoStatusBlock(IO_STATUS_BLOCK); - -cfg_io_source! 
{ - use windows_sys::Win32::System::WindowsProgramming::{IO_STATUS_BLOCK_0}; - - impl IoStatusBlock { - pub fn zeroed() -> Self { - Self(IO_STATUS_BLOCK { - Anonymous: IO_STATUS_BLOCK_0 { Status: 0 }, - Information: 0, - }) - } - } -} - -unsafe impl Send for IoStatusBlock {} - -impl Deref for IoStatusBlock { - type Target = IO_STATUS_BLOCK; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for IoStatusBlock { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl fmt::Debug for IoStatusBlock { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("IoStatusBlock").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/mod.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/mod.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,152 +0,0 @@ -mod afd; - -pub mod event; -pub use event::{Event, Events}; - -mod handle; -use handle::Handle; - -mod io_status_block; -mod iocp; - -mod overlapped; -use overlapped::Overlapped; - -mod selector; -pub use selector::{Selector, SelectorInner, SockState}; - -// Macros must be defined before the modules that use them -cfg_net! { - /// Helper macro to execute a system call that returns an `io::Result`. - // - // Macro must be defined before any modules that uses them. - macro_rules! syscall { - ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{ - let res = unsafe { $fn($($arg, )*) }; - if $err_test(&res, &$err_value) { - Err(io::Error::last_os_error()) - } else { - Ok(res) - } - }}; - } - - mod net; - - pub(crate) mod tcp; - pub(crate) mod udp; -} - -cfg_os_ext! { - pub(crate) mod named_pipe; -} - -mod waker; -pub(crate) use waker::Waker; - -cfg_io_source! 
{ - use std::io; - use std::os::windows::io::RawSocket; - use std::pin::Pin; - use std::sync::{Arc, Mutex}; - - use crate::{Interest, Registry, Token}; - - struct InternalState { - selector: Arc, - token: Token, - interests: Interest, - sock_state: Pin>>, - } - - impl Drop for InternalState { - fn drop(&mut self) { - let mut sock_state = self.sock_state.lock().unwrap(); - sock_state.mark_delete(); - } - } - - pub struct IoSourceState { - // This is `None` if the socket has not yet been registered. - // - // We box the internal state to not increase the size on the stack as the - // type might move around a lot. - inner: Option>, - } - - impl IoSourceState { - pub fn new() -> IoSourceState { - IoSourceState { inner: None } - } - - pub fn do_io(&self, f: F, io: &T) -> io::Result - where - F: FnOnce(&T) -> io::Result, - { - let result = f(io); - if let Err(ref e) = result { - if e.kind() == io::ErrorKind::WouldBlock { - self.inner.as_ref().map_or(Ok(()), |state| { - state - .selector - .reregister(state.sock_state.clone(), state.token, state.interests) - })?; - } - } - result - } - - pub fn register( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - socket: RawSocket, - ) -> io::Result<()> { - if self.inner.is_some() { - Err(io::ErrorKind::AlreadyExists.into()) - } else { - registry - .selector() - .register(socket, token, interests) - .map(|state| { - self.inner = Some(Box::new(state)); - }) - } - } - - pub fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: Interest, - ) -> io::Result<()> { - match self.inner.as_mut() { - Some(state) => { - registry - .selector() - .reregister(state.sock_state.clone(), token, interests) - .map(|()| { - state.token = token; - state.interests = interests; - }) - } - None => Err(io::ErrorKind::NotFound.into()), - } - } - - pub fn deregister(&mut self) -> io::Result<()> { - match self.inner.as_mut() { - Some(state) => { - { - let mut sock_state = state.sock_state.lock().unwrap(); - 
sock_state.mark_delete(); - } - self.inner = None; - Ok(()) - } - None => Err(io::ErrorKind::NotFound.into()), - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/named_pipe.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/named_pipe.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/named_pipe.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/named_pipe.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,993 +0,0 @@ -use std::ffi::OsStr; -use std::io::{self, Read, Write}; -use std::os::windows::io::{AsRawHandle, FromRawHandle, RawHandle}; -use std::sync::atomic::Ordering::{Relaxed, SeqCst}; -use std::sync::atomic::{AtomicBool, AtomicUsize}; -use std::sync::{Arc, Mutex}; -use std::{fmt, mem, slice}; - -use windows_sys::Win32::Foundation::{ - ERROR_BROKEN_PIPE, ERROR_IO_INCOMPLETE, ERROR_IO_PENDING, ERROR_NO_DATA, ERROR_PIPE_CONNECTED, - ERROR_PIPE_LISTENING, HANDLE, INVALID_HANDLE_VALUE, -}; -use windows_sys::Win32::Storage::FileSystem::{ - ReadFile, WriteFile, FILE_FLAG_FIRST_PIPE_INSTANCE, FILE_FLAG_OVERLAPPED, PIPE_ACCESS_DUPLEX, -}; -use windows_sys::Win32::System::Pipes::{ - ConnectNamedPipe, CreateNamedPipeW, DisconnectNamedPipe, PIPE_TYPE_BYTE, - PIPE_UNLIMITED_INSTANCES, -}; -use windows_sys::Win32::System::IO::{ - CancelIoEx, GetOverlappedResult, OVERLAPPED, OVERLAPPED_ENTRY, -}; - -use crate::event::Source; -use crate::sys::windows::iocp::{CompletionPort, CompletionStatus}; -use crate::sys::windows::{Event, Handle, Overlapped}; -use crate::Registry; -use crate::{Interest, Token}; - -/// Non-blocking windows named pipe. -/// -/// This structure internally contains a `HANDLE` which represents the named -/// pipe, and also maintains state associated with the mio event loop and active -/// I/O operations that have been scheduled to translate IOCP to a readiness -/// model. -/// -/// Note, IOCP is a *completion* based model whereas mio is a *readiness* based -/// model. 
To bridge this, `NamedPipe` performs internal buffering. Writes are -/// written to an internal buffer and the buffer is submitted to IOCP. IOCP -/// reads are submitted using internal buffers and `NamedPipe::read` reads from -/// this internal buffer. -/// -/// # Trait implementations -/// -/// The `Read` and `Write` traits are implemented for `NamedPipe` and for -/// `&NamedPipe`. This represents that a named pipe can be concurrently read and -/// written to and also can be read and written to at all. Typically a named -/// pipe needs to be connected to a client before it can be read or written, -/// however. -/// -/// Note that for I/O operations on a named pipe to succeed then the named pipe -/// needs to be associated with an event loop. Until this happens all I/O -/// operations will return a "would block" error. -/// -/// # Managing connections -/// -/// The `NamedPipe` type supports a `connect` method to connect to a client and -/// a `disconnect` method to disconnect from that client. These two methods only -/// work once a named pipe is associated with an event loop. -/// -/// The `connect` method will succeed asynchronously and a completion can be -/// detected once the object receives a writable notification. -/// -/// # Named pipe clients -/// -/// Currently to create a client of a named pipe server then you can use the -/// `OpenOptions` type in the standard library to create a `File` that connects -/// to a named pipe. Afterwards you can use the `into_raw_handle` method coupled -/// with the `NamedPipe::from_raw_handle` method to convert that to a named pipe -/// that can operate asynchronously. Don't forget to pass the -/// `FILE_FLAG_OVERLAPPED` flag when opening the `File`. -pub struct NamedPipe { - inner: Arc, -} - -/// # Notes -/// -/// The memory layout of this structure must be fixed as the -/// `ptr_from_*_overlapped` methods depend on it, see the `ptr_from` test. 
-#[repr(C)] -struct Inner { - // NOTE: careful modifying the order of these three fields, the `ptr_from_*` - // methods depend on the layout! - connect: Overlapped, - read: Overlapped, - write: Overlapped, - // END NOTE. - handle: Handle, - connecting: AtomicBool, - io: Mutex, - pool: Mutex, -} - -impl Inner { - /// Converts a pointer to `Inner.connect` to a pointer to `Inner`. - /// - /// # Unsafety - /// - /// Caller must ensure `ptr` is pointing to `Inner.connect`. - unsafe fn ptr_from_conn_overlapped(ptr: *mut OVERLAPPED) -> *const Inner { - // `connect` is the first field, so the pointer are the same. - ptr.cast() - } - - /// Same as [`ptr_from_conn_overlapped`] but for `Inner.read`. - unsafe fn ptr_from_read_overlapped(ptr: *mut OVERLAPPED) -> *const Inner { - // `read` is after `connect: Overlapped`. - (ptr as *mut Overlapped).wrapping_sub(1) as *const Inner - } - - /// Same as [`ptr_from_conn_overlapped`] but for `Inner.write`. - unsafe fn ptr_from_write_overlapped(ptr: *mut OVERLAPPED) -> *const Inner { - // `read` is after `connect: Overlapped` and `read: Overlapped`. - (ptr as *mut Overlapped).wrapping_sub(2) as *const Inner - } - - /// Issue a connection request with the specified overlapped operation. - /// - /// This function will issue a request to connect a client to this server, - /// returning immediately after starting the overlapped operation. - /// - /// If this function immediately succeeds then `Ok(true)` is returned. If - /// the overlapped operation is enqueued and pending, then `Ok(false)` is - /// returned. Otherwise an error is returned indicating what went wrong. - /// - /// # Unsafety - /// - /// This function is unsafe because the kernel requires that the - /// `overlapped` pointer is valid until the end of the I/O operation. The - /// kernel also requires that `overlapped` is unique for this I/O operation - /// and is not in use for any other I/O. 
- /// - /// To safely use this function callers must ensure that this pointer is - /// valid until the I/O operation is completed, typically via completion - /// ports and waiting to receive the completion notification on the port. - pub unsafe fn connect_overlapped(&self, overlapped: *mut OVERLAPPED) -> io::Result { - if ConnectNamedPipe(self.handle.raw(), overlapped) != 0 { - return Ok(true); - } - - let err = io::Error::last_os_error(); - - match err.raw_os_error().map(|e| e as u32) { - Some(ERROR_PIPE_CONNECTED) => Ok(true), - Some(ERROR_NO_DATA) => Ok(true), - Some(ERROR_IO_PENDING) => Ok(false), - _ => Err(err), - } - } - - /// Disconnects this named pipe from any connected client. - pub fn disconnect(&self) -> io::Result<()> { - if unsafe { DisconnectNamedPipe(self.handle.raw()) } == 0 { - Err(io::Error::last_os_error()) - } else { - Ok(()) - } - } - - /// Issues an overlapped read operation to occur on this pipe. - /// - /// This function will issue an asynchronous read to occur in an overlapped - /// fashion, returning immediately. The `buf` provided will be filled in - /// with data and the request is tracked by the `overlapped` function - /// provided. - /// - /// If the operation succeeds immediately, `Ok(Some(n))` is returned where - /// `n` is the number of bytes read. If an asynchronous operation is - /// enqueued, then `Ok(None)` is returned. Otherwise if an error occurred - /// it is returned. - /// - /// When this operation completes (or if it completes immediately), another - /// mechanism must be used to learn how many bytes were transferred (such as - /// looking at the filed in the IOCP status message). - /// - /// # Unsafety - /// - /// This function is unsafe because the kernel requires that the `buf` and - /// `overlapped` pointers to be valid until the end of the I/O operation. - /// The kernel also requires that `overlapped` is unique for this I/O - /// operation and is not in use for any other I/O. 
- /// - /// To safely use this function callers must ensure that the pointers are - /// valid until the I/O operation is completed, typically via completion - /// ports and waiting to receive the completion notification on the port. - pub unsafe fn read_overlapped( - &self, - buf: &mut [u8], - overlapped: *mut OVERLAPPED, - ) -> io::Result> { - let len = std::cmp::min(buf.len(), u32::MAX as usize) as u32; - let res = ReadFile( - self.handle.raw(), - buf.as_mut_ptr() as *mut _, - len, - std::ptr::null_mut(), - overlapped, - ); - if res == 0 { - let err = io::Error::last_os_error(); - if err.raw_os_error() != Some(ERROR_IO_PENDING as i32) { - return Err(err); - } - } - - let mut bytes = 0; - let res = GetOverlappedResult(self.handle.raw(), overlapped, &mut bytes, 0); - if res == 0 { - let err = io::Error::last_os_error(); - if err.raw_os_error() == Some(ERROR_IO_INCOMPLETE as i32) { - Ok(None) - } else { - Err(err) - } - } else { - Ok(Some(bytes as usize)) - } - } - - /// Issues an overlapped write operation to occur on this pipe. - /// - /// This function will issue an asynchronous write to occur in an overlapped - /// fashion, returning immediately. The `buf` provided will be filled in - /// with data and the request is tracked by the `overlapped` function - /// provided. - /// - /// If the operation succeeds immediately, `Ok(Some(n))` is returned where - /// `n` is the number of bytes written. If an asynchronous operation is - /// enqueued, then `Ok(None)` is returned. Otherwise if an error occurred - /// it is returned. - /// - /// When this operation completes (or if it completes immediately), another - /// mechanism must be used to learn how many bytes were transferred (such as - /// looking at the filed in the IOCP status message). - /// - /// # Unsafety - /// - /// This function is unsafe because the kernel requires that the `buf` and - /// `overlapped` pointers to be valid until the end of the I/O operation. 
- /// The kernel also requires that `overlapped` is unique for this I/O - /// operation and is not in use for any other I/O. - /// - /// To safely use this function callers must ensure that the pointers are - /// valid until the I/O operation is completed, typically via completion - /// ports and waiting to receive the completion notification on the port. - pub unsafe fn write_overlapped( - &self, - buf: &[u8], - overlapped: *mut OVERLAPPED, - ) -> io::Result> { - let len = std::cmp::min(buf.len(), u32::MAX as usize) as u32; - let res = WriteFile( - self.handle.raw(), - buf.as_ptr() as *const _, - len, - std::ptr::null_mut(), - overlapped, - ); - if res == 0 { - let err = io::Error::last_os_error(); - if err.raw_os_error() != Some(ERROR_IO_PENDING as i32) { - return Err(err); - } - } - - let mut bytes = 0; - let res = GetOverlappedResult(self.handle.raw(), overlapped, &mut bytes, 0); - if res == 0 { - let err = io::Error::last_os_error(); - if err.raw_os_error() == Some(ERROR_IO_INCOMPLETE as i32) { - Ok(None) - } else { - Err(err) - } - } else { - Ok(Some(bytes as usize)) - } - } - - /// Calls the `GetOverlappedResult` function to get the result of an - /// overlapped operation for this handle. - /// - /// This function takes the `OVERLAPPED` argument which must have been used - /// to initiate an overlapped I/O operation, and returns either the - /// successful number of bytes transferred during the operation or an error - /// if one occurred. - /// - /// # Unsafety - /// - /// This function is unsafe as `overlapped` must have previously been used - /// to execute an operation for this handle, and it must also be a valid - /// pointer to an `Overlapped` instance. 
- #[inline] - unsafe fn result(&self, overlapped: *mut OVERLAPPED) -> io::Result { - let mut transferred = 0; - let r = GetOverlappedResult(self.handle.raw(), overlapped, &mut transferred, 0); - if r == 0 { - Err(io::Error::last_os_error()) - } else { - Ok(transferred as usize) - } - } -} - -#[test] -fn ptr_from() { - use std::mem::ManuallyDrop; - use std::ptr; - - let pipe = unsafe { ManuallyDrop::new(NamedPipe::from_raw_handle(ptr::null_mut())) }; - let inner: &Inner = &pipe.inner; - assert_eq!( - inner as *const Inner, - unsafe { Inner::ptr_from_conn_overlapped(&inner.connect as *const _ as *mut OVERLAPPED) }, - "`ptr_from_conn_overlapped` incorrect" - ); - assert_eq!( - inner as *const Inner, - unsafe { Inner::ptr_from_read_overlapped(&inner.read as *const _ as *mut OVERLAPPED) }, - "`ptr_from_read_overlapped` incorrect" - ); - assert_eq!( - inner as *const Inner, - unsafe { Inner::ptr_from_write_overlapped(&inner.write as *const _ as *mut OVERLAPPED) }, - "`ptr_from_write_overlapped` incorrect" - ); -} - -struct Io { - // Uniquely identifies the selector associated with this named pipe - cp: Option>, - // Token used to identify events - token: Option, - read: State, - write: State, - connect_error: Option, -} - -#[derive(Debug)] -enum State { - None, - Pending(Vec, usize), - Ok(Vec, usize), - Err(io::Error), -} - -// Odd tokens are for named pipes -static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(1); - -fn would_block() -> io::Error { - io::ErrorKind::WouldBlock.into() -} - -impl NamedPipe { - /// Creates a new named pipe at the specified `addr` given a "reasonable - /// set" of initial configuration options. 
- pub fn new>(addr: A) -> io::Result { - use std::os::windows::ffi::OsStrExt; - let name: Vec<_> = addr.as_ref().encode_wide().chain(Some(0)).collect(); - - // Safety: syscall - let h = unsafe { - CreateNamedPipeW( - name.as_ptr(), - PIPE_ACCESS_DUPLEX | FILE_FLAG_FIRST_PIPE_INSTANCE | FILE_FLAG_OVERLAPPED, - PIPE_TYPE_BYTE, - PIPE_UNLIMITED_INSTANCES, - 65536, - 65536, - 0, - std::ptr::null_mut(), - ) - }; - - if h == INVALID_HANDLE_VALUE { - Err(io::Error::last_os_error()) - } else { - // Safety: nothing actually unsafe about this. The trait fn includes - // `unsafe`. - Ok(unsafe { Self::from_raw_handle(h as RawHandle) }) - } - } - - /// Attempts to call `ConnectNamedPipe`, if possible. - /// - /// This function will attempt to connect this pipe to a client in an - /// asynchronous fashion. If the function immediately establishes a - /// connection to a client then `Ok(())` is returned. Otherwise if a - /// connection attempt was issued and is now in progress then a "would - /// block" error is returned. - /// - /// When the connection is finished then this object will be flagged as - /// being ready for a write, or otherwise in the writable state. - /// - /// # Errors - /// - /// This function will return a "would block" error if the pipe has not yet - /// been registered with an event loop, if the connection operation has - /// previously been issued but has not yet completed, or if the connect - /// itself was issued and didn't finish immediately. - /// - /// Normal I/O errors from the call to `ConnectNamedPipe` are returned - /// immediately. - pub fn connect(&self) -> io::Result<()> { - // "Acquire the connecting lock" or otherwise just make sure we're the - // only operation that's using the `connect` overlapped instance. - if self.inner.connecting.swap(true, SeqCst) { - return Err(would_block()); - } - - // Now that we've flagged ourselves in the connecting state, issue the - // connection attempt. 
Afterwards interpret the return value and set - // internal state accordingly. - let res = unsafe { - let overlapped = self.inner.connect.as_ptr() as *mut _; - self.inner.connect_overlapped(overlapped) - }; - - match res { - // The connection operation finished immediately, so let's schedule - // reads/writes and such. - Ok(true) => { - self.inner.connecting.store(false, SeqCst); - Inner::post_register(&self.inner, None); - Ok(()) - } - - // If the overlapped operation was successful and didn't finish - // immediately then we forget a copy of the arc we hold - // internally. This ensures that when the completion status comes - // in for the I/O operation finishing it'll have a reference - // associated with it and our data will still be valid. The - // `connect_done` function will "reify" this forgotten pointer to - // drop the refcount on the other side. - Ok(false) => { - mem::forget(self.inner.clone()); - Err(would_block()) - } - - Err(e) => { - self.inner.connecting.store(false, SeqCst); - Err(e) - } - } - } - - /// Takes any internal error that has happened after the last I/O operation - /// which hasn't been retrieved yet. - /// - /// This is particularly useful when detecting failed attempts to `connect`. - /// After a completed `connect` flags this pipe as writable then callers - /// must invoke this method to determine whether the connection actually - /// succeeded. If this function returns `None` then a client is connected, - /// otherwise it returns an error of what happened and a client shouldn't be - /// connected. - pub fn take_error(&self) -> io::Result> { - Ok(self.inner.io.lock().unwrap().connect_error.take()) - } - - /// Disconnects this named pipe from a connected client. - /// - /// This function will disconnect the pipe from a connected client, if any, - /// transitively calling the `DisconnectNamedPipe` function. - /// - /// After a `disconnect` is issued, then a `connect` may be called again to - /// connect to another client. 
- pub fn disconnect(&self) -> io::Result<()> { - self.inner.disconnect() - } -} - -impl FromRawHandle for NamedPipe { - unsafe fn from_raw_handle(handle: RawHandle) -> NamedPipe { - NamedPipe { - inner: Arc::new(Inner { - handle: Handle::new(handle as HANDLE), - connect: Overlapped::new(connect_done), - connecting: AtomicBool::new(false), - read: Overlapped::new(read_done), - write: Overlapped::new(write_done), - io: Mutex::new(Io { - cp: None, - token: None, - read: State::None, - write: State::None, - connect_error: None, - }), - pool: Mutex::new(BufferPool::with_capacity(2)), - }), - } - } -} - -impl Read for NamedPipe { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - <&NamedPipe as Read>::read(&mut &*self, buf) - } -} - -impl Write for NamedPipe { - fn write(&mut self, buf: &[u8]) -> io::Result { - <&NamedPipe as Write>::write(&mut &*self, buf) - } - - fn flush(&mut self) -> io::Result<()> { - <&NamedPipe as Write>::flush(&mut &*self) - } -} - -impl<'a> Read for &'a NamedPipe { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let mut state = self.inner.io.lock().unwrap(); - - if state.token.is_none() { - return Err(would_block()); - } - - match mem::replace(&mut state.read, State::None) { - // In theory not possible with `token` checked above, - // but return would block for now. - State::None => Err(would_block()), - - // A read is in flight, still waiting for it to finish - State::Pending(buf, amt) => { - state.read = State::Pending(buf, amt); - Err(would_block()) - } - - // We previously read something into `data`, try to copy out some - // data. If we copy out all the data schedule a new read and - // otherwise store the buffer to get read later. - State::Ok(data, cur) => { - let n = { - let mut remaining = &data[cur..]; - remaining.read(buf)? 
- }; - let next = cur + n; - if next != data.len() { - state.read = State::Ok(data, next); - } else { - self.inner.put_buffer(data); - Inner::schedule_read(&self.inner, &mut state, None); - } - Ok(n) - } - - // Looks like an in-flight read hit an error, return that here while - // we schedule a new one. - State::Err(e) => { - Inner::schedule_read(&self.inner, &mut state, None); - if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) { - Ok(0) - } else { - Err(e) - } - } - } - } -} - -impl<'a> Write for &'a NamedPipe { - fn write(&mut self, buf: &[u8]) -> io::Result { - // Make sure there's no writes pending - let mut io = self.inner.io.lock().unwrap(); - - if io.token.is_none() { - return Err(would_block()); - } - - match io.write { - State::None => {} - State::Err(_) => match mem::replace(&mut io.write, State::None) { - State::Err(e) => return Err(e), - // `io` is locked, so this branch is unreachable - _ => unreachable!(), - }, - // any other state should be handled in `write_done` - _ => { - return Err(would_block()); - } - } - - // Move `buf` onto the heap and fire off the write - let mut owned_buf = self.inner.get_buffer(); - owned_buf.extend(buf); - match Inner::maybe_schedule_write(&self.inner, owned_buf, 0, &mut io)? 
{ - // Some bytes are written immediately - Some(n) => Ok(n), - // Write operation is anqueued for whole buffer - None => Ok(buf.len()), - } - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -impl Source for NamedPipe { - fn register(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> { - let mut io = self.inner.io.lock().unwrap(); - - io.check_association(registry, false)?; - - if io.token.is_some() { - return Err(io::Error::new( - io::ErrorKind::AlreadyExists, - "I/O source already registered with a `Registry`", - )); - } - - if io.cp.is_none() { - let selector = registry.selector(); - - io.cp = Some(selector.clone_port()); - - let inner_token = NEXT_TOKEN.fetch_add(2, Relaxed) + 2; - selector.inner.cp.add_handle(inner_token, self)?; - } - - io.token = Some(token); - drop(io); - - Inner::post_register(&self.inner, None); - - Ok(()) - } - - fn reregister(&mut self, registry: &Registry, token: Token, _: Interest) -> io::Result<()> { - let mut io = self.inner.io.lock().unwrap(); - - io.check_association(registry, true)?; - - io.token = Some(token); - drop(io); - - Inner::post_register(&self.inner, None); - - Ok(()) - } - - fn deregister(&mut self, registry: &Registry) -> io::Result<()> { - let mut io = self.inner.io.lock().unwrap(); - - io.check_association(registry, true)?; - - if io.token.is_none() { - return Err(io::Error::new( - io::ErrorKind::NotFound, - "I/O source not registered with `Registry`", - )); - } - - io.token = None; - Ok(()) - } -} - -impl AsRawHandle for NamedPipe { - fn as_raw_handle(&self) -> RawHandle { - self.inner.handle.raw() as RawHandle - } -} - -impl fmt::Debug for NamedPipe { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.handle.fmt(f) - } -} - -impl Drop for NamedPipe { - fn drop(&mut self) { - // Cancel pending reads/connects, but don't cancel writes to ensure that - // everything is flushed out. 
- unsafe { - if self.inner.connecting.load(SeqCst) { - drop(cancel(&self.inner.handle, &self.inner.connect)); - } - - let io = self.inner.io.lock().unwrap(); - if let State::Pending(..) = io.read { - drop(cancel(&self.inner.handle, &self.inner.read)); - } - } - } -} - -impl Inner { - /// Schedules a read to happen in the background, executing an overlapped - /// operation. - /// - /// This function returns `true` if a normal error happens or if the read - /// is scheduled in the background. If the pipe is no longer connected - /// (ERROR_PIPE_LISTENING) then `false` is returned and no read is - /// scheduled. - fn schedule_read(me: &Arc, io: &mut Io, events: Option<&mut Vec>) -> bool { - // Check to see if a read is already scheduled/completed - match io.read { - State::None => {} - _ => return true, - } - - // Allocate a buffer and schedule the read. - let mut buf = me.get_buffer(); - let e = unsafe { - let overlapped = me.read.as_ptr() as *mut _; - let slice = slice::from_raw_parts_mut(buf.as_mut_ptr(), buf.capacity()); - me.read_overlapped(slice, overlapped) - }; - - match e { - // See `NamedPipe::connect` above for the rationale behind `forget` - Ok(_) => { - io.read = State::Pending(buf, 0); // 0 is ignored on read side - mem::forget(me.clone()); - true - } - - // If ERROR_PIPE_LISTENING happens then it's not a real read error, - // we just need to wait for a connect. - Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_LISTENING as i32) => false, - - // If some other error happened, though, we're now readable to give - // out the error. - Err(e) => { - io.read = State::Err(e); - io.notify_readable(events); - true - } - } - } - - /// Maybe schedules overlapped write operation. - /// - /// * `None` means that overlapped operation was enqueued - /// * `Some(n)` means that `n` bytes was immediately written. - /// Note, that `write_done` will fire anyway to clean up the state. 
- fn maybe_schedule_write( - me: &Arc, - buf: Vec, - pos: usize, - io: &mut Io, - ) -> io::Result> { - // Very similar to `schedule_read` above, just done for the write half. - let e = unsafe { - let overlapped = me.write.as_ptr() as *mut _; - me.write_overlapped(&buf[pos..], overlapped) - }; - - // See `connect` above for the rationale behind `forget` - match e { - // `n` bytes are written immediately - Ok(Some(n)) => { - io.write = State::Ok(buf, pos); - mem::forget(me.clone()); - Ok(Some(n)) - } - // write operation is enqueued - Ok(None) => { - io.write = State::Pending(buf, pos); - mem::forget(me.clone()); - Ok(None) - } - Err(e) => Err(e), - } - } - - fn schedule_write( - me: &Arc, - buf: Vec, - pos: usize, - io: &mut Io, - events: Option<&mut Vec>, - ) { - match Inner::maybe_schedule_write(me, buf, pos, io) { - Ok(Some(_)) => { - // immediate result will be handled in `write_done`, - // so we'll reinterpret the `Ok` state - let state = mem::replace(&mut io.write, State::None); - io.write = match state { - State::Ok(buf, pos) => State::Pending(buf, pos), - // io is locked, so this branch is unreachable - _ => unreachable!(), - }; - mem::forget(me.clone()); - } - Ok(None) => (), - Err(e) => { - io.write = State::Err(e); - io.notify_writable(events); - } - } - } - - fn post_register(me: &Arc, mut events: Option<&mut Vec>) { - let mut io = me.io.lock().unwrap(); - #[allow(clippy::needless_option_as_deref)] - if Inner::schedule_read(me, &mut io, events.as_deref_mut()) { - if let State::None = io.write { - io.notify_writable(events); - } - } - } - - fn get_buffer(&self) -> Vec { - self.pool.lock().unwrap().get(4 * 1024) - } - - fn put_buffer(&self, buf: Vec) { - self.pool.lock().unwrap().put(buf) - } -} - -unsafe fn cancel(handle: &Handle, overlapped: &Overlapped) -> io::Result<()> { - let ret = CancelIoEx(handle.raw(), overlapped.as_ptr()); - // `CancelIoEx` returns 0 on error: - // https://docs.microsoft.com/en-us/windows/win32/fileio/cancelioex-func - if ret == 
0 { - Err(io::Error::last_os_error()) - } else { - Ok(()) - } -} - -fn connect_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec>) { - let status = CompletionStatus::from_entry(status); - - // Acquire the `Arc`. Note that we should be guaranteed that - // the refcount is available to us due to the `mem::forget` in - // `connect` above. - let me = unsafe { Arc::from_raw(Inner::ptr_from_conn_overlapped(status.overlapped())) }; - - // Flag ourselves as no longer using the `connect` overlapped instances. - let prev = me.connecting.swap(false, SeqCst); - assert!(prev, "NamedPipe was not previously connecting"); - - // Stash away our connect error if one happened - debug_assert_eq!(status.bytes_transferred(), 0); - unsafe { - match me.result(status.overlapped()) { - Ok(n) => debug_assert_eq!(n, 0), - Err(e) => me.io.lock().unwrap().connect_error = Some(e), - } - } - - // We essentially just finished a registration, so kick off a - // read and register write readiness. - Inner::post_register(&me, events); -} - -fn read_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec>) { - let status = CompletionStatus::from_entry(status); - - // Acquire the `FromRawArc`. Note that we should be guaranteed that - // the refcount is available to us due to the `mem::forget` in - // `schedule_read` above. - let me = unsafe { Arc::from_raw(Inner::ptr_from_read_overlapped(status.overlapped())) }; - - // Move from the `Pending` to `Ok` state. - let mut io = me.io.lock().unwrap(); - let mut buf = match mem::replace(&mut io.read, State::None) { - State::Pending(buf, _) => buf, - _ => unreachable!(), - }; - unsafe { - match me.result(status.overlapped()) { - Ok(n) => { - debug_assert_eq!(status.bytes_transferred() as usize, n); - buf.set_len(status.bytes_transferred() as usize); - io.read = State::Ok(buf, 0); - } - Err(e) => { - debug_assert_eq!(status.bytes_transferred(), 0); - io.read = State::Err(e); - } - } - } - - // Flag our readiness that we've got data. 
- io.notify_readable(events); -} - -fn write_done(status: &OVERLAPPED_ENTRY, events: Option<&mut Vec>) { - let status = CompletionStatus::from_entry(status); - - // Acquire the `Arc`. Note that we should be guaranteed that - // the refcount is available to us due to the `mem::forget` in - // `schedule_write` above. - let me = unsafe { Arc::from_raw(Inner::ptr_from_write_overlapped(status.overlapped())) }; - - // Make the state change out of `Pending`. If we wrote the entire buffer - // then we're writable again and otherwise we schedule another write. - let mut io = me.io.lock().unwrap(); - let (buf, pos) = match mem::replace(&mut io.write, State::None) { - // `Ok` here means, that the operation was completed immediately - // `bytes_transferred` is already reported to a client - State::Ok(..) => { - io.notify_writable(events); - return; - } - State::Pending(buf, pos) => (buf, pos), - _ => unreachable!(), - }; - - unsafe { - match me.result(status.overlapped()) { - Ok(n) => { - debug_assert_eq!(status.bytes_transferred() as usize, n); - let new_pos = pos + (status.bytes_transferred() as usize); - if new_pos == buf.len() { - me.put_buffer(buf); - io.notify_writable(events); - } else { - Inner::schedule_write(&me, buf, new_pos, &mut io, events); - } - } - Err(e) => { - debug_assert_eq!(status.bytes_transferred(), 0); - io.write = State::Err(e); - io.notify_writable(events); - } - } - } -} - -impl Io { - fn check_association(&self, registry: &Registry, required: bool) -> io::Result<()> { - match self.cp { - Some(ref cp) if !registry.selector().same_port(cp) => Err(io::Error::new( - io::ErrorKind::AlreadyExists, - "I/O source already registered with a different `Registry`", - )), - None if required => Err(io::Error::new( - io::ErrorKind::NotFound, - "I/O source not registered with `Registry`", - )), - _ => Ok(()), - } - } - - fn notify_readable(&self, events: Option<&mut Vec>) { - if let Some(token) = self.token { - let mut ev = Event::new(token); - ev.set_readable(); - 
- if let Some(events) = events { - events.push(ev); - } else { - let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status()); - } - } - } - - fn notify_writable(&self, events: Option<&mut Vec>) { - if let Some(token) = self.token { - let mut ev = Event::new(token); - ev.set_writable(); - - if let Some(events) = events { - events.push(ev); - } else { - let _ = self.cp.as_ref().unwrap().post(ev.to_completion_status()); - } - } - } -} - -struct BufferPool { - pool: Vec>, -} - -impl BufferPool { - fn with_capacity(cap: usize) -> BufferPool { - BufferPool { - pool: Vec::with_capacity(cap), - } - } - - fn get(&mut self, default_cap: usize) -> Vec { - self.pool - .pop() - .unwrap_or_else(|| Vec::with_capacity(default_cap)) - } - - fn put(&mut self, mut buf: Vec) { - if self.pool.len() < self.pool.capacity() { - unsafe { - buf.set_len(0); - } - self.pool.push(buf); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/net.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/net.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/net.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/net.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,111 +0,0 @@ -use std::io; -use std::mem; -use std::net::SocketAddr; -use std::sync::Once; - -use windows_sys::Win32::Networking::WinSock::{ - closesocket, ioctlsocket, socket, AF_INET, AF_INET6, FIONBIO, IN6_ADDR, IN6_ADDR_0, - INVALID_SOCKET, IN_ADDR, IN_ADDR_0, SOCKADDR, SOCKADDR_IN, SOCKADDR_IN6, SOCKADDR_IN6_0, - SOCKET, -}; - -/// Initialise the network stack for Windows. -fn init() { - static INIT: Once = Once::new(); - INIT.call_once(|| { - // Let standard library call `WSAStartup` for us, we can't do it - // ourselves because otherwise using any type in `std::net` would panic - // when it tries to call `WSAStartup` a second time. - drop(std::net::UdpSocket::bind("127.0.0.1:0")); - }); -} - -/// Create a new non-blocking socket. 
-pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: i32) -> io::Result { - let domain = match addr { - SocketAddr::V4(..) => AF_INET, - SocketAddr::V6(..) => AF_INET6, - }; - - new_socket(domain.into(), socket_type) -} - -pub(crate) fn new_socket(domain: u32, socket_type: i32) -> io::Result { - init(); - - let socket = syscall!( - socket(domain as i32, socket_type, 0), - PartialEq::eq, - INVALID_SOCKET - )?; - - if let Err(err) = syscall!(ioctlsocket(socket, FIONBIO, &mut 1), PartialEq::ne, 0) { - let _ = unsafe { closesocket(socket) }; - return Err(err); - } - - Ok(socket as SOCKET) -} - -/// A type with the same memory layout as `SOCKADDR`. Used in converting Rust level -/// SocketAddr* types into their system representation. The benefit of this specific -/// type over using `SOCKADDR_STORAGE` is that this type is exactly as large as it -/// needs to be and not a lot larger. And it can be initialized cleaner from Rust. -#[repr(C)] -pub(crate) union SocketAddrCRepr { - v4: SOCKADDR_IN, - v6: SOCKADDR_IN6, -} - -impl SocketAddrCRepr { - pub(crate) fn as_ptr(&self) -> *const SOCKADDR { - self as *const _ as *const SOCKADDR - } -} - -pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, i32) { - match addr { - SocketAddr::V4(ref addr) => { - // `s_addr` is stored as BE on all machine and the array is in BE order. - // So the native endian conversion method is used so that it's never swapped. 
- let sin_addr = unsafe { - let mut s_un = mem::zeroed::(); - s_un.S_addr = u32::from_ne_bytes(addr.ip().octets()); - IN_ADDR { S_un: s_un } - }; - - let sockaddr_in = SOCKADDR_IN { - sin_family: AF_INET as u16, // 1 - sin_port: addr.port().to_be(), - sin_addr, - sin_zero: [0; 8], - }; - - let sockaddr = SocketAddrCRepr { v4: sockaddr_in }; - (sockaddr, mem::size_of::() as i32) - } - SocketAddr::V6(ref addr) => { - let sin6_addr = unsafe { - let mut u = mem::zeroed::(); - u.Byte = addr.ip().octets(); - IN6_ADDR { u } - }; - let u = unsafe { - let mut u = mem::zeroed::(); - u.sin6_scope_id = addr.scope_id(); - u - }; - - let sockaddr_in6 = SOCKADDR_IN6 { - sin6_family: AF_INET6 as u16, // 23 - sin6_port: addr.port().to_be(), - sin6_addr, - sin6_flowinfo: addr.flowinfo(), - Anonymous: u, - }; - - let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 }; - (sockaddr, mem::size_of::() as i32) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/overlapped.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/overlapped.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/overlapped.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/overlapped.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,35 +0,0 @@ -use crate::sys::windows::Event; - -use std::cell::UnsafeCell; -use std::fmt; - -use windows_sys::Win32::System::IO::{OVERLAPPED, OVERLAPPED_ENTRY}; - -#[repr(C)] -pub(crate) struct Overlapped { - inner: UnsafeCell, - pub(crate) callback: fn(&OVERLAPPED_ENTRY, Option<&mut Vec>), -} - -#[cfg(feature = "os-ext")] -impl Overlapped { - pub(crate) fn new(cb: fn(&OVERLAPPED_ENTRY, Option<&mut Vec>)) -> Overlapped { - Overlapped { - inner: UnsafeCell::new(unsafe { std::mem::zeroed() }), - callback: cb, - } - } - - pub(crate) fn as_ptr(&self) -> *const OVERLAPPED { - self.inner.get() - } -} - -impl fmt::Debug for Overlapped { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Overlapped").finish() - 
} -} - -unsafe impl Send for Overlapped {} -unsafe impl Sync for Overlapped {} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/selector.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/selector.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/selector.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/selector.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,752 +0,0 @@ -use super::afd::{self, Afd, AfdPollInfo}; -use super::io_status_block::IoStatusBlock; -use super::Event; -use crate::sys::Events; - -cfg_net! { - use crate::sys::event::{ - ERROR_FLAGS, READABLE_FLAGS, READ_CLOSED_FLAGS, WRITABLE_FLAGS, WRITE_CLOSED_FLAGS, - }; - use crate::Interest; -} - -use super::iocp::{CompletionPort, CompletionStatus}; -use std::collections::VecDeque; -use std::ffi::c_void; -use std::io; -use std::marker::PhantomPinned; -use std::os::windows::io::RawSocket; -use std::pin::Pin; -#[cfg(debug_assertions)] -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, Mutex}; -use std::time::Duration; - -use windows_sys::Win32::Foundation::{ - ERROR_INVALID_HANDLE, ERROR_IO_PENDING, HANDLE, STATUS_CANCELLED, WAIT_TIMEOUT, -}; -use windows_sys::Win32::System::IO::OVERLAPPED; - -#[derive(Debug)] -struct AfdGroup { - #[cfg_attr(not(feature = "net"), allow(dead_code))] - cp: Arc, - afd_group: Mutex>>, -} - -impl AfdGroup { - pub fn new(cp: Arc) -> AfdGroup { - AfdGroup { - afd_group: Mutex::new(Vec::new()), - cp, - } - } - - pub fn release_unused_afd(&self) { - let mut afd_group = self.afd_group.lock().unwrap(); - afd_group.retain(|g| Arc::strong_count(g) > 1); - } -} - -cfg_io_source! 
{ - const POLL_GROUP__MAX_GROUP_SIZE: usize = 32; - - impl AfdGroup { - pub fn acquire(&self) -> io::Result> { - let mut afd_group = self.afd_group.lock().unwrap(); - if afd_group.len() == 0 { - self._alloc_afd_group(&mut afd_group)?; - } else { - // + 1 reference in Vec - if Arc::strong_count(afd_group.last().unwrap()) > POLL_GROUP__MAX_GROUP_SIZE { - self._alloc_afd_group(&mut afd_group)?; - } - } - - match afd_group.last() { - Some(arc) => Ok(arc.clone()), - None => unreachable!( - "Cannot acquire afd, {:#?}, afd_group: {:#?}", - self, afd_group - ), - } - } - - fn _alloc_afd_group(&self, afd_group: &mut Vec>) -> io::Result<()> { - let afd = Afd::new(&self.cp)?; - let arc = Arc::new(afd); - afd_group.push(arc); - Ok(()) - } - } -} - -#[derive(Debug)] -enum SockPollStatus { - Idle, - Pending, - Cancelled, -} - -#[derive(Debug)] -pub struct SockState { - iosb: IoStatusBlock, - poll_info: AfdPollInfo, - afd: Arc, - - base_socket: RawSocket, - - user_evts: u32, - pending_evts: u32, - - user_data: u64, - - poll_status: SockPollStatus, - delete_pending: bool, - - // last raw os error - error: Option, - - _pinned: PhantomPinned, -} - -impl SockState { - fn update(&mut self, self_arc: &Pin>>) -> io::Result<()> { - assert!(!self.delete_pending); - - // make sure to reset previous error before a new update - self.error = None; - - if let SockPollStatus::Pending = self.poll_status { - if (self.user_evts & afd::KNOWN_EVENTS & !self.pending_evts) == 0 { - /* All the events the user is interested in are already being monitored by - * the pending poll operation. It might spuriously complete because of an - * event that we're no longer interested in; when that happens we'll submit - * a new poll operation with the updated event mask. */ - } else { - /* A poll operation is already pending, but it's not monitoring for all the - * events that the user is interested in. 
Therefore, cancel the pending - * poll operation; when we receive it's completion package, a new poll - * operation will be submitted with the correct event mask. */ - if let Err(e) = self.cancel() { - self.error = e.raw_os_error(); - return Err(e); - } - return Ok(()); - } - } else if let SockPollStatus::Cancelled = self.poll_status { - /* The poll operation has already been cancelled, we're still waiting for - * it to return. For now, there's nothing that needs to be done. */ - } else if let SockPollStatus::Idle = self.poll_status { - /* No poll operation is pending; start one. */ - self.poll_info.exclusive = 0; - self.poll_info.number_of_handles = 1; - self.poll_info.timeout = i64::MAX; - self.poll_info.handles[0].handle = self.base_socket as HANDLE; - self.poll_info.handles[0].status = 0; - self.poll_info.handles[0].events = self.user_evts | afd::POLL_LOCAL_CLOSE; - - // Increase the ref count as the memory will be used by the kernel. - let overlapped_ptr = into_overlapped(self_arc.clone()); - - let result = unsafe { - self.afd - .poll(&mut self.poll_info, &mut *self.iosb, overlapped_ptr) - }; - if let Err(e) = result { - let code = e.raw_os_error().unwrap(); - if code == ERROR_IO_PENDING as i32 { - /* Overlapped poll operation in progress; this is expected. */ - } else { - // Since the operation failed it means the kernel won't be - // using the memory any more. - drop(from_overlapped(overlapped_ptr as *mut _)); - if code == ERROR_INVALID_HANDLE as i32 { - /* Socket closed; it'll be dropped. 
*/ - self.mark_delete(); - return Ok(()); - } else { - self.error = e.raw_os_error(); - return Err(e); - } - } - } - - self.poll_status = SockPollStatus::Pending; - self.pending_evts = self.user_evts; - } else { - unreachable!("Invalid poll status during update, {:#?}", self) - } - - Ok(()) - } - - fn cancel(&mut self) -> io::Result<()> { - match self.poll_status { - SockPollStatus::Pending => {} - _ => unreachable!("Invalid poll status during cancel, {:#?}", self), - }; - unsafe { - self.afd.cancel(&mut *self.iosb)?; - } - self.poll_status = SockPollStatus::Cancelled; - self.pending_evts = 0; - Ok(()) - } - - // This is the function called from the overlapped using as Arc>. Watch out for reference counting. - fn feed_event(&mut self) -> Option { - self.poll_status = SockPollStatus::Idle; - self.pending_evts = 0; - - let mut afd_events = 0; - // We use the status info in IO_STATUS_BLOCK to determine the socket poll status. It is unsafe to use a pointer of IO_STATUS_BLOCK. - unsafe { - if self.delete_pending { - return None; - } else if self.iosb.Anonymous.Status == STATUS_CANCELLED { - /* The poll request was cancelled by CancelIoEx. */ - } else if self.iosb.Anonymous.Status < 0 { - /* The overlapped request itself failed in an unexpected way. */ - afd_events = afd::POLL_CONNECT_FAIL; - } else if self.poll_info.number_of_handles < 1 { - /* This poll operation succeeded but didn't report any socket events. */ - } else if self.poll_info.handles[0].events & afd::POLL_LOCAL_CLOSE != 0 { - /* The poll operation reported that the socket was closed. */ - self.mark_delete(); - return None; - } else { - afd_events = self.poll_info.handles[0].events; - } - } - - afd_events &= self.user_evts; - - if afd_events == 0 { - return None; - } - - // In mio, we have to simulate Edge-triggered behavior to match API usage. - // The strategy here is to intercept all read/write from user that could cause WouldBlock usage, - // then reregister the socket to reset the interests. 
- self.user_evts &= !afd_events; - - Some(Event { - data: self.user_data, - flags: afd_events, - }) - } - - pub fn is_pending_deletion(&self) -> bool { - self.delete_pending - } - - pub fn mark_delete(&mut self) { - if !self.delete_pending { - if let SockPollStatus::Pending = self.poll_status { - drop(self.cancel()); - } - - self.delete_pending = true; - } - } - - fn has_error(&self) -> bool { - self.error.is_some() - } -} - -cfg_io_source! { - impl SockState { - fn new(raw_socket: RawSocket, afd: Arc) -> io::Result { - Ok(SockState { - iosb: IoStatusBlock::zeroed(), - poll_info: AfdPollInfo::zeroed(), - afd, - base_socket: get_base_socket(raw_socket)?, - user_evts: 0, - pending_evts: 0, - user_data: 0, - poll_status: SockPollStatus::Idle, - delete_pending: false, - error: None, - _pinned: PhantomPinned, - }) - } - - /// True if need to be added on update queue, false otherwise. - fn set_event(&mut self, ev: Event) -> bool { - /* afd::POLL_CONNECT_FAIL and afd::POLL_ABORT are always reported, even when not requested by the caller. */ - let events = ev.flags | afd::POLL_CONNECT_FAIL | afd::POLL_ABORT; - - self.user_evts = events; - self.user_data = ev.data; - - (events & !self.pending_evts) != 0 - } - } -} - -impl Drop for SockState { - fn drop(&mut self) { - self.mark_delete(); - } -} - -/// Converts the pointer to a `SockState` into a raw pointer. -/// To revert see `from_overlapped`. -fn into_overlapped(sock_state: Pin>>) -> *mut c_void { - let overlapped_ptr: *const Mutex = - unsafe { Arc::into_raw(Pin::into_inner_unchecked(sock_state)) }; - overlapped_ptr as *mut _ -} - -/// Convert a raw overlapped pointer into a reference to `SockState`. -/// Reverts `into_overlapped`. -fn from_overlapped(ptr: *mut OVERLAPPED) -> Pin>> { - let sock_ptr: *const Mutex = ptr as *const _; - unsafe { Pin::new_unchecked(Arc::from_raw(sock_ptr)) } -} - -/// Each Selector has a globally unique(ish) ID associated with it. This ID -/// gets tracked by `TcpStream`, `TcpListener`, etc... 
when they are first -/// registered with the `Selector`. If a type that is previously associated with -/// a `Selector` attempts to register itself with a different `Selector`, the -/// operation will return with an error. This matches windows behavior. -#[cfg(debug_assertions)] -static NEXT_ID: AtomicUsize = AtomicUsize::new(0); - -/// Windows implementation of `sys::Selector` -/// -/// Edge-triggered event notification is simulated by resetting internal event flag of each socket state `SockState` -/// and setting all events back by intercepting all requests that could cause `io::ErrorKind::WouldBlock` happening. -/// -/// This selector is currently only support socket due to `Afd` driver is winsock2 specific. -#[derive(Debug)] -pub struct Selector { - #[cfg(debug_assertions)] - id: usize, - pub(super) inner: Arc, - #[cfg(debug_assertions)] - has_waker: AtomicBool, -} - -impl Selector { - pub fn new() -> io::Result { - SelectorInner::new().map(|inner| { - #[cfg(debug_assertions)] - let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1; - Selector { - #[cfg(debug_assertions)] - id, - inner: Arc::new(inner), - #[cfg(debug_assertions)] - has_waker: AtomicBool::new(false), - } - }) - } - - pub fn try_clone(&self) -> io::Result { - Ok(Selector { - #[cfg(debug_assertions)] - id: self.id, - inner: Arc::clone(&self.inner), - #[cfg(debug_assertions)] - has_waker: AtomicBool::new(self.has_waker.load(Ordering::Acquire)), - }) - } - - /// # Safety - /// - /// This requires a mutable reference to self because only a single thread - /// can poll IOCP at a time. 
- pub fn select(&mut self, events: &mut Events, timeout: Option) -> io::Result<()> { - self.inner.select(events, timeout) - } - - #[cfg(debug_assertions)] - pub fn register_waker(&self) -> bool { - self.has_waker.swap(true, Ordering::AcqRel) - } - - pub(super) fn clone_port(&self) -> Arc { - self.inner.cp.clone() - } - - #[cfg(feature = "os-ext")] - pub(super) fn same_port(&self, other: &Arc) -> bool { - Arc::ptr_eq(&self.inner.cp, other) - } -} - -cfg_io_source! { - use super::InternalState; - use crate::Token; - - impl Selector { - pub(super) fn register( - &self, - socket: RawSocket, - token: Token, - interests: Interest, - ) -> io::Result { - SelectorInner::register(&self.inner, socket, token, interests) - } - - pub(super) fn reregister( - &self, - state: Pin>>, - token: Token, - interests: Interest, - ) -> io::Result<()> { - self.inner.reregister(state, token, interests) - } - - #[cfg(debug_assertions)] - pub fn id(&self) -> usize { - self.id - } - } -} - -#[derive(Debug)] -pub struct SelectorInner { - pub(super) cp: Arc, - update_queue: Mutex>>>>, - afd_group: AfdGroup, - is_polling: AtomicBool, -} - -// We have ensured thread safety by introducing lock manually. -unsafe impl Sync for SelectorInner {} - -impl SelectorInner { - pub fn new() -> io::Result { - CompletionPort::new(0).map(|cp| { - let cp = Arc::new(cp); - let cp_afd = Arc::clone(&cp); - - SelectorInner { - cp, - update_queue: Mutex::new(VecDeque::new()), - afd_group: AfdGroup::new(cp_afd), - is_polling: AtomicBool::new(false), - } - }) - } - - /// # Safety - /// - /// May only be calling via `Selector::select`. 
- pub fn select(&self, events: &mut Events, timeout: Option) -> io::Result<()> { - events.clear(); - - if timeout.is_none() { - loop { - let len = self.select2(&mut events.statuses, &mut events.events, None)?; - if len == 0 { - continue; - } - break Ok(()); - } - } else { - self.select2(&mut events.statuses, &mut events.events, timeout)?; - Ok(()) - } - } - - pub fn select2( - &self, - statuses: &mut [CompletionStatus], - events: &mut Vec, - timeout: Option, - ) -> io::Result { - assert!(!self.is_polling.swap(true, Ordering::AcqRel)); - - unsafe { self.update_sockets_events() }?; - - let result = self.cp.get_many(statuses, timeout); - - self.is_polling.store(false, Ordering::Relaxed); - - match result { - Ok(iocp_events) => Ok(unsafe { self.feed_events(events, iocp_events) }), - Err(ref e) if e.raw_os_error() == Some(WAIT_TIMEOUT as i32) => Ok(0), - Err(e) => Err(e), - } - } - - unsafe fn update_sockets_events(&self) -> io::Result<()> { - let mut update_queue = self.update_queue.lock().unwrap(); - for sock in update_queue.iter_mut() { - let mut sock_internal = sock.lock().unwrap(); - if !sock_internal.is_pending_deletion() { - sock_internal.update(sock)?; - } - } - - // remove all sock which do not have error, they have afd op pending - update_queue.retain(|sock| sock.lock().unwrap().has_error()); - - self.afd_group.release_unused_afd(); - Ok(()) - } - - // It returns processed count of iocp_events rather than the events itself. - unsafe fn feed_events( - &self, - events: &mut Vec, - iocp_events: &[CompletionStatus], - ) -> usize { - let mut n = 0; - let mut update_queue = self.update_queue.lock().unwrap(); - for iocp_event in iocp_events.iter() { - if iocp_event.overlapped().is_null() { - events.push(Event::from_completion_status(iocp_event)); - n += 1; - continue; - } else if iocp_event.token() % 2 == 1 { - // Handle is a named pipe. This could be extended to be any non-AFD event. 
- let callback = (*(iocp_event.overlapped() as *mut super::Overlapped)).callback; - - let len = events.len(); - callback(iocp_event.entry(), Some(events)); - n += events.len() - len; - continue; - } - - let sock_state = from_overlapped(iocp_event.overlapped()); - let mut sock_guard = sock_state.lock().unwrap(); - if let Some(e) = sock_guard.feed_event() { - events.push(e); - n += 1; - } - - if !sock_guard.is_pending_deletion() { - update_queue.push_back(sock_state.clone()); - } - } - self.afd_group.release_unused_afd(); - n - } -} - -cfg_io_source! { - use std::mem::size_of; - use std::ptr::null_mut; - - use windows_sys::Win32::Networking::WinSock::{ - WSAGetLastError, WSAIoctl, SIO_BASE_HANDLE, SIO_BSP_HANDLE, - SIO_BSP_HANDLE_POLL, SIO_BSP_HANDLE_SELECT, SOCKET_ERROR, - }; - - - impl SelectorInner { - fn register( - this: &Arc, - socket: RawSocket, - token: Token, - interests: Interest, - ) -> io::Result { - let flags = interests_to_afd_flags(interests); - - let sock = { - let sock = this._alloc_sock_for_rawsocket(socket)?; - let event = Event { - flags, - data: token.0 as u64, - }; - sock.lock().unwrap().set_event(event); - sock - }; - - let state = InternalState { - selector: this.clone(), - token, - interests, - sock_state: sock.clone(), - }; - - this.queue_state(sock); - unsafe { this.update_sockets_events_if_polling()? }; - - Ok(state) - } - - // Directly accessed in `IoSourceState::do_io`. - pub(super) fn reregister( - &self, - state: Pin>>, - token: Token, - interests: Interest, - ) -> io::Result<()> { - { - let event = Event { - flags: interests_to_afd_flags(interests), - data: token.0 as u64, - }; - - state.lock().unwrap().set_event(event); - } - - // FIXME: a sock which has_error true should not be re-added to - // the update queue because it's already there. 
- self.queue_state(state); - unsafe { self.update_sockets_events_if_polling() } - } - - /// This function is called by register() and reregister() to start an - /// IOCTL_AFD_POLL operation corresponding to the registered events, but - /// only if necessary. - /// - /// Since it is not possible to modify or synchronously cancel an AFD_POLL - /// operation, and there can be only one active AFD_POLL operation per - /// (socket, completion port) pair at any time, it is expensive to change - /// a socket's event registration after it has been submitted to the kernel. - /// - /// Therefore, if no other threads are polling when interest in a socket - /// event is (re)registered, the socket is added to the 'update queue', but - /// the actual syscall to start the IOCTL_AFD_POLL operation is deferred - /// until just before the GetQueuedCompletionStatusEx() syscall is made. - /// - /// However, when another thread is already blocked on - /// GetQueuedCompletionStatusEx() we tell the kernel about the registered - /// socket event(s) immediately. 
- unsafe fn update_sockets_events_if_polling(&self) -> io::Result<()> { - if self.is_polling.load(Ordering::Acquire) { - self.update_sockets_events() - } else { - Ok(()) - } - } - - fn queue_state(&self, sock_state: Pin>>) { - let mut update_queue = self.update_queue.lock().unwrap(); - update_queue.push_back(sock_state); - } - - fn _alloc_sock_for_rawsocket( - &self, - raw_socket: RawSocket, - ) -> io::Result>>> { - let afd = self.afd_group.acquire()?; - Ok(Arc::pin(Mutex::new(SockState::new(raw_socket, afd)?))) - } - } - - fn try_get_base_socket(raw_socket: RawSocket, ioctl: u32) -> Result { - let mut base_socket: RawSocket = 0; - let mut bytes: u32 = 0; - unsafe { - if WSAIoctl( - raw_socket as usize, - ioctl, - null_mut(), - 0, - &mut base_socket as *mut _ as *mut c_void, - size_of::() as u32, - &mut bytes, - null_mut(), - None, - ) != SOCKET_ERROR - { - Ok(base_socket) - } else { - Err(WSAGetLastError()) - } - } - } - - fn get_base_socket(raw_socket: RawSocket) -> io::Result { - let res = try_get_base_socket(raw_socket, SIO_BASE_HANDLE); - if let Ok(base_socket) = res { - return Ok(base_socket); - } - - // The `SIO_BASE_HANDLE` should not be intercepted by LSPs, therefore - // it should not fail as long as `raw_socket` is a valid socket. See - // https://docs.microsoft.com/en-us/windows/win32/winsock/winsock-ioctls. - // However, at least one known LSP deliberately breaks it, so we try - // some alternative IOCTLs, starting with the most appropriate one. - for &ioctl in &[ - SIO_BSP_HANDLE_SELECT, - SIO_BSP_HANDLE_POLL, - SIO_BSP_HANDLE, - ] { - if let Ok(base_socket) = try_get_base_socket(raw_socket, ioctl) { - // Since we know now that we're dealing with an LSP (otherwise - // SIO_BASE_HANDLE would't have failed), only return any result - // when it is different from the original `raw_socket`. - if base_socket != raw_socket { - return Ok(base_socket); - } - } - } - - // If the alternative IOCTLs also failed, return the original error. 
- let os_error = res.unwrap_err(); - let err = io::Error::from_raw_os_error(os_error); - Err(err) - } -} - -impl Drop for SelectorInner { - fn drop(&mut self) { - loop { - let events_num: usize; - let mut statuses: [CompletionStatus; 1024] = [CompletionStatus::zero(); 1024]; - - let result = self - .cp - .get_many(&mut statuses, Some(std::time::Duration::from_millis(0))); - match result { - Ok(iocp_events) => { - events_num = iocp_events.iter().len(); - for iocp_event in iocp_events.iter() { - if iocp_event.overlapped().is_null() { - // Custom event - } else if iocp_event.token() % 2 == 1 { - // Named pipe, dispatch the event so it can release resources - let callback = unsafe { - (*(iocp_event.overlapped() as *mut super::Overlapped)).callback - }; - - callback(iocp_event.entry(), None); - } else { - // drain sock state to release memory of Arc reference - let _sock_state = from_overlapped(iocp_event.overlapped()); - } - } - } - - Err(_) => { - break; - } - } - - if events_num == 0 { - // continue looping until all completion statuses have been drained - break; - } - } - - self.afd_group.release_unused_afd(); - } -} - -cfg_net! 
{ - fn interests_to_afd_flags(interests: Interest) -> u32 { - let mut flags = 0; - - if interests.is_readable() { - flags |= READABLE_FLAGS | READ_CLOSED_FLAGS | ERROR_FLAGS; - } - - if interests.is_writable() { - flags |= WRITABLE_FLAGS | WRITE_CLOSED_FLAGS | ERROR_FLAGS; - } - - flags - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/tcp.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/tcp.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/tcp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/tcp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,66 +0,0 @@ -use std::io; -use std::net::{self, SocketAddr}; -use std::os::windows::io::AsRawSocket; - -use windows_sys::Win32::Networking::WinSock::{self, SOCKET, SOCKET_ERROR, SOCK_STREAM}; - -use crate::sys::windows::net::{new_ip_socket, socket_addr}; - -pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result { - new_ip_socket(address, SOCK_STREAM) -} - -pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> { - use WinSock::bind; - - let (raw_addr, raw_addr_length) = socket_addr(&addr); - syscall!( - bind( - socket.as_raw_socket() as _, - raw_addr.as_ptr(), - raw_addr_length - ), - PartialEq::eq, - SOCKET_ERROR - )?; - Ok(()) -} - -pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> { - use WinSock::connect; - - let (raw_addr, raw_addr_length) = socket_addr(&addr); - let res = syscall!( - connect( - socket.as_raw_socket() as _, - raw_addr.as_ptr(), - raw_addr_length - ), - PartialEq::eq, - SOCKET_ERROR - ); - - match res { - Err(err) if err.kind() != io::ErrorKind::WouldBlock => Err(err), - _ => Ok(()), - } -} - -pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> { - use std::convert::TryInto; - use WinSock::listen; - - let backlog = backlog.try_into().unwrap_or(i32::max_value()); - syscall!( - listen(socket.as_raw_socket() as _, backlog), - PartialEq::eq, - 
SOCKET_ERROR - )?; - Ok(()) -} - -pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> { - // The non-blocking state of `listener` is inherited. See - // https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-accept#remarks. - listener.accept() -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/udp.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/udp.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/udp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/udp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -use std::io; -use std::mem::{self, MaybeUninit}; -use std::net::{self, SocketAddr}; -use std::os::windows::io::{AsRawSocket, FromRawSocket}; -use std::os::windows::raw::SOCKET as StdSocket; // windows-sys uses usize, stdlib uses u32/u64. - -use crate::sys::windows::net::{new_ip_socket, socket_addr}; -use windows_sys::Win32::Networking::WinSock::{ - bind as win_bind, getsockopt, IPPROTO_IPV6, IPV6_V6ONLY, SOCKET_ERROR, SOCK_DGRAM, -}; - -pub fn bind(addr: SocketAddr) -> io::Result { - let raw_socket = new_ip_socket(addr, SOCK_DGRAM)?; - let socket = unsafe { net::UdpSocket::from_raw_socket(raw_socket as StdSocket) }; - - let (raw_addr, raw_addr_length) = socket_addr(&addr); - syscall!( - win_bind(raw_socket, raw_addr.as_ptr(), raw_addr_length), - PartialEq::eq, - SOCKET_ERROR - )?; - - Ok(socket) -} - -pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result { - let mut optval: MaybeUninit = MaybeUninit::uninit(); - let mut optlen = mem::size_of::() as i32; - - syscall!( - getsockopt( - socket.as_raw_socket() as usize, - IPPROTO_IPV6 as i32, - IPV6_V6ONLY as i32, - optval.as_mut_ptr().cast(), - &mut optlen, - ), - PartialEq::eq, - SOCKET_ERROR - )?; - - debug_assert_eq!(optlen as usize, mem::size_of::()); - // Safety: `getsockopt` initialised `optval` for us. 
- let optval = unsafe { optval.assume_init() }; - Ok(optval != 0) -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/waker.rs s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/waker.rs --- s390-tools-2.31.0/rust-vendor/mio/src/sys/windows/waker.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/sys/windows/waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,29 +0,0 @@ -use crate::sys::windows::Event; -use crate::sys::windows::Selector; -use crate::Token; - -use super::iocp::CompletionPort; -use std::io; -use std::sync::Arc; - -#[derive(Debug)] -pub struct Waker { - token: Token, - port: Arc, -} - -impl Waker { - pub fn new(selector: &Selector, token: Token) -> io::Result { - Ok(Waker { - token, - port: selector.clone_port(), - }) - } - - pub fn wake(&self) -> io::Result<()> { - let mut ev = Event::new(self.token); - ev.set_readable(); - - self.port.post(ev.to_completion_status()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/token.rs s390-tools-2.33.1/rust-vendor/mio/src/token.rs --- s390-tools-2.31.0/rust-vendor/mio/src/token.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/token.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,138 +0,0 @@ -/// Associates readiness events with [`event::Source`]s. -/// -/// `Token` is a wrapper around `usize` and is used as an argument to -/// [`Registry::register`] and [`Registry::reregister`]. -/// -/// See [`Poll`] for more documentation on polling. -/// -/// [`event::Source`]: ./event/trait.Source.html -/// [`Poll`]: struct.Poll.html -/// [`Registry::register`]: struct.Registry.html#method.register -/// [`Registry::reregister`]: struct.Registry.html#method.reregister -/// -/// # Example -/// -/// Using `Token` to track which socket generated the event. In this example, -/// `HashMap` is used, but usually something like [`slab`] is better. 
-/// -/// [`slab`]: https://crates.io/crates/slab -/// -#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")] -#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")] -/// # use std::error::Error; -/// # fn main() -> Result<(), Box> { -/// use mio::{Events, Interest, Poll, Token}; -/// use mio::net::TcpListener; -/// -/// use std::thread; -/// use std::io::{self, Read}; -/// use std::collections::HashMap; -/// -/// // After this number of sockets is accepted, the server will shutdown. -/// const MAX_SOCKETS: usize = 32; -/// -/// // Pick a token that will not be used by any other socket and use that one -/// // for the listener. -/// const LISTENER: Token = Token(1024); -/// -/// // Used to store the sockets. -/// let mut sockets = HashMap::new(); -/// -/// // This is used to generate a unique token for a socket -/// let mut next_socket_index = 0; -/// -/// // The `Poll` instance -/// let mut poll = Poll::new()?; -/// -/// // Tcp listener -/// let mut listener = TcpListener::bind("127.0.0.1:0".parse()?)?; -/// -/// // Register the listener -/// poll.registry().register(&mut listener, LISTENER, Interest::READABLE)?; -/// -/// // Spawn a thread that will connect a bunch of sockets then close them -/// let addr = listener.local_addr()?; -/// thread::spawn(move || { -/// use std::net::TcpStream; -/// -/// // +1 here is to connect an extra socket to signal the socket to close -/// for _ in 0..(MAX_SOCKETS+1) { -/// // Connect then drop the socket -/// let _ = TcpStream::connect(addr).unwrap(); -/// } -/// }); -/// -/// // Event storage -/// let mut events = Events::with_capacity(1024); -/// -/// // Read buffer, this will never actually get filled -/// let mut buf = [0; 256]; -/// -/// // The main event loop -/// loop { -/// // Wait for events -/// poll.poll(&mut events, None)?; -/// -/// for event in &events { -/// match event.token() { -/// LISTENER => { -/// // Perform operations in a loop until `WouldBlock` is -/// // encountered. 
-/// loop { -/// match listener.accept() { -/// Ok((mut socket, _)) => { -/// // Shutdown the server -/// if next_socket_index == MAX_SOCKETS { -/// return Ok(()); -/// } -/// -/// // Get the token for the socket -/// let token = Token(next_socket_index); -/// next_socket_index += 1; -/// -/// // Register the new socket w/ poll -/// poll.registry().register(&mut socket, token, Interest::READABLE)?; -/// -/// // Store the socket -/// sockets.insert(token, socket); -/// } -/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { -/// // Socket is not ready anymore, stop accepting -/// break; -/// } -/// e => panic!("err={:?}", e), // Unexpected error -/// } -/// } -/// } -/// token => { -/// // Always operate in a loop -/// loop { -/// match sockets.get_mut(&token).unwrap().read(&mut buf) { -/// Ok(0) => { -/// // Socket is closed, remove it from the map -/// sockets.remove(&token); -/// break; -/// } -/// // Data is not actually sent in this example -/// Ok(_) => unreachable!(), -/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { -/// // Socket is not ready anymore, stop reading -/// break; -/// } -/// e => panic!("err={:?}", e), // Unexpected error -/// } -/// } -/// } -/// } -/// } -/// } -/// # } -/// ``` -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct Token(pub usize); - -impl From for usize { - fn from(val: Token) -> usize { - val.0 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mio/src/waker.rs s390-tools-2.33.1/rust-vendor/mio/src/waker.rs --- s390-tools-2.31.0/rust-vendor/mio/src/waker.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mio/src/waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,96 +0,0 @@ -use crate::{sys, Registry, Token}; - -use std::io; - -/// Waker allows cross-thread waking of [`Poll`]. -/// -/// When created it will cause events with [`readable`] readiness and the -/// provided `token` if [`wake`] is called, possibly from another thread. 
-/// -/// [`Poll`]: struct.Poll.html -/// [`readable`]: ./event/struct.Event.html#method.is_readable -/// [`wake`]: struct.Waker.html#method.wake -/// -/// # Notes -/// -/// `Waker` events are only guaranteed to be delivered while the `Waker` value -/// is alive. -/// -/// Only a single `Waker` can be active per [`Poll`], if multiple threads need -/// access to the `Waker` it can be shared via for example an `Arc`. What -/// happens if multiple `Waker`s are registered with the same `Poll` is -/// unspecified. -/// -/// # Implementation notes -/// -/// On platforms that support kqueue this will use the `EVFILT_USER` event -/// filter, see [implementation notes of `Poll`] to see what platforms support -/// kqueue. On Linux it uses [eventfd]. -/// -/// [implementation notes of `Poll`]: struct.Poll.html#implementation-notes -/// [eventfd]: https://man7.org/linux/man-pages/man2/eventfd.2.html -/// -/// # Examples -/// -/// Wake a [`Poll`] instance from another thread. -/// -#[cfg_attr(feature = "os-poll", doc = "```")] -#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")] -/// # fn main() -> Result<(), Box> { -/// use std::thread; -/// use std::time::Duration; -/// use std::sync::Arc; -/// -/// use mio::{Events, Token, Poll, Waker}; -/// -/// const WAKE_TOKEN: Token = Token(10); -/// -/// let mut poll = Poll::new()?; -/// let mut events = Events::with_capacity(2); -/// -/// let waker = Arc::new(Waker::new(poll.registry(), WAKE_TOKEN)?); -/// -/// // We need to keep the Waker alive, so we'll create a clone for the -/// // thread we create below. -/// let waker1 = waker.clone(); -/// let handle = thread::spawn(move || { -/// // Working hard, or hardly working? -/// thread::sleep(Duration::from_millis(500)); -/// -/// // Now we'll wake the queue on the other thread. -/// waker1.wake().expect("unable to wake"); -/// }); -/// -/// // On our current thread we'll poll for events, without a timeout. 
-/// poll.poll(&mut events, None)?; -/// -/// // After about 500 milliseconds we should be awoken by the other thread and -/// // get a single event. -/// assert!(!events.is_empty()); -/// let waker_event = events.iter().next().unwrap(); -/// assert!(waker_event.is_readable()); -/// assert_eq!(waker_event.token(), WAKE_TOKEN); -/// # handle.join().unwrap(); -/// # Ok(()) -/// # } -/// ``` -#[derive(Debug)] -pub struct Waker { - inner: sys::Waker, -} - -impl Waker { - /// Create a new `Waker`. - pub fn new(registry: &Registry, token: Token) -> io::Result { - #[cfg(debug_assertions)] - registry.register_waker(); - sys::Waker::new(registry.selector(), token).map(|inner| Waker { inner }) - } - - /// Wake up the [`Poll`] associated with this `Waker`. - /// - /// [`Poll`]: struct.Poll.html - pub fn wake(&self) -> io::Result<()> { - self.inner.wake() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/benches/lib.rs s390-tools-2.33.1/rust-vendor/mockito/benches/lib.rs --- s390-tools-2.31.0/rust-vendor/mockito/benches/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/benches/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -#![feature(test)] - -extern crate test; - -use mockito::Server; -use std::fmt::Display; -use std::io::{BufRead, BufReader, Read, Write}; -use std::net::TcpStream; -use std::str::FromStr; -use test::Bencher; - -fn request_stream(host: impl Display, route: &str, headers: &str) -> TcpStream { - let mut stream = TcpStream::connect(host.to_string()).unwrap(); - let message = [route, " HTTP/1.1\r\n", headers, "\r\n"].join(""); - stream.write_all(message.as_bytes()).unwrap(); - - stream -} - -fn parse_stream(stream: TcpStream) -> (String, Vec, String) { - let mut reader = BufReader::new(stream); - - let mut status_line = String::new(); - reader.read_line(&mut status_line).unwrap(); - - let mut headers = vec![]; - let mut content_length: u64 = 0; - loop { - let mut header_line = String::new(); - 
reader.read_line(&mut header_line).unwrap(); - - if header_line == "\r\n" { - break; - } - - if header_line.starts_with("content-length:") { - let mut parts = header_line.split(':'); - content_length = u64::from_str(parts.nth(1).unwrap().trim()).unwrap(); - } - - headers.push(header_line.trim_end().to_string()); - } - - let mut body = String::new(); - reader - .take(content_length) - .read_to_string(&mut body) - .unwrap(); - - (status_line, headers, body) -} - -fn request(host: impl Display, route: &str, headers: &str) -> (String, Vec, String) { - parse_stream(request_stream(host, route, headers)) -} - -#[bench] -fn bench_create_simple_mock(b: &mut Bencher) { - let mut s = Server::new(); - - b.iter(|| { - let _m = s.mock("GET", "/").with_body("test").create(); - }) -} - -#[bench] -fn bench_match_simple_mock(b: &mut Bencher) { - let mut s = Server::new(); - - let _m = s.mock("GET", "/").with_body("test").create(); - - b.iter(|| { - let (status_line, _, _) = request(&s.host_with_port(), "GET /", ""); - assert!(status_line.starts_with("HTTP/1.1 200")); - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/mockito/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/mockito/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"f8d3038e23466858569c2d30a537f691fa0d53b51626630ae08262943e3bbb8b"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/mockito/Cargo.lock s390-tools-2.33.1/rust-vendor/mockito/Cargo.lock --- s390-tools-2.31.0/rust-vendor/mockito/Cargo.lock 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 @@ -1,1269 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "aho-corasick" -version = "0.7.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" -dependencies = [ - "memchr", -] - -[[package]] -name = "assert-json-diff" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "base64" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bumpalo" -version = "3.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" - -[[package]] -name = "bytes" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" - -[[package]] -name = "cc" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" - -[[package]] -name = "cfg-if" -version = "1.0.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "colored" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd" -dependencies = [ - "atty", - "lazy_static", - "winapi", -] - -[[package]] -name = "core-foundation" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" - -[[package]] -name = "encoding_rs" -version = "0.8.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "env_logger" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "form_urlencoded" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "futures" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" - -[[package]] -name = "futures-executor" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" - -[[package]] -name = "futures-macro" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" - -[[package]] -name = "futures-task" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" - -[[package]] -name = "futures-util" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "getrandom" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "h2" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] 
-name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "http" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" - -[[package]] -name = "httpdate" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" - -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "hyper" -version = "0.14.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - 
"hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - -[[package]] -name = "idna" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "indexmap" -version = "1.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "ipnet" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" - -[[package]] -name = "itoa" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" - -[[package]] -name = "js-sys" -version = "0.3.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.124" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a41fed9d98f27ab1c6d161da622a4fa35e8a54a8adc24bbf3ddd0ef70b0e50" - -[[package]] -name = "lock_api" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "memchr" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" - -[[package]] -name = "mime" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" - -[[package]] -name = "mio" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" -dependencies = [ - "libc", - "log", - "wasi", - "windows-sys 0.42.0", -] - -[[package]] -name = "mockito" -version = "1.2.0" -dependencies = [ - "assert-json-diff", - "colored", - "env_logger", - "futures", - "hyper", - "log", - "rand", - "regex", - "reqwest", - "serde_json", - "serde_urlencoded", - "similar", - "testing_logger", - "tokio", -] - -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "num_cpus" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" -dependencies = [ - "hermit-abi 0.2.6", - "libc", -] - -[[package]] -name = "once_cell" -version = "1.17.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" - -[[package]] -name = "openssl" -version = "0.10.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" -dependencies = [ - "bitflags", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" -dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "parking_lot" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-sys 0.45.0", -] - -[[package]] -name = "percent-encoding" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" - -[[package]] -name 
= "pin-project-lite" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "proc-macro2" -version = "1.0.51" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - 
"getrandom", -] - -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - -[[package]] -name = "reqwest" -version = "0.11.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" -dependencies = [ - "base64", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - -[[package]] -name = "ryu" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" - -[[package]] -name = "schannel" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" -dependencies = [ - "windows-sys 0.42.0", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "security-framework" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "serde" -version = "1.0.152" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" - -[[package]] -name = "serde_json" -version = "1.0.91" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "signal-hook-registry" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" -dependencies = [ - "libc", -] - -[[package]] -name = "similar" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" - -[[package]] -name = "slab" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" -dependencies = [ - "autocfg", -] - -[[package]] -name = "smallvec" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" - -[[package]] -name = "socket2" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "syn" -version = "1.0.107" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tempfile" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] - -[[package]] -name = "termcolor" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "testing_logger" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d92b727cb45d33ae956f7f46b966b25f1bc712092aeef9dba5ac798fc89f720" -dependencies = [ - "log", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = 
[ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokio" -version = "1.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" -dependencies = [ - "autocfg", - "bytes", - "libc", - "memchr", - "mio", - "num_cpus", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "windows-sys 0.42.0", -] - -[[package]] -name = "tokio-macros" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6a3b08b64e6dfad376fa2432c7b1f01522e37a623c3050bc95db2d3ff21583" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", - "tracing", -] - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - -[[package]] -name = "tracing" -version = "0.1.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" -dependencies = [ - "cfg-if", - "pin-project-lite", - "tracing-core", -] - -[[package]] -name = "tracing-core" -version = "0.1.30" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" -dependencies = [ - "once_cell", -] - -[[package]] -name = "try-lock" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" - -[[package]] -name = "unicode-bidi" -version = "0.3.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" - -[[package]] -name = "unicode-ident" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "url" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", -] - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = [ - "log", - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = 
"0.2.84" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.84" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.84" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.84" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.84" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" - -[[package]] -name = "web-sys" -version = "0.3.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.42.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = 
"0.42.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" - -[[package]] -name = "winreg" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] diff -Nru s390-tools-2.31.0/rust-vendor/mockito/Cargo.toml s390-tools-2.33.1/rust-vendor/mockito/Cargo.toml --- s390-tools-2.31.0/rust-vendor/mockito/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,99 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# 
When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.68" -name = "mockito" -version = "1.2.0" -authors = ["Florin Lipan "] -exclude = [ - "/.appveyor.yml", - "/.travis.yml", - "/benchmarks.txt", - "/docs/", - "/slides.pdf", -] -description = "HTTP mocking for Rust." -homepage = "https://github.com/lipanski/mockito" -documentation = "https://docs.rs/mockito" -readme = "README.md" -keywords = [ - "mock", - "mocks", - "http", - "webmock", - "webmocks", -] -categories = [ - "development-tools::testing", - "web-programming", -] -license = "MIT" -repository = "https://github.com/lipanski/mockito" - -[dependencies.assert-json-diff] -version = "2.0" - -[dependencies.colored] -version = "2.0" -optional = true - -[dependencies.futures] -version = "0.3" - -[dependencies.hyper] -version = "0.14" -features = ["full"] - -[dependencies.log] -version = "0.4" - -[dependencies.rand] -version = "0.8" - -[dependencies.regex] -version = "1.7" - -[dependencies.serde_json] -version = "1.0" - -[dependencies.serde_urlencoded] -version = "0.7" - -[dependencies.similar] -version = "2.2" - -[dependencies.tokio] -version = "1.25" -features = ["full"] - -[dev-dependencies.env_logger] -version = "0.8" - -[dev-dependencies.reqwest] -version = "0.11" - -[dev-dependencies.testing_logger] -version = "0.1" - -[features] -color = ["colored"] -default = ["color"] - -[badges.appveyor] -branch = "master" -repository = "lipanski/mockito" -service = "github" - -[badges.travis-ci] -branch = "master" -repository = "lipanski/mockito" diff -Nru 
s390-tools-2.31.0/rust-vendor/mockito/examples/mockito-server.rs s390-tools-2.33.1/rust-vendor/mockito/examples/mockito-server.rs --- s390-tools-2.31.0/rust-vendor/mockito/examples/mockito-server.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/examples/mockito-server.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -use mockito; - -use std::time::Duration; - -fn main() { - let mut s = mockito::Server::new(); - - s.mock("GET", "/").with_body("hello world"); - - loop { - std::thread::sleep(Duration::from_secs(1)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/LICENSE s390-tools-2.33.1/rust-vendor/mockito/LICENSE --- s390-tools-2.31.0/rust-vendor/mockito/LICENSE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Florin Lipan - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/mockito/README.md s390-tools-2.33.1/rust-vendor/mockito/README.md --- s390-tools-2.31.0/rust-vendor/mockito/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,210 +0,0 @@ -

-

-

::Target as Body>::Error; - - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - Pin::get_mut(self).as_mut().poll_data(cx) - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Pin::get_mut(self).as_mut().poll_trailers(cx) - } - - fn is_end_stream(&self) -> bool { - self.as_ref().is_end_stream() - } - - fn size_hint(&self) -> SizeHint { - self.as_ref().size_hint() - } -} - -impl Body for Box { - type Data = T::Data; - type Error = T::Error; - - fn poll_data( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - Pin::new(&mut **self).poll_data(cx) - } - - fn poll_trailers( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Pin::new(&mut **self).poll_trailers(cx) - } - - fn is_end_stream(&self) -> bool { - self.as_ref().is_end_stream() - } - - fn size_hint(&self) -> SizeHint { - self.as_ref().size_hint() - } -} - -impl Body for http::Request { - type Data = B::Data; - type Error = B::Error; - - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - unsafe { - self.map_unchecked_mut(http::Request::body_mut) - .poll_data(cx) - } - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - unsafe { - self.map_unchecked_mut(http::Request::body_mut) - .poll_trailers(cx) - } - } - - fn is_end_stream(&self) -> bool { - self.body().is_end_stream() - } - - fn size_hint(&self) -> SizeHint { - self.body().size_hint() - } -} - -impl Body for http::Response { - type Data = B::Data; - type Error = B::Error; - - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - unsafe { - self.map_unchecked_mut(http::Response::body_mut) - .poll_data(cx) - } - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - unsafe { - self.map_unchecked_mut(http::Response::body_mut) - .poll_trailers(cx) - } - } - - fn is_end_stream(&self) 
-> bool { - self.body().is_end_stream() - } - - fn size_hint(&self) -> SizeHint { - self.body().size_hint() - } -} - -impl Body for String { - type Data = Bytes; - type Error = Infallible; - - fn poll_data( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll>> { - if !self.is_empty() { - let s = std::mem::take(&mut *self); - Poll::Ready(Some(Ok(s.into_bytes().into()))) - } else { - Poll::Ready(None) - } - } - - fn poll_trailers( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Ok(None)) - } - - fn is_end_stream(&self) -> bool { - self.is_empty() - } - - fn size_hint(&self) -> SizeHint { - SizeHint::with_exact(self.len() as u64) - } -} - -#[cfg(test)] -fn _assert_bounds() { - fn can_be_trait_object(_: &dyn Body>, Error = std::io::Error>) {} -} diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/limited.rs s390-tools-2.33.1/rust-vendor/http-body/src/limited.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/limited.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/limited.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,299 +0,0 @@ -use crate::{Body, SizeHint}; -use bytes::Buf; -use http::HeaderMap; -use pin_project_lite::pin_project; -use std::error::Error; -use std::fmt; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A length limited body. - /// - /// This body will return an error if more than the configured number - /// of bytes are returned on polling the wrapped body. - #[derive(Clone, Copy, Debug)] - pub struct Limited { - remaining: usize, - #[pin] - inner: B, - } -} - -impl Limited { - /// Create a new `Limited`. 
- pub fn new(inner: B, limit: usize) -> Self { - Self { - remaining: limit, - inner, - } - } -} - -impl Body for Limited -where - B: Body, - B::Error: Into>, -{ - type Data = B::Data; - type Error = Box; - - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - let this = self.project(); - let res = match this.inner.poll_data(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(None) => None, - Poll::Ready(Some(Ok(data))) => { - if data.remaining() > *this.remaining { - *this.remaining = 0; - Some(Err(LengthLimitError.into())) - } else { - *this.remaining -= data.remaining(); - Some(Ok(data)) - } - } - Poll::Ready(Some(Err(err))) => Some(Err(err.into())), - }; - - Poll::Ready(res) - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let this = self.project(); - let res = match this.inner.poll_trailers(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(Ok(data)) => Ok(data), - Poll::Ready(Err(err)) => Err(err.into()), - }; - - Poll::Ready(res) - } - - fn is_end_stream(&self) -> bool { - self.inner.is_end_stream() - } - - fn size_hint(&self) -> SizeHint { - use std::convert::TryFrom; - match u64::try_from(self.remaining) { - Ok(n) => { - let mut hint = self.inner.size_hint(); - if hint.lower() >= n { - hint.set_exact(n) - } else if let Some(max) = hint.upper() { - hint.set_upper(n.min(max)) - } else { - hint.set_upper(n) - } - hint - } - Err(_) => self.inner.size_hint(), - } - } -} - -/// An error returned when body length exceeds the configured limit. 
-#[derive(Debug)] -#[non_exhaustive] -pub struct LengthLimitError; - -impl fmt::Display for LengthLimitError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("length limit exceeded") - } -} - -impl Error for LengthLimitError {} - -#[cfg(test)] -mod tests { - use super::*; - use crate::Full; - use bytes::Bytes; - use std::convert::Infallible; - - #[tokio::test] - async fn read_for_body_under_limit_returns_data() { - const DATA: &[u8] = b"testing"; - let inner = Full::new(Bytes::from(DATA)); - let body = &mut Limited::new(inner, 8); - - let mut hint = SizeHint::new(); - hint.set_upper(7); - assert_eq!(body.size_hint().upper(), hint.upper()); - - let data = body.data().await.unwrap().unwrap(); - assert_eq!(data, DATA); - hint.set_upper(0); - assert_eq!(body.size_hint().upper(), hint.upper()); - - assert!(matches!(body.data().await, None)); - } - - #[tokio::test] - async fn read_for_body_over_limit_returns_error() { - const DATA: &[u8] = b"testing a string that is too long"; - let inner = Full::new(Bytes::from(DATA)); - let body = &mut Limited::new(inner, 8); - - let mut hint = SizeHint::new(); - hint.set_upper(8); - assert_eq!(body.size_hint().upper(), hint.upper()); - - let error = body.data().await.unwrap().unwrap_err(); - assert!(matches!(error.downcast_ref(), Some(LengthLimitError))); - } - - struct Chunky(&'static [&'static [u8]]); - - impl Body for Chunky { - type Data = &'static [u8]; - type Error = Infallible; - - fn poll_data( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll>> { - let mut this = self; - match this.0.split_first().map(|(&head, tail)| (Ok(head), tail)) { - Some((data, new_tail)) => { - this.0 = new_tail; - - Poll::Ready(Some(data)) - } - None => Poll::Ready(None), - } - } - - fn poll_trailers( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Ok(Some(HeaderMap::new()))) - } - } - - #[tokio::test] - async fn 
read_for_chunked_body_around_limit_returns_first_chunk_but_returns_error_on_over_limit_chunk( - ) { - const DATA: &[&[u8]] = &[b"testing ", b"a string that is too long"]; - let inner = Chunky(DATA); - let body = &mut Limited::new(inner, 8); - - let mut hint = SizeHint::new(); - hint.set_upper(8); - assert_eq!(body.size_hint().upper(), hint.upper()); - - let data = body.data().await.unwrap().unwrap(); - assert_eq!(data, DATA[0]); - hint.set_upper(0); - assert_eq!(body.size_hint().upper(), hint.upper()); - - let error = body.data().await.unwrap().unwrap_err(); - assert!(matches!(error.downcast_ref(), Some(LengthLimitError))); - } - - #[tokio::test] - async fn read_for_chunked_body_over_limit_on_first_chunk_returns_error() { - const DATA: &[&[u8]] = &[b"testing a string", b" that is too long"]; - let inner = Chunky(DATA); - let body = &mut Limited::new(inner, 8); - - let mut hint = SizeHint::new(); - hint.set_upper(8); - assert_eq!(body.size_hint().upper(), hint.upper()); - - let error = body.data().await.unwrap().unwrap_err(); - assert!(matches!(error.downcast_ref(), Some(LengthLimitError))); - } - - #[tokio::test] - async fn read_for_chunked_body_under_limit_is_okay() { - const DATA: &[&[u8]] = &[b"test", b"ing!"]; - let inner = Chunky(DATA); - let body = &mut Limited::new(inner, 8); - - let mut hint = SizeHint::new(); - hint.set_upper(8); - assert_eq!(body.size_hint().upper(), hint.upper()); - - let data = body.data().await.unwrap().unwrap(); - assert_eq!(data, DATA[0]); - hint.set_upper(4); - assert_eq!(body.size_hint().upper(), hint.upper()); - - let data = body.data().await.unwrap().unwrap(); - assert_eq!(data, DATA[1]); - hint.set_upper(0); - assert_eq!(body.size_hint().upper(), hint.upper()); - - assert!(matches!(body.data().await, None)); - } - - #[tokio::test] - async fn read_for_trailers_propagates_inner_trailers() { - const DATA: &[&[u8]] = &[b"test", b"ing!"]; - let inner = Chunky(DATA); - let body = &mut Limited::new(inner, 8); - let trailers = 
body.trailers().await.unwrap(); - assert_eq!(trailers, Some(HeaderMap::new())) - } - - #[derive(Debug)] - enum ErrorBodyError { - Data, - Trailers, - } - - impl fmt::Display for ErrorBodyError { - fn fmt(&self, _f: &mut fmt::Formatter) -> fmt::Result { - Ok(()) - } - } - - impl Error for ErrorBodyError {} - - struct ErrorBody; - - impl Body for ErrorBody { - type Data = &'static [u8]; - type Error = ErrorBodyError; - - fn poll_data( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll>> { - Poll::Ready(Some(Err(ErrorBodyError::Data))) - } - - fn poll_trailers( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Err(ErrorBodyError::Trailers)) - } - } - - #[tokio::test] - async fn read_for_body_returning_error_propagates_error() { - let body = &mut Limited::new(ErrorBody, 8); - let error = body.data().await.unwrap().unwrap_err(); - assert!(matches!(error.downcast_ref(), Some(ErrorBodyError::Data))); - } - - #[tokio::test] - async fn trailers_for_body_returning_error_propagates_error() { - let body = &mut Limited::new(ErrorBody, 8); - let error = body.trailers().await.unwrap_err(); - assert!(matches!( - error.downcast_ref(), - Some(ErrorBodyError::Trailers) - )); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/next.rs s390-tools-2.33.1/rust-vendor/http-body/src/next.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/next.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/next.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use crate::Body; - -use core::future::Future; -use core::pin::Pin; -use core::task; - -#[must_use = "futures don't do anything unless polled"] -#[derive(Debug)] -/// Future that resolves to the next data chunk from `Body` -pub struct Data<'a, T: ?Sized>(pub(crate) &'a mut T); - -impl<'a, T: Body + Unpin + ?Sized> Future for Data<'a, T> { - type Output = Option>; - - fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> task::Poll { - 
Pin::new(&mut self.0).poll_data(ctx) - } -} - -#[must_use = "futures don't do anything unless polled"] -#[derive(Debug)] -/// Future that resolves to the optional trailers from `Body` -pub struct Trailers<'a, T: ?Sized>(pub(crate) &'a mut T); - -impl<'a, T: Body + Unpin + ?Sized> Future for Trailers<'a, T> { - type Output = Result, T::Error>; - - fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> task::Poll { - Pin::new(&mut self.0).poll_trailers(ctx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/http-body/src/size_hint.rs s390-tools-2.33.1/rust-vendor/http-body/src/size_hint.rs --- s390-tools-2.31.0/rust-vendor/http-body/src/size_hint.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/http-body/src/size_hint.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,86 +0,0 @@ -use std::u64; - -/// A `Body` size hint -/// -/// The default implementation returns: -/// -/// * 0 for `lower` -/// * `None` for `upper`. -#[derive(Debug, Default, Clone)] -pub struct SizeHint { - lower: u64, - upper: Option, -} - -impl SizeHint { - /// Returns a new `SizeHint` with default values - #[inline] - pub fn new() -> SizeHint { - SizeHint::default() - } - - /// Returns a new `SizeHint` with both upper and lower bounds set to the - /// given value. - #[inline] - pub fn with_exact(value: u64) -> SizeHint { - SizeHint { - lower: value, - upper: Some(value), - } - } - - /// Returns the lower bound of data that the `Body` will yield before - /// completing. - #[inline] - pub fn lower(&self) -> u64 { - self.lower - } - - /// Set the value of the `lower` hint. - /// - /// # Panics - /// - /// The function panics if `value` is greater than `upper`. - #[inline] - pub fn set_lower(&mut self, value: u64) { - assert!(value <= self.upper.unwrap_or(u64::MAX)); - self.lower = value; - } - - /// Returns the upper bound of data the `Body` will yield before - /// completing, or `None` if the value is unknown. 
- #[inline] - pub fn upper(&self) -> Option { - self.upper - } - - /// Set the value of the `upper` hint value. - /// - /// # Panics - /// - /// This function panics if `value` is less than `lower`. - #[inline] - pub fn set_upper(&mut self, value: u64) { - assert!(value >= self.lower, "`value` is less than than `lower`"); - - self.upper = Some(value); - } - - /// Returns the exact size of data that will be yielded **if** the - /// `lower` and `upper` bounds are equal. - #[inline] - pub fn exact(&self) -> Option { - if Some(self.lower) == self.upper { - self.upper - } else { - None - } - } - - /// Set the value of the `lower` and `upper` bounds to exactly the same. - #[inline] - pub fn set_exact(&mut self, value: u64) { - self.lower = value; - self.upper = Some(value); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/httpdate/benches/benchmarks.rs s390-tools-2.33.1/rust-vendor/httpdate/benches/benchmarks.rs --- s390-tools-2.31.0/rust-vendor/httpdate/benches/benchmarks.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httpdate/benches/benchmarks.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,57 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; - -pub fn parse_imf_fixdate(c: &mut Criterion) { - c.bench_function("parse_imf_fixdate", |b| { - b.iter(|| { - let d = black_box("Sun, 06 Nov 1994 08:49:37 GMT"); - black_box(httpdate::parse_http_date(d)).unwrap(); - }) - }); -} - -pub fn parse_rfc850_date(c: &mut Criterion) { - c.bench_function("parse_rfc850_date", |b| { - b.iter(|| { - let d = black_box("Sunday, 06-Nov-94 08:49:37 GMT"); - black_box(httpdate::parse_http_date(d)).unwrap(); - }) - }); -} - -pub fn parse_asctime(c: &mut Criterion) { - c.bench_function("parse_asctime", |b| { - b.iter(|| { - let d = black_box("Sun Nov 6 08:49:37 1994"); - black_box(httpdate::parse_http_date(d)).unwrap(); - }) - }); -} - -struct BlackBoxWrite; - -impl std::fmt::Write for BlackBoxWrite { - fn write_str(&mut self, s: &str) -> 
Result<(), std::fmt::Error> { - black_box(s); - Ok(()) - } -} - -pub fn encode_date(c: &mut Criterion) { - c.bench_function("encode_date", |b| { - let d = "Wed, 21 Oct 2015 07:28:00 GMT"; - black_box(httpdate::parse_http_date(d)).unwrap(); - b.iter(|| { - use std::fmt::Write; - let _ = write!(BlackBoxWrite, "{}", d); - }) - }); -} - -criterion_group!( - benches, - parse_imf_fixdate, - parse_rfc850_date, - parse_asctime, - encode_date -); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/httpdate/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/httpdate/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/httpdate/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httpdate/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/httpdate/Cargo.toml s390-tools-2.33.1/rust-vendor/httpdate/Cargo.toml --- s390-tools-2.31.0/rust-vendor/httpdate/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httpdate/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,35 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2021" -rust-version = "1.56" -name = "httpdate" -version = "1.0.3" -authors = ["Pyfisch "] -description = "HTTP date parsing and formatting" -readme = "README.md" -keywords = [ - "http", - "date", - "time", - "simple", - "timestamp", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/pyfisch/httpdate" - -[[bench]] -name = "benchmarks" -harness = false - -[dev-dependencies.criterion] -version = "0.5" diff -Nru s390-tools-2.31.0/rust-vendor/httpdate/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/httpdate/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/httpdate/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httpdate/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, -and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by -the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all -other entities that control, are controlled by, or are under common -control with that entity. For the purposes of this definition, -"control" means (i) the power, direct or indirect, to cause the -direction or management of such entity, whether by contract or -otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity -exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, -including but not limited to software source code, documentation -source, and configuration files. 
- -"Object" form shall mean any form resulting from mechanical -transformation or translation of a Source form, including but -not limited to compiled object code, generated documentation, -and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or -Object form, made available under the License, as indicated by a -copyright notice that is included in or attached to the work -(an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object -form, that is based on (or derived from) the Work and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. For the purposes -of this License, Derivative Works shall not include works that remain -separable from, or merely link (or bind by name) to the interfaces of, -the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including -the original version of the Work and any modifications or additions -to that Work or Derivative Works thereof, that is intentionally -submitted to Licensor for inclusion in the Work by the copyright owner -or by an individual or Legal Entity authorized to submit on behalf of -the copyright owner. For the purposes of this definition, "submitted" -means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, -and issue tracking systems that are managed by, or on behalf of, the -Licensor for the purpose of discussing and improving the Work, but -excluding communication that is conspicuously marked or otherwise -designated in writing by the copyright owner as "Not a Contribution." 
- -"Contributor" shall mean Licensor and any individual or Legal Entity -on behalf of whom a Contribution has been received by Licensor and -subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of -this License, each Contributor hereby grants to You a perpetual, -worldwide, non-exclusive, no-charge, royalty-free, irrevocable -copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the -Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of -this License, each Contributor hereby grants to You a perpetual, -worldwide, non-exclusive, no-charge, royalty-free, irrevocable -(except as stated in this section) patent license to make, have made, -use, offer to sell, sell, import, and otherwise transfer the Work, -where such license applies only to those patent claims licensable -by such Contributor that are necessarily infringed by their -Contribution(s) alone or by combination of their Contribution(s) -with the Work to which such Contribution(s) was submitted. If You -institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work -or a Contribution incorporated within the Work constitutes direct -or contributory patent infringement, then any patent licenses -granted to You under this License for that Work shall terminate -as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the -Work or Derivative Works thereof in any medium, with or without -modifications, and in Source or Object form, provided that You -meet the following conditions: - -(a) You must give any other recipients of the Work or -Derivative Works a copy of this License; and - -(b) You must cause any modified files to carry prominent notices -stating that You changed the files; and - -(c) You must retain, in the Source form of any Derivative Works -that You distribute, all copyright, patent, trademark, and -attribution notices from the Source form of the Work, -excluding those notices that do not pertain to any part of -the Derivative Works; and - -(d) If the Work includes a "NOTICE" text file as part of its -distribution, then any Derivative Works that You distribute must -include a readable copy of the attribution notices contained -within such NOTICE file, excluding those notices that do not -pertain to any part of the Derivative Works, in at least one -of the following places: within a NOTICE text file distributed -as part of the Derivative Works; within the Source form or -documentation, if provided along with the Derivative Works; or, -within a display generated by the Derivative Works, if and -wherever such third-party notices normally appear. The contents -of the NOTICE file are for informational purposes only and -do not modify the License. You may add Your own attribution -notices within Derivative Works that You distribute, alongside -or as an addendum to the NOTICE text from the Work, provided -that such additional attribution notices cannot be construed -as modifying the License. 
- -You may add Your own copyright statement to Your modifications and -may provide additional or different license terms and conditions -for use, reproduction, or distribution of Your modifications, or -for any such Derivative Works as a whole, provided Your use, -reproduction, and distribution of the Work otherwise complies with -the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, -any Contribution intentionally submitted for inclusion in the Work -by You to the Licensor shall be under the terms and conditions of -this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify -the terms of any separate license agreement you may have executed -with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade -names, trademarks, service marks, or product names of the Licensor, -except as required for reasonable and customary use in describing the -origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or -agreed to in writing, Licensor provides the Work (and each -Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -implied, including, without limitation, any warranties or conditions -of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -PARTICULAR PURPOSE. You are solely responsible for determining the -appropriateness of using or redistributing the Work and assume any -risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, -whether in tort (including negligence), contract, or otherwise, -unless required by applicable law (such as deliberate and grossly -negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, -incidental, or consequential damages of any character arising as a -result of this License or out of the use or inability to use the -Work (including but not limited to damages for loss of goodwill, -work stoppage, computer failure or malfunction, or any and all -other commercial damages or losses), even if such Contributor -has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing -the Work or Derivative Works thereof, You may choose to offer, -and charge a fee for, acceptance of support, warranty, indemnity, -or other liability obligations and/or rights consistent with this -License. However, in accepting such obligations, You may act only -on Your own behalf and on Your sole responsibility, not on behalf -of any other Contributor, and only if You agree to indemnify, -defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason -of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - -To apply the Apache License to your work, attach the following -boilerplate notice, with the fields enclosed by brackets "[]" -replaced with your own identifying information. (Don't include -the brackets!) The text should be enclosed in the appropriate -comment syntax for the file format. We also recommend that a -file or class name and description of purpose be included on the -same "printed page" as the copyright notice for easier -identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/httpdate/LICENSE-MIT s390-tools-2.33.1/rust-vendor/httpdate/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/httpdate/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httpdate/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -Copyright (c) 2016 Pyfisch - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/httpdate/README.md s390-tools-2.33.1/rust-vendor/httpdate/README.md --- s390-tools-2.31.0/rust-vendor/httpdate/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httpdate/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -# Date and time utils for HTTP. - -[![Build Status](https://github.com/pyfisch/httpdate/actions/workflows/ci.yml/badge.svg)](https://github.com/pyfisch/httpdate/actions/workflows/ci.yml) -[![Crates.io](https://img.shields.io/crates/v/httpdate.svg)](https://crates.io/crates/httpdate) -[![Documentation](https://docs.rs/httpdate/badge.svg)](https://docs.rs/httpdate) - -Multiple HTTP header fields store timestamps. -For example a response created on May 15, 2015 may contain the header -`Date: Fri, 15 May 2015 15:34:21 GMT`. Since the timestamp does not -contain any timezone or leap second information it is equvivalent to -writing 1431696861 Unix time. Rust’s `SystemTime` is used to store -these timestamps. - -This crate provides two public functions: - -* `parse_http_date` to parse a HTTP datetime string to a system time -* `fmt_http_date` to format a system time to a IMF-fixdate - -In addition it exposes the `HttpDate` type that can be used to parse -and format timestamps. Convert a sytem time to `HttpDate` and vice versa. -The `HttpDate` (8 bytes) is smaller than `SystemTime` (16 bytes) and -using the display impl avoids a temporary allocation. - -Read the [blog post](https://pyfisch.org/blog/http-datetime-handling/) to learn -more. - -Fuzz it by installing *cargo-fuzz* and running `cargo fuzz run fuzz_target_1`. 
diff -Nru s390-tools-2.31.0/rust-vendor/httpdate/src/date.rs s390-tools-2.33.1/rust-vendor/httpdate/src/date.rs --- s390-tools-2.31.0/rust-vendor/httpdate/src/date.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httpdate/src/date.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,420 +0,0 @@ -use std::cmp; -use std::fmt::{self, Display, Formatter}; -use std::str::FromStr; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; - -use crate::Error; - -/// HTTP timestamp type. -/// -/// Parse using `FromStr` impl. -/// Format using the `Display` trait. -/// Convert timestamp into/from `SytemTime` to use. -/// Supports comparsion and sorting. -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] -pub struct HttpDate { - /// 0...59 - sec: u8, - /// 0...59 - min: u8, - /// 0...23 - hour: u8, - /// 1...31 - day: u8, - /// 1...12 - mon: u8, - /// 1970...9999 - year: u16, - /// 1...7 - wday: u8, -} - -impl HttpDate { - fn is_valid(&self) -> bool { - self.sec < 60 - && self.min < 60 - && self.hour < 24 - && self.day > 0 - && self.day < 32 - && self.mon > 0 - && self.mon <= 12 - && self.year >= 1970 - && self.year <= 9999 - && &HttpDate::from(SystemTime::from(*self)) == self - } -} - -impl From for HttpDate { - fn from(v: SystemTime) -> HttpDate { - let dur = v - .duration_since(UNIX_EPOCH) - .expect("all times should be after the epoch"); - let secs_since_epoch = dur.as_secs(); - - if secs_since_epoch >= 253402300800 { - // year 9999 - panic!("date must be before year 9999"); - } - - /* 2000-03-01 (mod 400 year, immediately after feb29 */ - const LEAPOCH: i64 = 11017; - const DAYS_PER_400Y: i64 = 365 * 400 + 97; - const DAYS_PER_100Y: i64 = 365 * 100 + 24; - const DAYS_PER_4Y: i64 = 365 * 4 + 1; - - let days = (secs_since_epoch / 86400) as i64 - LEAPOCH; - let secs_of_day = secs_since_epoch % 86400; - - let mut qc_cycles = days / DAYS_PER_400Y; - let mut remdays = days % DAYS_PER_400Y; - - if remdays < 0 { - remdays += DAYS_PER_400Y; - qc_cycles -= 1; - } - - 
let mut c_cycles = remdays / DAYS_PER_100Y; - if c_cycles == 4 { - c_cycles -= 1; - } - remdays -= c_cycles * DAYS_PER_100Y; - - let mut q_cycles = remdays / DAYS_PER_4Y; - if q_cycles == 25 { - q_cycles -= 1; - } - remdays -= q_cycles * DAYS_PER_4Y; - - let mut remyears = remdays / 365; - if remyears == 4 { - remyears -= 1; - } - remdays -= remyears * 365; - - let mut year = 2000 + remyears + 4 * q_cycles + 100 * c_cycles + 400 * qc_cycles; - - let months = [31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 31, 29]; - let mut mon = 0; - for mon_len in months.iter() { - mon += 1; - if remdays < *mon_len { - break; - } - remdays -= *mon_len; - } - let mday = remdays + 1; - let mon = if mon + 2 > 12 { - year += 1; - mon - 10 - } else { - mon + 2 - }; - - let mut wday = (3 + days) % 7; - if wday <= 0 { - wday += 7 - }; - - HttpDate { - sec: (secs_of_day % 60) as u8, - min: ((secs_of_day % 3600) / 60) as u8, - hour: (secs_of_day / 3600) as u8, - day: mday as u8, - mon: mon as u8, - year: year as u16, - wday: wday as u8, - } - } -} - -impl From for SystemTime { - fn from(v: HttpDate) -> SystemTime { - let leap_years = - ((v.year - 1) - 1968) / 4 - ((v.year - 1) - 1900) / 100 + ((v.year - 1) - 1600) / 400; - let mut ydays = match v.mon { - 1 => 0, - 2 => 31, - 3 => 59, - 4 => 90, - 5 => 120, - 6 => 151, - 7 => 181, - 8 => 212, - 9 => 243, - 10 => 273, - 11 => 304, - 12 => 334, - _ => unreachable!(), - } + v.day as u64 - - 1; - if is_leap_year(v.year) && v.mon > 2 { - ydays += 1; - } - let days = (v.year as u64 - 1970) * 365 + leap_years as u64 + ydays; - UNIX_EPOCH - + Duration::from_secs( - v.sec as u64 + v.min as u64 * 60 + v.hour as u64 * 3600 + days * 86400, - ) - } -} - -impl FromStr for HttpDate { - type Err = Error; - - fn from_str(s: &str) -> Result { - if !s.is_ascii() { - return Err(Error(())); - } - let x = s.trim().as_bytes(); - let date = parse_imf_fixdate(x) - .or_else(|_| parse_rfc850_date(x)) - .or_else(|_| parse_asctime(x))?; - if !date.is_valid() { - return 
Err(Error(())); - } - Ok(date) - } -} - -impl Display for HttpDate { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - let wday = match self.wday { - 1 => b"Mon", - 2 => b"Tue", - 3 => b"Wed", - 4 => b"Thu", - 5 => b"Fri", - 6 => b"Sat", - 7 => b"Sun", - _ => unreachable!(), - }; - - let mon = match self.mon { - 1 => b"Jan", - 2 => b"Feb", - 3 => b"Mar", - 4 => b"Apr", - 5 => b"May", - 6 => b"Jun", - 7 => b"Jul", - 8 => b"Aug", - 9 => b"Sep", - 10 => b"Oct", - 11 => b"Nov", - 12 => b"Dec", - _ => unreachable!(), - }; - - let mut buf: [u8; 29] = *b" , 00 0000 00:00:00 GMT"; - buf[0] = wday[0]; - buf[1] = wday[1]; - buf[2] = wday[2]; - buf[5] = b'0' + (self.day / 10); - buf[6] = b'0' + (self.day % 10); - buf[8] = mon[0]; - buf[9] = mon[1]; - buf[10] = mon[2]; - buf[12] = b'0' + (self.year / 1000) as u8; - buf[13] = b'0' + (self.year / 100 % 10) as u8; - buf[14] = b'0' + (self.year / 10 % 10) as u8; - buf[15] = b'0' + (self.year % 10) as u8; - buf[17] = b'0' + (self.hour / 10); - buf[18] = b'0' + (self.hour % 10); - buf[20] = b'0' + (self.min / 10); - buf[21] = b'0' + (self.min % 10); - buf[23] = b'0' + (self.sec / 10); - buf[24] = b'0' + (self.sec % 10); - f.write_str(std::str::from_utf8(&buf[..]).unwrap()) - } -} - -impl Ord for HttpDate { - fn cmp(&self, other: &HttpDate) -> cmp::Ordering { - SystemTime::from(*self).cmp(&SystemTime::from(*other)) - } -} - -impl PartialOrd for HttpDate { - fn partial_cmp(&self, other: &HttpDate) -> Option { - Some(self.cmp(other)) - } -} - -fn toint_1(x: u8) -> Result { - let result = x.wrapping_sub(b'0'); - if result < 10 { - Ok(result) - } else { - Err(Error(())) - } -} - -fn toint_2(s: &[u8]) -> Result { - let high = s[0].wrapping_sub(b'0'); - let low = s[1].wrapping_sub(b'0'); - - if high < 10 && low < 10 { - Ok(high * 10 + low) - } else { - Err(Error(())) - } -} - -#[allow(clippy::many_single_char_names)] -fn toint_4(s: &[u8]) -> Result { - let a = u16::from(s[0].wrapping_sub(b'0')); - let b = 
u16::from(s[1].wrapping_sub(b'0')); - let c = u16::from(s[2].wrapping_sub(b'0')); - let d = u16::from(s[3].wrapping_sub(b'0')); - - if a < 10 && b < 10 && c < 10 && d < 10 { - Ok(a * 1000 + b * 100 + c * 10 + d) - } else { - Err(Error(())) - } -} - -fn parse_imf_fixdate(s: &[u8]) -> Result { - // Example: `Sun, 06 Nov 1994 08:49:37 GMT` - if s.len() != 29 || &s[25..] != b" GMT" || s[16] != b' ' || s[19] != b':' || s[22] != b':' { - return Err(Error(())); - } - Ok(HttpDate { - sec: toint_2(&s[23..25])?, - min: toint_2(&s[20..22])?, - hour: toint_2(&s[17..19])?, - day: toint_2(&s[5..7])?, - mon: match &s[7..12] { - b" Jan " => 1, - b" Feb " => 2, - b" Mar " => 3, - b" Apr " => 4, - b" May " => 5, - b" Jun " => 6, - b" Jul " => 7, - b" Aug " => 8, - b" Sep " => 9, - b" Oct " => 10, - b" Nov " => 11, - b" Dec " => 12, - _ => return Err(Error(())), - }, - year: toint_4(&s[12..16])?, - wday: match &s[..5] { - b"Mon, " => 1, - b"Tue, " => 2, - b"Wed, " => 3, - b"Thu, " => 4, - b"Fri, " => 5, - b"Sat, " => 6, - b"Sun, " => 7, - _ => return Err(Error(())), - }, - }) -} - -fn parse_rfc850_date(s: &[u8]) -> Result { - // Example: `Sunday, 06-Nov-94 08:49:37 GMT` - if s.len() < 23 { - return Err(Error(())); - } - - fn wday<'a>(s: &'a [u8], wday: u8, name: &'static [u8]) -> Option<(u8, &'a [u8])> { - if &s[0..name.len()] == name { - return Some((wday, &s[name.len()..])); - } - None - } - let (wday, s) = wday(s, 1, b"Monday, ") - .or_else(|| wday(s, 2, b"Tuesday, ")) - .or_else(|| wday(s, 3, b"Wednesday, ")) - .or_else(|| wday(s, 4, b"Thursday, ")) - .or_else(|| wday(s, 5, b"Friday, ")) - .or_else(|| wday(s, 6, b"Saturday, ")) - .or_else(|| wday(s, 7, b"Sunday, ")) - .ok_or(Error(()))?; - if s.len() != 22 || s[12] != b':' || s[15] != b':' || &s[18..22] != b" GMT" { - return Err(Error(())); - } - let mut year = u16::from(toint_2(&s[7..9])?); - if year < 70 { - year += 2000; - } else { - year += 1900; - } - Ok(HttpDate { - sec: toint_2(&s[16..18])?, - min: toint_2(&s[13..15])?, - 
hour: toint_2(&s[10..12])?, - day: toint_2(&s[0..2])?, - mon: match &s[2..7] { - b"-Jan-" => 1, - b"-Feb-" => 2, - b"-Mar-" => 3, - b"-Apr-" => 4, - b"-May-" => 5, - b"-Jun-" => 6, - b"-Jul-" => 7, - b"-Aug-" => 8, - b"-Sep-" => 9, - b"-Oct-" => 10, - b"-Nov-" => 11, - b"-Dec-" => 12, - _ => return Err(Error(())), - }, - year, - wday, - }) -} - -fn parse_asctime(s: &[u8]) -> Result { - // Example: `Sun Nov 6 08:49:37 1994` - if s.len() != 24 || s[10] != b' ' || s[13] != b':' || s[16] != b':' || s[19] != b' ' { - return Err(Error(())); - } - Ok(HttpDate { - sec: toint_2(&s[17..19])?, - min: toint_2(&s[14..16])?, - hour: toint_2(&s[11..13])?, - day: { - let x = &s[8..10]; - { - if x[0] == b' ' { - toint_1(x[1]) - } else { - toint_2(x) - } - }? - }, - mon: match &s[4..8] { - b"Jan " => 1, - b"Feb " => 2, - b"Mar " => 3, - b"Apr " => 4, - b"May " => 5, - b"Jun " => 6, - b"Jul " => 7, - b"Aug " => 8, - b"Sep " => 9, - b"Oct " => 10, - b"Nov " => 11, - b"Dec " => 12, - _ => return Err(Error(())), - }, - year: toint_4(&s[20..24])?, - wday: match &s[0..4] { - b"Mon " => 1, - b"Tue " => 2, - b"Wed " => 3, - b"Thu " => 4, - b"Fri " => 5, - b"Sat " => 6, - b"Sun " => 7, - _ => return Err(Error(())), - }, - }) -} - -fn is_leap_year(y: u16) -> bool { - y % 4 == 0 && (y % 100 != 0 || y % 400 == 0) -} diff -Nru s390-tools-2.31.0/rust-vendor/httpdate/src/lib.rs s390-tools-2.33.1/rust-vendor/httpdate/src/lib.rs --- s390-tools-2.31.0/rust-vendor/httpdate/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/httpdate/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,160 +0,0 @@ -//! Date and time utils for HTTP. -//! -//! Multiple HTTP header fields store timestamps. -//! For example a response created on May 15, 2015 may contain the header -//! `Date: Fri, 15 May 2015 15:34:21 GMT`. Since the timestamp does not -//! contain any timezone or leap second information it is equvivalent to -//! writing 1431696861 Unix time. 
Rust’s `SystemTime` is used to store -//! these timestamps. -//! -//! This crate provides two public functions: -//! -//! * `parse_http_date` to parse a HTTP datetime string to a system time -//! * `fmt_http_date` to format a system time to a IMF-fixdate -//! -//! In addition it exposes the `HttpDate` type that can be used to parse -//! and format timestamps. Convert a sytem time to `HttpDate` and vice versa. -//! The `HttpDate` (8 bytes) is smaller than `SystemTime` (16 bytes) and -//! using the display impl avoids a temporary allocation. -#![forbid(unsafe_code)] - -use std::error; -use std::fmt::{self, Display, Formatter}; -use std::io; -use std::time::SystemTime; - -pub use date::HttpDate; - -mod date; - -/// An opaque error type for all parsing errors. -#[derive(Debug)] -pub struct Error(()); - -impl error::Error for Error {} - -impl Display for Error { - fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { - f.write_str("string contains no or an invalid date") - } -} - -impl From for io::Error { - fn from(e: Error) -> io::Error { - io::Error::new(io::ErrorKind::Other, e) - } -} - -/// Parse a date from an HTTP header field. -/// -/// Supports the preferred IMF-fixdate and the legacy RFC 805 and -/// ascdate formats. Two digit years are mapped to dates between -/// 1970 and 2069. -pub fn parse_http_date(s: &str) -> Result { - s.parse::().map(|d| d.into()) -} - -/// Format a date to be used in a HTTP header field. -/// -/// Dates are formatted as IMF-fixdate: `Fri, 15 May 2015 15:34:21 GMT`. 
-pub fn fmt_http_date(d: SystemTime) -> String { - format!("{}", HttpDate::from(d)) -} - -#[cfg(test)] -mod tests { - use std::str; - use std::time::{Duration, UNIX_EPOCH}; - - use super::{fmt_http_date, parse_http_date, HttpDate}; - - #[test] - fn test_rfc_example() { - let d = UNIX_EPOCH + Duration::from_secs(784111777); - assert_eq!( - d, - parse_http_date("Sun, 06 Nov 1994 08:49:37 GMT").expect("#1") - ); - assert_eq!( - d, - parse_http_date("Sunday, 06-Nov-94 08:49:37 GMT").expect("#2") - ); - assert_eq!(d, parse_http_date("Sun Nov 6 08:49:37 1994").expect("#3")); - } - - #[test] - fn test2() { - let d = UNIX_EPOCH + Duration::from_secs(1475419451); - assert_eq!( - d, - parse_http_date("Sun, 02 Oct 2016 14:44:11 GMT").expect("#1") - ); - assert!(parse_http_date("Sun Nov 10 08:00:00 1000").is_err()); - assert!(parse_http_date("Sun Nov 10 08*00:00 2000").is_err()); - assert!(parse_http_date("Sunday, 06-Nov-94 08+49:37 GMT").is_err()); - } - - #[test] - fn test3() { - let mut d = UNIX_EPOCH; - assert_eq!(d, parse_http_date("Thu, 01 Jan 1970 00:00:00 GMT").unwrap()); - d += Duration::from_secs(3600); - assert_eq!(d, parse_http_date("Thu, 01 Jan 1970 01:00:00 GMT").unwrap()); - d += Duration::from_secs(86400); - assert_eq!(d, parse_http_date("Fri, 02 Jan 1970 01:00:00 GMT").unwrap()); - d += Duration::from_secs(2592000); - assert_eq!(d, parse_http_date("Sun, 01 Feb 1970 01:00:00 GMT").unwrap()); - d += Duration::from_secs(2592000); - assert_eq!(d, parse_http_date("Tue, 03 Mar 1970 01:00:00 GMT").unwrap()); - d += Duration::from_secs(31536005); - assert_eq!(d, parse_http_date("Wed, 03 Mar 1971 01:00:05 GMT").unwrap()); - d += Duration::from_secs(15552000); - assert_eq!(d, parse_http_date("Mon, 30 Aug 1971 01:00:05 GMT").unwrap()); - d += Duration::from_secs(6048000); - assert_eq!(d, parse_http_date("Mon, 08 Nov 1971 01:00:05 GMT").unwrap()); - d += Duration::from_secs(864000000); - assert_eq!(d, parse_http_date("Fri, 26 Mar 1999 01:00:05 GMT").unwrap()); - } - - 
#[test] - fn test_fmt() { - let d = UNIX_EPOCH; - assert_eq!(fmt_http_date(d), "Thu, 01 Jan 1970 00:00:00 GMT"); - let d = UNIX_EPOCH + Duration::from_secs(1475419451); - assert_eq!(fmt_http_date(d), "Sun, 02 Oct 2016 14:44:11 GMT"); - } - - #[allow(dead_code)] - fn testcase(data: &[u8]) { - if let Ok(s) = str::from_utf8(data) { - println!("{:?}", s); - if let Ok(d) = parse_http_date(s) { - let o = fmt_http_date(d); - assert!(!o.is_empty()); - } - } - } - - #[test] - fn size_of() { - assert_eq!(::std::mem::size_of::(), 8); - } - - #[test] - fn test_date_comparison() { - let a = UNIX_EPOCH + Duration::from_secs(784111777); - let b = a + Duration::from_secs(30); - assert!(a < b); - let a_date: HttpDate = a.into(); - let b_date: HttpDate = b.into(); - assert!(a_date < b_date); - assert_eq!(a_date.cmp(&b_date), ::std::cmp::Ordering::Less) - } - - #[test] - fn test_parse_bad_date() { - // 1994-11-07 is actually a Monday - let parsed = "Sun, 07 Nov 1994 08:48:37 GMT".parse::(); - assert!(parsed.is_err()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/hyper/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/hyper/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/hyper/Cargo.lock s390-tools-2.33.1/rust-vendor/hyper/Cargo.lock --- s390-tools-2.31.0/rust-vendor/hyper/Cargo.lock 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 @@ -1,814 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "aho-corasick" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" -dependencies = [ - "memchr", -] - -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "bytes" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "form_urlencoded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "futures-channel" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" -dependencies = [ - "futures-core", -] - -[[package]] -name = "futures-core" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" - -[[package]] -name = "futures-sink" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" - -[[package]] -name = "futures-task" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" - -[[package]] -name = "futures-util" -version = "0.3.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" -dependencies = [ - "futures-core", - "futures-task", - "pin-project-lite", - "pin-utils", -] - -[[package]] -name = "h2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "http" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" - -[[package]] -name = "httpdate" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" - -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error", -] - -[[package]] -name = "hyper" -version = "0.14.27" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "libc", - "matches", - "num_cpus", - 
"pin-project-lite", - "pnet_datalink", - "pretty_env_logger", - "serde", - "serde_json", - "socket2", - "spmc", - "tokio", - "tokio-test", - "tokio-util", - "tower", - "tower-service", - "tracing", - "url", - "want", -] - -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "ipnetwork" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" -dependencies = [ - "serde", -] - -[[package]] -name = "itoa" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" - -[[package]] -name = "libc" -version = "0.2.146" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" - -[[package]] -name = "log" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" - -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "mio" -version = "0.8.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" -dependencies = [ - "libc", - "wasi", - "windows-sys", -] - -[[package]] -name = "num_cpus" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" -dependencies = [ - "hermit-abi 0.2.6", - "libc", -] - -[[package]] -name = "once_cell" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" - -[[package]] -name = "percent-encoding" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" - -[[package]] -name = "pin-project" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pnet_base" -version = "0.27.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4688aa497ef62129f302a5800ebde67825f8ff129f43690ca84099f6620bed" - -[[package]] -name = 
"pnet_datalink" -version = "0.27.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59001c9c4d9d23bf2f61afaaf134a766fd6932ba2557c606b9112157053b9ac7" -dependencies = [ - "ipnetwork", - "libc", - "pnet_base", - "pnet_sys", - "winapi", -] - -[[package]] -name = "pnet_sys" -version = "0.27.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7589e4c4e7ed72a3ffdff8a65d3bea84e8c3a23e19d0a10e8f45efdf632fff15" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "pretty_env_logger" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" -dependencies = [ - "env_logger", - "log", -] - -[[package]] -name = "proc-macro2" -version = "1.0.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] -name = "quote" -version = "1.0.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "regex" -version = "1.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" - -[[package]] -name = "ryu" -version = "1.0.13" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" - -[[package]] -name = "serde" -version = "1.0.164" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.164" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.97" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "slab" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" -dependencies = [ - "autocfg", -] - -[[package]] -name = "socket2" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "spmc" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02a8428da277a8e3a15271d79943e80ccc2ef254e78813a166a08d65e4c3ece5" - -[[package]] -name = "syn" -version = "2.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokio" -version = "1.28.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" -dependencies = [ - "autocfg", - "bytes", - "libc", - "mio", - "num_cpus", - "pin-project-lite", - "socket2", - "tokio-macros", - "windows-sys", -] - -[[package]] -name = "tokio-macros" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tokio-stream" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-test" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3" -dependencies = [ - "async-stream", - "bytes", - "futures-core", - "tokio", - "tokio-stream", -] - -[[package]] -name = "tokio-util" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - 
"pin-project-lite", - "tokio", - "tracing", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tokio", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-layer" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - -[[package]] -name = "tracing" -version = "0.1.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" -dependencies = [ - "cfg-if", - "pin-project-lite", - "tracing-core", -] - -[[package]] -name = "tracing-core" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" -dependencies = [ - "once_cell", -] - -[[package]] -name = "try-lock" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" - -[[package]] -name = "unicode-bidi" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" - -[[package]] -name = "unicode-ident" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "url" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", -] - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" diff -Nru 
s390-tools-2.31.0/rust-vendor/hyper/Cargo.toml s390-tools-2.33.1/rust-vendor/hyper/Cargo.toml --- s390-tools-2.31.0/rust-vendor/hyper/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,331 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "hyper" -version = "0.14.27" -authors = ["Sean McArthur "] -include = [ - "Cargo.toml", - "LICENSE", - "src/**/*", -] -description = "A fast and correct HTTP library." -homepage = "https://hyper.rs" -documentation = "https://docs.rs/hyper" -readme = "README.md" -keywords = [ - "http", - "hyper", - "hyperium", -] -categories = [ - "network-programming", - "web-programming::http-client", - "web-programming::http-server", -] -license = "MIT" -repository = "https://github.com/hyperium/hyper" - -[package.metadata.docs.rs] -features = [ - "ffi", - "full", -] -rustdoc-args = [ - "--cfg", - "docsrs", - "--cfg", - "hyper_unstable_ffi", -] - -[package.metadata.playground] -features = ["full"] - -[profile.bench] -codegen-units = 1 -incremental = false - -[profile.release] -codegen-units = 1 -incremental = false - -[[example]] -name = "client" -path = "examples/client.rs" -required-features = ["full"] - -[[example]] -name = "client_json" -path = "examples/client_json.rs" -required-features = ["full"] - -[[example]] -name = "echo" -path = "examples/echo.rs" -required-features = ["full"] - -[[example]] -name = "gateway" -path = "examples/gateway.rs" -required-features = ["full"] - -[[example]] 
-name = "hello" -path = "examples/hello.rs" -required-features = ["full"] - -[[example]] -name = "http_proxy" -path = "examples/http_proxy.rs" -required-features = ["full"] - -[[example]] -name = "multi_server" -path = "examples/multi_server.rs" -required-features = ["full"] - -[[example]] -name = "params" -path = "examples/params.rs" -required-features = ["full"] - -[[example]] -name = "send_file" -path = "examples/send_file.rs" -required-features = ["full"] - -[[example]] -name = "service_struct_impl" -path = "examples/service_struct_impl.rs" -required-features = ["full"] - -[[example]] -name = "single_threaded" -path = "examples/single_threaded.rs" -required-features = ["full"] - -[[example]] -name = "state" -path = "examples/state.rs" -required-features = ["full"] - -[[example]] -name = "tower_client" -path = "examples/tower_client.rs" -required-features = ["full"] - -[[example]] -name = "tower_server" -path = "examples/tower_server.rs" -required-features = ["full"] - -[[example]] -name = "upgrades" -path = "examples/upgrades.rs" -required-features = ["full"] - -[[example]] -name = "web_api" -path = "examples/web_api.rs" -required-features = ["full"] - -[[test]] -name = "client" -path = "tests/client.rs" -required-features = ["full"] - -[[test]] -name = "integration" -path = "tests/integration.rs" -required-features = ["full"] - -[[test]] -name = "server" -path = "tests/server.rs" -required-features = ["full"] - -[[bench]] -name = "body" -path = "benches/body.rs" -required-features = ["full"] - -[[bench]] -name = "connect" -path = "benches/connect.rs" -required-features = ["full"] - -[[bench]] -name = "end_to_end" -path = "benches/end_to_end.rs" -required-features = ["full"] - -[[bench]] -name = "pipeline" -path = "benches/pipeline.rs" -required-features = ["full"] - -[[bench]] -name = "server" -path = "benches/server.rs" -required-features = ["full"] - -[dependencies.bytes] -version = "1" - -[dependencies.futures-channel] -version = "0.3" - 
-[dependencies.futures-core] -version = "0.3" -default-features = false - -[dependencies.futures-util] -version = "0.3" -default-features = false - -[dependencies.h2] -version = "0.3.17" -optional = true - -[dependencies.http] -version = "0.2" - -[dependencies.http-body] -version = "0.4" - -[dependencies.httparse] -version = "1.8" - -[dependencies.httpdate] -version = "1.0" - -[dependencies.itoa] -version = "1" - -[dependencies.libc] -version = "0.2" -optional = true - -[dependencies.pin-project-lite] -version = "0.2.4" - -[dependencies.socket2] -version = "0.4.7" -features = ["all"] -optional = true - -[dependencies.tokio] -version = "1" -features = ["sync"] - -[dependencies.tower-service] -version = "0.3" - -[dependencies.tracing] -version = "0.1" -features = ["std"] -default-features = false - -[dependencies.want] -version = "0.3" - -[dev-dependencies.futures-util] -version = "0.3" -features = ["alloc"] -default-features = false - -[dev-dependencies.matches] -version = "0.1" - -[dev-dependencies.num_cpus] -version = "1.0" - -[dev-dependencies.pretty_env_logger] -version = "0.4" - -[dev-dependencies.serde] -version = "1.0" -features = ["derive"] - -[dev-dependencies.serde_json] -version = "1.0" - -[dev-dependencies.spmc] -version = "0.3" - -[dev-dependencies.tokio] -version = "1" -features = [ - "fs", - "macros", - "io-std", - "io-util", - "rt", - "rt-multi-thread", - "sync", - "time", - "test-util", -] - -[dev-dependencies.tokio-test] -version = "0.4" - -[dev-dependencies.tokio-util] -version = "0.7" -features = ["codec"] - -[dev-dependencies.tower] -version = "0.4" -features = [ - "make", - "util", -] -default-features = false - -[dev-dependencies.url] -version = "2.2" - -[features] -__internal_happy_eyeballs_tests = [] -backports = [] -client = [] -default = [] -deprecated = [] -ffi = ["libc"] -full = [ - "client", - "http1", - "http2", - "server", - "stream", - "runtime", -] -http1 = [] -http2 = ["h2"] -nightly = [] -runtime = [ - "tcp", - "tokio/rt", - 
"tokio/time", -] -server = [] -stream = [] -tcp = [ - "socket2", - "tokio/net", - "tokio/rt", - "tokio/time", -] - -[target."cfg(any(target_os = \"linux\", target_os = \"macos\"))".dev-dependencies.pnet_datalink] -version = "0.27.2" diff -Nru s390-tools-2.31.0/rust-vendor/hyper/LICENSE s390-tools-2.33.1/rust-vendor/hyper/LICENSE --- s390-tools-2.31.0/rust-vendor/hyper/LICENSE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -Copyright (c) 2014-2021 Sean McArthur - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/body/aggregate.rs s390-tools-2.33.1/rust-vendor/hyper/src/body/aggregate.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/body/aggregate.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/body/aggregate.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use bytes::Buf; - -use super::HttpBody; -use crate::common::buf::BufList; - -/// Aggregate the data buffers from a body asynchronously. -/// -/// The returned `impl Buf` groups the `Buf`s from the `HttpBody` without -/// copying them. This is ideal if you don't require a contiguous buffer. -/// -/// # Note -/// -/// Care needs to be taken if the remote is untrusted. The function doesn't implement any length -/// checks and an malicious peer might make it consume arbitrary amounts of memory. Checking the -/// `Content-Length` is a possibility, but it is not strictly mandated to be present. -pub async fn aggregate(body: T) -> Result -where - T: HttpBody, -{ - let mut bufs = BufList::new(); - - futures_util::pin_mut!(body); - while let Some(buf) = body.data().await { - let buf = buf?; - if buf.has_remaining() { - bufs.push(buf); - } - } - - Ok(bufs) -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/body/body.rs s390-tools-2.33.1/rust-vendor/hyper/src/body/body.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/body/body.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/body/body.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,784 +0,0 @@ -use std::borrow::Cow; -#[cfg(feature = "stream")] -use std::error::Error as StdError; -use std::fmt; - -use bytes::Bytes; -use futures_channel::mpsc; -use futures_channel::oneshot; -use futures_core::Stream; // for mpsc::Receiver -#[cfg(feature = "stream")] -use futures_util::TryStreamExt; -use http::HeaderMap; -use http_body::{Body as HttpBody, SizeHint}; - -use super::DecodedLength; -#[cfg(feature = "stream")] -use crate::common::sync_wrapper::SyncWrapper; -use 
crate::common::Future; -#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -use crate::common::Never; -use crate::common::{task, watch, Pin, Poll}; -#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] -use crate::proto::h2::ping; - -type BodySender = mpsc::Sender>; -type TrailersSender = oneshot::Sender; - -/// A stream of `Bytes`, used when receiving bodies. -/// -/// A good default [`HttpBody`](crate::body::HttpBody) to use in many -/// applications. -/// -/// Note: To read the full body, use [`body::to_bytes`](crate::body::to_bytes()) -/// or [`body::aggregate`](crate::body::aggregate()). -#[must_use = "streams do nothing unless polled"] -pub struct Body { - kind: Kind, - /// Keep the extra bits in an `Option>`, so that - /// Body stays small in the common case (no extras needed). - extra: Option>, -} - -enum Kind { - Once(Option), - Chan { - content_length: DecodedLength, - want_tx: watch::Sender, - data_rx: mpsc::Receiver>, - trailers_rx: oneshot::Receiver, - }, - #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] - H2 { - ping: ping::Recorder, - content_length: DecodedLength, - recv: h2::RecvStream, - }, - #[cfg(feature = "ffi")] - Ffi(crate::ffi::UserBody), - #[cfg(feature = "stream")] - Wrapped( - SyncWrapper< - Pin>> + Send>>, - >, - ), -} - -struct Extra { - /// Allow the client to pass a future to delay the `Body` from returning - /// EOF. This allows the `Client` to try to put the idle connection - /// back into the pool before the body is "finished". - /// - /// The reason for this is so that creating a new request after finishing - /// streaming the body of a response could sometimes result in creating - /// a brand new connection, since the pool didn't know about the idle - /// connection yet. 
- delayed_eof: Option, -} - -#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -type DelayEofUntil = oneshot::Receiver; - -enum DelayEof { - /// Initial state, stream hasn't seen EOF yet. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - NotEof(DelayEofUntil), - /// Transitions to this state once we've seen `poll` try to - /// return EOF (`None`). This future is then polled, and - /// when it completes, the Body finally returns EOF (`None`). - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - Eof(DelayEofUntil), -} - -/// A sender half created through [`Body::channel()`]. -/// -/// Useful when wanting to stream chunks from another thread. -/// -/// ## Body Closing -/// -/// Note that the request body will always be closed normally when the sender is dropped (meaning -/// that the empty terminating chunk will be sent to the remote). If you desire to close the -/// connection with an incomplete response (e.g. in the case of an error during asynchronous -/// processing), call the [`Sender::abort()`] method to abort the body in an abnormal fashion. -/// -/// [`Body::channel()`]: struct.Body.html#method.channel -/// [`Sender::abort()`]: struct.Sender.html#method.abort -#[must_use = "Sender does nothing unless sent on"] -pub struct Sender { - want_rx: watch::Receiver, - data_tx: BodySender, - trailers_tx: Option, -} - -const WANT_PENDING: usize = 1; -const WANT_READY: usize = 2; - -impl Body { - /// Create an empty `Body` stream. - /// - /// # Example - /// - /// ``` - /// use hyper::{Body, Request}; - /// - /// // create a `GET /` request - /// let get = Request::new(Body::empty()); - /// ``` - #[inline] - pub fn empty() -> Body { - Body::new(Kind::Once(None)) - } - - /// Create a `Body` stream with an associated sender half. - /// - /// Useful when wanting to stream chunks from another thread. 
- #[inline] - pub fn channel() -> (Sender, Body) { - Self::new_channel(DecodedLength::CHUNKED, /*wanter =*/ false) - } - - pub(crate) fn new_channel(content_length: DecodedLength, wanter: bool) -> (Sender, Body) { - let (data_tx, data_rx) = mpsc::channel(0); - let (trailers_tx, trailers_rx) = oneshot::channel(); - - // If wanter is true, `Sender::poll_ready()` won't becoming ready - // until the `Body` has been polled for data once. - let want = if wanter { WANT_PENDING } else { WANT_READY }; - - let (want_tx, want_rx) = watch::channel(want); - - let tx = Sender { - want_rx, - data_tx, - trailers_tx: Some(trailers_tx), - }; - let rx = Body::new(Kind::Chan { - content_length, - want_tx, - data_rx, - trailers_rx, - }); - - (tx, rx) - } - - /// Wrap a futures `Stream` in a box inside `Body`. - /// - /// # Example - /// - /// ``` - /// # use hyper::Body; - /// let chunks: Vec> = vec![ - /// Ok("hello"), - /// Ok(" "), - /// Ok("world"), - /// ]; - /// - /// let stream = futures_util::stream::iter(chunks); - /// - /// let body = Body::wrap_stream(stream); - /// ``` - /// - /// # Optional - /// - /// This function requires enabling the `stream` feature in your - /// `Cargo.toml`. - #[cfg(feature = "stream")] - #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] - pub fn wrap_stream(stream: S) -> Body - where - S: Stream> + Send + 'static, - O: Into + 'static, - E: Into> + 'static, - { - let mapped = stream.map_ok(Into::into).map_err(Into::into); - Body::new(Kind::Wrapped(SyncWrapper::new(Box::pin(mapped)))) - } - - fn new(kind: Kind) -> Body { - Body { kind, extra: None } - } - - #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] - pub(crate) fn h2( - recv: h2::RecvStream, - mut content_length: DecodedLength, - ping: ping::Recorder, - ) -> Self { - // If the stream is already EOS, then the "unknown length" is clearly - // actually ZERO. 
- if !content_length.is_exact() && recv.is_end_stream() { - content_length = DecodedLength::ZERO; - } - let body = Body::new(Kind::H2 { - ping, - content_length, - recv, - }); - - body - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - pub(crate) fn delayed_eof(&mut self, fut: DelayEofUntil) { - self.extra_mut().delayed_eof = Some(DelayEof::NotEof(fut)); - } - - fn take_delayed_eof(&mut self) -> Option { - self.extra - .as_mut() - .and_then(|extra| extra.delayed_eof.take()) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - fn extra_mut(&mut self) -> &mut Extra { - self.extra - .get_or_insert_with(|| Box::new(Extra { delayed_eof: None })) - } - - fn poll_eof(&mut self, cx: &mut task::Context<'_>) -> Poll>> { - match self.take_delayed_eof() { - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - Some(DelayEof::NotEof(mut delay)) => match self.poll_inner(cx) { - ok @ Poll::Ready(Some(Ok(..))) | ok @ Poll::Pending => { - self.extra_mut().delayed_eof = Some(DelayEof::NotEof(delay)); - ok - } - Poll::Ready(None) => match Pin::new(&mut delay).poll(cx) { - Poll::Ready(Ok(never)) => match never {}, - Poll::Pending => { - self.extra_mut().delayed_eof = Some(DelayEof::Eof(delay)); - Poll::Pending - } - Poll::Ready(Err(_done)) => Poll::Ready(None), - }, - Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))), - }, - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - Some(DelayEof::Eof(mut delay)) => match Pin::new(&mut delay).poll(cx) { - Poll::Ready(Ok(never)) => match never {}, - Poll::Pending => { - self.extra_mut().delayed_eof = Some(DelayEof::Eof(delay)); - Poll::Pending - } - Poll::Ready(Err(_done)) => Poll::Ready(None), - }, - #[cfg(any( - not(any(feature = "http1", feature = "http2")), - not(feature = "client") - ))] - Some(delay_eof) => match delay_eof {}, - None => self.poll_inner(cx), - } - } - - #[cfg(feature = "ffi")] - pub(crate) fn as_ffi_mut(&mut self) -> 
&mut crate::ffi::UserBody { - match self.kind { - Kind::Ffi(ref mut body) => return body, - _ => { - self.kind = Kind::Ffi(crate::ffi::UserBody::new()); - } - } - - match self.kind { - Kind::Ffi(ref mut body) => body, - _ => unreachable!(), - } - } - - fn poll_inner(&mut self, cx: &mut task::Context<'_>) -> Poll>> { - match self.kind { - Kind::Once(ref mut val) => Poll::Ready(val.take().map(Ok)), - Kind::Chan { - content_length: ref mut len, - ref mut data_rx, - ref mut want_tx, - .. - } => { - want_tx.send(WANT_READY); - - match ready!(Pin::new(data_rx).poll_next(cx)?) { - Some(chunk) => { - len.sub_if(chunk.len() as u64); - Poll::Ready(Some(Ok(chunk))) - } - None => Poll::Ready(None), - } - } - #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] - Kind::H2 { - ref ping, - recv: ref mut h2, - content_length: ref mut len, - } => match ready!(h2.poll_data(cx)) { - Some(Ok(bytes)) => { - let _ = h2.flow_control().release_capacity(bytes.len()); - len.sub_if(bytes.len() as u64); - ping.record_data(bytes.len()); - Poll::Ready(Some(Ok(bytes))) - } - Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_body(e)))), - None => Poll::Ready(None), - }, - - #[cfg(feature = "ffi")] - Kind::Ffi(ref mut body) => body.poll_data(cx), - - #[cfg(feature = "stream")] - Kind::Wrapped(ref mut s) => match ready!(s.get_mut().as_mut().poll_next(cx)) { - Some(res) => Poll::Ready(Some(res.map_err(crate::Error::new_body))), - None => Poll::Ready(None), - }, - } - } - - #[cfg(feature = "http1")] - pub(super) fn take_full_data(&mut self) -> Option { - if let Kind::Once(ref mut chunk) = self.kind { - chunk.take() - } else { - None - } - } -} - -impl Default for Body { - /// Returns `Body::empty()`. 
- #[inline] - fn default() -> Body { - Body::empty() - } -} - -impl HttpBody for Body { - type Data = Bytes; - type Error = crate::Error; - - fn poll_data( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>> { - self.poll_eof(cx) - } - - fn poll_trailers( - #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut self: Pin<&mut Self>, - #[cfg_attr(not(feature = "http2"), allow(unused))] cx: &mut task::Context<'_>, - ) -> Poll, Self::Error>> { - match self.kind { - #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] - Kind::H2 { - recv: ref mut h2, - ref ping, - .. - } => match ready!(h2.poll_trailers(cx)) { - Ok(t) => { - ping.record_non_data(); - Poll::Ready(Ok(t)) - } - Err(e) => Poll::Ready(Err(crate::Error::new_h2(e))), - }, - Kind::Chan { - ref mut trailers_rx, - .. - } => match ready!(Pin::new(trailers_rx).poll(cx)) { - Ok(t) => Poll::Ready(Ok(Some(t))), - Err(_) => Poll::Ready(Ok(None)), - }, - #[cfg(feature = "ffi")] - Kind::Ffi(ref mut body) => body.poll_trailers(cx), - _ => Poll::Ready(Ok(None)), - } - } - - fn is_end_stream(&self) -> bool { - match self.kind { - Kind::Once(ref val) => val.is_none(), - Kind::Chan { content_length, .. } => content_length == DecodedLength::ZERO, - #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] - Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(), - #[cfg(feature = "ffi")] - Kind::Ffi(..) => false, - #[cfg(feature = "stream")] - Kind::Wrapped(..) => false, - } - } - - fn size_hint(&self) -> SizeHint { - macro_rules! opt_len { - ($content_length:expr) => {{ - let mut hint = SizeHint::default(); - - if let Some(content_length) = $content_length.into_opt() { - hint.set_exact(content_length); - } - - hint - }}; - } - - match self.kind { - Kind::Once(Some(ref val)) => SizeHint::with_exact(val.len() as u64), - Kind::Once(None) => SizeHint::with_exact(0), - #[cfg(feature = "stream")] - Kind::Wrapped(..) 
=> SizeHint::default(), - Kind::Chan { content_length, .. } => opt_len!(content_length), - #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] - Kind::H2 { content_length, .. } => opt_len!(content_length), - #[cfg(feature = "ffi")] - Kind::Ffi(..) => SizeHint::default(), - } - } -} - -impl fmt::Debug for Body { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - #[derive(Debug)] - struct Streaming; - #[derive(Debug)] - struct Empty; - #[derive(Debug)] - struct Full<'a>(&'a Bytes); - - let mut builder = f.debug_tuple("Body"); - match self.kind { - Kind::Once(None) => builder.field(&Empty), - Kind::Once(Some(ref chunk)) => builder.field(&Full(chunk)), - _ => builder.field(&Streaming), - }; - - builder.finish() - } -} - -/// # Optional -/// -/// This function requires enabling the `stream` feature in your -/// `Cargo.toml`. -#[cfg(feature = "stream")] -impl Stream for Body { - type Item = crate::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - HttpBody::poll_data(self, cx) - } -} - -/// # Optional -/// -/// This function requires enabling the `stream` feature in your -/// `Cargo.toml`. 
-#[cfg(feature = "stream")] -impl From>> + Send>> for Body { - #[inline] - fn from( - stream: Box>> + Send>, - ) -> Body { - Body::new(Kind::Wrapped(SyncWrapper::new(stream.into()))) - } -} - -impl From for Body { - #[inline] - fn from(chunk: Bytes) -> Body { - if chunk.is_empty() { - Body::empty() - } else { - Body::new(Kind::Once(Some(chunk))) - } - } -} - -impl From> for Body { - #[inline] - fn from(vec: Vec) -> Body { - Body::from(Bytes::from(vec)) - } -} - -impl From<&'static [u8]> for Body { - #[inline] - fn from(slice: &'static [u8]) -> Body { - Body::from(Bytes::from(slice)) - } -} - -impl From> for Body { - #[inline] - fn from(cow: Cow<'static, [u8]>) -> Body { - match cow { - Cow::Borrowed(b) => Body::from(b), - Cow::Owned(o) => Body::from(o), - } - } -} - -impl From for Body { - #[inline] - fn from(s: String) -> Body { - Body::from(Bytes::from(s.into_bytes())) - } -} - -impl From<&'static str> for Body { - #[inline] - fn from(slice: &'static str) -> Body { - Body::from(Bytes::from(slice.as_bytes())) - } -} - -impl From> for Body { - #[inline] - fn from(cow: Cow<'static, str>) -> Body { - match cow { - Cow::Borrowed(b) => Body::from(b), - Cow::Owned(o) => Body::from(o), - } - } -} - -impl Sender { - /// Check to see if this `Sender` can send more data. 
- pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - // Check if the receiver end has tried polling for the body yet - ready!(self.poll_want(cx)?); - self.data_tx - .poll_ready(cx) - .map_err(|_| crate::Error::new_closed()) - } - - fn poll_want(&mut self, cx: &mut task::Context<'_>) -> Poll> { - match self.want_rx.load(cx) { - WANT_READY => Poll::Ready(Ok(())), - WANT_PENDING => Poll::Pending, - watch::CLOSED => Poll::Ready(Err(crate::Error::new_closed())), - unexpected => unreachable!("want_rx value: {}", unexpected), - } - } - - async fn ready(&mut self) -> crate::Result<()> { - futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await - } - - /// Send data on data channel when it is ready. - pub async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> { - self.ready().await?; - self.data_tx - .try_send(Ok(chunk)) - .map_err(|_| crate::Error::new_closed()) - } - - /// Send trailers on trailers channel. - pub async fn send_trailers(&mut self, trailers: HeaderMap) -> crate::Result<()> { - let tx = match self.trailers_tx.take() { - Some(tx) => tx, - None => return Err(crate::Error::new_closed()), - }; - tx.send(trailers).map_err(|_| crate::Error::new_closed()) - } - - /// Try to send data on this channel. - /// - /// # Errors - /// - /// Returns `Err(Bytes)` if the channel could not (currently) accept - /// another `Bytes`. - /// - /// # Note - /// - /// This is mostly useful for when trying to send from some other thread - /// that doesn't have an async context. If in an async context, prefer - /// `send_data()` instead. - pub fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> { - self.data_tx - .try_send(Ok(chunk)) - .map_err(|err| err.into_inner().expect("just sent Ok")) - } - - /// Aborts the body in an abnormal fashion. 
- pub fn abort(mut self) { - self.send_error(crate::Error::new_body_write_aborted()); - } - - pub(crate) fn send_error(&mut self, err: crate::Error) { - let _ = self - .data_tx - // clone so the send works even if buffer is full - .clone() - .try_send(Err(err)); - } -} - -impl fmt::Debug for Sender { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - #[derive(Debug)] - struct Open; - #[derive(Debug)] - struct Closed; - - let mut builder = f.debug_tuple("Sender"); - match self.want_rx.peek() { - watch::CLOSED => builder.field(&Closed), - _ => builder.field(&Open), - }; - - builder.finish() - } -} - -#[cfg(test)] -mod tests { - use std::mem; - use std::task::Poll; - - use super::{Body, DecodedLength, HttpBody, Sender, SizeHint}; - - #[test] - fn test_size_of() { - // These are mostly to help catch *accidentally* increasing - // the size by too much. - - let body_size = mem::size_of::(); - let body_expected_size = mem::size_of::() * 6; - assert!( - body_size <= body_expected_size, - "Body size = {} <= {}", - body_size, - body_expected_size, - ); - - assert_eq!(body_size, mem::size_of::>(), "Option"); - - assert_eq!( - mem::size_of::(), - mem::size_of::() * 5, - "Sender" - ); - - assert_eq!( - mem::size_of::(), - mem::size_of::>(), - "Option" - ); - } - - #[test] - fn size_hint() { - fn eq(body: Body, b: SizeHint, note: &str) { - let a = body.size_hint(); - assert_eq!(a.lower(), b.lower(), "lower for {:?}", note); - assert_eq!(a.upper(), b.upper(), "upper for {:?}", note); - } - - eq(Body::from("Hello"), SizeHint::with_exact(5), "from str"); - - eq(Body::empty(), SizeHint::with_exact(0), "empty"); - - eq(Body::channel().1, SizeHint::new(), "channel"); - - eq( - Body::new_channel(DecodedLength::new(4), /*wanter =*/ false).1, - SizeHint::with_exact(4), - "channel with length", - ); - } - - #[tokio::test] - async fn channel_abort() { - let (tx, mut rx) = Body::channel(); - - tx.abort(); - - let err = rx.data().await.unwrap().unwrap_err(); - 
assert!(err.is_body_write_aborted(), "{:?}", err); - } - - #[tokio::test] - async fn channel_abort_when_buffer_is_full() { - let (mut tx, mut rx) = Body::channel(); - - tx.try_send_data("chunk 1".into()).expect("send 1"); - // buffer is full, but can still send abort - tx.abort(); - - let chunk1 = rx.data().await.expect("item 1").expect("chunk 1"); - assert_eq!(chunk1, "chunk 1"); - - let err = rx.data().await.unwrap().unwrap_err(); - assert!(err.is_body_write_aborted(), "{:?}", err); - } - - #[test] - fn channel_buffers_one() { - let (mut tx, _rx) = Body::channel(); - - tx.try_send_data("chunk 1".into()).expect("send 1"); - - // buffer is now full - let chunk2 = tx.try_send_data("chunk 2".into()).expect_err("send 2"); - assert_eq!(chunk2, "chunk 2"); - } - - #[tokio::test] - async fn channel_empty() { - let (_, mut rx) = Body::channel(); - - assert!(rx.data().await.is_none()); - } - - #[test] - fn channel_ready() { - let (mut tx, _rx) = Body::new_channel(DecodedLength::CHUNKED, /*wanter = */ false); - - let mut tx_ready = tokio_test::task::spawn(tx.ready()); - - assert!(tx_ready.poll().is_ready(), "tx is ready immediately"); - } - - #[test] - fn channel_wanter() { - let (mut tx, mut rx) = Body::new_channel(DecodedLength::CHUNKED, /*wanter = */ true); - - let mut tx_ready = tokio_test::task::spawn(tx.ready()); - let mut rx_data = tokio_test::task::spawn(rx.data()); - - assert!( - tx_ready.poll().is_pending(), - "tx isn't ready before rx has been polled" - ); - - assert!(rx_data.poll().is_pending(), "poll rx.data"); - assert!(tx_ready.is_woken(), "rx poll wakes tx"); - - assert!( - tx_ready.poll().is_ready(), - "tx is ready after rx has been polled" - ); - } - - #[test] - fn channel_notices_closure() { - let (mut tx, rx) = Body::new_channel(DecodedLength::CHUNKED, /*wanter = */ true); - - let mut tx_ready = tokio_test::task::spawn(tx.ready()); - - assert!( - tx_ready.poll().is_pending(), - "tx isn't ready before rx has been polled" - ); - - drop(rx); - 
assert!(tx_ready.is_woken(), "dropping rx wakes tx"); - - match tx_ready.poll() { - Poll::Ready(Err(ref e)) if e.is_closed() => (), - unexpected => panic!("tx poll ready unexpected: {:?}", unexpected), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/body/length.rs s390-tools-2.33.1/rust-vendor/hyper/src/body/length.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/body/length.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/body/length.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,123 +0,0 @@ -use std::fmt; - -#[derive(Clone, Copy, PartialEq, Eq)] -pub(crate) struct DecodedLength(u64); - -#[cfg(any(feature = "http1", feature = "http2"))] -impl From> for DecodedLength { - fn from(len: Option) -> Self { - len.and_then(|len| { - // If the length is u64::MAX, oh well, just reported chunked. - Self::checked_new(len).ok() - }) - .unwrap_or(DecodedLength::CHUNKED) - } -} - -#[cfg(any(feature = "http1", feature = "http2", test))] -const MAX_LEN: u64 = std::u64::MAX - 2; - -impl DecodedLength { - pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(::std::u64::MAX); - pub(crate) const CHUNKED: DecodedLength = DecodedLength(::std::u64::MAX - 1); - pub(crate) const ZERO: DecodedLength = DecodedLength(0); - - #[cfg(test)] - pub(crate) fn new(len: u64) -> Self { - debug_assert!(len <= MAX_LEN); - DecodedLength(len) - } - - /// Takes the length as a content-length without other checks. - /// - /// Should only be called if previously confirmed this isn't - /// CLOSE_DELIMITED or CHUNKED. - #[inline] - #[cfg(feature = "http1")] - pub(crate) fn danger_len(self) -> u64 { - debug_assert!(self.0 < Self::CHUNKED.0); - self.0 - } - - /// Converts to an Option representing a Known or Unknown length. 
- pub(crate) fn into_opt(self) -> Option { - match self { - DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => None, - DecodedLength(known) => Some(known), - } - } - - /// Checks the `u64` is within the maximum allowed for content-length. - #[cfg(any(feature = "http1", feature = "http2"))] - pub(crate) fn checked_new(len: u64) -> Result { - use tracing::warn; - - if len <= MAX_LEN { - Ok(DecodedLength(len)) - } else { - warn!("content-length bigger than maximum: {} > {}", len, MAX_LEN); - Err(crate::error::Parse::TooLarge) - } - } - - pub(crate) fn sub_if(&mut self, amt: u64) { - match *self { - DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => (), - DecodedLength(ref mut known) => { - *known -= amt; - } - } - } - - /// Returns whether this represents an exact length. - /// - /// This includes 0, which of course is an exact known length. - /// - /// It would return false if "chunked" or otherwise size-unknown. - #[cfg(feature = "http2")] - pub(crate) fn is_exact(&self) -> bool { - self.0 <= MAX_LEN - } -} - -impl fmt::Debug for DecodedLength { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - DecodedLength::CLOSE_DELIMITED => f.write_str("CLOSE_DELIMITED"), - DecodedLength::CHUNKED => f.write_str("CHUNKED"), - DecodedLength(n) => f.debug_tuple("DecodedLength").field(&n).finish(), - } - } -} - -impl fmt::Display for DecodedLength { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - DecodedLength::CLOSE_DELIMITED => f.write_str("close-delimited"), - DecodedLength::CHUNKED => f.write_str("chunked encoding"), - DecodedLength::ZERO => f.write_str("empty"), - DecodedLength(n) => write!(f, "content-length ({} bytes)", n), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn sub_if_known() { - let mut len = DecodedLength::new(30); - len.sub_if(20); - - assert_eq!(len.0, 10); - } - - #[test] - fn sub_if_chunked() { - let mut len = DecodedLength::CHUNKED; - len.sub_if(20); - - 
assert_eq!(len, DecodedLength::CHUNKED); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/body/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/body/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/body/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/body/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,65 +0,0 @@ -//! Streaming bodies for Requests and Responses -//! -//! For both [Clients](crate::client) and [Servers](crate::server), requests and -//! responses use streaming bodies, instead of complete buffering. This -//! allows applications to not use memory they don't need, and allows exerting -//! back-pressure on connections by only reading when asked. -//! -//! There are two pieces to this in hyper: -//! -//! - **The [`HttpBody`](HttpBody) trait** describes all possible bodies. -//! hyper allows any body type that implements `HttpBody`, allowing -//! applications to have fine-grained control over their streaming. -//! - **The [`Body`](Body) concrete type**, which is an implementation of -//! `HttpBody`, and returned by hyper as a "receive stream" (so, for server -//! requests and client responses). It is also a decent default implementation -//! if you don't have very custom needs of your send streams. - -pub use bytes::{Buf, Bytes}; -pub use http_body::Body as HttpBody; -pub use http_body::SizeHint; - -pub use self::aggregate::aggregate; -pub use self::body::{Body, Sender}; -pub(crate) use self::length::DecodedLength; -pub use self::to_bytes::to_bytes; - -mod aggregate; -mod body; -mod length; -mod to_bytes; - -/// An optimization to try to take a full body if immediately available. -/// -/// This is currently limited to *only* `hyper::Body`s. -#[cfg(feature = "http1")] -pub(crate) fn take_full_data(body: &mut T) -> Option { - use std::any::{Any, TypeId}; - - // This static type check can be optimized at compile-time. 
- if TypeId::of::() == TypeId::of::() { - let mut full = (body as &mut dyn Any) - .downcast_mut::() - .expect("must be Body") - .take_full_data(); - // This second cast is required to make the type system happy. - // Without it, the compiler cannot reason that the type is actually - // `T::Data`. Oh wells. - // - // It's still a measurable win! - (&mut full as &mut dyn Any) - .downcast_mut::>() - .expect("must be T::Data") - .take() - } else { - None - } -} - -fn _assert_send_sync() { - fn _assert_send() {} - fn _assert_sync() {} - - _assert_send::(); - _assert_sync::(); -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/body/to_bytes.rs s390-tools-2.33.1/rust-vendor/hyper/src/body/to_bytes.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/body/to_bytes.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/body/to_bytes.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,82 +0,0 @@ -use bytes::{Buf, BufMut, Bytes}; - -use super::HttpBody; - -/// Concatenate the buffers from a body into a single `Bytes` asynchronously. -/// -/// This may require copying the data into a single buffer. If you don't need -/// a contiguous buffer, prefer the [`aggregate`](crate::body::aggregate()) -/// function. -/// -/// # Note -/// -/// Care needs to be taken if the remote is untrusted. The function doesn't implement any length -/// checks and an malicious peer might make it consume arbitrary amounts of memory. Checking the -/// `Content-Length` is a possibility, but it is not strictly mandated to be present. 
-/// -/// # Example -/// -/// ``` -/// # #[cfg(all(feature = "client", feature = "tcp", any(feature = "http1", feature = "http2")))] -/// # async fn doc() -> hyper::Result<()> { -/// use hyper::{body::HttpBody}; -/// -/// # let request = hyper::Request::builder() -/// # .method(hyper::Method::POST) -/// # .uri("http://httpbin.org/post") -/// # .header("content-type", "application/json") -/// # .body(hyper::Body::from(r#"{"library":"hyper"}"#)).unwrap(); -/// # let client = hyper::Client::new(); -/// let response = client.request(request).await?; -/// -/// const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024; -/// -/// let response_content_length = match response.body().size_hint().upper() { -/// Some(v) => v, -/// None => MAX_ALLOWED_RESPONSE_SIZE + 1 // Just to protect ourselves from a malicious response -/// }; -/// -/// if response_content_length < MAX_ALLOWED_RESPONSE_SIZE { -/// let body_bytes = hyper::body::to_bytes(response.into_body()).await?; -/// println!("body: {:?}", body_bytes); -/// } -/// -/// # Ok(()) -/// # } -/// ``` -pub async fn to_bytes(body: T) -> Result -where - T: HttpBody, -{ - futures_util::pin_mut!(body); - - // If there's only 1 chunk, we can just return Buf::to_bytes() - let mut first = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(Bytes::new()); - }; - - let second = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(first.copy_to_bytes(first.remaining())); - }; - - // Don't pre-emptively reserve *too* much. - let rest = (body.size_hint().lower() as usize).min(1024 * 16); - let cap = first - .remaining() - .saturating_add(second.remaining()) - .saturating_add(rest); - // With more than 1 buf, we gotta flatten into a Vec first. 
- let mut vec = Vec::with_capacity(cap); - vec.put(first); - vec.put(second); - - while let Some(buf) = body.data().await { - vec.put(buf?); - } - - Ok(vec.into()) -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/cfg.rs s390-tools-2.33.1/rust-vendor/hyper/src/cfg.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/cfg.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/cfg.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,44 +0,0 @@ -macro_rules! cfg_feature { - ( - #![$meta:meta] - $($item:item)* - ) => { - $( - #[cfg($meta)] - #[cfg_attr(docsrs, doc(cfg($meta)))] - $item - )* - } -} - -macro_rules! cfg_proto { - ($($item:item)*) => { - cfg_feature! { - #![all( - any(feature = "http1", feature = "http2"), - any(feature = "client", feature = "server"), - )] - $($item)* - } - } -} - -cfg_proto! { - macro_rules! cfg_client { - ($($item:item)*) => { - cfg_feature! { - #![feature = "client"] - $($item)* - } - } - } - - macro_rules! cfg_server { - ($($item:item)*) => { - cfg_feature! 
{ - #![feature = "server"] - $($item)* - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/client.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/client.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/client.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/client.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1515 +0,0 @@ -use std::error::Error as StdError; -use std::fmt; -use std::mem; -use std::time::Duration; - -use futures_channel::oneshot; -use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; -use http::header::{HeaderValue, HOST}; -use http::uri::{Port, Scheme}; -use http::{Method, Request, Response, Uri, Version}; -use tracing::{debug, trace, warn}; - -use crate::body::{Body, HttpBody}; -use crate::client::connect::CaptureConnectionExtension; -use crate::common::{ - exec::BoxSendFuture, lazy as hyper_lazy, sync_wrapper::SyncWrapper, task, Future, Lazy, Pin, - Poll, -}; -use crate::rt::Executor; - -use super::conn; -use super::connect::{self, sealed::Connect, Alpn, Connected, Connection}; -use super::pool::{ - self, CheckoutIsClosedError, Key as PoolKey, Pool, Poolable, Pooled, Reservation, -}; -#[cfg(feature = "tcp")] -use super::HttpConnector; - -/// A Client to make outgoing HTTP requests. -/// -/// `Client` is cheap to clone and cloning is the recommended way to share a `Client`. The -/// underlying connection pool will be reused. -#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] -pub struct Client { - config: Config, - #[cfg_attr(feature = "deprecated", allow(deprecated))] - conn_builder: conn::Builder, - connector: C, - pool: Pool>, -} - -#[derive(Clone, Copy, Debug)] -struct Config { - retry_canceled_requests: bool, - set_host: bool, - ver: Ver, -} - -/// A `Future` that will resolve to an HTTP Response. -/// -/// This is returned by `Client::request` (and `Client::get`). 
-#[must_use = "futures do nothing unless polled"] -pub struct ResponseFuture { - inner: SyncWrapper>> + Send>>>, -} - -// ===== impl Client ===== - -#[cfg(feature = "tcp")] -impl Client { - /// Create a new Client with the default [config](Builder). - /// - /// # Note - /// - /// The default connector does **not** handle TLS. Speaking to `https` - /// destinations will require [configuring a connector that implements - /// TLS](https://hyper.rs/guides/client/configuration). - #[cfg_attr(docsrs, doc(cfg(feature = "tcp")))] - #[inline] - pub fn new() -> Client { - Builder::default().build_http() - } -} - -#[cfg(feature = "tcp")] -impl Default for Client { - fn default() -> Client { - Client::new() - } -} - -impl Client<(), Body> { - /// Create a builder to configure a new `Client`. - /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "runtime")] - /// # fn run () { - /// use std::time::Duration; - /// use hyper::Client; - /// - /// let client = Client::builder() - /// .pool_idle_timeout(Duration::from_secs(30)) - /// .http2_only(true) - /// .build_http(); - /// # let infer: Client<_, hyper::Body> = client; - /// # drop(infer); - /// # } - /// # fn main() {} - /// ``` - #[inline] - pub fn builder() -> Builder { - Builder::default() - } -} - -impl Client -where - C: Connect + Clone + Send + Sync + 'static, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - /// Send a `GET` request to the supplied `Uri`. - /// - /// # Note - /// - /// This requires that the `HttpBody` type have a `Default` implementation. - /// It *should* return an "empty" version of itself, such that - /// `HttpBody::is_end_stream` is `true`. 
- /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "runtime")] - /// # fn run () { - /// use hyper::{Client, Uri}; - /// - /// let client = Client::new(); - /// - /// let future = client.get(Uri::from_static("http://httpbin.org/ip")); - /// # } - /// # fn main() {} - /// ``` - pub fn get(&self, uri: Uri) -> ResponseFuture - where - B: Default, - { - let body = B::default(); - if !body.is_end_stream() { - warn!("default HttpBody used for get() does not return true for is_end_stream"); - } - - let mut req = Request::new(body); - *req.uri_mut() = uri; - self.request(req) - } - - /// Send a constructed `Request` using this `Client`. - /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "runtime")] - /// # fn run () { - /// use hyper::{Body, Method, Client, Request}; - /// - /// let client = Client::new(); - /// - /// let req = Request::builder() - /// .method(Method::POST) - /// .uri("http://httpbin.org/post") - /// .body(Body::from("Hallo!")) - /// .expect("request builder"); - /// - /// let future = client.request(req); - /// # } - /// # fn main() {} - /// ``` - pub fn request(&self, mut req: Request) -> ResponseFuture { - let is_http_connect = req.method() == Method::CONNECT; - match req.version() { - Version::HTTP_11 => (), - Version::HTTP_10 => { - if is_http_connect { - warn!("CONNECT is not allowed for HTTP/1.0"); - return ResponseFuture::new(future::err( - crate::Error::new_user_unsupported_request_method(), - )); - } - } - Version::HTTP_2 => (), - // completely unsupported HTTP version (like HTTP/0.9)! 
- other => return ResponseFuture::error_version(other), - }; - - let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { - Ok(s) => s, - Err(err) => { - return ResponseFuture::new(future::err(err)); - } - }; - - ResponseFuture::new(self.clone().retryably_send_request(req, pool_key)) - } - - async fn retryably_send_request( - self, - mut req: Request, - pool_key: PoolKey, - ) -> crate::Result> { - let uri = req.uri().clone(); - - loop { - req = match self.send_request(req, pool_key.clone()).await { - Ok(resp) => return Ok(resp), - Err(ClientError::Normal(err)) => return Err(err), - Err(ClientError::Canceled { - connection_reused, - mut req, - reason, - }) => { - if !self.config.retry_canceled_requests || !connection_reused { - // if client disabled, don't retry - // a fresh connection means we definitely can't retry - return Err(reason); - } - - trace!( - "unstarted request canceled, trying again (reason={:?})", - reason - ); - *req.uri_mut() = uri.clone(); - req - } - } - } - } - - async fn send_request( - &self, - mut req: Request, - pool_key: PoolKey, - ) -> Result, ClientError> { - let mut pooled = match self.connection_for(pool_key).await { - Ok(pooled) => pooled, - Err(ClientConnectError::Normal(err)) => return Err(ClientError::Normal(err)), - Err(ClientConnectError::H2CheckoutIsClosed(reason)) => { - return Err(ClientError::Canceled { - connection_reused: true, - req, - reason, - }) - } - }; - req.extensions_mut() - .get_mut::() - .map(|conn| conn.set(&pooled.conn_info)); - if pooled.is_http1() { - if req.version() == Version::HTTP_2 { - warn!("Connection is HTTP/1, but request requires HTTP/2"); - return Err(ClientError::Normal( - crate::Error::new_user_unsupported_version().with_client_connect_info(pooled.conn_info.clone()), - )); - } - - if self.config.set_host { - let uri = req.uri().clone(); - req.headers_mut().entry(HOST).or_insert_with(|| { - let hostname = uri.host().expect("authority implies host"); - if let Some(port) = 
get_non_default_port(&uri) { - let s = format!("{}:{}", hostname, port); - HeaderValue::from_str(&s) - } else { - HeaderValue::from_str(hostname) - } - .expect("uri host is valid header value") - }); - } - - // CONNECT always sends authority-form, so check it first... - if req.method() == Method::CONNECT { - authority_form(req.uri_mut()); - } else if pooled.conn_info.is_proxied { - absolute_form(req.uri_mut()); - } else { - origin_form(req.uri_mut()); - } - } else if req.method() == Method::CONNECT { - authority_form(req.uri_mut()); - } - - let mut res = match pooled.send_request_retryable(req).await { - Err((err, orig_req)) => { - return Err(ClientError::map_with_reused(pooled.is_reused())(( - err.with_client_connect_info(pooled.conn_info.clone()), - orig_req, - ))); - } - Ok(res) => res, - }; - - // If the Connector included 'extra' info, add to Response... - if let Some(extra) = &pooled.conn_info.extra { - extra.set(res.extensions_mut()); - } - - // As of futures@0.1.21, there is a race condition in the mpsc - // channel, such that sending when the receiver is closing can - // result in the message being stuck inside the queue. It won't - // ever notify until the Sender side is dropped. - // - // To counteract this, we must check if our senders 'want' channel - // has been closed after having tried to send. If so, error out... - if pooled.is_closed() { - return Ok(res); - } - - // If pooled is HTTP/2, we can toss this reference immediately. - // - // when pooled is dropped, it will try to insert back into the - // pool. To delay that, spawn a future that completes once the - // sender is ready again. - // - // This *should* only be once the related `Connection` has polled - // for a new request to start. - // - // It won't be ready if there is a body to stream. 
- if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { - drop(pooled); - } else if !res.body().is_end_stream() { - let (delayed_tx, delayed_rx) = oneshot::channel(); - res.body_mut().delayed_eof(delayed_rx); - let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| { - // At this point, `pooled` is dropped, and had a chance - // to insert into the pool (if conn was idle) - drop(delayed_tx); - }); - - #[cfg_attr(feature = "deprecated", allow(deprecated))] - self.conn_builder.exec.execute(on_idle); - } else { - // There's no body to delay, but the connection isn't - // ready yet. Only re-insert when it's ready - let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); - - #[cfg_attr(feature = "deprecated", allow(deprecated))] - self.conn_builder.exec.execute(on_idle); - } - - Ok(res) - } - - async fn connection_for( - &self, - pool_key: PoolKey, - ) -> Result>, ClientConnectError> { - // This actually races 2 different futures to try to get a ready - // connection the fastest, and to reduce connection churn. - // - // - If the pool has an idle connection waiting, that's used - // immediately. - // - Otherwise, the Connector is asked to start connecting to - // the destination Uri. - // - Meanwhile, the pool Checkout is watching to see if any other - // request finishes and tries to insert an idle connection. - // - If a new connection is started, but the Checkout wins after - // (an idle connection became available first), the started - // connection future is spawned into the runtime to complete, - // and then be inserted into the pool as an idle connection. - let checkout = self.pool.checkout(pool_key.clone()); - let connect = self.connect_to(pool_key); - let is_ver_h2 = self.config.ver == Ver::Http2; - - // The order of the `select` is depended on below... - - match future::select(checkout, connect).await { - // Checkout won, connect future may have been started or not. 
- // - // If it has, let it finish and insert back into the pool, - // so as to not waste the socket... - Either::Left((Ok(checked_out), connecting)) => { - // This depends on the `select` above having the correct - // order, such that if the checkout future were ready - // immediately, the connect future will never have been - // started. - // - // If it *wasn't* ready yet, then the connect future will - // have been started... - if connecting.started() { - let bg = connecting - .map_err(|err| { - trace!("background connect error: {}", err); - }) - .map(|_pooled| { - // dropping here should just place it in - // the Pool for us... - }); - // An execute error here isn't important, we're just trying - // to prevent a waste of a socket... - #[cfg_attr(feature = "deprecated", allow(deprecated))] - self.conn_builder.exec.execute(bg); - } - Ok(checked_out) - } - // Connect won, checkout can just be dropped. - Either::Right((Ok(connected), _checkout)) => Ok(connected), - // Either checkout or connect could get canceled: - // - // 1. Connect is canceled if this is HTTP/2 and there is - // an outstanding HTTP/2 connecting task. - // 2. Checkout is canceled if the pool cannot deliver an - // idle connection reliably. - // - // In both cases, we should just wait for the other future. 
- Either::Left((Err(err), connecting)) => { - if err.is_canceled() { - connecting.await.map_err(ClientConnectError::Normal) - } else { - Err(ClientConnectError::Normal(err)) - } - } - Either::Right((Err(err), checkout)) => { - if err.is_canceled() { - checkout.await.map_err(move |err| { - if is_ver_h2 - && err.is_canceled() - && err.find_source::().is_some() - { - ClientConnectError::H2CheckoutIsClosed(err) - } else { - ClientConnectError::Normal(err) - } - }) - } else { - Err(ClientConnectError::Normal(err)) - } - } - } - } - - fn connect_to( - &self, - pool_key: PoolKey, - ) -> impl Lazy>>> + Unpin { - #[cfg_attr(feature = "deprecated", allow(deprecated))] - let executor = self.conn_builder.exec.clone(); - let pool = self.pool.clone(); - #[cfg(not(feature = "http2"))] - let conn_builder = self.conn_builder.clone(); - #[cfg(feature = "http2")] - let mut conn_builder = self.conn_builder.clone(); - let ver = self.config.ver; - let is_ver_h2 = ver == Ver::Http2; - let connector = self.connector.clone(); - let dst = domain_as_uri(pool_key.clone()); - hyper_lazy(move || { - // Try to take a "connecting lock". - // - // If the pool_key is for HTTP/2, and there is already a - // connection being established, then this can't take a - // second lock. The "connect_to" future is Canceled. - let connecting = match pool.connecting(&pool_key, ver) { - Some(lock) => lock, - None => { - let canceled = - crate::Error::new_canceled().with("HTTP/2 connection in progress"); - return Either::Right(future::err(canceled)); - } - }; - Either::Left( - connector - .connect(connect::sealed::Internal, dst) - .map_err(crate::Error::new_connect) - .and_then(move |io| { - let connected = io.connected(); - // If ALPN is h2 and we aren't http2_only already, - // then we need to convert our pool checkout into - // a single HTTP2 one. 
- let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { - match connecting.alpn_h2(&pool) { - Some(lock) => { - trace!("ALPN negotiated h2, updating pool"); - lock - } - None => { - // Another connection has already upgraded, - // the pool checkout should finish up for us. - let canceled = crate::Error::new_canceled() - .with("ALPN upgraded to HTTP/2"); - return Either::Right(future::err(canceled)); - } - } - } else { - connecting - }; - - #[cfg_attr(not(feature = "http2"), allow(unused))] - let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; - #[cfg(feature = "http2")] - { - conn_builder.http2_only(is_h2); - } - - Either::Left(Box::pin(async move { - let (tx, conn) = conn_builder.handshake(io).await?; - - trace!("handshake complete, spawning background dispatcher task"); - executor.execute( - conn.map_err(|e| debug!("client connection error: {}", e)) - .map(|_| ()), - ); - - // Wait for 'conn' to ready up before we - // declare this tx as usable - let tx = tx.when_ready().await?; - - let tx = { - #[cfg(feature = "http2")] - { - if is_h2 { - PoolTx::Http2(tx.into_http2()) - } else { - PoolTx::Http1(tx) - } - } - #[cfg(not(feature = "http2"))] - PoolTx::Http1(tx) - }; - - Ok(pool.pooled( - connecting, - PoolClient { - conn_info: connected, - tx, - }, - )) - })) - }), - ) - }) - } -} - -impl tower_service::Service> for Client -where - C: Connect + Clone + Send + Sync + 'static, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Response = Response; - type Error = crate::Error; - type Future = ResponseFuture; - - fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: Request) -> Self::Future { - self.request(req) - } -} - -impl tower_service::Service> for &'_ Client -where - C: Connect + Clone + Send + Sync + 'static, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Response = Response; - type Error = crate::Error; - type Future = 
ResponseFuture; - - fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: Request) -> Self::Future { - self.request(req) - } -} - -impl Clone for Client { - fn clone(&self) -> Client { - Client { - config: self.config.clone(), - conn_builder: self.conn_builder.clone(), - connector: self.connector.clone(), - pool: self.pool.clone(), - } - } -} - -impl fmt::Debug for Client { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Client").finish() - } -} - -// ===== impl ResponseFuture ===== - -impl ResponseFuture { - fn new(value: F) -> Self - where - F: Future>> + Send + 'static, - { - Self { - inner: SyncWrapper::new(Box::pin(value)) - } - } - - fn error_version(ver: Version) -> Self { - warn!("Request has unsupported version \"{:?}\"", ver); - ResponseFuture::new(Box::pin(future::err( - crate::Error::new_user_unsupported_version(), - ))) - } -} - -impl fmt::Debug for ResponseFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Future") - } -} - -impl Future for ResponseFuture { - type Output = crate::Result>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.inner.get_mut().as_mut().poll(cx) - } -} - -// ===== impl PoolClient ===== - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -struct PoolClient { - conn_info: Connected, - tx: PoolTx, -} - -enum PoolTx { - #[cfg_attr(feature = "deprecated", allow(deprecated))] - Http1(conn::SendRequest), - #[cfg(feature = "http2")] - Http2(conn::Http2SendRequest), -} - -impl PoolClient { - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - match self.tx { - PoolTx::Http1(ref mut tx) => tx.poll_ready(cx), - #[cfg(feature = "http2")] - PoolTx::Http2(_) => Poll::Ready(Ok(())), - } - } - - fn is_http1(&self) -> bool { - !self.is_http2() - } - - fn is_http2(&self) -> bool { - match self.tx { - 
PoolTx::Http1(_) => false, - #[cfg(feature = "http2")] - PoolTx::Http2(_) => true, - } - } - - fn is_ready(&self) -> bool { - match self.tx { - PoolTx::Http1(ref tx) => tx.is_ready(), - #[cfg(feature = "http2")] - PoolTx::Http2(ref tx) => tx.is_ready(), - } - } - - fn is_closed(&self) -> bool { - match self.tx { - PoolTx::Http1(ref tx) => tx.is_closed(), - #[cfg(feature = "http2")] - PoolTx::Http2(ref tx) => tx.is_closed(), - } - } -} - -impl PoolClient { - fn send_request_retryable( - &mut self, - req: Request, - ) -> impl Future, (crate::Error, Option>)>> - where - B: Send, - { - match self.tx { - #[cfg(not(feature = "http2"))] - PoolTx::Http1(ref mut tx) => tx.send_request_retryable(req), - #[cfg(feature = "http2")] - PoolTx::Http1(ref mut tx) => Either::Left(tx.send_request_retryable(req)), - #[cfg(feature = "http2")] - PoolTx::Http2(ref mut tx) => Either::Right(tx.send_request_retryable(req)), - } - } -} - -impl Poolable for PoolClient -where - B: Send + 'static, -{ - fn is_open(&self) -> bool { - if self.conn_info.poisoned.poisoned() { - trace!("marking {:?} as closed because it was poisoned", self.conn_info); - return false; - } - match self.tx { - PoolTx::Http1(ref tx) => tx.is_ready(), - #[cfg(feature = "http2")] - PoolTx::Http2(ref tx) => tx.is_ready(), - } - } - - fn reserve(self) -> Reservation { - match self.tx { - PoolTx::Http1(tx) => Reservation::Unique(PoolClient { - conn_info: self.conn_info, - tx: PoolTx::Http1(tx), - }), - #[cfg(feature = "http2")] - PoolTx::Http2(tx) => { - let b = PoolClient { - conn_info: self.conn_info.clone(), - tx: PoolTx::Http2(tx.clone()), - }; - let a = PoolClient { - conn_info: self.conn_info, - tx: PoolTx::Http2(tx), - }; - Reservation::Shared(a, b) - } - } - } - - fn can_share(&self) -> bool { - self.is_http2() - } -} - -// ===== impl ClientError ===== - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -enum ClientError { - Normal(crate::Error), - 
Canceled { - connection_reused: bool, - req: Request, - reason: crate::Error, - }, -} - -impl ClientError { - fn map_with_reused(conn_reused: bool) -> impl Fn((crate::Error, Option>)) -> Self { - move |(err, orig_req)| { - if let Some(req) = orig_req { - ClientError::Canceled { - connection_reused: conn_reused, - reason: err, - req, - } - } else { - ClientError::Normal(err) - } - } - } -} - -enum ClientConnectError { - Normal(crate::Error), - H2CheckoutIsClosed(crate::Error), -} - -/// A marker to identify what version a pooled connection is. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub(super) enum Ver { - Auto, - Http2, -} - -fn origin_form(uri: &mut Uri) { - let path = match uri.path_and_query() { - Some(path) if path.as_str() != "/" => { - let mut parts = ::http::uri::Parts::default(); - parts.path_and_query = Some(path.clone()); - Uri::from_parts(parts).expect("path is valid uri") - } - _none_or_just_slash => { - debug_assert!(Uri::default() == "/"); - Uri::default() - } - }; - *uri = path -} - -fn absolute_form(uri: &mut Uri) { - debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); - debug_assert!( - uri.authority().is_some(), - "absolute_form needs an authority" - ); - // If the URI is to HTTPS, and the connector claimed to be a proxy, - // then it *should* have tunneled, and so we don't want to send - // absolute-form in that case. - if uri.scheme() == Some(&Scheme::HTTPS) { - origin_form(uri); - } -} - -fn authority_form(uri: &mut Uri) { - if let Some(path) = uri.path_and_query() { - // `https://hyper.rs` would parse with `/` path, don't - // annoy people about that... 
- if path != "/" { - warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); - } - } - *uri = match uri.authority() { - Some(auth) => { - let mut parts = ::http::uri::Parts::default(); - parts.authority = Some(auth.clone()); - Uri::from_parts(parts).expect("authority is valid") - } - None => { - unreachable!("authority_form with relative uri"); - } - }; -} - -fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result { - let uri_clone = uri.clone(); - match (uri_clone.scheme(), uri_clone.authority()) { - (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), - (None, Some(auth)) if is_http_connect => { - let scheme = match auth.port_u16() { - Some(443) => { - set_scheme(uri, Scheme::HTTPS); - Scheme::HTTPS - } - _ => { - set_scheme(uri, Scheme::HTTP); - Scheme::HTTP - } - }; - Ok((scheme, auth.clone())) - } - _ => { - debug!("Client requires absolute-form URIs, received: {:?}", uri); - Err(crate::Error::new_user_absolute_uri_required()) - } - } -} - -fn domain_as_uri((scheme, auth): PoolKey) -> Uri { - http::uri::Builder::new() - .scheme(scheme) - .authority(auth) - .path_and_query("/") - .build() - .expect("domain is valid Uri") -} - -fn set_scheme(uri: &mut Uri, scheme: Scheme) { - debug_assert!( - uri.scheme().is_none(), - "set_scheme expects no existing scheme" - ); - let old = mem::replace(uri, Uri::default()); - let mut parts: ::http::uri::Parts = old.into(); - parts.scheme = Some(scheme); - parts.path_and_query = Some("/".parse().expect("slash is a valid path")); - *uri = Uri::from_parts(parts).expect("scheme is valid"); -} - -fn get_non_default_port(uri: &Uri) -> Option> { - match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { - (Some(443), true) => None, - (Some(80), false) => None, - _ => uri.port(), - } -} - -fn is_schema_secure(uri: &Uri) -> bool { - uri.scheme_str() - .map(|scheme_str| matches!(scheme_str, "wss" | "https")) - .unwrap_or_default() -} - -/// A builder to configure a new [`Client`](Client). 
-/// -/// # Example -/// -/// ``` -/// # #[cfg(feature = "runtime")] -/// # fn run () { -/// use std::time::Duration; -/// use hyper::Client; -/// -/// let client = Client::builder() -/// .pool_idle_timeout(Duration::from_secs(30)) -/// .http2_only(true) -/// .build_http(); -/// # let infer: Client<_, hyper::Body> = client; -/// # drop(infer); -/// # } -/// # fn main() {} -/// ``` -#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] -#[derive(Clone)] -pub struct Builder { - client_config: Config, - #[cfg_attr(feature = "deprecated", allow(deprecated))] - conn_builder: conn::Builder, - pool_config: pool::Config, -} - -impl Default for Builder { - fn default() -> Self { - Self { - client_config: Config { - retry_canceled_requests: true, - set_host: true, - ver: Ver::Auto, - }, - #[cfg_attr(feature = "deprecated", allow(deprecated))] - conn_builder: conn::Builder::new(), - pool_config: pool::Config { - idle_timeout: Some(Duration::from_secs(90)), - max_idle_per_host: std::usize::MAX, - }, - } - } -} - -impl Builder { - #[doc(hidden)] - #[deprecated( - note = "name is confusing, to disable the connection pool, call pool_max_idle_per_host(0)" - )] - pub fn keep_alive(&mut self, val: bool) -> &mut Self { - if !val { - // disable - self.pool_max_idle_per_host(0) - } else if self.pool_config.max_idle_per_host == 0 { - // enable - self.pool_max_idle_per_host(std::usize::MAX) - } else { - // already enabled - self - } - } - - #[doc(hidden)] - #[deprecated(note = "renamed to `pool_idle_timeout`")] - pub fn keep_alive_timeout(&mut self, val: D) -> &mut Self - where - D: Into>, - { - self.pool_idle_timeout(val) - } - - /// Set an optional timeout for idle sockets being kept-alive. - /// - /// Pass `None` to disable timeout. - /// - /// Default is 90 seconds. 
- pub fn pool_idle_timeout(&mut self, val: D) -> &mut Self - where - D: Into>, - { - self.pool_config.idle_timeout = val.into(); - self - } - - #[doc(hidden)] - #[deprecated(note = "renamed to `pool_max_idle_per_host`")] - pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { - self.pool_config.max_idle_per_host = max_idle; - self - } - - /// Sets the maximum idle connection per host allowed in the pool. - /// - /// Default is `usize::MAX` (no limit). - pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { - self.pool_config.max_idle_per_host = max_idle; - self - } - - // HTTP/1 options - - /// Sets the exact size of the read buffer to *always* use. - /// - /// Note that setting this option unsets the `http1_max_buf_size` option. - /// - /// Default is an adaptive read buffer. - pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { - self.conn_builder.http1_read_buf_exact_size(Some(sz)); - self - } - - /// Set the maximum buffer size for the connection. - /// - /// Default is ~400kb. - /// - /// Note that setting this option unsets the `http1_read_exact_buf_size` option. - /// - /// # Panics - /// - /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { - self.conn_builder.http1_max_buf_size(max); - self - } - - /// Set whether HTTP/1 connections will accept spaces between header names - /// and the colon that follow them in responses. - /// - /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when - /// parsing. - /// - /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has - /// to say about it: - /// - /// > No whitespace is allowed between the header field-name and colon. 
In - /// > the past, differences in the handling of such whitespace have led to - /// > security vulnerabilities in request routing and response handling. A - /// > server MUST reject any received request message that contains - /// > whitespace between a header field-name and colon with a response code - /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a - /// > response message before forwarding the message downstream. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - /// - /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 - pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self { - self.conn_builder - .http1_allow_spaces_after_header_name_in_responses(val); - self - } - - /// Set whether HTTP/1 connections will accept obsolete line folding for - /// header values. - /// - /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has - /// to say about it: - /// - /// > A server that receives an obs-fold in a request message that is not - /// > within a message/http container MUST either reject the message by - /// > sending a 400 (Bad Request), preferably with a representation - /// > explaining that obsolete line folding is unacceptable, or replace - /// > each received obs-fold with one or more SP octets prior to - /// > interpreting the field value or forwarding the message downstream. - /// - /// > A proxy or gateway that receives an obs-fold in a response message - /// > that is not within a message/http container MUST either discard the - /// > message and replace it with a 502 (Bad Gateway) response, preferably - /// > with a representation explaining that unacceptable line folding was - /// > received, or replace each received obs-fold with one or more SP - /// > octets prior to interpreting the field value or forwarding the - /// > message downstream. 
- /// - /// > A user agent that receives an obs-fold in a response message that is - /// > not within a message/http container MUST replace each received - /// > obs-fold with one or more SP octets prior to interpreting the field - /// > value. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - /// - /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 - pub fn http1_allow_obsolete_multiline_headers_in_responses(&mut self, val: bool) -> &mut Self { - self.conn_builder - .http1_allow_obsolete_multiline_headers_in_responses(val); - self - } - - /// Sets whether invalid header lines should be silently ignored in HTTP/1 responses. - /// - /// This mimicks the behaviour of major browsers. You probably don't want this. - /// You should only want this if you are implementing a proxy whose main - /// purpose is to sit in front of browsers whose users access arbitrary content - /// which may be malformed, and they expect everything that works without - /// the proxy to keep working with the proxy. - /// - /// This option will prevent Hyper's client from returning an error encountered - /// when parsing a header, except if the error was caused by the character NUL - /// (ASCII code 0), as Chrome specifically always reject those. - /// - /// The ignorable errors are: - /// * empty header names; - /// * characters that are not allowed in header names, except for `\0` and `\r`; - /// * when `allow_spaces_after_header_name_in_responses` is not enabled, - /// spaces and tabs between the header name and the colon; - /// * missing colon between header name and colon; - /// * characters that are not allowed in header values except for `\0` and `\r`. - /// - /// If an ignorable error is encountered, the parser tries to find the next - /// line in the input to resume parsing the rest of the headers. An error - /// will be emitted nonetheless if it finds `\0` or a lone `\r` while - /// looking for the next line. 
- pub fn http1_ignore_invalid_headers_in_responses( - &mut self, - val: bool, - ) -> &mut Builder { - self.conn_builder - .http1_ignore_invalid_headers_in_responses(val); - self - } - - /// Set whether HTTP/1 connections should try to use vectored writes, - /// or always flatten into a single buffer. - /// - /// Note that setting this to false may mean more copies of body data, - /// but may also improve performance when an IO transport doesn't - /// support vectored writes well, such as most TLS implementations. - /// - /// Setting this to true will force hyper to use queued strategy - /// which may eliminate unnecessary cloning on some TLS backends - /// - /// Default is `auto`. In this mode hyper will try to guess which - /// mode to use - pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder { - self.conn_builder.http1_writev(enabled); - self - } - - /// Set whether HTTP/1 connections will write header names as title case at - /// the socket level. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { - self.conn_builder.http1_title_case_headers(val); - self - } - - /// Set whether to support preserving original header cases. - /// - /// Currently, this will record the original cases received, and store them - /// in a private extension on the `Response`. It will also look for and use - /// such an extension in any provided `Request`. - /// - /// Since the relevant extension is still private, there is no way to - /// interact with the original cases. The only effect this can have now is - /// to forward the cases in a proxy-like fashion. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self { - self.conn_builder.http1_preserve_header_case(val); - self - } - - /// Set whether HTTP/0.9 responses should be tolerated. - /// - /// Default is false. 
- pub fn http09_responses(&mut self, val: bool) -> &mut Self { - self.conn_builder.http09_responses(val); - self - } - - /// Set whether the connection **must** use HTTP/2. - /// - /// The destination must either allow HTTP2 Prior Knowledge, or the - /// `Connect` should be configured to do use ALPN to upgrade to `h2` - /// as part of the connection process. This will not make the `Client` - /// utilize ALPN by itself. - /// - /// Note that setting this to true prevents HTTP/1 from being allowed. - /// - /// Default is false. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_only(&mut self, val: bool) -> &mut Self { - self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto }; - self - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { - self.conn_builder - .http2_initial_stream_window_size(sz.into()); - self - } - - /// Sets the max connection-level flow control for HTTP2 - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_connection_window_size( - &mut self, - sz: impl Into>, - ) -> &mut Self { - self.conn_builder - .http2_initial_connection_window_size(sz.into()); - self - } - - /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `http2_initial_stream_window_size` and - /// `http2_initial_connection_window_size`. 
- #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { - self.conn_builder.http2_adaptive_window(enabled); - self - } - - /// Sets the maximum frame size to use for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { - self.conn_builder.http2_max_frame_size(sz); - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_interval( - &mut self, - interval: impl Into>, - ) -> &mut Self { - self.conn_builder.http2_keep_alive_interval(interval); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. - /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { - self.conn_builder.http2_keep_alive_timeout(timeout); - self - } - - /// Sets whether HTTP2 keep-alive should apply while the connection is idle. - /// - /// If disabled, keep-alive pings are only sent while there are open - /// request/responses streams. If enabled, pings are also sent when no - /// streams are active. 
Does nothing if `http2_keep_alive_interval` is - /// disabled. - /// - /// Default is `false`. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { - self.conn_builder.http2_keep_alive_while_idle(enabled); - self - } - - /// Sets the maximum number of HTTP2 concurrent locally reset streams. - /// - /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more - /// details. - /// - /// The default value is determined by the `h2` crate. - /// - /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { - self.conn_builder.http2_max_concurrent_reset_streams(max); - self - } - - /// Set the maximum write buffer size for each HTTP/2 stream. - /// - /// Default is currently 1MB, but may change. - /// - /// # Panics - /// - /// The value must be no larger than `u32::MAX`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { - self.conn_builder.http2_max_send_buf_size(max); - self - } - - /// Set whether to retry requests that get disrupted before ever starting - /// to write. - /// - /// This means a request that is queued, and gets given an idle, reused - /// connection, and then encounters an error immediately as the idle - /// connection was found to be unusable. - /// - /// When this is set to `false`, the related `ResponseFuture` would instead - /// resolve to an `Error::Cancel`. - /// - /// Default is `true`. 
- #[inline] - pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { - self.client_config.retry_canceled_requests = val; - self - } - - /// Set whether to automatically add the `Host` header to requests. - /// - /// If true, and a request does not include a `Host` header, one will be - /// added automatically, derived from the authority of the `Uri`. - /// - /// Default is `true`. - #[inline] - pub fn set_host(&mut self, val: bool) -> &mut Self { - self.client_config.set_host = val; - self - } - - /// Provide an executor to execute background `Connection` tasks. - pub fn executor(&mut self, exec: E) -> &mut Self - where - E: Executor + Send + Sync + 'static, - { - self.conn_builder.executor(exec); - self - } - - /// Builder a client with this configuration and the default `HttpConnector`. - #[cfg(feature = "tcp")] - pub fn build_http(&self) -> Client - where - B: HttpBody + Send, - B::Data: Send, - { - let mut connector = HttpConnector::new(); - if self.pool_config.is_enabled() { - connector.set_keepalive(self.pool_config.idle_timeout); - } - self.build(connector) - } - - /// Combine the configuration of this builder with a connector to create a `Client`. 
- pub fn build(&self, connector: C) -> Client - where - C: Connect + Clone, - B: HttpBody + Send, - B::Data: Send, - { - #[cfg_attr(feature = "deprecated", allow(deprecated))] - Client { - config: self.client_config, - conn_builder: self.conn_builder.clone(), - connector, - pool: Pool::new(self.pool_config, &self.conn_builder.exec), - } - } -} - -impl fmt::Debug for Builder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Builder") - .field("client_config", &self.client_config) - .field("conn_builder", &self.conn_builder) - .field("pool_config", &self.pool_config) - .finish() - } -} - -#[cfg(test)] -mod unit_tests { - use super::*; - - #[test] - fn response_future_is_sync() { - fn assert_sync() {} - assert_sync::(); - } - - #[test] - fn set_relative_uri_with_implicit_path() { - let mut uri = "http://hyper.rs".parse().unwrap(); - origin_form(&mut uri); - assert_eq!(uri.to_string(), "/"); - } - - #[test] - fn test_origin_form() { - let mut uri = "http://hyper.rs/guides".parse().unwrap(); - origin_form(&mut uri); - assert_eq!(uri.to_string(), "/guides"); - - let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap(); - origin_form(&mut uri); - assert_eq!(uri.to_string(), "/guides?foo=bar"); - } - - #[test] - fn test_absolute_form() { - let mut uri = "http://hyper.rs/guides".parse().unwrap(); - absolute_form(&mut uri); - assert_eq!(uri.to_string(), "http://hyper.rs/guides"); - - let mut uri = "https://hyper.rs/guides".parse().unwrap(); - absolute_form(&mut uri); - assert_eq!(uri.to_string(), "/guides"); - } - - #[test] - fn test_authority_form() { - let _ = pretty_env_logger::try_init(); - - let mut uri = "http://hyper.rs".parse().unwrap(); - authority_form(&mut uri); - assert_eq!(uri.to_string(), "hyper.rs"); - - let mut uri = "hyper.rs".parse().unwrap(); - authority_form(&mut uri); - assert_eq!(uri.to_string(), "hyper.rs"); - } - - #[test] - fn test_extract_domain_connect_no_port() { - let mut uri = "hyper.rs".parse().unwrap(); - 
let (scheme, host) = extract_domain(&mut uri, true).expect("extract domain"); - assert_eq!(scheme, *"http"); - assert_eq!(host, "hyper.rs"); - } - - #[test] - fn test_is_secure() { - assert_eq!( - is_schema_secure(&"http://hyper.rs".parse::().unwrap()), - false - ); - assert_eq!(is_schema_secure(&"hyper.rs".parse::().unwrap()), false); - assert_eq!( - is_schema_secure(&"wss://hyper.rs".parse::().unwrap()), - true - ); - assert_eq!( - is_schema_secure(&"ws://hyper.rs".parse::().unwrap()), - false - ); - } - - #[test] - fn test_get_non_default_port() { - assert!(get_non_default_port(&"http://hyper.rs".parse::().unwrap()).is_none()); - assert!(get_non_default_port(&"http://hyper.rs:80".parse::().unwrap()).is_none()); - assert!(get_non_default_port(&"https://hyper.rs:443".parse::().unwrap()).is_none()); - assert!(get_non_default_port(&"hyper.rs:80".parse::().unwrap()).is_none()); - - assert_eq!( - get_non_default_port(&"http://hyper.rs:123".parse::().unwrap()) - .unwrap() - .as_u16(), - 123 - ); - assert_eq!( - get_non_default_port(&"https://hyper.rs:80".parse::().unwrap()) - .unwrap() - .as_u16(), - 80 - ); - assert_eq!( - get_non_default_port(&"hyper.rs:123".parse::().unwrap()) - .unwrap() - .as_u16(), - 123 - ); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/conn/http1.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/conn/http1.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/conn/http1.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/conn/http1.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,539 +0,0 @@ -//! 
HTTP/1 client connections - -use std::error::Error as StdError; -use std::fmt; - -use bytes::Bytes; -use http::{Request, Response}; -use httparse::ParserConfig; -use tokio::io::{AsyncRead, AsyncWrite}; - -use crate::body::{Body as IncomingBody, HttpBody as Body}; -use super::super::dispatch; -use crate::common::{ - task, Future, Pin, Poll, -}; -use crate::proto; -use crate::upgrade::Upgraded; - -type Dispatcher = - proto::dispatch::Dispatcher, B, T, proto::h1::ClientTransaction>; - -/// The sender side of an established connection. -pub struct SendRequest { - dispatch: dispatch::Sender, Response>, -} - -/// Deconstructed parts of a `Connection`. -/// -/// This allows taking apart a `Connection` at a later time, in order to -/// reclaim the IO object, and additional related pieces. -#[derive(Debug)] -pub struct Parts { - /// The original IO object used in the handshake. - pub io: T, - /// A buffer of bytes that have been read but not processed as HTTP. - /// - /// For instance, if the `Connection` is used for an HTTP upgrade request, - /// it is possible the server sent back the first bytes of the new protocol - /// along with the response upgrade. - /// - /// You will want to check for any existing bytes if you plan to continue - /// communicating on the IO object. - pub read_buf: Bytes, - _inner: (), -} - - -/// A future that processes all HTTP state for the IO object. -/// -/// In most cases, this should just be spawned into an executor, so that it -/// can process incoming and outgoing messages, notice hangups, and the like. -#[must_use = "futures do nothing unless polled"] -pub struct Connection -where - T: AsyncRead + AsyncWrite + Send + 'static, - B: Body + 'static, -{ - inner: Option>, -} - -impl Connection -where - T: AsyncRead + AsyncWrite + Send + Unpin + 'static, - B: Body + 'static, - B::Error: Into>, -{ - /// Return the inner IO object, and additional information. - /// - /// Only works for HTTP/1 connections. HTTP/2 connections will panic. 
- pub fn into_parts(self) -> Parts { - let (io, read_buf, _) = self.inner.expect("already upgraded").into_inner(); - Parts { - io, - read_buf, - _inner: (), - } - } - - /// Poll the connection for completion, but without calling `shutdown` - /// on the underlying IO. - /// - /// This is useful to allow running a connection while doing an HTTP - /// upgrade. Once the upgrade is completed, the connection would be "done", - /// but it is not desired to actually shutdown the IO object. Instead you - /// would take it back using `into_parts`. - /// - /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html) - /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html) - /// to work with this function; or use the `without_shutdown` wrapper. - pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { - self.inner.as_mut().expect("algready upgraded").poll_without_shutdown(cx) - } -} - -/// A builder to configure an HTTP connection. -/// -/// After setting options, the builder is used to create a handshake future. -#[derive(Clone, Debug)] -pub struct Builder { - h09_responses: bool, - h1_parser_config: ParserConfig, - h1_writev: Option, - h1_title_case_headers: bool, - h1_preserve_header_case: bool, - #[cfg(feature = "ffi")] - h1_preserve_header_order: bool, - h1_read_buf_exact_size: Option, - h1_max_buf_size: Option, -} - -/// Returns a handshake future over some IO. -/// -/// This is a shortcut for `Builder::new().handshake(io)`. -/// See [`client::conn`](crate::client::conn) for more. -pub async fn handshake( - io: T, -) -> crate::Result<(SendRequest, Connection)> -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + 'static, - B::Data: Send, - B::Error: Into>, -{ - Builder::new().handshake(io).await -} - -// ===== impl SendRequest - -impl SendRequest { - /// Polls to determine whether this sender can be used yet for a request. 
- /// - /// If the associated connection is closed, this returns an Error. - pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - self.dispatch.poll_ready(cx) - } - - /// Waits until the dispatcher is ready - /// - /// If the associated connection is closed, this returns an Error. - pub async fn ready(&mut self) -> crate::Result<()> { - futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await - } - - /* - pub(super) async fn when_ready(self) -> crate::Result { - let mut me = Some(self); - future::poll_fn(move |cx| { - ready!(me.as_mut().unwrap().poll_ready(cx))?; - Poll::Ready(Ok(me.take().unwrap())) - }) - .await - } - - pub(super) fn is_ready(&self) -> bool { - self.dispatch.is_ready() - } - - pub(super) fn is_closed(&self) -> bool { - self.dispatch.is_closed() - } - */ -} - -impl SendRequest -where - B: Body + 'static, -{ - /// Sends a `Request` on the associated connection. - /// - /// Returns a future that if successful, yields the `Response`. - /// - /// # Note - /// - /// There are some key differences in what automatic things the `Client` - /// does for you that will not be done here: - /// - /// - `Client` requires absolute-form `Uri`s, since the scheme and - /// authority are needed to connect. They aren't required here. - /// - Since the `Client` requires absolute-form `Uri`s, it can add - /// the `Host` header based on it. You must add a `Host` header yourself - /// before calling this method. - /// - Since absolute-form `Uri`s are not required, if received, they will - /// be serialized as-is. - pub fn send_request( - &mut self, - req: Request, - ) -> impl Future>> { - let sent = self.dispatch.send(req); - - async move { - match sent { - Ok(rx) => match rx.await { - Ok(Ok(resp)) => Ok(resp), - Ok(Err(err)) => Err(err), - // this is definite bug if it happens, but it shouldn't happen! 
- Err(_canceled) => panic!("dispatch dropped without returning error"), - }, - Err(_req) => { - tracing::debug!("connection was not ready"); - - Err(crate::Error::new_canceled().with("connection was not ready")) - } - } - } - } - - /* - pub(super) fn send_request_retryable( - &mut self, - req: Request, - ) -> impl Future, (crate::Error, Option>)>> + Unpin - where - B: Send, - { - match self.dispatch.try_send(req) { - Ok(rx) => { - Either::Left(rx.then(move |res| { - match res { - Ok(Ok(res)) => future::ok(res), - Ok(Err(err)) => future::err(err), - // this is definite bug if it happens, but it shouldn't happen! - Err(_) => panic!("dispatch dropped without returning error"), - } - })) - } - Err(req) => { - tracing::debug!("connection was not ready"); - let err = crate::Error::new_canceled().with("connection was not ready"); - Either::Right(future::err((err, Some(req)))) - } - } - } - */ -} - -impl fmt::Debug for SendRequest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SendRequest").finish() - } -} - -// ===== impl Connection - -impl fmt::Debug for Connection -where - T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, - B: Body + 'static, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Connection").finish() - } -} - -impl Future for Connection -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Output = crate::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? 
{ - proto::Dispatched::Shutdown => Poll::Ready(Ok(())), - proto::Dispatched::Upgrade(pending) => match self.inner.take() { - Some(h1) => { - let (io, buf, _) = h1.into_inner(); - pending.fulfill(Upgraded::new(io, buf)); - Poll::Ready(Ok(())) - } - _ => { - drop(pending); - unreachable!("Upgraded twice"); - } - }, - } - } -} - -// ===== impl Builder - -impl Builder { - /// Creates a new connection builder. - #[inline] - pub fn new() -> Builder { - Builder { - h09_responses: false, - h1_writev: None, - h1_read_buf_exact_size: None, - h1_parser_config: Default::default(), - h1_title_case_headers: false, - h1_preserve_header_case: false, - #[cfg(feature = "ffi")] - h1_preserve_header_order: false, - h1_max_buf_size: None, - } - } - - /// Set whether HTTP/0.9 responses should be tolerated. - /// - /// Default is false. - pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder { - self.h09_responses = enabled; - self - } - - /// Set whether HTTP/1 connections will accept spaces between header names - /// and the colon that follow them in responses. - /// - /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has - /// to say about it: - /// - /// > No whitespace is allowed between the header field-name and colon. In - /// > the past, differences in the handling of such whitespace have led to - /// > security vulnerabilities in request routing and response handling. A - /// > server MUST reject any received request message that contains - /// > whitespace between a header field-name and colon with a response code - /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a - /// > response message before forwarding the message downstream. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. 
- /// - /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 - pub fn allow_spaces_after_header_name_in_responses( - &mut self, - enabled: bool, - ) -> &mut Builder { - self.h1_parser_config - .allow_spaces_after_header_name_in_responses(enabled); - self - } - - /// Set whether HTTP/1 connections will accept obsolete line folding for - /// header values. - /// - /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when - /// parsing. - /// - /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has - /// to say about it: - /// - /// > A server that receives an obs-fold in a request message that is not - /// > within a message/http container MUST either reject the message by - /// > sending a 400 (Bad Request), preferably with a representation - /// > explaining that obsolete line folding is unacceptable, or replace - /// > each received obs-fold with one or more SP octets prior to - /// > interpreting the field value or forwarding the message downstream. - /// - /// > A proxy or gateway that receives an obs-fold in a response message - /// > that is not within a message/http container MUST either discard the - /// > message and replace it with a 502 (Bad Gateway) response, preferably - /// > with a representation explaining that unacceptable line folding was - /// > received, or replace each received obs-fold with one or more SP - /// > octets prior to interpreting the field value or forwarding the - /// > message downstream. - /// - /// > A user agent that receives an obs-fold in a response message that is - /// > not within a message/http container MUST replace each received - /// > obs-fold with one or more SP octets prior to interpreting the field - /// > value. - /// - /// Default is false. 
- /// - /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 - pub fn allow_obsolete_multiline_headers_in_responses( - &mut self, - enabled: bool, - ) -> &mut Builder { - self.h1_parser_config - .allow_obsolete_multiline_headers_in_responses(enabled); - self - } - - /// Set whether HTTP/1 connections will silently ignored malformed header lines. - /// - /// If this is enabled and and a header line does not start with a valid header - /// name, or does not include a colon at all, the line will be silently ignored - /// and no error will be reported. - /// - /// Default is false. - pub fn ignore_invalid_headers_in_responses( - &mut self, - enabled: bool, - ) -> &mut Builder { - self.h1_parser_config - .ignore_invalid_headers_in_responses(enabled); - self - } - - /// Set whether HTTP/1 connections should try to use vectored writes, - /// or always flatten into a single buffer. - /// - /// Note that setting this to false may mean more copies of body data, - /// but may also improve performance when an IO transport doesn't - /// support vectored writes well, such as most TLS implementations. - /// - /// Setting this to true will force hyper to use queued strategy - /// which may eliminate unnecessary cloning on some TLS backends - /// - /// Default is `auto`. In this mode hyper will try to guess which - /// mode to use - pub fn writev(&mut self, enabled: bool) -> &mut Builder { - self.h1_writev = Some(enabled); - self - } - - /// Set whether HTTP/1 connections will write header names as title case at - /// the socket level. - /// - /// Default is false. - pub fn title_case_headers(&mut self, enabled: bool) -> &mut Builder { - self.h1_title_case_headers = enabled; - self - } - - /// Set whether to support preserving original header cases. - /// - /// Currently, this will record the original cases received, and store them - /// in a private extension on the `Response`. 
It will also look for and use - /// such an extension in any provided `Request`. - /// - /// Since the relevant extension is still private, there is no way to - /// interact with the original cases. The only effect this can have now is - /// to forward the cases in a proxy-like fashion. - /// - /// Default is false. - pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Builder { - self.h1_preserve_header_case = enabled; - self - } - - /// Set whether to support preserving original header order. - /// - /// Currently, this will record the order in which headers are received, and store this - /// ordering in a private extension on the `Response`. It will also look for and use - /// such an extension in any provided `Request`. - /// - /// Default is false. - #[cfg(feature = "ffi")] - pub fn preserve_header_order(&mut self, enabled: bool) -> &mut Builder { - self.h1_preserve_header_order = enabled; - self - } - - /// Sets the exact size of the read buffer to *always* use. - /// - /// Note that setting this option unsets the `max_buf_size` option. - /// - /// Default is an adaptive read buffer. - pub fn read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { - self.h1_read_buf_exact_size = sz; - self.h1_max_buf_size = None; - self - } - - /// Set the maximum buffer size for the connection. - /// - /// Default is ~400kb. - /// - /// Note that setting this option unsets the `read_exact_buf_size` option. - /// - /// # Panics - /// - /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. - pub fn max_buf_size(&mut self, max: usize) -> &mut Self { - assert!( - max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, - "the max_buf_size cannot be smaller than the minimum that h1 specifies." - ); - - self.h1_max_buf_size = Some(max); - self.h1_read_buf_exact_size = None; - self - } - - /// Constructs a connection with the configured options and IO. - /// See [`client::conn`](crate::client::conn) for more. 
- /// - /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will - /// do nothing. - pub fn handshake( - &self, - io: T, - ) -> impl Future, Connection)>> - where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + 'static, - B::Data: Send, - B::Error: Into>, - { - let opts = self.clone(); - - async move { - tracing::trace!("client handshake HTTP/1"); - - let (tx, rx) = dispatch::channel(); - let mut conn = proto::Conn::new(io); - conn.set_h1_parser_config(opts.h1_parser_config); - if let Some(writev) = opts.h1_writev { - if writev { - conn.set_write_strategy_queue(); - } else { - conn.set_write_strategy_flatten(); - } - } - if opts.h1_title_case_headers { - conn.set_title_case_headers(); - } - if opts.h1_preserve_header_case { - conn.set_preserve_header_case(); - } - #[cfg(feature = "ffi")] - if opts.h1_preserve_header_order { - conn.set_preserve_header_order(); - } - - if opts.h09_responses { - conn.set_h09_responses(); - } - - if let Some(sz) = opts.h1_read_buf_exact_size { - conn.set_read_buf_exact_size(sz); - } - if let Some(max) = opts.h1_max_buf_size { - conn.set_max_buf_size(max); - } - let cd = proto::h1::dispatch::Client::new(rx); - let proto = proto::h1::Dispatcher::new(cd, conn); - - Ok(( - SendRequest { dispatch: tx }, - Connection { inner: Some(proto) }, - )) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/conn/http2.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/conn/http2.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/conn/http2.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/conn/http2.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,427 +0,0 @@ -//! 
HTTP/2 client connections - -use std::error::Error as StdError; -use std::fmt; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::Duration; - -use http::{Request, Response}; -use tokio::io::{AsyncRead, AsyncWrite}; - -use super::super::dispatch; -use crate::body::{HttpBody as Body, Body as IncomingBody}; -use crate::common::{ - exec::{BoxSendFuture, Exec}, - task, Future, Pin, Poll, -}; -use crate::proto; -use crate::rt::Executor; - -/// The sender side of an established connection. -pub struct SendRequest { - dispatch: dispatch::UnboundedSender, Response>, -} - -impl Clone for SendRequest { - fn clone(&self) -> SendRequest { - SendRequest { dispatch: self.dispatch.clone() } - } -} - -/// A future that processes all HTTP state for the IO object. -/// -/// In most cases, this should just be spawned into an executor, so that it -/// can process incoming and outgoing messages, notice hangups, and the like. -#[must_use = "futures do nothing unless polled"] -pub struct Connection -where - T: AsyncRead + AsyncWrite + Send + 'static, - B: Body + 'static, -{ - inner: (PhantomData, proto::h2::ClientTask), -} - -/// A builder to configure an HTTP connection. -/// -/// After setting options, the builder is used to create a handshake future. -#[derive(Clone, Debug)] -pub struct Builder { - pub(super) exec: Exec, - h2_builder: proto::h2::client::Config, -} - -/// Returns a handshake future over some IO. -/// -/// This is a shortcut for `Builder::new().handshake(io)`. -/// See [`client::conn`](crate::client::conn) for more. -pub async fn handshake( - exec: E, - io: T, -) -> crate::Result<(SendRequest, Connection)> -where - E: Executor + Send + Sync + 'static, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + 'static, - B::Data: Send, - B::Error: Into>, -{ - Builder::new(exec).handshake(io).await -} - -// ===== impl SendRequest - -impl SendRequest { - /// Polls to determine whether this sender can be used yet for a request. 
- /// - /// If the associated connection is closed, this returns an Error. - pub fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { - if self.is_closed() { - Poll::Ready(Err(crate::Error::new_closed())) - } else { - Poll::Ready(Ok(())) - } - } - - /// Waits until the dispatcher is ready - /// - /// If the associated connection is closed, this returns an Error. - pub async fn ready(&mut self) -> crate::Result<()> { - futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await - } - - /* - pub(super) async fn when_ready(self) -> crate::Result { - let mut me = Some(self); - future::poll_fn(move |cx| { - ready!(me.as_mut().unwrap().poll_ready(cx))?; - Poll::Ready(Ok(me.take().unwrap())) - }) - .await - } - - pub(super) fn is_ready(&self) -> bool { - self.dispatch.is_ready() - } - */ - - pub(super) fn is_closed(&self) -> bool { - self.dispatch.is_closed() - } -} - -impl SendRequest -where - B: Body + 'static, -{ - /// Sends a `Request` on the associated connection. - /// - /// Returns a future that if successful, yields the `Response`. - /// - /// # Note - /// - /// There are some key differences in what automatic things the `Client` - /// does for you that will not be done here: - /// - /// - `Client` requires absolute-form `Uri`s, since the scheme and - /// authority are needed to connect. They aren't required here. - /// - Since the `Client` requires absolute-form `Uri`s, it can add - /// the `Host` header based on it. You must add a `Host` header yourself - /// before calling this method. - /// - Since absolute-form `Uri`s are not required, if received, they will - /// be serialized as-is. - pub fn send_request( - &mut self, - req: Request, - ) -> impl Future>> { - let sent = self.dispatch.send(req); - - async move { - match sent { - Ok(rx) => match rx.await { - Ok(Ok(resp)) => Ok(resp), - Ok(Err(err)) => Err(err), - // this is definite bug if it happens, but it shouldn't happen! 
- Err(_canceled) => panic!("dispatch dropped without returning error"), - }, - Err(_req) => { - tracing::debug!("connection was not ready"); - - Err(crate::Error::new_canceled().with("connection was not ready")) - } - } - } - } - - /* - pub(super) fn send_request_retryable( - &mut self, - req: Request, - ) -> impl Future, (crate::Error, Option>)>> + Unpin - where - B: Send, - { - match self.dispatch.try_send(req) { - Ok(rx) => { - Either::Left(rx.then(move |res| { - match res { - Ok(Ok(res)) => future::ok(res), - Ok(Err(err)) => future::err(err), - // this is definite bug if it happens, but it shouldn't happen! - Err(_) => panic!("dispatch dropped without returning error"), - } - })) - } - Err(req) => { - tracing::debug!("connection was not ready"); - let err = crate::Error::new_canceled().with("connection was not ready"); - Either::Right(future::err((err, Some(req)))) - } - } - } - */ -} - -impl fmt::Debug for SendRequest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SendRequest").finish() - } -} - -// ===== impl Connection - -impl Connection -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + Unpin + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - /// Returns whether the [extended CONNECT protocol][1] is enabled or not. - /// - /// This setting is configured by the server peer by sending the - /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. - /// This method returns the currently acknowledged value received from the - /// remote. 
- /// - /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 - /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3 - pub fn is_extended_connect_protocol_enabled(&self) -> bool { - self.inner.1.is_extended_connect_protocol_enabled() - } -} - -impl fmt::Debug for Connection -where - T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, - B: Body + 'static, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Connection").finish() - } -} - -impl Future for Connection -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Output = crate::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match ready!(Pin::new(&mut self.inner.1).poll(cx))? { - proto::Dispatched::Shutdown => Poll::Ready(Ok(())), - #[cfg(feature = "http1")] - proto::Dispatched::Upgrade(_pending) => unreachable!("http2 cannot upgrade"), - } - } -} - -// ===== impl Builder - -impl Builder { - /// Creates a new connection builder. - #[inline] - pub fn new(exec: E) -> Builder - where - E: Executor + Send + Sync + 'static, - { - use std::sync::Arc; - Builder { - exec: Exec::Executor(Arc::new(exec)), - h2_builder: Default::default(), - } - } - - /// Provide an executor to execute background HTTP2 tasks. - pub fn executor(&mut self, exec: E) -> &mut Builder - where - E: Executor + Send + Sync + 'static, - { - self.exec = Exec::Executor(Arc::new(exec)); - self - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. 
- /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.adaptive_window = false; - self.h2_builder.initial_stream_window_size = sz; - } - self - } - - /// Sets the max connection-level flow control for HTTP2 - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - pub fn initial_connection_window_size( - &mut self, - sz: impl Into>, - ) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.adaptive_window = false; - self.h2_builder.initial_conn_window_size = sz; - } - self - } - - /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `initial_stream_window_size` and - /// `initial_connection_window_size`. - pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { - use proto::h2::SPEC_WINDOW_SIZE; - - self.h2_builder.adaptive_window = enabled; - if enabled { - self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; - self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; - } - self - } - - /// Sets the maximum frame size to use for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.max_frame_size = sz; - } - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - #[cfg(feature = "runtime")] - pub fn keep_alive_interval( - &mut self, - interval: impl Into>, - ) -> &mut Self { - self.h2_builder.keep_alive_interval = interval.into(); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. 
- /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - #[cfg(feature = "runtime")] - pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { - self.h2_builder.keep_alive_timeout = timeout; - self - } - - /// Sets whether HTTP2 keep-alive should apply while the connection is idle. - /// - /// If disabled, keep-alive pings are only sent while there are open - /// request/responses streams. If enabled, pings are also sent when no - /// streams are active. Does nothing if `keep_alive_interval` is - /// disabled. - /// - /// Default is `false`. - #[cfg(feature = "runtime")] - pub fn keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { - self.h2_builder.keep_alive_while_idle = enabled; - self - } - - /// Sets the maximum number of HTTP2 concurrent locally reset streams. - /// - /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more - /// details. - /// - /// The default value is determined by the `h2` crate. - /// - /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams - pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { - self.h2_builder.max_concurrent_reset_streams = Some(max); - self - } - - /// Set the maximum write buffer size for each HTTP/2 stream. - /// - /// Default is currently 1MB, but may change. - /// - /// # Panics - /// - /// The value must be no larger than `u32::MAX`. - pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { - assert!(max <= std::u32::MAX as usize); - self.h2_builder.max_send_buffer_size = max; - self - } - - /// Constructs a connection with the configured options and IO. - /// See [`client::conn`](crate::client::conn) for more. - /// - /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will - /// do nothing. 
- pub fn handshake( - &self, - io: T, - ) -> impl Future, Connection)>> - where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + 'static, - B::Data: Send, - B::Error: Into>, - { - let opts = self.clone(); - - async move { - tracing::trace!("client handshake HTTP/1"); - - let (tx, rx) = dispatch::channel(); - let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec) - .await?; - Ok(( - SendRequest { - dispatch: tx.unbound(), - }, - Connection { - inner: (PhantomData, h2), - }, - )) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/connect/dns.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/connect/dns.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/connect/dns.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/connect/dns.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,425 +0,0 @@ -//! DNS Resolution used by the `HttpConnector`. -//! -//! This module contains: -//! -//! - A [`GaiResolver`](GaiResolver) that is the default resolver for the -//! `HttpConnector`. -//! - The `Name` type used as an argument to custom resolvers. -//! -//! # Resolvers are `Service`s -//! -//! A resolver is just a -//! `Service>`. -//! -//! A simple resolver that ignores the name and always returns a specific -//! address: -//! -//! ```rust,ignore -//! use std::{convert::Infallible, iter, net::SocketAddr}; -//! -//! let resolver = tower::service_fn(|_name| async { -//! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080)))) -//! }); -//! ``` -use std::error::Error; -use std::future::Future; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}; -use std::pin::Pin; -use std::str::FromStr; -use std::task::{self, Poll}; -use std::{fmt, io, vec}; - -use tokio::task::JoinHandle; -use tower_service::Service; -use tracing::debug; - -pub(super) use self::sealed::Resolve; - -/// A domain name to resolve into IP addresses. 
-#[derive(Clone, Hash, Eq, PartialEq)] -pub struct Name { - host: Box, -} - -/// A resolver using blocking `getaddrinfo` calls in a threadpool. -#[derive(Clone)] -pub struct GaiResolver { - _priv: (), -} - -/// An iterator of IP addresses returned from `getaddrinfo`. -pub struct GaiAddrs { - inner: SocketAddrs, -} - -/// A future to resolve a name returned by `GaiResolver`. -pub struct GaiFuture { - inner: JoinHandle>, -} - -impl Name { - pub(super) fn new(host: Box) -> Name { - Name { host } - } - - /// View the hostname as a string slice. - pub fn as_str(&self) -> &str { - &self.host - } -} - -impl fmt::Debug for Name { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&self.host, f) - } -} - -impl fmt::Display for Name { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&self.host, f) - } -} - -impl FromStr for Name { - type Err = InvalidNameError; - - fn from_str(host: &str) -> Result { - // Possibly add validation later - Ok(Name::new(host.into())) - } -} - -/// Error indicating a given string was not a valid domain name. -#[derive(Debug)] -pub struct InvalidNameError(()); - -impl fmt::Display for InvalidNameError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("Not a valid domain name") - } -} - -impl Error for InvalidNameError {} - -impl GaiResolver { - /// Construct a new `GaiResolver`. 
- pub fn new() -> Self { - GaiResolver { _priv: () } - } -} - -impl Service for GaiResolver { - type Response = GaiAddrs; - type Error = io::Error; - type Future = GaiFuture; - - fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, name: Name) -> Self::Future { - let blocking = tokio::task::spawn_blocking(move || { - debug!("resolving host={:?}", name.host); - (&*name.host, 0) - .to_socket_addrs() - .map(|i| SocketAddrs { iter: i }) - }); - - GaiFuture { inner: blocking } - } -} - -impl fmt::Debug for GaiResolver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("GaiResolver") - } -} - -impl Future for GaiFuture { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - Pin::new(&mut self.inner).poll(cx).map(|res| match res { - Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }), - Ok(Err(err)) => Err(err), - Err(join_err) => { - if join_err.is_cancelled() { - Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) - } else { - panic!("gai background task failed: {:?}", join_err) - } - } - }) - } -} - -impl fmt::Debug for GaiFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("GaiFuture") - } -} - -impl Drop for GaiFuture { - fn drop(&mut self) { - self.inner.abort(); - } -} - -impl Iterator for GaiAddrs { - type Item = SocketAddr; - - fn next(&mut self) -> Option { - self.inner.next() - } -} - -impl fmt::Debug for GaiAddrs { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("GaiAddrs") - } -} - -pub(super) struct SocketAddrs { - iter: vec::IntoIter, -} - -impl SocketAddrs { - pub(super) fn new(addrs: Vec) -> Self { - SocketAddrs { - iter: addrs.into_iter(), - } - } - - pub(super) fn try_parse(host: &str, port: u16) -> Option { - if let Ok(addr) = host.parse::() { - let addr = SocketAddrV4::new(addr, port); - return Some(SocketAddrs { - iter: vec![SocketAddr::V4(addr)].into_iter(), - }); - } - if 
let Ok(addr) = host.parse::() { - let addr = SocketAddrV6::new(addr, port, 0, 0); - return Some(SocketAddrs { - iter: vec![SocketAddr::V6(addr)].into_iter(), - }); - } - None - } - - #[inline] - fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs { - SocketAddrs::new(self.iter.filter(predicate).collect()) - } - - pub(super) fn split_by_preference( - self, - local_addr_ipv4: Option, - local_addr_ipv6: Option, - ) -> (SocketAddrs, SocketAddrs) { - match (local_addr_ipv4, local_addr_ipv6) { - (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])), - (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])), - _ => { - let preferring_v6 = self - .iter - .as_slice() - .first() - .map(SocketAddr::is_ipv6) - .unwrap_or(false); - - let (preferred, fallback) = self - .iter - .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); - - (SocketAddrs::new(preferred), SocketAddrs::new(fallback)) - } - } - } - - pub(super) fn is_empty(&self) -> bool { - self.iter.as_slice().is_empty() - } - - pub(super) fn len(&self) -> usize { - self.iter.as_slice().len() - } -} - -impl Iterator for SocketAddrs { - type Item = SocketAddr; - #[inline] - fn next(&mut self) -> Option { - self.iter.next() - } -} - -/* -/// A resolver using `getaddrinfo` calls via the `tokio_executor::threadpool::blocking` API. -/// -/// Unlike the `GaiResolver` this will not spawn dedicated threads, but only works when running on the -/// multi-threaded Tokio runtime. -#[cfg(feature = "runtime")] -#[derive(Clone, Debug)] -pub struct TokioThreadpoolGaiResolver(()); - -/// The future returned by `TokioThreadpoolGaiResolver`. -#[cfg(feature = "runtime")] -#[derive(Debug)] -pub struct TokioThreadpoolGaiFuture { - name: Name, -} - -#[cfg(feature = "runtime")] -impl TokioThreadpoolGaiResolver { - /// Creates a new DNS resolver that will use tokio threadpool's blocking - /// feature. 
- /// - /// **Requires** its futures to be run on the threadpool runtime. - pub fn new() -> Self { - TokioThreadpoolGaiResolver(()) - } -} - -#[cfg(feature = "runtime")] -impl Service for TokioThreadpoolGaiResolver { - type Response = GaiAddrs; - type Error = io::Error; - type Future = TokioThreadpoolGaiFuture; - - fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, name: Name) -> Self::Future { - TokioThreadpoolGaiFuture { name } - } -} - -#[cfg(feature = "runtime")] -impl Future for TokioThreadpoolGaiFuture { - type Output = Result; - - fn poll(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll { - match ready!(tokio_executor::threadpool::blocking(|| ( - self.name.as_str(), - 0 - ) - .to_socket_addrs())) - { - Ok(Ok(iter)) => Poll::Ready(Ok(GaiAddrs { - inner: IpAddrs { iter }, - })), - Ok(Err(e)) => Poll::Ready(Err(e)), - // a BlockingError, meaning not on a tokio_executor::threadpool :( - Err(e) => Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, e))), - } - } -} -*/ - -mod sealed { - use super::{SocketAddr, Name}; - use crate::common::{task, Future, Poll}; - use tower_service::Service; - - // "Trait alias" for `Service` - pub trait Resolve { - type Addrs: Iterator; - type Error: Into>; - type Future: Future>; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; - fn resolve(&mut self, name: Name) -> Self::Future; - } - - impl Resolve for S - where - S: Service, - S::Response: Iterator, - S::Error: Into>, - { - type Addrs = S::Response; - type Error = S::Error; - type Future = S::Future; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - Service::poll_ready(self, cx) - } - - fn resolve(&mut self, name: Name) -> Self::Future { - Service::call(self, name) - } - } -} - -pub(super) async fn resolve(resolver: &mut R, name: Name) -> Result -where - R: Resolve, -{ - futures_util::future::poll_fn(|cx| resolver.poll_ready(cx)).await?; - resolver.resolve(name).await 
-} - -#[cfg(test)] -mod tests { - use super::*; - use std::net::{Ipv4Addr, Ipv6Addr}; - - #[test] - fn test_ip_addrs_split_by_preference() { - let ip_v4 = Ipv4Addr::new(127, 0, 0, 1); - let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - let v4_addr = (ip_v4, 80).into(); - let v6_addr = (ip_v6, 80).into(); - - let (mut preferred, mut fallback) = SocketAddrs { - iter: vec![v4_addr, v6_addr].into_iter(), - } - .split_by_preference(None, None); - assert!(preferred.next().unwrap().is_ipv4()); - assert!(fallback.next().unwrap().is_ipv6()); - - let (mut preferred, mut fallback) = SocketAddrs { - iter: vec![v6_addr, v4_addr].into_iter(), - } - .split_by_preference(None, None); - assert!(preferred.next().unwrap().is_ipv6()); - assert!(fallback.next().unwrap().is_ipv4()); - - let (mut preferred, mut fallback) = SocketAddrs { - iter: vec![v4_addr, v6_addr].into_iter(), - } - .split_by_preference(Some(ip_v4), Some(ip_v6)); - assert!(preferred.next().unwrap().is_ipv4()); - assert!(fallback.next().unwrap().is_ipv6()); - - let (mut preferred, mut fallback) = SocketAddrs { - iter: vec![v6_addr, v4_addr].into_iter(), - } - .split_by_preference(Some(ip_v4), Some(ip_v6)); - assert!(preferred.next().unwrap().is_ipv6()); - assert!(fallback.next().unwrap().is_ipv4()); - - let (mut preferred, fallback) = SocketAddrs { - iter: vec![v4_addr, v6_addr].into_iter(), - } - .split_by_preference(Some(ip_v4), None); - assert!(preferred.next().unwrap().is_ipv4()); - assert!(fallback.is_empty()); - - let (mut preferred, fallback) = SocketAddrs { - iter: vec![v4_addr, v6_addr].into_iter(), - } - .split_by_preference(None, Some(ip_v6)); - assert!(preferred.next().unwrap().is_ipv6()); - assert!(fallback.is_empty()); - } - - #[test] - fn test_name_from_str() { - const DOMAIN: &str = "test.example.com"; - let name = Name::from_str(DOMAIN).expect("Should be a valid domain"); - assert_eq!(name.as_str(), DOMAIN); - assert_eq!(name.to_string(), DOMAIN); - } -} diff -Nru 
s390-tools-2.31.0/rust-vendor/hyper/src/client/connect/http.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/connect/http.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/connect/http.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/connect/http.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1007 +0,0 @@ -use std::error::Error as StdError; -use std::fmt; -use std::future::Future; -use std::io; -use std::marker::PhantomData; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{self, Poll}; -use std::time::Duration; - -use futures_util::future::Either; -use http::uri::{Scheme, Uri}; -use pin_project_lite::pin_project; -use tokio::net::{TcpSocket, TcpStream}; -use tokio::time::Sleep; -use tracing::{debug, trace, warn}; - -use super::dns::{self, resolve, GaiResolver, Resolve}; -use super::{Connected, Connection}; -//#[cfg(feature = "runtime")] use super::dns::TokioThreadpoolGaiResolver; - -/// A connector for the `http` scheme. -/// -/// Performs DNS resolution in a thread pool, and then connects over TCP. -/// -/// # Note -/// -/// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes -/// transport information such as the remote socket address used. -#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))] -#[derive(Clone)] -pub struct HttpConnector { - config: Arc, - resolver: R, -} - -/// Extra information about the transport when an HttpConnector is used. 
-/// -/// # Example -/// -/// ``` -/// # async fn doc() -> hyper::Result<()> { -/// use hyper::Uri; -/// use hyper::client::{Client, connect::HttpInfo}; -/// -/// let client = Client::new(); -/// let uri = Uri::from_static("http://example.com"); -/// -/// let res = client.get(uri).await?; -/// res -/// .extensions() -/// .get::() -/// .map(|info| { -/// println!("remote addr = {}", info.remote_addr()); -/// }); -/// # Ok(()) -/// # } -/// ``` -/// -/// # Note -/// -/// If a different connector is used besides [`HttpConnector`](HttpConnector), -/// this value will not exist in the extensions. Consult that specific -/// connector to see what "extra" information it might provide to responses. -#[derive(Clone, Debug)] -pub struct HttpInfo { - remote_addr: SocketAddr, - local_addr: SocketAddr, -} - -#[derive(Clone)] -struct Config { - connect_timeout: Option, - enforce_http: bool, - happy_eyeballs_timeout: Option, - keep_alive_timeout: Option, - local_address_ipv4: Option, - local_address_ipv6: Option, - nodelay: bool, - reuse_address: bool, - send_buffer_size: Option, - recv_buffer_size: Option, -} - -// ===== impl HttpConnector ===== - -impl HttpConnector { - /// Construct a new HttpConnector. - pub fn new() -> HttpConnector { - HttpConnector::new_with_resolver(GaiResolver::new()) - } -} - -/* -#[cfg(feature = "runtime")] -impl HttpConnector { - /// Construct a new HttpConnector using the `TokioThreadpoolGaiResolver`. - /// - /// This resolver **requires** the threadpool runtime to be used. - pub fn new_with_tokio_threadpool_resolver() -> Self { - HttpConnector::new_with_resolver(TokioThreadpoolGaiResolver::new()) - } -} -*/ - -impl HttpConnector { - /// Construct a new HttpConnector. - /// - /// Takes a [`Resolver`](crate::client::connect::dns#resolvers-are-services) to handle DNS lookups. 
- pub fn new_with_resolver(resolver: R) -> HttpConnector { - HttpConnector { - config: Arc::new(Config { - connect_timeout: None, - enforce_http: true, - happy_eyeballs_timeout: Some(Duration::from_millis(300)), - keep_alive_timeout: None, - local_address_ipv4: None, - local_address_ipv6: None, - nodelay: false, - reuse_address: false, - send_buffer_size: None, - recv_buffer_size: None, - }), - resolver, - } - } - - /// Option to enforce all `Uri`s have the `http` scheme. - /// - /// Enabled by default. - #[inline] - pub fn enforce_http(&mut self, is_enforced: bool) { - self.config_mut().enforce_http = is_enforced; - } - - /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration. - /// - /// If `None`, the option will not be set. - /// - /// Default is `None`. - #[inline] - pub fn set_keepalive(&mut self, dur: Option) { - self.config_mut().keep_alive_timeout = dur; - } - - /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`. - /// - /// Default is `false`. - #[inline] - pub fn set_nodelay(&mut self, nodelay: bool) { - self.config_mut().nodelay = nodelay; - } - - /// Sets the value of the SO_SNDBUF option on the socket. - #[inline] - pub fn set_send_buffer_size(&mut self, size: Option) { - self.config_mut().send_buffer_size = size; - } - - /// Sets the value of the SO_RCVBUF option on the socket. - #[inline] - pub fn set_recv_buffer_size(&mut self, size: Option) { - self.config_mut().recv_buffer_size = size; - } - - /// Set that all sockets are bound to the configured address before connection. - /// - /// If `None`, the sockets will not be bound. - /// - /// Default is `None`. 
- #[inline] - pub fn set_local_address(&mut self, addr: Option) { - let (v4, v6) = match addr { - Some(IpAddr::V4(a)) => (Some(a), None), - Some(IpAddr::V6(a)) => (None, Some(a)), - _ => (None, None), - }; - - let cfg = self.config_mut(); - - cfg.local_address_ipv4 = v4; - cfg.local_address_ipv6 = v6; - } - - /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's - /// preferences) before connection. - #[inline] - pub fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) { - let cfg = self.config_mut(); - - cfg.local_address_ipv4 = Some(addr_ipv4); - cfg.local_address_ipv6 = Some(addr_ipv6); - } - - /// Set the connect timeout. - /// - /// If a domain resolves to multiple IP addresses, the timeout will be - /// evenly divided across them. - /// - /// Default is `None`. - #[inline] - pub fn set_connect_timeout(&mut self, dur: Option) { - self.config_mut().connect_timeout = dur; - } - - /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm. - /// - /// If hostname resolves to both IPv4 and IPv6 addresses and connection - /// cannot be established using preferred address family before timeout - /// elapses, then connector will in parallel attempt connection using other - /// address family. - /// - /// If `None`, parallel connection attempts are disabled. - /// - /// Default is 300 milliseconds. - /// - /// [RFC 6555]: https://tools.ietf.org/html/rfc6555 - #[inline] - pub fn set_happy_eyeballs_timeout(&mut self, dur: Option) { - self.config_mut().happy_eyeballs_timeout = dur; - } - - /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`. - /// - /// Default is `false`. - #[inline] - pub fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self { - self.config_mut().reuse_address = reuse_address; - self - } - - // private - - fn config_mut(&mut self) -> &mut Config { - // If the are HttpConnector clones, this will clone the inner - // config. 
So mutating the config won't ever affect previous - // clones. - Arc::make_mut(&mut self.config) - } -} - -static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http"; -static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing"; -static INVALID_MISSING_HOST: &str = "invalid URL, host is missing"; - -// R: Debug required for now to allow adding it to debug output later... -impl fmt::Debug for HttpConnector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("HttpConnector").finish() - } -} - -impl tower_service::Service for HttpConnector -where - R: Resolve + Clone + Send + Sync + 'static, - R::Future: Send, -{ - type Response = TcpStream; - type Error = ConnectError; - type Future = HttpConnecting; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?; - Poll::Ready(Ok(())) - } - - fn call(&mut self, dst: Uri) -> Self::Future { - let mut self_ = self.clone(); - HttpConnecting { - fut: Box::pin(async move { self_.call_async(dst).await }), - _marker: PhantomData, - } - } -} - -fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> { - trace!( - "Http::connect; scheme={:?}, host={:?}, port={:?}", - dst.scheme(), - dst.host(), - dst.port(), - ); - - if config.enforce_http { - if dst.scheme() != Some(&Scheme::HTTP) { - return Err(ConnectError { - msg: INVALID_NOT_HTTP.into(), - cause: None, - }); - } - } else if dst.scheme().is_none() { - return Err(ConnectError { - msg: INVALID_MISSING_SCHEME.into(), - cause: None, - }); - } - - let host = match dst.host() { - Some(s) => s, - None => { - return Err(ConnectError { - msg: INVALID_MISSING_HOST.into(), - cause: None, - }) - } - }; - let port = match dst.port() { - Some(port) => port.as_u16(), - None => { - if dst.scheme() == Some(&Scheme::HTTPS) { - 443 - } else { - 80 - } - } - }; - - Ok((host, port)) -} - -impl HttpConnector -where - R: Resolve, -{ - async fn 
call_async(&mut self, dst: Uri) -> Result { - let config = &self.config; - - let (host, port) = get_host_port(config, &dst)?; - let host = host.trim_start_matches('[').trim_end_matches(']'); - - // If the host is already an IP addr (v4 or v6), - // skip resolving the dns and start connecting right away. - let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) { - addrs - } else { - let addrs = resolve(&mut self.resolver, dns::Name::new(host.into())) - .await - .map_err(ConnectError::dns)?; - let addrs = addrs - .map(|mut addr| { - addr.set_port(port); - addr - }) - .collect(); - dns::SocketAddrs::new(addrs) - }; - - let c = ConnectingTcp::new(addrs, config); - - let sock = c.connect().await?; - - if let Err(e) = sock.set_nodelay(config.nodelay) { - warn!("tcp set_nodelay error: {}", e); - } - - Ok(sock) - } -} - -impl Connection for TcpStream { - fn connected(&self) -> Connected { - let connected = Connected::new(); - if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) { - connected.extra(HttpInfo { remote_addr, local_addr }) - } else { - connected - } - } -} - -impl HttpInfo { - /// Get the remote address of the transport used. - pub fn remote_addr(&self) -> SocketAddr { - self.remote_addr - } - - /// Get the local address of the transport used. - pub fn local_addr(&self) -> SocketAddr { - self.local_addr - } -} - -pin_project! { - // Not publicly exported (so missing_docs doesn't trigger). - // - // We return this `Future` instead of the `Pin>` directly - // so that users don't rely on it fitting in a `Pin>` slot - // (and thus we can change the type in the future). 
- #[must_use = "futures do nothing unless polled"] - #[allow(missing_debug_implementations)] - pub struct HttpConnecting { - #[pin] - fut: BoxConnecting, - _marker: PhantomData, - } -} - -type ConnectResult = Result; -type BoxConnecting = Pin + Send>>; - -impl Future for HttpConnecting { - type Output = ConnectResult; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.project().fut.poll(cx) - } -} - -// Not publicly exported (so missing_docs doesn't trigger). -pub struct ConnectError { - msg: Box, - cause: Option>, -} - -impl ConnectError { - fn new(msg: S, cause: E) -> ConnectError - where - S: Into>, - E: Into>, - { - ConnectError { - msg: msg.into(), - cause: Some(cause.into()), - } - } - - fn dns(cause: E) -> ConnectError - where - E: Into>, - { - ConnectError::new("dns error", cause) - } - - fn m(msg: S) -> impl FnOnce(E) -> ConnectError - where - S: Into>, - E: Into>, - { - move |cause| ConnectError::new(msg, cause) - } -} - -impl fmt::Debug for ConnectError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(ref cause) = self.cause { - f.debug_tuple("ConnectError") - .field(&self.msg) - .field(cause) - .finish() - } else { - self.msg.fmt(f) - } - } -} - -impl fmt::Display for ConnectError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(&self.msg)?; - - if let Some(ref cause) = self.cause { - write!(f, ": {}", cause)?; - } - - Ok(()) - } -} - -impl StdError for ConnectError { - fn source(&self) -> Option<&(dyn StdError + 'static)> { - self.cause.as_ref().map(|e| &**e as _) - } -} - -struct ConnectingTcp<'a> { - preferred: ConnectingTcpRemote, - fallback: Option, - config: &'a Config, -} - -impl<'a> ConnectingTcp<'a> { - fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self { - if let Some(fallback_timeout) = config.happy_eyeballs_timeout { - let (preferred_addrs, fallback_addrs) = remote_addrs - .split_by_preference(config.local_address_ipv4, 
config.local_address_ipv6); - if fallback_addrs.is_empty() { - return ConnectingTcp { - preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), - fallback: None, - config, - }; - } - - ConnectingTcp { - preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), - fallback: Some(ConnectingTcpFallback { - delay: tokio::time::sleep(fallback_timeout), - remote: ConnectingTcpRemote::new(fallback_addrs, config.connect_timeout), - }), - config, - } - } else { - ConnectingTcp { - preferred: ConnectingTcpRemote::new(remote_addrs, config.connect_timeout), - fallback: None, - config, - } - } - } -} - -struct ConnectingTcpFallback { - delay: Sleep, - remote: ConnectingTcpRemote, -} - -struct ConnectingTcpRemote { - addrs: dns::SocketAddrs, - connect_timeout: Option, -} - -impl ConnectingTcpRemote { - fn new(addrs: dns::SocketAddrs, connect_timeout: Option) -> Self { - let connect_timeout = connect_timeout.map(|t| t / (addrs.len() as u32)); - - Self { - addrs, - connect_timeout, - } - } -} - -impl ConnectingTcpRemote { - async fn connect(&mut self, config: &Config) -> Result { - let mut err = None; - for addr in &mut self.addrs { - debug!("connecting to {}", addr); - match connect(&addr, config, self.connect_timeout)?.await { - Ok(tcp) => { - debug!("connected to {}", addr); - return Ok(tcp); - } - Err(e) => { - trace!("connect error for {}: {:?}", addr, e); - err = Some(e); - } - } - } - - match err { - Some(e) => Err(e), - None => Err(ConnectError::new( - "tcp connect error", - std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"), - )), - } - } -} - -fn bind_local_address( - socket: &socket2::Socket, - dst_addr: &SocketAddr, - local_addr_ipv4: &Option, - local_addr_ipv6: &Option, -) -> io::Result<()> { - match (*dst_addr, local_addr_ipv4, local_addr_ipv6) { - (SocketAddr::V4(_), Some(addr), _) => { - socket.bind(&SocketAddr::new(addr.clone().into(), 0).into())?; - } - (SocketAddr::V6(_), _, Some(addr)) => { - 
socket.bind(&SocketAddr::new(addr.clone().into(), 0).into())?; - } - _ => { - if cfg!(windows) { - // Windows requires a socket be bound before calling connect - let any: SocketAddr = match *dst_addr { - SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(), - SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(), - }; - socket.bind(&any.into())?; - } - } - } - - Ok(()) -} - -fn connect( - addr: &SocketAddr, - config: &Config, - connect_timeout: Option, -) -> Result>, ConnectError> { - // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the - // keepalive timeout, it would be nice to use that instead of socket2, - // and avoid the unsafe `into_raw_fd`/`from_raw_fd` dance... - use socket2::{Domain, Protocol, Socket, TcpKeepalive, Type}; - use std::convert::TryInto; - - let domain = Domain::for_address(*addr); - let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)) - .map_err(ConnectError::m("tcp open error"))?; - - // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is - // responsible for ensuring O_NONBLOCK is set. - socket - .set_nonblocking(true) - .map_err(ConnectError::m("tcp set_nonblocking error"))?; - - if let Some(dur) = config.keep_alive_timeout { - let conf = TcpKeepalive::new().with_time(dur); - if let Err(e) = socket.set_tcp_keepalive(&conf) { - warn!("tcp set_keepalive error: {}", e); - } - } - - bind_local_address( - &socket, - addr, - &config.local_address_ipv4, - &config.local_address_ipv6, - ) - .map_err(ConnectError::m("tcp bind local error"))?; - - #[cfg(unix)] - let socket = unsafe { - // Safety: `from_raw_fd` is only safe to call if ownership of the raw - // file descriptor is transferred. Since we call `into_raw_fd` on the - // socket2 socket, it gives up ownership of the fd and will not close - // it, so this is safe. 
- use std::os::unix::io::{FromRawFd, IntoRawFd}; - TcpSocket::from_raw_fd(socket.into_raw_fd()) - }; - #[cfg(windows)] - let socket = unsafe { - // Safety: `from_raw_socket` is only safe to call if ownership of the raw - // Windows SOCKET is transferred. Since we call `into_raw_socket` on the - // socket2 socket, it gives up ownership of the SOCKET and will not close - // it, so this is safe. - use std::os::windows::io::{FromRawSocket, IntoRawSocket}; - TcpSocket::from_raw_socket(socket.into_raw_socket()) - }; - - if config.reuse_address { - if let Err(e) = socket.set_reuseaddr(true) { - warn!("tcp set_reuse_address error: {}", e); - } - } - - if let Some(size) = config.send_buffer_size { - if let Err(e) = socket.set_send_buffer_size(size.try_into().unwrap_or(std::u32::MAX)) { - warn!("tcp set_buffer_size error: {}", e); - } - } - - if let Some(size) = config.recv_buffer_size { - if let Err(e) = socket.set_recv_buffer_size(size.try_into().unwrap_or(std::u32::MAX)) { - warn!("tcp set_recv_buffer_size error: {}", e); - } - } - - let connect = socket.connect(*addr); - Ok(async move { - match connect_timeout { - Some(dur) => match tokio::time::timeout(dur, connect).await { - Ok(Ok(s)) => Ok(s), - Ok(Err(e)) => Err(e), - Err(e) => Err(io::Error::new(io::ErrorKind::TimedOut, e)), - }, - None => connect.await, - } - .map_err(ConnectError::m("tcp connect error")) - }) -} - -impl ConnectingTcp<'_> { - async fn connect(mut self) -> Result { - match self.fallback { - None => self.preferred.connect(self.config).await, - Some(mut fallback) => { - let preferred_fut = self.preferred.connect(self.config); - futures_util::pin_mut!(preferred_fut); - - let fallback_fut = fallback.remote.connect(self.config); - futures_util::pin_mut!(fallback_fut); - - let fallback_delay = fallback.delay; - futures_util::pin_mut!(fallback_delay); - - let (result, future) = - match futures_util::future::select(preferred_fut, fallback_delay).await { - Either::Left((result, _fallback_delay)) => { - 
(result, Either::Right(fallback_fut)) - } - Either::Right(((), preferred_fut)) => { - // Delay is done, start polling both the preferred and the fallback - futures_util::future::select(preferred_fut, fallback_fut) - .await - .factor_first() - } - }; - - if result.is_err() { - // Fallback to the remaining future (could be preferred or fallback) - // if we get an error - future.await - } else { - result - } - } - } - } -} - -#[cfg(test)] -mod tests { - use std::io; - - use ::http::Uri; - - use super::super::sealed::{Connect, ConnectSvc}; - use super::{Config, ConnectError, HttpConnector}; - - async fn connect( - connector: C, - dst: Uri, - ) -> Result<::Connection, ::Error> - where - C: Connect, - { - connector.connect(super::super::sealed::Internal, dst).await - } - - #[tokio::test] - async fn test_errors_enforce_http() { - let dst = "https://example.domain/foo/bar?baz".parse().unwrap(); - let connector = HttpConnector::new(); - - let err = connect(connector, dst).await.unwrap_err(); - assert_eq!(&*err.msg, super::INVALID_NOT_HTTP); - } - - #[cfg(any(target_os = "linux", target_os = "macos"))] - fn get_local_ips() -> (Option, Option) { - use std::net::{IpAddr, TcpListener}; - - let mut ip_v4 = None; - let mut ip_v6 = None; - - let ips = pnet_datalink::interfaces() - .into_iter() - .flat_map(|i| i.ips.into_iter().map(|n| n.ip())); - - for ip in ips { - match ip { - IpAddr::V4(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v4 = Some(ip), - IpAddr::V6(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v6 = Some(ip), - _ => (), - } - - if ip_v4.is_some() && ip_v6.is_some() { - break; - } - } - - (ip_v4, ip_v6) - } - - #[tokio::test] - async fn test_errors_missing_scheme() { - let dst = "example.domain".parse().unwrap(); - let mut connector = HttpConnector::new(); - connector.enforce_http(false); - - let err = connect(connector, dst).await.unwrap_err(); - assert_eq!(&*err.msg, super::INVALID_MISSING_SCHEME); - } - - // NOTE: pnet crate that we use in this test doesn't 
compile on Windows - #[cfg(any(target_os = "linux", target_os = "macos"))] - #[tokio::test] - async fn local_address() { - use std::net::{IpAddr, TcpListener}; - let _ = pretty_env_logger::try_init(); - - let (bind_ip_v4, bind_ip_v6) = get_local_ips(); - let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); - let port = server4.local_addr().unwrap().port(); - let server6 = TcpListener::bind(&format!("[::1]:{}", port)).unwrap(); - - let assert_client_ip = |dst: String, server: TcpListener, expected_ip: IpAddr| async move { - let mut connector = HttpConnector::new(); - - match (bind_ip_v4, bind_ip_v6) { - (Some(v4), Some(v6)) => connector.set_local_addresses(v4, v6), - (Some(v4), None) => connector.set_local_address(Some(v4.into())), - (None, Some(v6)) => connector.set_local_address(Some(v6.into())), - _ => unreachable!(), - } - - connect(connector, dst.parse().unwrap()).await.unwrap(); - - let (_, client_addr) = server.accept().unwrap(); - - assert_eq!(client_addr.ip(), expected_ip); - }; - - if let Some(ip) = bind_ip_v4 { - assert_client_ip(format!("http://127.0.0.1:{}", port), server4, ip.into()).await; - } - - if let Some(ip) = bind_ip_v6 { - assert_client_ip(format!("http://[::1]:{}", port), server6, ip.into()).await; - } - } - - #[test] - #[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)] - fn client_happy_eyeballs() { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener}; - use std::time::{Duration, Instant}; - - use super::dns; - use super::ConnectingTcp; - - let _ = pretty_env_logger::try_init(); - let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); - let addr = server4.local_addr().unwrap(); - let _server6 = TcpListener::bind(&format!("[::1]:{}", addr.port())).unwrap(); - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap(); - - let local_timeout = Duration::default(); - let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1; - let unreachable_v6_timeout = 
measure_connect(unreachable_ipv6_addr()).1; - let fallback_timeout = std::cmp::max(unreachable_v4_timeout, unreachable_v6_timeout) - + Duration::from_millis(250); - - let scenarios = &[ - // Fast primary, without fallback. - (&[local_ipv4_addr()][..], 4, local_timeout, false), - (&[local_ipv6_addr()][..], 6, local_timeout, false), - // Fast primary, with (unused) fallback. - ( - &[local_ipv4_addr(), local_ipv6_addr()][..], - 4, - local_timeout, - false, - ), - ( - &[local_ipv6_addr(), local_ipv4_addr()][..], - 6, - local_timeout, - false, - ), - // Unreachable + fast primary, without fallback. - ( - &[unreachable_ipv4_addr(), local_ipv4_addr()][..], - 4, - unreachable_v4_timeout, - false, - ), - ( - &[unreachable_ipv6_addr(), local_ipv6_addr()][..], - 6, - unreachable_v6_timeout, - false, - ), - // Unreachable + fast primary, with (unused) fallback. - ( - &[ - unreachable_ipv4_addr(), - local_ipv4_addr(), - local_ipv6_addr(), - ][..], - 4, - unreachable_v4_timeout, - false, - ), - ( - &[ - unreachable_ipv6_addr(), - local_ipv6_addr(), - local_ipv4_addr(), - ][..], - 6, - unreachable_v6_timeout, - true, - ), - // Slow primary, with (used) fallback. - ( - &[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..], - 6, - fallback_timeout, - false, - ), - ( - &[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..], - 4, - fallback_timeout, - true, - ), - // Slow primary, with (used) unreachable + fast fallback. - ( - &[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..], - 6, - fallback_timeout + unreachable_v6_timeout, - false, - ), - ( - &[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..], - 4, - fallback_timeout + unreachable_v4_timeout, - true, - ), - ]; - - // Scenarios for IPv6 -> IPv4 fallback require that host can access IPv6 network. - // Otherwise, connection to "slow" IPv6 address will error-out immediately. 
- let ipv6_accessible = measure_connect(slow_ipv6_addr()).0; - - for &(hosts, family, timeout, needs_ipv6_access) in scenarios { - if needs_ipv6_access && !ipv6_accessible { - continue; - } - - let (start, stream) = rt - .block_on(async move { - let addrs = hosts - .iter() - .map(|host| (host.clone(), addr.port()).into()) - .collect(); - let cfg = Config { - local_address_ipv4: None, - local_address_ipv6: None, - connect_timeout: None, - keep_alive_timeout: None, - happy_eyeballs_timeout: Some(fallback_timeout), - nodelay: false, - reuse_address: false, - enforce_http: false, - send_buffer_size: None, - recv_buffer_size: None, - }; - let connecting_tcp = ConnectingTcp::new(dns::SocketAddrs::new(addrs), &cfg); - let start = Instant::now(); - Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?)) - }) - .unwrap(); - let res = if stream.peer_addr().unwrap().is_ipv4() { - 4 - } else { - 6 - }; - let duration = start.elapsed(); - - // Allow actual duration to be +/- 150ms off. - let min_duration = if timeout >= Duration::from_millis(150) { - timeout - Duration::from_millis(150) - } else { - Duration::default() - }; - let max_duration = timeout + Duration::from_millis(150); - - assert_eq!(res, family); - assert!(duration >= min_duration); - assert!(duration <= max_duration); - } - - fn local_ipv4_addr() -> IpAddr { - Ipv4Addr::new(127, 0, 0, 1).into() - } - - fn local_ipv6_addr() -> IpAddr { - Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into() - } - - fn unreachable_ipv4_addr() -> IpAddr { - Ipv4Addr::new(127, 0, 0, 2).into() - } - - fn unreachable_ipv6_addr() -> IpAddr { - Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2).into() - } - - fn slow_ipv4_addr() -> IpAddr { - // RFC 6890 reserved IPv4 address. - Ipv4Addr::new(198, 18, 0, 25).into() - } - - fn slow_ipv6_addr() -> IpAddr { - // RFC 6890 reserved IPv6 address. 
- Ipv6Addr::new(2001, 2, 0, 0, 0, 0, 0, 254).into() - } - - fn measure_connect(addr: IpAddr) -> (bool, Duration) { - let start = Instant::now(); - let result = - std::net::TcpStream::connect_timeout(&(addr, 80).into(), Duration::from_secs(1)); - - let reachable = result.is_ok() || result.unwrap_err().kind() == io::ErrorKind::TimedOut; - let duration = start.elapsed(); - (reachable, duration) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/connect/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/connect/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/connect/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/connect/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,633 +0,0 @@ -//! Connectors used by the `Client`. -//! -//! This module contains: -//! -//! - A default [`HttpConnector`][] that does DNS resolution and establishes -//! connections over TCP. -//! - Types to build custom connectors. -//! -//! # Connectors -//! -//! A "connector" is a [`Service`][] that takes a [`Uri`][] destination, and -//! its `Response` is some type implementing [`AsyncRead`][], [`AsyncWrite`][], -//! and [`Connection`][]. -//! -//! ## Custom Connectors -//! -//! A simple connector that ignores the `Uri` destination and always returns -//! a TCP connection to the same address could be written like this: -//! -//! ```rust,ignore -//! let connector = tower::service_fn(|_dst| async { -//! tokio::net::TcpStream::connect("127.0.0.1:1337") -//! }) -//! ``` -//! -//! Or, fully written out: -//! -//! ``` -//! # #[cfg(feature = "runtime")] -//! # mod rt { -//! use std::{future::Future, net::SocketAddr, pin::Pin, task::{self, Poll}}; -//! use hyper::{service::Service, Uri}; -//! use tokio::net::TcpStream; -//! -//! #[derive(Clone)] -//! struct LocalConnector; -//! -//! impl Service for LocalConnector { -//! type Response = TcpStream; -//! type Error = std::io::Error; -//! // We can't "name" an `async` generated future. 
-//! type Future = Pin> + Send -//! >>; -//! -//! fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { -//! // This connector is always ready, but others might not be. -//! Poll::Ready(Ok(())) -//! } -//! -//! fn call(&mut self, _: Uri) -> Self::Future { -//! Box::pin(TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 1337)))) -//! } -//! } -//! # } -//! ``` -//! -//! It's worth noting that for `TcpStream`s, the [`HttpConnector`][] is a -//! better starting place to extend from. -//! -//! Using either of the above connector examples, it can be used with the -//! `Client` like this: -//! -//! ``` -//! # #[cfg(feature = "runtime")] -//! # fn rt () { -//! # let connector = hyper::client::HttpConnector::new(); -//! // let connector = ... -//! -//! let client = hyper::Client::builder() -//! .build::<_, hyper::Body>(connector); -//! # } -//! ``` -//! -//! -//! [`HttpConnector`]: HttpConnector -//! [`Service`]: crate::service::Service -//! [`Uri`]: ::http::Uri -//! [`AsyncRead`]: tokio::io::AsyncRead -//! [`AsyncWrite`]: tokio::io::AsyncWrite -//! [`Connection`]: Connection -use std::fmt; -use std::fmt::{Debug, Formatter}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::ops::Deref; -use std::sync::Arc; - -use ::http::Extensions; -use tokio::sync::watch; - -cfg_feature! { - #![feature = "tcp"] - - pub use self::http::{HttpConnector, HttpInfo}; - - pub mod dns; - mod http; -} - -cfg_feature! { - #![any(feature = "http1", feature = "http2")] - - pub use self::sealed::Connect; -} - -/// Describes a type returned by a connector. -pub trait Connection { - /// Return metadata describing the connection. - fn connected(&self) -> Connected; -} - -/// Extra information about the connected transport. -/// -/// This can be used to inform recipients about things like if ALPN -/// was used, or if connected to an HTTP proxy. 
-#[derive(Debug)] -pub struct Connected { - pub(super) alpn: Alpn, - pub(super) is_proxied: bool, - pub(super) extra: Option, - pub(super) poisoned: PoisonPill, -} - -#[derive(Clone)] -pub(crate) struct PoisonPill { - poisoned: Arc, -} - -impl Debug for PoisonPill { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - // print the address of the pill—this makes debugging issues much easier - write!(f, "PoisonPill@{:p} {{ poisoned: {} }}", self.poisoned, self.poisoned.load(Ordering::Relaxed)) - } -} - -impl PoisonPill { - pub(crate) fn healthy() -> Self { - Self { - poisoned: Arc::new(AtomicBool::new(false)), - } - } - pub(crate) fn poison(&self) { - self.poisoned.store(true, Ordering::Relaxed) - } - - pub(crate) fn poisoned(&self) -> bool { - self.poisoned.load(Ordering::Relaxed) - } -} - -/// [`CaptureConnection`] allows callers to capture [`Connected`] information -/// -/// To capture a connection for a request, use [`capture_connection`]. -#[derive(Debug, Clone)] -pub struct CaptureConnection { - rx: watch::Receiver>, -} - -/// Capture the connection for a given request -/// -/// When making a request with Hyper, the underlying connection must implement the [`Connection`] trait. -/// [`capture_connection`] allows a caller to capture the returned [`Connected`] structure as soon -/// as the connection is established. -/// -/// *Note*: If establishing a connection fails, [`CaptureConnection::connection_metadata`] will always return none. -/// -/// # Examples -/// -/// **Synchronous access**: -/// The [`CaptureConnection::connection_metadata`] method allows callers to check if a connection has been -/// established. This is ideal for situations where you are certain the connection has already -/// been established (e.g. after the response future has already completed). 
-/// ```rust -/// use hyper::client::connect::{capture_connection, CaptureConnection}; -/// let mut request = http::Request::builder() -/// .uri("http://foo.com") -/// .body(()) -/// .unwrap(); -/// -/// let captured_connection = capture_connection(&mut request); -/// // some time later after the request has been sent... -/// let connection_info = captured_connection.connection_metadata(); -/// println!("we are connected! {:?}", connection_info.as_ref()); -/// ``` -/// -/// **Asynchronous access**: -/// The [`CaptureConnection::wait_for_connection_metadata`] method returns a future resolves as soon as the -/// connection is available. -/// -/// ```rust -/// # #[cfg(feature = "runtime")] -/// # async fn example() { -/// use hyper::client::connect::{capture_connection, CaptureConnection}; -/// let mut request = http::Request::builder() -/// .uri("http://foo.com") -/// .body(hyper::Body::empty()) -/// .unwrap(); -/// -/// let mut captured = capture_connection(&mut request); -/// tokio::task::spawn(async move { -/// let connection_info = captured.wait_for_connection_metadata().await; -/// println!("we are connected! 
{:?}", connection_info.as_ref()); -/// }); -/// -/// let client = hyper::Client::new(); -/// client.request(request).await.expect("request failed"); -/// # } -/// ``` -pub fn capture_connection(request: &mut crate::http::Request) -> CaptureConnection { - let (tx, rx) = CaptureConnection::new(); - request.extensions_mut().insert(tx); - rx -} - -/// TxSide for [`CaptureConnection`] -/// -/// This is inserted into `Extensions` to allow Hyper to back channel connection info -#[derive(Clone)] -pub(crate) struct CaptureConnectionExtension { - tx: Arc>>, -} - -impl CaptureConnectionExtension { - pub(crate) fn set(&self, connected: &Connected) { - self.tx.send_replace(Some(connected.clone())); - } -} - -impl CaptureConnection { - /// Internal API to create the tx and rx half of [`CaptureConnection`] - pub(crate) fn new() -> (CaptureConnectionExtension, Self) { - let (tx, rx) = watch::channel(None); - ( - CaptureConnectionExtension { tx: Arc::new(tx) }, - CaptureConnection { rx }, - ) - } - - /// Retrieve the connection metadata, if available - pub fn connection_metadata(&self) -> impl Deref> + '_ { - self.rx.borrow() - } - - /// Wait for the connection to be established - /// - /// If a connection was established, this will always return `Some(...)`. If the request never - /// successfully connected (e.g. DNS resolution failure), this method will never return. - pub async fn wait_for_connection_metadata( - &mut self, - ) -> impl Deref> + '_ { - if self.rx.borrow().is_some() { - return self.rx.borrow(); - } - let _ = self.rx.changed().await; - self.rx.borrow() - } -} - -pub(super) struct Extra(Box); - -#[derive(Clone, Copy, Debug, PartialEq)] -pub(super) enum Alpn { - H2, - None, -} - -impl Connected { - /// Create new `Connected` type with empty metadata. - pub fn new() -> Connected { - Connected { - alpn: Alpn::None, - is_proxied: false, - extra: None, - poisoned: PoisonPill::healthy(), - } - } - - /// Set whether the connected transport is to an HTTP proxy. 
- /// - /// This setting will affect if HTTP/1 requests written on the transport - /// will have the request-target in absolute-form or origin-form: - /// - /// - When `proxy(false)`: - /// - /// ```http - /// GET /guide HTTP/1.1 - /// ``` - /// - /// - When `proxy(true)`: - /// - /// ```http - /// GET http://hyper.rs/guide HTTP/1.1 - /// ``` - /// - /// Default is `false`. - pub fn proxy(mut self, is_proxied: bool) -> Connected { - self.is_proxied = is_proxied; - self - } - - /// Determines if the connected transport is to an HTTP proxy. - pub fn is_proxied(&self) -> bool { - self.is_proxied - } - - /// Set extra connection information to be set in the extensions of every `Response`. - pub fn extra(mut self, extra: T) -> Connected { - if let Some(prev) = self.extra { - self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra)))); - } else { - self.extra = Some(Extra(Box::new(ExtraEnvelope(extra)))); - } - self - } - - /// Copies the extra connection information into an `Extensions` map. - pub fn get_extras(&self, extensions: &mut Extensions) { - if let Some(extra) = &self.extra { - extra.set(extensions); - } - } - - /// Set that the connected transport negotiated HTTP/2 as its next protocol. - pub fn negotiated_h2(mut self) -> Connected { - self.alpn = Alpn::H2; - self - } - - /// Determines if the connected transport negotiated HTTP/2 as its next protocol. - pub fn is_negotiated_h2(&self) -> bool { - self.alpn == Alpn::H2 - } - - /// Poison this connection - /// - /// A poisoned connection will not be reused for subsequent requests by the pool - pub fn poison(&self) { - self.poisoned.poison(); - tracing::debug!( - poison_pill = ?self.poisoned, "connection was poisoned" - ); - } - - // Don't public expose that `Connected` is `Clone`, unsure if we want to - // keep that contract... 
- pub(super) fn clone(&self) -> Connected { - Connected { - alpn: self.alpn.clone(), - is_proxied: self.is_proxied, - extra: self.extra.clone(), - poisoned: self.poisoned.clone(), - } - } -} - -// ===== impl Extra ===== - -impl Extra { - pub(super) fn set(&self, res: &mut Extensions) { - self.0.set(res); - } -} - -impl Clone for Extra { - fn clone(&self) -> Extra { - Extra(self.0.clone_box()) - } -} - -impl fmt::Debug for Extra { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Extra").finish() - } -} - -trait ExtraInner: Send + Sync { - fn clone_box(&self) -> Box; - fn set(&self, res: &mut Extensions); -} - -// This indirection allows the `Connected` to have a type-erased "extra" value, -// while that type still knows its inner extra type. This allows the correct -// TypeId to be used when inserting into `res.extensions_mut()`. -#[derive(Clone)] -struct ExtraEnvelope(T); - -impl ExtraInner for ExtraEnvelope -where - T: Clone + Send + Sync + 'static, -{ - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } - - fn set(&self, res: &mut Extensions) { - res.insert(self.0.clone()); - } -} - -struct ExtraChain(Box, T); - -impl Clone for ExtraChain { - fn clone(&self) -> Self { - ExtraChain(self.0.clone_box(), self.1.clone()) - } -} - -impl ExtraInner for ExtraChain -where - T: Clone + Send + Sync + 'static, -{ - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } - - fn set(&self, res: &mut Extensions) { - self.0.set(res); - res.insert(self.1.clone()); - } -} - -#[cfg(any(feature = "http1", feature = "http2"))] -pub(super) mod sealed { - use std::error::Error as StdError; - - use ::http::Uri; - use tokio::io::{AsyncRead, AsyncWrite}; - - use super::Connection; - use crate::common::{Future, Unpin}; - - /// Connect to a destination, returning an IO transport. - /// - /// A connector receives a [`Uri`](::http::Uri) and returns a `Future` of the - /// ready connection. 
- /// - /// # Trait Alias - /// - /// This is really just an *alias* for the `tower::Service` trait, with - /// additional bounds set for convenience *inside* hyper. You don't actually - /// implement this trait, but `tower::Service` instead. - // The `Sized` bound is to prevent creating `dyn Connect`, since they cannot - // fit the `Connect` bounds because of the blanket impl for `Service`. - pub trait Connect: Sealed + Sized { - #[doc(hidden)] - type _Svc: ConnectSvc; - #[doc(hidden)] - fn connect(self, internal_only: Internal, dst: Uri) -> ::Future; - } - - pub trait ConnectSvc { - type Connection: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static; - type Error: Into>; - type Future: Future> + Unpin + Send + 'static; - - fn connect(self, internal_only: Internal, dst: Uri) -> Self::Future; - } - - impl Connect for S - where - S: tower_service::Service + Send + 'static, - S::Error: Into>, - S::Future: Unpin + Send, - T: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static, - { - type _Svc = S; - - fn connect(self, _: Internal, dst: Uri) -> crate::service::Oneshot { - crate::service::oneshot(self, dst) - } - } - - impl ConnectSvc for S - where - S: tower_service::Service + Send + 'static, - S::Error: Into>, - S::Future: Unpin + Send, - T: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static, - { - type Connection = T; - type Error = S::Error; - type Future = crate::service::Oneshot; - - fn connect(self, _: Internal, dst: Uri) -> Self::Future { - crate::service::oneshot(self, dst) - } - } - - impl Sealed for S - where - S: tower_service::Service + Send, - S::Error: Into>, - S::Future: Unpin + Send, - T: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static, - { - } - - pub trait Sealed {} - #[allow(missing_debug_implementations)] - pub struct Internal; -} - -#[cfg(test)] -mod tests { - use super::Connected; - use crate::client::connect::CaptureConnection; - - #[derive(Clone, Debug, PartialEq)] - struct Ex1(usize); - - 
#[derive(Clone, Debug, PartialEq)] - struct Ex2(&'static str); - - #[derive(Clone, Debug, PartialEq)] - struct Ex3(&'static str); - - #[test] - fn test_connected_extra() { - let c1 = Connected::new().extra(Ex1(41)); - - let mut ex = ::http::Extensions::new(); - - assert_eq!(ex.get::(), None); - - c1.extra.as_ref().expect("c1 extra").set(&mut ex); - - assert_eq!(ex.get::(), Some(&Ex1(41))); - } - - #[test] - fn test_connected_extra_chain() { - // If a user composes connectors and at each stage, there's "extra" - // info to attach, it shouldn't override the previous extras. - - let c1 = Connected::new() - .extra(Ex1(45)) - .extra(Ex2("zoom")) - .extra(Ex3("pew pew")); - - let mut ex1 = ::http::Extensions::new(); - - assert_eq!(ex1.get::(), None); - assert_eq!(ex1.get::(), None); - assert_eq!(ex1.get::(), None); - - c1.extra.as_ref().expect("c1 extra").set(&mut ex1); - - assert_eq!(ex1.get::(), Some(&Ex1(45))); - assert_eq!(ex1.get::(), Some(&Ex2("zoom"))); - assert_eq!(ex1.get::(), Some(&Ex3("pew pew"))); - - // Just like extensions, inserting the same type overrides previous type. 
- let c2 = Connected::new() - .extra(Ex1(33)) - .extra(Ex2("hiccup")) - .extra(Ex1(99)); - - let mut ex2 = ::http::Extensions::new(); - - c2.extra.as_ref().expect("c2 extra").set(&mut ex2); - - assert_eq!(ex2.get::(), Some(&Ex1(99))); - assert_eq!(ex2.get::(), Some(&Ex2("hiccup"))); - } - - #[test] - fn test_sync_capture_connection() { - let (tx, rx) = CaptureConnection::new(); - assert!( - rx.connection_metadata().is_none(), - "connection has not been set" - ); - tx.set(&Connected::new().proxy(true)); - assert_eq!( - rx.connection_metadata() - .as_ref() - .expect("connected should be set") - .is_proxied(), - true - ); - - // ensure it can be called multiple times - assert_eq!( - rx.connection_metadata() - .as_ref() - .expect("connected should be set") - .is_proxied(), - true - ); - } - - #[tokio::test] - async fn async_capture_connection() { - let (tx, mut rx) = CaptureConnection::new(); - assert!( - rx.connection_metadata().is_none(), - "connection has not been set" - ); - let test_task = tokio::spawn(async move { - assert_eq!( - rx.wait_for_connection_metadata() - .await - .as_ref() - .expect("connection should be set") - .is_proxied(), - true - ); - // can be awaited multiple times - assert!( - rx.wait_for_connection_metadata().await.is_some(), - "should be awaitable multiple times" - ); - - assert_eq!(rx.connection_metadata().is_some(), true); - }); - // can't be finished, we haven't set the connection yet - assert_eq!(test_task.is_finished(), false); - tx.set(&Connected::new().proxy(true)); - - assert!(test_task.await.is_ok()); - } - - #[tokio::test] - async fn capture_connection_sender_side_dropped() { - let (tx, mut rx) = CaptureConnection::new(); - assert!( - rx.connection_metadata().is_none(), - "connection has not been set" - ); - drop(tx); - assert!(rx.wait_for_connection_metadata().await.is_none()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/conn.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/conn.rs --- 
s390-tools-2.31.0/rust-vendor/hyper/src/client/conn.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/conn.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1156 +0,0 @@ -//! Lower-level client connection API. -//! -//! The types in this module are to provide a lower-level API based around a -//! single connection. Connecting to a host, pooling connections, and the like -//! are not handled at this level. This module provides the building blocks to -//! customize those things externally. -//! -//! If don't have need to manage connections yourself, consider using the -//! higher-level [Client](super) API. -//! -//! ## Example -//! A simple example that uses the `SendRequest` struct to talk HTTP over a Tokio TCP stream -//! ```no_run -//! # #[cfg(all(feature = "client", feature = "http1", feature = "runtime"))] -//! # mod rt { -//! use tower::ServiceExt; -//! use http::{Request, StatusCode}; -//! use hyper::{client::conn, Body}; -//! use tokio::net::TcpStream; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let target_stream = TcpStream::connect("example.com:80").await?; -//! -//! let (mut request_sender, connection) = conn::handshake(target_stream).await?; -//! -//! // spawn a task to poll the connection and drive the HTTP state -//! tokio::spawn(async move { -//! if let Err(e) = connection.await { -//! eprintln!("Error in connection: {}", e); -//! } -//! }); -//! -//! let request = Request::builder() -//! // We need to manually add the host header because SendRequest does not -//! .header("Host", "example.com") -//! .method("GET") -//! .body(Body::from(""))?; -//! let response = request_sender.send_request(request).await?; -//! assert!(response.status() == StatusCode::OK); -//! -//! // To send via the same connection again, it may not work as it may not be ready, -//! // so we have to wait until the request_sender becomes ready. -//! request_sender.ready().await?; -//! let request = Request::builder() -//! 
.header("Host", "example.com") -//! .method("GET") -//! .body(Body::from(""))?; -//! let response = request_sender.send_request(request).await?; -//! assert!(response.status() == StatusCode::OK); -//! Ok(()) -//! } -//! -//! # } -//! ``` - -#[cfg(all(feature = "backports", feature = "http1"))] -pub mod http1; -#[cfg(all(feature = "backports", feature = "http2"))] -pub mod http2; - -use std::error::Error as StdError; -use std::fmt; -#[cfg(not(all(feature = "http1", feature = "http2")))] -use std::marker::PhantomData; -use std::sync::Arc; -#[cfg(all(feature = "runtime", feature = "http2"))] -use std::time::Duration; - -use bytes::Bytes; -use futures_util::future::{self, Either, FutureExt as _}; -use httparse::ParserConfig; -use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; -use tower_service::Service; -use tracing::{debug, trace}; - -use super::dispatch; -use crate::body::HttpBody; -#[cfg(not(all(feature = "http1", feature = "http2")))] -use crate::common::Never; -use crate::common::{ - exec::{BoxSendFuture, Exec}, - task, Future, Pin, Poll, -}; -use crate::proto; -use crate::rt::Executor; -#[cfg(feature = "http1")] -use crate::upgrade::Upgraded; -use crate::{Body, Request, Response}; - -#[cfg(feature = "http1")] -type Http1Dispatcher = - proto::dispatch::Dispatcher, B, T, proto::h1::ClientTransaction>; - -#[cfg(not(feature = "http1"))] -type Http1Dispatcher = (Never, PhantomData<(T, Pin>)>); - -#[cfg(feature = "http2")] -type Http2ClientTask = proto::h2::ClientTask; - -#[cfg(not(feature = "http2"))] -type Http2ClientTask = (Never, PhantomData>>); - -pin_project! { - #[project = ProtoClientProj] - enum ProtoClient - where - B: HttpBody, - { - H1 { - #[pin] - h1: Http1Dispatcher, - }, - H2 { - #[pin] - h2: Http2ClientTask, - }, - } -} - -/// Returns a handshake future over some IO. -/// -/// This is a shortcut for `Builder::new().handshake(io)`. -/// See [`client::conn`](crate::client::conn) for more. 
-#[cfg_attr( - feature = "deprecated", - deprecated( - note = "This function will be replaced with `client::conn::http1::handshake` and `client::conn::http2::handshake` in 1.0, enable the \"backports\" feature to use them now." - ) -)] -#[cfg_attr(feature = "deprecated", allow(deprecated))] -pub async fn handshake( - io: T, -) -> crate::Result<(SendRequest, Connection)> -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, -{ - #[allow(deprecated)] - Builder::new().handshake(io).await -} - -/// The sender side of an established connection. -#[cfg_attr( - feature = "deprecated", - deprecated( - note = "This type will be replaced with `client::conn::http1::SendRequest` and `client::conn::http2::SendRequest` in 1.0, enable the \"backports\" feature to use them now." - ) -)] -pub struct SendRequest { - dispatch: dispatch::Sender, Response>, -} - -/// A future that processes all HTTP state for the IO object. -/// -/// In most cases, this should just be spawned into an executor, so that it -/// can process incoming and outgoing messages, notice hangups, and the like. -#[must_use = "futures do nothing unless polled"] -#[cfg_attr( - feature = "deprecated", - deprecated( - note = "This type will be replaced with `client::conn::http1::Connection` and `client::conn::http2::Connection` in 1.0, enable the \"backports\" feature to use them now." - ) -)] -pub struct Connection -where - T: AsyncRead + AsyncWrite + Send + 'static, - B: HttpBody + 'static, -{ - inner: Option>, -} - -/// A builder to configure an HTTP connection. -/// -/// After setting options, the builder is used to create a handshake future. -#[derive(Clone, Debug)] -#[cfg_attr( - feature = "deprecated", - deprecated( - note = "This type will be replaced with `client::conn::http1::Builder` and `client::conn::http2::Builder` in 1.0, enable the \"backports\" feature to use them now." 
- ) -)] -pub struct Builder { - pub(super) exec: Exec, - h09_responses: bool, - h1_parser_config: ParserConfig, - h1_writev: Option, - h1_title_case_headers: bool, - h1_preserve_header_case: bool, - #[cfg(feature = "ffi")] - h1_preserve_header_order: bool, - h1_read_buf_exact_size: Option, - h1_max_buf_size: Option, - #[cfg(feature = "ffi")] - h1_headers_raw: bool, - #[cfg(feature = "http2")] - h2_builder: proto::h2::client::Config, - version: Proto, -} - -#[derive(Clone, Debug)] -enum Proto { - #[cfg(feature = "http1")] - Http1, - #[cfg(feature = "http2")] - Http2, -} - -/// A future returned by `SendRequest::send_request`. -/// -/// Yields a `Response` if successful. -#[must_use = "futures do nothing unless polled"] -pub struct ResponseFuture { - inner: ResponseFutureState, -} - -enum ResponseFutureState { - Waiting(dispatch::Promise>), - // Option is to be able to `take()` it in `poll` - Error(Option), -} - -/// Deconstructed parts of a `Connection`. -/// -/// This allows taking apart a `Connection` at a later time, in order to -/// reclaim the IO object, and additional related pieces. -#[derive(Debug)] -pub struct Parts { - /// The original IO object used in the handshake. - pub io: T, - /// A buffer of bytes that have been read but not processed as HTTP. - /// - /// For instance, if the `Connection` is used for an HTTP upgrade request, - /// it is possible the server sent back the first bytes of the new protocol - /// along with the response upgrade. - /// - /// You will want to check for any existing bytes if you plan to continue - /// communicating on the IO object. - pub read_buf: Bytes, - _inner: (), -} - -// ========== internal client api - -// A `SendRequest` that can be cloned to send HTTP2 requests. -// private for now, probably not a great idea of a type... 
-#[must_use = "futures do nothing unless polled"] -#[cfg(feature = "http2")] -pub(super) struct Http2SendRequest { - dispatch: dispatch::UnboundedSender, Response>, -} - -// ===== impl SendRequest - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -impl SendRequest { - /// Polls to determine whether this sender can be used yet for a request. - /// - /// If the associated connection is closed, this returns an Error. - pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - self.dispatch.poll_ready(cx) - } - - pub(super) async fn when_ready(self) -> crate::Result { - let mut me = Some(self); - future::poll_fn(move |cx| { - ready!(me.as_mut().unwrap().poll_ready(cx))?; - Poll::Ready(Ok(me.take().unwrap())) - }) - .await - } - - pub(super) fn is_ready(&self) -> bool { - self.dispatch.is_ready() - } - - pub(super) fn is_closed(&self) -> bool { - self.dispatch.is_closed() - } - - #[cfg(feature = "http2")] - pub(super) fn into_http2(self) -> Http2SendRequest { - Http2SendRequest { - dispatch: self.dispatch.unbound(), - } - } -} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -impl SendRequest -where - B: HttpBody + 'static, -{ - /// Sends a `Request` on the associated connection. - /// - /// Returns a future that if successful, yields the `Response`. - /// - /// # Note - /// - /// There are some key differences in what automatic things the `Client` - /// does for you that will not be done here: - /// - /// - `Client` requires absolute-form `Uri`s, since the scheme and - /// authority are needed to connect. They aren't required here. - /// - Since the `Client` requires absolute-form `Uri`s, it can add - /// the `Host` header based on it. You must add a `Host` header yourself - /// before calling this method. - /// - Since absolute-form `Uri`s are not required, if received, they will - /// be serialized as-is. 
- /// - /// # Example - /// - /// ``` - /// # use http::header::HOST; - /// # use hyper::client::conn::SendRequest; - /// # use hyper::Body; - /// use hyper::Request; - /// - /// # async fn doc(mut tx: SendRequest) -> hyper::Result<()> { - /// // build a Request - /// let req = Request::builder() - /// .uri("/foo/bar") - /// .header(HOST, "hyper.rs") - /// .body(Body::empty()) - /// .unwrap(); - /// - /// // send it and await a Response - /// let res = tx.send_request(req).await?; - /// // assert the Response - /// assert!(res.status().is_success()); - /// # Ok(()) - /// # } - /// # fn main() {} - /// ``` - pub fn send_request(&mut self, req: Request) -> ResponseFuture { - let inner = match self.dispatch.send(req) { - Ok(rx) => ResponseFutureState::Waiting(rx), - Err(_req) => { - debug!("connection was not ready"); - let err = crate::Error::new_canceled().with("connection was not ready"); - ResponseFutureState::Error(Some(err)) - } - }; - - ResponseFuture { inner } - } - - pub(super) fn send_request_retryable( - &mut self, - req: Request, - ) -> impl Future, (crate::Error, Option>)>> + Unpin - where - B: Send, - { - match self.dispatch.try_send(req) { - Ok(rx) => { - Either::Left(rx.then(move |res| { - match res { - Ok(Ok(res)) => future::ok(res), - Ok(Err(err)) => future::err(err), - // this is definite bug if it happens, but it shouldn't happen! 
- Err(_) => panic!("dispatch dropped without returning error"), - } - })) - } - Err(req) => { - debug!("connection was not ready"); - let err = crate::Error::new_canceled().with("connection was not ready"); - Either::Right(future::err((err, Some(req)))) - } - } - } -} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -impl Service> for SendRequest -where - B: HttpBody + 'static, -{ - type Response = Response; - type Error = crate::Error; - type Future = ResponseFuture; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - self.poll_ready(cx) - } - - fn call(&mut self, req: Request) -> Self::Future { - self.send_request(req) - } -} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -impl fmt::Debug for SendRequest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SendRequest").finish() - } -} - -// ===== impl Http2SendRequest - -#[cfg(feature = "http2")] -impl Http2SendRequest { - pub(super) fn is_ready(&self) -> bool { - self.dispatch.is_ready() - } - - pub(super) fn is_closed(&self) -> bool { - self.dispatch.is_closed() - } -} - -#[cfg(feature = "http2")] -impl Http2SendRequest -where - B: HttpBody + 'static, -{ - pub(super) fn send_request_retryable( - &mut self, - req: Request, - ) -> impl Future, (crate::Error, Option>)>> - where - B: Send, - { - match self.dispatch.try_send(req) { - Ok(rx) => { - Either::Left(rx.then(move |res| { - match res { - Ok(Ok(res)) => future::ok(res), - Ok(Err(err)) => future::err(err), - // this is definite bug if it happens, but it shouldn't happen! 
- Err(_) => panic!("dispatch dropped without returning error"), - } - })) - } - Err(req) => { - debug!("connection was not ready"); - let err = crate::Error::new_canceled().with("connection was not ready"); - Either::Right(future::err((err, Some(req)))) - } - } - } -} - -#[cfg(feature = "http2")] -impl fmt::Debug for Http2SendRequest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Http2SendRequest").finish() - } -} - -#[cfg(feature = "http2")] -impl Clone for Http2SendRequest { - fn clone(&self) -> Self { - Http2SendRequest { - dispatch: self.dispatch.clone(), - } - } -} - -// ===== impl Connection - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -impl Connection -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: HttpBody + Unpin + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - /// Return the inner IO object, and additional information. - /// - /// Only works for HTTP/1 connections. HTTP/2 connections will panic. - pub fn into_parts(self) -> Parts { - match self.inner.expect("already upgraded") { - #[cfg(feature = "http1")] - ProtoClient::H1 { h1 } => { - let (io, read_buf, _) = h1.into_inner(); - Parts { - io, - read_buf, - _inner: (), - } - } - ProtoClient::H2 { .. } => { - panic!("http2 cannot into_inner"); - } - - #[cfg(not(feature = "http1"))] - ProtoClient::H1 { h1 } => match h1.0 {}, - } - } - - /// Poll the connection for completion, but without calling `shutdown` - /// on the underlying IO. - /// - /// This is useful to allow running a connection while doing an HTTP - /// upgrade. Once the upgrade is completed, the connection would be "done", - /// but it is not desired to actually shutdown the IO object. Instead you - /// would take it back using `into_parts`. 
- /// - /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html) - /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html) - /// to work with this function; or use the `without_shutdown` wrapper. - pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { - match *self.inner.as_mut().expect("already upgraded") { - #[cfg(feature = "http1")] - ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx), - #[cfg(feature = "http2")] - ProtoClient::H2 { ref mut h2, .. } => Pin::new(h2).poll(cx).map_ok(|_| ()), - - #[cfg(not(feature = "http1"))] - ProtoClient::H1 { ref mut h1 } => match h1.0 {}, - #[cfg(not(feature = "http2"))] - ProtoClient::H2 { ref mut h2, .. } => match h2.0 {}, - } - } - - /// Prevent shutdown of the underlying IO object at the end of service the request, - /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. - pub fn without_shutdown(self) -> impl Future>> { - let mut conn = Some(self); - future::poll_fn(move |cx| -> Poll>> { - ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; - Poll::Ready(Ok(conn.take().unwrap().into_parts())) - }) - } - - /// Returns whether the [extended CONNECT protocol][1] is enabled or not. - /// - /// This setting is configured by the server peer by sending the - /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. - /// This method returns the currently acknowledged value received from the - /// remote. - /// - /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 - /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3 - #[cfg(feature = "http2")] - pub fn http2_is_extended_connect_protocol_enabled(&self) -> bool { - match self.inner.as_ref().unwrap() { - ProtoClient::H1 { .. 
} => false, - ProtoClient::H2 { h2 } => h2.is_extended_connect_protocol_enabled(), - } - } -} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -impl Future for Connection -where - T: AsyncRead + AsyncWrite + Unpin + Send, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Output = crate::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? { - proto::Dispatched::Shutdown => Poll::Ready(Ok(())), - #[cfg(feature = "http1")] - proto::Dispatched::Upgrade(pending) => match self.inner.take() { - Some(ProtoClient::H1 { h1 }) => { - let (io, buf, _) = h1.into_inner(); - pending.fulfill(Upgraded::new(io, buf)); - Poll::Ready(Ok(())) - } - _ => { - drop(pending); - unreachable!("Upgrade expects h1"); - } - }, - } - } -} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -impl fmt::Debug for Connection -where - T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, - B: HttpBody + 'static, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Connection").finish() - } -} - -// ===== impl Builder - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -impl Builder { - /// Creates a new connection builder. - #[inline] - pub fn new() -> Builder { - Builder { - exec: Exec::Default, - h09_responses: false, - h1_writev: None, - h1_read_buf_exact_size: None, - h1_parser_config: Default::default(), - h1_title_case_headers: false, - h1_preserve_header_case: false, - #[cfg(feature = "ffi")] - h1_preserve_header_order: false, - h1_max_buf_size: None, - #[cfg(feature = "ffi")] - h1_headers_raw: false, - #[cfg(feature = "http2")] - h2_builder: Default::default(), - #[cfg(feature = "http1")] - version: Proto::Http1, - #[cfg(not(feature = "http1"))] - version: Proto::Http2, - } - } - - /// Provide an executor to execute background HTTP2 tasks. 
- pub fn executor(&mut self, exec: E) -> &mut Builder - where - E: Executor + Send + Sync + 'static, - { - self.exec = Exec::Executor(Arc::new(exec)); - self - } - - /// Set whether HTTP/0.9 responses should be tolerated. - /// - /// Default is false. - pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder { - self.h09_responses = enabled; - self - } - - /// Set whether HTTP/1 connections will accept spaces between header names - /// and the colon that follow them in responses. - /// - /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has - /// to say about it: - /// - /// > No whitespace is allowed between the header field-name and colon. In - /// > the past, differences in the handling of such whitespace have led to - /// > security vulnerabilities in request routing and response handling. A - /// > server MUST reject any received request message that contains - /// > whitespace between a header field-name and colon with a response code - /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a - /// > response message before forwarding the message downstream. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - /// - /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 - pub fn http1_allow_spaces_after_header_name_in_responses( - &mut self, - enabled: bool, - ) -> &mut Builder { - self.h1_parser_config - .allow_spaces_after_header_name_in_responses(enabled); - self - } - - /// Set whether HTTP/1 connections will accept obsolete line folding for - /// header values. - /// - /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when - /// parsing. - /// - /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] 
has - /// to say about it: - /// - /// > A server that receives an obs-fold in a request message that is not - /// > within a message/http container MUST either reject the message by - /// > sending a 400 (Bad Request), preferably with a representation - /// > explaining that obsolete line folding is unacceptable, or replace - /// > each received obs-fold with one or more SP octets prior to - /// > interpreting the field value or forwarding the message downstream. - /// - /// > A proxy or gateway that receives an obs-fold in a response message - /// > that is not within a message/http container MUST either discard the - /// > message and replace it with a 502 (Bad Gateway) response, preferably - /// > with a representation explaining that unacceptable line folding was - /// > received, or replace each received obs-fold with one or more SP - /// > octets prior to interpreting the field value or forwarding the - /// > message downstream. - /// - /// > A user agent that receives an obs-fold in a response message that is - /// > not within a message/http container MUST replace each received - /// > obs-fold with one or more SP octets prior to interpreting the field - /// > value. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - /// - /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 - pub fn http1_allow_obsolete_multiline_headers_in_responses( - &mut self, - enabled: bool, - ) -> &mut Builder { - self.h1_parser_config - .allow_obsolete_multiline_headers_in_responses(enabled); - self - } - - /// Set whether HTTP/1 connections will silently ignored malformed header lines. - /// - /// If this is enabled and and a header line does not start with a valid header - /// name, or does not include a colon at all, the line will be silently ignored - /// and no error will be reported. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. 
- pub fn http1_ignore_invalid_headers_in_responses( - &mut self, - enabled: bool, - ) -> &mut Builder { - self.h1_parser_config - .ignore_invalid_headers_in_responses(enabled); - self - } - - /// Set whether HTTP/1 connections should try to use vectored writes, - /// or always flatten into a single buffer. - /// - /// Note that setting this to false may mean more copies of body data, - /// but may also improve performance when an IO transport doesn't - /// support vectored writes well, such as most TLS implementations. - /// - /// Setting this to true will force hyper to use queued strategy - /// which may eliminate unnecessary cloning on some TLS backends - /// - /// Default is `auto`. In this mode hyper will try to guess which - /// mode to use - pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder { - self.h1_writev = Some(enabled); - self - } - - /// Set whether HTTP/1 connections will write header names as title case at - /// the socket level. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Builder { - self.h1_title_case_headers = enabled; - self - } - - /// Set whether to support preserving original header cases. - /// - /// Currently, this will record the original cases received, and store them - /// in a private extension on the `Response`. It will also look for and use - /// such an extension in any provided `Request`. - /// - /// Since the relevant extension is still private, there is no way to - /// interact with the original cases. The only effect this can have now is - /// to forward the cases in a proxy-like fashion. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder { - self.h1_preserve_header_case = enabled; - self - } - - /// Set whether to support preserving original header order. 
- /// - /// Currently, this will record the order in which headers are received, and store this - /// ordering in a private extension on the `Response`. It will also look for and use - /// such an extension in any provided `Request`. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - #[cfg(feature = "ffi")] - pub fn http1_preserve_header_order(&mut self, enabled: bool) -> &mut Builder { - self.h1_preserve_header_order = enabled; - self - } - - /// Sets the exact size of the read buffer to *always* use. - /// - /// Note that setting this option unsets the `http1_max_buf_size` option. - /// - /// Default is an adaptive read buffer. - pub fn http1_read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { - self.h1_read_buf_exact_size = sz; - self.h1_max_buf_size = None; - self - } - - /// Set the maximum buffer size for the connection. - /// - /// Default is ~400kb. - /// - /// Note that setting this option unsets the `http1_read_exact_buf_size` option. - /// - /// # Panics - /// - /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { - assert!( - max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, - "the max_buf_size cannot be smaller than the minimum that h1 specifies." - ); - - self.h1_max_buf_size = Some(max); - self.h1_read_buf_exact_size = None; - self - } - - #[cfg(feature = "ffi")] - pub(crate) fn http1_headers_raw(&mut self, enabled: bool) -> &mut Self { - self.h1_headers_raw = enabled; - self - } - - /// Sets whether HTTP2 is required. - /// - /// Default is false. 
- #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_only(&mut self, enabled: bool) -> &mut Builder { - if enabled { - self.version = Proto::Http2 - } - self - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.adaptive_window = false; - self.h2_builder.initial_stream_window_size = sz; - } - self - } - - /// Sets the max connection-level flow control for HTTP2 - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_connection_window_size( - &mut self, - sz: impl Into>, - ) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.adaptive_window = false; - self.h2_builder.initial_conn_window_size = sz; - } - self - } - - /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `http2_initial_stream_window_size` and - /// `http2_initial_connection_window_size`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { - use proto::h2::SPEC_WINDOW_SIZE; - - self.h2_builder.adaptive_window = enabled; - if enabled { - self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; - self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; - } - self - } - - /// Sets the maximum frame size to use for HTTP2. - /// - /// Passing `None` will do nothing. 
- /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.max_frame_size = sz; - } - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_interval( - &mut self, - interval: impl Into>, - ) -> &mut Self { - self.h2_builder.keep_alive_interval = interval.into(); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. - /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { - self.h2_builder.keep_alive_timeout = timeout; - self - } - - /// Sets whether HTTP2 keep-alive should apply while the connection is idle. - /// - /// If disabled, keep-alive pings are only sent while there are open - /// request/responses streams. If enabled, pings are also sent when no - /// streams are active. Does nothing if `http2_keep_alive_interval` is - /// disabled. - /// - /// Default is `false`. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. 
- #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { - self.h2_builder.keep_alive_while_idle = enabled; - self - } - - /// Sets the maximum number of HTTP2 concurrent locally reset streams. - /// - /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more - /// details. - /// - /// The default value is determined by the `h2` crate. - /// - /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { - self.h2_builder.max_concurrent_reset_streams = Some(max); - self - } - - /// Set the maximum write buffer size for each HTTP/2 stream. - /// - /// Default is currently 1MB, but may change. - /// - /// # Panics - /// - /// The value must be no larger than `u32::MAX`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { - assert!(max <= std::u32::MAX as usize); - self.h2_builder.max_send_buffer_size = max; - self - } - - /// Constructs a connection with the configured options and IO. - /// See [`client::conn`](crate::client::conn) for more. - /// - /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will - /// do nothing. 
- pub fn handshake( - &self, - io: T, - ) -> impl Future, Connection)>> - where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: HttpBody + 'static, - B::Data: Send, - B::Error: Into>, - { - let opts = self.clone(); - - async move { - trace!("client handshake {:?}", opts.version); - - let (tx, rx) = dispatch::channel(); - let proto = match opts.version { - #[cfg(feature = "http1")] - Proto::Http1 => { - let mut conn = proto::Conn::new(io); - conn.set_h1_parser_config(opts.h1_parser_config); - if let Some(writev) = opts.h1_writev { - if writev { - conn.set_write_strategy_queue(); - } else { - conn.set_write_strategy_flatten(); - } - } - if opts.h1_title_case_headers { - conn.set_title_case_headers(); - } - if opts.h1_preserve_header_case { - conn.set_preserve_header_case(); - } - #[cfg(feature = "ffi")] - if opts.h1_preserve_header_order { - conn.set_preserve_header_order(); - } - if opts.h09_responses { - conn.set_h09_responses(); - } - - #[cfg(feature = "ffi")] - conn.set_raw_headers(opts.h1_headers_raw); - - if let Some(sz) = opts.h1_read_buf_exact_size { - conn.set_read_buf_exact_size(sz); - } - if let Some(max) = opts.h1_max_buf_size { - conn.set_max_buf_size(max); - } - let cd = proto::h1::dispatch::Client::new(rx); - let dispatch = proto::h1::Dispatcher::new(cd, conn); - ProtoClient::H1 { h1: dispatch } - } - #[cfg(feature = "http2")] - Proto::Http2 => { - let h2 = - proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec.clone()) - .await?; - ProtoClient::H2 { h2 } - } - }; - - Ok(( - SendRequest { dispatch: tx }, - Connection { inner: Some(proto) }, - )) - } - } -} - -// ===== impl ResponseFuture - -impl Future for ResponseFuture { - type Output = crate::Result>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match self.inner { - ResponseFutureState::Waiting(ref mut rx) => { - Pin::new(rx).poll(cx).map(|res| match res { - Ok(Ok(resp)) => Ok(resp), - Ok(Err(err)) => Err(err), - // this is definite bug if it 
happens, but it shouldn't happen! - Err(_canceled) => panic!("dispatch dropped without returning error"), - }) - } - ResponseFutureState::Error(ref mut err) => { - Poll::Ready(Err(err.take().expect("polled after ready"))) - } - } - } -} - -impl fmt::Debug for ResponseFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ResponseFuture").finish() - } -} - -// ===== impl ProtoClient - -impl Future for ProtoClient -where - T: AsyncRead + AsyncWrite + Send + Unpin + 'static, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Output = crate::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match self.project() { - #[cfg(feature = "http1")] - ProtoClientProj::H1 { h1 } => h1.poll(cx), - #[cfg(feature = "http2")] - ProtoClientProj::H2 { h2, .. } => h2.poll(cx), - - #[cfg(not(feature = "http1"))] - ProtoClientProj::H1 { h1 } => match h1.0 {}, - #[cfg(not(feature = "http2"))] - ProtoClientProj::H2 { h2, .. 
} => match h2.0 {}, - } - } -} - -// assert trait markers - -trait AssertSend: Send {} -trait AssertSendSync: Send + Sync {} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -#[doc(hidden)] -impl AssertSendSync for SendRequest {} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -#[doc(hidden)] -impl AssertSend for Connection -where - T: AsyncRead + AsyncWrite + Send + 'static, - B: HttpBody + 'static, - B::Data: Send, -{ -} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -#[doc(hidden)] -impl AssertSendSync for Connection -where - T: AsyncRead + AsyncWrite + Send + 'static, - B: HttpBody + 'static, - B::Data: Send + Sync + 'static, -{ -} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -#[doc(hidden)] -impl AssertSendSync for Builder {} - -#[doc(hidden)] -impl AssertSend for ResponseFuture {} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/dispatch.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/dispatch.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/dispatch.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/dispatch.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,445 +0,0 @@ -#[cfg(feature = "http2")] -use std::future::Future; - -use futures_util::FutureExt; -use tokio::sync::{mpsc, oneshot}; - -#[cfg(feature = "http2")] -use crate::common::Pin; -use crate::common::{task, Poll}; - -pub(crate) type RetryPromise = oneshot::Receiver)>>; -pub(crate) type Promise = oneshot::Receiver>; - -pub(crate) fn channel() -> (Sender, Receiver) { - let (tx, rx) = mpsc::unbounded_channel(); - let (giver, taker) = want::new(); - let tx = Sender { - buffered_once: false, - giver, - inner: tx, - }; - let rx = Receiver { inner: rx, taker }; - (tx, rx) -} - -/// A bounded sender of requests and callbacks for when responses are ready. -/// -/// While the inner sender is unbounded, the Giver is used to determine -/// if the Receiver is ready for another request. 
-pub(crate) struct Sender { - /// One message is always allowed, even if the Receiver hasn't asked - /// for it yet. This boolean keeps track of whether we've sent one - /// without notice. - buffered_once: bool, - /// The Giver helps watch that the the Receiver side has been polled - /// when the queue is empty. This helps us know when a request and - /// response have been fully processed, and a connection is ready - /// for more. - giver: want::Giver, - /// Actually bounded by the Giver, plus `buffered_once`. - inner: mpsc::UnboundedSender>, -} - -/// An unbounded version. -/// -/// Cannot poll the Giver, but can still use it to determine if the Receiver -/// has been dropped. However, this version can be cloned. -#[cfg(feature = "http2")] -pub(crate) struct UnboundedSender { - /// Only used for `is_closed`, since mpsc::UnboundedSender cannot be checked. - giver: want::SharedGiver, - inner: mpsc::UnboundedSender>, -} - -impl Sender { - pub(crate) fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - self.giver - .poll_want(cx) - .map_err(|_| crate::Error::new_closed()) - } - - pub(crate) fn is_ready(&self) -> bool { - self.giver.is_wanting() - } - - pub(crate) fn is_closed(&self) -> bool { - self.giver.is_canceled() - } - - fn can_send(&mut self) -> bool { - if self.giver.give() || !self.buffered_once { - // If the receiver is ready *now*, then of course we can send. - // - // If the receiver isn't ready yet, but we don't have anything - // in the channel yet, then allow one message. 
- self.buffered_once = true; - true - } else { - false - } - } - - pub(crate) fn try_send(&mut self, val: T) -> Result, T> { - if !self.can_send() { - return Err(val); - } - let (tx, rx) = oneshot::channel(); - self.inner - .send(Envelope(Some((val, Callback::Retry(Some(tx)))))) - .map(move |_| rx) - .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) - } - - pub(crate) fn send(&mut self, val: T) -> Result, T> { - if !self.can_send() { - return Err(val); - } - let (tx, rx) = oneshot::channel(); - self.inner - .send(Envelope(Some((val, Callback::NoRetry(Some(tx)))))) - .map(move |_| rx) - .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) - } - - #[cfg(feature = "http2")] - pub(crate) fn unbound(self) -> UnboundedSender { - UnboundedSender { - giver: self.giver.shared(), - inner: self.inner, - } - } -} - -#[cfg(feature = "http2")] -impl UnboundedSender { - pub(crate) fn is_ready(&self) -> bool { - !self.giver.is_canceled() - } - - pub(crate) fn is_closed(&self) -> bool { - self.giver.is_canceled() - } - - pub(crate) fn try_send(&mut self, val: T) -> Result, T> { - let (tx, rx) = oneshot::channel(); - self.inner - .send(Envelope(Some((val, Callback::Retry(Some(tx)))))) - .map(move |_| rx) - .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) - } - - #[cfg(all(feature = "backports", feature = "http2"))] - pub(crate) fn send(&mut self, val: T) -> Result, T> { - let (tx, rx) = oneshot::channel(); - self.inner - .send(Envelope(Some((val, Callback::NoRetry(Some(tx)))))) - .map(move |_| rx) - .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) - } -} - -#[cfg(feature = "http2")] -impl Clone for UnboundedSender { - fn clone(&self) -> Self { - UnboundedSender { - giver: self.giver.clone(), - inner: self.inner.clone(), - } - } -} - -pub(crate) struct Receiver { - inner: mpsc::UnboundedReceiver>, - taker: want::Taker, -} - -impl Receiver { - pub(crate) fn poll_recv( - &mut self, - cx: &mut task::Context<'_>, - ) -> 
Poll)>> { - match self.inner.poll_recv(cx) { - Poll::Ready(item) => { - Poll::Ready(item.map(|mut env| env.0.take().expect("envelope not dropped"))) - } - Poll::Pending => { - self.taker.want(); - Poll::Pending - } - } - } - - #[cfg(feature = "http1")] - pub(crate) fn close(&mut self) { - self.taker.cancel(); - self.inner.close(); - } - - #[cfg(feature = "http1")] - pub(crate) fn try_recv(&mut self) -> Option<(T, Callback)> { - match self.inner.recv().now_or_never() { - Some(Some(mut env)) => env.0.take(), - _ => None, - } - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - // Notify the giver about the closure first, before dropping - // the mpsc::Receiver. - self.taker.cancel(); - } -} - -struct Envelope(Option<(T, Callback)>); - -impl Drop for Envelope { - fn drop(&mut self) { - if let Some((val, cb)) = self.0.take() { - cb.send(Err(( - crate::Error::new_canceled().with("connection closed"), - Some(val), - ))); - } - } -} - -pub(crate) enum Callback { - Retry(Option)>>>), - NoRetry(Option>>), -} - -impl Drop for Callback { - fn drop(&mut self) { - // FIXME(nox): What errors do we want here? 
- let error = crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() { - "user code panicked" - } else { - "runtime dropped the dispatch task" - }); - - match self { - Callback::Retry(tx) => { - if let Some(tx) = tx.take() { - let _ = tx.send(Err((error, None))); - } - } - Callback::NoRetry(tx) => { - if let Some(tx) = tx.take() { - let _ = tx.send(Err(error)); - } - } - } - } -} - -impl Callback { - #[cfg(feature = "http2")] - pub(crate) fn is_canceled(&self) -> bool { - match *self { - Callback::Retry(Some(ref tx)) => tx.is_closed(), - Callback::NoRetry(Some(ref tx)) => tx.is_closed(), - _ => unreachable!(), - } - } - - pub(crate) fn poll_canceled(&mut self, cx: &mut task::Context<'_>) -> Poll<()> { - match *self { - Callback::Retry(Some(ref mut tx)) => tx.poll_closed(cx), - Callback::NoRetry(Some(ref mut tx)) => tx.poll_closed(cx), - _ => unreachable!(), - } - } - - pub(crate) fn send(mut self, val: Result)>) { - match self { - Callback::Retry(ref mut tx) => { - let _ = tx.take().unwrap().send(val); - } - Callback::NoRetry(ref mut tx) => { - let _ = tx.take().unwrap().send(val.map_err(|e| e.0)); - } - } - } - - #[cfg(feature = "http2")] - pub(crate) async fn send_when( - self, - mut when: impl Future)>> + Unpin, - ) { - use futures_util::future; - use tracing::trace; - - let mut cb = Some(self); - - // "select" on this callback being canceled, and the future completing - future::poll_fn(move |cx| { - match Pin::new(&mut when).poll(cx) { - Poll::Ready(Ok(res)) => { - cb.take().expect("polled after complete").send(Ok(res)); - Poll::Ready(()) - } - Poll::Pending => { - // check if the callback is canceled - ready!(cb.as_mut().unwrap().poll_canceled(cx)); - trace!("send_when canceled"); - Poll::Ready(()) - } - Poll::Ready(Err(err)) => { - cb.take().expect("polled after complete").send(Err(err)); - Poll::Ready(()) - } - } - }) - .await - } -} - -#[cfg(test)] -mod tests { - #[cfg(feature = "nightly")] - extern crate test; - - use std::future::Future; 
- use std::pin::Pin; - use std::task::{Context, Poll}; - - use super::{channel, Callback, Receiver}; - - #[derive(Debug)] - struct Custom(i32); - - impl Future for Receiver { - type Output = Option<(T, Callback)>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.poll_recv(cx) - } - } - - /// Helper to check if the future is ready after polling once. - struct PollOnce<'a, F>(&'a mut F); - - impl Future for PollOnce<'_, F> - where - F: Future + Unpin, - { - type Output = Option<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match Pin::new(&mut self.0).poll(cx) { - Poll::Ready(_) => Poll::Ready(Some(())), - Poll::Pending => Poll::Ready(None), - } - } - } - - #[tokio::test] - async fn drop_receiver_sends_cancel_errors() { - let _ = pretty_env_logger::try_init(); - - let (mut tx, mut rx) = channel::(); - - // must poll once for try_send to succeed - assert!(PollOnce(&mut rx).await.is_none(), "rx empty"); - - let promise = tx.try_send(Custom(43)).unwrap(); - drop(rx); - - let fulfilled = promise.await; - let err = fulfilled - .expect("fulfilled") - .expect_err("promise should error"); - match (err.0.kind(), err.1) { - (&crate::error::Kind::Canceled, Some(_)) => (), - e => panic!("expected Error::Cancel(_), found {:?}", e), - } - } - - #[tokio::test] - async fn sender_checks_for_want_on_send() { - let (mut tx, mut rx) = channel::(); - - // one is allowed to buffer, second is rejected - let _ = tx.try_send(Custom(1)).expect("1 buffered"); - tx.try_send(Custom(2)).expect_err("2 not ready"); - - assert!(PollOnce(&mut rx).await.is_some(), "rx once"); - - // Even though 1 has been popped, only 1 could be buffered for the - // lifetime of the channel. 
- tx.try_send(Custom(2)).expect_err("2 still not ready"); - - assert!(PollOnce(&mut rx).await.is_none(), "rx empty"); - - let _ = tx.try_send(Custom(2)).expect("2 ready"); - } - - #[cfg(feature = "http2")] - #[test] - fn unbounded_sender_doesnt_bound_on_want() { - let (tx, rx) = channel::(); - let mut tx = tx.unbound(); - - let _ = tx.try_send(Custom(1)).unwrap(); - let _ = tx.try_send(Custom(2)).unwrap(); - let _ = tx.try_send(Custom(3)).unwrap(); - - drop(rx); - - let _ = tx.try_send(Custom(4)).unwrap_err(); - } - - #[cfg(feature = "nightly")] - #[bench] - fn giver_queue_throughput(b: &mut test::Bencher) { - use crate::{Body, Request, Response}; - - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap(); - let (mut tx, mut rx) = channel::, Response>(); - - b.iter(move || { - let _ = tx.send(Request::default()).unwrap(); - rt.block_on(async { - loop { - let poll_once = PollOnce(&mut rx); - let opt = poll_once.await; - if opt.is_none() { - break; - } - } - }); - }) - } - - #[cfg(feature = "nightly")] - #[bench] - fn giver_queue_not_ready(b: &mut test::Bencher) { - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap(); - let (_tx, mut rx) = channel::(); - b.iter(move || { - rt.block_on(async { - let poll_once = PollOnce(&mut rx); - assert!(poll_once.await.is_none()); - }); - }) - } - - #[cfg(feature = "nightly")] - #[bench] - fn giver_queue_cancel(b: &mut test::Bencher) { - let (_tx, mut rx) = channel::(); - - b.iter(move || { - rx.taker.cancel(); - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,68 +0,0 @@ -//! HTTP Client -//! -//! There are two levels of APIs provided for construct HTTP clients: -//! -//! 
- The higher-level [`Client`](Client) type. -//! - The lower-level [`conn`](conn) module. -//! -//! # Client -//! -//! The [`Client`](Client) is the main way to send HTTP requests to a server. -//! The default `Client` provides these things on top of the lower-level API: -//! -//! - A default **connector**, able to resolve hostnames and connect to -//! destinations over plain-text TCP. -//! - A **pool** of existing connections, allowing better performance when -//! making multiple requests to the same hostname. -//! - Automatic setting of the `Host` header, based on the request `Uri`. -//! - Automatic request **retries** when a pooled connection is closed by the -//! server before any bytes have been written. -//! -//! Many of these features can configured, by making use of -//! [`Client::builder`](Client::builder). -//! -//! ## Example -//! -//! For a small example program simply fetching a URL, take a look at the -//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs). -//! -//! ``` -//! # #[cfg(all(feature = "tcp", feature = "client", any(feature = "http1", feature = "http2")))] -//! # async fn fetch_httpbin() -> hyper::Result<()> { -//! use hyper::{body::HttpBody as _, Client, Uri}; -//! -//! let client = Client::new(); -//! -//! // Make a GET /ip to 'http://httpbin.org' -//! let res = client.get(Uri::from_static("http://httpbin.org/ip")).await?; -//! -//! // And then, if the request gets a response... -//! println!("status: {}", res.status()); -//! -//! // Concatenate the body stream into a single buffer... -//! let buf = hyper::body::to_bytes(res).await?; -//! -//! println!("body: {:?}", buf); -//! # Ok(()) -//! # } -//! # fn main () {} -//! ``` - -#[cfg(feature = "tcp")] -pub use self::connect::HttpConnector; - -pub mod connect; -#[cfg(all(test, feature = "runtime"))] -mod tests; - -cfg_feature! 
{ - #![any(feature = "http1", feature = "http2")] - - pub use self::client::{Builder, Client, ResponseFuture}; - - mod client; - pub mod conn; - pub(super) mod dispatch; - mod pool; - pub mod service; -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/pool.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/pool.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/pool.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/pool.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1044 +0,0 @@ -use std::collections::{HashMap, HashSet, VecDeque}; -use std::error::Error as StdError; -use std::fmt; -use std::ops::{Deref, DerefMut}; -use std::sync::{Arc, Mutex, Weak}; - -#[cfg(not(feature = "runtime"))] -use std::time::{Duration, Instant}; - -use futures_channel::oneshot; -#[cfg(feature = "runtime")] -use tokio::time::{Duration, Instant, Interval}; -use tracing::{debug, trace}; - -use super::client::Ver; -use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin}; - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -pub(super) struct Pool { - // If the pool is disabled, this is None. - inner: Option>>>, -} - -// Before using a pooled connection, make sure the sender is not dead. -// -// This is a trait to allow the `client::pool::tests` to work for `i32`. -// -// See https://github.com/hyperium/hyper/issues/1429 -pub(super) trait Poolable: Unpin + Send + Sized + 'static { - fn is_open(&self) -> bool; - /// Reserve this connection. - /// - /// Allows for HTTP/2 to return a shared reservation. - fn reserve(self) -> Reservation; - fn can_share(&self) -> bool; -} - -/// When checking out a pooled connection, it might be that the connection -/// only supports a single reservation, or it might be usable for many. -/// -/// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be -/// used for multiple requests. 
-// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -pub(super) enum Reservation { - /// This connection could be used multiple times, the first one will be - /// reinserted into the `idle` pool, and the second will be given to - /// the `Checkout`. - #[cfg(feature = "http2")] - Shared(T, T), - /// This connection requires unique access. It will be returned after - /// use is complete. - Unique(T), -} - -/// Simple type alias in case the key type needs to be adjusted. -pub(super) type Key = (http::uri::Scheme, http::uri::Authority); //Arc; - -struct PoolInner { - // A flag that a connection is being established, and the connection - // should be shared. This prevents making multiple HTTP/2 connections - // to the same host. - connecting: HashSet, - // These are internal Conns sitting in the event loop in the KeepAlive - // state, waiting to receive a new Request to send on the socket. - idle: HashMap>>, - max_idle_per_host: usize, - // These are outstanding Checkouts that are waiting for a socket to be - // able to send a Request one. This is used when "racing" for a new - // connection. - // - // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait - // for the Pool to receive an idle Conn. When a Conn becomes idle, - // this list is checked for any parked Checkouts, and tries to notify - // them that the Conn could be used instead of waiting for a brand new - // connection. - waiters: HashMap>>, - // A oneshot channel is used to allow the interval to be notified when - // the Pool completely drops. That way, the interval can cancel immediately. - #[cfg(feature = "runtime")] - idle_interval_ref: Option>, - #[cfg(feature = "runtime")] - exec: Exec, - timeout: Option, -} - -// This is because `Weak::new()` *allocates* space for `T`, even if it -// doesn't need it! 
-struct WeakOpt(Option>); - -#[derive(Clone, Copy, Debug)] -pub(super) struct Config { - pub(super) idle_timeout: Option, - pub(super) max_idle_per_host: usize, -} - -impl Config { - pub(super) fn is_enabled(&self) -> bool { - self.max_idle_per_host > 0 - } -} - -impl Pool { - pub(super) fn new(config: Config, __exec: &Exec) -> Pool { - let inner = if config.is_enabled() { - Some(Arc::new(Mutex::new(PoolInner { - connecting: HashSet::new(), - idle: HashMap::new(), - #[cfg(feature = "runtime")] - idle_interval_ref: None, - max_idle_per_host: config.max_idle_per_host, - waiters: HashMap::new(), - #[cfg(feature = "runtime")] - exec: __exec.clone(), - timeout: config.idle_timeout, - }))) - } else { - None - }; - - Pool { inner } - } - - fn is_enabled(&self) -> bool { - self.inner.is_some() - } - - #[cfg(test)] - pub(super) fn no_timer(&self) { - // Prevent an actual interval from being created for this pool... - #[cfg(feature = "runtime")] - { - let mut inner = self.inner.as_ref().unwrap().lock().unwrap(); - assert!(inner.idle_interval_ref.is_none(), "timer already spawned"); - let (tx, _) = oneshot::channel(); - inner.idle_interval_ref = Some(tx); - } - } -} - -impl Pool { - /// Returns a `Checkout` which is a future that resolves if an idle - /// connection becomes available. - pub(super) fn checkout(&self, key: Key) -> Checkout { - Checkout { - key, - pool: self.clone(), - waiter: None, - } - } - - /// Ensure that there is only ever 1 connecting task for HTTP/2 - /// connections. This does nothing for HTTP/1. 
- pub(super) fn connecting(&self, key: &Key, ver: Ver) -> Option> { - if ver == Ver::Http2 { - if let Some(ref enabled) = self.inner { - let mut inner = enabled.lock().unwrap(); - return if inner.connecting.insert(key.clone()) { - let connecting = Connecting { - key: key.clone(), - pool: WeakOpt::downgrade(enabled), - }; - Some(connecting) - } else { - trace!("HTTP/2 connecting already in progress for {:?}", key); - None - }; - } - } - - // else - Some(Connecting { - key: key.clone(), - // in HTTP/1's case, there is never a lock, so we don't - // need to do anything in Drop. - pool: WeakOpt::none(), - }) - } - - #[cfg(test)] - fn locked(&self) -> std::sync::MutexGuard<'_, PoolInner> { - self.inner.as_ref().expect("enabled").lock().expect("lock") - } - - /* Used in client/tests.rs... - #[cfg(feature = "runtime")] - #[cfg(test)] - pub(super) fn h1_key(&self, s: &str) -> Key { - Arc::new(s.to_string()) - } - - #[cfg(feature = "runtime")] - #[cfg(test)] - pub(super) fn idle_count(&self, key: &Key) -> usize { - self - .locked() - .idle - .get(key) - .map(|list| list.len()) - .unwrap_or(0) - } - */ - - pub(super) fn pooled( - &self, - #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut connecting: Connecting, - value: T, - ) -> Pooled { - let (value, pool_ref) = if let Some(ref enabled) = self.inner { - match value.reserve() { - #[cfg(feature = "http2")] - Reservation::Shared(to_insert, to_return) => { - let mut inner = enabled.lock().unwrap(); - inner.put(connecting.key.clone(), to_insert, enabled); - // Do this here instead of Drop for Connecting because we - // already have a lock, no need to lock the mutex twice. - inner.connected(&connecting.key); - // prevent the Drop of Connecting from repeating inner.connected() - connecting.pool = WeakOpt::none(); - - // Shared reservations don't need a reference to the pool, - // since the pool always keeps a copy. 
- (to_return, WeakOpt::none()) - } - Reservation::Unique(value) => { - // Unique reservations must take a reference to the pool - // since they hope to reinsert once the reservation is - // completed - (value, WeakOpt::downgrade(enabled)) - } - } - } else { - // If pool is not enabled, skip all the things... - - // The Connecting should have had no pool ref - debug_assert!(connecting.pool.upgrade().is_none()); - - (value, WeakOpt::none()) - }; - Pooled { - key: connecting.key.clone(), - is_reused: false, - pool: pool_ref, - value: Some(value), - } - } - - fn reuse(&self, key: &Key, value: T) -> Pooled { - debug!("reuse idle connection for {:?}", key); - // TODO: unhack this - // In Pool::pooled(), which is used for inserting brand new connections, - // there's some code that adjusts the pool reference taken depending - // on if the Reservation can be shared or is unique. By the time - // reuse() is called, the reservation has already been made, and - // we just have the final value, without knowledge of if this is - // unique or shared. So, the hack is to just assume Ver::Http2 means - // shared... :( - let mut pool_ref = WeakOpt::none(); - if !value.can_share() { - if let Some(ref enabled) = self.inner { - pool_ref = WeakOpt::downgrade(enabled); - } - } - - Pooled { - is_reused: true, - key: key.clone(), - pool: pool_ref, - value: Some(value), - } - } -} - -/// Pop off this list, looking for a usable connection that hasn't expired. -struct IdlePopper<'a, T> { - key: &'a Key, - list: &'a mut Vec>, -} - -impl<'a, T: Poolable + 'a> IdlePopper<'a, T> { - fn pop(self, expiration: &Expiration) -> Option> { - while let Some(entry) = self.list.pop() { - // If the connection has been closed, or is older than our idle - // timeout, simply drop it and keep looking... 
- if !entry.value.is_open() { - trace!("removing closed connection for {:?}", self.key); - continue; - } - // TODO: Actually, since the `idle` list is pushed to the end always, - // that would imply that if *this* entry is expired, then anything - // "earlier" in the list would *have* to be expired also... Right? - // - // In that case, we could just break out of the loop and drop the - // whole list... - if expiration.expires(entry.idle_at) { - trace!("removing expired connection for {:?}", self.key); - continue; - } - - let value = match entry.value.reserve() { - #[cfg(feature = "http2")] - Reservation::Shared(to_reinsert, to_checkout) => { - self.list.push(Idle { - idle_at: Instant::now(), - value: to_reinsert, - }); - to_checkout - } - Reservation::Unique(unique) => unique, - }; - - return Some(Idle { - idle_at: entry.idle_at, - value, - }); - } - - None - } -} - -impl PoolInner { - fn put(&mut self, key: Key, value: T, __pool_ref: &Arc>>) { - if value.can_share() && self.idle.contains_key(&key) { - trace!("put; existing idle HTTP/2 connection for {:?}", key); - return; - } - trace!("put; add idle connection for {:?}", key); - let mut remove_waiters = false; - let mut value = Some(value); - if let Some(waiters) = self.waiters.get_mut(&key) { - while let Some(tx) = waiters.pop_front() { - if !tx.is_canceled() { - let reserved = value.take().expect("value already sent"); - let reserved = match reserved.reserve() { - #[cfg(feature = "http2")] - Reservation::Shared(to_keep, to_send) => { - value = Some(to_keep); - to_send - } - Reservation::Unique(uniq) => uniq, - }; - match tx.send(reserved) { - Ok(()) => { - if value.is_none() { - break; - } else { - continue; - } - } - Err(e) => { - value = Some(e); - } - } - } - - trace!("put; removing canceled waiter for {:?}", key); - } - remove_waiters = waiters.is_empty(); - } - if remove_waiters { - self.waiters.remove(&key); - } - - match value { - Some(value) => { - // borrow-check scope... 
- { - let idle_list = self.idle.entry(key.clone()).or_insert_with(Vec::new); - if self.max_idle_per_host <= idle_list.len() { - trace!("max idle per host for {:?}, dropping connection", key); - return; - } - - debug!("pooling idle connection for {:?}", key); - idle_list.push(Idle { - value, - idle_at: Instant::now(), - }); - } - - #[cfg(feature = "runtime")] - { - self.spawn_idle_interval(__pool_ref); - } - } - None => trace!("put; found waiter for {:?}", key), - } - } - - /// A `Connecting` task is complete. Not necessarily successfully, - /// but the lock is going away, so clean up. - fn connected(&mut self, key: &Key) { - let existed = self.connecting.remove(key); - debug_assert!(existed, "Connecting dropped, key not in pool.connecting"); - // cancel any waiters. if there are any, it's because - // this Connecting task didn't complete successfully. - // those waiters would never receive a connection. - self.waiters.remove(key); - } - - #[cfg(feature = "runtime")] - fn spawn_idle_interval(&mut self, pool_ref: &Arc>>) { - let (dur, rx) = { - if self.idle_interval_ref.is_some() { - return; - } - - if let Some(dur) = self.timeout { - let (tx, rx) = oneshot::channel(); - self.idle_interval_ref = Some(tx); - (dur, rx) - } else { - return; - } - }; - - let interval = IdleTask { - interval: tokio::time::interval(dur), - pool: WeakOpt::downgrade(pool_ref), - pool_drop_notifier: rx, - }; - - self.exec.execute(interval); - } -} - -impl PoolInner { - /// Any `FutureResponse`s that were created will have made a `Checkout`, - /// and possibly inserted into the pool that it is waiting for an idle - /// connection. If a user ever dropped that future, we need to clean out - /// those parked senders. 
- fn clean_waiters(&mut self, key: &Key) { - let mut remove_waiters = false; - if let Some(waiters) = self.waiters.get_mut(key) { - waiters.retain(|tx| !tx.is_canceled()); - remove_waiters = waiters.is_empty(); - } - if remove_waiters { - self.waiters.remove(key); - } - } -} - -#[cfg(feature = "runtime")] -impl PoolInner { - /// This should *only* be called by the IdleTask - fn clear_expired(&mut self) { - let dur = self.timeout.expect("interval assumes timeout"); - - let now = Instant::now(); - //self.last_idle_check_at = now; - - self.idle.retain(|key, values| { - values.retain(|entry| { - if !entry.value.is_open() { - trace!("idle interval evicting closed for {:?}", key); - return false; - } - - // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470. - if now.saturating_duration_since(entry.idle_at) > dur { - trace!("idle interval evicting expired for {:?}", key); - return false; - } - - // Otherwise, keep this value... - true - }); - - // returning false evicts this key/val - !values.is_empty() - }); - } -} - -impl Clone for Pool { - fn clone(&self) -> Pool { - Pool { - inner: self.inner.clone(), - } - } -} - -/// A wrapped poolable value that tries to reinsert to the Pool on Drop. -// Note: The bounds `T: Poolable` is needed for the Drop impl. 
-pub(super) struct Pooled { - value: Option, - is_reused: bool, - key: Key, - pool: WeakOpt>>, -} - -impl Pooled { - pub(super) fn is_reused(&self) -> bool { - self.is_reused - } - - pub(super) fn is_pool_enabled(&self) -> bool { - self.pool.0.is_some() - } - - fn as_ref(&self) -> &T { - self.value.as_ref().expect("not dropped") - } - - fn as_mut(&mut self) -> &mut T { - self.value.as_mut().expect("not dropped") - } -} - -impl Deref for Pooled { - type Target = T; - fn deref(&self) -> &T { - self.as_ref() - } -} - -impl DerefMut for Pooled { - fn deref_mut(&mut self) -> &mut T { - self.as_mut() - } -} - -impl Drop for Pooled { - fn drop(&mut self) { - if let Some(value) = self.value.take() { - if !value.is_open() { - // If we *already* know the connection is done here, - // it shouldn't be re-inserted back into the pool. - return; - } - - if let Some(pool) = self.pool.upgrade() { - if let Ok(mut inner) = pool.lock() { - inner.put(self.key.clone(), value, &pool); - } - } else if !value.can_share() { - trace!("pool dropped, dropping pooled ({:?})", self.key); - } - // Ver::Http2 is already in the Pool (or dead), so we wouldn't - // have an actual reference to the Pool. 
- } - } -} - -impl fmt::Debug for Pooled { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Pooled").field("key", &self.key).finish() - } -} - -struct Idle { - idle_at: Instant, - value: T, -} - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -pub(super) struct Checkout { - key: Key, - pool: Pool, - waiter: Option>, -} - -#[derive(Debug)] -pub(super) struct CheckoutIsClosedError; - -impl StdError for CheckoutIsClosedError {} - -impl fmt::Display for CheckoutIsClosedError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("checked out connection was closed") - } -} - -impl Checkout { - fn poll_waiter( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll>>> { - if let Some(mut rx) = self.waiter.take() { - match Pin::new(&mut rx).poll(cx) { - Poll::Ready(Ok(value)) => { - if value.is_open() { - Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value)))) - } else { - Poll::Ready(Some(Err( - crate::Error::new_canceled().with(CheckoutIsClosedError) - ))) - } - } - Poll::Pending => { - self.waiter = Some(rx); - Poll::Pending - } - Poll::Ready(Err(_canceled)) => Poll::Ready(Some(Err( - crate::Error::new_canceled().with("request has been canceled") - ))), - } - } else { - Poll::Ready(None) - } - } - - fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option> { - let entry = { - let mut inner = self.pool.inner.as_ref()?.lock().unwrap(); - let expiration = Expiration::new(inner.timeout); - let maybe_entry = inner.idle.get_mut(&self.key).and_then(|list| { - trace!("take? 
{:?}: expiration = {:?}", self.key, expiration.0); - // A block to end the mutable borrow on list, - // so the map below can check is_empty() - { - let popper = IdlePopper { - key: &self.key, - list, - }; - popper.pop(&expiration) - } - .map(|e| (e, list.is_empty())) - }); - - let (entry, empty) = if let Some((e, empty)) = maybe_entry { - (Some(e), empty) - } else { - // No entry found means nuke the list for sure. - (None, true) - }; - if empty { - //TODO: This could be done with the HashMap::entry API instead. - inner.idle.remove(&self.key); - } - - if entry.is_none() && self.waiter.is_none() { - let (tx, mut rx) = oneshot::channel(); - trace!("checkout waiting for idle connection: {:?}", self.key); - inner - .waiters - .entry(self.key.clone()) - .or_insert_with(VecDeque::new) - .push_back(tx); - - // register the waker with this oneshot - assert!(Pin::new(&mut rx).poll(cx).is_pending()); - self.waiter = Some(rx); - } - - entry - }; - - entry.map(|e| self.pool.reuse(&self.key, e.value)) - } -} - -impl Future for Checkout { - type Output = crate::Result>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - if let Some(pooled) = ready!(self.poll_waiter(cx)?) 
{ - return Poll::Ready(Ok(pooled)); - } - - if let Some(pooled) = self.checkout(cx) { - Poll::Ready(Ok(pooled)) - } else if !self.pool.is_enabled() { - Poll::Ready(Err(crate::Error::new_canceled().with("pool is disabled"))) - } else { - // There's a new waiter, already registered in self.checkout() - debug_assert!(self.waiter.is_some()); - Poll::Pending - } - } -} - -impl Drop for Checkout { - fn drop(&mut self) { - if self.waiter.take().is_some() { - trace!("checkout dropped for {:?}", self.key); - if let Some(Ok(mut inner)) = self.pool.inner.as_ref().map(|i| i.lock()) { - inner.clean_waiters(&self.key); - } - } - } -} - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -pub(super) struct Connecting { - key: Key, - pool: WeakOpt>>, -} - -impl Connecting { - pub(super) fn alpn_h2(self, pool: &Pool) -> Option { - debug_assert!( - self.pool.0.is_none(), - "Connecting::alpn_h2 but already Http2" - ); - - pool.connecting(&self.key, Ver::Http2) - } -} - -impl Drop for Connecting { - fn drop(&mut self) { - if let Some(pool) = self.pool.upgrade() { - // No need to panic on drop, that could abort! - if let Ok(mut inner) = pool.lock() { - inner.connected(&self.key); - } - } - } -} - -struct Expiration(Option); - -impl Expiration { - fn new(dur: Option) -> Expiration { - Expiration(dur) - } - - fn expires(&self, instant: Instant) -> bool { - match self.0 { - // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470. - Some(timeout) => Instant::now().saturating_duration_since(instant) > timeout, - None => false, - } - } -} - -#[cfg(feature = "runtime")] -pin_project_lite::pin_project! { - struct IdleTask { - #[pin] - interval: Interval, - pool: WeakOpt>>, - // This allows the IdleTask to be notified as soon as the entire - // Pool is fully dropped, and shutdown. This channel is never sent on, - // but Err(Canceled) will be received when the Pool is dropped. 
- #[pin] - pool_drop_notifier: oneshot::Receiver, - } -} - -#[cfg(feature = "runtime")] -impl Future for IdleTask { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let mut this = self.project(); - loop { - match this.pool_drop_notifier.as_mut().poll(cx) { - Poll::Ready(Ok(n)) => match n {}, - Poll::Pending => (), - Poll::Ready(Err(_canceled)) => { - trace!("pool closed, canceling idle interval"); - return Poll::Ready(()); - } - } - - ready!(this.interval.as_mut().poll_tick(cx)); - - if let Some(inner) = this.pool.upgrade() { - if let Ok(mut inner) = inner.lock() { - trace!("idle interval checking for expired"); - inner.clear_expired(); - continue; - } - } - return Poll::Ready(()); - } - } -} - -impl WeakOpt { - fn none() -> Self { - WeakOpt(None) - } - - fn downgrade(arc: &Arc) -> Self { - WeakOpt(Some(Arc::downgrade(arc))) - } - - fn upgrade(&self) -> Option> { - self.0.as_ref().and_then(Weak::upgrade) - } -} - -#[cfg(test)] -mod tests { - use std::task::Poll; - use std::time::Duration; - - use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; - use crate::common::{exec::Exec, task, Future, Pin}; - - /// Test unique reservations. 
- #[derive(Debug, PartialEq, Eq)] - struct Uniq(T); - - impl Poolable for Uniq { - fn is_open(&self) -> bool { - true - } - - fn reserve(self) -> Reservation { - Reservation::Unique(self) - } - - fn can_share(&self) -> bool { - false - } - } - - fn c(key: Key) -> Connecting { - Connecting { - key, - pool: WeakOpt::none(), - } - } - - fn host_key(s: &str) -> Key { - (http::uri::Scheme::HTTP, s.parse().expect("host key")) - } - - fn pool_no_timer() -> Pool { - pool_max_idle_no_timer(::std::usize::MAX) - } - - fn pool_max_idle_no_timer(max_idle: usize) -> Pool { - let pool = Pool::new( - super::Config { - idle_timeout: Some(Duration::from_millis(100)), - max_idle_per_host: max_idle, - }, - &Exec::Default, - ); - pool.no_timer(); - pool - } - - #[tokio::test] - async fn test_pool_checkout_smoke() { - let pool = pool_no_timer(); - let key = host_key("foo"); - let pooled = pool.pooled(c(key.clone()), Uniq(41)); - - drop(pooled); - - match pool.checkout(key).await { - Ok(pooled) => assert_eq!(*pooled, Uniq(41)), - Err(_) => panic!("not ready"), - }; - } - - /// Helper to check if the future is ready after polling once. 
- struct PollOnce<'a, F>(&'a mut F); - - impl Future for PollOnce<'_, F> - where - F: Future> + Unpin, - { - type Output = Option<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match Pin::new(&mut self.0).poll(cx) { - Poll::Ready(Ok(_)) => Poll::Ready(Some(())), - Poll::Ready(Err(_)) => Poll::Ready(Some(())), - Poll::Pending => Poll::Ready(None), - } - } - } - - #[tokio::test] - async fn test_pool_checkout_returns_none_if_expired() { - let pool = pool_no_timer(); - let key = host_key("foo"); - let pooled = pool.pooled(c(key.clone()), Uniq(41)); - - drop(pooled); - tokio::time::sleep(pool.locked().timeout.unwrap()).await; - let mut checkout = pool.checkout(key); - let poll_once = PollOnce(&mut checkout); - let is_not_ready = poll_once.await.is_none(); - assert!(is_not_ready); - } - - #[cfg(feature = "runtime")] - #[tokio::test] - async fn test_pool_checkout_removes_expired() { - let pool = pool_no_timer(); - let key = host_key("foo"); - - pool.pooled(c(key.clone()), Uniq(41)); - pool.pooled(c(key.clone()), Uniq(5)); - pool.pooled(c(key.clone()), Uniq(99)); - - assert_eq!( - pool.locked().idle.get(&key).map(|entries| entries.len()), - Some(3) - ); - tokio::time::sleep(pool.locked().timeout.unwrap()).await; - - let mut checkout = pool.checkout(key.clone()); - let poll_once = PollOnce(&mut checkout); - // checkout.await should clean out the expired - poll_once.await; - assert!(pool.locked().idle.get(&key).is_none()); - } - - #[test] - fn test_pool_max_idle_per_host() { - let pool = pool_max_idle_no_timer(2); - let key = host_key("foo"); - - pool.pooled(c(key.clone()), Uniq(41)); - pool.pooled(c(key.clone()), Uniq(5)); - pool.pooled(c(key.clone()), Uniq(99)); - - // pooled and dropped 3, max_idle should only allow 2 - assert_eq!( - pool.locked().idle.get(&key).map(|entries| entries.len()), - Some(2) - ); - } - - #[cfg(feature = "runtime")] - #[tokio::test] - async fn test_pool_timer_removes_expired() { - let _ = 
pretty_env_logger::try_init(); - tokio::time::pause(); - - let pool = Pool::new( - super::Config { - idle_timeout: Some(Duration::from_millis(10)), - max_idle_per_host: std::usize::MAX, - }, - &Exec::Default, - ); - - let key = host_key("foo"); - - pool.pooled(c(key.clone()), Uniq(41)); - pool.pooled(c(key.clone()), Uniq(5)); - pool.pooled(c(key.clone()), Uniq(99)); - - assert_eq!( - pool.locked().idle.get(&key).map(|entries| entries.len()), - Some(3) - ); - - // Let the timer tick passed the expiration... - tokio::time::advance(Duration::from_millis(30)).await; - // Yield so the Interval can reap... - tokio::task::yield_now().await; - - assert!(pool.locked().idle.get(&key).is_none()); - } - - #[tokio::test] - async fn test_pool_checkout_task_unparked() { - use futures_util::future::join; - use futures_util::FutureExt; - - let pool = pool_no_timer(); - let key = host_key("foo"); - let pooled = pool.pooled(c(key.clone()), Uniq(41)); - - let checkout = join(pool.checkout(key), async { - // the checkout future will park first, - // and then this lazy future will be polled, which will insert - // the pooled back into the pool - // - // this test makes sure that doing so will unpark the checkout - drop(pooled); - }) - .map(|(entry, _)| entry); - - assert_eq!(*checkout.await.unwrap(), Uniq(41)); - } - - #[tokio::test] - async fn test_pool_checkout_drop_cleans_up_waiters() { - let pool = pool_no_timer::>(); - let key = host_key("foo"); - - let mut checkout1 = pool.checkout(key.clone()); - let mut checkout2 = pool.checkout(key.clone()); - - let poll_once1 = PollOnce(&mut checkout1); - let poll_once2 = PollOnce(&mut checkout2); - - // first poll needed to get into Pool's parked - poll_once1.await; - assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); - poll_once2.await; - assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2); - - // on drop, clean up Pool - drop(checkout1); - assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); - - 
drop(checkout2); - assert!(pool.locked().waiters.get(&key).is_none()); - } - - #[derive(Debug)] - struct CanClose { - #[allow(unused)] - val: i32, - closed: bool, - } - - impl Poolable for CanClose { - fn is_open(&self) -> bool { - !self.closed - } - - fn reserve(self) -> Reservation { - Reservation::Unique(self) - } - - fn can_share(&self) -> bool { - false - } - } - - #[test] - fn pooled_drop_if_closed_doesnt_reinsert() { - let pool = pool_no_timer(); - let key = host_key("foo"); - pool.pooled( - c(key.clone()), - CanClose { - val: 57, - closed: true, - }, - ); - - assert!(!pool.locked().idle.contains_key(&key)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/service.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/service.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/service.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/service.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,94 +0,0 @@ -//! Utilities used to interact with the Tower ecosystem. -//! -//! This module provides `Connect` which hook-ins into the Tower ecosystem. - -use std::error::Error as StdError; -use std::future::Future; -use std::marker::PhantomData; - -use tracing::debug; - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -use super::conn::{Builder, SendRequest}; -use crate::{ - body::HttpBody, - common::{task, Pin, Poll}, - service::{MakeConnection, Service}, -}; - -/// Creates a connection via `SendRequest`. -/// -/// This accepts a `hyper::client::conn::Builder` and provides -/// a `MakeService` implementation to create connections from some -/// target `T`. -#[derive(Debug)] -pub struct Connect { - inner: C, - #[cfg_attr(feature = "deprecated", allow(deprecated))] - builder: Builder, - _pd: PhantomData, -} - -impl Connect { - /// Create a new `Connect` with some inner connector `C` and a connection - /// builder. 
- #[cfg_attr(feature = "deprecated", allow(deprecated))] - pub fn new(inner: C, builder: Builder) -> Self { - Self { - inner, - builder, - _pd: PhantomData, - } - } -} - -impl Service for Connect -where - C: MakeConnection, - C::Connection: Unpin + Send + 'static, - C::Future: Send + 'static, - C::Error: Into> + Send, - B: HttpBody + Unpin + Send + 'static, - B::Data: Send + Unpin, - B::Error: Into>, -{ - #[cfg_attr(feature = "deprecated", allow(deprecated))] - type Response = SendRequest; - type Error = crate::Error; - type Future = - Pin> + Send + 'static>>; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - self.inner - .poll_ready(cx) - .map_err(|e| crate::Error::new(crate::error::Kind::Connect).with(e.into())) - } - - fn call(&mut self, req: T) -> Self::Future { - let builder = self.builder.clone(); - let io = self.inner.make_connection(req); - - let fut = async move { - match io.await { - Ok(io) => match builder.handshake(io).await { - Ok((sr, conn)) => { - #[cfg_attr(feature = "deprecated", allow(deprecated))] - builder.exec.execute(async move { - if let Err(e) = conn.await { - debug!("connection error: {:?}", e); - } - }); - Ok(sr) - } - Err(e) => Err(e), - }, - Err(e) => { - let err = crate::Error::new(crate::error::Kind::Connect).with(e.into()); - Err(err) - } - } - }; - - Box::pin(fut) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/client/tests.rs s390-tools-2.33.1/rust-vendor/hyper/src/client/tests.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/client/tests.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/client/tests.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,286 +0,0 @@ -use std::io; - -use futures_util::future; -use tokio::net::TcpStream; - -use super::Client; - -#[tokio::test] -async fn client_connect_uri_argument() { - let connector = tower::service_fn(|dst: http::Uri| { - assert_eq!(dst.scheme(), Some(&http::uri::Scheme::HTTP)); - assert_eq!(dst.host(), Some("example.local")); - 
assert_eq!(dst.port(), None); - assert_eq!(dst.path(), "/", "path should be removed"); - - future::err::(io::Error::new(io::ErrorKind::Other, "expect me")) - }); - - let client = Client::builder().build::<_, crate::Body>(connector); - let _ = client - .get("http://example.local/and/a/path".parse().unwrap()) - .await - .expect_err("response should fail"); -} - -/* -// FIXME: re-implement tests with `async/await` -#[test] -fn retryable_request() { - let _ = pretty_env_logger::try_init(); - - let mut rt = Runtime::new().expect("new rt"); - let mut connector = MockConnector::new(); - - let sock1 = connector.mock("http://mock.local"); - let sock2 = connector.mock("http://mock.local"); - - let client = Client::builder() - .build::<_, crate::Body>(connector); - - client.pool.no_timer(); - - { - - let req = Request::builder() - .uri("http://mock.local/a") - .body(Default::default()) - .unwrap(); - let res1 = client.request(req); - let srv1 = poll_fn(|| { - try_ready!(sock1.read(&mut [0u8; 512])); - try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); - Ok(Async::Ready(())) - }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e)); - rt.block_on(res1.join(srv1)).expect("res1"); - } - drop(sock1); - - let req = Request::builder() - .uri("http://mock.local/b") - .body(Default::default()) - .unwrap(); - let res2 = client.request(req) - .map(|res| { - assert_eq!(res.status().as_u16(), 222); - }); - let srv2 = poll_fn(|| { - try_ready!(sock2.read(&mut [0u8; 512])); - try_ready!(sock2.write(b"HTTP/1.1 222 OK\r\nContent-Length: 0\r\n\r\n")); - Ok(Async::Ready(())) - }).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e)); - - rt.block_on(res2.join(srv2)).expect("res2"); -} - -#[test] -fn conn_reset_after_write() { - let _ = pretty_env_logger::try_init(); - - let mut rt = Runtime::new().expect("new rt"); - let mut connector = MockConnector::new(); - - let sock1 = connector.mock("http://mock.local"); - - let client = Client::builder() - 
.build::<_, crate::Body>(connector); - - client.pool.no_timer(); - - { - let req = Request::builder() - .uri("http://mock.local/a") - .body(Default::default()) - .unwrap(); - let res1 = client.request(req); - let srv1 = poll_fn(|| { - try_ready!(sock1.read(&mut [0u8; 512])); - try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); - Ok(Async::Ready(())) - }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e)); - rt.block_on(res1.join(srv1)).expect("res1"); - } - - let req = Request::builder() - .uri("http://mock.local/a") - .body(Default::default()) - .unwrap(); - let res2 = client.request(req); - let mut sock1 = Some(sock1); - let srv2 = poll_fn(|| { - // We purposefully keep the socket open until the client - // has written the second request, and THEN disconnect. - // - // Not because we expect servers to be jerks, but to trigger - // state where we write on an assumedly good connection, and - // only reset the close AFTER we wrote bytes. - try_ready!(sock1.as_mut().unwrap().read(&mut [0u8; 512])); - sock1.take(); - Ok(Async::Ready(())) - }).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e)); - let err = rt.block_on(res2.join(srv2)).expect_err("res2"); - assert!(err.is_incomplete_message(), "{:?}", err); -} - -#[test] -fn checkout_win_allows_connect_future_to_be_pooled() { - let _ = pretty_env_logger::try_init(); - - let mut rt = Runtime::new().expect("new rt"); - let mut connector = MockConnector::new(); - - - let (tx, rx) = oneshot::channel::<()>(); - let sock1 = connector.mock("http://mock.local"); - let sock2 = connector.mock_fut("http://mock.local", rx); - - let client = Client::builder() - .build::<_, crate::Body>(connector); - - client.pool.no_timer(); - - let uri = "http://mock.local/a".parse::().expect("uri parse"); - - // First request just sets us up to have a connection able to be put - // back in the pool. *However*, it doesn't insert immediately. 
The - // body has 1 pending byte, and we will only drain in request 2, once - // the connect future has been started. - let mut body = { - let res1 = client.get(uri.clone()) - .map(|res| res.into_body().concat2()); - let srv1 = poll_fn(|| { - try_ready!(sock1.read(&mut [0u8; 512])); - // Chunked is used so as to force 2 body reads. - try_ready!(sock1.write(b"\ - HTTP/1.1 200 OK\r\n\ - transfer-encoding: chunked\r\n\ - \r\n\ - 1\r\nx\r\n\ - 0\r\n\r\n\ - ")); - Ok(Async::Ready(())) - }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e)); - - rt.block_on(res1.join(srv1)).expect("res1").0 - }; - - - // The second request triggers the only mocked connect future, but then - // the drained body allows the first socket to go back to the pool, - // "winning" the checkout race. - { - let res2 = client.get(uri.clone()); - let drain = poll_fn(move || { - body.poll() - }); - let srv2 = poll_fn(|| { - try_ready!(sock1.read(&mut [0u8; 512])); - try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nx")); - Ok(Async::Ready(())) - }).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e)); - - rt.block_on(res2.join(drain).join(srv2)).expect("res2"); - } - - // "Release" the mocked connect future, and let the runtime spin once so - // it's all setup... - { - let mut tx = Some(tx); - let client = &client; - let key = client.pool.h1_key("http://mock.local"); - let mut tick_cnt = 0; - let fut = poll_fn(move || { - tx.take(); - - if client.pool.idle_count(&key) == 0 { - tick_cnt += 1; - assert!(tick_cnt < 10, "ticked too many times waiting for idle"); - trace!("no idle yet; tick count: {}", tick_cnt); - ::futures::task::current().notify(); - Ok(Async::NotReady) - } else { - Ok::<_, ()>(Async::Ready(())) - } - }); - rt.block_on(fut).unwrap(); - } - - // Third request just tests out that the "loser" connection was pooled. If - // it isn't, this will panic since the MockConnector doesn't have any more - // mocks to give out. 
- { - let res3 = client.get(uri); - let srv3 = poll_fn(|| { - try_ready!(sock2.read(&mut [0u8; 512])); - try_ready!(sock2.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); - Ok(Async::Ready(())) - }).map_err(|e: std::io::Error| panic!("srv3 poll_fn error: {}", e)); - - rt.block_on(res3.join(srv3)).expect("res3"); - } -} - -#[cfg(feature = "nightly")] -#[bench] -fn bench_http1_get_0b(b: &mut test::Bencher) { - let _ = pretty_env_logger::try_init(); - - let mut rt = Runtime::new().expect("new rt"); - let mut connector = MockConnector::new(); - - - let client = Client::builder() - .build::<_, crate::Body>(connector.clone()); - - client.pool.no_timer(); - - let uri = Uri::from_static("http://mock.local/a"); - - b.iter(move || { - let sock1 = connector.mock("http://mock.local"); - let res1 = client - .get(uri.clone()) - .and_then(|res| { - res.into_body().for_each(|_| Ok(())) - }); - let srv1 = poll_fn(|| { - try_ready!(sock1.read(&mut [0u8; 512])); - try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")); - Ok(Async::Ready(())) - }).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e)); - rt.block_on(res1.join(srv1)).expect("res1"); - }); -} - -#[cfg(feature = "nightly")] -#[bench] -fn bench_http1_get_10b(b: &mut test::Bencher) { - let _ = pretty_env_logger::try_init(); - - let mut rt = Runtime::new().expect("new rt"); - let mut connector = MockConnector::new(); - - - let client = Client::builder() - .build::<_, crate::Body>(connector.clone()); - - client.pool.no_timer(); - - let uri = Uri::from_static("http://mock.local/a"); - - b.iter(move || { - let sock1 = connector.mock("http://mock.local"); - let res1 = client - .get(uri.clone()) - .and_then(|res| { - res.into_body().for_each(|_| Ok(())) - }); - let srv1 = poll_fn(|| { - try_ready!(sock1.read(&mut [0u8; 512])); - try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n0123456789")); - Ok(Async::Ready(())) - }).map_err(|e: std::io::Error| panic!("srv1 poll_fn 
error: {}", e)); - rt.block_on(res1.join(srv1)).expect("res1"); - }); -} -*/ diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/buf.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/buf.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/buf.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,151 +0,0 @@ -use std::collections::VecDeque; -use std::io::IoSlice; - -use bytes::{Buf, BufMut, Bytes, BytesMut}; - -pub(crate) struct BufList { - bufs: VecDeque, -} - -impl BufList { - pub(crate) fn new() -> BufList { - BufList { - bufs: VecDeque::new(), - } - } - - #[inline] - pub(crate) fn push(&mut self, buf: T) { - debug_assert!(buf.has_remaining()); - self.bufs.push_back(buf); - } - - #[inline] - #[cfg(feature = "http1")] - pub(crate) fn bufs_cnt(&self) -> usize { - self.bufs.len() - } -} - -impl Buf for BufList { - #[inline] - fn remaining(&self) -> usize { - self.bufs.iter().map(|buf| buf.remaining()).sum() - } - - #[inline] - fn chunk(&self) -> &[u8] { - self.bufs.front().map(Buf::chunk).unwrap_or_default() - } - - #[inline] - fn advance(&mut self, mut cnt: usize) { - while cnt > 0 { - { - let front = &mut self.bufs[0]; - let rem = front.remaining(); - if rem > cnt { - front.advance(cnt); - return; - } else { - front.advance(rem); - cnt -= rem; - } - } - self.bufs.pop_front(); - } - } - - #[inline] - fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { - if dst.is_empty() { - return 0; - } - let mut vecs = 0; - for buf in &self.bufs { - vecs += buf.chunks_vectored(&mut dst[vecs..]); - if vecs == dst.len() { - break; - } - } - vecs - } - - #[inline] - fn copy_to_bytes(&mut self, len: usize) -> Bytes { - // Our inner buffer may have an optimized version of copy_to_bytes, and if the whole - // request can be fulfilled by the front buffer, we can take advantage. 
- match self.bufs.front_mut() { - Some(front) if front.remaining() == len => { - let b = front.copy_to_bytes(len); - self.bufs.pop_front(); - b - } - Some(front) if front.remaining() > len => front.copy_to_bytes(len), - _ => { - assert!(len <= self.remaining(), "`len` greater than remaining"); - let mut bm = BytesMut::with_capacity(len); - bm.put(self.take(len)); - bm.freeze() - } - } - } -} - -#[cfg(test)] -mod tests { - use std::ptr; - - use super::*; - - fn hello_world_buf() -> BufList { - BufList { - bufs: vec![Bytes::from("Hello"), Bytes::from(" "), Bytes::from("World")].into(), - } - } - - #[test] - fn to_bytes_shorter() { - let mut bufs = hello_world_buf(); - let old_ptr = bufs.chunk().as_ptr(); - let start = bufs.copy_to_bytes(4); - assert_eq!(start, "Hell"); - assert!(ptr::eq(old_ptr, start.as_ptr())); - assert_eq!(bufs.chunk(), b"o"); - assert!(ptr::eq(old_ptr.wrapping_add(4), bufs.chunk().as_ptr())); - assert_eq!(bufs.remaining(), 7); - } - - #[test] - fn to_bytes_eq() { - let mut bufs = hello_world_buf(); - let old_ptr = bufs.chunk().as_ptr(); - let start = bufs.copy_to_bytes(5); - assert_eq!(start, "Hello"); - assert!(ptr::eq(old_ptr, start.as_ptr())); - assert_eq!(bufs.chunk(), b" "); - assert_eq!(bufs.remaining(), 6); - } - - #[test] - fn to_bytes_longer() { - let mut bufs = hello_world_buf(); - let start = bufs.copy_to_bytes(7); - assert_eq!(start, "Hello W"); - assert_eq!(bufs.remaining(), 4); - } - - #[test] - fn one_long_buf_to_bytes() { - let mut buf = BufList::new(); - buf.push(b"Hello World" as &[_]); - assert_eq!(buf.copy_to_bytes(5), "Hello"); - assert_eq!(buf.chunk(), b" World"); - } - - #[test] - #[should_panic(expected = "`len` greater than remaining")] - fn buf_to_bytes_too_many() { - hello_world_buf().copy_to_bytes(42); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/date.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/date.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/date.rs 2024-02-06 12:28:08.000000000 
+0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/date.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,124 +0,0 @@ -use std::cell::RefCell; -use std::fmt::{self, Write}; -use std::str; -use std::time::{Duration, SystemTime}; - -#[cfg(feature = "http2")] -use http::header::HeaderValue; -use httpdate::HttpDate; - -// "Sun, 06 Nov 1994 08:49:37 GMT".len() -pub(crate) const DATE_VALUE_LENGTH: usize = 29; - -#[cfg(feature = "http1")] -pub(crate) fn extend(dst: &mut Vec) { - CACHED.with(|cache| { - dst.extend_from_slice(cache.borrow().buffer()); - }) -} - -#[cfg(feature = "http1")] -pub(crate) fn update() { - CACHED.with(|cache| { - cache.borrow_mut().check(); - }) -} - -#[cfg(feature = "http2")] -pub(crate) fn update_and_header_value() -> HeaderValue { - CACHED.with(|cache| { - let mut cache = cache.borrow_mut(); - cache.check(); - HeaderValue::from_bytes(cache.buffer()).expect("Date format should be valid HeaderValue") - }) -} - -struct CachedDate { - bytes: [u8; DATE_VALUE_LENGTH], - pos: usize, - next_update: SystemTime, -} - -thread_local!(static CACHED: RefCell = RefCell::new(CachedDate::new())); - -impl CachedDate { - fn new() -> Self { - let mut cache = CachedDate { - bytes: [0; DATE_VALUE_LENGTH], - pos: 0, - next_update: SystemTime::now(), - }; - cache.update(cache.next_update); - cache - } - - fn buffer(&self) -> &[u8] { - &self.bytes[..] 
- } - - fn check(&mut self) { - let now = SystemTime::now(); - if now > self.next_update { - self.update(now); - } - } - - fn update(&mut self, now: SystemTime) { - self.render(now); - self.next_update = now + Duration::new(1, 0); - } - - fn render(&mut self, now: SystemTime) { - self.pos = 0; - let _ = write!(self, "{}", HttpDate::from(now)); - debug_assert!(self.pos == DATE_VALUE_LENGTH); - } -} - -impl fmt::Write for CachedDate { - fn write_str(&mut self, s: &str) -> fmt::Result { - let len = s.len(); - self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes()); - self.pos += len; - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[cfg(feature = "nightly")] - use test::Bencher; - - #[test] - fn test_date_len() { - assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len()); - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_date_check(b: &mut Bencher) { - let mut date = CachedDate::new(); - // cache the first update - date.check(); - - b.iter(|| { - date.check(); - }); - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_date_render(b: &mut Bencher) { - let mut date = CachedDate::new(); - let now = SystemTime::now(); - date.render(now); - b.bytes = date.buffer().len() as u64; - - b.iter(|| { - date.render(now); - test::black_box(&date); - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/drain.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/drain.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/drain.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/drain.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,217 +0,0 @@ -use std::mem; - -use pin_project_lite::pin_project; -use tokio::sync::watch; - -use super::{task, Future, Pin, Poll}; - -pub(crate) fn channel() -> (Signal, Watch) { - let (tx, rx) = watch::channel(()); - (Signal { tx }, Watch { rx }) -} - -pub(crate) struct Signal { - tx: watch::Sender<()>, -} - -pub(crate) struct Draining(Pin + Send + Sync>>); - 
-#[derive(Clone)] -pub(crate) struct Watch { - rx: watch::Receiver<()>, -} - -pin_project! { - #[allow(missing_debug_implementations)] - pub struct Watching { - #[pin] - future: F, - state: State, - watch: Pin + Send + Sync>>, - _rx: watch::Receiver<()>, - } -} - -enum State { - Watch(F), - Draining, -} - -impl Signal { - pub(crate) fn drain(self) -> Draining { - let _ = self.tx.send(()); - Draining(Box::pin(async move { self.tx.closed().await })) - } -} - -impl Future for Draining { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - Pin::new(&mut self.as_mut().0).poll(cx) - } -} - -impl Watch { - pub(crate) fn watch(self, future: F, on_drain: FN) -> Watching - where - F: Future, - FN: FnOnce(Pin<&mut F>), - { - let Self { mut rx } = self; - let _rx = rx.clone(); - Watching { - future, - state: State::Watch(on_drain), - watch: Box::pin(async move { - let _ = rx.changed().await; - }), - // Keep the receiver alive until the future completes, so that - // dropping it can signal that draining has completed. - _rx, - } - } -} - -impl Future for Watching -where - F: Future, - FN: FnOnce(Pin<&mut F>), -{ - type Output = F::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let mut me = self.project(); - loop { - match mem::replace(me.state, State::Draining) { - State::Watch(on_drain) => { - match Pin::new(&mut me.watch).poll(cx) { - Poll::Ready(()) => { - // Drain has been triggered! 
- on_drain(me.future.as_mut()); - } - Poll::Pending => { - *me.state = State::Watch(on_drain); - return me.future.poll(cx); - } - } - } - State::Draining => return me.future.poll(cx), - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - struct TestMe { - draining: bool, - finished: bool, - poll_cnt: usize, - } - - impl Future for TestMe { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - self.poll_cnt += 1; - if self.finished { - Poll::Ready(()) - } else { - Poll::Pending - } - } - } - - #[test] - fn watch() { - let mut mock = tokio_test::task::spawn(()); - mock.enter(|cx, _| { - let (tx, rx) = channel(); - let fut = TestMe { - draining: false, - finished: false, - poll_cnt: 0, - }; - - let mut watch = rx.watch(fut, |mut fut| { - fut.draining = true; - }); - - assert_eq!(watch.future.poll_cnt, 0); - - // First poll should poll the inner future - assert!(Pin::new(&mut watch).poll(cx).is_pending()); - assert_eq!(watch.future.poll_cnt, 1); - - // Second poll should poll the inner future again - assert!(Pin::new(&mut watch).poll(cx).is_pending()); - assert_eq!(watch.future.poll_cnt, 2); - - let mut draining = tx.drain(); - // Drain signaled, but needs another poll to be noticed. - assert!(!watch.future.draining); - assert_eq!(watch.future.poll_cnt, 2); - - // Now, poll after drain has been signaled. 
- assert!(Pin::new(&mut watch).poll(cx).is_pending()); - assert_eq!(watch.future.poll_cnt, 3); - assert!(watch.future.draining); - - // Draining is not ready until watcher completes - assert!(Pin::new(&mut draining).poll(cx).is_pending()); - - // Finishing up the watch future - watch.future.finished = true; - assert!(Pin::new(&mut watch).poll(cx).is_ready()); - assert_eq!(watch.future.poll_cnt, 4); - drop(watch); - - assert!(Pin::new(&mut draining).poll(cx).is_ready()); - }) - } - - #[test] - fn watch_clones() { - let mut mock = tokio_test::task::spawn(()); - mock.enter(|cx, _| { - let (tx, rx) = channel(); - - let fut1 = TestMe { - draining: false, - finished: false, - poll_cnt: 0, - }; - let fut2 = TestMe { - draining: false, - finished: false, - poll_cnt: 0, - }; - - let watch1 = rx.clone().watch(fut1, |mut fut| { - fut.draining = true; - }); - let watch2 = rx.watch(fut2, |mut fut| { - fut.draining = true; - }); - - let mut draining = tx.drain(); - - // Still 2 outstanding watchers - assert!(Pin::new(&mut draining).poll(cx).is_pending()); - - // drop 1 for whatever reason - drop(watch1); - - // Still not ready, 1 other watcher still pending - assert!(Pin::new(&mut draining).poll(cx).is_pending()); - - drop(watch2); - - // Now all watchers are gone, draining is complete - assert!(Pin::new(&mut draining).poll(cx).is_ready()); - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/exec.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/exec.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/exec.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/exec.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,145 +0,0 @@ -use std::fmt; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; - -#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -use crate::body::Body; -#[cfg(feature = "server")] -use crate::body::HttpBody; -#[cfg(all(feature = "http2", feature = "server"))] -use 
crate::proto::h2::server::H2Stream; -use crate::rt::Executor; -#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -use crate::server::server::{new_svc::NewSvcTask, Watcher}; -#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -use crate::service::HttpService; - -#[cfg(feature = "server")] -pub trait ConnStreamExec: Clone { - fn execute_h2stream(&mut self, fut: H2Stream); -} - -#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -pub trait NewSvcExec, E, W: Watcher>: Clone { - fn execute_new_svc(&mut self, fut: NewSvcTask); -} - -pub(crate) type BoxSendFuture = Pin + Send>>; - -// Either the user provides an executor for background tasks, or we use -// `tokio::spawn`. -#[derive(Clone)] -pub enum Exec { - Default, - Executor(Arc + Send + Sync>), -} - -// ===== impl Exec ===== - -impl Exec { - pub(crate) fn execute(&self, fut: F) - where - F: Future + Send + 'static, - { - match *self { - Exec::Default => { - #[cfg(feature = "tcp")] - { - tokio::task::spawn(fut); - } - #[cfg(not(feature = "tcp"))] - { - // If no runtime, we need an executor! 
- panic!("executor must be set") - } - } - Exec::Executor(ref e) => { - e.execute(Box::pin(fut)); - } - } - } -} - -impl fmt::Debug for Exec { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Exec").finish() - } -} - -#[cfg(feature = "server")] -impl ConnStreamExec for Exec -where - H2Stream: Future + Send + 'static, - B: HttpBody, -{ - fn execute_h2stream(&mut self, fut: H2Stream) { - self.execute(fut) - } -} - -#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -impl NewSvcExec for Exec -where - NewSvcTask: Future + Send + 'static, - S: HttpService, - W: Watcher, -{ - fn execute_new_svc(&mut self, fut: NewSvcTask) { - self.execute(fut) - } -} - -// ==== impl Executor ===== - -#[cfg(feature = "server")] -impl ConnStreamExec for E -where - E: Executor> + Clone, - H2Stream: Future, - B: HttpBody, -{ - fn execute_h2stream(&mut self, fut: H2Stream) { - self.execute(fut) - } -} - -#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -impl NewSvcExec for E -where - E: Executor> + Clone, - NewSvcTask: Future, - S: HttpService, - W: Watcher, -{ - fn execute_new_svc(&mut self, fut: NewSvcTask) { - self.execute(fut) - } -} - -// If http2 is not enable, we just have a stub here, so that the trait bounds -// that *would* have been needed are still checked. Why? -// -// Because enabling `http2` shouldn't suddenly add new trait bounds that cause -// a compilation error. 
-#[cfg(not(feature = "http2"))] -#[allow(missing_debug_implementations)] -pub struct H2Stream(std::marker::PhantomData<(F, B)>); - -#[cfg(not(feature = "http2"))] -impl Future for H2Stream -where - F: Future, E>>, - B: crate::body::HttpBody, - B::Error: Into>, - E: Into>, -{ - type Output = (); - - fn poll( - self: Pin<&mut Self>, - _cx: &mut std::task::Context<'_>, - ) -> std::task::Poll { - unreachable!() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/io/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/io/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/io/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/io/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,3 +0,0 @@ -mod rewind; - -pub(crate) use self::rewind::Rewind; diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/io/rewind.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/io/rewind.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/io/rewind.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/io/rewind.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,155 +0,0 @@ -use std::marker::Unpin; -use std::{cmp, io}; - -use bytes::{Buf, Bytes}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; - -use crate::common::{task, Pin, Poll}; - -/// Combine a buffer with an IO, rewinding reads to use the buffer. 
-#[derive(Debug)] -pub(crate) struct Rewind { - pre: Option, - inner: T, -} - -impl Rewind { - #[cfg(any(all(feature = "http2", feature = "server"), test))] - pub(crate) fn new(io: T) -> Self { - Rewind { - pre: None, - inner: io, - } - } - - pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self { - Rewind { - pre: Some(buf), - inner: io, - } - } - - #[cfg(any(all(feature = "http1", feature = "http2", feature = "server"), test))] - pub(crate) fn rewind(&mut self, bs: Bytes) { - debug_assert!(self.pre.is_none()); - self.pre = Some(bs); - } - - pub(crate) fn into_inner(self) -> (T, Bytes) { - (self.inner, self.pre.unwrap_or_else(Bytes::new)) - } - - // pub(crate) fn get_mut(&mut self) -> &mut T { - // &mut self.inner - // } -} - -impl AsyncRead for Rewind -where - T: AsyncRead + Unpin, -{ - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - if let Some(mut prefix) = self.pre.take() { - // If there are no remaining bytes, let the bytes get dropped. - if !prefix.is_empty() { - let copy_len = cmp::min(prefix.len(), buf.remaining()); - // TODO: There should be a way to do following two lines cleaner... 
- buf.put_slice(&prefix[..copy_len]); - prefix.advance(copy_len); - // Put back what's left - if !prefix.is_empty() { - self.pre = Some(prefix); - } - - return Poll::Ready(Ok(())); - } - } - Pin::new(&mut self.inner).poll_read(cx, buf) - } -} - -impl AsyncWrite for Rewind -where - T: AsyncWrite + Unpin, -{ - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.inner).poll_write(cx, buf) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_shutdown(cx) - } - - fn is_write_vectored(&self) -> bool { - self.inner.is_write_vectored() - } -} - -#[cfg(test)] -mod tests { - // FIXME: re-implement tests with `async/await`, this import should - // trigger a warning to remind us - use super::Rewind; - use bytes::Bytes; - use tokio::io::AsyncReadExt; - - #[tokio::test] - async fn partial_rewind() { - let underlying = [104, 101, 108, 108, 111]; - - let mock = tokio_test::io::Builder::new().read(&underlying).build(); - - let mut stream = Rewind::new(mock); - - // Read off some bytes, ensure we filled o1 - let mut buf = [0; 2]; - stream.read_exact(&mut buf).await.expect("read1"); - - // Rewind the stream so that it is as if we never read in the first place. 
- stream.rewind(Bytes::copy_from_slice(&buf[..])); - - let mut buf = [0; 5]; - stream.read_exact(&mut buf).await.expect("read1"); - - // At this point we should have read everything that was in the MockStream - assert_eq!(&buf, &underlying); - } - - #[tokio::test] - async fn full_rewind() { - let underlying = [104, 101, 108, 108, 111]; - - let mock = tokio_test::io::Builder::new().read(&underlying).build(); - - let mut stream = Rewind::new(mock); - - let mut buf = [0; 5]; - stream.read_exact(&mut buf).await.expect("read1"); - - // Rewind the stream so that it is as if we never read in the first place. - stream.rewind(Bytes::copy_from_slice(&buf[..])); - - let mut buf = [0; 5]; - stream.read_exact(&mut buf).await.expect("read1"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/lazy.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/lazy.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/lazy.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/lazy.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -use pin_project_lite::pin_project; - -use super::{task, Future, Pin, Poll}; - -pub(crate) trait Started: Future { - fn started(&self) -> bool; -} - -pub(crate) fn lazy(func: F) -> Lazy -where - F: FnOnce() -> R, - R: Future + Unpin, -{ - Lazy { - inner: Inner::Init { func }, - } -} - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -pin_project! { - #[allow(missing_debug_implementations)] - pub(crate) struct Lazy { - #[pin] - inner: Inner, - } -} - -pin_project! { - #[project = InnerProj] - #[project_replace = InnerProjReplace] - enum Inner { - Init { func: F }, - Fut { #[pin] fut: R }, - Empty, - } -} - -impl Started for Lazy -where - F: FnOnce() -> R, - R: Future, -{ - fn started(&self) -> bool { - match self.inner { - Inner::Init { .. } => false, - Inner::Fut { .. 
} | Inner::Empty => true, - } - } -} - -impl Future for Lazy -where - F: FnOnce() -> R, - R: Future, -{ - type Output = R::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let mut this = self.project(); - - if let InnerProj::Fut { fut } = this.inner.as_mut().project() { - return fut.poll(cx); - } - - match this.inner.as_mut().project_replace(Inner::Empty) { - InnerProjReplace::Init { func } => { - this.inner.set(Inner::Fut { fut: func() }); - if let InnerProj::Fut { fut } = this.inner.project() { - return fut.poll(cx); - } - unreachable!() - } - _ => unreachable!("lazy state wrong"), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ -macro_rules! ready { - ($e:expr) => { - match $e { - std::task::Poll::Ready(v) => v, - std::task::Poll::Pending => return std::task::Poll::Pending, - } - }; -} - -pub(crate) mod buf; -#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -pub(crate) mod date; -#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -pub(crate) mod drain; -#[cfg(any(feature = "http1", feature = "http2", feature = "server"))] -pub(crate) mod exec; -pub(crate) mod io; -#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -mod lazy; -mod never; -#[cfg(any( - feature = "stream", - all(feature = "client", any(feature = "http1", feature = "http2")) -))] -pub(crate) mod sync_wrapper; -pub(crate) mod task; -pub(crate) mod watch; - -#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -pub(crate) use self::lazy::{lazy, Started as Lazy}; -#[cfg(any(feature = "http1", feature = "http2", feature = "runtime"))] -pub(crate) use self::never::Never; -pub(crate) use 
self::task::Poll; - -// group up types normally needed for `Future` -cfg_proto! { - pub(crate) use std::marker::Unpin; -} -pub(crate) use std::{future::Future, pin::Pin}; diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/never.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/never.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/never.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/never.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -//! An uninhabitable type meaning it can never happen. -//! -//! To be replaced with `!` once it is stable. - -use std::error::Error; -use std::fmt; - -#[derive(Debug)] -pub(crate) enum Never {} - -impl fmt::Display for Never { - fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self {} - } -} - -impl Error for Never { - fn description(&self) -> &str { - match *self {} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/sync_wrapper.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/sync_wrapper.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/sync_wrapper.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/sync_wrapper.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,110 +0,0 @@ -/* - * This is a copy of the sync_wrapper crate. - */ - -/// A mutual exclusion primitive that relies on static type information only -/// -/// In some cases synchronization can be proven statically: whenever you hold an exclusive `&mut` -/// reference, the Rust type system ensures that no other part of the program can hold another -/// reference to the data. Therefore it is safe to access it even if the current thread obtained -/// this reference via a channel. Whenever this is the case, the overhead of allocating and locking -/// a [`Mutex`] can be avoided by using this static version. 
-/// -/// One example where this is often applicable is [`Future`], which requires an exclusive reference -/// for its [`poll`] method: While a given `Future` implementation may not be safe to access by -/// multiple threads concurrently, the executor can only run the `Future` on one thread at any -/// given time, making it [`Sync`] in practice as long as the implementation is `Send`. You can -/// therefore use the sync wrapper to prove that your data structure is `Sync` even though it -/// contains such a `Future`. -/// -/// # Example -/// -/// ```ignore -/// use hyper::common::sync_wrapper::SyncWrapper; -/// use std::future::Future; -/// -/// struct MyThing { -/// future: SyncWrapper + Send>>, -/// } -/// -/// impl MyThing { -/// // all accesses to `self.future` now require an exclusive reference or ownership -/// } -/// -/// fn assert_sync() {} -/// -/// assert_sync::(); -/// ``` -/// -/// [`Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html -/// [`Future`]: https://doc.rust-lang.org/std/future/trait.Future.html -/// [`poll`]: https://doc.rust-lang.org/std/future/trait.Future.html#method.poll -/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html -#[repr(transparent)] -pub(crate) struct SyncWrapper(T); - -impl SyncWrapper { - /// Creates a new SyncWrapper containing the given value. - /// - /// # Examples - /// - /// ```ignore - /// use hyper::common::sync_wrapper::SyncWrapper; - /// - /// let wrapped = SyncWrapper::new(42); - /// ``` - pub(crate) fn new(value: T) -> Self { - Self(value) - } - - /// Acquires a reference to the protected value. - /// - /// This is safe because it requires an exclusive reference to the wrapper. Therefore this method - /// neither panics nor does it return an error. This is in contrast to [`Mutex::get_mut`] which - /// returns an error if another thread panicked while holding the lock. 
It is not recommended - /// to send an exclusive reference to a potentially damaged value to another thread for further - /// processing. - /// - /// [`Mutex::get_mut`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.get_mut - /// - /// # Examples - /// - /// ```ignore - /// use hyper::common::sync_wrapper::SyncWrapper; - /// - /// let mut wrapped = SyncWrapper::new(42); - /// let value = wrapped.get_mut(); - /// *value = 0; - /// assert_eq!(*wrapped.get_mut(), 0); - /// ``` - pub(crate) fn get_mut(&mut self) -> &mut T { - &mut self.0 - } - - /// Consumes this wrapper, returning the underlying data. - /// - /// This is safe because it requires ownership of the wrapper, aherefore this method will neither - /// panic nor does it return an error. This is in contrast to [`Mutex::into_inner`] which - /// returns an error if another thread panicked while holding the lock. It is not recommended - /// to send an exclusive reference to a potentially damaged value to another thread for further - /// processing. 
- /// - /// [`Mutex::into_inner`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.into_inner - /// - /// # Examples - /// - /// ```ignore - /// use hyper::common::sync_wrapper::SyncWrapper; - /// - /// let mut wrapped = SyncWrapper::new(42); - /// assert_eq!(wrapped.into_inner(), 42); - /// ``` - #[allow(dead_code)] - pub(crate) fn into_inner(self) -> T { - self.0 - } -} - -// this is safe because the only operations permitted on this data structure require exclusive -// access or ownership -unsafe impl Sync for SyncWrapper {} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/task.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/task.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/task.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/task.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -#[cfg(feature = "http1")] -use super::Never; -pub(crate) use std::task::{Context, Poll}; - -/// A function to help "yield" a future, such that it is re-scheduled immediately. -/// -/// Useful for spin counts, so a future doesn't hog too much time. -#[cfg(feature = "http1")] -pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll { - cx.waker().wake_by_ref(); - Poll::Pending -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/common/watch.rs s390-tools-2.33.1/rust-vendor/hyper/src/common/watch.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/common/watch.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/common/watch.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,73 +0,0 @@ -//! An SPSC broadcast channel. -//! -//! - The value can only be a `usize`. -//! - The consumer is only notified if the value is different. -//! - The value `0` is reserved for closed. 
- -use futures_util::task::AtomicWaker; -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; -use std::task; - -type Value = usize; - -pub(crate) const CLOSED: usize = 0; - -pub(crate) fn channel(initial: Value) -> (Sender, Receiver) { - debug_assert!( - initial != CLOSED, - "watch::channel initial state of 0 is reserved" - ); - - let shared = Arc::new(Shared { - value: AtomicUsize::new(initial), - waker: AtomicWaker::new(), - }); - - ( - Sender { - shared: shared.clone(), - }, - Receiver { shared }, - ) -} - -pub(crate) struct Sender { - shared: Arc, -} - -pub(crate) struct Receiver { - shared: Arc, -} - -struct Shared { - value: AtomicUsize, - waker: AtomicWaker, -} - -impl Sender { - pub(crate) fn send(&mut self, value: Value) { - if self.shared.value.swap(value, Ordering::SeqCst) != value { - self.shared.waker.wake(); - } - } -} - -impl Drop for Sender { - fn drop(&mut self) { - self.send(CLOSED); - } -} - -impl Receiver { - pub(crate) fn load(&mut self, cx: &mut task::Context<'_>) -> Value { - self.shared.waker.register(cx.waker()); - self.shared.value.load(Ordering::SeqCst) - } - - pub(crate) fn peek(&self) -> Value { - self.shared.value.load(Ordering::Relaxed) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/error.rs s390-tools-2.33.1/rust-vendor/hyper/src/error.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/error.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,663 +0,0 @@ -//! Error and Result module. - -#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -use crate::client::connect::Connected; -use std::error::Error as StdError; -use std::fmt; - -/// Result type often returned from methods that can have hyper `Error`s. -pub type Result = std::result::Result; - -type Cause = Box; - -/// Represents errors that can occur handling HTTP streams. 
-pub struct Error { - inner: Box, -} - -struct ErrorImpl { - kind: Kind, - cause: Option, - #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] - connect_info: Option, -} - -#[derive(Debug)] -pub(super) enum Kind { - Parse(Parse), - User(User), - /// A message reached EOF, but is not complete. - #[allow(unused)] - IncompleteMessage, - /// A connection received a message (or bytes) when not waiting for one. - #[cfg(feature = "http1")] - UnexpectedMessage, - /// A pending item was dropped before ever being processed. - Canceled, - /// Indicates a channel (client or body sender) is closed. - ChannelClosed, - /// An `io::Error` that occurred while trying to read or write to a network stream. - #[cfg(any(feature = "http1", feature = "http2"))] - Io, - /// Error occurred while connecting. - #[allow(unused)] - Connect, - /// Error creating a TcpListener. - #[cfg(all(feature = "tcp", feature = "server"))] - Listen, - /// Error accepting on an Incoming stream. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - Accept, - /// User took too long to send headers - #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] - HeaderTimeout, - /// Error while reading a body from connection. - #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] - Body, - /// Error while writing a body to connection. - #[cfg(any(feature = "http1", feature = "http2"))] - BodyWrite, - /// Error calling AsyncWrite::shutdown() - #[cfg(feature = "http1")] - Shutdown, - - /// A general error from h2. 
- #[cfg(feature = "http2")] - Http2, -} - -#[derive(Debug)] -pub(super) enum Parse { - Method, - Version, - #[cfg(feature = "http1")] - VersionH2, - Uri, - #[cfg_attr(not(all(feature = "http1", feature = "server")), allow(unused))] - UriTooLong, - Header(Header), - TooLarge, - Status, - #[cfg_attr(debug_assertions, allow(unused))] - Internal, -} - -#[derive(Debug)] -pub(super) enum Header { - Token, - #[cfg(feature = "http1")] - ContentLengthInvalid, - #[cfg(all(feature = "http1", feature = "server"))] - TransferEncodingInvalid, - #[cfg(feature = "http1")] - TransferEncodingUnexpected, -} - -#[derive(Debug)] -pub(super) enum User { - /// Error calling user's HttpBody::poll_data(). - #[cfg(any(feature = "http1", feature = "http2"))] - Body, - /// The user aborted writing of the outgoing body. - BodyWriteAborted, - /// Error calling user's MakeService. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - MakeService, - /// Error from future of user's Service. - #[cfg(any(feature = "http1", feature = "http2"))] - Service, - /// User tried to send a certain header in an unexpected context. - /// - /// For example, sending both `content-length` and `transfer-encoding`. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - UnexpectedHeader, - /// User tried to create a Request with bad version. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - UnsupportedVersion, - /// User tried to create a CONNECT Request with the Client. - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - UnsupportedRequestMethod, - /// User tried to respond with a 1xx (not 101) response code. - #[cfg(feature = "http1")] - #[cfg(feature = "server")] - UnsupportedStatusCode, - /// User tried to send a Request with Client with non-absolute URI. 
- #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - AbsoluteUriRequired, - - /// User tried polling for an upgrade that doesn't exist. - NoUpgrade, - - /// User polled for an upgrade, but low-level API is not using upgrades. - #[cfg(feature = "http1")] - ManualUpgrade, - - /// User called `server::Connection::without_shutdown()` on an HTTP/2 conn. - #[cfg(feature = "server")] - WithoutShutdownNonHttp1, - - /// The dispatch task is gone. - #[cfg(feature = "client")] - DispatchGone, - - /// User aborted in an FFI callback. - #[cfg(feature = "ffi")] - AbortedByCallback, -} - -// Sentinel type to indicate the error was caused by a timeout. -#[derive(Debug)] -pub(super) struct TimedOut; - -impl Error { - /// Returns true if this was an HTTP parse error. - pub fn is_parse(&self) -> bool { - matches!(self.inner.kind, Kind::Parse(_)) - } - - /// Returns true if this was an HTTP parse error caused by a message that was too large. - pub fn is_parse_too_large(&self) -> bool { - matches!( - self.inner.kind, - Kind::Parse(Parse::TooLarge) | Kind::Parse(Parse::UriTooLong) - ) - } - - /// Returns true if this was an HTTP parse error caused by an invalid response status code or - /// reason phrase. - pub fn is_parse_status(&self) -> bool { - matches!(self.inner.kind, Kind::Parse(Parse::Status)) - } - - /// Returns true if this error was caused by user code. - pub fn is_user(&self) -> bool { - matches!(self.inner.kind, Kind::User(_)) - } - - /// Returns true if this was about a `Request` that was canceled. - pub fn is_canceled(&self) -> bool { - matches!(self.inner.kind, Kind::Canceled) - } - - /// Returns true if a sender's channel is closed. - pub fn is_closed(&self) -> bool { - matches!(self.inner.kind, Kind::ChannelClosed) - } - - /// Returns true if this was an error from `Connect`. - pub fn is_connect(&self) -> bool { - matches!(self.inner.kind, Kind::Connect) - } - - /// Returns true if the connection closed before a message could complete. 
- pub fn is_incomplete_message(&self) -> bool { - matches!(self.inner.kind, Kind::IncompleteMessage) - } - - /// Returns true if the body write was aborted. - pub fn is_body_write_aborted(&self) -> bool { - matches!(self.inner.kind, Kind::User(User::BodyWriteAborted)) - } - - /// Returns true if the error was caused by a timeout. - pub fn is_timeout(&self) -> bool { - self.find_source::().is_some() - } - - /// Consumes the error, returning its cause. - pub fn into_cause(self) -> Option> { - self.inner.cause - } - - /// Returns the info of the client connection on which this error occurred. - #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] - pub fn client_connect_info(&self) -> Option<&Connected> { - self.inner.connect_info.as_ref() - } - - pub(super) fn new(kind: Kind) -> Error { - Error { - inner: Box::new(ErrorImpl { - kind, - cause: None, - #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] - connect_info: None, - }), - } - } - - pub(super) fn with>(mut self, cause: C) -> Error { - self.inner.cause = Some(cause.into()); - self - } - - #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] - pub(super) fn with_client_connect_info(mut self, connect_info: Connected) -> Error { - self.inner.connect_info = Some(connect_info); - self - } - - #[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))] - pub(super) fn kind(&self) -> &Kind { - &self.inner.kind - } - - pub(crate) fn find_source(&self) -> Option<&E> { - let mut cause = self.source(); - while let Some(err) = cause { - if let Some(ref typed) = err.downcast_ref() { - return Some(typed); - } - cause = err.source(); - } - - // else - None - } - - #[cfg(feature = "http2")] - pub(super) fn h2_reason(&self) -> h2::Reason { - // Find an h2::Reason somewhere in the cause stack, if it exists, - // otherwise assume an INTERNAL_ERROR. 
- self.find_source::() - .and_then(|h2_err| h2_err.reason()) - .unwrap_or(h2::Reason::INTERNAL_ERROR) - } - - pub(super) fn new_canceled() -> Error { - Error::new(Kind::Canceled) - } - - #[cfg(feature = "http1")] - pub(super) fn new_incomplete() -> Error { - Error::new(Kind::IncompleteMessage) - } - - #[cfg(feature = "http1")] - pub(super) fn new_too_large() -> Error { - Error::new(Kind::Parse(Parse::TooLarge)) - } - - #[cfg(feature = "http1")] - pub(super) fn new_version_h2() -> Error { - Error::new(Kind::Parse(Parse::VersionH2)) - } - - #[cfg(feature = "http1")] - pub(super) fn new_unexpected_message() -> Error { - Error::new(Kind::UnexpectedMessage) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - pub(super) fn new_io(cause: std::io::Error) -> Error { - Error::new(Kind::Io).with(cause) - } - - #[cfg(all(feature = "server", feature = "tcp"))] - pub(super) fn new_listen>(cause: E) -> Error { - Error::new(Kind::Listen).with(cause) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - pub(super) fn new_accept>(cause: E) -> Error { - Error::new(Kind::Accept).with(cause) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - pub(super) fn new_connect>(cause: E) -> Error { - Error::new(Kind::Connect).with(cause) - } - - pub(super) fn new_closed() -> Error { - Error::new(Kind::ChannelClosed) - } - - #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] - pub(super) fn new_body>(cause: E) -> Error { - Error::new(Kind::Body).with(cause) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - pub(super) fn new_body_write>(cause: E) -> Error { - Error::new(Kind::BodyWrite).with(cause) - } - - pub(super) fn new_body_write_aborted() -> Error { - Error::new(Kind::User(User::BodyWriteAborted)) - } - - fn new_user(user: User) -> Error { - Error::new(Kind::User(user)) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - pub(super) fn 
new_user_header() -> Error { - Error::new_user(User::UnexpectedHeader) - } - - #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] - pub(super) fn new_header_timeout() -> Error { - Error::new(Kind::HeaderTimeout) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - pub(super) fn new_user_unsupported_version() -> Error { - Error::new_user(User::UnsupportedVersion) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - pub(super) fn new_user_unsupported_request_method() -> Error { - Error::new_user(User::UnsupportedRequestMethod) - } - - #[cfg(feature = "http1")] - #[cfg(feature = "server")] - pub(super) fn new_user_unsupported_status_code() -> Error { - Error::new_user(User::UnsupportedStatusCode) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - pub(super) fn new_user_absolute_uri_required() -> Error { - Error::new_user(User::AbsoluteUriRequired) - } - - pub(super) fn new_user_no_upgrade() -> Error { - Error::new_user(User::NoUpgrade) - } - - #[cfg(feature = "http1")] - pub(super) fn new_user_manual_upgrade() -> Error { - Error::new_user(User::ManualUpgrade) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - pub(super) fn new_user_make_service>(cause: E) -> Error { - Error::new_user(User::MakeService).with(cause) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - pub(super) fn new_user_service>(cause: E) -> Error { - Error::new_user(User::Service).with(cause) - } - - #[cfg(any(feature = "http1", feature = "http2"))] - pub(super) fn new_user_body>(cause: E) -> Error { - Error::new_user(User::Body).with(cause) - } - - #[cfg(feature = "server")] - pub(super) fn new_without_shutdown_not_h1() -> Error { - Error::new(Kind::User(User::WithoutShutdownNonHttp1)) - } - - #[cfg(feature = "http1")] - pub(super) fn new_shutdown(cause: std::io::Error) -> Error { - Error::new(Kind::Shutdown).with(cause) - } 
- - #[cfg(feature = "ffi")] - pub(super) fn new_user_aborted_by_callback() -> Error { - Error::new_user(User::AbortedByCallback) - } - - #[cfg(feature = "client")] - pub(super) fn new_user_dispatch_gone() -> Error { - Error::new(Kind::User(User::DispatchGone)) - } - - #[cfg(feature = "http2")] - pub(super) fn new_h2(cause: ::h2::Error) -> Error { - if cause.is_io() { - Error::new_io(cause.into_io().expect("h2::Error::is_io")) - } else { - Error::new(Kind::Http2).with(cause) - } - } - - /// The error's standalone message, without the message from the source. - pub fn message(&self) -> impl fmt::Display + '_ { - self.description() - } - - fn description(&self) -> &str { - match self.inner.kind { - Kind::Parse(Parse::Method) => "invalid HTTP method parsed", - Kind::Parse(Parse::Version) => "invalid HTTP version parsed", - #[cfg(feature = "http1")] - Kind::Parse(Parse::VersionH2) => "invalid HTTP version parsed (found HTTP2 preface)", - Kind::Parse(Parse::Uri) => "invalid URI", - Kind::Parse(Parse::UriTooLong) => "URI too long", - Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed", - #[cfg(feature = "http1")] - Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => { - "invalid content-length parsed" - } - #[cfg(all(feature = "http1", feature = "server"))] - Kind::Parse(Parse::Header(Header::TransferEncodingInvalid)) => { - "invalid transfer-encoding parsed" - } - #[cfg(feature = "http1")] - Kind::Parse(Parse::Header(Header::TransferEncodingUnexpected)) => { - "unexpected transfer-encoding parsed" - } - Kind::Parse(Parse::TooLarge) => "message head is too large", - Kind::Parse(Parse::Status) => "invalid HTTP status-code parsed", - Kind::Parse(Parse::Internal) => { - "internal error inside Hyper and/or its dependencies, please report" - } - Kind::IncompleteMessage => "connection closed before message completed", - #[cfg(feature = "http1")] - Kind::UnexpectedMessage => "received unexpected message from connection", - Kind::ChannelClosed => 
"channel closed", - Kind::Connect => "error trying to connect", - Kind::Canceled => "operation was canceled", - #[cfg(all(feature = "server", feature = "tcp"))] - Kind::Listen => "error creating server listener", - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - Kind::Accept => "error accepting connection", - #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))] - Kind::HeaderTimeout => "read header from client timeout", - #[cfg(any(feature = "http1", feature = "http2", feature = "stream"))] - Kind::Body => "error reading a body from connection", - #[cfg(any(feature = "http1", feature = "http2"))] - Kind::BodyWrite => "error writing a body to connection", - #[cfg(feature = "http1")] - Kind::Shutdown => "error shutting down connection", - #[cfg(feature = "http2")] - Kind::Http2 => "http2 error", - #[cfg(any(feature = "http1", feature = "http2"))] - Kind::Io => "connection error", - - #[cfg(any(feature = "http1", feature = "http2"))] - Kind::User(User::Body) => "error from user's HttpBody stream", - Kind::User(User::BodyWriteAborted) => "user body write aborted", - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - Kind::User(User::MakeService) => "error from user's MakeService", - #[cfg(any(feature = "http1", feature = "http2"))] - Kind::User(User::Service) => "error from user's Service", - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "server")] - Kind::User(User::UnexpectedHeader) => "user sent unexpected header", - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - Kind::User(User::UnsupportedVersion) => "request has unsupported HTTP version", - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - Kind::User(User::UnsupportedRequestMethod) => "request has unsupported HTTP method", - #[cfg(feature = "http1")] - #[cfg(feature = "server")] - Kind::User(User::UnsupportedStatusCode) => { - "response has 1xx 
status code, not supported by server" - } - #[cfg(any(feature = "http1", feature = "http2"))] - #[cfg(feature = "client")] - Kind::User(User::AbsoluteUriRequired) => "client requires absolute-form URIs", - Kind::User(User::NoUpgrade) => "no upgrade available", - #[cfg(feature = "http1")] - Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use", - #[cfg(feature = "server")] - Kind::User(User::WithoutShutdownNonHttp1) => { - "without_shutdown() called on a non-HTTP/1 connection" - } - #[cfg(feature = "client")] - Kind::User(User::DispatchGone) => "dispatch task is gone", - #[cfg(feature = "ffi")] - Kind::User(User::AbortedByCallback) => "operation aborted by an application callback", - } - } -} - -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut f = f.debug_tuple("hyper::Error"); - f.field(&self.inner.kind); - if let Some(ref cause) = self.inner.cause { - f.field(cause); - } - f.finish() - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(ref cause) = self.inner.cause { - write!(f, "{}: {}", self.description(), cause) - } else { - f.write_str(self.description()) - } - } -} - -impl StdError for Error { - fn source(&self) -> Option<&(dyn StdError + 'static)> { - self.inner - .cause - .as_ref() - .map(|cause| &**cause as &(dyn StdError + 'static)) - } -} - -#[doc(hidden)] -impl From for Error { - fn from(err: Parse) -> Error { - Error::new(Kind::Parse(err)) - } -} - -#[cfg(feature = "http1")] -impl Parse { - pub(crate) fn content_length_invalid() -> Self { - Parse::Header(Header::ContentLengthInvalid) - } - - #[cfg(all(feature = "http1", feature = "server"))] - pub(crate) fn transfer_encoding_invalid() -> Self { - Parse::Header(Header::TransferEncodingInvalid) - } - - pub(crate) fn transfer_encoding_unexpected() -> Self { - Parse::Header(Header::TransferEncodingUnexpected) - } -} - -impl From for Parse { - fn from(err: httparse::Error) 
-> Parse { - match err { - httparse::Error::HeaderName - | httparse::Error::HeaderValue - | httparse::Error::NewLine - | httparse::Error::Token => Parse::Header(Header::Token), - httparse::Error::Status => Parse::Status, - httparse::Error::TooManyHeaders => Parse::TooLarge, - httparse::Error::Version => Parse::Version, - } - } -} - -impl From for Parse { - fn from(_: http::method::InvalidMethod) -> Parse { - Parse::Method - } -} - -impl From for Parse { - fn from(_: http::status::InvalidStatusCode) -> Parse { - Parse::Status - } -} - -impl From for Parse { - fn from(_: http::uri::InvalidUri) -> Parse { - Parse::Uri - } -} - -impl From for Parse { - fn from(_: http::uri::InvalidUriParts) -> Parse { - Parse::Uri - } -} - -#[doc(hidden)] -trait AssertSendSync: Send + Sync + 'static {} -#[doc(hidden)] -impl AssertSendSync for Error {} - -// ===== impl TimedOut ==== - -impl fmt::Display for TimedOut { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("operation timed out") - } -} - -impl StdError for TimedOut {} - -#[cfg(test)] -mod tests { - use super::*; - use std::mem; - - #[test] - fn error_size_of() { - assert_eq!(mem::size_of::(), mem::size_of::()); - } - - #[cfg(feature = "http2")] - #[test] - fn h2_reason_unknown() { - let closed = Error::new_closed(); - assert_eq!(closed.h2_reason(), h2::Reason::INTERNAL_ERROR); - } - - #[cfg(feature = "http2")] - #[test] - fn h2_reason_one_level() { - let body_err = Error::new_user_body(h2::Error::from(h2::Reason::ENHANCE_YOUR_CALM)); - assert_eq!(body_err.h2_reason(), h2::Reason::ENHANCE_YOUR_CALM); - } - - #[cfg(feature = "http2")] - #[test] - fn h2_reason_nested() { - let recvd = Error::new_h2(h2::Error::from(h2::Reason::HTTP_1_1_REQUIRED)); - // Suppose a user were proxying the received error - let svc_err = Error::new_user_service(recvd); - assert_eq!(svc_err.h2_reason(), h2::Reason::HTTP_1_1_REQUIRED); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ext/h1_reason_phrase.rs 
s390-tools-2.33.1/rust-vendor/hyper/src/ext/h1_reason_phrase.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ext/h1_reason_phrase.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ext/h1_reason_phrase.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,221 +0,0 @@ -use std::convert::TryFrom; - -use bytes::Bytes; - -/// A reason phrase in an HTTP/1 response. -/// -/// # Clients -/// -/// For clients, a `ReasonPhrase` will be present in the extensions of the `http::Response` returned -/// for a request if the reason phrase is different from the canonical reason phrase for the -/// response's status code. For example, if a server returns `HTTP/1.1 200 Awesome`, the -/// `ReasonPhrase` will be present and contain `Awesome`, but if a server returns `HTTP/1.1 200 OK`, -/// the response will not contain a `ReasonPhrase`. -/// -/// ```no_run -/// # #[cfg(all(feature = "tcp", feature = "client", feature = "http1"))] -/// # async fn fake_fetch() -> hyper::Result<()> { -/// use hyper::{Client, Uri}; -/// use hyper::ext::ReasonPhrase; -/// -/// let res = Client::new().get(Uri::from_static("http://example.com/non_canonical_reason")).await?; -/// -/// // Print out the non-canonical reason phrase, if it has one... -/// if let Some(reason) = res.extensions().get::() { -/// println!("non-canonical reason: {}", std::str::from_utf8(reason.as_bytes()).unwrap()); -/// } -/// # Ok(()) -/// # } -/// ``` -/// -/// # Servers -/// -/// When a `ReasonPhrase` is present in the extensions of the `http::Response` written by a server, -/// its contents will be written in place of the canonical reason phrase when responding via HTTP/1. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct ReasonPhrase(Bytes); - -impl ReasonPhrase { - /// Gets the reason phrase as bytes. - pub fn as_bytes(&self) -> &[u8] { - &self.0 - } - - /// Converts a static byte slice to a reason phrase. 
- pub fn from_static(reason: &'static [u8]) -> Self { - // TODO: this can be made const once MSRV is >= 1.57.0 - if find_invalid_byte(reason).is_some() { - panic!("invalid byte in static reason phrase"); - } - Self(Bytes::from_static(reason)) - } - - /// Converts a `Bytes` directly into a `ReasonPhrase` without validating. - /// - /// Use with care; invalid bytes in a reason phrase can cause serious security problems if - /// emitted in a response. - pub unsafe fn from_bytes_unchecked(reason: Bytes) -> Self { - Self(reason) - } -} - -impl TryFrom<&[u8]> for ReasonPhrase { - type Error = InvalidReasonPhrase; - - fn try_from(reason: &[u8]) -> Result { - if let Some(bad_byte) = find_invalid_byte(reason) { - Err(InvalidReasonPhrase { bad_byte }) - } else { - Ok(Self(Bytes::copy_from_slice(reason))) - } - } -} - -impl TryFrom> for ReasonPhrase { - type Error = InvalidReasonPhrase; - - fn try_from(reason: Vec) -> Result { - if let Some(bad_byte) = find_invalid_byte(&reason) { - Err(InvalidReasonPhrase { bad_byte }) - } else { - Ok(Self(Bytes::from(reason))) - } - } -} - -impl TryFrom for ReasonPhrase { - type Error = InvalidReasonPhrase; - - fn try_from(reason: String) -> Result { - if let Some(bad_byte) = find_invalid_byte(reason.as_bytes()) { - Err(InvalidReasonPhrase { bad_byte }) - } else { - Ok(Self(Bytes::from(reason))) - } - } -} - -impl TryFrom for ReasonPhrase { - type Error = InvalidReasonPhrase; - - fn try_from(reason: Bytes) -> Result { - if let Some(bad_byte) = find_invalid_byte(&reason) { - Err(InvalidReasonPhrase { bad_byte }) - } else { - Ok(Self(reason)) - } - } -} - -impl Into for ReasonPhrase { - fn into(self) -> Bytes { - self.0 - } -} - -impl AsRef<[u8]> for ReasonPhrase { - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -/// Error indicating an invalid byte when constructing a `ReasonPhrase`. -/// -/// See [the spec][spec] for details on allowed bytes. 
-/// -/// [spec]: https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7 -#[derive(Debug)] -pub struct InvalidReasonPhrase { - bad_byte: u8, -} - -impl std::fmt::Display for InvalidReasonPhrase { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Invalid byte in reason phrase: {}", self.bad_byte) - } -} - -impl std::error::Error for InvalidReasonPhrase {} - -const fn is_valid_byte(b: u8) -> bool { - // See https://www.rfc-editor.org/rfc/rfc5234.html#appendix-B.1 - const fn is_vchar(b: u8) -> bool { - 0x21 <= b && b <= 0x7E - } - - // See https://httpwg.org/http-core/draft-ietf-httpbis-semantics-latest.html#fields.values - // - // The 0xFF comparison is technically redundant, but it matches the text of the spec more - // clearly and will be optimized away. - #[allow(unused_comparisons)] - const fn is_obs_text(b: u8) -> bool { - 0x80 <= b && b <= 0xFF - } - - // See https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7 - b == b'\t' || b == b' ' || is_vchar(b) || is_obs_text(b) -} - -const fn find_invalid_byte(bytes: &[u8]) -> Option { - let mut i = 0; - while i < bytes.len() { - let b = bytes[i]; - if !is_valid_byte(b) { - return Some(b); - } - i += 1; - } - None -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn basic_valid() { - const PHRASE: &'static [u8] = b"OK"; - assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE); - assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE); - } - - #[test] - fn empty_valid() { - const PHRASE: &'static [u8] = b""; - assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE); - assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE); - } - - #[test] - fn obs_text_valid() { - const PHRASE: &'static [u8] = b"hyp\xe9r"; - assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE); - assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE); - } - - const 
NEWLINE_PHRASE: &'static [u8] = b"hyp\ner"; - - #[test] - #[should_panic] - fn newline_invalid_panic() { - ReasonPhrase::from_static(NEWLINE_PHRASE); - } - - #[test] - fn newline_invalid_err() { - assert!(ReasonPhrase::try_from(NEWLINE_PHRASE).is_err()); - } - - const CR_PHRASE: &'static [u8] = b"hyp\rer"; - - #[test] - #[should_panic] - fn cr_invalid_panic() { - ReasonPhrase::from_static(CR_PHRASE); - } - - #[test] - fn cr_invalid_err() { - assert!(ReasonPhrase::try_from(CR_PHRASE).is_err()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ext.rs s390-tools-2.33.1/rust-vendor/hyper/src/ext.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ext.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ext.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,228 +0,0 @@ -//! HTTP extensions. - -use bytes::Bytes; -#[cfg(any(feature = "http1", feature = "ffi"))] -use http::header::HeaderName; -#[cfg(feature = "http1")] -use http::header::{IntoHeaderName, ValueIter}; -use http::HeaderMap; -#[cfg(feature = "ffi")] -use std::collections::HashMap; -#[cfg(feature = "http2")] -use std::fmt; - -#[cfg(any(feature = "http1", feature = "ffi"))] -mod h1_reason_phrase; -#[cfg(any(feature = "http1", feature = "ffi"))] -pub use h1_reason_phrase::ReasonPhrase; - -#[cfg(feature = "http2")] -/// Represents the `:protocol` pseudo-header used by -/// the [Extended CONNECT Protocol]. -/// -/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 -#[derive(Clone, Eq, PartialEq)] -pub struct Protocol { - inner: h2::ext::Protocol, -} - -#[cfg(feature = "http2")] -impl Protocol { - /// Converts a static string to a protocol name. - pub const fn from_static(value: &'static str) -> Self { - Self { - inner: h2::ext::Protocol::from_static(value), - } - } - - /// Returns a str representation of the header. 
- pub fn as_str(&self) -> &str { - self.inner.as_str() - } - - #[cfg(feature = "server")] - pub(crate) fn from_inner(inner: h2::ext::Protocol) -> Self { - Self { inner } - } - - pub(crate) fn into_inner(self) -> h2::ext::Protocol { - self.inner - } -} - -#[cfg(feature = "http2")] -impl<'a> From<&'a str> for Protocol { - fn from(value: &'a str) -> Self { - Self { - inner: h2::ext::Protocol::from(value), - } - } -} - -#[cfg(feature = "http2")] -impl AsRef<[u8]> for Protocol { - fn as_ref(&self) -> &[u8] { - self.inner.as_ref() - } -} - -#[cfg(feature = "http2")] -impl fmt::Debug for Protocol { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) - } -} - -/// A map from header names to their original casing as received in an HTTP message. -/// -/// If an HTTP/1 response `res` is parsed on a connection whose option -/// [`http1_preserve_header_case`] was set to true and the response included -/// the following headers: -/// -/// ```ignore -/// x-Bread: Baguette -/// X-BREAD: Pain -/// x-bread: Ficelle -/// ``` -/// -/// Then `res.extensions().get::()` will return a map with: -/// -/// ```ignore -/// HeaderCaseMap({ -/// "x-bread": ["x-Bread", "X-BREAD", "x-bread"], -/// }) -/// ``` -/// -/// [`http1_preserve_header_case`]: /client/struct.Client.html#method.http1_preserve_header_case -#[derive(Clone, Debug)] -pub(crate) struct HeaderCaseMap(HeaderMap); - -#[cfg(feature = "http1")] -impl HeaderCaseMap { - /// Returns a view of all spellings associated with that header name, - /// in the order they were found. - pub(crate) fn get_all<'a>( - &'a self, - name: &HeaderName, - ) -> impl Iterator + 'a> + 'a { - self.get_all_internal(name).into_iter() - } - - /// Returns a view of all spellings associated with that header name, - /// in the order they were found. 
- pub(crate) fn get_all_internal<'a>(&'a self, name: &HeaderName) -> ValueIter<'_, Bytes> { - self.0.get_all(name).into_iter() - } - - pub(crate) fn default() -> Self { - Self(Default::default()) - } - - #[cfg(any(test, feature = "ffi"))] - pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) { - self.0.insert(name, orig); - } - - pub(crate) fn append(&mut self, name: N, orig: Bytes) - where - N: IntoHeaderName, - { - self.0.append(name, orig); - } -} - -#[cfg(feature = "ffi")] -#[derive(Clone, Debug)] -/// Hashmap -pub(crate) struct OriginalHeaderOrder { - /// Stores how many entries a Headername maps to. This is used - /// for accounting. - num_entries: HashMap, - /// Stores the ordering of the headers. ex: `vec[i] = (headerName, idx)`, - /// The vector is ordered such that the ith element - /// represents the ith header that came in off the line. - /// The `HeaderName` and `idx` are then used elsewhere to index into - /// the multi map that stores the header values. - entry_order: Vec<(HeaderName, usize)>, -} - -#[cfg(all(feature = "http1", feature = "ffi"))] -impl OriginalHeaderOrder { - pub(crate) fn default() -> Self { - OriginalHeaderOrder { - num_entries: HashMap::new(), - entry_order: Vec::new(), - } - } - - pub(crate) fn insert(&mut self, name: HeaderName) { - if !self.num_entries.contains_key(&name) { - let idx = 0; - self.num_entries.insert(name.clone(), 1); - self.entry_order.push((name, idx)); - } - // Replacing an already existing element does not - // change ordering, so we only care if its the first - // header name encountered - } - - pub(crate) fn append(&mut self, name: N) - where - N: IntoHeaderName + Into + Clone, - { - let name: HeaderName = name.into(); - let idx; - if self.num_entries.contains_key(&name) { - idx = self.num_entries[&name]; - *self.num_entries.get_mut(&name).unwrap() += 1; - } else { - idx = 0; - self.num_entries.insert(name.clone(), 1); - } - self.entry_order.push((name, idx)); - } - - // No doc test is run here 
because `RUSTFLAGS='--cfg hyper_unstable_ffi'` - // is needed to compile. Once ffi is stablized `no_run` should be removed - // here. - /// This returns an iterator that provides header names and indexes - /// in the original order received. - /// - /// # Examples - /// ```no_run - /// use hyper::ext::OriginalHeaderOrder; - /// use hyper::header::{HeaderName, HeaderValue, HeaderMap}; - /// - /// let mut h_order = OriginalHeaderOrder::default(); - /// let mut h_map = Headermap::new(); - /// - /// let name1 = b"Set-CookiE"; - /// let value1 = b"a=b"; - /// h_map.append(name1); - /// h_order.append(name1); - /// - /// let name2 = b"Content-Encoding"; - /// let value2 = b"gzip"; - /// h_map.append(name2, value2); - /// h_order.append(name2); - /// - /// let name3 = b"SET-COOKIE"; - /// let value3 = b"c=d"; - /// h_map.append(name3, value3); - /// h_order.append(name3) - /// - /// let mut iter = h_order.get_in_order() - /// - /// let (name, idx) = iter.next(); - /// assert_eq!(b"a=b", h_map.get_all(name).nth(idx).unwrap()); - /// - /// let (name, idx) = iter.next(); - /// assert_eq!(b"gzip", h_map.get_all(name).nth(idx).unwrap()); - /// - /// let (name, idx) = iter.next(); - /// assert_eq!(b"c=d", h_map.get_all(name).nth(idx).unwrap()); - /// ``` - pub(crate) fn get_in_order(&self) -> impl Iterator { - self.entry_order.iter() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ffi/body.rs s390-tools-2.33.1/rust-vendor/hyper/src/ffi/body.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ffi/body.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ffi/body.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,229 +0,0 @@ -use std::ffi::c_void; -use std::mem::ManuallyDrop; -use std::ptr; -use std::task::{Context, Poll}; - -use http::HeaderMap; -use libc::{c_int, size_t}; - -use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType}; -use super::{UserDataPointer, HYPER_ITER_CONTINUE}; -use crate::body::{Body, Bytes, HttpBody as 
_}; - -/// A streaming HTTP body. -pub struct hyper_body(pub(super) Body); - -/// A buffer of bytes that is sent or received on a `hyper_body`. -pub struct hyper_buf(pub(crate) Bytes); - -pub(crate) struct UserBody { - data_func: hyper_body_data_callback, - userdata: *mut c_void, -} - -// ===== Body ===== - -type hyper_body_foreach_callback = extern "C" fn(*mut c_void, *const hyper_buf) -> c_int; - -type hyper_body_data_callback = - extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut *mut hyper_buf) -> c_int; - -ffi_fn! { - /// Create a new "empty" body. - /// - /// If not configured, this body acts as an empty payload. - fn hyper_body_new() -> *mut hyper_body { - Box::into_raw(Box::new(hyper_body(Body::empty()))) - } ?= ptr::null_mut() -} - -ffi_fn! { - /// Free a `hyper_body *`. - fn hyper_body_free(body: *mut hyper_body) { - drop(non_null!(Box::from_raw(body) ?= ())); - } -} - -ffi_fn! { - /// Return a task that will poll the body for the next buffer of data. - /// - /// The task value may have different types depending on the outcome: - /// - /// - `HYPER_TASK_BUF`: Success, and more data was received. - /// - `HYPER_TASK_ERROR`: An error retrieving the data. - /// - `HYPER_TASK_EMPTY`: The body has finished streaming data. - /// - /// This does not consume the `hyper_body *`, so it may be used to again. - /// However, it MUST NOT be used or freed until the related task completes. - fn hyper_body_data(body: *mut hyper_body) -> *mut hyper_task { - // This doesn't take ownership of the Body, so don't allow destructor - let mut body = ManuallyDrop::new(non_null!(Box::from_raw(body) ?= ptr::null_mut())); - - Box::into_raw(hyper_task::boxed(async move { - body.0.data().await.map(|res| res.map(hyper_buf)) - })) - } ?= ptr::null_mut() -} - -ffi_fn! { - /// Return a task that will poll the body and execute the callback with each - /// body chunk that is received. 
- /// - /// The `hyper_buf` pointer is only a borrowed reference, it cannot live outside - /// the execution of the callback. You must make a copy to retain it. - /// - /// The callback should return `HYPER_ITER_CONTINUE` to continue iterating - /// chunks as they are received, or `HYPER_ITER_BREAK` to cancel. - /// - /// This will consume the `hyper_body *`, you shouldn't use it anymore or free it. - fn hyper_body_foreach(body: *mut hyper_body, func: hyper_body_foreach_callback, userdata: *mut c_void) -> *mut hyper_task { - let mut body = non_null!(Box::from_raw(body) ?= ptr::null_mut()); - let userdata = UserDataPointer(userdata); - - Box::into_raw(hyper_task::boxed(async move { - while let Some(item) = body.0.data().await { - let chunk = item?; - if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) { - return Err(crate::Error::new_user_aborted_by_callback()); - } - } - Ok(()) - })) - } ?= ptr::null_mut() -} - -ffi_fn! { - /// Set userdata on this body, which will be passed to callback functions. - fn hyper_body_set_userdata(body: *mut hyper_body, userdata: *mut c_void) { - let b = non_null!(&mut *body ?= ()); - b.0.as_ffi_mut().userdata = userdata; - } -} - -ffi_fn! { - /// Set the data callback for this body. - /// - /// The callback is called each time hyper needs to send more data for the - /// body. It is passed the value from `hyper_body_set_userdata`. - /// - /// If there is data available, the `hyper_buf **` argument should be set - /// to a `hyper_buf *` containing the data, and `HYPER_POLL_READY` should - /// be returned. - /// - /// Returning `HYPER_POLL_READY` while the `hyper_buf **` argument points - /// to `NULL` will indicate the body has completed all data. - /// - /// If there is more data to send, but it isn't yet available, a - /// `hyper_waker` should be saved from the `hyper_context *` argument, and - /// `HYPER_POLL_PENDING` should be returned. You must wake the saved waker - /// to signal the task when data is available. 
- /// - /// If some error has occurred, you can return `HYPER_POLL_ERROR` to abort - /// the body. - fn hyper_body_set_data_func(body: *mut hyper_body, func: hyper_body_data_callback) { - let b = non_null!{ &mut *body ?= () }; - b.0.as_ffi_mut().data_func = func; - } -} - -// ===== impl UserBody ===== - -impl UserBody { - pub(crate) fn new() -> UserBody { - UserBody { - data_func: data_noop, - userdata: std::ptr::null_mut(), - } - } - - pub(crate) fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll>> { - let mut out = std::ptr::null_mut(); - match (self.data_func)(self.userdata, hyper_context::wrap(cx), &mut out) { - super::task::HYPER_POLL_READY => { - if out.is_null() { - Poll::Ready(None) - } else { - let buf = unsafe { Box::from_raw(out) }; - Poll::Ready(Some(Ok(buf.0))) - } - } - super::task::HYPER_POLL_PENDING => Poll::Pending, - super::task::HYPER_POLL_ERROR => { - Poll::Ready(Some(Err(crate::Error::new_body_write_aborted()))) - } - unexpected => Poll::Ready(Some(Err(crate::Error::new_body_write(format!( - "unexpected hyper_body_data_func return code {}", - unexpected - ))))), - } - } - - pub(crate) fn poll_trailers( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll>> { - Poll::Ready(Ok(None)) - } -} - -/// cbindgen:ignore -extern "C" fn data_noop( - _userdata: *mut c_void, - _: *mut hyper_context<'_>, - _: *mut *mut hyper_buf, -) -> c_int { - super::task::HYPER_POLL_READY -} - -unsafe impl Send for UserBody {} -unsafe impl Sync for UserBody {} - -// ===== Bytes ===== - -ffi_fn! { - /// Create a new `hyper_buf *` by copying the provided bytes. - /// - /// This makes an owned copy of the bytes, so the `buf` argument can be - /// freed or changed afterwards. - /// - /// This returns `NULL` if allocating a new buffer fails. 
- fn hyper_buf_copy(buf: *const u8, len: size_t) -> *mut hyper_buf { - let slice = unsafe { - std::slice::from_raw_parts(buf, len) - }; - Box::into_raw(Box::new(hyper_buf(Bytes::copy_from_slice(slice)))) - } ?= ptr::null_mut() -} - -ffi_fn! { - /// Get a pointer to the bytes in this buffer. - /// - /// This should be used in conjunction with `hyper_buf_len` to get the length - /// of the bytes data. - /// - /// This pointer is borrowed data, and not valid once the `hyper_buf` is - /// consumed/freed. - fn hyper_buf_bytes(buf: *const hyper_buf) -> *const u8 { - unsafe { (*buf).0.as_ptr() } - } ?= ptr::null() -} - -ffi_fn! { - /// Get the length of the bytes this buffer contains. - fn hyper_buf_len(buf: *const hyper_buf) -> size_t { - unsafe { (*buf).0.len() } - } -} - -ffi_fn! { - /// Free this buffer. - fn hyper_buf_free(buf: *mut hyper_buf) { - drop(unsafe { Box::from_raw(buf) }); - } -} - -unsafe impl AsTaskType for hyper_buf { - fn as_task_type(&self) -> hyper_task_return_type { - hyper_task_return_type::HYPER_TASK_BUF - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ffi/client.rs s390-tools-2.33.1/rust-vendor/hyper/src/ffi/client.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ffi/client.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ffi/client.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,182 +0,0 @@ -use std::ptr; -use std::sync::Arc; - -use libc::c_int; - -use crate::client::conn; -use crate::rt::Executor as _; - -use super::error::hyper_code; -use super::http_types::{hyper_request, hyper_response}; -use super::io::hyper_io; -use super::task::{hyper_executor, hyper_task, hyper_task_return_type, AsTaskType, WeakExec}; - -/// An options builder to configure an HTTP client connection. -pub struct hyper_clientconn_options { - builder: conn::Builder, - /// Use a `Weak` to prevent cycles. - exec: WeakExec, -} - -/// An HTTP client connection handle. -/// -/// These are used to send a request on a single connection. 
It's possible to -/// send multiple requests on a single connection, such as when HTTP/1 -/// keep-alive or HTTP/2 is used. -pub struct hyper_clientconn { - tx: conn::SendRequest, -} - -// ===== impl hyper_clientconn ===== - -ffi_fn! { - /// Starts an HTTP client connection handshake using the provided IO transport - /// and options. - /// - /// Both the `io` and the `options` are consumed in this function call. - /// - /// The returned `hyper_task *` must be polled with an executor until the - /// handshake completes, at which point the value can be taken. - fn hyper_clientconn_handshake(io: *mut hyper_io, options: *mut hyper_clientconn_options) -> *mut hyper_task { - let options = non_null! { Box::from_raw(options) ?= ptr::null_mut() }; - let io = non_null! { Box::from_raw(io) ?= ptr::null_mut() }; - - Box::into_raw(hyper_task::boxed(async move { - options.builder.handshake::<_, crate::Body>(io) - .await - .map(|(tx, conn)| { - options.exec.execute(Box::pin(async move { - let _ = conn.await; - })); - hyper_clientconn { tx } - }) - })) - } ?= std::ptr::null_mut() -} - -ffi_fn! { - /// Send a request on the client connection. - /// - /// Returns a task that needs to be polled until it is ready. When ready, the - /// task yields a `hyper_response *`. - fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut hyper_task { - let mut req = non_null! { Box::from_raw(req) ?= ptr::null_mut() }; - - // Update request with original-case map of headers - req.finalize_request(); - - let fut = non_null! { &mut *conn ?= ptr::null_mut() }.tx.send_request(req.0); - - let fut = async move { - fut.await.map(hyper_response::wrap) - }; - - Box::into_raw(hyper_task::boxed(fut)) - } ?= std::ptr::null_mut() -} - -ffi_fn! { - /// Free a `hyper_clientconn *`. - fn hyper_clientconn_free(conn: *mut hyper_clientconn) { - drop(non_null! 
{ Box::from_raw(conn) ?= () }); - } -} - -unsafe impl AsTaskType for hyper_clientconn { - fn as_task_type(&self) -> hyper_task_return_type { - hyper_task_return_type::HYPER_TASK_CLIENTCONN - } -} - -// ===== impl hyper_clientconn_options ===== - -ffi_fn! { - /// Creates a new set of HTTP clientconn options to be used in a handshake. - fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { - #[allow(deprecated)] - let builder = conn::Builder::new(); - - Box::into_raw(Box::new(hyper_clientconn_options { - builder, - exec: WeakExec::new(), - })) - } ?= std::ptr::null_mut() -} - -ffi_fn! { - /// Set the whether or not header case is preserved. - /// - /// Pass `0` to allow lowercase normalization (default), `1` to retain original case. - fn hyper_clientconn_options_set_preserve_header_case(opts: *mut hyper_clientconn_options, enabled: c_int) { - let opts = non_null! { &mut *opts ?= () }; - opts.builder.http1_preserve_header_case(enabled != 0); - } -} - -ffi_fn! { - /// Set the whether or not header order is preserved. - /// - /// Pass `0` to allow reordering (default), `1` to retain original ordering. - fn hyper_clientconn_options_set_preserve_header_order(opts: *mut hyper_clientconn_options, enabled: c_int) { - let opts = non_null! { &mut *opts ?= () }; - opts.builder.http1_preserve_header_order(enabled != 0); - } -} - -ffi_fn! { - /// Free a `hyper_clientconn_options *`. - fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) { - drop(non_null! { Box::from_raw(opts) ?= () }); - } -} - -ffi_fn! { - /// Set the client background task executor. - /// - /// This does not consume the `options` or the `exec`. - fn hyper_clientconn_options_exec(opts: *mut hyper_clientconn_options, exec: *const hyper_executor) { - let opts = non_null! { &mut *opts ?= () }; - - let exec = non_null! 
{ Arc::from_raw(exec) ?= () }; - let weak_exec = hyper_executor::downgrade(&exec); - std::mem::forget(exec); - - opts.builder.executor(weak_exec.clone()); - opts.exec = weak_exec; - } -} - -ffi_fn! { - /// Set the whether to use HTTP2. - /// - /// Pass `0` to disable, `1` to enable. - fn hyper_clientconn_options_http2(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { - #[cfg(feature = "http2")] - { - let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; - opts.builder.http2_only(enabled != 0); - hyper_code::HYPERE_OK - } - - #[cfg(not(feature = "http2"))] - { - drop(opts); - drop(enabled); - hyper_code::HYPERE_FEATURE_NOT_ENABLED - } - } -} - -ffi_fn! { - /// Set the whether to include a copy of the raw headers in responses - /// received on this connection. - /// - /// Pass `0` to disable, `1` to enable. - /// - /// If enabled, see `hyper_response_headers_raw()` for usage. - fn hyper_clientconn_options_headers_raw(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code { - let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG }; - opts.builder.http1_headers_raw(enabled != 0); - hyper_code::HYPERE_OK - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ffi/error.rs s390-tools-2.33.1/rust-vendor/hyper/src/ffi/error.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ffi/error.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ffi/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,85 +0,0 @@ -use libc::size_t; - -/// A more detailed error object returned by some hyper functions. -pub struct hyper_error(crate::Error); - -/// A return code for many of hyper's methods. -#[repr(C)] -pub enum hyper_code { - /// All is well. - HYPERE_OK, - /// General error, details in the `hyper_error *`. - HYPERE_ERROR, - /// A function argument was invalid. - HYPERE_INVALID_ARG, - /// The IO transport returned an EOF when one wasn't expected. 
- /// - /// This typically means an HTTP request or response was expected, but the - /// connection closed cleanly without sending (all of) it. - HYPERE_UNEXPECTED_EOF, - /// Aborted by a user supplied callback. - HYPERE_ABORTED_BY_CALLBACK, - /// An optional hyper feature was not enabled. - #[cfg_attr(feature = "http2", allow(unused))] - HYPERE_FEATURE_NOT_ENABLED, - /// The peer sent an HTTP message that could not be parsed. - HYPERE_INVALID_PEER_MESSAGE, -} - -// ===== impl hyper_error ===== - -impl hyper_error { - fn code(&self) -> hyper_code { - use crate::error::Kind as ErrorKind; - use crate::error::User; - - match self.0.kind() { - ErrorKind::Parse(_) => hyper_code::HYPERE_INVALID_PEER_MESSAGE, - ErrorKind::IncompleteMessage => hyper_code::HYPERE_UNEXPECTED_EOF, - ErrorKind::User(User::AbortedByCallback) => hyper_code::HYPERE_ABORTED_BY_CALLBACK, - // TODO: add more variants - _ => hyper_code::HYPERE_ERROR, - } - } - - fn print_to(&self, dst: &mut [u8]) -> usize { - use std::io::Write; - - let mut dst = std::io::Cursor::new(dst); - - // A write! error doesn't matter. As much as possible will have been - // written, and the Cursor position will know how far that is (even - // if that is zero). - let _ = write!(dst, "{}", &self.0); - dst.position() as usize - } -} - -ffi_fn! { - /// Frees a `hyper_error`. - fn hyper_error_free(err: *mut hyper_error) { - drop(non_null!(Box::from_raw(err) ?= ())); - } -} - -ffi_fn! { - /// Get an equivalent `hyper_code` from this error. - fn hyper_error_code(err: *const hyper_error) -> hyper_code { - non_null!(&*err ?= hyper_code::HYPERE_INVALID_ARG).code() - } -} - -ffi_fn! { - /// Print the details of this error to a buffer. - /// - /// The `dst_len` value must be the maximum length that the buffer can - /// store. - /// - /// The return value is number of bytes that were written to `dst`. 
- fn hyper_error_print(err: *const hyper_error, dst: *mut u8, dst_len: size_t) -> size_t { - let dst = unsafe { - std::slice::from_raw_parts_mut(dst, dst_len) - }; - non_null!(&*err ?= 0).print_to(dst) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ffi/http_types.rs s390-tools-2.33.1/rust-vendor/hyper/src/ffi/http_types.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ffi/http_types.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ffi/http_types.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,657 +0,0 @@ -use bytes::Bytes; -use libc::{c_int, size_t}; -use std::ffi::c_void; - -use super::body::{hyper_body, hyper_buf}; -use super::error::hyper_code; -use super::task::{hyper_task_return_type, AsTaskType}; -use super::{UserDataPointer, HYPER_ITER_CONTINUE}; -use crate::ext::{HeaderCaseMap, OriginalHeaderOrder, ReasonPhrase}; -use crate::header::{HeaderName, HeaderValue}; -use crate::{Body, HeaderMap, Method, Request, Response, Uri}; - -/// An HTTP request. -pub struct hyper_request(pub(super) Request); - -/// An HTTP response. -pub struct hyper_response(pub(super) Response); - -/// An HTTP header map. -/// -/// These can be part of a request or response. -pub struct hyper_headers { - pub(super) headers: HeaderMap, - orig_casing: HeaderCaseMap, - orig_order: OriginalHeaderOrder, -} - -pub(crate) struct RawHeaders(pub(crate) hyper_buf); - -pub(crate) struct OnInformational { - func: hyper_request_on_informational_callback, - data: UserDataPointer, -} - -type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *mut hyper_response); - -// ===== impl hyper_request ===== - -ffi_fn! { - /// Construct a new HTTP request. - fn hyper_request_new() -> *mut hyper_request { - Box::into_raw(Box::new(hyper_request(Request::new(Body::empty())))) - } ?= std::ptr::null_mut() -} - -ffi_fn! { - /// Free an HTTP request if not going to send it on a client. 
- fn hyper_request_free(req: *mut hyper_request) { - drop(non_null!(Box::from_raw(req) ?= ())); - } -} - -ffi_fn! { - /// Set the HTTP Method of the request. - fn hyper_request_set_method(req: *mut hyper_request, method: *const u8, method_len: size_t) -> hyper_code { - let bytes = unsafe { - std::slice::from_raw_parts(method, method_len as usize) - }; - let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); - match Method::from_bytes(bytes) { - Ok(m) => { - *req.0.method_mut() = m; - hyper_code::HYPERE_OK - }, - Err(_) => { - hyper_code::HYPERE_INVALID_ARG - } - } - } -} - -ffi_fn! { - /// Set the URI of the request. - /// - /// The request's URI is best described as the `request-target` from the RFCs. So in HTTP/1, - /// whatever is set will get sent as-is in the first line (GET $uri HTTP/1.1). It - /// supports the 4 defined variants, origin-form, absolute-form, authority-form, and - /// asterisk-form. - /// - /// The underlying type was built to efficiently support HTTP/2 where the request-target is - /// split over :scheme, :authority, and :path. As such, each part can be set explicitly, or the - /// type can parse a single contiguous string and if a scheme is found, that slot is "set". If - /// the string just starts with a path, only the path portion is set. All pseudo headers that - /// have been parsed/set are sent when the connection type is HTTP/2. - /// - /// To set each slot explicitly, use `hyper_request_set_uri_parts`. - fn hyper_request_set_uri(req: *mut hyper_request, uri: *const u8, uri_len: size_t) -> hyper_code { - let bytes = unsafe { - std::slice::from_raw_parts(uri, uri_len as usize) - }; - let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); - match Uri::from_maybe_shared(bytes) { - Ok(u) => { - *req.0.uri_mut() = u; - hyper_code::HYPERE_OK - }, - Err(_) => { - hyper_code::HYPERE_INVALID_ARG - } - } - } -} - -ffi_fn! { - /// Set the URI of the request with separate scheme, authority, and - /// path/query strings. 
- /// - /// Each of `scheme`, `authority`, and `path_and_query` should either be - /// null, to skip providing a component, or point to a UTF-8 encoded - /// string. If any string pointer argument is non-null, its corresponding - /// `len` parameter must be set to the string's length. - fn hyper_request_set_uri_parts( - req: *mut hyper_request, - scheme: *const u8, - scheme_len: size_t, - authority: *const u8, - authority_len: size_t, - path_and_query: *const u8, - path_and_query_len: size_t - ) -> hyper_code { - let mut builder = Uri::builder(); - if !scheme.is_null() { - let scheme_bytes = unsafe { - std::slice::from_raw_parts(scheme, scheme_len as usize) - }; - builder = builder.scheme(scheme_bytes); - } - if !authority.is_null() { - let authority_bytes = unsafe { - std::slice::from_raw_parts(authority, authority_len as usize) - }; - builder = builder.authority(authority_bytes); - } - if !path_and_query.is_null() { - let path_and_query_bytes = unsafe { - std::slice::from_raw_parts(path_and_query, path_and_query_len as usize) - }; - builder = builder.path_and_query(path_and_query_bytes); - } - match builder.build() { - Ok(u) => { - *unsafe { &mut *req }.0.uri_mut() = u; - hyper_code::HYPERE_OK - }, - Err(_) => { - hyper_code::HYPERE_INVALID_ARG - } - } - } -} - -ffi_fn! { - /// Set the preferred HTTP version of the request. - /// - /// The version value should be one of the `HYPER_HTTP_VERSION_` constants. - /// - /// Note that this won't change the major HTTP version of the connection, - /// since that is determined at the handshake step. 
- fn hyper_request_set_version(req: *mut hyper_request, version: c_int) -> hyper_code { - use http::Version; - - let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); - *req.0.version_mut() = match version { - super::HYPER_HTTP_VERSION_NONE => Version::HTTP_11, - super::HYPER_HTTP_VERSION_1_0 => Version::HTTP_10, - super::HYPER_HTTP_VERSION_1_1 => Version::HTTP_11, - super::HYPER_HTTP_VERSION_2 => Version::HTTP_2, - _ => { - // We don't know this version - return hyper_code::HYPERE_INVALID_ARG; - } - }; - hyper_code::HYPERE_OK - } -} - -ffi_fn! { - /// Gets a reference to the HTTP headers of this request - /// - /// This is not an owned reference, so it should not be accessed after the - /// `hyper_request` has been consumed. - fn hyper_request_headers(req: *mut hyper_request) -> *mut hyper_headers { - hyper_headers::get_or_default(unsafe { &mut *req }.0.extensions_mut()) - } ?= std::ptr::null_mut() -} - -ffi_fn! { - /// Set the body of the request. - /// - /// The default is an empty body. - /// - /// This takes ownership of the `hyper_body *`, you must not use it or - /// free it after setting it on the request. - fn hyper_request_set_body(req: *mut hyper_request, body: *mut hyper_body) -> hyper_code { - let body = non_null!(Box::from_raw(body) ?= hyper_code::HYPERE_INVALID_ARG); - let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); - *req.0.body_mut() = body.0; - hyper_code::HYPERE_OK - } -} - -ffi_fn! { - /// Set an informational (1xx) response callback. - /// - /// The callback is called each time hyper receives an informational (1xx) - /// response for this request. - /// - /// The third argument is an opaque user data pointer, which is passed to - /// the callback each time. - /// - /// The callback is passed the `void *` data pointer, and a - /// `hyper_response *` which can be inspected as any other response. The - /// body of the response will always be empty. 
- /// - /// NOTE: The `hyper_response *` is just borrowed data, and will not - /// be valid after the callback finishes. You must copy any data you wish - /// to persist. - fn hyper_request_on_informational(req: *mut hyper_request, callback: hyper_request_on_informational_callback, data: *mut c_void) -> hyper_code { - let ext = OnInformational { - func: callback, - data: UserDataPointer(data), - }; - let req = non_null!(&mut *req ?= hyper_code::HYPERE_INVALID_ARG); - req.0.extensions_mut().insert(ext); - hyper_code::HYPERE_OK - } -} - -impl hyper_request { - pub(super) fn finalize_request(&mut self) { - if let Some(headers) = self.0.extensions_mut().remove::() { - *self.0.headers_mut() = headers.headers; - self.0.extensions_mut().insert(headers.orig_casing); - self.0.extensions_mut().insert(headers.orig_order); - } - } -} - -// ===== impl hyper_response ===== - -ffi_fn! { - /// Free an HTTP response after using it. - fn hyper_response_free(resp: *mut hyper_response) { - drop(non_null!(Box::from_raw(resp) ?= ())); - } -} - -ffi_fn! { - /// Get the HTTP-Status code of this response. - /// - /// It will always be within the range of 100-599. - fn hyper_response_status(resp: *const hyper_response) -> u16 { - non_null!(&*resp ?= 0).0.status().as_u16() - } -} - -ffi_fn! { - /// Get a pointer to the reason-phrase of this response. - /// - /// This buffer is not null-terminated. - /// - /// This buffer is owned by the response, and should not be used after - /// the response has been freed. - /// - /// Use `hyper_response_reason_phrase_len()` to get the length of this - /// buffer. - fn hyper_response_reason_phrase(resp: *const hyper_response) -> *const u8 { - non_null!(&*resp ?= std::ptr::null()).reason_phrase().as_ptr() - } ?= std::ptr::null() -} - -ffi_fn! { - /// Get the length of the reason-phrase of this response. - /// - /// Use `hyper_response_reason_phrase()` to get the buffer pointer. 
- fn hyper_response_reason_phrase_len(resp: *const hyper_response) -> size_t { - non_null!(&*resp ?= 0).reason_phrase().len() - } -} - -ffi_fn! { - /// Get a reference to the full raw headers of this response. - /// - /// You must have enabled `hyper_clientconn_options_headers_raw()`, or this - /// will return NULL. - /// - /// The returned `hyper_buf *` is just a reference, owned by the response. - /// You need to make a copy if you wish to use it after freeing the - /// response. - /// - /// The buffer is not null-terminated, see the `hyper_buf` functions for - /// getting the bytes and length. - fn hyper_response_headers_raw(resp: *const hyper_response) -> *const hyper_buf { - let resp = non_null!(&*resp ?= std::ptr::null()); - match resp.0.extensions().get::() { - Some(raw) => &raw.0, - None => std::ptr::null(), - } - } ?= std::ptr::null() -} - -ffi_fn! { - /// Get the HTTP version used by this response. - /// - /// The returned value could be: - /// - /// - `HYPER_HTTP_VERSION_1_0` - /// - `HYPER_HTTP_VERSION_1_1` - /// - `HYPER_HTTP_VERSION_2` - /// - `HYPER_HTTP_VERSION_NONE` if newer (or older). - fn hyper_response_version(resp: *const hyper_response) -> c_int { - use http::Version; - - match non_null!(&*resp ?= 0).0.version() { - Version::HTTP_10 => super::HYPER_HTTP_VERSION_1_0, - Version::HTTP_11 => super::HYPER_HTTP_VERSION_1_1, - Version::HTTP_2 => super::HYPER_HTTP_VERSION_2, - _ => super::HYPER_HTTP_VERSION_NONE, - } - } -} - -ffi_fn! { - /// Gets a reference to the HTTP headers of this response. - /// - /// This is not an owned reference, so it should not be accessed after the - /// `hyper_response` has been freed. - fn hyper_response_headers(resp: *mut hyper_response) -> *mut hyper_headers { - hyper_headers::get_or_default(unsafe { &mut *resp }.0.extensions_mut()) - } ?= std::ptr::null_mut() -} - -ffi_fn! { - /// Take ownership of the body of this response. - /// - /// It is safe to free the response even after taking ownership of its body. 
- fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body { - let body = std::mem::take(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut()); - Box::into_raw(Box::new(hyper_body(body))) - } ?= std::ptr::null_mut() -} - -impl hyper_response { - pub(super) fn wrap(mut resp: Response) -> hyper_response { - let headers = std::mem::take(resp.headers_mut()); - let orig_casing = resp - .extensions_mut() - .remove::() - .unwrap_or_else(HeaderCaseMap::default); - let orig_order = resp - .extensions_mut() - .remove::() - .unwrap_or_else(OriginalHeaderOrder::default); - resp.extensions_mut().insert(hyper_headers { - headers, - orig_casing, - orig_order, - }); - - hyper_response(resp) - } - - fn reason_phrase(&self) -> &[u8] { - if let Some(reason) = self.0.extensions().get::() { - return reason.as_bytes(); - } - - if let Some(reason) = self.0.status().canonical_reason() { - return reason.as_bytes(); - } - - &[] - } -} - -unsafe impl AsTaskType for hyper_response { - fn as_task_type(&self) -> hyper_task_return_type { - hyper_task_return_type::HYPER_TASK_RESPONSE - } -} - -// ===== impl Headers ===== - -type hyper_headers_foreach_callback = - extern "C" fn(*mut c_void, *const u8, size_t, *const u8, size_t) -> c_int; - -impl hyper_headers { - pub(super) fn get_or_default(ext: &mut http::Extensions) -> &mut hyper_headers { - if let None = ext.get_mut::() { - ext.insert(hyper_headers::default()); - } - - ext.get_mut::().unwrap() - } -} - -ffi_fn! { - /// Iterates the headers passing each name and value pair to the callback. - /// - /// The `userdata` pointer is also passed to the callback. - /// - /// The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or - /// `HYPER_ITER_BREAK` to stop. 
- fn hyper_headers_foreach(headers: *const hyper_headers, func: hyper_headers_foreach_callback, userdata: *mut c_void) { - let headers = non_null!(&*headers ?= ()); - // For each header name/value pair, there may be a value in the casemap - // that corresponds to the HeaderValue. So, we iterator all the keys, - // and for each one, try to pair the originally cased name with the value. - // - // TODO: consider adding http::HeaderMap::entries() iterator - let mut ordered_iter = headers.orig_order.get_in_order().peekable(); - if ordered_iter.peek().is_some() { - for (name, idx) in ordered_iter { - let (name_ptr, name_len) = if let Some(orig_name) = headers.orig_casing.get_all(name).nth(*idx) { - (orig_name.as_ref().as_ptr(), orig_name.as_ref().len()) - } else { - ( - name.as_str().as_bytes().as_ptr(), - name.as_str().as_bytes().len(), - ) - }; - - let val_ptr; - let val_len; - if let Some(value) = headers.headers.get_all(name).iter().nth(*idx) { - val_ptr = value.as_bytes().as_ptr(); - val_len = value.as_bytes().len(); - } else { - // Stop iterating, something has gone wrong. - return; - } - - if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { - return; - } - } - } else { - for name in headers.headers.keys() { - let mut names = headers.orig_casing.get_all(name); - - for value in headers.headers.get_all(name) { - let (name_ptr, name_len) = if let Some(orig_name) = names.next() { - (orig_name.as_ref().as_ptr(), orig_name.as_ref().len()) - } else { - ( - name.as_str().as_bytes().as_ptr(), - name.as_str().as_bytes().len(), - ) - }; - - let val_ptr = value.as_bytes().as_ptr(); - let val_len = value.as_bytes().len(); - - if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { - return; - } - } - } - } - } -} - -ffi_fn! { - /// Sets the header with the provided name to the provided value. - /// - /// This overwrites any previous value set for the header. 
- fn hyper_headers_set(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { - let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG); - match unsafe { raw_name_value(name, name_len, value, value_len) } { - Ok((name, value, orig_name)) => { - headers.headers.insert(&name, value); - headers.orig_casing.insert(name.clone(), orig_name.clone()); - headers.orig_order.insert(name); - hyper_code::HYPERE_OK - } - Err(code) => code, - } - } -} - -ffi_fn! { - /// Adds the provided value to the list of the provided name. - /// - /// If there were already existing values for the name, this will append the - /// new value to the internal list. - fn hyper_headers_add(headers: *mut hyper_headers, name: *const u8, name_len: size_t, value: *const u8, value_len: size_t) -> hyper_code { - let headers = non_null!(&mut *headers ?= hyper_code::HYPERE_INVALID_ARG); - - match unsafe { raw_name_value(name, name_len, value, value_len) } { - Ok((name, value, orig_name)) => { - headers.headers.append(&name, value); - headers.orig_casing.append(&name, orig_name.clone()); - headers.orig_order.append(name); - hyper_code::HYPERE_OK - } - Err(code) => code, - } - } -} - -impl Default for hyper_headers { - fn default() -> Self { - Self { - headers: Default::default(), - orig_casing: HeaderCaseMap::default(), - orig_order: OriginalHeaderOrder::default(), - } - } -} - -unsafe fn raw_name_value( - name: *const u8, - name_len: size_t, - value: *const u8, - value_len: size_t, -) -> Result<(HeaderName, HeaderValue, Bytes), hyper_code> { - let name = std::slice::from_raw_parts(name, name_len); - let orig_name = Bytes::copy_from_slice(name); - let name = match HeaderName::from_bytes(name) { - Ok(name) => name, - Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG), - }; - let value = std::slice::from_raw_parts(value, value_len); - let value = match HeaderValue::from_bytes(value) { - Ok(val) => val, - Err(_) => return 
Err(hyper_code::HYPERE_INVALID_ARG), - }; - - Ok((name, value, orig_name)) -} - -// ===== impl OnInformational ===== - -impl OnInformational { - pub(crate) fn call(&mut self, resp: Response) { - let mut resp = hyper_response::wrap(resp); - (self.func)(self.data.0, &mut resp); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_headers_foreach_cases_preserved() { - let mut headers = hyper_headers::default(); - - let name1 = b"Set-CookiE"; - let value1 = b"a=b"; - hyper_headers_add( - &mut headers, - name1.as_ptr(), - name1.len(), - value1.as_ptr(), - value1.len(), - ); - - let name2 = b"SET-COOKIE"; - let value2 = b"c=d"; - hyper_headers_add( - &mut headers, - name2.as_ptr(), - name2.len(), - value2.as_ptr(), - value2.len(), - ); - - let mut vec = Vec::::new(); - hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void); - - assert_eq!(vec, b"Set-CookiE: a=b\r\nSET-COOKIE: c=d\r\n"); - - extern "C" fn concat( - vec: *mut c_void, - name: *const u8, - name_len: usize, - value: *const u8, - value_len: usize, - ) -> c_int { - unsafe { - let vec = &mut *(vec as *mut Vec); - let name = std::slice::from_raw_parts(name, name_len); - let value = std::slice::from_raw_parts(value, value_len); - vec.extend(name); - vec.extend(b": "); - vec.extend(value); - vec.extend(b"\r\n"); - } - HYPER_ITER_CONTINUE - } - } - - #[cfg(all(feature = "http1", feature = "ffi"))] - #[test] - fn test_headers_foreach_order_preserved() { - let mut headers = hyper_headers::default(); - - let name1 = b"Set-CookiE"; - let value1 = b"a=b"; - hyper_headers_add( - &mut headers, - name1.as_ptr(), - name1.len(), - value1.as_ptr(), - value1.len(), - ); - - let name2 = b"Content-Encoding"; - let value2 = b"gzip"; - hyper_headers_add( - &mut headers, - name2.as_ptr(), - name2.len(), - value2.as_ptr(), - value2.len(), - ); - - let name3 = b"SET-COOKIE"; - let value3 = b"c=d"; - hyper_headers_add( - &mut headers, - name3.as_ptr(), - name3.len(), - value3.as_ptr(), - 
value3.len(), - ); - - let mut vec = Vec::::new(); - hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void); - - println!("{}", std::str::from_utf8(&vec).unwrap()); - assert_eq!( - vec, - b"Set-CookiE: a=b\r\nContent-Encoding: gzip\r\nSET-COOKIE: c=d\r\n" - ); - - extern "C" fn concat( - vec: *mut c_void, - name: *const u8, - name_len: usize, - value: *const u8, - value_len: usize, - ) -> c_int { - unsafe { - let vec = &mut *(vec as *mut Vec); - let name = std::slice::from_raw_parts(name, name_len); - let value = std::slice::from_raw_parts(value, value_len); - vec.extend(name); - vec.extend(b": "); - vec.extend(value); - vec.extend(b"\r\n"); - } - HYPER_ITER_CONTINUE - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ffi/io.rs s390-tools-2.33.1/rust-vendor/hyper/src/ffi/io.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ffi/io.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ffi/io.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,178 +0,0 @@ -use std::ffi::c_void; -use std::pin::Pin; -use std::task::{Context, Poll}; - -use libc::size_t; -use tokio::io::{AsyncRead, AsyncWrite}; - -use super::task::hyper_context; - -/// Sentinel value to return from a read or write callback that the operation -/// is pending. -pub const HYPER_IO_PENDING: size_t = 0xFFFFFFFF; -/// Sentinel value to return from a read or write callback that the operation -/// has errored. -pub const HYPER_IO_ERROR: size_t = 0xFFFFFFFE; - -type hyper_io_read_callback = - extern "C" fn(*mut c_void, *mut hyper_context<'_>, *mut u8, size_t) -> size_t; -type hyper_io_write_callback = - extern "C" fn(*mut c_void, *mut hyper_context<'_>, *const u8, size_t) -> size_t; - -/// An IO object used to represent a socket or similar concept. -pub struct hyper_io { - read: hyper_io_read_callback, - write: hyper_io_write_callback, - userdata: *mut c_void, -} - -ffi_fn! { - /// Create a new IO type used to represent a transport. 
- /// - /// The read and write functions of this transport should be set with - /// `hyper_io_set_read` and `hyper_io_set_write`. - fn hyper_io_new() -> *mut hyper_io { - Box::into_raw(Box::new(hyper_io { - read: read_noop, - write: write_noop, - userdata: std::ptr::null_mut(), - })) - } ?= std::ptr::null_mut() -} - -ffi_fn! { - /// Free an unused `hyper_io *`. - /// - /// This is typically only useful if you aren't going to pass ownership - /// of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. - fn hyper_io_free(io: *mut hyper_io) { - drop(non_null!(Box::from_raw(io) ?= ())); - } -} - -ffi_fn! { - /// Set the user data pointer for this IO to some value. - /// - /// This value is passed as an argument to the read and write callbacks. - fn hyper_io_set_userdata(io: *mut hyper_io, data: *mut c_void) { - non_null!(&mut *io ?= ()).userdata = data; - } -} - -ffi_fn! { - /// Set the read function for this IO transport. - /// - /// Data that is read from the transport should be put in the `buf` pointer, - /// up to `buf_len` bytes. The number of bytes read should be the return value. - /// - /// It is undefined behavior to try to access the bytes in the `buf` pointer, - /// unless you have already written them yourself. It is also undefined behavior - /// to return that more bytes have been written than actually set on the `buf`. - /// - /// If there is no data currently available, a waker should be claimed from - /// the `ctx` and registered with whatever polling mechanism is used to signal - /// when data is available later on. The return value should be - /// `HYPER_IO_PENDING`. - /// - /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` - /// should be the return value. - fn hyper_io_set_read(io: *mut hyper_io, func: hyper_io_read_callback) { - non_null!(&mut *io ?= ()).read = func; - } -} - -ffi_fn! { - /// Set the write function for this IO transport. 
- /// - /// Data from the `buf` pointer should be written to the transport, up to - /// `buf_len` bytes. The number of bytes written should be the return value. - /// - /// If no data can currently be written, the `waker` should be cloned and - /// registered with whatever polling mechanism is used to signal when data - /// is available later on. The return value should be `HYPER_IO_PENDING`. - /// - /// Yeet. - /// - /// If there is an irrecoverable error reading data, then `HYPER_IO_ERROR` - /// should be the return value. - fn hyper_io_set_write(io: *mut hyper_io, func: hyper_io_write_callback) { - non_null!(&mut *io ?= ()).write = func; - } -} - -/// cbindgen:ignore -extern "C" fn read_noop( - _userdata: *mut c_void, - _: *mut hyper_context<'_>, - _buf: *mut u8, - _buf_len: size_t, -) -> size_t { - 0 -} - -/// cbindgen:ignore -extern "C" fn write_noop( - _userdata: *mut c_void, - _: *mut hyper_context<'_>, - _buf: *const u8, - _buf_len: size_t, -) -> size_t { - 0 -} - -impl AsyncRead for hyper_io { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> Poll> { - let buf_ptr = unsafe { buf.unfilled_mut() }.as_mut_ptr() as *mut u8; - let buf_len = buf.remaining(); - - match (self.read)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) { - HYPER_IO_PENDING => Poll::Pending, - HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new( - std::io::ErrorKind::Other, - "io error", - ))), - ok => { - // We have to trust that the user's read callback actually - // filled in that many bytes... 
:( - unsafe { buf.assume_init(ok) }; - buf.advance(ok); - Poll::Ready(Ok(())) - } - } - } -} - -impl AsyncWrite for hyper_io { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - let buf_ptr = buf.as_ptr(); - let buf_len = buf.len(); - - match (self.write)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) { - HYPER_IO_PENDING => Poll::Pending, - HYPER_IO_ERROR => Poll::Ready(Err(std::io::Error::new( - std::io::ErrorKind::Other, - "io error", - ))), - ok => Poll::Ready(Ok(ok)), - } - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -unsafe impl Send for hyper_io {} -unsafe impl Sync for hyper_io {} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ffi/macros.rs s390-tools-2.33.1/rust-vendor/hyper/src/ffi/macros.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ffi/macros.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ffi/macros.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,53 +0,0 @@ -macro_rules! 
ffi_fn { - ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block ?= $default:expr) => { - $(#[$doc])* - #[no_mangle] - pub extern fn $name($($arg: $arg_ty),*) -> $ret { - use std::panic::{self, AssertUnwindSafe}; - - match panic::catch_unwind(AssertUnwindSafe(move || $body)) { - Ok(v) => v, - Err(_) => { - $default - } - } - } - }; - - ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) -> $ret:ty $body:block) => { - ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> $ret $body ?= { - eprintln!("panic unwind caught, aborting"); - std::process::abort() - }); - }; - - ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block ?= $default:expr) => { - ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body ?= $default); - }; - - ($(#[$doc:meta])* fn $name:ident($($arg:ident: $arg_ty:ty),*) $body:block) => { - ffi_fn!($(#[$doc])* fn $name($($arg: $arg_ty),*) -> () $body); - }; -} - -macro_rules! non_null { - ($ptr:ident, $eval:expr, $err:expr) => {{ - debug_assert!(!$ptr.is_null(), "{:?} must not be null", stringify!($ptr)); - if $ptr.is_null() { - return $err; - } - unsafe { $eval } - }}; - (&*$ptr:ident ?= $err:expr) => {{ - non_null!($ptr, &*$ptr, $err) - }}; - (&mut *$ptr:ident ?= $err:expr) => {{ - non_null!($ptr, &mut *$ptr, $err) - }}; - (Box::from_raw($ptr:ident) ?= $err:expr) => {{ - non_null!($ptr, Box::from_raw($ptr), $err) - }}; - (Arc::from_raw($ptr:ident) ?= $err:expr) => {{ - non_null!($ptr, Arc::from_raw($ptr), $err) - }}; -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ffi/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/ffi/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ffi/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ffi/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,94 +0,0 @@ -// We have a lot of c-types in here, stop warning about their names! 
-#![allow(non_camel_case_types)] -// fmt::Debug isn't helpful on FFI types -#![allow(missing_debug_implementations)] -// unreachable_pub warns `#[no_mangle] pub extern fn` in private mod. -#![allow(unreachable_pub)] - -//! # hyper C API -//! -//! This part of the documentation describes the C API for hyper. That is, how -//! to *use* the hyper library in C code. This is **not** a regular Rust -//! module, and thus it is not accessible in Rust. -//! -//! ## Unstable -//! -//! The C API of hyper is currently **unstable**, which means it's not part of -//! the semver contract as the rest of the Rust API is. Because of that, it's -//! only accessible if `--cfg hyper_unstable_ffi` is passed to `rustc` when -//! compiling. The easiest way to do that is setting the `RUSTFLAGS` -//! environment variable. -//! -//! ## Building -//! -//! The C API is part of the Rust library, but isn't compiled by default. Using -//! `cargo`, it can be compiled with the following command: -//! -//! ```notrust -//! RUSTFLAGS="--cfg hyper_unstable_ffi" cargo build --features client,http1,http2,ffi -//! ``` - -// We may eventually allow the FFI to be enabled without `client` or `http1`, -// that is why we don't auto enable them as `ffi = ["client", "http1"]` in -// the `Cargo.toml`. -// -// But for now, give a clear message that this compile error is expected. 
-#[cfg(not(all(feature = "client", feature = "http1")))] -compile_error!("The `ffi` feature currently requires the `client` and `http1` features."); - -#[cfg(not(hyper_unstable_ffi))] -compile_error!( - "\ - The `ffi` feature is unstable, and requires the \ - `RUSTFLAGS='--cfg hyper_unstable_ffi'` environment variable to be set.\ -" -); - -#[macro_use] -mod macros; - -mod body; -mod client; -mod error; -mod http_types; -mod io; -mod task; - -pub use self::body::*; -pub use self::client::*; -pub use self::error::*; -pub use self::http_types::*; -pub use self::io::*; -pub use self::task::*; - -/// Return in iter functions to continue iterating. -pub const HYPER_ITER_CONTINUE: libc::c_int = 0; -/// Return in iter functions to stop iterating. -#[allow(unused)] -pub const HYPER_ITER_BREAK: libc::c_int = 1; - -/// An HTTP Version that is unspecified. -pub const HYPER_HTTP_VERSION_NONE: libc::c_int = 0; -/// The HTTP/1.0 version. -pub const HYPER_HTTP_VERSION_1_0: libc::c_int = 10; -/// The HTTP/1.1 version. -pub const HYPER_HTTP_VERSION_1_1: libc::c_int = 11; -/// The HTTP/2 version. -pub const HYPER_HTTP_VERSION_2: libc::c_int = 20; - -struct UserDataPointer(*mut std::ffi::c_void); - -// We don't actually know anything about this pointer, it's up to the user -// to do the right thing. -unsafe impl Send for UserDataPointer {} -unsafe impl Sync for UserDataPointer {} - -/// cbindgen:ignore -static VERSION_CSTR: &str = concat!(env!("CARGO_PKG_VERSION"), "\0"); - -ffi_fn! { - /// Returns a static ASCII (null terminated) string of the hyper version. 
- fn hyper_version() -> *const libc::c_char { - VERSION_CSTR.as_ptr() as _ - } ?= std::ptr::null() -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/ffi/task.rs s390-tools-2.33.1/rust-vendor/hyper/src/ffi/task.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/ffi/task.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/ffi/task.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,411 +0,0 @@ -use std::ffi::c_void; -use std::future::Future; -use std::pin::Pin; -use std::ptr; -use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, Mutex, Weak, -}; -use std::task::{Context, Poll}; - -use futures_util::stream::{FuturesUnordered, Stream}; -use libc::c_int; - -use super::error::hyper_code; -use super::UserDataPointer; - -type BoxFuture = Pin + Send>>; -type BoxAny = Box; - -/// Return in a poll function to indicate it was ready. -pub const HYPER_POLL_READY: c_int = 0; -/// Return in a poll function to indicate it is still pending. -/// -/// The passed in `hyper_waker` should be registered to wake up the task at -/// some later point. -pub const HYPER_POLL_PENDING: c_int = 1; -/// Return in a poll function indicate an error. -pub const HYPER_POLL_ERROR: c_int = 3; - -/// A task executor for `hyper_task`s. -pub struct hyper_executor { - /// The executor of all task futures. - /// - /// There should never be contention on the mutex, as it is only locked - /// to drive the futures. However, we cannot guarantee proper usage from - /// `hyper_executor_poll()`, which in C could potentially be called inside - /// one of the stored futures. The mutex isn't re-entrant, so doing so - /// would result in a deadlock, but that's better than data corruption. - driver: Mutex>, - - /// The queue of futures that need to be pushed into the `driver`. - /// - /// This is has a separate mutex since `spawn` could be called from inside - /// a future, which would mean the driver's mutex is already locked. 
- spawn_queue: Mutex>, - - /// This is used to track when a future calls `wake` while we are within - /// `hyper_executor::poll_next`. - is_woken: Arc, -} - -#[derive(Clone)] -pub(crate) struct WeakExec(Weak); - -struct ExecWaker(AtomicBool); - -/// An async task. -pub struct hyper_task { - future: BoxFuture, - output: Option, - userdata: UserDataPointer, -} - -struct TaskFuture { - task: Option>, -} - -/// An async context for a task that contains the related waker. -pub struct hyper_context<'a>(Context<'a>); - -/// A waker that is saved and used to waken a pending task. -pub struct hyper_waker { - waker: std::task::Waker, -} - -/// A descriptor for what type a `hyper_task` value is. -#[repr(C)] -pub enum hyper_task_return_type { - /// The value of this task is null (does not imply an error). - HYPER_TASK_EMPTY, - /// The value of this task is `hyper_error *`. - HYPER_TASK_ERROR, - /// The value of this task is `hyper_clientconn *`. - HYPER_TASK_CLIENTCONN, - /// The value of this task is `hyper_response *`. - HYPER_TASK_RESPONSE, - /// The value of this task is `hyper_buf *`. - HYPER_TASK_BUF, -} - -pub(crate) unsafe trait AsTaskType { - fn as_task_type(&self) -> hyper_task_return_type; -} - -pub(crate) trait IntoDynTaskType { - fn into_dyn_task_type(self) -> BoxAny; -} - -// ===== impl hyper_executor ===== - -impl hyper_executor { - fn new() -> Arc { - Arc::new(hyper_executor { - driver: Mutex::new(FuturesUnordered::new()), - spawn_queue: Mutex::new(Vec::new()), - is_woken: Arc::new(ExecWaker(AtomicBool::new(false))), - }) - } - - pub(crate) fn downgrade(exec: &Arc) -> WeakExec { - WeakExec(Arc::downgrade(exec)) - } - - fn spawn(&self, task: Box) { - self.spawn_queue - .lock() - .unwrap() - .push(TaskFuture { task: Some(task) }); - } - - fn poll_next(&self) -> Option> { - // Drain the queue first. 
- self.drain_queue(); - - let waker = futures_util::task::waker_ref(&self.is_woken); - let mut cx = Context::from_waker(&waker); - - loop { - match Pin::new(&mut *self.driver.lock().unwrap()).poll_next(&mut cx) { - Poll::Ready(val) => return val, - Poll::Pending => { - // Check if any of the pending tasks tried to spawn - // some new tasks. If so, drain into the driver and loop. - if self.drain_queue() { - continue; - } - - // If the driver called `wake` while we were polling, - // we should poll again immediately! - if self.is_woken.0.swap(false, Ordering::SeqCst) { - continue; - } - - return None; - } - } - } - } - - fn drain_queue(&self) -> bool { - let mut queue = self.spawn_queue.lock().unwrap(); - if queue.is_empty() { - return false; - } - - let driver = self.driver.lock().unwrap(); - - for task in queue.drain(..) { - driver.push(task); - } - - true - } -} - -impl futures_util::task::ArcWake for ExecWaker { - fn wake_by_ref(me: &Arc) { - me.0.store(true, Ordering::SeqCst); - } -} - -// ===== impl WeakExec ===== - -impl WeakExec { - pub(crate) fn new() -> Self { - WeakExec(Weak::new()) - } -} - -impl crate::rt::Executor> for WeakExec { - fn execute(&self, fut: BoxFuture<()>) { - if let Some(exec) = self.0.upgrade() { - exec.spawn(hyper_task::boxed(fut)); - } - } -} - -ffi_fn! { - /// Creates a new task executor. - fn hyper_executor_new() -> *const hyper_executor { - Arc::into_raw(hyper_executor::new()) - } ?= ptr::null() -} - -ffi_fn! { - /// Frees an executor and any incomplete tasks still part of it. - fn hyper_executor_free(exec: *const hyper_executor) { - drop(non_null!(Arc::from_raw(exec) ?= ())); - } -} - -ffi_fn! { - /// Push a task onto the executor. - /// - /// The executor takes ownership of the task, it should not be accessed - /// again unless returned back to the user with `hyper_executor_poll`. 
- fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code { - let exec = non_null!(&*exec ?= hyper_code::HYPERE_INVALID_ARG); - let task = non_null!(Box::from_raw(task) ?= hyper_code::HYPERE_INVALID_ARG); - exec.spawn(task); - hyper_code::HYPERE_OK - } -} - -ffi_fn! { - /// Polls the executor, trying to make progress on any tasks that have notified - /// that they are ready again. - /// - /// If ready, returns a task from the executor that has completed. - /// - /// If there are no ready tasks, this returns `NULL`. - fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task { - let exec = non_null!(&*exec ?= ptr::null_mut()); - match exec.poll_next() { - Some(task) => Box::into_raw(task), - None => ptr::null_mut(), - } - } ?= ptr::null_mut() -} - -// ===== impl hyper_task ===== - -impl hyper_task { - pub(crate) fn boxed(fut: F) -> Box - where - F: Future + Send + 'static, - F::Output: IntoDynTaskType + Send + Sync + 'static, - { - Box::new(hyper_task { - future: Box::pin(async move { fut.await.into_dyn_task_type() }), - output: None, - userdata: UserDataPointer(ptr::null_mut()), - }) - } - - fn output_type(&self) -> hyper_task_return_type { - match self.output { - None => hyper_task_return_type::HYPER_TASK_EMPTY, - Some(ref val) => val.as_task_type(), - } - } -} - -impl Future for TaskFuture { - type Output = Box; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) { - Poll::Ready(val) => { - let mut task = self.task.take().unwrap(); - task.output = Some(val); - Poll::Ready(task) - } - Poll::Pending => Poll::Pending, - } - } -} - -ffi_fn! { - /// Free a task. - fn hyper_task_free(task: *mut hyper_task) { - drop(non_null!(Box::from_raw(task) ?= ())); - } -} - -ffi_fn! { - /// Takes the output value of this task. - /// - /// This must only be called once polling the task on an executor has finished - /// this task. 
- /// - /// Use `hyper_task_type` to determine the type of the `void *` return value. - fn hyper_task_value(task: *mut hyper_task) -> *mut c_void { - let task = non_null!(&mut *task ?= ptr::null_mut()); - - if let Some(val) = task.output.take() { - let p = Box::into_raw(val) as *mut c_void; - // protect from returning fake pointers to empty types - if p == std::ptr::NonNull::::dangling().as_ptr() { - ptr::null_mut() - } else { - p - } - } else { - ptr::null_mut() - } - } ?= ptr::null_mut() -} - -ffi_fn! { - /// Query the return type of this task. - fn hyper_task_type(task: *mut hyper_task) -> hyper_task_return_type { - // instead of blowing up spectacularly, just say this null task - // doesn't have a value to retrieve. - non_null!(&*task ?= hyper_task_return_type::HYPER_TASK_EMPTY).output_type() - } -} - -ffi_fn! { - /// Set a user data pointer to be associated with this task. - /// - /// This value will be passed to task callbacks, and can be checked later - /// with `hyper_task_userdata`. - fn hyper_task_set_userdata(task: *mut hyper_task, userdata: *mut c_void) { - if task.is_null() { - return; - } - - unsafe { (*task).userdata = UserDataPointer(userdata) }; - } -} - -ffi_fn! { - /// Retrieve the userdata that has been set via `hyper_task_set_userdata`. 
- fn hyper_task_userdata(task: *mut hyper_task) -> *mut c_void { - non_null!(&*task ?= ptr::null_mut()).userdata.0 - } ?= ptr::null_mut() -} - -// ===== impl AsTaskType ===== - -unsafe impl AsTaskType for () { - fn as_task_type(&self) -> hyper_task_return_type { - hyper_task_return_type::HYPER_TASK_EMPTY - } -} - -unsafe impl AsTaskType for crate::Error { - fn as_task_type(&self) -> hyper_task_return_type { - hyper_task_return_type::HYPER_TASK_ERROR - } -} - -impl IntoDynTaskType for T -where - T: AsTaskType + Send + Sync + 'static, -{ - fn into_dyn_task_type(self) -> BoxAny { - Box::new(self) - } -} - -impl IntoDynTaskType for crate::Result -where - T: IntoDynTaskType + Send + Sync + 'static, -{ - fn into_dyn_task_type(self) -> BoxAny { - match self { - Ok(val) => val.into_dyn_task_type(), - Err(err) => Box::new(err), - } - } -} - -impl IntoDynTaskType for Option -where - T: IntoDynTaskType + Send + Sync + 'static, -{ - fn into_dyn_task_type(self) -> BoxAny { - match self { - Some(val) => val.into_dyn_task_type(), - None => ().into_dyn_task_type(), - } - } -} - -// ===== impl hyper_context ===== - -impl hyper_context<'_> { - pub(crate) fn wrap<'a, 'b>(cx: &'a mut Context<'b>) -> &'a mut hyper_context<'b> { - // A struct with only one field has the same layout as that field. - unsafe { std::mem::transmute::<&mut Context<'_>, &mut hyper_context<'_>>(cx) } - } -} - -ffi_fn! { - /// Copies a waker out of the task context. - fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker { - let waker = non_null!(&mut *cx ?= ptr::null_mut()).0.waker().clone(); - Box::into_raw(Box::new(hyper_waker { waker })) - } ?= ptr::null_mut() -} - -// ===== impl hyper_waker ===== - -ffi_fn! { - /// Free a waker that hasn't been woken. - fn hyper_waker_free(waker: *mut hyper_waker) { - drop(non_null!(Box::from_raw(waker) ?= ())); - } -} - -ffi_fn! { - /// Wake up the task associated with a waker. - /// - /// NOTE: This consumes the waker. 
You should not use or free the waker afterwards. - fn hyper_waker_wake(waker: *mut hyper_waker) { - let waker = non_null!(Box::from_raw(waker) ?= ()); - waker.waker.wake(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/headers.rs s390-tools-2.33.1/rust-vendor/hyper/src/headers.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/headers.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/headers.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,154 +0,0 @@ -#[cfg(feature = "http1")] -use bytes::BytesMut; -use http::header::CONTENT_LENGTH; -use http::header::{HeaderValue, ValueIter}; -use http::HeaderMap; -#[cfg(all(feature = "http2", feature = "client"))] -use http::Method; - -#[cfg(feature = "http1")] -pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool { - connection_has(value, "keep-alive") -} - -#[cfg(feature = "http1")] -pub(super) fn connection_close(value: &HeaderValue) -> bool { - connection_has(value, "close") -} - -#[cfg(feature = "http1")] -fn connection_has(value: &HeaderValue, needle: &str) -> bool { - if let Ok(s) = value.to_str() { - for val in s.split(',') { - if val.trim().eq_ignore_ascii_case(needle) { - return true; - } - } - } - false -} - -#[cfg(all(feature = "http1", feature = "server"))] -pub(super) fn content_length_parse(value: &HeaderValue) -> Option { - from_digits(value.as_bytes()) -} - -pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option { - content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter()) -} - -pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option { - // If multiple Content-Length headers were sent, everything can still - // be alright if they all contain the same value, and all parse - // correctly. If not, then it's an error. 
- - let mut content_length: Option = None; - for h in values { - if let Ok(line) = h.to_str() { - for v in line.split(',') { - if let Some(n) = from_digits(v.trim().as_bytes()) { - if content_length.is_none() { - content_length = Some(n) - } else if content_length != Some(n) { - return None; - } - } else { - return None - } - } - } else { - return None - } - } - - return content_length -} - -fn from_digits(bytes: &[u8]) -> Option { - // cannot use FromStr for u64, since it allows a signed prefix - let mut result = 0u64; - const RADIX: u64 = 10; - - if bytes.is_empty() { - return None; - } - - for &b in bytes { - // can't use char::to_digit, since we haven't verified these bytes - // are utf-8. - match b { - b'0'..=b'9' => { - result = result.checked_mul(RADIX)?; - result = result.checked_add((b - b'0') as u64)?; - }, - _ => { - // not a DIGIT, get outta here! - return None; - } - } - } - - Some(result) -} - -#[cfg(all(feature = "http2", feature = "client"))] -pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool { - match *method { - Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false, - _ => true, - } -} - -#[cfg(feature = "http2")] -pub(super) fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) { - headers - .entry(CONTENT_LENGTH) - .or_insert_with(|| HeaderValue::from(len)); -} - -#[cfg(feature = "http1")] -pub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool { - is_chunked(headers.get_all(http::header::TRANSFER_ENCODING).into_iter()) -} - -#[cfg(feature = "http1")] -pub(super) fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool { - // chunked must always be the last encoding, according to spec - if let Some(line) = encodings.next_back() { - return is_chunked_(line); - } - - false -} - -#[cfg(feature = "http1")] -pub(super) fn is_chunked_(value: &HeaderValue) -> bool { - // chunked must always be the last encoding, according to spec - if let Ok(s) = value.to_str() { - if let 
Some(encoding) = s.rsplit(',').next() { - return encoding.trim().eq_ignore_ascii_case("chunked"); - } - } - - false -} - -#[cfg(feature = "http1")] -pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) { - const CHUNKED: &str = "chunked"; - - if let Some(line) = entry.iter_mut().next_back() { - // + 2 for ", " - let new_cap = line.as_bytes().len() + CHUNKED.len() + 2; - let mut buf = BytesMut::with_capacity(new_cap); - buf.extend_from_slice(line.as_bytes()); - buf.extend_from_slice(b", "); - buf.extend_from_slice(CHUNKED.as_bytes()); - - *line = HeaderValue::from_maybe_shared(buf.freeze()) - .expect("original header value plus ascii is valid"); - return; - } - - entry.insert(HeaderValue::from_static(CHUNKED)); -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/lib.rs s390-tools-2.33.1/rust-vendor/hyper/src/lib.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,111 +0,0 @@ -#![deny(missing_docs)] -#![deny(missing_debug_implementations)] -#![cfg_attr(test, deny(rust_2018_idioms))] -#![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))] -#![cfg_attr(all(test, feature = "full"), deny(warnings))] -#![cfg_attr(all(test, feature = "nightly"), feature(test))] -#![cfg_attr(docsrs, feature(doc_cfg))] - -//! # hyper -//! -//! hyper is a **fast** and **correct** HTTP implementation written in and for Rust. -//! -//! ## Features -//! -//! - HTTP/1 and HTTP/2 -//! - Asynchronous design -//! - Leading in performance -//! - Tested and **correct** -//! - Extensive production use -//! - [Client](client/index.html) and [Server](server/index.html) APIs -//! -//! If just starting out, **check out the [Guides](https://hyper.rs/guides) -//! first.** -//! -//! ## "Low-level" -//! -//! hyper is a lower-level HTTP library, meant to be a building block -//! for libraries and applications. -//! -//! 
If looking for just a convenient HTTP client, consider the -//! [reqwest](https://crates.io/crates/reqwest) crate. -//! -//! # Optional Features -//! -//! hyper uses a set of [feature flags] to reduce the amount of compiled code. -//! It is possible to just enable certain features over others. By default, -//! hyper does not enable any features but allows one to enable a subset for -//! their use case. Below is a list of the available feature flags. You may -//! also notice above each function, struct and trait there is listed one or -//! more feature flags that are required for that item to be used. -//! -//! If you are new to hyper it is possible to enable the `full` feature flag -//! which will enable all public APIs. Beware though that this will pull in -//! many extra dependencies that you may not need. -//! -//! The following optional features are available: -//! -//! - `http1`: Enables HTTP/1 support. -//! - `http2`: Enables HTTP/2 support. -//! - `client`: Enables the HTTP `client`. -//! - `server`: Enables the HTTP `server`. -//! - `runtime`: Enables convenient integration with `tokio`, providing -//! connectors and acceptors for TCP, and a default executor. -//! - `tcp`: Enables convenient implementations over TCP (using tokio). -//! - `stream`: Provides `futures::Stream` capabilities. -//! - `backports`: 1.0 functionality backported to 0.14. -//! - `deprecated`: opt-in to deprecation warnings to prepare you for 1.0. -//! -//! 
[feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section - -#[doc(hidden)] -pub use http; - -#[cfg(all(test, feature = "nightly"))] -extern crate test; - -pub use crate::http::{header, Method, Request, Response, StatusCode, Uri, Version}; - -#[doc(no_inline)] -pub use crate::http::HeaderMap; - -pub use crate::body::Body; -pub use crate::error::{Error, Result}; - -#[macro_use] -mod cfg; -#[macro_use] -mod common; -pub mod body; -mod error; -pub mod ext; -#[cfg(test)] -mod mock; -pub mod rt; -pub mod service; -pub mod upgrade; - -#[cfg(feature = "ffi")] -pub mod ffi; - -cfg_proto! { - mod headers; - mod proto; -} - -cfg_feature! { - #![feature = "client"] - - pub mod client; - #[cfg(any(feature = "http1", feature = "http2"))] - #[doc(no_inline)] - pub use crate::client::Client; -} - -cfg_feature! { - #![feature = "server"] - - pub mod server; - #[doc(no_inline)] - pub use crate::server::Server; -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/mock.rs s390-tools-2.33.1/rust-vendor/hyper/src/mock.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/mock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/mock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,235 +0,0 @@ -// FIXME: re-implement tests with `async/await` -/* -#[cfg(feature = "runtime")] -use std::collections::HashMap; -use std::cmp; -use std::io::{self, Read, Write}; -#[cfg(feature = "runtime")] -use std::sync::{Arc, Mutex}; - -use bytes::Buf; -use futures::{Async, Poll}; -#[cfg(feature = "runtime")] -use futures::Future; -use futures::task::{self, Task}; -use tokio_io::{AsyncRead, AsyncWrite}; - -#[cfg(feature = "runtime")] -use crate::client::connect::{Connect, Connected, Destination}; - - - -#[cfg(feature = "runtime")] -pub struct Duplex { - inner: Arc>, -} - -#[cfg(feature = "runtime")] -struct DuplexInner { - handle_read_task: Option, - read: AsyncIo, - write: AsyncIo, -} - -#[cfg(feature = "runtime")] -impl Duplex { - pub(crate) fn 
channel() -> (Duplex, DuplexHandle) { - let mut inner = DuplexInner { - handle_read_task: None, - read: AsyncIo::new_buf(Vec::new(), 0), - write: AsyncIo::new_buf(Vec::new(), std::usize::MAX), - }; - - inner.read.park_tasks(true); - inner.write.park_tasks(true); - - let inner = Arc::new(Mutex::new(inner)); - - let duplex = Duplex { - inner: inner.clone(), - }; - let handle = DuplexHandle { - inner: inner, - }; - - (duplex, handle) - } -} - -#[cfg(feature = "runtime")] -impl Read for Duplex { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.lock().unwrap().read.read(buf) - } -} - -#[cfg(feature = "runtime")] -impl Write for Duplex { - fn write(&mut self, buf: &[u8]) -> io::Result { - let mut inner = self.inner.lock().unwrap(); - let ret = inner.write.write(buf); - if let Some(task) = inner.handle_read_task.take() { - trace!("waking DuplexHandle read"); - task.notify(); - } - ret - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.lock().unwrap().write.flush() - } -} - -#[cfg(feature = "runtime")] -impl AsyncRead for Duplex { -} - -#[cfg(feature = "runtime")] -impl AsyncWrite for Duplex { - fn shutdown(&mut self) -> Poll<(), io::Error> { - Ok(().into()) - } - - fn write_buf(&mut self, buf: &mut B) -> Poll { - let mut inner = self.inner.lock().unwrap(); - if let Some(task) = inner.handle_read_task.take() { - task.notify(); - } - inner.write.write_buf(buf) - } -} - -#[cfg(feature = "runtime")] -pub struct DuplexHandle { - inner: Arc>, -} - -#[cfg(feature = "runtime")] -impl DuplexHandle { - pub fn read(&self, buf: &mut [u8]) -> Poll { - let mut inner = self.inner.lock().unwrap(); - assert!(buf.len() >= inner.write.inner.len()); - if inner.write.inner.is_empty() { - trace!("DuplexHandle read parking"); - inner.handle_read_task = Some(task::current()); - return Ok(Async::NotReady); - } - inner.write.read(buf).map(Async::Ready) - } - - pub fn write(&self, bytes: &[u8]) -> Poll { - let mut inner = self.inner.lock().unwrap(); - 
assert_eq!(inner.read.inner.pos, 0); - assert_eq!(inner.read.inner.vec.len(), 0, "write but read isn't empty"); - inner - .read - .inner - .vec - .extend(bytes); - inner.read.block_in(bytes.len()); - Ok(Async::Ready(bytes.len())) - } -} - -#[cfg(feature = "runtime")] -impl Drop for DuplexHandle { - fn drop(&mut self) { - trace!("mock duplex handle drop"); - if !::std::thread::panicking() { - let mut inner = self.inner.lock().unwrap(); - inner.read.close(); - inner.write.close(); - } - } -} - -#[cfg(feature = "runtime")] -type BoxedConnectFut = Box + Send>; - -#[cfg(feature = "runtime")] -#[derive(Clone)] -pub struct MockConnector { - mocks: Arc>, -} - -#[cfg(feature = "runtime")] -struct MockedConnections(HashMap>); - -#[cfg(feature = "runtime")] -impl MockConnector { - pub fn new() -> MockConnector { - MockConnector { - mocks: Arc::new(Mutex::new(MockedConnections(HashMap::new()))), - } - } - - pub fn mock(&mut self, key: &str) -> DuplexHandle { - use futures::future; - self.mock_fut(key, future::ok::<_, ()>(())) - } - - pub fn mock_fut(&mut self, key: &str, fut: F) -> DuplexHandle - where - F: Future + Send + 'static, - { - self.mock_opts(key, Connected::new(), fut) - } - - pub fn mock_opts(&mut self, key: &str, connected: Connected, fut: F) -> DuplexHandle - where - F: Future + Send + 'static, - { - let key = key.to_owned(); - - let (duplex, handle) = Duplex::channel(); - - let fut = Box::new(fut.then(move |_| { - trace!("MockConnector mocked fut ready"); - Ok((duplex, connected)) - })); - self.mocks.lock().unwrap().0.entry(key) - .or_insert(Vec::new()) - .push(fut); - - handle - } -} - -#[cfg(feature = "runtime")] -impl Connect for MockConnector { - type Transport = Duplex; - type Error = io::Error; - type Future = BoxedConnectFut; - - fn connect(&self, dst: Destination) -> Self::Future { - trace!("mock connect: {:?}", dst); - let key = format!("{}://{}{}", dst.scheme(), dst.host(), if let Some(port) = dst.port() { - format!(":{}", port) - } else { - 
"".to_owned() - }); - let mut mocks = self.mocks.lock().unwrap(); - let mocks = mocks.0.get_mut(&key) - .expect(&format!("unknown mocks uri: {}", key)); - assert!(!mocks.is_empty(), "no additional mocks for {}", key); - mocks.remove(0) - } -} - - -#[cfg(feature = "runtime")] -impl Drop for MockedConnections { - fn drop(&mut self) { - if !::std::thread::panicking() { - for (key, mocks) in self.0.iter() { - assert_eq!( - mocks.len(), - 0, - "not all mocked connects for {:?} were used", - key, - ); - } - } - } -} -*/ diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/conn.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/conn.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/conn.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/conn.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1425 +0,0 @@ -use std::fmt; -use std::io; -use std::marker::PhantomData; -#[cfg(all(feature = "server", feature = "runtime"))] -use std::time::Duration; - -use bytes::{Buf, Bytes}; -use http::header::{HeaderValue, CONNECTION}; -use http::{HeaderMap, Method, Version}; -use httparse::ParserConfig; -use tokio::io::{AsyncRead, AsyncWrite}; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Sleep; -use tracing::{debug, error, trace}; - -use super::io::Buffered; -use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants}; -use crate::body::DecodedLength; -use crate::common::{task, Pin, Poll, Unpin}; -use crate::headers::connection_keep_alive; -use crate::proto::{BodyLength, MessageHead}; - -const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; - -/// This handles a connection, which will have been established over an -/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple -/// `Transaction`s over HTTP. 
-/// -/// The connection will determine when a message begins and ends as well as -/// determine if this connection can be kept alive after the message, -/// or if it is complete. -pub(crate) struct Conn { - io: Buffered>, - state: State, - _marker: PhantomData, -} - -impl Conn -where - I: AsyncRead + AsyncWrite + Unpin, - B: Buf, - T: Http1Transaction, -{ - pub(crate) fn new(io: I) -> Conn { - Conn { - io: Buffered::new(io), - state: State { - allow_half_close: false, - cached_headers: None, - error: None, - keep_alive: KA::Busy, - method: None, - h1_parser_config: ParserConfig::default(), - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout: None, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_fut: None, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_running: false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - title_case_headers: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: None, - #[cfg(feature = "ffi")] - raw_headers: false, - notify_read: false, - reading: Reading::Init, - writing: Writing::Init, - upgrade: None, - // We assume a modern world where the remote speaks HTTP/1.1. - // If they tell us otherwise, we'll downgrade in `read_head`. 
- version: Version::HTTP_11, - }, - _marker: PhantomData, - } - } - - #[cfg(feature = "server")] - pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) { - self.io.set_flush_pipeline(enabled); - } - - pub(crate) fn set_write_strategy_queue(&mut self) { - self.io.set_write_strategy_queue(); - } - - pub(crate) fn set_max_buf_size(&mut self, max: usize) { - self.io.set_max_buf_size(max); - } - - #[cfg(feature = "client")] - pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { - self.io.set_read_buf_exact_size(sz); - } - - pub(crate) fn set_write_strategy_flatten(&mut self) { - self.io.set_write_strategy_flatten(); - } - - #[cfg(feature = "client")] - pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) { - self.state.h1_parser_config = parser_config; - } - - pub(crate) fn set_title_case_headers(&mut self) { - self.state.title_case_headers = true; - } - - pub(crate) fn set_preserve_header_case(&mut self) { - self.state.preserve_header_case = true; - } - - #[cfg(feature = "ffi")] - pub(crate) fn set_preserve_header_order(&mut self) { - self.state.preserve_header_order = true; - } - - #[cfg(feature = "client")] - pub(crate) fn set_h09_responses(&mut self) { - self.state.h09_responses = true; - } - - #[cfg(all(feature = "server", feature = "runtime"))] - pub(crate) fn set_http1_header_read_timeout(&mut self, val: Duration) { - self.state.h1_header_read_timeout = Some(val); - } - - #[cfg(feature = "server")] - pub(crate) fn set_allow_half_close(&mut self) { - self.state.allow_half_close = true; - } - - #[cfg(feature = "ffi")] - pub(crate) fn set_raw_headers(&mut self, enabled: bool) { - self.state.raw_headers = enabled; - } - - pub(crate) fn into_inner(self) -> (I, Bytes) { - self.io.into_inner() - } - - pub(crate) fn pending_upgrade(&mut self) -> Option { - self.state.upgrade.take() - } - - pub(crate) fn is_read_closed(&self) -> bool { - self.state.is_read_closed() - } - - pub(crate) fn is_write_closed(&self) -> bool { - 
self.state.is_write_closed() - } - - pub(crate) fn can_read_head(&self) -> bool { - if !matches!(self.state.reading, Reading::Init) { - return false; - } - - if T::should_read_first() { - return true; - } - - !matches!(self.state.writing, Writing::Init) - } - - pub(crate) fn can_read_body(&self) -> bool { - match self.state.reading { - Reading::Body(..) | Reading::Continue(..) => true, - _ => false, - } - } - - fn should_error_on_eof(&self) -> bool { - // If we're idle, it's probably just the connection closing gracefully. - T::should_error_on_parse_eof() && !self.state.is_idle() - } - - fn has_h2_prefix(&self) -> bool { - let read_buf = self.io.read_buf(); - read_buf.len() >= 24 && read_buf[..24] == *H2_PREFACE - } - - pub(super) fn poll_read_head( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll, DecodedLength, Wants)>>> { - debug_assert!(self.can_read_head()); - trace!("Conn::read_head"); - - let msg = match ready!(self.io.parse::( - cx, - ParseContext { - cached_headers: &mut self.state.cached_headers, - req_method: &mut self.state.method, - h1_parser_config: self.state.h1_parser_config.clone(), - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout: self.state.h1_header_read_timeout, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_fut: &mut self.state.h1_header_read_timeout_fut, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running, - preserve_header_case: self.state.preserve_header_case, - #[cfg(feature = "ffi")] - preserve_header_order: self.state.preserve_header_order, - h09_responses: self.state.h09_responses, - #[cfg(feature = "ffi")] - on_informational: &mut self.state.on_informational, - #[cfg(feature = "ffi")] - raw_headers: self.state.raw_headers, - } - )) { - Ok(msg) => msg, - Err(e) => return self.on_read_head_error(e), - }; - - // Note: don't deconstruct `msg` into local variables, it appears - // the 
optimizer doesn't remove the extra copies. - - debug!("incoming body is {}", msg.decode); - - // Prevent accepting HTTP/0.9 responses after the initial one, if any. - self.state.h09_responses = false; - - // Drop any OnInformational callbacks, we're done there! - #[cfg(feature = "ffi")] - { - self.state.on_informational = None; - } - - self.state.busy(); - self.state.keep_alive &= msg.keep_alive; - self.state.version = msg.head.version; - - let mut wants = if msg.wants_upgrade { - Wants::UPGRADE - } else { - Wants::EMPTY - }; - - if msg.decode == DecodedLength::ZERO { - if msg.expect_continue { - debug!("ignoring expect-continue since body is empty"); - } - self.state.reading = Reading::KeepAlive; - if !T::should_read_first() { - self.try_keep_alive(cx); - } - } else if msg.expect_continue { - self.state.reading = Reading::Continue(Decoder::new(msg.decode)); - wants = wants.add(Wants::EXPECT); - } else { - self.state.reading = Reading::Body(Decoder::new(msg.decode)); - } - - Poll::Ready(Some(Ok((msg.head, msg.decode, wants)))) - } - - fn on_read_head_error(&mut self, e: crate::Error) -> Poll>> { - // If we are currently waiting on a message, then an empty - // message should be reported as an error. If not, it is just - // the connection closing gracefully. - let must_error = self.should_error_on_eof(); - self.close_read(); - self.io.consume_leading_lines(); - let was_mid_parse = e.is_parse() || !self.io.read_buf().is_empty(); - if was_mid_parse || must_error { - // We check if the buf contains the h2 Preface - debug!( - "parse error ({}) with {} bytes", - e, - self.io.read_buf().len() - ); - match self.on_parse_error(e) { - Ok(()) => Poll::Pending, // XXX: wat? 
- Err(e) => Poll::Ready(Some(Err(e))), - } - } else { - debug!("read eof"); - self.close_write(); - Poll::Ready(None) - } - } - - pub(crate) fn poll_read_body( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll>> { - debug_assert!(self.can_read_body()); - - let (reading, ret) = match self.state.reading { - Reading::Body(ref mut decoder) => { - match ready!(decoder.decode(cx, &mut self.io)) { - Ok(slice) => { - let (reading, chunk) = if decoder.is_eof() { - debug!("incoming body completed"); - ( - Reading::KeepAlive, - if !slice.is_empty() { - Some(Ok(slice)) - } else { - None - }, - ) - } else if slice.is_empty() { - error!("incoming body unexpectedly ended"); - // This should be unreachable, since all 3 decoders - // either set eof=true or return an Err when reading - // an empty slice... - (Reading::Closed, None) - } else { - return Poll::Ready(Some(Ok(slice))); - }; - (reading, Poll::Ready(chunk)) - } - Err(e) => { - debug!("incoming body decode error: {}", e); - (Reading::Closed, Poll::Ready(Some(Err(e)))) - } - } - } - Reading::Continue(ref decoder) => { - // Write the 100 Continue if not already responded... - if let Writing::Init = self.state.writing { - trace!("automatically sending 100 Continue"); - let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; - self.io.headers_buf().extend_from_slice(cont); - } - - // And now recurse once in the Reading::Body state... 
- self.state.reading = Reading::Body(decoder.clone()); - return self.poll_read_body(cx); - } - _ => unreachable!("poll_read_body invalid state: {:?}", self.state.reading), - }; - - self.state.reading = reading; - self.try_keep_alive(cx); - ret - } - - pub(crate) fn wants_read_again(&mut self) -> bool { - let ret = self.state.notify_read; - self.state.notify_read = false; - ret - } - - pub(crate) fn poll_read_keep_alive( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll> { - debug_assert!(!self.can_read_head() && !self.can_read_body()); - - if self.is_read_closed() { - Poll::Pending - } else if self.is_mid_message() { - self.mid_message_detect_eof(cx) - } else { - self.require_empty_read(cx) - } - } - - fn is_mid_message(&self) -> bool { - !matches!( - (&self.state.reading, &self.state.writing), - (&Reading::Init, &Writing::Init) - ) - } - - // This will check to make sure the io object read is empty. - // - // This should only be called for Clients wanting to enter the idle - // state. 
- fn require_empty_read(&mut self, cx: &mut task::Context<'_>) -> Poll> { - debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed()); - debug_assert!(!self.is_mid_message()); - debug_assert!(T::is_client()); - - if !self.io.read_buf().is_empty() { - debug!("received an unexpected {} bytes", self.io.read_buf().len()); - return Poll::Ready(Err(crate::Error::new_unexpected_message())); - } - - let num_read = ready!(self.force_io_read(cx)).map_err(crate::Error::new_io)?; - - if num_read == 0 { - let ret = if self.should_error_on_eof() { - trace!("found unexpected EOF on busy connection: {:?}", self.state); - Poll::Ready(Err(crate::Error::new_incomplete())) - } else { - trace!("found EOF on idle connection, closing"); - Poll::Ready(Ok(())) - }; - - // order is important: should_error needs state BEFORE close_read - self.state.close_read(); - return ret; - } - - debug!( - "received unexpected {} bytes on an idle connection", - num_read - ); - Poll::Ready(Err(crate::Error::new_unexpected_message())) - } - - fn mid_message_detect_eof(&mut self, cx: &mut task::Context<'_>) -> Poll> { - debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed()); - debug_assert!(self.is_mid_message()); - - if self.state.allow_half_close || !self.io.read_buf().is_empty() { - return Poll::Pending; - } - - let num_read = ready!(self.force_io_read(cx)).map_err(crate::Error::new_io)?; - - if num_read == 0 { - trace!("found unexpected EOF on busy connection: {:?}", self.state); - self.state.close_read(); - Poll::Ready(Err(crate::Error::new_incomplete())) - } else { - Poll::Ready(Ok(())) - } - } - - fn force_io_read(&mut self, cx: &mut task::Context<'_>) -> Poll> { - debug_assert!(!self.state.is_read_closed()); - - let result = ready!(self.io.poll_read_from_io(cx)); - Poll::Ready(result.map_err(|e| { - trace!("force_io_read; io error = {:?}", e); - self.state.close(); - e - })) - } - - fn maybe_notify(&mut self, cx: &mut task::Context<'_>) 
{ - // its possible that we returned NotReady from poll() without having - // exhausted the underlying Io. We would have done this when we - // determined we couldn't keep reading until we knew how writing - // would finish. - - match self.state.reading { - Reading::Continue(..) | Reading::Body(..) | Reading::KeepAlive | Reading::Closed => { - return - } - Reading::Init => (), - }; - - match self.state.writing { - Writing::Body(..) => return, - Writing::Init | Writing::KeepAlive | Writing::Closed => (), - } - - if !self.io.is_read_blocked() { - if self.io.read_buf().is_empty() { - match self.io.poll_read_from_io(cx) { - Poll::Ready(Ok(n)) => { - if n == 0 { - trace!("maybe_notify; read eof"); - if self.state.is_idle() { - self.state.close(); - } else { - self.close_read() - } - return; - } - } - Poll::Pending => { - trace!("maybe_notify; read_from_io blocked"); - return; - } - Poll::Ready(Err(e)) => { - trace!("maybe_notify; read_from_io error: {}", e); - self.state.close(); - self.state.error = Some(crate::Error::new_io(e)); - } - } - } - self.state.notify_read = true; - } - } - - fn try_keep_alive(&mut self, cx: &mut task::Context<'_>) { - self.state.try_keep_alive::(); - self.maybe_notify(cx); - } - - pub(crate) fn can_write_head(&self) -> bool { - if !T::should_read_first() && matches!(self.state.reading, Reading::Closed) { - return false; - } - - match self.state.writing { - Writing::Init => self.io.can_headers_buf(), - _ => false, - } - } - - pub(crate) fn can_write_body(&self) -> bool { - match self.state.writing { - Writing::Body(..) 
=> true, - Writing::Init | Writing::KeepAlive | Writing::Closed => false, - } - } - - pub(crate) fn can_buffer_body(&self) -> bool { - self.io.can_buffer() - } - - pub(crate) fn write_head(&mut self, head: MessageHead, body: Option) { - if let Some(encoder) = self.encode_head(head, body) { - self.state.writing = if !encoder.is_eof() { - Writing::Body(encoder) - } else if encoder.is_last() { - Writing::Closed - } else { - Writing::KeepAlive - }; - } - } - - pub(crate) fn write_full_msg(&mut self, head: MessageHead, body: B) { - if let Some(encoder) = - self.encode_head(head, Some(BodyLength::Known(body.remaining() as u64))) - { - let is_last = encoder.is_last(); - // Make sure we don't write a body if we weren't actually allowed - // to do so, like because its a HEAD request. - if !encoder.is_eof() { - encoder.danger_full_buf(body, self.io.write_buf()); - } - self.state.writing = if is_last { - Writing::Closed - } else { - Writing::KeepAlive - } - } - } - - fn encode_head( - &mut self, - mut head: MessageHead, - body: Option, - ) -> Option { - debug_assert!(self.can_write_head()); - - if !T::should_read_first() { - self.state.busy(); - } - - self.enforce_version(&mut head); - - let buf = self.io.headers_buf(); - match super::role::encode_headers::( - Encode { - head: &mut head, - body, - #[cfg(feature = "server")] - keep_alive: self.state.wants_keep_alive(), - req_method: &mut self.state.method, - title_case_headers: self.state.title_case_headers, - }, - buf, - ) { - Ok(encoder) => { - debug_assert!(self.state.cached_headers.is_none()); - debug_assert!(head.headers.is_empty()); - self.state.cached_headers = Some(head.headers); - - #[cfg(feature = "ffi")] - { - self.state.on_informational = - head.extensions.remove::(); - } - - Some(encoder) - } - Err(err) => { - self.state.error = Some(err); - self.state.writing = Writing::Closed; - None - } - } - } - - // Fix keep-alive when Connection: keep-alive header is not present - fn fix_keep_alive(&mut self, head: &mut 
MessageHead) { - let outgoing_is_keep_alive = head - .headers - .get(CONNECTION) - .map(connection_keep_alive) - .unwrap_or(false); - - if !outgoing_is_keep_alive { - match head.version { - // If response is version 1.0 and keep-alive is not present in the response, - // disable keep-alive so the server closes the connection - Version::HTTP_10 => self.state.disable_keep_alive(), - // If response is version 1.1 and keep-alive is wanted, add - // Connection: keep-alive header when not present - Version::HTTP_11 => { - if self.state.wants_keep_alive() { - head.headers - .insert(CONNECTION, HeaderValue::from_static("keep-alive")); - } - } - _ => (), - } - } - } - - // If we know the remote speaks an older version, we try to fix up any messages - // to work with our older peer. - fn enforce_version(&mut self, head: &mut MessageHead) { - if let Version::HTTP_10 = self.state.version { - // Fixes response or connection when keep-alive header is not present - self.fix_keep_alive(head); - // If the remote only knows HTTP/1.0, we should force ourselves - // to do only speak HTTP/1.0 as well. - head.version = Version::HTTP_10; - } - // If the remote speaks HTTP/1.1, then it *should* be fine with - // both HTTP/1.0 and HTTP/1.1 from us. So again, we just let - // the user's headers be. 
- } - - pub(crate) fn write_body(&mut self, chunk: B) { - debug_assert!(self.can_write_body() && self.can_buffer_body()); - // empty chunks should be discarded at Dispatcher level - debug_assert!(chunk.remaining() != 0); - - let state = match self.state.writing { - Writing::Body(ref mut encoder) => { - self.io.buffer(encoder.encode(chunk)); - - if !encoder.is_eof() { - return; - } - - if encoder.is_last() { - Writing::Closed - } else { - Writing::KeepAlive - } - } - _ => unreachable!("write_body invalid state: {:?}", self.state.writing), - }; - - self.state.writing = state; - } - - pub(crate) fn write_body_and_end(&mut self, chunk: B) { - debug_assert!(self.can_write_body() && self.can_buffer_body()); - // empty chunks should be discarded at Dispatcher level - debug_assert!(chunk.remaining() != 0); - - let state = match self.state.writing { - Writing::Body(ref encoder) => { - let can_keep_alive = encoder.encode_and_end(chunk, self.io.write_buf()); - if can_keep_alive { - Writing::KeepAlive - } else { - Writing::Closed - } - } - _ => unreachable!("write_body invalid state: {:?}", self.state.writing), - }; - - self.state.writing = state; - } - - pub(crate) fn end_body(&mut self) -> crate::Result<()> { - debug_assert!(self.can_write_body()); - - let encoder = match self.state.writing { - Writing::Body(ref mut enc) => enc, - _ => return Ok(()), - }; - - // end of stream, that means we should try to eof - match encoder.end() { - Ok(end) => { - if let Some(end) = end { - self.io.buffer(end); - } - - self.state.writing = if encoder.is_last() || encoder.is_close_delimited() { - Writing::Closed - } else { - Writing::KeepAlive - }; - - Ok(()) - } - Err(not_eof) => { - self.state.writing = Writing::Closed; - Err(crate::Error::new_body_write_aborted().with(not_eof)) - } - } - } - - // When we get a parse error, depending on what side we are, we might be able - // to write a response before closing the connection. 
- // - // - Client: there is nothing we can do - // - Server: if Response hasn't been written yet, we can send a 4xx response - fn on_parse_error(&mut self, err: crate::Error) -> crate::Result<()> { - if let Writing::Init = self.state.writing { - if self.has_h2_prefix() { - return Err(crate::Error::new_version_h2()); - } - if let Some(msg) = T::on_error(&err) { - // Drop the cached headers so as to not trigger a debug - // assert in `write_head`... - self.state.cached_headers.take(); - self.write_head(msg, None); - self.state.error = Some(err); - return Ok(()); - } - } - - // fallback is pass the error back up - Err(err) - } - - pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { - ready!(Pin::new(&mut self.io).poll_flush(cx))?; - self.try_keep_alive(cx); - trace!("flushed({}): {:?}", T::LOG, self.state); - Poll::Ready(Ok(())) - } - - pub(crate) fn poll_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { - match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) { - Ok(()) => { - trace!("shut down IO complete"); - Poll::Ready(Ok(())) - } - Err(e) => { - debug!("error shutting down IO: {}", e); - Poll::Ready(Err(e)) - } - } - } - - /// If the read side can be cheaply drained, do so. Otherwise, close. 
- pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut task::Context<'_>) { - if let Reading::Continue(ref decoder) = self.state.reading { - // skip sending the 100-continue - // just move forward to a read, in case a tiny body was included - self.state.reading = Reading::Body(decoder.clone()); - } - - let _ = self.poll_read_body(cx); - - // If still in Reading::Body, just give up - match self.state.reading { - Reading::Init | Reading::KeepAlive => trace!("body drained"), - _ => self.close_read(), - } - } - - pub(crate) fn close_read(&mut self) { - self.state.close_read(); - } - - pub(crate) fn close_write(&mut self) { - self.state.close_write(); - } - - #[cfg(feature = "server")] - pub(crate) fn disable_keep_alive(&mut self) { - if self.state.is_idle() { - trace!("disable_keep_alive; closing idle connection"); - self.state.close(); - } else { - trace!("disable_keep_alive; in-progress connection"); - self.state.disable_keep_alive(); - } - } - - pub(crate) fn take_error(&mut self) -> crate::Result<()> { - if let Some(err) = self.state.error.take() { - Err(err) - } else { - Ok(()) - } - } - - pub(super) fn on_upgrade(&mut self) -> crate::upgrade::OnUpgrade { - trace!("{}: prepare possible HTTP upgrade", T::LOG); - self.state.prepare_upgrade() - } -} - -impl fmt::Debug for Conn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Conn") - .field("state", &self.state) - .field("io", &self.io) - .finish() - } -} - -// B and T are never pinned -impl Unpin for Conn {} - -struct State { - allow_half_close: bool, - /// Re-usable HeaderMap to reduce allocating new ones. - cached_headers: Option, - /// If an error occurs when there wasn't a direct way to return it - /// back to the user, this is set. - error: Option, - /// Current keep-alive status. - keep_alive: KA, - /// If mid-message, the HTTP Method that started it. - /// - /// This is used to know things such as if the message can include - /// a body or not. 
- method: Option, - h1_parser_config: ParserConfig, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout: Option, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_fut: Option>>, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_running: bool, - preserve_header_case: bool, - #[cfg(feature = "ffi")] - preserve_header_order: bool, - title_case_headers: bool, - h09_responses: bool, - /// If set, called with each 1xx informational response received for - /// the current request. MUST be unset after a non-1xx response is - /// received. - #[cfg(feature = "ffi")] - on_informational: Option, - #[cfg(feature = "ffi")] - raw_headers: bool, - /// Set to true when the Dispatcher should poll read operations - /// again. See the `maybe_notify` method for more. - notify_read: bool, - /// State of allowed reads - reading: Reading, - /// State of allowed writes - writing: Writing, - /// An expected pending HTTP upgrade. - upgrade: Option, - /// Either HTTP/1.0 or 1.1 connection - version: Version, -} - -#[derive(Debug)] -enum Reading { - Init, - Continue(Decoder), - Body(Decoder), - KeepAlive, - Closed, -} - -enum Writing { - Init, - Body(Encoder), - KeepAlive, - Closed, -} - -impl fmt::Debug for State { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut builder = f.debug_struct("State"); - builder - .field("reading", &self.reading) - .field("writing", &self.writing) - .field("keep_alive", &self.keep_alive); - - // Only show error field if it's interesting... - if let Some(ref error) = self.error { - builder.field("error", error); - } - - if self.allow_half_close { - builder.field("allow_half_close", &true); - } - - // Purposefully leaving off other fields.. 
- - builder.finish() - } -} - -impl fmt::Debug for Writing { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Writing::Init => f.write_str("Init"), - Writing::Body(ref enc) => f.debug_tuple("Body").field(enc).finish(), - Writing::KeepAlive => f.write_str("KeepAlive"), - Writing::Closed => f.write_str("Closed"), - } - } -} - -impl std::ops::BitAndAssign for KA { - fn bitand_assign(&mut self, enabled: bool) { - if !enabled { - trace!("remote disabling keep-alive"); - *self = KA::Disabled; - } - } -} - -#[derive(Clone, Copy, Debug)] -enum KA { - Idle, - Busy, - Disabled, -} - -impl Default for KA { - fn default() -> KA { - KA::Busy - } -} - -impl KA { - fn idle(&mut self) { - *self = KA::Idle; - } - - fn busy(&mut self) { - *self = KA::Busy; - } - - fn disable(&mut self) { - *self = KA::Disabled; - } - - fn status(&self) -> KA { - *self - } -} - -impl State { - fn close(&mut self) { - trace!("State::close()"); - self.reading = Reading::Closed; - self.writing = Writing::Closed; - self.keep_alive.disable(); - } - - fn close_read(&mut self) { - trace!("State::close_read()"); - self.reading = Reading::Closed; - self.keep_alive.disable(); - } - - fn close_write(&mut self) { - trace!("State::close_write()"); - self.writing = Writing::Closed; - self.keep_alive.disable(); - } - - fn wants_keep_alive(&self) -> bool { - if let KA::Disabled = self.keep_alive.status() { - false - } else { - true - } - } - - fn try_keep_alive(&mut self) { - match (&self.reading, &self.writing) { - (&Reading::KeepAlive, &Writing::KeepAlive) => { - if let KA::Busy = self.keep_alive.status() { - self.idle::(); - } else { - trace!( - "try_keep_alive({}): could keep-alive, but status = {:?}", - T::LOG, - self.keep_alive - ); - self.close(); - } - } - (&Reading::Closed, &Writing::KeepAlive) | (&Reading::KeepAlive, &Writing::Closed) => { - self.close() - } - _ => (), - } - } - - fn disable_keep_alive(&mut self) { - self.keep_alive.disable() - } - - fn busy(&mut self) { - if 
let KA::Disabled = self.keep_alive.status() { - return; - } - self.keep_alive.busy(); - } - - fn idle(&mut self) { - debug_assert!(!self.is_idle(), "State::idle() called while idle"); - - self.method = None; - self.keep_alive.idle(); - - if !self.is_idle() { - self.close(); - return; - } - - self.reading = Reading::Init; - self.writing = Writing::Init; - - // !T::should_read_first() means Client. - // - // If Client connection has just gone idle, the Dispatcher - // should try the poll loop one more time, so as to poll the - // pending requests stream. - if !T::should_read_first() { - self.notify_read = true; - } - } - - fn is_idle(&self) -> bool { - matches!(self.keep_alive.status(), KA::Idle) - } - - fn is_read_closed(&self) -> bool { - matches!(self.reading, Reading::Closed) - } - - fn is_write_closed(&self) -> bool { - matches!(self.writing, Writing::Closed) - } - - fn prepare_upgrade(&mut self) -> crate::upgrade::OnUpgrade { - let (tx, rx) = crate::upgrade::pending(); - self.upgrade = Some(tx); - rx - } -} - -#[cfg(test)] -mod tests { - #[cfg(feature = "nightly")] - #[bench] - fn bench_read_head_short(b: &mut ::test::Bencher) { - use super::*; - let s = b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"; - let len = s.len(); - b.bytes = len as u64; - - // an empty IO, we'll be skipping and using the read buffer anyways - let io = tokio_test::io::Builder::new().build(); - let mut conn = Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io); - *conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]); - conn.state.cached_headers = Some(HeaderMap::with_capacity(2)); - - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap(); - - b.iter(|| { - rt.block_on(futures_util::future::poll_fn(|cx| { - match conn.poll_read_head(cx) { - Poll::Ready(Some(Ok(x))) => { - ::test::black_box(&x); - let mut headers = x.0.headers; - headers.clear(); - conn.state.cached_headers = Some(headers); - } - f => panic!("expected 
Ready(Some(Ok(..))): {:?}", f), - } - - conn.io.read_buf_mut().reserve(1); - unsafe { - conn.io.read_buf_mut().set_len(len); - } - conn.state.reading = Reading::Init; - Poll::Ready(()) - })); - }); - } - - /* - //TODO: rewrite these using dispatch... someday... - use futures::{Async, Future, Stream, Sink}; - use futures::future; - - use proto::{self, ClientTransaction, MessageHead, ServerTransaction}; - use super::super::Encoder; - use mock::AsyncIo; - - use super::{Conn, Decoder, Reading, Writing}; - use ::uri::Uri; - - use std::str::FromStr; - - #[test] - fn test_conn_init_read() { - let good_message = b"GET / HTTP/1.1\r\n\r\n".to_vec(); - let len = good_message.len(); - let io = AsyncIo::new_buf(good_message, len); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - - match conn.poll().unwrap() { - Async::Ready(Some(Frame::Message { message, body: false })) => { - assert_eq!(message, MessageHead { - subject: ::proto::RequestLine(::Get, Uri::from_str("/").unwrap()), - .. MessageHead::default() - }) - }, - f => panic!("frame is not Frame::Message: {:?}", f) - } - } - - #[test] - fn test_conn_parse_partial() { - let _: Result<(), ()> = future::lazy(|| { - let good_message = b"GET / HTTP/1.1\r\nHost: foo.bar\r\n\r\n".to_vec(); - let io = AsyncIo::new_buf(good_message, 10); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - assert!(conn.poll().unwrap().is_not_ready()); - conn.io.io_mut().block_in(50); - let async = conn.poll().unwrap(); - assert!(async.is_ready()); - match async { - Async::Ready(Some(Frame::Message { .. 
})) => (), - f => panic!("frame is not Message: {:?}", f), - } - Ok(()) - }).wait(); - } - - #[test] - fn test_conn_init_read_eof_idle() { - let io = AsyncIo::new_buf(vec![], 1); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.idle(); - - match conn.poll().unwrap() { - Async::Ready(None) => {}, - other => panic!("frame is not None: {:?}", other) - } - } - - #[test] - fn test_conn_init_read_eof_idle_partial_parse() { - let io = AsyncIo::new_buf(b"GET / HTTP/1.1".to_vec(), 100); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.idle(); - - match conn.poll() { - Err(ref err) if err.kind() == std::io::ErrorKind::UnexpectedEof => {}, - other => panic!("unexpected frame: {:?}", other) - } - } - - #[test] - fn test_conn_init_read_eof_busy() { - let _: Result<(), ()> = future::lazy(|| { - // server ignores - let io = AsyncIo::new_eof(); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.busy(); - - match conn.poll().unwrap() { - Async::Ready(None) => {}, - other => panic!("unexpected frame: {:?}", other) - } - - // client - let io = AsyncIo::new_eof(); - let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io); - conn.state.busy(); - - match conn.poll() { - Err(ref err) if err.kind() == std::io::ErrorKind::UnexpectedEof => {}, - other => panic!("unexpected frame: {:?}", other) - } - Ok(()) - }).wait(); - } - - #[test] - fn test_conn_body_finish_read_eof() { - let _: Result<(), ()> = future::lazy(|| { - let io = AsyncIo::new_eof(); - let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io); - conn.state.busy(); - conn.state.writing = Writing::KeepAlive; - conn.state.reading = Reading::Body(Decoder::length(0)); - - match conn.poll() { - Ok(Async::Ready(Some(Frame::Body { chunk: None }))) => (), - other => panic!("unexpected frame: {:?}", other) - } - - // conn eofs, but tokio-proto will call poll() again, before calling flush() - // the conn eof in this 
case is perfectly fine - - match conn.poll() { - Ok(Async::Ready(None)) => (), - other => panic!("unexpected frame: {:?}", other) - } - Ok(()) - }).wait(); - } - - #[test] - fn test_conn_message_empty_body_read_eof() { - let _: Result<(), ()> = future::lazy(|| { - let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec(), 1024); - let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io); - conn.state.busy(); - conn.state.writing = Writing::KeepAlive; - - match conn.poll() { - Ok(Async::Ready(Some(Frame::Message { body: false, .. }))) => (), - other => panic!("unexpected frame: {:?}", other) - } - - // conn eofs, but tokio-proto will call poll() again, before calling flush() - // the conn eof in this case is perfectly fine - - match conn.poll() { - Ok(Async::Ready(None)) => (), - other => panic!("unexpected frame: {:?}", other) - } - Ok(()) - }).wait(); - } - - #[test] - fn test_conn_read_body_end() { - let _: Result<(), ()> = future::lazy(|| { - let io = AsyncIo::new_buf(b"POST / HTTP/1.1\r\nContent-Length: 5\r\n\r\n12345".to_vec(), 1024); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.busy(); - - match conn.poll() { - Ok(Async::Ready(Some(Frame::Message { body: true, .. 
}))) => (), - other => panic!("unexpected frame: {:?}", other) - } - - match conn.poll() { - Ok(Async::Ready(Some(Frame::Body { chunk: Some(_) }))) => (), - other => panic!("unexpected frame: {:?}", other) - } - - // When the body is done, `poll` MUST return a `Body` frame with chunk set to `None` - match conn.poll() { - Ok(Async::Ready(Some(Frame::Body { chunk: None }))) => (), - other => panic!("unexpected frame: {:?}", other) - } - - match conn.poll() { - Ok(Async::NotReady) => (), - other => panic!("unexpected frame: {:?}", other) - } - Ok(()) - }).wait(); - } - - #[test] - fn test_conn_closed_read() { - let io = AsyncIo::new_buf(vec![], 0); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.close(); - - match conn.poll().unwrap() { - Async::Ready(None) => {}, - other => panic!("frame is not None: {:?}", other) - } - } - - #[test] - fn test_conn_body_write_length() { - let _ = pretty_env_logger::try_init(); - let _: Result<(), ()> = future::lazy(|| { - let io = AsyncIo::new_buf(vec![], 0); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - let max = super::super::io::DEFAULT_MAX_BUFFER_SIZE + 4096; - conn.state.writing = Writing::Body(Encoder::length((max * 2) as u64)); - - assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; max].into()) }).unwrap().is_ready()); - assert!(!conn.can_buffer_body()); - - assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'b'; 1024 * 8].into()) }).unwrap().is_not_ready()); - - conn.io.io_mut().block_in(1024 * 3); - assert!(conn.poll_complete().unwrap().is_not_ready()); - conn.io.io_mut().block_in(1024 * 3); - assert!(conn.poll_complete().unwrap().is_not_ready()); - conn.io.io_mut().block_in(max * 2); - assert!(conn.poll_complete().unwrap().is_ready()); - - assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'c'; 1024 * 8].into()) }).unwrap().is_ready()); - Ok(()) - }).wait(); - } - - #[test] - fn test_conn_body_write_chunked() { - let _: Result<(), ()> = 
future::lazy(|| { - let io = AsyncIo::new_buf(vec![], 4096); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.writing = Writing::Body(Encoder::chunked()); - - assert!(conn.start_send(Frame::Body { chunk: Some("headers".into()) }).unwrap().is_ready()); - assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'x'; 8192].into()) }).unwrap().is_ready()); - Ok(()) - }).wait(); - } - - #[test] - fn test_conn_body_flush() { - let _: Result<(), ()> = future::lazy(|| { - let io = AsyncIo::new_buf(vec![], 1024 * 1024 * 5); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.writing = Writing::Body(Encoder::length(1024 * 1024)); - assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; 1024 * 1024].into()) }).unwrap().is_ready()); - assert!(!conn.can_buffer_body()); - conn.io.io_mut().block_in(1024 * 1024 * 5); - assert!(conn.poll_complete().unwrap().is_ready()); - assert!(conn.can_buffer_body()); - assert!(conn.io.io_mut().flushed()); - - Ok(()) - }).wait(); - } - - #[test] - fn test_conn_parking() { - use std::sync::Arc; - use futures::executor::Notify; - use futures::executor::NotifyHandle; - - struct Car { - permit: bool, - } - impl Notify for Car { - fn notify(&self, _id: usize) { - assert!(self.permit, "unparked without permit"); - } - } - - fn car(permit: bool) -> NotifyHandle { - Arc::new(Car { - permit: permit, - }).into() - } - - // test that once writing is done, unparks - let f = future::lazy(|| { - let io = AsyncIo::new_buf(vec![], 4096); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.reading = Reading::KeepAlive; - assert!(conn.poll().unwrap().is_not_ready()); - - conn.state.writing = Writing::KeepAlive; - assert!(conn.poll_complete().unwrap().is_ready()); - Ok::<(), ()>(()) - }); - ::futures::executor::spawn(f).poll_future_notify(&car(true), 0).unwrap(); - - - // test that flushing when not waiting on read doesn't unpark - let f = future::lazy(|| { - let 
io = AsyncIo::new_buf(vec![], 4096); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.writing = Writing::KeepAlive; - assert!(conn.poll_complete().unwrap().is_ready()); - Ok::<(), ()>(()) - }); - ::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap(); - - - // test that flushing and writing isn't done doesn't unpark - let f = future::lazy(|| { - let io = AsyncIo::new_buf(vec![], 4096); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.reading = Reading::KeepAlive; - assert!(conn.poll().unwrap().is_not_ready()); - conn.state.writing = Writing::Body(Encoder::length(5_000)); - assert!(conn.poll_complete().unwrap().is_ready()); - Ok::<(), ()>(()) - }); - ::futures::executor::spawn(f).poll_future_notify(&car(false), 0).unwrap(); - } - - #[test] - fn test_conn_closed_write() { - let io = AsyncIo::new_buf(vec![], 0); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.close(); - - match conn.start_send(Frame::Body { chunk: Some(b"foobar".to_vec().into()) }) { - Err(_e) => {}, - other => panic!("did not return Err: {:?}", other) - } - - assert!(conn.state.is_write_closed()); - } - - #[test] - fn test_conn_write_empty_chunk() { - let io = AsyncIo::new_buf(vec![], 0); - let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io); - conn.state.writing = Writing::KeepAlive; - - assert!(conn.start_send(Frame::Body { chunk: None }).unwrap().is_ready()); - assert!(conn.start_send(Frame::Body { chunk: Some(Vec::new().into()) }).unwrap().is_ready()); - conn.start_send(Frame::Body { chunk: Some(vec![b'a'].into()) }).unwrap_err(); - } - */ -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/decode.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/decode.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/decode.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/decode.rs 1970-01-01 01:00:00.000000000 +0100 
@@ -1,731 +0,0 @@ -use std::error::Error as StdError; -use std::fmt; -use std::io; -use std::usize; - -use bytes::Bytes; -use tracing::{debug, trace}; - -use crate::common::{task, Poll}; - -use super::io::MemRead; -use super::DecodedLength; - -use self::Kind::{Chunked, Eof, Length}; - -/// Decoders to handle different Transfer-Encodings. -/// -/// If a message body does not include a Transfer-Encoding, it *should* -/// include a Content-Length header. -#[derive(Clone, PartialEq)] -pub(crate) struct Decoder { - kind: Kind, -} - -#[derive(Debug, Clone, Copy, PartialEq)] -enum Kind { - /// A Reader used when a Content-Length header is passed with a positive integer. - Length(u64), - /// A Reader used when Transfer-Encoding is `chunked`. - Chunked(ChunkedState, u64), - /// A Reader used for responses that don't indicate a length or chunked. - /// - /// The bool tracks when EOF is seen on the transport. - /// - /// Note: This should only used for `Response`s. It is illegal for a - /// `Request` to be made with both `Content-Length` and - /// `Transfer-Encoding: chunked` missing, as explained from the spec: - /// - /// > If a Transfer-Encoding header field is present in a response and - /// > the chunked transfer coding is not the final encoding, the - /// > message body length is determined by reading the connection until - /// > it is closed by the server. If a Transfer-Encoding header field - /// > is present in a request and the chunked transfer coding is not - /// > the final encoding, the message body length cannot be determined - /// > reliably; the server MUST respond with the 400 (Bad Request) - /// > status code and then close the connection. 
- Eof(bool), -} - -#[derive(Debug, PartialEq, Clone, Copy)] -enum ChunkedState { - Size, - SizeLws, - Extension, - SizeLf, - Body, - BodyCr, - BodyLf, - Trailer, - TrailerLf, - EndCr, - EndLf, - End, -} - -impl Decoder { - // constructors - - pub(crate) fn length(x: u64) -> Decoder { - Decoder { - kind: Kind::Length(x), - } - } - - pub(crate) fn chunked() -> Decoder { - Decoder { - kind: Kind::Chunked(ChunkedState::Size, 0), - } - } - - pub(crate) fn eof() -> Decoder { - Decoder { - kind: Kind::Eof(false), - } - } - - pub(super) fn new(len: DecodedLength) -> Self { - match len { - DecodedLength::CHUNKED => Decoder::chunked(), - DecodedLength::CLOSE_DELIMITED => Decoder::eof(), - length => Decoder::length(length.danger_len()), - } - } - - // methods - - pub(crate) fn is_eof(&self) -> bool { - matches!(self.kind, Length(0) | Chunked(ChunkedState::End, _) | Eof(true)) - } - - pub(crate) fn decode( - &mut self, - cx: &mut task::Context<'_>, - body: &mut R, - ) -> Poll> { - trace!("decode; state={:?}", self.kind); - match self.kind { - Length(ref mut remaining) => { - if *remaining == 0 { - Poll::Ready(Ok(Bytes::new())) - } else { - let to_read = *remaining as usize; - let buf = ready!(body.read_mem(cx, to_read))?; - let num = buf.as_ref().len() as u64; - if num > *remaining { - *remaining = 0; - } else if num == 0 { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::UnexpectedEof, - IncompleteBody, - ))); - } else { - *remaining -= num; - } - Poll::Ready(Ok(buf)) - } - } - Chunked(ref mut state, ref mut size) => { - loop { - let mut buf = None; - // advances the chunked state - *state = ready!(state.step(cx, body, size, &mut buf))?; - if *state == ChunkedState::End { - trace!("end of chunked"); - return Poll::Ready(Ok(Bytes::new())); - } - if let Some(buf) = buf { - return Poll::Ready(Ok(buf)); - } - } - } - Eof(ref mut is_eof) => { - if *is_eof { - Poll::Ready(Ok(Bytes::new())) - } else { - // 8192 chosen because its about 2 packets, there probably - // won't 
be that much available, so don't have MemReaders - // allocate buffers to big - body.read_mem(cx, 8192).map_ok(|slice| { - *is_eof = slice.is_empty(); - slice - }) - } - } - } - } - - #[cfg(test)] - async fn decode_fut(&mut self, body: &mut R) -> Result { - futures_util::future::poll_fn(move |cx| self.decode(cx, body)).await - } -} - -impl fmt::Debug for Decoder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&self.kind, f) - } -} - -macro_rules! byte ( - ($rdr:ident, $cx:expr) => ({ - let buf = ready!($rdr.read_mem($cx, 1))?; - if !buf.is_empty() { - buf[0] - } else { - return Poll::Ready(Err(io::Error::new(io::ErrorKind::UnexpectedEof, - "unexpected EOF during chunk size line"))); - } - }) -); - -impl ChunkedState { - fn step( - &self, - cx: &mut task::Context<'_>, - body: &mut R, - size: &mut u64, - buf: &mut Option, - ) -> Poll> { - use self::ChunkedState::*; - match *self { - Size => ChunkedState::read_size(cx, body, size), - SizeLws => ChunkedState::read_size_lws(cx, body), - Extension => ChunkedState::read_extension(cx, body), - SizeLf => ChunkedState::read_size_lf(cx, body, *size), - Body => ChunkedState::read_body(cx, body, size, buf), - BodyCr => ChunkedState::read_body_cr(cx, body), - BodyLf => ChunkedState::read_body_lf(cx, body), - Trailer => ChunkedState::read_trailer(cx, body), - TrailerLf => ChunkedState::read_trailer_lf(cx, body), - EndCr => ChunkedState::read_end_cr(cx, body), - EndLf => ChunkedState::read_end_lf(cx, body), - End => Poll::Ready(Ok(ChunkedState::End)), - } - } - fn read_size( - cx: &mut task::Context<'_>, - rdr: &mut R, - size: &mut u64, - ) -> Poll> { - trace!("Read chunk hex size"); - - macro_rules! 
or_overflow { - ($e:expr) => ( - match $e { - Some(val) => val, - None => return Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidData, - "invalid chunk size: overflow", - ))), - } - ) - } - - let radix = 16; - match byte!(rdr, cx) { - b @ b'0'..=b'9' => { - *size = or_overflow!(size.checked_mul(radix)); - *size = or_overflow!(size.checked_add((b - b'0') as u64)); - } - b @ b'a'..=b'f' => { - *size = or_overflow!(size.checked_mul(radix)); - *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64)); - } - b @ b'A'..=b'F' => { - *size = or_overflow!(size.checked_mul(radix)); - *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64)); - } - b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)), - b';' => return Poll::Ready(Ok(ChunkedState::Extension)), - b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)), - _ => { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk size line: Invalid Size", - ))); - } - } - Poll::Ready(Ok(ChunkedState::Size)) - } - fn read_size_lws( - cx: &mut task::Context<'_>, - rdr: &mut R, - ) -> Poll> { - trace!("read_size_lws"); - match byte!(rdr, cx) { - // LWS can follow the chunk size, but no more digits can come - b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)), - b';' => Poll::Ready(Ok(ChunkedState::Extension)), - b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk size linear white space", - ))), - } - } - fn read_extension( - cx: &mut task::Context<'_>, - rdr: &mut R, - ) -> Poll> { - trace!("read_extension"); - // We don't care about extensions really at all. Just ignore them. - // They "end" at the next CRLF. - // - // However, some implementations may not check for the CR, so to save - // them from themselves, we reject extensions containing plain LF as - // well. 
- match byte!(rdr, cx) { - b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)), - b'\n' => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidData, - "invalid chunk extension contains newline", - ))), - _ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions - } - } - fn read_size_lf( - cx: &mut task::Context<'_>, - rdr: &mut R, - size: u64, - ) -> Poll> { - trace!("Chunk size is {:?}", size); - match byte!(rdr, cx) { - b'\n' => { - if size == 0 { - Poll::Ready(Ok(ChunkedState::EndCr)) - } else { - debug!("incoming chunked header: {0:#X} ({0} bytes)", size); - Poll::Ready(Ok(ChunkedState::Body)) - } - } - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk size LF", - ))), - } - } - - fn read_body( - cx: &mut task::Context<'_>, - rdr: &mut R, - rem: &mut u64, - buf: &mut Option, - ) -> Poll> { - trace!("Chunked read, remaining={:?}", rem); - - // cap remaining bytes at the max capacity of usize - let rem_cap = match *rem { - r if r > usize::MAX as u64 => usize::MAX, - r => r as usize, - }; - - let to_read = rem_cap; - let slice = ready!(rdr.read_mem(cx, to_read))?; - let count = slice.len(); - - if count == 0 { - *rem = 0; - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::UnexpectedEof, - IncompleteBody, - ))); - } - *buf = Some(slice); - *rem -= count as u64; - - if *rem > 0 { - Poll::Ready(Ok(ChunkedState::Body)) - } else { - Poll::Ready(Ok(ChunkedState::BodyCr)) - } - } - fn read_body_cr( - cx: &mut task::Context<'_>, - rdr: &mut R, - ) -> Poll> { - match byte!(rdr, cx) { - b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk body CR", - ))), - } - } - fn read_body_lf( - cx: &mut task::Context<'_>, - rdr: &mut R, - ) -> Poll> { - match byte!(rdr, cx) { - b'\n' => Poll::Ready(Ok(ChunkedState::Size)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk body LF", - ))), - } - } - - fn 
read_trailer( - cx: &mut task::Context<'_>, - rdr: &mut R, - ) -> Poll> { - trace!("read_trailer"); - match byte!(rdr, cx) { - b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)), - _ => Poll::Ready(Ok(ChunkedState::Trailer)), - } - } - fn read_trailer_lf( - cx: &mut task::Context<'_>, - rdr: &mut R, - ) -> Poll> { - match byte!(rdr, cx) { - b'\n' => Poll::Ready(Ok(ChunkedState::EndCr)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid trailer end LF", - ))), - } - } - - fn read_end_cr( - cx: &mut task::Context<'_>, - rdr: &mut R, - ) -> Poll> { - match byte!(rdr, cx) { - b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)), - _ => Poll::Ready(Ok(ChunkedState::Trailer)), - } - } - fn read_end_lf( - cx: &mut task::Context<'_>, - rdr: &mut R, - ) -> Poll> { - match byte!(rdr, cx) { - b'\n' => Poll::Ready(Ok(ChunkedState::End)), - _ => Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid chunk end LF", - ))), - } - } -} - -#[derive(Debug)] -struct IncompleteBody; - -impl fmt::Display for IncompleteBody { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "end of file before message length reached") - } -} - -impl StdError for IncompleteBody {} - -#[cfg(test)] -mod tests { - use super::*; - use std::pin::Pin; - use std::time::Duration; - use tokio::io::{AsyncRead, ReadBuf}; - - impl<'a> MemRead for &'a [u8] { - fn read_mem(&mut self, _: &mut task::Context<'_>, len: usize) -> Poll> { - let n = std::cmp::min(len, self.len()); - if n > 0 { - let (a, b) = self.split_at(n); - let buf = Bytes::copy_from_slice(a); - *self = b; - Poll::Ready(Ok(buf)) - } else { - Poll::Ready(Ok(Bytes::new())) - } - } - } - - impl<'a> MemRead for &'a mut (dyn AsyncRead + Unpin) { - fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll> { - let mut v = vec![0; len]; - let mut buf = ReadBuf::new(&mut v); - ready!(Pin::new(self).poll_read(cx, &mut buf)?); - Poll::Ready(Ok(Bytes::copy_from_slice(&buf.filled()))) - } 
- } - - #[cfg(feature = "nightly")] - impl MemRead for Bytes { - fn read_mem(&mut self, _: &mut task::Context<'_>, len: usize) -> Poll> { - let n = std::cmp::min(len, self.len()); - let ret = self.split_to(n); - Poll::Ready(Ok(ret)) - } - } - - /* - use std::io; - use std::io::Write; - use super::Decoder; - use super::ChunkedState; - use futures::{Async, Poll}; - use bytes::{BytesMut, Bytes}; - use crate::mock::AsyncIo; - */ - - #[tokio::test] - async fn test_read_chunk_size() { - use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof}; - - async fn read(s: &str) -> u64 { - let mut state = ChunkedState::Size; - let rdr = &mut s.as_bytes(); - let mut size = 0; - loop { - let result = - futures_util::future::poll_fn(|cx| state.step(cx, rdr, &mut size, &mut None)) - .await; - let desc = format!("read_size failed for {:?}", s); - state = result.expect(desc.as_str()); - if state == ChunkedState::Body || state == ChunkedState::EndCr { - break; - } - } - size - } - - async fn read_err(s: &str, expected_err: io::ErrorKind) { - let mut state = ChunkedState::Size; - let rdr = &mut s.as_bytes(); - let mut size = 0; - loop { - let result = - futures_util::future::poll_fn(|cx| state.step(cx, rdr, &mut size, &mut None)) - .await; - state = match result { - Ok(s) => s, - Err(e) => { - assert!( - expected_err == e.kind(), - "Reading {:?}, expected {:?}, but got {:?}", - s, - expected_err, - e.kind() - ); - return; - } - }; - if state == ChunkedState::Body || state == ChunkedState::End { - panic!("Was Ok. 
Expected Err for {:?}", s); - } - } - } - - assert_eq!(1, read("1\r\n").await); - assert_eq!(1, read("01\r\n").await); - assert_eq!(0, read("0\r\n").await); - assert_eq!(0, read("00\r\n").await); - assert_eq!(10, read("A\r\n").await); - assert_eq!(10, read("a\r\n").await); - assert_eq!(255, read("Ff\r\n").await); - assert_eq!(255, read("Ff \r\n").await); - // Missing LF or CRLF - read_err("F\rF", InvalidInput).await; - read_err("F", UnexpectedEof).await; - // Invalid hex digit - read_err("X\r\n", InvalidInput).await; - read_err("1X\r\n", InvalidInput).await; - read_err("-\r\n", InvalidInput).await; - read_err("-1\r\n", InvalidInput).await; - // Acceptable (if not fully valid) extensions do not influence the size - assert_eq!(1, read("1;extension\r\n").await); - assert_eq!(10, read("a;ext name=value\r\n").await); - assert_eq!(1, read("1;extension;extension2\r\n").await); - assert_eq!(1, read("1;;; ;\r\n").await); - assert_eq!(2, read("2; extension...\r\n").await); - assert_eq!(3, read("3 ; extension=123\r\n").await); - assert_eq!(3, read("3 ;\r\n").await); - assert_eq!(3, read("3 ; \r\n").await); - // Invalid extensions cause an error - read_err("1 invalid extension\r\n", InvalidInput).await; - read_err("1 A\r\n", InvalidInput).await; - read_err("1;no CRLF", UnexpectedEof).await; - read_err("1;reject\nnewlines\r\n", InvalidData).await; - // Overflow - read_err("f0000000000000003\r\n", InvalidData).await; - } - - #[tokio::test] - async fn test_read_sized_early_eof() { - let mut bytes = &b"foo bar"[..]; - let mut decoder = Decoder::length(10); - assert_eq!(decoder.decode_fut(&mut bytes).await.unwrap().len(), 7); - let e = decoder.decode_fut(&mut bytes).await.unwrap_err(); - assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof); - } - - #[tokio::test] - async fn test_read_chunked_early_eof() { - let mut bytes = &b"\ - 9\r\n\ - foo bar\ - "[..]; - let mut decoder = Decoder::chunked(); - assert_eq!(decoder.decode_fut(&mut bytes).await.unwrap().len(), 7); - let e = 
decoder.decode_fut(&mut bytes).await.unwrap_err(); - assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof); - } - - #[tokio::test] - async fn test_read_chunked_single_read() { - let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..]; - let buf = Decoder::chunked() - .decode_fut(&mut mock_buf) - .await - .expect("decode"); - assert_eq!(16, buf.len()); - let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String"); - assert_eq!("1234567890abcdef", &result); - } - - #[tokio::test] - async fn test_read_chunked_trailer_with_missing_lf() { - let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..]; - let mut decoder = Decoder::chunked(); - decoder.decode_fut(&mut mock_buf).await.expect("decode"); - let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err(); - assert_eq!(e.kind(), io::ErrorKind::InvalidInput); - } - - #[tokio::test] - async fn test_read_chunked_after_eof() { - let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..]; - let mut decoder = Decoder::chunked(); - - // normal read - let buf = decoder.decode_fut(&mut mock_buf).await.unwrap(); - assert_eq!(16, buf.len()); - let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String"); - assert_eq!("1234567890abcdef", &result); - - // eof read - let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode"); - assert_eq!(0, buf.len()); - - // ensure read after eof also returns eof - let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode"); - assert_eq!(0, buf.len()); - } - - // perform an async read using a custom buffer size and causing a blocking - // read at the specified byte - async fn read_async(mut decoder: Decoder, content: &[u8], block_at: usize) -> String { - let mut outs = Vec::new(); - - let mut ins = if block_at == 0 { - tokio_test::io::Builder::new() - .wait(Duration::from_millis(10)) - .read(content) - .build() - } else { - tokio_test::io::Builder::new() - .read(&content[..block_at]) - .wait(Duration::from_millis(10)) - 
.read(&content[block_at..]) - .build() - }; - - let mut ins = &mut ins as &mut (dyn AsyncRead + Unpin); - - loop { - let buf = decoder - .decode_fut(&mut ins) - .await - .expect("unexpected decode error"); - if buf.is_empty() { - break; // eof - } - outs.extend(buf.as_ref()); - } - - String::from_utf8(outs).expect("decode String") - } - - // iterate over the different ways that this async read could go. - // tests blocking a read at each byte along the content - The shotgun approach - async fn all_async_cases(content: &str, expected: &str, decoder: Decoder) { - let content_len = content.len(); - for block_at in 0..content_len { - let actual = read_async(decoder.clone(), content.as_bytes(), block_at).await; - assert_eq!(expected, &actual) //, "Failed async. Blocking at {}", block_at); - } - } - - #[tokio::test] - async fn test_read_length_async() { - let content = "foobar"; - all_async_cases(content, content, Decoder::length(content.len() as u64)).await; - } - - #[tokio::test] - async fn test_read_chunked_async() { - let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n"; - let expected = "foobar"; - all_async_cases(content, expected, Decoder::chunked()).await; - } - - #[tokio::test] - async fn test_read_eof_async() { - let content = "foobar"; - all_async_cases(content, content, Decoder::eof()).await; - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_decode_chunked_1kb(b: &mut test::Bencher) { - let rt = new_runtime(); - - const LEN: usize = 1024; - let mut vec = Vec::new(); - vec.extend(format!("{:x}\r\n", LEN).as_bytes()); - vec.extend(&[0; LEN][..]); - vec.extend(b"\r\n"); - let content = Bytes::from(vec); - - b.bytes = LEN as u64; - - b.iter(|| { - let mut decoder = Decoder::chunked(); - rt.block_on(async { - let mut raw = content.clone(); - let chunk = decoder.decode_fut(&mut raw).await.unwrap(); - assert_eq!(chunk.len(), LEN); - }); - }); - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_decode_length_1kb(b: &mut test::Bencher) { - let rt = 
new_runtime(); - - const LEN: usize = 1024; - let content = Bytes::from(&[0; LEN][..]); - b.bytes = LEN as u64; - - b.iter(|| { - let mut decoder = Decoder::length(LEN as u64); - rt.block_on(async { - let mut raw = content.clone(); - let chunk = decoder.decode_fut(&mut raw).await.unwrap(); - assert_eq!(chunk.len(), LEN); - }); - }); - } - - #[cfg(feature = "nightly")] - fn new_runtime() -> tokio::runtime::Runtime { - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("rt build") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/dispatch.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/dispatch.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/dispatch.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/dispatch.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,759 +0,0 @@ -use std::error::Error as StdError; - -use bytes::{Buf, Bytes}; -use http::Request; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, trace}; - -use super::{Http1Transaction, Wants}; -use crate::body::{Body, DecodedLength, HttpBody}; -use crate::common::{task, Future, Pin, Poll, Unpin}; -use crate::proto::{ - BodyLength, Conn, Dispatched, MessageHead, RequestHead, -}; -use crate::upgrade::OnUpgrade; - -pub(crate) struct Dispatcher { - conn: Conn, - dispatch: D, - body_tx: Option, - body_rx: Pin>>, - is_closing: bool, -} - -pub(crate) trait Dispatch { - type PollItem; - type PollBody; - type PollError; - type RecvItem; - fn poll_msg( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>>; - fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>; - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; - fn should_poll(&self) -> bool; -} - -cfg_server! { - use crate::service::HttpService; - - pub(crate) struct Server, B> { - in_flight: Pin>>, - pub(crate) service: S, - } -} - -cfg_client! { - pin_project_lite::pin_project! 
{ - pub(crate) struct Client { - callback: Option, http::Response>>, - #[pin] - rx: ClientRx, - rx_closed: bool, - } - } - - type ClientRx = crate::client::dispatch::Receiver, http::Response>; -} - -impl Dispatcher -where - D: Dispatch< - PollItem = MessageHead, - PollBody = Bs, - RecvItem = MessageHead, - > + Unpin, - D::PollError: Into>, - I: AsyncRead + AsyncWrite + Unpin, - T: Http1Transaction + Unpin, - Bs: HttpBody + 'static, - Bs::Error: Into>, -{ - pub(crate) fn new(dispatch: D, conn: Conn) -> Self { - Dispatcher { - conn, - dispatch, - body_tx: None, - body_rx: Box::pin(None), - is_closing: false, - } - } - - #[cfg(feature = "server")] - pub(crate) fn disable_keep_alive(&mut self) { - self.conn.disable_keep_alive(); - if self.conn.is_write_closed() { - self.close(); - } - } - - pub(crate) fn into_inner(self) -> (I, Bytes, D) { - let (io, buf) = self.conn.into_inner(); - (io, buf, self.dispatch) - } - - /// Run this dispatcher until HTTP says this connection is done, - /// but don't call `AsyncWrite::shutdown` on the underlying IO. - /// - /// This is useful for old-style HTTP upgrades, but ignores - /// newer-style upgrade API. - pub(crate) fn poll_without_shutdown( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll> - where - Self: Unpin, - { - Pin::new(self).poll_catch(cx, false).map_ok(|ds| { - if let Dispatched::Upgrade(pending) = ds { - pending.manual(); - } - }) - } - - fn poll_catch( - &mut self, - cx: &mut task::Context<'_>, - should_shutdown: bool, - ) -> Poll> { - Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| { - // Be sure to alert a streaming body of the failure. - if let Some(mut body) = self.body_tx.take() { - body.send_error(crate::Error::new_body("connection error")); - } - // An error means we're shutting down either way. - // We just try to give the error to the user, - // and close the connection with an Ok. If we - // cannot give it to the user, then return the Err. 
- self.dispatch.recv_msg(Err(e))?; - Ok(Dispatched::Shutdown) - })) - } - - fn poll_inner( - &mut self, - cx: &mut task::Context<'_>, - should_shutdown: bool, - ) -> Poll> { - T::update_date(); - - ready!(self.poll_loop(cx))?; - - if self.is_done() { - if let Some(pending) = self.conn.pending_upgrade() { - self.conn.take_error()?; - return Poll::Ready(Ok(Dispatched::Upgrade(pending))); - } else if should_shutdown { - ready!(self.conn.poll_shutdown(cx)).map_err(crate::Error::new_shutdown)?; - } - self.conn.take_error()?; - Poll::Ready(Ok(Dispatched::Shutdown)) - } else { - Poll::Pending - } - } - - fn poll_loop(&mut self, cx: &mut task::Context<'_>) -> Poll> { - // Limit the looping on this connection, in case it is ready far too - // often, so that other futures don't starve. - // - // 16 was chosen arbitrarily, as that is number of pipelined requests - // benchmarks often use. Perhaps it should be a config option instead. - for _ in 0..16 { - let _ = self.poll_read(cx)?; - let _ = self.poll_write(cx)?; - let _ = self.poll_flush(cx)?; - - // This could happen if reading paused before blocking on IO, - // such as getting to the end of a framed message, but then - // writing/flushing set the state back to Init. In that case, - // if the read buffer still had bytes, we'd want to try poll_read - // again, or else we wouldn't ever be woken up again. - // - // Using this instead of task::current() and notify() inside - // the Conn is noticeably faster in pipelined benchmarks. 
- if !self.conn.wants_read_again() { - //break; - return Poll::Ready(Ok(())); - } - } - - trace!("poll_loop yielding (self = {:p})", self); - - task::yield_now(cx).map(|never| match never {}) - } - - fn poll_read(&mut self, cx: &mut task::Context<'_>) -> Poll> { - loop { - if self.is_closing { - return Poll::Ready(Ok(())); - } else if self.conn.can_read_head() { - ready!(self.poll_read_head(cx))?; - } else if let Some(mut body) = self.body_tx.take() { - if self.conn.can_read_body() { - match body.poll_ready(cx) { - Poll::Ready(Ok(())) => (), - Poll::Pending => { - self.body_tx = Some(body); - return Poll::Pending; - } - Poll::Ready(Err(_canceled)) => { - // user doesn't care about the body - // so we should stop reading - trace!("body receiver dropped before eof, draining or closing"); - self.conn.poll_drain_or_close_read(cx); - continue; - } - } - match self.conn.poll_read_body(cx) { - Poll::Ready(Some(Ok(chunk))) => match body.try_send_data(chunk) { - Ok(()) => { - self.body_tx = Some(body); - } - Err(_canceled) => { - if self.conn.can_read_body() { - trace!("body receiver dropped before eof, closing"); - self.conn.close_read(); - } - } - }, - Poll::Ready(None) => { - // just drop, the body will close automatically - } - Poll::Pending => { - self.body_tx = Some(body); - return Poll::Pending; - } - Poll::Ready(Some(Err(e))) => { - body.send_error(crate::Error::new_body(e)); - } - } - } else { - // just drop, the body will close automatically - } - } else { - return self.conn.poll_read_keep_alive(cx); - } - } - } - - fn poll_read_head(&mut self, cx: &mut task::Context<'_>) -> Poll> { - // can dispatch receive, or does it still care about, an incoming message? 
- match ready!(self.dispatch.poll_ready(cx)) { - Ok(()) => (), - Err(()) => { - trace!("dispatch no longer receiving messages"); - self.close(); - return Poll::Ready(Ok(())); - } - } - // dispatch is ready for a message, try to read one - match ready!(self.conn.poll_read_head(cx)) { - Some(Ok((mut head, body_len, wants))) => { - let body = match body_len { - DecodedLength::ZERO => Body::empty(), - other => { - let (tx, rx) = Body::new_channel(other, wants.contains(Wants::EXPECT)); - self.body_tx = Some(tx); - rx - } - }; - if wants.contains(Wants::UPGRADE) { - let upgrade = self.conn.on_upgrade(); - debug_assert!(!upgrade.is_none(), "empty upgrade"); - debug_assert!(head.extensions.get::().is_none(), "OnUpgrade already set"); - head.extensions.insert(upgrade); - } - self.dispatch.recv_msg(Ok((head, body)))?; - Poll::Ready(Ok(())) - } - Some(Err(err)) => { - debug!("read_head error: {}", err); - self.dispatch.recv_msg(Err(err))?; - // if here, the dispatcher gave the user the error - // somewhere else. we still need to shutdown, but - // not as a second error. - self.close(); - Poll::Ready(Ok(())) - } - None => { - // read eof, the write side will have been closed too unless - // allow_read_close was set to true, in which case just do - // nothing... - debug_assert!(self.conn.is_read_closed()); - if self.conn.is_write_closed() { - self.close(); - } - Poll::Ready(Ok(())) - } - } - } - - fn poll_write(&mut self, cx: &mut task::Context<'_>) -> Poll> { - loop { - if self.is_closing { - return Poll::Ready(Ok(())); - } else if self.body_rx.is_none() - && self.conn.can_write_head() - && self.dispatch.should_poll() - { - if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) { - let (head, mut body) = msg.map_err(crate::Error::new_user_service)?; - - // Check if the body knows its full data immediately. - // - // If so, we can skip a bit of bookkeeping that streaming - // bodies need to do. 
- if let Some(full) = crate::body::take_full_data(&mut body) { - self.conn.write_full_msg(head, full); - return Poll::Ready(Ok(())); - } - - let body_type = if body.is_end_stream() { - self.body_rx.set(None); - None - } else { - let btype = body - .size_hint() - .exact() - .map(BodyLength::Known) - .or_else(|| Some(BodyLength::Unknown)); - self.body_rx.set(Some(body)); - btype - }; - self.conn.write_head(head, body_type); - } else { - self.close(); - return Poll::Ready(Ok(())); - } - } else if !self.conn.can_buffer_body() { - ready!(self.poll_flush(cx))?; - } else { - // A new scope is needed :( - if let (Some(mut body), clear_body) = - OptGuard::new(self.body_rx.as_mut()).guard_mut() - { - debug_assert!(!*clear_body, "opt guard defaults to keeping body"); - if !self.conn.can_write_body() { - trace!( - "no more write body allowed, user body is_end_stream = {}", - body.is_end_stream(), - ); - *clear_body = true; - continue; - } - - let item = ready!(body.as_mut().poll_data(cx)); - if let Some(item) = item { - let chunk = item.map_err(|e| { - *clear_body = true; - crate::Error::new_user_body(e) - })?; - let eos = body.is_end_stream(); - if eos { - *clear_body = true; - if chunk.remaining() == 0 { - trace!("discarding empty chunk"); - self.conn.end_body()?; - } else { - self.conn.write_body_and_end(chunk); - } - } else { - if chunk.remaining() == 0 { - trace!("discarding empty chunk"); - continue; - } - self.conn.write_body(chunk); - } - } else { - *clear_body = true; - self.conn.end_body()?; - } - } else { - // If there's no body_rx, end the body - if self.conn.can_write_body() { - self.conn.end_body()?; - } else { - return Poll::Pending; - } - } - } - } - } - - fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { - self.conn.poll_flush(cx).map_err(|err| { - debug!("error writing: {}", err); - crate::Error::new_body_write(err) - }) - } - - fn close(&mut self) { - self.is_closing = true; - self.conn.close_read(); - self.conn.close_write(); - } - - fn 
is_done(&self) -> bool { - if self.is_closing { - return true; - } - - let read_done = self.conn.is_read_closed(); - - if !T::should_read_first() && read_done { - // a client that cannot read may was well be done. - true - } else { - let write_done = self.conn.is_write_closed() - || (!self.dispatch.should_poll() && self.body_rx.is_none()); - read_done && write_done - } - } -} - -impl Future for Dispatcher -where - D: Dispatch< - PollItem = MessageHead, - PollBody = Bs, - RecvItem = MessageHead, - > + Unpin, - D::PollError: Into>, - I: AsyncRead + AsyncWrite + Unpin, - T: Http1Transaction + Unpin, - Bs: HttpBody + 'static, - Bs::Error: Into>, -{ - type Output = crate::Result; - - #[inline] - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.poll_catch(cx, true) - } -} - -// ===== impl OptGuard ===== - -/// A drop guard to allow a mutable borrow of an Option while being able to -/// set whether the `Option` should be cleared on drop. -struct OptGuard<'a, T>(Pin<&'a mut Option>, bool); - -impl<'a, T> OptGuard<'a, T> { - fn new(pin: Pin<&'a mut Option>) -> Self { - OptGuard(pin, false) - } - - fn guard_mut(&mut self) -> (Option>, &mut bool) { - (self.0.as_mut().as_pin_mut(), &mut self.1) - } -} - -impl<'a, T> Drop for OptGuard<'a, T> { - fn drop(&mut self) { - if self.1 { - self.0.set(None); - } - } -} - -// ===== impl Server ===== - -cfg_server! 
{ - impl Server - where - S: HttpService, - { - pub(crate) fn new(service: S) -> Server { - Server { - in_flight: Box::pin(None), - service, - } - } - - pub(crate) fn into_service(self) -> S { - self.service - } - } - - // Service is never pinned - impl, B> Unpin for Server {} - - impl Dispatch for Server - where - S: HttpService, - S::Error: Into>, - Bs: HttpBody, - { - type PollItem = MessageHead; - type PollBody = Bs; - type PollError = S::Error; - type RecvItem = RequestHead; - - fn poll_msg( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>> { - let mut this = self.as_mut(); - let ret = if let Some(ref mut fut) = this.in_flight.as_mut().as_pin_mut() { - let resp = ready!(fut.as_mut().poll(cx)?); - let (parts, body) = resp.into_parts(); - let head = MessageHead { - version: parts.version, - subject: parts.status, - headers: parts.headers, - extensions: parts.extensions, - }; - Poll::Ready(Some(Ok((head, body)))) - } else { - unreachable!("poll_msg shouldn't be called if no inflight"); - }; - - // Since in_flight finished, remove it - this.in_flight.set(None); - ret - } - - fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { - let (msg, body) = msg?; - let mut req = Request::new(body); - *req.method_mut() = msg.subject.0; - *req.uri_mut() = msg.subject.1; - *req.headers_mut() = msg.headers; - *req.version_mut() = msg.version; - *req.extensions_mut() = msg.extensions; - let fut = self.service.call(req); - self.in_flight.set(Some(fut)); - Ok(()) - } - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - if self.in_flight.is_some() { - Poll::Pending - } else { - self.service.poll_ready(cx).map_err(|_e| { - // FIXME: return error value. - trace!("service closed"); - }) - } - } - - fn should_poll(&self) -> bool { - self.in_flight.is_some() - } - } -} - -// ===== impl Client ===== - -cfg_client! 
{ - impl Client { - pub(crate) fn new(rx: ClientRx) -> Client { - Client { - callback: None, - rx, - rx_closed: false, - } - } - } - - impl Dispatch for Client - where - B: HttpBody, - { - type PollItem = RequestHead; - type PollBody = B; - type PollError = crate::common::Never; - type RecvItem = crate::proto::ResponseHead; - - fn poll_msg( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>> { - let mut this = self.as_mut(); - debug_assert!(!this.rx_closed); - match this.rx.poll_recv(cx) { - Poll::Ready(Some((req, mut cb))) => { - // check that future hasn't been canceled already - match cb.poll_canceled(cx) { - Poll::Ready(()) => { - trace!("request canceled"); - Poll::Ready(None) - } - Poll::Pending => { - let (parts, body) = req.into_parts(); - let head = RequestHead { - version: parts.version, - subject: crate::proto::RequestLine(parts.method, parts.uri), - headers: parts.headers, - extensions: parts.extensions, - }; - this.callback = Some(cb); - Poll::Ready(Some(Ok((head, body)))) - } - } - } - Poll::Ready(None) => { - // user has dropped sender handle - trace!("client tx closed"); - this.rx_closed = true; - Poll::Ready(None) - } - Poll::Pending => Poll::Pending, - } - } - - fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> { - match msg { - Ok((msg, body)) => { - if let Some(cb) = self.callback.take() { - let res = msg.into_response(body); - cb.send(Ok(res)); - Ok(()) - } else { - // Getting here is likely a bug! An error should have happened - // in Conn::require_empty_read() before ever parsing a - // full message! 
- Err(crate::Error::new_unexpected_message()) - } - } - Err(err) => { - if let Some(cb) = self.callback.take() { - cb.send(Err((err, None))); - Ok(()) - } else if !self.rx_closed { - self.rx.close(); - if let Some((req, cb)) = self.rx.try_recv() { - trace!("canceling queued request with connection error: {}", err); - // in this case, the message was never even started, so it's safe to tell - // the user that the request was completely canceled - cb.send(Err((crate::Error::new_canceled().with(err), Some(req)))); - Ok(()) - } else { - Err(err) - } - } else { - Err(err) - } - } - } - } - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - match self.callback { - Some(ref mut cb) => match cb.poll_canceled(cx) { - Poll::Ready(()) => { - trace!("callback receiver has dropped"); - Poll::Ready(Err(())) - } - Poll::Pending => Poll::Ready(Ok(())), - }, - None => Poll::Ready(Err(())), - } - } - - fn should_poll(&self) -> bool { - self.callback.is_none() - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::proto::h1::ClientTransaction; - use std::time::Duration; - - #[test] - fn client_read_bytes_before_writing_request() { - let _ = pretty_env_logger::try_init(); - - tokio_test::task::spawn(()).enter(|cx, _| { - let (io, mut handle) = tokio_test::io::Builder::new().build_with_handle(); - - // Block at 0 for now, but we will release this response before - // the request is ready to write later... - let (mut tx, rx) = crate::client::dispatch::channel(); - let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); - let mut dispatcher = Dispatcher::new(Client::new(rx), conn); - - // First poll is needed to allow tx to send... - assert!(Pin::new(&mut dispatcher).poll(cx).is_pending()); - - // Unblock our IO, which has a response before we've sent request! 
- // - handle.read(b"HTTP/1.1 200 OK\r\n\r\n"); - - let mut res_rx = tx - .try_send(crate::Request::new(crate::Body::empty())) - .unwrap(); - - tokio_test::assert_ready_ok!(Pin::new(&mut dispatcher).poll(cx)); - let err = tokio_test::assert_ready_ok!(Pin::new(&mut res_rx).poll(cx)) - .expect_err("callback should send error"); - - match (err.0.kind(), err.1) { - (&crate::error::Kind::Canceled, Some(_)) => (), - other => panic!("expected Canceled, got {:?}", other), - } - }); - } - - #[tokio::test] - async fn client_flushing_is_not_ready_for_next_request() { - let _ = pretty_env_logger::try_init(); - - let (io, _handle) = tokio_test::io::Builder::new() - .write(b"POST / HTTP/1.1\r\ncontent-length: 4\r\n\r\n") - .read(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n") - .wait(std::time::Duration::from_secs(2)) - .build_with_handle(); - - let (mut tx, rx) = crate::client::dispatch::channel(); - let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); - conn.set_write_strategy_queue(); - - let dispatcher = Dispatcher::new(Client::new(rx), conn); - let _dispatcher = tokio::spawn(async move { dispatcher.await }); - - let req = crate::Request::builder() - .method("POST") - .body(crate::Body::from("reee")) - .unwrap(); - - let res = tx.try_send(req).unwrap().await.expect("response"); - drop(res); - - assert!(!tx.is_ready()); - } - - #[tokio::test] - async fn body_empty_chunks_ignored() { - let _ = pretty_env_logger::try_init(); - - let io = tokio_test::io::Builder::new() - // no reading or writing, just be blocked for the test... - .wait(Duration::from_secs(5)) - .build(); - - let (mut tx, rx) = crate::client::dispatch::channel(); - let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); - let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn)); - - // First poll is needed to allow tx to send... 
- assert!(dispatcher.poll().is_pending()); - - let body = { - let (mut tx, body) = crate::Body::channel(); - tx.try_send_data("".into()).unwrap(); - body - }; - - let _res_rx = tx.try_send(crate::Request::new(body)).unwrap(); - - // Ensure conn.write_body wasn't called with the empty chunk. - // If it is, it will trigger an assertion. - assert!(dispatcher.poll().is_pending()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/encode.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/encode.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/encode.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/encode.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,439 +0,0 @@ -use std::fmt; -use std::io::IoSlice; - -use bytes::buf::{Chain, Take}; -use bytes::Buf; -use tracing::trace; - -use super::io::WriteBuf; - -type StaticBuf = &'static [u8]; - -/// Encoders to handle different Transfer-Encodings. -#[derive(Debug, Clone, PartialEq)] -pub(crate) struct Encoder { - kind: Kind, - is_last: bool, -} - -#[derive(Debug)] -pub(crate) struct EncodedBuf { - kind: BufKind, -} - -#[derive(Debug)] -pub(crate) struct NotEof(u64); - -#[derive(Debug, PartialEq, Clone)] -enum Kind { - /// An Encoder for when Transfer-Encoding includes `chunked`. - Chunked, - /// An Encoder for when Content-Length is set. - /// - /// Enforces that the body is not longer than the Content-Length header. - Length(u64), - /// An Encoder for when neither Content-Length nor Chunked encoding is set. - /// - /// This is mostly only used with HTTP/1.0 with a length. This kind requires - /// the connection to be closed when the body is finished. 
- #[cfg(feature = "server")] - CloseDelimited, -} - -#[derive(Debug)] -enum BufKind { - Exact(B), - Limited(Take), - Chunked(Chain, StaticBuf>), - ChunkedEnd(StaticBuf), -} - -impl Encoder { - fn new(kind: Kind) -> Encoder { - Encoder { - kind, - is_last: false, - } - } - pub(crate) fn chunked() -> Encoder { - Encoder::new(Kind::Chunked) - } - - pub(crate) fn length(len: u64) -> Encoder { - Encoder::new(Kind::Length(len)) - } - - #[cfg(feature = "server")] - pub(crate) fn close_delimited() -> Encoder { - Encoder::new(Kind::CloseDelimited) - } - - pub(crate) fn is_eof(&self) -> bool { - matches!(self.kind, Kind::Length(0)) - } - - #[cfg(feature = "server")] - pub(crate) fn set_last(mut self, is_last: bool) -> Self { - self.is_last = is_last; - self - } - - pub(crate) fn is_last(&self) -> bool { - self.is_last - } - - pub(crate) fn is_close_delimited(&self) -> bool { - match self.kind { - #[cfg(feature = "server")] - Kind::CloseDelimited => true, - _ => false, - } - } - - pub(crate) fn end(&self) -> Result>, NotEof> { - match self.kind { - Kind::Length(0) => Ok(None), - Kind::Chunked => Ok(Some(EncodedBuf { - kind: BufKind::ChunkedEnd(b"0\r\n\r\n"), - })), - #[cfg(feature = "server")] - Kind::CloseDelimited => Ok(None), - Kind::Length(n) => Err(NotEof(n)), - } - } - - pub(crate) fn encode(&mut self, msg: B) -> EncodedBuf - where - B: Buf, - { - let len = msg.remaining(); - debug_assert!(len > 0, "encode() called with empty buf"); - - let kind = match self.kind { - Kind::Chunked => { - trace!("encoding chunked {}B", len); - let buf = ChunkSize::new(len) - .chain(msg) - .chain(b"\r\n" as &'static [u8]); - BufKind::Chunked(buf) - } - Kind::Length(ref mut remaining) => { - trace!("sized write, len = {}", len); - if len as u64 > *remaining { - let limit = *remaining as usize; - *remaining = 0; - BufKind::Limited(msg.take(limit)) - } else { - *remaining -= len as u64; - BufKind::Exact(msg) - } - } - #[cfg(feature = "server")] - Kind::CloseDelimited => { - trace!("close 
delimited write {}B", len); - BufKind::Exact(msg) - } - }; - EncodedBuf { kind } - } - - pub(super) fn encode_and_end(&self, msg: B, dst: &mut WriteBuf>) -> bool - where - B: Buf, - { - let len = msg.remaining(); - debug_assert!(len > 0, "encode() called with empty buf"); - - match self.kind { - Kind::Chunked => { - trace!("encoding chunked {}B", len); - let buf = ChunkSize::new(len) - .chain(msg) - .chain(b"\r\n0\r\n\r\n" as &'static [u8]); - dst.buffer(buf); - !self.is_last - } - Kind::Length(remaining) => { - use std::cmp::Ordering; - - trace!("sized write, len = {}", len); - match (len as u64).cmp(&remaining) { - Ordering::Equal => { - dst.buffer(msg); - !self.is_last - } - Ordering::Greater => { - dst.buffer(msg.take(remaining as usize)); - !self.is_last - } - Ordering::Less => { - dst.buffer(msg); - false - } - } - } - #[cfg(feature = "server")] - Kind::CloseDelimited => { - trace!("close delimited write {}B", len); - dst.buffer(msg); - false - } - } - } - - /// Encodes the full body, without verifying the remaining length matches. - /// - /// This is used in conjunction with HttpBody::__hyper_full_data(), which - /// means we can trust that the buf has the correct size (the buf itself - /// was checked to make the headers). 
- pub(super) fn danger_full_buf(self, msg: B, dst: &mut WriteBuf>) - where - B: Buf, - { - debug_assert!(msg.remaining() > 0, "encode() called with empty buf"); - debug_assert!( - match self.kind { - Kind::Length(len) => len == msg.remaining() as u64, - _ => true, - }, - "danger_full_buf length mismatches" - ); - - match self.kind { - Kind::Chunked => { - let len = msg.remaining(); - trace!("encoding chunked {}B", len); - let buf = ChunkSize::new(len) - .chain(msg) - .chain(b"\r\n0\r\n\r\n" as &'static [u8]); - dst.buffer(buf); - } - _ => { - dst.buffer(msg); - } - } - } -} - -impl Buf for EncodedBuf -where - B: Buf, -{ - #[inline] - fn remaining(&self) -> usize { - match self.kind { - BufKind::Exact(ref b) => b.remaining(), - BufKind::Limited(ref b) => b.remaining(), - BufKind::Chunked(ref b) => b.remaining(), - BufKind::ChunkedEnd(ref b) => b.remaining(), - } - } - - #[inline] - fn chunk(&self) -> &[u8] { - match self.kind { - BufKind::Exact(ref b) => b.chunk(), - BufKind::Limited(ref b) => b.chunk(), - BufKind::Chunked(ref b) => b.chunk(), - BufKind::ChunkedEnd(ref b) => b.chunk(), - } - } - - #[inline] - fn advance(&mut self, cnt: usize) { - match self.kind { - BufKind::Exact(ref mut b) => b.advance(cnt), - BufKind::Limited(ref mut b) => b.advance(cnt), - BufKind::Chunked(ref mut b) => b.advance(cnt), - BufKind::ChunkedEnd(ref mut b) => b.advance(cnt), - } - } - - #[inline] - fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { - match self.kind { - BufKind::Exact(ref b) => b.chunks_vectored(dst), - BufKind::Limited(ref b) => b.chunks_vectored(dst), - BufKind::Chunked(ref b) => b.chunks_vectored(dst), - BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst), - } - } -} - -#[cfg(target_pointer_width = "32")] -const USIZE_BYTES: usize = 4; - -#[cfg(target_pointer_width = "64")] -const USIZE_BYTES: usize = 8; - -// each byte will become 2 hex -const CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2; - -#[derive(Clone, Copy)] -struct ChunkSize { - bytes: 
[u8; CHUNK_SIZE_MAX_BYTES + 2], - pos: u8, - len: u8, -} - -impl ChunkSize { - fn new(len: usize) -> ChunkSize { - use std::fmt::Write; - let mut size = ChunkSize { - bytes: [0; CHUNK_SIZE_MAX_BYTES + 2], - pos: 0, - len: 0, - }; - write!(&mut size, "{:X}\r\n", len).expect("CHUNK_SIZE_MAX_BYTES should fit any usize"); - size - } -} - -impl Buf for ChunkSize { - #[inline] - fn remaining(&self) -> usize { - (self.len - self.pos).into() - } - - #[inline] - fn chunk(&self) -> &[u8] { - &self.bytes[self.pos.into()..self.len.into()] - } - - #[inline] - fn advance(&mut self, cnt: usize) { - assert!(cnt <= self.remaining()); - self.pos += cnt as u8; // just asserted cnt fits in u8 - } -} - -impl fmt::Debug for ChunkSize { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ChunkSize") - .field("bytes", &&self.bytes[..self.len.into()]) - .field("pos", &self.pos) - .finish() - } -} - -impl fmt::Write for ChunkSize { - fn write_str(&mut self, num: &str) -> fmt::Result { - use std::io::Write; - (&mut self.bytes[self.len.into()..]) - .write_all(num.as_bytes()) - .expect("&mut [u8].write() cannot error"); - self.len += num.len() as u8; // safe because bytes is never bigger than 256 - Ok(()) - } -} - -impl From for EncodedBuf { - fn from(buf: B) -> Self { - EncodedBuf { - kind: BufKind::Exact(buf), - } - } -} - -impl From> for EncodedBuf { - fn from(buf: Take) -> Self { - EncodedBuf { - kind: BufKind::Limited(buf), - } - } -} - -impl From, StaticBuf>> for EncodedBuf { - fn from(buf: Chain, StaticBuf>) -> Self { - EncodedBuf { - kind: BufKind::Chunked(buf), - } - } -} - -impl fmt::Display for NotEof { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "early end, expected {} more bytes", self.0) - } -} - -impl std::error::Error for NotEof {} - -#[cfg(test)] -mod tests { - use bytes::BufMut; - - use super::super::io::Cursor; - use super::Encoder; - - #[test] - fn chunked() { - let mut encoder = Encoder::chunked(); - let mut dst = 
Vec::new(); - - let msg1 = b"foo bar".as_ref(); - let buf1 = encoder.encode(msg1); - dst.put(buf1); - assert_eq!(dst, b"7\r\nfoo bar\r\n"); - - let msg2 = b"baz quux herp".as_ref(); - let buf2 = encoder.encode(msg2); - dst.put(buf2); - - assert_eq!(dst, b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n"); - - let end = encoder.end::>>().unwrap().unwrap(); - dst.put(end); - - assert_eq!( - dst, - b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n".as_ref() - ); - } - - #[test] - fn length() { - let max_len = 8; - let mut encoder = Encoder::length(max_len as u64); - let mut dst = Vec::new(); - - let msg1 = b"foo bar".as_ref(); - let buf1 = encoder.encode(msg1); - dst.put(buf1); - - assert_eq!(dst, b"foo bar"); - assert!(!encoder.is_eof()); - encoder.end::<()>().unwrap_err(); - - let msg2 = b"baz".as_ref(); - let buf2 = encoder.encode(msg2); - dst.put(buf2); - - assert_eq!(dst.len(), max_len); - assert_eq!(dst, b"foo barb"); - assert!(encoder.is_eof()); - assert!(encoder.end::<()>().unwrap().is_none()); - } - - #[test] - fn eof() { - let mut encoder = Encoder::close_delimited(); - let mut dst = Vec::new(); - - let msg1 = b"foo bar".as_ref(); - let buf1 = encoder.encode(msg1); - dst.put(buf1); - - assert_eq!(dst, b"foo bar"); - assert!(!encoder.is_eof()); - encoder.end::<()>().unwrap(); - - let msg2 = b"baz".as_ref(); - let buf2 = encoder.encode(msg2); - dst.put(buf2); - - assert_eq!(dst, b"foo barbaz"); - assert!(!encoder.is_eof()); - encoder.end::<()>().unwrap(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/io.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/io.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/io.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/io.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1002 +0,0 @@ -use std::cmp; -use std::fmt; -#[cfg(all(feature = "server", feature = "runtime"))] -use std::future::Future; -use std::io::{self, IoSlice}; -use std::marker::Unpin; -use 
std::mem::MaybeUninit; -#[cfg(all(feature = "server", feature = "runtime"))] -use std::time::Duration; - -use bytes::{Buf, BufMut, Bytes, BytesMut}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Instant; -use tracing::{debug, trace}; - -use super::{Http1Transaction, ParseContext, ParsedMessage}; -use crate::common::buf::BufList; -use crate::common::{task, Pin, Poll}; - -/// The initial buffer size allocated before trying to read from IO. -pub(crate) const INIT_BUFFER_SIZE: usize = 8192; - -/// The minimum value that can be set to max buffer size. -pub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE; - -/// The default maximum read buffer size. If the buffer gets this big and -/// a message is still not complete, a `TooLarge` error is triggered. -// Note: if this changes, update server::conn::Http::max_buf_size docs. -pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100; - -/// The maximum number of distinct `Buf`s to hold in a list before requiring -/// a flush. Only affects when the buffer strategy is to queue buffers. -/// -/// Note that a flush can happen before reaching the maximum. This simply -/// forces a flush if the queue gets this big. 
-const MAX_BUF_LIST_BUFFERS: usize = 16; - -pub(crate) struct Buffered { - flush_pipeline: bool, - io: T, - read_blocked: bool, - read_buf: BytesMut, - read_buf_strategy: ReadStrategy, - write_buf: WriteBuf, -} - -impl fmt::Debug for Buffered -where - B: Buf, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Buffered") - .field("read_buf", &self.read_buf) - .field("write_buf", &self.write_buf) - .finish() - } -} - -impl Buffered -where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, -{ - pub(crate) fn new(io: T) -> Buffered { - let strategy = if io.is_write_vectored() { - WriteStrategy::Queue - } else { - WriteStrategy::Flatten - }; - let write_buf = WriteBuf::new(strategy); - Buffered { - flush_pipeline: false, - io, - read_blocked: false, - read_buf: BytesMut::with_capacity(0), - read_buf_strategy: ReadStrategy::default(), - write_buf, - } - } - - #[cfg(feature = "server")] - pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) { - debug_assert!(!self.write_buf.has_remaining()); - self.flush_pipeline = enabled; - if enabled { - self.set_write_strategy_flatten(); - } - } - - pub(crate) fn set_max_buf_size(&mut self, max: usize) { - assert!( - max >= MINIMUM_MAX_BUFFER_SIZE, - "The max_buf_size cannot be smaller than {}.", - MINIMUM_MAX_BUFFER_SIZE, - ); - self.read_buf_strategy = ReadStrategy::with_max(max); - self.write_buf.max_buf_size = max; - } - - #[cfg(feature = "client")] - pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) { - self.read_buf_strategy = ReadStrategy::Exact(sz); - } - - pub(crate) fn set_write_strategy_flatten(&mut self) { - // this should always be called only at construction time, - // so this assert is here to catch myself - debug_assert!(self.write_buf.queue.bufs_cnt() == 0); - self.write_buf.set_strategy(WriteStrategy::Flatten); - } - - pub(crate) fn set_write_strategy_queue(&mut self) { - // this should always be called only at construction time, - // so this assert is here to catch myself - 
debug_assert!(self.write_buf.queue.bufs_cnt() == 0); - self.write_buf.set_strategy(WriteStrategy::Queue); - } - - pub(crate) fn read_buf(&self) -> &[u8] { - self.read_buf.as_ref() - } - - #[cfg(test)] - #[cfg(feature = "nightly")] - pub(super) fn read_buf_mut(&mut self) -> &mut BytesMut { - &mut self.read_buf - } - - /// Return the "allocated" available space, not the potential space - /// that could be allocated in the future. - fn read_buf_remaining_mut(&self) -> usize { - self.read_buf.capacity() - self.read_buf.len() - } - - /// Return whether we can append to the headers buffer. - /// - /// Reasons we can't: - /// - The write buf is in queue mode, and some of the past body is still - /// needing to be flushed. - pub(crate) fn can_headers_buf(&self) -> bool { - !self.write_buf.queue.has_remaining() - } - - pub(crate) fn headers_buf(&mut self) -> &mut Vec { - let buf = self.write_buf.headers_mut(); - &mut buf.bytes - } - - pub(super) fn write_buf(&mut self) -> &mut WriteBuf { - &mut self.write_buf - } - - pub(crate) fn buffer>(&mut self, buf: BB) { - self.write_buf.buffer(buf) - } - - pub(crate) fn can_buffer(&self) -> bool { - self.flush_pipeline || self.write_buf.can_buffer() - } - - pub(crate) fn consume_leading_lines(&mut self) { - if !self.read_buf.is_empty() { - let mut i = 0; - while i < self.read_buf.len() { - match self.read_buf[i] { - b'\r' | b'\n' => i += 1, - _ => break, - } - } - self.read_buf.advance(i); - } - } - - pub(super) fn parse( - &mut self, - cx: &mut task::Context<'_>, - parse_ctx: ParseContext<'_>, - ) -> Poll>> - where - S: Http1Transaction, - { - loop { - match super::role::parse_headers::( - &mut self.read_buf, - ParseContext { - cached_headers: parse_ctx.cached_headers, - req_method: parse_ctx.req_method, - h1_parser_config: parse_ctx.h1_parser_config.clone(), - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout: parse_ctx.h1_header_read_timeout, - #[cfg(all(feature = "server", feature = "runtime"))] - 
h1_header_read_timeout_fut: parse_ctx.h1_header_read_timeout_fut, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running, - preserve_header_case: parse_ctx.preserve_header_case, - #[cfg(feature = "ffi")] - preserve_header_order: parse_ctx.preserve_header_order, - h09_responses: parse_ctx.h09_responses, - #[cfg(feature = "ffi")] - on_informational: parse_ctx.on_informational, - #[cfg(feature = "ffi")] - raw_headers: parse_ctx.raw_headers, - }, - )? { - Some(msg) => { - debug!("parsed {} headers", msg.head.headers.len()); - - #[cfg(all(feature = "server", feature = "runtime"))] - { - *parse_ctx.h1_header_read_timeout_running = false; - - if let Some(h1_header_read_timeout_fut) = - parse_ctx.h1_header_read_timeout_fut - { - // Reset the timer in order to avoid woken up when the timeout finishes - h1_header_read_timeout_fut - .as_mut() - .reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60)); - } - } - return Poll::Ready(Ok(msg)); - } - None => { - let max = self.read_buf_strategy.max(); - if self.read_buf.len() >= max { - debug!("max_buf_size ({}) reached, closing", max); - return Poll::Ready(Err(crate::Error::new_too_large())); - } - - #[cfg(all(feature = "server", feature = "runtime"))] - if *parse_ctx.h1_header_read_timeout_running { - if let Some(h1_header_read_timeout_fut) = - parse_ctx.h1_header_read_timeout_fut - { - if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() { - *parse_ctx.h1_header_read_timeout_running = false; - - tracing::warn!("read header from client timeout"); - return Poll::Ready(Err(crate::Error::new_header_timeout())); - } - } - } - } - } - if ready!(self.poll_read_from_io(cx)).map_err(crate::Error::new_io)? 
== 0 { - trace!("parse eof"); - return Poll::Ready(Err(crate::Error::new_incomplete())); - } - } - } - - pub(crate) fn poll_read_from_io( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll> { - self.read_blocked = false; - let next = self.read_buf_strategy.next(); - if self.read_buf_remaining_mut() < next { - self.read_buf.reserve(next); - } - - let dst = self.read_buf.chunk_mut(); - let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit]) }; - let mut buf = ReadBuf::uninit(dst); - match Pin::new(&mut self.io).poll_read(cx, &mut buf) { - Poll::Ready(Ok(_)) => { - let n = buf.filled().len(); - trace!("received {} bytes", n); - unsafe { - // Safety: we just read that many bytes into the - // uninitialized part of the buffer, so this is okay. - // @tokio pls give me back `poll_read_buf` thanks - self.read_buf.advance_mut(n); - } - self.read_buf_strategy.record(n); - Poll::Ready(Ok(n)) - } - Poll::Pending => { - self.read_blocked = true; - Poll::Pending - } - Poll::Ready(Err(e)) => Poll::Ready(Err(e)), - } - } - - pub(crate) fn into_inner(self) -> (T, Bytes) { - (self.io, self.read_buf.freeze()) - } - - pub(crate) fn io_mut(&mut self) -> &mut T { - &mut self.io - } - - pub(crate) fn is_read_blocked(&self) -> bool { - self.read_blocked - } - - pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { - if self.flush_pipeline && !self.read_buf.is_empty() { - Poll::Ready(Ok(())) - } else if self.write_buf.remaining() == 0 { - Pin::new(&mut self.io).poll_flush(cx) - } else { - if let WriteStrategy::Flatten = self.write_buf.strategy { - return self.poll_flush_flattened(cx); - } - - const MAX_WRITEV_BUFS: usize = 64; - loop { - let n = { - let mut iovs = [IoSlice::new(&[]); MAX_WRITEV_BUFS]; - let len = self.write_buf.chunks_vectored(&mut iovs); - ready!(Pin::new(&mut self.io).poll_write_vectored(cx, &iovs[..len]))? 
- }; - // TODO(eliza): we have to do this manually because - // `poll_write_buf` doesn't exist in Tokio 0.3 yet...when - // `poll_write_buf` comes back, the manual advance will need to leave! - self.write_buf.advance(n); - debug!("flushed {} bytes", n); - if self.write_buf.remaining() == 0 { - break; - } else if n == 0 { - trace!( - "write returned zero, but {} bytes remaining", - self.write_buf.remaining() - ); - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - } - Pin::new(&mut self.io).poll_flush(cx) - } - } - - /// Specialized version of `flush` when strategy is Flatten. - /// - /// Since all buffered bytes are flattened into the single headers buffer, - /// that skips some bookkeeping around using multiple buffers. - fn poll_flush_flattened(&mut self, cx: &mut task::Context<'_>) -> Poll> { - loop { - let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.chunk()))?; - debug!("flushed {} bytes", n); - self.write_buf.headers.advance(n); - if self.write_buf.headers.remaining() == 0 { - self.write_buf.headers.reset(); - break; - } else if n == 0 { - trace!( - "write returned zero, but {} bytes remaining", - self.write_buf.remaining() - ); - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - } - Pin::new(&mut self.io).poll_flush(cx) - } - - #[cfg(test)] - fn flush<'a>(&'a mut self) -> impl std::future::Future> + 'a { - futures_util::future::poll_fn(move |cx| self.poll_flush(cx)) - } -} - -// The `B` is a `Buf`, we never project a pin to it -impl Unpin for Buffered {} - -// TODO: This trait is old... at least rename to PollBytes or something... 
-pub(crate) trait MemRead { - fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll>; -} - -impl MemRead for Buffered -where - T: AsyncRead + AsyncWrite + Unpin, - B: Buf, -{ - fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll> { - if !self.read_buf.is_empty() { - let n = std::cmp::min(len, self.read_buf.len()); - Poll::Ready(Ok(self.read_buf.split_to(n).freeze())) - } else { - let n = ready!(self.poll_read_from_io(cx))?; - Poll::Ready(Ok(self.read_buf.split_to(::std::cmp::min(len, n)).freeze())) - } - } -} - -#[derive(Clone, Copy, Debug)] -enum ReadStrategy { - Adaptive { - decrease_now: bool, - next: usize, - max: usize, - }, - #[cfg(feature = "client")] - Exact(usize), -} - -impl ReadStrategy { - fn with_max(max: usize) -> ReadStrategy { - ReadStrategy::Adaptive { - decrease_now: false, - next: INIT_BUFFER_SIZE, - max, - } - } - - fn next(&self) -> usize { - match *self { - ReadStrategy::Adaptive { next, .. } => next, - #[cfg(feature = "client")] - ReadStrategy::Exact(exact) => exact, - } - } - - fn max(&self) -> usize { - match *self { - ReadStrategy::Adaptive { max, .. } => max, - #[cfg(feature = "client")] - ReadStrategy::Exact(exact) => exact, - } - } - - fn record(&mut self, bytes_read: usize) { - match *self { - ReadStrategy::Adaptive { - ref mut decrease_now, - ref mut next, - max, - .. - } => { - if bytes_read >= *next { - *next = cmp::min(incr_power_of_two(*next), max); - *decrease_now = false; - } else { - let decr_to = prev_power_of_two(*next); - if bytes_read < decr_to { - if *decrease_now { - *next = cmp::max(decr_to, INIT_BUFFER_SIZE); - *decrease_now = false; - } else { - // Decreasing is a two "record" process. - *decrease_now = true; - } - } else { - // A read within the current range should cancel - // a potential decrease, since we just saw proof - // that we still need this size. 
- *decrease_now = false; - } - } - } - #[cfg(feature = "client")] - ReadStrategy::Exact(_) => (), - } - } -} - -fn incr_power_of_two(n: usize) -> usize { - n.saturating_mul(2) -} - -fn prev_power_of_two(n: usize) -> usize { - // Only way this shift can underflow is if n is less than 4. - // (Which would means `usize::MAX >> 64` and underflowed!) - debug_assert!(n >= 4); - (::std::usize::MAX >> (n.leading_zeros() + 2)) + 1 -} - -impl Default for ReadStrategy { - fn default() -> ReadStrategy { - ReadStrategy::with_max(DEFAULT_MAX_BUFFER_SIZE) - } -} - -#[derive(Clone)] -pub(crate) struct Cursor { - bytes: T, - pos: usize, -} - -impl> Cursor { - #[inline] - pub(crate) fn new(bytes: T) -> Cursor { - Cursor { bytes, pos: 0 } - } -} - -impl Cursor> { - /// If we've advanced the position a bit in this cursor, and wish to - /// extend the underlying vector, we may wish to unshift the "read" bytes - /// off, and move everything else over. - fn maybe_unshift(&mut self, additional: usize) { - if self.pos == 0 { - // nothing to do - return; - } - - if self.bytes.capacity() - self.bytes.len() >= additional { - // there's room! - return; - } - - self.bytes.drain(0..self.pos); - self.pos = 0; - } - - fn reset(&mut self) { - self.pos = 0; - self.bytes.clear(); - } -} - -impl> fmt::Debug for Cursor { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Cursor") - .field("pos", &self.pos) - .field("len", &self.bytes.as_ref().len()) - .finish() - } -} - -impl> Buf for Cursor { - #[inline] - fn remaining(&self) -> usize { - self.bytes.as_ref().len() - self.pos - } - - #[inline] - fn chunk(&self) -> &[u8] { - &self.bytes.as_ref()[self.pos..] 
- } - - #[inline] - fn advance(&mut self, cnt: usize) { - debug_assert!(self.pos + cnt <= self.bytes.as_ref().len()); - self.pos += cnt; - } -} - -// an internal buffer to collect writes before flushes -pub(super) struct WriteBuf { - /// Re-usable buffer that holds message headers - headers: Cursor>, - max_buf_size: usize, - /// Deque of user buffers if strategy is Queue - queue: BufList, - strategy: WriteStrategy, -} - -impl WriteBuf { - fn new(strategy: WriteStrategy) -> WriteBuf { - WriteBuf { - headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)), - max_buf_size: DEFAULT_MAX_BUFFER_SIZE, - queue: BufList::new(), - strategy, - } - } -} - -impl WriteBuf -where - B: Buf, -{ - fn set_strategy(&mut self, strategy: WriteStrategy) { - self.strategy = strategy; - } - - pub(super) fn buffer>(&mut self, mut buf: BB) { - debug_assert!(buf.has_remaining()); - match self.strategy { - WriteStrategy::Flatten => { - let head = self.headers_mut(); - - head.maybe_unshift(buf.remaining()); - trace!( - self.len = head.remaining(), - buf.len = buf.remaining(), - "buffer.flatten" - ); - //perf: This is a little faster than >::put, - //but accomplishes the same result. 
- loop { - let adv = { - let slice = buf.chunk(); - if slice.is_empty() { - return; - } - head.bytes.extend_from_slice(slice); - slice.len() - }; - buf.advance(adv); - } - } - WriteStrategy::Queue => { - trace!( - self.len = self.remaining(), - buf.len = buf.remaining(), - "buffer.queue" - ); - self.queue.push(buf.into()); - } - } - } - - fn can_buffer(&self) -> bool { - match self.strategy { - WriteStrategy::Flatten => self.remaining() < self.max_buf_size, - WriteStrategy::Queue => { - self.queue.bufs_cnt() < MAX_BUF_LIST_BUFFERS && self.remaining() < self.max_buf_size - } - } - } - - fn headers_mut(&mut self) -> &mut Cursor> { - debug_assert!(!self.queue.has_remaining()); - &mut self.headers - } -} - -impl fmt::Debug for WriteBuf { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("WriteBuf") - .field("remaining", &self.remaining()) - .field("strategy", &self.strategy) - .finish() - } -} - -impl Buf for WriteBuf { - #[inline] - fn remaining(&self) -> usize { - self.headers.remaining() + self.queue.remaining() - } - - #[inline] - fn chunk(&self) -> &[u8] { - let headers = self.headers.chunk(); - if !headers.is_empty() { - headers - } else { - self.queue.chunk() - } - } - - #[inline] - fn advance(&mut self, cnt: usize) { - let hrem = self.headers.remaining(); - - match hrem.cmp(&cnt) { - cmp::Ordering::Equal => self.headers.reset(), - cmp::Ordering::Greater => self.headers.advance(cnt), - cmp::Ordering::Less => { - let qcnt = cnt - hrem; - self.headers.reset(); - self.queue.advance(qcnt); - } - } - } - - #[inline] - fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize { - let n = self.headers.chunks_vectored(dst); - self.queue.chunks_vectored(&mut dst[n..]) + n - } -} - -#[derive(Debug)] -enum WriteStrategy { - Flatten, - Queue, -} - -#[cfg(test)] -mod tests { - use super::*; - use std::time::Duration; - - use tokio_test::io::Builder as Mock; - - // #[cfg(feature = "nightly")] - // use test::Bencher; - - /* - impl MemRead 
for AsyncIo { - fn read_mem(&mut self, len: usize) -> Poll { - let mut v = vec![0; len]; - let n = try_nb!(self.read(v.as_mut_slice())); - Ok(Async::Ready(BytesMut::from(&v[..n]).freeze())) - } - } - */ - - #[tokio::test] - #[ignore] - async fn iobuf_write_empty_slice() { - // TODO(eliza): can i have writev back pls T_T - // // First, let's just check that the Mock would normally return an - // // error on an unexpected write, even if the buffer is empty... - // let mut mock = Mock::new().build(); - // futures_util::future::poll_fn(|cx| { - // Pin::new(&mut mock).poll_write_buf(cx, &mut Cursor::new(&[])) - // }) - // .await - // .expect_err("should be a broken pipe"); - - // // underlying io will return the logic error upon write, - // // so we are testing that the io_buf does not trigger a write - // // when there is nothing to flush - // let mock = Mock::new().build(); - // let mut io_buf = Buffered::<_, Cursor>>::new(mock); - // io_buf.flush().await.expect("should short-circuit flush"); - } - - #[tokio::test] - async fn parse_reads_until_blocked() { - use crate::proto::h1::ClientTransaction; - - let _ = pretty_env_logger::try_init(); - let mock = Mock::new() - // Split over multiple reads will read all of it - .read(b"HTTP/1.1 200 OK\r\n") - .read(b"Server: hyper\r\n") - // missing last line ending - .wait(Duration::from_secs(1)) - .build(); - - let mut buffered = Buffered::<_, Cursor>>::new(mock); - - // We expect a `parse` to be not ready, and so can't await it directly. - // Rather, this `poll_fn` will wrap the `Poll` result. 
- futures_util::future::poll_fn(|cx| { - let parse_ctx = ParseContext { - cached_headers: &mut None, - req_method: &mut None, - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }; - assert!(buffered - .parse::(cx, parse_ctx) - .is_pending()); - Poll::Ready(()) - }) - .await; - - assert_eq!( - buffered.read_buf, - b"HTTP/1.1 200 OK\r\nServer: hyper\r\n"[..] - ); - } - - #[test] - fn read_strategy_adaptive_increments() { - let mut strategy = ReadStrategy::default(); - assert_eq!(strategy.next(), 8192); - - // Grows if record == next - strategy.record(8192); - assert_eq!(strategy.next(), 16384); - - strategy.record(16384); - assert_eq!(strategy.next(), 32768); - - // Enormous records still increment at same rate - strategy.record(::std::usize::MAX); - assert_eq!(strategy.next(), 65536); - - let max = strategy.max(); - while strategy.next() < max { - strategy.record(max); - } - - assert_eq!(strategy.next(), max, "never goes over max"); - strategy.record(max + 1); - assert_eq!(strategy.next(), max, "never goes over max"); - } - - #[test] - fn read_strategy_adaptive_decrements() { - let mut strategy = ReadStrategy::default(); - strategy.record(8192); - assert_eq!(strategy.next(), 16384); - - strategy.record(1); - assert_eq!( - strategy.next(), - 16384, - "first smaller record doesn't decrement yet" - ); - strategy.record(8192); - assert_eq!(strategy.next(), 16384, "record was with range"); - - strategy.record(1); - assert_eq!( - strategy.next(), - 16384, - "in-range record should make this the 'first' again" - ); - - strategy.record(1); - 
assert_eq!(strategy.next(), 8192, "second smaller record decrements"); - - strategy.record(1); - assert_eq!(strategy.next(), 8192, "first doesn't decrement"); - strategy.record(1); - assert_eq!(strategy.next(), 8192, "doesn't decrement under minimum"); - } - - #[test] - fn read_strategy_adaptive_stays_the_same() { - let mut strategy = ReadStrategy::default(); - strategy.record(8192); - assert_eq!(strategy.next(), 16384); - - strategy.record(8193); - assert_eq!( - strategy.next(), - 16384, - "first smaller record doesn't decrement yet" - ); - - strategy.record(8193); - assert_eq!( - strategy.next(), - 16384, - "with current step does not decrement" - ); - } - - #[test] - fn read_strategy_adaptive_max_fuzz() { - fn fuzz(max: usize) { - let mut strategy = ReadStrategy::with_max(max); - while strategy.next() < max { - strategy.record(::std::usize::MAX); - } - let mut next = strategy.next(); - while next > 8192 { - strategy.record(1); - strategy.record(1); - next = strategy.next(); - assert!( - next.is_power_of_two(), - "decrement should be powers of two: {} (max = {})", - next, - max, - ); - } - } - - let mut max = 8192; - while max < std::usize::MAX { - fuzz(max); - max = (max / 2).saturating_mul(3); - } - fuzz(::std::usize::MAX); - } - - #[test] - #[should_panic] - #[cfg(debug_assertions)] // needs to trigger a debug_assert - fn write_buf_requires_non_empty_bufs() { - let mock = Mock::new().build(); - let mut buffered = Buffered::<_, Cursor>>::new(mock); - - buffered.buffer(Cursor::new(Vec::new())); - } - - /* - TODO: needs tokio_test::io to allow configure write_buf calls - #[test] - fn write_buf_queue() { - let _ = pretty_env_logger::try_init(); - - let mock = AsyncIo::new_buf(vec![], 1024); - let mut buffered = Buffered::<_, Cursor>>::new(mock); - - - buffered.headers_buf().extend(b"hello "); - buffered.buffer(Cursor::new(b"world, ".to_vec())); - buffered.buffer(Cursor::new(b"it's ".to_vec())); - buffered.buffer(Cursor::new(b"hyper!".to_vec())); - 
assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3); - buffered.flush().unwrap(); - - assert_eq!(buffered.io, b"hello world, it's hyper!"); - assert_eq!(buffered.io.num_writes(), 1); - assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); - } - */ - - #[tokio::test] - async fn write_buf_flatten() { - let _ = pretty_env_logger::try_init(); - - let mock = Mock::new().write(b"hello world, it's hyper!").build(); - - let mut buffered = Buffered::<_, Cursor>>::new(mock); - buffered.write_buf.set_strategy(WriteStrategy::Flatten); - - buffered.headers_buf().extend(b"hello "); - buffered.buffer(Cursor::new(b"world, ".to_vec())); - buffered.buffer(Cursor::new(b"it's ".to_vec())); - buffered.buffer(Cursor::new(b"hyper!".to_vec())); - assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); - - buffered.flush().await.expect("flush"); - } - - #[test] - fn write_buf_flatten_partially_flushed() { - let _ = pretty_env_logger::try_init(); - - let b = |s: &str| Cursor::new(s.as_bytes().to_vec()); - - let mut write_buf = WriteBuf::>>::new(WriteStrategy::Flatten); - - write_buf.buffer(b("hello ")); - write_buf.buffer(b("world, ")); - - assert_eq!(write_buf.chunk(), b"hello world, "); - - // advance most of the way, but not all - write_buf.advance(11); - - assert_eq!(write_buf.chunk(), b", "); - assert_eq!(write_buf.headers.pos, 11); - assert_eq!(write_buf.headers.bytes.capacity(), INIT_BUFFER_SIZE); - - // there's still room in the headers buffer, so just push on the end - write_buf.buffer(b("it's hyper!")); - - assert_eq!(write_buf.chunk(), b", it's hyper!"); - assert_eq!(write_buf.headers.pos, 11); - - let rem1 = write_buf.remaining(); - let cap = write_buf.headers.bytes.capacity(); - - // but when this would go over capacity, don't copy the old bytes - write_buf.buffer(Cursor::new(vec![b'X'; cap])); - assert_eq!(write_buf.remaining(), cap + rem1); - assert_eq!(write_buf.headers.pos, 0); - } - - #[tokio::test] - async fn write_buf_queue_disable_auto() { - let _ = 
pretty_env_logger::try_init(); - - let mock = Mock::new() - .write(b"hello ") - .write(b"world, ") - .write(b"it's ") - .write(b"hyper!") - .build(); - - let mut buffered = Buffered::<_, Cursor>>::new(mock); - buffered.write_buf.set_strategy(WriteStrategy::Queue); - - // we have 4 buffers, and vec IO disabled, but explicitly said - // don't try to auto detect (via setting strategy above) - - buffered.headers_buf().extend(b"hello "); - buffered.buffer(Cursor::new(b"world, ".to_vec())); - buffered.buffer(Cursor::new(b"it's ".to_vec())); - buffered.buffer(Cursor::new(b"hyper!".to_vec())); - assert_eq!(buffered.write_buf.queue.bufs_cnt(), 3); - - buffered.flush().await.expect("flush"); - - assert_eq!(buffered.write_buf.queue.bufs_cnt(), 0); - } - - // #[cfg(feature = "nightly")] - // #[bench] - // fn bench_write_buf_flatten_buffer_chunk(b: &mut Bencher) { - // let s = "Hello, World!"; - // b.bytes = s.len() as u64; - - // let mut write_buf = WriteBuf::::new(); - // write_buf.set_strategy(WriteStrategy::Flatten); - // b.iter(|| { - // let chunk = bytes::Bytes::from(s); - // write_buf.buffer(chunk); - // ::test::black_box(&write_buf); - // write_buf.headers.bytes.clear(); - // }) - // } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,122 +0,0 @@ -#[cfg(all(feature = "server", feature = "runtime"))] -use std::{pin::Pin, time::Duration}; - -use bytes::BytesMut; -use http::{HeaderMap, Method}; -use httparse::ParserConfig; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Sleep; - -use crate::body::DecodedLength; -use crate::proto::{BodyLength, MessageHead}; - -pub(crate) use self::conn::Conn; -pub(crate) use self::decode::Decoder; -pub(crate) use self::dispatch::Dispatcher; 
-pub(crate) use self::encode::{EncodedBuf, Encoder}; -//TODO: move out of h1::io -pub(crate) use self::io::MINIMUM_MAX_BUFFER_SIZE; - -mod conn; -mod decode; -pub(crate) mod dispatch; -mod encode; -mod io; -mod role; - -cfg_client! { - pub(crate) type ClientTransaction = role::Client; -} - -cfg_server! { - pub(crate) type ServerTransaction = role::Server; -} - -pub(crate) trait Http1Transaction { - type Incoming; - type Outgoing: Default; - const LOG: &'static str; - fn parse(bytes: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult; - fn encode(enc: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> crate::Result; - - fn on_error(err: &crate::Error) -> Option>; - - fn is_client() -> bool { - !Self::is_server() - } - - fn is_server() -> bool { - !Self::is_client() - } - - fn should_error_on_parse_eof() -> bool { - Self::is_client() - } - - fn should_read_first() -> bool { - Self::is_server() - } - - fn update_date() {} -} - -/// Result newtype for Http1Transaction::parse. -pub(crate) type ParseResult = Result>, crate::error::Parse>; - -#[derive(Debug)] -pub(crate) struct ParsedMessage { - head: MessageHead, - decode: DecodedLength, - expect_continue: bool, - keep_alive: bool, - wants_upgrade: bool, -} - -pub(crate) struct ParseContext<'a> { - cached_headers: &'a mut Option, - req_method: &'a mut Option, - h1_parser_config: ParserConfig, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout: Option, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_fut: &'a mut Option>>, - #[cfg(all(feature = "server", feature = "runtime"))] - h1_header_read_timeout_running: &'a mut bool, - preserve_header_case: bool, - #[cfg(feature = "ffi")] - preserve_header_order: bool, - h09_responses: bool, - #[cfg(feature = "ffi")] - on_informational: &'a mut Option, - #[cfg(feature = "ffi")] - raw_headers: bool, -} - -/// Passed to Http1Transaction::encode -pub(crate) struct Encode<'a, T> { - head: &'a mut MessageHead, - body: Option, - 
#[cfg(feature = "server")] - keep_alive: bool, - req_method: &'a mut Option, - title_case_headers: bool, -} - -/// Extra flags that a request "wants", like expect-continue or upgrades. -#[derive(Clone, Copy, Debug)] -struct Wants(u8); - -impl Wants { - const EMPTY: Wants = Wants(0b00); - const EXPECT: Wants = Wants(0b01); - const UPGRADE: Wants = Wants(0b10); - - #[must_use] - fn add(self, other: Wants) -> Wants { - Wants(self.0 | other.0) - } - - fn contains(&self, other: Wants) -> bool { - (self.0 & other.0) == other.0 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/role.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/role.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h1/role.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h1/role.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2847 +0,0 @@ -use std::fmt::{self, Write}; -use std::mem::MaybeUninit; - -use bytes::Bytes; -use bytes::BytesMut; -#[cfg(feature = "server")] -use http::header::ValueIter; -use http::header::{self, Entry, HeaderName, HeaderValue}; -use http::{HeaderMap, Method, StatusCode, Version}; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Instant; -use tracing::{debug, error, trace, trace_span, warn}; - -use crate::body::DecodedLength; -#[cfg(feature = "server")] -use crate::common::date; -use crate::error::Parse; -use crate::ext::HeaderCaseMap; -#[cfg(feature = "ffi")] -use crate::ext::OriginalHeaderOrder; -use crate::headers; -use crate::proto::h1::{ - Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage, -}; -use crate::proto::{BodyLength, MessageHead, RequestHead, RequestLine}; - -const MAX_HEADERS: usize = 100; -const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific -#[cfg(feature = "server")] -const MAX_URI_LEN: usize = (u16::MAX - 1) as usize; - -macro_rules! 
header_name { - ($bytes:expr) => {{ - { - match HeaderName::from_bytes($bytes) { - Ok(name) => name, - Err(e) => maybe_panic!(e), - } - } - }}; -} - -macro_rules! header_value { - ($bytes:expr) => {{ - { - unsafe { HeaderValue::from_maybe_shared_unchecked($bytes) } - } - }}; -} - -macro_rules! maybe_panic { - ($($arg:tt)*) => ({ - let _err = ($($arg)*); - if cfg!(debug_assertions) { - panic!("{:?}", _err); - } else { - error!("Internal Hyper error, please report {:?}", _err); - return Err(Parse::Internal) - } - }) -} - -pub(super) fn parse_headers( - bytes: &mut BytesMut, - ctx: ParseContext<'_>, -) -> ParseResult -where - T: Http1Transaction, -{ - // If the buffer is empty, don't bother entering the span, it's just noise. - if bytes.is_empty() { - return Ok(None); - } - - let span = trace_span!("parse_headers"); - let _s = span.enter(); - - #[cfg(all(feature = "server", feature = "runtime"))] - if !*ctx.h1_header_read_timeout_running { - if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout { - let deadline = Instant::now() + h1_header_read_timeout; - *ctx.h1_header_read_timeout_running = true; - match ctx.h1_header_read_timeout_fut { - Some(h1_header_read_timeout_fut) => { - debug!("resetting h1 header read timeout timer"); - h1_header_read_timeout_fut.as_mut().reset(deadline); - } - None => { - debug!("setting h1 header read timeout timer"); - *ctx.h1_header_read_timeout_fut = - Some(Box::pin(tokio::time::sleep_until(deadline))); - } - } - } - } - - T::parse(bytes, ctx) -} - -pub(super) fn encode_headers( - enc: Encode<'_, T::Outgoing>, - dst: &mut Vec, -) -> crate::Result -where - T: Http1Transaction, -{ - let span = trace_span!("encode_headers"); - let _s = span.enter(); - T::encode(enc, dst) -} - -// There are 2 main roles, Client and Server. 
- -#[cfg(feature = "client")] -pub(crate) enum Client {} - -#[cfg(feature = "server")] -pub(crate) enum Server {} - -#[cfg(feature = "server")] -impl Http1Transaction for Server { - type Incoming = RequestLine; - type Outgoing = StatusCode; - const LOG: &'static str = "{role=server}"; - - fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult { - debug_assert!(!buf.is_empty(), "parse called with empty buf"); - - let mut keep_alive; - let is_http_11; - let subject; - let version; - let len; - let headers_len; - - // Unsafe: both headers_indices and headers are using uninitialized memory, - // but we *never* read any of it until after httparse has assigned - // values into it. By not zeroing out the stack memory, this saves - // a good ~5% on pipeline benchmarks. - let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { - // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit - MaybeUninit::uninit().assume_init() - }; - { - /* SAFETY: it is safe to go from MaybeUninit array to array of MaybeUninit */ - let mut headers: [MaybeUninit>; MAX_HEADERS] = - unsafe { MaybeUninit::uninit().assume_init() }; - trace!(bytes = buf.len(), "Request.parse"); - let mut req = httparse::Request::new(&mut []); - let bytes = buf.as_ref(); - match req.parse_with_uninit_headers(bytes, &mut headers) { - Ok(httparse::Status::Complete(parsed_len)) => { - trace!("Request.parse Complete({})", parsed_len); - len = parsed_len; - let uri = req.path.unwrap(); - if uri.len() > MAX_URI_LEN { - return Err(Parse::UriTooLong); - } - subject = RequestLine( - Method::from_bytes(req.method.unwrap().as_bytes())?, - uri.parse()?, - ); - version = if req.version.unwrap() == 1 { - keep_alive = true; - is_http_11 = true; - Version::HTTP_11 - } else { - keep_alive = false; - is_http_11 = false; - Version::HTTP_10 - }; - - record_header_indices(bytes, &req.headers, &mut headers_indices)?; - headers_len = req.headers.len(); - } - Ok(httparse::Status::Partial) => return 
Ok(None), - Err(err) => { - return Err(match err { - // if invalid Token, try to determine if for method or path - httparse::Error::Token => { - if req.method.is_none() { - Parse::Method - } else { - debug_assert!(req.path.is_none()); - Parse::Uri - } - } - other => other.into(), - }); - } - } - }; - - let slice = buf.split_to(len).freeze(); - - // According to https://tools.ietf.org/html/rfc7230#section-3.3.3 - // 1. (irrelevant to Request) - // 2. (irrelevant to Request) - // 3. Transfer-Encoding: chunked has a chunked body. - // 4. If multiple differing Content-Length headers or invalid, close connection. - // 5. Content-Length header has a sized body. - // 6. Length 0. - // 7. (irrelevant to Request) - - let mut decoder = DecodedLength::ZERO; - let mut expect_continue = false; - let mut con_len = None; - let mut is_te = false; - let mut is_te_chunked = false; - let mut wants_upgrade = subject.0 == Method::CONNECT; - - let mut header_case_map = if ctx.preserve_header_case { - Some(HeaderCaseMap::default()) - } else { - None - }; - - #[cfg(feature = "ffi")] - let mut header_order = if ctx.preserve_header_order { - Some(OriginalHeaderOrder::default()) - } else { - None - }; - - let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); - - headers.reserve(headers_len); - - for header in &headers_indices[..headers_len] { - // SAFETY: array is valid up to `headers_len` - let header = unsafe { &*header.as_ptr() }; - let name = header_name!(&slice[header.name.0..header.name.1]); - let value = header_value!(slice.slice(header.value.0..header.value.1)); - - match name { - header::TRANSFER_ENCODING => { - // https://tools.ietf.org/html/rfc7230#section-3.3.3 - // If Transfer-Encoding header is present, and 'chunked' is - // not the final encoding, and this is a Request, then it is - // malformed. A server should respond with 400 Bad Request. 
- if !is_http_11 { - debug!("HTTP/1.0 cannot have Transfer-Encoding header"); - return Err(Parse::transfer_encoding_unexpected()); - } - is_te = true; - if headers::is_chunked_(&value) { - is_te_chunked = true; - decoder = DecodedLength::CHUNKED; - } else { - is_te_chunked = false; - } - } - header::CONTENT_LENGTH => { - if is_te { - continue; - } - let len = headers::content_length_parse(&value) - .ok_or_else(Parse::content_length_invalid)?; - if let Some(prev) = con_len { - if prev != len { - debug!( - "multiple Content-Length headers with different values: [{}, {}]", - prev, len, - ); - return Err(Parse::content_length_invalid()); - } - // we don't need to append this secondary length - continue; - } - decoder = DecodedLength::checked_new(len)?; - con_len = Some(len); - } - header::CONNECTION => { - // keep_alive was previously set to default for Version - if keep_alive { - // HTTP/1.1 - keep_alive = !headers::connection_close(&value); - } else { - // HTTP/1.0 - keep_alive = headers::connection_keep_alive(&value); - } - } - header::EXPECT => { - // According to https://datatracker.ietf.org/doc/html/rfc2616#section-14.20 - // Comparison of expectation values is case-insensitive for unquoted tokens - // (including the 100-continue token) - expect_continue = value.as_bytes().eq_ignore_ascii_case(b"100-continue"); - } - header::UPGRADE => { - // Upgrades are only allowed with HTTP/1.1 - wants_upgrade = is_http_11; - } - - _ => (), - } - - if let Some(ref mut header_case_map) = header_case_map { - header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); - } - - #[cfg(feature = "ffi")] - if let Some(ref mut header_order) = header_order { - header_order.append(&name); - } - - headers.append(name, value); - } - - if is_te && !is_te_chunked { - debug!("request with transfer-encoding header, but not chunked, bad request"); - return Err(Parse::transfer_encoding_invalid()); - } - - let mut extensions = http::Extensions::default(); - - if let 
Some(header_case_map) = header_case_map { - extensions.insert(header_case_map); - } - - #[cfg(feature = "ffi")] - if let Some(header_order) = header_order { - extensions.insert(header_order); - } - - *ctx.req_method = Some(subject.0.clone()); - - Ok(Some(ParsedMessage { - head: MessageHead { - version, - subject, - headers, - extensions, - }, - decode: decoder, - expect_continue, - keep_alive, - wants_upgrade, - })) - } - - fn encode(mut msg: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> crate::Result { - trace!( - "Server::encode status={:?}, body={:?}, req_method={:?}", - msg.head.subject, - msg.body, - msg.req_method - ); - - let mut wrote_len = false; - - // hyper currently doesn't support returning 1xx status codes as a Response - // This is because Service only allows returning a single Response, and - // so if you try to reply with a e.g. 100 Continue, you have no way of - // replying with the latter status code response. - let (ret, is_last) = if msg.head.subject == StatusCode::SWITCHING_PROTOCOLS { - (Ok(()), true) - } else if msg.req_method == &Some(Method::CONNECT) && msg.head.subject.is_success() { - // Sending content-length or transfer-encoding header on 2xx response - // to CONNECT is forbidden in RFC 7231. - wrote_len = true; - (Ok(()), true) - } else if msg.head.subject.is_informational() { - warn!("response with 1xx status code not supported"); - *msg.head = MessageHead::default(); - msg.head.subject = StatusCode::INTERNAL_SERVER_ERROR; - msg.body = None; - (Err(crate::Error::new_user_unsupported_status_code()), true) - } else { - (Ok(()), !msg.keep_alive) - }; - - // In some error cases, we don't know about the invalid message until already - // pushing some bytes onto the `dst`. In those cases, we don't want to send - // the half-pushed message, so rewind to before. 
- let orig_len = dst.len(); - - let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; - dst.reserve(init_cap); - - let custom_reason_phrase = msg.head.extensions.get::(); - - if msg.head.version == Version::HTTP_11 - && msg.head.subject == StatusCode::OK - && custom_reason_phrase.is_none() - { - extend(dst, b"HTTP/1.1 200 OK\r\n"); - } else { - match msg.head.version { - Version::HTTP_10 => extend(dst, b"HTTP/1.0 "), - Version::HTTP_11 => extend(dst, b"HTTP/1.1 "), - Version::HTTP_2 => { - debug!("response with HTTP2 version coerced to HTTP/1.1"); - extend(dst, b"HTTP/1.1 "); - } - other => panic!("unexpected response version: {:?}", other), - } - - extend(dst, msg.head.subject.as_str().as_bytes()); - extend(dst, b" "); - - if let Some(reason) = custom_reason_phrase { - extend(dst, reason.as_bytes()); - } else { - // a reason MUST be written, as many parsers will expect it. - extend( - dst, - msg.head - .subject - .canonical_reason() - .unwrap_or("") - .as_bytes(), - ); - } - - extend(dst, b"\r\n"); - } - - let orig_headers; - let extensions = std::mem::take(&mut msg.head.extensions); - let orig_headers = match extensions.get::() { - None if msg.title_case_headers => { - orig_headers = HeaderCaseMap::default(); - Some(&orig_headers) - } - orig_headers => orig_headers, - }; - let encoder = if let Some(orig_headers) = orig_headers { - Self::encode_headers_with_original_case( - msg, - dst, - is_last, - orig_len, - wrote_len, - orig_headers, - )? - } else { - Self::encode_headers_with_lower_case(msg, dst, is_last, orig_len, wrote_len)? 
- }; - - ret.map(|()| encoder) - } - - fn on_error(err: &crate::Error) -> Option> { - use crate::error::Kind; - let status = match *err.kind() { - Kind::Parse(Parse::Method) - | Kind::Parse(Parse::Header(_)) - | Kind::Parse(Parse::Uri) - | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST, - Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE, - Kind::Parse(Parse::UriTooLong) => StatusCode::URI_TOO_LONG, - _ => return None, - }; - - debug!("sending automatic response ({}) for parse error", status); - let mut msg = MessageHead::default(); - msg.subject = status; - Some(msg) - } - - fn is_server() -> bool { - true - } - - fn update_date() { - date::update(); - } -} - -#[cfg(feature = "server")] -impl Server { - fn can_have_body(method: &Option, status: StatusCode) -> bool { - Server::can_chunked(method, status) - } - - fn can_chunked(method: &Option, status: StatusCode) -> bool { - if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success() - { - false - } else if status.is_informational() { - false - } else { - match status { - StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, - _ => true, - } - } - } - - fn can_have_content_length(method: &Option, status: StatusCode) -> bool { - if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() { - false - } else { - match status { - StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false, - _ => true, - } - } - } - - fn can_have_implicit_zero_content_length(method: &Option, status: StatusCode) -> bool { - Server::can_have_content_length(method, status) && method != &Some(Method::HEAD) - } - - fn encode_headers_with_lower_case( - msg: Encode<'_, StatusCode>, - dst: &mut Vec, - is_last: bool, - orig_len: usize, - wrote_len: bool, - ) -> crate::Result { - struct LowercaseWriter; - - impl HeaderNameWriter for LowercaseWriter { - #[inline] - fn write_full_header_line( - &mut self, - dst: &mut Vec, - line: &str, - _: 
(HeaderName, &str), - ) { - extend(dst, line.as_bytes()) - } - - #[inline] - fn write_header_name_with_colon( - &mut self, - dst: &mut Vec, - name_with_colon: &str, - _: HeaderName, - ) { - extend(dst, name_with_colon.as_bytes()) - } - - #[inline] - fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName) { - extend(dst, name.as_str().as_bytes()) - } - } - - Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, LowercaseWriter) - } - - #[cold] - #[inline(never)] - fn encode_headers_with_original_case( - msg: Encode<'_, StatusCode>, - dst: &mut Vec, - is_last: bool, - orig_len: usize, - wrote_len: bool, - orig_headers: &HeaderCaseMap, - ) -> crate::Result { - struct OrigCaseWriter<'map> { - map: &'map HeaderCaseMap, - current: Option<(HeaderName, ValueIter<'map, Bytes>)>, - title_case_headers: bool, - } - - impl HeaderNameWriter for OrigCaseWriter<'_> { - #[inline] - fn write_full_header_line( - &mut self, - dst: &mut Vec, - _: &str, - (name, rest): (HeaderName, &str), - ) { - self.write_header_name(dst, &name); - extend(dst, rest.as_bytes()); - } - - #[inline] - fn write_header_name_with_colon( - &mut self, - dst: &mut Vec, - _: &str, - name: HeaderName, - ) { - self.write_header_name(dst, &name); - extend(dst, b": "); - } - - #[inline] - fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName) { - let Self { - map, - ref mut current, - title_case_headers, - } = *self; - if current.as_ref().map_or(true, |(last, _)| last != name) { - *current = None; - } - let (_, values) = - current.get_or_insert_with(|| (name.clone(), map.get_all_internal(name))); - - if let Some(orig_name) = values.next() { - extend(dst, orig_name); - } else if title_case_headers { - title_case(dst, name.as_str().as_bytes()); - } else { - extend(dst, name.as_str().as_bytes()); - } - } - } - - let header_name_writer = OrigCaseWriter { - map: orig_headers, - current: None, - title_case_headers: msg.title_case_headers, - }; - - Self::encode_headers(msg, dst, is_last, 
orig_len, wrote_len, header_name_writer) - } - - #[inline] - fn encode_headers( - msg: Encode<'_, StatusCode>, - dst: &mut Vec, - mut is_last: bool, - orig_len: usize, - mut wrote_len: bool, - mut header_name_writer: W, - ) -> crate::Result - where - W: HeaderNameWriter, - { - // In some error cases, we don't know about the invalid message until already - // pushing some bytes onto the `dst`. In those cases, we don't want to send - // the half-pushed message, so rewind to before. - let rewind = |dst: &mut Vec| { - dst.truncate(orig_len); - }; - - let mut encoder = Encoder::length(0); - let mut wrote_date = false; - let mut cur_name = None; - let mut is_name_written = false; - let mut must_write_chunked = false; - let mut prev_con_len = None; - - macro_rules! handle_is_name_written { - () => {{ - if is_name_written { - // we need to clean up and write the newline - debug_assert_ne!( - &dst[dst.len() - 2..], - b"\r\n", - "previous header wrote newline but set is_name_written" - ); - - if must_write_chunked { - extend(dst, b", chunked\r\n"); - } else { - extend(dst, b"\r\n"); - } - } - }}; - } - - 'headers: for (opt_name, value) in msg.head.headers.drain() { - if let Some(n) = opt_name { - cur_name = Some(n); - handle_is_name_written!(); - is_name_written = false; - } - let name = cur_name.as_ref().expect("current header name"); - match *name { - header::CONTENT_LENGTH => { - if wrote_len && !is_name_written { - warn!("unexpected content-length found, canceling"); - rewind(dst); - return Err(crate::Error::new_user_header()); - } - match msg.body { - Some(BodyLength::Known(known_len)) => { - // The HttpBody claims to know a length, and - // the headers are already set. For performance - // reasons, we are just going to trust that - // the values match. - // - // In debug builds, we'll assert they are the - // same to help developers find bugs. 
- #[cfg(debug_assertions)] - { - if let Some(len) = headers::content_length_parse(&value) { - assert!( - len == known_len, - "payload claims content-length of {}, custom content-length header claims {}", - known_len, - len, - ); - } - } - - if !is_name_written { - encoder = Encoder::length(known_len); - header_name_writer.write_header_name_with_colon( - dst, - "content-length: ", - header::CONTENT_LENGTH, - ); - extend(dst, value.as_bytes()); - wrote_len = true; - is_name_written = true; - } - continue 'headers; - } - Some(BodyLength::Unknown) => { - // The HttpBody impl didn't know how long the - // body is, but a length header was included. - // We have to parse the value to return our - // Encoder... - - if let Some(len) = headers::content_length_parse(&value) { - if let Some(prev) = prev_con_len { - if prev != len { - warn!( - "multiple Content-Length values found: [{}, {}]", - prev, len - ); - rewind(dst); - return Err(crate::Error::new_user_header()); - } - debug_assert!(is_name_written); - continue 'headers; - } else { - // we haven't written content-length yet! - encoder = Encoder::length(len); - header_name_writer.write_header_name_with_colon( - dst, - "content-length: ", - header::CONTENT_LENGTH, - ); - extend(dst, value.as_bytes()); - wrote_len = true; - is_name_written = true; - prev_con_len = Some(len); - continue 'headers; - } - } else { - warn!("illegal Content-Length value: {:?}", value); - rewind(dst); - return Err(crate::Error::new_user_header()); - } - } - None => { - // We have no body to actually send, - // but the headers claim a content-length. - // There's only 2 ways this makes sense: - // - // - The header says the length is `0`. - // - This is a response to a `HEAD` request. 
- if msg.req_method == &Some(Method::HEAD) { - debug_assert_eq!(encoder, Encoder::length(0)); - } else { - if value.as_bytes() != b"0" { - warn!( - "content-length value found, but empty body provided: {:?}", - value - ); - } - continue 'headers; - } - } - } - wrote_len = true; - } - header::TRANSFER_ENCODING => { - if wrote_len && !is_name_written { - warn!("unexpected transfer-encoding found, canceling"); - rewind(dst); - return Err(crate::Error::new_user_header()); - } - // check that we actually can send a chunked body... - if msg.head.version == Version::HTTP_10 - || !Server::can_chunked(msg.req_method, msg.head.subject) - { - continue; - } - wrote_len = true; - // Must check each value, because `chunked` needs to be the - // last encoding, or else we add it. - must_write_chunked = !headers::is_chunked_(&value); - - if !is_name_written { - encoder = Encoder::chunked(); - is_name_written = true; - header_name_writer.write_header_name_with_colon( - dst, - "transfer-encoding: ", - header::TRANSFER_ENCODING, - ); - extend(dst, value.as_bytes()); - } else { - extend(dst, b", "); - extend(dst, value.as_bytes()); - } - continue 'headers; - } - header::CONNECTION => { - if !is_last && headers::connection_close(&value) { - is_last = true; - } - if !is_name_written { - is_name_written = true; - header_name_writer.write_header_name_with_colon( - dst, - "connection: ", - header::CONNECTION, - ); - extend(dst, value.as_bytes()); - } else { - extend(dst, b", "); - extend(dst, value.as_bytes()); - } - continue 'headers; - } - header::DATE => { - wrote_date = true; - } - _ => (), - } - //TODO: this should perhaps instead combine them into - //single lines, as RFC7230 suggests is preferable. 
- - // non-special write Name and Value - debug_assert!( - !is_name_written, - "{:?} set is_name_written and didn't continue loop", - name, - ); - header_name_writer.write_header_name(dst, name); - extend(dst, b": "); - extend(dst, value.as_bytes()); - extend(dst, b"\r\n"); - } - - handle_is_name_written!(); - - if !wrote_len { - encoder = match msg.body { - Some(BodyLength::Unknown) => { - if msg.head.version == Version::HTTP_10 - || !Server::can_chunked(msg.req_method, msg.head.subject) - { - Encoder::close_delimited() - } else { - header_name_writer.write_full_header_line( - dst, - "transfer-encoding: chunked\r\n", - (header::TRANSFER_ENCODING, ": chunked\r\n"), - ); - Encoder::chunked() - } - } - None | Some(BodyLength::Known(0)) => { - if Server::can_have_implicit_zero_content_length( - msg.req_method, - msg.head.subject, - ) { - header_name_writer.write_full_header_line( - dst, - "content-length: 0\r\n", - (header::CONTENT_LENGTH, ": 0\r\n"), - ) - } - Encoder::length(0) - } - Some(BodyLength::Known(len)) => { - if !Server::can_have_content_length(msg.req_method, msg.head.subject) { - Encoder::length(0) - } else { - header_name_writer.write_header_name_with_colon( - dst, - "content-length: ", - header::CONTENT_LENGTH, - ); - extend(dst, ::itoa::Buffer::new().format(len).as_bytes()); - extend(dst, b"\r\n"); - Encoder::length(len) - } - } - }; - } - - if !Server::can_have_body(msg.req_method, msg.head.subject) { - trace!( - "server body forced to 0; method={:?}, status={:?}", - msg.req_method, - msg.head.subject - ); - encoder = Encoder::length(0); - } - - // cached date is much faster than formatting every request - if !wrote_date { - dst.reserve(date::DATE_VALUE_LENGTH + 8); - header_name_writer.write_header_name_with_colon(dst, "date: ", header::DATE); - date::extend(dst); - extend(dst, b"\r\n\r\n"); - } else { - extend(dst, b"\r\n"); - } - - Ok(encoder.set_last(is_last)) - } -} - -#[cfg(feature = "server")] -trait HeaderNameWriter { - fn 
write_full_header_line( - &mut self, - dst: &mut Vec, - line: &str, - name_value_pair: (HeaderName, &str), - ); - fn write_header_name_with_colon( - &mut self, - dst: &mut Vec, - name_with_colon: &str, - name: HeaderName, - ); - fn write_header_name(&mut self, dst: &mut Vec, name: &HeaderName); -} - -#[cfg(feature = "client")] -impl Http1Transaction for Client { - type Incoming = StatusCode; - type Outgoing = RequestLine; - const LOG: &'static str = "{role=client}"; - - fn parse(buf: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult { - debug_assert!(!buf.is_empty(), "parse called with empty buf"); - - // Loop to skip information status code headers (100 Continue, etc). - loop { - // Unsafe: see comment in Server Http1Transaction, above. - let mut headers_indices: [MaybeUninit; MAX_HEADERS] = unsafe { - // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit - MaybeUninit::uninit().assume_init() - }; - let (len, status, reason, version, headers_len) = { - // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit - let mut headers: [MaybeUninit>; MAX_HEADERS] = - unsafe { MaybeUninit::uninit().assume_init() }; - trace!(bytes = buf.len(), "Response.parse"); - let mut res = httparse::Response::new(&mut []); - let bytes = buf.as_ref(); - match ctx.h1_parser_config.parse_response_with_uninit_headers( - &mut res, - bytes, - &mut headers, - ) { - Ok(httparse::Status::Complete(len)) => { - trace!("Response.parse Complete({})", len); - let status = StatusCode::from_u16(res.code.unwrap())?; - - let reason = { - let reason = res.reason.unwrap(); - // Only save the reason phrase if it isn't the canonical reason - if Some(reason) != status.canonical_reason() { - Some(Bytes::copy_from_slice(reason.as_bytes())) - } else { - None - } - }; - - let version = if res.version.unwrap() == 1 { - Version::HTTP_11 - } else { - Version::HTTP_10 - }; - record_header_indices(bytes, &res.headers, &mut headers_indices)?; - let headers_len = 
res.headers.len(); - (len, status, reason, version, headers_len) - } - Ok(httparse::Status::Partial) => return Ok(None), - Err(httparse::Error::Version) if ctx.h09_responses => { - trace!("Response.parse accepted HTTP/0.9 response"); - - (0, StatusCode::OK, None, Version::HTTP_09, 0) - } - Err(e) => return Err(e.into()), - } - }; - - let mut slice = buf.split_to(len); - - if ctx - .h1_parser_config - .obsolete_multiline_headers_in_responses_are_allowed() - { - for header in &headers_indices[..headers_len] { - // SAFETY: array is valid up to `headers_len` - let header = unsafe { &*header.as_ptr() }; - for b in &mut slice[header.value.0..header.value.1] { - if *b == b'\r' || *b == b'\n' { - *b = b' '; - } - } - } - } - - let slice = slice.freeze(); - - let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); - - let mut keep_alive = version == Version::HTTP_11; - - let mut header_case_map = if ctx.preserve_header_case { - Some(HeaderCaseMap::default()) - } else { - None - }; - - #[cfg(feature = "ffi")] - let mut header_order = if ctx.preserve_header_order { - Some(OriginalHeaderOrder::default()) - } else { - None - }; - - headers.reserve(headers_len); - for header in &headers_indices[..headers_len] { - // SAFETY: array is valid up to `headers_len` - let header = unsafe { &*header.as_ptr() }; - let name = header_name!(&slice[header.name.0..header.name.1]); - let value = header_value!(slice.slice(header.value.0..header.value.1)); - - if let header::CONNECTION = name { - // keep_alive was previously set to default for Version - if keep_alive { - // HTTP/1.1 - keep_alive = !headers::connection_close(&value); - } else { - // HTTP/1.0 - keep_alive = headers::connection_keep_alive(&value); - } - } - - if let Some(ref mut header_case_map) = header_case_map { - header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); - } - - #[cfg(feature = "ffi")] - if let Some(ref mut header_order) = header_order { - header_order.append(&name); - } - - 
headers.append(name, value); - } - - let mut extensions = http::Extensions::default(); - - if let Some(header_case_map) = header_case_map { - extensions.insert(header_case_map); - } - - #[cfg(feature = "ffi")] - if let Some(header_order) = header_order { - extensions.insert(header_order); - } - - if let Some(reason) = reason { - // Safety: httparse ensures that only valid reason phrase bytes are present in this - // field. - let reason = unsafe { crate::ext::ReasonPhrase::from_bytes_unchecked(reason) }; - extensions.insert(reason); - } - - #[cfg(feature = "ffi")] - if ctx.raw_headers { - extensions.insert(crate::ffi::RawHeaders(crate::ffi::hyper_buf(slice))); - } - - let head = MessageHead { - version, - subject: status, - headers, - extensions, - }; - if let Some((decode, is_upgrade)) = Client::decoder(&head, ctx.req_method)? { - return Ok(Some(ParsedMessage { - head, - decode, - expect_continue: false, - // a client upgrade means the connection can't be used - // again, as it is definitely upgrading. - keep_alive: keep_alive && !is_upgrade, - wants_upgrade: is_upgrade, - })); - } - - #[cfg(feature = "ffi")] - if head.subject.is_informational() { - if let Some(callback) = ctx.on_informational { - callback.call(head.into_response(crate::Body::empty())); - } - } - - // Parsing a 1xx response could have consumed the buffer, check if - // it is empty now... 
- if buf.is_empty() { - return Ok(None); - } - } - } - - fn encode(msg: Encode<'_, Self::Outgoing>, dst: &mut Vec) -> crate::Result { - trace!( - "Client::encode method={:?}, body={:?}", - msg.head.subject.0, - msg.body - ); - - *msg.req_method = Some(msg.head.subject.0.clone()); - - let body = Client::set_length(msg.head, msg.body); - - let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; - dst.reserve(init_cap); - - extend(dst, msg.head.subject.0.as_str().as_bytes()); - extend(dst, b" "); - //TODO: add API to http::Uri to encode without std::fmt - let _ = write!(FastWrite(dst), "{} ", msg.head.subject.1); - - match msg.head.version { - Version::HTTP_10 => extend(dst, b"HTTP/1.0"), - Version::HTTP_11 => extend(dst, b"HTTP/1.1"), - Version::HTTP_2 => { - debug!("request with HTTP2 version coerced to HTTP/1.1"); - extend(dst, b"HTTP/1.1"); - } - other => panic!("unexpected request version: {:?}", other), - } - extend(dst, b"\r\n"); - - if let Some(orig_headers) = msg.head.extensions.get::() { - write_headers_original_case( - &msg.head.headers, - orig_headers, - dst, - msg.title_case_headers, - ); - } else if msg.title_case_headers { - write_headers_title_case(&msg.head.headers, dst); - } else { - write_headers(&msg.head.headers, dst); - } - - extend(dst, b"\r\n"); - msg.head.headers.clear(); //TODO: remove when switching to drain() - - Ok(body) - } - - fn on_error(_err: &crate::Error) -> Option> { - // we can't tell the server about any errors it creates - None - } - - fn is_client() -> bool { - true - } -} - -#[cfg(feature = "client")] -impl Client { - /// Returns Some(length, wants_upgrade) if successful. - /// - /// Returns None if this message head should be skipped (like a 100 status). - fn decoder( - inc: &MessageHead, - method: &mut Option, - ) -> Result, Parse> { - // According to https://tools.ietf.org/html/rfc7230#section-3.3.3 - // 1. HEAD responses, and Status 1xx, 204, and 304 cannot have a body. - // 2. 
Status 2xx to a CONNECT cannot have a body. - // 3. Transfer-Encoding: chunked has a chunked body. - // 4. If multiple differing Content-Length headers or invalid, close connection. - // 5. Content-Length header has a sized body. - // 6. (irrelevant to Response) - // 7. Read till EOF. - - match inc.subject.as_u16() { - 101 => { - return Ok(Some((DecodedLength::ZERO, true))); - } - 100 | 102..=199 => { - trace!("ignoring informational response: {}", inc.subject.as_u16()); - return Ok(None); - } - 204 | 304 => return Ok(Some((DecodedLength::ZERO, false))), - _ => (), - } - match *method { - Some(Method::HEAD) => { - return Ok(Some((DecodedLength::ZERO, false))); - } - Some(Method::CONNECT) => { - if let 200..=299 = inc.subject.as_u16() { - return Ok(Some((DecodedLength::ZERO, true))); - } - } - Some(_) => {} - None => { - trace!("Client::decoder is missing the Method"); - } - } - - if inc.headers.contains_key(header::TRANSFER_ENCODING) { - // https://tools.ietf.org/html/rfc7230#section-3.3.3 - // If Transfer-Encoding header is present, and 'chunked' is - // not the final encoding, and this is a Request, then it is - // malformed. A server should respond with 400 Bad Request. 
- if inc.version == Version::HTTP_10 { - debug!("HTTP/1.0 cannot have Transfer-Encoding header"); - Err(Parse::transfer_encoding_unexpected()) - } else if headers::transfer_encoding_is_chunked(&inc.headers) { - Ok(Some((DecodedLength::CHUNKED, false))) - } else { - trace!("not chunked, read till eof"); - Ok(Some((DecodedLength::CLOSE_DELIMITED, false))) - } - } else if let Some(len) = headers::content_length_parse_all(&inc.headers) { - Ok(Some((DecodedLength::checked_new(len)?, false))) - } else if inc.headers.contains_key(header::CONTENT_LENGTH) { - debug!("illegal Content-Length header"); - Err(Parse::content_length_invalid()) - } else { - trace!("neither Transfer-Encoding nor Content-Length"); - Ok(Some((DecodedLength::CLOSE_DELIMITED, false))) - } - } - fn set_length(head: &mut RequestHead, body: Option) -> Encoder { - let body = if let Some(body) = body { - body - } else { - head.headers.remove(header::TRANSFER_ENCODING); - return Encoder::length(0); - }; - - // HTTP/1.0 doesn't know about chunked - let can_chunked = head.version == Version::HTTP_11; - let headers = &mut head.headers; - - // If the user already set specific headers, we should respect them, regardless - // of what the HttpBody knows about itself. They set them for a reason. - - // Because of the borrow checker, we can't check the for an existing - // Content-Length header while holding an `Entry` for the Transfer-Encoding - // header, so unfortunately, we must do the check here, first. - - let existing_con_len = headers::content_length_parse_all(headers); - let mut should_remove_con_len = false; - - if !can_chunked { - // Chunked isn't legal, so if it is set, we need to remove it. 
- if headers.remove(header::TRANSFER_ENCODING).is_some() { - trace!("removing illegal transfer-encoding header"); - } - - return if let Some(len) = existing_con_len { - Encoder::length(len) - } else if let BodyLength::Known(len) = body { - set_content_length(headers, len) - } else { - // HTTP/1.0 client requests without a content-length - // cannot have any body at all. - Encoder::length(0) - }; - } - - // If the user set a transfer-encoding, respect that. Let's just - // make sure `chunked` is the final encoding. - let encoder = match headers.entry(header::TRANSFER_ENCODING) { - Entry::Occupied(te) => { - should_remove_con_len = true; - if headers::is_chunked(te.iter()) { - Some(Encoder::chunked()) - } else { - warn!("user provided transfer-encoding does not end in 'chunked'"); - - // There's a Transfer-Encoding, but it doesn't end in 'chunked'! - // An example that could trigger this: - // - // Transfer-Encoding: gzip - // - // This can be bad, depending on if this is a request or a - // response. - // - // - A request is illegal if there is a `Transfer-Encoding` - // but it doesn't end in `chunked`. - // - A response that has `Transfer-Encoding` but doesn't - // end in `chunked` isn't illegal, it just forces this - // to be close-delimited. - // - // We can try to repair this, by adding `chunked` ourselves. - - headers::add_chunked(te); - Some(Encoder::chunked()) - } - } - Entry::Vacant(te) => { - if let Some(len) = existing_con_len { - Some(Encoder::length(len)) - } else if let BodyLength::Unknown = body { - // GET, HEAD, and CONNECT almost never have bodies. - // - // So instead of sending a "chunked" body with a 0-chunk, - // assume no body here. If you *must* send a body, - // set the headers explicitly. 
- match head.subject.0 { - Method::GET | Method::HEAD | Method::CONNECT => Some(Encoder::length(0)), - _ => { - te.insert(HeaderValue::from_static("chunked")); - Some(Encoder::chunked()) - } - } - } else { - None - } - } - }; - - // This is because we need a second mutable borrow to remove - // content-length header. - if let Some(encoder) = encoder { - if should_remove_con_len && existing_con_len.is_some() { - headers.remove(header::CONTENT_LENGTH); - } - return encoder; - } - - // User didn't set transfer-encoding, AND we know body length, - // so we can just set the Content-Length automatically. - - let len = if let BodyLength::Known(len) = body { - len - } else { - unreachable!("BodyLength::Unknown would set chunked"); - }; - - set_content_length(headers, len) - } -} - -fn set_content_length(headers: &mut HeaderMap, len: u64) -> Encoder { - // At this point, there should not be a valid Content-Length - // header. However, since we'll be indexing in anyways, we can - // warn the user if there was an existing illegal header. - // - // Or at least, we can in theory. It's actually a little bit slower, - // so perhaps only do that while the user is developing/testing. - - if cfg!(debug_assertions) { - match headers.entry(header::CONTENT_LENGTH) { - Entry::Occupied(mut cl) => { - // Internal sanity check, we should have already determined - // that the header was illegal before calling this function. - debug_assert!(headers::content_length_parse_all_values(cl.iter()).is_none()); - // Uh oh, the user set `Content-Length` headers, but set bad ones. - // This would be an illegal message anyways, so let's try to repair - // with our known good length. 
- error!("user provided content-length header was invalid"); - - cl.insert(HeaderValue::from(len)); - Encoder::length(len) - } - Entry::Vacant(cl) => { - cl.insert(HeaderValue::from(len)); - Encoder::length(len) - } - } - } else { - headers.insert(header::CONTENT_LENGTH, HeaderValue::from(len)); - Encoder::length(len) - } -} - -#[derive(Clone, Copy)] -struct HeaderIndices { - name: (usize, usize), - value: (usize, usize), -} - -fn record_header_indices( - bytes: &[u8], - headers: &[httparse::Header<'_>], - indices: &mut [MaybeUninit], -) -> Result<(), crate::error::Parse> { - let bytes_ptr = bytes.as_ptr() as usize; - - for (header, indices) in headers.iter().zip(indices.iter_mut()) { - if header.name.len() >= (1 << 16) { - debug!("header name larger than 64kb: {:?}", header.name); - return Err(crate::error::Parse::TooLarge); - } - let name_start = header.name.as_ptr() as usize - bytes_ptr; - let name_end = name_start + header.name.len(); - let value_start = header.value.as_ptr() as usize - bytes_ptr; - let value_end = value_start + header.value.len(); - - // FIXME(maybe_uninit_extra) - // FIXME(addr_of) - // Currently we don't have `ptr::addr_of_mut` in stable rust or - // MaybeUninit::write, so this is some way of assigning into a MaybeUninit - // safely - let new_header_indices = HeaderIndices { - name: (name_start, name_end), - value: (value_start, value_end), - }; - *indices = MaybeUninit::new(new_header_indices); - } - - Ok(()) -} - -// Write header names as title case. The header name is assumed to be ASCII. 
-fn title_case(dst: &mut Vec, name: &[u8]) { - dst.reserve(name.len()); - - // Ensure first character is uppercased - let mut prev = b'-'; - for &(mut c) in name { - if prev == b'-' { - c.make_ascii_uppercase(); - } - dst.push(c); - prev = c; - } -} - -fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec) { - for (name, value) in headers { - title_case(dst, name.as_str().as_bytes()); - extend(dst, b": "); - extend(dst, value.as_bytes()); - extend(dst, b"\r\n"); - } -} - -fn write_headers(headers: &HeaderMap, dst: &mut Vec) { - for (name, value) in headers { - extend(dst, name.as_str().as_bytes()); - extend(dst, b": "); - extend(dst, value.as_bytes()); - extend(dst, b"\r\n"); - } -} - -#[cold] -fn write_headers_original_case( - headers: &HeaderMap, - orig_case: &HeaderCaseMap, - dst: &mut Vec, - title_case_headers: bool, -) { - // For each header name/value pair, there may be a value in the casemap - // that corresponds to the HeaderValue. So, we iterator all the keys, - // and for each one, try to pair the originally cased name with the value. 
- // - // TODO: consider adding http::HeaderMap::entries() iterator - for name in headers.keys() { - let mut names = orig_case.get_all(name); - - for value in headers.get_all(name) { - if let Some(orig_name) = names.next() { - extend(dst, orig_name.as_ref()); - } else if title_case_headers { - title_case(dst, name.as_str().as_bytes()); - } else { - extend(dst, name.as_str().as_bytes()); - } - - // Wanted for curl test cases that send `X-Custom-Header:\r\n` - if value.is_empty() { - extend(dst, b":\r\n"); - } else { - extend(dst, b": "); - extend(dst, value.as_bytes()); - extend(dst, b"\r\n"); - } - } - } -} - -struct FastWrite<'a>(&'a mut Vec); - -impl<'a> fmt::Write for FastWrite<'a> { - #[inline] - fn write_str(&mut self, s: &str) -> fmt::Result { - extend(self.0, s.as_bytes()); - Ok(()) - } - - #[inline] - fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { - fmt::write(self, args) - } -} - -#[inline] -fn extend(dst: &mut Vec, data: &[u8]) { - dst.extend_from_slice(data); -} - -#[cfg(test)] -mod tests { - use bytes::BytesMut; - - use super::*; - - #[test] - fn test_parse_request() { - let _ = pretty_env_logger::try_init(); - let mut raw = BytesMut::from("GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n"); - let mut method = None; - let msg = Server::parse( - &mut raw, - ParseContext { - cached_headers: &mut None, - req_method: &mut method, - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }, - ) - .unwrap() - .unwrap(); - assert_eq!(raw.len(), 0); - assert_eq!(msg.head.subject.0, crate::Method::GET); - assert_eq!(msg.head.subject.1, 
"/echo"); - assert_eq!(msg.head.version, crate::Version::HTTP_11); - assert_eq!(msg.head.headers.len(), 1); - assert_eq!(msg.head.headers["Host"], "hyper.rs"); - assert_eq!(method, Some(crate::Method::GET)); - } - - #[test] - fn test_parse_response() { - let _ = pretty_env_logger::try_init(); - let mut raw = BytesMut::from("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"); - let ctx = ParseContext { - cached_headers: &mut None, - req_method: &mut Some(crate::Method::GET), - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }; - let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); - assert_eq!(raw.len(), 0); - assert_eq!(msg.head.subject, crate::StatusCode::OK); - assert_eq!(msg.head.version, crate::Version::HTTP_11); - assert_eq!(msg.head.headers.len(), 1); - assert_eq!(msg.head.headers["Content-Length"], "0"); - } - - #[test] - fn test_parse_request_errors() { - let mut raw = BytesMut::from("GET htt:p// HTTP/1.1\r\nHost: hyper.rs\r\n\r\n"); - let ctx = ParseContext { - cached_headers: &mut None, - req_method: &mut None, - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }; - Server::parse(&mut raw, ctx).unwrap_err(); - } - - const 
H09_RESPONSE: &'static str = "Baguettes are super delicious, don't you agree?"; - - #[test] - fn test_parse_response_h09_allowed() { - let _ = pretty_env_logger::try_init(); - let mut raw = BytesMut::from(H09_RESPONSE); - let ctx = ParseContext { - cached_headers: &mut None, - req_method: &mut Some(crate::Method::GET), - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: true, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }; - let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); - assert_eq!(raw, H09_RESPONSE); - assert_eq!(msg.head.subject, crate::StatusCode::OK); - assert_eq!(msg.head.version, crate::Version::HTTP_09); - assert_eq!(msg.head.headers.len(), 0); - } - - #[test] - fn test_parse_response_h09_rejected() { - let _ = pretty_env_logger::try_init(); - let mut raw = BytesMut::from(H09_RESPONSE); - let ctx = ParseContext { - cached_headers: &mut None, - req_method: &mut Some(crate::Method::GET), - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }; - Client::parse(&mut raw, ctx).unwrap_err(); - assert_eq!(raw, H09_RESPONSE); - } - - const RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON: &'static str = - "HTTP/1.1 200 OK\r\nAccess-Control-Allow-Credentials : true\r\n\r\n"; - - 
#[test] - fn test_parse_allow_response_with_spaces_before_colons() { - use httparse::ParserConfig; - - let _ = pretty_env_logger::try_init(); - let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); - let mut h1_parser_config = ParserConfig::default(); - h1_parser_config.allow_spaces_after_header_name_in_responses(true); - let ctx = ParseContext { - cached_headers: &mut None, - req_method: &mut Some(crate::Method::GET), - h1_parser_config, - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }; - let msg = Client::parse(&mut raw, ctx).unwrap().unwrap(); - assert_eq!(raw.len(), 0); - assert_eq!(msg.head.subject, crate::StatusCode::OK); - assert_eq!(msg.head.version, crate::Version::HTTP_11); - assert_eq!(msg.head.headers.len(), 1); - assert_eq!(msg.head.headers["Access-Control-Allow-Credentials"], "true"); - } - - #[test] - fn test_parse_reject_response_with_spaces_before_colons() { - let _ = pretty_env_logger::try_init(); - let mut raw = BytesMut::from(RESPONSE_WITH_WHITESPACE_BETWEEN_HEADER_NAME_AND_COLON); - let ctx = ParseContext { - cached_headers: &mut None, - req_method: &mut Some(crate::Method::GET), - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, 
- }; - Client::parse(&mut raw, ctx).unwrap_err(); - } - - #[test] - fn test_parse_preserve_header_case_in_request() { - let mut raw = - BytesMut::from("GET / HTTP/1.1\r\nHost: hyper.rs\r\nX-BREAD: baguette\r\n\r\n"); - let ctx = ParseContext { - cached_headers: &mut None, - req_method: &mut None, - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: true, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }; - let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap(); - let orig_headers = parsed_message - .head - .extensions - .get::() - .unwrap(); - assert_eq!( - orig_headers - .get_all_internal(&HeaderName::from_static("host")) - .into_iter() - .collect::>(), - vec![&Bytes::from("Host")] - ); - assert_eq!( - orig_headers - .get_all_internal(&HeaderName::from_static("x-bread")) - .into_iter() - .collect::>(), - vec![&Bytes::from("X-BREAD")] - ); - } - - #[test] - fn test_decoder_request() { - fn parse(s: &str) -> ParsedMessage { - let mut bytes = BytesMut::from(s); - Server::parse( - &mut bytes, - ParseContext { - cached_headers: &mut None, - req_method: &mut None, - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }, - ) - .expect("parse ok") - .expect("parse complete") - } - - fn parse_err(s: 
&str, comment: &str) -> crate::error::Parse { - let mut bytes = BytesMut::from(s); - Server::parse( - &mut bytes, - ParseContext { - cached_headers: &mut None, - req_method: &mut None, - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }, - ) - .expect_err(comment) - } - - // no length or transfer-encoding means 0-length body - assert_eq!( - parse( - "\ - GET / HTTP/1.1\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::ZERO - ); - - assert_eq!( - parse( - "\ - POST / HTTP/1.1\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::ZERO - ); - - // transfer-encoding: chunked - assert_eq!( - parse( - "\ - POST / HTTP/1.1\r\n\ - transfer-encoding: chunked\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CHUNKED - ); - - assert_eq!( - parse( - "\ - POST / HTTP/1.1\r\n\ - transfer-encoding: gzip, chunked\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CHUNKED - ); - - assert_eq!( - parse( - "\ - POST / HTTP/1.1\r\n\ - transfer-encoding: gzip\r\n\ - transfer-encoding: chunked\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CHUNKED - ); - - // content-length - assert_eq!( - parse( - "\ - POST / HTTP/1.1\r\n\ - content-length: 10\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::new(10) - ); - - // transfer-encoding and content-length = chunked - assert_eq!( - parse( - "\ - POST / HTTP/1.1\r\n\ - content-length: 10\r\n\ - transfer-encoding: chunked\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CHUNKED - ); - - assert_eq!( - parse( - "\ - POST / HTTP/1.1\r\n\ - transfer-encoding: chunked\r\n\ - content-length: 10\r\n\ - \r\n\ - " - ) - .decode, - 
DecodedLength::CHUNKED - ); - - assert_eq!( - parse( - "\ - POST / HTTP/1.1\r\n\ - transfer-encoding: gzip\r\n\ - content-length: 10\r\n\ - transfer-encoding: chunked\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CHUNKED - ); - - // multiple content-lengths of same value are fine - assert_eq!( - parse( - "\ - POST / HTTP/1.1\r\n\ - content-length: 10\r\n\ - content-length: 10\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::new(10) - ); - - // multiple content-lengths with different values is an error - parse_err( - "\ - POST / HTTP/1.1\r\n\ - content-length: 10\r\n\ - content-length: 11\r\n\ - \r\n\ - ", - "multiple content-lengths", - ); - - // content-length with prefix is not allowed - parse_err( - "\ - POST / HTTP/1.1\r\n\ - content-length: +10\r\n\ - \r\n\ - ", - "prefixed content-length", - ); - - // transfer-encoding that isn't chunked is an error - parse_err( - "\ - POST / HTTP/1.1\r\n\ - transfer-encoding: gzip\r\n\ - \r\n\ - ", - "transfer-encoding but not chunked", - ); - - parse_err( - "\ - POST / HTTP/1.1\r\n\ - transfer-encoding: chunked, gzip\r\n\ - \r\n\ - ", - "transfer-encoding doesn't end in chunked", - ); - - parse_err( - "\ - POST / HTTP/1.1\r\n\ - transfer-encoding: chunked\r\n\ - transfer-encoding: afterlol\r\n\ - \r\n\ - ", - "transfer-encoding multiple lines doesn't end in chunked", - ); - - // http/1.0 - - assert_eq!( - parse( - "\ - POST / HTTP/1.0\r\n\ - content-length: 10\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::new(10) - ); - - // 1.0 doesn't understand chunked, so its an error - parse_err( - "\ - POST / HTTP/1.0\r\n\ - transfer-encoding: chunked\r\n\ - \r\n\ - ", - "1.0 chunked", - ); - } - - #[test] - fn test_decoder_response() { - fn parse(s: &str) -> ParsedMessage { - parse_with_method(s, Method::GET) - } - - fn parse_ignores(s: &str) { - let mut bytes = BytesMut::from(s); - assert!(Client::parse( - &mut bytes, - ParseContext { - cached_headers: &mut None, - req_method: &mut Some(Method::GET), - h1_parser_config: 
Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - } - ) - .expect("parse ok") - .is_none()) - } - - fn parse_with_method(s: &str, m: Method) -> ParsedMessage { - let mut bytes = BytesMut::from(s); - Client::parse( - &mut bytes, - ParseContext { - cached_headers: &mut None, - req_method: &mut Some(m), - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }, - ) - .expect("parse ok") - .expect("parse complete") - } - - fn parse_err(s: &str) -> crate::error::Parse { - let mut bytes = BytesMut::from(s); - Client::parse( - &mut bytes, - ParseContext { - cached_headers: &mut None, - req_method: &mut Some(Method::GET), - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }, - ) - .expect_err("parse should err") - } - - // no content-length or transfer-encoding means 
close-delimited - assert_eq!( - parse( - "\ - HTTP/1.1 200 OK\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CLOSE_DELIMITED - ); - - // 204 and 304 never have a body - assert_eq!( - parse( - "\ - HTTP/1.1 204 No Content\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::ZERO - ); - - assert_eq!( - parse( - "\ - HTTP/1.1 304 Not Modified\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::ZERO - ); - - // content-length - assert_eq!( - parse( - "\ - HTTP/1.1 200 OK\r\n\ - content-length: 8\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::new(8) - ); - - assert_eq!( - parse( - "\ - HTTP/1.1 200 OK\r\n\ - content-length: 8\r\n\ - content-length: 8\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::new(8) - ); - - parse_err( - "\ - HTTP/1.1 200 OK\r\n\ - content-length: 8\r\n\ - content-length: 9\r\n\ - \r\n\ - ", - ); - - parse_err( - "\ - HTTP/1.1 200 OK\r\n\ - content-length: +8\r\n\ - \r\n\ - ", - ); - - // transfer-encoding: chunked - assert_eq!( - parse( - "\ - HTTP/1.1 200 OK\r\n\ - transfer-encoding: chunked\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CHUNKED - ); - - // transfer-encoding not-chunked is close-delimited - assert_eq!( - parse( - "\ - HTTP/1.1 200 OK\r\n\ - transfer-encoding: yolo\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CLOSE_DELIMITED - ); - - // transfer-encoding and content-length = chunked - assert_eq!( - parse( - "\ - HTTP/1.1 200 OK\r\n\ - content-length: 10\r\n\ - transfer-encoding: chunked\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CHUNKED - ); - - // HEAD can have content-length, but not body - assert_eq!( - parse_with_method( - "\ - HTTP/1.1 200 OK\r\n\ - content-length: 8\r\n\ - \r\n\ - ", - Method::HEAD - ) - .decode, - DecodedLength::ZERO - ); - - // CONNECT with 200 never has body - { - let msg = parse_with_method( - "\ - HTTP/1.1 200 OK\r\n\ - \r\n\ - ", - Method::CONNECT, - ); - assert_eq!(msg.decode, DecodedLength::ZERO); - assert!(!msg.keep_alive, "should be upgrade"); - assert!(msg.wants_upgrade, "should be 
upgrade"); - } - - // CONNECT receiving non 200 can have a body - assert_eq!( - parse_with_method( - "\ - HTTP/1.1 400 Bad Request\r\n\ - \r\n\ - ", - Method::CONNECT - ) - .decode, - DecodedLength::CLOSE_DELIMITED - ); - - // 1xx status codes - parse_ignores( - "\ - HTTP/1.1 100 Continue\r\n\ - \r\n\ - ", - ); - - parse_ignores( - "\ - HTTP/1.1 103 Early Hints\r\n\ - \r\n\ - ", - ); - - // 101 upgrade not supported yet - { - let msg = parse( - "\ - HTTP/1.1 101 Switching Protocols\r\n\ - \r\n\ - ", - ); - assert_eq!(msg.decode, DecodedLength::ZERO); - assert!(!msg.keep_alive, "should be last"); - assert!(msg.wants_upgrade, "should be upgrade"); - } - - // http/1.0 - assert_eq!( - parse( - "\ - HTTP/1.0 200 OK\r\n\ - \r\n\ - " - ) - .decode, - DecodedLength::CLOSE_DELIMITED - ); - - // 1.0 doesn't understand chunked - parse_err( - "\ - HTTP/1.0 200 OK\r\n\ - transfer-encoding: chunked\r\n\ - \r\n\ - ", - ); - - // keep-alive - assert!( - parse( - "\ - HTTP/1.1 200 OK\r\n\ - content-length: 0\r\n\ - \r\n\ - " - ) - .keep_alive, - "HTTP/1.1 keep-alive is default" - ); - - assert!( - !parse( - "\ - HTTP/1.1 200 OK\r\n\ - content-length: 0\r\n\ - connection: foo, close, bar\r\n\ - \r\n\ - " - ) - .keep_alive, - "connection close is always close" - ); - - assert!( - !parse( - "\ - HTTP/1.0 200 OK\r\n\ - content-length: 0\r\n\ - \r\n\ - " - ) - .keep_alive, - "HTTP/1.0 close is default" - ); - - assert!( - parse( - "\ - HTTP/1.0 200 OK\r\n\ - content-length: 0\r\n\ - connection: foo, keep-alive, bar\r\n\ - \r\n\ - " - ) - .keep_alive, - "connection keep-alive is always keep-alive" - ); - } - - #[test] - fn test_client_request_encode_title_case() { - use crate::proto::BodyLength; - use http::header::HeaderValue; - - let mut head = MessageHead::default(); - head.headers - .insert("content-length", HeaderValue::from_static("10")); - head.headers - .insert("content-type", HeaderValue::from_static("application/json")); - head.headers.insert("*-*", 
HeaderValue::from_static("o_o")); - - let mut vec = Vec::new(); - Client::encode( - Encode { - head: &mut head, - body: Some(BodyLength::Known(10)), - keep_alive: true, - req_method: &mut None, - title_case_headers: true, - }, - &mut vec, - ) - .unwrap(); - - assert_eq!(vec, b"GET / HTTP/1.1\r\nContent-Length: 10\r\nContent-Type: application/json\r\n*-*: o_o\r\n\r\n".to_vec()); - } - - #[test] - fn test_client_request_encode_orig_case() { - use crate::proto::BodyLength; - use http::header::{HeaderValue, CONTENT_LENGTH}; - - let mut head = MessageHead::default(); - head.headers - .insert("content-length", HeaderValue::from_static("10")); - head.headers - .insert("content-type", HeaderValue::from_static("application/json")); - - let mut orig_headers = HeaderCaseMap::default(); - orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); - head.extensions.insert(orig_headers); - - let mut vec = Vec::new(); - Client::encode( - Encode { - head: &mut head, - body: Some(BodyLength::Known(10)), - keep_alive: true, - req_method: &mut None, - title_case_headers: false, - }, - &mut vec, - ) - .unwrap(); - - assert_eq!( - &*vec, - b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\n\r\n" - .as_ref(), - ); - } - #[test] - fn test_client_request_encode_orig_and_title_case() { - use crate::proto::BodyLength; - use http::header::{HeaderValue, CONTENT_LENGTH}; - - let mut head = MessageHead::default(); - head.headers - .insert("content-length", HeaderValue::from_static("10")); - head.headers - .insert("content-type", HeaderValue::from_static("application/json")); - - let mut orig_headers = HeaderCaseMap::default(); - orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); - head.extensions.insert(orig_headers); - - let mut vec = Vec::new(); - Client::encode( - Encode { - head: &mut head, - body: Some(BodyLength::Known(10)), - keep_alive: true, - req_method: &mut None, - title_case_headers: true, - }, - &mut vec, - ) - .unwrap(); - - assert_eq!( - 
&*vec, - b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\n\r\n" - .as_ref(), - ); - } - - #[test] - fn test_server_encode_connect_method() { - let mut head = MessageHead::default(); - - let mut vec = Vec::new(); - let encoder = Server::encode( - Encode { - head: &mut head, - body: None, - keep_alive: true, - req_method: &mut Some(Method::CONNECT), - title_case_headers: false, - }, - &mut vec, - ) - .unwrap(); - - assert!(encoder.is_last()); - } - - #[test] - fn test_server_response_encode_title_case() { - use crate::proto::BodyLength; - use http::header::HeaderValue; - - let mut head = MessageHead::default(); - head.headers - .insert("content-length", HeaderValue::from_static("10")); - head.headers - .insert("content-type", HeaderValue::from_static("application/json")); - head.headers - .insert("weird--header", HeaderValue::from_static("")); - - let mut vec = Vec::new(); - Server::encode( - Encode { - head: &mut head, - body: Some(BodyLength::Known(10)), - keep_alive: true, - req_method: &mut None, - title_case_headers: true, - }, - &mut vec, - ) - .unwrap(); - - let expected_response = - b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: application/json\r\nWeird--Header: \r\n"; - - assert_eq!(&vec[..expected_response.len()], &expected_response[..]); - } - - #[test] - fn test_server_response_encode_orig_case() { - use crate::proto::BodyLength; - use http::header::{HeaderValue, CONTENT_LENGTH}; - - let mut head = MessageHead::default(); - head.headers - .insert("content-length", HeaderValue::from_static("10")); - head.headers - .insert("content-type", HeaderValue::from_static("application/json")); - - let mut orig_headers = HeaderCaseMap::default(); - orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); - head.extensions.insert(orig_headers); - - let mut vec = Vec::new(); - Server::encode( - Encode { - head: &mut head, - body: Some(BodyLength::Known(10)), - keep_alive: true, - req_method: &mut None, - title_case_headers: 
false, - }, - &mut vec, - ) - .unwrap(); - - let expected_response = - b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\ndate: "; - - assert_eq!(&vec[..expected_response.len()], &expected_response[..]); - } - - #[test] - fn test_server_response_encode_orig_and_title_case() { - use crate::proto::BodyLength; - use http::header::{HeaderValue, CONTENT_LENGTH}; - - let mut head = MessageHead::default(); - head.headers - .insert("content-length", HeaderValue::from_static("10")); - head.headers - .insert("content-type", HeaderValue::from_static("application/json")); - - let mut orig_headers = HeaderCaseMap::default(); - orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into()); - head.extensions.insert(orig_headers); - - let mut vec = Vec::new(); - Server::encode( - Encode { - head: &mut head, - body: Some(BodyLength::Known(10)), - keep_alive: true, - req_method: &mut None, - title_case_headers: true, - }, - &mut vec, - ) - .unwrap(); - - let expected_response = - b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\nDate: "; - - assert_eq!(&vec[..expected_response.len()], &expected_response[..]); - } - - #[test] - fn parse_header_htabs() { - let mut bytes = BytesMut::from("HTTP/1.1 200 OK\r\nserver: hello\tworld\r\n\r\n"); - let parsed = Client::parse( - &mut bytes, - ParseContext { - cached_headers: &mut None, - req_method: &mut Some(Method::GET), - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }, - ) - .expect("parse ok") - .expect("parse complete"); - - assert_eq!(parsed.head.headers["server"], 
"hello\tworld"); - } - - #[test] - fn test_write_headers_orig_case_empty_value() { - let mut headers = HeaderMap::new(); - let name = http::header::HeaderName::from_static("x-empty"); - headers.insert(&name, "".parse().expect("parse empty")); - let mut orig_cases = HeaderCaseMap::default(); - orig_cases.insert(name, Bytes::from_static(b"X-EmptY")); - - let mut dst = Vec::new(); - super::write_headers_original_case(&headers, &orig_cases, &mut dst, false); - - assert_eq!( - dst, b"X-EmptY:\r\n", - "there should be no space between the colon and CRLF" - ); - } - - #[test] - fn test_write_headers_orig_case_multiple_entries() { - let mut headers = HeaderMap::new(); - let name = http::header::HeaderName::from_static("x-empty"); - headers.insert(&name, "a".parse().unwrap()); - headers.append(&name, "b".parse().unwrap()); - - let mut orig_cases = HeaderCaseMap::default(); - orig_cases.insert(name.clone(), Bytes::from_static(b"X-Empty")); - orig_cases.append(name, Bytes::from_static(b"X-EMPTY")); - - let mut dst = Vec::new(); - super::write_headers_original_case(&headers, &orig_cases, &mut dst, false); - - assert_eq!(dst, b"X-Empty: a\r\nX-EMPTY: b\r\n"); - } - - #[cfg(feature = "nightly")] - use test::Bencher; - - #[cfg(feature = "nightly")] - #[bench] - fn bench_parse_incoming(b: &mut Bencher) { - let mut raw = BytesMut::from( - &b"GET /super_long_uri/and_whatever?what_should_we_talk_about/\ - I_wonder/Hard_to_write_in_an_uri_after_all/you_have_to_make\ - _up_the_punctuation_yourself/how_fun_is_that?test=foo&test1=\ - foo1&test2=foo2&test3=foo3&test4=foo4 HTTP/1.1\r\nHost: \ - hyper.rs\r\nAccept: a lot of things\r\nAccept-Charset: \ - utf8\r\nAccept-Encoding: *\r\nAccess-Control-Allow-\ - Credentials: None\r\nAccess-Control-Allow-Origin: None\r\n\ - Access-Control-Allow-Methods: None\r\nAccess-Control-Allow-\ - Headers: None\r\nContent-Encoding: utf8\r\nContent-Security-\ - Policy: None\r\nContent-Type: text/html\r\nOrigin: hyper\ - \r\nSec-Websocket-Extensions: It looks 
super important!\r\n\ - Sec-Websocket-Origin: hyper\r\nSec-Websocket-Version: 4.3\r\ - \nStrict-Transport-Security: None\r\nUser-Agent: hyper\r\n\ - X-Content-Duration: None\r\nX-Content-Security-Policy: None\ - \r\nX-DNSPrefetch-Control: None\r\nX-Frame-Options: \ - Something important obviously\r\nX-Requested-With: Nothing\ - \r\n\r\n"[..], - ); - let len = raw.len(); - let mut headers = Some(HeaderMap::new()); - - b.bytes = len as u64; - b.iter(|| { - let mut msg = Server::parse( - &mut raw, - ParseContext { - cached_headers: &mut headers, - req_method: &mut None, - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - #[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }, - ) - .unwrap() - .unwrap(); - ::test::black_box(&msg); - msg.head.headers.clear(); - headers = Some(msg.head.headers); - restart(&mut raw, len); - }); - - fn restart(b: &mut BytesMut, len: usize) { - b.reserve(1); - unsafe { - b.set_len(len); - } - } - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_parse_short(b: &mut Bencher) { - let s = &b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"[..]; - let mut raw = BytesMut::from(s); - let len = raw.len(); - let mut headers = Some(HeaderMap::new()); - - b.bytes = len as u64; - b.iter(|| { - let mut msg = Server::parse( - &mut raw, - ParseContext { - cached_headers: &mut headers, - req_method: &mut None, - h1_parser_config: Default::default(), - #[cfg(feature = "runtime")] - h1_header_read_timeout: None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_fut: &mut None, - #[cfg(feature = "runtime")] - h1_header_read_timeout_running: &mut false, - preserve_header_case: false, - 
#[cfg(feature = "ffi")] - preserve_header_order: false, - h09_responses: false, - #[cfg(feature = "ffi")] - on_informational: &mut None, - #[cfg(feature = "ffi")] - raw_headers: false, - }, - ) - .unwrap() - .unwrap(); - ::test::black_box(&msg); - msg.head.headers.clear(); - headers = Some(msg.head.headers); - restart(&mut raw, len); - }); - - fn restart(b: &mut BytesMut, len: usize) { - b.reserve(1); - unsafe { - b.set_len(len); - } - } - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_server_encode_headers_preset(b: &mut Bencher) { - use crate::proto::BodyLength; - use http::header::HeaderValue; - - let len = 108; - b.bytes = len as u64; - - let mut head = MessageHead::default(); - let mut headers = HeaderMap::new(); - headers.insert("content-length", HeaderValue::from_static("10")); - headers.insert("content-type", HeaderValue::from_static("application/json")); - - b.iter(|| { - let mut vec = Vec::new(); - head.headers = headers.clone(); - Server::encode( - Encode { - head: &mut head, - body: Some(BodyLength::Known(10)), - keep_alive: true, - req_method: &mut Some(Method::GET), - title_case_headers: false, - }, - &mut vec, - ) - .unwrap(); - assert_eq!(vec.len(), len); - ::test::black_box(vec); - }) - } - - #[cfg(feature = "nightly")] - #[bench] - fn bench_server_encode_no_headers(b: &mut Bencher) { - use crate::proto::BodyLength; - - let len = 76; - b.bytes = len as u64; - - let mut head = MessageHead::default(); - let mut vec = Vec::with_capacity(128); - - b.iter(|| { - Server::encode( - Encode { - head: &mut head, - body: Some(BodyLength::Known(10)), - keep_alive: true, - req_method: &mut Some(Method::GET), - title_case_headers: false, - }, - &mut vec, - ) - .unwrap(); - assert_eq!(vec.len(), len); - ::test::black_box(&vec); - - vec.clear(); - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h2/client.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h2/client.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h2/client.rs 2024-02-06 
12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h2/client.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,450 +0,0 @@ -use std::error::Error as StdError; -#[cfg(feature = "runtime")] -use std::time::Duration; - -use bytes::Bytes; -use futures_channel::{mpsc, oneshot}; -use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; -use futures_util::stream::StreamExt as _; -use h2::client::{Builder, SendRequest}; -use h2::SendStream; -use http::{Method, StatusCode}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, trace, warn}; - -use super::{ping, H2Upgraded, PipeToSendStream, SendBuf}; -use crate::body::HttpBody; -use crate::client::dispatch::Callback; -use crate::common::{exec::Exec, task, Future, Never, Pin, Poll}; -use crate::ext::Protocol; -use crate::headers; -use crate::proto::h2::UpgradedSendStream; -use crate::proto::Dispatched; -use crate::upgrade::Upgraded; -use crate::{Body, Request, Response}; -use h2::client::ResponseFuture; - -type ClientRx = crate::client::dispatch::Receiver, Response>; - -///// An mpsc channel is used to help notify the `Connection` task when *all* -///// other handles to it have been dropped, so that it can shutdown. -type ConnDropRef = mpsc::Sender; - -///// A oneshot channel watches the `Connection` task, and when it completes, -///// the "dispatch" task will be notified and can shutdown sooner. -type ConnEof = oneshot::Receiver; - -// Our defaults are chosen for the "majority" case, which usually are not -// resource constrained, and so the spec default of 64kb can be too limiting -// for performance. 
-const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024 * 5; // 5mb -const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024 * 2; // 2mb -const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb -const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 1024; // 1mb - -#[derive(Clone, Debug)] -pub(crate) struct Config { - pub(crate) adaptive_window: bool, - pub(crate) initial_conn_window_size: u32, - pub(crate) initial_stream_window_size: u32, - pub(crate) max_frame_size: u32, - #[cfg(feature = "runtime")] - pub(crate) keep_alive_interval: Option, - #[cfg(feature = "runtime")] - pub(crate) keep_alive_timeout: Duration, - #[cfg(feature = "runtime")] - pub(crate) keep_alive_while_idle: bool, - pub(crate) max_concurrent_reset_streams: Option, - pub(crate) max_send_buffer_size: usize, -} - -impl Default for Config { - fn default() -> Config { - Config { - adaptive_window: false, - initial_conn_window_size: DEFAULT_CONN_WINDOW, - initial_stream_window_size: DEFAULT_STREAM_WINDOW, - max_frame_size: DEFAULT_MAX_FRAME_SIZE, - #[cfg(feature = "runtime")] - keep_alive_interval: None, - #[cfg(feature = "runtime")] - keep_alive_timeout: Duration::from_secs(20), - #[cfg(feature = "runtime")] - keep_alive_while_idle: false, - max_concurrent_reset_streams: None, - max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, - } - } -} - -fn new_builder(config: &Config) -> Builder { - let mut builder = Builder::default(); - builder - .initial_window_size(config.initial_stream_window_size) - .initial_connection_window_size(config.initial_conn_window_size) - .max_frame_size(config.max_frame_size) - .max_send_buffer_size(config.max_send_buffer_size) - .enable_push(false); - if let Some(max) = config.max_concurrent_reset_streams { - builder.max_concurrent_reset_streams(max); - } - builder -} - -fn new_ping_config(config: &Config) -> ping::Config { - ping::Config { - bdp_initial_window: if config.adaptive_window { - Some(config.initial_stream_window_size) - } else { - None - }, - #[cfg(feature = "runtime")] - 
keep_alive_interval: config.keep_alive_interval, - #[cfg(feature = "runtime")] - keep_alive_timeout: config.keep_alive_timeout, - #[cfg(feature = "runtime")] - keep_alive_while_idle: config.keep_alive_while_idle, - } -} - -pub(crate) async fn handshake( - io: T, - req_rx: ClientRx, - config: &Config, - exec: Exec, -) -> crate::Result> -where - T: AsyncRead + AsyncWrite + Send + Unpin + 'static, - B: HttpBody, - B::Data: Send + 'static, -{ - let (h2_tx, mut conn) = new_builder(config) - .handshake::<_, SendBuf>(io) - .await - .map_err(crate::Error::new_h2)?; - - // An mpsc channel is used entirely to detect when the - // 'Client' has been dropped. This is to get around a bug - // in h2 where dropping all SendRequests won't notify a - // parked Connection. - let (conn_drop_ref, rx) = mpsc::channel(1); - let (cancel_tx, conn_eof) = oneshot::channel(); - - let conn_drop_rx = rx.into_future().map(|(item, _rx)| { - if let Some(never) = item { - match never {} - } - }); - - let ping_config = new_ping_config(&config); - - let (conn, ping) = if ping_config.is_enabled() { - let pp = conn.ping_pong().expect("conn.ping_pong"); - let (recorder, mut ponger) = ping::channel(pp, ping_config); - - let conn = future::poll_fn(move |cx| { - match ponger.poll(cx) { - Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { - conn.set_target_window_size(wnd); - conn.set_initial_window_size(wnd)?; - } - #[cfg(feature = "runtime")] - Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { - debug!("connection keep-alive timed out"); - return Poll::Ready(Ok(())); - } - Poll::Pending => {} - } - - Pin::new(&mut conn).poll(cx) - }); - (Either::Left(conn), recorder) - } else { - (Either::Right(conn), ping::disabled()) - }; - let conn = conn.map_err(|e| debug!("connection error: {}", e)); - - exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); - - Ok(ClientTask { - ping, - conn_drop_ref, - conn_eof, - executor: exec, - h2_tx, - req_rx, - fut_ctx: None, - }) -} - -async fn conn_task(conn: C, drop_rx: D, 
cancel_tx: oneshot::Sender) -where - C: Future + Unpin, - D: Future + Unpin, -{ - match future::select(conn, drop_rx).await { - Either::Left(_) => { - // ok or err, the `conn` has finished - } - Either::Right(((), conn)) => { - // mpsc has been dropped, hopefully polling - // the connection some more should start shutdown - // and then close - trace!("send_request dropped, starting conn shutdown"); - drop(cancel_tx); - let _ = conn.await; - } - } -} - -struct FutCtx -where - B: HttpBody, -{ - is_connect: bool, - eos: bool, - fut: ResponseFuture, - body_tx: SendStream>, - body: B, - cb: Callback, Response>, -} - -impl Unpin for FutCtx {} - -pub(crate) struct ClientTask -where - B: HttpBody, -{ - ping: ping::Recorder, - conn_drop_ref: ConnDropRef, - conn_eof: ConnEof, - executor: Exec, - h2_tx: SendRequest>, - req_rx: ClientRx, - fut_ctx: Option>, -} - -impl ClientTask -where - B: HttpBody + 'static, -{ - pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { - self.h2_tx.is_extended_connect_protocol_enabled() - } -} - -impl ClientTask -where - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - fn poll_pipe(&mut self, f: FutCtx, cx: &mut task::Context<'_>) { - let ping = self.ping.clone(); - let send_stream = if !f.is_connect { - if !f.eos { - let mut pipe = Box::pin(PipeToSendStream::new(f.body, f.body_tx)).map(|res| { - if let Err(e) = res { - debug!("client request body error: {}", e); - } - }); - - // eagerly see if the body pipe is ready and - // can thus skip allocating in the executor - match Pin::new(&mut pipe).poll(cx) { - Poll::Ready(_) => (), - Poll::Pending => { - let conn_drop_ref = self.conn_drop_ref.clone(); - // keep the ping recorder's knowledge of an - // "open stream" alive while this body is - // still sending... 
- let ping = ping.clone(); - let pipe = pipe.map(move |x| { - drop(conn_drop_ref); - drop(ping); - x - }); - // Clear send task - self.executor.execute(pipe); - } - } - } - - None - } else { - Some(f.body_tx) - }; - - let fut = f.fut.map(move |result| match result { - Ok(res) => { - // record that we got the response headers - ping.record_non_data(); - - let content_length = headers::content_length_parse_all(res.headers()); - if let (Some(mut send_stream), StatusCode::OK) = (send_stream, res.status()) { - if content_length.map_or(false, |len| len != 0) { - warn!("h2 connect response with non-zero body not supported"); - - send_stream.send_reset(h2::Reason::INTERNAL_ERROR); - return Err(( - crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), - None, - )); - } - let (parts, recv_stream) = res.into_parts(); - let mut res = Response::from_parts(parts, Body::empty()); - - let (pending, on_upgrade) = crate::upgrade::pending(); - let io = H2Upgraded { - ping, - send_stream: unsafe { UpgradedSendStream::new(send_stream) }, - recv_stream, - buf: Bytes::new(), - }; - let upgraded = Upgraded::new(io, Bytes::new()); - - pending.fulfill(upgraded); - res.extensions_mut().insert(on_upgrade); - - Ok(res) - } else { - let res = res.map(|stream| { - let ping = ping.for_stream(&stream); - crate::Body::h2(stream, content_length.into(), ping) - }); - Ok(res) - } - } - Err(err) => { - ping.ensure_not_timed_out().map_err(|e| (e, None))?; - - debug!("client response error: {}", err); - Err((crate::Error::new_h2(err), None)) - } - }); - self.executor.execute(f.cb.send_when(fut)); - } -} - -impl Future for ClientTask -where - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into>, -{ - type Output = crate::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - loop { - match ready!(self.h2_tx.poll_ready(cx)) { - Ok(()) => (), - Err(err) => { - self.ping.ensure_not_timed_out()?; - return if err.reason() == Some(::h2::Reason::NO_ERROR) { - 
trace!("connection gracefully shutdown"); - Poll::Ready(Ok(Dispatched::Shutdown)) - } else { - Poll::Ready(Err(crate::Error::new_h2(err))) - }; - } - }; - - match self.fut_ctx.take() { - // If we were waiting on pending open - // continue where we left off. - Some(f) => { - self.poll_pipe(f, cx); - continue; - } - None => (), - } - - match self.req_rx.poll_recv(cx) { - Poll::Ready(Some((req, cb))) => { - // check that future hasn't been canceled already - if cb.is_canceled() { - trace!("request callback is canceled"); - continue; - } - let (head, body) = req.into_parts(); - let mut req = ::http::Request::from_parts(head, ()); - super::strip_connection_headers(req.headers_mut(), true); - if let Some(len) = body.size_hint().exact() { - if len != 0 || headers::method_has_defined_payload_semantics(req.method()) { - headers::set_content_length_if_missing(req.headers_mut(), len); - } - } - - let is_connect = req.method() == Method::CONNECT; - let eos = body.is_end_stream(); - - if is_connect { - if headers::content_length_parse_all(req.headers()) - .map_or(false, |len| len != 0) - { - warn!("h2 connect request with non-zero body not supported"); - cb.send(Err(( - crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), - None, - ))); - continue; - } - } - - if let Some(protocol) = req.extensions_mut().remove::() { - req.extensions_mut().insert(protocol.into_inner()); - } - - let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) { - Ok(ok) => ok, - Err(err) => { - debug!("client send request error: {}", err); - cb.send(Err((crate::Error::new_h2(err), None))); - continue; - } - }; - - let f = FutCtx { - is_connect, - eos, - fut, - body_tx, - body, - cb, - }; - - // Check poll_ready() again. - // If the call to send_request() resulted in the new stream being pending open - // we have to wait for the open to complete before accepting new requests. 
- match self.h2_tx.poll_ready(cx) { - Poll::Pending => { - // Save Context - self.fut_ctx = Some(f); - return Poll::Pending; - } - Poll::Ready(Ok(())) => (), - Poll::Ready(Err(err)) => { - f.cb.send(Err((crate::Error::new_h2(err), None))); - continue; - } - } - self.poll_pipe(f, cx); - continue; - } - - Poll::Ready(None) => { - trace!("client::dispatch::Sender dropped"); - return Poll::Ready(Ok(Dispatched::Shutdown)); - } - - Poll::Pending => match ready!(Pin::new(&mut self.conn_eof).poll(cx)) { - Ok(never) => match never {}, - Err(_conn_is_eof) => { - trace!("connection task is closed, closing dispatch task"); - return Poll::Ready(Ok(Dispatched::Shutdown)); - } - }, - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h2/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h2/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h2/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h2/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,471 +0,0 @@ -use bytes::{Buf, Bytes}; -use h2::{Reason, RecvStream, SendStream}; -use http::header::{HeaderName, CONNECTION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE}; -use http::HeaderMap; -use pin_project_lite::pin_project; -use std::error::Error as StdError; -use std::io::{self, Cursor, IoSlice}; -use std::mem; -use std::task::Context; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tracing::{debug, trace, warn}; - -use crate::body::HttpBody; -use crate::common::{task, Future, Pin, Poll}; -use crate::proto::h2::ping::Recorder; - -pub(crate) mod ping; - -cfg_client! { - pub(crate) mod client; - pub(crate) use self::client::ClientTask; -} - -cfg_server! { - pub(crate) mod server; - pub(crate) use self::server::Server; -} - -/// Default initial stream window size defined in HTTP2 spec. 
-pub(crate) const SPEC_WINDOW_SIZE: u32 = 65_535; - -fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { - // List of connection headers from: - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection - // - // TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're - // tested separately. - let connection_headers = [ - HeaderName::from_lowercase(b"keep-alive").unwrap(), - HeaderName::from_lowercase(b"proxy-connection").unwrap(), - TRAILER, - TRANSFER_ENCODING, - UPGRADE, - ]; - - for header in connection_headers.iter() { - if headers.remove(header).is_some() { - warn!("Connection header illegal in HTTP/2: {}", header.as_str()); - } - } - - if is_request { - if headers - .get(TE) - .map(|te_header| te_header != "trailers") - .unwrap_or(false) - { - warn!("TE headers not set to \"trailers\" are illegal in HTTP/2 requests"); - headers.remove(TE); - } - } else if headers.remove(TE).is_some() { - warn!("TE headers illegal in HTTP/2 responses"); - } - - if let Some(header) = headers.remove(CONNECTION) { - warn!( - "Connection header illegal in HTTP/2: {}", - CONNECTION.as_str() - ); - let header_contents = header.to_str().unwrap(); - - // A `Connection` header may have a comma-separated list of names of other headers that - // are meant for only this specific connection. - // - // Iterate these names and remove them as headers. Connection-specific headers are - // forbidden in HTTP2, as that information has been moved into frame types of the h2 - // protocol. - for name in header_contents.split(',') { - let name = name.trim(); - headers.remove(name); - } - } -} - -// body adapters used by both Client and Server - -pin_project! 
{ - struct PipeToSendStream - where - S: HttpBody, - { - body_tx: SendStream>, - data_done: bool, - #[pin] - stream: S, - } -} - -impl PipeToSendStream -where - S: HttpBody, -{ - fn new(stream: S, tx: SendStream>) -> PipeToSendStream { - PipeToSendStream { - body_tx: tx, - data_done: false, - stream, - } - } -} - -impl Future for PipeToSendStream -where - S: HttpBody, - S::Error: Into>, -{ - type Output = crate::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let mut me = self.project(); - loop { - if !*me.data_done { - // we don't have the next chunk of data yet, so just reserve 1 byte to make - // sure there's some capacity available. h2 will handle the capacity management - // for the actual body chunk. - me.body_tx.reserve_capacity(1); - - if me.body_tx.capacity() == 0 { - loop { - match ready!(me.body_tx.poll_capacity(cx)) { - Some(Ok(0)) => {} - Some(Ok(_)) => break, - Some(Err(e)) => { - return Poll::Ready(Err(crate::Error::new_body_write(e))) - } - None => { - // None means the stream is no longer in a - // streaming state, we either finished it - // somehow, or the remote reset us. - return Poll::Ready(Err(crate::Error::new_body_write( - "send stream capacity unexpectedly closed", - ))); - } - } - } - } else if let Poll::Ready(reason) = me - .body_tx - .poll_reset(cx) - .map_err(crate::Error::new_body_write)? 
- { - debug!("stream received RST_STREAM: {:?}", reason); - return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from( - reason, - )))); - } - - match ready!(me.stream.as_mut().poll_data(cx)) { - Some(Ok(chunk)) => { - let is_eos = me.stream.is_end_stream(); - trace!( - "send body chunk: {} bytes, eos={}", - chunk.remaining(), - is_eos, - ); - - let buf = SendBuf::Buf(chunk); - me.body_tx - .send_data(buf, is_eos) - .map_err(crate::Error::new_body_write)?; - - if is_eos { - return Poll::Ready(Ok(())); - } - } - Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))), - None => { - me.body_tx.reserve_capacity(0); - let is_eos = me.stream.is_end_stream(); - if is_eos { - return Poll::Ready(me.body_tx.send_eos_frame()); - } else { - *me.data_done = true; - // loop again to poll_trailers - } - } - } - } else { - if let Poll::Ready(reason) = me - .body_tx - .poll_reset(cx) - .map_err(crate::Error::new_body_write)? - { - debug!("stream received RST_STREAM: {:?}", reason); - return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from( - reason, - )))); - } - - match ready!(me.stream.poll_trailers(cx)) { - Ok(Some(trailers)) => { - me.body_tx - .send_trailers(trailers) - .map_err(crate::Error::new_body_write)?; - return Poll::Ready(Ok(())); - } - Ok(None) => { - // There were no trailers, so send an empty DATA frame... 
- return Poll::Ready(me.body_tx.send_eos_frame()); - } - Err(e) => return Poll::Ready(Err(me.body_tx.on_user_err(e))), - } - } - } - } -} - -trait SendStreamExt { - fn on_user_err(&mut self, err: E) -> crate::Error - where - E: Into>; - fn send_eos_frame(&mut self) -> crate::Result<()>; -} - -impl SendStreamExt for SendStream> { - fn on_user_err(&mut self, err: E) -> crate::Error - where - E: Into>, - { - let err = crate::Error::new_user_body(err); - debug!("send body user stream error: {}", err); - self.send_reset(err.h2_reason()); - err - } - - fn send_eos_frame(&mut self) -> crate::Result<()> { - trace!("send body eos"); - self.send_data(SendBuf::None, true) - .map_err(crate::Error::new_body_write) - } -} - -#[repr(usize)] -enum SendBuf { - Buf(B), - Cursor(Cursor>), - None, -} - -impl Buf for SendBuf { - #[inline] - fn remaining(&self) -> usize { - match *self { - Self::Buf(ref b) => b.remaining(), - Self::Cursor(ref c) => Buf::remaining(c), - Self::None => 0, - } - } - - #[inline] - fn chunk(&self) -> &[u8] { - match *self { - Self::Buf(ref b) => b.chunk(), - Self::Cursor(ref c) => c.chunk(), - Self::None => &[], - } - } - - #[inline] - fn advance(&mut self, cnt: usize) { - match *self { - Self::Buf(ref mut b) => b.advance(cnt), - Self::Cursor(ref mut c) => c.advance(cnt), - Self::None => {} - } - } - - fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { - match *self { - Self::Buf(ref b) => b.chunks_vectored(dst), - Self::Cursor(ref c) => c.chunks_vectored(dst), - Self::None => 0, - } - } -} - -struct H2Upgraded -where - B: Buf, -{ - ping: Recorder, - send_stream: UpgradedSendStream, - recv_stream: RecvStream, - buf: Bytes, -} - -impl AsyncRead for H2Upgraded -where - B: Buf, -{ - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - read_buf: &mut ReadBuf<'_>, - ) -> Poll> { - if self.buf.is_empty() { - self.buf = loop { - match ready!(self.recv_stream.poll_data(cx)) { - None => return Poll::Ready(Ok(())), - Some(Ok(buf)) if 
buf.is_empty() && !self.recv_stream.is_end_stream() => { - continue - } - Some(Ok(buf)) => { - self.ping.record_data(buf.len()); - break buf; - } - Some(Err(e)) => { - return Poll::Ready(match e.reason() { - Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()), - Some(Reason::STREAM_CLOSED) => { - Err(io::Error::new(io::ErrorKind::BrokenPipe, e)) - } - _ => Err(h2_to_io_error(e)), - }) - } - } - }; - } - let cnt = std::cmp::min(self.buf.len(), read_buf.remaining()); - read_buf.put_slice(&self.buf[..cnt]); - self.buf.advance(cnt); - let _ = self.recv_stream.flow_control().release_capacity(cnt); - Poll::Ready(Ok(())) - } -} - -impl AsyncWrite for H2Upgraded -where - B: Buf, -{ - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - if buf.is_empty() { - return Poll::Ready(Ok(0)); - } - self.send_stream.reserve_capacity(buf.len()); - - // We ignore all errors returned by `poll_capacity` and `write`, as we - // will get the correct from `poll_reset` anyway. 
- let cnt = match ready!(self.send_stream.poll_capacity(cx)) { - None => Some(0), - Some(Ok(cnt)) => self - .send_stream - .write(&buf[..cnt], false) - .ok() - .map(|()| cnt), - Some(Err(_)) => None, - }; - - if let Some(cnt) = cnt { - return Poll::Ready(Ok(cnt)); - } - - Poll::Ready(Err(h2_to_io_error( - match ready!(self.send_stream.poll_reset(cx)) { - Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { - return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) - } - Ok(reason) => reason.into(), - Err(e) => e, - }, - ))) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - if self.send_stream.write(&[], true).is_ok() { - return Poll::Ready(Ok(())) - } - - Poll::Ready(Err(h2_to_io_error( - match ready!(self.send_stream.poll_reset(cx)) { - Ok(Reason::NO_ERROR) => { - return Poll::Ready(Ok(())) - } - Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { - return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) - } - Ok(reason) => reason.into(), - Err(e) => e, - }, - ))) - } -} - -fn h2_to_io_error(e: h2::Error) -> io::Error { - if e.is_io() { - e.into_io().unwrap() - } else { - io::Error::new(io::ErrorKind::Other, e) - } -} - -struct UpgradedSendStream(SendStream>>); - -impl UpgradedSendStream -where - B: Buf, -{ - unsafe fn new(inner: SendStream>) -> Self { - assert_eq!(mem::size_of::(), mem::size_of::>()); - Self(mem::transmute(inner)) - } - - fn reserve_capacity(&mut self, cnt: usize) { - unsafe { self.as_inner_unchecked().reserve_capacity(cnt) } - } - - fn poll_capacity(&mut self, cx: &mut Context<'_>) -> Poll>> { - unsafe { self.as_inner_unchecked().poll_capacity(cx) } - } - - fn poll_reset(&mut self, cx: &mut Context<'_>) -> Poll> { - unsafe { self.as_inner_unchecked().poll_reset(cx) } - } - - fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), io::Error> { - let send_buf = 
SendBuf::Cursor(Cursor::new(buf.into())); - unsafe { - self.as_inner_unchecked() - .send_data(send_buf, end_of_stream) - .map_err(h2_to_io_error) - } - } - - unsafe fn as_inner_unchecked(&mut self) -> &mut SendStream> { - &mut *(&mut self.0 as *mut _ as *mut _) - } -} - -#[repr(transparent)] -struct Neutered { - _inner: B, - impossible: Impossible, -} - -enum Impossible {} - -unsafe impl Send for Neutered {} - -impl Buf for Neutered { - fn remaining(&self) -> usize { - match self.impossible {} - } - - fn chunk(&self) -> &[u8] { - match self.impossible {} - } - - fn advance(&mut self, _cnt: usize) { - match self.impossible {} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h2/ping.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h2/ping.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h2/ping.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h2/ping.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,555 +0,0 @@ -/// HTTP2 Ping usage -/// -/// hyper uses HTTP2 pings for two purposes: -/// -/// 1. Adaptive flow control using BDP -/// 2. Connection keep-alive -/// -/// Both cases are optional. -/// -/// # BDP Algorithm -/// -/// 1. When receiving a DATA frame, if a BDP ping isn't outstanding: -/// 1a. Record current time. -/// 1b. Send a BDP ping. -/// 2. Increment the number of received bytes. -/// 3. When the BDP ping ack is received: -/// 3a. Record duration from sent time. -/// 3b. Merge RTT with a running average. -/// 3c. Calculate bdp as bytes/rtt. -/// 3d. If bdp is over 2/3 max, set new max to bdp and update windows. 
- -#[cfg(feature = "runtime")] -use std::fmt; -#[cfg(feature = "runtime")] -use std::future::Future; -#[cfg(feature = "runtime")] -use std::pin::Pin; -use std::sync::{Arc, Mutex}; -use std::task::{self, Poll}; -use std::time::Duration; -#[cfg(not(feature = "runtime"))] -use std::time::Instant; - -use h2::{Ping, PingPong}; -#[cfg(feature = "runtime")] -use tokio::time::{Instant, Sleep}; -use tracing::{debug, trace}; - -type WindowSize = u32; - -pub(super) fn disabled() -> Recorder { - Recorder { shared: None } -} - -pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) { - debug_assert!( - config.is_enabled(), - "ping channel requires bdp or keep-alive config", - ); - - let bdp = config.bdp_initial_window.map(|wnd| Bdp { - bdp: wnd, - max_bandwidth: 0.0, - rtt: 0.0, - ping_delay: Duration::from_millis(100), - stable_count: 0, - }); - - let (bytes, next_bdp_at) = if bdp.is_some() { - (Some(0), Some(Instant::now())) - } else { - (None, None) - }; - - #[cfg(feature = "runtime")] - let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive { - interval, - timeout: config.keep_alive_timeout, - while_idle: config.keep_alive_while_idle, - timer: Box::pin(tokio::time::sleep(interval)), - state: KeepAliveState::Init, - }); - - #[cfg(feature = "runtime")] - let last_read_at = keep_alive.as_ref().map(|_| Instant::now()); - - let shared = Arc::new(Mutex::new(Shared { - bytes, - #[cfg(feature = "runtime")] - last_read_at, - #[cfg(feature = "runtime")] - is_keep_alive_timed_out: false, - ping_pong, - ping_sent_at: None, - next_bdp_at, - })); - - ( - Recorder { - shared: Some(shared.clone()), - }, - Ponger { - bdp, - #[cfg(feature = "runtime")] - keep_alive, - shared, - }, - ) -} - -#[derive(Clone)] -pub(super) struct Config { - pub(super) bdp_initial_window: Option, - /// If no frames are received in this amount of time, a PING frame is sent. 
- #[cfg(feature = "runtime")] - pub(super) keep_alive_interval: Option, - /// After sending a keepalive PING, the connection will be closed if - /// a pong is not received in this amount of time. - #[cfg(feature = "runtime")] - pub(super) keep_alive_timeout: Duration, - /// If true, sends pings even when there are no active streams. - #[cfg(feature = "runtime")] - pub(super) keep_alive_while_idle: bool, -} - -#[derive(Clone)] -pub(crate) struct Recorder { - shared: Option>>, -} - -pub(super) struct Ponger { - bdp: Option, - #[cfg(feature = "runtime")] - keep_alive: Option, - shared: Arc>, -} - -struct Shared { - ping_pong: PingPong, - ping_sent_at: Option, - - // bdp - /// If `Some`, bdp is enabled, and this tracks how many bytes have been - /// read during the current sample. - bytes: Option, - /// We delay a variable amount of time between BDP pings. This allows us - /// to send less pings as the bandwidth stabilizes. - next_bdp_at: Option, - - // keep-alive - /// If `Some`, keep-alive is enabled, and the Instant is how long ago - /// the connection read the last frame. - #[cfg(feature = "runtime")] - last_read_at: Option, - - #[cfg(feature = "runtime")] - is_keep_alive_timed_out: bool, -} - -struct Bdp { - /// Current BDP in bytes - bdp: u32, - /// Largest bandwidth we've seen so far. - max_bandwidth: f64, - /// Round trip time in seconds - rtt: f64, - /// Delay the next ping by this amount. - /// - /// This will change depending on how stable the current bandwidth is. - ping_delay: Duration, - /// The count of ping round trips where BDP has stayed the same. - stable_count: u32, -} - -#[cfg(feature = "runtime")] -struct KeepAlive { - /// If no frames are received in this amount of time, a PING frame is sent. - interval: Duration, - /// After sending a keepalive PING, the connection will be closed if - /// a pong is not received in this amount of time. - timeout: Duration, - /// If true, sends pings even when there are no active streams. 
- while_idle: bool, - - state: KeepAliveState, - timer: Pin>, -} - -#[cfg(feature = "runtime")] -enum KeepAliveState { - Init, - Scheduled, - PingSent, -} - -pub(super) enum Ponged { - SizeUpdate(WindowSize), - #[cfg(feature = "runtime")] - KeepAliveTimedOut, -} - -#[cfg(feature = "runtime")] -#[derive(Debug)] -pub(super) struct KeepAliveTimedOut; - -// ===== impl Config ===== - -impl Config { - pub(super) fn is_enabled(&self) -> bool { - #[cfg(feature = "runtime")] - { - self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some() - } - - #[cfg(not(feature = "runtime"))] - { - self.bdp_initial_window.is_some() - } - } -} - -// ===== impl Recorder ===== - -impl Recorder { - pub(crate) fn record_data(&self, len: usize) { - let shared = if let Some(ref shared) = self.shared { - shared - } else { - return; - }; - - let mut locked = shared.lock().unwrap(); - - #[cfg(feature = "runtime")] - locked.update_last_read_at(); - - // are we ready to send another bdp ping? - // if not, we don't need to record bytes either - - if let Some(ref next_bdp_at) = locked.next_bdp_at { - if Instant::now() < *next_bdp_at { - return; - } else { - locked.next_bdp_at = None; - } - } - - if let Some(ref mut bytes) = locked.bytes { - *bytes += len; - } else { - // no need to send bdp ping if bdp is disabled - return; - } - - if !locked.is_ping_sent() { - locked.send_ping(); - } - } - - pub(crate) fn record_non_data(&self) { - #[cfg(feature = "runtime")] - { - let shared = if let Some(ref shared) = self.shared { - shared - } else { - return; - }; - - let mut locked = shared.lock().unwrap(); - - locked.update_last_read_at(); - } - } - - /// If the incoming stream is already closed, convert self into - /// a disabled reporter. 
- #[cfg(feature = "client")] - pub(super) fn for_stream(self, stream: &h2::RecvStream) -> Self { - if stream.is_end_stream() { - disabled() - } else { - self - } - } - - pub(super) fn ensure_not_timed_out(&self) -> crate::Result<()> { - #[cfg(feature = "runtime")] - { - if let Some(ref shared) = self.shared { - let locked = shared.lock().unwrap(); - if locked.is_keep_alive_timed_out { - return Err(KeepAliveTimedOut.crate_error()); - } - } - } - - // else - Ok(()) - } -} - -// ===== impl Ponger ===== - -impl Ponger { - pub(super) fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll { - let now = Instant::now(); - let mut locked = self.shared.lock().unwrap(); - #[cfg(feature = "runtime")] - let is_idle = self.is_idle(); - - #[cfg(feature = "runtime")] - { - if let Some(ref mut ka) = self.keep_alive { - ka.schedule(is_idle, &locked); - ka.maybe_ping(cx, &mut locked); - } - } - - if !locked.is_ping_sent() { - // XXX: this doesn't register a waker...? - return Poll::Pending; - } - - match locked.ping_pong.poll_pong(cx) { - Poll::Ready(Ok(_pong)) => { - let start = locked - .ping_sent_at - .expect("pong received implies ping_sent_at"); - locked.ping_sent_at = None; - let rtt = now - start; - trace!("recv pong"); - - #[cfg(feature = "runtime")] - { - if let Some(ref mut ka) = self.keep_alive { - locked.update_last_read_at(); - ka.schedule(is_idle, &locked); - } - } - - if let Some(ref mut bdp) = self.bdp { - let bytes = locked.bytes.expect("bdp enabled implies bytes"); - locked.bytes = Some(0); // reset - trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt); - - let update = bdp.calculate(bytes, rtt); - locked.next_bdp_at = Some(now + bdp.ping_delay); - if let Some(update) = update { - return Poll::Ready(Ponged::SizeUpdate(update)) - } - } - } - Poll::Ready(Err(e)) => { - debug!("pong error: {}", e); - } - Poll::Pending => { - #[cfg(feature = "runtime")] - { - if let Some(ref mut ka) = self.keep_alive { - if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) { 
- self.keep_alive = None; - locked.is_keep_alive_timed_out = true; - return Poll::Ready(Ponged::KeepAliveTimedOut); - } - } - } - } - } - - // XXX: this doesn't register a waker...? - Poll::Pending - } - - #[cfg(feature = "runtime")] - fn is_idle(&self) -> bool { - Arc::strong_count(&self.shared) <= 2 - } -} - -// ===== impl Shared ===== - -impl Shared { - fn send_ping(&mut self) { - match self.ping_pong.send_ping(Ping::opaque()) { - Ok(()) => { - self.ping_sent_at = Some(Instant::now()); - trace!("sent ping"); - } - Err(err) => { - debug!("error sending ping: {}", err); - } - } - } - - fn is_ping_sent(&self) -> bool { - self.ping_sent_at.is_some() - } - - #[cfg(feature = "runtime")] - fn update_last_read_at(&mut self) { - if self.last_read_at.is_some() { - self.last_read_at = Some(Instant::now()); - } - } - - #[cfg(feature = "runtime")] - fn last_read_at(&self) -> Instant { - self.last_read_at.expect("keep_alive expects last_read_at") - } -} - -// ===== impl Bdp ===== - -/// Any higher than this likely will be hitting the TCP flow control. -const BDP_LIMIT: usize = 1024 * 1024 * 16; - -impl Bdp { - fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option { - // No need to do any math if we're at the limit. - if self.bdp as usize == BDP_LIMIT { - self.stabilize_delay(); - return None; - } - - // average the rtt - let rtt = seconds(rtt); - if self.rtt == 0.0 { - // First sample means rtt is first rtt. - self.rtt = rtt; - } else { - // Weigh this rtt as 1/8 for a moving average. - self.rtt += (rtt - self.rtt) * 0.125; - } - - // calculate the current bandwidth - let bw = (bytes as f64) / (self.rtt * 1.5); - trace!("current bandwidth = {:.1}B/s", bw); - - if bw < self.max_bandwidth { - // not a faster bandwidth, so don't update - self.stabilize_delay(); - return None; - } else { - self.max_bandwidth = bw; - } - - // if the current `bytes` sample is at least 2/3 the previous - // bdp, increase to double the current sample. 
- if bytes >= self.bdp as usize * 2 / 3 { - self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize; - trace!("BDP increased to {}", self.bdp); - - self.stable_count = 0; - self.ping_delay /= 2; - Some(self.bdp) - } else { - self.stabilize_delay(); - None - } - } - - fn stabilize_delay(&mut self) { - if self.ping_delay < Duration::from_secs(10) { - self.stable_count += 1; - - if self.stable_count >= 2 { - self.ping_delay *= 4; - self.stable_count = 0; - } - } - } -} - -fn seconds(dur: Duration) -> f64 { - const NANOS_PER_SEC: f64 = 1_000_000_000.0; - let secs = dur.as_secs() as f64; - secs + (dur.subsec_nanos() as f64) / NANOS_PER_SEC -} - -// ===== impl KeepAlive ===== - -#[cfg(feature = "runtime")] -impl KeepAlive { - fn schedule(&mut self, is_idle: bool, shared: &Shared) { - match self.state { - KeepAliveState::Init => { - if !self.while_idle && is_idle { - return; - } - - self.state = KeepAliveState::Scheduled; - let interval = shared.last_read_at() + self.interval; - self.timer.as_mut().reset(interval); - } - KeepAliveState::PingSent => { - if shared.is_ping_sent() { - return; - } - - self.state = KeepAliveState::Scheduled; - let interval = shared.last_read_at() + self.interval; - self.timer.as_mut().reset(interval); - } - KeepAliveState::Scheduled => (), - } - } - - fn maybe_ping(&mut self, cx: &mut task::Context<'_>, shared: &mut Shared) { - match self.state { - KeepAliveState::Scheduled => { - if Pin::new(&mut self.timer).poll(cx).is_pending() { - return; - } - // check if we've received a frame while we were scheduled - if shared.last_read_at() + self.interval > self.timer.deadline() { - self.state = KeepAliveState::Init; - cx.waker().wake_by_ref(); // schedule us again - return; - } - trace!("keep-alive interval ({:?}) reached", self.interval); - shared.send_ping(); - self.state = KeepAliveState::PingSent; - let timeout = Instant::now() + self.timeout; - self.timer.as_mut().reset(timeout); - } - KeepAliveState::Init | KeepAliveState::PingSent => (), - } - } - 
- fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> Result<(), KeepAliveTimedOut> { - match self.state { - KeepAliveState::PingSent => { - if Pin::new(&mut self.timer).poll(cx).is_pending() { - return Ok(()); - } - trace!("keep-alive timeout ({:?}) reached", self.timeout); - Err(KeepAliveTimedOut) - } - KeepAliveState::Init | KeepAliveState::Scheduled => Ok(()), - } - } -} - -// ===== impl KeepAliveTimedOut ===== - -#[cfg(feature = "runtime")] -impl KeepAliveTimedOut { - pub(super) fn crate_error(self) -> crate::Error { - crate::Error::new(crate::error::Kind::Http2).with(self) - } -} - -#[cfg(feature = "runtime")] -impl fmt::Display for KeepAliveTimedOut { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("keep-alive timed out") - } -} - -#[cfg(feature = "runtime")] -impl std::error::Error for KeepAliveTimedOut { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - Some(&crate::error::TimedOut) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/h2/server.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/h2/server.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/h2/server.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/h2/server.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,552 +0,0 @@ -use std::error::Error as StdError; -use std::marker::Unpin; -#[cfg(feature = "runtime")] -use std::time::Duration; - -use bytes::Bytes; -use h2::server::{Connection, Handshake, SendResponse}; -use h2::{Reason, RecvStream}; -use http::{Method, Request}; -use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, trace, warn}; - -use super::{ping, PipeToSendStream, SendBuf}; -use crate::body::HttpBody; -use crate::common::exec::ConnStreamExec; -use crate::common::{date, task, Future, Pin, Poll}; -use crate::ext::Protocol; -use crate::headers; -use crate::proto::h2::ping::Recorder; -use crate::proto::h2::{H2Upgraded, UpgradedSendStream}; -use 
crate::proto::Dispatched; -use crate::service::HttpService; - -use crate::upgrade::{OnUpgrade, Pending, Upgraded}; -use crate::{Body, Response}; - -// Our defaults are chosen for the "majority" case, which usually are not -// resource constrained, and so the spec default of 64kb can be too limiting -// for performance. -// -// At the same time, a server more often has multiple clients connected, and -// so is more likely to use more resources than a client would. -const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb -const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb -const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb -const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb -// 16 MB "sane default" taken from golang http2 -const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 16 << 20; - -#[derive(Clone, Debug)] -pub(crate) struct Config { - pub(crate) adaptive_window: bool, - pub(crate) initial_conn_window_size: u32, - pub(crate) initial_stream_window_size: u32, - pub(crate) max_frame_size: u32, - pub(crate) enable_connect_protocol: bool, - pub(crate) max_concurrent_streams: Option, - pub(crate) max_pending_accept_reset_streams: Option, - #[cfg(feature = "runtime")] - pub(crate) keep_alive_interval: Option, - #[cfg(feature = "runtime")] - pub(crate) keep_alive_timeout: Duration, - pub(crate) max_send_buffer_size: usize, - pub(crate) max_header_list_size: u32, -} - -impl Default for Config { - fn default() -> Config { - Config { - adaptive_window: false, - initial_conn_window_size: DEFAULT_CONN_WINDOW, - initial_stream_window_size: DEFAULT_STREAM_WINDOW, - max_frame_size: DEFAULT_MAX_FRAME_SIZE, - enable_connect_protocol: false, - max_concurrent_streams: None, - max_pending_accept_reset_streams: None, - #[cfg(feature = "runtime")] - keep_alive_interval: None, - #[cfg(feature = "runtime")] - keep_alive_timeout: Duration::from_secs(20), - max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, - max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE, - } - } 
-} - -pin_project! { - pub(crate) struct Server - where - S: HttpService, - B: HttpBody, - { - exec: E, - service: S, - state: State, - } -} - -enum State -where - B: HttpBody, -{ - Handshaking { - ping_config: ping::Config, - hs: Handshake>, - }, - Serving(Serving), - Closed, -} - -struct Serving -where - B: HttpBody, -{ - ping: Option<(ping::Recorder, ping::Ponger)>, - conn: Connection>, - closing: Option, -} - -impl Server -where - T: AsyncRead + AsyncWrite + Unpin, - S: HttpService, - S::Error: Into>, - B: HttpBody + 'static, - E: ConnStreamExec, -{ - pub(crate) fn new(io: T, service: S, config: &Config, exec: E) -> Server { - let mut builder = h2::server::Builder::default(); - builder - .initial_window_size(config.initial_stream_window_size) - .initial_connection_window_size(config.initial_conn_window_size) - .max_frame_size(config.max_frame_size) - .max_header_list_size(config.max_header_list_size) - .max_send_buffer_size(config.max_send_buffer_size); - if let Some(max) = config.max_concurrent_streams { - builder.max_concurrent_streams(max); - } - if let Some(max) = config.max_pending_accept_reset_streams { - builder.max_pending_accept_reset_streams(max); - } - if config.enable_connect_protocol { - builder.enable_connect_protocol(); - } - let handshake = builder.handshake(io); - - let bdp = if config.adaptive_window { - Some(config.initial_stream_window_size) - } else { - None - }; - - let ping_config = ping::Config { - bdp_initial_window: bdp, - #[cfg(feature = "runtime")] - keep_alive_interval: config.keep_alive_interval, - #[cfg(feature = "runtime")] - keep_alive_timeout: config.keep_alive_timeout, - // If keep-alive is enabled for servers, always enabled while - // idle, so it can more aggressively close dead connections. 
- #[cfg(feature = "runtime")] - keep_alive_while_idle: true, - }; - - Server { - exec, - state: State::Handshaking { - ping_config, - hs: handshake, - }, - service, - } - } - - pub(crate) fn graceful_shutdown(&mut self) { - trace!("graceful_shutdown"); - match self.state { - State::Handshaking { .. } => { - // fall-through, to replace state with Closed - } - State::Serving(ref mut srv) => { - if srv.closing.is_none() { - srv.conn.graceful_shutdown(); - } - return; - } - State::Closed => { - return; - } - } - self.state = State::Closed; - } -} - -impl Future for Server -where - T: AsyncRead + AsyncWrite + Unpin, - S: HttpService, - S::Error: Into>, - B: HttpBody + 'static, - E: ConnStreamExec, -{ - type Output = crate::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let me = &mut *self; - loop { - let next = match me.state { - State::Handshaking { - ref mut hs, - ref ping_config, - } => { - let mut conn = ready!(Pin::new(hs).poll(cx).map_err(crate::Error::new_h2))?; - let ping = if ping_config.is_enabled() { - let pp = conn.ping_pong().expect("conn.ping_pong"); - Some(ping::channel(pp, ping_config.clone())) - } else { - None - }; - State::Serving(Serving { - ping, - conn, - closing: None, - }) - } - State::Serving(ref mut srv) => { - ready!(srv.poll_server(cx, &mut me.service, &mut me.exec))?; - return Poll::Ready(Ok(Dispatched::Shutdown)); - } - State::Closed => { - // graceful_shutdown was called before handshaking finished, - // nothing to do here... - return Poll::Ready(Ok(Dispatched::Shutdown)); - } - }; - me.state = next; - } - } -} - -impl Serving -where - T: AsyncRead + AsyncWrite + Unpin, - B: HttpBody + 'static, -{ - fn poll_server( - &mut self, - cx: &mut task::Context<'_>, - service: &mut S, - exec: &mut E, - ) -> Poll> - where - S: HttpService, - S::Error: Into>, - E: ConnStreamExec, - { - if self.closing.is_none() { - loop { - self.poll_ping(cx); - - // Check that the service is ready to accept a new request. 
- // - // - If not, just drive the connection some. - // - If ready, try to accept a new request from the connection. - match service.poll_ready(cx) { - Poll::Ready(Ok(())) => (), - Poll::Pending => { - // use `poll_closed` instead of `poll_accept`, - // in order to avoid accepting a request. - ready!(self.conn.poll_closed(cx).map_err(crate::Error::new_h2))?; - trace!("incoming connection complete"); - return Poll::Ready(Ok(())); - } - Poll::Ready(Err(err)) => { - let err = crate::Error::new_user_service(err); - debug!("service closed: {}", err); - - let reason = err.h2_reason(); - if reason == Reason::NO_ERROR { - // NO_ERROR is only used for graceful shutdowns... - trace!("interpreting NO_ERROR user error as graceful_shutdown"); - self.conn.graceful_shutdown(); - } else { - trace!("abruptly shutting down with {:?}", reason); - self.conn.abrupt_shutdown(reason); - } - self.closing = Some(err); - break; - } - } - - // When the service is ready, accepts an incoming request. - match ready!(self.conn.poll_accept(cx)) { - Some(Ok((req, mut respond))) => { - trace!("incoming request"); - let content_length = headers::content_length_parse_all(req.headers()); - let ping = self - .ping - .as_ref() - .map(|ping| ping.0.clone()) - .unwrap_or_else(ping::disabled); - - // Record the headers received - ping.record_non_data(); - - let is_connect = req.method() == Method::CONNECT; - let (mut parts, stream) = req.into_parts(); - let (mut req, connect_parts) = if !is_connect { - ( - Request::from_parts( - parts, - crate::Body::h2(stream, content_length.into(), ping), - ), - None, - ) - } else { - if content_length.map_or(false, |len| len != 0) { - warn!("h2 connect request with non-zero body not supported"); - respond.send_reset(h2::Reason::INTERNAL_ERROR); - return Poll::Ready(Ok(())); - } - let (pending, upgrade) = crate::upgrade::pending(); - debug_assert!(parts.extensions.get::().is_none()); - parts.extensions.insert(upgrade); - ( - Request::from_parts(parts, 
crate::Body::empty()), - Some(ConnectParts { - pending, - ping, - recv_stream: stream, - }), - ) - }; - - if let Some(protocol) = req.extensions_mut().remove::() { - req.extensions_mut().insert(Protocol::from_inner(protocol)); - } - - let fut = H2Stream::new(service.call(req), connect_parts, respond); - exec.execute_h2stream(fut); - } - Some(Err(e)) => { - return Poll::Ready(Err(crate::Error::new_h2(e))); - } - None => { - // no more incoming streams... - if let Some((ref ping, _)) = self.ping { - ping.ensure_not_timed_out()?; - } - - trace!("incoming connection complete"); - return Poll::Ready(Ok(())); - } - } - } - } - - debug_assert!( - self.closing.is_some(), - "poll_server broke loop without closing" - ); - - ready!(self.conn.poll_closed(cx).map_err(crate::Error::new_h2))?; - - Poll::Ready(Err(self.closing.take().expect("polled after error"))) - } - - fn poll_ping(&mut self, cx: &mut task::Context<'_>) { - if let Some((_, ref mut estimator)) = self.ping { - match estimator.poll(cx) { - Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { - self.conn.set_target_window_size(wnd); - let _ = self.conn.set_initial_window_size(wnd); - } - #[cfg(feature = "runtime")] - Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { - debug!("keep-alive timed out, closing connection"); - self.conn.abrupt_shutdown(h2::Reason::NO_ERROR); - } - Poll::Pending => {} - } - } - } -} - -pin_project! { - #[allow(missing_debug_implementations)] - pub struct H2Stream - where - B: HttpBody, - { - reply: SendResponse>, - #[pin] - state: H2StreamState, - } -} - -pin_project! 
{ - #[project = H2StreamStateProj] - enum H2StreamState - where - B: HttpBody, - { - Service { - #[pin] - fut: F, - connect_parts: Option, - }, - Body { - #[pin] - pipe: PipeToSendStream, - }, - } -} - -struct ConnectParts { - pending: Pending, - ping: Recorder, - recv_stream: RecvStream, -} - -impl H2Stream -where - B: HttpBody, -{ - fn new( - fut: F, - connect_parts: Option, - respond: SendResponse>, - ) -> H2Stream { - H2Stream { - reply: respond, - state: H2StreamState::Service { fut, connect_parts }, - } - } -} - -macro_rules! reply { - ($me:expr, $res:expr, $eos:expr) => {{ - match $me.reply.send_response($res, $eos) { - Ok(tx) => tx, - Err(e) => { - debug!("send response error: {}", e); - $me.reply.send_reset(Reason::INTERNAL_ERROR); - return Poll::Ready(Err(crate::Error::new_h2(e))); - } - } - }}; -} - -impl H2Stream -where - F: Future, E>>, - B: HttpBody, - B::Data: 'static, - B::Error: Into>, - E: Into>, -{ - fn poll2(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let mut me = self.project(); - loop { - let next = match me.state.as_mut().project() { - H2StreamStateProj::Service { - fut: h, - connect_parts, - } => { - let res = match h.poll(cx) { - Poll::Ready(Ok(r)) => r, - Poll::Pending => { - // Response is not yet ready, so we want to check if the client has sent a - // RST_STREAM frame which would cancel the current request. - if let Poll::Ready(reason) = - me.reply.poll_reset(cx).map_err(crate::Error::new_h2)? 
- { - debug!("stream received RST_STREAM: {:?}", reason); - return Poll::Ready(Err(crate::Error::new_h2(reason.into()))); - } - return Poll::Pending; - } - Poll::Ready(Err(e)) => { - let err = crate::Error::new_user_service(e); - warn!("http2 service errored: {}", err); - me.reply.send_reset(err.h2_reason()); - return Poll::Ready(Err(err)); - } - }; - - let (head, body) = res.into_parts(); - let mut res = ::http::Response::from_parts(head, ()); - super::strip_connection_headers(res.headers_mut(), false); - - // set Date header if it isn't already set... - res.headers_mut() - .entry(::http::header::DATE) - .or_insert_with(date::update_and_header_value); - - if let Some(connect_parts) = connect_parts.take() { - if res.status().is_success() { - if headers::content_length_parse_all(res.headers()) - .map_or(false, |len| len != 0) - { - warn!("h2 successful response to CONNECT request with body not supported"); - me.reply.send_reset(h2::Reason::INTERNAL_ERROR); - return Poll::Ready(Err(crate::Error::new_user_header())); - } - let send_stream = reply!(me, res, false); - connect_parts.pending.fulfill(Upgraded::new( - H2Upgraded { - ping: connect_parts.ping, - recv_stream: connect_parts.recv_stream, - send_stream: unsafe { UpgradedSendStream::new(send_stream) }, - buf: Bytes::new(), - }, - Bytes::new(), - )); - return Poll::Ready(Ok(())); - } - } - - if !body.is_end_stream() { - // automatically set Content-Length from body... 
- if let Some(len) = body.size_hint().exact() { - headers::set_content_length_if_missing(res.headers_mut(), len); - } - - let body_tx = reply!(me, res, false); - H2StreamState::Body { - pipe: PipeToSendStream::new(body, body_tx), - } - } else { - reply!(me, res, true); - return Poll::Ready(Ok(())); - } - } - H2StreamStateProj::Body { pipe } => { - return pipe.poll(cx); - } - }; - me.state.set(next); - } - } -} - -impl Future for H2Stream -where - F: Future, E>>, - B: HttpBody, - B::Data: 'static, - B::Error: Into>, - E: Into>, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.poll2(cx).map(|res| { - if let Err(e) = res { - debug!("stream error: {}", e); - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/proto/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/proto/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/proto/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/proto/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,71 +0,0 @@ -//! Pieces pertaining to the HTTP message protocol. - -cfg_feature! { - #![feature = "http1"] - - pub(crate) mod h1; - - pub(crate) use self::h1::Conn; - - #[cfg(feature = "client")] - pub(crate) use self::h1::dispatch; - #[cfg(feature = "server")] - pub(crate) use self::h1::ServerTransaction; -} - -#[cfg(feature = "http2")] -pub(crate) mod h2; - -/// An Incoming Message head. Includes request/status line, and headers. -#[derive(Debug, Default)] -pub(crate) struct MessageHead { - /// HTTP version of the message. - pub(crate) version: http::Version, - /// Subject (request line or status line) of Incoming message. - pub(crate) subject: S, - /// Headers of the Incoming message. - pub(crate) headers: http::HeaderMap, - /// Extensions. - extensions: http::Extensions, -} - -/// An incoming request message. 
-#[cfg(feature = "http1")] -pub(crate) type RequestHead = MessageHead; - -#[derive(Debug, Default, PartialEq)] -#[cfg(feature = "http1")] -pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri); - -/// An incoming response message. -#[cfg(all(feature = "http1", feature = "client"))] -pub(crate) type ResponseHead = MessageHead; - -#[derive(Debug)] -#[cfg(feature = "http1")] -pub(crate) enum BodyLength { - /// Content-Length - Known(u64), - /// Transfer-Encoding: chunked (if h1) - Unknown, -} - -/// Status of when a Disaptcher future completes. -pub(crate) enum Dispatched { - /// Dispatcher completely shutdown connection. - Shutdown, - /// Dispatcher has pending upgrade, and so did not shutdown. - #[cfg(feature = "http1")] - Upgrade(crate::upgrade::Pending), -} - -impl MessageHead { - fn into_response(self, body: B) -> http::Response { - let mut res = http::Response::new(body); - *res.status_mut() = self.subject; - *res.headers_mut() = self.headers; - *res.version_mut() = self.version; - *res.extensions_mut() = self.extensions; - res - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/rt.rs s390-tools-2.33.1/rust-vendor/hyper/src/rt.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/rt.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/rt.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -//! Runtime components -//! -//! By default, hyper includes the [tokio](https://tokio.rs) runtime. -//! -//! If the `runtime` feature is disabled, the types in this module can be used -//! to plug in other runtimes. - -/// An executor of futures. -pub trait Executor { - /// Place the future into the executor to be run. 
- fn execute(&self, fut: Fut); -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/server/accept.rs s390-tools-2.33.1/rust-vendor/hyper/src/server/accept.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/server/accept.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/server/accept.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,111 +0,0 @@ -//! The `Accept` trait and supporting types. -//! -//! This module contains: -//! -//! - The [`Accept`](Accept) trait used to asynchronously accept incoming -//! connections. -//! - Utilities like `poll_fn` to ease creating a custom `Accept`. - -#[cfg(feature = "stream")] -use futures_core::Stream; -#[cfg(feature = "stream")] -use pin_project_lite::pin_project; - -use crate::common::{ - task::{self, Poll}, - Pin, -}; - -/// Asynchronously accept incoming connections. -pub trait Accept { - /// The connection type that can be accepted. - type Conn; - /// The error type that can occur when accepting a connection. - type Error; - - /// Poll to accept the next connection. - fn poll_accept( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>>; -} - -/// Create an `Accept` with a polling function. -/// -/// # Example -/// -/// ``` -/// use std::task::Poll; -/// use hyper::server::{accept, Server}; -/// -/// # let mock_conn = (); -/// // If we created some mocked connection... -/// let mut conn = Some(mock_conn); -/// -/// // And accept just the mocked conn once... 
-/// let once = accept::poll_fn(move |cx| { -/// Poll::Ready(conn.take().map(Ok::<_, ()>)) -/// }); -/// -/// let builder = Server::builder(once); -/// ``` -pub fn poll_fn(func: F) -> impl Accept -where - F: FnMut(&mut task::Context<'_>) -> Poll>>, -{ - struct PollFn(F); - - // The closure `F` is never pinned - impl Unpin for PollFn {} - - impl Accept for PollFn - where - F: FnMut(&mut task::Context<'_>) -> Poll>>, - { - type Conn = IO; - type Error = E; - fn poll_accept( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>> { - (self.get_mut().0)(cx) - } - } - - PollFn(func) -} - -/// Adapt a `Stream` of incoming connections into an `Accept`. -/// -/// # Optional -/// -/// This function requires enabling the `stream` feature in your -/// `Cargo.toml`. -#[cfg(feature = "stream")] -pub fn from_stream(stream: S) -> impl Accept -where - S: Stream>, -{ - pin_project! { - struct FromStream { - #[pin] - stream: S, - } - } - - impl Accept for FromStream - where - S: Stream>, - { - type Conn = IO; - type Error = E; - fn poll_accept( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>> { - self.project().stream.poll_next(cx) - } - } - - FromStream { stream } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/server/conn/http1.rs s390-tools-2.33.1/rust-vendor/hyper/src/server/conn/http1.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/server/conn/http1.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/server/conn/http1.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,446 +0,0 @@ -//! 
HTTP/1 Server Connections - -use std::error::Error as StdError; -use std::fmt; -use std::time::Duration; - -use bytes::Bytes; -use tokio::io::{AsyncRead, AsyncWrite}; - -use crate::body::{Body as IncomingBody, HttpBody as Body}; -use crate::common::{task, Future, Pin, Poll, Unpin}; -use crate::proto; -use crate::service::HttpService; - -type Http1Dispatcher = proto::h1::Dispatcher< - proto::h1::dispatch::Server, - B, - T, - proto::ServerTransaction, ->; - -pin_project_lite::pin_project! { - /// A future binding an http1 connection with a Service. - /// - /// Polling this future will drive HTTP forward. - #[must_use = "futures do nothing unless polled"] - pub struct Connection - where - S: HttpService, - { - conn: Http1Dispatcher, - } -} - -/// A configuration builder for HTTP/1 server connections. -#[derive(Clone, Debug)] -pub struct Builder { - h1_half_close: bool, - h1_keep_alive: bool, - h1_title_case_headers: bool, - h1_preserve_header_case: bool, - h1_header_read_timeout: Option, - h1_writev: Option, - max_buf_size: Option, - pipeline_flush: bool, -} - -/// Deconstructed parts of a `Connection`. -/// -/// This allows taking apart a `Connection` at a later time, in order to -/// reclaim the IO object, and additional related pieces. -#[derive(Debug)] -pub struct Parts { - /// The original IO object used in the handshake. - pub io: T, - /// A buffer of bytes that have been read but not processed as HTTP. - /// - /// If the client sent additional bytes after its last request, and - /// this connection "ended" with an upgrade, the read buffer will contain - /// those bytes. - /// - /// You will want to check for any existing bytes if you plan to continue - /// communicating on the IO object. - pub read_buf: Bytes, - /// The `Service` used to serve this connection. 
- pub service: S, - _inner: (), -} - -// ===== impl Connection ===== - -impl fmt::Debug for Connection -where - S: HttpService, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Connection").finish() - } -} - -impl Connection -where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - B: Body + 'static, - B::Error: Into>, -{ - /// Start a graceful shutdown process for this connection. - /// - /// This `Connection` should continue to be polled until shutdown - /// can finish. - /// - /// # Note - /// - /// This should only be called while the `Connection` future is still - /// pending. If called after `Connection::poll` has resolved, this does - /// nothing. - pub fn graceful_shutdown(mut self: Pin<&mut Self>) { - self.conn.disable_keep_alive(); - } - - /// Return the inner IO object, and additional information. - /// - /// If the IO object has been "rewound" the io will not contain those bytes rewound. - /// This should only be called after `poll_without_shutdown` signals - /// that the connection is "done". Otherwise, it may not have finished - /// flushing all necessary HTTP bytes. - /// - /// # Panics - /// This method will panic if this connection is using an h2 protocol. - pub fn into_parts(self) -> Parts { - let (io, read_buf, dispatch) = self.conn.into_inner(); - Parts { - io, - read_buf, - service: dispatch.into_service(), - _inner: (), - } - } - - /// Poll the connection for completion, but without calling `shutdown` - /// on the underlying IO. - /// - /// This is useful to allow running a connection while doing an HTTP - /// upgrade. Once the upgrade is completed, the connection would be "done", - /// but it is not desired to actually shutdown the IO object. Instead you - /// would take it back using `into_parts`. 
- pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> - where - S: Unpin, - S::Future: Unpin, - B: Unpin, - { - self.conn.poll_without_shutdown(cx) - } - - /// Prevent shutdown of the underlying IO object at the end of service the request, - /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. - /// - /// # Error - /// - /// This errors if the underlying connection protocol is not HTTP/1. - pub fn without_shutdown(self) -> impl Future>> - where - S: Unpin, - S::Future: Unpin, - B: Unpin, - { - let mut zelf = Some(self); - futures_util::future::poll_fn(move |cx| { - ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?; - Poll::Ready(Ok(zelf.take().unwrap().into_parts())) - }) - } - - /// Enable this connection to support higher-level HTTP upgrades. - /// - /// See [the `upgrade` module](crate::upgrade) for more. - pub fn with_upgrades(self) -> upgrades::UpgradeableConnection - where - I: Send, - { - upgrades::UpgradeableConnection { inner: Some(self) } - } -} - -impl Future for Connection -where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + 'static, - B: Body + 'static, - B::Error: Into>, -{ - type Output = crate::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match ready!(Pin::new(&mut self.conn).poll(cx)) { - Ok(done) => { - match done { - proto::Dispatched::Shutdown => {} - proto::Dispatched::Upgrade(pending) => { - // With no `Send` bound on `I`, we can't try to do - // upgrades here. In case a user was trying to use - // `Body::on_upgrade` with this API, send a special - // error letting them know about that. - pending.manual(); - } - }; - return Poll::Ready(Ok(())); - } - Err(e) => Poll::Ready(Err(e)), - } - } -} - -// ===== impl Builder ===== - -impl Builder { - /// Create a new connection builder. 
- pub fn new() -> Self { - Self { - h1_half_close: false, - h1_keep_alive: true, - h1_title_case_headers: false, - h1_preserve_header_case: false, - h1_header_read_timeout: None, - h1_writev: None, - max_buf_size: None, - pipeline_flush: false, - } - } - /// Set whether HTTP/1 connections should support half-closures. - /// - /// Clients can chose to shutdown their write-side while waiting - /// for the server to respond. Setting this to `true` will - /// prevent closing the connection immediately if `read` - /// detects an EOF in the middle of a request. - /// - /// Default is `false`. - pub fn half_close(&mut self, val: bool) -> &mut Self { - self.h1_half_close = val; - self - } - - /// Enables or disables HTTP/1 keep-alive. - /// - /// Default is true. - pub fn keep_alive(&mut self, val: bool) -> &mut Self { - self.h1_keep_alive = val; - self - } - - /// Set whether HTTP/1 connections will write header names as title case at - /// the socket level. - /// - /// Default is false. - pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self { - self.h1_title_case_headers = enabled; - self - } - - /// Set whether to support preserving original header cases. - /// - /// Currently, this will record the original cases received, and store them - /// in a private extension on the `Request`. It will also look for and use - /// such an extension in any provided `Response`. - /// - /// Since the relevant extension is still private, there is no way to - /// interact with the original cases. The only effect this can have now is - /// to forward the cases in a proxy-like fashion. - /// - /// Default is false. - pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self { - self.h1_preserve_header_case = enabled; - self - } - - /// Set a timeout for reading client request headers. If a client does not - /// transmit the entire header within this time, the connection is closed. - /// - /// Default is None. 
- pub fn header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self { - self.h1_header_read_timeout = Some(read_timeout); - self - } - - /// Set whether HTTP/1 connections should try to use vectored writes, - /// or always flatten into a single buffer. - /// - /// Note that setting this to false may mean more copies of body data, - /// but may also improve performance when an IO transport doesn't - /// support vectored writes well, such as most TLS implementations. - /// - /// Setting this to true will force hyper to use queued strategy - /// which may eliminate unnecessary cloning on some TLS backends - /// - /// Default is `auto`. In this mode hyper will try to guess which - /// mode to use - pub fn writev(&mut self, val: bool) -> &mut Self { - self.h1_writev = Some(val); - self - } - - /// Set the maximum buffer size for the connection. - /// - /// Default is ~400kb. - /// - /// # Panics - /// - /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. - pub fn max_buf_size(&mut self, max: usize) -> &mut Self { - assert!( - max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, - "the max_buf_size cannot be smaller than the minimum that h1 specifies." - ); - self.max_buf_size = Some(max); - self - } - - /// Aggregates flushes to better support pipelined responses. - /// - /// Experimental, may have bugs. - /// - /// Default is false. - pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { - self.pipeline_flush = enabled; - self - } - - // /// Set the timer used in background tasks. - // pub fn timer(&mut self, timer: M) -> &mut Self - // where - // M: Timer + Send + Sync + 'static, - // { - // self.timer = Time::Timer(Arc::new(timer)); - // self - // } - - /// Bind a connection together with a [`Service`](crate::service::Service). - /// - /// This returns a Future that must be polled in order for HTTP to be - /// driven on the connection. 
- /// - /// # Example - /// - /// ``` - /// # use hyper::{Body as Incoming, Request, Response}; - /// # use hyper::service::Service; - /// # use hyper::server::conn::http1::Builder; - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # async fn run(some_io: I, some_service: S) - /// # where - /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - /// # S: Service, Response=hyper::Response> + Send + 'static, - /// # S::Error: Into>, - /// # S::Future: Send, - /// # { - /// let http = Builder::new(); - /// let conn = http.serve_connection(some_io, some_service); - /// - /// if let Err(e) = conn.await { - /// eprintln!("server connection error: {}", e); - /// } - /// # } - /// # fn main() {} - /// ``` - pub fn serve_connection(&self, io: I, service: S) -> Connection - where - S: HttpService, - S::Error: Into>, - S::ResBody: 'static, - ::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - { - let mut conn = proto::Conn::new(io); - if !self.h1_keep_alive { - conn.disable_keep_alive(); - } - if self.h1_half_close { - conn.set_allow_half_close(); - } - if self.h1_title_case_headers { - conn.set_title_case_headers(); - } - if self.h1_preserve_header_case { - conn.set_preserve_header_case(); - } - if let Some(header_read_timeout) = self.h1_header_read_timeout { - conn.set_http1_header_read_timeout(header_read_timeout); - } - if let Some(writev) = self.h1_writev { - if writev { - conn.set_write_strategy_queue(); - } else { - conn.set_write_strategy_flatten(); - } - } - conn.set_flush_pipeline(self.pipeline_flush); - if let Some(max) = self.max_buf_size { - conn.set_max_buf_size(max); - } - let sd = proto::h1::dispatch::Server::new(service); - let proto = proto::h1::Dispatcher::new(sd, conn); - Connection { conn: proto } - } -} - -mod upgrades { - use crate::upgrade::Upgraded; - - use super::*; - - // A future binding a connection with a Service with Upgrade support. - // - // This type is unnameable outside the crate. 
- #[must_use = "futures do nothing unless polled"] - #[allow(missing_debug_implementations)] - pub struct UpgradeableConnection - where - S: HttpService, - { - pub(super) inner: Option>, - } - - impl UpgradeableConnection - where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - B: Body + 'static, - B::Error: Into>, - { - /// Start a graceful shutdown process for this connection. - /// - /// This `Connection` should continue to be polled until shutdown - /// can finish. - pub fn graceful_shutdown(mut self: Pin<&mut Self>) { - Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown() - } - } - - impl Future for UpgradeableConnection - where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + 'static, - B::Error: Into>, - { - type Output = crate::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match ready!(Pin::new(&mut self.inner.as_mut().unwrap().conn).poll(cx)) { - Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())), - Ok(proto::Dispatched::Upgrade(pending)) => { - let (io, buf, _) = self.inner.take().unwrap().conn.into_inner(); - pending.fulfill(Upgraded::new(io, buf)); - Poll::Ready(Ok(())) - } - Err(e) => Poll::Ready(Err(e)), - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/server/conn/http2.rs s390-tools-2.33.1/rust-vendor/hyper/src/server/conn/http2.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/server/conn/http2.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/server/conn/http2.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,257 +0,0 @@ -//! 
HTTP/2 Server Connections - -use std::error::Error as StdError; -use std::fmt; -use std::time::Duration; - -use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; - -use crate::body::{Body as IncomingBody, HttpBody as Body}; -use crate::common::exec::ConnStreamExec; -use crate::common::{task, Future, Pin, Poll, Unpin}; -use crate::proto; -use crate::service::HttpService; - -pin_project! { - /// A future binding an HTTP/2 connection with a Service. - /// - /// Polling this future will drive HTTP forward. - #[must_use = "futures do nothing unless polled"] - pub struct Connection - where - S: HttpService, - { - conn: proto::h2::Server, - } -} - -/// A configuration builder for HTTP/2 server connections. -#[derive(Clone, Debug)] -pub struct Builder { - exec: E, - h2_builder: proto::h2::server::Config, -} - -// ===== impl Connection ===== - -impl fmt::Debug for Connection -where - S: HttpService, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Connection").finish() - } -} - -impl Connection -where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - B: Body + 'static, - B::Error: Into>, - E: ConnStreamExec, -{ - /// Start a graceful shutdown process for this connection. - /// - /// This `Connection` should continue to be polled until shutdown - /// can finish. - /// - /// # Note - /// - /// This should only be called while the `Connection` future is still - /// pending. If called after `Connection::poll` has resolved, this does - /// nothing. 
- pub fn graceful_shutdown(mut self: Pin<&mut Self>) { - self.conn.graceful_shutdown(); - } -} - -impl Future for Connection -where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + 'static, - B: Body + 'static, - B::Error: Into>, - E: ConnStreamExec, -{ - type Output = crate::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match ready!(Pin::new(&mut self.conn).poll(cx)) { - Ok(_done) => { - //TODO: the proto::h2::Server no longer needs to return - //the Dispatched enum - Poll::Ready(Ok(())) - } - Err(e) => Poll::Ready(Err(e)), - } - } -} - -// ===== impl Builder ===== - -impl Builder { - /// Create a new connection builder. - /// - /// This starts with the default options, and an executor. - pub fn new(exec: E) -> Self { - Self { - exec: exec, - h2_builder: Default::default(), - } - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.adaptive_window = false; - self.h2_builder.initial_stream_window_size = sz; - } - self - } - - /// Sets the max connection-level flow control for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.adaptive_window = false; - self.h2_builder.initial_conn_window_size = sz; - } - self - } - - /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `initial_stream_window_size` and - /// `initial_connection_window_size`. 
- pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { - use proto::h2::SPEC_WINDOW_SIZE; - - self.h2_builder.adaptive_window = enabled; - if enabled { - self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; - self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; - } - self - } - - /// Sets the maximum frame size to use for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.max_frame_size = sz; - } - self - } - - /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 - /// connections. - /// - /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS - pub fn max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { - self.h2_builder.max_concurrent_streams = max.into(); - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - /// - /// # Cargo Feature - /// - pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { - self.h2_builder.keep_alive_interval = interval.into(); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. - /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - /// - /// # Cargo Feature - /// - pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { - self.h2_builder.keep_alive_timeout = timeout; - self - } - - /// Set the maximum write buffer size for each HTTP/2 stream. - /// - /// Default is currently ~400KB, but may change. 
- /// - /// # Panics - /// - /// The value must be no larger than `u32::MAX`. - pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { - assert!(max <= std::u32::MAX as usize); - self.h2_builder.max_send_buffer_size = max; - self - } - - /// Enables the [extended CONNECT protocol]. - /// - /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 - pub fn enable_connect_protocol(&mut self) -> &mut Self { - self.h2_builder.enable_connect_protocol = true; - self - } - - /// Sets the max size of received header frames. - /// - /// Default is currently ~16MB, but may change. - pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { - self.h2_builder.max_header_list_size = max; - self - } - - // /// Set the timer used in background tasks. - // pub fn timer(&mut self, timer: M) -> &mut Self - // where - // M: Timer + Send + Sync + 'static, - // { - // self.timer = Time::Timer(Arc::new(timer)); - // self - // } - - /// Bind a connection together with a [`Service`](crate::service::Service). - /// - /// This returns a Future that must be polled in order for HTTP to be - /// driven on the connection. - pub fn serve_connection(&self, io: I, service: S) -> Connection - where - S: HttpService, - S::Error: Into>, - Bd: Body + 'static, - Bd::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - E: ConnStreamExec, - { - let proto = proto::h2::Server::new(io, service, &self.h2_builder, self.exec.clone()); - Connection { conn: proto } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/server/conn.rs s390-tools-2.33.1/rust-vendor/hyper/src/server/conn.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/server/conn.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/server/conn.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1079 +0,0 @@ -//! Lower-level Server connection API. -//! -//! The types in this module are to provide a lower-level API based around a -//! single connection. 
Accepting a connection and binding it with a service -//! are not handled at this level. This module provides the building blocks to -//! customize those things externally. -//! -//! If you don't have need to manage connections yourself, consider using the -//! higher-level [Server](super) API. -//! -//! ## Example -//! A simple example that uses the `Http` struct to talk HTTP over a Tokio TCP stream -//! ```no_run -//! # #[cfg(all(feature = "http1", feature = "runtime"))] -//! # mod rt { -//! use http::{Request, Response, StatusCode}; -//! use hyper::{server::conn::Http, service::service_fn, Body}; -//! use std::{net::SocketAddr, convert::Infallible}; -//! use tokio::net::TcpListener; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let addr: SocketAddr = ([127, 0, 0, 1], 8080).into(); -//! -//! let mut tcp_listener = TcpListener::bind(addr).await?; -//! loop { -//! let (tcp_stream, _) = tcp_listener.accept().await?; -//! tokio::task::spawn(async move { -//! if let Err(http_err) = Http::new() -//! .http1_only(true) -//! .http1_keep_alive(true) -//! .serve_connection(tcp_stream, service_fn(hello)) -//! .await { -//! eprintln!("Error while serving HTTP connection: {}", http_err); -//! } -//! }); -//! } -//! } -//! -//! async fn hello(_req: Request) -> Result, Infallible> { -//! Ok(Response::new(Body::from("Hello World!"))) -//! } -//! # } -//! 
``` - -#[cfg(all( - any(feature = "http1", feature = "http2"), - not(all(feature = "http1", feature = "http2")) -))] -use std::marker::PhantomData; -#[cfg(all(any(feature = "http1", feature = "http2"), feature = "runtime"))] -use std::time::Duration; - -#[cfg(feature = "http2")] -use crate::common::io::Rewind; -#[cfg(all(feature = "http1", feature = "http2"))] -use crate::error::{Kind, Parse}; -#[cfg(feature = "http1")] -use crate::upgrade::Upgraded; - -#[cfg(all(feature = "backports", feature = "http1"))] -pub mod http1; -#[cfg(all(feature = "backports", feature = "http2"))] -pub mod http2; - -cfg_feature! { - #![any(feature = "http1", feature = "http2")] - - use std::error::Error as StdError; - use std::fmt; - - use bytes::Bytes; - use pin_project_lite::pin_project; - use tokio::io::{AsyncRead, AsyncWrite}; - use tracing::trace; - - pub use super::server::Connecting; - use crate::body::{Body, HttpBody}; - use crate::common::{task, Future, Pin, Poll, Unpin}; - #[cfg(not(all(feature = "http1", feature = "http2")))] - use crate::common::Never; - use crate::common::exec::{ConnStreamExec, Exec}; - use crate::proto; - use crate::service::HttpService; - - pub(super) use self::upgrades::UpgradeableConnection; -} - -#[cfg(feature = "tcp")] -pub use super::tcp::{AddrIncoming, AddrStream}; - -/// A lower-level configuration of the HTTP protocol. -/// -/// This structure is used to configure options for an HTTP server connection. -/// -/// If you don't have need to manage connections yourself, consider using the -/// higher-level [Server](super) API. -#[derive(Clone, Debug)] -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] -#[cfg_attr( - feature = "deprecated", - deprecated( - note = "This struct will be replaced with `server::conn::http1::Builder` and `server::conn::http2::Builder` in 1.0, enable the \"backports\" feature to use them now." 
- ) -)] -pub struct Http { - pub(crate) exec: E, - h1_half_close: bool, - h1_keep_alive: bool, - h1_title_case_headers: bool, - h1_preserve_header_case: bool, - #[cfg(all(feature = "http1", feature = "runtime"))] - h1_header_read_timeout: Option, - h1_writev: Option, - #[cfg(feature = "http2")] - h2_builder: proto::h2::server::Config, - mode: ConnectionMode, - max_buf_size: Option, - pipeline_flush: bool, -} - -/// The internal mode of HTTP protocol which indicates the behavior when a parse error occurs. -#[cfg(any(feature = "http1", feature = "http2"))] -#[derive(Clone, Debug, PartialEq)] -enum ConnectionMode { - /// Always use HTTP/1 and do not upgrade when a parse error occurs. - #[cfg(feature = "http1")] - H1Only, - /// Always use HTTP/2. - #[cfg(feature = "http2")] - H2Only, - /// Use HTTP/1 and try to upgrade to h2 when a parse error occurs. - #[cfg(all(feature = "http1", feature = "http2"))] - Fallback, -} - -#[cfg(any(feature = "http1", feature = "http2"))] -pin_project! { - /// A future binding a connection with a Service. - /// - /// Polling this future will drive HTTP forward. - #[must_use = "futures do nothing unless polled"] - #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] - pub struct Connection - where - S: HttpService, - { - pub(super) conn: Option>, - fallback: Fallback, - } -} - -#[cfg(feature = "http1")] -type Http1Dispatcher = - proto::h1::Dispatcher, B, T, proto::ServerTransaction>; - -#[cfg(all(not(feature = "http1"), feature = "http2"))] -type Http1Dispatcher = (Never, PhantomData<(T, Box>, Box>)>); - -#[cfg(feature = "http2")] -type Http2Server = proto::h2::Server, S, B, E>; - -#[cfg(all(not(feature = "http2"), feature = "http1"))] -type Http2Server = ( - Never, - PhantomData<(T, Box>, Box>, Box>)>, -); - -#[cfg(any(feature = "http1", feature = "http2"))] -pin_project! 
{ - #[project = ProtoServerProj] - pub(super) enum ProtoServer - where - S: HttpService, - B: HttpBody, - { - H1 { - #[pin] - h1: Http1Dispatcher, - }, - H2 { - #[pin] - h2: Http2Server, - }, - } -} - -#[cfg(all(feature = "http1", feature = "http2"))] -#[derive(Clone, Debug)] -enum Fallback { - ToHttp2(proto::h2::server::Config, E), - Http1Only, -} - -#[cfg(all( - any(feature = "http1", feature = "http2"), - not(all(feature = "http1", feature = "http2")) -))] -type Fallback = PhantomData; - -#[cfg(all(feature = "http1", feature = "http2"))] -impl Fallback { - fn to_h2(&self) -> bool { - match *self { - Fallback::ToHttp2(..) => true, - Fallback::Http1Only => false, - } - } -} - -#[cfg(all(feature = "http1", feature = "http2"))] -impl Unpin for Fallback {} - -/// Deconstructed parts of a `Connection`. -/// -/// This allows taking apart a `Connection` at a later time, in order to -/// reclaim the IO object, and additional related pieces. -#[derive(Debug)] -#[cfg(any(feature = "http1", feature = "http2"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] -#[cfg_attr( - feature = "deprecated", - deprecated( - note = "This struct will be replaced with `server::conn::http1::Parts` in 1.0, enable the \"backports\" feature to use them now." - ) -)] -pub struct Parts { - /// The original IO object used in the handshake. - pub io: T, - /// A buffer of bytes that have been read but not processed as HTTP. - /// - /// If the client sent additional bytes after its last request, and - /// this connection "ended" with an upgrade, the read buffer will contain - /// those bytes. - /// - /// You will want to check for any existing bytes if you plan to continue - /// communicating on the IO object. - pub read_buf: Bytes, - /// The `Service` used to serve this connection. 
- pub service: S, - _inner: (), -} - -// ===== impl Http ===== - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -#[cfg(any(feature = "http1", feature = "http2"))] -impl Http { - /// Creates a new instance of the HTTP protocol, ready to spawn a server or - /// start accepting connections. - pub fn new() -> Http { - Http { - exec: Exec::Default, - h1_half_close: false, - h1_keep_alive: true, - h1_title_case_headers: false, - h1_preserve_header_case: false, - #[cfg(all(feature = "http1", feature = "runtime"))] - h1_header_read_timeout: None, - h1_writev: None, - #[cfg(feature = "http2")] - h2_builder: Default::default(), - mode: ConnectionMode::default(), - max_buf_size: None, - pipeline_flush: false, - } - } -} - -#[cfg_attr(feature = "deprecated", allow(deprecated))] -#[cfg(any(feature = "http1", feature = "http2"))] -impl Http { - /// Sets whether HTTP1 is required. - /// - /// Default is false - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_only(&mut self, val: bool) -> &mut Self { - if val { - self.mode = ConnectionMode::H1Only; - } else { - #[cfg(feature = "http2")] - { - self.mode = ConnectionMode::Fallback; - } - } - self - } - - /// Set whether HTTP/1 connections should support half-closures. - /// - /// Clients can chose to shutdown their write-side while waiting - /// for the server to respond. Setting this to `true` will - /// prevent closing the connection immediately if `read` - /// detects an EOF in the middle of a request. - /// - /// Default is `false`. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_half_close(&mut self, val: bool) -> &mut Self { - self.h1_half_close = val; - self - } - - /// Enables or disables HTTP/1 keep-alive. - /// - /// Default is true. 
- #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_keep_alive(&mut self, val: bool) -> &mut Self { - self.h1_keep_alive = val; - self - } - - /// Set whether HTTP/1 connections will write header names as title case at - /// the socket level. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Self { - self.h1_title_case_headers = enabled; - self - } - - /// Set whether to support preserving original header cases. - /// - /// Currently, this will record the original cases received, and store them - /// in a private extension on the `Request`. It will also look for and use - /// such an extension in any provided `Response`. - /// - /// Since the relevant extension is still private, there is no way to - /// interact with the original cases. The only effect this can have now is - /// to forward the cases in a proxy-like fashion. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Self { - self.h1_preserve_header_case = enabled; - self - } - - /// Set a timeout for reading client request headers. If a client does not - /// transmit the entire header within this time, the connection is closed. - /// - /// Default is None. - #[cfg(all(feature = "http1", feature = "runtime"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))] - pub fn http1_header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self { - self.h1_header_read_timeout = Some(read_timeout); - self - } - - /// Set whether HTTP/1 connections should try to use vectored writes, - /// or always flatten into a single buffer. 
- /// - /// Note that setting this to false may mean more copies of body data, - /// but may also improve performance when an IO transport doesn't - /// support vectored writes well, such as most TLS implementations. - /// - /// Setting this to true will force hyper to use queued strategy - /// which may eliminate unnecessary cloning on some TLS backends - /// - /// Default is `auto`. In this mode hyper will try to guess which - /// mode to use - #[inline] - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_writev(&mut self, val: bool) -> &mut Self { - self.h1_writev = Some(val); - self - } - - /// Sets whether HTTP2 is required. - /// - /// Default is false - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_only(&mut self, val: bool) -> &mut Self { - if val { - self.mode = ConnectionMode::H2Only; - } else { - #[cfg(feature = "http1")] - { - self.mode = ConnectionMode::Fallback; - } - } - self - } - - /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. - /// - /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). - /// As of v0.3.17, it is 20. - /// - /// See for more information. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_pending_accept_reset_streams( - &mut self, - max: impl Into>, - ) -> &mut Self { - self.h2_builder.max_pending_accept_reset_streams = max.into(); - - self - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. 
- /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.adaptive_window = false; - self.h2_builder.initial_stream_window_size = sz; - } - self - } - - /// Sets the max connection-level flow control for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_connection_window_size( - &mut self, - sz: impl Into>, - ) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.adaptive_window = false; - self.h2_builder.initial_conn_window_size = sz; - } - self - } - - /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `http2_initial_stream_window_size` and - /// `http2_initial_connection_window_size`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { - use proto::h2::SPEC_WINDOW_SIZE; - - self.h2_builder.adaptive_window = enabled; - if enabled { - self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; - self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; - } - self - } - - /// Sets the maximum frame size to use for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { - if let Some(sz) = sz.into() { - self.h2_builder.max_frame_size = sz; - } - self - } - - /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 - /// connections. - /// - /// Default is no limit (`std::u32::MAX`). 
Passing `None` will do nothing. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { - self.h2_builder.max_concurrent_streams = max.into(); - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_interval( - &mut self, - interval: impl Into>, - ) -> &mut Self { - self.h2_builder.keep_alive_interval = interval.into(); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. - /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(feature = "runtime")] - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { - self.h2_builder.keep_alive_timeout = timeout; - self - } - - /// Set the maximum write buffer size for each HTTP/2 stream. - /// - /// Default is currently ~400KB, but may change. - /// - /// # Panics - /// - /// The value must be no larger than `u32::MAX`. 
- #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { - assert!(max <= std::u32::MAX as usize); - self.h2_builder.max_send_buffer_size = max; - self - } - - /// Enables the [extended CONNECT protocol]. - /// - /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 - #[cfg(feature = "http2")] - pub fn http2_enable_connect_protocol(&mut self) -> &mut Self { - self.h2_builder.enable_connect_protocol = true; - self - } - - /// Sets the max size of received header frames. - /// - /// Default is currently ~16MB, but may change. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self { - self.h2_builder.max_header_list_size = max; - self - } - - /// Set the maximum buffer size for the connection. - /// - /// Default is ~400kb. - /// - /// # Panics - /// - /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn max_buf_size(&mut self, max: usize) -> &mut Self { - assert!( - max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, - "the max_buf_size cannot be smaller than the minimum that h1 specifies." - ); - self.max_buf_size = Some(max); - self - } - - /// Aggregates flushes to better support pipelined responses. - /// - /// Experimental, may have bugs. - /// - /// Default is false. - pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { - self.pipeline_flush = enabled; - self - } - - /// Set the executor used to spawn background tasks. - /// - /// Default uses implicit default (like `tokio::spawn`). 
- pub fn with_executor(self, exec: E2) -> Http { - Http { - exec, - h1_half_close: self.h1_half_close, - h1_keep_alive: self.h1_keep_alive, - h1_title_case_headers: self.h1_title_case_headers, - h1_preserve_header_case: self.h1_preserve_header_case, - #[cfg(all(feature = "http1", feature = "runtime"))] - h1_header_read_timeout: self.h1_header_read_timeout, - h1_writev: self.h1_writev, - #[cfg(feature = "http2")] - h2_builder: self.h2_builder, - mode: self.mode, - max_buf_size: self.max_buf_size, - pipeline_flush: self.pipeline_flush, - } - } - - /// Bind a connection together with a [`Service`](crate::service::Service). - /// - /// This returns a Future that must be polled in order for HTTP to be - /// driven on the connection. - /// - /// # Example - /// - /// ``` - /// # use hyper::{Body, Request, Response}; - /// # use hyper::service::Service; - /// # use hyper::server::conn::Http; - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # async fn run(some_io: I, some_service: S) - /// # where - /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - /// # S: Service, Response=hyper::Response> + Send + 'static, - /// # S::Error: Into>, - /// # S::Future: Send, - /// # { - /// let http = Http::new(); - /// let conn = http.serve_connection(some_io, some_service); - /// - /// if let Err(e) = conn.await { - /// eprintln!("server connection error: {}", e); - /// } - /// # } - /// # fn main() {} - /// ``` - pub fn serve_connection(&self, io: I, service: S) -> Connection - where - S: HttpService, - S::Error: Into>, - Bd: HttpBody + 'static, - Bd::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - E: ConnStreamExec, - { - #[cfg(feature = "http1")] - macro_rules! 
h1 { - () => {{ - let mut conn = proto::Conn::new(io); - if !self.h1_keep_alive { - conn.disable_keep_alive(); - } - if self.h1_half_close { - conn.set_allow_half_close(); - } - if self.h1_title_case_headers { - conn.set_title_case_headers(); - } - if self.h1_preserve_header_case { - conn.set_preserve_header_case(); - } - #[cfg(all(feature = "http1", feature = "runtime"))] - if let Some(header_read_timeout) = self.h1_header_read_timeout { - conn.set_http1_header_read_timeout(header_read_timeout); - } - if let Some(writev) = self.h1_writev { - if writev { - conn.set_write_strategy_queue(); - } else { - conn.set_write_strategy_flatten(); - } - } - conn.set_flush_pipeline(self.pipeline_flush); - if let Some(max) = self.max_buf_size { - conn.set_max_buf_size(max); - } - let sd = proto::h1::dispatch::Server::new(service); - ProtoServer::H1 { - h1: proto::h1::Dispatcher::new(sd, conn), - } - }}; - } - - let proto = match self.mode { - #[cfg(feature = "http1")] - #[cfg(not(feature = "http2"))] - ConnectionMode::H1Only => h1!(), - #[cfg(feature = "http2")] - #[cfg(feature = "http1")] - ConnectionMode::H1Only | ConnectionMode::Fallback => h1!(), - #[cfg(feature = "http2")] - ConnectionMode::H2Only => { - let rewind_io = Rewind::new(io); - let h2 = - proto::h2::Server::new(rewind_io, service, &self.h2_builder, self.exec.clone()); - ProtoServer::H2 { h2 } - } - }; - - Connection { - conn: Some(proto), - #[cfg(all(feature = "http1", feature = "http2"))] - fallback: if self.mode == ConnectionMode::Fallback { - Fallback::ToHttp2(self.h2_builder.clone(), self.exec.clone()) - } else { - Fallback::Http1Only - }, - #[cfg(not(all(feature = "http1", feature = "http2")))] - fallback: PhantomData, - } - } -} - -// ===== impl Connection ===== - -#[cfg(any(feature = "http1", feature = "http2"))] -impl Connection -where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec, -{ - /// Start a graceful 
shutdown process for this connection. - /// - /// This `Connection` should continue to be polled until shutdown - /// can finish. - /// - /// # Note - /// - /// This should only be called while the `Connection` future is still - /// pending. If called after `Connection::poll` has resolved, this does - /// nothing. - pub fn graceful_shutdown(mut self: Pin<&mut Self>) { - match self.conn { - #[cfg(feature = "http1")] - Some(ProtoServer::H1 { ref mut h1, .. }) => { - h1.disable_keep_alive(); - } - #[cfg(feature = "http2")] - Some(ProtoServer::H2 { ref mut h2 }) => { - h2.graceful_shutdown(); - } - None => (), - - #[cfg(not(feature = "http1"))] - Some(ProtoServer::H1 { ref mut h1, .. }) => match h1.0 {}, - #[cfg(not(feature = "http2"))] - Some(ProtoServer::H2 { ref mut h2 }) => match h2.0 {}, - } - } - - /// Return the inner IO object, and additional information. - /// - /// If the IO object has been "rewound" the io will not contain those bytes rewound. - /// This should only be called after `poll_without_shutdown` signals - /// that the connection is "done". Otherwise, it may not have finished - /// flushing all necessary HTTP bytes. - /// - /// # Panics - /// This method will panic if this connection is using an h2 protocol. - #[cfg_attr(feature = "deprecated", allow(deprecated))] - pub fn into_parts(self) -> Parts { - self.try_into_parts() - .unwrap_or_else(|| panic!("h2 cannot into_inner")) - } - - /// Return the inner IO object, and additional information, if available. - /// - /// This method will return a `None` if this connection is using an h2 protocol. - #[cfg_attr(feature = "deprecated", allow(deprecated))] - pub fn try_into_parts(self) -> Option> { - match self.conn.unwrap() { - #[cfg(feature = "http1")] - ProtoServer::H1 { h1, .. } => { - let (io, read_buf, dispatch) = h1.into_inner(); - Some(Parts { - io, - read_buf, - service: dispatch.into_service(), - _inner: (), - }) - } - ProtoServer::H2 { .. 
} => None, - - #[cfg(not(feature = "http1"))] - ProtoServer::H1 { h1, .. } => match h1.0 {}, - } - } - - /// Poll the connection for completion, but without calling `shutdown` - /// on the underlying IO. - /// - /// This is useful to allow running a connection while doing an HTTP - /// upgrade. Once the upgrade is completed, the connection would be "done", - /// but it is not desired to actually shutdown the IO object. Instead you - /// would take it back using `into_parts`. - pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { - loop { - match *self.conn.as_mut().unwrap() { - #[cfg(feature = "http1")] - ProtoServer::H1 { ref mut h1, .. } => match ready!(h1.poll_without_shutdown(cx)) { - Ok(()) => return Poll::Ready(Ok(())), - Err(e) => { - #[cfg(feature = "http2")] - match *e.kind() { - Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => { - self.upgrade_h2(); - continue; - } - _ => (), - } - - return Poll::Ready(Err(e)); - } - }, - #[cfg(feature = "http2")] - ProtoServer::H2 { ref mut h2 } => return Pin::new(h2).poll(cx).map_ok(|_| ()), - - #[cfg(not(feature = "http1"))] - ProtoServer::H1 { ref mut h1, .. } => match h1.0 {}, - #[cfg(not(feature = "http2"))] - ProtoServer::H2 { ref mut h2 } => match h2.0 {}, - }; - } - } - - /// Prevent shutdown of the underlying IO object at the end of service the request, - /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. - /// - /// # Error - /// - /// This errors if the underlying connection protocol is not HTTP/1. 
- #[cfg_attr(feature = "deprecated", allow(deprecated))] - pub fn without_shutdown(self) -> impl Future>> { - let mut conn = Some(self); - futures_util::future::poll_fn(move |cx| { - ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; - Poll::Ready( - conn.take() - .unwrap() - .try_into_parts() - .ok_or_else(crate::Error::new_without_shutdown_not_h1), - ) - }) - } - - #[cfg(all(feature = "http1", feature = "http2"))] - fn upgrade_h2(&mut self) { - trace!("Trying to upgrade connection to h2"); - let conn = self.conn.take(); - - let (io, read_buf, dispatch) = match conn.unwrap() { - ProtoServer::H1 { h1, .. } => h1.into_inner(), - ProtoServer::H2 { .. } => { - panic!("h2 cannot into_inner"); - } - }; - let mut rewind_io = Rewind::new(io); - rewind_io.rewind(read_buf); - let (builder, exec) = match self.fallback { - Fallback::ToHttp2(ref builder, ref exec) => (builder, exec), - Fallback::Http1Only => unreachable!("upgrade_h2 with Fallback::Http1Only"), - }; - let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), builder, exec.clone()); - - debug_assert!(self.conn.is_none()); - self.conn = Some(ProtoServer::H2 { h2 }); - } - - /// Enable this connection to support higher-level HTTP upgrades. - /// - /// See [the `upgrade` module](crate::upgrade) for more. 
- pub fn with_upgrades(self) -> UpgradeableConnection - where - I: Send, - { - UpgradeableConnection { inner: self } - } -} - -#[cfg(any(feature = "http1", feature = "http2"))] -impl Future for Connection -where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec, -{ - type Output = crate::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - loop { - match ready!(Pin::new(self.conn.as_mut().unwrap()).poll(cx)) { - Ok(done) => { - match done { - proto::Dispatched::Shutdown => {} - #[cfg(feature = "http1")] - proto::Dispatched::Upgrade(pending) => { - // With no `Send` bound on `I`, we can't try to do - // upgrades here. In case a user was trying to use - // `Body::on_upgrade` with this API, send a special - // error letting them know about that. - pending.manual(); - } - }; - return Poll::Ready(Ok(())); - } - Err(e) => { - #[cfg(feature = "http1")] - #[cfg(feature = "http2")] - match *e.kind() { - Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => { - self.upgrade_h2(); - continue; - } - _ => (), - } - - return Poll::Ready(Err(e)); - } - } - } - } -} - -#[cfg(any(feature = "http1", feature = "http2"))] -impl fmt::Debug for Connection -where - S: HttpService, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Connection").finish() - } -} - -// ===== impl ConnectionMode ===== - -#[cfg(any(feature = "http1", feature = "http2"))] -impl Default for ConnectionMode { - #[cfg(all(feature = "http1", feature = "http2"))] - fn default() -> ConnectionMode { - ConnectionMode::Fallback - } - - #[cfg(all(feature = "http1", not(feature = "http2")))] - fn default() -> ConnectionMode { - ConnectionMode::H1Only - } - - #[cfg(all(not(feature = "http1"), feature = "http2"))] - fn default() -> ConnectionMode { - ConnectionMode::H2Only - } -} - -// ===== impl ProtoServer ===== - -#[cfg(any(feature = "http1", feature = "http2"))] 
-impl Future for ProtoServer -where - T: AsyncRead + AsyncWrite + Unpin, - S: HttpService, - S::Error: Into>, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec, -{ - type Output = crate::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match self.project() { - #[cfg(feature = "http1")] - ProtoServerProj::H1 { h1, .. } => h1.poll(cx), - #[cfg(feature = "http2")] - ProtoServerProj::H2 { h2 } => h2.poll(cx), - - #[cfg(not(feature = "http1"))] - ProtoServerProj::H1 { h1, .. } => match h1.0 {}, - #[cfg(not(feature = "http2"))] - ProtoServerProj::H2 { h2 } => match h2.0 {}, - } - } -} - -#[cfg(any(feature = "http1", feature = "http2"))] -mod upgrades { - use super::*; - - // A future binding a connection with a Service with Upgrade support. - // - // This type is unnameable outside the crate, and so basically just an - // `impl Future`, without requiring Rust 1.26. - #[must_use = "futures do nothing unless polled"] - #[allow(missing_debug_implementations)] - pub struct UpgradeableConnection - where - S: HttpService, - { - pub(super) inner: Connection, - } - - impl UpgradeableConnection - where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec, - { - /// Start a graceful shutdown process for this connection. - /// - /// This `Connection` should continue to be polled until shutdown - /// can finish. 
- pub fn graceful_shutdown(mut self: Pin<&mut Self>) { - Pin::new(&mut self.inner).graceful_shutdown() - } - } - - impl Future for UpgradeableConnection - where - S: HttpService, - S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec, - { - type Output = crate::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - loop { - match ready!(Pin::new(self.inner.conn.as_mut().unwrap()).poll(cx)) { - Ok(proto::Dispatched::Shutdown) => return Poll::Ready(Ok(())), - #[cfg(feature = "http1")] - Ok(proto::Dispatched::Upgrade(pending)) => { - match self.inner.conn.take() { - Some(ProtoServer::H1 { h1, .. }) => { - let (io, buf, _) = h1.into_inner(); - pending.fulfill(Upgraded::new(io, buf)); - return Poll::Ready(Ok(())); - } - _ => { - drop(pending); - unreachable!("Upgrade expects h1") - } - }; - } - Err(e) => { - #[cfg(feature = "http1")] - #[cfg(feature = "http2")] - match *e.kind() { - Kind::Parse(Parse::VersionH2) if self.inner.fallback.to_h2() => { - self.inner.upgrade_h2(); - continue; - } - _ => (), - } - - return Poll::Ready(Err(e)); - } - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/server/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/server/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/server/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/server/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,173 +0,0 @@ -//! HTTP Server -//! -//! A `Server` is created to listen on a port, parse HTTP requests, and hand -//! them off to a `Service`. -//! -//! There are two levels of APIs provide for constructing HTTP servers: -//! -//! - The higher-level [`Server`](Server) type. -//! - The lower-level [`conn`](conn) module. -//! -//! # Server -//! -//! The [`Server`](Server) is main way to start listening for HTTP requests. -//! 
It wraps a listener with a [`MakeService`](crate::service), and then should -//! be executed to start serving requests. -//! -//! [`Server`](Server) accepts connections in both HTTP1 and HTTP2 by default. -//! -//! ## Examples -//! -//! ```no_run -//! use std::convert::Infallible; -//! use std::net::SocketAddr; -//! use hyper::{Body, Request, Response, Server}; -//! use hyper::service::{make_service_fn, service_fn}; -//! -//! async fn handle(_req: Request) -> Result, Infallible> { -//! Ok(Response::new(Body::from("Hello World"))) -//! } -//! -//! # #[cfg(feature = "runtime")] -//! #[tokio::main] -//! async fn main() { -//! // Construct our SocketAddr to listen on... -//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); -//! -//! // And a MakeService to handle each connection... -//! let make_service = make_service_fn(|_conn| async { -//! Ok::<_, Infallible>(service_fn(handle)) -//! }); -//! -//! // Then bind and serve... -//! let server = Server::bind(&addr).serve(make_service); -//! -//! // And run forever... -//! if let Err(e) = server.await { -//! eprintln!("server error: {}", e); -//! } -//! } -//! # #[cfg(not(feature = "runtime"))] -//! # fn main() {} -//! ``` -//! -//! If you don't need the connection and your service implements `Clone` you can use -//! [`tower::make::Shared`] instead of `make_service_fn` which is a bit simpler: -//! -//! ```no_run -//! # use std::convert::Infallible; -//! # use std::net::SocketAddr; -//! # use hyper::{Body, Request, Response, Server}; -//! # use hyper::service::{make_service_fn, service_fn}; -//! # use tower::make::Shared; -//! # async fn handle(_req: Request) -> Result, Infallible> { -//! # Ok(Response::new(Body::from("Hello World"))) -//! # } -//! # #[cfg(feature = "runtime")] -//! #[tokio::main] -//! async fn main() { -//! // Construct our SocketAddr to listen on... -//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); -//! -//! // Shared is a MakeService that produces services by cloning an inner service... -//! 
let make_service = Shared::new(service_fn(handle)); -//! -//! // Then bind and serve... -//! let server = Server::bind(&addr).serve(make_service); -//! -//! // And run forever... -//! if let Err(e) = server.await { -//! eprintln!("server error: {}", e); -//! } -//! } -//! # #[cfg(not(feature = "runtime"))] -//! # fn main() {} -//! ``` -//! -//! Passing data to your request handler can be done like so: -//! -//! ```no_run -//! use std::convert::Infallible; -//! use std::net::SocketAddr; -//! use hyper::{Body, Request, Response, Server}; -//! use hyper::service::{make_service_fn, service_fn}; -//! # #[cfg(feature = "runtime")] -//! use hyper::server::conn::AddrStream; -//! -//! #[derive(Clone)] -//! struct AppContext { -//! // Whatever data your application needs can go here -//! } -//! -//! async fn handle( -//! context: AppContext, -//! addr: SocketAddr, -//! req: Request -//! ) -> Result, Infallible> { -//! Ok(Response::new(Body::from("Hello World"))) -//! } -//! -//! # #[cfg(feature = "runtime")] -//! #[tokio::main] -//! async fn main() { -//! let context = AppContext { -//! // ... -//! }; -//! -//! // A `MakeService` that produces a `Service` to handle each connection. -//! let make_service = make_service_fn(move |conn: &AddrStream| { -//! // We have to clone the context to share it with each invocation of -//! // `make_service`. If your data doesn't implement `Clone` consider using -//! // an `std::sync::Arc`. -//! let context = context.clone(); -//! -//! // You can grab the address of the incoming connection like so. -//! let addr = conn.remote_addr(); -//! -//! // Create a `Service` for responding to the request. -//! let service = service_fn(move |req| { -//! handle(context.clone(), addr, req) -//! }); -//! -//! // Return the service to hyper. -//! async move { Ok::<_, Infallible>(service) } -//! }); -//! -//! // Run the server like above... -//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); -//! -//! 
let server = Server::bind(&addr).serve(make_service); -//! -//! if let Err(e) = server.await { -//! eprintln!("server error: {}", e); -//! } -//! } -//! # #[cfg(not(feature = "runtime"))] -//! # fn main() {} -//! ``` -//! -//! [`tower::make::Shared`]: https://docs.rs/tower/latest/tower/make/struct.Shared.html - -pub mod accept; -pub mod conn; -#[cfg(feature = "tcp")] -mod tcp; - -pub use self::server::Server; - -cfg_feature! { - #![any(feature = "http1", feature = "http2")] - - #[cfg_attr(feature = "deprecated", allow(deprecated))] - pub(crate) mod server; - pub use self::server::Builder; - - mod shutdown; -} - -cfg_feature! { - #![not(any(feature = "http1", feature = "http2"))] - - mod server_stub; - use server_stub as server; -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/server/server.rs s390-tools-2.33.1/rust-vendor/hyper/src/server/server.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/server/server.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/server/server.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,811 +0,0 @@ -use std::error::Error as StdError; -use std::fmt; -#[cfg(feature = "tcp")] -use std::net::{SocketAddr, TcpListener as StdTcpListener}; - -#[cfg(feature = "tcp")] -use std::time::Duration; - -use pin_project_lite::pin_project; - -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::trace; - -use super::accept::Accept; -#[cfg(all(feature = "tcp"))] -use super::tcp::AddrIncoming; -use crate::body::{Body, HttpBody}; -use crate::common::exec::Exec; -use crate::common::exec::{ConnStreamExec, NewSvcExec}; -use crate::common::{task, Future, Pin, Poll, Unpin}; -// Renamed `Http` as `Http_` for now so that people upgrading don't see an -// error that `hyper::server::Http` is private... -use super::conn::{Connection, Http as Http_, UpgradeableConnection}; -use super::shutdown::{Graceful, GracefulWatcher}; -use crate::service::{HttpService, MakeServiceRef}; - -use self::new_svc::NewSvcTask; - -pin_project! 
{ - /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. - /// - /// `Server` is a `Future` mapping a bound listener with a set of service - /// handlers. It is built using the [`Builder`](Builder), and the future - /// completes when the server has been shutdown. It should be run by an - /// `Executor`. - pub struct Server { - #[pin] - incoming: I, - make_service: S, - protocol: Http_, - } -} - -/// A builder for a [`Server`](Server). -#[derive(Debug)] -#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] -pub struct Builder { - incoming: I, - protocol: Http_, -} - -// ===== impl Server ===== - -#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] -impl Server { - /// Starts a [`Builder`](Builder) with the provided incoming stream. - pub fn builder(incoming: I) -> Builder { - Builder { - incoming, - protocol: Http_::new(), - } - } -} - -#[cfg(feature = "tcp")] -#[cfg_attr( - docsrs, - doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))) -)] -impl Server { - /// Binds to the provided address, and returns a [`Builder`](Builder). - /// - /// # Panics - /// - /// This method will panic if binding to the address fails. For a method - /// to bind to an address and return a `Result`, see `Server::try_bind`. - pub fn bind(addr: &SocketAddr) -> Builder { - let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| { - panic!("error binding to {}: {}", addr, e); - }); - Server::builder(incoming) - } - - /// Tries to bind to the provided address, and returns a [`Builder`](Builder). - pub fn try_bind(addr: &SocketAddr) -> crate::Result> { - AddrIncoming::new(addr).map(Server::builder) - } - - /// Create a new instance from a `std::net::TcpListener` instance. 
- pub fn from_tcp(listener: StdTcpListener) -> Result, crate::Error> { - AddrIncoming::from_std(listener).map(Server::builder) - } -} - -#[cfg(feature = "tcp")] -#[cfg_attr( - docsrs, - doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))) -)] -impl Server { - /// Returns the local address that this server is bound to. - pub fn local_addr(&self) -> SocketAddr { - self.incoming.local_addr() - } -} - -#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] -impl Server -where - I: Accept, - IE: Into>, - IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec<>::Future, B>, -{ - /// Prepares a server to handle graceful shutdown when the provided future - /// completes. - /// - /// # Example - /// - /// ``` - /// # fn main() {} - /// # #[cfg(feature = "tcp")] - /// # async fn run() { - /// # use hyper::{Body, Response, Server, Error}; - /// # use hyper::service::{make_service_fn, service_fn}; - /// # let make_service = make_service_fn(|_| async { - /// # Ok::<_, Error>(service_fn(|_req| async { - /// # Ok::<_, Error>(Response::new(Body::from("Hello World"))) - /// # })) - /// # }); - /// // Make a server from the previous examples... - /// let server = Server::bind(&([127, 0, 0, 1], 3000).into()) - /// .serve(make_service); - /// - /// // Prepare some signal for when the server should start shutting down... - /// let (tx, rx) = tokio::sync::oneshot::channel::<()>(); - /// let graceful = server - /// .with_graceful_shutdown(async { - /// rx.await.ok(); - /// }); - /// - /// // Await the `server` receiving the signal... - /// if let Err(e) = graceful.await { - /// eprintln!("server error: {}", e); - /// } - /// - /// // And later, trigger the signal by calling `tx.send(())`. 
- /// let _ = tx.send(()); - /// # } - /// ``` - pub fn with_graceful_shutdown(self, signal: F) -> Graceful - where - F: Future, - E: NewSvcExec, - { - Graceful::new(self, signal) - } - - fn poll_next_( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>>> { - let me = self.project(); - match ready!(me.make_service.poll_ready_ref(cx)) { - Ok(()) => (), - Err(e) => { - trace!("make_service closed"); - return Poll::Ready(Some(Err(crate::Error::new_user_make_service(e)))); - } - } - - if let Some(item) = ready!(me.incoming.poll_accept(cx)) { - let io = item.map_err(crate::Error::new_accept)?; - let new_fut = me.make_service.make_service_ref(&io); - Poll::Ready(Some(Ok(Connecting { - future: new_fut, - io: Some(io), - protocol: me.protocol.clone(), - }))) - } else { - Poll::Ready(None) - } - } - - pub(super) fn poll_watch( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - watcher: &W, - ) -> Poll> - where - E: NewSvcExec, - W: Watcher, - { - loop { - if let Some(connecting) = ready!(self.as_mut().poll_next_(cx)?) 
{ - let fut = NewSvcTask::new(connecting, watcher.clone()); - self.as_mut().project().protocol.exec.execute_new_svc(fut); - } else { - return Poll::Ready(Ok(())); - } - } - } -} - -#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] -impl Future for Server -where - I: Accept, - IE: Into>, - IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec<>::Future, B>, - E: NewSvcExec, -{ - type Output = crate::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.poll_watch(cx, &NoopWatcher) - } -} - -impl fmt::Debug for Server { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut st = f.debug_struct("Server"); - st.field("listener", &self.incoming); - st.finish() - } -} - -// ===== impl Builder ===== - -#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] -impl Builder { - /// Start a new builder, wrapping an incoming stream and low-level options. - /// - /// For a more convenient constructor, see [`Server::bind`](Server::bind). - pub fn new(incoming: I, protocol: Http_) -> Self { - Builder { incoming, protocol } - } - - /// Sets whether to use keep-alive for HTTP/1 connections. - /// - /// Default is `true`. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_keepalive(mut self, val: bool) -> Self { - self.protocol.http1_keep_alive(val); - self - } - - /// Set whether HTTP/1 connections should support half-closures. - /// - /// Clients can chose to shutdown their write-side while waiting - /// for the server to respond. Setting this to `true` will - /// prevent closing the connection immediately if `read` - /// detects an EOF in the middle of a request. - /// - /// Default is `false`. 
- #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_half_close(mut self, val: bool) -> Self { - self.protocol.http1_half_close(val); - self - } - - /// Set the maximum buffer size. - /// - /// Default is ~ 400kb. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_max_buf_size(mut self, val: usize) -> Self { - self.protocol.max_buf_size(val); - self - } - - // Sets whether to bunch up HTTP/1 writes until the read buffer is empty. - // - // This isn't really desirable in most cases, only really being useful in - // silly pipeline benchmarks. - #[doc(hidden)] - #[cfg(feature = "http1")] - pub fn http1_pipeline_flush(mut self, val: bool) -> Self { - self.protocol.pipeline_flush(val); - self - } - - /// Set whether HTTP/1 connections should try to use vectored writes, - /// or always flatten into a single buffer. - /// - /// Note that setting this to false may mean more copies of body data, - /// but may also improve performance when an IO transport doesn't - /// support vectored writes well, such as most TLS implementations. - /// - /// Setting this to true will force hyper to use queued strategy - /// which may eliminate unnecessary cloning on some TLS backends - /// - /// Default is `auto`. In this mode hyper will try to guess which - /// mode to use - #[cfg(feature = "http1")] - pub fn http1_writev(mut self, enabled: bool) -> Self { - self.protocol.http1_writev(enabled); - self - } - - /// Set whether HTTP/1 connections will write header names as title case at - /// the socket level. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_title_case_headers(mut self, val: bool) -> Self { - self.protocol.http1_title_case_headers(val); - self - } - - /// Set whether to support preserving original header cases. 
- /// - /// Currently, this will record the original cases received, and store them - /// in a private extension on the `Request`. It will also look for and use - /// such an extension in any provided `Response`. - /// - /// Since the relevant extension is still private, there is no way to - /// interact with the original cases. The only effect this can have now is - /// to forward the cases in a proxy-like fashion. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_preserve_header_case(mut self, val: bool) -> Self { - self.protocol.http1_preserve_header_case(val); - self - } - - /// Set a timeout for reading client request headers. If a client does not - /// transmit the entire header within this time, the connection is closed. - /// - /// Default is None. - #[cfg(all(feature = "http1", feature = "runtime"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))] - pub fn http1_header_read_timeout(mut self, read_timeout: Duration) -> Self { - self.protocol.http1_header_read_timeout(read_timeout); - self - } - - /// Sets whether HTTP/1 is required. - /// - /// Default is `false`. - #[cfg(feature = "http1")] - #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] - pub fn http1_only(mut self, val: bool) -> Self { - self.protocol.http1_only(val); - self - } - - /// Sets whether HTTP/2 is required. - /// - /// Default is `false`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_only(mut self, val: bool) -> Self { - self.protocol.http2_only(val); - self - } - - /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. - /// - /// This will default to whatever the default in h2 is. As of v0.3.17, it is 20. - /// - /// See for more information. 
- #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_pending_accept_reset_streams(mut self, max: impl Into>) -> Self { - self.protocol.http2_max_pending_accept_reset_streams(max); - self - } - - /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 - /// stream-level flow control. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_stream_window_size(mut self, sz: impl Into>) -> Self { - self.protocol.http2_initial_stream_window_size(sz.into()); - self - } - - /// Sets the max connection-level flow control for HTTP2 - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_initial_connection_window_size(mut self, sz: impl Into>) -> Self { - self.protocol - .http2_initial_connection_window_size(sz.into()); - self - } - - /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `http2_initial_stream_window_size` and - /// `http2_initial_connection_window_size`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_adaptive_window(mut self, enabled: bool) -> Self { - self.protocol.http2_adaptive_window(enabled); - self - } - - /// Sets the maximum frame size to use for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_frame_size(mut self, sz: impl Into>) -> Self { - self.protocol.http2_max_frame_size(sz); - self - } - - /// Sets the max size of received header frames. 
- /// - /// Default is currently ~16MB, but may change. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_header_list_size(mut self, max: u32) -> Self { - self.protocol.http2_max_header_list_size(max); - self - } - - /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 - /// connections. - /// - /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. - /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_concurrent_streams(mut self, max: impl Into>) -> Self { - self.protocol.http2_max_concurrent_streams(max.into()); - self - } - - /// Sets an interval for HTTP2 Ping frames should be sent to keep a - /// connection alive. - /// - /// Pass `None` to disable HTTP2 keep-alive. - /// - /// Default is currently disabled. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(all(feature = "runtime", feature = "http2"))] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_interval(mut self, interval: impl Into>) -> Self { - self.protocol.http2_keep_alive_interval(interval); - self - } - - /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. - /// - /// If the ping is not acknowledged within the timeout, the connection will - /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. - /// - /// Default is 20 seconds. - /// - /// # Cargo Feature - /// - /// Requires the `runtime` cargo feature to be enabled. - #[cfg(all(feature = "runtime", feature = "http2"))] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_keep_alive_timeout(mut self, timeout: Duration) -> Self { - self.protocol.http2_keep_alive_timeout(timeout); - self - } - - /// Set the maximum write buffer size for each HTTP/2 stream. 
- /// - /// Default is currently ~400KB, but may change. - /// - /// # Panics - /// - /// The value must be no larger than `u32::MAX`. - #[cfg(feature = "http2")] - #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] - pub fn http2_max_send_buf_size(mut self, max: usize) -> Self { - self.protocol.http2_max_send_buf_size(max); - self - } - - /// Enables the [extended CONNECT protocol]. - /// - /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 - #[cfg(feature = "http2")] - pub fn http2_enable_connect_protocol(mut self) -> Self { - self.protocol.http2_enable_connect_protocol(); - self - } - - /// Sets the `Executor` to deal with connection tasks. - /// - /// Default is `tokio::spawn`. - pub fn executor(self, executor: E2) -> Builder { - Builder { - incoming: self.incoming, - protocol: self.protocol.with_executor(executor), - } - } - - /// Consume this `Builder`, creating a [`Server`](Server). - /// - /// # Example - /// - /// ``` - /// # #[cfg(feature = "tcp")] - /// # async fn run() { - /// use hyper::{Body, Error, Response, Server}; - /// use hyper::service::{make_service_fn, service_fn}; - /// - /// // Construct our SocketAddr to listen on... - /// let addr = ([127, 0, 0, 1], 3000).into(); - /// - /// // And a MakeService to handle each connection... - /// let make_svc = make_service_fn(|_| async { - /// Ok::<_, Error>(service_fn(|_req| async { - /// Ok::<_, Error>(Response::new(Body::from("Hello World"))) - /// })) - /// }); - /// - /// // Then bind and serve... - /// let server = Server::bind(&addr) - /// .serve(make_svc); - /// - /// // Run forever-ish... 
- /// if let Err(err) = server.await { - /// eprintln!("server error: {}", err); - /// } - /// # } - /// ``` - pub fn serve(self, make_service: S) -> Server - where - I: Accept, - I::Error: Into>, - I::Conn: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + 'static, - B::Error: Into>, - E: NewSvcExec, - E: ConnStreamExec<>::Future, B>, - { - Server { - incoming: self.incoming, - make_service, - protocol: self.protocol.clone(), - } - } -} - -#[cfg(feature = "tcp")] -#[cfg_attr( - docsrs, - doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))) -)] -impl Builder { - /// Set the duration to remain idle before sending TCP keepalive probes. - /// - /// If `None` is specified, keepalive is disabled. - pub fn tcp_keepalive(mut self, keepalive: Option) -> Self { - self.incoming.set_keepalive(keepalive); - self - } - - /// Set the duration between two successive TCP keepalive retransmissions, - /// if acknowledgement to the previous keepalive transmission is not received. - pub fn tcp_keepalive_interval(mut self, interval: Option) -> Self { - self.incoming.set_keepalive_interval(interval); - self - } - - /// Set the number of retransmissions to be carried out before declaring that remote end is not available. - pub fn tcp_keepalive_retries(mut self, retries: Option) -> Self { - self.incoming.set_keepalive_retries(retries); - self - } - - /// Set the value of `TCP_NODELAY` option for accepted connections. - pub fn tcp_nodelay(mut self, enabled: bool) -> Self { - self.incoming.set_nodelay(enabled); - self - } - - /// Set whether to sleep on accept errors. - /// - /// A possible scenario is that the process has hit the max open files - /// allowed, and so trying to accept a new connection will fail with - /// EMFILE. In some cases, it's preferable to just wait for some time, if - /// the application will likely close some files (or connections), and try - /// to accept the connection again. 
If this option is true, the error will - /// be logged at the error level, since it is still a big deal, and then - /// the listener will sleep for 1 second. - /// - /// In other cases, hitting the max open files should be treat similarly - /// to being out-of-memory, and simply error (and shutdown). Setting this - /// option to false will allow that. - /// - /// For more details see [`AddrIncoming::set_sleep_on_errors`] - pub fn tcp_sleep_on_accept_errors(mut self, val: bool) -> Self { - self.incoming.set_sleep_on_errors(val); - self - } -} - -// Used by `Server` to optionally watch a `Connection` future. -// -// The regular `hyper::Server` just uses a `NoopWatcher`, which does -// not need to watch anything, and so returns the `Connection` untouched. -// -// The `Server::with_graceful_shutdown` needs to keep track of all active -// connections, and signal that they start to shutdown when prompted, so -// it has a `GracefulWatcher` implementation to do that. -pub trait Watcher, E>: Clone { - type Future: Future>; - - fn watch(&self, conn: UpgradeableConnection) -> Self::Future; -} - -#[allow(missing_debug_implementations)] -#[derive(Copy, Clone)] -pub struct NoopWatcher; - -impl Watcher for NoopWatcher -where - I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: HttpService, - E: ConnStreamExec, - S::ResBody: 'static, - ::Error: Into>, -{ - type Future = UpgradeableConnection; - - fn watch(&self, conn: UpgradeableConnection) -> Self::Future { - conn - } -} - -// used by exec.rs -pub(crate) mod new_svc { - use std::error::Error as StdError; - use tokio::io::{AsyncRead, AsyncWrite}; - use tracing::debug; - - use super::{Connecting, Watcher}; - use crate::body::{Body, HttpBody}; - use crate::common::exec::ConnStreamExec; - use crate::common::{task, Future, Pin, Poll, Unpin}; - use crate::service::HttpService; - use pin_project_lite::pin_project; - - // This is a `Future` spawned to an `Executor` inside - // the `Server`. 
By being a nameable type, we can be generic over the - // user's `Service::Future`, and thus an `Executor` can execute it. - // - // Doing this allows for the server to conditionally require `Send` futures, - // depending on the `Executor` configured. - // - // Users cannot import this type, nor the associated `NewSvcExec`. Instead, - // a blanket implementation for `Executor` is sufficient. - - pin_project! { - #[allow(missing_debug_implementations)] - pub struct NewSvcTask, E, W: Watcher> { - #[pin] - state: State, - } - } - - pin_project! { - #[project = StateProj] - pub(super) enum State, E, W: Watcher> { - Connecting { - #[pin] - connecting: Connecting, - watcher: W, - }, - Connected { - #[pin] - future: W::Future, - }, - } - } - - impl, E, W: Watcher> NewSvcTask { - pub(super) fn new(connecting: Connecting, watcher: W) -> Self { - NewSvcTask { - state: State::Connecting { - connecting, - watcher, - }, - } - } - } - - impl Future for NewSvcTask - where - I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - N: Future>, - NE: Into>, - S: HttpService, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec, - W: Watcher, - { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - // If it weren't for needing to name this type so the `Send` bounds - // could be projected to the `Serve` executor, this could just be - // an `async fn`, and much safer. Woe is me. 
- - let mut me = self.project(); - loop { - let next = { - match me.state.as_mut().project() { - StateProj::Connecting { - connecting, - watcher, - } => { - let res = ready!(connecting.poll(cx)); - let conn = match res { - Ok(conn) => conn, - Err(err) => { - let err = crate::Error::new_user_make_service(err); - debug!("connecting error: {}", err); - return Poll::Ready(()); - } - }; - let future = watcher.watch(conn.with_upgrades()); - State::Connected { future } - } - StateProj::Connected { future } => { - return future.poll(cx).map(|res| { - if let Err(err) = res { - debug!("connection error: {}", err); - } - }); - } - } - }; - - me.state.set(next); - } - } - } -} - -pin_project! { - /// A future building a new `Service` to a `Connection`. - /// - /// Wraps the future returned from `MakeService` into one that returns - /// a `Connection`. - #[must_use = "futures do nothing unless polled"] - #[derive(Debug)] - #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] - pub struct Connecting { - #[pin] - future: F, - io: Option, - protocol: Http_, - } -} - -impl Future for Connecting -where - I: AsyncRead + AsyncWrite + Unpin, - F: Future>, - S: HttpService, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec, -{ - type Output = Result, FE>; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let mut me = self.project(); - let service = ready!(me.future.poll(cx))?; - let io = Option::take(&mut me.io).expect("polled after complete"); - Poll::Ready(Ok(me.protocol.serve_connection(io, service))) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/server/server_stub.rs s390-tools-2.33.1/rust-vendor/hyper/src/server/server_stub.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/server/server_stub.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/server/server_stub.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,16 +0,0 @@ -use std::fmt; - -use crate::common::exec::Exec; - -/// A listening 
HTTP server that accepts connections in both HTTP1 and HTTP2 by default. -/// -/// Needs at least one of the `http1` and `http2` features to be activated to actually be useful. -pub struct Server { - _marker: std::marker::PhantomData<(I, S, E)>, -} - -impl fmt::Debug for Server { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Server").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/server/shutdown.rs s390-tools-2.33.1/rust-vendor/hyper/src/server/shutdown.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/server/shutdown.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/server/shutdown.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,128 +0,0 @@ -use std::error::Error as StdError; - -use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::debug; - -use super::accept::Accept; -use super::conn::UpgradeableConnection; -use super::server::{Server, Watcher}; -use crate::body::{Body, HttpBody}; -use crate::common::drain::{self, Draining, Signal, Watch, Watching}; -use crate::common::exec::{ConnStreamExec, NewSvcExec}; -use crate::common::{task, Future, Pin, Poll, Unpin}; -use crate::service::{HttpService, MakeServiceRef}; - -pin_project! { - #[allow(missing_debug_implementations)] - pub struct Graceful { - #[pin] - state: State, - } -} - -pin_project! 
{ - #[project = StateProj] - pub(super) enum State { - Running { - drain: Option<(Signal, Watch)>, - #[pin] - server: Server, - #[pin] - signal: F, - }, - Draining { draining: Draining }, - } -} - -impl Graceful { - pub(super) fn new(server: Server, signal: F) -> Self { - let drain = Some(drain::channel()); - Graceful { - state: State::Running { - drain, - server, - signal, - }, - } - } -} - -impl Future for Graceful -where - I: Accept, - IE: Into>, - IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - S::Error: Into>, - B: HttpBody + 'static, - B::Error: Into>, - F: Future, - E: ConnStreamExec<>::Future, B>, - E: NewSvcExec, -{ - type Output = crate::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let mut me = self.project(); - loop { - let next = { - match me.state.as_mut().project() { - StateProj::Running { - drain, - server, - signal, - } => match signal.poll(cx) { - Poll::Ready(()) => { - debug!("signal received, starting graceful shutdown"); - let sig = drain.take().expect("drain channel").0; - State::Draining { - draining: sig.drain(), - } - } - Poll::Pending => { - let watch = drain.as_ref().expect("drain channel").1.clone(); - return server.poll_watch(cx, &GracefulWatcher(watch)); - } - }, - StateProj::Draining { ref mut draining } => { - return Pin::new(draining).poll(cx).map(Ok); - } - } - }; - me.state.set(next); - } - } -} - -#[allow(missing_debug_implementations)] -#[derive(Clone)] -pub struct GracefulWatcher(Watch); - -impl Watcher for GracefulWatcher -where - I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: HttpService, - E: ConnStreamExec, - S::ResBody: 'static, - ::Error: Into>, -{ - type Future = - Watching, fn(Pin<&mut UpgradeableConnection>)>; - - fn watch(&self, conn: UpgradeableConnection) -> Self::Future { - self.0.clone().watch(conn, on_drain) - } -} - -fn on_drain(conn: Pin<&mut UpgradeableConnection>) -where - S: HttpService, - S::Error: Into>, - I: AsyncRead + 
AsyncWrite + Unpin, - S::ResBody: HttpBody + 'static, - ::Error: Into>, - E: ConnStreamExec, -{ - conn.graceful_shutdown() -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/server/tcp.rs s390-tools-2.33.1/rust-vendor/hyper/src/server/tcp.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/server/tcp.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/server/tcp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,484 +0,0 @@ -use std::fmt; -use std::io; -use std::net::{SocketAddr, TcpListener as StdTcpListener}; -use std::time::Duration; -use socket2::TcpKeepalive; - -use tokio::net::TcpListener; -use tokio::time::Sleep; -use tracing::{debug, error, trace}; - -use crate::common::{task, Future, Pin, Poll}; - -#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 -pub use self::addr_stream::AddrStream; -use super::accept::Accept; - -#[derive(Default, Debug, Clone, Copy)] -struct TcpKeepaliveConfig { - time: Option, - interval: Option, - retries: Option, -} - -impl TcpKeepaliveConfig { - /// Converts into a `socket2::TcpKeealive` if there is any keep alive configuration. 
- fn into_socket2(self) -> Option { - let mut dirty = false; - let mut ka = TcpKeepalive::new(); - if let Some(time) = self.time { - ka = ka.with_time(time); - dirty = true - } - if let Some(interval) = self.interval { - ka = Self::ka_with_interval(ka, interval, &mut dirty) - }; - if let Some(retries) = self.retries { - ka = Self::ka_with_retries(ka, retries, &mut dirty) - }; - if dirty { - Some(ka) - } else { - None - } - } - - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - windows, - ))] - fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive { - *dirty = true; - ka.with_interval(interval) - } - - #[cfg(not(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - windows, - )))] - fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive { - ka // no-op as keepalive interval is not supported on this platform - } - - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ))] - fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive { - *dirty = true; - ka.with_retries(retries) - } - - #[cfg(not(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - )))] - fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive { - ka // no-op as keepalive retries is not supported on this platform - } -} - -/// A stream of connections from binding to an 
address. -#[must_use = "streams do nothing unless polled"] -pub struct AddrIncoming { - addr: SocketAddr, - listener: TcpListener, - sleep_on_errors: bool, - tcp_keepalive_config: TcpKeepaliveConfig, - tcp_nodelay: bool, - timeout: Option>>, -} - -impl AddrIncoming { - pub(super) fn new(addr: &SocketAddr) -> crate::Result { - let std_listener = StdTcpListener::bind(addr).map_err(crate::Error::new_listen)?; - - AddrIncoming::from_std(std_listener) - } - - pub(super) fn from_std(std_listener: StdTcpListener) -> crate::Result { - // TcpListener::from_std doesn't set O_NONBLOCK - std_listener - .set_nonblocking(true) - .map_err(crate::Error::new_listen)?; - let listener = TcpListener::from_std(std_listener).map_err(crate::Error::new_listen)?; - AddrIncoming::from_listener(listener) - } - - /// Creates a new `AddrIncoming` binding to provided socket address. - pub fn bind(addr: &SocketAddr) -> crate::Result { - AddrIncoming::new(addr) - } - - /// Creates a new `AddrIncoming` from an existing `tokio::net::TcpListener`. - pub fn from_listener(listener: TcpListener) -> crate::Result { - let addr = listener.local_addr().map_err(crate::Error::new_listen)?; - Ok(AddrIncoming { - listener, - addr, - sleep_on_errors: true, - tcp_keepalive_config: TcpKeepaliveConfig::default(), - tcp_nodelay: false, - timeout: None, - }) - } - - /// Get the local address bound to this listener. - pub fn local_addr(&self) -> SocketAddr { - self.addr - } - - /// Set the duration to remain idle before sending TCP keepalive probes. - /// - /// If `None` is specified, keepalive is disabled. - pub fn set_keepalive(&mut self, time: Option) -> &mut Self { - self.tcp_keepalive_config.time = time; - self - } - - /// Set the duration between two successive TCP keepalive retransmissions, - /// if acknowledgement to the previous keepalive transmission is not received. 
- pub fn set_keepalive_interval(&mut self, interval: Option) -> &mut Self { - self.tcp_keepalive_config.interval = interval; - self - } - - /// Set the number of retransmissions to be carried out before declaring that remote end is not available. - pub fn set_keepalive_retries(&mut self, retries: Option) -> &mut Self { - self.tcp_keepalive_config.retries = retries; - self - } - - /// Set the value of `TCP_NODELAY` option for accepted connections. - pub fn set_nodelay(&mut self, enabled: bool) -> &mut Self { - self.tcp_nodelay = enabled; - self - } - - /// Set whether to sleep on accept errors. - /// - /// A possible scenario is that the process has hit the max open files - /// allowed, and so trying to accept a new connection will fail with - /// `EMFILE`. In some cases, it's preferable to just wait for some time, if - /// the application will likely close some files (or connections), and try - /// to accept the connection again. If this option is `true`, the error - /// will be logged at the `error` level, since it is still a big deal, - /// and then the listener will sleep for 1 second. - /// - /// In other cases, hitting the max open files should be treat similarly - /// to being out-of-memory, and simply error (and shutdown). Setting - /// this option to `false` will allow that. - /// - /// Default is `true`. - pub fn set_sleep_on_errors(&mut self, val: bool) { - self.sleep_on_errors = val; - } - - fn poll_next_(&mut self, cx: &mut task::Context<'_>) -> Poll> { - // Check if a previous timeout is active that was set by IO errors. 
- if let Some(ref mut to) = self.timeout { - ready!(Pin::new(to).poll(cx)); - } - self.timeout = None; - - loop { - match ready!(self.listener.poll_accept(cx)) { - Ok((socket, remote_addr)) => { - if let Some(tcp_keepalive) = &self.tcp_keepalive_config.into_socket2() { - let sock_ref = socket2::SockRef::from(&socket); - if let Err(e) = sock_ref.set_tcp_keepalive(tcp_keepalive) { - trace!("error trying to set TCP keepalive: {}", e); - } - } - if let Err(e) = socket.set_nodelay(self.tcp_nodelay) { - trace!("error trying to set TCP nodelay: {}", e); - } - let local_addr = socket.local_addr()?; - return Poll::Ready(Ok(AddrStream::new(socket, remote_addr, local_addr))); - } - Err(e) => { - // Connection errors can be ignored directly, continue by - // accepting the next request. - if is_connection_error(&e) { - debug!("accepted connection already errored: {}", e); - continue; - } - - if self.sleep_on_errors { - error!("accept error: {}", e); - - // Sleep 1s. - let mut timeout = Box::pin(tokio::time::sleep(Duration::from_secs(1))); - - match timeout.as_mut().poll(cx) { - Poll::Ready(()) => { - // Wow, it's been a second already? Ok then... - continue; - } - Poll::Pending => { - self.timeout = Some(timeout); - return Poll::Pending; - } - } - } else { - return Poll::Ready(Err(e)); - } - } - } - } - } -} - -impl Accept for AddrIncoming { - type Conn = AddrStream; - type Error = io::Error; - - fn poll_accept( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>> { - let result = ready!(self.poll_next_(cx)); - Poll::Ready(Some(result)) - } -} - -/// This function defines errors that are per-connection. Which basically -/// means that if we get this error from `accept()` system call it means -/// next connection might be ready to be accepted. -/// -/// All other errors will incur a timeout before next `accept()` is performed. -/// The timeout is useful to handle resource exhaustion errors like ENFILE -/// and EMFILE. Otherwise, could enter into tight loop. 
-fn is_connection_error(e: &io::Error) -> bool { - matches!( - e.kind(), - io::ErrorKind::ConnectionRefused - | io::ErrorKind::ConnectionAborted - | io::ErrorKind::ConnectionReset - ) -} - -impl fmt::Debug for AddrIncoming { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AddrIncoming") - .field("addr", &self.addr) - .field("sleep_on_errors", &self.sleep_on_errors) - .field("tcp_keepalive_config", &self.tcp_keepalive_config) - .field("tcp_nodelay", &self.tcp_nodelay) - .finish() - } -} - -mod addr_stream { - use std::io; - use std::net::SocketAddr; - #[cfg(unix)] - use std::os::unix::io::{AsRawFd, RawFd}; - use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; - use tokio::net::TcpStream; - - use crate::common::{task, Pin, Poll}; - - pin_project_lite::pin_project! { - /// A transport returned yieled by `AddrIncoming`. - #[derive(Debug)] - pub struct AddrStream { - #[pin] - inner: TcpStream, - pub(super) remote_addr: SocketAddr, - pub(super) local_addr: SocketAddr - } - } - - impl AddrStream { - pub(super) fn new( - tcp: TcpStream, - remote_addr: SocketAddr, - local_addr: SocketAddr, - ) -> AddrStream { - AddrStream { - inner: tcp, - remote_addr, - local_addr, - } - } - - /// Returns the remote (peer) address of this connection. - #[inline] - pub fn remote_addr(&self) -> SocketAddr { - self.remote_addr - } - - /// Returns the local address of this connection. - #[inline] - pub fn local_addr(&self) -> SocketAddr { - self.local_addr - } - - /// Consumes the AddrStream and returns the underlying IO object - #[inline] - pub fn into_inner(self) -> TcpStream { - self.inner - } - - /// Attempt to receive data on the socket, without removing that data - /// from the queue, registering the current task for wakeup if data is - /// not yet available. 
- pub fn poll_peek( - &mut self, - cx: &mut task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> Poll> { - self.inner.poll_peek(cx, buf) - } - } - - impl AsyncRead for AddrStream { - #[inline] - fn poll_read( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.project().inner.poll_read(cx, buf) - } - } - - impl AsyncWrite for AddrStream { - #[inline] - fn poll_write( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - self.project().inner.poll_write(cx, buf) - } - - #[inline] - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.project().inner.poll_write_vectored(cx, bufs) - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll> { - // TCP flush is a noop - Poll::Ready(Ok(())) - } - - #[inline] - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - self.project().inner.poll_shutdown(cx) - } - - #[inline] - fn is_write_vectored(&self) -> bool { - // Note that since `self.inner` is a `TcpStream`, this could - // *probably* be hard-coded to return `true`...but it seems more - // correct to ask it anyway (maybe we're on some platform without - // scatter-gather IO?) 
- self.inner.is_write_vectored() - } - } - - #[cfg(unix)] - impl AsRawFd for AddrStream { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } - } -} - -#[cfg(test)] -mod tests { - use std::time::Duration; - use crate::server::tcp::TcpKeepaliveConfig; - - #[test] - fn no_tcp_keepalive_config() { - assert!(TcpKeepaliveConfig::default().into_socket2().is_none()); - } - - #[test] - fn tcp_keepalive_time_config() { - let mut kac = TcpKeepaliveConfig::default(); - kac.time = Some(Duration::from_secs(60)); - if let Some(tcp_keepalive) = kac.into_socket2() { - assert!(format!("{tcp_keepalive:?}").contains("time: Some(60s)")); - } else { - panic!("test failed"); - } - } - - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - windows, - ))] - #[test] - fn tcp_keepalive_interval_config() { - let mut kac = TcpKeepaliveConfig::default(); - kac.interval = Some(Duration::from_secs(1)); - if let Some(tcp_keepalive) = kac.into_socket2() { - assert!(format!("{tcp_keepalive:?}").contains("interval: Some(1s)")); - } else { - panic!("test failed"); - } - } - - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ))] - #[test] - fn tcp_keepalive_retries_config() { - let mut kac = TcpKeepaliveConfig::default(); - kac.retries = Some(3); - if let Some(tcp_keepalive) = kac.into_socket2() { - assert!(format!("{tcp_keepalive:?}").contains("retries: Some(3)")); - } else { - panic!("test failed"); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/service/http.rs s390-tools-2.33.1/rust-vendor/hyper/src/service/http.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/service/http.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/hyper/src/service/http.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -use std::error::Error as StdError; - -use crate::body::HttpBody; -use crate::common::{task, Future, Poll}; -use crate::{Request, Response}; - -/// An asynchronous function from `Request` to `Response`. -pub trait HttpService: sealed::Sealed { - /// The `HttpBody` body of the `http::Response`. - type ResBody: HttpBody; - - /// The error type that can occur within this `Service`. - /// - /// Note: Returning an `Error` to a hyper server will cause the connection - /// to be abruptly aborted. In most cases, it is better to return a `Response` - /// with a 4xx or 5xx status code. - type Error: Into>; - - /// The `Future` returned by this `Service`. - type Future: Future, Self::Error>>; - - #[doc(hidden)] - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; - - #[doc(hidden)] - fn call(&mut self, req: Request) -> Self::Future; -} - -impl HttpService for T -where - T: tower_service::Service, Response = Response>, - B2: HttpBody, - T::Error: Into>, -{ - type ResBody = B2; - - type Error = T::Error; - type Future = T::Future; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - tower_service::Service::poll_ready(self, cx) - } - - fn call(&mut self, req: Request) -> Self::Future { - tower_service::Service::call(self, req) - } -} - -impl sealed::Sealed for T -where - T: tower_service::Service, Response = Response>, - B2: HttpBody, -{ -} - -mod sealed { - pub trait Sealed {} -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/service/make.rs s390-tools-2.33.1/rust-vendor/hyper/src/service/make.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/service/make.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/service/make.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,187 +0,0 @@ -use std::error::Error as StdError; -use std::fmt; - -use tokio::io::{AsyncRead, AsyncWrite}; - -use super::{HttpService, Service}; -use 
crate::body::HttpBody; -use crate::common::{task, Future, Poll}; - -// The same "trait alias" as tower::MakeConnection, but inlined to reduce -// dependencies. -pub trait MakeConnection: self::sealed::Sealed<(Target,)> { - type Connection: AsyncRead + AsyncWrite; - type Error; - type Future: Future>; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; - fn make_connection(&mut self, target: Target) -> Self::Future; -} - -impl self::sealed::Sealed<(Target,)> for S where S: Service {} - -impl MakeConnection for S -where - S: Service, - S::Response: AsyncRead + AsyncWrite, -{ - type Connection = S::Response; - type Error = S::Error; - type Future = S::Future; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - Service::poll_ready(self, cx) - } - - fn make_connection(&mut self, target: Target) -> Self::Future { - Service::call(self, target) - } -} - -// Just a sort-of "trait alias" of `MakeService`, not to be implemented -// by anyone, only used as bounds. -pub trait MakeServiceRef: self::sealed::Sealed<(Target, ReqBody)> { - type ResBody: HttpBody; - type Error: Into>; - type Service: HttpService; - type MakeError: Into>; - type Future: Future>; - - // Acting like a #[non_exhaustive] for associated types of this trait. - // - // Basically, no one outside of hyper should be able to set this type - // or declare bounds on it, so it should prevent people from creating - // trait objects or otherwise writing code that requires using *all* - // of the associated types. - // - // Why? So we can add new associated types to this alias in the future, - // if necessary. 
- type __DontNameMe: self::sealed::CantImpl; - - fn poll_ready_ref(&mut self, cx: &mut task::Context<'_>) -> Poll>; - - fn make_service_ref(&mut self, target: &Target) -> Self::Future; -} - -impl MakeServiceRef for T -where - T: for<'a> Service<&'a Target, Error = ME, Response = S, Future = F>, - E: Into>, - ME: Into>, - S: HttpService, - F: Future>, - IB: HttpBody, - OB: HttpBody, -{ - type Error = E; - type Service = S; - type ResBody = OB; - type MakeError = ME; - type Future = F; - - type __DontNameMe = self::sealed::CantName; - - fn poll_ready_ref(&mut self, cx: &mut task::Context<'_>) -> Poll> { - self.poll_ready(cx) - } - - fn make_service_ref(&mut self, target: &Target) -> Self::Future { - self.call(target) - } -} - -impl self::sealed::Sealed<(Target, B1)> for T -where - T: for<'a> Service<&'a Target, Response = S>, - S: HttpService, - B1: HttpBody, - B2: HttpBody, -{ -} - -/// Create a `MakeService` from a function. -/// -/// # Example -/// -/// ``` -/// # #[cfg(feature = "runtime")] -/// # async fn run() { -/// use std::convert::Infallible; -/// use hyper::{Body, Request, Response, Server}; -/// use hyper::server::conn::AddrStream; -/// use hyper::service::{make_service_fn, service_fn}; -/// -/// let addr = ([127, 0, 0, 1], 3000).into(); -/// -/// let make_svc = make_service_fn(|socket: &AddrStream| { -/// let remote_addr = socket.remote_addr(); -/// async move { -/// Ok::<_, Infallible>(service_fn(move |_: Request| async move { -/// Ok::<_, Infallible>( -/// Response::new(Body::from(format!("Hello, {}!", remote_addr))) -/// ) -/// })) -/// } -/// }); -/// -/// // Then bind and serve... -/// let server = Server::bind(&addr) -/// .serve(make_svc); -/// -/// // Finally, spawn `server` onto an Executor... 
-/// if let Err(e) = server.await { -/// eprintln!("server error: {}", e); -/// } -/// # } -/// # fn main() {} -/// ``` -pub fn make_service_fn(f: F) -> MakeServiceFn -where - F: FnMut(&Target) -> Ret, - Ret: Future, -{ - MakeServiceFn { f } -} - -/// `MakeService` returned from [`make_service_fn`] -#[derive(Clone, Copy)] -pub struct MakeServiceFn { - f: F, -} - -impl<'t, F, Ret, Target, Svc, MkErr> Service<&'t Target> for MakeServiceFn -where - F: FnMut(&Target) -> Ret, - Ret: Future>, - MkErr: Into>, -{ - type Error = MkErr; - type Response = Svc; - type Future = Ret; - - fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, target: &'t Target) -> Self::Future { - (self.f)(target) - } -} - -impl fmt::Debug for MakeServiceFn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MakeServiceFn").finish() - } -} - -mod sealed { - pub trait Sealed {} - - #[allow(unreachable_pub)] // This is intentional. - pub trait CantImpl {} - - #[allow(missing_debug_implementations)] - pub enum CantName {} - - impl CantImpl for CantName {} -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/service/mod.rs s390-tools-2.33.1/rust-vendor/hyper/src/service/mod.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/service/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/service/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -//! Asynchronous Services -//! -//! A [`Service`](Service) is a trait representing an asynchronous -//! function of a request to a response. It's similar to -//! `async fn(Request) -> Result`. -//! -//! The argument and return value isn't strictly required to be for HTTP. -//! Therefore, hyper uses several "trait aliases" to reduce clutter around -//! bounds. These are: -//! -//! - `HttpService`: This is blanketly implemented for all types that -//! implement `Service, Response = http::Response>`. -//! 
- `MakeService`: When a `Service` returns a new `Service` as its "response", -//! we consider it a `MakeService`. Again, blanketly implemented in those cases. -//! - `MakeConnection`: A `Service` that returns a "connection", a type that -//! implements `AsyncRead` and `AsyncWrite`. -//! -//! # HttpService -//! -//! In hyper, especially in the server setting, a `Service` is usually bound -//! to a single connection. It defines how to respond to **all** requests that -//! connection will receive. -//! -//! The helper [`service_fn`](service_fn) should be sufficient for most cases, but -//! if you need to implement `Service` for a type manually, you can follow the example -//! in `service_struct_impl.rs`. -//! -//! # MakeService -//! -//! Since a `Service` is bound to a single connection, a [`Server`](crate::Server) -//! needs a way to make them as it accepts connections. This is what a -//! `MakeService` does. -//! -//! Resources that need to be shared by all `Service`s can be put into a -//! `MakeService`, and then passed to individual `Service`s when `call` -//! is called. 
- -pub use tower_service::Service; - -mod http; -mod make; -#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] -mod oneshot; -mod util; - -pub(super) use self::http::HttpService; -#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] -pub(super) use self::make::MakeConnection; -#[cfg(all(any(feature = "http1", feature = "http2"), feature = "server"))] -pub(super) use self::make::MakeServiceRef; -#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))] -pub(super) use self::oneshot::{oneshot, Oneshot}; - -pub use self::make::make_service_fn; -pub use self::util::service_fn; diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/service/oneshot.rs s390-tools-2.33.1/rust-vendor/hyper/src/service/oneshot.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/service/oneshot.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/service/oneshot.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,73 +0,0 @@ -// TODO: Eventually to be replaced with tower_util::Oneshot. - -use pin_project_lite::pin_project; -use tower_service::Service; - -use crate::common::{task, Future, Pin, Poll}; - -pub(crate) fn oneshot(svc: S, req: Req) -> Oneshot -where - S: Service, -{ - Oneshot { - state: State::NotReady { svc, req }, - } -} - -pin_project! { - // A `Future` consuming a `Service` and request, waiting until the `Service` - // is ready, and then calling `Service::call` with the request, and - // waiting for that `Future`. - #[allow(missing_debug_implementations)] - pub struct Oneshot, Req> { - #[pin] - state: State, - } -} - -pin_project! 
{ - #[project = StateProj] - #[project_replace = StateProjOwn] - enum State, Req> { - NotReady { - svc: S, - req: Req, - }, - Called { - #[pin] - fut: S::Future, - }, - Tmp, - } -} - -impl Future for Oneshot -where - S: Service, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let mut me = self.project(); - - loop { - match me.state.as_mut().project() { - StateProj::NotReady { ref mut svc, .. } => { - ready!(svc.poll_ready(cx))?; - // fallthrough out of the match's borrow - } - StateProj::Called { fut } => { - return fut.poll(cx); - } - StateProj::Tmp => unreachable!(), - } - - match me.state.as_mut().project_replace(State::Tmp) { - StateProjOwn::NotReady { mut svc, req } => { - me.state.set(State::Called { fut: svc.call(req) }); - } - _ => unreachable!(), - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/service/util.rs s390-tools-2.33.1/rust-vendor/hyper/src/service/util.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/service/util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/service/util.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,84 +0,0 @@ -use std::error::Error as StdError; -use std::fmt; -use std::marker::PhantomData; - -use crate::body::HttpBody; -use crate::common::{task, Future, Poll}; -use crate::{Request, Response}; - -/// Create a `Service` from a function. -/// -/// # Example -/// -/// ``` -/// use hyper::{Body, Request, Response, Version}; -/// use hyper::service::service_fn; -/// -/// let service = service_fn(|req: Request| async move { -/// if req.version() == Version::HTTP_11 { -/// Ok(Response::new(Body::from("Hello World"))) -/// } else { -/// // Note: it's usually better to return a Response -/// // with an appropriate StatusCode instead of an Err. 
-/// Err("not HTTP/1.1, abort connection") -/// } -/// }); -/// ``` -pub fn service_fn(f: F) -> ServiceFn -where - F: FnMut(Request) -> S, - S: Future, -{ - ServiceFn { - f, - _req: PhantomData, - } -} - -/// Service returned by [`service_fn`] -pub struct ServiceFn { - f: F, - _req: PhantomData, -} - -impl tower_service::Service> - for ServiceFn -where - F: FnMut(Request) -> Ret, - ReqBody: HttpBody, - Ret: Future, E>>, - E: Into>, - ResBody: HttpBody, -{ - type Response = crate::Response; - type Error = E; - type Future = Ret; - - fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: Request) -> Self::Future { - (self.f)(req) - } -} - -impl fmt::Debug for ServiceFn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("impl Service").finish() - } -} - -impl Clone for ServiceFn -where - F: Clone, -{ - fn clone(&self) -> Self { - ServiceFn { - f: self.f.clone(), - _req: PhantomData, - } - } -} - -impl Copy for ServiceFn where F: Copy {} diff -Nru s390-tools-2.31.0/rust-vendor/hyper/src/upgrade.rs s390-tools-2.33.1/rust-vendor/hyper/src/upgrade.rs --- s390-tools-2.31.0/rust-vendor/hyper/src/upgrade.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/hyper/src/upgrade.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,382 +0,0 @@ -//! HTTP Upgrades -//! -//! This module deals with managing [HTTP Upgrades][mdn] in hyper. Since -//! several concepts in HTTP allow for first talking HTTP, and then converting -//! to a different protocol, this module conflates them into a single API. -//! Those include: -//! -//! - HTTP/1.1 Upgrades -//! - HTTP `CONNECT` -//! -//! You are responsible for any other pre-requisites to establish an upgrade, -//! such as sending the appropriate headers, methods, and status codes. You can -//! then use [`on`][] to grab a `Future` which will resolve to the upgraded -//! connection object, or an error if the upgrade fails. -//! -//! 
[mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Protocol_upgrade_mechanism -//! -//! # Client -//! -//! Sending an HTTP upgrade from the [`client`](super::client) involves setting -//! either the appropriate method, if wanting to `CONNECT`, or headers such as -//! `Upgrade` and `Connection`, on the `http::Request`. Once receiving the -//! `http::Response` back, you must check for the specific information that the -//! upgrade is agreed upon by the server (such as a `101` status code), and then -//! get the `Future` from the `Response`. -//! -//! # Server -//! -//! Receiving upgrade requests in a server requires you to check the relevant -//! headers in a `Request`, and if an upgrade should be done, you then send the -//! corresponding headers in a response. To then wait for hyper to finish the -//! upgrade, you call `on()` with the `Request`, and then can spawn a task -//! awaiting it. -//! -//! # Example -//! -//! See [this example][example] showing how upgrades work with both -//! Clients and Servers. -//! -//! [example]: https://github.com/hyperium/hyper/blob/master/examples/upgrades.rs - -use std::any::TypeId; -use std::error::Error as StdError; -use std::fmt; -use std::io; -use std::marker::Unpin; - -use bytes::Bytes; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tokio::sync::oneshot; -#[cfg(any(feature = "http1", feature = "http2"))] -use tracing::trace; - -use crate::common::io::Rewind; -use crate::common::{task, Future, Pin, Poll}; - -/// An upgraded HTTP connection. -/// -/// This type holds a trait object internally of the original IO that -/// was used to speak HTTP before the upgrade. It can be used directly -/// as a `Read` or `Write` for convenience. -/// -/// Alternatively, if the exact type is known, this can be deconstructed -/// into its parts. -pub struct Upgraded { - io: Rewind>, -} - -/// A future for a possible HTTP upgrade. -/// -/// If no upgrade was available, or it doesn't succeed, yields an `Error`. 
-pub struct OnUpgrade { - rx: Option>>, -} - -/// The deconstructed parts of an [`Upgraded`](Upgraded) type. -/// -/// Includes the original IO type, and a read buffer of bytes that the -/// HTTP state machine may have already read before completing an upgrade. -#[derive(Debug)] -pub struct Parts { - /// The original IO object used before the upgrade. - pub io: T, - /// A buffer of bytes that have been read but not processed as HTTP. - /// - /// For instance, if the `Connection` is used for an HTTP upgrade request, - /// it is possible the server sent back the first bytes of the new protocol - /// along with the response upgrade. - /// - /// You will want to check for any existing bytes if you plan to continue - /// communicating on the IO object. - pub read_buf: Bytes, - _inner: (), -} - -/// Gets a pending HTTP upgrade from this message. -/// -/// This can be called on the following types: -/// -/// - `http::Request` -/// - `http::Response` -/// - `&mut http::Request` -/// - `&mut http::Response` -pub fn on(msg: T) -> OnUpgrade { - msg.on_upgrade() -} - -#[cfg(any(feature = "http1", feature = "http2"))] -pub(super) struct Pending { - tx: oneshot::Sender>, -} - -#[cfg(any(feature = "http1", feature = "http2"))] -pub(super) fn pending() -> (Pending, OnUpgrade) { - let (tx, rx) = oneshot::channel(); - (Pending { tx }, OnUpgrade { rx: Some(rx) }) -} - -// ===== impl Upgraded ===== - -impl Upgraded { - #[cfg(any(feature = "http1", feature = "http2", test))] - pub(super) fn new(io: T, read_buf: Bytes) -> Self - where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - { - Upgraded { - io: Rewind::new_buffered(Box::new(io), read_buf), - } - } - - /// Tries to downcast the internal trait object to the type passed. - /// - /// On success, returns the downcasted parts. On error, returns the - /// `Upgraded` back. 
- pub fn downcast(self) -> Result, Self> { - let (io, buf) = self.io.into_inner(); - match io.__hyper_downcast() { - Ok(t) => Ok(Parts { - io: *t, - read_buf: buf, - _inner: (), - }), - Err(io) => Err(Upgraded { - io: Rewind::new_buffered(io, buf), - }), - } - } -} - -impl AsyncRead for Upgraded { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - Pin::new(&mut self.io).poll_read(cx, buf) - } -} - -impl AsyncWrite for Upgraded { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.io).poll_write(cx, buf) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - Pin::new(&mut self.io).poll_write_vectored(cx, bufs) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - Pin::new(&mut self.io).poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - Pin::new(&mut self.io).poll_shutdown(cx) - } - - fn is_write_vectored(&self) -> bool { - self.io.is_write_vectored() - } -} - -impl fmt::Debug for Upgraded { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Upgraded").finish() - } -} - -// ===== impl OnUpgrade ===== - -impl OnUpgrade { - pub(super) fn none() -> Self { - OnUpgrade { rx: None } - } - - #[cfg(feature = "http1")] - pub(super) fn is_none(&self) -> bool { - self.rx.is_none() - } -} - -impl Future for OnUpgrade { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match self.rx { - Some(ref mut rx) => Pin::new(rx).poll(cx).map(|res| match res { - Ok(Ok(upgraded)) => Ok(upgraded), - Ok(Err(err)) => Err(err), - Err(_oneshot_canceled) => Err(crate::Error::new_canceled().with(UpgradeExpected)), - }), - None => Poll::Ready(Err(crate::Error::new_user_no_upgrade())), - } - } -} - -impl fmt::Debug for 
OnUpgrade { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OnUpgrade").finish() - } -} - -// ===== impl Pending ===== - -#[cfg(any(feature = "http1", feature = "http2"))] -impl Pending { - pub(super) fn fulfill(self, upgraded: Upgraded) { - trace!("pending upgrade fulfill"); - let _ = self.tx.send(Ok(upgraded)); - } - - #[cfg(feature = "http1")] - /// Don't fulfill the pending Upgrade, but instead signal that - /// upgrades are handled manually. - pub(super) fn manual(self) { - trace!("pending upgrade handled manually"); - let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade())); - } -} - -// ===== impl UpgradeExpected ===== - -/// Error cause returned when an upgrade was expected but canceled -/// for whatever reason. -/// -/// This likely means the actual `Conn` future wasn't polled and upgraded. -#[derive(Debug)] -struct UpgradeExpected; - -impl fmt::Display for UpgradeExpected { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("upgrade expected but not completed") - } -} - -impl StdError for UpgradeExpected {} - -// ===== impl Io ===== - -pub(super) trait Io: AsyncRead + AsyncWrite + Unpin + 'static { - fn __hyper_type_id(&self) -> TypeId { - TypeId::of::() - } -} - -impl Io for T {} - -impl dyn Io + Send { - fn __hyper_is(&self) -> bool { - let t = TypeId::of::(); - self.__hyper_type_id() == t - } - - fn __hyper_downcast(self: Box) -> Result, Box> { - if self.__hyper_is::() { - // Taken from `std::error::Error::downcast()`. 
- unsafe { - let raw: *mut dyn Io = Box::into_raw(self); - Ok(Box::from_raw(raw as *mut T)) - } - } else { - Err(self) - } - } -} - -mod sealed { - use super::OnUpgrade; - - pub trait CanUpgrade { - fn on_upgrade(self) -> OnUpgrade; - } - - impl CanUpgrade for http::Request { - fn on_upgrade(mut self) -> OnUpgrade { - self.extensions_mut() - .remove::() - .unwrap_or_else(OnUpgrade::none) - } - } - - impl CanUpgrade for &'_ mut http::Request { - fn on_upgrade(self) -> OnUpgrade { - self.extensions_mut() - .remove::() - .unwrap_or_else(OnUpgrade::none) - } - } - - impl CanUpgrade for http::Response { - fn on_upgrade(mut self) -> OnUpgrade { - self.extensions_mut() - .remove::() - .unwrap_or_else(OnUpgrade::none) - } - } - - impl CanUpgrade for &'_ mut http::Response { - fn on_upgrade(self) -> OnUpgrade { - self.extensions_mut() - .remove::() - .unwrap_or_else(OnUpgrade::none) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn upgraded_downcast() { - let upgraded = Upgraded::new(Mock, Bytes::new()); - - let upgraded = upgraded.downcast::>>().unwrap_err(); - - upgraded.downcast::().unwrap(); - } - - // TODO: replace with tokio_test::io when it can test write_buf - struct Mock; - - impl AsyncRead for Mock { - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut task::Context<'_>, - _buf: &mut ReadBuf<'_>, - ) -> Poll> { - unreachable!("Mock::poll_read") - } - } - - impl AsyncWrite for Mock { - fn poll_write( - self: Pin<&mut Self>, - _: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - // panic!("poll_write shouldn't be called"); - Poll::Ready(Ok(buf.len())) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll> { - unreachable!("Mock::poll_flush") - } - - fn poll_shutdown( - self: Pin<&mut Self>, - _cx: &mut task::Context<'_>, - ) -> Poll> { - unreachable!("Mock::poll_shutdown") - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/build.rs s390-tools-2.33.1/rust-vendor/indexmap/build.rs --- 
s390-tools-2.31.0/rust-vendor/indexmap/build.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/build.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,8 @@ +fn main() { + // If "std" is explicitly requested, don't bother probing the target for it. + match std::env::var_os("CARGO_FEATURE_STD") { + Some(_) => autocfg::emit("has_std"), + None => autocfg::new().emit_sysroot_crate("std"), + } + autocfg::rerun_path("build.rs"); +} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/indexmap/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/indexmap/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/.cargo-checksum.json 2024-05-28 11:57:39.000000000 +0200 @@ -1 +1 @@ -{"files":{},"package":"d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f"} \ No newline at end of file +{"files":{},"package":"bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/Cargo.toml s390-tools-2.33.1/rust-vendor/indexmap/Cargo.toml --- s390-tools-2.31.0/rust-vendor/indexmap/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/Cargo.toml 2024-05-28 11:57:36.000000000 +0200 @@ -11,9 +11,9 @@ [package] edition = "2021" -rust-version = "1.63" +rust-version = "1.56" name = "indexmap" -version = "2.1.0" +version = "1.9.3" description = "A hash table with consistent order and fast iteration." 
documentation = "https://docs.rs/indexmap/" readme = "README.md" @@ -28,24 +28,20 @@ license = "Apache-2.0 OR MIT" repository = "https://github.com/bluss/indexmap" +[package.metadata.release] +no-dev-version = true +tag-name = "{{version}}" + [package.metadata.docs.rs] features = [ "arbitrary", "quickcheck", - "serde", + "serde-1", "rayon", ] -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[package.metadata.release] -no-dev-version = true -tag-name = "{{version}}" [profile.bench] -debug = 2 +debug = true [lib] bench = false @@ -55,12 +51,8 @@ optional = true default-features = false -[dependencies.equivalent] -version = "1.0" -default-features = false - [dependencies.hashbrown] -version = "0.14.1" +version = "0.12" features = ["raw"] default-features = false @@ -70,7 +62,7 @@ default-features = false [dependencies.rayon] -version = "1.5.3" +version = "1.4.1" optional = true [dependencies.rustc-rayon] @@ -90,7 +82,7 @@ version = "0.2.1" [dev-dependencies.itertools] -version = "0.11" +version = "0.10" [dev-dependencies.lazy_static] version = "1.3" @@ -106,7 +98,11 @@ [dev-dependencies.serde_derive] version = "1.0" +[build-dependencies.autocfg] +version = "1" + [features] -default = ["std"] +serde-1 = ["serde"] std = [] test_debug = [] +test_low_transition_point = [] diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/README.md s390-tools-2.33.1/rust-vendor/indexmap/README.md --- s390-tools-2.31.0/rust-vendor/indexmap/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/README.md 2024-05-28 11:57:36.000000000 +0200 @@ -3,7 +3,7 @@ [![build status](https://github.com/bluss/indexmap/workflows/Continuous%20integration/badge.svg?branch=master)](https://github.com/bluss/indexmap/actions) [![crates.io](https://img.shields.io/crates/v/indexmap.svg)](https://crates.io/crates/indexmap) [![docs](https://docs.rs/indexmap/badge.svg)](https://docs.rs/indexmap) 
-[![rustc](https://img.shields.io/badge/rust-1.63%2B-orange.svg)](https://img.shields.io/badge/rust-1.63%2B-orange.svg) +[![rustc](https://img.shields.io/badge/rust-1.56%2B-orange.svg)](https://img.shields.io/badge/rust-1.56%2B-orange.svg) A pure-Rust hash table which preserves (in a limited sense) insertion order. diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/RELEASES.md s390-tools-2.33.1/rust-vendor/indexmap/RELEASES.md --- s390-tools-2.31.0/rust-vendor/indexmap/RELEASES.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/RELEASES.md 2024-05-28 11:57:36.000000000 +0200 @@ -1,61 +1,3 @@ -- 2.1.0 - - - Empty slices can now be created with `map::Slice::{new, new_mut}` and - `set::Slice::new`. In addition, `Slice::new`, `len`, and `is_empty` are - now `const` functions on both types. - - - `IndexMap`, `IndexSet`, and their respective `Slice`s all have binary - search methods for sorted data: map `binary_search_keys` and set - `binary_search` for plain comparision, `binary_search_by` for custom - comparators, `binary_search_by_key` for key extraction, and - `partition_point` for boolean conditions. - -- 2.0.2 - - - The `hashbrown` dependency has been updated to version 0.14.1 to - complete the support for Rust 1.63. - -- 2.0.1 - - - **MSRV**: Rust 1.63.0 is now supported as well, pending publication of - `hashbrown`'s relaxed MSRV (or use cargo `--ignore-rust-version`). - -- 2.0.0 - - - **MSRV**: Rust 1.64.0 or later is now required. - - - The `"std"` feature is no longer auto-detected. It is included in the - default feature set, or else can be enabled like any other Cargo feature. - - - The `"serde-1"` feature has been removed, leaving just the optional - `"serde"` dependency to be enabled like a feature itself. - - - `IndexMap::get_index_mut` now returns `Option<(&K, &mut V)>`, changing - the key part from `&mut K` to `&K`. There is also a new alternative - `MutableKeys::get_index_mut2` to access the former behavior. 
- - - The new `map::Slice` and `set::Slice` offer a linear view of maps - and sets, behaving a lot like normal `[(K, V)]` and `[T]` slices. Notably, - comparison traits like `Eq` only consider items in order, rather than hash - lookups, and slices even implement `Hash`. - - - `IndexMap` and `IndexSet` now have `sort_by_cached_key` and - `par_sort_by_cached_key` methods which perform stable sorts in place - using a key extraction function. - - - `IndexMap` and `IndexSet` now have `reserve_exact`, `try_reserve`, and - `try_reserve_exact` methods that correspond to the same methods on `Vec`. - However, exactness only applies to the direct capacity for items, while the - raw hash table still follows its own rules for capacity and load factor. - - - The `Equivalent` trait is now re-exported from the `equivalent` crate, - intended as a common base to allow types to work with multiple map types. - - - The `hashbrown` dependency has been updated to version 0.14. - - - The `serde_seq` module has been moved from the crate root to below the - `map` module. - - 1.9.3 - Bump the `rustc-rayon` dependency, for compiler use only. 
diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/arbitrary.rs s390-tools-2.33.1/rust-vendor/indexmap/src/arbitrary.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/arbitrary.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/arbitrary.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,5 +1,4 @@ #[cfg(feature = "arbitrary")] -#[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))] mod impl_arbitrary { use crate::{IndexMap, IndexSet}; use arbitrary::{Arbitrary, Result, Unstructured}; @@ -36,7 +35,6 @@ } #[cfg(feature = "quickcheck")] -#[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))] mod impl_quickcheck { use crate::{IndexMap, IndexSet}; use alloc::boxed::Box; diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/equivalent.rs s390-tools-2.33.1/rust-vendor/indexmap/src/equivalent.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/equivalent.rs 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/equivalent.rs 2024-05-28 11:57:36.000000000 +0200 @@ -0,0 +1,27 @@ +use core::borrow::Borrow; + +/// Key equivalence trait. +/// +/// This trait allows hash table lookup to be customized. +/// It has one blanket implementation that uses the regular `Borrow` solution, +/// just like `HashMap` and `BTreeMap` do, so that you can pass `&str` to lookup +/// into a map with `String` keys and so on. +/// +/// # Contract +/// +/// The implementor **must** hash like `K`, if it is hashable. +pub trait Equivalent { + /// Compare self to `key` and return `true` if they are equal. 
+ fn equivalent(&self, key: &K) -> bool; +} + +impl Equivalent for Q +where + Q: Eq, + K: Borrow, +{ + #[inline] + fn equivalent(&self, key: &K) -> bool { + *self == *key.borrow() + } +} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/lib.rs s390-tools-2.33.1/rust-vendor/indexmap/src/lib.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/lib.rs 2024-05-28 11:57:36.000000000 +0200 @@ -14,7 +14,7 @@ //! [`IndexSet`]: set/struct.IndexSet.html //! //! -//! ### Highlights +//! ### Feature Highlights //! //! [`IndexMap`] and [`IndexSet`] are drop-in compatible with the std `HashMap` //! and `HashSet`, but they also have some features of note: @@ -26,34 +26,6 @@ //! - The [`MutableKeys`][map::MutableKeys] trait, which gives opt-in mutable //! access to hash map keys. //! -//! ### Feature Flags -//! -//! To reduce the amount of compiled code in the crate by default, certain -//! features are gated behind [feature flags]. These allow you to opt in to (or -//! out of) functionality. Below is a list of the features available in this -//! crate. -//! -//! * `std`: Enables features which require the Rust standard library. For more -//! information see the section on [`no_std`]. -//! * `rayon`: Enables parallel iteration and other parallel methods. -//! * `serde`: Adds implementations for [`Serialize`] and [`Deserialize`] -//! to [`IndexMap`] and [`IndexSet`]. Alternative implementations for -//! (de)serializing [`IndexMap`] as an ordered sequence are available in the -//! [`map::serde_seq`] module. -//! * `arbitrary`: Adds implementations for the [`arbitrary::Arbitrary`] trait -//! to [`IndexMap`] and [`IndexSet`]. -//! * `quickcheck`: Adds implementations for the [`quickcheck::Arbitrary`] trait -//! to [`IndexMap`] and [`IndexSet`]. -//! -//! _Note: only the `std` feature is enabled by default._ -//! -//! 
[feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section -//! [`no_std`]: #no-standard-library-targets -//! [`Serialize`]: `::serde::Serialize` -//! [`Deserialize`]: `::serde::Deserialize` -//! [`arbitrary::Arbitrary`]: `::arbitrary::Arbitrary` -//! [`quickcheck::Arbitrary`]: `::quickcheck::Arbitrary` -//! //! ### Alternate Hashers //! //! [`IndexMap`] and [`IndexSet`] have a default hasher type `S = RandomState`, @@ -81,20 +53,21 @@ //! //! ### Rust Version //! -//! This version of indexmap requires Rust 1.63 or later. +//! This version of indexmap requires Rust 1.56 or later. //! -//! The indexmap 2.x release series will use a carefully considered version -//! upgrade policy, where in a later 2.x version, we will raise the minimum +//! The indexmap 1.x release series will use a carefully considered version +//! upgrade policy, where in a later 1.x version, we will raise the minimum //! required Rust version. //! //! ## No Standard Library Targets //! -//! This crate supports being built without `std`, requiring `alloc` instead. -//! This is chosen by disabling the default "std" cargo feature, by adding -//! `default-features = false` to your dependency specification. +//! This crate supports being built without `std`, requiring +//! `alloc` instead. This is enabled automatically when it is detected that +//! `std` is not available. There is no crate feature to enable/disable to +//! trigger this. It can be tested by building for a std-less target. //! //! - Creating maps and sets using [`new`][IndexMap::new] and -//! [`with_capacity`][IndexMap::with_capacity] is unavailable without `std`. +//! [`with_capacity`][IndexMap::with_capacity] is unavailable without `std`. //! Use methods [`IndexMap::default`][def], //! [`with_hasher`][IndexMap::with_hasher], //! [`with_capacity_and_hasher`][IndexMap::with_capacity_and_hasher] instead. @@ -104,11 +77,9 @@ //! //! 
[def]: map/struct.IndexMap.html#impl-Default -#![cfg_attr(docsrs, feature(doc_cfg))] - extern crate alloc; -#[cfg(feature = "std")] +#[cfg(has_std)] #[macro_use] extern crate std; @@ -117,10 +88,12 @@ mod arbitrary; #[macro_use] mod macros; +mod equivalent; mod mutable_keys; #[cfg(feature = "serde")] -#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] mod serde; +#[cfg(feature = "serde")] +pub mod serde_seq; mod util; pub mod map; @@ -129,15 +102,14 @@ // Placed after `map` and `set` so new `rayon` methods on the types // are documented after the "normal" methods. #[cfg(feature = "rayon")] -#[cfg_attr(docsrs, doc(cfg(feature = "rayon")))] mod rayon; #[cfg(feature = "rustc-rayon")] mod rustc; +pub use crate::equivalent::Equivalent; pub use crate::map::IndexMap; pub use crate::set::IndexSet; -pub use equivalent::Equivalent; // shared private items @@ -220,59 +192,3 @@ where F: FnOnce(&mut [Self::Entry]); } - -/// The error type for `try_reserve` methods. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct TryReserveError { - kind: TryReserveErrorKind, -} - -#[derive(Clone, PartialEq, Eq, Debug)] -enum TryReserveErrorKind { - // The standard library's kind is currently opaque to us, otherwise we could unify this. - Std(alloc::collections::TryReserveError), - CapacityOverflow, - AllocError { layout: alloc::alloc::Layout }, -} - -// These are not `From` so we don't expose them in our public API. 
-impl TryReserveError { - fn from_alloc(error: alloc::collections::TryReserveError) -> Self { - Self { - kind: TryReserveErrorKind::Std(error), - } - } - - fn from_hashbrown(error: hashbrown::TryReserveError) -> Self { - Self { - kind: match error { - hashbrown::TryReserveError::CapacityOverflow => { - TryReserveErrorKind::CapacityOverflow - } - hashbrown::TryReserveError::AllocError { layout } => { - TryReserveErrorKind::AllocError { layout } - } - }, - } - } -} - -impl core::fmt::Display for TryReserveError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let reason = match &self.kind { - TryReserveErrorKind::Std(e) => return core::fmt::Display::fmt(e, f), - TryReserveErrorKind::CapacityOverflow => { - " because the computed capacity exceeded the collection's maximum" - } - TryReserveErrorKind::AllocError { .. } => { - " because the memory allocator returned an error" - } - }; - f.write_str("memory allocation failed")?; - f.write_str(reason) - } -} - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl std::error::Error for TryReserveError {} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/macros.rs s390-tools-2.33.1/rust-vendor/indexmap/src/macros.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/macros.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/macros.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,5 +1,4 @@ -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[cfg(has_std)] #[macro_export] /// Create an `IndexMap` from a list of key-value pairs /// @@ -20,23 +19,23 @@ /// assert_eq!(map.keys().next(), Some(&"a")); /// ``` macro_rules! 
indexmap { + (@single $($x:tt)*) => (()); + (@count $($rest:expr),*) => (<[()]>::len(&[$($crate::indexmap!(@single $rest)),*])); + ($($key:expr => $value:expr,)+) => { $crate::indexmap!($($key => $value),+) }; ($($key:expr => $value:expr),*) => { { - // Note: `stringify!($key)` is just here to consume the repetition, - // but we throw away that string literal during constant evaluation. - const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]); - let mut map = $crate::IndexMap::with_capacity(CAP); + let _cap = $crate::indexmap!(@count $($key),*); + let mut _map = $crate::IndexMap::with_capacity(_cap); $( - map.insert($key, $value); + _map.insert($key, $value); )* - map + _map } }; } -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[cfg(has_std)] #[macro_export] /// Create an `IndexSet` from a list of values /// @@ -57,17 +56,18 @@ /// assert_eq!(set.iter().next(), Some(&"a")); /// ``` macro_rules! indexset { + (@single $($x:tt)*) => (()); + (@count $($rest:expr),*) => (<[()]>::len(&[$($crate::indexset!(@single $rest)),*])); + ($($value:expr,)+) => { $crate::indexset!($($value),+) }; ($($value:expr),*) => { { - // Note: `stringify!($value)` is just here to consume the repetition, - // but we throw away that string literal during constant evaluation. - const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]); - let mut set = $crate::IndexSet::with_capacity(CAP); + let _cap = $crate::indexset!(@count $($value),*); + let mut _set = $crate::IndexSet::with_capacity(_cap); $( - set.insert($value); + _set.insert($value); )* - set + _set } }; } diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/map/core/raw.rs s390-tools-2.33.1/rust-vendor/indexmap/src/map/core/raw.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/map/core/raw.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/map/core/raw.rs 2024-05-28 11:57:36.000000000 +0200 @@ -2,7 +2,7 @@ //! 
This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`, //! mostly in dealing with its bucket "pointers". -use super::{equivalent, get_hash, Bucket, Entry, HashValue, IndexMapCore, VacantEntry}; +use super::{equivalent, Bucket, Entry, HashValue, IndexMapCore, VacantEntry}; use core::fmt; use core::mem::replace; use hashbrown::raw::RawTable; @@ -26,7 +26,7 @@ impl fmt::Debug for DebugIndices<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // SAFETY: we're not letting any of the buckets escape this function - let indices = unsafe { self.0.iter().map(|raw_bucket| *raw_bucket.as_ref()) }; + let indices = unsafe { self.0.iter().map(|raw_bucket| raw_bucket.read()) }; f.debug_list().entries(indices).finish() } } @@ -38,42 +38,16 @@ unsafe { let offset = end - start; for bucket in self.indices.iter() { - let i = bucket.as_mut(); - if *i >= end { - *i -= offset; - } else if *i >= start { + let i = bucket.read(); + if i >= end { + bucket.write(i - offset); + } else if i >= start { self.indices.erase(bucket); } } } } - /// Search for a key in the table and return `Ok(entry_index)` if found. - /// Otherwise, insert the key and return `Err(new_index)`. - /// - /// Note that hashbrown may resize the table to reserve space for insertion, - /// even before checking if it's already present, so this is somewhat biased - /// towards new items. - pub(crate) fn find_or_insert(&mut self, hash: HashValue, key: &K) -> Result - where - K: Eq, - { - let hash = hash.get(); - let eq = equivalent(key, &self.entries); - let hasher = get_hash(&self.entries); - // SAFETY: We're not mutating between find and read/insert. 
- unsafe { - match self.indices.find_or_find_insert_slot(hash, eq, hasher) { - Ok(raw_bucket) => Ok(*raw_bucket.as_ref()), - Err(slot) => { - let index = self.indices.len(); - self.indices.insert_in_slot(hash, slot, index); - Err(index) - } - } - } - } - pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V> where K: Eq, @@ -100,6 +74,29 @@ // only the item references that are appropriately bound to `&mut self`. unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) } } + + /// Return the raw bucket for the given index + fn find_index(&self, index: usize) -> RawBucket { + // We'll get a "nice" bounds-check from indexing `self.entries`, + // and then we expect to find it in the table as well. + let hash = self.entries[index].hash.get(); + self.indices + .find(hash, move |&i| i == index) + .expect("index not found") + } + + pub(crate) fn swap_indices(&mut self, a: usize, b: usize) { + // SAFETY: Can't take two `get_mut` references from one table, so we + // must use raw buckets to do the swap. This is still safe because we + // are locally sure they won't dangle, and we write them individually. + unsafe { + let raw_bucket_a = self.find_index(a); + let raw_bucket_b = self.find_index(b); + raw_bucket_a.write(b); + raw_bucket_b.write(a); + } + self.entries.swap(a, b); + } } /// A view into an occupied entry in a `IndexMap`. 
@@ -154,7 +151,7 @@ #[inline] pub fn index(&self) -> usize { // SAFETY: we have &mut map keep keeping the bucket stable - unsafe { *self.raw_bucket.as_ref() } + unsafe { self.raw_bucket.read() } } /// Converts into a mutable reference to the entry's value in the map, @@ -174,7 +171,7 @@ pub fn swap_remove_entry(self) -> (K, V) { // SAFETY: This is safe because it can only happen once (self is consumed) // and map.indices have not been modified since entry construction - let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) }; + let index = unsafe { self.map.indices.remove(self.raw_bucket) }; self.map.swap_remove_finish(index) } @@ -188,7 +185,7 @@ pub fn shift_remove_entry(self) -> (K, V) { // SAFETY: This is safe because it can only happen once (self is consumed) // and map.indices have not been modified since entry construction - let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) }; + let index = unsafe { self.map.indices.remove(self.raw_bucket) }; self.map.shift_remove_finish(index) } } diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/map/core.rs s390-tools-2.33.1/rust-vendor/indexmap/src/map/core.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/map/core.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/map/core.rs 2024-05-28 11:57:36.000000000 +0200 @@ -12,13 +12,14 @@ use hashbrown::raw::RawTable; use crate::vec::{Drain, Vec}; -use crate::TryReserveError; +use core::cmp; use core::fmt; -use core::mem; +use core::mem::replace; use core::ops::RangeBounds; +use crate::equivalent::Equivalent; use crate::util::simplify_range; -use crate::{Bucket, Entries, Equivalent, HashValue}; +use crate::{Bucket, Entries, HashValue}; /// Core of the map that does not depend on S pub(crate) struct IndexMapCore { @@ -61,18 +62,18 @@ V: Clone, { fn clone(&self) -> Self { - let mut new = Self::new(); - new.clone_from(self); - new + let indices = self.indices.clone(); + let mut entries = 
Vec::with_capacity(indices.capacity()); + entries.clone_from(&self.entries); + IndexMapCore { indices, entries } } fn clone_from(&mut self, other: &Self) { let hasher = get_hash(&other.entries); self.indices.clone_from_with_hasher(&other.indices, hasher); if self.entries.capacity() < other.entries.len() { - // If we must resize, match the indices capacity. - let additional = other.entries.len() - self.entries.len(); - self.reserve_entries(additional); + // If we must resize, match the indices capacity + self.reserve_entries(); } self.entries.clone_from(&other.entries); } @@ -119,9 +120,6 @@ } impl IndexMapCore { - /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`. - const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::>(); - #[inline] pub(crate) const fn new() -> Self { IndexMapCore { @@ -145,7 +143,7 @@ #[inline] pub(crate) fn capacity(&self) -> usize { - Ord::min(self.indices.capacity(), self.entries.capacity()) + cmp::min(self.indices.capacity(), self.entries.capacity()) } pub(crate) fn clear(&mut self) { @@ -195,67 +193,15 @@ /// Reserve capacity for `additional` more key-value pairs. pub(crate) fn reserve(&mut self, additional: usize) { self.indices.reserve(additional, get_hash(&self.entries)); - // Only grow entries if necessary, since we also round up capacity. - if additional > self.entries.capacity() - self.entries.len() { - self.reserve_entries(additional); - } + self.reserve_entries(); } - /// Reserve entries capacity, rounded up to match the indices - fn reserve_entries(&mut self, additional: usize) { - // Use a soft-limit on the maximum capacity, but if the caller explicitly - // requested more, do it and let them have the resulting panic. 
- let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY); - let try_add = new_capacity - self.entries.len(); - if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { - return; - } - self.entries.reserve_exact(additional); - } - - /// Reserve capacity for `additional` more key-value pairs, without over-allocating. - pub(crate) fn reserve_exact(&mut self, additional: usize) { - self.indices.reserve(additional, get_hash(&self.entries)); + /// Reserve entries capacity to match the indices + fn reserve_entries(&mut self) { + let additional = self.indices.capacity() - self.entries.len(); self.entries.reserve_exact(additional); } - /// Try to reserve capacity for `additional` more key-value pairs. - pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.indices - .try_reserve(additional, get_hash(&self.entries)) - .map_err(TryReserveError::from_hashbrown)?; - // Only grow entries if necessary, since we also round up capacity. - if additional > self.entries.capacity() - self.entries.len() { - self.try_reserve_entries(additional) - } else { - Ok(()) - } - } - - /// Try to reserve entries capacity, rounded up to match the indices - fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> { - // Use a soft-limit on the maximum capacity, but if the caller explicitly - // requested more, do it and let them have the resulting error. - let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY); - let try_add = new_capacity - self.entries.len(); - if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { - return Ok(()); - } - self.entries - .try_reserve_exact(additional) - .map_err(TryReserveError::from_alloc) - } - - /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating. 
- pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.indices - .try_reserve(additional, get_hash(&self.entries)) - .map_err(TryReserveError::from_hashbrown)?; - self.entries - .try_reserve_exact(additional) - .map_err(TryReserveError::from_alloc) - } - /// Shrink the capacity of the map with a lower bound pub(crate) fn shrink_to(&mut self, min_capacity: usize) { self.indices @@ -274,14 +220,18 @@ } } - /// Append a key-value pair to `entries`, *without* checking whether it already exists. - fn push_entry(&mut self, hash: HashValue, key: K, value: V) { - if self.entries.len() == self.entries.capacity() { + /// Append a key-value pair, *without* checking whether it already exists, + /// and return the pair's new index. + fn push(&mut self, hash: HashValue, key: K, value: V) -> usize { + let i = self.entries.len(); + self.indices.insert(hash.get(), i, get_hash(&self.entries)); + if i == self.entries.capacity() { // Reserve our own capacity synced to the indices, // rather than letting `Vec::push` just double it. - self.reserve_entries(1); + self.reserve_entries(); } self.entries.push(Bucket { hash, key, value }); + i } /// Return the index in `entries` where an equivalent key can be found @@ -297,13 +247,9 @@ where K: Eq, { - match self.find_or_insert(hash, &key) { - Ok(i) => (i, Some(mem::replace(&mut self.entries[i].value, value))), - Err(i) => { - debug_assert_eq!(i, self.entries.len()); - self.push_entry(hash, key, value); - (i, None) - } + match self.get_index_of(hash, &key) { + Some(i) => (i, Some(replace(&mut self.entries[i].value, value))), + None => (self.push(hash, key, value), None), } } @@ -393,7 +339,7 @@ pub(super) fn move_index(&mut self, from: usize, to: usize) { let from_hash = self.entries[from].hash; if from != to { - // Use a sentinel index so other indices don't collide. + // Use a sentinal index so other indices don't collide. 
update_index(&mut self.indices, from_hash, from, usize::MAX); // Update all other indices and rotate the entry positions. @@ -405,31 +351,11 @@ self.entries[to..=from].rotate_right(1); } - // Change the sentinel index to its final position. + // Change the sentinal index to its final position. update_index(&mut self.indices, from_hash, usize::MAX, to); } } - pub(crate) fn swap_indices(&mut self, a: usize, b: usize) { - // If they're equal and in-bounds, there's nothing to do. - if a == b && a < self.entries.len() { - return; - } - - // We'll get a "nice" bounds-check from indexing `self.entries`, - // and then we expect to find it in the table as well. - let [ref_a, ref_b] = self - .indices - .get_many_mut( - [self.entries[a].hash.get(), self.entries[b].hash.get()], - move |i, &x| if i == 0 { x == a } else { x == b }, - ) - .expect("indices not found"); - - mem::swap(ref_a, ref_b); - self.entries.swap(a, b); - } - /// Remove an entry by swapping it with the last pub(crate) fn swap_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> where @@ -521,9 +447,25 @@ where F: FnMut(&mut K, &mut V) -> bool, { - self.entries - .retain_mut(|entry| keep(&mut entry.key, &mut entry.value)); - if self.entries.len() < self.indices.len() { + // FIXME: This could use Vec::retain_mut with MSRV 1.61. + // Like Vec::retain in self.entries, but with mutable K and V. + // We swap-shift all the items we want to keep, truncate the rest, + // then rebuild the raw hash table with the new indexes. 
+ let len = self.entries.len(); + let mut n_deleted = 0; + for i in 0..len { + let will_keep = { + let entry = &mut self.entries[i]; + keep(&mut entry.key, &mut entry.value) + }; + if !will_keep { + n_deleted += 1; + } else if n_deleted > 0 { + self.entries.swap(i - n_deleted, i); + } + } + if n_deleted > 0 { + self.entries.truncate(len - n_deleted); self.rebuild_hash_table(); } } @@ -659,7 +601,7 @@ impl OccupiedEntry<'_, K, V> { /// Sets the value of the entry to `value`, and returns the entry's old value. pub fn insert(&mut self, value: V) -> V { - mem::replace(self.get_mut(), value) + replace(self.get_mut(), value) } /// Remove the key, value pair stored in the map for this entry, and return the value. @@ -731,18 +673,14 @@ /// Return the index where the key-value pair will be inserted. pub fn index(&self) -> usize { - self.map.indices.len() + self.map.len() } /// Inserts the entry's key and the given value into the map, and returns a mutable reference /// to the value. pub fn insert(self, value: V) -> &'a mut V { - let i = self.index(); - let Self { map, hash, key } = self; - map.indices.insert(hash.get(), i, get_hash(&map.entries)); - debug_assert_eq!(i, map.entries.len()); - map.push_entry(hash, key, value); - &mut map.entries[i].value + let i = self.map.push(self.hash, self.key, value); + &mut self.map.entries[i].value } } diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/map/iter.rs s390-tools-2.33.1/rust-vendor/indexmap/src/map/iter.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/map/iter.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/map/iter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,541 +0,0 @@ -use super::{Bucket, Entries, IndexMap, Slice}; - -use alloc::vec::{self, Vec}; -use core::fmt; -use core::iter::FusedIterator; -use core::slice; - -impl<'a, K, V, S> IntoIterator for &'a IndexMap { - type Item = (&'a K, &'a V); - type IntoIter = Iter<'a, K, V>; - - fn into_iter(self) -> Self::IntoIter { - 
self.iter() - } -} - -impl<'a, K, V, S> IntoIterator for &'a mut IndexMap { - type Item = (&'a K, &'a mut V); - type IntoIter = IterMut<'a, K, V>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -impl IntoIterator for IndexMap { - type Item = (K, V); - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self.into_entries()) - } -} - -/// An iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`iter`] method on [`IndexMap`]. See its -/// documentation for more. -/// -/// [`iter`]: struct.IndexMap.html#method.iter -/// [`IndexMap`]: struct.IndexMap.html -pub struct Iter<'a, K, V> { - iter: slice::Iter<'a, Bucket>, -} - -impl<'a, K, V> Iter<'a, K, V> { - pub(super) fn new(entries: &'a [Bucket]) -> Self { - Self { - iter: entries.iter(), - } - } - - /// Returns a slice of the remaining entries in the iterator. - pub fn as_slice(&self) -> &'a Slice { - Slice::from_slice(self.iter.as_slice()) - } -} - -impl<'a, K, V> Iterator for Iter<'a, K, V> { - type Item = (&'a K, &'a V); - - iterator_methods!(Bucket::refs); -} - -impl DoubleEndedIterator for Iter<'_, K, V> { - double_ended_iterator_methods!(Bucket::refs); -} - -impl ExactSizeIterator for Iter<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Iter<'_, K, V> {} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -impl Clone for Iter<'_, K, V> { - fn clone(&self) -> Self { - Iter { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Iter<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl Default for Iter<'_, K, V> { - fn default() -> Self { - Self { iter: [].iter() } - } -} - -/// A mutable iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`iter_mut`] method on [`IndexMap`]. See its -/// documentation for more. 
-/// -/// [`iter_mut`]: struct.IndexMap.html#method.iter_mut -/// [`IndexMap`]: struct.IndexMap.html -pub struct IterMut<'a, K, V> { - iter: slice::IterMut<'a, Bucket>, -} - -impl<'a, K, V> IterMut<'a, K, V> { - pub(super) fn new(entries: &'a mut [Bucket]) -> Self { - Self { - iter: entries.iter_mut(), - } - } - - /// Returns a slice of the remaining entries in the iterator. - pub fn as_slice(&self) -> &Slice { - Slice::from_slice(self.iter.as_slice()) - } - - /// Returns a mutable slice of the remaining entries in the iterator. - /// - /// To avoid creating `&mut` references that alias, this is forced to consume the iterator. - pub fn into_slice(self) -> &'a mut Slice { - Slice::from_mut_slice(self.iter.into_slice()) - } -} - -impl<'a, K, V> Iterator for IterMut<'a, K, V> { - type Item = (&'a K, &'a mut V); - - iterator_methods!(Bucket::ref_mut); -} - -impl DoubleEndedIterator for IterMut<'_, K, V> { - double_ended_iterator_methods!(Bucket::ref_mut); -} - -impl ExactSizeIterator for IterMut<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IterMut<'_, K, V> {} - -impl fmt::Debug for IterMut<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::refs); - f.debug_list().entries(iter).finish() - } -} - -impl Default for IterMut<'_, K, V> { - fn default() -> Self { - Self { - iter: [].iter_mut(), - } - } -} - -/// An owning iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`into_iter`] method on [`IndexMap`] -/// (provided by the `IntoIterator` trait). See its documentation for more. -/// -/// [`into_iter`]: struct.IndexMap.html#method.into_iter -/// [`IndexMap`]: struct.IndexMap.html -pub struct IntoIter { - iter: vec::IntoIter>, -} - -impl IntoIter { - pub(super) fn new(entries: Vec>) -> Self { - Self { - iter: entries.into_iter(), - } - } - - /// Returns a slice of the remaining entries in the iterator. 
- pub fn as_slice(&self) -> &Slice { - Slice::from_slice(self.iter.as_slice()) - } - - /// Returns a mutable slice of the remaining entries in the iterator. - pub fn as_mut_slice(&mut self) -> &mut Slice { - Slice::from_mut_slice(self.iter.as_mut_slice()) - } -} - -impl Iterator for IntoIter { - type Item = (K, V); - - iterator_methods!(Bucket::key_value); -} - -impl DoubleEndedIterator for IntoIter { - double_ended_iterator_methods!(Bucket::key_value); -} - -impl ExactSizeIterator for IntoIter { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IntoIter {} - -impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::refs); - f.debug_list().entries(iter).finish() - } -} - -impl Default for IntoIter { - fn default() -> Self { - Self { - iter: Vec::new().into_iter(), - } - } -} - -/// A draining iterator over the entries of a `IndexMap`. -/// -/// This `struct` is created by the [`drain`] method on [`IndexMap`]. See its -/// documentation for more. -/// -/// [`drain`]: struct.IndexMap.html#method.drain -/// [`IndexMap`]: struct.IndexMap.html -pub struct Drain<'a, K, V> { - iter: vec::Drain<'a, Bucket>, -} - -impl<'a, K, V> Drain<'a, K, V> { - pub(super) fn new(iter: vec::Drain<'a, Bucket>) -> Self { - Self { iter } - } - - /// Returns a slice of the remaining entries in the iterator. 
- pub fn as_slice(&self) -> &Slice { - Slice::from_slice(self.iter.as_slice()) - } -} - -impl Iterator for Drain<'_, K, V> { - type Item = (K, V); - - iterator_methods!(Bucket::key_value); -} - -impl DoubleEndedIterator for Drain<'_, K, V> { - double_ended_iterator_methods!(Bucket::key_value); -} - -impl ExactSizeIterator for Drain<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Drain<'_, K, V> {} - -impl fmt::Debug for Drain<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::refs); - f.debug_list().entries(iter).finish() - } -} - -/// An iterator over the keys of a `IndexMap`. -/// -/// This `struct` is created by the [`keys`] method on [`IndexMap`]. See its -/// documentation for more. -/// -/// [`keys`]: struct.IndexMap.html#method.keys -/// [`IndexMap`]: struct.IndexMap.html -pub struct Keys<'a, K, V> { - iter: slice::Iter<'a, Bucket>, -} - -impl<'a, K, V> Keys<'a, K, V> { - pub(super) fn new(entries: &'a [Bucket]) -> Self { - Self { - iter: entries.iter(), - } - } -} - -impl<'a, K, V> Iterator for Keys<'a, K, V> { - type Item = &'a K; - - iterator_methods!(Bucket::key_ref); -} - -impl DoubleEndedIterator for Keys<'_, K, V> { - double_ended_iterator_methods!(Bucket::key_ref); -} - -impl ExactSizeIterator for Keys<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Keys<'_, K, V> {} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -impl Clone for Keys<'_, K, V> { - fn clone(&self) -> Self { - Keys { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Keys<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl Default for Keys<'_, K, V> { - fn default() -> Self { - Self { iter: [].iter() } - } -} - -/// An owning iterator over the keys of a `IndexMap`. 
-/// -/// This `struct` is created by the [`into_keys`] method on [`IndexMap`]. -/// See its documentation for more. -/// -/// [`IndexMap`]: struct.IndexMap.html -/// [`into_keys`]: struct.IndexMap.html#method.into_keys -pub struct IntoKeys { - iter: vec::IntoIter>, -} - -impl IntoKeys { - pub(super) fn new(entries: Vec>) -> Self { - Self { - iter: entries.into_iter(), - } - } -} - -impl Iterator for IntoKeys { - type Item = K; - - iterator_methods!(Bucket::key); -} - -impl DoubleEndedIterator for IntoKeys { - double_ended_iterator_methods!(Bucket::key); -} - -impl ExactSizeIterator for IntoKeys { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IntoKeys {} - -impl fmt::Debug for IntoKeys { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::key_ref); - f.debug_list().entries(iter).finish() - } -} - -impl Default for IntoKeys { - fn default() -> Self { - Self { - iter: Vec::new().into_iter(), - } - } -} - -/// An iterator over the values of a `IndexMap`. -/// -/// This `struct` is created by the [`values`] method on [`IndexMap`]. See its -/// documentation for more. 
-/// -/// [`values`]: struct.IndexMap.html#method.values -/// [`IndexMap`]: struct.IndexMap.html -pub struct Values<'a, K, V> { - iter: slice::Iter<'a, Bucket>, -} - -impl<'a, K, V> Values<'a, K, V> { - pub(super) fn new(entries: &'a [Bucket]) -> Self { - Self { - iter: entries.iter(), - } - } -} - -impl<'a, K, V> Iterator for Values<'a, K, V> { - type Item = &'a V; - - iterator_methods!(Bucket::value_ref); -} - -impl DoubleEndedIterator for Values<'_, K, V> { - double_ended_iterator_methods!(Bucket::value_ref); -} - -impl ExactSizeIterator for Values<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for Values<'_, K, V> {} - -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` -impl Clone for Values<'_, K, V> { - fn clone(&self) -> Self { - Values { - iter: self.iter.clone(), - } - } -} - -impl fmt::Debug for Values<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.clone()).finish() - } -} - -impl Default for Values<'_, K, V> { - fn default() -> Self { - Self { iter: [].iter() } - } -} - -/// A mutable iterator over the values of a `IndexMap`. -/// -/// This `struct` is created by the [`values_mut`] method on [`IndexMap`]. See its -/// documentation for more. 
-/// -/// [`values_mut`]: struct.IndexMap.html#method.values_mut -/// [`IndexMap`]: struct.IndexMap.html -pub struct ValuesMut<'a, K, V> { - iter: slice::IterMut<'a, Bucket>, -} - -impl<'a, K, V> ValuesMut<'a, K, V> { - pub(super) fn new(entries: &'a mut [Bucket]) -> Self { - Self { - iter: entries.iter_mut(), - } - } -} - -impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { - type Item = &'a mut V; - - iterator_methods!(Bucket::value_mut); -} - -impl DoubleEndedIterator for ValuesMut<'_, K, V> { - double_ended_iterator_methods!(Bucket::value_mut); -} - -impl ExactSizeIterator for ValuesMut<'_, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for ValuesMut<'_, K, V> {} - -impl fmt::Debug for ValuesMut<'_, K, V> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::value_ref); - f.debug_list().entries(iter).finish() - } -} - -impl Default for ValuesMut<'_, K, V> { - fn default() -> Self { - Self { - iter: [].iter_mut(), - } - } -} - -/// An owning iterator over the values of a `IndexMap`. -/// -/// This `struct` is created by the [`into_values`] method on [`IndexMap`]. -/// See its documentation for more. 
-/// -/// [`IndexMap`]: struct.IndexMap.html -/// [`into_values`]: struct.IndexMap.html#method.into_values -pub struct IntoValues { - iter: vec::IntoIter>, -} - -impl IntoValues { - pub(super) fn new(entries: Vec>) -> Self { - Self { - iter: entries.into_iter(), - } - } -} - -impl Iterator for IntoValues { - type Item = V; - - iterator_methods!(Bucket::value); -} - -impl DoubleEndedIterator for IntoValues { - double_ended_iterator_methods!(Bucket::value); -} - -impl ExactSizeIterator for IntoValues { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl FusedIterator for IntoValues {} - -impl fmt::Debug for IntoValues { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let iter = self.iter.as_slice().iter().map(Bucket::value_ref); - f.debug_list().entries(iter).finish() - } -} - -impl Default for IntoValues { - fn default() -> Self { - Self { - iter: Vec::new().into_iter(), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/map/serde_seq.rs s390-tools-2.33.1/rust-vendor/indexmap/src/map/serde_seq.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/map/serde_seq.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/map/serde_seq.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,138 +0,0 @@ -//! Functions to serialize and deserialize an `IndexMap` as an ordered sequence. -//! -//! The default `serde` implementation serializes `IndexMap` as a normal map, -//! but there is no guarantee that serialization formats will preserve the order -//! of the key-value pairs. This module serializes `IndexMap` as a sequence of -//! `(key, value)` elements instead, in order. -//! -//! This module may be used in a field attribute for derived implementations: -//! -//! ``` -//! # use indexmap::IndexMap; -//! # use serde_derive::{Deserialize, Serialize}; -//! #[derive(Deserialize, Serialize)] -//! struct Data { -//! #[serde(with = "indexmap::map::serde_seq")] -//! map: IndexMap, -//! // ... -//! } -//! 
``` - -use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; -use serde::ser::{Serialize, Serializer}; - -use core::fmt::{self, Formatter}; -use core::hash::{BuildHasher, Hash}; -use core::marker::PhantomData; - -use crate::map::Slice as MapSlice; -use crate::set::Slice as SetSlice; -use crate::IndexMap; - -/// Serializes a `map::Slice` as an ordered sequence. -/// -/// This behaves like [`crate::map::serde_seq`] for `IndexMap`, serializing a sequence -/// of `(key, value)` pairs, rather than as a map that might not preserve order. -impl Serialize for MapSlice -where - K: Serialize, - V: Serialize, -{ - fn serialize(&self, serializer: T) -> Result - where - T: Serializer, - { - serializer.collect_seq(self) - } -} - -/// Serializes a `set::Slice` as an ordered sequence. -impl Serialize for SetSlice -where - T: Serialize, -{ - fn serialize(&self, serializer: Se) -> Result - where - Se: Serializer, - { - serializer.collect_seq(self) - } -} - -/// Serializes an `IndexMap` as an ordered sequence. -/// -/// This function may be used in a field attribute for deriving `Serialize`: -/// -/// ``` -/// # use indexmap::IndexMap; -/// # use serde_derive::Serialize; -/// #[derive(Serialize)] -/// struct Data { -/// #[serde(serialize_with = "indexmap::map::serde_seq::serialize")] -/// map: IndexMap, -/// // ... 
-/// } -/// ``` -pub fn serialize(map: &IndexMap, serializer: T) -> Result -where - K: Serialize + Hash + Eq, - V: Serialize, - S: BuildHasher, - T: Serializer, -{ - serializer.collect_seq(map) -} - -/// Visitor to deserialize a *sequenced* `IndexMap` -struct SeqVisitor(PhantomData<(K, V, S)>); - -impl<'de, K, V, S> Visitor<'de> for SeqVisitor -where - K: Deserialize<'de> + Eq + Hash, - V: Deserialize<'de>, - S: Default + BuildHasher, -{ - type Value = IndexMap; - - fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!(formatter, "a sequenced map") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: SeqAccess<'de>, - { - let capacity = seq.size_hint().unwrap_or(0); - let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default()); - - while let Some((key, value)) = seq.next_element()? { - map.insert(key, value); - } - - Ok(map) - } -} - -/// Deserializes an `IndexMap` from an ordered sequence. -/// -/// This function may be used in a field attribute for deriving `Deserialize`: -/// -/// ``` -/// # use indexmap::IndexMap; -/// # use serde_derive::Deserialize; -/// #[derive(Deserialize)] -/// struct Data { -/// #[serde(deserialize_with = "indexmap::map::serde_seq::deserialize")] -/// map: IndexMap, -/// // ... 
-/// } -/// ``` -pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - K: Deserialize<'de> + Eq + Hash, - V: Deserialize<'de>, - S: Default + BuildHasher, -{ - deserializer.deserialize_seq(SeqVisitor(PhantomData)) -} diff -Nru s390-tools-2.31.0/rust-vendor/indexmap/src/map/slice.rs s390-tools-2.33.1/rust-vendor/indexmap/src/map/slice.rs --- s390-tools-2.31.0/rust-vendor/indexmap/src/map/slice.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/indexmap/src/map/slice.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,540 +0,0 @@ -use super::{ - Bucket, Entries, IndexMap, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values, - ValuesMut, -}; -use crate::util::try_simplify_range; - -use alloc::boxed::Box; -use alloc::vec::Vec; -use core::cmp::Ordering; -use core::fmt; -use core::hash::{Hash, Hasher}; -use core::ops::{self, Bound, Index, IndexMut, RangeBounds}; - -/// A dynamically-sized slice of key-value pairs in an `IndexMap`. -/// -/// This supports indexed operations much like a `[(K, V)]` slice, -/// but not any hashed operations on the map keys. -/// -/// Unlike `IndexMap`, `Slice` does consider the order for `PartialEq` -/// and `Eq`, and it also implements `PartialOrd`, `Ord`, and `Hash`. -#[repr(transparent)] -pub struct Slice { - pub(crate) entries: [Bucket], -} - -// SAFETY: `Slice` is a transparent wrapper around `[Bucket]`, -// and reference lifetimes are bound together in function signatures. 
-#[allow(unsafe_code)] -impl Slice { - pub(super) const fn from_slice(entries: &[Bucket]) -> &Self { - unsafe { &*(entries as *const [Bucket] as *const Self) } - } - - pub(super) fn from_mut_slice(entries: &mut [Bucket]) -> &mut Self { - unsafe { &mut *(entries as *mut [Bucket] as *mut Self) } - } - - pub(super) fn from_boxed(entries: Box<[Bucket]>) -> Box { - unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) } - } - - fn into_boxed(self: Box) -> Box<[Bucket]> { - unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket]) } - } -} - -impl Slice { - pub(crate) fn into_entries(self: Box) -> Vec> { - self.into_boxed().into_vec() - } - - /// Returns an empty slice. - pub const fn new<'a>() -> &'a Self { - Self::from_slice(&[]) - } - - /// Returns an empty mutable slice. - pub fn new_mut<'a>() -> &'a mut Self { - Self::from_mut_slice(&mut []) - } - - /// Return the number of key-value pairs in the map slice. - #[inline] - pub const fn len(&self) -> usize { - self.entries.len() - } - - /// Returns true if the map slice contains no elements. - #[inline] - pub const fn is_empty(&self) -> bool { - self.entries.is_empty() - } - - /// Get a key-value pair by index. - /// - /// Valid indices are *0 <= index < self.len()* - pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { - self.entries.get(index).map(Bucket::refs) - } - - /// Get a key-value pair by index, with mutable access to the value. - /// - /// Valid indices are *0 <= index < self.len()* - pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> { - self.entries.get_mut(index).map(Bucket::ref_mut) - } - - /// Returns a slice of key-value pairs in the given range of indices. - /// - /// Valid indices are *0 <= index < self.len()* - pub fn get_range>(&self, range: R) -> Option<&Self> { - let range = try_simplify_range(range, self.entries.len())?; - self.entries.get(range).map(Slice::from_slice) - } - - /// Returns a mutable slice of key-value pairs in the given range of indices. 
- /// - /// Valid indices are *0 <= index < self.len()* - pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Self> { - let range = try_simplify_range(range, self.entries.len())?; - self.entries.get_mut(range).map(Slice::from_mut_slice) - } - - /// Get the first key-value pair. - pub fn first(&self) -> Option<(&K, &V)> { - self.entries.first().map(Bucket::refs) - } - - /// Get the first key-value pair, with mutable access to the value. - pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { - self.entries.first_mut().map(Bucket::ref_mut) - } - - /// Get the last key-value pair. - pub fn last(&self) -> Option<(&K, &V)> { - self.entries.last().map(Bucket::refs) - } - - /// Get the last key-value pair, with mutable access to the value. - pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { - self.entries.last_mut().map(Bucket::ref_mut) - } - - /// Divides one slice into two at an index. - /// - /// ***Panics*** if `index > len`. - pub fn split_at(&self, index: usize) -> (&Self, &Self) { - let (first, second) = self.entries.split_at(index); - (Self::from_slice(first), Self::from_slice(second)) - } - - /// Divides one mutable slice into two at an index. - /// - /// ***Panics*** if `index > len`. - pub fn split_at_mut(&mut self, index: usize) -> (&mut Self, &mut Self) { - let (first, second) = self.entries.split_at_mut(index); - (Self::from_mut_slice(first), Self::from_mut_slice(second)) - } - - /// Returns the first key-value pair and the rest of the slice, - /// or `None` if it is empty. - pub fn split_first(&self) -> Option<((&K, &V), &Self)> { - if let [first, rest @ ..] = &self.entries { - Some((first.refs(), Self::from_slice(rest))) - } else { - None - } - } - - /// Returns the first key-value pair and the rest of the slice, - /// with mutable access to the value, or `None` if it is empty. - pub fn split_first_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> { - if let [first, rest @ ..] 
= &mut self.entries { - Some((first.ref_mut(), Self::from_mut_slice(rest))) - } else { - None - } - } - - /// Returns the last key-value pair and the rest of the slice, - /// or `None` if it is empty. - pub fn split_last(&self) -> Option<((&K, &V), &Self)> { - if let [rest @ .., last] = &self.entries { - Some((last.refs(), Self::from_slice(rest))) - } else { - None - } - } - - /// Returns the last key-value pair and the rest of the slice, - /// with mutable access to the value, or `None` if it is empty. - pub fn split_last_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> { - if let [rest @ .., last] = &mut self.entries { - Some((last.ref_mut(), Self::from_mut_slice(rest))) - } else { - None - } - } - - /// Return an iterator over the key-value pairs of the map slice. - pub fn iter(&self) -> Iter<'_, K, V> { - Iter::new(&self.entries) - } - - /// Return an iterator over the key-value pairs of the map slice. - pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { - IterMut::new(&mut self.entries) - } - - /// Return an iterator over the keys of the map slice. - pub fn keys(&self) -> Keys<'_, K, V> { - Keys::new(&self.entries) - } - - /// Return an owning iterator over the keys of the map slice. - pub fn into_keys(self: Box) -> IntoKeys { - IntoKeys::new(self.into_entries()) - } - - /// Return an iterator over the values of the map slice. - pub fn values(&self) -> Values<'_, K, V> { - Values::new(&self.entries) - } - - /// Return an iterator over mutable references to the the values of the map slice. - pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { - ValuesMut::new(&mut self.entries) - } - - /// Return an owning iterator over the values of the map slice. - pub fn into_values(self: Box) -> IntoValues { - IntoValues::new(self.into_entries()) - } - - /// Search over a sorted map for a key. - /// - /// Returns the position where that key is present, or the position where it can be inserted to - /// maintain the sort. See [`slice::binary_search`] for more details. 
- /// - /// Computes in **O(log(n))** time, which is notably less scalable than looking the key up in - /// the map this is a slice from using [`IndexMap::get_index_of`], but this can also position - /// missing keys. - pub fn binary_search_keys(&self, x: &K) -> Result - where - K: Ord, - { - self.binary_search_by(|p, _| p.cmp(x)) - } - - /// Search over a sorted map with a comparator function. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search_by`] for more details. - /// - /// Computes in **O(log(n))** time. - #[inline] - pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result - where - F: FnMut(&'a K, &'a V) -> Ordering, - { - self.entries.binary_search_by(move |a| f(&a.key, &a.value)) - } - - /// Search over a sorted map with an extraction function. - /// - /// Returns the position where that value is present, or the position where it can be inserted - /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. - /// - /// Computes in **O(log(n))** time. - #[inline] - pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result - where - F: FnMut(&'a K, &'a V) -> B, - B: Ord, - { - self.binary_search_by(|k, v| f(k, v).cmp(b)) - } - - /// Returns the index of the partition point of a sorted map according to the given predicate - /// (the index of the first element of the second partition). - /// - /// See [`slice::partition_point`] for more details. - /// - /// Computes in **O(log(n))** time. - #[must_use] - pub fn partition_point

- - - - - -

-

HTTP mocking for Rust!

-

- -Mockito is a library for **generating and delivering HTTP mocks** in Rust. You can use it for integration testing -or offline work. Mockito runs a local pool of HTTP servers which create, deliver and remove the mocks. - -## Features - -- Supports HTTP1/2 -- Runs your tests in parallel -- Comes with a wide range of request matchers (Regex, JSON, query parameters etc.) -- Checks that a mock was called (spy) -- Mocks multiple hosts at the same time -- Exposes sync and async interfaces -- Prints out a colored diff of the last unmatched request in case of errors -- Simple, intuitive API -- An awesome logo - - -The full documentation is available at . - -Before upgrading, make sure to check out the [changelog](https://github.com/lipanski/mockito/releases). - -## Getting Started - -Add `mockito` to your `Cargo.toml` and start mocking: - -```rust -#[test] -fn test_something() { - // Request a new server from the pool - let mut server = mockito::Server::new(); - - // Use one of these addresses to configure your client - let host = server.host_with_port(); - let url = server.url(); - - // Create a mock - let mock = server.mock("GET", "/hello") - .with_status(201) - .with_header("content-type", "text/plain") - .with_header("x-api-key", "1234") - .with_body("world") - .create(); - - // Any calls to GET /hello beyond this line will respond with 201, the - // `content-type: text/plain` header and the body "world". 
- - // You can use `Mock::assert` to verify that your mock was called - mock.assert(); -} -``` - -If `Mock::assert` fails, a colored diff of the last unmatched request is displayed: - -![colored-diff.png](https://raw.githubusercontent.com/lipanski/mockito/master/docs/colored-diff.png) - -Use **matchers** to handle requests to the same endpoint in a different way: - -```rust -#[test] -fn test_something() { - let mut server = mockito::Server::new(); - - server.mock("GET", "/greetings") - .match_header("content-type", "application/json") - .match_body(mockito::Matcher::PartialJsonString( - "{\"greeting\": \"hello\"}".to_string(), - )) - .with_body("hello json") - .create(); - - server.mock("GET", "/greetings") - .match_header("content-type", "application/text") - .match_body(mockito::Matcher::Regex("greeting=hello".to_string())) - .with_body("hello text") - .create(); -} -``` - -Start **multiple servers** to simulate requests to different hosts: - -```rust -#[test] -fn test_something() { - let mut twitter = mockito::Server::new(); - let mut github = mockito::Server::new(); - - // These mocks will be available at `twitter.url()` - let twitter_mock = twitter.mock("GET", "/api").create(); - - // These mocks will be available at `github.url()` - let github_mock = github.mock("GET", "/api").create(); -} -``` - -Write **async** tests (make sure to use the `_async` methods!): - -```rust -#[tokio::test] -async fn test_simple_route_mock_async() { - let mut server = Server::new_async().await; - let m1 = server.mock("GET", "/a").with_body("aaa").create_async().await; - let m2 = server.mock("GET", "/b").with_body("bbb").create_async().await; - - let (m1, m2) = futures::join!(m1, m2); - - // You can use `Mock::assert_async` to verify that your mock was called - // m1.assert_async().await; - // m2.assert_async().await; -} -``` - -## Minimum supported Rust toolchain - -The current minimum support Rust toolchain is **1.68.0** - -## Contribution Guidelines - -1. 
Check the existing issues and pull requests. -2. One commit is one feature - consider squashing. -3. Format code with `cargo fmt`. -4. :shipit: - -## Development - -### Tests - -Run tests: - -```sh -cargo test -``` - -...or run tests using a different toolchain: - -```sh -rustup run --install 1.68.0 cargo test -``` - -...or run tests while disabling the default features (e.g. the colors): - -```sh -cargo test --no-default-features -``` - -### Code style - -Mockito uses [rustfmt](https://github.com/rust-lang/rustfmt) as a general code style. - -Install `rustfmt`: - -```sh -rustup component add rustfmt -``` - -Format code: - -```sh -cargo fmt -``` - -Some editors might provide a plugin to format your Rust code automatically. - -### Linter - -Mockito uses [clippy](https://github.com/rust-lang/rust-clippy) and it should be run always on the minimum supported Rust version, in order to ensure backwards compatibility. - -Install `clippy`: - -```sh -rustup component add clippy -``` - -The linter is always run on the minimum supported Rust version: - -```sh -rustup run --install 1.68.0 cargo clippy-mockito -``` - -### Release - -Release: - -```sh -cargo publish -``` - -### Benchmarks - -Install `rust nightly`: - -```sh -rustup install nightly -``` - -Run benchmarks: - -```sh -rustup run nightly cargo bench -``` diff -Nru s390-tools-2.31.0/rust-vendor/mockito/rustfmt.toml s390-tools-2.33.1/rust-vendor/mockito/rustfmt.toml --- s390-tools-2.31.0/rust-vendor/mockito/rustfmt.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/rustfmt.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -edition = "2021" -max_width = 100 diff -Nru s390-tools-2.31.0/rust-vendor/mockito/src/diff.rs s390-tools-2.33.1/rust-vendor/mockito/src/diff.rs --- s390-tools-2.31.0/rust-vendor/mockito/src/diff.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/src/diff.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,64 +0,0 @@ -#[cfg(feature = 
"color")] -use colored::*; -use similar::{Change, ChangeTag, TextDiff}; - -pub fn compare(expected: &str, actual: &str) -> String { - let mut result = String::new(); - - let clean_expected = expected.replace("\r\n", "\n"); - let clean_actual = actual.replace("\r\n", "\n"); - - let mut last: Option> = None; - for diff in TextDiff::from_lines(&clean_expected, &clean_actual).iter_all_changes() { - let x = diff.value(); - match diff.tag() { - ChangeTag::Equal => { - result.push_str(x); - } - ChangeTag::Insert => { - if let Some((y, ChangeTag::Delete)) = last.map(|d| (d.value(), d.tag())) { - for change in TextDiff::from_words(y, x).iter_all_changes() { - match change.tag() { - ChangeTag::Equal => { - let z = change.value(); - #[cfg(feature = "color")] - #[allow(clippy::unnecessary_to_owned)] - result.push_str(&z.green().to_string()); - #[cfg(not(feature = "color"))] - result.push_str(z); - } - ChangeTag::Insert => { - let z = change.value(); - #[cfg(feature = "color")] - #[allow(clippy::unnecessary_to_owned)] - result.push_str(&z.black().on_green().to_string()); - #[cfg(not(feature = "color"))] - result.push_str(z); - } - _ => (), - } - } - } else { - #[cfg(feature = "color")] - #[allow(clippy::unnecessary_to_owned)] - result.push_str(&x.bright_green().to_string()); - #[cfg(not(feature = "color"))] - result.push_str(x); - } - } - ChangeTag::Delete => { - #[cfg(feature = "color")] - #[allow(clippy::unnecessary_to_owned)] - result.push_str(&x.red().to_string()); - #[cfg(not(feature = "color"))] - result.push_str(x); - } - } - - last = Some(diff); - } - - result.push('\n'); - - result -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/src/error.rs s390-tools-2.33.1/rust-vendor/mockito/src/error.rs --- s390-tools-2.31.0/rust-vendor/mockito/src/error.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/src/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,80 +0,0 @@ -use std::error::Error as ErrorTrait; -use std::fmt::Display; - -/// -/// 
Contains information about an error occurence -/// -#[derive(Debug)] -pub struct Error { - /// The type of this error - pub kind: ErrorKind, - /// Some errors come with more context - pub context: Option, -} - -impl Error { - pub(crate) fn new(kind: ErrorKind) -> Error { - Error { - kind, - context: None, - } - } - - pub(crate) fn new_with_context(kind: ErrorKind, context: impl Display) -> Error { - Error { - kind, - context: Some(context.to_string()), - } - } -} - -impl Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{} (context: {})", - self.kind.description(), - self.context.as_ref().unwrap_or(&"none".to_string()) - ) - } -} - -impl ErrorTrait for Error {} - -/// -/// The type of an error -/// -#[derive(Debug)] -pub enum ErrorKind { - /// The server is not running - ServerFailure, - /// The server is busy - ServerBusy, - /// A lock can't be bypassed - Deadlock, - /// Could not deliver a response - ResponseFailure, - /// The status code is invalid or out of range - InvalidStatusCode, - /// Failed to read the request body - RequestBodyFailure, - /// Failed to write the response body - ResponseBodyFailure, - /// File not found - FileNotFound, -} - -impl ErrorKind { - fn description(&self) -> &'static str { - match self { - ErrorKind::ServerFailure => "the server is not running", - ErrorKind::ServerBusy => "the server is busy", - ErrorKind::Deadlock => "a lock can't be bypassed", - ErrorKind::ResponseFailure => "could not deliver a response", - ErrorKind::InvalidStatusCode => "invalid status code", - ErrorKind::RequestBodyFailure => "failed to read the request body", - ErrorKind::ResponseBodyFailure => "failed to write the response body", - ErrorKind::FileNotFound => "file not found", - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/src/lib.rs s390-tools-2.33.1/rust-vendor/mockito/src/lib.rs --- s390-tools-2.31.0/rust-vendor/mockito/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/mockito/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,687 +0,0 @@ -#![warn(missing_docs)] -#![doc( - html_logo_url = "https://raw.githubusercontent.com/lipanski/mockito/master/docs/logo-black-100.png" -)] - -//! -//! Mockito is a library for **generating and delivering HTTP mocks** in Rust. You can use it for integration testing -//! or offline work. Mockito runs a local pool of HTTP servers which create, deliver and remove the mocks. -//! -//! # Features -//! -//! - Supports HTTP1/2 -//! - Runs your tests in parallel -//! - Comes with a wide range of request matchers (Regex, JSON, query parameters etc.) -//! - Checks that a mock was called (spy) -//! - Mocks multiple hosts at the same time -//! - Exposes sync and async interfaces -//! - Prints out a colored diff of the last unmatched request in case of errors -//! - Simple, intuitive API -//! - An awesome logo -//! -//! # Getting Started -//! -//! Add `mockito` to your `Cargo.toml` and start mocking: -//! -//! ``` -//! #[cfg(test)] -//! mod tests { -//! #[test] -//! fn test_something() { -//! // Request a new server from the pool -//! let mut server = mockito::Server::new(); -//! -//! // Use one of these addresses to configure your client -//! let host = server.host_with_port(); -//! let url = server.url(); -//! -//! // Create a mock -//! let mock = server.mock("GET", "/hello") -//! .with_status(201) -//! .with_header("content-type", "text/plain") -//! .with_header("x-api-key", "1234") -//! .with_body("world") -//! .create(); -//! -//! // Any calls to GET /hello beyond this line will respond with 201, the -//! // `content-type: text/plain` header and the body "world". -//! -//! // You can use `Mock::assert` to verify that your mock was called -//! // mock.assert(); -//! } -//! } -//! ``` -//! -//! If `Mock::assert` fails, a colored diff of the last unmatched request is displayed: -//! -//! 
![colored-diff.png](https://raw.githubusercontent.com/lipanski/mockito/master/docs/colored-diff.png) -//! -//! Use **matchers** to handle requests to the same endpoint in a different way: -//! -//! ``` -//! #[cfg(test)] -//! mod tests { -//! #[test] -//! fn test_something() { -//! let mut server = mockito::Server::new(); -//! -//! server.mock("GET", "/greetings") -//! .match_header("content-type", "application/json") -//! .match_body(mockito::Matcher::PartialJsonString( -//! "{\"greeting\": \"hello\"}".to_string(), -//! )) -//! .with_body("hello json") -//! .create(); -//! -//! server.mock("GET", "/greetings") -//! .match_header("content-type", "application/text") -//! .match_body(mockito::Matcher::Regex("greeting=hello".to_string())) -//! .with_body("hello text") -//! .create(); -//! } -//! } -//! ``` -//! -//! Start **multiple servers** to simulate requests to different hosts: -//! -//! ``` -//! #[cfg(test)] -//! mod tests { -//! #[test] -//! fn test_something() { -//! let mut twitter = mockito::Server::new(); -//! let mut github = mockito::Server::new(); -//! -//! // These mocks will be available at `twitter.url()` -//! let twitter_mock = twitter.mock("GET", "/api").create(); -//! -//! // These mocks will be available at `github.url()` -//! let github_mock = github.mock("GET", "/api").create(); -//! } -//! } -//! ``` -//! -//! Write **async** tests (make sure to use the `_async` methods!): -//! -//! ``` -//! #[cfg(test)] -//! mod tests { -//! #[tokio::test] -//! async fn test_something() { -//! let mut server = Server::new_async().await; -//! let m1 = server.mock("GET", "/a").with_body("aaa").create_async().await; -//! let m2 = server.mock("GET", "/b").with_body("bbb").create_async().await; -//! -//! let (m1, m2) = futures::join!(m1, m2); -//! -//! // You can use `Mock::assert_async` to verify that your mock was called -//! // m1.assert_async().await; -//! // m2.assert_async().await; -//! } -//! } -//! ``` -//! -//! # Lifetime -//! -//! 
A mock is available only throughout the lifetime of the server. Once the server goes -//! out of scope, all mocks defined on that server are removed: -//! -//! ``` -//! let address; -//! -//! { -//! let mut s = mockito::Server::new(); -//! address = s.host_with_port(); -//! -//! s.mock("GET", "/").with_body("hi").create(); -//! -//! // Requests to `address` will be responded with "hi" til here -//! } -//! -//! // Requests to `address` will fail as of this point -//! ``` -//! -//! You can remove individual mocks earlier by calling `Mock::remove`. -//! -//! # Async -//! -//! Mockito comes with both a sync and an async interface. -//! -//! In order to write async tests, you'll need to use the `_async` methods: -//! -//! - `Server::new_async` -//! - `Mock::create_async` -//! - `Mock::assert_async` -//! - `Mock::matched_async` -//! - `Mock::remove_async` -//! -//! ...otherwise your tests will not compile and you'll see the following error: -//! -//! ```text -//! Cannot block the current thread from within a runtime. -//! This happens because a function attempted to block the current thread while the thread is being used to drive asynchronous tasks. -//! ``` -//! -//! # Matchers -//! -//! Mockito can match your request by method, path, query, headers or body. -//! -//! Various matchers are provided by the `Matcher` type: exact (string, binary, JSON), partial (regular expressions, -//! JSON), any or missing. The following guide will walk you through the most common matchers. Check the -//! `Matcher` documentation for all the rest. -//! -//! # Matching by path and query -//! -//! By default, the request path and query is compared by its exact value: -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // Matches only calls to GET /hello -//! s.mock("GET", "/hello").create(); -//! -//! // Matches only calls to GET /hello?world=1 -//! s.mock("GET", "/hello?world=1").create(); -//! ``` -//! -//! 
You can also match the path partially, by using a regular expression: -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // Will match calls to GET /hello/1 and GET /hello/2 -//! s.mock("GET", -//! mockito::Matcher::Regex(r"^/hello/(1|2)$".to_string()) -//! ).create(); -//! ``` -//! -//! Or you can catch all requests, by using the `Matcher::Any` variant: -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // Will match any GET request -//! s.mock("GET", mockito::Matcher::Any).create(); -//! ``` -//! -//! # Matching by query -//! -//! You can match the query part by using the `Mock#match_query` function together with the various matchers, -//! most notably `Matcher::UrlEncoded`: -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // This will match requests containing the URL-encoded -//! // query parameter `greeting=good%20day` -//! s.mock("GET", "/test") -//! .match_query(mockito::Matcher::UrlEncoded("greeting".into(), "good day".into())) -//! .create(); -//! -//! // This will match requests containing the URL-encoded -//! // query parameters `hello=world` and `greeting=good%20day` -//! s.mock("GET", "/test") -//! .match_query(mockito::Matcher::AllOf(vec![ -//! mockito::Matcher::UrlEncoded("hello".into(), "world".into()), -//! mockito::Matcher::UrlEncoded("greeting".into(), "good day".into()) -//! ])) -//! .create(); -//! -//! // You can achieve similar results with the regex matcher -//! s.mock("GET", "/test") -//! .match_query(mockito::Matcher::Regex("hello=world".into())) -//! .create(); -//! ``` -//! -//! Note that the key/value arguments for `Matcher::UrlEncoded` should be left in plain (unencoded) format. -//! -//! You can also specify the query as part of the path argument in a `mock` call, in which case an exact -//! match will be performed: -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! 
// This will perform a full match against the query part -//! s.mock("GET", "/test?hello=world").create(); -//! ``` -//! -//! If you'd like to ignore the query entirely, use the `Matcher::Any` variant: -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // This will match requests to GET /test with any query -//! s.mock("GET", "/test").match_query(mockito::Matcher::Any).create(); -//! ``` -//! -//! # Matching by header -//! -//! By default, headers are compared by their exact value. The header name letter case is ignored though. -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! s.mock("GET", "/hello") -//! .match_header("content-type", "application/json") -//! .with_body(r#"{"hello": "world"}"#) -//! .create(); -//! -//! s.mock("GET", "/hello") -//! .match_header("content-type", "text/plain") -//! .with_body("world") -//! .create(); -//! -//! // JSON requests to GET /hello will respond with JSON, while plain requests -//! // will respond with text. -//! ``` -//! -//! You can also match a header value with a *regular expressions*, by using the `Matcher::Regex` matcher: -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! s.mock("GET", "/hello") -//! .match_header("content-type", mockito::Matcher::Regex(r".*json.*".to_string())) -//! .with_body(r#"{"hello": "world"}"#) -//! .create(); -//! ``` -//! -//! Or you can match a header *only by its field name*, by setting the `Mock::match_header` value to `Matcher::Any`. -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! s.mock("GET", "/hello") -//! .match_header("content-type", mockito::Matcher::Any) -//! .with_body("something") -//! .create(); -//! -//! // Requests containing any content-type header value will be mocked. -//! // Requests not containing this header will return `501 Not Implemented`. -//! ``` -//! -//! 
You can mock requests that should be *missing a particular header field*, by setting the `Mock::match_header` -//! value to `Matcher::Missing`. -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! s.mock("GET", "/hello") -//! .match_header("authorization", mockito::Matcher::Missing) -//! .with_body("no authorization header") -//! .create(); -//! -//! // Requests without the authorization header will be matched. -//! // Requests containing the authorization header will return `501 Mock Not Found`. -//! ``` -//! -//! # Matching by body -//! -//! You can match a request by its body by using the `Mock#match_body` method. -//! By default the request body is ignored, similar to passing the `Matcher::Any` argument to the `match_body` method. -//! -//! You can match a body by an exact value: -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // Will match requests to POST / whenever the request body is "hello" -//! s.mock("POST", "/").match_body("hello").create(); -//! ``` -//! -//! Or you can match the body by using a regular expression: -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // Will match requests to POST / whenever the request body *contains* the word "hello" (e.g. "hello world") -//! s.mock("POST", "/").match_body( -//! mockito::Matcher::Regex("hello".to_string()) -//! ).create(); -//! ``` -//! -//! Or you can match the body using a JSON object: -//! -//! ## Example -//! -//! ``` -//! # extern crate mockito; -//! #[macro_use] -//! extern crate serde_json; -//! -//! # fn main() { -//! let mut s = mockito::Server::new(); -//! // Will match requests to POST / whenever the request body matches the json object -//! s.mock("POST", "/").match_body(mockito::Matcher::Json(json!({"hello": "world"}))).create(); -//! # } -//! ``` -//! -//! If `serde_json::json!` is not exposed, you can use `Matcher::JsonString` the same way, -//! 
but by passing a `String` to the matcher: -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // Will match requests to POST / whenever the request body matches the json object -//! s.mock("POST", "/") -//! .match_body( -//! mockito::Matcher::JsonString(r#"{"hello": "world"}"#.to_string()) -//! ) -//! .create(); -//! ``` -//! -//! # The `AnyOf` matcher -//! -//! The `Matcher::AnyOf` construct takes a vector of matchers as arguments and will be enabled -//! if at least one of the provided matchers matches the request. -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // Will match requests to POST / whenever the request body is either `hello=world` or `{"hello":"world"}` -//! s.mock("POST", "/") -//! .match_body( -//! mockito::Matcher::AnyOf(vec![ -//! mockito::Matcher::Exact("hello=world".to_string()), -//! mockito::Matcher::JsonString(r#"{"hello": "world"}"#.to_string()), -//! ]) -//! ) -//! .create(); -//! ``` -//! -//! # The `AllOf` matcher -//! -//! The `Matcher::AllOf` construct takes a vector of matchers as arguments and will be enabled -//! if all of the provided matchers match the request. -//! -//! ## Example -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! // Will match requests to POST / whenever the request body contains both `hello` and `world` -//! s.mock("POST", "/") -//! .match_body( -//! mockito::Matcher::AllOf(vec![ -//! mockito::Matcher::Regex("hello".to_string()), -//! mockito::Matcher::Regex("world".to_string()), -//! ]) -//! ) -//! .create(); -//! ``` -//! -//! # Asserts -//! -//! You can use the `Mock::assert` method to **assert that a mock was called**. In other words, -//! `Mock#assert` can validate that your code performed the expected HTTP request. -//! -//! By default, the method expects only **one** request to your mock. -//! -//! ## Example -//! -//! ```no_run -//! use std::net::TcpStream; -//! use std::io::{Read, Write}; -//! -//! let mut s = mockito::Server::new(); -//! 
let mock = s.mock("GET", "/hello").create(); -//! -//! { -//! // Place a request -//! let mut stream = TcpStream::connect(s.host_with_port()).unwrap(); -//! stream.write_all("GET /hello HTTP/1.1\r\n\r\n".as_bytes()).unwrap(); -//! let mut response = String::new(); -//! stream.read_to_string(&mut response).unwrap(); -//! stream.flush().unwrap(); -//! } -//! -//! mock.assert(); -//! ``` -//! -//! When several mocks can match a request, Mockito applies the first one that still expects requests. -//! You can use this behaviour to provide **different responses for subsequent requests to the same endpoint**. -//! -//! ## Example -//! -//! ``` -//! use std::net::TcpStream; -//! use std::io::{Read, Write}; -//! -//! let mut s = mockito::Server::new(); -//! let english_hello_mock = s.mock("GET", "/hello").with_body("good bye").create(); -//! let french_hello_mock = s.mock("GET", "/hello").with_body("au revoir").create(); -//! -//! { -//! // Place a request to GET /hello -//! let mut stream = TcpStream::connect(s.host_with_port()).unwrap(); -//! stream.write_all("GET /hello HTTP/1.1\r\n\r\n".as_bytes()).unwrap(); -//! let mut response = String::new(); -//! stream.read_to_string(&mut response).unwrap(); -//! stream.flush().unwrap(); -//! } -//! -//! english_hello_mock.assert(); -//! -//! { -//! // Place another request to GET /hello -//! let mut stream = TcpStream::connect(s.host_with_port()).unwrap(); -//! stream.write_all("GET /hello HTTP/1.1\r\n\r\n".as_bytes()).unwrap(); -//! let mut response = String::new(); -//! stream.read_to_string(&mut response).unwrap(); -//! stream.flush().unwrap(); -//! } -//! -//! french_hello_mock.assert(); -//! ``` -//! -//! If you're expecting more than 1 request, you can use the `Mock::expect` method to specify the exact amount of requests: -//! -//! ## Example -//! -//! ```no_run -//! use std::net::TcpStream; -//! use std::io::{Read, Write}; -//! -//! let mut s = mockito::Server::new(); -//! -//! 
let mock = s.mock("GET", "/hello").expect(3).create(); -//! -//! for _ in 0..3 { -//! // Place a request -//! let mut stream = TcpStream::connect(s.host_with_port()).unwrap(); -//! stream.write_all("GET /hello HTTP/1.1\r\n\r\n".as_bytes()).unwrap(); -//! let mut response = String::new(); -//! stream.read_to_string(&mut response).unwrap(); -//! stream.flush().unwrap(); -//! } -//! -//! mock.assert(); -//! ``` -//! -//! You can also work with ranges, by using the `Mock::expect_at_least` and `Mock::expect_at_most` methods: -//! -//! ## Example -//! -//! ```no_run -//! use std::net::TcpStream; -//! use std::io::{Read, Write}; -//! -//! let mut s = mockito::Server::new(); -//! -//! let mock = s.mock("GET", "/hello").expect_at_least(2).expect_at_most(4).create(); -//! -//! for _ in 0..3 { -//! // Place a request -//! let mut stream = TcpStream::connect(s.host_with_port()).unwrap(); -//! stream.write_all("GET /hello HTTP/1.1\r\n\r\n".as_bytes()).unwrap(); -//! let mut response = String::new(); -//! stream.read_to_string(&mut response).unwrap(); -//! stream.flush().unwrap(); -//! } -//! -//! mock.assert(); -//! ``` -//! -//! The errors produced by the `assert` method contain information about the tested mock, but also about the -//! **last unmatched request**, which can be very useful to track down an error in your implementation or -//! a missing or incomplete mock. A colored diff is also displayed: -//! -//! ![colored-diff.png](https://raw.githubusercontent.com/lipanski/mockito/master/docs/colored-diff.png) -//! -//! Color output is enabled by default, but can be toggled with the `color` feature flag. -//! -//! Here's an example of how a `Mock#assert` error looks like: -//! -//! ```text -//! > Expected 1 request(s) to: -//! -//! POST /users?number=one -//! bob -//! -//! ...but received 0 -//! -//! > The last unmatched request was: -//! -//! POST /users?number=two -//! content-length: 5 -//! alice -//! -//! > Difference: -//! -//! # A colored diff -//! -//! ``` -//! -//! 
You can also use the `matched` method to return a boolean for whether the mock was called the -//! correct number of times without panicking -//! -//! ## Example -//! -//! ``` -//! use std::net::TcpStream; -//! use std::io::{Read, Write}; -//! -//! let mut s = mockito::Server::new(); -//! -//! let mock = s.mock("GET", "/").create(); -//! -//! { -//! let mut stream = TcpStream::connect(s.host_with_port()).unwrap(); -//! stream.write_all("GET / HTTP/1.1\r\n\r\n".as_bytes()).unwrap(); -//! let mut response = String::new(); -//! stream.read_to_string(&mut response).unwrap(); -//! stream.flush().unwrap(); -//! } -//! -//! assert!(mock.matched()); -//! -//! { -//! let mut stream = TcpStream::connect(s.host_with_port()).unwrap(); -//! stream.write_all("GET / HTTP/1.1\r\n\r\n".as_bytes()).unwrap(); -//! let mut response = String::new(); -//! stream.read_to_string(&mut response).unwrap(); -//! stream.flush().unwrap(); -//! } -//! assert!(!mock.matched()); -//! ``` -//! -//! # Non-matching calls -//! -//! Any calls to the Mockito server that are not matched will return *501 Mock Not Found*. -//! -//! Note that **mocks are matched in reverse order** - the most recent one wins. -//! -//! # Cleaning up -//! -//! As mentioned earlier, mocks are cleaned up whenever the server goes out of scope. If you -//! need to remove them earlier, you can call `Server::reset` to remove all mocks registered -//! so far: -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! s.mock("GET", "/1").create(); -//! s.mock("GET", "/2").create(); -//! s.mock("GET", "/3").create(); -//! -//! s.reset(); -//! -//! // Nothing is mocked at this point -//! ``` -//! -//! ...or you can call `Mock::remove` to remove a single mock: -//! -//! ``` -//! let mut s = mockito::Server::new(); -//! -//! let m1 = s.mock("GET", "/1").create(); -//! let m2 = s.mock("GET", "/2").create(); -//! -//! m1.remove(); -//! -//! // Only m2 is available at this point -//! ``` -//! -//! # Debug -//! -//! 
Mockito uses the `env_logger` crate under the hood to provide useful debugging information. -//! -//! If you'd like to activate the debug output, introduce the [env_logger](https://crates.rs/crates/env_logger) crate -//! to your project and initialize it before each test that needs debugging: -//! -//! ``` -//! #[test] -//! fn example_test() { -//! let _ = env_logger::try_init(); -//! // ... -//! } -//! ``` -//! -//! Run your tests with: -//! -//! ```sh -//! RUST_LOG=mockito=debug cargo test -//! ``` -//! -pub use error::{Error, ErrorKind}; -#[allow(deprecated)] -pub use matcher::Matcher; -pub use mock::Mock; -pub use request::Request; -pub use server::Server; -pub use server_pool::ServerGuard; - -mod diff; -mod error; -mod matcher; -mod mock; -mod request; -mod response; -mod server; -mod server_pool; diff -Nru s390-tools-2.31.0/rust-vendor/mockito/src/matcher.rs s390-tools-2.33.1/rust-vendor/mockito/src/matcher.rs --- s390-tools-2.31.0/rust-vendor/mockito/src/matcher.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/src/matcher.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,277 +0,0 @@ -use assert_json_diff::{assert_json_matches_no_panic, CompareMode}; -use regex::Regex; -use std::collections::HashMap; -use std::convert::From; -use std::fmt; -use std::fs::File; -use std::io; -use std::io::Read; -use std::path::Path; -use std::string::ToString; - -/// -/// Allows matching the request path, headers or body in multiple ways: by the exact value, by any value (as -/// long as it is present), by regular expression or by checking that a particular header is missing. -/// -/// These matchers can be used within the `Server::mock`, `Mock::match_header` or `Mock::match_body` calls. -/// -#[derive(Clone, PartialEq, Debug)] -#[allow(deprecated)] // Rust bug #38832 -pub enum Matcher { - /// Matches the exact path or header value. There's also an implementation of `From<&str>` - /// to keep things simple and backwards compatible. 
- Exact(String), - /// Matches the body content as a binary file - Binary(BinaryBody), - /// Matches a path or header value by a regular expression. - Regex(String), - /// Matches a specified JSON body from a `serde_json::Value` - Json(serde_json::Value), - /// Matches a specified JSON body from a `String` - JsonString(String), - /// Matches a partial JSON body from a `serde_json::Value` - PartialJson(serde_json::Value), - /// Matches a specified partial JSON body from a `String` - PartialJsonString(String), - /// Matches a URL-encoded key/value pair, where both key and value should be specified - /// in plain (unencoded) format - UrlEncoded(String, String), - /// At least one matcher must match - AnyOf(Vec), - /// All matchers must match - AllOf(Vec), - /// Matches any path or any header value. - Any, - /// Checks that a header is not present in the request. - Missing, -} - -impl<'a> From<&'a str> for Matcher { - fn from(value: &str) -> Self { - Matcher::Exact(value.to_string()) - } -} - -#[allow(clippy::fallible_impl_from)] -impl From<&Path> for Matcher { - fn from(value: &Path) -> Self { - // We want the code to panic if the path is not readable. 
- Matcher::Binary(BinaryBody::from_path(value).unwrap()) - } -} - -impl From<&mut File> for Matcher { - fn from(value: &mut File) -> Self { - Matcher::Binary(BinaryBody::from_file(value)) - } -} - -impl From> for Matcher { - fn from(value: Vec) -> Self { - Matcher::Binary(BinaryBody::from_bytes(value)) - } -} - -impl fmt::Display for Matcher { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let join_matches = |matches: &[Self]| { - matches - .iter() - .map(Self::to_string) - .fold(String::new(), |acc, matcher| { - if acc.is_empty() { - matcher - } else { - format!("{}, {}", acc, matcher) - } - }) - }; - - let result = match self { - Matcher::Exact(ref value) => value.to_string(), - Matcher::Binary(ref file) => format!("{} (binary)", file), - Matcher::Regex(ref value) => format!("{} (regex)", value), - Matcher::Json(ref json_obj) => format!("{} (json)", json_obj), - Matcher::JsonString(ref value) => format!("{} (json)", value), - Matcher::PartialJson(ref json_obj) => format!("{} (partial json)", json_obj), - Matcher::PartialJsonString(ref value) => format!("{} (partial json)", value), - Matcher::UrlEncoded(ref field, ref value) => { - format!("{}={} (urlencoded)", field, value) - } - Matcher::Any => "(any)".to_string(), - Matcher::AnyOf(x) => format!("({}) (any of)", join_matches(x)), - Matcher::AllOf(x) => format!("({}) (all of)", join_matches(x)), - Matcher::Missing => "(missing)".to_string(), - }; - write!(f, "{}", result) - } -} - -impl Matcher { - pub(crate) fn matches_values(&self, header_values: &[&str]) -> bool { - match self { - Matcher::Missing => header_values.is_empty(), - // AnyOf([…Missing…]) is handled here, but - // AnyOf([Something]) is handled in the last block. - // That's because Missing matches against all values at once, - // but other matchers match against individual values. 
- Matcher::AnyOf(ref matchers) if header_values.is_empty() => { - matchers.iter().any(|m| m.matches_values(header_values)) - } - Matcher::AllOf(ref matchers) if header_values.is_empty() => { - matchers.iter().all(|m| m.matches_values(header_values)) - } - _ => { - !header_values.is_empty() && header_values.iter().all(|val| self.matches_value(val)) - } - } - } - - pub(crate) fn matches_binary_value(&self, binary: &[u8]) -> bool { - match self { - Matcher::Binary(ref file) => binary == &*file.content, - _ => false, - } - } - - #[allow(deprecated)] - pub(crate) fn matches_value(&self, other: &str) -> bool { - let compare_json_config = assert_json_diff::Config::new(CompareMode::Inclusive); - match self { - Matcher::Exact(ref value) => value == other, - Matcher::Binary(_) => false, - Matcher::Regex(ref regex) => Regex::new(regex).unwrap().is_match(other), - Matcher::Json(ref json_obj) => { - let other: serde_json::Value = serde_json::from_str(other).unwrap(); - *json_obj == other - } - Matcher::JsonString(ref value) => { - let value: serde_json::Value = serde_json::from_str(value).unwrap(); - let other: serde_json::Value = serde_json::from_str(other).unwrap(); - value == other - } - Matcher::PartialJson(ref json_obj) => { - let actual: serde_json::Value = serde_json::from_str(other).unwrap(); - let expected = json_obj.clone(); - assert_json_matches_no_panic(&actual, &expected, compare_json_config).is_ok() - } - Matcher::PartialJsonString(ref value) => { - let expected: serde_json::Value = serde_json::from_str(value).unwrap(); - let actual: serde_json::Value = serde_json::from_str(other).unwrap(); - assert_json_matches_no_panic(&actual, &expected, compare_json_config).is_ok() - } - Matcher::UrlEncoded(ref expected_field, ref expected_value) => { - serde_urlencoded::from_str::>(other) - .map(|params: HashMap<_, _>| { - params.into_iter().any(|(ref field, ref value)| { - field == expected_field && value == expected_value - }) - }) - .unwrap_or(false) - } - Matcher::Any => 
true, - Matcher::AnyOf(ref matchers) => matchers.iter().any(|m| m.matches_value(other)), - Matcher::AllOf(ref matchers) => matchers.iter().all(|m| m.matches_value(other)), - Matcher::Missing => other.is_empty(), - } - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) enum PathAndQueryMatcher { - Unified(Matcher), - Split(Box, Box), -} - -impl PathAndQueryMatcher { - pub(crate) fn matches_value(&self, other: &str) -> bool { - match self { - PathAndQueryMatcher::Unified(matcher) => matcher.matches_value(other), - PathAndQueryMatcher::Split(ref path_matcher, ref query_matcher) => { - let mut parts = other.splitn(2, '?'); - let path = parts.next().unwrap(); - let query = parts.next().unwrap_or(""); - - path_matcher.matches_value(path) && query_matcher.matches_value(query) - } - } - } -} - -impl fmt::Display for PathAndQueryMatcher { - #[allow(deprecated)] - #[allow(clippy::write_with_newline)] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - PathAndQueryMatcher::Unified(matcher) => write!(f, "{}\r\n", &matcher), - PathAndQueryMatcher::Split(path, query) => write!(f, "{}?{}\r\n", &path, &query), - } - } -} - -/// -/// Represents a binary object the body should be matched against -/// -#[derive(Debug, Clone)] -pub struct BinaryBody { - path: Option, - content: Vec, -} - -impl BinaryBody { - /// Read the content from path and initialize a `BinaryBody` - /// - /// # Errors - /// - /// The same resulting from a failed `std::fs::read`. 
- pub fn from_path(path: &Path) -> Result { - Ok(Self { - path: path.to_str().map(ToString::to_string), - content: std::fs::read(path)?, - }) - } - - /// Read the content from a &mut File and initialize a `BinaryBody` - pub fn from_file(file: &mut File) -> Self { - Self { - path: None, - content: get_content_from(file), - } - } - - /// Instantiate the matcher directly passing the content - #[allow(clippy::missing_const_for_fn)] - pub fn from_bytes(content: Vec) -> Self { - Self { - path: None, - content, - } - } -} - -fn get_content_from(file: &mut File) -> Vec { - let mut filecontent: Vec = Vec::new(); - file.read_to_end(&mut filecontent).unwrap(); - filecontent -} - -impl PartialEq for BinaryBody { - fn eq(&self, other: &Self) -> bool { - match (self.path.as_ref(), other.path.as_ref()) { - (Some(p), Some(o)) => p == o, - _ => self.content == other.content, - } - } -} - -impl fmt::Display for BinaryBody { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(filepath) = self.path.as_ref() { - write!(f, "filepath: {}", filepath) - } else { - let len: usize = std::cmp::min(self.content.len(), 8); - let first_bytes: Vec = self.content.iter().copied().take(len).collect(); - write!(f, "filecontent: {:?}", first_bytes) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/src/mock.rs s390-tools-2.33.1/rust-vendor/mockito/src/mock.rs --- s390-tools-2.31.0/rust-vendor/mockito/src/mock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/src/mock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,635 +0,0 @@ -use crate::diff; -use crate::matcher::{Matcher, PathAndQueryMatcher}; -use crate::response::{Body, Response}; -use crate::server::RemoteMock; -use crate::server::State; -use crate::Request; -use crate::{Error, ErrorKind}; -use hyper::StatusCode; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; -use std::convert::Into; -use std::fmt; -use std::io; -use std::ops::Drop; -use std::path::Path; -use 
std::string::ToString; -use std::sync::Arc; -use std::sync::RwLock; - -#[derive(Clone, Debug)] -pub struct InnerMock { - pub(crate) id: String, - pub(crate) method: String, - pub(crate) path: PathAndQueryMatcher, - pub(crate) headers: Vec<(String, Matcher)>, - pub(crate) body: Matcher, - pub(crate) response: Response, - pub(crate) hits: usize, - pub(crate) expected_hits_at_least: Option, - pub(crate) expected_hits_at_most: Option, -} - -impl fmt::Display for InnerMock { - #[allow(deprecated)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut formatted = String::new(); - - formatted.push_str("\r\n"); - formatted.push_str(&self.method); - formatted.push(' '); - formatted.push_str(&self.path.to_string()); - - for &(ref key, ref value) in &self.headers { - formatted.push_str(key); - formatted.push_str(": "); - formatted.push_str(&value.to_string()); - formatted.push_str("\r\n"); - } - - match self.body { - Matcher::Exact(ref value) - | Matcher::JsonString(ref value) - | Matcher::PartialJsonString(ref value) - | Matcher::Regex(ref value) => { - formatted.push_str(value); - formatted.push_str("\r\n"); - } - Matcher::Binary(_) => { - formatted.push_str("(binary)\r\n"); - } - Matcher::Json(ref json_obj) | Matcher::PartialJson(ref json_obj) => { - formatted.push_str(&json_obj.to_string()); - formatted.push_str("\r\n") - } - Matcher::UrlEncoded(ref field, ref value) => { - formatted.push_str(field); - formatted.push('='); - formatted.push_str(value); - } - Matcher::Missing => formatted.push_str("(missing)\r\n"), - Matcher::AnyOf(..) => formatted.push_str("(any of)\r\n"), - Matcher::AllOf(..) 
=> formatted.push_str("(all of)\r\n"), - Matcher::Any => {} - } - - f.write_str(&formatted) - } -} - -impl PartialEq for InnerMock { - fn eq(&self, other: &Self) -> bool { - self.id == other.id - && self.method == other.method - && self.path == other.path - && self.headers == other.headers - && self.body == other.body - && self.response == other.response - && self.hits == other.hits - } -} - -/// -/// Stores information about a mocked request. Should be initialized via `Server::mock()`. -/// -#[derive(Debug)] -pub struct Mock { - state: Arc>, - inner: InnerMock, - /// Used to warn of mocks missing a `.create()` call. See issue #112 - created: bool, -} - -impl Mock { - pub(crate) fn new>(state: Arc>, method: &str, path: P) -> Mock { - let inner = InnerMock { - id: thread_rng() - .sample_iter(&Alphanumeric) - .map(char::from) - .take(24) - .collect(), - method: method.to_owned().to_uppercase(), - path: PathAndQueryMatcher::Unified(path.into()), - headers: Vec::new(), - body: Matcher::Any, - response: Response::default(), - hits: 0, - expected_hits_at_least: None, - expected_hits_at_most: None, - }; - - Self { - state, - inner, - created: false, - } - } - - /// - /// Allows matching against the query part when responding with a mock. - /// - /// Note that you can also specify the query as part of the path argument - /// in a `mock` call, in which case an exact match will be performed. - /// Any future calls of `Mock#match_query` will override the query matcher. 
- /// - /// ## Example - /// - /// ``` - /// use mockito::Matcher; - /// - /// let mut s = mockito::Server::new(); - /// - /// // This will match requests containing the URL-encoded - /// // query parameter `greeting=good%20day` - /// s.mock("GET", "/test") - /// .match_query(Matcher::UrlEncoded("greeting".into(), "good day".into())) - /// .create(); - /// - /// // This will match requests containing the URL-encoded - /// // query parameters `hello=world` and `greeting=good%20day` - /// s.mock("GET", "/test") - /// .match_query(Matcher::AllOf(vec![ - /// Matcher::UrlEncoded("hello".into(), "world".into()), - /// Matcher::UrlEncoded("greeting".into(), "good day".into()) - /// ])) - /// .create(); - /// - /// // You can achieve similar results with the regex matcher - /// s.mock("GET", "/test") - /// .match_query(Matcher::Regex("hello=world".into())) - /// .create(); - /// ``` - /// - pub fn match_query>(mut self, query: M) -> Self { - let new_path = match &self.inner.path { - PathAndQueryMatcher::Unified(matcher) => { - PathAndQueryMatcher::Split(Box::new(matcher.clone()), Box::new(query.into())) - } - PathAndQueryMatcher::Split(path, _) => { - PathAndQueryMatcher::Split(path.clone(), Box::new(query.into())) - } - }; - - self.inner.path = new_path; - - self - } - - /// - /// Allows matching a particular request header when responding with a mock. - /// - /// When matching a request, the field letter case is ignored. 
- /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// s.mock("GET", "/").match_header("content-type", "application/json"); - /// ``` - /// - /// Like most other `Mock` methods, it allows chanining: - /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// s.mock("GET", "/") - /// .match_header("content-type", "application/json") - /// .match_header("authorization", "password"); - /// ``` - /// - pub fn match_header>(mut self, field: &str, value: M) -> Self { - self.inner - .headers - .push((field.to_owned().to_lowercase(), value.into())); - - self - } - - /// - /// Allows matching a particular request body when responding with a mock. - /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// s.mock("POST", "/").match_body(r#"{"hello": "world"}"#).with_body("json").create(); - /// s.mock("POST", "/").match_body("hello=world").with_body("form").create(); - /// - /// // Requests passing `{"hello": "world"}` inside the body will be responded with "json". - /// // Requests passing `hello=world` inside the body will be responded with "form". 
- /// - /// // Create a temporary file - /// use std::env; - /// use std::fs::File; - /// use std::io::Write; - /// use std::path::Path; - /// use rand; - /// use rand::Rng; - /// - /// let random_bytes: Vec = (0..1024).map(|_| rand::random::()).collect(); - /// - /// let mut tmp_file = env::temp_dir(); - /// tmp_file.push("test_file.txt"); - /// let mut f_write = File::create(tmp_file.clone()).unwrap(); - /// f_write.write_all(random_bytes.as_slice()).unwrap(); - /// let mut f_read = File::open(tmp_file.clone()).unwrap(); - /// - /// - /// // the following are equivalent ways of defining a mock matching - /// // a binary payload - /// s.mock("POST", "/").match_body(tmp_file.as_path()).create(); - /// s.mock("POST", "/").match_body(random_bytes).create(); - /// s.mock("POST", "/").match_body(&mut f_read).create(); - /// ``` - /// - pub fn match_body>(mut self, body: M) -> Self { - self.inner.body = body.into(); - - self - } - - /// - /// Sets the status code of the mock response. The default status code is 200. - /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// s.mock("GET", "/").with_status(201); - /// ``` - /// - #[track_caller] - pub fn with_status(mut self, status: usize) -> Self { - self.inner.response.status = StatusCode::from_u16(status as u16) - .map_err(|_| Error::new_with_context(ErrorKind::InvalidStatusCode, status)) - .unwrap(); - - self - } - - /// - /// Sets a header of the mock response. - /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// s.mock("GET", "/").with_header("content-type", "application/json"); - /// ``` - /// - pub fn with_header(mut self, field: &str, value: &str) -> Self { - self.inner - .response - .headers - .push((field.to_owned(), value.to_owned())); - - self - } - - /// - /// Sets the body of the mock response. Its `Content-Length` is handled automatically. 
- /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// s.mock("GET", "/").with_body("hello world"); - /// ``` - /// - pub fn with_body>(mut self, body: StrOrBytes) -> Self { - self.inner.response.body = Body::Bytes(body.as_ref().to_owned()); - self - } - - /// - /// Sets the body of the mock response dynamically. The response will use chunked transfer encoding. - /// - /// The function must be thread-safe. If it's a closure, it can't be borrowing its context. - /// Use `move` closures and `Arc` to share any data. - /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// s.mock("GET", "/").with_chunked_body(|w| w.write_all(b"hello world")); - /// ``` - /// - pub fn with_chunked_body( - mut self, - callback: impl Fn(&mut dyn io::Write) -> io::Result<()> + Send + Sync + 'static, - ) -> Self { - self.inner.response.body = Body::FnWithWriter(Arc::new(callback)); - self - } - - /// - /// **DEPRECATED:** Replaced by `Mock::with_chunked_body`. - /// - #[deprecated(since = "1.0.0", note = "Use `Mock::with_chunked_body` instead")] - pub fn with_body_from_fn( - self, - callback: impl Fn(&mut dyn io::Write) -> io::Result<()> + Send + Sync + 'static, - ) -> Self { - self.with_chunked_body(callback) - } - - /// - /// Sets the body of the mock response dynamically while exposing the request object. - /// - /// You can use this method to provide a custom reponse body for every incoming request. - /// - /// The function must be thread-safe. If it's a closure, it can't be borrowing its context. - /// Use `move` closures and `Arc` to share any data. 
- /// - /// ### Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// let _m = s.mock("GET", mockito::Matcher::Any).with_body_from_request(|request| { - /// if request.path() == "/bob" { - /// "hello bob".into() - /// } else if request.path() == "/alice" { - /// "hello alice".into() - /// } else { - /// "hello world".into() - /// } - /// }); - /// ``` - /// - pub fn with_body_from_request( - mut self, - callback: impl Fn(&Request) -> Vec + Send + Sync + 'static, - ) -> Self { - self.inner.response.body = Body::FnWithRequest(Arc::new(callback)); - self - } - - /// - /// Sets the body of the mock response from the contents of a file stored under `path`. - /// Its `Content-Length` is handled automatically. - /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// s.mock("GET", "/").with_body_from_file("tests/files/simple.http"); - /// ``` - /// - #[track_caller] - pub fn with_body_from_file(mut self, path: impl AsRef) -> Self { - self.inner.response.body = Body::Bytes( - std::fs::read(path) - .map_err(|_| Error::new(ErrorKind::FileNotFound)) - .unwrap(), - ); - self - } - - /// - /// Sets the expected amount of requests that this mock is supposed to receive. - /// This is only enforced when calling the `assert` method. - /// Defaults to 1 request. - /// - #[allow(clippy::missing_const_for_fn)] - pub fn expect(mut self, hits: usize) -> Self { - self.inner.expected_hits_at_least = Some(hits); - self.inner.expected_hits_at_most = Some(hits); - self - } - - /// - /// Sets the minimum amount of requests that this mock is supposed to receive. - /// This is only enforced when calling the `assert` method. 
- /// - pub fn expect_at_least(mut self, hits: usize) -> Self { - self.inner.expected_hits_at_least = Some(hits); - if self.inner.expected_hits_at_most.is_some() - && self.inner.expected_hits_at_most < self.inner.expected_hits_at_least - { - self.inner.expected_hits_at_most = None; - } - self - } - - /// - /// Sets the maximum amount of requests that this mock is supposed to receive. - /// This is only enforced when calling the `assert` method. - /// - pub fn expect_at_most(mut self, hits: usize) -> Self { - self.inner.expected_hits_at_most = Some(hits); - if self.inner.expected_hits_at_least.is_some() - && self.inner.expected_hits_at_least > self.inner.expected_hits_at_most - { - self.inner.expected_hits_at_least = None; - } - self - } - - /// - /// Asserts that the expected amount of requests (defaults to 1 request) were performed. - /// - #[track_caller] - pub fn assert(&self) { - let mutex = self.state.clone(); - let state = mutex.read().unwrap(); - if let Some(hits) = state.get_mock_hits(self.inner.id.clone()) { - let matched = self.matched_hits(hits); - let message = if !matched { - let last_request = state.get_last_unmatched_request(); - self.build_assert_message(hits, last_request) - } else { - String::default() - }; - - assert!(matched, "{}", message) - } else { - panic!("could not retrieve enough information about the remote mock") - } - } - - /// - /// Same as `Mock::assert` but async. 
- /// - pub async fn assert_async(&self) { - let mutex = self.state.clone(); - let state = mutex.read().unwrap(); - if let Some(hits) = state.get_mock_hits(self.inner.id.clone()) { - let matched = self.matched_hits(hits); - let message = if !matched { - let last_request = state.get_last_unmatched_request(); - self.build_assert_message(hits, last_request) - } else { - String::default() - }; - - assert!(matched, "{}", message) - } else { - panic!("could not retrieve enough information about the remote mock") - } - } - - /// - /// Returns whether the expected amount of requests (defaults to 1) were performed. - /// - pub fn matched(&self) -> bool { - let mutex = self.state.clone(); - let state = mutex.read().unwrap(); - let Some(hits) = state.get_mock_hits(self.inner.id.clone()) else { - return false; - }; - - self.matched_hits(hits) - } - - /// - /// Same as `Mock::matched` but async. - /// - pub async fn matched_async(&self) -> bool { - let mutex = self.state.clone(); - let state = mutex.read().unwrap(); - let Some(hits) = state.get_mock_hits(self.inner.id.clone()) else { - return false; - }; - - self.matched_hits(hits) - } - - /// - /// Registers the mock to the server - your mock will be served only after calling this method. - /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// s.mock("GET", "/").with_body("hello world").create(); - /// ``` - /// - pub fn create(mut self) -> Mock { - let remote_mock = RemoteMock::new(self.inner.clone()); - let state = self.state.clone(); - let mut state = state.write().unwrap(); - state.mocks.push(remote_mock); - - self.created = true; - - self - } - - /// - /// Same as `Mock::create` but async. 
- /// - pub async fn create_async(mut self) -> Mock { - let remote_mock = RemoteMock::new(self.inner.clone()); - let state = self.state.clone(); - let mut state = state.write().unwrap(); - state.mocks.push(remote_mock); - - self.created = true; - - self - } - - /// - /// Removes the mock from the server. - /// - pub fn remove(&self) { - let mutex = self.state.clone(); - let mut state = mutex.write().unwrap(); - state.remove_mock(self.inner.id.clone()); - } - - /// - /// Same as `Mock::remove` but async. - /// - pub async fn remove_async(&self) { - let mutex = self.state.clone(); - let mut state = mutex.write().unwrap(); - state.remove_mock(self.inner.id.clone()); - } - - fn matched_hits(&self, hits: usize) -> bool { - match ( - self.inner.expected_hits_at_least, - self.inner.expected_hits_at_most, - ) { - (Some(min), Some(max)) => hits >= min && hits <= max, - (Some(min), None) => hits >= min, - (None, Some(max)) => hits <= max, - (None, None) => hits == 1, - } - } - - fn build_assert_message(&self, hits: usize, last_request: Option) -> String { - let mut message = match ( - self.inner.expected_hits_at_least, - self.inner.expected_hits_at_most, - ) { - (Some(min), Some(max)) if min == max => format!( - "\n> Expected {} request(s) to:\n{}\n...but received {}\n\n", - min, self, hits - ), - (Some(min), Some(max)) => format!( - "\n> Expected between {} and {} request(s) to:\n{}\n...but received {}\n\n", - min, max, self, hits - ), - (Some(min), None) => format!( - "\n> Expected at least {} request(s) to:\n{}\n...but received {}\n\n", - min, self, hits - ), - (None, Some(max)) => format!( - "\n> Expected at most {} request(s) to:\n{}\n...but received {}\n\n", - max, self, hits - ), - (None, None) => format!( - "\n> Expected 1 request(s) to:\n{}\n...but received {}\n\n", - self, hits - ), - }; - - if let Some(last_request) = last_request { - message.push_str(&format!( - "> The last unmatched request was:\n{}\n", - last_request - )); - - let difference = 
diff::compare(&self.to_string(), &last_request); - message.push_str(&format!("> Difference:\n{}\n", difference)); - } - - message - } -} - -impl Drop for Mock { - fn drop(&mut self) { - if !self.created { - log::warn!("Missing .create() call on mock {}", self); - } - } -} - -impl fmt::Display for Mock { - #[allow(deprecated)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut formatted = String::new(); - formatted.push_str(&self.inner.to_string()); - f.write_str(&formatted) - } -} - -impl PartialEq for Mock { - fn eq(&self, other: &Self) -> bool { - self.inner == other.inner - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/src/request.rs s390-tools-2.33.1/rust-vendor/mockito/src/request.rs --- s390-tools-2.31.0/rust-vendor/mockito/src/request.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/src/request.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -use crate::{Error, ErrorKind}; -use hyper::body; -use hyper::body::Buf; -use hyper::Body as HyperBody; -use hyper::Request as HyperRequest; - -/// -/// Stores a HTTP request -/// -#[derive(Debug)] -pub struct Request { - inner: HyperRequest, - body: Option>, -} - -impl Request { - pub(crate) fn new(request: HyperRequest) -> Self { - Request { - inner: request, - body: None, - } - } - - /// The HTTP method - pub fn method(&self) -> &str { - self.inner.method().as_ref() - } - - /// The path excluding the query part - pub fn path(&self) -> &str { - self.inner.uri().path() - } - - /// The path including the query part - pub fn path_and_query(&self) -> &str { - self.inner - .uri() - .path_and_query() - .map(|pq| pq.as_str()) - .unwrap_or("") - } - - /// Retrieves all the header values for the given header field name - pub fn header(&self, header_name: &str) -> Vec<&str> { - self.inner - .headers() - .get_all(header_name) - .iter() - .map(|item| item.to_str().unwrap()) - .collect::>() - } - - /// Checks whether the provided header field exists - pub fn 
has_header(&self, header_name: &str) -> bool { - self.inner.headers().contains_key(header_name) - } - - /// Returns the request body or an error, if the body hasn't been read - /// up to this moment. - pub fn body(&self) -> Result<&Vec, Error> { - self.body - .as_ref() - .ok_or_else(|| Error::new(ErrorKind::RequestBodyFailure)) - } - - /// Reads the body (if it hasn't been read already) and returns it - pub(crate) async fn read_body(&mut self) -> &Vec { - if self.body.is_none() { - let raw_body = self.inner.body_mut(); - let mut buf = body::aggregate(raw_body) - .await - .map_err(|err| Error::new_with_context(ErrorKind::RequestBodyFailure, err)) - .unwrap(); - let bytes = buf.copy_to_bytes(buf.remaining()).to_vec(); - self.body = Some(bytes); - } - - self.body.as_ref().unwrap() - } - - pub(crate) fn formatted(&self) -> String { - let mut formatted = format!( - "\r\n{} {}\r\n", - &self.inner.method(), - &self - .inner - .uri() - .path_and_query() - .map(|pq| pq.as_str()) - .unwrap_or("") - ); - - for (key, value) in self.inner.headers() { - formatted.push_str(&format!( - "{}: {}\r\n", - key, - value.to_str().unwrap_or("") - )); - } - - if let Some(body) = &self.body { - if !body.is_empty() { - formatted.push_str(&format!("{}\r\n", &String::from_utf8_lossy(body))); - } - } - - formatted - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/src/response.rs s390-tools-2.33.1/rust-vendor/mockito/src/response.rs --- s390-tools-2.31.0/rust-vendor/mockito/src/response.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/src/response.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,127 +0,0 @@ -use crate::error::Error; -use crate::Request; -use futures::stream::Stream; -use hyper::StatusCode; -use std::fmt; -use std::io; -use std::sync::Arc; -use std::task::Poll; -use std::thread; -use tokio::sync::mpsc; - -#[derive(Clone, Debug, PartialEq)] -pub(crate) struct Response { - pub status: StatusCode, - pub headers: Vec<(String, String)>, - pub body: 
Body, -} - -type BodyFnWithWriter = dyn Fn(&mut dyn io::Write) -> io::Result<()> + Send + Sync + 'static; -type BodyFnWithRequest = dyn Fn(&Request) -> Vec + Send + Sync + 'static; - -#[derive(Clone)] -pub(crate) enum Body { - Bytes(Vec), - FnWithWriter(Arc), - FnWithRequest(Arc), -} - -impl fmt::Debug for Body { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Body::Bytes(ref b) => b.fmt(f), - Body::FnWithWriter(_) => f.write_str(""), - Body::FnWithRequest(_) => f.write_str(""), - } - } -} - -impl PartialEq for Body { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Body::Bytes(ref a), Body::Bytes(ref b)) => a == b, - (Body::FnWithWriter(ref a), Body::FnWithWriter(ref b)) => std::ptr::eq( - a.as_ref() as *const BodyFnWithWriter as *const u8, - b.as_ref() as *const BodyFnWithWriter as *const u8, - ), - (Body::FnWithRequest(ref a), Body::FnWithRequest(ref b)) => std::ptr::eq( - a.as_ref() as *const BodyFnWithRequest as *const u8, - b.as_ref() as *const BodyFnWithRequest as *const u8, - ), - _ => false, - } - } -} - -impl Default for Response { - fn default() -> Self { - Self { - status: StatusCode::OK, - headers: vec![("connection".into(), "close".into())], - body: Body::Bytes(Vec::new()), - } - } -} - -struct ChunkedStreamWriter { - sender: mpsc::Sender>>, -} - -impl io::Write for ChunkedStreamWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.sender - .blocking_send(Ok(buf.into())) - .map_err(|_| io::ErrorKind::BrokenPipe)?; - Ok(buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -pub(crate) struct ChunkedStream { - receiver: Option>>>, - thread: Option>, -} - -impl ChunkedStream { - pub fn new(body_fn: Arc) -> Result { - let (sender, receiver) = mpsc::channel(1); - let join = thread::Builder::new() - .name(format!("mockito::body_fn_{:p}", body_fn)) - .spawn(move || { - let mut writer = ChunkedStreamWriter { sender }; - if let Err(e) = body_fn(&mut writer) { - let _ = 
writer.sender.blocking_send(Err(e)); - } - }) - .map_err(|e| Error::new_with_context(crate::ErrorKind::ResponseFailure, e))?; - Ok(Self { - receiver: Some(receiver), - thread: Some(join), - }) - } -} - -impl Drop for ChunkedStream { - fn drop(&mut self) { - // must close the channel first - let _ = self.receiver.take(); - let _ = self.thread.take().map(|t| t.join()); - } -} - -impl Stream for ChunkedStream { - type Item = io::Result>; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - self.receiver - .as_mut() - .map(move |r| r.poll_recv(cx)) - .unwrap_or(Poll::Ready(None)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/src/server_pool.rs s390-tools-2.33.1/rust-vendor/mockito/src/server_pool.rs --- s390-tools-2.31.0/rust-vendor/mockito/src/server_pool.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/src/server_pool.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,88 +0,0 @@ -use crate::Server; -use crate::{Error, ErrorKind}; -use std::collections::VecDeque; -use std::ops::{Deref, DerefMut, Drop}; -use std::sync::Mutex; -use tokio::sync::{Semaphore, SemaphorePermit}; - -// macOS has small default ulimits. Sync it with test_server_pool() -const DEFAULT_POOL_SIZE: usize = if cfg!(target_os = "macos") { 20 } else { 50 }; -pub(crate) static SERVER_POOL: ServerPool = ServerPool::new(DEFAULT_POOL_SIZE); - -/// -/// A handle around a pooled `Server` object which dereferences to `Server`. 
-/// -pub struct ServerGuard { - server: Option, - _permit: SemaphorePermit<'static>, -} - -impl ServerGuard { - fn new(server: Server, _permit: SemaphorePermit<'static>) -> ServerGuard { - ServerGuard { - server: Some(server), - _permit, - } - } -} - -impl Deref for ServerGuard { - type Target = Server; - - fn deref(&self) -> &Self::Target { - self.server.as_ref().unwrap() - } -} - -impl DerefMut for ServerGuard { - fn deref_mut(&mut self) -> &mut Self::Target { - self.server.as_mut().unwrap() - } -} - -impl Drop for ServerGuard { - fn drop(&mut self) { - if let Some(server) = self.server.take() { - // the permit is still held when recycling, - // so the next acquire will already see the recycled server - SERVER_POOL.recycle(server); - } - } -} - -pub(crate) struct ServerPool { - semaphore: Semaphore, - free_list: Mutex>, -} - -impl ServerPool { - const fn new(max_size: usize) -> ServerPool { - ServerPool { - semaphore: Semaphore::const_new(max_size), - free_list: Mutex::new(VecDeque::new()), - } - } - - pub(crate) async fn get_async(&'static self) -> Result { - // number of active permits limits the number of servers created - let permit = self - .semaphore - .acquire() - .await - .map_err(|err| Error::new_with_context(ErrorKind::Deadlock, err))?; - - // be careful not to lock locks in match - it extends scope of temporaries - let recycled = self.free_list.lock().unwrap().pop_front(); - let server = match recycled { - Some(server) => server, - None => Server::try_new_with_port_async(0).await?, - }; - - Ok(ServerGuard::new(server, permit)) - } - - fn recycle(&self, mut server: Server) { - server.reset(); - self.free_list.lock().unwrap().push_back(server); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/mockito/src/server.rs s390-tools-2.33.1/rust-vendor/mockito/src/server.rs --- s390-tools-2.31.0/rust-vendor/mockito/src/server.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/mockito/src/server.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,430 
+0,0 @@ -use crate::mock::InnerMock; -use crate::request::Request; -use crate::response::{Body as ResponseBody, ChunkedStream}; -use crate::ServerGuard; -use crate::{Error, ErrorKind, Matcher, Mock}; -use hyper::server::conn::Http; -use hyper::service::service_fn; -use hyper::{Body, Request as HyperRequest, Response, StatusCode}; -use std::fmt; -use std::net::SocketAddr; -use std::ops::Drop; -use std::sync::{mpsc, Arc, RwLock}; -use std::thread; -use tokio::net::TcpListener; -use tokio::runtime; -use tokio::task::{spawn_local, LocalSet}; - -#[derive(Clone, Debug)] -pub(crate) struct RemoteMock { - pub(crate) inner: InnerMock, -} - -impl RemoteMock { - pub(crate) fn new(inner: InnerMock) -> Self { - RemoteMock { inner } - } - - fn matches(&self, other: &mut Request) -> bool { - self.method_matches(other) - && self.path_matches(other) - && self.headers_match(other) - && self.body_matches(other) - } - - fn method_matches(&self, request: &Request) -> bool { - self.inner.method.as_str() == request.method() - } - - fn path_matches(&self, request: &Request) -> bool { - self.inner.path.matches_value(request.path_and_query()) - } - - fn headers_match(&self, request: &Request) -> bool { - self.inner - .headers - .iter() - .all(|&(ref field, ref expected)| expected.matches_values(&request.header(field))) - } - - fn body_matches(&self, request: &mut Request) -> bool { - let body = request.body().unwrap(); - let safe_body = &String::from_utf8_lossy(body); - - self.inner.body.matches_value(safe_body) || self.inner.body.matches_binary_value(body) - } - - #[allow(clippy::missing_const_for_fn)] - fn is_missing_hits(&self) -> bool { - match ( - self.inner.expected_hits_at_least, - self.inner.expected_hits_at_most, - ) { - (Some(_at_least), Some(at_most)) => self.inner.hits < at_most, - (Some(at_least), None) => self.inner.hits < at_least, - (None, Some(at_most)) => self.inner.hits < at_most, - (None, None) => self.inner.hits < 1, - } - } -} - -#[derive(Debug)] -pub(crate) struct 
State { - pub(crate) mocks: Vec, - pub(crate) unmatched_requests: Vec, -} - -impl State { - fn new() -> Self { - State { - mocks: vec![], - unmatched_requests: vec![], - } - } - - pub(crate) fn get_mock_hits(&self, mock_id: String) -> Option { - self.mocks - .iter() - .find(|remote_mock| remote_mock.inner.id == mock_id) - .map(|remote_mock| remote_mock.inner.hits) - } - - pub(crate) fn remove_mock(&mut self, mock_id: String) -> bool { - if let Some(pos) = self - .mocks - .iter() - .position(|remote_mock| remote_mock.inner.id == mock_id) - { - self.mocks.remove(pos); - return true; - } - - false - } - - pub(crate) fn get_last_unmatched_request(&self) -> Option { - self.unmatched_requests.last().map(|req| req.formatted()) - } -} - -/// -/// One instance of the mock server. -/// -/// Mockito uses a server pool to manage running servers. Once the pool reaches capacity, -/// new requests will have to wait for a free server. The size of the server pool -/// is set to 50. -/// -/// Most of the times, you should initialize new servers with `Server::new`, which fetches -/// the next available instance from the pool: -/// -/// ``` -/// let mut server = mockito::Server::new(); -/// ``` -/// -/// If for any reason you'd like to bypass the server pool, you can use `Server::new_with_port`: -/// -/// ``` -/// let mut server = mockito::Server::new_with_port(0); -/// ``` -/// -#[derive(Debug)] -pub struct Server { - address: String, - state: Arc>, -} - -impl Server { - /// - /// Fetches a new mock server from the server pool. - /// - /// This method will panic on failure. - /// - /// If for any reason you'd like to bypass the server pool, you can use `Server::new_with_port`: - /// - #[allow(clippy::new_ret_no_self)] - #[track_caller] - pub fn new() -> ServerGuard { - Server::try_new().unwrap() - } - - /// - /// Same as `Server::new` but async. 
- /// - pub async fn new_async() -> ServerGuard { - Server::try_new_async().await.unwrap() - } - - /// - /// Same as `Server::new` but won't panic on failure. - /// - #[track_caller] - pub(crate) fn try_new() -> Result { - futures::executor::block_on(async { Server::try_new_async().await }) - } - - /// - /// Same as `Server::try_new` but async. - /// - pub(crate) async fn try_new_async() -> Result { - let server = crate::server_pool::SERVER_POOL - .get_async() - .await - .map_err(|err| Error::new_with_context(ErrorKind::ServerFailure, err))?; - - Ok(server) - } - - /// - /// Starts a new server on a given port. If the port is set to `0`, a random available - /// port will be assigned. Note that **this call bypasses the server pool**. - /// - /// This method will panic on failure. - /// - #[track_caller] - pub fn new_with_port(port: u16) -> Server { - Server::try_new_with_port(port).unwrap() - } - - /// - /// Same as `Server::new_with_port` but async. - /// - pub async fn new_with_port_async(port: u16) -> Server { - Server::try_new_with_port_async(port).await.unwrap() - } - - /// - /// Same as `Server::new_with_port` but won't panic on failure. - /// - #[track_caller] - pub(crate) fn try_new_with_port(port: u16) -> Result { - let state = Arc::new(RwLock::new(State::new())); - let address = SocketAddr::from(([127, 0, 0, 1], port)); - let (address_sender, address_receiver) = mpsc::channel::(); - let runtime = runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("Cannot build local tokio runtime"); - - let state_clone = state.clone(); - thread::spawn(move || { - let server = Server::bind_server(address, address_sender, state_clone); - LocalSet::new().block_on(&runtime, server).unwrap(); - }); - - let address = address_receiver - .recv() - .map_err(|err| Error::new_with_context(ErrorKind::ServerFailure, err))?; - - let server = Server { address, state }; - - Ok(server) - } - - /// - /// Same as `Server::try_new_with_port` but async. 
- /// - pub(crate) async fn try_new_with_port_async(port: u16) -> Result { - let state = Arc::new(RwLock::new(State::new())); - let address = SocketAddr::from(([127, 0, 0, 1], port)); - let (address_sender, address_receiver) = mpsc::channel::(); - let runtime = runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("Cannot build local tokio runtime"); - - let state_clone = state.clone(); - thread::spawn(move || { - let server = Server::bind_server(address, address_sender, state_clone); - LocalSet::new().block_on(&runtime, server).unwrap(); - }); - - let address = address_receiver - .recv() - .map_err(|err| Error::new_with_context(ErrorKind::ServerFailure, err))?; - - let server = Server { address, state }; - - Ok(server) - } - - async fn bind_server( - address: SocketAddr, - address_sender: mpsc::Sender, - state: Arc>, - ) -> Result<(), Error> { - let listener = TcpListener::bind(address) - .await - .map_err(|err| Error::new_with_context(ErrorKind::ServerFailure, err))?; - - let address = listener - .local_addr() - .map_err(|err| Error::new_with_context(ErrorKind::ServerFailure, err))?; - - address_sender.send(address.to_string()).unwrap(); - - while let Ok((stream, _)) = listener.accept().await { - let mutex = state.clone(); - - spawn_local(async move { - let _ = Http::new() - .serve_connection( - stream, - service_fn(move |request: HyperRequest| { - handle_request(request, mutex.clone()) - }), - ) - .await; - }); - } - - Ok(()) - } - - /// - /// Initializes a mock with the given HTTP `method` and `path`. - /// - /// The mock is enabled on the server only after calling the `Mock::create` method. 
- /// - /// ## Example - /// - /// ``` - /// let mut s = mockito::Server::new(); - /// - /// let _m1 = s.mock("GET", "/"); - /// let _m2 = s.mock("POST", "/users"); - /// let _m3 = s.mock("DELETE", "/users?id=1"); - /// ``` - /// - pub fn mock>(&mut self, method: &str, path: P) -> Mock { - Mock::new(self.state.clone(), method, path) - } - - /// - /// The URL of the mock server (including the protocol). - /// - pub fn url(&self) -> String { - format!("http://{}", self.address) - } - - /// - /// The host and port of the mock server. - /// Can be used with `std::net::TcpStream`. - /// - pub fn host_with_port(&self) -> String { - self.address.clone() - } - - /// - /// Removes all the mocks stored on the server. - /// - pub fn reset(&mut self) { - let state = self.state.clone(); - let mut state = state.write().unwrap(); - state.mocks.clear(); - state.unmatched_requests.clear(); - } - - /// - /// **DEPRECATED:** Use `Server::reset` instead. The implementation is not async any more. - /// - #[deprecated(since = "1.0.1", note = "Use `Server::reset` instead")] - pub async fn reset_async(&mut self) { - let state = self.state.clone(); - let mut state = state.write().unwrap(); - state.mocks.clear(); - state.unmatched_requests.clear(); - } -} - -impl Drop for Server { - fn drop(&mut self) { - self.reset(); - } -} - -impl fmt::Display for Server { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(&format!("server {}", self.host_with_port())) - } -} - -async fn handle_request( - hyper_request: HyperRequest, - state: Arc>, -) -> Result, Error> { - let mut request = Request::new(hyper_request); - request.read_body().await; - log::debug!("Request received: {}", request.formatted()); - - let mutex = state.clone(); - let mut state = mutex.write().unwrap(); - let mut matching_mocks: Vec<&mut RemoteMock> = vec![]; - - for mock in state.mocks.iter_mut() { - if mock.matches(&mut request) { - matching_mocks.push(mock); - } - } - - let maybe_missing_hits = 
matching_mocks.iter_mut().find(|m| m.is_missing_hits()); - - let mock = match maybe_missing_hits { - Some(m) => Some(m), - None => matching_mocks.last_mut(), - }; - - if let Some(mock) = mock { - log::debug!("Mock found"); - mock.inner.hits += 1; - respond_with_mock(request, mock) - } else { - log::debug!("Mock not found"); - state.unmatched_requests.push(request); - respond_with_mock_not_found() - } -} - -fn respond_with_mock(request: Request, mock: &RemoteMock) -> Result, Error> { - let status: StatusCode = mock.inner.response.status; - let mut response = Response::builder().status(status); - - for (name, value) in mock.inner.response.headers.iter() { - response = response.header(name, value); - } - - let body = if request.method() != "HEAD" { - match &mock.inner.response.body { - ResponseBody::Bytes(bytes) => { - if !request.has_header("content-length") { - response = response.header("content-length", bytes.len()); - } - Body::from(bytes.clone()) - } - ResponseBody::FnWithWriter(body_fn) => { - let stream = ChunkedStream::new(Arc::clone(body_fn))?; - Body::wrap_stream(stream) - } - ResponseBody::FnWithRequest(body_fn) => { - let bytes = body_fn(&request); - Body::from(bytes) - } - } - } else { - Body::empty() - }; - - let response: Response = response - .body(body) - .map_err(|err| Error::new_with_context(ErrorKind::ResponseFailure, err))?; - - Ok(response) -} - -fn respond_with_mock_not_found() -> Result, Error> { - let response: Response = Response::builder() - .status(StatusCode::NOT_IMPLEMENTED) - .body(Body::empty()) - .map_err(|err| Error::new_with_context(ErrorKind::ResponseFailure, err))?; - - Ok(response) -} diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/num_cpus/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/num_cpus/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ 
-{"files":{},"package":"4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/Cargo.lock s390-tools-2.33.1/rust-vendor/num_cpus/Cargo.lock --- s390-tools-2.31.0/rust-vendor/num_cpus/Cargo.lock 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "hermit-abi" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" - -[[package]] -name = "libc" -version = "0.2.65" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a31a0627fdf1f6a39ec0dd577e101440b7db22672c0901fe00a9a6fbb5c24e8" - -[[package]] -name = "num_cpus" -version = "1.16.0" -dependencies = [ - "hermit-abi", - "libc", -] diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/Cargo.toml s390-tools-2.33.1/rust-vendor/num_cpus/Cargo.toml --- s390-tools-2.31.0/rust-vendor/num_cpus/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,32 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -name = "num_cpus" -version = "1.16.0" -authors = ["Sean McArthur "] -description = "Get the number of CPUs on a machine." 
-documentation = "https://docs.rs/num_cpus" -readme = "README.md" -keywords = [ - "cpu", - "cpus", - "cores", -] -categories = ["hardware-support"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/seanmonstar/num_cpus" - -[target."cfg(not(windows))".dependencies.libc] -version = "0.2.26" - -[target."cfg(target_os = \"hermit\")".dependencies.hermit-abi] -version = "0.3.0" diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/CHANGELOG.md s390-tools-2.33.1/rust-vendor/num_cpus/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/num_cpus/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,160 +0,0 @@ -## v1.16.0 - -### Features - -- add support for AIX operating system - -### Fixes - -- update hermit-abi to 0.3.0 - -## v1.15.0 - -### Fixes - -- update hermit-abi - -## v1.14.0 - -### Features - -- add support for cgroups v2 -- Skip reading files in Miri - -## v1.13.1 - -### Fixes - -- fix parsing zero or multiple optional fields in cgroup mountinfo. - -## v1.13.0 - -### Features - -- add Linux cgroups support when calling `get()`. - -## v1.12.0 - -#### Fixes - -- fix `get` on OpenBSD to ignore offline CPUs -- implement `get_physical` on OpenBSD - -## v1.11.1 - -#### Fixes - -- Use `mem::zeroed` instead of `mem::uninitialized`. - -## v1.11.0 - -#### Features - -- add `hermit` target OS support -- removes `bitrig` support - -#### Fixes - -- fix `get_physical` count with AMD hyperthreading. 
- -## v1.10.1 - -#### Fixes - -- improve `haiku` CPU detection - -## v1.10.0 - -#### Features - -- add `illumos` target OS support -- add default fallback if target is unknown to `1` - -## v1.9.0 - -#### Features - -- add `sgx` target env support - -## v1.8.0 - -#### Features - -- add `wasm-unknown-unknown` target support - -## v1.7.0 - -#### Features - -- add `get_physical` support for macOS - -#### Fixes - -- use `_SC_NPROCESSORS_CONF` on Unix targets - -### v1.6.2 - -#### Fixes - -- revert 1.6.1 for now - -### v1.6.1 - -#### Fixes - -- fixes sometimes incorrect num on Android/ARM Linux (#45) - -## v1.6.0 - -#### Features - -- `get_physical` gains Windows support - -### v1.5.1 - -#### Fixes - -- fix `get` to return 1 if `sysconf(_SC_NPROCESSORS_ONLN)` failed - -## v1.5.0 - -#### Features - -- `get()` now checks `sched_affinity` on Linux - -## v1.4.0 - -#### Features - -- add `haiku` target support - -## v1.3.0 - -#### Features - -- add `redox` target support - -### v1.2.1 - -#### Fixes - -- fixes `get_physical` count (454ff1b) - -## v1.2.0 - -#### Features - -- add `emscripten` target support -- add `fuchsia` target support - -## v1.1.0 - -#### Features - -- added `get_physical` function to return number of physical CPUs found - -# v1.0.0 - -#### Features - -- `get` function returns number of CPUs (physical and virtual) of current platform diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/ci/cgroups/Dockerfile s390-tools-2.33.1/rust-vendor/num_cpus/ci/cgroups/Dockerfile --- s390-tools-2.31.0/rust-vendor/num_cpus/ci/cgroups/Dockerfile 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/ci/cgroups/Dockerfile 1970-01-01 01:00:00.000000000 +0100 @@ -1,9 +0,0 @@ -FROM rust:latest - -WORKDIR /usr/num_cpus - -COPY . . 
- -RUN cargo build - -CMD [ "cargo", "test", "--lib" ] diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/CONTRIBUTING.md s390-tools-2.33.1/rust-vendor/num_cpus/CONTRIBUTING.md --- s390-tools-2.31.0/rust-vendor/num_cpus/CONTRIBUTING.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/CONTRIBUTING.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,16 +0,0 @@ -# Contributing - -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any -additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/examples/values.rs s390-tools-2.33.1/rust-vendor/num_cpus/examples/values.rs --- s390-tools-2.31.0/rust-vendor/num_cpus/examples/values.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/examples/values.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,6 +0,0 @@ -extern crate num_cpus; - -fn main() { - println!("Logical CPUs: {}", num_cpus::get()); - println!("Physical CPUs: {}", num_cpus::get_physical()); -} diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/ceil/cpu.cfs_period_us s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/ceil/cpu.cfs_period_us --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/ceil/cpu.cfs_period_us 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/ceil/cpu.cfs_period_us 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -100000 - diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/ceil/cpu.cfs_quota_us 
s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/ceil/cpu.cfs_quota_us --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/ceil/cpu.cfs_quota_us 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/ceil/cpu.cfs_quota_us 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -150000 - diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/good/cpu.cfs_period_us s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/good/cpu.cfs_period_us --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/good/cpu.cfs_period_us 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/good/cpu.cfs_period_us 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -100000 - diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/good/cpu.cfs_quota_us s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/good/cpu.cfs_quota_us --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/good/cpu.cfs_quota_us 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/good/cpu.cfs_quota_us 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -600000 - diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/zero-period/cpu.cfs_period_us s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/zero-period/cpu.cfs_period_us --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/zero-period/cpu.cfs_period_us 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/zero-period/cpu.cfs_period_us 1970-01-01 01:00:00.000000000 +0100 @@ -1,2 +0,0 @@ -0 - diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/zero-period/cpu.cfs_quota_us s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/zero-period/cpu.cfs_quota_us --- 
s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/cgroups/zero-period/cpu.cfs_quota_us 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/cgroups/zero-period/cpu.cfs_quota_us 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -600000 diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/cgroup s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/cgroup --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/cgroup 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/cgroup 1970-01-01 01:00:00.000000000 +0100 @@ -1,3 +0,0 @@ -12:perf_event:/ -11:cpu,cpuacct:/ -3:devices:/user.slice diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -1 0 8:1 / / rw,noatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro,data=reordered -2 1 0:1 / /dev rw,relatime shared:2 - devtmpfs udev rw,size=10240k,nr_inodes=16487629,mode=755 -3 1 0:2 / /proc rw,nosuid,nodev,noexec,relatime shared:3 - proc proc rw -4 1 0:3 / /sys rw,nosuid,nodev,noexec,relatime shared:4 - sysfs sysfs rw -5 4 0:4 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:5 - tmpfs tmpfs ro,mode=755 -6 5 0:5 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:6 - cgroup cgroup rw,cpuset -7 5 0:6 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:7 - cgroup cgroup rw,cpu,cpuacct -8 5 0:7 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:8 - cgroup cgroup rw,memory diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo_multi_opt 
s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo_multi_opt --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo_multi_opt 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo_multi_opt 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -1 0 8:1 / / rw,noatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro,data=reordered -2 1 0:1 / /dev rw,relatime shared:2 - devtmpfs udev rw,size=10240k,nr_inodes=16487629,mode=755 -3 1 0:2 / /proc rw,nosuid,nodev,noexec,relatime shared:3 - proc proc rw -4 1 0:3 / /sys rw,nosuid,nodev,noexec,relatime shared:4 - sysfs sysfs rw -5 4 0:4 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:5 - tmpfs tmpfs ro,mode=755 -6 5 0:5 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:6 - cgroup cgroup rw,cpuset -7 5 0:6 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:7 shared:8 shared:9 - cgroup cgroup rw,cpu,cpuacct -8 5 0:7 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:8 - cgroup cgroup rw,memory diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo_zero_opt s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo_zero_opt --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo_zero_opt 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups/proc/cgroups/mountinfo_zero_opt 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -1 0 8:1 / / rw,noatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro,data=reordered -2 1 0:1 / /dev rw,relatime shared:2 - devtmpfs udev rw,size=10240k,nr_inodes=16487629,mode=755 -3 1 0:2 / /proc rw,nosuid,nodev,noexec,relatime shared:3 - proc proc rw -4 1 0:3 / /sys rw,nosuid,nodev,noexec,relatime shared:4 - sysfs sysfs rw -5 4 0:4 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:5 - tmpfs tmpfs ro,mode=755 -6 5 0:5 / 
/sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:6 - cgroup cgroup rw,cpuset -7 5 0:6 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu,cpuacct -8 5 0:7 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:8 - cgroup cgroup rw,memory diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/ceil/cpu.max s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/ceil/cpu.max --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/ceil/cpu.max 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/ceil/cpu.max 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -150000 100000 diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/good/cpu.max s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/good/cpu.max --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/good/cpu.max 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/good/cpu.max 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -600000 100000 diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/zero-period/cpu.max s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/zero-period/cpu.max --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/zero-period/cpu.max 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/cgroups/zero-period/cpu.max 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -600000 0 diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/cgroup s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/cgroup --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/cgroup 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/cgroup 1970-01-01 01:00:00.000000000 
+0100 @@ -1,2 +0,0 @@ -12::/ -3::/user.slice diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/cgroup_multi s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/cgroup_multi --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/cgroup_multi 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/cgroup_multi 1970-01-01 01:00:00.000000000 +0100 @@ -1,3 +0,0 @@ -12::/ -11:cpu,cpuacct:/ -3::/user.slice diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/mountinfo s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/mountinfo --- s390-tools-2.31.0/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/mountinfo 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/fixtures/cgroups2/proc/cgroups/mountinfo 1970-01-01 01:00:00.000000000 +0100 @@ -1,5 +0,0 @@ -1 0 8:1 / / rw,noatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro,data=reordered -2 1 0:1 / /dev rw,relatime shared:2 - devtmpfs udev rw,size=10240k,nr_inodes=16487629,mode=755 -3 1 0:2 / /proc rw,nosuid,nodev,noexec,relatime shared:3 - proc proc rw -4 1 0:3 / /sys rw,nosuid,nodev,noexec,relatime shared:4 - sysfs sysfs rw -5 4 0:4 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime shared:5 - cgroup2 cgroup2 rw,nsdelegate,memory_recursiveprot diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/num_cpus/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/num_cpus/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/LICENSE-MIT s390-tools-2.33.1/rust-vendor/num_cpus/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/num_cpus/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,20 +0,0 @@ -Copyright (c) 2015 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/README.md s390-tools-2.33.1/rust-vendor/num_cpus/README.md --- s390-tools-2.31.0/rust-vendor/num_cpus/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -# num_cpus - -[![crates.io](https://img.shields.io/crates/v/num_cpus.svg)](https://crates.io/crates/num_cpus) -[![CI Status](https://github.com/seanmonstar/num_cpus/actions/workflows/ci.yml/badge.svg)](https://github.com/seanmonstar/num_cpus/actions) - -- [Documentation](https://docs.rs/num_cpus) -- [CHANGELOG](CHANGELOG.md) - -Count the number of CPUs on the current machine. - -## Usage - -Add to Cargo.toml: - -```toml -[dependencies] -num_cpus = "1.0" -``` - -In your `main.rs` or `lib.rs`: - -```rust -extern crate num_cpus; - -// count logical cores this process could try to use -let num = num_cpus::get(); -``` diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/src/lib.rs s390-tools-2.33.1/rust-vendor/num_cpus/src/lib.rs --- s390-tools-2.31.0/rust-vendor/num_cpus/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,485 +0,0 @@ -//! A crate with utilities to determine the number of CPUs available on the -//! current system. -//! -//! Sometimes the CPU will exaggerate the number of CPUs it contains, because it can use -//! [processor tricks] to deliver increased performance when there are more threads. This -//! crate provides methods to get both the logical and physical numbers of cores. -//! -//! This information can be used as a guide to how many tasks can be run in parallel. -//! There are many properties of the system architecture that will affect parallelism, -//! for example memory access speeds (for all the caches and RAM) and the physical -//! architecture of the processor, so the number of CPUs should be used as a rough guide -//! only. -//! -//! -//! ## Examples -//! 
-//! Fetch the number of logical CPUs. -//! -//! ``` -//! let cpus = num_cpus::get(); -//! ``` -//! -//! See [`rayon::Threadpool`] for an example of where the number of CPUs could be -//! used when setting up parallel jobs (Where the threadpool example uses a fixed -//! number 8, it could use the number of CPUs). -//! -//! [processor tricks]: https://en.wikipedia.org/wiki/Simultaneous_multithreading -//! [`rayon::ThreadPool`]: https://docs.rs/rayon/1.*/rayon/struct.ThreadPool.html -#![cfg_attr(test, deny(warnings))] -#![deny(missing_docs)] -#![allow(non_snake_case)] - -#[cfg(not(windows))] -extern crate libc; - -#[cfg(target_os = "hermit")] -extern crate hermit_abi; - -#[cfg(target_os = "linux")] -mod linux; -#[cfg(target_os = "linux")] -use linux::{get_num_cpus, get_num_physical_cpus}; - -/// Returns the number of available CPUs of the current system. -/// -/// This function will get the number of logical cores. Sometimes this is different from the number -/// of physical cores (See [Simultaneous multithreading on Wikipedia][smt]). -/// -/// This will always return at least `1`. -/// -/// # Examples -/// -/// ``` -/// let cpus = num_cpus::get(); -/// if cpus > 1 { -/// println!("We are on a multicore system with {} CPUs", cpus); -/// } else { -/// println!("We are on a single core system"); -/// } -/// ``` -/// -/// # Note -/// -/// This will check [sched affinity] on Linux, showing a lower number of CPUs if the current -/// thread does not have access to all the computer's CPUs. -/// -/// This will also check [cgroups], frequently used in containers to constrain CPU usage. -/// -/// [smt]: https://en.wikipedia.org/wiki/Simultaneous_multithreading -/// [sched affinity]: http://www.gnu.org/software/libc/manual/html_node/CPU-Affinity.html -/// [cgroups]: https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt -#[inline] -pub fn get() -> usize { - get_num_cpus() -} - -/// Returns the number of physical cores of the current system. 
-/// -/// This will always return at least `1`. -/// -/// # Note -/// -/// Physical count is supported only on Linux, mac OS and Windows platforms. -/// On other platforms, or if the physical count fails on supported platforms, -/// this function returns the same as [`get()`], which is the number of logical -/// CPUS. -/// -/// # Examples -/// -/// ``` -/// let logical_cpus = num_cpus::get(); -/// let physical_cpus = num_cpus::get_physical(); -/// if logical_cpus > physical_cpus { -/// println!("We have simultaneous multithreading with about {:.2} \ -/// logical cores to 1 physical core.", -/// (logical_cpus as f64) / (physical_cpus as f64)); -/// } else if logical_cpus == physical_cpus { -/// println!("Either we don't have simultaneous multithreading, or our \ -/// system doesn't support getting the number of physical CPUs."); -/// } else { -/// println!("We have less logical CPUs than physical CPUs, maybe we only have access to \ -/// some of the CPUs on our system."); -/// } -/// ``` -/// -/// [`get()`]: fn.get.html -#[inline] -pub fn get_physical() -> usize { - get_num_physical_cpus() -} - - -#[cfg(not(any( - target_os = "linux", - target_os = "windows", - target_os = "macos", - target_os = "openbsd", - target_os = "aix")))] -#[inline] -fn get_num_physical_cpus() -> usize { - // Not implemented, fall back - get_num_cpus() -} - -#[cfg(target_os = "windows")] -fn get_num_physical_cpus() -> usize { - match get_num_physical_cpus_windows() { - Some(num) => num, - None => get_num_cpus() - } -} - -#[cfg(target_os = "windows")] -fn get_num_physical_cpus_windows() -> Option { - // Inspired by https://msdn.microsoft.com/en-us/library/ms683194 - - use std::ptr; - use std::mem; - - #[allow(non_upper_case_globals)] - const RelationProcessorCore: u32 = 0; - - #[repr(C)] - #[allow(non_camel_case_types)] - struct SYSTEM_LOGICAL_PROCESSOR_INFORMATION { - mask: usize, - relationship: u32, - _unused: [u64; 2] - } - - extern "system" { - fn GetLogicalProcessorInformation( - info: 
*mut SYSTEM_LOGICAL_PROCESSOR_INFORMATION, - length: &mut u32 - ) -> u32; - } - - // First we need to determine how much space to reserve. - - // The required size of the buffer, in bytes. - let mut needed_size = 0; - - unsafe { - GetLogicalProcessorInformation(ptr::null_mut(), &mut needed_size); - } - - let struct_size = mem::size_of::() as u32; - - // Could be 0, or some other bogus size. - if needed_size == 0 || needed_size < struct_size || needed_size % struct_size != 0 { - return None; - } - - let count = needed_size / struct_size; - - // Allocate some memory where we will store the processor info. - let mut buf = Vec::with_capacity(count as usize); - - let result; - - unsafe { - result = GetLogicalProcessorInformation(buf.as_mut_ptr(), &mut needed_size); - } - - // Failed for any reason. - if result == 0 { - return None; - } - - let count = needed_size / struct_size; - - unsafe { - buf.set_len(count as usize); - } - - let phys_proc_count = buf.iter() - // Only interested in processor packages (physical processors.) 
- .filter(|proc_info| proc_info.relationship == RelationProcessorCore) - .count(); - - if phys_proc_count == 0 { - None - } else { - Some(phys_proc_count) - } -} - -#[cfg(windows)] -fn get_num_cpus() -> usize { - #[repr(C)] - struct SYSTEM_INFO { - wProcessorArchitecture: u16, - wReserved: u16, - dwPageSize: u32, - lpMinimumApplicationAddress: *mut u8, - lpMaximumApplicationAddress: *mut u8, - dwActiveProcessorMask: *mut u8, - dwNumberOfProcessors: u32, - dwProcessorType: u32, - dwAllocationGranularity: u32, - wProcessorLevel: u16, - wProcessorRevision: u16, - } - - extern "system" { - fn GetSystemInfo(lpSystemInfo: *mut SYSTEM_INFO); - } - - unsafe { - let mut sysinfo: SYSTEM_INFO = std::mem::zeroed(); - GetSystemInfo(&mut sysinfo); - sysinfo.dwNumberOfProcessors as usize - } -} - -#[cfg(any(target_os = "freebsd", - target_os = "dragonfly", - target_os = "netbsd"))] -fn get_num_cpus() -> usize { - use std::ptr; - - let mut cpus: libc::c_uint = 0; - let mut cpus_size = std::mem::size_of_val(&cpus); - - unsafe { - cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint; - } - if cpus < 1 { - let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0]; - unsafe { - libc::sysctl(mib.as_mut_ptr(), - 2, - &mut cpus as *mut _ as *mut _, - &mut cpus_size as *mut _ as *mut _, - ptr::null_mut(), - 0); - } - if cpus < 1 { - cpus = 1; - } - } - cpus as usize -} - -#[cfg(target_os = "openbsd")] -fn get_num_cpus() -> usize { - use std::ptr; - - let mut cpus: libc::c_uint = 0; - let mut cpus_size = std::mem::size_of_val(&cpus); - let mut mib = [libc::CTL_HW, libc::HW_NCPUONLINE, 0, 0]; - let rc: libc::c_int; - - unsafe { - rc = libc::sysctl(mib.as_mut_ptr(), - 2, - &mut cpus as *mut _ as *mut _, - &mut cpus_size as *mut _ as *mut _, - ptr::null_mut(), - 0); - } - if rc < 0 { - cpus = 1; - } - cpus as usize -} - -#[cfg(target_os = "openbsd")] -fn get_num_physical_cpus() -> usize { - use std::ptr; - - let mut cpus: libc::c_uint = 0; - let mut cpus_size = 
std::mem::size_of_val(&cpus); - let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0]; - let rc: libc::c_int; - - unsafe { - rc = libc::sysctl(mib.as_mut_ptr(), - 2, - &mut cpus as *mut _ as *mut _, - &mut cpus_size as *mut _ as *mut _, - ptr::null_mut(), - 0); - } - if rc < 0 { - cpus = 1; - } - cpus as usize -} - - -#[cfg(target_os = "macos")] -fn get_num_physical_cpus() -> usize { - use std::ffi::CStr; - use std::ptr; - - let mut cpus: i32 = 0; - let mut cpus_size = std::mem::size_of_val(&cpus); - - let sysctl_name = CStr::from_bytes_with_nul(b"hw.physicalcpu\0") - .expect("byte literal is missing NUL"); - - unsafe { - if 0 != libc::sysctlbyname(sysctl_name.as_ptr(), - &mut cpus as *mut _ as *mut _, - &mut cpus_size as *mut _ as *mut _, - ptr::null_mut(), - 0) { - return get_num_cpus(); - } - } - cpus as usize -} - -#[cfg(target_os = "aix")] -fn get_num_physical_cpus() -> usize { - match get_smt_threads_aix() { - Some(num) => get_num_cpus() / num, - None => get_num_cpus(), - } -} - -#[cfg(target_os = "aix")] -fn get_smt_threads_aix() -> Option { - let smt = unsafe { - libc::getsystemcfg(libc::SC_SMT_TC) - }; - if smt == u64::MAX { - return None; - } - Some(smt as usize) -} - -#[cfg(any( - target_os = "nacl", - target_os = "macos", - target_os = "ios", - target_os = "android", - target_os = "aix", - target_os = "solaris", - target_os = "illumos", - target_os = "fuchsia") -)] -fn get_num_cpus() -> usize { - // On ARM targets, processors could be turned off to save power. - // Use `_SC_NPROCESSORS_CONF` to get the real number. 
- #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] - const CONF_NAME: libc::c_int = libc::_SC_NPROCESSORS_CONF; - #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] - const CONF_NAME: libc::c_int = libc::_SC_NPROCESSORS_ONLN; - - let cpus = unsafe { libc::sysconf(CONF_NAME) }; - if cpus < 1 { - 1 - } else { - cpus as usize - } -} - -#[cfg(target_os = "haiku")] -fn get_num_cpus() -> usize { - use std::mem; - - #[allow(non_camel_case_types)] - type bigtime_t = i64; - #[allow(non_camel_case_types)] - type status_t = i32; - - #[repr(C)] - pub struct system_info { - pub boot_time: bigtime_t, - pub cpu_count: u32, - pub max_pages: u64, - pub used_pages: u64, - pub cached_pages: u64, - pub block_cache_pages: u64, - pub ignored_pages: u64, - pub needed_memory: u64, - pub free_memory: u64, - pub max_swap_pages: u64, - pub free_swap_pages: u64, - pub page_faults: u32, - pub max_sems: u32, - pub used_sems: u32, - pub max_ports: u32, - pub used_ports: u32, - pub max_threads: u32, - pub used_threads: u32, - pub max_teams: u32, - pub used_teams: u32, - pub kernel_name: [::std::os::raw::c_char; 256usize], - pub kernel_build_date: [::std::os::raw::c_char; 32usize], - pub kernel_build_time: [::std::os::raw::c_char; 32usize], - pub kernel_version: i64, - pub abi: u32, - } - - extern { - fn get_system_info(info: *mut system_info) -> status_t; - } - - let mut info: system_info = unsafe { mem::zeroed() }; - let status = unsafe { get_system_info(&mut info as *mut _) }; - if status == 0 { - info.cpu_count as usize - } else { - 1 - } -} - -#[cfg(target_os = "hermit")] -fn get_num_cpus() -> usize { - unsafe { hermit_abi::get_processor_count() } -} - -#[cfg(not(any( - target_os = "nacl", - target_os = "macos", - target_os = "ios", - target_os = "android", - target_os = "aix", - target_os = "solaris", - target_os = "illumos", - target_os = "fuchsia", - target_os = "linux", - target_os = "openbsd", - target_os = "freebsd", - target_os = "dragonfly", - target_os = 
"netbsd", - target_os = "haiku", - target_os = "hermit", - windows, -)))] -fn get_num_cpus() -> usize { - 1 -} - -#[cfg(test)] -mod tests { - fn env_var(name: &'static str) -> Option { - ::std::env::var(name).ok().map(|val| val.parse().unwrap()) - } - - #[test] - fn test_get() { - let num = super::get(); - if let Some(n) = env_var("NUM_CPUS_TEST_GET") { - assert_eq!(num, n); - } else { - assert!(num > 0); - assert!(num < 236_451); - } - } - - #[test] - fn test_get_physical() { - let num = super::get_physical(); - if let Some(n) = env_var("NUM_CPUS_TEST_GET_PHYSICAL") { - assert_eq!(num, n); - } else { - assert!(num > 0); - assert!(num < 236_451); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/num_cpus/src/linux.rs s390-tools-2.33.1/rust-vendor/num_cpus/src/linux.rs --- s390-tools-2.31.0/rust-vendor/num_cpus/src/linux.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/num_cpus/src/linux.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,595 +0,0 @@ -use std::collections::HashMap; -use std::fs::File; -use std::io::{BufRead, BufReader, Read}; -use std::mem; -use std::path::{Path, PathBuf}; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Once; - -use libc; - -macro_rules! debug { - ($($args:expr),*) => ({ - if false { - //if true { - println!($($args),*); - } - }); -} - -macro_rules! 
some { - ($e:expr) => {{ - match $e { - Some(v) => v, - None => { - debug!("NONE: {:?}", stringify!($e)); - return None; - } - } - }}; -} - -pub fn get_num_cpus() -> usize { - match cgroups_num_cpus() { - Some(n) => n, - None => logical_cpus(), - } -} - -fn logical_cpus() -> usize { - let mut set: libc::cpu_set_t = unsafe { mem::zeroed() }; - if unsafe { libc::sched_getaffinity(0, mem::size_of::(), &mut set) } == 0 { - let mut count: u32 = 0; - for i in 0..libc::CPU_SETSIZE as usize { - if unsafe { libc::CPU_ISSET(i, &set) } { - count += 1 - } - } - count as usize - } else { - let cpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) }; - if cpus < 1 { - 1 - } else { - cpus as usize - } - } -} - -pub fn get_num_physical_cpus() -> usize { - let file = match File::open("/proc/cpuinfo") { - Ok(val) => val, - Err(_) => return get_num_cpus(), - }; - let reader = BufReader::new(file); - let mut map = HashMap::new(); - let mut physid: u32 = 0; - let mut cores: usize = 0; - let mut chgcount = 0; - for line in reader.lines().filter_map(|result| result.ok()) { - let mut it = line.split(':'); - let (key, value) = match (it.next(), it.next()) { - (Some(key), Some(value)) => (key.trim(), value.trim()), - _ => continue, - }; - if key == "physical id" { - match value.parse() { - Ok(val) => physid = val, - Err(_) => break, - }; - chgcount += 1; - } - if key == "cpu cores" { - match value.parse() { - Ok(val) => cores = val, - Err(_) => break, - }; - chgcount += 1; - } - if chgcount == 2 { - map.insert(physid, cores); - chgcount = 0; - } - } - let count = map.into_iter().fold(0, |acc, (_, cores)| acc + cores); - - if count == 0 { - get_num_cpus() - } else { - count - } -} - -/// Cached CPUs calculated from cgroups. -/// -/// If 0, check logical cpus. 
-// Allow deprecation warnings, we want to work on older rustc -#[allow(warnings)] -static CGROUPS_CPUS: AtomicUsize = ::std::sync::atomic::ATOMIC_USIZE_INIT; - -fn cgroups_num_cpus() -> Option { - #[allow(warnings)] - static ONCE: Once = ::std::sync::ONCE_INIT; - - ONCE.call_once(init_cgroups); - - let cpus = CGROUPS_CPUS.load(Ordering::Acquire); - - if cpus > 0 { - Some(cpus) - } else { - None - } -} - -fn init_cgroups() { - // Should only be called once - debug_assert!(CGROUPS_CPUS.load(Ordering::SeqCst) == 0); - - // Fails in Miri by default (cannot open files), and Miri does not have parallelism anyway. - if cfg!(miri) { - return; - } - - if let Some(quota) = load_cgroups("/proc/self/cgroup", "/proc/self/mountinfo") { - if quota == 0 { - return; - } - - let logical = logical_cpus(); - let count = ::std::cmp::min(quota, logical); - - CGROUPS_CPUS.store(count, Ordering::SeqCst); - } -} - -fn load_cgroups(cgroup_proc: P1, mountinfo_proc: P2) -> Option -where - P1: AsRef, - P2: AsRef, -{ - let subsys = some!(Subsys::load_cpu(cgroup_proc)); - let mntinfo = some!(MountInfo::load_cpu(mountinfo_proc, subsys.version)); - let cgroup = some!(Cgroup::translate(mntinfo, subsys)); - cgroup.cpu_quota() -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum CgroupVersion { - V1, - V2, -} - -struct Cgroup { - version: CgroupVersion, - base: PathBuf, -} - -struct MountInfo { - version: CgroupVersion, - root: String, - mount_point: String, -} - -struct Subsys { - version: CgroupVersion, - base: String, -} - -impl Cgroup { - fn new(version: CgroupVersion, dir: PathBuf) -> Cgroup { - Cgroup { version: version, base: dir } - } - - fn translate(mntinfo: MountInfo, subsys: Subsys) -> Option { - // Translate the subsystem directory via the host paths. 
- debug!( - "subsys = {:?}; root = {:?}; mount_point = {:?}", - subsys.base, mntinfo.root, mntinfo.mount_point - ); - - let rel_from_root = some!(Path::new(&subsys.base).strip_prefix(&mntinfo.root).ok()); - - debug!("rel_from_root: {:?}", rel_from_root); - - // join(mp.MountPoint, relPath) - let mut path = PathBuf::from(mntinfo.mount_point); - path.push(rel_from_root); - Some(Cgroup::new(mntinfo.version, path)) - } - - fn cpu_quota(&self) -> Option { - let (quota_us, period_us) = match self.version { - CgroupVersion::V1 => (some!(self.quota_us()), some!(self.period_us())), - CgroupVersion::V2 => some!(self.max()), - }; - - // protect against dividing by zero - if period_us == 0 { - return None; - } - - // Ceil the division, since we want to be able to saturate - // the available CPUs, and flooring would leave a CPU un-utilized. - - Some((quota_us as f64 / period_us as f64).ceil() as usize) - } - - fn quota_us(&self) -> Option { - self.param("cpu.cfs_quota_us") - } - - fn period_us(&self) -> Option { - self.param("cpu.cfs_period_us") - } - - fn max(&self) -> Option<(usize, usize)> { - let max = some!(self.raw_param("cpu.max")); - let mut max = some!(max.lines().next()).split(' '); - - let quota = some!(max.next().and_then(|quota| quota.parse().ok())); - let period = some!(max.next().and_then(|period| period.parse().ok())); - - Some((quota, period)) - } - - fn param(&self, param: &str) -> Option { - let buf = some!(self.raw_param(param)); - - buf.trim().parse().ok() - } - - fn raw_param(&self, param: &str) -> Option { - let mut file = some!(File::open(self.base.join(param)).ok()); - - let mut buf = String::new(); - some!(file.read_to_string(&mut buf).ok()); - - Some(buf) - } -} - -impl MountInfo { - fn load_cpu>(proc_path: P, version: CgroupVersion) -> Option { - let file = some!(File::open(proc_path).ok()); - let file = BufReader::new(file); - - file.lines() - .filter_map(|result| result.ok()) - .filter_map(MountInfo::parse_line) - .find(|mount_info| 
mount_info.version == version) - } - - fn parse_line(line: String) -> Option { - let mut fields = line.split(' '); - - // 7 5 0:6 /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:7 - cgroup cgroup rw,cpu,cpuacct - let mnt_root = some!(fields.nth(3)); - // 7 5 0:6 / rw,nosuid,nodev,noexec,relatime shared:7 - cgroup cgroup rw,cpu,cpuacct - let mnt_point = some!(fields.next()); - - // Ignore all fields until the separator(-). - // Note: there could be zero or more optional fields before hyphen. - // See: https://man7.org/linux/man-pages/man5/proc.5.html - // 7 5 0:6 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:7 <-> cgroup cgroup rw,cpu,cpuacct - // Note: we cannot use `?` here because we need to support Rust 1.13. - match fields.find(|&s| s == "-") { - Some(_) => {} - None => return None, - }; - - // 7 5 0:6 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:7 - cgroup rw,cpu,cpuacct - let version = match fields.next() { - Some("cgroup") => CgroupVersion::V1, - Some("cgroup2") => CgroupVersion::V2, - _ => return None, - }; - - // cgroups2 only has a single mount point - if version == CgroupVersion::V1 { - // 7 5 0:6 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:7 - cgroup cgroup - let super_opts = some!(fields.nth(1)); - - // We only care about the 'cpu' option - if !super_opts.split(',').any(|opt| opt == "cpu") { - return None; - } - } - - Some(MountInfo { - version: version, - root: mnt_root.to_owned(), - mount_point: mnt_point.to_owned(), - }) - } -} - -impl Subsys { - fn load_cpu>(proc_path: P) -> Option { - let file = some!(File::open(proc_path).ok()); - let file = BufReader::new(file); - - file.lines() - .filter_map(|result| result.ok()) - .filter_map(Subsys::parse_line) - .fold(None, |previous, line| { - // already-found v1 trumps v2 since it explicitly specifies its controllers - if previous.is_some() && line.version == CgroupVersion::V2 { - return previous; - } - - Some(line) - }) 
- } - - fn parse_line(line: String) -> Option { - // Example format: - // 11:cpu,cpuacct:/ - let mut fields = line.split(':'); - - let sub_systems = some!(fields.nth(1)); - - let version = if sub_systems.is_empty() { - CgroupVersion::V2 - } else { - CgroupVersion::V1 - }; - - if version == CgroupVersion::V1 && !sub_systems.split(',').any(|sub| sub == "cpu") { - return None; - } - - fields.next().map(|path| Subsys { - version: version, - base: path.to_owned(), - }) - } -} - -#[cfg(test)] -mod tests { - mod v1 { - use super::super::{Cgroup, CgroupVersion, MountInfo, Subsys}; - use std::path::{Path, PathBuf}; - - // `static_in_const` feature is not stable in Rust 1.13. - static FIXTURES_PROC: &'static str = "fixtures/cgroups/proc/cgroups"; - - static FIXTURES_CGROUPS: &'static str = "fixtures/cgroups/cgroups"; - - macro_rules! join { - ($base:expr, $($path:expr),+) => ({ - Path::new($base) - $(.join($path))+ - }) - } - - #[test] - fn test_load_mountinfo() { - // test only one optional fields - let path = join!(FIXTURES_PROC, "mountinfo"); - - let mnt_info = MountInfo::load_cpu(path, CgroupVersion::V1).unwrap(); - - assert_eq!(mnt_info.root, "/"); - assert_eq!(mnt_info.mount_point, "/sys/fs/cgroup/cpu,cpuacct"); - - // test zero optional field - let path = join!(FIXTURES_PROC, "mountinfo_zero_opt"); - - let mnt_info = MountInfo::load_cpu(path, CgroupVersion::V1).unwrap(); - - assert_eq!(mnt_info.root, "/"); - assert_eq!(mnt_info.mount_point, "/sys/fs/cgroup/cpu,cpuacct"); - - // test multi optional fields - let path = join!(FIXTURES_PROC, "mountinfo_multi_opt"); - - let mnt_info = MountInfo::load_cpu(path, CgroupVersion::V1).unwrap(); - - assert_eq!(mnt_info.root, "/"); - assert_eq!(mnt_info.mount_point, "/sys/fs/cgroup/cpu,cpuacct"); - } - - #[test] - fn test_load_subsys() { - let path = join!(FIXTURES_PROC, "cgroup"); - - let subsys = Subsys::load_cpu(path).unwrap(); - - assert_eq!(subsys.base, "/"); - assert_eq!(subsys.version, CgroupVersion::V1); - } - - #[test] - 
fn test_cgroup_mount() { - let cases = &[ - ("/", "/sys/fs/cgroup/cpu", "/", Some("/sys/fs/cgroup/cpu")), - ( - "/docker/01abcd", - "/sys/fs/cgroup/cpu", - "/docker/01abcd", - Some("/sys/fs/cgroup/cpu"), - ), - ( - "/docker/01abcd", - "/sys/fs/cgroup/cpu", - "/docker/01abcd/", - Some("/sys/fs/cgroup/cpu"), - ), - ( - "/docker/01abcd", - "/sys/fs/cgroup/cpu", - "/docker/01abcd/large", - Some("/sys/fs/cgroup/cpu/large"), - ), - // fails - ("/docker/01abcd", "/sys/fs/cgroup/cpu", "/", None), - ("/docker/01abcd", "/sys/fs/cgroup/cpu", "/docker", None), - ("/docker/01abcd", "/sys/fs/cgroup/cpu", "/elsewhere", None), - ( - "/docker/01abcd", - "/sys/fs/cgroup/cpu", - "/docker/01abcd-other-dir", - None, - ), - ]; - - for &(root, mount_point, subsys, expected) in cases.iter() { - let mnt_info = MountInfo { - version: CgroupVersion::V1, - root: root.into(), - mount_point: mount_point.into(), - }; - let subsys = Subsys { - version: CgroupVersion::V1, - base: subsys.into(), - }; - - let actual = Cgroup::translate(mnt_info, subsys).map(|c| c.base); - let expected = expected.map(PathBuf::from); - assert_eq!(actual, expected); - } - } - - #[test] - fn test_cgroup_cpu_quota() { - let cgroup = Cgroup::new(CgroupVersion::V1, join!(FIXTURES_CGROUPS, "good")); - assert_eq!(cgroup.cpu_quota(), Some(6)); - } - - #[test] - fn test_cgroup_cpu_quota_divide_by_zero() { - let cgroup = Cgroup::new(CgroupVersion::V1, join!(FIXTURES_CGROUPS, "zero-period")); - assert!(cgroup.quota_us().is_some()); - assert_eq!(cgroup.period_us(), Some(0)); - assert_eq!(cgroup.cpu_quota(), None); - } - - #[test] - fn test_cgroup_cpu_quota_ceil() { - let cgroup = Cgroup::new(CgroupVersion::V1, join!(FIXTURES_CGROUPS, "ceil")); - assert_eq!(cgroup.cpu_quota(), Some(2)); - } - } - - mod v2 { - use super::super::{Cgroup, CgroupVersion, MountInfo, Subsys}; - use std::path::{Path, PathBuf}; - - // `static_in_const` feature is not stable in Rust 1.13. 
- static FIXTURES_PROC: &'static str = "fixtures/cgroups2/proc/cgroups"; - - static FIXTURES_CGROUPS: &'static str = "fixtures/cgroups2/cgroups"; - - macro_rules! join { - ($base:expr, $($path:expr),+) => ({ - Path::new($base) - $(.join($path))+ - }) - } - - #[test] - fn test_load_mountinfo() { - // test only one optional fields - let path = join!(FIXTURES_PROC, "mountinfo"); - - let mnt_info = MountInfo::load_cpu(path, CgroupVersion::V2).unwrap(); - - assert_eq!(mnt_info.root, "/"); - assert_eq!(mnt_info.mount_point, "/sys/fs/cgroup"); - } - - #[test] - fn test_load_subsys() { - let path = join!(FIXTURES_PROC, "cgroup"); - - let subsys = Subsys::load_cpu(path).unwrap(); - - assert_eq!(subsys.base, "/"); - assert_eq!(subsys.version, CgroupVersion::V2); - } - - #[test] - fn test_load_subsys_multi() { - let path = join!(FIXTURES_PROC, "cgroup_multi"); - - let subsys = Subsys::load_cpu(path).unwrap(); - - assert_eq!(subsys.base, "/"); - assert_eq!(subsys.version, CgroupVersion::V1); - } - - #[test] - fn test_cgroup_mount() { - let cases = &[ - ("/", "/sys/fs/cgroup/cpu", "/", Some("/sys/fs/cgroup/cpu")), - ( - "/docker/01abcd", - "/sys/fs/cgroup/cpu", - "/docker/01abcd", - Some("/sys/fs/cgroup/cpu"), - ), - ( - "/docker/01abcd", - "/sys/fs/cgroup/cpu", - "/docker/01abcd/", - Some("/sys/fs/cgroup/cpu"), - ), - ( - "/docker/01abcd", - "/sys/fs/cgroup/cpu", - "/docker/01abcd/large", - Some("/sys/fs/cgroup/cpu/large"), - ), - // fails - ("/docker/01abcd", "/sys/fs/cgroup/cpu", "/", None), - ("/docker/01abcd", "/sys/fs/cgroup/cpu", "/docker", None), - ("/docker/01abcd", "/sys/fs/cgroup/cpu", "/elsewhere", None), - ( - "/docker/01abcd", - "/sys/fs/cgroup/cpu", - "/docker/01abcd-other-dir", - None, - ), - ]; - - for &(root, mount_point, subsys, expected) in cases.iter() { - let mnt_info = MountInfo { - version: CgroupVersion::V1, - root: root.into(), - mount_point: mount_point.into(), - }; - let subsys = Subsys { - version: CgroupVersion::V1, - base: subsys.into(), - }; - - 
let actual = Cgroup::translate(mnt_info, subsys).map(|c| c.base); - let expected = expected.map(PathBuf::from); - assert_eq!(actual, expected); - } - } - - #[test] - fn test_cgroup_cpu_quota() { - let cgroup = Cgroup::new(CgroupVersion::V2, join!(FIXTURES_CGROUPS, "good")); - assert_eq!(cgroup.cpu_quota(), Some(6)); - } - - #[test] - fn test_cgroup_cpu_quota_divide_by_zero() { - let cgroup = Cgroup::new(CgroupVersion::V2, join!(FIXTURES_CGROUPS, "zero-period")); - let period = cgroup.max().map(|max| max.1); - - assert_eq!(period, Some(0)); - assert_eq!(cgroup.cpu_quota(), None); - } - - #[test] - fn test_cgroup_cpu_quota_ceil() { - let cgroup = Cgroup::new(CgroupVersion::V2, join!(FIXTURES_CGROUPS, "ceil")); - assert_eq!(cgroup.cpu_quota(), Some(2)); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/object/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/object/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/object/Cargo.toml s390-tools-2.33.1/rust-vendor/object/Cargo.toml --- s390-tools-2.31.0/rust-vendor/object/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,163 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). 
-# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.60" -name = "object" -version = "0.32.1" -exclude = [ - "/.github", - "/testfiles", -] -description = "A unified interface for reading and writing object file formats." -readme = "README.md" -keywords = [ - "object", - "elf", - "mach-o", - "pe", - "coff", -] -license = "Apache-2.0 OR MIT" -repository = "https://github.com/gimli-rs/object" -resolver = "2" - -[package.metadata.docs.rs] -features = ["doc"] - -[dependencies.alloc] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-alloc" - -[dependencies.compiler_builtins] -version = "0.1.2" -optional = true - -[dependencies.core] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-core" - -[dependencies.crc32fast] -version = "1.2" -optional = true -default-features = false - -[dependencies.flate2] -version = "1" -optional = true - -[dependencies.hashbrown] -version = "0.14.0" -features = ["ahash"] -optional = true -default-features = false - -[dependencies.indexmap] -version = "2.0" -optional = true -default-features = false - -[dependencies.memchr] -version = "2.4.1" -default-features = false - -[dependencies.ruzstd] -version = "0.4.0" -optional = true - -[dependencies.wasmparser] -version = "0.110.0" -optional = true - -[features] -all = [ - "read", - "write", - "std", - "compression", - "wasm", -] -archive = [] -cargo-all = [] -coff = [] -compression = [ - "dep:flate2", - "dep:ruzstd", - "std", -] -default = [ - "read", - "compression", -] -doc = [ - "read_core", - "write_std", - "std", - "compression", - "archive", - "coff", - "elf", - "macho", - "pe", - "wasm", - "xcoff", -] -elf = [] -macho = [] -pe = ["coff"] -read = [ - "read_core", - "archive", - "coff", - "elf", - "macho", - "pe", - "xcoff", - "unaligned", -] -read_core = [] -rustc-dep-of-std = [ - "core", - "compiler_builtins", - "alloc", - "memchr/rustc-dep-of-std", -] -std = ["memchr/std"] -unaligned = [] -unstable = [] 
-unstable-all = [ - "all", - "unstable", -] -wasm = ["dep:wasmparser"] -write = [ - "write_std", - "coff", - "elf", - "macho", - "pe", - "xcoff", -] -write_core = [ - "dep:crc32fast", - "dep:indexmap", - "dep:hashbrown", -] -write_std = [ - "write_core", - "std", - "indexmap?/std", - "crc32fast?/std", -] -xcoff = [] diff -Nru s390-tools-2.31.0/rust-vendor/object/CHANGELOG.md s390-tools-2.33.1/rust-vendor/object/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/object/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,690 +0,0 @@ -# `object` Change Log - --------------------------------------------------------------------------------- - -## 0.32.1 - -Released 2023/09/03. - -### Added - -* Added `write::Object::set_macho_cpu_subtype`. - [#574](https://github.com/gimli-rs/object/pull/574) - --------------------------------------------------------------------------------- - -## 0.32.0 - -Released 2023/08/12. - -### Breaking changes - -* Changed `read::elf::Note::name` to exclude all trailing null bytes. - [#549](https://github.com/gimli-rs/object/pull/549) - -* Updated dependencies, and changed some optional dependencies to use the `dep:` - feature syntax. - [#558](https://github.com/gimli-rs/object/pull/558) - [#569](https://github.com/gimli-rs/object/pull/569) - -### Changed - -* The minimum supported rust version for the `read` feature and its dependencies - has changed to 1.60.0. - -* The minimum supported rust version for other features has changed to 1.65.0. - -* Changed many definitions from `static` to `const`. - [#549](https://github.com/gimli-rs/object/pull/549) - -* Fixed Mach-O section alignment padding in `write::Object`. - [#553](https://github.com/gimli-rs/object/pull/553) - -* Changed `read::File` to an enum. - [#564](https://github.com/gimli-rs/object/pull/564) - -### Added - -* Added `elf::ELF_NOTE_GO`, `elf::NT_GO_BUILD_ID`, and `read::elf::Note::name_bytes`. 
- [#549](https://github.com/gimli-rs/object/pull/549) - -* Added `read::FileKind::CoffImport` and `read::coff::ImportFile`. - [#555](https://github.com/gimli-rs/object/pull/555) - [#556](https://github.com/gimli-rs/object/pull/556) - -* Added `Architecture::Csky` and basic ELF support for C-SKY. - [#561](https://github.com/gimli-rs/object/pull/561) - -* Added `read::elf::ElfSymbol::raw_symbol`. - [#562](https://github.com/gimli-rs/object/pull/562) - --------------------------------------------------------------------------------- - -## 0.30.4 - -Released 2023/06/05. - -### Changed - -* Fixed Mach-O section alignment padding in `write::Object`. - [#553](https://github.com/gimli-rs/object/pull/553) - --------------------------------------------------------------------------------- - -## 0.31.1 - -Released 2023/05/09. - -### Changed - -* Fixed address for global symbols in `read::wasm`. - [#539](https://github.com/gimli-rs/object/pull/539) - -* Fixed writing of alignment for empty ELF sections. - [#540](https://github.com/gimli-rs/object/pull/540) - -### Added - -* Added more `elf::GNU_PROPERTY_*` definitions. - Added `read::elf::note::gnu_properties`, `write::StandardSection::GnuProperty`, - and `write::Object::add_elf_gnu_property_u32`. - [#537](https://github.com/gimli-rs/object/pull/537) - [#541](https://github.com/gimli-rs/object/pull/541) - -* Added Mach-O support for `Architecture::Aarch64_Ilp32`. - [#542](https://github.com/gimli-rs/object/pull/542) - [#545](https://github.com/gimli-rs/object/pull/545) - -* Added `Architecture::Wasm64`. - [#543](https://github.com/gimli-rs/object/pull/543) - --------------------------------------------------------------------------------- - -## 0.31.0 - -Released 2023/04/14. - -### Breaking changes - -* Added a type parameter on existing COFF types to support reading COFF `/bigobj` files. - [#502](https://github.com/gimli-rs/object/pull/502) - -* Changed PE symbols to support COFF `/bigobj`. 
- Changed `pe::IMAGE_SYM_*` to `i32`. - Changed `pe::ImageSymbolEx::section_number` to `I32Bytes`. - Deleted a number of methods from `pe::ImageSymbol`. - Use the `read::pe::ImageSymbol` trait instead. - [#502](https://github.com/gimli-rs/object/pull/502) - -* Changed `pe::Guid` to a single array, and added methods to read the individual fields. - [#502](https://github.com/gimli-rs/object/pull/502) - -* Added `Symbol` type parameter to `SymbolFlags` to support `SymbolFlags::Xcoff`. - [#527](https://github.com/gimli-rs/object/pull/527) - -### Changed - -* Fix alignment when reserving zero length sections in `write::elf::Write::reserve`. - [#514](https://github.com/gimli-rs/object/pull/514) - -* Validate command size in `read::macho::LoadCommandIterator`. - [#516](https://github.com/gimli-rs/object/pull/516) - -* Handle invalid alignment in `read::macho::MachoSection::align`. - [#516](https://github.com/gimli-rs/object/pull/516) - -* Accept `SymbolKind::Unknown` in `write::Object::macho_write`. - [#519](https://github.com/gimli-rs/object/pull/519) - -* Updated `wasmparser` dependency. - [#528](https://github.com/gimli-rs/object/pull/528) - -### Added - -* Added more `elf::EF_RISCV_*` definitions. - [#507](https://github.com/gimli-rs/object/pull/507) - -* Added `read::elf::SectionHeader::gnu_attributes` and associated types. - Added `.gnu.attributes` support to `write::elf::Writer`. - [#509](https://github.com/gimli-rs/object/pull/509) - [#525](https://github.com/gimli-rs/object/pull/525) - -* Added `write::Object::set_macho_build_version`. - [#524](https://github.com/gimli-rs/object/pull/524) - -* Added `read::FileKind::Xcoff32`, `read::FileKind::Xcoff64`, `read::XcoffFile`, - and associated types. - Added XCOFF support to `write::Object`. 
- [#469](https://github.com/gimli-rs/object/pull/469) - [#476](https://github.com/gimli-rs/object/pull/476) - [#477](https://github.com/gimli-rs/object/pull/477) - [#482](https://github.com/gimli-rs/object/pull/482) - [#484](https://github.com/gimli-rs/object/pull/484) - [#486](https://github.com/gimli-rs/object/pull/486) - [#527](https://github.com/gimli-rs/object/pull/527) - -* Added `read::FileKind::CoffBig`, `read::pe::CoffHeader` and `read::pe::ImageSymbol`. - [#502](https://github.com/gimli-rs/object/pull/502) - -* Added `elf::PT_GNU_PROPERTY`. - [#530](https://github.com/gimli-rs/object/pull/530) - -* Added `elf::ELFCOMPRESS_ZSTD`, `read::CompressionFormat::Zstandard`, - and Zstandard decompression in `read::CompressedData::decompress` using - the `ruzstd` crate. - [#532](https://github.com/gimli-rs/object/pull/532) - -* Added `read::elf::NoteIterator::new`. - [#533](https://github.com/gimli-rs/object/pull/533) - --------------------------------------------------------------------------------- - -## 0.30.3 - -Released 2023/01/23. - -### Added - -* Added `SectionKind::ReadOnlyDataWithRel` for writing. - [#504](https://github.com/gimli-rs/object/pull/504) - --------------------------------------------------------------------------------- - -## 0.30.2 - -Released 2023/01/11. - -### Added - -* Added more ELF constants for AVR flags and relocations. - [#500](https://github.com/gimli-rs/object/pull/500) - --------------------------------------------------------------------------------- - -## 0.30.1 - -Released 2023/01/04. - -### Changed - -* Changed `read::ElfSymbol::kind` to handle `STT_NOTYPE` and `STT_GNU_IFUNC`. - [#498](https://github.com/gimli-rs/object/pull/498) - -### Added - -* Added `read::CoffSymbol::raw_symbol`. - [#494](https://github.com/gimli-rs/object/pull/494) - -* Added ELF support for Solana Binary Format. - [#491](https://github.com/gimli-rs/object/pull/491) - -* Added ELF support for AArch64 ILP32. 
- [#497](https://github.com/gimli-rs/object/pull/497) - --------------------------------------------------------------------------------- - -## 0.30.0 - -Released 2022/11/22. - -### Breaking changes - -* The minimum supported rust version for the `read` feature has changed to 1.52.0. - [#458](https://github.com/gimli-rs/object/pull/458) - -* The minimum supported rust version for the `write` feature has changed to 1.61.0. - -* Fixed endian handling in `read::elf::SymbolTable::shndx`. - [#458](https://github.com/gimli-rs/object/pull/458) - -* Fixed endian handling in `read::pe::ResourceName`. - [#458](https://github.com/gimli-rs/object/pull/458) - -* Changed definitions for LoongArch ELF header flags. - [#483](https://github.com/gimli-rs/object/pull/483) - -### Changed - -* Fixed parsing of multiple debug directory entries in `read::pe::PeFile::pdb_info`. - [#451](https://github.com/gimli-rs/object/pull/451) - -* Changed the section name used when writing COFF stub symbols. - [#475](https://github.com/gimli-rs/object/pull/475) - -### Added - -* Added `read::pe::DataDirectories::delay_load_import_table`. - [#448](https://github.com/gimli-rs/object/pull/448) - -* Added `read::macho::LoadCommandData::raw_data`. - [#449](https://github.com/gimli-rs/object/pull/449) - -* Added ELF relocations for LoongArch ps ABI v2. - [#450](https://github.com/gimli-rs/object/pull/450) - -* Added PowerPC support for Mach-O. - [#460](https://github.com/gimli-rs/object/pull/460) - -* Added support for reading the AIX big archive format. - [#462](https://github.com/gimli-rs/object/pull/462) - [#467](https://github.com/gimli-rs/object/pull/467) - [#473](https://github.com/gimli-rs/object/pull/473) - -* Added support for `RelocationEncoding::AArch64Call` when writing Mach-O files. - [#465](https://github.com/gimli-rs/object/pull/465) - -* Added support for `RelocationKind::Relative` when writing RISC-V ELF files. 
- [#470](https://github.com/gimli-rs/object/pull/470) - -* Added Xtensa architecture support for ELF. - [#481](https://github.com/gimli-rs/object/pull/481) - -* Added `read::pe::ResourceName::raw_data`. - [#487](https://github.com/gimli-rs/object/pull/487) - --------------------------------------------------------------------------------- - -## 0.29.0 - -Released 2022/06/22. - -### Breaking changes - -* The `write` feature now has a minimum supported rust version of 1.56.1. - [#444](https://github.com/gimli-rs/object/pull/444) - -* Added `os_abi` and `abi_version` fields to `FileFlags::Elf`. - [#438](https://github.com/gimli-rs/object/pull/438) - [#441](https://github.com/gimli-rs/object/pull/441) - -### Changed - -* Fixed handling of empty symbol tables in `read::elf::ElfFile::symbol_table` and - `read::elf::ElfFile::dynamic_symbol_table`. - [#443](https://github.com/gimli-rs/object/pull/443) - -### Added - -* Added more `ELF_OSABI_*` constants. - [#439](https://github.com/gimli-rs/object/pull/439) - --------------------------------------------------------------------------------- - -## 0.28.4 - -Released 2022/05/09. - -### Added - -* Added `read::pe::DataDirectories::resource_directory`. - [#425](https://github.com/gimli-rs/object/pull/425) - [#427](https://github.com/gimli-rs/object/pull/427) - -* Added PE support for more ARM relocations. - [#428](https://github.com/gimli-rs/object/pull/428) - -* Added support for `Architecture::LoongArch64`. - [#430](https://github.com/gimli-rs/object/pull/430) - [#432](https://github.com/gimli-rs/object/pull/432) - -* Added `elf::EF_MIPS_ABI` and associated constants. - [#433](https://github.com/gimli-rs/object/pull/433) - --------------------------------------------------------------------------------- - -## 0.28.3 - -Released 2022/01/19. - -### Changed - -* For the Mach-O support in `write::Object`, accept `RelocationKind::MachO` for all - architectures, and accept `RelocationKind::Absolute` for ARM64. 
- [#422](https://github.com/gimli-rs/object/pull/422) - -### Added - -* Added `pe::ImageDataDirectory::file_range`, `read::pe::SectionTable::pe_file_range_at` - and `pe::ImageSectionHeader::pe_file_range_at`. - [#421](https://github.com/gimli-rs/object/pull/421) - -* Added `write::Object::add_coff_exports`. - [#423](https://github.com/gimli-rs/object/pull/423) - --------------------------------------------------------------------------------- - -## 0.28.2 - -Released 2022/01/09. - -### Changed - -* Ignored errors for the Wasm extended name section in `read::WasmFile::parse`. - [#408](https://github.com/gimli-rs/object/pull/408) - -* Ignored errors for the COFF symbol table in `read::PeFile::parse`. - [#410](https://github.com/gimli-rs/object/pull/410) - -* Fixed handling of `SectionFlags::Coff` in `write::Object::coff_write`. - [#412](https://github.com/gimli-rs/object/pull/412) - -### Added - -* Added `read::ObjectSegment::flags`. - [#416](https://github.com/gimli-rs/object/pull/416) - [#418](https://github.com/gimli-rs/object/pull/418) - --------------------------------------------------------------------------------- - -## 0.28.1 - -Released 2021/12/12. - -### Changed - -* Fixed `read::elf::SymbolTable::shndx_section`. - [#405](https://github.com/gimli-rs/object/pull/405) - -* Fixed build warnings. - [#405](https://github.com/gimli-rs/object/pull/405) - [#406](https://github.com/gimli-rs/object/pull/406) - --------------------------------------------------------------------------------- - -## 0.28.0 - -Released 2021/12/12. - -### Breaking changes - -* `write_core` feature no longer enables `std` support. Use `write_std` instead. - [#400](https://github.com/gimli-rs/object/pull/400) - -* Multiple changes related to Mach-O split dyld cache support. - [#398](https://github.com/gimli-rs/object/pull/398) - -### Added - -* Added `write::pe::Writer::write_file_align`. - [#397](https://github.com/gimli-rs/object/pull/397) - -* Added support for Mach-O split dyld cache. 
- [#398](https://github.com/gimli-rs/object/pull/398) - -* Added support for `IMAGE_SCN_LNK_NRELOC_OVFL` when reading and writing COFF. - [#399](https://github.com/gimli-rs/object/pull/399) - -* Added `write::elf::Writer::reserve_null_symbol_index`. - [#402](https://github.com/gimli-rs/object/pull/402) - --------------------------------------------------------------------------------- - -## 0.27.1 - -Released 2021/10/22. - -### Changed - -* Fixed build error with older Rust versions due to cargo resolver version. - --------------------------------------------------------------------------------- - -## 0.27.0 - -Released 2021/10/17. - -### Breaking changes - -* Changed `read::elf` to use `SectionIndex` instead of `usize` in more places. - [#341](https://github.com/gimli-rs/object/pull/341) - -* Changed some `read::elf` section methods to additionally return the linked section index. - [#341](https://github.com/gimli-rs/object/pull/341) - -* Changed `read::pe::ImageNtHeaders::parse` to return `DataDirectories` instead of a slice. - [#357](https://github.com/gimli-rs/object/pull/357) - -* Deleted `value` parameter for `write:WritableBuffer::resize`. - [#369](https://github.com/gimli-rs/object/pull/369) - -* Changed `write::Object` and `write::Section` to use `Cow` for section data. - This added a lifetime parameter, which existing users can set to `'static`. - [#370](https://github.com/gimli-rs/object/pull/370) - -### Changed - -* Fixed parsing when PE import directory has zero size. - [#341](https://github.com/gimli-rs/object/pull/341) - -* Fixed parsing when PE import directory has zero for original first thunk. - [#385](https://github.com/gimli-rs/object/pull/385) - [#387](https://github.com/gimli-rs/object/pull/387) - -* Fixed parsing when PE export directory has zero number of names. - [#353](https://github.com/gimli-rs/object/pull/353) - -* Fixed parsing when PE export directory has zero number of names and addresses. 
- [#362](https://github.com/gimli-rs/object/pull/362) - -* Fixed parsing when PE sections are contiguous. - [#354](https://github.com/gimli-rs/object/pull/354) - -* Fixed `std` feature for `indexmap` dependency. - [#374](https://github.com/gimli-rs/object/pull/374) - -* Fixed overflow in COFF section name offset parsing. - [#390](https://github.com/gimli-rs/object/pull/390) - -### Added - -* Added `name_bytes` methods to unified `read` traits. - [#351](https://github.com/gimli-rs/object/pull/351) - -* Added `read::Object::kind`. - [#352](https://github.com/gimli-rs/object/pull/352) - -* Added `read::elf::VersionTable` and related helpers. - [#341](https://github.com/gimli-rs/object/pull/341) - -* Added `read::elf::SectionTable::dynamic` and related helpers. - [#345](https://github.com/gimli-rs/object/pull/345) - -* Added `read::coff::SectionTable::max_section_file_offset`. - [#344](https://github.com/gimli-rs/object/pull/344) - -* Added `read::pe::ExportTable` and related helpers. - [#349](https://github.com/gimli-rs/object/pull/349) - [#353](https://github.com/gimli-rs/object/pull/353) - -* Added `read::pe::ImportTable` and related helpers. - [#357](https://github.com/gimli-rs/object/pull/357) - -* Added `read::pe::DataDirectories` and related helpers. - [#357](https://github.com/gimli-rs/object/pull/357) - [#384](https://github.com/gimli-rs/object/pull/384) - -* Added `read::pe::RichHeaderInfo` and related helpers. - [#375](https://github.com/gimli-rs/object/pull/375) - [#379](https://github.com/gimli-rs/object/pull/379) - -* Added `read::pe::RelocationBlocks` and related helpers. - [#378](https://github.com/gimli-rs/object/pull/378) - -* Added `write::elf::Writer`. - [#350](https://github.com/gimli-rs/object/pull/350) - -* Added `write::pe::Writer`. - [#382](https://github.com/gimli-rs/object/pull/382) - [#388](https://github.com/gimli-rs/object/pull/388) - -* Added `write::Section::data/data_mut`. 
- [#367](https://github.com/gimli-rs/object/pull/367) - -* Added `write::Object::write_stream`. - [#369](https://github.com/gimli-rs/object/pull/369) - -* Added MIPSr6 ELF header flag definitions. - [#372](https://github.com/gimli-rs/object/pull/372) - --------------------------------------------------------------------------------- - -## 0.26.2 - -Released 2021/08/28. - -### Added - -* Added support for 64-bit symbol table names to `read::archive`. - [#366](https://github.com/gimli-rs/object/pull/366) - --------------------------------------------------------------------------------- - -## 0.26.1 - -Released 2021/08/19. - -### Changed - -* Activate `memchr`'s `rustc-dep-of-std` feature - [#356](https://github.com/gimli-rs/object/pull/356) - --------------------------------------------------------------------------------- - -## 0.26.0 - -Released 2021/07/26. - -### Breaking changes - -* Changed `ReadRef::read_bytes_at_until` to accept a range parameter. - [#326](https://github.com/gimli-rs/object/pull/326) - -* Added `ReadRef` type parameter to `read::StringTable` and types that - contain it. String table entries are now only read as required. - [#326](https://github.com/gimli-rs/object/pull/326) - -* Changed result type of `read::elf::SectionHeader::data` and `data_as_array`. - [#332](https://github.com/gimli-rs/object/pull/332) - -* Moved `pod::WritableBuffer` to `write::WritableBuffer`. - Renamed `WritableBuffer::extend` to `write_bytes`. - Added more provided methods to `WritableBuffer`. - [#335](https://github.com/gimli-rs/object/pull/335) - -* Moved `pod::Bytes` to `read::Bytes`. - [#336](https://github.com/gimli-rs/object/pull/336) - -* Added `is_mips64el` parameter to `elf::Rela64::r_info/set_r_info`. - [#337](https://github.com/gimli-rs/object/pull/337) - -### Changed - -* Removed `alloc` dependency when no features are enabled. 
- [#336](https://github.com/gimli-rs/object/pull/336) - -### Added - -* Added `read::pe::PeFile` methods: `section_table`, `data_directory`, and `data`. - [#324](https://github.com/gimli-rs/object/pull/324) - -* Added more ELF definitions. - [#332](https://github.com/gimli-rs/object/pull/332) - -* Added `read::elf::SectionTable` methods for hash tables and symbol version - information. - [#332](https://github.com/gimli-rs/object/pull/332) - -* Added PE RISC-V definitions. - [#333](https://github.com/gimli-rs/object/pull/333) - -* Added `WritableBuffer` implementation for `Vec`. - [#335](https://github.com/gimli-rs/object/pull/335) - --------------------------------------------------------------------------------- - -## 0.25.3 - -Released 2021/06/12. - -### Added - -* Added `RelocationEncoding::AArch64Call`. - [#322](https://github.com/gimli-rs/object/pull/322) - --------------------------------------------------------------------------------- - -## 0.25.2 - -Released 2021/06/04. - -### Added - -* Added `Architecture::X86_64_X32`. - [#320](https://github.com/gimli-rs/object/pull/320) - --------------------------------------------------------------------------------- - -## 0.25.1 - -Released 2021/06/03. - -### Changed - -* write: Fix choice of `SHT_REL` or `SHT_RELA` for most architectures. - [#318](https://github.com/gimli-rs/object/pull/318) - -* write: Fix relocation encoding for MIPS64EL. - [#318](https://github.com/gimli-rs/object/pull/318) - --------------------------------------------------------------------------------- - -## 0.25.0 - -Released 2021/06/02. - -### Breaking changes - -* Added `non_exhaustive` to most public enums. - [#306](https://github.com/gimli-rs/object/pull/306) - -* `MachHeader::parse` and `MachHeader::load_commands` now require a header offset. - [#304](https://github.com/gimli-rs/object/pull/304) - -* Added `ReadRef::read_bytes_at_until`. 
- [#308](https://github.com/gimli-rs/object/pull/308) - -* `PeFile::entry`, `PeSection::address` and `PeSegment::address` now return a - virtual address instead of a RVA. - [#315](https://github.com/gimli-rs/object/pull/315) - -### Added - -* Added `pod::from_bytes_mut`, `pod::slice_from_bytes_mut`, `pod::bytes_of_mut`, - and `pod::bytes_of_slice_mut`. - [#296](https://github.com/gimli-rs/object/pull/296) - [#297](https://github.com/gimli-rs/object/pull/297) - -* Added `Object::pdb_info`. - [#298](https://github.com/gimli-rs/object/pull/298) - -* Added `read::macho::DyldCache`, other associated definitions, - and support for these in the examples. - [#308](https://github.com/gimli-rs/object/pull/308) - -* Added more architecture support. - [#303](https://github.com/gimli-rs/object/pull/303) - [#309](https://github.com/gimli-rs/object/pull/309) - -* Derive more traits for enums. - [#311](https://github.com/gimli-rs/object/pull/311) - -* Added `Object::relative_address_base`. - [#315](https://github.com/gimli-rs/object/pull/315) - -### Changed - -* Improved performance for string parsing. - [#302](https://github.com/gimli-rs/object/pull/302) - -* `objdump` example allows selecting container members. 
- [#308](https://github.com/gimli-rs/object/pull/308) diff -Nru s390-tools-2.31.0/rust-vendor/object/clippy.toml s390-tools-2.33.1/rust-vendor/object/clippy.toml --- s390-tools-2.31.0/rust-vendor/object/clippy.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/clippy.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -msrv = "1.60.0" diff -Nru s390-tools-2.31.0/rust-vendor/object/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/object/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/object/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/object/LICENSE-MIT s390-tools-2.33.1/rust-vendor/object/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/object/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2015 The Gimli Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/object/README.md s390-tools-2.33.1/rust-vendor/object/README.md --- s390-tools-2.31.0/rust-vendor/object/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,58 +0,0 @@ -# `object` - -The `object` crate provides a unified interface to working with object files -across platforms. It supports reading relocatable object files and executable files, -and writing COFF/ELF/Mach-O/XCOFF relocatable object files and ELF/PE executable files. - -For reading files, it provides multiple levels of support: - -* raw struct definitions suitable for zero copy access -* low level APIs for accessing the raw structs ([example](crates/examples/src/readobj/)) -* a higher level unified API for accessing common features of object files, such - as sections and symbols ([example](crates/examples/src/objdump.rs)) - -Supported file formats: ELF, Mach-O, Windows PE/COFF, Wasm, XCOFF, and Unix archive. - -## Example for unified read API -```rust -use object::{Object, ObjectSection}; -use std::error::Error; -use std::fs; - -/// Reads a file and displays the content of the ".boot" section. -fn main() -> Result<(), Box> { - let bin_data = fs::read("./multiboot2-binary.elf")?; - let obj_file = object::File::parse(&*bin_data)?; - if let Some(section) = obj_file.section_by_name(".boot") { - println!("{:#x?}", section.data()?); - } else { - eprintln!("section not available"); - } - Ok(()) -} -``` - -See [`crates/examples`](crates/examples) for more examples. - -## Minimum Supported Rust Version (MSRV) - -Changes to MSRV are considered breaking changes. We are conservative about changing the MSRV, -but sometimes are required to due to dependencies. The MSRV is: - - * 1.60.0 for the `read` feature and its dependencies. - * 1.65.0 for other features. 
- -## License - -Licensed under either of - - * Apache License, Version 2.0 ([`LICENSE-APACHE`](./LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([`LICENSE-MIT`](./LICENSE-MIT) or https://opensource.org/licenses/MIT) - -at your option. - -## Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/object/src/archive.rs s390-tools-2.33.1/rust-vendor/object/src/archive.rs --- s390-tools-2.31.0/rust-vendor/object/src/archive.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/archive.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ -//! Archive definitions. -//! -//! These definitions are independent of read/write support, although we do implement -//! some traits useful for those. - -use crate::pod::Pod; - -/// File identification bytes stored at the beginning of the file. -pub const MAGIC: [u8; 8] = *b"!\n"; - -/// File identification bytes at the beginning of AIX big archive. -pub const AIX_BIG_MAGIC: [u8; 8] = *b"\n"; - -/// File identification bytes stored at the beginning of a thin archive. -/// -/// A thin archive only contains a symbol table and file names. -pub const THIN_MAGIC: [u8; 8] = *b"!\n"; - -/// The terminator for each archive member header. -pub const TERMINATOR: [u8; 2] = *b"`\n"; - -/// The header at the start of an archive member. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Header { - /// The file name. - pub name: [u8; 16], - /// File modification timestamp in decimal. - pub date: [u8; 12], - /// User ID in decimal. - pub uid: [u8; 6], - /// Group ID in decimal. - pub gid: [u8; 6], - /// File mode in octal. - pub mode: [u8; 8], - /// File size in decimal. - pub size: [u8; 10], - /// Must be equal to `TERMINATOR`. 
- pub terminator: [u8; 2], -} - -/// The header at the start of an AIX big archive member, without name. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct AixHeader { - /// File member size in decimal. - pub size: [u8; 20], - /// Next member offset in decimal. - pub nxtmem: [u8; 20], - /// Previous member offset in decimal. - pub prvmem: [u8; 20], - /// File member date in decimal. - pub date: [u8; 12], - /// File member user id in decimal. - pub uid: [u8; 12], - /// File member group id in decimal. - pub gid: [u8; 12], - /// File member mode in octal. - pub mode: [u8; 12], - /// File member name length in decimal. - pub namlen: [u8; 4], -} - -/// The AIX big archive's fixed length header at file beginning. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct AixFileHeader { - /// Archive magic string. - pub magic: [u8; 8], - /// Offset of member table. - pub memoff: [u8; 20], - /// Offset of global symbol table. - pub gstoff: [u8; 20], - /// Offset of global symbol table for 64-bit objects. - pub gst64off: [u8; 20], - /// Offset of first member. - pub fstmoff: [u8; 20], - /// Offset of last member. - pub lstmoff: [u8; 20], - /// Offset of first member on free list. - pub freeoff: [u8; 20], -} - -/// Offset of a member in an AIX big archive. -/// -/// This is used in the member index. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct AixMemberOffset(pub [u8; 20]); - -unsafe_impl_pod!(Header, AixHeader, AixFileHeader, AixMemberOffset,); diff -Nru s390-tools-2.31.0/rust-vendor/object/src/common.rs s390-tools-2.33.1/rust-vendor/object/src/common.rs --- s390-tools-2.31.0/rust-vendor/object/src/common.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/common.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,501 +0,0 @@ -/// A CPU architecture. 
-#[allow(missing_docs)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum Architecture { - Unknown, - Aarch64, - #[allow(non_camel_case_types)] - Aarch64_Ilp32, - Arm, - Avr, - Bpf, - Csky, - I386, - X86_64, - #[allow(non_camel_case_types)] - X86_64_X32, - Hexagon, - LoongArch64, - Mips, - Mips64, - Msp430, - PowerPc, - PowerPc64, - Riscv32, - Riscv64, - S390x, - Sbf, - Sparc64, - Wasm32, - Wasm64, - Xtensa, -} - -impl Architecture { - /// The size of an address value for this architecture. - /// - /// Returns `None` for unknown architectures. - pub fn address_size(self) -> Option { - match self { - Architecture::Unknown => None, - Architecture::Aarch64 => Some(AddressSize::U64), - Architecture::Aarch64_Ilp32 => Some(AddressSize::U32), - Architecture::Arm => Some(AddressSize::U32), - Architecture::Avr => Some(AddressSize::U8), - Architecture::Bpf => Some(AddressSize::U64), - Architecture::Csky => Some(AddressSize::U32), - Architecture::I386 => Some(AddressSize::U32), - Architecture::X86_64 => Some(AddressSize::U64), - Architecture::X86_64_X32 => Some(AddressSize::U32), - Architecture::Hexagon => Some(AddressSize::U32), - Architecture::LoongArch64 => Some(AddressSize::U64), - Architecture::Mips => Some(AddressSize::U32), - Architecture::Mips64 => Some(AddressSize::U64), - Architecture::Msp430 => Some(AddressSize::U16), - Architecture::PowerPc => Some(AddressSize::U32), - Architecture::PowerPc64 => Some(AddressSize::U64), - Architecture::Riscv32 => Some(AddressSize::U32), - Architecture::Riscv64 => Some(AddressSize::U64), - Architecture::S390x => Some(AddressSize::U64), - Architecture::Sbf => Some(AddressSize::U64), - Architecture::Sparc64 => Some(AddressSize::U64), - Architecture::Wasm32 => Some(AddressSize::U32), - Architecture::Wasm64 => Some(AddressSize::U64), - Architecture::Xtensa => Some(AddressSize::U32), - } - } -} - -/// The size of an address value for an architecture. 
-/// -/// This may differ from the address size supported by the file format (such as for COFF). -#[allow(missing_docs)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -#[repr(u8)] -pub enum AddressSize { - U8 = 1, - U16 = 2, - U32 = 4, - U64 = 8, -} - -impl AddressSize { - /// The size in bytes of an address value. - #[inline] - pub fn bytes(self) -> u8 { - self as u8 - } -} - -/// A binary file format. -#[allow(missing_docs)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum BinaryFormat { - Coff, - Elf, - MachO, - Pe, - Wasm, - Xcoff, -} - -/// The kind of a section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum SectionKind { - /// The section kind is unknown. - Unknown, - /// An executable code section. - /// - /// Example ELF sections: `.text` - /// - /// Example Mach-O sections: `__TEXT/__text` - Text, - /// A data section. - /// - /// Example ELF sections: `.data` - /// - /// Example Mach-O sections: `__DATA/__data` - Data, - /// A read only data section. - /// - /// Example ELF sections: `.rodata` - /// - /// Example Mach-O sections: `__TEXT/__const`, `__DATA/__const`, `__TEXT/__literal4` - ReadOnlyData, - /// A read only data section with relocations. - /// - /// This is the same as either `Data` or `ReadOnlyData`, depending on the file format. - /// This value is only used in the API for writing files. It is never returned when reading files. - ReadOnlyDataWithRel, - /// A loadable string section. - /// - /// Example ELF sections: `.rodata.str` - /// - /// Example Mach-O sections: `__TEXT/__cstring` - ReadOnlyString, - /// An uninitialized data section. - /// - /// Example ELF sections: `.bss` - /// - /// Example Mach-O sections: `__DATA/__bss` - UninitializedData, - /// An uninitialized common data section. - /// - /// Example Mach-O sections: `__DATA/__common` - Common, - /// A TLS data section. 
- /// - /// Example ELF sections: `.tdata` - /// - /// Example Mach-O sections: `__DATA/__thread_data` - Tls, - /// An uninitialized TLS data section. - /// - /// Example ELF sections: `.tbss` - /// - /// Example Mach-O sections: `__DATA/__thread_bss` - UninitializedTls, - /// A TLS variables section. - /// - /// This contains TLS variable structures, rather than the variable initializers. - /// - /// Example Mach-O sections: `__DATA/__thread_vars` - TlsVariables, - /// A non-loadable string section. - /// - /// Example ELF sections: `.comment`, `.debug_str` - OtherString, - /// Some other non-loadable section. - /// - /// Example ELF sections: `.debug_info` - Other, - /// Debug information. - /// - /// Example Mach-O sections: `__DWARF/__debug_info` - Debug, - /// Information for the linker. - /// - /// Example COFF sections: `.drectve` - Linker, - /// ELF note section. - Note, - /// Metadata such as symbols or relocations. - /// - /// Example ELF sections: `.symtab`, `.strtab`, `.group` - Metadata, - /// Some other ELF section type. - /// - /// This is the `sh_type` field in the section header. - /// The meaning may be dependent on the architecture. - Elf(u32), -} - -impl SectionKind { - /// Return true if this section contains zerofill data. - pub fn is_bss(self) -> bool { - self == SectionKind::UninitializedData - || self == SectionKind::UninitializedTls - || self == SectionKind::Common - } -} - -/// The selection kind for a COMDAT section group. -/// -/// This determines the way in which the linker resolves multiple definitions of the COMDAT -/// sections. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum ComdatKind { - /// The selection kind is unknown. - Unknown, - /// Multiple definitions are allowed. - /// - /// An arbitrary definition is selected, and the rest are removed. - /// - /// This is the only supported selection kind for ELF. - Any, - /// Multiple definitions are not allowed. 
- /// - /// This is used to group sections without allowing duplicates. - NoDuplicates, - /// Multiple definitions must have the same size. - /// - /// An arbitrary definition is selected, and the rest are removed. - SameSize, - /// Multiple definitions must match exactly. - /// - /// An arbitrary definition is selected, and the rest are removed. - ExactMatch, - /// Multiple definitions are allowed, and the largest is selected. - /// - /// An arbitrary definition with the largest size is selected, and the rest are removed. - Largest, - /// Multiple definitions are allowed, and the newest is selected. - Newest, -} - -/// The kind of a symbol. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum SymbolKind { - /// The symbol kind is unknown. - Unknown, - /// The symbol is a null placeholder. - Null, - /// The symbol is for executable code. - Text, - /// The symbol is for a data object. - Data, - /// The symbol is for a section. - Section, - /// The symbol is the name of a file. It precedes symbols within that file. - File, - /// The symbol is for a code label. - Label, - /// The symbol is for a thread local storage entity. - Tls, -} - -/// A symbol scope. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum SymbolScope { - /// Unknown scope. - Unknown, - /// Symbol is visible to the compilation unit. - Compilation, - /// Symbol is visible to the static linkage unit. - Linkage, - /// Symbol is visible to dynamically linked objects. - Dynamic, -} - -/// The operation used to calculate the result of the relocation. -/// -/// The relocation descriptions use the following definitions. Note that -/// these definitions probably don't match any ELF ABI. -/// -/// * A - The value of the addend. -/// * G - The address of the symbol's entry within the global offset table. -/// * L - The address of the symbol's entry within the procedure linkage table. -/// * P - The address of the place of the relocation. 
-/// * S - The address of the symbol. -/// * GotBase - The address of the global offset table. -/// * Image - The base address of the image. -/// * Section - The address of the section containing the symbol. -/// -/// 'XxxRelative' means 'Xxx + A - P'. 'XxxOffset' means 'S + A - Xxx'. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum RelocationKind { - /// S + A - Absolute, - /// S + A - P - Relative, - /// G + A - GotBase - Got, - /// G + A - P - GotRelative, - /// GotBase + A - P - GotBaseRelative, - /// S + A - GotBase - GotBaseOffset, - /// L + A - P - PltRelative, - /// S + A - Image - ImageOffset, - /// S + A - Section - SectionOffset, - /// The index of the section containing the symbol. - SectionIndex, - /// Some other ELF relocation. The value is dependent on the architecture. - Elf(u32), - /// Some other Mach-O relocation. The value is dependent on the architecture. - MachO { - /// The relocation type. - value: u8, - /// Whether the relocation is relative to the place. - relative: bool, - }, - /// Some other COFF relocation. The value is dependent on the architecture. - Coff(u16), - /// Some other XCOFF relocation. - Xcoff(u8), -} - -/// Information about how the result of the relocation operation is encoded in the place. -/// -/// This is usually architecture specific, such as specifying an addressing mode or -/// a specific instruction. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum RelocationEncoding { - /// Generic encoding. - Generic, - - /// x86 sign extension at runtime. - /// - /// Used with `RelocationKind::Absolute`. - X86Signed, - /// x86 rip-relative addressing. - /// - /// The `RelocationKind` must be PC relative. - X86RipRelative, - /// x86 rip-relative addressing in movq instruction. - /// - /// The `RelocationKind` must be PC relative. - X86RipRelativeMovq, - /// x86 branch instruction. - /// - /// The `RelocationKind` must be PC relative. 
- X86Branch, - - /// s390x PC-relative offset shifted right by one bit. - /// - /// The `RelocationKind` must be PC relative. - S390xDbl, - - /// AArch64 call target. - /// - /// The `RelocationKind` must be PC relative. - AArch64Call, - - /// LoongArch branch offset with two trailing zeros. - /// - /// The `RelocationKind` must be PC relative. - LoongArchBranch, -} - -/// File flags that are specific to each file format. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum FileFlags { - /// No file flags. - None, - /// ELF file flags. - Elf { - /// `os_abi` field in the ELF file header. - os_abi: u8, - /// `abi_version` field in the ELF file header. - abi_version: u8, - /// `e_flags` field in the ELF file header. - e_flags: u32, - }, - /// Mach-O file flags. - MachO { - /// `flags` field in the Mach-O file header. - flags: u32, - }, - /// COFF file flags. - Coff { - /// `Characteristics` field in the COFF file header. - characteristics: u16, - }, - /// XCOFF file flags. - Xcoff { - /// `f_flags` field in the XCOFF file header. - f_flags: u16, - }, -} - -/// Segment flags that are specific to each file format. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum SegmentFlags { - /// No segment flags. - None, - /// ELF segment flags. - Elf { - /// `p_flags` field in the segment header. - p_flags: u32, - }, - /// Mach-O segment flags. - MachO { - /// `flags` field in the segment header. - flags: u32, - /// `maxprot` field in the segment header. - maxprot: u32, - /// `initprot` field in the segment header. - initprot: u32, - }, - /// COFF segment flags. - Coff { - /// `Characteristics` field in the segment header. - characteristics: u32, - }, -} - -/// Section flags that are specific to each file format. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum SectionFlags { - /// No section flags. - None, - /// ELF section flags. - Elf { - /// `sh_flags` field in the section header. 
- sh_flags: u64, - }, - /// Mach-O section flags. - MachO { - /// `flags` field in the section header. - flags: u32, - }, - /// COFF section flags. - Coff { - /// `Characteristics` field in the section header. - characteristics: u32, - }, - /// XCOFF section flags. - Xcoff { - /// `s_flags` field in the section header. - s_flags: u32, - }, -} - -/// Symbol flags that are specific to each file format. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum SymbolFlags { - /// No symbol flags. - None, - /// ELF symbol flags. - Elf { - /// `st_info` field in the ELF symbol. - st_info: u8, - /// `st_other` field in the ELF symbol. - st_other: u8, - }, - /// Mach-O symbol flags. - MachO { - /// `n_desc` field in the Mach-O symbol. - n_desc: u16, - }, - /// COFF flags for a section symbol. - CoffSection { - /// `Selection` field in the auxiliary symbol for the section. - selection: u8, - /// `Number` field in the auxiliary symbol for the section. - associative_section: Option
, - }, - /// XCOFF symbol flags. - Xcoff { - /// `n_sclass` field in the XCOFF symbol. - n_sclass: u8, - /// `x_smtyp` field in the CSECT auxiliary symbol. - /// - /// Only valid if `n_sclass` is `C_EXT`, `C_WEAKEXT`, or `C_HIDEXT`. - x_smtyp: u8, - /// `x_smclas` field in the CSECT auxiliary symbol. - /// - /// Only valid if `n_sclass` is `C_EXT`, `C_WEAKEXT`, or `C_HIDEXT`. - x_smclas: u8, - /// The containing csect for the symbol. - /// - /// Only valid if `x_smtyp` is `XTY_LD`. - containing_csect: Option, - }, -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/elf.rs s390-tools-2.33.1/rust-vendor/object/src/elf.rs --- s390-tools-2.31.0/rust-vendor/object/src/elf.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/elf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,6157 +0,0 @@ -//! ELF definitions. -//! -//! These definitions are independent of read/write support, although we do implement -//! some traits useful for those. -//! -//! This module is the equivalent of /usr/include/elf.h, and is based heavily on it. - -#![allow(missing_docs)] -#![allow(clippy::identity_op)] - -use crate::endian::{Endian, U32Bytes, U64Bytes, I32, I64, U16, U32, U64}; -use crate::pod::Pod; - -/// The header at the start of every 32-bit ELF file. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FileHeader32 { - /// Magic number and other information. - pub e_ident: Ident, - /// Object file type. One of the `ET_*` constants. - pub e_type: U16, - /// Architecture. One of the `EM_*` constants. - pub e_machine: U16, - /// Object file version. Must be `EV_CURRENT`. - pub e_version: U32, - /// Entry point virtual address. - pub e_entry: U32, - /// Program header table file offset. - pub e_phoff: U32, - /// Section header table file offset. - pub e_shoff: U32, - /// Processor-specific flags. - /// - /// A combination of the `EF_*` constants. - pub e_flags: U32, - /// Size in bytes of this header. 
- pub e_ehsize: U16, - /// Program header table entry size. - pub e_phentsize: U16, - /// Program header table entry count. - /// - /// If the count is greater than or equal to `PN_XNUM` then this field is set to - /// `PN_XNUM` and the count is stored in the `sh_info` field of section 0. - pub e_phnum: U16, - /// Section header table entry size. - pub e_shentsize: U16, - /// Section header table entry count. - /// - /// If the count is greater than or equal to `SHN_LORESERVE` then this field is set to - /// `0` and the count is stored in the `sh_size` field of section 0. - /// first section header. - pub e_shnum: U16, - /// Section header string table index. - /// - /// If the index is greater than or equal to `SHN_LORESERVE` then this field is set to - /// `SHN_XINDEX` and the index is stored in the `sh_link` field of section 0. - pub e_shstrndx: U16, -} - -/// The header at the start of every 64-bit ELF file. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FileHeader64 { - /// Magic number and other information. - pub e_ident: Ident, - /// Object file type. One of the `ET_*` constants. - pub e_type: U16, - /// Architecture. One of the `EM_*` constants. - pub e_machine: U16, - /// Object file version. Must be `EV_CURRENT`. - pub e_version: U32, - /// Entry point virtual address. - pub e_entry: U64, - /// Program header table file offset. - pub e_phoff: U64, - /// Section header table file offset. - pub e_shoff: U64, - /// Processor-specific flags. - /// - /// A combination of the `EF_*` constants. - pub e_flags: U32, - /// Size in bytes of this header. - pub e_ehsize: U16, - /// Program header table entry size. - pub e_phentsize: U16, - /// Program header table entry count. - /// - /// If the count is greater than or equal to `PN_XNUM` then this field is set to - /// `PN_XNUM` and the count is stored in the `sh_info` field of section 0. - pub e_phnum: U16, - /// Section header table entry size. 
- pub e_shentsize: U16, - /// Section header table entry count. - /// - /// If the count is greater than or equal to `SHN_LORESERVE` then this field is set to - /// `0` and the count is stored in the `sh_size` field of section 0. - /// first section header. - pub e_shnum: U16, - /// Section header string table index. - /// - /// If the index is greater than or equal to `SHN_LORESERVE` then this field is set to - /// `SHN_XINDEX` and the index is stored in the `sh_link` field of section 0. - pub e_shstrndx: U16, -} - -/// Magic number and other information. -/// -/// Contained in the file header. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Ident { - /// Magic number. Must be `ELFMAG`. - pub magic: [u8; 4], - /// File class. One of the `ELFCLASS*` constants. - pub class: u8, - /// Data encoding. One of the `ELFDATA*` constants. - pub data: u8, - /// ELF version. Must be `EV_CURRENT`. - pub version: u8, - /// OS ABI identification. One of the `ELFOSABI*` constants. - pub os_abi: u8, - /// ABI version. - /// - /// The meaning of this field depends on the `os_abi` value. - pub abi_version: u8, - /// Padding bytes. - pub padding: [u8; 7], -} - -/// File identification bytes stored in `Ident::magic`. -pub const ELFMAG: [u8; 4] = [0x7f, b'E', b'L', b'F']; - -// Values for `Ident::class`. -/// Invalid class. -pub const ELFCLASSNONE: u8 = 0; -/// 32-bit object. -pub const ELFCLASS32: u8 = 1; -/// 64-bit object. -pub const ELFCLASS64: u8 = 2; - -// Values for `Ident::data`. -/// Invalid data encoding. -pub const ELFDATANONE: u8 = 0; -/// 2's complement, little endian. -pub const ELFDATA2LSB: u8 = 1; -/// 2's complement, big endian. -pub const ELFDATA2MSB: u8 = 2; - -// Values for `Ident::os_abi`. -/// UNIX System V ABI. -pub const ELFOSABI_NONE: u8 = 0; -/// UNIX System V ABI. -/// -/// Alias. -pub const ELFOSABI_SYSV: u8 = 0; -/// HP-UX. -pub const ELFOSABI_HPUX: u8 = 1; -/// NetBSD. -pub const ELFOSABI_NETBSD: u8 = 2; -/// Object uses GNU ELF extensions. 
-pub const ELFOSABI_GNU: u8 = 3; -/// Object uses GNU ELF extensions. -/// -/// Compatibility alias. -pub const ELFOSABI_LINUX: u8 = ELFOSABI_GNU; -/// GNU/Hurd. -pub const ELFOSABI_HURD: u8 = 4; -/// Sun Solaris. -pub const ELFOSABI_SOLARIS: u8 = 6; -/// IBM AIX. -pub const ELFOSABI_AIX: u8 = 7; -/// SGI Irix. -pub const ELFOSABI_IRIX: u8 = 8; -/// FreeBSD. -pub const ELFOSABI_FREEBSD: u8 = 9; -/// Compaq TRU64 UNIX. -pub const ELFOSABI_TRU64: u8 = 10; -/// Novell Modesto. -pub const ELFOSABI_MODESTO: u8 = 11; -/// OpenBSD. -pub const ELFOSABI_OPENBSD: u8 = 12; -/// OpenVMS. -pub const ELFOSABI_OPENVMS: u8 = 13; -/// Hewlett-Packard Non-Stop Kernel. -pub const ELFOSABI_NSK: u8 = 14; -/// AROS -pub const ELFOSABI_AROS: u8 = 15; -/// FenixOS -pub const ELFOSABI_FENIXOS: u8 = 16; -/// Nuxi CloudABI -pub const ELFOSABI_CLOUDABI: u8 = 17; -/// ARM EABI. -pub const ELFOSABI_ARM_AEABI: u8 = 64; -/// ARM. -pub const ELFOSABI_ARM: u8 = 97; -/// Standalone (embedded) application. -pub const ELFOSABI_STANDALONE: u8 = 255; - -// Values for `FileHeader*::e_type`. -/// No file type. -pub const ET_NONE: u16 = 0; -/// Relocatable file. -pub const ET_REL: u16 = 1; -/// Executable file. -pub const ET_EXEC: u16 = 2; -/// Shared object file. -pub const ET_DYN: u16 = 3; -/// Core file. -pub const ET_CORE: u16 = 4; -/// OS-specific range start. -pub const ET_LOOS: u16 = 0xfe00; -/// OS-specific range end. -pub const ET_HIOS: u16 = 0xfeff; -/// Processor-specific range start. -pub const ET_LOPROC: u16 = 0xff00; -/// Processor-specific range end. -pub const ET_HIPROC: u16 = 0xffff; - -// Values for `FileHeader*::e_machine`. 
-/// No machine -pub const EM_NONE: u16 = 0; -/// AT&T WE 32100 -pub const EM_M32: u16 = 1; -/// SUN SPARC -pub const EM_SPARC: u16 = 2; -/// Intel 80386 -pub const EM_386: u16 = 3; -/// Motorola m68k family -pub const EM_68K: u16 = 4; -/// Motorola m88k family -pub const EM_88K: u16 = 5; -/// Intel MCU -pub const EM_IAMCU: u16 = 6; -/// Intel 80860 -pub const EM_860: u16 = 7; -/// MIPS R3000 big-endian -pub const EM_MIPS: u16 = 8; -/// IBM System/370 -pub const EM_S370: u16 = 9; -/// MIPS R3000 little-endian -pub const EM_MIPS_RS3_LE: u16 = 10; -/// HPPA -pub const EM_PARISC: u16 = 15; -/// Fujitsu VPP500 -pub const EM_VPP500: u16 = 17; -/// Sun's "v8plus" -pub const EM_SPARC32PLUS: u16 = 18; -/// Intel 80960 -pub const EM_960: u16 = 19; -/// PowerPC -pub const EM_PPC: u16 = 20; -/// PowerPC 64-bit -pub const EM_PPC64: u16 = 21; -/// IBM S390 -pub const EM_S390: u16 = 22; -/// IBM SPU/SPC -pub const EM_SPU: u16 = 23; -/// NEC V800 series -pub const EM_V800: u16 = 36; -/// Fujitsu FR20 -pub const EM_FR20: u16 = 37; -/// TRW RH-32 -pub const EM_RH32: u16 = 38; -/// Motorola RCE -pub const EM_RCE: u16 = 39; -/// ARM -pub const EM_ARM: u16 = 40; -/// Digital Alpha -pub const EM_FAKE_ALPHA: u16 = 41; -/// Hitachi SH -pub const EM_SH: u16 = 42; -/// SPARC v9 64-bit -pub const EM_SPARCV9: u16 = 43; -/// Siemens Tricore -pub const EM_TRICORE: u16 = 44; -/// Argonaut RISC Core -pub const EM_ARC: u16 = 45; -/// Hitachi H8/300 -pub const EM_H8_300: u16 = 46; -/// Hitachi H8/300H -pub const EM_H8_300H: u16 = 47; -/// Hitachi H8S -pub const EM_H8S: u16 = 48; -/// Hitachi H8/500 -pub const EM_H8_500: u16 = 49; -/// Intel Merced -pub const EM_IA_64: u16 = 50; -/// Stanford MIPS-X -pub const EM_MIPS_X: u16 = 51; -/// Motorola Coldfire -pub const EM_COLDFIRE: u16 = 52; -/// Motorola M68HC12 -pub const EM_68HC12: u16 = 53; -/// Fujitsu MMA Multimedia Accelerator -pub const EM_MMA: u16 = 54; -/// Siemens PCP -pub const EM_PCP: u16 = 55; -/// Sony nCPU embeeded RISC -pub const 
EM_NCPU: u16 = 56; -/// Denso NDR1 microprocessor -pub const EM_NDR1: u16 = 57; -/// Motorola Start*Core processor -pub const EM_STARCORE: u16 = 58; -/// Toyota ME16 processor -pub const EM_ME16: u16 = 59; -/// STMicroelectronic ST100 processor -pub const EM_ST100: u16 = 60; -/// Advanced Logic Corp. Tinyj emb.fam -pub const EM_TINYJ: u16 = 61; -/// AMD x86-64 architecture -pub const EM_X86_64: u16 = 62; -/// Sony DSP Processor -pub const EM_PDSP: u16 = 63; -/// Digital PDP-10 -pub const EM_PDP10: u16 = 64; -/// Digital PDP-11 -pub const EM_PDP11: u16 = 65; -/// Siemens FX66 microcontroller -pub const EM_FX66: u16 = 66; -/// STMicroelectronics ST9+ 8/16 mc -pub const EM_ST9PLUS: u16 = 67; -/// STmicroelectronics ST7 8 bit mc -pub const EM_ST7: u16 = 68; -/// Motorola MC68HC16 microcontroller -pub const EM_68HC16: u16 = 69; -/// Motorola MC68HC11 microcontroller -pub const EM_68HC11: u16 = 70; -/// Motorola MC68HC08 microcontroller -pub const EM_68HC08: u16 = 71; -/// Motorola MC68HC05 microcontroller -pub const EM_68HC05: u16 = 72; -/// Silicon Graphics SVx -pub const EM_SVX: u16 = 73; -/// STMicroelectronics ST19 8 bit mc -pub const EM_ST19: u16 = 74; -/// Digital VAX -pub const EM_VAX: u16 = 75; -/// Axis Communications 32-bit emb.proc -pub const EM_CRIS: u16 = 76; -/// Infineon Technologies 32-bit emb.proc -pub const EM_JAVELIN: u16 = 77; -/// Element 14 64-bit DSP Processor -pub const EM_FIREPATH: u16 = 78; -/// LSI Logic 16-bit DSP Processor -pub const EM_ZSP: u16 = 79; -/// Donald Knuth's educational 64-bit proc -pub const EM_MMIX: u16 = 80; -/// Harvard University machine-independent object files -pub const EM_HUANY: u16 = 81; -/// SiTera Prism -pub const EM_PRISM: u16 = 82; -/// Atmel AVR 8-bit microcontroller -pub const EM_AVR: u16 = 83; -/// Fujitsu FR30 -pub const EM_FR30: u16 = 84; -/// Mitsubishi D10V -pub const EM_D10V: u16 = 85; -/// Mitsubishi D30V -pub const EM_D30V: u16 = 86; -/// NEC v850 -pub const EM_V850: u16 = 87; -/// Mitsubishi M32R -pub 
const EM_M32R: u16 = 88; -/// Matsushita MN10300 -pub const EM_MN10300: u16 = 89; -/// Matsushita MN10200 -pub const EM_MN10200: u16 = 90; -/// picoJava -pub const EM_PJ: u16 = 91; -/// OpenRISC 32-bit embedded processor -pub const EM_OPENRISC: u16 = 92; -/// ARC International ARCompact -pub const EM_ARC_COMPACT: u16 = 93; -/// Tensilica Xtensa Architecture -pub const EM_XTENSA: u16 = 94; -/// Alphamosaic VideoCore -pub const EM_VIDEOCORE: u16 = 95; -/// Thompson Multimedia General Purpose Proc -pub const EM_TMM_GPP: u16 = 96; -/// National Semi. 32000 -pub const EM_NS32K: u16 = 97; -/// Tenor Network TPC -pub const EM_TPC: u16 = 98; -/// Trebia SNP 1000 -pub const EM_SNP1K: u16 = 99; -/// STMicroelectronics ST200 -pub const EM_ST200: u16 = 100; -/// Ubicom IP2xxx -pub const EM_IP2K: u16 = 101; -/// MAX processor -pub const EM_MAX: u16 = 102; -/// National Semi. CompactRISC -pub const EM_CR: u16 = 103; -/// Fujitsu F2MC16 -pub const EM_F2MC16: u16 = 104; -/// Texas Instruments msp430 -pub const EM_MSP430: u16 = 105; -/// Analog Devices Blackfin DSP -pub const EM_BLACKFIN: u16 = 106; -/// Seiko Epson S1C33 family -pub const EM_SE_C33: u16 = 107; -/// Sharp embedded microprocessor -pub const EM_SEP: u16 = 108; -/// Arca RISC -pub const EM_ARCA: u16 = 109; -/// PKU-Unity & MPRC Peking Uni. mc series -pub const EM_UNICORE: u16 = 110; -/// eXcess configurable cpu -pub const EM_EXCESS: u16 = 111; -/// Icera Semi. Deep Execution Processor -pub const EM_DXP: u16 = 112; -/// Altera Nios II -pub const EM_ALTERA_NIOS2: u16 = 113; -/// National Semi. 
CompactRISC CRX -pub const EM_CRX: u16 = 114; -/// Motorola XGATE -pub const EM_XGATE: u16 = 115; -/// Infineon C16x/XC16x -pub const EM_C166: u16 = 116; -/// Renesas M16C -pub const EM_M16C: u16 = 117; -/// Microchip Technology dsPIC30F -pub const EM_DSPIC30F: u16 = 118; -/// Freescale Communication Engine RISC -pub const EM_CE: u16 = 119; -/// Renesas M32C -pub const EM_M32C: u16 = 120; -/// Altium TSK3000 -pub const EM_TSK3000: u16 = 131; -/// Freescale RS08 -pub const EM_RS08: u16 = 132; -/// Analog Devices SHARC family -pub const EM_SHARC: u16 = 133; -/// Cyan Technology eCOG2 -pub const EM_ECOG2: u16 = 134; -/// Sunplus S+core7 RISC -pub const EM_SCORE7: u16 = 135; -/// New Japan Radio (NJR) 24-bit DSP -pub const EM_DSP24: u16 = 136; -/// Broadcom VideoCore III -pub const EM_VIDEOCORE3: u16 = 137; -/// RISC for Lattice FPGA -pub const EM_LATTICEMICO32: u16 = 138; -/// Seiko Epson C17 -pub const EM_SE_C17: u16 = 139; -/// Texas Instruments TMS320C6000 DSP -pub const EM_TI_C6000: u16 = 140; -/// Texas Instruments TMS320C2000 DSP -pub const EM_TI_C2000: u16 = 141; -/// Texas Instruments TMS320C55x DSP -pub const EM_TI_C5500: u16 = 142; -/// Texas Instruments App. Specific RISC -pub const EM_TI_ARP32: u16 = 143; -/// Texas Instruments Prog. Realtime Unit -pub const EM_TI_PRU: u16 = 144; -/// STMicroelectronics 64bit VLIW DSP -pub const EM_MMDSP_PLUS: u16 = 160; -/// Cypress M8C -pub const EM_CYPRESS_M8C: u16 = 161; -/// Renesas R32C -pub const EM_R32C: u16 = 162; -/// NXP Semi. TriMedia -pub const EM_TRIMEDIA: u16 = 163; -/// QUALCOMM Hexagon -pub const EM_HEXAGON: u16 = 164; -/// Intel 8051 and variants -pub const EM_8051: u16 = 165; -/// STMicroelectronics STxP7x -pub const EM_STXP7X: u16 = 166; -/// Andes Tech. compact code emb. RISC -pub const EM_NDS32: u16 = 167; -/// Cyan Technology eCOG1X -pub const EM_ECOG1X: u16 = 168; -/// Dallas Semi. 
MAXQ30 mc -pub const EM_MAXQ30: u16 = 169; -/// New Japan Radio (NJR) 16-bit DSP -pub const EM_XIMO16: u16 = 170; -/// M2000 Reconfigurable RISC -pub const EM_MANIK: u16 = 171; -/// Cray NV2 vector architecture -pub const EM_CRAYNV2: u16 = 172; -/// Renesas RX -pub const EM_RX: u16 = 173; -/// Imagination Tech. META -pub const EM_METAG: u16 = 174; -/// MCST Elbrus -pub const EM_MCST_ELBRUS: u16 = 175; -/// Cyan Technology eCOG16 -pub const EM_ECOG16: u16 = 176; -/// National Semi. CompactRISC CR16 -pub const EM_CR16: u16 = 177; -/// Freescale Extended Time Processing Unit -pub const EM_ETPU: u16 = 178; -/// Infineon Tech. SLE9X -pub const EM_SLE9X: u16 = 179; -/// Intel L10M -pub const EM_L10M: u16 = 180; -/// Intel K10M -pub const EM_K10M: u16 = 181; -/// ARM AARCH64 -pub const EM_AARCH64: u16 = 183; -/// Amtel 32-bit microprocessor -pub const EM_AVR32: u16 = 185; -/// STMicroelectronics STM8 -pub const EM_STM8: u16 = 186; -/// Tileta TILE64 -pub const EM_TILE64: u16 = 187; -/// Tilera TILEPro -pub const EM_TILEPRO: u16 = 188; -/// Xilinx MicroBlaze -pub const EM_MICROBLAZE: u16 = 189; -/// NVIDIA CUDA -pub const EM_CUDA: u16 = 190; -/// Tilera TILE-Gx -pub const EM_TILEGX: u16 = 191; -/// CloudShield -pub const EM_CLOUDSHIELD: u16 = 192; -/// KIPO-KAIST Core-A 1st gen. -pub const EM_COREA_1ST: u16 = 193; -/// KIPO-KAIST Core-A 2nd gen. 
-pub const EM_COREA_2ND: u16 = 194; -/// Synopsys ARCompact V2 -pub const EM_ARC_COMPACT2: u16 = 195; -/// Open8 RISC -pub const EM_OPEN8: u16 = 196; -/// Renesas RL78 -pub const EM_RL78: u16 = 197; -/// Broadcom VideoCore V -pub const EM_VIDEOCORE5: u16 = 198; -/// Renesas 78KOR -pub const EM_78KOR: u16 = 199; -/// Freescale 56800EX DSC -pub const EM_56800EX: u16 = 200; -/// Beyond BA1 -pub const EM_BA1: u16 = 201; -/// Beyond BA2 -pub const EM_BA2: u16 = 202; -/// XMOS xCORE -pub const EM_XCORE: u16 = 203; -/// Microchip 8-bit PIC(r) -pub const EM_MCHP_PIC: u16 = 204; -/// KM211 KM32 -pub const EM_KM32: u16 = 210; -/// KM211 KMX32 -pub const EM_KMX32: u16 = 211; -/// KM211 KMX16 -pub const EM_EMX16: u16 = 212; -/// KM211 KMX8 -pub const EM_EMX8: u16 = 213; -/// KM211 KVARC -pub const EM_KVARC: u16 = 214; -/// Paneve CDP -pub const EM_CDP: u16 = 215; -/// Cognitive Smart Memory Processor -pub const EM_COGE: u16 = 216; -/// Bluechip CoolEngine -pub const EM_COOL: u16 = 217; -/// Nanoradio Optimized RISC -pub const EM_NORC: u16 = 218; -/// CSR Kalimba -pub const EM_CSR_KALIMBA: u16 = 219; -/// Zilog Z80 -pub const EM_Z80: u16 = 220; -/// Controls and Data Services VISIUMcore -pub const EM_VISIUM: u16 = 221; -/// FTDI Chip FT32 -pub const EM_FT32: u16 = 222; -/// Moxie processor -pub const EM_MOXIE: u16 = 223; -/// AMD GPU -pub const EM_AMDGPU: u16 = 224; -/// RISC-V -pub const EM_RISCV: u16 = 243; -/// Linux BPF -- in-kernel virtual machine -pub const EM_BPF: u16 = 247; -/// C-SKY -pub const EM_CSKY: u16 = 252; -/// Loongson LoongArch -pub const EM_LOONGARCH: u16 = 258; -/// Solana Binary Format -pub const EM_SBF: u16 = 263; -/// Digital Alpha -pub const EM_ALPHA: u16 = 0x9026; - -// Values for `FileHeader*::e_version` and `Ident::version`. -/// Invalid ELF version. -pub const EV_NONE: u8 = 0; -/// Current ELF version. -pub const EV_CURRENT: u8 = 1; - -/// Section header. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SectionHeader32 { - /// Section name. 
- /// - /// This is an offset into the section header string table. - pub sh_name: U32, - /// Section type. One of the `SHT_*` constants. - pub sh_type: U32, - /// Section flags. A combination of the `SHF_*` constants. - pub sh_flags: U32, - /// Section virtual address at execution. - pub sh_addr: U32, - /// Section file offset. - pub sh_offset: U32, - /// Section size in bytes. - pub sh_size: U32, - /// Link to another section. - /// - /// The section relationship depends on the `sh_type` value. - pub sh_link: U32, - /// Additional section information. - /// - /// The meaning of this field depends on the `sh_type` value. - pub sh_info: U32, - /// Section alignment. - pub sh_addralign: U32, - /// Entry size if the section holds a table. - pub sh_entsize: U32, -} - -/// Section header. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SectionHeader64 { - /// Section name. - /// - /// This is an offset into the section header string table. - pub sh_name: U32, - /// Section type. One of the `SHT_*` constants. - pub sh_type: U32, - /// Section flags. A combination of the `SHF_*` constants. - pub sh_flags: U64, - /// Section virtual address at execution. - pub sh_addr: U64, - /// Section file offset. - pub sh_offset: U64, - /// Section size in bytes. - pub sh_size: U64, - /// Link to another section. - /// - /// The section relationship depends on the `sh_type` value. - pub sh_link: U32, - /// Additional section information. - /// - /// The meaning of this field depends on the `sh_type` value. - pub sh_info: U32, - /// Section alignment. - pub sh_addralign: U64, - /// Entry size if the section holds a table. - pub sh_entsize: U64, -} - -// Special values for section indices. -/// Undefined section. -pub const SHN_UNDEF: u16 = 0; -/// OS-specific range start. -/// Start of reserved section indices. -pub const SHN_LORESERVE: u16 = 0xff00; -/// Start of processor-specific section indices. 
-pub const SHN_LOPROC: u16 = 0xff00; -/// End of processor-specific section indices. -pub const SHN_HIPROC: u16 = 0xff1f; -/// Start of OS-specific section indices. -pub const SHN_LOOS: u16 = 0xff20; -/// End of OS-specific section indices. -pub const SHN_HIOS: u16 = 0xff3f; -/// Associated symbol is absolute. -pub const SHN_ABS: u16 = 0xfff1; -/// Associated symbol is common. -pub const SHN_COMMON: u16 = 0xfff2; -/// Section index is in the `SHT_SYMTAB_SHNDX` section. -pub const SHN_XINDEX: u16 = 0xffff; -/// End of reserved section indices. -pub const SHN_HIRESERVE: u16 = 0xffff; - -// Values for `SectionHeader*::sh_type`. -/// Section header table entry is unused. -pub const SHT_NULL: u32 = 0; -/// Program data. -pub const SHT_PROGBITS: u32 = 1; -/// Symbol table. -pub const SHT_SYMTAB: u32 = 2; -/// String table. -pub const SHT_STRTAB: u32 = 3; -/// Relocation entries with explicit addends. -pub const SHT_RELA: u32 = 4; -/// Symbol hash table. -pub const SHT_HASH: u32 = 5; -/// Dynamic linking information. -pub const SHT_DYNAMIC: u32 = 6; -/// Notes. -pub const SHT_NOTE: u32 = 7; -/// Program space with no data (bss). -pub const SHT_NOBITS: u32 = 8; -/// Relocation entries without explicit addends. -pub const SHT_REL: u32 = 9; -/// Reserved section type. -pub const SHT_SHLIB: u32 = 10; -/// Dynamic linker symbol table. -pub const SHT_DYNSYM: u32 = 11; -/// Array of constructors. -pub const SHT_INIT_ARRAY: u32 = 14; -/// Array of destructors. -pub const SHT_FINI_ARRAY: u32 = 15; -/// Array of pre-constructors. -pub const SHT_PREINIT_ARRAY: u32 = 16; -/// Section group. -pub const SHT_GROUP: u32 = 17; -/// Extended section indices for a symbol table. -pub const SHT_SYMTAB_SHNDX: u32 = 18; -/// Start of OS-specific section types. -pub const SHT_LOOS: u32 = 0x6000_0000; -/// Object attributes. -pub const SHT_GNU_ATTRIBUTES: u32 = 0x6fff_fff5; -/// GNU-style hash table. 
-pub const SHT_GNU_HASH: u32 = 0x6fff_fff6; -/// Prelink library list -pub const SHT_GNU_LIBLIST: u32 = 0x6fff_fff7; -/// Checksum for DSO content. -pub const SHT_CHECKSUM: u32 = 0x6fff_fff8; -/// Sun-specific low bound. -pub const SHT_LOSUNW: u32 = 0x6fff_fffa; -#[allow(non_upper_case_globals)] -pub const SHT_SUNW_move: u32 = 0x6fff_fffa; -pub const SHT_SUNW_COMDAT: u32 = 0x6fff_fffb; -#[allow(non_upper_case_globals)] -pub const SHT_SUNW_syminfo: u32 = 0x6fff_fffc; -/// Version definition section. -#[allow(non_upper_case_globals)] -pub const SHT_GNU_VERDEF: u32 = 0x6fff_fffd; -/// Version needs section. -#[allow(non_upper_case_globals)] -pub const SHT_GNU_VERNEED: u32 = 0x6fff_fffe; -/// Version symbol table. -#[allow(non_upper_case_globals)] -pub const SHT_GNU_VERSYM: u32 = 0x6fff_ffff; -/// Sun-specific high bound. -pub const SHT_HISUNW: u32 = 0x6fff_ffff; -/// End of OS-specific section types. -pub const SHT_HIOS: u32 = 0x6fff_ffff; -/// Start of processor-specific section types. -pub const SHT_LOPROC: u32 = 0x7000_0000; -/// End of processor-specific section types. -pub const SHT_HIPROC: u32 = 0x7fff_ffff; -/// Start of application-specific section types. -pub const SHT_LOUSER: u32 = 0x8000_0000; -/// End of application-specific section types. -pub const SHT_HIUSER: u32 = 0x8fff_ffff; - -// Values for `SectionHeader*::sh_flags`. -/// Section is writable. -pub const SHF_WRITE: u32 = 1 << 0; -/// Section occupies memory during execution. -pub const SHF_ALLOC: u32 = 1 << 1; -/// Section is executable. -pub const SHF_EXECINSTR: u32 = 1 << 2; -/// Section may be be merged to eliminate duplication. -pub const SHF_MERGE: u32 = 1 << 4; -/// Section contains nul-terminated strings. -pub const SHF_STRINGS: u32 = 1 << 5; -/// The `sh_info` field contains a section header table index. -pub const SHF_INFO_LINK: u32 = 1 << 6; -/// Section has special ordering requirements when combining sections. 
-pub const SHF_LINK_ORDER: u32 = 1 << 7; -/// Section requires special OS-specific handling. -pub const SHF_OS_NONCONFORMING: u32 = 1 << 8; -/// Section is a member of a group. -pub const SHF_GROUP: u32 = 1 << 9; -/// Section holds thread-local storage. -pub const SHF_TLS: u32 = 1 << 10; -/// Section is compressed. -/// -/// Compressed sections begin with one of the `CompressionHeader*` headers. -pub const SHF_COMPRESSED: u32 = 1 << 11; -/// OS-specific section flags. -pub const SHF_MASKOS: u32 = 0x0ff0_0000; -/// Processor-specific section flags. -pub const SHF_MASKPROC: u32 = 0xf000_0000; -/// This section is excluded from the final executable or shared library. -pub const SHF_EXCLUDE: u32 = 0x8000_0000; - -/// Section compression header. -/// -/// Used when `SHF_COMPRESSED` is set. -/// -/// Note: this type currently allows for misaligned headers, but that may be -/// changed in a future version. -#[derive(Debug, Default, Clone, Copy)] -#[repr(C)] -pub struct CompressionHeader32 { - /// Compression format. One of the `ELFCOMPRESS_*` values. - pub ch_type: U32Bytes, - /// Uncompressed data size. - pub ch_size: U32Bytes, - /// Uncompressed data alignment. - pub ch_addralign: U32Bytes, -} - -/// Section compression header. -/// -/// Used when `SHF_COMPRESSED` is set. -/// -/// Note: this type currently allows for misaligned headers, but that may be -/// changed in a future version. -#[derive(Debug, Default, Clone, Copy)] -#[repr(C)] -pub struct CompressionHeader64 { - /// Compression format. One of the `ELFCOMPRESS_*` values. - pub ch_type: U32Bytes, - /// Reserved. - pub ch_reserved: U32Bytes, - /// Uncompressed data size. - pub ch_size: U64Bytes, - /// Uncompressed data alignment. - pub ch_addralign: U64Bytes, -} - -/// ZLIB/DEFLATE algorithm. -pub const ELFCOMPRESS_ZLIB: u32 = 1; -/// Zstandard algorithm. -pub const ELFCOMPRESS_ZSTD: u32 = 2; -/// Start of OS-specific compression types. 
-pub const ELFCOMPRESS_LOOS: u32 = 0x6000_0000; -/// End of OS-specific compression types. -pub const ELFCOMPRESS_HIOS: u32 = 0x6fff_ffff; -/// Start of processor-specific compression types. -pub const ELFCOMPRESS_LOPROC: u32 = 0x7000_0000; -/// End of processor-specific compression types. -pub const ELFCOMPRESS_HIPROC: u32 = 0x7fff_ffff; - -// Values for the flag entry for section groups. -/// Mark group as COMDAT. -pub const GRP_COMDAT: u32 = 1; - -/// Symbol table entry. -#[derive(Debug, Default, Clone, Copy)] -#[repr(C)] -pub struct Sym32 { - /// Symbol name. - /// - /// This is an offset into the symbol string table. - pub st_name: U32, - /// Symbol value. - pub st_value: U32, - /// Symbol size. - pub st_size: U32, - /// Symbol type and binding. - /// - /// Use the `st_type` and `st_bind` methods to access this value. - pub st_info: u8, - /// Symbol visibility. - /// - /// Use the `st_visibility` method to access this value. - pub st_other: u8, - /// Section index or one of the `SHN_*` values. - pub st_shndx: U16, -} - -impl Sym32 { - /// Get the `st_bind` component of the `st_info` field. - #[inline] - pub fn st_bind(&self) -> u8 { - self.st_info >> 4 - } - - /// Get the `st_type` component of the `st_info` field. - #[inline] - pub fn st_type(&self) -> u8 { - self.st_info & 0xf - } - - /// Set the `st_info` field given the `st_bind` and `st_type` components. - #[inline] - pub fn set_st_info(&mut self, st_bind: u8, st_type: u8) { - self.st_info = (st_bind << 4) + (st_type & 0xf); - } - - /// Get the `st_visibility` component of the `st_info` field. - #[inline] - pub fn st_visibility(&self) -> u8 { - self.st_other & 0x3 - } -} - -/// Symbol table entry. -#[derive(Debug, Default, Clone, Copy)] -#[repr(C)] -pub struct Sym64 { - /// Symbol name. - /// - /// This is an offset into the symbol string table. - pub st_name: U32, - /// Symbol type and binding. - /// - /// Use the `st_bind` and `st_type` methods to access this value. 
- pub st_info: u8, - /// Symbol visibility. - /// - /// Use the `st_visibility` method to access this value. - pub st_other: u8, - /// Section index or one of the `SHN_*` values. - pub st_shndx: U16, - /// Symbol value. - pub st_value: U64, - /// Symbol size. - pub st_size: U64, -} - -impl Sym64 { - /// Get the `st_bind` component of the `st_info` field. - #[inline] - pub fn st_bind(&self) -> u8 { - self.st_info >> 4 - } - - /// Get the `st_type` component of the `st_info` field. - #[inline] - pub fn st_type(&self) -> u8 { - self.st_info & 0xf - } - - /// Set the `st_info` field given the `st_bind` and `st_type` components. - #[inline] - pub fn set_st_info(&mut self, st_bind: u8, st_type: u8) { - self.st_info = (st_bind << 4) + (st_type & 0xf); - } - - /// Get the `st_visibility` component of the `st_info` field. - #[inline] - pub fn st_visibility(&self) -> u8 { - self.st_other & 0x3 - } -} - -/// Additional information about a `Sym32`. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Syminfo32 { - /// Direct bindings, symbol bound to. - pub si_boundto: U16, - /// Per symbol flags. - pub si_flags: U16, -} - -/// Additional information about a `Sym64`. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Syminfo64 { - /// Direct bindings, symbol bound to. - pub si_boundto: U16, - /// Per symbol flags. - pub si_flags: U16, -} - -// Values for `Syminfo*::si_boundto`. -/// Symbol bound to self -pub const SYMINFO_BT_SELF: u16 = 0xffff; -/// Symbol bound to parent -pub const SYMINFO_BT_PARENT: u16 = 0xfffe; -/// Beginning of reserved entries -pub const SYMINFO_BT_LOWRESERVE: u16 = 0xff00; - -// Values for `Syminfo*::si_flags`. 
-/// Direct bound symbol -pub const SYMINFO_FLG_DIRECT: u16 = 0x0001; -/// Pass-thru symbol for translator -pub const SYMINFO_FLG_PASSTHRU: u16 = 0x0002; -/// Symbol is a copy-reloc -pub const SYMINFO_FLG_COPY: u16 = 0x0004; -/// Symbol bound to object to be lazy loaded -pub const SYMINFO_FLG_LAZYLOAD: u16 = 0x0008; - -// Syminfo version values. -pub const SYMINFO_NONE: u16 = 0; -pub const SYMINFO_CURRENT: u16 = 1; -pub const SYMINFO_NUM: u16 = 2; - -// Values for bind component of `Sym*::st_info`. -/// Local symbol. -pub const STB_LOCAL: u8 = 0; -/// Global symbol. -pub const STB_GLOBAL: u8 = 1; -/// Weak symbol. -pub const STB_WEAK: u8 = 2; -/// Start of OS-specific symbol binding. -pub const STB_LOOS: u8 = 10; -/// Unique symbol. -pub const STB_GNU_UNIQUE: u8 = 10; -/// End of OS-specific symbol binding. -pub const STB_HIOS: u8 = 12; -/// Start of processor-specific symbol binding. -pub const STB_LOPROC: u8 = 13; -/// End of processor-specific symbol binding. -pub const STB_HIPROC: u8 = 15; - -// Values for type component of `Sym*::st_info`. -/// Symbol type is unspecified. -pub const STT_NOTYPE: u8 = 0; -/// Symbol is a data object. -pub const STT_OBJECT: u8 = 1; -/// Symbol is a code object. -pub const STT_FUNC: u8 = 2; -/// Symbol is associated with a section. -pub const STT_SECTION: u8 = 3; -/// Symbol's name is a file name. -pub const STT_FILE: u8 = 4; -/// Symbol is a common data object. -pub const STT_COMMON: u8 = 5; -/// Symbol is a thread-local storage object. -pub const STT_TLS: u8 = 6; -/// Start of OS-specific symbol types. -pub const STT_LOOS: u8 = 10; -/// Symbol is an indirect code object. -pub const STT_GNU_IFUNC: u8 = 10; -/// End of OS-specific symbol types. -pub const STT_HIOS: u8 = 12; -/// Start of processor-specific symbol types. -pub const STT_LOPROC: u8 = 13; -/// End of processor-specific symbol types. -pub const STT_HIPROC: u8 = 15; - -// Values for visibility component of `Symbol*::st_other`. -/// Default symbol visibility rules. 
-pub const STV_DEFAULT: u8 = 0; -/// Processor specific hidden class. -pub const STV_INTERNAL: u8 = 1; -/// Symbol is not visible to other components. -pub const STV_HIDDEN: u8 = 2; -/// Symbol is visible to other components, but is not preemptible. -pub const STV_PROTECTED: u8 = 3; - -/// Relocation table entry without explicit addend. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Rel32 { - /// Relocation address. - pub r_offset: U32, - /// Relocation type and symbol index. - pub r_info: U32, -} - -impl Rel32 { - /// Get the `r_sym` component of the `r_info` field. - #[inline] - pub fn r_sym(&self, endian: E) -> u32 { - self.r_info.get(endian) >> 8 - } - - /// Get the `r_type` component of the `r_info` field. - #[inline] - pub fn r_type(&self, endian: E) -> u32 { - self.r_info.get(endian) & 0xff - } - - /// Calculate the `r_info` field given the `r_sym` and `r_type` components. - pub fn r_info(endian: E, r_sym: u32, r_type: u8) -> U32 { - U32::new(endian, (r_sym << 8) | u32::from(r_type)) - } - - /// Set the `r_info` field given the `r_sym` and `r_type` components. - pub fn set_r_info(&mut self, endian: E, r_sym: u32, r_type: u8) { - self.r_info = Self::r_info(endian, r_sym, r_type) - } -} - -/// Relocation table entry with explicit addend. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Rela32 { - /// Relocation address. - pub r_offset: U32, - /// Relocation type and symbol index. - pub r_info: U32, - /// Explicit addend. - pub r_addend: I32, -} - -impl Rela32 { - /// Get the `r_sym` component of the `r_info` field. - #[inline] - pub fn r_sym(&self, endian: E) -> u32 { - self.r_info.get(endian) >> 8 - } - - /// Get the `r_type` component of the `r_info` field. - #[inline] - pub fn r_type(&self, endian: E) -> u32 { - self.r_info.get(endian) & 0xff - } - - /// Calculate the `r_info` field given the `r_sym` and `r_type` components. 
- pub fn r_info(endian: E, r_sym: u32, r_type: u8) -> U32 { - U32::new(endian, (r_sym << 8) | u32::from(r_type)) - } - - /// Set the `r_info` field given the `r_sym` and `r_type` components. - pub fn set_r_info(&mut self, endian: E, r_sym: u32, r_type: u8) { - self.r_info = Self::r_info(endian, r_sym, r_type) - } -} - -impl From> for Rela32 { - fn from(rel: Rel32) -> Self { - Rela32 { - r_offset: rel.r_offset, - r_info: rel.r_info, - r_addend: I32::default(), - } - } -} - -/// Relocation table entry without explicit addend. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Rel64 { - /// Relocation address. - pub r_offset: U64, - /// Relocation type and symbol index. - pub r_info: U64, -} - -impl Rel64 { - /// Get the `r_sym` component of the `r_info` field. - #[inline] - pub fn r_sym(&self, endian: E) -> u32 { - (self.r_info.get(endian) >> 32) as u32 - } - - /// Get the `r_type` component of the `r_info` field. - #[inline] - pub fn r_type(&self, endian: E) -> u32 { - (self.r_info.get(endian) & 0xffff_ffff) as u32 - } - - /// Calculate the `r_info` field given the `r_sym` and `r_type` components. - pub fn r_info(endian: E, r_sym: u32, r_type: u32) -> U64 { - U64::new(endian, (u64::from(r_sym) << 32) | u64::from(r_type)) - } - - /// Set the `r_info` field given the `r_sym` and `r_type` components. - pub fn set_r_info(&mut self, endian: E, r_sym: u32, r_type: u32) { - self.r_info = Self::r_info(endian, r_sym, r_type) - } -} - -impl From> for Rela64 { - fn from(rel: Rel64) -> Self { - Rela64 { - r_offset: rel.r_offset, - r_info: rel.r_info, - r_addend: I64::default(), - } - } -} - -/// Relocation table entry with explicit addend. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Rela64 { - /// Relocation address. - pub r_offset: U64, - /// Relocation type and symbol index. - pub r_info: U64, - /// Explicit addend. 
- pub r_addend: I64, -} - -impl Rela64 { - pub(crate) fn get_r_info(&self, endian: E, is_mips64el: bool) -> u64 { - let mut t = self.r_info.get(endian); - if is_mips64el { - t = (t << 32) - | ((t >> 8) & 0xff000000) - | ((t >> 24) & 0x00ff0000) - | ((t >> 40) & 0x0000ff00) - | ((t >> 56) & 0x000000ff); - } - t - } - - /// Get the `r_sym` component of the `r_info` field. - #[inline] - pub fn r_sym(&self, endian: E, is_mips64el: bool) -> u32 { - (self.get_r_info(endian, is_mips64el) >> 32) as u32 - } - - /// Get the `r_type` component of the `r_info` field. - #[inline] - pub fn r_type(&self, endian: E, is_mips64el: bool) -> u32 { - (self.get_r_info(endian, is_mips64el) & 0xffff_ffff) as u32 - } - - /// Calculate the `r_info` field given the `r_sym` and `r_type` components. - pub fn r_info(endian: E, is_mips64el: bool, r_sym: u32, r_type: u32) -> U64 { - let mut t = (u64::from(r_sym) << 32) | u64::from(r_type); - if is_mips64el { - t = (t >> 32) - | ((t & 0xff000000) << 8) - | ((t & 0x00ff0000) << 24) - | ((t & 0x0000ff00) << 40) - | ((t & 0x000000ff) << 56); - } - U64::new(endian, t) - } - - /// Set the `r_info` field given the `r_sym` and `r_type` components. - pub fn set_r_info(&mut self, endian: E, is_mips64el: bool, r_sym: u32, r_type: u32) { - self.r_info = Self::r_info(endian, is_mips64el, r_sym, r_type); - } -} - -/// Program segment header. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ProgramHeader32 { - /// Segment type. One of the `PT_*` constants. - pub p_type: U32, - /// Segment file offset. - pub p_offset: U32, - /// Segment virtual address. - pub p_vaddr: U32, - /// Segment physical address. - pub p_paddr: U32, - /// Segment size in the file. - pub p_filesz: U32, - /// Segment size in memory. - pub p_memsz: U32, - /// Segment flags. A combination of the `PF_*` constants. - pub p_flags: U32, - /// Segment alignment. - pub p_align: U32, -} - -/// Program segment header. 
-#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ProgramHeader64 { - /// Segment type. One of the `PT_*` constants. - pub p_type: U32, - /// Segment flags. A combination of the `PF_*` constants. - pub p_flags: U32, - /// Segment file offset. - pub p_offset: U64, - /// Segment virtual address. - pub p_vaddr: U64, - /// Segment physical address. - pub p_paddr: U64, - /// Segment size in the file. - pub p_filesz: U64, - /// Segment size in memory. - pub p_memsz: U64, - /// Segment alignment. - pub p_align: U64, -} - -/// Special value for `FileHeader*::e_phnum`. -/// -/// This indicates that the real number of program headers is too large to fit into e_phnum. -/// Instead the real value is in the field `sh_info` of section 0. -pub const PN_XNUM: u16 = 0xffff; - -// Values for `ProgramHeader*::p_type`. -/// Program header table entry is unused. -pub const PT_NULL: u32 = 0; -/// Loadable program segment. -pub const PT_LOAD: u32 = 1; -/// Dynamic linking information. -pub const PT_DYNAMIC: u32 = 2; -/// Program interpreter. -pub const PT_INTERP: u32 = 3; -/// Auxiliary information. -pub const PT_NOTE: u32 = 4; -/// Reserved. -pub const PT_SHLIB: u32 = 5; -/// Segment contains the program header table. -pub const PT_PHDR: u32 = 6; -/// Thread-local storage segment. -pub const PT_TLS: u32 = 7; -/// Start of OS-specific segment types. -pub const PT_LOOS: u32 = 0x6000_0000; -/// GCC `.eh_frame_hdr` segment. -pub const PT_GNU_EH_FRAME: u32 = 0x6474_e550; -/// Indicates stack executability. -pub const PT_GNU_STACK: u32 = 0x6474_e551; -/// Read-only after relocation. -pub const PT_GNU_RELRO: u32 = 0x6474_e552; -/// Segment containing `.note.gnu.property` section. -pub const PT_GNU_PROPERTY: u32 = 0x6474_e553; -/// End of OS-specific segment types. -pub const PT_HIOS: u32 = 0x6fff_ffff; -/// Start of processor-specific segment types. -pub const PT_LOPROC: u32 = 0x7000_0000; -/// End of processor-specific segment types. 
-pub const PT_HIPROC: u32 = 0x7fff_ffff; - -// Values for `ProgramHeader*::p_flags`. -/// Segment is executable. -pub const PF_X: u32 = 1 << 0; -/// Segment is writable. -pub const PF_W: u32 = 1 << 1; -/// Segment is readable. -pub const PF_R: u32 = 1 << 2; -/// OS-specific segment flags. -pub const PF_MASKOS: u32 = 0x0ff0_0000; -/// Processor-specific segment flags. -pub const PF_MASKPROC: u32 = 0xf000_0000; - -/// Note name for core files. -pub const ELF_NOTE_CORE: &[u8] = b"CORE"; -/// Note name for linux core files. -/// -/// Notes in linux core files may also use `ELF_NOTE_CORE`. -pub const ELF_NOTE_LINUX: &[u8] = b"LINUX"; - -// Values for `NoteHeader*::n_type` in core files. -// -/// Contains copy of prstatus struct. -pub const NT_PRSTATUS: u32 = 1; -/// Contains copy of fpregset struct. -pub const NT_PRFPREG: u32 = 2; -/// Contains copy of fpregset struct. -pub const NT_FPREGSET: u32 = 2; -/// Contains copy of prpsinfo struct. -pub const NT_PRPSINFO: u32 = 3; -/// Contains copy of prxregset struct. -pub const NT_PRXREG: u32 = 4; -/// Contains copy of task structure. -pub const NT_TASKSTRUCT: u32 = 4; -/// String from sysinfo(SI_PLATFORM). -pub const NT_PLATFORM: u32 = 5; -/// Contains copy of auxv array. -pub const NT_AUXV: u32 = 6; -/// Contains copy of gwindows struct. -pub const NT_GWINDOWS: u32 = 7; -/// Contains copy of asrset struct. -pub const NT_ASRS: u32 = 8; -/// Contains copy of pstatus struct. -pub const NT_PSTATUS: u32 = 10; -/// Contains copy of psinfo struct. -pub const NT_PSINFO: u32 = 13; -/// Contains copy of prcred struct. -pub const NT_PRCRED: u32 = 14; -/// Contains copy of utsname struct. -pub const NT_UTSNAME: u32 = 15; -/// Contains copy of lwpstatus struct. -pub const NT_LWPSTATUS: u32 = 16; -/// Contains copy of lwpinfo struct. -pub const NT_LWPSINFO: u32 = 17; -/// Contains copy of fprxregset struct. -pub const NT_PRFPXREG: u32 = 20; -/// Contains copy of siginfo_t, size might increase. 
-pub const NT_SIGINFO: u32 = 0x5349_4749; -/// Contains information about mapped files. -pub const NT_FILE: u32 = 0x4649_4c45; -/// Contains copy of user_fxsr_struct. -pub const NT_PRXFPREG: u32 = 0x46e6_2b7f; -/// PowerPC Altivec/VMX registers. -pub const NT_PPC_VMX: u32 = 0x100; -/// PowerPC SPE/EVR registers. -pub const NT_PPC_SPE: u32 = 0x101; -/// PowerPC VSX registers. -pub const NT_PPC_VSX: u32 = 0x102; -/// Target Address Register. -pub const NT_PPC_TAR: u32 = 0x103; -/// Program Priority Register. -pub const NT_PPC_PPR: u32 = 0x104; -/// Data Stream Control Register. -pub const NT_PPC_DSCR: u32 = 0x105; -/// Event Based Branch Registers. -pub const NT_PPC_EBB: u32 = 0x106; -/// Performance Monitor Registers. -pub const NT_PPC_PMU: u32 = 0x107; -/// TM checkpointed GPR Registers. -pub const NT_PPC_TM_CGPR: u32 = 0x108; -/// TM checkpointed FPR Registers. -pub const NT_PPC_TM_CFPR: u32 = 0x109; -/// TM checkpointed VMX Registers. -pub const NT_PPC_TM_CVMX: u32 = 0x10a; -/// TM checkpointed VSX Registers. -pub const NT_PPC_TM_CVSX: u32 = 0x10b; -/// TM Special Purpose Registers. -pub const NT_PPC_TM_SPR: u32 = 0x10c; -/// TM checkpointed Target Address Register. -pub const NT_PPC_TM_CTAR: u32 = 0x10d; -/// TM checkpointed Program Priority Register. -pub const NT_PPC_TM_CPPR: u32 = 0x10e; -/// TM checkpointed Data Stream Control Register. -pub const NT_PPC_TM_CDSCR: u32 = 0x10f; -/// Memory Protection Keys registers. -pub const NT_PPC_PKEY: u32 = 0x110; -/// i386 TLS slots (struct user_desc). -pub const NT_386_TLS: u32 = 0x200; -/// x86 io permission bitmap (1=deny). -pub const NT_386_IOPERM: u32 = 0x201; -/// x86 extended state using xsave. -pub const NT_X86_XSTATE: u32 = 0x202; -/// s390 upper register halves. -pub const NT_S390_HIGH_GPRS: u32 = 0x300; -/// s390 timer register. -pub const NT_S390_TIMER: u32 = 0x301; -/// s390 TOD clock comparator register. -pub const NT_S390_TODCMP: u32 = 0x302; -/// s390 TOD programmable register. 
-pub const NT_S390_TODPREG: u32 = 0x303; -/// s390 control registers. -pub const NT_S390_CTRS: u32 = 0x304; -/// s390 prefix register. -pub const NT_S390_PREFIX: u32 = 0x305; -/// s390 breaking event address. -pub const NT_S390_LAST_BREAK: u32 = 0x306; -/// s390 system call restart data. -pub const NT_S390_SYSTEM_CALL: u32 = 0x307; -/// s390 transaction diagnostic block. -pub const NT_S390_TDB: u32 = 0x308; -/// s390 vector registers 0-15 upper half. -pub const NT_S390_VXRS_LOW: u32 = 0x309; -/// s390 vector registers 16-31. -pub const NT_S390_VXRS_HIGH: u32 = 0x30a; -/// s390 guarded storage registers. -pub const NT_S390_GS_CB: u32 = 0x30b; -/// s390 guarded storage broadcast control block. -pub const NT_S390_GS_BC: u32 = 0x30c; -/// s390 runtime instrumentation. -pub const NT_S390_RI_CB: u32 = 0x30d; -/// ARM VFP/NEON registers. -pub const NT_ARM_VFP: u32 = 0x400; -/// ARM TLS register. -pub const NT_ARM_TLS: u32 = 0x401; -/// ARM hardware breakpoint registers. -pub const NT_ARM_HW_BREAK: u32 = 0x402; -/// ARM hardware watchpoint registers. -pub const NT_ARM_HW_WATCH: u32 = 0x403; -/// ARM system call number. -pub const NT_ARM_SYSTEM_CALL: u32 = 0x404; -/// ARM Scalable Vector Extension registers. -pub const NT_ARM_SVE: u32 = 0x405; -/// Vmcore Device Dump Note. -pub const NT_VMCOREDD: u32 = 0x700; -/// MIPS DSP ASE registers. -pub const NT_MIPS_DSP: u32 = 0x800; -/// MIPS floating-point mode. -pub const NT_MIPS_FP_MODE: u32 = 0x801; - -/// Note type for version string. -/// -/// This note may appear in object files. -/// -/// It must be handled as a special case because it has no descriptor, and instead -/// uses the note name as the version string. -pub const NT_VERSION: u32 = 1; - -/// Dynamic section entry. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Dyn32 { - /// Dynamic entry type. - pub d_tag: U32, - /// Value (integer or address). - pub d_val: U32, -} - -/// Dynamic section entry. 
-#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Dyn64 { - /// Dynamic entry type. - pub d_tag: U64, - /// Value (integer or address). - pub d_val: U64, -} - -// Values for `Dyn*::d_tag`. - -/// Marks end of dynamic section -pub const DT_NULL: u32 = 0; -/// Name of needed library -pub const DT_NEEDED: u32 = 1; -/// Size in bytes of PLT relocs -pub const DT_PLTRELSZ: u32 = 2; -/// Processor defined value -pub const DT_PLTGOT: u32 = 3; -/// Address of symbol hash table -pub const DT_HASH: u32 = 4; -/// Address of string table -pub const DT_STRTAB: u32 = 5; -/// Address of symbol table -pub const DT_SYMTAB: u32 = 6; -/// Address of Rela relocs -pub const DT_RELA: u32 = 7; -/// Total size of Rela relocs -pub const DT_RELASZ: u32 = 8; -/// Size of one Rela reloc -pub const DT_RELAENT: u32 = 9; -/// Size of string table -pub const DT_STRSZ: u32 = 10; -/// Size of one symbol table entry -pub const DT_SYMENT: u32 = 11; -/// Address of init function -pub const DT_INIT: u32 = 12; -/// Address of termination function -pub const DT_FINI: u32 = 13; -/// Name of shared object -pub const DT_SONAME: u32 = 14; -/// Library search path (deprecated) -pub const DT_RPATH: u32 = 15; -/// Start symbol search here -pub const DT_SYMBOLIC: u32 = 16; -/// Address of Rel relocs -pub const DT_REL: u32 = 17; -/// Total size of Rel relocs -pub const DT_RELSZ: u32 = 18; -/// Size of one Rel reloc -pub const DT_RELENT: u32 = 19; -/// Type of reloc in PLT -pub const DT_PLTREL: u32 = 20; -/// For debugging; unspecified -pub const DT_DEBUG: u32 = 21; -/// Reloc might modify .text -pub const DT_TEXTREL: u32 = 22; -/// Address of PLT relocs -pub const DT_JMPREL: u32 = 23; -/// Process relocations of object -pub const DT_BIND_NOW: u32 = 24; -/// Array with addresses of init fct -pub const DT_INIT_ARRAY: u32 = 25; -/// Array with addresses of fini fct -pub const DT_FINI_ARRAY: u32 = 26; -/// Size in bytes of DT_INIT_ARRAY -pub const DT_INIT_ARRAYSZ: u32 = 27; -/// Size in bytes of DT_FINI_ARRAY 
-pub const DT_FINI_ARRAYSZ: u32 = 28; -/// Library search path -pub const DT_RUNPATH: u32 = 29; -/// Flags for the object being loaded -pub const DT_FLAGS: u32 = 30; -/// Start of encoded range -pub const DT_ENCODING: u32 = 32; -/// Array with addresses of preinit fct -pub const DT_PREINIT_ARRAY: u32 = 32; -/// size in bytes of DT_PREINIT_ARRAY -pub const DT_PREINIT_ARRAYSZ: u32 = 33; -/// Address of SYMTAB_SHNDX section -pub const DT_SYMTAB_SHNDX: u32 = 34; -/// Start of OS-specific -pub const DT_LOOS: u32 = 0x6000_000d; -/// End of OS-specific -pub const DT_HIOS: u32 = 0x6fff_f000; -/// Start of processor-specific -pub const DT_LOPROC: u32 = 0x7000_0000; -/// End of processor-specific -pub const DT_HIPROC: u32 = 0x7fff_ffff; - -// `DT_*` entries between `DT_VALRNGHI` & `DT_VALRNGLO` use `d_val` as a value. -pub const DT_VALRNGLO: u32 = 0x6fff_fd00; -/// Prelinking timestamp -pub const DT_GNU_PRELINKED: u32 = 0x6fff_fdf5; -/// Size of conflict section -pub const DT_GNU_CONFLICTSZ: u32 = 0x6fff_fdf6; -/// Size of library list -pub const DT_GNU_LIBLISTSZ: u32 = 0x6fff_fdf7; -pub const DT_CHECKSUM: u32 = 0x6fff_fdf8; -pub const DT_PLTPADSZ: u32 = 0x6fff_fdf9; -pub const DT_MOVEENT: u32 = 0x6fff_fdfa; -pub const DT_MOVESZ: u32 = 0x6fff_fdfb; -/// Feature selection (DTF_*). -pub const DT_FEATURE_1: u32 = 0x6fff_fdfc; -/// Flags for DT_* entries, affecting the following DT_* entry. -pub const DT_POSFLAG_1: u32 = 0x6fff_fdfd; -/// Size of syminfo table (in bytes) -pub const DT_SYMINSZ: u32 = 0x6fff_fdfe; -/// Entry size of syminfo -pub const DT_SYMINENT: u32 = 0x6fff_fdff; -pub const DT_VALRNGHI: u32 = 0x6fff_fdff; - -// `DT_*` entries between `DT_ADDRRNGHI` & `DT_ADDRRNGLO` use `d_val` as an address. -// -// If any adjustment is made to the ELF object after it has been -// built these entries will need to be adjusted. -pub const DT_ADDRRNGLO: u32 = 0x6fff_fe00; -/// GNU-style hash table. 
-pub const DT_GNU_HASH: u32 = 0x6fff_fef5; -pub const DT_TLSDESC_PLT: u32 = 0x6fff_fef6; -pub const DT_TLSDESC_GOT: u32 = 0x6fff_fef7; -/// Start of conflict section -pub const DT_GNU_CONFLICT: u32 = 0x6fff_fef8; -/// Library list -pub const DT_GNU_LIBLIST: u32 = 0x6fff_fef9; -/// Configuration information. -pub const DT_CONFIG: u32 = 0x6fff_fefa; -/// Dependency auditing. -pub const DT_DEPAUDIT: u32 = 0x6fff_fefb; -/// Object auditing. -pub const DT_AUDIT: u32 = 0x6fff_fefc; -/// PLT padding. -pub const DT_PLTPAD: u32 = 0x6fff_fefd; -/// Move table. -pub const DT_MOVETAB: u32 = 0x6fff_fefe; -/// Syminfo table. -pub const DT_SYMINFO: u32 = 0x6fff_feff; -pub const DT_ADDRRNGHI: u32 = 0x6fff_feff; - -// The versioning entry types. The next are defined as part of the -// GNU extension. -pub const DT_VERSYM: u32 = 0x6fff_fff0; -pub const DT_RELACOUNT: u32 = 0x6fff_fff9; -pub const DT_RELCOUNT: u32 = 0x6fff_fffa; -/// State flags, see DF_1_* below. -pub const DT_FLAGS_1: u32 = 0x6fff_fffb; -/// Address of version definition table -pub const DT_VERDEF: u32 = 0x6fff_fffc; -/// Number of version definitions -pub const DT_VERDEFNUM: u32 = 0x6fff_fffd; -/// Address of table with needed versions -pub const DT_VERNEED: u32 = 0x6fff_fffe; -/// Number of needed versions -pub const DT_VERNEEDNUM: u32 = 0x6fff_ffff; - -// Machine-independent extensions in the "processor-specific" range. -/// Shared object to load before self -pub const DT_AUXILIARY: u32 = 0x7fff_fffd; -/// Shared object to get values from -pub const DT_FILTER: u32 = 0x7fff_ffff; - -// Values of `Dyn*::d_val` in the `DT_FLAGS` entry. 
-/// Object may use DF_ORIGIN -pub const DF_ORIGIN: u32 = 0x0000_0001; -/// Symbol resolutions starts here -pub const DF_SYMBOLIC: u32 = 0x0000_0002; -/// Object contains text relocations -pub const DF_TEXTREL: u32 = 0x0000_0004; -/// No lazy binding for this object -pub const DF_BIND_NOW: u32 = 0x0000_0008; -/// Module uses the static TLS model -pub const DF_STATIC_TLS: u32 = 0x0000_0010; - -// Values of `Dyn*::d_val` in the `DT_FLAGS_1` entry. -/// Set RTLD_NOW for this object. -pub const DF_1_NOW: u32 = 0x0000_0001; -/// Set RTLD_GLOBAL for this object. -pub const DF_1_GLOBAL: u32 = 0x0000_0002; -/// Set RTLD_GROUP for this object. -pub const DF_1_GROUP: u32 = 0x0000_0004; -/// Set RTLD_NODELETE for this object. -pub const DF_1_NODELETE: u32 = 0x0000_0008; -/// Trigger filtee loading at runtime. -pub const DF_1_LOADFLTR: u32 = 0x0000_0010; -/// Set RTLD_INITFIRST for this object. -pub const DF_1_INITFIRST: u32 = 0x0000_0020; -/// Set RTLD_NOOPEN for this object. -pub const DF_1_NOOPEN: u32 = 0x0000_0040; -/// $ORIGIN must be handled. -pub const DF_1_ORIGIN: u32 = 0x0000_0080; -/// Direct binding enabled. -pub const DF_1_DIRECT: u32 = 0x0000_0100; -pub const DF_1_TRANS: u32 = 0x0000_0200; -/// Object is used to interpose. -pub const DF_1_INTERPOSE: u32 = 0x0000_0400; -/// Ignore default lib search path. -pub const DF_1_NODEFLIB: u32 = 0x0000_0800; -/// Object can't be dldump'ed. -pub const DF_1_NODUMP: u32 = 0x0000_1000; -/// Configuration alternative created. -pub const DF_1_CONFALT: u32 = 0x0000_2000; -/// Filtee terminates filters search. -pub const DF_1_ENDFILTEE: u32 = 0x0000_4000; -/// Disp reloc applied at build time. -pub const DF_1_DISPRELDNE: u32 = 0x0000_8000; -/// Disp reloc applied at run-time. -pub const DF_1_DISPRELPND: u32 = 0x0001_0000; -/// Object has no-direct binding. 
-pub const DF_1_NODIRECT: u32 = 0x0002_0000; -pub const DF_1_IGNMULDEF: u32 = 0x0004_0000; -pub const DF_1_NOKSYMS: u32 = 0x0008_0000; -pub const DF_1_NOHDR: u32 = 0x0010_0000; -/// Object is modified after built. -pub const DF_1_EDITED: u32 = 0x0020_0000; -pub const DF_1_NORELOC: u32 = 0x0040_0000; -/// Object has individual interposers. -pub const DF_1_SYMINTPOSE: u32 = 0x0080_0000; -/// Global auditing required. -pub const DF_1_GLOBAUDIT: u32 = 0x0100_0000; -/// Singleton symbols are used. -pub const DF_1_SINGLETON: u32 = 0x0200_0000; -pub const DF_1_STUB: u32 = 0x0400_0000; -pub const DF_1_PIE: u32 = 0x0800_0000; - -/// Version symbol information -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Versym(pub U16); - -/// Symbol is hidden. -pub const VERSYM_HIDDEN: u16 = 0x8000; -/// Symbol version index. -pub const VERSYM_VERSION: u16 = 0x7fff; - -/// Version definition sections -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Verdef { - /// Version revision - pub vd_version: U16, - /// Version information - pub vd_flags: U16, - /// Version Index - pub vd_ndx: U16, - /// Number of associated aux entries - pub vd_cnt: U16, - /// Version name hash value - pub vd_hash: U32, - /// Offset in bytes to verdaux array - pub vd_aux: U32, - /// Offset in bytes to next verdef entry - pub vd_next: U32, -} - -// Legal values for vd_version (version revision). -/// No version -pub const VER_DEF_NONE: u16 = 0; -/// Current version -pub const VER_DEF_CURRENT: u16 = 1; - -// Legal values for vd_flags (version information flags). -/// Version definition of file itself -pub const VER_FLG_BASE: u16 = 0x1; -// Legal values for vd_flags and vna_flags (version information flags). -/// Weak version identifier -pub const VER_FLG_WEAK: u16 = 0x2; - -// Versym symbol index values. -/// Symbol is local. -pub const VER_NDX_LOCAL: u16 = 0; -/// Symbol is global. -pub const VER_NDX_GLOBAL: u16 = 1; - -/// Auxiliary version information. 
-#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Verdaux { - /// Version or dependency names - pub vda_name: U32, - /// Offset in bytes to next verdaux - pub vda_next: U32, -} - -/// Version dependency. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Verneed { - /// Version of structure - pub vn_version: U16, - /// Number of associated aux entries - pub vn_cnt: U16, - /// Offset of filename for this dependency - pub vn_file: U32, - /// Offset in bytes to vernaux array - pub vn_aux: U32, - /// Offset in bytes to next verneed entry - pub vn_next: U32, -} - -// Legal values for vn_version (version revision). -/// No version -pub const VER_NEED_NONE: u16 = 0; -/// Current version -pub const VER_NEED_CURRENT: u16 = 1; - -/// Auxiliary needed version information. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Vernaux { - /// Hash value of dependency name - pub vna_hash: U32, - /// Dependency specific information - pub vna_flags: U16, - /// Version Index - pub vna_other: U16, - /// Dependency name string offset - pub vna_name: U32, - /// Offset in bytes to next vernaux entry - pub vna_next: U32, -} - -// TODO: Elf*_auxv_t, AT_* - -/// Note section entry header. -/// -/// A note consists of a header followed by a variable length name and descriptor. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct NoteHeader32 { - /// Length of the note's name. - /// - /// Some known names are defined by the `ELF_NOTE_*` constants. - pub n_namesz: U32, - /// Length of the note's descriptor. - /// - /// The content of the descriptor depends on the note name and type. - pub n_descsz: U32, - /// Type of the note. - /// - /// One of the `NT_*` constants. The note name determines which - /// `NT_*` constants are valid. - pub n_type: U32, -} - -/// Note section entry header. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct NoteHeader64 { - /// Length of the note's name. - /// - /// Some known names are defined by the `ELF_NOTE_*` constants. 
- pub n_namesz: U32, - /// Length of the note's descriptor. - /// - /// The content of the descriptor depends on the note name and type. - pub n_descsz: U32, - /// Type of the note. - /// - /// One of the `NT_*` constants. The note name determines which - /// `NT_*` constants are valid. - pub n_type: U32, -} - -/// Solaris entries in the note section have this name. -pub const ELF_NOTE_SOLARIS: &[u8] = b"SUNW Solaris"; - -// Values for `n_type` when the name is `ELF_NOTE_SOLARIS`. -/// Desired pagesize for the binary. -pub const NT_SOLARIS_PAGESIZE_HINT: u32 = 1; - -/// GNU entries in the note section have this name. -pub const ELF_NOTE_GNU: &[u8] = b"GNU"; - -/// Go entries in the note section have this name. -// See https://go-review.googlesource.com/9520 and https://go-review.googlesource.com/10704. -pub const ELF_NOTE_GO: &[u8] = b"Go"; - -// Note types for `ELF_NOTE_GNU`. - -/// ABI information. -/// -/// The descriptor consists of words: -/// - word 0: OS descriptor -/// - word 1: major version of the ABI -/// - word 2: minor version of the ABI -/// - word 3: subminor version of the ABI -pub const NT_GNU_ABI_TAG: u32 = 1; - -/// OS descriptor for `NT_GNU_ABI_TAG`. -pub const ELF_NOTE_OS_LINUX: u32 = 0; -/// OS descriptor for `NT_GNU_ABI_TAG`. -pub const ELF_NOTE_OS_GNU: u32 = 1; -/// OS descriptor for `NT_GNU_ABI_TAG`. -pub const ELF_NOTE_OS_SOLARIS2: u32 = 2; -/// OS descriptor for `NT_GNU_ABI_TAG`. -pub const ELF_NOTE_OS_FREEBSD: u32 = 3; - -/// Synthetic hwcap information. -/// -/// The descriptor begins with two words: -/// - word 0: number of entries -/// - word 1: bitmask of enabled entries -/// Then follow variable-length entries, one byte followed by a -/// '\0'-terminated hwcap name string. The byte gives the bit -/// number to test if enabled, (1U << bit) & bitmask. */ -pub const NT_GNU_HWCAP: u32 = 2; - -/// Build ID bits as generated by `ld --build-id`. -/// -/// The descriptor consists of any nonzero number of bytes. 
-pub const NT_GNU_BUILD_ID: u32 = 3; - -/// Build ID bits as generated by Go's gc compiler. -/// -/// The descriptor consists of any nonzero number of bytes. -// See https://go-review.googlesource.com/10707. -pub const NT_GO_BUILD_ID: u32 = 4; - -/// Version note generated by GNU gold containing a version string. -pub const NT_GNU_GOLD_VERSION: u32 = 4; - -/// Program property. -pub const NT_GNU_PROPERTY_TYPE_0: u32 = 5; - -// Values used in GNU .note.gnu.property notes (NT_GNU_PROPERTY_TYPE_0). - -/// Stack size. -pub const GNU_PROPERTY_STACK_SIZE: u32 = 1; -/// No copy relocation on protected data symbol. -pub const GNU_PROPERTY_NO_COPY_ON_PROTECTED: u32 = 2; - -// A 4-byte unsigned integer property: A bit is set if it is set in all -// relocatable inputs. -pub const GNU_PROPERTY_UINT32_AND_LO: u32 = 0xb0000000; -pub const GNU_PROPERTY_UINT32_AND_HI: u32 = 0xb0007fff; - -// A 4-byte unsigned integer property: A bit is set if it is set in any -// relocatable inputs. -pub const GNU_PROPERTY_UINT32_OR_LO: u32 = 0xb0008000; -pub const GNU_PROPERTY_UINT32_OR_HI: u32 = 0xb000ffff; - -/// The needed properties by the object file. */ -pub const GNU_PROPERTY_1_NEEDED: u32 = GNU_PROPERTY_UINT32_OR_LO; - -/// Set if the object file requires canonical function pointers and -/// cannot be used with copy relocation. -pub const GNU_PROPERTY_1_NEEDED_INDIRECT_EXTERN_ACCESS: u32 = 1 << 0; - -/// Processor-specific semantics, lo -pub const GNU_PROPERTY_LOPROC: u32 = 0xc0000000; -/// Processor-specific semantics, hi -pub const GNU_PROPERTY_HIPROC: u32 = 0xdfffffff; -/// Application-specific semantics, lo -pub const GNU_PROPERTY_LOUSER: u32 = 0xe0000000; -/// Application-specific semantics, hi -pub const GNU_PROPERTY_HIUSER: u32 = 0xffffffff; - -/// AArch64 specific GNU properties. 
-pub const GNU_PROPERTY_AARCH64_FEATURE_1_AND: u32 = 0xc0000000; -pub const GNU_PROPERTY_AARCH64_FEATURE_PAUTH: u32 = 0xc0000001; - -pub const GNU_PROPERTY_AARCH64_FEATURE_1_BTI: u32 = 1 << 0; -pub const GNU_PROPERTY_AARCH64_FEATURE_1_PAC: u32 = 1 << 1; - -// A 4-byte unsigned integer property: A bit is set if it is set in all -// relocatable inputs. -pub const GNU_PROPERTY_X86_UINT32_AND_LO: u32 = 0xc0000002; -pub const GNU_PROPERTY_X86_UINT32_AND_HI: u32 = 0xc0007fff; - -// A 4-byte unsigned integer property: A bit is set if it is set in any -// relocatable inputs. -pub const GNU_PROPERTY_X86_UINT32_OR_LO: u32 = 0xc0008000; -pub const GNU_PROPERTY_X86_UINT32_OR_HI: u32 = 0xc000ffff; - -// A 4-byte unsigned integer property: A bit is set if it is set in any -// relocatable inputs and the property is present in all relocatable -// inputs. -pub const GNU_PROPERTY_X86_UINT32_OR_AND_LO: u32 = 0xc0010000; -pub const GNU_PROPERTY_X86_UINT32_OR_AND_HI: u32 = 0xc0017fff; - -/// The x86 instruction sets indicated by the corresponding bits are -/// used in program. Their support in the hardware is optional. -pub const GNU_PROPERTY_X86_ISA_1_USED: u32 = 0xc0010002; -/// The x86 instruction sets indicated by the corresponding bits are -/// used in program and they must be supported by the hardware. -pub const GNU_PROPERTY_X86_ISA_1_NEEDED: u32 = 0xc0008002; -/// X86 processor-specific features used in program. -pub const GNU_PROPERTY_X86_FEATURE_1_AND: u32 = 0xc0000002; - -/// GNU_PROPERTY_X86_ISA_1_BASELINE: CMOV, CX8 (cmpxchg8b), FPU (fld), -/// MMX, OSFXSR (fxsave), SCE (syscall), SSE and SSE2. -pub const GNU_PROPERTY_X86_ISA_1_BASELINE: u32 = 1 << 0; -/// GNU_PROPERTY_X86_ISA_1_V2: GNU_PROPERTY_X86_ISA_1_BASELINE, -/// CMPXCHG16B (cmpxchg16b), LAHF-SAHF (lahf), POPCNT (popcnt), SSE3, -/// SSSE3, SSE4.1 and SSE4.2. 
-pub const GNU_PROPERTY_X86_ISA_1_V2: u32 = 1 << 1; -/// GNU_PROPERTY_X86_ISA_1_V3: GNU_PROPERTY_X86_ISA_1_V2, AVX, AVX2, BMI1, -/// BMI2, F16C, FMA, LZCNT, MOVBE, XSAVE. -pub const GNU_PROPERTY_X86_ISA_1_V3: u32 = 1 << 2; -/// GNU_PROPERTY_X86_ISA_1_V4: GNU_PROPERTY_X86_ISA_1_V3, AVX512F, -/// AVX512BW, AVX512CD, AVX512DQ and AVX512VL. -pub const GNU_PROPERTY_X86_ISA_1_V4: u32 = 1 << 3; - -/// This indicates that all executable sections are compatible with IBT. -pub const GNU_PROPERTY_X86_FEATURE_1_IBT: u32 = 1 << 0; -/// This indicates that all executable sections are compatible with SHSTK. -pub const GNU_PROPERTY_X86_FEATURE_1_SHSTK: u32 = 1 << 1; - -// TODO: Elf*_Move - -/// Header of `SHT_HASH` section. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct HashHeader { - /// The number of hash buckets. - pub bucket_count: U32, - /// The number of chain values. - pub chain_count: U32, - // Array of hash bucket start indices. - // buckets: U32[bucket_count] - // Array of hash chain links. An index of 0 terminates the chain. - // chains: U32[chain_count] -} - -/// Calculate the SysV hash for a symbol name. -/// -/// Used for `SHT_HASH`. -pub fn hash(name: &[u8]) -> u32 { - let mut hash = 0u32; - for byte in name { - hash = hash.wrapping_mul(16).wrapping_add(u32::from(*byte)); - hash ^= (hash >> 24) & 0xf0; - } - hash & 0xfff_ffff -} - -/// Header of `SHT_GNU_HASH` section. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct GnuHashHeader { - /// The number of hash buckets. - pub bucket_count: U32, - /// The symbol table index of the first symbol in the hash. - pub symbol_base: U32, - /// The number of words in the bloom filter. - /// - /// Must be a non-zero power of 2. - pub bloom_count: U32, - /// The bit shift count for the bloom filter. - pub bloom_shift: U32, - // Array of bloom filter words. - // bloom_filters: U32[bloom_count] or U64[bloom_count] - // Array of hash bucket start indices. 
- // buckets: U32[bucket_count] - // Array of hash values, one for each symbol starting at symbol_base. - // values: U32[symbol_count] -} - -/// Calculate the GNU hash for a symbol name. -/// -/// Used for `SHT_GNU_HASH`. -pub fn gnu_hash(name: &[u8]) -> u32 { - let mut hash = 5381u32; - for byte in name { - hash = hash.wrapping_mul(33).wrapping_add(u32::from(*byte)); - } - hash -} - -// Motorola 68k specific definitions. - -// m68k values for `Rel*::r_type`. - -/// No reloc -pub const R_68K_NONE: u32 = 0; -/// Direct 32 bit -pub const R_68K_32: u32 = 1; -/// Direct 16 bit -pub const R_68K_16: u32 = 2; -/// Direct 8 bit -pub const R_68K_8: u32 = 3; -/// PC relative 32 bit -pub const R_68K_PC32: u32 = 4; -/// PC relative 16 bit -pub const R_68K_PC16: u32 = 5; -/// PC relative 8 bit -pub const R_68K_PC8: u32 = 6; -/// 32 bit PC relative GOT entry -pub const R_68K_GOT32: u32 = 7; -/// 16 bit PC relative GOT entry -pub const R_68K_GOT16: u32 = 8; -/// 8 bit PC relative GOT entry -pub const R_68K_GOT8: u32 = 9; -/// 32 bit GOT offset -pub const R_68K_GOT32O: u32 = 10; -/// 16 bit GOT offset -pub const R_68K_GOT16O: u32 = 11; -/// 8 bit GOT offset -pub const R_68K_GOT8O: u32 = 12; -/// 32 bit PC relative PLT address -pub const R_68K_PLT32: u32 = 13; -/// 16 bit PC relative PLT address -pub const R_68K_PLT16: u32 = 14; -/// 8 bit PC relative PLT address -pub const R_68K_PLT8: u32 = 15; -/// 32 bit PLT offset -pub const R_68K_PLT32O: u32 = 16; -/// 16 bit PLT offset -pub const R_68K_PLT16O: u32 = 17; -/// 8 bit PLT offset -pub const R_68K_PLT8O: u32 = 18; -/// Copy symbol at runtime -pub const R_68K_COPY: u32 = 19; -/// Create GOT entry -pub const R_68K_GLOB_DAT: u32 = 20; -/// Create PLT entry -pub const R_68K_JMP_SLOT: u32 = 21; -/// Adjust by program base -pub const R_68K_RELATIVE: u32 = 22; -/// 32 bit GOT offset for GD -pub const R_68K_TLS_GD32: u32 = 25; -/// 16 bit GOT offset for GD -pub const R_68K_TLS_GD16: u32 = 26; -/// 8 bit GOT offset for GD -pub const 
R_68K_TLS_GD8: u32 = 27; -/// 32 bit GOT offset for LDM -pub const R_68K_TLS_LDM32: u32 = 28; -/// 16 bit GOT offset for LDM -pub const R_68K_TLS_LDM16: u32 = 29; -/// 8 bit GOT offset for LDM -pub const R_68K_TLS_LDM8: u32 = 30; -/// 32 bit module-relative offset -pub const R_68K_TLS_LDO32: u32 = 31; -/// 16 bit module-relative offset -pub const R_68K_TLS_LDO16: u32 = 32; -/// 8 bit module-relative offset -pub const R_68K_TLS_LDO8: u32 = 33; -/// 32 bit GOT offset for IE -pub const R_68K_TLS_IE32: u32 = 34; -/// 16 bit GOT offset for IE -pub const R_68K_TLS_IE16: u32 = 35; -/// 8 bit GOT offset for IE -pub const R_68K_TLS_IE8: u32 = 36; -/// 32 bit offset relative to static TLS block -pub const R_68K_TLS_LE32: u32 = 37; -/// 16 bit offset relative to static TLS block -pub const R_68K_TLS_LE16: u32 = 38; -/// 8 bit offset relative to static TLS block -pub const R_68K_TLS_LE8: u32 = 39; -/// 32 bit module number -pub const R_68K_TLS_DTPMOD32: u32 = 40; -/// 32 bit module-relative offset -pub const R_68K_TLS_DTPREL32: u32 = 41; -/// 32 bit TP-relative offset -pub const R_68K_TLS_TPREL32: u32 = 42; - -// Intel 80386 specific definitions. - -// i386 values for `Rel*::r_type`. 
- -/// No reloc -pub const R_386_NONE: u32 = 0; -/// Direct 32 bit -pub const R_386_32: u32 = 1; -/// PC relative 32 bit -pub const R_386_PC32: u32 = 2; -/// 32 bit GOT entry -pub const R_386_GOT32: u32 = 3; -/// 32 bit PLT address -pub const R_386_PLT32: u32 = 4; -/// Copy symbol at runtime -pub const R_386_COPY: u32 = 5; -/// Create GOT entry -pub const R_386_GLOB_DAT: u32 = 6; -/// Create PLT entry -pub const R_386_JMP_SLOT: u32 = 7; -/// Adjust by program base -pub const R_386_RELATIVE: u32 = 8; -/// 32 bit offset to GOT -pub const R_386_GOTOFF: u32 = 9; -/// 32 bit PC relative offset to GOT -pub const R_386_GOTPC: u32 = 10; -/// Direct 32 bit PLT address -pub const R_386_32PLT: u32 = 11; -/// Offset in static TLS block -pub const R_386_TLS_TPOFF: u32 = 14; -/// Address of GOT entry for static TLS block offset -pub const R_386_TLS_IE: u32 = 15; -/// GOT entry for static TLS block offset -pub const R_386_TLS_GOTIE: u32 = 16; -/// Offset relative to static TLS block -pub const R_386_TLS_LE: u32 = 17; -/// Direct 32 bit for GNU version of general dynamic thread local data -pub const R_386_TLS_GD: u32 = 18; -/// Direct 32 bit for GNU version of local dynamic thread local data in LE code -pub const R_386_TLS_LDM: u32 = 19; -/// Direct 16 bit -pub const R_386_16: u32 = 20; -/// PC relative 16 bit -pub const R_386_PC16: u32 = 21; -/// Direct 8 bit -pub const R_386_8: u32 = 22; -/// PC relative 8 bit -pub const R_386_PC8: u32 = 23; -/// Direct 32 bit for general dynamic thread local data -pub const R_386_TLS_GD_32: u32 = 24; -/// Tag for pushl in GD TLS code -pub const R_386_TLS_GD_PUSH: u32 = 25; -/// Relocation for call to __tls_get_addr() -pub const R_386_TLS_GD_CALL: u32 = 26; -/// Tag for popl in GD TLS code -pub const R_386_TLS_GD_POP: u32 = 27; -/// Direct 32 bit for local dynamic thread local data in LE code -pub const R_386_TLS_LDM_32: u32 = 28; -/// Tag for pushl in LDM TLS code -pub const R_386_TLS_LDM_PUSH: u32 = 29; -/// Relocation for call to 
__tls_get_addr() in LDM code -pub const R_386_TLS_LDM_CALL: u32 = 30; -/// Tag for popl in LDM TLS code -pub const R_386_TLS_LDM_POP: u32 = 31; -/// Offset relative to TLS block -pub const R_386_TLS_LDO_32: u32 = 32; -/// GOT entry for negated static TLS block offset -pub const R_386_TLS_IE_32: u32 = 33; -/// Negated offset relative to static TLS block -pub const R_386_TLS_LE_32: u32 = 34; -/// ID of module containing symbol -pub const R_386_TLS_DTPMOD32: u32 = 35; -/// Offset in TLS block -pub const R_386_TLS_DTPOFF32: u32 = 36; -/// Negated offset in static TLS block -pub const R_386_TLS_TPOFF32: u32 = 37; -/// 32-bit symbol size -pub const R_386_SIZE32: u32 = 38; -/// GOT offset for TLS descriptor. -pub const R_386_TLS_GOTDESC: u32 = 39; -/// Marker of call through TLS descriptor for relaxation. -pub const R_386_TLS_DESC_CALL: u32 = 40; -/// TLS descriptor containing pointer to code and to argument, returning the TLS offset for the symbol. -pub const R_386_TLS_DESC: u32 = 41; -/// Adjust indirectly by program base -pub const R_386_IRELATIVE: u32 = 42; -/// Load from 32 bit GOT entry, relaxable. -pub const R_386_GOT32X: u32 = 43; - -// SUN SPARC specific definitions. - -// SPARC values for `st_type` component of `Sym*::st_info`. - -/// Global register reserved to app. -pub const STT_SPARC_REGISTER: u8 = 13; - -// SPARC values for `FileHeader64::e_flags`. - -pub const EF_SPARCV9_MM: u32 = 3; -pub const EF_SPARCV9_TSO: u32 = 0; -pub const EF_SPARCV9_PSO: u32 = 1; -pub const EF_SPARCV9_RMO: u32 = 2; -/// little endian data -pub const EF_SPARC_LEDATA: u32 = 0x80_0000; -pub const EF_SPARC_EXT_MASK: u32 = 0xFF_FF00; -/// generic V8+ features -pub const EF_SPARC_32PLUS: u32 = 0x00_0100; -/// Sun UltraSPARC1 extensions -pub const EF_SPARC_SUN_US1: u32 = 0x00_0200; -/// HAL R1 extensions -pub const EF_SPARC_HAL_R1: u32 = 0x00_0400; -/// Sun UltraSPARCIII extensions -pub const EF_SPARC_SUN_US3: u32 = 0x00_0800; - -// SPARC values for `Rel*::r_type`. 
- -/// No reloc -pub const R_SPARC_NONE: u32 = 0; -/// Direct 8 bit -pub const R_SPARC_8: u32 = 1; -/// Direct 16 bit -pub const R_SPARC_16: u32 = 2; -/// Direct 32 bit -pub const R_SPARC_32: u32 = 3; -/// PC relative 8 bit -pub const R_SPARC_DISP8: u32 = 4; -/// PC relative 16 bit -pub const R_SPARC_DISP16: u32 = 5; -/// PC relative 32 bit -pub const R_SPARC_DISP32: u32 = 6; -/// PC relative 30 bit shifted -pub const R_SPARC_WDISP30: u32 = 7; -/// PC relative 22 bit shifted -pub const R_SPARC_WDISP22: u32 = 8; -/// High 22 bit -pub const R_SPARC_HI22: u32 = 9; -/// Direct 22 bit -pub const R_SPARC_22: u32 = 10; -/// Direct 13 bit -pub const R_SPARC_13: u32 = 11; -/// Truncated 10 bit -pub const R_SPARC_LO10: u32 = 12; -/// Truncated 10 bit GOT entry -pub const R_SPARC_GOT10: u32 = 13; -/// 13 bit GOT entry -pub const R_SPARC_GOT13: u32 = 14; -/// 22 bit GOT entry shifted -pub const R_SPARC_GOT22: u32 = 15; -/// PC relative 10 bit truncated -pub const R_SPARC_PC10: u32 = 16; -/// PC relative 22 bit shifted -pub const R_SPARC_PC22: u32 = 17; -/// 30 bit PC relative PLT address -pub const R_SPARC_WPLT30: u32 = 18; -/// Copy symbol at runtime -pub const R_SPARC_COPY: u32 = 19; -/// Create GOT entry -pub const R_SPARC_GLOB_DAT: u32 = 20; -/// Create PLT entry -pub const R_SPARC_JMP_SLOT: u32 = 21; -/// Adjust by program base -pub const R_SPARC_RELATIVE: u32 = 22; -/// Direct 32 bit unaligned -pub const R_SPARC_UA32: u32 = 23; - -// Sparc64 values for `Rel*::r_type`. 
- -/// Direct 32 bit ref to PLT entry -pub const R_SPARC_PLT32: u32 = 24; -/// High 22 bit PLT entry -pub const R_SPARC_HIPLT22: u32 = 25; -/// Truncated 10 bit PLT entry -pub const R_SPARC_LOPLT10: u32 = 26; -/// PC rel 32 bit ref to PLT entry -pub const R_SPARC_PCPLT32: u32 = 27; -/// PC rel high 22 bit PLT entry -pub const R_SPARC_PCPLT22: u32 = 28; -/// PC rel trunc 10 bit PLT entry -pub const R_SPARC_PCPLT10: u32 = 29; -/// Direct 10 bit -pub const R_SPARC_10: u32 = 30; -/// Direct 11 bit -pub const R_SPARC_11: u32 = 31; -/// Direct 64 bit -pub const R_SPARC_64: u32 = 32; -/// 10bit with secondary 13bit addend -pub const R_SPARC_OLO10: u32 = 33; -/// Top 22 bits of direct 64 bit -pub const R_SPARC_HH22: u32 = 34; -/// High middle 10 bits of ... -pub const R_SPARC_HM10: u32 = 35; -/// Low middle 22 bits of ... -pub const R_SPARC_LM22: u32 = 36; -/// Top 22 bits of pc rel 64 bit -pub const R_SPARC_PC_HH22: u32 = 37; -/// High middle 10 bit of ... -pub const R_SPARC_PC_HM10: u32 = 38; -/// Low miggle 22 bits of ... 
-pub const R_SPARC_PC_LM22: u32 = 39; -/// PC relative 16 bit shifted -pub const R_SPARC_WDISP16: u32 = 40; -/// PC relative 19 bit shifted -pub const R_SPARC_WDISP19: u32 = 41; -/// was part of v9 ABI but was removed -pub const R_SPARC_GLOB_JMP: u32 = 42; -/// Direct 7 bit -pub const R_SPARC_7: u32 = 43; -/// Direct 5 bit -pub const R_SPARC_5: u32 = 44; -/// Direct 6 bit -pub const R_SPARC_6: u32 = 45; -/// PC relative 64 bit -pub const R_SPARC_DISP64: u32 = 46; -/// Direct 64 bit ref to PLT entry -pub const R_SPARC_PLT64: u32 = 47; -/// High 22 bit complemented -pub const R_SPARC_HIX22: u32 = 48; -/// Truncated 11 bit complemented -pub const R_SPARC_LOX10: u32 = 49; -/// Direct high 12 of 44 bit -pub const R_SPARC_H44: u32 = 50; -/// Direct mid 22 of 44 bit -pub const R_SPARC_M44: u32 = 51; -/// Direct low 10 of 44 bit -pub const R_SPARC_L44: u32 = 52; -/// Global register usage -pub const R_SPARC_REGISTER: u32 = 53; -/// Direct 64 bit unaligned -pub const R_SPARC_UA64: u32 = 54; -/// Direct 16 bit unaligned -pub const R_SPARC_UA16: u32 = 55; -pub const R_SPARC_TLS_GD_HI22: u32 = 56; -pub const R_SPARC_TLS_GD_LO10: u32 = 57; -pub const R_SPARC_TLS_GD_ADD: u32 = 58; -pub const R_SPARC_TLS_GD_CALL: u32 = 59; -pub const R_SPARC_TLS_LDM_HI22: u32 = 60; -pub const R_SPARC_TLS_LDM_LO10: u32 = 61; -pub const R_SPARC_TLS_LDM_ADD: u32 = 62; -pub const R_SPARC_TLS_LDM_CALL: u32 = 63; -pub const R_SPARC_TLS_LDO_HIX22: u32 = 64; -pub const R_SPARC_TLS_LDO_LOX10: u32 = 65; -pub const R_SPARC_TLS_LDO_ADD: u32 = 66; -pub const R_SPARC_TLS_IE_HI22: u32 = 67; -pub const R_SPARC_TLS_IE_LO10: u32 = 68; -pub const R_SPARC_TLS_IE_LD: u32 = 69; -pub const R_SPARC_TLS_IE_LDX: u32 = 70; -pub const R_SPARC_TLS_IE_ADD: u32 = 71; -pub const R_SPARC_TLS_LE_HIX22: u32 = 72; -pub const R_SPARC_TLS_LE_LOX10: u32 = 73; -pub const R_SPARC_TLS_DTPMOD32: u32 = 74; -pub const R_SPARC_TLS_DTPMOD64: u32 = 75; -pub const R_SPARC_TLS_DTPOFF32: u32 = 76; -pub const R_SPARC_TLS_DTPOFF64: u32 = 77; -pub 
const R_SPARC_TLS_TPOFF32: u32 = 78; -pub const R_SPARC_TLS_TPOFF64: u32 = 79; -pub const R_SPARC_GOTDATA_HIX22: u32 = 80; -pub const R_SPARC_GOTDATA_LOX10: u32 = 81; -pub const R_SPARC_GOTDATA_OP_HIX22: u32 = 82; -pub const R_SPARC_GOTDATA_OP_LOX10: u32 = 83; -pub const R_SPARC_GOTDATA_OP: u32 = 84; -pub const R_SPARC_H34: u32 = 85; -pub const R_SPARC_SIZE32: u32 = 86; -pub const R_SPARC_SIZE64: u32 = 87; -pub const R_SPARC_WDISP10: u32 = 88; -pub const R_SPARC_JMP_IREL: u32 = 248; -pub const R_SPARC_IRELATIVE: u32 = 249; -pub const R_SPARC_GNU_VTINHERIT: u32 = 250; -pub const R_SPARC_GNU_VTENTRY: u32 = 251; -pub const R_SPARC_REV32: u32 = 252; - -// Sparc64 values for `Dyn32::d_tag`. - -pub const DT_SPARC_REGISTER: u32 = 0x7000_0001; - -// MIPS R3000 specific definitions. - -// MIPS values for `FileHeader32::e_flags`. - -/// A .noreorder directive was used. -pub const EF_MIPS_NOREORDER: u32 = 1; -/// Contains PIC code. -pub const EF_MIPS_PIC: u32 = 2; -/// Uses PIC calling sequence. -pub const EF_MIPS_CPIC: u32 = 4; -pub const EF_MIPS_XGOT: u32 = 8; -pub const EF_MIPS_64BIT_WHIRL: u32 = 16; -pub const EF_MIPS_ABI2: u32 = 32; -pub const EF_MIPS_ABI_ON32: u32 = 64; -/// Uses FP64 (12 callee-saved). -pub const EF_MIPS_FP64: u32 = 512; -/// Uses IEEE 754-2008 NaN encoding. -pub const EF_MIPS_NAN2008: u32 = 1024; -/// MIPS architecture level. -pub const EF_MIPS_ARCH: u32 = 0xf000_0000; - -/// The first MIPS 32 bit ABI -pub const EF_MIPS_ABI_O32: u32 = 0x0000_1000; -/// O32 ABI extended for 64-bit architectures -pub const EF_MIPS_ABI_O64: u32 = 0x0000_2000; -/// EABI in 32-bit mode -pub const EF_MIPS_ABI_EABI32: u32 = 0x0000_3000; -/// EABI in 64-bit mode -pub const EF_MIPS_ABI_EABI64: u32 = 0x0000_4000; -/// Mask for selecting EF_MIPS_ABI_ variant -pub const EF_MIPS_ABI: u32 = 0x0000_f000; - -// Legal values for MIPS architecture level. - -/// -mips1 code. -pub const EF_MIPS_ARCH_1: u32 = 0x0000_0000; -/// -mips2 code. 
-pub const EF_MIPS_ARCH_2: u32 = 0x1000_0000; -/// -mips3 code. -pub const EF_MIPS_ARCH_3: u32 = 0x2000_0000; -/// -mips4 code. -pub const EF_MIPS_ARCH_4: u32 = 0x3000_0000; -/// -mips5 code. -pub const EF_MIPS_ARCH_5: u32 = 0x4000_0000; -/// MIPS32 code. -pub const EF_MIPS_ARCH_32: u32 = 0x5000_0000; -/// MIPS64 code. -pub const EF_MIPS_ARCH_64: u32 = 0x6000_0000; -/// MIPS32r2 code. -pub const EF_MIPS_ARCH_32R2: u32 = 0x7000_0000; -/// MIPS64r2 code. -pub const EF_MIPS_ARCH_64R2: u32 = 0x8000_0000; -/// MIPS32r6 code -pub const EF_MIPS_ARCH_32R6: u32 = 0x9000_0000; -/// MIPS64r6 code -pub const EF_MIPS_ARCH_64R6: u32 = 0xa000_0000; - -// MIPS values for `Sym32::st_shndx`. - -/// Allocated common symbols. -pub const SHN_MIPS_ACOMMON: u16 = 0xff00; -/// Allocated test symbols. -pub const SHN_MIPS_TEXT: u16 = 0xff01; -/// Allocated data symbols. -pub const SHN_MIPS_DATA: u16 = 0xff02; -/// Small common symbols. -pub const SHN_MIPS_SCOMMON: u16 = 0xff03; -/// Small undefined symbols. -pub const SHN_MIPS_SUNDEFINED: u16 = 0xff04; - -// MIPS values for `SectionHeader32::sh_type`. - -/// Shared objects used in link. -pub const SHT_MIPS_LIBLIST: u32 = 0x7000_0000; -pub const SHT_MIPS_MSYM: u32 = 0x7000_0001; -/// Conflicting symbols. -pub const SHT_MIPS_CONFLICT: u32 = 0x7000_0002; -/// Global data area sizes. -pub const SHT_MIPS_GPTAB: u32 = 0x7000_0003; -/// Reserved for SGI/MIPS compilers -pub const SHT_MIPS_UCODE: u32 = 0x7000_0004; -/// MIPS ECOFF debugging info. -pub const SHT_MIPS_DEBUG: u32 = 0x7000_0005; -/// Register usage information. -pub const SHT_MIPS_REGINFO: u32 = 0x7000_0006; -pub const SHT_MIPS_PACKAGE: u32 = 0x7000_0007; -pub const SHT_MIPS_PACKSYM: u32 = 0x7000_0008; -pub const SHT_MIPS_RELD: u32 = 0x7000_0009; -pub const SHT_MIPS_IFACE: u32 = 0x7000_000b; -pub const SHT_MIPS_CONTENT: u32 = 0x7000_000c; -/// Miscellaneous options. 
-pub const SHT_MIPS_OPTIONS: u32 = 0x7000_000d; -pub const SHT_MIPS_SHDR: u32 = 0x7000_0010; -pub const SHT_MIPS_FDESC: u32 = 0x7000_0011; -pub const SHT_MIPS_EXTSYM: u32 = 0x7000_0012; -pub const SHT_MIPS_DENSE: u32 = 0x7000_0013; -pub const SHT_MIPS_PDESC: u32 = 0x7000_0014; -pub const SHT_MIPS_LOCSYM: u32 = 0x7000_0015; -pub const SHT_MIPS_AUXSYM: u32 = 0x7000_0016; -pub const SHT_MIPS_OPTSYM: u32 = 0x7000_0017; -pub const SHT_MIPS_LOCSTR: u32 = 0x7000_0018; -pub const SHT_MIPS_LINE: u32 = 0x7000_0019; -pub const SHT_MIPS_RFDESC: u32 = 0x7000_001a; -pub const SHT_MIPS_DELTASYM: u32 = 0x7000_001b; -pub const SHT_MIPS_DELTAINST: u32 = 0x7000_001c; -pub const SHT_MIPS_DELTACLASS: u32 = 0x7000_001d; -/// DWARF debugging information. -pub const SHT_MIPS_DWARF: u32 = 0x7000_001e; -pub const SHT_MIPS_DELTADECL: u32 = 0x7000_001f; -pub const SHT_MIPS_SYMBOL_LIB: u32 = 0x7000_0020; -/// Event section. -pub const SHT_MIPS_EVENTS: u32 = 0x7000_0021; -pub const SHT_MIPS_TRANSLATE: u32 = 0x7000_0022; -pub const SHT_MIPS_PIXIE: u32 = 0x7000_0023; -pub const SHT_MIPS_XLATE: u32 = 0x7000_0024; -pub const SHT_MIPS_XLATE_DEBUG: u32 = 0x7000_0025; -pub const SHT_MIPS_WHIRL: u32 = 0x7000_0026; -pub const SHT_MIPS_EH_REGION: u32 = 0x7000_0027; -pub const SHT_MIPS_XLATE_OLD: u32 = 0x7000_0028; -pub const SHT_MIPS_PDR_EXCEPTION: u32 = 0x7000_0029; - -// MIPS values for `SectionHeader32::sh_flags`. - -/// Must be in global data area. -pub const SHF_MIPS_GPREL: u32 = 0x1000_0000; -pub const SHF_MIPS_MERGE: u32 = 0x2000_0000; -pub const SHF_MIPS_ADDR: u32 = 0x4000_0000; -pub const SHF_MIPS_STRINGS: u32 = 0x8000_0000; -pub const SHF_MIPS_NOSTRIP: u32 = 0x0800_0000; -pub const SHF_MIPS_LOCAL: u32 = 0x0400_0000; -pub const SHF_MIPS_NAMES: u32 = 0x0200_0000; -pub const SHF_MIPS_NODUPE: u32 = 0x0100_0000; - -// MIPS values for `Sym32::st_other`. - -pub const STO_MIPS_PLT: u8 = 0x8; -/// Only valid for `STB_MIPS_SPLIT_COMMON`. 
-pub const STO_MIPS_SC_ALIGN_UNUSED: u8 = 0xff; - -// MIPS values for `Sym32::st_info'. -pub const STB_MIPS_SPLIT_COMMON: u8 = 13; - -// Entries found in sections of type `SHT_MIPS_GPTAB`. - -// TODO: Elf32_gptab, Elf32_RegInfo, Elf_Options - -// Values for `Elf_Options::kind`. - -/// Undefined. -pub const ODK_NULL: u32 = 0; -/// Register usage information. -pub const ODK_REGINFO: u32 = 1; -/// Exception processing options. -pub const ODK_EXCEPTIONS: u32 = 2; -/// Section padding options. -pub const ODK_PAD: u32 = 3; -/// Hardware workarounds performed -pub const ODK_HWPATCH: u32 = 4; -/// record the fill value used by the linker. -pub const ODK_FILL: u32 = 5; -/// reserve space for desktop tools to write. -pub const ODK_TAGS: u32 = 6; -/// HW workarounds. 'AND' bits when merging. -pub const ODK_HWAND: u32 = 7; -/// HW workarounds. 'OR' bits when merging. -pub const ODK_HWOR: u32 = 8; - -// Values for `Elf_Options::info` for `ODK_EXCEPTIONS` entries. - -/// FPE's which MUST be enabled. -pub const OEX_FPU_MIN: u32 = 0x1f; -/// FPE's which MAY be enabled. -pub const OEX_FPU_MAX: u32 = 0x1f00; -/// page zero must be mapped. -pub const OEX_PAGE0: u32 = 0x10000; -/// Force sequential memory mode? -pub const OEX_SMM: u32 = 0x20000; -/// Force floating point debug mode? -pub const OEX_FPDBUG: u32 = 0x40000; -pub const OEX_PRECISEFP: u32 = OEX_FPDBUG; -/// Dismiss invalid address faults? -pub const OEX_DISMISS: u32 = 0x80000; - -pub const OEX_FPU_INVAL: u32 = 0x10; -pub const OEX_FPU_DIV0: u32 = 0x08; -pub const OEX_FPU_OFLO: u32 = 0x04; -pub const OEX_FPU_UFLO: u32 = 0x02; -pub const OEX_FPU_INEX: u32 = 0x01; - -// Masks for `Elf_Options::info` for an `ODK_HWPATCH` entry. */ -/// R4000 end-of-page patch. -pub const OHW_R4KEOP: u32 = 0x1; -/// may need R8000 prefetch patch. -pub const OHW_R8KPFETCH: u32 = 0x2; -/// R5000 end-of-page patch. -pub const OHW_R5KEOP: u32 = 0x4; -/// R5000 cvt.\[ds\].l bug. clean=1. 
-pub const OHW_R5KCVTL: u32 = 0x8; - -pub const OPAD_PREFIX: u32 = 0x1; -pub const OPAD_POSTFIX: u32 = 0x2; -pub const OPAD_SYMBOL: u32 = 0x4; - -// Entries found in sections of type `SHT_MIPS_OPTIONS`. - -// TODO: Elf_Options_Hw - -// Masks for `ElfOptions::info` for `ODK_HWAND` and `ODK_HWOR` entries. - -pub const OHWA0_R4KEOP_CHECKED: u32 = 0x0000_0001; -pub const OHWA1_R4KEOP_CLEAN: u32 = 0x0000_0002; - -// MIPS values for `Rel*::r_type`. - -/// No reloc -pub const R_MIPS_NONE: u32 = 0; -/// Direct 16 bit -pub const R_MIPS_16: u32 = 1; -/// Direct 32 bit -pub const R_MIPS_32: u32 = 2; -/// PC relative 32 bit -pub const R_MIPS_REL32: u32 = 3; -/// Direct 26 bit shifted -pub const R_MIPS_26: u32 = 4; -/// High 16 bit -pub const R_MIPS_HI16: u32 = 5; -/// Low 16 bit -pub const R_MIPS_LO16: u32 = 6; -/// GP relative 16 bit -pub const R_MIPS_GPREL16: u32 = 7; -/// 16 bit literal entry -pub const R_MIPS_LITERAL: u32 = 8; -/// 16 bit GOT entry -pub const R_MIPS_GOT16: u32 = 9; -/// PC relative 16 bit -pub const R_MIPS_PC16: u32 = 10; -/// 16 bit GOT entry for function -pub const R_MIPS_CALL16: u32 = 11; -/// GP relative 32 bit -pub const R_MIPS_GPREL32: u32 = 12; - -pub const R_MIPS_SHIFT5: u32 = 16; -pub const R_MIPS_SHIFT6: u32 = 17; -pub const R_MIPS_64: u32 = 18; -pub const R_MIPS_GOT_DISP: u32 = 19; -pub const R_MIPS_GOT_PAGE: u32 = 20; -pub const R_MIPS_GOT_OFST: u32 = 21; -pub const R_MIPS_GOT_HI16: u32 = 22; -pub const R_MIPS_GOT_LO16: u32 = 23; -pub const R_MIPS_SUB: u32 = 24; -pub const R_MIPS_INSERT_A: u32 = 25; -pub const R_MIPS_INSERT_B: u32 = 26; -pub const R_MIPS_DELETE: u32 = 27; -pub const R_MIPS_HIGHER: u32 = 28; -pub const R_MIPS_HIGHEST: u32 = 29; -pub const R_MIPS_CALL_HI16: u32 = 30; -pub const R_MIPS_CALL_LO16: u32 = 31; -pub const R_MIPS_SCN_DISP: u32 = 32; -pub const R_MIPS_REL16: u32 = 33; -pub const R_MIPS_ADD_IMMEDIATE: u32 = 34; -pub const R_MIPS_PJUMP: u32 = 35; -pub const R_MIPS_RELGOT: u32 = 36; -pub const R_MIPS_JALR: u32 = 37; -/// 
Module number 32 bit -pub const R_MIPS_TLS_DTPMOD32: u32 = 38; -/// Module-relative offset 32 bit -pub const R_MIPS_TLS_DTPREL32: u32 = 39; -/// Module number 64 bit -pub const R_MIPS_TLS_DTPMOD64: u32 = 40; -/// Module-relative offset 64 bit -pub const R_MIPS_TLS_DTPREL64: u32 = 41; -/// 16 bit GOT offset for GD -pub const R_MIPS_TLS_GD: u32 = 42; -/// 16 bit GOT offset for LDM -pub const R_MIPS_TLS_LDM: u32 = 43; -/// Module-relative offset, high 16 bits -pub const R_MIPS_TLS_DTPREL_HI16: u32 = 44; -/// Module-relative offset, low 16 bits -pub const R_MIPS_TLS_DTPREL_LO16: u32 = 45; -/// 16 bit GOT offset for IE -pub const R_MIPS_TLS_GOTTPREL: u32 = 46; -/// TP-relative offset, 32 bit -pub const R_MIPS_TLS_TPREL32: u32 = 47; -/// TP-relative offset, 64 bit -pub const R_MIPS_TLS_TPREL64: u32 = 48; -/// TP-relative offset, high 16 bits -pub const R_MIPS_TLS_TPREL_HI16: u32 = 49; -/// TP-relative offset, low 16 bits -pub const R_MIPS_TLS_TPREL_LO16: u32 = 50; -pub const R_MIPS_GLOB_DAT: u32 = 51; -pub const R_MIPS_COPY: u32 = 126; -pub const R_MIPS_JUMP_SLOT: u32 = 127; - -// MIPS values for `ProgramHeader32::p_type`. - -/// Register usage information. -pub const PT_MIPS_REGINFO: u32 = 0x7000_0000; -/// Runtime procedure table. -pub const PT_MIPS_RTPROC: u32 = 0x7000_0001; -pub const PT_MIPS_OPTIONS: u32 = 0x7000_0002; -/// FP mode requirement. -pub const PT_MIPS_ABIFLAGS: u32 = 0x7000_0003; - -// MIPS values for `ProgramHeader32::p_flags`. - -pub const PF_MIPS_LOCAL: u32 = 0x1000_0000; - -// MIPS values for `Dyn32::d_tag`. 
- -/// Runtime linker interface version -pub const DT_MIPS_RLD_VERSION: u32 = 0x7000_0001; -/// Timestamp -pub const DT_MIPS_TIME_STAMP: u32 = 0x7000_0002; -/// Checksum -pub const DT_MIPS_ICHECKSUM: u32 = 0x7000_0003; -/// Version string (string tbl index) -pub const DT_MIPS_IVERSION: u32 = 0x7000_0004; -/// Flags -pub const DT_MIPS_FLAGS: u32 = 0x7000_0005; -/// Base address -pub const DT_MIPS_BASE_ADDRESS: u32 = 0x7000_0006; -pub const DT_MIPS_MSYM: u32 = 0x7000_0007; -/// Address of CONFLICT section -pub const DT_MIPS_CONFLICT: u32 = 0x7000_0008; -/// Address of LIBLIST section -pub const DT_MIPS_LIBLIST: u32 = 0x7000_0009; -/// Number of local GOT entries -pub const DT_MIPS_LOCAL_GOTNO: u32 = 0x7000_000a; -/// Number of CONFLICT entries -pub const DT_MIPS_CONFLICTNO: u32 = 0x7000_000b; -/// Number of LIBLIST entries -pub const DT_MIPS_LIBLISTNO: u32 = 0x7000_0010; -/// Number of DYNSYM entries -pub const DT_MIPS_SYMTABNO: u32 = 0x7000_0011; -/// First external DYNSYM -pub const DT_MIPS_UNREFEXTNO: u32 = 0x7000_0012; -/// First GOT entry in DYNSYM -pub const DT_MIPS_GOTSYM: u32 = 0x7000_0013; -/// Number of GOT page table entries -pub const DT_MIPS_HIPAGENO: u32 = 0x7000_0014; -/// Address of run time loader map. -pub const DT_MIPS_RLD_MAP: u32 = 0x7000_0016; -/// Delta C++ class definition. -pub const DT_MIPS_DELTA_CLASS: u32 = 0x7000_0017; -/// Number of entries in DT_MIPS_DELTA_CLASS. -pub const DT_MIPS_DELTA_CLASS_NO: u32 = 0x7000_0018; -/// Delta C++ class instances. -pub const DT_MIPS_DELTA_INSTANCE: u32 = 0x7000_0019; -/// Number of entries in DT_MIPS_DELTA_INSTANCE. -pub const DT_MIPS_DELTA_INSTANCE_NO: u32 = 0x7000_001a; -/// Delta relocations. -pub const DT_MIPS_DELTA_RELOC: u32 = 0x7000_001b; -/// Number of entries in DT_MIPS_DELTA_RELOC. -pub const DT_MIPS_DELTA_RELOC_NO: u32 = 0x7000_001c; -/// Delta symbols that Delta relocations refer to. -pub const DT_MIPS_DELTA_SYM: u32 = 0x7000_001d; -/// Number of entries in DT_MIPS_DELTA_SYM. 
-pub const DT_MIPS_DELTA_SYM_NO: u32 = 0x7000_001e; -/// Delta symbols that hold the class declaration. -pub const DT_MIPS_DELTA_CLASSSYM: u32 = 0x7000_0020; -/// Number of entries in DT_MIPS_DELTA_CLASSSYM. -pub const DT_MIPS_DELTA_CLASSSYM_NO: u32 = 0x7000_0021; -/// Flags indicating for C++ flavor. -pub const DT_MIPS_CXX_FLAGS: u32 = 0x7000_0022; -pub const DT_MIPS_PIXIE_INIT: u32 = 0x7000_0023; -pub const DT_MIPS_SYMBOL_LIB: u32 = 0x7000_0024; -pub const DT_MIPS_LOCALPAGE_GOTIDX: u32 = 0x7000_0025; -pub const DT_MIPS_LOCAL_GOTIDX: u32 = 0x7000_0026; -pub const DT_MIPS_HIDDEN_GOTIDX: u32 = 0x7000_0027; -pub const DT_MIPS_PROTECTED_GOTIDX: u32 = 0x7000_0028; -/// Address of .options. -pub const DT_MIPS_OPTIONS: u32 = 0x7000_0029; -/// Address of .interface. -pub const DT_MIPS_INTERFACE: u32 = 0x7000_002a; -pub const DT_MIPS_DYNSTR_ALIGN: u32 = 0x7000_002b; -/// Size of the .interface section. -pub const DT_MIPS_INTERFACE_SIZE: u32 = 0x7000_002c; -/// Address of rld_text_rsolve function stored in GOT. -pub const DT_MIPS_RLD_TEXT_RESOLVE_ADDR: u32 = 0x7000_002d; -/// Default suffix of dso to be added by rld on dlopen() calls. -pub const DT_MIPS_PERF_SUFFIX: u32 = 0x7000_002e; -/// (O32)Size of compact rel section. -pub const DT_MIPS_COMPACT_SIZE: u32 = 0x7000_002f; -/// GP value for aux GOTs. -pub const DT_MIPS_GP_VALUE: u32 = 0x7000_0030; -/// Address of aux .dynamic. -pub const DT_MIPS_AUX_DYNAMIC: u32 = 0x7000_0031; -/// The address of .got.plt in an executable using the new non-PIC ABI. -pub const DT_MIPS_PLTGOT: u32 = 0x7000_0032; -/// The base of the PLT in an executable using the new non-PIC ABI if that PLT is writable. For a non-writable PLT, this is omitted or has a zero value. -pub const DT_MIPS_RWPLT: u32 = 0x7000_0034; -/// An alternative description of the classic MIPS RLD_MAP that is usable in a PIE as it stores a relative offset from the address of the tag rather than an absolute address. 
-pub const DT_MIPS_RLD_MAP_REL: u32 = 0x7000_0035; - -// Values for `DT_MIPS_FLAGS` `Dyn32` entry. - -/// No flags -pub const RHF_NONE: u32 = 0; -/// Use quickstart -pub const RHF_QUICKSTART: u32 = 1 << 0; -/// Hash size not power of 2 -pub const RHF_NOTPOT: u32 = 1 << 1; -/// Ignore LD_LIBRARY_PATH -pub const RHF_NO_LIBRARY_REPLACEMENT: u32 = 1 << 2; -pub const RHF_NO_MOVE: u32 = 1 << 3; -pub const RHF_SGI_ONLY: u32 = 1 << 4; -pub const RHF_GUARANTEE_INIT: u32 = 1 << 5; -pub const RHF_DELTA_C_PLUS_PLUS: u32 = 1 << 6; -pub const RHF_GUARANTEE_START_INIT: u32 = 1 << 7; -pub const RHF_PIXIE: u32 = 1 << 8; -pub const RHF_DEFAULT_DELAY_LOAD: u32 = 1 << 9; -pub const RHF_REQUICKSTART: u32 = 1 << 10; -pub const RHF_REQUICKSTARTED: u32 = 1 << 11; -pub const RHF_CORD: u32 = 1 << 12; -pub const RHF_NO_UNRES_UNDEF: u32 = 1 << 13; -pub const RHF_RLD_ORDER_SAFE: u32 = 1 << 14; - -// Entries found in sections of type `SHT_MIPS_LIBLIST`. - -// TODO: Elf32_Lib, Elf64_Lib - -// Values for `Lib*::l_flags`. - -pub const LL_NONE: u32 = 0; -/// Require exact match -pub const LL_EXACT_MATCH: u32 = 1 << 0; -/// Ignore interface version -pub const LL_IGNORE_INT_VER: u32 = 1 << 1; -pub const LL_REQUIRE_MINOR: u32 = 1 << 2; -pub const LL_EXPORTS: u32 = 1 << 3; -pub const LL_DELAY_LOAD: u32 = 1 << 4; -pub const LL_DELTA: u32 = 1 << 5; - -// TODO: MIPS ABI flags - -// PA-RISC specific definitions. - -// PA-RISC values for `FileHeader32::e_flags`. - -/// Trap nil pointer dereference. -pub const EF_PARISC_TRAPNIL: u32 = 0x0001_0000; -/// Program uses arch. extensions. -pub const EF_PARISC_EXT: u32 = 0x0002_0000; -/// Program expects little endian. -pub const EF_PARISC_LSB: u32 = 0x0004_0000; -/// Program expects wide mode. -pub const EF_PARISC_WIDE: u32 = 0x0008_0000; -/// No kernel assisted branch prediction. -pub const EF_PARISC_NO_KABP: u32 = 0x0010_0000; -/// Allow lazy swapping. -pub const EF_PARISC_LAZYSWAP: u32 = 0x0040_0000; -/// Architecture version. 
-pub const EF_PARISC_ARCH: u32 = 0x0000_ffff; - -// Values for `EF_PARISC_ARCH'. - -/// PA-RISC 1.0 big-endian. -pub const EFA_PARISC_1_0: u32 = 0x020b; -/// PA-RISC 1.1 big-endian. -pub const EFA_PARISC_1_1: u32 = 0x0210; -/// PA-RISC 2.0 big-endian. -pub const EFA_PARISC_2_0: u32 = 0x0214; - -// PA-RISC values for `Sym*::st_shndx`. - -/// Section for tentatively declared symbols in ANSI C. -pub const SHN_PARISC_ANSI_COMMON: u16 = 0xff00; -/// Common blocks in huge model. -pub const SHN_PARISC_HUGE_COMMON: u16 = 0xff01; - -// PA-RISC values for `SectionHeader32::sh_type`. - -/// Contains product specific ext. -pub const SHT_PARISC_EXT: u32 = 0x7000_0000; -/// Unwind information. -pub const SHT_PARISC_UNWIND: u32 = 0x7000_0001; -/// Debug info for optimized code. -pub const SHT_PARISC_DOC: u32 = 0x7000_0002; - -// PA-RISC values for `SectionHeader32::sh_flags`. - -/// Section with short addressing. -pub const SHF_PARISC_SHORT: u32 = 0x2000_0000; -/// Section far from gp. -pub const SHF_PARISC_HUGE: u32 = 0x4000_0000; -/// Static branch prediction code. -pub const SHF_PARISC_SBP: u32 = 0x8000_0000; - -// PA-RISC values for `st_type` component of `Sym32::st_info`. - -/// Millicode function entry point. -pub const STT_PARISC_MILLICODE: u8 = 13; - -pub const STT_HP_OPAQUE: u8 = STT_LOOS + 0x1; -pub const STT_HP_STUB: u8 = STT_LOOS + 0x2; - -// PA-RISC values for `Rel*::r_type`. - -/// No reloc. -pub const R_PARISC_NONE: u32 = 0; -/// Direct 32-bit reference. -pub const R_PARISC_DIR32: u32 = 1; -/// Left 21 bits of eff. address. -pub const R_PARISC_DIR21L: u32 = 2; -/// Right 17 bits of eff. address. -pub const R_PARISC_DIR17R: u32 = 3; -/// 17 bits of eff. address. -pub const R_PARISC_DIR17F: u32 = 4; -/// Right 14 bits of eff. address. -pub const R_PARISC_DIR14R: u32 = 6; -/// 32-bit rel. address. -pub const R_PARISC_PCREL32: u32 = 9; -/// Left 21 bits of rel. address. -pub const R_PARISC_PCREL21L: u32 = 10; -/// Right 17 bits of rel. address. 
-pub const R_PARISC_PCREL17R: u32 = 11; -/// 17 bits of rel. address. -pub const R_PARISC_PCREL17F: u32 = 12; -/// Right 14 bits of rel. address. -pub const R_PARISC_PCREL14R: u32 = 14; -/// Left 21 bits of rel. address. -pub const R_PARISC_DPREL21L: u32 = 18; -/// Right 14 bits of rel. address. -pub const R_PARISC_DPREL14R: u32 = 22; -/// GP-relative, left 21 bits. -pub const R_PARISC_GPREL21L: u32 = 26; -/// GP-relative, right 14 bits. -pub const R_PARISC_GPREL14R: u32 = 30; -/// LT-relative, left 21 bits. -pub const R_PARISC_LTOFF21L: u32 = 34; -/// LT-relative, right 14 bits. -pub const R_PARISC_LTOFF14R: u32 = 38; -/// 32 bits section rel. address. -pub const R_PARISC_SECREL32: u32 = 41; -/// No relocation, set segment base. -pub const R_PARISC_SEGBASE: u32 = 48; -/// 32 bits segment rel. address. -pub const R_PARISC_SEGREL32: u32 = 49; -/// PLT rel. address, left 21 bits. -pub const R_PARISC_PLTOFF21L: u32 = 50; -/// PLT rel. address, right 14 bits. -pub const R_PARISC_PLTOFF14R: u32 = 54; -/// 32 bits LT-rel. function pointer. -pub const R_PARISC_LTOFF_FPTR32: u32 = 57; -/// LT-rel. fct ptr, left 21 bits. -pub const R_PARISC_LTOFF_FPTR21L: u32 = 58; -/// LT-rel. fct ptr, right 14 bits. -pub const R_PARISC_LTOFF_FPTR14R: u32 = 62; -/// 64 bits function address. -pub const R_PARISC_FPTR64: u32 = 64; -/// 32 bits function address. -pub const R_PARISC_PLABEL32: u32 = 65; -/// Left 21 bits of fdesc address. -pub const R_PARISC_PLABEL21L: u32 = 66; -/// Right 14 bits of fdesc address. -pub const R_PARISC_PLABEL14R: u32 = 70; -/// 64 bits PC-rel. address. -pub const R_PARISC_PCREL64: u32 = 72; -/// 22 bits PC-rel. address. -pub const R_PARISC_PCREL22F: u32 = 74; -/// PC-rel. address, right 14 bits. -pub const R_PARISC_PCREL14WR: u32 = 75; -/// PC rel. address, right 14 bits. -pub const R_PARISC_PCREL14DR: u32 = 76; -/// 16 bits PC-rel. address. -pub const R_PARISC_PCREL16F: u32 = 77; -/// 16 bits PC-rel. address. 
-pub const R_PARISC_PCREL16WF: u32 = 78; -/// 16 bits PC-rel. address. -pub const R_PARISC_PCREL16DF: u32 = 79; -/// 64 bits of eff. address. -pub const R_PARISC_DIR64: u32 = 80; -/// 14 bits of eff. address. -pub const R_PARISC_DIR14WR: u32 = 83; -/// 14 bits of eff. address. -pub const R_PARISC_DIR14DR: u32 = 84; -/// 16 bits of eff. address. -pub const R_PARISC_DIR16F: u32 = 85; -/// 16 bits of eff. address. -pub const R_PARISC_DIR16WF: u32 = 86; -/// 16 bits of eff. address. -pub const R_PARISC_DIR16DF: u32 = 87; -/// 64 bits of GP-rel. address. -pub const R_PARISC_GPREL64: u32 = 88; -/// GP-rel. address, right 14 bits. -pub const R_PARISC_GPREL14WR: u32 = 91; -/// GP-rel. address, right 14 bits. -pub const R_PARISC_GPREL14DR: u32 = 92; -/// 16 bits GP-rel. address. -pub const R_PARISC_GPREL16F: u32 = 93; -/// 16 bits GP-rel. address. -pub const R_PARISC_GPREL16WF: u32 = 94; -/// 16 bits GP-rel. address. -pub const R_PARISC_GPREL16DF: u32 = 95; -/// 64 bits LT-rel. address. -pub const R_PARISC_LTOFF64: u32 = 96; -/// LT-rel. address, right 14 bits. -pub const R_PARISC_LTOFF14WR: u32 = 99; -/// LT-rel. address, right 14 bits. -pub const R_PARISC_LTOFF14DR: u32 = 100; -/// 16 bits LT-rel. address. -pub const R_PARISC_LTOFF16F: u32 = 101; -/// 16 bits LT-rel. address. -pub const R_PARISC_LTOFF16WF: u32 = 102; -/// 16 bits LT-rel. address. -pub const R_PARISC_LTOFF16DF: u32 = 103; -/// 64 bits section rel. address. -pub const R_PARISC_SECREL64: u32 = 104; -/// 64 bits segment rel. address. -pub const R_PARISC_SEGREL64: u32 = 112; -/// PLT-rel. address, right 14 bits. -pub const R_PARISC_PLTOFF14WR: u32 = 115; -/// PLT-rel. address, right 14 bits. -pub const R_PARISC_PLTOFF14DR: u32 = 116; -/// 16 bits LT-rel. address. -pub const R_PARISC_PLTOFF16F: u32 = 117; -/// 16 bits PLT-rel. address. -pub const R_PARISC_PLTOFF16WF: u32 = 118; -/// 16 bits PLT-rel. address. -pub const R_PARISC_PLTOFF16DF: u32 = 119; -/// 64 bits LT-rel. function ptr. 
-pub const R_PARISC_LTOFF_FPTR64: u32 = 120; -/// LT-rel. fct. ptr., right 14 bits. -pub const R_PARISC_LTOFF_FPTR14WR: u32 = 123; -/// LT-rel. fct. ptr., right 14 bits. -pub const R_PARISC_LTOFF_FPTR14DR: u32 = 124; -/// 16 bits LT-rel. function ptr. -pub const R_PARISC_LTOFF_FPTR16F: u32 = 125; -/// 16 bits LT-rel. function ptr. -pub const R_PARISC_LTOFF_FPTR16WF: u32 = 126; -/// 16 bits LT-rel. function ptr. -pub const R_PARISC_LTOFF_FPTR16DF: u32 = 127; -pub const R_PARISC_LORESERVE: u32 = 128; -/// Copy relocation. -pub const R_PARISC_COPY: u32 = 128; -/// Dynamic reloc, imported PLT -pub const R_PARISC_IPLT: u32 = 129; -/// Dynamic reloc, exported PLT -pub const R_PARISC_EPLT: u32 = 130; -/// 32 bits TP-rel. address. -pub const R_PARISC_TPREL32: u32 = 153; -/// TP-rel. address, left 21 bits. -pub const R_PARISC_TPREL21L: u32 = 154; -/// TP-rel. address, right 14 bits. -pub const R_PARISC_TPREL14R: u32 = 158; -/// LT-TP-rel. address, left 21 bits. -pub const R_PARISC_LTOFF_TP21L: u32 = 162; -/// LT-TP-rel. address, right 14 bits. -pub const R_PARISC_LTOFF_TP14R: u32 = 166; -/// 14 bits LT-TP-rel. address. -pub const R_PARISC_LTOFF_TP14F: u32 = 167; -/// 64 bits TP-rel. address. -pub const R_PARISC_TPREL64: u32 = 216; -/// TP-rel. address, right 14 bits. -pub const R_PARISC_TPREL14WR: u32 = 219; -/// TP-rel. address, right 14 bits. -pub const R_PARISC_TPREL14DR: u32 = 220; -/// 16 bits TP-rel. address. -pub const R_PARISC_TPREL16F: u32 = 221; -/// 16 bits TP-rel. address. -pub const R_PARISC_TPREL16WF: u32 = 222; -/// 16 bits TP-rel. address. -pub const R_PARISC_TPREL16DF: u32 = 223; -/// 64 bits LT-TP-rel. address. -pub const R_PARISC_LTOFF_TP64: u32 = 224; -/// LT-TP-rel. address, right 14 bits. -pub const R_PARISC_LTOFF_TP14WR: u32 = 227; -/// LT-TP-rel. address, right 14 bits. -pub const R_PARISC_LTOFF_TP14DR: u32 = 228; -/// 16 bits LT-TP-rel. address. -pub const R_PARISC_LTOFF_TP16F: u32 = 229; -/// 16 bits LT-TP-rel. address. 
-pub const R_PARISC_LTOFF_TP16WF: u32 = 230; -/// 16 bits LT-TP-rel. address. -pub const R_PARISC_LTOFF_TP16DF: u32 = 231; -pub const R_PARISC_GNU_VTENTRY: u32 = 232; -pub const R_PARISC_GNU_VTINHERIT: u32 = 233; -/// GD 21-bit left. -pub const R_PARISC_TLS_GD21L: u32 = 234; -/// GD 14-bit right. -pub const R_PARISC_TLS_GD14R: u32 = 235; -/// GD call to __t_g_a. -pub const R_PARISC_TLS_GDCALL: u32 = 236; -/// LD module 21-bit left. -pub const R_PARISC_TLS_LDM21L: u32 = 237; -/// LD module 14-bit right. -pub const R_PARISC_TLS_LDM14R: u32 = 238; -/// LD module call to __t_g_a. -pub const R_PARISC_TLS_LDMCALL: u32 = 239; -/// LD offset 21-bit left. -pub const R_PARISC_TLS_LDO21L: u32 = 240; -/// LD offset 14-bit right. -pub const R_PARISC_TLS_LDO14R: u32 = 241; -/// DTP module 32-bit. -pub const R_PARISC_TLS_DTPMOD32: u32 = 242; -/// DTP module 64-bit. -pub const R_PARISC_TLS_DTPMOD64: u32 = 243; -/// DTP offset 32-bit. -pub const R_PARISC_TLS_DTPOFF32: u32 = 244; -/// DTP offset 32-bit. -pub const R_PARISC_TLS_DTPOFF64: u32 = 245; -pub const R_PARISC_TLS_LE21L: u32 = R_PARISC_TPREL21L; -pub const R_PARISC_TLS_LE14R: u32 = R_PARISC_TPREL14R; -pub const R_PARISC_TLS_IE21L: u32 = R_PARISC_LTOFF_TP21L; -pub const R_PARISC_TLS_IE14R: u32 = R_PARISC_LTOFF_TP14R; -pub const R_PARISC_TLS_TPREL32: u32 = R_PARISC_TPREL32; -pub const R_PARISC_TLS_TPREL64: u32 = R_PARISC_TPREL64; -pub const R_PARISC_HIRESERVE: u32 = 255; - -// PA-RISC values for `ProgramHeader*::p_type`. 
- -pub const PT_HP_TLS: u32 = PT_LOOS + 0x0; -pub const PT_HP_CORE_NONE: u32 = PT_LOOS + 0x1; -pub const PT_HP_CORE_VERSION: u32 = PT_LOOS + 0x2; -pub const PT_HP_CORE_KERNEL: u32 = PT_LOOS + 0x3; -pub const PT_HP_CORE_COMM: u32 = PT_LOOS + 0x4; -pub const PT_HP_CORE_PROC: u32 = PT_LOOS + 0x5; -pub const PT_HP_CORE_LOADABLE: u32 = PT_LOOS + 0x6; -pub const PT_HP_CORE_STACK: u32 = PT_LOOS + 0x7; -pub const PT_HP_CORE_SHM: u32 = PT_LOOS + 0x8; -pub const PT_HP_CORE_MMF: u32 = PT_LOOS + 0x9; -pub const PT_HP_PARALLEL: u32 = PT_LOOS + 0x10; -pub const PT_HP_FASTBIND: u32 = PT_LOOS + 0x11; -pub const PT_HP_OPT_ANNOT: u32 = PT_LOOS + 0x12; -pub const PT_HP_HSL_ANNOT: u32 = PT_LOOS + 0x13; -pub const PT_HP_STACK: u32 = PT_LOOS + 0x14; - -pub const PT_PARISC_ARCHEXT: u32 = 0x7000_0000; -pub const PT_PARISC_UNWIND: u32 = 0x7000_0001; - -// PA-RISC values for `ProgramHeader*::p_flags`. - -pub const PF_PARISC_SBP: u32 = 0x0800_0000; - -pub const PF_HP_PAGE_SIZE: u32 = 0x0010_0000; -pub const PF_HP_FAR_SHARED: u32 = 0x0020_0000; -pub const PF_HP_NEAR_SHARED: u32 = 0x0040_0000; -pub const PF_HP_CODE: u32 = 0x0100_0000; -pub const PF_HP_MODIFY: u32 = 0x0200_0000; -pub const PF_HP_LAZYSWAP: u32 = 0x0400_0000; -pub const PF_HP_SBP: u32 = 0x0800_0000; - -// Alpha specific definitions. - -// Alpha values for `FileHeader64::e_flags`. - -/// All addresses must be < 2GB. -pub const EF_ALPHA_32BIT: u32 = 1; -/// Relocations for relaxing exist. -pub const EF_ALPHA_CANRELAX: u32 = 2; - -// Alpha values for `SectionHeader64::sh_type`. - -// These two are primerily concerned with ECOFF debugging info. -pub const SHT_ALPHA_DEBUG: u32 = 0x7000_0001; -pub const SHT_ALPHA_REGINFO: u32 = 0x7000_0002; - -// Alpha values for `SectionHeader64::sh_flags`. - -pub const SHF_ALPHA_GPREL: u32 = 0x1000_0000; - -// Alpha values for `Sym64::st_other`. -/// No PV required. -pub const STO_ALPHA_NOPV: u8 = 0x80; -/// PV only used for initial ldgp. 
-pub const STO_ALPHA_STD_GPLOAD: u8 = 0x88; - -// Alpha values for `Rel64::r_type`. - -/// No reloc -pub const R_ALPHA_NONE: u32 = 0; -/// Direct 32 bit -pub const R_ALPHA_REFLONG: u32 = 1; -/// Direct 64 bit -pub const R_ALPHA_REFQUAD: u32 = 2; -/// GP relative 32 bit -pub const R_ALPHA_GPREL32: u32 = 3; -/// GP relative 16 bit w/optimization -pub const R_ALPHA_LITERAL: u32 = 4; -/// Optimization hint for LITERAL -pub const R_ALPHA_LITUSE: u32 = 5; -/// Add displacement to GP -pub const R_ALPHA_GPDISP: u32 = 6; -/// PC+4 relative 23 bit shifted -pub const R_ALPHA_BRADDR: u32 = 7; -/// PC+4 relative 16 bit shifted -pub const R_ALPHA_HINT: u32 = 8; -/// PC relative 16 bit -pub const R_ALPHA_SREL16: u32 = 9; -/// PC relative 32 bit -pub const R_ALPHA_SREL32: u32 = 10; -/// PC relative 64 bit -pub const R_ALPHA_SREL64: u32 = 11; -/// GP relative 32 bit, high 16 bits -pub const R_ALPHA_GPRELHIGH: u32 = 17; -/// GP relative 32 bit, low 16 bits -pub const R_ALPHA_GPRELLOW: u32 = 18; -/// GP relative 16 bit -pub const R_ALPHA_GPREL16: u32 = 19; -/// Copy symbol at runtime -pub const R_ALPHA_COPY: u32 = 24; -/// Create GOT entry -pub const R_ALPHA_GLOB_DAT: u32 = 25; -/// Create PLT entry -pub const R_ALPHA_JMP_SLOT: u32 = 26; -/// Adjust by program base -pub const R_ALPHA_RELATIVE: u32 = 27; -pub const R_ALPHA_TLS_GD_HI: u32 = 28; -pub const R_ALPHA_TLSGD: u32 = 29; -pub const R_ALPHA_TLS_LDM: u32 = 30; -pub const R_ALPHA_DTPMOD64: u32 = 31; -pub const R_ALPHA_GOTDTPREL: u32 = 32; -pub const R_ALPHA_DTPREL64: u32 = 33; -pub const R_ALPHA_DTPRELHI: u32 = 34; -pub const R_ALPHA_DTPRELLO: u32 = 35; -pub const R_ALPHA_DTPREL16: u32 = 36; -pub const R_ALPHA_GOTTPREL: u32 = 37; -pub const R_ALPHA_TPREL64: u32 = 38; -pub const R_ALPHA_TPRELHI: u32 = 39; -pub const R_ALPHA_TPRELLO: u32 = 40; -pub const R_ALPHA_TPREL16: u32 = 41; - -// Magic values of the `R_ALPHA_LITUSE` relocation addend. 
-pub const LITUSE_ALPHA_ADDR: u32 = 0; -pub const LITUSE_ALPHA_BASE: u32 = 1; -pub const LITUSE_ALPHA_BYTOFF: u32 = 2; -pub const LITUSE_ALPHA_JSR: u32 = 3; -pub const LITUSE_ALPHA_TLS_GD: u32 = 4; -pub const LITUSE_ALPHA_TLS_LDM: u32 = 5; - -// Alpha values for `Dyn64::d_tag`. -pub const DT_ALPHA_PLTRO: u32 = DT_LOPROC + 0; - -// PowerPC specific declarations. - -// PowerPC values for `FileHeader*::e_flags`. -/// PowerPC embedded flag -pub const EF_PPC_EMB: u32 = 0x8000_0000; - -// Cygnus local bits below . -/// PowerPC -mrelocatable flag -pub const EF_PPC_RELOCATABLE: u32 = 0x0001_0000; -/// PowerPC -mrelocatable-lib flag -pub const EF_PPC_RELOCATABLE_LIB: u32 = 0x0000_8000; - -// PowerPC values for `Rel*::r_type` defined by the ABIs. -pub const R_PPC_NONE: u32 = 0; -/// 32bit absolute address -pub const R_PPC_ADDR32: u32 = 1; -/// 26bit address, 2 bits ignored. -pub const R_PPC_ADDR24: u32 = 2; -/// 16bit absolute address -pub const R_PPC_ADDR16: u32 = 3; -/// lower 16bit of absolute address -pub const R_PPC_ADDR16_LO: u32 = 4; -/// high 16bit of absolute address -pub const R_PPC_ADDR16_HI: u32 = 5; -/// adjusted high 16bit -pub const R_PPC_ADDR16_HA: u32 = 6; -/// 16bit address, 2 bits ignored -pub const R_PPC_ADDR14: u32 = 7; -pub const R_PPC_ADDR14_BRTAKEN: u32 = 8; -pub const R_PPC_ADDR14_BRNTAKEN: u32 = 9; -/// PC relative 26 bit -pub const R_PPC_REL24: u32 = 10; -/// PC relative 16 bit -pub const R_PPC_REL14: u32 = 11; -pub const R_PPC_REL14_BRTAKEN: u32 = 12; -pub const R_PPC_REL14_BRNTAKEN: u32 = 13; -pub const R_PPC_GOT16: u32 = 14; -pub const R_PPC_GOT16_LO: u32 = 15; -pub const R_PPC_GOT16_HI: u32 = 16; -pub const R_PPC_GOT16_HA: u32 = 17; -pub const R_PPC_PLTREL24: u32 = 18; -pub const R_PPC_COPY: u32 = 19; -pub const R_PPC_GLOB_DAT: u32 = 20; -pub const R_PPC_JMP_SLOT: u32 = 21; -pub const R_PPC_RELATIVE: u32 = 22; -pub const R_PPC_LOCAL24PC: u32 = 23; -pub const R_PPC_UADDR32: u32 = 24; -pub const R_PPC_UADDR16: u32 = 25; -pub const R_PPC_REL32: 
u32 = 26; -pub const R_PPC_PLT32: u32 = 27; -pub const R_PPC_PLTREL32: u32 = 28; -pub const R_PPC_PLT16_LO: u32 = 29; -pub const R_PPC_PLT16_HI: u32 = 30; -pub const R_PPC_PLT16_HA: u32 = 31; -pub const R_PPC_SDAREL16: u32 = 32; -pub const R_PPC_SECTOFF: u32 = 33; -pub const R_PPC_SECTOFF_LO: u32 = 34; -pub const R_PPC_SECTOFF_HI: u32 = 35; -pub const R_PPC_SECTOFF_HA: u32 = 36; - -// PowerPC values for `Rel*::r_type` defined for the TLS access ABI. -/// none (sym+add)@tls -pub const R_PPC_TLS: u32 = 67; -/// word32 (sym+add)@dtpmod -pub const R_PPC_DTPMOD32: u32 = 68; -/// half16* (sym+add)@tprel -pub const R_PPC_TPREL16: u32 = 69; -/// half16 (sym+add)@tprel@l -pub const R_PPC_TPREL16_LO: u32 = 70; -/// half16 (sym+add)@tprel@h -pub const R_PPC_TPREL16_HI: u32 = 71; -/// half16 (sym+add)@tprel@ha -pub const R_PPC_TPREL16_HA: u32 = 72; -/// word32 (sym+add)@tprel -pub const R_PPC_TPREL32: u32 = 73; -/// half16*(sym+add)@dtprel -pub const R_PPC_DTPREL16: u32 = 74; -/// half16 (sym+add)@dtprel@l -pub const R_PPC_DTPREL16_LO: u32 = 75; -/// half16 (sym+add)@dtprel@h -pub const R_PPC_DTPREL16_HI: u32 = 76; -/// half16 (sym+add)@dtprel@ha -pub const R_PPC_DTPREL16_HA: u32 = 77; -/// word32 (sym+add)@dtprel -pub const R_PPC_DTPREL32: u32 = 78; -/// half16* (sym+add)@got@tlsgd -pub const R_PPC_GOT_TLSGD16: u32 = 79; -/// half16 (sym+add)@got@tlsgd@l -pub const R_PPC_GOT_TLSGD16_LO: u32 = 80; -/// half16 (sym+add)@got@tlsgd@h -pub const R_PPC_GOT_TLSGD16_HI: u32 = 81; -/// half16 (sym+add)@got@tlsgd@ha -pub const R_PPC_GOT_TLSGD16_HA: u32 = 82; -/// half16* (sym+add)@got@tlsld -pub const R_PPC_GOT_TLSLD16: u32 = 83; -/// half16 (sym+add)@got@tlsld@l -pub const R_PPC_GOT_TLSLD16_LO: u32 = 84; -/// half16 (sym+add)@got@tlsld@h -pub const R_PPC_GOT_TLSLD16_HI: u32 = 85; -/// half16 (sym+add)@got@tlsld@ha -pub const R_PPC_GOT_TLSLD16_HA: u32 = 86; -/// half16* (sym+add)@got@tprel -pub const R_PPC_GOT_TPREL16: u32 = 87; -/// half16 (sym+add)@got@tprel@l -pub const 
R_PPC_GOT_TPREL16_LO: u32 = 88; -/// half16 (sym+add)@got@tprel@h -pub const R_PPC_GOT_TPREL16_HI: u32 = 89; -/// half16 (sym+add)@got@tprel@ha -pub const R_PPC_GOT_TPREL16_HA: u32 = 90; -/// half16* (sym+add)@got@dtprel -pub const R_PPC_GOT_DTPREL16: u32 = 91; -/// half16* (sym+add)@got@dtprel@l -pub const R_PPC_GOT_DTPREL16_LO: u32 = 92; -/// half16* (sym+add)@got@dtprel@h -pub const R_PPC_GOT_DTPREL16_HI: u32 = 93; -/// half16* (sym+add)@got@dtprel@ha -pub const R_PPC_GOT_DTPREL16_HA: u32 = 94; -/// none (sym+add)@tlsgd -pub const R_PPC_TLSGD: u32 = 95; -/// none (sym+add)@tlsld -pub const R_PPC_TLSLD: u32 = 96; - -// PowerPC values for `Rel*::r_type` from the Embedded ELF ABI. -pub const R_PPC_EMB_NADDR32: u32 = 101; -pub const R_PPC_EMB_NADDR16: u32 = 102; -pub const R_PPC_EMB_NADDR16_LO: u32 = 103; -pub const R_PPC_EMB_NADDR16_HI: u32 = 104; -pub const R_PPC_EMB_NADDR16_HA: u32 = 105; -pub const R_PPC_EMB_SDAI16: u32 = 106; -pub const R_PPC_EMB_SDA2I16: u32 = 107; -pub const R_PPC_EMB_SDA2REL: u32 = 108; -/// 16 bit offset in SDA -pub const R_PPC_EMB_SDA21: u32 = 109; -pub const R_PPC_EMB_MRKREF: u32 = 110; -pub const R_PPC_EMB_RELSEC16: u32 = 111; -pub const R_PPC_EMB_RELST_LO: u32 = 112; -pub const R_PPC_EMB_RELST_HI: u32 = 113; -pub const R_PPC_EMB_RELST_HA: u32 = 114; -pub const R_PPC_EMB_BIT_FLD: u32 = 115; -/// 16 bit relative offset in SDA -pub const R_PPC_EMB_RELSDA: u32 = 116; - -// Diab tool values for `Rel*::r_type`. -/// like EMB_SDA21, but lower 16 bit -pub const R_PPC_DIAB_SDA21_LO: u32 = 180; -/// like EMB_SDA21, but high 16 bit -pub const R_PPC_DIAB_SDA21_HI: u32 = 181; -/// like EMB_SDA21, adjusted high 16 -pub const R_PPC_DIAB_SDA21_HA: u32 = 182; -/// like EMB_RELSDA, but lower 16 bit -pub const R_PPC_DIAB_RELSDA_LO: u32 = 183; -/// like EMB_RELSDA, but high 16 bit -pub const R_PPC_DIAB_RELSDA_HI: u32 = 184; -/// like EMB_RELSDA, adjusted high 16 -pub const R_PPC_DIAB_RELSDA_HA: u32 = 185; - -/// GNU extension to support local ifunc. 
-pub const R_PPC_IRELATIVE: u32 = 248; - -// GNU relocs used in PIC code sequences. -/// half16 (sym+add-.) -pub const R_PPC_REL16: u32 = 249; -/// half16 (sym+add-.)@l -pub const R_PPC_REL16_LO: u32 = 250; -/// half16 (sym+add-.)@h -pub const R_PPC_REL16_HI: u32 = 251; -/// half16 (sym+add-.)@ha -pub const R_PPC_REL16_HA: u32 = 252; - -/// This is a phony reloc to handle any old fashioned TOC16 references that may -/// still be in object files. -pub const R_PPC_TOC16: u32 = 255; - -// PowerPC specific values for `Dyn*::d_tag`. -pub const DT_PPC_GOT: u32 = DT_LOPROC + 0; -pub const DT_PPC_OPT: u32 = DT_LOPROC + 1; - -// PowerPC specific values for the `DT_PPC_OPT` entry. -pub const PPC_OPT_TLS: u32 = 1; - -// PowerPC64 values for `Rel*::r_type` defined by the ABIs. -pub const R_PPC64_NONE: u32 = R_PPC_NONE; -/// 32bit absolute address -pub const R_PPC64_ADDR32: u32 = R_PPC_ADDR32; -/// 26bit address, word aligned -pub const R_PPC64_ADDR24: u32 = R_PPC_ADDR24; -/// 16bit absolute address -pub const R_PPC64_ADDR16: u32 = R_PPC_ADDR16; -/// lower 16bits of address -pub const R_PPC64_ADDR16_LO: u32 = R_PPC_ADDR16_LO; -/// high 16bits of address. -pub const R_PPC64_ADDR16_HI: u32 = R_PPC_ADDR16_HI; -/// adjusted high 16bits. -pub const R_PPC64_ADDR16_HA: u32 = R_PPC_ADDR16_HA; -/// 16bit address, word aligned -pub const R_PPC64_ADDR14: u32 = R_PPC_ADDR14; -pub const R_PPC64_ADDR14_BRTAKEN: u32 = R_PPC_ADDR14_BRTAKEN; -pub const R_PPC64_ADDR14_BRNTAKEN: u32 = R_PPC_ADDR14_BRNTAKEN; -/// PC-rel. 
26 bit, word aligned -pub const R_PPC64_REL24: u32 = R_PPC_REL24; -/// PC relative 16 bit -pub const R_PPC64_REL14: u32 = R_PPC_REL14; -pub const R_PPC64_REL14_BRTAKEN: u32 = R_PPC_REL14_BRTAKEN; -pub const R_PPC64_REL14_BRNTAKEN: u32 = R_PPC_REL14_BRNTAKEN; -pub const R_PPC64_GOT16: u32 = R_PPC_GOT16; -pub const R_PPC64_GOT16_LO: u32 = R_PPC_GOT16_LO; -pub const R_PPC64_GOT16_HI: u32 = R_PPC_GOT16_HI; -pub const R_PPC64_GOT16_HA: u32 = R_PPC_GOT16_HA; - -pub const R_PPC64_COPY: u32 = R_PPC_COPY; -pub const R_PPC64_GLOB_DAT: u32 = R_PPC_GLOB_DAT; -pub const R_PPC64_JMP_SLOT: u32 = R_PPC_JMP_SLOT; -pub const R_PPC64_RELATIVE: u32 = R_PPC_RELATIVE; - -pub const R_PPC64_UADDR32: u32 = R_PPC_UADDR32; -pub const R_PPC64_UADDR16: u32 = R_PPC_UADDR16; -pub const R_PPC64_REL32: u32 = R_PPC_REL32; -pub const R_PPC64_PLT32: u32 = R_PPC_PLT32; -pub const R_PPC64_PLTREL32: u32 = R_PPC_PLTREL32; -pub const R_PPC64_PLT16_LO: u32 = R_PPC_PLT16_LO; -pub const R_PPC64_PLT16_HI: u32 = R_PPC_PLT16_HI; -pub const R_PPC64_PLT16_HA: u32 = R_PPC_PLT16_HA; - -pub const R_PPC64_SECTOFF: u32 = R_PPC_SECTOFF; -pub const R_PPC64_SECTOFF_LO: u32 = R_PPC_SECTOFF_LO; -pub const R_PPC64_SECTOFF_HI: u32 = R_PPC_SECTOFF_HI; -pub const R_PPC64_SECTOFF_HA: u32 = R_PPC_SECTOFF_HA; -/// word30 (S + A - P) >> 2 -pub const R_PPC64_ADDR30: u32 = 37; -/// doubleword64 S + A -pub const R_PPC64_ADDR64: u32 = 38; -/// half16 #higher(S + A) -pub const R_PPC64_ADDR16_HIGHER: u32 = 39; -/// half16 #highera(S + A) -pub const R_PPC64_ADDR16_HIGHERA: u32 = 40; -/// half16 #highest(S + A) -pub const R_PPC64_ADDR16_HIGHEST: u32 = 41; -/// half16 #highesta(S + A) -pub const R_PPC64_ADDR16_HIGHESTA: u32 = 42; -/// doubleword64 S + A -pub const R_PPC64_UADDR64: u32 = 43; -/// doubleword64 S + A - P -pub const R_PPC64_REL64: u32 = 44; -/// doubleword64 L + A -pub const R_PPC64_PLT64: u32 = 45; -/// doubleword64 L + A - P -pub const R_PPC64_PLTREL64: u32 = 46; -/// half16* S + A - .TOC -pub const R_PPC64_TOC16: u32 = 47; 
-/// half16 #lo(S + A - .TOC.) -pub const R_PPC64_TOC16_LO: u32 = 48; -/// half16 #hi(S + A - .TOC.) -pub const R_PPC64_TOC16_HI: u32 = 49; -/// half16 #ha(S + A - .TOC.) -pub const R_PPC64_TOC16_HA: u32 = 50; -/// doubleword64 .TOC -pub const R_PPC64_TOC: u32 = 51; -/// half16* M + A -pub const R_PPC64_PLTGOT16: u32 = 52; -/// half16 #lo(M + A) -pub const R_PPC64_PLTGOT16_LO: u32 = 53; -/// half16 #hi(M + A) -pub const R_PPC64_PLTGOT16_HI: u32 = 54; -/// half16 #ha(M + A) -pub const R_PPC64_PLTGOT16_HA: u32 = 55; - -/// half16ds* (S + A) >> 2 -pub const R_PPC64_ADDR16_DS: u32 = 56; -/// half16ds #lo(S + A) >> 2 -pub const R_PPC64_ADDR16_LO_DS: u32 = 57; -/// half16ds* (G + A) >> 2 -pub const R_PPC64_GOT16_DS: u32 = 58; -/// half16ds #lo(G + A) >> 2 -pub const R_PPC64_GOT16_LO_DS: u32 = 59; -/// half16ds #lo(L + A) >> 2 -pub const R_PPC64_PLT16_LO_DS: u32 = 60; -/// half16ds* (R + A) >> 2 -pub const R_PPC64_SECTOFF_DS: u32 = 61; -/// half16ds #lo(R + A) >> 2 -pub const R_PPC64_SECTOFF_LO_DS: u32 = 62; -/// half16ds* (S + A - .TOC.) >> 2 -pub const R_PPC64_TOC16_DS: u32 = 63; -/// half16ds #lo(S + A - .TOC.) >> 2 -pub const R_PPC64_TOC16_LO_DS: u32 = 64; -/// half16ds* (M + A) >> 2 -pub const R_PPC64_PLTGOT16_DS: u32 = 65; -/// half16ds #lo(M + A) >> 2 -pub const R_PPC64_PLTGOT16_LO_DS: u32 = 66; - -// PowerPC64 values for `Rel*::r_type` defined for the TLS access ABI. 
-/// none (sym+add)@tls -pub const R_PPC64_TLS: u32 = 67; -/// doubleword64 (sym+add)@dtpmod -pub const R_PPC64_DTPMOD64: u32 = 68; -/// half16* (sym+add)@tprel -pub const R_PPC64_TPREL16: u32 = 69; -/// half16 (sym+add)@tprel@l -pub const R_PPC64_TPREL16_LO: u32 = 70; -/// half16 (sym+add)@tprel@h -pub const R_PPC64_TPREL16_HI: u32 = 71; -/// half16 (sym+add)@tprel@ha -pub const R_PPC64_TPREL16_HA: u32 = 72; -/// doubleword64 (sym+add)@tprel -pub const R_PPC64_TPREL64: u32 = 73; -/// half16* (sym+add)@dtprel -pub const R_PPC64_DTPREL16: u32 = 74; -/// half16 (sym+add)@dtprel@l -pub const R_PPC64_DTPREL16_LO: u32 = 75; -/// half16 (sym+add)@dtprel@h -pub const R_PPC64_DTPREL16_HI: u32 = 76; -/// half16 (sym+add)@dtprel@ha -pub const R_PPC64_DTPREL16_HA: u32 = 77; -/// doubleword64 (sym+add)@dtprel -pub const R_PPC64_DTPREL64: u32 = 78; -/// half16* (sym+add)@got@tlsgd -pub const R_PPC64_GOT_TLSGD16: u32 = 79; -/// half16 (sym+add)@got@tlsgd@l -pub const R_PPC64_GOT_TLSGD16_LO: u32 = 80; -/// half16 (sym+add)@got@tlsgd@h -pub const R_PPC64_GOT_TLSGD16_HI: u32 = 81; -/// half16 (sym+add)@got@tlsgd@ha -pub const R_PPC64_GOT_TLSGD16_HA: u32 = 82; -/// half16* (sym+add)@got@tlsld -pub const R_PPC64_GOT_TLSLD16: u32 = 83; -/// half16 (sym+add)@got@tlsld@l -pub const R_PPC64_GOT_TLSLD16_LO: u32 = 84; -/// half16 (sym+add)@got@tlsld@h -pub const R_PPC64_GOT_TLSLD16_HI: u32 = 85; -/// half16 (sym+add)@got@tlsld@ha -pub const R_PPC64_GOT_TLSLD16_HA: u32 = 86; -/// half16ds* (sym+add)@got@tprel -pub const R_PPC64_GOT_TPREL16_DS: u32 = 87; -/// half16ds (sym+add)@got@tprel@l -pub const R_PPC64_GOT_TPREL16_LO_DS: u32 = 88; -/// half16 (sym+add)@got@tprel@h -pub const R_PPC64_GOT_TPREL16_HI: u32 = 89; -/// half16 (sym+add)@got@tprel@ha -pub const R_PPC64_GOT_TPREL16_HA: u32 = 90; -/// half16ds* (sym+add)@got@dtprel -pub const R_PPC64_GOT_DTPREL16_DS: u32 = 91; -/// half16ds (sym+add)@got@dtprel@l -pub const R_PPC64_GOT_DTPREL16_LO_DS: u32 = 92; -/// half16 (sym+add)@got@dtprel@h 
-pub const R_PPC64_GOT_DTPREL16_HI: u32 = 93; -/// half16 (sym+add)@got@dtprel@ha -pub const R_PPC64_GOT_DTPREL16_HA: u32 = 94; -/// half16ds* (sym+add)@tprel -pub const R_PPC64_TPREL16_DS: u32 = 95; -/// half16ds (sym+add)@tprel@l -pub const R_PPC64_TPREL16_LO_DS: u32 = 96; -/// half16 (sym+add)@tprel@higher -pub const R_PPC64_TPREL16_HIGHER: u32 = 97; -/// half16 (sym+add)@tprel@highera -pub const R_PPC64_TPREL16_HIGHERA: u32 = 98; -/// half16 (sym+add)@tprel@highest -pub const R_PPC64_TPREL16_HIGHEST: u32 = 99; -/// half16 (sym+add)@tprel@highesta -pub const R_PPC64_TPREL16_HIGHESTA: u32 = 100; -/// half16ds* (sym+add)@dtprel -pub const R_PPC64_DTPREL16_DS: u32 = 101; -/// half16ds (sym+add)@dtprel@l -pub const R_PPC64_DTPREL16_LO_DS: u32 = 102; -/// half16 (sym+add)@dtprel@higher -pub const R_PPC64_DTPREL16_HIGHER: u32 = 103; -/// half16 (sym+add)@dtprel@highera -pub const R_PPC64_DTPREL16_HIGHERA: u32 = 104; -/// half16 (sym+add)@dtprel@highest -pub const R_PPC64_DTPREL16_HIGHEST: u32 = 105; -/// half16 (sym+add)@dtprel@highesta -pub const R_PPC64_DTPREL16_HIGHESTA: u32 = 106; -/// none (sym+add)@tlsgd -pub const R_PPC64_TLSGD: u32 = 107; -/// none (sym+add)@tlsld -pub const R_PPC64_TLSLD: u32 = 108; -/// none -pub const R_PPC64_TOCSAVE: u32 = 109; - -// Added when HA and HI relocs were changed to report overflows. -pub const R_PPC64_ADDR16_HIGH: u32 = 110; -pub const R_PPC64_ADDR16_HIGHA: u32 = 111; -pub const R_PPC64_TPREL16_HIGH: u32 = 112; -pub const R_PPC64_TPREL16_HIGHA: u32 = 113; -pub const R_PPC64_DTPREL16_HIGH: u32 = 114; -pub const R_PPC64_DTPREL16_HIGHA: u32 = 115; - -/// GNU extension to support local ifunc. -pub const R_PPC64_JMP_IREL: u32 = 247; -/// GNU extension to support local ifunc. -pub const R_PPC64_IRELATIVE: u32 = 248; -/// half16 (sym+add-.) 
-pub const R_PPC64_REL16: u32 = 249; -/// half16 (sym+add-.)@l -pub const R_PPC64_REL16_LO: u32 = 250; -/// half16 (sym+add-.)@h -pub const R_PPC64_REL16_HI: u32 = 251; -/// half16 (sym+add-.)@ha -pub const R_PPC64_REL16_HA: u32 = 252; - -// PowerPC64 values for `FileHeader64::e_flags. -/// PowerPC64 bits specifying ABI. -/// -/// 1 for original function descriptor using ABI, -/// 2 for revised ABI without function descriptors, -/// 0 for unspecified or not using any features affected by the differences. -pub const EF_PPC64_ABI: u32 = 3; - -// PowerPC64 values for `Dyn64::d_tag. -pub const DT_PPC64_GLINK: u32 = DT_LOPROC + 0; -pub const DT_PPC64_OPD: u32 = DT_LOPROC + 1; -pub const DT_PPC64_OPDSZ: u32 = DT_LOPROC + 2; -pub const DT_PPC64_OPT: u32 = DT_LOPROC + 3; - -// PowerPC64 bits for `DT_PPC64_OPT` entry. -pub const PPC64_OPT_TLS: u32 = 1; -pub const PPC64_OPT_MULTI_TOC: u32 = 2; -pub const PPC64_OPT_LOCALENTRY: u32 = 4; - -// PowerPC64 values for `Sym64::st_other. -pub const STO_PPC64_LOCAL_BIT: u8 = 5; -pub const STO_PPC64_LOCAL_MASK: u8 = 7 << STO_PPC64_LOCAL_BIT; - -// ARM specific declarations. - -// ARM values for `FileHeader*::e_flags`. -pub const EF_ARM_RELEXEC: u32 = 0x01; -pub const EF_ARM_HASENTRY: u32 = 0x02; -pub const EF_ARM_INTERWORK: u32 = 0x04; -pub const EF_ARM_APCS_26: u32 = 0x08; -pub const EF_ARM_APCS_FLOAT: u32 = 0x10; -pub const EF_ARM_PIC: u32 = 0x20; -/// 8-bit structure alignment is in use -pub const EF_ARM_ALIGN8: u32 = 0x40; -pub const EF_ARM_NEW_ABI: u32 = 0x80; -pub const EF_ARM_OLD_ABI: u32 = 0x100; -pub const EF_ARM_SOFT_FLOAT: u32 = 0x200; -pub const EF_ARM_VFP_FLOAT: u32 = 0x400; -pub const EF_ARM_MAVERICK_FLOAT: u32 = 0x800; - -/// NB conflicts with EF_ARM_SOFT_FLOAT -pub const EF_ARM_ABI_FLOAT_SOFT: u32 = 0x200; -/// NB conflicts with EF_ARM_VFP_FLOAT -pub const EF_ARM_ABI_FLOAT_HARD: u32 = 0x400; - -// Other constants defined in the ARM ELF spec. version B-01. -// NB. These conflict with values defined above. 
-pub const EF_ARM_SYMSARESORTED: u32 = 0x04; -pub const EF_ARM_DYNSYMSUSESEGIDX: u32 = 0x08; -pub const EF_ARM_MAPSYMSFIRST: u32 = 0x10; - -// Constants defined in AAELF. -pub const EF_ARM_BE8: u32 = 0x0080_0000; -pub const EF_ARM_LE8: u32 = 0x0040_0000; - -pub const EF_ARM_EABIMASK: u32 = 0xff00_0000; -pub const EF_ARM_EABI_UNKNOWN: u32 = 0x0000_0000; -pub const EF_ARM_EABI_VER1: u32 = 0x0100_0000; -pub const EF_ARM_EABI_VER2: u32 = 0x0200_0000; -pub const EF_ARM_EABI_VER3: u32 = 0x0300_0000; -pub const EF_ARM_EABI_VER4: u32 = 0x0400_0000; -pub const EF_ARM_EABI_VER5: u32 = 0x0500_0000; - -// ARM Thumb values for `st_type` component of `Sym*::st_info`. -/// A Thumb function. -pub const STT_ARM_TFUNC: u8 = STT_LOPROC; -/// A Thumb label. -pub const STT_ARM_16BIT: u8 = STT_HIPROC; - -// ARM values for `SectionHeader*::sh_flags`. -/// Section contains an entry point -pub const SHF_ARM_ENTRYSECT: u32 = 0x1000_0000; -/// Section may be multiply defined in the input to a link step. -pub const SHF_ARM_COMDEF: u32 = 0x8000_0000; - -// ARM values for `ProgramHeader*::p_flags`. -/// Segment contains the location addressed by the static base. -pub const PF_ARM_SB: u32 = 0x1000_0000; -/// Position-independent segment. -pub const PF_ARM_PI: u32 = 0x2000_0000; -/// Absolute segment. -pub const PF_ARM_ABS: u32 = 0x4000_0000; - -// ARM values for `ProgramHeader*::p_type`. -/// ARM unwind segment. -pub const PT_ARM_EXIDX: u32 = PT_LOPROC + 1; - -// ARM values for `SectionHeader*::sh_type`. -/// ARM unwind section. -pub const SHT_ARM_EXIDX: u32 = SHT_LOPROC + 1; -/// Preemption details. -pub const SHT_ARM_PREEMPTMAP: u32 = SHT_LOPROC + 2; -/// ARM attributes section. -pub const SHT_ARM_ATTRIBUTES: u32 = SHT_LOPROC + 3; - -// AArch64 values for `Rel*::r_type`. - -/// No relocation. -pub const R_AARCH64_NONE: u32 = 0; - -// ILP32 AArch64 relocs. -/// Direct 32 bit. -pub const R_AARCH64_P32_ABS32: u32 = 1; -/// Copy symbol at runtime. 
-pub const R_AARCH64_P32_COPY: u32 = 180; -/// Create GOT entry. -pub const R_AARCH64_P32_GLOB_DAT: u32 = 181; -/// Create PLT entry. -pub const R_AARCH64_P32_JUMP_SLOT: u32 = 182; -/// Adjust by program base. -pub const R_AARCH64_P32_RELATIVE: u32 = 183; -/// Module number, 32 bit. -pub const R_AARCH64_P32_TLS_DTPMOD: u32 = 184; -/// Module-relative offset, 32 bit. -pub const R_AARCH64_P32_TLS_DTPREL: u32 = 185; -/// TP-relative offset, 32 bit. -pub const R_AARCH64_P32_TLS_TPREL: u32 = 186; -/// TLS Descriptor. -pub const R_AARCH64_P32_TLSDESC: u32 = 187; -/// STT_GNU_IFUNC relocation. -pub const R_AARCH64_P32_IRELATIVE: u32 = 188; - -// LP64 AArch64 relocs. -/// Direct 64 bit. -pub const R_AARCH64_ABS64: u32 = 257; -/// Direct 32 bit. -pub const R_AARCH64_ABS32: u32 = 258; -/// Direct 16-bit. -pub const R_AARCH64_ABS16: u32 = 259; -/// PC-relative 64-bit. -pub const R_AARCH64_PREL64: u32 = 260; -/// PC-relative 32-bit. -pub const R_AARCH64_PREL32: u32 = 261; -/// PC-relative 16-bit. -pub const R_AARCH64_PREL16: u32 = 262; -/// Dir. MOVZ imm. from bits 15:0. -pub const R_AARCH64_MOVW_UABS_G0: u32 = 263; -/// Likewise for MOVK; no check. -pub const R_AARCH64_MOVW_UABS_G0_NC: u32 = 264; -/// Dir. MOVZ imm. from bits 31:16. -pub const R_AARCH64_MOVW_UABS_G1: u32 = 265; -/// Likewise for MOVK; no check. -pub const R_AARCH64_MOVW_UABS_G1_NC: u32 = 266; -/// Dir. MOVZ imm. from bits 47:32. -pub const R_AARCH64_MOVW_UABS_G2: u32 = 267; -/// Likewise for MOVK; no check. -pub const R_AARCH64_MOVW_UABS_G2_NC: u32 = 268; -/// Dir. MOV{K,Z} imm. from 63:48. -pub const R_AARCH64_MOVW_UABS_G3: u32 = 269; -/// Dir. MOV{N,Z} imm. from 15:0. -pub const R_AARCH64_MOVW_SABS_G0: u32 = 270; -/// Dir. MOV{N,Z} imm. from 31:16. -pub const R_AARCH64_MOVW_SABS_G1: u32 = 271; -/// Dir. MOV{N,Z} imm. from 47:32. -pub const R_AARCH64_MOVW_SABS_G2: u32 = 272; -/// PC-rel. LD imm. from bits 20:2. -pub const R_AARCH64_LD_PREL_LO19: u32 = 273; -/// PC-rel. ADR imm. from bits 20:0. 
-pub const R_AARCH64_ADR_PREL_LO21: u32 = 274; -/// Page-rel. ADRP imm. from 32:12. -pub const R_AARCH64_ADR_PREL_PG_HI21: u32 = 275; -/// Likewise; no overflow check. -pub const R_AARCH64_ADR_PREL_PG_HI21_NC: u32 = 276; -/// Dir. ADD imm. from bits 11:0. -pub const R_AARCH64_ADD_ABS_LO12_NC: u32 = 277; -/// Likewise for LD/ST; no check. -pub const R_AARCH64_LDST8_ABS_LO12_NC: u32 = 278; -/// PC-rel. TBZ/TBNZ imm. from 15:2. -pub const R_AARCH64_TSTBR14: u32 = 279; -/// PC-rel. cond. br. imm. from 20:2. -pub const R_AARCH64_CONDBR19: u32 = 280; -/// PC-rel. B imm. from bits 27:2. -pub const R_AARCH64_JUMP26: u32 = 282; -/// Likewise for CALL. -pub const R_AARCH64_CALL26: u32 = 283; -/// Dir. ADD imm. from bits 11:1. -pub const R_AARCH64_LDST16_ABS_LO12_NC: u32 = 284; -/// Likewise for bits 11:2. -pub const R_AARCH64_LDST32_ABS_LO12_NC: u32 = 285; -/// Likewise for bits 11:3. -pub const R_AARCH64_LDST64_ABS_LO12_NC: u32 = 286; -/// PC-rel. MOV{N,Z} imm. from 15:0. -pub const R_AARCH64_MOVW_PREL_G0: u32 = 287; -/// Likewise for MOVK; no check. -pub const R_AARCH64_MOVW_PREL_G0_NC: u32 = 288; -/// PC-rel. MOV{N,Z} imm. from 31:16. -pub const R_AARCH64_MOVW_PREL_G1: u32 = 289; -/// Likewise for MOVK; no check. -pub const R_AARCH64_MOVW_PREL_G1_NC: u32 = 290; -/// PC-rel. MOV{N,Z} imm. from 47:32. -pub const R_AARCH64_MOVW_PREL_G2: u32 = 291; -/// Likewise for MOVK; no check. -pub const R_AARCH64_MOVW_PREL_G2_NC: u32 = 292; -/// PC-rel. MOV{N,Z} imm. from 63:48. -pub const R_AARCH64_MOVW_PREL_G3: u32 = 293; -/// Dir. ADD imm. from bits 11:4. -pub const R_AARCH64_LDST128_ABS_LO12_NC: u32 = 299; -/// GOT-rel. off. MOV{N,Z} imm. 15:0. -pub const R_AARCH64_MOVW_GOTOFF_G0: u32 = 300; -/// Likewise for MOVK; no check. -pub const R_AARCH64_MOVW_GOTOFF_G0_NC: u32 = 301; -/// GOT-rel. o. MOV{N,Z} imm. 31:16. -pub const R_AARCH64_MOVW_GOTOFF_G1: u32 = 302; -/// Likewise for MOVK; no check. -pub const R_AARCH64_MOVW_GOTOFF_G1_NC: u32 = 303; -/// GOT-rel. o. MOV{N,Z} imm. 47:32. 
-pub const R_AARCH64_MOVW_GOTOFF_G2: u32 = 304; -/// Likewise for MOVK; no check. -pub const R_AARCH64_MOVW_GOTOFF_G2_NC: u32 = 305; -/// GOT-rel. o. MOV{N,Z} imm. 63:48. -pub const R_AARCH64_MOVW_GOTOFF_G3: u32 = 306; -/// GOT-relative 64-bit. -pub const R_AARCH64_GOTREL64: u32 = 307; -/// GOT-relative 32-bit. -pub const R_AARCH64_GOTREL32: u32 = 308; -/// PC-rel. GOT off. load imm. 20:2. -pub const R_AARCH64_GOT_LD_PREL19: u32 = 309; -/// GOT-rel. off. LD/ST imm. 14:3. -pub const R_AARCH64_LD64_GOTOFF_LO15: u32 = 310; -/// P-page-rel. GOT off. ADRP 32:12. -pub const R_AARCH64_ADR_GOT_PAGE: u32 = 311; -/// Dir. GOT off. LD/ST imm. 11:3. -pub const R_AARCH64_LD64_GOT_LO12_NC: u32 = 312; -/// GOT-page-rel. GOT off. LD/ST 14:3 -pub const R_AARCH64_LD64_GOTPAGE_LO15: u32 = 313; -/// PC-relative ADR imm. 20:0. -pub const R_AARCH64_TLSGD_ADR_PREL21: u32 = 512; -/// page-rel. ADRP imm. 32:12. -pub const R_AARCH64_TLSGD_ADR_PAGE21: u32 = 513; -/// direct ADD imm. from 11:0. -pub const R_AARCH64_TLSGD_ADD_LO12_NC: u32 = 514; -/// GOT-rel. MOV{N,Z} 31:16. -pub const R_AARCH64_TLSGD_MOVW_G1: u32 = 515; -/// GOT-rel. MOVK imm. 15:0. -pub const R_AARCH64_TLSGD_MOVW_G0_NC: u32 = 516; -/// Like 512; local dynamic model. -pub const R_AARCH64_TLSLD_ADR_PREL21: u32 = 517; -/// Like 513; local dynamic model. -pub const R_AARCH64_TLSLD_ADR_PAGE21: u32 = 518; -/// Like 514; local dynamic model. -pub const R_AARCH64_TLSLD_ADD_LO12_NC: u32 = 519; -/// Like 515; local dynamic model. -pub const R_AARCH64_TLSLD_MOVW_G1: u32 = 520; -/// Like 516; local dynamic model. -pub const R_AARCH64_TLSLD_MOVW_G0_NC: u32 = 521; -/// TLS PC-rel. load imm. 20:2. -pub const R_AARCH64_TLSLD_LD_PREL19: u32 = 522; -/// TLS DTP-rel. MOV{N,Z} 47:32. -pub const R_AARCH64_TLSLD_MOVW_DTPREL_G2: u32 = 523; -/// TLS DTP-rel. MOV{N,Z} 31:16. -pub const R_AARCH64_TLSLD_MOVW_DTPREL_G1: u32 = 524; -/// Likewise; MOVK; no check. -pub const R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: u32 = 525; -/// TLS DTP-rel. MOV{N,Z} 15:0. 
-pub const R_AARCH64_TLSLD_MOVW_DTPREL_G0: u32 = 526; -/// Likewise; MOVK; no check. -pub const R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: u32 = 527; -/// DTP-rel. ADD imm. from 23:12. -pub const R_AARCH64_TLSLD_ADD_DTPREL_HI12: u32 = 528; -/// DTP-rel. ADD imm. from 11:0. -pub const R_AARCH64_TLSLD_ADD_DTPREL_LO12: u32 = 529; -/// Likewise; no ovfl. check. -pub const R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: u32 = 530; -/// DTP-rel. LD/ST imm. 11:0. -pub const R_AARCH64_TLSLD_LDST8_DTPREL_LO12: u32 = 531; -/// Likewise; no check. -pub const R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: u32 = 532; -/// DTP-rel. LD/ST imm. 11:1. -pub const R_AARCH64_TLSLD_LDST16_DTPREL_LO12: u32 = 533; -/// Likewise; no check. -pub const R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: u32 = 534; -/// DTP-rel. LD/ST imm. 11:2. -pub const R_AARCH64_TLSLD_LDST32_DTPREL_LO12: u32 = 535; -/// Likewise; no check. -pub const R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: u32 = 536; -/// DTP-rel. LD/ST imm. 11:3. -pub const R_AARCH64_TLSLD_LDST64_DTPREL_LO12: u32 = 537; -/// Likewise; no check. -pub const R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: u32 = 538; -/// GOT-rel. MOV{N,Z} 31:16. -pub const R_AARCH64_TLSIE_MOVW_GOTTPREL_G1: u32 = 539; -/// GOT-rel. MOVK 15:0. -pub const R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: u32 = 540; -/// Page-rel. ADRP 32:12. -pub const R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: u32 = 541; -/// Direct LD off. 11:3. -pub const R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: u32 = 542; -/// PC-rel. load imm. 20:2. -pub const R_AARCH64_TLSIE_LD_GOTTPREL_PREL19: u32 = 543; -/// TLS TP-rel. MOV{N,Z} 47:32. -pub const R_AARCH64_TLSLE_MOVW_TPREL_G2: u32 = 544; -/// TLS TP-rel. MOV{N,Z} 31:16. -pub const R_AARCH64_TLSLE_MOVW_TPREL_G1: u32 = 545; -/// Likewise; MOVK; no check. -pub const R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: u32 = 546; -/// TLS TP-rel. MOV{N,Z} 15:0. -pub const R_AARCH64_TLSLE_MOVW_TPREL_G0: u32 = 547; -/// Likewise; MOVK; no check. -pub const R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: u32 = 548; -/// TP-rel. ADD imm. 23:12. 
-pub const R_AARCH64_TLSLE_ADD_TPREL_HI12: u32 = 549; -/// TP-rel. ADD imm. 11:0. -pub const R_AARCH64_TLSLE_ADD_TPREL_LO12: u32 = 550; -/// Likewise; no ovfl. check. -pub const R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: u32 = 551; -/// TP-rel. LD/ST off. 11:0. -pub const R_AARCH64_TLSLE_LDST8_TPREL_LO12: u32 = 552; -/// Likewise; no ovfl. check. -pub const R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: u32 = 553; -/// TP-rel. LD/ST off. 11:1. -pub const R_AARCH64_TLSLE_LDST16_TPREL_LO12: u32 = 554; -/// Likewise; no check. -pub const R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: u32 = 555; -/// TP-rel. LD/ST off. 11:2. -pub const R_AARCH64_TLSLE_LDST32_TPREL_LO12: u32 = 556; -/// Likewise; no check. -pub const R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: u32 = 557; -/// TP-rel. LD/ST off. 11:3. -pub const R_AARCH64_TLSLE_LDST64_TPREL_LO12: u32 = 558; -/// Likewise; no check. -pub const R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: u32 = 559; -/// PC-rel. load immediate 20:2. -pub const R_AARCH64_TLSDESC_LD_PREL19: u32 = 560; -/// PC-rel. ADR immediate 20:0. -pub const R_AARCH64_TLSDESC_ADR_PREL21: u32 = 561; -/// Page-rel. ADRP imm. 32:12. -pub const R_AARCH64_TLSDESC_ADR_PAGE21: u32 = 562; -/// Direct LD off. from 11:3. -pub const R_AARCH64_TLSDESC_LD64_LO12: u32 = 563; -/// Direct ADD imm. from 11:0. -pub const R_AARCH64_TLSDESC_ADD_LO12: u32 = 564; -/// GOT-rel. MOV{N,Z} imm. 31:16. -pub const R_AARCH64_TLSDESC_OFF_G1: u32 = 565; -/// GOT-rel. MOVK imm. 15:0; no ck. -pub const R_AARCH64_TLSDESC_OFF_G0_NC: u32 = 566; -/// Relax LDR. -pub const R_AARCH64_TLSDESC_LDR: u32 = 567; -/// Relax ADD. -pub const R_AARCH64_TLSDESC_ADD: u32 = 568; -/// Relax BLR. -pub const R_AARCH64_TLSDESC_CALL: u32 = 569; -/// TP-rel. LD/ST off. 11:4. -pub const R_AARCH64_TLSLE_LDST128_TPREL_LO12: u32 = 570; -/// Likewise; no check. -pub const R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC: u32 = 571; -/// DTP-rel. LD/ST imm. 11:4. -pub const R_AARCH64_TLSLD_LDST128_DTPREL_LO12: u32 = 572; -/// Likewise; no check. 
-pub const R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC: u32 = 573; -/// Copy symbol at runtime. -pub const R_AARCH64_COPY: u32 = 1024; -/// Create GOT entry. -pub const R_AARCH64_GLOB_DAT: u32 = 1025; -/// Create PLT entry. -pub const R_AARCH64_JUMP_SLOT: u32 = 1026; -/// Adjust by program base. -pub const R_AARCH64_RELATIVE: u32 = 1027; -/// Module number, 64 bit. -pub const R_AARCH64_TLS_DTPMOD: u32 = 1028; -/// Module-relative offset, 64 bit. -pub const R_AARCH64_TLS_DTPREL: u32 = 1029; -/// TP-relative offset, 64 bit. -pub const R_AARCH64_TLS_TPREL: u32 = 1030; -/// TLS Descriptor. -pub const R_AARCH64_TLSDESC: u32 = 1031; -/// STT_GNU_IFUNC relocation. -pub const R_AARCH64_IRELATIVE: u32 = 1032; - -// AVR values for `FileHeader*::e_flags`. - -/// Bitmask for `EF_AVR_ARCH_*`. -pub const EF_AVR_ARCH: u32 = 0x7F; - -/// If set, it is assumed that the elf file uses local symbols as reference -/// for the relocations so that linker relaxation is possible. -pub const EF_AVR_LINKRELAX_PREPARED: u32 = 0x80; - -pub const EF_AVR_ARCH_AVR1: u32 = 1; -pub const EF_AVR_ARCH_AVR2: u32 = 2; -pub const EF_AVR_ARCH_AVR25: u32 = 25; -pub const EF_AVR_ARCH_AVR3: u32 = 3; -pub const EF_AVR_ARCH_AVR31: u32 = 31; -pub const EF_AVR_ARCH_AVR35: u32 = 35; -pub const EF_AVR_ARCH_AVR4: u32 = 4; -pub const EF_AVR_ARCH_AVR5: u32 = 5; -pub const EF_AVR_ARCH_AVR51: u32 = 51; -pub const EF_AVR_ARCH_AVR6: u32 = 6; -pub const EF_AVR_ARCH_AVRTINY: u32 = 100; -pub const EF_AVR_ARCH_XMEGA1: u32 = 101; -pub const EF_AVR_ARCH_XMEGA2: u32 = 102; -pub const EF_AVR_ARCH_XMEGA3: u32 = 103; -pub const EF_AVR_ARCH_XMEGA4: u32 = 104; -pub const EF_AVR_ARCH_XMEGA5: u32 = 105; -pub const EF_AVR_ARCH_XMEGA6: u32 = 106; -pub const EF_AVR_ARCH_XMEGA7: u32 = 107; - -// AVR values for `Rel*::r_type`. 
- -pub const R_AVR_NONE: u32 = 0; -/// Direct 32 bit -pub const R_AVR_32: u32 = 1; -pub const R_AVR_7_PCREL: u32 = 2; -pub const R_AVR_13_PCREL: u32 = 3; -/// Direct 16 bit -pub const R_AVR_16: u32 = 4; -pub const R_AVR_16_PM: u32 = 5; -pub const R_AVR_LO8_LDI: u32 = 6; -pub const R_AVR_HI8_LDI: u32 = 7; -pub const R_AVR_HH8_LDI: u32 = 8; -pub const R_AVR_LO8_LDI_NEG: u32 = 9; -pub const R_AVR_HI8_LDI_NEG: u32 = 10; -pub const R_AVR_HH8_LDI_NEG: u32 = 11; -pub const R_AVR_LO8_LDI_PM: u32 = 12; -pub const R_AVR_HI8_LDI_PM: u32 = 13; -pub const R_AVR_HH8_LDI_PM: u32 = 14; -pub const R_AVR_LO8_LDI_PM_NEG: u32 = 15; -pub const R_AVR_HI8_LDI_PM_NEG: u32 = 16; -pub const R_AVR_HH8_LDI_PM_NEG: u32 = 17; -pub const R_AVR_CALL: u32 = 18; -pub const R_AVR_LDI: u32 = 19; -pub const R_AVR_6: u32 = 20; -pub const R_AVR_6_ADIW: u32 = 21; -pub const R_AVR_MS8_LDI: u32 = 22; -pub const R_AVR_MS8_LDI_NEG: u32 = 23; -pub const R_AVR_LO8_LDI_GS: u32 = 24; -pub const R_AVR_HI8_LDI_GS: u32 = 25; -pub const R_AVR_8: u32 = 26; -pub const R_AVR_8_LO8: u32 = 27; -pub const R_AVR_8_HI8: u32 = 28; -pub const R_AVR_8_HLO8: u32 = 29; -pub const R_AVR_DIFF8: u32 = 30; -pub const R_AVR_DIFF16: u32 = 31; -pub const R_AVR_DIFF32: u32 = 32; -pub const R_AVR_LDS_STS_16: u32 = 33; -pub const R_AVR_PORT6: u32 = 34; -pub const R_AVR_PORT5: u32 = 35; -pub const R_AVR_32_PCREL: u32 = 36; - -// MSP430 values for `Rel*::r_type`. - -/// Direct 32 bit -pub const R_MSP430_32: u32 = 1; -/// Direct 16 bit -pub const R_MSP430_16_BYTE: u32 = 5; - -// Hexagon values for `Rel*::r_type`. - -/// Direct 32 bit -pub const R_HEX_32: u32 = 6; - -// ARM values for `Rel*::r_type`. - -/// No reloc -pub const R_ARM_NONE: u32 = 0; -/// Deprecated PC relative 26 bit branch. 
-pub const R_ARM_PC24: u32 = 1; -/// Direct 32 bit -pub const R_ARM_ABS32: u32 = 2; -/// PC relative 32 bit -pub const R_ARM_REL32: u32 = 3; -pub const R_ARM_PC13: u32 = 4; -/// Direct 16 bit -pub const R_ARM_ABS16: u32 = 5; -/// Direct 12 bit -pub const R_ARM_ABS12: u32 = 6; -/// Direct & 0x7C (LDR, STR). -pub const R_ARM_THM_ABS5: u32 = 7; -/// Direct 8 bit -pub const R_ARM_ABS8: u32 = 8; -pub const R_ARM_SBREL32: u32 = 9; -/// PC relative 24 bit (Thumb32 BL). -pub const R_ARM_THM_PC22: u32 = 10; -/// PC relative & 0x3FC (Thumb16 LDR, ADD, ADR). -pub const R_ARM_THM_PC8: u32 = 11; -pub const R_ARM_AMP_VCALL9: u32 = 12; -/// Obsolete static relocation. -pub const R_ARM_SWI24: u32 = 13; -/// Dynamic relocation. -pub const R_ARM_TLS_DESC: u32 = 13; -/// Reserved. -pub const R_ARM_THM_SWI8: u32 = 14; -/// Reserved. -pub const R_ARM_XPC25: u32 = 15; -/// Reserved. -pub const R_ARM_THM_XPC22: u32 = 16; -/// ID of module containing symbol -pub const R_ARM_TLS_DTPMOD32: u32 = 17; -/// Offset in TLS block -pub const R_ARM_TLS_DTPOFF32: u32 = 18; -/// Offset in static TLS block -pub const R_ARM_TLS_TPOFF32: u32 = 19; -/// Copy symbol at runtime -pub const R_ARM_COPY: u32 = 20; -/// Create GOT entry -pub const R_ARM_GLOB_DAT: u32 = 21; -/// Create PLT entry -pub const R_ARM_JUMP_SLOT: u32 = 22; -/// Adjust by program base -pub const R_ARM_RELATIVE: u32 = 23; -/// 32 bit offset to GOT -pub const R_ARM_GOTOFF: u32 = 24; -/// 32 bit PC relative offset to GOT -pub const R_ARM_GOTPC: u32 = 25; -/// 32 bit GOT entry -pub const R_ARM_GOT32: u32 = 26; -/// Deprecated, 32 bit PLT address. -pub const R_ARM_PLT32: u32 = 27; -/// PC relative 24 bit (BL, BLX). -pub const R_ARM_CALL: u32 = 28; -/// PC relative 24 bit (B, BL). -pub const R_ARM_JUMP24: u32 = 29; -/// PC relative 24 bit (Thumb32 B.W). -pub const R_ARM_THM_JUMP24: u32 = 30; -/// Adjust by program base. -pub const R_ARM_BASE_ABS: u32 = 31; -/// Obsolete. -pub const R_ARM_ALU_PCREL_7_0: u32 = 32; -/// Obsolete. 
-pub const R_ARM_ALU_PCREL_15_8: u32 = 33; -/// Obsolete. -pub const R_ARM_ALU_PCREL_23_15: u32 = 34; -/// Deprecated, prog. base relative. -pub const R_ARM_LDR_SBREL_11_0: u32 = 35; -/// Deprecated, prog. base relative. -pub const R_ARM_ALU_SBREL_19_12: u32 = 36; -/// Deprecated, prog. base relative. -pub const R_ARM_ALU_SBREL_27_20: u32 = 37; -pub const R_ARM_TARGET1: u32 = 38; -/// Program base relative. -pub const R_ARM_SBREL31: u32 = 39; -pub const R_ARM_V4BX: u32 = 40; -pub const R_ARM_TARGET2: u32 = 41; -/// 32 bit PC relative. -pub const R_ARM_PREL31: u32 = 42; -/// Direct 16-bit (MOVW). -pub const R_ARM_MOVW_ABS_NC: u32 = 43; -/// Direct high 16-bit (MOVT). -pub const R_ARM_MOVT_ABS: u32 = 44; -/// PC relative 16-bit (MOVW). -pub const R_ARM_MOVW_PREL_NC: u32 = 45; -/// PC relative (MOVT). -pub const R_ARM_MOVT_PREL: u32 = 46; -/// Direct 16 bit (Thumb32 MOVW). -pub const R_ARM_THM_MOVW_ABS_NC: u32 = 47; -/// Direct high 16 bit (Thumb32 MOVT). -pub const R_ARM_THM_MOVT_ABS: u32 = 48; -/// PC relative 16 bit (Thumb32 MOVW). -pub const R_ARM_THM_MOVW_PREL_NC: u32 = 49; -/// PC relative high 16 bit (Thumb32 MOVT). -pub const R_ARM_THM_MOVT_PREL: u32 = 50; -/// PC relative 20 bit (Thumb32 B.W). -pub const R_ARM_THM_JUMP19: u32 = 51; -/// PC relative X & 0x7E (Thumb16 CBZ, CBNZ). -pub const R_ARM_THM_JUMP6: u32 = 52; -/// PC relative 12 bit (Thumb32 ADR.W). -pub const R_ARM_THM_ALU_PREL_11_0: u32 = 53; -/// PC relative 12 bit (Thumb32 LDR{D,SB,H,SH}). -pub const R_ARM_THM_PC12: u32 = 54; -/// Direct 32-bit. -pub const R_ARM_ABS32_NOI: u32 = 55; -/// PC relative 32-bit. -pub const R_ARM_REL32_NOI: u32 = 56; -/// PC relative (ADD, SUB). -pub const R_ARM_ALU_PC_G0_NC: u32 = 57; -/// PC relative (ADD, SUB). -pub const R_ARM_ALU_PC_G0: u32 = 58; -/// PC relative (ADD, SUB). -pub const R_ARM_ALU_PC_G1_NC: u32 = 59; -/// PC relative (ADD, SUB). -pub const R_ARM_ALU_PC_G1: u32 = 60; -/// PC relative (ADD, SUB). 
-pub const R_ARM_ALU_PC_G2: u32 = 61; -/// PC relative (LDR,STR,LDRB,STRB). -pub const R_ARM_LDR_PC_G1: u32 = 62; -/// PC relative (LDR,STR,LDRB,STRB). -pub const R_ARM_LDR_PC_G2: u32 = 63; -/// PC relative (STR{D,H}, LDR{D,SB,H,SH}). -pub const R_ARM_LDRS_PC_G0: u32 = 64; -/// PC relative (STR{D,H}, LDR{D,SB,H,SH}). -pub const R_ARM_LDRS_PC_G1: u32 = 65; -/// PC relative (STR{D,H}, LDR{D,SB,H,SH}). -pub const R_ARM_LDRS_PC_G2: u32 = 66; -/// PC relative (LDC, STC). -pub const R_ARM_LDC_PC_G0: u32 = 67; -/// PC relative (LDC, STC). -pub const R_ARM_LDC_PC_G1: u32 = 68; -/// PC relative (LDC, STC). -pub const R_ARM_LDC_PC_G2: u32 = 69; -/// Program base relative (ADD,SUB). -pub const R_ARM_ALU_SB_G0_NC: u32 = 70; -/// Program base relative (ADD,SUB). -pub const R_ARM_ALU_SB_G0: u32 = 71; -/// Program base relative (ADD,SUB). -pub const R_ARM_ALU_SB_G1_NC: u32 = 72; -/// Program base relative (ADD,SUB). -pub const R_ARM_ALU_SB_G1: u32 = 73; -/// Program base relative (ADD,SUB). -pub const R_ARM_ALU_SB_G2: u32 = 74; -/// Program base relative (LDR, STR, LDRB, STRB). -pub const R_ARM_LDR_SB_G0: u32 = 75; -/// Program base relative (LDR, STR, LDRB, STRB). -pub const R_ARM_LDR_SB_G1: u32 = 76; -/// Program base relative (LDR, STR, LDRB, STRB). -pub const R_ARM_LDR_SB_G2: u32 = 77; -/// Program base relative (LDR, STR, LDRB, STRB). -pub const R_ARM_LDRS_SB_G0: u32 = 78; -/// Program base relative (LDR, STR, LDRB, STRB). -pub const R_ARM_LDRS_SB_G1: u32 = 79; -/// Program base relative (LDR, STR, LDRB, STRB). -pub const R_ARM_LDRS_SB_G2: u32 = 80; -/// Program base relative (LDC,STC). -pub const R_ARM_LDC_SB_G0: u32 = 81; -/// Program base relative (LDC,STC). -pub const R_ARM_LDC_SB_G1: u32 = 82; -/// Program base relative (LDC,STC). -pub const R_ARM_LDC_SB_G2: u32 = 83; -/// Program base relative 16 bit (MOVW). -pub const R_ARM_MOVW_BREL_NC: u32 = 84; -/// Program base relative high 16 bit (MOVT). 
-pub const R_ARM_MOVT_BREL: u32 = 85; -/// Program base relative 16 bit (MOVW). -pub const R_ARM_MOVW_BREL: u32 = 86; -/// Program base relative 16 bit (Thumb32 MOVW). -pub const R_ARM_THM_MOVW_BREL_NC: u32 = 87; -/// Program base relative high 16 bit (Thumb32 MOVT). -pub const R_ARM_THM_MOVT_BREL: u32 = 88; -/// Program base relative 16 bit (Thumb32 MOVW). -pub const R_ARM_THM_MOVW_BREL: u32 = 89; -pub const R_ARM_TLS_GOTDESC: u32 = 90; -pub const R_ARM_TLS_CALL: u32 = 91; -/// TLS relaxation. -pub const R_ARM_TLS_DESCSEQ: u32 = 92; -pub const R_ARM_THM_TLS_CALL: u32 = 93; -pub const R_ARM_PLT32_ABS: u32 = 94; -/// GOT entry. -pub const R_ARM_GOT_ABS: u32 = 95; -/// PC relative GOT entry. -pub const R_ARM_GOT_PREL: u32 = 96; -/// GOT entry relative to GOT origin (LDR). -pub const R_ARM_GOT_BREL12: u32 = 97; -/// 12 bit, GOT entry relative to GOT origin (LDR, STR). -pub const R_ARM_GOTOFF12: u32 = 98; -pub const R_ARM_GOTRELAX: u32 = 99; -pub const R_ARM_GNU_VTENTRY: u32 = 100; -pub const R_ARM_GNU_VTINHERIT: u32 = 101; -/// PC relative & 0xFFE (Thumb16 B). -pub const R_ARM_THM_PC11: u32 = 102; -/// PC relative & 0x1FE (Thumb16 B/B). -pub const R_ARM_THM_PC9: u32 = 103; -/// PC-rel 32 bit for global dynamic thread local data -pub const R_ARM_TLS_GD32: u32 = 104; -/// PC-rel 32 bit for local dynamic thread local data -pub const R_ARM_TLS_LDM32: u32 = 105; -/// 32 bit offset relative to TLS block -pub const R_ARM_TLS_LDO32: u32 = 106; -/// PC-rel 32 bit for GOT entry of static TLS block offset -pub const R_ARM_TLS_IE32: u32 = 107; -/// 32 bit offset relative to static TLS block -pub const R_ARM_TLS_LE32: u32 = 108; -/// 12 bit relative to TLS block (LDR, STR). -pub const R_ARM_TLS_LDO12: u32 = 109; -/// 12 bit relative to static TLS block (LDR, STR). -pub const R_ARM_TLS_LE12: u32 = 110; -/// 12 bit GOT entry relative to GOT origin (LDR). -pub const R_ARM_TLS_IE12GP: u32 = 111; -/// Obsolete. 
-pub const R_ARM_ME_TOO: u32 = 128; -pub const R_ARM_THM_TLS_DESCSEQ: u32 = 129; -pub const R_ARM_THM_TLS_DESCSEQ16: u32 = 129; -pub const R_ARM_THM_TLS_DESCSEQ32: u32 = 130; -/// GOT entry relative to GOT origin, 12 bit (Thumb32 LDR). -pub const R_ARM_THM_GOT_BREL12: u32 = 131; -pub const R_ARM_IRELATIVE: u32 = 160; -pub const R_ARM_RXPC25: u32 = 249; -pub const R_ARM_RSBREL32: u32 = 250; -pub const R_ARM_THM_RPC22: u32 = 251; -pub const R_ARM_RREL32: u32 = 252; -pub const R_ARM_RABS22: u32 = 253; -pub const R_ARM_RPC24: u32 = 254; -pub const R_ARM_RBASE: u32 = 255; - -// C-SKY values for `Rel*::r_type`. -/// no reloc -pub const R_CKCORE_NONE: u32 = 0; -/// direct 32 bit (S + A) -pub const R_CKCORE_ADDR32: u32 = 1; -/// disp ((S + A - P) >> 2) & 0xff -pub const R_CKCORE_PCRELIMM8BY4: u32 = 2; -/// disp ((S + A - P) >> 1) & 0x7ff -pub const R_CKCORE_PCRELIMM11BY2: u32 = 3; -/// 32-bit rel (S + A - P) -pub const R_CKCORE_PCREL32: u32 = 5; -/// disp ((S + A - P) >>1) & 0x7ff -pub const R_CKCORE_PCRELJSR_IMM11BY2: u32 = 6; -/// 32 bit adjust program base(B + A) -pub const R_CKCORE_RELATIVE: u32 = 9; -/// 32 bit adjust by program base -pub const R_CKCORE_COPY: u32 = 10; -/// off between got and sym (S) -pub const R_CKCORE_GLOB_DAT: u32 = 11; -/// PLT entry (S) -pub const R_CKCORE_JUMP_SLOT: u32 = 12; -/// offset to GOT (S + A - GOT) -pub const R_CKCORE_GOTOFF: u32 = 13; -/// PC offset to GOT (GOT + A - P) -pub const R_CKCORE_GOTPC: u32 = 14; -/// 32 bit GOT entry (G) -pub const R_CKCORE_GOT32: u32 = 15; -/// 32 bit PLT entry (G) -pub const R_CKCORE_PLT32: u32 = 16; -/// GOT entry in GLOB_DAT (GOT + G) -pub const R_CKCORE_ADDRGOT: u32 = 17; -/// PLT entry in GLOB_DAT (GOT + G) -pub const R_CKCORE_ADDRPLT: u32 = 18; -/// ((S + A - P) >> 1) & 0x3ff_ffff -pub const R_CKCORE_PCREL_IMM26BY2: u32 = 19; -/// disp ((S + A - P) >> 1) & 0xffff -pub const R_CKCORE_PCREL_IMM16BY2: u32 = 20; -/// disp ((S + A - P) >> 2) & 0xffff -pub const R_CKCORE_PCREL_IMM16BY4: u32 = 21; -/// 
disp ((S + A - P) >> 1) & 0x3ff -pub const R_CKCORE_PCREL_IMM10BY2: u32 = 22; -/// disp ((S + A - P) >> 2) & 0x3ff -pub const R_CKCORE_PCREL_IMM10BY4: u32 = 23; -/// high & low 16 bit ADDR, ((S + A) >> 16) & 0xffff -pub const R_CKCORE_ADDR_HI16: u32 = 24; -/// (S + A) & 0xffff -pub const R_CKCORE_ADDR_LO16: u32 = 25; -/// high & low 16 bit GOTPC, ((GOT + A - P) >> 16) & 0xffff -pub const R_CKCORE_GOTPC_HI16: u32 = 26; -/// (GOT + A - P) & 0xffff -pub const R_CKCORE_GOTPC_LO16: u32 = 27; -/// high & low 16 bit GOTOFF, ((S + A - GOT) >> 16) & 0xffff -pub const R_CKCORE_GOTOFF_HI16: u32 = 28; -/// (S + A - GOT) & 0xffff -pub const R_CKCORE_GOTOFF_LO16: u32 = 29; -/// 12 bit disp GOT entry (G) -pub const R_CKCORE_GOT12: u32 = 30; -/// high & low 16 bit GOT, (G >> 16) & 0xffff -pub const R_CKCORE_GOT_HI16: u32 = 31; -/// (G & 0xffff) -pub const R_CKCORE_GOT_LO16: u32 = 32; -/// 12 bit disp PLT entry (G) -pub const R_CKCORE_PLT12: u32 = 33; -/// high & low 16 bit PLT, (G >> 16) & 0xffff -pub const R_CKCORE_PLT_HI16: u32 = 34; -/// G & 0xffff -pub const R_CKCORE_PLT_LO16: u32 = 35; -/// high & low 16 bit ADDRGOT, (GOT + G * 4) & 0xffff -pub const R_CKCORE_ADDRGOT_HI16: u32 = 36; -/// (GOT + G * 4) & 0xffff -pub const R_CKCORE_ADDRGOT_LO16: u32 = 37; -/// high & low 16 bit ADDRPLT, ((GOT + G * 4) >> 16) & 0xFFFF -pub const R_CKCORE_ADDRPLT_HI16: u32 = 38; -/// (GOT+G*4) & 0xffff -pub const R_CKCORE_ADDRPLT_LO16: u32 = 39; -/// disp ((S+A-P) >>1) & x3ff_ffff -pub const R_CKCORE_PCREL_JSR_IMM26BY2: u32 = 40; -/// (S+A-BTEXT) & 0xffff -pub const R_CKCORE_TOFFSET_LO16: u32 = 41; -/// (S+A-BTEXT) & 0xffff -pub const R_CKCORE_DOFFSET_LO16: u32 = 42; -/// disp ((S+A-P) >>1) & 0x3ffff -pub const R_CKCORE_PCREL_IMM18BY2: u32 = 43; -/// disp (S+A-BDATA) & 0x3ffff -pub const R_CKCORE_DOFFSET_IMM18: u32 = 44; -/// disp ((S+A-BDATA)>>1) & 0x3ffff -pub const R_CKCORE_DOFFSET_IMM18BY2: u32 = 45; -/// disp ((S+A-BDATA)>>2) & 0x3ffff -pub const R_CKCORE_DOFFSET_IMM18BY4: u32 = 46; -/// 
disp (G >> 2) -pub const R_CKCORE_GOT_IMM18BY4: u32 = 48; -/// disp (G >> 2) -pub const R_CKCORE_PLT_IMM18BY4: u32 = 49; -/// disp ((S+A-P) >>2) & 0x7f -pub const R_CKCORE_PCREL_IMM7BY4: u32 = 50; -/// 32 bit offset to TLS block -pub const R_CKCORE_TLS_LE32: u32 = 51; -pub const R_CKCORE_TLS_IE32: u32 = 52; -pub const R_CKCORE_TLS_GD32: u32 = 53; -pub const R_CKCORE_TLS_LDM32: u32 = 54; -pub const R_CKCORE_TLS_LDO32: u32 = 55; -pub const R_CKCORE_TLS_DTPMOD32: u32 = 56; -pub const R_CKCORE_TLS_DTPOFF32: u32 = 57; -pub const R_CKCORE_TLS_TPOFF32: u32 = 58; - -// C-SKY values for `FileHeader*::e_flags`. -pub const EF_CSKY_ABIMASK: u32 = 0xF000_0000; -pub const EF_CSKY_OTHER: u32 = 0x0FFF_0000; -pub const EF_CSKY_PROCESSOR: u32 = 0x0000_FFFF; - -pub const EF_CSKY_ABIV1: u32 = 0x1000_0000; -pub const EF_CSKY_ABIV2: u32 = 0x2000_0000; - -// C-SKY values for `SectionHeader*::sh_type`. -/// C-SKY attributes section. -pub const SHT_CSKY_ATTRIBUTES: u32 = SHT_LOPROC + 1; - -// IA-64 specific declarations. - -// IA-64 values for `FileHeader64::e_flags`. -/// os-specific flags -pub const EF_IA_64_MASKOS: u32 = 0x0000_000f; -/// 64-bit ABI -pub const EF_IA_64_ABI64: u32 = 0x0000_0010; -/// arch. version mask -pub const EF_IA_64_ARCH: u32 = 0xff00_0000; - -// IA-64 values for `ProgramHeader64::p_type`. -/// arch extension bits -pub const PT_IA_64_ARCHEXT: u32 = PT_LOPROC + 0; -/// ia64 unwind bits -pub const PT_IA_64_UNWIND: u32 = PT_LOPROC + 1; -pub const PT_IA_64_HP_OPT_ANOT: u32 = PT_LOOS + 0x12; -pub const PT_IA_64_HP_HSL_ANOT: u32 = PT_LOOS + 0x13; -pub const PT_IA_64_HP_STACK: u32 = PT_LOOS + 0x14; - -// IA-64 values for `ProgramHeader64::p_flags`. -/// spec insns w/o recovery -pub const PF_IA_64_NORECOV: u32 = 0x8000_0000; - -// IA-64 values for `SectionHeader64::sh_type`. -/// extension bits -pub const SHT_IA_64_EXT: u32 = SHT_LOPROC + 0; -/// unwind bits -pub const SHT_IA_64_UNWIND: u32 = SHT_LOPROC + 1; - -// IA-64 values for `SectionHeader64::sh_flags`. 
-/// section near gp -pub const SHF_IA_64_SHORT: u32 = 0x1000_0000; -/// spec insns w/o recovery -pub const SHF_IA_64_NORECOV: u32 = 0x2000_0000; - -// IA-64 values for `Dyn64::d_tag`. -pub const DT_IA_64_PLT_RESERVE: u32 = DT_LOPROC + 0; - -// IA-64 values for `Rel*::r_type`. -/// none -pub const R_IA64_NONE: u32 = 0x00; -/// symbol + addend, add imm14 -pub const R_IA64_IMM14: u32 = 0x21; -/// symbol + addend, add imm22 -pub const R_IA64_IMM22: u32 = 0x22; -/// symbol + addend, mov imm64 -pub const R_IA64_IMM64: u32 = 0x23; -/// symbol + addend, data4 MSB -pub const R_IA64_DIR32MSB: u32 = 0x24; -/// symbol + addend, data4 LSB -pub const R_IA64_DIR32LSB: u32 = 0x25; -/// symbol + addend, data8 MSB -pub const R_IA64_DIR64MSB: u32 = 0x26; -/// symbol + addend, data8 LSB -pub const R_IA64_DIR64LSB: u32 = 0x27; -/// @gprel(sym + add), add imm22 -pub const R_IA64_GPREL22: u32 = 0x2a; -/// @gprel(sym + add), mov imm64 -pub const R_IA64_GPREL64I: u32 = 0x2b; -/// @gprel(sym + add), data4 MSB -pub const R_IA64_GPREL32MSB: u32 = 0x2c; -/// @gprel(sym + add), data4 LSB -pub const R_IA64_GPREL32LSB: u32 = 0x2d; -/// @gprel(sym + add), data8 MSB -pub const R_IA64_GPREL64MSB: u32 = 0x2e; -/// @gprel(sym + add), data8 LSB -pub const R_IA64_GPREL64LSB: u32 = 0x2f; -/// @ltoff(sym + add), add imm22 -pub const R_IA64_LTOFF22: u32 = 0x32; -/// @ltoff(sym + add), mov imm64 -pub const R_IA64_LTOFF64I: u32 = 0x33; -/// @pltoff(sym + add), add imm22 -pub const R_IA64_PLTOFF22: u32 = 0x3a; -/// @pltoff(sym + add), mov imm64 -pub const R_IA64_PLTOFF64I: u32 = 0x3b; -/// @pltoff(sym + add), data8 MSB -pub const R_IA64_PLTOFF64MSB: u32 = 0x3e; -/// @pltoff(sym + add), data8 LSB -pub const R_IA64_PLTOFF64LSB: u32 = 0x3f; -/// @fptr(sym + add), mov imm64 -pub const R_IA64_FPTR64I: u32 = 0x43; -/// @fptr(sym + add), data4 MSB -pub const R_IA64_FPTR32MSB: u32 = 0x44; -/// @fptr(sym + add), data4 LSB -pub const R_IA64_FPTR32LSB: u32 = 0x45; -/// @fptr(sym + add), data8 MSB -pub const 
R_IA64_FPTR64MSB: u32 = 0x46; -/// @fptr(sym + add), data8 LSB -pub const R_IA64_FPTR64LSB: u32 = 0x47; -/// @pcrel(sym + add), brl -pub const R_IA64_PCREL60B: u32 = 0x48; -/// @pcrel(sym + add), ptb, call -pub const R_IA64_PCREL21B: u32 = 0x49; -/// @pcrel(sym + add), chk.s -pub const R_IA64_PCREL21M: u32 = 0x4a; -/// @pcrel(sym + add), fchkf -pub const R_IA64_PCREL21F: u32 = 0x4b; -/// @pcrel(sym + add), data4 MSB -pub const R_IA64_PCREL32MSB: u32 = 0x4c; -/// @pcrel(sym + add), data4 LSB -pub const R_IA64_PCREL32LSB: u32 = 0x4d; -/// @pcrel(sym + add), data8 MSB -pub const R_IA64_PCREL64MSB: u32 = 0x4e; -/// @pcrel(sym + add), data8 LSB -pub const R_IA64_PCREL64LSB: u32 = 0x4f; -/// @ltoff(@fptr(s+a)), imm22 -pub const R_IA64_LTOFF_FPTR22: u32 = 0x52; -/// @ltoff(@fptr(s+a)), imm64 -pub const R_IA64_LTOFF_FPTR64I: u32 = 0x53; -/// @ltoff(@fptr(s+a)), data4 MSB -pub const R_IA64_LTOFF_FPTR32MSB: u32 = 0x54; -/// @ltoff(@fptr(s+a)), data4 LSB -pub const R_IA64_LTOFF_FPTR32LSB: u32 = 0x55; -/// @ltoff(@fptr(s+a)), data8 MSB -pub const R_IA64_LTOFF_FPTR64MSB: u32 = 0x56; -/// @ltoff(@fptr(s+a)), data8 LSB -pub const R_IA64_LTOFF_FPTR64LSB: u32 = 0x57; -/// @segrel(sym + add), data4 MSB -pub const R_IA64_SEGREL32MSB: u32 = 0x5c; -/// @segrel(sym + add), data4 LSB -pub const R_IA64_SEGREL32LSB: u32 = 0x5d; -/// @segrel(sym + add), data8 MSB -pub const R_IA64_SEGREL64MSB: u32 = 0x5e; -/// @segrel(sym + add), data8 LSB -pub const R_IA64_SEGREL64LSB: u32 = 0x5f; -/// @secrel(sym + add), data4 MSB -pub const R_IA64_SECREL32MSB: u32 = 0x64; -/// @secrel(sym + add), data4 LSB -pub const R_IA64_SECREL32LSB: u32 = 0x65; -/// @secrel(sym + add), data8 MSB -pub const R_IA64_SECREL64MSB: u32 = 0x66; -/// @secrel(sym + add), data8 LSB -pub const R_IA64_SECREL64LSB: u32 = 0x67; -/// data 4 + REL -pub const R_IA64_REL32MSB: u32 = 0x6c; -/// data 4 + REL -pub const R_IA64_REL32LSB: u32 = 0x6d; -/// data 8 + REL -pub const R_IA64_REL64MSB: u32 = 0x6e; -/// data 8 + REL -pub const 
R_IA64_REL64LSB: u32 = 0x6f; -/// symbol + addend, data4 MSB -pub const R_IA64_LTV32MSB: u32 = 0x74; -/// symbol + addend, data4 LSB -pub const R_IA64_LTV32LSB: u32 = 0x75; -/// symbol + addend, data8 MSB -pub const R_IA64_LTV64MSB: u32 = 0x76; -/// symbol + addend, data8 LSB -pub const R_IA64_LTV64LSB: u32 = 0x77; -/// @pcrel(sym + add), 21bit inst -pub const R_IA64_PCREL21BI: u32 = 0x79; -/// @pcrel(sym + add), 22bit inst -pub const R_IA64_PCREL22: u32 = 0x7a; -/// @pcrel(sym + add), 64bit inst -pub const R_IA64_PCREL64I: u32 = 0x7b; -/// dynamic reloc, imported PLT, MSB -pub const R_IA64_IPLTMSB: u32 = 0x80; -/// dynamic reloc, imported PLT, LSB -pub const R_IA64_IPLTLSB: u32 = 0x81; -/// copy relocation -pub const R_IA64_COPY: u32 = 0x84; -/// Addend and symbol difference -pub const R_IA64_SUB: u32 = 0x85; -/// LTOFF22, relaxable. -pub const R_IA64_LTOFF22X: u32 = 0x86; -/// Use of LTOFF22X. -pub const R_IA64_LDXMOV: u32 = 0x87; -/// @tprel(sym + add), imm14 -pub const R_IA64_TPREL14: u32 = 0x91; -/// @tprel(sym + add), imm22 -pub const R_IA64_TPREL22: u32 = 0x92; -/// @tprel(sym + add), imm64 -pub const R_IA64_TPREL64I: u32 = 0x93; -/// @tprel(sym + add), data8 MSB -pub const R_IA64_TPREL64MSB: u32 = 0x96; -/// @tprel(sym + add), data8 LSB -pub const R_IA64_TPREL64LSB: u32 = 0x97; -/// @ltoff(@tprel(s+a)), imm2 -pub const R_IA64_LTOFF_TPREL22: u32 = 0x9a; -/// @dtpmod(sym + add), data8 MSB -pub const R_IA64_DTPMOD64MSB: u32 = 0xa6; -/// @dtpmod(sym + add), data8 LSB -pub const R_IA64_DTPMOD64LSB: u32 = 0xa7; -/// @ltoff(@dtpmod(sym + add)), imm22 -pub const R_IA64_LTOFF_DTPMOD22: u32 = 0xaa; -/// @dtprel(sym + add), imm14 -pub const R_IA64_DTPREL14: u32 = 0xb1; -/// @dtprel(sym + add), imm22 -pub const R_IA64_DTPREL22: u32 = 0xb2; -/// @dtprel(sym + add), imm64 -pub const R_IA64_DTPREL64I: u32 = 0xb3; -/// @dtprel(sym + add), data4 MSB -pub const R_IA64_DTPREL32MSB: u32 = 0xb4; -/// @dtprel(sym + add), data4 LSB -pub const R_IA64_DTPREL32LSB: u32 = 0xb5; -/// 
@dtprel(sym + add), data8 MSB -pub const R_IA64_DTPREL64MSB: u32 = 0xb6; -/// @dtprel(sym + add), data8 LSB -pub const R_IA64_DTPREL64LSB: u32 = 0xb7; -/// @ltoff(@dtprel(s+a)), imm22 -pub const R_IA64_LTOFF_DTPREL22: u32 = 0xba; - -// SH specific declarations. - -// SH values `FileHeader*::e_flags`. -pub const EF_SH_MACH_MASK: u32 = 0x1f; -pub const EF_SH_UNKNOWN: u32 = 0x0; -pub const EF_SH1: u32 = 0x1; -pub const EF_SH2: u32 = 0x2; -pub const EF_SH3: u32 = 0x3; -pub const EF_SH_DSP: u32 = 0x4; -pub const EF_SH3_DSP: u32 = 0x5; -pub const EF_SH4AL_DSP: u32 = 0x6; -pub const EF_SH3E: u32 = 0x8; -pub const EF_SH4: u32 = 0x9; -pub const EF_SH2E: u32 = 0xb; -pub const EF_SH4A: u32 = 0xc; -pub const EF_SH2A: u32 = 0xd; -pub const EF_SH4_NOFPU: u32 = 0x10; -pub const EF_SH4A_NOFPU: u32 = 0x11; -pub const EF_SH4_NOMMU_NOFPU: u32 = 0x12; -pub const EF_SH2A_NOFPU: u32 = 0x13; -pub const EF_SH3_NOMMU: u32 = 0x14; -pub const EF_SH2A_SH4_NOFPU: u32 = 0x15; -pub const EF_SH2A_SH3_NOFPU: u32 = 0x16; -pub const EF_SH2A_SH4: u32 = 0x17; -pub const EF_SH2A_SH3E: u32 = 0x18; - -// SH values `Rel*::r_type`. 
-pub const R_SH_NONE: u32 = 0; -pub const R_SH_DIR32: u32 = 1; -pub const R_SH_REL32: u32 = 2; -pub const R_SH_DIR8WPN: u32 = 3; -pub const R_SH_IND12W: u32 = 4; -pub const R_SH_DIR8WPL: u32 = 5; -pub const R_SH_DIR8WPZ: u32 = 6; -pub const R_SH_DIR8BP: u32 = 7; -pub const R_SH_DIR8W: u32 = 8; -pub const R_SH_DIR8L: u32 = 9; -pub const R_SH_SWITCH16: u32 = 25; -pub const R_SH_SWITCH32: u32 = 26; -pub const R_SH_USES: u32 = 27; -pub const R_SH_COUNT: u32 = 28; -pub const R_SH_ALIGN: u32 = 29; -pub const R_SH_CODE: u32 = 30; -pub const R_SH_DATA: u32 = 31; -pub const R_SH_LABEL: u32 = 32; -pub const R_SH_SWITCH8: u32 = 33; -pub const R_SH_GNU_VTINHERIT: u32 = 34; -pub const R_SH_GNU_VTENTRY: u32 = 35; -pub const R_SH_TLS_GD_32: u32 = 144; -pub const R_SH_TLS_LD_32: u32 = 145; -pub const R_SH_TLS_LDO_32: u32 = 146; -pub const R_SH_TLS_IE_32: u32 = 147; -pub const R_SH_TLS_LE_32: u32 = 148; -pub const R_SH_TLS_DTPMOD32: u32 = 149; -pub const R_SH_TLS_DTPOFF32: u32 = 150; -pub const R_SH_TLS_TPOFF32: u32 = 151; -pub const R_SH_GOT32: u32 = 160; -pub const R_SH_PLT32: u32 = 161; -pub const R_SH_COPY: u32 = 162; -pub const R_SH_GLOB_DAT: u32 = 163; -pub const R_SH_JMP_SLOT: u32 = 164; -pub const R_SH_RELATIVE: u32 = 165; -pub const R_SH_GOTOFF: u32 = 166; -pub const R_SH_GOTPC: u32 = 167; - -// S/390 specific definitions. - -// S/390 values `FileHeader*::e_flags`. - -/// High GPRs kernel facility needed. -pub const EF_S390_HIGH_GPRS: u32 = 0x0000_0001; - -// S/390 values `Rel*::r_type`. - -/// No reloc. -pub const R_390_NONE: u32 = 0; -/// Direct 8 bit. -pub const R_390_8: u32 = 1; -/// Direct 12 bit. -pub const R_390_12: u32 = 2; -/// Direct 16 bit. -pub const R_390_16: u32 = 3; -/// Direct 32 bit. -pub const R_390_32: u32 = 4; -/// PC relative 32 bit. -pub const R_390_PC32: u32 = 5; -/// 12 bit GOT offset. -pub const R_390_GOT12: u32 = 6; -/// 32 bit GOT offset. -pub const R_390_GOT32: u32 = 7; -/// 32 bit PC relative PLT address. 
-pub const R_390_PLT32: u32 = 8; -/// Copy symbol at runtime. -pub const R_390_COPY: u32 = 9; -/// Create GOT entry. -pub const R_390_GLOB_DAT: u32 = 10; -/// Create PLT entry. -pub const R_390_JMP_SLOT: u32 = 11; -/// Adjust by program base. -pub const R_390_RELATIVE: u32 = 12; -/// 32 bit offset to GOT. -pub const R_390_GOTOFF32: u32 = 13; -/// 32 bit PC relative offset to GOT. -pub const R_390_GOTPC: u32 = 14; -/// 16 bit GOT offset. -pub const R_390_GOT16: u32 = 15; -/// PC relative 16 bit. -pub const R_390_PC16: u32 = 16; -/// PC relative 16 bit shifted by 1. -pub const R_390_PC16DBL: u32 = 17; -/// 16 bit PC rel. PLT shifted by 1. -pub const R_390_PLT16DBL: u32 = 18; -/// PC relative 32 bit shifted by 1. -pub const R_390_PC32DBL: u32 = 19; -/// 32 bit PC rel. PLT shifted by 1. -pub const R_390_PLT32DBL: u32 = 20; -/// 32 bit PC rel. GOT shifted by 1. -pub const R_390_GOTPCDBL: u32 = 21; -/// Direct 64 bit. -pub const R_390_64: u32 = 22; -/// PC relative 64 bit. -pub const R_390_PC64: u32 = 23; -/// 64 bit GOT offset. -pub const R_390_GOT64: u32 = 24; -/// 64 bit PC relative PLT address. -pub const R_390_PLT64: u32 = 25; -/// 32 bit PC rel. to GOT entry >> 1. -pub const R_390_GOTENT: u32 = 26; -/// 16 bit offset to GOT. -pub const R_390_GOTOFF16: u32 = 27; -/// 64 bit offset to GOT. -pub const R_390_GOTOFF64: u32 = 28; -/// 12 bit offset to jump slot. -pub const R_390_GOTPLT12: u32 = 29; -/// 16 bit offset to jump slot. -pub const R_390_GOTPLT16: u32 = 30; -/// 32 bit offset to jump slot. -pub const R_390_GOTPLT32: u32 = 31; -/// 64 bit offset to jump slot. -pub const R_390_GOTPLT64: u32 = 32; -/// 32 bit rel. offset to jump slot. -pub const R_390_GOTPLTENT: u32 = 33; -/// 16 bit offset from GOT to PLT. -pub const R_390_PLTOFF16: u32 = 34; -/// 32 bit offset from GOT to PLT. -pub const R_390_PLTOFF32: u32 = 35; -/// 16 bit offset from GOT to PLT. -pub const R_390_PLTOFF64: u32 = 36; -/// Tag for load insn in TLS code. 
-pub const R_390_TLS_LOAD: u32 = 37; -/// Tag for function call in general dynamic TLS code. -pub const R_390_TLS_GDCALL: u32 = 38; -/// Tag for function call in local dynamic TLS code. -pub const R_390_TLS_LDCALL: u32 = 39; -/// Direct 32 bit for general dynamic thread local data. -pub const R_390_TLS_GD32: u32 = 40; -/// Direct 64 bit for general dynamic thread local data. -pub const R_390_TLS_GD64: u32 = 41; -/// 12 bit GOT offset for static TLS block offset. -pub const R_390_TLS_GOTIE12: u32 = 42; -/// 32 bit GOT offset for static TLS block offset. -pub const R_390_TLS_GOTIE32: u32 = 43; -/// 64 bit GOT offset for static TLS block offset. -pub const R_390_TLS_GOTIE64: u32 = 44; -/// Direct 32 bit for local dynamic thread local data in LE code. -pub const R_390_TLS_LDM32: u32 = 45; -/// Direct 64 bit for local dynamic thread local data in LE code. -pub const R_390_TLS_LDM64: u32 = 46; -/// 32 bit address of GOT entry for negated static TLS block offset. -pub const R_390_TLS_IE32: u32 = 47; -/// 64 bit address of GOT entry for negated static TLS block offset. -pub const R_390_TLS_IE64: u32 = 48; -/// 32 bit rel. offset to GOT entry for negated static TLS block offset. -pub const R_390_TLS_IEENT: u32 = 49; -/// 32 bit negated offset relative to static TLS block. -pub const R_390_TLS_LE32: u32 = 50; -/// 64 bit negated offset relative to static TLS block. -pub const R_390_TLS_LE64: u32 = 51; -/// 32 bit offset relative to TLS block. -pub const R_390_TLS_LDO32: u32 = 52; -/// 64 bit offset relative to TLS block. -pub const R_390_TLS_LDO64: u32 = 53; -/// ID of module containing symbol. -pub const R_390_TLS_DTPMOD: u32 = 54; -/// Offset in TLS block. -pub const R_390_TLS_DTPOFF: u32 = 55; -/// Negated offset in static TLS block. -pub const R_390_TLS_TPOFF: u32 = 56; -/// Direct 20 bit. -pub const R_390_20: u32 = 57; -/// 20 bit GOT offset. -pub const R_390_GOT20: u32 = 58; -/// 20 bit offset to jump slot. 
-pub const R_390_GOTPLT20: u32 = 59; -/// 20 bit GOT offset for static TLS block offset. -pub const R_390_TLS_GOTIE20: u32 = 60; -/// STT_GNU_IFUNC relocation. -pub const R_390_IRELATIVE: u32 = 61; - -// CRIS values `Rel*::r_type`. -pub const R_CRIS_NONE: u32 = 0; -pub const R_CRIS_8: u32 = 1; -pub const R_CRIS_16: u32 = 2; -pub const R_CRIS_32: u32 = 3; -pub const R_CRIS_8_PCREL: u32 = 4; -pub const R_CRIS_16_PCREL: u32 = 5; -pub const R_CRIS_32_PCREL: u32 = 6; -pub const R_CRIS_GNU_VTINHERIT: u32 = 7; -pub const R_CRIS_GNU_VTENTRY: u32 = 8; -pub const R_CRIS_COPY: u32 = 9; -pub const R_CRIS_GLOB_DAT: u32 = 10; -pub const R_CRIS_JUMP_SLOT: u32 = 11; -pub const R_CRIS_RELATIVE: u32 = 12; -pub const R_CRIS_16_GOT: u32 = 13; -pub const R_CRIS_32_GOT: u32 = 14; -pub const R_CRIS_16_GOTPLT: u32 = 15; -pub const R_CRIS_32_GOTPLT: u32 = 16; -pub const R_CRIS_32_GOTREL: u32 = 17; -pub const R_CRIS_32_PLT_GOTREL: u32 = 18; -pub const R_CRIS_32_PLT_PCREL: u32 = 19; - -// AMD x86-64 values `Rel*::r_type`. 
-/// No reloc -pub const R_X86_64_NONE: u32 = 0; -/// Direct 64 bit -pub const R_X86_64_64: u32 = 1; -/// PC relative 32 bit signed -pub const R_X86_64_PC32: u32 = 2; -/// 32 bit GOT entry -pub const R_X86_64_GOT32: u32 = 3; -/// 32 bit PLT address -pub const R_X86_64_PLT32: u32 = 4; -/// Copy symbol at runtime -pub const R_X86_64_COPY: u32 = 5; -/// Create GOT entry -pub const R_X86_64_GLOB_DAT: u32 = 6; -/// Create PLT entry -pub const R_X86_64_JUMP_SLOT: u32 = 7; -/// Adjust by program base -pub const R_X86_64_RELATIVE: u32 = 8; -/// 32 bit signed PC relative offset to GOT -pub const R_X86_64_GOTPCREL: u32 = 9; -/// Direct 32 bit zero extended -pub const R_X86_64_32: u32 = 10; -/// Direct 32 bit sign extended -pub const R_X86_64_32S: u32 = 11; -/// Direct 16 bit zero extended -pub const R_X86_64_16: u32 = 12; -/// 16 bit sign extended pc relative -pub const R_X86_64_PC16: u32 = 13; -/// Direct 8 bit sign extended -pub const R_X86_64_8: u32 = 14; -/// 8 bit sign extended pc relative -pub const R_X86_64_PC8: u32 = 15; -/// ID of module containing symbol -pub const R_X86_64_DTPMOD64: u32 = 16; -/// Offset in module's TLS block -pub const R_X86_64_DTPOFF64: u32 = 17; -/// Offset in initial TLS block -pub const R_X86_64_TPOFF64: u32 = 18; -/// 32 bit signed PC relative offset to two GOT entries for GD symbol -pub const R_X86_64_TLSGD: u32 = 19; -/// 32 bit signed PC relative offset to two GOT entries for LD symbol -pub const R_X86_64_TLSLD: u32 = 20; -/// Offset in TLS block -pub const R_X86_64_DTPOFF32: u32 = 21; -/// 32 bit signed PC relative offset to GOT entry for IE symbol -pub const R_X86_64_GOTTPOFF: u32 = 22; -/// Offset in initial TLS block -pub const R_X86_64_TPOFF32: u32 = 23; -/// PC relative 64 bit -pub const R_X86_64_PC64: u32 = 24; -/// 64 bit offset to GOT -pub const R_X86_64_GOTOFF64: u32 = 25; -/// 32 bit signed pc relative offset to GOT -pub const R_X86_64_GOTPC32: u32 = 26; -/// 64-bit GOT entry offset -pub const R_X86_64_GOT64: u32 = 27; -/// 
64-bit PC relative offset to GOT entry -pub const R_X86_64_GOTPCREL64: u32 = 28; -/// 64-bit PC relative offset to GOT -pub const R_X86_64_GOTPC64: u32 = 29; -/// like GOT64, says PLT entry needed -pub const R_X86_64_GOTPLT64: u32 = 30; -/// 64-bit GOT relative offset to PLT entry -pub const R_X86_64_PLTOFF64: u32 = 31; -/// Size of symbol plus 32-bit addend -pub const R_X86_64_SIZE32: u32 = 32; -/// Size of symbol plus 64-bit addend -pub const R_X86_64_SIZE64: u32 = 33; -/// GOT offset for TLS descriptor. -pub const R_X86_64_GOTPC32_TLSDESC: u32 = 34; -/// Marker for call through TLS descriptor. -pub const R_X86_64_TLSDESC_CALL: u32 = 35; -/// TLS descriptor. -pub const R_X86_64_TLSDESC: u32 = 36; -/// Adjust indirectly by program base -pub const R_X86_64_IRELATIVE: u32 = 37; -/// 64-bit adjust by program base -pub const R_X86_64_RELATIVE64: u32 = 38; -// 39 Reserved was R_X86_64_PC32_BND -// 40 Reserved was R_X86_64_PLT32_BND -/// Load from 32 bit signed pc relative offset to GOT entry without REX prefix, relaxable. -pub const R_X86_64_GOTPCRELX: u32 = 41; -/// Load from 32 bit signed pc relative offset to GOT entry with REX prefix, relaxable. -pub const R_X86_64_REX_GOTPCRELX: u32 = 42; - -// AMD x86-64 values `SectionHeader*::sh_type`. -/// Unwind information. -pub const SHT_X86_64_UNWIND: u32 = 0x7000_0001; - -// AM33 values `Rel*::r_type`. -/// No reloc. -pub const R_MN10300_NONE: u32 = 0; -/// Direct 32 bit. -pub const R_MN10300_32: u32 = 1; -/// Direct 16 bit. -pub const R_MN10300_16: u32 = 2; -/// Direct 8 bit. -pub const R_MN10300_8: u32 = 3; -/// PC-relative 32-bit. -pub const R_MN10300_PCREL32: u32 = 4; -/// PC-relative 16-bit signed. -pub const R_MN10300_PCREL16: u32 = 5; -/// PC-relative 8-bit signed. -pub const R_MN10300_PCREL8: u32 = 6; -/// Ancient C++ vtable garbage... -pub const R_MN10300_GNU_VTINHERIT: u32 = 7; -/// ... collection annotation. -pub const R_MN10300_GNU_VTENTRY: u32 = 8; -/// Direct 24 bit. 
-pub const R_MN10300_24: u32 = 9; -/// 32-bit PCrel offset to GOT. -pub const R_MN10300_GOTPC32: u32 = 10; -/// 16-bit PCrel offset to GOT. -pub const R_MN10300_GOTPC16: u32 = 11; -/// 32-bit offset from GOT. -pub const R_MN10300_GOTOFF32: u32 = 12; -/// 24-bit offset from GOT. -pub const R_MN10300_GOTOFF24: u32 = 13; -/// 16-bit offset from GOT. -pub const R_MN10300_GOTOFF16: u32 = 14; -/// 32-bit PCrel to PLT entry. -pub const R_MN10300_PLT32: u32 = 15; -/// 16-bit PCrel to PLT entry. -pub const R_MN10300_PLT16: u32 = 16; -/// 32-bit offset to GOT entry. -pub const R_MN10300_GOT32: u32 = 17; -/// 24-bit offset to GOT entry. -pub const R_MN10300_GOT24: u32 = 18; -/// 16-bit offset to GOT entry. -pub const R_MN10300_GOT16: u32 = 19; -/// Copy symbol at runtime. -pub const R_MN10300_COPY: u32 = 20; -/// Create GOT entry. -pub const R_MN10300_GLOB_DAT: u32 = 21; -/// Create PLT entry. -pub const R_MN10300_JMP_SLOT: u32 = 22; -/// Adjust by program base. -pub const R_MN10300_RELATIVE: u32 = 23; -/// 32-bit offset for global dynamic. -pub const R_MN10300_TLS_GD: u32 = 24; -/// 32-bit offset for local dynamic. -pub const R_MN10300_TLS_LD: u32 = 25; -/// Module-relative offset. -pub const R_MN10300_TLS_LDO: u32 = 26; -/// GOT offset for static TLS block offset. -pub const R_MN10300_TLS_GOTIE: u32 = 27; -/// GOT address for static TLS block offset. -pub const R_MN10300_TLS_IE: u32 = 28; -/// Offset relative to static TLS block. -pub const R_MN10300_TLS_LE: u32 = 29; -/// ID of module containing symbol. -pub const R_MN10300_TLS_DTPMOD: u32 = 30; -/// Offset in module TLS block. -pub const R_MN10300_TLS_DTPOFF: u32 = 31; -/// Offset in static TLS block. -pub const R_MN10300_TLS_TPOFF: u32 = 32; -/// Adjustment for next reloc as needed by linker relaxation. -pub const R_MN10300_SYM_DIFF: u32 = 33; -/// Alignment requirement for linker relaxation. -pub const R_MN10300_ALIGN: u32 = 34; - -// M32R values `Rel32::r_type`. -/// No reloc. 
-pub const R_M32R_NONE: u32 = 0; -/// Direct 16 bit. -pub const R_M32R_16: u32 = 1; -/// Direct 32 bit. -pub const R_M32R_32: u32 = 2; -/// Direct 24 bit. -pub const R_M32R_24: u32 = 3; -/// PC relative 10 bit shifted. -pub const R_M32R_10_PCREL: u32 = 4; -/// PC relative 18 bit shifted. -pub const R_M32R_18_PCREL: u32 = 5; -/// PC relative 26 bit shifted. -pub const R_M32R_26_PCREL: u32 = 6; -/// High 16 bit with unsigned low. -pub const R_M32R_HI16_ULO: u32 = 7; -/// High 16 bit with signed low. -pub const R_M32R_HI16_SLO: u32 = 8; -/// Low 16 bit. -pub const R_M32R_LO16: u32 = 9; -/// 16 bit offset in SDA. -pub const R_M32R_SDA16: u32 = 10; -pub const R_M32R_GNU_VTINHERIT: u32 = 11; -pub const R_M32R_GNU_VTENTRY: u32 = 12; -// M32R values `Rela32::r_type`. -/// Direct 16 bit. -pub const R_M32R_16_RELA: u32 = 33; -/// Direct 32 bit. -pub const R_M32R_32_RELA: u32 = 34; -/// Direct 24 bit. -pub const R_M32R_24_RELA: u32 = 35; -/// PC relative 10 bit shifted. -pub const R_M32R_10_PCREL_RELA: u32 = 36; -/// PC relative 18 bit shifted. -pub const R_M32R_18_PCREL_RELA: u32 = 37; -/// PC relative 26 bit shifted. -pub const R_M32R_26_PCREL_RELA: u32 = 38; -/// High 16 bit with unsigned low -pub const R_M32R_HI16_ULO_RELA: u32 = 39; -/// High 16 bit with signed low -pub const R_M32R_HI16_SLO_RELA: u32 = 40; -/// Low 16 bit -pub const R_M32R_LO16_RELA: u32 = 41; -/// 16 bit offset in SDA -pub const R_M32R_SDA16_RELA: u32 = 42; -pub const R_M32R_RELA_GNU_VTINHERIT: u32 = 43; -pub const R_M32R_RELA_GNU_VTENTRY: u32 = 44; -/// PC relative 32 bit. 
-pub const R_M32R_REL32: u32 = 45; - -/// 24 bit GOT entry -pub const R_M32R_GOT24: u32 = 48; -/// 26 bit PC relative to PLT shifted -pub const R_M32R_26_PLTREL: u32 = 49; -/// Copy symbol at runtime -pub const R_M32R_COPY: u32 = 50; -/// Create GOT entry -pub const R_M32R_GLOB_DAT: u32 = 51; -/// Create PLT entry -pub const R_M32R_JMP_SLOT: u32 = 52; -/// Adjust by program base -pub const R_M32R_RELATIVE: u32 = 53; -/// 24 bit offset to GOT -pub const R_M32R_GOTOFF: u32 = 54; -/// 24 bit PC relative offset to GOT -pub const R_M32R_GOTPC24: u32 = 55; -/// High 16 bit GOT entry with unsigned low -pub const R_M32R_GOT16_HI_ULO: u32 = 56; -/// High 16 bit GOT entry with signed low -pub const R_M32R_GOT16_HI_SLO: u32 = 57; -/// Low 16 bit GOT entry -pub const R_M32R_GOT16_LO: u32 = 58; -/// High 16 bit PC relative offset to GOT with unsigned low -pub const R_M32R_GOTPC_HI_ULO: u32 = 59; -/// High 16 bit PC relative offset to GOT with signed low -pub const R_M32R_GOTPC_HI_SLO: u32 = 60; -/// Low 16 bit PC relative offset to GOT -pub const R_M32R_GOTPC_LO: u32 = 61; -/// High 16 bit offset to GOT with unsigned low -pub const R_M32R_GOTOFF_HI_ULO: u32 = 62; -/// High 16 bit offset to GOT with signed low -pub const R_M32R_GOTOFF_HI_SLO: u32 = 63; -/// Low 16 bit offset to GOT -pub const R_M32R_GOTOFF_LO: u32 = 64; -/// Keep this the last entry. -pub const R_M32R_NUM: u32 = 256; - -// MicroBlaze values `Rel*::r_type`. -/// No reloc. -pub const R_MICROBLAZE_NONE: u32 = 0; -/// Direct 32 bit. -pub const R_MICROBLAZE_32: u32 = 1; -/// PC relative 32 bit. -pub const R_MICROBLAZE_32_PCREL: u32 = 2; -/// PC relative 64 bit. -pub const R_MICROBLAZE_64_PCREL: u32 = 3; -/// Low 16 bits of PCREL32. -pub const R_MICROBLAZE_32_PCREL_LO: u32 = 4; -/// Direct 64 bit. -pub const R_MICROBLAZE_64: u32 = 5; -/// Low 16 bit. -pub const R_MICROBLAZE_32_LO: u32 = 6; -/// Read-only small data area. -pub const R_MICROBLAZE_SRO32: u32 = 7; -/// Read-write small data area. 
-pub const R_MICROBLAZE_SRW32: u32 = 8; -/// No reloc. -pub const R_MICROBLAZE_64_NONE: u32 = 9; -/// Symbol Op Symbol relocation. -pub const R_MICROBLAZE_32_SYM_OP_SYM: u32 = 10; -/// GNU C++ vtable hierarchy. -pub const R_MICROBLAZE_GNU_VTINHERIT: u32 = 11; -/// GNU C++ vtable member usage. -pub const R_MICROBLAZE_GNU_VTENTRY: u32 = 12; -/// PC-relative GOT offset. -pub const R_MICROBLAZE_GOTPC_64: u32 = 13; -/// GOT entry offset. -pub const R_MICROBLAZE_GOT_64: u32 = 14; -/// PLT offset (PC-relative). -pub const R_MICROBLAZE_PLT_64: u32 = 15; -/// Adjust by program base. -pub const R_MICROBLAZE_REL: u32 = 16; -/// Create PLT entry. -pub const R_MICROBLAZE_JUMP_SLOT: u32 = 17; -/// Create GOT entry. -pub const R_MICROBLAZE_GLOB_DAT: u32 = 18; -/// 64 bit offset to GOT. -pub const R_MICROBLAZE_GOTOFF_64: u32 = 19; -/// 32 bit offset to GOT. -pub const R_MICROBLAZE_GOTOFF_32: u32 = 20; -/// Runtime copy. -pub const R_MICROBLAZE_COPY: u32 = 21; -/// TLS Reloc. -pub const R_MICROBLAZE_TLS: u32 = 22; -/// TLS General Dynamic. -pub const R_MICROBLAZE_TLSGD: u32 = 23; -/// TLS Local Dynamic. -pub const R_MICROBLAZE_TLSLD: u32 = 24; -/// TLS Module ID. -pub const R_MICROBLAZE_TLSDTPMOD32: u32 = 25; -/// TLS Offset Within TLS Block. -pub const R_MICROBLAZE_TLSDTPREL32: u32 = 26; -/// TLS Offset Within TLS Block. -pub const R_MICROBLAZE_TLSDTPREL64: u32 = 27; -/// TLS Offset From Thread Pointer. -pub const R_MICROBLAZE_TLSGOTTPREL32: u32 = 28; -/// TLS Offset From Thread Pointer. -pub const R_MICROBLAZE_TLSTPREL32: u32 = 29; - -// Nios II values `Dyn::d_tag`. -/// Address of _gp. -pub const DT_NIOS2_GP: u32 = 0x7000_0002; - -// Nios II values `Rel*::r_type`. -/// No reloc. -pub const R_NIOS2_NONE: u32 = 0; -/// Direct signed 16 bit. -pub const R_NIOS2_S16: u32 = 1; -/// Direct unsigned 16 bit. -pub const R_NIOS2_U16: u32 = 2; -/// PC relative 16 bit. -pub const R_NIOS2_PCREL16: u32 = 3; -/// Direct call. -pub const R_NIOS2_CALL26: u32 = 4; -/// 5 bit constant expression. 
-pub const R_NIOS2_IMM5: u32 = 5; -/// 5 bit expression, shift 22. -pub const R_NIOS2_CACHE_OPX: u32 = 6; -/// 6 bit constant expression. -pub const R_NIOS2_IMM6: u32 = 7; -/// 8 bit constant expression. -pub const R_NIOS2_IMM8: u32 = 8; -/// High 16 bit. -pub const R_NIOS2_HI16: u32 = 9; -/// Low 16 bit. -pub const R_NIOS2_LO16: u32 = 10; -/// High 16 bit, adjusted. -pub const R_NIOS2_HIADJ16: u32 = 11; -/// 32 bit symbol value + addend. -pub const R_NIOS2_BFD_RELOC_32: u32 = 12; -/// 16 bit symbol value + addend. -pub const R_NIOS2_BFD_RELOC_16: u32 = 13; -/// 8 bit symbol value + addend. -pub const R_NIOS2_BFD_RELOC_8: u32 = 14; -/// 16 bit GP pointer offset. -pub const R_NIOS2_GPREL: u32 = 15; -/// GNU C++ vtable hierarchy. -pub const R_NIOS2_GNU_VTINHERIT: u32 = 16; -/// GNU C++ vtable member usage. -pub const R_NIOS2_GNU_VTENTRY: u32 = 17; -/// Unconditional branch. -pub const R_NIOS2_UJMP: u32 = 18; -/// Conditional branch. -pub const R_NIOS2_CJMP: u32 = 19; -/// Indirect call through register. -pub const R_NIOS2_CALLR: u32 = 20; -/// Alignment requirement for linker relaxation. -pub const R_NIOS2_ALIGN: u32 = 21; -/// 16 bit GOT entry. -pub const R_NIOS2_GOT16: u32 = 22; -/// 16 bit GOT entry for function. -pub const R_NIOS2_CALL16: u32 = 23; -/// %lo of offset to GOT pointer. -pub const R_NIOS2_GOTOFF_LO: u32 = 24; -/// %hiadj of offset to GOT pointer. -pub const R_NIOS2_GOTOFF_HA: u32 = 25; -/// %lo of PC relative offset. -pub const R_NIOS2_PCREL_LO: u32 = 26; -/// %hiadj of PC relative offset. -pub const R_NIOS2_PCREL_HA: u32 = 27; -/// 16 bit GOT offset for TLS GD. -pub const R_NIOS2_TLS_GD16: u32 = 28; -/// 16 bit GOT offset for TLS LDM. -pub const R_NIOS2_TLS_LDM16: u32 = 29; -/// 16 bit module relative offset. -pub const R_NIOS2_TLS_LDO16: u32 = 30; -/// 16 bit GOT offset for TLS IE. -pub const R_NIOS2_TLS_IE16: u32 = 31; -/// 16 bit LE TP-relative offset. -pub const R_NIOS2_TLS_LE16: u32 = 32; -/// Module number. 
-pub const R_NIOS2_TLS_DTPMOD: u32 = 33; -/// Module-relative offset. -pub const R_NIOS2_TLS_DTPREL: u32 = 34; -/// TP-relative offset. -pub const R_NIOS2_TLS_TPREL: u32 = 35; -/// Copy symbol at runtime. -pub const R_NIOS2_COPY: u32 = 36; -/// Create GOT entry. -pub const R_NIOS2_GLOB_DAT: u32 = 37; -/// Create PLT entry. -pub const R_NIOS2_JUMP_SLOT: u32 = 38; -/// Adjust by program base. -pub const R_NIOS2_RELATIVE: u32 = 39; -/// 16 bit offset to GOT pointer. -pub const R_NIOS2_GOTOFF: u32 = 40; -/// Direct call in .noat section. -pub const R_NIOS2_CALL26_NOAT: u32 = 41; -/// %lo() of GOT entry. -pub const R_NIOS2_GOT_LO: u32 = 42; -/// %hiadj() of GOT entry. -pub const R_NIOS2_GOT_HA: u32 = 43; -/// %lo() of function GOT entry. -pub const R_NIOS2_CALL_LO: u32 = 44; -/// %hiadj() of function GOT entry. -pub const R_NIOS2_CALL_HA: u32 = 45; - -// TILEPro values `Rel*::r_type`. -/// No reloc -pub const R_TILEPRO_NONE: u32 = 0; -/// Direct 32 bit -pub const R_TILEPRO_32: u32 = 1; -/// Direct 16 bit -pub const R_TILEPRO_16: u32 = 2; -/// Direct 8 bit -pub const R_TILEPRO_8: u32 = 3; -/// PC relative 32 bit -pub const R_TILEPRO_32_PCREL: u32 = 4; -/// PC relative 16 bit -pub const R_TILEPRO_16_PCREL: u32 = 5; -/// PC relative 8 bit -pub const R_TILEPRO_8_PCREL: u32 = 6; -/// Low 16 bit -pub const R_TILEPRO_LO16: u32 = 7; -/// High 16 bit -pub const R_TILEPRO_HI16: u32 = 8; -/// High 16 bit, adjusted -pub const R_TILEPRO_HA16: u32 = 9; -/// Copy relocation -pub const R_TILEPRO_COPY: u32 = 10; -/// Create GOT entry -pub const R_TILEPRO_GLOB_DAT: u32 = 11; -/// Create PLT entry -pub const R_TILEPRO_JMP_SLOT: u32 = 12; -/// Adjust by program base -pub const R_TILEPRO_RELATIVE: u32 = 13; -/// X1 pipe branch offset -pub const R_TILEPRO_BROFF_X1: u32 = 14; -/// X1 pipe jump offset -pub const R_TILEPRO_JOFFLONG_X1: u32 = 15; -/// X1 pipe jump offset to PLT -pub const R_TILEPRO_JOFFLONG_X1_PLT: u32 = 16; -/// X0 pipe 8-bit -pub const R_TILEPRO_IMM8_X0: u32 = 17; -/// Y0 pipe 
8-bit -pub const R_TILEPRO_IMM8_Y0: u32 = 18; -/// X1 pipe 8-bit -pub const R_TILEPRO_IMM8_X1: u32 = 19; -/// Y1 pipe 8-bit -pub const R_TILEPRO_IMM8_Y1: u32 = 20; -/// X1 pipe mtspr -pub const R_TILEPRO_MT_IMM15_X1: u32 = 21; -/// X1 pipe mfspr -pub const R_TILEPRO_MF_IMM15_X1: u32 = 22; -/// X0 pipe 16-bit -pub const R_TILEPRO_IMM16_X0: u32 = 23; -/// X1 pipe 16-bit -pub const R_TILEPRO_IMM16_X1: u32 = 24; -/// X0 pipe low 16-bit -pub const R_TILEPRO_IMM16_X0_LO: u32 = 25; -/// X1 pipe low 16-bit -pub const R_TILEPRO_IMM16_X1_LO: u32 = 26; -/// X0 pipe high 16-bit -pub const R_TILEPRO_IMM16_X0_HI: u32 = 27; -/// X1 pipe high 16-bit -pub const R_TILEPRO_IMM16_X1_HI: u32 = 28; -/// X0 pipe high 16-bit, adjusted -pub const R_TILEPRO_IMM16_X0_HA: u32 = 29; -/// X1 pipe high 16-bit, adjusted -pub const R_TILEPRO_IMM16_X1_HA: u32 = 30; -/// X0 pipe PC relative 16 bit -pub const R_TILEPRO_IMM16_X0_PCREL: u32 = 31; -/// X1 pipe PC relative 16 bit -pub const R_TILEPRO_IMM16_X1_PCREL: u32 = 32; -/// X0 pipe PC relative low 16 bit -pub const R_TILEPRO_IMM16_X0_LO_PCREL: u32 = 33; -/// X1 pipe PC relative low 16 bit -pub const R_TILEPRO_IMM16_X1_LO_PCREL: u32 = 34; -/// X0 pipe PC relative high 16 bit -pub const R_TILEPRO_IMM16_X0_HI_PCREL: u32 = 35; -/// X1 pipe PC relative high 16 bit -pub const R_TILEPRO_IMM16_X1_HI_PCREL: u32 = 36; -/// X0 pipe PC relative ha() 16 bit -pub const R_TILEPRO_IMM16_X0_HA_PCREL: u32 = 37; -/// X1 pipe PC relative ha() 16 bit -pub const R_TILEPRO_IMM16_X1_HA_PCREL: u32 = 38; -/// X0 pipe 16-bit GOT offset -pub const R_TILEPRO_IMM16_X0_GOT: u32 = 39; -/// X1 pipe 16-bit GOT offset -pub const R_TILEPRO_IMM16_X1_GOT: u32 = 40; -/// X0 pipe low 16-bit GOT offset -pub const R_TILEPRO_IMM16_X0_GOT_LO: u32 = 41; -/// X1 pipe low 16-bit GOT offset -pub const R_TILEPRO_IMM16_X1_GOT_LO: u32 = 42; -/// X0 pipe high 16-bit GOT offset -pub const R_TILEPRO_IMM16_X0_GOT_HI: u32 = 43; -/// X1 pipe high 16-bit GOT offset -pub const R_TILEPRO_IMM16_X1_GOT_HI: 
u32 = 44; -/// X0 pipe ha() 16-bit GOT offset -pub const R_TILEPRO_IMM16_X0_GOT_HA: u32 = 45; -/// X1 pipe ha() 16-bit GOT offset -pub const R_TILEPRO_IMM16_X1_GOT_HA: u32 = 46; -/// X0 pipe mm "start" -pub const R_TILEPRO_MMSTART_X0: u32 = 47; -/// X0 pipe mm "end" -pub const R_TILEPRO_MMEND_X0: u32 = 48; -/// X1 pipe mm "start" -pub const R_TILEPRO_MMSTART_X1: u32 = 49; -/// X1 pipe mm "end" -pub const R_TILEPRO_MMEND_X1: u32 = 50; -/// X0 pipe shift amount -pub const R_TILEPRO_SHAMT_X0: u32 = 51; -/// X1 pipe shift amount -pub const R_TILEPRO_SHAMT_X1: u32 = 52; -/// Y0 pipe shift amount -pub const R_TILEPRO_SHAMT_Y0: u32 = 53; -/// Y1 pipe shift amount -pub const R_TILEPRO_SHAMT_Y1: u32 = 54; -/// X1 pipe destination 8-bit -pub const R_TILEPRO_DEST_IMM8_X1: u32 = 55; -// Relocs 56-59 are currently not defined. -/// "jal" for TLS GD -pub const R_TILEPRO_TLS_GD_CALL: u32 = 60; -/// X0 pipe "addi" for TLS GD -pub const R_TILEPRO_IMM8_X0_TLS_GD_ADD: u32 = 61; -/// X1 pipe "addi" for TLS GD -pub const R_TILEPRO_IMM8_X1_TLS_GD_ADD: u32 = 62; -/// Y0 pipe "addi" for TLS GD -pub const R_TILEPRO_IMM8_Y0_TLS_GD_ADD: u32 = 63; -/// Y1 pipe "addi" for TLS GD -pub const R_TILEPRO_IMM8_Y1_TLS_GD_ADD: u32 = 64; -/// "lw_tls" for TLS IE -pub const R_TILEPRO_TLS_IE_LOAD: u32 = 65; -/// X0 pipe 16-bit TLS GD offset -pub const R_TILEPRO_IMM16_X0_TLS_GD: u32 = 66; -/// X1 pipe 16-bit TLS GD offset -pub const R_TILEPRO_IMM16_X1_TLS_GD: u32 = 67; -/// X0 pipe low 16-bit TLS GD offset -pub const R_TILEPRO_IMM16_X0_TLS_GD_LO: u32 = 68; -/// X1 pipe low 16-bit TLS GD offset -pub const R_TILEPRO_IMM16_X1_TLS_GD_LO: u32 = 69; -/// X0 pipe high 16-bit TLS GD offset -pub const R_TILEPRO_IMM16_X0_TLS_GD_HI: u32 = 70; -/// X1 pipe high 16-bit TLS GD offset -pub const R_TILEPRO_IMM16_X1_TLS_GD_HI: u32 = 71; -/// X0 pipe ha() 16-bit TLS GD offset -pub const R_TILEPRO_IMM16_X0_TLS_GD_HA: u32 = 72; -/// X1 pipe ha() 16-bit TLS GD offset -pub const R_TILEPRO_IMM16_X1_TLS_GD_HA: u32 = 73; -/// X0 
pipe 16-bit TLS IE offset -pub const R_TILEPRO_IMM16_X0_TLS_IE: u32 = 74; -/// X1 pipe 16-bit TLS IE offset -pub const R_TILEPRO_IMM16_X1_TLS_IE: u32 = 75; -/// X0 pipe low 16-bit TLS IE offset -pub const R_TILEPRO_IMM16_X0_TLS_IE_LO: u32 = 76; -/// X1 pipe low 16-bit TLS IE offset -pub const R_TILEPRO_IMM16_X1_TLS_IE_LO: u32 = 77; -/// X0 pipe high 16-bit TLS IE offset -pub const R_TILEPRO_IMM16_X0_TLS_IE_HI: u32 = 78; -/// X1 pipe high 16-bit TLS IE offset -pub const R_TILEPRO_IMM16_X1_TLS_IE_HI: u32 = 79; -/// X0 pipe ha() 16-bit TLS IE offset -pub const R_TILEPRO_IMM16_X0_TLS_IE_HA: u32 = 80; -/// X1 pipe ha() 16-bit TLS IE offset -pub const R_TILEPRO_IMM16_X1_TLS_IE_HA: u32 = 81; -/// ID of module containing symbol -pub const R_TILEPRO_TLS_DTPMOD32: u32 = 82; -/// Offset in TLS block -pub const R_TILEPRO_TLS_DTPOFF32: u32 = 83; -/// Offset in static TLS block -pub const R_TILEPRO_TLS_TPOFF32: u32 = 84; -/// X0 pipe 16-bit TLS LE offset -pub const R_TILEPRO_IMM16_X0_TLS_LE: u32 = 85; -/// X1 pipe 16-bit TLS LE offset -pub const R_TILEPRO_IMM16_X1_TLS_LE: u32 = 86; -/// X0 pipe low 16-bit TLS LE offset -pub const R_TILEPRO_IMM16_X0_TLS_LE_LO: u32 = 87; -/// X1 pipe low 16-bit TLS LE offset -pub const R_TILEPRO_IMM16_X1_TLS_LE_LO: u32 = 88; -/// X0 pipe high 16-bit TLS LE offset -pub const R_TILEPRO_IMM16_X0_TLS_LE_HI: u32 = 89; -/// X1 pipe high 16-bit TLS LE offset -pub const R_TILEPRO_IMM16_X1_TLS_LE_HI: u32 = 90; -/// X0 pipe ha() 16-bit TLS LE offset -pub const R_TILEPRO_IMM16_X0_TLS_LE_HA: u32 = 91; -/// X1 pipe ha() 16-bit TLS LE offset -pub const R_TILEPRO_IMM16_X1_TLS_LE_HA: u32 = 92; - -/// GNU C++ vtable hierarchy -pub const R_TILEPRO_GNU_VTINHERIT: u32 = 128; -/// GNU C++ vtable member usage -pub const R_TILEPRO_GNU_VTENTRY: u32 = 129; - -// TILE-Gx values `Rel*::r_type`. 
-/// No reloc -pub const R_TILEGX_NONE: u32 = 0; -/// Direct 64 bit -pub const R_TILEGX_64: u32 = 1; -/// Direct 32 bit -pub const R_TILEGX_32: u32 = 2; -/// Direct 16 bit -pub const R_TILEGX_16: u32 = 3; -/// Direct 8 bit -pub const R_TILEGX_8: u32 = 4; -/// PC relative 64 bit -pub const R_TILEGX_64_PCREL: u32 = 5; -/// PC relative 32 bit -pub const R_TILEGX_32_PCREL: u32 = 6; -/// PC relative 16 bit -pub const R_TILEGX_16_PCREL: u32 = 7; -/// PC relative 8 bit -pub const R_TILEGX_8_PCREL: u32 = 8; -/// hword 0 16-bit -pub const R_TILEGX_HW0: u32 = 9; -/// hword 1 16-bit -pub const R_TILEGX_HW1: u32 = 10; -/// hword 2 16-bit -pub const R_TILEGX_HW2: u32 = 11; -/// hword 3 16-bit -pub const R_TILEGX_HW3: u32 = 12; -/// last hword 0 16-bit -pub const R_TILEGX_HW0_LAST: u32 = 13; -/// last hword 1 16-bit -pub const R_TILEGX_HW1_LAST: u32 = 14; -/// last hword 2 16-bit -pub const R_TILEGX_HW2_LAST: u32 = 15; -/// Copy relocation -pub const R_TILEGX_COPY: u32 = 16; -/// Create GOT entry -pub const R_TILEGX_GLOB_DAT: u32 = 17; -/// Create PLT entry -pub const R_TILEGX_JMP_SLOT: u32 = 18; -/// Adjust by program base -pub const R_TILEGX_RELATIVE: u32 = 19; -/// X1 pipe branch offset -pub const R_TILEGX_BROFF_X1: u32 = 20; -/// X1 pipe jump offset -pub const R_TILEGX_JUMPOFF_X1: u32 = 21; -/// X1 pipe jump offset to PLT -pub const R_TILEGX_JUMPOFF_X1_PLT: u32 = 22; -/// X0 pipe 8-bit -pub const R_TILEGX_IMM8_X0: u32 = 23; -/// Y0 pipe 8-bit -pub const R_TILEGX_IMM8_Y0: u32 = 24; -/// X1 pipe 8-bit -pub const R_TILEGX_IMM8_X1: u32 = 25; -/// Y1 pipe 8-bit -pub const R_TILEGX_IMM8_Y1: u32 = 26; -/// X1 pipe destination 8-bit -pub const R_TILEGX_DEST_IMM8_X1: u32 = 27; -/// X1 pipe mtspr -pub const R_TILEGX_MT_IMM14_X1: u32 = 28; -/// X1 pipe mfspr -pub const R_TILEGX_MF_IMM14_X1: u32 = 29; -/// X0 pipe mm "start" -pub const R_TILEGX_MMSTART_X0: u32 = 30; -/// X0 pipe mm "end" -pub const R_TILEGX_MMEND_X0: u32 = 31; -/// X0 pipe shift amount -pub const R_TILEGX_SHAMT_X0: u32 
= 32; -/// X1 pipe shift amount -pub const R_TILEGX_SHAMT_X1: u32 = 33; -/// Y0 pipe shift amount -pub const R_TILEGX_SHAMT_Y0: u32 = 34; -/// Y1 pipe shift amount -pub const R_TILEGX_SHAMT_Y1: u32 = 35; -/// X0 pipe hword 0 -pub const R_TILEGX_IMM16_X0_HW0: u32 = 36; -/// X1 pipe hword 0 -pub const R_TILEGX_IMM16_X1_HW0: u32 = 37; -/// X0 pipe hword 1 -pub const R_TILEGX_IMM16_X0_HW1: u32 = 38; -/// X1 pipe hword 1 -pub const R_TILEGX_IMM16_X1_HW1: u32 = 39; -/// X0 pipe hword 2 -pub const R_TILEGX_IMM16_X0_HW2: u32 = 40; -/// X1 pipe hword 2 -pub const R_TILEGX_IMM16_X1_HW2: u32 = 41; -/// X0 pipe hword 3 -pub const R_TILEGX_IMM16_X0_HW3: u32 = 42; -/// X1 pipe hword 3 -pub const R_TILEGX_IMM16_X1_HW3: u32 = 43; -/// X0 pipe last hword 0 -pub const R_TILEGX_IMM16_X0_HW0_LAST: u32 = 44; -/// X1 pipe last hword 0 -pub const R_TILEGX_IMM16_X1_HW0_LAST: u32 = 45; -/// X0 pipe last hword 1 -pub const R_TILEGX_IMM16_X0_HW1_LAST: u32 = 46; -/// X1 pipe last hword 1 -pub const R_TILEGX_IMM16_X1_HW1_LAST: u32 = 47; -/// X0 pipe last hword 2 -pub const R_TILEGX_IMM16_X0_HW2_LAST: u32 = 48; -/// X1 pipe last hword 2 -pub const R_TILEGX_IMM16_X1_HW2_LAST: u32 = 49; -/// X0 pipe PC relative hword 0 -pub const R_TILEGX_IMM16_X0_HW0_PCREL: u32 = 50; -/// X1 pipe PC relative hword 0 -pub const R_TILEGX_IMM16_X1_HW0_PCREL: u32 = 51; -/// X0 pipe PC relative hword 1 -pub const R_TILEGX_IMM16_X0_HW1_PCREL: u32 = 52; -/// X1 pipe PC relative hword 1 -pub const R_TILEGX_IMM16_X1_HW1_PCREL: u32 = 53; -/// X0 pipe PC relative hword 2 -pub const R_TILEGX_IMM16_X0_HW2_PCREL: u32 = 54; -/// X1 pipe PC relative hword 2 -pub const R_TILEGX_IMM16_X1_HW2_PCREL: u32 = 55; -/// X0 pipe PC relative hword 3 -pub const R_TILEGX_IMM16_X0_HW3_PCREL: u32 = 56; -/// X1 pipe PC relative hword 3 -pub const R_TILEGX_IMM16_X1_HW3_PCREL: u32 = 57; -/// X0 pipe PC-rel last hword 0 -pub const R_TILEGX_IMM16_X0_HW0_LAST_PCREL: u32 = 58; -/// X1 pipe PC-rel last hword 0 -pub const 
R_TILEGX_IMM16_X1_HW0_LAST_PCREL: u32 = 59; -/// X0 pipe PC-rel last hword 1 -pub const R_TILEGX_IMM16_X0_HW1_LAST_PCREL: u32 = 60; -/// X1 pipe PC-rel last hword 1 -pub const R_TILEGX_IMM16_X1_HW1_LAST_PCREL: u32 = 61; -/// X0 pipe PC-rel last hword 2 -pub const R_TILEGX_IMM16_X0_HW2_LAST_PCREL: u32 = 62; -/// X1 pipe PC-rel last hword 2 -pub const R_TILEGX_IMM16_X1_HW2_LAST_PCREL: u32 = 63; -/// X0 pipe hword 0 GOT offset -pub const R_TILEGX_IMM16_X0_HW0_GOT: u32 = 64; -/// X1 pipe hword 0 GOT offset -pub const R_TILEGX_IMM16_X1_HW0_GOT: u32 = 65; -/// X0 pipe PC-rel PLT hword 0 -pub const R_TILEGX_IMM16_X0_HW0_PLT_PCREL: u32 = 66; -/// X1 pipe PC-rel PLT hword 0 -pub const R_TILEGX_IMM16_X1_HW0_PLT_PCREL: u32 = 67; -/// X0 pipe PC-rel PLT hword 1 -pub const R_TILEGX_IMM16_X0_HW1_PLT_PCREL: u32 = 68; -/// X1 pipe PC-rel PLT hword 1 -pub const R_TILEGX_IMM16_X1_HW1_PLT_PCREL: u32 = 69; -/// X0 pipe PC-rel PLT hword 2 -pub const R_TILEGX_IMM16_X0_HW2_PLT_PCREL: u32 = 70; -/// X1 pipe PC-rel PLT hword 2 -pub const R_TILEGX_IMM16_X1_HW2_PLT_PCREL: u32 = 71; -/// X0 pipe last hword 0 GOT offset -pub const R_TILEGX_IMM16_X0_HW0_LAST_GOT: u32 = 72; -/// X1 pipe last hword 0 GOT offset -pub const R_TILEGX_IMM16_X1_HW0_LAST_GOT: u32 = 73; -/// X0 pipe last hword 1 GOT offset -pub const R_TILEGX_IMM16_X0_HW1_LAST_GOT: u32 = 74; -/// X1 pipe last hword 1 GOT offset -pub const R_TILEGX_IMM16_X1_HW1_LAST_GOT: u32 = 75; -/// X0 pipe PC-rel PLT hword 3 -pub const R_TILEGX_IMM16_X0_HW3_PLT_PCREL: u32 = 76; -/// X1 pipe PC-rel PLT hword 3 -pub const R_TILEGX_IMM16_X1_HW3_PLT_PCREL: u32 = 77; -/// X0 pipe hword 0 TLS GD offset -pub const R_TILEGX_IMM16_X0_HW0_TLS_GD: u32 = 78; -/// X1 pipe hword 0 TLS GD offset -pub const R_TILEGX_IMM16_X1_HW0_TLS_GD: u32 = 79; -/// X0 pipe hword 0 TLS LE offset -pub const R_TILEGX_IMM16_X0_HW0_TLS_LE: u32 = 80; -/// X1 pipe hword 0 TLS LE offset -pub const R_TILEGX_IMM16_X1_HW0_TLS_LE: u32 = 81; -/// X0 pipe last hword 0 LE off -pub const 
R_TILEGX_IMM16_X0_HW0_LAST_TLS_LE: u32 = 82; -/// X1 pipe last hword 0 LE off -pub const R_TILEGX_IMM16_X1_HW0_LAST_TLS_LE: u32 = 83; -/// X0 pipe last hword 1 LE off -pub const R_TILEGX_IMM16_X0_HW1_LAST_TLS_LE: u32 = 84; -/// X1 pipe last hword 1 LE off -pub const R_TILEGX_IMM16_X1_HW1_LAST_TLS_LE: u32 = 85; -/// X0 pipe last hword 0 GD off -pub const R_TILEGX_IMM16_X0_HW0_LAST_TLS_GD: u32 = 86; -/// X1 pipe last hword 0 GD off -pub const R_TILEGX_IMM16_X1_HW0_LAST_TLS_GD: u32 = 87; -/// X0 pipe last hword 1 GD off -pub const R_TILEGX_IMM16_X0_HW1_LAST_TLS_GD: u32 = 88; -/// X1 pipe last hword 1 GD off -pub const R_TILEGX_IMM16_X1_HW1_LAST_TLS_GD: u32 = 89; -// Relocs 90-91 are currently not defined. -/// X0 pipe hword 0 TLS IE offset -pub const R_TILEGX_IMM16_X0_HW0_TLS_IE: u32 = 92; -/// X1 pipe hword 0 TLS IE offset -pub const R_TILEGX_IMM16_X1_HW0_TLS_IE: u32 = 93; -/// X0 pipe PC-rel PLT last hword 0 -pub const R_TILEGX_IMM16_X0_HW0_LAST_PLT_PCREL: u32 = 94; -/// X1 pipe PC-rel PLT last hword 0 -pub const R_TILEGX_IMM16_X1_HW0_LAST_PLT_PCREL: u32 = 95; -/// X0 pipe PC-rel PLT last hword 1 -pub const R_TILEGX_IMM16_X0_HW1_LAST_PLT_PCREL: u32 = 96; -/// X1 pipe PC-rel PLT last hword 1 -pub const R_TILEGX_IMM16_X1_HW1_LAST_PLT_PCREL: u32 = 97; -/// X0 pipe PC-rel PLT last hword 2 -pub const R_TILEGX_IMM16_X0_HW2_LAST_PLT_PCREL: u32 = 98; -/// X1 pipe PC-rel PLT last hword 2 -pub const R_TILEGX_IMM16_X1_HW2_LAST_PLT_PCREL: u32 = 99; -/// X0 pipe last hword 0 IE off -pub const R_TILEGX_IMM16_X0_HW0_LAST_TLS_IE: u32 = 100; -/// X1 pipe last hword 0 IE off -pub const R_TILEGX_IMM16_X1_HW0_LAST_TLS_IE: u32 = 101; -/// X0 pipe last hword 1 IE off -pub const R_TILEGX_IMM16_X0_HW1_LAST_TLS_IE: u32 = 102; -/// X1 pipe last hword 1 IE off -pub const R_TILEGX_IMM16_X1_HW1_LAST_TLS_IE: u32 = 103; -// Relocs 104-105 are currently not defined. 
-/// 64-bit ID of symbol's module -pub const R_TILEGX_TLS_DTPMOD64: u32 = 106; -/// 64-bit offset in TLS block -pub const R_TILEGX_TLS_DTPOFF64: u32 = 107; -/// 64-bit offset in static TLS block -pub const R_TILEGX_TLS_TPOFF64: u32 = 108; -/// 32-bit ID of symbol's module -pub const R_TILEGX_TLS_DTPMOD32: u32 = 109; -/// 32-bit offset in TLS block -pub const R_TILEGX_TLS_DTPOFF32: u32 = 110; -/// 32-bit offset in static TLS block -pub const R_TILEGX_TLS_TPOFF32: u32 = 111; -/// "jal" for TLS GD -pub const R_TILEGX_TLS_GD_CALL: u32 = 112; -/// X0 pipe "addi" for TLS GD -pub const R_TILEGX_IMM8_X0_TLS_GD_ADD: u32 = 113; -/// X1 pipe "addi" for TLS GD -pub const R_TILEGX_IMM8_X1_TLS_GD_ADD: u32 = 114; -/// Y0 pipe "addi" for TLS GD -pub const R_TILEGX_IMM8_Y0_TLS_GD_ADD: u32 = 115; -/// Y1 pipe "addi" for TLS GD -pub const R_TILEGX_IMM8_Y1_TLS_GD_ADD: u32 = 116; -/// "ld_tls" for TLS IE -pub const R_TILEGX_TLS_IE_LOAD: u32 = 117; -/// X0 pipe "addi" for TLS GD/IE -pub const R_TILEGX_IMM8_X0_TLS_ADD: u32 = 118; -/// X1 pipe "addi" for TLS GD/IE -pub const R_TILEGX_IMM8_X1_TLS_ADD: u32 = 119; -/// Y0 pipe "addi" for TLS GD/IE -pub const R_TILEGX_IMM8_Y0_TLS_ADD: u32 = 120; -/// Y1 pipe "addi" for TLS GD/IE -pub const R_TILEGX_IMM8_Y1_TLS_ADD: u32 = 121; - -/// GNU C++ vtable hierarchy -pub const R_TILEGX_GNU_VTINHERIT: u32 = 128; -/// GNU C++ vtable member usage -pub const R_TILEGX_GNU_VTENTRY: u32 = 129; - -// RISC-V values `FileHeader*::e_flags`. -pub const EF_RISCV_RVC: u32 = 0x0001; -pub const EF_RISCV_FLOAT_ABI: u32 = 0x0006; -pub const EF_RISCV_FLOAT_ABI_SOFT: u32 = 0x0000; -pub const EF_RISCV_FLOAT_ABI_SINGLE: u32 = 0x0002; -pub const EF_RISCV_FLOAT_ABI_DOUBLE: u32 = 0x0004; -pub const EF_RISCV_FLOAT_ABI_QUAD: u32 = 0x0006; -pub const EF_RISCV_RVE: u32 = 0x0008; -pub const EF_RISCV_TSO: u32 = 0x0010; - -// RISC-V values `Rel*::r_type`. 
-pub const R_RISCV_NONE: u32 = 0; -pub const R_RISCV_32: u32 = 1; -pub const R_RISCV_64: u32 = 2; -pub const R_RISCV_RELATIVE: u32 = 3; -pub const R_RISCV_COPY: u32 = 4; -pub const R_RISCV_JUMP_SLOT: u32 = 5; -pub const R_RISCV_TLS_DTPMOD32: u32 = 6; -pub const R_RISCV_TLS_DTPMOD64: u32 = 7; -pub const R_RISCV_TLS_DTPREL32: u32 = 8; -pub const R_RISCV_TLS_DTPREL64: u32 = 9; -pub const R_RISCV_TLS_TPREL32: u32 = 10; -pub const R_RISCV_TLS_TPREL64: u32 = 11; -pub const R_RISCV_BRANCH: u32 = 16; -pub const R_RISCV_JAL: u32 = 17; -pub const R_RISCV_CALL: u32 = 18; -pub const R_RISCV_CALL_PLT: u32 = 19; -pub const R_RISCV_GOT_HI20: u32 = 20; -pub const R_RISCV_TLS_GOT_HI20: u32 = 21; -pub const R_RISCV_TLS_GD_HI20: u32 = 22; -pub const R_RISCV_PCREL_HI20: u32 = 23; -pub const R_RISCV_PCREL_LO12_I: u32 = 24; -pub const R_RISCV_PCREL_LO12_S: u32 = 25; -pub const R_RISCV_HI20: u32 = 26; -pub const R_RISCV_LO12_I: u32 = 27; -pub const R_RISCV_LO12_S: u32 = 28; -pub const R_RISCV_TPREL_HI20: u32 = 29; -pub const R_RISCV_TPREL_LO12_I: u32 = 30; -pub const R_RISCV_TPREL_LO12_S: u32 = 31; -pub const R_RISCV_TPREL_ADD: u32 = 32; -pub const R_RISCV_ADD8: u32 = 33; -pub const R_RISCV_ADD16: u32 = 34; -pub const R_RISCV_ADD32: u32 = 35; -pub const R_RISCV_ADD64: u32 = 36; -pub const R_RISCV_SUB8: u32 = 37; -pub const R_RISCV_SUB16: u32 = 38; -pub const R_RISCV_SUB32: u32 = 39; -pub const R_RISCV_SUB64: u32 = 40; -pub const R_RISCV_GNU_VTINHERIT: u32 = 41; -pub const R_RISCV_GNU_VTENTRY: u32 = 42; -pub const R_RISCV_ALIGN: u32 = 43; -pub const R_RISCV_RVC_BRANCH: u32 = 44; -pub const R_RISCV_RVC_JUMP: u32 = 45; -pub const R_RISCV_RVC_LUI: u32 = 46; -pub const R_RISCV_GPREL_I: u32 = 47; -pub const R_RISCV_GPREL_S: u32 = 48; -pub const R_RISCV_TPREL_I: u32 = 49; -pub const R_RISCV_TPREL_S: u32 = 50; -pub const R_RISCV_RELAX: u32 = 51; -pub const R_RISCV_SUB6: u32 = 52; -pub const R_RISCV_SET6: u32 = 53; -pub const R_RISCV_SET8: u32 = 54; -pub const R_RISCV_SET16: u32 = 55; -pub const 
R_RISCV_SET32: u32 = 56; -pub const R_RISCV_32_PCREL: u32 = 57; - -// BPF values `Rel*::r_type`. -/// No reloc -pub const R_BPF_NONE: u32 = 0; -pub const R_BPF_64_64: u32 = 1; -pub const R_BPF_64_32: u32 = 10; - -// SBF values `Rel*::r_type`. -/// No reloc -pub const R_SBF_NONE: u32 = 0; -pub const R_SBF_64_64: u32 = 1; -pub const R_SBF_64_32: u32 = 10; - -// Imagination Meta values `Rel*::r_type`. - -pub const R_METAG_HIADDR16: u32 = 0; -pub const R_METAG_LOADDR16: u32 = 1; -/// 32bit absolute address -pub const R_METAG_ADDR32: u32 = 2; -/// No reloc -pub const R_METAG_NONE: u32 = 3; -pub const R_METAG_RELBRANCH: u32 = 4; -pub const R_METAG_GETSETOFF: u32 = 5; - -// Backward compatibility -pub const R_METAG_REG32OP1: u32 = 6; -pub const R_METAG_REG32OP2: u32 = 7; -pub const R_METAG_REG32OP3: u32 = 8; -pub const R_METAG_REG16OP1: u32 = 9; -pub const R_METAG_REG16OP2: u32 = 10; -pub const R_METAG_REG16OP3: u32 = 11; -pub const R_METAG_REG32OP4: u32 = 12; - -pub const R_METAG_HIOG: u32 = 13; -pub const R_METAG_LOOG: u32 = 14; - -pub const R_METAG_REL8: u32 = 15; -pub const R_METAG_REL16: u32 = 16; - -pub const R_METAG_GNU_VTINHERIT: u32 = 30; -pub const R_METAG_GNU_VTENTRY: u32 = 31; - -// PIC relocations -pub const R_METAG_HI16_GOTOFF: u32 = 32; -pub const R_METAG_LO16_GOTOFF: u32 = 33; -pub const R_METAG_GETSET_GOTOFF: u32 = 34; -pub const R_METAG_GETSET_GOT: u32 = 35; -pub const R_METAG_HI16_GOTPC: u32 = 36; -pub const R_METAG_LO16_GOTPC: u32 = 37; -pub const R_METAG_HI16_PLT: u32 = 38; -pub const R_METAG_LO16_PLT: u32 = 39; -pub const R_METAG_RELBRANCH_PLT: u32 = 40; -pub const R_METAG_GOTOFF: u32 = 41; -pub const R_METAG_PLT: u32 = 42; -pub const R_METAG_COPY: u32 = 43; -pub const R_METAG_JMP_SLOT: u32 = 44; -pub const R_METAG_RELATIVE: u32 = 45; -pub const R_METAG_GLOB_DAT: u32 = 46; - -// TLS relocations -pub const R_METAG_TLS_GD: u32 = 47; -pub const R_METAG_TLS_LDM: u32 = 48; -pub const R_METAG_TLS_LDO_HI16: u32 = 49; -pub const R_METAG_TLS_LDO_LO16: u32 = 
50; -pub const R_METAG_TLS_LDO: u32 = 51; -pub const R_METAG_TLS_IE: u32 = 52; -pub const R_METAG_TLS_IENONPIC: u32 = 53; -pub const R_METAG_TLS_IENONPIC_HI16: u32 = 54; -pub const R_METAG_TLS_IENONPIC_LO16: u32 = 55; -pub const R_METAG_TLS_TPOFF: u32 = 56; -pub const R_METAG_TLS_DTPMOD: u32 = 57; -pub const R_METAG_TLS_DTPOFF: u32 = 58; -pub const R_METAG_TLS_LE: u32 = 59; -pub const R_METAG_TLS_LE_HI16: u32 = 60; -pub const R_METAG_TLS_LE_LO16: u32 = 61; - -// NDS32 values `Rel*::r_type`. -pub const R_NDS32_NONE: u32 = 0; -pub const R_NDS32_32_RELA: u32 = 20; -pub const R_NDS32_COPY: u32 = 39; -pub const R_NDS32_GLOB_DAT: u32 = 40; -pub const R_NDS32_JMP_SLOT: u32 = 41; -pub const R_NDS32_RELATIVE: u32 = 42; -pub const R_NDS32_TLS_TPOFF: u32 = 102; -pub const R_NDS32_TLS_DESC: u32 = 119; - -// LoongArch values `FileHeader*::e_flags`. -/// Additional properties of the base ABI type, including the FP calling -/// convention. -pub const EF_LARCH_ABI_MODIFIER_MASK: u32 = 0x7; -/// Uses GPRs and the stack for parameter passing -pub const EF_LARCH_ABI_SOFT_FLOAT: u32 = 0x1; -/// Uses GPRs, 32-bit FPRs and the stack for parameter passing -pub const EF_LARCH_ABI_SINGLE_FLOAT: u32 = 0x2; -/// Uses GPRs, 64-bit FPRs and the stack for parameter passing -pub const EF_LARCH_ABI_DOUBLE_FLOAT: u32 = 0x3; -/// Uses relocation types directly writing to immediate slots -pub const EF_LARCH_OBJABI_V1: u32 = 0x40; - -// LoongArch values `Rel*::r_type`. 
-/// No reloc -pub const R_LARCH_NONE: u32 = 0; -/// Runtime address resolving -pub const R_LARCH_32: u32 = 1; -/// Runtime address resolving -pub const R_LARCH_64: u32 = 2; -/// Runtime fixup for load-address -pub const R_LARCH_RELATIVE: u32 = 3; -/// Runtime memory copy in executable -pub const R_LARCH_COPY: u32 = 4; -/// Runtime PLT supporting -pub const R_LARCH_JUMP_SLOT: u32 = 5; -/// Runtime relocation for TLS-GD -pub const R_LARCH_TLS_DTPMOD32: u32 = 6; -/// Runtime relocation for TLS-GD -pub const R_LARCH_TLS_DTPMOD64: u32 = 7; -/// Runtime relocation for TLS-GD -pub const R_LARCH_TLS_DTPREL32: u32 = 8; -/// Runtime relocation for TLS-GD -pub const R_LARCH_TLS_DTPREL64: u32 = 9; -/// Runtime relocation for TLE-IE -pub const R_LARCH_TLS_TPREL32: u32 = 10; -/// Runtime relocation for TLE-IE -pub const R_LARCH_TLS_TPREL64: u32 = 11; -/// Runtime local indirect function resolving -pub const R_LARCH_IRELATIVE: u32 = 12; -/// Mark la.abs: load absolute address for static link. -pub const R_LARCH_MARK_LA: u32 = 20; -/// Mark external label branch: access PC relative address for static link. 
-pub const R_LARCH_MARK_PCREL: u32 = 21; -/// Push PC-relative offset -pub const R_LARCH_SOP_PUSH_PCREL: u32 = 22; -/// Push constant or absolute address -pub const R_LARCH_SOP_PUSH_ABSOLUTE: u32 = 23; -/// Duplicate stack top -pub const R_LARCH_SOP_PUSH_DUP: u32 = 24; -/// Push for access GOT entry -pub const R_LARCH_SOP_PUSH_GPREL: u32 = 25; -/// Push for TLS-LE -pub const R_LARCH_SOP_PUSH_TLS_TPREL: u32 = 26; -/// Push for TLS-IE -pub const R_LARCH_SOP_PUSH_TLS_GOT: u32 = 27; -/// Push for TLS-GD -pub const R_LARCH_SOP_PUSH_TLS_GD: u32 = 28; -/// Push for external function calling -pub const R_LARCH_SOP_PUSH_PLT_PCREL: u32 = 29; -/// Assert stack top -pub const R_LARCH_SOP_ASSERT: u32 = 30; -/// Stack top logical not (unary) -pub const R_LARCH_SOP_NOT: u32 = 31; -/// Stack top subtraction (binary) -pub const R_LARCH_SOP_SUB: u32 = 32; -/// Stack top left shift (binary) -pub const R_LARCH_SOP_SL: u32 = 33; -/// Stack top right shift (binary) -pub const R_LARCH_SOP_SR: u32 = 34; -/// Stack top addition (binary) -pub const R_LARCH_SOP_ADD: u32 = 35; -/// Stack top bitwise and (binary) -pub const R_LARCH_SOP_AND: u32 = 36; -/// Stack top selection (tertiary) -pub const R_LARCH_SOP_IF_ELSE: u32 = 37; -/// Pop stack top to fill 5-bit signed immediate operand -pub const R_LARCH_SOP_POP_32_S_10_5: u32 = 38; -/// Pop stack top to fill 12-bit unsigned immediate operand -pub const R_LARCH_SOP_POP_32_U_10_12: u32 = 39; -/// Pop stack top to fill 12-bit signed immediate operand -pub const R_LARCH_SOP_POP_32_S_10_12: u32 = 40; -/// Pop stack top to fill 16-bit signed immediate operand -pub const R_LARCH_SOP_POP_32_S_10_16: u32 = 41; -/// Pop stack top to fill 18-bit signed immediate operand with two trailing -/// zeros implied -pub const R_LARCH_SOP_POP_32_S_10_16_S2: u32 = 42; -/// Pop stack top to fill 20-bit signed immediate operand -pub const R_LARCH_SOP_POP_32_S_5_20: u32 = 43; -/// Pop stack top to fill 23-bit signed immediate operand with two trailing -/// zeros 
implied -pub const R_LARCH_SOP_POP_32_S_0_5_10_16_S2: u32 = 44; -/// Pop stack top to fill 28-bit signed immediate operand with two trailing -/// zeros implied -pub const R_LARCH_SOP_POP_32_S_0_10_10_16_S2: u32 = 45; -/// Pop stack top to fill an instruction -pub const R_LARCH_SOP_POP_32_U: u32 = 46; -/// 8-bit in-place addition -pub const R_LARCH_ADD8: u32 = 47; -/// 16-bit in-place addition -pub const R_LARCH_ADD16: u32 = 48; -/// 24-bit in-place addition -pub const R_LARCH_ADD24: u32 = 49; -/// 32-bit in-place addition -pub const R_LARCH_ADD32: u32 = 50; -/// 64-bit in-place addition -pub const R_LARCH_ADD64: u32 = 51; -/// 8-bit in-place subtraction -pub const R_LARCH_SUB8: u32 = 52; -/// 16-bit in-place subtraction -pub const R_LARCH_SUB16: u32 = 53; -/// 24-bit in-place subtraction -pub const R_LARCH_SUB24: u32 = 54; -/// 32-bit in-place subtraction -pub const R_LARCH_SUB32: u32 = 55; -/// 64-bit in-place subtraction -pub const R_LARCH_SUB64: u32 = 56; -/// GNU C++ vtable hierarchy -pub const R_LARCH_GNU_VTINHERIT: u32 = 57; -/// GNU C++ vtable member usage -pub const R_LARCH_GNU_VTENTRY: u32 = 58; -/// 18-bit PC-relative jump offset with two trailing zeros -pub const R_LARCH_B16: u32 = 64; -/// 23-bit PC-relative jump offset with two trailing zeros -pub const R_LARCH_B21: u32 = 65; -/// 28-bit PC-relative jump offset with two trailing zeros -pub const R_LARCH_B26: u32 = 66; -/// 12..=31 bits of 32/64-bit absolute address -pub const R_LARCH_ABS_HI20: u32 = 67; -/// 0..=11 bits of 32/64-bit absolute address -pub const R_LARCH_ABS_LO12: u32 = 68; -/// 32..=51 bits of 64-bit absolute address -pub const R_LARCH_ABS64_LO20: u32 = 69; -/// 52..=63 bits of 64-bit absolute address -pub const R_LARCH_ABS64_HI12: u32 = 70; -/// The signed 32-bit offset `offs` from `PC & 0xfffff000` to -/// `(S + A + 0x800) & 0xfffff000`, with 12 trailing zeros removed. -/// -/// We define the *PC relative anchor* for `S + A` as `PC + offs` (`offs` -/// is sign-extended to VA bits). 
-pub const R_LARCH_PCALA_HI20: u32 = 71; -/// Same as R_LARCH_ABS_LO12. 0..=11 bits of the 32/64-bit offset from the -/// [PC relative anchor][R_LARCH_PCALA_HI20]. -pub const R_LARCH_PCALA_LO12: u32 = 72; -/// 32..=51 bits of the 64-bit offset from the -/// [PC relative anchor][R_LARCH_PCALA_HI20]. -pub const R_LARCH_PCALA64_LO20: u32 = 73; -/// 52..=63 bits of the 64-bit offset from the -/// [PC relative anchor][R_LARCH_PCALA_HI20]. -pub const R_LARCH_PCALA64_HI12: u32 = 74; -/// The signed 32-bit offset `offs` from `PC & 0xfffff000` to -/// `(GP + G + 0x800) & 0xfffff000`, with 12 trailing zeros removed. -/// -/// We define the *PC relative anchor* for the GOT entry at `GP + G` as -/// `PC + offs` (`offs` is sign-extended to VA bits). -pub const R_LARCH_GOT_PC_HI20: u32 = 75; -/// 0..=11 bits of the 32/64-bit offset from the -/// [PC relative anchor][R_LARCH_GOT_PC_HI20] to the GOT entry. -pub const R_LARCH_GOT_PC_LO12: u32 = 76; -/// 32..=51 bits of the 64-bit offset from the -/// [PC relative anchor][R_LARCH_GOT_PC_HI20] to the GOT entry. -pub const R_LARCH_GOT64_PC_LO20: u32 = 77; -/// 52..=63 bits of the 64-bit offset from the -/// [PC relative anchor][R_LARCH_GOT_PC_HI20] to the GOT entry. 
-pub const R_LARCH_GOT64_PC_HI12: u32 = 78; -/// 12..=31 bits of 32/64-bit GOT entry absolute address -pub const R_LARCH_GOT_HI20: u32 = 79; -/// 0..=11 bits of 32/64-bit GOT entry absolute address -pub const R_LARCH_GOT_LO12: u32 = 80; -/// 32..=51 bits of 64-bit GOT entry absolute address -pub const R_LARCH_GOT64_LO20: u32 = 81; -/// 52..=63 bits of 64-bit GOT entry absolute address -pub const R_LARCH_GOT64_HI12: u32 = 82; -/// 12..=31 bits of TLS LE 32/64-bit offset from thread pointer -pub const R_LARCH_TLS_LE_HI20: u32 = 83; -/// 0..=11 bits of TLS LE 32/64-bit offset from thread pointer -pub const R_LARCH_TLS_LE_LO12: u32 = 84; -/// 32..=51 bits of TLS LE 64-bit offset from thread pointer -pub const R_LARCH_TLS_LE64_LO20: u32 = 85; -/// 52..=63 bits of TLS LE 64-bit offset from thread pointer -pub const R_LARCH_TLS_LE64_HI12: u32 = 86; -/// The signed 32-bit offset `offs` from `PC & 0xfffff000` to -/// `(GP + IE + 0x800) & 0xfffff000`, with 12 trailing zeros removed. -/// -/// We define the *PC relative anchor* for the TLS IE GOT entry at -/// `GP + IE` as `PC + offs` (`offs` is sign-extended to VA bits). -pub const R_LARCH_TLS_IE_PC_HI20: u32 = 87; -/// 0..=12 bits of the 32/64-bit offset from the -/// [PC-relative anchor][R_LARCH_TLS_IE_PC_HI20] to the TLS IE GOT entry. -pub const R_LARCH_TLS_IE_PC_LO12: u32 = 88; -/// 32..=51 bits of the 64-bit offset from the -/// [PC-relative anchor][R_LARCH_TLS_IE_PC_HI20] to the TLS IE GOT entry. -pub const R_LARCH_TLS_IE64_PC_LO20: u32 = 89; -/// 52..=63 bits of the 64-bit offset from the -/// [PC-relative anchor][R_LARCH_TLS_IE_PC_HI20] to the TLS IE GOT entry. 
-pub const R_LARCH_TLS_IE64_PC_HI12: u32 = 90; -/// 12..=31 bits of TLS IE GOT entry 32/64-bit absolute address -pub const R_LARCH_TLS_IE_HI20: u32 = 91; -/// 0..=11 bits of TLS IE GOT entry 32/64-bit absolute address -pub const R_LARCH_TLS_IE_LO12: u32 = 92; -/// 32..=51 bits of TLS IE GOT entry 64-bit absolute address -pub const R_LARCH_TLS_IE64_LO20: u32 = 93; -/// 51..=63 bits of TLS IE GOT entry 64-bit absolute address -pub const R_LARCH_TLS_IE64_HI12: u32 = 94; -/// 12..=31 bits of the offset from `PC` to `GP + GD + 0x800`, where -/// `GP + GD` is a TLS LD GOT entry -pub const R_LARCH_TLS_LD_PC_HI20: u32 = 95; -/// 12..=31 bits of TLS LD GOT entry 32/64-bit absolute address -pub const R_LARCH_TLS_LD_HI20: u32 = 96; -/// 12..=31 bits of the 32/64-bit PC-relative offset to the PC-relative -/// anchor for the TLE GD GOT entry. -pub const R_LARCH_TLS_GD_PC_HI20: u32 = 97; -/// 12..=31 bits of TLS GD GOT entry 32/64-bit absolute address -pub const R_LARCH_TLS_GD_HI20: u32 = 98; -/// 32-bit PC relative -pub const R_LARCH_32_PCREL: u32 = 99; -/// Paired with a normal relocation at the same address to indicate the -/// insturction can be relaxed -pub const R_LARCH_RELAX: u32 = 100; - -// Xtensa values Rel*::r_type`. 
-pub const R_XTENSA_NONE: u32 = 0; -pub const R_XTENSA_32: u32 = 1; -pub const R_XTENSA_RTLD: u32 = 2; -pub const R_XTENSA_GLOB_DAT: u32 = 3; -pub const R_XTENSA_JMP_SLOT: u32 = 4; -pub const R_XTENSA_RELATIVE: u32 = 5; -pub const R_XTENSA_PLT: u32 = 6; -pub const R_XTENSA_OP0: u32 = 8; -pub const R_XTENSA_OP1: u32 = 9; -pub const R_XTENSA_OP2: u32 = 10; -pub const R_XTENSA_ASM_EXPAND: u32 = 11; -pub const R_XTENSA_ASM_SIMPLIFY: u32 = 12; -pub const R_XTENSA_32_PCREL: u32 = 14; -pub const R_XTENSA_GNU_VTINHERIT: u32 = 15; -pub const R_XTENSA_GNU_VTENTRY: u32 = 16; -pub const R_XTENSA_DIFF8: u32 = 17; -pub const R_XTENSA_DIFF16: u32 = 18; -pub const R_XTENSA_DIFF32: u32 = 19; -pub const R_XTENSA_SLOT0_OP: u32 = 20; -pub const R_XTENSA_SLOT1_OP: u32 = 21; -pub const R_XTENSA_SLOT2_OP: u32 = 22; -pub const R_XTENSA_SLOT3_OP: u32 = 23; -pub const R_XTENSA_SLOT4_OP: u32 = 24; -pub const R_XTENSA_SLOT5_OP: u32 = 25; -pub const R_XTENSA_SLOT6_OP: u32 = 26; -pub const R_XTENSA_SLOT7_OP: u32 = 27; -pub const R_XTENSA_SLOT8_OP: u32 = 28; -pub const R_XTENSA_SLOT9_OP: u32 = 29; -pub const R_XTENSA_SLOT10_OP: u32 = 30; -pub const R_XTENSA_SLOT11_OP: u32 = 31; -pub const R_XTENSA_SLOT12_OP: u32 = 32; -pub const R_XTENSA_SLOT13_OP: u32 = 33; -pub const R_XTENSA_SLOT14_OP: u32 = 34; -pub const R_XTENSA_SLOT0_ALT: u32 = 35; -pub const R_XTENSA_SLOT1_ALT: u32 = 36; -pub const R_XTENSA_SLOT2_ALT: u32 = 37; -pub const R_XTENSA_SLOT3_ALT: u32 = 38; -pub const R_XTENSA_SLOT4_ALT: u32 = 39; -pub const R_XTENSA_SLOT5_ALT: u32 = 40; -pub const R_XTENSA_SLOT6_ALT: u32 = 41; -pub const R_XTENSA_SLOT7_ALT: u32 = 42; -pub const R_XTENSA_SLOT8_ALT: u32 = 43; -pub const R_XTENSA_SLOT9_ALT: u32 = 44; -pub const R_XTENSA_SLOT10_ALT: u32 = 45; -pub const R_XTENSA_SLOT11_ALT: u32 = 46; -pub const R_XTENSA_SLOT12_ALT: u32 = 47; -pub const R_XTENSA_SLOT13_ALT: u32 = 48; -pub const R_XTENSA_SLOT14_ALT: u32 = 49; -pub const R_XTENSA_TLSDESC_FN: u32 = 50; -pub const R_XTENSA_TLSDESC_ARG: u32 = 51; -pub 
const R_XTENSA_TLS_DTPOFF: u32 = 52; -pub const R_XTENSA_TLS_TPOFF: u32 = 53; -pub const R_XTENSA_TLS_FUNC: u32 = 54; -pub const R_XTENSA_TLS_ARG: u32 = 55; -pub const R_XTENSA_TLS_CALL: u32 = 56; -pub const R_XTENSA_PDIFF8: u32 = 57; -pub const R_XTENSA_PDIFF16: u32 = 58; -pub const R_XTENSA_PDIFF32: u32 = 59; -pub const R_XTENSA_NDIFF8: u32 = 60; -pub const R_XTENSA_NDIFF16: u32 = 61; -pub const R_XTENSA_NDIFF32: u32 = 62; - -#[allow(non_upper_case_globals)] -pub const Tag_File: u8 = 1; -#[allow(non_upper_case_globals)] -pub const Tag_Section: u8 = 2; -#[allow(non_upper_case_globals)] -pub const Tag_Symbol: u8 = 3; - -unsafe_impl_endian_pod!( - FileHeader32, - FileHeader64, - SectionHeader32, - SectionHeader64, - CompressionHeader32, - CompressionHeader64, - Sym32, - Sym64, - Syminfo32, - Syminfo64, - Rel32, - Rel64, - Rela32, - Rela64, - ProgramHeader32, - ProgramHeader64, - Dyn32, - Dyn64, - Versym, - Verdef, - Verdaux, - Verneed, - Vernaux, - NoteHeader32, - NoteHeader64, - HashHeader, - GnuHashHeader, -); diff -Nru s390-tools-2.31.0/rust-vendor/object/src/endian.rs s390-tools-2.33.1/rust-vendor/object/src/endian.rs --- s390-tools-2.31.0/rust-vendor/object/src/endian.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/endian.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,831 +0,0 @@ -//! Types for compile-time and run-time endianness. - -use crate::pod::Pod; -use core::fmt::{self, Debug}; -use core::marker::PhantomData; - -/// A trait for using an endianness specification. -/// -/// Provides methods for converting between the specified endianness and -/// the native endianness of the target machine. -/// -/// This trait does not require that the endianness is known at compile time. -pub trait Endian: Debug + Default + Clone + Copy + PartialEq + Eq + 'static { - /// Construct a specification for the endianness of some values. - /// - /// Returns `None` if the type does not support specifying the given endianness. 
- fn from_big_endian(big_endian: bool) -> Option; - - /// Construct a specification for the endianness of some values. - /// - /// Returns `None` if the type does not support specifying the given endianness. - fn from_little_endian(little_endian: bool) -> Option { - Self::from_big_endian(!little_endian) - } - - /// Return true for big endian byte order. - fn is_big_endian(self) -> bool; - - /// Return true for little endian byte order. - #[inline] - fn is_little_endian(self) -> bool { - !self.is_big_endian() - } - - /// Converts an unsigned 16 bit integer to native endian. - #[inline] - fn read_u16(self, n: u16) -> u16 { - if self.is_big_endian() { - u16::from_be(n) - } else { - u16::from_le(n) - } - } - - /// Converts an unsigned 32 bit integer to native endian. - #[inline] - fn read_u32(self, n: u32) -> u32 { - if self.is_big_endian() { - u32::from_be(n) - } else { - u32::from_le(n) - } - } - - /// Converts an unsigned 64 bit integer to native endian. - #[inline] - fn read_u64(self, n: u64) -> u64 { - if self.is_big_endian() { - u64::from_be(n) - } else { - u64::from_le(n) - } - } - - /// Converts a signed 16 bit integer to native endian. - #[inline] - fn read_i16(self, n: i16) -> i16 { - if self.is_big_endian() { - i16::from_be(n) - } else { - i16::from_le(n) - } - } - - /// Converts a signed 32 bit integer to native endian. - #[inline] - fn read_i32(self, n: i32) -> i32 { - if self.is_big_endian() { - i32::from_be(n) - } else { - i32::from_le(n) - } - } - - /// Converts a signed 64 bit integer to native endian. - #[inline] - fn read_i64(self, n: i64) -> i64 { - if self.is_big_endian() { - i64::from_be(n) - } else { - i64::from_le(n) - } - } - - /// Converts an unaligned unsigned 16 bit integer to native endian. - #[inline] - fn read_u16_bytes(self, n: [u8; 2]) -> u16 { - if self.is_big_endian() { - u16::from_be_bytes(n) - } else { - u16::from_le_bytes(n) - } - } - - /// Converts an unaligned unsigned 32 bit integer to native endian. 
- #[inline] - fn read_u32_bytes(self, n: [u8; 4]) -> u32 { - if self.is_big_endian() { - u32::from_be_bytes(n) - } else { - u32::from_le_bytes(n) - } - } - - /// Converts an unaligned unsigned 64 bit integer to native endian. - #[inline] - fn read_u64_bytes(self, n: [u8; 8]) -> u64 { - if self.is_big_endian() { - u64::from_be_bytes(n) - } else { - u64::from_le_bytes(n) - } - } - - /// Converts an unaligned signed 16 bit integer to native endian. - #[inline] - fn read_i16_bytes(self, n: [u8; 2]) -> i16 { - if self.is_big_endian() { - i16::from_be_bytes(n) - } else { - i16::from_le_bytes(n) - } - } - - /// Converts an unaligned signed 32 bit integer to native endian. - #[inline] - fn read_i32_bytes(self, n: [u8; 4]) -> i32 { - if self.is_big_endian() { - i32::from_be_bytes(n) - } else { - i32::from_le_bytes(n) - } - } - - /// Converts an unaligned signed 64 bit integer to native endian. - #[inline] - fn read_i64_bytes(self, n: [u8; 8]) -> i64 { - if self.is_big_endian() { - i64::from_be_bytes(n) - } else { - i64::from_le_bytes(n) - } - } - - /// Converts an unsigned 16 bit integer from native endian. - #[inline] - fn write_u16(self, n: u16) -> u16 { - if self.is_big_endian() { - u16::to_be(n) - } else { - u16::to_le(n) - } - } - - /// Converts an unsigned 32 bit integer from native endian. - #[inline] - fn write_u32(self, n: u32) -> u32 { - if self.is_big_endian() { - u32::to_be(n) - } else { - u32::to_le(n) - } - } - - /// Converts an unsigned 64 bit integer from native endian. - #[inline] - fn write_u64(self, n: u64) -> u64 { - if self.is_big_endian() { - u64::to_be(n) - } else { - u64::to_le(n) - } - } - - /// Converts a signed 16 bit integer from native endian. - #[inline] - fn write_i16(self, n: i16) -> i16 { - if self.is_big_endian() { - i16::to_be(n) - } else { - i16::to_le(n) - } - } - - /// Converts a signed 32 bit integer from native endian. 
- #[inline] - fn write_i32(self, n: i32) -> i32 { - if self.is_big_endian() { - i32::to_be(n) - } else { - i32::to_le(n) - } - } - - /// Converts a signed 64 bit integer from native endian. - #[inline] - fn write_i64(self, n: i64) -> i64 { - if self.is_big_endian() { - i64::to_be(n) - } else { - i64::to_le(n) - } - } - - /// Converts an unaligned unsigned 16 bit integer from native endian. - #[inline] - fn write_u16_bytes(self, n: u16) -> [u8; 2] { - if self.is_big_endian() { - u16::to_be_bytes(n) - } else { - u16::to_le_bytes(n) - } - } - - /// Converts an unaligned unsigned 32 bit integer from native endian. - #[inline] - fn write_u32_bytes(self, n: u32) -> [u8; 4] { - if self.is_big_endian() { - u32::to_be_bytes(n) - } else { - u32::to_le_bytes(n) - } - } - - /// Converts an unaligned unsigned 64 bit integer from native endian. - #[inline] - fn write_u64_bytes(self, n: u64) -> [u8; 8] { - if self.is_big_endian() { - u64::to_be_bytes(n) - } else { - u64::to_le_bytes(n) - } - } - - /// Converts an unaligned signed 16 bit integer from native endian. - #[inline] - fn write_i16_bytes(self, n: i16) -> [u8; 2] { - if self.is_big_endian() { - i16::to_be_bytes(n) - } else { - i16::to_le_bytes(n) - } - } - - /// Converts an unaligned signed 32 bit integer from native endian. - #[inline] - fn write_i32_bytes(self, n: i32) -> [u8; 4] { - if self.is_big_endian() { - i32::to_be_bytes(n) - } else { - i32::to_le_bytes(n) - } - } - - /// Converts an unaligned signed 64 bit integer from native endian. - #[inline] - fn write_i64_bytes(self, n: i64) -> [u8; 8] { - if self.is_big_endian() { - i64::to_be_bytes(n) - } else { - i64::to_le_bytes(n) - } - } -} - -/// An endianness that is selectable at run-time. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum Endianness { - /// Little endian byte order. - Little, - /// Big endian byte order. 
- Big, -} - -impl Default for Endianness { - #[cfg(target_endian = "little")] - #[inline] - fn default() -> Endianness { - Endianness::Little - } - - #[cfg(target_endian = "big")] - #[inline] - fn default() -> Endianness { - Endianness::Big - } -} - -impl Endian for Endianness { - #[inline] - fn from_big_endian(big_endian: bool) -> Option { - Some(if big_endian { - Endianness::Big - } else { - Endianness::Little - }) - } - - #[inline] - fn is_big_endian(self) -> bool { - self != Endianness::Little - } -} - -/// Compile-time little endian byte order. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct LittleEndian; - -impl Default for LittleEndian { - #[inline] - fn default() -> LittleEndian { - LittleEndian - } -} - -impl Endian for LittleEndian { - #[inline] - fn from_big_endian(big_endian: bool) -> Option { - if big_endian { - None - } else { - Some(LittleEndian) - } - } - - #[inline] - fn is_big_endian(self) -> bool { - false - } -} - -/// Compile-time big endian byte order. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct BigEndian; - -impl Default for BigEndian { - #[inline] - fn default() -> BigEndian { - BigEndian - } -} - -impl Endian for BigEndian { - #[inline] - fn from_big_endian(big_endian: bool) -> Option { - if big_endian { - Some(BigEndian) - } else { - None - } - } - - #[inline] - fn is_big_endian(self) -> bool { - true - } -} - -/// The native endianness for the target platform. -#[cfg(target_endian = "little")] -pub type NativeEndian = LittleEndian; - -#[cfg(target_endian = "little")] -#[allow(non_upper_case_globals)] -#[doc(hidden)] -pub const NativeEndian: LittleEndian = LittleEndian; - -/// The native endianness for the target platform. -#[cfg(target_endian = "big")] -pub type NativeEndian = BigEndian; - -#[cfg(target_endian = "big")] -#[allow(non_upper_case_globals)] -#[doc(hidden)] -pub const NativeEndian: BigEndian = BigEndian; - -macro_rules! unsafe_impl_endian_pod { - ($($struct_name:ident),+ $(,)?) 
=> { - $( - unsafe impl Pod for $struct_name { } - )+ - } -} - -#[cfg(not(feature = "unaligned"))] -mod aligned { - use super::{fmt, Endian, PhantomData, Pod}; - - /// A `u16` value with an externally specified endianness of type `E`. - #[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - #[repr(transparent)] - pub struct U16(u16, PhantomData); - - impl U16 { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 2]) -> Self { - Self(u16::from_ne_bytes(n), PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: u16) -> Self { - Self(e.write_u16(n), PhantomData) - } - - /// Return the value as a native endian value. - pub fn get(self, e: E) -> u16 { - e.read_u16(self.0) - } - - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: u16) { - self.0 = e.write_u16(n); - } - } - - /// A `u32` value with an externally specified endianness of type `E`. - #[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - #[repr(transparent)] - pub struct U32(u32, PhantomData); - - impl U32 { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 4]) -> Self { - Self(u32::from_ne_bytes(n), PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: u32) -> Self { - Self(e.write_u32(n), PhantomData) - } - /// Return the value as a native endian value. - pub fn get(self, e: E) -> u32 { - e.read_u32(self.0) - } - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: u32) { - self.0 = e.write_u32(n); - } - } - - /// A `u64` value with an externally specified endianness of type `E`. - #[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - #[repr(transparent)] - pub struct U64(u64, PhantomData); - - impl U64 { - /// Construct a new value given bytes that already have the required endianness. 
- pub fn from_bytes(n: [u8; 8]) -> Self { - Self(u64::from_ne_bytes(n), PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: u64) -> Self { - Self(e.write_u64(n), PhantomData) - } - /// Return the value as a native endian value. - pub fn get(self, e: E) -> u64 { - e.read_u64(self.0) - } - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: u64) { - self.0 = e.write_u64(n); - } - } - - /// An `i16` value with an externally specified endianness of type `E`. - #[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - #[repr(transparent)] - pub struct I16(i16, PhantomData); - - impl I16 { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 2]) -> Self { - Self(i16::from_ne_bytes(n), PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: i16) -> Self { - Self(e.write_i16(n), PhantomData) - } - /// Return the value as a native endian value. - pub fn get(self, e: E) -> i16 { - e.read_i16(self.0) - } - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: i16) { - self.0 = e.write_i16(n); - } - } - - /// An `i32` value with an externally specified endianness of type `E`. - #[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - #[repr(transparent)] - pub struct I32(i32, PhantomData); - - impl I32 { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 4]) -> Self { - Self(i32::from_ne_bytes(n), PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: i32) -> Self { - Self(e.write_i32(n), PhantomData) - } - /// Return the value as a native endian value. - pub fn get(self, e: E) -> i32 { - e.read_i32(self.0) - } - /// Set the value given a native endian value. 
- pub fn set(&mut self, e: E, n: i32) { - self.0 = e.write_i32(n); - } - } - - /// An `i64` value with an externally specified endianness of type `E`. - #[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] - #[repr(transparent)] - pub struct I64(i64, PhantomData); - - impl I64 { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 8]) -> Self { - Self(i64::from_ne_bytes(n), PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: i64) -> Self { - Self(e.write_i64(n), PhantomData) - } - /// Return the value as a native endian value. - pub fn get(self, e: E) -> i64 { - e.read_i64(self.0) - } - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: i64) { - self.0 = e.write_i64(n); - } - } - - impl fmt::Debug for U16 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "U16({:x})", self.0) - } - } - - impl fmt::Debug for U32 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "U32({:x})", self.0) - } - } - - impl fmt::Debug for U64 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "U64({:x})", self.0) - } - } - - impl fmt::Debug for I16 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "I16({:x})", self.0) - } - } - - impl fmt::Debug for I32 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "I32({:x})", self.0) - } - } - - impl fmt::Debug for I64 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "I64({:x})", self.0) - } - } - - unsafe_impl_endian_pod!(U16, U32, U64, I16, I32, I64); -} - -#[cfg(not(feature = "unaligned"))] -pub use aligned::*; - -/// A `u16` value with an externally specified endianness of type `E`. -#[cfg(feature = "unaligned")] -pub type U16 = U16Bytes; - -/// A `u32` value with an externally specified endianness of type `E`. 
-#[cfg(feature = "unaligned")] -pub type U32 = U32Bytes; - -/// A `u64` value with an externally specified endianness of type `E`. -#[cfg(feature = "unaligned")] -pub type U64 = U64Bytes; - -/// An `i16` value with an externally specified endianness of type `E`. -#[cfg(feature = "unaligned")] -pub type I16 = I16Bytes; - -/// An `i32` value with an externally specified endianness of type `E`. -#[cfg(feature = "unaligned")] -pub type I32 = I32Bytes; - -/// An `i64` value with an externally specified endianness of type `E`. -#[cfg(feature = "unaligned")] -pub type I64 = I64Bytes; - -/// An unaligned `u16` value with an externally specified endianness of type `E`. -#[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[repr(transparent)] -pub struct U16Bytes([u8; 2], PhantomData); - -impl U16Bytes { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 2]) -> Self { - Self(n, PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: u16) -> Self { - Self(e.write_u16_bytes(n), PhantomData) - } - - /// Return the value as a native endian value. - pub fn get(self, e: E) -> u16 { - e.read_u16_bytes(self.0) - } - - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: u16) { - self.0 = e.write_u16_bytes(n); - } -} - -/// An unaligned `u32` value with an externally specified endianness of type `E`. -#[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[repr(transparent)] -pub struct U32Bytes([u8; 4], PhantomData); - -impl U32Bytes { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 4]) -> Self { - Self(n, PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: u32) -> Self { - Self(e.write_u32_bytes(n), PhantomData) - } - - /// Return the value as a native endian value. 
- pub fn get(self, e: E) -> u32 { - e.read_u32_bytes(self.0) - } - - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: u32) { - self.0 = e.write_u32_bytes(n); - } -} - -/// An unaligned `u64` value with an externally specified endianness of type `E`. -#[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[repr(transparent)] -pub struct U64Bytes([u8; 8], PhantomData); - -impl U64Bytes { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 8]) -> Self { - Self(n, PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: u64) -> Self { - Self(e.write_u64_bytes(n), PhantomData) - } - - /// Return the value as a native endian value. - pub fn get(self, e: E) -> u64 { - e.read_u64_bytes(self.0) - } - - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: u64) { - self.0 = e.write_u64_bytes(n); - } -} - -/// An unaligned `i16` value with an externally specified endianness of type `E`. -#[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[repr(transparent)] -pub struct I16Bytes([u8; 2], PhantomData); - -impl I16Bytes { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 2]) -> Self { - Self(n, PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: i16) -> Self { - Self(e.write_i16_bytes(n), PhantomData) - } - - /// Return the value as a native endian value. - pub fn get(self, e: E) -> i16 { - e.read_i16_bytes(self.0) - } - - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: i16) { - self.0 = e.write_i16_bytes(n); - } -} - -/// An unaligned `i32` value with an externally specified endianness of type `E`. 
-#[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[repr(transparent)] -pub struct I32Bytes([u8; 4], PhantomData); - -impl I32Bytes { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 4]) -> Self { - Self(n, PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: i32) -> Self { - Self(e.write_i32_bytes(n), PhantomData) - } - - /// Return the value as a native endian value. - pub fn get(self, e: E) -> i32 { - e.read_i32_bytes(self.0) - } - - /// Set the value given a native endian value. - pub fn set(&mut self, e: E, n: i32) { - self.0 = e.write_i32_bytes(n); - } -} - -/// An unaligned `i64` value with an externally specified endianness of type `E`. -#[derive(Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[repr(transparent)] -pub struct I64Bytes([u8; 8], PhantomData); - -impl I64Bytes { - /// Construct a new value given bytes that already have the required endianness. - pub fn from_bytes(n: [u8; 8]) -> Self { - Self(n, PhantomData) - } - - /// Construct a new value given a native endian value. - pub fn new(e: E, n: i64) -> Self { - Self(e.write_i64_bytes(n), PhantomData) - } - - /// Return the value as a native endian value. - pub fn get(self, e: E) -> i64 { - e.read_i64_bytes(self.0) - } - - /// Set the value given a native endian value. 
- pub fn set(&mut self, e: E, n: i64) { - self.0 = e.write_i64_bytes(n); - } -} - -impl fmt::Debug for U16Bytes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "U16({:x}, {:x})", self.0[0], self.0[1],) - } -} - -impl fmt::Debug for U32Bytes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "U32({:x}, {:x}, {:x}, {:x})", - self.0[0], self.0[1], self.0[2], self.0[3], - ) - } -} - -impl fmt::Debug for U64Bytes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "U64({:x}, {:x}, {:x}, {:x}, {:x}, {:x}, {:x}, {:x})", - self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], self.0[6], self.0[7], - ) - } -} - -impl fmt::Debug for I16Bytes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "I16({:x}, {:x})", self.0[0], self.0[1],) - } -} - -impl fmt::Debug for I32Bytes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "I32({:x}, {:x}, {:x}, {:x})", - self.0[0], self.0[1], self.0[2], self.0[3], - ) - } -} - -impl fmt::Debug for I64Bytes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "I64({:x}, {:x}, {:x}, {:x}, {:x}, {:x}, {:x}, {:x})", - self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], self.0[6], self.0[7], - ) - } -} - -unsafe_impl_endian_pod!(U16Bytes, U32Bytes, U64Bytes, I16Bytes, I32Bytes, I64Bytes); diff -Nru s390-tools-2.31.0/rust-vendor/object/src/lib.rs s390-tools-2.33.1/rust-vendor/object/src/lib.rs --- s390-tools-2.31.0/rust-vendor/object/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,116 +0,0 @@ -//! # `object` -//! -//! The `object` crate provides a unified interface to working with object files -//! across platforms. It supports reading relocatable object files and executable files, -//! and writing relocatable object files and some executable files. -//! -//! 
## Raw struct definitions -//! -//! Raw structs are defined for: [ELF](elf), [Mach-O](macho), [PE/COFF](pe), -//! [XCOFF](xcoff), [archive]. -//! Types and traits for zerocopy support are defined in [pod] and [endian]. -//! -//! ## Unified read API -//! -//! The [read::Object] trait defines the unified interface. This trait is implemented -//! by [read::File], which allows reading any file format, as well as implementations -//! for each file format: [ELF](read::elf::ElfFile), [Mach-O](read::macho::MachOFile), -//! [COFF](read::coff::CoffFile), [PE](read::pe::PeFile), [Wasm](read::wasm::WasmFile), -//! [XCOFF](read::xcoff::XcoffFile). -//! -//! ## Low level read API -//! -//! In addition to the unified read API, the various `read` modules define helpers that -//! operate on the raw structs. These also provide traits that abstract over the differences -//! between 32-bit and 64-bit versions of the file format. -//! -//! ## Unified write API -//! -//! [write::Object] allows building a COFF/ELF/Mach-O/XCOFF relocatable object file and -//! then writing it out. -//! -//! ## Low level executable writers -//! -//! [write::elf::Writer] and [write::pe::Writer] allow writing executable files. -//! -//! ## Example for unified read API -//! ```no_run -//! # #[cfg(feature = "read")] -//! use object::{Object, ObjectSection}; -//! use std::error::Error; -//! use std::fs; -//! -//! /// Reads a file and displays the content of the ".boot" section. -//! fn main() -> Result<(), Box> { -//! # #[cfg(all(feature = "read", feature = "std"))] { -//! let bin_data = fs::read("./multiboot2-binary.elf")?; -//! let obj_file = object::File::parse(&*bin_data)?; -//! if let Some(section) = obj_file.section_by_name(".boot") { -//! println!("{:#x?}", section.data()?); -//! } else { -//! eprintln!("section not available"); -//! } -//! # } -//! Ok(()) -//! } -//! ``` - -#![deny(missing_docs)] -#![deny(missing_debug_implementations)] -#![no_std] -#![warn(rust_2018_idioms)] -// Style. 
-#![allow(clippy::collapsible_if)] -#![allow(clippy::comparison_chain)] -#![allow(clippy::match_like_matches_macro)] -#![allow(clippy::single_match)] -#![allow(clippy::type_complexity)] -// Occurs due to fallible iteration. -#![allow(clippy::should_implement_trait)] -// Unit errors are converted to other types by callers. -#![allow(clippy::result_unit_err)] -// Worse readability sometimes. -#![allow(clippy::collapsible_else_if)] - -#[cfg(feature = "cargo-all")] -compile_error!("'--all-features' is not supported; use '--features all' instead"); - -#[cfg(any(feature = "read_core", feature = "write_core"))] -#[allow(unused_imports)] -#[macro_use] -extern crate alloc; - -#[cfg(feature = "std")] -#[allow(unused_imports)] -#[macro_use] -extern crate std; - -mod common; -pub use common::*; - -#[macro_use] -pub mod endian; -pub use endian::*; - -#[macro_use] -pub mod pod; -pub use pod::*; - -#[cfg(feature = "read_core")] -pub mod read; -#[cfg(feature = "read_core")] -pub use read::*; - -#[cfg(feature = "write_core")] -pub mod write; - -#[cfg(feature = "archive")] -pub mod archive; -#[cfg(feature = "elf")] -pub mod elf; -#[cfg(feature = "macho")] -pub mod macho; -#[cfg(any(feature = "coff", feature = "pe"))] -pub mod pe; -#[cfg(feature = "xcoff")] -pub mod xcoff; diff -Nru s390-tools-2.31.0/rust-vendor/object/src/macho.rs s390-tools-2.33.1/rust-vendor/object/src/macho.rs --- s390-tools-2.31.0/rust-vendor/object/src/macho.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/macho.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,3307 +0,0 @@ -//! Mach-O definitions. -//! -//! These definitions are independent of read/write support, although we do implement -//! some traits useful for those. -//! -//! This module is based heavily on header files from MacOSX11.1.sdk. - -#![allow(missing_docs)] - -use crate::endian::{BigEndian, Endian, U64Bytes, U16, U32, U64}; -use crate::pod::Pod; - -// Definitions from "/usr/include/mach/machine.h". 
- -/* - * Capability bits used in the definition of cpu_type. - */ - -/// mask for architecture bits -pub const CPU_ARCH_MASK: u32 = 0xff00_0000; -/// 64 bit ABI -pub const CPU_ARCH_ABI64: u32 = 0x0100_0000; -/// ABI for 64-bit hardware with 32-bit types; LP32 -pub const CPU_ARCH_ABI64_32: u32 = 0x0200_0000; - -/* - * Machine types known by all. - */ - -pub const CPU_TYPE_ANY: u32 = !0; - -pub const CPU_TYPE_VAX: u32 = 1; -pub const CPU_TYPE_MC680X0: u32 = 6; -pub const CPU_TYPE_X86: u32 = 7; -pub const CPU_TYPE_X86_64: u32 = CPU_TYPE_X86 | CPU_ARCH_ABI64; -pub const CPU_TYPE_MIPS: u32 = 8; -pub const CPU_TYPE_MC98000: u32 = 10; -pub const CPU_TYPE_HPPA: u32 = 11; -pub const CPU_TYPE_ARM: u32 = 12; -pub const CPU_TYPE_ARM64: u32 = CPU_TYPE_ARM | CPU_ARCH_ABI64; -pub const CPU_TYPE_ARM64_32: u32 = CPU_TYPE_ARM | CPU_ARCH_ABI64_32; -pub const CPU_TYPE_MC88000: u32 = 13; -pub const CPU_TYPE_SPARC: u32 = 14; -pub const CPU_TYPE_I860: u32 = 15; -pub const CPU_TYPE_ALPHA: u32 = 16; -pub const CPU_TYPE_POWERPC: u32 = 18; -pub const CPU_TYPE_POWERPC64: u32 = CPU_TYPE_POWERPC | CPU_ARCH_ABI64; - -/* - * Capability bits used in the definition of cpu_subtype. - */ -/// mask for feature flags -pub const CPU_SUBTYPE_MASK: u32 = 0xff00_0000; -/// 64 bit libraries -pub const CPU_SUBTYPE_LIB64: u32 = 0x8000_0000; -/// pointer authentication with versioned ABI -pub const CPU_SUBTYPE_PTRAUTH_ABI: u32 = 0x8000_0000; - -/// When selecting a slice, ANY will pick the slice with the best -/// grading for the selected cpu_type_t, unlike the "ALL" subtypes, -/// which are the slices that can run on any hardware for that cpu type. -pub const CPU_SUBTYPE_ANY: u32 = !0; - -/* - * Object files that are hand-crafted to run on any - * implementation of an architecture are tagged with - * CPU_SUBTYPE_MULTIPLE. 
This functions essentially the same as - * the "ALL" subtype of an architecture except that it allows us - * to easily find object files that may need to be modified - * whenever a new implementation of an architecture comes out. - * - * It is the responsibility of the implementor to make sure the - * software handles unsupported implementations elegantly. - */ -pub const CPU_SUBTYPE_MULTIPLE: u32 = !0; -pub const CPU_SUBTYPE_LITTLE_ENDIAN: u32 = 0; -pub const CPU_SUBTYPE_BIG_ENDIAN: u32 = 1; - -/* - * VAX subtypes (these do *not* necessary conform to the actual cpu - * ID assigned by DEC available via the SID register). - */ - -pub const CPU_SUBTYPE_VAX_ALL: u32 = 0; -pub const CPU_SUBTYPE_VAX780: u32 = 1; -pub const CPU_SUBTYPE_VAX785: u32 = 2; -pub const CPU_SUBTYPE_VAX750: u32 = 3; -pub const CPU_SUBTYPE_VAX730: u32 = 4; -pub const CPU_SUBTYPE_UVAXI: u32 = 5; -pub const CPU_SUBTYPE_UVAXII: u32 = 6; -pub const CPU_SUBTYPE_VAX8200: u32 = 7; -pub const CPU_SUBTYPE_VAX8500: u32 = 8; -pub const CPU_SUBTYPE_VAX8600: u32 = 9; -pub const CPU_SUBTYPE_VAX8650: u32 = 10; -pub const CPU_SUBTYPE_VAX8800: u32 = 11; -pub const CPU_SUBTYPE_UVAXIII: u32 = 12; - -/* - * 680x0 subtypes - * - * The subtype definitions here are unusual for historical reasons. - * NeXT used to consider 68030 code as generic 68000 code. For - * backwards compatibility: - * - * CPU_SUBTYPE_MC68030 symbol has been preserved for source code - * compatibility. - * - * CPU_SUBTYPE_MC680x0_ALL has been defined to be the same - * subtype as CPU_SUBTYPE_MC68030 for binary comatability. - * - * CPU_SUBTYPE_MC68030_ONLY has been added to allow new object - * files to be tagged as containing 68030-specific instructions. 
- */ - -pub const CPU_SUBTYPE_MC680X0_ALL: u32 = 1; -// compat -pub const CPU_SUBTYPE_MC68030: u32 = 1; -pub const CPU_SUBTYPE_MC68040: u32 = 2; -pub const CPU_SUBTYPE_MC68030_ONLY: u32 = 3; - -/* - * I386 subtypes - */ - -#[inline] -pub const fn cpu_subtype_intel(f: u32, m: u32) -> u32 { - f + (m << 4) -} - -pub const CPU_SUBTYPE_I386_ALL: u32 = cpu_subtype_intel(3, 0); -pub const CPU_SUBTYPE_386: u32 = cpu_subtype_intel(3, 0); -pub const CPU_SUBTYPE_486: u32 = cpu_subtype_intel(4, 0); -pub const CPU_SUBTYPE_486SX: u32 = cpu_subtype_intel(4, 8); -pub const CPU_SUBTYPE_586: u32 = cpu_subtype_intel(5, 0); -pub const CPU_SUBTYPE_PENT: u32 = cpu_subtype_intel(5, 0); -pub const CPU_SUBTYPE_PENTPRO: u32 = cpu_subtype_intel(6, 1); -pub const CPU_SUBTYPE_PENTII_M3: u32 = cpu_subtype_intel(6, 3); -pub const CPU_SUBTYPE_PENTII_M5: u32 = cpu_subtype_intel(6, 5); -pub const CPU_SUBTYPE_CELERON: u32 = cpu_subtype_intel(7, 6); -pub const CPU_SUBTYPE_CELERON_MOBILE: u32 = cpu_subtype_intel(7, 7); -pub const CPU_SUBTYPE_PENTIUM_3: u32 = cpu_subtype_intel(8, 0); -pub const CPU_SUBTYPE_PENTIUM_3_M: u32 = cpu_subtype_intel(8, 1); -pub const CPU_SUBTYPE_PENTIUM_3_XEON: u32 = cpu_subtype_intel(8, 2); -pub const CPU_SUBTYPE_PENTIUM_M: u32 = cpu_subtype_intel(9, 0); -pub const CPU_SUBTYPE_PENTIUM_4: u32 = cpu_subtype_intel(10, 0); -pub const CPU_SUBTYPE_PENTIUM_4_M: u32 = cpu_subtype_intel(10, 1); -pub const CPU_SUBTYPE_ITANIUM: u32 = cpu_subtype_intel(11, 0); -pub const CPU_SUBTYPE_ITANIUM_2: u32 = cpu_subtype_intel(11, 1); -pub const CPU_SUBTYPE_XEON: u32 = cpu_subtype_intel(12, 0); -pub const CPU_SUBTYPE_XEON_MP: u32 = cpu_subtype_intel(12, 1); - -#[inline] -pub const fn cpu_subtype_intel_family(x: u32) -> u32 { - x & 15 -} -pub const CPU_SUBTYPE_INTEL_FAMILY_MAX: u32 = 15; - -#[inline] -pub const fn cpu_subtype_intel_model(x: u32) -> u32 { - x >> 4 -} -pub const CPU_SUBTYPE_INTEL_MODEL_ALL: u32 = 0; - -/* - * X86 subtypes. 
- */ - -pub const CPU_SUBTYPE_X86_ALL: u32 = 3; -pub const CPU_SUBTYPE_X86_64_ALL: u32 = 3; -pub const CPU_SUBTYPE_X86_ARCH1: u32 = 4; -/// Haswell feature subset -pub const CPU_SUBTYPE_X86_64_H: u32 = 8; - -/* - * Mips subtypes. - */ - -pub const CPU_SUBTYPE_MIPS_ALL: u32 = 0; -pub const CPU_SUBTYPE_MIPS_R2300: u32 = 1; -pub const CPU_SUBTYPE_MIPS_R2600: u32 = 2; -pub const CPU_SUBTYPE_MIPS_R2800: u32 = 3; -/// pmax -pub const CPU_SUBTYPE_MIPS_R2000A: u32 = 4; -pub const CPU_SUBTYPE_MIPS_R2000: u32 = 5; -/// 3max -pub const CPU_SUBTYPE_MIPS_R3000A: u32 = 6; -pub const CPU_SUBTYPE_MIPS_R3000: u32 = 7; - -/* - * MC98000 (PowerPC) subtypes - */ -pub const CPU_SUBTYPE_MC98000_ALL: u32 = 0; -pub const CPU_SUBTYPE_MC98601: u32 = 1; - -/* - * HPPA subtypes for Hewlett-Packard HP-PA family of - * risc processors. Port by NeXT to 700 series. - */ - -pub const CPU_SUBTYPE_HPPA_ALL: u32 = 0; -pub const CPU_SUBTYPE_HPPA_7100LC: u32 = 1; - -/* - * MC88000 subtypes. - */ -pub const CPU_SUBTYPE_MC88000_ALL: u32 = 0; -pub const CPU_SUBTYPE_MC88100: u32 = 1; -pub const CPU_SUBTYPE_MC88110: u32 = 2; - -/* - * SPARC subtypes - */ -pub const CPU_SUBTYPE_SPARC_ALL: u32 = 0; - -/* - * I860 subtypes - */ -pub const CPU_SUBTYPE_I860_ALL: u32 = 0; -pub const CPU_SUBTYPE_I860_860: u32 = 1; - -/* - * PowerPC subtypes - */ -pub const CPU_SUBTYPE_POWERPC_ALL: u32 = 0; -pub const CPU_SUBTYPE_POWERPC_601: u32 = 1; -pub const CPU_SUBTYPE_POWERPC_602: u32 = 2; -pub const CPU_SUBTYPE_POWERPC_603: u32 = 3; -pub const CPU_SUBTYPE_POWERPC_603E: u32 = 4; -pub const CPU_SUBTYPE_POWERPC_603EV: u32 = 5; -pub const CPU_SUBTYPE_POWERPC_604: u32 = 6; -pub const CPU_SUBTYPE_POWERPC_604E: u32 = 7; -pub const CPU_SUBTYPE_POWERPC_620: u32 = 8; -pub const CPU_SUBTYPE_POWERPC_750: u32 = 9; -pub const CPU_SUBTYPE_POWERPC_7400: u32 = 10; -pub const CPU_SUBTYPE_POWERPC_7450: u32 = 11; -pub const CPU_SUBTYPE_POWERPC_970: u32 = 100; - -/* - * ARM subtypes - */ -pub const CPU_SUBTYPE_ARM_ALL: u32 = 0; -pub const 
CPU_SUBTYPE_ARM_V4T: u32 = 5; -pub const CPU_SUBTYPE_ARM_V6: u32 = 6; -pub const CPU_SUBTYPE_ARM_V5TEJ: u32 = 7; -pub const CPU_SUBTYPE_ARM_XSCALE: u32 = 8; -/// ARMv7-A and ARMv7-R -pub const CPU_SUBTYPE_ARM_V7: u32 = 9; -/// Cortex A9 -pub const CPU_SUBTYPE_ARM_V7F: u32 = 10; -/// Swift -pub const CPU_SUBTYPE_ARM_V7S: u32 = 11; -pub const CPU_SUBTYPE_ARM_V7K: u32 = 12; -pub const CPU_SUBTYPE_ARM_V8: u32 = 13; -/// Not meant to be run under xnu -pub const CPU_SUBTYPE_ARM_V6M: u32 = 14; -/// Not meant to be run under xnu -pub const CPU_SUBTYPE_ARM_V7M: u32 = 15; -/// Not meant to be run under xnu -pub const CPU_SUBTYPE_ARM_V7EM: u32 = 16; -/// Not meant to be run under xnu -pub const CPU_SUBTYPE_ARM_V8M: u32 = 17; - -/* - * ARM64 subtypes - */ -pub const CPU_SUBTYPE_ARM64_ALL: u32 = 0; -pub const CPU_SUBTYPE_ARM64_V8: u32 = 1; -pub const CPU_SUBTYPE_ARM64E: u32 = 2; - -/* - * ARM64_32 subtypes - */ -pub const CPU_SUBTYPE_ARM64_32_ALL: u32 = 0; -pub const CPU_SUBTYPE_ARM64_32_V8: u32 = 1; - -// Definitions from "/usr/include/mach/vm_prot.h". - -/// read permission -pub const VM_PROT_READ: u32 = 0x01; -/// write permission -pub const VM_PROT_WRITE: u32 = 0x02; -/// execute permission -pub const VM_PROT_EXECUTE: u32 = 0x04; - -// Definitions from https://opensource.apple.com/source/dyld/dyld-210.2.3/launch-cache/dyld_cache_format.h.auto.html - -/// The dyld cache header. -/// Corresponds to struct dyld_cache_header from dyld_cache_format.h. -/// This header has grown over time. Only the fields up to and including dyld_base_address -/// are guaranteed to be present. For all other fields, check the header size before -/// accessing the field. The header size is stored in mapping_offset; the mappings start -/// right after the theader. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DyldCacheHeader { - /// e.g. 
"dyld_v0 i386" - pub magic: [u8; 16], - /// file offset to first dyld_cache_mapping_info - pub mapping_offset: U32, // offset: 0x10 - /// number of dyld_cache_mapping_info entries - pub mapping_count: U32, // offset: 0x14 - /// file offset to first dyld_cache_image_info - pub images_offset: U32, // offset: 0x18 - /// number of dyld_cache_image_info entries - pub images_count: U32, // offset: 0x1c - /// base address of dyld when cache was built - pub dyld_base_address: U64, // offset: 0x20 - /// - reserved1: [u8; 32], // offset: 0x28 - /// file offset of where local symbols are stored - pub local_symbols_offset: U64, // offset: 0x48 - /// size of local symbols information - pub local_symbols_size: U64, // offset: 0x50 - /// unique value for each shared cache file - pub uuid: [u8; 16], // offset: 0x58 - /// - reserved2: [u8; 32], // offset: 0x68 - /// - reserved3: [u8; 32], // offset: 0x88 - /// - reserved4: [u8; 32], // offset: 0xa8 - /// - reserved5: [u8; 32], // offset: 0xc8 - /// - reserved6: [u8; 32], // offset: 0xe8 - /// - reserved7: [u8; 32], // offset: 0x108 - /// - reserved8: [u8; 32], // offset: 0x128 - /// - reserved9: [u8; 32], // offset: 0x148 - /// - reserved10: [u8; 32], // offset: 0x168 - /// file offset to first dyld_subcache_info - pub subcaches_offset: U32, // offset: 0x188 - /// number of dyld_subcache_info entries - pub subcaches_count: U32, // offset: 0x18c - /// the UUID of the .symbols subcache - pub symbols_subcache_uuid: [u8; 16], // offset: 0x190 - /// - reserved11: [u8; 32], // offset: 0x1a0 - /// file offset to first dyld_cache_image_info - /// Use this instead of images_offset if mapping_offset is at least 0x1c4. - pub images_across_all_subcaches_offset: U32, // offset: 0x1c0 - /// number of dyld_cache_image_info entries - /// Use this instead of images_count if mapping_offset is at least 0x1c4. 
- pub images_across_all_subcaches_count: U32, // offset: 0x1c4 -} - -/// Corresponds to struct dyld_cache_mapping_info from dyld_cache_format.h. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DyldCacheMappingInfo { - /// - pub address: U64, - /// - pub size: U64, - /// - pub file_offset: U64, - /// - pub max_prot: U32, - /// - pub init_prot: U32, -} - -/// Corresponds to struct dyld_cache_image_info from dyld_cache_format.h. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DyldCacheImageInfo { - /// - pub address: U64, - /// - pub mod_time: U64, - /// - pub inode: U64, - /// - pub path_file_offset: U32, - /// - pub pad: U32, -} - -/// Corresponds to a struct whose source code has not been published as of Nov 2021. -/// Added in the dyld cache version which shipped with macOS 12 / iOS 15. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DyldSubCacheInfo { - /// The UUID of this subcache. - pub uuid: [u8; 16], - /// The size of this subcache plus all previous subcaches. - pub cumulative_size: U64, -} - -// Definitions from "/usr/include/mach-o/loader.h". - -/* - * This header file describes the structures of the file format for "fat" - * architecture specific file (wrapper design). At the beginning of the file - * there is one `FatHeader` structure followed by a number of `FatArch*` - * structures. For each architecture in the file, specified by a pair of - * cputype and cpusubtype, the `FatHeader` describes the file offset, file - * size and alignment in the file of the architecture specific member. - * The padded bytes in the file to place each member on it's specific alignment - * are defined to be read as zeros and can be left as "holes" if the file system - * can support them as long as they read as zeros. - * - * All structures defined here are always written and read to/from disk - * in big-endian order. 
- */ - -pub const FAT_MAGIC: u32 = 0xcafe_babe; -/// NXSwapLong(FAT_MAGIC) -pub const FAT_CIGAM: u32 = 0xbeba_feca; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FatHeader { - /// FAT_MAGIC or FAT_MAGIC_64 - pub magic: U32, - /// number of structs that follow - pub nfat_arch: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FatArch32 { - /// cpu specifier (int) - pub cputype: U32, - /// machine specifier (int) - pub cpusubtype: U32, - /// file offset to this object file - pub offset: U32, - /// size of this object file - pub size: U32, - /// alignment as a power of 2 - pub align: U32, -} - -/* - * The support for the 64-bit fat file format described here is a work in - * progress and not yet fully supported in all the Apple Developer Tools. - * - * When a slice is greater than 4mb or an offset to a slice is greater than 4mb - * then the 64-bit fat file format is used. - */ -pub const FAT_MAGIC_64: u32 = 0xcafe_babf; -/// NXSwapLong(FAT_MAGIC_64) -pub const FAT_CIGAM_64: u32 = 0xbfba_feca; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FatArch64 { - /// cpu specifier (int) - pub cputype: U32, - /// machine specifier (int) - pub cpusubtype: U32, - /// file offset to this object file - pub offset: U64, - /// size of this object file - pub size: U64, - /// alignment as a power of 2 - pub align: U32, - /// reserved - pub reserved: U32, -} - -// Definitions from "/usr/include/mach-o/loader.h". - -/// The 32-bit mach header. -/// -/// Appears at the very beginning of the object file for 32-bit architectures. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct MachHeader32 { - /// mach magic number identifier - pub magic: U32, - /// cpu specifier - pub cputype: U32, - /// machine specifier - pub cpusubtype: U32, - /// type of file - pub filetype: U32, - /// number of load commands - pub ncmds: U32, - /// the size of all the load commands - pub sizeofcmds: U32, - /// flags - pub flags: U32, -} - -// Values for `MachHeader32::magic`. 
-/// the mach magic number -pub const MH_MAGIC: u32 = 0xfeed_face; -/// NXSwapInt(MH_MAGIC) -pub const MH_CIGAM: u32 = 0xcefa_edfe; - -/// The 64-bit mach header. -/// -/// Appears at the very beginning of object files for 64-bit architectures. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct MachHeader64 { - /// mach magic number identifier - pub magic: U32, - /// cpu specifier - pub cputype: U32, - /// machine specifier - pub cpusubtype: U32, - /// type of file - pub filetype: U32, - /// number of load commands - pub ncmds: U32, - /// the size of all the load commands - pub sizeofcmds: U32, - /// flags - pub flags: U32, - /// reserved - pub reserved: U32, -} - -// Values for `MachHeader64::magic`. -/// the 64-bit mach magic number -pub const MH_MAGIC_64: u32 = 0xfeed_facf; -/// NXSwapInt(MH_MAGIC_64) -pub const MH_CIGAM_64: u32 = 0xcffa_edfe; - -/* - * The layout of the file depends on the filetype. For all but the MH_OBJECT - * file type the segments are padded out and aligned on a segment alignment - * boundary for efficient demand pageing. The MH_EXECUTE, MH_FVMLIB, MH_DYLIB, - * MH_DYLINKER and MH_BUNDLE file types also have the headers included as part - * of their first segment. - * - * The file type MH_OBJECT is a compact format intended as output of the - * assembler and input (and possibly output) of the link editor (the .o - * format). All sections are in one unnamed segment with no segment padding. - * This format is used as an executable format when the file is so small the - * segment padding greatly increases its size. - * - * The file type MH_PRELOAD is an executable format intended for things that - * are not executed under the kernel (proms, stand alones, kernels, etc). The - * format can be executed under the kernel but may demand paged it and not - * preload it before execution. - * - * A core file is in MH_CORE format and can be any in an arbritray legal - * Mach-O file. - */ - -// Values for `MachHeader*::filetype`. 
-/// relocatable object file -pub const MH_OBJECT: u32 = 0x1; -/// demand paged executable file -pub const MH_EXECUTE: u32 = 0x2; -/// fixed VM shared library file -pub const MH_FVMLIB: u32 = 0x3; -/// core file -pub const MH_CORE: u32 = 0x4; -/// preloaded executable file -pub const MH_PRELOAD: u32 = 0x5; -/// dynamically bound shared library -pub const MH_DYLIB: u32 = 0x6; -/// dynamic link editor -pub const MH_DYLINKER: u32 = 0x7; -/// dynamically bound bundle file -pub const MH_BUNDLE: u32 = 0x8; -/// shared library stub for static linking only, no section contents -pub const MH_DYLIB_STUB: u32 = 0x9; -/// companion file with only debug sections -pub const MH_DSYM: u32 = 0xa; -/// x86_64 kexts -pub const MH_KEXT_BUNDLE: u32 = 0xb; -/// set of mach-o's -pub const MH_FILESET: u32 = 0xc; - -// Values for `MachHeader*::flags`. -/// the object file has no undefined references -pub const MH_NOUNDEFS: u32 = 0x1; -/// the object file is the output of an incremental link against a base file and can't be link edited again -pub const MH_INCRLINK: u32 = 0x2; -/// the object file is input for the dynamic linker and can't be statically link edited again -pub const MH_DYLDLINK: u32 = 0x4; -/// the object file's undefined references are bound by the dynamic linker when loaded. -pub const MH_BINDATLOAD: u32 = 0x8; -/// the file has its dynamic undefined references prebound. 
-pub const MH_PREBOUND: u32 = 0x10; -/// the file has its read-only and read-write segments split -pub const MH_SPLIT_SEGS: u32 = 0x20; -/// the shared library init routine is to be run lazily via catching memory faults to its writeable segments (obsolete) -pub const MH_LAZY_INIT: u32 = 0x40; -/// the image is using two-level name space bindings -pub const MH_TWOLEVEL: u32 = 0x80; -/// the executable is forcing all images to use flat name space bindings -pub const MH_FORCE_FLAT: u32 = 0x100; -/// this umbrella guarantees no multiple definitions of symbols in its sub-images so the two-level namespace hints can always be used. -pub const MH_NOMULTIDEFS: u32 = 0x200; -/// do not have dyld notify the prebinding agent about this executable -pub const MH_NOFIXPREBINDING: u32 = 0x400; -/// the binary is not prebound but can have its prebinding redone. only used when MH_PREBOUND is not set. -pub const MH_PREBINDABLE: u32 = 0x800; -/// indicates that this binary binds to all two-level namespace modules of its dependent libraries. only used when MH_PREBINDABLE and MH_TWOLEVEL are both set. -pub const MH_ALLMODSBOUND: u32 = 0x1000; -/// safe to divide up the sections into sub-sections via symbols for dead code stripping -pub const MH_SUBSECTIONS_VIA_SYMBOLS: u32 = 0x2000; -/// the binary has been canonicalized via the unprebind operation -pub const MH_CANONICAL: u32 = 0x4000; -/// the final linked image contains external weak symbols -pub const MH_WEAK_DEFINES: u32 = 0x8000; -/// the final linked image uses weak symbols -pub const MH_BINDS_TO_WEAK: u32 = 0x10000; -/// When this bit is set, all stacks in the task will be given stack execution privilege. Only used in MH_EXECUTE filetypes. 
-pub const MH_ALLOW_STACK_EXECUTION: u32 = 0x20000; -/// When this bit is set, the binary declares it is safe for use in processes with uid zero -pub const MH_ROOT_SAFE: u32 = 0x40000; -/// When this bit is set, the binary declares it is safe for use in processes when issetugid() is true -pub const MH_SETUID_SAFE: u32 = 0x80000; -/// When this bit is set on a dylib, the static linker does not need to examine dependent dylibs to see if any are re-exported -pub const MH_NO_REEXPORTED_DYLIBS: u32 = 0x10_0000; -/// When this bit is set, the OS will load the main executable at a random address. Only used in MH_EXECUTE filetypes. -pub const MH_PIE: u32 = 0x20_0000; -/// Only for use on dylibs. When linking against a dylib that has this bit set, the static linker will automatically not create a LC_LOAD_DYLIB load command to the dylib if no symbols are being referenced from the dylib. -pub const MH_DEAD_STRIPPABLE_DYLIB: u32 = 0x40_0000; -/// Contains a section of type S_THREAD_LOCAL_VARIABLES -pub const MH_HAS_TLV_DESCRIPTORS: u32 = 0x80_0000; -/// When this bit is set, the OS will run the main executable with a non-executable heap even on platforms (e.g. i386) that don't require it. Only used in MH_EXECUTE filetypes. -pub const MH_NO_HEAP_EXECUTION: u32 = 0x100_0000; -/// The code was linked for use in an application extension. -pub const MH_APP_EXTENSION_SAFE: u32 = 0x0200_0000; -/// The external symbols listed in the nlist symbol table do not include all the symbols listed in the dyld info. -pub const MH_NLIST_OUTOFSYNC_WITH_DYLDINFO: u32 = 0x0400_0000; -/// Allow LC_MIN_VERSION_MACOS and LC_BUILD_VERSION load commands with -/// the platforms macOS, iOSMac, iOSSimulator, tvOSSimulator and watchOSSimulator. -pub const MH_SIM_SUPPORT: u32 = 0x0800_0000; -/// Only for use on dylibs. When this bit is set, the dylib is part of the dyld -/// shared cache, rather than loose in the filesystem. 
-pub const MH_DYLIB_IN_CACHE: u32 = 0x8000_0000; - -/// Common fields at the start of every load command. -/// -/// The load commands directly follow the mach_header. The total size of all -/// of the commands is given by the sizeofcmds field in the mach_header. All -/// load commands must have as their first two fields `cmd` and `cmdsize`. The `cmd` -/// field is filled in with a constant for that command type. Each command type -/// has a structure specifically for it. The `cmdsize` field is the size in bytes -/// of the particular load command structure plus anything that follows it that -/// is a part of the load command (i.e. section structures, strings, etc.). To -/// advance to the next load command the `cmdsize` can be added to the offset or -/// pointer of the current load command. The `cmdsize` for 32-bit architectures -/// MUST be a multiple of 4 bytes and for 64-bit architectures MUST be a multiple -/// of 8 bytes (these are forever the maximum alignment of any load commands). -/// The padded bytes must be zero. All tables in the object file must also -/// follow these rules so the file can be memory mapped. Otherwise the pointers -/// to these tables will not work well or at all on some machines. With all -/// padding zeroed like objects will compare byte for byte. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct LoadCommand { - /// Type of load command. - /// - /// One of the `LC_*` constants. - pub cmd: U32, - /// Total size of command in bytes. - pub cmdsize: U32, -} - -/* - * After MacOS X 10.1 when a new load command is added that is required to be - * understood by the dynamic linker for the image to execute properly the - * LC_REQ_DYLD bit will be or'ed into the load command constant. If the dynamic - * linker sees such a load command it it does not understand will issue a - * "unknown load command required for execution" error and refuse to use the - * image. 
Other load commands without this bit that are not understood will - * simply be ignored. - */ -pub const LC_REQ_DYLD: u32 = 0x8000_0000; - -/* Constants for the cmd field of all load commands, the type */ -/// segment of this file to be mapped -pub const LC_SEGMENT: u32 = 0x1; -/// link-edit stab symbol table info -pub const LC_SYMTAB: u32 = 0x2; -/// link-edit gdb symbol table info (obsolete) -pub const LC_SYMSEG: u32 = 0x3; -/// thread -pub const LC_THREAD: u32 = 0x4; -/// unix thread (includes a stack) -pub const LC_UNIXTHREAD: u32 = 0x5; -/// load a specified fixed VM shared library -pub const LC_LOADFVMLIB: u32 = 0x6; -/// fixed VM shared library identification -pub const LC_IDFVMLIB: u32 = 0x7; -/// object identification info (obsolete) -pub const LC_IDENT: u32 = 0x8; -/// fixed VM file inclusion (internal use) -pub const LC_FVMFILE: u32 = 0x9; -/// prepage command (internal use) -pub const LC_PREPAGE: u32 = 0xa; -/// dynamic link-edit symbol table info -pub const LC_DYSYMTAB: u32 = 0xb; -/// load a dynamically linked shared library -pub const LC_LOAD_DYLIB: u32 = 0xc; -/// dynamically linked shared lib ident -pub const LC_ID_DYLIB: u32 = 0xd; -/// load a dynamic linker -pub const LC_LOAD_DYLINKER: u32 = 0xe; -/// dynamic linker identification -pub const LC_ID_DYLINKER: u32 = 0xf; -/// modules prebound for a dynamically linked shared library -pub const LC_PREBOUND_DYLIB: u32 = 0x10; -/// image routines -pub const LC_ROUTINES: u32 = 0x11; -/// sub framework -pub const LC_SUB_FRAMEWORK: u32 = 0x12; -/// sub umbrella -pub const LC_SUB_UMBRELLA: u32 = 0x13; -/// sub client -pub const LC_SUB_CLIENT: u32 = 0x14; -/// sub library -pub const LC_SUB_LIBRARY: u32 = 0x15; -/// two-level namespace lookup hints -pub const LC_TWOLEVEL_HINTS: u32 = 0x16; -/// prebind checksum -pub const LC_PREBIND_CKSUM: u32 = 0x17; -/// load a dynamically linked shared library that is allowed to be missing -/// (all symbols are weak imported). 
-pub const LC_LOAD_WEAK_DYLIB: u32 = 0x18 | LC_REQ_DYLD; -/// 64-bit segment of this file to be mapped -pub const LC_SEGMENT_64: u32 = 0x19; -/// 64-bit image routines -pub const LC_ROUTINES_64: u32 = 0x1a; -/// the uuid -pub const LC_UUID: u32 = 0x1b; -/// runpath additions -pub const LC_RPATH: u32 = 0x1c | LC_REQ_DYLD; -/// local of code signature -pub const LC_CODE_SIGNATURE: u32 = 0x1d; -/// local of info to split segments -pub const LC_SEGMENT_SPLIT_INFO: u32 = 0x1e; -/// load and re-export dylib -pub const LC_REEXPORT_DYLIB: u32 = 0x1f | LC_REQ_DYLD; -/// delay load of dylib until first use -pub const LC_LAZY_LOAD_DYLIB: u32 = 0x20; -/// encrypted segment information -pub const LC_ENCRYPTION_INFO: u32 = 0x21; -/// compressed dyld information -pub const LC_DYLD_INFO: u32 = 0x22; -/// compressed dyld information only -pub const LC_DYLD_INFO_ONLY: u32 = 0x22 | LC_REQ_DYLD; -/// load upward dylib -pub const LC_LOAD_UPWARD_DYLIB: u32 = 0x23 | LC_REQ_DYLD; -/// build for MacOSX min OS version -pub const LC_VERSION_MIN_MACOSX: u32 = 0x24; -/// build for iPhoneOS min OS version -pub const LC_VERSION_MIN_IPHONEOS: u32 = 0x25; -/// compressed table of function start addresses -pub const LC_FUNCTION_STARTS: u32 = 0x26; -/// string for dyld to treat like environment variable -pub const LC_DYLD_ENVIRONMENT: u32 = 0x27; -/// replacement for LC_UNIXTHREAD -pub const LC_MAIN: u32 = 0x28 | LC_REQ_DYLD; -/// table of non-instructions in __text -pub const LC_DATA_IN_CODE: u32 = 0x29; -/// source version used to build binary -pub const LC_SOURCE_VERSION: u32 = 0x2A; -/// Code signing DRs copied from linked dylibs -pub const LC_DYLIB_CODE_SIGN_DRS: u32 = 0x2B; -/// 64-bit encrypted segment information -pub const LC_ENCRYPTION_INFO_64: u32 = 0x2C; -/// linker options in MH_OBJECT files -pub const LC_LINKER_OPTION: u32 = 0x2D; -/// optimization hints in MH_OBJECT files -pub const LC_LINKER_OPTIMIZATION_HINT: u32 = 0x2E; -/// build for AppleTV min OS version -pub const 
LC_VERSION_MIN_TVOS: u32 = 0x2F; -/// build for Watch min OS version -pub const LC_VERSION_MIN_WATCHOS: u32 = 0x30; -/// arbitrary data included within a Mach-O file -pub const LC_NOTE: u32 = 0x31; -/// build for platform min OS version -pub const LC_BUILD_VERSION: u32 = 0x32; -/// used with `LinkeditDataCommand`, payload is trie -pub const LC_DYLD_EXPORTS_TRIE: u32 = 0x33 | LC_REQ_DYLD; -/// used with `LinkeditDataCommand` -pub const LC_DYLD_CHAINED_FIXUPS: u32 = 0x34 | LC_REQ_DYLD; -/// used with `FilesetEntryCommand` -pub const LC_FILESET_ENTRY: u32 = 0x35 | LC_REQ_DYLD; - -/// A variable length string in a load command. -/// -/// The strings are stored just after the load command structure and -/// the offset is from the start of the load command structure. The size -/// of the string is reflected in the `cmdsize` field of the load command. -/// Once again any padded bytes to bring the `cmdsize` field to a multiple -/// of 4 bytes must be zero. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct LcStr { - /// offset to the string - pub offset: U32, -} - -/// 32-bit segment load command. -/// -/// The segment load command indicates that a part of this file is to be -/// mapped into the task's address space. The size of this segment in memory, -/// vmsize, maybe equal to or larger than the amount to map from this file, -/// filesize. The file is mapped starting at fileoff to the beginning of -/// the segment in memory, vmaddr. The rest of the memory of the segment, -/// if any, is allocated zero fill on demand. The segment's maximum virtual -/// memory protection and initial virtual memory protection are specified -/// by the maxprot and initprot fields. If the segment has sections then the -/// `Section32` structures directly follow the segment command and their size is -/// reflected in `cmdsize`. 
-#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SegmentCommand32 { - /// LC_SEGMENT - pub cmd: U32, - /// includes sizeof section structs - pub cmdsize: U32, - /// segment name - pub segname: [u8; 16], - /// memory address of this segment - pub vmaddr: U32, - /// memory size of this segment - pub vmsize: U32, - /// file offset of this segment - pub fileoff: U32, - /// amount to map from the file - pub filesize: U32, - /// maximum VM protection - pub maxprot: U32, - /// initial VM protection - pub initprot: U32, - /// number of sections in segment - pub nsects: U32, - /// flags - pub flags: U32, -} - -/// 64-bit segment load command. -/// -/// The 64-bit segment load command indicates that a part of this file is to be -/// mapped into a 64-bit task's address space. If the 64-bit segment has -/// sections then `Section64` structures directly follow the 64-bit segment -/// command and their size is reflected in `cmdsize`. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SegmentCommand64 { - /// LC_SEGMENT_64 - pub cmd: U32, - /// includes sizeof section_64 structs - pub cmdsize: U32, - /// segment name - pub segname: [u8; 16], - /// memory address of this segment - pub vmaddr: U64, - /// memory size of this segment - pub vmsize: U64, - /// file offset of this segment - pub fileoff: U64, - /// amount to map from the file - pub filesize: U64, - /// maximum VM protection - pub maxprot: U32, - /// initial VM protection - pub initprot: U32, - /// number of sections in segment - pub nsects: U32, - /// flags - pub flags: U32, -} - -// Values for `SegmentCommand*::flags`. 
-/// the file contents for this segment is for the high part of the VM space, the low part is zero filled (for stacks in core files) -pub const SG_HIGHVM: u32 = 0x1; -/// this segment is the VM that is allocated by a fixed VM library, for overlap checking in the link editor -pub const SG_FVMLIB: u32 = 0x2; -/// this segment has nothing that was relocated in it and nothing relocated to it, that is it maybe safely replaced without relocation -pub const SG_NORELOC: u32 = 0x4; -/// This segment is protected. If the segment starts at file offset 0, the first page of the segment is not protected. All other pages of the segment are protected. -pub const SG_PROTECTED_VERSION_1: u32 = 0x8; -/// This segment is made read-only after fixups -pub const SG_READ_ONLY: u32 = 0x10; - -/* - * A segment is made up of zero or more sections. Non-MH_OBJECT files have - * all of their segments with the proper sections in each, and padded to the - * specified segment alignment when produced by the link editor. The first - * segment of a MH_EXECUTE and MH_FVMLIB format file contains the mach_header - * and load commands of the object file before its first section. The zero - * fill sections are always last in their segment (in all formats). This - * allows the zeroed segment padding to be mapped into memory where zero fill - * sections might be. The gigabyte zero fill sections, those with the section - * type S_GB_ZEROFILL, can only be in a segment with sections of this type. - * These segments are then placed after all other segments. - * - * The MH_OBJECT format has all of its sections in one segment for - * compactness. There is no padding to a specified segment boundary and the - * mach_header and load commands are not part of the segment. - * - * Sections with the same section name, sectname, going into the same segment, - * segname, are combined by the link editor. 
The resulting section is aligned - * to the maximum alignment of the combined sections and is the new section's - * alignment. The combined sections are aligned to their original alignment in - * the combined section. Any padded bytes to get the specified alignment are - * zeroed. - * - * The format of the relocation entries referenced by the reloff and nreloc - * fields of the section structure for mach object files is described in the - * header file . - */ -/// 32-bit section. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Section32 { - /// name of this section - pub sectname: [u8; 16], - /// segment this section goes in - pub segname: [u8; 16], - /// memory address of this section - pub addr: U32, - /// size in bytes of this section - pub size: U32, - /// file offset of this section - pub offset: U32, - /// section alignment (power of 2) - pub align: U32, - /// file offset of relocation entries - pub reloff: U32, - /// number of relocation entries - pub nreloc: U32, - /// flags (section type and attributes) - pub flags: U32, - /// reserved (for offset or index) - pub reserved1: U32, - /// reserved (for count or sizeof) - pub reserved2: U32, -} - -/// 64-bit section. 
-#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Section64 { - /// name of this section - pub sectname: [u8; 16], - /// segment this section goes in - pub segname: [u8; 16], - /// memory address of this section - pub addr: U64, - /// size in bytes of this section - pub size: U64, - /// file offset of this section - pub offset: U32, - /// section alignment (power of 2) - pub align: U32, - /// file offset of relocation entries - pub reloff: U32, - /// number of relocation entries - pub nreloc: U32, - /// flags (section type and attributes) - pub flags: U32, - /// reserved (for offset or index) - pub reserved1: U32, - /// reserved (for count or sizeof) - pub reserved2: U32, - /// reserved - pub reserved3: U32, -} - -/* - * The flags field of a section structure is separated into two parts a section - * type and section attributes. The section types are mutually exclusive (it - * can only have one type) but the section attributes are not (it may have more - * than one attribute). - */ -/// 256 section types -pub const SECTION_TYPE: u32 = 0x0000_00ff; -/// 24 section attributes -pub const SECTION_ATTRIBUTES: u32 = 0xffff_ff00; - -/* Constants for the type of a section */ -/// regular section -pub const S_REGULAR: u32 = 0x0; -/// zero fill on demand section -pub const S_ZEROFILL: u32 = 0x1; -/// section with only literal C strings -pub const S_CSTRING_LITERALS: u32 = 0x2; -/// section with only 4 byte literals -pub const S_4BYTE_LITERALS: u32 = 0x3; -/// section with only 8 byte literals -pub const S_8BYTE_LITERALS: u32 = 0x4; -/// section with only pointers to literals -pub const S_LITERAL_POINTERS: u32 = 0x5; -/* - * For the two types of symbol pointers sections and the symbol stubs section - * they have indirect symbol table entries. For each of the entries in the - * section the indirect symbol table entries, in corresponding order in the - * indirect symbol table, start at the index stored in the reserved1 field - * of the section structure. 
Since the indirect symbol table entries - * correspond to the entries in the section the number of indirect symbol table - * entries is inferred from the size of the section divided by the size of the - * entries in the section. For symbol pointers sections the size of the entries - * in the section is 4 bytes and for symbol stubs sections the byte size of the - * stubs is stored in the reserved2 field of the section structure. - */ -/// section with only non-lazy symbol pointers -pub const S_NON_LAZY_SYMBOL_POINTERS: u32 = 0x6; -/// section with only lazy symbol pointers -pub const S_LAZY_SYMBOL_POINTERS: u32 = 0x7; -/// section with only symbol stubs, byte size of stub in the reserved2 field -pub const S_SYMBOL_STUBS: u32 = 0x8; -/// section with only function pointers for initialization -pub const S_MOD_INIT_FUNC_POINTERS: u32 = 0x9; -/// section with only function pointers for termination -pub const S_MOD_TERM_FUNC_POINTERS: u32 = 0xa; -/// section contains symbols that are to be coalesced -pub const S_COALESCED: u32 = 0xb; -/// zero fill on demand section (that can be larger than 4 gigabytes) -pub const S_GB_ZEROFILL: u32 = 0xc; -/// section with only pairs of function pointers for interposing -pub const S_INTERPOSING: u32 = 0xd; -/// section with only 16 byte literals -pub const S_16BYTE_LITERALS: u32 = 0xe; -/// section contains DTrace Object Format -pub const S_DTRACE_DOF: u32 = 0xf; -/// section with only lazy symbol pointers to lazy loaded dylibs -pub const S_LAZY_DYLIB_SYMBOL_POINTERS: u32 = 0x10; -/* - * Section types to support thread local variables - */ -/// template of initial values for TLVs -pub const S_THREAD_LOCAL_REGULAR: u32 = 0x11; -/// template of initial values for TLVs -pub const S_THREAD_LOCAL_ZEROFILL: u32 = 0x12; -/// TLV descriptors -pub const S_THREAD_LOCAL_VARIABLES: u32 = 0x13; -/// pointers to TLV descriptors -pub const S_THREAD_LOCAL_VARIABLE_POINTERS: u32 = 0x14; -/// functions to call to initialize TLV values -pub const 
S_THREAD_LOCAL_INIT_FUNCTION_POINTERS: u32 = 0x15; -/// 32-bit offsets to initializers -pub const S_INIT_FUNC_OFFSETS: u32 = 0x16; - -/* - * Constants for the section attributes part of the flags field of a section - * structure. - */ -/// User setable attributes -pub const SECTION_ATTRIBUTES_USR: u32 = 0xff00_0000; -/// section contains only true machine instructions -pub const S_ATTR_PURE_INSTRUCTIONS: u32 = 0x8000_0000; -/// section contains coalesced symbols that are not to be in a ranlib table of contents -pub const S_ATTR_NO_TOC: u32 = 0x4000_0000; -/// ok to strip static symbols in this section in files with the MH_DYLDLINK flag -pub const S_ATTR_STRIP_STATIC_SYMS: u32 = 0x2000_0000; -/// no dead stripping -pub const S_ATTR_NO_DEAD_STRIP: u32 = 0x1000_0000; -/// blocks are live if they reference live blocks -pub const S_ATTR_LIVE_SUPPORT: u32 = 0x0800_0000; -/// Used with i386 code stubs written on by dyld -pub const S_ATTR_SELF_MODIFYING_CODE: u32 = 0x0400_0000; -/* - * If a segment contains any sections marked with S_ATTR_DEBUG then all - * sections in that segment must have this attribute. No section other than - * a section marked with this attribute may reference the contents of this - * section. A section with this attribute may contain no symbols and must have - * a section type S_REGULAR. The static linker will not copy section contents - * from sections with this attribute into its output file. These sections - * generally contain DWARF debugging info. 
- */ -/// a debug section -pub const S_ATTR_DEBUG: u32 = 0x0200_0000; -/// system setable attributes -pub const SECTION_ATTRIBUTES_SYS: u32 = 0x00ff_ff00; -/// section contains some machine instructions -pub const S_ATTR_SOME_INSTRUCTIONS: u32 = 0x0000_0400; -/// section has external relocation entries -pub const S_ATTR_EXT_RELOC: u32 = 0x0000_0200; -/// section has local relocation entries -pub const S_ATTR_LOC_RELOC: u32 = 0x0000_0100; - -/* - * The names of segments and sections in them are mostly meaningless to the - * link-editor. But there are few things to support traditional UNIX - * executables that require the link-editor and assembler to use some names - * agreed upon by convention. - * - * The initial protection of the "__TEXT" segment has write protection turned - * off (not writeable). - * - * The link-editor will allocate common symbols at the end of the "__common" - * section in the "__DATA" segment. It will create the section and segment - * if needed. - */ - -/* The currently known segment names and the section names in those segments */ - -/// the pagezero segment which has no protections and catches NULL references for MH_EXECUTE files -pub const SEG_PAGEZERO: &str = "__PAGEZERO"; - -/// the tradition UNIX text segment -pub const SEG_TEXT: &str = "__TEXT"; -/// the real text part of the text section no headers, and no padding -pub const SECT_TEXT: &str = "__text"; -/// the fvmlib initialization section -pub const SECT_FVMLIB_INIT0: &str = "__fvmlib_init0"; -/// the section following the fvmlib initialization section -pub const SECT_FVMLIB_INIT1: &str = "__fvmlib_init1"; - -/// the tradition UNIX data segment -pub const SEG_DATA: &str = "__DATA"; -/// the real initialized data section no padding, no bss overlap -pub const SECT_DATA: &str = "__data"; -/// the real uninitialized data section no padding -pub const SECT_BSS: &str = "__bss"; -/// the section common symbols are allocated in by the link editor -pub const SECT_COMMON: &str = "__common"; 
- -/// objective-C runtime segment -pub const SEG_OBJC: &str = "__OBJC"; -/// symbol table -pub const SECT_OBJC_SYMBOLS: &str = "__symbol_table"; -/// module information -pub const SECT_OBJC_MODULES: &str = "__module_info"; -/// string table -pub const SECT_OBJC_STRINGS: &str = "__selector_strs"; -/// string table -pub const SECT_OBJC_REFS: &str = "__selector_refs"; - -/// the icon segment -pub const SEG_ICON: &str = "__ICON"; -/// the icon headers -pub const SECT_ICON_HEADER: &str = "__header"; -/// the icons in tiff format -pub const SECT_ICON_TIFF: &str = "__tiff"; - -/// the segment containing all structs created and maintained by the link editor. Created with -seglinkedit option to ld(1) for MH_EXECUTE and FVMLIB file types only -pub const SEG_LINKEDIT: &str = "__LINKEDIT"; - -/// the segment overlapping with linkedit containing linking information -pub const SEG_LINKINFO: &str = "__LINKINFO"; - -/// the unix stack segment -pub const SEG_UNIXSTACK: &str = "__UNIXSTACK"; - -/// the segment for the self (dyld) modifying code stubs that has read, write and execute permissions -pub const SEG_IMPORT: &str = "__IMPORT"; - -/* - * Fixed virtual memory shared libraries are identified by two things. The - * target pathname (the name of the library as found for execution), and the - * minor version number. The address of where the headers are loaded is in - * header_addr. (THIS IS OBSOLETE and no longer supported). - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Fvmlib { - /// library's target pathname - pub name: LcStr, - /// library's minor version number - pub minor_version: U32, - /// library's header address - pub header_addr: U32, -} - -/* - * A fixed virtual shared library (filetype == MH_FVMLIB in the mach header) - * contains a `FvmlibCommand` (cmd == LC_IDFVMLIB) to identify the library. - * An object that uses a fixed virtual shared library also contains a - * `FvmlibCommand` (cmd == LC_LOADFVMLIB) for each library it uses. 
- * (THIS IS OBSOLETE and no longer supported). - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FvmlibCommand { - /// LC_IDFVMLIB or LC_LOADFVMLIB - pub cmd: U32, - /// includes pathname string - pub cmdsize: U32, - /// the library identification - pub fvmlib: Fvmlib, -} - -/* - * Dynamically linked shared libraries are identified by two things. The - * pathname (the name of the library as found for execution), and the - * compatibility version number. The pathname must match and the compatibility - * number in the user of the library must be greater than or equal to the - * library being used. The time stamp is used to record the time a library was - * built and copied into user so it can be use to determined if the library used - * at runtime is exactly the same as used to built the program. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Dylib { - /// library's path name - pub name: LcStr, - /// library's build time stamp - pub timestamp: U32, - /// library's current version number - pub current_version: U32, - /// library's compatibility vers number - pub compatibility_version: U32, -} - -/* - * A dynamically linked shared library (filetype == MH_DYLIB in the mach header) - * contains a `DylibCommand` (cmd == LC_ID_DYLIB) to identify the library. - * An object that uses a dynamically linked shared library also contains a - * `DylibCommand` (cmd == LC_LOAD_DYLIB, LC_LOAD_WEAK_DYLIB, or - * LC_REEXPORT_DYLIB) for each library it uses. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DylibCommand { - /// LC_ID_DYLIB, LC_LOAD_{,WEAK_}DYLIB, LC_REEXPORT_DYLIB - pub cmd: U32, - /// includes pathname string - pub cmdsize: U32, - /// the library identification - pub dylib: Dylib, -} - -/* - * A dynamically linked shared library may be a subframework of an umbrella - * framework. If so it will be linked with "-umbrella umbrella_name" where - * Where "umbrella_name" is the name of the umbrella framework. 
A subframework - * can only be linked against by its umbrella framework or other subframeworks - * that are part of the same umbrella framework. Otherwise the static link - * editor produces an error and states to link against the umbrella framework. - * The name of the umbrella framework for subframeworks is recorded in the - * following structure. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SubFrameworkCommand { - /// LC_SUB_FRAMEWORK - pub cmd: U32, - /// includes umbrella string - pub cmdsize: U32, - /// the umbrella framework name - pub umbrella: LcStr, -} - -/* - * For dynamically linked shared libraries that are subframework of an umbrella - * framework they can allow clients other than the umbrella framework or other - * subframeworks in the same umbrella framework. To do this the subframework - * is built with "-allowable_client client_name" and an LC_SUB_CLIENT load - * command is created for each -allowable_client flag. The client_name is - * usually a framework name. It can also be a name used for bundles clients - * where the bundle is built with "-client_name client_name". - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SubClientCommand { - /// LC_SUB_CLIENT - pub cmd: U32, - /// includes client string - pub cmdsize: U32, - /// the client name - pub client: LcStr, -} - -/* - * A dynamically linked shared library may be a sub_umbrella of an umbrella - * framework. If so it will be linked with "-sub_umbrella umbrella_name" where - * Where "umbrella_name" is the name of the sub_umbrella framework. When - * statically linking when -twolevel_namespace is in effect a twolevel namespace - * umbrella framework will only cause its subframeworks and those frameworks - * listed as sub_umbrella frameworks to be implicited linked in. Any other - * dependent dynamic libraries will not be linked it when -twolevel_namespace - * is in effect. 
The primary library recorded by the static linker when - * resolving a symbol in these libraries will be the umbrella framework. - * Zero or more sub_umbrella frameworks may be use by an umbrella framework. - * The name of a sub_umbrella framework is recorded in the following structure. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SubUmbrellaCommand { - /// LC_SUB_UMBRELLA - pub cmd: U32, - /// includes sub_umbrella string - pub cmdsize: U32, - /// the sub_umbrella framework name - pub sub_umbrella: LcStr, -} - -/* - * A dynamically linked shared library may be a sub_library of another shared - * library. If so it will be linked with "-sub_library library_name" where - * Where "library_name" is the name of the sub_library shared library. When - * statically linking when -twolevel_namespace is in effect a twolevel namespace - * shared library will only cause its subframeworks and those frameworks - * listed as sub_umbrella frameworks and libraries listed as sub_libraries to - * be implicited linked in. Any other dependent dynamic libraries will not be - * linked it when -twolevel_namespace is in effect. The primary library - * recorded by the static linker when resolving a symbol in these libraries - * will be the umbrella framework (or dynamic library). Zero or more sub_library - * shared libraries may be use by an umbrella framework or (or dynamic library). - * The name of a sub_library framework is recorded in the following structure. - * For example /usr/lib/libobjc_profile.A.dylib would be recorded as "libobjc". - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SubLibraryCommand { - /// LC_SUB_LIBRARY - pub cmd: U32, - /// includes sub_library string - pub cmdsize: U32, - /// the sub_library name - pub sub_library: LcStr, -} - -/* - * A program (filetype == MH_EXECUTE) that is - * prebound to its dynamic libraries has one of these for each library that - * the static linker used in prebinding. 
It contains a bit vector for the - * modules in the library. The bits indicate which modules are bound (1) and - * which are not (0) from the library. The bit for module 0 is the low bit - * of the first byte. So the bit for the Nth module is: - * (linked_modules[N/8] >> N%8) & 1 - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct PreboundDylibCommand { - /// LC_PREBOUND_DYLIB - pub cmd: U32, - /// includes strings - pub cmdsize: U32, - /// library's path name - pub name: LcStr, - /// number of modules in library - pub nmodules: U32, - /// bit vector of linked modules - pub linked_modules: LcStr, -} - -/* - * A program that uses a dynamic linker contains a `DylinkerCommand` to identify - * the name of the dynamic linker (LC_LOAD_DYLINKER). And a dynamic linker - * contains a `DylinkerCommand` to identify the dynamic linker (LC_ID_DYLINKER). - * A file can have at most one of these. - * This struct is also used for the LC_DYLD_ENVIRONMENT load command and - * contains string for dyld to treat like environment variable. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DylinkerCommand { - /// LC_ID_DYLINKER, LC_LOAD_DYLINKER or LC_DYLD_ENVIRONMENT - pub cmd: U32, - /// includes pathname string - pub cmdsize: U32, - /// dynamic linker's path name - pub name: LcStr, -} - -/* - * Thread commands contain machine-specific data structures suitable for - * use in the thread state primitives. The machine specific data structures - * follow the struct `ThreadCommand` as follows. - * Each flavor of machine specific data structure is preceded by an uint32_t - * constant for the flavor of that data structure, an uint32_t that is the - * count of uint32_t's of the size of the state data structure and then - * the state data structure follows. This triple may be repeated for many - * flavors. The constants for the flavors, counts and state data structure - * definitions are expected to be in the header file . 
- * These machine specific data structures sizes must be multiples of - * 4 bytes. The `cmdsize` reflects the total size of the `ThreadCommand` - * and all of the sizes of the constants for the flavors, counts and state - * data structures. - * - * For executable objects that are unix processes there will be one - * `ThreadCommand` (cmd == LC_UNIXTHREAD) created for it by the link-editor. - * This is the same as a LC_THREAD, except that a stack is automatically - * created (based on the shell's limit for the stack size). Command arguments - * and environment variables are copied onto that stack. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ThreadCommand { - /// LC_THREAD or LC_UNIXTHREAD - pub cmd: U32, - /// total size of this command - pub cmdsize: U32, - /* uint32_t flavor flavor of thread state */ - /* uint32_t count count of uint32_t's in thread state */ - /* struct XXX_thread_state state thread state for this flavor */ - /* ... */ -} - -/* - * The routines command contains the address of the dynamic shared library - * initialization routine and an index into the module table for the module - * that defines the routine. Before any modules are used from the library the - * dynamic linker fully binds the module that defines the initialization routine - * and then calls it. This gets called before any module initialization - * routines (used for C++ static constructors) in the library. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct RoutinesCommand32 { - /* for 32-bit architectures */ - /// LC_ROUTINES - pub cmd: U32, - /// total size of this command - pub cmdsize: U32, - /// address of initialization routine - pub init_address: U32, - /// index into the module table that the init routine is defined in - pub init_module: U32, - pub reserved1: U32, - pub reserved2: U32, - pub reserved3: U32, - pub reserved4: U32, - pub reserved5: U32, - pub reserved6: U32, -} - -/* - * The 64-bit routines command. Same use as above. 
- */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct RoutinesCommand64 { - /* for 64-bit architectures */ - /// LC_ROUTINES_64 - pub cmd: U32, - /// total size of this command - pub cmdsize: U32, - /// address of initialization routine - pub init_address: U64, - /// index into the module table that the init routine is defined in - pub init_module: U64, - pub reserved1: U64, - pub reserved2: U64, - pub reserved3: U64, - pub reserved4: U64, - pub reserved5: U64, - pub reserved6: U64, -} - -/* - * The `SymtabCommand` contains the offsets and sizes of the link-edit 4.3BSD - * "stab" style symbol table information as described in the header files - * and . - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SymtabCommand { - /// LC_SYMTAB - pub cmd: U32, - /// sizeof(struct SymtabCommand) - pub cmdsize: U32, - /// symbol table offset - pub symoff: U32, - /// number of symbol table entries - pub nsyms: U32, - /// string table offset - pub stroff: U32, - /// string table size in bytes - pub strsize: U32, -} - -/* - * This is the second set of the symbolic information which is used to support - * the data structures for the dynamically link editor. - * - * The original set of symbolic information in the `SymtabCommand` which contains - * the symbol and string tables must also be present when this load command is - * present. When this load command is present the symbol table is organized - * into three groups of symbols: - * local symbols (static and debugging symbols) - grouped by module - * defined external symbols - grouped by module (sorted by name if not lib) - * undefined external symbols (sorted by name if MH_BINDATLOAD is not set, - * and in order the were seen by the static - * linker if MH_BINDATLOAD is set) - * In this load command there are offsets and counts to each of the three groups - * of symbols. 
- * - * This load command contains a the offsets and sizes of the following new - * symbolic information tables: - * table of contents - * module table - * reference symbol table - * indirect symbol table - * The first three tables above (the table of contents, module table and - * reference symbol table) are only present if the file is a dynamically linked - * shared library. For executable and object modules, which are files - * containing only one module, the information that would be in these three - * tables is determined as follows: - * table of contents - the defined external symbols are sorted by name - * module table - the file contains only one module so everything in the - * file is part of the module. - * reference symbol table - is the defined and undefined external symbols - * - * For dynamically linked shared library files this load command also contains - * offsets and sizes to the pool of relocation entries for all sections - * separated into two groups: - * external relocation entries - * local relocation entries - * For executable and object modules the relocation entries continue to hang - * off the section structures. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DysymtabCommand { - /// LC_DYSYMTAB - pub cmd: U32, - /// sizeof(struct DysymtabCommand) - pub cmdsize: U32, - - /* - * The symbols indicated by symoff and nsyms of the LC_SYMTAB load command - * are grouped into the following three groups: - * local symbols (further grouped by the module they are from) - * defined external symbols (further grouped by the module they are from) - * undefined symbols - * - * The local symbols are used only for debugging. The dynamic binding - * process may have to use them to indicate to the debugger the local - * symbols for a module that is being bound. 
- * - * The last two groups are used by the dynamic binding process to do the - * binding (indirectly through the module table and the reference symbol - * table when this is a dynamically linked shared library file). - */ - /// index to local symbols - pub ilocalsym: U32, - /// number of local symbols - pub nlocalsym: U32, - - /// index to externally defined symbols - pub iextdefsym: U32, - /// number of externally defined symbols - pub nextdefsym: U32, - - /// index to undefined symbols - pub iundefsym: U32, - /// number of undefined symbols - pub nundefsym: U32, - - /* - * For the for the dynamic binding process to find which module a symbol - * is defined in the table of contents is used (analogous to the ranlib - * structure in an archive) which maps defined external symbols to modules - * they are defined in. This exists only in a dynamically linked shared - * library file. For executable and object modules the defined external - * symbols are sorted by name and is use as the table of contents. - */ - /// file offset to table of contents - pub tocoff: U32, - /// number of entries in table of contents - pub ntoc: U32, - - /* - * To support dynamic binding of "modules" (whole object files) the symbol - * table must reflect the modules that the file was created from. This is - * done by having a module table that has indexes and counts into the merged - * tables for each module. The module structure that these two entries - * refer to is described below. This exists only in a dynamically linked - * shared library file. For executable and object modules the file only - * contains one module so everything in the file belongs to the module. - */ - /// file offset to module table - pub modtaboff: U32, - /// number of module table entries - pub nmodtab: U32, - - /* - * To support dynamic module binding the module structure for each module - * indicates the external references (defined and undefined) each module - * makes. 
For each module there is an offset and a count into the - * reference symbol table for the symbols that the module references. - * This exists only in a dynamically linked shared library file. For - * executable and object modules the defined external symbols and the - * undefined external symbols indicates the external references. - */ - /// offset to referenced symbol table - pub extrefsymoff: U32, - /// number of referenced symbol table entries - pub nextrefsyms: U32, - - /* - * The sections that contain "symbol pointers" and "routine stubs" have - * indexes and (implied counts based on the size of the section and fixed - * size of the entry) into the "indirect symbol" table for each pointer - * and stub. For every section of these two types the index into the - * indirect symbol table is stored in the section header in the field - * reserved1. An indirect symbol table entry is simply a 32bit index into - * the symbol table to the symbol that the pointer or stub is referring to. - * The indirect symbol table is ordered to match the entries in the section. - */ - /// file offset to the indirect symbol table - pub indirectsymoff: U32, - /// number of indirect symbol table entries - pub nindirectsyms: U32, - - /* - * To support relocating an individual module in a library file quickly the - * external relocation entries for each module in the library need to be - * accessed efficiently. Since the relocation entries can't be accessed - * through the section headers for a library file they are separated into - * groups of local and external entries further grouped by module. In this - * case the presents of this load command who's extreloff, nextrel, - * locreloff and nlocrel fields are non-zero indicates that the relocation - * entries of non-merged sections are not referenced through the section - * structures (and the reloff and nreloc fields in the section headers are - * set to zero). 
- * - * Since the relocation entries are not accessed through the section headers - * this requires the r_address field to be something other than a section - * offset to identify the item to be relocated. In this case r_address is - * set to the offset from the vmaddr of the first LC_SEGMENT command. - * For MH_SPLIT_SEGS images r_address is set to the the offset from the - * vmaddr of the first read-write LC_SEGMENT command. - * - * The relocation entries are grouped by module and the module table - * entries have indexes and counts into them for the group of external - * relocation entries for that the module. - * - * For sections that are merged across modules there must not be any - * remaining external relocation entries for them (for merged sections - * remaining relocation entries must be local). - */ - /// offset to external relocation entries - pub extreloff: U32, - /// number of external relocation entries - pub nextrel: U32, - - /* - * All the local relocation entries are grouped together (they are not - * grouped by their module since they are only used if the object is moved - * from it statically link edited address). - */ - /// offset to local relocation entries - pub locreloff: U32, - /// number of local relocation entries - pub nlocrel: U32, -} - -/* - * An indirect symbol table entry is simply a 32bit index into the symbol table - * to the symbol that the pointer or stub is referring to. Unless it is for a - * non-lazy symbol pointer section for a defined symbol which strip(1) as - * removed. In which case it has the value INDIRECT_SYMBOL_LOCAL. If the - * symbol was also absolute INDIRECT_SYMBOL_ABS is or'ed with that. 
- */ -pub const INDIRECT_SYMBOL_LOCAL: u32 = 0x8000_0000; -pub const INDIRECT_SYMBOL_ABS: u32 = 0x4000_0000; - -/* a table of contents entry */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DylibTableOfContents { - /// the defined external symbol (index into the symbol table) - pub symbol_index: U32, - /// index into the module table this symbol is defined in - pub module_index: U32, -} - -/* a module table entry */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DylibModule32 { - /// the module name (index into string table) - pub module_name: U32, - - /// index into externally defined symbols - pub iextdefsym: U32, - /// number of externally defined symbols - pub nextdefsym: U32, - /// index into reference symbol table - pub irefsym: U32, - /// number of reference symbol table entries - pub nrefsym: U32, - /// index into symbols for local symbols - pub ilocalsym: U32, - /// number of local symbols - pub nlocalsym: U32, - - /// index into external relocation entries - pub iextrel: U32, - /// number of external relocation entries - pub nextrel: U32, - - /// low 16 bits are the index into the init section, high 16 bits are the index into the term section - pub iinit_iterm: U32, - /// low 16 bits are the number of init section entries, high 16 bits are the number of term section entries - pub ninit_nterm: U32, - - /// for this module address of the start of the (__OBJC,__module_info) section - pub objc_module_info_addr: U32, - /// for this module size of the (__OBJC,__module_info) section - pub objc_module_info_size: U32, -} - -/* a 64-bit module table entry */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DylibModule64 { - /// the module name (index into string table) - pub module_name: U32, - - /// index into externally defined symbols - pub iextdefsym: U32, - /// number of externally defined symbols - pub nextdefsym: U32, - /// index into reference symbol table - pub irefsym: U32, - /// number of reference symbol table entries - pub 
nrefsym: U32, - /// index into symbols for local symbols - pub ilocalsym: U32, - /// number of local symbols - pub nlocalsym: U32, - - /// index into external relocation entries - pub iextrel: U32, - /// number of external relocation entries - pub nextrel: U32, - - /// low 16 bits are the index into the init section, high 16 bits are the index into the term section - pub iinit_iterm: U32, - /// low 16 bits are the number of init section entries, high 16 bits are the number of term section entries - pub ninit_nterm: U32, - - /// for this module size of the (__OBJC,__module_info) section - pub objc_module_info_size: U32, - /// for this module address of the start of the (__OBJC,__module_info) section - pub objc_module_info_addr: U64, -} - -/* - * The entries in the reference symbol table are used when loading the module - * (both by the static and dynamic link editors) and if the module is unloaded - * or replaced. Therefore all external symbols (defined and undefined) are - * listed in the module's reference table. The flags describe the type of - * reference that is being made. The constants for the flags are defined in - * as they are also used for symbol table entries. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DylibReference { - /* TODO: - uint32_t isym:24, /* index into the symbol table */ - flags:8; /* flags to indicate the type of reference */ - */ - pub bitfield: U32, -} - -/* - * The TwolevelHintsCommand contains the offset and number of hints in the - * two-level namespace lookup hints table. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct TwolevelHintsCommand { - /// LC_TWOLEVEL_HINTS - pub cmd: U32, - /// sizeof(struct TwolevelHintsCommand) - pub cmdsize: U32, - /// offset to the hint table - pub offset: U32, - /// number of hints in the hint table - pub nhints: U32, -} - -/* - * The entries in the two-level namespace lookup hints table are TwolevelHint - * structs. 
These provide hints to the dynamic link editor where to start - * looking for an undefined symbol in a two-level namespace image. The - * isub_image field is an index into the sub-images (sub-frameworks and - * sub-umbrellas list) that made up the two-level image that the undefined - * symbol was found in when it was built by the static link editor. If - * isub-image is 0 the the symbol is expected to be defined in library and not - * in the sub-images. If isub-image is non-zero it is an index into the array - * of sub-images for the umbrella with the first index in the sub-images being - * 1. The array of sub-images is the ordered list of sub-images of the umbrella - * that would be searched for a symbol that has the umbrella recorded as its - * primary library. The table of contents index is an index into the - * library's table of contents. This is used as the starting point of the - * binary search or a directed linear search. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct TwolevelHint { - /* TODO: - uint32_t - isub_image:8, /* index into the sub images */ - itoc:24; /* index into the table of contents */ - */ - pub bitfield: U32, -} - -/* - * The PrebindCksumCommand contains the value of the original check sum for - * prebound files or zero. When a prebound file is first created or modified - * for other than updating its prebinding information the value of the check sum - * is set to zero. When the file has it prebinding re-done and if the value of - * the check sum is zero the original check sum is calculated and stored in - * cksum field of this load command in the output file. If when the prebinding - * is re-done and the cksum field is non-zero it is left unchanged from the - * input file. 
- */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct PrebindCksumCommand { - /// LC_PREBIND_CKSUM - pub cmd: U32, - /// sizeof(struct PrebindCksumCommand) - pub cmdsize: U32, - /// the check sum or zero - pub cksum: U32, -} - -/* - * The uuid load command contains a single 128-bit unique random number that - * identifies an object produced by the static link editor. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct UuidCommand { - /// LC_UUID - pub cmd: U32, - /// sizeof(struct UuidCommand) - pub cmdsize: U32, - /// the 128-bit uuid - pub uuid: [u8; 16], -} - -/* - * The RpathCommand contains a path which at runtime should be added to - * the current run path used to find @rpath prefixed dylibs. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct RpathCommand { - /// LC_RPATH - pub cmd: U32, - /// includes string - pub cmdsize: U32, - /// path to add to run path - pub path: LcStr, -} - -/* - * The LinkeditDataCommand contains the offsets and sizes of a blob - * of data in the __LINKEDIT segment. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct LinkeditDataCommand { - /// `LC_CODE_SIGNATURE`, `LC_SEGMENT_SPLIT_INFO`, `LC_FUNCTION_STARTS`, - /// `LC_DATA_IN_CODE`, `LC_DYLIB_CODE_SIGN_DRS`, `LC_LINKER_OPTIMIZATION_HINT`, - /// `LC_DYLD_EXPORTS_TRIE`, or `LC_DYLD_CHAINED_FIXUPS`. 
- pub cmd: U32, - /// sizeof(struct LinkeditDataCommand) - pub cmdsize: U32, - /// file offset of data in __LINKEDIT segment - pub dataoff: U32, - /// file size of data in __LINKEDIT segment - pub datasize: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FilesetEntryCommand { - // LC_FILESET_ENTRY - pub cmd: U32, - /// includes id string - pub cmdsize: U32, - /// memory address of the dylib - pub vmaddr: U64, - /// file offset of the dylib - pub fileoff: U64, - /// contained entry id - pub entry_id: LcStr, - /// entry_id is 32-bits long, so this is the reserved padding - pub reserved: U32, -} - -/* - * The EncryptionInfoCommand32 contains the file offset and size of an - * of an encrypted segment. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct EncryptionInfoCommand32 { - /// LC_ENCRYPTION_INFO - pub cmd: U32, - /// sizeof(struct EncryptionInfoCommand32) - pub cmdsize: U32, - /// file offset of encrypted range - pub cryptoff: U32, - /// file size of encrypted range - pub cryptsize: U32, - /// which enryption system, 0 means not-encrypted yet - pub cryptid: U32, -} - -/* - * The EncryptionInfoCommand64 contains the file offset and size of an - * of an encrypted segment (for use in x86_64 targets). - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct EncryptionInfoCommand64 { - /// LC_ENCRYPTION_INFO_64 - pub cmd: U32, - /// sizeof(struct EncryptionInfoCommand64) - pub cmdsize: U32, - /// file offset of encrypted range - pub cryptoff: U32, - /// file size of encrypted range - pub cryptsize: U32, - /// which enryption system, 0 means not-encrypted yet - pub cryptid: U32, - /// padding to make this struct's size a multiple of 8 bytes - pub pad: U32, -} - -/* - * The VersionMinCommand contains the min OS version on which this - * binary was built to run. 
- */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct VersionMinCommand { - /// LC_VERSION_MIN_MACOSX or LC_VERSION_MIN_IPHONEOS or LC_VERSION_MIN_WATCHOS or LC_VERSION_MIN_TVOS - pub cmd: U32, - /// sizeof(struct VersionMinCommand) - pub cmdsize: U32, - /// X.Y.Z is encoded in nibbles xxxx.yy.zz - pub version: U32, - /// X.Y.Z is encoded in nibbles xxxx.yy.zz - pub sdk: U32, -} - -/* - * The BuildVersionCommand contains the min OS version on which this - * binary was built to run for its platform. The list of known platforms and - * tool values following it. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct BuildVersionCommand { - /// LC_BUILD_VERSION - pub cmd: U32, - /// sizeof(struct BuildVersionCommand) plus ntools * sizeof(struct BuildToolVersion) - pub cmdsize: U32, - /// platform - pub platform: U32, - /// X.Y.Z is encoded in nibbles xxxx.yy.zz - pub minos: U32, - /// X.Y.Z is encoded in nibbles xxxx.yy.zz - pub sdk: U32, - /// number of tool entries following this - pub ntools: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct BuildToolVersion { - /// enum for the tool - pub tool: U32, - /// version number of the tool - pub version: U32, -} - -/* Known values for the platform field above. */ -pub const PLATFORM_MACOS: u32 = 1; -pub const PLATFORM_IOS: u32 = 2; -pub const PLATFORM_TVOS: u32 = 3; -pub const PLATFORM_WATCHOS: u32 = 4; -pub const PLATFORM_BRIDGEOS: u32 = 5; -pub const PLATFORM_MACCATALYST: u32 = 6; -pub const PLATFORM_IOSSIMULATOR: u32 = 7; -pub const PLATFORM_TVOSSIMULATOR: u32 = 8; -pub const PLATFORM_WATCHOSSIMULATOR: u32 = 9; -pub const PLATFORM_DRIVERKIT: u32 = 10; - -/* Known values for the tool field above. */ -pub const TOOL_CLANG: u32 = 1; -pub const TOOL_SWIFT: u32 = 2; -pub const TOOL_LD: u32 = 3; - -/* - * The DyldInfoCommand contains the file offsets and sizes of - * the new compressed form of the information dyld needs to - * load the image. 
This information is used by dyld on Mac OS X - * 10.6 and later. All information pointed to by this command - * is encoded using byte streams, so no endian swapping is needed - * to interpret it. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DyldInfoCommand { - /// LC_DYLD_INFO or LC_DYLD_INFO_ONLY - pub cmd: U32, - /// sizeof(struct DyldInfoCommand) - pub cmdsize: U32, - - /* - * Dyld rebases an image whenever dyld loads it at an address different - * from its preferred address. The rebase information is a stream - * of byte sized opcodes whose symbolic names start with REBASE_OPCODE_. - * Conceptually the rebase information is a table of tuples: - * - * The opcodes are a compressed way to encode the table by only - * encoding when a column changes. In addition simple patterns - * like "every n'th offset for m times" can be encoded in a few - * bytes. - */ - /// file offset to rebase info - pub rebase_off: U32, - /// size of rebase info - pub rebase_size: U32, - - /* - * Dyld binds an image during the loading process, if the image - * requires any pointers to be initialized to symbols in other images. - * The bind information is a stream of byte sized - * opcodes whose symbolic names start with BIND_OPCODE_. - * Conceptually the bind information is a table of tuples: - * - * The opcodes are a compressed way to encode the table by only - * encoding when a column changes. In addition simple patterns - * like for runs of pointers initialized to the same value can be - * encoded in a few bytes. - */ - /// file offset to binding info - pub bind_off: U32, - /// size of binding info - pub bind_size: U32, - - /* - * Some C++ programs require dyld to unique symbols so that all - * images in the process use the same copy of some code/data. - * This step is done after binding. The content of the weak_bind - * info is an opcode stream like the bind_info. But it is sorted - * alphabetically by symbol name. 
This enable dyld to walk - * all images with weak binding information in order and look - * for collisions. If there are no collisions, dyld does - * no updating. That means that some fixups are also encoded - * in the bind_info. For instance, all calls to "operator new" - * are first bound to libstdc++.dylib using the information - * in bind_info. Then if some image overrides operator new - * that is detected when the weak_bind information is processed - * and the call to operator new is then rebound. - */ - /// file offset to weak binding info - pub weak_bind_off: U32, - /// size of weak binding info - pub weak_bind_size: U32, - - /* - * Some uses of external symbols do not need to be bound immediately. - * Instead they can be lazily bound on first use. The lazy_bind - * are contains a stream of BIND opcodes to bind all lazy symbols. - * Normal use is that dyld ignores the lazy_bind section when - * loading an image. Instead the static linker arranged for the - * lazy pointer to initially point to a helper function which - * pushes the offset into the lazy_bind area for the symbol - * needing to be bound, then jumps to dyld which simply adds - * the offset to lazy_bind_off to get the information on what - * to bind. - */ - /// file offset to lazy binding info - pub lazy_bind_off: U32, - /// size of lazy binding infs - pub lazy_bind_size: U32, - - /* - * The symbols exported by a dylib are encoded in a trie. This - * is a compact representation that factors out common prefixes. - * It also reduces LINKEDIT pages in RAM because it encodes all - * information (name, address, flags) in one small, contiguous range. - * The export area is a stream of nodes. The first node sequentially - * is the start node for the trie. - * - * Nodes for a symbol start with a uleb128 that is the length of - * the exported symbol information for the string so far. - * If there is no exported symbol, the node starts with a zero byte. - * If there is exported info, it follows the length. 
- * - * First is a uleb128 containing flags. Normally, it is followed by - * a uleb128 encoded offset which is location of the content named - * by the symbol from the mach_header for the image. If the flags - * is EXPORT_SYMBOL_FLAGS_REEXPORT, then following the flags is - * a uleb128 encoded library ordinal, then a zero terminated - * UTF8 string. If the string is zero length, then the symbol - * is re-export from the specified dylib with the same name. - * If the flags is EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER, then following - * the flags is two uleb128s: the stub offset and the resolver offset. - * The stub is used by non-lazy pointers. The resolver is used - * by lazy pointers and must be called to get the actual address to use. - * - * After the optional exported symbol information is a byte of - * how many edges (0-255) that this node has leaving it, - * followed by each edge. - * Each edge is a zero terminated UTF8 of the addition chars - * in the symbol, followed by a uleb128 offset for the node that - * edge points to. 
- * - */ - /// file offset to lazy binding info - pub export_off: U32, - /// size of lazy binding infs - pub export_size: U32, -} - -/* - * The following are used to encode rebasing information - */ -pub const REBASE_TYPE_POINTER: u8 = 1; -pub const REBASE_TYPE_TEXT_ABSOLUTE32: u8 = 2; -pub const REBASE_TYPE_TEXT_PCREL32: u8 = 3; - -pub const REBASE_OPCODE_MASK: u8 = 0xF0; -pub const REBASE_IMMEDIATE_MASK: u8 = 0x0F; -pub const REBASE_OPCODE_DONE: u8 = 0x00; -pub const REBASE_OPCODE_SET_TYPE_IMM: u8 = 0x10; -pub const REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: u8 = 0x20; -pub const REBASE_OPCODE_ADD_ADDR_ULEB: u8 = 0x30; -pub const REBASE_OPCODE_ADD_ADDR_IMM_SCALED: u8 = 0x40; -pub const REBASE_OPCODE_DO_REBASE_IMM_TIMES: u8 = 0x50; -pub const REBASE_OPCODE_DO_REBASE_ULEB_TIMES: u8 = 0x60; -pub const REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB: u8 = 0x70; -pub const REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB: u8 = 0x80; - -/* - * The following are used to encode binding information - */ -pub const BIND_TYPE_POINTER: u8 = 1; -pub const BIND_TYPE_TEXT_ABSOLUTE32: u8 = 2; -pub const BIND_TYPE_TEXT_PCREL32: u8 = 3; - -pub const BIND_SPECIAL_DYLIB_SELF: i8 = 0; -pub const BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE: i8 = -1; -pub const BIND_SPECIAL_DYLIB_FLAT_LOOKUP: i8 = -2; -pub const BIND_SPECIAL_DYLIB_WEAK_LOOKUP: i8 = -3; - -pub const BIND_SYMBOL_FLAGS_WEAK_IMPORT: u8 = 0x1; -pub const BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION: u8 = 0x8; - -pub const BIND_OPCODE_MASK: u8 = 0xF0; -pub const BIND_IMMEDIATE_MASK: u8 = 0x0F; -pub const BIND_OPCODE_DONE: u8 = 0x00; -pub const BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: u8 = 0x10; -pub const BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: u8 = 0x20; -pub const BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: u8 = 0x30; -pub const BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: u8 = 0x40; -pub const BIND_OPCODE_SET_TYPE_IMM: u8 = 0x50; -pub const BIND_OPCODE_SET_ADDEND_SLEB: u8 = 0x60; -pub const BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: u8 = 0x70; -pub const 
BIND_OPCODE_ADD_ADDR_ULEB: u8 = 0x80; -pub const BIND_OPCODE_DO_BIND: u8 = 0x90; -pub const BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: u8 = 0xA0; -pub const BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: u8 = 0xB0; -pub const BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: u8 = 0xC0; -pub const BIND_OPCODE_THREADED: u8 = 0xD0; -pub const BIND_SUBOPCODE_THREADED_SET_BIND_ORDINAL_TABLE_SIZE_ULEB: u8 = 0x00; -pub const BIND_SUBOPCODE_THREADED_APPLY: u8 = 0x01; - -/* - * The following are used on the flags byte of a terminal node - * in the export information. - */ -pub const EXPORT_SYMBOL_FLAGS_KIND_MASK: u32 = 0x03; -pub const EXPORT_SYMBOL_FLAGS_KIND_REGULAR: u32 = 0x00; -pub const EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL: u32 = 0x01; -pub const EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE: u32 = 0x02; -pub const EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION: u32 = 0x04; -pub const EXPORT_SYMBOL_FLAGS_REEXPORT: u32 = 0x08; -pub const EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER: u32 = 0x10; - -/* - * The LinkerOptionCommand contains linker options embedded in object files. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct LinkerOptionCommand { - /// LC_LINKER_OPTION only used in MH_OBJECT filetypes - pub cmd: U32, - pub cmdsize: U32, - /// number of strings - pub count: U32, - /* concatenation of zero terminated UTF8 strings. - Zero filled at end to align */ -} - -/* - * The SymsegCommand contains the offset and size of the GNU style - * symbol table information as described in the header file . - * The symbol roots of the symbol segments must also be aligned properly - * in the file. So the requirement of keeping the offsets aligned to a - * multiple of a 4 bytes translates to the length field of the symbol - * roots also being a multiple of a long. Also the padding must again be - * zeroed. (THIS IS OBSOLETE and no longer supported). 
- */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SymsegCommand { - /// LC_SYMSEG - pub cmd: U32, - /// sizeof(struct SymsegCommand) - pub cmdsize: U32, - /// symbol segment offset - pub offset: U32, - /// symbol segment size in bytes - pub size: U32, -} - -/* - * The IdentCommand contains a free format string table following the - * IdentCommand structure. The strings are null terminated and the size of - * the command is padded out with zero bytes to a multiple of 4 bytes/ - * (THIS IS OBSOLETE and no longer supported). - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct IdentCommand { - /// LC_IDENT - pub cmd: U32, - /// strings that follow this command - pub cmdsize: U32, -} - -/* - * The FvmfileCommand contains a reference to a file to be loaded at the - * specified virtual address. (Presently, this command is reserved for - * internal use. The kernel ignores this command when loading a program into - * memory). - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FvmfileCommand { - /// LC_FVMFILE - pub cmd: U32, - /// includes pathname string - pub cmdsize: U32, - /// files pathname - pub name: LcStr, - /// files virtual address - pub header_addr: U32, -} - -/* - * The EntryPointCommand is a replacement for thread_command. - * It is used for main executables to specify the location (file offset) - * of main(). If -stack_size was used at link time, the stacksize - * field will contain the stack size need for the main thread. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct EntryPointCommand { - /// LC_MAIN only used in MH_EXECUTE filetypes - pub cmd: U32, - /// 24 - pub cmdsize: U32, - /// file (__TEXT) offset of main() - pub entryoff: U64, - /// if not zero, initial stack size - pub stacksize: U64, -} - -/* - * The SourceVersionCommand is an optional load command containing - * the version of the sources used to build the binary. 
- */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SourceVersionCommand { - /// LC_SOURCE_VERSION - pub cmd: U32, - /// 16 - pub cmdsize: U32, - /// A.B.C.D.E packed as a24.b10.c10.d10.e10 - pub version: U64, -} - -/* - * The LC_DATA_IN_CODE load commands uses a LinkeditDataCommand - * to point to an array of DataInCodeEntry entries. Each entry - * describes a range of data in a code section. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DataInCodeEntry { - /// from mach_header to start of data range - pub offset: U32, - /// number of bytes in data range - pub length: U16, - /// a DICE_KIND_* value - pub kind: U16, -} -pub const DICE_KIND_DATA: u32 = 0x0001; -pub const DICE_KIND_JUMP_TABLE8: u32 = 0x0002; -pub const DICE_KIND_JUMP_TABLE16: u32 = 0x0003; -pub const DICE_KIND_JUMP_TABLE32: u32 = 0x0004; -pub const DICE_KIND_ABS_JUMP_TABLE32: u32 = 0x0005; - -/* - * Sections of type S_THREAD_LOCAL_VARIABLES contain an array - * of TlvDescriptor structures. - */ -/* TODO: -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct TlvDescriptor -{ - void* (*thunk)(struct TlvDescriptor*); - unsigned long key; - unsigned long offset; -} -*/ - -/* - * LC_NOTE commands describe a region of arbitrary data included in a Mach-O - * file. Its initial use is to record extra data in MH_CORE files. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct NoteCommand { - /// LC_NOTE - pub cmd: U32, - /// sizeof(struct NoteCommand) - pub cmdsize: U32, - /// owner name for this LC_NOTE - pub data_owner: [u8; 16], - /// file offset of this data - pub offset: U64, - /// length of data region - pub size: U64, -} - -// Definitions from "/usr/include/mach-o/nlist.h". 
- -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Nlist32 { - /// index into the string table - pub n_strx: U32, - /// type flag, see below - pub n_type: u8, - /// section number or NO_SECT - pub n_sect: u8, - /// see - pub n_desc: U16, - /// value of this symbol (or stab offset) - pub n_value: U32, -} - -/* - * This is the symbol table entry structure for 64-bit architectures. - */ -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Nlist64 { - /// index into the string table - pub n_strx: U32, - /// type flag, see below - pub n_type: u8, - /// section number or NO_SECT - pub n_sect: u8, - /// see - pub n_desc: U16, - /// value of this symbol (or stab offset) - // Note: 4 byte alignment has been observed in practice. - pub n_value: U64Bytes, -} - -/* - * Symbols with a index into the string table of zero (n_un.n_strx == 0) are - * defined to have a null, "", name. Therefore all string indexes to non null - * names must not have a zero string index. This is bit historical information - * that has never been well documented. - */ - -/* - * The n_type field really contains four fields: - * unsigned char N_STAB:3, - * N_PEXT:1, - * N_TYPE:3, - * N_EXT:1; - * which are used via the following masks. - */ -/// if any of these bits set, a symbolic debugging entry -pub const N_STAB: u8 = 0xe0; -/// private external symbol bit -pub const N_PEXT: u8 = 0x10; -/// mask for the type bits -pub const N_TYPE: u8 = 0x0e; -/// external symbol bit, set for external symbols -pub const N_EXT: u8 = 0x01; - -/* - * Only symbolic debugging entries have some of the N_STAB bits set and if any - * of these bits are set then it is a symbolic debugging entry (a stab). In - * which case then the values of the n_type field (the entire field) are given - * in - */ - -/* - * Values for N_TYPE bits of the n_type field. 
- */ -/// undefined, n_sect == NO_SECT -pub const N_UNDF: u8 = 0x0; -/// absolute, n_sect == NO_SECT -pub const N_ABS: u8 = 0x2; -/// defined in section number n_sect -pub const N_SECT: u8 = 0xe; -/// prebound undefined (defined in a dylib) -pub const N_PBUD: u8 = 0xc; -/// indirect -pub const N_INDR: u8 = 0xa; - -/* - * If the type is N_INDR then the symbol is defined to be the same as another - * symbol. In this case the n_value field is an index into the string table - * of the other symbol's name. When the other symbol is defined then they both - * take on the defined type and value. - */ - -/* - * If the type is N_SECT then the n_sect field contains an ordinal of the - * section the symbol is defined in. The sections are numbered from 1 and - * refer to sections in order they appear in the load commands for the file - * they are in. This means the same ordinal may very well refer to different - * sections in different files. - * - * The n_value field for all symbol table entries (including N_STAB's) gets - * updated by the link editor based on the value of it's n_sect field and where - * the section n_sect references gets relocated. If the value of the n_sect - * field is NO_SECT then it's n_value field is not changed by the link editor. - */ -/// symbol is not in any section -pub const NO_SECT: u8 = 0; -/// 1 thru 255 inclusive -pub const MAX_SECT: u8 = 255; - -/* - * Common symbols are represented by undefined (N_UNDF) external (N_EXT) types - * who's values (n_value) are non-zero. In which case the value of the n_value - * field is the size (in bytes) of the common symbol. The n_sect field is set - * to NO_SECT. The alignment of a common symbol may be set as a power of 2 - * between 2^1 and 2^15 as part of the n_desc field using the macros below. If - * the alignment is not set (a value of zero) then natural alignment based on - * the size is used. 
- */ -/* TODO: -#define GET_COMM_ALIGN(n_desc) (((n_desc) >> 8) & 0x0f) -#define SET_COMM_ALIGN(n_desc,align) \ - (n_desc) = (((n_desc) & 0xf0ff) | (((align) & 0x0f) << 8)) - */ - -/* - * To support the lazy binding of undefined symbols in the dynamic link-editor, - * the undefined symbols in the symbol table (the nlist structures) are marked - * with the indication if the undefined reference is a lazy reference or - * non-lazy reference. If both a non-lazy reference and a lazy reference is - * made to the same symbol the non-lazy reference takes precedence. A reference - * is lazy only when all references to that symbol are made through a symbol - * pointer in a lazy symbol pointer section. - * - * The implementation of marking nlist structures in the symbol table for - * undefined symbols will be to use some of the bits of the n_desc field as a - * reference type. The mask REFERENCE_TYPE will be applied to the n_desc field - * of an nlist structure for an undefined symbol to determine the type of - * undefined reference (lazy or non-lazy). - * - * The constants for the REFERENCE FLAGS are propagated to the reference table - * in a shared library file. In that case the constant for a defined symbol, - * REFERENCE_FLAG_DEFINED, is also used. - */ -/* Reference type bits of the n_desc field of undefined symbols */ -pub const REFERENCE_TYPE: u16 = 0x7; -/* types of references */ -pub const REFERENCE_FLAG_UNDEFINED_NON_LAZY: u16 = 0; -pub const REFERENCE_FLAG_UNDEFINED_LAZY: u16 = 1; -pub const REFERENCE_FLAG_DEFINED: u16 = 2; -pub const REFERENCE_FLAG_PRIVATE_DEFINED: u16 = 3; -pub const REFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZY: u16 = 4; -pub const REFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY: u16 = 5; - -/* - * To simplify stripping of objects that use are used with the dynamic link - * editor, the static link editor marks the symbols defined an object that are - * referenced by a dynamically bound object (dynamic shared libraries, bundles). 
- * With this marking strip knows not to strip these symbols. - */ -pub const REFERENCED_DYNAMICALLY: u16 = 0x0010; - -/* - * For images created by the static link editor with the -twolevel_namespace - * option in effect the flags field of the mach header is marked with - * MH_TWOLEVEL. And the binding of the undefined references of the image are - * determined by the static link editor. Which library an undefined symbol is - * bound to is recorded by the static linker in the high 8 bits of the n_desc - * field using the SET_LIBRARY_ORDINAL macro below. The ordinal recorded - * references the libraries listed in the Mach-O's LC_LOAD_DYLIB, - * LC_LOAD_WEAK_DYLIB, LC_REEXPORT_DYLIB, LC_LOAD_UPWARD_DYLIB, and - * LC_LAZY_LOAD_DYLIB, etc. load commands in the order they appear in the - * headers. The library ordinals start from 1. - * For a dynamic library that is built as a two-level namespace image the - * undefined references from module defined in another use the same nlist struct - * an in that case SELF_LIBRARY_ORDINAL is used as the library ordinal. For - * defined symbols in all images they also must have the library ordinal set to - * SELF_LIBRARY_ORDINAL. The EXECUTABLE_ORDINAL refers to the executable - * image for references from plugins that refer to the executable that loads - * them. - * - * The DYNAMIC_LOOKUP_ORDINAL is for undefined symbols in a two-level namespace - * image that are looked up by the dynamic linker with flat namespace semantics. - * This ordinal was added as a feature in Mac OS X 10.3 by reducing the - * value of MAX_LIBRARY_ORDINAL by one. So it is legal for existing binaries - * or binaries built with older tools to have 0xfe (254) dynamic libraries. In - * this case the ordinal value 0xfe (254) must be treated as a library ordinal - * for compatibility. 
- */ -/* TODO: -#define GET_LIBRARY_ORDINAL(n_desc) (((n_desc) >> 8) & 0xff) -#define SET_LIBRARY_ORDINAL(n_desc,ordinal) \ - (n_desc) = (((n_desc) & 0x00ff) | (((ordinal) & 0xff) << 8)) - */ -pub const SELF_LIBRARY_ORDINAL: u8 = 0x0; -pub const MAX_LIBRARY_ORDINAL: u8 = 0xfd; -pub const DYNAMIC_LOOKUP_ORDINAL: u8 = 0xfe; -pub const EXECUTABLE_ORDINAL: u8 = 0xff; - -/* - * The bit 0x0020 of the n_desc field is used for two non-overlapping purposes - * and has two different symbolic names, N_NO_DEAD_STRIP and N_DESC_DISCARDED. - */ - -/* - * The N_NO_DEAD_STRIP bit of the n_desc field only ever appears in a - * relocatable .o file (MH_OBJECT filetype). And is used to indicate to the - * static link editor it is never to dead strip the symbol. - */ -/// symbol is not to be dead stripped -pub const N_NO_DEAD_STRIP: u16 = 0x0020; - -/* - * The N_DESC_DISCARDED bit of the n_desc field never appears in linked image. - * But is used in very rare cases by the dynamic link editor to mark an in - * memory symbol as discared and longer used for linking. - */ -/// symbol is discarded -pub const N_DESC_DISCARDED: u16 = 0x0020; - -/* - * The N_WEAK_REF bit of the n_desc field indicates to the dynamic linker that - * the undefined symbol is allowed to be missing and is to have the address of - * zero when missing. - */ -/// symbol is weak referenced -pub const N_WEAK_REF: u16 = 0x0040; - -/* - * The N_WEAK_DEF bit of the n_desc field indicates to the static and dynamic - * linkers that the symbol definition is weak, allowing a non-weak symbol to - * also be used which causes the weak definition to be discared. Currently this - * is only supported for symbols in coalesced sections. - */ -/// coalesced symbol is a weak definition -pub const N_WEAK_DEF: u16 = 0x0080; - -/* - * The N_REF_TO_WEAK bit of the n_desc field indicates to the dynamic linker - * that the undefined symbol should be resolved using flat namespace searching. 
- */ -/// reference to a weak symbol -pub const N_REF_TO_WEAK: u16 = 0x0080; - -/* - * The N_ARM_THUMB_DEF bit of the n_desc field indicates that the symbol is - * a definition of a Thumb function. - */ -/// symbol is a Thumb function (ARM) -pub const N_ARM_THUMB_DEF: u16 = 0x0008; - -/* - * The N_SYMBOL_RESOLVER bit of the n_desc field indicates that the - * that the function is actually a resolver function and should - * be called to get the address of the real function to use. - * This bit is only available in .o files (MH_OBJECT filetype) - */ -pub const N_SYMBOL_RESOLVER: u16 = 0x0100; - -/* - * The N_ALT_ENTRY bit of the n_desc field indicates that the - * symbol is pinned to the previous content. - */ -pub const N_ALT_ENTRY: u16 = 0x0200; - -// Definitions from "/usr/include/mach-o/stab.h". - -/* - * This file gives definitions supplementing for permanent symbol - * table entries of Mach-O files. Modified from the BSD definitions. The - * modifications from the original definitions were changing what the values of - * what was the n_other field (an unused field) which is now the n_sect field. - * These modifications are required to support symbols in an arbitrary number of - * sections not just the three sections (text, data and bss) in a BSD file. - * The values of the defined constants have NOT been changed. - * - * These must have one of the N_STAB bits on. The n_value fields are subject - * to relocation according to the value of their n_sect field. So for types - * that refer to things in sections the n_sect field must be filled in with the - * proper section ordinal. For types that are not to have their n_value field - * relocatated the n_sect field must be NO_SECT. - */ - -/* - * Symbolic debugger symbols. The comments give the conventional use for - * - * .stabs "n_name", n_type, n_sect, n_desc, n_value - * - * where n_type is the defined constant and not listed in the comment. Other - * fields not listed are zero. 
n_sect is the section ordinal the entry is - * referring to. - */ -/// global symbol: name,,NO_SECT,type,0 -pub const N_GSYM: u8 = 0x20; -/// procedure name (f77 kludge): name,,NO_SECT,0,0 -pub const N_FNAME: u8 = 0x22; -/// procedure: name,,n_sect,linenumber,address -pub const N_FUN: u8 = 0x24; -/// static symbol: name,,n_sect,type,address -pub const N_STSYM: u8 = 0x26; -/// .lcomm symbol: name,,n_sect,type,address -pub const N_LCSYM: u8 = 0x28; -/// begin nsect sym: 0,,n_sect,0,address -pub const N_BNSYM: u8 = 0x2e; -/// AST file path: name,,NO_SECT,0,0 -pub const N_AST: u8 = 0x32; -/// emitted with gcc2_compiled and in gcc source -pub const N_OPT: u8 = 0x3c; -/// register sym: name,,NO_SECT,type,register -pub const N_RSYM: u8 = 0x40; -/// src line: 0,,n_sect,linenumber,address -pub const N_SLINE: u8 = 0x44; -/// end nsect sym: 0,,n_sect,0,address -pub const N_ENSYM: u8 = 0x4e; -/// structure elt: name,,NO_SECT,type,struct_offset -pub const N_SSYM: u8 = 0x60; -/// source file name: name,,n_sect,0,address -pub const N_SO: u8 = 0x64; -/// object file name: name,,0,0,st_mtime -pub const N_OSO: u8 = 0x66; -/// local sym: name,,NO_SECT,type,offset -pub const N_LSYM: u8 = 0x80; -/// include file beginning: name,,NO_SECT,0,sum -pub const N_BINCL: u8 = 0x82; -/// #included file name: name,,n_sect,0,address -pub const N_SOL: u8 = 0x84; -/// compiler parameters: name,,NO_SECT,0,0 -pub const N_PARAMS: u8 = 0x86; -/// compiler version: name,,NO_SECT,0,0 -pub const N_VERSION: u8 = 0x88; -/// compiler -O level: name,,NO_SECT,0,0 -pub const N_OLEVEL: u8 = 0x8A; -/// parameter: name,,NO_SECT,type,offset -pub const N_PSYM: u8 = 0xa0; -/// include file end: name,,NO_SECT,0,0 -pub const N_EINCL: u8 = 0xa2; -/// alternate entry: name,,n_sect,linenumber,address -pub const N_ENTRY: u8 = 0xa4; -/// left bracket: 0,,NO_SECT,nesting level,address -pub const N_LBRAC: u8 = 0xc0; -/// deleted include file: name,,NO_SECT,0,sum -pub const N_EXCL: u8 = 0xc2; -/// right bracket: 
0,,NO_SECT,nesting level,address -pub const N_RBRAC: u8 = 0xe0; -/// begin common: name,,NO_SECT,0,0 -pub const N_BCOMM: u8 = 0xe2; -/// end common: name,,n_sect,0,0 -pub const N_ECOMM: u8 = 0xe4; -/// end common (local name): 0,,n_sect,0,address -pub const N_ECOML: u8 = 0xe8; -/// second stab entry with length information -pub const N_LENG: u8 = 0xfe; - -/* - * for the berkeley pascal compiler, pc(1): - */ -/// global pascal symbol: name,,NO_SECT,subtype,line -pub const N_PC: u8 = 0x30; - -// Definitions from "/usr/include/mach-o/reloc.h". - -/// A relocation entry. -/// -/// Mach-O relocations have plain and scattered variants, with the -/// meaning of the fields depending on the variant. -/// -/// This type provides functions for determining whether the relocation -/// is scattered, and for accessing the fields of each variant. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Relocation { - pub r_word0: U32, - pub r_word1: U32, -} - -impl Relocation { - /// Determine whether this is a scattered relocation. - #[inline] - pub fn r_scattered(self, endian: E, cputype: u32) -> bool { - if cputype == CPU_TYPE_X86_64 { - false - } else { - self.r_word0.get(endian) & R_SCATTERED != 0 - } - } - - /// Return the fields of a plain relocation. - pub fn info(self, endian: E) -> RelocationInfo { - let r_address = self.r_word0.get(endian); - let r_word1 = self.r_word1.get(endian); - if endian.is_little_endian() { - RelocationInfo { - r_address, - r_symbolnum: r_word1 & 0x00ff_ffff, - r_pcrel: ((r_word1 >> 24) & 0x1) != 0, - r_length: ((r_word1 >> 25) & 0x3) as u8, - r_extern: ((r_word1 >> 27) & 0x1) != 0, - r_type: (r_word1 >> 28) as u8, - } - } else { - RelocationInfo { - r_address, - r_symbolnum: r_word1 >> 8, - r_pcrel: ((r_word1 >> 7) & 0x1) != 0, - r_length: ((r_word1 >> 5) & 0x3) as u8, - r_extern: ((r_word1 >> 4) & 0x1) != 0, - r_type: (r_word1 & 0xf) as u8, - } - } - } - - /// Return the fields of a scattered relocation. 
- pub fn scattered_info(self, endian: E) -> ScatteredRelocationInfo { - let r_word0 = self.r_word0.get(endian); - let r_value = self.r_word1.get(endian); - ScatteredRelocationInfo { - r_address: r_word0 & 0x00ff_ffff, - r_type: ((r_word0 >> 24) & 0xf) as u8, - r_length: ((r_word0 >> 28) & 0x3) as u8, - r_pcrel: ((r_word0 >> 30) & 0x1) != 0, - r_value, - } - } -} - -/* - * Format of a relocation entry of a Mach-O file. Modified from the 4.3BSD - * format. The modifications from the original format were changing the value - * of the r_symbolnum field for "local" (r_extern == 0) relocation entries. - * This modification is required to support symbols in an arbitrary number of - * sections not just the three sections (text, data and bss) in a 4.3BSD file. - * Also the last 4 bits have had the r_type tag added to them. - */ - -#[derive(Debug, Clone, Copy)] -pub struct RelocationInfo { - /// offset in the section to what is being relocated - pub r_address: u32, - /// symbol index if r_extern == 1 or section ordinal if r_extern == 0 - pub r_symbolnum: u32, - /// was relocated pc relative already - pub r_pcrel: bool, - /// 0=byte, 1=word, 2=long, 3=quad - pub r_length: u8, - /// does not include value of sym referenced - pub r_extern: bool, - /// if not 0, machine specific relocation type - pub r_type: u8, -} - -impl RelocationInfo { - /// Combine the fields into a `Relocation`. 
- pub fn relocation(self, endian: E) -> Relocation { - let r_word0 = U32::new(endian, self.r_address); - let r_word1 = U32::new( - endian, - if endian.is_little_endian() { - self.r_symbolnum & 0x00ff_ffff - | u32::from(self.r_pcrel) << 24 - | u32::from(self.r_length & 0x3) << 25 - | u32::from(self.r_extern) << 27 - | u32::from(self.r_type) << 28 - } else { - self.r_symbolnum >> 8 - | u32::from(self.r_pcrel) << 7 - | u32::from(self.r_length & 0x3) << 5 - | u32::from(self.r_extern) << 4 - | u32::from(self.r_type) & 0xf - }, - ); - Relocation { r_word0, r_word1 } - } -} - -/// absolute relocation type for Mach-O files -pub const R_ABS: u8 = 0; - -/* - * The r_address is not really the address as it's name indicates but an offset. - * In 4.3BSD a.out objects this offset is from the start of the "segment" for - * which relocation entry is for (text or data). For Mach-O object files it is - * also an offset but from the start of the "section" for which the relocation - * entry is for. See comments in about the r_address feild - * in images for used with the dynamic linker. - * - * In 4.3BSD a.out objects if r_extern is zero then r_symbolnum is an ordinal - * for the segment the symbol being relocated is in. These ordinals are the - * symbol types N_TEXT, N_DATA, N_BSS or N_ABS. In Mach-O object files these - * ordinals refer to the sections in the object file in the order their section - * structures appear in the headers of the object file they are in. The first - * section has the ordinal 1, the second 2, and so on. This means that the - * same ordinal in two different object files could refer to two different - * sections. And further could have still different ordinals when combined - * by the link-editor. The value R_ABS is used for relocation entries for - * absolute symbols which need no further relocation. 
- */ - -/* - * For RISC machines some of the references are split across two instructions - * and the instruction does not contain the complete value of the reference. - * In these cases a second, or paired relocation entry, follows each of these - * relocation entries, using a PAIR r_type, which contains the other part of the - * reference not contained in the instruction. This other part is stored in the - * pair's r_address field. The exact number of bits of the other part of the - * reference store in the r_address field is dependent on the particular - * relocation type for the particular architecture. - */ - -/* - * To make scattered loading by the link editor work correctly "local" - * relocation entries can't be used when the item to be relocated is the value - * of a symbol plus an offset (where the resulting expression is outside the - * block the link editor is moving, a blocks are divided at symbol addresses). - * In this case. where the item is a symbol value plus offset, the link editor - * needs to know more than just the section the symbol was defined. What is - * needed is the actual value of the symbol without the offset so it can do the - * relocation correctly based on where the value of the symbol got relocated to - * not the value of the expression (with the offset added to the symbol value). - * So for the NeXT 2.0 release no "local" relocation entries are ever used when - * there is a non-zero offset added to a symbol. The "external" and "local" - * relocation entries remain unchanged. - * - * The implementation is quite messy given the compatibility with the existing - * relocation entry format. The ASSUMPTION is that a section will never be - * bigger than 2**24 - 1 (0x00ffffff or 16,777,215) bytes. This assumption - * allows the r_address (which is really an offset) to fit in 24 bits and high - * bit of the r_address field in the relocation_info structure to indicate - * it is really a scattered_relocation_info structure. 
Since these are only - * used in places where "local" relocation entries are used and not where - * "external" relocation entries are used the r_extern field has been removed. - * - * For scattered loading to work on a RISC machine where some of the references - * are split across two instructions the link editor needs to be assured that - * each reference has a unique 32 bit reference (that more than one reference is - * NOT sharing the same high 16 bits for example) so it move each referenced - * item independent of each other. Some compilers guarantees this but the - * compilers don't so scattered loading can be done on those that do guarantee - * this. - */ - -/// Bit set in `Relocation::r_word0` for scattered relocations. -pub const R_SCATTERED: u32 = 0x8000_0000; - -#[derive(Debug, Clone, Copy)] -pub struct ScatteredRelocationInfo { - /// offset in the section to what is being relocated - pub r_address: u32, - /// if not 0, machine specific relocation type - pub r_type: u8, - /// 0=byte, 1=word, 2=long, 3=quad - pub r_length: u8, - /// was relocated pc relative already - pub r_pcrel: bool, - /// the value the item to be relocated is referring to (without any offset added) - pub r_value: u32, -} - -impl ScatteredRelocationInfo { - /// Combine the fields into a `Relocation`. - pub fn relocation(self, endian: E) -> Relocation { - let r_word0 = U32::new( - endian, - self.r_address & 0x00ff_ffff - | u32::from(self.r_type & 0xf) << 24 - | u32::from(self.r_length & 0x3) << 28 - | u32::from(self.r_pcrel) << 30 - | R_SCATTERED, - ); - let r_word1 = U32::new(endian, self.r_value); - Relocation { r_word0, r_word1 } - } -} - -/* - * Relocation types used in a generic implementation. Relocation entries for - * normal things use the generic relocation as described above and their r_type - * is GENERIC_RELOC_VANILLA (a value of zero). 
- * - * Another type of generic relocation, GENERIC_RELOC_SECTDIFF, is to support - * the difference of two symbols defined in different sections. That is the - * expression "symbol1 - symbol2 + constant" is a relocatable expression when - * both symbols are defined in some section. For this type of relocation the - * both relocations entries are scattered relocation entries. The value of - * symbol1 is stored in the first relocation entry's r_value field and the - * value of symbol2 is stored in the pair's r_value field. - * - * A special case for a prebound lazy pointer is needed to beable to set the - * value of the lazy pointer back to its non-prebound state. This is done - * using the GENERIC_RELOC_PB_LA_PTR r_type. This is a scattered relocation - * entry where the r_value feild is the value of the lazy pointer not prebound. - */ -/// generic relocation as described above -pub const GENERIC_RELOC_VANILLA: u8 = 0; -/// Only follows a GENERIC_RELOC_SECTDIFF -pub const GENERIC_RELOC_PAIR: u8 = 1; -pub const GENERIC_RELOC_SECTDIFF: u8 = 2; -/// prebound lazy pointer -pub const GENERIC_RELOC_PB_LA_PTR: u8 = 3; -pub const GENERIC_RELOC_LOCAL_SECTDIFF: u8 = 4; -/// thread local variables -pub const GENERIC_RELOC_TLV: u8 = 5; - -// Definitions from "/usr/include/mach-o/arm/reloc.h". - -/* - * Relocation types used in the arm implementation. Relocation entries for - * things other than instructions use the same generic relocation as described - * in and their r_type is ARM_RELOC_VANILLA, one of the - * *_SECTDIFF or the *_PB_LA_PTR types. The rest of the relocation types are - * for instructions. Since they are for instructions the r_address field - * indicates the 32 bit instruction that the relocation is to be performed on. 
- */ -/// generic relocation as described above -pub const ARM_RELOC_VANILLA: u8 = 0; -/// the second relocation entry of a pair -pub const ARM_RELOC_PAIR: u8 = 1; -/// a PAIR follows with subtract symbol value -pub const ARM_RELOC_SECTDIFF: u8 = 2; -/// like ARM_RELOC_SECTDIFF, but the symbol referenced was local. -pub const ARM_RELOC_LOCAL_SECTDIFF: u8 = 3; -/// prebound lazy pointer -pub const ARM_RELOC_PB_LA_PTR: u8 = 4; -/// 24 bit branch displacement (to a word address) -pub const ARM_RELOC_BR24: u8 = 5; -/// 22 bit branch displacement (to a half-word address) -pub const ARM_THUMB_RELOC_BR22: u8 = 6; -/// obsolete - a thumb 32-bit branch instruction possibly needing page-spanning branch workaround -pub const ARM_THUMB_32BIT_BRANCH: u8 = 7; - -/* - * For these two r_type relocations they always have a pair following them - * and the r_length bits are used differently. The encoding of the - * r_length is as follows: - * low bit of r_length: - * 0 - :lower16: for movw instructions - * 1 - :upper16: for movt instructions - * high bit of r_length: - * 0 - arm instructions - * 1 - thumb instructions - * the other half of the relocated expression is in the following pair - * relocation entry in the the low 16 bits of r_address field. - */ -pub const ARM_RELOC_HALF: u8 = 8; -pub const ARM_RELOC_HALF_SECTDIFF: u8 = 9; - -// Definitions from "/usr/include/mach-o/arm64/reloc.h". - -/* - * Relocation types used in the arm64 implementation. 
- */ -/// for pointers -pub const ARM64_RELOC_UNSIGNED: u8 = 0; -/// must be followed by a ARM64_RELOC_UNSIGNED -pub const ARM64_RELOC_SUBTRACTOR: u8 = 1; -/// a B/BL instruction with 26-bit displacement -pub const ARM64_RELOC_BRANCH26: u8 = 2; -/// pc-rel distance to page of target -pub const ARM64_RELOC_PAGE21: u8 = 3; -/// offset within page, scaled by r_length -pub const ARM64_RELOC_PAGEOFF12: u8 = 4; -/// pc-rel distance to page of GOT slot -pub const ARM64_RELOC_GOT_LOAD_PAGE21: u8 = 5; -/// offset within page of GOT slot, scaled by r_length -pub const ARM64_RELOC_GOT_LOAD_PAGEOFF12: u8 = 6; -/// for pointers to GOT slots -pub const ARM64_RELOC_POINTER_TO_GOT: u8 = 7; -/// pc-rel distance to page of TLVP slot -pub const ARM64_RELOC_TLVP_LOAD_PAGE21: u8 = 8; -/// offset within page of TLVP slot, scaled by r_length -pub const ARM64_RELOC_TLVP_LOAD_PAGEOFF12: u8 = 9; -/// must be followed by PAGE21 or PAGEOFF12 -pub const ARM64_RELOC_ADDEND: u8 = 10; - -// An arm64e authenticated pointer. -// -// Represents a pointer to a symbol (like ARM64_RELOC_UNSIGNED). -// Additionally, the resulting pointer is signed. The signature is -// specified in the target location: the addend is restricted to the lower -// 32 bits (instead of the full 64 bits for ARM64_RELOC_UNSIGNED): -// -// |63|62|61-51|50-49| 48 |47 - 32|31 - 0| -// | 1| 0| 0 | key | addr | discriminator | addend | -// -// The key is one of: -// IA: 00 IB: 01 -// DA: 10 DB: 11 -// -// The discriminator field is used as extra signature diversification. -// -// The addr field indicates whether the target address should be blended -// into the discriminator. -// -pub const ARM64_RELOC_AUTHENTICATED_POINTER: u8 = 11; - -// Definitions from "/usr/include/mach-o/ppc/reloc.h". - -/* - * Relocation types used in the ppc implementation. Relocation entries for - * things other than instructions use the same generic relocation as described - * above and their r_type is RELOC_VANILLA. 
The rest of the relocation types - * are for instructions. Since they are for instructions the r_address field - * indicates the 32 bit instruction that the relocation is to be performed on. - * The fields r_pcrel and r_length are ignored for non-RELOC_VANILLA r_types - * except for PPC_RELOC_BR14. - * - * For PPC_RELOC_BR14 if the r_length is the unused value 3, then the branch was - * statically predicted setting or clearing the Y-bit based on the sign of the - * displacement or the opcode. If this is the case the static linker must flip - * the value of the Y-bit if the sign of the displacement changes for non-branch - * always conditions. - */ -/// generic relocation as described above -pub const PPC_RELOC_VANILLA: u8 = 0; -/// the second relocation entry of a pair -pub const PPC_RELOC_PAIR: u8 = 1; -/// 14 bit branch displacement (to a word address) -pub const PPC_RELOC_BR14: u8 = 2; -/// 24 bit branch displacement (to a word address) -pub const PPC_RELOC_BR24: u8 = 3; -/// a PAIR follows with the low half -pub const PPC_RELOC_HI16: u8 = 4; -/// a PAIR follows with the high half -pub const PPC_RELOC_LO16: u8 = 5; -/// Same as the RELOC_HI16 except the low 16 bits and the high 16 bits are added together -/// with the low 16 bits sign extended first. This means if bit 15 of the low 16 bits is -/// set the high 16 bits stored in the instruction will be adjusted. -pub const PPC_RELOC_HA16: u8 = 6; -/// Same as the LO16 except that the low 2 bits are not stored in the instruction and are -/// always zero. This is used in double word load/store instructions. -pub const PPC_RELOC_LO14: u8 = 7; -/// a PAIR follows with subtract symbol value -pub const PPC_RELOC_SECTDIFF: u8 = 8; -/// prebound lazy pointer -pub const PPC_RELOC_PB_LA_PTR: u8 = 9; -/// section difference forms of above. 
a PAIR -pub const PPC_RELOC_HI16_SECTDIFF: u8 = 10; -/// follows these with subtract symbol value -pub const PPC_RELOC_LO16_SECTDIFF: u8 = 11; -pub const PPC_RELOC_HA16_SECTDIFF: u8 = 12; -pub const PPC_RELOC_JBSR: u8 = 13; -pub const PPC_RELOC_LO14_SECTDIFF: u8 = 14; -/// like PPC_RELOC_SECTDIFF, but the symbol referenced was local. -pub const PPC_RELOC_LOCAL_SECTDIFF: u8 = 15; - -// Definitions from "/usr/include/mach-o/x86_64/reloc.h". - -/* - * Relocations for x86_64 are a bit different than for other architectures in - * Mach-O: Scattered relocations are not used. Almost all relocations produced - * by the compiler are external relocations. An external relocation has the - * r_extern bit set to 1 and the r_symbolnum field contains the symbol table - * index of the target label. - * - * When the assembler is generating relocations, if the target label is a local - * label (begins with 'L'), then the previous non-local label in the same - * section is used as the target of the external relocation. An addend is used - * with the distance from that non-local label to the target label. Only when - * there is no previous non-local label in the section is an internal - * relocation used. - * - * The addend (i.e. the 4 in _foo+4) is encoded in the instruction (Mach-O does - * not have RELA relocations). For PC-relative relocations, the addend is - * stored directly in the instruction. This is different from other Mach-O - * architectures, which encode the addend minus the current section offset. 
- * - * The relocation types are: - * - * X86_64_RELOC_UNSIGNED // for absolute addresses - * X86_64_RELOC_SIGNED // for signed 32-bit displacement - * X86_64_RELOC_BRANCH // a CALL/JMP instruction with 32-bit displacement - * X86_64_RELOC_GOT_LOAD // a MOVQ load of a GOT entry - * X86_64_RELOC_GOT // other GOT references - * X86_64_RELOC_SUBTRACTOR // must be followed by a X86_64_RELOC_UNSIGNED - * - * The following are sample assembly instructions, followed by the relocation - * and section content they generate in an object file: - * - * call _foo - * r_type=X86_64_RELOC_BRANCH, r_length=2, r_extern=1, r_pcrel=1, r_symbolnum=_foo - * E8 00 00 00 00 - * - * call _foo+4 - * r_type=X86_64_RELOC_BRANCH, r_length=2, r_extern=1, r_pcrel=1, r_symbolnum=_foo - * E8 04 00 00 00 - * - * movq _foo@GOTPCREL(%rip), %rax - * r_type=X86_64_RELOC_GOT_LOAD, r_length=2, r_extern=1, r_pcrel=1, r_symbolnum=_foo - * 48 8B 05 00 00 00 00 - * - * pushq _foo@GOTPCREL(%rip) - * r_type=X86_64_RELOC_GOT, r_length=2, r_extern=1, r_pcrel=1, r_symbolnum=_foo - * FF 35 00 00 00 00 - * - * movl _foo(%rip), %eax - * r_type=X86_64_RELOC_SIGNED, r_length=2, r_extern=1, r_pcrel=1, r_symbolnum=_foo - * 8B 05 00 00 00 00 - * - * movl _foo+4(%rip), %eax - * r_type=X86_64_RELOC_SIGNED, r_length=2, r_extern=1, r_pcrel=1, r_symbolnum=_foo - * 8B 05 04 00 00 00 - * - * movb $0x12, _foo(%rip) - * r_type=X86_64_RELOC_SIGNED, r_length=2, r_extern=1, r_pcrel=1, r_symbolnum=_foo - * C6 05 FF FF FF FF 12 - * - * movl $0x12345678, _foo(%rip) - * r_type=X86_64_RELOC_SIGNED, r_length=2, r_extern=1, r_pcrel=1, r_symbolnum=_foo - * C7 05 FC FF FF FF 78 56 34 12 - * - * .quad _foo - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_foo - * 00 00 00 00 00 00 00 00 - * - * .quad _foo+4 - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_foo - * 04 00 00 00 00 00 00 00 - * - * .quad _foo - _bar - * r_type=X86_64_RELOC_SUBTRACTOR, r_length=3, r_extern=1, 
r_pcrel=0, r_symbolnum=_bar - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_foo - * 00 00 00 00 00 00 00 00 - * - * .quad _foo - _bar + 4 - * r_type=X86_64_RELOC_SUBTRACTOR, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_bar - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_foo - * 04 00 00 00 00 00 00 00 - * - * .long _foo - _bar - * r_type=X86_64_RELOC_SUBTRACTOR, r_length=2, r_extern=1, r_pcrel=0, r_symbolnum=_bar - * r_type=X86_64_RELOC_UNSIGNED, r_length=2, r_extern=1, r_pcrel=0, r_symbolnum=_foo - * 00 00 00 00 - * - * lea L1(%rip), %rax - * r_type=X86_64_RELOC_SIGNED, r_length=2, r_extern=1, r_pcrel=1, r_symbolnum=_prev - * 48 8d 05 12 00 00 00 - * // assumes _prev is the first non-local label 0x12 bytes before L1 - * - * lea L0(%rip), %rax - * r_type=X86_64_RELOC_SIGNED, r_length=2, r_extern=0, r_pcrel=1, r_symbolnum=3 - * 48 8d 05 56 00 00 00 - * // assumes L0 is in third section and there is no previous non-local label. - * // The rip-relative-offset of 0x00000056 is L0-address_of_next_instruction. - * // address_of_next_instruction is the address of the relocation + 4. - * - * add $6,L0(%rip) - * r_type=X86_64_RELOC_SIGNED_1, r_length=2, r_extern=0, r_pcrel=1, r_symbolnum=3 - * 83 05 18 00 00 00 06 - * // assumes L0 is in third section and there is no previous non-local label. - * // The rip-relative-offset of 0x00000018 is L0-address_of_next_instruction. - * // address_of_next_instruction is the address of the relocation + 4 + 1. - * // The +1 comes from SIGNED_1. This is used because the relocation is not - * // at the end of the instruction. 
- * - * .quad L1 - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_prev - * 12 00 00 00 00 00 00 00 - * // assumes _prev is the first non-local label 0x12 bytes before L1 - * - * .quad L0 - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_extern=0, r_pcrel=0, r_symbolnum=3 - * 56 00 00 00 00 00 00 00 - * // assumes L0 is in third section, has an address of 0x00000056 in .o - * // file, and there is no previous non-local label - * - * .quad _foo - . - * r_type=X86_64_RELOC_SUBTRACTOR, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_prev - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_foo - * EE FF FF FF FF FF FF FF - * // assumes _prev is the first non-local label 0x12 bytes before this - * // .quad - * - * .quad _foo - L1 - * r_type=X86_64_RELOC_SUBTRACTOR, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_prev - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_extern=1, r_pcrel=0, r_symbolnum=_foo - * EE FF FF FF FF FF FF FF - * // assumes _prev is the first non-local label 0x12 bytes before L1 - * - * .quad L1 - _prev - * // No relocations. This is an assembly time constant. - * 12 00 00 00 00 00 00 00 - * // assumes _prev is the first non-local label 0x12 bytes before L1 - * - * - * - * In final linked images, there are only two valid relocation kinds: - * - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_pcrel=0, r_extern=1, r_symbolnum=sym_index - * This tells dyld to add the address of a symbol to a pointer sized (8-byte) - * piece of data (i.e on disk the 8-byte piece of data contains the addend). The - * r_symbolnum contains the index into the symbol table of the target symbol. - * - * r_type=X86_64_RELOC_UNSIGNED, r_length=3, r_pcrel=0, r_extern=0, r_symbolnum=0 - * This tells dyld to adjust the pointer sized (8-byte) piece of data by the amount - * the containing image was loaded from its base address (e.g. slide). 
- * - */ -/// for absolute addresses -pub const X86_64_RELOC_UNSIGNED: u8 = 0; -/// for signed 32-bit displacement -pub const X86_64_RELOC_SIGNED: u8 = 1; -/// a CALL/JMP instruction with 32-bit displacement -pub const X86_64_RELOC_BRANCH: u8 = 2; -/// a MOVQ load of a GOT entry -pub const X86_64_RELOC_GOT_LOAD: u8 = 3; -/// other GOT references -pub const X86_64_RELOC_GOT: u8 = 4; -/// must be followed by a X86_64_RELOC_UNSIGNED -pub const X86_64_RELOC_SUBTRACTOR: u8 = 5; -/// for signed 32-bit displacement with a -1 addend -pub const X86_64_RELOC_SIGNED_1: u8 = 6; -/// for signed 32-bit displacement with a -2 addend -pub const X86_64_RELOC_SIGNED_2: u8 = 7; -/// for signed 32-bit displacement with a -4 addend -pub const X86_64_RELOC_SIGNED_4: u8 = 8; -/// for thread local variables -pub const X86_64_RELOC_TLV: u8 = 9; - -unsafe_impl_pod!(FatHeader, FatArch32, FatArch64,); -unsafe_impl_endian_pod!( - DyldCacheHeader, - DyldCacheMappingInfo, - DyldCacheImageInfo, - DyldSubCacheInfo, - MachHeader32, - MachHeader64, - LoadCommand, - LcStr, - SegmentCommand32, - SegmentCommand64, - Section32, - Section64, - Fvmlib, - FvmlibCommand, - Dylib, - DylibCommand, - SubFrameworkCommand, - SubClientCommand, - SubUmbrellaCommand, - SubLibraryCommand, - PreboundDylibCommand, - DylinkerCommand, - ThreadCommand, - RoutinesCommand32, - RoutinesCommand64, - SymtabCommand, - DysymtabCommand, - DylibTableOfContents, - DylibModule32, - DylibModule64, - DylibReference, - TwolevelHintsCommand, - TwolevelHint, - PrebindCksumCommand, - UuidCommand, - RpathCommand, - LinkeditDataCommand, - FilesetEntryCommand, - EncryptionInfoCommand32, - EncryptionInfoCommand64, - VersionMinCommand, - BuildVersionCommand, - BuildToolVersion, - DyldInfoCommand, - LinkerOptionCommand, - SymsegCommand, - IdentCommand, - FvmfileCommand, - EntryPointCommand, - SourceVersionCommand, - DataInCodeEntry, - //TlvDescriptor, - NoteCommand, - Nlist32, - Nlist64, - Relocation, -); diff -Nru 
s390-tools-2.31.0/rust-vendor/object/src/pe.rs s390-tools-2.33.1/rust-vendor/object/src/pe.rs --- s390-tools-2.31.0/rust-vendor/object/src/pe.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/pe.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,3054 +0,0 @@ -//! PE/COFF definitions. -//! -//! These definitions are independent of read/write support, although we do implement -//! some traits useful for those. -//! -//! This module is based heavily on "winnt.h" (10.0.17763.0). - -#![allow(missing_docs)] - -use core::convert::TryInto; - -use crate::endian::{I32Bytes, LittleEndian as LE, U16Bytes, U32Bytes, I32, U16, U32, U64}; -use crate::pod::Pod; - -/// MZ -pub const IMAGE_DOS_SIGNATURE: u16 = 0x5A4D; -/// NE -pub const IMAGE_OS2_SIGNATURE: u16 = 0x454E; -/// LE -pub const IMAGE_OS2_SIGNATURE_LE: u16 = 0x454C; -/// LE -pub const IMAGE_VXD_SIGNATURE: u16 = 0x454C; -/// PE00 -pub const IMAGE_NT_SIGNATURE: u32 = 0x0000_4550; - -/// DOS .EXE header -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDosHeader { - /// Magic number - pub e_magic: U16, - /// Bytes on last page of file - pub e_cblp: U16, - /// Pages in file - pub e_cp: U16, - /// Relocations - pub e_crlc: U16, - /// Size of header in paragraphs - pub e_cparhdr: U16, - /// Minimum extra paragraphs needed - pub e_minalloc: U16, - /// Maximum extra paragraphs needed - pub e_maxalloc: U16, - /// Initial (relative) SS value - pub e_ss: U16, - /// Initial SP value - pub e_sp: U16, - /// Checksum - pub e_csum: U16, - /// Initial IP value - pub e_ip: U16, - /// Initial (relative) CS value - pub e_cs: U16, - /// File address of relocation table - pub e_lfarlc: U16, - /// Overlay number - pub e_ovno: U16, - /// Reserved words - pub e_res: [U16; 4], - /// OEM identifier (for e_oeminfo) - pub e_oemid: U16, - /// OEM information; e_oemid specific - pub e_oeminfo: U16, - /// Reserved words - pub e_res2: [U16; 10], - /// File address of new exe header - pub e_lfanew: U32, -} - -/// OS/2 
.EXE header -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageOs2Header { - /// Magic number - pub ne_magic: U16, - /// Version number - pub ne_ver: i8, - /// Revision number - pub ne_rev: i8, - /// Offset of Entry Table - pub ne_enttab: U16, - /// Number of bytes in Entry Table - pub ne_cbenttab: U16, - /// Checksum of whole file - pub ne_crc: I32, - /// Flag word - pub ne_flags: U16, - /// Automatic data segment number - pub ne_autodata: U16, - /// Initial heap allocation - pub ne_heap: U16, - /// Initial stack allocation - pub ne_stack: U16, - /// Initial CS:IP setting - pub ne_csip: I32, - /// Initial SS:SP setting - pub ne_sssp: I32, - /// Count of file segments - pub ne_cseg: U16, - /// Entries in Module Reference Table - pub ne_cmod: U16, - /// Size of non-resident name table - pub ne_cbnrestab: U16, - /// Offset of Segment Table - pub ne_segtab: U16, - /// Offset of Resource Table - pub ne_rsrctab: U16, - /// Offset of resident name table - pub ne_restab: U16, - /// Offset of Module Reference Table - pub ne_modtab: U16, - /// Offset of Imported Names Table - pub ne_imptab: U16, - /// Offset of Non-resident Names Table - pub ne_nrestab: I32, - /// Count of movable entries - pub ne_cmovent: U16, - /// Segment alignment shift count - pub ne_align: U16, - /// Count of resource segments - pub ne_cres: U16, - /// Target Operating system - pub ne_exetyp: u8, - /// Other .EXE flags - pub ne_flagsothers: u8, - /// offset to return thunks - pub ne_pretthunks: U16, - /// offset to segment ref. 
bytes - pub ne_psegrefbytes: U16, - /// Minimum code swap area size - pub ne_swaparea: U16, - /// Expected Windows version number - pub ne_expver: U16, -} - -/// Windows VXD header -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageVxdHeader { - /// Magic number - pub e32_magic: U16, - /// The byte ordering for the VXD - pub e32_border: u8, - /// The word ordering for the VXD - pub e32_worder: u8, - /// The EXE format level for now = 0 - pub e32_level: U32, - /// The CPU type - pub e32_cpu: U16, - /// The OS type - pub e32_os: U16, - /// Module version - pub e32_ver: U32, - /// Module flags - pub e32_mflags: U32, - /// Module # pages - pub e32_mpages: U32, - /// Object # for instruction pointer - pub e32_startobj: U32, - /// Extended instruction pointer - pub e32_eip: U32, - /// Object # for stack pointer - pub e32_stackobj: U32, - /// Extended stack pointer - pub e32_esp: U32, - /// VXD page size - pub e32_pagesize: U32, - /// Last page size in VXD - pub e32_lastpagesize: U32, - /// Fixup section size - pub e32_fixupsize: U32, - /// Fixup section checksum - pub e32_fixupsum: U32, - /// Loader section size - pub e32_ldrsize: U32, - /// Loader section checksum - pub e32_ldrsum: U32, - /// Object table offset - pub e32_objtab: U32, - /// Number of objects in module - pub e32_objcnt: U32, - /// Object page map offset - pub e32_objmap: U32, - /// Object iterated data map offset - pub e32_itermap: U32, - /// Offset of Resource Table - pub e32_rsrctab: U32, - /// Number of resource entries - pub e32_rsrccnt: U32, - /// Offset of resident name table - pub e32_restab: U32, - /// Offset of Entry Table - pub e32_enttab: U32, - /// Offset of Module Directive Table - pub e32_dirtab: U32, - /// Number of module directives - pub e32_dircnt: U32, - /// Offset of Fixup Page Table - pub e32_fpagetab: U32, - /// Offset of Fixup Record Table - pub e32_frectab: U32, - /// Offset of Import Module Name Table - pub e32_impmod: U32, - /// Number of entries in Import Module Name 
Table - pub e32_impmodcnt: U32, - /// Offset of Import Procedure Name Table - pub e32_impproc: U32, - /// Offset of Per-Page Checksum Table - pub e32_pagesum: U32, - /// Offset of Enumerated Data Pages - pub e32_datapage: U32, - /// Number of preload pages - pub e32_preload: U32, - /// Offset of Non-resident Names Table - pub e32_nrestab: U32, - /// Size of Non-resident Name Table - pub e32_cbnrestab: U32, - /// Non-resident Name Table Checksum - pub e32_nressum: U32, - /// Object # for automatic data object - pub e32_autodata: U32, - /// Offset of the debugging information - pub e32_debuginfo: U32, - /// The length of the debugging info. in bytes - pub e32_debuglen: U32, - /// Number of instance pages in preload section of VXD file - pub e32_instpreload: U32, - /// Number of instance pages in demand load section of VXD file - pub e32_instdemand: U32, - /// Size of heap - for 16-bit apps - pub e32_heapsize: U32, - /// Reserved words - pub e32_res3: [u8; 12], - pub e32_winresoff: U32, - pub e32_winreslen: U32, - /// Device ID for VxD - pub e32_devid: U16, - /// DDK version for VxD - pub e32_ddkver: U16, -} - -/// A PE rich header entry. -/// -/// Rich headers have no official documentation, but have been heavily -/// reversed-engineered and documented in the wild, e.g.: -/// * `http://www.ntcore.com/files/richsign.htm` -/// * `https://www.researchgate.net/figure/Structure-of-the-Rich-Header_fig1_318145388` -/// -/// This data is "masked", i.e. XORed with a checksum derived from the file data. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct MaskedRichHeaderEntry { - pub masked_comp_id: U32, - pub masked_count: U32, -} - -// -// File header format. 
-// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageFileHeader { - pub machine: U16, - pub number_of_sections: U16, - pub time_date_stamp: U32, - pub pointer_to_symbol_table: U32, - pub number_of_symbols: U32, - pub size_of_optional_header: U16, - pub characteristics: U16, -} - -pub const IMAGE_SIZEOF_FILE_HEADER: usize = 20; - -/// Relocation info stripped from file. -pub const IMAGE_FILE_RELOCS_STRIPPED: u16 = 0x0001; -/// File is executable (i.e. no unresolved external references). -pub const IMAGE_FILE_EXECUTABLE_IMAGE: u16 = 0x0002; -/// Line numbers stripped from file. -pub const IMAGE_FILE_LINE_NUMS_STRIPPED: u16 = 0x0004; -/// Local symbols stripped from file. -pub const IMAGE_FILE_LOCAL_SYMS_STRIPPED: u16 = 0x0008; -/// Aggressively trim working set -pub const IMAGE_FILE_AGGRESIVE_WS_TRIM: u16 = 0x0010; -/// App can handle >2gb addresses -pub const IMAGE_FILE_LARGE_ADDRESS_AWARE: u16 = 0x0020; -/// Bytes of machine word are reversed. -pub const IMAGE_FILE_BYTES_REVERSED_LO: u16 = 0x0080; -/// 32 bit word machine. -pub const IMAGE_FILE_32BIT_MACHINE: u16 = 0x0100; -/// Debugging info stripped from file in .DBG file -pub const IMAGE_FILE_DEBUG_STRIPPED: u16 = 0x0200; -/// If Image is on removable media, copy and run from the swap file. -pub const IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP: u16 = 0x0400; -/// If Image is on Net, copy and run from the swap file. -pub const IMAGE_FILE_NET_RUN_FROM_SWAP: u16 = 0x0800; -/// System File. -pub const IMAGE_FILE_SYSTEM: u16 = 0x1000; -/// File is a DLL. -pub const IMAGE_FILE_DLL: u16 = 0x2000; -/// File should only be run on a UP machine -pub const IMAGE_FILE_UP_SYSTEM_ONLY: u16 = 0x4000; -/// Bytes of machine word are reversed. -pub const IMAGE_FILE_BYTES_REVERSED_HI: u16 = 0x8000; - -pub const IMAGE_FILE_MACHINE_UNKNOWN: u16 = 0; -/// Useful for indicating we want to interact with the host and not a WoW guest. -pub const IMAGE_FILE_MACHINE_TARGET_HOST: u16 = 0x0001; -/// Intel 386. 
-pub const IMAGE_FILE_MACHINE_I386: u16 = 0x014c; -/// MIPS little-endian, 0x160 big-endian -pub const IMAGE_FILE_MACHINE_R3000: u16 = 0x0162; -/// MIPS little-endian -pub const IMAGE_FILE_MACHINE_R4000: u16 = 0x0166; -/// MIPS little-endian -pub const IMAGE_FILE_MACHINE_R10000: u16 = 0x0168; -/// MIPS little-endian WCE v2 -pub const IMAGE_FILE_MACHINE_WCEMIPSV2: u16 = 0x0169; -/// Alpha_AXP -pub const IMAGE_FILE_MACHINE_ALPHA: u16 = 0x0184; -/// SH3 little-endian -pub const IMAGE_FILE_MACHINE_SH3: u16 = 0x01a2; -pub const IMAGE_FILE_MACHINE_SH3DSP: u16 = 0x01a3; -/// SH3E little-endian -pub const IMAGE_FILE_MACHINE_SH3E: u16 = 0x01a4; -/// SH4 little-endian -pub const IMAGE_FILE_MACHINE_SH4: u16 = 0x01a6; -/// SH5 -pub const IMAGE_FILE_MACHINE_SH5: u16 = 0x01a8; -/// ARM Little-Endian -pub const IMAGE_FILE_MACHINE_ARM: u16 = 0x01c0; -/// ARM Thumb/Thumb-2 Little-Endian -pub const IMAGE_FILE_MACHINE_THUMB: u16 = 0x01c2; -/// ARM Thumb-2 Little-Endian -pub const IMAGE_FILE_MACHINE_ARMNT: u16 = 0x01c4; -pub const IMAGE_FILE_MACHINE_AM33: u16 = 0x01d3; -/// IBM PowerPC Little-Endian -pub const IMAGE_FILE_MACHINE_POWERPC: u16 = 0x01F0; -pub const IMAGE_FILE_MACHINE_POWERPCFP: u16 = 0x01f1; -/// Intel 64 -pub const IMAGE_FILE_MACHINE_IA64: u16 = 0x0200; -/// MIPS -pub const IMAGE_FILE_MACHINE_MIPS16: u16 = 0x0266; -/// ALPHA64 -pub const IMAGE_FILE_MACHINE_ALPHA64: u16 = 0x0284; -/// MIPS -pub const IMAGE_FILE_MACHINE_MIPSFPU: u16 = 0x0366; -/// MIPS -pub const IMAGE_FILE_MACHINE_MIPSFPU16: u16 = 0x0466; -pub const IMAGE_FILE_MACHINE_AXP64: u16 = IMAGE_FILE_MACHINE_ALPHA64; -/// Infineon -pub const IMAGE_FILE_MACHINE_TRICORE: u16 = 0x0520; -pub const IMAGE_FILE_MACHINE_CEF: u16 = 0x0CEF; -/// EFI Byte Code -pub const IMAGE_FILE_MACHINE_EBC: u16 = 0x0EBC; -/// AMD64 (K8) -pub const IMAGE_FILE_MACHINE_AMD64: u16 = 0x8664; -/// M32R little-endian -pub const IMAGE_FILE_MACHINE_M32R: u16 = 0x9041; -/// ARM64 Little-Endian -pub const IMAGE_FILE_MACHINE_ARM64: u16 = 0xAA64; 
-pub const IMAGE_FILE_MACHINE_CEE: u16 = 0xC0EE; -/// RISCV32 -pub const IMAGE_FILE_MACHINE_RISCV32: u16 = 0x5032; -/// RISCV64 -pub const IMAGE_FILE_MACHINE_RISCV64: u16 = 0x5064; -/// RISCV128 -pub const IMAGE_FILE_MACHINE_RISCV128: u16 = 0x5128; - -// -// Directory format. -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDataDirectory { - pub virtual_address: U32, - pub size: U32, -} - -pub const IMAGE_NUMBEROF_DIRECTORY_ENTRIES: usize = 16; - -// -// Optional header format. -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageOptionalHeader32 { - // Standard fields. - pub magic: U16, - pub major_linker_version: u8, - pub minor_linker_version: u8, - pub size_of_code: U32, - pub size_of_initialized_data: U32, - pub size_of_uninitialized_data: U32, - pub address_of_entry_point: U32, - pub base_of_code: U32, - pub base_of_data: U32, - - // NT additional fields. - pub image_base: U32, - pub section_alignment: U32, - pub file_alignment: U32, - pub major_operating_system_version: U16, - pub minor_operating_system_version: U16, - pub major_image_version: U16, - pub minor_image_version: U16, - pub major_subsystem_version: U16, - pub minor_subsystem_version: U16, - pub win32_version_value: U32, - pub size_of_image: U32, - pub size_of_headers: U32, - pub check_sum: U32, - pub subsystem: U16, - pub dll_characteristics: U16, - pub size_of_stack_reserve: U32, - pub size_of_stack_commit: U32, - pub size_of_heap_reserve: U32, - pub size_of_heap_commit: U32, - pub loader_flags: U32, - pub number_of_rva_and_sizes: U32, - //pub data_directory: [ImageDataDirectory; IMAGE_NUMBEROF_DIRECTORY_ENTRIES], -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageRomOptionalHeader { - pub magic: U16, - pub major_linker_version: u8, - pub minor_linker_version: u8, - pub size_of_code: U32, - pub size_of_initialized_data: U32, - pub size_of_uninitialized_data: U32, - pub address_of_entry_point: U32, - pub base_of_code: U32, - pub base_of_data: U32, - pub 
base_of_bss: U32, - pub gpr_mask: U32, - pub cpr_mask: [U32; 4], - pub gp_value: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageOptionalHeader64 { - pub magic: U16, - pub major_linker_version: u8, - pub minor_linker_version: u8, - pub size_of_code: U32, - pub size_of_initialized_data: U32, - pub size_of_uninitialized_data: U32, - pub address_of_entry_point: U32, - pub base_of_code: U32, - pub image_base: U64, - pub section_alignment: U32, - pub file_alignment: U32, - pub major_operating_system_version: U16, - pub minor_operating_system_version: U16, - pub major_image_version: U16, - pub minor_image_version: U16, - pub major_subsystem_version: U16, - pub minor_subsystem_version: U16, - pub win32_version_value: U32, - pub size_of_image: U32, - pub size_of_headers: U32, - pub check_sum: U32, - pub subsystem: U16, - pub dll_characteristics: U16, - pub size_of_stack_reserve: U64, - pub size_of_stack_commit: U64, - pub size_of_heap_reserve: U64, - pub size_of_heap_commit: U64, - pub loader_flags: U32, - pub number_of_rva_and_sizes: U32, - //pub data_directory: [ImageDataDirectory; IMAGE_NUMBEROF_DIRECTORY_ENTRIES], -} - -pub const IMAGE_NT_OPTIONAL_HDR32_MAGIC: u16 = 0x10b; -pub const IMAGE_NT_OPTIONAL_HDR64_MAGIC: u16 = 0x20b; -pub const IMAGE_ROM_OPTIONAL_HDR_MAGIC: u16 = 0x107; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageNtHeaders64 { - pub signature: U32, - pub file_header: ImageFileHeader, - pub optional_header: ImageOptionalHeader64, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageNtHeaders32 { - pub signature: U32, - pub file_header: ImageFileHeader, - pub optional_header: ImageOptionalHeader32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageRomHeaders { - pub file_header: ImageFileHeader, - pub optional_header: ImageRomOptionalHeader, -} - -// Values for `ImageOptionalHeader*::subsystem`. - -/// Unknown subsystem. 
-pub const IMAGE_SUBSYSTEM_UNKNOWN: u16 = 0; -/// Image doesn't require a subsystem. -pub const IMAGE_SUBSYSTEM_NATIVE: u16 = 1; -/// Image runs in the Windows GUI subsystem. -pub const IMAGE_SUBSYSTEM_WINDOWS_GUI: u16 = 2; -/// Image runs in the Windows character subsystem. -pub const IMAGE_SUBSYSTEM_WINDOWS_CUI: u16 = 3; -/// image runs in the OS/2 character subsystem. -pub const IMAGE_SUBSYSTEM_OS2_CUI: u16 = 5; -/// image runs in the Posix character subsystem. -pub const IMAGE_SUBSYSTEM_POSIX_CUI: u16 = 7; -/// image is a native Win9x driver. -pub const IMAGE_SUBSYSTEM_NATIVE_WINDOWS: u16 = 8; -/// Image runs in the Windows CE subsystem. -pub const IMAGE_SUBSYSTEM_WINDOWS_CE_GUI: u16 = 9; -pub const IMAGE_SUBSYSTEM_EFI_APPLICATION: u16 = 10; -pub const IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER: u16 = 11; -pub const IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER: u16 = 12; -pub const IMAGE_SUBSYSTEM_EFI_ROM: u16 = 13; -pub const IMAGE_SUBSYSTEM_XBOX: u16 = 14; -pub const IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION: u16 = 16; -pub const IMAGE_SUBSYSTEM_XBOX_CODE_CATALOG: u16 = 17; - -// Values for `ImageOptionalHeader*::dll_characteristics`. - -// IMAGE_LIBRARY_PROCESS_INIT 0x0001 // Reserved. -// IMAGE_LIBRARY_PROCESS_TERM 0x0002 // Reserved. -// IMAGE_LIBRARY_THREAD_INIT 0x0004 // Reserved. -// IMAGE_LIBRARY_THREAD_TERM 0x0008 // Reserved. -/// Image can handle a high entropy 64-bit virtual address space. -pub const IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA: u16 = 0x0020; -/// DLL can move. -pub const IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE: u16 = 0x0040; -/// Code Integrity Image -pub const IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY: u16 = 0x0080; -/// Image is NX compatible -pub const IMAGE_DLLCHARACTERISTICS_NX_COMPAT: u16 = 0x0100; -/// Image understands isolation and doesn't want it -pub const IMAGE_DLLCHARACTERISTICS_NO_ISOLATION: u16 = 0x0200; -/// Image does not use SEH. 
No SE handler may reside in this image -pub const IMAGE_DLLCHARACTERISTICS_NO_SEH: u16 = 0x0400; -/// Do not bind this image. -pub const IMAGE_DLLCHARACTERISTICS_NO_BIND: u16 = 0x0800; -/// Image should execute in an AppContainer -pub const IMAGE_DLLCHARACTERISTICS_APPCONTAINER: u16 = 0x1000; -/// Driver uses WDM model -pub const IMAGE_DLLCHARACTERISTICS_WDM_DRIVER: u16 = 0x2000; -/// Image supports Control Flow Guard. -pub const IMAGE_DLLCHARACTERISTICS_GUARD_CF: u16 = 0x4000; -pub const IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE: u16 = 0x8000; - -// Indices for `ImageOptionalHeader*::data_directory`. - -/// Export Directory -pub const IMAGE_DIRECTORY_ENTRY_EXPORT: usize = 0; -/// Import Directory -pub const IMAGE_DIRECTORY_ENTRY_IMPORT: usize = 1; -/// Resource Directory -pub const IMAGE_DIRECTORY_ENTRY_RESOURCE: usize = 2; -/// Exception Directory -pub const IMAGE_DIRECTORY_ENTRY_EXCEPTION: usize = 3; -/// Security Directory -pub const IMAGE_DIRECTORY_ENTRY_SECURITY: usize = 4; -/// Base Relocation Table -pub const IMAGE_DIRECTORY_ENTRY_BASERELOC: usize = 5; -/// Debug Directory -pub const IMAGE_DIRECTORY_ENTRY_DEBUG: usize = 6; -// IMAGE_DIRECTORY_ENTRY_COPYRIGHT 7 // (X86 usage) -/// Architecture Specific Data -pub const IMAGE_DIRECTORY_ENTRY_ARCHITECTURE: usize = 7; -/// RVA of GP -pub const IMAGE_DIRECTORY_ENTRY_GLOBALPTR: usize = 8; -/// TLS Directory -pub const IMAGE_DIRECTORY_ENTRY_TLS: usize = 9; -/// Load Configuration Directory -pub const IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG: usize = 10; -/// Bound Import Directory in headers -pub const IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT: usize = 11; -/// Import Address Table -pub const IMAGE_DIRECTORY_ENTRY_IAT: usize = 12; -/// Delay Load Import Descriptors -pub const IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT: usize = 13; -/// COM Runtime descriptor -pub const IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR: usize = 14; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(C)] -pub struct Guid(pub [u8; 16]); - -impl Guid { - 
#[inline] - pub fn data1(self) -> U32 { - U32::from_bytes(self.0[0..4].try_into().unwrap()) - } - - #[inline] - pub fn data2(self) -> U16 { - U16::from_bytes(self.0[4..6].try_into().unwrap()) - } - - #[inline] - pub fn data3(self) -> U16 { - U16::from_bytes(self.0[6..8].try_into().unwrap()) - } - - #[inline] - pub fn data4(self) -> [u8; 8] { - self.0[8..16].try_into().unwrap() - } -} - -pub use Guid as ClsId; - -/// Non-COFF Object file header -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct AnonObjectHeader { - /// Must be IMAGE_FILE_MACHINE_UNKNOWN - pub sig1: U16, - /// Must be 0xffff - pub sig2: U16, - /// >= 1 (implies the ClsId field is present) - pub version: U16, - pub machine: U16, - pub time_date_stamp: U32, - /// Used to invoke CoCreateInstance - pub class_id: ClsId, - /// Size of data that follows the header - pub size_of_data: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct AnonObjectHeaderV2 { - /// Must be IMAGE_FILE_MACHINE_UNKNOWN - pub sig1: U16, - /// Must be 0xffff - pub sig2: U16, - /// >= 2 (implies the Flags field is present - otherwise V1) - pub version: U16, - pub machine: U16, - pub time_date_stamp: U32, - /// Used to invoke CoCreateInstance - pub class_id: ClsId, - /// Size of data that follows the header - pub size_of_data: U32, - /// 0x1 -> contains metadata - pub flags: U32, - /// Size of CLR metadata - pub meta_data_size: U32, - /// Offset of CLR metadata - pub meta_data_offset: U32, -} - -/// The required value of `AnonObjectHeaderBigobj::class_id`. 
-pub const ANON_OBJECT_HEADER_BIGOBJ_CLASS_ID: ClsId = ClsId([ - 0xC7, 0xA1, 0xBA, 0xD1, 0xEE, 0xBA, 0xA9, 0x4B, 0xAF, 0x20, 0xFA, 0xF6, 0x6A, 0xA4, 0xDC, 0xB8, -]); - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct AnonObjectHeaderBigobj { - /* same as ANON_OBJECT_HEADER_V2 */ - /// Must be IMAGE_FILE_MACHINE_UNKNOWN - pub sig1: U16, - /// Must be 0xffff - pub sig2: U16, - /// >= 2 (implies the Flags field is present) - pub version: U16, - /// Actual machine - IMAGE_FILE_MACHINE_xxx - pub machine: U16, - pub time_date_stamp: U32, - /// Must be `ANON_OBJECT_HEADER_BIGOBJ_CLASS_ID`. - pub class_id: ClsId, - /// Size of data that follows the header - pub size_of_data: U32, - /// 0x1 -> contains metadata - pub flags: U32, - /// Size of CLR metadata - pub meta_data_size: U32, - /// Offset of CLR metadata - pub meta_data_offset: U32, - - /* bigobj specifics */ - /// extended from WORD - pub number_of_sections: U32, - pub pointer_to_symbol_table: U32, - pub number_of_symbols: U32, -} - -pub const IMAGE_SIZEOF_SHORT_NAME: usize = 8; - -// -// Section header format. -// - -#[derive(Debug, Default, Clone, Copy)] -#[repr(C)] -pub struct ImageSectionHeader { - pub name: [u8; IMAGE_SIZEOF_SHORT_NAME], - pub virtual_size: U32, - pub virtual_address: U32, - pub size_of_raw_data: U32, - pub pointer_to_raw_data: U32, - pub pointer_to_relocations: U32, - pub pointer_to_linenumbers: U32, - pub number_of_relocations: U16, - pub number_of_linenumbers: U16, - pub characteristics: U32, -} - -pub const IMAGE_SIZEOF_SECTION_HEADER: usize = 40; - -// Values for `ImageSectionHeader::characteristics`. - -// IMAGE_SCN_TYPE_REG 0x00000000 // Reserved. -// IMAGE_SCN_TYPE_DSECT 0x00000001 // Reserved. -// IMAGE_SCN_TYPE_NOLOAD 0x00000002 // Reserved. -// IMAGE_SCN_TYPE_GROUP 0x00000004 // Reserved. -/// Reserved. -pub const IMAGE_SCN_TYPE_NO_PAD: u32 = 0x0000_0008; -// IMAGE_SCN_TYPE_COPY 0x00000010 // Reserved. - -/// Section contains code. 
-pub const IMAGE_SCN_CNT_CODE: u32 = 0x0000_0020; -/// Section contains initialized data. -pub const IMAGE_SCN_CNT_INITIALIZED_DATA: u32 = 0x0000_0040; -/// Section contains uninitialized data. -pub const IMAGE_SCN_CNT_UNINITIALIZED_DATA: u32 = 0x0000_0080; - -/// Reserved. -pub const IMAGE_SCN_LNK_OTHER: u32 = 0x0000_0100; -/// Section contains comments or some other type of information. -pub const IMAGE_SCN_LNK_INFO: u32 = 0x0000_0200; -// IMAGE_SCN_TYPE_OVER 0x00000400 // Reserved. -/// Section contents will not become part of image. -pub const IMAGE_SCN_LNK_REMOVE: u32 = 0x0000_0800; -/// Section contents comdat. -pub const IMAGE_SCN_LNK_COMDAT: u32 = 0x0000_1000; -// 0x00002000 // Reserved. -// IMAGE_SCN_MEM_PROTECTED - Obsolete 0x00004000 -/// Reset speculative exceptions handling bits in the TLB entries for this section. -pub const IMAGE_SCN_NO_DEFER_SPEC_EXC: u32 = 0x0000_4000; -/// Section content can be accessed relative to GP -pub const IMAGE_SCN_GPREL: u32 = 0x0000_8000; -pub const IMAGE_SCN_MEM_FARDATA: u32 = 0x0000_8000; -// IMAGE_SCN_MEM_SYSHEAP - Obsolete 0x00010000 -pub const IMAGE_SCN_MEM_PURGEABLE: u32 = 0x0002_0000; -pub const IMAGE_SCN_MEM_16BIT: u32 = 0x0002_0000; -pub const IMAGE_SCN_MEM_LOCKED: u32 = 0x0004_0000; -pub const IMAGE_SCN_MEM_PRELOAD: u32 = 0x0008_0000; - -pub const IMAGE_SCN_ALIGN_1BYTES: u32 = 0x0010_0000; -pub const IMAGE_SCN_ALIGN_2BYTES: u32 = 0x0020_0000; -pub const IMAGE_SCN_ALIGN_4BYTES: u32 = 0x0030_0000; -pub const IMAGE_SCN_ALIGN_8BYTES: u32 = 0x0040_0000; -/// Default alignment if no others are specified. 
-pub const IMAGE_SCN_ALIGN_16BYTES: u32 = 0x0050_0000; -pub const IMAGE_SCN_ALIGN_32BYTES: u32 = 0x0060_0000; -pub const IMAGE_SCN_ALIGN_64BYTES: u32 = 0x0070_0000; -pub const IMAGE_SCN_ALIGN_128BYTES: u32 = 0x0080_0000; -pub const IMAGE_SCN_ALIGN_256BYTES: u32 = 0x0090_0000; -pub const IMAGE_SCN_ALIGN_512BYTES: u32 = 0x00A0_0000; -pub const IMAGE_SCN_ALIGN_1024BYTES: u32 = 0x00B0_0000; -pub const IMAGE_SCN_ALIGN_2048BYTES: u32 = 0x00C0_0000; -pub const IMAGE_SCN_ALIGN_4096BYTES: u32 = 0x00D0_0000; -pub const IMAGE_SCN_ALIGN_8192BYTES: u32 = 0x00E0_0000; -// Unused 0x00F0_0000 -pub const IMAGE_SCN_ALIGN_MASK: u32 = 0x00F0_0000; - -/// Section contains extended relocations. -pub const IMAGE_SCN_LNK_NRELOC_OVFL: u32 = 0x0100_0000; -/// Section can be discarded. -pub const IMAGE_SCN_MEM_DISCARDABLE: u32 = 0x0200_0000; -/// Section is not cacheable. -pub const IMAGE_SCN_MEM_NOT_CACHED: u32 = 0x0400_0000; -/// Section is not pageable. -pub const IMAGE_SCN_MEM_NOT_PAGED: u32 = 0x0800_0000; -/// Section is shareable. -pub const IMAGE_SCN_MEM_SHARED: u32 = 0x1000_0000; -/// Section is executable. -pub const IMAGE_SCN_MEM_EXECUTE: u32 = 0x2000_0000; -/// Section is readable. -pub const IMAGE_SCN_MEM_READ: u32 = 0x4000_0000; -/// Section is writeable. -pub const IMAGE_SCN_MEM_WRITE: u32 = 0x8000_0000; - -// -// TLS Characteristic Flags -// -/// Tls index is scaled -pub const IMAGE_SCN_SCALE_INDEX: u32 = 0x0000_0001; - -// -// Symbol format. -// - -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageSymbol { - /// If first 4 bytes are 0, then second 4 bytes are offset into string table. - pub name: [u8; 8], - pub value: U32Bytes, - pub section_number: U16Bytes, - pub typ: U16Bytes, - pub storage_class: u8, - pub number_of_aux_symbols: u8, -} - -pub const IMAGE_SIZEOF_SYMBOL: usize = 18; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageSymbolBytes(pub [u8; IMAGE_SIZEOF_SYMBOL]); - -// This struct has alignment 1. 
-#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageSymbolEx { - /// If first 4 bytes are 0, then second 4 bytes are offset into string table. - pub name: [u8; 8], - pub value: U32Bytes, - pub section_number: I32Bytes, - pub typ: U16Bytes, - pub storage_class: u8, - pub number_of_aux_symbols: u8, -} - -pub const IMAGE_SIZEOF_SYMBOL_EX: usize = 20; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageSymbolExBytes(pub [u8; IMAGE_SIZEOF_SYMBOL_EX]); - -// Values for `ImageSymbol::section_number`. -// -// Symbols have a section number of the section in which they are -// defined. Otherwise, section numbers have the following meanings: - -/// Symbol is undefined or is common. -pub const IMAGE_SYM_UNDEFINED: i32 = 0; -/// Symbol is an absolute value. -pub const IMAGE_SYM_ABSOLUTE: i32 = -1; -/// Symbol is a special debug item. -pub const IMAGE_SYM_DEBUG: i32 = -2; -/// Values 0xFF00-0xFFFF are special -pub const IMAGE_SYM_SECTION_MAX: u16 = 0xFEFF; -pub const IMAGE_SYM_SECTION_MAX_EX: u32 = 0x7fff_ffff; - -// Values for `ImageSymbol::typ` (basic component). - -/// no type. -pub const IMAGE_SYM_TYPE_NULL: u16 = 0x0000; -pub const IMAGE_SYM_TYPE_VOID: u16 = 0x0001; -/// type character. -pub const IMAGE_SYM_TYPE_CHAR: u16 = 0x0002; -/// type short integer. -pub const IMAGE_SYM_TYPE_SHORT: u16 = 0x0003; -pub const IMAGE_SYM_TYPE_INT: u16 = 0x0004; -pub const IMAGE_SYM_TYPE_LONG: u16 = 0x0005; -pub const IMAGE_SYM_TYPE_FLOAT: u16 = 0x0006; -pub const IMAGE_SYM_TYPE_DOUBLE: u16 = 0x0007; -pub const IMAGE_SYM_TYPE_STRUCT: u16 = 0x0008; -pub const IMAGE_SYM_TYPE_UNION: u16 = 0x0009; -/// enumeration. -pub const IMAGE_SYM_TYPE_ENUM: u16 = 0x000A; -/// member of enumeration. 
-pub const IMAGE_SYM_TYPE_MOE: u16 = 0x000B; -pub const IMAGE_SYM_TYPE_BYTE: u16 = 0x000C; -pub const IMAGE_SYM_TYPE_WORD: u16 = 0x000D; -pub const IMAGE_SYM_TYPE_UINT: u16 = 0x000E; -pub const IMAGE_SYM_TYPE_DWORD: u16 = 0x000F; -pub const IMAGE_SYM_TYPE_PCODE: u16 = 0x8000; - -// Values for `ImageSymbol::typ` (derived component). - -/// no derived type. -pub const IMAGE_SYM_DTYPE_NULL: u16 = 0; -/// pointer. -pub const IMAGE_SYM_DTYPE_POINTER: u16 = 1; -/// function. -pub const IMAGE_SYM_DTYPE_FUNCTION: u16 = 2; -/// array. -pub const IMAGE_SYM_DTYPE_ARRAY: u16 = 3; - -// Values for `ImageSymbol::storage_class`. -pub const IMAGE_SYM_CLASS_END_OF_FUNCTION: u8 = 0xff; -pub const IMAGE_SYM_CLASS_NULL: u8 = 0x00; -pub const IMAGE_SYM_CLASS_AUTOMATIC: u8 = 0x01; -pub const IMAGE_SYM_CLASS_EXTERNAL: u8 = 0x02; -pub const IMAGE_SYM_CLASS_STATIC: u8 = 0x03; -pub const IMAGE_SYM_CLASS_REGISTER: u8 = 0x04; -pub const IMAGE_SYM_CLASS_EXTERNAL_DEF: u8 = 0x05; -pub const IMAGE_SYM_CLASS_LABEL: u8 = 0x06; -pub const IMAGE_SYM_CLASS_UNDEFINED_LABEL: u8 = 0x07; -pub const IMAGE_SYM_CLASS_MEMBER_OF_STRUCT: u8 = 0x08; -pub const IMAGE_SYM_CLASS_ARGUMENT: u8 = 0x09; -pub const IMAGE_SYM_CLASS_STRUCT_TAG: u8 = 0x0A; -pub const IMAGE_SYM_CLASS_MEMBER_OF_UNION: u8 = 0x0B; -pub const IMAGE_SYM_CLASS_UNION_TAG: u8 = 0x0C; -pub const IMAGE_SYM_CLASS_TYPE_DEFINITION: u8 = 0x0D; -pub const IMAGE_SYM_CLASS_UNDEFINED_STATIC: u8 = 0x0E; -pub const IMAGE_SYM_CLASS_ENUM_TAG: u8 = 0x0F; -pub const IMAGE_SYM_CLASS_MEMBER_OF_ENUM: u8 = 0x10; -pub const IMAGE_SYM_CLASS_REGISTER_PARAM: u8 = 0x11; -pub const IMAGE_SYM_CLASS_BIT_FIELD: u8 = 0x12; - -pub const IMAGE_SYM_CLASS_FAR_EXTERNAL: u8 = 0x44; - -pub const IMAGE_SYM_CLASS_BLOCK: u8 = 0x64; -pub const IMAGE_SYM_CLASS_FUNCTION: u8 = 0x65; -pub const IMAGE_SYM_CLASS_END_OF_STRUCT: u8 = 0x66; -pub const IMAGE_SYM_CLASS_FILE: u8 = 0x67; -// new -pub const IMAGE_SYM_CLASS_SECTION: u8 = 0x68; -pub const IMAGE_SYM_CLASS_WEAK_EXTERNAL: u8 = 0x69; - -pub 
const IMAGE_SYM_CLASS_CLR_TOKEN: u8 = 0x6B; - -// type packing constants - -pub const N_BTMASK: u16 = 0x000F; -pub const N_TMASK: u16 = 0x0030; -pub const N_TMASK1: u16 = 0x00C0; -pub const N_TMASK2: u16 = 0x00F0; -pub const N_BTSHFT: usize = 4; -pub const N_TSHIFT: usize = 2; - -pub const IMAGE_SYM_DTYPE_SHIFT: usize = N_BTSHFT; - -// -// Auxiliary entry format. -// - -// Used for both ImageSymbol and ImageSymbolEx (with padding). -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageAuxSymbolTokenDef { - /// IMAGE_AUX_SYMBOL_TYPE - pub aux_type: u8, - /// Must be 0 - pub reserved1: u8, - pub symbol_table_index: U32Bytes, - /// Must be 0 - pub reserved2: [u8; 12], -} - -pub const IMAGE_AUX_SYMBOL_TYPE_TOKEN_DEF: u16 = 1; - -/// Auxiliary symbol format 1: function definitions. -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageAuxSymbolFunction { - pub tag_index: U32Bytes, - pub total_size: U32Bytes, - pub pointer_to_linenumber: U32Bytes, - pub pointer_to_next_function: U32Bytes, - pub unused: [u8; 2], -} - -/// Auxiliary symbol format 2: .bf and .ef symbols. -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageAuxSymbolFunctionBeginEnd { - pub unused1: [u8; 4], - /// declaration line number - pub linenumber: U16Bytes, - pub unused2: [u8; 6], - pub pointer_to_next_function: U32Bytes, - pub unused3: [u8; 2], -} - -/// Auxiliary symbol format 3: weak externals. -/// -/// Used for both `ImageSymbol` and `ImageSymbolEx` (both with padding). -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageAuxSymbolWeak { - /// the weak extern default symbol index - pub weak_default_sym_index: U32Bytes, - pub weak_search_type: U32Bytes, -} - -/// Auxiliary symbol format 5: sections. -/// -/// Used for both `ImageSymbol` and `ImageSymbolEx` (with padding). -// This struct has alignment 1. 
-#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageAuxSymbolSection { - /// section length - pub length: U32Bytes, - /// number of relocation entries - pub number_of_relocations: U16Bytes, - /// number of line numbers - pub number_of_linenumbers: U16Bytes, - /// checksum for communal - pub check_sum: U32Bytes, - /// section number to associate with - pub number: U16Bytes, - /// communal selection type - pub selection: u8, - pub reserved: u8, - /// high bits of the section number - pub high_number: U16Bytes, -} - -// Used for both ImageSymbol and ImageSymbolEx (both with padding). -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageAuxSymbolCrc { - pub crc: U32Bytes, -} - -// -// Communal selection types. -// - -pub const IMAGE_COMDAT_SELECT_NODUPLICATES: u8 = 1; -pub const IMAGE_COMDAT_SELECT_ANY: u8 = 2; -pub const IMAGE_COMDAT_SELECT_SAME_SIZE: u8 = 3; -pub const IMAGE_COMDAT_SELECT_EXACT_MATCH: u8 = 4; -pub const IMAGE_COMDAT_SELECT_ASSOCIATIVE: u8 = 5; -pub const IMAGE_COMDAT_SELECT_LARGEST: u8 = 6; -pub const IMAGE_COMDAT_SELECT_NEWEST: u8 = 7; - -pub const IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY: u16 = 1; -pub const IMAGE_WEAK_EXTERN_SEARCH_LIBRARY: u16 = 2; -pub const IMAGE_WEAK_EXTERN_SEARCH_ALIAS: u16 = 3; -pub const IMAGE_WEAK_EXTERN_ANTI_DEPENDENCY: u16 = 4; - -// -// Relocation format. -// - -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageRelocation { - /// Also `RelocCount` when IMAGE_SCN_LNK_NRELOC_OVFL is set - pub virtual_address: U32Bytes, - pub symbol_table_index: U32Bytes, - pub typ: U16Bytes, -} - -// -// I386 relocation types. 
-// -/// Reference is absolute, no relocation is necessary -pub const IMAGE_REL_I386_ABSOLUTE: u16 = 0x0000; -/// Direct 16-bit reference to the symbols virtual address -pub const IMAGE_REL_I386_DIR16: u16 = 0x0001; -/// PC-relative 16-bit reference to the symbols virtual address -pub const IMAGE_REL_I386_REL16: u16 = 0x0002; -/// Direct 32-bit reference to the symbols virtual address -pub const IMAGE_REL_I386_DIR32: u16 = 0x0006; -/// Direct 32-bit reference to the symbols virtual address, base not included -pub const IMAGE_REL_I386_DIR32NB: u16 = 0x0007; -/// Direct 16-bit reference to the segment-selector bits of a 32-bit virtual address -pub const IMAGE_REL_I386_SEG12: u16 = 0x0009; -pub const IMAGE_REL_I386_SECTION: u16 = 0x000A; -pub const IMAGE_REL_I386_SECREL: u16 = 0x000B; -/// clr token -pub const IMAGE_REL_I386_TOKEN: u16 = 0x000C; -/// 7 bit offset from base of section containing target -pub const IMAGE_REL_I386_SECREL7: u16 = 0x000D; -/// PC-relative 32-bit reference to the symbols virtual address -pub const IMAGE_REL_I386_REL32: u16 = 0x0014; - -// -// MIPS relocation types. 
-// -/// Reference is absolute, no relocation is necessary -pub const IMAGE_REL_MIPS_ABSOLUTE: u16 = 0x0000; -pub const IMAGE_REL_MIPS_REFHALF: u16 = 0x0001; -pub const IMAGE_REL_MIPS_REFWORD: u16 = 0x0002; -pub const IMAGE_REL_MIPS_JMPADDR: u16 = 0x0003; -pub const IMAGE_REL_MIPS_REFHI: u16 = 0x0004; -pub const IMAGE_REL_MIPS_REFLO: u16 = 0x0005; -pub const IMAGE_REL_MIPS_GPREL: u16 = 0x0006; -pub const IMAGE_REL_MIPS_LITERAL: u16 = 0x0007; -pub const IMAGE_REL_MIPS_SECTION: u16 = 0x000A; -pub const IMAGE_REL_MIPS_SECREL: u16 = 0x000B; -/// Low 16-bit section relative reference (used for >32k TLS) -pub const IMAGE_REL_MIPS_SECRELLO: u16 = 0x000C; -/// High 16-bit section relative reference (used for >32k TLS) -pub const IMAGE_REL_MIPS_SECRELHI: u16 = 0x000D; -/// clr token -pub const IMAGE_REL_MIPS_TOKEN: u16 = 0x000E; -pub const IMAGE_REL_MIPS_JMPADDR16: u16 = 0x0010; -pub const IMAGE_REL_MIPS_REFWORDNB: u16 = 0x0022; -pub const IMAGE_REL_MIPS_PAIR: u16 = 0x0025; - -// -// Alpha Relocation types. 
-// -pub const IMAGE_REL_ALPHA_ABSOLUTE: u16 = 0x0000; -pub const IMAGE_REL_ALPHA_REFLONG: u16 = 0x0001; -pub const IMAGE_REL_ALPHA_REFQUAD: u16 = 0x0002; -pub const IMAGE_REL_ALPHA_GPREL32: u16 = 0x0003; -pub const IMAGE_REL_ALPHA_LITERAL: u16 = 0x0004; -pub const IMAGE_REL_ALPHA_LITUSE: u16 = 0x0005; -pub const IMAGE_REL_ALPHA_GPDISP: u16 = 0x0006; -pub const IMAGE_REL_ALPHA_BRADDR: u16 = 0x0007; -pub const IMAGE_REL_ALPHA_HINT: u16 = 0x0008; -pub const IMAGE_REL_ALPHA_INLINE_REFLONG: u16 = 0x0009; -pub const IMAGE_REL_ALPHA_REFHI: u16 = 0x000A; -pub const IMAGE_REL_ALPHA_REFLO: u16 = 0x000B; -pub const IMAGE_REL_ALPHA_PAIR: u16 = 0x000C; -pub const IMAGE_REL_ALPHA_MATCH: u16 = 0x000D; -pub const IMAGE_REL_ALPHA_SECTION: u16 = 0x000E; -pub const IMAGE_REL_ALPHA_SECREL: u16 = 0x000F; -pub const IMAGE_REL_ALPHA_REFLONGNB: u16 = 0x0010; -/// Low 16-bit section relative reference -pub const IMAGE_REL_ALPHA_SECRELLO: u16 = 0x0011; -/// High 16-bit section relative reference -pub const IMAGE_REL_ALPHA_SECRELHI: u16 = 0x0012; -/// High 16 bits of 48 bit reference -pub const IMAGE_REL_ALPHA_REFQ3: u16 = 0x0013; -/// Middle 16 bits of 48 bit reference -pub const IMAGE_REL_ALPHA_REFQ2: u16 = 0x0014; -/// Low 16 bits of 48 bit reference -pub const IMAGE_REL_ALPHA_REFQ1: u16 = 0x0015; -/// Low 16-bit GP relative reference -pub const IMAGE_REL_ALPHA_GPRELLO: u16 = 0x0016; -/// High 16-bit GP relative reference -pub const IMAGE_REL_ALPHA_GPRELHI: u16 = 0x0017; - -// -// IBM PowerPC relocation types. 
-// -/// NOP -pub const IMAGE_REL_PPC_ABSOLUTE: u16 = 0x0000; -/// 64-bit address -pub const IMAGE_REL_PPC_ADDR64: u16 = 0x0001; -/// 32-bit address -pub const IMAGE_REL_PPC_ADDR32: u16 = 0x0002; -/// 26-bit address, shifted left 2 (branch absolute) -pub const IMAGE_REL_PPC_ADDR24: u16 = 0x0003; -/// 16-bit address -pub const IMAGE_REL_PPC_ADDR16: u16 = 0x0004; -/// 16-bit address, shifted left 2 (load doubleword) -pub const IMAGE_REL_PPC_ADDR14: u16 = 0x0005; -/// 26-bit PC-relative offset, shifted left 2 (branch relative) -pub const IMAGE_REL_PPC_REL24: u16 = 0x0006; -/// 16-bit PC-relative offset, shifted left 2 (br cond relative) -pub const IMAGE_REL_PPC_REL14: u16 = 0x0007; -/// 16-bit offset from TOC base -pub const IMAGE_REL_PPC_TOCREL16: u16 = 0x0008; -/// 16-bit offset from TOC base, shifted left 2 (load doubleword) -pub const IMAGE_REL_PPC_TOCREL14: u16 = 0x0009; - -/// 32-bit addr w/o image base -pub const IMAGE_REL_PPC_ADDR32NB: u16 = 0x000A; -/// va of containing section (as in an image sectionhdr) -pub const IMAGE_REL_PPC_SECREL: u16 = 0x000B; -/// sectionheader number -pub const IMAGE_REL_PPC_SECTION: u16 = 0x000C; -/// substitute TOC restore instruction iff symbol is glue code -pub const IMAGE_REL_PPC_IFGLUE: u16 = 0x000D; -/// symbol is glue code; virtual address is TOC restore instruction -pub const IMAGE_REL_PPC_IMGLUE: u16 = 0x000E; -/// va of containing section (limited to 16 bits) -pub const IMAGE_REL_PPC_SECREL16: u16 = 0x000F; -pub const IMAGE_REL_PPC_REFHI: u16 = 0x0010; -pub const IMAGE_REL_PPC_REFLO: u16 = 0x0011; -pub const IMAGE_REL_PPC_PAIR: u16 = 0x0012; -/// Low 16-bit section relative reference (used for >32k TLS) -pub const IMAGE_REL_PPC_SECRELLO: u16 = 0x0013; -/// High 16-bit section relative reference (used for >32k TLS) -pub const IMAGE_REL_PPC_SECRELHI: u16 = 0x0014; -pub const IMAGE_REL_PPC_GPREL: u16 = 0x0015; -/// clr token -pub const IMAGE_REL_PPC_TOKEN: u16 = 0x0016; - -/// mask to isolate above values in 
IMAGE_RELOCATION.Type -pub const IMAGE_REL_PPC_TYPEMASK: u16 = 0x00FF; - -// Flag bits in `ImageRelocation::typ`. - -/// subtract reloc value rather than adding it -pub const IMAGE_REL_PPC_NEG: u16 = 0x0100; -/// fix branch prediction bit to predict branch taken -pub const IMAGE_REL_PPC_BRTAKEN: u16 = 0x0200; -/// fix branch prediction bit to predict branch not taken -pub const IMAGE_REL_PPC_BRNTAKEN: u16 = 0x0400; -/// toc slot defined in file (or, data in toc) -pub const IMAGE_REL_PPC_TOCDEFN: u16 = 0x0800; - -// -// Hitachi SH3 relocation types. -// -/// No relocation -pub const IMAGE_REL_SH3_ABSOLUTE: u16 = 0x0000; -/// 16 bit direct -pub const IMAGE_REL_SH3_DIRECT16: u16 = 0x0001; -/// 32 bit direct -pub const IMAGE_REL_SH3_DIRECT32: u16 = 0x0002; -/// 8 bit direct, -128..255 -pub const IMAGE_REL_SH3_DIRECT8: u16 = 0x0003; -/// 8 bit direct .W (0 ext.) -pub const IMAGE_REL_SH3_DIRECT8_WORD: u16 = 0x0004; -/// 8 bit direct .L (0 ext.) -pub const IMAGE_REL_SH3_DIRECT8_LONG: u16 = 0x0005; -/// 4 bit direct (0 ext.) -pub const IMAGE_REL_SH3_DIRECT4: u16 = 0x0006; -/// 4 bit direct .W (0 ext.) -pub const IMAGE_REL_SH3_DIRECT4_WORD: u16 = 0x0007; -/// 4 bit direct .L (0 ext.) 
-pub const IMAGE_REL_SH3_DIRECT4_LONG: u16 = 0x0008; -/// 8 bit PC relative .W -pub const IMAGE_REL_SH3_PCREL8_WORD: u16 = 0x0009; -/// 8 bit PC relative .L -pub const IMAGE_REL_SH3_PCREL8_LONG: u16 = 0x000A; -/// 12 LSB PC relative .W -pub const IMAGE_REL_SH3_PCREL12_WORD: u16 = 0x000B; -/// Start of EXE section -pub const IMAGE_REL_SH3_STARTOF_SECTION: u16 = 0x000C; -/// Size of EXE section -pub const IMAGE_REL_SH3_SIZEOF_SECTION: u16 = 0x000D; -/// Section table index -pub const IMAGE_REL_SH3_SECTION: u16 = 0x000E; -/// Offset within section -pub const IMAGE_REL_SH3_SECREL: u16 = 0x000F; -/// 32 bit direct not based -pub const IMAGE_REL_SH3_DIRECT32_NB: u16 = 0x0010; -/// GP-relative addressing -pub const IMAGE_REL_SH3_GPREL4_LONG: u16 = 0x0011; -/// clr token -pub const IMAGE_REL_SH3_TOKEN: u16 = 0x0012; -/// Offset from current instruction in longwords -/// if not NOMODE, insert the inverse of the low bit at bit 32 to select PTA/PTB -pub const IMAGE_REL_SHM_PCRELPT: u16 = 0x0013; -/// Low bits of 32-bit address -pub const IMAGE_REL_SHM_REFLO: u16 = 0x0014; -/// High bits of 32-bit address -pub const IMAGE_REL_SHM_REFHALF: u16 = 0x0015; -/// Low bits of relative reference -pub const IMAGE_REL_SHM_RELLO: u16 = 0x0016; -/// High bits of relative reference -pub const IMAGE_REL_SHM_RELHALF: u16 = 0x0017; -/// offset operand for relocation -pub const IMAGE_REL_SHM_PAIR: u16 = 0x0018; - -/// relocation ignores section mode -pub const IMAGE_REL_SH_NOMODE: u16 = 0x8000; - -/// No relocation required -pub const IMAGE_REL_ARM_ABSOLUTE: u16 = 0x0000; -/// 32 bit address -pub const IMAGE_REL_ARM_ADDR32: u16 = 0x0001; -/// 32 bit address w/o image base -pub const IMAGE_REL_ARM_ADDR32NB: u16 = 0x0002; -/// 24 bit offset << 2 & sign ext. 
-pub const IMAGE_REL_ARM_BRANCH24: u16 = 0x0003; -/// Thumb: 2 11 bit offsets -pub const IMAGE_REL_ARM_BRANCH11: u16 = 0x0004; -/// clr token -pub const IMAGE_REL_ARM_TOKEN: u16 = 0x0005; -/// GP-relative addressing (ARM) -pub const IMAGE_REL_ARM_GPREL12: u16 = 0x0006; -/// GP-relative addressing (Thumb) -pub const IMAGE_REL_ARM_GPREL7: u16 = 0x0007; -pub const IMAGE_REL_ARM_BLX24: u16 = 0x0008; -pub const IMAGE_REL_ARM_BLX11: u16 = 0x0009; -/// 32-bit relative address from byte following reloc -pub const IMAGE_REL_ARM_REL32: u16 = 0x000A; -/// Section table index -pub const IMAGE_REL_ARM_SECTION: u16 = 0x000E; -/// Offset within section -pub const IMAGE_REL_ARM_SECREL: u16 = 0x000F; -/// ARM: MOVW/MOVT -pub const IMAGE_REL_ARM_MOV32A: u16 = 0x0010; -/// ARM: MOVW/MOVT (deprecated) -pub const IMAGE_REL_ARM_MOV32: u16 = 0x0010; -/// Thumb: MOVW/MOVT -pub const IMAGE_REL_ARM_MOV32T: u16 = 0x0011; -/// Thumb: MOVW/MOVT (deprecated) -pub const IMAGE_REL_THUMB_MOV32: u16 = 0x0011; -/// Thumb: 32-bit conditional B -pub const IMAGE_REL_ARM_BRANCH20T: u16 = 0x0012; -/// Thumb: 32-bit conditional B (deprecated) -pub const IMAGE_REL_THUMB_BRANCH20: u16 = 0x0012; -/// Thumb: 32-bit B or BL -pub const IMAGE_REL_ARM_BRANCH24T: u16 = 0x0014; -/// Thumb: 32-bit B or BL (deprecated) -pub const IMAGE_REL_THUMB_BRANCH24: u16 = 0x0014; -/// Thumb: BLX immediate -pub const IMAGE_REL_ARM_BLX23T: u16 = 0x0015; -/// Thumb: BLX immediate (deprecated) -pub const IMAGE_REL_THUMB_BLX23: u16 = 0x0015; - -pub const IMAGE_REL_AM_ABSOLUTE: u16 = 0x0000; -pub const IMAGE_REL_AM_ADDR32: u16 = 0x0001; -pub const IMAGE_REL_AM_ADDR32NB: u16 = 0x0002; -pub const IMAGE_REL_AM_CALL32: u16 = 0x0003; -pub const IMAGE_REL_AM_FUNCINFO: u16 = 0x0004; -pub const IMAGE_REL_AM_REL32_1: u16 = 0x0005; -pub const IMAGE_REL_AM_REL32_2: u16 = 0x0006; -pub const IMAGE_REL_AM_SECREL: u16 = 0x0007; -pub const IMAGE_REL_AM_SECTION: u16 = 0x0008; -pub const IMAGE_REL_AM_TOKEN: u16 = 0x0009; - -// -// ARM64 relocations 
types. -// - -/// No relocation required -pub const IMAGE_REL_ARM64_ABSOLUTE: u16 = 0x0000; -/// 32 bit address. Review! do we need it? -pub const IMAGE_REL_ARM64_ADDR32: u16 = 0x0001; -/// 32 bit address w/o image base (RVA: for Data/PData/XData) -pub const IMAGE_REL_ARM64_ADDR32NB: u16 = 0x0002; -/// 26 bit offset << 2 & sign ext. for B & BL -pub const IMAGE_REL_ARM64_BRANCH26: u16 = 0x0003; -/// ADRP -pub const IMAGE_REL_ARM64_PAGEBASE_REL21: u16 = 0x0004; -/// ADR -pub const IMAGE_REL_ARM64_REL21: u16 = 0x0005; -/// ADD/ADDS (immediate) with zero shift, for page offset -pub const IMAGE_REL_ARM64_PAGEOFFSET_12A: u16 = 0x0006; -/// LDR (indexed, unsigned immediate), for page offset -pub const IMAGE_REL_ARM64_PAGEOFFSET_12L: u16 = 0x0007; -/// Offset within section -pub const IMAGE_REL_ARM64_SECREL: u16 = 0x0008; -/// ADD/ADDS (immediate) with zero shift, for bit 0:11 of section offset -pub const IMAGE_REL_ARM64_SECREL_LOW12A: u16 = 0x0009; -/// ADD/ADDS (immediate) with zero shift, for bit 12:23 of section offset -pub const IMAGE_REL_ARM64_SECREL_HIGH12A: u16 = 0x000A; -/// LDR (indexed, unsigned immediate), for bit 0:11 of section offset -pub const IMAGE_REL_ARM64_SECREL_LOW12L: u16 = 0x000B; -pub const IMAGE_REL_ARM64_TOKEN: u16 = 0x000C; -/// Section table index -pub const IMAGE_REL_ARM64_SECTION: u16 = 0x000D; -/// 64 bit address -pub const IMAGE_REL_ARM64_ADDR64: u16 = 0x000E; -/// 19 bit offset << 2 & sign ext. for conditional B -pub const IMAGE_REL_ARM64_BRANCH19: u16 = 0x000F; -/// TBZ/TBNZ -pub const IMAGE_REL_ARM64_BRANCH14: u16 = 0x0010; -/// 32-bit relative address from byte following reloc -pub const IMAGE_REL_ARM64_REL32: u16 = 0x0011; - -// -// x64 relocations -// -/// Reference is absolute, no relocation is necessary -pub const IMAGE_REL_AMD64_ABSOLUTE: u16 = 0x0000; -/// 64-bit address (VA). -pub const IMAGE_REL_AMD64_ADDR64: u16 = 0x0001; -/// 32-bit address (VA). 
-pub const IMAGE_REL_AMD64_ADDR32: u16 = 0x0002; -/// 32-bit address w/o image base (RVA). -pub const IMAGE_REL_AMD64_ADDR32NB: u16 = 0x0003; -/// 32-bit relative address from byte following reloc -pub const IMAGE_REL_AMD64_REL32: u16 = 0x0004; -/// 32-bit relative address from byte distance 1 from reloc -pub const IMAGE_REL_AMD64_REL32_1: u16 = 0x0005; -/// 32-bit relative address from byte distance 2 from reloc -pub const IMAGE_REL_AMD64_REL32_2: u16 = 0x0006; -/// 32-bit relative address from byte distance 3 from reloc -pub const IMAGE_REL_AMD64_REL32_3: u16 = 0x0007; -/// 32-bit relative address from byte distance 4 from reloc -pub const IMAGE_REL_AMD64_REL32_4: u16 = 0x0008; -/// 32-bit relative address from byte distance 5 from reloc -pub const IMAGE_REL_AMD64_REL32_5: u16 = 0x0009; -/// Section index -pub const IMAGE_REL_AMD64_SECTION: u16 = 0x000A; -/// 32 bit offset from base of section containing target -pub const IMAGE_REL_AMD64_SECREL: u16 = 0x000B; -/// 7 bit unsigned offset from base of section containing target -pub const IMAGE_REL_AMD64_SECREL7: u16 = 0x000C; -/// 32 bit metadata token -pub const IMAGE_REL_AMD64_TOKEN: u16 = 0x000D; -/// 32 bit signed span-dependent value emitted into object -pub const IMAGE_REL_AMD64_SREL32: u16 = 0x000E; -pub const IMAGE_REL_AMD64_PAIR: u16 = 0x000F; -/// 32 bit signed span-dependent value applied at link time -pub const IMAGE_REL_AMD64_SSPAN32: u16 = 0x0010; -pub const IMAGE_REL_AMD64_EHANDLER: u16 = 0x0011; -/// Indirect branch to an import -pub const IMAGE_REL_AMD64_IMPORT_BR: u16 = 0x0012; -/// Indirect call to an import -pub const IMAGE_REL_AMD64_IMPORT_CALL: u16 = 0x0013; -/// Indirect branch to a CFG check -pub const IMAGE_REL_AMD64_CFG_BR: u16 = 0x0014; -/// Indirect branch to a CFG check, with REX.W prefix -pub const IMAGE_REL_AMD64_CFG_BR_REX: u16 = 0x0015; -/// Indirect call to a CFG check -pub const IMAGE_REL_AMD64_CFG_CALL: u16 = 0x0016; -/// Indirect branch to a target in RAX (no CFG) -pub const 
IMAGE_REL_AMD64_INDIR_BR: u16 = 0x0017; -/// Indirect branch to a target in RAX, with REX.W prefix (no CFG) -pub const IMAGE_REL_AMD64_INDIR_BR_REX: u16 = 0x0018; -/// Indirect call to a target in RAX (no CFG) -pub const IMAGE_REL_AMD64_INDIR_CALL: u16 = 0x0019; -/// Indirect branch for a switch table using Reg 0 (RAX) -pub const IMAGE_REL_AMD64_INDIR_BR_SWITCHTABLE_FIRST: u16 = 0x0020; -/// Indirect branch for a switch table using Reg 15 (R15) -pub const IMAGE_REL_AMD64_INDIR_BR_SWITCHTABLE_LAST: u16 = 0x002F; - -// -// IA64 relocation types. -// -pub const IMAGE_REL_IA64_ABSOLUTE: u16 = 0x0000; -pub const IMAGE_REL_IA64_IMM14: u16 = 0x0001; -pub const IMAGE_REL_IA64_IMM22: u16 = 0x0002; -pub const IMAGE_REL_IA64_IMM64: u16 = 0x0003; -pub const IMAGE_REL_IA64_DIR32: u16 = 0x0004; -pub const IMAGE_REL_IA64_DIR64: u16 = 0x0005; -pub const IMAGE_REL_IA64_PCREL21B: u16 = 0x0006; -pub const IMAGE_REL_IA64_PCREL21M: u16 = 0x0007; -pub const IMAGE_REL_IA64_PCREL21F: u16 = 0x0008; -pub const IMAGE_REL_IA64_GPREL22: u16 = 0x0009; -pub const IMAGE_REL_IA64_LTOFF22: u16 = 0x000A; -pub const IMAGE_REL_IA64_SECTION: u16 = 0x000B; -pub const IMAGE_REL_IA64_SECREL22: u16 = 0x000C; -pub const IMAGE_REL_IA64_SECREL64I: u16 = 0x000D; -pub const IMAGE_REL_IA64_SECREL32: u16 = 0x000E; -// -pub const IMAGE_REL_IA64_DIR32NB: u16 = 0x0010; -pub const IMAGE_REL_IA64_SREL14: u16 = 0x0011; -pub const IMAGE_REL_IA64_SREL22: u16 = 0x0012; -pub const IMAGE_REL_IA64_SREL32: u16 = 0x0013; -pub const IMAGE_REL_IA64_UREL32: u16 = 0x0014; -/// This is always a BRL and never converted -pub const IMAGE_REL_IA64_PCREL60X: u16 = 0x0015; -/// If possible, convert to MBB bundle with NOP.B in slot 1 -pub const IMAGE_REL_IA64_PCREL60B: u16 = 0x0016; -/// If possible, convert to MFB bundle with NOP.F in slot 1 -pub const IMAGE_REL_IA64_PCREL60F: u16 = 0x0017; -/// If possible, convert to MIB bundle with NOP.I in slot 1 -pub const IMAGE_REL_IA64_PCREL60I: u16 = 0x0018; -/// If possible, convert to MMB 
bundle with NOP.M in slot 1 -pub const IMAGE_REL_IA64_PCREL60M: u16 = 0x0019; -pub const IMAGE_REL_IA64_IMMGPREL64: u16 = 0x001A; -/// clr token -pub const IMAGE_REL_IA64_TOKEN: u16 = 0x001B; -pub const IMAGE_REL_IA64_GPREL32: u16 = 0x001C; -pub const IMAGE_REL_IA64_ADDEND: u16 = 0x001F; - -// -// CEF relocation types. -// -/// Reference is absolute, no relocation is necessary -pub const IMAGE_REL_CEF_ABSOLUTE: u16 = 0x0000; -/// 32-bit address (VA). -pub const IMAGE_REL_CEF_ADDR32: u16 = 0x0001; -/// 64-bit address (VA). -pub const IMAGE_REL_CEF_ADDR64: u16 = 0x0002; -/// 32-bit address w/o image base (RVA). -pub const IMAGE_REL_CEF_ADDR32NB: u16 = 0x0003; -/// Section index -pub const IMAGE_REL_CEF_SECTION: u16 = 0x0004; -/// 32 bit offset from base of section containing target -pub const IMAGE_REL_CEF_SECREL: u16 = 0x0005; -/// 32 bit metadata token -pub const IMAGE_REL_CEF_TOKEN: u16 = 0x0006; - -// -// clr relocation types. -// -/// Reference is absolute, no relocation is necessary -pub const IMAGE_REL_CEE_ABSOLUTE: u16 = 0x0000; -/// 32-bit address (VA). -pub const IMAGE_REL_CEE_ADDR32: u16 = 0x0001; -/// 64-bit address (VA). -pub const IMAGE_REL_CEE_ADDR64: u16 = 0x0002; -/// 32-bit address w/o image base (RVA). -pub const IMAGE_REL_CEE_ADDR32NB: u16 = 0x0003; -/// Section index -pub const IMAGE_REL_CEE_SECTION: u16 = 0x0004; -/// 32 bit offset from base of section containing target -pub const IMAGE_REL_CEE_SECREL: u16 = 0x0005; -/// 32 bit metadata token -pub const IMAGE_REL_CEE_TOKEN: u16 = 0x0006; - -/// No relocation required -pub const IMAGE_REL_M32R_ABSOLUTE: u16 = 0x0000; -/// 32 bit address -pub const IMAGE_REL_M32R_ADDR32: u16 = 0x0001; -/// 32 bit address w/o image base -pub const IMAGE_REL_M32R_ADDR32NB: u16 = 0x0002; -/// 24 bit address -pub const IMAGE_REL_M32R_ADDR24: u16 = 0x0003; -/// GP relative addressing -pub const IMAGE_REL_M32R_GPREL16: u16 = 0x0004; -/// 24 bit offset << 2 & sign ext. 
-pub const IMAGE_REL_M32R_PCREL24: u16 = 0x0005; -/// 16 bit offset << 2 & sign ext. -pub const IMAGE_REL_M32R_PCREL16: u16 = 0x0006; -/// 8 bit offset << 2 & sign ext. -pub const IMAGE_REL_M32R_PCREL8: u16 = 0x0007; -/// 16 MSBs -pub const IMAGE_REL_M32R_REFHALF: u16 = 0x0008; -/// 16 MSBs; adj for LSB sign ext. -pub const IMAGE_REL_M32R_REFHI: u16 = 0x0009; -/// 16 LSBs -pub const IMAGE_REL_M32R_REFLO: u16 = 0x000A; -/// Link HI and LO -pub const IMAGE_REL_M32R_PAIR: u16 = 0x000B; -/// Section table index -pub const IMAGE_REL_M32R_SECTION: u16 = 0x000C; -/// 32 bit section relative reference -pub const IMAGE_REL_M32R_SECREL32: u16 = 0x000D; -/// clr token -pub const IMAGE_REL_M32R_TOKEN: u16 = 0x000E; - -/// No relocation required -pub const IMAGE_REL_EBC_ABSOLUTE: u16 = 0x0000; -/// 32 bit address w/o image base -pub const IMAGE_REL_EBC_ADDR32NB: u16 = 0x0001; -/// 32-bit relative address from byte following reloc -pub const IMAGE_REL_EBC_REL32: u16 = 0x0002; -/// Section table index -pub const IMAGE_REL_EBC_SECTION: u16 = 0x0003; -/// Offset within section -pub const IMAGE_REL_EBC_SECREL: u16 = 0x0004; - -/* -// TODO? 
-#define EXT_IMM64(Value, Address, Size, InstPos, ValPos) /* Intel-IA64-Filler */ \ - Value |= (((ULONGLONG)((*(Address) >> InstPos) & (((ULONGLONG)1 << Size) - 1))) << ValPos) // Intel-IA64-Filler - -#define INS_IMM64(Value, Address, Size, InstPos, ValPos) /* Intel-IA64-Filler */\ - *(PDWORD)Address = (*(PDWORD)Address & ~(((1 << Size) - 1) << InstPos)) | /* Intel-IA64-Filler */\ - ((DWORD)((((ULONGLONG)Value >> ValPos) & (((ULONGLONG)1 << Size) - 1))) << InstPos) // Intel-IA64-Filler -*/ - -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM7B_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM7B_SIZE_X: u16 = 7; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM7B_INST_WORD_POS_X: u16 = 4; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM7B_VAL_POS_X: u16 = 0; - -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM9D_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM9D_SIZE_X: u16 = 9; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM9D_INST_WORD_POS_X: u16 = 18; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM9D_VAL_POS_X: u16 = 7; - -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM5C_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM5C_SIZE_X: u16 = 5; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM5C_INST_WORD_POS_X: u16 = 13; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM5C_VAL_POS_X: u16 = 16; - -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IC_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IC_SIZE_X: u16 = 1; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IC_INST_WORD_POS_X: u16 = 12; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IC_VAL_POS_X: u16 = 21; - -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41A_INST_WORD_X: u16 = 1; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41A_SIZE_X: u16 = 10; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41A_INST_WORD_POS_X: u16 = 14; -/// Intel-IA64-Filler -pub const 
EMARCH_ENC_I17_IMM41A_VAL_POS_X: u16 = 22; - -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41B_INST_WORD_X: u16 = 1; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41B_SIZE_X: u16 = 8; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41B_INST_WORD_POS_X: u16 = 24; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41B_VAL_POS_X: u16 = 32; - -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41C_INST_WORD_X: u16 = 2; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41C_SIZE_X: u16 = 23; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41C_INST_WORD_POS_X: u16 = 0; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_IMM41C_VAL_POS_X: u16 = 40; - -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_SIGN_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_SIGN_SIZE_X: u16 = 1; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_SIGN_INST_WORD_POS_X: u16 = 27; -/// Intel-IA64-Filler -pub const EMARCH_ENC_I17_SIGN_VAL_POS_X: u16 = 63; - -/// Intel-IA64-Filler -pub const X3_OPCODE_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const X3_OPCODE_SIZE_X: u16 = 4; -/// Intel-IA64-Filler -pub const X3_OPCODE_INST_WORD_POS_X: u16 = 28; -/// Intel-IA64-Filler -pub const X3_OPCODE_SIGN_VAL_POS_X: u16 = 0; - -/// Intel-IA64-Filler -pub const X3_I_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const X3_I_SIZE_X: u16 = 1; -/// Intel-IA64-Filler -pub const X3_I_INST_WORD_POS_X: u16 = 27; -/// Intel-IA64-Filler -pub const X3_I_SIGN_VAL_POS_X: u16 = 59; - -/// Intel-IA64-Filler -pub const X3_D_WH_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const X3_D_WH_SIZE_X: u16 = 3; -/// Intel-IA64-Filler -pub const X3_D_WH_INST_WORD_POS_X: u16 = 24; -/// Intel-IA64-Filler -pub const X3_D_WH_SIGN_VAL_POS_X: u16 = 0; - -/// Intel-IA64-Filler -pub const X3_IMM20_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const X3_IMM20_SIZE_X: u16 = 20; -/// Intel-IA64-Filler -pub const X3_IMM20_INST_WORD_POS_X: u16 = 4; -/// Intel-IA64-Filler -pub const 
X3_IMM20_SIGN_VAL_POS_X: u16 = 0; - -/// Intel-IA64-Filler -pub const X3_IMM39_1_INST_WORD_X: u16 = 2; -/// Intel-IA64-Filler -pub const X3_IMM39_1_SIZE_X: u16 = 23; -/// Intel-IA64-Filler -pub const X3_IMM39_1_INST_WORD_POS_X: u16 = 0; -/// Intel-IA64-Filler -pub const X3_IMM39_1_SIGN_VAL_POS_X: u16 = 36; - -/// Intel-IA64-Filler -pub const X3_IMM39_2_INST_WORD_X: u16 = 1; -/// Intel-IA64-Filler -pub const X3_IMM39_2_SIZE_X: u16 = 16; -/// Intel-IA64-Filler -pub const X3_IMM39_2_INST_WORD_POS_X: u16 = 16; -/// Intel-IA64-Filler -pub const X3_IMM39_2_SIGN_VAL_POS_X: u16 = 20; - -/// Intel-IA64-Filler -pub const X3_P_INST_WORD_X: u16 = 3; -/// Intel-IA64-Filler -pub const X3_P_SIZE_X: u16 = 4; -/// Intel-IA64-Filler -pub const X3_P_INST_WORD_POS_X: u16 = 0; -/// Intel-IA64-Filler -pub const X3_P_SIGN_VAL_POS_X: u16 = 0; - -/// Intel-IA64-Filler -pub const X3_TMPLT_INST_WORD_X: u16 = 0; -/// Intel-IA64-Filler -pub const X3_TMPLT_SIZE_X: u16 = 4; -/// Intel-IA64-Filler -pub const X3_TMPLT_INST_WORD_POS_X: u16 = 0; -/// Intel-IA64-Filler -pub const X3_TMPLT_SIGN_VAL_POS_X: u16 = 0; - -/// Intel-IA64-Filler -pub const X3_BTYPE_QP_INST_WORD_X: u16 = 2; -/// Intel-IA64-Filler -pub const X3_BTYPE_QP_SIZE_X: u16 = 9; -/// Intel-IA64-Filler -pub const X3_BTYPE_QP_INST_WORD_POS_X: u16 = 23; -/// Intel-IA64-Filler -pub const X3_BTYPE_QP_INST_VAL_POS_X: u16 = 0; - -/// Intel-IA64-Filler -pub const X3_EMPTY_INST_WORD_X: u16 = 1; -/// Intel-IA64-Filler -pub const X3_EMPTY_SIZE_X: u16 = 2; -/// Intel-IA64-Filler -pub const X3_EMPTY_INST_WORD_POS_X: u16 = 14; -/// Intel-IA64-Filler -pub const X3_EMPTY_INST_VAL_POS_X: u16 = 0; - -// -// Line number format. -// - -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageLinenumber { - /// Symbol table index of function name if Linenumber is 0. - /// Otherwise virtual address of line number. - pub symbol_table_index_or_virtual_address: U32Bytes, - /// Line number. 
- pub linenumber: U16Bytes, -} - -// -// Based relocation format. -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageBaseRelocation { - pub virtual_address: U32, - pub size_of_block: U32, - // pub type_offset[1]: U16, -} - -// -// Based relocation types. -// - -pub const IMAGE_REL_BASED_ABSOLUTE: u16 = 0; -pub const IMAGE_REL_BASED_HIGH: u16 = 1; -pub const IMAGE_REL_BASED_LOW: u16 = 2; -pub const IMAGE_REL_BASED_HIGHLOW: u16 = 3; -pub const IMAGE_REL_BASED_HIGHADJ: u16 = 4; -pub const IMAGE_REL_BASED_MACHINE_SPECIFIC_5: u16 = 5; -pub const IMAGE_REL_BASED_RESERVED: u16 = 6; -pub const IMAGE_REL_BASED_MACHINE_SPECIFIC_7: u16 = 7; -pub const IMAGE_REL_BASED_MACHINE_SPECIFIC_8: u16 = 8; -pub const IMAGE_REL_BASED_MACHINE_SPECIFIC_9: u16 = 9; -pub const IMAGE_REL_BASED_DIR64: u16 = 10; - -// -// Platform-specific based relocation types. -// - -pub const IMAGE_REL_BASED_IA64_IMM64: u16 = 9; - -pub const IMAGE_REL_BASED_MIPS_JMPADDR: u16 = 5; -pub const IMAGE_REL_BASED_MIPS_JMPADDR16: u16 = 9; - -pub const IMAGE_REL_BASED_ARM_MOV32: u16 = 5; -pub const IMAGE_REL_BASED_THUMB_MOV32: u16 = 7; - -pub const IMAGE_REL_BASED_RISCV_HIGH20: u16 = 5; -pub const IMAGE_REL_BASED_RISCV_LOW12I: u16 = 7; -pub const IMAGE_REL_BASED_RISCV_LOW12S: u16 = 8; - -// -// Archive format. -// - -pub const IMAGE_ARCHIVE_START_SIZE: usize = 8; -pub const IMAGE_ARCHIVE_START: &[u8; 8] = b"!\n"; -pub const IMAGE_ARCHIVE_END: &[u8] = b"`\n"; -pub const IMAGE_ARCHIVE_PAD: &[u8] = b"\n"; -pub const IMAGE_ARCHIVE_LINKER_MEMBER: &[u8; 16] = b"/ "; -pub const IMAGE_ARCHIVE_LONGNAMES_MEMBER: &[u8; 16] = b"// "; -pub const IMAGE_ARCHIVE_HYBRIDMAP_MEMBER: &[u8; 16] = b"// "; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageArchiveMemberHeader { - /// File member name - `/' terminated. - pub name: [u8; 16], - /// File member date - decimal. - pub date: [u8; 12], - /// File member user id - decimal. - pub user_id: [u8; 6], - /// File member group id - decimal. 
- pub group_id: [u8; 6], - /// File member mode - octal. - pub mode: [u8; 8], - /// File member size - decimal. - pub size: [u8; 10], - /// String to end header. - pub end_header: [u8; 2], -} - -pub const IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR: u16 = 60; - -// -// DLL support. -// - -// -// Export Format -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageExportDirectory { - pub characteristics: U32, - pub time_date_stamp: U32, - pub major_version: U16, - pub minor_version: U16, - pub name: U32, - pub base: U32, - pub number_of_functions: U32, - pub number_of_names: U32, - /// RVA from base of image - pub address_of_functions: U32, - /// RVA from base of image - pub address_of_names: U32, - /// RVA from base of image - pub address_of_name_ordinals: U32, -} - -// -// Import Format -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageImportByName { - pub hint: U16, - //pub name: [i8; 1], -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageThunkData64(pub U64); -/* - union { -/// PBYTE - pub forwarder_string: U64, -/// PDWORD - pub function: U64, - pub ordinal: U64, -/// PIMAGE_IMPORT_BY_NAME - pub address_of_data: U64, - } u1; -*/ - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageThunkData32(pub U32); -/* - union { -/// PBYTE - pub forwarder_string: U32, -/// PDWORD - pub function: U32, - pub ordinal: U32, -/// PIMAGE_IMPORT_BY_NAME - pub address_of_data: U32, - } u1; -} -*/ - -pub const IMAGE_ORDINAL_FLAG64: u64 = 0x8000000000000000; -pub const IMAGE_ORDINAL_FLAG32: u32 = 0x80000000; - -/* -#define IMAGE_ORDINAL64(Ordinal) (Ordinal & 0xffff) -#define IMAGE_ORDINAL32(Ordinal) (Ordinal & 0xffff) -#define IMAGE_SNAP_BY_ORDINAL64(Ordinal) ((Ordinal & IMAGE_ORDINAL_FLAG64) != 0) -#define IMAGE_SNAP_BY_ORDINAL32(Ordinal) ((Ordinal & IMAGE_ORDINAL_FLAG32) != 0) - -*/ - -// -// Thread Local Storage -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageTlsDirectory64 { - pub start_address_of_raw_data: U64, - pub 
end_address_of_raw_data: U64, - /// PDWORD - pub address_of_index: U64, - /// PIMAGE_TLS_CALLBACK *; - pub address_of_call_backs: U64, - pub size_of_zero_fill: U32, - pub characteristics: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageTlsDirectory32 { - pub start_address_of_raw_data: U32, - pub end_address_of_raw_data: U32, - /// PDWORD - pub address_of_index: U32, - /// PIMAGE_TLS_CALLBACK * - pub address_of_call_backs: U32, - pub size_of_zero_fill: U32, - pub characteristics: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageImportDescriptor { - /// RVA to original unbound IAT (`ImageThunkData32`/`ImageThunkData64`) - /// 0 for terminating null import descriptor - pub original_first_thunk: U32Bytes, - /// 0 if not bound, - /// -1 if bound, and real date\time stamp - /// in IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT (new BIND) - /// O.W. date/time stamp of DLL bound to (Old BIND) - pub time_date_stamp: U32Bytes, - /// -1 if no forwarders - pub forwarder_chain: U32Bytes, - pub name: U32Bytes, - /// RVA to IAT (if bound this IAT has actual addresses) - pub first_thunk: U32Bytes, -} - -impl ImageImportDescriptor { - /// Tell whether this import descriptor is the null descriptor - /// (used to mark the end of the iterator array in a PE) - pub fn is_null(&self) -> bool { - self.original_first_thunk.get(LE) == 0 - && self.time_date_stamp.get(LE) == 0 - && self.forwarder_chain.get(LE) == 0 - && self.name.get(LE) == 0 - && self.first_thunk.get(LE) == 0 - } -} - -// -// New format import descriptors pointed to by DataDirectory[ IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT ] -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageBoundImportDescriptor { - pub time_date_stamp: U32, - pub offset_module_name: U16, - pub number_of_module_forwarder_refs: U16, - // Array of zero or more IMAGE_BOUND_FORWARDER_REF follows -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageBoundForwarderRef { - pub time_date_stamp: U32, - pub 
offset_module_name: U16, - pub reserved: U16, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDelayloadDescriptor { - pub attributes: U32, - - /// RVA to the name of the target library (NULL-terminate ASCII string) - pub dll_name_rva: U32, - /// RVA to the HMODULE caching location (PHMODULE) - pub module_handle_rva: U32, - /// RVA to the start of the IAT (PIMAGE_THUNK_DATA) - pub import_address_table_rva: U32, - /// RVA to the start of the name table (PIMAGE_THUNK_DATA::AddressOfData) - pub import_name_table_rva: U32, - /// RVA to an optional bound IAT - pub bound_import_address_table_rva: U32, - /// RVA to an optional unload info table - pub unload_information_table_rva: U32, - /// 0 if not bound, otherwise, date/time of the target DLL - pub time_date_stamp: U32, -} - -impl ImageDelayloadDescriptor { - /// Tell whether this delay-load import descriptor is the null descriptor - /// (used to mark the end of the iterator array in a PE) - pub fn is_null(&self) -> bool { - self.attributes.get(LE) == 0 - && self.dll_name_rva.get(LE) == 0 - && self.module_handle_rva.get(LE) == 0 - && self.import_address_table_rva.get(LE) == 0 - && self.import_name_table_rva.get(LE) == 0 - && self.bound_import_address_table_rva.get(LE) == 0 - && self.unload_information_table_rva.get(LE) == 0 - && self.time_date_stamp.get(LE) == 0 - } -} - -/// Delay load version 2 flag for `ImageDelayloadDescriptor::attributes`. -pub const IMAGE_DELAYLOAD_RVA_BASED: u32 = 0x8000_0000; - -// -// Resource Format. -// - -// -// Resource directory consists of two counts, following by a variable length -// array of directory entries. The first count is the number of entries at -// beginning of the array that have actual names associated with each entry. -// The entries are in ascending order, case insensitive strings. The second -// count is the number of entries that immediately follow the named entries. 
-// This second count identifies the number of entries that have 16-bit integer -// Ids as their name. These entries are also sorted in ascending order. -// -// This structure allows fast lookup by either name or number, but for any -// given resource entry only one form of lookup is supported, not both. -// This is consistent with the syntax of the .RC file and the .RES file. -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageResourceDirectory { - pub characteristics: U32, - pub time_date_stamp: U32, - pub major_version: U16, - pub minor_version: U16, - pub number_of_named_entries: U16, - pub number_of_id_entries: U16, -} - -pub const IMAGE_RESOURCE_NAME_IS_STRING: u32 = 0x8000_0000; -pub const IMAGE_RESOURCE_DATA_IS_DIRECTORY: u32 = 0x8000_0000; -// -// Each directory contains the 32-bit Name of the entry and an offset, -// relative to the beginning of the resource directory of the data associated -// with this directory entry. If the name of the entry is an actual text -// string instead of an integer Id, then the high order bit of the name field -// is set to one and the low order 31-bits are an offset, relative to the -// beginning of the resource directory of the string, which is of type -// IMAGE_RESOURCE_DIRECTORY_STRING. Otherwise the high bit is clear and the -// low-order 16-bits are the integer Id that identify this resource directory -// entry. If the directory entry is yet another resource directory (i.e. a -// subdirectory), then the high order bit of the offset field will be -// set to indicate this. Otherwise the high bit is clear and the offset -// field points to a resource data entry. -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageResourceDirectoryEntry { - pub name_or_id: U32, - pub offset_to_data_or_directory: U32, -} - -// -// For resource directory entries that have actual string names, the Name -// field of the directory entry points to an object of the following type. 
-// All of these string objects are stored together after the last resource -// directory entry and before the first resource data object. This minimizes -// the impact of these variable length objects on the alignment of the fixed -// size directory entry objects. -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageResourceDirectoryString { - pub length: U16, - //pub name_string: [i8; 1], -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageResourceDirStringU { - pub length: U16, - //pub name_string: [U16; 1], -} - -// -// Each resource data entry describes a leaf node in the resource directory -// tree. It contains an offset, relative to the beginning of the resource -// directory of the data for the resource, a size field that gives the number -// of bytes of data at that offset, a CodePage that should be used when -// decoding code point values within the resource data. Typically for new -// applications the code page would be the unicode code page. -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageResourceDataEntry { - /// RVA of the data. - pub offset_to_data: U32, - pub size: U32, - pub code_page: U32, - pub reserved: U32, -} - -// Resource type: https://docs.microsoft.com/en-us/windows/win32/menurc/resource-types - -/// ID for: Hardware-dependent cursor resource. -pub const RT_CURSOR: u16 = 1; -/// ID for: Bitmap resource. -pub const RT_BITMAP: u16 = 2; -/// ID for: Hardware-dependent icon resource. -pub const RT_ICON: u16 = 3; -/// ID for: Menu resource. -pub const RT_MENU: u16 = 4; -/// ID for: Dialog box. -pub const RT_DIALOG: u16 = 5; -/// ID for: String-table entry. -pub const RT_STRING: u16 = 6; -/// ID for: Font directory resource. -pub const RT_FONTDIR: u16 = 7; -/// ID for: Font resource. -pub const RT_FONT: u16 = 8; -/// ID for: Accelerator table. -pub const RT_ACCELERATOR: u16 = 9; -/// ID for: Application-defined resource (raw data). -pub const RT_RCDATA: u16 = 10; -/// ID for: Message-table entry. 
-pub const RT_MESSAGETABLE: u16 = 11; -/// ID for: Hardware-independent cursor resource. -pub const RT_GROUP_CURSOR: u16 = 12; -/// ID for: Hardware-independent icon resource. -pub const RT_GROUP_ICON: u16 = 14; -/// ID for: Version resource. -pub const RT_VERSION: u16 = 16; -/// ID for: Allows a resource editing tool to associate a string with an .rc file. -pub const RT_DLGINCLUDE: u16 = 17; -/// ID for: Plug and Play resource. -pub const RT_PLUGPLAY: u16 = 19; -/// ID for: VXD. -pub const RT_VXD: u16 = 20; -/// ID for: Animated cursor. -pub const RT_ANICURSOR: u16 = 21; -/// ID for: Animated icon. -pub const RT_ANIICON: u16 = 22; -/// ID for: HTML resource. -pub const RT_HTML: u16 = 23; -/// ID for: Side-by-Side Assembly Manifest. -pub const RT_MANIFEST: u16 = 24; - -// -// Code Integrity in loadconfig (CI) -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageLoadConfigCodeIntegrity { - /// Flags to indicate if CI information is available, etc. - pub flags: U16, - /// 0xFFFF means not available - pub catalog: U16, - pub catalog_offset: U32, - /// Additional bitmask to be defined later - pub reserved: U32, -} - -// -// Dynamic value relocation table in loadconfig -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDynamicRelocationTable { - pub version: U32, - pub size: U32, - // DynamicRelocations: [ImageDynamicRelocation; 0], -} - -// -// Dynamic value relocation entries following IMAGE_DYNAMIC_RELOCATION_TABLE -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDynamicRelocation32 { - pub symbol: U32, - pub base_reloc_size: U32, - // BaseRelocations: [ImageBaseRelocation; 0], -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDynamicRelocation64 { - pub symbol: U64, - pub base_reloc_size: U32, - // BaseRelocations: [ImageBaseRelocation; 0], -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDynamicRelocation32V2 { - pub header_size: U32, - pub fixup_info_size: U32, - pub symbol: U32, - pub 
symbol_group: U32, - pub flags: U32, - // ... variable length header fields - // pub fixup_info: [u8; fixup_info_size] -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDynamicRelocation64V2 { - pub header_size: U32, - pub fixup_info_size: U32, - pub symbol: U64, - pub symbol_group: U32, - pub flags: U32, - // ... variable length header fields - // pub fixup_info[u8; fixup_info_size] -} - -// -// Defined symbolic dynamic relocation entries. -// - -pub const IMAGE_DYNAMIC_RELOCATION_GUARD_RF_PROLOGUE: u32 = 0x0000_0001; -pub const IMAGE_DYNAMIC_RELOCATION_GUARD_RF_EPILOGUE: u32 = 0x0000_0002; -pub const IMAGE_DYNAMIC_RELOCATION_GUARD_IMPORT_CONTROL_TRANSFER: u32 = 0x0000_0003; -pub const IMAGE_DYNAMIC_RELOCATION_GUARD_INDIR_CONTROL_TRANSFER: u32 = 0x0000_0004; -pub const IMAGE_DYNAMIC_RELOCATION_GUARD_SWITCHTABLE_BRANCH: u32 = 0x0000_0005; - -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImagePrologueDynamicRelocationHeader { - pub prologue_byte_count: u8, - // pub prologue_bytes: [u8; prologue_byte_count], -} - -// This struct has alignment 1. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageEpilogueDynamicRelocationHeader { - pub epilogue_count: U32Bytes, - pub epilogue_byte_count: u8, - pub branch_descriptor_element_size: u8, - pub branch_descriptor_count: U16Bytes, - // pub branch_descriptors[...], - // pub branch_descriptor_bit_map[...], -} - -/* -// TODO? bitfields -// TODO: unaligned? -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageImportControlTransferDynamicRelocation { - DWORD PageRelativeOffset : 12; - DWORD IndirectCall : 1; - DWORD IATIndex : 19; -} - -// TODO: unaligned? -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageIndirControlTransferDynamicRelocation { - WORD PageRelativeOffset : 12; - WORD IndirectCall : 1; - WORD RexWPrefix : 1; - WORD CfgCheck : 1; - WORD Reserved : 1; -} - -// TODO: unaligned? 
-#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageSwitchtableBranchDynamicRelocation { - WORD PageRelativeOffset : 12; - WORD RegisterNumber : 4; -} -*/ - -// -// Load Configuration Directory Entry -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageLoadConfigDirectory32 { - pub size: U32, - pub time_date_stamp: U32, - pub major_version: U16, - pub minor_version: U16, - pub global_flags_clear: U32, - pub global_flags_set: U32, - pub critical_section_default_timeout: U32, - pub de_commit_free_block_threshold: U32, - pub de_commit_total_free_threshold: U32, - /// VA - pub lock_prefix_table: U32, - pub maximum_allocation_size: U32, - pub virtual_memory_threshold: U32, - pub process_heap_flags: U32, - pub process_affinity_mask: U32, - pub csd_version: U16, - pub dependent_load_flags: U16, - /// VA - pub edit_list: U32, - /// VA - pub security_cookie: U32, - /// VA - pub sehandler_table: U32, - pub sehandler_count: U32, - /// VA - pub guard_cf_check_function_pointer: U32, - /// VA - pub guard_cf_dispatch_function_pointer: U32, - /// VA - pub guard_cf_function_table: U32, - pub guard_cf_function_count: U32, - pub guard_flags: U32, - pub code_integrity: ImageLoadConfigCodeIntegrity, - /// VA - pub guard_address_taken_iat_entry_table: U32, - pub guard_address_taken_iat_entry_count: U32, - /// VA - pub guard_long_jump_target_table: U32, - pub guard_long_jump_target_count: U32, - /// VA - pub dynamic_value_reloc_table: U32, - pub chpe_metadata_pointer: U32, - /// VA - pub guard_rf_failure_routine: U32, - /// VA - pub guard_rf_failure_routine_function_pointer: U32, - pub dynamic_value_reloc_table_offset: U32, - pub dynamic_value_reloc_table_section: U16, - pub reserved2: U16, - /// VA - pub guard_rf_verify_stack_pointer_function_pointer: U32, - pub hot_patch_table_offset: U32, - pub reserved3: U32, - /// VA - pub enclave_configuration_pointer: U32, - /// VA - pub volatile_metadata_pointer: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub 
struct ImageLoadConfigDirectory64 { - pub size: U32, - pub time_date_stamp: U32, - pub major_version: U16, - pub minor_version: U16, - pub global_flags_clear: U32, - pub global_flags_set: U32, - pub critical_section_default_timeout: U32, - pub de_commit_free_block_threshold: U64, - pub de_commit_total_free_threshold: U64, - /// VA - pub lock_prefix_table: U64, - pub maximum_allocation_size: U64, - pub virtual_memory_threshold: U64, - pub process_affinity_mask: U64, - pub process_heap_flags: U32, - pub csd_version: U16, - pub dependent_load_flags: U16, - /// VA - pub edit_list: U64, - /// VA - pub security_cookie: U64, - /// VA - pub sehandler_table: U64, - pub sehandler_count: U64, - /// VA - pub guard_cf_check_function_pointer: U64, - /// VA - pub guard_cf_dispatch_function_pointer: U64, - /// VA - pub guard_cf_function_table: U64, - pub guard_cf_function_count: U64, - pub guard_flags: U32, - pub code_integrity: ImageLoadConfigCodeIntegrity, - /// VA - pub guard_address_taken_iat_entry_table: U64, - pub guard_address_taken_iat_entry_count: U64, - /// VA - pub guard_long_jump_target_table: U64, - pub guard_long_jump_target_count: U64, - /// VA - pub dynamic_value_reloc_table: U64, - /// VA - pub chpe_metadata_pointer: U64, - /// VA - pub guard_rf_failure_routine: U64, - /// VA - pub guard_rf_failure_routine_function_pointer: U64, - pub dynamic_value_reloc_table_offset: U32, - pub dynamic_value_reloc_table_section: U16, - pub reserved2: U16, - /// VA - pub guard_rf_verify_stack_pointer_function_pointer: U64, - pub hot_patch_table_offset: U32, - pub reserved3: U32, - /// VA - pub enclave_configuration_pointer: U64, - /// VA - pub volatile_metadata_pointer: U64, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageHotPatchInfo { - pub version: U32, - pub size: U32, - pub sequence_number: U32, - pub base_image_list: U32, - pub base_image_count: U32, - /// Version 2 and later - pub buffer_offset: U32, - /// Version 3 and later - pub extra_patch_size: U32, -} 
- -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageHotPatchBase { - pub sequence_number: U32, - pub flags: U32, - pub original_time_date_stamp: U32, - pub original_check_sum: U32, - pub code_integrity_info: U32, - pub code_integrity_size: U32, - pub patch_table: U32, - /// Version 2 and later - pub buffer_offset: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageHotPatchHashes { - pub sha256: [u8; 32], - pub sha1: [u8; 20], -} - -pub const IMAGE_HOT_PATCH_BASE_OBLIGATORY: u32 = 0x0000_0001; -pub const IMAGE_HOT_PATCH_BASE_CAN_ROLL_BACK: u32 = 0x0000_0002; - -pub const IMAGE_HOT_PATCH_CHUNK_INVERSE: u32 = 0x8000_0000; -pub const IMAGE_HOT_PATCH_CHUNK_OBLIGATORY: u32 = 0x4000_0000; -pub const IMAGE_HOT_PATCH_CHUNK_RESERVED: u32 = 0x3FF0_3000; -pub const IMAGE_HOT_PATCH_CHUNK_TYPE: u32 = 0x000F_C000; -pub const IMAGE_HOT_PATCH_CHUNK_SOURCE_RVA: u32 = 0x0000_8000; -pub const IMAGE_HOT_PATCH_CHUNK_TARGET_RVA: u32 = 0x0000_4000; -pub const IMAGE_HOT_PATCH_CHUNK_SIZE: u32 = 0x0000_0FFF; - -pub const IMAGE_HOT_PATCH_NONE: u32 = 0x0000_0000; -pub const IMAGE_HOT_PATCH_FUNCTION: u32 = 0x0001_C000; -pub const IMAGE_HOT_PATCH_ABSOLUTE: u32 = 0x0002_C000; -pub const IMAGE_HOT_PATCH_REL32: u32 = 0x0003_C000; -pub const IMAGE_HOT_PATCH_CALL_TARGET: u32 = 0x0004_4000; -pub const IMAGE_HOT_PATCH_INDIRECT: u32 = 0x0005_C000; -pub const IMAGE_HOT_PATCH_NO_CALL_TARGET: u32 = 0x0006_4000; -pub const IMAGE_HOT_PATCH_DYNAMIC_VALUE: u32 = 0x0007_8000; - -/// Module performs control flow integrity checks using system-supplied support -pub const IMAGE_GUARD_CF_INSTRUMENTED: u32 = 0x0000_0100; -/// Module performs control flow and write integrity checks -pub const IMAGE_GUARD_CFW_INSTRUMENTED: u32 = 0x0000_0200; -/// Module contains valid control flow target metadata -pub const IMAGE_GUARD_CF_FUNCTION_TABLE_PRESENT: u32 = 0x0000_0400; -/// Module does not make use of the /GS security cookie -pub const IMAGE_GUARD_SECURITY_COOKIE_UNUSED: u32 = 0x0000_0800; -/// 
Module supports read only delay load IAT -pub const IMAGE_GUARD_PROTECT_DELAYLOAD_IAT: u32 = 0x0000_1000; -/// Delayload import table in its own .didat section (with nothing else in it) that can be freely reprotected -pub const IMAGE_GUARD_DELAYLOAD_IAT_IN_ITS_OWN_SECTION: u32 = 0x0000_2000; -/// Module contains suppressed export information. -/// -/// This also infers that the address taken taken IAT table is also present in the load config. -pub const IMAGE_GUARD_CF_EXPORT_SUPPRESSION_INFO_PRESENT: u32 = 0x0000_4000; -/// Module enables suppression of exports -pub const IMAGE_GUARD_CF_ENABLE_EXPORT_SUPPRESSION: u32 = 0x0000_8000; -/// Module contains longjmp target information -pub const IMAGE_GUARD_CF_LONGJUMP_TABLE_PRESENT: u32 = 0x0001_0000; -/// Module contains return flow instrumentation and metadata -pub const IMAGE_GUARD_RF_INSTRUMENTED: u32 = 0x0002_0000; -/// Module requests that the OS enable return flow protection -pub const IMAGE_GUARD_RF_ENABLE: u32 = 0x0004_0000; -/// Module requests that the OS enable return flow protection in strict mode -pub const IMAGE_GUARD_RF_STRICT: u32 = 0x0008_0000; -/// Module was built with retpoline support -pub const IMAGE_GUARD_RETPOLINE_PRESENT: u32 = 0x0010_0000; - -/// Stride of Guard CF function table encoded in these bits (additional count of bytes per element) -pub const IMAGE_GUARD_CF_FUNCTION_TABLE_SIZE_MASK: u32 = 0xF000_0000; -/// Shift to right-justify Guard CF function table stride -pub const IMAGE_GUARD_CF_FUNCTION_TABLE_SIZE_SHIFT: u32 = 28; - -// -// GFIDS table entry flags. -// - -/// The containing GFID entry is suppressed -pub const IMAGE_GUARD_FLAG_FID_SUPPRESSED: u16 = 0x01; -/// The containing GFID entry is export suppressed -pub const IMAGE_GUARD_FLAG_EXPORT_SUPPRESSED: u16 = 0x02; - -// -// WIN CE Exception table format -// - -// -// Function table entry format. Function table is pointed to by the -// IMAGE_DIRECTORY_ENTRY_EXCEPTION directory entry. -// - -/* -// TODO? 
bitfields -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageCeRuntimeFunctionEntry { - pub func_start: U32, - DWORD PrologLen : 8; - DWORD FuncLen : 22; - DWORD ThirtyTwoBit : 1; - DWORD ExceptionFlag : 1; -} -*/ - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageArmRuntimeFunctionEntry { - pub begin_address: U32, - pub unwind_data: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageArm64RuntimeFunctionEntry { - pub begin_address: U32, - pub unwind_data: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageAlpha64RuntimeFunctionEntry { - pub begin_address: U64, - pub end_address: U64, - pub exception_handler: U64, - pub handler_data: U64, - pub prolog_end_address: U64, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageAlphaRuntimeFunctionEntry { - pub begin_address: U32, - pub end_address: U32, - pub exception_handler: U32, - pub handler_data: U32, - pub prolog_end_address: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageRuntimeFunctionEntry { - pub begin_address: U32, - pub end_address: U32, - pub unwind_info_address_or_data: U32, -} - -// -// Software enclave information -// - -pub const IMAGE_ENCLAVE_LONG_ID_LENGTH: usize = 32; -pub const IMAGE_ENCLAVE_SHORT_ID_LENGTH: usize = 16; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageEnclaveConfig32 { - pub size: U32, - pub minimum_required_config_size: U32, - pub policy_flags: U32, - pub number_of_imports: U32, - pub import_list: U32, - pub import_entry_size: U32, - pub family_id: [u8; IMAGE_ENCLAVE_SHORT_ID_LENGTH], - pub image_id: [u8; IMAGE_ENCLAVE_SHORT_ID_LENGTH], - pub image_version: U32, - pub security_version: U32, - pub enclave_size: U32, - pub number_of_threads: U32, - pub enclave_flags: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageEnclaveConfig64 { - pub size: U32, - pub minimum_required_config_size: U32, - pub policy_flags: U32, - pub number_of_imports: U32, - pub 
import_list: U32, - pub import_entry_size: U32, - pub family_id: [u8; IMAGE_ENCLAVE_SHORT_ID_LENGTH], - pub image_id: [u8; IMAGE_ENCLAVE_SHORT_ID_LENGTH], - pub image_version: U32, - pub security_version: U32, - pub enclave_size: U64, - pub number_of_threads: U32, - pub enclave_flags: U32, -} - -//pub const IMAGE_ENCLAVE_MINIMUM_CONFIG_SIZE: usize = FIELD_OFFSET(IMAGE_ENCLAVE_CONFIG, EnclaveFlags); - -pub const IMAGE_ENCLAVE_POLICY_DEBUGGABLE: u32 = 0x0000_0001; - -pub const IMAGE_ENCLAVE_FLAG_PRIMARY_IMAGE: u32 = 0x0000_0001; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageEnclaveImport { - pub match_type: U32, - pub minimum_security_version: U32, - pub unique_or_author_id: [u8; IMAGE_ENCLAVE_LONG_ID_LENGTH], - pub family_id: [u8; IMAGE_ENCLAVE_SHORT_ID_LENGTH], - pub image_id: [u8; IMAGE_ENCLAVE_SHORT_ID_LENGTH], - pub import_name: U32, - pub reserved: U32, -} - -pub const IMAGE_ENCLAVE_IMPORT_MATCH_NONE: u32 = 0x0000_0000; -pub const IMAGE_ENCLAVE_IMPORT_MATCH_UNIQUE_ID: u32 = 0x0000_0001; -pub const IMAGE_ENCLAVE_IMPORT_MATCH_AUTHOR_ID: u32 = 0x0000_0002; -pub const IMAGE_ENCLAVE_IMPORT_MATCH_FAMILY_ID: u32 = 0x0000_0003; -pub const IMAGE_ENCLAVE_IMPORT_MATCH_IMAGE_ID: u32 = 0x0000_0004; - -// -// Debug Format -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDebugDirectory { - pub characteristics: U32, - pub time_date_stamp: U32, - pub major_version: U16, - pub minor_version: U16, - pub typ: U32, - pub size_of_data: U32, - pub address_of_raw_data: U32, - pub pointer_to_raw_data: U32, -} - -pub const IMAGE_DEBUG_TYPE_UNKNOWN: u32 = 0; -pub const IMAGE_DEBUG_TYPE_COFF: u32 = 1; -pub const IMAGE_DEBUG_TYPE_CODEVIEW: u32 = 2; -pub const IMAGE_DEBUG_TYPE_FPO: u32 = 3; -pub const IMAGE_DEBUG_TYPE_MISC: u32 = 4; -pub const IMAGE_DEBUG_TYPE_EXCEPTION: u32 = 5; -pub const IMAGE_DEBUG_TYPE_FIXUP: u32 = 6; -pub const IMAGE_DEBUG_TYPE_OMAP_TO_SRC: u32 = 7; -pub const IMAGE_DEBUG_TYPE_OMAP_FROM_SRC: u32 = 8; -pub const 
IMAGE_DEBUG_TYPE_BORLAND: u32 = 9; -pub const IMAGE_DEBUG_TYPE_RESERVED10: u32 = 10; -pub const IMAGE_DEBUG_TYPE_CLSID: u32 = 11; -pub const IMAGE_DEBUG_TYPE_VC_FEATURE: u32 = 12; -pub const IMAGE_DEBUG_TYPE_POGO: u32 = 13; -pub const IMAGE_DEBUG_TYPE_ILTCG: u32 = 14; -pub const IMAGE_DEBUG_TYPE_MPX: u32 = 15; -pub const IMAGE_DEBUG_TYPE_REPRO: u32 = 16; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageCoffSymbolsHeader { - pub number_of_symbols: U32, - pub lva_to_first_symbol: U32, - pub number_of_linenumbers: U32, - pub lva_to_first_linenumber: U32, - pub rva_to_first_byte_of_code: U32, - pub rva_to_last_byte_of_code: U32, - pub rva_to_first_byte_of_data: U32, - pub rva_to_last_byte_of_data: U32, -} - -pub const FRAME_FPO: u16 = 0; -pub const FRAME_TRAP: u16 = 1; -pub const FRAME_TSS: u16 = 2; -pub const FRAME_NONFPO: u16 = 3; - -/* -// TODO? bitfields -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FpoData { -/// offset 1st byte of function code - pub ul_off_start: U32, -/// # bytes in function - pub cb_proc_size: U32, -/// # bytes in locals/4 - pub cdw_locals: U32, -/// # bytes in params/4 - pub cdw_params: U16, -/// # bytes in prolog - WORD cbProlog : 8; -/// # regs saved - WORD cbRegs : 3; -/// TRUE if SEH in func - WORD fHasSEH : 1; -/// TRUE if EBP has been allocated - WORD fUseBP : 1; -/// reserved for future use - WORD reserved : 1; -/// frame type - WORD cbFrame : 2; -} -pub const SIZEOF_RFPO_DATA: usize = 16; -*/ - -pub const IMAGE_DEBUG_MISC_EXENAME: u16 = 1; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageDebugMisc { - /// type of misc data, see defines - pub data_type: U32, - /// total length of record, rounded to four byte multiple. - pub length: U32, - /// TRUE if data is unicode string - pub unicode: u8, - pub reserved: [u8; 3], - // Actual data - //pub data: [u8; 1], -} - -// -// Function table extracted from MIPS/ALPHA/IA64 images. Does not contain -// information needed only for runtime support. 
Just those fields for -// each entry needed by a debugger. -// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageFunctionEntry { - pub starting_address: U32, - pub ending_address: U32, - pub end_of_prologue: U32, -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageFunctionEntry64 { - pub starting_address: U64, - pub ending_address: U64, - pub end_of_prologue_or_unwind_info_address: U64, -} - -// -// Debugging information can be stripped from an image file and placed -// in a separate .DBG file, whose file name part is the same as the -// image file name part (e.g. symbols for CMD.EXE could be stripped -// and placed in CMD.DBG). This is indicated by the IMAGE_FILE_DEBUG_STRIPPED -// flag in the Characteristics field of the file header. The beginning of -// the .DBG file contains the following structure which captures certain -// information from the image file. This allows a debug to proceed even if -// the original image file is not accessible. This header is followed by -// zero of more IMAGE_SECTION_HEADER structures, followed by zero or more -// IMAGE_DEBUG_DIRECTORY structures. The latter structures and those in -// the image file contain file offsets relative to the beginning of the -// .DBG file. -// -// If symbols have been stripped from an image, the IMAGE_DEBUG_MISC structure -// is left in the image file, but not mapped. This allows a debugger to -// compute the name of the .DBG file, from the name of the image in the -// IMAGE_DEBUG_MISC structure. 
-// - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageSeparateDebugHeader { - pub signature: U16, - pub flags: U16, - pub machine: U16, - pub characteristics: U16, - pub time_date_stamp: U32, - pub check_sum: U32, - pub image_base: U32, - pub size_of_image: U32, - pub number_of_sections: U32, - pub exported_names_size: U32, - pub debug_directory_size: U32, - pub section_alignment: U32, - pub reserved: [U32; 2], -} - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct NonPagedDebugInfo { - pub signature: U16, - pub flags: U16, - pub size: U32, - pub machine: U16, - pub characteristics: U16, - pub time_date_stamp: U32, - pub check_sum: U32, - pub size_of_image: U32, - pub image_base: U64, - //debug_directory_size - //ImageDebugDirectory -} - -pub const IMAGE_SEPARATE_DEBUG_SIGNATURE: u16 = 0x4944; -pub const NON_PAGED_DEBUG_SIGNATURE: u16 = 0x494E; - -pub const IMAGE_SEPARATE_DEBUG_FLAGS_MASK: u16 = 0x8000; -/// when DBG was updated, the old checksum didn't match. -pub const IMAGE_SEPARATE_DEBUG_MISMATCH: u16 = 0x8000; - -// -// The .arch section is made up of headers, each describing an amask position/value -// pointing to an array of IMAGE_ARCHITECTURE_ENTRY's. Each "array" (both the header -// and entry arrays) are terminiated by a quadword of 0xffffffffL. -// -// NOTE: There may be quadwords of 0 sprinkled around and must be skipped. -// - -/* -// TODO? 
bitfields -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageArchitectureHeader { - /// 1 -> code section depends on mask bit - /// 0 -> new instruction depends on mask bit - unsigned int AmaskValue: 1; - /// MBZ - int :7; - /// Amask bit in question for this fixup - unsigned int AmaskShift: 8; - /// MBZ - int :16; - /// RVA into .arch section to array of ARCHITECTURE_ENTRY's - pub first_entry_rva: U32, -} -*/ - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageArchitectureEntry { - /// RVA of instruction to fixup - pub fixup_inst_rva: U32, - /// fixup instruction (see alphaops.h) - pub new_inst: U32, -} - -// The following structure defines the new import object. Note the values of the first two fields, -// which must be set as stated in order to differentiate old and new import members. -// Following this structure, the linker emits two null-terminated strings used to recreate the -// import at the time of use. The first string is the import's name, the second is the dll's name. - -pub const IMPORT_OBJECT_HDR_SIG2: u16 = 0xffff; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImportObjectHeader { - /// Must be IMAGE_FILE_MACHINE_UNKNOWN - pub sig1: U16, - /// Must be IMPORT_OBJECT_HDR_SIG2. 
- pub sig2: U16, - pub version: U16, - pub machine: U16, - /// Time/date stamp - pub time_date_stamp: U32, - /// particularly useful for incremental links - pub size_of_data: U32, - - /// if grf & IMPORT_OBJECT_ORDINAL - pub ordinal_or_hint: U16, - - // WORD Type : 2; - // WORD NameType : 3; - // WORD Reserved : 11; - pub name_type: U16, -} - -pub const IMPORT_OBJECT_TYPE_MASK: u16 = 0b11; -pub const IMPORT_OBJECT_TYPE_SHIFT: u16 = 0; -pub const IMPORT_OBJECT_CODE: u16 = 0; -pub const IMPORT_OBJECT_DATA: u16 = 1; -pub const IMPORT_OBJECT_CONST: u16 = 2; - -pub const IMPORT_OBJECT_NAME_MASK: u16 = 0b111; -pub const IMPORT_OBJECT_NAME_SHIFT: u16 = 2; -/// Import by ordinal -pub const IMPORT_OBJECT_ORDINAL: u16 = 0; -/// Import name == public symbol name. -pub const IMPORT_OBJECT_NAME: u16 = 1; -/// Import name == public symbol name skipping leading ?, @, or optionally _. -pub const IMPORT_OBJECT_NAME_NO_PREFIX: u16 = 2; -/// Import name == public symbol name skipping leading ?, @, or optionally _ and truncating at first @. -pub const IMPORT_OBJECT_NAME_UNDECORATE: u16 = 3; -/// Import name == a name is explicitly provided after the DLL name. -pub const IMPORT_OBJECT_NAME_EXPORTAS: u16 = 4; - -// COM+ Header entry point flags. -pub const COMIMAGE_FLAGS_ILONLY: u32 = 0x0000_0001; -pub const COMIMAGE_FLAGS_32BITREQUIRED: u32 = 0x0000_0002; -pub const COMIMAGE_FLAGS_IL_LIBRARY: u32 = 0x0000_0004; -pub const COMIMAGE_FLAGS_STRONGNAMESIGNED: u32 = 0x0000_0008; -pub const COMIMAGE_FLAGS_NATIVE_ENTRYPOINT: u32 = 0x0000_0010; -pub const COMIMAGE_FLAGS_TRACKDEBUGDATA: u32 = 0x0001_0000; -pub const COMIMAGE_FLAGS_32BITPREFERRED: u32 = 0x0002_0000; - -// Version flags for image. -pub const COR_VERSION_MAJOR_V2: u16 = 2; -pub const COR_VERSION_MAJOR: u16 = COR_VERSION_MAJOR_V2; -pub const COR_VERSION_MINOR: u16 = 5; -pub const COR_DELETED_NAME_LENGTH: usize = 8; -pub const COR_VTABLEGAP_NAME_LENGTH: usize = 8; - -// Maximum size of a NativeType descriptor. 
-pub const NATIVE_TYPE_MAX_CB: u16 = 1; -pub const COR_ILMETHOD_SECT_SMALL_MAX_DATASIZE: u16 = 0xFF; - -// Consts for the MIH FLAGS -pub const IMAGE_COR_MIH_METHODRVA: u16 = 0x01; -pub const IMAGE_COR_MIH_EHRVA: u16 = 0x02; -pub const IMAGE_COR_MIH_BASICBLOCK: u16 = 0x08; - -// V-table constants -/// V-table slots are 32-bits in size. -pub const COR_VTABLE_32BIT: u16 = 0x01; -/// V-table slots are 64-bits in size. -pub const COR_VTABLE_64BIT: u16 = 0x02; -/// If set, transition from unmanaged. -pub const COR_VTABLE_FROM_UNMANAGED: u16 = 0x04; -/// If set, transition from unmanaged with keeping the current appdomain. -pub const COR_VTABLE_FROM_UNMANAGED_RETAIN_APPDOMAIN: u16 = 0x08; -/// Call most derived method described by -pub const COR_VTABLE_CALL_MOST_DERIVED: u16 = 0x10; - -// EATJ constants -/// Size of a jump thunk reserved range. -pub const IMAGE_COR_EATJ_THUNK_SIZE: usize = 32; - -// Max name lengths -pub const MAX_CLASS_NAME: usize = 1024; -pub const MAX_PACKAGE_NAME: usize = 1024; - -// CLR 2.0 header structure. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ImageCor20Header { - // Header versioning - pub cb: U32, - pub major_runtime_version: U16, - pub minor_runtime_version: U16, - - // Symbol table and startup information - pub meta_data: ImageDataDirectory, - pub flags: U32, - - // If COMIMAGE_FLAGS_NATIVE_ENTRYPOINT is not set, EntryPointToken represents a managed entrypoint. - // If COMIMAGE_FLAGS_NATIVE_ENTRYPOINT is set, EntryPointRVA represents an RVA to a native entrypoint. 
- pub entry_point_token_or_rva: U32, - - // Binding information - pub resources: ImageDataDirectory, - pub strong_name_signature: ImageDataDirectory, - - // Regular fixup and binding information - pub code_manager_table: ImageDataDirectory, - pub vtable_fixups: ImageDataDirectory, - pub export_address_table_jumps: ImageDataDirectory, - - // Precompiled image info (internal use only - set to zero) - pub managed_native_header: ImageDataDirectory, -} - -unsafe_impl_pod!( - ImageDosHeader, - ImageOs2Header, - ImageVxdHeader, - ImageFileHeader, - ImageDataDirectory, - ImageOptionalHeader32, - ImageRomOptionalHeader, - ImageOptionalHeader64, - ImageNtHeaders64, - ImageNtHeaders32, - ImageRomHeaders, - Guid, - AnonObjectHeader, - AnonObjectHeaderV2, - AnonObjectHeaderBigobj, - ImageSectionHeader, - ImageSymbol, - ImageSymbolBytes, - ImageSymbolEx, - ImageSymbolExBytes, - ImageAuxSymbolTokenDef, - ImageAuxSymbolFunction, - ImageAuxSymbolFunctionBeginEnd, - ImageAuxSymbolWeak, - ImageAuxSymbolSection, - ImageAuxSymbolCrc, - ImageRelocation, - ImageLinenumber, - ImageBaseRelocation, - ImageArchiveMemberHeader, - ImageExportDirectory, - ImageImportByName, - ImageThunkData64, - ImageThunkData32, - ImageTlsDirectory64, - ImageTlsDirectory32, - ImageImportDescriptor, - ImageBoundImportDescriptor, - ImageBoundForwarderRef, - ImageDelayloadDescriptor, - ImageResourceDirectory, - ImageResourceDirectoryEntry, - ImageResourceDirectoryString, - ImageResourceDirStringU, - ImageResourceDataEntry, - ImageLoadConfigCodeIntegrity, - ImageDynamicRelocationTable, - ImageDynamicRelocation32, - ImageDynamicRelocation64, - ImageDynamicRelocation32V2, - ImageDynamicRelocation64V2, - ImagePrologueDynamicRelocationHeader, - ImageEpilogueDynamicRelocationHeader, - //ImageImportControlTransferDynamicRelocation, - //ImageIndirControlTransferDynamicRelocation, - //ImageSwitchtableBranchDynamicRelocation, - ImageLoadConfigDirectory32, - ImageLoadConfigDirectory64, - ImageHotPatchInfo, - 
ImageHotPatchBase, - ImageHotPatchHashes, - //ImageCeRuntimeFunctionEntry, - ImageArmRuntimeFunctionEntry, - ImageArm64RuntimeFunctionEntry, - ImageAlpha64RuntimeFunctionEntry, - ImageAlphaRuntimeFunctionEntry, - ImageRuntimeFunctionEntry, - ImageEnclaveConfig32, - ImageEnclaveConfig64, - ImageEnclaveImport, - ImageDebugDirectory, - ImageCoffSymbolsHeader, - //FpoData, - ImageDebugMisc, - ImageFunctionEntry, - ImageFunctionEntry64, - ImageSeparateDebugHeader, - NonPagedDebugInfo, - //ImageArchitectureHeader, - ImageArchitectureEntry, - ImportObjectHeader, - ImageCor20Header, - MaskedRichHeaderEntry, -); diff -Nru s390-tools-2.31.0/rust-vendor/object/src/pod.rs s390-tools-2.33.1/rust-vendor/object/src/pod.rs --- s390-tools-2.31.0/rust-vendor/object/src/pod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/pod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,239 +0,0 @@ -//! Tools for converting file format structures to and from bytes. -//! -//! This module should be replaced once rust provides safe transmutes. - -// This module provides functions for both read and write features. -#![cfg_attr( - not(all(feature = "read_core", feature = "write_core")), - allow(dead_code) -)] - -use core::{mem, result, slice}; - -type Result = result::Result; - -/// A trait for types that can safely be converted from and to byte slices. -/// -/// # Safety -/// A type that is `Pod` must: -/// - be `#[repr(C)]` or `#[repr(transparent)]` -/// - have no invalid byte values -/// - have no padding -pub unsafe trait Pod: Copy + 'static {} - -/// Cast a byte slice to a `Pod` type. -/// -/// Returns the type and the tail of the slice. -#[inline] -pub fn from_bytes(data: &[u8]) -> Result<(&T, &[u8])> { - let size = mem::size_of::(); - let tail = data.get(size..).ok_or(())?; - let ptr = data.as_ptr(); - if (ptr as usize) % mem::align_of::() != 0 { - return Err(()); - } - // Safety: - // The alignment and size are checked by this function. 
- // The Pod trait ensures the type is valid to cast from bytes. - let val = unsafe { &*ptr.cast() }; - Ok((val, tail)) -} - -/// Cast a mutable byte slice to a `Pod` type. -/// -/// Returns the type and the tail of the slice. -#[inline] -pub fn from_bytes_mut(data: &mut [u8]) -> Result<(&mut T, &mut [u8])> { - let size = mem::size_of::(); - if size > data.len() { - return Err(()); - } - let (data, tail) = data.split_at_mut(size); - let ptr = data.as_mut_ptr(); - if (ptr as usize) % mem::align_of::() != 0 { - return Err(()); - } - // Safety: - // The alignment and size are checked by this function. - // The Pod trait ensures the type is valid to cast from bytes. - let val = unsafe { &mut *ptr.cast() }; - Ok((val, tail)) -} - -/// Cast a byte slice to a slice of a `Pod` type. -/// -/// Returns the type slice and the tail of the byte slice. -#[inline] -pub fn slice_from_bytes(data: &[u8], count: usize) -> Result<(&[T], &[u8])> { - let size = count.checked_mul(mem::size_of::()).ok_or(())?; - let tail = data.get(size..).ok_or(())?; - let ptr = data.as_ptr(); - if (ptr as usize) % mem::align_of::() != 0 { - return Err(()); - } - // Safety: - // The alignment and size are checked by this function. - // The Pod trait ensures the type is valid to cast from bytes. - let slice = unsafe { slice::from_raw_parts(ptr.cast(), count) }; - Ok((slice, tail)) -} - -/// Cast a mutable byte slice to a slice of a `Pod` type. -/// -/// Returns the type slice and the tail of the byte slice. -#[inline] -pub fn slice_from_bytes_mut( - data: &mut [u8], - count: usize, -) -> Result<(&mut [T], &mut [u8])> { - let size = count.checked_mul(mem::size_of::()).ok_or(())?; - if size > data.len() { - return Err(()); - } - let (data, tail) = data.split_at_mut(size); - let ptr = data.as_mut_ptr(); - if (ptr as usize) % mem::align_of::() != 0 { - return Err(()); - } - // Safety: - // The alignment and size are checked by this function. - // The Pod trait ensures the type is valid to cast from bytes. 
- let slice = unsafe { slice::from_raw_parts_mut(ptr.cast(), count) }; - Ok((slice, tail)) -} - -/// Cast a `Pod` type to a byte slice. -#[inline] -pub fn bytes_of(val: &T) -> &[u8] { - let size = mem::size_of::(); - // Safety: - // Any alignment is allowed. - // The size is determined in this function. - // The Pod trait ensures the type is valid to cast to bytes. - unsafe { slice::from_raw_parts(slice::from_ref(val).as_ptr().cast(), size) } -} - -/// Cast a `Pod` type to a mutable byte slice. -#[inline] -pub fn bytes_of_mut(val: &mut T) -> &mut [u8] { - let size = mem::size_of::(); - // Safety: - // Any alignment is allowed. - // The size is determined in this function. - // The Pod trait ensures the type is valid to cast to bytes. - unsafe { slice::from_raw_parts_mut(slice::from_mut(val).as_mut_ptr().cast(), size) } -} - -/// Cast a slice of a `Pod` type to a byte slice. -#[inline] -pub fn bytes_of_slice(val: &[T]) -> &[u8] { - let size = val.len().wrapping_mul(mem::size_of::()); - // Safety: - // Any alignment is allowed. - // The size is determined in this function. - // The Pod trait ensures the type is valid to cast to bytes. - unsafe { slice::from_raw_parts(val.as_ptr().cast(), size) } -} - -/// Cast a slice of a `Pod` type to a mutable byte slice. -#[inline] -pub fn bytes_of_slice_mut(val: &mut [T]) -> &mut [u8] { - let size = val.len().wrapping_mul(mem::size_of::()); - // Safety: - // Any alignment is allowed. - // The size is determined in this function. - // The Pod trait ensures the type is valid to cast to bytes. - unsafe { slice::from_raw_parts_mut(val.as_mut_ptr().cast(), size) } -} - -macro_rules! unsafe_impl_pod { - ($($struct_name:ident),+ $(,)?) 
=> { - $( - unsafe impl Pod for $struct_name { } - )+ - } -} - -unsafe_impl_pod!(u8, u16, u32, u64); - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn single() { - let x = u32::to_be(0x0123_4567); - let mut x_mut = x; - let bytes = bytes_of(&x); - let bytes_mut = bytes_of_mut(&mut x_mut); - assert_eq!(bytes, [0x01, 0x23, 0x45, 0x67]); - assert_eq!(bytes, bytes_mut); - - let x16 = [u16::to_be(0x0123), u16::to_be(0x4567)]; - - let (y, tail) = from_bytes::(bytes).unwrap(); - let (y_mut, tail_mut) = from_bytes_mut::(bytes_mut).unwrap(); - assert_eq!(*y, x); - assert_eq!(y, y_mut); - assert_eq!(tail, &[]); - assert_eq!(tail, tail_mut); - - let (y, tail) = from_bytes::(bytes).unwrap(); - let (y_mut, tail_mut) = from_bytes_mut::(bytes_mut).unwrap(); - assert_eq!(*y, x16[0]); - assert_eq!(y, y_mut); - assert_eq!(tail, &bytes[2..]); - assert_eq!(tail, tail_mut); - - let (y, tail) = from_bytes::(&bytes[2..]).unwrap(); - let (y_mut, tail_mut) = from_bytes_mut::(&mut bytes_mut[2..]).unwrap(); - assert_eq!(*y, x16[1]); - assert_eq!(y, y_mut); - assert_eq!(tail, &[]); - assert_eq!(tail, tail_mut); - - assert_eq!(from_bytes::(&bytes[1..]), Err(())); - assert_eq!(from_bytes::(&bytes[3..]), Err(())); - assert_eq!(from_bytes::(&bytes[4..]), Err(())); - assert_eq!(from_bytes_mut::(&mut bytes_mut[1..]), Err(())); - assert_eq!(from_bytes_mut::(&mut bytes_mut[3..]), Err(())); - assert_eq!(from_bytes_mut::(&mut bytes_mut[4..]), Err(())); - } - - #[test] - fn slice() { - let x = [ - u16::to_be(0x0123), - u16::to_be(0x4567), - u16::to_be(0x89ab), - u16::to_be(0xcdef), - ]; - let mut x_mut = x; - - let bytes = bytes_of_slice(&x); - let bytes_mut = bytes_of_slice_mut(&mut x_mut); - assert_eq!(bytes, [0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]); - assert_eq!(bytes, bytes_mut); - - let (y, tail) = slice_from_bytes::(bytes, 4).unwrap(); - let (y_mut, tail_mut) = slice_from_bytes_mut::(bytes_mut, 4).unwrap(); - assert_eq!(y, x); - assert_eq!(y, y_mut); - assert_eq!(tail, &[]); - 
assert_eq!(tail, tail_mut); - - let (y, tail) = slice_from_bytes::(&bytes[2..], 2).unwrap(); - let (y_mut, tail_mut) = slice_from_bytes::(&mut bytes_mut[2..], 2).unwrap(); - assert_eq!(y, &x[1..3]); - assert_eq!(y, y_mut); - assert_eq!(tail, &bytes[6..]); - assert_eq!(tail, tail_mut); - - assert_eq!(slice_from_bytes::(bytes, 5), Err(())); - assert_eq!(slice_from_bytes::(&bytes[2..], 4), Err(())); - assert_eq!(slice_from_bytes::(&bytes[1..], 2), Err(())); - assert_eq!(slice_from_bytes_mut::(bytes_mut, 5), Err(())); - assert_eq!(slice_from_bytes_mut::(&mut bytes_mut[2..], 4), Err(())); - assert_eq!(slice_from_bytes_mut::(&mut bytes_mut[1..], 2), Err(())); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/any.rs s390-tools-2.33.1/rust-vendor/object/src/read/any.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/any.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/any.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1314 +0,0 @@ -use alloc::fmt; -use alloc::vec::Vec; -use core::marker::PhantomData; - -#[cfg(feature = "coff")] -use crate::read::coff; -#[cfg(feature = "elf")] -use crate::read::elf; -#[cfg(feature = "macho")] -use crate::read::macho; -#[cfg(feature = "pe")] -use crate::read::pe; -#[cfg(feature = "wasm")] -use crate::read::wasm; -#[cfg(feature = "xcoff")] -use crate::read::xcoff; -use crate::read::{ - self, Architecture, BinaryFormat, CodeView, ComdatKind, CompressedData, CompressedFileRange, - Error, Export, FileFlags, FileKind, Import, Object, ObjectComdat, ObjectKind, ObjectMap, - ObjectSection, ObjectSegment, ObjectSymbol, ObjectSymbolTable, ReadRef, Relocation, Result, - SectionFlags, SectionIndex, SectionKind, SegmentFlags, SymbolFlags, SymbolIndex, SymbolKind, - SymbolMap, SymbolMapName, SymbolScope, SymbolSection, -}; -#[allow(unused_imports)] -use crate::{AddressSize, Endian, Endianness}; - -/// Evaluate an expression on the contents of a file format enum. 
-/// -/// This is a hack to avoid virtual calls. -macro_rules! with_inner { - ($inner:expr, $enum:ident, | $var:ident | $body:expr) => { - match $inner { - #[cfg(feature = "coff")] - $enum::Coff(ref $var) => $body, - #[cfg(feature = "coff")] - $enum::CoffBig(ref $var) => $body, - #[cfg(feature = "elf")] - $enum::Elf32(ref $var) => $body, - #[cfg(feature = "elf")] - $enum::Elf64(ref $var) => $body, - #[cfg(feature = "macho")] - $enum::MachO32(ref $var) => $body, - #[cfg(feature = "macho")] - $enum::MachO64(ref $var) => $body, - #[cfg(feature = "pe")] - $enum::Pe32(ref $var) => $body, - #[cfg(feature = "pe")] - $enum::Pe64(ref $var) => $body, - #[cfg(feature = "wasm")] - $enum::Wasm(ref $var) => $body, - #[cfg(feature = "xcoff")] - $enum::Xcoff32(ref $var) => $body, - #[cfg(feature = "xcoff")] - $enum::Xcoff64(ref $var) => $body, - } - }; -} - -macro_rules! with_inner_mut { - ($inner:expr, $enum:ident, | $var:ident | $body:expr) => { - match $inner { - #[cfg(feature = "coff")] - $enum::Coff(ref mut $var) => $body, - #[cfg(feature = "coff")] - $enum::CoffBig(ref mut $var) => $body, - #[cfg(feature = "elf")] - $enum::Elf32(ref mut $var) => $body, - #[cfg(feature = "elf")] - $enum::Elf64(ref mut $var) => $body, - #[cfg(feature = "macho")] - $enum::MachO32(ref mut $var) => $body, - #[cfg(feature = "macho")] - $enum::MachO64(ref mut $var) => $body, - #[cfg(feature = "pe")] - $enum::Pe32(ref mut $var) => $body, - #[cfg(feature = "pe")] - $enum::Pe64(ref mut $var) => $body, - #[cfg(feature = "wasm")] - $enum::Wasm(ref mut $var) => $body, - #[cfg(feature = "xcoff")] - $enum::Xcoff32(ref mut $var) => $body, - #[cfg(feature = "xcoff")] - $enum::Xcoff64(ref mut $var) => $body, - } - }; -} - -/// Like `with_inner!`, but wraps the result in another enum. -macro_rules! 
map_inner { - ($inner:expr, $from:ident, $to:ident, | $var:ident | $body:expr) => { - match $inner { - #[cfg(feature = "coff")] - $from::Coff(ref $var) => $to::Coff($body), - #[cfg(feature = "coff")] - $from::CoffBig(ref $var) => $to::CoffBig($body), - #[cfg(feature = "elf")] - $from::Elf32(ref $var) => $to::Elf32($body), - #[cfg(feature = "elf")] - $from::Elf64(ref $var) => $to::Elf64($body), - #[cfg(feature = "macho")] - $from::MachO32(ref $var) => $to::MachO32($body), - #[cfg(feature = "macho")] - $from::MachO64(ref $var) => $to::MachO64($body), - #[cfg(feature = "pe")] - $from::Pe32(ref $var) => $to::Pe32($body), - #[cfg(feature = "pe")] - $from::Pe64(ref $var) => $to::Pe64($body), - #[cfg(feature = "wasm")] - $from::Wasm(ref $var) => $to::Wasm($body), - #[cfg(feature = "xcoff")] - $from::Xcoff32(ref $var) => $to::Xcoff32($body), - #[cfg(feature = "xcoff")] - $from::Xcoff64(ref $var) => $to::Xcoff64($body), - } - }; -} - -/// Like `map_inner!`, but the result is a Result or Option. -macro_rules! 
map_inner_option { - ($inner:expr, $from:ident, $to:ident, | $var:ident | $body:expr) => { - match $inner { - #[cfg(feature = "coff")] - $from::Coff(ref $var) => $body.map($to::Coff), - #[cfg(feature = "coff")] - $from::CoffBig(ref $var) => $body.map($to::CoffBig), - #[cfg(feature = "elf")] - $from::Elf32(ref $var) => $body.map($to::Elf32), - #[cfg(feature = "elf")] - $from::Elf64(ref $var) => $body.map($to::Elf64), - #[cfg(feature = "macho")] - $from::MachO32(ref $var) => $body.map($to::MachO32), - #[cfg(feature = "macho")] - $from::MachO64(ref $var) => $body.map($to::MachO64), - #[cfg(feature = "pe")] - $from::Pe32(ref $var) => $body.map($to::Pe32), - #[cfg(feature = "pe")] - $from::Pe64(ref $var) => $body.map($to::Pe64), - #[cfg(feature = "wasm")] - $from::Wasm(ref $var) => $body.map($to::Wasm), - #[cfg(feature = "xcoff")] - $from::Xcoff32(ref $var) => $body.map($to::Xcoff32), - #[cfg(feature = "xcoff")] - $from::Xcoff64(ref $var) => $body.map($to::Xcoff64), - } - }; -} - -macro_rules! 
map_inner_option_mut { - ($inner:expr, $from:ident, $to:ident, | $var:ident | $body:expr) => { - match $inner { - #[cfg(feature = "coff")] - $from::Coff(ref mut $var) => $body.map($to::Coff), - #[cfg(feature = "coff")] - $from::CoffBig(ref mut $var) => $body.map($to::CoffBig), - #[cfg(feature = "elf")] - $from::Elf32(ref mut $var) => $body.map($to::Elf32), - #[cfg(feature = "elf")] - $from::Elf64(ref mut $var) => $body.map($to::Elf64), - #[cfg(feature = "macho")] - $from::MachO32(ref mut $var) => $body.map($to::MachO32), - #[cfg(feature = "macho")] - $from::MachO64(ref mut $var) => $body.map($to::MachO64), - #[cfg(feature = "pe")] - $from::Pe32(ref mut $var) => $body.map($to::Pe32), - #[cfg(feature = "pe")] - $from::Pe64(ref mut $var) => $body.map($to::Pe64), - #[cfg(feature = "wasm")] - $from::Wasm(ref mut $var) => $body.map($to::Wasm), - #[cfg(feature = "xcoff")] - $from::Xcoff32(ref mut $var) => $body.map($to::Xcoff32), - #[cfg(feature = "xcoff")] - $from::Xcoff64(ref mut $var) => $body.map($to::Xcoff64), - } - }; -} - -/// Call `next` for a file format iterator. -macro_rules! 
next_inner { - ($inner:expr, $from:ident, $to:ident) => { - match $inner { - #[cfg(feature = "coff")] - $from::Coff(ref mut iter) => iter.next().map($to::Coff), - #[cfg(feature = "coff")] - $from::CoffBig(ref mut iter) => iter.next().map($to::CoffBig), - #[cfg(feature = "elf")] - $from::Elf32(ref mut iter) => iter.next().map($to::Elf32), - #[cfg(feature = "elf")] - $from::Elf64(ref mut iter) => iter.next().map($to::Elf64), - #[cfg(feature = "macho")] - $from::MachO32(ref mut iter) => iter.next().map($to::MachO32), - #[cfg(feature = "macho")] - $from::MachO64(ref mut iter) => iter.next().map($to::MachO64), - #[cfg(feature = "pe")] - $from::Pe32(ref mut iter) => iter.next().map($to::Pe32), - #[cfg(feature = "pe")] - $from::Pe64(ref mut iter) => iter.next().map($to::Pe64), - #[cfg(feature = "wasm")] - $from::Wasm(ref mut iter) => iter.next().map($to::Wasm), - #[cfg(feature = "xcoff")] - $from::Xcoff32(ref mut iter) => iter.next().map($to::Xcoff32), - #[cfg(feature = "xcoff")] - $from::Xcoff64(ref mut iter) => iter.next().map($to::Xcoff64), - } - }; -} - -/// An object file. -/// -/// Most functionality is provided by the `Object` trait implementation. 
-#[derive(Debug)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum File<'data, R: ReadRef<'data> = &'data [u8]> { - #[cfg(feature = "coff")] - Coff(coff::CoffFile<'data, R>), - #[cfg(feature = "coff")] - CoffBig(coff::CoffBigFile<'data, R>), - #[cfg(feature = "elf")] - Elf32(elf::ElfFile32<'data, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfFile64<'data, Endianness, R>), - #[cfg(feature = "macho")] - MachO32(macho::MachOFile32<'data, Endianness, R>), - #[cfg(feature = "macho")] - MachO64(macho::MachOFile64<'data, Endianness, R>), - #[cfg(feature = "pe")] - Pe32(pe::PeFile32<'data, R>), - #[cfg(feature = "pe")] - Pe64(pe::PeFile64<'data, R>), - #[cfg(feature = "wasm")] - Wasm(wasm::WasmFile<'data, R>), - #[cfg(feature = "xcoff")] - Xcoff32(xcoff::XcoffFile32<'data, R>), - #[cfg(feature = "xcoff")] - Xcoff64(xcoff::XcoffFile64<'data, R>), -} - -impl<'data, R: ReadRef<'data>> File<'data, R> { - /// Parse the raw file data. - pub fn parse(data: R) -> Result { - Ok(match FileKind::parse(data)? 
{ - #[cfg(feature = "elf")] - FileKind::Elf32 => File::Elf32(elf::ElfFile32::parse(data)?), - #[cfg(feature = "elf")] - FileKind::Elf64 => File::Elf64(elf::ElfFile64::parse(data)?), - #[cfg(feature = "macho")] - FileKind::MachO32 => File::MachO32(macho::MachOFile32::parse(data)?), - #[cfg(feature = "macho")] - FileKind::MachO64 => File::MachO64(macho::MachOFile64::parse(data)?), - #[cfg(feature = "wasm")] - FileKind::Wasm => File::Wasm(wasm::WasmFile::parse(data)?), - #[cfg(feature = "pe")] - FileKind::Pe32 => File::Pe32(pe::PeFile32::parse(data)?), - #[cfg(feature = "pe")] - FileKind::Pe64 => File::Pe64(pe::PeFile64::parse(data)?), - #[cfg(feature = "coff")] - FileKind::Coff => File::Coff(coff::CoffFile::parse(data)?), - #[cfg(feature = "coff")] - FileKind::CoffBig => File::CoffBig(coff::CoffBigFile::parse(data)?), - #[cfg(feature = "xcoff")] - FileKind::Xcoff32 => File::Xcoff32(xcoff::XcoffFile32::parse(data)?), - #[cfg(feature = "xcoff")] - FileKind::Xcoff64 => File::Xcoff64(xcoff::XcoffFile64::parse(data)?), - #[allow(unreachable_patterns)] - _ => return Err(Error("Unsupported file format")), - }) - } - - /// Parse a Mach-O image from the dyld shared cache. - #[cfg(feature = "macho")] - pub fn parse_dyld_cache_image<'cache, E: Endian>( - image: &macho::DyldCacheImage<'data, 'cache, E, R>, - ) -> Result { - Ok(match image.cache.architecture().address_size() { - Some(AddressSize::U64) => { - File::MachO64(macho::MachOFile64::parse_dyld_cache_image(image)?) - } - Some(AddressSize::U32) => { - File::MachO32(macho::MachOFile32::parse_dyld_cache_image(image)?) - } - _ => return Err(Error("Unsupported file format")), - }) - } - - /// Return the file format. 
- pub fn format(&self) -> BinaryFormat { - match self { - #[cfg(feature = "coff")] - File::Coff(_) | File::CoffBig(_) => BinaryFormat::Coff, - #[cfg(feature = "elf")] - File::Elf32(_) | File::Elf64(_) => BinaryFormat::Elf, - #[cfg(feature = "macho")] - File::MachO32(_) | File::MachO64(_) => BinaryFormat::MachO, - #[cfg(feature = "pe")] - File::Pe32(_) | File::Pe64(_) => BinaryFormat::Pe, - #[cfg(feature = "wasm")] - File::Wasm(_) => BinaryFormat::Wasm, - #[cfg(feature = "xcoff")] - File::Xcoff32(_) | File::Xcoff64(_) => BinaryFormat::Xcoff, - } - } -} - -impl<'data, R: ReadRef<'data>> read::private::Sealed for File<'data, R> {} - -impl<'data, 'file, R> Object<'data, 'file> for File<'data, R> -where - 'data: 'file, - R: 'file + ReadRef<'data>, -{ - type Segment = Segment<'data, 'file, R>; - type SegmentIterator = SegmentIterator<'data, 'file, R>; - type Section = Section<'data, 'file, R>; - type SectionIterator = SectionIterator<'data, 'file, R>; - type Comdat = Comdat<'data, 'file, R>; - type ComdatIterator = ComdatIterator<'data, 'file, R>; - type Symbol = Symbol<'data, 'file, R>; - type SymbolIterator = SymbolIterator<'data, 'file, R>; - type SymbolTable = SymbolTable<'data, 'file, R>; - type DynamicRelocationIterator = DynamicRelocationIterator<'data, 'file, R>; - - fn architecture(&self) -> Architecture { - with_inner!(self, File, |x| x.architecture()) - } - - fn is_little_endian(&self) -> bool { - with_inner!(self, File, |x| x.is_little_endian()) - } - - fn is_64(&self) -> bool { - with_inner!(self, File, |x| x.is_64()) - } - - fn kind(&self) -> ObjectKind { - with_inner!(self, File, |x| x.kind()) - } - - fn segments(&'file self) -> SegmentIterator<'data, 'file, R> { - SegmentIterator { - inner: map_inner!(self, File, SegmentIteratorInternal, |x| x.segments()), - } - } - - fn section_by_name_bytes(&'file self, section_name: &[u8]) -> Option> { - map_inner_option!(self, File, SectionInternal, |x| x - .section_by_name_bytes(section_name)) - .map(|inner| Section 
{ inner }) - } - - fn section_by_index(&'file self, index: SectionIndex) -> Result> { - map_inner_option!(self, File, SectionInternal, |x| x.section_by_index(index)) - .map(|inner| Section { inner }) - } - - fn sections(&'file self) -> SectionIterator<'data, 'file, R> { - SectionIterator { - inner: map_inner!(self, File, SectionIteratorInternal, |x| x.sections()), - } - } - - fn comdats(&'file self) -> ComdatIterator<'data, 'file, R> { - ComdatIterator { - inner: map_inner!(self, File, ComdatIteratorInternal, |x| x.comdats()), - } - } - - fn symbol_by_index(&'file self, index: SymbolIndex) -> Result> { - map_inner_option!(self, File, SymbolInternal, |x| x - .symbol_by_index(index) - .map(|x| (x, PhantomData))) - .map(|inner| Symbol { inner }) - } - - fn symbols(&'file self) -> SymbolIterator<'data, 'file, R> { - SymbolIterator { - inner: map_inner!(self, File, SymbolIteratorInternal, |x| ( - x.symbols(), - PhantomData - )), - } - } - - fn symbol_table(&'file self) -> Option> { - map_inner_option!(self, File, SymbolTableInternal, |x| x - .symbol_table() - .map(|x| (x, PhantomData))) - .map(|inner| SymbolTable { inner }) - } - - fn dynamic_symbols(&'file self) -> SymbolIterator<'data, 'file, R> { - SymbolIterator { - inner: map_inner!(self, File, SymbolIteratorInternal, |x| ( - x.dynamic_symbols(), - PhantomData - )), - } - } - - fn dynamic_symbol_table(&'file self) -> Option> { - map_inner_option!(self, File, SymbolTableInternal, |x| x - .dynamic_symbol_table() - .map(|x| (x, PhantomData))) - .map(|inner| SymbolTable { inner }) - } - - #[cfg(feature = "elf")] - fn dynamic_relocations(&'file self) -> Option> { - let inner = match self { - File::Elf32(ref elf) => { - DynamicRelocationIteratorInternal::Elf32(elf.dynamic_relocations()?) - } - File::Elf64(ref elf) => { - DynamicRelocationIteratorInternal::Elf64(elf.dynamic_relocations()?) 
- } - #[allow(unreachable_patterns)] - _ => return None, - }; - Some(DynamicRelocationIterator { inner }) - } - - #[cfg(not(feature = "elf"))] - fn dynamic_relocations(&'file self) -> Option> { - None - } - - fn symbol_map(&self) -> SymbolMap> { - with_inner!(self, File, |x| x.symbol_map()) - } - - fn object_map(&self) -> ObjectMap<'data> { - with_inner!(self, File, |x| x.object_map()) - } - - fn imports(&self) -> Result>> { - with_inner!(self, File, |x| x.imports()) - } - - fn exports(&self) -> Result>> { - with_inner!(self, File, |x| x.exports()) - } - - fn has_debug_symbols(&self) -> bool { - with_inner!(self, File, |x| x.has_debug_symbols()) - } - - #[inline] - fn mach_uuid(&self) -> Result> { - with_inner!(self, File, |x| x.mach_uuid()) - } - - #[inline] - fn build_id(&self) -> Result> { - with_inner!(self, File, |x| x.build_id()) - } - - #[inline] - fn gnu_debuglink(&self) -> Result> { - with_inner!(self, File, |x| x.gnu_debuglink()) - } - - #[inline] - fn gnu_debugaltlink(&self) -> Result> { - with_inner!(self, File, |x| x.gnu_debugaltlink()) - } - - #[inline] - fn pdb_info(&self) -> Result>> { - with_inner!(self, File, |x| x.pdb_info()) - } - - fn relative_address_base(&self) -> u64 { - with_inner!(self, File, |x| x.relative_address_base()) - } - - fn entry(&self) -> u64 { - with_inner!(self, File, |x| x.entry()) - } - - fn flags(&self) -> FileFlags { - with_inner!(self, File, |x| x.flags()) - } -} - -/// An iterator over the segments of a `File`. 
-#[derive(Debug)] -pub struct SegmentIterator<'data, 'file, R: ReadRef<'data> = &'data [u8]> { - inner: SegmentIteratorInternal<'data, 'file, R>, -} - -#[derive(Debug)] -enum SegmentIteratorInternal<'data, 'file, R: ReadRef<'data>> { - #[cfg(feature = "coff")] - Coff(coff::CoffSegmentIterator<'data, 'file, R>), - #[cfg(feature = "coff")] - CoffBig(coff::CoffBigSegmentIterator<'data, 'file, R>), - #[cfg(feature = "elf")] - Elf32(elf::ElfSegmentIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfSegmentIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO32(macho::MachOSegmentIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO64(macho::MachOSegmentIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "pe")] - Pe32(pe::PeSegmentIterator32<'data, 'file, R>), - #[cfg(feature = "pe")] - Pe64(pe::PeSegmentIterator64<'data, 'file, R>), - #[cfg(feature = "wasm")] - Wasm(wasm::WasmSegmentIterator<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff32(xcoff::XcoffSegmentIterator32<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff64(xcoff::XcoffSegmentIterator64<'data, 'file, R>), -} - -impl<'data, 'file, R: ReadRef<'data>> Iterator for SegmentIterator<'data, 'file, R> { - type Item = Segment<'data, 'file, R>; - - fn next(&mut self) -> Option { - next_inner!(self.inner, SegmentIteratorInternal, SegmentInternal) - .map(|inner| Segment { inner }) - } -} - -/// A segment of a `File`. 
-pub struct Segment<'data, 'file, R: ReadRef<'data> = &'data [u8]> { - inner: SegmentInternal<'data, 'file, R>, -} - -#[derive(Debug)] -enum SegmentInternal<'data, 'file, R: ReadRef<'data>> { - #[cfg(feature = "coff")] - Coff(coff::CoffSegment<'data, 'file, R>), - #[cfg(feature = "coff")] - CoffBig(coff::CoffBigSegment<'data, 'file, R>), - #[cfg(feature = "elf")] - Elf32(elf::ElfSegment32<'data, 'file, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfSegment64<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO32(macho::MachOSegment32<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO64(macho::MachOSegment64<'data, 'file, Endianness, R>), - #[cfg(feature = "pe")] - Pe32(pe::PeSegment32<'data, 'file, R>), - #[cfg(feature = "pe")] - Pe64(pe::PeSegment64<'data, 'file, R>), - #[cfg(feature = "wasm")] - Wasm(wasm::WasmSegment<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff32(xcoff::XcoffSegment32<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff64(xcoff::XcoffSegment64<'data, 'file, R>), -} - -impl<'data, 'file, R: ReadRef<'data>> fmt::Debug for Segment<'data, 'file, R> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // It's painful to do much better than this - let mut s = f.debug_struct("Segment"); - match self.name() { - Ok(Some(ref name)) => { - s.field("name", name); - } - Ok(None) => {} - Err(_) => { - s.field("name", &""); - } - } - s.field("address", &self.address()) - .field("size", &self.size()) - .finish() - } -} - -impl<'data, 'file, R: ReadRef<'data>> read::private::Sealed for Segment<'data, 'file, R> {} - -impl<'data, 'file, R: ReadRef<'data>> ObjectSegment<'data> for Segment<'data, 'file, R> { - fn address(&self) -> u64 { - with_inner!(self.inner, SegmentInternal, |x| x.address()) - } - - fn size(&self) -> u64 { - with_inner!(self.inner, SegmentInternal, |x| x.size()) - } - - fn align(&self) -> u64 { - with_inner!(self.inner, SegmentInternal, |x| x.align()) - } - - fn file_range(&self) 
-> (u64, u64) { - with_inner!(self.inner, SegmentInternal, |x| x.file_range()) - } - - fn data(&self) -> Result<&'data [u8]> { - with_inner!(self.inner, SegmentInternal, |x| x.data()) - } - - fn data_range(&self, address: u64, size: u64) -> Result> { - with_inner!(self.inner, SegmentInternal, |x| x.data_range(address, size)) - } - - fn name_bytes(&self) -> Result> { - with_inner!(self.inner, SegmentInternal, |x| x.name_bytes()) - } - - fn name(&self) -> Result> { - with_inner!(self.inner, SegmentInternal, |x| x.name()) - } - - fn flags(&self) -> SegmentFlags { - with_inner!(self.inner, SegmentInternal, |x| x.flags()) - } -} - -/// An iterator of the sections of a `File`. -#[derive(Debug)] -pub struct SectionIterator<'data, 'file, R: ReadRef<'data> = &'data [u8]> { - inner: SectionIteratorInternal<'data, 'file, R>, -} - -// we wrap our enums in a struct so that they are kept private. -#[derive(Debug)] -enum SectionIteratorInternal<'data, 'file, R: ReadRef<'data>> { - #[cfg(feature = "coff")] - Coff(coff::CoffSectionIterator<'data, 'file, R>), - #[cfg(feature = "coff")] - CoffBig(coff::CoffBigSectionIterator<'data, 'file, R>), - #[cfg(feature = "elf")] - Elf32(elf::ElfSectionIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfSectionIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO32(macho::MachOSectionIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO64(macho::MachOSectionIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "pe")] - Pe32(pe::PeSectionIterator32<'data, 'file, R>), - #[cfg(feature = "pe")] - Pe64(pe::PeSectionIterator64<'data, 'file, R>), - #[cfg(feature = "wasm")] - Wasm(wasm::WasmSectionIterator<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff32(xcoff::XcoffSectionIterator32<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff64(xcoff::XcoffSectionIterator64<'data, 'file, R>), -} - -impl<'data, 'file, R: ReadRef<'data>> Iterator for 
SectionIterator<'data, 'file, R> { - type Item = Section<'data, 'file, R>; - - fn next(&mut self) -> Option { - next_inner!(self.inner, SectionIteratorInternal, SectionInternal) - .map(|inner| Section { inner }) - } -} - -/// A Section of a File -pub struct Section<'data, 'file, R: ReadRef<'data> = &'data [u8]> { - inner: SectionInternal<'data, 'file, R>, -} - -enum SectionInternal<'data, 'file, R: ReadRef<'data>> { - #[cfg(feature = "coff")] - Coff(coff::CoffSection<'data, 'file, R>), - #[cfg(feature = "coff")] - CoffBig(coff::CoffBigSection<'data, 'file, R>), - #[cfg(feature = "elf")] - Elf32(elf::ElfSection32<'data, 'file, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfSection64<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO32(macho::MachOSection32<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO64(macho::MachOSection64<'data, 'file, Endianness, R>), - #[cfg(feature = "pe")] - Pe32(pe::PeSection32<'data, 'file, R>), - #[cfg(feature = "pe")] - Pe64(pe::PeSection64<'data, 'file, R>), - #[cfg(feature = "wasm")] - Wasm(wasm::WasmSection<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff32(xcoff::XcoffSection32<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff64(xcoff::XcoffSection64<'data, 'file, R>), -} - -impl<'data, 'file, R: ReadRef<'data>> fmt::Debug for Section<'data, 'file, R> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // It's painful to do much better than this - let mut s = f.debug_struct("Section"); - match self.segment_name() { - Ok(Some(ref name)) => { - s.field("segment", name); - } - Ok(None) => {} - Err(_) => { - s.field("segment", &""); - } - } - s.field("name", &self.name().unwrap_or("")) - .field("address", &self.address()) - .field("size", &self.size()) - .field("align", &self.align()) - .field("kind", &self.kind()) - .field("flags", &self.flags()) - .finish() - } -} - -impl<'data, 'file, R: ReadRef<'data>> read::private::Sealed for Section<'data, 'file, R> {} - 
-impl<'data, 'file, R: ReadRef<'data>> ObjectSection<'data> for Section<'data, 'file, R> { - type RelocationIterator = SectionRelocationIterator<'data, 'file, R>; - - fn index(&self) -> SectionIndex { - with_inner!(self.inner, SectionInternal, |x| x.index()) - } - - fn address(&self) -> u64 { - with_inner!(self.inner, SectionInternal, |x| x.address()) - } - - fn size(&self) -> u64 { - with_inner!(self.inner, SectionInternal, |x| x.size()) - } - - fn align(&self) -> u64 { - with_inner!(self.inner, SectionInternal, |x| x.align()) - } - - fn file_range(&self) -> Option<(u64, u64)> { - with_inner!(self.inner, SectionInternal, |x| x.file_range()) - } - - fn data(&self) -> Result<&'data [u8]> { - with_inner!(self.inner, SectionInternal, |x| x.data()) - } - - fn data_range(&self, address: u64, size: u64) -> Result> { - with_inner!(self.inner, SectionInternal, |x| x.data_range(address, size)) - } - - fn compressed_file_range(&self) -> Result { - with_inner!(self.inner, SectionInternal, |x| x.compressed_file_range()) - } - - fn compressed_data(&self) -> Result> { - with_inner!(self.inner, SectionInternal, |x| x.compressed_data()) - } - - fn name_bytes(&self) -> Result<&[u8]> { - with_inner!(self.inner, SectionInternal, |x| x.name_bytes()) - } - - fn name(&self) -> Result<&str> { - with_inner!(self.inner, SectionInternal, |x| x.name()) - } - - fn segment_name_bytes(&self) -> Result> { - with_inner!(self.inner, SectionInternal, |x| x.segment_name_bytes()) - } - - fn segment_name(&self) -> Result> { - with_inner!(self.inner, SectionInternal, |x| x.segment_name()) - } - - fn kind(&self) -> SectionKind { - with_inner!(self.inner, SectionInternal, |x| x.kind()) - } - - fn relocations(&self) -> SectionRelocationIterator<'data, 'file, R> { - SectionRelocationIterator { - inner: map_inner!( - self.inner, - SectionInternal, - SectionRelocationIteratorInternal, - |x| x.relocations() - ), - } - } - - fn flags(&self) -> SectionFlags { - with_inner!(self.inner, SectionInternal, |x| 
x.flags()) - } -} - -/// An iterator of the COMDAT section groups of a `File`. -#[derive(Debug)] -pub struct ComdatIterator<'data, 'file, R: ReadRef<'data> = &'data [u8]> { - inner: ComdatIteratorInternal<'data, 'file, R>, -} - -#[derive(Debug)] -enum ComdatIteratorInternal<'data, 'file, R: ReadRef<'data>> { - #[cfg(feature = "coff")] - Coff(coff::CoffComdatIterator<'data, 'file, R>), - #[cfg(feature = "coff")] - CoffBig(coff::CoffBigComdatIterator<'data, 'file, R>), - #[cfg(feature = "elf")] - Elf32(elf::ElfComdatIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfComdatIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO32(macho::MachOComdatIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO64(macho::MachOComdatIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "pe")] - Pe32(pe::PeComdatIterator32<'data, 'file, R>), - #[cfg(feature = "pe")] - Pe64(pe::PeComdatIterator64<'data, 'file, R>), - #[cfg(feature = "wasm")] - Wasm(wasm::WasmComdatIterator<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff32(xcoff::XcoffComdatIterator32<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff64(xcoff::XcoffComdatIterator64<'data, 'file, R>), -} - -impl<'data, 'file, R: ReadRef<'data>> Iterator for ComdatIterator<'data, 'file, R> { - type Item = Comdat<'data, 'file, R>; - - fn next(&mut self) -> Option { - next_inner!(self.inner, ComdatIteratorInternal, ComdatInternal) - .map(|inner| Comdat { inner }) - } -} - -/// A COMDAT section group of a `File`. 
-pub struct Comdat<'data, 'file, R: ReadRef<'data> = &'data [u8]> { - inner: ComdatInternal<'data, 'file, R>, -} - -enum ComdatInternal<'data, 'file, R: ReadRef<'data>> { - #[cfg(feature = "coff")] - Coff(coff::CoffComdat<'data, 'file, R>), - #[cfg(feature = "coff")] - CoffBig(coff::CoffBigComdat<'data, 'file, R>), - #[cfg(feature = "elf")] - Elf32(elf::ElfComdat32<'data, 'file, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfComdat64<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO32(macho::MachOComdat32<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO64(macho::MachOComdat64<'data, 'file, Endianness, R>), - #[cfg(feature = "pe")] - Pe32(pe::PeComdat32<'data, 'file, R>), - #[cfg(feature = "pe")] - Pe64(pe::PeComdat64<'data, 'file, R>), - #[cfg(feature = "wasm")] - Wasm(wasm::WasmComdat<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff32(xcoff::XcoffComdat32<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff64(xcoff::XcoffComdat64<'data, 'file, R>), -} - -impl<'data, 'file, R: ReadRef<'data>> fmt::Debug for Comdat<'data, 'file, R> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut s = f.debug_struct("Comdat"); - s.field("symbol", &self.symbol()) - .field("name", &self.name().unwrap_or("")) - .field("kind", &self.kind()) - .finish() - } -} - -impl<'data, 'file, R: ReadRef<'data>> read::private::Sealed for Comdat<'data, 'file, R> {} - -impl<'data, 'file, R: ReadRef<'data>> ObjectComdat<'data> for Comdat<'data, 'file, R> { - type SectionIterator = ComdatSectionIterator<'data, 'file, R>; - - fn kind(&self) -> ComdatKind { - with_inner!(self.inner, ComdatInternal, |x| x.kind()) - } - - fn symbol(&self) -> SymbolIndex { - with_inner!(self.inner, ComdatInternal, |x| x.symbol()) - } - - fn name_bytes(&self) -> Result<&[u8]> { - with_inner!(self.inner, ComdatInternal, |x| x.name_bytes()) - } - - fn name(&self) -> Result<&str> { - with_inner!(self.inner, ComdatInternal, |x| x.name()) - } - - fn 
sections(&self) -> ComdatSectionIterator<'data, 'file, R> { - ComdatSectionIterator { - inner: map_inner!( - self.inner, - ComdatInternal, - ComdatSectionIteratorInternal, - |x| x.sections() - ), - } - } -} - -/// An iterator over COMDAT section entries. -#[derive(Debug)] -pub struct ComdatSectionIterator<'data, 'file, R: ReadRef<'data> = &'data [u8]> { - inner: ComdatSectionIteratorInternal<'data, 'file, R>, -} - -#[derive(Debug)] -enum ComdatSectionIteratorInternal<'data, 'file, R: ReadRef<'data>> { - #[cfg(feature = "coff")] - Coff(coff::CoffComdatSectionIterator<'data, 'file, R>), - #[cfg(feature = "coff")] - CoffBig(coff::CoffBigComdatSectionIterator<'data, 'file, R>), - #[cfg(feature = "elf")] - Elf32(elf::ElfComdatSectionIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfComdatSectionIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO32(macho::MachOComdatSectionIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO64(macho::MachOComdatSectionIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "pe")] - Pe32(pe::PeComdatSectionIterator32<'data, 'file, R>), - #[cfg(feature = "pe")] - Pe64(pe::PeComdatSectionIterator64<'data, 'file, R>), - #[cfg(feature = "wasm")] - Wasm(wasm::WasmComdatSectionIterator<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff32(xcoff::XcoffComdatSectionIterator32<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff64(xcoff::XcoffComdatSectionIterator64<'data, 'file, R>), -} - -impl<'data, 'file, R: ReadRef<'data>> Iterator for ComdatSectionIterator<'data, 'file, R> { - type Item = SectionIndex; - - fn next(&mut self) -> Option { - with_inner_mut!(self.inner, ComdatSectionIteratorInternal, |x| x.next()) - } -} - -/// A symbol table. 
-#[derive(Debug)] -pub struct SymbolTable<'data, 'file, R = &'data [u8]> -where - R: ReadRef<'data>, -{ - inner: SymbolTableInternal<'data, 'file, R>, -} - -#[derive(Debug)] -enum SymbolTableInternal<'data, 'file, R> -where - R: ReadRef<'data>, -{ - #[cfg(feature = "coff")] - Coff((coff::CoffSymbolTable<'data, 'file, R>, PhantomData)), - #[cfg(feature = "coff")] - CoffBig((coff::CoffBigSymbolTable<'data, 'file, R>, PhantomData)), - #[cfg(feature = "elf")] - Elf32( - ( - elf::ElfSymbolTable32<'data, 'file, Endianness, R>, - PhantomData, - ), - ), - #[cfg(feature = "elf")] - Elf64( - ( - elf::ElfSymbolTable64<'data, 'file, Endianness, R>, - PhantomData, - ), - ), - #[cfg(feature = "macho")] - MachO32( - ( - macho::MachOSymbolTable32<'data, 'file, Endianness, R>, - PhantomData<()>, - ), - ), - #[cfg(feature = "macho")] - MachO64( - ( - macho::MachOSymbolTable64<'data, 'file, Endianness, R>, - PhantomData<()>, - ), - ), - #[cfg(feature = "pe")] - Pe32((coff::CoffSymbolTable<'data, 'file, R>, PhantomData)), - #[cfg(feature = "pe")] - Pe64((coff::CoffSymbolTable<'data, 'file, R>, PhantomData)), - #[cfg(feature = "wasm")] - Wasm((wasm::WasmSymbolTable<'data, 'file>, PhantomData)), - #[cfg(feature = "xcoff")] - Xcoff32((xcoff::XcoffSymbolTable32<'data, 'file, R>, PhantomData)), - #[cfg(feature = "xcoff")] - Xcoff64((xcoff::XcoffSymbolTable64<'data, 'file, R>, PhantomData)), -} - -impl<'data, 'file, R: ReadRef<'data>> read::private::Sealed for SymbolTable<'data, 'file, R> {} - -impl<'data, 'file, R: ReadRef<'data>> ObjectSymbolTable<'data> for SymbolTable<'data, 'file, R> { - type Symbol = Symbol<'data, 'file, R>; - type SymbolIterator = SymbolIterator<'data, 'file, R>; - - fn symbols(&self) -> Self::SymbolIterator { - SymbolIterator { - inner: map_inner!( - self.inner, - SymbolTableInternal, - SymbolIteratorInternal, - |x| (x.0.symbols(), PhantomData) - ), - } - } - - fn symbol_by_index(&self, index: SymbolIndex) -> Result { - map_inner_option!(self.inner, 
SymbolTableInternal, SymbolInternal, |x| x - .0 - .symbol_by_index(index) - .map(|x| (x, PhantomData))) - .map(|inner| Symbol { inner }) - } -} - -/// An iterator over symbol table entries. -#[derive(Debug)] -pub struct SymbolIterator<'data, 'file, R = &'data [u8]> -where - R: ReadRef<'data>, -{ - inner: SymbolIteratorInternal<'data, 'file, R>, -} - -#[derive(Debug)] -enum SymbolIteratorInternal<'data, 'file, R> -where - R: ReadRef<'data>, -{ - #[cfg(feature = "coff")] - Coff((coff::CoffSymbolIterator<'data, 'file, R>, PhantomData)), - #[cfg(feature = "coff")] - CoffBig((coff::CoffBigSymbolIterator<'data, 'file, R>, PhantomData)), - #[cfg(feature = "elf")] - Elf32( - ( - elf::ElfSymbolIterator32<'data, 'file, Endianness, R>, - PhantomData, - ), - ), - #[cfg(feature = "elf")] - Elf64( - ( - elf::ElfSymbolIterator64<'data, 'file, Endianness, R>, - PhantomData, - ), - ), - #[cfg(feature = "macho")] - MachO32( - ( - macho::MachOSymbolIterator32<'data, 'file, Endianness, R>, - PhantomData<()>, - ), - ), - #[cfg(feature = "macho")] - MachO64( - ( - macho::MachOSymbolIterator64<'data, 'file, Endianness, R>, - PhantomData<()>, - ), - ), - #[cfg(feature = "pe")] - Pe32((coff::CoffSymbolIterator<'data, 'file, R>, PhantomData)), - #[cfg(feature = "pe")] - Pe64((coff::CoffSymbolIterator<'data, 'file, R>, PhantomData)), - #[cfg(feature = "wasm")] - Wasm((wasm::WasmSymbolIterator<'data, 'file>, PhantomData)), - #[cfg(feature = "xcoff")] - Xcoff32( - ( - xcoff::XcoffSymbolIterator32<'data, 'file, R>, - PhantomData, - ), - ), - #[cfg(feature = "xcoff")] - Xcoff64( - ( - xcoff::XcoffSymbolIterator64<'data, 'file, R>, - PhantomData, - ), - ), -} - -impl<'data, 'file, R: ReadRef<'data>> Iterator for SymbolIterator<'data, 'file, R> { - type Item = Symbol<'data, 'file, R>; - - fn next(&mut self) -> Option { - map_inner_option_mut!(self.inner, SymbolIteratorInternal, SymbolInternal, |iter| { - iter.0.next().map(|x| (x, PhantomData)) - }) - .map(|inner| Symbol { inner }) - } -} - -/// A 
symbol table entry. -pub struct Symbol<'data, 'file, R = &'data [u8]> -where - R: ReadRef<'data>, -{ - inner: SymbolInternal<'data, 'file, R>, -} - -enum SymbolInternal<'data, 'file, R> -where - R: ReadRef<'data>, -{ - #[cfg(feature = "coff")] - Coff((coff::CoffSymbol<'data, 'file, R>, PhantomData)), - #[cfg(feature = "coff")] - CoffBig((coff::CoffBigSymbol<'data, 'file, R>, PhantomData)), - #[cfg(feature = "elf")] - Elf32( - ( - elf::ElfSymbol32<'data, 'file, Endianness, R>, - PhantomData, - ), - ), - #[cfg(feature = "elf")] - Elf64( - ( - elf::ElfSymbol64<'data, 'file, Endianness, R>, - PhantomData, - ), - ), - #[cfg(feature = "macho")] - MachO32( - ( - macho::MachOSymbol32<'data, 'file, Endianness, R>, - PhantomData<()>, - ), - ), - #[cfg(feature = "macho")] - MachO64( - ( - macho::MachOSymbol64<'data, 'file, Endianness, R>, - PhantomData<()>, - ), - ), - #[cfg(feature = "pe")] - Pe32((coff::CoffSymbol<'data, 'file, R>, PhantomData)), - #[cfg(feature = "pe")] - Pe64((coff::CoffSymbol<'data, 'file, R>, PhantomData)), - #[cfg(feature = "wasm")] - Wasm((wasm::WasmSymbol<'data, 'file>, PhantomData)), - #[cfg(feature = "xcoff")] - Xcoff32((xcoff::XcoffSymbol32<'data, 'file, R>, PhantomData)), - #[cfg(feature = "xcoff")] - Xcoff64((xcoff::XcoffSymbol64<'data, 'file, R>, PhantomData)), -} - -impl<'data, 'file, R: ReadRef<'data>> fmt::Debug for Symbol<'data, 'file, R> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Symbol") - .field("name", &self.name().unwrap_or("")) - .field("address", &self.address()) - .field("size", &self.size()) - .field("kind", &self.kind()) - .field("section", &self.section()) - .field("scope", &self.scope()) - .field("weak", &self.is_weak()) - .field("flags", &self.flags()) - .finish() - } -} - -impl<'data, 'file, R: ReadRef<'data>> read::private::Sealed for Symbol<'data, 'file, R> {} - -impl<'data, 'file, R: ReadRef<'data>> ObjectSymbol<'data> for Symbol<'data, 'file, R> { - fn index(&self) -> SymbolIndex { - 
with_inner!(self.inner, SymbolInternal, |x| x.0.index()) - } - - fn name_bytes(&self) -> Result<&'data [u8]> { - with_inner!(self.inner, SymbolInternal, |x| x.0.name_bytes()) - } - - fn name(&self) -> Result<&'data str> { - with_inner!(self.inner, SymbolInternal, |x| x.0.name()) - } - - fn address(&self) -> u64 { - with_inner!(self.inner, SymbolInternal, |x| x.0.address()) - } - - fn size(&self) -> u64 { - with_inner!(self.inner, SymbolInternal, |x| x.0.size()) - } - - fn kind(&self) -> SymbolKind { - with_inner!(self.inner, SymbolInternal, |x| x.0.kind()) - } - - fn section(&self) -> SymbolSection { - with_inner!(self.inner, SymbolInternal, |x| x.0.section()) - } - - fn is_undefined(&self) -> bool { - with_inner!(self.inner, SymbolInternal, |x| x.0.is_undefined()) - } - - fn is_definition(&self) -> bool { - with_inner!(self.inner, SymbolInternal, |x| x.0.is_definition()) - } - - fn is_common(&self) -> bool { - with_inner!(self.inner, SymbolInternal, |x| x.0.is_common()) - } - - fn is_weak(&self) -> bool { - with_inner!(self.inner, SymbolInternal, |x| x.0.is_weak()) - } - - fn scope(&self) -> SymbolScope { - with_inner!(self.inner, SymbolInternal, |x| x.0.scope()) - } - - fn is_global(&self) -> bool { - with_inner!(self.inner, SymbolInternal, |x| x.0.is_global()) - } - - fn is_local(&self) -> bool { - with_inner!(self.inner, SymbolInternal, |x| x.0.is_local()) - } - - fn flags(&self) -> SymbolFlags { - with_inner!(self.inner, SymbolInternal, |x| x.0.flags()) - } -} - -/// An iterator over dynamic relocation entries. 
-#[derive(Debug)] -pub struct DynamicRelocationIterator<'data, 'file, R = &'data [u8]> -where - R: ReadRef<'data>, -{ - inner: DynamicRelocationIteratorInternal<'data, 'file, R>, -} - -#[derive(Debug)] -enum DynamicRelocationIteratorInternal<'data, 'file, R> -where - R: ReadRef<'data>, -{ - #[cfg(feature = "elf")] - Elf32(elf::ElfDynamicRelocationIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfDynamicRelocationIterator64<'data, 'file, Endianness, R>), - // We need to always use the lifetime parameters. - #[allow(unused)] - None(PhantomData<(&'data (), &'file (), R)>), -} - -impl<'data, 'file, R: ReadRef<'data>> Iterator for DynamicRelocationIterator<'data, 'file, R> { - type Item = (u64, Relocation); - - fn next(&mut self) -> Option { - match self.inner { - #[cfg(feature = "elf")] - DynamicRelocationIteratorInternal::Elf32(ref mut elf) => elf.next(), - #[cfg(feature = "elf")] - DynamicRelocationIteratorInternal::Elf64(ref mut elf) => elf.next(), - DynamicRelocationIteratorInternal::None(_) => None, - } - } -} - -/// An iterator over section relocation entries. 
-#[derive(Debug)] -pub struct SectionRelocationIterator<'data, 'file, R: ReadRef<'data> = &'data [u8]> { - inner: SectionRelocationIteratorInternal<'data, 'file, R>, -} - -#[derive(Debug)] -enum SectionRelocationIteratorInternal<'data, 'file, R: ReadRef<'data>> { - #[cfg(feature = "coff")] - Coff(coff::CoffRelocationIterator<'data, 'file, R>), - #[cfg(feature = "coff")] - CoffBig(coff::CoffBigRelocationIterator<'data, 'file, R>), - #[cfg(feature = "elf")] - Elf32(elf::ElfSectionRelocationIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "elf")] - Elf64(elf::ElfSectionRelocationIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO32(macho::MachORelocationIterator32<'data, 'file, Endianness, R>), - #[cfg(feature = "macho")] - MachO64(macho::MachORelocationIterator64<'data, 'file, Endianness, R>), - #[cfg(feature = "pe")] - Pe32(pe::PeRelocationIterator<'data, 'file, R>), - #[cfg(feature = "pe")] - Pe64(pe::PeRelocationIterator<'data, 'file, R>), - #[cfg(feature = "wasm")] - Wasm(wasm::WasmRelocationIterator<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff32(xcoff::XcoffRelocationIterator32<'data, 'file, R>), - #[cfg(feature = "xcoff")] - Xcoff64(xcoff::XcoffRelocationIterator64<'data, 'file, R>), -} - -impl<'data, 'file, R: ReadRef<'data>> Iterator for SectionRelocationIterator<'data, 'file, R> { - type Item = (u64, Relocation); - - fn next(&mut self) -> Option { - with_inner_mut!(self.inner, SectionRelocationIteratorInternal, |x| x.next()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/archive.rs s390-tools-2.33.1/rust-vendor/object/src/read/archive.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/archive.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/archive.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,739 +0,0 @@ -//! Support for archive files. 
- -use core::convert::TryInto; - -use crate::archive; -use crate::read::{self, Bytes, Error, ReadError, ReadRef}; - -/// The kind of archive format. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum ArchiveKind { - /// There are no special files that indicate the archive format. - Unknown, - /// The GNU (or System V) archive format. - Gnu, - /// The GNU (or System V) archive format with 64-bit symbol table. - Gnu64, - /// The BSD archive format. - Bsd, - /// The BSD archive format with 64-bit symbol table. - /// - /// This is used for Darwin. - Bsd64, - /// The Windows COFF archive format. - Coff, - /// The AIX big archive format. - AixBig, -} - -/// The list of members in the archive. -#[derive(Debug, Clone, Copy)] -enum Members<'data> { - Common { - offset: u64, - end_offset: u64, - }, - AixBig { - index: &'data [archive::AixMemberOffset], - }, -} - -/// A partially parsed archive file. -#[derive(Debug, Clone, Copy)] -pub struct ArchiveFile<'data, R: ReadRef<'data> = &'data [u8]> { - data: R, - kind: ArchiveKind, - members: Members<'data>, - symbols: (u64, u64), - names: &'data [u8], -} - -impl<'data, R: ReadRef<'data>> ArchiveFile<'data, R> { - /// Parse the archive header and special members. - pub fn parse(data: R) -> read::Result { - let len = data.len().read_error("Unknown archive length")?; - let mut tail = 0; - let magic = data - .read_bytes(&mut tail, archive::MAGIC.len() as u64) - .read_error("Invalid archive size")?; - - if magic == archive::AIX_BIG_MAGIC { - return Self::parse_aixbig(data); - } else if magic != archive::MAGIC { - return Err(Error("Unsupported archive identifier")); - } - - let mut members_offset = tail; - let members_end_offset = len; - - let mut file = ArchiveFile { - data, - kind: ArchiveKind::Unknown, - members: Members::Common { - offset: 0, - end_offset: 0, - }, - symbols: (0, 0), - names: &[], - }; - - // The first few members may be special, so parse them. 
- // GNU has: - // - "/" or "/SYM64/": symbol table (optional) - // - "//": names table (optional) - // COFF has: - // - "/": first linker member - // - "/": second linker member - // - "//": names table - // BSD has: - // - "__.SYMDEF" or "__.SYMDEF SORTED": symbol table (optional) - // BSD 64-bit has: - // - "__.SYMDEF_64" or "__.SYMDEF_64 SORTED": symbol table (optional) - // BSD may use the extended name for the symbol table. This is handled - // by `ArchiveMember::parse`. - if tail < len { - let member = ArchiveMember::parse(data, &mut tail, &[])?; - if member.name == b"/" { - // GNU symbol table (unless we later determine this is COFF). - file.kind = ArchiveKind::Gnu; - file.symbols = member.file_range(); - members_offset = tail; - - if tail < len { - let member = ArchiveMember::parse(data, &mut tail, &[])?; - if member.name == b"/" { - // COFF linker member. - file.kind = ArchiveKind::Coff; - file.symbols = member.file_range(); - members_offset = tail; - - if tail < len { - let member = ArchiveMember::parse(data, &mut tail, &[])?; - if member.name == b"//" { - // COFF names table. - file.names = member.data(data)?; - members_offset = tail; - } - } - } else if member.name == b"//" { - // GNU names table. - file.names = member.data(data)?; - members_offset = tail; - } - } - } else if member.name == b"/SYM64/" { - // GNU 64-bit symbol table. - file.kind = ArchiveKind::Gnu64; - file.symbols = member.file_range(); - members_offset = tail; - - if tail < len { - let member = ArchiveMember::parse(data, &mut tail, &[])?; - if member.name == b"//" { - // GNU names table. - file.names = member.data(data)?; - members_offset = tail; - } - } - } else if member.name == b"//" { - // GNU names table. - file.kind = ArchiveKind::Gnu; - file.names = member.data(data)?; - members_offset = tail; - } else if member.name == b"__.SYMDEF" || member.name == b"__.SYMDEF SORTED" { - // BSD symbol table. 
- file.kind = ArchiveKind::Bsd; - file.symbols = member.file_range(); - members_offset = tail; - } else if member.name == b"__.SYMDEF_64" || member.name == b"__.SYMDEF_64 SORTED" { - // BSD 64-bit symbol table. - file.kind = ArchiveKind::Bsd64; - file.symbols = member.file_range(); - members_offset = tail; - } else { - // TODO: This could still be a BSD file. We leave this as unknown for now. - } - } - file.members = Members::Common { - offset: members_offset, - end_offset: members_end_offset, - }; - Ok(file) - } - - fn parse_aixbig(data: R) -> read::Result { - let mut tail = 0; - - let file_header = data - .read::(&mut tail) - .read_error("Invalid AIX big archive file header")?; - // Caller already validated this. - debug_assert_eq!(file_header.magic, archive::AIX_BIG_MAGIC); - - let mut file = ArchiveFile { - data, - kind: ArchiveKind::AixBig, - members: Members::AixBig { index: &[] }, - symbols: (0, 0), - names: &[], - }; - - // Read the span of symbol table. - let symtbl64 = parse_u64_digits(&file_header.gst64off, 10) - .read_error("Invalid offset to 64-bit symbol table in AIX big archive")?; - if symtbl64 > 0 { - // The symbol table is also a file with header. - let member = ArchiveMember::parse_aixbig(data, symtbl64)?; - file.symbols = member.file_range(); - } else { - let symtbl = parse_u64_digits(&file_header.gstoff, 10) - .read_error("Invalid offset to symbol table in AIX big archive")?; - if symtbl > 0 { - // The symbol table is also a file with header. - let member = ArchiveMember::parse_aixbig(data, symtbl)?; - file.symbols = member.file_range(); - } - } - - // Big archive member index table lists file entries with offsets and names. - // To avoid potential infinite loop (members are double-linked list), the - // iterator goes through the index instead of real members. 
- let member_table_offset = parse_u64_digits(&file_header.memoff, 10) - .read_error("Invalid offset for member table of AIX big archive")?; - if member_table_offset == 0 { - // The offset would be zero if archive contains no file. - return Ok(file); - } - - // The member index table is also a file with header. - let member = ArchiveMember::parse_aixbig(data, member_table_offset)?; - let mut member_data = Bytes(member.data(data)?); - - // Structure of member index table: - // Number of entries (20 bytes) - // Offsets of each entry (20*N bytes) - // Names string table (the rest of bytes to fill size defined in header) - let members_count_bytes = member_data - .read_slice::(20) - .read_error("Missing member count in AIX big archive")?; - let members_count = parse_u64_digits(members_count_bytes, 10) - .and_then(|size| size.try_into().ok()) - .read_error("Invalid member count in AIX big archive")?; - let index = member_data - .read_slice::(members_count) - .read_error("Member count overflow in AIX big archive")?; - file.members = Members::AixBig { index }; - - Ok(file) - } - - /// Return the archive format. - #[inline] - pub fn kind(&self) -> ArchiveKind { - self.kind - } - - /// Iterate over the members of the archive. - /// - /// This does not return special members. - #[inline] - pub fn members(&self) -> ArchiveMemberIterator<'data, R> { - ArchiveMemberIterator { - data: self.data, - members: self.members, - names: self.names, - } - } -} - -/// An iterator over the members of an archive. 
-#[derive(Debug)] -pub struct ArchiveMemberIterator<'data, R: ReadRef<'data> = &'data [u8]> { - data: R, - members: Members<'data>, - names: &'data [u8], -} - -impl<'data, R: ReadRef<'data>> Iterator for ArchiveMemberIterator<'data, R> { - type Item = read::Result>; - - fn next(&mut self) -> Option { - match &mut self.members { - Members::Common { - ref mut offset, - ref mut end_offset, - } => { - if *offset >= *end_offset { - return None; - } - let member = ArchiveMember::parse(self.data, offset, self.names); - if member.is_err() { - *offset = *end_offset; - } - Some(member) - } - Members::AixBig { ref mut index } => match **index { - [] => None, - [ref first, ref rest @ ..] => { - *index = rest; - let member = ArchiveMember::parse_aixbig_index(self.data, first); - if member.is_err() { - *index = &[]; - } - Some(member) - } - }, - } - } -} - -/// An archive member header. -#[derive(Debug, Clone, Copy)] -enum MemberHeader<'data> { - /// Common header used by many formats. - Common(&'data archive::Header), - /// AIX big archive header - AixBig(&'data archive::AixHeader), -} - -/// A partially parsed archive member. -#[derive(Debug)] -pub struct ArchiveMember<'data> { - header: MemberHeader<'data>, - name: &'data [u8], - offset: u64, - size: u64, -} - -impl<'data> ArchiveMember<'data> { - /// Parse the member header, name, and file data in an archive with the common format. - /// - /// This reads the extended name (if any) and adjusts the file size. 
- fn parse>( - data: R, - offset: &mut u64, - names: &'data [u8], - ) -> read::Result { - let header = data - .read::(offset) - .read_error("Invalid archive member header")?; - if header.terminator != archive::TERMINATOR { - return Err(Error("Invalid archive terminator")); - } - - let mut file_offset = *offset; - let mut file_size = - parse_u64_digits(&header.size, 10).read_error("Invalid archive member size")?; - *offset = offset - .checked_add(file_size) - .read_error("Archive member size is too large")?; - // Entries are padded to an even number of bytes. - if (file_size & 1) != 0 { - *offset = offset.saturating_add(1); - } - - let name = if header.name[0] == b'/' && (header.name[1] as char).is_ascii_digit() { - // Read file name from the names table. - parse_sysv_extended_name(&header.name[1..], names) - .read_error("Invalid archive extended name offset")? - } else if &header.name[..3] == b"#1/" && (header.name[3] as char).is_ascii_digit() { - // Read file name from the start of the file data. - parse_bsd_extended_name(&header.name[3..], data, &mut file_offset, &mut file_size) - .read_error("Invalid archive extended name length")? - } else if header.name[0] == b'/' { - let name_len = memchr::memchr(b' ', &header.name).unwrap_or(header.name.len()); - &header.name[..name_len] - } else { - let name_len = memchr::memchr(b'/', &header.name) - .or_else(|| memchr::memchr(b' ', &header.name)) - .unwrap_or(header.name.len()); - &header.name[..name_len] - }; - - Ok(ArchiveMember { - header: MemberHeader::Common(header), - name, - offset: file_offset, - size: file_size, - }) - } - - /// Parse a member index entry in an AIX big archive, - /// and then parse the member header, name, and file data. 
- fn parse_aixbig_index>( - data: R, - index: &archive::AixMemberOffset, - ) -> read::Result { - let offset = parse_u64_digits(&index.0, 10) - .read_error("Invalid AIX big archive file member offset")?; - Self::parse_aixbig(data, offset) - } - - /// Parse the member header, name, and file data in an AIX big archive. - fn parse_aixbig>(data: R, mut offset: u64) -> read::Result { - // The format was described at - // https://www.ibm.com/docs/en/aix/7.3?topic=formats-ar-file-format-big - let header = data - .read::(&mut offset) - .read_error("Invalid AIX big archive member header")?; - let name_length = parse_u64_digits(&header.namlen, 10) - .read_error("Invalid AIX big archive member name length")?; - let name = data - .read_bytes(&mut offset, name_length) - .read_error("Invalid AIX big archive member name")?; - - // The actual data for a file member begins at the first even-byte boundary beyond the - // member header and continues for the number of bytes specified by the ar_size field. The - // ar command inserts null bytes for padding where necessary. - if offset & 1 != 0 { - offset = offset.saturating_add(1); - } - // Because of the even-byte boundary, we have to read and check terminator after header. - let terminator = data - .read_bytes(&mut offset, 2) - .read_error("Invalid AIX big archive terminator")?; - if terminator != archive::TERMINATOR { - return Err(Error("Invalid AIX big archive terminator")); - } - - let size = parse_u64_digits(&header.size, 10) - .read_error("Invalid archive member size in AIX big archive")?; - Ok(ArchiveMember { - header: MemberHeader::AixBig(header), - name, - offset, - size, - }) - } - - /// Return the raw header that is common to many archive formats. - /// - /// Returns `None` if this archive does not use the common header format. 
- #[inline] - pub fn header(&self) -> Option<&'data archive::Header> { - match self.header { - MemberHeader::Common(header) => Some(header), - _ => None, - } - } - - /// Return the raw header for AIX big archives. - /// - /// Returns `None` if this is not an AIX big archive. - #[inline] - pub fn aix_header(&self) -> Option<&'data archive::AixHeader> { - match self.header { - MemberHeader::AixBig(header) => Some(header), - _ => None, - } - } - - /// Return the parsed file name. - /// - /// This may be an extended file name. - #[inline] - pub fn name(&self) -> &'data [u8] { - self.name - } - - /// Parse the file modification timestamp from the header. - #[inline] - pub fn date(&self) -> Option { - match &self.header { - MemberHeader::Common(header) => parse_u64_digits(&header.date, 10), - MemberHeader::AixBig(header) => parse_u64_digits(&header.date, 10), - } - } - - /// Parse the user ID from the header. - #[inline] - pub fn uid(&self) -> Option { - match &self.header { - MemberHeader::Common(header) => parse_u64_digits(&header.uid, 10), - MemberHeader::AixBig(header) => parse_u64_digits(&header.uid, 10), - } - } - - /// Parse the group ID from the header. - #[inline] - pub fn gid(&self) -> Option { - match &self.header { - MemberHeader::Common(header) => parse_u64_digits(&header.gid, 10), - MemberHeader::AixBig(header) => parse_u64_digits(&header.gid, 10), - } - } - - /// Parse the file mode from the header. - #[inline] - pub fn mode(&self) -> Option { - match &self.header { - MemberHeader::Common(header) => parse_u64_digits(&header.mode, 8), - MemberHeader::AixBig(header) => parse_u64_digits(&header.mode, 8), - } - } - - /// Return the offset and size of the file data. - pub fn file_range(&self) -> (u64, u64) { - (self.offset, self.size) - } - - /// Return the file data. 
- #[inline] - pub fn data>(&self, data: R) -> read::Result<&'data [u8]> { - data.read_bytes_at(self.offset, self.size) - .read_error("Archive member size is too large") - } -} - -// Ignores bytes starting from the first space. -fn parse_u64_digits(digits: &[u8], radix: u32) -> Option { - if let [b' ', ..] = digits { - return None; - } - let mut result: u64 = 0; - for &c in digits { - if c == b' ' { - return Some(result); - } else { - let x = (c as char).to_digit(radix)?; - result = result - .checked_mul(u64::from(radix))? - .checked_add(u64::from(x))?; - } - } - Some(result) -} - -fn parse_sysv_extended_name<'data>(digits: &[u8], names: &'data [u8]) -> Result<&'data [u8], ()> { - let offset = parse_u64_digits(digits, 10).ok_or(())?; - let offset = offset.try_into().map_err(|_| ())?; - let name_data = names.get(offset..).ok_or(())?; - let name = match memchr::memchr2(b'/', b'\0', name_data) { - Some(len) => &name_data[..len], - None => name_data, - }; - Ok(name) -} - -/// Modifies `data` to start after the extended name. 
-fn parse_bsd_extended_name<'data, R: ReadRef<'data>>( - digits: &[u8], - data: R, - offset: &mut u64, - size: &mut u64, -) -> Result<&'data [u8], ()> { - let len = parse_u64_digits(digits, 10).ok_or(())?; - *size = size.checked_sub(len).ok_or(())?; - let name_data = data.read_bytes(offset, len)?; - let name = match memchr::memchr(b'\0', name_data) { - Some(len) => &name_data[..len], - None => name_data, - }; - Ok(name) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn kind() { - let data = b"!\n"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Unknown); - - let data = b"\ - !\n\ - / 4 `\n\ - 0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Gnu); - - let data = b"\ - !\n\ - // 4 `\n\ - 0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Gnu); - - let data = b"\ - !\n\ - / 4 `\n\ - 0000\ - // 4 `\n\ - 0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Gnu); - - let data = b"\ - !\n\ - /SYM64/ 4 `\n\ - 0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Gnu64); - - let data = b"\ - !\n\ - /SYM64/ 4 `\n\ - 0000\ - // 4 `\n\ - 0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Gnu64); - - let data = b"\ - !\n\ - __.SYMDEF 4 `\n\ - 0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Bsd); - - let data = b"\ - !\n\ - #1/9 13 `\n\ - __.SYMDEF0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Bsd); - - let data = b"\ - !\n\ - #1/16 20 `\n\ - __.SYMDEF SORTED0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Bsd); - - let data = b"\ - !\n\ - __.SYMDEF_64 4 `\n\ - 0000"; - let archive = 
ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Bsd64); - - let data = b"\ - !\n\ - #1/12 16 `\n\ - __.SYMDEF_640000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Bsd64); - - let data = b"\ - !\n\ - #1/19 23 `\n\ - __.SYMDEF_64 SORTED0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Bsd64); - - let data = b"\ - !\n\ - / 4 `\n\ - 0000\ - / 4 `\n\ - 0000\ - // 4 `\n\ - 0000"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Coff); - - let data = b"\ - \n\ - 0 0 \ - 0 0 \ - 0 128 \ - 6 0 \ - 0 \0\0\0\0\0\0\0\0\0\0\0\0\ - \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ - \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ - \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; - let archive = ArchiveFile::parse(&data[..]).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::AixBig); - } - - #[test] - fn gnu_names() { - let data = b"\ - !\n\ - // 18 `\n\ - 0123456789abcdef/\n\ - s p a c e/ 0 0 0 644 4 `\n\ - 0000\ - 0123456789abcde/0 0 0 644 3 `\n\ - odd\n\ - /0 0 0 0 644 4 `\n\ - even"; - let data = &data[..]; - let archive = ArchiveFile::parse(data).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Gnu); - let mut members = archive.members(); - - let member = members.next().unwrap().unwrap(); - assert_eq!(member.name(), b"s p a c e"); - assert_eq!(member.data(data).unwrap(), &b"0000"[..]); - - let member = members.next().unwrap().unwrap(); - assert_eq!(member.name(), b"0123456789abcde"); - assert_eq!(member.data(data).unwrap(), &b"odd"[..]); - - let member = members.next().unwrap().unwrap(); - assert_eq!(member.name(), b"0123456789abcdef"); - assert_eq!(member.data(data).unwrap(), &b"even"[..]); - - assert!(members.next().is_none()); - } - - #[test] - fn bsd_names() { - let data = b"\ - !\n\ - 0123456789abcde 0 0 0 644 3 `\n\ - odd\n\ - #1/16 0 0 0 644 20 `\n\ - 0123456789abcdefeven"; - let data = &data[..]; - let archive = 
ArchiveFile::parse(data).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::Unknown); - let mut members = archive.members(); - - let member = members.next().unwrap().unwrap(); - assert_eq!(member.name(), b"0123456789abcde"); - assert_eq!(member.data(data).unwrap(), &b"odd"[..]); - - let member = members.next().unwrap().unwrap(); - assert_eq!(member.name(), b"0123456789abcdef"); - assert_eq!(member.data(data).unwrap(), &b"even"[..]); - - assert!(members.next().is_none()); - } - - #[test] - fn aix_names() { - let data = b"\ - \n\ - 396 0 0 \ - 128 262 0 \ - 4 262 0 \ - 1662610370 223 1 644 16 \ - 0123456789abcdef`\nord\n\ - 4 396 128 \ - 1662610374 223 1 644 16 \ - fedcba9876543210`\nrev\n\ - 94 0 262 \ - 0 0 0 0 0 \ - `\n2 128 \ - 262 0123456789abcdef\0fedcba9876543210\0"; - let data = &data[..]; - let archive = ArchiveFile::parse(data).unwrap(); - assert_eq!(archive.kind(), ArchiveKind::AixBig); - let mut members = archive.members(); - - let member = members.next().unwrap().unwrap(); - assert_eq!(member.name(), b"0123456789abcdef"); - assert_eq!(member.data(data).unwrap(), &b"ord\n"[..]); - - let member = members.next().unwrap().unwrap(); - assert_eq!(member.name(), b"fedcba9876543210"); - assert_eq!(member.data(data).unwrap(), &b"rev\n"[..]); - - assert!(members.next().is_none()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/coff/comdat.rs s390-tools-2.33.1/rust-vendor/object/src/read/coff/comdat.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/coff/comdat.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/coff/comdat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,207 +0,0 @@ -use core::str; - -use crate::endian::LittleEndian as LE; -use crate::pe; -use crate::read::{ - self, ComdatKind, ObjectComdat, ReadError, ReadRef, Result, SectionIndex, SymbolIndex, -}; - -use super::{CoffFile, CoffHeader, ImageSymbol}; - -/// An iterator over the COMDAT section groups of a `CoffBigFile`. 
-pub type CoffBigComdatIterator<'data, 'file, R = &'data [u8]> = - CoffComdatIterator<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// An iterator over the COMDAT section groups of a `CoffFile`. -#[derive(Debug)] -pub struct CoffComdatIterator< - 'data, - 'file, - R: ReadRef<'data> = &'data [u8], - Coff: CoffHeader = pe::ImageFileHeader, -> { - pub(super) file: &'file CoffFile<'data, R, Coff>, - pub(super) index: usize, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> Iterator - for CoffComdatIterator<'data, 'file, R, Coff> -{ - type Item = CoffComdat<'data, 'file, R, Coff>; - - fn next(&mut self) -> Option { - loop { - let index = self.index; - let symbol = self.file.common.symbols.symbol(index).ok()?; - self.index += 1 + symbol.number_of_aux_symbols() as usize; - if let Some(comdat) = CoffComdat::parse(self.file, symbol, index) { - return Some(comdat); - } - } - } -} - -/// A COMDAT section group of a `CoffBigFile`. -pub type CoffBigComdat<'data, 'file, R = &'data [u8]> = - CoffComdat<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// A COMDAT section group of a `CoffFile`. -#[derive(Debug)] -pub struct CoffComdat< - 'data, - 'file, - R: ReadRef<'data> = &'data [u8], - Coff: CoffHeader = pe::ImageFileHeader, -> { - file: &'file CoffFile<'data, R, Coff>, - symbol_index: SymbolIndex, - symbol: &'data Coff::ImageSymbol, - selection: u8, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> CoffComdat<'data, 'file, R, Coff> { - fn parse( - file: &'file CoffFile<'data, R, Coff>, - section_symbol: &'data Coff::ImageSymbol, - index: usize, - ) -> Option> { - // Must be a section symbol. - if !section_symbol.has_aux_section() { - return None; - } - - // Auxiliary record must have a non-associative selection. - let aux = file.common.symbols.aux_section(index).ok()?; - let selection = aux.selection; - if selection == 0 || selection == pe::IMAGE_COMDAT_SELECT_ASSOCIATIVE { - return None; - } - - // Find the COMDAT symbol. 
- let mut symbol_index = index; - let mut symbol = section_symbol; - let section_number = section_symbol.section_number(); - loop { - symbol_index += 1 + symbol.number_of_aux_symbols() as usize; - symbol = file.common.symbols.symbol(symbol_index).ok()?; - if section_number == symbol.section_number() { - break; - } - } - - Some(CoffComdat { - file, - symbol_index: SymbolIndex(symbol_index), - symbol, - selection, - }) - } -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> read::private::Sealed - for CoffComdat<'data, 'file, R, Coff> -{ -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> ObjectComdat<'data> - for CoffComdat<'data, 'file, R, Coff> -{ - type SectionIterator = CoffComdatSectionIterator<'data, 'file, R, Coff>; - - #[inline] - fn kind(&self) -> ComdatKind { - match self.selection { - pe::IMAGE_COMDAT_SELECT_NODUPLICATES => ComdatKind::NoDuplicates, - pe::IMAGE_COMDAT_SELECT_ANY => ComdatKind::Any, - pe::IMAGE_COMDAT_SELECT_SAME_SIZE => ComdatKind::SameSize, - pe::IMAGE_COMDAT_SELECT_EXACT_MATCH => ComdatKind::ExactMatch, - pe::IMAGE_COMDAT_SELECT_LARGEST => ComdatKind::Largest, - pe::IMAGE_COMDAT_SELECT_NEWEST => ComdatKind::Newest, - _ => ComdatKind::Unknown, - } - } - - #[inline] - fn symbol(&self) -> SymbolIndex { - self.symbol_index - } - - #[inline] - fn name_bytes(&self) -> Result<&[u8]> { - // Find the name of first symbol referring to the section. - self.symbol.name(self.file.common.symbols.strings()) - } - - #[inline] - fn name(&self) -> Result<&str> { - let bytes = self.name_bytes()?; - str::from_utf8(bytes) - .ok() - .read_error("Non UTF-8 COFF COMDAT name") - } - - #[inline] - fn sections(&self) -> Self::SectionIterator { - CoffComdatSectionIterator { - file: self.file, - section_number: self.symbol.section_number(), - index: 0, - } - } -} - -/// An iterator over the sections in a COMDAT section group of a `CoffBigFile`. 
-pub type CoffBigComdatSectionIterator<'data, 'file, R = &'data [u8]> = - CoffComdatSectionIterator<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// An iterator over the sections in a COMDAT section group of a `CoffFile`. -#[derive(Debug)] -pub struct CoffComdatSectionIterator< - 'data, - 'file, - R: ReadRef<'data> = &'data [u8], - Coff: CoffHeader = pe::ImageFileHeader, -> { - file: &'file CoffFile<'data, R, Coff>, - section_number: i32, - index: usize, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> Iterator - for CoffComdatSectionIterator<'data, 'file, R, Coff> -{ - type Item = SectionIndex; - - fn next(&mut self) -> Option { - // Find associated COMDAT symbols. - // TODO: it seems gcc doesn't use associated symbols for this - loop { - let index = self.index; - let symbol = self.file.common.symbols.symbol(index).ok()?; - self.index += 1 + symbol.number_of_aux_symbols() as usize; - - // Must be a section symbol. - if !symbol.has_aux_section() { - continue; - } - - let section_number = symbol.section_number(); - - let aux = self.file.common.symbols.aux_section(index).ok()?; - if aux.selection == pe::IMAGE_COMDAT_SELECT_ASSOCIATIVE { - let number = if Coff::is_type_bigobj() { - u32::from(aux.number.get(LE)) | (u32::from(aux.high_number.get(LE)) << 16) - } else { - u32::from(aux.number.get(LE)) - }; - if number as i32 == self.section_number { - return Some(SectionIndex(section_number as usize)); - } - } else if aux.selection != 0 { - if section_number == self.section_number { - return Some(SectionIndex(section_number as usize)); - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/coff/file.rs s390-tools-2.33.1/rust-vendor/object/src/read/coff/file.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/coff/file.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/coff/file.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,364 +0,0 @@ -use alloc::vec::Vec; -use core::fmt::Debug; - -use 
crate::read::{ - self, Architecture, Export, FileFlags, Import, NoDynamicRelocationIterator, Object, ObjectKind, - ObjectSection, ReadError, ReadRef, Result, SectionIndex, SymbolIndex, -}; -use crate::{pe, LittleEndian as LE, Pod}; - -use super::{ - CoffComdat, CoffComdatIterator, CoffSection, CoffSectionIterator, CoffSegment, - CoffSegmentIterator, CoffSymbol, CoffSymbolIterator, CoffSymbolTable, ImageSymbol, - SectionTable, SymbolTable, -}; - -/// The common parts of `PeFile` and `CoffFile`. -#[derive(Debug)] -pub(crate) struct CoffCommon<'data, R: ReadRef<'data>, Coff: CoffHeader = pe::ImageFileHeader> { - pub(crate) sections: SectionTable<'data>, - pub(crate) symbols: SymbolTable<'data, R, Coff>, - pub(crate) image_base: u64, -} - -/// A COFF bigobj object file with 32-bit section numbers. -pub type CoffBigFile<'data, R = &'data [u8]> = CoffFile<'data, R, pe::AnonObjectHeaderBigobj>; - -/// A COFF object file. -#[derive(Debug)] -pub struct CoffFile<'data, R: ReadRef<'data> = &'data [u8], Coff: CoffHeader = pe::ImageFileHeader> -{ - pub(super) header: &'data Coff, - pub(super) common: CoffCommon<'data, R, Coff>, - pub(super) data: R, -} - -impl<'data, R: ReadRef<'data>, Coff: CoffHeader> CoffFile<'data, R, Coff> { - /// Parse the raw COFF file data. 
- pub fn parse(data: R) -> Result { - let mut offset = 0; - let header = Coff::parse(data, &mut offset)?; - let sections = header.sections(data, offset)?; - let symbols = header.symbols(data)?; - - Ok(CoffFile { - header, - common: CoffCommon { - sections, - symbols, - image_base: 0, - }, - data, - }) - } -} - -impl<'data, R: ReadRef<'data>, Coff: CoffHeader> read::private::Sealed - for CoffFile<'data, R, Coff> -{ -} - -impl<'data, 'file, R, Coff> Object<'data, 'file> for CoffFile<'data, R, Coff> -where - 'data: 'file, - R: 'file + ReadRef<'data>, - Coff: CoffHeader, -{ - type Segment = CoffSegment<'data, 'file, R, Coff>; - type SegmentIterator = CoffSegmentIterator<'data, 'file, R, Coff>; - type Section = CoffSection<'data, 'file, R, Coff>; - type SectionIterator = CoffSectionIterator<'data, 'file, R, Coff>; - type Comdat = CoffComdat<'data, 'file, R, Coff>; - type ComdatIterator = CoffComdatIterator<'data, 'file, R, Coff>; - type Symbol = CoffSymbol<'data, 'file, R, Coff>; - type SymbolIterator = CoffSymbolIterator<'data, 'file, R, Coff>; - type SymbolTable = CoffSymbolTable<'data, 'file, R, Coff>; - type DynamicRelocationIterator = NoDynamicRelocationIterator; - - fn architecture(&self) -> Architecture { - match self.header.machine() { - pe::IMAGE_FILE_MACHINE_ARMNT => Architecture::Arm, - pe::IMAGE_FILE_MACHINE_ARM64 => Architecture::Aarch64, - pe::IMAGE_FILE_MACHINE_I386 => Architecture::I386, - pe::IMAGE_FILE_MACHINE_AMD64 => Architecture::X86_64, - _ => Architecture::Unknown, - } - } - - #[inline] - fn is_little_endian(&self) -> bool { - true - } - - #[inline] - fn is_64(&self) -> bool { - // Windows COFF is always 32-bit, even for 64-bit architectures. This could be confusing. 
- false - } - - fn kind(&self) -> ObjectKind { - ObjectKind::Relocatable - } - - fn segments(&'file self) -> CoffSegmentIterator<'data, 'file, R, Coff> { - CoffSegmentIterator { - file: self, - iter: self.common.sections.iter(), - } - } - - fn section_by_name_bytes( - &'file self, - section_name: &[u8], - ) -> Option> { - self.sections() - .find(|section| section.name_bytes() == Ok(section_name)) - } - - fn section_by_index( - &'file self, - index: SectionIndex, - ) -> Result> { - let section = self.common.sections.section(index.0)?; - Ok(CoffSection { - file: self, - index, - section, - }) - } - - fn sections(&'file self) -> CoffSectionIterator<'data, 'file, R, Coff> { - CoffSectionIterator { - file: self, - iter: self.common.sections.iter().enumerate(), - } - } - - fn comdats(&'file self) -> CoffComdatIterator<'data, 'file, R, Coff> { - CoffComdatIterator { - file: self, - index: 0, - } - } - - fn symbol_by_index( - &'file self, - index: SymbolIndex, - ) -> Result> { - let symbol = self.common.symbols.symbol(index.0)?; - Ok(CoffSymbol { - file: &self.common, - index, - symbol, - }) - } - - fn symbols(&'file self) -> CoffSymbolIterator<'data, 'file, R, Coff> { - CoffSymbolIterator { - file: &self.common, - index: 0, - } - } - - #[inline] - fn symbol_table(&'file self) -> Option> { - Some(CoffSymbolTable { file: &self.common }) - } - - fn dynamic_symbols(&'file self) -> CoffSymbolIterator<'data, 'file, R, Coff> { - CoffSymbolIterator { - file: &self.common, - // Hack: don't return any. - index: self.common.symbols.len(), - } - } - - #[inline] - fn dynamic_symbol_table(&'file self) -> Option> { - None - } - - #[inline] - fn dynamic_relocations(&'file self) -> Option { - None - } - - #[inline] - fn imports(&self) -> Result>> { - // TODO: this could return undefined symbols, but not needed yet. - Ok(Vec::new()) - } - - #[inline] - fn exports(&self) -> Result>> { - // TODO: this could return global symbols, but not needed yet. 
- Ok(Vec::new()) - } - - fn has_debug_symbols(&self) -> bool { - self.section_by_name(".debug_info").is_some() - } - - fn relative_address_base(&self) -> u64 { - 0 - } - - #[inline] - fn entry(&self) -> u64 { - 0 - } - - fn flags(&self) -> FileFlags { - FileFlags::Coff { - characteristics: self.header.characteristics(), - } - } -} - -/// Read the `class_id` field from an anon object header. -/// -/// This can be used to determine the format of the header. -pub fn anon_object_class_id<'data, R: ReadRef<'data>>(data: R) -> Result { - let header = data - .read_at::(0) - .read_error("Invalid anon object header size or alignment")?; - Ok(header.class_id) -} - -/// A trait for generic access to `ImageFileHeader` and `AnonObjectHeaderBigobj`. -#[allow(missing_docs)] -pub trait CoffHeader: Debug + Pod { - type ImageSymbol: ImageSymbol; - type ImageSymbolBytes: Debug + Pod; - - /// Return true if this type is `AnonObjectHeaderBigobj`. - /// - /// This is a property of the type, not a value in the header data. - fn is_type_bigobj() -> bool; - - fn machine(&self) -> u16; - fn number_of_sections(&self) -> u32; - fn pointer_to_symbol_table(&self) -> u32; - fn number_of_symbols(&self) -> u32; - fn characteristics(&self) -> u16; - - /// Read the file header. - /// - /// `data` must be the entire file data. - /// `offset` must be the file header offset. It is updated to point after the optional header, - /// which is where the section headers are located. - fn parse<'data, R: ReadRef<'data>>(data: R, offset: &mut u64) -> read::Result<&'data Self>; - - /// Read the section table. - /// - /// `data` must be the entire file data. - /// `offset` must be after the optional file header. - #[inline] - fn sections<'data, R: ReadRef<'data>>( - &self, - data: R, - offset: u64, - ) -> read::Result> { - SectionTable::parse(self, data, offset) - } - - /// Read the symbol table and string table. - /// - /// `data` must be the entire file data. 
- #[inline] - fn symbols<'data, R: ReadRef<'data>>( - &self, - data: R, - ) -> read::Result> { - SymbolTable::parse(self, data) - } -} - -impl CoffHeader for pe::ImageFileHeader { - type ImageSymbol = pe::ImageSymbol; - type ImageSymbolBytes = pe::ImageSymbolBytes; - - fn is_type_bigobj() -> bool { - false - } - - fn machine(&self) -> u16 { - self.machine.get(LE) - } - - fn number_of_sections(&self) -> u32 { - self.number_of_sections.get(LE).into() - } - - fn pointer_to_symbol_table(&self) -> u32 { - self.pointer_to_symbol_table.get(LE) - } - - fn number_of_symbols(&self) -> u32 { - self.number_of_symbols.get(LE) - } - - fn characteristics(&self) -> u16 { - self.characteristics.get(LE) - } - - fn parse<'data, R: ReadRef<'data>>(data: R, offset: &mut u64) -> read::Result<&'data Self> { - let header = data - .read::(offset) - .read_error("Invalid COFF file header size or alignment")?; - - // Skip over the optional header. - *offset = offset - .checked_add(header.size_of_optional_header.get(LE).into()) - .read_error("Invalid COFF optional header size")?; - - // TODO: maybe validate that the machine is known? 
- Ok(header) - } -} - -impl CoffHeader for pe::AnonObjectHeaderBigobj { - type ImageSymbol = pe::ImageSymbolEx; - type ImageSymbolBytes = pe::ImageSymbolExBytes; - - fn is_type_bigobj() -> bool { - true - } - - fn machine(&self) -> u16 { - self.machine.get(LE) - } - - fn number_of_sections(&self) -> u32 { - self.number_of_sections.get(LE) - } - - fn pointer_to_symbol_table(&self) -> u32 { - self.pointer_to_symbol_table.get(LE) - } - - fn number_of_symbols(&self) -> u32 { - self.number_of_symbols.get(LE) - } - - fn characteristics(&self) -> u16 { - 0 - } - - fn parse<'data, R: ReadRef<'data>>(data: R, offset: &mut u64) -> read::Result<&'data Self> { - let header = data - .read::(offset) - .read_error("Invalid COFF bigobj file header size or alignment")?; - - if header.sig1.get(LE) != pe::IMAGE_FILE_MACHINE_UNKNOWN - || header.sig2.get(LE) != 0xffff - || header.version.get(LE) < 2 - || header.class_id != pe::ANON_OBJECT_HEADER_BIGOBJ_CLASS_ID - { - return Err(read::Error("Invalid COFF bigobj header values")); - } - - // TODO: maybe validate that the machine is known? - Ok(header) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/coff/import.rs s390-tools-2.33.1/rust-vendor/object/src/read/coff/import.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/coff/import.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/coff/import.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,209 +0,0 @@ -//! Support for reading short import files. -//! -//! These are used by some Windows linkers as a more compact way to describe -//! dynamically imported symbols. - -use crate::read::{Architecture, Error, ReadError, ReadRef, Result}; -use crate::{pe, ByteString, Bytes, LittleEndian as LE}; - -/// A Windows short form description of a symbol to import. -/// -/// Used in Windows import libraries to provide a mapping from -/// a symbol name to a DLL export. This is not an object file. 
-#[derive(Debug, Clone)] -pub struct ImportFile<'data> { - header: &'data pe::ImportObjectHeader, - kind: ImportType, - dll: ByteString<'data>, - symbol: ByteString<'data>, - import: Option>, -} - -impl<'data> ImportFile<'data> { - /// Parse it. - pub fn parse>(data: R) -> Result { - let mut offset = 0; - let header = pe::ImportObjectHeader::parse(data, &mut offset)?; - let data = header.parse_data(data, &mut offset)?; - - // Unmangles a name by removing a `?`, `@` or `_` prefix. - fn strip_prefix(s: &[u8]) -> &[u8] { - match s.split_first() { - Some((b, rest)) if [b'?', b'@', b'_'].contains(b) => rest, - _ => s, - } - } - Ok(Self { - header, - dll: data.dll, - symbol: data.symbol, - kind: match header.import_type() { - pe::IMPORT_OBJECT_CODE => ImportType::Code, - pe::IMPORT_OBJECT_DATA => ImportType::Data, - pe::IMPORT_OBJECT_CONST => ImportType::Const, - _ => return Err(Error("Invalid COFF import library import type")), - }, - import: match header.name_type() { - pe::IMPORT_OBJECT_ORDINAL => None, - pe::IMPORT_OBJECT_NAME => Some(data.symbol()), - pe::IMPORT_OBJECT_NAME_NO_PREFIX => Some(strip_prefix(data.symbol())), - pe::IMPORT_OBJECT_NAME_UNDECORATE => Some( - strip_prefix(data.symbol()) - .split(|&b| b == b'@') - .next() - .unwrap(), - ), - pe::IMPORT_OBJECT_NAME_EXPORTAS => data.export(), - _ => return Err(Error("Unknown COFF import library name type")), - } - .map(ByteString), - }) - } - - /// Get the machine type. - pub fn architecture(&self) -> Architecture { - match self.header.machine.get(LE) { - pe::IMAGE_FILE_MACHINE_ARMNT => Architecture::Arm, - pe::IMAGE_FILE_MACHINE_ARM64 => Architecture::Aarch64, - pe::IMAGE_FILE_MACHINE_I386 => Architecture::I386, - pe::IMAGE_FILE_MACHINE_AMD64 => Architecture::X86_64, - _ => Architecture::Unknown, - } - } - - /// The public symbol name. - pub fn symbol(&self) -> &'data [u8] { - self.symbol.0 - } - - /// The name of the DLL to import the symbol from. 
- pub fn dll(&self) -> &'data [u8] { - self.dll.0 - } - - /// The name exported from the DLL. - pub fn import(&self) -> ImportName<'data> { - match self.import { - Some(name) => ImportName::Name(name.0), - None => ImportName::Ordinal(self.header.ordinal_or_hint.get(LE)), - } - } - - /// The type of import. Usually either a function or data. - pub fn import_type(&self) -> ImportType { - self.kind - } -} - -/// The name or ordinal to import from a DLL. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum ImportName<'data> { - /// Import by ordinal. Ordinarily this is a 1-based index. - Ordinal(u16), - /// Import by name. - Name(&'data [u8]), -} - -/// The kind of import symbol. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum ImportType { - /// An executable code symbol. - Code, - /// A data symbol. - Data, - /// A constant value. - Const, -} - -impl pe::ImportObjectHeader { - /// Read the short import header. - /// - /// Also checks that the signature and version are valid. - /// Directly following this header will be the string data. - pub fn parse<'data, R: ReadRef<'data>>(data: R, offset: &mut u64) -> Result<&'data Self> { - let header = data - .read::(offset) - .read_error("Invalid COFF import library header size")?; - if header.sig1.get(LE) != 0 || header.sig2.get(LE) != pe::IMPORT_OBJECT_HDR_SIG2 { - Err(Error("Invalid COFF import library header")) - } else if header.version.get(LE) != 0 { - Err(Error("Unknown COFF import library header version")) - } else { - Ok(header) - } - } - - /// Parse the data following the header. 
- pub fn parse_data<'data, R: ReadRef<'data>>( - &self, - data: R, - offset: &mut u64, - ) -> Result> { - let mut data = Bytes( - data.read_bytes(offset, u64::from(self.size_of_data.get(LE))) - .read_error("Invalid COFF import library data size")?, - ); - let symbol = data - .read_string() - .map(ByteString) - .read_error("Could not read COFF import library symbol name")?; - let dll = data - .read_string() - .map(ByteString) - .read_error("Could not read COFF import library DLL name")?; - let export = if self.name_type() == pe::IMPORT_OBJECT_NAME_EXPORTAS { - data.read_string() - .map(ByteString) - .map(Some) - .read_error("Could not read COFF import library export name")? - } else { - None - }; - Ok(ImportObjectData { - symbol, - dll, - export, - }) - } - - /// The type of import. - /// - /// This is one of the `IMPORT_OBJECT_*` constants. - pub fn import_type(&self) -> u16 { - self.name_type.get(LE) & pe::IMPORT_OBJECT_TYPE_MASK - } - - /// The type of import name. - /// - /// This is one of the `IMPORT_OBJECT_*` constants. - pub fn name_type(&self) -> u16 { - (self.name_type.get(LE) >> pe::IMPORT_OBJECT_NAME_SHIFT) & pe::IMPORT_OBJECT_NAME_MASK - } -} - -/// The data following `ImportObjectHeader`. -#[derive(Debug, Clone)] -pub struct ImportObjectData<'data> { - symbol: ByteString<'data>, - dll: ByteString<'data>, - export: Option>, -} - -impl<'data> ImportObjectData<'data> { - /// The public symbol name. - pub fn symbol(&self) -> &'data [u8] { - self.symbol.0 - } - - /// The name of the DLL to import the symbol from. - pub fn dll(&self) -> &'data [u8] { - self.dll.0 - } - - /// The name exported from the DLL. - /// - /// This is only set if the name is not derived from the symbol name. 
- pub fn export(&self) -> Option<&'data [u8]> { - self.export.map(|export| export.0) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/coff/mod.rs s390-tools-2.33.1/rust-vendor/object/src/read/coff/mod.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/coff/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/coff/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -//! Support for reading Windows COFF files. -//! -//! Provides `CoffFile` and related types which implement the `Object` trait. - -mod file; -pub use file::*; - -mod section; -pub use section::*; - -mod symbol; -pub use symbol::*; - -mod relocation; -pub use relocation::*; - -mod comdat; -pub use comdat::*; - -mod import; -pub use import::*; diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/coff/relocation.rs s390-tools-2.33.1/rust-vendor/object/src/read/coff/relocation.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/coff/relocation.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/coff/relocation.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,104 +0,0 @@ -use alloc::fmt; -use core::slice; - -use crate::endian::LittleEndian as LE; -use crate::pe; -use crate::read::{ - ReadRef, Relocation, RelocationEncoding, RelocationKind, RelocationTarget, SymbolIndex, -}; - -use super::{CoffFile, CoffHeader}; - -/// An iterator over the relocations in a `CoffBigSection`. -pub type CoffBigRelocationIterator<'data, 'file, R = &'data [u8]> = - CoffRelocationIterator<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// An iterator over the relocations in a `CoffSection`. 
-pub struct CoffRelocationIterator< - 'data, - 'file, - R: ReadRef<'data> = &'data [u8], - Coff: CoffHeader = pe::ImageFileHeader, -> { - pub(super) file: &'file CoffFile<'data, R, Coff>, - pub(super) iter: slice::Iter<'data, pe::ImageRelocation>, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> Iterator - for CoffRelocationIterator<'data, 'file, R, Coff> -{ - type Item = (u64, Relocation); - - fn next(&mut self) -> Option { - self.iter.next().map(|relocation| { - let (kind, size, addend) = match self.file.header.machine() { - pe::IMAGE_FILE_MACHINE_ARMNT => match relocation.typ.get(LE) { - pe::IMAGE_REL_ARM_ADDR32 => (RelocationKind::Absolute, 32, 0), - pe::IMAGE_REL_ARM_ADDR32NB => (RelocationKind::ImageOffset, 32, 0), - pe::IMAGE_REL_ARM_REL32 => (RelocationKind::Relative, 32, -4), - pe::IMAGE_REL_ARM_SECTION => (RelocationKind::SectionIndex, 16, 0), - pe::IMAGE_REL_ARM_SECREL => (RelocationKind::SectionOffset, 32, 0), - typ => (RelocationKind::Coff(typ), 0, 0), - }, - pe::IMAGE_FILE_MACHINE_ARM64 => match relocation.typ.get(LE) { - pe::IMAGE_REL_ARM64_ADDR32 => (RelocationKind::Absolute, 32, 0), - pe::IMAGE_REL_ARM64_ADDR32NB => (RelocationKind::ImageOffset, 32, 0), - pe::IMAGE_REL_ARM64_SECREL => (RelocationKind::SectionOffset, 32, 0), - pe::IMAGE_REL_ARM64_SECTION => (RelocationKind::SectionIndex, 16, 0), - pe::IMAGE_REL_ARM64_ADDR64 => (RelocationKind::Absolute, 64, 0), - pe::IMAGE_REL_ARM64_REL32 => (RelocationKind::Relative, 32, -4), - typ => (RelocationKind::Coff(typ), 0, 0), - }, - pe::IMAGE_FILE_MACHINE_I386 => match relocation.typ.get(LE) { - pe::IMAGE_REL_I386_DIR16 => (RelocationKind::Absolute, 16, 0), - pe::IMAGE_REL_I386_REL16 => (RelocationKind::Relative, 16, 0), - pe::IMAGE_REL_I386_DIR32 => (RelocationKind::Absolute, 32, 0), - pe::IMAGE_REL_I386_DIR32NB => (RelocationKind::ImageOffset, 32, 0), - pe::IMAGE_REL_I386_SECTION => (RelocationKind::SectionIndex, 16, 0), - pe::IMAGE_REL_I386_SECREL => (RelocationKind::SectionOffset, 32, 0), 
- pe::IMAGE_REL_I386_SECREL7 => (RelocationKind::SectionOffset, 7, 0), - pe::IMAGE_REL_I386_REL32 => (RelocationKind::Relative, 32, -4), - typ => (RelocationKind::Coff(typ), 0, 0), - }, - pe::IMAGE_FILE_MACHINE_AMD64 => match relocation.typ.get(LE) { - pe::IMAGE_REL_AMD64_ADDR64 => (RelocationKind::Absolute, 64, 0), - pe::IMAGE_REL_AMD64_ADDR32 => (RelocationKind::Absolute, 32, 0), - pe::IMAGE_REL_AMD64_ADDR32NB => (RelocationKind::ImageOffset, 32, 0), - pe::IMAGE_REL_AMD64_REL32 => (RelocationKind::Relative, 32, -4), - pe::IMAGE_REL_AMD64_REL32_1 => (RelocationKind::Relative, 32, -5), - pe::IMAGE_REL_AMD64_REL32_2 => (RelocationKind::Relative, 32, -6), - pe::IMAGE_REL_AMD64_REL32_3 => (RelocationKind::Relative, 32, -7), - pe::IMAGE_REL_AMD64_REL32_4 => (RelocationKind::Relative, 32, -8), - pe::IMAGE_REL_AMD64_REL32_5 => (RelocationKind::Relative, 32, -9), - pe::IMAGE_REL_AMD64_SECTION => (RelocationKind::SectionIndex, 16, 0), - pe::IMAGE_REL_AMD64_SECREL => (RelocationKind::SectionOffset, 32, 0), - pe::IMAGE_REL_AMD64_SECREL7 => (RelocationKind::SectionOffset, 7, 0), - typ => (RelocationKind::Coff(typ), 0, 0), - }, - _ => (RelocationKind::Coff(relocation.typ.get(LE)), 0, 0), - }; - let target = RelocationTarget::Symbol(SymbolIndex( - relocation.symbol_table_index.get(LE) as usize, - )); - ( - u64::from(relocation.virtual_address.get(LE)), - Relocation { - kind, - encoding: RelocationEncoding::Generic, - size, - target, - addend, - implicit_addend: true, - }, - ) - }) - } -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> fmt::Debug - for CoffRelocationIterator<'data, 'file, R, Coff> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("CoffRelocationIterator").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/coff/section.rs s390-tools-2.33.1/rust-vendor/object/src/read/coff/section.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/coff/section.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/object/src/read/coff/section.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,574 +0,0 @@ -use core::convert::TryFrom; -use core::{iter, result, slice, str}; - -use crate::endian::LittleEndian as LE; -use crate::pe; -use crate::read::util::StringTable; -use crate::read::{ - self, CompressedData, CompressedFileRange, Error, ObjectSection, ObjectSegment, ReadError, - ReadRef, Result, SectionFlags, SectionIndex, SectionKind, SegmentFlags, -}; - -use super::{CoffFile, CoffHeader, CoffRelocationIterator}; - -/// The table of section headers in a COFF or PE file. -#[derive(Debug, Default, Clone, Copy)] -pub struct SectionTable<'data> { - sections: &'data [pe::ImageSectionHeader], -} - -impl<'data> SectionTable<'data> { - /// Parse the section table. - /// - /// `data` must be the entire file data. - /// `offset` must be after the optional file header. - pub fn parse>( - header: &Coff, - data: R, - offset: u64, - ) -> Result { - let sections = data - .read_slice_at(offset, header.number_of_sections() as usize) - .read_error("Invalid COFF/PE section headers")?; - Ok(SectionTable { sections }) - } - - /// Iterate over the section headers. - /// - /// Warning: sections indices start at 1. - #[inline] - pub fn iter(&self) -> slice::Iter<'data, pe::ImageSectionHeader> { - self.sections.iter() - } - - /// Return true if the section table is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.sections.is_empty() - } - - /// The number of section headers. - #[inline] - pub fn len(&self) -> usize { - self.sections.len() - } - - /// Return the section header at the given index. - /// - /// The index is 1-based. - pub fn section(&self, index: usize) -> read::Result<&'data pe::ImageSectionHeader> { - self.sections - .get(index.wrapping_sub(1)) - .read_error("Invalid COFF/PE section index") - } - - /// Return the section header with the given name. - /// - /// The returned index is 1-based. - /// - /// Ignores sections with invalid names. 
- pub fn section_by_name>( - &self, - strings: StringTable<'data, R>, - name: &[u8], - ) -> Option<(usize, &'data pe::ImageSectionHeader)> { - self.sections - .iter() - .enumerate() - .find(|(_, section)| section.name(strings) == Ok(name)) - .map(|(index, section)| (index + 1, section)) - } - - /// Compute the maximum file offset used by sections. - /// - /// This will usually match the end of file, unless the PE file has a - /// [data overlay](https://security.stackexchange.com/questions/77336/how-is-the-file-overlay-read-by-an-exe-virus) - pub fn max_section_file_offset(&self) -> u64 { - let mut max = 0; - for section in self.iter() { - match (section.pointer_to_raw_data.get(LE) as u64) - .checked_add(section.size_of_raw_data.get(LE) as u64) - { - None => { - // This cannot happen, we're suming two u32 into a u64 - continue; - } - Some(end_of_section) => { - if end_of_section > max { - max = end_of_section; - } - } - } - } - max - } -} - -/// An iterator over the loadable sections of a `CoffBigFile`. -pub type CoffBigSegmentIterator<'data, 'file, R = &'data [u8]> = - CoffSegmentIterator<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// An iterator over the loadable sections of a `CoffFile`. -#[derive(Debug)] -pub struct CoffSegmentIterator< - 'data, - 'file, - R: ReadRef<'data> = &'data [u8], - Coff: CoffHeader = pe::ImageFileHeader, -> { - pub(super) file: &'file CoffFile<'data, R, Coff>, - pub(super) iter: slice::Iter<'data, pe::ImageSectionHeader>, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> Iterator - for CoffSegmentIterator<'data, 'file, R, Coff> -{ - type Item = CoffSegment<'data, 'file, R, Coff>; - - fn next(&mut self) -> Option { - self.iter.next().map(|section| CoffSegment { - file: self.file, - section, - }) - } -} - -/// A loadable section of a `CoffBigFile`. -pub type CoffBigSegment<'data, 'file, R = &'data [u8]> = - CoffSegment<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// A loadable section of a `CoffFile`. 
-#[derive(Debug)] -pub struct CoffSegment< - 'data, - 'file, - R: ReadRef<'data> = &'data [u8], - Coff: CoffHeader = pe::ImageFileHeader, -> { - pub(super) file: &'file CoffFile<'data, R, Coff>, - pub(super) section: &'data pe::ImageSectionHeader, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> CoffSegment<'data, 'file, R, Coff> { - fn bytes(&self) -> Result<&'data [u8]> { - self.section - .coff_data(self.file.data) - .read_error("Invalid COFF section offset or size") - } -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> read::private::Sealed - for CoffSegment<'data, 'file, R, Coff> -{ -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> ObjectSegment<'data> - for CoffSegment<'data, 'file, R, Coff> -{ - #[inline] - fn address(&self) -> u64 { - u64::from(self.section.virtual_address.get(LE)) - } - - #[inline] - fn size(&self) -> u64 { - u64::from(self.section.virtual_size.get(LE)) - } - - #[inline] - fn align(&self) -> u64 { - self.section.coff_alignment() - } - - #[inline] - fn file_range(&self) -> (u64, u64) { - let (offset, size) = self.section.coff_file_range().unwrap_or((0, 0)); - (u64::from(offset), u64::from(size)) - } - - fn data(&self) -> Result<&'data [u8]> { - self.bytes() - } - - fn data_range(&self, address: u64, size: u64) -> Result> { - Ok(read::util::data_range( - self.bytes()?, - self.address(), - address, - size, - )) - } - - #[inline] - fn name_bytes(&self) -> Result> { - self.section - .name(self.file.common.symbols.strings()) - .map(Some) - } - - #[inline] - fn name(&self) -> Result> { - let name = self.section.name(self.file.common.symbols.strings())?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 COFF section name") - .map(Some) - } - - #[inline] - fn flags(&self) -> SegmentFlags { - let characteristics = self.section.characteristics.get(LE); - SegmentFlags::Coff { characteristics } - } -} - -/// An iterator over the sections of a `CoffBigFile`. 
-pub type CoffBigSectionIterator<'data, 'file, R = &'data [u8]> = - CoffSectionIterator<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// An iterator over the sections of a `CoffFile`. -#[derive(Debug)] -pub struct CoffSectionIterator< - 'data, - 'file, - R: ReadRef<'data> = &'data [u8], - Coff: CoffHeader = pe::ImageFileHeader, -> { - pub(super) file: &'file CoffFile<'data, R, Coff>, - pub(super) iter: iter::Enumerate>, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> Iterator - for CoffSectionIterator<'data, 'file, R, Coff> -{ - type Item = CoffSection<'data, 'file, R, Coff>; - - fn next(&mut self) -> Option { - self.iter.next().map(|(index, section)| CoffSection { - file: self.file, - index: SectionIndex(index + 1), - section, - }) - } -} - -/// A section of a `CoffBigFile`. -pub type CoffBigSection<'data, 'file, R = &'data [u8]> = - CoffSection<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// A section of a `CoffFile`. -#[derive(Debug)] -pub struct CoffSection< - 'data, - 'file, - R: ReadRef<'data> = &'data [u8], - Coff: CoffHeader = pe::ImageFileHeader, -> { - pub(super) file: &'file CoffFile<'data, R, Coff>, - pub(super) index: SectionIndex, - pub(super) section: &'data pe::ImageSectionHeader, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> CoffSection<'data, 'file, R, Coff> { - fn bytes(&self) -> Result<&'data [u8]> { - self.section - .coff_data(self.file.data) - .read_error("Invalid COFF section offset or size") - } -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> read::private::Sealed - for CoffSection<'data, 'file, R, Coff> -{ -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> ObjectSection<'data> - for CoffSection<'data, 'file, R, Coff> -{ - type RelocationIterator = CoffRelocationIterator<'data, 'file, R, Coff>; - - #[inline] - fn index(&self) -> SectionIndex { - self.index - } - - #[inline] - fn address(&self) -> u64 { - u64::from(self.section.virtual_address.get(LE)) - } - - #[inline] - fn 
size(&self) -> u64 { - // TODO: This may need to be the length from the auxiliary symbol for this section. - u64::from(self.section.size_of_raw_data.get(LE)) - } - - #[inline] - fn align(&self) -> u64 { - self.section.coff_alignment() - } - - #[inline] - fn file_range(&self) -> Option<(u64, u64)> { - let (offset, size) = self.section.coff_file_range()?; - Some((u64::from(offset), u64::from(size))) - } - - fn data(&self) -> Result<&'data [u8]> { - self.bytes() - } - - fn data_range(&self, address: u64, size: u64) -> Result> { - Ok(read::util::data_range( - self.bytes()?, - self.address(), - address, - size, - )) - } - - #[inline] - fn compressed_file_range(&self) -> Result { - Ok(CompressedFileRange::none(self.file_range())) - } - - #[inline] - fn compressed_data(&self) -> Result> { - self.data().map(CompressedData::none) - } - - #[inline] - fn name_bytes(&self) -> Result<&[u8]> { - self.section.name(self.file.common.symbols.strings()) - } - - #[inline] - fn name(&self) -> Result<&str> { - let name = self.name_bytes()?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 COFF section name") - } - - #[inline] - fn segment_name_bytes(&self) -> Result> { - Ok(None) - } - - #[inline] - fn segment_name(&self) -> Result> { - Ok(None) - } - - #[inline] - fn kind(&self) -> SectionKind { - self.section.kind() - } - - fn relocations(&self) -> CoffRelocationIterator<'data, 'file, R, Coff> { - let relocations = self.section.coff_relocations(self.file.data).unwrap_or(&[]); - CoffRelocationIterator { - file: self.file, - iter: relocations.iter(), - } - } - - fn flags(&self) -> SectionFlags { - SectionFlags::Coff { - characteristics: self.section.characteristics.get(LE), - } - } -} - -impl pe::ImageSectionHeader { - pub(crate) fn kind(&self) -> SectionKind { - let characteristics = self.characteristics.get(LE); - if characteristics & (pe::IMAGE_SCN_CNT_CODE | pe::IMAGE_SCN_MEM_EXECUTE) != 0 { - SectionKind::Text - } else if characteristics & pe::IMAGE_SCN_CNT_INITIALIZED_DATA 
!= 0 { - if characteristics & pe::IMAGE_SCN_MEM_DISCARDABLE != 0 { - SectionKind::Other - } else if characteristics & pe::IMAGE_SCN_MEM_WRITE != 0 { - SectionKind::Data - } else { - SectionKind::ReadOnlyData - } - } else if characteristics & pe::IMAGE_SCN_CNT_UNINITIALIZED_DATA != 0 { - SectionKind::UninitializedData - } else if characteristics & pe::IMAGE_SCN_LNK_INFO != 0 { - SectionKind::Linker - } else { - SectionKind::Unknown - } - } -} - -impl pe::ImageSectionHeader { - /// Return the string table offset of the section name. - /// - /// Returns `Ok(None)` if the name doesn't use the string table - /// and can be obtained with `raw_name` instead. - pub fn name_offset(&self) -> Result> { - let bytes = &self.name; - if bytes[0] != b'/' { - return Ok(None); - } - - if bytes[1] == b'/' { - let mut offset = 0; - for byte in bytes[2..].iter() { - let digit = match byte { - b'A'..=b'Z' => byte - b'A', - b'a'..=b'z' => byte - b'a' + 26, - b'0'..=b'9' => byte - b'0' + 52, - b'+' => 62, - b'/' => 63, - _ => return Err(Error("Invalid COFF section name base-64 offset")), - }; - offset = offset * 64 + digit as u64; - } - u32::try_from(offset) - .ok() - .read_error("Invalid COFF section name base-64 offset") - .map(Some) - } else { - let mut offset = 0; - for byte in bytes[1..].iter() { - let digit = match byte { - b'0'..=b'9' => byte - b'0', - 0 => break, - _ => return Err(Error("Invalid COFF section name base-10 offset")), - }; - offset = offset * 10 + digit as u32; - } - Ok(Some(offset)) - } - } - - /// Return the section name. - /// - /// This handles decoding names that are offsets into the symbol string table. - pub fn name<'data, R: ReadRef<'data>>( - &'data self, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]> { - if let Some(offset) = self.name_offset()? { - strings - .get(offset) - .read_error("Invalid COFF section name offset") - } else { - Ok(self.raw_name()) - } - } - - /// Return the raw section name. 
- pub fn raw_name(&self) -> &[u8] { - let bytes = &self.name; - match memchr::memchr(b'\0', bytes) { - Some(end) => &bytes[..end], - None => &bytes[..], - } - } - - /// Return the offset and size of the section in a COFF file. - /// - /// Returns `None` for sections that have no data in the file. - pub fn coff_file_range(&self) -> Option<(u32, u32)> { - if self.characteristics.get(LE) & pe::IMAGE_SCN_CNT_UNINITIALIZED_DATA != 0 { - None - } else { - let offset = self.pointer_to_raw_data.get(LE); - // Note: virtual size is not used for COFF. - let size = self.size_of_raw_data.get(LE); - Some((offset, size)) - } - } - - /// Return the section data in a COFF file. - /// - /// Returns `Ok(&[])` if the section has no data. - /// Returns `Err` for invalid values. - pub fn coff_data<'data, R: ReadRef<'data>>(&self, data: R) -> result::Result<&'data [u8], ()> { - if let Some((offset, size)) = self.coff_file_range() { - data.read_bytes_at(offset.into(), size.into()) - } else { - Ok(&[]) - } - } - - /// Return the section alignment in bytes. - /// - /// This is only valid for sections in a COFF file. - pub fn coff_alignment(&self) -> u64 { - match self.characteristics.get(LE) & pe::IMAGE_SCN_ALIGN_MASK { - pe::IMAGE_SCN_ALIGN_1BYTES => 1, - pe::IMAGE_SCN_ALIGN_2BYTES => 2, - pe::IMAGE_SCN_ALIGN_4BYTES => 4, - pe::IMAGE_SCN_ALIGN_8BYTES => 8, - pe::IMAGE_SCN_ALIGN_16BYTES => 16, - pe::IMAGE_SCN_ALIGN_32BYTES => 32, - pe::IMAGE_SCN_ALIGN_64BYTES => 64, - pe::IMAGE_SCN_ALIGN_128BYTES => 128, - pe::IMAGE_SCN_ALIGN_256BYTES => 256, - pe::IMAGE_SCN_ALIGN_512BYTES => 512, - pe::IMAGE_SCN_ALIGN_1024BYTES => 1024, - pe::IMAGE_SCN_ALIGN_2048BYTES => 2048, - pe::IMAGE_SCN_ALIGN_4096BYTES => 4096, - pe::IMAGE_SCN_ALIGN_8192BYTES => 8192, - _ => 16, - } - } - - /// Read the relocations in a COFF file. - /// - /// `data` must be the entire file data. 
- pub fn coff_relocations<'data, R: ReadRef<'data>>( - &self, - data: R, - ) -> read::Result<&'data [pe::ImageRelocation]> { - let mut pointer = self.pointer_to_relocations.get(LE).into(); - let mut number: usize = self.number_of_relocations.get(LE).into(); - if number == core::u16::MAX.into() - && self.characteristics.get(LE) & pe::IMAGE_SCN_LNK_NRELOC_OVFL != 0 - { - // Extended relocations. Read first relocation (which contains extended count) & adjust - // relocations pointer. - let extended_relocation_info = data - .read_at::(pointer) - .read_error("Invalid COFF relocation offset or number")?; - number = extended_relocation_info.virtual_address.get(LE) as usize; - if number == 0 { - return Err(Error("Invalid COFF relocation number")); - } - pointer += core::mem::size_of::() as u64; - // Extended relocation info does not contribute to the count of sections. - number -= 1; - } - data.read_slice_at(pointer, number) - .read_error("Invalid COFF relocation offset or number") - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn name_offset() { - let mut section = pe::ImageSectionHeader::default(); - section.name = *b"xxxxxxxx"; - assert_eq!(section.name_offset(), Ok(None)); - section.name = *b"/0\0\0\0\0\0\0"; - assert_eq!(section.name_offset(), Ok(Some(0))); - section.name = *b"/9999999"; - assert_eq!(section.name_offset(), Ok(Some(999_9999))); - section.name = *b"//AAAAAA"; - assert_eq!(section.name_offset(), Ok(Some(0))); - section.name = *b"//D/////"; - assert_eq!(section.name_offset(), Ok(Some(0xffff_ffff))); - section.name = *b"//EAAAAA"; - assert!(section.name_offset().is_err()); - section.name = *b"////////"; - assert!(section.name_offset().is_err()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/coff/symbol.rs s390-tools-2.33.1/rust-vendor/object/src/read/coff/symbol.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/coff/symbol.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/object/src/read/coff/symbol.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,626 +0,0 @@ -use alloc::fmt; -use alloc::vec::Vec; -use core::convert::TryInto; -use core::fmt::Debug; -use core::str; - -use super::{CoffCommon, CoffHeader, SectionTable}; -use crate::endian::{LittleEndian as LE, U32Bytes}; -use crate::pe; -use crate::pod::{bytes_of, bytes_of_slice, Pod}; -use crate::read::util::StringTable; -use crate::read::{ - self, Bytes, ObjectSymbol, ObjectSymbolTable, ReadError, ReadRef, Result, SectionIndex, - SymbolFlags, SymbolIndex, SymbolKind, SymbolMap, SymbolMapEntry, SymbolScope, SymbolSection, -}; - -/// A table of symbol entries in a COFF or PE file. -/// -/// Also includes the string table used for the symbol names. -#[derive(Debug)] -pub struct SymbolTable<'data, R = &'data [u8], Coff = pe::ImageFileHeader> -where - R: ReadRef<'data>, - Coff: CoffHeader, -{ - symbols: &'data [Coff::ImageSymbolBytes], - strings: StringTable<'data, R>, -} - -impl<'data, R: ReadRef<'data>, Coff: CoffHeader> Default for SymbolTable<'data, R, Coff> { - fn default() -> Self { - Self { - symbols: &[], - strings: StringTable::default(), - } - } -} - -impl<'data, R: ReadRef<'data>, Coff: CoffHeader> SymbolTable<'data, R, Coff> { - /// Read the symbol table. - pub fn parse(header: &Coff, data: R) -> Result { - // The symbol table may not be present. - let mut offset = header.pointer_to_symbol_table().into(); - let (symbols, strings) = if offset != 0 { - let symbols = data - .read_slice(&mut offset, header.number_of_symbols() as usize) - .read_error("Invalid COFF symbol table offset or size")?; - - // Note: don't update data when reading length; the length includes itself. - let length = data - .read_at::>(offset) - .read_error("Missing COFF string table")? 
- .get(LE); - let str_end = offset - .checked_add(length as u64) - .read_error("Invalid COFF string table length")?; - let strings = StringTable::new(data, offset, str_end); - - (symbols, strings) - } else { - (&[][..], StringTable::default()) - }; - - Ok(SymbolTable { symbols, strings }) - } - - /// Return the string table used for the symbol names. - #[inline] - pub fn strings(&self) -> StringTable<'data, R> { - self.strings - } - - /// Return true if the symbol table is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.symbols.is_empty() - } - - /// The number of symbol table entries. - /// - /// This includes auxiliary symbol table entries. - #[inline] - pub fn len(&self) -> usize { - self.symbols.len() - } - - /// Iterate over the symbols. - #[inline] - pub fn iter<'table>(&'table self) -> SymbolIterator<'data, 'table, R, Coff> { - SymbolIterator { - symbols: self, - index: 0, - } - } - - /// Return the symbol table entry at the given index. - #[inline] - pub fn symbol(&self, index: usize) -> Result<&'data Coff::ImageSymbol> { - self.get::(index, 0) - } - - /// Return the auxiliary function symbol for the symbol table entry at the given index. - /// - /// Note that the index is of the symbol, not the first auxiliary record. - #[inline] - pub fn aux_function(&self, index: usize) -> Result<&'data pe::ImageAuxSymbolFunction> { - self.get::(index, 1) - } - - /// Return the auxiliary section symbol for the symbol table entry at the given index. - /// - /// Note that the index is of the symbol, not the first auxiliary record. - #[inline] - pub fn aux_section(&self, index: usize) -> Result<&'data pe::ImageAuxSymbolSection> { - self.get::(index, 1) - } - - /// Return the auxiliary file name for the symbol table entry at the given index. - /// - /// Note that the index is of the symbol, not the first auxiliary record. 
- pub fn aux_file_name(&self, index: usize, aux_count: u8) -> Result<&'data [u8]> { - let entries = index - .checked_add(1) - .and_then(|x| Some(x..x.checked_add(aux_count.into())?)) - .and_then(|x| self.symbols.get(x)) - .read_error("Invalid COFF symbol index")?; - let bytes = bytes_of_slice(entries); - // The name is padded with nulls. - Ok(match memchr::memchr(b'\0', bytes) { - Some(end) => &bytes[..end], - None => bytes, - }) - } - - /// Return the symbol table entry or auxiliary record at the given index and offset. - pub fn get(&self, index: usize, offset: usize) -> Result<&'data T> { - let bytes = index - .checked_add(offset) - .and_then(|x| self.symbols.get(x)) - .read_error("Invalid COFF symbol index")?; - Bytes(bytes_of(bytes)) - .read() - .read_error("Invalid COFF symbol data") - } - - /// Construct a map from addresses to a user-defined map entry. - pub fn map Option>( - &self, - f: F, - ) -> SymbolMap { - let mut symbols = Vec::with_capacity(self.symbols.len()); - for (_, symbol) in self.iter() { - if !symbol.is_definition() { - continue; - } - if let Some(entry) = f(symbol) { - symbols.push(entry); - } - } - SymbolMap::new(symbols) - } -} - -/// An iterator for symbol entries in a COFF or PE file. -/// -/// Yields the index and symbol structure for each symbol. -#[derive(Debug)] -pub struct SymbolIterator<'data, 'table, R = &'data [u8], Coff = pe::ImageFileHeader> -where - R: ReadRef<'data>, - Coff: CoffHeader, -{ - symbols: &'table SymbolTable<'data, R, Coff>, - index: usize, -} - -impl<'data, 'table, R: ReadRef<'data>, Coff: CoffHeader> Iterator - for SymbolIterator<'data, 'table, R, Coff> -{ - type Item = (usize, &'data Coff::ImageSymbol); - - fn next(&mut self) -> Option { - let index = self.index; - let symbol = self.symbols.symbol(index).ok()?; - self.index += 1 + symbol.number_of_aux_symbols() as usize; - Some((index, symbol)) - } -} - -/// A symbol table of a `CoffBigFile`. 
-pub type CoffBigSymbolTable<'data, 'file, R = &'data [u8]> = - CoffSymbolTable<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// A symbol table of a `CoffFile`. -#[derive(Debug, Clone, Copy)] -pub struct CoffSymbolTable<'data, 'file, R = &'data [u8], Coff = pe::ImageFileHeader> -where - R: ReadRef<'data>, - Coff: CoffHeader, -{ - pub(crate) file: &'file CoffCommon<'data, R, Coff>, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> read::private::Sealed - for CoffSymbolTable<'data, 'file, R, Coff> -{ -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> ObjectSymbolTable<'data> - for CoffSymbolTable<'data, 'file, R, Coff> -{ - type Symbol = CoffSymbol<'data, 'file, R, Coff>; - type SymbolIterator = CoffSymbolIterator<'data, 'file, R, Coff>; - - fn symbols(&self) -> Self::SymbolIterator { - CoffSymbolIterator { - file: self.file, - index: 0, - } - } - - fn symbol_by_index(&self, index: SymbolIndex) -> Result { - let symbol = self.file.symbols.symbol(index.0)?; - Ok(CoffSymbol { - file: self.file, - index, - symbol, - }) - } -} - -/// An iterator over the symbols of a `CoffBigFile`. -pub type CoffBigSymbolIterator<'data, 'file, R = &'data [u8]> = - CoffSymbolIterator<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// An iterator over the symbols of a `CoffFile`. 
-pub struct CoffSymbolIterator<'data, 'file, R = &'data [u8], Coff = pe::ImageFileHeader> -where - R: ReadRef<'data>, - Coff: CoffHeader, -{ - pub(crate) file: &'file CoffCommon<'data, R, Coff>, - pub(crate) index: usize, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> fmt::Debug - for CoffSymbolIterator<'data, 'file, R, Coff> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("CoffSymbolIterator").finish() - } -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> Iterator - for CoffSymbolIterator<'data, 'file, R, Coff> -{ - type Item = CoffSymbol<'data, 'file, R, Coff>; - - fn next(&mut self) -> Option { - let index = self.index; - let symbol = self.file.symbols.symbol(index).ok()?; - self.index += 1 + symbol.number_of_aux_symbols() as usize; - Some(CoffSymbol { - file: self.file, - index: SymbolIndex(index), - symbol, - }) - } -} - -/// A symbol of a `CoffBigFile`. -pub type CoffBigSymbol<'data, 'file, R = &'data [u8]> = - CoffSymbol<'data, 'file, R, pe::AnonObjectHeaderBigobj>; - -/// A symbol of a `CoffFile`. -#[derive(Debug, Clone, Copy)] -pub struct CoffSymbol<'data, 'file, R = &'data [u8], Coff = pe::ImageFileHeader> -where - R: ReadRef<'data>, - Coff: CoffHeader, -{ - pub(crate) file: &'file CoffCommon<'data, R, Coff>, - pub(crate) index: SymbolIndex, - pub(crate) symbol: &'data Coff::ImageSymbol, -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> CoffSymbol<'data, 'file, R, Coff> { - #[inline] - /// Get the raw `ImageSymbol` struct. 
- pub fn raw_symbol(&self) -> &'data Coff::ImageSymbol { - self.symbol - } -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> read::private::Sealed - for CoffSymbol<'data, 'file, R, Coff> -{ -} - -impl<'data, 'file, R: ReadRef<'data>, Coff: CoffHeader> ObjectSymbol<'data> - for CoffSymbol<'data, 'file, R, Coff> -{ - #[inline] - fn index(&self) -> SymbolIndex { - self.index - } - - fn name_bytes(&self) -> read::Result<&'data [u8]> { - if self.symbol.has_aux_file_name() { - self.file - .symbols - .aux_file_name(self.index.0, self.symbol.number_of_aux_symbols()) - } else { - self.symbol.name(self.file.symbols.strings()) - } - } - - fn name(&self) -> read::Result<&'data str> { - let name = self.name_bytes()?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 COFF symbol name") - } - - fn address(&self) -> u64 { - // Only return an address for storage classes that we know use an address. - match self.symbol.storage_class() { - pe::IMAGE_SYM_CLASS_STATIC - | pe::IMAGE_SYM_CLASS_WEAK_EXTERNAL - | pe::IMAGE_SYM_CLASS_LABEL => {} - pe::IMAGE_SYM_CLASS_EXTERNAL => { - if self.symbol.section_number() == pe::IMAGE_SYM_UNDEFINED { - // Undefined or common data, neither of which have an address. - return 0; - } - } - _ => return 0, - } - self.symbol - .address(self.file.image_base, &self.file.sections) - .unwrap_or(0) - } - - fn size(&self) -> u64 { - match self.symbol.storage_class() { - pe::IMAGE_SYM_CLASS_STATIC => { - // Section symbols may duplicate the size from the section table. - if self.symbol.has_aux_section() { - if let Ok(aux) = self.file.symbols.aux_section(self.index.0) { - u64::from(aux.length.get(LE)) - } else { - 0 - } - } else { - 0 - } - } - pe::IMAGE_SYM_CLASS_EXTERNAL => { - if self.symbol.section_number() == pe::IMAGE_SYM_UNDEFINED { - // For undefined symbols, symbol.value is 0 and the size is 0. - // For common data, symbol.value is the size. 
- u64::from(self.symbol.value()) - } else if self.symbol.has_aux_function() { - // Function symbols may have a size. - if let Ok(aux) = self.file.symbols.aux_function(self.index.0) { - u64::from(aux.total_size.get(LE)) - } else { - 0 - } - } else { - 0 - } - } - // Most symbols don't have sizes. - _ => 0, - } - } - - fn kind(&self) -> SymbolKind { - let derived_kind = if self.symbol.derived_type() == pe::IMAGE_SYM_DTYPE_FUNCTION { - SymbolKind::Text - } else { - SymbolKind::Data - }; - match self.symbol.storage_class() { - pe::IMAGE_SYM_CLASS_STATIC => { - if self.symbol.value() == 0 && self.symbol.number_of_aux_symbols() > 0 { - SymbolKind::Section - } else { - derived_kind - } - } - pe::IMAGE_SYM_CLASS_EXTERNAL | pe::IMAGE_SYM_CLASS_WEAK_EXTERNAL => derived_kind, - pe::IMAGE_SYM_CLASS_SECTION => SymbolKind::Section, - pe::IMAGE_SYM_CLASS_FILE => SymbolKind::File, - pe::IMAGE_SYM_CLASS_LABEL => SymbolKind::Label, - _ => SymbolKind::Unknown, - } - } - - fn section(&self) -> SymbolSection { - match self.symbol.section_number() { - pe::IMAGE_SYM_UNDEFINED => { - if self.symbol.storage_class() == pe::IMAGE_SYM_CLASS_EXTERNAL - && self.symbol.value() == 0 - { - SymbolSection::Undefined - } else { - SymbolSection::Common - } - } - pe::IMAGE_SYM_ABSOLUTE => SymbolSection::Absolute, - pe::IMAGE_SYM_DEBUG => { - if self.symbol.storage_class() == pe::IMAGE_SYM_CLASS_FILE { - SymbolSection::None - } else { - SymbolSection::Unknown - } - } - index if index > 0 => SymbolSection::Section(SectionIndex(index as usize)), - _ => SymbolSection::Unknown, - } - } - - #[inline] - fn is_undefined(&self) -> bool { - self.symbol.storage_class() == pe::IMAGE_SYM_CLASS_EXTERNAL - && self.symbol.section_number() == pe::IMAGE_SYM_UNDEFINED - && self.symbol.value() == 0 - } - - #[inline] - fn is_definition(&self) -> bool { - self.symbol.is_definition() - } - - #[inline] - fn is_common(&self) -> bool { - self.symbol.storage_class() == pe::IMAGE_SYM_CLASS_EXTERNAL - && 
self.symbol.section_number() == pe::IMAGE_SYM_UNDEFINED - && self.symbol.value() != 0 - } - - #[inline] - fn is_weak(&self) -> bool { - self.symbol.storage_class() == pe::IMAGE_SYM_CLASS_WEAK_EXTERNAL - } - - #[inline] - fn scope(&self) -> SymbolScope { - match self.symbol.storage_class() { - pe::IMAGE_SYM_CLASS_EXTERNAL | pe::IMAGE_SYM_CLASS_WEAK_EXTERNAL => { - // TODO: determine if symbol is exported - SymbolScope::Linkage - } - _ => SymbolScope::Compilation, - } - } - - #[inline] - fn is_global(&self) -> bool { - match self.symbol.storage_class() { - pe::IMAGE_SYM_CLASS_EXTERNAL | pe::IMAGE_SYM_CLASS_WEAK_EXTERNAL => true, - _ => false, - } - } - - #[inline] - fn is_local(&self) -> bool { - !self.is_global() - } - - fn flags(&self) -> SymbolFlags { - if self.symbol.has_aux_section() { - if let Ok(aux) = self.file.symbols.aux_section(self.index.0) { - let number = if Coff::is_type_bigobj() { - u32::from(aux.number.get(LE)) | (u32::from(aux.high_number.get(LE)) << 16) - } else { - u32::from(aux.number.get(LE)) - }; - return SymbolFlags::CoffSection { - selection: aux.selection, - associative_section: if number == 0 { - None - } else { - Some(SectionIndex(number as usize)) - }, - }; - } - } - SymbolFlags::None - } -} - -/// A trait for generic access to `ImageSymbol` and `ImageSymbolEx`. -#[allow(missing_docs)] -pub trait ImageSymbol: Debug + Pod { - fn raw_name(&self) -> &[u8; 8]; - fn value(&self) -> u32; - fn section_number(&self) -> i32; - fn typ(&self) -> u16; - fn storage_class(&self) -> u8; - fn number_of_aux_symbols(&self) -> u8; - - /// Parse a COFF symbol name. - /// - /// `strings` must be the string table used for symbol names. - fn name<'data, R: ReadRef<'data>>( - &'data self, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]> { - let name = self.raw_name(); - if name[0] == 0 { - // If the name starts with 0 then the last 4 bytes are a string table offset. 
- let offset = u32::from_le_bytes(name[4..8].try_into().unwrap()); - strings - .get(offset) - .read_error("Invalid COFF symbol name offset") - } else { - // The name is inline and padded with nulls. - Ok(match memchr::memchr(b'\0', name) { - Some(end) => &name[..end], - None => &name[..], - }) - } - } - - /// Return the symbol address. - /// - /// This takes into account the image base and the section address. - fn address(&self, image_base: u64, sections: &SectionTable<'_>) -> Result { - let section_number = self.section_number() as usize; - let section = sections.section(section_number)?; - let virtual_address = u64::from(section.virtual_address.get(LE)); - let value = u64::from(self.value()); - Ok(image_base + virtual_address + value) - } - - /// Return true if the symbol is a definition of a function or data object. - fn is_definition(&self) -> bool { - let section_number = self.section_number(); - if section_number == pe::IMAGE_SYM_UNDEFINED { - return false; - } - match self.storage_class() { - pe::IMAGE_SYM_CLASS_STATIC => { - // Exclude section symbols. - !(self.value() == 0 && self.number_of_aux_symbols() > 0) - } - pe::IMAGE_SYM_CLASS_EXTERNAL | pe::IMAGE_SYM_CLASS_WEAK_EXTERNAL => true, - _ => false, - } - } - - /// Return true if the symbol has an auxiliary file name. - fn has_aux_file_name(&self) -> bool { - self.number_of_aux_symbols() > 0 && self.storage_class() == pe::IMAGE_SYM_CLASS_FILE - } - - /// Return true if the symbol has an auxiliary function symbol. - fn has_aux_function(&self) -> bool { - self.number_of_aux_symbols() > 0 && self.derived_type() == pe::IMAGE_SYM_DTYPE_FUNCTION - } - - /// Return true if the symbol has an auxiliary section symbol. 
- fn has_aux_section(&self) -> bool { - self.number_of_aux_symbols() > 0 - && self.storage_class() == pe::IMAGE_SYM_CLASS_STATIC - && self.value() == 0 - } - - fn base_type(&self) -> u16 { - self.typ() & pe::N_BTMASK - } - - fn derived_type(&self) -> u16 { - (self.typ() & pe::N_TMASK) >> pe::N_BTSHFT - } -} - -impl ImageSymbol for pe::ImageSymbol { - fn raw_name(&self) -> &[u8; 8] { - &self.name - } - fn value(&self) -> u32 { - self.value.get(LE) - } - fn section_number(&self) -> i32 { - let section_number = self.section_number.get(LE); - if section_number >= pe::IMAGE_SYM_SECTION_MAX { - (section_number as i16) as i32 - } else { - section_number as i32 - } - } - fn typ(&self) -> u16 { - self.typ.get(LE) - } - fn storage_class(&self) -> u8 { - self.storage_class - } - fn number_of_aux_symbols(&self) -> u8 { - self.number_of_aux_symbols - } -} - -impl ImageSymbol for pe::ImageSymbolEx { - fn raw_name(&self) -> &[u8; 8] { - &self.name - } - fn value(&self) -> u32 { - self.value.get(LE) - } - fn section_number(&self) -> i32 { - self.section_number.get(LE) - } - fn typ(&self) -> u16 { - self.typ.get(LE) - } - fn storage_class(&self) -> u8 { - self.storage_class - } - fn number_of_aux_symbols(&self) -> u8 { - self.number_of_aux_symbols - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/attributes.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/attributes.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/attributes.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/attributes.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,303 +0,0 @@ -use core::convert::TryInto; - -use crate::elf; -use crate::endian; -use crate::read::{Bytes, Error, ReadError, Result}; - -use super::FileHeader; - -/// An ELF attributes section. -/// -/// This may be a GNU attributes section, or an architecture specific attributes section. -/// -/// An attributes section contains a series of subsections. 
-#[derive(Debug, Clone)] -pub struct AttributesSection<'data, Elf: FileHeader> { - endian: Elf::Endian, - version: u8, - data: Bytes<'data>, -} - -impl<'data, Elf: FileHeader> AttributesSection<'data, Elf> { - /// Parse an ELF attributes section given the section data. - pub fn new(endian: Elf::Endian, data: &'data [u8]) -> Result { - let mut data = Bytes(data); - - // Skip the version field that is one byte long. - let version = *data - .read::() - .read_error("Invalid ELF attributes section offset or size")?; - - Ok(AttributesSection { - endian, - version, - data, - }) - } - - /// Return the version of the attributes section. - pub fn version(&self) -> u8 { - self.version - } - - /// Return an iterator over the subsections. - pub fn subsections(&self) -> Result> { - // There is currently only one format version. - if self.version != b'A' { - return Err(Error("Unsupported ELF attributes section version")); - } - - Ok(AttributesSubsectionIterator { - endian: self.endian, - data: self.data, - }) - } -} - -/// An iterator over the subsections in an ELF attributes section. -#[derive(Debug, Clone)] -pub struct AttributesSubsectionIterator<'data, Elf: FileHeader> { - endian: Elf::Endian, - data: Bytes<'data>, -} - -impl<'data, Elf: FileHeader> AttributesSubsectionIterator<'data, Elf> { - /// Return the next subsection. - pub fn next(&mut self) -> Result>> { - if self.data.is_empty() { - return Ok(None); - } - - let result = self.parse(); - if result.is_err() { - self.data = Bytes(&[]); - } - result - } - - fn parse(&mut self) -> Result>> { - // First read the subsection length. - let mut data = self.data; - let length = data - .read::>() - .read_error("ELF attributes section is too short")? - .get(self.endian); - - // Now read the entire subsection, updating self.data. - let mut data = self - .data - .read_bytes(length as usize) - .read_error("Invalid ELF attributes subsection length")?; - // Skip the subsection length field. 
- data.skip(4) - .read_error("Invalid ELF attributes subsection length")?; - - let vendor = data - .read_string() - .read_error("Invalid ELF attributes vendor")?; - - Ok(Some(AttributesSubsection { - endian: self.endian, - length, - vendor, - data, - })) - } -} - -/// A subsection in an ELF attributes section. -/// -/// A subsection is identified by a vendor name. It contains a series of sub-subsections. -#[derive(Debug, Clone)] -pub struct AttributesSubsection<'data, Elf: FileHeader> { - endian: Elf::Endian, - length: u32, - vendor: &'data [u8], - data: Bytes<'data>, -} - -impl<'data, Elf: FileHeader> AttributesSubsection<'data, Elf> { - /// Return the length of the attributes subsection. - pub fn length(&self) -> u32 { - self.length - } - - /// Return the vendor name of the attributes subsection. - pub fn vendor(&self) -> &'data [u8] { - self.vendor - } - - /// Return an iterator over the sub-subsections. - pub fn subsubsections(&self) -> AttributesSubsubsectionIterator<'data, Elf> { - AttributesSubsubsectionIterator { - endian: self.endian, - data: self.data, - } - } -} - -/// An iterator over the sub-subsections in an ELF attributes section. -#[derive(Debug, Clone)] -pub struct AttributesSubsubsectionIterator<'data, Elf: FileHeader> { - endian: Elf::Endian, - data: Bytes<'data>, -} - -impl<'data, Elf: FileHeader> AttributesSubsubsectionIterator<'data, Elf> { - /// Return the next sub-subsection. - pub fn next(&mut self) -> Result>> { - if self.data.is_empty() { - return Ok(None); - } - - let result = self.parse(); - if result.is_err() { - self.data = Bytes(&[]); - } - result - } - - fn parse(&mut self) -> Result>> { - // The format of a sub-section looks like this: - // - // * - // | * 0 * - // | * 0 * - let mut data = self.data; - let tag = *data - .read::() - .read_error("ELF attributes subsection is too short")?; - let length = data - .read::>() - .read_error("ELF attributes subsection is too short")? 
- .get(self.endian); - - // Now read the entire sub-subsection, updating self.data. - let mut data = self - .data - .read_bytes(length as usize) - .read_error("Invalid ELF attributes sub-subsection length")?; - // Skip the tag and sub-subsection size field. - data.skip(1 + 4) - .read_error("Invalid ELF attributes sub-subsection length")?; - - let indices = if tag == elf::Tag_Section || tag == elf::Tag_Symbol { - data.read_string() - .map(Bytes) - .read_error("Missing ELF attributes sub-subsection indices")? - } else if tag == elf::Tag_File { - Bytes(&[]) - } else { - return Err(Error("Unimplemented ELF attributes sub-subsection tag")); - }; - - Ok(Some(AttributesSubsubsection { - tag, - length, - indices, - data, - })) - } -} - -/// A sub-subsection in an ELF attributes section. -/// -/// A sub-subsection is identified by a tag. It contains an optional series of indices, -/// followed by a series of attributes. -#[derive(Debug, Clone)] -pub struct AttributesSubsubsection<'data> { - tag: u8, - length: u32, - indices: Bytes<'data>, - data: Bytes<'data>, -} - -impl<'data> AttributesSubsubsection<'data> { - /// Return the tag of the attributes sub-subsection. - pub fn tag(&self) -> u8 { - self.tag - } - - /// Return the length of the attributes sub-subsection. - pub fn length(&self) -> u32 { - self.length - } - - /// Return the data containing the indices. - pub fn indices_data(&self) -> &'data [u8] { - self.indices.0 - } - - /// Return the indices. - /// - /// This will be section indices if the tag is `Tag_Section`, - /// or symbol indices if the tag is `Tag_Symbol`, - /// and otherwise it will be empty. - pub fn indices(&self) -> AttributeIndexIterator<'data> { - AttributeIndexIterator { data: self.indices } - } - - /// Return the data containing the attributes. - pub fn attributes_data(&self) -> &'data [u8] { - self.data.0 - } - - /// Return a parser for the data containing the attributes. 
- pub fn attributes(&self) -> AttributeReader<'data> { - AttributeReader { data: self.data } - } -} - -/// An iterator over the indices in a sub-subsection in an ELF attributes section. -#[derive(Debug, Clone)] -pub struct AttributeIndexIterator<'data> { - data: Bytes<'data>, -} - -impl<'data> AttributeIndexIterator<'data> { - /// Parse the next index. - pub fn next(&mut self) -> Result> { - if self.data.is_empty() { - return Ok(None); - } - let err = "Invalid ELF attribute index"; - self.data - .read_uleb128() - .read_error(err)? - .try_into() - .map_err(|_| ()) - .read_error(err) - .map(Some) - } -} - -/// A parser for the attributes in a sub-subsection in an ELF attributes section. -/// -/// The parser relies on the caller to know the format of the data for each attribute tag. -#[derive(Debug, Clone)] -pub struct AttributeReader<'data> { - data: Bytes<'data>, -} - -impl<'data> AttributeReader<'data> { - /// Parse a tag. - pub fn read_tag(&mut self) -> Result> { - if self.data.is_empty() { - return Ok(None); - } - let err = "Invalid ELF attribute tag"; - self.data.read_uleb128().read_error(err).map(Some) - } - - /// Parse an integer value. - pub fn read_integer(&mut self) -> Result { - let err = "Invalid ELF attribute integer value"; - self.data.read_uleb128().read_error(err) - } - - /// Parse a string value. 
- pub fn read_string(&mut self) -> Result<&'data [u8]> { - let err = "Invalid ELF attribute string value"; - self.data.read_string().read_error(err) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/comdat.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/comdat.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/comdat.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/comdat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,160 +0,0 @@ -use core::fmt::Debug; -use core::{iter, slice, str}; - -use crate::elf; -use crate::endian::{Endianness, U32Bytes}; -use crate::read::{self, ComdatKind, ObjectComdat, ReadError, ReadRef, SectionIndex, SymbolIndex}; - -use super::{ElfFile, FileHeader, SectionHeader, Sym}; - -/// An iterator over the COMDAT section groups of an `ElfFile32`. -pub type ElfComdatIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfComdatIterator<'data, 'file, elf::FileHeader32, R>; -/// An iterator over the COMDAT section groups of an `ElfFile64`. -pub type ElfComdatIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfComdatIterator<'data, 'file, elf::FileHeader64, R>; - -/// An iterator over the COMDAT section groups of an `ElfFile`. -#[derive(Debug)] -pub struct ElfComdatIterator<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file ElfFile<'data, Elf, R>, - pub(super) iter: iter::Enumerate>, -} - -impl<'data, 'file, Elf, R> Iterator for ElfComdatIterator<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - type Item = ElfComdat<'data, 'file, Elf, R>; - - fn next(&mut self) -> Option { - for (_index, section) in self.iter.by_ref() { - if let Some(comdat) = ElfComdat::parse(self.file, section) { - return Some(comdat); - } - } - None - } -} - -/// A COMDAT section group of an `ElfFile32`. 
-pub type ElfComdat32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfComdat<'data, 'file, elf::FileHeader32, R>; -/// A COMDAT section group of an `ElfFile64`. -pub type ElfComdat64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfComdat<'data, 'file, elf::FileHeader64, R>; - -/// A COMDAT section group of an `ElfFile`. -#[derive(Debug)] -pub struct ElfComdat<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - file: &'file ElfFile<'data, Elf, R>, - section: &'data Elf::SectionHeader, - sections: &'data [U32Bytes], -} - -impl<'data, 'file, Elf, R> ElfComdat<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - fn parse( - file: &'file ElfFile<'data, Elf, R>, - section: &'data Elf::SectionHeader, - ) -> Option> { - let (flag, sections) = section.group(file.endian, file.data).ok()??; - if flag != elf::GRP_COMDAT { - return None; - } - Some(ElfComdat { - file, - section, - sections, - }) - } -} - -impl<'data, 'file, Elf, R> read::private::Sealed for ElfComdat<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Elf, R> ObjectComdat<'data> for ElfComdat<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - type SectionIterator = ElfComdatSectionIterator<'data, 'file, Elf, R>; - - #[inline] - fn kind(&self) -> ComdatKind { - ComdatKind::Any - } - - #[inline] - fn symbol(&self) -> SymbolIndex { - SymbolIndex(self.section.sh_info(self.file.endian) as usize) - } - - fn name_bytes(&self) -> read::Result<&[u8]> { - // FIXME: check sh_link - let index = self.section.sh_info(self.file.endian) as usize; - let symbol = self.file.symbols.symbol(index)?; - symbol.name(self.file.endian, self.file.symbols.strings()) - } - - fn name(&self) -> read::Result<&str> { - let name = self.name_bytes()?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 ELF COMDAT name") - } - - fn sections(&self) -> Self::SectionIterator { - 
ElfComdatSectionIterator { - file: self.file, - sections: self.sections.iter(), - } - } -} - -/// An iterator over the sections in a COMDAT section group of an `ElfFile32`. -pub type ElfComdatSectionIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfComdatSectionIterator<'data, 'file, elf::FileHeader32, R>; -/// An iterator over the sections in a COMDAT section group of an `ElfFile64`. -pub type ElfComdatSectionIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfComdatSectionIterator<'data, 'file, elf::FileHeader64, R>; - -/// An iterator over the sections in a COMDAT section group of an `ElfFile`. -#[derive(Debug)] -pub struct ElfComdatSectionIterator<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - file: &'file ElfFile<'data, Elf, R>, - sections: slice::Iter<'data, U32Bytes>, -} - -impl<'data, 'file, Elf, R> Iterator for ElfComdatSectionIterator<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - type Item = SectionIndex; - - fn next(&mut self) -> Option { - let index = self.sections.next()?; - Some(SectionIndex(index.get(self.file.endian) as usize)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/compression.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/compression.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/compression.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/compression.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -use core::fmt::Debug; - -use crate::elf; -use crate::endian; -use crate::pod::Pod; - -/// A trait for generic access to `CompressionHeader32` and `CompressionHeader64`. 
-#[allow(missing_docs)] -pub trait CompressionHeader: Debug + Pod { - type Word: Into; - type Endian: endian::Endian; - - fn ch_type(&self, endian: Self::Endian) -> u32; - fn ch_size(&self, endian: Self::Endian) -> Self::Word; - fn ch_addralign(&self, endian: Self::Endian) -> Self::Word; -} - -impl CompressionHeader for elf::CompressionHeader32 { - type Word = u32; - type Endian = Endian; - - #[inline] - fn ch_type(&self, endian: Self::Endian) -> u32 { - self.ch_type.get(endian) - } - - #[inline] - fn ch_size(&self, endian: Self::Endian) -> Self::Word { - self.ch_size.get(endian) - } - - #[inline] - fn ch_addralign(&self, endian: Self::Endian) -> Self::Word { - self.ch_addralign.get(endian) - } -} - -impl CompressionHeader for elf::CompressionHeader64 { - type Word = u64; - type Endian = Endian; - - #[inline] - fn ch_type(&self, endian: Self::Endian) -> u32 { - self.ch_type.get(endian) - } - - #[inline] - fn ch_size(&self, endian: Self::Endian) -> Self::Word { - self.ch_size.get(endian) - } - - #[inline] - fn ch_addralign(&self, endian: Self::Endian) -> Self::Word { - self.ch_addralign.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/dynamic.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/dynamic.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/dynamic.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/dynamic.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,117 +0,0 @@ -use core::convert::TryInto; -use core::fmt::Debug; - -use crate::elf; -use crate::endian; -use crate::pod::Pod; -use crate::read::{ReadError, Result, StringTable}; - -/// A trait for generic access to `Dyn32` and `Dyn64`. -#[allow(missing_docs)] -pub trait Dyn: Debug + Pod { - type Word: Into; - type Endian: endian::Endian; - - fn d_tag(&self, endian: Self::Endian) -> Self::Word; - fn d_val(&self, endian: Self::Endian) -> Self::Word; - - /// Try to convert the tag to a `u32`. 
- fn tag32(&self, endian: Self::Endian) -> Option { - self.d_tag(endian).into().try_into().ok() - } - - /// Try to convert the value to a `u32`. - fn val32(&self, endian: Self::Endian) -> Option { - self.d_val(endian).into().try_into().ok() - } - - /// Return true if the value is an offset in the dynamic string table. - fn is_string(&self, endian: Self::Endian) -> bool { - if let Some(tag) = self.tag32(endian) { - match tag { - elf::DT_NEEDED - | elf::DT_SONAME - | elf::DT_RPATH - | elf::DT_RUNPATH - | elf::DT_AUXILIARY - | elf::DT_FILTER => true, - _ => false, - } - } else { - false - } - } - - /// Use the value to get a string in a string table. - /// - /// Does not check for an appropriate tag. - fn string<'data>( - &self, - endian: Self::Endian, - strings: StringTable<'data>, - ) -> Result<&'data [u8]> { - self.val32(endian) - .and_then(|val| strings.get(val).ok()) - .read_error("Invalid ELF dyn string") - } - - /// Return true if the value is an address. - fn is_address(&self, endian: Self::Endian) -> bool { - if let Some(tag) = self.tag32(endian) { - match tag { - elf::DT_PLTGOT - | elf::DT_HASH - | elf::DT_STRTAB - | elf::DT_SYMTAB - | elf::DT_RELA - | elf::DT_INIT - | elf::DT_FINI - | elf::DT_SYMBOLIC - | elf::DT_REL - | elf::DT_DEBUG - | elf::DT_JMPREL - | elf::DT_FINI_ARRAY - | elf::DT_INIT_ARRAY - | elf::DT_PREINIT_ARRAY - | elf::DT_SYMTAB_SHNDX - | elf::DT_VERDEF - | elf::DT_VERNEED - | elf::DT_VERSYM - | elf::DT_ADDRRNGLO..=elf::DT_ADDRRNGHI => true, - _ => false, - } - } else { - false - } - } -} - -impl Dyn for elf::Dyn32 { - type Word = u32; - type Endian = Endian; - - #[inline] - fn d_tag(&self, endian: Self::Endian) -> Self::Word { - self.d_tag.get(endian) - } - - #[inline] - fn d_val(&self, endian: Self::Endian) -> Self::Word { - self.d_val.get(endian) - } -} - -impl Dyn for elf::Dyn64 { - type Word = u64; - type Endian = Endian; - - #[inline] - fn d_tag(&self, endian: Self::Endian) -> Self::Word { - self.d_tag.get(endian) - } - - #[inline] - fn 
d_val(&self, endian: Self::Endian) -> Self::Word { - self.d_val.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/file.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/file.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/file.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/file.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,911 +0,0 @@ -use alloc::vec::Vec; -use core::convert::TryInto; -use core::fmt::Debug; -use core::mem; - -use crate::read::{ - self, util, Architecture, ByteString, Bytes, Error, Export, FileFlags, Import, Object, - ObjectKind, ReadError, ReadRef, SectionIndex, StringTable, SymbolIndex, -}; -use crate::{elf, endian, Endian, Endianness, Pod, U32}; - -use super::{ - CompressionHeader, Dyn, ElfComdat, ElfComdatIterator, ElfDynamicRelocationIterator, ElfSection, - ElfSectionIterator, ElfSegment, ElfSegmentIterator, ElfSymbol, ElfSymbolIterator, - ElfSymbolTable, NoteHeader, ProgramHeader, Rel, Rela, RelocationSections, SectionHeader, - SectionTable, Sym, SymbolTable, -}; - -/// A 32-bit ELF object file. -pub type ElfFile32<'data, Endian = Endianness, R = &'data [u8]> = - ElfFile<'data, elf::FileHeader32, R>; -/// A 64-bit ELF object file. -pub type ElfFile64<'data, Endian = Endianness, R = &'data [u8]> = - ElfFile<'data, elf::FileHeader64, R>; - -/// A partially parsed ELF file. -/// -/// Most of the functionality of this type is provided by the `Object` trait implementation. 
-#[derive(Debug)] -pub struct ElfFile<'data, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - pub(super) endian: Elf::Endian, - pub(super) data: R, - pub(super) header: &'data Elf, - pub(super) segments: &'data [Elf::ProgramHeader], - pub(super) sections: SectionTable<'data, Elf, R>, - pub(super) relocations: RelocationSections, - pub(super) symbols: SymbolTable<'data, Elf, R>, - pub(super) dynamic_symbols: SymbolTable<'data, Elf, R>, -} - -impl<'data, Elf, R> ElfFile<'data, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - /// Parse the raw ELF file data. - pub fn parse(data: R) -> read::Result { - let header = Elf::parse(data)?; - let endian = header.endian()?; - let segments = header.program_headers(endian, data)?; - let sections = header.sections(endian, data)?; - let symbols = sections.symbols(endian, data, elf::SHT_SYMTAB)?; - // TODO: get dynamic symbols from DT_SYMTAB if there are no sections - let dynamic_symbols = sections.symbols(endian, data, elf::SHT_DYNSYM)?; - // The API we provide requires a mapping from section to relocations, so build it now. - let relocations = sections.relocation_sections(endian, symbols.section())?; - - Ok(ElfFile { - endian, - data, - header, - segments, - sections, - relocations, - symbols, - dynamic_symbols, - }) - } - - /// Returns the endianness. - pub fn endian(&self) -> Elf::Endian { - self.endian - } - - /// Returns the raw data. - pub fn data(&self) -> R { - self.data - } - - /// Returns the raw ELF file header. - pub fn raw_header(&self) -> &'data Elf { - self.header - } - - /// Returns the raw ELF segments. 
- pub fn raw_segments(&self) -> &'data [Elf::ProgramHeader] { - self.segments - } - - fn raw_section_by_name<'file>( - &'file self, - section_name: &[u8], - ) -> Option> { - self.sections - .section_by_name(self.endian, section_name) - .map(|(index, section)| ElfSection { - file: self, - index: SectionIndex(index), - section, - }) - } - - #[cfg(feature = "compression")] - fn zdebug_section_by_name<'file>( - &'file self, - section_name: &[u8], - ) -> Option> { - if !section_name.starts_with(b".debug_") { - return None; - } - let mut name = Vec::with_capacity(section_name.len() + 1); - name.extend_from_slice(b".zdebug_"); - name.extend_from_slice(§ion_name[7..]); - self.raw_section_by_name(&name) - } - - #[cfg(not(feature = "compression"))] - fn zdebug_section_by_name<'file>( - &'file self, - _section_name: &[u8], - ) -> Option> { - None - } -} - -impl<'data, Elf, R> read::private::Sealed for ElfFile<'data, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Elf, R> Object<'data, 'file> for ElfFile<'data, Elf, R> -where - 'data: 'file, - Elf: FileHeader, - R: 'file + ReadRef<'data>, -{ - type Segment = ElfSegment<'data, 'file, Elf, R>; - type SegmentIterator = ElfSegmentIterator<'data, 'file, Elf, R>; - type Section = ElfSection<'data, 'file, Elf, R>; - type SectionIterator = ElfSectionIterator<'data, 'file, Elf, R>; - type Comdat = ElfComdat<'data, 'file, Elf, R>; - type ComdatIterator = ElfComdatIterator<'data, 'file, Elf, R>; - type Symbol = ElfSymbol<'data, 'file, Elf, R>; - type SymbolIterator = ElfSymbolIterator<'data, 'file, Elf, R>; - type SymbolTable = ElfSymbolTable<'data, 'file, Elf, R>; - type DynamicRelocationIterator = ElfDynamicRelocationIterator<'data, 'file, Elf, R>; - - fn architecture(&self) -> Architecture { - match ( - self.header.e_machine(self.endian), - self.header.is_class_64(), - ) { - (elf::EM_AARCH64, true) => Architecture::Aarch64, - (elf::EM_AARCH64, false) => Architecture::Aarch64_Ilp32, - (elf::EM_ARM, _) 
=> Architecture::Arm, - (elf::EM_AVR, _) => Architecture::Avr, - (elf::EM_BPF, _) => Architecture::Bpf, - (elf::EM_CSKY, _) => Architecture::Csky, - (elf::EM_386, _) => Architecture::I386, - (elf::EM_X86_64, false) => Architecture::X86_64_X32, - (elf::EM_X86_64, true) => Architecture::X86_64, - (elf::EM_HEXAGON, _) => Architecture::Hexagon, - (elf::EM_LOONGARCH, true) => Architecture::LoongArch64, - (elf::EM_MIPS, false) => Architecture::Mips, - (elf::EM_MIPS, true) => Architecture::Mips64, - (elf::EM_MSP430, _) => Architecture::Msp430, - (elf::EM_PPC, _) => Architecture::PowerPc, - (elf::EM_PPC64, _) => Architecture::PowerPc64, - (elf::EM_RISCV, false) => Architecture::Riscv32, - (elf::EM_RISCV, true) => Architecture::Riscv64, - // This is either s390 or s390x, depending on the ELF class. - // We only support the 64-bit variant s390x here. - (elf::EM_S390, true) => Architecture::S390x, - (elf::EM_SBF, _) => Architecture::Sbf, - (elf::EM_SPARCV9, true) => Architecture::Sparc64, - (elf::EM_XTENSA, false) => Architecture::Xtensa, - _ => Architecture::Unknown, - } - } - - #[inline] - fn is_little_endian(&self) -> bool { - self.header.is_little_endian() - } - - #[inline] - fn is_64(&self) -> bool { - self.header.is_class_64() - } - - fn kind(&self) -> ObjectKind { - match self.header.e_type(self.endian) { - elf::ET_REL => ObjectKind::Relocatable, - elf::ET_EXEC => ObjectKind::Executable, - // TODO: check for `DF_1_PIE`? 
- elf::ET_DYN => ObjectKind::Dynamic, - elf::ET_CORE => ObjectKind::Core, - _ => ObjectKind::Unknown, - } - } - - fn segments(&'file self) -> ElfSegmentIterator<'data, 'file, Elf, R> { - ElfSegmentIterator { - file: self, - iter: self.segments.iter(), - } - } - - fn section_by_name_bytes( - &'file self, - section_name: &[u8], - ) -> Option> { - self.raw_section_by_name(section_name) - .or_else(|| self.zdebug_section_by_name(section_name)) - } - - fn section_by_index( - &'file self, - index: SectionIndex, - ) -> read::Result> { - let section = self.sections.section(index)?; - Ok(ElfSection { - file: self, - index, - section, - }) - } - - fn sections(&'file self) -> ElfSectionIterator<'data, 'file, Elf, R> { - ElfSectionIterator { - file: self, - iter: self.sections.iter().enumerate(), - } - } - - fn comdats(&'file self) -> ElfComdatIterator<'data, 'file, Elf, R> { - ElfComdatIterator { - file: self, - iter: self.sections.iter().enumerate(), - } - } - - fn symbol_by_index( - &'file self, - index: SymbolIndex, - ) -> read::Result> { - let symbol = self.symbols.symbol(index.0)?; - Ok(ElfSymbol { - endian: self.endian, - symbols: &self.symbols, - index, - symbol, - }) - } - - fn symbols(&'file self) -> ElfSymbolIterator<'data, 'file, Elf, R> { - ElfSymbolIterator { - endian: self.endian, - symbols: &self.symbols, - index: 0, - } - } - - fn symbol_table(&'file self) -> Option> { - if self.symbols.is_empty() { - return None; - } - Some(ElfSymbolTable { - endian: self.endian, - symbols: &self.symbols, - }) - } - - fn dynamic_symbols(&'file self) -> ElfSymbolIterator<'data, 'file, Elf, R> { - ElfSymbolIterator { - endian: self.endian, - symbols: &self.dynamic_symbols, - index: 0, - } - } - - fn dynamic_symbol_table(&'file self) -> Option> { - if self.dynamic_symbols.is_empty() { - return None; - } - Some(ElfSymbolTable { - endian: self.endian, - symbols: &self.dynamic_symbols, - }) - } - - fn dynamic_relocations( - &'file self, - ) -> Option> { - 
Some(ElfDynamicRelocationIterator { - section_index: SectionIndex(1), - file: self, - relocations: None, - }) - } - - /// Get the imported symbols. - fn imports(&self) -> read::Result>> { - let mut imports = Vec::new(); - for symbol in self.dynamic_symbols.iter() { - if symbol.is_undefined(self.endian) { - let name = symbol.name(self.endian, self.dynamic_symbols.strings())?; - if !name.is_empty() { - // TODO: use symbol versioning to determine library - imports.push(Import { - name: ByteString(name), - library: ByteString(&[]), - }); - } - } - } - Ok(imports) - } - - /// Get the exported symbols. - fn exports(&self) -> read::Result>> { - let mut exports = Vec::new(); - for symbol in self.dynamic_symbols.iter() { - if symbol.is_definition(self.endian) { - let name = symbol.name(self.endian, self.dynamic_symbols.strings())?; - let address = symbol.st_value(self.endian).into(); - exports.push(Export { - name: ByteString(name), - address, - }); - } - } - Ok(exports) - } - - fn has_debug_symbols(&self) -> bool { - for section in self.sections.iter() { - if let Ok(name) = self.sections.section_name(self.endian, section) { - if name == b".debug_info" || name == b".zdebug_info" { - return true; - } - } - } - false - } - - fn build_id(&self) -> read::Result> { - let endian = self.endian; - // Use section headers if present, otherwise use program headers. - if !self.sections.is_empty() { - for section in self.sections.iter() { - if let Some(mut notes) = section.notes(endian, self.data)? { - while let Some(note) = notes.next()? { - if note.name() == elf::ELF_NOTE_GNU - && note.n_type(endian) == elf::NT_GNU_BUILD_ID - { - return Ok(Some(note.desc())); - } - } - } - } - } else { - for segment in self.segments { - if let Some(mut notes) = segment.notes(endian, self.data)? { - while let Some(note) = notes.next()? 
{ - if note.name() == elf::ELF_NOTE_GNU - && note.n_type(endian) == elf::NT_GNU_BUILD_ID - { - return Ok(Some(note.desc())); - } - } - } - } - } - Ok(None) - } - - fn gnu_debuglink(&self) -> read::Result> { - let section = match self.raw_section_by_name(b".gnu_debuglink") { - Some(section) => section, - None => return Ok(None), - }; - let data = section - .section - .data(self.endian, self.data) - .read_error("Invalid ELF .gnu_debuglink section offset or size") - .map(Bytes)?; - let filename = data - .read_string_at(0) - .read_error("Missing ELF .gnu_debuglink filename")?; - let crc_offset = util::align(filename.len() + 1, 4); - let crc = data - .read_at::>(crc_offset) - .read_error("Missing ELF .gnu_debuglink crc")? - .get(self.endian); - Ok(Some((filename, crc))) - } - - fn gnu_debugaltlink(&self) -> read::Result> { - let section = match self.raw_section_by_name(b".gnu_debugaltlink") { - Some(section) => section, - None => return Ok(None), - }; - let mut data = section - .section - .data(self.endian, self.data) - .read_error("Invalid ELF .gnu_debugaltlink section offset or size") - .map(Bytes)?; - let filename = data - .read_string() - .read_error("Missing ELF .gnu_debugaltlink filename")?; - let build_id = data.0; - Ok(Some((filename, build_id))) - } - - fn relative_address_base(&self) -> u64 { - 0 - } - - fn entry(&self) -> u64 { - self.header.e_entry(self.endian).into() - } - - fn flags(&self) -> FileFlags { - FileFlags::Elf { - os_abi: self.header.e_ident().os_abi, - abi_version: self.header.e_ident().abi_version, - e_flags: self.header.e_flags(self.endian), - } - } -} - -/// A trait for generic access to `FileHeader32` and `FileHeader64`. -#[allow(missing_docs)] -pub trait FileHeader: Debug + Pod { - // Ideally this would be a `u64: From`, but can't express that. 
- type Word: Into; - type Sword: Into; - type Endian: endian::Endian; - type ProgramHeader: ProgramHeader; - type SectionHeader: SectionHeader; - type CompressionHeader: CompressionHeader; - type NoteHeader: NoteHeader; - type Dyn: Dyn; - type Sym: Sym; - type Rel: Rel; - type Rela: Rela + From; - - /// Return true if this type is a 64-bit header. - /// - /// This is a property of the type, not a value in the header data. - fn is_type_64(&self) -> bool; - - /// Return true if this type is a 64-bit header. - /// - /// This is a property of the type, not a value in the header data. - /// - /// This is the same as `is_type_64`, but is non-dispatchable. - fn is_type_64_sized() -> bool - where - Self: Sized; - - fn e_ident(&self) -> &elf::Ident; - fn e_type(&self, endian: Self::Endian) -> u16; - fn e_machine(&self, endian: Self::Endian) -> u16; - fn e_version(&self, endian: Self::Endian) -> u32; - fn e_entry(&self, endian: Self::Endian) -> Self::Word; - fn e_phoff(&self, endian: Self::Endian) -> Self::Word; - fn e_shoff(&self, endian: Self::Endian) -> Self::Word; - fn e_flags(&self, endian: Self::Endian) -> u32; - fn e_ehsize(&self, endian: Self::Endian) -> u16; - fn e_phentsize(&self, endian: Self::Endian) -> u16; - fn e_phnum(&self, endian: Self::Endian) -> u16; - fn e_shentsize(&self, endian: Self::Endian) -> u16; - fn e_shnum(&self, endian: Self::Endian) -> u16; - fn e_shstrndx(&self, endian: Self::Endian) -> u16; - - // Provided methods. - - /// Read the file header. - /// - /// Also checks that the ident field in the file header is a supported format. - fn parse<'data, R: ReadRef<'data>>(data: R) -> read::Result<&'data Self> { - let header = data - .read_at::(0) - .read_error("Invalid ELF header size or alignment")?; - if !header.is_supported() { - return Err(Error("Unsupported ELF header")); - } - // TODO: Check self.e_ehsize? - Ok(header) - } - - /// Check that the ident field in the file header is a supported format. 
- /// - /// This checks the magic number, version, class, and endianness. - fn is_supported(&self) -> bool { - let ident = self.e_ident(); - // TODO: Check self.e_version too? Requires endian though. - ident.magic == elf::ELFMAG - && (self.is_type_64() || self.is_class_32()) - && (!self.is_type_64() || self.is_class_64()) - && (self.is_little_endian() || self.is_big_endian()) - && ident.version == elf::EV_CURRENT - } - - fn is_class_32(&self) -> bool { - self.e_ident().class == elf::ELFCLASS32 - } - - fn is_class_64(&self) -> bool { - self.e_ident().class == elf::ELFCLASS64 - } - - fn is_little_endian(&self) -> bool { - self.e_ident().data == elf::ELFDATA2LSB - } - - fn is_big_endian(&self) -> bool { - self.e_ident().data == elf::ELFDATA2MSB - } - - fn endian(&self) -> read::Result { - Self::Endian::from_big_endian(self.is_big_endian()).read_error("Unsupported ELF endian") - } - - /// Return the first section header, if present. - /// - /// Section 0 is a special case because getting the section headers normally - /// requires `shnum`, but `shnum` may be in the first section header. - fn section_0<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result> { - let shoff: u64 = self.e_shoff(endian).into(); - if shoff == 0 { - // No section headers is ok. - return Ok(None); - } - let shentsize = usize::from(self.e_shentsize(endian)); - if shentsize != mem::size_of::() { - // Section header size must match. - return Err(Error("Invalid ELF section header entry size")); - } - data.read_at(shoff) - .map(Some) - .read_error("Invalid ELF section header offset or size") - } - - /// Return the `e_phnum` field of the header. Handles extended values. - /// - /// Returns `Err` for invalid values. 
- fn phnum<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result { - let e_phnum = self.e_phnum(endian); - if e_phnum < elf::PN_XNUM { - Ok(e_phnum as usize) - } else if let Some(section_0) = self.section_0(endian, data)? { - Ok(section_0.sh_info(endian) as usize) - } else { - // Section 0 must exist if e_phnum overflows. - Err(Error("Missing ELF section headers for e_phnum overflow")) - } - } - - /// Return the `e_shnum` field of the header. Handles extended values. - /// - /// Returns `Err` for invalid values. - fn shnum<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result { - let e_shnum = self.e_shnum(endian); - if e_shnum > 0 { - Ok(e_shnum as usize) - } else if let Some(section_0) = self.section_0(endian, data)? { - section_0 - .sh_size(endian) - .into() - .try_into() - .ok() - .read_error("Invalid ELF extended e_shnum") - } else { - // No section headers is ok. - Ok(0) - } - } - - /// Return the `e_shstrndx` field of the header. Handles extended values. - /// - /// Returns `Err` for invalid values (including if the index is 0). - fn shstrndx<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result { - let e_shstrndx = self.e_shstrndx(endian); - let index = if e_shstrndx != elf::SHN_XINDEX { - e_shstrndx.into() - } else if let Some(section_0) = self.section_0(endian, data)? { - section_0.sh_link(endian) - } else { - // Section 0 must exist if we're trying to read e_shstrndx. - return Err(Error("Missing ELF section headers for e_shstrndx overflow")); - }; - if index == 0 { - return Err(Error("Missing ELF e_shstrndx")); - } - Ok(index) - } - - /// Return the slice of program headers. - /// - /// Returns `Ok(&[])` if there are no program headers. - /// Returns `Err` for invalid values. 
- fn program_headers<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result<&'data [Self::ProgramHeader]> { - let phoff: u64 = self.e_phoff(endian).into(); - if phoff == 0 { - // No program headers is ok. - return Ok(&[]); - } - let phnum = self.phnum(endian, data)?; - if phnum == 0 { - // No program headers is ok. - return Ok(&[]); - } - let phentsize = self.e_phentsize(endian) as usize; - if phentsize != mem::size_of::() { - // Program header size must match. - return Err(Error("Invalid ELF program header entry size")); - } - data.read_slice_at(phoff, phnum) - .read_error("Invalid ELF program header size or alignment") - } - - /// Return the slice of section headers. - /// - /// Returns `Ok(&[])` if there are no section headers. - /// Returns `Err` for invalid values. - fn section_headers<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result<&'data [Self::SectionHeader]> { - let shoff: u64 = self.e_shoff(endian).into(); - if shoff == 0 { - // No section headers is ok. - return Ok(&[]); - } - let shnum = self.shnum(endian, data)?; - if shnum == 0 { - // No section headers is ok. - return Ok(&[]); - } - let shentsize = usize::from(self.e_shentsize(endian)); - if shentsize != mem::size_of::() { - // Section header size must match. - return Err(Error("Invalid ELF section header entry size")); - } - data.read_slice_at(shoff, shnum) - .read_error("Invalid ELF section header offset/size/alignment") - } - - /// Return the string table for the section headers. - fn section_strings<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - sections: &[Self::SectionHeader], - ) -> read::Result> { - if sections.is_empty() { - return Ok(StringTable::default()); - } - let index = self.shstrndx(endian, data)? 
as usize; - let shstrtab = sections.get(index).read_error("Invalid ELF e_shstrndx")?; - let strings = if let Some((shstrtab_offset, shstrtab_size)) = shstrtab.file_range(endian) { - let shstrtab_end = shstrtab_offset - .checked_add(shstrtab_size) - .read_error("Invalid ELF shstrtab size")?; - StringTable::new(data, shstrtab_offset, shstrtab_end) - } else { - StringTable::default() - }; - Ok(strings) - } - - /// Return the section table. - fn sections<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result> { - let sections = self.section_headers(endian, data)?; - let strings = self.section_strings(endian, data, sections)?; - Ok(SectionTable::new(sections, strings)) - } - - /// Returns whether this is a mips64el elf file. - fn is_mips64el(&self, endian: Self::Endian) -> bool { - self.is_class_64() && self.is_little_endian() && self.e_machine(endian) == elf::EM_MIPS - } -} - -impl FileHeader for elf::FileHeader32 { - type Word = u32; - type Sword = i32; - type Endian = Endian; - type ProgramHeader = elf::ProgramHeader32; - type SectionHeader = elf::SectionHeader32; - type CompressionHeader = elf::CompressionHeader32; - type NoteHeader = elf::NoteHeader32; - type Dyn = elf::Dyn32; - type Sym = elf::Sym32; - type Rel = elf::Rel32; - type Rela = elf::Rela32; - - #[inline] - fn is_type_64(&self) -> bool { - false - } - - #[inline] - fn is_type_64_sized() -> bool - where - Self: Sized, - { - false - } - - #[inline] - fn e_ident(&self) -> &elf::Ident { - &self.e_ident - } - - #[inline] - fn e_type(&self, endian: Self::Endian) -> u16 { - self.e_type.get(endian) - } - - #[inline] - fn e_machine(&self, endian: Self::Endian) -> u16 { - self.e_machine.get(endian) - } - - #[inline] - fn e_version(&self, endian: Self::Endian) -> u32 { - self.e_version.get(endian) - } - - #[inline] - fn e_entry(&self, endian: Self::Endian) -> Self::Word { - self.e_entry.get(endian) - } - - #[inline] - fn e_phoff(&self, endian: Self::Endian) -> Self::Word { - 
self.e_phoff.get(endian) - } - - #[inline] - fn e_shoff(&self, endian: Self::Endian) -> Self::Word { - self.e_shoff.get(endian) - } - - #[inline] - fn e_flags(&self, endian: Self::Endian) -> u32 { - self.e_flags.get(endian) - } - - #[inline] - fn e_ehsize(&self, endian: Self::Endian) -> u16 { - self.e_ehsize.get(endian) - } - - #[inline] - fn e_phentsize(&self, endian: Self::Endian) -> u16 { - self.e_phentsize.get(endian) - } - - #[inline] - fn e_phnum(&self, endian: Self::Endian) -> u16 { - self.e_phnum.get(endian) - } - - #[inline] - fn e_shentsize(&self, endian: Self::Endian) -> u16 { - self.e_shentsize.get(endian) - } - - #[inline] - fn e_shnum(&self, endian: Self::Endian) -> u16 { - self.e_shnum.get(endian) - } - - #[inline] - fn e_shstrndx(&self, endian: Self::Endian) -> u16 { - self.e_shstrndx.get(endian) - } -} - -impl FileHeader for elf::FileHeader64 { - type Word = u64; - type Sword = i64; - type Endian = Endian; - type ProgramHeader = elf::ProgramHeader64; - type SectionHeader = elf::SectionHeader64; - type CompressionHeader = elf::CompressionHeader64; - type NoteHeader = elf::NoteHeader32; - type Dyn = elf::Dyn64; - type Sym = elf::Sym64; - type Rel = elf::Rel64; - type Rela = elf::Rela64; - - #[inline] - fn is_type_64(&self) -> bool { - true - } - - #[inline] - fn is_type_64_sized() -> bool - where - Self: Sized, - { - true - } - - #[inline] - fn e_ident(&self) -> &elf::Ident { - &self.e_ident - } - - #[inline] - fn e_type(&self, endian: Self::Endian) -> u16 { - self.e_type.get(endian) - } - - #[inline] - fn e_machine(&self, endian: Self::Endian) -> u16 { - self.e_machine.get(endian) - } - - #[inline] - fn e_version(&self, endian: Self::Endian) -> u32 { - self.e_version.get(endian) - } - - #[inline] - fn e_entry(&self, endian: Self::Endian) -> Self::Word { - self.e_entry.get(endian) - } - - #[inline] - fn e_phoff(&self, endian: Self::Endian) -> Self::Word { - self.e_phoff.get(endian) - } - - #[inline] - fn e_shoff(&self, endian: Self::Endian) -> 
Self::Word { - self.e_shoff.get(endian) - } - - #[inline] - fn e_flags(&self, endian: Self::Endian) -> u32 { - self.e_flags.get(endian) - } - - #[inline] - fn e_ehsize(&self, endian: Self::Endian) -> u16 { - self.e_ehsize.get(endian) - } - - #[inline] - fn e_phentsize(&self, endian: Self::Endian) -> u16 { - self.e_phentsize.get(endian) - } - - #[inline] - fn e_phnum(&self, endian: Self::Endian) -> u16 { - self.e_phnum.get(endian) - } - - #[inline] - fn e_shentsize(&self, endian: Self::Endian) -> u16 { - self.e_shentsize.get(endian) - } - - #[inline] - fn e_shnum(&self, endian: Self::Endian) -> u16 { - self.e_shnum.get(endian) - } - - #[inline] - fn e_shstrndx(&self, endian: Self::Endian) -> u16 { - self.e_shstrndx.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/hash.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/hash.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/hash.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/hash.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,220 +0,0 @@ -use core::mem; - -use crate::elf; -use crate::read::{ReadError, ReadRef, Result}; -use crate::{U32, U64}; - -use super::{FileHeader, Sym, SymbolTable, Version, VersionTable}; - -/// A SysV symbol hash table in an ELF file. -#[derive(Debug)] -pub struct HashTable<'data, Elf: FileHeader> { - buckets: &'data [U32], - chains: &'data [U32], -} - -impl<'data, Elf: FileHeader> HashTable<'data, Elf> { - /// Parse a SysV hash table. - /// - /// `data` should be from a `SHT_HASH` section, or from a - /// segment pointed to via the `DT_HASH` entry. - /// - /// The header is read at offset 0 in the given `data`. 
- pub fn parse(endian: Elf::Endian, data: &'data [u8]) -> Result { - let mut offset = 0; - let header = data - .read::>(&mut offset) - .read_error("Invalid hash header")?; - let buckets = data - .read_slice(&mut offset, header.bucket_count.get(endian) as usize) - .read_error("Invalid hash buckets")?; - let chains = data - .read_slice(&mut offset, header.chain_count.get(endian) as usize) - .read_error("Invalid hash chains")?; - Ok(HashTable { buckets, chains }) - } - - /// Return the symbol table length. - pub fn symbol_table_length(&self) -> u32 { - self.chains.len() as u32 - } - - /// Use the hash table to find the symbol table entry with the given name, hash and version. - pub fn find>( - &self, - endian: Elf::Endian, - name: &[u8], - hash: u32, - version: Option<&Version<'_>>, - symbols: &SymbolTable<'data, Elf, R>, - versions: &VersionTable<'data, Elf>, - ) -> Option<(usize, &'data Elf::Sym)> { - // Get the chain start from the bucket for this hash. - let mut index = self.buckets[(hash as usize) % self.buckets.len()].get(endian) as usize; - // Avoid infinite loop. - let mut i = 0; - let strings = symbols.strings(); - while index != 0 && i < self.chains.len() { - if let Ok(symbol) = symbols.symbol(index) { - if symbol.name(endian, strings) == Ok(name) - && versions.matches(endian, index, version) - { - return Some((index, symbol)); - } - } - index = self.chains.get(index)?.get(endian) as usize; - i += 1; - } - None - } -} - -/// A GNU symbol hash table in an ELF file. -#[derive(Debug)] -pub struct GnuHashTable<'data, Elf: FileHeader> { - symbol_base: u32, - bloom_shift: u32, - bloom_filters: &'data [u8], - buckets: &'data [U32], - values: &'data [U32], -} - -impl<'data, Elf: FileHeader> GnuHashTable<'data, Elf> { - /// Parse a GNU hash table. - /// - /// `data` should be from a `SHT_GNU_HASH` section, or from a - /// segment pointed to via the `DT_GNU_HASH` entry. - /// - /// The header is read at offset 0 in the given `data`. 
- /// - /// The header does not contain a length field, and so all of `data` - /// will be used as the hash table values. It does not matter if this - /// is longer than needed, and this will often the case when accessing - /// the hash table via the `DT_GNU_HASH` entry. - pub fn parse(endian: Elf::Endian, data: &'data [u8]) -> Result { - let mut offset = 0; - let header = data - .read::>(&mut offset) - .read_error("Invalid GNU hash header")?; - let bloom_len = - u64::from(header.bloom_count.get(endian)) * mem::size_of::() as u64; - let bloom_filters = data - .read_bytes(&mut offset, bloom_len) - .read_error("Invalid GNU hash bloom filters")?; - let buckets = data - .read_slice(&mut offset, header.bucket_count.get(endian) as usize) - .read_error("Invalid GNU hash buckets")?; - let chain_count = (data.len() - offset as usize) / 4; - let values = data - .read_slice(&mut offset, chain_count) - .read_error("Invalid GNU hash values")?; - Ok(GnuHashTable { - symbol_base: header.symbol_base.get(endian), - bloom_shift: header.bloom_shift.get(endian), - bloom_filters, - buckets, - values, - }) - } - - /// Return the symbol table index of the first symbol in the hash table. - pub fn symbol_base(&self) -> u32 { - self.symbol_base - } - - /// Determine the symbol table length by finding the last entry in the hash table. - /// - /// Returns `None` if the hash table is empty or invalid. - pub fn symbol_table_length(&self, endian: Elf::Endian) -> Option { - // Ensure we find a non-empty bucket. - if self.symbol_base == 0 { - return None; - } - - // Find the highest chain index in a bucket. - let mut max_symbol = 0; - for bucket in self.buckets { - let bucket = bucket.get(endian); - if max_symbol < bucket { - max_symbol = bucket; - } - } - - // Find the end of the chain. - for value in self - .values - .get(max_symbol.checked_sub(self.symbol_base)? as usize..)? 
- { - max_symbol += 1; - if value.get(endian) & 1 != 0 { - return Some(max_symbol); - } - } - - None - } - - /// Use the hash table to find the symbol table entry with the given name, hash, and version. - pub fn find>( - &self, - endian: Elf::Endian, - name: &[u8], - hash: u32, - version: Option<&Version<'_>>, - symbols: &SymbolTable<'data, Elf, R>, - versions: &VersionTable<'data, Elf>, - ) -> Option<(usize, &'data Elf::Sym)> { - let word_bits = mem::size_of::() as u32 * 8; - - // Test against bloom filter. - let bloom_count = self.bloom_filters.len() / mem::size_of::(); - let offset = - ((hash / word_bits) & (bloom_count as u32 - 1)) * mem::size_of::() as u32; - let filter = if word_bits == 64 { - self.bloom_filters - .read_at::>(offset.into()) - .ok()? - .get(endian) - } else { - self.bloom_filters - .read_at::>(offset.into()) - .ok()? - .get(endian) - .into() - }; - if filter & (1 << (hash % word_bits)) == 0 { - return None; - } - if filter & (1 << ((hash >> self.bloom_shift) % word_bits)) == 0 { - return None; - } - - // Get the chain start from the bucket for this hash. - let mut index = self.buckets[(hash as usize) % self.buckets.len()].get(endian) as usize; - if index == 0 { - return None; - } - - // Test symbols in the chain. 
- let strings = symbols.strings(); - let symbols = symbols.symbols().get(index..)?; - let values = self - .values - .get(index.checked_sub(self.symbol_base as usize)?..)?; - for (symbol, value) in symbols.iter().zip(values.iter()) { - let value = value.get(endian); - if value | 1 == hash | 1 { - if symbol.name(endian, strings) == Ok(name) - && versions.matches(endian, index, version) - { - return Some((index, symbol)); - } - } - if value & 1 != 0 { - break; - } - index += 1; - } - None - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/mod.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/mod.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,42 +0,0 @@ -//! Support for reading ELF files. -//! -//! Defines traits to abstract over the difference between ELF32/ELF64, -//! and implements read functionality in terms of these traits. -//! -//! Also provides `ElfFile` and related types which implement the `Object` trait. 
- -mod file; -pub use file::*; - -mod segment; -pub use segment::*; - -mod section; -pub use section::*; - -mod symbol; -pub use symbol::*; - -mod relocation; -pub use relocation::*; - -mod comdat; -pub use comdat::*; - -mod dynamic; -pub use dynamic::*; - -mod compression; -pub use compression::*; - -mod note; -pub use note::*; - -mod hash; -pub use hash::*; - -mod version; -pub use version::*; - -mod attributes; -pub use attributes::*; diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/note.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/note.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/note.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/note.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,266 +0,0 @@ -use core::fmt::Debug; -use core::mem; - -use crate::elf; -use crate::endian::{self, U32}; -use crate::pod::Pod; -use crate::read::util; -use crate::read::{self, Bytes, Error, ReadError}; - -use super::FileHeader; - -/// An iterator over the notes in an ELF section or segment. -#[derive(Debug)] -pub struct NoteIterator<'data, Elf> -where - Elf: FileHeader, -{ - endian: Elf::Endian, - align: usize, - data: Bytes<'data>, -} - -impl<'data, Elf> NoteIterator<'data, Elf> -where - Elf: FileHeader, -{ - /// An iterator over the notes in an ELF section or segment. - /// - /// `align` should be from the `p_align` field of the segment, - /// or the `sh_addralign` field of the section. Supported values are - /// either 4 or 8, but values less than 4 are treated as 4. - /// This matches the behaviour of binutils. - /// - /// Returns `Err` if `align` is invalid. - pub fn new(endian: Elf::Endian, align: Elf::Word, data: &'data [u8]) -> read::Result { - let align = match align.into() { - 0u64..=4 => 4, - 8 => 8, - _ => return Err(Error("Invalid ELF note alignment")), - }; - // TODO: check data alignment? - Ok(NoteIterator { - endian, - align, - data: Bytes(data), - }) - } - - /// Returns the next note. 
- pub fn next(&mut self) -> read::Result>> { - let mut data = self.data; - if data.is_empty() { - return Ok(None); - } - - let header = data - .read_at::(0) - .read_error("ELF note is too short")?; - - // The name has no alignment requirement. - let offset = mem::size_of::(); - let namesz = header.n_namesz(self.endian) as usize; - let name = data - .read_bytes_at(offset, namesz) - .read_error("Invalid ELF note namesz")? - .0; - - // The descriptor must be aligned. - let offset = util::align(offset + namesz, self.align); - let descsz = header.n_descsz(self.endian) as usize; - let desc = data - .read_bytes_at(offset, descsz) - .read_error("Invalid ELF note descsz")? - .0; - - // The next note (if any) must be aligned. - let offset = util::align(offset + descsz, self.align); - if data.skip(offset).is_err() { - data = Bytes(&[]); - } - self.data = data; - - Ok(Some(Note { header, name, desc })) - } -} - -/// A parsed `NoteHeader`. -#[derive(Debug)] -pub struct Note<'data, Elf> -where - Elf: FileHeader, -{ - header: &'data Elf::NoteHeader, - name: &'data [u8], - desc: &'data [u8], -} - -impl<'data, Elf: FileHeader> Note<'data, Elf> { - /// Return the `n_type` field of the `NoteHeader`. - /// - /// The meaning of this field is determined by `name`. - pub fn n_type(&self, endian: Elf::Endian) -> u32 { - self.header.n_type(endian) - } - - /// Return the `n_namesz` field of the `NoteHeader`. - pub fn n_namesz(&self, endian: Elf::Endian) -> u32 { - self.header.n_namesz(endian) - } - - /// Return the `n_descsz` field of the `NoteHeader`. - pub fn n_descsz(&self, endian: Elf::Endian) -> u32 { - self.header.n_descsz(endian) - } - - /// Return the bytes for the name field following the `NoteHeader`. - /// - /// This field is usually a string including one or more trailing null bytes - /// (but it is not required to be). - /// - /// The length of this field is given by `n_namesz`. 
- pub fn name_bytes(&self) -> &'data [u8] { - self.name - } - - /// Return the bytes for the name field following the `NoteHeader`, - /// excluding all trailing null bytes. - pub fn name(&self) -> &'data [u8] { - let mut name = self.name; - while let [rest @ .., 0] = name { - name = rest; - } - name - } - - /// Return the bytes for the desc field following the `NoteHeader`. - /// - /// The length of this field is given by `n_descsz`. The meaning - /// of this field is determined by `name` and `n_type`. - pub fn desc(&self) -> &'data [u8] { - self.desc - } - - /// Return an iterator for properties if this note's type is `NT_GNU_PROPERTY_TYPE_0`. - pub fn gnu_properties( - &self, - endian: Elf::Endian, - ) -> Option> { - if self.name() != elf::ELF_NOTE_GNU || self.n_type(endian) != elf::NT_GNU_PROPERTY_TYPE_0 { - return None; - } - // Use the ELF class instead of the section alignment. - // This matches what other parsers do. - let align = if Elf::is_type_64_sized() { 8 } else { 4 }; - Some(GnuPropertyIterator { - endian, - align, - data: Bytes(self.desc), - }) - } -} - -/// A trait for generic access to `NoteHeader32` and `NoteHeader64`. 
-#[allow(missing_docs)] -pub trait NoteHeader: Debug + Pod { - type Endian: endian::Endian; - - fn n_namesz(&self, endian: Self::Endian) -> u32; - fn n_descsz(&self, endian: Self::Endian) -> u32; - fn n_type(&self, endian: Self::Endian) -> u32; -} - -impl NoteHeader for elf::NoteHeader32 { - type Endian = Endian; - - #[inline] - fn n_namesz(&self, endian: Self::Endian) -> u32 { - self.n_namesz.get(endian) - } - - #[inline] - fn n_descsz(&self, endian: Self::Endian) -> u32 { - self.n_descsz.get(endian) - } - - #[inline] - fn n_type(&self, endian: Self::Endian) -> u32 { - self.n_type.get(endian) - } -} - -impl NoteHeader for elf::NoteHeader64 { - type Endian = Endian; - - #[inline] - fn n_namesz(&self, endian: Self::Endian) -> u32 { - self.n_namesz.get(endian) - } - - #[inline] - fn n_descsz(&self, endian: Self::Endian) -> u32 { - self.n_descsz.get(endian) - } - - #[inline] - fn n_type(&self, endian: Self::Endian) -> u32 { - self.n_type.get(endian) - } -} - -/// An iterator over the properties in a `NT_GNU_PROPERTY_TYPE_0` note. -#[derive(Debug)] -pub struct GnuPropertyIterator<'data, Endian: endian::Endian> { - endian: Endian, - align: usize, - data: Bytes<'data>, -} - -impl<'data, Endian: endian::Endian> GnuPropertyIterator<'data, Endian> { - /// Returns the next property. - pub fn next(&mut self) -> read::Result>> { - let mut data = self.data; - if data.is_empty() { - return Ok(None); - } - - (|| -> Result<_, ()> { - let pr_type = data.read_at::>(0)?.get(self.endian); - let pr_datasz = data.read_at::>(4)?.get(self.endian) as usize; - let pr_data = data.read_bytes_at(8, pr_datasz)?.0; - data.skip(util::align(8 + pr_datasz, self.align))?; - self.data = data; - Ok(Some(GnuProperty { pr_type, pr_data })) - })() - .read_error("Invalid ELF GNU property") - } -} - -/// A property in a `NT_GNU_PROPERTY_TYPE_0` note. 
-#[derive(Debug)] -pub struct GnuProperty<'data> { - pr_type: u32, - pr_data: &'data [u8], -} - -impl<'data> GnuProperty<'data> { - /// Return the property type. - /// - /// This is one of the `GNU_PROPERTY_*` constants. - pub fn pr_type(&self) -> u32 { - self.pr_type - } - - /// Return the property data. - pub fn pr_data(&self) -> &'data [u8] { - self.pr_data - } - - /// Parse the property data as an unsigned 32-bit integer. - pub fn data_u32(&self, endian: E) -> read::Result { - Bytes(self.pr_data) - .read_at::>(0) - .read_error("Invalid ELF GNU property data") - .map(|val| val.get(endian)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/relocation.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/relocation.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/relocation.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/relocation.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,576 +0,0 @@ -use alloc::fmt; -use alloc::vec::Vec; -use core::fmt::Debug; -use core::slice; - -use crate::elf; -use crate::endian::{self, Endianness}; -use crate::pod::Pod; -use crate::read::{ - self, Error, ReadRef, Relocation, RelocationEncoding, RelocationKind, RelocationTarget, - SectionIndex, SymbolIndex, -}; - -use super::{ElfFile, FileHeader, SectionHeader, SectionTable}; - -/// A mapping from section index to associated relocation sections. -#[derive(Debug)] -pub struct RelocationSections { - relocations: Vec, -} - -impl RelocationSections { - /// Create a new mapping using the section table. - /// - /// Skips relocation sections that do not use the given symbol table section. 
- pub fn parse<'data, Elf: FileHeader, R: ReadRef<'data>>( - endian: Elf::Endian, - sections: &SectionTable<'data, Elf, R>, - symbol_section: SectionIndex, - ) -> read::Result { - let mut relocations = vec![0; sections.len()]; - for (index, section) in sections.iter().enumerate().rev() { - let sh_type = section.sh_type(endian); - if sh_type == elf::SHT_REL || sh_type == elf::SHT_RELA { - // The symbol indices used in relocations must be for the symbol table - // we are expecting to use. - let sh_link = SectionIndex(section.sh_link(endian) as usize); - if sh_link != symbol_section { - continue; - } - - let sh_info = section.sh_info(endian) as usize; - if sh_info == 0 { - // Skip dynamic relocations. - continue; - } - if sh_info >= relocations.len() { - return Err(Error("Invalid ELF sh_info for relocation section")); - } - - // Handle multiple relocation sections by chaining them. - let next = relocations[sh_info]; - relocations[sh_info] = index; - relocations[index] = next; - } - } - Ok(Self { relocations }) - } - - /// Given a section index, return the section index of the associated relocation section. - /// - /// This may also be called with a relocation section index, and it will return the - /// next associated relocation section. 
- pub fn get(&self, index: usize) -> Option { - self.relocations.get(index).cloned().filter(|x| *x != 0) - } -} - -pub(super) enum ElfRelaIterator<'data, Elf: FileHeader> { - Rel(slice::Iter<'data, Elf::Rel>), - Rela(slice::Iter<'data, Elf::Rela>), -} - -impl<'data, Elf: FileHeader> ElfRelaIterator<'data, Elf> { - fn is_rel(&self) -> bool { - match self { - ElfRelaIterator::Rel(_) => true, - ElfRelaIterator::Rela(_) => false, - } - } -} - -impl<'data, Elf: FileHeader> Iterator for ElfRelaIterator<'data, Elf> { - type Item = Elf::Rela; - - fn next(&mut self) -> Option { - match self { - ElfRelaIterator::Rel(ref mut i) => i.next().cloned().map(Self::Item::from), - ElfRelaIterator::Rela(ref mut i) => i.next().cloned(), - } - } -} - -/// An iterator over the dynamic relocations for an `ElfFile32`. -pub type ElfDynamicRelocationIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfDynamicRelocationIterator<'data, 'file, elf::FileHeader32, R>; -/// An iterator over the dynamic relocations for an `ElfFile64`. -pub type ElfDynamicRelocationIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfDynamicRelocationIterator<'data, 'file, elf::FileHeader64, R>; - -/// An iterator over the dynamic relocations for an `ElfFile`. -pub struct ElfDynamicRelocationIterator<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - /// The current relocation section index. 
- pub(super) section_index: SectionIndex, - pub(super) file: &'file ElfFile<'data, Elf, R>, - pub(super) relocations: Option>, -} - -impl<'data, 'file, Elf, R> Iterator for ElfDynamicRelocationIterator<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - type Item = (u64, Relocation); - - fn next(&mut self) -> Option { - let endian = self.file.endian; - loop { - if let Some(ref mut relocations) = self.relocations { - if let Some(reloc) = relocations.next() { - let relocation = - parse_relocation(self.file.header, endian, reloc, relocations.is_rel()); - return Some((reloc.r_offset(endian).into(), relocation)); - } - self.relocations = None; - } - - let section = self.file.sections.section(self.section_index).ok()?; - self.section_index.0 += 1; - - let sh_link = SectionIndex(section.sh_link(endian) as usize); - if sh_link != self.file.dynamic_symbols.section() { - continue; - } - - match section.sh_type(endian) { - elf::SHT_REL => { - if let Ok(relocations) = section.data_as_array(endian, self.file.data) { - self.relocations = Some(ElfRelaIterator::Rel(relocations.iter())); - } - } - elf::SHT_RELA => { - if let Ok(relocations) = section.data_as_array(endian, self.file.data) { - self.relocations = Some(ElfRelaIterator::Rela(relocations.iter())); - } - } - _ => {} - } - } - } -} - -impl<'data, 'file, Elf, R> fmt::Debug for ElfDynamicRelocationIterator<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ElfDynamicRelocationIterator").finish() - } -} - -/// An iterator over the relocations for an `ElfSection32`. -pub type ElfSectionRelocationIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSectionRelocationIterator<'data, 'file, elf::FileHeader32, R>; -/// An iterator over the relocations for an `ElfSection64`. 
-pub type ElfSectionRelocationIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSectionRelocationIterator<'data, 'file, elf::FileHeader64, R>; - -/// An iterator over the relocations for an `ElfSection`. -pub struct ElfSectionRelocationIterator<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - /// The current pointer in the chain of relocation sections. - pub(super) section_index: SectionIndex, - pub(super) file: &'file ElfFile<'data, Elf, R>, - pub(super) relocations: Option>, -} - -impl<'data, 'file, Elf, R> Iterator for ElfSectionRelocationIterator<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - type Item = (u64, Relocation); - - fn next(&mut self) -> Option { - let endian = self.file.endian; - loop { - if let Some(ref mut relocations) = self.relocations { - if let Some(reloc) = relocations.next() { - let relocation = - parse_relocation(self.file.header, endian, reloc, relocations.is_rel()); - return Some((reloc.r_offset(endian).into(), relocation)); - } - self.relocations = None; - } - self.section_index = SectionIndex(self.file.relocations.get(self.section_index.0)?); - // The construction of RelocationSections ensures section_index is valid. 
- let section = self.file.sections.section(self.section_index).unwrap(); - match section.sh_type(endian) { - elf::SHT_REL => { - if let Ok(relocations) = section.data_as_array(endian, self.file.data) { - self.relocations = Some(ElfRelaIterator::Rel(relocations.iter())); - } - } - elf::SHT_RELA => { - if let Ok(relocations) = section.data_as_array(endian, self.file.data) { - self.relocations = Some(ElfRelaIterator::Rela(relocations.iter())); - } - } - _ => {} - } - } - } -} - -impl<'data, 'file, Elf, R> fmt::Debug for ElfSectionRelocationIterator<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ElfSectionRelocationIterator").finish() - } -} - -fn parse_relocation( - header: &Elf, - endian: Elf::Endian, - reloc: Elf::Rela, - implicit_addend: bool, -) -> Relocation { - let mut encoding = RelocationEncoding::Generic; - let is_mips64el = header.is_mips64el(endian); - let (kind, size) = match header.e_machine(endian) { - elf::EM_AARCH64 => { - if header.is_type_64() { - match reloc.r_type(endian, false) { - elf::R_AARCH64_ABS64 => (RelocationKind::Absolute, 64), - elf::R_AARCH64_ABS32 => (RelocationKind::Absolute, 32), - elf::R_AARCH64_ABS16 => (RelocationKind::Absolute, 16), - elf::R_AARCH64_PREL64 => (RelocationKind::Relative, 64), - elf::R_AARCH64_PREL32 => (RelocationKind::Relative, 32), - elf::R_AARCH64_PREL16 => (RelocationKind::Relative, 16), - elf::R_AARCH64_CALL26 => { - encoding = RelocationEncoding::AArch64Call; - (RelocationKind::PltRelative, 26) - } - r_type => (RelocationKind::Elf(r_type), 0), - } - } else { - match reloc.r_type(endian, false) { - elf::R_AARCH64_P32_ABS32 => (RelocationKind::Absolute, 32), - r_type => (RelocationKind::Elf(r_type), 0), - } - } - } - elf::EM_ARM => match reloc.r_type(endian, false) { - elf::R_ARM_ABS32 => (RelocationKind::Absolute, 32), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_AVR => match 
reloc.r_type(endian, false) { - elf::R_AVR_32 => (RelocationKind::Absolute, 32), - elf::R_AVR_16 => (RelocationKind::Absolute, 16), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_BPF => match reloc.r_type(endian, false) { - elf::R_BPF_64_64 => (RelocationKind::Absolute, 64), - elf::R_BPF_64_32 => (RelocationKind::Absolute, 32), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_CSKY => match reloc.r_type(endian, false) { - elf::R_CKCORE_ADDR32 => (RelocationKind::Absolute, 32), - elf::R_CKCORE_PCREL32 => (RelocationKind::Relative, 32), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_386 => match reloc.r_type(endian, false) { - elf::R_386_32 => (RelocationKind::Absolute, 32), - elf::R_386_PC32 => (RelocationKind::Relative, 32), - elf::R_386_GOT32 => (RelocationKind::Got, 32), - elf::R_386_PLT32 => (RelocationKind::PltRelative, 32), - elf::R_386_GOTOFF => (RelocationKind::GotBaseOffset, 32), - elf::R_386_GOTPC => (RelocationKind::GotBaseRelative, 32), - elf::R_386_16 => (RelocationKind::Absolute, 16), - elf::R_386_PC16 => (RelocationKind::Relative, 16), - elf::R_386_8 => (RelocationKind::Absolute, 8), - elf::R_386_PC8 => (RelocationKind::Relative, 8), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_X86_64 => match reloc.r_type(endian, false) { - elf::R_X86_64_64 => (RelocationKind::Absolute, 64), - elf::R_X86_64_PC32 => (RelocationKind::Relative, 32), - elf::R_X86_64_GOT32 => (RelocationKind::Got, 32), - elf::R_X86_64_PLT32 => (RelocationKind::PltRelative, 32), - elf::R_X86_64_GOTPCREL => (RelocationKind::GotRelative, 32), - elf::R_X86_64_32 => (RelocationKind::Absolute, 32), - elf::R_X86_64_32S => { - encoding = RelocationEncoding::X86Signed; - (RelocationKind::Absolute, 32) - } - elf::R_X86_64_16 => (RelocationKind::Absolute, 16), - elf::R_X86_64_PC16 => (RelocationKind::Relative, 16), - elf::R_X86_64_8 => (RelocationKind::Absolute, 8), - elf::R_X86_64_PC8 => (RelocationKind::Relative, 8), - r_type => 
(RelocationKind::Elf(r_type), 0), - }, - elf::EM_HEXAGON => match reloc.r_type(endian, false) { - elf::R_HEX_32 => (RelocationKind::Absolute, 32), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_LOONGARCH => match reloc.r_type(endian, false) { - elf::R_LARCH_32 => (RelocationKind::Absolute, 32), - elf::R_LARCH_64 => (RelocationKind::Absolute, 64), - elf::R_LARCH_32_PCREL => (RelocationKind::Relative, 32), - elf::R_LARCH_B16 => { - encoding = RelocationEncoding::LoongArchBranch; - (RelocationKind::Relative, 16) - } - elf::R_LARCH_B21 => { - encoding = RelocationEncoding::LoongArchBranch; - (RelocationKind::Relative, 21) - } - elf::R_LARCH_B26 => { - encoding = RelocationEncoding::LoongArchBranch; - (RelocationKind::Relative, 26) - } - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_MIPS => match reloc.r_type(endian, is_mips64el) { - elf::R_MIPS_16 => (RelocationKind::Absolute, 16), - elf::R_MIPS_32 => (RelocationKind::Absolute, 32), - elf::R_MIPS_64 => (RelocationKind::Absolute, 64), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_MSP430 => match reloc.r_type(endian, false) { - elf::R_MSP430_32 => (RelocationKind::Absolute, 32), - elf::R_MSP430_16_BYTE => (RelocationKind::Absolute, 16), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_PPC => match reloc.r_type(endian, false) { - elf::R_PPC_ADDR32 => (RelocationKind::Absolute, 32), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_PPC64 => match reloc.r_type(endian, false) { - elf::R_PPC64_ADDR32 => (RelocationKind::Absolute, 32), - elf::R_PPC64_ADDR64 => (RelocationKind::Absolute, 64), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_RISCV => match reloc.r_type(endian, false) { - elf::R_RISCV_32 => (RelocationKind::Absolute, 32), - elf::R_RISCV_64 => (RelocationKind::Absolute, 64), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_S390 => match reloc.r_type(endian, false) { - elf::R_390_8 => (RelocationKind::Absolute, 8), - 
elf::R_390_16 => (RelocationKind::Absolute, 16), - elf::R_390_32 => (RelocationKind::Absolute, 32), - elf::R_390_64 => (RelocationKind::Absolute, 64), - elf::R_390_PC16 => (RelocationKind::Relative, 16), - elf::R_390_PC32 => (RelocationKind::Relative, 32), - elf::R_390_PC64 => (RelocationKind::Relative, 64), - elf::R_390_PC16DBL => { - encoding = RelocationEncoding::S390xDbl; - (RelocationKind::Relative, 16) - } - elf::R_390_PC32DBL => { - encoding = RelocationEncoding::S390xDbl; - (RelocationKind::Relative, 32) - } - elf::R_390_PLT16DBL => { - encoding = RelocationEncoding::S390xDbl; - (RelocationKind::PltRelative, 16) - } - elf::R_390_PLT32DBL => { - encoding = RelocationEncoding::S390xDbl; - (RelocationKind::PltRelative, 32) - } - elf::R_390_GOT16 => (RelocationKind::Got, 16), - elf::R_390_GOT32 => (RelocationKind::Got, 32), - elf::R_390_GOT64 => (RelocationKind::Got, 64), - elf::R_390_GOTENT => { - encoding = RelocationEncoding::S390xDbl; - (RelocationKind::GotRelative, 32) - } - elf::R_390_GOTOFF16 => (RelocationKind::GotBaseOffset, 16), - elf::R_390_GOTOFF32 => (RelocationKind::GotBaseOffset, 32), - elf::R_390_GOTOFF64 => (RelocationKind::GotBaseOffset, 64), - elf::R_390_GOTPC => (RelocationKind::GotBaseRelative, 64), - elf::R_390_GOTPCDBL => { - encoding = RelocationEncoding::S390xDbl; - (RelocationKind::GotBaseRelative, 32) - } - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_SBF => match reloc.r_type(endian, false) { - elf::R_SBF_64_64 => (RelocationKind::Absolute, 64), - elf::R_SBF_64_32 => (RelocationKind::Absolute, 32), - r_type => (RelocationKind::Elf(r_type), 0), - }, - elf::EM_SPARC | elf::EM_SPARC32PLUS | elf::EM_SPARCV9 => { - match reloc.r_type(endian, false) { - elf::R_SPARC_32 | elf::R_SPARC_UA32 => (RelocationKind::Absolute, 32), - elf::R_SPARC_64 | elf::R_SPARC_UA64 => (RelocationKind::Absolute, 64), - r_type => (RelocationKind::Elf(r_type), 0), - } - } - elf::EM_XTENSA => match reloc.r_type(endian, false) { - elf::R_XTENSA_32 => 
(RelocationKind::Absolute, 32), - elf::R_XTENSA_32_PCREL => (RelocationKind::Relative, 32), - r_type => (RelocationKind::Elf(r_type), 0), - }, - _ => (RelocationKind::Elf(reloc.r_type(endian, false)), 0), - }; - let sym = reloc.r_sym(endian, is_mips64el) as usize; - let target = if sym == 0 { - RelocationTarget::Absolute - } else { - RelocationTarget::Symbol(SymbolIndex(sym)) - }; - Relocation { - kind, - encoding, - size, - target, - addend: reloc.r_addend(endian).into(), - implicit_addend, - } -} - -/// A trait for generic access to `Rel32` and `Rel64`. -#[allow(missing_docs)] -pub trait Rel: Debug + Pod + Clone { - type Word: Into; - type Sword: Into; - type Endian: endian::Endian; - - fn r_offset(&self, endian: Self::Endian) -> Self::Word; - fn r_info(&self, endian: Self::Endian) -> Self::Word; - fn r_sym(&self, endian: Self::Endian) -> u32; - fn r_type(&self, endian: Self::Endian) -> u32; -} - -impl Rel for elf::Rel32 { - type Word = u32; - type Sword = i32; - type Endian = Endian; - - #[inline] - fn r_offset(&self, endian: Self::Endian) -> Self::Word { - self.r_offset.get(endian) - } - - #[inline] - fn r_info(&self, endian: Self::Endian) -> Self::Word { - self.r_info.get(endian) - } - - #[inline] - fn r_sym(&self, endian: Self::Endian) -> u32 { - self.r_sym(endian) - } - - #[inline] - fn r_type(&self, endian: Self::Endian) -> u32 { - self.r_type(endian) - } -} - -impl Rel for elf::Rel64 { - type Word = u64; - type Sword = i64; - type Endian = Endian; - - #[inline] - fn r_offset(&self, endian: Self::Endian) -> Self::Word { - self.r_offset.get(endian) - } - - #[inline] - fn r_info(&self, endian: Self::Endian) -> Self::Word { - self.r_info.get(endian) - } - - #[inline] - fn r_sym(&self, endian: Self::Endian) -> u32 { - self.r_sym(endian) - } - - #[inline] - fn r_type(&self, endian: Self::Endian) -> u32 { - self.r_type(endian) - } -} - -/// A trait for generic access to `Rela32` and `Rela64`. 
-#[allow(missing_docs)] -pub trait Rela: Debug + Pod + Clone { - type Word: Into; - type Sword: Into; - type Endian: endian::Endian; - - fn r_offset(&self, endian: Self::Endian) -> Self::Word; - fn r_info(&self, endian: Self::Endian, is_mips64el: bool) -> Self::Word; - fn r_addend(&self, endian: Self::Endian) -> Self::Sword; - fn r_sym(&self, endian: Self::Endian, is_mips64el: bool) -> u32; - fn r_type(&self, endian: Self::Endian, is_mips64el: bool) -> u32; -} - -impl Rela for elf::Rela32 { - type Word = u32; - type Sword = i32; - type Endian = Endian; - - #[inline] - fn r_offset(&self, endian: Self::Endian) -> Self::Word { - self.r_offset.get(endian) - } - - #[inline] - fn r_info(&self, endian: Self::Endian, _is_mips64el: bool) -> Self::Word { - self.r_info.get(endian) - } - - #[inline] - fn r_addend(&self, endian: Self::Endian) -> Self::Sword { - self.r_addend.get(endian) - } - - #[inline] - fn r_sym(&self, endian: Self::Endian, _is_mips64el: bool) -> u32 { - self.r_sym(endian) - } - - #[inline] - fn r_type(&self, endian: Self::Endian, _is_mips64el: bool) -> u32 { - self.r_type(endian) - } -} - -impl Rela for elf::Rela64 { - type Word = u64; - type Sword = i64; - type Endian = Endian; - - #[inline] - fn r_offset(&self, endian: Self::Endian) -> Self::Word { - self.r_offset.get(endian) - } - - #[inline] - fn r_info(&self, endian: Self::Endian, is_mips64el: bool) -> Self::Word { - self.get_r_info(endian, is_mips64el) - } - - #[inline] - fn r_addend(&self, endian: Self::Endian) -> Self::Sword { - self.r_addend.get(endian) - } - - #[inline] - fn r_sym(&self, endian: Self::Endian, is_mips64el: bool) -> u32 { - self.r_sym(endian, is_mips64el) - } - - #[inline] - fn r_type(&self, endian: Self::Endian, is_mips64el: bool) -> u32 { - self.r_type(endian, is_mips64el) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/section.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/section.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/section.rs 
2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/section.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1146 +0,0 @@ -use core::fmt::Debug; -use core::{iter, mem, slice, str}; - -use crate::elf; -use crate::endian::{self, Endianness, U32Bytes}; -use crate::pod::Pod; -use crate::read::{ - self, Bytes, CompressedData, CompressedFileRange, CompressionFormat, Error, ObjectSection, - ReadError, ReadRef, SectionFlags, SectionIndex, SectionKind, StringTable, -}; - -use super::{ - AttributesSection, CompressionHeader, ElfFile, ElfSectionRelocationIterator, FileHeader, - GnuHashTable, HashTable, NoteIterator, RelocationSections, SymbolTable, VerdefIterator, - VerneedIterator, VersionTable, -}; - -/// The table of section headers in an ELF file. -/// -/// Also includes the string table used for the section names. -#[derive(Debug, Default, Clone, Copy)] -pub struct SectionTable<'data, Elf: FileHeader, R = &'data [u8]> -where - R: ReadRef<'data>, -{ - sections: &'data [Elf::SectionHeader], - strings: StringTable<'data, R>, -} - -impl<'data, Elf: FileHeader, R: ReadRef<'data>> SectionTable<'data, Elf, R> { - /// Create a new section table. - #[inline] - pub fn new(sections: &'data [Elf::SectionHeader], strings: StringTable<'data, R>) -> Self { - SectionTable { sections, strings } - } - - /// Iterate over the section headers. - #[inline] - pub fn iter(&self) -> slice::Iter<'data, Elf::SectionHeader> { - self.sections.iter() - } - - /// Return true if the section table is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.sections.is_empty() - } - - /// The number of section headers. - #[inline] - pub fn len(&self) -> usize { - self.sections.len() - } - - /// Return the section header at the given index. - pub fn section(&self, index: SectionIndex) -> read::Result<&'data Elf::SectionHeader> { - self.sections - .get(index.0) - .read_error("Invalid ELF section index") - } - - /// Return the section header with the given name. 
- /// - /// Ignores sections with invalid names. - pub fn section_by_name( - &self, - endian: Elf::Endian, - name: &[u8], - ) -> Option<(usize, &'data Elf::SectionHeader)> { - self.sections - .iter() - .enumerate() - .find(|(_, section)| self.section_name(endian, section) == Ok(name)) - } - - /// Return the section name for the given section header. - pub fn section_name( - &self, - endian: Elf::Endian, - section: &'data Elf::SectionHeader, - ) -> read::Result<&'data [u8]> { - section.name(endian, self.strings) - } - - /// Return the string table at the given section index. - /// - /// Returns an error if the section is not a string table. - #[inline] - pub fn strings( - &self, - endian: Elf::Endian, - data: R, - index: SectionIndex, - ) -> read::Result> { - self.section(index)? - .strings(endian, data)? - .read_error("Invalid ELF string section type") - } - - /// Return the symbol table of the given section type. - /// - /// Returns an empty symbol table if the symbol table does not exist. - #[inline] - pub fn symbols( - &self, - endian: Elf::Endian, - data: R, - sh_type: u32, - ) -> read::Result> { - debug_assert!(sh_type == elf::SHT_DYNSYM || sh_type == elf::SHT_SYMTAB); - - let (index, section) = match self - .iter() - .enumerate() - .find(|s| s.1.sh_type(endian) == sh_type) - { - Some(s) => s, - None => return Ok(SymbolTable::default()), - }; - - SymbolTable::parse(endian, data, self, SectionIndex(index), section) - } - - /// Return the symbol table at the given section index. - /// - /// Returns an error if the section is not a symbol table. 
- #[inline] - pub fn symbol_table_by_index( - &self, - endian: Elf::Endian, - data: R, - index: SectionIndex, - ) -> read::Result> { - let section = self.section(index)?; - match section.sh_type(endian) { - elf::SHT_DYNSYM | elf::SHT_SYMTAB => {} - _ => return Err(Error("Invalid ELF symbol table section type")), - } - SymbolTable::parse(endian, data, self, index, section) - } - - /// Create a mapping from section index to associated relocation sections. - #[inline] - pub fn relocation_sections( - &self, - endian: Elf::Endian, - symbol_section: SectionIndex, - ) -> read::Result { - RelocationSections::parse(endian, self, symbol_section) - } - - /// Return the contents of a dynamic section. - /// - /// Also returns the linked string table index. - /// - /// Returns `Ok(None)` if there is no `SHT_DYNAMIC` section. - /// Returns `Err` for invalid values. - pub fn dynamic( - &self, - endian: Elf::Endian, - data: R, - ) -> read::Result> { - for section in self.sections { - if let Some(dynamic) = section.dynamic(endian, data)? { - return Ok(Some(dynamic)); - } - } - Ok(None) - } - - /// Return the header of a SysV hash section. - /// - /// Returns `Ok(None)` if there is no SysV GNU hash section. - /// Returns `Err` for invalid values. - pub fn hash_header( - &self, - endian: Elf::Endian, - data: R, - ) -> read::Result>> { - for section in self.sections { - if let Some(hash) = section.hash_header(endian, data)? { - return Ok(Some(hash)); - } - } - Ok(None) - } - - /// Return the contents of a SysV hash section. - /// - /// Also returns the linked symbol table index. - /// - /// Returns `Ok(None)` if there is no SysV hash section. - /// Returns `Err` for invalid values. - pub fn hash( - &self, - endian: Elf::Endian, - data: R, - ) -> read::Result, SectionIndex)>> { - for section in self.sections { - if let Some(hash) = section.hash(endian, data)? { - return Ok(Some(hash)); - } - } - Ok(None) - } - - /// Return the header of a GNU hash section. 
- /// - /// Returns `Ok(None)` if there is no GNU hash section. - /// Returns `Err` for invalid values. - pub fn gnu_hash_header( - &self, - endian: Elf::Endian, - data: R, - ) -> read::Result>> { - for section in self.sections { - if let Some(hash) = section.gnu_hash_header(endian, data)? { - return Ok(Some(hash)); - } - } - Ok(None) - } - - /// Return the contents of a GNU hash section. - /// - /// Also returns the linked symbol table index. - /// - /// Returns `Ok(None)` if there is no GNU hash section. - /// Returns `Err` for invalid values. - pub fn gnu_hash( - &self, - endian: Elf::Endian, - data: R, - ) -> read::Result, SectionIndex)>> { - for section in self.sections { - if let Some(hash) = section.gnu_hash(endian, data)? { - return Ok(Some(hash)); - } - } - Ok(None) - } - - /// Return the contents of a `SHT_GNU_VERSYM` section. - /// - /// Also returns the linked symbol table index. - /// - /// Returns `Ok(None)` if there is no `SHT_GNU_VERSYM` section. - /// Returns `Err` for invalid values. - pub fn gnu_versym( - &self, - endian: Elf::Endian, - data: R, - ) -> read::Result], SectionIndex)>> { - for section in self.sections { - if let Some(syms) = section.gnu_versym(endian, data)? { - return Ok(Some(syms)); - } - } - Ok(None) - } - - /// Return the contents of a `SHT_GNU_VERDEF` section. - /// - /// Also returns the linked string table index. - /// - /// Returns `Ok(None)` if there is no `SHT_GNU_VERDEF` section. - /// Returns `Err` for invalid values. - pub fn gnu_verdef( - &self, - endian: Elf::Endian, - data: R, - ) -> read::Result, SectionIndex)>> { - for section in self.sections { - if let Some(defs) = section.gnu_verdef(endian, data)? { - return Ok(Some(defs)); - } - } - Ok(None) - } - - /// Return the contents of a `SHT_GNU_VERNEED` section. - /// - /// Also returns the linked string table index. - /// - /// Returns `Ok(None)` if there is no `SHT_GNU_VERNEED` section. - /// Returns `Err` for invalid values. 
- pub fn gnu_verneed( - &self, - endian: Elf::Endian, - data: R, - ) -> read::Result, SectionIndex)>> { - for section in self.sections { - if let Some(needs) = section.gnu_verneed(endian, data)? { - return Ok(Some(needs)); - } - } - Ok(None) - } - - /// Returns the symbol version table. - /// - /// Returns `Ok(None)` if there is no `SHT_GNU_VERSYM` section. - /// Returns `Err` for invalid values. - pub fn versions( - &self, - endian: Elf::Endian, - data: R, - ) -> read::Result>> { - let (versyms, link) = match self.gnu_versym(endian, data)? { - Some(val) => val, - None => return Ok(None), - }; - let strings = self.symbol_table_by_index(endian, data, link)?.strings(); - // TODO: check links? - let verdefs = self.gnu_verdef(endian, data)?.map(|x| x.0); - let verneeds = self.gnu_verneed(endian, data)?.map(|x| x.0); - VersionTable::parse(endian, versyms, verdefs, verneeds, strings).map(Some) - } -} - -/// An iterator over the sections of an `ElfFile32`. -pub type ElfSectionIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSectionIterator<'data, 'file, elf::FileHeader32, R>; -/// An iterator over the sections of an `ElfFile64`. -pub type ElfSectionIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSectionIterator<'data, 'file, elf::FileHeader64, R>; - -/// An iterator over the sections of an `ElfFile`. -#[derive(Debug)] -pub struct ElfSectionIterator<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file ElfFile<'data, Elf, R>, - pub(super) iter: iter::Enumerate>, -} - -impl<'data, 'file, Elf, R> Iterator for ElfSectionIterator<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - type Item = ElfSection<'data, 'file, Elf, R>; - - fn next(&mut self) -> Option { - self.iter.next().map(|(index, section)| ElfSection { - index: SectionIndex(index), - file: self.file, - section, - }) - } -} - -/// A section of an `ElfFile32`. 
-pub type ElfSection32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSection<'data, 'file, elf::FileHeader32, R>; -/// A section of an `ElfFile64`. -pub type ElfSection64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSection<'data, 'file, elf::FileHeader64, R>; - -/// A section of an `ElfFile`. -#[derive(Debug)] -pub struct ElfSection<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file ElfFile<'data, Elf, R>, - pub(super) index: SectionIndex, - pub(super) section: &'data Elf::SectionHeader, -} - -impl<'data, 'file, Elf: FileHeader, R: ReadRef<'data>> ElfSection<'data, 'file, Elf, R> { - fn bytes(&self) -> read::Result<&'data [u8]> { - self.section - .data(self.file.endian, self.file.data) - .read_error("Invalid ELF section size or offset") - } - - fn maybe_compressed(&self) -> read::Result> { - let endian = self.file.endian; - if let Some((header, offset, compressed_size)) = - self.section.compression(endian, self.file.data)? - { - let format = match header.ch_type(endian) { - elf::ELFCOMPRESS_ZLIB => CompressionFormat::Zlib, - elf::ELFCOMPRESS_ZSTD => CompressionFormat::Zstandard, - _ => return Err(Error("Unsupported ELF compression type")), - }; - let uncompressed_size = header.ch_size(endian).into(); - Ok(Some(CompressedFileRange { - format, - offset, - compressed_size, - uncompressed_size, - })) - } else { - Ok(None) - } - } - - /// Try GNU-style "ZLIB" header decompression. - fn maybe_compressed_gnu(&self) -> read::Result> { - let name = match self.name() { - Ok(name) => name, - // I think it's ok to ignore this error? 
- Err(_) => return Ok(None), - }; - if !name.starts_with(".zdebug_") { - return Ok(None); - } - let (section_offset, section_size) = self - .section - .file_range(self.file.endian) - .read_error("Invalid ELF GNU compressed section type")?; - let mut offset = section_offset; - let data = self.file.data; - // Assume ZLIB-style uncompressed data is no more than 4GB to avoid accidentally - // huge allocations. This also reduces the chance of accidentally matching on a - // .debug_str that happens to start with "ZLIB". - if data - .read_bytes(&mut offset, 8) - .read_error("ELF GNU compressed section is too short")? - != b"ZLIB\0\0\0\0" - { - return Err(Error("Invalid ELF GNU compressed section header")); - } - let uncompressed_size = data - .read::>(&mut offset) - .read_error("ELF GNU compressed section is too short")? - .get(endian::BigEndian) - .into(); - let compressed_size = section_size - .checked_sub(offset - section_offset) - .read_error("ELF GNU compressed section is too short")?; - Ok(Some(CompressedFileRange { - format: CompressionFormat::Zlib, - offset, - compressed_size, - uncompressed_size, - })) - } -} - -impl<'data, 'file, Elf, R> read::private::Sealed for ElfSection<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Elf, R> ObjectSection<'data> for ElfSection<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - type RelocationIterator = ElfSectionRelocationIterator<'data, 'file, Elf, R>; - - #[inline] - fn index(&self) -> SectionIndex { - self.index - } - - #[inline] - fn address(&self) -> u64 { - self.section.sh_addr(self.file.endian).into() - } - - #[inline] - fn size(&self) -> u64 { - self.section.sh_size(self.file.endian).into() - } - - #[inline] - fn align(&self) -> u64 { - self.section.sh_addralign(self.file.endian).into() - } - - #[inline] - fn file_range(&self) -> Option<(u64, u64)> { - self.section.file_range(self.file.endian) - } - - #[inline] - fn data(&self) -> 
read::Result<&'data [u8]> { - self.bytes() - } - - fn data_range(&self, address: u64, size: u64) -> read::Result> { - Ok(read::util::data_range( - self.bytes()?, - self.address(), - address, - size, - )) - } - - fn compressed_file_range(&self) -> read::Result { - Ok(if let Some(data) = self.maybe_compressed()? { - data - } else if let Some(data) = self.maybe_compressed_gnu()? { - data - } else { - CompressedFileRange::none(self.file_range()) - }) - } - - fn compressed_data(&self) -> read::Result> { - self.compressed_file_range()?.data(self.file.data) - } - - fn name_bytes(&self) -> read::Result<&[u8]> { - self.file - .sections - .section_name(self.file.endian, self.section) - } - - fn name(&self) -> read::Result<&str> { - let name = self.name_bytes()?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 ELF section name") - } - - #[inline] - fn segment_name_bytes(&self) -> read::Result> { - Ok(None) - } - - #[inline] - fn segment_name(&self) -> read::Result> { - Ok(None) - } - - fn kind(&self) -> SectionKind { - let flags = self.section.sh_flags(self.file.endian).into(); - let sh_type = self.section.sh_type(self.file.endian); - match sh_type { - elf::SHT_PROGBITS => { - if flags & u64::from(elf::SHF_ALLOC) != 0 { - if flags & u64::from(elf::SHF_EXECINSTR) != 0 { - SectionKind::Text - } else if flags & u64::from(elf::SHF_TLS) != 0 { - SectionKind::Tls - } else if flags & u64::from(elf::SHF_WRITE) != 0 { - SectionKind::Data - } else if flags & u64::from(elf::SHF_STRINGS) != 0 { - SectionKind::ReadOnlyString - } else { - SectionKind::ReadOnlyData - } - } else if flags & u64::from(elf::SHF_STRINGS) != 0 { - SectionKind::OtherString - } else { - SectionKind::Other - } - } - elf::SHT_NOBITS => { - if flags & u64::from(elf::SHF_TLS) != 0 { - SectionKind::UninitializedTls - } else { - SectionKind::UninitializedData - } - } - elf::SHT_NOTE => SectionKind::Note, - elf::SHT_NULL - | elf::SHT_SYMTAB - | elf::SHT_STRTAB - | elf::SHT_RELA - | elf::SHT_HASH - | 
elf::SHT_DYNAMIC - | elf::SHT_REL - | elf::SHT_DYNSYM - | elf::SHT_GROUP => SectionKind::Metadata, - _ => SectionKind::Elf(sh_type), - } - } - - fn relocations(&self) -> ElfSectionRelocationIterator<'data, 'file, Elf, R> { - ElfSectionRelocationIterator { - section_index: self.index, - file: self.file, - relocations: None, - } - } - - fn flags(&self) -> SectionFlags { - SectionFlags::Elf { - sh_flags: self.section.sh_flags(self.file.endian).into(), - } - } -} - -/// A trait for generic access to `SectionHeader32` and `SectionHeader64`. -#[allow(missing_docs)] -pub trait SectionHeader: Debug + Pod { - type Elf: FileHeader; - type Word: Into; - type Endian: endian::Endian; - - fn sh_name(&self, endian: Self::Endian) -> u32; - fn sh_type(&self, endian: Self::Endian) -> u32; - fn sh_flags(&self, endian: Self::Endian) -> Self::Word; - fn sh_addr(&self, endian: Self::Endian) -> Self::Word; - fn sh_offset(&self, endian: Self::Endian) -> Self::Word; - fn sh_size(&self, endian: Self::Endian) -> Self::Word; - fn sh_link(&self, endian: Self::Endian) -> u32; - fn sh_info(&self, endian: Self::Endian) -> u32; - fn sh_addralign(&self, endian: Self::Endian) -> Self::Word; - fn sh_entsize(&self, endian: Self::Endian) -> Self::Word; - - /// Parse the section name from the string table. - fn name<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - strings: StringTable<'data, R>, - ) -> read::Result<&'data [u8]> { - strings - .get(self.sh_name(endian)) - .read_error("Invalid ELF section name offset") - } - - /// Return the offset and size of the section in the file. - /// - /// Returns `None` for sections that have no data in the file. - fn file_range(&self, endian: Self::Endian) -> Option<(u64, u64)> { - if self.sh_type(endian) == elf::SHT_NOBITS { - None - } else { - Some((self.sh_offset(endian).into(), self.sh_size(endian).into())) - } - } - - /// Return the section data. - /// - /// Returns `Ok(&[])` if the section has no data. - /// Returns `Err` for invalid values. 
- fn data<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result<&'data [u8]> { - if let Some((offset, size)) = self.file_range(endian) { - data.read_bytes_at(offset, size) - .read_error("Invalid ELF section size or offset") - } else { - Ok(&[]) - } - } - - /// Return the section data as a slice of the given type. - /// - /// Allows padding at the end of the data. - /// Returns `Ok(&[])` if the section has no data. - /// Returns `Err` for invalid values, including bad alignment. - fn data_as_array<'data, T: Pod, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result<&'data [T]> { - let mut data = self.data(endian, data).map(Bytes)?; - data.read_slice(data.len() / mem::size_of::()) - .read_error("Invalid ELF section size or offset") - } - - /// Return the strings in the section. - /// - /// Returns `Ok(None)` if the section does not contain strings. - /// Returns `Err` for invalid values. - fn strings<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result>> { - if self.sh_type(endian) != elf::SHT_STRTAB { - return Ok(None); - } - let str_offset = self.sh_offset(endian).into(); - let str_size = self.sh_size(endian).into(); - let str_end = str_offset - .checked_add(str_size) - .read_error("Invalid ELF string section offset or size")?; - Ok(Some(StringTable::new(data, str_offset, str_end))) - } - - /// Return the symbols in the section. - /// - /// Also finds the linked string table in `sections`. - /// - /// `section_index` must be the 0-based index of this section, and is used - /// to find the corresponding extended section index table in `sections`. - /// - /// Returns `Ok(None)` if the section does not contain symbols. - /// Returns `Err` for invalid values. 
- fn symbols<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - sections: &SectionTable<'data, Self::Elf, R>, - section_index: SectionIndex, - ) -> read::Result>> { - let sh_type = self.sh_type(endian); - if sh_type != elf::SHT_SYMTAB && sh_type != elf::SHT_DYNSYM { - return Ok(None); - } - SymbolTable::parse(endian, data, sections, section_index, self).map(Some) - } - - /// Return the `Elf::Rel` entries in the section. - /// - /// Also returns the linked symbol table index. - /// - /// Returns `Ok(None)` if the section does not contain relocations. - /// Returns `Err` for invalid values. - fn rel<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result::Rel], SectionIndex)>> { - if self.sh_type(endian) != elf::SHT_REL { - return Ok(None); - } - let rel = self - .data_as_array(endian, data) - .read_error("Invalid ELF relocation section offset or size")?; - let link = SectionIndex(self.sh_link(endian) as usize); - Ok(Some((rel, link))) - } - - /// Return the `Elf::Rela` entries in the section. - /// - /// Also returns the linked symbol table index. - /// - /// Returns `Ok(None)` if the section does not contain relocations. - /// Returns `Err` for invalid values. - fn rela<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result::Rela], SectionIndex)>> { - if self.sh_type(endian) != elf::SHT_RELA { - return Ok(None); - } - let rela = self - .data_as_array(endian, data) - .read_error("Invalid ELF relocation section offset or size")?; - let link = SectionIndex(self.sh_link(endian) as usize); - Ok(Some((rela, link))) - } - - /// Return entries in a dynamic section. - /// - /// Also returns the linked string table index. - /// - /// Returns `Ok(None)` if the section type is not `SHT_DYNAMIC`. - /// Returns `Err` for invalid values. 
- fn dynamic<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result::Dyn], SectionIndex)>> { - if self.sh_type(endian) != elf::SHT_DYNAMIC { - return Ok(None); - } - let dynamic = self - .data_as_array(endian, data) - .read_error("Invalid ELF dynamic section offset or size")?; - let link = SectionIndex(self.sh_link(endian) as usize); - Ok(Some((dynamic, link))) - } - - /// Return a note iterator for the section data. - /// - /// Returns `Ok(None)` if the section does not contain notes. - /// Returns `Err` for invalid values. - fn notes<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result>> { - if self.sh_type(endian) != elf::SHT_NOTE { - return Ok(None); - } - let data = self - .data(endian, data) - .read_error("Invalid ELF note section offset or size")?; - let notes = NoteIterator::new(endian, self.sh_addralign(endian), data)?; - Ok(Some(notes)) - } - - /// Return the contents of a group section. - /// - /// The first value is a `GRP_*` value, and the remaining values - /// are section indices. - /// - /// Returns `Ok(None)` if the section does not define a group. - /// Returns `Err` for invalid values. - fn group<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result])>> { - if self.sh_type(endian) != elf::SHT_GROUP { - return Ok(None); - } - let mut data = self - .data(endian, data) - .read_error("Invalid ELF group section offset or size") - .map(Bytes)?; - let flag = data - .read::>() - .read_error("Invalid ELF group section offset or size")? - .get(endian); - let count = data.len() / mem::size_of::>(); - let sections = data - .read_slice(count) - .read_error("Invalid ELF group section offset or size")?; - Ok(Some((flag, sections))) - } - - /// Return the header of a SysV hash section. - /// - /// Returns `Ok(None)` if the section does not contain a SysV hash. - /// Returns `Err` for invalid values. 
- fn hash_header<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result>> { - if self.sh_type(endian) != elf::SHT_HASH { - return Ok(None); - } - let data = self - .data(endian, data) - .read_error("Invalid ELF hash section offset or size")?; - let header = data - .read_at::>(0) - .read_error("Invalid hash header")?; - Ok(Some(header)) - } - - /// Return the contents of a SysV hash section. - /// - /// Also returns the linked symbol table index. - /// - /// Returns `Ok(None)` if the section does not contain a SysV hash. - /// Returns `Err` for invalid values. - fn hash<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result, SectionIndex)>> { - if self.sh_type(endian) != elf::SHT_HASH { - return Ok(None); - } - let data = self - .data(endian, data) - .read_error("Invalid ELF hash section offset or size")?; - let hash = HashTable::parse(endian, data)?; - let link = SectionIndex(self.sh_link(endian) as usize); - Ok(Some((hash, link))) - } - - /// Return the header of a GNU hash section. - /// - /// Returns `Ok(None)` if the section does not contain a GNU hash. - /// Returns `Err` for invalid values. - fn gnu_hash_header<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result>> { - if self.sh_type(endian) != elf::SHT_GNU_HASH { - return Ok(None); - } - let data = self - .data(endian, data) - .read_error("Invalid ELF GNU hash section offset or size")?; - let header = data - .read_at::>(0) - .read_error("Invalid GNU hash header")?; - Ok(Some(header)) - } - - /// Return the contents of a GNU hash section. - /// - /// Also returns the linked symbol table index. - /// - /// Returns `Ok(None)` if the section does not contain a GNU hash. - /// Returns `Err` for invalid values. 
- fn gnu_hash<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result, SectionIndex)>> { - if self.sh_type(endian) != elf::SHT_GNU_HASH { - return Ok(None); - } - let data = self - .data(endian, data) - .read_error("Invalid ELF GNU hash section offset or size")?; - let hash = GnuHashTable::parse(endian, data)?; - let link = SectionIndex(self.sh_link(endian) as usize); - Ok(Some((hash, link))) - } - - /// Return the contents of a `SHT_GNU_VERSYM` section. - /// - /// Also returns the linked symbol table index. - /// - /// Returns `Ok(None)` if the section type is not `SHT_GNU_VERSYM`. - /// Returns `Err` for invalid values. - fn gnu_versym<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result], SectionIndex)>> { - if self.sh_type(endian) != elf::SHT_GNU_VERSYM { - return Ok(None); - } - let versym = self - .data_as_array(endian, data) - .read_error("Invalid ELF GNU versym section offset or size")?; - let link = SectionIndex(self.sh_link(endian) as usize); - Ok(Some((versym, link))) - } - - /// Return an iterator for the entries of a `SHT_GNU_VERDEF` section. - /// - /// Also returns the linked string table index. - /// - /// Returns `Ok(None)` if the section type is not `SHT_GNU_VERDEF`. - /// Returns `Err` for invalid values. - fn gnu_verdef<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result, SectionIndex)>> { - if self.sh_type(endian) != elf::SHT_GNU_VERDEF { - return Ok(None); - } - let verdef = self - .data(endian, data) - .read_error("Invalid ELF GNU verdef section offset or size")?; - let link = SectionIndex(self.sh_link(endian) as usize); - Ok(Some((VerdefIterator::new(endian, verdef), link))) - } - - /// Return an iterator for the entries of a `SHT_GNU_VERNEED` section. - /// - /// Also returns the linked string table index. - /// - /// Returns `Ok(None)` if the section type is not `SHT_GNU_VERNEED`. - /// Returns `Err` for invalid values. 
- fn gnu_verneed<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result, SectionIndex)>> { - if self.sh_type(endian) != elf::SHT_GNU_VERNEED { - return Ok(None); - } - let verneed = self - .data(endian, data) - .read_error("Invalid ELF GNU verneed section offset or size")?; - let link = SectionIndex(self.sh_link(endian) as usize); - Ok(Some((VerneedIterator::new(endian, verneed), link))) - } - - /// Return the contents of a `SHT_GNU_ATTRIBUTES` section. - /// - /// Returns `Ok(None)` if the section type is not `SHT_GNU_ATTRIBUTES`. - /// Returns `Err` for invalid values. - fn gnu_attributes<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result>> { - if self.sh_type(endian) != elf::SHT_GNU_ATTRIBUTES { - return Ok(None); - } - self.attributes(endian, data).map(Some) - } - - /// Parse the contents of the section as attributes. - /// - /// This function does not check whether section type corresponds - /// to a section that contains attributes. - /// - /// Returns `Err` for invalid values. - fn attributes<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result> { - let data = self.data(endian, data)?; - AttributesSection::new(endian, data) - } - - /// Parse the compression header if present. - /// - /// Returns the header, and the offset and size of the compressed section data - /// in the file. - /// - /// Returns `Ok(None)` if the section flags do not have `SHF_COMPRESSED`. - /// Returns `Err` for invalid values. 
- fn compression<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result< - Option<( - &'data ::CompressionHeader, - u64, - u64, - )>, - > { - if (self.sh_flags(endian).into() & u64::from(elf::SHF_COMPRESSED)) == 0 { - return Ok(None); - } - let (section_offset, section_size) = self - .file_range(endian) - .read_error("Invalid ELF compressed section type")?; - let mut offset = section_offset; - let header = data - .read::<::CompressionHeader>(&mut offset) - .read_error("Invalid ELF compressed section offset")?; - let compressed_size = section_size - .checked_sub(offset - section_offset) - .read_error("Invalid ELF compressed section size")?; - Ok(Some((header, offset, compressed_size))) - } -} - -impl SectionHeader for elf::SectionHeader32 { - type Elf = elf::FileHeader32; - type Word = u32; - type Endian = Endian; - - #[inline] - fn sh_name(&self, endian: Self::Endian) -> u32 { - self.sh_name.get(endian) - } - - #[inline] - fn sh_type(&self, endian: Self::Endian) -> u32 { - self.sh_type.get(endian) - } - - #[inline] - fn sh_flags(&self, endian: Self::Endian) -> Self::Word { - self.sh_flags.get(endian) - } - - #[inline] - fn sh_addr(&self, endian: Self::Endian) -> Self::Word { - self.sh_addr.get(endian) - } - - #[inline] - fn sh_offset(&self, endian: Self::Endian) -> Self::Word { - self.sh_offset.get(endian) - } - - #[inline] - fn sh_size(&self, endian: Self::Endian) -> Self::Word { - self.sh_size.get(endian) - } - - #[inline] - fn sh_link(&self, endian: Self::Endian) -> u32 { - self.sh_link.get(endian) - } - - #[inline] - fn sh_info(&self, endian: Self::Endian) -> u32 { - self.sh_info.get(endian) - } - - #[inline] - fn sh_addralign(&self, endian: Self::Endian) -> Self::Word { - self.sh_addralign.get(endian) - } - - #[inline] - fn sh_entsize(&self, endian: Self::Endian) -> Self::Word { - self.sh_entsize.get(endian) - } -} - -impl SectionHeader for elf::SectionHeader64 { - type Word = u64; - type Endian = Endian; - type Elf = 
elf::FileHeader64; - - #[inline] - fn sh_name(&self, endian: Self::Endian) -> u32 { - self.sh_name.get(endian) - } - - #[inline] - fn sh_type(&self, endian: Self::Endian) -> u32 { - self.sh_type.get(endian) - } - - #[inline] - fn sh_flags(&self, endian: Self::Endian) -> Self::Word { - self.sh_flags.get(endian) - } - - #[inline] - fn sh_addr(&self, endian: Self::Endian) -> Self::Word { - self.sh_addr.get(endian) - } - - #[inline] - fn sh_offset(&self, endian: Self::Endian) -> Self::Word { - self.sh_offset.get(endian) - } - - #[inline] - fn sh_size(&self, endian: Self::Endian) -> Self::Word { - self.sh_size.get(endian) - } - - #[inline] - fn sh_link(&self, endian: Self::Endian) -> u32 { - self.sh_link.get(endian) - } - - #[inline] - fn sh_info(&self, endian: Self::Endian) -> u32 { - self.sh_info.get(endian) - } - - #[inline] - fn sh_addralign(&self, endian: Self::Endian) -> Self::Word { - self.sh_addralign.get(endian) - } - - #[inline] - fn sh_entsize(&self, endian: Self::Endian) -> Self::Word { - self.sh_entsize.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/segment.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/segment.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/segment.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/segment.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,332 +0,0 @@ -use core::fmt::Debug; -use core::{mem, slice, str}; - -use crate::elf; -use crate::endian::{self, Endianness}; -use crate::pod::Pod; -use crate::read::{self, Bytes, ObjectSegment, ReadError, ReadRef, SegmentFlags}; - -use super::{ElfFile, FileHeader, NoteIterator}; - -/// An iterator over the segments of an `ElfFile32`. -pub type ElfSegmentIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSegmentIterator<'data, 'file, elf::FileHeader32, R>; -/// An iterator over the segments of an `ElfFile64`. 
-pub type ElfSegmentIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSegmentIterator<'data, 'file, elf::FileHeader64, R>; - -/// An iterator over the segments of an `ElfFile`. -#[derive(Debug)] -pub struct ElfSegmentIterator<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file ElfFile<'data, Elf, R>, - pub(super) iter: slice::Iter<'data, Elf::ProgramHeader>, -} - -impl<'data, 'file, Elf, R> Iterator for ElfSegmentIterator<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - type Item = ElfSegment<'data, 'file, Elf, R>; - - fn next(&mut self) -> Option { - for segment in self.iter.by_ref() { - if segment.p_type(self.file.endian) == elf::PT_LOAD { - return Some(ElfSegment { - file: self.file, - segment, - }); - } - } - None - } -} - -/// A segment of an `ElfFile32`. -pub type ElfSegment32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSegment<'data, 'file, elf::FileHeader32, R>; -/// A segment of an `ElfFile64`. -pub type ElfSegment64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSegment<'data, 'file, elf::FileHeader64, R>; - -/// A segment of an `ElfFile`. 
-#[derive(Debug)] -pub struct ElfSegment<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file ElfFile<'data, Elf, R>, - pub(super) segment: &'data Elf::ProgramHeader, -} - -impl<'data, 'file, Elf: FileHeader, R: ReadRef<'data>> ElfSegment<'data, 'file, Elf, R> { - fn bytes(&self) -> read::Result<&'data [u8]> { - self.segment - .data(self.file.endian, self.file.data) - .read_error("Invalid ELF segment size or offset") - } -} - -impl<'data, 'file, Elf, R> read::private::Sealed for ElfSegment<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Elf, R> ObjectSegment<'data> for ElfSegment<'data, 'file, Elf, R> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - #[inline] - fn address(&self) -> u64 { - self.segment.p_vaddr(self.file.endian).into() - } - - #[inline] - fn size(&self) -> u64 { - self.segment.p_memsz(self.file.endian).into() - } - - #[inline] - fn align(&self) -> u64 { - self.segment.p_align(self.file.endian).into() - } - - #[inline] - fn file_range(&self) -> (u64, u64) { - self.segment.file_range(self.file.endian) - } - - #[inline] - fn data(&self) -> read::Result<&'data [u8]> { - self.bytes() - } - - fn data_range(&self, address: u64, size: u64) -> read::Result> { - Ok(read::util::data_range( - self.bytes()?, - self.address(), - address, - size, - )) - } - - #[inline] - fn name_bytes(&self) -> read::Result> { - Ok(None) - } - - #[inline] - fn name(&self) -> read::Result> { - Ok(None) - } - - #[inline] - fn flags(&self) -> SegmentFlags { - let p_flags = self.segment.p_flags(self.file.endian); - SegmentFlags::Elf { p_flags } - } -} - -/// A trait for generic access to `ProgramHeader32` and `ProgramHeader64`. 
-#[allow(missing_docs)] -pub trait ProgramHeader: Debug + Pod { - type Elf: FileHeader; - type Word: Into; - type Endian: endian::Endian; - - fn p_type(&self, endian: Self::Endian) -> u32; - fn p_flags(&self, endian: Self::Endian) -> u32; - fn p_offset(&self, endian: Self::Endian) -> Self::Word; - fn p_vaddr(&self, endian: Self::Endian) -> Self::Word; - fn p_paddr(&self, endian: Self::Endian) -> Self::Word; - fn p_filesz(&self, endian: Self::Endian) -> Self::Word; - fn p_memsz(&self, endian: Self::Endian) -> Self::Word; - fn p_align(&self, endian: Self::Endian) -> Self::Word; - - /// Return the offset and size of the segment in the file. - fn file_range(&self, endian: Self::Endian) -> (u64, u64) { - (self.p_offset(endian).into(), self.p_filesz(endian).into()) - } - - /// Return the segment data. - /// - /// Returns `Err` for invalid values. - fn data<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> Result<&'data [u8], ()> { - let (offset, size) = self.file_range(endian); - data.read_bytes_at(offset, size) - } - - /// Return the segment data as a slice of the given type. - /// - /// Allows padding at the end of the data. - /// Returns `Ok(&[])` if the segment has no data. - /// Returns `Err` for invalid values, including bad alignment. - fn data_as_array<'data, T: Pod, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> Result<&'data [T], ()> { - let mut data = self.data(endian, data).map(Bytes)?; - data.read_slice(data.len() / mem::size_of::()) - } - - /// Return the segment data in the given virtual address range - /// - /// Returns `Ok(None)` if the segment does not contain the address. - /// Returns `Err` for invalid values. 
- fn data_range<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - address: u64, - size: u64, - ) -> Result, ()> { - Ok(read::util::data_range( - self.data(endian, data)?, - self.p_vaddr(endian).into(), - address, - size, - )) - } - - /// Return entries in a dynamic segment. - /// - /// Returns `Ok(None)` if the segment is not `PT_DYNAMIC`. - /// Returns `Err` for invalid values. - fn dynamic<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result::Dyn]>> { - if self.p_type(endian) != elf::PT_DYNAMIC { - return Ok(None); - } - let dynamic = self - .data_as_array(endian, data) - .read_error("Invalid ELF dynamic segment offset or size")?; - Ok(Some(dynamic)) - } - - /// Return a note iterator for the segment data. - /// - /// Returns `Ok(None)` if the segment does not contain notes. - /// Returns `Err` for invalid values. - fn notes<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> read::Result>> { - if self.p_type(endian) != elf::PT_NOTE { - return Ok(None); - } - let data = self - .data(endian, data) - .read_error("Invalid ELF note segment offset or size")?; - let notes = NoteIterator::new(endian, self.p_align(endian), data)?; - Ok(Some(notes)) - } -} - -impl ProgramHeader for elf::ProgramHeader32 { - type Word = u32; - type Endian = Endian; - type Elf = elf::FileHeader32; - - #[inline] - fn p_type(&self, endian: Self::Endian) -> u32 { - self.p_type.get(endian) - } - - #[inline] - fn p_flags(&self, endian: Self::Endian) -> u32 { - self.p_flags.get(endian) - } - - #[inline] - fn p_offset(&self, endian: Self::Endian) -> Self::Word { - self.p_offset.get(endian) - } - - #[inline] - fn p_vaddr(&self, endian: Self::Endian) -> Self::Word { - self.p_vaddr.get(endian) - } - - #[inline] - fn p_paddr(&self, endian: Self::Endian) -> Self::Word { - self.p_paddr.get(endian) - } - - #[inline] - fn p_filesz(&self, endian: Self::Endian) -> Self::Word { - self.p_filesz.get(endian) - } - - #[inline] 
- fn p_memsz(&self, endian: Self::Endian) -> Self::Word { - self.p_memsz.get(endian) - } - - #[inline] - fn p_align(&self, endian: Self::Endian) -> Self::Word { - self.p_align.get(endian) - } -} - -impl ProgramHeader for elf::ProgramHeader64 { - type Word = u64; - type Endian = Endian; - type Elf = elf::FileHeader64; - - #[inline] - fn p_type(&self, endian: Self::Endian) -> u32 { - self.p_type.get(endian) - } - - #[inline] - fn p_flags(&self, endian: Self::Endian) -> u32 { - self.p_flags.get(endian) - } - - #[inline] - fn p_offset(&self, endian: Self::Endian) -> Self::Word { - self.p_offset.get(endian) - } - - #[inline] - fn p_vaddr(&self, endian: Self::Endian) -> Self::Word { - self.p_vaddr.get(endian) - } - - #[inline] - fn p_paddr(&self, endian: Self::Endian) -> Self::Word { - self.p_paddr.get(endian) - } - - #[inline] - fn p_filesz(&self, endian: Self::Endian) -> Self::Word { - self.p_filesz.get(endian) - } - - #[inline] - fn p_memsz(&self, endian: Self::Endian) -> Self::Word { - self.p_memsz.get(endian) - } - - #[inline] - fn p_align(&self, endian: Self::Endian) -> Self::Word { - self.p_align.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/symbol.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/symbol.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/symbol.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/symbol.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,585 +0,0 @@ -use alloc::fmt; -use alloc::vec::Vec; -use core::fmt::Debug; -use core::slice; -use core::str; - -use crate::endian::{self, Endianness}; -use crate::pod::Pod; -use crate::read::util::StringTable; -use crate::read::{ - self, ObjectSymbol, ObjectSymbolTable, ReadError, ReadRef, SectionIndex, SymbolFlags, - SymbolIndex, SymbolKind, SymbolMap, SymbolMapEntry, SymbolScope, SymbolSection, -}; -use crate::{elf, U32}; - -use super::{FileHeader, SectionHeader, SectionTable}; - -/// A table of symbol entries in an ELF 
file. -/// -/// Also includes the string table used for the symbol names. -#[derive(Debug, Clone, Copy)] -pub struct SymbolTable<'data, Elf: FileHeader, R = &'data [u8]> -where - R: ReadRef<'data>, -{ - section: SectionIndex, - string_section: SectionIndex, - shndx_section: SectionIndex, - symbols: &'data [Elf::Sym], - strings: StringTable<'data, R>, - shndx: &'data [U32], -} - -impl<'data, Elf: FileHeader, R: ReadRef<'data>> Default for SymbolTable<'data, Elf, R> { - fn default() -> Self { - SymbolTable { - section: SectionIndex(0), - string_section: SectionIndex(0), - shndx_section: SectionIndex(0), - symbols: &[], - strings: Default::default(), - shndx: &[], - } - } -} - -impl<'data, Elf: FileHeader, R: ReadRef<'data>> SymbolTable<'data, Elf, R> { - /// Parse the given symbol table section. - pub fn parse( - endian: Elf::Endian, - data: R, - sections: &SectionTable<'data, Elf, R>, - section_index: SectionIndex, - section: &Elf::SectionHeader, - ) -> read::Result> { - debug_assert!( - section.sh_type(endian) == elf::SHT_DYNSYM - || section.sh_type(endian) == elf::SHT_SYMTAB - ); - - let symbols = section - .data_as_array(endian, data) - .read_error("Invalid ELF symbol table data")?; - - let link = SectionIndex(section.sh_link(endian) as usize); - let strings = sections.strings(endian, data, link)?; - - let mut shndx_section = SectionIndex(0); - let mut shndx = &[][..]; - for (i, s) in sections.iter().enumerate() { - if s.sh_type(endian) == elf::SHT_SYMTAB_SHNDX - && s.sh_link(endian) as usize == section_index.0 - { - shndx_section = SectionIndex(i); - shndx = s - .data_as_array(endian, data) - .read_error("Invalid ELF symtab_shndx data")?; - } - } - - Ok(SymbolTable { - section: section_index, - string_section: link, - symbols, - strings, - shndx, - shndx_section, - }) - } - - /// Return the section index of this symbol table. - #[inline] - pub fn section(&self) -> SectionIndex { - self.section - } - - /// Return the section index of the shndx table. 
- #[inline] - pub fn shndx_section(&self) -> SectionIndex { - self.shndx_section - } - - /// Return the section index of the linked string table. - #[inline] - pub fn string_section(&self) -> SectionIndex { - self.string_section - } - - /// Return the string table used for the symbol names. - #[inline] - pub fn strings(&self) -> StringTable<'data, R> { - self.strings - } - - /// Return the symbol table. - #[inline] - pub fn symbols(&self) -> &'data [Elf::Sym] { - self.symbols - } - - /// Iterate over the symbols. - #[inline] - pub fn iter(&self) -> slice::Iter<'data, Elf::Sym> { - self.symbols.iter() - } - - /// Return true if the symbol table is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.symbols.is_empty() - } - - /// The number of symbols. - #[inline] - pub fn len(&self) -> usize { - self.symbols.len() - } - - /// Return the symbol at the given index. - pub fn symbol(&self, index: usize) -> read::Result<&'data Elf::Sym> { - self.symbols - .get(index) - .read_error("Invalid ELF symbol index") - } - - /// Return the extended section index for the given symbol if present. - #[inline] - pub fn shndx(&self, endian: Elf::Endian, index: usize) -> Option { - self.shndx.get(index).map(|x| x.get(endian)) - } - - /// Return the section index for the given symbol. - /// - /// This uses the extended section index if present. - pub fn symbol_section( - &self, - endian: Elf::Endian, - symbol: &'data Elf::Sym, - index: usize, - ) -> read::Result> { - match symbol.st_shndx(endian) { - elf::SHN_UNDEF => Ok(None), - elf::SHN_XINDEX => self - .shndx(endian, index) - .read_error("Missing ELF symbol extended index") - .map(|index| Some(SectionIndex(index as usize))), - shndx if shndx < elf::SHN_LORESERVE => Ok(Some(SectionIndex(shndx.into()))), - _ => Ok(None), - } - } - - /// Return the symbol name for the given symbol. 
- pub fn symbol_name( - &self, - endian: Elf::Endian, - symbol: &'data Elf::Sym, - ) -> read::Result<&'data [u8]> { - symbol.name(endian, self.strings) - } - - /// Construct a map from addresses to a user-defined map entry. - pub fn map Option>( - &self, - endian: Elf::Endian, - f: F, - ) -> SymbolMap { - let mut symbols = Vec::with_capacity(self.symbols.len()); - for symbol in self.symbols { - if !symbol.is_definition(endian) { - continue; - } - if let Some(entry) = f(symbol) { - symbols.push(entry); - } - } - SymbolMap::new(symbols) - } -} - -/// A symbol table of an `ElfFile32`. -pub type ElfSymbolTable32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSymbolTable<'data, 'file, elf::FileHeader32, R>; -/// A symbol table of an `ElfFile32`. -pub type ElfSymbolTable64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSymbolTable<'data, 'file, elf::FileHeader64, R>; - -/// A symbol table of an `ElfFile`. -#[derive(Debug, Clone, Copy)] -pub struct ElfSymbolTable<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - pub(super) endian: Elf::Endian, - pub(super) symbols: &'file SymbolTable<'data, Elf, R>, -} - -impl<'data, 'file, Elf: FileHeader, R: ReadRef<'data>> read::private::Sealed - for ElfSymbolTable<'data, 'file, Elf, R> -{ -} - -impl<'data, 'file, Elf: FileHeader, R: ReadRef<'data>> ObjectSymbolTable<'data> - for ElfSymbolTable<'data, 'file, Elf, R> -{ - type Symbol = ElfSymbol<'data, 'file, Elf, R>; - type SymbolIterator = ElfSymbolIterator<'data, 'file, Elf, R>; - - fn symbols(&self) -> Self::SymbolIterator { - ElfSymbolIterator { - endian: self.endian, - symbols: self.symbols, - index: 0, - } - } - - fn symbol_by_index(&self, index: SymbolIndex) -> read::Result { - let symbol = self.symbols.symbol(index.0)?; - Ok(ElfSymbol { - endian: self.endian, - symbols: self.symbols, - index, - symbol, - }) - } -} - -/// An iterator over the symbols of an `ElfFile32`. 
-pub type ElfSymbolIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSymbolIterator<'data, 'file, elf::FileHeader32, R>; -/// An iterator over the symbols of an `ElfFile64`. -pub type ElfSymbolIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSymbolIterator<'data, 'file, elf::FileHeader64, R>; - -/// An iterator over the symbols of an `ElfFile`. -pub struct ElfSymbolIterator<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - pub(super) endian: Elf::Endian, - pub(super) symbols: &'file SymbolTable<'data, Elf, R>, - pub(super) index: usize, -} - -impl<'data, 'file, Elf: FileHeader, R: ReadRef<'data>> fmt::Debug - for ElfSymbolIterator<'data, 'file, Elf, R> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ElfSymbolIterator").finish() - } -} - -impl<'data, 'file, Elf: FileHeader, R: ReadRef<'data>> Iterator - for ElfSymbolIterator<'data, 'file, Elf, R> -{ - type Item = ElfSymbol<'data, 'file, Elf, R>; - - fn next(&mut self) -> Option { - let index = self.index; - let symbol = self.symbols.symbols.get(index)?; - self.index += 1; - Some(ElfSymbol { - endian: self.endian, - symbols: self.symbols, - index: SymbolIndex(index), - symbol, - }) - } -} - -/// A symbol of an `ElfFile32`. -pub type ElfSymbol32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSymbol<'data, 'file, elf::FileHeader32, R>; -/// A symbol of an `ElfFile64`. -pub type ElfSymbol64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - ElfSymbol<'data, 'file, elf::FileHeader64, R>; - -/// A symbol of an `ElfFile`. 
-#[derive(Debug, Clone, Copy)] -pub struct ElfSymbol<'data, 'file, Elf, R = &'data [u8]> -where - Elf: FileHeader, - R: ReadRef<'data>, -{ - pub(super) endian: Elf::Endian, - pub(super) symbols: &'file SymbolTable<'data, Elf, R>, - pub(super) index: SymbolIndex, - pub(super) symbol: &'data Elf::Sym, -} - -impl<'data, 'file, Elf: FileHeader, R: ReadRef<'data>> ElfSymbol<'data, 'file, Elf, R> { - /// Return a reference to the raw symbol structure. - #[inline] - pub fn raw_symbol(&self) -> &'data Elf::Sym { - self.symbol - } -} - -impl<'data, 'file, Elf: FileHeader, R: ReadRef<'data>> read::private::Sealed - for ElfSymbol<'data, 'file, Elf, R> -{ -} - -impl<'data, 'file, Elf: FileHeader, R: ReadRef<'data>> ObjectSymbol<'data> - for ElfSymbol<'data, 'file, Elf, R> -{ - #[inline] - fn index(&self) -> SymbolIndex { - self.index - } - - fn name_bytes(&self) -> read::Result<&'data [u8]> { - self.symbol.name(self.endian, self.symbols.strings()) - } - - fn name(&self) -> read::Result<&'data str> { - let name = self.name_bytes()?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 ELF symbol name") - } - - #[inline] - fn address(&self) -> u64 { - self.symbol.st_value(self.endian).into() - } - - #[inline] - fn size(&self) -> u64 { - self.symbol.st_size(self.endian).into() - } - - fn kind(&self) -> SymbolKind { - match self.symbol.st_type() { - elf::STT_NOTYPE if self.index.0 == 0 => SymbolKind::Null, - elf::STT_NOTYPE => SymbolKind::Label, - elf::STT_OBJECT | elf::STT_COMMON => SymbolKind::Data, - elf::STT_FUNC | elf::STT_GNU_IFUNC => SymbolKind::Text, - elf::STT_SECTION => SymbolKind::Section, - elf::STT_FILE => SymbolKind::File, - elf::STT_TLS => SymbolKind::Tls, - _ => SymbolKind::Unknown, - } - } - - fn section(&self) -> SymbolSection { - match self.symbol.st_shndx(self.endian) { - elf::SHN_UNDEF => SymbolSection::Undefined, - elf::SHN_ABS => { - if self.symbol.st_type() == elf::STT_FILE { - SymbolSection::None - } else { - SymbolSection::Absolute - } - } - 
elf::SHN_COMMON => SymbolSection::Common, - elf::SHN_XINDEX => match self.symbols.shndx(self.endian, self.index.0) { - Some(index) => SymbolSection::Section(SectionIndex(index as usize)), - None => SymbolSection::Unknown, - }, - index if index < elf::SHN_LORESERVE => { - SymbolSection::Section(SectionIndex(index as usize)) - } - _ => SymbolSection::Unknown, - } - } - - #[inline] - fn is_undefined(&self) -> bool { - self.symbol.st_shndx(self.endian) == elf::SHN_UNDEF - } - - #[inline] - fn is_definition(&self) -> bool { - self.symbol.is_definition(self.endian) - } - - #[inline] - fn is_common(&self) -> bool { - self.symbol.st_shndx(self.endian) == elf::SHN_COMMON - } - - #[inline] - fn is_weak(&self) -> bool { - self.symbol.st_bind() == elf::STB_WEAK - } - - fn scope(&self) -> SymbolScope { - if self.symbol.st_shndx(self.endian) == elf::SHN_UNDEF { - SymbolScope::Unknown - } else { - match self.symbol.st_bind() { - elf::STB_LOCAL => SymbolScope::Compilation, - elf::STB_GLOBAL | elf::STB_WEAK => { - if self.symbol.st_visibility() == elf::STV_HIDDEN { - SymbolScope::Linkage - } else { - SymbolScope::Dynamic - } - } - _ => SymbolScope::Unknown, - } - } - } - - #[inline] - fn is_global(&self) -> bool { - self.symbol.st_bind() != elf::STB_LOCAL - } - - #[inline] - fn is_local(&self) -> bool { - self.symbol.st_bind() == elf::STB_LOCAL - } - - #[inline] - fn flags(&self) -> SymbolFlags { - SymbolFlags::Elf { - st_info: self.symbol.st_info(), - st_other: self.symbol.st_other(), - } - } -} - -/// A trait for generic access to `Sym32` and `Sym64`. 
-#[allow(missing_docs)] -pub trait Sym: Debug + Pod { - type Word: Into; - type Endian: endian::Endian; - - fn st_name(&self, endian: Self::Endian) -> u32; - fn st_info(&self) -> u8; - fn st_bind(&self) -> u8; - fn st_type(&self) -> u8; - fn st_other(&self) -> u8; - fn st_visibility(&self) -> u8; - fn st_shndx(&self, endian: Self::Endian) -> u16; - fn st_value(&self, endian: Self::Endian) -> Self::Word; - fn st_size(&self, endian: Self::Endian) -> Self::Word; - - /// Parse the symbol name from the string table. - fn name<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - strings: StringTable<'data, R>, - ) -> read::Result<&'data [u8]> { - strings - .get(self.st_name(endian)) - .read_error("Invalid ELF symbol name offset") - } - - /// Return true if the symbol is undefined. - #[inline] - fn is_undefined(&self, endian: Self::Endian) -> bool { - self.st_shndx(endian) == elf::SHN_UNDEF - } - - /// Return true if the symbol is a definition of a function or data object. - fn is_definition(&self, endian: Self::Endian) -> bool { - let st_type = self.st_type(); - (st_type == elf::STT_NOTYPE || st_type == elf::STT_FUNC || st_type == elf::STT_OBJECT) - && self.st_shndx(endian) != elf::SHN_UNDEF - } -} - -impl Sym for elf::Sym32 { - type Word = u32; - type Endian = Endian; - - #[inline] - fn st_name(&self, endian: Self::Endian) -> u32 { - self.st_name.get(endian) - } - - #[inline] - fn st_info(&self) -> u8 { - self.st_info - } - - #[inline] - fn st_bind(&self) -> u8 { - self.st_bind() - } - - #[inline] - fn st_type(&self) -> u8 { - self.st_type() - } - - #[inline] - fn st_other(&self) -> u8 { - self.st_other - } - - #[inline] - fn st_visibility(&self) -> u8 { - self.st_visibility() - } - - #[inline] - fn st_shndx(&self, endian: Self::Endian) -> u16 { - self.st_shndx.get(endian) - } - - #[inline] - fn st_value(&self, endian: Self::Endian) -> Self::Word { - self.st_value.get(endian) - } - - #[inline] - fn st_size(&self, endian: Self::Endian) -> Self::Word { - 
self.st_size.get(endian) - } -} - -impl Sym for elf::Sym64 { - type Word = u64; - type Endian = Endian; - - #[inline] - fn st_name(&self, endian: Self::Endian) -> u32 { - self.st_name.get(endian) - } - - #[inline] - fn st_info(&self) -> u8 { - self.st_info - } - - #[inline] - fn st_bind(&self) -> u8 { - self.st_bind() - } - - #[inline] - fn st_type(&self) -> u8 { - self.st_type() - } - - #[inline] - fn st_other(&self) -> u8 { - self.st_other - } - - #[inline] - fn st_visibility(&self) -> u8 { - self.st_visibility() - } - - #[inline] - fn st_shndx(&self, endian: Self::Endian) -> u16 { - self.st_shndx.get(endian) - } - - #[inline] - fn st_value(&self, endian: Self::Endian) -> Self::Word { - self.st_value.get(endian) - } - - #[inline] - fn st_size(&self, endian: Self::Endian) -> Self::Word { - self.st_size.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/elf/version.rs s390-tools-2.33.1/rust-vendor/object/src/read/elf/version.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/elf/version.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/elf/version.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,421 +0,0 @@ -use alloc::vec::Vec; - -use crate::read::{Bytes, ReadError, ReadRef, Result, StringTable}; -use crate::{elf, endian}; - -use super::FileHeader; - -/// A version index. -#[derive(Debug, Default, Clone, Copy)] -pub struct VersionIndex(pub u16); - -impl VersionIndex { - /// Return the version index. - pub fn index(&self) -> u16 { - self.0 & elf::VERSYM_VERSION - } - - /// Return true if it is the local index. - pub fn is_local(&self) -> bool { - self.index() == elf::VER_NDX_LOCAL - } - - /// Return true if it is the global index. - pub fn is_global(&self) -> bool { - self.index() == elf::VER_NDX_GLOBAL - } - - /// Return the hidden flag. - pub fn is_hidden(&self) -> bool { - self.0 & elf::VERSYM_HIDDEN != 0 - } -} - -/// A version definition or requirement. 
-/// -/// This is derived from entries in the `SHT_GNU_verdef` and `SHT_GNU_verneed` sections. -#[derive(Debug, Default, Clone, Copy)] -pub struct Version<'data> { - name: &'data [u8], - hash: u32, - // Used to keep track of valid indices in `VersionTable`. - valid: bool, -} - -impl<'data> Version<'data> { - /// Return the version name. - pub fn name(&self) -> &'data [u8] { - self.name - } - - /// Return hash of the version name. - pub fn hash(&self) -> u32 { - self.hash - } -} - -/// A table of version definitions and requirements. -/// -/// It allows looking up the version information for a given symbol index. -/// -/// This is derived from entries in the `SHT_GNU_versym`, `SHT_GNU_verdef` and `SHT_GNU_verneed` sections. -#[derive(Debug, Clone)] -pub struct VersionTable<'data, Elf: FileHeader> { - symbols: &'data [elf::Versym], - versions: Vec>, -} - -impl<'data, Elf: FileHeader> Default for VersionTable<'data, Elf> { - fn default() -> Self { - VersionTable { - symbols: &[], - versions: Vec::new(), - } - } -} - -impl<'data, Elf: FileHeader> VersionTable<'data, Elf> { - /// Parse the version sections. - pub fn parse>( - endian: Elf::Endian, - versyms: &'data [elf::Versym], - verdefs: Option>, - verneeds: Option>, - strings: StringTable<'data, R>, - ) -> Result { - let mut max_index = 0; - if let Some(mut verdefs) = verdefs.clone() { - while let Some((verdef, _)) = verdefs.next()? { - if verdef.vd_flags.get(endian) & elf::VER_FLG_BASE != 0 { - continue; - } - let index = verdef.vd_ndx.get(endian) & elf::VERSYM_VERSION; - if max_index < index { - max_index = index; - } - } - } - if let Some(mut verneeds) = verneeds.clone() { - while let Some((_, mut vernauxs)) = verneeds.next()? { - while let Some(vernaux) = vernauxs.next()? { - let index = vernaux.vna_other.get(endian) & elf::VERSYM_VERSION; - if max_index < index { - max_index = index; - } - } - } - } - - // Indices should be sequential, but this could be up to - // 32k * size_of::() if max_index is bad. 
- let mut versions = vec![Version::default(); max_index as usize + 1]; - - if let Some(mut verdefs) = verdefs { - while let Some((verdef, mut verdauxs)) = verdefs.next()? { - if verdef.vd_flags.get(endian) & elf::VER_FLG_BASE != 0 { - continue; - } - let index = verdef.vd_ndx.get(endian) & elf::VERSYM_VERSION; - if index <= elf::VER_NDX_GLOBAL { - // TODO: return error? - continue; - } - if let Some(verdaux) = verdauxs.next()? { - versions[usize::from(index)] = Version { - name: verdaux.name(endian, strings)?, - hash: verdef.vd_hash.get(endian), - valid: true, - }; - } - } - } - if let Some(mut verneeds) = verneeds { - while let Some((_, mut vernauxs)) = verneeds.next()? { - while let Some(vernaux) = vernauxs.next()? { - let index = vernaux.vna_other.get(endian) & elf::VERSYM_VERSION; - if index <= elf::VER_NDX_GLOBAL { - // TODO: return error? - continue; - } - versions[usize::from(index)] = Version { - name: vernaux.name(endian, strings)?, - hash: vernaux.vna_hash.get(endian), - valid: true, - }; - } - } - } - - Ok(VersionTable { - symbols: versyms, - versions, - }) - } - - /// Return true if the version table is empty. - pub fn is_empty(&self) -> bool { - self.symbols.is_empty() - } - - /// Return version index for a given symbol index. - pub fn version_index(&self, endian: Elf::Endian, index: usize) -> VersionIndex { - let version_index = match self.symbols.get(index) { - Some(x) => x.0.get(endian), - // Ideally this would be VER_NDX_LOCAL for undefined symbols, - // but currently there are no checks that need this distinction. - None => elf::VER_NDX_GLOBAL, - }; - VersionIndex(version_index) - } - - /// Return version information for a given symbol version index. - /// - /// Returns `Ok(None)` for local and global versions. - /// Returns `Err(_)` if index is invalid. 
- pub fn version(&self, index: VersionIndex) -> Result>> { - if index.index() <= elf::VER_NDX_GLOBAL { - return Ok(None); - } - self.versions - .get(usize::from(index.index())) - .filter(|version| version.valid) - .read_error("Invalid ELF symbol version index") - .map(Some) - } - - /// Return true if the given symbol index satisfies the requirements of `need`. - /// - /// Returns false for any error. - /// - /// Note: this function hasn't been fully tested and is likely to be incomplete. - pub fn matches(&self, endian: Elf::Endian, index: usize, need: Option<&Version<'_>>) -> bool { - let version_index = self.version_index(endian, index); - let def = match self.version(version_index) { - Ok(def) => def, - Err(_) => return false, - }; - match (def, need) { - (Some(def), Some(need)) => need.hash == def.hash && need.name == def.name, - (None, Some(_need)) => { - // Version must be present if needed. - false - } - (Some(_def), None) => { - // For a dlsym call, use the newest version. - // TODO: if not a dlsym call, then use the oldest version. - !version_index.is_hidden() - } - (None, None) => true, - } - } -} - -/// An iterator over the entries in an ELF `SHT_GNU_verdef` section. -#[derive(Debug, Clone)] -pub struct VerdefIterator<'data, Elf: FileHeader> { - endian: Elf::Endian, - data: Bytes<'data>, -} - -impl<'data, Elf: FileHeader> VerdefIterator<'data, Elf> { - pub(super) fn new(endian: Elf::Endian, data: &'data [u8]) -> Self { - VerdefIterator { - endian, - data: Bytes(data), - } - } - - /// Return the next `Verdef` entry. 
- pub fn next( - &mut self, - ) -> Result, VerdauxIterator<'data, Elf>)>> { - if self.data.is_empty() { - return Ok(None); - } - - let verdef = self - .data - .read_at::>(0) - .read_error("ELF verdef is too short")?; - - let mut verdaux_data = self.data; - verdaux_data - .skip(verdef.vd_aux.get(self.endian) as usize) - .read_error("Invalid ELF vd_aux")?; - let verdaux = - VerdauxIterator::new(self.endian, verdaux_data.0, verdef.vd_cnt.get(self.endian)); - - let next = verdef.vd_next.get(self.endian); - if next != 0 { - self.data - .skip(next as usize) - .read_error("Invalid ELF vd_next")?; - } else { - self.data = Bytes(&[]); - } - Ok(Some((verdef, verdaux))) - } -} - -/// An iterator over the auxiliary records for an entry in an ELF `SHT_GNU_verdef` section. -#[derive(Debug, Clone)] -pub struct VerdauxIterator<'data, Elf: FileHeader> { - endian: Elf::Endian, - data: Bytes<'data>, - count: u16, -} - -impl<'data, Elf: FileHeader> VerdauxIterator<'data, Elf> { - pub(super) fn new(endian: Elf::Endian, data: &'data [u8], count: u16) -> Self { - VerdauxIterator { - endian, - data: Bytes(data), - count, - } - } - - /// Return the next `Verdaux` entry. - pub fn next(&mut self) -> Result>> { - if self.count == 0 { - return Ok(None); - } - - let verdaux = self - .data - .read_at::>(0) - .read_error("ELF verdaux is too short")?; - - self.data - .skip(verdaux.vda_next.get(self.endian) as usize) - .read_error("Invalid ELF vda_next")?; - self.count -= 1; - Ok(Some(verdaux)) - } -} - -/// An iterator over the entries in an ELF `SHT_GNU_verneed` section. -#[derive(Debug, Clone)] -pub struct VerneedIterator<'data, Elf: FileHeader> { - endian: Elf::Endian, - data: Bytes<'data>, -} - -impl<'data, Elf: FileHeader> VerneedIterator<'data, Elf> { - pub(super) fn new(endian: Elf::Endian, data: &'data [u8]) -> Self { - VerneedIterator { - endian, - data: Bytes(data), - } - } - - /// Return the next `Verneed` entry. 
- pub fn next( - &mut self, - ) -> Result< - Option<( - &'data elf::Verneed, - VernauxIterator<'data, Elf>, - )>, - > { - if self.data.is_empty() { - return Ok(None); - } - - let verneed = self - .data - .read_at::>(0) - .read_error("ELF verneed is too short")?; - - let mut vernaux_data = self.data; - vernaux_data - .skip(verneed.vn_aux.get(self.endian) as usize) - .read_error("Invalid ELF vn_aux")?; - let vernaux = - VernauxIterator::new(self.endian, vernaux_data.0, verneed.vn_cnt.get(self.endian)); - - let next = verneed.vn_next.get(self.endian); - if next != 0 { - self.data - .skip(next as usize) - .read_error("Invalid ELF vn_next")?; - } else { - self.data = Bytes(&[]); - } - Ok(Some((verneed, vernaux))) - } -} - -/// An iterator over the auxiliary records for an entry in an ELF `SHT_GNU_verneed` section. -#[derive(Debug, Clone)] -pub struct VernauxIterator<'data, Elf: FileHeader> { - endian: Elf::Endian, - data: Bytes<'data>, - count: u16, -} - -impl<'data, Elf: FileHeader> VernauxIterator<'data, Elf> { - pub(super) fn new(endian: Elf::Endian, data: &'data [u8], count: u16) -> Self { - VernauxIterator { - endian, - data: Bytes(data), - count, - } - } - - /// Return the next `Vernaux` entry. - pub fn next(&mut self) -> Result>> { - if self.count == 0 { - return Ok(None); - } - - let vernaux = self - .data - .read_at::>(0) - .read_error("ELF vernaux is too short")?; - - self.data - .skip(vernaux.vna_next.get(self.endian) as usize) - .read_error("Invalid ELF vna_next")?; - self.count -= 1; - Ok(Some(vernaux)) - } -} - -impl elf::Verdaux { - /// Parse the version name from the string table. - pub fn name<'data, R: ReadRef<'data>>( - &self, - endian: Endian, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]> { - strings - .get(self.vda_name.get(endian)) - .read_error("Invalid ELF vda_name") - } -} - -impl elf::Verneed { - /// Parse the file from the string table. 
- pub fn file<'data, R: ReadRef<'data>>( - &self, - endian: Endian, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]> { - strings - .get(self.vn_file.get(endian)) - .read_error("Invalid ELF vn_file") - } -} - -impl elf::Vernaux { - /// Parse the version name from the string table. - pub fn name<'data, R: ReadRef<'data>>( - &self, - endian: Endian, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]> { - strings - .get(self.vna_name.get(endian)) - .read_error("Invalid ELF vna_name") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/macho/dyld_cache.rs s390-tools-2.33.1/rust-vendor/object/src/read/macho/dyld_cache.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/macho/dyld_cache.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/macho/dyld_cache.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,343 +0,0 @@ -use alloc::vec::Vec; -use core::slice; - -use crate::read::{Error, File, ReadError, ReadRef, Result}; -use crate::{macho, Architecture, Endian, Endianness}; - -/// A parsed representation of the dyld shared cache. -#[derive(Debug)] -pub struct DyldCache<'data, E = Endianness, R = &'data [u8]> -where - E: Endian, - R: ReadRef<'data>, -{ - endian: E, - data: R, - subcaches: Vec>, - mappings: &'data [macho::DyldCacheMappingInfo], - images: &'data [macho::DyldCacheImageInfo], - arch: Architecture, -} - -/// Information about a subcache. -#[derive(Debug)] -pub struct DyldSubCache<'data, E = Endianness, R = &'data [u8]> -where - E: Endian, - R: ReadRef<'data>, -{ - data: R, - mappings: &'data [macho::DyldCacheMappingInfo], -} - -// This is the offset of the images_across_all_subcaches_count field. -const MIN_HEADER_SIZE_SUBCACHES: u32 = 0x1c4; - -impl<'data, E, R> DyldCache<'data, E, R> -where - E: Endian, - R: ReadRef<'data>, -{ - /// Parse the raw dyld shared cache data. 
- /// For shared caches from macOS 12 / iOS 15 and above, the subcache files need to be - /// supplied as well, in the correct order, with the .symbols subcache last (if present). - /// For example, data would be the data for dyld_shared_cache_x86_64, - /// and subcache_data would be the data for [dyld_shared_cache_x86_64.1, dyld_shared_cache_x86_64.2, ...] - pub fn parse(data: R, subcache_data: &[R]) -> Result { - let header = macho::DyldCacheHeader::parse(data)?; - let (arch, endian) = header.parse_magic()?; - let mappings = header.mappings(endian, data)?; - - let symbols_subcache_uuid = header.symbols_subcache_uuid(endian); - let subcaches_info = header.subcaches(endian, data)?.unwrap_or(&[]); - - if subcache_data.len() != subcaches_info.len() + symbols_subcache_uuid.is_some() as usize { - return Err(Error("Incorrect number of SubCaches")); - } - - // Split out the .symbols subcache data from the other subcaches. - let (symbols_subcache_data_and_uuid, subcache_data) = - if let Some(symbols_uuid) = symbols_subcache_uuid { - let (sym_data, rest_data) = subcache_data.split_last().unwrap(); - (Some((*sym_data, symbols_uuid)), rest_data) - } else { - (None, subcache_data) - }; - - // Read the regular SubCaches (.1, .2, ...), if present. - let mut subcaches = Vec::new(); - for (&data, info) in subcache_data.iter().zip(subcaches_info.iter()) { - let sc_header = macho::DyldCacheHeader::::parse(data)?; - if sc_header.uuid != info.uuid { - return Err(Error("Unexpected SubCache UUID")); - } - let mappings = sc_header.mappings(endian, data)?; - subcaches.push(DyldSubCache { data, mappings }); - } - - // Read the .symbols SubCache, if present. - // Other than the UUID verification, the symbols SubCache is currently unused. 
- let _symbols_subcache = match symbols_subcache_data_and_uuid { - Some((data, uuid)) => { - let sc_header = macho::DyldCacheHeader::::parse(data)?; - if sc_header.uuid != uuid { - return Err(Error("Unexpected .symbols SubCache UUID")); - } - let mappings = sc_header.mappings(endian, data)?; - Some(DyldSubCache { data, mappings }) - } - None => None, - }; - - let images = header.images(endian, data)?; - Ok(DyldCache { - endian, - data, - subcaches, - mappings, - images, - arch, - }) - } - - /// Get the architecture type of the file. - pub fn architecture(&self) -> Architecture { - self.arch - } - - /// Get the endianness of the file. - #[inline] - pub fn endianness(&self) -> Endianness { - if self.is_little_endian() { - Endianness::Little - } else { - Endianness::Big - } - } - - /// Return true if the file is little endian, false if it is big endian. - pub fn is_little_endian(&self) -> bool { - self.endian.is_little_endian() - } - - /// Iterate over the images in this cache. - pub fn images<'cache>(&'cache self) -> DyldCacheImageIterator<'data, 'cache, E, R> { - DyldCacheImageIterator { - cache: self, - iter: self.images.iter(), - } - } - - /// Find the address in a mapping and return the cache or subcache data it was found in, - /// together with the translated file offset. - pub fn data_and_offset_for_address(&self, address: u64) -> Option<(R, u64)> { - if let Some(file_offset) = address_to_file_offset(address, self.endian, self.mappings) { - return Some((self.data, file_offset)); - } - for subcache in &self.subcaches { - if let Some(file_offset) = - address_to_file_offset(address, self.endian, subcache.mappings) - { - return Some((subcache.data, file_offset)); - } - } - None - } -} - -/// An iterator over all the images (dylibs) in the dyld shared cache. 
-#[derive(Debug)] -pub struct DyldCacheImageIterator<'data, 'cache, E = Endianness, R = &'data [u8]> -where - E: Endian, - R: ReadRef<'data>, -{ - cache: &'cache DyldCache<'data, E, R>, - iter: slice::Iter<'data, macho::DyldCacheImageInfo>, -} - -impl<'data, 'cache, E, R> Iterator for DyldCacheImageIterator<'data, 'cache, E, R> -where - E: Endian, - R: ReadRef<'data>, -{ - type Item = DyldCacheImage<'data, 'cache, E, R>; - - fn next(&mut self) -> Option> { - let image_info = self.iter.next()?; - Some(DyldCacheImage { - cache: self.cache, - image_info, - }) - } -} - -/// One image (dylib) from inside the dyld shared cache. -#[derive(Debug)] -pub struct DyldCacheImage<'data, 'cache, E = Endianness, R = &'data [u8]> -where - E: Endian, - R: ReadRef<'data>, -{ - pub(crate) cache: &'cache DyldCache<'data, E, R>, - image_info: &'data macho::DyldCacheImageInfo, -} - -impl<'data, 'cache, E, R> DyldCacheImage<'data, 'cache, E, R> -where - E: Endian, - R: ReadRef<'data>, -{ - /// The file system path of this image. - pub fn path(&self) -> Result<&'data str> { - let path = self.image_info.path(self.cache.endian, self.cache.data)?; - // The path should always be ascii, so from_utf8 should always succeed. - let path = core::str::from_utf8(path).map_err(|_| Error("Path string not valid utf-8"))?; - Ok(path) - } - - /// The subcache data which contains the Mach-O header for this image, - /// together with the file offset at which this image starts. - pub fn image_data_and_offset(&self) -> Result<(R, u64)> { - let address = self.image_info.address.get(self.cache.endian); - self.cache - .data_and_offset_for_address(address) - .ok_or(Error("Address not found in any mapping")) - } - - /// Parse this image into an Object. - pub fn parse_object(&self) -> Result> { - File::parse_dyld_cache_image(self) - } -} - -impl macho::DyldCacheHeader { - /// Read the dyld cache header. 
- pub fn parse<'data, R: ReadRef<'data>>(data: R) -> Result<&'data Self> { - data.read_at::>(0) - .read_error("Invalid dyld cache header size or alignment") - } - - /// Returns (arch, endian) based on the magic string. - pub fn parse_magic(&self) -> Result<(Architecture, E)> { - let (arch, is_big_endian) = match &self.magic { - b"dyld_v1 i386\0" => (Architecture::I386, false), - b"dyld_v1 x86_64\0" => (Architecture::X86_64, false), - b"dyld_v1 x86_64h\0" => (Architecture::X86_64, false), - b"dyld_v1 ppc\0" => (Architecture::PowerPc, true), - b"dyld_v1 armv6\0" => (Architecture::Arm, false), - b"dyld_v1 armv7\0" => (Architecture::Arm, false), - b"dyld_v1 armv7f\0" => (Architecture::Arm, false), - b"dyld_v1 armv7s\0" => (Architecture::Arm, false), - b"dyld_v1 armv7k\0" => (Architecture::Arm, false), - b"dyld_v1 arm64\0" => (Architecture::Aarch64, false), - b"dyld_v1 arm64e\0" => (Architecture::Aarch64, false), - _ => return Err(Error("Unrecognized dyld cache magic")), - }; - let endian = - E::from_big_endian(is_big_endian).read_error("Unsupported dyld cache endian")?; - Ok((arch, endian)) - } - - /// Return the mapping information table. - pub fn mappings<'data, R: ReadRef<'data>>( - &self, - endian: E, - data: R, - ) -> Result<&'data [macho::DyldCacheMappingInfo]> { - data.read_slice_at::>( - self.mapping_offset.get(endian).into(), - self.mapping_count.get(endian) as usize, - ) - .read_error("Invalid dyld cache mapping size or alignment") - } - - /// Return the information about subcaches, if present. 
- pub fn subcaches<'data, R: ReadRef<'data>>( - &self, - endian: E, - data: R, - ) -> Result]>> { - if self.mapping_offset.get(endian) >= MIN_HEADER_SIZE_SUBCACHES { - let subcaches = data - .read_slice_at::>( - self.subcaches_offset.get(endian).into(), - self.subcaches_count.get(endian) as usize, - ) - .read_error("Invalid dyld subcaches size or alignment")?; - Ok(Some(subcaches)) - } else { - Ok(None) - } - } - - /// Return the UUID for the .symbols subcache, if present. - pub fn symbols_subcache_uuid(&self, endian: E) -> Option<[u8; 16]> { - if self.mapping_offset.get(endian) >= MIN_HEADER_SIZE_SUBCACHES { - let uuid = self.symbols_subcache_uuid; - if uuid != [0; 16] { - return Some(uuid); - } - } - None - } - - /// Return the image information table. - pub fn images<'data, R: ReadRef<'data>>( - &self, - endian: E, - data: R, - ) -> Result<&'data [macho::DyldCacheImageInfo]> { - if self.mapping_offset.get(endian) >= MIN_HEADER_SIZE_SUBCACHES { - data.read_slice_at::>( - self.images_across_all_subcaches_offset.get(endian).into(), - self.images_across_all_subcaches_count.get(endian) as usize, - ) - .read_error("Invalid dyld cache image size or alignment") - } else { - data.read_slice_at::>( - self.images_offset.get(endian).into(), - self.images_count.get(endian) as usize, - ) - .read_error("Invalid dyld cache image size or alignment") - } - } -} - -impl macho::DyldCacheImageInfo { - /// The file system path of this image. - pub fn path<'data, R: ReadRef<'data>>(&self, endian: E, data: R) -> Result<&'data [u8]> { - let r_start = self.path_file_offset.get(endian).into(); - let r_end = data.len().read_error("Couldn't get data len()")?; - data.read_bytes_at_until(r_start..r_end, 0) - .read_error("Couldn't read dyld cache image path") - } - - /// Find the file offset of the image by looking up its address in the mappings. 
- pub fn file_offset( - &self, - endian: E, - mappings: &[macho::DyldCacheMappingInfo], - ) -> Result { - let address = self.address.get(endian); - address_to_file_offset(address, endian, mappings) - .read_error("Invalid dyld cache image address") - } -} - -/// Find the file offset of the image by looking up its address in the mappings. -pub fn address_to_file_offset( - address: u64, - endian: E, - mappings: &[macho::DyldCacheMappingInfo], -) -> Option { - for mapping in mappings { - let mapping_address = mapping.address.get(endian); - if address >= mapping_address - && address < mapping_address.wrapping_add(mapping.size.get(endian)) - { - return Some(address - mapping_address + mapping.file_offset.get(endian)); - } - } - None -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/macho/fat.rs s390-tools-2.33.1/rust-vendor/object/src/read/macho/fat.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/macho/fat.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/macho/fat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,122 +0,0 @@ -use crate::read::{Architecture, Error, ReadError, ReadRef, Result}; -use crate::{macho, BigEndian, Pod}; - -pub use macho::{FatArch32, FatArch64, FatHeader}; - -impl FatHeader { - /// Attempt to parse a fat header. - /// - /// Does not validate the magic value. - pub fn parse<'data, R: ReadRef<'data>>(file: R) -> Result<&'data FatHeader> { - file.read_at::(0) - .read_error("Invalid fat header size or alignment") - } - - /// Attempt to parse a fat header and 32-bit fat arches. 
- pub fn parse_arch32<'data, R: ReadRef<'data>>(file: R) -> Result<&'data [FatArch32]> { - let mut offset = 0; - let header = file - .read::(&mut offset) - .read_error("Invalid fat header size or alignment")?; - if header.magic.get(BigEndian) != macho::FAT_MAGIC { - return Err(Error("Invalid 32-bit fat magic")); - } - file.read_slice::(&mut offset, header.nfat_arch.get(BigEndian) as usize) - .read_error("Invalid nfat_arch") - } - - /// Attempt to parse a fat header and 64-bit fat arches. - pub fn parse_arch64<'data, R: ReadRef<'data>>(file: R) -> Result<&'data [FatArch64]> { - let mut offset = 0; - let header = file - .read::(&mut offset) - .read_error("Invalid fat header size or alignment")?; - if header.magic.get(BigEndian) != macho::FAT_MAGIC_64 { - return Err(Error("Invalid 64-bit fat magic")); - } - file.read_slice::(&mut offset, header.nfat_arch.get(BigEndian) as usize) - .read_error("Invalid nfat_arch") - } -} - -/// A trait for generic access to `FatArch32` and `FatArch64`. -#[allow(missing_docs)] -pub trait FatArch: Pod { - type Word: Into; - - fn cputype(&self) -> u32; - fn cpusubtype(&self) -> u32; - fn offset(&self) -> Self::Word; - fn size(&self) -> Self::Word; - fn align(&self) -> u32; - - fn architecture(&self) -> Architecture { - match self.cputype() { - macho::CPU_TYPE_ARM => Architecture::Arm, - macho::CPU_TYPE_ARM64 => Architecture::Aarch64, - macho::CPU_TYPE_X86 => Architecture::I386, - macho::CPU_TYPE_X86_64 => Architecture::X86_64, - macho::CPU_TYPE_MIPS => Architecture::Mips, - macho::CPU_TYPE_POWERPC => Architecture::PowerPc, - macho::CPU_TYPE_POWERPC64 => Architecture::PowerPc64, - _ => Architecture::Unknown, - } - } - - fn file_range(&self) -> (u64, u64) { - (self.offset().into(), self.size().into()) - } - - fn data<'data, R: ReadRef<'data>>(&self, file: R) -> Result<&'data [u8]> { - file.read_bytes_at(self.offset().into(), self.size().into()) - .read_error("Invalid fat arch offset or size") - } -} - -impl FatArch for FatArch32 { - type 
Word = u32; - - fn cputype(&self) -> u32 { - self.cputype.get(BigEndian) - } - - fn cpusubtype(&self) -> u32 { - self.cpusubtype.get(BigEndian) - } - - fn offset(&self) -> Self::Word { - self.offset.get(BigEndian) - } - - fn size(&self) -> Self::Word { - self.size.get(BigEndian) - } - - fn align(&self) -> u32 { - self.align.get(BigEndian) - } -} - -impl FatArch for FatArch64 { - type Word = u64; - - fn cputype(&self) -> u32 { - self.cputype.get(BigEndian) - } - - fn cpusubtype(&self) -> u32 { - self.cpusubtype.get(BigEndian) - } - - fn offset(&self) -> Self::Word { - self.offset.get(BigEndian) - } - - fn size(&self) -> Self::Word { - self.size.get(BigEndian) - } - - fn align(&self) -> u32 { - self.align.get(BigEndian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/macho/file.rs s390-tools-2.33.1/rust-vendor/object/src/read/macho/file.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/macho/file.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/macho/file.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,731 +0,0 @@ -use alloc::vec::Vec; -use core::fmt::Debug; -use core::{mem, str}; - -use crate::read::{ - self, Architecture, ComdatKind, Error, Export, FileFlags, Import, NoDynamicRelocationIterator, - Object, ObjectComdat, ObjectKind, ObjectMap, ObjectSection, ReadError, ReadRef, Result, - SectionIndex, SymbolIndex, -}; -use crate::{endian, macho, BigEndian, ByteString, Endian, Endianness, Pod}; - -use super::{ - DyldCacheImage, LoadCommandIterator, MachOSection, MachOSectionInternal, MachOSectionIterator, - MachOSegment, MachOSegmentInternal, MachOSegmentIterator, MachOSymbol, MachOSymbolIterator, - MachOSymbolTable, Nlist, Section, Segment, SymbolTable, -}; - -/// A 32-bit Mach-O object file. -pub type MachOFile32<'data, Endian = Endianness, R = &'data [u8]> = - MachOFile<'data, macho::MachHeader32, R>; -/// A 64-bit Mach-O object file. 
-pub type MachOFile64<'data, Endian = Endianness, R = &'data [u8]> = - MachOFile<'data, macho::MachHeader64, R>; - -/// A partially parsed Mach-O file. -/// -/// Most of the functionality of this type is provided by the `Object` trait implementation. -#[derive(Debug)] -pub struct MachOFile<'data, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - pub(super) endian: Mach::Endian, - pub(super) data: R, - pub(super) header_offset: u64, - pub(super) header: &'data Mach, - pub(super) segments: Vec>, - pub(super) sections: Vec>, - pub(super) symbols: SymbolTable<'data, Mach, R>, -} - -impl<'data, Mach, R> MachOFile<'data, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - /// Parse the raw Mach-O file data. - pub fn parse(data: R) -> Result { - let header = Mach::parse(data, 0)?; - let endian = header.endian()?; - - // Build a list of segments and sections to make some operations more efficient. - let mut segments = Vec::new(); - let mut sections = Vec::new(); - let mut symbols = SymbolTable::default(); - if let Ok(mut commands) = header.load_commands(endian, data, 0) { - while let Ok(Some(command)) = commands.next() { - if let Some((segment, section_data)) = Mach::Segment::from_command(command)? { - let segment_index = segments.len(); - segments.push(MachOSegmentInternal { segment, data }); - for section in segment.sections(endian, section_data)? { - let index = SectionIndex(sections.len() + 1); - sections.push(MachOSectionInternal::parse(index, segment_index, section)); - } - } else if let Some(symtab) = command.symtab()? { - symbols = symtab.symbols(endian, data)?; - } - } - } - - Ok(MachOFile { - endian, - data, - header_offset: 0, - header, - segments, - sections, - symbols, - }) - } - - /// Parse the Mach-O file for the given image from the dyld shared cache. - /// This will read different sections from different subcaches, if necessary. 
- pub fn parse_dyld_cache_image<'cache, E: Endian>( - image: &DyldCacheImage<'data, 'cache, E, R>, - ) -> Result { - let (data, header_offset) = image.image_data_and_offset()?; - let header = Mach::parse(data, header_offset)?; - let endian = header.endian()?; - - // Build a list of sections to make some operations more efficient. - // Also build a list of segments, because we need to remember which ReadRef - // to read each section's data from. Only the DyldCache knows this information, - // and we won't have access to it once we've exited this function. - let mut segments = Vec::new(); - let mut sections = Vec::new(); - let mut linkedit_data: Option = None; - let mut symtab = None; - if let Ok(mut commands) = header.load_commands(endian, data, header_offset) { - while let Ok(Some(command)) = commands.next() { - if let Some((segment, section_data)) = Mach::Segment::from_command(command)? { - // Each segment can be stored in a different subcache. Get the segment's - // address and look it up in the cache mappings, to find the correct cache data. - let addr = segment.vmaddr(endian).into(); - let (data, _offset) = image - .cache - .data_and_offset_for_address(addr) - .read_error("Could not find segment data in dyld shared cache")?; - if segment.name() == macho::SEG_LINKEDIT.as_bytes() { - linkedit_data = Some(data); - } - let segment_index = segments.len(); - segments.push(MachOSegmentInternal { segment, data }); - - for section in segment.sections(endian, section_data)? { - let index = SectionIndex(sections.len() + 1); - sections.push(MachOSectionInternal::parse(index, segment_index, section)); - } - } else if let Some(st) = command.symtab()? { - symtab = Some(st); - } - } - } - - // The symbols are found in the __LINKEDIT segment, so make sure to read them from the - // correct subcache. 
- let symbols = match (symtab, linkedit_data) { - (Some(symtab), Some(linkedit_data)) => symtab.symbols(endian, linkedit_data)?, - _ => SymbolTable::default(), - }; - - Ok(MachOFile { - endian, - data, - header_offset, - header, - segments, - sections, - symbols, - }) - } - - /// Return the section at the given index. - #[inline] - pub(super) fn section_internal( - &self, - index: SectionIndex, - ) -> Result<&MachOSectionInternal<'data, Mach>> { - index - .0 - .checked_sub(1) - .and_then(|index| self.sections.get(index)) - .read_error("Invalid Mach-O section index") - } - - pub(super) fn segment_internal( - &self, - index: usize, - ) -> Result<&MachOSegmentInternal<'data, Mach, R>> { - self.segments - .get(index) - .read_error("Invalid Mach-O segment index") - } -} - -impl<'data, Mach, R> read::private::Sealed for MachOFile<'data, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Mach, R> Object<'data, 'file> for MachOFile<'data, Mach, R> -where - 'data: 'file, - Mach: MachHeader, - R: 'file + ReadRef<'data>, -{ - type Segment = MachOSegment<'data, 'file, Mach, R>; - type SegmentIterator = MachOSegmentIterator<'data, 'file, Mach, R>; - type Section = MachOSection<'data, 'file, Mach, R>; - type SectionIterator = MachOSectionIterator<'data, 'file, Mach, R>; - type Comdat = MachOComdat<'data, 'file, Mach, R>; - type ComdatIterator = MachOComdatIterator<'data, 'file, Mach, R>; - type Symbol = MachOSymbol<'data, 'file, Mach, R>; - type SymbolIterator = MachOSymbolIterator<'data, 'file, Mach, R>; - type SymbolTable = MachOSymbolTable<'data, 'file, Mach, R>; - type DynamicRelocationIterator = NoDynamicRelocationIterator; - - fn architecture(&self) -> Architecture { - match self.header.cputype(self.endian) { - macho::CPU_TYPE_ARM => Architecture::Arm, - macho::CPU_TYPE_ARM64 => Architecture::Aarch64, - macho::CPU_TYPE_ARM64_32 => Architecture::Aarch64_Ilp32, - macho::CPU_TYPE_X86 => Architecture::I386, - macho::CPU_TYPE_X86_64 => 
Architecture::X86_64, - macho::CPU_TYPE_MIPS => Architecture::Mips, - macho::CPU_TYPE_POWERPC => Architecture::PowerPc, - macho::CPU_TYPE_POWERPC64 => Architecture::PowerPc64, - _ => Architecture::Unknown, - } - } - - #[inline] - fn is_little_endian(&self) -> bool { - self.header.is_little_endian() - } - - #[inline] - fn is_64(&self) -> bool { - self.header.is_type_64() - } - - fn kind(&self) -> ObjectKind { - match self.header.filetype(self.endian) { - macho::MH_OBJECT => ObjectKind::Relocatable, - macho::MH_EXECUTE => ObjectKind::Executable, - macho::MH_CORE => ObjectKind::Core, - macho::MH_DYLIB => ObjectKind::Dynamic, - _ => ObjectKind::Unknown, - } - } - - fn segments(&'file self) -> MachOSegmentIterator<'data, 'file, Mach, R> { - MachOSegmentIterator { - file: self, - iter: self.segments.iter(), - } - } - - fn section_by_name_bytes( - &'file self, - section_name: &[u8], - ) -> Option> { - // Translate the "." prefix to the "__" prefix used by OSX/Mach-O, eg - // ".debug_info" to "__debug_info", and limit to 16 bytes total. - let system_name = if section_name.starts_with(b".") { - if section_name.len() > 15 { - Some(§ion_name[1..15]) - } else { - Some(§ion_name[1..]) - } - } else { - None - }; - let cmp_section_name = |section: &MachOSection<'data, 'file, Mach, R>| { - section - .name_bytes() - .map(|name| { - section_name == name - || system_name - .filter(|system_name| { - name.starts_with(b"__") && name[2..] 
== **system_name - }) - .is_some() - }) - .unwrap_or(false) - }; - - self.sections().find(cmp_section_name) - } - - fn section_by_index( - &'file self, - index: SectionIndex, - ) -> Result> { - let internal = *self.section_internal(index)?; - Ok(MachOSection { - file: self, - internal, - }) - } - - fn sections(&'file self) -> MachOSectionIterator<'data, 'file, Mach, R> { - MachOSectionIterator { - file: self, - iter: self.sections.iter(), - } - } - - fn comdats(&'file self) -> MachOComdatIterator<'data, 'file, Mach, R> { - MachOComdatIterator { file: self } - } - - fn symbol_by_index( - &'file self, - index: SymbolIndex, - ) -> Result> { - let nlist = self.symbols.symbol(index.0)?; - MachOSymbol::new(self, index, nlist).read_error("Unsupported Mach-O symbol index") - } - - fn symbols(&'file self) -> MachOSymbolIterator<'data, 'file, Mach, R> { - MachOSymbolIterator { - file: self, - index: 0, - } - } - - #[inline] - fn symbol_table(&'file self) -> Option> { - Some(MachOSymbolTable { file: self }) - } - - fn dynamic_symbols(&'file self) -> MachOSymbolIterator<'data, 'file, Mach, R> { - MachOSymbolIterator { - file: self, - index: self.symbols.len(), - } - } - - #[inline] - fn dynamic_symbol_table(&'file self) -> Option> { - None - } - - fn object_map(&'file self) -> ObjectMap<'data> { - self.symbols.object_map(self.endian) - } - - fn imports(&self) -> Result>> { - let mut dysymtab = None; - let mut libraries = Vec::new(); - let twolevel = self.header.flags(self.endian) & macho::MH_TWOLEVEL != 0; - if twolevel { - libraries.push(&[][..]); - } - let mut commands = self - .header - .load_commands(self.endian, self.data, self.header_offset)?; - while let Some(command) = commands.next()? { - if let Some(command) = command.dysymtab()? { - dysymtab = Some(command); - } - if twolevel { - if let Some(dylib) = command.dylib()? 
{ - libraries.push(command.string(self.endian, dylib.dylib.name)?); - } - } - } - - let mut imports = Vec::new(); - if let Some(dysymtab) = dysymtab { - let index = dysymtab.iundefsym.get(self.endian) as usize; - let number = dysymtab.nundefsym.get(self.endian) as usize; - for i in index..(index.wrapping_add(number)) { - let symbol = self.symbols.symbol(i)?; - let name = symbol.name(self.endian, self.symbols.strings())?; - let library = if twolevel { - libraries - .get(symbol.library_ordinal(self.endian) as usize) - .copied() - .read_error("Invalid Mach-O symbol library ordinal")? - } else { - &[] - }; - imports.push(Import { - name: ByteString(name), - library: ByteString(library), - }); - } - } - Ok(imports) - } - - fn exports(&self) -> Result>> { - let mut dysymtab = None; - let mut commands = self - .header - .load_commands(self.endian, self.data, self.header_offset)?; - while let Some(command) = commands.next()? { - if let Some(command) = command.dysymtab()? { - dysymtab = Some(command); - break; - } - } - - let mut exports = Vec::new(); - if let Some(dysymtab) = dysymtab { - let index = dysymtab.iextdefsym.get(self.endian) as usize; - let number = dysymtab.nextdefsym.get(self.endian) as usize; - for i in index..(index.wrapping_add(number)) { - let symbol = self.symbols.symbol(i)?; - let name = symbol.name(self.endian, self.symbols.strings())?; - let address = symbol.n_value(self.endian).into(); - exports.push(Export { - name: ByteString(name), - address, - }); - } - } - Ok(exports) - } - - #[inline] - fn dynamic_relocations(&'file self) -> Option { - None - } - - fn has_debug_symbols(&self) -> bool { - self.section_by_name(".debug_info").is_some() - } - - fn mach_uuid(&self) -> Result> { - self.header.uuid(self.endian, self.data, self.header_offset) - } - - fn relative_address_base(&self) -> u64 { - 0 - } - - fn entry(&self) -> u64 { - if let Ok(mut commands) = - self.header - .load_commands(self.endian, self.data, self.header_offset) - { - while let 
Ok(Some(command)) = commands.next() { - if let Ok(Some(command)) = command.entry_point() { - return command.entryoff.get(self.endian); - } - } - } - 0 - } - - fn flags(&self) -> FileFlags { - FileFlags::MachO { - flags: self.header.flags(self.endian), - } - } -} - -/// An iterator over the COMDAT section groups of a `MachOFile64`. -pub type MachOComdatIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOComdatIterator<'data, 'file, macho::MachHeader32, R>; -/// An iterator over the COMDAT section groups of a `MachOFile64`. -pub type MachOComdatIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOComdatIterator<'data, 'file, macho::MachHeader64, R>; - -/// An iterator over the COMDAT section groups of a `MachOFile`. -#[derive(Debug)] -pub struct MachOComdatIterator<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - #[allow(unused)] - file: &'file MachOFile<'data, Mach, R>, -} - -impl<'data, 'file, Mach, R> Iterator for MachOComdatIterator<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - type Item = MachOComdat<'data, 'file, Mach, R>; - - #[inline] - fn next(&mut self) -> Option { - None - } -} - -/// A COMDAT section group of a `MachOFile32`. -pub type MachOComdat32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOComdat<'data, 'file, macho::MachHeader32, R>; - -/// A COMDAT section group of a `MachOFile64`. -pub type MachOComdat64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOComdat<'data, 'file, macho::MachHeader64, R>; - -/// A COMDAT section group of a `MachOFile`. 
-#[derive(Debug)] -pub struct MachOComdat<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - #[allow(unused)] - file: &'file MachOFile<'data, Mach, R>, -} - -impl<'data, 'file, Mach, R> read::private::Sealed for MachOComdat<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Mach, R> ObjectComdat<'data> for MachOComdat<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - type SectionIterator = MachOComdatSectionIterator<'data, 'file, Mach, R>; - - #[inline] - fn kind(&self) -> ComdatKind { - unreachable!(); - } - - #[inline] - fn symbol(&self) -> SymbolIndex { - unreachable!(); - } - - #[inline] - fn name_bytes(&self) -> Result<&[u8]> { - unreachable!(); - } - - #[inline] - fn name(&self) -> Result<&str> { - unreachable!(); - } - - #[inline] - fn sections(&self) -> Self::SectionIterator { - unreachable!(); - } -} - -/// An iterator over the sections in a COMDAT section group of a `MachOFile32`. -pub type MachOComdatSectionIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOComdatSectionIterator<'data, 'file, macho::MachHeader32, R>; -/// An iterator over the sections in a COMDAT section group of a `MachOFile64`. -pub type MachOComdatSectionIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOComdatSectionIterator<'data, 'file, macho::MachHeader64, R>; - -/// An iterator over the sections in a COMDAT section group of a `MachOFile`. 
-#[derive(Debug)] -pub struct MachOComdatSectionIterator<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - #[allow(unused)] - file: &'file MachOFile<'data, Mach, R>, -} - -impl<'data, 'file, Mach, R> Iterator for MachOComdatSectionIterator<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - type Item = SectionIndex; - - fn next(&mut self) -> Option { - None - } -} - -/// A trait for generic access to `MachHeader32` and `MachHeader64`. -#[allow(missing_docs)] -pub trait MachHeader: Debug + Pod { - type Word: Into; - type Endian: endian::Endian; - type Segment: Segment; - type Section: Section; - type Nlist: Nlist; - - /// Return true if this type is a 64-bit header. - /// - /// This is a property of the type, not a value in the header data. - fn is_type_64(&self) -> bool; - - /// Return true if the `magic` field signifies big-endian. - fn is_big_endian(&self) -> bool; - - /// Return true if the `magic` field signifies little-endian. - fn is_little_endian(&self) -> bool; - - fn magic(&self) -> u32; - fn cputype(&self, endian: Self::Endian) -> u32; - fn cpusubtype(&self, endian: Self::Endian) -> u32; - fn filetype(&self, endian: Self::Endian) -> u32; - fn ncmds(&self, endian: Self::Endian) -> u32; - fn sizeofcmds(&self, endian: Self::Endian) -> u32; - fn flags(&self, endian: Self::Endian) -> u32; - - // Provided methods. - - /// Read the file header. - /// - /// Also checks that the magic field in the file header is a supported format. 
- fn parse<'data, R: ReadRef<'data>>(data: R, offset: u64) -> read::Result<&'data Self> { - let header = data - .read_at::(offset) - .read_error("Invalid Mach-O header size or alignment")?; - if !header.is_supported() { - return Err(Error("Unsupported Mach-O header")); - } - Ok(header) - } - - fn is_supported(&self) -> bool { - self.is_little_endian() || self.is_big_endian() - } - - fn endian(&self) -> Result { - Self::Endian::from_big_endian(self.is_big_endian()).read_error("Unsupported Mach-O endian") - } - - fn load_commands<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - header_offset: u64, - ) -> Result> { - let data = data - .read_bytes_at( - header_offset + mem::size_of::() as u64, - self.sizeofcmds(endian).into(), - ) - .read_error("Invalid Mach-O load command table size")?; - Ok(LoadCommandIterator::new(endian, data, self.ncmds(endian))) - } - - /// Return the UUID from the `LC_UUID` load command, if one is present. - fn uuid<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - header_offset: u64, - ) -> Result> { - let mut commands = self.load_commands(endian, data, header_offset)?; - while let Some(command) = commands.next()? 
{ - if let Ok(Some(uuid)) = command.uuid() { - return Ok(Some(uuid.uuid)); - } - } - Ok(None) - } -} - -impl MachHeader for macho::MachHeader32 { - type Word = u32; - type Endian = Endian; - type Segment = macho::SegmentCommand32; - type Section = macho::Section32; - type Nlist = macho::Nlist32; - - fn is_type_64(&self) -> bool { - false - } - - fn is_big_endian(&self) -> bool { - self.magic() == macho::MH_MAGIC - } - - fn is_little_endian(&self) -> bool { - self.magic() == macho::MH_CIGAM - } - - fn magic(&self) -> u32 { - self.magic.get(BigEndian) - } - - fn cputype(&self, endian: Self::Endian) -> u32 { - self.cputype.get(endian) - } - - fn cpusubtype(&self, endian: Self::Endian) -> u32 { - self.cpusubtype.get(endian) - } - - fn filetype(&self, endian: Self::Endian) -> u32 { - self.filetype.get(endian) - } - - fn ncmds(&self, endian: Self::Endian) -> u32 { - self.ncmds.get(endian) - } - - fn sizeofcmds(&self, endian: Self::Endian) -> u32 { - self.sizeofcmds.get(endian) - } - - fn flags(&self, endian: Self::Endian) -> u32 { - self.flags.get(endian) - } -} - -impl MachHeader for macho::MachHeader64 { - type Word = u64; - type Endian = Endian; - type Segment = macho::SegmentCommand64; - type Section = macho::Section64; - type Nlist = macho::Nlist64; - - fn is_type_64(&self) -> bool { - true - } - - fn is_big_endian(&self) -> bool { - self.magic() == macho::MH_MAGIC_64 - } - - fn is_little_endian(&self) -> bool { - self.magic() == macho::MH_CIGAM_64 - } - - fn magic(&self) -> u32 { - self.magic.get(BigEndian) - } - - fn cputype(&self, endian: Self::Endian) -> u32 { - self.cputype.get(endian) - } - - fn cpusubtype(&self, endian: Self::Endian) -> u32 { - self.cpusubtype.get(endian) - } - - fn filetype(&self, endian: Self::Endian) -> u32 { - self.filetype.get(endian) - } - - fn ncmds(&self, endian: Self::Endian) -> u32 { - self.ncmds.get(endian) - } - - fn sizeofcmds(&self, endian: Self::Endian) -> u32 { - self.sizeofcmds.get(endian) - } - - fn flags(&self, endian: 
Self::Endian) -> u32 { - self.flags.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/macho/load_command.rs s390-tools-2.33.1/rust-vendor/object/src/read/macho/load_command.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/macho/load_command.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/macho/load_command.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,373 +0,0 @@ -use core::marker::PhantomData; -use core::mem; - -use crate::endian::Endian; -use crate::macho; -use crate::pod::Pod; -use crate::read::macho::{MachHeader, SymbolTable}; -use crate::read::{Bytes, Error, ReadError, ReadRef, Result, StringTable}; - -/// An iterator over the load commands of a `MachHeader`. -#[derive(Debug, Default, Clone, Copy)] -pub struct LoadCommandIterator<'data, E: Endian> { - endian: E, - data: Bytes<'data>, - ncmds: u32, -} - -impl<'data, E: Endian> LoadCommandIterator<'data, E> { - pub(super) fn new(endian: E, data: &'data [u8], ncmds: u32) -> Self { - LoadCommandIterator { - endian, - data: Bytes(data), - ncmds, - } - } - - /// Return the next load command. - pub fn next(&mut self) -> Result>> { - if self.ncmds == 0 { - return Ok(None); - } - let header = self - .data - .read_at::>(0) - .read_error("Invalid Mach-O load command header")?; - let cmd = header.cmd.get(self.endian); - let cmdsize = header.cmdsize.get(self.endian) as usize; - if cmdsize < mem::size_of::>() { - return Err(Error("Invalid Mach-O load command size")); - } - let data = self - .data - .read_bytes(cmdsize) - .read_error("Invalid Mach-O load command size")?; - self.ncmds -= 1; - Ok(Some(LoadCommandData { - cmd, - data, - marker: Default::default(), - })) - } -} - -/// The data for a `LoadCommand`. -#[derive(Debug, Clone, Copy)] -pub struct LoadCommandData<'data, E: Endian> { - cmd: u32, - // Includes the header. 
- data: Bytes<'data>, - marker: PhantomData, -} - -impl<'data, E: Endian> LoadCommandData<'data, E> { - /// Return the `cmd` field of the `LoadCommand`. - /// - /// This is one of the `LC_` constants. - pub fn cmd(&self) -> u32 { - self.cmd - } - - /// Return the `cmdsize` field of the `LoadCommand`. - pub fn cmdsize(&self) -> u32 { - self.data.len() as u32 - } - - /// Parse the data as the given type. - #[inline] - pub fn data(&self) -> Result<&'data T> { - self.data - .read_at(0) - .read_error("Invalid Mach-O command size") - } - - /// Raw bytes of this LoadCommand structure. - pub fn raw_data(&self) -> &'data [u8] { - self.data.0 - } - - /// Parse a load command string value. - /// - /// Strings used by load commands are specified by offsets that are - /// relative to the load command header. - pub fn string(&self, endian: E, s: macho::LcStr) -> Result<&'data [u8]> { - self.data - .read_string_at(s.offset.get(endian) as usize) - .read_error("Invalid load command string offset") - } - - /// Parse the command data according to the `cmd` field. 
- pub fn variant(&self) -> Result> { - Ok(match self.cmd { - macho::LC_SEGMENT => { - let mut data = self.data; - let segment = data.read().read_error("Invalid Mach-O command size")?; - LoadCommandVariant::Segment32(segment, data.0) - } - macho::LC_SYMTAB => LoadCommandVariant::Symtab(self.data()?), - macho::LC_THREAD | macho::LC_UNIXTHREAD => { - let mut data = self.data; - let thread = data.read().read_error("Invalid Mach-O command size")?; - LoadCommandVariant::Thread(thread, data.0) - } - macho::LC_DYSYMTAB => LoadCommandVariant::Dysymtab(self.data()?), - macho::LC_LOAD_DYLIB - | macho::LC_LOAD_WEAK_DYLIB - | macho::LC_REEXPORT_DYLIB - | macho::LC_LAZY_LOAD_DYLIB - | macho::LC_LOAD_UPWARD_DYLIB => LoadCommandVariant::Dylib(self.data()?), - macho::LC_ID_DYLIB => LoadCommandVariant::IdDylib(self.data()?), - macho::LC_LOAD_DYLINKER => LoadCommandVariant::LoadDylinker(self.data()?), - macho::LC_ID_DYLINKER => LoadCommandVariant::IdDylinker(self.data()?), - macho::LC_PREBOUND_DYLIB => LoadCommandVariant::PreboundDylib(self.data()?), - macho::LC_ROUTINES => LoadCommandVariant::Routines32(self.data()?), - macho::LC_SUB_FRAMEWORK => LoadCommandVariant::SubFramework(self.data()?), - macho::LC_SUB_UMBRELLA => LoadCommandVariant::SubUmbrella(self.data()?), - macho::LC_SUB_CLIENT => LoadCommandVariant::SubClient(self.data()?), - macho::LC_SUB_LIBRARY => LoadCommandVariant::SubLibrary(self.data()?), - macho::LC_TWOLEVEL_HINTS => LoadCommandVariant::TwolevelHints(self.data()?), - macho::LC_PREBIND_CKSUM => LoadCommandVariant::PrebindCksum(self.data()?), - macho::LC_SEGMENT_64 => { - let mut data = self.data; - let segment = data.read().read_error("Invalid Mach-O command size")?; - LoadCommandVariant::Segment64(segment, data.0) - } - macho::LC_ROUTINES_64 => LoadCommandVariant::Routines64(self.data()?), - macho::LC_UUID => LoadCommandVariant::Uuid(self.data()?), - macho::LC_RPATH => LoadCommandVariant::Rpath(self.data()?), - macho::LC_CODE_SIGNATURE - | 
macho::LC_SEGMENT_SPLIT_INFO - | macho::LC_FUNCTION_STARTS - | macho::LC_DATA_IN_CODE - | macho::LC_DYLIB_CODE_SIGN_DRS - | macho::LC_LINKER_OPTIMIZATION_HINT - | macho::LC_DYLD_EXPORTS_TRIE - | macho::LC_DYLD_CHAINED_FIXUPS => LoadCommandVariant::LinkeditData(self.data()?), - macho::LC_ENCRYPTION_INFO => LoadCommandVariant::EncryptionInfo32(self.data()?), - macho::LC_DYLD_INFO | macho::LC_DYLD_INFO_ONLY => { - LoadCommandVariant::DyldInfo(self.data()?) - } - macho::LC_VERSION_MIN_MACOSX - | macho::LC_VERSION_MIN_IPHONEOS - | macho::LC_VERSION_MIN_TVOS - | macho::LC_VERSION_MIN_WATCHOS => LoadCommandVariant::VersionMin(self.data()?), - macho::LC_DYLD_ENVIRONMENT => LoadCommandVariant::DyldEnvironment(self.data()?), - macho::LC_MAIN => LoadCommandVariant::EntryPoint(self.data()?), - macho::LC_SOURCE_VERSION => LoadCommandVariant::SourceVersion(self.data()?), - macho::LC_ENCRYPTION_INFO_64 => LoadCommandVariant::EncryptionInfo64(self.data()?), - macho::LC_LINKER_OPTION => LoadCommandVariant::LinkerOption(self.data()?), - macho::LC_NOTE => LoadCommandVariant::Note(self.data()?), - macho::LC_BUILD_VERSION => LoadCommandVariant::BuildVersion(self.data()?), - macho::LC_FILESET_ENTRY => LoadCommandVariant::FilesetEntry(self.data()?), - _ => LoadCommandVariant::Other, - }) - } - - /// Try to parse this command as a `SegmentCommand32`. - /// - /// Returns the segment command and the data containing the sections. - pub fn segment_32(self) -> Result, &'data [u8])>> { - if self.cmd == macho::LC_SEGMENT { - let mut data = self.data; - let segment = data.read().read_error("Invalid Mach-O command size")?; - Ok(Some((segment, data.0))) - } else { - Ok(None) - } - } - - /// Try to parse this command as a `SymtabCommand`. - /// - /// Returns the segment command and the data containing the sections. 
- pub fn symtab(self) -> Result>> { - if self.cmd == macho::LC_SYMTAB { - Some(self.data()).transpose() - } else { - Ok(None) - } - } - - /// Try to parse this command as a `DysymtabCommand`. - pub fn dysymtab(self) -> Result>> { - if self.cmd == macho::LC_DYSYMTAB { - Some(self.data()).transpose() - } else { - Ok(None) - } - } - - /// Try to parse this command as a `DylibCommand`. - pub fn dylib(self) -> Result>> { - if self.cmd == macho::LC_LOAD_DYLIB - || self.cmd == macho::LC_LOAD_WEAK_DYLIB - || self.cmd == macho::LC_REEXPORT_DYLIB - || self.cmd == macho::LC_LAZY_LOAD_DYLIB - || self.cmd == macho::LC_LOAD_UPWARD_DYLIB - { - Some(self.data()).transpose() - } else { - Ok(None) - } - } - - /// Try to parse this command as a `UuidCommand`. - pub fn uuid(self) -> Result>> { - if self.cmd == macho::LC_UUID { - Some(self.data()).transpose() - } else { - Ok(None) - } - } - - /// Try to parse this command as a `SegmentCommand64`. - pub fn segment_64(self) -> Result, &'data [u8])>> { - if self.cmd == macho::LC_SEGMENT_64 { - let mut data = self.data; - let command = data.read().read_error("Invalid Mach-O command size")?; - Ok(Some((command, data.0))) - } else { - Ok(None) - } - } - - /// Try to parse this command as a `DyldInfoCommand`. - pub fn dyld_info(self) -> Result>> { - if self.cmd == macho::LC_DYLD_INFO || self.cmd == macho::LC_DYLD_INFO_ONLY { - Some(self.data()).transpose() - } else { - Ok(None) - } - } - - /// Try to parse this command as an `EntryPointCommand`. - pub fn entry_point(self) -> Result>> { - if self.cmd == macho::LC_MAIN { - Some(self.data()).transpose() - } else { - Ok(None) - } - } -} - -/// A `LoadCommand` that has been interpreted according to its `cmd` field. 
-#[derive(Debug, Clone, Copy)] -#[non_exhaustive] -pub enum LoadCommandVariant<'data, E: Endian> { - /// `LC_SEGMENT` - Segment32(&'data macho::SegmentCommand32, &'data [u8]), - /// `LC_SYMTAB` - Symtab(&'data macho::SymtabCommand), - // obsolete: `LC_SYMSEG` - //Symseg(&'data macho::SymsegCommand), - /// `LC_THREAD` or `LC_UNIXTHREAD` - Thread(&'data macho::ThreadCommand, &'data [u8]), - // obsolete: `LC_IDFVMLIB` or `LC_LOADFVMLIB` - //Fvmlib(&'data macho::FvmlibCommand), - // obsolete: `LC_IDENT` - //Ident(&'data macho::IdentCommand), - // internal: `LC_FVMFILE` - //Fvmfile(&'data macho::FvmfileCommand), - // internal: `LC_PREPAGE` - /// `LC_DYSYMTAB` - Dysymtab(&'data macho::DysymtabCommand), - /// `LC_LOAD_DYLIB`, `LC_LOAD_WEAK_DYLIB`, `LC_REEXPORT_DYLIB`, - /// `LC_LAZY_LOAD_DYLIB`, or `LC_LOAD_UPWARD_DYLIB` - Dylib(&'data macho::DylibCommand), - /// `LC_ID_DYLIB` - IdDylib(&'data macho::DylibCommand), - /// `LC_LOAD_DYLINKER` - LoadDylinker(&'data macho::DylinkerCommand), - /// `LC_ID_DYLINKER` - IdDylinker(&'data macho::DylinkerCommand), - /// `LC_PREBOUND_DYLIB` - PreboundDylib(&'data macho::PreboundDylibCommand), - /// `LC_ROUTINES` - Routines32(&'data macho::RoutinesCommand32), - /// `LC_SUB_FRAMEWORK` - SubFramework(&'data macho::SubFrameworkCommand), - /// `LC_SUB_UMBRELLA` - SubUmbrella(&'data macho::SubUmbrellaCommand), - /// `LC_SUB_CLIENT` - SubClient(&'data macho::SubClientCommand), - /// `LC_SUB_LIBRARY` - SubLibrary(&'data macho::SubLibraryCommand), - /// `LC_TWOLEVEL_HINTS` - TwolevelHints(&'data macho::TwolevelHintsCommand), - /// `LC_PREBIND_CKSUM` - PrebindCksum(&'data macho::PrebindCksumCommand), - /// `LC_SEGMENT_64` - Segment64(&'data macho::SegmentCommand64, &'data [u8]), - /// `LC_ROUTINES_64` - Routines64(&'data macho::RoutinesCommand64), - /// `LC_UUID` - Uuid(&'data macho::UuidCommand), - /// `LC_RPATH` - Rpath(&'data macho::RpathCommand), - /// `LC_CODE_SIGNATURE`, `LC_SEGMENT_SPLIT_INFO`, `LC_FUNCTION_STARTS`, - /// 
`LC_DATA_IN_CODE`, `LC_DYLIB_CODE_SIGN_DRS`, `LC_LINKER_OPTIMIZATION_HINT`, - /// `LC_DYLD_EXPORTS_TRIE`, or `LC_DYLD_CHAINED_FIXUPS`. - LinkeditData(&'data macho::LinkeditDataCommand), - /// `LC_ENCRYPTION_INFO` - EncryptionInfo32(&'data macho::EncryptionInfoCommand32), - /// `LC_DYLD_INFO` or `LC_DYLD_INFO_ONLY` - DyldInfo(&'data macho::DyldInfoCommand), - /// `LC_VERSION_MIN_MACOSX`, `LC_VERSION_MIN_IPHONEOS`, `LC_VERSION_MIN_WATCHOS`, - /// or `LC_VERSION_MIN_TVOS` - VersionMin(&'data macho::VersionMinCommand), - /// `LC_DYLD_ENVIRONMENT` - DyldEnvironment(&'data macho::DylinkerCommand), - /// `LC_MAIN` - EntryPoint(&'data macho::EntryPointCommand), - /// `LC_SOURCE_VERSION` - SourceVersion(&'data macho::SourceVersionCommand), - /// `LC_ENCRYPTION_INFO_64` - EncryptionInfo64(&'data macho::EncryptionInfoCommand64), - /// `LC_LINKER_OPTION` - LinkerOption(&'data macho::LinkerOptionCommand), - /// `LC_NOTE` - Note(&'data macho::NoteCommand), - /// `LC_BUILD_VERSION` - BuildVersion(&'data macho::BuildVersionCommand), - /// `LC_FILESET_ENTRY` - FilesetEntry(&'data macho::FilesetEntryCommand), - /// An unrecognized or obsolete load command. - Other, -} - -impl macho::SymtabCommand { - /// Return the symbol table that this command references. 
- pub fn symbols<'data, Mach: MachHeader, R: ReadRef<'data>>( - &self, - endian: E, - data: R, - ) -> Result> { - let symbols = data - .read_slice_at( - self.symoff.get(endian).into(), - self.nsyms.get(endian) as usize, - ) - .read_error("Invalid Mach-O symbol table offset or size")?; - let str_start: u64 = self.stroff.get(endian).into(); - let str_end = str_start - .checked_add(self.strsize.get(endian).into()) - .read_error("Invalid Mach-O string table length")?; - let strings = StringTable::new(data, str_start, str_end); - Ok(SymbolTable::new(symbols, strings)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::LittleEndian; - - #[test] - fn cmd_size_invalid() { - let mut commands = LoadCommandIterator::new(LittleEndian, &[0; 8], 10); - assert!(commands.next().is_err()); - let mut commands = LoadCommandIterator::new(LittleEndian, &[0, 0, 0, 0, 7, 0, 0, 0, 0], 10); - assert!(commands.next().is_err()); - let mut commands = LoadCommandIterator::new(LittleEndian, &[0, 0, 0, 0, 8, 0, 0, 0, 0], 10); - assert!(commands.next().is_ok()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/macho/mod.rs s390-tools-2.33.1/rust-vendor/object/src/read/macho/mod.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/macho/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/macho/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ -//! Support for reading Mach-O files. -//! -//! Defines traits to abstract over the difference between 32-bit and 64-bit -//! Mach-O files, and implements read functionality in terms of these traits. -//! -//! Also provides `MachOFile` and related types which implement the `Object` trait. 
- -mod dyld_cache; -pub use dyld_cache::*; - -mod fat; -pub use fat::*; - -mod file; -pub use file::*; - -mod load_command; -pub use load_command::*; - -mod segment; -pub use segment::*; - -mod section; -pub use section::*; - -mod symbol; -pub use symbol::*; - -mod relocation; -pub use relocation::*; diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/macho/relocation.rs s390-tools-2.33.1/rust-vendor/object/src/read/macho/relocation.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/macho/relocation.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/macho/relocation.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,127 +0,0 @@ -use core::{fmt, slice}; - -use crate::endian::Endianness; -use crate::macho; -use crate::read::{ - ReadRef, Relocation, RelocationEncoding, RelocationKind, RelocationTarget, SectionIndex, - SymbolIndex, -}; - -use super::{MachHeader, MachOFile}; - -/// An iterator over the relocations in a `MachOSection32`. -pub type MachORelocationIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachORelocationIterator<'data, 'file, macho::MachHeader32, R>; -/// An iterator over the relocations in a `MachOSection64`. -pub type MachORelocationIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachORelocationIterator<'data, 'file, macho::MachHeader64, R>; - -/// An iterator over the relocations in a `MachOSection`. 
-pub struct MachORelocationIterator<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file MachOFile<'data, Mach, R>, - pub(super) relocations: slice::Iter<'data, macho::Relocation>, -} - -impl<'data, 'file, Mach, R> Iterator for MachORelocationIterator<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - type Item = (u64, Relocation); - - fn next(&mut self) -> Option { - loop { - let reloc = self.relocations.next()?; - let endian = self.file.endian; - let cputype = self.file.header.cputype(endian); - if reloc.r_scattered(endian, cputype) { - // FIXME: handle scattered relocations - // We need to add `RelocationTarget::Address` for this. - continue; - } - let reloc = reloc.info(self.file.endian); - let mut encoding = RelocationEncoding::Generic; - let kind = match cputype { - macho::CPU_TYPE_ARM => match (reloc.r_type, reloc.r_pcrel) { - (macho::ARM_RELOC_VANILLA, false) => RelocationKind::Absolute, - _ => RelocationKind::MachO { - value: reloc.r_type, - relative: reloc.r_pcrel, - }, - }, - macho::CPU_TYPE_ARM64 | macho::CPU_TYPE_ARM64_32 => { - match (reloc.r_type, reloc.r_pcrel) { - (macho::ARM64_RELOC_UNSIGNED, false) => RelocationKind::Absolute, - _ => RelocationKind::MachO { - value: reloc.r_type, - relative: reloc.r_pcrel, - }, - } - } - macho::CPU_TYPE_X86 => match (reloc.r_type, reloc.r_pcrel) { - (macho::GENERIC_RELOC_VANILLA, false) => RelocationKind::Absolute, - _ => RelocationKind::MachO { - value: reloc.r_type, - relative: reloc.r_pcrel, - }, - }, - macho::CPU_TYPE_X86_64 => match (reloc.r_type, reloc.r_pcrel) { - (macho::X86_64_RELOC_UNSIGNED, false) => RelocationKind::Absolute, - (macho::X86_64_RELOC_SIGNED, true) => { - encoding = RelocationEncoding::X86RipRelative; - RelocationKind::Relative - } - (macho::X86_64_RELOC_BRANCH, true) => { - encoding = RelocationEncoding::X86Branch; - RelocationKind::Relative - } - (macho::X86_64_RELOC_GOT, true) => 
RelocationKind::GotRelative, - (macho::X86_64_RELOC_GOT_LOAD, true) => { - encoding = RelocationEncoding::X86RipRelativeMovq; - RelocationKind::GotRelative - } - _ => RelocationKind::MachO { - value: reloc.r_type, - relative: reloc.r_pcrel, - }, - }, - _ => RelocationKind::MachO { - value: reloc.r_type, - relative: reloc.r_pcrel, - }, - }; - let size = 8 << reloc.r_length; - let target = if reloc.r_extern { - RelocationTarget::Symbol(SymbolIndex(reloc.r_symbolnum as usize)) - } else { - RelocationTarget::Section(SectionIndex(reloc.r_symbolnum as usize)) - }; - let addend = if reloc.r_pcrel { -4 } else { 0 }; - return Some(( - reloc.r_address as u64, - Relocation { - kind, - encoding, - size, - target, - addend, - implicit_addend: true, - }, - )); - } - } -} - -impl<'data, 'file, Mach, R> fmt::Debug for MachORelocationIterator<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MachORelocationIterator").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/macho/section.rs s390-tools-2.33.1/rust-vendor/object/src/read/macho/section.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/macho/section.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/macho/section.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,387 +0,0 @@ -use core::fmt::Debug; -use core::{fmt, result, slice, str}; - -use crate::endian::{self, Endianness}; -use crate::macho; -use crate::pod::Pod; -use crate::read::{ - self, CompressedData, CompressedFileRange, ObjectSection, ReadError, ReadRef, Result, - SectionFlags, SectionIndex, SectionKind, -}; - -use super::{MachHeader, MachOFile, MachORelocationIterator}; - -/// An iterator over the sections of a `MachOFile32`. 
-pub type MachOSectionIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSectionIterator<'data, 'file, macho::MachHeader32, R>; -/// An iterator over the sections of a `MachOFile64`. -pub type MachOSectionIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSectionIterator<'data, 'file, macho::MachHeader64, R>; - -/// An iterator over the sections of a `MachOFile`. -pub struct MachOSectionIterator<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file MachOFile<'data, Mach, R>, - pub(super) iter: slice::Iter<'file, MachOSectionInternal<'data, Mach>>, -} - -impl<'data, 'file, Mach, R> fmt::Debug for MachOSectionIterator<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // It's painful to do much better than this - f.debug_struct("MachOSectionIterator").finish() - } -} - -impl<'data, 'file, Mach, R> Iterator for MachOSectionIterator<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - type Item = MachOSection<'data, 'file, Mach, R>; - - fn next(&mut self) -> Option { - self.iter.next().map(|&internal| MachOSection { - file: self.file, - internal, - }) - } -} - -/// A section of a `MachOFile32`. -pub type MachOSection32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSection<'data, 'file, macho::MachHeader32, R>; -/// A section of a `MachOFile64`. -pub type MachOSection64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSection<'data, 'file, macho::MachHeader64, R>; - -/// A section of a `MachOFile`. 
-#[derive(Debug)] -pub struct MachOSection<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file MachOFile<'data, Mach, R>, - pub(super) internal: MachOSectionInternal<'data, Mach>, -} - -impl<'data, 'file, Mach, R> MachOSection<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - fn bytes(&self) -> Result<&'data [u8]> { - let segment_index = self.internal.segment_index; - let segment = self.file.segment_internal(segment_index)?; - self.internal - .section - .data(self.file.endian, segment.data) - .read_error("Invalid Mach-O section size or offset") - } -} - -impl<'data, 'file, Mach, R> read::private::Sealed for MachOSection<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Mach, R> ObjectSection<'data> for MachOSection<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - type RelocationIterator = MachORelocationIterator<'data, 'file, Mach, R>; - - #[inline] - fn index(&self) -> SectionIndex { - self.internal.index - } - - #[inline] - fn address(&self) -> u64 { - self.internal.section.addr(self.file.endian).into() - } - - #[inline] - fn size(&self) -> u64 { - self.internal.section.size(self.file.endian).into() - } - - #[inline] - fn align(&self) -> u64 { - let align = self.internal.section.align(self.file.endian); - if align < 64 { - 1 << align - } else { - 0 - } - } - - #[inline] - fn file_range(&self) -> Option<(u64, u64)> { - self.internal.section.file_range(self.file.endian) - } - - #[inline] - fn data(&self) -> Result<&'data [u8]> { - self.bytes() - } - - fn data_range(&self, address: u64, size: u64) -> Result> { - Ok(read::util::data_range( - self.bytes()?, - self.address(), - address, - size, - )) - } - - #[inline] - fn compressed_file_range(&self) -> Result { - Ok(CompressedFileRange::none(self.file_range())) - } - - #[inline] - fn compressed_data(&self) -> Result> { - 
self.data().map(CompressedData::none) - } - - #[inline] - fn name_bytes(&self) -> Result<&[u8]> { - Ok(self.internal.section.name()) - } - - #[inline] - fn name(&self) -> Result<&str> { - str::from_utf8(self.internal.section.name()) - .ok() - .read_error("Non UTF-8 Mach-O section name") - } - - #[inline] - fn segment_name_bytes(&self) -> Result> { - Ok(Some(self.internal.section.segment_name())) - } - - #[inline] - fn segment_name(&self) -> Result> { - Ok(Some( - str::from_utf8(self.internal.section.segment_name()) - .ok() - .read_error("Non UTF-8 Mach-O segment name")?, - )) - } - - fn kind(&self) -> SectionKind { - self.internal.kind - } - - fn relocations(&self) -> MachORelocationIterator<'data, 'file, Mach, R> { - MachORelocationIterator { - file: self.file, - relocations: self - .internal - .section - .relocations(self.file.endian, self.file.data) - .unwrap_or(&[]) - .iter(), - } - } - - fn flags(&self) -> SectionFlags { - SectionFlags::MachO { - flags: self.internal.section.flags(self.file.endian), - } - } -} - -#[derive(Debug, Clone, Copy)] -pub(super) struct MachOSectionInternal<'data, Mach: MachHeader> { - pub index: SectionIndex, - pub segment_index: usize, - pub kind: SectionKind, - pub section: &'data Mach::Section, -} - -impl<'data, Mach: MachHeader> MachOSectionInternal<'data, Mach> { - pub(super) fn parse( - index: SectionIndex, - segment_index: usize, - section: &'data Mach::Section, - ) -> Self { - // TODO: we don't validate flags, should we? 
- let kind = match (section.segment_name(), section.name()) { - (b"__TEXT", b"__text") => SectionKind::Text, - (b"__TEXT", b"__const") => SectionKind::ReadOnlyData, - (b"__TEXT", b"__cstring") => SectionKind::ReadOnlyString, - (b"__TEXT", b"__literal4") => SectionKind::ReadOnlyData, - (b"__TEXT", b"__literal8") => SectionKind::ReadOnlyData, - (b"__TEXT", b"__literal16") => SectionKind::ReadOnlyData, - (b"__TEXT", b"__eh_frame") => SectionKind::ReadOnlyData, - (b"__TEXT", b"__gcc_except_tab") => SectionKind::ReadOnlyData, - (b"__DATA", b"__data") => SectionKind::Data, - (b"__DATA", b"__const") => SectionKind::ReadOnlyData, - (b"__DATA", b"__bss") => SectionKind::UninitializedData, - (b"__DATA", b"__common") => SectionKind::Common, - (b"__DATA", b"__thread_data") => SectionKind::Tls, - (b"__DATA", b"__thread_bss") => SectionKind::UninitializedTls, - (b"__DATA", b"__thread_vars") => SectionKind::TlsVariables, - (b"__DWARF", _) => SectionKind::Debug, - _ => SectionKind::Unknown, - }; - MachOSectionInternal { - index, - segment_index, - kind, - section, - } - } -} - -/// A trait for generic access to `Section32` and `Section64`. -#[allow(missing_docs)] -pub trait Section: Debug + Pod { - type Word: Into; - type Endian: endian::Endian; - - fn sectname(&self) -> &[u8; 16]; - fn segname(&self) -> &[u8; 16]; - fn addr(&self, endian: Self::Endian) -> Self::Word; - fn size(&self, endian: Self::Endian) -> Self::Word; - fn offset(&self, endian: Self::Endian) -> u32; - fn align(&self, endian: Self::Endian) -> u32; - fn reloff(&self, endian: Self::Endian) -> u32; - fn nreloc(&self, endian: Self::Endian) -> u32; - fn flags(&self, endian: Self::Endian) -> u32; - - /// Return the `sectname` bytes up until the null terminator. - fn name(&self) -> &[u8] { - let sectname = &self.sectname()[..]; - match memchr::memchr(b'\0', sectname) { - Some(end) => §name[..end], - None => sectname, - } - } - - /// Return the `segname` bytes up until the null terminator. 
- fn segment_name(&self) -> &[u8] { - let segname = &self.segname()[..]; - match memchr::memchr(b'\0', segname) { - Some(end) => &segname[..end], - None => segname, - } - } - - /// Return the offset and size of the section in the file. - /// - /// Returns `None` for sections that have no data in the file. - fn file_range(&self, endian: Self::Endian) -> Option<(u64, u64)> { - match self.flags(endian) & macho::SECTION_TYPE { - macho::S_ZEROFILL | macho::S_GB_ZEROFILL | macho::S_THREAD_LOCAL_ZEROFILL => None, - _ => Some((self.offset(endian).into(), self.size(endian).into())), - } - } - - /// Return the section data. - /// - /// Returns `Ok(&[])` if the section has no data. - /// Returns `Err` for invalid values. - fn data<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> result::Result<&'data [u8], ()> { - if let Some((offset, size)) = self.file_range(endian) { - data.read_bytes_at(offset, size) - } else { - Ok(&[]) - } - } - - /// Return the relocation array. - /// - /// Returns `Err` for invalid values. 
- fn relocations<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> Result<&'data [macho::Relocation]> { - data.read_slice_at(self.reloff(endian).into(), self.nreloc(endian) as usize) - .read_error("Invalid Mach-O relocations offset or number") - } -} - -impl Section for macho::Section32 { - type Word = u32; - type Endian = Endian; - - fn sectname(&self) -> &[u8; 16] { - &self.sectname - } - fn segname(&self) -> &[u8; 16] { - &self.segname - } - fn addr(&self, endian: Self::Endian) -> Self::Word { - self.addr.get(endian) - } - fn size(&self, endian: Self::Endian) -> Self::Word { - self.size.get(endian) - } - fn offset(&self, endian: Self::Endian) -> u32 { - self.offset.get(endian) - } - fn align(&self, endian: Self::Endian) -> u32 { - self.align.get(endian) - } - fn reloff(&self, endian: Self::Endian) -> u32 { - self.reloff.get(endian) - } - fn nreloc(&self, endian: Self::Endian) -> u32 { - self.nreloc.get(endian) - } - fn flags(&self, endian: Self::Endian) -> u32 { - self.flags.get(endian) - } -} - -impl Section for macho::Section64 { - type Word = u64; - type Endian = Endian; - - fn sectname(&self) -> &[u8; 16] { - &self.sectname - } - fn segname(&self) -> &[u8; 16] { - &self.segname - } - fn addr(&self, endian: Self::Endian) -> Self::Word { - self.addr.get(endian) - } - fn size(&self, endian: Self::Endian) -> Self::Word { - self.size.get(endian) - } - fn offset(&self, endian: Self::Endian) -> u32 { - self.offset.get(endian) - } - fn align(&self, endian: Self::Endian) -> u32 { - self.align.get(endian) - } - fn reloff(&self, endian: Self::Endian) -> u32 { - self.reloff.get(endian) - } - fn nreloc(&self, endian: Self::Endian) -> u32 { - self.nreloc.get(endian) - } - fn flags(&self, endian: Self::Endian) -> u32 { - self.flags.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/macho/segment.rs s390-tools-2.33.1/rust-vendor/object/src/read/macho/segment.rs --- 
s390-tools-2.31.0/rust-vendor/object/src/read/macho/segment.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/macho/segment.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,301 +0,0 @@ -use core::fmt::Debug; -use core::{result, slice, str}; - -use crate::endian::{self, Endianness}; -use crate::macho; -use crate::pod::Pod; -use crate::read::{self, ObjectSegment, ReadError, ReadRef, Result, SegmentFlags}; - -use super::{LoadCommandData, MachHeader, MachOFile, Section}; - -/// An iterator over the segments of a `MachOFile32`. -pub type MachOSegmentIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSegmentIterator<'data, 'file, macho::MachHeader32, R>; -/// An iterator over the segments of a `MachOFile64`. -pub type MachOSegmentIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSegmentIterator<'data, 'file, macho::MachHeader64, R>; - -/// An iterator over the segments of a `MachOFile`. -#[derive(Debug)] -pub struct MachOSegmentIterator<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file MachOFile<'data, Mach, R>, - pub(super) iter: slice::Iter<'file, MachOSegmentInternal<'data, Mach, R>>, -} - -impl<'data, 'file, Mach, R> Iterator for MachOSegmentIterator<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - type Item = MachOSegment<'data, 'file, Mach, R>; - - fn next(&mut self) -> Option { - self.iter.next().map(|internal| MachOSegment { - file: self.file, - internal, - }) - } -} - -/// A segment of a `MachOFile32`. -pub type MachOSegment32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSegment<'data, 'file, macho::MachHeader32, R>; -/// A segment of a `MachOFile64`. -pub type MachOSegment64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSegment<'data, 'file, macho::MachHeader64, R>; - -/// A segment of a `MachOFile`. 
-#[derive(Debug)] -pub struct MachOSegment<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - file: &'file MachOFile<'data, Mach, R>, - internal: &'file MachOSegmentInternal<'data, Mach, R>, -} - -impl<'data, 'file, Mach, R> MachOSegment<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - fn bytes(&self) -> Result<&'data [u8]> { - self.internal - .segment - .data(self.file.endian, self.file.data) - .read_error("Invalid Mach-O segment size or offset") - } -} - -impl<'data, 'file, Mach, R> read::private::Sealed for MachOSegment<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Mach, R> ObjectSegment<'data> for MachOSegment<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - #[inline] - fn address(&self) -> u64 { - self.internal.segment.vmaddr(self.file.endian).into() - } - - #[inline] - fn size(&self) -> u64 { - self.internal.segment.vmsize(self.file.endian).into() - } - - #[inline] - fn align(&self) -> u64 { - // Page size. 
- 0x1000 - } - - #[inline] - fn file_range(&self) -> (u64, u64) { - self.internal.segment.file_range(self.file.endian) - } - - fn data(&self) -> Result<&'data [u8]> { - self.bytes() - } - - fn data_range(&self, address: u64, size: u64) -> Result> { - Ok(read::util::data_range( - self.bytes()?, - self.address(), - address, - size, - )) - } - - #[inline] - fn name_bytes(&self) -> Result> { - Ok(Some(self.internal.segment.name())) - } - - #[inline] - fn name(&self) -> Result> { - Ok(Some( - str::from_utf8(self.internal.segment.name()) - .ok() - .read_error("Non UTF-8 Mach-O segment name")?, - )) - } - - #[inline] - fn flags(&self) -> SegmentFlags { - let flags = self.internal.segment.flags(self.file.endian); - let maxprot = self.internal.segment.maxprot(self.file.endian); - let initprot = self.internal.segment.initprot(self.file.endian); - SegmentFlags::MachO { - flags, - maxprot, - initprot, - } - } -} - -#[derive(Debug, Clone, Copy)] -pub(super) struct MachOSegmentInternal<'data, Mach: MachHeader, R: ReadRef<'data>> { - pub data: R, - pub segment: &'data Mach::Segment, -} - -/// A trait for generic access to `SegmentCommand32` and `SegmentCommand64`. -#[allow(missing_docs)] -pub trait Segment: Debug + Pod { - type Word: Into; - type Endian: endian::Endian; - type Section: Section; - - fn from_command(command: LoadCommandData<'_, Self::Endian>) -> Result>; - - fn cmd(&self, endian: Self::Endian) -> u32; - fn cmdsize(&self, endian: Self::Endian) -> u32; - fn segname(&self) -> &[u8; 16]; - fn vmaddr(&self, endian: Self::Endian) -> Self::Word; - fn vmsize(&self, endian: Self::Endian) -> Self::Word; - fn fileoff(&self, endian: Self::Endian) -> Self::Word; - fn filesize(&self, endian: Self::Endian) -> Self::Word; - fn maxprot(&self, endian: Self::Endian) -> u32; - fn initprot(&self, endian: Self::Endian) -> u32; - fn nsects(&self, endian: Self::Endian) -> u32; - fn flags(&self, endian: Self::Endian) -> u32; - - /// Return the `segname` bytes up until the null terminator. 
- fn name(&self) -> &[u8] { - let segname = &self.segname()[..]; - match memchr::memchr(b'\0', segname) { - Some(end) => &segname[..end], - None => segname, - } - } - - /// Return the offset and size of the segment in the file. - fn file_range(&self, endian: Self::Endian) -> (u64, u64) { - (self.fileoff(endian).into(), self.filesize(endian).into()) - } - - /// Get the segment data from the file data. - /// - /// Returns `Err` for invalid values. - fn data<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - data: R, - ) -> result::Result<&'data [u8], ()> { - let (offset, size) = self.file_range(endian); - data.read_bytes_at(offset, size) - } - - /// Get the array of sections from the data following the segment command. - /// - /// Returns `Err` for invalid values. - fn sections<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - section_data: R, - ) -> Result<&'data [Self::Section]> { - section_data - .read_slice_at(0, self.nsects(endian) as usize) - .read_error("Invalid Mach-O number of sections") - } -} - -impl Segment for macho::SegmentCommand32 { - type Word = u32; - type Endian = Endian; - type Section = macho::Section32; - - fn from_command(command: LoadCommandData<'_, Self::Endian>) -> Result> { - command.segment_32() - } - - fn cmd(&self, endian: Self::Endian) -> u32 { - self.cmd.get(endian) - } - fn cmdsize(&self, endian: Self::Endian) -> u32 { - self.cmdsize.get(endian) - } - fn segname(&self) -> &[u8; 16] { - &self.segname - } - fn vmaddr(&self, endian: Self::Endian) -> Self::Word { - self.vmaddr.get(endian) - } - fn vmsize(&self, endian: Self::Endian) -> Self::Word { - self.vmsize.get(endian) - } - fn fileoff(&self, endian: Self::Endian) -> Self::Word { - self.fileoff.get(endian) - } - fn filesize(&self, endian: Self::Endian) -> Self::Word { - self.filesize.get(endian) - } - fn maxprot(&self, endian: Self::Endian) -> u32 { - self.maxprot.get(endian) - } - fn initprot(&self, endian: Self::Endian) -> u32 { - self.initprot.get(endian) - 
} - fn nsects(&self, endian: Self::Endian) -> u32 { - self.nsects.get(endian) - } - fn flags(&self, endian: Self::Endian) -> u32 { - self.flags.get(endian) - } -} - -impl Segment for macho::SegmentCommand64 { - type Word = u64; - type Endian = Endian; - type Section = macho::Section64; - - fn from_command(command: LoadCommandData<'_, Self::Endian>) -> Result> { - command.segment_64() - } - - fn cmd(&self, endian: Self::Endian) -> u32 { - self.cmd.get(endian) - } - fn cmdsize(&self, endian: Self::Endian) -> u32 { - self.cmdsize.get(endian) - } - fn segname(&self) -> &[u8; 16] { - &self.segname - } - fn vmaddr(&self, endian: Self::Endian) -> Self::Word { - self.vmaddr.get(endian) - } - fn vmsize(&self, endian: Self::Endian) -> Self::Word { - self.vmsize.get(endian) - } - fn fileoff(&self, endian: Self::Endian) -> Self::Word { - self.fileoff.get(endian) - } - fn filesize(&self, endian: Self::Endian) -> Self::Word { - self.filesize.get(endian) - } - fn maxprot(&self, endian: Self::Endian) -> u32 { - self.maxprot.get(endian) - } - fn initprot(&self, endian: Self::Endian) -> u32 { - self.initprot.get(endian) - } - fn nsects(&self, endian: Self::Endian) -> u32 { - self.nsects.get(endian) - } - fn flags(&self, endian: Self::Endian) -> u32 { - self.flags.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/macho/symbol.rs s390-tools-2.33.1/rust-vendor/object/src/read/macho/symbol.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/macho/symbol.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/macho/symbol.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,488 +0,0 @@ -use alloc::vec::Vec; -use core::fmt::Debug; -use core::{fmt, slice, str}; - -use crate::endian::{self, Endianness}; -use crate::macho; -use crate::pod::Pod; -use crate::read::util::StringTable; -use crate::read::{ - self, ObjectMap, ObjectMapEntry, ObjectSymbol, ObjectSymbolTable, ReadError, ReadRef, Result, - SectionIndex, SectionKind, SymbolFlags, 
SymbolIndex, SymbolKind, SymbolMap, SymbolMapEntry, - SymbolScope, SymbolSection, -}; - -use super::{MachHeader, MachOFile}; - -/// A table of symbol entries in a Mach-O file. -/// -/// Also includes the string table used for the symbol names. -#[derive(Debug, Clone, Copy)] -pub struct SymbolTable<'data, Mach: MachHeader, R = &'data [u8]> -where - R: ReadRef<'data>, -{ - symbols: &'data [Mach::Nlist], - strings: StringTable<'data, R>, -} - -impl<'data, Mach: MachHeader, R: ReadRef<'data>> Default for SymbolTable<'data, Mach, R> { - fn default() -> Self { - SymbolTable { - symbols: &[], - strings: Default::default(), - } - } -} - -impl<'data, Mach: MachHeader, R: ReadRef<'data>> SymbolTable<'data, Mach, R> { - #[inline] - pub(super) fn new(symbols: &'data [Mach::Nlist], strings: StringTable<'data, R>) -> Self { - SymbolTable { symbols, strings } - } - - /// Return the string table used for the symbol names. - #[inline] - pub fn strings(&self) -> StringTable<'data, R> { - self.strings - } - - /// Iterate over the symbols. - #[inline] - pub fn iter(&self) -> slice::Iter<'data, Mach::Nlist> { - self.symbols.iter() - } - - /// Return true if the symbol table is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.symbols.is_empty() - } - - /// The number of symbols. - #[inline] - pub fn len(&self) -> usize { - self.symbols.len() - } - - /// Return the symbol at the given index. - pub fn symbol(&self, index: usize) -> Result<&'data Mach::Nlist> { - self.symbols - .get(index) - .read_error("Invalid Mach-O symbol index") - } - - /// Construct a map from addresses to a user-defined map entry. - pub fn map Option>( - &self, - f: F, - ) -> SymbolMap { - let mut symbols = Vec::new(); - for nlist in self.symbols { - if !nlist.is_definition() { - continue; - } - if let Some(entry) = f(nlist) { - symbols.push(entry); - } - } - SymbolMap::new(symbols) - } - - /// Construct a map from addresses to symbol names and object file names. 
- pub fn object_map(&self, endian: Mach::Endian) -> ObjectMap<'data> { - let mut symbols = Vec::new(); - let mut objects = Vec::new(); - let mut object = None; - let mut current_function = None; - // Each module starts with one or two N_SO symbols (path, or directory + filename) - // and one N_OSO symbol. The module is terminated by an empty N_SO symbol. - for nlist in self.symbols { - let n_type = nlist.n_type(); - if n_type & macho::N_STAB == 0 { - continue; - } - // TODO: includes variables too (N_GSYM, N_STSYM). These may need to get their - // address from regular symbols though. - match n_type { - macho::N_SO => { - object = None; - } - macho::N_OSO => { - object = None; - if let Ok(name) = nlist.name(endian, self.strings) { - if !name.is_empty() { - object = Some(objects.len()); - objects.push(name); - } - } - } - macho::N_FUN => { - if let Ok(name) = nlist.name(endian, self.strings) { - if !name.is_empty() { - current_function = Some((name, nlist.n_value(endian).into())) - } else if let Some((name, address)) = current_function.take() { - if let Some(object) = object { - symbols.push(ObjectMapEntry { - address, - size: nlist.n_value(endian).into(), - name, - object, - }); - } - } - } - } - _ => {} - } - } - ObjectMap { - symbols: SymbolMap::new(symbols), - objects, - } - } -} - -/// An iterator over the symbols of a `MachOFile32`. -pub type MachOSymbolTable32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSymbolTable<'data, 'file, macho::MachHeader32, R>; -/// An iterator over the symbols of a `MachOFile64`. -pub type MachOSymbolTable64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSymbolTable<'data, 'file, macho::MachHeader64, R>; - -/// A symbol table of a `MachOFile`. 
-#[derive(Debug, Clone, Copy)] -pub struct MachOSymbolTable<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file MachOFile<'data, Mach, R>, -} - -impl<'data, 'file, Mach, R> read::private::Sealed for MachOSymbolTable<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Mach, R> ObjectSymbolTable<'data> for MachOSymbolTable<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - type Symbol = MachOSymbol<'data, 'file, Mach, R>; - type SymbolIterator = MachOSymbolIterator<'data, 'file, Mach, R>; - - fn symbols(&self) -> Self::SymbolIterator { - MachOSymbolIterator { - file: self.file, - index: 0, - } - } - - fn symbol_by_index(&self, index: SymbolIndex) -> Result { - let nlist = self.file.symbols.symbol(index.0)?; - MachOSymbol::new(self.file, index, nlist).read_error("Unsupported Mach-O symbol index") - } -} - -/// An iterator over the symbols of a `MachOFile32`. -pub type MachOSymbolIterator32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSymbolIterator<'data, 'file, macho::MachHeader32, R>; -/// An iterator over the symbols of a `MachOFile64`. -pub type MachOSymbolIterator64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSymbolIterator<'data, 'file, macho::MachHeader64, R>; - -/// An iterator over the symbols of a `MachOFile`. 
-pub struct MachOSymbolIterator<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file MachOFile<'data, Mach, R>, - pub(super) index: usize, -} - -impl<'data, 'file, Mach, R> fmt::Debug for MachOSymbolIterator<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MachOSymbolIterator").finish() - } -} - -impl<'data, 'file, Mach, R> Iterator for MachOSymbolIterator<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - type Item = MachOSymbol<'data, 'file, Mach, R>; - - fn next(&mut self) -> Option { - loop { - let index = self.index; - let nlist = self.file.symbols.symbols.get(index)?; - self.index += 1; - if let Some(symbol) = MachOSymbol::new(self.file, SymbolIndex(index), nlist) { - return Some(symbol); - } - } - } -} - -/// A symbol of a `MachOFile32`. -pub type MachOSymbol32<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSymbol<'data, 'file, macho::MachHeader32, R>; -/// A symbol of a `MachOFile64`. -pub type MachOSymbol64<'data, 'file, Endian = Endianness, R = &'data [u8]> = - MachOSymbol<'data, 'file, macho::MachHeader64, R>; - -/// A symbol of a `MachOFile`. 
-#[derive(Debug, Clone, Copy)] -pub struct MachOSymbol<'data, 'file, Mach, R = &'data [u8]> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - file: &'file MachOFile<'data, Mach, R>, - index: SymbolIndex, - nlist: &'data Mach::Nlist, -} - -impl<'data, 'file, Mach, R> MachOSymbol<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - pub(super) fn new( - file: &'file MachOFile<'data, Mach, R>, - index: SymbolIndex, - nlist: &'data Mach::Nlist, - ) -> Option { - if nlist.n_type() & macho::N_STAB != 0 { - return None; - } - Some(MachOSymbol { file, index, nlist }) - } -} - -impl<'data, 'file, Mach, R> read::private::Sealed for MachOSymbol<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Mach, R> ObjectSymbol<'data> for MachOSymbol<'data, 'file, Mach, R> -where - Mach: MachHeader, - R: ReadRef<'data>, -{ - #[inline] - fn index(&self) -> SymbolIndex { - self.index - } - - fn name_bytes(&self) -> Result<&'data [u8]> { - self.nlist.name(self.file.endian, self.file.symbols.strings) - } - - fn name(&self) -> Result<&'data str> { - let name = self.name_bytes()?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 Mach-O symbol name") - } - - #[inline] - fn address(&self) -> u64 { - self.nlist.n_value(self.file.endian).into() - } - - #[inline] - fn size(&self) -> u64 { - 0 - } - - fn kind(&self) -> SymbolKind { - self.section() - .index() - .and_then(|index| self.file.section_internal(index).ok()) - .map(|section| match section.kind { - SectionKind::Text => SymbolKind::Text, - SectionKind::Data - | SectionKind::ReadOnlyData - | SectionKind::ReadOnlyString - | SectionKind::UninitializedData - | SectionKind::Common => SymbolKind::Data, - SectionKind::Tls | SectionKind::UninitializedTls | SectionKind::TlsVariables => { - SymbolKind::Tls - } - _ => SymbolKind::Unknown, - }) - .unwrap_or(SymbolKind::Unknown) - } - - fn section(&self) -> SymbolSection { - match self.nlist.n_type() & macho::N_TYPE { - 
macho::N_UNDF => SymbolSection::Undefined, - macho::N_ABS => SymbolSection::Absolute, - macho::N_SECT => { - let n_sect = self.nlist.n_sect(); - if n_sect != 0 { - SymbolSection::Section(SectionIndex(n_sect as usize)) - } else { - SymbolSection::Unknown - } - } - _ => SymbolSection::Unknown, - } - } - - #[inline] - fn is_undefined(&self) -> bool { - self.nlist.n_type() & macho::N_TYPE == macho::N_UNDF - } - - #[inline] - fn is_definition(&self) -> bool { - self.nlist.is_definition() - } - - #[inline] - fn is_common(&self) -> bool { - // Mach-O common symbols are based on section, not symbol - false - } - - #[inline] - fn is_weak(&self) -> bool { - self.nlist.n_desc(self.file.endian) & (macho::N_WEAK_REF | macho::N_WEAK_DEF) != 0 - } - - fn scope(&self) -> SymbolScope { - let n_type = self.nlist.n_type(); - if n_type & macho::N_TYPE == macho::N_UNDF { - SymbolScope::Unknown - } else if n_type & macho::N_EXT == 0 { - SymbolScope::Compilation - } else if n_type & macho::N_PEXT != 0 { - SymbolScope::Linkage - } else { - SymbolScope::Dynamic - } - } - - #[inline] - fn is_global(&self) -> bool { - self.scope() != SymbolScope::Compilation - } - - #[inline] - fn is_local(&self) -> bool { - self.scope() == SymbolScope::Compilation - } - - #[inline] - fn flags(&self) -> SymbolFlags { - let n_desc = self.nlist.n_desc(self.file.endian); - SymbolFlags::MachO { n_desc } - } -} - -/// A trait for generic access to `Nlist32` and `Nlist64`. 
-#[allow(missing_docs)] -pub trait Nlist: Debug + Pod { - type Word: Into; - type Endian: endian::Endian; - - fn n_strx(&self, endian: Self::Endian) -> u32; - fn n_type(&self) -> u8; - fn n_sect(&self) -> u8; - fn n_desc(&self, endian: Self::Endian) -> u16; - fn n_value(&self, endian: Self::Endian) -> Self::Word; - - fn name<'data, R: ReadRef<'data>>( - &self, - endian: Self::Endian, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]> { - strings - .get(self.n_strx(endian)) - .read_error("Invalid Mach-O symbol name offset") - } - - /// Return true if this is a STAB symbol. - /// - /// This determines the meaning of the `n_type` field. - fn is_stab(&self) -> bool { - self.n_type() & macho::N_STAB != 0 - } - - /// Return true if this is an undefined symbol. - fn is_undefined(&self) -> bool { - let n_type = self.n_type(); - n_type & macho::N_STAB == 0 && n_type & macho::N_TYPE == macho::N_UNDF - } - - /// Return true if the symbol is a definition of a function or data object. - fn is_definition(&self) -> bool { - let n_type = self.n_type(); - n_type & macho::N_STAB == 0 && n_type & macho::N_TYPE != macho::N_UNDF - } - - /// Return the library ordinal. - /// - /// This is either a 1-based index into the dylib load commands, - /// or a special ordinal. 
- #[inline] - fn library_ordinal(&self, endian: Self::Endian) -> u8 { - (self.n_desc(endian) >> 8) as u8 - } -} - -impl Nlist for macho::Nlist32 { - type Word = u32; - type Endian = Endian; - - fn n_strx(&self, endian: Self::Endian) -> u32 { - self.n_strx.get(endian) - } - fn n_type(&self) -> u8 { - self.n_type - } - fn n_sect(&self) -> u8 { - self.n_sect - } - fn n_desc(&self, endian: Self::Endian) -> u16 { - self.n_desc.get(endian) - } - fn n_value(&self, endian: Self::Endian) -> Self::Word { - self.n_value.get(endian) - } -} - -impl Nlist for macho::Nlist64 { - type Word = u64; - type Endian = Endian; - - fn n_strx(&self, endian: Self::Endian) -> u32 { - self.n_strx.get(endian) - } - fn n_type(&self) -> u8 { - self.n_type - } - fn n_sect(&self) -> u8 { - self.n_sect - } - fn n_desc(&self, endian: Self::Endian) -> u16 { - self.n_desc.get(endian) - } - fn n_value(&self, endian: Self::Endian) -> Self::Word { - self.n_value.get(endian) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/mod.rs s390-tools-2.33.1/rust-vendor/object/src/read/mod.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,767 +0,0 @@ -//! Interface for reading object files. 
- -use alloc::borrow::Cow; -use alloc::vec::Vec; -use core::{fmt, result}; - -use crate::common::*; - -mod read_ref; -pub use read_ref::*; - -#[cfg(feature = "std")] -mod read_cache; -#[cfg(feature = "std")] -pub use read_cache::*; - -mod util; -pub use util::*; - -#[cfg(any( - feature = "coff", - feature = "elf", - feature = "macho", - feature = "pe", - feature = "wasm", - feature = "xcoff" -))] -mod any; -#[cfg(any( - feature = "coff", - feature = "elf", - feature = "macho", - feature = "pe", - feature = "wasm", - feature = "xcoff" -))] -pub use any::*; - -#[cfg(feature = "archive")] -pub mod archive; - -#[cfg(feature = "coff")] -pub mod coff; - -#[cfg(feature = "elf")] -pub mod elf; - -#[cfg(feature = "macho")] -pub mod macho; - -#[cfg(feature = "pe")] -pub mod pe; - -#[cfg(feature = "wasm")] -pub mod wasm; - -#[cfg(feature = "xcoff")] -pub mod xcoff; - -mod traits; -pub use traits::*; - -mod private { - pub trait Sealed {} -} - -/// The error type used within the read module. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct Error(&'static str); - -impl fmt::Display for Error { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.0) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for Error {} - -/// The result type used within the read module. -pub type Result = result::Result; - -trait ReadError { - fn read_error(self, error: &'static str) -> Result; -} - -impl ReadError for result::Result { - fn read_error(self, error: &'static str) -> Result { - self.map_err(|()| Error(error)) - } -} - -impl ReadError for result::Result { - fn read_error(self, error: &'static str) -> Result { - self.map_err(|_| Error(error)) - } -} - -impl ReadError for Option { - fn read_error(self, error: &'static str) -> Result { - self.ok_or(Error(error)) - } -} - -/// The native executable file for the target platform. 
-#[cfg(all( - unix, - not(target_os = "macos"), - target_pointer_width = "32", - feature = "elf" -))] -pub type NativeFile<'data, R = &'data [u8]> = elf::ElfFile32<'data, crate::Endianness, R>; - -/// The native executable file for the target platform. -#[cfg(all( - unix, - not(target_os = "macos"), - target_pointer_width = "64", - feature = "elf" -))] -pub type NativeFile<'data, R = &'data [u8]> = elf::ElfFile64<'data, crate::Endianness, R>; - -/// The native executable file for the target platform. -#[cfg(all(target_os = "macos", target_pointer_width = "32", feature = "macho"))] -pub type NativeFile<'data, R = &'data [u8]> = macho::MachOFile32<'data, crate::Endianness, R>; - -/// The native executable file for the target platform. -#[cfg(all(target_os = "macos", target_pointer_width = "64", feature = "macho"))] -pub type NativeFile<'data, R = &'data [u8]> = macho::MachOFile64<'data, crate::Endianness, R>; - -/// The native executable file for the target platform. -#[cfg(all(target_os = "windows", target_pointer_width = "32", feature = "pe"))] -pub type NativeFile<'data, R = &'data [u8]> = pe::PeFile32<'data, R>; - -/// The native executable file for the target platform. -#[cfg(all(target_os = "windows", target_pointer_width = "64", feature = "pe"))] -pub type NativeFile<'data, R = &'data [u8]> = pe::PeFile64<'data, R>; - -/// The native executable file for the target platform. -#[cfg(all(feature = "wasm", target_arch = "wasm32", feature = "wasm"))] -pub type NativeFile<'data, R = &'data [u8]> = wasm::WasmFile<'data, R>; - -/// A file format kind. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum FileKind { - /// A Unix archive. - #[cfg(feature = "archive")] - Archive, - /// A COFF object file. - #[cfg(feature = "coff")] - Coff, - /// A COFF bigobj object file. - /// - /// This supports a larger number of sections. - #[cfg(feature = "coff")] - CoffBig, - /// A Windows short import file. 
- #[cfg(feature = "coff")] - CoffImport, - /// A dyld cache file containing Mach-O images. - #[cfg(feature = "macho")] - DyldCache, - /// A 32-bit ELF file. - #[cfg(feature = "elf")] - Elf32, - /// A 64-bit ELF file. - #[cfg(feature = "elf")] - Elf64, - /// A 32-bit Mach-O file. - #[cfg(feature = "macho")] - MachO32, - /// A 64-bit Mach-O file. - #[cfg(feature = "macho")] - MachO64, - /// A 32-bit Mach-O fat binary. - #[cfg(feature = "macho")] - MachOFat32, - /// A 64-bit Mach-O fat binary. - #[cfg(feature = "macho")] - MachOFat64, - /// A 32-bit PE file. - #[cfg(feature = "pe")] - Pe32, - /// A 64-bit PE file. - #[cfg(feature = "pe")] - Pe64, - /// A Wasm file. - #[cfg(feature = "wasm")] - Wasm, - /// A 32-bit XCOFF file. - #[cfg(feature = "xcoff")] - Xcoff32, - /// A 64-bit XCOFF file. - #[cfg(feature = "xcoff")] - Xcoff64, -} - -impl FileKind { - /// Determine a file kind by parsing the start of the file. - pub fn parse<'data, R: ReadRef<'data>>(data: R) -> Result { - Self::parse_at(data, 0) - } - - /// Determine a file kind by parsing at the given offset. - pub fn parse_at<'data, R: ReadRef<'data>>(data: R, offset: u64) -> Result { - let magic = data - .read_bytes_at(offset, 16) - .read_error("Could not read file magic")?; - if magic.len() < 16 { - return Err(Error("File too short")); - } - - let kind = match [magic[0], magic[1], magic[2], magic[3], magic[4], magic[5], magic[6], magic[7]] { - #[cfg(feature = "archive")] - [b'!', b'<', b'a', b'r', b'c', b'h', b'>', b'\n'] => FileKind::Archive, - #[cfg(feature = "macho")] - [b'd', b'y', b'l', b'd', b'_', b'v', b'1', b' '] => FileKind::DyldCache, - #[cfg(feature = "elf")] - [0x7f, b'E', b'L', b'F', 1, ..] => FileKind::Elf32, - #[cfg(feature = "elf")] - [0x7f, b'E', b'L', b'F', 2, ..] => FileKind::Elf64, - #[cfg(feature = "macho")] - [0xfe, 0xed, 0xfa, 0xce, ..] - | [0xce, 0xfa, 0xed, 0xfe, ..] => FileKind::MachO32, - #[cfg(feature = "macho")] - | [0xfe, 0xed, 0xfa, 0xcf, ..] - | [0xcf, 0xfa, 0xed, 0xfe, ..] 
=> FileKind::MachO64, - #[cfg(feature = "macho")] - [0xca, 0xfe, 0xba, 0xbe, ..] => FileKind::MachOFat32, - #[cfg(feature = "macho")] - [0xca, 0xfe, 0xba, 0xbf, ..] => FileKind::MachOFat64, - #[cfg(feature = "wasm")] - [0x00, b'a', b's', b'm', ..] => FileKind::Wasm, - #[cfg(feature = "pe")] - [b'M', b'Z', ..] if offset == 0 => { - // offset == 0 restriction is because optional_header_magic only looks at offset 0 - match pe::optional_header_magic(data) { - Ok(crate::pe::IMAGE_NT_OPTIONAL_HDR32_MAGIC) => { - FileKind::Pe32 - } - Ok(crate::pe::IMAGE_NT_OPTIONAL_HDR64_MAGIC) => { - FileKind::Pe64 - } - _ => return Err(Error("Unknown MS-DOS file")), - } - } - // TODO: more COFF machines - #[cfg(feature = "coff")] - // COFF arm - [0xc4, 0x01, ..] - // COFF arm64 - | [0x64, 0xaa, ..] - // COFF x86 - | [0x4c, 0x01, ..] - // COFF x86-64 - | [0x64, 0x86, ..] => FileKind::Coff, - #[cfg(feature = "coff")] - [0x00, 0x00, 0xff, 0xff, 0x00, 0x00, ..] => FileKind::CoffImport, - #[cfg(feature = "coff")] - [0x00, 0x00, 0xff, 0xff, 0x02, 0x00, ..] if offset == 0 => { - // offset == 0 restriction is because anon_object_class_id only looks at offset 0 - match coff::anon_object_class_id(data) { - Ok(crate::pe::ANON_OBJECT_HEADER_BIGOBJ_CLASS_ID) => FileKind::CoffBig, - _ => return Err(Error("Unknown anon object file")), - } - } - #[cfg(feature = "xcoff")] - [0x01, 0xdf, ..] => FileKind::Xcoff32, - #[cfg(feature = "xcoff")] - [0x01, 0xf7, ..] => FileKind::Xcoff64, - _ => return Err(Error("Unknown file magic")), - }; - Ok(kind) - } -} - -/// An object kind. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum ObjectKind { - /// The object kind is unknown. - Unknown, - /// Relocatable object. - Relocatable, - /// Executable. - Executable, - /// Dynamic shared object. - Dynamic, - /// Core. - Core, -} - -/// The index used to identify a section of a file. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct SectionIndex(pub usize); - -/// The index used to identify a symbol of a file. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct SymbolIndex(pub usize); - -/// The section where a symbol is defined. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum SymbolSection { - /// The section is unknown. - Unknown, - /// The section is not applicable for this symbol (such as file symbols). - None, - /// The symbol is undefined. - Undefined, - /// The symbol has an absolute value. - Absolute, - /// The symbol is a zero-initialized symbol that will be combined with duplicate definitions. - Common, - /// The symbol is defined in the given section. - Section(SectionIndex), -} - -impl SymbolSection { - /// Returns the section index for the section where the symbol is defined. - /// - /// May return `None` if the symbol is not defined in a section. - #[inline] - pub fn index(self) -> Option { - if let SymbolSection::Section(index) = self { - Some(index) - } else { - None - } - } -} - -/// An entry in a `SymbolMap`. -pub trait SymbolMapEntry { - /// The symbol address. - fn address(&self) -> u64; -} - -/// A map from addresses to symbols. -#[derive(Debug, Default, Clone)] -pub struct SymbolMap { - symbols: Vec, -} - -impl SymbolMap { - /// Construct a new symbol map. - /// - /// This function will sort the symbols by address. - pub fn new(mut symbols: Vec) -> Self { - symbols.sort_unstable_by_key(|s| s.address()); - SymbolMap { symbols } - } - - /// Get the symbol before the given address. - pub fn get(&self, address: u64) -> Option<&T> { - let index = match self - .symbols - .binary_search_by_key(&address, |symbol| symbol.address()) - { - Ok(index) => index, - Err(index) => index.checked_sub(1)?, - }; - self.symbols.get(index) - } - - /// Get all symbols in the map. 
- #[inline] - pub fn symbols(&self) -> &[T] { - &self.symbols - } -} - -/// A `SymbolMap` entry for symbol names. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct SymbolMapName<'data> { - address: u64, - name: &'data str, -} - -impl<'data> SymbolMapName<'data> { - /// Construct a `SymbolMapName`. - pub fn new(address: u64, name: &'data str) -> Self { - SymbolMapName { address, name } - } - - /// The symbol address. - #[inline] - pub fn address(&self) -> u64 { - self.address - } - - /// The symbol name. - #[inline] - pub fn name(&self) -> &'data str { - self.name - } -} - -impl<'data> SymbolMapEntry for SymbolMapName<'data> { - #[inline] - fn address(&self) -> u64 { - self.address - } -} - -/// A map from addresses to symbol names and object files. -/// -/// This is derived from STAB entries in Mach-O files. -#[derive(Debug, Default, Clone)] -pub struct ObjectMap<'data> { - symbols: SymbolMap>, - objects: Vec<&'data [u8]>, -} - -impl<'data> ObjectMap<'data> { - /// Get the entry containing the given address. - pub fn get(&self, address: u64) -> Option<&ObjectMapEntry<'data>> { - self.symbols - .get(address) - .filter(|entry| entry.size == 0 || address.wrapping_sub(entry.address) < entry.size) - } - - /// Get all symbols in the map. - #[inline] - pub fn symbols(&self) -> &[ObjectMapEntry<'data>] { - self.symbols.symbols() - } - - /// Get all objects in the map. - #[inline] - pub fn objects(&self) -> &[&'data [u8]] { - &self.objects - } -} - -/// A `ObjectMap` entry. -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] -pub struct ObjectMapEntry<'data> { - address: u64, - size: u64, - name: &'data [u8], - object: usize, -} - -impl<'data> ObjectMapEntry<'data> { - /// Get the symbol address. - #[inline] - pub fn address(&self) -> u64 { - self.address - } - - /// Get the symbol size. - /// - /// This may be 0 if the size is unknown. - #[inline] - pub fn size(&self) -> u64 { - self.size - } - - /// Get the symbol name. 
- #[inline] - pub fn name(&self) -> &'data [u8] { - self.name - } - - /// Get the index of the object file name. - #[inline] - pub fn object_index(&self) -> usize { - self.object - } - - /// Get the object file name. - #[inline] - pub fn object(&self, map: &ObjectMap<'data>) -> &'data [u8] { - map.objects[self.object] - } -} - -impl<'data> SymbolMapEntry for ObjectMapEntry<'data> { - #[inline] - fn address(&self) -> u64 { - self.address - } -} - -/// An imported symbol. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct Import<'data> { - library: ByteString<'data>, - // TODO: or ordinal - name: ByteString<'data>, -} - -impl<'data> Import<'data> { - /// The symbol name. - #[inline] - pub fn name(&self) -> &'data [u8] { - self.name.0 - } - - /// The name of the library to import the symbol from. - #[inline] - pub fn library(&self) -> &'data [u8] { - self.library.0 - } -} - -/// An exported symbol. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct Export<'data> { - // TODO: and ordinal? - name: ByteString<'data>, - address: u64, -} - -impl<'data> Export<'data> { - /// The symbol name. - #[inline] - pub fn name(&self) -> &'data [u8] { - self.name.0 - } - - /// The virtual address of the symbol. - #[inline] - pub fn address(&self) -> u64 { - self.address - } -} - -/// PDB Information -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct CodeView<'data> { - guid: [u8; 16], - path: ByteString<'data>, - age: u32, -} - -impl<'data> CodeView<'data> { - /// The path to the PDB as stored in CodeView - #[inline] - pub fn path(&self) -> &'data [u8] { - self.path.0 - } - - /// The age of the PDB - #[inline] - pub fn age(&self) -> u32 { - self.age - } - - /// The GUID of the PDB. - #[inline] - pub fn guid(&self) -> [u8; 16] { - self.guid - } -} - -/// The target referenced by a relocation. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum RelocationTarget { - /// The target is a symbol. 
- Symbol(SymbolIndex), - /// The target is a section. - Section(SectionIndex), - /// The offset is an absolute address. - Absolute, -} - -/// A relocation entry. -#[derive(Debug)] -pub struct Relocation { - kind: RelocationKind, - encoding: RelocationEncoding, - size: u8, - target: RelocationTarget, - addend: i64, - implicit_addend: bool, -} - -impl Relocation { - /// The operation used to calculate the result of the relocation. - #[inline] - pub fn kind(&self) -> RelocationKind { - self.kind - } - - /// Information about how the result of the relocation operation is encoded in the place. - #[inline] - pub fn encoding(&self) -> RelocationEncoding { - self.encoding - } - - /// The size in bits of the place of the relocation. - /// - /// If 0, then the size is determined by the relocation kind. - #[inline] - pub fn size(&self) -> u8 { - self.size - } - - /// The target of the relocation. - #[inline] - pub fn target(&self) -> RelocationTarget { - self.target - } - - /// The addend to use in the relocation calculation. - #[inline] - pub fn addend(&self) -> i64 { - self.addend - } - - /// Set the addend to use in the relocation calculation. - #[inline] - pub fn set_addend(&mut self, addend: i64) { - self.addend = addend - } - - /// Returns true if there is an implicit addend stored in the data at the offset - /// to be relocated. - #[inline] - pub fn has_implicit_addend(&self) -> bool { - self.implicit_addend - } -} - -/// A data compression format. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum CompressionFormat { - /// The data is uncompressed. - None, - /// The data is compressed, but the compression format is unknown. - Unknown, - /// ZLIB/DEFLATE. - /// - /// Used for ELF compression and GNU compressed debug information. - Zlib, - /// Zstandard. - /// - /// Used for ELF compression. - Zstandard, -} - -/// A range in a file that may be compressed. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct CompressedFileRange { - /// The data compression format. - pub format: CompressionFormat, - /// The file offset of the compressed data. - pub offset: u64, - /// The compressed data size. - pub compressed_size: u64, - /// The uncompressed data size. - pub uncompressed_size: u64, -} - -impl CompressedFileRange { - /// Data that is uncompressed. - #[inline] - pub fn none(range: Option<(u64, u64)>) -> Self { - if let Some((offset, size)) = range { - CompressedFileRange { - format: CompressionFormat::None, - offset, - compressed_size: size, - uncompressed_size: size, - } - } else { - CompressedFileRange { - format: CompressionFormat::None, - offset: 0, - compressed_size: 0, - uncompressed_size: 0, - } - } - } - - /// Convert to `CompressedData` by reading from the file. - pub fn data<'data, R: ReadRef<'data>>(self, file: R) -> Result> { - let data = file - .read_bytes_at(self.offset, self.compressed_size) - .read_error("Invalid compressed data size or offset")?; - Ok(CompressedData { - format: self.format, - data, - uncompressed_size: self.uncompressed_size, - }) - } -} - -/// Data that may be compressed. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct CompressedData<'data> { - /// The data compression format. - pub format: CompressionFormat, - /// The compressed data. - pub data: &'data [u8], - /// The uncompressed data size. - pub uncompressed_size: u64, -} - -impl<'data> CompressedData<'data> { - /// Data that is uncompressed. - #[inline] - pub fn none(data: &'data [u8]) -> Self { - CompressedData { - format: CompressionFormat::None, - data, - uncompressed_size: data.len() as u64, - } - } - - /// Return the uncompressed data. - /// - /// Returns an error for invalid data or unsupported compression. - /// This includes if the data is compressed but the `compression` feature - /// for this crate is disabled. 
- pub fn decompress(self) -> Result> { - match self.format { - CompressionFormat::None => Ok(Cow::Borrowed(self.data)), - #[cfg(feature = "compression")] - CompressionFormat::Zlib => { - use core::convert::TryInto; - let size = self - .uncompressed_size - .try_into() - .ok() - .read_error("Uncompressed data size is too large.")?; - let mut decompressed = Vec::with_capacity(size); - let mut decompress = flate2::Decompress::new(true); - decompress - .decompress_vec( - self.data, - &mut decompressed, - flate2::FlushDecompress::Finish, - ) - .ok() - .read_error("Invalid zlib compressed data")?; - Ok(Cow::Owned(decompressed)) - } - #[cfg(feature = "compression")] - CompressionFormat::Zstandard => { - use core::convert::TryInto; - use std::io::Read; - let size = self - .uncompressed_size - .try_into() - .ok() - .read_error("Uncompressed data size is too large.")?; - let mut decompressed = Vec::with_capacity(size); - let mut decoder = ruzstd::StreamingDecoder::new(self.data) - .ok() - .read_error("Invalid zstd compressed data")?; - decoder - .read_to_end(&mut decompressed) - .ok() - .read_error("Invalid zstd compressed data")?; - Ok(Cow::Owned(decompressed)) - } - _ => Err(Error("Unsupported compressed data.")), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/pe/data_directory.rs s390-tools-2.33.1/rust-vendor/object/src/read/pe/data_directory.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/pe/data_directory.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/pe/data_directory.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,211 +0,0 @@ -use core::slice; - -use crate::read::{Error, ReadError, ReadRef, Result}; -use crate::{pe, LittleEndian as LE}; - -use super::{ - DelayLoadImportTable, ExportTable, ImportTable, RelocationBlockIterator, ResourceDirectory, - SectionTable, -}; - -/// The table of data directories in a PE file. 
-#[derive(Debug, Clone, Copy)] -pub struct DataDirectories<'data> { - entries: &'data [pe::ImageDataDirectory], -} - -impl<'data> DataDirectories<'data> { - /// Parse the data directory table. - /// - /// `data` must be the remaining optional data following the - /// [optional header](pe::ImageOptionalHeader64). `number` must be from the - /// [`number_of_rva_and_sizes`](pe::ImageOptionalHeader64::number_of_rva_and_sizes) - /// field of the optional header. - pub fn parse(data: &'data [u8], number: u32) -> Result { - let entries = data - .read_slice_at(0, number as usize) - .read_error("Invalid PE number of RVA and sizes")?; - Ok(DataDirectories { entries }) - } - - /// The number of data directories. - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - self.entries.len() - } - - /// Iterator over the data directories. - pub fn iter(&self) -> slice::Iter<'data, pe::ImageDataDirectory> { - self.entries.iter() - } - - /// Iterator which gives the directories as well as their index (one of the IMAGE_DIRECTORY_ENTRY_* constants). - pub fn enumerate(&self) -> core::iter::Enumerate> { - self.entries.iter().enumerate() - } - - /// Returns the data directory at the given index. - /// - /// Index should be one of the `IMAGE_DIRECTORY_ENTRY_*` constants. - /// - /// Returns `None` if the index is larger than the table size, - /// or if the entry at the index has a zero virtual address. - pub fn get(&self, index: usize) -> Option<&'data pe::ImageDataDirectory> { - self.entries - .get(index) - .filter(|d| d.virtual_address.get(LE) != 0) - } - - /// Returns the unparsed export directory. - /// - /// `data` must be the entire file data. 
- pub fn export_directory>( - &self, - data: R, - sections: &SectionTable<'data>, - ) -> Result> { - let data_dir = match self.get(pe::IMAGE_DIRECTORY_ENTRY_EXPORT) { - Some(data_dir) => data_dir, - None => return Ok(None), - }; - let export_data = data_dir.data(data, sections)?; - ExportTable::parse_directory(export_data).map(Some) - } - - /// Returns the partially parsed export directory. - /// - /// `data` must be the entire file data. - pub fn export_table>( - &self, - data: R, - sections: &SectionTable<'data>, - ) -> Result>> { - let data_dir = match self.get(pe::IMAGE_DIRECTORY_ENTRY_EXPORT) { - Some(data_dir) => data_dir, - None => return Ok(None), - }; - let export_va = data_dir.virtual_address.get(LE); - let export_data = data_dir.data(data, sections)?; - ExportTable::parse(export_data, export_va).map(Some) - } - - /// Returns the partially parsed import directory. - /// - /// `data` must be the entire file data. - pub fn import_table>( - &self, - data: R, - sections: &SectionTable<'data>, - ) -> Result>> { - let data_dir = match self.get(pe::IMAGE_DIRECTORY_ENTRY_IMPORT) { - Some(data_dir) => data_dir, - None => return Ok(None), - }; - let import_va = data_dir.virtual_address.get(LE); - let (section_data, section_va) = sections - .pe_data_containing(data, import_va) - .read_error("Invalid import data dir virtual address")?; - Ok(Some(ImportTable::new(section_data, section_va, import_va))) - } - - /// Returns the partially parsed delay-load import directory. - /// - /// `data` must be the entire file data. 
- pub fn delay_load_import_table>( - &self, - data: R, - sections: &SectionTable<'data>, - ) -> Result>> { - let data_dir = match self.get(pe::IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT) { - Some(data_dir) => data_dir, - None => return Ok(None), - }; - let import_va = data_dir.virtual_address.get(LE); - let (section_data, section_va) = sections - .pe_data_containing(data, import_va) - .read_error("Invalid import data dir virtual address")?; - Ok(Some(DelayLoadImportTable::new( - section_data, - section_va, - import_va, - ))) - } - - /// Returns the blocks in the base relocation directory. - /// - /// `data` must be the entire file data. - pub fn relocation_blocks>( - &self, - data: R, - sections: &SectionTable<'data>, - ) -> Result>> { - let data_dir = match self.get(pe::IMAGE_DIRECTORY_ENTRY_BASERELOC) { - Some(data_dir) => data_dir, - None => return Ok(None), - }; - let reloc_data = data_dir.data(data, sections)?; - Ok(Some(RelocationBlockIterator::new(reloc_data))) - } - - /// Returns the resource directory. - /// - /// `data` must be the entire file data. - pub fn resource_directory>( - &self, - data: R, - sections: &SectionTable<'data>, - ) -> Result>> { - let data_dir = match self.get(pe::IMAGE_DIRECTORY_ENTRY_RESOURCE) { - Some(data_dir) => data_dir, - None => return Ok(None), - }; - let rsrc_data = data_dir.data(data, sections)?; - Ok(Some(ResourceDirectory::new(rsrc_data))) - } -} - -impl pe::ImageDataDirectory { - /// Return the virtual address range of this directory entry. - pub fn address_range(&self) -> (u32, u32) { - (self.virtual_address.get(LE), self.size.get(LE)) - } - - /// Return the file offset and size of this directory entry. - /// - /// This function has some limitations: - /// - It requires that the data is contained in a single section. - /// - It uses the size field of the directory entry, which is - /// not desirable for all data directories. 
- /// - It uses the `virtual_address` of the directory entry as an address, - /// which is not valid for `IMAGE_DIRECTORY_ENTRY_SECURITY`. - pub fn file_range(&self, sections: &SectionTable<'_>) -> Result<(u32, u32)> { - let (offset, section_size) = sections - .pe_file_range_at(self.virtual_address.get(LE)) - .read_error("Invalid data dir virtual address")?; - let size = self.size.get(LE); - if size > section_size { - return Err(Error("Invalid data dir size")); - } - Ok((offset, size)) - } - - /// Get the data referenced by this directory entry. - /// - /// This function has some limitations: - /// - It requires that the data is contained in a single section. - /// - It uses the size field of the directory entry, which is - /// not desirable for all data directories. - /// - It uses the `virtual_address` of the directory entry as an address, - /// which is not valid for `IMAGE_DIRECTORY_ENTRY_SECURITY`. - pub fn data<'data, R: ReadRef<'data>>( - &self, - data: R, - sections: &SectionTable<'data>, - ) -> Result<&'data [u8]> { - sections - .pe_data_at(data, self.virtual_address.get(LE)) - .read_error("Invalid data dir virtual address")? - .get(..self.size.get(LE) as usize) - .read_error("Invalid data dir size") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/pe/export.rs s390-tools-2.33.1/rust-vendor/object/src/read/pe/export.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/pe/export.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/pe/export.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,331 +0,0 @@ -use alloc::vec::Vec; -use core::fmt::Debug; - -use crate::read::{ByteString, Bytes, Error, ReadError, ReadRef, Result}; -use crate::{pe, LittleEndian as LE, U16Bytes, U32Bytes}; - -/// Where an export is pointing to. -#[derive(Clone, Copy)] -pub enum ExportTarget<'data> { - /// The address of the export, relative to the image base. - Address(u32), - /// Forwarded to an export ordinal in another DLL. 
- /// - /// This gives the name of the DLL, and the ordinal. - ForwardByOrdinal(&'data [u8], u32), - /// Forwarded to an export name in another DLL. - /// - /// This gives the name of the DLL, and the export name. - ForwardByName(&'data [u8], &'data [u8]), -} - -impl<'data> ExportTarget<'data> { - /// Returns true if the target is an address. - pub fn is_address(&self) -> bool { - match self { - ExportTarget::Address(_) => true, - _ => false, - } - } - - /// Returns true if the export is forwarded to another DLL. - pub fn is_forward(&self) -> bool { - !self.is_address() - } -} - -/// An export from a PE file. -/// -/// There are multiple kinds of PE exports (with or without a name, and local or forwarded). -#[derive(Clone, Copy)] -pub struct Export<'data> { - /// The ordinal of the export. - /// - /// These are sequential, starting at a base specified in the DLL. - pub ordinal: u32, - /// The name of the export, if known. - pub name: Option<&'data [u8]>, - /// The target of this export. - pub target: ExportTarget<'data>, -} - -impl<'a> Debug for Export<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::result::Result<(), core::fmt::Error> { - f.debug_struct("Export") - .field("ordinal", &self.ordinal) - .field("name", &self.name.map(ByteString)) - .field("target", &self.target) - .finish() - } -} - -impl<'a> Debug for ExportTarget<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::result::Result<(), core::fmt::Error> { - match self { - ExportTarget::Address(address) => write!(f, "Address({:#x})", address), - ExportTarget::ForwardByOrdinal(library, ordinal) => write!( - f, - "ForwardByOrdinal({:?}.#{})", - ByteString(library), - ordinal - ), - ExportTarget::ForwardByName(library, name) => write!( - f, - "ForwardByName({:?}.{:?})", - ByteString(library), - ByteString(name) - ), - } - } -} - -/// A partially parsed PE export table. 
-#[derive(Debug, Clone)] -pub struct ExportTable<'data> { - data: Bytes<'data>, - virtual_address: u32, - directory: &'data pe::ImageExportDirectory, - addresses: &'data [U32Bytes], - names: &'data [U32Bytes], - name_ordinals: &'data [U16Bytes], -} - -impl<'data> ExportTable<'data> { - /// Parse the export table given its section data and address. - pub fn parse(data: &'data [u8], virtual_address: u32) -> Result { - let directory = Self::parse_directory(data)?; - let data = Bytes(data); - - let mut addresses = &[][..]; - let address_of_functions = directory.address_of_functions.get(LE); - if address_of_functions != 0 { - addresses = data - .read_slice_at::>( - address_of_functions.wrapping_sub(virtual_address) as usize, - directory.number_of_functions.get(LE) as usize, - ) - .read_error("Invalid PE export address table")?; - } - - let mut names = &[][..]; - let mut name_ordinals = &[][..]; - let address_of_names = directory.address_of_names.get(LE); - let address_of_name_ordinals = directory.address_of_name_ordinals.get(LE); - if address_of_names != 0 { - if address_of_name_ordinals == 0 { - return Err(Error("Missing PE export ordinal table")); - } - - let number = directory.number_of_names.get(LE) as usize; - names = data - .read_slice_at::>( - address_of_names.wrapping_sub(virtual_address) as usize, - number, - ) - .read_error("Invalid PE export name pointer table")?; - name_ordinals = data - .read_slice_at::>( - address_of_name_ordinals.wrapping_sub(virtual_address) as usize, - number, - ) - .read_error("Invalid PE export ordinal table")?; - } - - Ok(ExportTable { - data, - virtual_address, - directory, - addresses, - names, - name_ordinals, - }) - } - - /// Parse the export directory given its section data. - pub fn parse_directory(data: &'data [u8]) -> Result<&'data pe::ImageExportDirectory> { - data.read_at::(0) - .read_error("Invalid PE export dir size") - } - - /// Returns the header of the export table. 
- pub fn directory(&self) -> &'data pe::ImageExportDirectory { - self.directory - } - - /// Returns the base value of ordinals. - /// - /// Adding this to an address index will give an ordinal. - pub fn ordinal_base(&self) -> u32 { - self.directory.base.get(LE) - } - - /// Returns the unparsed address table. - /// - /// An address table entry may be a local address, or the address of a forwarded export entry. - /// See [`Self::is_forward`] and [`Self::target_from_address`]. - pub fn addresses(&self) -> &'data [U32Bytes] { - self.addresses - } - - /// Returns the unparsed name pointer table. - /// - /// A name pointer table entry can be used with [`Self::name_from_pointer`]. - pub fn name_pointers(&self) -> &'data [U32Bytes] { - self.names - } - - /// Returns the unparsed ordinal table. - /// - /// An ordinal table entry is a 0-based index into the address table. - /// See [`Self::address_by_index`] and [`Self::target_by_index`]. - pub fn name_ordinals(&self) -> &'data [U16Bytes] { - self.name_ordinals - } - - /// Returns an iterator for the entries in the name pointer table and ordinal table. - /// - /// A name pointer table entry can be used with [`Self::name_from_pointer`]. - /// - /// An ordinal table entry is a 0-based index into the address table. - /// See [`Self::address_by_index`] and [`Self::target_by_index`]. - pub fn name_iter(&self) -> impl Iterator + 'data { - self.names - .iter() - .map(|x| x.get(LE)) - .zip(self.name_ordinals.iter().map(|x| x.get(LE))) - } - - /// Returns the export address table entry at the given address index. - /// - /// This may be a local address, or the address of a forwarded export entry. - /// See [`Self::is_forward`] and [`Self::target_from_address`]. - /// - /// `index` is a 0-based index into the export address table. - pub fn address_by_index(&self, index: u32) -> Result { - Ok(self - .addresses - .get(index as usize) - .read_error("Invalid PE export address index")? 
- .get(LE)) - } - - /// Returns the export address table entry at the given ordinal. - /// - /// This may be a local address, or the address of a forwarded export entry. - /// See [`Self::is_forward`] and [`Self::target_from_address`]. - pub fn address_by_ordinal(&self, ordinal: u32) -> Result { - self.address_by_index(ordinal.wrapping_sub(self.ordinal_base())) - } - - /// Returns the target of the export at the given address index. - /// - /// `index` is a 0-based index into the export address table. - pub fn target_by_index(&self, index: u32) -> Result> { - self.target_from_address(self.address_by_index(index)?) - } - - /// Returns the target of the export at the given ordinal. - pub fn target_by_ordinal(&self, ordinal: u32) -> Result> { - self.target_from_address(self.address_by_ordinal(ordinal)?) - } - - /// Convert an export address table entry into a target. - pub fn target_from_address(&self, address: u32) -> Result> { - Ok(if let Some(forward) = self.forward_string(address)? { - let i = forward - .iter() - .position(|x| *x == b'.') - .read_error("Missing PE forwarded export separator")?; - let library = &forward[..i]; - match &forward[i + 1..] { - [b'#', digits @ ..] => { - let ordinal = - parse_ordinal(digits).read_error("Invalid PE forwarded export ordinal")?; - ExportTarget::ForwardByOrdinal(library, ordinal) - } - [] => { - return Err(Error("Missing PE forwarded export name")); - } - name => ExportTarget::ForwardByName(library, name), - } - } else { - ExportTarget::Address(address) - }) - } - - fn forward_offset(&self, address: u32) -> Option { - let offset = address.wrapping_sub(self.virtual_address) as usize; - if offset < self.data.len() { - Some(offset) - } else { - None - } - } - - /// Return true if the export address table entry is a forward. - pub fn is_forward(&self, address: u32) -> bool { - self.forward_offset(address).is_some() - } - - /// Return the forward string if the export address table entry is a forward. 
- pub fn forward_string(&self, address: u32) -> Result> { - if let Some(offset) = self.forward_offset(address) { - self.data - .read_string_at(offset) - .read_error("Invalid PE forwarded export address") - .map(Some) - } else { - Ok(None) - } - } - - /// Convert an export name pointer table entry into a name. - pub fn name_from_pointer(&self, name_pointer: u32) -> Result<&'data [u8]> { - let offset = name_pointer.wrapping_sub(self.virtual_address); - self.data - .read_string_at(offset as usize) - .read_error("Invalid PE export name pointer") - } - - /// Returns the parsed exports in this table. - pub fn exports(&self) -> Result>> { - // First, let's list all exports. - let mut exports = Vec::new(); - let ordinal_base = self.ordinal_base(); - for (i, address) in self.addresses.iter().enumerate() { - // Convert from an array index to an ordinal. - let ordinal = ordinal_base.wrapping_add(i as u32); - let target = self.target_from_address(address.get(LE))?; - exports.push(Export { - ordinal, - target, - // Might be populated later. - name: None, - }); - } - - // Now, check whether some (or all) of them have an associated name. - // `ordinal_index` is a 0-based index into `addresses`. - for (name_pointer, ordinal_index) in self.name_iter() { - let name = self.name_from_pointer(name_pointer)?; - exports - .get_mut(ordinal_index as usize) - .read_error("Invalid PE export ordinal")? 
- .name = Some(name); - } - - Ok(exports) - } -} - -fn parse_ordinal(digits: &[u8]) -> Option { - if digits.is_empty() { - return None; - } - let mut result: u32 = 0; - for &c in digits { - let x = (c as char).to_digit(10)?; - result = result.checked_mul(10)?.checked_add(x)?; - } - Some(result) -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/pe/file.rs s390-tools-2.33.1/rust-vendor/object/src/read/pe/file.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/pe/file.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/pe/file.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1029 +0,0 @@ -use alloc::vec::Vec; -use core::fmt::Debug; -use core::{mem, str}; - -use core::convert::TryInto; - -use crate::read::coff::{CoffCommon, CoffSymbol, CoffSymbolIterator, CoffSymbolTable, SymbolTable}; -use crate::read::{ - self, Architecture, ComdatKind, Error, Export, FileFlags, Import, NoDynamicRelocationIterator, - Object, ObjectComdat, ObjectKind, ReadError, ReadRef, Result, SectionIndex, SymbolIndex, -}; -use crate::{pe, ByteString, Bytes, CodeView, LittleEndian as LE, Pod, U32}; - -use super::{ - DataDirectories, ExportTable, ImageThunkData, ImportTable, PeSection, PeSectionIterator, - PeSegment, PeSegmentIterator, RichHeaderInfo, SectionTable, -}; - -/// A PE32 (32-bit) image file. -pub type PeFile32<'data, R = &'data [u8]> = PeFile<'data, pe::ImageNtHeaders32, R>; -/// A PE32+ (64-bit) image file. -pub type PeFile64<'data, R = &'data [u8]> = PeFile<'data, pe::ImageNtHeaders64, R>; - -/// A PE object file. 
-#[derive(Debug)] -pub struct PeFile<'data, Pe, R = &'data [u8]> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - pub(super) dos_header: &'data pe::ImageDosHeader, - pub(super) nt_headers: &'data Pe, - pub(super) data_directories: DataDirectories<'data>, - pub(super) common: CoffCommon<'data, R>, - pub(super) data: R, -} - -impl<'data, Pe, R> PeFile<'data, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - /// Parse the raw PE file data. - pub fn parse(data: R) -> Result { - let dos_header = pe::ImageDosHeader::parse(data)?; - let mut offset = dos_header.nt_headers_offset().into(); - let (nt_headers, data_directories) = Pe::parse(data, &mut offset)?; - let sections = nt_headers.sections(data, offset)?; - let coff_symbols = nt_headers.symbols(data); - let image_base = nt_headers.optional_header().image_base(); - - Ok(PeFile { - dos_header, - nt_headers, - data_directories, - common: CoffCommon { - sections, - // The PE file format deprecates the COFF symbol table (https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#coff-file-header-object-and-image) - // We do not want to prevent parsing the rest of the PE file for a corrupt COFF header, but rather return an empty symbol table - symbols: coff_symbols.unwrap_or_default(), - image_base, - }, - data, - }) - } - - /// Returns this binary data. - pub fn data(&self) -> R { - self.data - } - - /// Return the DOS header of this file. - pub fn dos_header(&self) -> &'data pe::ImageDosHeader { - self.dos_header - } - - /// Return the NT Headers of this file. - pub fn nt_headers(&self) -> &'data Pe { - self.nt_headers - } - - /// Returns information about the rich header of this file (if any). - pub fn rich_header_info(&self) -> Option> { - RichHeaderInfo::parse(self.data, self.dos_header.nt_headers_offset().into()) - } - - /// Returns the section table of this binary. - pub fn section_table(&self) -> SectionTable<'data> { - self.common.sections - } - - /// Returns the data directories of this file. 
- pub fn data_directories(&self) -> DataDirectories<'data> { - self.data_directories - } - - /// Returns the data directory at the given index. - pub fn data_directory(&self, id: usize) -> Option<&'data pe::ImageDataDirectory> { - self.data_directories.get(id) - } - - /// Returns the export table of this file. - /// - /// The export table is located using the data directory. - pub fn export_table(&self) -> Result>> { - self.data_directories - .export_table(self.data, &self.common.sections) - } - - /// Returns the import table of this file. - /// - /// The import table is located using the data directory. - pub fn import_table(&self) -> Result>> { - self.data_directories - .import_table(self.data, &self.common.sections) - } - - pub(super) fn section_alignment(&self) -> u64 { - u64::from(self.nt_headers.optional_header().section_alignment()) - } -} - -impl<'data, Pe, R> read::private::Sealed for PeFile<'data, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Pe, R> Object<'data, 'file> for PeFile<'data, Pe, R> -where - 'data: 'file, - Pe: ImageNtHeaders, - R: 'file + ReadRef<'data>, -{ - type Segment = PeSegment<'data, 'file, Pe, R>; - type SegmentIterator = PeSegmentIterator<'data, 'file, Pe, R>; - type Section = PeSection<'data, 'file, Pe, R>; - type SectionIterator = PeSectionIterator<'data, 'file, Pe, R>; - type Comdat = PeComdat<'data, 'file, Pe, R>; - type ComdatIterator = PeComdatIterator<'data, 'file, Pe, R>; - type Symbol = CoffSymbol<'data, 'file, R>; - type SymbolIterator = CoffSymbolIterator<'data, 'file, R>; - type SymbolTable = CoffSymbolTable<'data, 'file, R>; - type DynamicRelocationIterator = NoDynamicRelocationIterator; - - fn architecture(&self) -> Architecture { - match self.nt_headers.file_header().machine.get(LE) { - pe::IMAGE_FILE_MACHINE_ARMNT => Architecture::Arm, - pe::IMAGE_FILE_MACHINE_ARM64 => Architecture::Aarch64, - pe::IMAGE_FILE_MACHINE_I386 => Architecture::I386, - pe::IMAGE_FILE_MACHINE_AMD64 => 
Architecture::X86_64, - _ => Architecture::Unknown, - } - } - - #[inline] - fn is_little_endian(&self) -> bool { - // Only little endian is supported. - true - } - - #[inline] - fn is_64(&self) -> bool { - self.nt_headers.is_type_64() - } - - fn kind(&self) -> ObjectKind { - let characteristics = self.nt_headers.file_header().characteristics.get(LE); - if characteristics & pe::IMAGE_FILE_DLL != 0 { - ObjectKind::Dynamic - } else if characteristics & pe::IMAGE_FILE_SYSTEM != 0 { - ObjectKind::Unknown - } else { - ObjectKind::Executable - } - } - - fn segments(&'file self) -> PeSegmentIterator<'data, 'file, Pe, R> { - PeSegmentIterator { - file: self, - iter: self.common.sections.iter(), - } - } - - fn section_by_name_bytes( - &'file self, - section_name: &[u8], - ) -> Option> { - self.common - .sections - .section_by_name(self.common.symbols.strings(), section_name) - .map(|(index, section)| PeSection { - file: self, - index: SectionIndex(index), - section, - }) - } - - fn section_by_index( - &'file self, - index: SectionIndex, - ) -> Result> { - let section = self.common.sections.section(index.0)?; - Ok(PeSection { - file: self, - index, - section, - }) - } - - fn sections(&'file self) -> PeSectionIterator<'data, 'file, Pe, R> { - PeSectionIterator { - file: self, - iter: self.common.sections.iter().enumerate(), - } - } - - fn comdats(&'file self) -> PeComdatIterator<'data, 'file, Pe, R> { - PeComdatIterator { file: self } - } - - fn symbol_by_index(&'file self, index: SymbolIndex) -> Result> { - let symbol = self.common.symbols.symbol(index.0)?; - Ok(CoffSymbol { - file: &self.common, - index, - symbol, - }) - } - - fn symbols(&'file self) -> CoffSymbolIterator<'data, 'file, R> { - CoffSymbolIterator { - file: &self.common, - index: 0, - } - } - - fn symbol_table(&'file self) -> Option> { - Some(CoffSymbolTable { file: &self.common }) - } - - fn dynamic_symbols(&'file self) -> CoffSymbolIterator<'data, 'file, R> { - CoffSymbolIterator { - file: &self.common, - // 
Hack: don't return any. - index: self.common.symbols.len(), - } - } - - fn dynamic_symbol_table(&'file self) -> Option> { - None - } - - fn dynamic_relocations(&'file self) -> Option { - None - } - - fn imports(&self) -> Result>> { - let mut imports = Vec::new(); - if let Some(import_table) = self.import_table()? { - let mut import_descs = import_table.descriptors()?; - while let Some(import_desc) = import_descs.next()? { - let library = import_table.name(import_desc.name.get(LE))?; - let mut first_thunk = import_desc.original_first_thunk.get(LE); - if first_thunk == 0 { - first_thunk = import_desc.first_thunk.get(LE); - } - let mut thunks = import_table.thunks(first_thunk)?; - while let Some(thunk) = thunks.next::()? { - if !thunk.is_ordinal() { - let (_hint, name) = import_table.hint_name(thunk.address())?; - imports.push(Import { - library: ByteString(library), - name: ByteString(name), - }); - } - } - } - } - Ok(imports) - } - - fn exports(&self) -> Result>> { - let mut exports = Vec::new(); - if let Some(export_table) = self.export_table()? 
{ - for (name_pointer, address_index) in export_table.name_iter() { - let name = export_table.name_from_pointer(name_pointer)?; - let address = export_table.address_by_index(address_index.into())?; - if !export_table.is_forward(address) { - exports.push(Export { - name: ByteString(name), - address: self.common.image_base.wrapping_add(address.into()), - }) - } - } - } - Ok(exports) - } - - fn pdb_info(&self) -> Result>> { - let data_dir = match self.data_directory(pe::IMAGE_DIRECTORY_ENTRY_DEBUG) { - Some(data_dir) => data_dir, - None => return Ok(None), - }; - let debug_data = data_dir.data(self.data, &self.common.sections).map(Bytes)?; - let debug_data_size = data_dir.size.get(LE) as usize; - - let count = debug_data_size / mem::size_of::(); - let rem = debug_data_size % mem::size_of::(); - if rem != 0 || count < 1 { - return Err(Error("Invalid PE debug dir size")); - } - - let debug_dirs = debug_data - .read_slice_at::(0, count) - .read_error("Invalid PE debug dir size")?; - - for debug_dir in debug_dirs { - if debug_dir.typ.get(LE) != pe::IMAGE_DEBUG_TYPE_CODEVIEW { - continue; - } - - let info = self - .data - .read_slice_at::( - debug_dir.pointer_to_raw_data.get(LE) as u64, - debug_dir.size_of_data.get(LE) as usize, - ) - .read_error("Invalid CodeView Info address")?; - - let mut info = Bytes(info); - - let sig = info - .read_bytes(4) - .read_error("Invalid CodeView signature")?; - if sig.0 != b"RSDS" { - continue; - } - - let guid: [u8; 16] = info - .read_bytes(16) - .read_error("Invalid CodeView GUID")? 
- .0 - .try_into() - .unwrap(); - - let age = info.read::>().read_error("Invalid CodeView Age")?; - - let path = info - .read_string() - .read_error("Invalid CodeView file path")?; - - return Ok(Some(CodeView { - path: ByteString(path), - guid, - age: age.get(LE), - })); - } - Ok(None) - } - - fn has_debug_symbols(&self) -> bool { - self.section_by_name(".debug_info").is_some() - } - - fn relative_address_base(&self) -> u64 { - self.common.image_base - } - - fn entry(&self) -> u64 { - u64::from(self.nt_headers.optional_header().address_of_entry_point()) - .wrapping_add(self.common.image_base) - } - - fn flags(&self) -> FileFlags { - FileFlags::Coff { - characteristics: self.nt_headers.file_header().characteristics.get(LE), - } - } -} - -/// An iterator over the COMDAT section groups of a `PeFile32`. -pub type PeComdatIterator32<'data, 'file, R = &'data [u8]> = - PeComdatIterator<'data, 'file, pe::ImageNtHeaders32, R>; -/// An iterator over the COMDAT section groups of a `PeFile64`. -pub type PeComdatIterator64<'data, 'file, R = &'data [u8]> = - PeComdatIterator<'data, 'file, pe::ImageNtHeaders64, R>; - -/// An iterator over the COMDAT section groups of a `PeFile`. -#[derive(Debug)] -pub struct PeComdatIterator<'data, 'file, Pe, R = &'data [u8]> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - #[allow(unused)] - file: &'file PeFile<'data, Pe, R>, -} - -impl<'data, 'file, Pe, R> Iterator for PeComdatIterator<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - type Item = PeComdat<'data, 'file, Pe, R>; - - #[inline] - fn next(&mut self) -> Option { - None - } -} - -/// A COMDAT section group of a `PeFile32`. -pub type PeComdat32<'data, 'file, R = &'data [u8]> = - PeComdat<'data, 'file, pe::ImageNtHeaders32, R>; -/// A COMDAT section group of a `PeFile64`. -pub type PeComdat64<'data, 'file, R = &'data [u8]> = - PeComdat<'data, 'file, pe::ImageNtHeaders64, R>; - -/// A COMDAT section group of a `PeFile`. 
-#[derive(Debug)] -pub struct PeComdat<'data, 'file, Pe, R = &'data [u8]> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - #[allow(unused)] - file: &'file PeFile<'data, Pe, R>, -} - -impl<'data, 'file, Pe, R> read::private::Sealed for PeComdat<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Pe, R> ObjectComdat<'data> for PeComdat<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - type SectionIterator = PeComdatSectionIterator<'data, 'file, Pe, R>; - - #[inline] - fn kind(&self) -> ComdatKind { - unreachable!(); - } - - #[inline] - fn symbol(&self) -> SymbolIndex { - unreachable!(); - } - - #[inline] - fn name_bytes(&self) -> Result<&[u8]> { - unreachable!(); - } - - #[inline] - fn name(&self) -> Result<&str> { - unreachable!(); - } - - #[inline] - fn sections(&self) -> Self::SectionIterator { - unreachable!(); - } -} - -/// An iterator over the sections in a COMDAT section group of a `PeFile32`. -pub type PeComdatSectionIterator32<'data, 'file, R = &'data [u8]> = - PeComdatSectionIterator<'data, 'file, pe::ImageNtHeaders32, R>; -/// An iterator over the sections in a COMDAT section group of a `PeFile64`. -pub type PeComdatSectionIterator64<'data, 'file, R = &'data [u8]> = - PeComdatSectionIterator<'data, 'file, pe::ImageNtHeaders64, R>; - -/// An iterator over the sections in a COMDAT section group of a `PeFile`. -#[derive(Debug)] -pub struct PeComdatSectionIterator<'data, 'file, Pe, R = &'data [u8]> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - #[allow(unused)] - file: &'file PeFile<'data, Pe, R>, -} - -impl<'data, 'file, Pe, R> Iterator for PeComdatSectionIterator<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - type Item = SectionIndex; - - fn next(&mut self) -> Option { - None - } -} - -impl pe::ImageDosHeader { - /// Read the DOS header. - /// - /// Also checks that the `e_magic` field in the header is valid. 
- pub fn parse<'data, R: ReadRef<'data>>(data: R) -> read::Result<&'data Self> { - // DOS header comes first. - let dos_header = data - .read_at::(0) - .read_error("Invalid DOS header size or alignment")?; - if dos_header.e_magic.get(LE) != pe::IMAGE_DOS_SIGNATURE { - return Err(Error("Invalid DOS magic")); - } - Ok(dos_header) - } - - /// Return the file offset of the nt_headers. - #[inline] - pub fn nt_headers_offset(&self) -> u32 { - self.e_lfanew.get(LE) - } -} - -/// Find the optional header and read the `optional_header.magic`. -/// -/// It can be useful to know this magic value before trying to -/// fully parse the NT headers. -pub fn optional_header_magic<'data, R: ReadRef<'data>>(data: R) -> Result { - let dos_header = pe::ImageDosHeader::parse(data)?; - // NT headers are at an offset specified in the DOS header. - let offset = dos_header.nt_headers_offset().into(); - // It doesn't matter which NT header type is used for the purpose - // of reading the optional header magic. - let nt_headers = data - .read_at::(offset) - .read_error("Invalid NT headers offset, size, or alignment")?; - if nt_headers.signature() != pe::IMAGE_NT_SIGNATURE { - return Err(Error("Invalid PE magic")); - } - Ok(nt_headers.optional_header().magic()) -} - -/// A trait for generic access to `ImageNtHeaders32` and `ImageNtHeaders64`. -#[allow(missing_docs)] -pub trait ImageNtHeaders: Debug + Pod { - type ImageOptionalHeader: ImageOptionalHeader; - type ImageThunkData: ImageThunkData; - - /// Return true if this type is a 64-bit header. - /// - /// This is a property of the type, not a value in the header data. - fn is_type_64(&self) -> bool; - - /// Return true if the magic field in the optional header is valid. - fn is_valid_optional_magic(&self) -> bool; - - /// Return the signature - fn signature(&self) -> u32; - - /// Return the file header. - fn file_header(&self) -> &pe::ImageFileHeader; - - /// Return the optional header. 
- fn optional_header(&self) -> &Self::ImageOptionalHeader; - - // Provided methods. - - /// Read the NT headers, including the data directories. - /// - /// `data` must be for the entire file. - /// - /// `offset` must be headers offset, which can be obtained from `ImageDosHeader::nt_headers_offset`. - /// It is updated to point after the optional header, which is where the section headers are located. - /// - /// Also checks that the `signature` and `magic` fields in the headers are valid. - fn parse<'data, R: ReadRef<'data>>( - data: R, - offset: &mut u64, - ) -> read::Result<(&'data Self, DataDirectories<'data>)> { - // Note that this does not include the data directories in the optional header. - let nt_headers = data - .read::(offset) - .read_error("Invalid PE headers offset or size")?; - if nt_headers.signature() != pe::IMAGE_NT_SIGNATURE { - return Err(Error("Invalid PE magic")); - } - if !nt_headers.is_valid_optional_magic() { - return Err(Error("Invalid PE optional header magic")); - } - - // Read the rest of the optional header, and then read the data directories from that. - let optional_data_size = - u64::from(nt_headers.file_header().size_of_optional_header.get(LE)) - .checked_sub(mem::size_of::() as u64) - .read_error("PE optional header size is too small")?; - let optional_data = data - .read_bytes(offset, optional_data_size) - .read_error("Invalid PE optional header size")?; - let data_directories = DataDirectories::parse( - optional_data, - nt_headers.optional_header().number_of_rva_and_sizes(), - )?; - - Ok((nt_headers, data_directories)) - } - - /// Read the section table. - /// - /// `data` must be for the entire file. - /// `offset` must be after the optional file header. - #[inline] - fn sections<'data, R: ReadRef<'data>>( - &self, - data: R, - offset: u64, - ) -> read::Result> { - SectionTable::parse(self.file_header(), data, offset) - } - - /// Read the COFF symbol table and string table. - /// - /// `data` must be the entire file data. 
- #[inline] - fn symbols<'data, R: ReadRef<'data>>(&self, data: R) -> read::Result> { - SymbolTable::parse(self.file_header(), data) - } -} - -/// A trait for generic access to `ImageOptionalHeader32` and `ImageOptionalHeader64`. -#[allow(missing_docs)] -pub trait ImageOptionalHeader: Debug + Pod { - // Standard fields. - fn magic(&self) -> u16; - fn major_linker_version(&self) -> u8; - fn minor_linker_version(&self) -> u8; - fn size_of_code(&self) -> u32; - fn size_of_initialized_data(&self) -> u32; - fn size_of_uninitialized_data(&self) -> u32; - fn address_of_entry_point(&self) -> u32; - fn base_of_code(&self) -> u32; - fn base_of_data(&self) -> Option; - - // NT additional fields. - fn image_base(&self) -> u64; - fn section_alignment(&self) -> u32; - fn file_alignment(&self) -> u32; - fn major_operating_system_version(&self) -> u16; - fn minor_operating_system_version(&self) -> u16; - fn major_image_version(&self) -> u16; - fn minor_image_version(&self) -> u16; - fn major_subsystem_version(&self) -> u16; - fn minor_subsystem_version(&self) -> u16; - fn win32_version_value(&self) -> u32; - fn size_of_image(&self) -> u32; - fn size_of_headers(&self) -> u32; - fn check_sum(&self) -> u32; - fn subsystem(&self) -> u16; - fn dll_characteristics(&self) -> u16; - fn size_of_stack_reserve(&self) -> u64; - fn size_of_stack_commit(&self) -> u64; - fn size_of_heap_reserve(&self) -> u64; - fn size_of_heap_commit(&self) -> u64; - fn loader_flags(&self) -> u32; - fn number_of_rva_and_sizes(&self) -> u32; -} - -impl ImageNtHeaders for pe::ImageNtHeaders32 { - type ImageOptionalHeader = pe::ImageOptionalHeader32; - type ImageThunkData = pe::ImageThunkData32; - - #[inline] - fn is_type_64(&self) -> bool { - false - } - - #[inline] - fn is_valid_optional_magic(&self) -> bool { - self.optional_header.magic.get(LE) == pe::IMAGE_NT_OPTIONAL_HDR32_MAGIC - } - - #[inline] - fn signature(&self) -> u32 { - self.signature.get(LE) - } - - #[inline] - fn file_header(&self) -> 
&pe::ImageFileHeader { - &self.file_header - } - - #[inline] - fn optional_header(&self) -> &Self::ImageOptionalHeader { - &self.optional_header - } -} - -impl ImageOptionalHeader for pe::ImageOptionalHeader32 { - #[inline] - fn magic(&self) -> u16 { - self.magic.get(LE) - } - - #[inline] - fn major_linker_version(&self) -> u8 { - self.major_linker_version - } - - #[inline] - fn minor_linker_version(&self) -> u8 { - self.minor_linker_version - } - - #[inline] - fn size_of_code(&self) -> u32 { - self.size_of_code.get(LE) - } - - #[inline] - fn size_of_initialized_data(&self) -> u32 { - self.size_of_initialized_data.get(LE) - } - - #[inline] - fn size_of_uninitialized_data(&self) -> u32 { - self.size_of_uninitialized_data.get(LE) - } - - #[inline] - fn address_of_entry_point(&self) -> u32 { - self.address_of_entry_point.get(LE) - } - - #[inline] - fn base_of_code(&self) -> u32 { - self.base_of_code.get(LE) - } - - #[inline] - fn base_of_data(&self) -> Option { - Some(self.base_of_data.get(LE)) - } - - #[inline] - fn image_base(&self) -> u64 { - self.image_base.get(LE).into() - } - - #[inline] - fn section_alignment(&self) -> u32 { - self.section_alignment.get(LE) - } - - #[inline] - fn file_alignment(&self) -> u32 { - self.file_alignment.get(LE) - } - - #[inline] - fn major_operating_system_version(&self) -> u16 { - self.major_operating_system_version.get(LE) - } - - #[inline] - fn minor_operating_system_version(&self) -> u16 { - self.minor_operating_system_version.get(LE) - } - - #[inline] - fn major_image_version(&self) -> u16 { - self.major_image_version.get(LE) - } - - #[inline] - fn minor_image_version(&self) -> u16 { - self.minor_image_version.get(LE) - } - - #[inline] - fn major_subsystem_version(&self) -> u16 { - self.major_subsystem_version.get(LE) - } - - #[inline] - fn minor_subsystem_version(&self) -> u16 { - self.minor_subsystem_version.get(LE) - } - - #[inline] - fn win32_version_value(&self) -> u32 { - self.win32_version_value.get(LE) - } - - #[inline] 
- fn size_of_image(&self) -> u32 { - self.size_of_image.get(LE) - } - - #[inline] - fn size_of_headers(&self) -> u32 { - self.size_of_headers.get(LE) - } - - #[inline] - fn check_sum(&self) -> u32 { - self.check_sum.get(LE) - } - - #[inline] - fn subsystem(&self) -> u16 { - self.subsystem.get(LE) - } - - #[inline] - fn dll_characteristics(&self) -> u16 { - self.dll_characteristics.get(LE) - } - - #[inline] - fn size_of_stack_reserve(&self) -> u64 { - self.size_of_stack_reserve.get(LE).into() - } - - #[inline] - fn size_of_stack_commit(&self) -> u64 { - self.size_of_stack_commit.get(LE).into() - } - - #[inline] - fn size_of_heap_reserve(&self) -> u64 { - self.size_of_heap_reserve.get(LE).into() - } - - #[inline] - fn size_of_heap_commit(&self) -> u64 { - self.size_of_heap_commit.get(LE).into() - } - - #[inline] - fn loader_flags(&self) -> u32 { - self.loader_flags.get(LE) - } - - #[inline] - fn number_of_rva_and_sizes(&self) -> u32 { - self.number_of_rva_and_sizes.get(LE) - } -} - -impl ImageNtHeaders for pe::ImageNtHeaders64 { - type ImageOptionalHeader = pe::ImageOptionalHeader64; - type ImageThunkData = pe::ImageThunkData64; - - #[inline] - fn is_type_64(&self) -> bool { - true - } - - #[inline] - fn is_valid_optional_magic(&self) -> bool { - self.optional_header.magic.get(LE) == pe::IMAGE_NT_OPTIONAL_HDR64_MAGIC - } - - #[inline] - fn signature(&self) -> u32 { - self.signature.get(LE) - } - - #[inline] - fn file_header(&self) -> &pe::ImageFileHeader { - &self.file_header - } - - #[inline] - fn optional_header(&self) -> &Self::ImageOptionalHeader { - &self.optional_header - } -} - -impl ImageOptionalHeader for pe::ImageOptionalHeader64 { - #[inline] - fn magic(&self) -> u16 { - self.magic.get(LE) - } - - #[inline] - fn major_linker_version(&self) -> u8 { - self.major_linker_version - } - - #[inline] - fn minor_linker_version(&self) -> u8 { - self.minor_linker_version - } - - #[inline] - fn size_of_code(&self) -> u32 { - self.size_of_code.get(LE) - } - - #[inline] 
- fn size_of_initialized_data(&self) -> u32 { - self.size_of_initialized_data.get(LE) - } - - #[inline] - fn size_of_uninitialized_data(&self) -> u32 { - self.size_of_uninitialized_data.get(LE) - } - - #[inline] - fn address_of_entry_point(&self) -> u32 { - self.address_of_entry_point.get(LE) - } - - #[inline] - fn base_of_code(&self) -> u32 { - self.base_of_code.get(LE) - } - - #[inline] - fn base_of_data(&self) -> Option { - None - } - - #[inline] - fn image_base(&self) -> u64 { - self.image_base.get(LE) - } - - #[inline] - fn section_alignment(&self) -> u32 { - self.section_alignment.get(LE) - } - - #[inline] - fn file_alignment(&self) -> u32 { - self.file_alignment.get(LE) - } - - #[inline] - fn major_operating_system_version(&self) -> u16 { - self.major_operating_system_version.get(LE) - } - - #[inline] - fn minor_operating_system_version(&self) -> u16 { - self.minor_operating_system_version.get(LE) - } - - #[inline] - fn major_image_version(&self) -> u16 { - self.major_image_version.get(LE) - } - - #[inline] - fn minor_image_version(&self) -> u16 { - self.minor_image_version.get(LE) - } - - #[inline] - fn major_subsystem_version(&self) -> u16 { - self.major_subsystem_version.get(LE) - } - - #[inline] - fn minor_subsystem_version(&self) -> u16 { - self.minor_subsystem_version.get(LE) - } - - #[inline] - fn win32_version_value(&self) -> u32 { - self.win32_version_value.get(LE) - } - - #[inline] - fn size_of_image(&self) -> u32 { - self.size_of_image.get(LE) - } - - #[inline] - fn size_of_headers(&self) -> u32 { - self.size_of_headers.get(LE) - } - - #[inline] - fn check_sum(&self) -> u32 { - self.check_sum.get(LE) - } - - #[inline] - fn subsystem(&self) -> u16 { - self.subsystem.get(LE) - } - - #[inline] - fn dll_characteristics(&self) -> u16 { - self.dll_characteristics.get(LE) - } - - #[inline] - fn size_of_stack_reserve(&self) -> u64 { - self.size_of_stack_reserve.get(LE) - } - - #[inline] - fn size_of_stack_commit(&self) -> u64 { - 
self.size_of_stack_commit.get(LE) - } - - #[inline] - fn size_of_heap_reserve(&self) -> u64 { - self.size_of_heap_reserve.get(LE) - } - - #[inline] - fn size_of_heap_commit(&self) -> u64 { - self.size_of_heap_commit.get(LE) - } - - #[inline] - fn loader_flags(&self) -> u32 { - self.loader_flags.get(LE) - } - - #[inline] - fn number_of_rva_and_sizes(&self) -> u32 { - self.number_of_rva_and_sizes.get(LE) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/pe/import.rs s390-tools-2.33.1/rust-vendor/object/src/read/pe/import.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/pe/import.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/pe/import.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,332 +0,0 @@ -use core::fmt::Debug; -use core::mem; - -use crate::read::{Bytes, ReadError, Result}; -use crate::{pe, LittleEndian as LE, Pod, U16Bytes}; - -use super::ImageNtHeaders; - -/// Information for parsing a PE import table. -#[derive(Debug, Clone)] -pub struct ImportTable<'data> { - section_data: Bytes<'data>, - section_address: u32, - import_address: u32, -} - -impl<'data> ImportTable<'data> { - /// Create a new import table parser. - /// - /// The import descriptors start at `import_address`. - /// The size declared in the `IMAGE_DIRECTORY_ENTRY_IMPORT` data directory is - /// ignored by the Windows loader, and so descriptors will be parsed until a null entry. - /// - /// `section_data` should be from the section containing `import_address`, and - /// `section_address` should be the address of that section. Pointers within the - /// descriptors and thunks may point to anywhere within the section data. - pub fn new(section_data: &'data [u8], section_address: u32, import_address: u32) -> Self { - ImportTable { - section_data: Bytes(section_data), - section_address, - import_address, - } - } - - /// Return an iterator for the import descriptors. 
- pub fn descriptors(&self) -> Result> { - let offset = self.import_address.wrapping_sub(self.section_address); - let mut data = self.section_data; - data.skip(offset as usize) - .read_error("Invalid PE import descriptor address")?; - Ok(ImportDescriptorIterator { data }) - } - - /// Return a library name given its address. - /// - /// This address may be from [`pe::ImageImportDescriptor::name`]. - pub fn name(&self, address: u32) -> Result<&'data [u8]> { - self.section_data - .read_string_at(address.wrapping_sub(self.section_address) as usize) - .read_error("Invalid PE import descriptor name") - } - - /// Return a list of thunks given its address. - /// - /// This address may be from [`pe::ImageImportDescriptor::original_first_thunk`] - /// or [`pe::ImageImportDescriptor::first_thunk`]. - pub fn thunks(&self, address: u32) -> Result> { - let offset = address.wrapping_sub(self.section_address); - let mut data = self.section_data; - data.skip(offset as usize) - .read_error("Invalid PE import thunk table address")?; - Ok(ImportThunkList { data }) - } - - /// Parse a thunk. - pub fn import(&self, thunk: Pe::ImageThunkData) -> Result> { - if thunk.is_ordinal() { - Ok(Import::Ordinal(thunk.ordinal())) - } else { - let (hint, name) = self.hint_name(thunk.address())?; - Ok(Import::Name(hint, name)) - } - } - - /// Return the hint and name at the given address. - /// - /// This address may be from [`pe::ImageThunkData32`] or [`pe::ImageThunkData64`]. - /// - /// The hint is an index into the export name pointer table in the target library. - pub fn hint_name(&self, address: u32) -> Result<(u16, &'data [u8])> { - let offset = address.wrapping_sub(self.section_address); - let mut data = self.section_data; - data.skip(offset as usize) - .read_error("Invalid PE import thunk address")?; - let hint = data - .read::>() - .read_error("Missing PE import thunk hint")? 
- .get(LE); - let name = data - .read_string() - .read_error("Missing PE import thunk name")?; - Ok((hint, name)) - } -} - -/// A fallible iterator for the descriptors in the import data directory. -#[derive(Debug, Clone)] -pub struct ImportDescriptorIterator<'data> { - data: Bytes<'data>, -} - -impl<'data> ImportDescriptorIterator<'data> { - /// Return the next descriptor. - /// - /// Returns `Ok(None)` when a null descriptor is found. - pub fn next(&mut self) -> Result> { - let import_desc = self - .data - .read::() - .read_error("Missing PE null import descriptor")?; - if import_desc.is_null() { - Ok(None) - } else { - Ok(Some(import_desc)) - } - } -} - -/// A list of import thunks. -/// -/// These may be in the import lookup table, or the import address table. -#[derive(Debug, Clone)] -pub struct ImportThunkList<'data> { - data: Bytes<'data>, -} - -impl<'data> ImportThunkList<'data> { - /// Get the thunk at the given index. - pub fn get(&self, index: usize) -> Result { - let thunk = self - .data - .read_at(index * mem::size_of::()) - .read_error("Invalid PE import thunk index")?; - Ok(*thunk) - } - - /// Return the first thunk in the list, and update `self` to point after it. - /// - /// Returns `Ok(None)` when a null thunk is found. - pub fn next(&mut self) -> Result> { - let thunk = self - .data - .read::() - .read_error("Missing PE null import thunk")?; - if thunk.address() == 0 { - Ok(None) - } else { - Ok(Some(*thunk)) - } - } -} - -/// A parsed import thunk. -#[derive(Debug, Clone, Copy)] -pub enum Import<'data> { - /// Import by ordinal. - Ordinal(u16), - /// Import by name. - /// - /// Includes a hint for the index into the export name pointer table in the target library. - Name(u16, &'data [u8]), -} - -/// A trait for generic access to [`pe::ImageThunkData32`] and [`pe::ImageThunkData64`]. -#[allow(missing_docs)] -pub trait ImageThunkData: Debug + Pod { - /// Return the raw thunk value. 
- fn raw(self) -> u64; - - /// Returns true if the ordinal flag is set. - fn is_ordinal(self) -> bool; - - /// Return the ordinal portion of the thunk. - /// - /// Does not check the ordinal flag. - fn ordinal(self) -> u16; - - /// Return the RVA portion of the thunk. - /// - /// Does not check the ordinal flag. - fn address(self) -> u32; -} - -impl ImageThunkData for pe::ImageThunkData64 { - fn raw(self) -> u64 { - self.0.get(LE) - } - - fn is_ordinal(self) -> bool { - self.0.get(LE) & pe::IMAGE_ORDINAL_FLAG64 != 0 - } - - fn ordinal(self) -> u16 { - self.0.get(LE) as u16 - } - - fn address(self) -> u32 { - self.0.get(LE) as u32 & 0x7fff_ffff - } -} - -impl ImageThunkData for pe::ImageThunkData32 { - fn raw(self) -> u64 { - self.0.get(LE).into() - } - - fn is_ordinal(self) -> bool { - self.0.get(LE) & pe::IMAGE_ORDINAL_FLAG32 != 0 - } - - fn ordinal(self) -> u16 { - self.0.get(LE) as u16 - } - - fn address(self) -> u32 { - self.0.get(LE) & 0x7fff_ffff - } -} - -/// Information for parsing a PE delay-load import table. -#[derive(Debug, Clone)] -pub struct DelayLoadImportTable<'data> { - section_data: Bytes<'data>, - section_address: u32, - import_address: u32, -} - -impl<'data> DelayLoadImportTable<'data> { - /// Create a new delay load import table parser. - /// - /// The import descriptors start at `import_address`. - /// This table works in the same way the import table does: descriptors will be - /// parsed until a null entry. - /// - /// `section_data` should be from the section containing `import_address`, and - /// `section_address` should be the address of that section. Pointers within the - /// descriptors and thunks may point to anywhere within the section data. - pub fn new(section_data: &'data [u8], section_address: u32, import_address: u32) -> Self { - DelayLoadImportTable { - section_data: Bytes(section_data), - section_address, - import_address, - } - } - - /// Return an iterator for the import descriptors. 
- pub fn descriptors(&self) -> Result> { - let offset = self.import_address.wrapping_sub(self.section_address); - let mut data = self.section_data; - data.skip(offset as usize) - .read_error("Invalid PE delay-load import descriptor address")?; - Ok(DelayLoadDescriptorIterator { data }) - } - - /// Return a library name given its address. - /// - /// This address may be from [`pe::ImageDelayloadDescriptor::dll_name_rva`]. - pub fn name(&self, address: u32) -> Result<&'data [u8]> { - self.section_data - .read_string_at(address.wrapping_sub(self.section_address) as usize) - .read_error("Invalid PE import descriptor name") - } - - /// Return a list of thunks given its address. - /// - /// This address may be from the INT, i.e. from - /// [`pe::ImageDelayloadDescriptor::import_name_table_rva`]. - /// - /// Please note that others RVA values from [`pe::ImageDelayloadDescriptor`] are used - /// by the delay loader at runtime to store values, and thus do not point inside the same - /// section as the INT. Calling this function on those addresses will fail. - pub fn thunks(&self, address: u32) -> Result> { - let offset = address.wrapping_sub(self.section_address); - let mut data = self.section_data; - data.skip(offset as usize) - .read_error("Invalid PE delay load import thunk table address")?; - Ok(ImportThunkList { data }) - } - - /// Parse a thunk. - pub fn import(&self, thunk: Pe::ImageThunkData) -> Result> { - if thunk.is_ordinal() { - Ok(Import::Ordinal(thunk.ordinal())) - } else { - let (hint, name) = self.hint_name(thunk.address())?; - Ok(Import::Name(hint, name)) - } - } - - /// Return the hint and name at the given address. - /// - /// This address may be from [`pe::ImageThunkData32`] or [`pe::ImageThunkData64`]. - /// - /// The hint is an index into the export name pointer table in the target library. 
- pub fn hint_name(&self, address: u32) -> Result<(u16, &'data [u8])> { - let offset = address.wrapping_sub(self.section_address); - let mut data = self.section_data; - data.skip(offset as usize) - .read_error("Invalid PE delay load import thunk address")?; - let hint = data - .read::>() - .read_error("Missing PE delay load import thunk hint")? - .get(LE); - let name = data - .read_string() - .read_error("Missing PE delay load import thunk name")?; - Ok((hint, name)) - } -} - -/// A fallible iterator for the descriptors in the delay-load data directory. -#[derive(Debug, Clone)] -pub struct DelayLoadDescriptorIterator<'data> { - data: Bytes<'data>, -} - -impl<'data> DelayLoadDescriptorIterator<'data> { - /// Return the next descriptor. - /// - /// Returns `Ok(None)` when a null descriptor is found. - pub fn next(&mut self) -> Result> { - let import_desc = self - .data - .read::() - .read_error("Missing PE null delay-load import descriptor")?; - if import_desc.is_null() { - Ok(None) - } else { - Ok(Some(import_desc)) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/pe/mod.rs s390-tools-2.33.1/rust-vendor/object/src/read/pe/mod.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/pe/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/pe/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,34 +0,0 @@ -//! Support for reading PE files. -//! -//! Defines traits to abstract over the difference between PE32/PE32+, -//! and implements read functionality in terms of these traits. -//! -//! This module reuses some of the COFF functionality. -//! -//! Also provides `PeFile` and related types which implement the `Object` trait. 
- -mod file; -pub use file::*; - -mod section; -pub use section::*; - -mod data_directory; -pub use data_directory::*; - -mod export; -pub use export::*; - -mod import; -pub use import::*; - -mod relocation; -pub use relocation::*; - -mod resource; -pub use resource::*; - -mod rich; -pub use rich::*; - -pub use super::coff::{SectionTable, SymbolTable}; diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/pe/relocation.rs s390-tools-2.33.1/rust-vendor/object/src/read/pe/relocation.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/pe/relocation.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/pe/relocation.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,90 +0,0 @@ -use core::slice; - -use crate::endian::{LittleEndian as LE, U16}; -use crate::pe; -use crate::read::{Bytes, Error, ReadError, Result}; - -/// An iterator over the relocation blocks in the `.reloc` section of a PE file. -#[derive(Debug, Default, Clone, Copy)] -pub struct RelocationBlockIterator<'data> { - data: Bytes<'data>, -} - -impl<'data> RelocationBlockIterator<'data> { - /// Construct a new iterator from the data of the `.reloc` section. - pub fn new(data: &'data [u8]) -> Self { - RelocationBlockIterator { data: Bytes(data) } - } - - /// Read the next relocation page. - pub fn next(&mut self) -> Result>> { - if self.data.is_empty() { - return Ok(None); - } - let header = self - .data - .read::() - .read_error("Invalid PE reloc section size")?; - let virtual_address = header.virtual_address.get(LE); - let size = header.size_of_block.get(LE); - if size <= 8 || size & 3 != 0 { - return Err(Error("Invalid PE reloc block size")); - } - let count = (size - 8) / 2; - let relocs = self - .data - .read_slice::>(count as usize) - .read_error("Invalid PE reloc block size")? - .iter(); - Ok(Some(RelocationIterator { - virtual_address, - size, - relocs, - })) - } -} - -/// An iterator of the relocations in a block in the `.reloc` section of a PE file. 
-#[derive(Debug, Clone)] -pub struct RelocationIterator<'data> { - virtual_address: u32, - size: u32, - relocs: slice::Iter<'data, U16>, -} - -impl<'data> RelocationIterator<'data> { - /// Return the virtual address of the page that this block of relocations applies to. - pub fn virtual_address(&self) -> u32 { - self.virtual_address - } - - /// Return the size in bytes of this block of relocations. - pub fn size(&self) -> u32 { - self.size - } -} - -impl<'data> Iterator for RelocationIterator<'data> { - type Item = Relocation; - - fn next(&mut self) -> Option { - loop { - let reloc = self.relocs.next()?.get(LE); - if reloc != 0 { - return Some(Relocation { - virtual_address: self.virtual_address.wrapping_add((reloc & 0xfff) as u32), - typ: reloc >> 12, - }); - } - } - } -} - -/// A relocation in the `.reloc` section of a PE file. -#[derive(Debug, Default, Clone, Copy)] -pub struct Relocation { - /// The virtual address of the relocation. - pub virtual_address: u32, - /// One of the `pe::IMAGE_REL_BASED_*` constants. - pub typ: u16, -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/pe/resource.rs s390-tools-2.33.1/rust-vendor/object/src/read/pe/resource.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/pe/resource.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/pe/resource.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,207 +0,0 @@ -use alloc::string::String; -use core::char; - -use crate::read::{ReadError, ReadRef, Result}; -use crate::{pe, LittleEndian as LE, U16Bytes}; - -/// The `.rsrc` section of a PE file. -#[derive(Debug, Clone, Copy)] -pub struct ResourceDirectory<'data> { - data: &'data [u8], -} - -impl<'data> ResourceDirectory<'data> { - /// Construct from the data of the `.rsrc` section. - pub fn new(data: &'data [u8]) -> Self { - ResourceDirectory { data } - } - - /// Parses the root resource directory. 
- pub fn root(&self) -> Result> { - ResourceDirectoryTable::parse(self.data, 0) - } -} - -/// A table of resource entries. -#[derive(Debug, Clone)] -pub struct ResourceDirectoryTable<'data> { - /// The table header. - pub header: &'data pe::ImageResourceDirectory, - /// The table entries. - pub entries: &'data [pe::ImageResourceDirectoryEntry], -} - -impl<'data> ResourceDirectoryTable<'data> { - fn parse(data: &'data [u8], offset: u32) -> Result { - let mut offset = u64::from(offset); - let header = data - .read::(&mut offset) - .read_error("Invalid resource table header")?; - let entries_count = header.number_of_id_entries.get(LE) as usize - + header.number_of_named_entries.get(LE) as usize; - let entries = data - .read_slice::(&mut offset, entries_count) - .read_error("Invalid resource table entries")?; - Ok(Self { header, entries }) - } -} - -impl pe::ImageResourceDirectoryEntry { - /// Returns true if the entry has a name, rather than an ID. - pub fn has_name(&self) -> bool { - self.name_or_id.get(LE) & pe::IMAGE_RESOURCE_NAME_IS_STRING != 0 - } - - /// Returns the section offset of the name. - /// - /// Valid if `has_name()` returns true. - fn name(&self) -> ResourceName { - let offset = self.name_or_id.get(LE) & !pe::IMAGE_RESOURCE_NAME_IS_STRING; - ResourceName { offset } - } - - /// Returns the ID. - /// - /// Valid if `has_string_name()` returns false. - fn id(&self) -> u16 { - (self.name_or_id.get(LE) & 0x0000_FFFF) as u16 - } - - /// Returns the entry name - pub fn name_or_id(&self) -> ResourceNameOrId { - if self.has_name() { - ResourceNameOrId::Name(self.name()) - } else { - ResourceNameOrId::Id(self.id()) - } - } - - /// Returns true if the entry is a subtable. - pub fn is_table(&self) -> bool { - self.offset_to_data_or_directory.get(LE) & pe::IMAGE_RESOURCE_DATA_IS_DIRECTORY != 0 - } - - /// Returns the section offset of the associated table or data. 
- pub fn data_offset(&self) -> u32 { - self.offset_to_data_or_directory.get(LE) & !pe::IMAGE_RESOURCE_DATA_IS_DIRECTORY - } - - /// Returns the data associated to this directory entry. - pub fn data<'data>( - &self, - section: ResourceDirectory<'data>, - ) -> Result> { - if self.is_table() { - ResourceDirectoryTable::parse(section.data, self.data_offset()) - .map(ResourceDirectoryEntryData::Table) - } else { - section - .data - .read_at::(self.data_offset().into()) - .read_error("Invalid resource entry") - .map(ResourceDirectoryEntryData::Data) - } - } -} - -/// Data associated with a resource directory entry. -#[derive(Debug, Clone)] -pub enum ResourceDirectoryEntryData<'data> { - /// A subtable entry. - Table(ResourceDirectoryTable<'data>), - /// A resource data entry. - Data(&'data pe::ImageResourceDataEntry), -} - -impl<'data> ResourceDirectoryEntryData<'data> { - /// Converts to an option of table. - /// - /// Helper for iterator filtering. - pub fn table(self) -> Option> { - match self { - Self::Table(dir) => Some(dir), - _ => None, - } - } - - /// Converts to an option of data entry. - /// - /// Helper for iterator filtering. - pub fn data(self) -> Option<&'data pe::ImageResourceDataEntry> { - match self { - Self::Data(rsc) => Some(rsc), - _ => None, - } - } -} - -/// A resource name. -#[derive(Debug, Clone, Copy)] -pub struct ResourceName { - offset: u32, -} - -impl ResourceName { - /// Converts to a `String`. - pub fn to_string_lossy(&self, directory: ResourceDirectory<'_>) -> Result { - let d = self.data(directory)?.iter().map(|c| c.get(LE)); - - Ok(char::decode_utf16(d) - .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER)) - .collect::()) - } - - /// Returns the string unicode buffer. 
- pub fn data<'data>( - &self, - directory: ResourceDirectory<'data>, - ) -> Result<&'data [U16Bytes]> { - let mut offset = u64::from(self.offset); - let len = directory - .data - .read::>(&mut offset) - .read_error("Invalid resource name offset")?; - directory - .data - .read_slice::>(&mut offset, len.get(LE).into()) - .read_error("Invalid resource name length") - } - - /// Returns the string buffer as raw bytes. - pub fn raw_data<'data>(&self, directory: ResourceDirectory<'data>) -> Result<&'data [u8]> { - self.data(directory).map(crate::pod::bytes_of_slice) - } -} - -/// A resource name or ID. -/// -/// Can be either a string or a numeric ID. -#[derive(Debug)] -pub enum ResourceNameOrId { - /// A resource name. - Name(ResourceName), - /// A resource ID. - Id(u16), -} - -impl ResourceNameOrId { - /// Converts to an option of name. - /// - /// Helper for iterator filtering. - pub fn name(self) -> Option { - match self { - Self::Name(name) => Some(name), - _ => None, - } - } - - /// Converts to an option of ID. - /// - /// Helper for iterator filtering. - pub fn id(self) -> Option { - match self { - Self::Id(id) => Some(id), - _ => None, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/pe/rich.rs s390-tools-2.33.1/rust-vendor/object/src/read/pe/rich.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/pe/rich.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/pe/rich.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ -//! PE rich header handling - -use core::mem; - -use crate::pod::bytes_of_slice; -use crate::read::Bytes; -use crate::{pe, LittleEndian as LE, ReadRef, U32}; - -/// Parsed information about a Rich Header. -#[derive(Debug, Clone, Copy)] -pub struct RichHeaderInfo<'data> { - /// The offset at which the rich header starts. - pub offset: usize, - /// The length (in bytes) of the rich header. 
- /// - /// This includes the payload, but also the 16-byte start sequence and the - /// 8-byte final "Rich" and XOR key. - pub length: usize, - /// The XOR key used to mask the rich header. - /// - /// Unless the file has been tampered with, it should be equal to a checksum - /// of the file header. - pub xor_key: u32, - masked_entries: &'data [pe::MaskedRichHeaderEntry], -} - -/// A PE rich header entry after it has been unmasked. -/// -/// See [`pe::MaskedRichHeaderEntry`]. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct RichHeaderEntry { - /// ID of the component. - pub comp_id: u32, - /// Number of times this component has been used when building this PE. - pub count: u32, -} - -impl<'data> RichHeaderInfo<'data> { - /// Try to locate a rich header and its entries in the current PE file. - pub fn parse>(data: R, nt_header_offset: u64) -> Option { - // Locate the rich header, if any. - // It ends with the "Rich" string and an XOR key, before the NT header. - let data = data.read_bytes_at(0, nt_header_offset).map(Bytes).ok()?; - let end_marker_offset = memmem(data.0, b"Rich", 4)?; - let xor_key = *data.read_at::>(end_marker_offset + 4).ok()?; - - // It starts at the masked "DanS" string and 3 masked zeroes. - let masked_start_marker = U32::new(LE, 0x536e_6144 ^ xor_key.get(LE)); - let start_header = [masked_start_marker, xor_key, xor_key, xor_key]; - let start_sequence = bytes_of_slice(&start_header); - let start_marker_offset = memmem(&data.0[..end_marker_offset], start_sequence, 4)?; - - // Extract the items between the markers. - let items_offset = start_marker_offset + start_sequence.len(); - let items_len = end_marker_offset - items_offset; - let item_count = items_len / mem::size_of::(); - let items = data.read_slice_at(items_offset, item_count).ok()?; - Some(RichHeaderInfo { - offset: start_marker_offset, - // Includes "Rich" marker and the XOR key. 
- length: end_marker_offset - start_marker_offset + 8, - xor_key: xor_key.get(LE), - masked_entries: items, - }) - } - - /// Returns an iterator over the unmasked entries. - pub fn unmasked_entries(&self) -> impl Iterator + 'data { - let xor_key = self.xor_key; - self.masked_entries - .iter() - .map(move |entry| RichHeaderEntry { - comp_id: entry.masked_comp_id.get(LE) ^ xor_key, - count: entry.masked_count.get(LE) ^ xor_key, - }) - } -} - -/// Find the offset of the first occurrence of needle in the data. -/// -/// The offset must have the given alignment. -fn memmem(data: &[u8], needle: &[u8], align: usize) -> Option { - let mut offset = 0; - loop { - if data.get(offset..)?.get(..needle.len())? == needle { - return Some(offset); - } - offset += align; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/pe/section.rs s390-tools-2.33.1/rust-vendor/object/src/read/pe/section.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/pe/section.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/pe/section.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,434 +0,0 @@ -use core::marker::PhantomData; -use core::{cmp, iter, slice, str}; - -use crate::endian::LittleEndian as LE; -use crate::pe; -use crate::pe::ImageSectionHeader; -use crate::read::{ - self, CompressedData, CompressedFileRange, ObjectSection, ObjectSegment, ReadError, ReadRef, - Relocation, Result, SectionFlags, SectionIndex, SectionKind, SegmentFlags, -}; - -use super::{ImageNtHeaders, PeFile, SectionTable}; - -/// An iterator over the loadable sections of a `PeFile32`. -pub type PeSegmentIterator32<'data, 'file, R = &'data [u8]> = - PeSegmentIterator<'data, 'file, pe::ImageNtHeaders32, R>; -/// An iterator over the loadable sections of a `PeFile64`. -pub type PeSegmentIterator64<'data, 'file, R = &'data [u8]> = - PeSegmentIterator<'data, 'file, pe::ImageNtHeaders64, R>; - -/// An iterator over the loadable sections of a `PeFile`. 
-#[derive(Debug)] -pub struct PeSegmentIterator<'data, 'file, Pe, R = &'data [u8]> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - pub(super) file: &'file PeFile<'data, Pe, R>, - pub(super) iter: slice::Iter<'data, pe::ImageSectionHeader>, -} - -impl<'data, 'file, Pe, R> Iterator for PeSegmentIterator<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - type Item = PeSegment<'data, 'file, Pe, R>; - - fn next(&mut self) -> Option { - self.iter.next().map(|section| PeSegment { - file: self.file, - section, - }) - } -} - -/// A loadable section of a `PeFile32`. -pub type PeSegment32<'data, 'file, R = &'data [u8]> = - PeSegment<'data, 'file, pe::ImageNtHeaders32, R>; -/// A loadable section of a `PeFile64`. -pub type PeSegment64<'data, 'file, R = &'data [u8]> = - PeSegment<'data, 'file, pe::ImageNtHeaders64, R>; - -/// A loadable section of a `PeFile`. -#[derive(Debug)] -pub struct PeSegment<'data, 'file, Pe, R = &'data [u8]> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - file: &'file PeFile<'data, Pe, R>, - section: &'data pe::ImageSectionHeader, -} - -impl<'data, 'file, Pe, R> read::private::Sealed for PeSegment<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Pe, R> ObjectSegment<'data> for PeSegment<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - #[inline] - fn address(&self) -> u64 { - u64::from(self.section.virtual_address.get(LE)).wrapping_add(self.file.common.image_base) - } - - #[inline] - fn size(&self) -> u64 { - u64::from(self.section.virtual_size.get(LE)) - } - - #[inline] - fn align(&self) -> u64 { - self.file.section_alignment() - } - - #[inline] - fn file_range(&self) -> (u64, u64) { - let (offset, size) = self.section.pe_file_range(); - (u64::from(offset), u64::from(size)) - } - - fn data(&self) -> Result<&'data [u8]> { - self.section.pe_data(self.file.data) - } - - fn data_range(&self, address: u64, size: u64) -> Result> { - 
Ok(read::util::data_range( - self.data()?, - self.address(), - address, - size, - )) - } - - #[inline] - fn name_bytes(&self) -> Result> { - self.section - .name(self.file.common.symbols.strings()) - .map(Some) - } - - #[inline] - fn name(&self) -> Result> { - let name = self.section.name(self.file.common.symbols.strings())?; - Ok(Some( - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 PE section name")?, - )) - } - - #[inline] - fn flags(&self) -> SegmentFlags { - let characteristics = self.section.characteristics.get(LE); - SegmentFlags::Coff { characteristics } - } -} - -/// An iterator over the sections of a `PeFile32`. -pub type PeSectionIterator32<'data, 'file, R = &'data [u8]> = - PeSectionIterator<'data, 'file, pe::ImageNtHeaders32, R>; -/// An iterator over the sections of a `PeFile64`. -pub type PeSectionIterator64<'data, 'file, R = &'data [u8]> = - PeSectionIterator<'data, 'file, pe::ImageNtHeaders64, R>; - -/// An iterator over the sections of a `PeFile`. -#[derive(Debug)] -pub struct PeSectionIterator<'data, 'file, Pe, R = &'data [u8]> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - pub(super) file: &'file PeFile<'data, Pe, R>, - pub(super) iter: iter::Enumerate>, -} - -impl<'data, 'file, Pe, R> Iterator for PeSectionIterator<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - type Item = PeSection<'data, 'file, Pe, R>; - - fn next(&mut self) -> Option { - self.iter.next().map(|(index, section)| PeSection { - file: self.file, - index: SectionIndex(index + 1), - section, - }) - } -} - -/// A section of a `PeFile32`. -pub type PeSection32<'data, 'file, R = &'data [u8]> = - PeSection<'data, 'file, pe::ImageNtHeaders32, R>; -/// A section of a `PeFile64`. -pub type PeSection64<'data, 'file, R = &'data [u8]> = - PeSection<'data, 'file, pe::ImageNtHeaders64, R>; - -/// A section of a `PeFile`. 
-#[derive(Debug)] -pub struct PeSection<'data, 'file, Pe, R = &'data [u8]> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - pub(super) file: &'file PeFile<'data, Pe, R>, - pub(super) index: SectionIndex, - pub(super) section: &'data pe::ImageSectionHeader, -} - -impl<'data, 'file, Pe, R> read::private::Sealed for PeSection<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Pe, R> ObjectSection<'data> for PeSection<'data, 'file, Pe, R> -where - Pe: ImageNtHeaders, - R: ReadRef<'data>, -{ - type RelocationIterator = PeRelocationIterator<'data, 'file, R>; - - #[inline] - fn index(&self) -> SectionIndex { - self.index - } - - #[inline] - fn address(&self) -> u64 { - u64::from(self.section.virtual_address.get(LE)).wrapping_add(self.file.common.image_base) - } - - #[inline] - fn size(&self) -> u64 { - u64::from(self.section.virtual_size.get(LE)) - } - - #[inline] - fn align(&self) -> u64 { - self.file.section_alignment() - } - - #[inline] - fn file_range(&self) -> Option<(u64, u64)> { - let (offset, size) = self.section.pe_file_range(); - if size == 0 { - None - } else { - Some((u64::from(offset), u64::from(size))) - } - } - - fn data(&self) -> Result<&'data [u8]> { - self.section.pe_data(self.file.data) - } - - fn data_range(&self, address: u64, size: u64) -> Result> { - Ok(read::util::data_range( - self.data()?, - self.address(), - address, - size, - )) - } - - #[inline] - fn compressed_file_range(&self) -> Result { - Ok(CompressedFileRange::none(self.file_range())) - } - - #[inline] - fn compressed_data(&self) -> Result> { - self.data().map(CompressedData::none) - } - - #[inline] - fn name_bytes(&self) -> Result<&[u8]> { - self.section.name(self.file.common.symbols.strings()) - } - - #[inline] - fn name(&self) -> Result<&str> { - let name = self.name_bytes()?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 PE section name") - } - - #[inline] - fn segment_name_bytes(&self) -> Result> { - Ok(None) - } - - 
#[inline] - fn segment_name(&self) -> Result> { - Ok(None) - } - - #[inline] - fn kind(&self) -> SectionKind { - self.section.kind() - } - - fn relocations(&self) -> PeRelocationIterator<'data, 'file, R> { - PeRelocationIterator(PhantomData) - } - - fn flags(&self) -> SectionFlags { - SectionFlags::Coff { - characteristics: self.section.characteristics.get(LE), - } - } -} - -impl<'data> SectionTable<'data> { - /// Return the file offset of the given virtual address, and the size up - /// to the end of the section containing it. - /// - /// Returns `None` if no section contains the address. - pub fn pe_file_range_at(&self, va: u32) -> Option<(u32, u32)> { - self.iter().find_map(|section| section.pe_file_range_at(va)) - } - - /// Return the data starting at the given virtual address, up to the end of the - /// section containing it. - /// - /// Ignores sections with invalid data. - /// - /// Returns `None` if no section contains the address. - pub fn pe_data_at>(&self, data: R, va: u32) -> Option<&'data [u8]> { - self.iter().find_map(|section| section.pe_data_at(data, va)) - } - - /// Return the data of the section that contains the given virtual address in a PE file. - /// - /// Also returns the virtual address of that section. - /// - /// Ignores sections with invalid data. - pub fn pe_data_containing>( - &self, - data: R, - va: u32, - ) -> Option<(&'data [u8], u32)> { - self.iter() - .find_map(|section| section.pe_data_containing(data, va)) - } - - /// Return the section that contains a given virtual address. - pub fn section_containing(&self, va: u32) -> Option<&'data ImageSectionHeader> { - self.iter().find(|section| section.contains_rva(va)) - } -} - -impl pe::ImageSectionHeader { - /// Return the offset and size of the section in a PE file. - /// - /// The size of the range will be the minimum of the file size and virtual size. 
- pub fn pe_file_range(&self) -> (u32, u32) { - // Pointer and size will be zero for uninitialized data; we don't need to validate this. - let offset = self.pointer_to_raw_data.get(LE); - let size = cmp::min(self.virtual_size.get(LE), self.size_of_raw_data.get(LE)); - (offset, size) - } - - /// Return the file offset of the given virtual address, and the remaining size up - /// to the end of the section. - /// - /// Returns `None` if the section does not contain the address. - pub fn pe_file_range_at(&self, va: u32) -> Option<(u32, u32)> { - let section_va = self.virtual_address.get(LE); - let offset = va.checked_sub(section_va)?; - let (section_offset, section_size) = self.pe_file_range(); - // Address must be within section (and not at its end). - if offset < section_size { - Some((section_offset.checked_add(offset)?, section_size - offset)) - } else { - None - } - } - - /// Return the virtual address and size of the section. - pub fn pe_address_range(&self) -> (u32, u32) { - (self.virtual_address.get(LE), self.virtual_size.get(LE)) - } - - /// Return the section data in a PE file. - /// - /// The length of the data will be the minimum of the file size and virtual size. - pub fn pe_data<'data, R: ReadRef<'data>>(&self, data: R) -> Result<&'data [u8]> { - let (offset, size) = self.pe_file_range(); - data.read_bytes_at(offset.into(), size.into()) - .read_error("Invalid PE section offset or size") - } - - /// Return the data starting at the given virtual address, up to the end of the - /// section. - /// - /// Ignores sections with invalid data. - /// - /// Returns `None` if the section does not contain the address. 
- pub fn pe_data_at<'data, R: ReadRef<'data>>(&self, data: R, va: u32) -> Option<&'data [u8]> { - let (offset, size) = self.pe_file_range_at(va)?; - data.read_bytes_at(offset.into(), size.into()).ok() - } - - /// Tests whether a given RVA is part of this section - pub fn contains_rva(&self, va: u32) -> bool { - let section_va = self.virtual_address.get(LE); - match va.checked_sub(section_va) { - None => false, - Some(offset) => { - // Address must be within section (and not at its end). - offset < self.virtual_size.get(LE) - } - } - } - - /// Return the section data if it contains the given virtual address. - /// - /// Also returns the virtual address of that section. - /// - /// Ignores sections with invalid data. - pub fn pe_data_containing<'data, R: ReadRef<'data>>( - &self, - data: R, - va: u32, - ) -> Option<(&'data [u8], u32)> { - let section_va = self.virtual_address.get(LE); - let offset = va.checked_sub(section_va)?; - let (section_offset, section_size) = self.pe_file_range(); - // Address must be within section (and not at its end). - if offset < section_size { - let section_data = data - .read_bytes_at(section_offset.into(), section_size.into()) - .ok()?; - Some((section_data, section_va)) - } else { - None - } - } -} - -/// An iterator over the relocations in an `PeSection`. 
-#[derive(Debug)] -pub struct PeRelocationIterator<'data, 'file, R = &'data [u8]>( - PhantomData<(&'data (), &'file (), R)>, -); - -impl<'data, 'file, R> Iterator for PeRelocationIterator<'data, 'file, R> { - type Item = (u64, Relocation); - - fn next(&mut self) -> Option { - None - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/read_cache.rs s390-tools-2.33.1/rust-vendor/object/src/read/read_cache.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/read_cache.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/read_cache.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,182 +0,0 @@ -use core::ops::Range; -use std::boxed::Box; -use std::cell::RefCell; -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::convert::TryInto; -use std::io::{Read, Seek, SeekFrom}; -use std::mem; -use std::vec::Vec; - -use crate::read::ReadRef; - -/// An implementation of `ReadRef` for data in a stream that implements -/// `Read + Seek`. -/// -/// Contains a cache of read-only blocks of data, allowing references to -/// them to be returned. Entries in the cache are never removed. -/// Entries are keyed on the offset and size of the read. -/// Currently overlapping reads are considered separate reads. -#[derive(Debug)] -pub struct ReadCache { - cache: RefCell>, -} - -#[derive(Debug)] -struct ReadCacheInternal { - read: R, - bufs: HashMap<(u64, u64), Box<[u8]>>, - strings: HashMap<(u64, u8), Box<[u8]>>, -} - -impl ReadCache { - /// Create an empty `ReadCache` for the given stream. - pub fn new(read: R) -> Self { - ReadCache { - cache: RefCell::new(ReadCacheInternal { - read, - bufs: HashMap::new(), - strings: HashMap::new(), - }), - } - } - - /// Return an implementation of `ReadRef` that restricts reads - /// to the given range of the stream. - pub fn range(&self, offset: u64, size: u64) -> ReadCacheRange<'_, R> { - ReadCacheRange { - r: self, - offset, - size, - } - } - - /// Free buffers used by the cache. 
- pub fn clear(&mut self) { - self.cache.borrow_mut().bufs.clear(); - } - - /// Unwrap this `ReadCache`, returning the underlying reader. - pub fn into_inner(self) -> R { - self.cache.into_inner().read - } -} - -impl<'a, R: Read + Seek> ReadRef<'a> for &'a ReadCache { - fn len(self) -> Result { - let cache = &mut *self.cache.borrow_mut(); - cache.read.seek(SeekFrom::End(0)).map_err(|_| ()) - } - - fn read_bytes_at(self, offset: u64, size: u64) -> Result<&'a [u8], ()> { - if size == 0 { - return Ok(&[]); - } - let cache = &mut *self.cache.borrow_mut(); - let buf = match cache.bufs.entry((offset, size)) { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => { - let size = size.try_into().map_err(|_| ())?; - cache.read.seek(SeekFrom::Start(offset)).map_err(|_| ())?; - let mut bytes = vec![0; size].into_boxed_slice(); - cache.read.read_exact(&mut bytes).map_err(|_| ())?; - entry.insert(bytes) - } - }; - // Extend the lifetime to that of self. - // This is OK because we never mutate or remove entries. - Ok(unsafe { mem::transmute::<&[u8], &[u8]>(buf) }) - } - - fn read_bytes_at_until(self, range: Range, delimiter: u8) -> Result<&'a [u8], ()> { - let cache = &mut *self.cache.borrow_mut(); - let buf = match cache.strings.entry((range.start, delimiter)) { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => { - cache - .read - .seek(SeekFrom::Start(range.start)) - .map_err(|_| ())?; - - let max_check: usize = (range.end - range.start).try_into().map_err(|_| ())?; - // Strings should be relatively small. - // TODO: make this configurable? 
- let max_check = max_check.min(4096); - - let mut bytes = Vec::new(); - let mut checked = 0; - loop { - bytes.resize((checked + 256).min(max_check), 0); - let read = cache.read.read(&mut bytes[checked..]).map_err(|_| ())?; - if read == 0 { - return Err(()); - } - if let Some(len) = memchr::memchr(delimiter, &bytes[checked..][..read]) { - bytes.truncate(checked + len); - break entry.insert(bytes.into_boxed_slice()); - } - checked += read; - if checked >= max_check { - return Err(()); - } - } - } - }; - // Extend the lifetime to that of self. - // This is OK because we never mutate or remove entries. - Ok(unsafe { mem::transmute::<&[u8], &[u8]>(buf) }) - } -} - -/// An implementation of `ReadRef` for a range of data in a stream that -/// implements `Read + Seek`. -/// -/// Shares an underlying `ReadCache` with a lifetime of `'a`. -#[derive(Debug)] -pub struct ReadCacheRange<'a, R: Read + Seek> { - r: &'a ReadCache, - offset: u64, - size: u64, -} - -impl<'a, R: Read + Seek> Clone for ReadCacheRange<'a, R> { - fn clone(&self) -> Self { - Self { - r: self.r, - offset: self.offset, - size: self.size, - } - } -} - -impl<'a, R: Read + Seek> Copy for ReadCacheRange<'a, R> {} - -impl<'a, R: Read + Seek> ReadRef<'a> for ReadCacheRange<'a, R> { - fn len(self) -> Result { - Ok(self.size) - } - - fn read_bytes_at(self, offset: u64, size: u64) -> Result<&'a [u8], ()> { - if size == 0 { - return Ok(&[]); - } - let end = offset.checked_add(size).ok_or(())?; - if end > self.size { - return Err(()); - } - let r_offset = self.offset.checked_add(offset).ok_or(())?; - self.r.read_bytes_at(r_offset, size) - } - - fn read_bytes_at_until(self, range: Range, delimiter: u8) -> Result<&'a [u8], ()> { - let r_start = self.offset.checked_add(range.start).ok_or(())?; - let r_end = self.offset.checked_add(range.end).ok_or(())?; - let bytes = self.r.read_bytes_at_until(r_start..r_end, delimiter)?; - let size = bytes.len().try_into().map_err(|_| ())?; - let end = 
range.start.checked_add(size).ok_or(())?; - if end > self.size { - return Err(()); - } - Ok(bytes) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/read_ref.rs s390-tools-2.33.1/rust-vendor/object/src/read/read_ref.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/read_ref.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/read_ref.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,137 +0,0 @@ -#![allow(clippy::len_without_is_empty)] - -use core::convert::TryInto; -use core::ops::Range; -use core::{mem, result}; - -use crate::pod::{from_bytes, slice_from_bytes, Pod}; - -type Result = result::Result; - -/// A trait for reading references to `Pod` types from a block of data. -/// -/// This allows parsers to handle both of these cases: -/// - the block of data exists in memory, and it is desirable -/// to use references to this block instead of copying it, -/// - the block of data exists in storage, and it is desirable -/// to read on demand to minimize I/O and memory usage. -/// -/// The methods accept `self` by value because `Self` is expected to behave -/// similar to a reference: it may be a reference with a lifetime of `'a`, -/// or it may be a wrapper of a reference. -/// -/// The `Clone` and `Copy` bounds are for convenience, and since `Self` is -/// expected to be similar to a reference, these are easily satisfied. -/// -/// Object file parsers typically use offsets to locate the structures -/// in the block, and will most commonly use the `*_at` methods to -/// read a structure at a known offset. -/// -/// Occasionally file parsers will need to treat the block as a stream, -/// and so convenience methods are provided that update an offset with -/// the size that was read. -// -// An alternative would be for methods to accept `&mut self` and use a -// `seek` method instead of the `offset` parameters, but this is less -// convenient for implementers. 
-pub trait ReadRef<'a>: Clone + Copy { - /// The total size of the block of data. - fn len(self) -> Result; - - /// Get a reference to a `u8` slice at the given offset. - /// - /// Returns an error if offset or size are out of bounds. - fn read_bytes_at(self, offset: u64, size: u64) -> Result<&'a [u8]>; - - /// Get a reference to a delimited `u8` slice which starts at range.start. - /// - /// Does not include the delimiter. - /// - /// Returns an error if the range is out of bounds or the delimiter is - /// not found in the range. - fn read_bytes_at_until(self, range: Range, delimiter: u8) -> Result<&'a [u8]>; - - /// Get a reference to a `u8` slice at the given offset, and update the offset. - /// - /// Returns an error if offset or size are out of bounds. - fn read_bytes(self, offset: &mut u64, size: u64) -> Result<&'a [u8]> { - let bytes = self.read_bytes_at(*offset, size)?; - *offset = offset.wrapping_add(size); - Ok(bytes) - } - - /// Get a reference to a `Pod` type at the given offset, and update the offset. - /// - /// Returns an error if offset or size are out of bounds. - /// - /// The default implementation uses `read_bytes`, and returns an error if - /// `read_bytes` does not return bytes with the correct alignment for `T`. - /// Implementors may want to provide their own implementation that ensures - /// the alignment can be satisfied. Alternatively, only use this method with - /// types that do not need alignment (see the `unaligned` feature of this crate). - fn read(self, offset: &mut u64) -> Result<&'a T> { - let size = mem::size_of::().try_into().map_err(|_| ())?; - let bytes = self.read_bytes(offset, size)?; - let (t, _) = from_bytes(bytes)?; - Ok(t) - } - - /// Get a reference to a `Pod` type at the given offset. - /// - /// Returns an error if offset or size are out of bounds. - /// - /// Also see the `read` method for information regarding alignment of `T`. 
- fn read_at(self, mut offset: u64) -> Result<&'a T> { - self.read(&mut offset) - } - - /// Get a reference to a slice of a `Pod` type at the given offset, and update the offset. - /// - /// Returns an error if offset or size are out of bounds. - /// - /// Also see the `read` method for information regarding alignment of `T`. - fn read_slice(self, offset: &mut u64, count: usize) -> Result<&'a [T]> { - let size = count - .checked_mul(mem::size_of::()) - .ok_or(())? - .try_into() - .map_err(|_| ())?; - let bytes = self.read_bytes(offset, size)?; - let (t, _) = slice_from_bytes(bytes, count)?; - Ok(t) - } - - /// Get a reference to a slice of a `Pod` type at the given offset. - /// - /// Returns an error if offset or size are out of bounds. - /// - /// Also see the `read` method for information regarding alignment of `T`. - fn read_slice_at(self, mut offset: u64, count: usize) -> Result<&'a [T]> { - self.read_slice(&mut offset, count) - } -} - -impl<'a> ReadRef<'a> for &'a [u8] { - fn len(self) -> Result { - self.len().try_into().map_err(|_| ()) - } - - fn read_bytes_at(self, offset: u64, size: u64) -> Result<&'a [u8]> { - let offset: usize = offset.try_into().map_err(|_| ())?; - let size: usize = size.try_into().map_err(|_| ())?; - self.get(offset..).ok_or(())?.get(..size).ok_or(()) - } - - fn read_bytes_at_until(self, range: Range, delimiter: u8) -> Result<&'a [u8]> { - let start: usize = range.start.try_into().map_err(|_| ())?; - let end: usize = range.end.try_into().map_err(|_| ())?; - let bytes = self.get(start..end).ok_or(())?; - match memchr::memchr(delimiter, bytes) { - Some(len) => { - // This will never fail. 
- bytes.get(..len).ok_or(()) - } - None => Err(()), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/traits.rs s390-tools-2.33.1/rust-vendor/object/src/read/traits.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/traits.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/traits.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,469 +0,0 @@ -use alloc::borrow::Cow; -use alloc::vec::Vec; - -use crate::read::{ - self, Architecture, CodeView, ComdatKind, CompressedData, CompressedFileRange, Export, - FileFlags, Import, ObjectKind, ObjectMap, Relocation, Result, SectionFlags, SectionIndex, - SectionKind, SegmentFlags, SymbolFlags, SymbolIndex, SymbolKind, SymbolMap, SymbolMapName, - SymbolScope, SymbolSection, -}; -use crate::Endianness; - -/// An object file. -pub trait Object<'data: 'file, 'file>: read::private::Sealed { - /// A segment in the object file. - type Segment: ObjectSegment<'data>; - - /// An iterator over the segments in the object file. - type SegmentIterator: Iterator; - - /// A section in the object file. - type Section: ObjectSection<'data>; - - /// An iterator over the sections in the object file. - type SectionIterator: Iterator; - - /// A COMDAT section group in the object file. - type Comdat: ObjectComdat<'data>; - - /// An iterator over the COMDAT section groups in the object file. - type ComdatIterator: Iterator; - - /// A symbol in the object file. - type Symbol: ObjectSymbol<'data>; - - /// An iterator over symbols in the object file. - type SymbolIterator: Iterator; - - /// A symbol table in the object file. - type SymbolTable: ObjectSymbolTable< - 'data, - Symbol = Self::Symbol, - SymbolIterator = Self::SymbolIterator, - >; - - /// An iterator over dynamic relocations in the file. - /// - /// The first field in the item tuple is the address - /// that the relocation applies to. - type DynamicRelocationIterator: Iterator; - - /// Get the architecture type of the file. 
- fn architecture(&self) -> Architecture; - - /// Get the endianness of the file. - #[inline] - fn endianness(&self) -> Endianness { - if self.is_little_endian() { - Endianness::Little - } else { - Endianness::Big - } - } - - /// Return true if the file is little endian, false if it is big endian. - fn is_little_endian(&self) -> bool; - - /// Return true if the file can contain 64-bit addresses. - fn is_64(&self) -> bool; - - /// Return the kind of this object. - fn kind(&self) -> ObjectKind; - - /// Get an iterator over the segments in the file. - fn segments(&'file self) -> Self::SegmentIterator; - - /// Get the section named `section_name`, if such a section exists. - /// - /// If `section_name` starts with a '.' then it is treated as a system section name, - /// and is compared using the conventions specific to the object file format. This - /// includes: - /// - if ".debug_str_offsets" is requested for a Mach-O object file, then the actual - /// section name that is searched for is "__debug_str_offs". - /// - if ".debug_info" is requested for an ELF object file, then - /// ".zdebug_info" may be returned (and similarly for other debug sections). - /// - /// For some object files, multiple segments may contain sections with the same - /// name. In this case, the first matching section will be used. - /// - /// This method skips over sections with invalid names. - fn section_by_name(&'file self, section_name: &str) -> Option { - self.section_by_name_bytes(section_name.as_bytes()) - } - - /// Like [`Self::section_by_name`], but allows names that are not UTF-8. - fn section_by_name_bytes(&'file self, section_name: &[u8]) -> Option; - - /// Get the section at the given index. - /// - /// The meaning of the index depends on the object file. - /// - /// For some object files, this requires iterating through all sections. - /// - /// Returns an error if the index is invalid. 
- fn section_by_index(&'file self, index: SectionIndex) -> Result; - - /// Get an iterator over the sections in the file. - fn sections(&'file self) -> Self::SectionIterator; - - /// Get an iterator over the COMDAT section groups in the file. - fn comdats(&'file self) -> Self::ComdatIterator; - - /// Get the symbol table, if any. - fn symbol_table(&'file self) -> Option; - - /// Get the debugging symbol at the given index. - /// - /// The meaning of the index depends on the object file. - /// - /// Returns an error if the index is invalid. - fn symbol_by_index(&'file self, index: SymbolIndex) -> Result; - - /// Get an iterator over the debugging symbols in the file. - /// - /// This may skip over symbols that are malformed or unsupported. - /// - /// For Mach-O files, this does not include STAB entries. - fn symbols(&'file self) -> Self::SymbolIterator; - - /// Get the dynamic linking symbol table, if any. - /// - /// Only ELF has a separate dynamic linking symbol table. - fn dynamic_symbol_table(&'file self) -> Option; - - /// Get an iterator over the dynamic linking symbols in the file. - /// - /// This may skip over symbols that are malformed or unsupported. - /// - /// Only ELF has separate dynamic linking symbols. - /// Other file formats will return an empty iterator. - fn dynamic_symbols(&'file self) -> Self::SymbolIterator; - - /// Get the dynamic relocations for this file. - /// - /// Symbol indices in these relocations refer to the dynamic symbol table. - /// - /// Only ELF has dynamic relocations. - fn dynamic_relocations(&'file self) -> Option; - - /// Construct a map from addresses to symbol names. - /// - /// The map will only contain defined text and data symbols. - /// The dynamic symbol table will only be used if there are no debugging symbols. 
- fn symbol_map(&'file self) -> SymbolMap> { - let mut symbols = Vec::new(); - if let Some(table) = self.symbol_table().or_else(|| self.dynamic_symbol_table()) { - for symbol in table.symbols() { - if !symbol.is_definition() { - continue; - } - if let Ok(name) = symbol.name() { - symbols.push(SymbolMapName::new(symbol.address(), name)); - } - } - } - SymbolMap::new(symbols) - } - - /// Construct a map from addresses to symbol names and object file names. - /// - /// This is derived from Mach-O STAB entries. - fn object_map(&'file self) -> ObjectMap<'data> { - ObjectMap::default() - } - - /// Get the imported symbols. - fn imports(&self) -> Result>>; - - /// Get the exported symbols that expose both a name and an address. - /// - /// Some file formats may provide other kinds of symbols, that can be retrieved using - /// the lower-level API. - fn exports(&self) -> Result>>; - - /// Return true if the file contains debug information sections, false if not. - fn has_debug_symbols(&self) -> bool; - - /// The UUID from a Mach-O `LC_UUID` load command. - #[inline] - fn mach_uuid(&self) -> Result> { - Ok(None) - } - - /// The build ID from an ELF `NT_GNU_BUILD_ID` note. - #[inline] - fn build_id(&self) -> Result> { - Ok(None) - } - - /// The filename and CRC from a `.gnu_debuglink` section. - #[inline] - fn gnu_debuglink(&self) -> Result> { - Ok(None) - } - - /// The filename and build ID from a `.gnu_debugaltlink` section. - #[inline] - fn gnu_debugaltlink(&self) -> Result> { - Ok(None) - } - - /// The filename and GUID from the PE CodeView section - #[inline] - fn pdb_info(&self) -> Result>> { - Ok(None) - } - - /// Get the base address used for relative virtual addresses. - /// - /// Currently this is only non-zero for PE. - fn relative_address_base(&'file self) -> u64; - - /// Get the virtual address of the entry point of the binary - fn entry(&'file self) -> u64; - - /// File flags that are specific to each file format. 
- fn flags(&self) -> FileFlags; -} - -/// A loadable segment defined in an object file. -/// -/// For ELF, this is a program header with type `PT_LOAD`. -/// For Mach-O, this is a load command with type `LC_SEGMENT` or `LC_SEGMENT_64`. -pub trait ObjectSegment<'data>: read::private::Sealed { - /// Returns the virtual address of the segment. - fn address(&self) -> u64; - - /// Returns the size of the segment in memory. - fn size(&self) -> u64; - - /// Returns the alignment of the segment in memory. - fn align(&self) -> u64; - - /// Returns the offset and size of the segment in the file. - fn file_range(&self) -> (u64, u64); - - /// Returns a reference to the file contents of the segment. - /// - /// The length of this data may be different from the size of the - /// segment in memory. - fn data(&self) -> Result<&'data [u8]>; - - /// Return the segment data in the given range. - /// - /// Returns `Ok(None)` if the segment does not contain the given range. - fn data_range(&self, address: u64, size: u64) -> Result>; - - /// Returns the name of the segment. - fn name_bytes(&self) -> Result>; - - /// Returns the name of the segment. - /// - /// Returns an error if the name is not UTF-8. - fn name(&self) -> Result>; - - /// Return the flags of segment. - fn flags(&self) -> SegmentFlags; -} - -/// A section defined in an object file. -pub trait ObjectSection<'data>: read::private::Sealed { - /// An iterator over the relocations for a section. - /// - /// The first field in the item tuple is the section offset - /// that the relocation applies to. - type RelocationIterator: Iterator; - - /// Returns the section index. - fn index(&self) -> SectionIndex; - - /// Returns the address of the section. - fn address(&self) -> u64; - - /// Returns the size of the section in memory. - fn size(&self) -> u64; - - /// Returns the alignment of the section in memory. - fn align(&self) -> u64; - - /// Returns offset and size of on-disk segment (if any). 
- fn file_range(&self) -> Option<(u64, u64)>; - - /// Returns the raw contents of the section. - /// - /// The length of this data may be different from the size of the - /// section in memory. - /// - /// This does not do any decompression. - fn data(&self) -> Result<&'data [u8]>; - - /// Return the raw contents of the section data in the given range. - /// - /// This does not do any decompression. - /// - /// Returns `Ok(None)` if the section does not contain the given range. - fn data_range(&self, address: u64, size: u64) -> Result>; - - /// Returns the potentially compressed file range of the section, - /// along with information about the compression. - fn compressed_file_range(&self) -> Result; - - /// Returns the potentially compressed contents of the section, - /// along with information about the compression. - fn compressed_data(&self) -> Result>; - - /// Returns the uncompressed contents of the section. - /// - /// The length of this data may be different from the size of the - /// section in memory. - /// - /// If no compression is detected, then returns the data unchanged. - /// Returns `Err` if decompression fails. - fn uncompressed_data(&self) -> Result> { - self.compressed_data()?.decompress() - } - - /// Returns the name of the section. - fn name_bytes(&self) -> Result<&[u8]>; - - /// Returns the name of the section. - /// - /// Returns an error if the name is not UTF-8. - fn name(&self) -> Result<&str>; - - /// Returns the name of the segment for this section. - fn segment_name_bytes(&self) -> Result>; - - /// Returns the name of the segment for this section. - /// - /// Returns an error if the name is not UTF-8. - fn segment_name(&self) -> Result>; - - /// Return the kind of this section. - fn kind(&self) -> SectionKind; - - /// Get the relocations for this section. - fn relocations(&self) -> Self::RelocationIterator; - - /// Section flags that are specific to each file format. 
- fn flags(&self) -> SectionFlags; -} - -/// A COMDAT section group defined in an object file. -pub trait ObjectComdat<'data>: read::private::Sealed { - /// An iterator over the sections in the object file. - type SectionIterator: Iterator; - - /// Returns the COMDAT selection kind. - fn kind(&self) -> ComdatKind; - - /// Returns the index of the symbol used for the name of COMDAT section group. - fn symbol(&self) -> SymbolIndex; - - /// Returns the name of the COMDAT section group. - fn name_bytes(&self) -> Result<&[u8]>; - - /// Returns the name of the COMDAT section group. - /// - /// Returns an error if the name is not UTF-8. - fn name(&self) -> Result<&str>; - - /// Get the sections in this section group. - fn sections(&self) -> Self::SectionIterator; -} - -/// A symbol table. -pub trait ObjectSymbolTable<'data>: read::private::Sealed { - /// A symbol table entry. - type Symbol: ObjectSymbol<'data>; - - /// An iterator over the symbols in a symbol table. - type SymbolIterator: Iterator; - - /// Get an iterator over the symbols in the table. - /// - /// This may skip over symbols that are malformed or unsupported. - fn symbols(&self) -> Self::SymbolIterator; - - /// Get the symbol at the given index. - /// - /// The meaning of the index depends on the object file. - /// - /// Returns an error if the index is invalid. - fn symbol_by_index(&self, index: SymbolIndex) -> Result; -} - -/// A symbol table entry. -pub trait ObjectSymbol<'data>: read::private::Sealed { - /// The index of the symbol. - fn index(&self) -> SymbolIndex; - - /// The name of the symbol. - fn name_bytes(&self) -> Result<&'data [u8]>; - - /// The name of the symbol. - /// - /// Returns an error if the name is not UTF-8. - fn name(&self) -> Result<&'data str>; - - /// The address of the symbol. May be zero if the address is unknown. - fn address(&self) -> u64; - - /// The size of the symbol. May be zero if the size is unknown. - fn size(&self) -> u64; - - /// Return the kind of this symbol. 
- fn kind(&self) -> SymbolKind; - - /// Returns the section where the symbol is defined. - fn section(&self) -> SymbolSection; - - /// Returns the section index for the section containing this symbol. - /// - /// May return `None` if the symbol is not defined in a section. - fn section_index(&self) -> Option { - self.section().index() - } - - /// Return true if the symbol is undefined. - fn is_undefined(&self) -> bool; - - /// Return true if the symbol is a definition of a function or data object - /// that has a known address. - fn is_definition(&self) -> bool; - - /// Return true if the symbol is common data. - /// - /// Note: does not check for `SymbolSection::Section` with `SectionKind::Common`. - fn is_common(&self) -> bool; - - /// Return true if the symbol is weak. - fn is_weak(&self) -> bool; - - /// Returns the symbol scope. - fn scope(&self) -> SymbolScope; - - /// Return true if the symbol visible outside of the compilation unit. - /// - /// This treats `SymbolScope::Unknown` as global. - fn is_global(&self) -> bool; - - /// Return true if the symbol is only visible within the compilation unit. - fn is_local(&self) -> bool; - - /// Symbol flags that are specific to each file format. - fn flags(&self) -> SymbolFlags; -} - -/// An iterator for files that don't have dynamic relocations. 
-#[derive(Debug)] -pub struct NoDynamicRelocationIterator; - -impl Iterator for NoDynamicRelocationIterator { - type Item = (u64, Relocation); - - #[inline] - fn next(&mut self) -> Option { - None - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/util.rs s390-tools-2.33.1/rust-vendor/object/src/read/util.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/util.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,425 +0,0 @@ -use alloc::string::String; -use core::convert::TryInto; -use core::fmt; -use core::marker::PhantomData; - -use crate::pod::{from_bytes, slice_from_bytes, Pod}; -use crate::ReadRef; - -/// A newtype for byte slices. -/// -/// It has these important features: -/// - no methods that can panic, such as `Index` -/// - convenience methods for `Pod` types -/// - a useful `Debug` implementation -#[derive(Default, Clone, Copy, PartialEq, Eq)] -pub struct Bytes<'data>(pub &'data [u8]); - -impl<'data> fmt::Debug for Bytes<'data> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - debug_list_bytes(self.0, fmt) - } -} - -impl<'data> Bytes<'data> { - /// Return the length of the byte slice. - #[inline] - pub fn len(&self) -> usize { - self.0.len() - } - - /// Return true if the byte slice is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Skip over the given number of bytes at the start of the byte slice. - /// - /// Modifies the byte slice to start after the bytes. - /// - /// Returns an error if there are too few bytes. - #[inline] - pub fn skip(&mut self, offset: usize) -> Result<(), ()> { - match self.0.get(offset..) { - Some(tail) => { - self.0 = tail; - Ok(()) - } - None => { - self.0 = &[]; - Err(()) - } - } - } - - /// Return a reference to the given number of bytes at the start of the byte slice. - /// - /// Modifies the byte slice to start after the bytes. 
- /// - /// Returns an error if there are too few bytes. - #[inline] - pub fn read_bytes(&mut self, count: usize) -> Result, ()> { - match (self.0.get(..count), self.0.get(count..)) { - (Some(head), Some(tail)) => { - self.0 = tail; - Ok(Bytes(head)) - } - _ => { - self.0 = &[]; - Err(()) - } - } - } - - /// Return a reference to the given number of bytes at the given offset of the byte slice. - /// - /// Returns an error if the offset is invalid or there are too few bytes. - #[inline] - pub fn read_bytes_at(mut self, offset: usize, count: usize) -> Result, ()> { - self.skip(offset)?; - self.read_bytes(count) - } - - /// Return a reference to a `Pod` struct at the start of the byte slice. - /// - /// Modifies the byte slice to start after the bytes. - /// - /// Returns an error if there are too few bytes or the slice is incorrectly aligned. - #[inline] - pub fn read(&mut self) -> Result<&'data T, ()> { - match from_bytes(self.0) { - Ok((value, tail)) => { - self.0 = tail; - Ok(value) - } - Err(()) => { - self.0 = &[]; - Err(()) - } - } - } - - /// Return a reference to a `Pod` struct at the given offset of the byte slice. - /// - /// Returns an error if there are too few bytes or the offset is incorrectly aligned. - #[inline] - pub fn read_at(mut self, offset: usize) -> Result<&'data T, ()> { - self.skip(offset)?; - self.read() - } - - /// Return a reference to a slice of `Pod` structs at the start of the byte slice. - /// - /// Modifies the byte slice to start after the bytes. - /// - /// Returns an error if there are too few bytes or the offset is incorrectly aligned. - #[inline] - pub fn read_slice(&mut self, count: usize) -> Result<&'data [T], ()> { - match slice_from_bytes(self.0, count) { - Ok((value, tail)) => { - self.0 = tail; - Ok(value) - } - Err(()) => { - self.0 = &[]; - Err(()) - } - } - } - - /// Return a reference to a slice of `Pod` structs at the given offset of the byte slice. 
- /// - /// Returns an error if there are too few bytes or the offset is incorrectly aligned. - #[inline] - pub fn read_slice_at(mut self, offset: usize, count: usize) -> Result<&'data [T], ()> { - self.skip(offset)?; - self.read_slice(count) - } - - /// Read a null terminated string. - /// - /// Does not assume any encoding. - /// Reads past the null byte, but doesn't return it. - #[inline] - pub fn read_string(&mut self) -> Result<&'data [u8], ()> { - match memchr::memchr(b'\0', self.0) { - Some(null) => { - // These will never fail. - let bytes = self.read_bytes(null)?; - self.skip(1)?; - Ok(bytes.0) - } - None => { - self.0 = &[]; - Err(()) - } - } - } - - /// Read a null terminated string at an offset. - /// - /// Does not assume any encoding. Does not return the null byte. - #[inline] - pub fn read_string_at(mut self, offset: usize) -> Result<&'data [u8], ()> { - self.skip(offset)?; - self.read_string() - } - - /// Read an unsigned LEB128 number. - pub fn read_uleb128(&mut self) -> Result { - let mut result = 0; - let mut shift = 0; - - loop { - let byte = *self.read::()?; - if shift == 63 && byte != 0x00 && byte != 0x01 { - return Err(()); - } - result |= u64::from(byte & 0x7f) << shift; - shift += 7; - - if byte & 0x80 == 0 { - return Ok(result); - } - } - } - - /// Read a signed LEB128 number. - pub fn read_sleb128(&mut self) -> Result { - let mut result = 0; - let mut shift = 0; - - loop { - let byte = *self.read::()?; - if shift == 63 && byte != 0x00 && byte != 0x7f { - return Err(()); - } - result |= i64::from(byte & 0x7f) << shift; - shift += 7; - - if byte & 0x80 == 0 { - if shift < 64 && (byte & 0x40) != 0 { - // Sign extend the result. - result |= !0 << shift; - } - return Ok(result); - } - } - } -} - -// Only for Debug impl of `Bytes`. 
-fn debug_list_bytes(bytes: &[u8], fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut list = fmt.debug_list(); - list.entries(bytes.iter().take(8).copied().map(DebugByte)); - if bytes.len() > 8 { - list.entry(&DebugLen(bytes.len())); - } - list.finish() -} - -struct DebugByte(u8); - -impl fmt::Debug for DebugByte { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "0x{:02x}", self.0) - } -} - -struct DebugLen(usize); - -impl fmt::Debug for DebugLen { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "...; {}", self.0) - } -} - -/// A newtype for byte strings. -/// -/// For byte slices that are strings of an unknown encoding. -/// -/// Provides a `Debug` implementation that interprets the bytes as UTF-8. -#[derive(Default, Clone, Copy, PartialEq, Eq)] -pub(crate) struct ByteString<'data>(pub &'data [u8]); - -impl<'data> fmt::Debug for ByteString<'data> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "\"{}\"", String::from_utf8_lossy(self.0)) - } -} - -#[allow(dead_code)] -#[inline] -pub(crate) fn align(offset: usize, size: usize) -> usize { - (offset + (size - 1)) & !(size - 1) -} - -#[allow(dead_code)] -pub(crate) fn data_range( - data: &[u8], - data_address: u64, - range_address: u64, - size: u64, -) -> Option<&[u8]> { - let offset = range_address.checked_sub(data_address)?; - data.get(offset.try_into().ok()?..)? - .get(..size.try_into().ok()?) -} - -/// A table of zero-terminated strings. -/// -/// This is used for most file formats. -#[derive(Debug, Clone, Copy)] -pub struct StringTable<'data, R = &'data [u8]> -where - R: ReadRef<'data>, -{ - data: Option, - start: u64, - end: u64, - marker: PhantomData<&'data ()>, -} - -impl<'data, R: ReadRef<'data>> StringTable<'data, R> { - /// Interpret the given data as a string table. 
- pub fn new(data: R, start: u64, end: u64) -> Self { - StringTable { - data: Some(data), - start, - end, - marker: PhantomData, - } - } - - /// Return the string at the given offset. - pub fn get(&self, offset: u32) -> Result<&'data [u8], ()> { - match self.data { - Some(data) => { - let r_start = self.start.checked_add(offset.into()).ok_or(())?; - data.read_bytes_at_until(r_start..self.end, 0) - } - None => Err(()), - } - } -} - -impl<'data, R: ReadRef<'data>> Default for StringTable<'data, R> { - fn default() -> Self { - StringTable { - data: None, - start: 0, - end: 0, - marker: PhantomData, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::pod::bytes_of; - - #[test] - fn bytes() { - let x = u32::to_be(0x0123_4567); - let data = Bytes(bytes_of(&x)); - - let mut bytes = data; - assert_eq!(bytes.skip(0), Ok(())); - assert_eq!(bytes, data); - - let mut bytes = data; - assert_eq!(bytes.skip(4), Ok(())); - assert_eq!(bytes, Bytes(&[])); - - let mut bytes = data; - assert_eq!(bytes.skip(5), Err(())); - assert_eq!(bytes, Bytes(&[])); - - let mut bytes = data; - assert_eq!(bytes.read_bytes(0), Ok(Bytes(&[]))); - assert_eq!(bytes, data); - - let mut bytes = data; - assert_eq!(bytes.read_bytes(4), Ok(data)); - assert_eq!(bytes, Bytes(&[])); - - let mut bytes = data; - assert_eq!(bytes.read_bytes(5), Err(())); - assert_eq!(bytes, Bytes(&[])); - - assert_eq!(data.read_bytes_at(0, 0), Ok(Bytes(&[]))); - assert_eq!(data.read_bytes_at(4, 0), Ok(Bytes(&[]))); - assert_eq!(data.read_bytes_at(0, 4), Ok(data)); - assert_eq!(data.read_bytes_at(1, 4), Err(())); - - let mut bytes = data; - assert_eq!(bytes.read::(), Ok(&u16::to_be(0x0123))); - assert_eq!(bytes, Bytes(&[0x45, 0x67])); - assert_eq!(data.read_at::(2), Ok(&u16::to_be(0x4567))); - assert_eq!(data.read_at::(3), Err(())); - assert_eq!(data.read_at::(4), Err(())); - - let mut bytes = data; - assert_eq!(bytes.read::(), Ok(&x)); - assert_eq!(bytes, Bytes(&[])); - - let mut bytes = data; - 
assert_eq!(bytes.read::(), Err(())); - assert_eq!(bytes, Bytes(&[])); - - let mut bytes = data; - assert_eq!(bytes.read_slice::(0), Ok(&[][..])); - assert_eq!(bytes, data); - - let mut bytes = data; - assert_eq!(bytes.read_slice::(4), Ok(data.0)); - assert_eq!(bytes, Bytes(&[])); - - let mut bytes = data; - assert_eq!(bytes.read_slice::(5), Err(())); - assert_eq!(bytes, Bytes(&[])); - - assert_eq!(data.read_slice_at::(0, 0), Ok(&[][..])); - assert_eq!(data.read_slice_at::(4, 0), Ok(&[][..])); - assert_eq!(data.read_slice_at::(0, 4), Ok(data.0)); - assert_eq!(data.read_slice_at::(1, 4), Err(())); - - let data = Bytes(&[0x01, 0x02, 0x00, 0x04]); - - let mut bytes = data; - assert_eq!(bytes.read_string(), Ok(&data.0[..2])); - assert_eq!(bytes.0, &data.0[3..]); - - let mut bytes = data; - bytes.skip(3).unwrap(); - assert_eq!(bytes.read_string(), Err(())); - assert_eq!(bytes.0, &[]); - - assert_eq!(data.read_string_at(0), Ok(&data.0[..2])); - assert_eq!(data.read_string_at(1), Ok(&data.0[1..2])); - assert_eq!(data.read_string_at(2), Ok(&[][..])); - assert_eq!(data.read_string_at(3), Err(())); - } - - #[test] - fn bytes_debug() { - assert_eq!(format!("{:?}", Bytes(&[])), "[]"); - assert_eq!(format!("{:?}", Bytes(&[0x01])), "[0x01]"); - assert_eq!( - format!( - "{:?}", - Bytes(&[0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]) - ), - "[0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]" - ); - assert_eq!( - format!( - "{:?}", - Bytes(&[0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09]) - ), - "[0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, ...; 9]" - ); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/wasm.rs s390-tools-2.33.1/rust-vendor/object/src/read/wasm.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/wasm.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/wasm.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,951 +0,0 @@ -//! Support for reading Wasm files. -//! -//! 
Provides `WasmFile` and related types which implement the `Object` trait. -//! -//! Currently implements the minimum required to access DWARF debugging information. -use alloc::boxed::Box; -use alloc::vec::Vec; -use core::marker::PhantomData; -use core::ops::Range; -use core::{slice, str}; -use wasmparser as wp; - -use crate::read::{ - self, Architecture, ComdatKind, CompressedData, CompressedFileRange, Error, Export, FileFlags, - Import, NoDynamicRelocationIterator, Object, ObjectComdat, ObjectKind, ObjectSection, - ObjectSegment, ObjectSymbol, ObjectSymbolTable, ReadError, ReadRef, Relocation, Result, - SectionFlags, SectionIndex, SectionKind, SegmentFlags, SymbolFlags, SymbolIndex, SymbolKind, - SymbolScope, SymbolSection, -}; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(usize)] -enum SectionId { - Custom = 0, - Type = 1, - Import = 2, - Function = 3, - Table = 4, - Memory = 5, - Global = 6, - Export = 7, - Start = 8, - Element = 9, - Code = 10, - Data = 11, - DataCount = 12, -} -// Update this constant when adding new section id: -const MAX_SECTION_ID: usize = SectionId::DataCount as usize; - -/// A WebAssembly object file. -#[derive(Debug)] -pub struct WasmFile<'data, R = &'data [u8]> { - data: &'data [u8], - has_memory64: bool, - // All sections, including custom sections. - sections: Vec>, - // Indices into `sections` of sections with a non-zero id. - id_sections: Box<[Option; MAX_SECTION_ID + 1]>, - // Whether the file has DWARF information. - has_debug_symbols: bool, - // Symbols collected from imports, exports, code and name sections. - symbols: Vec>, - // Address of the function body for the entry point. 
- entry: u64, - marker: PhantomData, -} - -#[derive(Debug)] -struct SectionHeader<'data> { - id: SectionId, - range: Range, - name: &'data str, -} - -#[derive(Clone)] -enum LocalFunctionKind { - Unknown, - Exported { symbol_ids: Vec }, - Local { symbol_id: u32 }, -} - -impl ReadError for wasmparser::Result { - fn read_error(self, error: &'static str) -> Result { - self.map_err(|_| Error(error)) - } -} - -impl<'data, R: ReadRef<'data>> WasmFile<'data, R> { - /// Parse the raw wasm data. - pub fn parse(data: R) -> Result { - let len = data.len().read_error("Unknown Wasm file size")?; - let data = data.read_bytes_at(0, len).read_error("Wasm read failed")?; - let parser = wp::Parser::new(0).parse_all(data); - - let mut file = WasmFile { - data, - has_memory64: false, - sections: Vec::new(), - id_sections: Default::default(), - has_debug_symbols: false, - symbols: Vec::new(), - entry: 0, - marker: PhantomData, - }; - - let mut main_file_symbol = Some(WasmSymbolInternal { - name: "", - address: 0, - size: 0, - kind: SymbolKind::File, - section: SymbolSection::None, - scope: SymbolScope::Compilation, - }); - - let mut imported_funcs_count = 0; - let mut local_func_kinds = Vec::new(); - let mut entry_func_id = None; - let mut code_range_start = 0; - let mut code_func_index = 0; - // One-to-one mapping of globals to their value (if the global is a constant integer). 
- let mut global_values = Vec::new(); - - for payload in parser { - let payload = payload.read_error("Invalid Wasm section header")?; - - match payload { - wp::Payload::TypeSection(section) => { - file.add_section(SectionId::Type, section.range(), ""); - } - wp::Payload::ImportSection(section) => { - file.add_section(SectionId::Import, section.range(), ""); - let mut last_module_name = None; - - for import in section { - let import = import.read_error("Couldn't read an import item")?; - let module_name = import.module; - - if last_module_name != Some(module_name) { - file.symbols.push(WasmSymbolInternal { - name: module_name, - address: 0, - size: 0, - kind: SymbolKind::File, - section: SymbolSection::None, - scope: SymbolScope::Dynamic, - }); - last_module_name = Some(module_name); - } - - let kind = match import.ty { - wp::TypeRef::Func(_) => { - imported_funcs_count += 1; - SymbolKind::Text - } - wp::TypeRef::Memory(memory) => { - file.has_memory64 |= memory.memory64; - SymbolKind::Data - } - wp::TypeRef::Table(_) | wp::TypeRef::Global(_) => SymbolKind::Data, - wp::TypeRef::Tag(_) => SymbolKind::Unknown, - }; - - file.symbols.push(WasmSymbolInternal { - name: import.name, - address: 0, - size: 0, - kind, - section: SymbolSection::Undefined, - scope: SymbolScope::Dynamic, - }); - } - } - wp::Payload::FunctionSection(section) => { - file.add_section(SectionId::Function, section.range(), ""); - local_func_kinds = - vec![LocalFunctionKind::Unknown; section.into_iter().count()]; - } - wp::Payload::TableSection(section) => { - file.add_section(SectionId::Table, section.range(), ""); - } - wp::Payload::MemorySection(section) => { - file.add_section(SectionId::Memory, section.range(), ""); - for memory in section { - let memory = memory.read_error("Couldn't read a memory item")?; - file.has_memory64 |= memory.memory64; - } - } - wp::Payload::GlobalSection(section) => { - file.add_section(SectionId::Global, section.range(), ""); - for global in section { - let global = 
global.read_error("Couldn't read a global item")?; - let mut address = None; - if !global.ty.mutable { - // There should be exactly one instruction. - let init = global.init_expr.get_operators_reader().read(); - address = match init.read_error("Couldn't read a global init expr")? { - wp::Operator::I32Const { value } => Some(value as u64), - wp::Operator::I64Const { value } => Some(value as u64), - _ => None, - }; - } - global_values.push(address); - } - } - wp::Payload::ExportSection(section) => { - file.add_section(SectionId::Export, section.range(), ""); - if let Some(main_file_symbol) = main_file_symbol.take() { - file.symbols.push(main_file_symbol); - } - - for export in section { - let export = export.read_error("Couldn't read an export item")?; - - let (kind, section_idx) = match export.kind { - wp::ExternalKind::Func => { - if let Some(local_func_id) = - export.index.checked_sub(imported_funcs_count) - { - let local_func_kind = - &mut local_func_kinds[local_func_id as usize]; - if let LocalFunctionKind::Unknown = local_func_kind { - *local_func_kind = LocalFunctionKind::Exported { - symbol_ids: Vec::new(), - }; - } - let symbol_ids = match local_func_kind { - LocalFunctionKind::Exported { symbol_ids } => symbol_ids, - _ => unreachable!(), - }; - symbol_ids.push(file.symbols.len() as u32); - } - (SymbolKind::Text, SectionId::Code) - } - wp::ExternalKind::Table - | wp::ExternalKind::Memory - | wp::ExternalKind::Global => (SymbolKind::Data, SectionId::Data), - // TODO - wp::ExternalKind::Tag => continue, - }; - - // Try to guess the symbol address. Rust and C export a global containing - // the address in linear memory of the symbol. 
- let mut address = 0; - if export.kind == wp::ExternalKind::Global { - if let Some(&Some(x)) = global_values.get(export.index as usize) { - address = x; - } - } - - file.symbols.push(WasmSymbolInternal { - name: export.name, - address, - size: 0, - kind, - section: SymbolSection::Section(SectionIndex(section_idx as usize)), - scope: SymbolScope::Dynamic, - }); - } - } - wp::Payload::StartSection { func, range, .. } => { - file.add_section(SectionId::Start, range, ""); - entry_func_id = Some(func); - } - wp::Payload::ElementSection(section) => { - file.add_section(SectionId::Element, section.range(), ""); - } - wp::Payload::CodeSectionStart { range, .. } => { - code_range_start = range.start; - file.add_section(SectionId::Code, range, ""); - if let Some(main_file_symbol) = main_file_symbol.take() { - file.symbols.push(main_file_symbol); - } - } - wp::Payload::CodeSectionEntry(body) => { - let i = code_func_index; - code_func_index += 1; - - let range = body.range(); - - let address = range.start as u64 - code_range_start as u64; - let size = (range.end - range.start) as u64; - - if entry_func_id == Some(i as u32) { - file.entry = address; - } - - let local_func_kind = &mut local_func_kinds[i]; - match local_func_kind { - LocalFunctionKind::Unknown => { - *local_func_kind = LocalFunctionKind::Local { - symbol_id: file.symbols.len() as u32, - }; - file.symbols.push(WasmSymbolInternal { - name: "", - address, - size, - kind: SymbolKind::Text, - section: SymbolSection::Section(SectionIndex( - SectionId::Code as usize, - )), - scope: SymbolScope::Compilation, - }); - } - LocalFunctionKind::Exported { symbol_ids } => { - for symbol_id in core::mem::take(symbol_ids) { - let export_symbol = &mut file.symbols[symbol_id as usize]; - export_symbol.address = address; - export_symbol.size = size; - } - } - _ => unreachable!(), - } - } - wp::Payload::DataSection(section) => { - file.add_section(SectionId::Data, section.range(), ""); - } - wp::Payload::DataCountSection { range, 
.. } => { - file.add_section(SectionId::DataCount, range, ""); - } - wp::Payload::CustomSection(section) => { - let name = section.name(); - let size = section.data().len(); - let mut range = section.range(); - range.start = range.end - size; - file.add_section(SectionId::Custom, range, name); - if name == "name" { - for name in - wp::NameSectionReader::new(section.data(), section.data_offset()) - { - // TODO: Right now, ill-formed name subsections - // are silently ignored in order to maintain - // compatibility with extended name sections, which - // are not yet supported by the version of - // `wasmparser` currently used. - // A better fix would be to update `wasmparser` to - // the newest version, but this requires - // a major rewrite of this file. - if let Ok(wp::Name::Function(name_map)) = name { - for naming in name_map { - let naming = - naming.read_error("Couldn't read a function name")?; - if let Some(local_index) = - naming.index.checked_sub(imported_funcs_count) - { - if let LocalFunctionKind::Local { symbol_id } = - local_func_kinds[local_index as usize] - { - file.symbols[symbol_id as usize].name = naming.name; - } - } - } - } - } - } else if name.starts_with(".debug_") { - file.has_debug_symbols = true; - } - } - _ => {} - } - } - - Ok(file) - } - - fn add_section(&mut self, id: SectionId, range: Range, name: &'data str) { - let section = SectionHeader { id, range, name }; - self.id_sections[id as usize] = Some(self.sections.len()); - self.sections.push(section); - } -} - -impl<'data, R> read::private::Sealed for WasmFile<'data, R> {} - -impl<'data, 'file, R: ReadRef<'data>> Object<'data, 'file> for WasmFile<'data, R> -where - 'data: 'file, - R: 'file, -{ - type Segment = WasmSegment<'data, 'file, R>; - type SegmentIterator = WasmSegmentIterator<'data, 'file, R>; - type Section = WasmSection<'data, 'file, R>; - type SectionIterator = WasmSectionIterator<'data, 'file, R>; - type Comdat = WasmComdat<'data, 'file, R>; - type ComdatIterator = 
WasmComdatIterator<'data, 'file, R>; - type Symbol = WasmSymbol<'data, 'file>; - type SymbolIterator = WasmSymbolIterator<'data, 'file>; - type SymbolTable = WasmSymbolTable<'data, 'file>; - type DynamicRelocationIterator = NoDynamicRelocationIterator; - - #[inline] - fn architecture(&self) -> Architecture { - if self.has_memory64 { - Architecture::Wasm64 - } else { - Architecture::Wasm32 - } - } - - #[inline] - fn is_little_endian(&self) -> bool { - true - } - - #[inline] - fn is_64(&self) -> bool { - self.has_memory64 - } - - fn kind(&self) -> ObjectKind { - // TODO: check for `linking` custom section - ObjectKind::Unknown - } - - fn segments(&'file self) -> Self::SegmentIterator { - WasmSegmentIterator { file: self } - } - - fn section_by_name_bytes( - &'file self, - section_name: &[u8], - ) -> Option> { - self.sections() - .find(|section| section.name_bytes() == Ok(section_name)) - } - - fn section_by_index(&'file self, index: SectionIndex) -> Result> { - // TODO: Missing sections should return an empty section. 
- let id_section = self - .id_sections - .get(index.0) - .and_then(|x| *x) - .read_error("Invalid Wasm section index")?; - let section = self.sections.get(id_section).unwrap(); - Ok(WasmSection { - file: self, - section, - }) - } - - fn sections(&'file self) -> Self::SectionIterator { - WasmSectionIterator { - file: self, - sections: self.sections.iter(), - } - } - - fn comdats(&'file self) -> Self::ComdatIterator { - WasmComdatIterator { file: self } - } - - #[inline] - fn symbol_by_index(&'file self, index: SymbolIndex) -> Result> { - let symbol = self - .symbols - .get(index.0) - .read_error("Invalid Wasm symbol index")?; - Ok(WasmSymbol { index, symbol }) - } - - fn symbols(&'file self) -> Self::SymbolIterator { - WasmSymbolIterator { - symbols: self.symbols.iter().enumerate(), - } - } - - fn symbol_table(&'file self) -> Option> { - Some(WasmSymbolTable { - symbols: &self.symbols, - }) - } - - fn dynamic_symbols(&'file self) -> Self::SymbolIterator { - WasmSymbolIterator { - symbols: [].iter().enumerate(), - } - } - - #[inline] - fn dynamic_symbol_table(&'file self) -> Option> { - None - } - - #[inline] - fn dynamic_relocations(&self) -> Option { - None - } - - fn imports(&self) -> Result>> { - // TODO: return entries in the import section - Ok(Vec::new()) - } - - fn exports(&self) -> Result>> { - // TODO: return entries in the export section - Ok(Vec::new()) - } - - fn has_debug_symbols(&self) -> bool { - self.has_debug_symbols - } - - fn relative_address_base(&self) -> u64 { - 0 - } - - #[inline] - fn entry(&'file self) -> u64 { - self.entry - } - - #[inline] - fn flags(&self) -> FileFlags { - FileFlags::None - } -} - -/// An iterator over the segments of a `WasmFile`. 
-#[derive(Debug)] -pub struct WasmSegmentIterator<'data, 'file, R = &'data [u8]> { - #[allow(unused)] - file: &'file WasmFile<'data, R>, -} - -impl<'data, 'file, R> Iterator for WasmSegmentIterator<'data, 'file, R> { - type Item = WasmSegment<'data, 'file, R>; - - #[inline] - fn next(&mut self) -> Option { - None - } -} - -/// A segment of a `WasmFile`. -#[derive(Debug)] -pub struct WasmSegment<'data, 'file, R = &'data [u8]> { - #[allow(unused)] - file: &'file WasmFile<'data, R>, -} - -impl<'data, 'file, R> read::private::Sealed for WasmSegment<'data, 'file, R> {} - -impl<'data, 'file, R> ObjectSegment<'data> for WasmSegment<'data, 'file, R> { - #[inline] - fn address(&self) -> u64 { - unreachable!() - } - - #[inline] - fn size(&self) -> u64 { - unreachable!() - } - - #[inline] - fn align(&self) -> u64 { - unreachable!() - } - - #[inline] - fn file_range(&self) -> (u64, u64) { - unreachable!() - } - - fn data(&self) -> Result<&'data [u8]> { - unreachable!() - } - - fn data_range(&self, _address: u64, _size: u64) -> Result> { - unreachable!() - } - - #[inline] - fn name_bytes(&self) -> Result> { - unreachable!() - } - - #[inline] - fn name(&self) -> Result> { - unreachable!() - } - - #[inline] - fn flags(&self) -> SegmentFlags { - unreachable!() - } -} - -/// An iterator over the sections of a `WasmFile`. -#[derive(Debug)] -pub struct WasmSectionIterator<'data, 'file, R = &'data [u8]> { - file: &'file WasmFile<'data, R>, - sections: slice::Iter<'file, SectionHeader<'data>>, -} - -impl<'data, 'file, R> Iterator for WasmSectionIterator<'data, 'file, R> { - type Item = WasmSection<'data, 'file, R>; - - fn next(&mut self) -> Option { - let section = self.sections.next()?; - Some(WasmSection { - file: self.file, - section, - }) - } -} - -/// A section of a `WasmFile`. 
-#[derive(Debug)] -pub struct WasmSection<'data, 'file, R = &'data [u8]> { - file: &'file WasmFile<'data, R>, - section: &'file SectionHeader<'data>, -} - -impl<'data, 'file, R> read::private::Sealed for WasmSection<'data, 'file, R> {} - -impl<'data, 'file, R: ReadRef<'data>> ObjectSection<'data> for WasmSection<'data, 'file, R> { - type RelocationIterator = WasmRelocationIterator<'data, 'file, R>; - - #[inline] - fn index(&self) -> SectionIndex { - // Note that we treat all custom sections as index 0. - // This is ok because they are never looked up by index. - SectionIndex(self.section.id as usize) - } - - #[inline] - fn address(&self) -> u64 { - 0 - } - - #[inline] - fn size(&self) -> u64 { - let range = &self.section.range; - (range.end - range.start) as u64 - } - - #[inline] - fn align(&self) -> u64 { - 1 - } - - #[inline] - fn file_range(&self) -> Option<(u64, u64)> { - let range = &self.section.range; - Some((range.start as _, range.end as _)) - } - - #[inline] - fn data(&self) -> Result<&'data [u8]> { - let range = &self.section.range; - self.file - .data - .read_bytes_at(range.start as u64, range.end as u64 - range.start as u64) - .read_error("Invalid Wasm section size or offset") - } - - fn data_range(&self, _address: u64, _size: u64) -> Result> { - unimplemented!() - } - - #[inline] - fn compressed_file_range(&self) -> Result { - Ok(CompressedFileRange::none(self.file_range())) - } - - #[inline] - fn compressed_data(&self) -> Result> { - self.data().map(CompressedData::none) - } - - #[inline] - fn name_bytes(&self) -> Result<&[u8]> { - self.name().map(str::as_bytes) - } - - #[inline] - fn name(&self) -> Result<&str> { - Ok(match self.section.id { - SectionId::Custom => self.section.name, - SectionId::Type => "", - SectionId::Import => "", - SectionId::Function => "", - SectionId::Table => "", - SectionId::Memory => "", - SectionId::Global => "", - SectionId::Export => "", - SectionId::Start => "", - SectionId::Element => "", - SectionId::Code => "", - 
SectionId::Data => "", - SectionId::DataCount => "", - }) - } - - #[inline] - fn segment_name_bytes(&self) -> Result> { - Ok(None) - } - - #[inline] - fn segment_name(&self) -> Result> { - Ok(None) - } - - #[inline] - fn kind(&self) -> SectionKind { - match self.section.id { - SectionId::Custom => match self.section.name { - "reloc." | "linking" => SectionKind::Linker, - _ => SectionKind::Other, - }, - SectionId::Type => SectionKind::Metadata, - SectionId::Import => SectionKind::Linker, - SectionId::Function => SectionKind::Metadata, - SectionId::Table => SectionKind::UninitializedData, - SectionId::Memory => SectionKind::UninitializedData, - SectionId::Global => SectionKind::Data, - SectionId::Export => SectionKind::Linker, - SectionId::Start => SectionKind::Linker, - SectionId::Element => SectionKind::Data, - SectionId::Code => SectionKind::Text, - SectionId::Data => SectionKind::Data, - SectionId::DataCount => SectionKind::UninitializedData, - } - } - - #[inline] - fn relocations(&self) -> WasmRelocationIterator<'data, 'file, R> { - WasmRelocationIterator(PhantomData) - } - - #[inline] - fn flags(&self) -> SectionFlags { - SectionFlags::None - } -} - -/// An iterator over the COMDAT section groups of a `WasmFile`. -#[derive(Debug)] -pub struct WasmComdatIterator<'data, 'file, R = &'data [u8]> { - #[allow(unused)] - file: &'file WasmFile<'data, R>, -} - -impl<'data, 'file, R> Iterator for WasmComdatIterator<'data, 'file, R> { - type Item = WasmComdat<'data, 'file, R>; - - #[inline] - fn next(&mut self) -> Option { - None - } -} - -/// A COMDAT section group of a `WasmFile`. 
-#[derive(Debug)] -pub struct WasmComdat<'data, 'file, R = &'data [u8]> { - #[allow(unused)] - file: &'file WasmFile<'data, R>, -} - -impl<'data, 'file, R> read::private::Sealed for WasmComdat<'data, 'file, R> {} - -impl<'data, 'file, R> ObjectComdat<'data> for WasmComdat<'data, 'file, R> { - type SectionIterator = WasmComdatSectionIterator<'data, 'file, R>; - - #[inline] - fn kind(&self) -> ComdatKind { - unreachable!(); - } - - #[inline] - fn symbol(&self) -> SymbolIndex { - unreachable!(); - } - - #[inline] - fn name_bytes(&self) -> Result<&[u8]> { - unreachable!(); - } - - #[inline] - fn name(&self) -> Result<&str> { - unreachable!(); - } - - #[inline] - fn sections(&self) -> Self::SectionIterator { - unreachable!(); - } -} - -/// An iterator over the sections in a COMDAT section group of a `WasmFile`. -#[derive(Debug)] -pub struct WasmComdatSectionIterator<'data, 'file, R = &'data [u8]> { - #[allow(unused)] - file: &'file WasmFile<'data, R>, -} - -impl<'data, 'file, R> Iterator for WasmComdatSectionIterator<'data, 'file, R> { - type Item = SectionIndex; - - fn next(&mut self) -> Option { - None - } -} - -/// A symbol table of a `WasmFile`. -#[derive(Debug)] -pub struct WasmSymbolTable<'data, 'file> { - symbols: &'file [WasmSymbolInternal<'data>], -} - -impl<'data, 'file> read::private::Sealed for WasmSymbolTable<'data, 'file> {} - -impl<'data, 'file> ObjectSymbolTable<'data> for WasmSymbolTable<'data, 'file> { - type Symbol = WasmSymbol<'data, 'file>; - type SymbolIterator = WasmSymbolIterator<'data, 'file>; - - fn symbols(&self) -> Self::SymbolIterator { - WasmSymbolIterator { - symbols: self.symbols.iter().enumerate(), - } - } - - fn symbol_by_index(&self, index: SymbolIndex) -> Result { - let symbol = self - .symbols - .get(index.0) - .read_error("Invalid Wasm symbol index")?; - Ok(WasmSymbol { index, symbol }) - } -} - -/// An iterator over the symbols of a `WasmFile`. 
-#[derive(Debug)] -pub struct WasmSymbolIterator<'data, 'file> { - symbols: core::iter::Enumerate>>, -} - -impl<'data, 'file> Iterator for WasmSymbolIterator<'data, 'file> { - type Item = WasmSymbol<'data, 'file>; - - fn next(&mut self) -> Option { - let (index, symbol) = self.symbols.next()?; - Some(WasmSymbol { - index: SymbolIndex(index), - symbol, - }) - } -} - -/// A symbol of a `WasmFile`. -#[derive(Clone, Copy, Debug)] -pub struct WasmSymbol<'data, 'file> { - index: SymbolIndex, - symbol: &'file WasmSymbolInternal<'data>, -} - -#[derive(Clone, Debug)] -struct WasmSymbolInternal<'data> { - name: &'data str, - address: u64, - size: u64, - kind: SymbolKind, - section: SymbolSection, - scope: SymbolScope, -} - -impl<'data, 'file> read::private::Sealed for WasmSymbol<'data, 'file> {} - -impl<'data, 'file> ObjectSymbol<'data> for WasmSymbol<'data, 'file> { - #[inline] - fn index(&self) -> SymbolIndex { - self.index - } - - #[inline] - fn name_bytes(&self) -> read::Result<&'data [u8]> { - Ok(self.symbol.name.as_bytes()) - } - - #[inline] - fn name(&self) -> read::Result<&'data str> { - Ok(self.symbol.name) - } - - #[inline] - fn address(&self) -> u64 { - self.symbol.address - } - - #[inline] - fn size(&self) -> u64 { - self.symbol.size - } - - #[inline] - fn kind(&self) -> SymbolKind { - self.symbol.kind - } - - #[inline] - fn section(&self) -> SymbolSection { - self.symbol.section - } - - #[inline] - fn is_undefined(&self) -> bool { - self.symbol.section == SymbolSection::Undefined - } - - #[inline] - fn is_definition(&self) -> bool { - self.symbol.kind == SymbolKind::Text && self.symbol.section != SymbolSection::Undefined - } - - #[inline] - fn is_common(&self) -> bool { - self.symbol.section == SymbolSection::Common - } - - #[inline] - fn is_weak(&self) -> bool { - false - } - - #[inline] - fn scope(&self) -> SymbolScope { - self.symbol.scope - } - - #[inline] - fn is_global(&self) -> bool { - self.symbol.scope != SymbolScope::Compilation - } - - #[inline] - fn 
is_local(&self) -> bool { - self.symbol.scope == SymbolScope::Compilation - } - - #[inline] - fn flags(&self) -> SymbolFlags { - SymbolFlags::None - } -} - -/// An iterator over the relocations in a `WasmSection`. -#[derive(Debug)] -pub struct WasmRelocationIterator<'data, 'file, R = &'data [u8]>( - PhantomData<(&'data (), &'file (), R)>, -); - -impl<'data, 'file, R> Iterator for WasmRelocationIterator<'data, 'file, R> { - type Item = (u64, Relocation); - - #[inline] - fn next(&mut self) -> Option { - None - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/comdat.rs s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/comdat.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/comdat.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/comdat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,129 +0,0 @@ -//! XCOFF doesn't support the COMDAT section. - -use core::fmt::Debug; - -use crate::xcoff; - -use crate::read::{self, ComdatKind, ObjectComdat, ReadRef, Result, SectionIndex, SymbolIndex}; - -use super::{FileHeader, XcoffFile}; - -/// An iterator over the COMDAT section groups of a `XcoffFile32`. -pub type XcoffComdatIterator32<'data, 'file, R = &'data [u8]> = - XcoffComdatIterator<'data, 'file, xcoff::FileHeader32, R>; -/// An iterator over the COMDAT section groups of a `XcoffFile64`. -pub type XcoffComdatIterator64<'data, 'file, R = &'data [u8]> = - XcoffComdatIterator<'data, 'file, xcoff::FileHeader64, R>; - -/// An iterator over the COMDAT section groups of a `XcoffFile`. 
-#[derive(Debug)] -pub struct XcoffComdatIterator<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - #[allow(unused)] - pub(crate) file: &'file XcoffFile<'data, Xcoff, R>, -} - -impl<'data, 'file, Xcoff, R> Iterator for XcoffComdatIterator<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - type Item = XcoffComdat<'data, 'file, Xcoff, R>; - - #[inline] - fn next(&mut self) -> Option { - None - } -} - -/// A COMDAT section group of a `XcoffFile32`. -pub type XcoffComdat32<'data, 'file, R = &'data [u8]> = - XcoffComdat<'data, 'file, xcoff::FileHeader32, R>; - -/// A COMDAT section group of a `XcoffFile64`. -pub type XcoffComdat64<'data, 'file, R = &'data [u8]> = - XcoffComdat<'data, 'file, xcoff::FileHeader64, R>; - -/// A COMDAT section group of a `XcoffFile`. -#[derive(Debug)] -pub struct XcoffComdat<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - #[allow(unused)] - file: &'file XcoffFile<'data, Xcoff, R>, -} - -impl<'data, 'file, Xcoff, R> read::private::Sealed for XcoffComdat<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Xcoff, R> ObjectComdat<'data> for XcoffComdat<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - type SectionIterator = XcoffComdatSectionIterator<'data, 'file, Xcoff, R>; - - #[inline] - fn kind(&self) -> ComdatKind { - unreachable!(); - } - - #[inline] - fn symbol(&self) -> SymbolIndex { - unreachable!(); - } - - #[inline] - fn name_bytes(&self) -> Result<&[u8]> { - unreachable!(); - } - - #[inline] - fn name(&self) -> Result<&str> { - unreachable!(); - } - - #[inline] - fn sections(&self) -> Self::SectionIterator { - unreachable!(); - } -} - -/// An iterator over the sections in a COMDAT section group of a `XcoffFile32`. 
-pub type XcoffComdatSectionIterator32<'data, 'file, R = &'data [u8]> = - XcoffComdatSectionIterator<'data, 'file, xcoff::FileHeader32, R>; -/// An iterator over the sections in a COMDAT section group of a `XcoffFile64`. -pub type XcoffComdatSectionIterator64<'data, 'file, R = &'data [u8]> = - XcoffComdatSectionIterator<'data, 'file, xcoff::FileHeader64, R>; - -/// An iterator over the sections in a COMDAT section group of a `XcoffFile`. -#[derive(Debug)] -pub struct XcoffComdatSectionIterator<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - #[allow(unused)] - file: &'file XcoffFile<'data, Xcoff, R>, -} - -impl<'data, 'file, Xcoff, R> Iterator for XcoffComdatSectionIterator<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - type Item = SectionIndex; - - fn next(&mut self) -> Option { - None - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/file.rs s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/file.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/file.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/file.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,629 +0,0 @@ -use core::fmt::Debug; -use core::mem; - -use alloc::vec::Vec; - -use crate::read::{self, Error, NoDynamicRelocationIterator, Object, ReadError, ReadRef, Result}; - -use crate::{ - xcoff, Architecture, BigEndian as BE, FileFlags, ObjectKind, ObjectSection, Pod, SectionIndex, - SymbolIndex, -}; - -use super::{ - CsectAux, FileAux, SectionHeader, SectionTable, Symbol, SymbolTable, XcoffComdat, - XcoffComdatIterator, XcoffSection, XcoffSectionIterator, XcoffSegment, XcoffSegmentIterator, - XcoffSymbol, XcoffSymbolIterator, XcoffSymbolTable, -}; - -/// A 32-bit XCOFF object file. -pub type XcoffFile32<'data, R = &'data [u8]> = XcoffFile<'data, xcoff::FileHeader32, R>; -/// A 64-bit XCOFF object file. 
-pub type XcoffFile64<'data, R = &'data [u8]> = XcoffFile<'data, xcoff::FileHeader64, R>; - -/// A partially parsed XCOFF file. -/// -/// Most of the functionality of this type is provided by the `Object` trait implementation. -#[derive(Debug)] -pub struct XcoffFile<'data, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - pub(super) data: R, - pub(super) header: &'data Xcoff, - pub(super) aux_header: Option<&'data Xcoff::AuxHeader>, - pub(super) sections: SectionTable<'data, Xcoff>, - pub(super) symbols: SymbolTable<'data, Xcoff, R>, -} - -impl<'data, Xcoff, R> XcoffFile<'data, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - /// Parse the raw XCOFF file data. - pub fn parse(data: R) -> Result { - let mut offset = 0; - let header = Xcoff::parse(data, &mut offset)?; - let aux_header = header.aux_header(data, &mut offset)?; - let sections = header.sections(data, &mut offset)?; - let symbols = header.symbols(data)?; - - Ok(XcoffFile { - data, - header, - aux_header, - sections, - symbols, - }) - } - - /// Returns the raw data. - pub fn data(&self) -> R { - self.data - } - - /// Returns the raw XCOFF file header. 
- pub fn raw_header(&self) -> &'data Xcoff { - self.header - } -} - -impl<'data, Xcoff, R> read::private::Sealed for XcoffFile<'data, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Xcoff, R> Object<'data, 'file> for XcoffFile<'data, Xcoff, R> -where - 'data: 'file, - Xcoff: FileHeader, - R: 'file + ReadRef<'data>, -{ - type Segment = XcoffSegment<'data, 'file, Xcoff, R>; - type SegmentIterator = XcoffSegmentIterator<'data, 'file, Xcoff, R>; - type Section = XcoffSection<'data, 'file, Xcoff, R>; - type SectionIterator = XcoffSectionIterator<'data, 'file, Xcoff, R>; - type Comdat = XcoffComdat<'data, 'file, Xcoff, R>; - type ComdatIterator = XcoffComdatIterator<'data, 'file, Xcoff, R>; - type Symbol = XcoffSymbol<'data, 'file, Xcoff, R>; - type SymbolIterator = XcoffSymbolIterator<'data, 'file, Xcoff, R>; - type SymbolTable = XcoffSymbolTable<'data, 'file, Xcoff, R>; - type DynamicRelocationIterator = NoDynamicRelocationIterator; - - fn architecture(&self) -> crate::Architecture { - if self.is_64() { - Architecture::PowerPc64 - } else { - Architecture::PowerPc - } - } - - fn is_little_endian(&self) -> bool { - false - } - - fn is_64(&self) -> bool { - self.header.is_type_64() - } - - fn kind(&self) -> ObjectKind { - let flags = self.header.f_flags(); - if flags & xcoff::F_EXEC != 0 { - ObjectKind::Executable - } else if flags & xcoff::F_SHROBJ != 0 { - ObjectKind::Dynamic - } else if flags & xcoff::F_RELFLG == 0 { - ObjectKind::Relocatable - } else { - ObjectKind::Unknown - } - } - - fn segments(&'file self) -> XcoffSegmentIterator<'data, 'file, Xcoff, R> { - XcoffSegmentIterator { file: self } - } - - fn section_by_name_bytes( - &'file self, - section_name: &[u8], - ) -> Option> { - self.sections() - .find(|section| section.name_bytes() == Ok(section_name)) - } - - fn section_by_index( - &'file self, - index: SectionIndex, - ) -> Result> { - let section = self.sections.section(index)?; - Ok(XcoffSection { - file: self, - 
section, - index, - }) - } - - fn sections(&'file self) -> XcoffSectionIterator<'data, 'file, Xcoff, R> { - XcoffSectionIterator { - file: self, - iter: self.sections.iter().enumerate(), - } - } - - fn comdats(&'file self) -> XcoffComdatIterator<'data, 'file, Xcoff, R> { - XcoffComdatIterator { file: self } - } - - fn symbol_table(&'file self) -> Option> { - if self.symbols.is_empty() { - return None; - } - Some(XcoffSymbolTable { - symbols: &self.symbols, - file: self, - }) - } - - fn symbol_by_index( - &'file self, - index: SymbolIndex, - ) -> Result> { - let symbol = self.symbols.symbol(index.0)?; - Ok(XcoffSymbol { - symbols: &self.symbols, - index, - symbol, - file: self, - }) - } - - fn symbols(&'file self) -> XcoffSymbolIterator<'data, 'file, Xcoff, R> { - XcoffSymbolIterator { - symbols: &self.symbols, - index: 0, - file: self, - } - } - - fn dynamic_symbol_table(&'file self) -> Option> { - None - } - - fn dynamic_symbols(&'file self) -> XcoffSymbolIterator<'data, 'file, Xcoff, R> { - // TODO: return the symbols in the STYP_LOADER section. - XcoffSymbolIterator { - file: self, - symbols: &self.symbols, - // Hack: don't return any. - index: self.symbols.len(), - } - } - - fn dynamic_relocations(&'file self) -> Option { - // TODO: return the relocations in the STYP_LOADER section. - None - } - - fn imports(&self) -> Result>> { - // TODO: return the imports in the STYP_LOADER section. - Ok(Vec::new()) - } - - fn exports(&self) -> Result>> { - // TODO: return the exports in the STYP_LOADER section. 
- Ok(Vec::new()) - } - - fn has_debug_symbols(&self) -> bool { - self.section_by_name(".debug").is_some() || self.section_by_name(".dwinfo").is_some() - } - - fn relative_address_base(&'file self) -> u64 { - 0 - } - - fn entry(&'file self) -> u64 { - if let Some(aux_header) = self.aux_header { - aux_header.o_entry().into() - } else { - 0 - } - } - - fn flags(&self) -> FileFlags { - FileFlags::Xcoff { - f_flags: self.header.f_flags(), - } - } -} - -/// A trait for generic access to `FileHeader32` and `FileHeader64`. -#[allow(missing_docs)] -pub trait FileHeader: Debug + Pod { - type Word: Into; - type AuxHeader: AuxHeader; - type SectionHeader: SectionHeader; - type Symbol: Symbol; - type FileAux: FileAux; - type CsectAux: CsectAux; - - /// Return true if this type is a 64-bit header. - fn is_type_64(&self) -> bool; - - fn f_magic(&self) -> u16; - fn f_nscns(&self) -> u16; - fn f_timdat(&self) -> u32; - fn f_symptr(&self) -> Self::Word; - fn f_nsyms(&self) -> u32; - fn f_opthdr(&self) -> u16; - fn f_flags(&self) -> u16; - - // Provided methods. - - /// Read the file header. - /// - /// Also checks that the magic field in the file header is a supported format. - fn parse<'data, R: ReadRef<'data>>(data: R, offset: &mut u64) -> Result<&'data Self> { - let header = data - .read::(offset) - .read_error("Invalid XCOFF header size or alignment")?; - if !header.is_supported() { - return Err(Error("Unsupported XCOFF header")); - } - Ok(header) - } - - fn is_supported(&self) -> bool { - (self.is_type_64() && self.f_magic() == xcoff::MAGIC_64) - || (!self.is_type_64() && self.f_magic() == xcoff::MAGIC_32) - } - - /// Read the auxiliary file header. - fn aux_header<'data, R: ReadRef<'data>>( - &self, - data: R, - offset: &mut u64, - ) -> Result> { - let aux_header_size = self.f_opthdr(); - if self.f_flags() & xcoff::F_EXEC == 0 { - // No auxiliary header is required for an object file that is not an executable. 
- // TODO: Some AIX programs generate auxiliary headers for 32-bit object files - // that end after the data_start field. - *offset += u64::from(aux_header_size); - return Ok(None); - } - // Executables, however, must have auxiliary headers that include the - // full structure definitions. - if aux_header_size != mem::size_of::() as u16 { - *offset += u64::from(aux_header_size); - return Ok(None); - } - let aux_header = data - .read::(offset) - .read_error("Invalid XCOFF auxiliary header size")?; - Ok(Some(aux_header)) - } - - /// Read the section table. - #[inline] - fn sections<'data, R: ReadRef<'data>>( - &self, - data: R, - offset: &mut u64, - ) -> Result> { - SectionTable::parse(self, data, offset) - } - - /// Return the symbol table. - #[inline] - fn symbols<'data, R: ReadRef<'data>>(&self, data: R) -> Result> { - SymbolTable::parse(*self, data) - } -} - -impl FileHeader for xcoff::FileHeader32 { - type Word = u32; - type AuxHeader = xcoff::AuxHeader32; - type SectionHeader = xcoff::SectionHeader32; - type Symbol = xcoff::Symbol32; - type FileAux = xcoff::FileAux32; - type CsectAux = xcoff::CsectAux32; - - fn is_type_64(&self) -> bool { - false - } - - fn f_magic(&self) -> u16 { - self.f_magic.get(BE) - } - - fn f_nscns(&self) -> u16 { - self.f_nscns.get(BE) - } - - fn f_timdat(&self) -> u32 { - self.f_timdat.get(BE) - } - - fn f_symptr(&self) -> Self::Word { - self.f_symptr.get(BE) - } - - fn f_nsyms(&self) -> u32 { - self.f_nsyms.get(BE) - } - - fn f_opthdr(&self) -> u16 { - self.f_opthdr.get(BE) - } - - fn f_flags(&self) -> u16 { - self.f_flags.get(BE) - } -} - -impl FileHeader for xcoff::FileHeader64 { - type Word = u64; - type AuxHeader = xcoff::AuxHeader64; - type SectionHeader = xcoff::SectionHeader64; - type Symbol = xcoff::Symbol64; - type FileAux = xcoff::FileAux64; - type CsectAux = xcoff::CsectAux64; - - fn is_type_64(&self) -> bool { - true - } - - fn f_magic(&self) -> u16 { - self.f_magic.get(BE) - } - - fn f_nscns(&self) -> u16 { - 
self.f_nscns.get(BE) - } - - fn f_timdat(&self) -> u32 { - self.f_timdat.get(BE) - } - - fn f_symptr(&self) -> Self::Word { - self.f_symptr.get(BE) - } - - fn f_nsyms(&self) -> u32 { - self.f_nsyms.get(BE) - } - - fn f_opthdr(&self) -> u16 { - self.f_opthdr.get(BE) - } - - fn f_flags(&self) -> u16 { - self.f_flags.get(BE) - } -} - -#[allow(missing_docs)] -pub trait AuxHeader: Debug + Pod { - type Word: Into; - - fn o_vstamp(&self) -> u16; - fn o_tsize(&self) -> Self::Word; - fn o_dsize(&self) -> Self::Word; - fn o_bsize(&self) -> Self::Word; - fn o_entry(&self) -> Self::Word; - fn o_text_start(&self) -> Self::Word; - fn o_data_start(&self) -> Self::Word; - fn o_toc(&self) -> Self::Word; - fn o_snentry(&self) -> u16; - fn o_sntext(&self) -> u16; - fn o_sndata(&self) -> u16; - fn o_sntoc(&self) -> u16; - fn o_snloader(&self) -> u16; - fn o_snbss(&self) -> u16; - fn o_sntdata(&self) -> u16; - fn o_sntbss(&self) -> u16; - fn o_algntext(&self) -> u16; - fn o_algndata(&self) -> u16; - fn o_maxstack(&self) -> Self::Word; - fn o_maxdata(&self) -> Self::Word; - fn o_textpsize(&self) -> u8; - fn o_datapsize(&self) -> u8; - fn o_stackpsize(&self) -> u8; -} - -impl AuxHeader for xcoff::AuxHeader32 { - type Word = u32; - - fn o_vstamp(&self) -> u16 { - self.o_vstamp.get(BE) - } - - fn o_tsize(&self) -> Self::Word { - self.o_tsize.get(BE) - } - - fn o_dsize(&self) -> Self::Word { - self.o_dsize.get(BE) - } - - fn o_bsize(&self) -> Self::Word { - self.o_bsize.get(BE) - } - - fn o_entry(&self) -> Self::Word { - self.o_entry.get(BE) - } - - fn o_text_start(&self) -> Self::Word { - self.o_text_start.get(BE) - } - - fn o_data_start(&self) -> Self::Word { - self.o_data_start.get(BE) - } - - fn o_toc(&self) -> Self::Word { - self.o_toc.get(BE) - } - - fn o_snentry(&self) -> u16 { - self.o_snentry.get(BE) - } - - fn o_sntext(&self) -> u16 { - self.o_sntext.get(BE) - } - - fn o_sndata(&self) -> u16 { - self.o_sndata.get(BE) - } - - fn o_sntoc(&self) -> u16 { - self.o_sntoc.get(BE) - } - 
- fn o_snloader(&self) -> u16 { - self.o_snloader.get(BE) - } - - fn o_snbss(&self) -> u16 { - self.o_snbss.get(BE) - } - - fn o_sntdata(&self) -> u16 { - self.o_sntdata.get(BE) - } - - fn o_sntbss(&self) -> u16 { - self.o_sntbss.get(BE) - } - - fn o_algntext(&self) -> u16 { - self.o_algntext.get(BE) - } - - fn o_algndata(&self) -> u16 { - self.o_algndata.get(BE) - } - - fn o_maxstack(&self) -> Self::Word { - self.o_maxstack.get(BE) - } - - fn o_maxdata(&self) -> Self::Word { - self.o_maxdata.get(BE) - } - - fn o_textpsize(&self) -> u8 { - self.o_textpsize - } - - fn o_datapsize(&self) -> u8 { - self.o_datapsize - } - - fn o_stackpsize(&self) -> u8 { - self.o_stackpsize - } -} - -impl AuxHeader for xcoff::AuxHeader64 { - type Word = u64; - - fn o_vstamp(&self) -> u16 { - self.o_vstamp.get(BE) - } - - fn o_tsize(&self) -> Self::Word { - self.o_tsize.get(BE) - } - - fn o_dsize(&self) -> Self::Word { - self.o_dsize.get(BE) - } - - fn o_bsize(&self) -> Self::Word { - self.o_bsize.get(BE) - } - - fn o_entry(&self) -> Self::Word { - self.o_entry.get(BE) - } - - fn o_text_start(&self) -> Self::Word { - self.o_text_start.get(BE) - } - - fn o_data_start(&self) -> Self::Word { - self.o_data_start.get(BE) - } - - fn o_toc(&self) -> Self::Word { - self.o_toc.get(BE) - } - - fn o_snentry(&self) -> u16 { - self.o_snentry.get(BE) - } - - fn o_sntext(&self) -> u16 { - self.o_sntext.get(BE) - } - - fn o_sndata(&self) -> u16 { - self.o_sndata.get(BE) - } - - fn o_sntoc(&self) -> u16 { - self.o_sntoc.get(BE) - } - - fn o_snloader(&self) -> u16 { - self.o_snloader.get(BE) - } - - fn o_snbss(&self) -> u16 { - self.o_snbss.get(BE) - } - - fn o_sntdata(&self) -> u16 { - self.o_sntdata.get(BE) - } - - fn o_sntbss(&self) -> u16 { - self.o_sntbss.get(BE) - } - - fn o_algntext(&self) -> u16 { - self.o_algntext.get(BE) - } - - fn o_algndata(&self) -> u16 { - self.o_algndata.get(BE) - } - - fn o_maxstack(&self) -> Self::Word { - self.o_maxstack.get(BE) - } - - fn o_maxdata(&self) -> Self::Word 
{ - self.o_maxdata.get(BE) - } - - fn o_textpsize(&self) -> u8 { - self.o_textpsize - } - - fn o_datapsize(&self) -> u8 { - self.o_datapsize - } - - fn o_stackpsize(&self) -> u8 { - self.o_stackpsize - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/mod.rs s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/mod.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -//! Support for reading AIX XCOFF files. -//! -//! Provides `XcoffFile` and related types which implement the `Object` trait. - -mod file; -pub use file::*; - -mod section; -pub use section::*; - -mod symbol; -pub use symbol::*; - -mod relocation; -pub use relocation::*; - -mod comdat; -pub use comdat::*; - -mod segment; -pub use segment::*; diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/relocation.rs s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/relocation.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/relocation.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/relocation.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,127 +0,0 @@ -use alloc::fmt; -use core::fmt::Debug; -use core::slice; - -use crate::pod::Pod; -use crate::{xcoff, BigEndian as BE, Relocation}; - -use crate::read::{ReadRef, RelocationEncoding, RelocationKind, RelocationTarget, SymbolIndex}; - -use super::{FileHeader, SectionHeader, XcoffFile}; - -/// An iterator over the relocations in a `XcoffSection32`. -pub type XcoffRelocationIterator32<'data, 'file, R = &'data [u8]> = - XcoffRelocationIterator<'data, 'file, xcoff::FileHeader32, R>; -/// An iterator over the relocations in a `XcoffSection64`. 
-pub type XcoffRelocationIterator64<'data, 'file, R = &'data [u8]> = - XcoffRelocationIterator<'data, 'file, xcoff::FileHeader64, R>; - -/// An iterator over the relocations in a `XcoffSection`. -pub struct XcoffRelocationIterator<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - #[allow(unused)] - pub(super) file: &'file XcoffFile<'data, Xcoff, R>, - pub(super) relocations: - slice::Iter<'data, <::SectionHeader as SectionHeader>::Rel>, -} - -impl<'data, 'file, Xcoff, R> Iterator for XcoffRelocationIterator<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - type Item = (u64, Relocation); - - fn next(&mut self) -> Option { - self.relocations.next().map(|relocation| { - let encoding = RelocationEncoding::Generic; - let (kind, addend) = match relocation.r_rtype() { - xcoff::R_POS - | xcoff::R_RL - | xcoff::R_RLA - | xcoff::R_BA - | xcoff::R_RBA - | xcoff::R_TLS => (RelocationKind::Absolute, 0), - xcoff::R_REL | xcoff::R_BR | xcoff::R_RBR => (RelocationKind::Relative, -4), - xcoff::R_TOC | xcoff::R_TOCL | xcoff::R_TOCU => (RelocationKind::Got, 0), - r_type => (RelocationKind::Xcoff(r_type), 0), - }; - let size = (relocation.r_rsize() & 0x3F) + 1; - let target = RelocationTarget::Symbol(SymbolIndex(relocation.r_symndx() as usize)); - ( - relocation.r_vaddr().into(), - Relocation { - kind, - encoding, - size, - target, - addend, - implicit_addend: true, - }, - ) - }) - } -} - -impl<'data, 'file, Xcoff, R> fmt::Debug for XcoffRelocationIterator<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("XcoffRelocationIterator").finish() - } -} - -/// A trait for generic access to `Rel32` and `Rel64`. 
-#[allow(missing_docs)] -pub trait Rel: Debug + Pod { - type Word: Into; - fn r_vaddr(&self) -> Self::Word; - fn r_symndx(&self) -> u32; - fn r_rsize(&self) -> u8; - fn r_rtype(&self) -> u8; -} - -impl Rel for xcoff::Rel32 { - type Word = u32; - - fn r_vaddr(&self) -> Self::Word { - self.r_vaddr.get(BE) - } - - fn r_symndx(&self) -> u32 { - self.r_symndx.get(BE) - } - - fn r_rsize(&self) -> u8 { - self.r_rsize - } - - fn r_rtype(&self) -> u8 { - self.r_rtype - } -} - -impl Rel for xcoff::Rel64 { - type Word = u64; - - fn r_vaddr(&self) -> Self::Word { - self.r_vaddr.get(BE) - } - - fn r_symndx(&self) -> u32 { - self.r_symndx.get(BE) - } - - fn r_rsize(&self) -> u8 { - self.r_rsize - } - - fn r_rtype(&self) -> u8 { - self.r_rtype - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/section.rs s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/section.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/section.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/section.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,427 +0,0 @@ -use core::fmt::Debug; -use core::{iter, result, slice, str}; - -use crate::{ - xcoff, BigEndian as BE, CompressedData, CompressedFileRange, Pod, SectionFlags, SectionKind, -}; - -use crate::read::{self, Error, ObjectSection, ReadError, ReadRef, Result, SectionIndex}; - -use super::{AuxHeader, FileHeader, Rel, XcoffFile, XcoffRelocationIterator}; - -/// An iterator over the sections of an `XcoffFile32`. -pub type XcoffSectionIterator32<'data, 'file, R = &'data [u8]> = - XcoffSectionIterator<'data, 'file, xcoff::FileHeader32, R>; -/// An iterator over the sections of an `XcoffFile64`. -pub type XcoffSectionIterator64<'data, 'file, R = &'data [u8]> = - XcoffSectionIterator<'data, 'file, xcoff::FileHeader64, R>; - -/// An iterator over the sections of an `XcoffFile`. 
-#[derive(Debug)] -pub struct XcoffSectionIterator<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file XcoffFile<'data, Xcoff, R>, - pub(super) iter: iter::Enumerate>, -} - -impl<'data, 'file, Xcoff, R> Iterator for XcoffSectionIterator<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - type Item = XcoffSection<'data, 'file, Xcoff, R>; - - fn next(&mut self) -> Option { - self.iter.next().map(|(index, section)| XcoffSection { - index: SectionIndex(index + 1), - file: self.file, - section, - }) - } -} - -/// A section of an `XcoffFile32`. -pub type XcoffSection32<'data, 'file, R = &'data [u8]> = - XcoffSection<'data, 'file, xcoff::FileHeader32, R>; -/// A section of an `XcoffFile64`. -pub type XcoffSection64<'data, 'file, R = &'data [u8]> = - XcoffSection<'data, 'file, xcoff::FileHeader64, R>; - -/// A section of an `XcoffFile`. -#[derive(Debug)] -pub struct XcoffSection<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - pub(super) file: &'file XcoffFile<'data, Xcoff, R>, - pub(super) section: &'data Xcoff::SectionHeader, - pub(super) index: SectionIndex, -} - -impl<'data, 'file, Xcoff: FileHeader, R: ReadRef<'data>> XcoffSection<'data, 'file, Xcoff, R> { - fn bytes(&self) -> Result<&'data [u8]> { - self.section - .data(self.file.data) - .read_error("Invalid XCOFF section offset or size") - } -} - -impl<'data, 'file, Xcoff, R> read::private::Sealed for XcoffSection<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Xcoff, R> ObjectSection<'data> for XcoffSection<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - type RelocationIterator = XcoffRelocationIterator<'data, 'file, Xcoff, R>; - - fn index(&self) -> SectionIndex { - self.index - } - - fn address(&self) -> u64 { - self.section.s_paddr().into() - } - - fn size(&self) -> u64 { - 
self.section.s_size().into() - } - - fn align(&self) -> u64 { - // The default section alignment is 4. - if let Some(aux_header) = self.file.aux_header { - match self.kind() { - SectionKind::Text => aux_header.o_algntext().into(), - SectionKind::Data => aux_header.o_algndata().into(), - _ => 4, - } - } else { - 4 - } - } - - fn file_range(&self) -> Option<(u64, u64)> { - self.section.file_range() - } - - fn data(&self) -> Result<&'data [u8]> { - self.bytes() - } - - fn data_range(&self, address: u64, size: u64) -> Result> { - Ok(read::util::data_range( - self.bytes()?, - self.address(), - address, - size, - )) - } - - fn compressed_file_range(&self) -> Result { - Ok(CompressedFileRange::none(self.file_range())) - } - - fn compressed_data(&self) -> Result> { - self.data().map(CompressedData::none) - } - - fn name_bytes(&self) -> read::Result<&[u8]> { - Ok(self.section.name()) - } - - fn name(&self) -> read::Result<&str> { - let name = self.name_bytes()?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 XCOFF section name") - } - - fn segment_name_bytes(&self) -> Result> { - Ok(None) - } - - fn segment_name(&self) -> Result> { - Ok(None) - } - - fn kind(&self) -> SectionKind { - let section_type = self.section.s_flags() as u16; - if section_type & xcoff::STYP_TEXT != 0 { - SectionKind::Text - } else if section_type & xcoff::STYP_DATA != 0 { - SectionKind::Data - } else if section_type & xcoff::STYP_TDATA != 0 { - SectionKind::Tls - } else if section_type & xcoff::STYP_BSS != 0 { - SectionKind::UninitializedData - } else if section_type & xcoff::STYP_TBSS != 0 { - SectionKind::UninitializedTls - } else if section_type & (xcoff::STYP_DEBUG | xcoff::STYP_DWARF) != 0 { - SectionKind::Debug - } else if section_type & (xcoff::STYP_LOADER | xcoff::STYP_OVRFLO) != 0 { - SectionKind::Metadata - } else if section_type - & (xcoff::STYP_INFO | xcoff::STYP_EXCEPT | xcoff::STYP_PAD | xcoff::STYP_TYPCHK) - != 0 - { - SectionKind::Other - } else { - SectionKind::Unknown - } - 
} - - fn relocations(&self) -> Self::RelocationIterator { - let rel = self.section.relocations(self.file.data).unwrap_or(&[]); - XcoffRelocationIterator { - file: self.file, - relocations: rel.iter(), - } - } - - fn flags(&self) -> SectionFlags { - SectionFlags::Xcoff { - s_flags: self.section.s_flags(), - } - } - - fn uncompressed_data(&self) -> Result> { - self.compressed_data()?.decompress() - } -} - -/// The table of section headers in an XCOFF file. -#[derive(Debug, Clone, Copy)] -pub struct SectionTable<'data, Xcoff: FileHeader> { - sections: &'data [Xcoff::SectionHeader], -} - -impl<'data, Xcoff> Default for SectionTable<'data, Xcoff> -where - Xcoff: FileHeader, -{ - fn default() -> Self { - Self { sections: &[] } - } -} - -impl<'data, Xcoff> SectionTable<'data, Xcoff> -where - Xcoff: FileHeader, -{ - /// Parse the section table. - /// - /// `data` must be the entire file data. - /// `offset` must be after the optional file header. - pub fn parse>(header: &Xcoff, data: R, offset: &mut u64) -> Result { - let section_num = header.f_nscns(); - if section_num == 0 { - return Ok(SectionTable::default()); - } - let sections = data - .read_slice(offset, section_num as usize) - .read_error("Invalid XCOFF section headers")?; - Ok(SectionTable { sections }) - } - - /// Iterate over the section headers. - #[inline] - pub fn iter(&self) -> slice::Iter<'data, Xcoff::SectionHeader> { - self.sections.iter() - } - - /// Return true if the section table is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.sections.is_empty() - } - - /// The number of section headers. - #[inline] - pub fn len(&self) -> usize { - self.sections.len() - } - - /// Return the section header at the given index. - /// - /// The index is 1-based. 
- pub fn section(&self, index: SectionIndex) -> read::Result<&'data Xcoff::SectionHeader> { - self.sections - .get(index.0.wrapping_sub(1)) - .read_error("Invalid XCOFF section index") - } -} - -/// A trait for generic access to `SectionHeader32` and `SectionHeader64`. -#[allow(missing_docs)] -pub trait SectionHeader: Debug + Pod { - type Word: Into; - type HalfWord: Into; - type Xcoff: FileHeader; - type Rel: Rel; - - fn s_name(&self) -> &[u8; 8]; - fn s_paddr(&self) -> Self::Word; - fn s_vaddr(&self) -> Self::Word; - fn s_size(&self) -> Self::Word; - fn s_scnptr(&self) -> Self::Word; - fn s_relptr(&self) -> Self::Word; - fn s_lnnoptr(&self) -> Self::Word; - fn s_nreloc(&self) -> Self::HalfWord; - fn s_nlnno(&self) -> Self::HalfWord; - fn s_flags(&self) -> u32; - - /// Return the section name. - fn name(&self) -> &[u8] { - let sectname = &self.s_name()[..]; - match memchr::memchr(b'\0', sectname) { - Some(end) => §name[..end], - None => sectname, - } - } - - /// Return the offset and size of the section in the file. - fn file_range(&self) -> Option<(u64, u64)> { - Some((self.s_scnptr().into(), self.s_size().into())) - } - - /// Return the section data. - /// - /// Returns `Ok(&[])` if the section has no data. - /// Returns `Err` for invalid values. - fn data<'data, R: ReadRef<'data>>(&self, data: R) -> result::Result<&'data [u8], ()> { - if let Some((offset, size)) = self.file_range() { - data.read_bytes_at(offset, size) - } else { - Ok(&[]) - } - } - - /// Read the relocations. 
- fn relocations<'data, R: ReadRef<'data>>(&self, data: R) -> read::Result<&'data [Self::Rel]>; -} - -impl SectionHeader for xcoff::SectionHeader32 { - type Word = u32; - type HalfWord = u16; - type Xcoff = xcoff::FileHeader32; - type Rel = xcoff::Rel32; - - fn s_name(&self) -> &[u8; 8] { - &self.s_name - } - - fn s_paddr(&self) -> Self::Word { - self.s_paddr.get(BE) - } - - fn s_vaddr(&self) -> Self::Word { - self.s_vaddr.get(BE) - } - - fn s_size(&self) -> Self::Word { - self.s_size.get(BE) - } - - fn s_scnptr(&self) -> Self::Word { - self.s_scnptr.get(BE) - } - - fn s_relptr(&self) -> Self::Word { - self.s_relptr.get(BE) - } - - fn s_lnnoptr(&self) -> Self::Word { - self.s_lnnoptr.get(BE) - } - - fn s_nreloc(&self) -> Self::HalfWord { - self.s_nreloc.get(BE) - } - - fn s_nlnno(&self) -> Self::HalfWord { - self.s_nlnno.get(BE) - } - - fn s_flags(&self) -> u32 { - self.s_flags.get(BE) - } - - /// Read the relocations in a XCOFF32 file. - /// - /// `data` must be the entire file data. - fn relocations<'data, R: ReadRef<'data>>(&self, data: R) -> read::Result<&'data [Self::Rel]> { - let reloc_num = self.s_nreloc() as usize; - // TODO: If more than 65,534 relocation entries are required, the field value will be 65535, - // and an STYP_OVRFLO section header will contain the actual count of relocation entries in - // the s_paddr field. 
- if reloc_num == 65535 { - return Err(Error("Overflow section is not supported yet.")); - } - data.read_slice_at(self.s_relptr().into(), reloc_num) - .read_error("Invalid XCOFF relocation offset or number") - } -} - -impl SectionHeader for xcoff::SectionHeader64 { - type Word = u64; - type HalfWord = u32; - type Xcoff = xcoff::FileHeader64; - type Rel = xcoff::Rel64; - - fn s_name(&self) -> &[u8; 8] { - &self.s_name - } - - fn s_paddr(&self) -> Self::Word { - self.s_paddr.get(BE) - } - - fn s_vaddr(&self) -> Self::Word { - self.s_vaddr.get(BE) - } - - fn s_size(&self) -> Self::Word { - self.s_size.get(BE) - } - - fn s_scnptr(&self) -> Self::Word { - self.s_scnptr.get(BE) - } - - fn s_relptr(&self) -> Self::Word { - self.s_relptr.get(BE) - } - - fn s_lnnoptr(&self) -> Self::Word { - self.s_lnnoptr.get(BE) - } - - fn s_nreloc(&self) -> Self::HalfWord { - self.s_nreloc.get(BE) - } - - fn s_nlnno(&self) -> Self::HalfWord { - self.s_nlnno.get(BE) - } - - fn s_flags(&self) -> u32 { - self.s_flags.get(BE) - } - - /// Read the relocations in a XCOFF64 file. - /// - /// `data` must be the entire file data. - fn relocations<'data, R: ReadRef<'data>>(&self, data: R) -> read::Result<&'data [Self::Rel]> { - data.read_slice_at(self.s_relptr(), self.s_nreloc() as usize) - .read_error("Invalid XCOFF relocation offset or number") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/segment.rs s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/segment.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/segment.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/segment.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,113 +0,0 @@ -//! TODO: Support the segment for XCOFF when auxiliary file header and loader section is ready. 
- -use core::fmt::Debug; -use core::str; - -use crate::read::{self, ObjectSegment, ReadRef, Result}; -use crate::xcoff; - -use super::{FileHeader, XcoffFile}; - -/// An iterator over the segments of an `XcoffFile32`. -pub type XcoffSegmentIterator32<'data, 'file, R = &'data [u8]> = - XcoffSegmentIterator<'data, 'file, xcoff::FileHeader32, R>; -/// An iterator over the segments of an `XcoffFile64`. -pub type XcoffSegmentIterator64<'data, 'file, R = &'data [u8]> = - XcoffSegmentIterator<'data, 'file, xcoff::FileHeader64, R>; - -/// An iterator over the segments of an `XcoffFile`. -#[derive(Debug)] -pub struct XcoffSegmentIterator<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - #[allow(unused)] - pub(super) file: &'file XcoffFile<'data, Xcoff, R>, -} - -impl<'data, 'file, Xcoff, R> Iterator for XcoffSegmentIterator<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - type Item = XcoffSegment<'data, 'file, Xcoff, R>; - - fn next(&mut self) -> Option { - None - } -} - -/// A segment of an `XcoffFile32`. -pub type XcoffSegment32<'data, 'file, R = &'data [u8]> = - XcoffSegment<'data, 'file, xcoff::FileHeader32, R>; -/// A segment of an `XcoffFile64`. -pub type XcoffSegment64<'data, 'file, R = &'data [u8]> = - XcoffSegment<'data, 'file, xcoff::FileHeader64, R>; - -/// A loadable section of an `XcoffFile`. 
-#[derive(Debug)] -pub struct XcoffSegment<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - #[allow(unused)] - pub(super) file: &'file XcoffFile<'data, Xcoff, R>, -} - -impl<'data, 'file, Xcoff, R> XcoffSegment<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Xcoff, R> read::private::Sealed for XcoffSegment<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ -} - -impl<'data, 'file, Xcoff, R> ObjectSegment<'data> for XcoffSegment<'data, 'file, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - fn address(&self) -> u64 { - unreachable!(); - } - - fn size(&self) -> u64 { - unreachable!(); - } - - fn align(&self) -> u64 { - unreachable!(); - } - - fn file_range(&self) -> (u64, u64) { - unreachable!(); - } - - fn data(&self) -> Result<&'data [u8]> { - unreachable!(); - } - - fn data_range(&self, _address: u64, _size: u64) -> Result> { - unreachable!(); - } - - fn name_bytes(&self) -> Result> { - unreachable!(); - } - - fn name(&self) -> Result> { - unreachable!(); - } - - fn flags(&self) -> crate::SegmentFlags { - unreachable!(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/symbol.rs s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/symbol.rs --- s390-tools-2.31.0/rust-vendor/object/src/read/xcoff/symbol.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/read/xcoff/symbol.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,695 +0,0 @@ -use alloc::fmt; -use core::convert::TryInto; -use core::fmt::Debug; -use core::marker::PhantomData; -use core::str; - -use crate::endian::{BigEndian as BE, U32Bytes}; -use crate::pod::{bytes_of, Pod}; -use crate::read::util::StringTable; -use crate::xcoff; - -use crate::read::{ - self, Bytes, Error, ObjectSymbol, ObjectSymbolTable, ReadError, ReadRef, Result, SectionIndex, - SymbolFlags, SymbolIndex, SymbolKind, SymbolScope, SymbolSection, -}; - -use 
super::{FileHeader, XcoffFile}; - -/// A table of symbol entries in an XCOFF file. -/// -/// Also includes the string table used for the symbol names. -#[derive(Debug)] -pub struct SymbolTable<'data, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - symbols: &'data [xcoff::SymbolBytes], - strings: StringTable<'data, R>, - header: PhantomData, -} - -impl<'data, Xcoff, R> Default for SymbolTable<'data, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - fn default() -> Self { - Self { - symbols: &[], - strings: StringTable::default(), - header: PhantomData, - } - } -} - -impl<'data, Xcoff, R> SymbolTable<'data, Xcoff, R> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - /// Parse the symbol table. - pub fn parse(header: Xcoff, data: R) -> Result { - let mut offset = header.f_symptr().into(); - let (symbols, strings) = if offset != 0 { - let symbols = data - .read_slice(&mut offset, header.f_nsyms() as usize) - .read_error("Invalid XCOFF symbol table offset or size")?; - - // Parse the string table. - // Note: don't update data when reading length; the length includes itself. - let length = data - .read_at::>(offset) - .read_error("Missing XCOFF string table")? - .get(BE); - let str_end = offset - .checked_add(length as u64) - .read_error("Invalid XCOFF string table length")?; - let strings = StringTable::new(data, offset, str_end); - - (symbols, strings) - } else { - (&[][..], StringTable::default()) - }; - - Ok(SymbolTable { - symbols, - strings, - header: PhantomData, - }) - } - - /// Return the symbol entry at the given index and offset. - pub fn get(&self, index: usize, offset: usize) -> Result<&'data T> { - let entry = index - .checked_add(offset) - .and_then(|x| self.symbols.get(x)) - .read_error("Invalid XCOFF symbol index")?; - let bytes = bytes_of(entry); - Bytes(bytes).read().read_error("Invalid XCOFF symbol data") - } - - /// Return the symbol at the given index. 
- pub fn symbol(&self, index: usize) -> Result<&'data Xcoff::Symbol> { - self.get::(index, 0) - } - - /// Return a file auxiliary symbol. - pub fn aux_file(&self, index: usize, offset: usize) -> Result<&'data Xcoff::FileAux> { - debug_assert!(self.symbol(index)?.has_aux_file()); - let aux_file = self.get::(index, offset)?; - if let Some(aux_type) = aux_file.x_auxtype() { - if aux_type != xcoff::AUX_FILE { - return Err(Error("Invalid index for file auxiliary symbol.")); - } - } - Ok(aux_file) - } - - /// Return the csect auxiliary symbol. - pub fn aux_csect(&self, index: usize, offset: usize) -> Result<&'data Xcoff::CsectAux> { - debug_assert!(self.symbol(index)?.has_aux_csect()); - let aux_csect = self.get::(index, offset)?; - if let Some(aux_type) = aux_csect.x_auxtype() { - if aux_type != xcoff::AUX_CSECT { - return Err(Error("Invalid index/offset for csect auxiliary symbol.")); - } - } - Ok(aux_csect) - } - - /// Return true if the symbol table is empty. - #[inline] - pub fn is_empty(&self) -> bool { - self.symbols.is_empty() - } - - /// The number of symbol table entries. - /// - /// This includes auxiliary symbol table entries. - #[inline] - pub fn len(&self) -> usize { - self.symbols.len() - } -} - -/// A symbol table of an `XcoffFile32`. -pub type XcoffSymbolTable32<'data, 'file, R = &'data [u8]> = - XcoffSymbolTable<'data, 'file, xcoff::FileHeader32, R>; -/// A symbol table of an `XcoffFile64`. -pub type XcoffSymbolTable64<'data, 'file, R = &'data [u8]> = - XcoffSymbolTable<'data, 'file, xcoff::FileHeader64, R>; - -/// A symbol table of an `XcoffFile`. 
-#[derive(Debug, Clone, Copy)] -pub struct XcoffSymbolTable<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - pub(crate) file: &'file XcoffFile<'data, Xcoff, R>, - pub(super) symbols: &'file SymbolTable<'data, Xcoff, R>, -} - -impl<'data, 'file, Xcoff: FileHeader, R: ReadRef<'data>> read::private::Sealed - for XcoffSymbolTable<'data, 'file, Xcoff, R> -{ -} - -impl<'data, 'file, Xcoff: FileHeader, R: ReadRef<'data>> ObjectSymbolTable<'data> - for XcoffSymbolTable<'data, 'file, Xcoff, R> -{ - type Symbol = XcoffSymbol<'data, 'file, Xcoff, R>; - type SymbolIterator = XcoffSymbolIterator<'data, 'file, Xcoff, R>; - - fn symbols(&self) -> Self::SymbolIterator { - XcoffSymbolIterator { - file: self.file, - symbols: self.symbols, - index: 0, - } - } - - fn symbol_by_index(&self, index: SymbolIndex) -> read::Result { - let symbol = self.symbols.symbol(index.0)?; - Ok(XcoffSymbol { - file: self.file, - symbols: self.symbols, - index, - symbol, - }) - } -} - -/// An iterator over the symbols of an `XcoffFile32`. -pub type XcoffSymbolIterator32<'data, 'file, R = &'data [u8]> = - XcoffSymbolIterator<'data, 'file, xcoff::FileHeader32, R>; -/// An iterator over the symbols of an `XcoffFile64`. -pub type XcoffSymbolIterator64<'data, 'file, R = &'data [u8]> = - XcoffSymbolIterator<'data, 'file, xcoff::FileHeader64, R>; - -/// An iterator over the symbols of an `XcoffFile`. 
-pub struct XcoffSymbolIterator<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - pub(crate) file: &'file XcoffFile<'data, Xcoff, R>, - pub(super) symbols: &'file SymbolTable<'data, Xcoff, R>, - pub(super) index: usize, -} - -impl<'data, 'file, Xcoff: FileHeader, R: ReadRef<'data>> fmt::Debug - for XcoffSymbolIterator<'data, 'file, Xcoff, R> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("XcoffSymbolIterator").finish() - } -} - -impl<'data, 'file, Xcoff: FileHeader, R: ReadRef<'data>> Iterator - for XcoffSymbolIterator<'data, 'file, Xcoff, R> -{ - type Item = XcoffSymbol<'data, 'file, Xcoff, R>; - - fn next(&mut self) -> Option { - let index = self.index; - let symbol = self.symbols.symbol(index).ok()?; - // TODO: skip over the auxiliary symbols for now. - self.index += 1 + symbol.n_numaux() as usize; - Some(XcoffSymbol { - file: self.file, - symbols: self.symbols, - index: SymbolIndex(index), - symbol, - }) - } -} - -/// A symbol of an `XcoffFile32`. -pub type XcoffSymbol32<'data, 'file, R = &'data [u8]> = - XcoffSymbol<'data, 'file, xcoff::FileHeader32, R>; -/// A symbol of an `XcoffFile64`. -pub type XcoffSymbol64<'data, 'file, R = &'data [u8]> = - XcoffSymbol<'data, 'file, xcoff::FileHeader64, R>; - -/// A symbol of an `XcoffFile`. 
-#[derive(Debug, Clone, Copy)] -pub struct XcoffSymbol<'data, 'file, Xcoff, R = &'data [u8]> -where - Xcoff: FileHeader, - R: ReadRef<'data>, -{ - pub(crate) file: &'file XcoffFile<'data, Xcoff, R>, - pub(super) symbols: &'file SymbolTable<'data, Xcoff, R>, - pub(super) index: SymbolIndex, - pub(super) symbol: &'data Xcoff::Symbol, -} - -impl<'data, 'file, Xcoff: FileHeader, R: ReadRef<'data>> read::private::Sealed - for XcoffSymbol<'data, 'file, Xcoff, R> -{ -} - -impl<'data, 'file, Xcoff: FileHeader, R: ReadRef<'data>> ObjectSymbol<'data> - for XcoffSymbol<'data, 'file, Xcoff, R> -{ - #[inline] - fn index(&self) -> SymbolIndex { - self.index - } - - fn name_bytes(&self) -> Result<&'data [u8]> { - if self.symbol.has_aux_file() { - // By convention the file name is in the first auxiliary entry. - self.symbols - .aux_file(self.index.0, 1)? - .fname(self.symbols.strings) - } else { - self.symbol.name(self.symbols.strings) - } - } - - fn name(&self) -> Result<&'data str> { - let name = self.name_bytes()?; - str::from_utf8(name) - .ok() - .read_error("Non UTF-8 XCOFF symbol name") - } - - #[inline] - fn address(&self) -> u64 { - match self.symbol.n_sclass() { - // Relocatable address. - xcoff::C_EXT - | xcoff::C_WEAKEXT - | xcoff::C_HIDEXT - | xcoff::C_FCN - | xcoff::C_BLOCK - | xcoff::C_STAT - | xcoff::C_INFO => self.symbol.n_value().into(), - _ => 0, - } - } - - #[inline] - fn size(&self) -> u64 { - if self.symbol.has_aux_csect() { - // XCOFF32 must have the csect auxiliary entry as the last auxiliary entry. - // XCOFF64 doesn't require this, but conventionally does. 
- if let Ok(aux_csect) = self - .file - .symbols - .aux_csect(self.index.0, self.symbol.n_numaux() as usize) - { - let sym_type = aux_csect.sym_type() & 0x07; - if sym_type == xcoff::XTY_SD || sym_type == xcoff::XTY_CM { - return aux_csect.x_scnlen(); - } - } - } - 0 - } - - fn kind(&self) -> SymbolKind { - if self.symbol.has_aux_csect() { - if let Ok(aux_csect) = self - .file - .symbols - .aux_csect(self.index.0, self.symbol.n_numaux() as usize) - { - let sym_type = aux_csect.sym_type() & 0x07; - if sym_type == xcoff::XTY_SD || sym_type == xcoff::XTY_CM { - return match aux_csect.x_smclas() { - xcoff::XMC_PR | xcoff::XMC_GL => SymbolKind::Text, - xcoff::XMC_RO | xcoff::XMC_RW | xcoff::XMC_TD | xcoff::XMC_BS => { - SymbolKind::Data - } - xcoff::XMC_TL | xcoff::XMC_UL => SymbolKind::Tls, - xcoff::XMC_DS | xcoff::XMC_TC0 | xcoff::XMC_TC => { - // `Metadata` might be a better kind for these if we had it. - SymbolKind::Data - } - _ => SymbolKind::Unknown, - }; - } else if sym_type == xcoff::XTY_LD { - // A function entry point. Neither `Text` nor `Label` are a good fit for this. - return SymbolKind::Text; - } else if sym_type == xcoff::XTY_ER { - return SymbolKind::Unknown; - } - } - } - match self.symbol.n_sclass() { - xcoff::C_NULL => SymbolKind::Null, - xcoff::C_FILE => SymbolKind::File, - _ => SymbolKind::Unknown, - } - } - - fn section(&self) -> SymbolSection { - match self.symbol.n_scnum() { - xcoff::N_ABS => SymbolSection::Absolute, - xcoff::N_UNDEF => SymbolSection::Undefined, - xcoff::N_DEBUG => SymbolSection::None, - index if index > 0 => SymbolSection::Section(SectionIndex(index as usize)), - _ => SymbolSection::Unknown, - } - } - - #[inline] - fn is_undefined(&self) -> bool { - self.symbol.is_undefined() - } - - /// Return true if the symbol is a definition of a function or data object. 
- #[inline] - fn is_definition(&self) -> bool { - if self.symbol.has_aux_csect() { - if let Ok(aux_csect) = self - .symbols - .aux_csect(self.index.0, self.symbol.n_numaux() as usize) - { - let smclas = aux_csect.x_smclas(); - self.symbol.n_scnum() != xcoff::N_UNDEF - && (smclas == xcoff::XMC_PR - || smclas == xcoff::XMC_RW - || smclas == xcoff::XMC_RO) - } else { - false - } - } else { - false - } - } - - #[inline] - fn is_common(&self) -> bool { - self.symbol.n_sclass() == xcoff::C_EXT && self.symbol.n_scnum() == xcoff::N_UNDEF - } - - #[inline] - fn is_weak(&self) -> bool { - self.symbol.n_sclass() == xcoff::C_WEAKEXT - } - - fn scope(&self) -> SymbolScope { - if self.symbol.n_scnum() == xcoff::N_UNDEF { - SymbolScope::Unknown - } else { - match self.symbol.n_sclass() { - xcoff::C_EXT | xcoff::C_WEAKEXT | xcoff::C_HIDEXT => { - let visibility = self.symbol.n_type() & xcoff::SYM_V_MASK; - if visibility == xcoff::SYM_V_HIDDEN { - SymbolScope::Linkage - } else { - SymbolScope::Dynamic - } - } - _ => SymbolScope::Compilation, - } - } - } - - #[inline] - fn is_global(&self) -> bool { - match self.symbol.n_sclass() { - xcoff::C_EXT | xcoff::C_WEAKEXT => true, - _ => false, - } - } - - #[inline] - fn is_local(&self) -> bool { - !self.is_global() - } - - #[inline] - fn flags(&self) -> SymbolFlags { - let mut x_smtyp = 0; - let mut x_smclas = 0; - let mut containing_csect = None; - if self.symbol.has_aux_csect() { - if let Ok(aux_csect) = self - .file - .symbols - .aux_csect(self.index.0, self.symbol.n_numaux() as usize) - { - x_smtyp = aux_csect.x_smtyp(); - x_smclas = aux_csect.x_smclas(); - if x_smtyp == xcoff::XTY_LD { - containing_csect = Some(SymbolIndex(aux_csect.x_scnlen() as usize)) - } - } - } - SymbolFlags::Xcoff { - n_sclass: self.symbol.n_sclass(), - x_smtyp, - x_smclas, - containing_csect, - } - } -} - -/// A trait for generic access to `Symbol32` and `Symbol64`. 
-#[allow(missing_docs)] -pub trait Symbol: Debug + Pod { - type Word: Into; - - fn n_value(&self) -> Self::Word; - fn n_scnum(&self) -> i16; - fn n_type(&self) -> u16; - fn n_sclass(&self) -> u8; - fn n_numaux(&self) -> u8; - - fn name<'data, R: ReadRef<'data>>( - &'data self, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]>; - - /// Return true if the symbol is undefined. - #[inline] - fn is_undefined(&self) -> bool { - let n_sclass = self.n_sclass(); - (n_sclass == xcoff::C_EXT || n_sclass == xcoff::C_WEAKEXT) - && self.n_scnum() == xcoff::N_UNDEF - } - - /// Return true if the symbol has file auxiliary entry. - fn has_aux_file(&self) -> bool { - self.n_numaux() > 0 && self.n_sclass() == xcoff::C_FILE - } - - /// Return true if the symbol has csect auxiliary entry. - /// - /// A csect auxiliary entry is required for each symbol table entry that has - /// a storage class value of C_EXT, C_WEAKEXT, or C_HIDEXT. - fn has_aux_csect(&self) -> bool { - let sclass = self.n_sclass(); - self.n_numaux() > 0 - && (sclass == xcoff::C_EXT || sclass == xcoff::C_WEAKEXT || sclass == xcoff::C_HIDEXT) - } -} - -impl Symbol for xcoff::Symbol64 { - type Word = u64; - - fn n_value(&self) -> Self::Word { - self.n_value.get(BE) - } - - fn n_scnum(&self) -> i16 { - self.n_scnum.get(BE) - } - - fn n_type(&self) -> u16 { - self.n_type.get(BE) - } - - fn n_sclass(&self) -> u8 { - self.n_sclass - } - - fn n_numaux(&self) -> u8 { - self.n_numaux - } - - /// Parse the symbol name for XCOFF64. 
- fn name<'data, R: ReadRef<'data>>( - &'data self, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]> { - strings - .get(self.n_offset.get(BE)) - .read_error("Invalid XCOFF symbol name offset") - } -} - -impl Symbol for xcoff::Symbol32 { - type Word = u32; - - fn n_value(&self) -> Self::Word { - self.n_value.get(BE) - } - - fn n_scnum(&self) -> i16 { - self.n_scnum.get(BE) - } - - fn n_type(&self) -> u16 { - self.n_type.get(BE) - } - - fn n_sclass(&self) -> u8 { - self.n_sclass - } - - fn n_numaux(&self) -> u8 { - self.n_numaux - } - - /// Parse the symbol name for XCOFF32. - fn name<'data, R: ReadRef<'data>>( - &'data self, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]> { - if self.n_name[0] == 0 { - // If the name starts with 0 then the last 4 bytes are a string table offset. - let offset = u32::from_be_bytes(self.n_name[4..8].try_into().unwrap()); - strings - .get(offset) - .read_error("Invalid XCOFF symbol name offset") - } else { - // The name is inline and padded with nulls. - Ok(match memchr::memchr(b'\0', &self.n_name) { - Some(end) => &self.n_name[..end], - None => &self.n_name, - }) - } - } -} - -/// A trait for generic access to `FileAux32` and `FileAux64`. -#[allow(missing_docs)] -pub trait FileAux: Debug + Pod { - fn x_fname(&self) -> &[u8; 8]; - fn x_ftype(&self) -> u8; - fn x_auxtype(&self) -> Option; - - /// Parse the x_fname field, which may be an inline string or a string table offset. - fn fname<'data, R: ReadRef<'data>>( - &'data self, - strings: StringTable<'data, R>, - ) -> Result<&'data [u8]> { - let x_fname = self.x_fname(); - if x_fname[0] == 0 { - // If the name starts with 0 then the last 4 bytes are a string table offset. - let offset = u32::from_be_bytes(x_fname[4..8].try_into().unwrap()); - strings - .get(offset) - .read_error("Invalid XCOFF symbol name offset") - } else { - // The name is inline and padded with nulls. 
- Ok(match memchr::memchr(b'\0', x_fname) { - Some(end) => &x_fname[..end], - None => x_fname, - }) - } - } -} - -impl FileAux for xcoff::FileAux64 { - fn x_fname(&self) -> &[u8; 8] { - &self.x_fname - } - - fn x_ftype(&self) -> u8 { - self.x_ftype - } - - fn x_auxtype(&self) -> Option { - Some(self.x_auxtype) - } -} - -impl FileAux for xcoff::FileAux32 { - fn x_fname(&self) -> &[u8; 8] { - &self.x_fname - } - - fn x_ftype(&self) -> u8 { - self.x_ftype - } - - fn x_auxtype(&self) -> Option { - None - } -} - -/// A trait for generic access to `CsectAux32` and `CsectAux64`. -#[allow(missing_docs)] -pub trait CsectAux: Debug + Pod { - fn x_scnlen(&self) -> u64; - fn x_parmhash(&self) -> u32; - fn x_snhash(&self) -> u16; - fn x_smtyp(&self) -> u8; - fn x_smclas(&self) -> u8; - fn x_auxtype(&self) -> Option; - - fn sym_type(&self) -> u8 { - self.x_smtyp() & 0x07 - } -} - -impl CsectAux for xcoff::CsectAux64 { - fn x_scnlen(&self) -> u64 { - self.x_scnlen_lo.get(BE) as u64 | ((self.x_scnlen_hi.get(BE) as u64) << 32) - } - - fn x_parmhash(&self) -> u32 { - self.x_parmhash.get(BE) - } - - fn x_snhash(&self) -> u16 { - self.x_snhash.get(BE) - } - - fn x_smtyp(&self) -> u8 { - self.x_smtyp - } - - fn x_smclas(&self) -> u8 { - self.x_smclas - } - - fn x_auxtype(&self) -> Option { - Some(self.x_auxtype) - } -} - -impl CsectAux for xcoff::CsectAux32 { - fn x_scnlen(&self) -> u64 { - self.x_scnlen.get(BE) as u64 - } - - fn x_parmhash(&self) -> u32 { - self.x_parmhash.get(BE) - } - - fn x_snhash(&self) -> u16 { - self.x_snhash.get(BE) - } - - fn x_smtyp(&self) -> u8 { - self.x_smtyp - } - - fn x_smclas(&self) -> u8 { - self.x_smclas - } - - fn x_auxtype(&self) -> Option { - None - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/coff.rs s390-tools-2.33.1/rust-vendor/object/src/write/coff.rs --- s390-tools-2.31.0/rust-vendor/object/src/write/coff.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/coff.rs 1970-01-01 
01:00:00.000000000 +0100 @@ -1,725 +0,0 @@ -use alloc::vec::Vec; -use core::mem; - -use crate::endian::{LittleEndian as LE, U16Bytes, U32Bytes, U16, U32}; -use crate::pe as coff; -use crate::write::string::*; -use crate::write::util::*; -use crate::write::*; - -#[derive(Default, Clone, Copy)] -struct SectionOffsets { - offset: usize, - str_id: Option, - reloc_offset: usize, - selection: u8, - associative_section: u16, -} - -#[derive(Default, Clone, Copy)] -struct SymbolOffsets { - index: usize, - str_id: Option, - aux_count: u8, -} - -/// Internal format to use for the `.drectve` section containing linker -/// directives for symbol exports. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum CoffExportStyle { - /// MSVC format supported by link.exe and LLD. - Msvc, - /// Gnu format supported by GNU LD and LLD. - Gnu, -} - -impl<'a> Object<'a> { - pub(crate) fn coff_section_info( - &self, - section: StandardSection, - ) -> (&'static [u8], &'static [u8], SectionKind, SectionFlags) { - match section { - StandardSection::Text => (&[], &b".text"[..], SectionKind::Text, SectionFlags::None), - StandardSection::Data => (&[], &b".data"[..], SectionKind::Data, SectionFlags::None), - StandardSection::ReadOnlyData - | StandardSection::ReadOnlyDataWithRel - | StandardSection::ReadOnlyString => ( - &[], - &b".rdata"[..], - SectionKind::ReadOnlyData, - SectionFlags::None, - ), - StandardSection::UninitializedData => ( - &[], - &b".bss"[..], - SectionKind::UninitializedData, - SectionFlags::None, - ), - // TLS sections are data sections with a special name. - StandardSection::Tls => (&[], &b".tls$"[..], SectionKind::Data, SectionFlags::None), - StandardSection::UninitializedTls => { - // Unsupported section. - (&[], &[], SectionKind::UninitializedTls, SectionFlags::None) - } - StandardSection::TlsVariables => { - // Unsupported section. - (&[], &[], SectionKind::TlsVariables, SectionFlags::None) - } - StandardSection::Common => { - // Unsupported section. 
- (&[], &[], SectionKind::Common, SectionFlags::None) - } - StandardSection::GnuProperty => { - // Unsupported section. - (&[], &[], SectionKind::Note, SectionFlags::None) - } - } - } - - pub(crate) fn coff_subsection_name(&self, section: &[u8], value: &[u8]) -> Vec { - let mut name = section.to_vec(); - name.push(b'$'); - name.extend_from_slice(value); - name - } - - pub(crate) fn coff_fixup_relocation(&mut self, relocation: &mut Relocation) -> i64 { - if relocation.kind == RelocationKind::GotRelative { - // Use a stub symbol for the relocation instead. - // This isn't really a GOT, but it's a similar purpose. - // TODO: need to handle DLL imports differently? - relocation.kind = RelocationKind::Relative; - relocation.symbol = self.coff_add_stub_symbol(relocation.symbol); - } else if relocation.kind == RelocationKind::PltRelative { - // Windows doesn't need a separate relocation type for - // references to functions in import libraries. - // For convenience, treat this the same as Relative. 
- relocation.kind = RelocationKind::Relative; - } - - let constant = match self.architecture { - Architecture::I386 | Architecture::Arm | Architecture::Aarch64 => match relocation.kind - { - RelocationKind::Relative => { - // IMAGE_REL_I386_REL32, IMAGE_REL_ARM_REL32, IMAGE_REL_ARM64_REL32 - relocation.addend + 4 - } - _ => relocation.addend, - }, - Architecture::X86_64 => match relocation.kind { - RelocationKind::Relative => { - // IMAGE_REL_AMD64_REL32 through to IMAGE_REL_AMD64_REL32_5 - if relocation.addend <= -4 && relocation.addend >= -9 { - 0 - } else { - relocation.addend + 4 - } - } - _ => relocation.addend, - }, - _ => unimplemented!(), - }; - relocation.addend -= constant; - constant - } - - fn coff_add_stub_symbol(&mut self, symbol_id: SymbolId) -> SymbolId { - if let Some(stub_id) = self.stub_symbols.get(&symbol_id) { - return *stub_id; - } - let stub_size = self.architecture.address_size().unwrap().bytes(); - - let name = b".rdata$.refptr".to_vec(); - let section_id = self.add_section(Vec::new(), name, SectionKind::ReadOnlyData); - let section = self.section_mut(section_id); - section.set_data(vec![0; stub_size as usize], u64::from(stub_size)); - section.relocations = vec![Relocation { - offset: 0, - size: stub_size * 8, - kind: RelocationKind::Absolute, - encoding: RelocationEncoding::Generic, - symbol: symbol_id, - addend: 0, - }]; - - let mut name = b".refptr.".to_vec(); - name.extend_from_slice(&self.symbol(symbol_id).name); - let stub_id = self.add_raw_symbol(Symbol { - name, - value: 0, - size: u64::from(stub_size), - kind: SymbolKind::Data, - scope: SymbolScope::Compilation, - weak: false, - section: SymbolSection::Section(section_id), - flags: SymbolFlags::None, - }); - self.stub_symbols.insert(symbol_id, stub_id); - - stub_id - } - - /// Appends linker directives to the `.drectve` section to tell the linker - /// to export all symbols with `SymbolScope::Dynamic`. - /// - /// This must be called after all symbols have been defined. 
- pub fn add_coff_exports(&mut self, style: CoffExportStyle) { - assert_eq!(self.format, BinaryFormat::Coff); - - let mut directives = vec![]; - for symbol in &self.symbols { - if symbol.scope == SymbolScope::Dynamic { - match style { - CoffExportStyle::Msvc => directives.extend(b" /EXPORT:\""), - CoffExportStyle::Gnu => directives.extend(b" -export:\""), - } - directives.extend(&symbol.name); - directives.extend(b"\""); - if symbol.kind != SymbolKind::Text { - match style { - CoffExportStyle::Msvc => directives.extend(b",DATA"), - CoffExportStyle::Gnu => directives.extend(b",data"), - } - } - } - } - let drectve = self.add_section(vec![], b".drectve".to_vec(), SectionKind::Linker); - self.append_section_data(drectve, &directives, 1); - } - - pub(crate) fn coff_write(&self, buffer: &mut dyn WritableBuffer) -> Result<()> { - // Calculate offsets of everything, and build strtab. - let mut offset = 0; - let mut strtab = StringTable::default(); - - // COFF header. - offset += mem::size_of::(); - - // Section headers. - offset += self.sections.len() * mem::size_of::(); - - // Calculate size of section data and add section strings to strtab. - let mut section_offsets = vec![SectionOffsets::default(); self.sections.len()]; - for (index, section) in self.sections.iter().enumerate() { - if section.name.len() > 8 { - section_offsets[index].str_id = Some(strtab.add(§ion.name)); - } - - let len = section.data.len(); - if len != 0 { - // TODO: not sure what alignment is required here, but this seems to match LLVM - offset = align(offset, 4); - section_offsets[index].offset = offset; - offset += len; - } else { - section_offsets[index].offset = 0; - } - - // Calculate size of relocations. - let mut count = section.relocations.len(); - if count != 0 { - section_offsets[index].reloc_offset = offset; - if count > 0xffff { - count += 1; - } - offset += count * mem::size_of::(); - } - } - - // Set COMDAT flags. 
- for comdat in &self.comdats { - let symbol = &self.symbols[comdat.symbol.0]; - let comdat_section = match symbol.section { - SymbolSection::Section(id) => id.0, - _ => { - return Err(Error(format!( - "unsupported COMDAT symbol `{}` section {:?}", - symbol.name().unwrap_or(""), - symbol.section - ))); - } - }; - section_offsets[comdat_section].selection = match comdat.kind { - ComdatKind::NoDuplicates => coff::IMAGE_COMDAT_SELECT_NODUPLICATES, - ComdatKind::Any => coff::IMAGE_COMDAT_SELECT_ANY, - ComdatKind::SameSize => coff::IMAGE_COMDAT_SELECT_SAME_SIZE, - ComdatKind::ExactMatch => coff::IMAGE_COMDAT_SELECT_EXACT_MATCH, - ComdatKind::Largest => coff::IMAGE_COMDAT_SELECT_LARGEST, - ComdatKind::Newest => coff::IMAGE_COMDAT_SELECT_NEWEST, - ComdatKind::Unknown => { - return Err(Error(format!( - "unsupported COMDAT symbol `{}` kind {:?}", - symbol.name().unwrap_or(""), - comdat.kind - ))); - } - }; - for id in &comdat.sections { - let section = &self.sections[id.0]; - if section.symbol.is_none() { - return Err(Error(format!( - "missing symbol for COMDAT section `{}`", - section.name().unwrap_or(""), - ))); - } - if id.0 != comdat_section { - section_offsets[id.0].selection = coff::IMAGE_COMDAT_SELECT_ASSOCIATIVE; - section_offsets[id.0].associative_section = comdat_section as u16 + 1; - } - } - } - - // Calculate size of symbols and add symbol strings to strtab. - let mut symbol_offsets = vec![SymbolOffsets::default(); self.symbols.len()]; - let mut symtab_count = 0; - for (index, symbol) in self.symbols.iter().enumerate() { - symbol_offsets[index].index = symtab_count; - symtab_count += 1; - match symbol.kind { - SymbolKind::File => { - // Name goes in auxiliary symbol records. - let aux_count = (symbol.name.len() + coff::IMAGE_SIZEOF_SYMBOL - 1) - / coff::IMAGE_SIZEOF_SYMBOL; - symbol_offsets[index].aux_count = aux_count as u8; - symtab_count += aux_count; - // Don't add name to strtab. 
- continue; - } - SymbolKind::Section => { - symbol_offsets[index].aux_count = 1; - symtab_count += 1; - } - _ => {} - } - if symbol.name.len() > 8 { - symbol_offsets[index].str_id = Some(strtab.add(&symbol.name)); - } - } - - // Calculate size of symtab. - let symtab_offset = offset; - let symtab_len = symtab_count * coff::IMAGE_SIZEOF_SYMBOL; - offset += symtab_len; - - // Calculate size of strtab. - let strtab_offset = offset; - let mut strtab_data = Vec::new(); - // First 4 bytes of strtab are the length. - strtab.write(4, &mut strtab_data); - let strtab_len = strtab_data.len() + 4; - offset += strtab_len; - - // Start writing. - buffer - .reserve(offset) - .map_err(|_| Error(String::from("Cannot allocate buffer")))?; - - // Write file header. - let header = coff::ImageFileHeader { - machine: U16::new( - LE, - match self.architecture { - Architecture::Arm => coff::IMAGE_FILE_MACHINE_ARMNT, - Architecture::Aarch64 => coff::IMAGE_FILE_MACHINE_ARM64, - Architecture::I386 => coff::IMAGE_FILE_MACHINE_I386, - Architecture::X86_64 => coff::IMAGE_FILE_MACHINE_AMD64, - _ => { - return Err(Error(format!( - "unimplemented architecture {:?}", - self.architecture - ))); - } - }, - ), - number_of_sections: U16::new(LE, self.sections.len() as u16), - time_date_stamp: U32::default(), - pointer_to_symbol_table: U32::new(LE, symtab_offset as u32), - number_of_symbols: U32::new(LE, symtab_count as u32), - size_of_optional_header: U16::default(), - characteristics: match self.flags { - FileFlags::Coff { characteristics } => U16::new(LE, characteristics), - _ => U16::default(), - }, - }; - buffer.write(&header); - - // Write section headers. - for (index, section) in self.sections.iter().enumerate() { - let mut characteristics = if let SectionFlags::Coff { - characteristics, .. 
- } = section.flags - { - characteristics - } else { - match section.kind { - SectionKind::Text => { - coff::IMAGE_SCN_CNT_CODE - | coff::IMAGE_SCN_MEM_EXECUTE - | coff::IMAGE_SCN_MEM_READ - } - SectionKind::Data => { - coff::IMAGE_SCN_CNT_INITIALIZED_DATA - | coff::IMAGE_SCN_MEM_READ - | coff::IMAGE_SCN_MEM_WRITE - } - SectionKind::UninitializedData => { - coff::IMAGE_SCN_CNT_UNINITIALIZED_DATA - | coff::IMAGE_SCN_MEM_READ - | coff::IMAGE_SCN_MEM_WRITE - } - SectionKind::ReadOnlyData - | SectionKind::ReadOnlyDataWithRel - | SectionKind::ReadOnlyString => { - coff::IMAGE_SCN_CNT_INITIALIZED_DATA | coff::IMAGE_SCN_MEM_READ - } - SectionKind::Debug | SectionKind::Other | SectionKind::OtherString => { - coff::IMAGE_SCN_CNT_INITIALIZED_DATA - | coff::IMAGE_SCN_MEM_READ - | coff::IMAGE_SCN_MEM_DISCARDABLE - } - SectionKind::Linker => coff::IMAGE_SCN_LNK_INFO | coff::IMAGE_SCN_LNK_REMOVE, - SectionKind::Common - | SectionKind::Tls - | SectionKind::UninitializedTls - | SectionKind::TlsVariables - | SectionKind::Note - | SectionKind::Unknown - | SectionKind::Metadata - | SectionKind::Elf(_) => { - return Err(Error(format!( - "unimplemented section `{}` kind {:?}", - section.name().unwrap_or(""), - section.kind - ))); - } - } - }; - if section_offsets[index].selection != 0 { - characteristics |= coff::IMAGE_SCN_LNK_COMDAT; - }; - if section.relocations.len() > 0xffff { - characteristics |= coff::IMAGE_SCN_LNK_NRELOC_OVFL; - } - characteristics |= match section.align { - 1 => coff::IMAGE_SCN_ALIGN_1BYTES, - 2 => coff::IMAGE_SCN_ALIGN_2BYTES, - 4 => coff::IMAGE_SCN_ALIGN_4BYTES, - 8 => coff::IMAGE_SCN_ALIGN_8BYTES, - 16 => coff::IMAGE_SCN_ALIGN_16BYTES, - 32 => coff::IMAGE_SCN_ALIGN_32BYTES, - 64 => coff::IMAGE_SCN_ALIGN_64BYTES, - 128 => coff::IMAGE_SCN_ALIGN_128BYTES, - 256 => coff::IMAGE_SCN_ALIGN_256BYTES, - 512 => coff::IMAGE_SCN_ALIGN_512BYTES, - 1024 => coff::IMAGE_SCN_ALIGN_1024BYTES, - 2048 => coff::IMAGE_SCN_ALIGN_2048BYTES, - 4096 => 
coff::IMAGE_SCN_ALIGN_4096BYTES, - 8192 => coff::IMAGE_SCN_ALIGN_8192BYTES, - _ => { - return Err(Error(format!( - "unimplemented section `{}` align {}", - section.name().unwrap_or(""), - section.align - ))); - } - }; - let mut coff_section = coff::ImageSectionHeader { - name: [0; 8], - virtual_size: U32::default(), - virtual_address: U32::default(), - size_of_raw_data: U32::new(LE, section.size as u32), - pointer_to_raw_data: U32::new(LE, section_offsets[index].offset as u32), - pointer_to_relocations: U32::new(LE, section_offsets[index].reloc_offset as u32), - pointer_to_linenumbers: U32::default(), - number_of_relocations: if section.relocations.len() > 0xffff { - U16::new(LE, 0xffff) - } else { - U16::new(LE, section.relocations.len() as u16) - }, - number_of_linenumbers: U16::default(), - characteristics: U32::new(LE, characteristics), - }; - if section.name.len() <= 8 { - coff_section.name[..section.name.len()].copy_from_slice(§ion.name); - } else { - let mut str_offset = strtab.get_offset(section_offsets[index].str_id.unwrap()); - if str_offset <= 9_999_999 { - let mut name = [0; 7]; - let mut len = 0; - if str_offset == 0 { - name[6] = b'0'; - len = 1; - } else { - while str_offset != 0 { - let rem = (str_offset % 10) as u8; - str_offset /= 10; - name[6 - len] = b'0' + rem; - len += 1; - } - } - coff_section.name = [0; 8]; - coff_section.name[0] = b'/'; - coff_section.name[1..][..len].copy_from_slice(&name[7 - len..]); - } else if str_offset as u64 <= 0xf_ffff_ffff { - coff_section.name[0] = b'/'; - coff_section.name[1] = b'/'; - for i in 0..6 { - let rem = (str_offset % 64) as u8; - str_offset /= 64; - let c = match rem { - 0..=25 => b'A' + rem, - 26..=51 => b'a' + rem - 26, - 52..=61 => b'0' + rem - 52, - 62 => b'+', - 63 => b'/', - _ => unreachable!(), - }; - coff_section.name[7 - i] = c; - } - } else { - return Err(Error(format!("invalid section name offset {}", str_offset))); - } - } - buffer.write(&coff_section); - } - - // Write section data and 
relocations. - for (index, section) in self.sections.iter().enumerate() { - let len = section.data.len(); - if len != 0 { - write_align(buffer, 4); - debug_assert_eq!(section_offsets[index].offset, buffer.len()); - buffer.write_bytes(§ion.data); - } - - if !section.relocations.is_empty() { - debug_assert_eq!(section_offsets[index].reloc_offset, buffer.len()); - if section.relocations.len() > 0xffff { - let coff_relocation = coff::ImageRelocation { - virtual_address: U32Bytes::new(LE, section.relocations.len() as u32 + 1), - symbol_table_index: U32Bytes::new(LE, 0), - typ: U16Bytes::new(LE, 0), - }; - buffer.write(&coff_relocation); - } - for reloc in §ion.relocations { - //assert!(reloc.implicit_addend); - let typ = match self.architecture { - Architecture::I386 => match (reloc.kind, reloc.size, reloc.addend) { - (RelocationKind::Absolute, 16, 0) => coff::IMAGE_REL_I386_DIR16, - (RelocationKind::Relative, 16, 0) => coff::IMAGE_REL_I386_REL16, - (RelocationKind::Absolute, 32, 0) => coff::IMAGE_REL_I386_DIR32, - (RelocationKind::ImageOffset, 32, 0) => coff::IMAGE_REL_I386_DIR32NB, - (RelocationKind::SectionIndex, 16, 0) => coff::IMAGE_REL_I386_SECTION, - (RelocationKind::SectionOffset, 32, 0) => coff::IMAGE_REL_I386_SECREL, - (RelocationKind::SectionOffset, 7, 0) => coff::IMAGE_REL_I386_SECREL7, - (RelocationKind::Relative, 32, -4) => coff::IMAGE_REL_I386_REL32, - (RelocationKind::Coff(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::X86_64 => match (reloc.kind, reloc.size, reloc.addend) { - (RelocationKind::Absolute, 64, 0) => coff::IMAGE_REL_AMD64_ADDR64, - (RelocationKind::Absolute, 32, 0) => coff::IMAGE_REL_AMD64_ADDR32, - (RelocationKind::ImageOffset, 32, 0) => coff::IMAGE_REL_AMD64_ADDR32NB, - (RelocationKind::Relative, 32, -4) => coff::IMAGE_REL_AMD64_REL32, - (RelocationKind::Relative, 32, -5) => coff::IMAGE_REL_AMD64_REL32_1, - (RelocationKind::Relative, 32, -6) => 
coff::IMAGE_REL_AMD64_REL32_2, - (RelocationKind::Relative, 32, -7) => coff::IMAGE_REL_AMD64_REL32_3, - (RelocationKind::Relative, 32, -8) => coff::IMAGE_REL_AMD64_REL32_4, - (RelocationKind::Relative, 32, -9) => coff::IMAGE_REL_AMD64_REL32_5, - (RelocationKind::SectionIndex, 16, 0) => coff::IMAGE_REL_AMD64_SECTION, - (RelocationKind::SectionOffset, 32, 0) => coff::IMAGE_REL_AMD64_SECREL, - (RelocationKind::SectionOffset, 7, 0) => coff::IMAGE_REL_AMD64_SECREL7, - (RelocationKind::Coff(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Arm => match (reloc.kind, reloc.size, reloc.addend) { - (RelocationKind::Absolute, 32, 0) => coff::IMAGE_REL_ARM_ADDR32, - (RelocationKind::ImageOffset, 32, 0) => coff::IMAGE_REL_ARM_ADDR32NB, - (RelocationKind::Relative, 32, -4) => coff::IMAGE_REL_ARM_REL32, - (RelocationKind::SectionIndex, 16, 0) => coff::IMAGE_REL_ARM_SECTION, - (RelocationKind::SectionOffset, 32, 0) => coff::IMAGE_REL_ARM_SECREL, - (RelocationKind::Coff(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Aarch64 => match (reloc.kind, reloc.size, reloc.addend) { - (RelocationKind::Absolute, 32, 0) => coff::IMAGE_REL_ARM64_ADDR32, - (RelocationKind::ImageOffset, 32, 0) => coff::IMAGE_REL_ARM64_ADDR32NB, - (RelocationKind::SectionIndex, 16, 0) => coff::IMAGE_REL_ARM64_SECTION, - (RelocationKind::SectionOffset, 32, 0) => coff::IMAGE_REL_ARM64_SECREL, - (RelocationKind::Absolute, 64, 0) => coff::IMAGE_REL_ARM64_ADDR64, - (RelocationKind::Relative, 32, -4) => coff::IMAGE_REL_ARM64_REL32, - (RelocationKind::Coff(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - _ => { - return Err(Error(format!( - "unimplemented architecture {:?}", - self.architecture - ))); - } - }; - let coff_relocation = coff::ImageRelocation { - virtual_address: U32Bytes::new(LE, reloc.offset as u32), - 
symbol_table_index: U32Bytes::new( - LE, - symbol_offsets[reloc.symbol.0].index as u32, - ), - typ: U16Bytes::new(LE, typ), - }; - buffer.write(&coff_relocation); - } - } - } - - // Write symbols. - debug_assert_eq!(symtab_offset, buffer.len()); - for (index, symbol) in self.symbols.iter().enumerate() { - let mut name = &symbol.name[..]; - let section_number = match symbol.section { - SymbolSection::None => { - debug_assert_eq!(symbol.kind, SymbolKind::File); - coff::IMAGE_SYM_DEBUG as u16 - } - SymbolSection::Undefined => coff::IMAGE_SYM_UNDEFINED as u16, - SymbolSection::Absolute => coff::IMAGE_SYM_ABSOLUTE as u16, - SymbolSection::Common => coff::IMAGE_SYM_UNDEFINED as u16, - SymbolSection::Section(id) => id.0 as u16 + 1, - }; - let typ = if symbol.kind == SymbolKind::Text { - coff::IMAGE_SYM_DTYPE_FUNCTION << coff::IMAGE_SYM_DTYPE_SHIFT - } else { - coff::IMAGE_SYM_TYPE_NULL - }; - let storage_class = match symbol.kind { - SymbolKind::File => { - // Name goes in auxiliary symbol records. - name = b".file"; - coff::IMAGE_SYM_CLASS_FILE - } - SymbolKind::Section => coff::IMAGE_SYM_CLASS_STATIC, - SymbolKind::Label => coff::IMAGE_SYM_CLASS_LABEL, - SymbolKind::Text | SymbolKind::Data | SymbolKind::Tls => { - match symbol.section { - SymbolSection::None => { - return Err(Error(format!( - "missing section for symbol `{}`", - symbol.name().unwrap_or("") - ))); - } - SymbolSection::Undefined | SymbolSection::Common => { - coff::IMAGE_SYM_CLASS_EXTERNAL - } - SymbolSection::Absolute | SymbolSection::Section(_) => { - match symbol.scope { - // TODO: does this need aux symbol records too? 
- _ if symbol.weak => coff::IMAGE_SYM_CLASS_WEAK_EXTERNAL, - SymbolScope::Unknown => { - return Err(Error(format!( - "unimplemented symbol `{}` scope {:?}", - symbol.name().unwrap_or(""), - symbol.scope - ))); - } - SymbolScope::Compilation => coff::IMAGE_SYM_CLASS_STATIC, - SymbolScope::Linkage | SymbolScope::Dynamic => { - coff::IMAGE_SYM_CLASS_EXTERNAL - } - } - } - } - } - SymbolKind::Unknown | SymbolKind::Null => { - return Err(Error(format!( - "unimplemented symbol `{}` kind {:?}", - symbol.name().unwrap_or(""), - symbol.kind - ))); - } - }; - let number_of_aux_symbols = symbol_offsets[index].aux_count; - let value = if symbol.section == SymbolSection::Common { - symbol.size as u32 - } else { - symbol.value as u32 - }; - let mut coff_symbol = coff::ImageSymbol { - name: [0; 8], - value: U32Bytes::new(LE, value), - section_number: U16Bytes::new(LE, section_number), - typ: U16Bytes::new(LE, typ), - storage_class, - number_of_aux_symbols, - }; - if name.len() <= 8 { - coff_symbol.name[..name.len()].copy_from_slice(name); - } else { - let str_offset = strtab.get_offset(symbol_offsets[index].str_id.unwrap()); - coff_symbol.name[4..8].copy_from_slice(&u32::to_le_bytes(str_offset as u32)); - } - buffer.write(&coff_symbol); - - // Write auxiliary symbols. 
- match symbol.kind { - SymbolKind::File => { - let aux_len = number_of_aux_symbols as usize * coff::IMAGE_SIZEOF_SYMBOL; - debug_assert!(aux_len >= symbol.name.len()); - let old_len = buffer.len(); - buffer.write_bytes(&symbol.name); - buffer.resize(old_len + aux_len); - } - SymbolKind::Section => { - debug_assert_eq!(number_of_aux_symbols, 1); - let section_index = symbol.section.id().unwrap().0; - let section = &self.sections[section_index]; - let aux = coff::ImageAuxSymbolSection { - length: U32Bytes::new(LE, section.size as u32), - number_of_relocations: if section.relocations.len() > 0xffff { - U16Bytes::new(LE, 0xffff) - } else { - U16Bytes::new(LE, section.relocations.len() as u16) - }, - number_of_linenumbers: U16Bytes::default(), - check_sum: U32Bytes::new(LE, checksum(section.data())), - number: U16Bytes::new( - LE, - section_offsets[section_index].associative_section, - ), - selection: section_offsets[section_index].selection, - reserved: 0, - // TODO: bigobj - high_number: U16Bytes::default(), - }; - buffer.write(&aux); - } - _ => { - debug_assert_eq!(number_of_aux_symbols, 0); - } - } - } - - // Write strtab section. - debug_assert_eq!(strtab_offset, buffer.len()); - buffer.write_bytes(&u32::to_le_bytes(strtab_len as u32)); - buffer.write_bytes(&strtab_data); - - debug_assert_eq!(offset, buffer.len()); - - Ok(()) - } -} - -// JamCRC -fn checksum(data: &[u8]) -> u32 { - let mut hasher = crc32fast::Hasher::new_with_initial(0xffff_ffff); - hasher.update(data); - !hasher.finalize() -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/elf/mod.rs s390-tools-2.33.1/rust-vendor/object/src/write/elf/mod.rs --- s390-tools-2.31.0/rust-vendor/object/src/write/elf/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/elf/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,9 +0,0 @@ -//! Support for writing ELF files. -//! -//! Provides [`Writer`] for low level writing of ELF files. -//! 
This is also used to provide ELF support for [`write::Object`](crate::write::Object). - -mod object; - -mod writer; -pub use writer::*; diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/elf/object.rs s390-tools-2.33.1/rust-vendor/object/src/write/elf/object.rs --- s390-tools-2.31.0/rust-vendor/object/src/write/elf/object.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/elf/object.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,903 +0,0 @@ -use alloc::vec::Vec; - -use crate::write::elf::writer::*; -use crate::write::string::StringId; -use crate::write::*; -use crate::AddressSize; -use crate::{elf, pod}; - -#[derive(Clone, Copy)] -struct ComdatOffsets { - offset: usize, - str_id: StringId, -} - -#[derive(Clone, Copy)] -struct SectionOffsets { - index: SectionIndex, - offset: usize, - str_id: StringId, - reloc_offset: usize, - reloc_str_id: Option, -} - -#[derive(Default, Clone, Copy)] -struct SymbolOffsets { - index: SymbolIndex, - str_id: Option, -} - -// Public methods. -impl<'a> Object<'a> { - /// Add a property with a u32 value to the ELF ".note.gnu.property" section. - /// - /// Requires `feature = "elf"`. - pub fn add_elf_gnu_property_u32(&mut self, property: u32, value: u32) { - if self.format != BinaryFormat::Elf { - return; - } - - let align = if self.elf_is_64() { 8 } else { 4 }; - let mut data = Vec::with_capacity(32); - let n_name = b"GNU\0"; - data.extend_from_slice(pod::bytes_of(&elf::NoteHeader32 { - n_namesz: U32::new(self.endian, n_name.len() as u32), - n_descsz: U32::new(self.endian, util::align(3 * 4, align) as u32), - n_type: U32::new(self.endian, elf::NT_GNU_PROPERTY_TYPE_0), - })); - data.extend_from_slice(n_name); - // This happens to already be aligned correctly. 
- debug_assert_eq!(util::align(data.len(), align), data.len()); - data.extend_from_slice(pod::bytes_of(&U32::new(self.endian, property))); - // Value size - data.extend_from_slice(pod::bytes_of(&U32::new(self.endian, 4))); - data.extend_from_slice(pod::bytes_of(&U32::new(self.endian, value))); - util::write_align(&mut data, align); - - let section = self.section_id(StandardSection::GnuProperty); - self.append_section_data(section, &data, align as u64); - } -} - -// Private methods. -impl<'a> Object<'a> { - pub(crate) fn elf_section_info( - &self, - section: StandardSection, - ) -> (&'static [u8], &'static [u8], SectionKind, SectionFlags) { - match section { - StandardSection::Text => (&[], &b".text"[..], SectionKind::Text, SectionFlags::None), - StandardSection::Data => (&[], &b".data"[..], SectionKind::Data, SectionFlags::None), - StandardSection::ReadOnlyData | StandardSection::ReadOnlyString => ( - &[], - &b".rodata"[..], - SectionKind::ReadOnlyData, - SectionFlags::None, - ), - StandardSection::ReadOnlyDataWithRel => ( - &[], - b".data.rel.ro", - SectionKind::ReadOnlyDataWithRel, - SectionFlags::None, - ), - StandardSection::UninitializedData => ( - &[], - &b".bss"[..], - SectionKind::UninitializedData, - SectionFlags::None, - ), - StandardSection::Tls => (&[], &b".tdata"[..], SectionKind::Tls, SectionFlags::None), - StandardSection::UninitializedTls => ( - &[], - &b".tbss"[..], - SectionKind::UninitializedTls, - SectionFlags::None, - ), - StandardSection::TlsVariables => { - // Unsupported section. - (&[], &[], SectionKind::TlsVariables, SectionFlags::None) - } - StandardSection::Common => { - // Unsupported section. 
- (&[], &[], SectionKind::Common, SectionFlags::None) - } - StandardSection::GnuProperty => ( - &[], - &b".note.gnu.property"[..], - SectionKind::Note, - SectionFlags::Elf { - sh_flags: u64::from(elf::SHF_ALLOC), - }, - ), - } - } - - pub(crate) fn elf_subsection_name(&self, section: &[u8], value: &[u8]) -> Vec { - let mut name = section.to_vec(); - name.push(b'.'); - name.extend_from_slice(value); - name - } - - fn elf_has_relocation_addend(&self) -> Result { - Ok(match self.architecture { - Architecture::Aarch64 => true, - Architecture::Aarch64_Ilp32 => true, - Architecture::Arm => false, - Architecture::Avr => true, - Architecture::Bpf => false, - Architecture::Csky => true, - Architecture::I386 => false, - Architecture::X86_64 => true, - Architecture::X86_64_X32 => true, - Architecture::Hexagon => true, - Architecture::LoongArch64 => true, - Architecture::Mips => false, - Architecture::Mips64 => true, - Architecture::Msp430 => true, - Architecture::PowerPc => true, - Architecture::PowerPc64 => true, - Architecture::Riscv64 => true, - Architecture::Riscv32 => true, - Architecture::S390x => true, - Architecture::Sbf => false, - Architecture::Sparc64 => true, - Architecture::Xtensa => true, - _ => { - return Err(Error(format!( - "unimplemented architecture {:?}", - self.architecture - ))); - } - }) - } - - pub(crate) fn elf_fixup_relocation(&mut self, relocation: &mut Relocation) -> Result { - // Return true if we should use a section symbol to avoid preemption. - fn want_section_symbol(relocation: &Relocation, symbol: &Symbol) -> bool { - if symbol.scope != SymbolScope::Dynamic { - // Only dynamic symbols can be preemptible. - return false; - } - match symbol.kind { - SymbolKind::Text | SymbolKind::Data => {} - _ => return false, - } - match relocation.kind { - // Anything using GOT or PLT is preemptible. - // We also require that `Other` relocations must already be correct. 
- RelocationKind::Got - | RelocationKind::GotRelative - | RelocationKind::GotBaseRelative - | RelocationKind::PltRelative - | RelocationKind::Elf(_) => return false, - // Absolute relocations are preemptible for non-local data. - // TODO: not sure if this rule is exactly correct - // This rule was added to handle global data references in debuginfo. - // Maybe this should be a new relocation kind so that the caller can decide. - RelocationKind::Absolute => { - if symbol.kind == SymbolKind::Data { - return false; - } - } - _ => {} - } - true - } - - // Use section symbols for relocations where required to avoid preemption. - // Otherwise, the linker will fail with: - // relocation R_X86_64_PC32 against symbol `SomeSymbolName' can not be used when - // making a shared object; recompile with -fPIC - let symbol = &self.symbols[relocation.symbol.0]; - if want_section_symbol(relocation, symbol) { - if let Some(section) = symbol.section.id() { - relocation.addend += symbol.value as i64; - relocation.symbol = self.section_symbol(section); - } - } - - // Determine whether the addend is stored in the relocation or the data. - if self.elf_has_relocation_addend()? { - Ok(0) - } else { - let constant = relocation.addend; - relocation.addend = 0; - Ok(constant) - } - } - - pub(crate) fn elf_is_64(&self) -> bool { - match self.architecture.address_size().unwrap() { - AddressSize::U8 | AddressSize::U16 | AddressSize::U32 => false, - AddressSize::U64 => true, - } - } - - pub(crate) fn elf_write(&self, buffer: &mut dyn WritableBuffer) -> Result<()> { - // Create reloc section header names so we can reference them. - let is_rela = self.elf_has_relocation_addend()?; - let reloc_names: Vec<_> = self - .sections - .iter() - .map(|section| { - let mut reloc_name = Vec::with_capacity( - if is_rela { ".rela".len() } else { ".rel".len() } + section.name.len(), - ); - if !section.relocations.is_empty() { - reloc_name.extend_from_slice(if is_rela { - &b".rela"[..] - } else { - &b".rel"[..] 
- }); - reloc_name.extend_from_slice(§ion.name); - } - reloc_name - }) - .collect(); - - // Start calculating offsets of everything. - let mut writer = Writer::new(self.endian, self.elf_is_64(), buffer); - writer.reserve_file_header(); - - // Calculate size of section data. - let mut comdat_offsets = Vec::with_capacity(self.comdats.len()); - for comdat in &self.comdats { - if comdat.kind != ComdatKind::Any { - return Err(Error(format!( - "unsupported COMDAT symbol `{}` kind {:?}", - self.symbols[comdat.symbol.0].name().unwrap_or(""), - comdat.kind - ))); - } - - writer.reserve_section_index(); - let offset = writer.reserve_comdat(comdat.sections.len()); - let str_id = writer.add_section_name(b".group"); - comdat_offsets.push(ComdatOffsets { offset, str_id }); - } - let mut section_offsets = Vec::with_capacity(self.sections.len()); - for (section, reloc_name) in self.sections.iter().zip(reloc_names.iter()) { - let index = writer.reserve_section_index(); - let offset = writer.reserve(section.data.len(), section.align as usize); - let str_id = writer.add_section_name(§ion.name); - let mut reloc_str_id = None; - if !section.relocations.is_empty() { - writer.reserve_section_index(); - reloc_str_id = Some(writer.add_section_name(reloc_name)); - } - section_offsets.push(SectionOffsets { - index, - offset, - str_id, - // Relocation data is reserved later. - reloc_offset: 0, - reloc_str_id, - }); - } - - // Calculate index of symbols and add symbol strings to strtab. - let mut symbol_offsets = vec![SymbolOffsets::default(); self.symbols.len()]; - writer.reserve_null_symbol_index(); - // Local symbols must come before global. 
- for (index, symbol) in self.symbols.iter().enumerate() { - if symbol.is_local() { - let section_index = symbol.section.id().map(|s| section_offsets[s.0].index); - symbol_offsets[index].index = writer.reserve_symbol_index(section_index); - } - } - let symtab_num_local = writer.symbol_count(); - for (index, symbol) in self.symbols.iter().enumerate() { - if !symbol.is_local() { - let section_index = symbol.section.id().map(|s| section_offsets[s.0].index); - symbol_offsets[index].index = writer.reserve_symbol_index(section_index); - } - } - for (index, symbol) in self.symbols.iter().enumerate() { - if symbol.kind != SymbolKind::Section && !symbol.name.is_empty() { - symbol_offsets[index].str_id = Some(writer.add_string(&symbol.name)); - } - } - - // Calculate size of symbols. - writer.reserve_symtab_section_index(); - writer.reserve_symtab(); - if writer.symtab_shndx_needed() { - writer.reserve_symtab_shndx_section_index(); - } - writer.reserve_symtab_shndx(); - writer.reserve_strtab_section_index(); - writer.reserve_strtab(); - - // Calculate size of relocations. - for (index, section) in self.sections.iter().enumerate() { - let count = section.relocations.len(); - if count != 0 { - section_offsets[index].reloc_offset = writer.reserve_relocations(count, is_rela); - } - } - - // Calculate size of section headers. - writer.reserve_shstrtab_section_index(); - writer.reserve_shstrtab(); - writer.reserve_section_headers(); - - // Start writing. 
- let e_type = elf::ET_REL; - let e_machine = match self.architecture { - Architecture::Aarch64 => elf::EM_AARCH64, - Architecture::Aarch64_Ilp32 => elf::EM_AARCH64, - Architecture::Arm => elf::EM_ARM, - Architecture::Avr => elf::EM_AVR, - Architecture::Bpf => elf::EM_BPF, - Architecture::Csky => elf::EM_CSKY, - Architecture::I386 => elf::EM_386, - Architecture::X86_64 => elf::EM_X86_64, - Architecture::X86_64_X32 => elf::EM_X86_64, - Architecture::Hexagon => elf::EM_HEXAGON, - Architecture::LoongArch64 => elf::EM_LOONGARCH, - Architecture::Mips => elf::EM_MIPS, - Architecture::Mips64 => elf::EM_MIPS, - Architecture::Msp430 => elf::EM_MSP430, - Architecture::PowerPc => elf::EM_PPC, - Architecture::PowerPc64 => elf::EM_PPC64, - Architecture::Riscv32 => elf::EM_RISCV, - Architecture::Riscv64 => elf::EM_RISCV, - Architecture::S390x => elf::EM_S390, - Architecture::Sbf => elf::EM_SBF, - Architecture::Sparc64 => elf::EM_SPARCV9, - Architecture::Xtensa => elf::EM_XTENSA, - _ => { - return Err(Error(format!( - "unimplemented architecture {:?}", - self.architecture - ))); - } - }; - let (os_abi, abi_version, e_flags) = if let FileFlags::Elf { - os_abi, - abi_version, - e_flags, - } = self.flags - { - (os_abi, abi_version, e_flags) - } else { - (elf::ELFOSABI_NONE, 0, 0) - }; - writer.write_file_header(&FileHeader { - os_abi, - abi_version, - e_type, - e_machine, - e_entry: 0, - e_flags, - })?; - - // Write section data. - for comdat in &self.comdats { - writer.write_comdat_header(); - for section in &comdat.sections { - writer.write_comdat_entry(section_offsets[section.0].index); - } - } - for (index, section) in self.sections.iter().enumerate() { - writer.write_align(section.align as usize); - debug_assert_eq!(section_offsets[index].offset, writer.len()); - writer.write(§ion.data); - } - - // Write symbols. - writer.write_null_symbol(); - let mut write_symbol = |index: usize, symbol: &Symbol| -> Result<()> { - let st_info = if let SymbolFlags::Elf { st_info, .. 
} = symbol.flags { - st_info - } else { - let st_type = match symbol.kind { - SymbolKind::Null => elf::STT_NOTYPE, - SymbolKind::Text => { - if symbol.is_undefined() { - elf::STT_NOTYPE - } else { - elf::STT_FUNC - } - } - SymbolKind::Data => { - if symbol.is_undefined() { - elf::STT_NOTYPE - } else if symbol.is_common() { - elf::STT_COMMON - } else { - elf::STT_OBJECT - } - } - SymbolKind::Section => elf::STT_SECTION, - SymbolKind::File => elf::STT_FILE, - SymbolKind::Tls => elf::STT_TLS, - SymbolKind::Label => elf::STT_NOTYPE, - SymbolKind::Unknown => { - if symbol.is_undefined() { - elf::STT_NOTYPE - } else { - return Err(Error(format!( - "unimplemented symbol `{}` kind {:?}", - symbol.name().unwrap_or(""), - symbol.kind - ))); - } - } - }; - let st_bind = if symbol.weak { - elf::STB_WEAK - } else if symbol.is_undefined() { - elf::STB_GLOBAL - } else if symbol.is_local() { - elf::STB_LOCAL - } else { - elf::STB_GLOBAL - }; - (st_bind << 4) + st_type - }; - let st_other = if let SymbolFlags::Elf { st_other, .. 
} = symbol.flags { - st_other - } else if symbol.scope == SymbolScope::Linkage { - elf::STV_HIDDEN - } else { - elf::STV_DEFAULT - }; - let (st_shndx, section) = match symbol.section { - SymbolSection::None => { - debug_assert_eq!(symbol.kind, SymbolKind::File); - (elf::SHN_ABS, None) - } - SymbolSection::Undefined => (elf::SHN_UNDEF, None), - SymbolSection::Absolute => (elf::SHN_ABS, None), - SymbolSection::Common => (elf::SHN_COMMON, None), - SymbolSection::Section(id) => (0, Some(section_offsets[id.0].index)), - }; - writer.write_symbol(&Sym { - name: symbol_offsets[index].str_id, - section, - st_info, - st_other, - st_shndx, - st_value: symbol.value, - st_size: symbol.size, - }); - Ok(()) - }; - for (index, symbol) in self.symbols.iter().enumerate() { - if symbol.is_local() { - write_symbol(index, symbol)?; - } - } - for (index, symbol) in self.symbols.iter().enumerate() { - if !symbol.is_local() { - write_symbol(index, symbol)?; - } - } - writer.write_symtab_shndx(); - writer.write_strtab(); - - // Write relocations. 
- for (index, section) in self.sections.iter().enumerate() { - if !section.relocations.is_empty() { - writer.write_align_relocation(); - debug_assert_eq!(section_offsets[index].reloc_offset, writer.len()); - for reloc in §ion.relocations { - let r_type = match self.architecture { - Architecture::Aarch64 => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, RelocationEncoding::Generic, 64) => { - elf::R_AARCH64_ABS64 - } - (RelocationKind::Absolute, RelocationEncoding::Generic, 32) => { - elf::R_AARCH64_ABS32 - } - (RelocationKind::Absolute, RelocationEncoding::Generic, 16) => { - elf::R_AARCH64_ABS16 - } - (RelocationKind::Relative, RelocationEncoding::Generic, 64) => { - elf::R_AARCH64_PREL64 - } - (RelocationKind::Relative, RelocationEncoding::Generic, 32) => { - elf::R_AARCH64_PREL32 - } - (RelocationKind::Relative, RelocationEncoding::Generic, 16) => { - elf::R_AARCH64_PREL16 - } - (RelocationKind::Relative, RelocationEncoding::AArch64Call, 26) - | (RelocationKind::PltRelative, RelocationEncoding::AArch64Call, 26) => { - elf::R_AARCH64_CALL26 - } - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Aarch64_Ilp32 => { - match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, RelocationEncoding::Generic, 32) => { - elf::R_AARCH64_P32_ABS32 - } - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!( - "unimplemented relocation {:?}", - reloc - ))); - } - } - } - Architecture::Arm => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 32) => elf::R_ARM_ABS32, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Avr => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 32) => elf::R_AVR_32, - (RelocationKind::Absolute, _, 16) => elf::R_AVR_16, - 
(RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Bpf => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 64) => elf::R_BPF_64_64, - (RelocationKind::Absolute, _, 32) => elf::R_BPF_64_32, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Csky => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 32) => elf::R_CKCORE_ADDR32, - (RelocationKind::Relative, RelocationEncoding::Generic, 32) => { - elf::R_CKCORE_PCREL32 - } - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::I386 => match (reloc.kind, reloc.size) { - (RelocationKind::Absolute, 32) => elf::R_386_32, - (RelocationKind::Relative, 32) => elf::R_386_PC32, - (RelocationKind::Got, 32) => elf::R_386_GOT32, - (RelocationKind::PltRelative, 32) => elf::R_386_PLT32, - (RelocationKind::GotBaseOffset, 32) => elf::R_386_GOTOFF, - (RelocationKind::GotBaseRelative, 32) => elf::R_386_GOTPC, - (RelocationKind::Absolute, 16) => elf::R_386_16, - (RelocationKind::Relative, 16) => elf::R_386_PC16, - (RelocationKind::Absolute, 8) => elf::R_386_8, - (RelocationKind::Relative, 8) => elf::R_386_PC8, - (RelocationKind::Elf(x), _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::X86_64 | Architecture::X86_64_X32 => { - match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, RelocationEncoding::Generic, 64) => { - elf::R_X86_64_64 - } - (RelocationKind::Relative, _, 32) => elf::R_X86_64_PC32, - (RelocationKind::Got, _, 32) => elf::R_X86_64_GOT32, - (RelocationKind::PltRelative, _, 32) => elf::R_X86_64_PLT32, - (RelocationKind::GotRelative, _, 32) => elf::R_X86_64_GOTPCREL, - (RelocationKind::Absolute, 
RelocationEncoding::Generic, 32) => { - elf::R_X86_64_32 - } - (RelocationKind::Absolute, RelocationEncoding::X86Signed, 32) => { - elf::R_X86_64_32S - } - (RelocationKind::Absolute, _, 16) => elf::R_X86_64_16, - (RelocationKind::Relative, _, 16) => elf::R_X86_64_PC16, - (RelocationKind::Absolute, _, 8) => elf::R_X86_64_8, - (RelocationKind::Relative, _, 8) => elf::R_X86_64_PC8, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!( - "unimplemented relocation {:?}", - reloc - ))); - } - } - } - Architecture::Hexagon => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 32) => elf::R_HEX_32, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::LoongArch64 => match (reloc.kind, reloc.encoding, reloc.size) - { - (RelocationKind::Absolute, _, 32) => elf::R_LARCH_32, - (RelocationKind::Absolute, _, 64) => elf::R_LARCH_64, - (RelocationKind::Relative, _, 32) => elf::R_LARCH_32_PCREL, - (RelocationKind::Relative, RelocationEncoding::LoongArchBranch, 16) - | ( - RelocationKind::PltRelative, - RelocationEncoding::LoongArchBranch, - 16, - ) => elf::R_LARCH_B16, - (RelocationKind::Relative, RelocationEncoding::LoongArchBranch, 21) - | ( - RelocationKind::PltRelative, - RelocationEncoding::LoongArchBranch, - 21, - ) => elf::R_LARCH_B21, - (RelocationKind::Relative, RelocationEncoding::LoongArchBranch, 26) - | ( - RelocationKind::PltRelative, - RelocationEncoding::LoongArchBranch, - 26, - ) => elf::R_LARCH_B26, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Mips | Architecture::Mips64 => { - match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 16) => elf::R_MIPS_16, - (RelocationKind::Absolute, _, 32) => elf::R_MIPS_32, - (RelocationKind::Absolute, _, 64) => elf::R_MIPS_64, - (RelocationKind::Elf(x), _, _) => x, - _ 
=> { - return Err(Error(format!( - "unimplemented relocation {:?}", - reloc - ))); - } - } - } - Architecture::Msp430 => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 32) => elf::R_MSP430_32, - (RelocationKind::Absolute, _, 16) => elf::R_MSP430_16_BYTE, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::PowerPc => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 32) => elf::R_PPC_ADDR32, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::PowerPc64 => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 32) => elf::R_PPC64_ADDR32, - (RelocationKind::Absolute, _, 64) => elf::R_PPC64_ADDR64, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Riscv32 | Architecture::Riscv64 => { - match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 32) => elf::R_RISCV_32, - (RelocationKind::Absolute, _, 64) => elf::R_RISCV_64, - (RelocationKind::Relative, RelocationEncoding::Generic, 32) => { - elf::R_RISCV_32_PCREL - } - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!( - "unimplemented relocation {:?}", - reloc - ))); - } - } - } - Architecture::S390x => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, RelocationEncoding::Generic, 8) => { - elf::R_390_8 - } - (RelocationKind::Absolute, RelocationEncoding::Generic, 16) => { - elf::R_390_16 - } - (RelocationKind::Absolute, RelocationEncoding::Generic, 32) => { - elf::R_390_32 - } - (RelocationKind::Absolute, RelocationEncoding::Generic, 64) => { - elf::R_390_64 - } - (RelocationKind::Relative, RelocationEncoding::Generic, 16) => { - elf::R_390_PC16 - } - (RelocationKind::Relative, 
RelocationEncoding::Generic, 32) => { - elf::R_390_PC32 - } - (RelocationKind::Relative, RelocationEncoding::Generic, 64) => { - elf::R_390_PC64 - } - (RelocationKind::Relative, RelocationEncoding::S390xDbl, 16) => { - elf::R_390_PC16DBL - } - (RelocationKind::Relative, RelocationEncoding::S390xDbl, 32) => { - elf::R_390_PC32DBL - } - (RelocationKind::PltRelative, RelocationEncoding::S390xDbl, 16) => { - elf::R_390_PLT16DBL - } - (RelocationKind::PltRelative, RelocationEncoding::S390xDbl, 32) => { - elf::R_390_PLT32DBL - } - (RelocationKind::Got, RelocationEncoding::Generic, 16) => { - elf::R_390_GOT16 - } - (RelocationKind::Got, RelocationEncoding::Generic, 32) => { - elf::R_390_GOT32 - } - (RelocationKind::Got, RelocationEncoding::Generic, 64) => { - elf::R_390_GOT64 - } - (RelocationKind::GotRelative, RelocationEncoding::S390xDbl, 32) => { - elf::R_390_GOTENT - } - (RelocationKind::GotBaseOffset, RelocationEncoding::Generic, 16) => { - elf::R_390_GOTOFF16 - } - (RelocationKind::GotBaseOffset, RelocationEncoding::Generic, 32) => { - elf::R_390_GOTOFF32 - } - (RelocationKind::GotBaseOffset, RelocationEncoding::Generic, 64) => { - elf::R_390_GOTOFF64 - } - (RelocationKind::GotBaseRelative, RelocationEncoding::Generic, 64) => { - elf::R_390_GOTPC - } - (RelocationKind::GotBaseRelative, RelocationEncoding::S390xDbl, 32) => { - elf::R_390_GOTPCDBL - } - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Sbf => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 64) => elf::R_SBF_64_64, - (RelocationKind::Absolute, _, 32) => elf::R_SBF_64_32, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Sparc64 => match (reloc.kind, reloc.encoding, reloc.size) { - // TODO: use R_SPARC_32/R_SPARC_64 if aligned. 
- (RelocationKind::Absolute, _, 32) => elf::R_SPARC_UA32, - (RelocationKind::Absolute, _, 64) => elf::R_SPARC_UA64, - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::Xtensa => match (reloc.kind, reloc.encoding, reloc.size) { - (RelocationKind::Absolute, _, 32) => elf::R_XTENSA_32, - (RelocationKind::Relative, RelocationEncoding::Generic, 32) => { - elf::R_XTENSA_32_PCREL - } - (RelocationKind::Elf(x), _, _) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - _ => { - if let RelocationKind::Elf(x) = reloc.kind { - x - } else { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - } - }; - let r_sym = symbol_offsets[reloc.symbol.0].index.0; - writer.write_relocation( - is_rela, - &Rel { - r_offset: reloc.offset, - r_sym, - r_type, - r_addend: reloc.addend, - }, - ); - } - } - } - - writer.write_shstrtab(); - - // Write section headers. 
- writer.write_null_section_header(); - - let symtab_index = writer.symtab_index(); - for (comdat, comdat_offset) in self.comdats.iter().zip(comdat_offsets.iter()) { - writer.write_comdat_section_header( - comdat_offset.str_id, - symtab_index, - symbol_offsets[comdat.symbol.0].index, - comdat_offset.offset, - comdat.sections.len(), - ); - } - for (index, section) in self.sections.iter().enumerate() { - let sh_type = match section.kind { - SectionKind::UninitializedData | SectionKind::UninitializedTls => elf::SHT_NOBITS, - SectionKind::Note => elf::SHT_NOTE, - SectionKind::Elf(sh_type) => sh_type, - _ => elf::SHT_PROGBITS, - }; - let sh_flags = if let SectionFlags::Elf { sh_flags } = section.flags { - sh_flags - } else { - match section.kind { - SectionKind::Text => elf::SHF_ALLOC | elf::SHF_EXECINSTR, - SectionKind::Data | SectionKind::ReadOnlyDataWithRel => { - elf::SHF_ALLOC | elf::SHF_WRITE - } - SectionKind::Tls => elf::SHF_ALLOC | elf::SHF_WRITE | elf::SHF_TLS, - SectionKind::UninitializedData => elf::SHF_ALLOC | elf::SHF_WRITE, - SectionKind::UninitializedTls => elf::SHF_ALLOC | elf::SHF_WRITE | elf::SHF_TLS, - SectionKind::ReadOnlyData => elf::SHF_ALLOC, - SectionKind::ReadOnlyString => { - elf::SHF_ALLOC | elf::SHF_STRINGS | elf::SHF_MERGE - } - SectionKind::OtherString => elf::SHF_STRINGS | elf::SHF_MERGE, - SectionKind::Other - | SectionKind::Debug - | SectionKind::Metadata - | SectionKind::Linker - | SectionKind::Note - | SectionKind::Elf(_) => 0, - SectionKind::Unknown | SectionKind::Common | SectionKind::TlsVariables => { - return Err(Error(format!( - "unimplemented section `{}` kind {:?}", - section.name().unwrap_or(""), - section.kind - ))); - } - } - .into() - }; - // TODO: not sure if this is correct, maybe user should determine this - let sh_entsize = match section.kind { - SectionKind::ReadOnlyString | SectionKind::OtherString => 1, - _ => 0, - }; - writer.write_section_header(&SectionHeader { - name: Some(section_offsets[index].str_id), - 
sh_type, - sh_flags, - sh_addr: 0, - sh_offset: section_offsets[index].offset as u64, - sh_size: section.size, - sh_link: 0, - sh_info: 0, - sh_addralign: section.align, - sh_entsize, - }); - - if !section.relocations.is_empty() { - writer.write_relocation_section_header( - section_offsets[index].reloc_str_id.unwrap(), - section_offsets[index].index, - symtab_index, - section_offsets[index].reloc_offset, - section.relocations.len(), - is_rela, - ); - } - } - - writer.write_symtab_section_header(symtab_num_local); - writer.write_symtab_shndx_section_header(); - writer.write_strtab_section_header(); - writer.write_shstrtab_section_header(); - - debug_assert_eq!(writer.reserved_len(), writer.len()); - - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/elf/writer.rs s390-tools-2.33.1/rust-vendor/object/src/write/elf/writer.rs --- s390-tools-2.31.0/rust-vendor/object/src/write/elf/writer.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/elf/writer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2143 +0,0 @@ -//! Helper for writing ELF files. -use alloc::string::String; -use alloc::vec::Vec; -use core::mem; - -use crate::elf; -use crate::endian::*; -use crate::pod; -use crate::write::string::{StringId, StringTable}; -use crate::write::util; -use crate::write::{Error, Result, WritableBuffer}; - -/// The index of an ELF section. -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct SectionIndex(pub u32); - -/// The index of an ELF symbol. -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct SymbolIndex(pub u32); - -/// A helper for writing ELF files. -/// -/// Writing uses a two phase approach. 
The first phase builds up all of the information -/// that may need to be known ahead of time: -/// - build string tables -/// - reserve section indices -/// - reserve symbol indices -/// - reserve file ranges for headers and sections -/// -/// Some of the information has ordering requirements. For example, strings must be added -/// to string tables before reserving the file range for the string table. Symbol indices -/// must be reserved after reserving the section indices they reference. There are debug -/// asserts to check some of these requirements. -/// -/// The second phase writes everything out in order. Thus the caller must ensure writing -/// is in the same order that file ranges were reserved. There are debug asserts to assist -/// with checking this. -#[allow(missing_debug_implementations)] -pub struct Writer<'a> { - endian: Endianness, - is_64: bool, - is_mips64el: bool, - elf_align: usize, - - buffer: &'a mut dyn WritableBuffer, - len: usize, - - segment_offset: usize, - segment_num: u32, - - section_offset: usize, - section_num: u32, - - shstrtab: StringTable<'a>, - shstrtab_str_id: Option, - shstrtab_index: SectionIndex, - shstrtab_offset: usize, - shstrtab_data: Vec, - - need_strtab: bool, - strtab: StringTable<'a>, - strtab_str_id: Option, - strtab_index: SectionIndex, - strtab_offset: usize, - strtab_data: Vec, - - symtab_str_id: Option, - symtab_index: SectionIndex, - symtab_offset: usize, - symtab_num: u32, - - need_symtab_shndx: bool, - symtab_shndx_str_id: Option, - symtab_shndx_offset: usize, - symtab_shndx_data: Vec, - - need_dynstr: bool, - dynstr: StringTable<'a>, - dynstr_str_id: Option, - dynstr_index: SectionIndex, - dynstr_offset: usize, - dynstr_data: Vec, - - dynsym_str_id: Option, - dynsym_index: SectionIndex, - dynsym_offset: usize, - dynsym_num: u32, - - dynamic_str_id: Option, - dynamic_offset: usize, - dynamic_num: usize, - - hash_str_id: Option, - hash_offset: usize, - hash_size: usize, - - gnu_hash_str_id: Option, - 
gnu_hash_offset: usize, - gnu_hash_size: usize, - - gnu_versym_str_id: Option, - gnu_versym_offset: usize, - - gnu_verdef_str_id: Option, - gnu_verdef_offset: usize, - gnu_verdef_size: usize, - gnu_verdef_count: u16, - gnu_verdef_remaining: u16, - gnu_verdaux_remaining: u16, - - gnu_verneed_str_id: Option, - gnu_verneed_offset: usize, - gnu_verneed_size: usize, - gnu_verneed_count: u16, - gnu_verneed_remaining: u16, - gnu_vernaux_remaining: u16, - - gnu_attributes_str_id: Option, - gnu_attributes_offset: usize, - gnu_attributes_size: usize, -} - -impl<'a> Writer<'a> { - /// Create a new `Writer` for the given endianness and ELF class. - pub fn new(endian: Endianness, is_64: bool, buffer: &'a mut dyn WritableBuffer) -> Self { - let elf_align = if is_64 { 8 } else { 4 }; - Writer { - endian, - is_64, - // Determined later. - is_mips64el: false, - elf_align, - - buffer, - len: 0, - - segment_offset: 0, - segment_num: 0, - - section_offset: 0, - section_num: 0, - - shstrtab: StringTable::default(), - shstrtab_str_id: None, - shstrtab_index: SectionIndex(0), - shstrtab_offset: 0, - shstrtab_data: Vec::new(), - - need_strtab: false, - strtab: StringTable::default(), - strtab_str_id: None, - strtab_index: SectionIndex(0), - strtab_offset: 0, - strtab_data: Vec::new(), - - symtab_str_id: None, - symtab_index: SectionIndex(0), - symtab_offset: 0, - symtab_num: 0, - - need_symtab_shndx: false, - symtab_shndx_str_id: None, - symtab_shndx_offset: 0, - symtab_shndx_data: Vec::new(), - - need_dynstr: false, - dynstr: StringTable::default(), - dynstr_str_id: None, - dynstr_index: SectionIndex(0), - dynstr_offset: 0, - dynstr_data: Vec::new(), - - dynsym_str_id: None, - dynsym_index: SectionIndex(0), - dynsym_offset: 0, - dynsym_num: 0, - - dynamic_str_id: None, - dynamic_offset: 0, - dynamic_num: 0, - - hash_str_id: None, - hash_offset: 0, - hash_size: 0, - - gnu_hash_str_id: None, - gnu_hash_offset: 0, - gnu_hash_size: 0, - - gnu_versym_str_id: None, - gnu_versym_offset: 0, - - 
gnu_verdef_str_id: None, - gnu_verdef_offset: 0, - gnu_verdef_size: 0, - gnu_verdef_count: 0, - gnu_verdef_remaining: 0, - gnu_verdaux_remaining: 0, - - gnu_verneed_str_id: None, - gnu_verneed_offset: 0, - gnu_verneed_size: 0, - gnu_verneed_count: 0, - gnu_verneed_remaining: 0, - gnu_vernaux_remaining: 0, - - gnu_attributes_str_id: None, - gnu_attributes_offset: 0, - gnu_attributes_size: 0, - } - } - - /// Return the current file length that has been reserved. - pub fn reserved_len(&self) -> usize { - self.len - } - - /// Return the current file length that has been written. - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - self.buffer.len() - } - - /// Reserve a file range with the given size and starting alignment. - /// - /// Returns the aligned offset of the start of the range. - pub fn reserve(&mut self, len: usize, align_start: usize) -> usize { - if align_start > 1 { - self.len = util::align(self.len, align_start); - } - let offset = self.len; - self.len += len; - offset - } - - /// Write alignment padding bytes. - pub fn write_align(&mut self, align_start: usize) { - if align_start > 1 { - util::write_align(self.buffer, align_start); - } - } - - /// Write data. - /// - /// This is typically used to write section data. - pub fn write(&mut self, data: &[u8]) { - self.buffer.write_bytes(data); - } - - /// Reserve the file range up to the given file offset. - pub fn reserve_until(&mut self, offset: usize) { - debug_assert!(self.len <= offset); - self.len = offset; - } - - /// Write padding up to the given file offset. - pub fn pad_until(&mut self, offset: usize) { - debug_assert!(self.buffer.len() <= offset); - self.buffer.resize(offset); - } - - fn file_header_size(&self) -> usize { - if self.is_64 { - mem::size_of::>() - } else { - mem::size_of::>() - } - } - - /// Reserve the range for the file header. - /// - /// This must be at the start of the file. 
- pub fn reserve_file_header(&mut self) { - debug_assert_eq!(self.len, 0); - self.reserve(self.file_header_size(), 1); - } - - /// Write the file header. - /// - /// This must be at the start of the file. - /// - /// Fields that can be derived from known information are automatically set by this function. - pub fn write_file_header(&mut self, header: &FileHeader) -> Result<()> { - debug_assert_eq!(self.buffer.len(), 0); - - self.is_mips64el = - self.is_64 && self.endian.is_little_endian() && header.e_machine == elf::EM_MIPS; - - // Start writing. - self.buffer - .reserve(self.len) - .map_err(|_| Error(String::from("Cannot allocate buffer")))?; - - // Write file header. - let e_ident = elf::Ident { - magic: elf::ELFMAG, - class: if self.is_64 { - elf::ELFCLASS64 - } else { - elf::ELFCLASS32 - }, - data: if self.endian.is_little_endian() { - elf::ELFDATA2LSB - } else { - elf::ELFDATA2MSB - }, - version: elf::EV_CURRENT, - os_abi: header.os_abi, - abi_version: header.abi_version, - padding: [0; 7], - }; - - let e_ehsize = self.file_header_size() as u16; - - let e_phoff = self.segment_offset as u64; - let e_phentsize = if self.segment_num == 0 { - 0 - } else { - self.program_header_size() as u16 - }; - // TODO: overflow - let e_phnum = self.segment_num as u16; - - let e_shoff = self.section_offset as u64; - let e_shentsize = if self.section_num == 0 { - 0 - } else { - self.section_header_size() as u16 - }; - let e_shnum = if self.section_num >= elf::SHN_LORESERVE.into() { - 0 - } else { - self.section_num as u16 - }; - let e_shstrndx = if self.shstrtab_index.0 >= elf::SHN_LORESERVE.into() { - elf::SHN_XINDEX - } else { - self.shstrtab_index.0 as u16 - }; - - let endian = self.endian; - if self.is_64 { - let file = elf::FileHeader64 { - e_ident, - e_type: U16::new(endian, header.e_type), - e_machine: U16::new(endian, header.e_machine), - e_version: U32::new(endian, elf::EV_CURRENT.into()), - e_entry: U64::new(endian, header.e_entry), - e_phoff: U64::new(endian, 
e_phoff), - e_shoff: U64::new(endian, e_shoff), - e_flags: U32::new(endian, header.e_flags), - e_ehsize: U16::new(endian, e_ehsize), - e_phentsize: U16::new(endian, e_phentsize), - e_phnum: U16::new(endian, e_phnum), - e_shentsize: U16::new(endian, e_shentsize), - e_shnum: U16::new(endian, e_shnum), - e_shstrndx: U16::new(endian, e_shstrndx), - }; - self.buffer.write(&file) - } else { - let file = elf::FileHeader32 { - e_ident, - e_type: U16::new(endian, header.e_type), - e_machine: U16::new(endian, header.e_machine), - e_version: U32::new(endian, elf::EV_CURRENT.into()), - e_entry: U32::new(endian, header.e_entry as u32), - e_phoff: U32::new(endian, e_phoff as u32), - e_shoff: U32::new(endian, e_shoff as u32), - e_flags: U32::new(endian, header.e_flags), - e_ehsize: U16::new(endian, e_ehsize), - e_phentsize: U16::new(endian, e_phentsize), - e_phnum: U16::new(endian, e_phnum), - e_shentsize: U16::new(endian, e_shentsize), - e_shnum: U16::new(endian, e_shnum), - e_shstrndx: U16::new(endian, e_shstrndx), - }; - self.buffer.write(&file); - } - - Ok(()) - } - - fn program_header_size(&self) -> usize { - if self.is_64 { - mem::size_of::>() - } else { - mem::size_of::>() - } - } - - /// Reserve the range for the program headers. - pub fn reserve_program_headers(&mut self, num: u32) { - debug_assert_eq!(self.segment_offset, 0); - if num == 0 { - return; - } - self.segment_num = num; - self.segment_offset = - self.reserve(num as usize * self.program_header_size(), self.elf_align); - } - - /// Write alignment padding bytes prior to the program headers. - pub fn write_align_program_headers(&mut self) { - if self.segment_offset == 0 { - return; - } - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.segment_offset, self.buffer.len()); - } - - /// Write a program header. 
- pub fn write_program_header(&mut self, header: &ProgramHeader) { - let endian = self.endian; - if self.is_64 { - let header = elf::ProgramHeader64 { - p_type: U32::new(endian, header.p_type), - p_flags: U32::new(endian, header.p_flags), - p_offset: U64::new(endian, header.p_offset), - p_vaddr: U64::new(endian, header.p_vaddr), - p_paddr: U64::new(endian, header.p_paddr), - p_filesz: U64::new(endian, header.p_filesz), - p_memsz: U64::new(endian, header.p_memsz), - p_align: U64::new(endian, header.p_align), - }; - self.buffer.write(&header); - } else { - let header = elf::ProgramHeader32 { - p_type: U32::new(endian, header.p_type), - p_offset: U32::new(endian, header.p_offset as u32), - p_vaddr: U32::new(endian, header.p_vaddr as u32), - p_paddr: U32::new(endian, header.p_paddr as u32), - p_filesz: U32::new(endian, header.p_filesz as u32), - p_memsz: U32::new(endian, header.p_memsz as u32), - p_flags: U32::new(endian, header.p_flags), - p_align: U32::new(endian, header.p_align as u32), - }; - self.buffer.write(&header); - } - } - - /// Reserve the section index for the null section header. - /// - /// The null section header is usually automatically reserved, - /// but this can be used to force an empty section table. - /// - /// This must be called before [`Self::reserve_section_headers`]. - pub fn reserve_null_section_index(&mut self) -> SectionIndex { - debug_assert_eq!(self.section_num, 0); - if self.section_num == 0 { - self.section_num = 1; - } - SectionIndex(0) - } - - /// Reserve a section table index. - /// - /// Automatically also reserves the null section header if required. - /// - /// This must be called before [`Self::reserve_section_headers`]. 
- pub fn reserve_section_index(&mut self) -> SectionIndex { - debug_assert_eq!(self.section_offset, 0); - if self.section_num == 0 { - self.section_num = 1; - } - let index = self.section_num; - self.section_num += 1; - SectionIndex(index) - } - - fn section_header_size(&self) -> usize { - if self.is_64 { - mem::size_of::>() - } else { - mem::size_of::>() - } - } - - /// Reserve the range for the section headers. - /// - /// This function does nothing if no sections were reserved. - /// This must be called after [`Self::reserve_section_index`] - /// and other functions that reserve section indices. - pub fn reserve_section_headers(&mut self) { - debug_assert_eq!(self.section_offset, 0); - if self.section_num == 0 { - return; - } - self.section_offset = self.reserve( - self.section_num as usize * self.section_header_size(), - self.elf_align, - ); - } - - /// Write the null section header. - /// - /// This must be the first section header that is written. - /// This function does nothing if no sections were reserved. - pub fn write_null_section_header(&mut self) { - if self.section_num == 0 { - return; - } - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.section_offset, self.buffer.len()); - self.write_section_header(&SectionHeader { - name: None, - sh_type: 0, - sh_flags: 0, - sh_addr: 0, - sh_offset: 0, - sh_size: if self.section_num >= elf::SHN_LORESERVE.into() { - self.section_num.into() - } else { - 0 - }, - sh_link: if self.shstrtab_index.0 >= elf::SHN_LORESERVE.into() { - self.shstrtab_index.0 - } else { - 0 - }, - // TODO: e_phnum overflow - sh_info: 0, - sh_addralign: 0, - sh_entsize: 0, - }); - } - - /// Write a section header. 
- pub fn write_section_header(&mut self, section: &SectionHeader) { - let sh_name = if let Some(name) = section.name { - self.shstrtab.get_offset(name) as u32 - } else { - 0 - }; - let endian = self.endian; - if self.is_64 { - let section = elf::SectionHeader64 { - sh_name: U32::new(endian, sh_name), - sh_type: U32::new(endian, section.sh_type), - sh_flags: U64::new(endian, section.sh_flags), - sh_addr: U64::new(endian, section.sh_addr), - sh_offset: U64::new(endian, section.sh_offset), - sh_size: U64::new(endian, section.sh_size), - sh_link: U32::new(endian, section.sh_link), - sh_info: U32::new(endian, section.sh_info), - sh_addralign: U64::new(endian, section.sh_addralign), - sh_entsize: U64::new(endian, section.sh_entsize), - }; - self.buffer.write(§ion); - } else { - let section = elf::SectionHeader32 { - sh_name: U32::new(endian, sh_name), - sh_type: U32::new(endian, section.sh_type), - sh_flags: U32::new(endian, section.sh_flags as u32), - sh_addr: U32::new(endian, section.sh_addr as u32), - sh_offset: U32::new(endian, section.sh_offset as u32), - sh_size: U32::new(endian, section.sh_size as u32), - sh_link: U32::new(endian, section.sh_link), - sh_info: U32::new(endian, section.sh_info), - sh_addralign: U32::new(endian, section.sh_addralign as u32), - sh_entsize: U32::new(endian, section.sh_entsize as u32), - }; - self.buffer.write(§ion); - } - } - - /// Add a section name to the section header string table. - /// - /// This will be stored in the `.shstrtab` section. - /// - /// This must be called before [`Self::reserve_shstrtab`]. - pub fn add_section_name(&mut self, name: &'a [u8]) -> StringId { - debug_assert_eq!(self.shstrtab_offset, 0); - self.shstrtab.add(name) - } - - /// Reserve the range for the section header string table. - /// - /// This range is used for a section named `.shstrtab`. - /// - /// This function does nothing if no sections were reserved. - /// This must be called after [`Self::add_section_name`]. 
- /// and other functions that reserve section names and indices. - pub fn reserve_shstrtab(&mut self) { - debug_assert_eq!(self.shstrtab_offset, 0); - if self.section_num == 0 { - return; - } - // Start with null section name. - self.shstrtab_data = vec![0]; - self.shstrtab.write(1, &mut self.shstrtab_data); - self.shstrtab_offset = self.reserve(self.shstrtab_data.len(), 1); - } - - /// Write the section header string table. - /// - /// This function does nothing if the section was not reserved. - pub fn write_shstrtab(&mut self) { - if self.shstrtab_offset == 0 { - return; - } - debug_assert_eq!(self.shstrtab_offset, self.buffer.len()); - self.buffer.write_bytes(&self.shstrtab_data); - } - - /// Reserve the section index for the section header string table. - /// - /// This must be called before [`Self::reserve_shstrtab`] - /// and [`Self::reserve_section_headers`]. - pub fn reserve_shstrtab_section_index(&mut self) -> SectionIndex { - debug_assert_eq!(self.shstrtab_index, SectionIndex(0)); - self.shstrtab_str_id = Some(self.add_section_name(&b".shstrtab"[..])); - self.shstrtab_index = self.reserve_section_index(); - self.shstrtab_index - } - - /// Write the section header for the section header string table. - /// - /// This function does nothing if the section index was not reserved. - pub fn write_shstrtab_section_header(&mut self) { - if self.shstrtab_index == SectionIndex(0) { - return; - } - self.write_section_header(&SectionHeader { - name: self.shstrtab_str_id, - sh_type: elf::SHT_STRTAB, - sh_flags: 0, - sh_addr: 0, - sh_offset: self.shstrtab_offset as u64, - sh_size: self.shstrtab_data.len() as u64, - sh_link: 0, - sh_info: 0, - sh_addralign: 1, - sh_entsize: 0, - }); - } - - /// Add a string to the string table. - /// - /// This will be stored in the `.strtab` section. - /// - /// This must be called before [`Self::reserve_strtab`]. 
- pub fn add_string(&mut self, name: &'a [u8]) -> StringId { - debug_assert_eq!(self.strtab_offset, 0); - self.need_strtab = true; - self.strtab.add(name) - } - - /// Return true if `.strtab` is needed. - pub fn strtab_needed(&self) -> bool { - self.need_strtab - } - - /// Reserve the range for the string table. - /// - /// This range is used for a section named `.strtab`. - /// - /// This function does nothing if no strings or symbols were defined. - /// This must be called after [`Self::add_string`]. - pub fn reserve_strtab(&mut self) { - debug_assert_eq!(self.strtab_offset, 0); - if !self.need_strtab { - return; - } - // Start with null string. - self.strtab_data = vec![0]; - self.strtab.write(1, &mut self.strtab_data); - self.strtab_offset = self.reserve(self.strtab_data.len(), 1); - } - - /// Write the string table. - /// - /// This function does nothing if the section was not reserved. - pub fn write_strtab(&mut self) { - if self.strtab_offset == 0 { - return; - } - debug_assert_eq!(self.strtab_offset, self.buffer.len()); - self.buffer.write_bytes(&self.strtab_data); - } - - /// Reserve the section index for the string table. - /// - /// This must be called before [`Self::reserve_section_headers`]. - pub fn reserve_strtab_section_index(&mut self) -> SectionIndex { - debug_assert_eq!(self.strtab_index, SectionIndex(0)); - self.strtab_str_id = Some(self.add_section_name(&b".strtab"[..])); - self.strtab_index = self.reserve_section_index(); - self.strtab_index - } - - /// Write the section header for the string table. - /// - /// This function does nothing if the section index was not reserved. 
- pub fn write_strtab_section_header(&mut self) { - if self.strtab_index == SectionIndex(0) { - return; - } - self.write_section_header(&SectionHeader { - name: self.strtab_str_id, - sh_type: elf::SHT_STRTAB, - sh_flags: 0, - sh_addr: 0, - sh_offset: self.strtab_offset as u64, - sh_size: self.strtab_data.len() as u64, - sh_link: 0, - sh_info: 0, - sh_addralign: 1, - sh_entsize: 0, - }); - } - - /// Reserve the null symbol table entry. - /// - /// This will be stored in the `.symtab` section. - /// - /// The null symbol table entry is usually automatically reserved, - /// but this can be used to force an empty symbol table. - /// - /// This must be called before [`Self::reserve_symtab`]. - pub fn reserve_null_symbol_index(&mut self) -> SymbolIndex { - debug_assert_eq!(self.symtab_offset, 0); - debug_assert_eq!(self.symtab_num, 0); - self.symtab_num = 1; - // The symtab must link to a strtab. - self.need_strtab = true; - SymbolIndex(0) - } - - /// Reserve a symbol table entry. - /// - /// This will be stored in the `.symtab` section. - /// - /// `section_index` is used to determine whether `.symtab_shndx` is required. - /// - /// Automatically also reserves the null symbol if required. - /// Callers may assume that the returned indices will be sequential - /// starting at 1. - /// - /// This must be called before [`Self::reserve_symtab`] and - /// [`Self::reserve_symtab_shndx`]. - pub fn reserve_symbol_index(&mut self, section_index: Option) -> SymbolIndex { - debug_assert_eq!(self.symtab_offset, 0); - debug_assert_eq!(self.symtab_shndx_offset, 0); - if self.symtab_num == 0 { - self.symtab_num = 1; - // The symtab must link to a strtab. - self.need_strtab = true; - } - let index = self.symtab_num; - self.symtab_num += 1; - if let Some(section_index) = section_index { - if section_index.0 >= elf::SHN_LORESERVE.into() { - self.need_symtab_shndx = true; - } - } - SymbolIndex(index) - } - - /// Return the number of reserved symbol table entries. 
- /// - /// Includes the null symbol. - pub fn symbol_count(&self) -> u32 { - self.symtab_num - } - - fn symbol_size(&self) -> usize { - if self.is_64 { - mem::size_of::>() - } else { - mem::size_of::>() - } - } - - /// Reserve the range for the symbol table. - /// - /// This range is used for a section named `.symtab`. - /// This function does nothing if no symbols were reserved. - /// This must be called after [`Self::reserve_symbol_index`]. - pub fn reserve_symtab(&mut self) { - debug_assert_eq!(self.symtab_offset, 0); - if self.symtab_num == 0 { - return; - } - self.symtab_offset = self.reserve( - self.symtab_num as usize * self.symbol_size(), - self.elf_align, - ); - } - - /// Write the null symbol. - /// - /// This must be the first symbol that is written. - /// This function does nothing if no symbols were reserved. - pub fn write_null_symbol(&mut self) { - if self.symtab_num == 0 { - return; - } - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.symtab_offset, self.buffer.len()); - if self.is_64 { - self.buffer.write(&elf::Sym64::::default()); - } else { - self.buffer.write(&elf::Sym32::::default()); - } - - if self.need_symtab_shndx { - self.symtab_shndx_data.write_pod(&U32::new(self.endian, 0)); - } - } - - /// Write a symbol. 
- pub fn write_symbol(&mut self, sym: &Sym) { - let st_name = if let Some(name) = sym.name { - self.strtab.get_offset(name) as u32 - } else { - 0 - }; - let st_shndx = if let Some(section) = sym.section { - if section.0 >= elf::SHN_LORESERVE as u32 { - elf::SHN_XINDEX - } else { - section.0 as u16 - } - } else { - sym.st_shndx - }; - - let endian = self.endian; - if self.is_64 { - let sym = elf::Sym64 { - st_name: U32::new(endian, st_name), - st_info: sym.st_info, - st_other: sym.st_other, - st_shndx: U16::new(endian, st_shndx), - st_value: U64::new(endian, sym.st_value), - st_size: U64::new(endian, sym.st_size), - }; - self.buffer.write(&sym); - } else { - let sym = elf::Sym32 { - st_name: U32::new(endian, st_name), - st_info: sym.st_info, - st_other: sym.st_other, - st_shndx: U16::new(endian, st_shndx), - st_value: U32::new(endian, sym.st_value as u32), - st_size: U32::new(endian, sym.st_size as u32), - }; - self.buffer.write(&sym); - } - - if self.need_symtab_shndx { - let section_index = sym.section.unwrap_or(SectionIndex(0)); - self.symtab_shndx_data - .write_pod(&U32::new(self.endian, section_index.0)); - } - } - - /// Reserve the section index for the symbol table. - /// - /// This must be called before [`Self::reserve_section_headers`]. - pub fn reserve_symtab_section_index(&mut self) -> SectionIndex { - debug_assert_eq!(self.symtab_index, SectionIndex(0)); - self.symtab_str_id = Some(self.add_section_name(&b".symtab"[..])); - self.symtab_index = self.reserve_section_index(); - self.symtab_index - } - - /// Return the section index of the symbol table. - pub fn symtab_index(&mut self) -> SectionIndex { - self.symtab_index - } - - /// Write the section header for the symbol table. - /// - /// This function does nothing if the section index was not reserved. 
- pub fn write_symtab_section_header(&mut self, num_local: u32) { - if self.symtab_index == SectionIndex(0) { - return; - } - self.write_section_header(&SectionHeader { - name: self.symtab_str_id, - sh_type: elf::SHT_SYMTAB, - sh_flags: 0, - sh_addr: 0, - sh_offset: self.symtab_offset as u64, - sh_size: self.symtab_num as u64 * self.symbol_size() as u64, - sh_link: self.strtab_index.0, - sh_info: num_local, - sh_addralign: self.elf_align as u64, - sh_entsize: self.symbol_size() as u64, - }); - } - - /// Return true if `.symtab_shndx` is needed. - pub fn symtab_shndx_needed(&self) -> bool { - self.need_symtab_shndx - } - - /// Reserve the range for the extended section indices for the symbol table. - /// - /// This range is used for a section named `.symtab_shndx`. - /// This also reserves a section index. - /// - /// This function does nothing if extended section indices are not needed. - /// This must be called after [`Self::reserve_symbol_index`]. - pub fn reserve_symtab_shndx(&mut self) { - debug_assert_eq!(self.symtab_shndx_offset, 0); - if !self.need_symtab_shndx { - return; - } - self.symtab_shndx_offset = self.reserve(self.symtab_num as usize * 4, 4); - self.symtab_shndx_data.reserve(self.symtab_num as usize * 4); - } - - /// Write the extended section indices for the symbol table. - /// - /// This function does nothing if the section was not reserved. - pub fn write_symtab_shndx(&mut self) { - if self.symtab_shndx_offset == 0 { - return; - } - debug_assert_eq!(self.symtab_shndx_offset, self.buffer.len()); - debug_assert_eq!(self.symtab_num as usize * 4, self.symtab_shndx_data.len()); - self.buffer.write_bytes(&self.symtab_shndx_data); - } - - /// Reserve the section index for the extended section indices symbol table. - /// - /// You should check [`Self::symtab_shndx_needed`] before calling this - /// unless you have other means of knowing if this section is needed. - /// - /// This must be called before [`Self::reserve_section_headers`]. 
- pub fn reserve_symtab_shndx_section_index(&mut self) -> SectionIndex { - debug_assert!(self.symtab_shndx_str_id.is_none()); - self.symtab_shndx_str_id = Some(self.add_section_name(&b".symtab_shndx"[..])); - self.reserve_section_index() - } - - /// Write the section header for the extended section indices for the symbol table. - /// - /// This function does nothing if the section index was not reserved. - pub fn write_symtab_shndx_section_header(&mut self) { - if self.symtab_shndx_str_id.is_none() { - return; - } - let sh_size = if self.symtab_shndx_offset == 0 { - 0 - } else { - (self.symtab_num * 4) as u64 - }; - self.write_section_header(&SectionHeader { - name: self.symtab_shndx_str_id, - sh_type: elf::SHT_SYMTAB_SHNDX, - sh_flags: 0, - sh_addr: 0, - sh_offset: self.symtab_shndx_offset as u64, - sh_size, - sh_link: self.symtab_index.0, - sh_info: 0, - sh_addralign: 4, - sh_entsize: 4, - }); - } - - /// Add a string to the dynamic string table. - /// - /// This will be stored in the `.dynstr` section. - /// - /// This must be called before [`Self::reserve_dynstr`]. - pub fn add_dynamic_string(&mut self, name: &'a [u8]) -> StringId { - debug_assert_eq!(self.dynstr_offset, 0); - self.need_dynstr = true; - self.dynstr.add(name) - } - - /// Get a string that was previously added to the dynamic string table. - /// - /// Panics if the string was not added. - pub fn get_dynamic_string(&self, name: &'a [u8]) -> StringId { - self.dynstr.get_id(name) - } - - /// Return true if `.dynstr` is needed. - pub fn dynstr_needed(&self) -> bool { - self.need_dynstr - } - - /// Reserve the range for the dynamic string table. - /// - /// This range is used for a section named `.dynstr`. - /// - /// This function does nothing if no dynamic strings or symbols were defined. - /// This must be called after [`Self::add_dynamic_string`]. - pub fn reserve_dynstr(&mut self) { - debug_assert_eq!(self.dynstr_offset, 0); - if !self.need_dynstr { - return; - } - // Start with null string. 
- self.dynstr_data = vec![0]; - self.dynstr.write(1, &mut self.dynstr_data); - self.dynstr_offset = self.reserve(self.dynstr_data.len(), 1); - } - - /// Write the dynamic string table. - /// - /// This function does nothing if the section was not reserved. - pub fn write_dynstr(&mut self) { - if self.dynstr_offset == 0 { - return; - } - debug_assert_eq!(self.dynstr_offset, self.buffer.len()); - self.buffer.write_bytes(&self.dynstr_data); - } - - /// Reserve the section index for the dynamic string table. - /// - /// This must be called before [`Self::reserve_section_headers`]. - pub fn reserve_dynstr_section_index(&mut self) -> SectionIndex { - debug_assert_eq!(self.dynstr_index, SectionIndex(0)); - self.dynstr_str_id = Some(self.add_section_name(&b".dynstr"[..])); - self.dynstr_index = self.reserve_section_index(); - self.dynstr_index - } - - /// Return the section index of the dynamic string table. - pub fn dynstr_index(&mut self) -> SectionIndex { - self.dynstr_index - } - - /// Write the section header for the dynamic string table. - /// - /// This function does nothing if the section index was not reserved. - pub fn write_dynstr_section_header(&mut self, sh_addr: u64) { - if self.dynstr_index == SectionIndex(0) { - return; - } - self.write_section_header(&SectionHeader { - name: self.dynstr_str_id, - sh_type: elf::SHT_STRTAB, - sh_flags: elf::SHF_ALLOC.into(), - sh_addr, - sh_offset: self.dynstr_offset as u64, - sh_size: self.dynstr_data.len() as u64, - sh_link: 0, - sh_info: 0, - sh_addralign: 1, - sh_entsize: 0, - }); - } - - /// Reserve the null dynamic symbol table entry. - /// - /// This will be stored in the `.dynsym` section. - /// - /// The null dynamic symbol table entry is usually automatically reserved, - /// but this can be used to force an empty dynamic symbol table. - /// - /// This must be called before [`Self::reserve_dynsym`]. 
- pub fn reserve_null_dynamic_symbol_index(&mut self) -> SymbolIndex { - debug_assert_eq!(self.dynsym_offset, 0); - debug_assert_eq!(self.dynsym_num, 0); - self.dynsym_num = 1; - // The symtab must link to a strtab. - self.need_dynstr = true; - SymbolIndex(0) - } - - /// Reserve a dynamic symbol table entry. - /// - /// This will be stored in the `.dynsym` section. - /// - /// Automatically also reserves the null symbol if required. - /// Callers may assume that the returned indices will be sequential - /// starting at 1. - /// - /// This must be called before [`Self::reserve_dynsym`]. - pub fn reserve_dynamic_symbol_index(&mut self) -> SymbolIndex { - debug_assert_eq!(self.dynsym_offset, 0); - if self.dynsym_num == 0 { - self.dynsym_num = 1; - // The symtab must link to a strtab. - self.need_dynstr = true; - } - let index = self.dynsym_num; - self.dynsym_num += 1; - SymbolIndex(index) - } - - /// Return the number of reserved dynamic symbols. - /// - /// Includes the null symbol. - pub fn dynamic_symbol_count(&mut self) -> u32 { - self.dynsym_num - } - - /// Reserve the range for the dynamic symbol table. - /// - /// This range is used for a section named `.dynsym`. - /// - /// This function does nothing if no dynamic symbols were reserved. - /// This must be called after [`Self::reserve_dynamic_symbol_index`]. - pub fn reserve_dynsym(&mut self) { - debug_assert_eq!(self.dynsym_offset, 0); - if self.dynsym_num == 0 { - return; - } - self.dynsym_offset = self.reserve( - self.dynsym_num as usize * self.symbol_size(), - self.elf_align, - ); - } - - /// Write the null dynamic symbol. - /// - /// This must be the first dynamic symbol that is written. - /// This function does nothing if no dynamic symbols were reserved. 
- pub fn write_null_dynamic_symbol(&mut self) { - if self.dynsym_num == 0 { - return; - } - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.dynsym_offset, self.buffer.len()); - if self.is_64 { - self.buffer.write(&elf::Sym64::::default()); - } else { - self.buffer.write(&elf::Sym32::::default()); - } - } - - /// Write a dynamic symbol. - pub fn write_dynamic_symbol(&mut self, sym: &Sym) { - let st_name = if let Some(name) = sym.name { - self.dynstr.get_offset(name) as u32 - } else { - 0 - }; - - let st_shndx = if let Some(section) = sym.section { - if section.0 >= elf::SHN_LORESERVE as u32 { - // TODO: we don't actually write out .dynsym_shndx yet. - // This is unlikely to be needed though. - elf::SHN_XINDEX - } else { - section.0 as u16 - } - } else { - sym.st_shndx - }; - - let endian = self.endian; - if self.is_64 { - let sym = elf::Sym64 { - st_name: U32::new(endian, st_name), - st_info: sym.st_info, - st_other: sym.st_other, - st_shndx: U16::new(endian, st_shndx), - st_value: U64::new(endian, sym.st_value), - st_size: U64::new(endian, sym.st_size), - }; - self.buffer.write(&sym); - } else { - let sym = elf::Sym32 { - st_name: U32::new(endian, st_name), - st_info: sym.st_info, - st_other: sym.st_other, - st_shndx: U16::new(endian, st_shndx), - st_value: U32::new(endian, sym.st_value as u32), - st_size: U32::new(endian, sym.st_size as u32), - }; - self.buffer.write(&sym); - } - } - - /// Reserve the section index for the dynamic symbol table. - /// - /// This must be called before [`Self::reserve_section_headers`]. - pub fn reserve_dynsym_section_index(&mut self) -> SectionIndex { - debug_assert_eq!(self.dynsym_index, SectionIndex(0)); - self.dynsym_str_id = Some(self.add_section_name(&b".dynsym"[..])); - self.dynsym_index = self.reserve_section_index(); - self.dynsym_index - } - - /// Return the section index of the dynamic symbol table. 
- pub fn dynsym_index(&mut self) -> SectionIndex { - self.dynsym_index - } - - /// Write the section header for the dynamic symbol table. - /// - /// This function does nothing if the section index was not reserved. - pub fn write_dynsym_section_header(&mut self, sh_addr: u64, num_local: u32) { - if self.dynsym_index == SectionIndex(0) { - return; - } - self.write_section_header(&SectionHeader { - name: self.dynsym_str_id, - sh_type: elf::SHT_DYNSYM, - sh_flags: elf::SHF_ALLOC.into(), - sh_addr, - sh_offset: self.dynsym_offset as u64, - sh_size: self.dynsym_num as u64 * self.symbol_size() as u64, - sh_link: self.dynstr_index.0, - sh_info: num_local, - sh_addralign: self.elf_align as u64, - sh_entsize: self.symbol_size() as u64, - }); - } - - fn dyn_size(&self) -> usize { - if self.is_64 { - mem::size_of::>() - } else { - mem::size_of::>() - } - } - - /// Reserve the range for the `.dynamic` section. - /// - /// This function does nothing if `dynamic_num` is zero. - pub fn reserve_dynamic(&mut self, dynamic_num: usize) { - debug_assert_eq!(self.dynamic_offset, 0); - if dynamic_num == 0 { - return; - } - self.dynamic_num = dynamic_num; - self.dynamic_offset = self.reserve(dynamic_num * self.dyn_size(), self.elf_align); - } - - /// Write alignment padding bytes prior to the `.dynamic` section. - /// - /// This function does nothing if the section was not reserved. - pub fn write_align_dynamic(&mut self) { - if self.dynamic_offset == 0 { - return; - } - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.dynamic_offset, self.buffer.len()); - } - - /// Write a dynamic string entry. - pub fn write_dynamic_string(&mut self, tag: u32, id: StringId) { - self.write_dynamic(tag, self.dynstr.get_offset(id) as u64); - } - - /// Write a dynamic value entry. 
- pub fn write_dynamic(&mut self, d_tag: u32, d_val: u64) { - debug_assert!(self.dynamic_offset <= self.buffer.len()); - let endian = self.endian; - if self.is_64 { - let d = elf::Dyn64 { - d_tag: U64::new(endian, d_tag.into()), - d_val: U64::new(endian, d_val), - }; - self.buffer.write(&d); - } else { - let d = elf::Dyn32 { - d_tag: U32::new(endian, d_tag), - d_val: U32::new(endian, d_val as u32), - }; - self.buffer.write(&d); - } - debug_assert!( - self.dynamic_offset + self.dynamic_num * self.dyn_size() >= self.buffer.len() - ); - } - - /// Reserve the section index for the dynamic table. - pub fn reserve_dynamic_section_index(&mut self) -> SectionIndex { - debug_assert!(self.dynamic_str_id.is_none()); - self.dynamic_str_id = Some(self.add_section_name(&b".dynamic"[..])); - self.reserve_section_index() - } - - /// Write the section header for the dynamic table. - /// - /// This function does nothing if the section index was not reserved. - pub fn write_dynamic_section_header(&mut self, sh_addr: u64) { - if self.dynamic_str_id.is_none() { - return; - } - self.write_section_header(&SectionHeader { - name: self.dynamic_str_id, - sh_type: elf::SHT_DYNAMIC, - sh_flags: (elf::SHF_WRITE | elf::SHF_ALLOC).into(), - sh_addr, - sh_offset: self.dynamic_offset as u64, - sh_size: (self.dynamic_num * self.dyn_size()) as u64, - sh_link: self.dynstr_index.0, - sh_info: 0, - sh_addralign: self.elf_align as u64, - sh_entsize: self.dyn_size() as u64, - }); - } - - fn rel_size(&self, is_rela: bool) -> usize { - if self.is_64 { - if is_rela { - mem::size_of::>() - } else { - mem::size_of::>() - } - } else { - if is_rela { - mem::size_of::>() - } else { - mem::size_of::>() - } - } - } - - /// Reserve a file range for a SysV hash section. - /// - /// `symbol_count` is the number of symbols in the hash, - /// not the total number of symbols. 
- pub fn reserve_hash(&mut self, bucket_count: u32, chain_count: u32) { - self.hash_size = mem::size_of::>() - + bucket_count as usize * 4 - + chain_count as usize * 4; - self.hash_offset = self.reserve(self.hash_size, self.elf_align); - } - - /// Write a SysV hash section. - /// - /// `chain_count` is the number of symbols in the hash. - /// The argument to `hash` will be in the range `0..chain_count`. - pub fn write_hash(&mut self, bucket_count: u32, chain_count: u32, hash: F) - where - F: Fn(u32) -> Option, - { - let mut buckets = vec![U32::new(self.endian, 0); bucket_count as usize]; - let mut chains = vec![U32::new(self.endian, 0); chain_count as usize]; - for i in 0..chain_count { - if let Some(hash) = hash(i) { - let bucket = hash % bucket_count; - chains[i as usize] = buckets[bucket as usize]; - buckets[bucket as usize] = U32::new(self.endian, i); - } - } - - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.hash_offset, self.buffer.len()); - self.buffer.write(&elf::HashHeader { - bucket_count: U32::new(self.endian, bucket_count), - chain_count: U32::new(self.endian, chain_count), - }); - self.buffer.write_slice(&buckets); - self.buffer.write_slice(&chains); - } - - /// Reserve the section index for the SysV hash table. - pub fn reserve_hash_section_index(&mut self) -> SectionIndex { - debug_assert!(self.hash_str_id.is_none()); - self.hash_str_id = Some(self.add_section_name(&b".hash"[..])); - self.reserve_section_index() - } - - /// Write the section header for the SysV hash table. - /// - /// This function does nothing if the section index was not reserved. 
- pub fn write_hash_section_header(&mut self, sh_addr: u64) { - if self.hash_str_id.is_none() { - return; - } - self.write_section_header(&SectionHeader { - name: self.hash_str_id, - sh_type: elf::SHT_HASH, - sh_flags: elf::SHF_ALLOC.into(), - sh_addr, - sh_offset: self.hash_offset as u64, - sh_size: self.hash_size as u64, - sh_link: self.dynsym_index.0, - sh_info: 0, - sh_addralign: self.elf_align as u64, - sh_entsize: 4, - }); - } - - /// Reserve a file range for a GNU hash section. - /// - /// `symbol_count` is the number of symbols in the hash, - /// not the total number of symbols. - pub fn reserve_gnu_hash(&mut self, bloom_count: u32, bucket_count: u32, symbol_count: u32) { - self.gnu_hash_size = mem::size_of::>() - + bloom_count as usize * self.elf_align - + bucket_count as usize * 4 - + symbol_count as usize * 4; - self.gnu_hash_offset = self.reserve(self.gnu_hash_size, self.elf_align); - } - - /// Write a GNU hash section. - /// - /// `symbol_count` is the number of symbols in the hash. - /// The argument to `hash` will be in the range `0..symbol_count`. - /// - /// This requires that symbols are already sorted by bucket. - pub fn write_gnu_hash( - &mut self, - symbol_base: u32, - bloom_shift: u32, - bloom_count: u32, - bucket_count: u32, - symbol_count: u32, - hash: F, - ) where - F: Fn(u32) -> u32, - { - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.gnu_hash_offset, self.buffer.len()); - self.buffer.write(&elf::GnuHashHeader { - bucket_count: U32::new(self.endian, bucket_count), - symbol_base: U32::new(self.endian, symbol_base), - bloom_count: U32::new(self.endian, bloom_count), - bloom_shift: U32::new(self.endian, bloom_shift), - }); - - // Calculate and write bloom filter. 
- if self.is_64 { - let mut bloom_filters = vec![0; bloom_count as usize]; - for i in 0..symbol_count { - let h = hash(i); - bloom_filters[((h / 64) & (bloom_count - 1)) as usize] |= - 1 << (h % 64) | 1 << ((h >> bloom_shift) % 64); - } - for bloom_filter in bloom_filters { - self.buffer.write(&U64::new(self.endian, bloom_filter)); - } - } else { - let mut bloom_filters = vec![0; bloom_count as usize]; - for i in 0..symbol_count { - let h = hash(i); - bloom_filters[((h / 32) & (bloom_count - 1)) as usize] |= - 1 << (h % 32) | 1 << ((h >> bloom_shift) % 32); - } - for bloom_filter in bloom_filters { - self.buffer.write(&U32::new(self.endian, bloom_filter)); - } - } - - // Write buckets. - // - // This requires that symbols are already sorted by bucket. - let mut bucket = 0; - for i in 0..symbol_count { - let symbol_bucket = hash(i) % bucket_count; - while bucket < symbol_bucket { - self.buffer.write(&U32::new(self.endian, 0)); - bucket += 1; - } - if bucket == symbol_bucket { - self.buffer.write(&U32::new(self.endian, symbol_base + i)); - bucket += 1; - } - } - while bucket < bucket_count { - self.buffer.write(&U32::new(self.endian, 0)); - bucket += 1; - } - - // Write hash values. - for i in 0..symbol_count { - let mut h = hash(i); - if i == symbol_count - 1 || h % bucket_count != hash(i + 1) % bucket_count { - h |= 1; - } else { - h &= !1; - } - self.buffer.write(&U32::new(self.endian, h)); - } - } - - /// Reserve the section index for the GNU hash table. - pub fn reserve_gnu_hash_section_index(&mut self) -> SectionIndex { - debug_assert!(self.gnu_hash_str_id.is_none()); - self.gnu_hash_str_id = Some(self.add_section_name(&b".gnu.hash"[..])); - self.reserve_section_index() - } - - /// Write the section header for the GNU hash table. - /// - /// This function does nothing if the section index was not reserved. 
- pub fn write_gnu_hash_section_header(&mut self, sh_addr: u64) { - if self.gnu_hash_str_id.is_none() { - return; - } - self.write_section_header(&SectionHeader { - name: self.gnu_hash_str_id, - sh_type: elf::SHT_GNU_HASH, - sh_flags: elf::SHF_ALLOC.into(), - sh_addr, - sh_offset: self.gnu_hash_offset as u64, - sh_size: self.gnu_hash_size as u64, - sh_link: self.dynsym_index.0, - sh_info: 0, - sh_addralign: self.elf_align as u64, - sh_entsize: 0, - }); - } - - /// Reserve the range for the `.gnu.version` section. - /// - /// This function does nothing if no dynamic symbols were reserved. - pub fn reserve_gnu_versym(&mut self) { - debug_assert_eq!(self.gnu_versym_offset, 0); - if self.dynsym_num == 0 { - return; - } - self.gnu_versym_offset = self.reserve(self.dynsym_num as usize * 2, 2); - } - - /// Write the null symbol version entry. - /// - /// This must be the first symbol version that is written. - /// This function does nothing if no dynamic symbols were reserved. - pub fn write_null_gnu_versym(&mut self) { - if self.dynsym_num == 0 { - return; - } - util::write_align(self.buffer, 2); - debug_assert_eq!(self.gnu_versym_offset, self.buffer.len()); - self.write_gnu_versym(0); - } - - /// Write a symbol version entry. - pub fn write_gnu_versym(&mut self, versym: u16) { - self.buffer.write(&U16::new(self.endian, versym)); - } - - /// Reserve the section index for the `.gnu.version` section. - pub fn reserve_gnu_versym_section_index(&mut self) -> SectionIndex { - debug_assert!(self.gnu_versym_str_id.is_none()); - self.gnu_versym_str_id = Some(self.add_section_name(&b".gnu.version"[..])); - self.reserve_section_index() - } - - /// Write the section header for the `.gnu.version` section. - /// - /// This function does nothing if the section index was not reserved. 
- pub fn write_gnu_versym_section_header(&mut self, sh_addr: u64) { - if self.gnu_versym_str_id.is_none() { - return; - } - self.write_section_header(&SectionHeader { - name: self.gnu_versym_str_id, - sh_type: elf::SHT_GNU_VERSYM, - sh_flags: elf::SHF_ALLOC.into(), - sh_addr, - sh_offset: self.gnu_versym_offset as u64, - sh_size: self.dynsym_num as u64 * 2, - sh_link: self.dynsym_index.0, - sh_info: 0, - sh_addralign: 2, - sh_entsize: 2, - }); - } - - /// Reserve the range for the `.gnu.version_d` section. - pub fn reserve_gnu_verdef(&mut self, verdef_count: usize, verdaux_count: usize) { - debug_assert_eq!(self.gnu_verdef_offset, 0); - if verdef_count == 0 { - return; - } - self.gnu_verdef_size = verdef_count * mem::size_of::>() - + verdaux_count * mem::size_of::>(); - self.gnu_verdef_offset = self.reserve(self.gnu_verdef_size, self.elf_align); - self.gnu_verdef_count = verdef_count as u16; - self.gnu_verdef_remaining = self.gnu_verdef_count; - } - - /// Write alignment padding bytes prior to a `.gnu.version_d` section. - pub fn write_align_gnu_verdef(&mut self) { - if self.gnu_verdef_offset == 0 { - return; - } - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.gnu_verdef_offset, self.buffer.len()); - } - - /// Write a version definition entry. 
- pub fn write_gnu_verdef(&mut self, verdef: &Verdef) { - debug_assert_ne!(self.gnu_verdef_remaining, 0); - self.gnu_verdef_remaining -= 1; - let vd_next = if self.gnu_verdef_remaining == 0 { - 0 - } else { - mem::size_of::>() as u32 - + verdef.aux_count as u32 * mem::size_of::>() as u32 - }; - - self.gnu_verdaux_remaining = verdef.aux_count; - let vd_aux = if verdef.aux_count == 0 { - 0 - } else { - mem::size_of::>() as u32 - }; - - self.buffer.write(&elf::Verdef { - vd_version: U16::new(self.endian, verdef.version), - vd_flags: U16::new(self.endian, verdef.flags), - vd_ndx: U16::new(self.endian, verdef.index), - vd_cnt: U16::new(self.endian, verdef.aux_count), - vd_hash: U32::new(self.endian, elf::hash(self.dynstr.get_string(verdef.name))), - vd_aux: U32::new(self.endian, vd_aux), - vd_next: U32::new(self.endian, vd_next), - }); - self.write_gnu_verdaux(verdef.name); - } - - /// Write a version definition auxiliary entry. - pub fn write_gnu_verdaux(&mut self, name: StringId) { - debug_assert_ne!(self.gnu_verdaux_remaining, 0); - self.gnu_verdaux_remaining -= 1; - let vda_next = if self.gnu_verdaux_remaining == 0 { - 0 - } else { - mem::size_of::>() as u32 - }; - self.buffer.write(&elf::Verdaux { - vda_name: U32::new(self.endian, self.dynstr.get_offset(name) as u32), - vda_next: U32::new(self.endian, vda_next), - }); - } - - /// Reserve the section index for the `.gnu.version_d` section. - pub fn reserve_gnu_verdef_section_index(&mut self) -> SectionIndex { - debug_assert!(self.gnu_verdef_str_id.is_none()); - self.gnu_verdef_str_id = Some(self.add_section_name(&b".gnu.version_d"[..])); - self.reserve_section_index() - } - - /// Write the section header for the `.gnu.version_d` section. - /// - /// This function does nothing if the section index was not reserved. 
- pub fn write_gnu_verdef_section_header(&mut self, sh_addr: u64) { - if self.gnu_verdef_str_id.is_none() { - return; - } - self.write_section_header(&SectionHeader { - name: self.gnu_verdef_str_id, - sh_type: elf::SHT_GNU_VERDEF, - sh_flags: elf::SHF_ALLOC.into(), - sh_addr, - sh_offset: self.gnu_verdef_offset as u64, - sh_size: self.gnu_verdef_size as u64, - sh_link: self.dynstr_index.0, - sh_info: self.gnu_verdef_count.into(), - sh_addralign: self.elf_align as u64, - sh_entsize: 0, - }); - } - - /// Reserve the range for the `.gnu.version_r` section. - pub fn reserve_gnu_verneed(&mut self, verneed_count: usize, vernaux_count: usize) { - debug_assert_eq!(self.gnu_verneed_offset, 0); - if verneed_count == 0 { - return; - } - self.gnu_verneed_size = verneed_count * mem::size_of::>() - + vernaux_count * mem::size_of::>(); - self.gnu_verneed_offset = self.reserve(self.gnu_verneed_size, self.elf_align); - self.gnu_verneed_count = verneed_count as u16; - self.gnu_verneed_remaining = self.gnu_verneed_count; - } - - /// Write alignment padding bytes prior to a `.gnu.version_r` section. - pub fn write_align_gnu_verneed(&mut self) { - if self.gnu_verneed_offset == 0 { - return; - } - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.gnu_verneed_offset, self.buffer.len()); - } - - /// Write a version need entry. 
- pub fn write_gnu_verneed(&mut self, verneed: &Verneed) { - debug_assert_ne!(self.gnu_verneed_remaining, 0); - self.gnu_verneed_remaining -= 1; - let vn_next = if self.gnu_verneed_remaining == 0 { - 0 - } else { - mem::size_of::>() as u32 - + verneed.aux_count as u32 * mem::size_of::>() as u32 - }; - - self.gnu_vernaux_remaining = verneed.aux_count; - let vn_aux = if verneed.aux_count == 0 { - 0 - } else { - mem::size_of::>() as u32 - }; - - self.buffer.write(&elf::Verneed { - vn_version: U16::new(self.endian, verneed.version), - vn_cnt: U16::new(self.endian, verneed.aux_count), - vn_file: U32::new(self.endian, self.dynstr.get_offset(verneed.file) as u32), - vn_aux: U32::new(self.endian, vn_aux), - vn_next: U32::new(self.endian, vn_next), - }); - } - - /// Write a version need auxiliary entry. - pub fn write_gnu_vernaux(&mut self, vernaux: &Vernaux) { - debug_assert_ne!(self.gnu_vernaux_remaining, 0); - self.gnu_vernaux_remaining -= 1; - let vna_next = if self.gnu_vernaux_remaining == 0 { - 0 - } else { - mem::size_of::>() as u32 - }; - self.buffer.write(&elf::Vernaux { - vna_hash: U32::new(self.endian, elf::hash(self.dynstr.get_string(vernaux.name))), - vna_flags: U16::new(self.endian, vernaux.flags), - vna_other: U16::new(self.endian, vernaux.index), - vna_name: U32::new(self.endian, self.dynstr.get_offset(vernaux.name) as u32), - vna_next: U32::new(self.endian, vna_next), - }); - } - - /// Reserve the section index for the `.gnu.version_r` section. - pub fn reserve_gnu_verneed_section_index(&mut self) -> SectionIndex { - debug_assert!(self.gnu_verneed_str_id.is_none()); - self.gnu_verneed_str_id = Some(self.add_section_name(&b".gnu.version_r"[..])); - self.reserve_section_index() - } - - /// Write the section header for the `.gnu.version_r` section. - /// - /// This function does nothing if the section index was not reserved. 
- pub fn write_gnu_verneed_section_header(&mut self, sh_addr: u64) { - if self.gnu_verneed_str_id.is_none() { - return; - } - self.write_section_header(&SectionHeader { - name: self.gnu_verneed_str_id, - sh_type: elf::SHT_GNU_VERNEED, - sh_flags: elf::SHF_ALLOC.into(), - sh_addr, - sh_offset: self.gnu_verneed_offset as u64, - sh_size: self.gnu_verneed_size as u64, - sh_link: self.dynstr_index.0, - sh_info: self.gnu_verneed_count.into(), - sh_addralign: self.elf_align as u64, - sh_entsize: 0, - }); - } - - /// Reserve the section index for the `.gnu.attributes` section. - pub fn reserve_gnu_attributes_section_index(&mut self) -> SectionIndex { - debug_assert!(self.gnu_attributes_str_id.is_none()); - self.gnu_attributes_str_id = Some(self.add_section_name(&b".gnu.attributes"[..])); - self.reserve_section_index() - } - - /// Reserve the range for the `.gnu.attributes` section. - pub fn reserve_gnu_attributes(&mut self, gnu_attributes_size: usize) { - debug_assert_eq!(self.gnu_attributes_offset, 0); - if gnu_attributes_size == 0 { - return; - } - self.gnu_attributes_size = gnu_attributes_size; - self.gnu_attributes_offset = self.reserve(self.gnu_attributes_size, self.elf_align); - } - - /// Write the section header for the `.gnu.attributes` section. - /// - /// This function does nothing if the section index was not reserved. - pub fn write_gnu_attributes_section_header(&mut self) { - if self.gnu_attributes_str_id.is_none() { - return; - } - self.write_section_header(&SectionHeader { - name: self.gnu_attributes_str_id, - sh_type: elf::SHT_GNU_ATTRIBUTES, - sh_flags: 0, - sh_addr: 0, - sh_offset: self.gnu_attributes_offset as u64, - sh_size: self.gnu_attributes_size as u64, - sh_link: self.dynstr_index.0, - sh_info: 0, // TODO - sh_addralign: self.elf_align as u64, - sh_entsize: 0, - }); - } - - /// Write the data for the `.gnu.attributes` section. 
- pub fn write_gnu_attributes(&mut self, data: &[u8]) { - if self.gnu_attributes_offset == 0 { - return; - } - util::write_align(self.buffer, self.elf_align); - debug_assert_eq!(self.gnu_attributes_offset, self.buffer.len()); - self.buffer.write_bytes(data); - } - - /// Reserve a file range for the given number of relocations. - /// - /// Returns the offset of the range. - pub fn reserve_relocations(&mut self, count: usize, is_rela: bool) -> usize { - self.reserve(count * self.rel_size(is_rela), self.elf_align) - } - - /// Write alignment padding bytes prior to a relocation section. - pub fn write_align_relocation(&mut self) { - util::write_align(self.buffer, self.elf_align); - } - - /// Write a relocation. - pub fn write_relocation(&mut self, is_rela: bool, rel: &Rel) { - let endian = self.endian; - if self.is_64 { - if is_rela { - let rel = elf::Rela64 { - r_offset: U64::new(endian, rel.r_offset), - r_info: elf::Rela64::r_info(endian, self.is_mips64el, rel.r_sym, rel.r_type), - r_addend: I64::new(endian, rel.r_addend), - }; - self.buffer.write(&rel); - } else { - let rel = elf::Rel64 { - r_offset: U64::new(endian, rel.r_offset), - r_info: elf::Rel64::r_info(endian, rel.r_sym, rel.r_type), - }; - self.buffer.write(&rel); - } - } else { - if is_rela { - let rel = elf::Rela32 { - r_offset: U32::new(endian, rel.r_offset as u32), - r_info: elf::Rel32::r_info(endian, rel.r_sym, rel.r_type as u8), - r_addend: I32::new(endian, rel.r_addend as i32), - }; - self.buffer.write(&rel); - } else { - let rel = elf::Rel32 { - r_offset: U32::new(endian, rel.r_offset as u32), - r_info: elf::Rel32::r_info(endian, rel.r_sym, rel.r_type as u8), - }; - self.buffer.write(&rel); - } - } - } - - /// Write the section header for a relocation section. - /// - /// `section` is the index of the section the relocations apply to, - /// or 0 if none. - /// - /// `symtab` is the index of the symbol table the relocations refer to, - /// or 0 if none. 
- /// - /// `offset` is the file offset of the relocations. - pub fn write_relocation_section_header( - &mut self, - name: StringId, - section: SectionIndex, - symtab: SectionIndex, - offset: usize, - count: usize, - is_rela: bool, - ) { - self.write_section_header(&SectionHeader { - name: Some(name), - sh_type: if is_rela { elf::SHT_RELA } else { elf::SHT_REL }, - sh_flags: elf::SHF_INFO_LINK.into(), - sh_addr: 0, - sh_offset: offset as u64, - sh_size: (count * self.rel_size(is_rela)) as u64, - sh_link: symtab.0, - sh_info: section.0, - sh_addralign: self.elf_align as u64, - sh_entsize: self.rel_size(is_rela) as u64, - }); - } - - /// Reserve a file range for a COMDAT section. - /// - /// `count` is the number of sections in the COMDAT group. - /// - /// Returns the offset of the range. - pub fn reserve_comdat(&mut self, count: usize) -> usize { - self.reserve((count + 1) * 4, 4) - } - - /// Write `GRP_COMDAT` at the start of the COMDAT section. - pub fn write_comdat_header(&mut self) { - util::write_align(self.buffer, 4); - self.buffer.write(&U32::new(self.endian, elf::GRP_COMDAT)); - } - - /// Write an entry in a COMDAT section. - pub fn write_comdat_entry(&mut self, entry: SectionIndex) { - self.buffer.write(&U32::new(self.endian, entry.0)); - } - - /// Write the section header for a COMDAT section. - pub fn write_comdat_section_header( - &mut self, - name: StringId, - symtab: SectionIndex, - symbol: SymbolIndex, - offset: usize, - count: usize, - ) { - self.write_section_header(&SectionHeader { - name: Some(name), - sh_type: elf::SHT_GROUP, - sh_flags: 0, - sh_addr: 0, - sh_offset: offset as u64, - sh_size: ((count + 1) * 4) as u64, - sh_link: symtab.0, - sh_info: symbol.0, - sh_addralign: 4, - sh_entsize: 4, - }); - } - - /// Return a helper for writing an attributes section. - pub fn attributes_writer(&self) -> AttributesWriter { - AttributesWriter::new(self.endian) - } -} - -/// A helper for writing an attributes section. 
-/// -/// Attributes have a variable length encoding, so it is awkward to write them in a -/// single pass. Instead, we build the entire attributes section data in memory, using -/// placeholders for unknown lengths that are filled in later. -#[allow(missing_debug_implementations)] -pub struct AttributesWriter { - endian: Endianness, - data: Vec, - subsection_offset: usize, - subsubsection_offset: usize, -} - -impl AttributesWriter { - /// Create a new `AttributesWriter` for the given endianness. - pub fn new(endian: Endianness) -> Self { - AttributesWriter { - endian, - data: vec![0x41], - subsection_offset: 0, - subsubsection_offset: 0, - } - } - - /// Start a new subsection with the given vendor name. - pub fn start_subsection(&mut self, vendor: &[u8]) { - debug_assert_eq!(self.subsection_offset, 0); - debug_assert_eq!(self.subsubsection_offset, 0); - self.subsection_offset = self.data.len(); - self.data.extend_from_slice(&[0; 4]); - self.data.extend_from_slice(vendor); - self.data.push(0); - } - - /// End the subsection. - /// - /// The subsection length is automatically calculated and written. - pub fn end_subsection(&mut self) { - debug_assert_ne!(self.subsection_offset, 0); - debug_assert_eq!(self.subsubsection_offset, 0); - let length = self.data.len() - self.subsection_offset; - self.data[self.subsection_offset..][..4] - .copy_from_slice(pod::bytes_of(&U32::new(self.endian, length as u32))); - self.subsection_offset = 0; - } - - /// Start a new sub-subsection with the given tag. - pub fn start_subsubsection(&mut self, tag: u8) { - debug_assert_ne!(self.subsection_offset, 0); - debug_assert_eq!(self.subsubsection_offset, 0); - self.subsubsection_offset = self.data.len(); - self.data.push(tag); - self.data.extend_from_slice(&[0; 4]); - } - - /// Write a section or symbol index to the sub-subsection. - /// - /// The user must also call this function to write the terminating 0 index. 
- pub fn write_subsubsection_index(&mut self, index: u32) { - debug_assert_ne!(self.subsection_offset, 0); - debug_assert_ne!(self.subsubsection_offset, 0); - util::write_uleb128(&mut self.data, u64::from(index)); - } - - /// Write raw index data to the sub-subsection. - /// - /// The terminating 0 index is automatically written. - pub fn write_subsubsection_indices(&mut self, indices: &[u8]) { - debug_assert_ne!(self.subsection_offset, 0); - debug_assert_ne!(self.subsubsection_offset, 0); - self.data.extend_from_slice(indices); - self.data.push(0); - } - - /// Write an attribute tag to the sub-subsection. - pub fn write_attribute_tag(&mut self, tag: u64) { - debug_assert_ne!(self.subsection_offset, 0); - debug_assert_ne!(self.subsubsection_offset, 0); - util::write_uleb128(&mut self.data, tag); - } - - /// Write an attribute integer value to the sub-subsection. - pub fn write_attribute_integer(&mut self, value: u64) { - debug_assert_ne!(self.subsection_offset, 0); - debug_assert_ne!(self.subsubsection_offset, 0); - util::write_uleb128(&mut self.data, value); - } - - /// Write an attribute string value to the sub-subsection. - /// - /// The value must not include the null terminator. - pub fn write_attribute_string(&mut self, value: &[u8]) { - debug_assert_ne!(self.subsection_offset, 0); - debug_assert_ne!(self.subsubsection_offset, 0); - self.data.extend_from_slice(value); - self.data.push(0); - } - - /// Write raw attribute data to the sub-subsection. - pub fn write_subsubsection_attributes(&mut self, attributes: &[u8]) { - debug_assert_ne!(self.subsection_offset, 0); - debug_assert_ne!(self.subsubsection_offset, 0); - self.data.extend_from_slice(attributes); - } - - /// End the sub-subsection. - /// - /// The sub-subsection length is automatically calculated and written. 
- pub fn end_subsubsection(&mut self) { - debug_assert_ne!(self.subsection_offset, 0); - debug_assert_ne!(self.subsubsection_offset, 0); - let length = self.data.len() - self.subsubsection_offset; - self.data[self.subsubsection_offset + 1..][..4] - .copy_from_slice(pod::bytes_of(&U32::new(self.endian, length as u32))); - self.subsubsection_offset = 0; - } - - /// Return the completed section data. - pub fn data(self) -> Vec { - debug_assert_eq!(self.subsection_offset, 0); - debug_assert_eq!(self.subsubsection_offset, 0); - self.data - } -} - -/// Native endian version of [`elf::FileHeader64`]. -#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct FileHeader { - pub os_abi: u8, - pub abi_version: u8, - pub e_type: u16, - pub e_machine: u16, - pub e_entry: u64, - pub e_flags: u32, -} - -/// Native endian version of [`elf::ProgramHeader64`]. -#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct ProgramHeader { - pub p_type: u32, - pub p_flags: u32, - pub p_offset: u64, - pub p_vaddr: u64, - pub p_paddr: u64, - pub p_filesz: u64, - pub p_memsz: u64, - pub p_align: u64, -} - -/// Native endian version of [`elf::SectionHeader64`]. -#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct SectionHeader { - pub name: Option, - pub sh_type: u32, - pub sh_flags: u64, - pub sh_addr: u64, - pub sh_offset: u64, - pub sh_size: u64, - pub sh_link: u32, - pub sh_info: u32, - pub sh_addralign: u64, - pub sh_entsize: u64, -} - -/// Native endian version of [`elf::Sym64`]. -#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct Sym { - pub name: Option, - pub section: Option, - pub st_info: u8, - pub st_other: u8, - pub st_shndx: u16, - pub st_value: u64, - pub st_size: u64, -} - -/// Unified native endian version of [`elf::Rel64`] and [`elf::Rela64`]. -#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct Rel { - pub r_offset: u64, - pub r_sym: u32, - pub r_type: u32, - pub r_addend: i64, -} - -/// Information required for writing [`elf::Verdef`]. 
-#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct Verdef { - pub version: u16, - pub flags: u16, - pub index: u16, - pub aux_count: u16, - /// The name for the first [`elf::Verdaux`] entry. - pub name: StringId, -} - -/// Information required for writing [`elf::Verneed`]. -#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct Verneed { - pub version: u16, - pub aux_count: u16, - pub file: StringId, -} - -/// Information required for writing [`elf::Vernaux`]. -#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct Vernaux { - pub flags: u16, - pub index: u16, - pub name: StringId, -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/macho.rs s390-tools-2.33.1/rust-vendor/object/src/write/macho.rs --- s390-tools-2.31.0/rust-vendor/object/src/write/macho.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/macho.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,978 +0,0 @@ -use core::mem; - -use crate::endian::*; -use crate::macho; -use crate::write::string::*; -use crate::write::util::*; -use crate::write::*; -use crate::AddressSize; - -#[derive(Default, Clone, Copy)] -struct SectionOffsets { - index: usize, - offset: usize, - address: u64, - reloc_offset: usize, -} - -#[derive(Default, Clone, Copy)] -struct SymbolOffsets { - emit: bool, - index: usize, - str_id: Option, -} - -/// The customizable portion of a [`macho::BuildVersionCommand`]. -#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] // May want to add the tool list? -pub struct MachOBuildVersion { - /// One of the `PLATFORM_` constants (for example, - /// [`object::macho::PLATFORM_MACOS`](macho::PLATFORM_MACOS)). - pub platform: u32, - /// The minimum OS version, where `X.Y.Z` is encoded in nibbles as - /// `xxxx.yy.zz`. - pub minos: u32, - /// The SDK version as `X.Y.Z`, where `X.Y.Z` is encoded in nibbles as - /// `xxxx.yy.zz`. 
- pub sdk: u32, -} - -impl MachOBuildVersion { - fn cmdsize(&self) -> u32 { - // Same size for both endianness, and we don't have `ntools`. - let sz = mem::size_of::>(); - debug_assert!(sz <= u32::MAX as usize); - sz as u32 - } -} - -// Public methods. -impl<'a> Object<'a> { - /// Specify the Mach-O CPU subtype. - /// - /// Requires `feature = "macho"`. - #[inline] - pub fn set_macho_cpu_subtype(&mut self, cpu_subtype: u32) { - self.macho_cpu_subtype = Some(cpu_subtype); - } - - /// Specify information for a Mach-O `LC_BUILD_VERSION` command. - /// - /// Requires `feature = "macho"`. - #[inline] - pub fn set_macho_build_version(&mut self, info: MachOBuildVersion) { - self.macho_build_version = Some(info); - } -} - -// Private methods. -impl<'a> Object<'a> { - pub(crate) fn macho_set_subsections_via_symbols(&mut self) { - let flags = match self.flags { - FileFlags::MachO { flags } => flags, - _ => 0, - }; - self.flags = FileFlags::MachO { - flags: flags | macho::MH_SUBSECTIONS_VIA_SYMBOLS, - }; - } - - pub(crate) fn macho_segment_name(&self, segment: StandardSegment) -> &'static [u8] { - match segment { - StandardSegment::Text => &b"__TEXT"[..], - StandardSegment::Data => &b"__DATA"[..], - StandardSegment::Debug => &b"__DWARF"[..], - } - } - - pub(crate) fn macho_section_info( - &self, - section: StandardSection, - ) -> (&'static [u8], &'static [u8], SectionKind, SectionFlags) { - match section { - StandardSection::Text => ( - &b"__TEXT"[..], - &b"__text"[..], - SectionKind::Text, - SectionFlags::None, - ), - StandardSection::Data => ( - &b"__DATA"[..], - &b"__data"[..], - SectionKind::Data, - SectionFlags::None, - ), - StandardSection::ReadOnlyData => ( - &b"__TEXT"[..], - &b"__const"[..], - SectionKind::ReadOnlyData, - SectionFlags::None, - ), - StandardSection::ReadOnlyDataWithRel => ( - &b"__DATA"[..], - &b"__const"[..], - SectionKind::ReadOnlyDataWithRel, - SectionFlags::None, - ), - StandardSection::ReadOnlyString => ( - &b"__TEXT"[..], - &b"__cstring"[..], - 
SectionKind::ReadOnlyString, - SectionFlags::None, - ), - StandardSection::UninitializedData => ( - &b"__DATA"[..], - &b"__bss"[..], - SectionKind::UninitializedData, - SectionFlags::None, - ), - StandardSection::Tls => ( - &b"__DATA"[..], - &b"__thread_data"[..], - SectionKind::Tls, - SectionFlags::None, - ), - StandardSection::UninitializedTls => ( - &b"__DATA"[..], - &b"__thread_bss"[..], - SectionKind::UninitializedTls, - SectionFlags::None, - ), - StandardSection::TlsVariables => ( - &b"__DATA"[..], - &b"__thread_vars"[..], - SectionKind::TlsVariables, - SectionFlags::None, - ), - StandardSection::Common => ( - &b"__DATA"[..], - &b"__common"[..], - SectionKind::Common, - SectionFlags::None, - ), - StandardSection::GnuProperty => { - // Unsupported section. - (&[], &[], SectionKind::Note, SectionFlags::None) - } - } - } - - fn macho_tlv_bootstrap(&mut self) -> SymbolId { - match self.tlv_bootstrap { - Some(id) => id, - None => { - let id = self.add_symbol(Symbol { - name: b"_tlv_bootstrap".to_vec(), - value: 0, - size: 0, - kind: SymbolKind::Text, - scope: SymbolScope::Dynamic, - weak: false, - section: SymbolSection::Undefined, - flags: SymbolFlags::None, - }); - self.tlv_bootstrap = Some(id); - id - } - } - } - - /// Create the `__thread_vars` entry for a TLS variable. - /// - /// The symbol given by `symbol_id` will be updated to point to this entry. - /// - /// A new `SymbolId` will be returned. The caller must update this symbol - /// to point to the initializer. - /// - /// If `symbol_id` is not for a TLS variable, then it is returned unchanged. - pub(crate) fn macho_add_thread_var(&mut self, symbol_id: SymbolId) -> SymbolId { - let symbol = self.symbol_mut(symbol_id); - if symbol.kind != SymbolKind::Tls { - return symbol_id; - } - - // Create the initializer symbol. 
- let mut name = symbol.name.clone(); - name.extend_from_slice(b"$tlv$init"); - let init_symbol_id = self.add_raw_symbol(Symbol { - name, - value: 0, - size: 0, - kind: SymbolKind::Tls, - scope: SymbolScope::Compilation, - weak: false, - section: SymbolSection::Undefined, - flags: SymbolFlags::None, - }); - - // Add the tlv entry. - // Three pointers in size: - // - __tlv_bootstrap - used to make sure support exists - // - spare pointer - used when mapped by the runtime - // - pointer to symbol initializer - let section = self.section_id(StandardSection::TlsVariables); - let address_size = self.architecture.address_size().unwrap().bytes(); - let size = u64::from(address_size) * 3; - let data = vec![0; size as usize]; - let offset = self.append_section_data(section, &data, u64::from(address_size)); - - let tlv_bootstrap = self.macho_tlv_bootstrap(); - self.add_relocation( - section, - Relocation { - offset, - size: address_size * 8, - kind: RelocationKind::Absolute, - encoding: RelocationEncoding::Generic, - symbol: tlv_bootstrap, - addend: 0, - }, - ) - .unwrap(); - self.add_relocation( - section, - Relocation { - offset: offset + u64::from(address_size) * 2, - size: address_size * 8, - kind: RelocationKind::Absolute, - encoding: RelocationEncoding::Generic, - symbol: init_symbol_id, - addend: 0, - }, - ) - .unwrap(); - - // Update the symbol to point to the tlv. 
- let symbol = self.symbol_mut(symbol_id); - symbol.value = offset; - symbol.size = size; - symbol.section = SymbolSection::Section(section); - - init_symbol_id - } - - pub(crate) fn macho_fixup_relocation(&mut self, relocation: &mut Relocation) -> i64 { - let constant = match relocation.kind { - // AArch64Call relocations have special handling for the addend, so don't adjust it - RelocationKind::Relative if relocation.encoding == RelocationEncoding::AArch64Call => 0, - RelocationKind::Relative - | RelocationKind::GotRelative - | RelocationKind::PltRelative => relocation.addend + 4, - _ => relocation.addend, - }; - // Aarch64 relocs of these sizes act as if they are double-word length - if self.architecture == Architecture::Aarch64 && matches!(relocation.size, 12 | 21 | 26) { - relocation.size = 32; - } - relocation.addend -= constant; - constant - } - - pub(crate) fn macho_write(&self, buffer: &mut dyn WritableBuffer) -> Result<()> { - let address_size = self.architecture.address_size().unwrap(); - let endian = self.endian; - let macho32 = MachO32 { endian }; - let macho64 = MachO64 { endian }; - let macho: &dyn MachO = match address_size { - AddressSize::U8 | AddressSize::U16 | AddressSize::U32 => &macho32, - AddressSize::U64 => &macho64, - }; - let pointer_align = address_size.bytes() as usize; - - // Calculate offsets of everything, and build strtab. - let mut offset = 0; - - // Calculate size of Mach-O header. - offset += macho.mach_header_size(); - - // Calculate size of commands. - let mut ncmds = 0; - let command_offset = offset; - - let build_version_offset = offset; - if let Some(version) = &self.macho_build_version { - offset += version.cmdsize() as usize; - ncmds += 1; - } - - // Calculate size of segment command and section headers. 
- let segment_command_offset = offset; - let segment_command_len = - macho.segment_command_size() + self.sections.len() * macho.section_header_size(); - offset += segment_command_len; - ncmds += 1; - - // Calculate size of symtab command. - let symtab_command_offset = offset; - let symtab_command_len = mem::size_of::>(); - offset += symtab_command_len; - ncmds += 1; - - let sizeofcmds = offset - command_offset; - - // Calculate size of section data. - // Section data can immediately follow the load commands without any alignment padding. - let segment_file_offset = offset; - let mut section_offsets = vec![SectionOffsets::default(); self.sections.len()]; - let mut address = 0; - for (index, section) in self.sections.iter().enumerate() { - section_offsets[index].index = 1 + index; - if !section.is_bss() { - address = align_u64(address, section.align); - section_offsets[index].address = address; - section_offsets[index].offset = segment_file_offset + address as usize; - address += section.size; - } - } - let segment_file_size = address as usize; - offset += address as usize; - for (index, section) in self.sections.iter().enumerate() { - if section.is_bss() { - debug_assert!(section.data.is_empty()); - address = align_u64(address, section.align); - section_offsets[index].address = address; - address += section.size; - } - } - - // Count symbols and add symbol strings to strtab. - let mut strtab = StringTable::default(); - let mut symbol_offsets = vec![SymbolOffsets::default(); self.symbols.len()]; - let mut nsyms = 0; - for (index, symbol) in self.symbols.iter().enumerate() { - // The unified API allows creating symbols that we don't emit, so filter - // them out here. - // - // Since we don't actually emit the symbol kind, we validate it here too. 
- match symbol.kind { - SymbolKind::Text | SymbolKind::Data | SymbolKind::Tls | SymbolKind::Unknown => {} - SymbolKind::File | SymbolKind::Section => continue, - SymbolKind::Null | SymbolKind::Label => { - return Err(Error(format!( - "unimplemented symbol `{}` kind {:?}", - symbol.name().unwrap_or(""), - symbol.kind - ))); - } - } - symbol_offsets[index].emit = true; - symbol_offsets[index].index = nsyms; - nsyms += 1; - if !symbol.name.is_empty() { - symbol_offsets[index].str_id = Some(strtab.add(&symbol.name)); - } - } - - // Calculate size of symtab. - offset = align(offset, pointer_align); - let symtab_offset = offset; - let symtab_len = nsyms * macho.nlist_size(); - offset += symtab_len; - - // Calculate size of strtab. - let strtab_offset = offset; - // Start with null name. - let mut strtab_data = vec![0]; - strtab.write(1, &mut strtab_data); - offset += strtab_data.len(); - - // Calculate size of relocations. - for (index, section) in self.sections.iter().enumerate() { - let count = section.relocations.len(); - if count != 0 { - offset = align(offset, 4); - section_offsets[index].reloc_offset = offset; - let len = count * mem::size_of::>(); - offset += len; - } - } - - // Start writing. - buffer - .reserve(offset) - .map_err(|_| Error(String::from("Cannot allocate buffer")))?; - - // Write file header. 
- let (cputype, mut cpusubtype) = match self.architecture { - Architecture::Arm => (macho::CPU_TYPE_ARM, macho::CPU_SUBTYPE_ARM_ALL), - Architecture::Aarch64 => (macho::CPU_TYPE_ARM64, macho::CPU_SUBTYPE_ARM64_ALL), - Architecture::Aarch64_Ilp32 => { - (macho::CPU_TYPE_ARM64_32, macho::CPU_SUBTYPE_ARM64_32_V8) - } - Architecture::I386 => (macho::CPU_TYPE_X86, macho::CPU_SUBTYPE_I386_ALL), - Architecture::X86_64 => (macho::CPU_TYPE_X86_64, macho::CPU_SUBTYPE_X86_64_ALL), - Architecture::PowerPc => (macho::CPU_TYPE_POWERPC, macho::CPU_SUBTYPE_POWERPC_ALL), - Architecture::PowerPc64 => (macho::CPU_TYPE_POWERPC64, macho::CPU_SUBTYPE_POWERPC_ALL), - _ => { - return Err(Error(format!( - "unimplemented architecture {:?}", - self.architecture - ))); - } - }; - - if let Some(cpu_subtype) = self.macho_cpu_subtype { - cpusubtype = cpu_subtype; - } - - let flags = match self.flags { - FileFlags::MachO { flags } => flags, - _ => 0, - }; - macho.write_mach_header( - buffer, - MachHeader { - cputype, - cpusubtype, - filetype: macho::MH_OBJECT, - ncmds, - sizeofcmds: sizeofcmds as u32, - flags, - }, - ); - - if let Some(version) = &self.macho_build_version { - debug_assert_eq!(build_version_offset, buffer.len()); - buffer.write(&macho::BuildVersionCommand { - cmd: U32::new(endian, macho::LC_BUILD_VERSION), - cmdsize: U32::new(endian, version.cmdsize()), - platform: U32::new(endian, version.platform), - minos: U32::new(endian, version.minos), - sdk: U32::new(endian, version.sdk), - ntools: U32::new(endian, 0), - }); - } - - // Write segment command. 
- debug_assert_eq!(segment_command_offset, buffer.len()); - macho.write_segment_command( - buffer, - SegmentCommand { - cmdsize: segment_command_len as u32, - segname: [0; 16], - vmaddr: 0, - vmsize: address, - fileoff: segment_file_offset as u64, - filesize: segment_file_size as u64, - maxprot: macho::VM_PROT_READ | macho::VM_PROT_WRITE | macho::VM_PROT_EXECUTE, - initprot: macho::VM_PROT_READ | macho::VM_PROT_WRITE | macho::VM_PROT_EXECUTE, - nsects: self.sections.len() as u32, - flags: 0, - }, - ); - - // Write section headers. - for (index, section) in self.sections.iter().enumerate() { - let mut sectname = [0; 16]; - sectname - .get_mut(..section.name.len()) - .ok_or_else(|| { - Error(format!( - "section name `{}` is too long", - section.name().unwrap_or(""), - )) - })? - .copy_from_slice(§ion.name); - let mut segname = [0; 16]; - segname - .get_mut(..section.segment.len()) - .ok_or_else(|| { - Error(format!( - "segment name `{}` is too long", - section.segment().unwrap_or(""), - )) - })? 
- .copy_from_slice(§ion.segment); - let flags = if let SectionFlags::MachO { flags } = section.flags { - flags - } else { - match section.kind { - SectionKind::Text => { - macho::S_ATTR_PURE_INSTRUCTIONS | macho::S_ATTR_SOME_INSTRUCTIONS - } - SectionKind::Data => 0, - SectionKind::ReadOnlyData | SectionKind::ReadOnlyDataWithRel => 0, - SectionKind::ReadOnlyString => macho::S_CSTRING_LITERALS, - SectionKind::UninitializedData | SectionKind::Common => macho::S_ZEROFILL, - SectionKind::Tls => macho::S_THREAD_LOCAL_REGULAR, - SectionKind::UninitializedTls => macho::S_THREAD_LOCAL_ZEROFILL, - SectionKind::TlsVariables => macho::S_THREAD_LOCAL_VARIABLES, - SectionKind::Debug => macho::S_ATTR_DEBUG, - SectionKind::OtherString => macho::S_CSTRING_LITERALS, - SectionKind::Other | SectionKind::Linker | SectionKind::Metadata => 0, - SectionKind::Note | SectionKind::Unknown | SectionKind::Elf(_) => { - return Err(Error(format!( - "unimplemented section `{}` kind {:?}", - section.name().unwrap_or(""), - section.kind - ))); - } - } - }; - macho.write_section( - buffer, - SectionHeader { - sectname, - segname, - addr: section_offsets[index].address, - size: section.size, - offset: section_offsets[index].offset as u32, - align: section.align.trailing_zeros(), - reloff: section_offsets[index].reloc_offset as u32, - nreloc: section.relocations.len() as u32, - flags, - }, - ); - } - - // Write symtab command. - debug_assert_eq!(symtab_command_offset, buffer.len()); - let symtab_command = macho::SymtabCommand { - cmd: U32::new(endian, macho::LC_SYMTAB), - cmdsize: U32::new(endian, symtab_command_len as u32), - symoff: U32::new(endian, symtab_offset as u32), - nsyms: U32::new(endian, nsyms as u32), - stroff: U32::new(endian, strtab_offset as u32), - strsize: U32::new(endian, strtab_data.len() as u32), - }; - buffer.write(&symtab_command); - - // Write section data. 
- for (index, section) in self.sections.iter().enumerate() { - if !section.is_bss() { - buffer.resize(section_offsets[index].offset); - buffer.write_bytes(§ion.data); - } - } - debug_assert_eq!(segment_file_offset + segment_file_size, buffer.len()); - - // Write symtab. - write_align(buffer, pointer_align); - debug_assert_eq!(symtab_offset, buffer.len()); - for (index, symbol) in self.symbols.iter().enumerate() { - if !symbol_offsets[index].emit { - continue; - } - // TODO: N_STAB - let (mut n_type, n_sect) = match symbol.section { - SymbolSection::Undefined => (macho::N_UNDF | macho::N_EXT, 0), - SymbolSection::Absolute => (macho::N_ABS, 0), - SymbolSection::Section(id) => (macho::N_SECT, id.0 + 1), - SymbolSection::None | SymbolSection::Common => { - return Err(Error(format!( - "unimplemented symbol `{}` section {:?}", - symbol.name().unwrap_or(""), - symbol.section - ))); - } - }; - match symbol.scope { - SymbolScope::Unknown | SymbolScope::Compilation => {} - SymbolScope::Linkage => { - n_type |= macho::N_EXT | macho::N_PEXT; - } - SymbolScope::Dynamic => { - n_type |= macho::N_EXT; - } - } - - let n_desc = if let SymbolFlags::MachO { n_desc } = symbol.flags { - n_desc - } else { - let mut n_desc = 0; - if symbol.weak { - if symbol.is_undefined() { - n_desc |= macho::N_WEAK_REF; - } else { - n_desc |= macho::N_WEAK_DEF; - } - } - n_desc - }; - - let n_value = match symbol.section.id() { - Some(section) => section_offsets[section.0].address + symbol.value, - None => symbol.value, - }; - - let n_strx = symbol_offsets[index] - .str_id - .map(|id| strtab.get_offset(id)) - .unwrap_or(0); - - macho.write_nlist( - buffer, - Nlist { - n_strx: n_strx as u32, - n_type, - n_sect: n_sect as u8, - n_desc, - n_value, - }, - ); - } - - // Write strtab. - debug_assert_eq!(strtab_offset, buffer.len()); - buffer.write_bytes(&strtab_data); - - // Write relocations. 
- for (index, section) in self.sections.iter().enumerate() { - if !section.relocations.is_empty() { - write_align(buffer, 4); - debug_assert_eq!(section_offsets[index].reloc_offset, buffer.len()); - for reloc in §ion.relocations { - let r_extern; - let mut r_symbolnum; - let symbol = &self.symbols[reloc.symbol.0]; - if symbol.kind == SymbolKind::Section { - r_symbolnum = section_offsets[symbol.section.id().unwrap().0].index as u32; - r_extern = false; - } else { - r_symbolnum = symbol_offsets[reloc.symbol.0].index as u32; - r_extern = true; - } - let r_length = match reloc.size { - 8 => 0, - 16 => 1, - 32 => 2, - 64 => 3, - _ => return Err(Error(format!("unimplemented reloc size {:?}", reloc))), - }; - let (r_pcrel, r_type) = match self.architecture { - Architecture::I386 => match reloc.kind { - RelocationKind::Absolute => (false, macho::GENERIC_RELOC_VANILLA), - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - Architecture::X86_64 => match (reloc.kind, reloc.encoding, reloc.addend) { - (RelocationKind::Absolute, RelocationEncoding::Generic, 0) => { - (false, macho::X86_64_RELOC_UNSIGNED) - } - (RelocationKind::Relative, RelocationEncoding::Generic, -4) => { - (true, macho::X86_64_RELOC_SIGNED) - } - (RelocationKind::Relative, RelocationEncoding::X86RipRelative, -4) => { - (true, macho::X86_64_RELOC_SIGNED) - } - (RelocationKind::Relative, RelocationEncoding::X86Branch, -4) => { - (true, macho::X86_64_RELOC_BRANCH) - } - (RelocationKind::PltRelative, RelocationEncoding::X86Branch, -4) => { - (true, macho::X86_64_RELOC_BRANCH) - } - (RelocationKind::GotRelative, RelocationEncoding::Generic, -4) => { - (true, macho::X86_64_RELOC_GOT) - } - ( - RelocationKind::GotRelative, - RelocationEncoding::X86RipRelativeMovq, - -4, - ) => (true, macho::X86_64_RELOC_GOT_LOAD), - (RelocationKind::MachO { value, relative }, _, _) => (relative, value), - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }, - 
Architecture::Aarch64 | Architecture::Aarch64_Ilp32 => { - match (reloc.kind, reloc.encoding, reloc.addend) { - (RelocationKind::Absolute, RelocationEncoding::Generic, 0) => { - (false, macho::ARM64_RELOC_UNSIGNED) - } - (RelocationKind::Relative, RelocationEncoding::AArch64Call, 0) => { - (true, macho::ARM64_RELOC_BRANCH26) - } - // Non-zero addend, so we have to encode the addend separately - ( - RelocationKind::Relative, - RelocationEncoding::AArch64Call, - value, - ) => { - // first emit the BR26 relocation - let reloc_info = macho::RelocationInfo { - r_address: reloc.offset as u32, - r_symbolnum, - r_pcrel: true, - r_length, - r_extern: true, - r_type: macho::ARM64_RELOC_BRANCH26, - }; - buffer.write(&reloc_info.relocation(endian)); - - // set up a separate relocation for the addend - r_symbolnum = value as u32; - (false, macho::ARM64_RELOC_ADDEND) - } - ( - RelocationKind::MachO { value, relative }, - RelocationEncoding::Generic, - 0, - ) => (relative, value), - _ => { - return Err(Error(format!( - "unimplemented relocation {:?}", - reloc - ))); - } - } - } - _ => { - if let RelocationKind::MachO { value, relative } = reloc.kind { - (relative, value) - } else { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - } - }; - let reloc_info = macho::RelocationInfo { - r_address: reloc.offset as u32, - r_symbolnum, - r_pcrel, - r_length, - r_extern, - r_type, - }; - buffer.write(&reloc_info.relocation(endian)); - } - } - } - - debug_assert_eq!(offset, buffer.len()); - - Ok(()) - } -} - -struct MachHeader { - cputype: u32, - cpusubtype: u32, - filetype: u32, - ncmds: u32, - sizeofcmds: u32, - flags: u32, -} - -struct SegmentCommand { - cmdsize: u32, - segname: [u8; 16], - vmaddr: u64, - vmsize: u64, - fileoff: u64, - filesize: u64, - maxprot: u32, - initprot: u32, - nsects: u32, - flags: u32, -} - -pub struct SectionHeader { - sectname: [u8; 16], - segname: [u8; 16], - addr: u64, - size: u64, - offset: u32, - align: u32, - reloff: u32, - 
nreloc: u32, - flags: u32, -} - -struct Nlist { - n_strx: u32, - n_type: u8, - n_sect: u8, - n_desc: u16, - n_value: u64, -} - -trait MachO { - fn mach_header_size(&self) -> usize; - fn segment_command_size(&self) -> usize; - fn section_header_size(&self) -> usize; - fn nlist_size(&self) -> usize; - fn write_mach_header(&self, buffer: &mut dyn WritableBuffer, section: MachHeader); - fn write_segment_command(&self, buffer: &mut dyn WritableBuffer, segment: SegmentCommand); - fn write_section(&self, buffer: &mut dyn WritableBuffer, section: SectionHeader); - fn write_nlist(&self, buffer: &mut dyn WritableBuffer, nlist: Nlist); -} - -struct MachO32 { - endian: E, -} - -impl MachO for MachO32 { - fn mach_header_size(&self) -> usize { - mem::size_of::>() - } - - fn segment_command_size(&self) -> usize { - mem::size_of::>() - } - - fn section_header_size(&self) -> usize { - mem::size_of::>() - } - - fn nlist_size(&self) -> usize { - mem::size_of::>() - } - - fn write_mach_header(&self, buffer: &mut dyn WritableBuffer, header: MachHeader) { - let endian = self.endian; - let magic = if endian.is_big_endian() { - macho::MH_MAGIC - } else { - macho::MH_CIGAM - }; - let header = macho::MachHeader32 { - magic: U32::new(BigEndian, magic), - cputype: U32::new(endian, header.cputype), - cpusubtype: U32::new(endian, header.cpusubtype), - filetype: U32::new(endian, header.filetype), - ncmds: U32::new(endian, header.ncmds), - sizeofcmds: U32::new(endian, header.sizeofcmds), - flags: U32::new(endian, header.flags), - }; - buffer.write(&header); - } - - fn write_segment_command(&self, buffer: &mut dyn WritableBuffer, segment: SegmentCommand) { - let endian = self.endian; - let segment = macho::SegmentCommand32 { - cmd: U32::new(endian, macho::LC_SEGMENT), - cmdsize: U32::new(endian, segment.cmdsize), - segname: segment.segname, - vmaddr: U32::new(endian, segment.vmaddr as u32), - vmsize: U32::new(endian, segment.vmsize as u32), - fileoff: U32::new(endian, segment.fileoff as u32), - 
filesize: U32::new(endian, segment.filesize as u32), - maxprot: U32::new(endian, segment.maxprot), - initprot: U32::new(endian, segment.initprot), - nsects: U32::new(endian, segment.nsects), - flags: U32::new(endian, segment.flags), - }; - buffer.write(&segment); - } - - fn write_section(&self, buffer: &mut dyn WritableBuffer, section: SectionHeader) { - let endian = self.endian; - let section = macho::Section32 { - sectname: section.sectname, - segname: section.segname, - addr: U32::new(endian, section.addr as u32), - size: U32::new(endian, section.size as u32), - offset: U32::new(endian, section.offset), - align: U32::new(endian, section.align), - reloff: U32::new(endian, section.reloff), - nreloc: U32::new(endian, section.nreloc), - flags: U32::new(endian, section.flags), - reserved1: U32::default(), - reserved2: U32::default(), - }; - buffer.write(§ion); - } - - fn write_nlist(&self, buffer: &mut dyn WritableBuffer, nlist: Nlist) { - let endian = self.endian; - let nlist = macho::Nlist32 { - n_strx: U32::new(endian, nlist.n_strx), - n_type: nlist.n_type, - n_sect: nlist.n_sect, - n_desc: U16::new(endian, nlist.n_desc), - n_value: U32::new(endian, nlist.n_value as u32), - }; - buffer.write(&nlist); - } -} - -struct MachO64 { - endian: E, -} - -impl MachO for MachO64 { - fn mach_header_size(&self) -> usize { - mem::size_of::>() - } - - fn segment_command_size(&self) -> usize { - mem::size_of::>() - } - - fn section_header_size(&self) -> usize { - mem::size_of::>() - } - - fn nlist_size(&self) -> usize { - mem::size_of::>() - } - - fn write_mach_header(&self, buffer: &mut dyn WritableBuffer, header: MachHeader) { - let endian = self.endian; - let magic = if endian.is_big_endian() { - macho::MH_MAGIC_64 - } else { - macho::MH_CIGAM_64 - }; - let header = macho::MachHeader64 { - magic: U32::new(BigEndian, magic), - cputype: U32::new(endian, header.cputype), - cpusubtype: U32::new(endian, header.cpusubtype), - filetype: U32::new(endian, header.filetype), - ncmds: 
U32::new(endian, header.ncmds), - sizeofcmds: U32::new(endian, header.sizeofcmds), - flags: U32::new(endian, header.flags), - reserved: U32::default(), - }; - buffer.write(&header); - } - - fn write_segment_command(&self, buffer: &mut dyn WritableBuffer, segment: SegmentCommand) { - let endian = self.endian; - let segment = macho::SegmentCommand64 { - cmd: U32::new(endian, macho::LC_SEGMENT_64), - cmdsize: U32::new(endian, segment.cmdsize), - segname: segment.segname, - vmaddr: U64::new(endian, segment.vmaddr), - vmsize: U64::new(endian, segment.vmsize), - fileoff: U64::new(endian, segment.fileoff), - filesize: U64::new(endian, segment.filesize), - maxprot: U32::new(endian, segment.maxprot), - initprot: U32::new(endian, segment.initprot), - nsects: U32::new(endian, segment.nsects), - flags: U32::new(endian, segment.flags), - }; - buffer.write(&segment); - } - - fn write_section(&self, buffer: &mut dyn WritableBuffer, section: SectionHeader) { - let endian = self.endian; - let section = macho::Section64 { - sectname: section.sectname, - segname: section.segname, - addr: U64::new(endian, section.addr), - size: U64::new(endian, section.size), - offset: U32::new(endian, section.offset), - align: U32::new(endian, section.align), - reloff: U32::new(endian, section.reloff), - nreloc: U32::new(endian, section.nreloc), - flags: U32::new(endian, section.flags), - reserved1: U32::default(), - reserved2: U32::default(), - reserved3: U32::default(), - }; - buffer.write(§ion); - } - - fn write_nlist(&self, buffer: &mut dyn WritableBuffer, nlist: Nlist) { - let endian = self.endian; - let nlist = macho::Nlist64 { - n_strx: U32::new(endian, nlist.n_strx), - n_type: nlist.n_type, - n_sect: nlist.n_sect, - n_desc: U16::new(endian, nlist.n_desc), - n_value: U64Bytes::new(endian, nlist.n_value), - }; - buffer.write(&nlist); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/mod.rs s390-tools-2.33.1/rust-vendor/object/src/write/mod.rs --- 
s390-tools-2.31.0/rust-vendor/object/src/write/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,948 +0,0 @@ -//! Interface for writing object files. - -use alloc::borrow::Cow; -use alloc::string::String; -use alloc::vec::Vec; -use core::{fmt, result, str}; -#[cfg(not(feature = "std"))] -use hashbrown::HashMap; -#[cfg(feature = "std")] -use std::{boxed::Box, collections::HashMap, error, io}; - -use crate::endian::{Endianness, U32, U64}; -use crate::{ - Architecture, BinaryFormat, ComdatKind, FileFlags, RelocationEncoding, RelocationKind, - SectionFlags, SectionKind, SymbolFlags, SymbolKind, SymbolScope, -}; - -#[cfg(feature = "coff")] -mod coff; -#[cfg(feature = "coff")] -pub use coff::CoffExportStyle; - -#[cfg(feature = "elf")] -pub mod elf; - -#[cfg(feature = "macho")] -mod macho; -#[cfg(feature = "macho")] -pub use macho::MachOBuildVersion; - -#[cfg(feature = "pe")] -pub mod pe; - -#[cfg(feature = "xcoff")] -mod xcoff; - -mod string; -pub use string::StringId; - -mod util; -pub use util::*; - -/// The error type used within the write module. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Error(String); - -impl fmt::Display for Error { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(&self.0) - } -} - -#[cfg(feature = "std")] -impl error::Error for Error {} - -/// The result type used within the write module. -pub type Result = result::Result; - -/// A writable relocatable object file. -#[derive(Debug)] -pub struct Object<'a> { - format: BinaryFormat, - architecture: Architecture, - endian: Endianness, - sections: Vec>, - standard_sections: HashMap, - symbols: Vec, - symbol_map: HashMap, SymbolId>, - stub_symbols: HashMap, - comdats: Vec, - /// File flags that are specific to each file format. - pub flags: FileFlags, - /// The symbol name mangling scheme. - pub mangling: Mangling, - /// Mach-O "_tlv_bootstrap" symbol. 
- tlv_bootstrap: Option, - /// Mach-O CPU subtype. - #[cfg(feature = "macho")] - macho_cpu_subtype: Option, - #[cfg(feature = "macho")] - macho_build_version: Option, -} - -impl<'a> Object<'a> { - /// Create an empty object file. - pub fn new(format: BinaryFormat, architecture: Architecture, endian: Endianness) -> Object<'a> { - Object { - format, - architecture, - endian, - sections: Vec::new(), - standard_sections: HashMap::new(), - symbols: Vec::new(), - symbol_map: HashMap::new(), - stub_symbols: HashMap::new(), - comdats: Vec::new(), - flags: FileFlags::None, - mangling: Mangling::default(format, architecture), - tlv_bootstrap: None, - #[cfg(feature = "macho")] - macho_cpu_subtype: None, - #[cfg(feature = "macho")] - macho_build_version: None, - } - } - - /// Return the file format. - #[inline] - pub fn format(&self) -> BinaryFormat { - self.format - } - - /// Return the architecture. - #[inline] - pub fn architecture(&self) -> Architecture { - self.architecture - } - - /// Return the current mangling setting. - #[inline] - pub fn mangling(&self) -> Mangling { - self.mangling - } - - /// Specify the mangling setting. - #[inline] - pub fn set_mangling(&mut self, mangling: Mangling) { - self.mangling = mangling; - } - - /// Return the name for a standard segment. - /// - /// This will vary based on the file format. - #[allow(unused_variables)] - pub fn segment_name(&self, segment: StandardSegment) -> &'static [u8] { - match self.format { - #[cfg(feature = "coff")] - BinaryFormat::Coff => &[], - #[cfg(feature = "elf")] - BinaryFormat::Elf => &[], - #[cfg(feature = "macho")] - BinaryFormat::MachO => self.macho_segment_name(segment), - _ => unimplemented!(), - } - } - - /// Get the section with the given `SectionId`. - #[inline] - pub fn section(&self, section: SectionId) -> &Section<'a> { - &self.sections[section.0] - } - - /// Mutably get the section with the given `SectionId`. 
- #[inline] - pub fn section_mut(&mut self, section: SectionId) -> &mut Section<'a> { - &mut self.sections[section.0] - } - - /// Set the data for an existing section. - /// - /// Must not be called for sections that already have data, or that contain uninitialized data. - pub fn set_section_data(&mut self, section: SectionId, data: T, align: u64) - where - T: Into>, - { - self.sections[section.0].set_data(data, align) - } - - /// Append data to an existing section. Returns the section offset of the data. - pub fn append_section_data(&mut self, section: SectionId, data: &[u8], align: u64) -> u64 { - self.sections[section.0].append_data(data, align) - } - - /// Append zero-initialized data to an existing section. Returns the section offset of the data. - pub fn append_section_bss(&mut self, section: SectionId, size: u64, align: u64) -> u64 { - self.sections[section.0].append_bss(size, align) - } - - /// Return the `SectionId` of a standard section. - /// - /// If the section doesn't already exist then it is created. - pub fn section_id(&mut self, section: StandardSection) -> SectionId { - self.standard_sections - .get(§ion) - .cloned() - .unwrap_or_else(|| { - let (segment, name, kind, flags) = self.section_info(section); - let id = self.add_section(segment.to_vec(), name.to_vec(), kind); - self.section_mut(id).flags = flags; - id - }) - } - - /// Add a new section and return its `SectionId`. - /// - /// This also creates a section symbol. - pub fn add_section(&mut self, segment: Vec, name: Vec, kind: SectionKind) -> SectionId { - let id = SectionId(self.sections.len()); - self.sections.push(Section { - segment, - name, - kind, - size: 0, - align: 1, - data: Cow::Borrowed(&[]), - relocations: Vec::new(), - symbol: None, - flags: SectionFlags::None, - }); - - // Add to self.standard_sections if required. This may match multiple standard sections. 
- let section = &self.sections[id.0]; - for standard_section in StandardSection::all() { - if !self.standard_sections.contains_key(standard_section) { - let (segment, name, kind, _flags) = self.section_info(*standard_section); - if segment == &*section.segment && name == &*section.name && kind == section.kind { - self.standard_sections.insert(*standard_section, id); - } - } - } - - id - } - - fn section_info( - &self, - section: StandardSection, - ) -> (&'static [u8], &'static [u8], SectionKind, SectionFlags) { - match self.format { - #[cfg(feature = "coff")] - BinaryFormat::Coff => self.coff_section_info(section), - #[cfg(feature = "elf")] - BinaryFormat::Elf => self.elf_section_info(section), - #[cfg(feature = "macho")] - BinaryFormat::MachO => self.macho_section_info(section), - #[cfg(feature = "xcoff")] - BinaryFormat::Xcoff => self.xcoff_section_info(section), - _ => unimplemented!(), - } - } - - /// Add a subsection. Returns the `SectionId` and section offset of the data. - pub fn add_subsection( - &mut self, - section: StandardSection, - name: &[u8], - data: &[u8], - align: u64, - ) -> (SectionId, u64) { - let section_id = if self.has_subsections_via_symbols() { - self.set_subsections_via_symbols(); - self.section_id(section) - } else { - let (segment, name, kind, flags) = self.subsection_info(section, name); - let id = self.add_section(segment.to_vec(), name, kind); - self.section_mut(id).flags = flags; - id - }; - let offset = self.append_section_data(section_id, data, align); - (section_id, offset) - } - - fn has_subsections_via_symbols(&self) -> bool { - match self.format { - BinaryFormat::Coff | BinaryFormat::Elf | BinaryFormat::Xcoff => false, - BinaryFormat::MachO => true, - _ => unimplemented!(), - } - } - - fn set_subsections_via_symbols(&mut self) { - match self.format { - #[cfg(feature = "macho")] - BinaryFormat::MachO => self.macho_set_subsections_via_symbols(), - _ => unimplemented!(), - } - } - - fn subsection_info( - &self, - section: 
StandardSection, - value: &[u8], - ) -> (&'static [u8], Vec, SectionKind, SectionFlags) { - let (segment, section, kind, flags) = self.section_info(section); - let name = self.subsection_name(section, value); - (segment, name, kind, flags) - } - - #[allow(unused_variables)] - fn subsection_name(&self, section: &[u8], value: &[u8]) -> Vec { - debug_assert!(!self.has_subsections_via_symbols()); - match self.format { - #[cfg(feature = "coff")] - BinaryFormat::Coff => self.coff_subsection_name(section, value), - #[cfg(feature = "elf")] - BinaryFormat::Elf => self.elf_subsection_name(section, value), - _ => unimplemented!(), - } - } - - /// Get the COMDAT section group with the given `ComdatId`. - #[inline] - pub fn comdat(&self, comdat: ComdatId) -> &Comdat { - &self.comdats[comdat.0] - } - - /// Mutably get the COMDAT section group with the given `ComdatId`. - #[inline] - pub fn comdat_mut(&mut self, comdat: ComdatId) -> &mut Comdat { - &mut self.comdats[comdat.0] - } - - /// Add a new COMDAT section group and return its `ComdatId`. - pub fn add_comdat(&mut self, comdat: Comdat) -> ComdatId { - let comdat_id = ComdatId(self.comdats.len()); - self.comdats.push(comdat); - comdat_id - } - - /// Get the `SymbolId` of the symbol with the given name. - pub fn symbol_id(&self, name: &[u8]) -> Option { - self.symbol_map.get(name).cloned() - } - - /// Get the symbol with the given `SymbolId`. - #[inline] - pub fn symbol(&self, symbol: SymbolId) -> &Symbol { - &self.symbols[symbol.0] - } - - /// Mutably get the symbol with the given `SymbolId`. - #[inline] - pub fn symbol_mut(&mut self, symbol: SymbolId) -> &mut Symbol { - &mut self.symbols[symbol.0] - } - - /// Add a new symbol and return its `SymbolId`. - pub fn add_symbol(&mut self, mut symbol: Symbol) -> SymbolId { - // Defined symbols must have a scope. 
- debug_assert!(symbol.is_undefined() || symbol.scope != SymbolScope::Unknown); - if symbol.kind == SymbolKind::Section { - // There can only be one section symbol, but update its flags, since - // the automatically generated section symbol will have none. - let symbol_id = self.section_symbol(symbol.section.id().unwrap()); - if symbol.flags != SymbolFlags::None { - self.symbol_mut(symbol_id).flags = symbol.flags; - } - return symbol_id; - } - if !symbol.name.is_empty() - && (symbol.kind == SymbolKind::Text - || symbol.kind == SymbolKind::Data - || symbol.kind == SymbolKind::Tls) - { - let unmangled_name = symbol.name.clone(); - if let Some(prefix) = self.mangling.global_prefix() { - symbol.name.insert(0, prefix); - } - let symbol_id = self.add_raw_symbol(symbol); - self.symbol_map.insert(unmangled_name, symbol_id); - symbol_id - } else { - self.add_raw_symbol(symbol) - } - } - - fn add_raw_symbol(&mut self, symbol: Symbol) -> SymbolId { - let symbol_id = SymbolId(self.symbols.len()); - self.symbols.push(symbol); - symbol_id - } - - /// Return true if the file format supports `StandardSection::UninitializedTls`. - #[inline] - pub fn has_uninitialized_tls(&self) -> bool { - self.format != BinaryFormat::Coff - } - - /// Return true if the file format supports `StandardSection::Common`. - #[inline] - pub fn has_common(&self) -> bool { - self.format == BinaryFormat::MachO - } - - /// Add a new common symbol and return its `SymbolId`. - /// - /// For Mach-O, this appends the symbol to the `__common` section. - pub fn add_common_symbol(&mut self, mut symbol: Symbol, size: u64, align: u64) -> SymbolId { - if self.has_common() { - let symbol_id = self.add_symbol(symbol); - let section = self.section_id(StandardSection::Common); - self.add_symbol_bss(symbol_id, section, size, align); - symbol_id - } else { - symbol.section = SymbolSection::Common; - symbol.size = size; - self.add_symbol(symbol) - } - } - - /// Add a new file symbol and return its `SymbolId`. 
- pub fn add_file_symbol(&mut self, name: Vec) -> SymbolId { - self.add_raw_symbol(Symbol { - name, - value: 0, - size: 0, - kind: SymbolKind::File, - scope: SymbolScope::Compilation, - weak: false, - section: SymbolSection::None, - flags: SymbolFlags::None, - }) - } - - /// Get the symbol for a section. - pub fn section_symbol(&mut self, section_id: SectionId) -> SymbolId { - let section = &mut self.sections[section_id.0]; - if let Some(symbol) = section.symbol { - return symbol; - } - let name = if self.format == BinaryFormat::Coff { - section.name.clone() - } else { - Vec::new() - }; - let symbol_id = SymbolId(self.symbols.len()); - self.symbols.push(Symbol { - name, - value: 0, - size: 0, - kind: SymbolKind::Section, - scope: SymbolScope::Compilation, - weak: false, - section: SymbolSection::Section(section_id), - flags: SymbolFlags::None, - }); - section.symbol = Some(symbol_id); - symbol_id - } - - /// Append data to an existing section, and update a symbol to refer to it. - /// - /// For Mach-O, this also creates a `__thread_vars` entry for TLS symbols, and the - /// symbol will indirectly point to the added data via the `__thread_vars` entry. - /// - /// Returns the section offset of the data. - pub fn add_symbol_data( - &mut self, - symbol_id: SymbolId, - section: SectionId, - data: &[u8], - align: u64, - ) -> u64 { - let offset = self.append_section_data(section, data, align); - self.set_symbol_data(symbol_id, section, offset, data.len() as u64); - offset - } - - /// Append zero-initialized data to an existing section, and update a symbol to refer to it. - /// - /// For Mach-O, this also creates a `__thread_vars` entry for TLS symbols, and the - /// symbol will indirectly point to the added data via the `__thread_vars` entry. - /// - /// Returns the section offset of the data. 
- pub fn add_symbol_bss( - &mut self, - symbol_id: SymbolId, - section: SectionId, - size: u64, - align: u64, - ) -> u64 { - let offset = self.append_section_bss(section, size, align); - self.set_symbol_data(symbol_id, section, offset, size); - offset - } - - /// Update a symbol to refer to the given data within a section. - /// - /// For Mach-O, this also creates a `__thread_vars` entry for TLS symbols, and the - /// symbol will indirectly point to the data via the `__thread_vars` entry. - #[allow(unused_mut)] - pub fn set_symbol_data( - &mut self, - mut symbol_id: SymbolId, - section: SectionId, - offset: u64, - size: u64, - ) { - // Defined symbols must have a scope. - debug_assert!(self.symbol(symbol_id).scope != SymbolScope::Unknown); - match self.format { - #[cfg(feature = "macho")] - BinaryFormat::MachO => symbol_id = self.macho_add_thread_var(symbol_id), - _ => {} - } - let symbol = self.symbol_mut(symbol_id); - symbol.value = offset; - symbol.size = size; - symbol.section = SymbolSection::Section(section); - } - - /// Convert a symbol to a section symbol and offset. - /// - /// Returns `None` if the symbol does not have a section. - pub fn symbol_section_and_offset(&mut self, symbol_id: SymbolId) -> Option<(SymbolId, u64)> { - let symbol = self.symbol(symbol_id); - if symbol.kind == SymbolKind::Section { - return Some((symbol_id, 0)); - } - let symbol_offset = symbol.value; - let section = symbol.section.id()?; - let section_symbol = self.section_symbol(section); - Some((section_symbol, symbol_offset)) - } - - /// Add a relocation to a section. - /// - /// Relocations must only be added after the referenced symbols have been added - /// and defined (if applicable). 
- pub fn add_relocation(&mut self, section: SectionId, mut relocation: Relocation) -> Result<()> { - let addend = match self.format { - #[cfg(feature = "coff")] - BinaryFormat::Coff => self.coff_fixup_relocation(&mut relocation), - #[cfg(feature = "elf")] - BinaryFormat::Elf => self.elf_fixup_relocation(&mut relocation)?, - #[cfg(feature = "macho")] - BinaryFormat::MachO => self.macho_fixup_relocation(&mut relocation), - #[cfg(feature = "xcoff")] - BinaryFormat::Xcoff => self.xcoff_fixup_relocation(&mut relocation), - _ => unimplemented!(), - }; - if addend != 0 { - self.write_relocation_addend(section, &relocation, addend)?; - } - self.sections[section.0].relocations.push(relocation); - Ok(()) - } - - fn write_relocation_addend( - &mut self, - section: SectionId, - relocation: &Relocation, - addend: i64, - ) -> Result<()> { - let data = self.sections[section.0].data_mut(); - let offset = relocation.offset as usize; - match relocation.size { - 32 => data.write_at(offset, &U32::new(self.endian, addend as u32)), - 64 => data.write_at(offset, &U64::new(self.endian, addend as u64)), - _ => { - return Err(Error(format!( - "unimplemented relocation addend {:?}", - relocation - ))); - } - } - .map_err(|_| { - Error(format!( - "invalid relocation offset {}+{} (max {})", - relocation.offset, - relocation.size, - data.len() - )) - }) - } - - /// Write the object to a `Vec`. - pub fn write(&self) -> Result> { - let mut buffer = Vec::new(); - self.emit(&mut buffer)?; - Ok(buffer) - } - - /// Write the object to a `Write` implementation. - /// - /// Also flushes the writer. - /// - /// It is advisable to use a buffered writer like [`BufWriter`](std::io::BufWriter) - /// instead of an unbuffered writer like [`File`](std::fs::File). 
- #[cfg(feature = "std")] - pub fn write_stream(&self, w: W) -> result::Result<(), Box> { - let mut stream = StreamingBuffer::new(w); - self.emit(&mut stream)?; - stream.result()?; - stream.into_inner().flush()?; - Ok(()) - } - - /// Write the object to a `WritableBuffer`. - pub fn emit(&self, buffer: &mut dyn WritableBuffer) -> Result<()> { - match self.format { - #[cfg(feature = "coff")] - BinaryFormat::Coff => self.coff_write(buffer), - #[cfg(feature = "elf")] - BinaryFormat::Elf => self.elf_write(buffer), - #[cfg(feature = "macho")] - BinaryFormat::MachO => self.macho_write(buffer), - #[cfg(feature = "xcoff")] - BinaryFormat::Xcoff => self.xcoff_write(buffer), - _ => unimplemented!(), - } - } -} - -/// A standard segment kind. -#[allow(missing_docs)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[non_exhaustive] -pub enum StandardSegment { - Text, - Data, - Debug, -} - -/// A standard section kind. -#[allow(missing_docs)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[non_exhaustive] -pub enum StandardSection { - Text, - Data, - ReadOnlyData, - ReadOnlyDataWithRel, - ReadOnlyString, - UninitializedData, - Tls, - /// Zero-fill TLS initializers. Unsupported for COFF. - UninitializedTls, - /// TLS variable structures. Only supported for Mach-O. - TlsVariables, - /// Common data. Only supported for Mach-O. - Common, - /// Notes for GNU properties. Only supported for ELF. - GnuProperty, -} - -impl StandardSection { - /// Return the section kind of a standard section. 
- pub fn kind(self) -> SectionKind { - match self { - StandardSection::Text => SectionKind::Text, - StandardSection::Data => SectionKind::Data, - StandardSection::ReadOnlyData => SectionKind::ReadOnlyData, - StandardSection::ReadOnlyDataWithRel => SectionKind::ReadOnlyDataWithRel, - StandardSection::ReadOnlyString => SectionKind::ReadOnlyString, - StandardSection::UninitializedData => SectionKind::UninitializedData, - StandardSection::Tls => SectionKind::Tls, - StandardSection::UninitializedTls => SectionKind::UninitializedTls, - StandardSection::TlsVariables => SectionKind::TlsVariables, - StandardSection::Common => SectionKind::Common, - StandardSection::GnuProperty => SectionKind::Note, - } - } - - // TODO: remembering to update this is error-prone, can we do better? - fn all() -> &'static [StandardSection] { - &[ - StandardSection::Text, - StandardSection::Data, - StandardSection::ReadOnlyData, - StandardSection::ReadOnlyDataWithRel, - StandardSection::ReadOnlyString, - StandardSection::UninitializedData, - StandardSection::Tls, - StandardSection::UninitializedTls, - StandardSection::TlsVariables, - StandardSection::Common, - StandardSection::GnuProperty, - ] - } -} - -/// An identifier used to reference a section. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct SectionId(usize); - -/// A section in an object file. -#[derive(Debug)] -pub struct Section<'a> { - segment: Vec, - name: Vec, - kind: SectionKind, - size: u64, - align: u64, - data: Cow<'a, [u8]>, - relocations: Vec, - symbol: Option, - /// Section flags that are specific to each file format. - pub flags: SectionFlags, -} - -impl<'a> Section<'a> { - /// Try to convert the name to a utf8 string. - #[inline] - pub fn name(&self) -> Option<&str> { - str::from_utf8(&self.name).ok() - } - - /// Try to convert the segment to a utf8 string. 
- #[inline] - pub fn segment(&self) -> Option<&str> { - str::from_utf8(&self.segment).ok() - } - - /// Return true if this section contains zerofill data. - #[inline] - pub fn is_bss(&self) -> bool { - self.kind.is_bss() - } - - /// Set the data for a section. - /// - /// Must not be called for sections that already have data, or that contain uninitialized data. - pub fn set_data(&mut self, data: T, align: u64) - where - T: Into>, - { - debug_assert!(!self.is_bss()); - debug_assert_eq!(align & (align - 1), 0); - debug_assert!(self.data.is_empty()); - self.data = data.into(); - self.size = self.data.len() as u64; - self.align = align; - } - - /// Append data to a section. - /// - /// Must not be called for sections that contain uninitialized data. - pub fn append_data(&mut self, append_data: &[u8], align: u64) -> u64 { - debug_assert!(!self.is_bss()); - debug_assert_eq!(align & (align - 1), 0); - if self.align < align { - self.align = align; - } - let align = align as usize; - let data = self.data.to_mut(); - let mut offset = data.len(); - if offset & (align - 1) != 0 { - offset += align - (offset & (align - 1)); - data.resize(offset, 0); - } - data.extend_from_slice(append_data); - self.size = data.len() as u64; - offset as u64 - } - - /// Append uninitialized data to a section. - /// - /// Must not be called for sections that contain initialized data. - pub fn append_bss(&mut self, size: u64, align: u64) -> u64 { - debug_assert!(self.is_bss()); - debug_assert_eq!(align & (align - 1), 0); - if self.align < align { - self.align = align; - } - let mut offset = self.size; - if offset & (align - 1) != 0 { - offset += align - (offset & (align - 1)); - self.size = offset; - } - self.size += size; - offset - } - - /// Returns the section as-built so far. - /// - /// This requires that the section is not a bss section. - pub fn data(&self) -> &[u8] { - debug_assert!(!self.is_bss()); - &self.data - } - - /// Returns the section as-built so far. 
- /// - /// This requires that the section is not a bss section. - pub fn data_mut(&mut self) -> &mut [u8] { - debug_assert!(!self.is_bss()); - self.data.to_mut() - } -} - -/// The section where a symbol is defined. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum SymbolSection { - /// The section is not applicable for this symbol (such as file symbols). - None, - /// The symbol is undefined. - Undefined, - /// The symbol has an absolute value. - Absolute, - /// The symbol is a zero-initialized symbol that will be combined with duplicate definitions. - Common, - /// The symbol is defined in the given section. - Section(SectionId), -} - -impl SymbolSection { - /// Returns the section id for the section where the symbol is defined. - /// - /// May return `None` if the symbol is not defined in a section. - #[inline] - pub fn id(self) -> Option { - if let SymbolSection::Section(id) = self { - Some(id) - } else { - None - } - } -} - -/// An identifier used to reference a symbol. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct SymbolId(usize); - -/// A symbol in an object file. -#[derive(Debug)] -pub struct Symbol { - /// The name of the symbol. - pub name: Vec, - /// The value of the symbol. - /// - /// If the symbol defined in a section, then this is the section offset of the symbol. - pub value: u64, - /// The size of the symbol. - pub size: u64, - /// The kind of the symbol. - pub kind: SymbolKind, - /// The scope of the symbol. - pub scope: SymbolScope, - /// Whether the symbol has weak binding. - pub weak: bool, - /// The section containing the symbol. - pub section: SymbolSection, - /// Symbol flags that are specific to each file format. - pub flags: SymbolFlags, -} - -impl Symbol { - /// Try to convert the name to a utf8 string. - #[inline] - pub fn name(&self) -> Option<&str> { - str::from_utf8(&self.name).ok() - } - - /// Return true if the symbol is undefined. 
- #[inline] - pub fn is_undefined(&self) -> bool { - self.section == SymbolSection::Undefined - } - - /// Return true if the symbol is common data. - /// - /// Note: does not check for `SymbolSection::Section` with `SectionKind::Common`. - #[inline] - pub fn is_common(&self) -> bool { - self.section == SymbolSection::Common - } - - /// Return true if the symbol scope is local. - #[inline] - pub fn is_local(&self) -> bool { - self.scope == SymbolScope::Compilation - } -} - -/// A relocation in an object file. -#[derive(Debug)] -pub struct Relocation { - /// The section offset of the place of the relocation. - pub offset: u64, - /// The size in bits of the place of relocation. - pub size: u8, - /// The operation used to calculate the result of the relocation. - pub kind: RelocationKind, - /// Information about how the result of the relocation operation is encoded in the place. - pub encoding: RelocationEncoding, - /// The symbol referred to by the relocation. - /// - /// This may be a section symbol. - pub symbol: SymbolId, - /// The addend to use in the relocation calculation. - /// - /// This may be in addition to an implicit addend stored at the place of the relocation. - pub addend: i64, -} - -/// An identifier used to reference a COMDAT section group. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct ComdatId(usize); - -/// A COMDAT section group. -#[derive(Debug)] -pub struct Comdat { - /// The COMDAT selection kind. - /// - /// This determines the way in which the linker resolves multiple definitions of the COMDAT - /// sections. - pub kind: ComdatKind, - /// The COMDAT symbol. - /// - /// If this symbol is referenced, then all sections in the group will be included by the - /// linker. - pub symbol: SymbolId, - /// The sections in the group. - pub sections: Vec, -} - -/// The symbol name mangling scheme. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum Mangling { - /// No symbol mangling. 
- None, - /// Windows COFF symbol mangling. - Coff, - /// Windows COFF i386 symbol mangling. - CoffI386, - /// ELF symbol mangling. - Elf, - /// Mach-O symbol mangling. - MachO, - /// Xcoff symbol mangling. - Xcoff, -} - -impl Mangling { - /// Return the default symboling mangling for the given format and architecture. - pub fn default(format: BinaryFormat, architecture: Architecture) -> Self { - match (format, architecture) { - (BinaryFormat::Coff, Architecture::I386) => Mangling::CoffI386, - (BinaryFormat::Coff, _) => Mangling::Coff, - (BinaryFormat::Elf, _) => Mangling::Elf, - (BinaryFormat::MachO, _) => Mangling::MachO, - (BinaryFormat::Xcoff, _) => Mangling::Xcoff, - _ => Mangling::None, - } - } - - /// Return the prefix to use for global symbols. - pub fn global_prefix(self) -> Option { - match self { - Mangling::None | Mangling::Elf | Mangling::Coff | Mangling::Xcoff => None, - Mangling::CoffI386 | Mangling::MachO => Some(b'_'), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/pe.rs s390-tools-2.33.1/rust-vendor/object/src/write/pe.rs --- s390-tools-2.31.0/rust-vendor/object/src/write/pe.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/pe.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,847 +0,0 @@ -//! Helper for writing PE files. -use alloc::string::String; -use alloc::vec::Vec; -use core::mem; - -use crate::endian::{LittleEndian as LE, *}; -use crate::pe; -use crate::write::util; -use crate::write::{Error, Result, WritableBuffer}; - -/// A helper for writing PE files. -/// -/// Writing uses a two phase approach. The first phase reserves file ranges and virtual -/// address ranges for everything in the order that they will be written. -/// -/// The second phase writes everything out in order. Thus the caller must ensure writing -/// is in the same order that file ranges were reserved. 
-#[allow(missing_debug_implementations)] -pub struct Writer<'a> { - is_64: bool, - section_alignment: u32, - file_alignment: u32, - - buffer: &'a mut dyn WritableBuffer, - len: u32, - virtual_len: u32, - headers_len: u32, - - code_address: u32, - data_address: u32, - code_len: u32, - data_len: u32, - bss_len: u32, - - nt_headers_offset: u32, - data_directories: Vec, - section_header_num: u16, - sections: Vec
, - - symbol_offset: u32, - symbol_num: u32, - - reloc_blocks: Vec, - relocs: Vec>, - reloc_offset: u32, -} - -impl<'a> Writer<'a> { - /// Create a new `Writer`. - pub fn new( - is_64: bool, - section_alignment: u32, - file_alignment: u32, - buffer: &'a mut dyn WritableBuffer, - ) -> Self { - Writer { - is_64, - section_alignment, - file_alignment, - - buffer, - len: 0, - virtual_len: 0, - headers_len: 0, - - code_address: 0, - data_address: 0, - code_len: 0, - data_len: 0, - bss_len: 0, - - nt_headers_offset: 0, - data_directories: Vec::new(), - section_header_num: 0, - sections: Vec::new(), - - symbol_offset: 0, - symbol_num: 0, - - reloc_blocks: Vec::new(), - relocs: Vec::new(), - reloc_offset: 0, - } - } - - /// Return the current virtual address size that has been reserved. - /// - /// This is only valid after section headers have been reserved. - pub fn virtual_len(&self) -> u32 { - self.virtual_len - } - - /// Reserve a virtual address range with the given size. - /// - /// The reserved length will be increased to match the section alignment. - /// - /// Returns the aligned offset of the start of the range. - pub fn reserve_virtual(&mut self, len: u32) -> u32 { - let offset = self.virtual_len; - self.virtual_len += len; - self.virtual_len = util::align_u32(self.virtual_len, self.section_alignment); - offset - } - - /// Reserve up to the given virtual address. - /// - /// The reserved length will be increased to match the section alignment. - pub fn reserve_virtual_until(&mut self, address: u32) { - debug_assert!(self.virtual_len <= address); - self.virtual_len = util::align_u32(address, self.section_alignment); - } - - /// Return the current file length that has been reserved. - pub fn reserved_len(&self) -> u32 { - self.len - } - - /// Return the current file length that has been written. - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - self.buffer.len() - } - - /// Reserve a file range with the given size and starting alignment. 
- /// - /// Returns the aligned offset of the start of the range. - pub fn reserve(&mut self, len: u32, align_start: u32) -> u32 { - if len == 0 { - return self.len; - } - self.reserve_align(align_start); - let offset = self.len; - self.len += len; - offset - } - - /// Reserve a file range with the given size and using the file alignment. - /// - /// Returns the aligned offset of the start of the range. - pub fn reserve_file(&mut self, len: u32) -> u32 { - self.reserve(len, self.file_alignment) - } - - /// Write data. - pub fn write(&mut self, data: &[u8]) { - self.buffer.write_bytes(data); - } - - /// Reserve alignment padding bytes. - pub fn reserve_align(&mut self, align_start: u32) { - self.len = util::align_u32(self.len, align_start); - } - - /// Write alignment padding bytes. - pub fn write_align(&mut self, align_start: u32) { - util::write_align(self.buffer, align_start as usize); - } - - /// Write padding up to the next multiple of file alignment. - pub fn write_file_align(&mut self) { - self.write_align(self.file_alignment); - } - - /// Reserve the file range up to the given file offset. - pub fn reserve_until(&mut self, offset: u32) { - debug_assert!(self.len <= offset); - self.len = offset; - } - - /// Write padding up to the given file offset. - pub fn pad_until(&mut self, offset: u32) { - debug_assert!(self.buffer.len() <= offset as usize); - self.buffer.resize(offset as usize); - } - - /// Reserve the range for the DOS header. - /// - /// This must be at the start of the file. - /// - /// When writing, you may use `write_custom_dos_header` or `write_empty_dos_header`. - pub fn reserve_dos_header(&mut self) { - debug_assert_eq!(self.len, 0); - self.reserve(mem::size_of::() as u32, 1); - } - - /// Write a custom DOS header. - /// - /// This must be at the start of the file. - pub fn write_custom_dos_header(&mut self, dos_header: &pe::ImageDosHeader) -> Result<()> { - debug_assert_eq!(self.buffer.len(), 0); - - // Start writing. 
- self.buffer - .reserve(self.len as usize) - .map_err(|_| Error(String::from("Cannot allocate buffer")))?; - - self.buffer.write(dos_header); - Ok(()) - } - - /// Write the DOS header for a file without a stub. - /// - /// This must be at the start of the file. - /// - /// Uses default values for all fields. - pub fn write_empty_dos_header(&mut self) -> Result<()> { - self.write_custom_dos_header(&pe::ImageDosHeader { - e_magic: U16::new(LE, pe::IMAGE_DOS_SIGNATURE), - e_cblp: U16::new(LE, 0), - e_cp: U16::new(LE, 0), - e_crlc: U16::new(LE, 0), - e_cparhdr: U16::new(LE, 0), - e_minalloc: U16::new(LE, 0), - e_maxalloc: U16::new(LE, 0), - e_ss: U16::new(LE, 0), - e_sp: U16::new(LE, 0), - e_csum: U16::new(LE, 0), - e_ip: U16::new(LE, 0), - e_cs: U16::new(LE, 0), - e_lfarlc: U16::new(LE, 0), - e_ovno: U16::new(LE, 0), - e_res: [U16::new(LE, 0); 4], - e_oemid: U16::new(LE, 0), - e_oeminfo: U16::new(LE, 0), - e_res2: [U16::new(LE, 0); 10], - e_lfanew: U32::new(LE, self.nt_headers_offset), - }) - } - - /// Reserve a fixed DOS header and stub. - /// - /// Use `reserve_dos_header` and `reserve` if you need a custom stub. - pub fn reserve_dos_header_and_stub(&mut self) { - self.reserve_dos_header(); - self.reserve(64, 1); - } - - /// Write a fixed DOS header and stub. - /// - /// Use `write_custom_dos_header` and `write` if you need a custom stub. 
- pub fn write_dos_header_and_stub(&mut self) -> Result<()> { - self.write_custom_dos_header(&pe::ImageDosHeader { - e_magic: U16::new(LE, pe::IMAGE_DOS_SIGNATURE), - e_cblp: U16::new(LE, 0x90), - e_cp: U16::new(LE, 3), - e_crlc: U16::new(LE, 0), - e_cparhdr: U16::new(LE, 4), - e_minalloc: U16::new(LE, 0), - e_maxalloc: U16::new(LE, 0xffff), - e_ss: U16::new(LE, 0), - e_sp: U16::new(LE, 0xb8), - e_csum: U16::new(LE, 0), - e_ip: U16::new(LE, 0), - e_cs: U16::new(LE, 0), - e_lfarlc: U16::new(LE, 0x40), - e_ovno: U16::new(LE, 0), - e_res: [U16::new(LE, 0); 4], - e_oemid: U16::new(LE, 0), - e_oeminfo: U16::new(LE, 0), - e_res2: [U16::new(LE, 0); 10], - e_lfanew: U32::new(LE, self.nt_headers_offset), - })?; - - #[rustfmt::skip] - self.buffer.write_bytes(&[ - 0x0e, 0x1f, 0xba, 0x0e, 0x00, 0xb4, 0x09, 0xcd, - 0x21, 0xb8, 0x01, 0x4c, 0xcd, 0x21, 0x54, 0x68, - 0x69, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x67, 0x72, - 0x61, 0x6d, 0x20, 0x63, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x75, 0x6e, - 0x20, 0x69, 0x6e, 0x20, 0x44, 0x4f, 0x53, 0x20, - 0x6d, 0x6f, 0x64, 0x65, 0x2e, 0x0d, 0x0d, 0x0a, - 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - ]); - - Ok(()) - } - - fn nt_headers_size(&self) -> u32 { - if self.is_64 { - mem::size_of::() as u32 - } else { - mem::size_of::() as u32 - } - } - - fn optional_header_size(&self) -> u32 { - let size = if self.is_64 { - mem::size_of::() as u32 - } else { - mem::size_of::() as u32 - }; - size + self.data_directories.len() as u32 * mem::size_of::() as u32 - } - - /// Return the offset of the NT headers, if reserved. - pub fn nt_headers_offset(&self) -> u32 { - self.nt_headers_offset - } - - /// Reserve the range for the NT headers. 
- pub fn reserve_nt_headers(&mut self, data_directory_num: usize) { - debug_assert_eq!(self.nt_headers_offset, 0); - self.nt_headers_offset = self.reserve(self.nt_headers_size(), 8); - self.data_directories = vec![DataDirectory::default(); data_directory_num]; - self.reserve( - data_directory_num as u32 * mem::size_of::() as u32, - 1, - ); - } - - /// Set the virtual address and size of a data directory. - pub fn set_data_directory(&mut self, index: usize, virtual_address: u32, size: u32) { - self.data_directories[index] = DataDirectory { - virtual_address, - size, - } - } - - /// Write the NT headers. - pub fn write_nt_headers(&mut self, nt_headers: NtHeaders) { - self.pad_until(self.nt_headers_offset); - self.buffer.write(&U32::new(LE, pe::IMAGE_NT_SIGNATURE)); - let file_header = pe::ImageFileHeader { - machine: U16::new(LE, nt_headers.machine), - number_of_sections: U16::new(LE, self.section_header_num), - time_date_stamp: U32::new(LE, nt_headers.time_date_stamp), - pointer_to_symbol_table: U32::new(LE, self.symbol_offset), - number_of_symbols: U32::new(LE, self.symbol_num), - size_of_optional_header: U16::new(LE, self.optional_header_size() as u16), - characteristics: U16::new(LE, nt_headers.characteristics), - }; - self.buffer.write(&file_header); - if self.is_64 { - let optional_header = pe::ImageOptionalHeader64 { - magic: U16::new(LE, pe::IMAGE_NT_OPTIONAL_HDR64_MAGIC), - major_linker_version: nt_headers.major_linker_version, - minor_linker_version: nt_headers.minor_linker_version, - size_of_code: U32::new(LE, self.code_len), - size_of_initialized_data: U32::new(LE, self.data_len), - size_of_uninitialized_data: U32::new(LE, self.bss_len), - address_of_entry_point: U32::new(LE, nt_headers.address_of_entry_point), - base_of_code: U32::new(LE, self.code_address), - image_base: U64::new(LE, nt_headers.image_base), - section_alignment: U32::new(LE, self.section_alignment), - file_alignment: U32::new(LE, self.file_alignment), - major_operating_system_version: 
U16::new( - LE, - nt_headers.major_operating_system_version, - ), - minor_operating_system_version: U16::new( - LE, - nt_headers.minor_operating_system_version, - ), - major_image_version: U16::new(LE, nt_headers.major_image_version), - minor_image_version: U16::new(LE, nt_headers.minor_image_version), - major_subsystem_version: U16::new(LE, nt_headers.major_subsystem_version), - minor_subsystem_version: U16::new(LE, nt_headers.minor_subsystem_version), - win32_version_value: U32::new(LE, 0), - size_of_image: U32::new(LE, self.virtual_len), - size_of_headers: U32::new(LE, self.headers_len), - check_sum: U32::new(LE, 0), - subsystem: U16::new(LE, nt_headers.subsystem), - dll_characteristics: U16::new(LE, nt_headers.dll_characteristics), - size_of_stack_reserve: U64::new(LE, nt_headers.size_of_stack_reserve), - size_of_stack_commit: U64::new(LE, nt_headers.size_of_stack_commit), - size_of_heap_reserve: U64::new(LE, nt_headers.size_of_heap_reserve), - size_of_heap_commit: U64::new(LE, nt_headers.size_of_heap_commit), - loader_flags: U32::new(LE, 0), - number_of_rva_and_sizes: U32::new(LE, self.data_directories.len() as u32), - }; - self.buffer.write(&optional_header); - } else { - let optional_header = pe::ImageOptionalHeader32 { - magic: U16::new(LE, pe::IMAGE_NT_OPTIONAL_HDR32_MAGIC), - major_linker_version: nt_headers.major_linker_version, - minor_linker_version: nt_headers.minor_linker_version, - size_of_code: U32::new(LE, self.code_len), - size_of_initialized_data: U32::new(LE, self.data_len), - size_of_uninitialized_data: U32::new(LE, self.bss_len), - address_of_entry_point: U32::new(LE, nt_headers.address_of_entry_point), - base_of_code: U32::new(LE, self.code_address), - base_of_data: U32::new(LE, self.data_address), - image_base: U32::new(LE, nt_headers.image_base as u32), - section_alignment: U32::new(LE, self.section_alignment), - file_alignment: U32::new(LE, self.file_alignment), - major_operating_system_version: U16::new( - LE, - 
nt_headers.major_operating_system_version, - ), - minor_operating_system_version: U16::new( - LE, - nt_headers.minor_operating_system_version, - ), - major_image_version: U16::new(LE, nt_headers.major_image_version), - minor_image_version: U16::new(LE, nt_headers.minor_image_version), - major_subsystem_version: U16::new(LE, nt_headers.major_subsystem_version), - minor_subsystem_version: U16::new(LE, nt_headers.minor_subsystem_version), - win32_version_value: U32::new(LE, 0), - size_of_image: U32::new(LE, self.virtual_len), - size_of_headers: U32::new(LE, self.headers_len), - check_sum: U32::new(LE, 0), - subsystem: U16::new(LE, nt_headers.subsystem), - dll_characteristics: U16::new(LE, nt_headers.dll_characteristics), - size_of_stack_reserve: U32::new(LE, nt_headers.size_of_stack_reserve as u32), - size_of_stack_commit: U32::new(LE, nt_headers.size_of_stack_commit as u32), - size_of_heap_reserve: U32::new(LE, nt_headers.size_of_heap_reserve as u32), - size_of_heap_commit: U32::new(LE, nt_headers.size_of_heap_commit as u32), - loader_flags: U32::new(LE, 0), - number_of_rva_and_sizes: U32::new(LE, self.data_directories.len() as u32), - }; - self.buffer.write(&optional_header); - } - - for dir in &self.data_directories { - self.buffer.write(&pe::ImageDataDirectory { - virtual_address: U32::new(LE, dir.virtual_address), - size: U32::new(LE, dir.size), - }) - } - } - - /// Reserve the section headers. - /// - /// The number of reserved section headers must be the same as the number of sections that - /// are later reserved. - // TODO: change this to a maximum number of sections? - pub fn reserve_section_headers(&mut self, section_header_num: u16) { - debug_assert_eq!(self.section_header_num, 0); - self.section_header_num = section_header_num; - self.reserve( - u32::from(section_header_num) * mem::size_of::() as u32, - 1, - ); - // Padding before sections must be included in headers_len. 
- self.reserve_align(self.file_alignment); - self.headers_len = self.len; - self.reserve_virtual(self.len); - } - - /// Write the section headers. - /// - /// This uses information that was recorded when the sections were reserved. - pub fn write_section_headers(&mut self) { - debug_assert_eq!(self.section_header_num as usize, self.sections.len()); - for section in &self.sections { - let section_header = pe::ImageSectionHeader { - name: section.name, - virtual_size: U32::new(LE, section.range.virtual_size), - virtual_address: U32::new(LE, section.range.virtual_address), - size_of_raw_data: U32::new(LE, section.range.file_size), - pointer_to_raw_data: U32::new(LE, section.range.file_offset), - pointer_to_relocations: U32::new(LE, 0), - pointer_to_linenumbers: U32::new(LE, 0), - number_of_relocations: U16::new(LE, 0), - number_of_linenumbers: U16::new(LE, 0), - characteristics: U32::new(LE, section.characteristics), - }; - self.buffer.write(§ion_header); - } - } - - /// Reserve a section. - /// - /// Returns the file range and virtual address range that are reserved - /// for the section. - pub fn reserve_section( - &mut self, - name: [u8; 8], - characteristics: u32, - virtual_size: u32, - data_size: u32, - ) -> SectionRange { - let virtual_address = self.reserve_virtual(virtual_size); - - // Padding after section must be included in section file size. - let file_size = util::align_u32(data_size, self.file_alignment); - let file_offset = if file_size != 0 { - self.reserve(file_size, self.file_alignment) - } else { - 0 - }; - - // Sizes in optional header use the virtual size with the file alignment. 
- let aligned_virtual_size = util::align_u32(virtual_size, self.file_alignment); - if characteristics & pe::IMAGE_SCN_CNT_CODE != 0 { - if self.code_address == 0 { - self.code_address = virtual_address; - } - self.code_len += aligned_virtual_size; - } else if characteristics & pe::IMAGE_SCN_CNT_INITIALIZED_DATA != 0 { - if self.data_address == 0 { - self.data_address = virtual_address; - } - self.data_len += aligned_virtual_size; - } else if characteristics & pe::IMAGE_SCN_CNT_UNINITIALIZED_DATA != 0 { - if self.data_address == 0 { - self.data_address = virtual_address; - } - self.bss_len += aligned_virtual_size; - } - - let range = SectionRange { - virtual_address, - virtual_size, - file_offset, - file_size, - }; - self.sections.push(Section { - name, - characteristics, - range, - }); - range - } - - /// Write the data for a section. - pub fn write_section(&mut self, offset: u32, data: &[u8]) { - if data.is_empty() { - return; - } - self.pad_until(offset); - self.write(data); - self.write_align(self.file_alignment); - } - - /// Reserve a `.text` section. - /// - /// Contains executable code. - pub fn reserve_text_section(&mut self, size: u32) -> SectionRange { - self.reserve_section( - *b".text\0\0\0", - pe::IMAGE_SCN_CNT_CODE | pe::IMAGE_SCN_MEM_EXECUTE | pe::IMAGE_SCN_MEM_READ, - size, - size, - ) - } - - /// Reserve a `.data` section. - /// - /// Contains initialized data. - /// - /// May also contain uninitialized data if `virtual_size` is greater than `data_size`. - pub fn reserve_data_section(&mut self, virtual_size: u32, data_size: u32) -> SectionRange { - self.reserve_section( - *b".data\0\0\0", - pe::IMAGE_SCN_CNT_INITIALIZED_DATA | pe::IMAGE_SCN_MEM_READ | pe::IMAGE_SCN_MEM_WRITE, - virtual_size, - data_size, - ) - } - - /// Reserve a `.rdata` section. - /// - /// Contains read-only initialized data. 
- pub fn reserve_rdata_section(&mut self, size: u32) -> SectionRange { - self.reserve_section( - *b".rdata\0\0", - pe::IMAGE_SCN_CNT_INITIALIZED_DATA | pe::IMAGE_SCN_MEM_READ, - size, - size, - ) - } - - /// Reserve a `.bss` section. - /// - /// Contains uninitialized data. - pub fn reserve_bss_section(&mut self, size: u32) -> SectionRange { - self.reserve_section( - *b".bss\0\0\0\0", - pe::IMAGE_SCN_CNT_UNINITIALIZED_DATA | pe::IMAGE_SCN_MEM_READ | pe::IMAGE_SCN_MEM_WRITE, - size, - 0, - ) - } - - /// Reserve an `.idata` section. - /// - /// Contains import tables. Note that it is permissible to store import tables in a different - /// section. - /// - /// This also sets the `pe::IMAGE_DIRECTORY_ENTRY_IMPORT` data directory. - pub fn reserve_idata_section(&mut self, size: u32) -> SectionRange { - let range = self.reserve_section( - *b".idata\0\0", - pe::IMAGE_SCN_CNT_INITIALIZED_DATA | pe::IMAGE_SCN_MEM_READ | pe::IMAGE_SCN_MEM_WRITE, - size, - size, - ); - let dir = &mut self.data_directories[pe::IMAGE_DIRECTORY_ENTRY_IMPORT]; - debug_assert_eq!(dir.virtual_address, 0); - *dir = DataDirectory { - virtual_address: range.virtual_address, - size, - }; - range - } - - /// Reserve an `.edata` section. - /// - /// Contains export tables. - /// - /// This also sets the `pe::IMAGE_DIRECTORY_ENTRY_EXPORT` data directory. - pub fn reserve_edata_section(&mut self, size: u32) -> SectionRange { - let range = self.reserve_section( - *b".edata\0\0", - pe::IMAGE_SCN_CNT_INITIALIZED_DATA | pe::IMAGE_SCN_MEM_READ, - size, - size, - ); - let dir = &mut self.data_directories[pe::IMAGE_DIRECTORY_ENTRY_EXPORT]; - debug_assert_eq!(dir.virtual_address, 0); - *dir = DataDirectory { - virtual_address: range.virtual_address, - size, - }; - range - } - - /// Reserve a `.pdata` section. - /// - /// Contains exception information. - /// - /// This also sets the `pe::IMAGE_DIRECTORY_ENTRY_EXCEPTION` data directory. 
- pub fn reserve_pdata_section(&mut self, size: u32) -> SectionRange { - let range = self.reserve_section( - *b".pdata\0\0", - pe::IMAGE_SCN_CNT_INITIALIZED_DATA | pe::IMAGE_SCN_MEM_READ, - size, - size, - ); - let dir = &mut self.data_directories[pe::IMAGE_DIRECTORY_ENTRY_EXCEPTION]; - debug_assert_eq!(dir.virtual_address, 0); - *dir = DataDirectory { - virtual_address: range.virtual_address, - size, - }; - range - } - - /// Reserve a `.xdata` section. - /// - /// Contains exception information. - pub fn reserve_xdata_section(&mut self, size: u32) -> SectionRange { - self.reserve_section( - *b".xdata\0\0", - pe::IMAGE_SCN_CNT_INITIALIZED_DATA | pe::IMAGE_SCN_MEM_READ, - size, - size, - ) - } - - /// Reserve a `.rsrc` section. - /// - /// Contains the resource directory. - /// - /// This also sets the `pe::IMAGE_DIRECTORY_ENTRY_RESOURCE` data directory. - pub fn reserve_rsrc_section(&mut self, size: u32) -> SectionRange { - let range = self.reserve_section( - *b".rsrc\0\0\0", - pe::IMAGE_SCN_CNT_INITIALIZED_DATA | pe::IMAGE_SCN_MEM_READ, - size, - size, - ); - let dir = &mut self.data_directories[pe::IMAGE_DIRECTORY_ENTRY_RESOURCE]; - debug_assert_eq!(dir.virtual_address, 0); - *dir = DataDirectory { - virtual_address: range.virtual_address, - size, - }; - range - } - - /// Add a base relocation. - /// - /// `typ` must be one of the `IMAGE_REL_BASED_*` constants. - pub fn add_reloc(&mut self, mut virtual_address: u32, typ: u16) { - let reloc = U16::new(LE, typ << 12 | (virtual_address & 0xfff) as u16); - virtual_address &= !0xfff; - if let Some(block) = self.reloc_blocks.last_mut() { - if block.virtual_address == virtual_address { - self.relocs.push(reloc); - block.count += 1; - return; - } - // Blocks must have an even number of relocations. 
- if block.count & 1 != 0 { - self.relocs.push(U16::new(LE, 0)); - block.count += 1; - } - debug_assert!(block.virtual_address < virtual_address); - } - self.relocs.push(reloc); - self.reloc_blocks.push(RelocBlock { - virtual_address, - count: 1, - }); - } - - /// Return true if a base relocation has been added. - pub fn has_relocs(&mut self) -> bool { - !self.relocs.is_empty() - } - - /// Reserve a `.reloc` section. - /// - /// This contains the base relocations that were added with `add_reloc`. - /// - /// This also sets the `pe::IMAGE_DIRECTORY_ENTRY_BASERELOC` data directory. - pub fn reserve_reloc_section(&mut self) -> SectionRange { - if let Some(block) = self.reloc_blocks.last_mut() { - // Blocks must have an even number of relocations. - if block.count & 1 != 0 { - self.relocs.push(U16::new(LE, 0)); - block.count += 1; - } - } - let size = self.reloc_blocks.iter().map(RelocBlock::size).sum(); - let range = self.reserve_section( - *b".reloc\0\0", - pe::IMAGE_SCN_CNT_INITIALIZED_DATA - | pe::IMAGE_SCN_MEM_READ - | pe::IMAGE_SCN_MEM_DISCARDABLE, - size, - size, - ); - let dir = &mut self.data_directories[pe::IMAGE_DIRECTORY_ENTRY_BASERELOC]; - debug_assert_eq!(dir.virtual_address, 0); - *dir = DataDirectory { - virtual_address: range.virtual_address, - size, - }; - self.reloc_offset = range.file_offset; - range - } - - /// Write a `.reloc` section. - /// - /// This contains the base relocations that were added with `add_reloc`. 
- pub fn write_reloc_section(&mut self) { - if self.reloc_offset == 0 { - return; - } - self.pad_until(self.reloc_offset); - - let mut total = 0; - for block in &self.reloc_blocks { - self.buffer.write(&pe::ImageBaseRelocation { - virtual_address: U32::new(LE, block.virtual_address), - size_of_block: U32::new(LE, block.size()), - }); - self.buffer - .write_slice(&self.relocs[total..][..block.count as usize]); - total += block.count as usize; - } - debug_assert_eq!(total, self.relocs.len()); - - self.write_align(self.file_alignment); - } - - /// Reserve the certificate table. - /// - /// This also sets the `pe::IMAGE_DIRECTORY_ENTRY_SECURITY` data directory. - // TODO: reserve individual certificates - pub fn reserve_certificate_table(&mut self, size: u32) { - let size = util::align_u32(size, 8); - let offset = self.reserve(size, 8); - let dir = &mut self.data_directories[pe::IMAGE_DIRECTORY_ENTRY_SECURITY]; - debug_assert_eq!(dir.virtual_address, 0); - *dir = DataDirectory { - virtual_address: offset, - size, - }; - } - - /// Write the certificate table. - // TODO: write individual certificates - pub fn write_certificate_table(&mut self, data: &[u8]) { - let dir = self.data_directories[pe::IMAGE_DIRECTORY_ENTRY_SECURITY]; - self.pad_until(dir.virtual_address); - self.write(data); - self.pad_until(dir.virtual_address + dir.size); - } -} - -/// Information required for writing [`pe::ImageNtHeaders32`] or [`pe::ImageNtHeaders64`]. 
-#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct NtHeaders { - // ImageFileHeader - pub machine: u16, - pub time_date_stamp: u32, - pub characteristics: u16, - // ImageOptionalHeader - pub major_linker_version: u8, - pub minor_linker_version: u8, - pub address_of_entry_point: u32, - pub image_base: u64, - pub major_operating_system_version: u16, - pub minor_operating_system_version: u16, - pub major_image_version: u16, - pub minor_image_version: u16, - pub major_subsystem_version: u16, - pub minor_subsystem_version: u16, - pub subsystem: u16, - pub dll_characteristics: u16, - pub size_of_stack_reserve: u64, - pub size_of_stack_commit: u64, - pub size_of_heap_reserve: u64, - pub size_of_heap_commit: u64, -} - -#[derive(Default, Clone, Copy)] -struct DataDirectory { - virtual_address: u32, - size: u32, -} - -/// Information required for writing [`pe::ImageSectionHeader`]. -#[allow(missing_docs)] -#[derive(Debug, Clone)] -pub struct Section { - pub name: [u8; pe::IMAGE_SIZEOF_SHORT_NAME], - pub characteristics: u32, - pub range: SectionRange, -} - -/// The file range and virtual address range for a section. 
-#[allow(missing_docs)] -#[derive(Debug, Default, Clone, Copy)] -pub struct SectionRange { - pub virtual_address: u32, - pub virtual_size: u32, - pub file_offset: u32, - pub file_size: u32, -} - -struct RelocBlock { - virtual_address: u32, - count: u32, -} - -impl RelocBlock { - fn size(&self) -> u32 { - mem::size_of::() as u32 + self.count * mem::size_of::() as u32 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/string.rs s390-tools-2.33.1/rust-vendor/object/src/write/string.rs --- s390-tools-2.31.0/rust-vendor/object/src/write/string.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/string.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,159 +0,0 @@ -use alloc::vec::Vec; - -#[cfg(feature = "std")] -type IndexSet = indexmap::IndexSet; -#[cfg(not(feature = "std"))] -type IndexSet = indexmap::IndexSet; - -/// An identifier for an entry in a string table. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct StringId(usize); - -#[derive(Debug, Default)] -pub(crate) struct StringTable<'a> { - strings: IndexSet<&'a [u8]>, - offsets: Vec, -} - -impl<'a> StringTable<'a> { - /// Add a string to the string table. - /// - /// Panics if the string table has already been written, or - /// if the string contains a null byte. - pub fn add(&mut self, string: &'a [u8]) -> StringId { - assert!(self.offsets.is_empty()); - assert!(!string.contains(&0)); - let id = self.strings.insert_full(string).0; - StringId(id) - } - - /// Return the id of the given string. - /// - /// Panics if the string is not in the string table. - pub fn get_id(&self, string: &[u8]) -> StringId { - let id = self.strings.get_index_of(string).unwrap(); - StringId(id) - } - - /// Return the string for the given id. - /// - /// Panics if the string is not in the string table. - pub fn get_string(&self, id: StringId) -> &'a [u8] { - self.strings.get_index(id.0).unwrap() - } - - /// Return the offset of the given string. 
- /// - /// Panics if the string table has not been written, or - /// if the string is not in the string table. - pub fn get_offset(&self, id: StringId) -> usize { - self.offsets[id.0] - } - - /// Append the string table to the given `Vec`, and - /// calculate the list of string offsets. - /// - /// `base` is the initial string table offset. For example, - /// this should be 1 for ELF, to account for the initial - /// null byte (which must have been written by the caller). - pub fn write(&mut self, base: usize, w: &mut Vec) { - assert!(self.offsets.is_empty()); - - let mut ids: Vec<_> = (0..self.strings.len()).collect(); - sort(&mut ids, 1, &self.strings); - - self.offsets = vec![0; ids.len()]; - let mut offset = base; - let mut previous = &[][..]; - for id in ids { - let string = self.strings.get_index(id).unwrap(); - if previous.ends_with(string) { - self.offsets[id] = offset - string.len() - 1; - } else { - self.offsets[id] = offset; - w.extend_from_slice(string); - w.push(0); - offset += string.len() + 1; - previous = string; - } - } - } -} - -// Multi-key quicksort. -// -// Ordering is such that if a string is a suffix of at least one other string, -// then it is placed immediately after one of those strings. That is: -// - comparison starts at the end of the string -// - shorter strings come later -// -// Based on the implementation in LLVM. 
-fn sort(mut ids: &mut [usize], mut pos: usize, strings: &IndexSet<&[u8]>) { - loop { - if ids.len() <= 1 { - return; - } - - let pivot = byte(ids[0], pos, strings); - let mut lower = 0; - let mut upper = ids.len(); - let mut i = 1; - while i < upper { - let b = byte(ids[i], pos, strings); - if b > pivot { - ids.swap(lower, i); - lower += 1; - i += 1; - } else if b < pivot { - upper -= 1; - ids.swap(upper, i); - } else { - i += 1; - } - } - - sort(&mut ids[..lower], pos, strings); - sort(&mut ids[upper..], pos, strings); - - if pivot == 0 { - return; - } - ids = &mut ids[lower..upper]; - pos += 1; - } -} - -fn byte(id: usize, pos: usize, strings: &IndexSet<&[u8]>) -> u8 { - let string = strings.get_index(id).unwrap(); - let len = string.len(); - if len >= pos { - string[len - pos] - } else { - // We know the strings don't contain null bytes. - 0 - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn string_table() { - let mut table = StringTable::default(); - let id0 = table.add(b""); - let id1 = table.add(b"foo"); - let id2 = table.add(b"bar"); - let id3 = table.add(b"foobar"); - - let mut data = Vec::new(); - data.push(0); - table.write(1, &mut data); - assert_eq!(data, b"\0foobar\0foo\0"); - - assert_eq!(table.get_offset(id0), 11); - assert_eq!(table.get_offset(id1), 8); - assert_eq!(table.get_offset(id2), 4); - assert_eq!(table.get_offset(id3), 1); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/util.rs s390-tools-2.33.1/rust-vendor/object/src/write/util.rs --- s390-tools-2.31.0/rust-vendor/object/src/write/util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/util.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,260 +0,0 @@ -use alloc::vec::Vec; -#[cfg(feature = "std")] -use std::{io, mem}; - -use crate::pod::{bytes_of, bytes_of_slice, Pod}; - -/// Trait for writable buffer. 
-#[allow(clippy::len_without_is_empty)] -pub trait WritableBuffer { - /// Returns position/offset for data to be written at. - /// - /// Should only be used in debug assertions - fn len(&self) -> usize; - - /// Reserves specified number of bytes in the buffer. - /// - /// This will be called exactly once before writing anything to the buffer, - /// and the given size is the exact total number of bytes that will be written. - fn reserve(&mut self, size: usize) -> Result<(), ()>; - - /// Writes zero bytes at the end of the buffer until the buffer - /// has the specified length. - fn resize(&mut self, new_len: usize); - - /// Writes the specified slice of bytes at the end of the buffer. - fn write_bytes(&mut self, val: &[u8]); - - /// Writes the specified `Pod` type at the end of the buffer. - fn write_pod(&mut self, val: &T) - where - Self: Sized, - { - self.write_bytes(bytes_of(val)) - } - - /// Writes the specified `Pod` slice at the end of the buffer. - fn write_pod_slice(&mut self, val: &[T]) - where - Self: Sized, - { - self.write_bytes(bytes_of_slice(val)) - } -} - -impl<'a> dyn WritableBuffer + 'a { - /// Writes the specified `Pod` type at the end of the buffer. - pub fn write(&mut self, val: &T) { - self.write_bytes(bytes_of(val)) - } - - /// Writes the specified `Pod` slice at the end of the buffer. 
- pub fn write_slice(&mut self, val: &[T]) { - self.write_bytes(bytes_of_slice(val)) - } -} - -impl WritableBuffer for Vec { - #[inline] - fn len(&self) -> usize { - self.len() - } - - #[inline] - fn reserve(&mut self, size: usize) -> Result<(), ()> { - debug_assert!(self.is_empty()); - self.reserve(size); - Ok(()) - } - - #[inline] - fn resize(&mut self, new_len: usize) { - debug_assert!(new_len >= self.len()); - self.resize(new_len, 0); - } - - #[inline] - fn write_bytes(&mut self, val: &[u8]) { - debug_assert!(self.len() + val.len() <= self.capacity()); - self.extend_from_slice(val) - } -} - -/// A [`WritableBuffer`] that streams data to a [`Write`](std::io::Write) implementation. -/// -/// [`Self::result`] must be called to determine if an I/O error occurred during writing. -/// -/// It is advisable to use a buffered writer like [`BufWriter`](std::io::BufWriter) -/// instead of an unbuffered writer like [`File`](std::fs::File). -#[cfg(feature = "std")] -#[derive(Debug)] -pub struct StreamingBuffer { - writer: W, - len: usize, - result: Result<(), io::Error>, -} - -#[cfg(feature = "std")] -impl StreamingBuffer { - /// Create a new `StreamingBuffer` backed by the given writer. - pub fn new(writer: W) -> Self { - StreamingBuffer { - writer, - len: 0, - result: Ok(()), - } - } - - /// Unwraps this [`StreamingBuffer`] giving back the original writer. - pub fn into_inner(self) -> W { - self.writer - } - - /// Returns any error that occurred during writing. 
- pub fn result(&mut self) -> Result<(), io::Error> { - mem::replace(&mut self.result, Ok(())) - } -} - -#[cfg(feature = "std")] -impl WritableBuffer for StreamingBuffer { - #[inline] - fn len(&self) -> usize { - self.len - } - - #[inline] - fn reserve(&mut self, _size: usize) -> Result<(), ()> { - Ok(()) - } - - #[inline] - fn resize(&mut self, new_len: usize) { - debug_assert!(self.len <= new_len); - while self.len < new_len { - let write_amt = (new_len - self.len - 1) % 1024 + 1; - self.write_bytes(&[0; 1024][..write_amt]); - } - } - - #[inline] - fn write_bytes(&mut self, val: &[u8]) { - if self.result.is_ok() { - self.result = self.writer.write_all(val); - } - self.len += val.len(); - } -} - -/// A trait for mutable byte slices. -/// -/// It provides convenience methods for `Pod` types. -pub(crate) trait BytesMut { - fn write_at(self, offset: usize, val: &T) -> Result<(), ()>; -} - -impl<'a> BytesMut for &'a mut [u8] { - #[inline] - fn write_at(self, offset: usize, val: &T) -> Result<(), ()> { - let src = bytes_of(val); - let dest = self.get_mut(offset..).ok_or(())?; - let dest = dest.get_mut(..src.len()).ok_or(())?; - dest.copy_from_slice(src); - Ok(()) - } -} - -/// Write an unsigned number using the LEB128 encoding to a buffer. -/// -/// Returns the number of bytes written. -pub(crate) fn write_uleb128(buf: &mut Vec, mut val: u64) -> usize { - let mut len = 0; - loop { - let mut byte = (val & 0x7f) as u8; - val >>= 7; - let done = val == 0; - if !done { - byte |= 0x80; - } - - buf.push(byte); - len += 1; - - if done { - return len; - } - } -} - -/// Write a signed number using the LEB128 encoding to a buffer. -/// -/// Returns the number of bytes written. 
-#[allow(dead_code)] -pub(crate) fn write_sleb128(buf: &mut Vec, mut val: i64) -> usize { - let mut len = 0; - loop { - let mut byte = val as u8; - // Keep the sign bit for testing - val >>= 6; - let done = val == 0 || val == -1; - if done { - byte &= !0x80; - } else { - // Remove the sign bit - val >>= 1; - byte |= 0x80; - } - - buf.push(byte); - len += 1; - - if done { - return len; - } - } -} - -pub(crate) fn align(offset: usize, size: usize) -> usize { - (offset + (size - 1)) & !(size - 1) -} - -#[allow(dead_code)] -pub(crate) fn align_u32(offset: u32, size: u32) -> u32 { - (offset + (size - 1)) & !(size - 1) -} - -#[allow(dead_code)] -pub(crate) fn align_u64(offset: u64, size: u64) -> u64 { - (offset + (size - 1)) & !(size - 1) -} - -pub(crate) fn write_align(buffer: &mut dyn WritableBuffer, size: usize) { - let new_len = align(buffer.len(), size); - buffer.resize(new_len); -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn bytes_mut() { - let data = vec![0x01, 0x23, 0x45, 0x67]; - - let mut bytes = data.clone(); - bytes.extend_from_slice(bytes_of(&u16::to_be(0x89ab))); - assert_eq!(bytes, [0x01, 0x23, 0x45, 0x67, 0x89, 0xab]); - - let mut bytes = data.clone(); - assert_eq!(bytes.write_at(0, &u16::to_be(0x89ab)), Ok(())); - assert_eq!(bytes, [0x89, 0xab, 0x45, 0x67]); - - let mut bytes = data.clone(); - assert_eq!(bytes.write_at(2, &u16::to_be(0x89ab)), Ok(())); - assert_eq!(bytes, [0x01, 0x23, 0x89, 0xab]); - - assert_eq!(bytes.write_at(3, &u16::to_be(0x89ab)), Err(())); - assert_eq!(bytes.write_at(4, &u16::to_be(0x89ab)), Err(())); - assert_eq!(vec![].write_at(0, &u32::to_be(0x89ab)), Err(())); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/write/xcoff.rs s390-tools-2.33.1/rust-vendor/object/src/write/xcoff.rs --- s390-tools-2.31.0/rust-vendor/object/src/write/xcoff.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/write/xcoff.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,556 +0,0 @@ -use core::mem; - 
-use crate::endian::{BigEndian as BE, I16, U16, U32}; -use crate::write::string::*; -use crate::write::util::*; -use crate::write::*; - -use crate::{xcoff, AddressSize}; - -#[derive(Default, Clone, Copy)] -struct SectionOffsets { - address: u64, - data_offset: usize, - reloc_offset: usize, -} - -#[derive(Default, Clone, Copy)] -struct SymbolOffsets { - index: usize, - str_id: Option, - aux_count: u8, - storage_class: u8, -} - -impl<'a> Object<'a> { - pub(crate) fn xcoff_section_info( - &self, - section: StandardSection, - ) -> (&'static [u8], &'static [u8], SectionKind, SectionFlags) { - match section { - StandardSection::Text => (&[], &b".text"[..], SectionKind::Text, SectionFlags::None), - StandardSection::Data => (&[], &b".data"[..], SectionKind::Data, SectionFlags::None), - StandardSection::ReadOnlyData - | StandardSection::ReadOnlyDataWithRel - | StandardSection::ReadOnlyString => ( - &[], - &b".rdata"[..], - SectionKind::ReadOnlyData, - SectionFlags::None, - ), - StandardSection::UninitializedData => ( - &[], - &b".bss"[..], - SectionKind::UninitializedData, - SectionFlags::None, - ), - StandardSection::Tls => (&[], &b".tdata"[..], SectionKind::Tls, SectionFlags::None), - StandardSection::UninitializedTls => ( - &[], - &b".tbss"[..], - SectionKind::UninitializedTls, - SectionFlags::None, - ), - StandardSection::TlsVariables => { - // Unsupported section. - (&[], &[], SectionKind::TlsVariables, SectionFlags::None) - } - StandardSection::Common => { - // Unsupported section. - (&[], &[], SectionKind::Common, SectionFlags::None) - } - StandardSection::GnuProperty => { - // Unsupported section. 
- (&[], &[], SectionKind::Note, SectionFlags::None) - } - } - } - - pub(crate) fn xcoff_fixup_relocation(&mut self, relocation: &mut Relocation) -> i64 { - let constant = match relocation.kind { - RelocationKind::Relative => relocation.addend + 4, - _ => relocation.addend, - }; - relocation.addend -= constant; - constant - } - - pub(crate) fn xcoff_write(&self, buffer: &mut dyn WritableBuffer) -> Result<()> { - let is_64 = match self.architecture.address_size().unwrap() { - AddressSize::U8 | AddressSize::U16 | AddressSize::U32 => false, - AddressSize::U64 => true, - }; - - let (hdr_size, sechdr_size, rel_size, sym_size) = if is_64 { - ( - mem::size_of::(), - mem::size_of::(), - mem::size_of::(), - mem::size_of::(), - ) - } else { - ( - mem::size_of::(), - mem::size_of::(), - mem::size_of::(), - mem::size_of::(), - ) - }; - - // Calculate offsets and build strtab. - let mut offset = 0; - let mut strtab = StringTable::default(); - // We place the shared address 0 immediately after the section header table. - let mut address = 0; - - // XCOFF file header. - offset += hdr_size; - // Section headers. - offset += self.sections.len() * sechdr_size; - - // Calculate size of section data. - let mut section_offsets = vec![SectionOffsets::default(); self.sections.len()]; - for (index, section) in self.sections.iter().enumerate() { - let len = section.data.len(); - let sectype = section.kind; - // Section address should be 0 for all sections except the .text, .data, and .bss sections. - if sectype == SectionKind::Data - || sectype == SectionKind::Text - || sectype == SectionKind::UninitializedData - { - section_offsets[index].address = address as u64; - address += len; - address = align(address, 4); - } else { - section_offsets[index].address = 0; - } - if len != 0 { - // Set the default section alignment as 4. 
- offset = align(offset, 4); - section_offsets[index].data_offset = offset; - offset += len; - } else { - section_offsets[index].data_offset = 0; - } - } - - // Calculate size of relocations. - for (index, section) in self.sections.iter().enumerate() { - let count = section.relocations.len(); - if count != 0 { - section_offsets[index].reloc_offset = offset; - offset += count * rel_size; - } else { - section_offsets[index].reloc_offset = 0; - } - } - - // Calculate size of symbols. - let mut file_str_id = None; - let mut symbol_offsets = vec![SymbolOffsets::default(); self.symbols.len()]; - let mut symtab_count = 0; - for (index, symbol) in self.symbols.iter().enumerate() { - symbol_offsets[index].index = symtab_count; - symtab_count += 1; - - let storage_class = if let SymbolFlags::Xcoff { n_sclass, .. } = symbol.flags { - n_sclass - } else { - match symbol.kind { - SymbolKind::Null => xcoff::C_NULL, - SymbolKind::File => xcoff::C_FILE, - SymbolKind::Text | SymbolKind::Data | SymbolKind::Tls => { - if symbol.is_local() { - xcoff::C_STAT - } else if symbol.weak { - xcoff::C_WEAKEXT - } else { - xcoff::C_EXT - } - } - SymbolKind::Section | SymbolKind::Label | SymbolKind::Unknown => { - return Err(Error(format!( - "unimplemented symbol `{}` kind {:?}", - symbol.name().unwrap_or(""), - symbol.kind - ))); - } - } - }; - symbol_offsets[index].storage_class = storage_class; - - if storage_class == xcoff::C_FILE { - if is_64 && file_str_id.is_none() { - file_str_id = Some(strtab.add(b".file")); - } - if symbol.name.len() > 8 { - symbol_offsets[index].str_id = Some(strtab.add(&symbol.name)); - } - } else if is_64 || symbol.name.len() > 8 { - symbol_offsets[index].str_id = Some(strtab.add(&symbol.name)); - } - - symbol_offsets[index].aux_count = 0; - match storage_class { - xcoff::C_FILE => { - symbol_offsets[index].aux_count = 1; - symtab_count += 1; - } - xcoff::C_EXT | xcoff::C_WEAKEXT | xcoff::C_HIDEXT => { - symbol_offsets[index].aux_count = 1; - symtab_count += 1; - } 
- // TODO: support auxiliary entry for other types of symbol. - _ => {} - } - } - let symtab_offset = offset; - let symtab_len = symtab_count * sym_size; - offset += symtab_len; - - // Calculate size of strtab. - let strtab_offset = offset; - let mut strtab_data = Vec::new(); - // First 4 bytes of strtab are the length. - strtab.write(4, &mut strtab_data); - let strtab_len = strtab_data.len() + 4; - offset += strtab_len; - - // Start writing. - buffer - .reserve(offset) - .map_err(|_| Error(String::from("Cannot allocate buffer")))?; - - // Write file header. - if is_64 { - let header = xcoff::FileHeader64 { - f_magic: U16::new(BE, xcoff::MAGIC_64), - f_nscns: U16::new(BE, self.sections.len() as u16), - f_timdat: U32::new(BE, 0), - f_symptr: U64::new(BE, symtab_offset as u64), - f_nsyms: U32::new(BE, symtab_count as u32), - f_opthdr: U16::new(BE, 0), - f_flags: match self.flags { - FileFlags::Xcoff { f_flags } => U16::new(BE, f_flags), - _ => U16::default(), - }, - }; - buffer.write(&header); - } else { - let header = xcoff::FileHeader32 { - f_magic: U16::new(BE, xcoff::MAGIC_32), - f_nscns: U16::new(BE, self.sections.len() as u16), - f_timdat: U32::new(BE, 0), - f_symptr: U32::new(BE, symtab_offset as u32), - f_nsyms: U32::new(BE, symtab_count as u32), - f_opthdr: U16::new(BE, 0), - f_flags: match self.flags { - FileFlags::Xcoff { f_flags } => U16::new(BE, f_flags), - _ => U16::default(), - }, - }; - buffer.write(&header); - } - - // Write section headers. - for (index, section) in self.sections.iter().enumerate() { - let mut sectname = [0; 8]; - sectname - .get_mut(..section.name.len()) - .ok_or_else(|| { - Error(format!( - "section name `{}` is too long", - section.name().unwrap_or(""), - )) - })? 
- .copy_from_slice(§ion.name); - let flags = if let SectionFlags::Xcoff { s_flags } = section.flags { - s_flags - } else { - match section.kind { - SectionKind::Text - | SectionKind::ReadOnlyData - | SectionKind::ReadOnlyString - | SectionKind::ReadOnlyDataWithRel => xcoff::STYP_TEXT, - SectionKind::Data => xcoff::STYP_DATA, - SectionKind::UninitializedData => xcoff::STYP_BSS, - SectionKind::Tls => xcoff::STYP_TDATA, - SectionKind::UninitializedTls => xcoff::STYP_TBSS, - SectionKind::OtherString => xcoff::STYP_INFO, - SectionKind::Debug => xcoff::STYP_DEBUG, - SectionKind::Other | SectionKind::Metadata => 0, - SectionKind::Note - | SectionKind::Linker - | SectionKind::Common - | SectionKind::Unknown - | SectionKind::TlsVariables - | SectionKind::Elf(_) => { - return Err(Error(format!( - "unimplemented section `{}` kind {:?}", - section.name().unwrap_or(""), - section.kind - ))); - } - } - .into() - }; - if is_64 { - let section_header = xcoff::SectionHeader64 { - s_name: sectname, - s_paddr: U64::new(BE, section_offsets[index].address), - // This field has the same value as the s_paddr field. - s_vaddr: U64::new(BE, section_offsets[index].address), - s_size: U64::new(BE, section.data.len() as u64), - s_scnptr: U64::new(BE, section_offsets[index].data_offset as u64), - s_relptr: U64::new(BE, section_offsets[index].reloc_offset as u64), - s_lnnoptr: U64::new(BE, 0), - s_nreloc: U32::new(BE, section.relocations.len() as u32), - s_nlnno: U32::new(BE, 0), - s_flags: U32::new(BE, flags), - s_reserve: U32::new(BE, 0), - }; - buffer.write(§ion_header); - } else { - let section_header = xcoff::SectionHeader32 { - s_name: sectname, - s_paddr: U32::new(BE, section_offsets[index].address as u32), - // This field has the same value as the s_paddr field. 
- s_vaddr: U32::new(BE, section_offsets[index].address as u32), - s_size: U32::new(BE, section.data.len() as u32), - s_scnptr: U32::new(BE, section_offsets[index].data_offset as u32), - s_relptr: U32::new(BE, section_offsets[index].reloc_offset as u32), - s_lnnoptr: U32::new(BE, 0), - // TODO: If more than 65,534 relocation entries are required, the field - // value will be 65535, and an STYP_OVRFLO section header will contain - // the actual count of relocation entries in the s_paddr field. - s_nreloc: U16::new(BE, section.relocations.len() as u16), - s_nlnno: U16::new(BE, 0), - s_flags: U32::new(BE, flags), - }; - buffer.write(§ion_header); - } - } - - // Write section data. - for (index, section) in self.sections.iter().enumerate() { - let len = section.data.len(); - if len != 0 { - write_align(buffer, 4); - debug_assert_eq!(section_offsets[index].data_offset, buffer.len()); - buffer.write_bytes(§ion.data); - } - } - - // Write relocations. - for (index, section) in self.sections.iter().enumerate() { - if !section.relocations.is_empty() { - debug_assert_eq!(section_offsets[index].reloc_offset, buffer.len()); - for reloc in §ion.relocations { - let rtype = match reloc.kind { - RelocationKind::Absolute => xcoff::R_POS, - RelocationKind::Relative => xcoff::R_REL, - RelocationKind::Got => xcoff::R_TOC, - RelocationKind::Xcoff(x) => x, - _ => { - return Err(Error(format!("unimplemented relocation {:?}", reloc))); - } - }; - if is_64 { - let xcoff_rel = xcoff::Rel64 { - r_vaddr: U64::new(BE, reloc.offset), - r_symndx: U32::new(BE, symbol_offsets[reloc.symbol.0].index as u32), - // Specifies the bit length of the relocatable reference minus one. 
- r_rsize: (reloc.size - 1), - r_rtype: rtype, - }; - buffer.write(&xcoff_rel); - } else { - let xcoff_rel = xcoff::Rel32 { - r_vaddr: U32::new(BE, reloc.offset as u32), - r_symndx: U32::new(BE, symbol_offsets[reloc.symbol.0].index as u32), - r_rsize: (reloc.size - 1), - r_rtype: rtype, - }; - buffer.write(&xcoff_rel); - } - } - } - } - - // Write symbols. - debug_assert_eq!(symtab_offset, buffer.len()); - for (index, symbol) in self.symbols.iter().enumerate() { - let (n_value, section_kind) = if let SymbolSection::Section(id) = symbol.section { - ( - section_offsets[id.0].address + symbol.value, - self.sections[id.0].kind, - ) - } else { - (symbol.value, SectionKind::Unknown) - }; - let n_scnum = match symbol.section { - SymbolSection::None => { - debug_assert_eq!(symbol.kind, SymbolKind::File); - xcoff::N_DEBUG - } - SymbolSection::Undefined | SymbolSection::Common => xcoff::N_UNDEF, - SymbolSection::Absolute => xcoff::N_ABS, - SymbolSection::Section(id) => id.0 as i16 + 1, - }; - let n_sclass = symbol_offsets[index].storage_class; - let n_type = if (symbol.scope == SymbolScope::Linkage) - && (n_sclass == xcoff::C_EXT - || n_sclass == xcoff::C_WEAKEXT - || n_sclass == xcoff::C_HIDEXT) - { - xcoff::SYM_V_HIDDEN - } else { - 0 - }; - let n_numaux = symbol_offsets[index].aux_count; - if is_64 { - let str_id = if n_sclass == xcoff::C_FILE { - file_str_id.unwrap() - } else { - symbol_offsets[index].str_id.unwrap() - }; - let xcoff_sym = xcoff::Symbol64 { - n_value: U64::new(BE, n_value), - n_offset: U32::new(BE, strtab.get_offset(str_id) as u32), - n_scnum: I16::new(BE, n_scnum), - n_type: U16::new(BE, n_type), - n_sclass, - n_numaux, - }; - buffer.write(&xcoff_sym); - } else { - let mut sym_name = [0; 8]; - if n_sclass == xcoff::C_FILE { - sym_name[..5].copy_from_slice(b".file"); - } else if symbol.name.len() <= 8 { - sym_name[..symbol.name.len()].copy_from_slice(&symbol.name[..]); - } else { - let str_offset = 
strtab.get_offset(symbol_offsets[index].str_id.unwrap()); - sym_name[4..8].copy_from_slice(&u32::to_be_bytes(str_offset as u32)); - } - let xcoff_sym = xcoff::Symbol32 { - n_name: sym_name, - n_value: U32::new(BE, n_value as u32), - n_scnum: I16::new(BE, n_scnum), - n_type: U16::new(BE, n_type), - n_sclass, - n_numaux, - }; - buffer.write(&xcoff_sym); - } - // Generate auxiliary entries. - if n_sclass == xcoff::C_FILE { - debug_assert_eq!(n_numaux, 1); - let mut x_fname = [0; 8]; - if symbol.name.len() <= 8 { - x_fname[..symbol.name.len()].copy_from_slice(&symbol.name[..]); - } else { - let str_offset = strtab.get_offset(symbol_offsets[index].str_id.unwrap()); - x_fname[4..8].copy_from_slice(&u32::to_be_bytes(str_offset as u32)); - } - if is_64 { - let file_aux = xcoff::FileAux64 { - x_fname, - x_fpad: Default::default(), - x_ftype: xcoff::XFT_FN, - x_freserve: Default::default(), - x_auxtype: xcoff::AUX_FILE, - }; - buffer.write(&file_aux); - } else { - let file_aux = xcoff::FileAux32 { - x_fname, - x_fpad: Default::default(), - x_ftype: xcoff::XFT_FN, - x_freserve: Default::default(), - }; - buffer.write(&file_aux); - } - } else if n_sclass == xcoff::C_EXT - || n_sclass == xcoff::C_WEAKEXT - || n_sclass == xcoff::C_HIDEXT - { - debug_assert_eq!(n_numaux, 1); - let (x_smtyp, x_smclas) = if let SymbolFlags::Xcoff { - x_smtyp, x_smclas, .. 
- } = symbol.flags - { - (x_smtyp, x_smclas) - } else { - match symbol.kind { - SymbolKind::Text => (xcoff::XTY_SD, xcoff::XMC_PR), - SymbolKind::Data => { - if section_kind == SectionKind::UninitializedData { - (xcoff::XTY_CM, xcoff::XMC_BS) - } else if section_kind == SectionKind::ReadOnlyData { - (xcoff::XTY_SD, xcoff::XMC_RO) - } else { - (xcoff::XTY_SD, xcoff::XMC_RW) - } - } - SymbolKind::Tls => { - if section_kind == SectionKind::UninitializedTls { - (xcoff::XTY_CM, xcoff::XMC_UL) - } else { - (xcoff::XTY_SD, xcoff::XMC_TL) - } - } - _ => { - return Err(Error(format!( - "unimplemented symbol `{}` kind {:?}", - symbol.name().unwrap_or(""), - symbol.kind - ))); - } - } - }; - let scnlen = if let SymbolFlags::Xcoff { - containing_csect: Some(containing_csect), - .. - } = symbol.flags - { - symbol_offsets[containing_csect.0].index as u64 - } else { - symbol.size - }; - if is_64 { - let csect_aux = xcoff::CsectAux64 { - x_scnlen_lo: U32::new(BE, (scnlen & 0xFFFFFFFF) as u32), - x_scnlen_hi: U32::new(BE, ((scnlen >> 32) & 0xFFFFFFFF) as u32), - x_parmhash: U32::new(BE, 0), - x_snhash: U16::new(BE, 0), - x_smtyp, - x_smclas, - pad: 0, - x_auxtype: xcoff::AUX_CSECT, - }; - buffer.write(&csect_aux); - } else { - let csect_aux = xcoff::CsectAux32 { - x_scnlen: U32::new(BE, scnlen as u32), - x_parmhash: U32::new(BE, 0), - x_snhash: U16::new(BE, 0), - x_smtyp, - x_smclas, - x_stab: U32::new(BE, 0), - x_snstab: U16::new(BE, 0), - }; - buffer.write(&csect_aux); - } - } - } - - // Write string table. 
- debug_assert_eq!(strtab_offset, buffer.len()); - buffer.write_bytes(&u32::to_be_bytes(strtab_len as u32)); - buffer.write_bytes(&strtab_data); - - debug_assert_eq!(offset, buffer.len()); - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/object/src/xcoff.rs s390-tools-2.33.1/rust-vendor/object/src/xcoff.rs --- s390-tools-2.31.0/rust-vendor/object/src/xcoff.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/object/src/xcoff.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,893 +0,0 @@ -//! XCOFF definitions -//! -//! These definitions are independent of read/write support, although we do implement -//! some traits useful for those. -//! -//! This module is the equivalent of /usr/include/xcoff.h, and is based heavily on it. - -#![allow(missing_docs)] - -use crate::endian::{BigEndian as BE, I16, U16, U32, U64}; -use crate::pod::Pod; - -/// The header at the start of every 32-bit XCOFF file. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FileHeader32 { - /// Magic number. Must be 0x01DF. - pub f_magic: U16, - /// Number of sections. - pub f_nscns: U16, - /// Time and date of file creation. - pub f_timdat: U32, - /// Byte offset to symbol table start. - pub f_symptr: U32, - /// Number of entries in symbol table. - pub f_nsyms: U32, - /// Number of bytes in optional header - pub f_opthdr: U16, - /// Extra flags. - pub f_flags: U16, -} - -/// The header at the start of every 64-bit XCOFF file. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FileHeader64 { - /// Magic number. Must be 0x01F7. - pub f_magic: U16, - /// Number of sections. - pub f_nscns: U16, - /// Time and date of file creation - pub f_timdat: U32, - /// Byte offset to symbol table start. - pub f_symptr: U64, - /// Number of bytes in optional header - pub f_opthdr: U16, - /// Extra flags. - pub f_flags: U16, - /// Number of entries in symbol table. - pub f_nsyms: U32, -} - -// Values for `f_magic`. 
-// -/// the 64-bit mach magic number -pub const MAGIC_64: u16 = 0x01F7; -/// the 32-bit mach magic number -pub const MAGIC_32: u16 = 0x01DF; - -// Values for `f_flags`. -// -/// Indicates that the relocation information for binding has been removed from -/// the file. -pub const F_RELFLG: u16 = 0x0001; -/// Indicates that the file is executable. No unresolved external references exist. -pub const F_EXEC: u16 = 0x0002; -/// Indicates that line numbers have been stripped from the file by a utility program. -pub const F_LNNO: u16 = 0x0004; -/// Indicates that the file was profiled with the fdpr command. -pub const F_FDPR_PROF: u16 = 0x0010; -/// Indicates that the file was reordered with the fdpr command. -pub const F_FDPR_OPTI: u16 = 0x0020; -/// Indicates that the file uses Very Large Program Support. -pub const F_DSA: u16 = 0x0040; -/// Indicates that one of the members of the auxiliary header specifying the -/// medium page sizes is non-zero. -pub const F_VARPG: u16 = 0x0100; -/// Indicates the file is dynamically loadable and executable. External references -/// are resolved by way of imports, and the file might contain exports and loader -/// relocation. -pub const F_DYNLOAD: u16 = 0x1000; -/// Indicates the file is a shared object (shared library). The file is separately -/// loadable. That is, it is not normally bound with other objects, and its loader -/// exports symbols are used as automatic import symbols for other object files. -pub const F_SHROBJ: u16 = 0x2000; -/// If the object file is a member of an archive, it can be loaded by the system -/// loader, but the member is ignored by the binder. If the object file is not in -/// an archive, this flag has no effect. -pub const F_LOADONLY: u16 = 0x4000; - -/// The auxiliary header immediately following file header. If the value of the -/// f_opthdr field in the file header is 0, the auxiliary header does not exist. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct AuxHeader32 { - /// Flags. 
- pub o_mflag: U16, - /// Version. - pub o_vstamp: U16, - /// Text size in bytes. - pub o_tsize: U32, - /// Initialized data size in bytes. - pub o_dsize: U32, - /// Uninitialized data size in bytes. - pub o_bsize: U32, - /// Entry point descriptor (virtual address). - pub o_entry: U32, - /// Base address of text (virtual address). - pub o_text_start: U32, - /// Base address of data (virtual address). - pub o_data_start: U32, - /// Address of TOC anchor. - pub o_toc: U32, - /// Section number for entry point. - pub o_snentry: U16, - /// Section number for .text. - pub o_sntext: U16, - /// Section number for .data. - pub o_sndata: U16, - /// Section number for TOC. - pub o_sntoc: U16, - /// Section number for loader data. - pub o_snloader: U16, - /// Section number for .bss. - pub o_snbss: U16, - /// Maximum alignment for .text. - pub o_algntext: U16, - /// Maximum alignment for .data. - pub o_algndata: U16, - /// Module type field. - pub o_modtype: U16, - /// Bit flags - cpu types of objects. - pub o_cpuflag: u8, - /// Reserved for CPU type. - pub o_cputype: u8, - /// Maximum stack size allowed (bytes). - pub o_maxstack: U32, - /// Maximum data size allowed (bytes). - pub o_maxdata: U32, - /// Reserved for debuggers. - pub o_debugger: U32, - /// Requested text page size. - pub o_textpsize: u8, - /// Requested data page size. - pub o_datapsize: u8, - /// Requested stack page size. - pub o_stackpsize: u8, - /// Flags and thread-local storage alignment. - pub o_flags: u8, - /// Section number for .tdata. - pub o_sntdata: U16, - /// Section number for .tbss. - pub o_sntbss: U16, -} - -/// The auxiliary header immediately following file header. If the value of the -/// f_opthdr field in the file header is 0, the auxiliary header does not exist. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct AuxHeader64 { - /// Flags. - pub o_mflag: U16, - /// Version. - pub o_vstamp: U16, - /// Reserved for debuggers. 
- pub o_debugger: U32, - /// Base address of text (virtual address). - pub o_text_start: U64, - /// Base address of data (virtual address). - pub o_data_start: U64, - /// Address of TOC anchor. - pub o_toc: U64, - /// Section number for entry point. - pub o_snentry: U16, - /// Section number for .text. - pub o_sntext: U16, - /// Section number for .data. - pub o_sndata: U16, - /// Section number for TOC. - pub o_sntoc: U16, - /// Section number for loader data. - pub o_snloader: U16, - /// Section number for .bss. - pub o_snbss: U16, - /// Maximum alignment for .text. - pub o_algntext: U16, - /// Maximum alignment for .data. - pub o_algndata: U16, - /// Module type field. - pub o_modtype: U16, - /// Bit flags - cpu types of objects. - pub o_cpuflag: u8, - /// Reserved for CPU type. - pub o_cputype: u8, - /// Requested text page size. - pub o_textpsize: u8, - /// Requested data page size. - pub o_datapsize: u8, - /// Requested stack page size. - pub o_stackpsize: u8, - /// Flags and thread-local storage alignment. - pub o_flags: u8, - /// Text size in bytes. - pub o_tsize: U64, - /// Initialized data size in bytes. - pub o_dsize: U64, - /// Uninitialized data size in bytes. - pub o_bsize: U64, - /// Entry point descriptor (virtual address). - pub o_entry: U64, - /// Maximum stack size allowed (bytes). - pub o_maxstack: U64, - /// Maximum data size allowed (bytes). - pub o_maxdata: U64, - /// Section number for .tdata. - pub o_sntdata: U16, - /// Section number for .tbss. - pub o_sntbss: U16, - /// XCOFF64 flags. - pub o_x64flags: U16, - /// Reserved. - pub o_resv3a: U16, - /// Reserved. - pub o_resv3: [U32; 2], -} - -/// Some AIX programs generate auxiliary headers for 32-bit object files that -/// end after the data_start field. -pub const AOUTHSZ_SHORT: u16 = 28; - -/// Section header. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SectionHeader32 { - /// Section name. - pub s_name: [u8; 8], - /// Physical address. 
- pub s_paddr: U32, - /// Virtual address (same as physical address). - pub s_vaddr: U32, - /// Section size. - pub s_size: U32, - /// Offset in file to raw data for section. - pub s_scnptr: U32, - /// Offset in file to relocation entries for section. - pub s_relptr: U32, - /// Offset in file to line number entries for section. - pub s_lnnoptr: U32, - /// Number of relocation entries. - pub s_nreloc: U16, - /// Number of line number entries. - pub s_nlnno: U16, - /// Flags to define the section type. - pub s_flags: U32, -} - -/// Section header. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SectionHeader64 { - /// Section name. - pub s_name: [u8; 8], - /// Physical address. - pub s_paddr: U64, - /// Virtual address (same as physical address). - pub s_vaddr: U64, - /// Section size. - pub s_size: U64, - /// Offset in file to raw data for section. - pub s_scnptr: U64, - /// Offset in file to relocation entries for section. - pub s_relptr: U64, - /// Offset in file to line number entries for section. - pub s_lnnoptr: U64, - /// Number of relocation entries. - pub s_nreloc: U32, - /// Number of line number entries. - pub s_nlnno: U32, - /// Flags to define the section type. - pub s_flags: U32, - /// Reserved. - pub s_reserve: U32, -} - -// Values for `s_flags`. -// -/// "regular" section -pub const STYP_REG: u16 = 0x00; -/// Specifies a pad section. A section of this type is used to provide alignment -/// padding between sections within an XCOFF executable object file. This section -/// header type is obsolete since padding is allowed in an XCOFF file without a -/// corresponding pad section header. -pub const STYP_PAD: u16 = 0x08; -/// Specifies a DWARF debugging section, which provide source file and symbol -/// information for the symbolic debugger. -pub const STYP_DWARF: u16 = 0x10; -/// Specifies an executable text (code) section. A section of this type contains -/// the executable instructions of a program. 
-pub const STYP_TEXT: u16 = 0x20; -/// Specifies an initialized data section. A section of this type contains the -/// initialized data and the TOC of a program. -pub const STYP_DATA: u16 = 0x40; -/// Specifies an uninitialized data section. A section header of this type -/// defines the uninitialized data of a program. -pub const STYP_BSS: u16 = 0x80; -/// Specifies an exception section. A section of this type provides information -/// to identify the reason that a trap or exception occurred within an executable -/// object program. -pub const STYP_EXCEPT: u16 = 0x0100; -/// Specifies a comment section. A section of this type provides comments or data -/// to special processing utility programs. -pub const STYP_INFO: u16 = 0x0200; -/// Specifies an initialized thread-local data section. -pub const STYP_TDATA: u16 = 0x0400; -/// Specifies an uninitialized thread-local data section. -pub const STYP_TBSS: u16 = 0x0800; -/// Specifies a loader section. A section of this type contains object file -/// information for the system loader to load an XCOFF executable. The information -/// includes imported symbols, exported symbols, relocation data, type-check -/// information, and shared object names. -pub const STYP_LOADER: u16 = 0x1000; -/// Specifies a debug section. A section of this type contains stabstring -/// information used by the symbolic debugger. -pub const STYP_DEBUG: u16 = 0x2000; -/// Specifies a type-check section. A section of this type contains -/// parameter/argument type-check strings used by the binder. -pub const STYP_TYPCHK: u16 = 0x4000; -/// Specifies a relocation or line-number field overflow section. A section -/// header of this type contains the count of relocation entries and line -/// number entries for some other section. This section header is required -/// when either of the counts exceeds 65,534. 
-pub const STYP_OVRFLO: u16 = 0x8000; - -pub const SIZEOF_SYMBOL: usize = 18; - -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct SymbolBytes(pub [u8; SIZEOF_SYMBOL]); - -/// Symbol table entry. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Symbol32 { - /// Symbol name. - /// - /// If first 4 bytes are 0, then second 4 bytes are offset into string table. - pub n_name: [u8; 8], - /// Symbol value; storage class-dependent. - pub n_value: U32, - /// Section number of symbol. - pub n_scnum: I16, - /// Basic and derived type specification. - pub n_type: U16, - /// Storage class of symbol. - pub n_sclass: u8, - /// Number of auxiliary entries. - pub n_numaux: u8, -} - -/// Symbol table entry. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Symbol64 { - /// Symbol value; storage class-dependent. - pub n_value: U64, - /// Offset of the name in string table or .debug section. - pub n_offset: U32, - /// Section number of symbol. - pub n_scnum: I16, - /// Basic and derived type specification. - pub n_type: U16, - /// Storage class of symbol. - pub n_sclass: u8, - /// Number of auxiliary entries. - pub n_numaux: u8, -} - -// Values for `n_scnum`. -// -/// A special symbolic debugging symbol. -pub const N_DEBUG: i16 = -2; -/// An absolute symbol. The symbol has a value but is not relocatable. -pub const N_ABS: i16 = -1; -/// An undefined external symbol. -pub const N_UNDEF: i16 = 0; - -// Vlaues for `n_type`. -// -/// Values for visibility as they would appear when encoded in the high 4 bits -/// of the 16-bit unsigned n_type field of symbol table entries. Valid for -/// 32-bit XCOFF only when the o_vstamp in the auxiliary header is greater than 1. -pub const SYM_V_MASK: u16 = 0xF000; -pub const SYM_V_INTERNAL: u16 = 0x1000; -pub const SYM_V_HIDDEN: u16 = 0x2000; -pub const SYM_V_PROTECTED: u16 = 0x3000; -pub const SYM_V_EXPORTED: u16 = 0x4000; - -// Values for `n_sclass`. -// -// Storage classes used for symbolic debugging symbols. 
-// -/// Source file name and compiler information. -pub const C_FILE: u8 = 103; -/// Beginning of include file. -pub const C_BINCL: u8 = 108; -/// Ending of include file. -pub const C_EINCL: u8 = 109; -/// Global variable. -pub const C_GSYM: u8 = 128; -/// Statically allocated symbol. -pub const C_STSYM: u8 = 133; -/// Beginning of common block. -pub const C_BCOMM: u8 = 135; -/// End of common block. -pub const C_ECOMM: u8 = 137; -/// Alternate entry. -pub const C_ENTRY: u8 = 141; -/// Beginning of static block. -pub const C_BSTAT: u8 = 143; -/// End of static block. -pub const C_ESTAT: u8 = 144; -/// Global thread-local variable. -pub const C_GTLS: u8 = 145; -/// Static thread-local variable. -pub const C_STTLS: u8 = 146; -/// DWARF section symbol. -pub const C_DWARF: u8 = 112; -// -// Storage classes used for absolute symbols. -// -/// Automatic variable allocated on stack. -pub const C_LSYM: u8 = 129; -/// Argument to subroutine allocated on stack. -pub const C_PSYM: u8 = 130; -/// Register variable. -pub const C_RSYM: u8 = 131; -/// Argument to function or procedure stored in register. -pub const C_RPSYM: u8 = 132; -/// Local member of common block. -pub const C_ECOML: u8 = 136; -/// Function or procedure. -pub const C_FUN: u8 = 142; -// -// Storage classes used for undefined external symbols or symbols of general sections. -// -/// External symbol. -pub const C_EXT: u8 = 2; -/// Weak external symbol. -pub const C_WEAKEXT: u8 = 111; -// -// Storage classes used for symbols of general sections. -// -/// Symbol table entry marked for deletion. -pub const C_NULL: u8 = 0; -/// Static. -pub const C_STAT: u8 = 3; -/// Beginning or end of inner block. -pub const C_BLOCK: u8 = 100; -/// Beginning or end of function. -pub const C_FCN: u8 = 101; -/// Un-named external symbol. -pub const C_HIDEXT: u8 = 107; -/// Comment string in .info section. -pub const C_INFO: u8 = 110; -/// Declaration of object (type). 
-pub const C_DECL: u8 = 140; -// -// Storage classes - Obsolete/Undocumented. -// -/// Automatic variable. -pub const C_AUTO: u8 = 1; -/// Register variable. -pub const C_REG: u8 = 4; -/// External definition. -pub const C_EXTDEF: u8 = 5; -/// Label. -pub const C_LABEL: u8 = 6; -/// Undefined label. -pub const C_ULABEL: u8 = 7; -/// Member of structure. -pub const C_MOS: u8 = 8; -/// Function argument. -pub const C_ARG: u8 = 9; -/// Structure tag. -pub const C_STRTAG: u8 = 10; -/// Member of union. -pub const C_MOU: u8 = 11; -/// Union tag. -pub const C_UNTAG: u8 = 12; -/// Type definition. -pub const C_TPDEF: u8 = 13; -/// Undefined static. -pub const C_USTATIC: u8 = 14; -/// Enumeration tag. -pub const C_ENTAG: u8 = 15; -/// Member of enumeration. -pub const C_MOE: u8 = 16; -/// Register parameter. -pub const C_REGPARM: u8 = 17; -/// Bit field. -pub const C_FIELD: u8 = 18; -/// End of structure. -pub const C_EOS: u8 = 102; -/// Duplicate tag. -pub const C_ALIAS: u8 = 105; -/// Special storage class for external. -pub const C_HIDDEN: u8 = 106; -/// Physical end of function. -pub const C_EFCN: u8 = 255; -/// Reserved. -pub const C_TCSYM: u8 = 134; - -/// File Auxiliary Entry for C_FILE Symbols. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FileAux32 { - /// The source file name or compiler-related string. - /// - /// If first 4 bytes are 0, then second 4 bytes are offset into string table. - pub x_fname: [u8; 8], - /// Pad size for file name. - pub x_fpad: [u8; 6], - /// The source-file string type. - pub x_ftype: u8, - /// Reserved. - pub x_freserve: [u8; 3], -} - -/// File Auxiliary Entry for C_FILE Symbols. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FileAux64 { - /// The source file name or compiler-related string. - /// - /// If first 4 bytes are 0, then second 4 bytes are offset into string table. - pub x_fname: [u8; 8], - /// Pad size for file name. - pub x_fpad: [u8; 6], - /// The source-file string type. 
- pub x_ftype: u8, - /// Reserved. - pub x_freserve: [u8; 2], - /// Specifies the type of auxiliary entry. Contains _AUX_FILE for this auxiliary entry. - pub x_auxtype: u8, -} - -// Values for `x_ftype`. -// -/// Specifies the source-file name. -pub const XFT_FN: u8 = 0; -/// Specifies the compiler time stamp. -pub const XFT_CT: u8 = 1; -/// Specifies the compiler version number. -pub const XFT_CV: u8 = 2; -/// Specifies compiler-defined information. -pub const XFT_CD: u8 = 128; - -/// Csect auxiliary entry for C_EXT, C_WEAKEXT, and C_HIDEXT symbols. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct CsectAux32 { - /// Section length. - pub x_scnlen: U32, - /// Offset of parameter type-check hash in .typchk section. - pub x_parmhash: U32, - /// .typchk section number. - pub x_snhash: U16, - /// Symbol alignment and type. - pub x_smtyp: u8, - /// Storage mapping class. - pub x_smclas: u8, - /// Reserved. - pub x_stab: U32, - /// x_snstab. - pub x_snstab: U16, -} - -/// Csect auxiliary entry for C_EXT, C_WEAKEXT, and C_HIDEXT symbols. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct CsectAux64 { - /// Low 4 bytes of section length. - pub x_scnlen_lo: U32, - /// Offset of parameter type-check hash in .typchk section. - pub x_parmhash: U32, - /// .typchk section number. - pub x_snhash: U16, - /// Symbol alignment and type. - pub x_smtyp: u8, - /// Storage mapping class. - pub x_smclas: u8, - /// High 4 bytes of section length. - pub x_scnlen_hi: U32, - /// Reserved. - pub pad: u8, - /// Contains _AUX_CSECT; indicates type of auxiliary entry. - pub x_auxtype: u8, -} - -// Values for `x_smtyp`. -// -/// External reference. -pub const XTY_ER: u8 = 0; -/// Csect definition for initialized storage. -pub const XTY_SD: u8 = 1; -/// Defines an entry point to an initialized csect. -pub const XTY_LD: u8 = 2; -/// Common csect definition. For uninitialized storage. -pub const XTY_CM: u8 = 3; - -// Values for `x_smclas`. 
-// -// READ ONLY CLASSES -// -/// Program Code -pub const XMC_PR: u8 = 0; -/// Read Only Constant -pub const XMC_RO: u8 = 1; -/// Debug Dictionary Table -pub const XMC_DB: u8 = 2; -/// Global Linkage (Interfile Interface Code) -pub const XMC_GL: u8 = 6; -/// Extended Operation (Pseudo Machine Instruction) -pub const XMC_XO: u8 = 7; -/// Supervisor Call (32-bit process only) -pub const XMC_SV: u8 = 8; -/// Supervisor Call for 64-bit process -pub const XMC_SV64: u8 = 17; -/// Supervisor Call for both 32- and 64-bit processes -pub const XMC_SV3264: u8 = 18; -/// Traceback Index csect -pub const XMC_TI: u8 = 12; -/// Traceback Table csect -pub const XMC_TB: u8 = 13; -// -// READ WRITE CLASSES -// -/// Read Write Data -pub const XMC_RW: u8 = 5; -/// TOC Anchor for TOC Addressability -pub const XMC_TC0: u8 = 15; -/// General TOC item -pub const XMC_TC: u8 = 3; -/// Scalar data item in the TOC -pub const XMC_TD: u8 = 16; -/// Descriptor csect -pub const XMC_DS: u8 = 10; -/// Unclassified - Treated as Read Write -pub const XMC_UA: u8 = 4; -/// BSS class (uninitialized static internal) -pub const XMC_BS: u8 = 9; -/// Un-named Fortran Common -pub const XMC_UC: u8 = 11; -/// Initialized thread-local variable -pub const XMC_TL: u8 = 20; -/// Uninitialized thread-local variable -pub const XMC_UL: u8 = 21; -/// Symbol mapped at the end of TOC -pub const XMC_TE: u8 = 22; - -/// Function auxiliary entry. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FunAux32 { - /// File offset to exception table entry. - pub x_exptr: U32, - /// Size of function in bytes. - pub x_fsize: U32, - /// File pointer to line number - pub x_lnnoptr: U32, - /// Symbol table index of next entry beyond this function. - pub x_endndx: U32, - /// Pad - pub pad: U16, -} - -/// Function auxiliary entry. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct FunAux64 { - /// File pointer to line number - pub x_lnnoptr: U64, - /// Size of function in bytes. 
- pub x_fsize: U32, - /// Symbol table index of next entry beyond this function. - pub x_endndx: U32, - /// Pad - pub pad: u8, - /// Contains _AUX_FCN; Type of auxiliary entry. - pub x_auxtype: u8, -} - -/// Exception auxiliary entry. (XCOFF64 only) -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct ExpAux { - /// File offset to exception table entry. - pub x_exptr: U64, - /// Size of function in bytes. - pub x_fsize: U32, - /// Symbol table index of next entry beyond this function. - pub x_endndx: U32, - /// Pad - pub pad: u8, - /// Contains _AUX_EXCEPT; Type of auxiliary entry - pub x_auxtype: u8, -} - -/// Block auxiliary entry for the C_BLOCK and C_FCN Symbols. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct BlockAux32 { - /// Reserved. - pub pad: [u8; 2], - /// High-order 2 bytes of the source line number. - pub x_lnnohi: U16, - /// Low-order 2 bytes of the source line number. - pub x_lnnolo: U16, - /// Reserved. - pub pad2: [u8; 12], -} - -/// Block auxiliary entry for the C_BLOCK and C_FCN Symbols. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct BlockAux64 { - /// Source line number. - pub x_lnno: U32, - /// Reserved. - pub pad: [u8; 13], - /// Contains _AUX_SYM; Type of auxiliary entry. - pub x_auxtype: u8, -} - -/// Section auxiliary entry for the C_STAT Symbol. (XCOFF32 Only) -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct StatAux { - /// Section length. - pub x_scnlen: U32, - /// Number of relocation entries. - pub x_nreloc: U16, - /// Number of line numbers. - pub x_nlinno: U16, - /// Reserved. - pub pad: [u8; 10], -} - -/// Section auxiliary entry Format for C_DWARF symbols. -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DwarfAux32 { - /// Length of portion of section represented by symbol. - pub x_scnlen: U32, - /// Reserved. - pub pad: [u8; 4], - /// Number of relocation entries in section. - pub x_nreloc: U32, - /// Reserved. - pub pad2: [u8; 6], -} - -/// Section auxiliary entry Format for C_DWARF symbols. 
-#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct DwarfAux64 { - /// Length of portion of section represented by symbol. - pub x_scnlen: U64, - /// Number of relocation entries in section. - pub x_nreloc: U64, - /// Reserved. - pub pad: u8, - /// Contains _AUX_SECT; Type of Auxiliary entry. - pub x_auxtype: u8, -} - -// Values for `x_auxtype` -// -/// Identifies an exception auxiliary entry. -pub const AUX_EXCEPT: u8 = 255; -/// Identifies a function auxiliary entry. -pub const AUX_FCN: u8 = 254; -/// Identifies a symbol auxiliary entry. -pub const AUX_SYM: u8 = 253; -/// Identifies a file auxiliary entry. -pub const AUX_FILE: u8 = 252; -/// Identifies a csect auxiliary entry. -pub const AUX_CSECT: u8 = 251; -/// Identifies a SECT auxiliary entry. -pub const AUX_SECT: u8 = 250; - -/// Relocation table entry -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Rel32 { - /// Virtual address (position) in section to be relocated. - pub r_vaddr: U32, - /// Symbol table index of item that is referenced. - pub r_symndx: U32, - /// Relocation size and information. - pub r_rsize: u8, - /// Relocation type. - pub r_rtype: u8, -} - -/// Relocation table entry -#[derive(Debug, Clone, Copy)] -#[repr(C)] -pub struct Rel64 { - /// Virtual address (position) in section to be relocated. - pub r_vaddr: U64, - /// Symbol table index of item that is referenced. - pub r_symndx: U32, - /// Relocation size and information. - pub r_rsize: u8, - /// Relocation type. - pub r_rtype: u8, -} - -// Values for `r_rtype`. -// -/// Positive relocation. -pub const R_POS: u8 = 0x00; -/// Positive indirect load relocation. -pub const R_RL: u8 = 0x0c; -/// Positive load address relocation. Modifiable instruction. -pub const R_RLA: u8 = 0x0d; -/// Negative relocation. -pub const R_NEG: u8 = 0x01; -/// Relative to self relocation. -pub const R_REL: u8 = 0x02; -/// Relative to the TOC relocation. -pub const R_TOC: u8 = 0x03; -/// TOC relative indirect load relocation. 
-pub const R_TRL: u8 = 0x12; -/// Relative to the TOC or to the thread-local storage base relocation. -pub const R_TRLA: u8 = 0x13; -/// Global linkage-external TOC address relocation. -pub const R_GL: u8 = 0x05; -/// Local object TOC address relocation. -pub const R_TCL: u8 = 0x06; -/// A non-relocating relocation. -pub const R_REF: u8 = 0x0f; -/// Branch absolute relocation. References a non-modifiable instruction. -pub const R_BA: u8 = 0x08; -/// Branch relative to self relocation. References a non-modifiable instruction. -pub const R_BR: u8 = 0x0a; -/// Branch absolute relocation. References a modifiable instruction. -pub const R_RBA: u8 = 0x18; -/// Branch relative to self relocation. References a modifiable instruction. -pub const R_RBR: u8 = 0x1a; -/// General-dynamic reference to TLS symbol. -pub const R_TLS: u8 = 0x20; -/// Initial-exec reference to TLS symbol. -pub const R_TLS_IE: u8 = 0x21; -/// Local-dynamic reference to TLS symbol. -pub const R_TLS_LD: u8 = 0x22; -/// Local-exec reference to TLS symbol. -pub const R_TLS_LE: u8 = 0x23; -/// Module reference to TLS. -pub const R_TLSM: u8 = 0x24; -/// Module reference to the local TLS storage. -pub const R_TLSML: u8 = 0x25; -/// Relative to TOC upper. -pub const R_TOCU: u8 = 0x30; -/// Relative to TOC lower. 
-pub const R_TOCL: u8 = 0x31; - -unsafe_impl_pod!( - FileHeader32, - FileHeader64, - AuxHeader32, - AuxHeader64, - SectionHeader32, - SectionHeader64, - SymbolBytes, - Symbol32, - Symbol64, - FileAux32, - FileAux64, - CsectAux32, - CsectAux64, - FunAux32, - FunAux64, - ExpAux, - BlockAux32, - BlockAux64, - StatAux, - DwarfAux32, - DwarfAux64, - Rel32, - Rel64, -); diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/bors.toml s390-tools-2.33.1/rust-vendor/parking_lot/bors.toml --- s390-tools-2.31.0/rust-vendor/parking_lot/bors.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/bors.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,4 +0,0 @@ -status = [ - "build_tier_one", - "build_other_platforms", -] diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/parking_lot/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/parking_lot/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/Cargo.toml s390-tools-2.33.1/rust-vendor/parking_lot/Cargo.toml --- s390-tools-2.31.0/rust-vendor/parking_lot/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,53 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). 
-# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "parking_lot" -version = "0.12.1" -authors = ["Amanieu d'Antras "] -description = "More compact and efficient implementations of the standard synchronization primitives." -readme = "README.md" -keywords = [ - "mutex", - "condvar", - "rwlock", - "once", - "thread", -] -categories = ["concurrency"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/Amanieu/parking_lot" - -[dependencies.lock_api] -version = "0.4.6" - -[dependencies.parking_lot_core] -version = "0.9.0" - -[dev-dependencies.bincode] -version = "1.3.3" - -[dev-dependencies.rand] -version = "0.8.3" - -[features] -arc_lock = ["lock_api/arc_lock"] -deadlock_detection = ["parking_lot_core/deadlock_detection"] -default = [] -hardware-lock-elision = [] -nightly = [ - "parking_lot_core/nightly", - "lock_api/nightly", -] -owning_ref = ["lock_api/owning_ref"] -send_guard = [] -serde = ["lock_api/serde"] diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/CHANGELOG.md s390-tools-2.33.1/rust-vendor/parking_lot/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/parking_lot/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,203 +0,0 @@ -## parking_lot 0.12.1 (2022-05-31) - -- Fixed incorrect memory ordering in `RwLock`. (#344) -- Added `Condvar::wait_while` convenience methods (#343) - -## parking_lot_core 0.9.3 (2022-04-30) - -- Bump windows-sys dependency to 0.36. (#339) - -## parking_lot_core 0.9.2, lock_api 0.4.7 (2022-03-25) - -- Enable const new() on lock types on stable. (#325) -- Added `MutexGuard::leak` function. (#333) -- Bump windows-sys dependency to 0.34. (#331) -- Bump petgraph dependency to 0.6. (#326) -- Don't use pthread attributes on the espidf platform. (#319) - -## parking_lot_core 0.9.1 (2022-02-06) - -- Bump windows-sys dependency to 0.32. 
(#316) - -## parking_lot 0.12.0, parking_lot_core 0.9.0, lock_api 0.4.6 (2022-01-28) - -- The MSRV is bumped to 1.49.0. -- Disabled eventual fairness on wasm32-unknown-unknown. (#302) -- Added a rwlock method to report if lock is held exclusively. (#303) -- Use new `asm!` macro. (#304) -- Use windows-rs instead of winapi for faster builds. (#311) -- Moved hardware lock elision support to a separate Cargo feature. (#313) -- Removed used of deprecated `spin_loop_hint`. (#314) - -## parking_lot 0.11.2, parking_lot_core 0.8.4, lock_api 0.4.5 (2021-08-28) - -- Fixed incorrect memory orderings on `RwLock` and `WordLock`. (#294, #292) -- Added `Arc`-based lock guards. (#291) -- Added workaround for TSan's lack of support for `fence`. (#292) - -## lock_api 0.4.4 (2021-05-01) - -- Update for latest nightly. (#281) - -## lock_api 0.4.3 (2021-04-03) - -- Added `[Raw]ReentrantMutex::is_owned`. (#280) - -## parking_lot_core 0.8.3 (2021-02-12) - -- Updated smallvec to 1.6. (#276) - -## parking_lot_core 0.8.2 (2020-12-21) - -- Fixed assertion failure on OpenBSD. (#270) - -## parking_lot_core 0.8.1 (2020-12-04) - -- Removed deprecated CloudABI support. (#263) -- Fixed build on wasm32-unknown-unknown. (#265) -- Relaxed dependency on `smallvec`. (#266) - -## parking_lot 0.11.1, lock_api 0.4.2 (2020-11-18) - -- Fix bounds on Send and Sync impls for lock guards. (#262) -- Fix incorrect memory ordering in `RwLock`. (#260) - -## lock_api 0.4.1 (2020-07-06) - -- Add `data_ptr` method to lock types to allow unsafely accessing the inner data - without a guard. (#247) - -## parking_lot 0.11.0, parking_lot_core 0.8.0, lock_api 0.4.0 (2020-06-23) - -- Add `is_locked` method to mutex types. (#235) -- Make `RawReentrantMutex` public. (#233) -- Allow lock guard to be sent to another thread with the `send_guard` feature. (#240) -- Use `Instant` type from the `instant` crate on wasm32-unknown-unknown. (#231) -- Remove deprecated and unsound `MappedRwLockWriteGuard::downgrade`. 
(#244) -- Most methods on the `Raw*` traits have been made unsafe since they assume - the current thread holds the lock. (#243) - -## parking_lot_core 0.7.2 (2020-04-21) - -- Add support for `wasm32-unknown-unknown` under the "nightly" feature. (#226) - -## parking_lot 0.10.2 (2020-04-10) - -- Update minimum version of `lock_api`. - -## parking_lot 0.10.1, parking_lot_core 0.7.1, lock_api 0.3.4 (2020-04-10) - -- Add methods to construct `Mutex`, `RwLock`, etc in a `const` context. (#217) -- Add `FairMutex` which always uses fair unlocking. (#204) -- Fixed panic with deadlock detection on macOS. (#203) -- Fixed incorrect synchronization in `create_hashtable`. (#210) -- Use `llvm_asm!` instead of the deprecated `asm!`. (#223) - -## lock_api 0.3.3 (2020-01-04) - -- Deprecate unsound `MappedRwLockWriteGuard::downgrade` (#198) - -## parking_lot 0.10.0, parking_lot_core 0.7.0, lock_api 0.3.2 (2019-11-25) - -- Upgrade smallvec dependency to 1.0 in parking_lot_core. -- Replace all usage of `mem::uninitialized` with `mem::MaybeUninit`. -- The minimum required Rust version is bumped to 1.36. Because of the above two changes. -- Make methods on `WaitTimeoutResult` and `OnceState` take `self` by value instead of reference. - -## parking_lot_core 0.6.2 (2019-07-22) - -- Fixed compile error on Windows with old cfg_if version. (#164) - -## parking_lot_core 0.6.1 (2019-07-17) - -- Fixed Android build. (#163) - -## parking_lot 0.9.0, parking_lot_core 0.6.0, lock_api 0.3.1 (2019-07-14) - -- Re-export lock_api (0.3.1) from parking_lot (#150) -- Removed (non-dev) dependency on rand crate for fairness mechanism, by - including a simple xorshift PRNG in core (#144) -- Android now uses the futex-based ThreadParker. (#140) -- Fixed CloudABI ThreadParker. 
(#140) -- Fix race condition in lock_api::ReentrantMutex (da16c2c7) - -## lock_api 0.3.0 (2019-07-03, _yanked_) - -- Use NonZeroUsize in GetThreadId::nonzero_thread_id (#148) -- Debug assert lock_count in ReentrantMutex (#148) -- Tag as `unsafe` and document some internal methods (#148) -- This release was _yanked_ due to a regression in ReentrantMutex (da16c2c7) - -## parking_lot 0.8.1 (2019-07-03, _yanked_) - -- Re-export lock_api (0.3.0) from parking_lot (#150) -- This release was _yanked_ from crates.io due to unexpected breakage (#156) - -## parking_lot 0.8.0, parking_lot_core 0.5.0, lock_api 0.2.0 (2019-05-04) - -- Fix race conditions in deadlock detection. -- Support for more platforms by adding ThreadParker implementations for - Wasm, Redox, SGX and CloudABI. -- Drop support for older Rust. parking_lot now requires 1.31 and is a - Rust 2018 edition crate (#122). -- Disable the owning_ref feature by default. -- Fix was_last_thread value in the timeout callback of park() (#129). -- Support single byte Mutex/Once on stable Rust when compiler is at least - version 1.34. -- Make Condvar::new and Once::new const fns on stable Rust and remove - ONCE_INIT (#134). -- Add optional Serde support (#135). - -## parking_lot 0.7.1 (2019-01-01) - -- Fixed potential deadlock when upgrading a RwLock. -- Fixed overflow panic on very long timeouts (#111). - -## parking_lot 0.7.0, parking_lot_core 0.4.0 (2018-11-26) - -- Return if or how many threads were notified from `Condvar::notify_*` - -## parking_lot 0.6.3 (2018-07-18) - -- Export `RawMutex`, `RawRwLock` and `RawThreadId`. 
- -## parking_lot 0.6.2 (2018-06-18) - -- Enable `lock_api/nightly` feature from `parking_lot/nightly` (#79) - -## parking_lot 0.6.1 (2018-06-08) - -Added missing typedefs for mapped lock guards: - -- `MappedMutexGuard` -- `MappedReentrantMutexGuard` -- `MappedRwLockReadGuard` -- `MappedRwLockWriteGuard` - -## parking_lot 0.6.0 (2018-06-08) - -This release moves most of the code for type-safe `Mutex` and `RwLock` types -into a separate crate called `lock_api`. This new crate is compatible with -`no_std` and provides `Mutex` and `RwLock` type-safe wrapper types from a raw -mutex type which implements the `RawMutex` or `RawRwLock` trait. The API -provided by the wrapper types can be extended by implementing more traits on -the raw mutex type which provide more functionality (e.g. `RawMutexTimed`). See -the crate documentation for more details. - -There are also several major changes: - -- The minimum required Rust version is bumped to 1.26. -- All methods on `MutexGuard` (and other guard types) are no longer inherent - methods and must be called as `MutexGuard::method(self)`. This avoids - conflicts with methods from the inner type. -- `MutexGuard` (and other guard types) add the `unlocked` method which - temporarily unlocks a mutex, runs the given closure, and then re-locks the - mutex. -- `MutexGuard` (and other guard types) add the `bump` method which gives a - chance for other threads to acquire the mutex by temporarily unlocking it and - re-locking it. However this is optimized for the common case where there are - no threads waiting on the lock, in which case no unlocking is performed. -- `MutexGuard` (and other guard types) add the `map` method which returns a - `MappedMutexGuard` which holds only a subset of the original locked type. The - `MappedMutexGuard` type is identical to `MutexGuard` except that it does not - support the `unlocked` and `bump` methods, and can't be used with `CondVar`. 
diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/parking_lot/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/parking_lot/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/LICENSE-MIT s390-tools-2.33.1/rust-vendor/parking_lot/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/parking_lot/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2016 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/README.md s390-tools-2.33.1/rust-vendor/parking_lot/README.md --- s390-tools-2.31.0/rust-vendor/parking_lot/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,156 +0,0 @@ -parking_lot -============ - -[![Rust](https://github.com/Amanieu/parking_lot/workflows/Rust/badge.svg)](https://github.com/Amanieu/parking_lot/actions) -[![Crates.io](https://img.shields.io/crates/v/parking_lot.svg)](https://crates.io/crates/parking_lot) - -[Documentation (synchronization primitives)](https://docs.rs/parking_lot/) - -[Documentation (core parking lot API)](https://docs.rs/parking_lot_core/) - -[Documentation (type-safe lock API)](https://docs.rs/lock_api/) - -This library provides implementations of `Mutex`, `RwLock`, `Condvar` and -`Once` that are smaller, faster and more flexible than those in the Rust -standard library, as well as a `ReentrantMutex` type which supports recursive -locking. It also exposes a low-level API for creating your own efficient -synchronization primitives. - -When tested on x86_64 Linux, `parking_lot::Mutex` was found to be 1.5x -faster than `std::sync::Mutex` when uncontended, and up to 5x faster when -contended from multiple threads. The numbers for `RwLock` vary depending on -the number of reader and writer threads, but are almost always faster than -the standard library `RwLock`, and even up to 50x faster in some cases. - -## Features - -The primitives provided by this library have several advantages over those -in the Rust standard library: - -1. `Mutex` and `Once` only require 1 byte of storage space, while `Condvar` - and `RwLock` only require 1 word of storage space. On the other hand the - standard library primitives require a dynamically allocated `Box` to hold - OS-specific synchronization primitives. 
The small size of `Mutex` in - particular encourages the use of fine-grained locks to increase - parallelism. -2. Since they consist of just a single atomic variable, have constant - initializers and don't need destructors, these primitives can be used as - `static` global variables. The standard library primitives require - dynamic initialization and thus need to be lazily initialized with - `lazy_static!`. -3. Uncontended lock acquisition and release is done through fast inline - paths which only require a single atomic operation. -4. Microcontention (a contended lock with a short critical section) is - efficiently handled by spinning a few times while trying to acquire a - lock. -5. The locks are adaptive and will suspend a thread after a few failed spin - attempts. This makes the locks suitable for both long and short critical - sections. -6. `Condvar`, `RwLock` and `Once` work on Windows XP, unlike the standard - library versions of those types. -7. `RwLock` takes advantage of hardware lock elision on processors that - support it, which can lead to huge performance wins with many readers. - This must be enabled with the `hardware-lock-elision` feature. -8. `RwLock` uses a task-fair locking policy, which avoids reader and writer - starvation, whereas the standard library version makes no guarantees. -9. `Condvar` is guaranteed not to produce spurious wakeups. A thread will - only be woken up if it timed out or it was woken up by a notification. -10. `Condvar::notify_all` will only wake up a single thread and requeue the - rest to wait on the associated `Mutex`. This avoids a thundering herd - problem where all threads try to acquire the lock at the same time. -11. `RwLock` supports atomically downgrading a write lock into a read lock. -12. `Mutex` and `RwLock` allow raw unlocking without a RAII guard object. -13. `Mutex<()>` and `RwLock<()>` allow raw locking without a RAII guard - object. -14. 
`Mutex` and `RwLock` support [eventual fairness](https://trac.webkit.org/changeset/203350) - which allows them to be fair on average without sacrificing performance. -15. A `ReentrantMutex` type which supports recursive locking. -16. An *experimental* deadlock detector that works for `Mutex`, - `RwLock` and `ReentrantMutex`. This feature is disabled by default and - can be enabled via the `deadlock_detection` feature. -17. `RwLock` supports atomically upgrading an "upgradable" read lock into a - write lock. -18. Optional support for [serde](https://docs.serde.rs/serde/). Enable via the - feature `serde`. **NOTE!** this support is for `Mutex`, `ReentrantMutex`, - and `RwLock` only; `Condvar` and `Once` are not currently supported. -19. Lock guards can be sent to other threads when the `send_guard` feature is - enabled. - -## The parking lot - -To keep these primitives small, all thread queuing and suspending -functionality is offloaded to the *parking lot*. The idea behind this is -based on the Webkit [`WTF::ParkingLot`](https://webkit.org/blog/6161/locking-in-webkit/) -class, which essentially consists of a hash table mapping of lock addresses -to queues of parked (sleeping) threads. The Webkit parking lot was itself -inspired by Linux [futexes](https://man7.org/linux/man-pages/man2/futex.2.html), -but it is more powerful since it allows invoking callbacks while holding a queue -lock. - -## Nightly vs stable - -There are a few restrictions when using this library on stable Rust: - -- The `wasm32-unknown-unknown` target is only fully supported on nightly with - `-C target-feature=+atomics` in `RUSTFLAGS` and `-Z build-std` passed to cargo. - parking_lot will work mostly fine on stable, the only difference is it will - panic instead of block forever if you hit a deadlock. - Just make sure not to enable `-C target-feature=+atomics` on stable as that - will allow wasm to run with multiple threads which will completely break - parking_lot's concurrency guarantees. 
- -To enable nightly-only functionality, you need to enable the `nightly` feature -in Cargo (see below). - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -parking_lot = "0.12" -``` - -To enable nightly-only features, add this to your `Cargo.toml` instead: - -```toml -[dependencies] -parking_lot = { version = "0.12", features = ["nightly"] } -``` - -The experimental deadlock detector can be enabled with the -`deadlock_detection` Cargo feature. - -To allow sending `MutexGuard`s and `RwLock*Guard`s to other threads, enable the -`send_guard` option. - -Note that the `deadlock_detection` and `send_guard` features are incompatible -and cannot be used together. - -Hardware lock elision support for x86 can be enabled with the -`hardware-lock-elision` feature. This requires Rust 1.59 due to the use of -inline assembly. - -The core parking lot API is provided by the `parking_lot_core` crate. It is -separate from the synchronization primitives in the `parking_lot` crate so that -changes to the core API do not cause breaking changes for users of `parking_lot`. - -## Minimum Rust version - -The current minimum required Rust version is 1.49. Any change to this is -considered a breaking change and will require a major version bump. - -## License - -Licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any -additional terms or conditions. 
diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/condvar.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/condvar.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/condvar.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/condvar.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1271 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::mutex::MutexGuard; -use crate::raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL}; -use crate::{deadlock, util}; -use core::{ - fmt, ptr, - sync::atomic::{AtomicPtr, Ordering}, -}; -use lock_api::RawMutex as RawMutex_; -use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN}; -use std::ops::DerefMut; -use std::time::{Duration, Instant}; - -/// A type indicating whether a timed wait on a condition variable returned -/// due to a time out or not. -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct WaitTimeoutResult(bool); - -impl WaitTimeoutResult { - /// Returns whether the wait was known to have timed out. - #[inline] - pub fn timed_out(self) -> bool { - self.0 - } -} - -/// A Condition Variable -/// -/// Condition variables represent the ability to block a thread such that it -/// consumes no CPU time while waiting for an event to occur. Condition -/// variables are typically associated with a boolean predicate (a condition) -/// and a mutex. The predicate is always verified inside of the mutex before -/// determining that thread must block. -/// -/// Note that this module places one additional restriction over the system -/// condition variables: each condvar can be used with only one mutex at a -/// time. Any attempt to use multiple mutexes on the same condition variable -/// simultaneously will result in a runtime panic. 
However it is possible to -/// switch to a different mutex if there are no threads currently waiting on -/// the condition variable. -/// -/// # Differences from the standard library `Condvar` -/// -/// - No spurious wakeups: A wait will only return a non-timeout result if it -/// was woken up by `notify_one` or `notify_all`. -/// - `Condvar::notify_all` will only wake up a single thread, the rest are -/// requeued to wait for the `Mutex` to be unlocked by the thread that was -/// woken up. -/// - Only requires 1 word of space, whereas the standard library boxes the -/// `Condvar` due to platform limitations. -/// - Can be statically constructed. -/// - Does not require any drop glue when dropped. -/// - Inline fast path for the uncontended case. -/// -/// # Examples -/// -/// ``` -/// use parking_lot::{Mutex, Condvar}; -/// use std::sync::Arc; -/// use std::thread; -/// -/// let pair = Arc::new((Mutex::new(false), Condvar::new())); -/// let pair2 = pair.clone(); -/// -/// // Inside of our lock, spawn a new thread, and then wait for it to start -/// thread::spawn(move|| { -/// let &(ref lock, ref cvar) = &*pair2; -/// let mut started = lock.lock(); -/// *started = true; -/// cvar.notify_one(); -/// }); -/// -/// // wait for the thread to start up -/// let &(ref lock, ref cvar) = &*pair; -/// let mut started = lock.lock(); -/// if !*started { -/// cvar.wait(&mut started); -/// } -/// // Note that we used an if instead of a while loop above. This is only -/// // possible because parking_lot's Condvar will never spuriously wake up. -/// // This means that wait() will only return after notify_one or notify_all is -/// // called. -/// ``` -pub struct Condvar { - state: AtomicPtr, -} - -impl Condvar { - /// Creates a new condition variable which is ready to be waited on and - /// notified. - #[inline] - pub const fn new() -> Condvar { - Condvar { - state: AtomicPtr::new(ptr::null_mut()), - } - } - - /// Wakes up one blocked thread on this condvar. 
- /// - /// Returns whether a thread was woken up. - /// - /// If there is a blocked thread on this condition variable, then it will - /// be woken up from its call to `wait` or `wait_timeout`. Calls to - /// `notify_one` are not buffered in any way. - /// - /// To wake up all threads, see `notify_all()`. - /// - /// # Examples - /// - /// ``` - /// use parking_lot::Condvar; - /// - /// let condvar = Condvar::new(); - /// - /// // do something with condvar, share it with other threads - /// - /// if !condvar.notify_one() { - /// println!("Nobody was listening for this."); - /// } - /// ``` - #[inline] - pub fn notify_one(&self) -> bool { - // Nothing to do if there are no waiting threads - let state = self.state.load(Ordering::Relaxed); - if state.is_null() { - return false; - } - - self.notify_one_slow(state) - } - - #[cold] - fn notify_one_slow(&self, mutex: *mut RawMutex) -> bool { - // Unpark one thread and requeue the rest onto the mutex - let from = self as *const _ as usize; - let to = mutex as usize; - let validate = || { - // Make sure that our atomic state still points to the same - // mutex. If not then it means that all threads on the current - // mutex were woken up and a new waiting thread switched to a - // different mutex. In that case we can get away with doing - // nothing. - if self.state.load(Ordering::Relaxed) != mutex { - return RequeueOp::Abort; - } - - // Unpark one thread if the mutex is unlocked, otherwise just - // requeue everything to the mutex. This is safe to do here - // since unlocking the mutex when the parked bit is set requires - // locking the queue. There is the possibility of a race if the - // mutex gets locked after we check, but that doesn't matter in - // this case. 
- if unsafe { (*mutex).mark_parked_if_locked() } { - RequeueOp::RequeueOne - } else { - RequeueOp::UnparkOne - } - }; - let callback = |_op, result: UnparkResult| { - // Clear our state if there are no more waiting threads - if !result.have_more_threads { - self.state.store(ptr::null_mut(), Ordering::Relaxed); - } - TOKEN_NORMAL - }; - let res = unsafe { parking_lot_core::unpark_requeue(from, to, validate, callback) }; - - res.unparked_threads + res.requeued_threads != 0 - } - - /// Wakes up all blocked threads on this condvar. - /// - /// Returns the number of threads woken up. - /// - /// This method will ensure that any current waiters on the condition - /// variable are awoken. Calls to `notify_all()` are not buffered in any - /// way. - /// - /// To wake up only one thread, see `notify_one()`. - #[inline] - pub fn notify_all(&self) -> usize { - // Nothing to do if there are no waiting threads - let state = self.state.load(Ordering::Relaxed); - if state.is_null() { - return 0; - } - - self.notify_all_slow(state) - } - - #[cold] - fn notify_all_slow(&self, mutex: *mut RawMutex) -> usize { - // Unpark one thread and requeue the rest onto the mutex - let from = self as *const _ as usize; - let to = mutex as usize; - let validate = || { - // Make sure that our atomic state still points to the same - // mutex. If not then it means that all threads on the current - // mutex were woken up and a new waiting thread switched to a - // different mutex. In that case we can get away with doing - // nothing. - if self.state.load(Ordering::Relaxed) != mutex { - return RequeueOp::Abort; - } - - // Clear our state since we are going to unpark or requeue all - // threads. - self.state.store(ptr::null_mut(), Ordering::Relaxed); - - // Unpark one thread if the mutex is unlocked, otherwise just - // requeue everything to the mutex. This is safe to do here - // since unlocking the mutex when the parked bit is set requires - // locking the queue. 
There is the possibility of a race if the - // mutex gets locked after we check, but that doesn't matter in - // this case. - if unsafe { (*mutex).mark_parked_if_locked() } { - RequeueOp::RequeueAll - } else { - RequeueOp::UnparkOneRequeueRest - } - }; - let callback = |op, result: UnparkResult| { - // If we requeued threads to the mutex, mark it as having - // parked threads. The RequeueAll case is already handled above. - if op == RequeueOp::UnparkOneRequeueRest && result.requeued_threads != 0 { - unsafe { (*mutex).mark_parked() }; - } - TOKEN_NORMAL - }; - let res = unsafe { parking_lot_core::unpark_requeue(from, to, validate, callback) }; - - res.unparked_threads + res.requeued_threads - } - - /// Blocks the current thread until this condition variable receives a - /// notification. - /// - /// This function will atomically unlock the mutex specified (represented by - /// `mutex_guard`) and block the current thread. This means that any calls - /// to `notify_*()` which happen logically after the mutex is unlocked are - /// candidates to wake this thread up. When this function call returns, the - /// lock specified will have been re-acquired. - /// - /// # Panics - /// - /// This function will panic if another thread is waiting on the `Condvar` - /// with a different `Mutex` object. - #[inline] - pub fn wait(&self, mutex_guard: &mut MutexGuard<'_, T>) { - self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None); - } - - /// Waits on this condition variable for a notification, timing out after - /// the specified time instant. - /// - /// The semantics of this function are equivalent to `wait()` except that - /// the thread will be blocked roughly until `timeout` is reached. This - /// method should not be used for precise timing due to anomalies such as - /// preemption or platform differences that may not cause the maximum - /// amount of time waited to be precisely `timeout`. 
- /// - /// Note that the best effort is made to ensure that the time waited is - /// measured with a monotonic clock, and not affected by the changes made to - /// the system time. - /// - /// The returned `WaitTimeoutResult` value indicates if the timeout is - /// known to have elapsed. - /// - /// Like `wait`, the lock specified will be re-acquired when this function - /// returns, regardless of whether the timeout elapsed or not. - /// - /// # Panics - /// - /// This function will panic if another thread is waiting on the `Condvar` - /// with a different `Mutex` object. - #[inline] - pub fn wait_until( - &self, - mutex_guard: &mut MutexGuard<'_, T>, - timeout: Instant, - ) -> WaitTimeoutResult { - self.wait_until_internal( - unsafe { MutexGuard::mutex(mutex_guard).raw() }, - Some(timeout), - ) - } - - // This is a non-generic function to reduce the monomorphization cost of - // using `wait_until`. - fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option) -> WaitTimeoutResult { - let result; - let mut bad_mutex = false; - let mut requeued = false; - { - let addr = self as *const _ as usize; - let lock_addr = mutex as *const _ as *mut _; - let validate = || { - // Ensure we don't use two different mutexes with the same - // Condvar at the same time. This is done while locked to - // avoid races with notify_one - let state = self.state.load(Ordering::Relaxed); - if state.is_null() { - self.state.store(lock_addr, Ordering::Relaxed); - } else if state != lock_addr { - bad_mutex = true; - return false; - } - true - }; - let before_sleep = || { - // Unlock the mutex before sleeping... - unsafe { mutex.unlock() }; - }; - let timed_out = |k, was_last_thread| { - // If we were requeued to a mutex, then we did not time out. - // We'll just park ourselves on the mutex again when we try - // to lock it later. - requeued = k != addr; - - // If we were the last thread on the queue then we need to - // clear our state. 
This is normally done by the - // notify_{one,all} functions when not timing out. - if !requeued && was_last_thread { - self.state.store(ptr::null_mut(), Ordering::Relaxed); - } - }; - result = unsafe { parking_lot_core::park( - addr, - validate, - before_sleep, - timed_out, - DEFAULT_PARK_TOKEN, - timeout, - ) }; - } - - // Panic if we tried to use multiple mutexes with a Condvar. Note - // that at this point the MutexGuard is still locked. It will be - // unlocked by the unwinding logic. - if bad_mutex { - panic!("attempted to use a condition variable with more than one mutex"); - } - - // ... and re-lock it once we are done sleeping - if result == ParkResult::Unparked(TOKEN_HANDOFF) { - unsafe { deadlock::acquire_resource(mutex as *const _ as usize) }; - } else { - mutex.lock(); - } - - WaitTimeoutResult(!(result.is_unparked() || requeued)) - } - - /// Waits on this condition variable for a notification, timing out after a - /// specified duration. - /// - /// The semantics of this function are equivalent to `wait()` except that - /// the thread will be blocked for roughly no longer than `timeout`. This - /// method should not be used for precise timing due to anomalies such as - /// preemption or platform differences that may not cause the maximum - /// amount of time waited to be precisely `timeout`. - /// - /// Note that the best effort is made to ensure that the time waited is - /// measured with a monotonic clock, and not affected by the changes made to - /// the system time. - /// - /// The returned `WaitTimeoutResult` value indicates if the timeout is - /// known to have elapsed. - /// - /// Like `wait`, the lock specified will be re-acquired when this function - /// returns, regardless of whether the timeout elapsed or not. 
- #[inline] - pub fn wait_for( - &self, - mutex_guard: &mut MutexGuard<'_, T>, - timeout: Duration, - ) -> WaitTimeoutResult { - let deadline = util::to_deadline(timeout); - self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, deadline) - } - - #[inline] - fn wait_while_until_internal( - &self, - mutex_guard: &mut MutexGuard<'_, T>, - mut condition: F, - timeout: Option, - ) -> WaitTimeoutResult - where - T: ?Sized, - F: FnMut(&mut T) -> bool, - { - let mut result = WaitTimeoutResult(false); - - while !result.timed_out() && condition(mutex_guard.deref_mut()) { - result = - self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, timeout); - } - - result - } - /// Blocks the current thread until this condition variable receives a - /// notification. If the provided condition evaluates to `false`, then the - /// thread is no longer blocked and the operation is completed. If the - /// condition evaluates to `true`, then the thread is blocked again and - /// waits for another notification before repeating this process. - /// - /// This function will atomically unlock the mutex specified (represented by - /// `mutex_guard`) and block the current thread. This means that any calls - /// to `notify_*()` which happen logically after the mutex is unlocked are - /// candidates to wake this thread up. When this function call returns, the - /// lock specified will have been re-acquired. - /// - /// # Panics - /// - /// This function will panic if another thread is waiting on the `Condvar` - /// with a different `Mutex` object. - #[inline] - pub fn wait_while(&self, mutex_guard: &mut MutexGuard<'_, T>, condition: F) - where - T: ?Sized, - F: FnMut(&mut T) -> bool, - { - self.wait_while_until_internal(mutex_guard, condition, None); - } - - /// Waits on this condition variable for a notification, timing out after - /// the specified time instant. 
If the provided condition evaluates to - /// `false`, then the thread is no longer blocked and the operation is - /// completed. If the condition evaluates to `true`, then the thread is - /// blocked again and waits for another notification before repeating - /// this process. - /// - /// The semantics of this function are equivalent to `wait()` except that - /// the thread will be blocked roughly until `timeout` is reached. This - /// method should not be used for precise timing due to anomalies such as - /// preemption or platform differences that may not cause the maximum - /// amount of time waited to be precisely `timeout`. - /// - /// Note that the best effort is made to ensure that the time waited is - /// measured with a monotonic clock, and not affected by the changes made to - /// the system time. - /// - /// The returned `WaitTimeoutResult` value indicates if the timeout is - /// known to have elapsed. - /// - /// Like `wait`, the lock specified will be re-acquired when this function - /// returns, regardless of whether the timeout elapsed or not. - /// - /// # Panics - /// - /// This function will panic if another thread is waiting on the `Condvar` - /// with a different `Mutex` object. - #[inline] - pub fn wait_while_until( - &self, - mutex_guard: &mut MutexGuard<'_, T>, - condition: F, - timeout: Instant, - ) -> WaitTimeoutResult - where - T: ?Sized, - F: FnMut(&mut T) -> bool, - { - self.wait_while_until_internal(mutex_guard, condition, Some(timeout)) - } - - /// Waits on this condition variable for a notification, timing out after a - /// specified duration. If the provided condition evaluates to `false`, - /// then the thread is no longer blocked and the operation is completed. - /// If the condition evaluates to `true`, then the thread is blocked again - /// and waits for another notification before repeating this process. 
- /// - /// The semantics of this function are equivalent to `wait()` except that - /// the thread will be blocked for roughly no longer than `timeout`. This - /// method should not be used for precise timing due to anomalies such as - /// preemption or platform differences that may not cause the maximum - /// amount of time waited to be precisely `timeout`. - /// - /// Note that the best effort is made to ensure that the time waited is - /// measured with a monotonic clock, and not affected by the changes made to - /// the system time. - /// - /// The returned `WaitTimeoutResult` value indicates if the timeout is - /// known to have elapsed. - /// - /// Like `wait`, the lock specified will be re-acquired when this function - /// returns, regardless of whether the timeout elapsed or not. - #[inline] - pub fn wait_while_for( - &self, - mutex_guard: &mut MutexGuard<'_, T>, - condition: F, - timeout: Duration, - ) -> WaitTimeoutResult - where - F: FnMut(&mut T) -> bool, - { - let deadline = util::to_deadline(timeout); - self.wait_while_until_internal(mutex_guard, condition, deadline) - } -} - -impl Default for Condvar { - #[inline] - fn default() -> Condvar { - Condvar::new() - } -} - -impl fmt::Debug for Condvar { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Condvar { .. 
}") - } -} - -#[cfg(test)] -mod tests { - use crate::{Condvar, Mutex, MutexGuard}; - use std::sync::mpsc::channel; - use std::sync::Arc; - use std::thread; - use std::thread::sleep; - use std::thread::JoinHandle; - use std::time::Duration; - use std::time::Instant; - - #[test] - fn smoke() { - let c = Condvar::new(); - c.notify_one(); - c.notify_all(); - } - - #[test] - fn notify_one() { - let m = Arc::new(Mutex::new(())); - let m2 = m.clone(); - let c = Arc::new(Condvar::new()); - let c2 = c.clone(); - - let mut g = m.lock(); - let _t = thread::spawn(move || { - let _g = m2.lock(); - c2.notify_one(); - }); - c.wait(&mut g); - } - - #[test] - fn notify_all() { - const N: usize = 10; - - let data = Arc::new((Mutex::new(0), Condvar::new())); - let (tx, rx) = channel(); - for _ in 0..N { - let data = data.clone(); - let tx = tx.clone(); - thread::spawn(move || { - let &(ref lock, ref cond) = &*data; - let mut cnt = lock.lock(); - *cnt += 1; - if *cnt == N { - tx.send(()).unwrap(); - } - while *cnt != 0 { - cond.wait(&mut cnt); - } - tx.send(()).unwrap(); - }); - } - drop(tx); - - let &(ref lock, ref cond) = &*data; - rx.recv().unwrap(); - let mut cnt = lock.lock(); - *cnt = 0; - cond.notify_all(); - drop(cnt); - - for _ in 0..N { - rx.recv().unwrap(); - } - } - - #[test] - fn notify_one_return_true() { - let m = Arc::new(Mutex::new(())); - let m2 = m.clone(); - let c = Arc::new(Condvar::new()); - let c2 = c.clone(); - - let mut g = m.lock(); - let _t = thread::spawn(move || { - let _g = m2.lock(); - assert!(c2.notify_one()); - }); - c.wait(&mut g); - } - - #[test] - fn notify_one_return_false() { - let m = Arc::new(Mutex::new(())); - let c = Arc::new(Condvar::new()); - - let _t = thread::spawn(move || { - let _g = m.lock(); - assert!(!c.notify_one()); - }); - } - - #[test] - fn notify_all_return() { - const N: usize = 10; - - let data = Arc::new((Mutex::new(0), Condvar::new())); - let (tx, rx) = channel(); - for _ in 0..N { - let data = data.clone(); - let tx = 
tx.clone(); - thread::spawn(move || { - let &(ref lock, ref cond) = &*data; - let mut cnt = lock.lock(); - *cnt += 1; - if *cnt == N { - tx.send(()).unwrap(); - } - while *cnt != 0 { - cond.wait(&mut cnt); - } - tx.send(()).unwrap(); - }); - } - drop(tx); - - let &(ref lock, ref cond) = &*data; - rx.recv().unwrap(); - let mut cnt = lock.lock(); - *cnt = 0; - assert_eq!(cond.notify_all(), N); - drop(cnt); - - for _ in 0..N { - rx.recv().unwrap(); - } - - assert_eq!(cond.notify_all(), 0); - } - - #[test] - fn wait_for() { - let m = Arc::new(Mutex::new(())); - let m2 = m.clone(); - let c = Arc::new(Condvar::new()); - let c2 = c.clone(); - - let mut g = m.lock(); - let no_timeout = c.wait_for(&mut g, Duration::from_millis(1)); - assert!(no_timeout.timed_out()); - - let _t = thread::spawn(move || { - let _g = m2.lock(); - c2.notify_one(); - }); - let timeout_res = c.wait_for(&mut g, Duration::from_secs(u64::max_value())); - assert!(!timeout_res.timed_out()); - - drop(g); - } - - #[test] - fn wait_until() { - let m = Arc::new(Mutex::new(())); - let m2 = m.clone(); - let c = Arc::new(Condvar::new()); - let c2 = c.clone(); - - let mut g = m.lock(); - let no_timeout = c.wait_until(&mut g, Instant::now() + Duration::from_millis(1)); - assert!(no_timeout.timed_out()); - let _t = thread::spawn(move || { - let _g = m2.lock(); - c2.notify_one(); - }); - let timeout_res = c.wait_until( - &mut g, - Instant::now() + Duration::from_millis(u32::max_value() as u64), - ); - assert!(!timeout_res.timed_out()); - drop(g); - } - - fn spawn_wait_while_notifier( - mutex: Arc>, - cv: Arc, - num_iters: u32, - timeout: Option, - ) -> JoinHandle<()> { - thread::spawn(move || { - for epoch in 1..=num_iters { - // spin to wait for main test thread to block - // before notifying it to wake back up and check - // its condition. 
- let mut sleep_backoff = Duration::from_millis(1); - let _mutex_guard = loop { - let mutex_guard = mutex.lock(); - - if let Some(timeout) = timeout { - if Instant::now() >= timeout { - return; - } - } - - if *mutex_guard == epoch { - break mutex_guard; - } - - drop(mutex_guard); - - // give main test thread a good chance to - // acquire the lock before this thread does. - sleep(sleep_backoff); - sleep_backoff *= 2; - }; - - cv.notify_one(); - } - }) - } - - #[test] - fn wait_while_until_internal_does_not_wait_if_initially_false() { - let mutex = Arc::new(Mutex::new(0)); - let cv = Arc::new(Condvar::new()); - - let condition = |counter: &mut u32| { - *counter += 1; - false - }; - - let mut mutex_guard = mutex.lock(); - let timeout_result = cv - .wait_while_until_internal(&mut mutex_guard, condition, None); - - assert!(!timeout_result.timed_out()); - assert!(*mutex_guard == 1); - } - - #[test] - fn wait_while_until_internal_times_out_before_false() { - let mutex = Arc::new(Mutex::new(0)); - let cv = Arc::new(Condvar::new()); - - let num_iters = 3; - let condition = |counter: &mut u32| { - *counter += 1; - true - }; - - let mut mutex_guard = mutex.lock(); - let timeout = Some(Instant::now() + Duration::from_millis(500)); - let handle = spawn_wait_while_notifier(mutex.clone(), cv.clone(), num_iters, timeout); - - let timeout_result = - cv.wait_while_until_internal(&mut mutex_guard, condition, timeout); - - assert!(timeout_result.timed_out()); - assert!(*mutex_guard == num_iters + 1); - - // prevent deadlock with notifier - drop(mutex_guard); - handle.join().unwrap(); - } - - #[test] - fn wait_while_until_internal() { - let mutex = Arc::new(Mutex::new(0)); - let cv = Arc::new(Condvar::new()); - - let num_iters = 4; - - let condition = |counter: &mut u32| { - *counter += 1; - *counter <= num_iters - }; - - let mut mutex_guard = mutex.lock(); - let handle = spawn_wait_while_notifier(mutex.clone(), cv.clone(), num_iters, None); - - let timeout_result = - 
cv.wait_while_until_internal(&mut mutex_guard, condition, None); - - assert!(!timeout_result.timed_out()); - assert!(*mutex_guard == num_iters + 1); - - let timeout_result = cv.wait_while_until_internal(&mut mutex_guard, condition, None); - handle.join().unwrap(); - - assert!(!timeout_result.timed_out()); - assert!(*mutex_guard == num_iters + 2); - } - - #[test] - #[should_panic] - fn two_mutexes() { - let m = Arc::new(Mutex::new(())); - let m2 = m.clone(); - let m3 = Arc::new(Mutex::new(())); - let c = Arc::new(Condvar::new()); - let c2 = c.clone(); - - // Make sure we don't leave the child thread dangling - struct PanicGuard<'a>(&'a Condvar); - impl<'a> Drop for PanicGuard<'a> { - fn drop(&mut self) { - self.0.notify_one(); - } - } - - let (tx, rx) = channel(); - let g = m.lock(); - let _t = thread::spawn(move || { - let mut g = m2.lock(); - tx.send(()).unwrap(); - c2.wait(&mut g); - }); - drop(g); - rx.recv().unwrap(); - let _g = m.lock(); - let _guard = PanicGuard(&*c); - c.wait(&mut m3.lock()); - } - - #[test] - fn two_mutexes_disjoint() { - let m = Arc::new(Mutex::new(())); - let m2 = m.clone(); - let m3 = Arc::new(Mutex::new(())); - let c = Arc::new(Condvar::new()); - let c2 = c.clone(); - - let mut g = m.lock(); - let _t = thread::spawn(move || { - let _g = m2.lock(); - c2.notify_one(); - }); - c.wait(&mut g); - drop(g); - - let _ = c.wait_for(&mut m3.lock(), Duration::from_millis(1)); - } - - #[test] - fn test_debug_condvar() { - let c = Condvar::new(); - assert_eq!(format!("{:?}", c), "Condvar { .. }"); - } - - #[test] - fn test_condvar_requeue() { - let m = Arc::new(Mutex::new(())); - let m2 = m.clone(); - let c = Arc::new(Condvar::new()); - let c2 = c.clone(); - let t = thread::spawn(move || { - let mut g = m2.lock(); - c2.wait(&mut g); - }); - - let mut g = m.lock(); - while !c.notify_one() { - // Wait for the thread to get into wait() - MutexGuard::bump(&mut g); - // Yield, so the other thread gets a chance to do something. 
- // (At least Miri needs this, because it doesn't preempt threads.) - thread::yield_now(); - } - // The thread should have been requeued to the mutex, which we wake up now. - drop(g); - t.join().unwrap(); - } - - #[test] - fn test_issue_129() { - let locks = Arc::new((Mutex::new(()), Condvar::new())); - - let (tx, rx) = channel(); - for _ in 0..4 { - let locks = locks.clone(); - let tx = tx.clone(); - thread::spawn(move || { - let mut guard = locks.0.lock(); - locks.1.wait(&mut guard); - locks.1.wait_for(&mut guard, Duration::from_millis(1)); - locks.1.notify_one(); - tx.send(()).unwrap(); - }); - } - - thread::sleep(Duration::from_millis(100)); - locks.1.notify_one(); - - for _ in 0..4 { - assert_eq!(rx.recv_timeout(Duration::from_millis(500)), Ok(())); - } - } -} - -/// This module contains an integration test that is heavily inspired from WebKit's own integration -/// tests for it's own Condvar. -#[cfg(test)] -mod webkit_queue_test { - use crate::{Condvar, Mutex, MutexGuard}; - use std::{collections::VecDeque, sync::Arc, thread, time::Duration}; - - #[derive(Clone, Copy)] - enum Timeout { - Bounded(Duration), - Forever, - } - - #[derive(Clone, Copy)] - enum NotifyStyle { - One, - All, - } - - struct Queue { - items: VecDeque, - should_continue: bool, - } - - impl Queue { - fn new() -> Self { - Self { - items: VecDeque::new(), - should_continue: true, - } - } - } - - fn wait( - condition: &Condvar, - lock: &mut MutexGuard<'_, T>, - predicate: impl Fn(&mut MutexGuard<'_, T>) -> bool, - timeout: &Timeout, - ) { - while !predicate(lock) { - match timeout { - Timeout::Forever => condition.wait(lock), - Timeout::Bounded(bound) => { - condition.wait_for(lock, *bound); - } - } - } - } - - fn notify(style: NotifyStyle, condition: &Condvar, should_notify: bool) { - match style { - NotifyStyle::One => { - condition.notify_one(); - } - NotifyStyle::All => { - if should_notify { - condition.notify_all(); - } - } - } - } - - fn run_queue_test( - num_producers: usize, - 
num_consumers: usize, - max_queue_size: usize, - messages_per_producer: usize, - notify_style: NotifyStyle, - timeout: Timeout, - delay: Duration, - ) { - let input_queue = Arc::new(Mutex::new(Queue::new())); - let empty_condition = Arc::new(Condvar::new()); - let full_condition = Arc::new(Condvar::new()); - - let output_vec = Arc::new(Mutex::new(vec![])); - - let consumers = (0..num_consumers) - .map(|_| { - consumer_thread( - input_queue.clone(), - empty_condition.clone(), - full_condition.clone(), - timeout, - notify_style, - output_vec.clone(), - max_queue_size, - ) - }) - .collect::>(); - let producers = (0..num_producers) - .map(|_| { - producer_thread( - messages_per_producer, - input_queue.clone(), - empty_condition.clone(), - full_condition.clone(), - timeout, - notify_style, - max_queue_size, - ) - }) - .collect::>(); - - thread::sleep(delay); - - for producer in producers.into_iter() { - producer.join().expect("Producer thread panicked"); - } - - { - let mut input_queue = input_queue.lock(); - input_queue.should_continue = false; - } - empty_condition.notify_all(); - - for consumer in consumers.into_iter() { - consumer.join().expect("Consumer thread panicked"); - } - - let mut output_vec = output_vec.lock(); - assert_eq!(output_vec.len(), num_producers * messages_per_producer); - output_vec.sort(); - for msg_idx in 0..messages_per_producer { - for producer_idx in 0..num_producers { - assert_eq!(msg_idx, output_vec[msg_idx * num_producers + producer_idx]); - } - } - } - - fn consumer_thread( - input_queue: Arc>, - empty_condition: Arc, - full_condition: Arc, - timeout: Timeout, - notify_style: NotifyStyle, - output_queue: Arc>>, - max_queue_size: usize, - ) -> thread::JoinHandle<()> { - thread::spawn(move || loop { - let (should_notify, result) = { - let mut queue = input_queue.lock(); - wait( - &*empty_condition, - &mut queue, - |state| -> bool { !state.items.is_empty() || !state.should_continue }, - &timeout, - ); - if queue.items.is_empty() && 
!queue.should_continue { - return; - } - let should_notify = queue.items.len() == max_queue_size; - let result = queue.items.pop_front(); - std::mem::drop(queue); - (should_notify, result) - }; - notify(notify_style, &*full_condition, should_notify); - - if let Some(result) = result { - output_queue.lock().push(result); - } - }) - } - - fn producer_thread( - num_messages: usize, - queue: Arc>, - empty_condition: Arc, - full_condition: Arc, - timeout: Timeout, - notify_style: NotifyStyle, - max_queue_size: usize, - ) -> thread::JoinHandle<()> { - thread::spawn(move || { - for message in 0..num_messages { - let should_notify = { - let mut queue = queue.lock(); - wait( - &*full_condition, - &mut queue, - |state| state.items.len() < max_queue_size, - &timeout, - ); - let should_notify = queue.items.is_empty(); - queue.items.push_back(message); - std::mem::drop(queue); - should_notify - }; - notify(notify_style, &*empty_condition, should_notify); - } - }) - } - - macro_rules! run_queue_tests { - ( $( $name:ident( - num_producers: $num_producers:expr, - num_consumers: $num_consumers:expr, - max_queue_size: $max_queue_size:expr, - messages_per_producer: $messages_per_producer:expr, - notification_style: $notification_style:expr, - timeout: $timeout:expr, - delay_seconds: $delay_seconds:expr); - )* ) => { - $(#[test] - fn $name() { - let delay = Duration::from_secs($delay_seconds); - run_queue_test( - $num_producers, - $num_consumers, - $max_queue_size, - $messages_per_producer, - $notification_style, - $timeout, - delay, - ); - })* - }; - } - - run_queue_tests! 
{ - sanity_check_queue( - num_producers: 1, - num_consumers: 1, - max_queue_size: 1, - messages_per_producer: 100_000, - notification_style: NotifyStyle::All, - timeout: Timeout::Bounded(Duration::from_secs(1)), - delay_seconds: 0 - ); - sanity_check_queue_timeout( - num_producers: 1, - num_consumers: 1, - max_queue_size: 1, - messages_per_producer: 100_000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - new_test_without_timeout_5( - num_producers: 1, - num_consumers: 5, - max_queue_size: 1, - messages_per_producer: 100_000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - one_producer_one_consumer_one_slot( - num_producers: 1, - num_consumers: 1, - max_queue_size: 1, - messages_per_producer: 100_000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - one_producer_one_consumer_one_slot_timeout( - num_producers: 1, - num_consumers: 1, - max_queue_size: 1, - messages_per_producer: 100_000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 1 - ); - one_producer_one_consumer_hundred_slots( - num_producers: 1, - num_consumers: 1, - max_queue_size: 100, - messages_per_producer: 1_000_000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - ten_producers_one_consumer_one_slot( - num_producers: 10, - num_consumers: 1, - max_queue_size: 1, - messages_per_producer: 10000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - ten_producers_one_consumer_hundred_slots_notify_all( - num_producers: 10, - num_consumers: 1, - max_queue_size: 100, - messages_per_producer: 10000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - ten_producers_one_consumer_hundred_slots_notify_one( - num_producers: 10, - num_consumers: 1, - max_queue_size: 100, - messages_per_producer: 10000, - 
notification_style: NotifyStyle::One, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - one_producer_ten_consumers_one_slot( - num_producers: 1, - num_consumers: 10, - max_queue_size: 1, - messages_per_producer: 10000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - one_producer_ten_consumers_hundred_slots_notify_all( - num_producers: 1, - num_consumers: 10, - max_queue_size: 100, - messages_per_producer: 100_000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - one_producer_ten_consumers_hundred_slots_notify_one( - num_producers: 1, - num_consumers: 10, - max_queue_size: 100, - messages_per_producer: 100_000, - notification_style: NotifyStyle::One, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - ten_producers_ten_consumers_one_slot( - num_producers: 10, - num_consumers: 10, - max_queue_size: 1, - messages_per_producer: 50000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - ten_producers_ten_consumers_hundred_slots_notify_all( - num_producers: 10, - num_consumers: 10, - max_queue_size: 100, - messages_per_producer: 50000, - notification_style: NotifyStyle::All, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - ten_producers_ten_consumers_hundred_slots_notify_one( - num_producers: 10, - num_consumers: 10, - max_queue_size: 100, - messages_per_producer: 50000, - notification_style: NotifyStyle::One, - timeout: Timeout::Forever, - delay_seconds: 0 - ); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/deadlock.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/deadlock.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/deadlock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/deadlock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,232 +0,0 @@ -//! \[Experimental\] Deadlock detection -//! -//! 
This feature is optional and can be enabled via the `deadlock_detection` feature flag. -//! -//! # Example -//! -//! ``` -//! #[cfg(feature = "deadlock_detection")] -//! { // only for #[cfg] -//! use std::thread; -//! use std::time::Duration; -//! use parking_lot::deadlock; -//! -//! // Create a background thread which checks for deadlocks every 10s -//! thread::spawn(move || { -//! loop { -//! thread::sleep(Duration::from_secs(10)); -//! let deadlocks = deadlock::check_deadlock(); -//! if deadlocks.is_empty() { -//! continue; -//! } -//! -//! println!("{} deadlocks detected", deadlocks.len()); -//! for (i, threads) in deadlocks.iter().enumerate() { -//! println!("Deadlock #{}", i); -//! for t in threads { -//! println!("Thread Id {:#?}", t.thread_id()); -//! println!("{:#?}", t.backtrace()); -//! } -//! } -//! } -//! }); -//! } // only for #[cfg] -//! ``` - -#[cfg(feature = "deadlock_detection")] -pub use parking_lot_core::deadlock::check_deadlock; -pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource}; - -#[cfg(test)] -#[cfg(feature = "deadlock_detection")] -mod tests { - use crate::{Mutex, ReentrantMutex, RwLock}; - use std::sync::{Arc, Barrier}; - use std::thread::{self, sleep}; - use std::time::Duration; - - // We need to serialize these tests since deadlock detection uses global state - static DEADLOCK_DETECTION_LOCK: Mutex<()> = crate::const_mutex(()); - - fn check_deadlock() -> bool { - use parking_lot_core::deadlock::check_deadlock; - !check_deadlock().is_empty() - } - - #[test] - fn test_mutex_deadlock() { - let _guard = DEADLOCK_DETECTION_LOCK.lock(); - - let m1: Arc> = Default::default(); - let m2: Arc> = Default::default(); - let m3: Arc> = Default::default(); - let b = Arc::new(Barrier::new(4)); - - let m1_ = m1.clone(); - let m2_ = m2.clone(); - let m3_ = m3.clone(); - let b1 = b.clone(); - let b2 = b.clone(); - let b3 = b.clone(); - - assert!(!check_deadlock()); - - let _t1 = thread::spawn(move || { - let _g = m1.lock(); - 
b1.wait(); - let _ = m2_.lock(); - }); - - let _t2 = thread::spawn(move || { - let _g = m2.lock(); - b2.wait(); - let _ = m3_.lock(); - }); - - let _t3 = thread::spawn(move || { - let _g = m3.lock(); - b3.wait(); - let _ = m1_.lock(); - }); - - assert!(!check_deadlock()); - - b.wait(); - sleep(Duration::from_millis(50)); - assert!(check_deadlock()); - - assert!(!check_deadlock()); - } - - #[test] - fn test_mutex_deadlock_reentrant() { - let _guard = DEADLOCK_DETECTION_LOCK.lock(); - - let m1: Arc> = Default::default(); - - assert!(!check_deadlock()); - - let _t1 = thread::spawn(move || { - let _g = m1.lock(); - let _ = m1.lock(); - }); - - sleep(Duration::from_millis(50)); - assert!(check_deadlock()); - - assert!(!check_deadlock()); - } - - #[test] - fn test_remutex_deadlock() { - let _guard = DEADLOCK_DETECTION_LOCK.lock(); - - let m1: Arc> = Default::default(); - let m2: Arc> = Default::default(); - let m3: Arc> = Default::default(); - let b = Arc::new(Barrier::new(4)); - - let m1_ = m1.clone(); - let m2_ = m2.clone(); - let m3_ = m3.clone(); - let b1 = b.clone(); - let b2 = b.clone(); - let b3 = b.clone(); - - assert!(!check_deadlock()); - - let _t1 = thread::spawn(move || { - let _g = m1.lock(); - let _g = m1.lock(); - b1.wait(); - let _ = m2_.lock(); - }); - - let _t2 = thread::spawn(move || { - let _g = m2.lock(); - let _g = m2.lock(); - b2.wait(); - let _ = m3_.lock(); - }); - - let _t3 = thread::spawn(move || { - let _g = m3.lock(); - let _g = m3.lock(); - b3.wait(); - let _ = m1_.lock(); - }); - - assert!(!check_deadlock()); - - b.wait(); - sleep(Duration::from_millis(50)); - assert!(check_deadlock()); - - assert!(!check_deadlock()); - } - - #[test] - fn test_rwlock_deadlock() { - let _guard = DEADLOCK_DETECTION_LOCK.lock(); - - let m1: Arc> = Default::default(); - let m2: Arc> = Default::default(); - let m3: Arc> = Default::default(); - let b = Arc::new(Barrier::new(4)); - - let m1_ = m1.clone(); - let m2_ = m2.clone(); - let m3_ = m3.clone(); - let b1 = 
b.clone(); - let b2 = b.clone(); - let b3 = b.clone(); - - assert!(!check_deadlock()); - - let _t1 = thread::spawn(move || { - let _g = m1.read(); - b1.wait(); - let _g = m2_.write(); - }); - - let _t2 = thread::spawn(move || { - let _g = m2.read(); - b2.wait(); - let _g = m3_.write(); - }); - - let _t3 = thread::spawn(move || { - let _g = m3.read(); - b3.wait(); - let _ = m1_.write(); - }); - - assert!(!check_deadlock()); - - b.wait(); - sleep(Duration::from_millis(50)); - assert!(check_deadlock()); - - assert!(!check_deadlock()); - } - - #[cfg(rwlock_deadlock_detection_not_supported)] - #[test] - fn test_rwlock_deadlock_reentrant() { - let _guard = DEADLOCK_DETECTION_LOCK.lock(); - - let m1: Arc> = Default::default(); - - assert!(!check_deadlock()); - - let _t1 = thread::spawn(move || { - let _g = m1.read(); - let _ = m1.write(); - }); - - sleep(Duration::from_millis(50)); - assert!(check_deadlock()); - - assert!(!check_deadlock()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/elision.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/elision.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/elision.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/elision.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,112 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. 
- -#[cfg(all(feature = "hardware-lock-elision", any(target_arch = "x86", target_arch = "x86_64")))] -use std::arch::asm; -use std::sync::atomic::AtomicUsize; - -// Extension trait to add lock elision primitives to atomic types -pub trait AtomicElisionExt { - type IntType; - - // Perform a compare_exchange and start a transaction - fn elision_compare_exchange_acquire( - &self, - current: Self::IntType, - new: Self::IntType, - ) -> Result; - - // Perform a fetch_sub and end a transaction - fn elision_fetch_sub_release(&self, val: Self::IntType) -> Self::IntType; -} - -// Indicates whether the target architecture supports lock elision -#[inline] -pub fn have_elision() -> bool { - cfg!(all( - feature = "hardware-lock-elision", - any(target_arch = "x86", target_arch = "x86_64"), - )) -} - -// This implementation is never actually called because it is guarded by -// have_elision(). -#[cfg(not(all(feature = "hardware-lock-elision", any(target_arch = "x86", target_arch = "x86_64"))))] -impl AtomicElisionExt for AtomicUsize { - type IntType = usize; - - #[inline] - fn elision_compare_exchange_acquire(&self, _: usize, _: usize) -> Result { - unreachable!(); - } - - #[inline] - fn elision_fetch_sub_release(&self, _: usize) -> usize { - unreachable!(); - } -} - -#[cfg(all(feature = "hardware-lock-elision", any(target_arch = "x86", target_arch = "x86_64")))] -impl AtomicElisionExt for AtomicUsize { - type IntType = usize; - - #[inline] - fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result { - unsafe { - use core::arch::asm; - let prev: usize; - #[cfg(target_pointer_width = "32")] - asm!( - "xacquire", - "lock", - "cmpxchg [{:e}], {:e}", - in(reg) self, - in(reg) new, - inout("eax") current => prev, - ); - #[cfg(target_pointer_width = "64")] - asm!( - "xacquire", - "lock", - "cmpxchg [{}], {}", - in(reg) self, - in(reg) new, - inout("rax") current => prev, - ); - if prev == current { - Ok(prev) - } else { - Err(prev) - } - } - } - - #[inline] - fn 
elision_fetch_sub_release(&self, val: usize) -> usize { - unsafe { - use core::arch::asm; - let prev: usize; - #[cfg(target_pointer_width = "32")] - asm!( - "xrelease", - "lock", - "xadd [{:e}], {:e}", - in(reg) self, - inout(reg) val.wrapping_neg() => prev, - ); - #[cfg(target_pointer_width = "64")] - asm!( - "xrelease", - "lock", - "xadd [{}], {}", - in(reg) self, - inout(reg) val.wrapping_neg() => prev, - ); - prev - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/fair_mutex.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/fair_mutex.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/fair_mutex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/fair_mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,275 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::raw_fair_mutex::RawFairMutex; -use lock_api; - -/// A mutual exclusive primitive that is always fair, useful for protecting shared data -/// -/// This mutex will block threads waiting for the lock to become available. The -/// mutex can be statically initialized or created by the `new` -/// constructor. Each mutex has a type parameter which represents the data that -/// it is protecting. The data can only be accessed through the RAII guards -/// returned from `lock` and `try_lock`, which guarantees that the data is only -/// ever accessed when the mutex is locked. -/// -/// The regular mutex provided by `parking_lot` uses eventual fairness -/// (after some time it will default to the fair algorithm), but eventual -/// fairness does not provide the same guarantees an always fair method would. -/// Fair mutexes are generally slower, but sometimes needed. 
-/// -/// In a fair mutex the waiters form a queue, and the lock is always granted to -/// the next requester in the queue, in first-in first-out order. This ensures -/// that one thread cannot starve others by quickly re-acquiring the lock after -/// releasing it. -/// -/// A fair mutex may not be interesting if threads have different priorities (this is known as -/// priority inversion). -/// -/// # Differences from the standard library `Mutex` -/// -/// - No poisoning, the lock is released normally on panic. -/// - Only requires 1 byte of space, whereas the standard library boxes the -/// `FairMutex` due to platform limitations. -/// - Can be statically constructed. -/// - Does not require any drop glue when dropped. -/// - Inline fast path for the uncontended case. -/// - Efficient handling of micro-contention using adaptive spinning. -/// - Allows raw locking & unlocking without a guard. -/// -/// # Examples -/// -/// ``` -/// use parking_lot::FairMutex; -/// use std::sync::{Arc, mpsc::channel}; -/// use std::thread; -/// -/// const N: usize = 10; -/// -/// // Spawn a few threads to increment a shared variable (non-atomically), and -/// // let the main thread know once all increments are done. -/// // -/// // Here we're using an Arc to share memory among threads, and the data inside -/// // the Arc is protected with a mutex. -/// let data = Arc::new(FairMutex::new(0)); -/// -/// let (tx, rx) = channel(); -/// for _ in 0..10 { -/// let (data, tx) = (Arc::clone(&data), tx.clone()); -/// thread::spawn(move || { -/// // The shared state can only be accessed once the lock is held. -/// // Our non-atomic increment is safe because we're the only thread -/// // which can access the shared state when the lock is held. -/// let mut data = data.lock(); -/// *data += 1; -/// if *data == N { -/// tx.send(()).unwrap(); -/// } -/// // the lock is unlocked here when `data` goes out of scope. 
-/// }); -/// } -/// -/// rx.recv().unwrap(); -/// ``` -pub type FairMutex = lock_api::Mutex; - -/// Creates a new fair mutex in an unlocked state ready for use. -/// -/// This allows creating a fair mutex in a constant context on stable Rust. -pub const fn const_fair_mutex(val: T) -> FairMutex { - FairMutex::const_new(::INIT, val) -} - -/// An RAII implementation of a "scoped lock" of a mutex. When this structure is -/// dropped (falls out of scope), the lock will be unlocked. -/// -/// The data protected by the mutex can be accessed through this guard via its -/// `Deref` and `DerefMut` implementations. -pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>; - -/// An RAII mutex guard returned by `FairMutexGuard::map`, which can point to a -/// subfield of the protected data. -/// -/// The main difference between `MappedFairMutexGuard` and `FairMutexGuard` is that the -/// former doesn't support temporarily unlocking and re-locking, since that -/// could introduce soundness issues if the locked object is modified by another -/// thread. 
-pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>; - -#[cfg(test)] -mod tests { - use crate::FairMutex; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::mpsc::channel; - use std::sync::Arc; - use std::thread; - - #[cfg(feature = "serde")] - use bincode::{deserialize, serialize}; - - #[derive(Eq, PartialEq, Debug)] - struct NonCopy(i32); - - #[test] - fn smoke() { - let m = FairMutex::new(()); - drop(m.lock()); - drop(m.lock()); - } - - #[test] - fn lots_and_lots() { - const J: u32 = 1000; - const K: u32 = 3; - - let m = Arc::new(FairMutex::new(0)); - - fn inc(m: &FairMutex) { - for _ in 0..J { - *m.lock() += 1; - } - } - - let (tx, rx) = channel(); - for _ in 0..K { - let tx2 = tx.clone(); - let m2 = m.clone(); - thread::spawn(move || { - inc(&m2); - tx2.send(()).unwrap(); - }); - let tx2 = tx.clone(); - let m2 = m.clone(); - thread::spawn(move || { - inc(&m2); - tx2.send(()).unwrap(); - }); - } - - drop(tx); - for _ in 0..2 * K { - rx.recv().unwrap(); - } - assert_eq!(*m.lock(), J * K * 2); - } - - #[test] - fn try_lock() { - let m = FairMutex::new(()); - *m.try_lock().unwrap() = (); - } - - #[test] - fn test_into_inner() { - let m = FairMutex::new(NonCopy(10)); - assert_eq!(m.into_inner(), NonCopy(10)); - } - - #[test] - fn test_into_inner_drop() { - struct Foo(Arc); - impl Drop for Foo { - fn drop(&mut self) { - self.0.fetch_add(1, Ordering::SeqCst); - } - } - let num_drops = Arc::new(AtomicUsize::new(0)); - let m = FairMutex::new(Foo(num_drops.clone())); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - { - let _inner = m.into_inner(); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - } - assert_eq!(num_drops.load(Ordering::SeqCst), 1); - } - - #[test] - fn test_get_mut() { - let mut m = FairMutex::new(NonCopy(10)); - *m.get_mut() = NonCopy(20); - assert_eq!(m.into_inner(), NonCopy(20)); - } - - #[test] - fn test_mutex_arc_nested() { - // Tests nested mutexes and access - // to underlying data. 
- let arc = Arc::new(FairMutex::new(1)); - let arc2 = Arc::new(FairMutex::new(arc)); - let (tx, rx) = channel(); - let _t = thread::spawn(move || { - let lock = arc2.lock(); - let lock2 = lock.lock(); - assert_eq!(*lock2, 1); - tx.send(()).unwrap(); - }); - rx.recv().unwrap(); - } - - #[test] - fn test_mutex_arc_access_in_unwind() { - let arc = Arc::new(FairMutex::new(1)); - let arc2 = arc.clone(); - let _ = thread::spawn(move || { - struct Unwinder { - i: Arc>, - } - impl Drop for Unwinder { - fn drop(&mut self) { - *self.i.lock() += 1; - } - } - let _u = Unwinder { i: arc2 }; - panic!(); - }) - .join(); - let lock = arc.lock(); - assert_eq!(*lock, 2); - } - - #[test] - fn test_mutex_unsized() { - let mutex: &FairMutex<[i32]> = &FairMutex::new([1, 2, 3]); - { - let b = &mut *mutex.lock(); - b[0] = 4; - b[2] = 5; - } - let comp: &[i32] = &[4, 2, 5]; - assert_eq!(&*mutex.lock(), comp); - } - - #[test] - fn test_mutexguard_sync() { - fn sync(_: T) {} - - let mutex = FairMutex::new(()); - sync(mutex.lock()); - } - - #[test] - fn test_mutex_debug() { - let mutex = FairMutex::new(vec![0u8, 10]); - - assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }"); - let _lock = mutex.lock(); - assert_eq!(format!("{:?}", mutex), "Mutex { data: }"); - } - - #[cfg(feature = "serde")] - #[test] - fn test_serde() { - let contents: Vec = vec![0, 1, 2]; - let mutex = FairMutex::new(contents.clone()); - - let serialized = serialize(&mutex).unwrap(); - let deserialized: FairMutex> = deserialize(&serialized).unwrap(); - - assert_eq!(*(mutex.lock()), *(deserialized.lock())); - assert_eq!(contents, *(deserialized.lock())); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/lib.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/lib.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// 
-// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -//! This library provides implementations of `Mutex`, `RwLock`, `Condvar` and -//! `Once` that are smaller, faster and more flexible than those in the Rust -//! standard library. It also provides a `ReentrantMutex` type. - -#![warn(missing_docs)] -#![warn(rust_2018_idioms)] - -mod condvar; -mod elision; -mod fair_mutex; -mod mutex; -mod once; -mod raw_fair_mutex; -mod raw_mutex; -mod raw_rwlock; -mod remutex; -mod rwlock; -mod util; - -#[cfg(feature = "deadlock_detection")] -pub mod deadlock; -#[cfg(not(feature = "deadlock_detection"))] -mod deadlock; - -// If deadlock detection is enabled, we cannot allow lock guards to be sent to -// other threads. -#[cfg(all(feature = "send_guard", feature = "deadlock_detection"))] -compile_error!("the `send_guard` and `deadlock_detection` features cannot be used together"); -#[cfg(feature = "send_guard")] -type GuardMarker = lock_api::GuardSend; -#[cfg(not(feature = "send_guard"))] -type GuardMarker = lock_api::GuardNoSend; - -pub use self::condvar::{Condvar, WaitTimeoutResult}; -pub use self::fair_mutex::{const_fair_mutex, FairMutex, FairMutexGuard, MappedFairMutexGuard}; -pub use self::mutex::{const_mutex, MappedMutexGuard, Mutex, MutexGuard}; -pub use self::once::{Once, OnceState}; -pub use self::raw_fair_mutex::RawFairMutex; -pub use self::raw_mutex::RawMutex; -pub use self::raw_rwlock::RawRwLock; -pub use self::remutex::{ - const_reentrant_mutex, MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, - ReentrantMutexGuard, -}; -pub use self::rwlock::{ - const_rwlock, MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, - RwLockUpgradableReadGuard, RwLockWriteGuard, -}; -pub use ::lock_api; diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/mutex.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/mutex.rs --- 
s390-tools-2.31.0/rust-vendor/parking_lot/src/mutex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,312 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::raw_mutex::RawMutex; -use lock_api; - -/// A mutual exclusion primitive useful for protecting shared data -/// -/// This mutex will block threads waiting for the lock to become available. The -/// mutex can be statically initialized or created by the `new` -/// constructor. Each mutex has a type parameter which represents the data that -/// it is protecting. The data can only be accessed through the RAII guards -/// returned from `lock` and `try_lock`, which guarantees that the data is only -/// ever accessed when the mutex is locked. -/// -/// # Fairness -/// -/// A typical unfair lock can often end up in a situation where a single thread -/// quickly acquires and releases the same mutex in succession, which can starve -/// other threads waiting to acquire the mutex. While this improves throughput -/// because it doesn't force a context switch when a thread tries to re-acquire -/// a mutex it has just released, this can starve other threads. -/// -/// This mutex uses [eventual fairness](https://trac.webkit.org/changeset/203350) -/// to ensure that the lock will be fair on average without sacrificing -/// throughput. This is done by forcing a fair unlock on average every 0.5ms, -/// which will force the lock to go to the next thread waiting for the mutex. -/// -/// Additionally, any critical section longer than 1ms will always use a fair -/// unlock, which has a negligible impact on throughput considering the length -/// of the critical section. 
-/// -/// You can also force a fair unlock by calling `MutexGuard::unlock_fair` when -/// unlocking a mutex instead of simply dropping the `MutexGuard`. -/// -/// # Differences from the standard library `Mutex` -/// -/// - No poisoning, the lock is released normally on panic. -/// - Only requires 1 byte of space, whereas the standard library boxes the -/// `Mutex` due to platform limitations. -/// - Can be statically constructed. -/// - Does not require any drop glue when dropped. -/// - Inline fast path for the uncontended case. -/// - Efficient handling of micro-contention using adaptive spinning. -/// - Allows raw locking & unlocking without a guard. -/// - Supports eventual fairness so that the mutex is fair on average. -/// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`. -/// -/// # Examples -/// -/// ``` -/// use parking_lot::Mutex; -/// use std::sync::{Arc, mpsc::channel}; -/// use std::thread; -/// -/// const N: usize = 10; -/// -/// // Spawn a few threads to increment a shared variable (non-atomically), and -/// // let the main thread know once all increments are done. -/// // -/// // Here we're using an Arc to share memory among threads, and the data inside -/// // the Arc is protected with a mutex. -/// let data = Arc::new(Mutex::new(0)); -/// -/// let (tx, rx) = channel(); -/// for _ in 0..10 { -/// let (data, tx) = (Arc::clone(&data), tx.clone()); -/// thread::spawn(move || { -/// // The shared state can only be accessed once the lock is held. -/// // Our non-atomic increment is safe because we're the only thread -/// // which can access the shared state when the lock is held. -/// let mut data = data.lock(); -/// *data += 1; -/// if *data == N { -/// tx.send(()).unwrap(); -/// } -/// // the lock is unlocked here when `data` goes out of scope. -/// }); -/// } -/// -/// rx.recv().unwrap(); -/// ``` -pub type Mutex = lock_api::Mutex; - -/// Creates a new mutex in an unlocked state ready for use. 
-/// -/// This allows creating a mutex in a constant context on stable Rust. -pub const fn const_mutex(val: T) -> Mutex { - Mutex::const_new(::INIT, val) -} - -/// An RAII implementation of a "scoped lock" of a mutex. When this structure is -/// dropped (falls out of scope), the lock will be unlocked. -/// -/// The data protected by the mutex can be accessed through this guard via its -/// `Deref` and `DerefMut` implementations. -pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>; - -/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a -/// subfield of the protected data. -/// -/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the -/// former doesn't support temporarily unlocking and re-locking, since that -/// could introduce soundness issues if the locked object is modified by another -/// thread. -pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>; - -#[cfg(test)] -mod tests { - use crate::{Condvar, Mutex}; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::mpsc::channel; - use std::sync::Arc; - use std::thread; - - #[cfg(feature = "serde")] - use bincode::{deserialize, serialize}; - - struct Packet(Arc<(Mutex, Condvar)>); - - #[derive(Eq, PartialEq, Debug)] - struct NonCopy(i32); - - unsafe impl Send for Packet {} - unsafe impl Sync for Packet {} - - #[test] - fn smoke() { - let m = Mutex::new(()); - drop(m.lock()); - drop(m.lock()); - } - - #[test] - fn lots_and_lots() { - const J: u32 = 1000; - const K: u32 = 3; - - let m = Arc::new(Mutex::new(0)); - - fn inc(m: &Mutex) { - for _ in 0..J { - *m.lock() += 1; - } - } - - let (tx, rx) = channel(); - for _ in 0..K { - let tx2 = tx.clone(); - let m2 = m.clone(); - thread::spawn(move || { - inc(&m2); - tx2.send(()).unwrap(); - }); - let tx2 = tx.clone(); - let m2 = m.clone(); - thread::spawn(move || { - inc(&m2); - tx2.send(()).unwrap(); - }); - } - - drop(tx); - for _ in 0..2 * K { - rx.recv().unwrap(); - } 
- assert_eq!(*m.lock(), J * K * 2); - } - - #[test] - fn try_lock() { - let m = Mutex::new(()); - *m.try_lock().unwrap() = (); - } - - #[test] - fn test_into_inner() { - let m = Mutex::new(NonCopy(10)); - assert_eq!(m.into_inner(), NonCopy(10)); - } - - #[test] - fn test_into_inner_drop() { - struct Foo(Arc); - impl Drop for Foo { - fn drop(&mut self) { - self.0.fetch_add(1, Ordering::SeqCst); - } - } - let num_drops = Arc::new(AtomicUsize::new(0)); - let m = Mutex::new(Foo(num_drops.clone())); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - { - let _inner = m.into_inner(); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - } - assert_eq!(num_drops.load(Ordering::SeqCst), 1); - } - - #[test] - fn test_get_mut() { - let mut m = Mutex::new(NonCopy(10)); - *m.get_mut() = NonCopy(20); - assert_eq!(m.into_inner(), NonCopy(20)); - } - - #[test] - fn test_mutex_arc_condvar() { - let packet = Packet(Arc::new((Mutex::new(false), Condvar::new()))); - let packet2 = Packet(packet.0.clone()); - let (tx, rx) = channel(); - let _t = thread::spawn(move || { - // wait until parent gets in - rx.recv().unwrap(); - let &(ref lock, ref cvar) = &*packet2.0; - let mut lock = lock.lock(); - *lock = true; - cvar.notify_one(); - }); - - let &(ref lock, ref cvar) = &*packet.0; - let mut lock = lock.lock(); - tx.send(()).unwrap(); - assert!(!*lock); - while !*lock { - cvar.wait(&mut lock); - } - } - - #[test] - fn test_mutex_arc_nested() { - // Tests nested mutexes and access - // to underlying data. 
- let arc = Arc::new(Mutex::new(1)); - let arc2 = Arc::new(Mutex::new(arc)); - let (tx, rx) = channel(); - let _t = thread::spawn(move || { - let lock = arc2.lock(); - let lock2 = lock.lock(); - assert_eq!(*lock2, 1); - tx.send(()).unwrap(); - }); - rx.recv().unwrap(); - } - - #[test] - fn test_mutex_arc_access_in_unwind() { - let arc = Arc::new(Mutex::new(1)); - let arc2 = arc.clone(); - let _ = thread::spawn(move || { - struct Unwinder { - i: Arc>, - } - impl Drop for Unwinder { - fn drop(&mut self) { - *self.i.lock() += 1; - } - } - let _u = Unwinder { i: arc2 }; - panic!(); - }) - .join(); - let lock = arc.lock(); - assert_eq!(*lock, 2); - } - - #[test] - fn test_mutex_unsized() { - let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); - { - let b = &mut *mutex.lock(); - b[0] = 4; - b[2] = 5; - } - let comp: &[i32] = &[4, 2, 5]; - assert_eq!(&*mutex.lock(), comp); - } - - #[test] - fn test_mutexguard_sync() { - fn sync(_: T) {} - - let mutex = Mutex::new(()); - sync(mutex.lock()); - } - - #[test] - fn test_mutex_debug() { - let mutex = Mutex::new(vec![0u8, 10]); - - assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }"); - let _lock = mutex.lock(); - assert_eq!(format!("{:?}", mutex), "Mutex { data: }"); - } - - #[cfg(feature = "serde")] - #[test] - fn test_serde() { - let contents: Vec = vec![0, 1, 2]; - let mutex = Mutex::new(contents.clone()); - - let serialized = serialize(&mutex).unwrap(); - let deserialized: Mutex> = deserialize(&serialized).unwrap(); - - assert_eq!(*(mutex.lock()), *(deserialized.lock())); - assert_eq!(contents, *(deserialized.lock())); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/once.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/once.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/once.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/once.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,458 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache 
License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::util::UncheckedOptionExt; -use core::{ - fmt, mem, - sync::atomic::{fence, AtomicU8, Ordering}, -}; -use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; - -const DONE_BIT: u8 = 1; -const POISON_BIT: u8 = 2; -const LOCKED_BIT: u8 = 4; -const PARKED_BIT: u8 = 8; - -/// Current state of a `Once`. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum OnceState { - /// A closure has not been executed yet - New, - - /// A closure was executed but panicked. - Poisoned, - - /// A thread is currently executing a closure. - InProgress, - - /// A closure has completed successfully. - Done, -} - -impl OnceState { - /// Returns whether the associated `Once` has been poisoned. - /// - /// Once an initialization routine for a `Once` has panicked it will forever - /// indicate to future forced initialization routines that it is poisoned. - #[inline] - pub fn poisoned(self) -> bool { - match self { - OnceState::Poisoned => true, - _ => false, - } - } - - /// Returns whether the associated `Once` has successfully executed a - /// closure. - #[inline] - pub fn done(self) -> bool { - match self { - OnceState::Done => true, - _ => false, - } - } -} - -/// A synchronization primitive which can be used to run a one-time -/// initialization. Useful for one-time initialization for globals, FFI or -/// related functionality. -/// -/// # Differences from the standard library `Once` -/// -/// - Only requires 1 byte of space, instead of 1 word. -/// - Not required to be `'static`. -/// - Relaxed memory barriers in the fast path, which can significantly improve -/// performance on some architectures. -/// - Efficient handling of micro-contention using adaptive spinning. 
-/// -/// # Examples -/// -/// ``` -/// use parking_lot::Once; -/// -/// static START: Once = Once::new(); -/// -/// START.call_once(|| { -/// // run initialization here -/// }); -/// ``` -pub struct Once(AtomicU8); - -impl Once { - /// Creates a new `Once` value. - #[inline] - pub const fn new() -> Once { - Once(AtomicU8::new(0)) - } - - /// Returns the current state of this `Once`. - #[inline] - pub fn state(&self) -> OnceState { - let state = self.0.load(Ordering::Acquire); - if state & DONE_BIT != 0 { - OnceState::Done - } else if state & LOCKED_BIT != 0 { - OnceState::InProgress - } else if state & POISON_BIT != 0 { - OnceState::Poisoned - } else { - OnceState::New - } - } - - /// Performs an initialization routine once and only once. The given closure - /// will be executed if this is the first time `call_once` has been called, - /// and otherwise the routine will *not* be invoked. - /// - /// This method will block the calling thread if another initialization - /// routine is currently running. - /// - /// When this function returns, it is guaranteed that some initialization - /// has run and completed (it may not be the closure specified). It is also - /// guaranteed that any memory writes performed by the executed closure can - /// be reliably observed by other threads at this point (there is a - /// happens-before relation between the closure and code executing after the - /// return). - /// - /// # Examples - /// - /// ``` - /// use parking_lot::Once; - /// - /// static mut VAL: usize = 0; - /// static INIT: Once = Once::new(); - /// - /// // Accessing a `static mut` is unsafe much of the time, but if we do so - /// // in a synchronized fashion (e.g. write once or read all) then we're - /// // good to go! - /// // - /// // This function will only call `expensive_computation` once, and will - /// // otherwise always return the value returned from the first invocation. 
- /// fn get_cached_val() -> usize { - /// unsafe { - /// INIT.call_once(|| { - /// VAL = expensive_computation(); - /// }); - /// VAL - /// } - /// } - /// - /// fn expensive_computation() -> usize { - /// // ... - /// # 2 - /// } - /// ``` - /// - /// # Panics - /// - /// The closure `f` will only be executed once if this is called - /// concurrently amongst many threads. If that closure panics, however, then - /// it will *poison* this `Once` instance, causing all future invocations of - /// `call_once` to also panic. - #[inline] - pub fn call_once(&self, f: F) - where - F: FnOnce(), - { - if self.0.load(Ordering::Acquire) == DONE_BIT { - return; - } - - let mut f = Some(f); - self.call_once_slow(false, &mut |_| unsafe { f.take().unchecked_unwrap()() }); - } - - /// Performs the same function as `call_once` except ignores poisoning. - /// - /// If this `Once` has been poisoned (some initialization panicked) then - /// this function will continue to attempt to call initialization functions - /// until one of them doesn't panic. - /// - /// The closure `f` is yielded a structure which can be used to query the - /// state of this `Once` (whether initialization has previously panicked or - /// not). - #[inline] - pub fn call_once_force(&self, f: F) - where - F: FnOnce(OnceState), - { - if self.0.load(Ordering::Acquire) == DONE_BIT { - return; - } - - let mut f = Some(f); - self.call_once_slow(true, &mut |state| unsafe { - f.take().unchecked_unwrap()(state) - }); - } - - // This is a non-generic function to reduce the monomorphization cost of - // using `call_once` (this isn't exactly a trivial or small implementation). - // - // Additionally, this is tagged with `#[cold]` as it should indeed be cold - // and it helps let LLVM know that calls to this function should be off the - // fast path. Essentially, this should help generate more straight line code - // in LLVM. 
- // - // Finally, this takes an `FnMut` instead of a `FnOnce` because there's - // currently no way to take an `FnOnce` and call it via virtual dispatch - // without some allocation overhead. - #[cold] - fn call_once_slow(&self, ignore_poison: bool, f: &mut dyn FnMut(OnceState)) { - let mut spinwait = SpinWait::new(); - let mut state = self.0.load(Ordering::Relaxed); - loop { - // If another thread called the closure, we're done - if state & DONE_BIT != 0 { - // An acquire fence is needed here since we didn't load the - // state with Ordering::Acquire. - fence(Ordering::Acquire); - return; - } - - // If the state has been poisoned and we aren't forcing, then panic - if state & POISON_BIT != 0 && !ignore_poison { - // Need the fence here as well for the same reason - fence(Ordering::Acquire); - panic!("Once instance has previously been poisoned"); - } - - // Grab the lock if it isn't locked, even if there is a queue on it. - // We also clear the poison bit since we are going to try running - // the closure again. - if state & LOCKED_BIT == 0 { - match self.0.compare_exchange_weak( - state, - (state | LOCKED_BIT) & !POISON_BIT, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => break, - Err(x) => state = x, - } - continue; - } - - // If there is no queue, try spinning a few times - if state & PARKED_BIT == 0 && spinwait.spin() { - state = self.0.load(Ordering::Relaxed); - continue; - } - - // Set the parked bit - if state & PARKED_BIT == 0 { - if let Err(x) = self.0.compare_exchange_weak( - state, - state | PARKED_BIT, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - state = x; - continue; - } - } - - // Park our thread until we are woken up by the thread that owns the - // lock. 
- let addr = self as *const _ as usize; - let validate = || self.0.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; - let before_sleep = || {}; - let timed_out = |_, _| unreachable!(); - unsafe { - parking_lot_core::park( - addr, - validate, - before_sleep, - timed_out, - DEFAULT_PARK_TOKEN, - None, - ); - } - - // Loop back and check if the done bit was set - spinwait.reset(); - state = self.0.load(Ordering::Relaxed); - } - - struct PanicGuard<'a>(&'a Once); - impl<'a> Drop for PanicGuard<'a> { - fn drop(&mut self) { - // Mark the state as poisoned, unlock it and unpark all threads. - let once = self.0; - let state = once.0.swap(POISON_BIT, Ordering::Release); - if state & PARKED_BIT != 0 { - let addr = once as *const _ as usize; - unsafe { - parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN); - } - } - } - } - - // At this point we have the lock, so run the closure. Make sure we - // properly clean up if the closure panicks. - let guard = PanicGuard(self); - let once_state = if state & POISON_BIT != 0 { - OnceState::Poisoned - } else { - OnceState::New - }; - f(once_state); - mem::forget(guard); - - // Now unlock the state, set the done bit and unpark all threads - let state = self.0.swap(DONE_BIT, Ordering::Release); - if state & PARKED_BIT != 0 { - let addr = self as *const _ as usize; - unsafe { - parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN); - } - } - } -} - -impl Default for Once { - #[inline] - fn default() -> Once { - Once::new() - } -} - -impl fmt::Debug for Once { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Once") - .field("state", &self.state()) - .finish() - } -} - -#[cfg(test)] -mod tests { - use crate::Once; - use std::panic; - use std::sync::mpsc::channel; - use std::thread; - - #[test] - fn smoke_once() { - static O: Once = Once::new(); - let mut a = 0; - O.call_once(|| a += 1); - assert_eq!(a, 1); - O.call_once(|| a += 1); - assert_eq!(a, 1); - } - - #[test] - fn stampede_once() { - static O: 
Once = Once::new(); - static mut RUN: bool = false; - - let (tx, rx) = channel(); - for _ in 0..10 { - let tx = tx.clone(); - thread::spawn(move || { - for _ in 0..4 { - thread::yield_now() - } - unsafe { - O.call_once(|| { - assert!(!RUN); - RUN = true; - }); - assert!(RUN); - } - tx.send(()).unwrap(); - }); - } - - unsafe { - O.call_once(|| { - assert!(!RUN); - RUN = true; - }); - assert!(RUN); - } - - for _ in 0..10 { - rx.recv().unwrap(); - } - } - - #[test] - fn poison_bad() { - static O: Once = Once::new(); - - // poison the once - let t = panic::catch_unwind(|| { - O.call_once(|| panic!()); - }); - assert!(t.is_err()); - - // poisoning propagates - let t = panic::catch_unwind(|| { - O.call_once(|| {}); - }); - assert!(t.is_err()); - - // we can subvert poisoning, however - let mut called = false; - O.call_once_force(|p| { - called = true; - assert!(p.poisoned()) - }); - assert!(called); - - // once any success happens, we stop propagating the poison - O.call_once(|| {}); - } - - #[test] - fn wait_for_force_to_finish() { - static O: Once = Once::new(); - - // poison the once - let t = panic::catch_unwind(|| { - O.call_once(|| panic!()); - }); - assert!(t.is_err()); - - // make sure someone's waiting inside the once via a force - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - let t1 = thread::spawn(move || { - O.call_once_force(|p| { - assert!(p.poisoned()); - tx1.send(()).unwrap(); - rx2.recv().unwrap(); - }); - }); - - rx1.recv().unwrap(); - - // put another waiter on the once - let t2 = thread::spawn(|| { - let mut called = false; - O.call_once(|| { - called = true; - }); - assert!(!called); - }); - - tx2.send(()).unwrap(); - - assert!(t1.join().is_ok()); - assert!(t2.join().is_ok()); - } - - #[test] - fn test_once_debug() { - static O: Once = Once::new(); - - assert_eq!(format!("{:?}", O), "Once { state: New }"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/raw_fair_mutex.rs 
s390-tools-2.33.1/rust-vendor/parking_lot/src/raw_fair_mutex.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/raw_fair_mutex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/raw_fair_mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,65 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::raw_mutex::RawMutex; -use lock_api::RawMutexFair; - -/// Raw fair mutex type backed by the parking lot. -pub struct RawFairMutex(RawMutex); - -unsafe impl lock_api::RawMutex for RawFairMutex { - const INIT: Self = RawFairMutex(::INIT); - - type GuardMarker = ::GuardMarker; - - #[inline] - fn lock(&self) { - self.0.lock() - } - - #[inline] - fn try_lock(&self) -> bool { - self.0.try_lock() - } - - #[inline] - unsafe fn unlock(&self) { - self.unlock_fair() - } - - #[inline] - fn is_locked(&self) -> bool { - self.0.is_locked() - } -} - -unsafe impl lock_api::RawMutexFair for RawFairMutex { - #[inline] - unsafe fn unlock_fair(&self) { - self.0.unlock_fair() - } - - #[inline] - unsafe fn bump(&self) { - self.0.bump() - } -} - -unsafe impl lock_api::RawMutexTimed for RawFairMutex { - type Duration = ::Duration; - type Instant = ::Instant; - - #[inline] - fn try_lock_until(&self, timeout: Self::Instant) -> bool { - self.0.try_lock_until(timeout) - } - - #[inline] - fn try_lock_for(&self, timeout: Self::Duration) -> bool { - self.0.try_lock_for(timeout) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/raw_mutex.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/raw_mutex.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/raw_mutex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/raw_mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,331 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the 
Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::{deadlock, util}; -use core::{ - sync::atomic::{AtomicU8, Ordering}, - time::Duration, -}; -use lock_api::RawMutex as RawMutex_; -use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN}; -use std::time::Instant; - -// UnparkToken used to indicate that that the target thread should attempt to -// lock the mutex again as soon as it is unparked. -pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0); - -// UnparkToken used to indicate that the mutex is being handed off to the target -// thread directly without unlocking it. -pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1); - -/// This bit is set in the `state` of a `RawMutex` when that mutex is locked by some thread. -const LOCKED_BIT: u8 = 0b01; -/// This bit is set in the `state` of a `RawMutex` just before parking a thread. A thread is being -/// parked if it wants to lock the mutex, but it is currently being held by some other thread. -const PARKED_BIT: u8 = 0b10; - -/// Raw mutex type backed by the parking lot. -pub struct RawMutex { - /// This atomic integer holds the current state of the mutex instance. Only the two lowest bits - /// are used. See `LOCKED_BIT` and `PARKED_BIT` for the bitmask for these bits. - /// - /// # State table: - /// - /// PARKED_BIT | LOCKED_BIT | Description - /// 0 | 0 | The mutex is not locked, nor is anyone waiting for it. - /// -----------+------------+------------------------------------------------------------------ - /// 0 | 1 | The mutex is locked by exactly one thread. No other thread is - /// | | waiting for it. - /// -----------+------------+------------------------------------------------------------------ - /// 1 | 0 | The mutex is not locked. One or more thread is parked or about to - /// | | park. 
At least one of the parked threads are just about to be - /// | | unparked, or a thread heading for parking might abort the park. - /// -----------+------------+------------------------------------------------------------------ - /// 1 | 1 | The mutex is locked by exactly one thread. One or more thread is - /// | | parked or about to park, waiting for the lock to become available. - /// | | In this state, PARKED_BIT is only ever cleared when a bucket lock - /// | | is held (i.e. in a parking_lot_core callback). This ensures that - /// | | we never end up in a situation where there are parked threads but - /// | | PARKED_BIT is not set (which would result in those threads - /// | | potentially never getting woken up). - state: AtomicU8, -} - -unsafe impl lock_api::RawMutex for RawMutex { - const INIT: RawMutex = RawMutex { - state: AtomicU8::new(0), - }; - - type GuardMarker = crate::GuardMarker; - - #[inline] - fn lock(&self) { - if self - .state - .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) - .is_err() - { - self.lock_slow(None); - } - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; - } - - #[inline] - fn try_lock(&self) -> bool { - let mut state = self.state.load(Ordering::Relaxed); - loop { - if state & LOCKED_BIT != 0 { - return false; - } - match self.state.compare_exchange_weak( - state, - state | LOCKED_BIT, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; - return true; - } - Err(x) => state = x, - } - } - } - - #[inline] - unsafe fn unlock(&self) { - deadlock::release_resource(self as *const _ as usize); - if self - .state - .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) - .is_ok() - { - return; - } - self.unlock_slow(false); - } - - #[inline] - fn is_locked(&self) -> bool { - let state = self.state.load(Ordering::Relaxed); - state & LOCKED_BIT != 0 - } -} - -unsafe impl lock_api::RawMutexFair for 
RawMutex { - #[inline] - unsafe fn unlock_fair(&self) { - deadlock::release_resource(self as *const _ as usize); - if self - .state - .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) - .is_ok() - { - return; - } - self.unlock_slow(true); - } - - #[inline] - unsafe fn bump(&self) { - if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { - self.bump_slow(); - } - } -} - -unsafe impl lock_api::RawMutexTimed for RawMutex { - type Duration = Duration; - type Instant = Instant; - - #[inline] - fn try_lock_until(&self, timeout: Instant) -> bool { - let result = if self - .state - .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - true - } else { - self.lock_slow(Some(timeout)) - }; - if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; - } - result - } - - #[inline] - fn try_lock_for(&self, timeout: Duration) -> bool { - let result = if self - .state - .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - true - } else { - self.lock_slow(util::to_deadline(timeout)) - }; - if result { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; - } - result - } -} - -impl RawMutex { - // Used by Condvar when requeuing threads to us, must be called while - // holding the queue lock. - #[inline] - pub(crate) fn mark_parked_if_locked(&self) -> bool { - let mut state = self.state.load(Ordering::Relaxed); - loop { - if state & LOCKED_BIT == 0 { - return false; - } - match self.state.compare_exchange_weak( - state, - state | PARKED_BIT, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - } - } - } - - // Used by Condvar when requeuing threads to us, must be called while - // holding the queue lock. 
- #[inline] - pub(crate) fn mark_parked(&self) { - self.state.fetch_or(PARKED_BIT, Ordering::Relaxed); - } - - #[cold] - fn lock_slow(&self, timeout: Option) -> bool { - let mut spinwait = SpinWait::new(); - let mut state = self.state.load(Ordering::Relaxed); - loop { - // Grab the lock if it isn't locked, even if there is a queue on it - if state & LOCKED_BIT == 0 { - match self.state.compare_exchange_weak( - state, - state | LOCKED_BIT, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - } - continue; - } - - // If there is no queue, try spinning a few times - if state & PARKED_BIT == 0 && spinwait.spin() { - state = self.state.load(Ordering::Relaxed); - continue; - } - - // Set the parked bit - if state & PARKED_BIT == 0 { - if let Err(x) = self.state.compare_exchange_weak( - state, - state | PARKED_BIT, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - state = x; - continue; - } - } - - // Park our thread until we are woken up by an unlock - let addr = self as *const _ as usize; - let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the parked bit if we were the last parked thread - if was_last_thread { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - }; - // SAFETY: - // * `addr` is an address we control. - // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. - // * `before_sleep` does not call `park`, nor does it panic. - match unsafe { - parking_lot_core::park( - addr, - validate, - before_sleep, - timed_out, - DEFAULT_PARK_TOKEN, - timeout, - ) - } { - // The thread that unparked us passed the lock on to us - // directly without unlocking it. 
- ParkResult::Unparked(TOKEN_HANDOFF) => return true, - - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), - - // The validation function failed, try locking again - ParkResult::Invalid => (), - - // Timeout expired - ParkResult::TimedOut => return false, - } - - // Loop back and try locking again - spinwait.reset(); - state = self.state.load(Ordering::Relaxed); - } - } - - #[cold] - fn unlock_slow(&self, force_fair: bool) { - // Unpark one thread and leave the parked bit set if there might - // still be parked threads on this address. - let addr = self as *const _ as usize; - let callback = |result: UnparkResult| { - // If we are using a fair unlock then we should keep the - // mutex locked and hand it off to the unparked thread. - if result.unparked_threads != 0 && (force_fair || result.be_fair) { - // Clear the parked bit if there are no more parked - // threads. - if !result.have_more_threads { - self.state.store(LOCKED_BIT, Ordering::Relaxed); - } - return TOKEN_HANDOFF; - } - - // Clear the locked bit, and the parked bit as well if there - // are no more parked threads. - if result.have_more_threads { - self.state.store(PARKED_BIT, Ordering::Release); - } else { - self.state.store(0, Ordering::Release); - } - TOKEN_NORMAL - }; - // SAFETY: - // * `addr` is an address we control. - // * `callback` does not panic or call into any function of `parking_lot`. 
- unsafe { - parking_lot_core::unpark_one(addr, callback); - } - } - - #[cold] - fn bump_slow(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; - self.unlock_slow(true); - self.lock(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/raw_rwlock.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/raw_rwlock.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/raw_rwlock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/raw_rwlock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1149 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::elision::{have_elision, AtomicElisionExt}; -use crate::raw_mutex::{TOKEN_HANDOFF, TOKEN_NORMAL}; -use crate::util; -use core::{ - cell::Cell, - sync::atomic::{AtomicUsize, Ordering}, -}; -use lock_api::{RawRwLock as RawRwLock_, RawRwLockUpgrade}; -use parking_lot_core::{ - self, deadlock, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult, UnparkToken, -}; -use std::time::{Duration, Instant}; - -// This reader-writer lock implementation is based on Boost's upgrade_mutex: -// https://github.com/boostorg/thread/blob/fc08c1fe2840baeeee143440fba31ef9e9a813c8/include/boost/thread/v2/shared_mutex.hpp#L432 -// -// This implementation uses 2 wait queues, one at key [addr] and one at key -// [addr + 1]. The primary queue is used for all new waiting threads, and the -// secondary queue is used by the thread which has acquired WRITER_BIT but is -// waiting for the remaining readers to exit the lock. -// -// This implementation is fair between readers and writers since it uses the -// order in which threads first started queuing to alternate between read phases -// and write phases. 
In particular is it not vulnerable to write starvation -// since readers will block if there is a pending writer. - -// There is at least one thread in the main queue. -const PARKED_BIT: usize = 0b0001; -// There is a parked thread holding WRITER_BIT. WRITER_BIT must be set. -const WRITER_PARKED_BIT: usize = 0b0010; -// A reader is holding an upgradable lock. The reader count must be non-zero and -// WRITER_BIT must not be set. -const UPGRADABLE_BIT: usize = 0b0100; -// If the reader count is zero: a writer is currently holding an exclusive lock. -// Otherwise: a writer is waiting for the remaining readers to exit the lock. -const WRITER_BIT: usize = 0b1000; -// Mask of bits used to count readers. -const READERS_MASK: usize = !0b1111; -// Base unit for counting readers. -const ONE_READER: usize = 0b10000; - -// Token indicating what type of lock a queued thread is trying to acquire -const TOKEN_SHARED: ParkToken = ParkToken(ONE_READER); -const TOKEN_EXCLUSIVE: ParkToken = ParkToken(WRITER_BIT); -const TOKEN_UPGRADABLE: ParkToken = ParkToken(ONE_READER | UPGRADABLE_BIT); - -/// Raw reader-writer lock type backed by the parking lot. 
-pub struct RawRwLock { - state: AtomicUsize, -} - -unsafe impl lock_api::RawRwLock for RawRwLock { - const INIT: RawRwLock = RawRwLock { - state: AtomicUsize::new(0), - }; - - type GuardMarker = crate::GuardMarker; - - #[inline] - fn lock_exclusive(&self) { - if self - .state - .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) - .is_err() - { - let result = self.lock_exclusive_slow(None); - debug_assert!(result); - } - self.deadlock_acquire(); - } - - #[inline] - fn try_lock_exclusive(&self) -> bool { - if self - .state - .compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - self.deadlock_acquire(); - true - } else { - false - } - } - - #[inline] - unsafe fn unlock_exclusive(&self) { - self.deadlock_release(); - if self - .state - .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) - .is_ok() - { - return; - } - self.unlock_exclusive_slow(false); - } - - #[inline] - fn lock_shared(&self) { - if !self.try_lock_shared_fast(false) { - let result = self.lock_shared_slow(false, None); - debug_assert!(result); - } - self.deadlock_acquire(); - } - - #[inline] - fn try_lock_shared(&self) -> bool { - let result = if self.try_lock_shared_fast(false) { - true - } else { - self.try_lock_shared_slow(false) - }; - if result { - self.deadlock_acquire(); - } - result - } - - #[inline] - unsafe fn unlock_shared(&self) { - self.deadlock_release(); - let state = if have_elision() { - self.state.elision_fetch_sub_release(ONE_READER) - } else { - self.state.fetch_sub(ONE_READER, Ordering::Release) - }; - if state & (READERS_MASK | WRITER_PARKED_BIT) == (ONE_READER | WRITER_PARKED_BIT) { - self.unlock_shared_slow(); - } - } - - #[inline] - fn is_locked(&self) -> bool { - let state = self.state.load(Ordering::Relaxed); - state & (WRITER_BIT | READERS_MASK) != 0 - } - - #[inline] - fn is_locked_exclusive(&self) -> bool { - let state = self.state.load(Ordering::Relaxed); - state & (WRITER_BIT) != 0 - } -} - 
-unsafe impl lock_api::RawRwLockFair for RawRwLock { - #[inline] - unsafe fn unlock_shared_fair(&self) { - // Shared unlocking is always fair in this implementation. - self.unlock_shared(); - } - - #[inline] - unsafe fn unlock_exclusive_fair(&self) { - self.deadlock_release(); - if self - .state - .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) - .is_ok() - { - return; - } - self.unlock_exclusive_slow(true); - } - - #[inline] - unsafe fn bump_shared(&self) { - if self.state.load(Ordering::Relaxed) & (READERS_MASK | WRITER_BIT) - == ONE_READER | WRITER_BIT - { - self.bump_shared_slow(); - } - } - - #[inline] - unsafe fn bump_exclusive(&self) { - if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { - self.bump_exclusive_slow(); - } - } -} - -unsafe impl lock_api::RawRwLockDowngrade for RawRwLock { - #[inline] - unsafe fn downgrade(&self) { - let state = self - .state - .fetch_add(ONE_READER - WRITER_BIT, Ordering::Release); - - // Wake up parked shared and upgradable threads if there are any - if state & PARKED_BIT != 0 { - self.downgrade_slow(); - } - } -} - -unsafe impl lock_api::RawRwLockTimed for RawRwLock { - type Duration = Duration; - type Instant = Instant; - - #[inline] - fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool { - let result = if self.try_lock_shared_fast(false) { - true - } else { - self.lock_shared_slow(false, util::to_deadline(timeout)) - }; - if result { - self.deadlock_acquire(); - } - result - } - - #[inline] - fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool { - let result = if self.try_lock_shared_fast(false) { - true - } else { - self.lock_shared_slow(false, Some(timeout)) - }; - if result { - self.deadlock_acquire(); - } - result - } - - #[inline] - fn try_lock_exclusive_for(&self, timeout: Duration) -> bool { - let result = if self - .state - .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - true - } else { - 
self.lock_exclusive_slow(util::to_deadline(timeout)) - }; - if result { - self.deadlock_acquire(); - } - result - } - - #[inline] - fn try_lock_exclusive_until(&self, timeout: Instant) -> bool { - let result = if self - .state - .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - true - } else { - self.lock_exclusive_slow(Some(timeout)) - }; - if result { - self.deadlock_acquire(); - } - result - } -} - -unsafe impl lock_api::RawRwLockRecursive for RawRwLock { - #[inline] - fn lock_shared_recursive(&self) { - if !self.try_lock_shared_fast(true) { - let result = self.lock_shared_slow(true, None); - debug_assert!(result); - } - self.deadlock_acquire(); - } - - #[inline] - fn try_lock_shared_recursive(&self) -> bool { - let result = if self.try_lock_shared_fast(true) { - true - } else { - self.try_lock_shared_slow(true) - }; - if result { - self.deadlock_acquire(); - } - result - } -} - -unsafe impl lock_api::RawRwLockRecursiveTimed for RawRwLock { - #[inline] - fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool { - let result = if self.try_lock_shared_fast(true) { - true - } else { - self.lock_shared_slow(true, util::to_deadline(timeout)) - }; - if result { - self.deadlock_acquire(); - } - result - } - - #[inline] - fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool { - let result = if self.try_lock_shared_fast(true) { - true - } else { - self.lock_shared_slow(true, Some(timeout)) - }; - if result { - self.deadlock_acquire(); - } - result - } -} - -unsafe impl lock_api::RawRwLockUpgrade for RawRwLock { - #[inline] - fn lock_upgradable(&self) { - if !self.try_lock_upgradable_fast() { - let result = self.lock_upgradable_slow(None); - debug_assert!(result); - } - self.deadlock_acquire(); - } - - #[inline] - fn try_lock_upgradable(&self) -> bool { - let result = if self.try_lock_upgradable_fast() { - true - } else { - self.try_lock_upgradable_slow() - }; - if result { - 
self.deadlock_acquire(); - } - result - } - - #[inline] - unsafe fn unlock_upgradable(&self) { - self.deadlock_release(); - let state = self.state.load(Ordering::Relaxed); - if state & PARKED_BIT == 0 { - if self - .state - .compare_exchange_weak( - state, - state - (ONE_READER | UPGRADABLE_BIT), - Ordering::Release, - Ordering::Relaxed, - ) - .is_ok() - { - return; - } - } - self.unlock_upgradable_slow(false); - } - - #[inline] - unsafe fn upgrade(&self) { - let state = self.state.fetch_sub( - (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, - Ordering::Acquire, - ); - if state & READERS_MASK != ONE_READER { - let result = self.upgrade_slow(None); - debug_assert!(result); - } - } - - #[inline] - unsafe fn try_upgrade(&self) -> bool { - if self - .state - .compare_exchange_weak( - ONE_READER | UPGRADABLE_BIT, - WRITER_BIT, - Ordering::Acquire, - Ordering::Relaxed, - ) - .is_ok() - { - true - } else { - self.try_upgrade_slow() - } - } -} - -unsafe impl lock_api::RawRwLockUpgradeFair for RawRwLock { - #[inline] - unsafe fn unlock_upgradable_fair(&self) { - self.deadlock_release(); - let state = self.state.load(Ordering::Relaxed); - if state & PARKED_BIT == 0 { - if self - .state - .compare_exchange_weak( - state, - state - (ONE_READER | UPGRADABLE_BIT), - Ordering::Release, - Ordering::Relaxed, - ) - .is_ok() - { - return; - } - } - self.unlock_upgradable_slow(false); - } - - #[inline] - unsafe fn bump_upgradable(&self) { - if self.state.load(Ordering::Relaxed) == ONE_READER | UPGRADABLE_BIT | PARKED_BIT { - self.bump_upgradable_slow(); - } - } -} - -unsafe impl lock_api::RawRwLockUpgradeDowngrade for RawRwLock { - #[inline] - unsafe fn downgrade_upgradable(&self) { - let state = self.state.fetch_sub(UPGRADABLE_BIT, Ordering::Relaxed); - - // Wake up parked upgradable threads if there are any - if state & PARKED_BIT != 0 { - self.downgrade_slow(); - } - } - - #[inline] - unsafe fn downgrade_to_upgradable(&self) { - let state = self.state.fetch_add( - (ONE_READER | 
UPGRADABLE_BIT) - WRITER_BIT, - Ordering::Release, - ); - - // Wake up parked shared threads if there are any - if state & PARKED_BIT != 0 { - self.downgrade_to_upgradable_slow(); - } - } -} - -unsafe impl lock_api::RawRwLockUpgradeTimed for RawRwLock { - #[inline] - fn try_lock_upgradable_until(&self, timeout: Instant) -> bool { - let result = if self.try_lock_upgradable_fast() { - true - } else { - self.lock_upgradable_slow(Some(timeout)) - }; - if result { - self.deadlock_acquire(); - } - result - } - - #[inline] - fn try_lock_upgradable_for(&self, timeout: Duration) -> bool { - let result = if self.try_lock_upgradable_fast() { - true - } else { - self.lock_upgradable_slow(util::to_deadline(timeout)) - }; - if result { - self.deadlock_acquire(); - } - result - } - - #[inline] - unsafe fn try_upgrade_until(&self, timeout: Instant) -> bool { - let state = self.state.fetch_sub( - (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, - Ordering::Relaxed, - ); - if state & READERS_MASK == ONE_READER { - true - } else { - self.upgrade_slow(Some(timeout)) - } - } - - #[inline] - unsafe fn try_upgrade_for(&self, timeout: Duration) -> bool { - let state = self.state.fetch_sub( - (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, - Ordering::Relaxed, - ); - if state & READERS_MASK == ONE_READER { - true - } else { - self.upgrade_slow(util::to_deadline(timeout)) - } - } -} - -impl RawRwLock { - #[inline(always)] - fn try_lock_shared_fast(&self, recursive: bool) -> bool { - let state = self.state.load(Ordering::Relaxed); - - // We can't allow grabbing a shared lock if there is a writer, even if - // the writer is still waiting for the remaining readers to exit. - if state & WRITER_BIT != 0 { - // To allow recursive locks, we make an exception and allow readers - // to skip ahead of a pending writer to avoid deadlocking, at the - // cost of breaking the fairness guarantees. 
- if !recursive || state & READERS_MASK == 0 { - return false; - } - } - - // Use hardware lock elision to avoid cache conflicts when multiple - // readers try to acquire the lock. We only do this if the lock is - // completely empty since elision handles conflicts poorly. - if have_elision() && state == 0 { - self.state - .elision_compare_exchange_acquire(0, ONE_READER) - .is_ok() - } else if let Some(new_state) = state.checked_add(ONE_READER) { - self.state - .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - } else { - false - } - } - - #[cold] - fn try_lock_shared_slow(&self, recursive: bool) -> bool { - let mut state = self.state.load(Ordering::Relaxed); - loop { - // This mirrors the condition in try_lock_shared_fast - if state & WRITER_BIT != 0 { - if !recursive || state & READERS_MASK == 0 { - return false; - } - } - if have_elision() && state == 0 { - match self.state.elision_compare_exchange_acquire(0, ONE_READER) { - Ok(_) => return true, - Err(x) => state = x, - } - } else { - match self.state.compare_exchange_weak( - state, - state - .checked_add(ONE_READER) - .expect("RwLock reader count overflow"), - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - } - } - } - } - - #[inline(always)] - fn try_lock_upgradable_fast(&self) -> bool { - let state = self.state.load(Ordering::Relaxed); - - // We can't grab an upgradable lock if there is already a writer or - // upgradable reader. 
- if state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { - return false; - } - - if let Some(new_state) = state.checked_add(ONE_READER | UPGRADABLE_BIT) { - self.state - .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - } else { - false - } - } - - #[cold] - fn try_lock_upgradable_slow(&self) -> bool { - let mut state = self.state.load(Ordering::Relaxed); - loop { - // This mirrors the condition in try_lock_upgradable_fast - if state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { - return false; - } - - match self.state.compare_exchange_weak( - state, - state - .checked_add(ONE_READER | UPGRADABLE_BIT) - .expect("RwLock reader count overflow"), - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - } - } - } - - #[cold] - fn lock_exclusive_slow(&self, timeout: Option) -> bool { - let try_lock = |state: &mut usize| { - loop { - if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { - return false; - } - - // Grab WRITER_BIT if it isn't set, even if there are parked threads. - match self.state.compare_exchange_weak( - *state, - *state | WRITER_BIT, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => *state = x, - } - } - }; - - // Step 1: grab exclusive ownership of WRITER_BIT - let timed_out = !self.lock_common( - timeout, - TOKEN_EXCLUSIVE, - try_lock, - WRITER_BIT | UPGRADABLE_BIT, - ); - if timed_out { - return false; - } - - // Step 2: wait for all remaining readers to exit the lock. - self.wait_for_readers(timeout, 0) - } - - #[cold] - fn unlock_exclusive_slow(&self, force_fair: bool) { - // There are threads to unpark. Try to unpark as many as we can. - let callback = |mut new_state, result: UnparkResult| { - // If we are using a fair unlock then we should keep the - // rwlock locked and hand it off to the unparked threads. 
- if result.unparked_threads != 0 && (force_fair || result.be_fair) { - if result.have_more_threads { - new_state |= PARKED_BIT; - } - self.state.store(new_state, Ordering::Release); - TOKEN_HANDOFF - } else { - // Clear the parked bit if there are no more parked threads. - if result.have_more_threads { - self.state.store(PARKED_BIT, Ordering::Release); - } else { - self.state.store(0, Ordering::Release); - } - TOKEN_NORMAL - } - }; - // SAFETY: `callback` does not panic or call into any function of `parking_lot`. - unsafe { - self.wake_parked_threads(0, callback); - } - } - - #[cold] - fn lock_shared_slow(&self, recursive: bool, timeout: Option) -> bool { - let try_lock = |state: &mut usize| { - let mut spinwait_shared = SpinWait::new(); - loop { - // Use hardware lock elision to avoid cache conflicts when multiple - // readers try to acquire the lock. We only do this if the lock is - // completely empty since elision handles conflicts poorly. - if have_elision() && *state == 0 { - match self.state.elision_compare_exchange_acquire(0, ONE_READER) { - Ok(_) => return true, - Err(x) => *state = x, - } - } - - // This is the same condition as try_lock_shared_fast - if *state & WRITER_BIT != 0 { - if !recursive || *state & READERS_MASK == 0 { - return false; - } - } - - if self - .state - .compare_exchange_weak( - *state, - state - .checked_add(ONE_READER) - .expect("RwLock reader count overflow"), - Ordering::Acquire, - Ordering::Relaxed, - ) - .is_ok() - { - return true; - } - - // If there is high contention on the reader count then we want - // to leave some time between attempts to acquire the lock to - // let other threads make progress. - spinwait_shared.spin_no_yield(); - *state = self.state.load(Ordering::Relaxed); - } - }; - self.lock_common(timeout, TOKEN_SHARED, try_lock, WRITER_BIT) - } - - #[cold] - fn unlock_shared_slow(&self) { - // At this point WRITER_PARKED_BIT is set and READER_MASK is empty. 
We - // just need to wake up a potentially sleeping pending writer. - // Using the 2nd key at addr + 1 - let addr = self as *const _ as usize + 1; - let callback = |_result: UnparkResult| { - // Clear the WRITER_PARKED_BIT here since there can only be one - // parked writer thread. - self.state.fetch_and(!WRITER_PARKED_BIT, Ordering::Relaxed); - TOKEN_NORMAL - }; - // SAFETY: - // * `addr` is an address we control. - // * `callback` does not panic or call into any function of `parking_lot`. - unsafe { - parking_lot_core::unpark_one(addr, callback); - } - } - - #[cold] - fn lock_upgradable_slow(&self, timeout: Option) -> bool { - let try_lock = |state: &mut usize| { - let mut spinwait_shared = SpinWait::new(); - loop { - if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { - return false; - } - - if self - .state - .compare_exchange_weak( - *state, - state - .checked_add(ONE_READER | UPGRADABLE_BIT) - .expect("RwLock reader count overflow"), - Ordering::Acquire, - Ordering::Relaxed, - ) - .is_ok() - { - return true; - } - - // If there is high contention on the reader count then we want - // to leave some time between attempts to acquire the lock to - // let other threads make progress. - spinwait_shared.spin_no_yield(); - *state = self.state.load(Ordering::Relaxed); - } - }; - self.lock_common( - timeout, - TOKEN_UPGRADABLE, - try_lock, - WRITER_BIT | UPGRADABLE_BIT, - ) - } - - #[cold] - fn unlock_upgradable_slow(&self, force_fair: bool) { - // Just release the lock if there are no parked threads. - let mut state = self.state.load(Ordering::Relaxed); - while state & PARKED_BIT == 0 { - match self.state.compare_exchange_weak( - state, - state - (ONE_READER | UPGRADABLE_BIT), - Ordering::Release, - Ordering::Relaxed, - ) { - Ok(_) => return, - Err(x) => state = x, - } - } - - // There are threads to unpark. Try to unpark as many as we can. 
- let callback = |new_state, result: UnparkResult| { - // If we are using a fair unlock then we should keep the - // rwlock locked and hand it off to the unparked threads. - let mut state = self.state.load(Ordering::Relaxed); - if force_fair || result.be_fair { - // Fall back to normal unpark on overflow. Panicking is - // not allowed in parking_lot callbacks. - while let Some(mut new_state) = - (state - (ONE_READER | UPGRADABLE_BIT)).checked_add(new_state) - { - if result.have_more_threads { - new_state |= PARKED_BIT; - } else { - new_state &= !PARKED_BIT; - } - match self.state.compare_exchange_weak( - state, - new_state, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - Ok(_) => return TOKEN_HANDOFF, - Err(x) => state = x, - } - } - } - - // Otherwise just release the upgradable lock and update PARKED_BIT. - loop { - let mut new_state = state - (ONE_READER | UPGRADABLE_BIT); - if result.have_more_threads { - new_state |= PARKED_BIT; - } else { - new_state &= !PARKED_BIT; - } - match self.state.compare_exchange_weak( - state, - new_state, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - Ok(_) => return TOKEN_NORMAL, - Err(x) => state = x, - } - } - }; - // SAFETY: `callback` does not panic or call into any function of `parking_lot`. - unsafe { - self.wake_parked_threads(0, callback); - } - } - - #[cold] - fn try_upgrade_slow(&self) -> bool { - let mut state = self.state.load(Ordering::Relaxed); - loop { - if state & READERS_MASK != ONE_READER { - return false; - } - match self.state.compare_exchange_weak( - state, - state - (ONE_READER | UPGRADABLE_BIT) + WRITER_BIT, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => state = x, - } - } - } - - #[cold] - fn upgrade_slow(&self, timeout: Option) -> bool { - self.wait_for_readers(timeout, ONE_READER | UPGRADABLE_BIT) - } - - #[cold] - fn downgrade_slow(&self) { - // We only reach this point if PARKED_BIT is set. 
- let callback = |_, result: UnparkResult| { - // Clear the parked bit if there no more parked threads - if !result.have_more_threads { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - TOKEN_NORMAL - }; - // SAFETY: `callback` does not panic or call into any function of `parking_lot`. - unsafe { - self.wake_parked_threads(ONE_READER, callback); - } - } - - #[cold] - fn downgrade_to_upgradable_slow(&self) { - // We only reach this point if PARKED_BIT is set. - let callback = |_, result: UnparkResult| { - // Clear the parked bit if there no more parked threads - if !result.have_more_threads { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - TOKEN_NORMAL - }; - // SAFETY: `callback` does not panic or call into any function of `parking_lot`. - unsafe { - self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); - } - } - - #[cold] - unsafe fn bump_shared_slow(&self) { - self.unlock_shared(); - self.lock_shared(); - } - - #[cold] - fn bump_exclusive_slow(&self) { - self.deadlock_release(); - self.unlock_exclusive_slow(true); - self.lock_exclusive(); - } - - #[cold] - fn bump_upgradable_slow(&self) { - self.deadlock_release(); - self.unlock_upgradable_slow(true); - self.lock_upgradable(); - } - - /// Common code for waking up parked threads after releasing WRITER_BIT or - /// UPGRADABLE_BIT. - /// - /// # Safety - /// - /// `callback` must uphold the requirements of the `callback` parameter to - /// `parking_lot_core::unpark_filter`. Meaning no panics or calls into any function in - /// `parking_lot`. - #[inline] - unsafe fn wake_parked_threads( - &self, - new_state: usize, - callback: impl FnOnce(usize, UnparkResult) -> UnparkToken, - ) { - // We must wake up at least one upgrader or writer if there is one, - // otherwise they may end up parked indefinitely since unlock_shared - // does not call wake_parked_threads. 
- let new_state = Cell::new(new_state); - let addr = self as *const _ as usize; - let filter = |ParkToken(token)| { - let s = new_state.get(); - - // If we are waking up a writer, don't wake anything else. - if s & WRITER_BIT != 0 { - return FilterOp::Stop; - } - - // Otherwise wake *all* readers and one upgrader/writer. - if token & (UPGRADABLE_BIT | WRITER_BIT) != 0 && s & UPGRADABLE_BIT != 0 { - // Skip writers and upgradable readers if we already have - // a writer/upgradable reader. - FilterOp::Skip - } else { - new_state.set(s + token); - FilterOp::Unpark - } - }; - let callback = |result| callback(new_state.get(), result); - // SAFETY: - // * `addr` is an address we control. - // * `filter` does not panic or call into any function of `parking_lot`. - // * `callback` safety responsibility is on caller - parking_lot_core::unpark_filter(addr, filter, callback); - } - - // Common code for waiting for readers to exit the lock after acquiring - // WRITER_BIT. - #[inline] - fn wait_for_readers(&self, timeout: Option, prev_value: usize) -> bool { - // At this point WRITER_BIT is already set, we just need to wait for the - // remaining readers to exit the lock. 
- let mut spinwait = SpinWait::new(); - let mut state = self.state.load(Ordering::Acquire); - while state & READERS_MASK != 0 { - // Spin a few times to wait for readers to exit - if spinwait.spin() { - state = self.state.load(Ordering::Acquire); - continue; - } - - // Set the parked bit - if state & WRITER_PARKED_BIT == 0 { - if let Err(x) = self.state.compare_exchange_weak( - state, - state | WRITER_PARKED_BIT, - Ordering::Acquire, - Ordering::Acquire, - ) { - state = x; - continue; - } - } - - // Park our thread until we are woken up by an unlock - // Using the 2nd key at addr + 1 - let addr = self as *const _ as usize + 1; - let validate = || { - let state = self.state.load(Ordering::Relaxed); - state & READERS_MASK != 0 && state & WRITER_PARKED_BIT != 0 - }; - let before_sleep = || {}; - let timed_out = |_, _| {}; - // SAFETY: - // * `addr` is an address we control. - // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. - // * `before_sleep` does not call `park`, nor does it panic. - let park_result = unsafe { - parking_lot_core::park( - addr, - validate, - before_sleep, - timed_out, - TOKEN_EXCLUSIVE, - timeout, - ) - }; - match park_result { - // We still need to re-check the state if we are unparked - // since a previous writer timing-out could have allowed - // another reader to sneak in before we parked. - ParkResult::Unparked(_) | ParkResult::Invalid => { - state = self.state.load(Ordering::Acquire); - continue; - } - - // Timeout expired - ParkResult::TimedOut => { - // We need to release WRITER_BIT and revert back to - // our previous value. We also wake up any threads that - // might be waiting on WRITER_BIT. 
- let state = self.state.fetch_add( - prev_value.wrapping_sub(WRITER_BIT | WRITER_PARKED_BIT), - Ordering::Relaxed, - ); - if state & PARKED_BIT != 0 { - let callback = |_, result: UnparkResult| { - // Clear the parked bit if there no more parked threads - if !result.have_more_threads { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - TOKEN_NORMAL - }; - // SAFETY: `callback` does not panic or call any function of `parking_lot`. - unsafe { - self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); - } - } - return false; - } - } - } - true - } - - /// Common code for acquiring a lock - #[inline] - fn lock_common( - &self, - timeout: Option, - token: ParkToken, - mut try_lock: impl FnMut(&mut usize) -> bool, - validate_flags: usize, - ) -> bool { - let mut spinwait = SpinWait::new(); - let mut state = self.state.load(Ordering::Relaxed); - loop { - // Attempt to grab the lock - if try_lock(&mut state) { - return true; - } - - // If there are no parked threads, try spinning a few times. - if state & (PARKED_BIT | WRITER_PARKED_BIT) == 0 && spinwait.spin() { - state = self.state.load(Ordering::Relaxed); - continue; - } - - // Set the parked bit - if state & PARKED_BIT == 0 { - if let Err(x) = self.state.compare_exchange_weak( - state, - state | PARKED_BIT, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - state = x; - continue; - } - } - - // Park our thread until we are woken up by an unlock - let addr = self as *const _ as usize; - let validate = || { - let state = self.state.load(Ordering::Relaxed); - state & PARKED_BIT != 0 && (state & validate_flags != 0) - }; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the parked bit if we were the last parked thread - if was_last_thread { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - }; - - // SAFETY: - // * `addr` is an address we control. - // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. 
- // * `before_sleep` does not call `park`, nor does it panic. - let park_result = unsafe { - parking_lot_core::park(addr, validate, before_sleep, timed_out, token, timeout) - }; - match park_result { - // The thread that unparked us passed the lock on to us - // directly without unlocking it. - ParkResult::Unparked(TOKEN_HANDOFF) => return true, - - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), - - // The validation function failed, try locking again - ParkResult::Invalid => (), - - // Timeout expired - ParkResult::TimedOut => return false, - } - - // Loop back and try locking again - spinwait.reset(); - state = self.state.load(Ordering::Relaxed); - } - } - - #[inline] - fn deadlock_acquire(&self) { - unsafe { deadlock::acquire_resource(self as *const _ as usize) }; - unsafe { deadlock::acquire_resource(self as *const _ as usize + 1) }; - } - - #[inline] - fn deadlock_release(&self) { - unsafe { deadlock::release_resource(self as *const _ as usize) }; - unsafe { deadlock::release_resource(self as *const _ as usize + 1) }; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/remutex.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/remutex.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/remutex.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/remutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,149 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::raw_mutex::RawMutex; -use core::num::NonZeroUsize; -use lock_api::{self, GetThreadId}; - -/// Implementation of the `GetThreadId` trait for `lock_api::ReentrantMutex`. 
-pub struct RawThreadId; - -unsafe impl GetThreadId for RawThreadId { - const INIT: RawThreadId = RawThreadId; - - fn nonzero_thread_id(&self) -> NonZeroUsize { - // The address of a thread-local variable is guaranteed to be unique to the - // current thread, and is also guaranteed to be non-zero. The variable has to have a - // non-zero size to guarantee it has a unique address for each thread. - thread_local!(static KEY: u8 = 0); - KEY.with(|x| { - NonZeroUsize::new(x as *const _ as usize) - .expect("thread-local variable address is null") - }) - } -} - -/// A mutex which can be recursively locked by a single thread. -/// -/// This type is identical to `Mutex` except for the following points: -/// -/// - Locking multiple times from the same thread will work correctly instead of -/// deadlocking. -/// - `ReentrantMutexGuard` does not give mutable references to the locked data. -/// Use a `RefCell` if you need this. -/// -/// See [`Mutex`](type.Mutex.html) for more details about the underlying mutex -/// primitive. -pub type ReentrantMutex = lock_api::ReentrantMutex; - -/// Creates a new reentrant mutex in an unlocked state ready for use. -/// -/// This allows creating a reentrant mutex in a constant context on stable Rust. -pub const fn const_reentrant_mutex(val: T) -> ReentrantMutex { - ReentrantMutex::const_new( - ::INIT, - ::INIT, - val, - ) -} - -/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure -/// is dropped (falls out of scope), the lock will be unlocked. -/// -/// The data protected by the mutex can be accessed through this guard via its -/// `Deref` implementation. -pub type ReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>; - -/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a -/// subfield of the protected data. 
-/// -/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the -/// former doesn't support temporarily unlocking and re-locking, since that -/// could introduce soundness issues if the locked object is modified by another -/// thread. -pub type MappedReentrantMutexGuard<'a, T> = - lock_api::MappedReentrantMutexGuard<'a, RawMutex, RawThreadId, T>; - -#[cfg(test)] -mod tests { - use crate::ReentrantMutex; - use std::cell::RefCell; - use std::sync::Arc; - use std::thread; - - #[cfg(feature = "serde")] - use bincode::{deserialize, serialize}; - - #[test] - fn smoke() { - let m = ReentrantMutex::new(2); - { - let a = m.lock(); - { - let b = m.lock(); - { - let c = m.lock(); - assert_eq!(*c, 2); - } - assert_eq!(*b, 2); - } - assert_eq!(*a, 2); - } - } - - #[test] - fn is_mutex() { - let m = Arc::new(ReentrantMutex::new(RefCell::new(0))); - let m2 = m.clone(); - let lock = m.lock(); - let child = thread::spawn(move || { - let lock = m2.lock(); - assert_eq!(*lock.borrow(), 4950); - }); - for i in 0..100 { - let lock = m.lock(); - *lock.borrow_mut() += i; - } - drop(lock); - child.join().unwrap(); - } - - #[test] - fn trylock_works() { - let m = Arc::new(ReentrantMutex::new(())); - let m2 = m.clone(); - let _lock = m.try_lock(); - let _lock2 = m.try_lock(); - thread::spawn(move || { - let lock = m2.try_lock(); - assert!(lock.is_none()); - }) - .join() - .unwrap(); - let _lock3 = m.try_lock(); - } - - #[test] - fn test_reentrant_mutex_debug() { - let mutex = ReentrantMutex::new(vec![0u8, 10]); - - assert_eq!(format!("{:?}", mutex), "ReentrantMutex { data: [0, 10] }"); - } - - #[cfg(feature = "serde")] - #[test] - fn test_serde() { - let contents: Vec = vec![0, 1, 2]; - let mutex = ReentrantMutex::new(contents.clone()); - - let serialized = serialize(&mutex).unwrap(); - let deserialized: ReentrantMutex> = deserialize(&serialized).unwrap(); - - assert_eq!(*(mutex.lock()), *(deserialized.lock())); - assert_eq!(contents, 
*(deserialized.lock())); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/rwlock.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/rwlock.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/rwlock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/rwlock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,642 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::raw_rwlock::RawRwLock; -use lock_api; - -/// A reader-writer lock -/// -/// This type of lock allows a number of readers or at most one writer at any -/// point in time. The write portion of this lock typically allows modification -/// of the underlying data (exclusive access) and the read portion of this lock -/// typically allows for read-only access (shared access). -/// -/// This lock uses a task-fair locking policy which avoids both reader and -/// writer starvation. This means that readers trying to acquire the lock will -/// block even if the lock is unlocked when there are writers waiting to acquire -/// the lock. Because of this, attempts to recursively acquire a read lock -/// within a single thread may result in a deadlock. -/// -/// The type parameter `T` represents the data that this lock protects. It is -/// required that `T` satisfies `Send` to be shared across threads and `Sync` to -/// allow concurrent access through readers. The RAII guards returned from the -/// locking methods implement `Deref` (and `DerefMut` for the `write` methods) -/// to allow access to the contained of the lock. -/// -/// # Fairness -/// -/// A typical unfair lock can often end up in a situation where a single thread -/// quickly acquires and releases the same lock in succession, which can starve -/// other threads waiting to acquire the rwlock. 
While this improves throughput -/// because it doesn't force a context switch when a thread tries to re-acquire -/// a rwlock it has just released, this can starve other threads. -/// -/// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350) -/// to ensure that the lock will be fair on average without sacrificing -/// throughput. This is done by forcing a fair unlock on average every 0.5ms, -/// which will force the lock to go to the next thread waiting for the rwlock. -/// -/// Additionally, any critical section longer than 1ms will always use a fair -/// unlock, which has a negligible impact on throughput considering the length -/// of the critical section. -/// -/// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair` -/// or `RwLockWriteGuard::unlock_fair` when unlocking a mutex instead of simply -/// dropping the guard. -/// -/// # Differences from the standard library `RwLock` -/// -/// - Supports atomically downgrading a write lock into a read lock. -/// - Task-fair locking policy instead of an unspecified platform default. -/// - No poisoning, the lock is released normally on panic. -/// - Only requires 1 word of space, whereas the standard library boxes the -/// `RwLock` due to platform limitations. -/// - Can be statically constructed. -/// - Does not require any drop glue when dropped. -/// - Inline fast path for the uncontended case. -/// - Efficient handling of micro-contention using adaptive spinning. -/// - Allows raw locking & unlocking without a guard. -/// - Supports eventual fairness so that the rwlock is fair on average. -/// - Optionally allows making the rwlock fair by calling -/// `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`. 
-/// -/// # Examples -/// -/// ``` -/// use parking_lot::RwLock; -/// -/// let lock = RwLock::new(5); -/// -/// // many reader locks can be held at once -/// { -/// let r1 = lock.read(); -/// let r2 = lock.read(); -/// assert_eq!(*r1, 5); -/// assert_eq!(*r2, 5); -/// } // read locks are dropped at this point -/// -/// // only one write lock may be held, however -/// { -/// let mut w = lock.write(); -/// *w += 1; -/// assert_eq!(*w, 6); -/// } // write lock is dropped here -/// ``` -pub type RwLock = lock_api::RwLock; - -/// Creates a new instance of an `RwLock` which is unlocked. -/// -/// This allows creating a `RwLock` in a constant context on stable Rust. -pub const fn const_rwlock(val: T) -> RwLock { - RwLock::const_new(::INIT, val) -} - -/// RAII structure used to release the shared read access of a lock when -/// dropped. -pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>; - -/// RAII structure used to release the exclusive write access of a lock when -/// dropped. -pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>; - -/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a -/// subfield of the protected data. -/// -/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the -/// former doesn't support temporarily unlocking and re-locking, since that -/// could introduce soundness issues if the locked object is modified by another -/// thread. -pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>; - -/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a -/// subfield of the protected data. -/// -/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the -/// former doesn't support temporarily unlocking and re-locking, since that -/// could introduce soundness issues if the locked object is modified by another -/// thread. 
-pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>; - -/// RAII structure used to release the upgradable read access of a lock when -/// dropped. -pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>; - -#[cfg(test)] -mod tests { - use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; - use rand::Rng; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::mpsc::channel; - use std::sync::Arc; - use std::thread; - use std::time::Duration; - - #[cfg(feature = "serde")] - use bincode::{deserialize, serialize}; - - #[derive(Eq, PartialEq, Debug)] - struct NonCopy(i32); - - #[test] - fn smoke() { - let l = RwLock::new(()); - drop(l.read()); - drop(l.write()); - drop(l.upgradable_read()); - drop((l.read(), l.read())); - drop((l.read(), l.upgradable_read())); - drop(l.write()); - } - - #[test] - fn frob() { - const N: u32 = 10; - const M: u32 = 1000; - - let r = Arc::new(RwLock::new(())); - - let (tx, rx) = channel::<()>(); - for _ in 0..N { - let tx = tx.clone(); - let r = r.clone(); - thread::spawn(move || { - let mut rng = rand::thread_rng(); - for _ in 0..M { - if rng.gen_bool(1.0 / N as f64) { - drop(r.write()); - } else { - drop(r.read()); - } - } - drop(tx); - }); - } - drop(tx); - let _ = rx.recv(); - } - - #[test] - fn test_rw_arc_no_poison_wr() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.write(); - panic!(); - }) - .join(); - let lock = arc.read(); - assert_eq!(*lock, 1); - } - - #[test] - fn test_rw_arc_no_poison_ww() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.write(); - panic!(); - }) - .join(); - let lock = arc.write(); - assert_eq!(*lock, 1); - } - - #[test] - fn test_rw_arc_no_poison_rr() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: 
Result<(), _> = thread::spawn(move || { - let _lock = arc2.read(); - panic!(); - }) - .join(); - let lock = arc.read(); - assert_eq!(*lock, 1); - } - - #[test] - fn test_rw_arc_no_poison_rw() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.read(); - panic!() - }) - .join(); - let lock = arc.write(); - assert_eq!(*lock, 1); - } - - #[test] - fn test_ruw_arc() { - let arc = Arc::new(RwLock::new(0)); - let arc2 = arc.clone(); - let (tx, rx) = channel(); - - thread::spawn(move || { - for _ in 0..10 { - let mut lock = arc2.write(); - let tmp = *lock; - *lock = -1; - thread::yield_now(); - *lock = tmp + 1; - } - tx.send(()).unwrap(); - }); - - let mut children = Vec::new(); - - // Upgradable readers try to catch the writer in the act and also - // try to touch the value - for _ in 0..5 { - let arc3 = arc.clone(); - children.push(thread::spawn(move || { - let lock = arc3.upgradable_read(); - let tmp = *lock; - assert!(tmp >= 0); - thread::yield_now(); - let mut lock = RwLockUpgradableReadGuard::upgrade(lock); - assert_eq!(tmp, *lock); - *lock = -1; - thread::yield_now(); - *lock = tmp + 1; - })); - } - - // Readers try to catch the writers in the act - for _ in 0..5 { - let arc4 = arc.clone(); - children.push(thread::spawn(move || { - let lock = arc4.read(); - assert!(*lock >= 0); - })); - } - - // Wait for children to pass their asserts - for r in children { - assert!(r.join().is_ok()); - } - - // Wait for writer to finish - rx.recv().unwrap(); - let lock = arc.read(); - assert_eq!(*lock, 15); - } - - #[test] - fn test_rw_arc() { - let arc = Arc::new(RwLock::new(0)); - let arc2 = arc.clone(); - let (tx, rx) = channel(); - - thread::spawn(move || { - let mut lock = arc2.write(); - for _ in 0..10 { - let tmp = *lock; - *lock = -1; - thread::yield_now(); - *lock = tmp + 1; - } - tx.send(()).unwrap(); - }); - - // Readers try to catch the writer in the act - let mut children = Vec::new(); - 
for _ in 0..5 { - let arc3 = arc.clone(); - children.push(thread::spawn(move || { - let lock = arc3.read(); - assert!(*lock >= 0); - })); - } - - // Wait for children to pass their asserts - for r in children { - assert!(r.join().is_ok()); - } - - // Wait for writer to finish - rx.recv().unwrap(); - let lock = arc.read(); - assert_eq!(*lock, 10); - } - - #[test] - fn test_rw_arc_access_in_unwind() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _ = thread::spawn(move || { - struct Unwinder { - i: Arc>, - } - impl Drop for Unwinder { - fn drop(&mut self) { - let mut lock = self.i.write(); - *lock += 1; - } - } - let _u = Unwinder { i: arc2 }; - panic!(); - }) - .join(); - let lock = arc.read(); - assert_eq!(*lock, 2); - } - - #[test] - fn test_rwlock_unsized() { - let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); - { - let b = &mut *rw.write(); - b[0] = 4; - b[2] = 5; - } - let comp: &[i32] = &[4, 2, 5]; - assert_eq!(&*rw.read(), comp); - } - - #[test] - fn test_rwlock_try_read() { - let lock = RwLock::new(0isize); - { - let read_guard = lock.read(); - - let read_result = lock.try_read(); - assert!( - read_result.is_some(), - "try_read should succeed while read_guard is in scope" - ); - - drop(read_guard); - } - { - let upgrade_guard = lock.upgradable_read(); - - let read_result = lock.try_read(); - assert!( - read_result.is_some(), - "try_read should succeed while upgrade_guard is in scope" - ); - - drop(upgrade_guard); - } - { - let write_guard = lock.write(); - - let read_result = lock.try_read(); - assert!( - read_result.is_none(), - "try_read should fail while write_guard is in scope" - ); - - drop(write_guard); - } - } - - #[test] - fn test_rwlock_try_write() { - let lock = RwLock::new(0isize); - { - let read_guard = lock.read(); - - let write_result = lock.try_write(); - assert!( - write_result.is_none(), - "try_write should fail while read_guard is in scope" - ); - assert!(lock.is_locked()); - assert!(!lock.is_locked_exclusive()); - - 
drop(read_guard); - } - { - let upgrade_guard = lock.upgradable_read(); - - let write_result = lock.try_write(); - assert!( - write_result.is_none(), - "try_write should fail while upgrade_guard is in scope" - ); - assert!(lock.is_locked()); - assert!(!lock.is_locked_exclusive()); - - drop(upgrade_guard); - } - { - let write_guard = lock.write(); - - let write_result = lock.try_write(); - assert!( - write_result.is_none(), - "try_write should fail while write_guard is in scope" - ); - assert!(lock.is_locked()); - assert!(lock.is_locked_exclusive()); - - drop(write_guard); - } - } - - #[test] - fn test_rwlock_try_upgrade() { - let lock = RwLock::new(0isize); - { - let read_guard = lock.read(); - - let upgrade_result = lock.try_upgradable_read(); - assert!( - upgrade_result.is_some(), - "try_upgradable_read should succeed while read_guard is in scope" - ); - - drop(read_guard); - } - { - let upgrade_guard = lock.upgradable_read(); - - let upgrade_result = lock.try_upgradable_read(); - assert!( - upgrade_result.is_none(), - "try_upgradable_read should fail while upgrade_guard is in scope" - ); - - drop(upgrade_guard); - } - { - let write_guard = lock.write(); - - let upgrade_result = lock.try_upgradable_read(); - assert!( - upgrade_result.is_none(), - "try_upgradable should fail while write_guard is in scope" - ); - - drop(write_guard); - } - } - - #[test] - fn test_into_inner() { - let m = RwLock::new(NonCopy(10)); - assert_eq!(m.into_inner(), NonCopy(10)); - } - - #[test] - fn test_into_inner_drop() { - struct Foo(Arc); - impl Drop for Foo { - fn drop(&mut self) { - self.0.fetch_add(1, Ordering::SeqCst); - } - } - let num_drops = Arc::new(AtomicUsize::new(0)); - let m = RwLock::new(Foo(num_drops.clone())); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - { - let _inner = m.into_inner(); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - } - assert_eq!(num_drops.load(Ordering::SeqCst), 1); - } - - #[test] - fn test_get_mut() { - let mut m = 
RwLock::new(NonCopy(10)); - *m.get_mut() = NonCopy(20); - assert_eq!(m.into_inner(), NonCopy(20)); - } - - #[test] - fn test_rwlockguard_sync() { - fn sync(_: T) {} - - let rwlock = RwLock::new(()); - sync(rwlock.read()); - sync(rwlock.write()); - } - - #[test] - fn test_rwlock_downgrade() { - let x = Arc::new(RwLock::new(0)); - let mut handles = Vec::new(); - for _ in 0..8 { - let x = x.clone(); - handles.push(thread::spawn(move || { - for _ in 0..100 { - let mut writer = x.write(); - *writer += 1; - let cur_val = *writer; - let reader = RwLockWriteGuard::downgrade(writer); - assert_eq!(cur_val, *reader); - } - })); - } - for handle in handles { - handle.join().unwrap() - } - assert_eq!(*x.read(), 800); - } - - #[test] - fn test_rwlock_recursive() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let lock1 = arc.read(); - let t = thread::spawn(move || { - let _lock = arc2.write(); - }); - - if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) { - thread::sleep(Duration::from_millis(100)); - } else { - // FIXME: https://github.com/fortanix/rust-sgx/issues/31 - for _ in 0..100 { - thread::yield_now(); - } - } - - // A normal read would block here since there is a pending writer - let lock2 = arc.read_recursive(); - - // Unblock the thread and join it. 
- drop(lock1); - drop(lock2); - t.join().unwrap(); - } - - #[test] - fn test_rwlock_debug() { - let x = RwLock::new(vec![0u8, 10]); - - assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }"); - let _lock = x.write(); - assert_eq!(format!("{:?}", x), "RwLock { data: }"); - } - - #[test] - fn test_clone() { - let rwlock = RwLock::new(Arc::new(1)); - let a = rwlock.read_recursive(); - let b = a.clone(); - assert_eq!(Arc::strong_count(&b), 2); - } - - #[cfg(feature = "serde")] - #[test] - fn test_serde() { - let contents: Vec = vec![0, 1, 2]; - let mutex = RwLock::new(contents.clone()); - - let serialized = serialize(&mutex).unwrap(); - let deserialized: RwLock> = deserialize(&serialized).unwrap(); - - assert_eq!(*(mutex.read()), *(deserialized.read())); - assert_eq!(contents, *(deserialized.read())); - } - - #[test] - fn test_issue_203() { - struct Bar(RwLock<()>); - - impl Drop for Bar { - fn drop(&mut self) { - let _n = self.0.write(); - } - } - - thread_local! { - static B: Bar = Bar(RwLock::new(())); - } - - thread::spawn(|| { - B.with(|_| ()); - - let a = RwLock::new(()); - let _a = a.read(); - }) - .join() - .unwrap(); - } - - #[test] - fn test_rw_write_is_locked() { - let lock = RwLock::new(0isize); - { - let _read_guard = lock.read(); - - assert!(lock.is_locked()); - assert!(!lock.is_locked_exclusive()); - } - - { - let _write_guard = lock.write(); - - assert!(lock.is_locked()); - assert!(lock.is_locked_exclusive()); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot/src/util.rs s390-tools-2.33.1/rust-vendor/parking_lot/src/util.rs --- s390-tools-2.31.0/rust-vendor/parking_lot/src/util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot/src/util.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,38 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be -// copied, modified, or distributed except according to those terms. - -use std::time::{Duration, Instant}; - -// Option::unchecked_unwrap -pub trait UncheckedOptionExt { - unsafe fn unchecked_unwrap(self) -> T; -} - -impl UncheckedOptionExt for Option { - #[inline] - unsafe fn unchecked_unwrap(self) -> T { - match self { - Some(x) => x, - None => unreachable(), - } - } -} - -// hint::unreachable_unchecked() in release mode -#[inline] -unsafe fn unreachable() -> ! { - if cfg!(debug_assertions) { - unreachable!(); - } else { - core::hint::unreachable_unchecked() - } -} - -#[inline] -pub fn to_deadline(timeout: Duration) -> Option { - Instant::now().checked_add(timeout) -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/build.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/build.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/build.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,10 +0,0 @@ -// Automatically detect tsan in a way that's compatible with both stable (which -// doesn't support sanitizers) and nightly (which does). Works because build -// scripts gets `cfg` info, even if the cfg is unstable. 
-fn main() { - println!("cargo:rerun-if-changed=build.rs"); - let santizer_list = std::env::var("CARGO_CFG_SANITIZE").unwrap_or_default(); - if santizer_list.contains("thread") { - println!("cargo:rustc-cfg=tsan_enabled"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/parking_lot_core/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/parking_lot_core/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/Cargo.toml s390-tools-2.33.1/rust-vendor/parking_lot_core/Cargo.toml --- s390-tools-2.31.0/rust-vendor/parking_lot_core/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,66 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.49.0" -name = "parking_lot_core" -version = "0.9.9" -authors = ["Amanieu d'Antras "] -description = "An advanced API for creating custom synchronization primitives." 
-keywords = [ - "mutex", - "condvar", - "rwlock", - "once", - "thread", -] -categories = ["concurrency"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/Amanieu/parking_lot" - -[package.metadata.docs.rs] -rustdoc-args = ["--generate-link-to-definition"] - -[dependencies.backtrace] -version = "0.3.60" -optional = true - -[dependencies.cfg-if] -version = "1.0.0" - -[dependencies.petgraph] -version = "0.6.0" -optional = true - -[dependencies.smallvec] -version = "1.6.1" - -[dependencies.thread-id] -version = "4.0.0" -optional = true - -[features] -deadlock_detection = [ - "petgraph", - "thread-id", - "backtrace", -] -nightly = [] - -[target."cfg(target_os = \"redox\")".dependencies.redox_syscall] -version = "0.4" - -[target."cfg(unix)".dependencies.libc] -version = "0.2.95" - -[target."cfg(windows)".dependencies.windows-targets] -version = "0.48.0" diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/parking_lot_core/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/parking_lot_core/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/LICENSE-MIT s390-tools-2.33.1/rust-vendor/parking_lot_core/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/parking_lot_core/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2016 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/lib.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/lib.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,67 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -//! This library exposes a low-level API for creating your own efficient -//! synchronization primitives. -//! -//! # The parking lot -//! -//! To keep synchronization primitives small, all thread queuing and suspending -//! functionality is offloaded to the *parking lot*. The idea behind this is based -//! on the Webkit [`WTF::ParkingLot`](https://webkit.org/blog/6161/locking-in-webkit/) -//! class, which essentially consists of a hash table mapping of lock addresses -//! to queues of parked (sleeping) threads. The Webkit parking lot was itself -//! inspired by Linux [futexes](http://man7.org/linux/man-pages/man2/futex.2.html), -//! but it is more powerful since it allows invoking callbacks while holding a -//! queue lock. -//! -//! There are two main operations that can be performed on the parking lot: -//! -//! - *Parking* refers to suspending the thread while simultaneously enqueuing it -//! on a queue keyed by some address. -//! - *Unparking* refers to dequeuing a thread from a queue keyed by some address -//! and resuming it. -//! -//! See the documentation of the individual functions for more details. -//! -//! 
# Building custom synchronization primitives -//! -//! Building custom synchronization primitives is very simple since the parking -//! lot takes care of all the hard parts for you. A simple example for a -//! custom primitive would be to integrate a `Mutex` inside another data type. -//! Since a mutex only requires 2 bits, it can share space with other data. -//! For example, one could create an `ArcMutex` type that combines the atomic -//! reference count and the two mutex bits in the same atomic word. - -#![warn(missing_docs)] -#![warn(rust_2018_idioms)] -#![cfg_attr( - all(target_env = "sgx", target_vendor = "fortanix"), - feature(sgx_platform) -)] -#![cfg_attr( - all( - feature = "nightly", - target_family = "wasm", - target_feature = "atomics" - ), - feature(stdsimd) -)] - -mod parking_lot; -mod spinwait; -mod thread_parker; -mod util; -mod word_lock; - -pub use self::parking_lot::deadlock; -pub use self::parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue}; -pub use self::parking_lot::{ - FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken, -}; -pub use self::parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; -pub use self::spinwait::SpinWait; diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/parking_lot.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/parking_lot.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/parking_lot.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/parking_lot.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1700 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. 
-use crate::thread_parker::{ThreadParker, ThreadParkerT, UnparkHandleT}; -use crate::util::UncheckedOptionExt; -use crate::word_lock::WordLock; -use core::{ - cell::{Cell, UnsafeCell}, - ptr, - sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, -}; -use smallvec::SmallVec; -use std::time::{Duration, Instant}; - -// Don't use Instant on wasm32-unknown-unknown, it just panics. -cfg_if::cfg_if! { - if #[cfg(all( - target_family = "wasm", - target_os = "unknown", - target_vendor = "unknown" - ))] { - #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] - struct TimeoutInstant; - impl TimeoutInstant { - fn now() -> TimeoutInstant { - TimeoutInstant - } - } - impl core::ops::Add for TimeoutInstant { - type Output = Self; - fn add(self, _rhs: Duration) -> Self::Output { - TimeoutInstant - } - } - } else { - use std::time::Instant as TimeoutInstant; - } -} - -static NUM_THREADS: AtomicUsize = AtomicUsize::new(0); - -/// Holds the pointer to the currently active `HashTable`. -/// -/// # Safety -/// -/// Except for the initial value of null, it must always point to a valid `HashTable` instance. -/// Any `HashTable` this global static has ever pointed to must never be freed. -static HASHTABLE: AtomicPtr = AtomicPtr::new(ptr::null_mut()); - -// Even with 3x more buckets than threads, the memory overhead per thread is -// still only a few hundred bytes per thread. -const LOAD_FACTOR: usize = 3; - -struct HashTable { - // Hash buckets for the table - entries: Box<[Bucket]>, - - // Number of bits used for the hash function - hash_bits: u32, - - // Previous table. This is only kept to keep leak detectors happy. 
- _prev: *const HashTable, -} - -impl HashTable { - #[inline] - fn new(num_threads: usize, prev: *const HashTable) -> Box { - let new_size = (num_threads * LOAD_FACTOR).next_power_of_two(); - let hash_bits = 0usize.leading_zeros() - new_size.leading_zeros() - 1; - - let now = TimeoutInstant::now(); - let mut entries = Vec::with_capacity(new_size); - for i in 0..new_size { - // We must ensure the seed is not zero - entries.push(Bucket::new(now, i as u32 + 1)); - } - - Box::new(HashTable { - entries: entries.into_boxed_slice(), - hash_bits, - _prev: prev, - }) - } -} - -#[repr(align(64))] -struct Bucket { - // Lock protecting the queue - mutex: WordLock, - - // Linked list of threads waiting on this bucket - queue_head: Cell<*const ThreadData>, - queue_tail: Cell<*const ThreadData>, - - // Next time at which point be_fair should be set - fair_timeout: UnsafeCell, -} - -impl Bucket { - #[inline] - pub fn new(timeout: TimeoutInstant, seed: u32) -> Self { - Self { - mutex: WordLock::new(), - queue_head: Cell::new(ptr::null()), - queue_tail: Cell::new(ptr::null()), - fair_timeout: UnsafeCell::new(FairTimeout::new(timeout, seed)), - } - } -} - -struct FairTimeout { - // Next time at which point be_fair should be set - timeout: TimeoutInstant, - - // the PRNG state for calculating the next timeout - seed: u32, -} - -impl FairTimeout { - #[inline] - fn new(timeout: TimeoutInstant, seed: u32) -> FairTimeout { - FairTimeout { timeout, seed } - } - - // Determine whether we should force a fair unlock, and update the timeout - #[inline] - fn should_timeout(&mut self) -> bool { - let now = TimeoutInstant::now(); - if now > self.timeout { - // Time between 0 and 1ms. - let nanos = self.gen_u32() % 1_000_000; - self.timeout = now + Duration::new(0, nanos); - true - } else { - false - } - } - - // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia. 
- fn gen_u32(&mut self) -> u32 { - self.seed ^= self.seed << 13; - self.seed ^= self.seed >> 17; - self.seed ^= self.seed << 5; - self.seed - } -} - -struct ThreadData { - parker: ThreadParker, - - // Key that this thread is sleeping on. This may change if the thread is - // requeued to a different key. - key: AtomicUsize, - - // Linked list of parked threads in a bucket - next_in_queue: Cell<*const ThreadData>, - - // UnparkToken passed to this thread when it is unparked - unpark_token: Cell, - - // ParkToken value set by the thread when it was parked - park_token: Cell, - - // Is the thread parked with a timeout? - parked_with_timeout: Cell, - - // Extra data for deadlock detection - #[cfg(feature = "deadlock_detection")] - deadlock_data: deadlock::DeadlockData, -} - -impl ThreadData { - fn new() -> ThreadData { - // Keep track of the total number of live ThreadData objects and resize - // the hash table accordingly. - let num_threads = NUM_THREADS.fetch_add(1, Ordering::Relaxed) + 1; - grow_hashtable(num_threads); - - ThreadData { - parker: ThreadParker::new(), - key: AtomicUsize::new(0), - next_in_queue: Cell::new(ptr::null()), - unpark_token: Cell::new(DEFAULT_UNPARK_TOKEN), - park_token: Cell::new(DEFAULT_PARK_TOKEN), - parked_with_timeout: Cell::new(false), - #[cfg(feature = "deadlock_detection")] - deadlock_data: deadlock::DeadlockData::new(), - } - } -} - -// Invokes the given closure with a reference to the current thread `ThreadData`. -#[inline(always)] -fn with_thread_data(f: impl FnOnce(&ThreadData) -> T) -> T { - // Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive - // to construct. Try to use a thread-local version if possible. 
Otherwise just - // create a ThreadData on the stack - let mut thread_data_storage = None; - thread_local!(static THREAD_DATA: ThreadData = ThreadData::new()); - let thread_data_ptr = THREAD_DATA - .try_with(|x| x as *const ThreadData) - .unwrap_or_else(|_| thread_data_storage.get_or_insert_with(ThreadData::new)); - - f(unsafe { &*thread_data_ptr }) -} - -impl Drop for ThreadData { - fn drop(&mut self) { - NUM_THREADS.fetch_sub(1, Ordering::Relaxed); - } -} - -/// Returns a reference to the latest hash table, creating one if it doesn't exist yet. -/// The reference is valid forever. However, the `HashTable` it references might become stale -/// at any point. Meaning it still exists, but it is not the instance in active use. -#[inline] -fn get_hashtable() -> &'static HashTable { - let table = HASHTABLE.load(Ordering::Acquire); - - // If there is no table, create one - if table.is_null() { - create_hashtable() - } else { - // SAFETY: when not null, `HASHTABLE` always points to a `HashTable` that is never freed. - unsafe { &*table } - } -} - -/// Returns a reference to the latest hash table, creating one if it doesn't exist yet. -/// The reference is valid forever. However, the `HashTable` it references might become stale -/// at any point. Meaning it still exists, but it is not the instance in active use. -#[cold] -fn create_hashtable() -> &'static HashTable { - let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null())); - - // If this fails then it means some other thread created the hash table first. - let table = match HASHTABLE.compare_exchange( - ptr::null_mut(), - new_table, - Ordering::AcqRel, - Ordering::Acquire, - ) { - Ok(_) => new_table, - Err(old_table) => { - // Free the table we created - // SAFETY: `new_table` is created from `Box::into_raw` above and only freed here. - unsafe { - let _ = Box::from_raw(new_table); - } - old_table - } - }; - // SAFETY: The `HashTable` behind `table` is never freed. 
It is either the table pointer we - // created here, or it is one loaded from `HASHTABLE`. - unsafe { &*table } -} - -// Grow the hash table so that it is big enough for the given number of threads. -// This isn't performance-critical since it is only done when a ThreadData is -// created, which only happens once per thread. -fn grow_hashtable(num_threads: usize) { - // Lock all buckets in the existing table and get a reference to it - let old_table = loop { - let table = get_hashtable(); - - // Check if we need to resize the existing table - if table.entries.len() >= LOAD_FACTOR * num_threads { - return; - } - - // Lock all buckets in the old table - for bucket in &table.entries[..] { - bucket.mutex.lock(); - } - - // Now check if our table is still the latest one. Another thread could - // have grown the hash table between us reading HASHTABLE and locking - // the buckets. - if HASHTABLE.load(Ordering::Relaxed) == table as *const _ as *mut _ { - break table; - } - - // Unlock buckets and try again - for bucket in &table.entries[..] { - // SAFETY: We hold the lock here, as required - unsafe { bucket.mutex.unlock() }; - } - }; - - // Create the new table - let mut new_table = HashTable::new(num_threads, old_table); - - // Move the entries from the old table to the new one - for bucket in &old_table.entries[..] { - // SAFETY: The park, unpark* and check_wait_graph_fast functions create only correct linked - // lists. All `ThreadData` instances in these lists will remain valid as long as they are - // present in the lists, meaning as long as their threads are parked. - unsafe { rehash_bucket_into(bucket, &mut new_table) }; - } - - // Publish the new table. No races are possible at this point because - // any other thread trying to grow the hash table is blocked on the bucket - // locks in the old table. - HASHTABLE.store(Box::into_raw(new_table), Ordering::Release); - - // Unlock all buckets in the old table - for bucket in &old_table.entries[..] 
{ - // SAFETY: We hold the lock here, as required - unsafe { bucket.mutex.unlock() }; - } -} - -/// Iterate through all `ThreadData` objects in the bucket and insert them into the given table -/// in the bucket their key correspond to for this table. -/// -/// # Safety -/// -/// The given `bucket` must have a correctly constructed linked list under `queue_head`, containing -/// `ThreadData` instances that must stay valid at least as long as the given `table` is in use. -/// -/// The given `table` must only contain buckets with correctly constructed linked lists. -unsafe fn rehash_bucket_into(bucket: &'static Bucket, table: &mut HashTable) { - let mut current: *const ThreadData = bucket.queue_head.get(); - while !current.is_null() { - let next = (*current).next_in_queue.get(); - let hash = hash((*current).key.load(Ordering::Relaxed), table.hash_bits); - if table.entries[hash].queue_tail.get().is_null() { - table.entries[hash].queue_head.set(current); - } else { - (*table.entries[hash].queue_tail.get()) - .next_in_queue - .set(current); - } - table.entries[hash].queue_tail.set(current); - (*current).next_in_queue.set(ptr::null()); - current = next; - } -} - -// Hash function for addresses -#[cfg(target_pointer_width = "32")] -#[inline] -fn hash(key: usize, bits: u32) -> usize { - key.wrapping_mul(0x9E3779B9) >> (32 - bits) -} -#[cfg(target_pointer_width = "64")] -#[inline] -fn hash(key: usize, bits: u32) -> usize { - key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits) -} - -/// Locks the bucket for the given key and returns a reference to it. -/// The returned bucket must be unlocked again in order to not cause deadlocks. -#[inline] -fn lock_bucket(key: usize) -> &'static Bucket { - loop { - let hashtable = get_hashtable(); - - let hash = hash(key, hashtable.hash_bits); - let bucket = &hashtable.entries[hash]; - - // Lock the bucket - bucket.mutex.lock(); - - // If no other thread has rehashed the table before we grabbed the lock - // then we are good to go! 
The lock we grabbed prevents any rehashes. - if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ { - return bucket; - } - - // Unlock the bucket and try again - // SAFETY: We hold the lock here, as required - unsafe { bucket.mutex.unlock() }; - } -} - -/// Locks the bucket for the given key and returns a reference to it. But checks that the key -/// hasn't been changed in the meantime due to a requeue. -/// The returned bucket must be unlocked again in order to not cause deadlocks. -#[inline] -fn lock_bucket_checked(key: &AtomicUsize) -> (usize, &'static Bucket) { - loop { - let hashtable = get_hashtable(); - let current_key = key.load(Ordering::Relaxed); - - let hash = hash(current_key, hashtable.hash_bits); - let bucket = &hashtable.entries[hash]; - - // Lock the bucket - bucket.mutex.lock(); - - // Check that both the hash table and key are correct while the bucket - // is locked. Note that the key can't change once we locked the proper - // bucket for it, so we just keep trying until we have the correct key. - if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ - && key.load(Ordering::Relaxed) == current_key - { - return (current_key, bucket); - } - - // Unlock the bucket and try again - // SAFETY: We hold the lock here, as required - unsafe { bucket.mutex.unlock() }; - } -} - -/// Locks the two buckets for the given pair of keys and returns references to them. -/// The returned buckets must be unlocked again in order to not cause deadlocks. -/// -/// If both keys hash to the same value, both returned references will be to the same bucket. Be -/// careful to only unlock it once in this case, always use `unlock_bucket_pair`. 
-#[inline] -fn lock_bucket_pair(key1: usize, key2: usize) -> (&'static Bucket, &'static Bucket) { - loop { - let hashtable = get_hashtable(); - - let hash1 = hash(key1, hashtable.hash_bits); - let hash2 = hash(key2, hashtable.hash_bits); - - // Get the bucket at the lowest hash/index first - let bucket1 = if hash1 <= hash2 { - &hashtable.entries[hash1] - } else { - &hashtable.entries[hash2] - }; - - // Lock the first bucket - bucket1.mutex.lock(); - - // If no other thread has rehashed the table before we grabbed the lock - // then we are good to go! The lock we grabbed prevents any rehashes. - if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ { - // Now lock the second bucket and return the two buckets - if hash1 == hash2 { - return (bucket1, bucket1); - } else if hash1 < hash2 { - let bucket2 = &hashtable.entries[hash2]; - bucket2.mutex.lock(); - return (bucket1, bucket2); - } else { - let bucket2 = &hashtable.entries[hash1]; - bucket2.mutex.lock(); - return (bucket2, bucket1); - } - } - - // Unlock the bucket and try again - // SAFETY: We hold the lock here, as required - unsafe { bucket1.mutex.unlock() }; - } -} - -/// Unlock a pair of buckets -/// -/// # Safety -/// -/// Both buckets must be locked -#[inline] -unsafe fn unlock_bucket_pair(bucket1: &Bucket, bucket2: &Bucket) { - bucket1.mutex.unlock(); - if !ptr::eq(bucket1, bucket2) { - bucket2.mutex.unlock(); - } -} - -/// Result of a park operation. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum ParkResult { - /// We were unparked by another thread with the given token. - Unparked(UnparkToken), - - /// The validation callback returned false. - Invalid, - - /// The timeout expired. - TimedOut, -} - -impl ParkResult { - /// Returns true if we were unparked by another thread. - #[inline] - pub fn is_unparked(self) -> bool { - if let ParkResult::Unparked(_) = self { - true - } else { - false - } - } -} - -/// Result of an unpark operation. 
-#[derive(Copy, Clone, Default, Eq, PartialEq, Debug)] -pub struct UnparkResult { - /// The number of threads that were unparked. - pub unparked_threads: usize, - - /// The number of threads that were requeued. - pub requeued_threads: usize, - - /// Whether there are any threads remaining in the queue. This only returns - /// true if a thread was unparked. - pub have_more_threads: bool, - - /// This is set to true on average once every 0.5ms for any given key. It - /// should be used to switch to a fair unlocking mechanism for a particular - /// unlock. - pub be_fair: bool, - - /// Private field so new fields can be added without breakage. - _sealed: (), -} - -/// Operation that `unpark_requeue` should perform. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum RequeueOp { - /// Abort the operation without doing anything. - Abort, - - /// Unpark one thread and requeue the rest onto the target queue. - UnparkOneRequeueRest, - - /// Requeue all threads onto the target queue. - RequeueAll, - - /// Unpark one thread and leave the rest parked. No requeuing is done. - UnparkOne, - - /// Requeue one thread and leave the rest parked on the original queue. - RequeueOne, -} - -/// Operation that `unpark_filter` should perform for each thread. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum FilterOp { - /// Unpark the thread and continue scanning the list of parked threads. - Unpark, - - /// Don't unpark the thread and continue scanning the list of parked threads. - Skip, - - /// Don't unpark the thread and stop scanning the list of parked threads. - Stop, -} - -/// A value which is passed from an unparker to a parked thread. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub struct UnparkToken(pub usize); - -/// A value associated with a parked thread which can be used by `unpark_filter`. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub struct ParkToken(pub usize); - -/// A default unpark token to use. 
-pub const DEFAULT_UNPARK_TOKEN: UnparkToken = UnparkToken(0); - -/// A default park token to use. -pub const DEFAULT_PARK_TOKEN: ParkToken = ParkToken(0); - -/// Parks the current thread in the queue associated with the given key. -/// -/// The `validate` function is called while the queue is locked and can abort -/// the operation by returning false. If `validate` returns true then the -/// current thread is appended to the queue and the queue is unlocked. -/// -/// The `before_sleep` function is called after the queue is unlocked but before -/// the thread is put to sleep. The thread will then sleep until it is unparked -/// or the given timeout is reached. -/// -/// The `timed_out` function is also called while the queue is locked, but only -/// if the timeout was reached. It is passed the key of the queue it was in when -/// it timed out, which may be different from the original key if -/// `unpark_requeue` was called. It is also passed a bool which indicates -/// whether it was the last thread in the queue. -/// -/// # Safety -/// -/// You should only call this function with an address that you control, since -/// you could otherwise interfere with the operation of other synchronization -/// primitives. -/// -/// The `validate` and `timed_out` functions are called while the queue is -/// locked and must not panic or call into any function in `parking_lot`. -/// -/// The `before_sleep` function is called outside the queue lock and is allowed -/// to call `unpark_one`, `unpark_all`, `unpark_requeue` or `unpark_filter`, but -/// it is not allowed to call `park` or panic. 
-#[inline] -pub unsafe fn park( - key: usize, - validate: impl FnOnce() -> bool, - before_sleep: impl FnOnce(), - timed_out: impl FnOnce(usize, bool), - park_token: ParkToken, - timeout: Option, -) -> ParkResult { - // Grab our thread data, this also ensures that the hash table exists - with_thread_data(|thread_data| { - // Lock the bucket for the given key - let bucket = lock_bucket(key); - - // If the validation function fails, just return - if !validate() { - // SAFETY: We hold the lock here, as required - bucket.mutex.unlock(); - return ParkResult::Invalid; - } - - // Append our thread data to the queue and unlock the bucket - thread_data.parked_with_timeout.set(timeout.is_some()); - thread_data.next_in_queue.set(ptr::null()); - thread_data.key.store(key, Ordering::Relaxed); - thread_data.park_token.set(park_token); - thread_data.parker.prepare_park(); - if !bucket.queue_head.get().is_null() { - (*bucket.queue_tail.get()).next_in_queue.set(thread_data); - } else { - bucket.queue_head.set(thread_data); - } - bucket.queue_tail.set(thread_data); - // SAFETY: We hold the lock here, as required - bucket.mutex.unlock(); - - // Invoke the pre-sleep callback - before_sleep(); - - // Park our thread and determine whether we were woken up by an unpark - // or by our timeout. Note that this isn't precise: we can still be - // unparked since we are still in the queue. - let unparked = match timeout { - Some(timeout) => thread_data.parker.park_until(timeout), - None => { - thread_data.parker.park(); - // call deadlock detection on_unpark hook - deadlock::on_unpark(thread_data); - true - } - }; - - // If we were unparked, return now - if unparked { - return ParkResult::Unparked(thread_data.unpark_token.get()); - } - - // Lock our bucket again. Note that the hashtable may have been rehashed in - // the meantime. Our key may also have changed if we were requeued. 
- let (key, bucket) = lock_bucket_checked(&thread_data.key); - - // Now we need to check again if we were unparked or timed out. Unlike the - // last check this is precise because we hold the bucket lock. - if !thread_data.parker.timed_out() { - // SAFETY: We hold the lock here, as required - bucket.mutex.unlock(); - return ParkResult::Unparked(thread_data.unpark_token.get()); - } - - // We timed out, so we now need to remove our thread from the queue - let mut link = &bucket.queue_head; - let mut current = bucket.queue_head.get(); - let mut previous = ptr::null(); - let mut was_last_thread = true; - while !current.is_null() { - if current == thread_data { - let next = (*current).next_in_queue.get(); - link.set(next); - if bucket.queue_tail.get() == current { - bucket.queue_tail.set(previous); - } else { - // Scan the rest of the queue to see if there are any other - // entries with the given key. - let mut scan = next; - while !scan.is_null() { - if (*scan).key.load(Ordering::Relaxed) == key { - was_last_thread = false; - break; - } - scan = (*scan).next_in_queue.get(); - } - } - - // Callback to indicate that we timed out, and whether we were the - // last thread on the queue. - timed_out(key, was_last_thread); - break; - } else { - if (*current).key.load(Ordering::Relaxed) == key { - was_last_thread = false; - } - link = &(*current).next_in_queue; - previous = current; - current = link.get(); - } - } - - // There should be no way for our thread to have been removed from the queue - // if we timed out. - debug_assert!(!current.is_null()); - - // Unlock the bucket, we are done - // SAFETY: We hold the lock here, as required - bucket.mutex.unlock(); - ParkResult::TimedOut - }) -} - -/// Unparks one thread from the queue associated with the given key. -/// -/// The `callback` function is called while the queue is locked and before the -/// target thread is woken up. 
The `UnparkResult` argument to the function -/// indicates whether a thread was found in the queue and whether this was the -/// last thread in the queue. This value is also returned by `unpark_one`. -/// -/// The `callback` function should return an `UnparkToken` value which will be -/// passed to the thread that is unparked. If no thread is unparked then the -/// returned value is ignored. -/// -/// # Safety -/// -/// You should only call this function with an address that you control, since -/// you could otherwise interfere with the operation of other synchronization -/// primitives. -/// -/// The `callback` function is called while the queue is locked and must not -/// panic or call into any function in `parking_lot`. -/// -/// The `parking_lot` functions are not re-entrant and calling this method -/// from the context of an asynchronous signal handler may result in undefined -/// behavior, including corruption of internal state and/or deadlocks. -#[inline] -pub unsafe fn unpark_one( - key: usize, - callback: impl FnOnce(UnparkResult) -> UnparkToken, -) -> UnparkResult { - // Lock the bucket for the given key - let bucket = lock_bucket(key); - - // Find a thread with a matching key and remove it from the queue - let mut link = &bucket.queue_head; - let mut current = bucket.queue_head.get(); - let mut previous = ptr::null(); - let mut result = UnparkResult::default(); - while !current.is_null() { - if (*current).key.load(Ordering::Relaxed) == key { - // Remove the thread from the queue - let next = (*current).next_in_queue.get(); - link.set(next); - if bucket.queue_tail.get() == current { - bucket.queue_tail.set(previous); - } else { - // Scan the rest of the queue to see if there are any other - // entries with the given key. 
- let mut scan = next; - while !scan.is_null() { - if (*scan).key.load(Ordering::Relaxed) == key { - result.have_more_threads = true; - break; - } - scan = (*scan).next_in_queue.get(); - } - } - - // Invoke the callback before waking up the thread - result.unparked_threads = 1; - result.be_fair = (*bucket.fair_timeout.get()).should_timeout(); - let token = callback(result); - - // Set the token for the target thread - (*current).unpark_token.set(token); - - // This is a bit tricky: we first lock the ThreadParker to prevent - // the thread from exiting and freeing its ThreadData if its wait - // times out. Then we unlock the queue since we don't want to keep - // the queue locked while we perform a system call. Finally we wake - // up the parked thread. - let handle = (*current).parker.unpark_lock(); - // SAFETY: We hold the lock here, as required - bucket.mutex.unlock(); - handle.unpark(); - - return result; - } else { - link = &(*current).next_in_queue; - previous = current; - current = link.get(); - } - } - - // No threads with a matching key were found in the bucket - callback(result); - // SAFETY: We hold the lock here, as required - bucket.mutex.unlock(); - result -} - -/// Unparks all threads in the queue associated with the given key. -/// -/// The given `UnparkToken` is passed to all unparked threads. -/// -/// This function returns the number of threads that were unparked. -/// -/// # Safety -/// -/// You should only call this function with an address that you control, since -/// you could otherwise interfere with the operation of other synchronization -/// primitives. -/// -/// The `parking_lot` functions are not re-entrant and calling this method -/// from the context of an asynchronous signal handler may result in undefined -/// behavior, including corruption of internal state and/or deadlocks. 
-#[inline] -pub unsafe fn unpark_all(key: usize, unpark_token: UnparkToken) -> usize { - // Lock the bucket for the given key - let bucket = lock_bucket(key); - - // Remove all threads with the given key in the bucket - let mut link = &bucket.queue_head; - let mut current = bucket.queue_head.get(); - let mut previous = ptr::null(); - let mut threads = SmallVec::<[_; 8]>::new(); - while !current.is_null() { - if (*current).key.load(Ordering::Relaxed) == key { - // Remove the thread from the queue - let next = (*current).next_in_queue.get(); - link.set(next); - if bucket.queue_tail.get() == current { - bucket.queue_tail.set(previous); - } - - // Set the token for the target thread - (*current).unpark_token.set(unpark_token); - - // Don't wake up threads while holding the queue lock. See comment - // in unpark_one. For now just record which threads we need to wake - // up. - threads.push((*current).parker.unpark_lock()); - current = next; - } else { - link = &(*current).next_in_queue; - previous = current; - current = link.get(); - } - } - - // Unlock the bucket - // SAFETY: We hold the lock here, as required - bucket.mutex.unlock(); - - // Now that we are outside the lock, wake up all the threads that we removed - // from the queue. - let num_threads = threads.len(); - for handle in threads.into_iter() { - handle.unpark(); - } - - num_threads -} - -/// Removes all threads from the queue associated with `key_from`, optionally -/// unparks the first one and requeues the rest onto the queue associated with -/// `key_to`. -/// -/// The `validate` function is called while both queues are locked. Its return -/// value will determine which operation is performed, or whether the operation -/// should be aborted. See `RequeueOp` for details about the different possible -/// return values. -/// -/// The `callback` function is also called while both queues are locked. 
It is -/// passed the `RequeueOp` returned by `validate` and an `UnparkResult` -/// indicating whether a thread was unparked and whether there are threads still -/// parked in the new queue. This `UnparkResult` value is also returned by -/// `unpark_requeue`. -/// -/// The `callback` function should return an `UnparkToken` value which will be -/// passed to the thread that is unparked. If no thread is unparked then the -/// returned value is ignored. -/// -/// # Safety -/// -/// You should only call this function with an address that you control, since -/// you could otherwise interfere with the operation of other synchronization -/// primitives. -/// -/// The `validate` and `callback` functions are called while the queue is locked -/// and must not panic or call into any function in `parking_lot`. -#[inline] -pub unsafe fn unpark_requeue( - key_from: usize, - key_to: usize, - validate: impl FnOnce() -> RequeueOp, - callback: impl FnOnce(RequeueOp, UnparkResult) -> UnparkToken, -) -> UnparkResult { - // Lock the two buckets for the given key - let (bucket_from, bucket_to) = lock_bucket_pair(key_from, key_to); - - // If the validation function fails, just return - let mut result = UnparkResult::default(); - let op = validate(); - if op == RequeueOp::Abort { - // SAFETY: Both buckets are locked, as required. 
- unlock_bucket_pair(bucket_from, bucket_to); - return result; - } - - // Remove all threads with the given key in the source bucket - let mut link = &bucket_from.queue_head; - let mut current = bucket_from.queue_head.get(); - let mut previous = ptr::null(); - let mut requeue_threads: *const ThreadData = ptr::null(); - let mut requeue_threads_tail: *const ThreadData = ptr::null(); - let mut wakeup_thread = None; - while !current.is_null() { - if (*current).key.load(Ordering::Relaxed) == key_from { - // Remove the thread from the queue - let next = (*current).next_in_queue.get(); - link.set(next); - if bucket_from.queue_tail.get() == current { - bucket_from.queue_tail.set(previous); - } - - // Prepare the first thread for wakeup and requeue the rest. - if (op == RequeueOp::UnparkOneRequeueRest || op == RequeueOp::UnparkOne) - && wakeup_thread.is_none() - { - wakeup_thread = Some(current); - result.unparked_threads = 1; - } else { - if !requeue_threads.is_null() { - (*requeue_threads_tail).next_in_queue.set(current); - } else { - requeue_threads = current; - } - requeue_threads_tail = current; - (*current).key.store(key_to, Ordering::Relaxed); - result.requeued_threads += 1; - } - if op == RequeueOp::UnparkOne || op == RequeueOp::RequeueOne { - // Scan the rest of the queue to see if there are any other - // entries with the given key. 
- let mut scan = next; - while !scan.is_null() { - if (*scan).key.load(Ordering::Relaxed) == key_from { - result.have_more_threads = true; - break; - } - scan = (*scan).next_in_queue.get(); - } - break; - } - current = next; - } else { - link = &(*current).next_in_queue; - previous = current; - current = link.get(); - } - } - - // Add the requeued threads to the destination bucket - if !requeue_threads.is_null() { - (*requeue_threads_tail).next_in_queue.set(ptr::null()); - if !bucket_to.queue_head.get().is_null() { - (*bucket_to.queue_tail.get()) - .next_in_queue - .set(requeue_threads); - } else { - bucket_to.queue_head.set(requeue_threads); - } - bucket_to.queue_tail.set(requeue_threads_tail); - } - - // Invoke the callback before waking up the thread - if result.unparked_threads != 0 { - result.be_fair = (*bucket_from.fair_timeout.get()).should_timeout(); - } - let token = callback(op, result); - - // See comment in unpark_one for why we mess with the locking - if let Some(wakeup_thread) = wakeup_thread { - (*wakeup_thread).unpark_token.set(token); - let handle = (*wakeup_thread).parker.unpark_lock(); - // SAFETY: Both buckets are locked, as required. - unlock_bucket_pair(bucket_from, bucket_to); - handle.unpark(); - } else { - // SAFETY: Both buckets are locked, as required. - unlock_bucket_pair(bucket_from, bucket_to); - } - - result -} - -/// Unparks a number of threads from the front of the queue associated with -/// `key` depending on the results of a filter function which inspects the -/// `ParkToken` associated with each thread. -/// -/// The `filter` function is called for each thread in the queue or until -/// `FilterOp::Stop` is returned. This function is passed the `ParkToken` -/// associated with a particular thread, which is unparked if `FilterOp::Unpark` -/// is returned. -/// -/// The `callback` function is also called while both queues are locked. 
It is -/// passed an `UnparkResult` indicating the number of threads that were unparked -/// and whether there are still parked threads in the queue. This `UnparkResult` -/// value is also returned by `unpark_filter`. -/// -/// The `callback` function should return an `UnparkToken` value which will be -/// passed to all threads that are unparked. If no thread is unparked then the -/// returned value is ignored. -/// -/// # Safety -/// -/// You should only call this function with an address that you control, since -/// you could otherwise interfere with the operation of other synchronization -/// primitives. -/// -/// The `filter` and `callback` functions are called while the queue is locked -/// and must not panic or call into any function in `parking_lot`. -#[inline] -pub unsafe fn unpark_filter( - key: usize, - mut filter: impl FnMut(ParkToken) -> FilterOp, - callback: impl FnOnce(UnparkResult) -> UnparkToken, -) -> UnparkResult { - // Lock the bucket for the given key - let bucket = lock_bucket(key); - - // Go through the queue looking for threads with a matching key - let mut link = &bucket.queue_head; - let mut current = bucket.queue_head.get(); - let mut previous = ptr::null(); - let mut threads = SmallVec::<[_; 8]>::new(); - let mut result = UnparkResult::default(); - while !current.is_null() { - if (*current).key.load(Ordering::Relaxed) == key { - // Call the filter function with the thread's ParkToken - let next = (*current).next_in_queue.get(); - match filter((*current).park_token.get()) { - FilterOp::Unpark => { - // Remove the thread from the queue - link.set(next); - if bucket.queue_tail.get() == current { - bucket.queue_tail.set(previous); - } - - // Add the thread to our list of threads to unpark - threads.push((current, None)); - - current = next; - } - FilterOp::Skip => { - result.have_more_threads = true; - link = &(*current).next_in_queue; - previous = current; - current = link.get(); - } - FilterOp::Stop => { - result.have_more_threads = true; - 
break; - } - } - } else { - link = &(*current).next_in_queue; - previous = current; - current = link.get(); - } - } - - // Invoke the callback before waking up the threads - result.unparked_threads = threads.len(); - if result.unparked_threads != 0 { - result.be_fair = (*bucket.fair_timeout.get()).should_timeout(); - } - let token = callback(result); - - // Pass the token to all threads that are going to be unparked and prepare - // them for unparking. - for t in threads.iter_mut() { - (*t.0).unpark_token.set(token); - t.1 = Some((*t.0).parker.unpark_lock()); - } - - // SAFETY: We hold the lock here, as required - bucket.mutex.unlock(); - - // Now that we are outside the lock, wake up all the threads that we removed - // from the queue. - for (_, handle) in threads.into_iter() { - handle.unchecked_unwrap().unpark(); - } - - result -} - -/// \[Experimental\] Deadlock detection -/// -/// Enabled via the `deadlock_detection` feature flag. -pub mod deadlock { - #[cfg(feature = "deadlock_detection")] - use super::deadlock_impl; - - #[cfg(feature = "deadlock_detection")] - pub(super) use super::deadlock_impl::DeadlockData; - - /// Acquire a resource identified by key in the deadlock detector - /// Noop if deadlock_detection feature isn't enabled. - /// - /// # Safety - /// - /// Call after the resource is acquired - #[inline] - pub unsafe fn acquire_resource(_key: usize) { - #[cfg(feature = "deadlock_detection")] - deadlock_impl::acquire_resource(_key); - } - - /// Release a resource identified by key in the deadlock detector. - /// Noop if deadlock_detection feature isn't enabled. - /// - /// # Panics - /// - /// Panics if the resource was already released or wasn't acquired in this thread. - /// - /// # Safety - /// - /// Call before the resource is released - #[inline] - pub unsafe fn release_resource(_key: usize) { - #[cfg(feature = "deadlock_detection")] - deadlock_impl::release_resource(_key); - } - - /// Returns all deadlocks detected *since* the last call. 
- /// Each cycle consist of a vector of `DeadlockedThread`. - #[cfg(feature = "deadlock_detection")] - #[inline] - pub fn check_deadlock() -> Vec> { - deadlock_impl::check_deadlock() - } - - #[inline] - pub(super) unsafe fn on_unpark(_td: &super::ThreadData) { - #[cfg(feature = "deadlock_detection")] - deadlock_impl::on_unpark(_td); - } -} - -#[cfg(feature = "deadlock_detection")] -mod deadlock_impl { - use super::{get_hashtable, lock_bucket, with_thread_data, ThreadData, NUM_THREADS}; - use crate::thread_parker::{ThreadParkerT, UnparkHandleT}; - use crate::word_lock::WordLock; - use backtrace::Backtrace; - use petgraph; - use petgraph::graphmap::DiGraphMap; - use std::cell::{Cell, UnsafeCell}; - use std::collections::HashSet; - use std::sync::atomic::Ordering; - use std::sync::mpsc; - use thread_id; - - /// Representation of a deadlocked thread - pub struct DeadlockedThread { - thread_id: usize, - backtrace: Backtrace, - } - - impl DeadlockedThread { - /// The system thread id - pub fn thread_id(&self) -> usize { - self.thread_id - } - - /// The thread backtrace - pub fn backtrace(&self) -> &Backtrace { - &self.backtrace - } - } - - pub struct DeadlockData { - // Currently owned resources (keys) - resources: UnsafeCell>, - - // Set when there's a pending callstack request - deadlocked: Cell, - - // Sender used to report the backtrace - backtrace_sender: UnsafeCell>>, - - // System thread id - thread_id: usize, - } - - impl DeadlockData { - pub fn new() -> Self { - DeadlockData { - resources: UnsafeCell::new(Vec::new()), - deadlocked: Cell::new(false), - backtrace_sender: UnsafeCell::new(None), - thread_id: thread_id::get(), - } - } - } - - pub(super) unsafe fn on_unpark(td: &ThreadData) { - if td.deadlock_data.deadlocked.get() { - let sender = (*td.deadlock_data.backtrace_sender.get()).take().unwrap(); - sender - .send(DeadlockedThread { - thread_id: td.deadlock_data.thread_id, - backtrace: Backtrace::new(), - }) - .unwrap(); - // make sure to close this sender - 
drop(sender); - - // park until the end of the time - td.parker.prepare_park(); - td.parker.park(); - unreachable!("unparked deadlocked thread!"); - } - } - - pub unsafe fn acquire_resource(key: usize) { - with_thread_data(|thread_data| { - (*thread_data.deadlock_data.resources.get()).push(key); - }); - } - - pub unsafe fn release_resource(key: usize) { - with_thread_data(|thread_data| { - let resources = &mut (*thread_data.deadlock_data.resources.get()); - - // There is only one situation where we can fail to find the - // resource: we are currently running TLS destructors and our - // ThreadData has already been freed. There isn't much we can do - // about it at this point, so just ignore it. - if let Some(p) = resources.iter().rposition(|x| *x == key) { - resources.swap_remove(p); - } - }); - } - - pub fn check_deadlock() -> Vec> { - unsafe { - // fast pass - if check_wait_graph_fast() { - // double check - check_wait_graph_slow() - } else { - Vec::new() - } - } - } - - // Simple algorithm that builds a wait graph f the threads and the resources, - // then checks for the presence of cycles (deadlocks). - // This variant isn't precise as it doesn't lock the entire table before checking - unsafe fn check_wait_graph_fast() -> bool { - let table = get_hashtable(); - let thread_count = NUM_THREADS.load(Ordering::Relaxed); - let mut graph = DiGraphMap::::with_capacity(thread_count * 2, thread_count * 2); - - for b in &(*table).entries[..] 
{ - b.mutex.lock(); - let mut current = b.queue_head.get(); - while !current.is_null() { - if !(*current).parked_with_timeout.get() - && !(*current).deadlock_data.deadlocked.get() - { - // .resources are waiting for their owner - for &resource in &(*(*current).deadlock_data.resources.get()) { - graph.add_edge(resource, current as usize, ()); - } - // owner waits for resource .key - graph.add_edge(current as usize, (*current).key.load(Ordering::Relaxed), ()); - } - current = (*current).next_in_queue.get(); - } - // SAFETY: We hold the lock here, as required - b.mutex.unlock(); - } - - petgraph::algo::is_cyclic_directed(&graph) - } - - #[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] - enum WaitGraphNode { - Thread(*const ThreadData), - Resource(usize), - } - - use self::WaitGraphNode::*; - - // Contrary to the _fast variant this locks the entries table before looking for cycles. - // Returns all detected thread wait cycles. - // Note that once a cycle is reported it's never reported again. - unsafe fn check_wait_graph_slow() -> Vec> { - static DEADLOCK_DETECTION_LOCK: WordLock = WordLock::new(); - DEADLOCK_DETECTION_LOCK.lock(); - - let mut table = get_hashtable(); - loop { - // Lock all buckets in the old table - for b in &table.entries[..] { - b.mutex.lock(); - } - - // Now check if our table is still the latest one. Another thread could - // have grown the hash table between us getting and locking the hash table. - let new_table = get_hashtable(); - if new_table as *const _ == table as *const _ { - break; - } - - // Unlock buckets and try again - for b in &table.entries[..] { - // SAFETY: We hold the lock here, as required - b.mutex.unlock(); - } - - table = new_table; - } - - let thread_count = NUM_THREADS.load(Ordering::Relaxed); - let mut graph = - DiGraphMap::::with_capacity(thread_count * 2, thread_count * 2); - - for b in &table.entries[..] 
{ - let mut current = b.queue_head.get(); - while !current.is_null() { - if !(*current).parked_with_timeout.get() - && !(*current).deadlock_data.deadlocked.get() - { - // .resources are waiting for their owner - for &resource in &(*(*current).deadlock_data.resources.get()) { - graph.add_edge(Resource(resource), Thread(current), ()); - } - // owner waits for resource .key - graph.add_edge( - Thread(current), - Resource((*current).key.load(Ordering::Relaxed)), - (), - ); - } - current = (*current).next_in_queue.get(); - } - } - - for b in &table.entries[..] { - // SAFETY: We hold the lock here, as required - b.mutex.unlock(); - } - - // find cycles - let cycles = graph_cycles(&graph); - - let mut results = Vec::with_capacity(cycles.len()); - - for cycle in cycles { - let (sender, receiver) = mpsc::channel(); - for td in cycle { - let bucket = lock_bucket((*td).key.load(Ordering::Relaxed)); - (*td).deadlock_data.deadlocked.set(true); - *(*td).deadlock_data.backtrace_sender.get() = Some(sender.clone()); - let handle = (*td).parker.unpark_lock(); - // SAFETY: We hold the lock here, as required - bucket.mutex.unlock(); - // unpark the deadlocked thread! 
- // on unpark it'll notice the deadlocked flag and report back - handle.unpark(); - } - // make sure to drop our sender before collecting results - drop(sender); - results.push(receiver.iter().collect()); - } - - DEADLOCK_DETECTION_LOCK.unlock(); - - results - } - - // normalize a cycle to start with the "smallest" node - fn normalize_cycle(input: &[T]) -> Vec { - let min_pos = input - .iter() - .enumerate() - .min_by_key(|&(_, &t)| t) - .map(|(p, _)| p) - .unwrap_or(0); - input - .iter() - .cycle() - .skip(min_pos) - .take(input.len()) - .cloned() - .collect() - } - - // returns all thread cycles in the wait graph - fn graph_cycles(g: &DiGraphMap) -> Vec> { - use petgraph::visit::depth_first_search; - use petgraph::visit::DfsEvent; - use petgraph::visit::NodeIndexable; - - let mut cycles = HashSet::new(); - let mut path = Vec::with_capacity(g.node_bound()); - // start from threads to get the correct threads cycle - let threads = g - .nodes() - .filter(|n| if let &Thread(_) = n { true } else { false }); - - depth_first_search(g, threads, |e| match e { - DfsEvent::Discover(Thread(n), _) => path.push(n), - DfsEvent::Finish(Thread(_), _) => { - path.pop(); - } - DfsEvent::BackEdge(_, Thread(n)) => { - let from = path.iter().rposition(|&i| i == n).unwrap(); - cycles.insert(normalize_cycle(&path[from..])); - } - _ => (), - }); - - cycles.iter().cloned().collect() - } -} - -#[cfg(test)] -mod tests { - use super::{ThreadData, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; - use std::{ - ptr, - sync::{ - atomic::{AtomicIsize, AtomicPtr, AtomicUsize, Ordering}, - Arc, - }, - thread, - time::Duration, - }; - - /// Calls a closure for every `ThreadData` currently parked on a given key - fn for_each(key: usize, mut f: impl FnMut(&ThreadData)) { - let bucket = super::lock_bucket(key); - - let mut current: *const ThreadData = bucket.queue_head.get(); - while !current.is_null() { - let current_ref = unsafe { &*current }; - if current_ref.key.load(Ordering::Relaxed) == key { - 
f(current_ref); - } - current = current_ref.next_in_queue.get(); - } - - // SAFETY: We hold the lock here, as required - unsafe { bucket.mutex.unlock() }; - } - - macro_rules! test { - ( $( $name:ident( - repeats: $repeats:expr, - latches: $latches:expr, - delay: $delay:expr, - threads: $threads:expr, - single_unparks: $single_unparks:expr); - )* ) => { - $(#[test] - fn $name() { - let delay = Duration::from_micros($delay); - for _ in 0..$repeats { - run_parking_test($latches, delay, $threads, $single_unparks); - } - })* - }; - } - - test! { - unpark_all_one_fast( - repeats: 1000, latches: 1, delay: 0, threads: 1, single_unparks: 0 - ); - unpark_all_hundred_fast( - repeats: 100, latches: 1, delay: 0, threads: 100, single_unparks: 0 - ); - unpark_one_one_fast( - repeats: 1000, latches: 1, delay: 0, threads: 1, single_unparks: 1 - ); - unpark_one_hundred_fast( - repeats: 20, latches: 1, delay: 0, threads: 100, single_unparks: 100 - ); - unpark_one_fifty_then_fifty_all_fast( - repeats: 50, latches: 1, delay: 0, threads: 100, single_unparks: 50 - ); - unpark_all_one( - repeats: 100, latches: 1, delay: 10000, threads: 1, single_unparks: 0 - ); - unpark_all_hundred( - repeats: 100, latches: 1, delay: 10000, threads: 100, single_unparks: 0 - ); - unpark_one_one( - repeats: 10, latches: 1, delay: 10000, threads: 1, single_unparks: 1 - ); - unpark_one_fifty( - repeats: 1, latches: 1, delay: 10000, threads: 50, single_unparks: 50 - ); - unpark_one_fifty_then_fifty_all( - repeats: 2, latches: 1, delay: 10000, threads: 100, single_unparks: 50 - ); - hundred_unpark_all_one_fast( - repeats: 100, latches: 100, delay: 0, threads: 1, single_unparks: 0 - ); - hundred_unpark_all_one( - repeats: 1, latches: 100, delay: 10000, threads: 1, single_unparks: 0 - ); - } - - fn run_parking_test( - num_latches: usize, - delay: Duration, - num_threads: usize, - num_single_unparks: usize, - ) { - let mut tests = Vec::with_capacity(num_latches); - - for _ in 0..num_latches { - let test = 
Arc::new(SingleLatchTest::new(num_threads)); - let mut threads = Vec::with_capacity(num_threads); - for _ in 0..num_threads { - let test = test.clone(); - threads.push(thread::spawn(move || test.run())); - } - tests.push((test, threads)); - } - - for unpark_index in 0..num_single_unparks { - thread::sleep(delay); - for (test, _) in &tests { - test.unpark_one(unpark_index); - } - } - - for (test, threads) in tests { - test.finish(num_single_unparks); - for thread in threads { - thread.join().expect("Test thread panic"); - } - } - } - - struct SingleLatchTest { - semaphore: AtomicIsize, - num_awake: AtomicUsize, - /// Holds the pointer to the last *unprocessed* woken up thread. - last_awoken: AtomicPtr, - /// Total number of threads participating in this test. - num_threads: usize, - } - - impl SingleLatchTest { - pub fn new(num_threads: usize) -> Self { - Self { - // This implements a fair (FIFO) semaphore, and it starts out unavailable. - semaphore: AtomicIsize::new(0), - num_awake: AtomicUsize::new(0), - last_awoken: AtomicPtr::new(ptr::null_mut()), - num_threads, - } - } - - pub fn run(&self) { - // Get one slot from the semaphore - self.down(); - - // Report back to the test verification code that this thread woke up - let this_thread_ptr = super::with_thread_data(|t| t as *const _ as *mut _); - self.last_awoken.store(this_thread_ptr, Ordering::SeqCst); - self.num_awake.fetch_add(1, Ordering::SeqCst); - } - - pub fn unpark_one(&self, single_unpark_index: usize) { - // last_awoken should be null at all times except between self.up() and at the bottom - // of this method where it's reset to null again - assert!(self.last_awoken.load(Ordering::SeqCst).is_null()); - - let mut queue: Vec<*mut ThreadData> = Vec::with_capacity(self.num_threads); - for_each(self.semaphore_addr(), |thread_data| { - queue.push(thread_data as *const _ as *mut _); - }); - assert!(queue.len() <= self.num_threads - single_unpark_index); - - let num_awake_before_up = 
self.num_awake.load(Ordering::SeqCst); - - self.up(); - - // Wait for a parked thread to wake up and update num_awake + last_awoken. - while self.num_awake.load(Ordering::SeqCst) != num_awake_before_up + 1 { - thread::yield_now(); - } - - // At this point the other thread should have set last_awoken inside the run() method - let last_awoken = self.last_awoken.load(Ordering::SeqCst); - assert!(!last_awoken.is_null()); - if !queue.is_empty() && queue[0] != last_awoken { - panic!( - "Woke up wrong thread:\n\tqueue: {:?}\n\tlast awoken: {:?}", - queue, last_awoken - ); - } - self.last_awoken.store(ptr::null_mut(), Ordering::SeqCst); - } - - pub fn finish(&self, num_single_unparks: usize) { - // The amount of threads not unparked via unpark_one - let mut num_threads_left = self.num_threads.checked_sub(num_single_unparks).unwrap(); - - // Wake remaining threads up with unpark_all. Has to be in a loop, because there might - // still be threads that has not yet parked. - while num_threads_left > 0 { - let mut num_waiting_on_address = 0; - for_each(self.semaphore_addr(), |_thread_data| { - num_waiting_on_address += 1; - }); - assert!(num_waiting_on_address <= num_threads_left); - - let num_awake_before_unpark = self.num_awake.load(Ordering::SeqCst); - - let num_unparked = - unsafe { super::unpark_all(self.semaphore_addr(), DEFAULT_UNPARK_TOKEN) }; - assert!(num_unparked >= num_waiting_on_address); - assert!(num_unparked <= num_threads_left); - - // Wait for all unparked threads to wake up and update num_awake + last_awoken. 
- while self.num_awake.load(Ordering::SeqCst) - != num_awake_before_unpark + num_unparked - { - thread::yield_now() - } - - num_threads_left = num_threads_left.checked_sub(num_unparked).unwrap(); - } - // By now, all threads should have been woken up - assert_eq!(self.num_awake.load(Ordering::SeqCst), self.num_threads); - - // Make sure no thread is parked on our semaphore address - let mut num_waiting_on_address = 0; - for_each(self.semaphore_addr(), |_thread_data| { - num_waiting_on_address += 1; - }); - assert_eq!(num_waiting_on_address, 0); - } - - pub fn down(&self) { - let old_semaphore_value = self.semaphore.fetch_sub(1, Ordering::SeqCst); - - if old_semaphore_value > 0 { - // We acquired the semaphore. Done. - return; - } - - // We need to wait. - let validate = || true; - let before_sleep = || {}; - let timed_out = |_, _| {}; - unsafe { - super::park( - self.semaphore_addr(), - validate, - before_sleep, - timed_out, - DEFAULT_PARK_TOKEN, - None, - ); - } - } - - pub fn up(&self) { - let old_semaphore_value = self.semaphore.fetch_add(1, Ordering::SeqCst); - - // Check if anyone was waiting on the semaphore. If they were, then pass ownership to them. - if old_semaphore_value < 0 { - // We need to continue until we have actually unparked someone. It might be that - // the thread we want to pass ownership to has decremented the semaphore counter, - // but not yet parked. 
- loop { - match unsafe { - super::unpark_one(self.semaphore_addr(), |_| DEFAULT_UNPARK_TOKEN) - .unparked_threads - } { - 1 => break, - 0 => (), - i => panic!("Should not wake up {} threads", i), - } - } - } - } - - fn semaphore_addr(&self) -> usize { - &self.semaphore as *const _ as usize - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/spinwait.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/spinwait.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/spinwait.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/spinwait.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,74 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use crate::thread_parker; -use core::hint::spin_loop; - -// Wastes some CPU time for the given number of iterations, -// using a hint to indicate to the CPU that we are spinning. -#[inline] -fn cpu_relax(iterations: u32) { - for _ in 0..iterations { - spin_loop() - } -} - -/// A counter used to perform exponential backoff in spin loops. -#[derive(Default)] -pub struct SpinWait { - counter: u32, -} - -impl SpinWait { - /// Creates a new `SpinWait`. - #[inline] - pub fn new() -> Self { - Self::default() - } - - /// Resets a `SpinWait` to its initial state. - #[inline] - pub fn reset(&mut self) { - self.counter = 0; - } - - /// Spins until the sleep threshold has been reached. - /// - /// This function returns whether the sleep threshold has been reached, at - /// which point further spinning has diminishing returns and the thread - /// should be parked instead. - /// - /// The spin strategy will initially use a CPU-bound loop but will fall back - /// to yielding the CPU to the OS after a few iterations. 
- #[inline] - pub fn spin(&mut self) -> bool { - if self.counter >= 10 { - return false; - } - self.counter += 1; - if self.counter <= 3 { - cpu_relax(1 << self.counter); - } else { - thread_parker::thread_yield(); - } - true - } - - /// Spins without yielding the thread to the OS. - /// - /// Instead, the backoff is simply capped at a maximum value. This can be - /// used to improve throughput in `compare_exchange` loops that have high - /// contention. - #[inline] - pub fn spin_no_yield(&mut self) { - self.counter += 1; - if self.counter > 10 { - self.counter = 10; - } - cpu_relax(1 << self.counter); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/generic.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/generic.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/generic.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/generic.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,79 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -//! A simple spin lock based thread parker. Used on platforms without better -//! parking facilities available. 
- -use core::hint::spin_loop; -use core::sync::atomic::{AtomicBool, Ordering}; -use std::thread; -use std::time::Instant; - -// Helper type for putting a thread to sleep until some other thread wakes it up -pub struct ThreadParker { - parked: AtomicBool, -} - -impl super::ThreadParkerT for ThreadParker { - type UnparkHandle = UnparkHandle; - - const IS_CHEAP_TO_CONSTRUCT: bool = true; - - #[inline] - fn new() -> ThreadParker { - ThreadParker { - parked: AtomicBool::new(false), - } - } - - #[inline] - unsafe fn prepare_park(&self) { - self.parked.store(true, Ordering::Relaxed); - } - - #[inline] - unsafe fn timed_out(&self) -> bool { - self.parked.load(Ordering::Relaxed) != false - } - - #[inline] - unsafe fn park(&self) { - while self.parked.load(Ordering::Acquire) != false { - spin_loop(); - } - } - - #[inline] - unsafe fn park_until(&self, timeout: Instant) -> bool { - while self.parked.load(Ordering::Acquire) != false { - if Instant::now() >= timeout { - return false; - } - spin_loop(); - } - true - } - - #[inline] - unsafe fn unpark_lock(&self) -> UnparkHandle { - // We don't need to lock anything, just clear the state - self.parked.store(false, Ordering::Release); - UnparkHandle(()) - } -} - -pub struct UnparkHandle(()); - -impl super::UnparkHandleT for UnparkHandle { - #[inline] - unsafe fn unpark(self) {} -} - -#[inline] -pub fn thread_yield() { - thread::yield_now(); -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/linux.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/linux.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/linux.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/linux.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,156 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be -// copied, modified, or distributed except according to those terms. - -use core::{ - ptr, - sync::atomic::{AtomicI32, Ordering}, -}; -use libc; -use std::thread; -use std::time::Instant; - -// x32 Linux uses a non-standard type for tv_nsec in timespec. -// See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 -#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] -#[allow(non_camel_case_types)] -type tv_nsec_t = i64; -#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] -#[allow(non_camel_case_types)] -type tv_nsec_t = libc::c_long; - -fn errno() -> libc::c_int { - #[cfg(target_os = "linux")] - unsafe { - *libc::__errno_location() - } - #[cfg(target_os = "android")] - unsafe { - *libc::__errno() - } -} - -// Helper type for putting a thread to sleep until some other thread wakes it up -pub struct ThreadParker { - futex: AtomicI32, -} - -impl super::ThreadParkerT for ThreadParker { - type UnparkHandle = UnparkHandle; - - const IS_CHEAP_TO_CONSTRUCT: bool = true; - - #[inline] - fn new() -> ThreadParker { - ThreadParker { - futex: AtomicI32::new(0), - } - } - - #[inline] - unsafe fn prepare_park(&self) { - self.futex.store(1, Ordering::Relaxed); - } - - #[inline] - unsafe fn timed_out(&self) -> bool { - self.futex.load(Ordering::Relaxed) != 0 - } - - #[inline] - unsafe fn park(&self) { - while self.futex.load(Ordering::Acquire) != 0 { - self.futex_wait(None); - } - } - - #[inline] - unsafe fn park_until(&self, timeout: Instant) -> bool { - while self.futex.load(Ordering::Acquire) != 0 { - let now = Instant::now(); - if timeout <= now { - return false; - } - let diff = timeout - now; - if diff.as_secs() as libc::time_t as u64 != diff.as_secs() { - // Timeout overflowed, just sleep indefinitely - self.park(); - return true; - } - // SAFETY: libc::timespec is zero initializable. 
- let mut ts: libc::timespec = std::mem::zeroed(); - ts.tv_sec = diff.as_secs() as libc::time_t; - ts.tv_nsec = diff.subsec_nanos() as tv_nsec_t; - self.futex_wait(Some(ts)); - } - true - } - - // Locks the parker to prevent the target thread from exiting. This is - // necessary to ensure that thread-local ThreadData objects remain valid. - // This should be called while holding the queue lock. - #[inline] - unsafe fn unpark_lock(&self) -> UnparkHandle { - // We don't need to lock anything, just clear the state - self.futex.store(0, Ordering::Release); - - UnparkHandle { futex: &self.futex } - } -} - -impl ThreadParker { - #[inline] - fn futex_wait(&self, ts: Option) { - let ts_ptr = ts - .as_ref() - .map(|ts_ref| ts_ref as *const _) - .unwrap_or(ptr::null()); - let r = unsafe { - libc::syscall( - libc::SYS_futex, - &self.futex, - libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG, - 1, - ts_ptr, - ) - }; - debug_assert!(r == 0 || r == -1); - if r == -1 { - debug_assert!( - errno() == libc::EINTR - || errno() == libc::EAGAIN - || (ts.is_some() && errno() == libc::ETIMEDOUT) - ); - } - } -} - -pub struct UnparkHandle { - futex: *const AtomicI32, -} - -impl super::UnparkHandleT for UnparkHandle { - #[inline] - unsafe fn unpark(self) { - // The thread data may have been freed at this point, but it doesn't - // matter since the syscall will just return EFAULT in that case. 
- let r = libc::syscall( - libc::SYS_futex, - self.futex, - libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG, - 1, - ); - debug_assert!(r == 0 || r == 1 || r == -1); - if r == -1 { - debug_assert_eq!(errno(), libc::EFAULT); - } - } -} - -#[inline] -pub fn thread_yield() { - thread::yield_now(); -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/mod.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/mod.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,85 +0,0 @@ -use cfg_if::cfg_if; -use std::time::Instant; - -/// Trait for the platform thread parker implementation. -/// -/// All unsafe methods are unsafe because the Unix thread parker is based on -/// pthread mutexes and condvars. Those primitives must not be moved and used -/// from any other memory address than the one they were located at when they -/// were initialized. As such, it's UB to call any unsafe method on -/// `ThreadParkerT` if the implementing instance has moved since the last -/// call to any of the unsafe methods. -pub trait ThreadParkerT { - type UnparkHandle: UnparkHandleT; - - const IS_CHEAP_TO_CONSTRUCT: bool; - - fn new() -> Self; - - /// Prepares the parker. This should be called before adding it to the queue. - unsafe fn prepare_park(&self); - - /// Checks if the park timed out. This should be called while holding the - /// queue lock after park_until has returned false. - unsafe fn timed_out(&self) -> bool; - - /// Parks the thread until it is unparked. This should be called after it has - /// been added to the queue, after unlocking the queue. - unsafe fn park(&self); - - /// Parks the thread until it is unparked or the timeout is reached. This - /// should be called after it has been added to the queue, after unlocking - /// the queue. 
Returns true if we were unparked and false if we timed out. - unsafe fn park_until(&self, timeout: Instant) -> bool; - - /// Locks the parker to prevent the target thread from exiting. This is - /// necessary to ensure that thread-local ThreadData objects remain valid. - /// This should be called while holding the queue lock. - unsafe fn unpark_lock(&self) -> Self::UnparkHandle; -} - -/// Handle for a thread that is about to be unparked. We need to mark the thread -/// as unparked while holding the queue lock, but we delay the actual unparking -/// until after the queue lock is released. -pub trait UnparkHandleT { - /// Wakes up the parked thread. This should be called after the queue lock is - /// released to avoid blocking the queue for too long. - /// - /// This method is unsafe for the same reason as the unsafe methods in - /// `ThreadParkerT`. - unsafe fn unpark(self); -} - -cfg_if! { - if #[cfg(any(target_os = "linux", target_os = "android"))] { - #[path = "linux.rs"] - mod imp; - } else if #[cfg(unix)] { - #[path = "unix.rs"] - mod imp; - } else if #[cfg(windows)] { - #[path = "windows/mod.rs"] - mod imp; - } else if #[cfg(target_os = "redox")] { - #[path = "redox.rs"] - mod imp; - } else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] { - #[path = "sgx.rs"] - mod imp; - } else if #[cfg(all( - feature = "nightly", - target_family = "wasm", - target_feature = "atomics" - ))] { - #[path = "wasm_atomic.rs"] - mod imp; - } else if #[cfg(target_family = "wasm")] { - #[path = "wasm.rs"] - mod imp; - } else { - #[path = "generic.rs"] - mod imp; - } -} - -pub use self::imp::{thread_yield, ThreadParker, UnparkHandle}; diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/redox.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/redox.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/redox.rs 2024-02-06 12:28:08.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/redox.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,139 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use core::{ - ptr, - sync::atomic::{AtomicI32, Ordering}, -}; -use std::thread; -use std::time::Instant; -use syscall::{ - call::futex, - data::TimeSpec, - error::{Error, EAGAIN, EFAULT, EINTR, ETIMEDOUT}, - flag::{FUTEX_WAIT, FUTEX_WAKE}, -}; - -const UNPARKED: i32 = 0; -const PARKED: i32 = 1; - -// Helper type for putting a thread to sleep until some other thread wakes it up -pub struct ThreadParker { - futex: AtomicI32, -} - -impl super::ThreadParkerT for ThreadParker { - type UnparkHandle = UnparkHandle; - - const IS_CHEAP_TO_CONSTRUCT: bool = true; - - #[inline] - fn new() -> ThreadParker { - ThreadParker { - futex: AtomicI32::new(UNPARKED), - } - } - - #[inline] - unsafe fn prepare_park(&self) { - self.futex.store(PARKED, Ordering::Relaxed); - } - - #[inline] - unsafe fn timed_out(&self) -> bool { - self.futex.load(Ordering::Relaxed) != UNPARKED - } - - #[inline] - unsafe fn park(&self) { - while self.futex.load(Ordering::Acquire) != UNPARKED { - self.futex_wait(None); - } - } - - #[inline] - unsafe fn park_until(&self, timeout: Instant) -> bool { - while self.futex.load(Ordering::Acquire) != UNPARKED { - let now = Instant::now(); - if timeout <= now { - return false; - } - let diff = timeout - now; - if diff.as_secs() > i64::max_value() as u64 { - // Timeout overflowed, just sleep indefinitely - self.park(); - return true; - } - let ts = TimeSpec { - tv_sec: diff.as_secs() as i64, - tv_nsec: diff.subsec_nanos() as i32, - }; - self.futex_wait(Some(ts)); - } - true - } - - #[inline] - unsafe fn unpark_lock(&self) -> UnparkHandle { - // We don't need to lock anything, just clear the state - self.futex.store(UNPARKED, 
Ordering::Release); - - UnparkHandle { futex: self.ptr() } - } -} - -impl ThreadParker { - #[inline] - fn futex_wait(&self, ts: Option) { - let ts_ptr = ts - .as_ref() - .map(|ts_ref| ts_ref as *const _) - .unwrap_or(ptr::null()); - let r = unsafe { - futex( - self.ptr(), - FUTEX_WAIT, - PARKED, - ts_ptr as usize, - ptr::null_mut(), - ) - }; - match r { - Ok(r) => debug_assert_eq!(r, 0), - Err(Error { errno }) => { - debug_assert!(errno == EINTR || errno == EAGAIN || errno == ETIMEDOUT); - } - } - } - - #[inline] - fn ptr(&self) -> *mut i32 { - &self.futex as *const AtomicI32 as *mut i32 - } -} - -pub struct UnparkHandle { - futex: *mut i32, -} - -impl super::UnparkHandleT for UnparkHandle { - #[inline] - unsafe fn unpark(self) { - // The thread data may have been freed at this point, but it doesn't - // matter since the syscall will just return EFAULT in that case. - let r = futex(self.futex, FUTEX_WAKE, PARKED, 0, ptr::null_mut()); - match r { - Ok(num_woken) => debug_assert!(num_woken == 0 || num_woken == 1), - Err(Error { errno }) => debug_assert_eq!(errno, EFAULT), - } - } -} - -#[inline] -pub fn thread_yield() { - thread::yield_now(); -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/sgx.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/sgx.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/sgx.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/sgx.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,94 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. 
- -use core::sync::atomic::{AtomicBool, Ordering}; -use std::time::Instant; -use std::{ - io, - os::fortanix_sgx::{ - thread::current as current_tcs, - usercalls::{ - self, - raw::{Tcs, EV_UNPARK, WAIT_INDEFINITE}, - }, - }, - thread, -}; - -// Helper type for putting a thread to sleep until some other thread wakes it up -pub struct ThreadParker { - parked: AtomicBool, - tcs: Tcs, -} - -impl super::ThreadParkerT for ThreadParker { - type UnparkHandle = UnparkHandle; - - const IS_CHEAP_TO_CONSTRUCT: bool = true; - - #[inline] - fn new() -> ThreadParker { - ThreadParker { - parked: AtomicBool::new(false), - tcs: current_tcs(), - } - } - - #[inline] - unsafe fn prepare_park(&self) { - self.parked.store(true, Ordering::Relaxed); - } - - #[inline] - unsafe fn timed_out(&self) -> bool { - self.parked.load(Ordering::Relaxed) - } - - #[inline] - unsafe fn park(&self) { - while self.parked.load(Ordering::Acquire) { - let result = usercalls::wait(EV_UNPARK, WAIT_INDEFINITE); - debug_assert_eq!(result.expect("wait returned error") & EV_UNPARK, EV_UNPARK); - } - } - - #[inline] - unsafe fn park_until(&self, _timeout: Instant) -> bool { - // FIXME: https://github.com/fortanix/rust-sgx/issues/31 - panic!("timeout not supported in SGX"); - } - - #[inline] - unsafe fn unpark_lock(&self) -> UnparkHandle { - // We don't need to lock anything, just clear the state - self.parked.store(false, Ordering::Release); - UnparkHandle(self.tcs) - } -} - -pub struct UnparkHandle(Tcs); - -impl super::UnparkHandleT for UnparkHandle { - #[inline] - unsafe fn unpark(self) { - let result = usercalls::send(EV_UNPARK, Some(self.0)); - if cfg!(debug_assertions) { - if let Err(error) = result { - // `InvalidInput` may be returned if the thread we send to has - // already been unparked and exited. 
- if error.kind() != io::ErrorKind::InvalidInput { - panic!("send returned an unexpected error: {:?}", error); - } - } - } - } -} - -#[inline] -pub fn thread_yield() { - thread::yield_now(); -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/unix.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/unix.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/unix.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/unix.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,256 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -#[cfg(any(target_os = "macos", target_os = "tvos", target_os = "ios", target_os = "watchos"))] -use core::ptr; -use core::{ - cell::{Cell, UnsafeCell}, - mem::MaybeUninit, -}; -use libc; -use std::time::Instant; -use std::{thread, time::Duration}; - -// x32 Linux uses a non-standard type for tv_nsec in timespec. 
-// See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 -#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] -#[allow(non_camel_case_types)] -type tv_nsec_t = i64; -#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] -#[allow(non_camel_case_types)] -type tv_nsec_t = libc::c_long; - -// Helper type for putting a thread to sleep until some other thread wakes it up -pub struct ThreadParker { - should_park: Cell, - mutex: UnsafeCell, - condvar: UnsafeCell, - initialized: Cell, -} - -impl super::ThreadParkerT for ThreadParker { - type UnparkHandle = UnparkHandle; - - const IS_CHEAP_TO_CONSTRUCT: bool = false; - - #[inline] - fn new() -> ThreadParker { - ThreadParker { - should_park: Cell::new(false), - mutex: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER), - condvar: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER), - initialized: Cell::new(false), - } - } - - #[inline] - unsafe fn prepare_park(&self) { - self.should_park.set(true); - if !self.initialized.get() { - self.init(); - self.initialized.set(true); - } - } - - #[inline] - unsafe fn timed_out(&self) -> bool { - // We need to grab the mutex here because another thread may be - // concurrently executing UnparkHandle::unpark, which is done without - // holding the queue lock. 
- let r = libc::pthread_mutex_lock(self.mutex.get()); - debug_assert_eq!(r, 0); - let should_park = self.should_park.get(); - let r = libc::pthread_mutex_unlock(self.mutex.get()); - debug_assert_eq!(r, 0); - should_park - } - - #[inline] - unsafe fn park(&self) { - let r = libc::pthread_mutex_lock(self.mutex.get()); - debug_assert_eq!(r, 0); - while self.should_park.get() { - let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get()); - debug_assert_eq!(r, 0); - } - let r = libc::pthread_mutex_unlock(self.mutex.get()); - debug_assert_eq!(r, 0); - } - - #[inline] - unsafe fn park_until(&self, timeout: Instant) -> bool { - let r = libc::pthread_mutex_lock(self.mutex.get()); - debug_assert_eq!(r, 0); - while self.should_park.get() { - let now = Instant::now(); - if timeout <= now { - let r = libc::pthread_mutex_unlock(self.mutex.get()); - debug_assert_eq!(r, 0); - return false; - } - - if let Some(ts) = timeout_to_timespec(timeout - now) { - let r = libc::pthread_cond_timedwait(self.condvar.get(), self.mutex.get(), &ts); - if ts.tv_sec < 0 { - // On some systems, negative timeouts will return EINVAL. In - // that case we won't sleep and will just busy loop instead, - // which is the best we can do. - debug_assert!(r == 0 || r == libc::ETIMEDOUT || r == libc::EINVAL); - } else { - debug_assert!(r == 0 || r == libc::ETIMEDOUT); - } - } else { - // Timeout calculation overflowed, just sleep indefinitely - let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get()); - debug_assert_eq!(r, 0); - } - } - let r = libc::pthread_mutex_unlock(self.mutex.get()); - debug_assert_eq!(r, 0); - true - } - - #[inline] - unsafe fn unpark_lock(&self) -> UnparkHandle { - let r = libc::pthread_mutex_lock(self.mutex.get()); - debug_assert_eq!(r, 0); - - UnparkHandle { - thread_parker: self, - } - } -} - -impl ThreadParker { - /// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME. 
- #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "android", - target_os = "espidf" - ))] - #[inline] - unsafe fn init(&self) {} - - /// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME. - #[cfg(not(any( - target_os = "macos", - target_os = "ios", - target_os = "tvos", - target_os = "watchos", - target_os = "android", - target_os = "espidf" - )))] - #[inline] - unsafe fn init(&self) { - let mut attr = MaybeUninit::::uninit(); - let r = libc::pthread_condattr_init(attr.as_mut_ptr()); - debug_assert_eq!(r, 0); - let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC); - debug_assert_eq!(r, 0); - let r = libc::pthread_cond_init(self.condvar.get(), attr.as_ptr()); - debug_assert_eq!(r, 0); - let r = libc::pthread_condattr_destroy(attr.as_mut_ptr()); - debug_assert_eq!(r, 0); - } -} - -impl Drop for ThreadParker { - #[inline] - fn drop(&mut self) { - // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a - // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER. - // Once it is used (locked/unlocked) or pthread_mutex_init() is called, - // this behaviour no longer occurs. The same applies to condvars. - unsafe { - let r = libc::pthread_mutex_destroy(self.mutex.get()); - debug_assert!(r == 0 || r == libc::EINVAL); - let r = libc::pthread_cond_destroy(self.condvar.get()); - debug_assert!(r == 0 || r == libc::EINVAL); - } - } -} - -pub struct UnparkHandle { - thread_parker: *const ThreadParker, -} - -impl super::UnparkHandleT for UnparkHandle { - #[inline] - unsafe fn unpark(self) { - (*self.thread_parker).should_park.set(false); - - // We notify while holding the lock here to avoid races with the target - // thread. In particular, the thread could exit after we unlock the - // mutex, which would make the condvar access invalid memory. 
- let r = libc::pthread_cond_signal((*self.thread_parker).condvar.get()); - debug_assert_eq!(r, 0); - let r = libc::pthread_mutex_unlock((*self.thread_parker).mutex.get()); - debug_assert_eq!(r, 0); - } -} - -// Returns the current time on the clock used by pthread_cond_t as a timespec. -#[cfg(any(target_os = "macos", target_os = "ios", target_os = "tvos", target_os = "watchos"))] -#[inline] -fn timespec_now() -> libc::timespec { - let mut now = MaybeUninit::::uninit(); - let r = unsafe { libc::gettimeofday(now.as_mut_ptr(), ptr::null_mut()) }; - debug_assert_eq!(r, 0); - // SAFETY: We know `libc::gettimeofday` has initialized the value. - let now = unsafe { now.assume_init() }; - libc::timespec { - tv_sec: now.tv_sec, - tv_nsec: now.tv_usec as tv_nsec_t * 1000, - } -} -#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "tvos", target_os = "watchos")))] -#[inline] -fn timespec_now() -> libc::timespec { - let mut now = MaybeUninit::::uninit(); - let clock = if cfg!(target_os = "android") { - // Android doesn't support pthread_condattr_setclock, so we need to - // specify the timeout in CLOCK_REALTIME. - libc::CLOCK_REALTIME - } else { - libc::CLOCK_MONOTONIC - }; - let r = unsafe { libc::clock_gettime(clock, now.as_mut_ptr()) }; - debug_assert_eq!(r, 0); - // SAFETY: We know `libc::clock_gettime` has initialized the value. - unsafe { now.assume_init() } -} - -// Converts a relative timeout into an absolute timeout in the clock used by -// pthread_cond_t. 
-#[inline] -fn timeout_to_timespec(timeout: Duration) -> Option { - // Handle overflows early on - if timeout.as_secs() > libc::time_t::max_value() as u64 { - return None; - } - - let now = timespec_now(); - let mut nsec = now.tv_nsec + timeout.subsec_nanos() as tv_nsec_t; - let mut sec = now.tv_sec.checked_add(timeout.as_secs() as libc::time_t); - if nsec >= 1_000_000_000 { - nsec -= 1_000_000_000; - sec = sec.and_then(|sec| sec.checked_add(1)); - } - - sec.map(|sec| libc::timespec { - tv_nsec: nsec, - tv_sec: sec, - }) -} - -#[inline] -pub fn thread_yield() { - thread::yield_now(); -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/wasm_atomic.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/wasm_atomic.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/wasm_atomic.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/wasm_atomic.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,97 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. 
- -use core::{ - arch::wasm32, - sync::atomic::{AtomicI32, Ordering}, -}; -use std::time::{Duration, Instant}; -use std::{convert::TryFrom, thread}; - -// Helper type for putting a thread to sleep until some other thread wakes it up -pub struct ThreadParker { - parked: AtomicI32, -} - -const UNPARKED: i32 = 0; -const PARKED: i32 = 1; - -impl super::ThreadParkerT for ThreadParker { - type UnparkHandle = UnparkHandle; - - const IS_CHEAP_TO_CONSTRUCT: bool = true; - - #[inline] - fn new() -> ThreadParker { - ThreadParker { - parked: AtomicI32::new(UNPARKED), - } - } - - #[inline] - unsafe fn prepare_park(&self) { - self.parked.store(PARKED, Ordering::Relaxed); - } - - #[inline] - unsafe fn timed_out(&self) -> bool { - self.parked.load(Ordering::Relaxed) == PARKED - } - - #[inline] - unsafe fn park(&self) { - while self.parked.load(Ordering::Acquire) == PARKED { - let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, -1); - // we should have either woken up (0) or got a not-equal due to a - // race (1). 
We should never time out (2) - debug_assert!(r == 0 || r == 1); - } - } - - #[inline] - unsafe fn park_until(&self, timeout: Instant) -> bool { - while self.parked.load(Ordering::Acquire) == PARKED { - if let Some(left) = timeout.checked_duration_since(Instant::now()) { - let nanos_left = i64::try_from(left.as_nanos()).unwrap_or(i64::max_value()); - let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, nanos_left); - debug_assert!(r == 0 || r == 1 || r == 2); - } else { - return false; - } - } - true - } - - #[inline] - unsafe fn unpark_lock(&self) -> UnparkHandle { - // We don't need to lock anything, just clear the state - self.parked.store(UNPARKED, Ordering::Release); - UnparkHandle(self.ptr()) - } -} - -impl ThreadParker { - #[inline] - fn ptr(&self) -> *mut i32 { - &self.parked as *const AtomicI32 as *mut i32 - } -} - -pub struct UnparkHandle(*mut i32); - -impl super::UnparkHandleT for UnparkHandle { - #[inline] - unsafe fn unpark(self) { - let num_notified = wasm32::memory_atomic_notify(self.0 as *mut i32, 1); - debug_assert!(num_notified == 0 || num_notified == 1); - } -} - -#[inline] -pub fn thread_yield() { - thread::yield_now(); -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/wasm.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/wasm.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/wasm.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/wasm.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,54 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -//! The wasm platform can't park when atomic support is not available. -//! So this ThreadParker just panics on any attempt to park. 
- -use std::thread; -use std::time::Instant; - -pub struct ThreadParker(()); - -impl super::ThreadParkerT for ThreadParker { - type UnparkHandle = UnparkHandle; - - const IS_CHEAP_TO_CONSTRUCT: bool = true; - - fn new() -> ThreadParker { - ThreadParker(()) - } - - unsafe fn prepare_park(&self) { - panic!("Parking not supported on this platform"); - } - - unsafe fn timed_out(&self) -> bool { - panic!("Parking not supported on this platform"); - } - - unsafe fn park(&self) { - panic!("Parking not supported on this platform"); - } - - unsafe fn park_until(&self, _timeout: Instant) -> bool { - panic!("Parking not supported on this platform"); - } - - unsafe fn unpark_lock(&self) -> UnparkHandle { - panic!("Parking not supported on this platform"); - } -} - -pub struct UnparkHandle(()); - -impl super::UnparkHandleT for UnparkHandle { - unsafe fn unpark(self) {} -} - -pub fn thread_yield() { - thread::yield_now(); -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/windows/bindings.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/windows/bindings.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/windows/bindings.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/windows/bindings.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -//! Manual bindings to the win32 API to avoid dependencies on windows-sys or winapi -//! as these bindings will **never** change and parking_lot_core is a foundational -//! dependency for the Rust ecosystem, so the dependencies used by it have an -//! 
outsize affect - -pub const INFINITE: u32 = 4294967295; -pub const ERROR_TIMEOUT: u32 = 1460; -pub const GENERIC_READ: u32 = 2147483648; -pub const GENERIC_WRITE: u32 = 1073741824; -pub const STATUS_SUCCESS: i32 = 0; -pub const STATUS_TIMEOUT: i32 = 258; - -pub type HANDLE = isize; -pub type HINSTANCE = isize; -pub type BOOL = i32; -pub type BOOLEAN = u8; -pub type NTSTATUS = i32; -pub type FARPROC = Option isize>; -pub type WaitOnAddress = unsafe extern "system" fn( - Address: *const std::ffi::c_void, - CompareAddress: *const std::ffi::c_void, - AddressSize: usize, - dwMilliseconds: u32, -) -> BOOL; -pub type WakeByAddressSingle = unsafe extern "system" fn(Address: *const std::ffi::c_void); - -windows_targets::link!("kernel32.dll" "system" fn GetLastError() -> u32); -windows_targets::link!("kernel32.dll" "system" fn CloseHandle(hObject: HANDLE) -> BOOL); -windows_targets::link!("kernel32.dll" "system" fn GetModuleHandleA(lpModuleName: *const u8) -> HINSTANCE); -windows_targets::link!("kernel32.dll" "system" fn GetProcAddress(hModule: HINSTANCE, lpProcName: *const u8) -> FARPROC); -windows_targets::link!("kernel32.dll" "system" fn Sleep(dwMilliseconds: u32) -> ()); diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,194 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. 
- -use core::{ - ffi, - mem::{self, MaybeUninit}, - ptr, -}; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::time::Instant; - -const STATE_UNPARKED: usize = 0; -const STATE_PARKED: usize = 1; -const STATE_TIMED_OUT: usize = 2; - -use super::bindings::*; - -#[allow(non_snake_case)] -pub struct KeyedEvent { - handle: HANDLE, - NtReleaseKeyedEvent: extern "system" fn( - EventHandle: HANDLE, - Key: *mut ffi::c_void, - Alertable: BOOLEAN, - Timeout: *mut i64, - ) -> NTSTATUS, - NtWaitForKeyedEvent: extern "system" fn( - EventHandle: HANDLE, - Key: *mut ffi::c_void, - Alertable: BOOLEAN, - Timeout: *mut i64, - ) -> NTSTATUS, -} - -impl KeyedEvent { - #[inline] - unsafe fn wait_for(&self, key: *mut ffi::c_void, timeout: *mut i64) -> NTSTATUS { - (self.NtWaitForKeyedEvent)(self.handle, key, false.into(), timeout) - } - - #[inline] - unsafe fn release(&self, key: *mut ffi::c_void) -> NTSTATUS { - (self.NtReleaseKeyedEvent)(self.handle, key, false.into(), ptr::null_mut()) - } - - #[allow(non_snake_case)] - pub fn create() -> Option { - let ntdll = unsafe { GetModuleHandleA(b"ntdll.dll\0".as_ptr()) }; - if ntdll == 0 { - return None; - } - - let NtCreateKeyedEvent = - unsafe { GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr())? }; - let NtReleaseKeyedEvent = - unsafe { GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr())? }; - let NtWaitForKeyedEvent = - unsafe { GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr())? 
}; - - let NtCreateKeyedEvent: extern "system" fn( - KeyedEventHandle: *mut HANDLE, - DesiredAccess: u32, - ObjectAttributes: *mut ffi::c_void, - Flags: u32, - ) -> NTSTATUS = unsafe { mem::transmute(NtCreateKeyedEvent) }; - let mut handle = MaybeUninit::uninit(); - let status = NtCreateKeyedEvent( - handle.as_mut_ptr(), - GENERIC_READ | GENERIC_WRITE, - ptr::null_mut(), - 0, - ); - if status != STATUS_SUCCESS { - return None; - } - - Some(KeyedEvent { - handle: unsafe { handle.assume_init() }, - NtReleaseKeyedEvent: unsafe { mem::transmute(NtReleaseKeyedEvent) }, - NtWaitForKeyedEvent: unsafe { mem::transmute(NtWaitForKeyedEvent) }, - }) - } - - #[inline] - pub fn prepare_park(&'static self, key: &AtomicUsize) { - key.store(STATE_PARKED, Ordering::Relaxed); - } - - #[inline] - pub fn timed_out(&'static self, key: &AtomicUsize) -> bool { - key.load(Ordering::Relaxed) == STATE_TIMED_OUT - } - - #[inline] - pub unsafe fn park(&'static self, key: &AtomicUsize) { - let status = self.wait_for(key as *const _ as *mut ffi::c_void, ptr::null_mut()); - debug_assert_eq!(status, STATUS_SUCCESS); - } - - #[inline] - pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool { - let now = Instant::now(); - if timeout <= now { - // If another thread unparked us, we need to call - // NtWaitForKeyedEvent otherwise that thread will stay stuck at - // NtReleaseKeyedEvent. - if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED { - self.park(key); - return true; - } - return false; - } - - // NT uses a timeout in units of 100ns. We use a negative value to - // indicate a relative timeout based on a monotonic clock. 
- let diff = timeout - now; - let value = (diff.as_secs() as i64) - .checked_mul(-10000000) - .and_then(|x| x.checked_sub((diff.subsec_nanos() as i64 + 99) / 100)); - - let mut nt_timeout = match value { - Some(x) => x, - None => { - // Timeout overflowed, just sleep indefinitely - self.park(key); - return true; - } - }; - - let status = self.wait_for(key as *const _ as *mut ffi::c_void, &mut nt_timeout); - if status == STATUS_SUCCESS { - return true; - } - debug_assert_eq!(status, STATUS_TIMEOUT); - - // If another thread unparked us, we need to call NtWaitForKeyedEvent - // otherwise that thread will stay stuck at NtReleaseKeyedEvent. - if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED { - self.park(key); - return true; - } - false - } - - #[inline] - pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle { - // If the state was STATE_PARKED then we need to wake up the thread - if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED { - UnparkHandle { - key: key, - keyed_event: self, - } - } else { - UnparkHandle { - key: ptr::null(), - keyed_event: self, - } - } - } -} - -impl Drop for KeyedEvent { - #[inline] - fn drop(&mut self) { - unsafe { - let ok = CloseHandle(self.handle); - debug_assert_eq!(ok, true.into()); - } - } -} - -// Handle for a thread that is about to be unparked. We need to mark the thread -// as unparked while holding the queue lock, but we delay the actual unparking -// until after the queue lock is released. -pub struct UnparkHandle { - key: *const AtomicUsize, - keyed_event: &'static KeyedEvent, -} - -impl UnparkHandle { - // Wakes up the parked thread. This should be called after the queue lock is - // released to avoid blocking the queue for too long. 
- #[inline] - pub unsafe fn unpark(self) { - if !self.key.is_null() { - let status = self.keyed_event.release(self.key as *mut ffi::c_void); - debug_assert_eq!(status, STATUS_SUCCESS); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/windows/mod.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/windows/mod.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/windows/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/windows/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,175 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use core::{ - ptr, - sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, -}; -use std::time::Instant; - -mod bindings; -mod keyed_event; -mod waitaddress; - -enum Backend { - KeyedEvent(keyed_event::KeyedEvent), - WaitAddress(waitaddress::WaitAddress), -} - -static BACKEND: AtomicPtr = AtomicPtr::new(ptr::null_mut()); - -impl Backend { - #[inline] - fn get() -> &'static Backend { - // Fast path: use the existing object - let backend_ptr = BACKEND.load(Ordering::Acquire); - if !backend_ptr.is_null() { - return unsafe { &*backend_ptr }; - }; - - Backend::create() - } - - #[cold] - fn create() -> &'static Backend { - // Try to create a new Backend - let backend; - if let Some(waitaddress) = waitaddress::WaitAddress::create() { - backend = Backend::WaitAddress(waitaddress); - } else if let Some(keyed_event) = keyed_event::KeyedEvent::create() { - backend = Backend::KeyedEvent(keyed_event); - } else { - panic!( - "parking_lot requires either NT Keyed Events (WinXP+) or \ - WaitOnAddress/WakeByAddress (Win8+)" - ); - } - - // Try to set our new Backend as the global one - let backend_ptr = Box::into_raw(Box::new(backend)); - match 
BACKEND.compare_exchange( - ptr::null_mut(), - backend_ptr, - Ordering::Release, - Ordering::Relaxed, - ) { - Ok(_) => unsafe { &*backend_ptr }, - Err(global_backend_ptr) => { - unsafe { - // We lost the race, free our object and return the global one - let _ = Box::from_raw(backend_ptr); - &*global_backend_ptr - } - } - } - } -} - -// Helper type for putting a thread to sleep until some other thread wakes it up -pub struct ThreadParker { - key: AtomicUsize, - backend: &'static Backend, -} - -impl super::ThreadParkerT for ThreadParker { - type UnparkHandle = UnparkHandle; - - const IS_CHEAP_TO_CONSTRUCT: bool = true; - - #[inline] - fn new() -> ThreadParker { - // Initialize the backend here to ensure we don't get any panics - // later on, which could leave synchronization primitives in a broken - // state. - ThreadParker { - key: AtomicUsize::new(0), - backend: Backend::get(), - } - } - - // Prepares the parker. This should be called before adding it to the queue. - #[inline] - unsafe fn prepare_park(&self) { - match *self.backend { - Backend::KeyedEvent(ref x) => x.prepare_park(&self.key), - Backend::WaitAddress(ref x) => x.prepare_park(&self.key), - } - } - - // Checks if the park timed out. This should be called while holding the - // queue lock after park_until has returned false. - #[inline] - unsafe fn timed_out(&self) -> bool { - match *self.backend { - Backend::KeyedEvent(ref x) => x.timed_out(&self.key), - Backend::WaitAddress(ref x) => x.timed_out(&self.key), - } - } - - // Parks the thread until it is unparked. This should be called after it has - // been added to the queue, after unlocking the queue. - #[inline] - unsafe fn park(&self) { - match *self.backend { - Backend::KeyedEvent(ref x) => x.park(&self.key), - Backend::WaitAddress(ref x) => x.park(&self.key), - } - } - - // Parks the thread until it is unparked or the timeout is reached. This - // should be called after it has been added to the queue, after unlocking - // the queue. 
Returns true if we were unparked and false if we timed out. - #[inline] - unsafe fn park_until(&self, timeout: Instant) -> bool { - match *self.backend { - Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout), - Backend::WaitAddress(ref x) => x.park_until(&self.key, timeout), - } - } - - // Locks the parker to prevent the target thread from exiting. This is - // necessary to ensure that thread-local ThreadData objects remain valid. - // This should be called while holding the queue lock. - #[inline] - unsafe fn unpark_lock(&self) -> UnparkHandle { - match *self.backend { - Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)), - Backend::WaitAddress(ref x) => UnparkHandle::WaitAddress(x.unpark_lock(&self.key)), - } - } -} - -// Handle for a thread that is about to be unparked. We need to mark the thread -// as unparked while holding the queue lock, but we delay the actual unparking -// until after the queue lock is released. -pub enum UnparkHandle { - KeyedEvent(keyed_event::UnparkHandle), - WaitAddress(waitaddress::UnparkHandle), -} - -impl super::UnparkHandleT for UnparkHandle { - // Wakes up the parked thread. This should be called after the queue lock is - // released to avoid blocking the queue for too long. - #[inline] - unsafe fn unpark(self) { - match self { - UnparkHandle::KeyedEvent(x) => x.unpark(), - UnparkHandle::WaitAddress(x) => x.unpark(), - } - } -} - -// Yields the rest of the current timeslice to the OS -#[inline] -pub fn thread_yield() { - unsafe { - // We don't use SwitchToThread here because it doesn't consider all - // threads in the system and the thread we are waiting for may not get - // selected. 
- bindings::Sleep(0); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,125 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -use core::{ - mem, - sync::atomic::{AtomicUsize, Ordering}, -}; -use std::{ffi, time::Instant}; - -use super::bindings::*; - -#[allow(non_snake_case)] -pub struct WaitAddress { - WaitOnAddress: WaitOnAddress, - WakeByAddressSingle: WakeByAddressSingle, -} - -impl WaitAddress { - #[allow(non_snake_case)] - pub fn create() -> Option { - let synch_dll = unsafe { GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr()) }; - if synch_dll == 0 { - return None; - } - - let WaitOnAddress = unsafe { GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr())? }; - let WakeByAddressSingle = - unsafe { GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr())? 
}; - - Some(WaitAddress { - WaitOnAddress: unsafe { mem::transmute(WaitOnAddress) }, - WakeByAddressSingle: unsafe { mem::transmute(WakeByAddressSingle) }, - }) - } - - #[inline] - pub fn prepare_park(&'static self, key: &AtomicUsize) { - key.store(1, Ordering::Relaxed); - } - - #[inline] - pub fn timed_out(&'static self, key: &AtomicUsize) -> bool { - key.load(Ordering::Relaxed) != 0 - } - - #[inline] - pub fn park(&'static self, key: &AtomicUsize) { - while key.load(Ordering::Acquire) != 0 { - let r = self.wait_on_address(key, INFINITE); - debug_assert!(r == true.into()); - } - } - - #[inline] - pub fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool { - while key.load(Ordering::Acquire) != 0 { - let now = Instant::now(); - if timeout <= now { - return false; - } - let diff = timeout - now; - let timeout = diff - .as_secs() - .checked_mul(1000) - .and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000)) - .map(|ms| { - if ms > std::u32::MAX as u64 { - INFINITE - } else { - ms as u32 - } - }) - .unwrap_or(INFINITE); - if self.wait_on_address(key, timeout) == false.into() { - debug_assert_eq!(unsafe { GetLastError() }, ERROR_TIMEOUT); - } - } - true - } - - #[inline] - pub fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle { - // We don't need to lock anything, just clear the state - key.store(0, Ordering::Release); - - UnparkHandle { - key: key, - waitaddress: self, - } - } - - #[inline] - fn wait_on_address(&'static self, key: &AtomicUsize, timeout: u32) -> BOOL { - let cmp = 1usize; - unsafe { - (self.WaitOnAddress)( - key as *const _ as *mut ffi::c_void, - &cmp as *const _ as *mut ffi::c_void, - mem::size_of::(), - timeout, - ) - } - } -} - -// Handle for a thread that is about to be unparked. We need to mark the thread -// as unparked while holding the queue lock, but we delay the actual unparking -// until after the queue lock is released. 
-pub struct UnparkHandle { - key: *const AtomicUsize, - waitaddress: &'static WaitAddress, -} - -impl UnparkHandle { - // Wakes up the parked thread. This should be called after the queue lock is - // released to avoid blocking the queue for too long. - #[inline] - pub fn unpark(self) { - unsafe { (self.waitaddress.WakeByAddressSingle)(self.key as *mut ffi::c_void) }; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/util.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/util.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/util.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/util.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -// Option::unchecked_unwrap -pub trait UncheckedOptionExt { - unsafe fn unchecked_unwrap(self) -> T; -} - -impl UncheckedOptionExt for Option { - #[inline] - unsafe fn unchecked_unwrap(self) -> T { - match self { - Some(x) => x, - None => unreachable(), - } - } -} - -// hint::unreachable_unchecked() in release mode -#[inline] -unsafe fn unreachable() -> ! { - if cfg!(debug_assertions) { - unreachable!(); - } else { - core::hint::unreachable_unchecked() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/parking_lot_core/src/word_lock.rs s390-tools-2.33.1/rust-vendor/parking_lot_core/src/word_lock.rs --- s390-tools-2.31.0/rust-vendor/parking_lot_core/src/word_lock.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/parking_lot_core/src/word_lock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,327 +0,0 @@ -// Copyright 2016 Amanieu d'Antras -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. 
- -use crate::spinwait::SpinWait; -use crate::thread_parker::{ThreadParker, ThreadParkerT, UnparkHandleT}; -use core::{ - cell::Cell, - mem, ptr, - sync::atomic::{fence, AtomicUsize, Ordering}, -}; - -struct ThreadData { - parker: ThreadParker, - - // Linked list of threads in the queue. The queue is split into two parts: - // the processed part and the unprocessed part. When new nodes are added to - // the list, they only have the next pointer set, and queue_tail is null. - // - // Nodes are processed with the queue lock held, which consists of setting - // the prev pointer for each node and setting the queue_tail pointer on the - // first processed node of the list. - // - // This setup allows nodes to be added to the queue without a lock, while - // still allowing O(1) removal of nodes from the processed part of the list. - // The only cost is the O(n) processing, but this only needs to be done - // once for each node, and therefore isn't too expensive. - queue_tail: Cell<*const ThreadData>, - prev: Cell<*const ThreadData>, - next: Cell<*const ThreadData>, -} - -impl ThreadData { - #[inline] - fn new() -> ThreadData { - assert!(mem::align_of::() > !QUEUE_MASK); - ThreadData { - parker: ThreadParker::new(), - queue_tail: Cell::new(ptr::null()), - prev: Cell::new(ptr::null()), - next: Cell::new(ptr::null()), - } - } -} - -// Invokes the given closure with a reference to the current thread `ThreadData`. -#[inline] -fn with_thread_data(f: impl FnOnce(&ThreadData) -> T) -> T { - let mut thread_data_ptr = ptr::null(); - // If ThreadData is expensive to construct, then we want to use a cached - // version in thread-local storage if possible. 
- if !ThreadParker::IS_CHEAP_TO_CONSTRUCT { - thread_local!(static THREAD_DATA: ThreadData = ThreadData::new()); - if let Ok(tls_thread_data) = THREAD_DATA.try_with(|x| x as *const ThreadData) { - thread_data_ptr = tls_thread_data; - } - } - // Otherwise just create a ThreadData on the stack - let mut thread_data_storage = None; - if thread_data_ptr.is_null() { - thread_data_ptr = thread_data_storage.get_or_insert_with(ThreadData::new); - } - - f(unsafe { &*thread_data_ptr }) -} - -const LOCKED_BIT: usize = 1; -const QUEUE_LOCKED_BIT: usize = 2; -const QUEUE_MASK: usize = !3; - -// Word-sized lock that is used to implement the parking_lot API. Since this -// can't use parking_lot, it instead manages its own queue of waiting threads. -pub struct WordLock { - state: AtomicUsize, -} - -impl WordLock { - /// Returns a new, unlocked, WordLock. - pub const fn new() -> Self { - WordLock { - state: AtomicUsize::new(0), - } - } - - #[inline] - pub fn lock(&self) { - if self - .state - .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - return; - } - self.lock_slow(); - } - - /// Must not be called on an already unlocked `WordLock`! 
- #[inline] - pub unsafe fn unlock(&self) { - let state = self.state.fetch_sub(LOCKED_BIT, Ordering::Release); - if state.is_queue_locked() || state.queue_head().is_null() { - return; - } - self.unlock_slow(); - } - - #[cold] - fn lock_slow(&self) { - let mut spinwait = SpinWait::new(); - let mut state = self.state.load(Ordering::Relaxed); - loop { - // Grab the lock if it isn't locked, even if there is a queue on it - if !state.is_locked() { - match self.state.compare_exchange_weak( - state, - state | LOCKED_BIT, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => return, - Err(x) => state = x, - } - continue; - } - - // If there is no queue, try spinning a few times - if state.queue_head().is_null() && spinwait.spin() { - state = self.state.load(Ordering::Relaxed); - continue; - } - - // Get our thread data and prepare it for parking - state = with_thread_data(|thread_data| { - // The pthread implementation is still unsafe, so we need to surround `prepare_park` - // with `unsafe {}`. - #[allow(unused_unsafe)] - unsafe { - thread_data.parker.prepare_park(); - } - - // Add our thread to the front of the queue - let queue_head = state.queue_head(); - if queue_head.is_null() { - thread_data.queue_tail.set(thread_data); - thread_data.prev.set(ptr::null()); - } else { - thread_data.queue_tail.set(ptr::null()); - thread_data.prev.set(ptr::null()); - thread_data.next.set(queue_head); - } - if let Err(x) = self.state.compare_exchange_weak( - state, - state.with_queue_head(thread_data), - Ordering::AcqRel, - Ordering::Relaxed, - ) { - return x; - } - - // Sleep until we are woken up by an unlock - // Ignoring unused unsafe, since it's only a few platforms where this is unsafe. 
- #[allow(unused_unsafe)] - unsafe { - thread_data.parker.park(); - } - - // Loop back and try locking again - spinwait.reset(); - self.state.load(Ordering::Relaxed) - }); - } - } - - #[cold] - fn unlock_slow(&self) { - let mut state = self.state.load(Ordering::Relaxed); - loop { - // We just unlocked the WordLock. Just check if there is a thread - // to wake up. If the queue is locked then another thread is already - // taking care of waking up a thread. - if state.is_queue_locked() || state.queue_head().is_null() { - return; - } - - // Try to grab the queue lock - match self.state.compare_exchange_weak( - state, - state | QUEUE_LOCKED_BIT, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => break, - Err(x) => state = x, - } - } - - // Now we have the queue lock and the queue is non-empty - 'outer: loop { - // First, we need to fill in the prev pointers for any newly added - // threads. We do this until we reach a node that we previously - // processed, which has a non-null queue_tail pointer. - let queue_head = state.queue_head(); - let mut queue_tail; - let mut current = queue_head; - loop { - queue_tail = unsafe { (*current).queue_tail.get() }; - if !queue_tail.is_null() { - break; - } - unsafe { - let next = (*current).next.get(); - (*next).prev.set(current); - current = next; - } - } - - // Set queue_tail on the queue head to indicate that the whole list - // has prev pointers set correctly. - unsafe { - (*queue_head).queue_tail.set(queue_tail); - } - - // If the WordLock is locked, then there is no point waking up a - // thread now. Instead we let the next unlocker take care of waking - // up a thread. 
- if state.is_locked() { - match self.state.compare_exchange_weak( - state, - state & !QUEUE_LOCKED_BIT, - Ordering::Release, - Ordering::Relaxed, - ) { - Ok(_) => return, - Err(x) => state = x, - } - - // Need an acquire fence before reading the new queue - fence_acquire(&self.state); - continue; - } - - // Remove the last thread from the queue and unlock the queue - let new_tail = unsafe { (*queue_tail).prev.get() }; - if new_tail.is_null() { - loop { - match self.state.compare_exchange_weak( - state, - state & LOCKED_BIT, - Ordering::Release, - Ordering::Relaxed, - ) { - Ok(_) => break, - Err(x) => state = x, - } - - // If the compare_exchange failed because a new thread was - // added to the queue then we need to re-scan the queue to - // find the previous element. - if state.queue_head().is_null() { - continue; - } else { - // Need an acquire fence before reading the new queue - fence_acquire(&self.state); - continue 'outer; - } - } - } else { - unsafe { - (*queue_head).queue_tail.set(new_tail); - } - self.state.fetch_and(!QUEUE_LOCKED_BIT, Ordering::Release); - } - - // Finally, wake up the thread we removed from the queue. Note that - // we don't need to worry about any races here since the thread is - // guaranteed to be sleeping right now and we are the only one who - // can wake it up. - unsafe { - (*queue_tail).parker.unpark_lock().unpark(); - } - break; - } - } -} - -// Thread-Sanitizer only has partial fence support, so when running under it, we -// try and avoid false positives by using a discarded acquire load instead. 
-#[inline] -fn fence_acquire(a: &AtomicUsize) { - if cfg!(tsan_enabled) { - let _ = a.load(Ordering::Acquire); - } else { - fence(Ordering::Acquire); - } -} - -trait LockState { - fn is_locked(self) -> bool; - fn is_queue_locked(self) -> bool; - fn queue_head(self) -> *const ThreadData; - fn with_queue_head(self, thread_data: *const ThreadData) -> Self; -} - -impl LockState for usize { - #[inline] - fn is_locked(self) -> bool { - self & LOCKED_BIT != 0 - } - - #[inline] - fn is_queue_locked(self) -> bool { - self & QUEUE_LOCKED_BIT != 0 - } - - #[inline] - fn queue_head(self) -> *const ThreadData { - (self & QUEUE_MASK) as *const ThreadData - } - - #[inline] - fn with_queue_head(self, thread_data: *const ThreadData) -> Self { - (self & !QUEUE_MASK) | thread_data as *const _ as usize - } -} diff -Nru s390-tools-2.31.0/rust-vendor/percent-encoding/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/percent-encoding/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/percent-encoding/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/percent-encoding/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/percent-encoding/Cargo.toml s390-tools-2.33.1/rust-vendor/percent-encoding/Cargo.toml --- s390-tools-2.31.0/rust-vendor/percent-encoding/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/percent-encoding/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. 
-# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.51" -name = "percent-encoding" -version = "2.3.0" -authors = ["The rust-url developers"] -description = "Percent encoding and decoding" -categories = ["no_std"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/servo/rust-url/" - -[features] -alloc = [] -default = ["std"] -std = ["alloc"] diff -Nru s390-tools-2.31.0/rust-vendor/percent-encoding/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/percent-encoding/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/percent-encoding/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/percent-encoding/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/percent-encoding/LICENSE-MIT s390-tools-2.33.1/rust-vendor/percent-encoding/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/percent-encoding/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/percent-encoding/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2013-2022 The rust-url developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/percent-encoding/src/lib.rs s390-tools-2.33.1/rust-vendor/percent-encoding/src/lib.rs --- s390-tools-2.31.0/rust-vendor/percent-encoding/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/percent-encoding/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,477 +0,0 @@ -// Copyright 2013-2016 The rust-url developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! URLs use special characters to indicate the parts of the request. -//! For example, a `?` question mark marks the end of a path and the start of a query string. -//! In order for that character to exist inside a path, it needs to be encoded differently. -//! -//! Percent encoding replaces reserved characters with the `%` escape character -//! followed by a byte value as two hexadecimal digits. -//! For example, an ASCII space is replaced with `%20`. -//! -//! When encoding, the set of characters that can (and should, for readability) be left alone -//! depends on the context. -//! The `?` question mark mentioned above is not a separator when used literally -//! inside of a query string, and therefore does not need to be encoded. -//! The [`AsciiSet`] parameter of [`percent_encode`] and [`utf8_percent_encode`] -//! lets callers configure this. -//! -//! This crate deliberately does not provide many different sets. -//! Users should consider in what context the encoded string will be used, -//! read relevant specifications, and define their own set. -//! This is done by using the `add` method of an existing set. -//! -//! 
# Examples -//! -//! ``` -//! use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS}; -//! -//! /// https://url.spec.whatwg.org/#fragment-percent-encode-set -//! const FRAGMENT: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b'<').add(b'>').add(b'`'); -//! -//! assert_eq!(utf8_percent_encode("foo ", FRAGMENT).to_string(), "foo%20%3Cbar%3E"); -//! ``` -#![no_std] - -// For forwards compatibility -#[cfg(feature = "std")] -extern crate std as _; - -#[cfg(feature = "alloc")] -extern crate alloc; - -#[cfg(feature = "alloc")] -use alloc::{ - borrow::{Cow, ToOwned}, - string::String, - vec::Vec, -}; -use core::{fmt, mem, slice, str}; - -/// Represents a set of characters or bytes in the ASCII range. -/// -/// This is used in [`percent_encode`] and [`utf8_percent_encode`]. -/// This is similar to [percent-encode sets](https://url.spec.whatwg.org/#percent-encoded-bytes). -/// -/// Use the `add` method of an existing set to define a new set. For example: -/// -/// ``` -/// use percent_encoding::{AsciiSet, CONTROLS}; -/// -/// /// https://url.spec.whatwg.org/#fragment-percent-encode-set -/// const FRAGMENT: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b'<').add(b'>').add(b'`'); -/// ``` -pub struct AsciiSet { - mask: [Chunk; ASCII_RANGE_LEN / BITS_PER_CHUNK], -} - -type Chunk = u32; - -const ASCII_RANGE_LEN: usize = 0x80; - -const BITS_PER_CHUNK: usize = 8 * mem::size_of::(); - -impl AsciiSet { - /// Called with UTF-8 bytes rather than code points. - /// Not used for non-ASCII bytes. 
- const fn contains(&self, byte: u8) -> bool { - let chunk = self.mask[byte as usize / BITS_PER_CHUNK]; - let mask = 1 << (byte as usize % BITS_PER_CHUNK); - (chunk & mask) != 0 - } - - fn should_percent_encode(&self, byte: u8) -> bool { - !byte.is_ascii() || self.contains(byte) - } - - pub const fn add(&self, byte: u8) -> Self { - let mut mask = self.mask; - mask[byte as usize / BITS_PER_CHUNK] |= 1 << (byte as usize % BITS_PER_CHUNK); - AsciiSet { mask } - } - - pub const fn remove(&self, byte: u8) -> Self { - let mut mask = self.mask; - mask[byte as usize / BITS_PER_CHUNK] &= !(1 << (byte as usize % BITS_PER_CHUNK)); - AsciiSet { mask } - } -} - -/// The set of 0x00 to 0x1F (C0 controls), and 0x7F (DEL). -/// -/// Note that this includes the newline and tab characters, but not the space 0x20. -/// -/// -pub const CONTROLS: &AsciiSet = &AsciiSet { - mask: [ - !0_u32, // C0: 0x00 to 0x1F (32 bits set) - 0, - 0, - 1 << (0x7F_u32 % 32), // DEL: 0x7F (one bit set) - ], -}; - -macro_rules! static_assert { - ($( $bool: expr, )+) => { - fn _static_assert() { - $( - let _ = mem::transmute::<[u8; $bool as usize], u8>; - )+ - } - } -} - -static_assert! { - CONTROLS.contains(0x00), - CONTROLS.contains(0x1F), - !CONTROLS.contains(0x20), - !CONTROLS.contains(0x7E), - CONTROLS.contains(0x7F), -} - -/// Everything that is not an ASCII letter or digit. -/// -/// This is probably more eager than necessary in any context. -pub const NON_ALPHANUMERIC: &AsciiSet = &CONTROLS - .add(b' ') - .add(b'!') - .add(b'"') - .add(b'#') - .add(b'$') - .add(b'%') - .add(b'&') - .add(b'\'') - .add(b'(') - .add(b')') - .add(b'*') - .add(b'+') - .add(b',') - .add(b'-') - .add(b'.') - .add(b'/') - .add(b':') - .add(b';') - .add(b'<') - .add(b'=') - .add(b'>') - .add(b'?') - .add(b'@') - .add(b'[') - .add(b'\\') - .add(b']') - .add(b'^') - .add(b'_') - .add(b'`') - .add(b'{') - .add(b'|') - .add(b'}') - .add(b'~'); - -/// Return the percent-encoding of the given byte. 
-/// -/// This is unconditional, unlike `percent_encode()` which has an `AsciiSet` parameter. -/// -/// # Examples -/// -/// ``` -/// use percent_encoding::percent_encode_byte; -/// -/// assert_eq!("foo bar".bytes().map(percent_encode_byte).collect::(), -/// "%66%6F%6F%20%62%61%72"); -/// ``` -#[inline] -pub fn percent_encode_byte(byte: u8) -> &'static str { - static ENC_TABLE: &[u8; 768] = b"\ - %00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F\ - %10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F\ - %20%21%22%23%24%25%26%27%28%29%2A%2B%2C%2D%2E%2F\ - %30%31%32%33%34%35%36%37%38%39%3A%3B%3C%3D%3E%3F\ - %40%41%42%43%44%45%46%47%48%49%4A%4B%4C%4D%4E%4F\ - %50%51%52%53%54%55%56%57%58%59%5A%5B%5C%5D%5E%5F\ - %60%61%62%63%64%65%66%67%68%69%6A%6B%6C%6D%6E%6F\ - %70%71%72%73%74%75%76%77%78%79%7A%7B%7C%7D%7E%7F\ - %80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F\ - %90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F\ - %A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF\ - %B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF\ - %C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF\ - %D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF\ - %E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF\ - %F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF\ - "; - - let index = usize::from(byte) * 3; - // SAFETY: ENC_TABLE is ascii-only, so any subset if it should be - // ascii-only too, which is valid utf8. - unsafe { str::from_utf8_unchecked(&ENC_TABLE[index..index + 3]) } -} - -/// Percent-encode the given bytes with the given set. -/// -/// Non-ASCII bytes and bytes in `ascii_set` are encoded. -/// -/// The return type: -/// -/// * Implements `Iterator` and therefore has a `.collect::()` method, -/// * Implements `Display` and therefore has a `.to_string()` method, -/// * Implements `Into>` borrowing `input` when none of its bytes are encoded. 
-/// -/// # Examples -/// -/// ``` -/// use percent_encoding::{percent_encode, NON_ALPHANUMERIC}; -/// -/// assert_eq!(percent_encode(b"foo bar?", NON_ALPHANUMERIC).to_string(), "foo%20bar%3F"); -/// ``` -#[inline] -pub fn percent_encode<'a>(input: &'a [u8], ascii_set: &'static AsciiSet) -> PercentEncode<'a> { - PercentEncode { - bytes: input, - ascii_set, - } -} - -/// Percent-encode the UTF-8 encoding of the given string. -/// -/// See [`percent_encode`] regarding the return type. -/// -/// # Examples -/// -/// ``` -/// use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC}; -/// -/// assert_eq!(utf8_percent_encode("foo bar?", NON_ALPHANUMERIC).to_string(), "foo%20bar%3F"); -/// ``` -#[inline] -pub fn utf8_percent_encode<'a>(input: &'a str, ascii_set: &'static AsciiSet) -> PercentEncode<'a> { - percent_encode(input.as_bytes(), ascii_set) -} - -/// The return type of [`percent_encode`] and [`utf8_percent_encode`]. -#[derive(Clone)] -pub struct PercentEncode<'a> { - bytes: &'a [u8], - ascii_set: &'static AsciiSet, -} - -impl<'a> Iterator for PercentEncode<'a> { - type Item = &'a str; - - fn next(&mut self) -> Option<&'a str> { - if let Some((&first_byte, remaining)) = self.bytes.split_first() { - if self.ascii_set.should_percent_encode(first_byte) { - self.bytes = remaining; - Some(percent_encode_byte(first_byte)) - } else { - // The unsafe blocks here are appropriate because the bytes are - // confirmed as a subset of UTF-8 in should_percent_encode. 
- for (i, &byte) in remaining.iter().enumerate() { - if self.ascii_set.should_percent_encode(byte) { - // 1 for first_byte + i for previous iterations of this loop - let (unchanged_slice, remaining) = self.bytes.split_at(1 + i); - self.bytes = remaining; - return Some(unsafe { str::from_utf8_unchecked(unchanged_slice) }); - } - } - let unchanged_slice = self.bytes; - self.bytes = &[][..]; - Some(unsafe { str::from_utf8_unchecked(unchanged_slice) }) - } - } else { - None - } - } - - fn size_hint(&self) -> (usize, Option) { - if self.bytes.is_empty() { - (0, Some(0)) - } else { - (1, Some(self.bytes.len())) - } - } -} - -impl<'a> fmt::Display for PercentEncode<'a> { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - for c in (*self).clone() { - formatter.write_str(c)? - } - Ok(()) - } -} - -#[cfg(feature = "alloc")] -impl<'a> From> for Cow<'a, str> { - fn from(mut iter: PercentEncode<'a>) -> Self { - match iter.next() { - None => "".into(), - Some(first) => match iter.next() { - None => first.into(), - Some(second) => { - let mut string = first.to_owned(); - string.push_str(second); - string.extend(iter); - string.into() - } - }, - } - } -} - -/// Percent-decode the given string. -/// -/// -/// -/// See [`percent_decode`] regarding the return type. -#[inline] -pub fn percent_decode_str(input: &str) -> PercentDecode<'_> { - percent_decode(input.as_bytes()) -} - -/// Percent-decode the given bytes. -/// -/// -/// -/// Any sequence of `%` followed by two hexadecimal digits is decoded. -/// The return type: -/// -/// * Implements `Into>` borrowing `input` when it contains no percent-encoded sequence, -/// * Implements `Iterator` and therefore has a `.collect::>()` method, -/// * Has `decode_utf8()` and `decode_utf8_lossy()` methods. 
-/// -/// # Examples -/// -/// ``` -/// use percent_encoding::percent_decode; -/// -/// assert_eq!(percent_decode(b"foo%20bar%3f").decode_utf8().unwrap(), "foo bar?"); -/// ``` -#[inline] -pub fn percent_decode(input: &[u8]) -> PercentDecode<'_> { - PercentDecode { - bytes: input.iter(), - } -} - -/// The return type of [`percent_decode`]. -#[derive(Clone, Debug)] -pub struct PercentDecode<'a> { - bytes: slice::Iter<'a, u8>, -} - -fn after_percent_sign(iter: &mut slice::Iter<'_, u8>) -> Option { - let mut cloned_iter = iter.clone(); - let h = char::from(*cloned_iter.next()?).to_digit(16)?; - let l = char::from(*cloned_iter.next()?).to_digit(16)?; - *iter = cloned_iter; - Some(h as u8 * 0x10 + l as u8) -} - -impl<'a> Iterator for PercentDecode<'a> { - type Item = u8; - - fn next(&mut self) -> Option { - self.bytes.next().map(|&byte| { - if byte == b'%' { - after_percent_sign(&mut self.bytes).unwrap_or(byte) - } else { - byte - } - }) - } - - fn size_hint(&self) -> (usize, Option) { - let bytes = self.bytes.len(); - ((bytes + 2) / 3, Some(bytes)) - } -} - -#[cfg(feature = "alloc")] -impl<'a> From> for Cow<'a, [u8]> { - fn from(iter: PercentDecode<'a>) -> Self { - match iter.if_any() { - Some(vec) => Cow::Owned(vec), - None => Cow::Borrowed(iter.bytes.as_slice()), - } - } -} - -impl<'a> PercentDecode<'a> { - /// If the percent-decoding is different from the input, return it as a new bytes vector. 
- #[cfg(feature = "alloc")] - fn if_any(&self) -> Option> { - let mut bytes_iter = self.bytes.clone(); - while bytes_iter.any(|&b| b == b'%') { - if let Some(decoded_byte) = after_percent_sign(&mut bytes_iter) { - let initial_bytes = self.bytes.as_slice(); - let unchanged_bytes_len = initial_bytes.len() - bytes_iter.len() - 3; - let mut decoded = initial_bytes[..unchanged_bytes_len].to_owned(); - decoded.push(decoded_byte); - decoded.extend(PercentDecode { bytes: bytes_iter }); - return Some(decoded); - } - } - // Nothing to decode - None - } - - /// Decode the result of percent-decoding as UTF-8. - /// - /// This is return `Err` when the percent-decoded bytes are not well-formed in UTF-8. - #[cfg(feature = "alloc")] - pub fn decode_utf8(self) -> Result, str::Utf8Error> { - match self.clone().into() { - Cow::Borrowed(bytes) => match str::from_utf8(bytes) { - Ok(s) => Ok(s.into()), - Err(e) => Err(e), - }, - Cow::Owned(bytes) => match String::from_utf8(bytes) { - Ok(s) => Ok(s.into()), - Err(e) => Err(e.utf8_error()), - }, - } - } - - /// Decode the result of percent-decoding as UTF-8, lossily. - /// - /// Invalid UTF-8 percent-encoded byte sequences will be replaced � U+FFFD, - /// the replacement character. - #[cfg(feature = "alloc")] - pub fn decode_utf8_lossy(self) -> Cow<'a, str> { - decode_utf8_lossy(self.clone().into()) - } -} - -#[cfg(feature = "alloc")] -fn decode_utf8_lossy(input: Cow<'_, [u8]>) -> Cow<'_, str> { - // Note: This function is duplicated in `form_urlencoded/src/query_encoding.rs`. - match input { - Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes), - Cow::Owned(bytes) => { - match String::from_utf8_lossy(&bytes) { - Cow::Borrowed(utf8) => { - // If from_utf8_lossy returns a Cow::Borrowed, then we can - // be sure our original bytes were valid UTF-8. This is because - // if the bytes were invalid UTF-8 from_utf8_lossy would have - // to allocate a new owned string to back the Cow so it could - // replace invalid bytes with a placeholder. 
- - // First we do a debug_assert to confirm our description above. - let raw_utf8: *const [u8] = utf8.as_bytes(); - debug_assert!(raw_utf8 == &*bytes as *const [u8]); - - // Given we know the original input bytes are valid UTF-8, - // and we have ownership of those bytes, we re-use them and - // return a Cow::Owned here. - Cow::Owned(unsafe { String::from_utf8_unchecked(bytes) }) - } - Cow::Owned(s) => Cow::Owned(s), - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/pin-project-lite/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/pin-project-lite/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/pin-project-lite/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-project-lite/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/pin-project-lite/Cargo.toml s390-tools-2.33.1/rust-vendor/pin-project-lite/Cargo.toml --- s390-tools-2.31.0/rust-vendor/pin-project-lite/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-project-lite/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,69 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.37" -name = "pin-project-lite" -version = "0.2.13" -exclude = [ - "/.*", - "/tools", - "/DEVELOPMENT.md", -] -description = """ -A lightweight version of pin-project written with declarative macros. 
-""" -readme = "README.md" -keywords = [ - "pin", - "macros", -] -categories = [ - "no-std", - "no-std::no-alloc", - "rust-patterns", -] -license = "Apache-2.0 OR MIT" -repository = "https://github.com/taiki-e/pin-project-lite" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[lib] -doc-scrape-examples = false - -[dev-dependencies.macrotest] -version = "1.0.9" - -[dev-dependencies.once_cell] -version = "=1.14" - -[dev-dependencies.proc-macro2] -version = "=1.0.65" - -[dev-dependencies.quote] -version = "=1.0.30" - -[dev-dependencies.rustversion] -version = "1" - -[dev-dependencies.serde] -version = "=1.0.156" - -[dev-dependencies.static_assertions] -version = "1" - -[dev-dependencies.toml] -version = "=0.5.9" - -[dev-dependencies.trybuild] -version = "=1.0.67" diff -Nru s390-tools-2.31.0/rust-vendor/pin-project-lite/CHANGELOG.md s390-tools-2.33.1/rust-vendor/pin-project-lite/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/pin-project-lite/CHANGELOG.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-project-lite/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,242 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -This project adheres to [Semantic Versioning](https://semver.org). - - - -## [Unreleased] - -## [0.2.13] - 2023-08-25 - -- Allow attributes in impl and method of `PinnedDrop` implementation. - -## [0.2.12] - 2023-08-09 - -- Work around an issue where the projected types/methods appear in the documentation as if they were part of the public API if the visibility is not correctly parsed due to the rustc bug. See [#77](https://github.com/taiki-e/pin-project-lite/issues/77#issuecomment-1671540180) for details. - -## [0.2.11] - 2023-08-06 - -- Add support for `#[project(!Unpin)]`. This is equivalent to pin-project's [!Unpin](https://docs.rs/pin-project/latest/pin_project/attr.pin_project.html#unpin) option. 
([#76](https://github.com/taiki-e/pin-project-lite/pull/76), thanks @matheus-consoli) - -## [0.2.10] - 2023-07-02 - -- Inline project methods. ([#74](https://github.com/taiki-e/pin-project-lite/pull/74), thanks @EFanZh) - -## [0.2.9] - 2022-04-26 - -- Improve compile time of `pin_project!` calls. ([#71](https://github.com/taiki-e/pin-project-lite/pull/71), thanks @nnethercote) - -## [0.2.8] - 2021-12-31 - -- Fix handling of trailing commas in `PinnedDrop` impl. ([#64](https://github.com/taiki-e/pin-project-lite/pull/64), thanks @Michael-J-Ward) - -## [0.2.7] - 2021-06-26 - -- [Support custom Drop implementation.](https://github.com/taiki-e/pin-project-lite/pull/25) See [#25](https://github.com/taiki-e/pin-project-lite/pull/25) for details. - -## [0.2.6] - 2021-03-04 - -- Support item attributes in any order. ([#57](https://github.com/taiki-e/pin-project-lite/pull/57), thanks @SabrinaJewson) - -## [0.2.5] - 2021-03-02 - -- [Prepare for removal of `safe_packed_borrows` lint.](https://github.com/taiki-e/pin-project-lite/pull/55) See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -## [0.2.4] - 2021-01-11 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- Add `project_replace`. ([#43](https://github.com/taiki-e/pin-project-lite/pull/43), thanks @Marwes) - -## [0.2.3] - 2021-01-09 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Suppress `clippy::unknown_clippy_lints` lint in generated code.](https://github.com/taiki-e/pin-project-lite/pull/47) - -## [0.2.2] - 2021-01-09 - -**Note:** This release has been yanked.** See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Suppress `clippy::ref_option_ref` lint in generated code.](https://github.com/taiki-e/pin-project-lite/pull/45) - -## [0.2.1] - 2021-01-05 - -**Note:** This release has been yanked. 
See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- Exclude unneeded files from crates.io. - -## [0.2.0] - 2020-11-13 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [`pin_project!` macro now supports enums.](https://github.com/taiki-e/pin-project-lite/pull/28) - - To use `pin_project!` on enums, you need to name the projection type returned from the method. - - ```rust - use pin_project_lite::pin_project; - use std::pin::Pin; - - pin_project! { - #[project = EnumProj] - enum Enum { - Variant { #[pin] pinned: T, unpinned: U }, - } - } - - impl Enum { - fn method(self: Pin<&mut Self>) { - match self.project() { - EnumProj::Variant { pinned, unpinned } => { - let _: Pin<&mut T> = pinned; - let _: &mut U = unpinned; - } - } - } - } - ``` - -- [Support naming the projection types.](https://github.com/taiki-e/pin-project-lite/pull/28) - - By passing an attribute with the same name as the method, you can name the projection type returned from the method: - - ```rust - use pin_project_lite::pin_project; - use std::pin::Pin; - - pin_project! { - #[project = StructProj] - struct Struct { - #[pin] - field: T, - } - } - - fn func(x: Pin<&mut Struct>) { - let StructProj { field } = x.project(); - let _: Pin<&mut T> = field; - } - ``` - -## [0.1.12] - 2021-03-02 - -- [Prepare for removal of `safe_packed_borrows` lint.](https://github.com/taiki-e/pin-project-lite/pull/55) See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -## [0.1.11] - 2020-10-20 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- Suppress `clippy::redundant_pub_crate` lint in generated code. - -- Documentation improvements. - -## [0.1.10] - 2020-10-01 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. 
- -- Suppress `drop_bounds` lint, which will be added to rustc in the future. See [taiki-e/pin-project#272](https://github.com/taiki-e/pin-project/issues/272) for more details. - -## [0.1.9] - 2020-09-29 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Fix trailing comma support in generics.](https://github.com/taiki-e/pin-project-lite/pull/32) - -## [0.1.8] - 2020-09-26 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Fix compatibility of generated code with `forbid(future_incompatible)`.](https://github.com/taiki-e/pin-project-lite/pull/30) - - Note: This does not guarantee compatibility with `forbid(future_incompatible)` in the future. - If rustc adds a new lint, we may not be able to keep this. - -## [0.1.7] - 2020-06-04 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support `?Sized` bounds in where clauses.](https://github.com/taiki-e/pin-project-lite/pull/22) - -- [Fix lifetime inference error when an associated type is used in fields.](https://github.com/taiki-e/pin-project-lite/pull/20) - -- Suppress `clippy::used_underscore_binding` lint in generated code. - -- Documentation improvements. - -## [0.1.6] - 2020-05-31 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support lifetime bounds in where clauses.](https://github.com/taiki-e/pin-project-lite/pull/18) - -- Documentation improvements. - -## [0.1.5] - 2020-05-07 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support overwriting the name of `core` crate.](https://github.com/taiki-e/pin-project-lite/pull/14) - -## [0.1.4] - 2020-01-20 - -**Note:** This release has been yanked. 
See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support ?Sized bounds in generic parameters.](https://github.com/taiki-e/pin-project-lite/pull/9) - -## [0.1.3] - 2020-01-20 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [Support lifetime bounds in generic parameters.](https://github.com/taiki-e/pin-project-lite/pull/7) - -## [0.1.2] - 2020-01-05 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- Support recognizing default generic parameters. ([#6](https://github.com/taiki-e/pin-project-lite/pull/6), thanks @kennytm) - -## [0.1.1] - 2019-11-15 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. - -- [`pin_project!` macro now determines the visibility of the projection type/method is based on the original type.](https://github.com/taiki-e/pin-project-lite/pull/5) - -## [0.1.0] - 2019-10-22 - -**Note:** This release has been yanked. See [#55](https://github.com/taiki-e/pin-project-lite/pull/55) for details. 
- -Initial release - -[Unreleased]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.13...HEAD -[0.2.13]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.12...v0.2.13 -[0.2.12]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.11...v0.2.12 -[0.2.11]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.10...v0.2.11 -[0.2.10]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.9...v0.2.10 -[0.2.9]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.8...v0.2.9 -[0.2.8]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.7...v0.2.8 -[0.2.7]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.6...v0.2.7 -[0.2.6]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.5...v0.2.6 -[0.2.5]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.4...v0.2.5 -[0.2.4]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.3...v0.2.4 -[0.2.3]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.2...v0.2.3 -[0.2.2]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.1...v0.2.2 -[0.2.1]: https://github.com/taiki-e/pin-project-lite/compare/v0.2.0...v0.2.1 -[0.2.0]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.11...v0.2.0 -[0.1.12]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.11...v0.1.12 -[0.1.11]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.10...v0.1.11 -[0.1.10]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.9...v0.1.10 -[0.1.9]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.8...v0.1.9 -[0.1.8]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.7...v0.1.8 -[0.1.7]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.6...v0.1.7 -[0.1.6]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.5...v0.1.6 -[0.1.5]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.4...v0.1.5 -[0.1.4]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.3...v0.1.4 -[0.1.3]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.2...v0.1.3 
-[0.1.2]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.1...v0.1.2 -[0.1.1]: https://github.com/taiki-e/pin-project-lite/compare/v0.1.0...v0.1.1 -[0.1.0]: https://github.com/taiki-e/pin-project-lite/releases/tag/v0.1.0 diff -Nru s390-tools-2.31.0/rust-vendor/pin-project-lite/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/pin-project-lite/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/pin-project-lite/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-project-lite/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,177 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS diff -Nru s390-tools-2.31.0/rust-vendor/pin-project-lite/LICENSE-MIT s390-tools-2.33.1/rust-vendor/pin-project-lite/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/pin-project-lite/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-project-lite/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/pin-project-lite/README.md s390-tools-2.33.1/rust-vendor/pin-project-lite/README.md --- s390-tools-2.31.0/rust-vendor/pin-project-lite/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-project-lite/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,130 +0,0 @@ -# pin-project-lite - -[![crates.io](https://img.shields.io/crates/v/pin-project-lite?style=flat-square&logo=rust)](https://crates.io/crates/pin-project-lite) -[![docs.rs](https://img.shields.io/badge/docs.rs-pin--project--lite-blue?style=flat-square&logo=docs.rs)](https://docs.rs/pin-project-lite) -[![license](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue?style=flat-square)](#license) -[![rustc](https://img.shields.io/badge/rustc-1.37+-blue?style=flat-square&logo=rust)](https://www.rust-lang.org) -[![build status](https://img.shields.io/github/actions/workflow/status/taiki-e/pin-project-lite/ci.yml?branch=main&style=flat-square&logo=github)](https://github.com/taiki-e/pin-project-lite/actions) - - -A lightweight version of [pin-project] written with declarative macros. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -pin-project-lite = "0.2" -``` - -*Compiler support: requires rustc 1.37+* - -## Examples - -[`pin_project!`] macro creates a projection type covering all the fields of -struct. - -```rust -use std::pin::Pin; - -use pin_project_lite::pin_project; - -pin_project! { - struct Struct { - #[pin] - pinned: T, - unpinned: U, - } -} - -impl Struct { - fn method(self: Pin<&mut Self>) { - let this = self.project(); - let _: Pin<&mut T> = this.pinned; // Pinned reference to the field - let _: &mut U = this.unpinned; // Normal reference to the field - } -} -``` - -To use [`pin_project!`] on enums, you need to name the projection type -returned from the method. - -```rust -use std::pin::Pin; - -use pin_project_lite::pin_project; - -pin_project! 
{ - #[project = EnumProj] - enum Enum { - Variant { #[pin] pinned: T, unpinned: U }, - } -} - -impl Enum { - fn method(self: Pin<&mut Self>) { - match self.project() { - EnumProj::Variant { pinned, unpinned } => { - let _: Pin<&mut T> = pinned; - let _: &mut U = unpinned; - } - } - } -} -``` - -## [pin-project] vs pin-project-lite - -Here are some similarities and differences compared to [pin-project]. - -### Similar: Safety - -pin-project-lite guarantees safety in much the same way as [pin-project]. -Both are completely safe unless you write other unsafe code. - -### Different: Minimal design - -This library does not tackle as expansive of a range of use cases as -[pin-project] does. If your use case is not already covered, please use -[pin-project]. - -### Different: No proc-macro related dependencies - -This is the **only** reason to use this crate. However, **if you already -have proc-macro related dependencies in your crate's dependency graph, there -is no benefit from using this crate.** (Note: There is almost no difference -in the amount of code generated between [pin-project] and pin-project-lite.) - -### Different: No useful error messages - -This macro does not handle any invalid input. So error messages are not to -be useful in most cases. If you do need useful error messages, then upon -error you can pass the same input to [pin-project] to receive a helpful -description of the compile error. - -### Different: No support for custom Unpin implementation - -pin-project supports this by [`UnsafeUnpin`][unsafe-unpin]. (`!Unpin` is supported by both [pin-project][not-unpin] and [pin-project-lite][not-unpin-lite].) - -### Different: No support for tuple structs and tuple variants - -pin-project supports this. 
- -[not-unpin]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#unpin -[not-unpin-lite]: https://docs.rs/pin-project-lite/0.2/pin_project_lite/macro.pin_project.html#unpin -[pin-project]: https://github.com/taiki-e/pin-project -[unsafe-unpin]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#unsafeunpin - - - -[`pin_project!`]: https://docs.rs/pin-project-lite/0.2/pin_project_lite/macro.pin_project.html - -## License - -Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or -[MIT license](LICENSE-MIT) at your option. - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/pin-project-lite/src/lib.rs s390-tools-2.33.1/rust-vendor/pin-project-lite/src/lib.rs --- s390-tools-2.31.0/rust-vendor/pin-project-lite/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-project-lite/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1682 +0,0 @@ -/*! - -A lightweight version of [pin-project] written with declarative macros. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -pin-project-lite = "0.2" -``` - -*Compiler support: requires rustc 1.37+* - -## Examples - -[`pin_project!`] macro creates a projection type covering all the fields of -struct. - -```rust -use std::pin::Pin; - -use pin_project_lite::pin_project; - -pin_project! { - struct Struct { - #[pin] - pinned: T, - unpinned: U, - } -} - -impl Struct { - fn method(self: Pin<&mut Self>) { - let this = self.project(); - let _: Pin<&mut T> = this.pinned; // Pinned reference to the field - let _: &mut U = this.unpinned; // Normal reference to the field - } -} -``` - -To use [`pin_project!`] on enums, you need to name the projection type -returned from the method. 
- -```rust -use std::pin::Pin; - -use pin_project_lite::pin_project; - -pin_project! { - #[project = EnumProj] - enum Enum { - Variant { #[pin] pinned: T, unpinned: U }, - } -} - -impl Enum { - fn method(self: Pin<&mut Self>) { - match self.project() { - EnumProj::Variant { pinned, unpinned } => { - let _: Pin<&mut T> = pinned; - let _: &mut U = unpinned; - } - } - } -} -``` - -## [pin-project] vs pin-project-lite - -Here are some similarities and differences compared to [pin-project]. - -### Similar: Safety - -pin-project-lite guarantees safety in much the same way as [pin-project]. -Both are completely safe unless you write other unsafe code. - -### Different: Minimal design - -This library does not tackle as expansive of a range of use cases as -[pin-project] does. If your use case is not already covered, please use -[pin-project]. - -### Different: No proc-macro related dependencies - -This is the **only** reason to use this crate. However, **if you already -have proc-macro related dependencies in your crate's dependency graph, there -is no benefit from using this crate.** (Note: There is almost no difference -in the amount of code generated between [pin-project] and pin-project-lite.) - -### Different: No useful error messages - -This macro does not handle any invalid input. So error messages are not to -be useful in most cases. If you do need useful error messages, then upon -error you can pass the same input to [pin-project] to receive a helpful -description of the compile error. - -### Different: No support for custom Unpin implementation - -pin-project supports this by [`UnsafeUnpin`][unsafe-unpin]. (`!Unpin` is supported by both [pin-project][not-unpin] and [pin-project-lite][not-unpin-lite].) - -### Different: No support for tuple structs and tuple variants - -pin-project supports this. 
- -[not-unpin]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#unpin -[not-unpin-lite]: https://docs.rs/pin-project-lite/0.2/pin_project_lite/macro.pin_project.html#unpin -[pin-project]: https://github.com/taiki-e/pin-project -[unsafe-unpin]: https://docs.rs/pin-project/1/pin_project/attr.pin_project.html#unsafeunpin - - -*/ - -#![no_std] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_variables) - ) -))] -#![warn(rust_2018_idioms, single_use_lifetimes, unreachable_pub)] -#![warn( - clippy::pedantic, - // lints for public library - clippy::alloc_instead_of_core, - clippy::exhaustive_enums, - clippy::exhaustive_structs, - clippy::std_instead_of_alloc, - clippy::std_instead_of_core, - // lints that help writing unsafe code - clippy::as_ptr_cast_mut, - clippy::default_union_representation, - clippy::trailing_empty_array, - clippy::transmute_undefined_repr, - clippy::undocumented_unsafe_blocks, -)] - -/// A macro that creates a projection type covering all the fields of struct. -/// -/// This macro creates a projection type according to the following rules: -/// -/// - For the field that uses `#[pin]` attribute, makes the pinned reference to the field. -/// - For the other fields, makes the unpinned reference to the field. -/// -/// And the following methods are implemented on the original type: -/// -/// ```rust -/// # use std::pin::Pin; -/// # type Projection<'a> = &'a (); -/// # type ProjectionRef<'a> = &'a (); -/// # trait Dox { -/// fn project(self: Pin<&mut Self>) -> Projection<'_>; -/// fn project_ref(self: Pin<&Self>) -> ProjectionRef<'_>; -/// # } -/// ``` -/// -/// By passing an attribute with the same name as the method to the macro, -/// you can name the projection type returned from the method. This allows you -/// to use pattern matching on the projected types. -/// -/// ```rust -/// # use pin_project_lite::pin_project; -/// # use std::pin::Pin; -/// pin_project! 
{ -/// #[project = EnumProj] -/// enum Enum { -/// Variant { #[pin] field: T }, -/// } -/// } -/// -/// impl Enum { -/// fn method(self: Pin<&mut Self>) { -/// let this: EnumProj<'_, T> = self.project(); -/// match this { -/// EnumProj::Variant { field } => { -/// let _: Pin<&mut T> = field; -/// } -/// } -/// } -/// } -/// ``` -/// -/// By passing the `#[project_replace = MyProjReplace]` attribute you may create an additional -/// method which allows the contents of `Pin<&mut Self>` to be replaced while simultaneously moving -/// out all unpinned fields in `Self`. -/// -/// ```rust -/// # use std::pin::Pin; -/// # type MyProjReplace = (); -/// # trait Dox { -/// fn project_replace(self: Pin<&mut Self>, replacement: Self) -> MyProjReplace; -/// # } -/// ``` -/// -/// Also, note that the projection types returned by `project` and `project_ref` have -/// an additional lifetime at the beginning of generics. -/// -/// ```text -/// let this: EnumProj<'_, T> = self.project(); -/// ^^ -/// ``` -/// -/// The visibility of the projected types and projection methods is based on the -/// original type. However, if the visibility of the original type is `pub`, the -/// visibility of the projected types and the projection methods is downgraded -/// to `pub(crate)`. -/// -/// # Safety -/// -/// `pin_project!` macro guarantees safety in much the same way as [pin-project] crate. -/// Both are completely safe unless you write other unsafe code. -/// -/// See [pin-project] crate for more details. -/// -/// # Examples -/// -/// ```rust -/// use std::pin::Pin; -/// -/// use pin_project_lite::pin_project; -/// -/// pin_project! 
{ -/// struct Struct { -/// #[pin] -/// pinned: T, -/// unpinned: U, -/// } -/// } -/// -/// impl Struct { -/// fn method(self: Pin<&mut Self>) { -/// let this = self.project(); -/// let _: Pin<&mut T> = this.pinned; // Pinned reference to the field -/// let _: &mut U = this.unpinned; // Normal reference to the field -/// } -/// } -/// ``` -/// -/// To use `pin_project!` on enums, you need to name the projection type -/// returned from the method. -/// -/// ```rust -/// use std::pin::Pin; -/// -/// use pin_project_lite::pin_project; -/// -/// pin_project! { -/// #[project = EnumProj] -/// enum Enum { -/// Struct { -/// #[pin] -/// field: T, -/// }, -/// Unit, -/// } -/// } -/// -/// impl Enum { -/// fn method(self: Pin<&mut Self>) { -/// match self.project() { -/// EnumProj::Struct { field } => { -/// let _: Pin<&mut T> = field; -/// } -/// EnumProj::Unit => {} -/// } -/// } -/// } -/// ``` -/// -/// If you want to call the `project()` method multiple times or later use the -/// original [`Pin`] type, it needs to use [`.as_mut()`][`Pin::as_mut`] to avoid -/// consuming the [`Pin`]. -/// -/// ```rust -/// use std::pin::Pin; -/// -/// use pin_project_lite::pin_project; -/// -/// pin_project! { -/// struct Struct { -/// #[pin] -/// field: T, -/// } -/// } -/// -/// impl Struct { -/// fn call_project_twice(mut self: Pin<&mut Self>) { -/// // `project` consumes `self`, so reborrow the `Pin<&mut Self>` via `as_mut`. -/// self.as_mut().project(); -/// self.as_mut().project(); -/// } -/// } -/// ``` -/// -/// # `!Unpin` -/// -/// If you want to make sure `Unpin` is not implemented, use the `#[project(!Unpin)]` -/// attribute. -/// -/// ``` -/// use pin_project_lite::pin_project; -/// -/// pin_project! { -/// #[project(!Unpin)] -/// struct Struct { -/// #[pin] -/// field: T, -/// } -/// } -/// ``` -/// -/// This is equivalent to using `#[pin]` attribute for a [`PhantomPinned`] field. 
-/// -/// ```rust -/// use std::marker::PhantomPinned; -/// -/// use pin_project_lite::pin_project; -/// -/// pin_project! { -/// struct Struct { -/// field: T, -/// #[pin] -/// _pin: PhantomPinned, -/// } -/// } -/// ``` -/// -/// Note that using [`PhantomPinned`] without `#[pin]` or `#[project(!Unpin)]` -/// attribute has no effect. -/// -/// [`PhantomPinned`]: core::marker::PhantomPinned -/// [`Pin::as_mut`]: core::pin::Pin::as_mut -/// [`Pin`]: core::pin::Pin -/// [pin-project]: https://github.com/taiki-e/pin-project -#[macro_export] -macro_rules! pin_project { - ($($tt:tt)*) => { - $crate::__pin_project_internal! { - [][][][][] - $($tt)* - } - }; -} - -// limitations: -// - no support for tuple structs and tuple variant (wontfix). -// - no support for multiple trait/lifetime bounds. -// - no support for `Self` in where clauses. (wontfix) -// - no support for overlapping lifetime names. (wontfix) -// - no interoperability with other field attributes. -// - no useful error messages. (wontfix) -// etc... - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_expand { - ( - [$($proj_mut_ident:ident)?] - [$($proj_ref_ident:ident)?] - [$($proj_replace_ident:ident)?] - [$($proj_not_unpin_mark:ident)?] - [$proj_vis:vis] - [$(#[$attrs:meta])* $vis:vis $struct_ty_ident:ident $ident:ident] - [$($def_generics:tt)*] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - { - $($body_data:tt)* - } - $($(#[$drop_impl_attrs:meta])* impl $($pinned_drop:tt)*)? - ) => { - $crate::__pin_project_reconstruct! { - [$(#[$attrs])* $vis $struct_ty_ident $ident] - [$($def_generics)*] [$($impl_generics)*] - [$($ty_generics)*] [$(where $($where_clause)*)?] - { - $($body_data)* - } - } - - $crate::__pin_project_make_proj_ty! { - [$($proj_mut_ident)?] - [$proj_vis $struct_ty_ident $ident] - [__pin_project_make_proj_field_mut] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] 
- { - $($body_data)* - } - } - $crate::__pin_project_make_proj_ty! { - [$($proj_ref_ident)?] - [$proj_vis $struct_ty_ident $ident] - [__pin_project_make_proj_field_ref] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - { - $($body_data)* - } - } - $crate::__pin_project_make_proj_replace_ty! { - [$($proj_replace_ident)?] - [$proj_vis $struct_ty_ident] - [__pin_project_make_proj_field_replace] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - { - $($body_data)* - } - } - - $crate::__pin_project_constant! { - [$(#[$attrs])* $vis $struct_ty_ident $ident] - [$($proj_mut_ident)?] [$($proj_ref_ident)?] [$($proj_replace_ident)?] - [$($proj_not_unpin_mark)?] - [$proj_vis] - [$($def_generics)*] [$($impl_generics)*] - [$($ty_generics)*] [$(where $($where_clause)*)?] - { - $($body_data)* - } - $($(#[$drop_impl_attrs])* impl $($pinned_drop)*)? - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_constant { - ( - [$(#[$attrs:meta])* $vis:vis struct $ident:ident] - [$($proj_mut_ident:ident)?] [$($proj_ref_ident:ident)?] [$($proj_replace_ident:ident)?] - [$($proj_not_unpin_mark:ident)?] - [$proj_vis:vis] - [$($def_generics:tt)*] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident: $field_ty:ty - ),+ $(,)? - } - $($(#[$drop_impl_attrs:meta])* impl $($pinned_drop:tt)*)? - ) => { - #[allow(explicit_outlives_requirements)] // https://github.com/rust-lang/rust/issues/60993 - #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 - // This lint warns of `clippy::*` generated by external macros. - // We allow this lint for compatibility with older compilers. - #[allow(clippy::unknown_clippy_lints)] - #[allow(clippy::redundant_pub_crate)] // This lint warns `pub(crate)` field in private struct. - #[allow(clippy::used_underscore_binding)] - const _: () = { - $crate::__pin_project_make_proj_ty! 
{ - [$($proj_mut_ident)? Projection] - [$proj_vis struct $ident] - [__pin_project_make_proj_field_mut] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - { - $( - $(#[$pin])? - $field_vis $field: $field_ty - ),+ - } - } - $crate::__pin_project_make_proj_ty! { - [$($proj_ref_ident)? ProjectionRef] - [$proj_vis struct $ident] - [__pin_project_make_proj_field_ref] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - { - $( - $(#[$pin])? - $field_vis $field: $field_ty - ),+ - } - } - - impl <$($impl_generics)*> $ident <$($ty_generics)*> - $(where - $($where_clause)*)? - { - $crate::__pin_project_struct_make_proj_method! { - [$($proj_mut_ident)? Projection] - [$proj_vis] - [project get_unchecked_mut mut] - [$($ty_generics)*] - { - $( - $(#[$pin])? - $field_vis $field - ),+ - } - } - $crate::__pin_project_struct_make_proj_method! { - [$($proj_ref_ident)? ProjectionRef] - [$proj_vis] - [project_ref get_ref] - [$($ty_generics)*] - { - $( - $(#[$pin])? - $field_vis $field - ),+ - } - } - $crate::__pin_project_struct_make_proj_replace_method! { - [$($proj_replace_ident)?] - [$proj_vis] - [ProjectionReplace] - [$($ty_generics)*] - { - $( - $(#[$pin])? - $field_vis $field - ),+ - } - } - } - - $crate::__pin_project_make_unpin_impl! { - [$($proj_not_unpin_mark)?] - [$vis $ident] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - $( - $field: $crate::__pin_project_make_unpin_bound!( - $(#[$pin])? $field_ty - ) - ),+ - } - - $crate::__pin_project_make_drop_impl! { - [$ident] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - $($(#[$drop_impl_attrs])* impl $($pinned_drop)*)? - } - - // Ensure that it's impossible to use pin projections on a #[repr(packed)] struct. - // - // Taking a reference to a packed field is UB, and applying - // `#[forbid(unaligned_references)]` makes sure that doing this is a hard error. 
- // - // If the struct ends up having #[repr(packed)] applied somehow, - // this will generate an (unfriendly) error message. Under all reasonable - // circumstances, we'll detect the #[repr(packed)] attribute, and generate - // a much nicer error above. - // - // See https://github.com/taiki-e/pin-project/pull/34 for more details. - // - // Note: - // - Lint-based tricks aren't perfect, but they're much better than nothing: - // https://github.com/taiki-e/pin-project-lite/issues/26 - // - // - Enable both unaligned_references and safe_packed_borrows lints - // because unaligned_references lint does not exist in older compilers: - // https://github.com/taiki-e/pin-project-lite/pull/55 - // https://github.com/rust-lang/rust/pull/82525 - #[forbid(unaligned_references, safe_packed_borrows)] - fn __assert_not_repr_packed <$($impl_generics)*> (this: &$ident <$($ty_generics)*>) - $(where - $($where_clause)*)? - { - $( - let _ = &this.$field; - )+ - } - }; - }; - ( - [$(#[$attrs:meta])* $vis:vis enum $ident:ident] - [$($proj_mut_ident:ident)?] [$($proj_ref_ident:ident)?] [$($proj_replace_ident:ident)?] - [$($proj_not_unpin_mark:ident)?] - [$proj_vis:vis] - [$($def_generics:tt)*] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - { - $( - $(#[$variant_attrs:meta])* - $variant:ident $({ - $( - $(#[$pin:ident])? - $field:ident: $field_ty:ty - ),+ $(,)? - })? - ),+ $(,)? - } - $($(#[$drop_impl_attrs:meta])* impl $($pinned_drop:tt)*)? - ) => { - #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 - // This lint warns of `clippy::*` generated by external macros. - // We allow this lint for compatibility with older compilers. - #[allow(clippy::unknown_clippy_lints)] - #[allow(clippy::used_underscore_binding)] - const _: () = { - impl <$($impl_generics)*> $ident <$($ty_generics)*> - $(where - $($where_clause)*)? - { - $crate::__pin_project_enum_make_proj_method! { - [$($proj_mut_ident)?] 
- [$proj_vis] - [project get_unchecked_mut mut] - [$($ty_generics)*] - { - $( - $variant $({ - $( - $(#[$pin])? - $field - ),+ - })? - ),+ - } - } - $crate::__pin_project_enum_make_proj_method! { - [$($proj_ref_ident)?] - [$proj_vis] - [project_ref get_ref] - [$($ty_generics)*] - { - $( - $variant $({ - $( - $(#[$pin])? - $field - ),+ - })? - ),+ - } - } - $crate::__pin_project_enum_make_proj_replace_method! { - [$($proj_replace_ident)?] - [$proj_vis] - [$($ty_generics)*] - { - $( - $variant $({ - $( - $(#[$pin])? - $field - ),+ - })? - ),+ - } - } - } - - $crate::__pin_project_make_unpin_impl! { - [$($proj_not_unpin_mark)?] - [$vis $ident] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - $( - $variant: ($( - $( - $crate::__pin_project_make_unpin_bound!( - $(#[$pin])? $field_ty - ) - ),+ - )?) - ),+ - } - - $crate::__pin_project_make_drop_impl! { - [$ident] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - $($(#[$drop_impl_attrs])* impl $($pinned_drop)*)? - } - - // We don't need to check for '#[repr(packed)]', - // since it does not apply to enums. - }; - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_reconstruct { - ( - [$(#[$attrs:meta])* $vis:vis struct $ident:ident] - [$($def_generics:tt)*] [$($impl_generics:tt)*] - [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident: $field_ty:ty - ),+ $(,)? - } - ) => { - $(#[$attrs])* - $vis struct $ident $($def_generics)* - $(where - $($where_clause)*)? - { - $( - $field_vis $field: $field_ty - ),+ - } - }; - ( - [$(#[$attrs:meta])* $vis:vis enum $ident:ident] - [$($def_generics:tt)*] [$($impl_generics:tt)*] - [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - { - $( - $(#[$variant_attrs:meta])* - $variant:ident $({ - $( - $(#[$pin:ident])? - $field:ident: $field_ty:ty - ),+ $(,)? - })? - ),+ $(,)? 
- } - ) => { - $(#[$attrs])* - $vis enum $ident $($def_generics)* - $(where - $($where_clause)*)? - { - $( - $(#[$variant_attrs])* - $variant $({ - $( - $field: $field_ty - ),+ - })? - ),+ - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_proj_ty { - ([] $($field:tt)*) => {}; - ( - [$proj_ty_ident:ident $default_ident:ident] - [$proj_vis:vis struct $ident:ident] - $($field:tt)* - ) => {}; - ( - [$proj_ty_ident:ident] - [$proj_vis:vis struct $ident:ident] - [$__pin_project_make_proj_field:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident: $field_ty:ty - ),+ $(,)? - } - ) => { - $crate::__pin_project_make_proj_ty_body! { - [$proj_ty_ident] - [$proj_vis struct $ident] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - [ - $( - $field_vis $field: $crate::$__pin_project_make_proj_field!( - $(#[$pin])? $field_ty - ) - ),+ - ] - } - }; - ( - [$proj_ty_ident:ident] - [$proj_vis:vis enum $ident:ident] - [$__pin_project_make_proj_field:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - { - $( - $(#[$variant_attrs:meta])* - $variant:ident $({ - $( - $(#[$pin:ident])? - $field:ident: $field_ty:ty - ),+ $(,)? - })? - ),+ $(,)? - } - ) => { - $crate::__pin_project_make_proj_ty_body! { - [$proj_ty_ident] - [$proj_vis enum $ident] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - [ - $( - $variant $({ - $( - $field: $crate::$__pin_project_make_proj_field!( - $(#[$pin])? $field_ty - ) - ),+ - })? - ),+ - ] - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_proj_ty_body { - ( - [$proj_ty_ident:ident] - [$proj_vis:vis $struct_ty_ident:ident $ident:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] 
- [$($body_data:tt)+] - ) => { - #[doc(hidden)] // Workaround for rustc bug: see https://github.com/taiki-e/pin-project-lite/issues/77#issuecomment-1671540180 for more. - #[allow(dead_code)] // This lint warns unused fields/variants. - #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 - // This lint warns of `clippy::*` generated by external macros. - // We allow this lint for compatibility with older compilers. - #[allow(clippy::unknown_clippy_lints)] - #[allow(clippy::mut_mut)] // This lint warns `&mut &mut `. (only needed for project) - #[allow(clippy::redundant_pub_crate)] // This lint warns `pub(crate)` field in private struct. - #[allow(clippy::ref_option_ref)] // This lint warns `&Option<&>`. (only needed for project_ref) - #[allow(clippy::type_repetition_in_bounds)] // https://github.com/rust-lang/rust-clippy/issues/4326 - $proj_vis $struct_ty_ident $proj_ty_ident <'__pin, $($impl_generics)*> - where - $ident <$($ty_generics)*>: '__pin - $(, $($where_clause)*)? - { - $($body_data)+ - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_proj_replace_ty { - ([] $($field:tt)*) => {}; - ( - [$proj_ty_ident:ident] - [$proj_vis:vis struct] - [$__pin_project_make_proj_field:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident: $field_ty:ty - ),+ $(,)? - } - ) => { - $crate::__pin_project_make_proj_replace_ty_body! { - [$proj_ty_ident] - [$proj_vis struct] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - [ - $( - $field_vis $field: $crate::$__pin_project_make_proj_field!( - $(#[$pin])? $field_ty - ) - ),+ - ] - } - }; - ( - [$proj_ty_ident:ident] - [$proj_vis:vis enum] - [$__pin_project_make_proj_field:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - { - $( - $(#[$variant_attrs:meta])* - $variant:ident $({ - $( - $(#[$pin:ident])? 
- $field:ident: $field_ty:ty - ),+ $(,)? - })? - ),+ $(,)? - } - ) => { - $crate::__pin_project_make_proj_replace_ty_body! { - [$proj_ty_ident] - [$proj_vis enum] - [$($impl_generics)*] [$($ty_generics)*] [$(where $($where_clause)*)?] - [ - $( - $variant $({ - $( - $field: $crate::$__pin_project_make_proj_field!( - $(#[$pin])? $field_ty - ) - ),+ - })? - ),+ - ] - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_proj_replace_ty_body { - ( - [$proj_ty_ident:ident] - [$proj_vis:vis $struct_ty_ident:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - [$($body_data:tt)+] - ) => { - #[doc(hidden)] // Workaround for rustc bug: see https://github.com/taiki-e/pin-project-lite/issues/77#issuecomment-1671540180 for more. - #[allow(dead_code)] // This lint warns unused fields/variants. - #[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058 - #[allow(clippy::mut_mut)] // This lint warns `&mut &mut `. (only needed for project) - #[allow(clippy::redundant_pub_crate)] // This lint warns `pub(crate)` field in private struct. - #[allow(clippy::type_repetition_in_bounds)] // https://github.com/rust-lang/rust-clippy/issues/4326 - $proj_vis $struct_ty_ident $proj_ty_ident <$($impl_generics)*> - where - $($($where_clause)*)? - { - $($body_data)+ - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_proj_replace_block { - ( - [$($proj_path:tt)+] - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident - ),+ - } - ) => { - let result = $($proj_path)* { - $( - $field: $crate::__pin_project_make_replace_field_proj!( - $(#[$pin])? $field - ) - ),+ - }; - - { - ( $( - $crate::__pin_project_make_unsafe_drop_in_place_guard!( - $(#[$pin])? $field - ), - )* ); - } - - result - }; - ([$($proj_path:tt)+]) => { $($proj_path)* }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! 
__pin_project_struct_make_proj_method { - ([] $($variant:tt)*) => {}; - ( - [$proj_ty_ident:ident $_ignored_default_arg:ident] - [$proj_vis:vis] - [$method_ident:ident $get_method:ident $($mut:ident)?] - [$($ty_generics:tt)*] - $($variant:tt)* - ) => { - $crate::__pin_project_struct_make_proj_method! { - [$proj_ty_ident] - [$proj_vis] - [$method_ident $get_method $($mut)?] - [$($ty_generics)*] - $($variant)* - } - }; - ( - [$proj_ty_ident:ident] - [$proj_vis:vis] - [$method_ident:ident $get_method:ident $($mut:ident)?] - [$($ty_generics:tt)*] - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident - ),+ - } - ) => { - #[doc(hidden)] // Workaround for rustc bug: see https://github.com/taiki-e/pin-project-lite/issues/77#issuecomment-1671540180 for more. - #[inline] - $proj_vis fn $method_ident<'__pin>( - self: $crate::__private::Pin<&'__pin $($mut)? Self>, - ) -> $proj_ty_ident <'__pin, $($ty_generics)*> { - unsafe { - let Self { $($field),* } = self.$get_method(); - $proj_ty_ident { - $( - $field: $crate::__pin_project_make_unsafe_field_proj!( - $(#[$pin])? $field - ) - ),+ - } - } - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_struct_make_proj_replace_method { - ([] $($field:tt)*) => {}; - ( - [$proj_ty_ident:ident] - [$proj_vis:vis] - [$_proj_ty_ident:ident] - [$($ty_generics:tt)*] - { - $( - $(#[$pin:ident])? - $field_vis:vis $field:ident - ),+ - } - ) => { - #[doc(hidden)] // Workaround for rustc bug: see https://github.com/taiki-e/pin-project-lite/issues/77#issuecomment-1671540180 for more. - #[inline] - $proj_vis fn project_replace( - self: $crate::__private::Pin<&mut Self>, - replacement: Self, - ) -> $proj_ty_ident <$($ty_generics)*> { - unsafe { - let __self_ptr: *mut Self = self.get_unchecked_mut(); - - // Destructors will run in reverse order, so next create a guard to overwrite - // `self` with the replacement value without calling destructors. 
- let __guard = $crate::__private::UnsafeOverwriteGuard::new(__self_ptr, replacement); - - let Self { $($field),* } = &mut *__self_ptr; - - $crate::__pin_project_make_proj_replace_block! { - [$proj_ty_ident] - { - $( - $(#[$pin])? - $field - ),+ - } - } - } - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_enum_make_proj_method { - ([] $($variant:tt)*) => {}; - ( - [$proj_ty_ident:ident] - [$proj_vis:vis] - [$method_ident:ident $get_method:ident $($mut:ident)?] - [$($ty_generics:tt)*] - { - $( - $variant:ident $({ - $( - $(#[$pin:ident])? - $field:ident - ),+ - })? - ),+ - } - ) => { - #[doc(hidden)] // Workaround for rustc bug: see https://github.com/taiki-e/pin-project-lite/issues/77#issuecomment-1671540180 for more. - #[inline] - $proj_vis fn $method_ident<'__pin>( - self: $crate::__private::Pin<&'__pin $($mut)? Self>, - ) -> $proj_ty_ident <'__pin, $($ty_generics)*> { - unsafe { - match self.$get_method() { - $( - Self::$variant $({ - $($field),+ - })? => { - $proj_ty_ident::$variant $({ - $( - $field: $crate::__pin_project_make_unsafe_field_proj!( - $(#[$pin])? $field - ) - ),+ - })? - } - ),+ - } - } - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_enum_make_proj_replace_method { - ([] $($field:tt)*) => {}; - ( - [$proj_ty_ident:ident] - [$proj_vis:vis] - [$($ty_generics:tt)*] - { - $( - $variant:ident $({ - $( - $(#[$pin:ident])? - $field:ident - ),+ - })? - ),+ - } - ) => { - #[doc(hidden)] // Workaround for rustc bug: see https://github.com/taiki-e/pin-project-lite/issues/77#issuecomment-1671540180 for more. - #[inline] - $proj_vis fn project_replace( - self: $crate::__private::Pin<&mut Self>, - replacement: Self, - ) -> $proj_ty_ident <$($ty_generics)*> { - unsafe { - let __self_ptr: *mut Self = self.get_unchecked_mut(); - - // Destructors will run in reverse order, so next create a guard to overwrite - // `self` with the replacement value without calling destructors. 
- let __guard = $crate::__private::UnsafeOverwriteGuard::new(__self_ptr, replacement); - - match &mut *__self_ptr { - $( - Self::$variant $({ - $($field),+ - })? => { - $crate::__pin_project_make_proj_replace_block! { - [$proj_ty_ident :: $variant] - $({ - $( - $(#[$pin])? - $field - ),+ - })? - } - } - ),+ - } - } - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_unpin_impl { - ( - [] - [$vis:vis $ident:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - $($field:tt)* - ) => { - // Automatically create the appropriate conditional `Unpin` implementation. - // - // Basically this is equivalent to the following code: - // ```rust - // impl Unpin for Struct where T: Unpin {} - // ``` - // - // However, if struct is public and there is a private type field, - // this would cause an E0446 (private type in public interface). - // - // When RFC 2145 is implemented (rust-lang/rust#48054), - // this will become a lint, rather then a hard error. - // - // As a workaround for this, we generate a new struct, containing all of the pinned - // fields from our #[pin_project] type. This struct is declared within - // a function, which makes it impossible to be named by user code. - // This guarantees that it will use the default auto-trait impl for Unpin - - // that is, it will implement Unpin iff all of its fields implement Unpin. - // This type can be safely declared as 'public', satisfying the privacy - // checker without actually allowing user code to access it. - // - // This allows users to apply the #[pin_project] attribute to types - // regardless of the privacy of the types of their fields. - // - // See also https://github.com/taiki-e/pin-project/pull/53. - #[allow(non_snake_case)] - $vis struct __Origin <'__pin, $($impl_generics)*> - $(where - $($where_clause)*)? 
- { - __dummy_lifetime: $crate::__private::PhantomData<&'__pin ()>, - $($field)* - } - impl <'__pin, $($impl_generics)*> $crate::__private::Unpin for $ident <$($ty_generics)*> - where - __Origin <'__pin, $($ty_generics)*>: $crate::__private::Unpin - $(, $($where_clause)*)? - { - } - }; - ( - [$proj_not_unpin_mark:ident] - [$vis:vis $ident:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - $($field:tt)* - ) => { - #[doc(hidden)] - impl <'__pin, $($impl_generics)*> $crate::__private::Unpin for $ident <$($ty_generics)*> - where - ( - ::core::marker::PhantomData<&'__pin ()>, - ::core::marker::PhantomPinned, - ): $crate::__private::Unpin - $(, $($where_clause)*)? - { - } - } -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_drop_impl { - ( - [$_ident:ident] - [$($_impl_generics:tt)*] [$($_ty_generics:tt)*] [$(where $($_where_clause:tt)*)?] - $(#[$drop_impl_attrs:meta])* - impl $(< - $( $lifetime:lifetime $(: $lifetime_bound:lifetime)? ),* $(,)? - $( $generics:ident - $(: $generics_bound:path)? - $(: ?$generics_unsized_bound:path)? - $(: $generics_lifetime_bound:lifetime)? - ),* - >)? PinnedDrop for $self_ty:ty - $(where - $( $where_clause_ty:ty - $(: $where_clause_bound:path)? - $(: ?$where_clause_unsized_bound:path)? - $(: $where_clause_lifetime_bound:lifetime)? - ),* $(,)? - )? - { - $(#[$drop_fn_attrs:meta])* - fn drop($($arg:ident)+: Pin<&mut Self>) { - $($tt:tt)* - } - } - ) => { - $(#[$drop_impl_attrs])* - impl $(< - $( $lifetime $(: $lifetime_bound)? ,)* - $( $generics - $(: $generics_bound)? - $(: ?$generics_unsized_bound)? - $(: $generics_lifetime_bound)? - ),* - >)? $crate::__private::Drop for $self_ty - $(where - $( $where_clause_ty - $(: $where_clause_bound)? - $(: ?$where_clause_unsized_bound)? - $(: $where_clause_lifetime_bound)? - ),* - )? - { - $(#[$drop_fn_attrs])* - fn drop(&mut self) { - // Implementing `__DropInner::__drop_inner` is safe, but calling it is not safe. 
- // This is because destructors can be called multiple times in safe code and - // [double dropping is unsound](https://github.com/rust-lang/rust/pull/62360). - // - // `__drop_inner` is defined as a safe method, but this is fine since - // `__drop_inner` is not accessible by the users and we call `__drop_inner` only - // once. - // - // Users can implement [`Drop`] safely using `pin_project!` and can drop a - // type that implements `PinnedDrop` using the [`drop`] function safely. - fn __drop_inner $(< - $( $lifetime $(: $lifetime_bound)? ,)* - $( $generics - $(: $generics_bound)? - $(: ?$generics_unsized_bound)? - $(: $generics_lifetime_bound)? - ),* - >)? ( - $($arg)+: $crate::__private::Pin<&mut $self_ty>, - ) - $(where - $( $where_clause_ty - $(: $where_clause_bound)? - $(: ?$where_clause_unsized_bound)? - $(: $where_clause_lifetime_bound)? - ),* - )? - { - // A dummy `__drop_inner` function to prevent users call outer `__drop_inner`. - fn __drop_inner() {} - $($tt)* - } - - // Safety - we're in 'drop', so we know that 'self' will - // never move again. - let pinned_self: $crate::__private::Pin<&mut Self> - = unsafe { $crate::__private::Pin::new_unchecked(self) }; - // We call `__drop_inner` only once. Since `__DropInner::__drop_inner` - // is not accessible by the users, it is never called again. - __drop_inner(pinned_self); - } - } - }; - ( - [$ident:ident] - [$($impl_generics:tt)*] [$($ty_generics:tt)*] [$(where $($where_clause:tt)*)?] - ) => { - // Ensure that struct does not implement `Drop`. - // - // There are two possible cases: - // 1. The user type does not implement Drop. In this case, - // the first blanked impl will not apply to it. This code - // will compile, as there is only one impl of MustNotImplDrop for the user type - // 2. The user type does impl Drop. This will make the blanket impl applicable, - // which will then conflict with the explicit MustNotImplDrop impl below. 
- // This will result in a compilation error, which is exactly what we want. - trait MustNotImplDrop {} - #[allow(clippy::drop_bounds, drop_bounds)] - impl MustNotImplDrop for T {} - impl <$($impl_generics)*> MustNotImplDrop for $ident <$($ty_generics)*> - $(where - $($where_clause)*)? - { - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_unpin_bound { - (#[pin] $field_ty:ty) => { - $field_ty - }; - ($field_ty:ty) => { - $crate::__private::AlwaysUnpin<$field_ty> - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_unsafe_field_proj { - (#[pin] $field:ident) => { - $crate::__private::Pin::new_unchecked($field) - }; - ($field:ident) => { - $field - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_replace_field_proj { - (#[pin] $field:ident) => { - $crate::__private::PhantomData - }; - ($field:ident) => { - $crate::__private::ptr::read($field) - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_unsafe_drop_in_place_guard { - (#[pin] $field:ident) => { - $crate::__private::UnsafeDropInPlaceGuard::new($field) - }; - ($field:ident) => { - () - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_proj_field_mut { - (#[pin] $field_ty:ty) => { - $crate::__private::Pin<&'__pin mut ($field_ty)> - }; - ($field_ty:ty) => { - &'__pin mut ($field_ty) - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_proj_field_ref { - (#[pin] $field_ty:ty) => { - $crate::__private::Pin<&'__pin ($field_ty)> - }; - ($field_ty:ty) => { - &'__pin ($field_ty) - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_make_proj_field_replace { - (#[pin] $field_ty:ty) => { - $crate::__private::PhantomData<$field_ty> - }; - ($field_ty:ty) => { - $field_ty - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_internal { - // parsing proj_mut_ident - ( - [] - [$($proj_ref_ident:ident)?] - [$($proj_replace_ident:ident)?] - [$( ! 
$proj_not_unpin_mark:ident)?] - [$($attrs:tt)*] - - #[project = $proj_mut_ident:ident] - $($tt:tt)* - ) => { - $crate::__pin_project_internal! { - [$proj_mut_ident] - [$($proj_ref_ident)?] - [$($proj_replace_ident)?] - [$( ! $proj_not_unpin_mark)?] - [$($attrs)*] - $($tt)* - } - }; - // parsing proj_ref_ident - ( - [$($proj_mut_ident:ident)?] - [] - [$($proj_replace_ident:ident)?] - [$( ! $proj_not_unpin_mark:ident)?] - [$($attrs:tt)*] - - #[project_ref = $proj_ref_ident:ident] - $($tt:tt)* - ) => { - $crate::__pin_project_internal! { - [$($proj_mut_ident)?] - [$proj_ref_ident] - [$($proj_replace_ident)?] - [$( ! $proj_not_unpin_mark)?] - [$($attrs)*] - $($tt)* - } - }; - // parsing proj_replace_ident - ( - [$($proj_mut_ident:ident)?] - [$($proj_ref_ident:ident)?] - [] - [$( ! $proj_not_unpin_mark:ident)?] - [$($attrs:tt)*] - - #[project_replace = $proj_replace_ident:ident] - $($tt:tt)* - ) => { - $crate::__pin_project_internal! { - [$($proj_mut_ident)?] - [$($proj_ref_ident)?] - [$proj_replace_ident] - [$( ! $proj_not_unpin_mark)?] - [$($attrs)*] - $($tt)* - } - }; - // parsing !Unpin - ( - [$($proj_mut_ident:ident)?] - [$($proj_ref_ident:ident)?] - [$($proj_replace_ident:ident)?] - [] - [$($attrs:tt)*] - - #[project( ! $proj_not_unpin_mark:ident)] - $($tt:tt)* - ) => { - $crate::__pin_project_internal! { - [$($proj_mut_ident)?] - [$($proj_ref_ident)?] - [$($proj_replace_ident)?] - [ ! $proj_not_unpin_mark] - [$($attrs)*] - $($tt)* - } - }; - // this is actually part of a recursive step that picks off a single non-`pin_project_lite` attribute - // there could be more to parse - ( - [$($proj_mut_ident:ident)?] - [$($proj_ref_ident:ident)?] - [$($proj_replace_ident:ident)?] - [$( ! $proj_not_unpin_mark:ident)?] - [$($attrs:tt)*] - - #[$($attr:tt)*] - $($tt:tt)* - ) => { - $crate::__pin_project_internal! { - [$($proj_mut_ident)?] - [$($proj_ref_ident)?] - [$($proj_replace_ident)?] - [$( ! $proj_not_unpin_mark)?] 
- [$($attrs)* #[$($attr)*]] - $($tt)* - } - }; - // now determine visibility - // if public, downgrade - ( - [$($proj_mut_ident:ident)?] - [$($proj_ref_ident:ident)?] - [$($proj_replace_ident:ident)?] - [$( ! $proj_not_unpin_mark:ident)?] - [$($attrs:tt)*] - pub $struct_ty_ident:ident $ident:ident - $($tt:tt)* - ) => { - $crate::__pin_project_parse_generics! { - [$($proj_mut_ident)?] - [$($proj_ref_ident)?] - [$($proj_replace_ident)?] - [$($proj_not_unpin_mark)?] - [$($attrs)*] - [pub $struct_ty_ident $ident pub(crate)] - $($tt)* - } - }; - ( - [$($proj_mut_ident:ident)?] - [$($proj_ref_ident:ident)?] - [$($proj_replace_ident:ident)?] - [$( ! $proj_not_unpin_mark:ident)?] - [$($attrs:tt)*] - $vis:vis $struct_ty_ident:ident $ident:ident - $($tt:tt)* - ) => { - $crate::__pin_project_parse_generics! { - [$($proj_mut_ident)?] - [$($proj_ref_ident)?] - [$($proj_replace_ident)?] - [$($proj_not_unpin_mark)?] - [$($attrs)*] - [$vis $struct_ty_ident $ident $vis] - $($tt)* - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pin_project_parse_generics { - ( - [$($proj_mut_ident:ident)?] - [$($proj_ref_ident:ident)?] - [$($proj_replace_ident:ident)?] - [$($proj_not_unpin_mark:ident)?] - [$($attrs:tt)*] - [$vis:vis $struct_ty_ident:ident $ident:ident $proj_vis:vis] - $(< - $( $lifetime:lifetime $(: $lifetime_bound:lifetime)? ),* $(,)? - $( $generics:ident - $(: $generics_bound:path)? - $(: ?$generics_unsized_bound:path)? - $(: $generics_lifetime_bound:lifetime)? - $(= $generics_default:ty)? - ),* $(,)? - >)? - $(where - $( $where_clause_ty:ty - $(: $where_clause_bound:path)? - $(: ?$where_clause_unsized_bound:path)? - $(: $where_clause_lifetime_bound:lifetime)? - ),* $(,)? - )? - { - $($body_data:tt)* - } - $($(#[$drop_impl_attrs:meta])* impl $($pinned_drop:tt)*)? - ) => { - $crate::__pin_project_expand! { - [$($proj_mut_ident)?] - [$($proj_ref_ident)?] - [$($proj_replace_ident)?] - [$($proj_not_unpin_mark)?] 
- [$proj_vis] - [$($attrs)* $vis $struct_ty_ident $ident] - [$(< - $( $lifetime $(: $lifetime_bound)? ,)* - $( $generics - $(: $generics_bound)? - $(: ?$generics_unsized_bound)? - $(: $generics_lifetime_bound)? - $(= $generics_default)? - ),* - >)?] - [$( - $( $lifetime $(: $lifetime_bound)? ,)* - $( $generics - $(: $generics_bound)? - $(: ?$generics_unsized_bound)? - $(: $generics_lifetime_bound)? - ),* - )?] - [$( $( $lifetime ,)* $( $generics ),* )?] - [$(where $( $where_clause_ty - $(: $where_clause_bound)? - $(: ?$where_clause_unsized_bound)? - $(: $where_clause_lifetime_bound)? - ),* )?] - { - $($body_data)* - } - $($(#[$drop_impl_attrs])* impl $($pinned_drop)*)? - } - }; -} - -#[doc(hidden)] -pub mod __private { - use core::mem::ManuallyDrop; - #[doc(hidden)] - pub use core::{ - marker::{PhantomData, Unpin}, - ops::Drop, - pin::Pin, - ptr, - }; - - // This is an internal helper struct used by `pin_project!`. - #[doc(hidden)] - pub struct AlwaysUnpin(PhantomData); - - impl Unpin for AlwaysUnpin {} - - // This is an internal helper used to ensure a value is dropped. - #[doc(hidden)] - pub struct UnsafeDropInPlaceGuard(*mut T); - - impl UnsafeDropInPlaceGuard { - #[doc(hidden)] - pub unsafe fn new(ptr: *mut T) -> Self { - Self(ptr) - } - } - - impl Drop for UnsafeDropInPlaceGuard { - fn drop(&mut self) { - // SAFETY: the caller of `UnsafeDropInPlaceGuard::new` must guarantee - // that `ptr` is valid for drop when this guard is destructed. - unsafe { - ptr::drop_in_place(self.0); - } - } - } - - // This is an internal helper used to ensure a value is overwritten without - // its destructor being called. 
- #[doc(hidden)] - pub struct UnsafeOverwriteGuard { - target: *mut T, - value: ManuallyDrop, - } - - impl UnsafeOverwriteGuard { - #[doc(hidden)] - pub unsafe fn new(target: *mut T, value: T) -> Self { - Self { target, value: ManuallyDrop::new(value) } - } - } - - impl Drop for UnsafeOverwriteGuard { - fn drop(&mut self) { - // SAFETY: the caller of `UnsafeOverwriteGuard::new` must guarantee - // that `target` is valid for writes when this guard is destructed. - unsafe { - ptr::write(self.target, ptr::read(&*self.value)); - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/pin-utils/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/pin-utils/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/pin-utils/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-utils/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/pin-utils/Cargo.toml s390-tools-2.33.1/rust-vendor/pin-utils/Cargo.toml --- s390-tools-2.31.0/rust-vendor/pin-utils/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-utils/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "pin-utils" -version = "0.1.0" -authors = ["Josef Brandl "] -description = "Utilities for pinning\n" -documentation = "https://docs.rs/pin-utils" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang-nursery/pin-utils" diff -Nru s390-tools-2.31.0/rust-vendor/pin-utils/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/pin-utils/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/pin-utils/LICENSE-APACHE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-utils/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright 2018 The pin-utils authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/pin-utils/LICENSE-MIT s390-tools-2.33.1/rust-vendor/pin-utils/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/pin-utils/LICENSE-MIT 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-utils/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2018 The pin-utils authors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/pin-utils/README.md s390-tools-2.33.1/rust-vendor/pin-utils/README.md --- s390-tools-2.31.0/rust-vendor/pin-utils/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-utils/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,42 +0,0 @@ -# pin-utils - -Utilities for pinning - -[![Build Status](https://travis-ci.com/rust-lang-nursery/pin-utils.svg?branch=master)](https://travis-ci.com/rust-lang-nursery/pin-utils) -[![Crates.io](https://img.shields.io/crates/v/pin-utils.svg)](https://crates.io/crates/pin-utils) - -[Documentation](https://docs.rs/pin-utils) - -## Usage - -First, add this to your `Cargo.toml`: - -```toml -[dependencies] -pin-utils = "0.1.0-alpha.4" -``` - -Now, you can use it: - -```rust -use pin_utils::pin_mut; // And more... -``` - -The current version of pin-utils requires Rust 1.33 or later. - -# License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in pin-utils by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/pin-utils/src/lib.rs s390-tools-2.33.1/rust-vendor/pin-utils/src/lib.rs --- s390-tools-2.31.0/rust-vendor/pin-utils/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-utils/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,17 +0,0 @@ -//! 
Utilities for pinning - -#![no_std] -#![warn(missing_docs, missing_debug_implementations)] -#![deny(bare_trait_objects)] -#![allow(unknown_lints)] -#![doc(html_root_url = "https://docs.rs/pin-utils/0.1.0")] - -#[doc(hidden)] -pub mod core_reexport { - pub use core::*; -} - -#[macro_use] -mod stack_pin; -#[macro_use] -mod projection; diff -Nru s390-tools-2.31.0/rust-vendor/pin-utils/src/projection.rs s390-tools-2.33.1/rust-vendor/pin-utils/src/projection.rs --- s390-tools-2.31.0/rust-vendor/pin-utils/src/projection.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-utils/src/projection.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,100 +0,0 @@ -/// A pinned projection of a struct field. -/// -/// # Safety -/// -/// To make using this macro safe, three things need to be ensured: -/// - If the struct implements [`Drop`], the [`drop`] method is not allowed to -/// move the value of the field. -/// - If the struct wants to implement [`Unpin`], it has to do so conditionally: -/// The struct can only implement [`Unpin`] if the field's type is [`Unpin`]. -/// - The struct must not be `#[repr(packed)]`. -/// -/// # Example -/// -/// ```rust -/// use pin_utils::unsafe_pinned; -/// use std::marker::Unpin; -/// use std::pin::Pin; -/// -/// struct Foo { -/// field: T, -/// } -/// -/// impl Foo { -/// unsafe_pinned!(field: T); -/// -/// fn baz(mut self: Pin<&mut Self>) { -/// let _: Pin<&mut T> = self.field(); // Pinned reference to the field -/// } -/// } -/// -/// impl Unpin for Foo {} // Conditional Unpin impl -/// ``` -/// -/// Note: borrowing the field multiple times requires using `.as_mut()` to -/// avoid consuming the `Pin`. -/// -/// [`Unpin`]: core::marker::Unpin -/// [`drop`]: Drop::drop -#[macro_export] -macro_rules! 
unsafe_pinned { - ($f:tt: $t:ty) => ( - #[allow(unsafe_code)] - fn $f<'__a>( - self: $crate::core_reexport::pin::Pin<&'__a mut Self> - ) -> $crate::core_reexport::pin::Pin<&'__a mut $t> { - unsafe { - $crate::core_reexport::pin::Pin::map_unchecked_mut( - self, |x| &mut x.$f - ) - } - } - ) -} - -/// An unpinned projection of a struct field. -/// -/// # Safety -/// -/// This macro is unsafe because it creates a method that returns a normal -/// non-pin reference to the struct field. It is up to the programmer to ensure -/// that the contained value can be considered not pinned in the current -/// context. -/// -/// # Example -/// -/// ```rust -/// use pin_utils::unsafe_unpinned; -/// use std::pin::Pin; -/// -/// struct Bar; -/// struct Foo { -/// field: Bar, -/// } -/// -/// impl Foo { -/// unsafe_unpinned!(field: Bar); -/// -/// fn baz(mut self: Pin<&mut Self>) { -/// let _: &mut Bar = self.field(); // Normal reference to the field -/// } -/// } -/// ``` -/// -/// Note: borrowing the field multiple times requires using `.as_mut()` to -/// avoid consuming the [`Pin`]. -/// -/// [`Pin`]: core::pin::Pin -#[macro_export] -macro_rules! unsafe_unpinned { - ($f:tt: $t:ty) => ( - #[allow(unsafe_code)] - fn $f<'__a>( - self: $crate::core_reexport::pin::Pin<&'__a mut Self> - ) -> &'__a mut $t { - unsafe { - &mut $crate::core_reexport::pin::Pin::get_unchecked_mut(self).$f - } - } - ) -} diff -Nru s390-tools-2.31.0/rust-vendor/pin-utils/src/stack_pin.rs s390-tools-2.33.1/rust-vendor/pin-utils/src/stack_pin.rs --- s390-tools-2.31.0/rust-vendor/pin-utils/src/stack_pin.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/pin-utils/src/stack_pin.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -/// Pins a value on the stack. -/// -/// # Example -/// -/// ```rust -/// # use pin_utils::pin_mut; -/// # use core::pin::Pin; -/// # struct Foo {} -/// let foo = Foo { /* ... 
*/ }; -/// pin_mut!(foo); -/// let _: Pin<&mut Foo> = foo; -/// ``` -#[macro_export] -macro_rules! pin_mut { - ($($x:ident),* $(,)?) => { $( - // Move the value to ensure that it is owned - let mut $x = $x; - // Shadow the original binding so that it can't be directly accessed - // ever again. - #[allow(unused_mut)] - let mut $x = unsafe { - $crate::core_reexport::pin::Pin::new_unchecked(&mut $x) - }; - )* } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/redox_syscall/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/redox_syscall/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/Cargo.toml s390-tools-2.33.1/rust-vendor/redox_syscall/Cargo.toml --- s390-tools-2.31.0/rust-vendor/redox_syscall/Cargo.toml 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,38 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -name = "redox_syscall" -version = "0.4.1" -authors = ["Jeremy Soller "] -description = "A Rust library to access raw Redox system calls" -documentation = "https://docs.rs/redox_syscall" -readme = "README.md" -license = "MIT" -repository = "https://gitlab.redox-os.org/redox-os/syscall" - -[lib] -name = "syscall" - -[dependencies.bitflags] -version = "1.1.0" - -[dependencies.core] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-core" - -[features] -rustc-dep-of-std = [ - "core", - "bitflags/rustc-dep-of-std", -] diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/LICENSE s390-tools-2.33.1/rust-vendor/redox_syscall/LICENSE --- s390-tools-2.31.0/rust-vendor/redox_syscall/LICENSE 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ -Copyright (c) 2017 Redox OS Developers - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/README.md s390-tools-2.33.1/rust-vendor/redox_syscall/README.md --- s390-tools-2.31.0/rust-vendor/redox_syscall/README.md 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,6 +0,0 @@ -# syscall -[Redox OS](https://gitlab.redox-os.org/redox-os/redox)'s syscall API - -[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) -[![crates.io](http://meritbadge.herokuapp.com/redox_syscall)](https://crates.io/crates/redox_syscall) -[![docs.rs](https://docs.rs/redox_syscall/badge.svg)](https://docs.rs/redox_syscall) diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/aarch64.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/aarch64.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/aarch64.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/aarch64.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,150 +0,0 @@ -use core::{mem, slice}; -use core::ops::{Deref, DerefMut}; - -use super::error::{Error, Result}; - -pub const PAGE_SIZE: usize = 4096; - -macro_rules! syscall { - ($($name:ident($a:ident, $($b:ident, $($c:ident, $($d:ident, $($e:ident, $($f:ident, )?)?)?)?)?);)+) => { - $( - pub unsafe fn $name($a: usize, $($b: usize, $($c: usize, $($d: usize, $($e: usize, $($f: usize)?)?)?)?)?) -> Result { - let ret: usize; - - core::arch::asm!( - "svc 0", - in("x8") $a, - $( - in("x0") $b, - $( - in("x1") $c, - $( - in("x2") $d, - $( - in("x3") $e, - $( - in("x4") $f, - )? - )? - )? - )? - )? - lateout("x0") ret, - options(nostack), - ); - - Error::demux(ret) - } - )+ - }; -} - -syscall! 
{ - syscall0(a,); - syscall1(a, b,); - syscall2(a, b, c,); - syscall3(a, b, c, d,); - syscall4(a, b, c, d, e,); - syscall5(a, b, c, d, e, f,); -} - -#[derive(Copy, Clone, Debug, Default)] -#[repr(C)] -pub struct IntRegisters { - pub x30: usize, - pub x29: usize, - pub x28: usize, - pub x27: usize, - pub x26: usize, - pub x25: usize, - pub x24: usize, - pub x23: usize, - pub x22: usize, - pub x21: usize, - pub x20: usize, - pub x19: usize, - pub x18: usize, - pub x17: usize, - pub x16: usize, - pub x15: usize, - pub x14: usize, - pub x13: usize, - pub x12: usize, - pub x11: usize, - pub x10: usize, - pub x9: usize, - pub x8: usize, - pub x7: usize, - pub x6: usize, - pub x5: usize, - pub x4: usize, - pub x3: usize, - pub x2: usize, - pub x1: usize, - pub x0: usize -} - -impl Deref for IntRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const IntRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for IntRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut IntRegisters as *mut u8, mem::size_of::()) - } - } -} - -#[derive(Clone, Copy, Debug, Default)] -#[repr(packed)] -pub struct FloatRegisters { - pub fp_simd_regs: [u128; 32], - pub fpsr: u32, - pub fpcr: u32 -} - -impl Deref for FloatRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const FloatRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for FloatRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut FloatRegisters as *mut u8, mem::size_of::()) - } - } -} - -#[derive(Clone, Copy, Debug, Default)] -#[repr(packed)] -pub struct EnvRegisters { - pub tpidr_el0: usize, - pub tpidrro_el0: usize, -} -impl Deref for EnvRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const EnvRegisters as *const u8, 
mem::size_of::()) - } - } -} - -impl DerefMut for EnvRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut EnvRegisters as *mut u8, mem::size_of::()) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/nonredox.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/nonredox.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/nonredox.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/nonredox.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,29 +0,0 @@ -use super::error::{Error, Result, ENOSYS}; - -// Doesn't really matter, but since we will most likely run on an x86_64 host, why not 4096? -pub const PAGE_SIZE: usize = 4096; - -pub unsafe fn syscall0(_a: usize) -> Result { - Err(Error::new(ENOSYS)) -} - -pub unsafe fn syscall1(_a: usize, _b: usize) -> Result { - Err(Error::new(ENOSYS)) -} - -pub unsafe fn syscall2(_a: usize, _b: usize, _c: usize) -> Result { - Err(Error::new(ENOSYS)) -} - -pub unsafe fn syscall3(_a: usize, _b: usize, _c: usize, _d: usize) -> Result { - Err(Error::new(ENOSYS)) -} - -pub unsafe fn syscall4(_a: usize, _b: usize, _c: usize, _d: usize, _e: usize) -> Result { - Err(Error::new(ENOSYS)) -} - -pub unsafe fn syscall5(_a: usize, _b: usize, _c: usize, _d: usize, _e: usize, _f: usize) - -> Result { - Err(Error::new(ENOSYS)) -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/riscv64.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/riscv64.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/riscv64.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/riscv64.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,93 +0,0 @@ -use core::{mem, slice}; -use core::ops::{Deref, DerefMut}; - -use super::error::{Error, Result}; - -macro_rules! 
syscall { - ($($name:ident($a:ident, $($b:ident, $($c:ident, $($d:ident, $($e:ident, $($f:ident, )?)?)?)?)?);)+) => { - $( - pub unsafe fn $name($a: usize, $($b: usize, $($c: usize, $($d: usize, $($e: usize, $($f: usize)?)?)?)?)?) -> Result { - let ret: usize; - - asm!( - "ecall", - in("a7") $a, - $( - in("a0") $b, - $( - in("a1") $c, - $( - in("a2") $d, - $( - in("a3") $e, - $( - in("a4") $f, - )? - )? - )? - )? - )? - lateout("a0") ret, - options(nostack), - ); - - Error::demux(ret) - } - )+ - }; -} - -syscall! { - syscall0(a,); - syscall1(a, b,); - syscall2(a, b, c,); - syscall3(a, b, c, d,); - syscall4(a, b, c, d, e,); - syscall5(a, b, c, d, e, f,); -} - -#[derive(Copy, Clone, Debug, Default)] -#[repr(C)] -pub struct IntRegisters { - //TODO -} - -impl Deref for IntRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const IntRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for IntRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut IntRegisters as *mut u8, mem::size_of::()) - } - } -} - -#[derive(Clone, Copy, Debug, Default)] -#[repr(packed)] -pub struct FloatRegisters { - //TODO -} - -impl Deref for FloatRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const FloatRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for FloatRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut FloatRegisters as *mut u8, mem::size_of::()) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/x86_64.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/x86_64.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/x86_64.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/x86_64.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,157 +0,0 @@ -use core::{mem, slice}; 
-use core::arch::asm; -use core::ops::{Deref, DerefMut}; - -use super::error::{Error, Result}; - -pub const PAGE_SIZE: usize = 4096; - -macro_rules! syscall { - ($($name:ident($a:ident, $($b:ident, $($c:ident, $($d:ident, $($e:ident, $($f:ident, )?)?)?)?)?);)+) => { - $( - pub unsafe fn $name(mut $a: usize, $($b: usize, $($c: usize, $($d: usize, $($e: usize, $($f: usize)?)?)?)?)?) -> Result { - asm!( - "syscall", - inout("rax") $a, - $( - in("rdi") $b, - $( - in("rsi") $c, - $( - in("rdx") $d, - $( - in("r10") $e, - $( - in("r8") $f, - )? - )? - )? - )? - )? - out("rcx") _, - out("r11") _, - options(nostack), - ); - - Error::demux($a) - } - )+ - }; -} - -syscall! { - syscall0(a,); - syscall1(a, b,); - syscall2(a, b, c,); - syscall3(a, b, c, d,); - syscall4(a, b, c, d, e,); - syscall5(a, b, c, d, e, f,); -} - -#[derive(Copy, Clone, Debug, Default)] -#[repr(C)] -pub struct IntRegisters { - // TODO: Some of these don't get set by Redox yet. Should they? - - pub r15: usize, - pub r14: usize, - pub r13: usize, - pub r12: usize, - pub rbp: usize, - pub rbx: usize, - pub r11: usize, - pub r10: usize, - pub r9: usize, - pub r8: usize, - pub rax: usize, - pub rcx: usize, - pub rdx: usize, - pub rsi: usize, - pub rdi: usize, - // pub orig_rax: usize, - pub rip: usize, - pub cs: usize, - pub rflags: usize, - pub rsp: usize, - pub ss: usize, - // pub fs_base: usize, - // pub gs_base: usize, - // pub ds: usize, - // pub es: usize, - pub fs: usize, - // pub gs: usize -} - -impl Deref for IntRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const IntRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for IntRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut IntRegisters as *mut u8, mem::size_of::()) - } - } -} - -#[derive(Clone, Copy, Debug, Default)] -#[repr(packed)] -pub struct FloatRegisters { - pub fcw: u16, - pub fsw: u16, - pub ftw: u8, - pub 
_reserved: u8, - pub fop: u16, - pub fip: u64, - pub fdp: u64, - pub mxcsr: u32, - pub mxcsr_mask: u32, - pub st_space: [u128; 8], - pub xmm_space: [u128; 16], - // TODO: YMM/ZMM -} - -impl Deref for FloatRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const FloatRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for FloatRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut FloatRegisters as *mut u8, mem::size_of::()) - } - } -} -#[derive(Clone, Copy, Debug, Default)] -#[repr(packed)] -pub struct EnvRegisters { - pub fsbase: u64, - pub gsbase: u64, - // TODO: PKRU? -} -impl Deref for EnvRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const EnvRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for EnvRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut EnvRegisters as *mut u8, mem::size_of::()) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/x86.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/x86.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/arch/x86.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/arch/x86.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,184 +0,0 @@ -use core::{mem, slice}; -use core::arch::asm; -use core::ops::{Deref, DerefMut}; - -use super::error::{Error, Result}; - -pub const PAGE_SIZE: usize = 4096; - -macro_rules! syscall { - ($($name:ident($a:ident, $($b:ident, $($c:ident, $($d:ident, $($e:ident, $($f:ident, )?)?)?)?)?);)+) => { - $( - pub unsafe fn $name(mut $a: usize, $($b: usize, $($c: usize, $($d: usize, $($e: usize, $($f: usize)?)?)?)?)?) -> Result { - asm!( - "int 0x80", - inout("eax") $a, - $( - in("ebx") $b, - $( - in("ecx") $c, - $( - in("edx") $d, - $( - in("esi") $e, - $( - in("edi") $f, - )? 
- )? - )? - )? - )? - options(nostack), - ); - - Error::demux($a) - } - )+ - }; -} - -syscall! { - syscall0(a,); - syscall1(a, b,); - syscall2(a, b, c,); - syscall3(a, b, c, d,); - // Must be done custom because LLVM reserves ESI - //syscall4(a, b, c, d, e,); - //syscall5(a, b, c, d, e, f,); -} - -pub unsafe fn syscall4(mut a: usize, b: usize, c: usize, d: usize, e: usize) - -> Result { - asm!( - "xchg esi, {e} - int 0x80 - xchg esi, {e}", - e = in(reg) e, - inout("eax") a, - in("ebx") b, - in("ecx") c, - in("edx") d, - options(nostack), - ); - - Error::demux(a) -} - -pub unsafe fn syscall5(mut a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) - -> Result { - asm!( - "xchg esi, {e} - int 0x80 - xchg esi, {e}", - e = in(reg) e, - inout("eax") a, - in("ebx") b, - in("ecx") c, - in("edx") d, - in("edi") f, - options(nostack), - ); - - Error::demux(a) -} - -#[derive(Copy, Clone, Debug, Default)] -#[repr(C)] -pub struct IntRegisters { - // TODO: Some of these don't get set by Redox yet. Should they? 
- - pub ebp: usize, - pub esi: usize, - pub edi: usize, - pub ebx: usize, - pub eax: usize, - pub ecx: usize, - pub edx: usize, - // pub orig_rax: usize, - pub eip: usize, - pub cs: usize, - pub eflags: usize, - pub esp: usize, - pub ss: usize, - // pub fs_base: usize, - // pub gs_base: usize, - // pub ds: usize, - // pub es: usize, - pub fs: usize, - // pub gs: usize -} - -impl Deref for IntRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const IntRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for IntRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut IntRegisters as *mut u8, mem::size_of::()) - } - } -} - -#[derive(Clone, Copy, Debug, Default)] -#[repr(packed)] -pub struct FloatRegisters { - pub fcw: u16, - pub fsw: u16, - pub ftw: u8, - pub _reserved: u8, - pub fop: u16, - pub fip: u64, - pub fdp: u64, - pub mxcsr: u32, - pub mxcsr_mask: u32, - pub st_space: [u128; 8], - pub xmm_space: [u128; 16], - // TODO: YMM/ZMM -} - -impl Deref for FloatRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const FloatRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for FloatRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut FloatRegisters as *mut u8, mem::size_of::()) - } - } -} - -#[derive(Clone, Copy, Debug, Default)] -#[repr(packed)] -pub struct EnvRegisters { - pub fsbase: u32, - pub gsbase: u32, -} - -impl Deref for EnvRegisters { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const EnvRegisters as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for EnvRegisters { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut EnvRegisters as *mut u8, mem::size_of::()) - } - } -} diff -Nru 
s390-tools-2.31.0/rust-vendor/redox_syscall/src/call.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/call.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/call.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/call.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,364 +0,0 @@ -use super::arch::*; -use super::data::{Map, SigAction, Stat, StatVfs, TimeSpec}; -use super::error::Result; -use super::flag::*; -use super::number::*; - -use core::{mem, ptr}; - -// Signal restorer -extern "C" fn restorer() -> ! { - sigreturn().unwrap(); - unreachable!(); -} - -/// Close a file -pub fn close(fd: usize) -> Result { - unsafe { syscall1(SYS_CLOSE, fd) } -} - -/// Get the current system time -pub fn clock_gettime(clock: usize, tp: &mut TimeSpec) -> Result { - unsafe { syscall2(SYS_CLOCK_GETTIME, clock, tp as *mut TimeSpec as usize) } -} - -/// Copy and transform a file descriptor -pub fn dup(fd: usize, buf: &[u8]) -> Result { - unsafe { syscall3(SYS_DUP, fd, buf.as_ptr() as usize, buf.len()) } -} - -/// Copy and transform a file descriptor -pub fn dup2(fd: usize, newfd: usize, buf: &[u8]) -> Result { - unsafe { syscall4(SYS_DUP2, fd, newfd, buf.as_ptr() as usize, buf.len()) } -} - -/// Exit the current process -pub fn exit(status: usize) -> Result { - unsafe { syscall1(SYS_EXIT, status) } -} - -/// Change file permissions -pub fn fchmod(fd: usize, mode: u16) -> Result { - unsafe { syscall2(SYS_FCHMOD, fd, mode as usize) } - -} - -/// Change file ownership -pub fn fchown(fd: usize, uid: u32, gid: u32) -> Result { - unsafe { syscall3(SYS_FCHOWN, fd, uid as usize, gid as usize) } - -} - -/// Change file descriptor flags -pub fn fcntl(fd: usize, cmd: usize, arg: usize) -> Result { - unsafe { syscall3(SYS_FCNTL, fd, cmd, arg) } -} - -/// Map a file into memory, but with the ability to set the address to map into, either as a hint -/// or as a requirement of the map. 
-/// -/// # Errors -/// `EACCES` - the file descriptor was not open for reading -/// `EBADF` - if the file descriptor was invalid -/// `ENODEV` - mmapping was not supported -/// `EINVAL` - invalid combination of flags -/// `EEXIST` - if [`MapFlags::MAP_FIXED`] was set, and the address specified was already in use. -/// -pub unsafe fn fmap(fd: usize, map: &Map) -> Result { - syscall3(SYS_FMAP, fd, map as *const Map as usize, mem::size_of::()) -} - -/// Unmap whole (or partial) continous memory-mapped files -pub unsafe fn funmap(addr: usize, len: usize) -> Result { - syscall2(SYS_FUNMAP, addr, len) -} - -/// Retrieve the canonical path of a file -pub fn fpath(fd: usize, buf: &mut [u8]) -> Result { - unsafe { syscall3(SYS_FPATH, fd, buf.as_mut_ptr() as usize, buf.len()) } -} - -/// Rename a file -pub fn frename>(fd: usize, path: T) -> Result { - unsafe { syscall3(SYS_FRENAME, fd, path.as_ref().as_ptr() as usize, path.as_ref().len()) } -} - -/// Get metadata about a file -pub fn fstat(fd: usize, stat: &mut Stat) -> Result { - unsafe { syscall3(SYS_FSTAT, fd, stat as *mut Stat as usize, mem::size_of::()) } -} - -/// Get metadata about a filesystem -pub fn fstatvfs(fd: usize, stat: &mut StatVfs) -> Result { - unsafe { syscall3(SYS_FSTATVFS, fd, stat as *mut StatVfs as usize, mem::size_of::()) } -} - -/// Sync a file descriptor to its underlying medium -pub fn fsync(fd: usize) -> Result { - unsafe { syscall1(SYS_FSYNC, fd) } -} - -/// Truncate or extend a file to a specified length -pub fn ftruncate(fd: usize, len: usize) -> Result { - unsafe { syscall2(SYS_FTRUNCATE, fd, len) } -} - -// Change modify and/or access times -pub fn futimens(fd: usize, times: &[TimeSpec]) -> Result { - unsafe { syscall3(SYS_FUTIMENS, fd, times.as_ptr() as usize, times.len() * mem::size_of::()) } -} - -/// Fast userspace mutex -pub unsafe fn futex(addr: *mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32) - -> Result { - syscall5(SYS_FUTEX, addr as usize, op, (val as isize) as usize, 
val2, addr2 as usize) -} - -/// Get the effective group ID -pub fn getegid() -> Result { - unsafe { syscall0(SYS_GETEGID) } -} - -/// Get the effective namespace -pub fn getens() -> Result { - unsafe { syscall0(SYS_GETENS) } -} - -/// Get the effective user ID -pub fn geteuid() -> Result { - unsafe { syscall0(SYS_GETEUID) } -} - -/// Get the current group ID -pub fn getgid() -> Result { - unsafe { syscall0(SYS_GETGID) } -} - -/// Get the current namespace -pub fn getns() -> Result { - unsafe { syscall0(SYS_GETNS) } -} - -/// Get the current process ID -pub fn getpid() -> Result { - unsafe { syscall0(SYS_GETPID) } -} - -/// Get the process group ID -pub fn getpgid(pid: usize) -> Result { - unsafe { syscall1(SYS_GETPGID, pid) } -} - -/// Get the parent process ID -pub fn getppid() -> Result { - unsafe { syscall0(SYS_GETPPID) } -} - -/// Get the current user ID -pub fn getuid() -> Result { - unsafe { syscall0(SYS_GETUID) } -} - -/// Set the I/O privilege level -/// -/// # Errors -/// -/// * `EPERM` - `uid != 0` -/// * `EINVAL` - `level > 3` -pub unsafe fn iopl(level: usize) -> Result { - syscall1(SYS_IOPL, level) -} - -/// Send a signal `sig` to the process identified by `pid` -pub fn kill(pid: usize, sig: usize) -> Result { - unsafe { syscall2(SYS_KILL, pid, sig) } -} - -/// Create a link to a file -pub unsafe fn link(old: *const u8, new: *const u8) -> Result { - syscall2(SYS_LINK, old as usize, new as usize) -} - -/// Seek to `offset` bytes in a file descriptor -pub fn lseek(fd: usize, offset: isize, whence: usize) -> Result { - unsafe { syscall3(SYS_LSEEK, fd, offset as usize, whence) } -} - -/// Make a new scheme namespace -pub fn mkns(schemes: &[[usize; 2]]) -> Result { - unsafe { syscall2(SYS_MKNS, schemes.as_ptr() as usize, schemes.len()) } -} - -/// Change mapping flags -pub unsafe fn mprotect(addr: usize, size: usize, flags: MapFlags) -> Result { - syscall3(SYS_MPROTECT, addr, size, flags.bits()) -} - -/// Sleep for the time specified in `req` -pub fn 
nanosleep(req: &TimeSpec, rem: &mut TimeSpec) -> Result { - unsafe { syscall2(SYS_NANOSLEEP, req as *const TimeSpec as usize, - rem as *mut TimeSpec as usize) } -} - -/// Open a file -pub fn open>(path: T, flags: usize) -> Result { - unsafe { syscall3(SYS_OPEN, path.as_ref().as_ptr() as usize, path.as_ref().len(), flags) } -} - -/// Allocate frames, linearly in physical memory. -/// -/// # Errors -/// -/// * `EPERM` - `uid != 0` -/// * `ENOMEM` - the system has run out of available memory -pub unsafe fn physalloc(size: usize) -> Result { - syscall1(SYS_PHYSALLOC, size) -} - -/// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain -/// [`PARTIAL_ALLOC`], this will result in `physalloc3` with `min = 1`. -/// -/// Refer to the simpler [`physalloc`] and the more complex [`physalloc3`], that this convenience -/// function is based on. -/// -/// # Errors -/// -/// * `EPERM` - `uid != 0` -/// * `ENOMEM` - the system has run out of available memory -pub unsafe fn physalloc2(size: usize, flags: usize) -> Result { - let mut ret = 1usize; - physalloc3(size, flags, &mut ret) -} - -/// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain -/// [`PARTIAL_ALLOC`], the `min` parameter specifies the number of frames that have to be allocated -/// for this operation to succeed. The return value is the offset of the first frame, and `min` is -/// overwritten with the number of frames actually allocated. -/// -/// Refer to the simpler [`physalloc`] and the simpler library function [`physalloc2`]. 
-/// -/// # Errors -/// -/// * `EPERM` - `uid != 0` -/// * `ENOMEM` - the system has run out of available memory -/// * `EINVAL` - `min = 0` -pub unsafe fn physalloc3(size: usize, flags: usize, min: &mut usize) -> Result { - syscall3(SYS_PHYSALLOC3, size, flags, min as *mut usize as usize) -} - -/// Free physically allocated pages -/// -/// # Errors -/// -/// * `EPERM` - `uid != 0` -pub unsafe fn physfree(physical_address: usize, size: usize) -> Result { - syscall2(SYS_PHYSFREE, physical_address, size) -} - -/// Map physical memory to virtual memory -/// -/// # Errors -/// -/// * `EPERM` - `uid != 0` -pub unsafe fn physmap(physical_address: usize, size: usize, flags: PhysmapFlags) -> Result { - syscall3(SYS_PHYSMAP, physical_address, size, flags.bits()) -} - -/// Read from a file descriptor into a buffer -pub fn read(fd: usize, buf: &mut [u8]) -> Result { - unsafe { syscall3(SYS_READ, fd, buf.as_mut_ptr() as usize, buf.len()) } -} - -/// Remove a directory -pub fn rmdir>(path: T) -> Result { - unsafe { syscall2(SYS_RMDIR, path.as_ref().as_ptr() as usize, path.as_ref().len()) } -} - -/// Set the process group ID -pub fn setpgid(pid: usize, pgid: usize) -> Result { - unsafe { syscall2(SYS_SETPGID, pid, pgid) } -} - -/// Set the current process group IDs -pub fn setregid(rgid: usize, egid: usize) -> Result { - unsafe { syscall2(SYS_SETREGID, rgid, egid) } -} - -/// Make a new scheme namespace -pub fn setrens(rns: usize, ens: usize) -> Result { - unsafe { syscall2(SYS_SETRENS, rns, ens) } -} - -/// Set the current process user IDs -pub fn setreuid(ruid: usize, euid: usize) -> Result { - unsafe { syscall2(SYS_SETREUID, ruid, euid) } -} - -/// Set up a signal handler -pub fn sigaction(sig: usize, act: Option<&SigAction>, oldact: Option<&mut SigAction>) -> Result { - unsafe { syscall4(SYS_SIGACTION, sig, - act.map(|x| x as *const _).unwrap_or_else(ptr::null) as usize, - oldact.map(|x| x as *mut _).unwrap_or_else(ptr::null_mut) as usize, - restorer as usize) } -} - -/// 
Get and/or set signal masks -pub fn sigprocmask(how: usize, set: Option<&[u64; 2]>, oldset: Option<&mut [u64; 2]>) -> Result { - unsafe { syscall3(SYS_SIGPROCMASK, how, - set.map(|x| x as *const _).unwrap_or_else(ptr::null) as usize, - oldset.map(|x| x as *mut _).unwrap_or_else(ptr::null_mut) as usize) } -} - -// Return from signal handler -pub fn sigreturn() -> Result { - unsafe { syscall0(SYS_SIGRETURN) } -} - -/// Set the file mode creation mask -pub fn umask(mask: usize) -> Result { - unsafe { syscall1(SYS_UMASK, mask) } -} - -/// Remove a file -pub fn unlink>(path: T) -> Result { - unsafe { syscall2(SYS_UNLINK, path.as_ref().as_ptr() as usize, path.as_ref().len()) } -} - -/// Convert a virtual address to a physical one -/// -/// # Errors -/// -/// * `EPERM` - `uid != 0` -pub unsafe fn virttophys(virtual_address: usize) -> Result { - syscall1(SYS_VIRTTOPHYS, virtual_address) -} - -/// Check if a child process has exited or received a signal -pub fn waitpid(pid: usize, status: &mut usize, options: WaitFlags) -> Result { - unsafe { syscall3(SYS_WAITPID, pid, status as *mut usize as usize, options.bits()) } -} - -/// Write a buffer to a file descriptor -/// -/// The kernel will attempt to write the bytes in `buf` to the file descriptor `fd`, returning -/// either an `Err`, explained below, or `Ok(count)` where `count` is the number of bytes which -/// were written. 
-/// -/// # Errors -/// -/// * `EAGAIN` - the file descriptor was opened with `O_NONBLOCK` and writing would block -/// * `EBADF` - the file descriptor is not valid or is not open for writing -/// * `EFAULT` - `buf` does not point to the process's addressible memory -/// * `EIO` - an I/O error occurred -/// * `ENOSPC` - the device containing the file descriptor has no room for data -/// * `EPIPE` - the file descriptor refers to a pipe or socket whose reading end is closed -pub fn write(fd: usize, buf: &[u8]) -> Result { - unsafe { syscall3(SYS_WRITE, fd, buf.as_ptr() as usize, buf.len()) } -} - -/// Yield the process's time slice to the kernel -/// -/// This function will return Ok(0) on success -pub fn sched_yield() -> Result { - unsafe { syscall0(SYS_YIELD) } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/data.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/data.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/data.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/data.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,356 +0,0 @@ -use core::ops::{Deref, DerefMut}; -use core::{mem, slice}; -use crate::flag::{EventFlags, MapFlags, PtraceFlags, SigActionFlags}; - -#[derive(Copy, Clone, Debug, Default)] -#[repr(C)] -pub struct Event { - pub id: usize, - pub flags: EventFlags, - pub data: usize -} - -impl Deref for Event { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const Event as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for Event { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut Event as *mut u8, mem::size_of::()) - } - } -} - -#[derive(Copy, Clone, Debug, Default)] -#[repr(C)] -pub struct ITimerSpec { - pub it_interval: TimeSpec, - pub it_value: TimeSpec, -} - -impl Deref for ITimerSpec { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const ITimerSpec 
as *const u8, - mem::size_of::()) - } - } -} - -impl DerefMut for ITimerSpec { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut ITimerSpec as *mut u8, - mem::size_of::()) - } - } -} - -#[derive(Copy, Clone, Debug, Default)] -#[repr(C)] -pub struct OldMap { - pub offset: usize, - pub size: usize, - pub flags: MapFlags, -} - -impl Deref for OldMap { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const OldMap as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for OldMap { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut OldMap as *mut u8, mem::size_of::()) - } - } -} -#[derive(Copy, Clone, Debug, Default)] -#[repr(C)] -pub struct Map { - /// The offset inside the file that is being mapped. - pub offset: usize, - - /// The size of the memory map. - pub size: usize, - - /// Contains both prot and map flags. - pub flags: MapFlags, - - /// Functions as a hint to where in the virtual address space of the running process, to place - /// the memory map. If [`MapFlags::MAP_FIXED`] is set, then this address must be the address to - /// map to. 
- pub address: usize, -} - -impl Deref for Map { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const Map as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for Map { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut Map as *mut u8, mem::size_of::()) - } - } -} - -#[derive(Copy, Clone, Debug, Default)] -#[repr(C)] -pub struct Packet { - pub id: u64, - pub pid: usize, - pub uid: u32, - pub gid: u32, - pub a: usize, - pub b: usize, - pub c: usize, - pub d: usize -} - -impl Deref for Packet { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const Packet as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for Packet { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut Packet as *mut u8, mem::size_of::()) - } - } -} - -#[derive(Copy, Clone, Debug, Default, PartialEq)] -#[repr(C)] -pub struct SigAction { - pub sa_handler: Option, - pub sa_mask: [u64; 2], - pub sa_flags: SigActionFlags, -} -impl Deref for SigAction { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const SigAction as *const u8, - mem::size_of::()) - } - } -} - -impl DerefMut for SigAction { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut SigAction as *mut u8, - mem::size_of::()) - } - } -} - -#[allow(dead_code)] -unsafe fn _assert_size_of_function_is_sane() { - // Transmuting will complain *at compile time* if sizes differ. 
- // Rust forbids a fn-pointer from being 0 so to allow SIG_DFL to - // exist, we use Option which will mean 0 - // becomes None - let _ = mem::transmute::, usize>(None); -} - -#[derive(Copy, Clone, Debug, Default, PartialEq)] -#[repr(C)] -pub struct Stat { - pub st_dev: u64, - pub st_ino: u64, - pub st_mode: u16, - pub st_nlink: u32, - pub st_uid: u32, - pub st_gid: u32, - pub st_size: u64, - pub st_blksize: u32, - pub st_blocks: u64, - pub st_mtime: u64, - pub st_mtime_nsec: u32, - pub st_atime: u64, - pub st_atime_nsec: u32, - pub st_ctime: u64, - pub st_ctime_nsec: u32, -} - -impl Deref for Stat { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const Stat as *const u8, - mem::size_of::()) - } - } -} - -impl DerefMut for Stat { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut Stat as *mut u8, - mem::size_of::()) - } - } -} - -#[derive(Copy, Clone, Debug, Default, PartialEq)] -#[repr(C)] -pub struct StatVfs { - pub f_bsize: u32, - pub f_blocks: u64, - pub f_bfree: u64, - pub f_bavail: u64, -} - -impl Deref for StatVfs { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const StatVfs as *const u8, - mem::size_of::()) - } - } -} - -impl DerefMut for StatVfs { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut StatVfs as *mut u8, - mem::size_of::()) - } - } -} - -#[derive(Copy, Clone, Debug, Default, PartialEq)] -#[repr(C)] -pub struct TimeSpec { - pub tv_sec: i64, - pub tv_nsec: i32, -} - -impl Deref for TimeSpec { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const TimeSpec as *const u8, - mem::size_of::()) - } - } -} - -impl DerefMut for TimeSpec { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut TimeSpec as *mut u8, - mem::size_of::()) - } - } -} - -#[derive(Clone, Copy, Debug, Default)] 
-#[repr(C)] -pub struct PtraceEvent { - pub cause: PtraceFlags, - pub a: usize, - pub b: usize, - pub c: usize, - pub d: usize, - pub e: usize, - pub f: usize -} - -impl Deref for PtraceEvent { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const PtraceEvent as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for PtraceEvent { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut PtraceEvent as *mut u8, mem::size_of::()) - } - } -} - -#[macro_export] -macro_rules! ptrace_event { - ($cause:expr $(, $a:expr $(, $b:expr $(, $c:expr)?)?)?) => { - $crate::data::PtraceEvent { - cause: $cause, - $(a: $a, - $(b: $b, - $(c: $c,)? - )? - )? - ..Default::default() - } - } -} - -bitflags::bitflags! { - #[derive(Default)] - pub struct GrantFlags: usize { - const GRANT_READ = 0x0000_0001; - const GRANT_WRITE = 0x0000_0002; - const GRANT_EXEC = 0x0000_0004; - - const GRANT_SHARED = 0x0000_0008; - const GRANT_LAZY = 0x0000_0010; - const GRANT_SCHEME = 0x0000_0020; - const GRANT_PHYS = 0x0000_0040; - const GRANT_PINNED = 0x0000_0080; - } -} - -#[derive(Clone, Copy, Debug, Default)] -#[repr(C)] -pub struct GrantDesc { - pub base: usize, - pub size: usize, - pub flags: GrantFlags, - pub offset: u64, -} - -impl Deref for GrantDesc { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { - slice::from_raw_parts(self as *const GrantDesc as *const u8, mem::size_of::()) - } - } -} - -impl DerefMut for GrantDesc { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { - slice::from_raw_parts_mut(self as *mut GrantDesc as *mut u8, mem::size_of::()) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/error.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/error.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/error.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,313 +0,0 @@ -use 
core::{fmt, result}; - -#[derive(Eq, PartialEq)] -pub struct Error { - pub errno: i32, -} - -pub type Result = result::Result; - -impl Error { - pub fn new(errno: i32) -> Error { - Error { errno: errno } - } - - pub fn mux(result: Result) -> usize { - match result { - Ok(value) => value, - Err(error) => -error.errno as usize, - } - } - - pub fn demux(value: usize) -> Result { - let errno = -(value as i32); - if errno >= 1 && errno < STR_ERROR.len() as i32 { - Err(Error::new(errno)) - } else { - Ok(value) - } - } - - pub fn text(&self) -> &'static str { - STR_ERROR.get(self.errno as usize).map(|&x| x).unwrap_or("Unknown Error") - } -} - -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - f.write_str(self.text()) - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - f.write_str(self.text()) - } -} - -pub const EPERM: i32 = 1; /* Operation not permitted */ -pub const ENOENT: i32 = 2; /* No such file or directory */ -pub const ESRCH: i32 = 3; /* No such process */ -pub const EINTR: i32 = 4; /* Interrupted system call */ -pub const EIO: i32 = 5; /* I/O error */ -pub const ENXIO: i32 = 6; /* No such device or address */ -pub const E2BIG: i32 = 7; /* Argument list too long */ -pub const ENOEXEC: i32 = 8; /* Exec format error */ -pub const EBADF: i32 = 9; /* Bad file number */ -pub const ECHILD: i32 = 10; /* No child processes */ -pub const EAGAIN: i32 = 11; /* Try again */ -pub const ENOMEM: i32 = 12; /* Out of memory */ -pub const EACCES: i32 = 13; /* Permission denied */ -pub const EFAULT: i32 = 14; /* Bad address */ -pub const ENOTBLK: i32 = 15; /* Block device required */ -pub const EBUSY: i32 = 16; /* Device or resource busy */ -pub const EEXIST: i32 = 17; /* File exists */ -pub const EXDEV: i32 = 18; /* Cross-device link */ -pub const ENODEV: i32 = 19; /* No such device */ -pub const ENOTDIR: i32 = 20; /* Not a directory */ -pub const EISDIR: i32 = 
21; /* Is a directory */ -pub const EINVAL: i32 = 22; /* Invalid argument */ -pub const ENFILE: i32 = 23; /* File table overflow */ -pub const EMFILE: i32 = 24; /* Too many open files */ -pub const ENOTTY: i32 = 25; /* Not a typewriter */ -pub const ETXTBSY: i32 = 26; /* Text file busy */ -pub const EFBIG: i32 = 27; /* File too large */ -pub const ENOSPC: i32 = 28; /* No space left on device */ -pub const ESPIPE: i32 = 29; /* Illegal seek */ -pub const EROFS: i32 = 30; /* Read-only file system */ -pub const EMLINK: i32 = 31; /* Too many links */ -pub const EPIPE: i32 = 32; /* Broken pipe */ -pub const EDOM: i32 = 33; /* Math argument out of domain of func */ -pub const ERANGE: i32 = 34; /* Math result not representable */ -pub const EDEADLK: i32 = 35; /* Resource deadlock would occur */ -pub const ENAMETOOLONG: i32 = 36; /* File name too long */ -pub const ENOLCK: i32 = 37; /* No record locks available */ -pub const ENOSYS: i32 = 38; /* Function not implemented */ -pub const ENOTEMPTY: i32 = 39; /* Directory not empty */ -pub const ELOOP: i32 = 40; /* Too many symbolic links encountered */ -pub const EWOULDBLOCK: i32 = 41; /* Operation would block */ -pub const ENOMSG: i32 = 42; /* No message of desired type */ -pub const EIDRM: i32 = 43; /* Identifier removed */ -pub const ECHRNG: i32 = 44; /* Channel number out of range */ -pub const EL2NSYNC: i32 = 45; /* Level 2 not synchronized */ -pub const EL3HLT: i32 = 46; /* Level 3 halted */ -pub const EL3RST: i32 = 47; /* Level 3 reset */ -pub const ELNRNG: i32 = 48; /* Link number out of range */ -pub const EUNATCH: i32 = 49; /* Protocol driver not attached */ -pub const ENOCSI: i32 = 50; /* No CSI structure available */ -pub const EL2HLT: i32 = 51; /* Level 2 halted */ -pub const EBADE: i32 = 52; /* Invalid exchange */ -pub const EBADR: i32 = 53; /* Invalid request descriptor */ -pub const EXFULL: i32 = 54; /* Exchange full */ -pub const ENOANO: i32 = 55; /* No anode */ -pub const EBADRQC: i32 = 56; /* Invalid request 
code */ -pub const EBADSLT: i32 = 57; /* Invalid slot */ -pub const EDEADLOCK: i32 = 58; /* Resource deadlock would occur */ -pub const EBFONT: i32 = 59; /* Bad font file format */ -pub const ENOSTR: i32 = 60; /* Device not a stream */ -pub const ENODATA: i32 = 61; /* No data available */ -pub const ETIME: i32 = 62; /* Timer expired */ -pub const ENOSR: i32 = 63; /* Out of streams resources */ -pub const ENONET: i32 = 64; /* Machine is not on the network */ -pub const ENOPKG: i32 = 65; /* Package not installed */ -pub const EREMOTE: i32 = 66; /* Object is remote */ -pub const ENOLINK: i32 = 67; /* Link has been severed */ -pub const EADV: i32 = 68; /* Advertise error */ -pub const ESRMNT: i32 = 69; /* Srmount error */ -pub const ECOMM: i32 = 70; /* Communication error on send */ -pub const EPROTO: i32 = 71; /* Protocol error */ -pub const EMULTIHOP: i32 = 72; /* Multihop attempted */ -pub const EDOTDOT: i32 = 73; /* RFS specific error */ -pub const EBADMSG: i32 = 74; /* Not a data message */ -pub const EOVERFLOW: i32 = 75; /* Value too large for defined data type */ -pub const ENOTUNIQ: i32 = 76; /* Name not unique on network */ -pub const EBADFD: i32 = 77; /* File descriptor in bad state */ -pub const EREMCHG: i32 = 78; /* Remote address changed */ -pub const ELIBACC: i32 = 79; /* Can not access a needed shared library */ -pub const ELIBBAD: i32 = 80; /* Accessing a corrupted shared library */ -pub const ELIBSCN: i32 = 81; /* .lib section in a.out corrupted */ -pub const ELIBMAX: i32 = 82; /* Attempting to link in too many shared libraries */ -pub const ELIBEXEC: i32 = 83; /* Cannot exec a shared library directly */ -pub const EILSEQ: i32 = 84; /* Illegal byte sequence */ -pub const ERESTART: i32 = 85; /* Interrupted system call should be restarted */ -pub const ESTRPIPE: i32 = 86; /* Streams pipe error */ -pub const EUSERS: i32 = 87; /* Too many users */ -pub const ENOTSOCK: i32 = 88; /* Socket operation on non-socket */ -pub const EDESTADDRREQ: i32 = 89; /* 
Destination address required */ -pub const EMSGSIZE: i32 = 90; /* Message too long */ -pub const EPROTOTYPE: i32 = 91; /* Protocol wrong type for socket */ -pub const ENOPROTOOPT: i32 = 92; /* Protocol not available */ -pub const EPROTONOSUPPORT: i32 = 93; /* Protocol not supported */ -pub const ESOCKTNOSUPPORT: i32 = 94; /* Socket type not supported */ -pub const EOPNOTSUPP: i32 = 95; /* Operation not supported on transport endpoint */ -pub const EPFNOSUPPORT: i32 = 96; /* Protocol family not supported */ -pub const EAFNOSUPPORT: i32 = 97; /* Address family not supported by protocol */ -pub const EADDRINUSE: i32 = 98; /* Address already in use */ -pub const EADDRNOTAVAIL: i32 = 99; /* Cannot assign requested address */ -pub const ENETDOWN: i32 = 100; /* Network is down */ -pub const ENETUNREACH: i32 = 101; /* Network is unreachable */ -pub const ENETRESET: i32 = 102; /* Network dropped connection because of reset */ -pub const ECONNABORTED: i32 = 103; /* Software caused connection abort */ -pub const ECONNRESET: i32 = 104; /* Connection reset by peer */ -pub const ENOBUFS: i32 = 105; /* No buffer space available */ -pub const EISCONN: i32 = 106; /* Transport endpoint is already connected */ -pub const ENOTCONN: i32 = 107; /* Transport endpoint is not connected */ -pub const ESHUTDOWN: i32 = 108; /* Cannot send after transport endpoint shutdown */ -pub const ETOOMANYREFS: i32 = 109; /* Too many references: cannot splice */ -pub const ETIMEDOUT: i32 = 110; /* Connection timed out */ -pub const ECONNREFUSED: i32 = 111; /* Connection refused */ -pub const EHOSTDOWN: i32 = 112; /* Host is down */ -pub const EHOSTUNREACH: i32 = 113; /* No route to host */ -pub const EALREADY: i32 = 114; /* Operation already in progress */ -pub const EINPROGRESS: i32 = 115; /* Operation now in progress */ -pub const ESTALE: i32 = 116; /* Stale NFS file handle */ -pub const EUCLEAN: i32 = 117; /* Structure needs cleaning */ -pub const ENOTNAM: i32 = 118; /* Not a XENIX named type file */ 
-pub const ENAVAIL: i32 = 119; /* No XENIX semaphores available */ -pub const EISNAM: i32 = 120; /* Is a named type file */ -pub const EREMOTEIO: i32 = 121; /* Remote I/O error */ -pub const EDQUOT: i32 = 122; /* Quota exceeded */ -pub const ENOMEDIUM: i32 = 123; /* No medium found */ -pub const EMEDIUMTYPE: i32 = 124; /* Wrong medium type */ -pub const ECANCELED: i32 = 125; /* Operation Canceled */ -pub const ENOKEY: i32 = 126; /* Required key not available */ -pub const EKEYEXPIRED: i32 = 127; /* Key has expired */ -pub const EKEYREVOKED: i32 = 128; /* Key has been revoked */ -pub const EKEYREJECTED: i32 = 129; /* Key was rejected by service */ -pub const EOWNERDEAD: i32 = 130; /* Owner died */ -pub const ENOTRECOVERABLE: i32 = 131; /* State not recoverable */ -pub const ESKMSG: i32 = 132; /* Scheme-kernel message code */ - -pub static STR_ERROR: [&'static str; 133] = ["Success", - "Operation not permitted", - "No such file or directory", - "No such process", - "Interrupted system call", - "I/O error", - "No such device or address", - "Argument list too long", - "Exec format error", - "Bad file number", - "No child processes", - "Try again", - "Out of memory", - "Permission denied", - "Bad address", - "Block device required", - "Device or resource busy", - "File exists", - "Cross-device link", - "No such device", - "Not a directory", - "Is a directory", - "Invalid argument", - "File table overflow", - "Too many open files", - "Not a typewriter", - "Text file busy", - "File too large", - "No space left on device", - "Illegal seek", - "Read-only file system", - "Too many links", - "Broken pipe", - "Math argument out of domain of func", - "Math result not representable", - "Resource deadlock would occur", - "File name too long", - "No record locks available", - "Function not implemented", - "Directory not empty", - "Too many symbolic links encountered", - "Operation would block", - "No message of desired type", - "Identifier removed", - "Channel number out of 
range", - "Level 2 not synchronized", - "Level 3 halted", - "Level 3 reset", - "Link number out of range", - "Protocol driver not attached", - "No CSI structure available", - "Level 2 halted", - "Invalid exchange", - "Invalid request descriptor", - "Exchange full", - "No anode", - "Invalid request code", - "Invalid slot", - "Resource deadlock would occur", - "Bad font file format", - "Device not a stream", - "No data available", - "Timer expired", - "Out of streams resources", - "Machine is not on the network", - "Package not installed", - "Object is remote", - "Link has been severed", - "Advertise error", - "Srmount error", - "Communication error on send", - "Protocol error", - "Multihop attempted", - "RFS specific error", - "Not a data message", - "Value too large for defined data type", - "Name not unique on network", - "File descriptor in bad state", - "Remote address changed", - "Can not access a needed shared library", - "Accessing a corrupted shared library", - ".lib section in a.out corrupted", - "Attempting to link in too many shared libraries", - "Cannot exec a shared library directly", - "Illegal byte sequence", - "Interrupted system call should be restarted", - "Streams pipe error", - "Too many users", - "Socket operation on non-socket", - "Destination address required", - "Message too long", - "Protocol wrong type for socket", - "Protocol not available", - "Protocol not supported", - "Socket type not supported", - "Operation not supported on transport endpoint", - "Protocol family not supported", - "Address family not supported by protocol", - "Address already in use", - "Cannot assign requested address", - "Network is down", - "Network is unreachable", - "Network dropped connection because of reset", - "Software caused connection abort", - "Connection reset by peer", - "No buffer space available", - "Transport endpoint is already connected", - "Transport endpoint is not connected", - "Cannot send after transport endpoint shutdown", - "Too many 
references: cannot splice", - "Connection timed out", - "Connection refused", - "Host is down", - "No route to host", - "Operation already in progress", - "Operation now in progress", - "Stale NFS file handle", - "Structure needs cleaning", - "Not a XENIX named type file", - "No XENIX semaphores available", - "Is a named type file", - "Remote I/O error", - "Quota exceeded", - "No medium found", - "Wrong medium type", - "Operation Canceled", - "Required key not available", - "Key has expired", - "Key has been revoked", - "Key was rejected by service", - "Owner died", - "State not recoverable", - "Scheme-kernel message code"]; diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/flag.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/flag.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/flag.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/flag.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,374 +0,0 @@ -use bitflags::bitflags as inner_bitflags; -use core::{mem, ops::Deref, slice}; - -macro_rules! bitflags { - ( - $(#[$outer:meta])* - pub struct $BitFlags:ident: $T:ty { - $( - $(#[$inner:ident $($args:tt)*])* - const $Flag:ident = $value:expr; - )+ - } - ) => { - // First, use the inner bitflags - inner_bitflags! { - #[derive(Default)] - $(#[$outer])* - pub struct $BitFlags: $T { - $( - $(#[$inner $($args)*])* - const $Flag = $value; - )+ - } - } - - // Secondly, re-export all inner constants - // (`pub use self::Struct::*` doesn't work) - $( - $(#[$inner $($args)*])* - pub const $Flag: $BitFlags = $BitFlags::$Flag; - )+ - } -} - -pub const CLOCK_REALTIME: usize = 1; -pub const CLOCK_MONOTONIC: usize = 4; - -bitflags! 
{ - pub struct EventFlags: usize { - const EVENT_NONE = 0; - const EVENT_READ = 1; - const EVENT_WRITE = 2; - } -} - -pub const F_DUPFD: usize = 0; -pub const F_GETFD: usize = 1; -pub const F_SETFD: usize = 2; -pub const F_GETFL: usize = 3; -pub const F_SETFL: usize = 4; - -pub const FUTEX_WAIT: usize = 0; -pub const FUTEX_WAKE: usize = 1; -pub const FUTEX_REQUEUE: usize = 2; -pub const FUTEX_WAIT64: usize = 3; - -// packet.c = fd -pub const SKMSG_FRETURNFD: usize = 0; - -// packet.uid:packet.gid = offset, packet.c = base address, packet.d = page count -pub const SKMSG_PROVIDE_MMAP: usize = 1; - -bitflags! { - pub struct MapFlags: usize { - // TODO: Downgrade PROT_NONE to global constant? (bitflags specifically states zero flags - // can cause buggy behavior). - const PROT_NONE = 0x0000_0000; - - const PROT_EXEC = 0x0001_0000; - const PROT_WRITE = 0x0002_0000; - const PROT_READ = 0x0004_0000; - - const MAP_SHARED = 0x0001; - const MAP_PRIVATE = 0x0002; - - const MAP_FIXED = 0x0004; - const MAP_FIXED_NOREPLACE = 0x000C; - - /// For *userspace-backed mmaps*, return from the mmap call before all pages have been - /// provided by the scheme. This requires the scheme to be trusted, as the current context - /// can block indefinitely, if the scheme does not respond to the page fault handler's - /// request, as it tries to map the page by requesting it from the scheme. - /// - /// In some cases however, such as the program loader, the data needs to be trusted as much - /// with or without MAP_LAZY, and if so, mapping lazily will not cause insecureness by - /// itself. - /// - /// For kernel-backed mmaps, this flag has no effect at all. It is unspecified whether - /// kernel mmaps are lazy or not. - const MAP_LAZY = 0x0010; - } -} -bitflags! { - pub struct MunmapFlags: usize { - /// Indicates whether the funmap call must implicitly do an msync, for the changes to - /// become visible later. 
- /// - /// This flag will currently be set if and only if MAP_SHARED | PROT_WRITE are set. - const NEEDS_SYNC = 1; - } -} - -pub const MODE_TYPE: u16 = 0xF000; -pub const MODE_DIR: u16 = 0x4000; -pub const MODE_FILE: u16 = 0x8000; -pub const MODE_SYMLINK: u16 = 0xA000; -pub const MODE_FIFO: u16 = 0x1000; -pub const MODE_CHR: u16 = 0x2000; - -pub const MODE_PERM: u16 = 0x0FFF; -pub const MODE_SETUID: u16 = 0o4000; -pub const MODE_SETGID: u16 = 0o2000; - -pub const O_RDONLY: usize = 0x0001_0000; -pub const O_WRONLY: usize = 0x0002_0000; -pub const O_RDWR: usize = 0x0003_0000; -pub const O_NONBLOCK: usize = 0x0004_0000; -pub const O_APPEND: usize = 0x0008_0000; -pub const O_SHLOCK: usize = 0x0010_0000; -pub const O_EXLOCK: usize = 0x0020_0000; -pub const O_ASYNC: usize = 0x0040_0000; -pub const O_FSYNC: usize = 0x0080_0000; -pub const O_CLOEXEC: usize = 0x0100_0000; -pub const O_CREAT: usize = 0x0200_0000; -pub const O_TRUNC: usize = 0x0400_0000; -pub const O_EXCL: usize = 0x0800_0000; -pub const O_DIRECTORY: usize = 0x1000_0000; -pub const O_STAT: usize = 0x2000_0000; -pub const O_SYMLINK: usize = 0x4000_0000; -pub const O_NOFOLLOW: usize = 0x8000_0000; -pub const O_ACCMODE: usize = O_RDONLY | O_WRONLY | O_RDWR; - -bitflags! { - pub struct PhysmapFlags: usize { - const PHYSMAP_WRITE = 0x0000_0001; - const PHYSMAP_WRITE_COMBINE = 0x0000_0002; - const PHYSMAP_NO_CACHE = 0x0000_0004; - } -} -bitflags! { - /// Extra flags for [`physalloc2`] or [`physalloc3`]. - /// - /// [`physalloc2`]: ../call/fn.physalloc2.html - /// [`physalloc3`]: ../call/fn.physalloc3.html - pub struct PhysallocFlags: usize { - /// Only allocate memory within the 32-bit physical memory space. This is necessary for - /// some devices may not support 64-bit memory. - const SPACE_32 = 0x0000_0001; - - /// The frame that will be allocated, is going to reside anywhere in 64-bit space. This - /// flag is redundant for the most part, except when overriding some other default. 
- const SPACE_64 = 0x0000_0002; - - /// Do a "partial allocation", which means that not all of the frames specified in the - /// frame count `size` actually have to be allocated. This means that if the allocator was - /// unable to find a physical memory range large enough, it can instead return whatever - /// range it decides is optimal. Thus, instead of letting one driver get an expensive - /// 128MiB physical memory range when the physical memory has become fragmented, and - /// failing, it can instead be given a more optimal range. If the device supports - /// scatter-gather lists, then the driver only has to allocate more ranges, and the device - /// will do vectored I/O. - /// - /// PARTIAL_ALLOC supports different allocation strategies, refer to - /// [`Optimal`], [`GreatestRange`]. - /// - /// [`Optimal`]: ./enum.PartialAllocStrategy.html - /// [`GreatestRange`]: ./enum.PartialAllocStrategy.html - const PARTIAL_ALLOC = 0x0000_0004; - } -} - -/// The bitmask of the partial allocation strategy. Currently four different strategies are -/// supported. If [`PARTIAL_ALLOC`] is not set, this bitmask is no longer reserved. -pub const PARTIAL_ALLOC_STRATEGY_MASK: usize = 0x0003_0000; - -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -#[repr(usize)] -pub enum PartialAllocStrategy { - /// The allocator decides itself the size of the memory range, based on e.g. free memory ranges - /// and other processes which require large physical memory chunks. - Optimal = 0x0001_0000, - - /// The allocator returns the absolute greatest range it can find. - GreatestRange = 0x0002_0000, - - /// The allocator returns the first range that fits the minimum count, without searching extra. 
- Greedy = 0x0003_0000, -} -impl Default for PartialAllocStrategy { - fn default() -> Self { - Self::Optimal - } -} - -impl PartialAllocStrategy { - pub fn from_raw(raw: usize) -> Option { - match raw { - 0x0001_0000 => Some(Self::Optimal), - 0x0002_0000 => Some(Self::GreatestRange), - 0x0003_0000 => Some(Self::Greedy), - _ => None, - } - } -} - -// The top 48 bits of PTRACE_* are reserved, for now - -bitflags! { - pub struct PtraceFlags: u64 { - /// Stop before a syscall is handled. Send PTRACE_FLAG_IGNORE to not - /// handle the syscall. - const PTRACE_STOP_PRE_SYSCALL = 0x0000_0000_0000_0001; - /// Stop after a syscall is handled. - const PTRACE_STOP_POST_SYSCALL = 0x0000_0000_0000_0002; - /// Stop after exactly one instruction. TODO: This may not handle - /// fexec/signal boundaries. Should it? - const PTRACE_STOP_SINGLESTEP = 0x0000_0000_0000_0004; - /// Stop before a signal is handled. Send PTRACE_FLAG_IGNORE to not - /// handle signal. - const PTRACE_STOP_SIGNAL = 0x0000_0000_0000_0008; - /// Stop on a software breakpoint, such as the int3 instruction for - /// x86_64. - const PTRACE_STOP_BREAKPOINT = 0x0000_0000_0000_0010; - /// Stop just before exiting for good. - const PTRACE_STOP_EXIT = 0x0000_0000_0000_0020; - - const PTRACE_STOP_MASK = 0x0000_0000_0000_00FF; - - - /// Sent when a child is cloned, giving you the opportunity to trace it. - /// If you don't catch this, the child is started as normal. - const PTRACE_EVENT_CLONE = 0x0000_0000_0000_0100; - - /// Sent when current-addrspace is changed, allowing the tracer to reopen the memory file. - const PTRACE_EVENT_ADDRSPACE_SWITCH = 0x0000_0000_0000_0200; - - const PTRACE_EVENT_MASK = 0x0000_0000_0000_0F00; - - /// Special meaning, depending on the event. Usually, when fired before - /// an action, it will skip performing that action. 
- const PTRACE_FLAG_IGNORE = 0x0000_0000_0000_1000; - - const PTRACE_FLAG_MASK = 0x0000_0000_0000_F000; - } -} -impl Deref for PtraceFlags { - type Target = [u8]; - fn deref(&self) -> &Self::Target { - // Same as to_ne_bytes but in-place - unsafe { - slice::from_raw_parts( - &self.bits as *const _ as *const u8, - mem::size_of::() - ) - } - } -} - -pub const SEEK_SET: usize = 0; -pub const SEEK_CUR: usize = 1; -pub const SEEK_END: usize = 2; - -pub const SIGHUP: usize = 1; -pub const SIGINT: usize = 2; -pub const SIGQUIT: usize = 3; -pub const SIGILL: usize = 4; -pub const SIGTRAP: usize = 5; -pub const SIGABRT: usize = 6; -pub const SIGBUS: usize = 7; -pub const SIGFPE: usize = 8; -pub const SIGKILL: usize = 9; -pub const SIGUSR1: usize = 10; -pub const SIGSEGV: usize = 11; -pub const SIGUSR2: usize = 12; -pub const SIGPIPE: usize = 13; -pub const SIGALRM: usize = 14; -pub const SIGTERM: usize = 15; -pub const SIGSTKFLT: usize= 16; -pub const SIGCHLD: usize = 17; -pub const SIGCONT: usize = 18; -pub const SIGSTOP: usize = 19; -pub const SIGTSTP: usize = 20; -pub const SIGTTIN: usize = 21; -pub const SIGTTOU: usize = 22; -pub const SIGURG: usize = 23; -pub const SIGXCPU: usize = 24; -pub const SIGXFSZ: usize = 25; -pub const SIGVTALRM: usize= 26; -pub const SIGPROF: usize = 27; -pub const SIGWINCH: usize = 28; -pub const SIGIO: usize = 29; -pub const SIGPWR: usize = 30; -pub const SIGSYS: usize = 31; - -pub const SIG_DFL: usize = 0; -pub const SIG_IGN: usize = 1; - -pub const SIG_BLOCK: usize = 0; -pub const SIG_UNBLOCK: usize = 1; -pub const SIG_SETMASK: usize = 2; - -bitflags! { - pub struct SigActionFlags: usize { - const SA_NOCLDSTOP = 0x00000001; - const SA_NOCLDWAIT = 0x00000002; - const SA_SIGINFO = 0x00000004; - const SA_RESTORER = 0x04000000; - const SA_ONSTACK = 0x08000000; - const SA_RESTART = 0x10000000; - const SA_NODEFER = 0x40000000; - const SA_RESETHAND = 0x80000000; - } -} - -bitflags! 
{ - pub struct WaitFlags: usize { - const WNOHANG = 0x01; - const WUNTRACED = 0x02; - const WCONTINUED = 0x08; - } -} - -pub const ADDRSPACE_OP_MMAP: usize = 0; -pub const ADDRSPACE_OP_MUNMAP: usize = 1; -pub const ADDRSPACE_OP_MPROTECT: usize = 2; -pub const ADDRSPACE_OP_TRANSFER: usize = 3; - -/// True if status indicates the child is stopped. -pub fn wifstopped(status: usize) -> bool { - (status & 0xff) == 0x7f -} - -/// If wifstopped(status), the signal that stopped the child. -pub fn wstopsig(status: usize) -> usize { - (status >> 8) & 0xff -} - -/// True if status indicates the child continued after a stop. -pub fn wifcontinued(status: usize) -> bool { - status == 0xffff -} - -/// True if STATUS indicates termination by a signal. -pub fn wifsignaled(status: usize) -> bool { - ((status & 0x7f) + 1) as i8 >= 2 -} - -/// If wifsignaled(status), the terminating signal. -pub fn wtermsig(status: usize) -> usize { - status & 0x7f -} - -/// True if status indicates normal termination. -pub fn wifexited(status: usize) -> bool { - wtermsig(status) == 0 -} - -/// If wifexited(status), the exit status. -pub fn wexitstatus(status: usize) -> usize { - (status >> 8) & 0xff -} - -/// True if status indicates a core dump was created. -pub fn wcoredump(status: usize) -> bool { - (status & 0x80) != 0 -} - -bitflags! 
{ - pub struct MremapFlags: usize { - const FIXED = 1; - const FIXED_REPLACE = 3; - // TODO: MAYMOVE, DONTUNMAP - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/dma.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/dma.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/dma.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/dma.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,219 +0,0 @@ -use core::mem::{self, MaybeUninit}; -use core::ops::{Deref, DerefMut}; -use core::{ptr, slice}; - -use crate::Result; -use crate::{PartialAllocStrategy, PhysallocFlags, PhysmapFlags}; -use crate::arch::PAGE_SIZE; - -/// An RAII guard of a physical memory allocation. Currently all physically allocated memory are -/// page-aligned and take up at least 4k of space (on x86_64). -#[derive(Debug)] -pub struct PhysBox { - address: usize, - size: usize -} - -const fn round_up(x: usize) -> usize { - (x + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE -} -fn assert_aligned(x: usize) { - assert_eq!(x % PAGE_SIZE, 0); -} - -#[cfg(target_arch = "aarch64")] -fn physmap_flags() -> PhysmapFlags { - // aarch64 currently must map DMA memory without caching to ensure coherence - crate::PHYSMAP_NO_CACHE | crate::PHYSMAP_WRITE -} - -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -fn physmap_flags() -> PhysmapFlags { - // x86 ensures cache coherence with DMA memory - crate::PHYSMAP_WRITE -} - -impl PhysBox { - /// Construct a PhysBox from an address and a size. The address must be page-aligned, and the - /// size must similarly be a multiple of the page size. - /// - /// # Safety - /// This function is unsafe because when dropping, Self has to a valid allocation. - pub unsafe fn from_raw_parts(address: usize, size: usize) -> Self { - assert_aligned(address); - assert_aligned(size); - - Self { - address, - size, - } - } - - /// Retrieve the byte address in physical memory, of this allocation. 
- pub fn address(&self) -> usize { - self.address - } - - /// Retrieve the size in bytes of the alloc. - pub fn size(&self) -> usize { - self.size - } - - /// Allocate physical memory that must reside in 32-bit space. - pub fn new_in_32bit_space(size: usize) -> Result { - Self::new_with_flags(size, PhysallocFlags::SPACE_32) - } - - pub fn new_with_flags(size: usize, flags: PhysallocFlags) -> Result { - assert!(!flags.contains(PhysallocFlags::PARTIAL_ALLOC)); - assert_aligned(size); - - let address = unsafe { crate::physalloc2(size, flags.bits())? }; - Ok(unsafe { Self::from_raw_parts(address, size) }) - } - - /// "Partially" allocate physical memory, in the sense that the allocation may be smaller than - /// expected, but still with a minimum limit. This is particularly useful when the physical - /// memory space is fragmented, and a device supports scatter-gather I/O. In that case, the - /// driver can optimistically request e.g. 1 alloc of 1 MiB, with the minimum of 512 KiB. If - /// that first allocation only returns half the size, the driver can do another allocation - /// and then let the device use both buffers. - pub fn new_partial_allocation(size: usize, flags: PhysallocFlags, strategy: Option, mut min: usize) -> Result { - assert_aligned(size); - debug_assert!(!(flags.contains(PhysallocFlags::PARTIAL_ALLOC) && strategy.is_none())); - - let address = unsafe { crate::physalloc3(size, flags.bits() | strategy.map_or(0, |s| s as usize), &mut min)? }; - Ok(unsafe { Self::from_raw_parts(address, size) }) - } - - pub fn new(size: usize) -> Result { - assert_aligned(size); - - let address = unsafe { crate::physalloc(size)? 
}; - Ok(unsafe { Self::from_raw_parts(address, size) }) - } -} - -impl Drop for PhysBox { - fn drop(&mut self) { - let _ = unsafe { crate::physfree(self.address, self.size) }; - } -} - -pub struct Dma { - phys: PhysBox, - virt: *mut T, -} - -impl Dma { - pub fn from_physbox_uninit(phys: PhysBox) -> Result>> { - let virt = unsafe { crate::physmap(phys.address, phys.size, physmap_flags())? } as *mut MaybeUninit; - - Ok(Dma { - phys, - virt, - }) - } - pub fn from_physbox_zeroed(phys: PhysBox) -> Result>> { - let this = Self::from_physbox_uninit(phys)?; - unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit, 0, this.phys.size) } - Ok(this) - } - - pub fn from_physbox(phys: PhysBox, value: T) -> Result { - let this = Self::from_physbox_uninit(phys)?; - - Ok(unsafe { - ptr::write(this.virt, MaybeUninit::new(value)); - this.assume_init() - }) - } - - pub fn new(value: T) -> Result { - let phys = PhysBox::new(round_up(mem::size_of::()))?; - Self::from_physbox(phys, value) - } - pub fn zeroed() -> Result>> { - let phys = PhysBox::new(round_up(mem::size_of::()))?; - Self::from_physbox_zeroed(phys) - } -} - -impl Dma> { - pub unsafe fn assume_init(self) -> Dma { - let &Dma { phys: PhysBox { address, size }, virt } = &self; - mem::forget(self); - - Dma { - phys: PhysBox { address, size }, - virt: virt as *mut T, - } - } -} -impl Dma { - pub fn physical(&self) -> usize { - self.phys.address() - } - pub fn size(&self) -> usize { - self.phys.size() - } - pub fn phys(&self) -> &PhysBox { - &self.phys - } -} - -impl Dma<[T]> { - pub fn from_physbox_uninit_unsized(phys: PhysBox, len: usize) -> Result]>> { - let max_len = phys.size() / mem::size_of::(); - assert!(len <= max_len); - - Ok(Dma { - virt: unsafe { slice::from_raw_parts_mut(crate::physmap(phys.address, phys.size, physmap_flags())? 
as *mut MaybeUninit, len) } as *mut [MaybeUninit], - phys, - }) - } - pub fn from_physbox_zeroed_unsized(phys: PhysBox, len: usize) -> Result]>> { - let this = Self::from_physbox_uninit_unsized(phys, len)?; - unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit, 0, this.phys.size()) } - Ok(this) - } - /// Creates a new DMA buffer with a size only known at runtime. - /// ## Safety - /// * `T` must be properly aligned. - /// * `T` must be valid as zeroed (i.e. no NonNull pointers). - pub unsafe fn zeroed_unsized(count: usize) -> Result { - let phys = PhysBox::new(round_up(mem::size_of::() * count))?; - Ok(Self::from_physbox_zeroed_unsized(phys, count)?.assume_init()) - } -} -impl Dma<[MaybeUninit]> { - pub unsafe fn assume_init(self) -> Dma<[T]> { - let &Dma { phys: PhysBox { address, size }, virt } = &self; - mem::forget(self); - - Dma { - phys: PhysBox { address, size }, - virt: virt as *mut [T], - } - } -} - -impl Deref for Dma { - type Target = T; - fn deref(&self) -> &T { - unsafe { &*self.virt } - } -} - -impl DerefMut for Dma { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.virt } - } -} - -impl Drop for Dma { - fn drop(&mut self) { - unsafe { ptr::drop_in_place(self.virt) } - let _ = unsafe { crate::funmap(self.virt as *mut u8 as usize, self.phys.size) }; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/io.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/io.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/io.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/io.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,71 +0,0 @@ -use core::cmp::PartialEq; -use core::ops::{BitAnd, BitOr, Not}; - -pub trait Io { - type Value: Copy + PartialEq + BitAnd + BitOr + Not; - - fn read(&self) -> Self::Value; - fn write(&mut self, value: Self::Value); - - #[inline(always)] - fn readf(&self, flags: Self::Value) -> bool { - (self.read() & flags) as Self::Value == flags - } - - #[inline(always)] 
- fn writef(&mut self, flags: Self::Value, value: bool) { - let tmp: Self::Value = match value { - true => self.read() | flags, - false => self.read() & !flags, - }; - self.write(tmp); - } -} - -pub struct ReadOnly { - inner: I -} - -impl ReadOnly { - pub const fn new(inner: I) -> ReadOnly { - ReadOnly { - inner: inner - } - } -} - -impl ReadOnly { - #[inline(always)] - pub fn read(&self) -> I::Value { - self.inner.read() - } - - #[inline(always)] - pub fn readf(&self, flags: I::Value) -> bool { - self.inner.readf(flags) - } -} - -pub struct WriteOnly { - inner: I -} - -impl WriteOnly { - pub const fn new(inner: I) -> WriteOnly { - WriteOnly { - inner: inner - } - } -} - -impl WriteOnly { - #[inline(always)] - pub fn write(&mut self, value: I::Value) { - self.inner.write(value) - } - - #[inline(always)] - pub fn writef(&mut self, flags: I::Value, value: bool) { - self.inner.writef(flags, value) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/mmio.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/mmio.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/mmio.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/mmio.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,168 +0,0 @@ -use core::mem::MaybeUninit; -use core::ptr; -#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] -use core::ops::{BitAnd, BitOr, Not}; - -use super::io::Io; - -#[repr(packed)] -pub struct Mmio { - value: MaybeUninit, -} - -impl Mmio { - /// Create a new Mmio without initializing - #[deprecated = "unsound because it's possible to read even though it's uninitialized"] - pub fn new() -> Self { - unsafe { Self::uninit() } - } - pub unsafe fn zeroed() -> Self { - Self { - value: MaybeUninit::zeroed(), - } - } - pub unsafe fn uninit() -> Self { - Self { - value: MaybeUninit::uninit(), - } - } - pub const fn from(value: T) -> Self { - Self { - value: MaybeUninit::new(value), - } - } -} - -// Generic implementation (WARNING: 
requires aligned pointers!) -#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] -impl Io for Mmio where T: Copy + PartialEq + BitAnd + BitOr + Not { - type Value = T; - - fn read(&self) -> T { - unsafe { ptr::read_volatile(ptr::addr_of!(self.value).cast::()) } - } - - fn write(&mut self, value: T) { - unsafe { ptr::write_volatile(ptr::addr_of_mut!(self.value).cast::(), value) }; - } -} - -// x86 u8 implementation -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -impl Io for Mmio { - type Value = u8; - - fn read(&self) -> Self::Value { - unsafe { - let value: Self::Value; - let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::(); - core::arch::asm!( - "mov {}, [{}]", - out(reg_byte) value, - in(reg) ptr - ); - value - } - } - - fn write(&mut self, value: Self::Value) { - unsafe { - let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::(); - core::arch::asm!( - "mov [{}], {}", - in(reg) ptr, - in(reg_byte) value, - ); - } - } -} - -// x86 u16 implementation -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -impl Io for Mmio { - type Value = u16; - - fn read(&self) -> Self::Value { - unsafe { - let value: Self::Value; - let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::(); - core::arch::asm!( - "mov {:x}, [{}]", - out(reg) value, - in(reg) ptr - ); - value - } - } - - fn write(&mut self, value: Self::Value) { - unsafe { - let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::(); - core::arch::asm!( - "mov [{}], {:x}", - in(reg) ptr, - in(reg) value, - ); - } - } -} - -// x86 u32 implementation -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -impl Io for Mmio { - type Value = u32; - - fn read(&self) -> Self::Value { - unsafe { - let value: Self::Value; - let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::(); - core::arch::asm!( - "mov {:e}, [{}]", - out(reg) value, - in(reg) ptr - ); - value - } - } - - fn write(&mut self, value: Self::Value) { - unsafe { - let ptr: *mut 
Self::Value = ptr::addr_of_mut!(self.value).cast::(); - core::arch::asm!( - "mov [{}], {:e}", - in(reg) ptr, - in(reg) value, - ); - } - } -} - -// x86 u64 implementation (x86_64 only) -#[cfg(target_arch = "x86_64")] -impl Io for Mmio { - type Value = u64; - - fn read(&self) -> Self::Value { - unsafe { - let value: Self::Value; - let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::(); - core::arch::asm!( - "mov {:r}, [{}]", - out(reg) value, - in(reg) ptr - ); - value - } - } - - fn write(&mut self, value: Self::Value) { - unsafe { - let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::(); - core::arch::asm!( - "mov [{}], {:r}", - in(reg) ptr, - in(reg) value, - ); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/mod.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/mod.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -//! 
I/O functions - -pub use self::dma::*; -pub use self::io::*; -pub use self::mmio::*; - -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -pub use self::pio::*; - -mod dma; -mod io; -mod mmio; - -#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -mod pio; diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/pio.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/pio.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/io/pio.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/io/pio.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,90 +0,0 @@ -use core::arch::asm; -use core::marker::PhantomData; - -use super::io::Io; - -/// Generic PIO -#[derive(Copy, Clone)] -pub struct Pio { - port: u16, - value: PhantomData, -} - -impl Pio { - /// Create a PIO from a given port - pub const fn new(port: u16) -> Self { - Pio:: { - port, - value: PhantomData, - } - } -} - -/// Read/Write for byte PIO -impl Io for Pio { - type Value = u8; - - /// Read - #[inline(always)] - fn read(&self) -> u8 { - let value: u8; - unsafe { - asm!("in al, dx", in("dx") self.port, out("al") value, options(nostack, nomem, preserves_flags)); - } - value - } - - /// Write - #[inline(always)] - fn write(&mut self, value: u8) { - unsafe { - asm!("out dx, al", in("dx") self.port, in("al") value, options(nostack, nomem, preserves_flags)); - } - } -} - -/// Read/Write for word PIO -impl Io for Pio { - type Value = u16; - - /// Read - #[inline(always)] - fn read(&self) -> u16 { - let value: u16; - unsafe { - asm!("in ax, dx", in("dx") self.port, out("ax") value, options(nostack, nomem, preserves_flags)); - } - value - } - - /// Write - #[inline(always)] - fn write(&mut self, value: u16) { - unsafe { - asm!("out dx, ax", in("dx") self.port, in("ax") value, options(nostack, nomem, preserves_flags)); - } - } -} - -/// Read/Write for doubleword PIO -impl Io for Pio { - type Value = u32; - - /// Read - #[inline(always)] - fn read(&self) -> u32 { - let 
value: u32; - unsafe { - asm!("in eax, dx", in("dx") self.port, out("eax") value, options(nostack, nomem, preserves_flags)); - } - value - } - - /// Write - #[inline(always)] - fn write(&mut self, value: u32) { - unsafe { - asm!("out dx, eax", in("dx") self.port, in("eax") value, options(nostack, nomem, preserves_flags)); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/lib.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/lib.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/lib.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,61 +0,0 @@ -#![cfg_attr(not(test), no_std)] - -#[cfg(test)] -extern crate core; - -pub use self::arch::*; -pub use self::call::*; -pub use self::data::*; -pub use self::error::*; -pub use self::flag::*; -pub use self::io::*; -pub use self::number::*; -pub use self::scheme::*; - -#[cfg(all(any(target_os = "none", target_os = "redox"), target_arch = "arm"))] -#[path="arch/nonredox.rs"] -mod arch; - -#[cfg(all(any(target_os = "none", target_os = "redox"), target_arch = "aarch64"))] -#[path="arch/aarch64.rs"] -mod arch; - -#[cfg(all(any(target_os = "none", target_os = "redox"), target_arch = "riscv64"))] -#[path="arch/riscv64.rs"] -mod arch; - -#[cfg(all(any(target_os = "none", target_os = "redox"), target_arch = "x86"))] -#[path="arch/x86.rs"] -mod arch; - -#[cfg(all(any(target_os = "none", target_os = "redox"), target_arch = "x86_64"))] -#[path="arch/x86_64.rs"] -mod arch; - -#[cfg(not(any(target_os = "none", target_os = "redox")))] -#[path="arch/nonredox.rs"] -mod arch; - -/// Function definitions -pub mod call; - -/// Complex structures that are used for some system calls -pub mod data; - -/// All errors that can be generated by a system call -pub mod error; - -/// Flags used as an argument to many system calls -pub mod flag; - -/// Functions for low level hardware control -pub mod io; - -/// Call numbers used by each system 
call -pub mod number; - -/// A trait useful for scheme handlers -pub mod scheme; - -#[cfg(test)] -mod tests; diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/number.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/number.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/number.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/number.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,89 +0,0 @@ -pub const SYS_CLASS: usize = 0xF000_0000; -pub const SYS_CLASS_PATH: usize=0x1000_0000; -pub const SYS_CLASS_FILE: usize=0x2000_0000; - -pub const SYS_ARG: usize = 0x0F00_0000; -pub const SYS_ARG_SLICE: usize =0x0100_0000; -pub const SYS_ARG_MSLICE: usize=0x0200_0000; -pub const SYS_ARG_PATH: usize = 0x0300_0000; - -pub const SYS_RET: usize = 0x00F0_0000; -pub const SYS_RET_FILE: usize = 0x0010_0000; - -pub const SYS_LINK: usize = SYS_CLASS_PATH | SYS_ARG_PATH | 9; -pub const SYS_OPEN: usize = SYS_CLASS_PATH | SYS_RET_FILE | 5; -pub const SYS_RMDIR: usize = SYS_CLASS_PATH | 84; -pub const SYS_UNLINK: usize = SYS_CLASS_PATH | 10; - -pub const SYS_CLOSE: usize = SYS_CLASS_FILE | 6; -pub const SYS_DUP: usize = SYS_CLASS_FILE | SYS_RET_FILE | 41; -pub const SYS_DUP2: usize = SYS_CLASS_FILE | SYS_RET_FILE | 63; -pub const SYS_READ: usize = SYS_CLASS_FILE | SYS_ARG_MSLICE | 3; -pub const SYS_WRITE: usize = SYS_CLASS_FILE | SYS_ARG_SLICE | 4; -pub const SYS_LSEEK: usize = SYS_CLASS_FILE | 19; -pub const SYS_FCHMOD: usize = SYS_CLASS_FILE | 94; -pub const SYS_FCHOWN: usize = SYS_CLASS_FILE | 207; -pub const SYS_FCNTL: usize = SYS_CLASS_FILE | 55; -pub const SYS_FEVENT: usize = SYS_CLASS_FILE | 927; - -// TODO: Rename FMAP/FUNMAP to MMAP/MUNMAP -pub const SYS_FMAP_OLD: usize = SYS_CLASS_FILE | SYS_ARG_SLICE | 90; -pub const SYS_FMAP: usize = SYS_CLASS_FILE | SYS_ARG_SLICE | 900; -// TODO: SYS_FUNMAP should be SYS_CLASS_FILE -// TODO: Remove FMAP/FMAP_OLD -pub const SYS_FUNMAP_OLD: usize = SYS_CLASS_FILE | 91; -pub const SYS_FUNMAP: 
usize = SYS_CLASS_FILE | 92; -pub const SYS_MREMAP: usize = 155; - -pub const SYS_FPATH: usize = SYS_CLASS_FILE | SYS_ARG_MSLICE | 928; -pub const SYS_FRENAME: usize = SYS_CLASS_FILE | SYS_ARG_PATH | 38; -pub const SYS_FSTAT: usize = SYS_CLASS_FILE | SYS_ARG_MSLICE | 28; -pub const SYS_FSTATVFS: usize = SYS_CLASS_FILE | SYS_ARG_MSLICE | 100; -pub const SYS_FSYNC: usize = SYS_CLASS_FILE | 118; -pub const SYS_FTRUNCATE: usize = SYS_CLASS_FILE | 93; -pub const SYS_FUTIMENS: usize = SYS_CLASS_FILE | SYS_ARG_SLICE | 320; - -// b = file, c = flags, d = required_page_count, uid:gid = offset -pub const KSMSG_MMAP: usize = SYS_CLASS_FILE | 72; - -// b = file, c = flags, d = page_count, uid:gid = offset -pub const KSMSG_MSYNC: usize = SYS_CLASS_FILE | 73; - -// b = file, c = page_count, uid:gid = offset -pub const KSMSG_MUNMAP: usize = SYS_CLASS_FILE | 74; - -// b = file, c = flags, d = page_count, uid:gid = offset -pub const KSMSG_MMAP_PREP: usize = SYS_CLASS_FILE | 75; - -pub const SYS_CLOCK_GETTIME: usize = 265; -pub const SYS_EXIT: usize = 1; -pub const SYS_FUTEX: usize = 240; -pub const SYS_GETEGID: usize = 202; -pub const SYS_GETENS: usize = 951; -pub const SYS_GETEUID: usize = 201; -pub const SYS_GETGID: usize = 200; -pub const SYS_GETNS: usize = 950; -pub const SYS_GETPID: usize = 20; -pub const SYS_GETPGID: usize = 132; -pub const SYS_GETPPID: usize = 64; -pub const SYS_GETUID: usize = 199; -pub const SYS_IOPL: usize = 110; -pub const SYS_KILL: usize = 37; -pub const SYS_MPROTECT: usize = 125; -pub const SYS_MKNS: usize = 984; -pub const SYS_NANOSLEEP: usize =162; -pub const SYS_PHYSALLOC: usize =945; -pub const SYS_PHYSALLOC3: usize=9453; -pub const SYS_PHYSFREE: usize = 946; -pub const SYS_PHYSMAP: usize = 947; -pub const SYS_VIRTTOPHYS: usize=949; -pub const SYS_SETPGID: usize = 57; -pub const SYS_SETREGID: usize = 204; -pub const SYS_SETRENS: usize = 952; -pub const SYS_SETREUID: usize = 203; -pub const SYS_SIGACTION: usize =67; -pub const 
SYS_SIGPROCMASK:usize=126; -pub const SYS_SIGRETURN: usize =119; -pub const SYS_UMASK: usize = 60; -pub const SYS_WAITPID: usize = 7; -pub const SYS_YIELD: usize = 158; diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/generate.sh s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/generate.sh --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/generate.sh 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/generate.sh 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -#!/usr/bin/env bash - -set -e - -echo "Generating SchemeMut from Scheme" -sed 's/trait Scheme/trait SchemeMut/' scheme.rs \ -| sed 's/\&self/\&mut self/g' \ -> scheme_mut.rs - -echo "Generating SchemeBlock from Scheme" -sed 's/trait Scheme/trait SchemeBlock/' scheme.rs \ -| sed 's/fn handle(\&self, packet: \&mut Packet)/fn handle(\&self, packet: \&Packet) -> Option/' \ -| sed 's/packet.a = Error::mux(res);/res.transpose().map(Error::mux)/' \ -| sed 's/\.map(|f| f\.bits())/\.map(|f| f.map(|f| f.bits()))/' \ -| sed 's/\.map(|o| o as usize)/.map(|o| o.map(|o| o as usize))/' \ -| sed 's/Ok(0)/Ok(Some(0))/g' \ -| sed 's/Result<\([^>]\+\)>/Result>/g' \ -| sed 's/convert_to_this_scheme/convert_to_this_scheme_block/g' \ -| sed 's/convert_in_scheme_handle/convert_in_scheme_handle_block/g' \ -> scheme_block.rs - -echo "Generating SchemeBlockMut from SchemeBlock" -sed 's/trait SchemeBlock/trait SchemeBlockMut/' scheme_block.rs \ -| sed 's/\&self/\&mut self/g' \ -> scheme_block_mut.rs diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/mod.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/mod.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/mod.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,68 +0,0 @@ -use core::{slice, str}; - -use crate::{Error, Result, EOPNOTSUPP, ESKMSG, Packet, SKMSG_FRETURNFD}; - -pub use 
self::scheme::Scheme; -pub use self::scheme_mut::SchemeMut; -pub use self::scheme_block::SchemeBlock; -pub use self::scheme_block_mut::SchemeBlockMut; -pub use self::seek::*; - -unsafe fn str_from_raw_parts(ptr: *const u8, len: usize) -> Option<&'static str> { - let slice = slice::from_raw_parts(ptr, len); - str::from_utf8(slice).ok() -} - -mod scheme; -mod scheme_mut; -mod scheme_block; -mod scheme_block_mut; -mod seek; - -pub struct CallerCtx { - pub pid: usize, - pub uid: u32, - pub gid: u32, -} - -pub enum OpenResult { - ThisScheme { number: usize }, - OtherScheme { fd: usize }, -} - -// TODO: Find a better solution than generate.sh -pub(crate) fn convert_to_this_scheme(r: Result) -> Result { - r.map(|number| OpenResult::ThisScheme { number }) -} -pub(crate) fn convert_to_this_scheme_block(r: Result>) -> Result> { - r.map(|o| o.map(|number| OpenResult::ThisScheme { number })) -} -pub(crate) fn convert_in_scheme_handle_block(_: &Packet, result: Result>) -> Result> { - match result { - Ok(Some(OpenResult::ThisScheme { number })) => Ok(Some(number)), - Ok(Some(OpenResult::OtherScheme { .. 
})) => Err(Error::new(EOPNOTSUPP)), - Ok(None) => Ok(None), - Err(err) => Err(err), - } -} -pub(crate) fn convert_in_scheme_handle(packet: &mut Packet, result: Result) -> Result { - match result { - Ok(OpenResult::ThisScheme { number }) => Ok(number), - Ok(OpenResult::OtherScheme { fd }) => { - packet.b = SKMSG_FRETURNFD; - packet.c = fd; - Err(Error::new(ESKMSG)) - } - Err(err) => Err(err), - } -} - -impl CallerCtx { - pub fn from_packet(packet: &Packet) -> Self { - Self { - pid: packet.pid, - uid: packet.uid, - gid: packet.gid, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/scheme_block_mut.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/scheme_block_mut.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/scheme_block_mut.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/scheme_block_mut.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,194 +0,0 @@ -use core::{mem, slice}; - -use crate::CallerCtx; -use crate::OpenResult; -use crate::data::*; -use crate::error::*; -use crate::flag::*; -use crate::number::*; -use crate::scheme::*; - -pub trait SchemeBlockMut { - fn handle(&mut self, packet: &Packet) -> Option { - let res = match packet.a { - SYS_OPEN => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - convert_in_scheme_handle_block(packet, self.xopen(path, packet.d, &CallerCtx::from_packet(&packet))) - } - else { - Err(Error::new(EINVAL)) - }, - SYS_RMDIR => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - self.rmdir(path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - SYS_UNLINK => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - self.unlink(path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - - SYS_DUP => convert_in_scheme_handle_block(packet, self.xdup(packet.b, unsafe { slice::from_raw_parts(packet.c as *const u8, 
packet.d) }, &CallerCtx::from_packet(&packet))), - SYS_READ => self.read(packet.b, unsafe { slice::from_raw_parts_mut(packet.c as *mut u8, packet.d) }), - SYS_WRITE => self.write(packet.b, unsafe { slice::from_raw_parts(packet.c as *const u8, packet.d) }), - SYS_LSEEK => self.seek(packet.b, packet.c as isize, packet.d).map(|o| o.map(|o| o as usize)), - SYS_FCHMOD => self.fchmod(packet.b, packet.c as u16), - SYS_FCHOWN => self.fchown(packet.b, packet.c as u32, packet.d as u32), - SYS_FCNTL => self.fcntl(packet.b, packet.c, packet.d), - SYS_FEVENT => self.fevent(packet.b, EventFlags::from_bits_truncate(packet.c)).map(|f| f.map(|f| f.bits())), - SYS_FPATH => self.fpath(packet.b, unsafe { slice::from_raw_parts_mut(packet.c as *mut u8, packet.d) }), - SYS_FRENAME => if let Some(path) = unsafe { str_from_raw_parts(packet.c as *const u8, packet.d) } { - self.frename(packet.b, path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - SYS_FSTAT => if packet.d >= mem::size_of::() { - self.fstat(packet.b, unsafe { &mut *(packet.c as *mut Stat) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_FSTATVFS => if packet.d >= mem::size_of::() { - self.fstatvfs(packet.b, unsafe { &mut *(packet.c as *mut StatVfs) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_FSYNC => self.fsync(packet.b), - SYS_FTRUNCATE => self.ftruncate(packet.b, packet.c), - SYS_FUTIMENS => if packet.d >= mem::size_of::() { - self.futimens(packet.b, unsafe { slice::from_raw_parts(packet.c as *const TimeSpec, packet.d / mem::size_of::()) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_CLOSE => self.close(packet.b), - - KSMSG_MMAP_PREP => self.mmap_prep(packet.b, u64::from(packet.uid) | (u64::from(packet.gid) << 32), packet.c, MapFlags::from_bits_truncate(packet.d)), - KSMSG_MUNMAP => self.munmap(packet.b, u64::from(packet.uid) | (u64::from(packet.gid) << 32), packet.c, MunmapFlags::from_bits_truncate(packet.d)), - - _ => Err(Error::new(ENOSYS)) - }; - - res.transpose().map(Error::mux) - } - 
- /* Scheme operations */ - - #[allow(unused_variables)] - fn open(&mut self, path: &str, flags: usize, uid: u32, gid: u32) -> Result> { - Err(Error::new(ENOENT)) - } - #[allow(unused_variables)] - fn xopen(&mut self, path: &str, flags: usize, ctx: &CallerCtx) -> Result> { - convert_to_this_scheme_block(self.open(path, flags, ctx.uid, ctx.gid)) - } - - #[allow(unused_variables)] - fn chmod(&mut self, path: &str, mode: u16, uid: u32, gid: u32) -> Result> { - Err(Error::new(ENOENT)) - } - - #[allow(unused_variables)] - fn rmdir(&mut self, path: &str, uid: u32, gid: u32) -> Result> { - Err(Error::new(ENOENT)) - } - - #[allow(unused_variables)] - fn unlink(&mut self, path: &str, uid: u32, gid: u32) -> Result> { - Err(Error::new(ENOENT)) - } - - /* Resource operations */ - #[allow(unused_variables)] - fn dup(&mut self, old_id: usize, buf: &[u8]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn xdup(&mut self, old_id: usize, buf: &[u8], ctx: &CallerCtx) -> Result> { - convert_to_this_scheme_block(self.dup(old_id, buf)) - } - - #[allow(unused_variables)] - fn read(&mut self, id: usize, buf: &mut [u8]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn write(&mut self, id: usize, buf: &[u8]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn seek(&mut self, id: usize, pos: isize, whence: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fchmod(&mut self, id: usize, mode: u16) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fchown(&mut self, id: usize, uid: u32, gid: u32) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fcntl(&mut self, id: usize, cmd: usize, arg: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fevent(&mut self, id: usize, flags: EventFlags) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fpath(&mut self, id: usize, 
buf: &mut [u8]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn frename(&mut self, id: usize, path: &str, uid: u32, gid: u32) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fstat(&mut self, id: usize, stat: &mut Stat) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fstatvfs(&mut self, id: usize, stat: &mut StatVfs) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fsync(&mut self, id: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn ftruncate(&mut self, id: usize, len: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn futimens(&mut self, id: usize, times: &[TimeSpec]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn close(&mut self, id: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn mmap_prep(&mut self, id: usize, offset: u64, size: usize, flags: MapFlags) -> Result> { - Err(Error::new(EOPNOTSUPP)) - } - - #[allow(unused_variables)] - fn munmap(&mut self, id: usize, offset: u64, size: usize, flags: MunmapFlags) -> Result> { - Err(Error::new(EOPNOTSUPP)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/scheme_block.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/scheme_block.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/scheme_block.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/scheme_block.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,194 +0,0 @@ -use core::{mem, slice}; - -use crate::CallerCtx; -use crate::OpenResult; -use crate::data::*; -use crate::error::*; -use crate::flag::*; -use crate::number::*; -use crate::scheme::*; - -pub trait SchemeBlock { - fn handle(&self, packet: &Packet) -> Option { - let res = match packet.a { - SYS_OPEN => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, 
packet.c) } { - convert_in_scheme_handle_block(packet, self.xopen(path, packet.d, &CallerCtx::from_packet(&packet))) - } - else { - Err(Error::new(EINVAL)) - }, - SYS_RMDIR => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - self.rmdir(path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - SYS_UNLINK => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - self.unlink(path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - - SYS_DUP => convert_in_scheme_handle_block(packet, self.xdup(packet.b, unsafe { slice::from_raw_parts(packet.c as *const u8, packet.d) }, &CallerCtx::from_packet(&packet))), - SYS_READ => self.read(packet.b, unsafe { slice::from_raw_parts_mut(packet.c as *mut u8, packet.d) }), - SYS_WRITE => self.write(packet.b, unsafe { slice::from_raw_parts(packet.c as *const u8, packet.d) }), - SYS_LSEEK => self.seek(packet.b, packet.c as isize, packet.d).map(|o| o.map(|o| o as usize)), - SYS_FCHMOD => self.fchmod(packet.b, packet.c as u16), - SYS_FCHOWN => self.fchown(packet.b, packet.c as u32, packet.d as u32), - SYS_FCNTL => self.fcntl(packet.b, packet.c, packet.d), - SYS_FEVENT => self.fevent(packet.b, EventFlags::from_bits_truncate(packet.c)).map(|f| f.map(|f| f.bits())), - SYS_FPATH => self.fpath(packet.b, unsafe { slice::from_raw_parts_mut(packet.c as *mut u8, packet.d) }), - SYS_FRENAME => if let Some(path) = unsafe { str_from_raw_parts(packet.c as *const u8, packet.d) } { - self.frename(packet.b, path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - SYS_FSTAT => if packet.d >= mem::size_of::() { - self.fstat(packet.b, unsafe { &mut *(packet.c as *mut Stat) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_FSTATVFS => if packet.d >= mem::size_of::() { - self.fstatvfs(packet.b, unsafe { &mut *(packet.c as *mut StatVfs) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_FSYNC => self.fsync(packet.b), - SYS_FTRUNCATE => 
self.ftruncate(packet.b, packet.c), - SYS_FUTIMENS => if packet.d >= mem::size_of::() { - self.futimens(packet.b, unsafe { slice::from_raw_parts(packet.c as *const TimeSpec, packet.d / mem::size_of::()) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_CLOSE => self.close(packet.b), - - KSMSG_MMAP_PREP => self.mmap_prep(packet.b, u64::from(packet.uid) | (u64::from(packet.gid) << 32), packet.c, MapFlags::from_bits_truncate(packet.d)), - KSMSG_MUNMAP => self.munmap(packet.b, u64::from(packet.uid) | (u64::from(packet.gid) << 32), packet.c, MunmapFlags::from_bits_truncate(packet.d)), - - _ => Err(Error::new(ENOSYS)) - }; - - res.transpose().map(Error::mux) - } - - /* Scheme operations */ - - #[allow(unused_variables)] - fn open(&self, path: &str, flags: usize, uid: u32, gid: u32) -> Result> { - Err(Error::new(ENOENT)) - } - #[allow(unused_variables)] - fn xopen(&self, path: &str, flags: usize, ctx: &CallerCtx) -> Result> { - convert_to_this_scheme_block(self.open(path, flags, ctx.uid, ctx.gid)) - } - - #[allow(unused_variables)] - fn chmod(&self, path: &str, mode: u16, uid: u32, gid: u32) -> Result> { - Err(Error::new(ENOENT)) - } - - #[allow(unused_variables)] - fn rmdir(&self, path: &str, uid: u32, gid: u32) -> Result> { - Err(Error::new(ENOENT)) - } - - #[allow(unused_variables)] - fn unlink(&self, path: &str, uid: u32, gid: u32) -> Result> { - Err(Error::new(ENOENT)) - } - - /* Resource operations */ - #[allow(unused_variables)] - fn dup(&self, old_id: usize, buf: &[u8]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn xdup(&self, old_id: usize, buf: &[u8], ctx: &CallerCtx) -> Result> { - convert_to_this_scheme_block(self.dup(old_id, buf)) - } - - #[allow(unused_variables)] - fn read(&self, id: usize, buf: &mut [u8]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn write(&self, id: usize, buf: &[u8]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn seek(&self, id: usize, pos: 
isize, whence: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fchmod(&self, id: usize, mode: u16) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fchown(&self, id: usize, uid: u32, gid: u32) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fcntl(&self, id: usize, cmd: usize, arg: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fevent(&self, id: usize, flags: EventFlags) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fpath(&self, id: usize, buf: &mut [u8]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn frename(&self, id: usize, path: &str, uid: u32, gid: u32) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fstat(&self, id: usize, stat: &mut Stat) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fstatvfs(&self, id: usize, stat: &mut StatVfs) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fsync(&self, id: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn ftruncate(&self, id: usize, len: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn futimens(&self, id: usize, times: &[TimeSpec]) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn close(&self, id: usize) -> Result> { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn mmap_prep(&self, id: usize, offset: u64, size: usize, flags: MapFlags) -> Result> { - Err(Error::new(EOPNOTSUPP)) - } - - #[allow(unused_variables)] - fn munmap(&self, id: usize, offset: u64, size: usize, flags: MunmapFlags) -> Result> { - Err(Error::new(EOPNOTSUPP)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/scheme_mut.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/scheme_mut.rs --- 
s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/scheme_mut.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/scheme_mut.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,194 +0,0 @@ -use core::{mem, slice}; - -use crate::CallerCtx; -use crate::OpenResult; -use crate::data::*; -use crate::error::*; -use crate::flag::*; -use crate::number::*; -use crate::scheme::*; - -pub trait SchemeMut { - fn handle(&mut self, packet: &mut Packet) { - let res = match packet.a { - SYS_OPEN => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - convert_in_scheme_handle(packet, self.xopen(path, packet.d, &CallerCtx::from_packet(&packet))) - } - else { - Err(Error::new(EINVAL)) - }, - SYS_RMDIR => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - self.rmdir(path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - SYS_UNLINK => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - self.unlink(path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - - SYS_DUP => convert_in_scheme_handle(packet, self.xdup(packet.b, unsafe { slice::from_raw_parts(packet.c as *const u8, packet.d) }, &CallerCtx::from_packet(&packet))), - SYS_READ => self.read(packet.b, unsafe { slice::from_raw_parts_mut(packet.c as *mut u8, packet.d) }), - SYS_WRITE => self.write(packet.b, unsafe { slice::from_raw_parts(packet.c as *const u8, packet.d) }), - SYS_LSEEK => self.seek(packet.b, packet.c as isize, packet.d).map(|o| o as usize), - SYS_FCHMOD => self.fchmod(packet.b, packet.c as u16), - SYS_FCHOWN => self.fchown(packet.b, packet.c as u32, packet.d as u32), - SYS_FCNTL => self.fcntl(packet.b, packet.c, packet.d), - SYS_FEVENT => self.fevent(packet.b, EventFlags::from_bits_truncate(packet.c)).map(|f| f.bits()), - SYS_FPATH => self.fpath(packet.b, unsafe { slice::from_raw_parts_mut(packet.c as *mut u8, packet.d) }), - SYS_FRENAME => 
if let Some(path) = unsafe { str_from_raw_parts(packet.c as *const u8, packet.d) } { - self.frename(packet.b, path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - SYS_FSTAT => if packet.d >= mem::size_of::() { - self.fstat(packet.b, unsafe { &mut *(packet.c as *mut Stat) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_FSTATVFS => if packet.d >= mem::size_of::() { - self.fstatvfs(packet.b, unsafe { &mut *(packet.c as *mut StatVfs) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_FSYNC => self.fsync(packet.b), - SYS_FTRUNCATE => self.ftruncate(packet.b, packet.c), - SYS_FUTIMENS => if packet.d >= mem::size_of::() { - self.futimens(packet.b, unsafe { slice::from_raw_parts(packet.c as *const TimeSpec, packet.d / mem::size_of::()) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_CLOSE => self.close(packet.b), - - KSMSG_MMAP_PREP => self.mmap_prep(packet.b, u64::from(packet.uid) | (u64::from(packet.gid) << 32), packet.c, MapFlags::from_bits_truncate(packet.d)), - KSMSG_MUNMAP => self.munmap(packet.b, u64::from(packet.uid) | (u64::from(packet.gid) << 32), packet.c, MunmapFlags::from_bits_truncate(packet.d)), - - _ => Err(Error::new(ENOSYS)) - }; - - packet.a = Error::mux(res); - } - - /* Scheme operations */ - - #[allow(unused_variables)] - fn open(&mut self, path: &str, flags: usize, uid: u32, gid: u32) -> Result { - Err(Error::new(ENOENT)) - } - #[allow(unused_variables)] - fn xopen(&mut self, path: &str, flags: usize, ctx: &CallerCtx) -> Result { - convert_to_this_scheme(self.open(path, flags, ctx.uid, ctx.gid)) - } - - #[allow(unused_variables)] - fn chmod(&mut self, path: &str, mode: u16, uid: u32, gid: u32) -> Result { - Err(Error::new(ENOENT)) - } - - #[allow(unused_variables)] - fn rmdir(&mut self, path: &str, uid: u32, gid: u32) -> Result { - Err(Error::new(ENOENT)) - } - - #[allow(unused_variables)] - fn unlink(&mut self, path: &str, uid: u32, gid: u32) -> Result { - Err(Error::new(ENOENT)) - } - - /* Resource operations */ - 
#[allow(unused_variables)] - fn dup(&mut self, old_id: usize, buf: &[u8]) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn xdup(&mut self, old_id: usize, buf: &[u8], ctx: &CallerCtx) -> Result { - convert_to_this_scheme(self.dup(old_id, buf)) - } - - #[allow(unused_variables)] - fn read(&mut self, id: usize, buf: &mut [u8]) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn write(&mut self, id: usize, buf: &[u8]) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn seek(&mut self, id: usize, pos: isize, whence: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fchmod(&mut self, id: usize, mode: u16) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fchown(&mut self, id: usize, uid: u32, gid: u32) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fcntl(&mut self, id: usize, cmd: usize, arg: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fevent(&mut self, id: usize, flags: EventFlags) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fpath(&mut self, id: usize, buf: &mut [u8]) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn frename(&mut self, id: usize, path: &str, uid: u32, gid: u32) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fstat(&mut self, id: usize, stat: &mut Stat) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fstatvfs(&mut self, id: usize, stat: &mut StatVfs) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fsync(&mut self, id: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn ftruncate(&mut self, id: usize, len: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn futimens(&mut self, id: usize, times: &[TimeSpec]) -> Result { - Err(Error::new(EBADF)) 
- } - - #[allow(unused_variables)] - fn close(&mut self, id: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn mmap_prep(&mut self, id: usize, offset: u64, size: usize, flags: MapFlags) -> Result { - Err(Error::new(EOPNOTSUPP)) - } - - #[allow(unused_variables)] - fn munmap(&mut self, id: usize, offset: u64, size: usize, flags: MunmapFlags) -> Result { - Err(Error::new(EOPNOTSUPP)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/scheme.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/scheme.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/scheme.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/scheme.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,194 +0,0 @@ -use core::{mem, slice}; - -use crate::CallerCtx; -use crate::OpenResult; -use crate::data::*; -use crate::error::*; -use crate::flag::*; -use crate::number::*; -use crate::scheme::*; - -pub trait Scheme { - fn handle(&self, packet: &mut Packet) { - let res = match packet.a { - SYS_OPEN => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - convert_in_scheme_handle(packet, self.xopen(path, packet.d, &CallerCtx::from_packet(&packet))) - } - else { - Err(Error::new(EINVAL)) - }, - SYS_RMDIR => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - self.rmdir(path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - SYS_UNLINK => if let Some(path) = unsafe { str_from_raw_parts(packet.b as *const u8, packet.c) } { - self.unlink(path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - - SYS_DUP => convert_in_scheme_handle(packet, self.xdup(packet.b, unsafe { slice::from_raw_parts(packet.c as *const u8, packet.d) }, &CallerCtx::from_packet(&packet))), - SYS_READ => self.read(packet.b, unsafe { slice::from_raw_parts_mut(packet.c as *mut u8, packet.d) }), - SYS_WRITE => self.write(packet.b, 
unsafe { slice::from_raw_parts(packet.c as *const u8, packet.d) }), - SYS_LSEEK => self.seek(packet.b, packet.c as isize, packet.d).map(|o| o as usize), - SYS_FCHMOD => self.fchmod(packet.b, packet.c as u16), - SYS_FCHOWN => self.fchown(packet.b, packet.c as u32, packet.d as u32), - SYS_FCNTL => self.fcntl(packet.b, packet.c, packet.d), - SYS_FEVENT => self.fevent(packet.b, EventFlags::from_bits_truncate(packet.c)).map(|f| f.bits()), - SYS_FPATH => self.fpath(packet.b, unsafe { slice::from_raw_parts_mut(packet.c as *mut u8, packet.d) }), - SYS_FRENAME => if let Some(path) = unsafe { str_from_raw_parts(packet.c as *const u8, packet.d) } { - self.frename(packet.b, path, packet.uid, packet.gid) - } else { - Err(Error::new(EINVAL)) - }, - SYS_FSTAT => if packet.d >= mem::size_of::() { - self.fstat(packet.b, unsafe { &mut *(packet.c as *mut Stat) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_FSTATVFS => if packet.d >= mem::size_of::() { - self.fstatvfs(packet.b, unsafe { &mut *(packet.c as *mut StatVfs) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_FSYNC => self.fsync(packet.b), - SYS_FTRUNCATE => self.ftruncate(packet.b, packet.c), - SYS_FUTIMENS => if packet.d >= mem::size_of::() { - self.futimens(packet.b, unsafe { slice::from_raw_parts(packet.c as *const TimeSpec, packet.d / mem::size_of::()) }) - } else { - Err(Error::new(EFAULT)) - }, - SYS_CLOSE => self.close(packet.b), - - KSMSG_MMAP_PREP => self.mmap_prep(packet.b, u64::from(packet.uid) | (u64::from(packet.gid) << 32), packet.c, MapFlags::from_bits_truncate(packet.d)), - KSMSG_MUNMAP => self.munmap(packet.b, u64::from(packet.uid) | (u64::from(packet.gid) << 32), packet.c, MunmapFlags::from_bits_truncate(packet.d)), - - _ => Err(Error::new(ENOSYS)) - }; - - packet.a = Error::mux(res); - } - - /* Scheme operations */ - - #[allow(unused_variables)] - fn open(&self, path: &str, flags: usize, uid: u32, gid: u32) -> Result { - Err(Error::new(ENOENT)) - } - #[allow(unused_variables)] - fn xopen(&self, path: 
&str, flags: usize, ctx: &CallerCtx) -> Result { - convert_to_this_scheme(self.open(path, flags, ctx.uid, ctx.gid)) - } - - #[allow(unused_variables)] - fn chmod(&self, path: &str, mode: u16, uid: u32, gid: u32) -> Result { - Err(Error::new(ENOENT)) - } - - #[allow(unused_variables)] - fn rmdir(&self, path: &str, uid: u32, gid: u32) -> Result { - Err(Error::new(ENOENT)) - } - - #[allow(unused_variables)] - fn unlink(&self, path: &str, uid: u32, gid: u32) -> Result { - Err(Error::new(ENOENT)) - } - - /* Resource operations */ - #[allow(unused_variables)] - fn dup(&self, old_id: usize, buf: &[u8]) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn xdup(&self, old_id: usize, buf: &[u8], ctx: &CallerCtx) -> Result { - convert_to_this_scheme(self.dup(old_id, buf)) - } - - #[allow(unused_variables)] - fn read(&self, id: usize, buf: &mut [u8]) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn write(&self, id: usize, buf: &[u8]) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn seek(&self, id: usize, pos: isize, whence: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fchmod(&self, id: usize, mode: u16) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fchown(&self, id: usize, uid: u32, gid: u32) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fcntl(&self, id: usize, cmd: usize, arg: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fevent(&self, id: usize, flags: EventFlags) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fpath(&self, id: usize, buf: &mut [u8]) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn frename(&self, id: usize, path: &str, uid: u32, gid: u32) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fstat(&self, id: usize, stat: &mut Stat) -> Result { - 
Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fstatvfs(&self, id: usize, stat: &mut StatVfs) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn fsync(&self, id: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn ftruncate(&self, id: usize, len: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn futimens(&self, id: usize, times: &[TimeSpec]) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn close(&self, id: usize) -> Result { - Err(Error::new(EBADF)) - } - - #[allow(unused_variables)] - fn mmap_prep(&self, id: usize, offset: u64, size: usize, flags: MapFlags) -> Result { - Err(Error::new(EOPNOTSUPP)) - } - - #[allow(unused_variables)] - fn munmap(&self, id: usize, offset: u64, size: usize, flags: MunmapFlags) -> Result { - Err(Error::new(EOPNOTSUPP)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/seek.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/seek.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/scheme/seek.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/scheme/seek.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,33 +0,0 @@ -use core::cmp; -use core::convert::TryFrom; -use crate::error::*; -use crate::flag::*; - -/// Helper for seek calls -/// In most cases it's easier to use a usize to track the offset and buffer size internally, -/// but the seek interface uses isize. This wrapper ensures EOVERFLOW errors are returned -/// as appropriate if the value in the usize can't fit in the isize. 
-pub fn calc_seek_offset_usize(cur_offset: usize, pos: isize, whence: usize, buf_len: usize) -> Result { - let cur_offset = isize::try_from(cur_offset).or_else(|_| Err(Error::new(EOVERFLOW)))?; - let buf_len = isize::try_from(buf_len).or_else(|_| Err(Error::new(EOVERFLOW)))?; - calc_seek_offset_isize(cur_offset, pos, whence, buf_len) -} - -/// Helper for seek calls -/// Result is guaranteed to be positive. -/// EOVERFLOW returned if the arguments would cause an overflow. -/// EINVAL returned if the new offset is out of bounds. -pub fn calc_seek_offset_isize(cur_offset: isize, pos: isize, whence: usize, buf_len: isize) -> Result { - let new_offset = match whence { - SEEK_CUR => pos.checked_add(cur_offset), - SEEK_END => pos.checked_add(buf_len), - SEEK_SET => Some(pos), - _ => None, - }; - - match new_offset { - Some(new_offset) if new_offset < 0 => Err(Error::new(EINVAL)), - Some(new_offset) => Ok(cmp::min(new_offset, buf_len)), - None => Err(Error::new(EOVERFLOW)) - } -} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/redox_syscall/src/tests.rs s390-tools-2.33.1/rust-vendor/redox_syscall/src/tests.rs --- s390-tools-2.31.0/rust-vendor/redox_syscall/src/tests.rs 2024-02-06 12:28:08.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/redox_syscall/src/tests.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,416 +0,0 @@ -#[test] -fn clone() { - let expected_status = 42; - let pid_res = unsafe { crate::clone(crate::CloneFlags::empty()) }; - if pid_res == Ok(0) { - crate::exit(expected_status).unwrap(); - panic!("failed to exit"); - } else { - let pid = dbg!(pid_res).unwrap(); - let mut status = 0; - assert_eq!(dbg!(crate::waitpid(pid, &mut status, crate::WaitFlags::empty())), Ok(pid)); - assert_eq!(dbg!(crate::wifexited(status)), true); - assert_eq!(dbg!(crate::wexitstatus(status)), expected_status); - } -} - -//TODO: close - -#[test] -fn clock_gettime() { - let mut tp = crate::TimeSpec::default(); - assert_eq!(dbg!( - 
crate::clock_gettime(crate::CLOCK_MONOTONIC, &mut tp) - ), Ok(0)); - assert_ne!(dbg!(tp), crate::TimeSpec::default()); - - tp = crate::TimeSpec::default(); - assert_eq!(dbg!( - crate::clock_gettime(crate::CLOCK_REALTIME, &mut tp) - ), Ok(0)); - assert_ne!(dbg!(tp), crate::TimeSpec::default()); -} - -//TODO: dup - -//TODO: dup2 - -//TODO: exit (handled by clone?) - -//TODO: fchmod - -//TODO: fcntl - -#[test] -fn fexec() { - let name = "file:/bin/ls"; - - let fd = dbg!( - crate::open(name, crate::O_RDONLY | crate::O_CLOEXEC) - ).unwrap(); - - let args = &[ - [name.as_ptr() as usize, name.len()] - ]; - - let vars = &[]; - - let pid_res = unsafe { crate::clone(crate::CloneFlags::empty()) }; - if pid_res == Ok(0) { - crate::fexec(fd, args, vars).unwrap(); - panic!("failed to fexec"); - } else { - assert_eq!(dbg!(crate::close(fd)), Ok(0)); - - let pid = dbg!(pid_res).unwrap(); - let mut status = 0; - assert_eq!(dbg!(crate::waitpid(pid, &mut status, crate::WaitFlags::empty())), Ok(pid)); - assert_eq!(dbg!(crate::wifexited(status)), true); - assert_eq!(dbg!(crate::wexitstatus(status)), 0); - } -} - -#[test] -fn fmap() { - use std::slice; - - let fd = dbg!( - crate::open( - "file:/tmp/syscall-tests-fmap", - crate::O_CREAT | crate::O_RDWR | crate::O_CLOEXEC - ) - ).unwrap(); - - let size = 128; - - let map = unsafe { - slice::from_raw_parts_mut( - dbg!( - crate::fmap(fd, &crate::Map { - address: 0, - offset: 0, - size, - flags: crate::PROT_READ | crate::PROT_WRITE - }) - ).unwrap() as *mut u8, - 128 - ) - }; - - // Maps should be available after closing - assert_eq!(dbg!(crate::close(fd)), Ok(0)); - - for i in 0..128 { - map[i as usize] = i; - assert_eq!(map[i as usize], i); - } - - //TODO: add msync - unsafe { - assert_eq!(dbg!( - crate::funmap(map.as_mut_ptr() as usize, size) - ), Ok(0)); - } -} - -// funmap tested by fmap - -#[test] -fn fpath() { - use std::str; - - let path = "file:/tmp/syscall-tests-fpath"; - let fd = dbg!( - crate::open( - dbg!(path), - crate::O_CREAT 
| crate::O_RDWR | crate::O_CLOEXEC - ) - ).unwrap(); - - let mut buf = [0; 4096]; - let count = dbg!( - crate::fpath(fd, &mut buf) - ).unwrap(); - - assert_eq!(dbg!(str::from_utf8(&buf[..count])), Ok(path)); - - assert_eq!(dbg!(crate::close(fd)), Ok(0)); -} - -//TODO: frename - -#[test] -fn fstat() { - let path = "file:/tmp/syscall-tests-fstat"; - let fd = dbg!( - crate::open( - dbg!(path), - crate::O_CREAT | crate::O_RDWR | crate::O_CLOEXEC - ) - ).unwrap(); - - let mut stat = crate::Stat::default(); - assert_eq!(dbg!(crate::fstat(fd, &mut stat)), Ok(0)); - assert_ne!(dbg!(stat), crate::Stat::default()); - - assert_eq!(dbg!(crate::close(fd)), Ok(0)); -} - -#[test] -fn fstatvfs() { - let path = "file:/tmp/syscall-tests-fstatvfs"; - let fd = dbg!( - crate::open( - dbg!(path), - crate::O_CREAT | crate::O_RDWR | crate::O_CLOEXEC - ) - ).unwrap(); - - let mut statvfs = crate::StatVfs::default(); - assert_eq!(dbg!(crate::fstatvfs(fd, &mut statvfs)), Ok(0)); - assert_ne!(dbg!(statvfs), crate::StatVfs::default()); - - assert_eq!(dbg!(crate::close(fd)), Ok(0)); -} - -//TODO: fsync - -//TODO: ftruncate - -//TODO: futimens - -//TODO: futex - -#[test] -fn getegid() { - assert_eq!(crate::getegid(), Ok(0)); -} - -#[test] -fn getens() { - assert_eq!(crate::getens(), Ok(1)); -} - -#[test] -fn geteuid() { - assert_eq!(crate::geteuid(), Ok(0)); -} - -#[test] -fn getgid() { - assert_eq!(crate::getgid(), Ok(0)); -} - -#[test] -fn getns() { - assert_eq!(crate::getns(), Ok(1)); -} - -//TODO: getpid - -//TODO: getpgid - -//TODO: getppid - -#[test] -fn getuid() { - assert_eq!(crate::getuid(), Ok(0)); -} - -//TODO: iopl - -//TODO: kill - -//TODO: link (probably will not work) - -#[test] -fn lseek() { - let path = "file:/tmp/syscall-tests-lseek"; - let fd = dbg!( - crate::open( - dbg!(path), - crate::O_CREAT | crate::O_RDWR | crate::O_CLOEXEC - ) - ).unwrap(); - - { - let mut buf = [0; 256]; - for i in 0..buf.len() { - buf[i] = i as u8; - } - assert_eq!(dbg!(crate::write(fd, &buf)), 
Ok(buf.len())); - - assert_eq!(dbg!(crate::lseek(fd, 0, crate::SEEK_CUR)), Ok(buf.len())); - assert_eq!(dbg!(crate::lseek(fd, 0, crate::SEEK_SET)), Ok(0)); - assert_eq!(dbg!(crate::lseek(fd, 0, crate::SEEK_END)), Ok(buf.len())); - assert_eq!(dbg!(crate::lseek(fd, 0, crate::SEEK_SET)), Ok(0)); - } - - { - let mut buf = [0; 256]; - assert_eq!(dbg!(crate::read(fd, &mut buf)), Ok(buf.len())); - for i in 0..buf.len() { - assert_eq!(buf[i], i as u8); - } - - assert_eq!(dbg!(crate::lseek(fd, 0, crate::SEEK_CUR)), Ok(buf.len())); - assert_eq!(dbg!(crate::lseek(fd, 0, crate::SEEK_SET)), Ok(0)); - assert_eq!(dbg!(crate::lseek(fd, 0, crate::SEEK_END)), Ok(buf.len())); - assert_eq!(dbg!(crate::lseek(fd, 0, crate::SEEK_SET)), Ok(0)); - } - - assert_eq!(dbg!(crate::close(fd)), Ok(0)); -} - -//TODO: mkns - -//TODO: mprotect - -#[test] -fn nanosleep() { - let req = crate::TimeSpec { - tv_sec: 0, - tv_nsec: 0, - }; - let mut rem = crate::TimeSpec::default(); - assert_eq!(crate::nanosleep(&req, &mut rem), Ok(0)); - assert_eq!(rem, crate::TimeSpec::default()); -} - -//TODO: open - -//TODO: physalloc - -//TODO: physfree - -//TODO: physmap - -//TODO: physunmap - -//TODO: read - -#[test] -fn rmdir() { - let path = "file:/tmp/syscall-tests-rmdir"; - let fd = dbg!( - crate::open( - dbg!(path), - crate::O_CREAT | crate::O_DIRECTORY | crate::O_CLOEXEC - ) - ).unwrap(); - - assert_eq!(dbg!(crate::close(fd)), Ok(0)); - - assert_eq!(dbg!(crate::rmdir(path)), Ok(0)); -} - -//TODO: setpgid - -//TODO: setregid - -//TODO: setrens - -//TODO: setreuid - -//TODO: sigaction - -//TODO: sigprocmask - -//TODO: sigreturn - -#[test] -fn umask() { - let old = dbg!(crate::umask(0o244)).unwrap(); - assert_eq!(dbg!(crate::umask(old)), Ok(0o244)); -} - -#[test] -fn unlink() { - let path = "file:/tmp/syscall-tests-unlink"; - let fd = dbg!( - crate::open( - dbg!(path), - crate::O_CREAT | crate::O_RDWR | crate::O_CLOEXEC - ) - ).unwrap(); - - assert_eq!(dbg!(crate::close(fd)), Ok(0)); - - 
assert_eq!(dbg!(crate::unlink(path)), Ok(0)); -} - -//TODO: virttophys - -// waitpid tested by clone - -//TODO: write - -#[test] -fn sched_yield() { - assert_eq!(dbg!(crate::sched_yield()), Ok(0)); -} - -#[test] -fn sigaction() { - use std::{ - mem, - sync::atomic::{AtomicBool, Ordering} - }; - - static SA_HANDLER_WAS_RAN: AtomicBool = AtomicBool::new(false); - static SA_HANDLER_2_WAS_IGNORED: AtomicBool = AtomicBool::new(false); - - let child = unsafe { crate::clone(crate::CLONE_VM).unwrap() }; - - if child == 0 { - let pid = crate::getpid().unwrap(); - - extern "C" fn hello_im_a_signal_handler(signal: usize) { - assert_eq!(signal, crate::SIGUSR1); - SA_HANDLER_WAS_RAN.store(true, Ordering::SeqCst); - } - - let my_signal_handler = crate::SigAction { - sa_handler: Some(hello_im_a_signal_handler), - ..Default::default() - }; - crate::sigaction(crate::SIGUSR1, Some(&my_signal_handler), None).unwrap(); - - crate::kill(pid, crate::SIGUSR1).unwrap(); // calls handler - - let mut old_signal_handler = crate::SigAction::default(); - crate::sigaction( - crate::SIGUSR1, - Some(&crate::SigAction { - sa_handler: unsafe { mem::transmute::>(crate::SIG_IGN) }, - ..Default::default() - }), - Some(&mut old_signal_handler) - ).unwrap(); - assert_eq!(my_signal_handler, old_signal_handler); - - crate::kill(pid, crate::SIGUSR1).unwrap(); // does nothing - - SA_HANDLER_2_WAS_IGNORED.store(true, Ordering::SeqCst); - - crate::sigaction( - crate::SIGUSR1, - Some(&crate::SigAction { - sa_handler: unsafe { mem::transmute::>(crate::SIG_DFL) }, - ..Default::default() - }), - Some(&mut old_signal_handler) - ).unwrap(); - - crate::kill(pid, crate::SIGUSR1).unwrap(); // actually exits - } else { - let mut status = 0; - dbg!(crate::waitpid(child, &mut status, crate::WaitFlags::empty())).unwrap(); - - assert!(crate::wifsignaled(status)); - assert_eq!(crate::wtermsig(status), crate::SIGUSR1); - - assert!(SA_HANDLER_WAS_RAN.load(Ordering::SeqCst)); - 
assert!(SA_HANDLER_2_WAS_IGNORED.load(Ordering::SeqCst)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/rustc-demangle/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/rustc-demangle/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/rustc-demangle/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/rustc-demangle/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/rustc-demangle/Cargo.toml s390-tools-2.33.1/rust-vendor/rustc-demangle/Cargo.toml --- s390-tools-2.31.0/rust-vendor/rustc-demangle/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/rustc-demangle/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,49 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -name = "rustc-demangle" -version = "0.1.23" -authors = ["Alex Crichton "] -description = """ -Rust compiler symbol demangling. 
-""" -homepage = "https://github.com/alexcrichton/rustc-demangle" -documentation = "https://docs.rs/rustc-demangle" -readme = "README.md" -license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/rustc-demangle" - -[package.metadata.docs.rs] -features = ["std"] -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[profile.release] -lto = true - -[dependencies.compiler_builtins] -version = "0.1.2" -optional = true - -[dependencies.core] -version = "1.0.0" -optional = true -package = "rustc-std-workspace-core" - -[features] -rustc-dep-of-std = [ - "core", - "compiler_builtins", -] -std = [] diff -Nru s390-tools-2.31.0/rust-vendor/rustc-demangle/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/rustc-demangle/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/rustc-demangle/LICENSE-APACHE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/rustc-demangle/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/rustc-demangle/LICENSE-MIT s390-tools-2.33.1/rust-vendor/rustc-demangle/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/rustc-demangle/LICENSE-MIT 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/rustc-demangle/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2014 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/rustc-demangle/README.md s390-tools-2.33.1/rust-vendor/rustc-demangle/README.md --- s390-tools-2.31.0/rust-vendor/rustc-demangle/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/rustc-demangle/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,48 +0,0 @@ -# rustc-demangle - -Demangling for Rust symbols, written in Rust. - -[Documentation](https://docs.rs/rustc-demangle) - -## Usage - -You can add this as a dependency via your `Cargo.toml` - -```toml -[dependencies] -rustc-demangle = "0.1" -``` - -and then be sure to check out the [crate -documentation](https://docs.rs/rustc-demangle) for usage. - -## Usage from non-Rust languages - -You can also use this crate from other languages via the C API wrapper in the -`crates/capi` directory. This can be build with: - -```sh -$ cargo build -p rustc-demangle-capi --release -``` - -You'll then find `target/release/librustc_demangle.a` and -`target/release/librustc_demangle.so` (or a different name depending on your -platform). These objects implement the interface specified in -`crates/capi/include/rustc_demangle.h`. - -# License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in rustc-demangle you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. 
diff -Nru s390-tools-2.31.0/rust-vendor/rustc-demangle/src/legacy.rs s390-tools-2.33.1/rust-vendor/rustc-demangle/src/legacy.rs --- s390-tools-2.31.0/rust-vendor/rustc-demangle/src/legacy.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/rustc-demangle/src/legacy.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,392 +0,0 @@ -use core::char; -use core::fmt; - -/// Representation of a demangled symbol name. -pub struct Demangle<'a> { - inner: &'a str, - /// The number of ::-separated elements in the original name. - elements: usize, -} - -/// De-mangles a Rust symbol into a more readable version -/// -/// All Rust symbols by default are mangled as they contain characters that -/// cannot be represented in all object files. The mangling mechanism is similar -/// to C++'s, but Rust has a few specifics to handle items like lifetimes in -/// symbols. -/// -/// This function will take a **mangled** symbol and return a value. When printed, -/// the de-mangled version will be written. If the symbol does not look like -/// a mangled symbol, the original value will be written instead. -/// -/// # Examples -/// -/// ``` -/// use rustc_demangle::demangle; -/// -/// assert_eq!(demangle("_ZN4testE").to_string(), "test"); -/// assert_eq!(demangle("_ZN3foo3barE").to_string(), "foo::bar"); -/// assert_eq!(demangle("foo").to_string(), "foo"); -/// ``` - -// All Rust symbols are in theory lists of "::"-separated identifiers. Some -// assemblers, however, can't handle these characters in symbol names. To get -// around this, we use C++-style mangling. The mangling method is: -// -// 1. Prefix the symbol with "_ZN" -// 2. For each element of the path, emit the length plus the element -// 3. End the path with "E" -// -// For example, "_ZN4testE" => "test" and "_ZN3foo3barE" => "foo::bar". -// -// We're the ones printing our backtraces, so we can't rely on anything else to -// demangle our symbols. 
It's *much* nicer to look at demangled symbols, so -// this function is implemented to give us nice pretty output. -// -// Note that this demangler isn't quite as fancy as it could be. We have lots -// of other information in our symbols like hashes, version, type information, -// etc. Additionally, this doesn't handle glue symbols at all. -pub fn demangle(s: &str) -> Result<(Demangle, &str), ()> { - // First validate the symbol. If it doesn't look like anything we're - // expecting, we just print it literally. Note that we must handle non-Rust - // symbols because we could have any function in the backtrace. - let inner = if s.starts_with("_ZN") { - &s[3..] - } else if s.starts_with("ZN") { - // On Windows, dbghelp strips leading underscores, so we accept "ZN...E" - // form too. - &s[2..] - } else if s.starts_with("__ZN") { - // On OSX, symbols are prefixed with an extra _ - &s[4..] - } else { - return Err(()); - }; - - // only work with ascii text - if inner.bytes().any(|c| c & 0x80 != 0) { - return Err(()); - } - - let mut elements = 0; - let mut chars = inner.chars(); - let mut c = chars.next().ok_or(())?; - while c != 'E' { - // Decode an identifier element's length. - if !c.is_digit(10) { - return Err(()); - } - let mut len = 0usize; - while let Some(d) = c.to_digit(10) { - len = len - .checked_mul(10) - .and_then(|len| len.checked_add(d as usize)) - .ok_or(())?; - c = chars.next().ok_or(())?; - } - - // `c` already contains the first character of this identifier, skip it and - // all the other characters of this identifier, to reach the next element. - for _ in 0..len { - c = chars.next().ok_or(())?; - } - - elements += 1; - } - - Ok((Demangle { inner, elements }, chars.as_str())) -} - -// Rust hashes are hex digits with an `h` prepended. 
-fn is_rust_hash(s: &str) -> bool { - s.starts_with('h') && s[1..].chars().all(|c| c.is_digit(16)) -} - -impl<'a> fmt::Display for Demangle<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // Alright, let's do this. - let mut inner = self.inner; - for element in 0..self.elements { - let mut rest = inner; - while rest.chars().next().unwrap().is_digit(10) { - rest = &rest[1..]; - } - let i: usize = inner[..(inner.len() - rest.len())].parse().unwrap(); - inner = &rest[i..]; - rest = &rest[..i]; - // Skip printing the hash if alternate formatting - // was requested. - if f.alternate() && element + 1 == self.elements && is_rust_hash(&rest) { - break; - } - if element != 0 { - f.write_str("::")?; - } - if rest.starts_with("_$") { - rest = &rest[1..]; - } - loop { - if rest.starts_with('.') { - if let Some('.') = rest[1..].chars().next() { - f.write_str("::")?; - rest = &rest[2..]; - } else { - f.write_str(".")?; - rest = &rest[1..]; - } - } else if rest.starts_with('$') { - let (escape, after_escape) = if let Some(end) = rest[1..].find('$') { - (&rest[1..=end], &rest[end + 2..]) - } else { - break; - }; - - // see src/librustc_codegen_utils/symbol_names/legacy.rs for these mappings - let unescaped = match escape { - "SP" => "@", - "BP" => "*", - "RF" => "&", - "LT" => "<", - "GT" => ">", - "LP" => "(", - "RP" => ")", - "C" => ",", - - _ => { - if escape.starts_with('u') { - let digits = &escape[1..]; - let all_lower_hex = digits.chars().all(|c| match c { - '0'..='9' | 'a'..='f' => true, - _ => false, - }); - let c = u32::from_str_radix(digits, 16) - .ok() - .and_then(char::from_u32); - if let (true, Some(c)) = (all_lower_hex, c) { - // FIXME(eddyb) do we need to filter out control codepoints? 
- if !c.is_control() { - c.fmt(f)?; - rest = after_escape; - continue; - } - } - } - break; - } - }; - f.write_str(unescaped)?; - rest = after_escape; - } else if let Some(i) = rest.find(|c| c == '$' || c == '.') { - f.write_str(&rest[..i])?; - rest = &rest[i..]; - } else { - break; - } - } - f.write_str(rest)?; - } - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use std::prelude::v1::*; - - macro_rules! t { - ($a:expr, $b:expr) => { - assert!(ok($a, $b)) - }; - } - - macro_rules! t_err { - ($a:expr) => { - assert!(ok_err($a)) - }; - } - - macro_rules! t_nohash { - ($a:expr, $b:expr) => {{ - assert_eq!(format!("{:#}", ::demangle($a)), $b); - }}; - } - - fn ok(sym: &str, expected: &str) -> bool { - match ::try_demangle(sym) { - Ok(s) => { - if s.to_string() == expected { - true - } else { - println!("\n{}\n!=\n{}\n", s, expected); - false - } - } - Err(_) => { - println!("error demangling"); - false - } - } - } - - fn ok_err(sym: &str) -> bool { - match ::try_demangle(sym) { - Ok(_) => { - println!("succeeded in demangling"); - false - } - Err(_) => ::demangle(sym).to_string() == sym, - } - } - - #[test] - fn demangle() { - t_err!("test"); - t!("_ZN4testE", "test"); - t_err!("_ZN4test"); - t!("_ZN4test1a2bcE", "test::a::bc"); - } - - #[test] - fn demangle_dollars() { - t!("_ZN4$RP$E", ")"); - t!("_ZN8$RF$testE", "&test"); - t!("_ZN8$BP$test4foobE", "*test::foob"); - t!("_ZN9$u20$test4foobE", " test::foob"); - t!("_ZN35Bar$LT$$u5b$u32$u3b$$u20$4$u5d$$GT$E", "Bar<[u32; 4]>"); - } - - #[test] - fn demangle_many_dollars() { - t!("_ZN13test$u20$test4foobE", "test test::foob"); - t!("_ZN12test$BP$test4foobE", "test*test::foob"); - } - - #[test] - fn demangle_osx() { - t!( - "__ZN5alloc9allocator6Layout9for_value17h02a996811f781011E", - "alloc::allocator::Layout::for_value::h02a996811f781011" - ); - t!("__ZN38_$LT$core..option..Option$LT$T$GT$$GT$6unwrap18_MSG_FILE_LINE_COL17haf7cb8d5824ee659E", ">::unwrap::_MSG_FILE_LINE_COL::haf7cb8d5824ee659"); - 
t!("__ZN4core5slice89_$LT$impl$u20$core..iter..traits..IntoIterator$u20$for$u20$$RF$$u27$a$u20$$u5b$T$u5d$$GT$9into_iter17h450e234d27262170E", "core::slice::::into_iter::h450e234d27262170"); - } - - #[test] - fn demangle_windows() { - t!("ZN4testE", "test"); - t!("ZN13test$u20$test4foobE", "test test::foob"); - t!("ZN12test$RF$test4foobE", "test&test::foob"); - } - - #[test] - fn demangle_elements_beginning_with_underscore() { - t!("_ZN13_$LT$test$GT$E", ""); - t!("_ZN28_$u7b$$u7b$closure$u7d$$u7d$E", "{{closure}}"); - t!("_ZN15__STATIC_FMTSTRE", "__STATIC_FMTSTR"); - } - - #[test] - fn demangle_trait_impls() { - t!( - "_ZN71_$LT$Test$u20$$u2b$$u20$$u27$static$u20$as$u20$foo..Bar$LT$Test$GT$$GT$3barE", - ">::bar" - ); - } - - #[test] - fn demangle_without_hash() { - let s = "_ZN3foo17h05af221e174051e9E"; - t!(s, "foo::h05af221e174051e9"); - t_nohash!(s, "foo"); - } - - #[test] - fn demangle_without_hash_edgecases() { - // One element, no hash. - t_nohash!("_ZN3fooE", "foo"); - // Two elements, no hash. - t_nohash!("_ZN3foo3barE", "foo::bar"); - // Longer-than-normal hash. - t_nohash!("_ZN3foo20h05af221e174051e9abcE", "foo"); - // Shorter-than-normal hash. - t_nohash!("_ZN3foo5h05afE", "foo"); - // Valid hash, but not at the end. - t_nohash!("_ZN17h05af221e174051e93fooE", "h05af221e174051e9::foo"); - // Not a valid hash, missing the 'h'. - t_nohash!("_ZN3foo16ffaf221e174051e9E", "foo::ffaf221e174051e9"); - // Not a valid hash, has a non-hex-digit. - t_nohash!("_ZN3foo17hg5af221e174051e9E", "foo::hg5af221e174051e9"); - } - - #[test] - fn demangle_thinlto() { - // One element, no hash. 
- t!("_ZN3fooE.llvm.9D1C9369", "foo"); - t!("_ZN3fooE.llvm.9D1C9369@@16", "foo"); - t_nohash!( - "_ZN9backtrace3foo17hbb467fcdaea5d79bE.llvm.A5310EB9", - "backtrace::foo" - ); - } - - #[test] - fn demangle_llvm_ir_branch_labels() { - t!("_ZN4core5slice77_$LT$impl$u20$core..ops..index..IndexMut$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$9index_mut17haf9727c2edfbc47bE.exit.i.i", "core::slice:: for [T]>::index_mut::haf9727c2edfbc47b.exit.i.i"); - t_nohash!("_ZN4core5slice77_$LT$impl$u20$core..ops..index..IndexMut$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$9index_mut17haf9727c2edfbc47bE.exit.i.i", "core::slice:: for [T]>::index_mut.exit.i.i"); - } - - #[test] - fn demangle_ignores_suffix_that_doesnt_look_like_a_symbol() { - t_err!("_ZN3fooE.llvm moocow"); - } - - #[test] - fn dont_panic() { - ::demangle("_ZN2222222222222222222222EE").to_string(); - ::demangle("_ZN5*70527e27.ll34csaÒ“E").to_string(); - ::demangle("_ZN5*70527a54.ll34_$b.1E").to_string(); - ::demangle( - "\ - _ZN5~saäb4e\n\ - 2734cOsbE\n\ - 5usage20h)3\0\0\0\0\0\0\07e2734cOsbE\ - ", - ) - .to_string(); - } - - #[test] - fn invalid_no_chop() { - t_err!("_ZNfooE"); - } - - #[test] - fn handle_assoc_types() { - t!("_ZN151_$LT$alloc..boxed..Box$LT$alloc..boxed..FnBox$LT$A$C$$u20$Output$u3d$R$GT$$u20$$u2b$$u20$$u27$a$GT$$u20$as$u20$core..ops..function..FnOnce$LT$A$GT$$GT$9call_once17h69e8f44b3723e1caE", " + 'a> as core::ops::function::FnOnce>::call_once::h69e8f44b3723e1ca"); - } - - #[test] - fn handle_bang() { - t!( - "_ZN88_$LT$core..result..Result$LT$$u21$$C$$u20$E$GT$$u20$as$u20$std..process..Termination$GT$6report17hfc41d0da4a40b3e8E", - " as std::process::Termination>::report::hfc41d0da4a40b3e8" - ); - } - - #[test] - fn demangle_utf8_idents() { - t_nohash!( - "_ZN11utf8_idents157_$u10e1$$u10d0$$u10ed$$u10db$$u10d4$$u10da$$u10d0$$u10d3$_$u10d2$$u10d4$$u10db$$u10e0$$u10d8$$u10d4$$u10da$$u10d8$_$u10e1$$u10d0$$u10d3$$u10d8$$u10da$$u10d8$17h21634fd5714000aaE", - "utf8_idents::სáƒáƒ­áƒ›áƒ”ლáƒáƒ“_გემრიელი_სáƒáƒ“ილი" - ); - 
} - - #[test] - fn demangle_issue_60925() { - t_nohash!( - "_ZN11issue_609253foo37Foo$LT$issue_60925..llv$u6d$..Foo$GT$3foo17h059a991a004536adE", - "issue_60925::foo::Foo::foo" - ); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/rustc-demangle/src/lib.rs s390-tools-2.33.1/rust-vendor/rustc-demangle/src/lib.rs --- s390-tools-2.31.0/rust-vendor/rustc-demangle/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/rustc-demangle/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,588 +0,0 @@ -//! Demangle Rust compiler symbol names. -//! -//! This crate provides a `demangle` function which will return a `Demangle` -//! sentinel value that can be used to learn about the demangled version of a -//! symbol name. The demangled representation will be the same as the original -//! if it doesn't look like a mangled symbol name. -//! -//! `Demangle` can be formatted with the `Display` trait. The alternate -//! modifier (`#`) can be used to format the symbol name without the -//! trailing hash value. -//! -//! # Examples -//! -//! ``` -//! use rustc_demangle::demangle; -//! -//! assert_eq!(demangle("_ZN4testE").to_string(), "test"); -//! assert_eq!(demangle("_ZN3foo3barE").to_string(), "foo::bar"); -//! assert_eq!(demangle("foo").to_string(), "foo"); -//! // With hash -//! assert_eq!(format!("{}", demangle("_ZN3foo17h05af221e174051e9E")), "foo::h05af221e174051e9"); -//! // Without hash -//! assert_eq!(format!("{:#}", demangle("_ZN3foo17h05af221e174051e9E")), "foo"); -//! ``` - -#![no_std] -#![deny(missing_docs)] -#![cfg_attr(docsrs, feature(doc_cfg))] - -#[cfg(any(test, feature = "std"))] -#[macro_use] -extern crate std; - -// HACK(eddyb) helper macros for tests. -#[cfg(test)] -macro_rules! assert_contains { - ($s:expr, $needle:expr) => {{ - let (s, needle) = ($s, $needle); - assert!( - s.contains(needle), - "{:?} should've contained {:?}", - s, - needle - ); - }}; -} -#[cfg(test)] -macro_rules! 
assert_ends_with { - ($s:expr, $suffix:expr) => {{ - let (s, suffix) = ($s, $suffix); - assert!( - s.ends_with(suffix), - "{:?} should've ended in {:?}", - s, - suffix - ); - }}; -} - -mod legacy; -mod v0; - -use core::fmt::{self, Write as _}; - -/// Representation of a demangled symbol name. -pub struct Demangle<'a> { - style: Option>, - original: &'a str, - suffix: &'a str, -} - -enum DemangleStyle<'a> { - Legacy(legacy::Demangle<'a>), - V0(v0::Demangle<'a>), -} - -/// De-mangles a Rust symbol into a more readable version -/// -/// This function will take a **mangled** symbol and return a value. When printed, -/// the de-mangled version will be written. If the symbol does not look like -/// a mangled symbol, the original value will be written instead. -/// -/// # Examples -/// -/// ``` -/// use rustc_demangle::demangle; -/// -/// assert_eq!(demangle("_ZN4testE").to_string(), "test"); -/// assert_eq!(demangle("_ZN3foo3barE").to_string(), "foo::bar"); -/// assert_eq!(demangle("foo").to_string(), "foo"); -/// ``` -pub fn demangle(mut s: &str) -> Demangle { - // During ThinLTO LLVM may import and rename internal symbols, so strip out - // those endings first as they're one of the last manglings applied to symbol - // names. - let llvm = ".llvm."; - if let Some(i) = s.find(llvm) { - let candidate = &s[i + llvm.len()..]; - let all_hex = candidate.chars().all(|c| match c { - 'A'..='F' | '0'..='9' | '@' => true, - _ => false, - }); - - if all_hex { - s = &s[..i]; - } - } - - let mut suffix = ""; - let mut style = match legacy::demangle(s) { - Ok((d, s)) => { - suffix = s; - Some(DemangleStyle::Legacy(d)) - } - Err(()) => match v0::demangle(s) { - Ok((d, s)) => { - suffix = s; - Some(DemangleStyle::V0(d)) - } - // FIXME(eddyb) would it make sense to treat an unknown-validity - // symbol (e.g. one that errored with `RecursedTooDeep`) as - // v0-mangled, and have the error show up in the demangling? 
- // (that error already gets past this initial check, and therefore - // will show up in the demangling, if hidden behind a backref) - Err(v0::ParseError::Invalid) | Err(v0::ParseError::RecursedTooDeep) => None, - }, - }; - - // Output like LLVM IR adds extra period-delimited words. See if - // we are in that case and save the trailing words if so. - if !suffix.is_empty() { - if suffix.starts_with('.') && is_symbol_like(suffix) { - // Keep the suffix. - } else { - // Reset the suffix and invalidate the demangling. - suffix = ""; - style = None; - } - } - - Demangle { - style, - original: s, - suffix, - } -} - -#[cfg(feature = "std")] -fn demangle_line( - line: &str, - output: &mut impl std::io::Write, - include_hash: bool, -) -> std::io::Result<()> { - let mut head = 0; - while head < line.len() { - // Move to the next potential match - let next_head = match (line[head..].find("_ZN"), line[head..].find("_R")) { - (Some(idx), None) | (None, Some(idx)) => head + idx, - (Some(idx1), Some(idx2)) => head + idx1.min(idx2), - (None, None) => { - // No more matches... - line.len() - } - }; - output.write_all(line[head..next_head].as_bytes())?; - head = next_head; - // Find the non-matching character. - // - // If we do not find a character, then until the end of the line is the - // thing to demangle. - let match_end = line[head..] - .find(|ch: char| !(ch == '$' || ch == '.' || ch == '_' || ch.is_ascii_alphanumeric())) - .map(|idx| head + idx) - .unwrap_or(line.len()); - - let mangled = &line[head..match_end]; - head = head + mangled.len(); - if let Ok(demangled) = try_demangle(mangled) { - if include_hash { - write!(output, "{}", demangled)?; - } else { - write!(output, "{:#}", demangled)?; - } - } else { - output.write_all(mangled.as_bytes())?; - } - } - Ok(()) -} - -/// Process a stream of data from `input` into the provided `output`, demangling any symbols found -/// within. 
-/// -/// Note that the underlying implementation will perform many relatively small writes to the -/// output. If the output is expensive to write to (e.g., requires syscalls), consider using -/// `std::io::BufWriter`. -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub fn demangle_stream( - input: &mut R, - output: &mut W, - include_hash: bool, -) -> std::io::Result<()> { - let mut buf = std::string::String::new(); - // We read in lines to reduce the memory usage at any time. - // - // demangle_line is also more efficient with relatively small buffers as it will copy around - // trailing data during demangling. In the future we might directly stream to the output but at - // least right now that seems to be less efficient. - while input.read_line(&mut buf)? > 0 { - demangle_line(&buf, output, include_hash)?; - buf.clear(); - } - Ok(()) -} - -/// Error returned from the `try_demangle` function below when demangling fails. -#[derive(Debug, Clone)] -pub struct TryDemangleError { - _priv: (), -} - -/// The same as `demangle`, except return an `Err` if the string does not appear -/// to be a Rust symbol, rather than "demangling" the given string as a no-op. -/// -/// ``` -/// extern crate rustc_demangle; -/// -/// let not_a_rust_symbol = "la la la"; -/// -/// // The `try_demangle` function will reject strings which are not Rust symbols. -/// assert!(rustc_demangle::try_demangle(not_a_rust_symbol).is_err()); -/// -/// // While `demangle` will just pass the non-symbol through as a no-op. -/// assert_eq!(rustc_demangle::demangle(not_a_rust_symbol).as_str(), not_a_rust_symbol); -/// ``` -pub fn try_demangle(s: &str) -> Result { - let sym = demangle(s); - if sym.style.is_some() { - Ok(sym) - } else { - Err(TryDemangleError { _priv: () }) - } -} - -impl<'a> Demangle<'a> { - /// Returns the underlying string that's being demangled. 
- pub fn as_str(&self) -> &'a str { - self.original - } -} - -fn is_symbol_like(s: &str) -> bool { - s.chars().all(|c| { - // Once `char::is_ascii_punctuation` and `char::is_ascii_alphanumeric` - // have been stable for long enough, use those instead for clarity - is_ascii_alphanumeric(c) || is_ascii_punctuation(c) - }) -} - -// Copied from the documentation of `char::is_ascii_alphanumeric` -fn is_ascii_alphanumeric(c: char) -> bool { - match c { - '\u{0041}'..='\u{005A}' | '\u{0061}'..='\u{007A}' | '\u{0030}'..='\u{0039}' => true, - _ => false, - } -} - -// Copied from the documentation of `char::is_ascii_punctuation` -fn is_ascii_punctuation(c: char) -> bool { - match c { - '\u{0021}'..='\u{002F}' - | '\u{003A}'..='\u{0040}' - | '\u{005B}'..='\u{0060}' - | '\u{007B}'..='\u{007E}' => true, - _ => false, - } -} - -impl<'a> fmt::Display for DemangleStyle<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - DemangleStyle::Legacy(ref d) => fmt::Display::fmt(d, f), - DemangleStyle::V0(ref d) => fmt::Display::fmt(d, f), - } - } -} - -// Maximum size of the symbol that we'll print. 
-const MAX_SIZE: usize = 1_000_000; - -#[derive(Copy, Clone, Debug)] -struct SizeLimitExhausted; - -struct SizeLimitedFmtAdapter { - remaining: Result, - inner: F, -} - -impl fmt::Write for SizeLimitedFmtAdapter { - fn write_str(&mut self, s: &str) -> fmt::Result { - self.remaining = self - .remaining - .and_then(|r| r.checked_sub(s.len()).ok_or(SizeLimitExhausted)); - - match self.remaining { - Ok(_) => self.inner.write_str(s), - Err(SizeLimitExhausted) => Err(fmt::Error), - } - } -} - -impl<'a> fmt::Display for Demangle<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.style { - None => f.write_str(self.original)?, - Some(ref d) => { - let alternate = f.alternate(); - let mut size_limited_fmt = SizeLimitedFmtAdapter { - remaining: Ok(MAX_SIZE), - inner: &mut *f, - }; - let fmt_result = if alternate { - write!(size_limited_fmt, "{:#}", d) - } else { - write!(size_limited_fmt, "{}", d) - }; - let size_limit_result = size_limited_fmt.remaining.map(|_| ()); - - // Translate a `fmt::Error` generated by `SizeLimitedFmtAdapter` - // into an error message, instead of propagating it upwards - // (which could cause panicking from inside e.g. `std::io::print`). - match (fmt_result, size_limit_result) { - (Err(_), Err(SizeLimitExhausted)) => f.write_str("{size limit reached}")?, - - _ => { - fmt_result?; - size_limit_result - .expect("`fmt::Error` from `SizeLimitedFmtAdapter` was discarded"); - } - } - } - } - f.write_str(self.suffix) - } -} - -impl<'a> fmt::Debug for Demangle<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -#[cfg(test)] -mod tests { - use std::prelude::v1::*; - - macro_rules! t { - ($a:expr, $b:expr) => { - assert!(ok($a, $b)) - }; - } - - macro_rules! t_err { - ($a:expr) => { - assert!(ok_err($a)) - }; - } - - macro_rules! 
t_nohash { - ($a:expr, $b:expr) => {{ - assert_eq!(format!("{:#}", super::demangle($a)), $b); - }}; - } - - fn ok(sym: &str, expected: &str) -> bool { - match super::try_demangle(sym) { - Ok(s) => { - if s.to_string() == expected { - true - } else { - println!("\n{}\n!=\n{}\n", s, expected); - false - } - } - Err(_) => { - println!("error demangling"); - false - } - } - } - - fn ok_err(sym: &str) -> bool { - match super::try_demangle(sym) { - Ok(_) => { - println!("succeeded in demangling"); - false - } - Err(_) => super::demangle(sym).to_string() == sym, - } - } - - #[test] - fn demangle() { - t_err!("test"); - t!("_ZN4testE", "test"); - t_err!("_ZN4test"); - t!("_ZN4test1a2bcE", "test::a::bc"); - } - - #[test] - fn demangle_dollars() { - t!("_ZN4$RP$E", ")"); - t!("_ZN8$RF$testE", "&test"); - t!("_ZN8$BP$test4foobE", "*test::foob"); - t!("_ZN9$u20$test4foobE", " test::foob"); - t!("_ZN35Bar$LT$$u5b$u32$u3b$$u20$4$u5d$$GT$E", "Bar<[u32; 4]>"); - } - - #[test] - fn demangle_many_dollars() { - t!("_ZN13test$u20$test4foobE", "test test::foob"); - t!("_ZN12test$BP$test4foobE", "test*test::foob"); - } - - #[test] - fn demangle_osx() { - t!( - "__ZN5alloc9allocator6Layout9for_value17h02a996811f781011E", - "alloc::allocator::Layout::for_value::h02a996811f781011" - ); - t!("__ZN38_$LT$core..option..Option$LT$T$GT$$GT$6unwrap18_MSG_FILE_LINE_COL17haf7cb8d5824ee659E", ">::unwrap::_MSG_FILE_LINE_COL::haf7cb8d5824ee659"); - t!("__ZN4core5slice89_$LT$impl$u20$core..iter..traits..IntoIterator$u20$for$u20$$RF$$u27$a$u20$$u5b$T$u5d$$GT$9into_iter17h450e234d27262170E", "core::slice::::into_iter::h450e234d27262170"); - } - - #[test] - fn demangle_windows() { - t!("ZN4testE", "test"); - t!("ZN13test$u20$test4foobE", "test test::foob"); - t!("ZN12test$RF$test4foobE", "test&test::foob"); - } - - #[test] - fn demangle_elements_beginning_with_underscore() { - t!("_ZN13_$LT$test$GT$E", ""); - t!("_ZN28_$u7b$$u7b$closure$u7d$$u7d$E", "{{closure}}"); - t!("_ZN15__STATIC_FMTSTRE", 
"__STATIC_FMTSTR"); - } - - #[test] - fn demangle_trait_impls() { - t!( - "_ZN71_$LT$Test$u20$$u2b$$u20$$u27$static$u20$as$u20$foo..Bar$LT$Test$GT$$GT$3barE", - ">::bar" - ); - } - - #[test] - fn demangle_without_hash() { - let s = "_ZN3foo17h05af221e174051e9E"; - t!(s, "foo::h05af221e174051e9"); - t_nohash!(s, "foo"); - } - - #[test] - fn demangle_without_hash_edgecases() { - // One element, no hash. - t_nohash!("_ZN3fooE", "foo"); - // Two elements, no hash. - t_nohash!("_ZN3foo3barE", "foo::bar"); - // Longer-than-normal hash. - t_nohash!("_ZN3foo20h05af221e174051e9abcE", "foo"); - // Shorter-than-normal hash. - t_nohash!("_ZN3foo5h05afE", "foo"); - // Valid hash, but not at the end. - t_nohash!("_ZN17h05af221e174051e93fooE", "h05af221e174051e9::foo"); - // Not a valid hash, missing the 'h'. - t_nohash!("_ZN3foo16ffaf221e174051e9E", "foo::ffaf221e174051e9"); - // Not a valid hash, has a non-hex-digit. - t_nohash!("_ZN3foo17hg5af221e174051e9E", "foo::hg5af221e174051e9"); - } - - #[test] - fn demangle_thinlto() { - // One element, no hash. 
- t!("_ZN3fooE.llvm.9D1C9369", "foo"); - t!("_ZN3fooE.llvm.9D1C9369@@16", "foo"); - t_nohash!( - "_ZN9backtrace3foo17hbb467fcdaea5d79bE.llvm.A5310EB9", - "backtrace::foo" - ); - } - - #[test] - fn demangle_llvm_ir_branch_labels() { - t!("_ZN4core5slice77_$LT$impl$u20$core..ops..index..IndexMut$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$9index_mut17haf9727c2edfbc47bE.exit.i.i", "core::slice:: for [T]>::index_mut::haf9727c2edfbc47b.exit.i.i"); - t_nohash!("_ZN4core5slice77_$LT$impl$u20$core..ops..index..IndexMut$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$9index_mut17haf9727c2edfbc47bE.exit.i.i", "core::slice:: for [T]>::index_mut.exit.i.i"); - } - - #[test] - fn demangle_ignores_suffix_that_doesnt_look_like_a_symbol() { - t_err!("_ZN3fooE.llvm moocow"); - } - - #[test] - fn dont_panic() { - super::demangle("_ZN2222222222222222222222EE").to_string(); - super::demangle("_ZN5*70527e27.ll34csaÒ“E").to_string(); - super::demangle("_ZN5*70527a54.ll34_$b.1E").to_string(); - super::demangle( - "\ - _ZN5~saäb4e\n\ - 2734cOsbE\n\ - 5usage20h)3\0\0\0\0\0\0\07e2734cOsbE\ - ", - ) - .to_string(); - } - - #[test] - fn invalid_no_chop() { - t_err!("_ZNfooE"); - } - - #[test] - fn handle_assoc_types() { - t!("_ZN151_$LT$alloc..boxed..Box$LT$alloc..boxed..FnBox$LT$A$C$$u20$Output$u3d$R$GT$$u20$$u2b$$u20$$u27$a$GT$$u20$as$u20$core..ops..function..FnOnce$LT$A$GT$$GT$9call_once17h69e8f44b3723e1caE", " + 'a> as core::ops::function::FnOnce>::call_once::h69e8f44b3723e1ca"); - } - - #[test] - fn handle_bang() { - t!( - "_ZN88_$LT$core..result..Result$LT$$u21$$C$$u20$E$GT$$u20$as$u20$std..process..Termination$GT$6report17hfc41d0da4a40b3e8E", - " as std::process::Termination>::report::hfc41d0da4a40b3e8" - ); - } - - #[test] - fn limit_recursion() { - assert_contains!( - super::demangle("_RNvB_1a").to_string(), - "{recursion limit reached}" - ); - assert_contains!( - super::demangle("_RMC0RB2_").to_string(), - "{recursion limit reached}" - ); - } - - #[test] - fn limit_output() { - assert_ends_with!( - 
super::demangle("RYFG_FGyyEvRYFF_EvRYFFEvERLB_B_B_ERLRjB_B_B_").to_string(), - "{size limit reached}" - ); - // NOTE(eddyb) somewhat reduced version of the above, effectively - // ` fn()>` with a larger number of lifetimes in `...`. - assert_ends_with!( - super::demangle("_RMC0FGZZZ_Eu").to_string(), - "{size limit reached}" - ); - } - - #[cfg(feature = "std")] - fn demangle_str(input: &str) -> String { - let mut output = Vec::new(); - super::demangle_line(input, &mut output, false); - String::from_utf8(output).unwrap() - } - - #[test] - #[cfg(feature = "std")] - fn find_multiple() { - assert_eq!( - demangle_str("_ZN3fooE.llvm moocow _ZN3fooE.llvm"), - "foo.llvm moocow foo.llvm" - ); - } - - #[test] - #[cfg(feature = "std")] - fn interleaved_new_legacy() { - assert_eq!( - demangle_str("_ZN3fooE.llvm moocow _RNvMNtNtNtNtCs8a2262Dv4r_3mio3sys4unix8selector5epollNtB2_8Selector6select _ZN3fooE.llvm"), - "foo.llvm moocow ::select foo.llvm" - ); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/rustc-demangle/src/v0-large-test-symbols/early-recursion-limit s390-tools-2.33.1/rust-vendor/rustc-demangle/src/v0-large-test-symbols/early-recursion-limit --- s390-tools-2.31.0/rust-vendor/rustc-demangle/src/v0-large-test-symbols/early-recursion-limit 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/rustc-demangle/src/v0-large-test-symbols/early-recursion-limit 1970-01-01 01:00:00.000000000 +0100 @@ -1,10 +0,0 @@ -# NOTE: empty lines, and lines starting with `#`, are ignored. - -# Large test symbols for `v0::test::demangling_limits`, that specifically cause -# a `RecursedTooDeep` error from `v0::demangle`'s shallow traversal (sanity check) -# of the mangled symbol, i.e. before any printing coudl be attempted. 
- -RICu4$TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOSOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOOOOOOOO
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTT
TTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
OOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTYTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTT
TTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
OOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOO
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu5,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu3.,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOxxxRICu4,-xxxxffff..ffffffffffffffffffffffffffffffffffffffffffffffffffffffffxxxxxxxxxxxxxxxxxxxRaRBRaR>R>xxxu2IC,-xxxxxxRIC4xxxOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOO
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTxxxxxRICu4.,-xOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOTTTOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOxxxRICu4,-xxxxffff..ffffffffffffffffffffffffffffffffffffffffffffffffffffffffxxxxxxxxxxxxxxxxxxxRaRBRaR>R>xxxu2IC,-xxxxxxRIC4xxx..K..xRBRaR>RICu6$-RBKIQARICu6$-RBKIQAA........TvvKKKKKKKKKxxxxxxxxxxxxxxxBKIQARICu6$-RBKIQAA...._.xxx -RIYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYYYYYYYYYXB_RXB_lYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYY
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYYYYYYYYYXB_RXB_lYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYFhhhhYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYNYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYYYYYYYYYXB_RXB_lYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYYYYYYYYYXB_RXB_lYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYNYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYYYYYYYYYXB_RXB_lYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
YyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYYYYYYYYYXB_RXB_lYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYFhhhhYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYNYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYYYYYYYYYXB_RXB_lYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYYYYYYYYYXB_RXB_lYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRYYYYYYYYYXB_RXB_lYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYMYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRXB_RXYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYIBRIIRIIBRCIByEEj_ByEEj_EEj -RYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSYYYYSSSSSSSSSSSSSSSSSYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYSSSSR
RRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRSSSSSSSRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRR
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRSSSSSSSRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3
YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYY
YYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
RRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRSSSSSSSYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSSRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYyYYYbYYYYYYYYYYYYYYRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRYYYYYYYYYYYYYmYYYYYYYYYYYYYYYYYYYYYRCu3YYYYYYYYYYYPYYYYYYbYYYYYYYYYRYYYYYYYYYYYYYYYYYYSSSSSSSSSSSS 
-RYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYyYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYR; diff -Nru s390-tools-2.31.0/rust-vendor/rustc-demangle/src/v0.rs s390-tools-2.33.1/rust-vendor/rustc-demangle/src/v0.rs --- s390-tools-2.31.0/rust-vendor/rustc-demangle/src/v0.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/rustc-demangle/src/v0.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1530 +0,0 @@ -use core::convert::TryFrom; -use core::{char, fmt, iter, mem, str}; - -#[allow(unused_macros)] -macro_rules! 
write { - ($($ignored:tt)*) => { - compile_error!( - "use `self.print(value)` or `fmt::Trait::fmt(&value, self.out)`, \ - instead of `write!(self.out, \"{...}\", value)`" - ) - }; -} - -// Maximum recursion depth when parsing symbols before we just bail out saying -// "this symbol is invalid" -const MAX_DEPTH: u32 = 500; - -/// Representation of a demangled symbol name. -pub struct Demangle<'a> { - inner: &'a str, -} - -#[derive(PartialEq, Eq, Debug)] -pub enum ParseError { - /// Symbol doesn't match the expected `v0` grammar. - Invalid, - - /// Parsing the symbol crossed the recursion limit (see `MAX_DEPTH`). - RecursedTooDeep, -} - -/// De-mangles a Rust symbol into a more readable version -/// -/// This function will take a **mangled** symbol and return a value. When printed, -/// the de-mangled version will be written. If the symbol does not look like -/// a mangled symbol, the original value will be written instead. -pub fn demangle(s: &str) -> Result<(Demangle, &str), ParseError> { - // First validate the symbol. If it doesn't look like anything we're - // expecting, we just print it literally. Note that we must handle non-Rust - // symbols because we could have any function in the backtrace. - let inner; - if s.len() > 2 && s.starts_with("_R") { - inner = &s[2..]; - } else if s.len() > 1 && s.starts_with('R') { - // On Windows, dbghelp strips leading underscores, so we accept "R..." - // form too. - inner = &s[1..]; - } else if s.len() > 3 && s.starts_with("__R") { - // On OSX, symbols are prefixed with an extra _ - inner = &s[3..]; - } else { - return Err(ParseError::Invalid); - } - - // Paths always start with uppercase characters. - match inner.as_bytes()[0] { - b'A'..=b'Z' => {} - _ => return Err(ParseError::Invalid), - } - - // only work with ascii text - if inner.bytes().any(|c| c & 0x80 != 0) { - return Err(ParseError::Invalid); - } - - // Verify that the symbol is indeed a valid path. 
- let try_parse_path = |parser| { - let mut dummy_printer = Printer { - parser: Ok(parser), - out: None, - bound_lifetime_depth: 0, - }; - dummy_printer - .print_path(false) - .expect("`fmt::Error`s should be impossible without a `fmt::Formatter`"); - dummy_printer.parser - }; - let mut parser = Parser { - sym: inner, - next: 0, - depth: 0, - }; - parser = try_parse_path(parser)?; - - // Instantiating crate (paths always start with uppercase characters). - if let Some(&(b'A'..=b'Z')) = parser.sym.as_bytes().get(parser.next) { - parser = try_parse_path(parser)?; - } - - Ok((Demangle { inner }, &parser.sym[parser.next..])) -} - -impl<'s> fmt::Display for Demangle<'s> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut printer = Printer { - parser: Ok(Parser { - sym: self.inner, - next: 0, - depth: 0, - }), - out: Some(f), - bound_lifetime_depth: 0, - }; - printer.print_path(true) - } -} - -struct Ident<'s> { - /// ASCII part of the identifier. - ascii: &'s str, - /// Punycode insertion codes for Unicode codepoints, if any. - punycode: &'s str, -} - -const SMALL_PUNYCODE_LEN: usize = 128; - -impl<'s> Ident<'s> { - /// Attempt to decode punycode on the stack (allocation-free), - /// and pass the char slice to the closure, if successful. - /// This supports up to `SMALL_PUNYCODE_LEN` characters. - fn try_small_punycode_decode R, R>(&self, f: F) -> Option { - let mut out = ['\0'; SMALL_PUNYCODE_LEN]; - let mut out_len = 0; - let r = self.punycode_decode(|i, c| { - // Check there's space left for another character. - out.get(out_len).ok_or(())?; - - // Move the characters after the insert position. - let mut j = out_len; - out_len += 1; - - while j > i { - out[j] = out[j - 1]; - j -= 1; - } - - // Insert the new character. 
- out[i] = c; - - Ok(()) - }); - if r.is_ok() { - Some(f(&out[..out_len])) - } else { - None - } - } - - /// Decode punycode as insertion positions and characters - /// and pass them to the closure, which can return `Err(())` - /// to stop the decoding process. - fn punycode_decode Result<(), ()>>( - &self, - mut insert: F, - ) -> Result<(), ()> { - let mut punycode_bytes = self.punycode.bytes().peekable(); - if punycode_bytes.peek().is_none() { - return Err(()); - } - - let mut len = 0; - - // Populate initial output from ASCII fragment. - for c in self.ascii.chars() { - insert(len, c)?; - len += 1; - } - - // Punycode parameters and initial state. - let base = 36; - let t_min = 1; - let t_max = 26; - let skew = 38; - let mut damp = 700; - let mut bias = 72; - let mut i: usize = 0; - let mut n: usize = 0x80; - - loop { - // Read one delta value. - let mut delta: usize = 0; - let mut w = 1; - let mut k: usize = 0; - loop { - use core::cmp::{max, min}; - - k += base; - let t = min(max(k.saturating_sub(bias), t_min), t_max); - - let d = match punycode_bytes.next() { - Some(d @ b'a'..=b'z') => d - b'a', - Some(d @ b'0'..=b'9') => 26 + (d - b'0'), - _ => return Err(()), - }; - let d = d as usize; - delta = delta.checked_add(d.checked_mul(w).ok_or(())?).ok_or(())?; - if d < t { - break; - } - w = w.checked_mul(base - t).ok_or(())?; - } - - // Compute the new insert position and character. - len += 1; - i = i.checked_add(delta).ok_or(())?; - n = n.checked_add(i / len).ok_or(())?; - i %= len; - - let n_u32 = n as u32; - let c = if n_u32 as usize == n { - char::from_u32(n_u32).ok_or(())? - } else { - return Err(()); - }; - - // Insert the new character and increment the insert position. - insert(i, c)?; - i += 1; - - // If there are no more deltas, decoding is complete. - if punycode_bytes.peek().is_none() { - return Ok(()); - } - - // Perform bias adaptation. 
- delta /= damp; - damp = 2; - - delta += delta / len; - let mut k = 0; - while delta > ((base - t_min) * t_max) / 2 { - delta /= base - t_min; - k += base; - } - bias = k + ((base - t_min + 1) * delta) / (delta + skew); - } - } -} - -impl<'s> fmt::Display for Ident<'s> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.try_small_punycode_decode(|chars| { - for &c in chars { - c.fmt(f)?; - } - Ok(()) - }) - .unwrap_or_else(|| { - if !self.punycode.is_empty() { - f.write_str("punycode{")?; - - // Reconstruct a standard Punycode encoding, - // by using `-` as the separator. - if !self.ascii.is_empty() { - f.write_str(self.ascii)?; - f.write_str("-")?; - } - f.write_str(self.punycode)?; - - f.write_str("}") - } else { - f.write_str(self.ascii) - } - }) - } -} - -/// Sequence of lowercase hexadecimal nibbles (`0-9a-f`), used by leaf consts. -struct HexNibbles<'s> { - nibbles: &'s str, -} - -impl<'s> HexNibbles<'s> { - /// Decode an integer value (with the "most significant nibble" first), - /// returning `None` if it can't fit in an `u64`. - // FIXME(eddyb) should this "just" use `u128` instead? - fn try_parse_uint(&self) -> Option { - let nibbles = self.nibbles.trim_start_matches("0"); - - if nibbles.len() > 16 { - return None; - } - - let mut v = 0; - for nibble in nibbles.chars() { - v = (v << 4) | (nibble.to_digit(16).unwrap() as u64); - } - Some(v) - } - - /// Decode a UTF-8 byte sequence (with each byte using a pair of nibbles) - /// into individual `char`s, returning `None` for invalid UTF-8. - fn try_parse_str_chars(&self) -> Option + 's> { - if self.nibbles.len() % 2 != 0 { - return None; - } - - // FIXME(eddyb) use `array_chunks` instead, when that becomes stable. 
- let mut bytes = self - .nibbles - .as_bytes() - .chunks_exact(2) - .map(|slice| match slice { - [a, b] => [a, b], - _ => unreachable!(), - }) - .map(|[&hi, &lo]| { - let half = |nibble: u8| (nibble as char).to_digit(16).unwrap() as u8; - (half(hi) << 4) | half(lo) - }); - - let chars = iter::from_fn(move || { - // As long as there are any bytes left, there's at least one more - // UTF-8-encoded `char` to decode (or the possibility of error). - bytes.next().map(|first_byte| -> Result { - // FIXME(eddyb) this `enum` and `fn` should be somewhere in `core`. - enum Utf8FirstByteError { - ContinuationByte, - TooLong, - } - fn utf8_len_from_first_byte(byte: u8) -> Result { - match byte { - 0x00..=0x7f => Ok(1), - 0x80..=0xbf => Err(Utf8FirstByteError::ContinuationByte), - 0xc0..=0xdf => Ok(2), - 0xe0..=0xef => Ok(3), - 0xf0..=0xf7 => Ok(4), - 0xf8..=0xff => Err(Utf8FirstByteError::TooLong), - } - } - - // Collect the appropriate amount of bytes (up to 4), according - // to the UTF-8 length implied by the first byte. - let utf8_len = utf8_len_from_first_byte(first_byte).map_err(|_| ())?; - let utf8 = &mut [first_byte, 0, 0, 0][..utf8_len]; - for i in 1..utf8_len { - utf8[i] = bytes.next().ok_or(())?; - } - - // Fully validate the UTF-8 sequence. - let s = str::from_utf8(utf8).map_err(|_| ())?; - - // Since we included exactly one UTF-8 sequence, and validation - // succeeded, `str::chars` should return exactly one `char`. - let mut chars = s.chars(); - match (chars.next(), chars.next()) { - (Some(c), None) => Ok(c), - _ => unreachable!( - "str::from_utf8({:?}) = {:?} was expected to have 1 char, \ - but {} chars were found", - utf8, - s, - s.chars().count() - ), - } - }) - }); - - // HACK(eddyb) doing a separate validation iteration like this might be - // wasteful, but it's easier to avoid starting to print a string literal - // in the first place, than to abort it mid-string. 
- if chars.clone().any(|r| r.is_err()) { - None - } else { - Some(chars.map(Result::unwrap)) - } - } -} - -fn basic_type(tag: u8) -> Option<&'static str> { - Some(match tag { - b'b' => "bool", - b'c' => "char", - b'e' => "str", - b'u' => "()", - b'a' => "i8", - b's' => "i16", - b'l' => "i32", - b'x' => "i64", - b'n' => "i128", - b'i' => "isize", - b'h' => "u8", - b't' => "u16", - b'm' => "u32", - b'y' => "u64", - b'o' => "u128", - b'j' => "usize", - b'f' => "f32", - b'd' => "f64", - b'z' => "!", - b'p' => "_", - b'v' => "...", - - _ => return None, - }) -} - -struct Parser<'s> { - sym: &'s str, - next: usize, - depth: u32, -} - -impl<'s> Parser<'s> { - fn push_depth(&mut self) -> Result<(), ParseError> { - self.depth += 1; - if self.depth > MAX_DEPTH { - Err(ParseError::RecursedTooDeep) - } else { - Ok(()) - } - } - - fn pop_depth(&mut self) { - self.depth -= 1; - } - - fn peek(&self) -> Option { - self.sym.as_bytes().get(self.next).cloned() - } - - fn eat(&mut self, b: u8) -> bool { - if self.peek() == Some(b) { - self.next += 1; - true - } else { - false - } - } - - fn next(&mut self) -> Result { - let b = self.peek().ok_or(ParseError::Invalid)?; - self.next += 1; - Ok(b) - } - - fn hex_nibbles(&mut self) -> Result, ParseError> { - let start = self.next; - loop { - match self.next()? 
{ - b'0'..=b'9' | b'a'..=b'f' => {} - b'_' => break, - _ => return Err(ParseError::Invalid), - } - } - Ok(HexNibbles { - nibbles: &self.sym[start..self.next - 1], - }) - } - - fn digit_10(&mut self) -> Result { - let d = match self.peek() { - Some(d @ b'0'..=b'9') => d - b'0', - _ => return Err(ParseError::Invalid), - }; - self.next += 1; - Ok(d) - } - - fn digit_62(&mut self) -> Result { - let d = match self.peek() { - Some(d @ b'0'..=b'9') => d - b'0', - Some(d @ b'a'..=b'z') => 10 + (d - b'a'), - Some(d @ b'A'..=b'Z') => 10 + 26 + (d - b'A'), - _ => return Err(ParseError::Invalid), - }; - self.next += 1; - Ok(d) - } - - fn integer_62(&mut self) -> Result { - if self.eat(b'_') { - return Ok(0); - } - - let mut x: u64 = 0; - while !self.eat(b'_') { - let d = self.digit_62()? as u64; - x = x.checked_mul(62).ok_or(ParseError::Invalid)?; - x = x.checked_add(d).ok_or(ParseError::Invalid)?; - } - x.checked_add(1).ok_or(ParseError::Invalid) - } - - fn opt_integer_62(&mut self, tag: u8) -> Result { - if !self.eat(tag) { - return Ok(0); - } - self.integer_62()?.checked_add(1).ok_or(ParseError::Invalid) - } - - fn disambiguator(&mut self) -> Result { - self.opt_integer_62(b's') - } - - fn namespace(&mut self) -> Result, ParseError> { - match self.next()? { - // Special namespaces, like closures and shims. - ns @ b'A'..=b'Z' => Ok(Some(ns as char)), - - // Implementation-specific/unspecified namespaces. - b'a'..=b'z' => Ok(None), - - _ => Err(ParseError::Invalid), - } - } - - fn backref(&mut self) -> Result, ParseError> { - let s_start = self.next - 1; - let i = self.integer_62()?; - if i >= s_start as u64 { - return Err(ParseError::Invalid); - } - let mut new_parser = Parser { - sym: self.sym, - next: i as usize, - depth: self.depth, - }; - new_parser.push_depth()?; - Ok(new_parser) - } - - fn ident(&mut self) -> Result, ParseError> { - let is_punycode = self.eat(b'u'); - let mut len = self.digit_10()? 
as usize; - if len != 0 { - while let Ok(d) = self.digit_10() { - len = len.checked_mul(10).ok_or(ParseError::Invalid)?; - len = len.checked_add(d as usize).ok_or(ParseError::Invalid)?; - } - } - - // Skip past the optional `_` separator. - self.eat(b'_'); - - let start = self.next; - self.next = self.next.checked_add(len).ok_or(ParseError::Invalid)?; - if self.next > self.sym.len() { - return Err(ParseError::Invalid); - } - - let ident = &self.sym[start..self.next]; - - if is_punycode { - let ident = match ident.bytes().rposition(|b| b == b'_') { - Some(i) => Ident { - ascii: &ident[..i], - punycode: &ident[i + 1..], - }, - None => Ident { - ascii: "", - punycode: ident, - }, - }; - if ident.punycode.is_empty() { - return Err(ParseError::Invalid); - } - Ok(ident) - } else { - Ok(Ident { - ascii: ident, - punycode: "", - }) - } - } -} - -struct Printer<'a, 'b: 'a, 's> { - /// The input parser to demangle from, or `Err` if any (parse) error was - /// encountered (in order to disallow further likely-incorrect demangling). - /// - /// See also the documentation on the `invalid!` and `parse!` macros below. - parser: Result, ParseError>, - - /// The output formatter to demangle to, or `None` while skipping printing. - out: Option<&'a mut fmt::Formatter<'b>>, - - /// Cumulative number of lifetimes bound by `for<...>` binders ('G'), - /// anywhere "around" the current entity (e.g. type) being demangled. - /// This value is not tracked while skipping printing, as it'd be unused. - /// - /// See also the documentation on the `Printer::in_binder` method. - bound_lifetime_depth: u32, -} - -impl ParseError { - /// Snippet to print when the error is initially encountered. - fn message(&self) -> &str { - match self { - ParseError::Invalid => "{invalid syntax}", - ParseError::RecursedTooDeep => "{recursion limit reached}", - } - } -} - -/// Mark the parser as errored (with `ParseError::Invalid`), print the -/// appropriate message (see `ParseError::message`) and return early. 
-macro_rules! invalid { - ($printer:ident) => {{ - let err = ParseError::Invalid; - $printer.print(err.message())?; - $printer.parser = Err(err); - return Ok(()); - }}; -} - -/// Call a parser method (if the parser hasn't errored yet), -/// and mark the parser as errored if it returns `Err`. -/// -/// If the parser errored, before or now, this returns early, -/// from the current function, after printing either: -/// * for a new error, the appropriate message (see `ParseError::message`) -/// * for an earlier error, only `?` - this allows callers to keep printing -/// the approximate syntax of the path/type/const, despite having errors, -/// e.g. `Vec<[(A, ?); ?]>` instead of `Vec<[(A, ?` -macro_rules! parse { - ($printer:ident, $method:ident $(($($arg:expr),*))*) => { - match $printer.parser { - Ok(ref mut parser) => match parser.$method($($($arg),*)*) { - Ok(x) => x, - Err(err) => { - $printer.print(err.message())?; - $printer.parser = Err(err); - return Ok(()); - } - } - Err(_) => return $printer.print("?"), - } - }; -} - -impl<'a, 'b, 's> Printer<'a, 'b, 's> { - /// Eat the given character from the parser, - /// returning `false` if the parser errored. - fn eat(&mut self, b: u8) -> bool { - self.parser.as_mut().map(|p| p.eat(b)) == Ok(true) - } - - /// Skip printing (i.e. `self.out` will be `None`) for the duration of the - /// given closure. This should not change parsing behavior, only disable the - /// output, but there may be optimizations (such as not traversing backrefs). - fn skipping_printing(&mut self, f: F) - where - F: FnOnce(&mut Self) -> fmt::Result, - { - let orig_out = self.out.take(); - f(self).expect("`fmt::Error`s should be impossible without a `fmt::Formatter`"); - self.out = orig_out; - } - - /// Print the target of a backref, using the given closure. - /// When printing is being skipped, the backref will only be parsed, - /// ignoring the backref's target completely. 
- fn print_backref(&mut self, f: F) -> fmt::Result - where - F: FnOnce(&mut Self) -> fmt::Result, - { - let backref_parser = parse!(self, backref); - - if self.out.is_none() { - return Ok(()); - } - - let orig_parser = mem::replace(&mut self.parser, Ok(backref_parser)); - let r = f(self); - self.parser = orig_parser; - r - } - - fn pop_depth(&mut self) { - if let Ok(ref mut parser) = self.parser { - parser.pop_depth(); - } - } - - /// Output the given value to `self.out` (using `fmt::Display` formatting), - /// if printing isn't being skipped. - fn print(&mut self, x: impl fmt::Display) -> fmt::Result { - if let Some(out) = &mut self.out { - fmt::Display::fmt(&x, out)?; - } - Ok(()) - } - - /// Output the given `char`s (escaped using `char::escape_debug`), with the - /// whole sequence wrapped in quotes, for either a `char` or `&str` literal, - /// if printing isn't being skipped. - fn print_quoted_escaped_chars( - &mut self, - quote: char, - chars: impl Iterator, - ) -> fmt::Result { - if let Some(out) = &mut self.out { - use core::fmt::Write; - - out.write_char(quote)?; - for c in chars { - // Special-case not escaping a single/double quote, when - // inside the opposite kind of quote. - if matches!((quote, c), ('\'', '"') | ('"', '\'')) { - out.write_char(c)?; - continue; - } - - for escaped in c.escape_debug() { - out.write_char(escaped)?; - } - } - out.write_char(quote)?; - } - Ok(()) - } - - /// Print the lifetime according to the previously decoded index. - /// An index of `0` always refers to `'_`, but starting with `1`, - /// indices refer to late-bound lifetimes introduced by a binder. - fn print_lifetime_from_index(&mut self, lt: u64) -> fmt::Result { - // Bound lifetimes aren't tracked when skipping printing. - if self.out.is_none() { - return Ok(()); - } - - self.print("'")?; - if lt == 0 { - return self.print("_"); - } - match (self.bound_lifetime_depth as u64).checked_sub(lt) { - Some(depth) => { - // Try to print lifetimes alphabetically first. 
- if depth < 26 { - let c = (b'a' + depth as u8) as char; - self.print(c) - } else { - // Use `'_123` after running out of letters. - self.print("_")?; - self.print(depth) - } - } - None => invalid!(self), - } - } - - /// Optionally enter a binder ('G') for late-bound lifetimes, - /// printing e.g. `for<'a, 'b> ` before calling the closure, - /// and make those lifetimes visible to it (via depth level). - fn in_binder(&mut self, f: F) -> fmt::Result - where - F: FnOnce(&mut Self) -> fmt::Result, - { - let bound_lifetimes = parse!(self, opt_integer_62(b'G')); - - // Don't track bound lifetimes when skipping printing. - if self.out.is_none() { - return f(self); - } - - if bound_lifetimes > 0 { - self.print("for<")?; - for i in 0..bound_lifetimes { - if i > 0 { - self.print(", ")?; - } - self.bound_lifetime_depth += 1; - self.print_lifetime_from_index(1)?; - } - self.print("> ")?; - } - - let r = f(self); - - // Restore `bound_lifetime_depth` to the previous value. - self.bound_lifetime_depth -= bound_lifetimes as u32; - - r - } - - /// Print list elements using the given closure and separator, - /// until the end of the list ('E') is found, or the parser errors. - /// Returns the number of elements printed. 
- fn print_sep_list(&mut self, f: F, sep: &str) -> Result - where - F: Fn(&mut Self) -> fmt::Result, - { - let mut i = 0; - while self.parser.is_ok() && !self.eat(b'E') { - if i > 0 { - self.print(sep)?; - } - f(self)?; - i += 1; - } - Ok(i) - } - - fn print_path(&mut self, in_value: bool) -> fmt::Result { - parse!(self, push_depth); - - let tag = parse!(self, next); - match tag { - b'C' => { - let dis = parse!(self, disambiguator); - let name = parse!(self, ident); - - self.print(name)?; - if let Some(out) = &mut self.out { - if !out.alternate() { - out.write_str("[")?; - fmt::LowerHex::fmt(&dis, out)?; - out.write_str("]")?; - } - } - } - b'N' => { - let ns = parse!(self, namespace); - - self.print_path(in_value)?; - - // HACK(eddyb) if the parser is already marked as having errored, - // `parse!` below will print a `?` without its preceding `::` - // (because printing the `::` is skipped in certain conditions, - // i.e. a lowercase namespace with an empty identifier), - // so in order to get `::?`, the `::` has to be printed here. - if self.parser.is_err() { - self.print("::")?; - } - - let dis = parse!(self, disambiguator); - let name = parse!(self, ident); - - match ns { - // Special namespaces, like closures and shims. - Some(ns) => { - self.print("::{")?; - match ns { - 'C' => self.print("closure")?, - 'S' => self.print("shim")?, - _ => self.print(ns)?, - } - if !name.ascii.is_empty() || !name.punycode.is_empty() { - self.print(":")?; - self.print(name)?; - } - self.print("#")?; - self.print(dis)?; - self.print("}")?; - } - - // Implementation-specific/unspecified namespaces. - None => { - if !name.ascii.is_empty() || !name.punycode.is_empty() { - self.print("::")?; - self.print(name)?; - } - } - } - } - b'M' | b'X' | b'Y' => { - if tag != b'Y' { - // Ignore the `impl`'s own path. 
- parse!(self, disambiguator); - self.skipping_printing(|this| this.print_path(false)); - } - - self.print("<")?; - self.print_type()?; - if tag != b'M' { - self.print(" as ")?; - self.print_path(false)?; - } - self.print(">")?; - } - b'I' => { - self.print_path(in_value)?; - if in_value { - self.print("::")?; - } - self.print("<")?; - self.print_sep_list(Self::print_generic_arg, ", ")?; - self.print(">")?; - } - b'B' => { - self.print_backref(|this| this.print_path(in_value))?; - } - _ => invalid!(self), - } - - self.pop_depth(); - Ok(()) - } - - fn print_generic_arg(&mut self) -> fmt::Result { - if self.eat(b'L') { - let lt = parse!(self, integer_62); - self.print_lifetime_from_index(lt) - } else if self.eat(b'K') { - self.print_const(false) - } else { - self.print_type() - } - } - - fn print_type(&mut self) -> fmt::Result { - let tag = parse!(self, next); - - if let Some(ty) = basic_type(tag) { - return self.print(ty); - } - - parse!(self, push_depth); - - match tag { - b'R' | b'Q' => { - self.print("&")?; - if self.eat(b'L') { - let lt = parse!(self, integer_62); - if lt != 0 { - self.print_lifetime_from_index(lt)?; - self.print(" ")?; - } - } - if tag != b'R' { - self.print("mut ")?; - } - self.print_type()?; - } - - b'P' | b'O' => { - self.print("*")?; - if tag != b'P' { - self.print("mut ")?; - } else { - self.print("const ")?; - } - self.print_type()?; - } - - b'A' | b'S' => { - self.print("[")?; - self.print_type()?; - if tag == b'A' { - self.print("; ")?; - self.print_const(true)?; - } - self.print("]")?; - } - b'T' => { - self.print("(")?; - let count = self.print_sep_list(Self::print_type, ", ")?; - if count == 1 { - self.print(",")?; - } - self.print(")")?; - } - b'F' => self.in_binder(|this| { - let is_unsafe = this.eat(b'U'); - let abi = if this.eat(b'K') { - if this.eat(b'C') { - Some("C") - } else { - let abi = parse!(this, ident); - if abi.ascii.is_empty() || !abi.punycode.is_empty() { - invalid!(this); - } - Some(abi.ascii) - } - } else { - None 
- }; - - if is_unsafe { - this.print("unsafe ")?; - } - - if let Some(abi) = abi { - this.print("extern \"")?; - - // If the ABI had any `-`, they were replaced with `_`, - // so the parts between `_` have to be re-joined with `-`. - let mut parts = abi.split('_'); - this.print(parts.next().unwrap())?; - for part in parts { - this.print("-")?; - this.print(part)?; - } - - this.print("\" ")?; - } - - this.print("fn(")?; - this.print_sep_list(Self::print_type, ", ")?; - this.print(")")?; - - if this.eat(b'u') { - // Skip printing the return type if it's 'u', i.e. `()`. - } else { - this.print(" -> ")?; - this.print_type()?; - } - - Ok(()) - })?, - b'D' => { - self.print("dyn ")?; - self.in_binder(|this| { - this.print_sep_list(Self::print_dyn_trait, " + ")?; - Ok(()) - })?; - - if !self.eat(b'L') { - invalid!(self); - } - let lt = parse!(self, integer_62); - if lt != 0 { - self.print(" + ")?; - self.print_lifetime_from_index(lt)?; - } - } - b'B' => { - self.print_backref(Self::print_type)?; - } - _ => { - // Go back to the tag, so `print_path` also sees it. - let _ = self.parser.as_mut().map(|p| p.next -= 1); - self.print_path(false)?; - } - } - - self.pop_depth(); - Ok(()) - } - - /// A trait in a trait object may have some "existential projections" - /// (i.e. associated type bindings) after it, which should be printed - /// in the `<...>` of the trait, e.g. `dyn Trait`. - /// To this end, this method will keep the `<...>` of an 'I' path - /// open, by omitting the `>`, and return `Ok(true)` in that case. - fn print_path_maybe_open_generics(&mut self) -> Result { - if self.eat(b'B') { - // NOTE(eddyb) the closure may not run if printing is being skipped, - // but in that case the returned boolean doesn't matter. 
- let mut open = false; - self.print_backref(|this| { - open = this.print_path_maybe_open_generics()?; - Ok(()) - })?; - Ok(open) - } else if self.eat(b'I') { - self.print_path(false)?; - self.print("<")?; - self.print_sep_list(Self::print_generic_arg, ", ")?; - Ok(true) - } else { - self.print_path(false)?; - Ok(false) - } - } - - fn print_dyn_trait(&mut self) -> fmt::Result { - let mut open = self.print_path_maybe_open_generics()?; - - while self.eat(b'p') { - if !open { - self.print("<")?; - open = true; - } else { - self.print(", ")?; - } - - let name = parse!(self, ident); - self.print(name)?; - self.print(" = ")?; - self.print_type()?; - } - - if open { - self.print(">")?; - } - - Ok(()) - } - - fn print_const(&mut self, in_value: bool) -> fmt::Result { - let tag = parse!(self, next); - - parse!(self, push_depth); - - // Only literals (and the names of `const` generic parameters, but they - // don't get mangled at all), can appear in generic argument position - // without any disambiguation, all other expressions require braces. - // To avoid duplicating the mapping between `tag` and what syntax gets - // used (especially any special-casing), every case that needs braces - // has to call `open_brace(self)?` (and the closing brace is automatic). - let mut opened_brace = false; - let mut open_brace_if_outside_expr = |this: &mut Self| { - // If this expression is nested in another, braces aren't required. - if in_value { - return Ok(()); - } - - opened_brace = true; - this.print("{") - }; - - match tag { - b'p' => self.print("_")?, - - // Primitive leaves with hex-encoded values (see `basic_type`). 
- b'h' | b't' | b'm' | b'y' | b'o' | b'j' => self.print_const_uint(tag)?, - b'a' | b's' | b'l' | b'x' | b'n' | b'i' => { - if self.eat(b'n') { - self.print("-")?; - } - - self.print_const_uint(tag)?; - } - b'b' => match parse!(self, hex_nibbles).try_parse_uint() { - Some(0) => self.print("false")?, - Some(1) => self.print("true")?, - _ => invalid!(self), - }, - b'c' => { - let valid_char = parse!(self, hex_nibbles) - .try_parse_uint() - .and_then(|v| u32::try_from(v).ok()) - .and_then(char::from_u32); - match valid_char { - Some(c) => self.print_quoted_escaped_chars('\'', iter::once(c))?, - None => invalid!(self), - } - } - b'e' => { - // NOTE(eddyb) a string literal `"..."` has type `&str`, so - // to get back the type `str`, `*"..."` syntax is needed - // (even if that may not be valid in Rust itself). - open_brace_if_outside_expr(self)?; - self.print("*")?; - - self.print_const_str_literal()?; - } - - b'R' | b'Q' => { - // NOTE(eddyb) this prints `"..."` instead of `&*"..."`, which - // is what `Re..._` would imply (see comment for `str` above). 
- if tag == b'R' && self.eat(b'e') { - self.print_const_str_literal()?; - } else { - open_brace_if_outside_expr(self)?; - self.print("&")?; - if tag != b'R' { - self.print("mut ")?; - } - self.print_const(true)?; - } - } - b'A' => { - open_brace_if_outside_expr(self)?; - self.print("[")?; - self.print_sep_list(|this| this.print_const(true), ", ")?; - self.print("]")?; - } - b'T' => { - open_brace_if_outside_expr(self)?; - self.print("(")?; - let count = self.print_sep_list(|this| this.print_const(true), ", ")?; - if count == 1 { - self.print(",")?; - } - self.print(")")?; - } - b'V' => { - open_brace_if_outside_expr(self)?; - self.print_path(true)?; - match parse!(self, next) { - b'U' => {} - b'T' => { - self.print("(")?; - self.print_sep_list(|this| this.print_const(true), ", ")?; - self.print(")")?; - } - b'S' => { - self.print(" { ")?; - self.print_sep_list( - |this| { - parse!(this, disambiguator); - let name = parse!(this, ident); - this.print(name)?; - this.print(": ")?; - this.print_const(true) - }, - ", ", - )?; - self.print(" }")?; - } - _ => invalid!(self), - } - } - b'B' => { - self.print_backref(|this| this.print_const(in_value))?; - } - _ => invalid!(self), - } - - if opened_brace { - self.print("}")?; - } - - self.pop_depth(); - Ok(()) - } - - fn print_const_uint(&mut self, ty_tag: u8) -> fmt::Result { - let hex = parse!(self, hex_nibbles); - - match hex.try_parse_uint() { - Some(v) => self.print(v)?, - - // Print anything that doesn't fit in `u64` verbatim. - None => { - self.print("0x")?; - self.print(hex.nibbles)?; - } - } - - if let Some(out) = &mut self.out { - if !out.alternate() { - let ty = basic_type(ty_tag).unwrap(); - self.print(ty)?; - } - } - - Ok(()) - } - - fn print_const_str_literal(&mut self) -> fmt::Result { - match parse!(self, hex_nibbles).try_parse_str_chars() { - Some(chars) => self.print_quoted_escaped_chars('"', chars), - None => invalid!(self), - } - } -} - -#[cfg(test)] -mod tests { - use std::prelude::v1::*; - - macro_rules! 
t { - ($a:expr, $b:expr) => {{ - assert_eq!(format!("{}", ::demangle($a)), $b); - }}; - } - macro_rules! t_nohash { - ($a:expr, $b:expr) => {{ - assert_eq!(format!("{:#}", ::demangle($a)), $b); - }}; - } - macro_rules! t_nohash_type { - ($a:expr, $b:expr) => { - t_nohash!(concat!("_RMC0", $a), concat!("<", $b, ">")) - }; - } - macro_rules! t_const { - ($mangled:expr, $value:expr) => { - t_nohash!( - concat!("_RIC0K", $mangled, "E"), - concat!("::<", $value, ">") - ) - }; - } - macro_rules! t_const_suffixed { - ($mangled:expr, $value:expr, $value_ty_suffix:expr) => {{ - t_const!($mangled, $value); - t!( - concat!("_RIC0K", $mangled, "E"), - concat!("[0]::<", $value, $value_ty_suffix, ">") - ); - }}; - } - - #[test] - fn demangle_crate_with_leading_digit() { - t_nohash!("_RNvC6_123foo3bar", "123foo::bar"); - } - - #[test] - fn demangle_utf8_idents() { - t_nohash!( - "_RNqCs4fqI2P2rA04_11utf8_identsu30____7hkackfecea1cbdathfdh9hlq6y", - "utf8_idents::სáƒáƒ­áƒ›áƒ”ლáƒáƒ“_გემრიელი_სáƒáƒ“ილი" - ); - } - - #[test] - fn demangle_closure() { - t_nohash!( - "_RNCNCNgCs6DXkGYLi8lr_2cc5spawn00B5_", - "cc::spawn::{closure#0}::{closure#0}" - ); - t_nohash!( - "_RNCINkXs25_NgCsbmNqQUJIY6D_4core5sliceINyB9_4IterhENuNgNoBb_4iter8iterator8Iterator9rpositionNCNgNpB9_6memchr7memrchrs_0E0Bb_", - " as core::iter::iterator::Iterator>::rposition::::{closure#0}" - ); - } - - #[test] - fn demangle_dyn_trait() { - t_nohash!( - "_RINbNbCskIICzLVDPPb_5alloc5alloc8box_freeDINbNiB4_5boxed5FnBoxuEp6OutputuEL_ECs1iopQbuBiw2_3std", - "alloc::alloc::box_free::>" - ); - } - - #[test] - fn demangle_const_generics_preview() { - // NOTE(eddyb) this was hand-written, before rustc had working - // const generics support (but the mangling format did include them). 
- t_nohash_type!( - "INtC8arrayvec8ArrayVechKj7b_E", - "arrayvec::ArrayVec" - ); - t_const_suffixed!("j7b_", "123", "usize"); - } - - #[test] - fn demangle_min_const_generics() { - t_const!("p", "_"); - t_const_suffixed!("hb_", "11", "u8"); - t_const_suffixed!("off00ff00ff00ff00ff_", "0xff00ff00ff00ff00ff", "u128"); - t_const_suffixed!("s98_", "152", "i16"); - t_const_suffixed!("anb_", "-11", "i8"); - t_const!("b0_", "false"); - t_const!("b1_", "true"); - t_const!("c76_", "'v'"); - t_const!("c22_", r#"'"'"#); - t_const!("ca_", "'\\n'"); - t_const!("c2202_", "'∂'"); - } - - #[test] - fn demangle_const_str() { - t_const!("e616263_", "{*\"abc\"}"); - t_const!("e27_", r#"{*"'"}"#); - t_const!("e090a_", "{*\"\\t\\n\"}"); - t_const!("ee28882c3bc_", "{*\"∂ü\"}"); - t_const!( - "ee183a1e18390e183ade1839be18394e1839ae18390e183935fe18392e18394e1839b\ - e183a0e18398e18394e1839ae183985fe183a1e18390e18393e18398e1839ae18398_", - "{*\"სáƒáƒ­áƒ›áƒ”ლáƒáƒ“_გემრიელი_სáƒáƒ“ილი\"}" - ); - t_const!( - "ef09f908af09fa688f09fa686f09f90ae20c2a720f09f90b6f09f9192e298\ - 95f09f94a520c2a720f09fa7a1f09f929bf09f929af09f9299f09f929c_", - "{*\"ðŸŠðŸ¦ˆðŸ¦†ðŸ® § ðŸ¶ðŸ‘’☕🔥 § 🧡💛💚💙💜\"}" - ); - } - - // NOTE(eddyb) this uses the same strings as `demangle_const_str` and should - // be kept in sync with it - while a macro could be used to generate both - // `str` and `&str` tests, from a single list of strings, this seems clearer. 
- #[test] - fn demangle_const_ref_str() { - t_const!("Re616263_", "\"abc\""); - t_const!("Re27_", r#""'""#); - t_const!("Re090a_", "\"\\t\\n\""); - t_const!("Ree28882c3bc_", "\"∂ü\""); - t_const!( - "Ree183a1e18390e183ade1839be18394e1839ae18390e183935fe18392e18394e1839b\ - e183a0e18398e18394e1839ae183985fe183a1e18390e18393e18398e1839ae18398_", - "\"სáƒáƒ­áƒ›áƒ”ლáƒáƒ“_გემრიელი_სáƒáƒ“ილი\"" - ); - t_const!( - "Ref09f908af09fa688f09fa686f09f90ae20c2a720f09f90b6f09f9192e298\ - 95f09f94a520c2a720f09fa7a1f09f929bf09f929af09f9299f09f929c_", - "\"ðŸŠðŸ¦ˆðŸ¦†ðŸ® § ðŸ¶ðŸ‘’☕🔥 § 🧡💛💚💙💜\"" - ); - } - - #[test] - fn demangle_const_ref() { - t_const!("Rp", "{&_}"); - t_const!("Rh7b_", "{&123}"); - t_const!("Rb0_", "{&false}"); - t_const!("Rc58_", "{&'X'}"); - t_const!("RRRh0_", "{&&&0}"); - t_const!("RRRe_", "{&&\"\"}"); - t_const!("QAE", "{&mut []}"); - } - - #[test] - fn demangle_const_array() { - t_const!("AE", "{[]}"); - t_const!("Aj0_E", "{[0]}"); - t_const!("Ah1_h2_h3_E", "{[1, 2, 3]}"); - t_const!("ARe61_Re62_Re63_E", "{[\"a\", \"b\", \"c\"]}"); - t_const!("AAh1_h2_EAh3_h4_EE", "{[[1, 2], [3, 4]]}"); - } - - #[test] - fn demangle_const_tuple() { - t_const!("TE", "{()}"); - t_const!("Tj0_E", "{(0,)}"); - t_const!("Th1_b0_E", "{(1, false)}"); - t_const!( - "TRe616263_c78_RAh1_h2_h3_EE", - "{(\"abc\", 'x', &[1, 2, 3])}" - ); - } - - #[test] - fn demangle_const_adt() { - t_const!( - "VNvINtNtC4core6option6OptionjE4NoneU", - "{core::option::Option::::None}" - ); - t_const!( - "VNvINtNtC4core6option6OptionjE4SomeTj0_E", - "{core::option::Option::::Some(0)}" - ); - t_const!( - "VNtC3foo3BarS1sRe616263_2chc78_5sliceRAh1_h2_h3_EE", - "{foo::Bar { s: \"abc\", ch: 'x', slice: &[1, 2, 3] }}" - ); - } - - #[test] - fn demangle_exponential_explosion() { - // NOTE(eddyb) because of the prefix added by `t_nohash_type!` is - // 3 bytes long, `B2_` refers to the start of the type, not `B_`. - // 6 backrefs (`B8_E` through `B3_E`) result in 2^6 = 64 copies of `_`. 
- // Also, because the `p` (`_`) type is after all of the starts of the - // backrefs, it can be replaced with any other type, independently. - t_nohash_type!( - concat!("TTTTTT", "p", "B8_E", "B7_E", "B6_E", "B5_E", "B4_E", "B3_E"), - "((((((_, _), (_, _)), ((_, _), (_, _))), (((_, _), (_, _)), ((_, _), (_, _)))), \ - ((((_, _), (_, _)), ((_, _), (_, _))), (((_, _), (_, _)), ((_, _), (_, _))))), \ - (((((_, _), (_, _)), ((_, _), (_, _))), (((_, _), (_, _)), ((_, _), (_, _)))), \ - ((((_, _), (_, _)), ((_, _), (_, _))), (((_, _), (_, _)), ((_, _), (_, _))))))" - ); - } - - #[test] - fn demangle_thinlto() { - t_nohash!("_RC3foo.llvm.9D1C9369", "foo"); - t_nohash!("_RC3foo.llvm.9D1C9369@@16", "foo"); - t_nohash!("_RNvC9backtrace3foo.llvm.A5310EB9", "backtrace::foo"); - } - - #[test] - fn demangle_extra_suffix() { - // From alexcrichton/rustc-demangle#27: - t_nohash!( - "_RNvNtNtNtNtCs92dm3009vxr_4rand4rngs7adapter9reseeding4fork23FORK_HANDLER_REGISTERED.0.0", - "rand::rngs::adapter::reseeding::fork::FORK_HANDLER_REGISTERED.0.0" - ); - } - - #[test] - fn demangling_limits() { - // Stress tests found via fuzzing. - - for sym in include_str!("v0-large-test-symbols/early-recursion-limit") - .lines() - .filter(|line| !line.is_empty() && !line.starts_with('#')) - { - assert_eq!( - super::demangle(sym).map(|_| ()), - Err(super::ParseError::RecursedTooDeep) - ); - } - - assert_contains!( - ::demangle( - "RIC20tRYIMYNRYFG05_EB5_B_B6_RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR\ - RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRB_E", - ) - .to_string(), - "{recursion limit reached}" - ); - } - - #[test] - fn recursion_limit_leaks() { - // NOTE(eddyb) this test checks that both paths and types support the - // recursion limit correctly, i.e. matching `push_depth` and `pop_depth`, - // and don't leak "recursion levels" and trip the limit. - // The test inputs are generated on the fly, using a repeated pattern, - // as hardcoding the actual strings would be too verbose. 
- // Also, `MAX_DEPTH` can be directly used, instead of assuming its value. - for &(sym_leaf, expected_leaf) in &[("p", "_"), ("Rp", "&_"), ("C1x", "x")] { - let mut sym = format!("_RIC0p"); - let mut expected = format!("::<_"); - for _ in 0..(super::MAX_DEPTH * 2) { - sym.push_str(sym_leaf); - expected.push_str(", "); - expected.push_str(expected_leaf); - } - sym.push('E'); - expected.push('>'); - - t_nohash!(&sym, expected); - } - } - - #[test] - fn recursion_limit_backref_free_bypass() { - // NOTE(eddyb) this test checks that long symbols cannot bypass the - // recursion limit by not using backrefs, and cause a stack overflow. - - // This value was chosen to be high enough that stack overflows were - // observed even with `cargo test --release`. - let depth = 100_000; - - // In order to hide the long mangling from the initial "shallow" parse, - // it's nested in an identifier (crate name), preceding its use. - let mut sym = format!("_RIC{}", depth); - let backref_start = sym.len() - 2; - for _ in 0..depth { - sym.push('R'); - } - - // Write a backref to just after the length of the identifier. - sym.push('B'); - sym.push(char::from_digit((backref_start - 1) as u32, 36).unwrap()); - sym.push('_'); - - // Close the `I` at the start. 
- sym.push('E'); - - assert_contains!(::demangle(&sym).to_string(), "{recursion limit reached}"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/scopeguard/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/scopeguard/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/scopeguard/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/scopeguard/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/scopeguard/Cargo.lock s390-tools-2.33.1/rust-vendor/scopeguard/Cargo.lock --- s390-tools-2.31.0/rust-vendor/scopeguard/Cargo.lock 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/scopeguard/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 @@ -1,7 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "scopeguard" -version = "1.2.0" diff -Nru s390-tools-2.31.0/rust-vendor/scopeguard/Cargo.toml s390-tools-2.33.1/rust-vendor/scopeguard/Cargo.toml --- s390-tools-2.31.0/rust-vendor/scopeguard/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/scopeguard/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -name = "scopeguard" -version = "1.2.0" -authors = ["bluss"] -description = """ -A RAII scope guard that will run a given closure when it goes out of scope, -even if the code between panics (assuming unwinding panic). - -Defines the macros `defer!`, `defer_on_unwind!`, `defer_on_success!` as -shorthands for guards with one of the implemented strategies. -""" -documentation = "https://docs.rs/scopeguard/" -readme = "README.md" -keywords = [ - "scope-guard", - "defer", - "panic", - "unwind", -] -categories = [ - "rust-patterns", - "no-std", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/bluss/scopeguard" - -[package.metadata.release] -no-dev-version = true - -[features] -default = ["use_std"] -use_std = [] diff -Nru s390-tools-2.31.0/rust-vendor/scopeguard/examples/readme.rs s390-tools-2.33.1/rust-vendor/scopeguard/examples/readme.rs --- s390-tools-2.31.0/rust-vendor/scopeguard/examples/readme.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/scopeguard/examples/readme.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,29 +0,0 @@ -#[macro_use(defer)] -extern crate scopeguard; - -use scopeguard::guard; - -fn f() { - defer! 
{ - println!("Called at return or panic"); - } - panic!(); -} - -use std::fs::File; -use std::io::Write; - -fn g() { - let f = File::create("newfile.txt").unwrap(); - let mut file = guard(f, |f| { - // write file at return or panic - let _ = f.sync_all(); - }); - // access the file through the scope guard itself - file.write_all(b"test me\n").unwrap(); -} - -fn main() { - f(); - g(); -} diff -Nru s390-tools-2.31.0/rust-vendor/scopeguard/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/scopeguard/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/scopeguard/LICENSE-APACHE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/scopeguard/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/scopeguard/LICENSE-MIT s390-tools-2.33.1/rust-vendor/scopeguard/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/scopeguard/LICENSE-MIT 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/scopeguard/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2016-2019 Ulrik Sverdrup "bluss" and scopeguard developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/scopeguard/README.md s390-tools-2.33.1/rust-vendor/scopeguard/README.md --- s390-tools-2.31.0/rust-vendor/scopeguard/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/scopeguard/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,102 +0,0 @@ -# scopeguard - -Rust crate for a convenient RAII scope guard that will run a given closure when -it goes out of scope, even if the code between panics (assuming unwinding panic). - -The `defer!` macro and `guard` are `no_std` compatible (require only `core`), -but the on unwinding / not on unwinding strategies require linking to `std`. -By default, the `use_std` crate feature is enabled. Disable the default features -for `no_std` support. - -Please read the [API documentation here](https://docs.rs/scopeguard/). - -Minimum supported Rust version: 1.20 - -[![build_status](https://github.com/bluss/scopeguard/actions/workflows/ci.yaml/badge.svg)](https://github.com/bluss/scopeguard/actions/workflows/ci.yaml) -[![crates](https://img.shields.io/crates/v/scopeguard.svg)](https://crates.io/crates/scopeguard) - -## How to use - -```rs -#[macro_use(defer)] -extern crate scopeguard; - -use scopeguard::guard; - -fn f() { - defer! { - println!("Called at return or panic"); - } - panic!(); -} - -use std::fs::File; -use std::io::Write; - -fn g() { - let f = File::create("newfile.txt").unwrap(); - let mut file = guard(f, |f| { - // write file at return or panic - let _ = f.sync_all(); - }); - // access the file through the scope guard itself - file.write_all(b"test me\n").unwrap(); -} -``` - -## Recent Changes - -- 1.2.0 - - - Use ManuallyDrop instead of mem::forget in into_inner. 
(by @willtunnels) - - Warn if the guard is not assigned to a variable and is dropped immediately - instead of at the scope's end. (by @sergey-v-galtsev) - -- 1.1.0 - - - Change macros (`defer!`, `defer_on_success!` and `defer_on_unwind!`) - to accept statements. (by @konsumlamm) - -- 1.0.0 - - - Change the closure type from `FnMut(&mut T)` to `FnOnce(T)`: - Passing the inner value by value instead of a mutable reference is a - breaking change, but allows the guard closure to consume it. (by @tormol) - - - Add `defer_on_success!`, `guard_on_success()` and `OnSuccess` - strategy, which triggers when scope is exited *without* panic. It's the - opposite to `defer_on_unwind!` / `guard_on_unwind()` / `OnUnwind`. - - - Add `ScopeGuard::into_inner()`, which "defuses" the guard and returns the - guarded value. (by @tormol) - - - Implement `Sync` for guards with non-`Sync` closures. - - - Require Rust 1.20 - -- 0.3.3 - - - Use `#[inline]` on a few more functions by @stjepang (#14) - - Add examples to crate documentation - -- 0.3.2 - - - Add crate categories - -- 0.3.1 - - - Add `defer_on_unwind!`, `Strategy` trait - - Rename `Guard` → `ScopeGuard` - - Add `ScopeGuard::with_strategy`. - - `ScopeGuard` now implements `Debug`. - - Require Rust 1.11 - -- 0.2.0 - - - Require Rust 1.6 - - Use `no_std` unconditionally - - No other changes - -- 0.1.2 - - - Add macro `defer!` diff -Nru s390-tools-2.31.0/rust-vendor/scopeguard/src/lib.rs s390-tools-2.33.1/rust-vendor/scopeguard/src/lib.rs --- s390-tools-2.31.0/rust-vendor/scopeguard/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/scopeguard/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,595 +0,0 @@ -#![cfg_attr(not(any(test, feature = "use_std")), no_std)] -#![doc(html_root_url = "https://docs.rs/scopeguard/1/")] - -//! A scope guard will run a given closure when it goes out of scope, -//! even if the code between panics. -//! (as long as panic doesn't abort) -//! -//! # Examples -//! -//! 
## Hello World -//! -//! This example creates a scope guard with an example function: -//! -//! ``` -//! extern crate scopeguard; -//! -//! fn f() { -//! let _guard = scopeguard::guard((), |_| { -//! println!("Hello Scope Exit!"); -//! }); -//! -//! // rest of the code here. -//! -//! // Here, at the end of `_guard`'s scope, the guard's closure is called. -//! // It is also called if we exit this scope through unwinding instead. -//! } -//! # fn main() { -//! # f(); -//! # } -//! ``` -//! -//! ## `defer!` -//! -//! Use the `defer` macro to run an operation at scope exit, -//! either regular scope exit or during unwinding from a panic. -//! -//! ``` -//! #[macro_use(defer)] extern crate scopeguard; -//! -//! use std::cell::Cell; -//! -//! fn main() { -//! // use a cell to observe drops during and after the scope guard is active -//! let drop_counter = Cell::new(0); -//! { -//! // Create a scope guard using `defer!` for the current scope -//! defer! { -//! drop_counter.set(1 + drop_counter.get()); -//! } -//! -//! // Do regular operations here in the meantime. -//! -//! // Just before scope exit: it hasn't run yet. -//! assert_eq!(drop_counter.get(), 0); -//! -//! // The following scope end is where the defer closure is called -//! } -//! assert_eq!(drop_counter.get(), 1); -//! } -//! ``` -//! -//! ## Scope Guard with Value -//! -//! If the scope guard closure needs to access an outer value that is also -//! mutated outside of the scope guard, then you may want to use the scope guard -//! with a value. The guard works like a smart pointer, so the inner value can -//! be accessed by reference or by mutable reference. -//! -//! ### 1. The guard owns a file -//! -//! In this example, the scope guard owns a file and ensures pending writes are -//! synced at scope exit. -//! -//! ``` -//! extern crate scopeguard; -//! -//! use std::fs::*; -//! use std::io::{self, Write}; -//! # // Mock file so that we don't actually write a file -//! # struct MockFile; -//! 
# impl MockFile { -//! # fn create(_s: &str) -> io::Result { Ok(MockFile) } -//! # fn write_all(&self, _b: &[u8]) -> io::Result<()> { Ok(()) } -//! # fn sync_all(&self) -> io::Result<()> { Ok(()) } -//! # } -//! # use self::MockFile as File; -//! -//! fn try_main() -> io::Result<()> { -//! let f = File::create("newfile.txt")?; -//! let mut file = scopeguard::guard(f, |f| { -//! // ensure we flush file at return or panic -//! let _ = f.sync_all(); -//! }); -//! // Access the file through the scope guard itself -//! file.write_all(b"test me\n").map(|_| ()) -//! } -//! -//! fn main() { -//! try_main().unwrap(); -//! } -//! -//! ``` -//! -//! ### 2. The guard restores an invariant on scope exit -//! -//! ``` -//! extern crate scopeguard; -//! -//! use std::mem::ManuallyDrop; -//! use std::ptr; -//! -//! // This function, just for this example, takes the first element -//! // and inserts it into the assumed sorted tail of the vector. -//! // -//! // For optimization purposes we temporarily violate an invariant of the -//! // Vec, that it owns all of its elements. -//! // -//! // The safe approach is to use swap, which means two writes to memory, -//! // the optimization is to use a “hole†which uses only one write of memory -//! // for each position it moves. -//! // -//! // We *must* use a scope guard to run this code safely. We -//! // are running arbitrary user code (comparison operators) that may panic. -//! // The scope guard ensures we restore the invariant after successful -//! // exit or during unwinding from panic. -//! fn insertion_sort_first(v: &mut Vec) -//! where T: PartialOrd -//! { -//! struct Hole<'a, T: 'a> { -//! v: &'a mut Vec, -//! index: usize, -//! value: ManuallyDrop, -//! } -//! -//! unsafe { -//! // Create a moved-from location in the vector, a “holeâ€. -//! let value = ptr::read(&v[0]); -//! let mut hole = Hole { v: v, index: 0, value: ManuallyDrop::new(value) }; -//! -//! // Use a scope guard with a value. -//! 
// At scope exit, plug the hole so that the vector is fully -//! // initialized again. -//! // The scope guard owns the hole, but we can access it through the guard. -//! let mut hole_guard = scopeguard::guard(hole, |hole| { -//! // plug the hole in the vector with the value that was // taken out -//! let index = hole.index; -//! ptr::copy_nonoverlapping(&*hole.value, &mut hole.v[index], 1); -//! }); -//! -//! // run algorithm that moves the hole in the vector here -//! // move the hole until it's in a sorted position -//! for i in 1..hole_guard.v.len() { -//! if *hole_guard.value >= hole_guard.v[i] { -//! // move the element back and the hole forward -//! let index = hole_guard.index; -//! hole_guard.v.swap(index, index + 1); -//! hole_guard.index += 1; -//! } else { -//! break; -//! } -//! } -//! -//! // When the scope exits here, the Vec becomes whole again! -//! } -//! } -//! -//! fn main() { -//! let string = String::from; -//! let mut data = vec![string("c"), string("a"), string("b"), string("d")]; -//! insertion_sort_first(&mut data); -//! assert_eq!(data, vec!["a", "b", "c", "d"]); -//! } -//! -//! ``` -//! -//! -//! # Crate Features -//! -//! - `use_std` -//! + Enabled by default. Enables the `OnUnwind` and `OnSuccess` strategies. -//! + Disable to use `no_std`. -//! -//! # Rust Version -//! -//! This version of the crate requires Rust 1.20 or later. -//! -//! The scopeguard 1.x release series will use a carefully considered version -//! upgrade policy, where in a later 1.x version, we will raise the minimum -//! required Rust version. - -#[cfg(not(any(test, feature = "use_std")))] -extern crate core as std; - -use std::fmt; -use std::marker::PhantomData; -use std::mem::ManuallyDrop; -use std::ops::{Deref, DerefMut}; -use std::ptr; - -/// Controls in which cases the associated code should be run -pub trait Strategy { - /// Return `true` if the guard’s associated code should run - /// (in the context where this method is called). 
- fn should_run() -> bool; -} - -/// Always run on scope exit. -/// -/// “Always†run: on regular exit from a scope or on unwinding from a panic. -/// Can not run on abort, process exit, and other catastrophic events where -/// destructors don’t run. -#[derive(Debug)] -pub enum Always {} - -/// Run on scope exit through unwinding. -/// -/// Requires crate feature `use_std`. -#[cfg(feature = "use_std")] -#[derive(Debug)] -pub enum OnUnwind {} - -/// Run on regular scope exit, when not unwinding. -/// -/// Requires crate feature `use_std`. -#[cfg(feature = "use_std")] -#[derive(Debug)] -pub enum OnSuccess {} - -impl Strategy for Always { - #[inline(always)] - fn should_run() -> bool { - true - } -} - -#[cfg(feature = "use_std")] -impl Strategy for OnUnwind { - #[inline] - fn should_run() -> bool { - std::thread::panicking() - } -} - -#[cfg(feature = "use_std")] -impl Strategy for OnSuccess { - #[inline] - fn should_run() -> bool { - !std::thread::panicking() - } -} - -/// Macro to create a `ScopeGuard` (always run). -/// -/// The macro takes statements, which are the body of a closure -/// that will run when the scope is exited. -#[macro_export] -macro_rules! defer { - ($($t:tt)*) => { - let _guard = $crate::guard((), |()| { $($t)* }); - }; -} - -/// Macro to create a `ScopeGuard` (run on successful scope exit). -/// -/// The macro takes statements, which are the body of a closure -/// that will run when the scope is exited. -/// -/// Requires crate feature `use_std`. -#[cfg(feature = "use_std")] -#[macro_export] -macro_rules! defer_on_success { - ($($t:tt)*) => { - let _guard = $crate::guard_on_success((), |()| { $($t)* }); - }; -} - -/// Macro to create a `ScopeGuard` (run on unwinding from panic). -/// -/// The macro takes statements, which are the body of a closure -/// that will run when the scope is exited. -/// -/// Requires crate feature `use_std`. -#[cfg(feature = "use_std")] -#[macro_export] -macro_rules! 
defer_on_unwind { - ($($t:tt)*) => { - let _guard = $crate::guard_on_unwind((), |()| { $($t)* }); - }; -} - -/// `ScopeGuard` is a scope guard that may own a protected value. -/// -/// If you place a guard in a local variable, the closure can -/// run regardless how you leave the scope — through regular return or panic -/// (except if panic or other code aborts; so as long as destructors run). -/// It is run only once. -/// -/// The `S` parameter for [`Strategy`](trait.Strategy.html) determines if -/// the closure actually runs. -/// -/// The guard's closure will be called with the held value in the destructor. -/// -/// The `ScopeGuard` implements `Deref` so that you can access the inner value. -pub struct ScopeGuard -where - F: FnOnce(T), - S: Strategy, -{ - value: ManuallyDrop, - dropfn: ManuallyDrop, - // fn(S) -> S is used, so that the S is not taken into account for auto traits. - strategy: PhantomData S>, -} - -impl ScopeGuard -where - F: FnOnce(T), - S: Strategy, -{ - /// Create a `ScopeGuard` that owns `v` (accessible through deref) and calls - /// `dropfn` when its destructor runs. - /// - /// The `Strategy` decides whether the scope guard's closure should run. - #[inline] - #[must_use] - pub fn with_strategy(v: T, dropfn: F) -> ScopeGuard { - ScopeGuard { - value: ManuallyDrop::new(v), - dropfn: ManuallyDrop::new(dropfn), - strategy: PhantomData, - } - } - - /// “Defuse†the guard and extract the value without calling the closure. 
- /// - /// ``` - /// extern crate scopeguard; - /// - /// use scopeguard::{guard, ScopeGuard}; - /// - /// fn conditional() -> bool { true } - /// - /// fn main() { - /// let mut guard = guard(Vec::new(), |mut v| v.clear()); - /// guard.push(1); - /// - /// if conditional() { - /// // a condition maybe makes us decide to - /// // “defuse†the guard and get back its inner parts - /// let value = ScopeGuard::into_inner(guard); - /// } else { - /// // guard still exists in this branch - /// } - /// } - /// ``` - #[inline] - pub fn into_inner(guard: Self) -> T { - // Cannot move out of `Drop`-implementing types, - // so `ptr::read` the value and forget the guard. - let mut guard = ManuallyDrop::new(guard); - unsafe { - let value = ptr::read(&*guard.value); - // Drop the closure after `value` has been read, so that if the - // closure's `drop` function panics, unwinding still tries to drop - // `value`. - ManuallyDrop::drop(&mut guard.dropfn); - value - } - } -} - -/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`. -#[inline] -#[must_use] -pub fn guard(v: T, dropfn: F) -> ScopeGuard -where - F: FnOnce(T), -{ - ScopeGuard::with_strategy(v, dropfn) -} - -/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`. -/// -/// Requires crate feature `use_std`. -#[cfg(feature = "use_std")] -#[inline] -#[must_use] -pub fn guard_on_success(v: T, dropfn: F) -> ScopeGuard -where - F: FnOnce(T), -{ - ScopeGuard::with_strategy(v, dropfn) -} - -/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`. -/// -/// Requires crate feature `use_std`. -/// -/// ## Examples -/// -/// For performance reasons, or to emulate “only run guard on unwind†in -/// no-std environments, we can also use the default guard and simply manually -/// defuse it at the end of scope like the following example. (The performance -/// reason would be if the [`OnUnwind`]'s call to [std::thread::panicking()] is -/// an issue.) 
-/// -/// ``` -/// extern crate scopeguard; -/// -/// use scopeguard::ScopeGuard; -/// # fn main() { -/// { -/// let guard = scopeguard::guard((), |_| {}); -/// -/// // rest of the code here -/// -/// // we reached the end of scope without unwinding - defuse it -/// ScopeGuard::into_inner(guard); -/// } -/// # } -/// ``` -#[cfg(feature = "use_std")] -#[inline] -#[must_use] -pub fn guard_on_unwind(v: T, dropfn: F) -> ScopeGuard -where - F: FnOnce(T), -{ - ScopeGuard::with_strategy(v, dropfn) -} - -// ScopeGuard can be Sync even if F isn't because the closure is -// not accessible from references. -// The guard does not store any instance of S, so it is also irrelevant. -unsafe impl Sync for ScopeGuard -where - T: Sync, - F: FnOnce(T), - S: Strategy, -{ -} - -impl Deref for ScopeGuard -where - F: FnOnce(T), - S: Strategy, -{ - type Target = T; - - fn deref(&self) -> &T { - &*self.value - } -} - -impl DerefMut for ScopeGuard -where - F: FnOnce(T), - S: Strategy, -{ - fn deref_mut(&mut self) -> &mut T { - &mut *self.value - } -} - -impl Drop for ScopeGuard -where - F: FnOnce(T), - S: Strategy, -{ - fn drop(&mut self) { - // This is OK because the fields are `ManuallyDrop`s - // which will not be dropped by the compiler. 
- let (value, dropfn) = unsafe { (ptr::read(&*self.value), ptr::read(&*self.dropfn)) }; - if S::should_run() { - dropfn(value); - } - } -} - -impl fmt::Debug for ScopeGuard -where - T: fmt::Debug, - F: FnOnce(T), - S: Strategy, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct(stringify!(ScopeGuard)) - .field("value", &*self.value) - .finish() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::cell::Cell; - use std::panic::catch_unwind; - use std::panic::AssertUnwindSafe; - - #[test] - fn test_defer() { - let drops = Cell::new(0); - defer!(drops.set(1000)); - assert_eq!(drops.get(), 0); - } - - #[cfg(feature = "use_std")] - #[test] - fn test_defer_success_1() { - let drops = Cell::new(0); - { - defer_on_success!(drops.set(1)); - assert_eq!(drops.get(), 0); - } - assert_eq!(drops.get(), 1); - } - - #[cfg(feature = "use_std")] - #[test] - fn test_defer_success_2() { - let drops = Cell::new(0); - let _ = catch_unwind(AssertUnwindSafe(|| { - defer_on_success!(drops.set(1)); - panic!("failure") - })); - assert_eq!(drops.get(), 0); - } - - #[cfg(feature = "use_std")] - #[test] - fn test_defer_unwind_1() { - let drops = Cell::new(0); - let _ = catch_unwind(AssertUnwindSafe(|| { - defer_on_unwind!(drops.set(1)); - assert_eq!(drops.get(), 0); - panic!("failure") - })); - assert_eq!(drops.get(), 1); - } - - #[cfg(feature = "use_std")] - #[test] - fn test_defer_unwind_2() { - let drops = Cell::new(0); - { - defer_on_unwind!(drops.set(1)); - } - assert_eq!(drops.get(), 0); - } - - #[test] - fn test_only_dropped_by_closure_when_run() { - let value_drops = Cell::new(0); - let value = guard((), |()| value_drops.set(1 + value_drops.get())); - let closure_drops = Cell::new(0); - let guard = guard(value, |_| closure_drops.set(1 + closure_drops.get())); - assert_eq!(value_drops.get(), 0); - assert_eq!(closure_drops.get(), 0); - drop(guard); - assert_eq!(value_drops.get(), 1); - assert_eq!(closure_drops.get(), 1); - } - - #[cfg(feature = 
"use_std")] - #[test] - fn test_dropped_once_when_not_run() { - let value_drops = Cell::new(0); - let value = guard((), |()| value_drops.set(1 + value_drops.get())); - let captured_drops = Cell::new(0); - let captured = guard((), |()| captured_drops.set(1 + captured_drops.get())); - let closure_drops = Cell::new(0); - let guard = guard_on_unwind(value, |value| { - drop(value); - drop(captured); - closure_drops.set(1 + closure_drops.get()) - }); - assert_eq!(value_drops.get(), 0); - assert_eq!(captured_drops.get(), 0); - assert_eq!(closure_drops.get(), 0); - drop(guard); - assert_eq!(value_drops.get(), 1); - assert_eq!(captured_drops.get(), 1); - assert_eq!(closure_drops.get(), 0); - } - - #[test] - fn test_into_inner() { - let dropped = Cell::new(false); - let value = guard(42, |_| dropped.set(true)); - let guard = guard(value, |_| dropped.set(true)); - let inner = ScopeGuard::into_inner(guard); - assert_eq!(dropped.get(), false); - assert_eq!(*inner, 42); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/build.rs s390-tools-2.33.1/rust-vendor/serde_json/build.rs --- s390-tools-2.31.0/rust-vendor/serde_json/build.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,54 +0,0 @@ -use std::env; -use std::process::Command; -use std::str::{self, FromStr}; - -fn main() { - println!("cargo:rerun-if-changed=build.rs"); - - // Decide ideal limb width for arithmetic in the float parser. Refer to - // src/lexical/math.rs for where this has an effect. 
- let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap(); - match target_arch.as_str() { - "aarch64" | "mips64" | "powerpc64" | "x86_64" => { - println!("cargo:rustc-cfg=limb_width_64"); - } - _ => { - println!("cargo:rustc-cfg=limb_width_32"); - } - } - - let minor = match rustc_minor_version() { - Some(minor) => minor, - None => return, - }; - - // BTreeMap::get_key_value - // https://blog.rust-lang.org/2019/12/19/Rust-1.40.0.html#additions-to-the-standard-library - if minor < 40 { - println!("cargo:rustc-cfg=no_btreemap_get_key_value"); - } - - // BTreeMap::remove_entry - // https://blog.rust-lang.org/2020/07/16/Rust-1.45.0.html#library-changes - if minor < 45 { - println!("cargo:rustc-cfg=no_btreemap_remove_entry"); - } - - // BTreeMap::retain - // https://blog.rust-lang.org/2021/06/17/Rust-1.53.0.html#stabilized-apis - if minor < 53 { - println!("cargo:rustc-cfg=no_btreemap_retain"); - } -} - -fn rustc_minor_version() -> Option { - let rustc = env::var_os("RUSTC")?; - let output = Command::new(rustc).arg("--version").output().ok()?; - let version = str::from_utf8(&output.stdout).ok()?; - let mut pieces = version.split('.'); - if pieces.next() != Some("rustc 1") { - return None; - } - let next = pieces.next()?; - u32::from_str(next).ok() -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/serde_json/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/serde_json/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/Cargo.toml s390-tools-2.33.1/rust-vendor/serde_json/Cargo.toml --- s390-tools-2.31.0/rust-vendor/serde_json/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/serde_json/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,108 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.36" -name = "serde_json" -version = "1.0.99" -authors = [ - "Erick Tryzelaar ", - "David Tolnay ", -] -description = "A JSON serialization file format" -documentation = "https://docs.rs/serde_json" -readme = "README.md" -keywords = [ - "json", - "serde", - "serialization", -] -categories = [ - "encoding", - "parser-implementations", - "no-std", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/serde-rs/json" - -[package.metadata.docs.rs] -features = [ - "raw_value", - "unbounded_depth", -] -rustdoc-args = [ - "--cfg", - "docsrs", -] -targets = ["x86_64-unknown-linux-gnu"] - -[package.metadata.playground] -features = ["raw_value"] - -[lib] -doc-scrape-examples = false - -[dependencies.indexmap] -version = "2" -optional = true - -[dependencies.itoa] -version = "1.0" - -[dependencies.ryu] -version = "1.0" - -[dependencies.serde] -version = "1.0.100" -default-features = false - -[dev-dependencies.automod] -version = "1.0" - -[dev-dependencies.indoc] -version = "2.0" - -[dev-dependencies.ref-cast] -version = "1.0" - -[dev-dependencies.rustversion] -version = "1.0" - -[dev-dependencies.serde] -version = "1.0.100" -features = ["derive"] - -[dev-dependencies.serde_bytes] -version = "0.11" - -[dev-dependencies.serde_derive] -version = "1.0" - -[dev-dependencies.serde_stacker] -version = "0.1" - -[dev-dependencies.trybuild] -version = 
"1.0.49" -features = ["diff"] - -[features] -alloc = ["serde/alloc"] -arbitrary_precision = [] -default = ["std"] -float_roundtrip = [] -preserve_order = [ - "indexmap", - "std", -] -raw_value = [] -std = ["serde/std"] -unbounded_depth = [] diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/CONTRIBUTING.md s390-tools-2.33.1/rust-vendor/serde_json/CONTRIBUTING.md --- s390-tools-2.31.0/rust-vendor/serde_json/CONTRIBUTING.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/CONTRIBUTING.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -# Contributing to Serde - -Serde welcomes contribution from everyone in the form of suggestions, bug -reports, pull requests, and feedback. This document gives some guidance if you -are thinking of helping us. - -## Submitting bug reports and feature requests - -Serde development is spread across lots of repositories. In general, prefer to -open issues against the main [serde-rs/serde] repository unless the topic is -clearly specific to JSON. - -[serde-rs/serde]: https://github.com/serde-rs/serde - -When reporting a bug or asking for help, please include enough details so that -the people helping you can reproduce the behavior you are seeing. For some tips -on how to approach this, read about how to produce a [Minimal, Complete, and -Verifiable example]. - -[Minimal, Complete, and Verifiable example]: https://stackoverflow.com/help/mcve - -When making a feature request, please make it clear what problem you intend to -solve with the feature, any ideas for how Serde could support solving that -problem, any possible alternatives, and any disadvantages. - -## Running the test suite - -We encourage you to check that the test suite passes locally before submitting a -pull request with your changes. If anything does not pass, typically it will be -easier to iterate and fix it locally than waiting for the CI servers to run -tests for you. - -The test suite requires a nightly compiler. 
- -```sh -# Run the full test suite, including doc test and compile-tests -cargo test -``` - -## Conduct - -In all Serde-related forums, we follow the [Rust Code of Conduct]. For -escalation or moderation issues please contact Erick (erick.tryzelaar@gmail.com) -instead of the Rust moderation team. - -[Rust Code of Conduct]: https://www.rust-lang.org/policies/code-of-conduct diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/serde_json/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/serde_json/LICENSE-APACHE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/LICENSE-MIT s390-tools-2.33.1/rust-vendor/serde_json/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/serde_json/LICENSE-MIT 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/README.md s390-tools-2.33.1/rust-vendor/serde_json/README.md --- s390-tools-2.31.0/rust-vendor/serde_json/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,390 +0,0 @@ -# Serde JSON   [![Build Status]][actions] [![Latest Version]][crates.io] [![Rustc Version 1.36+]][rustc] - -[Build Status]: https://img.shields.io/github/actions/workflow/status/serde-rs/json/ci.yml?branch=master -[actions]: https://github.com/serde-rs/json/actions?query=branch%3Amaster -[Latest Version]: https://img.shields.io/crates/v/serde_json.svg -[crates.io]: https://crates.io/crates/serde\_json -[Rustc Version 1.36+]: https://img.shields.io/badge/rustc-1.36+-lightgray.svg -[rustc]: https://blog.rust-lang.org/2019/07/04/Rust-1.36.0.html - -**Serde is a framework for *ser*ializing and *de*serializing Rust data structures efficiently and generically.** - ---- - -```toml -[dependencies] -serde_json = "1.0" -``` - -You may be looking for: - -- [JSON API documentation](https://docs.rs/serde_json) -- [Serde API documentation](https://docs.rs/serde) -- [Detailed documentation about Serde](https://serde.rs/) -- [Setting up `#[derive(Serialize, Deserialize)]`](https://serde.rs/derive.html) -- [Release notes](https://github.com/serde-rs/json/releases) - -JSON is a ubiquitous open-standard format that uses human-readable text to -transmit data objects consisting of key-value pairs. - -```json -{ - "name": "John Doe", - "age": 43, - "address": { - "street": "10 Downing Street", - "city": "London" - }, - "phones": [ - "+44 1234567", - "+44 2345678" - ] -} -``` - -There are three common ways that you might find yourself needing to work with -JSON data in Rust. - - - **As text data.** An unprocessed string of JSON data that you receive on an - HTTP endpoint, read from a file, or prepare to send to a remote server. 
- - **As an untyped or loosely typed representation.** Maybe you want to check - that some JSON data is valid before passing it on, but without knowing the - structure of what it contains. Or you want to do very basic manipulations - like insert a key in a particular spot. - - **As a strongly typed Rust data structure.** When you expect all or most of - your data to conform to a particular structure and want to get real work done - without JSON's loosey-goosey nature tripping you up. - -Serde JSON provides efficient, flexible, safe ways of converting data between -each of these representations. - -## Operating on untyped JSON values - -Any valid JSON data can be manipulated in the following recursive enum -representation. This data structure is [`serde_json::Value`][value]. - -```rust -enum Value { - Null, - Bool(bool), - Number(Number), - String(String), - Array(Vec), - Object(Map), -} -``` - -A string of JSON data can be parsed into a `serde_json::Value` by the -[`serde_json::from_str`][from_str] function. There is also -[`from_slice`][from_slice] for parsing from a byte slice &[u8] and -[`from_reader`][from_reader] for parsing from any `io::Read` like a File or a -TCP stream. - - - -```rust -use serde_json::{Result, Value}; - -fn untyped_example() -> Result<()> { - // Some JSON input data as a &str. Maybe this comes from the user. - let data = r#" - { - "name": "John Doe", - "age": 43, - "phones": [ - "+44 1234567", - "+44 2345678" - ] - }"#; - - // Parse the string of data into serde_json::Value. - let v: Value = serde_json::from_str(data)?; - - // Access parts of the data by indexing with square brackets. - println!("Please call {} at the number {}", v["name"], v["phones"][0]); - - Ok(()) -} -``` - -The result of square bracket indexing like `v["name"]` is a borrow of the data -at that index, so the type is `&Value`. A JSON map can be indexed with string -keys, while a JSON array can be indexed with integer keys. 
If the type of the -data is not right for the type with which it is being indexed, or if a map does -not contain the key being indexed, or if the index into a vector is out of -bounds, the returned element is `Value::Null`. - -When a `Value` is printed, it is printed as a JSON string. So in the code above, -the output looks like `Please call "John Doe" at the number "+44 1234567"`. The -quotation marks appear because `v["name"]` is a `&Value` containing a JSON -string and its JSON representation is `"John Doe"`. Printing as a plain string -without quotation marks involves converting from a JSON string to a Rust string -with [`as_str()`] or avoiding the use of `Value` as described in the following -section. - -[`as_str()`]: https://docs.rs/serde_json/1/serde_json/enum.Value.html#method.as_str - -The `Value` representation is sufficient for very basic tasks but can be tedious -to work with for anything more significant. Error handling is verbose to -implement correctly, for example imagine trying to detect the presence of -unrecognized fields in the input data. The compiler is powerless to help you -when you make a mistake, for example imagine typoing `v["name"]` as `v["nmae"]` -in one of the dozens of places it is used in your code. - -## Parsing JSON as strongly typed data structures - -Serde provides a powerful way of mapping JSON data into Rust data structures -largely automatically. - -
- - - -
- -```rust -use serde::{Deserialize, Serialize}; -use serde_json::Result; - -#[derive(Serialize, Deserialize)] -struct Person { - name: String, - age: u8, - phones: Vec, -} - -fn typed_example() -> Result<()> { - // Some JSON input data as a &str. Maybe this comes from the user. - let data = r#" - { - "name": "John Doe", - "age": 43, - "phones": [ - "+44 1234567", - "+44 2345678" - ] - }"#; - - // Parse the string of data into a Person object. This is exactly the - // same function as the one that produced serde_json::Value above, but - // now we are asking it for a Person as output. - let p: Person = serde_json::from_str(data)?; - - // Do things just like with any other Rust data structure. - println!("Please call {} at the number {}", p.name, p.phones[0]); - - Ok(()) -} -``` - -This is the same `serde_json::from_str` function as before, but this time we -assign the return value to a variable of type `Person` so Serde will -automatically interpret the input data as a `Person` and produce informative -error messages if the layout does not conform to what a `Person` is expected to -look like. - -Any type that implements Serde's `Deserialize` trait can be deserialized this -way. This includes built-in Rust standard library types like `Vec` and -`HashMap`, as well as any structs or enums annotated with -`#[derive(Deserialize)]`. - -Once we have `p` of type `Person`, our IDE and the Rust compiler can help us use -it correctly like they do for any other Rust code. The IDE can autocomplete -field names to prevent typos, which was impossible in the `serde_json::Value` -representation. And the Rust compiler can check that when we write -`p.phones[0]`, then `p.phones` is guaranteed to be a `Vec` so indexing -into it makes sense and produces a `String`. - -The necessary setup for using Serde's derive macros is explained on the *[Using -derive]* page of the Serde site. 
- -[Using derive]: https://serde.rs/derive.html - -## Constructing JSON values - -Serde JSON provides a [`json!` macro][macro] to build `serde_json::Value` -objects with very natural JSON syntax. - -
- - - -
- -```rust -use serde_json::json; - -fn main() { - // The type of `john` is `serde_json::Value` - let john = json!({ - "name": "John Doe", - "age": 43, - "phones": [ - "+44 1234567", - "+44 2345678" - ] - }); - - println!("first phone number: {}", john["phones"][0]); - - // Convert to a string of JSON and print it out - println!("{}", john.to_string()); -} -``` - -The `Value::to_string()` function converts a `serde_json::Value` into a `String` -of JSON text. - -One neat thing about the `json!` macro is that variables and expressions can be -interpolated directly into the JSON value as you are building it. Serde will -check at compile time that the value you are interpolating is able to be -represented as JSON. - -
- - - -
- -```rust -let full_name = "John Doe"; -let age_last_year = 42; - -// The type of `john` is `serde_json::Value` -let john = json!({ - "name": full_name, - "age": age_last_year + 1, - "phones": [ - format!("+44 {}", random_phone()) - ] -}); -``` - -This is amazingly convenient, but we have the problem we had before with -`Value`: the IDE and Rust compiler cannot help us if we get it wrong. Serde JSON -provides a better way of serializing strongly-typed data structures into JSON -text. - -## Creating JSON by serializing data structures - -A data structure can be converted to a JSON string by -[`serde_json::to_string`][to_string]. There is also -[`serde_json::to_vec`][to_vec] which serializes to a `Vec` and -[`serde_json::to_writer`][to_writer] which serializes to any `io::Write` -such as a File or a TCP stream. - -
- - - -
- -```rust -use serde::{Deserialize, Serialize}; -use serde_json::Result; - -#[derive(Serialize, Deserialize)] -struct Address { - street: String, - city: String, -} - -fn print_an_address() -> Result<()> { - // Some data structure. - let address = Address { - street: "10 Downing Street".to_owned(), - city: "London".to_owned(), - }; - - // Serialize it to a JSON string. - let j = serde_json::to_string(&address)?; - - // Print, write to a file, or send to an HTTP server. - println!("{}", j); - - Ok(()) -} -``` - -Any type that implements Serde's `Serialize` trait can be serialized this way. -This includes built-in Rust standard library types like `Vec` and `HashMap`, as well as any structs or enums annotated with `#[derive(Serialize)]`. - -## Performance - -It is fast. You should expect in the ballpark of 500 to 1000 megabytes per -second deserialization and 600 to 900 megabytes per second serialization, -depending on the characteristics of your data. This is competitive with the -fastest C and C++ JSON libraries or even 30% faster for many use cases. -Benchmarks live in the [serde-rs/json-benchmark] repo. - -[serde-rs/json-benchmark]: https://github.com/serde-rs/json-benchmark - -## Getting help - -Serde is one of the most widely used Rust libraries, so any place that -Rustaceans congregate will be able to help you out. For chat, consider trying -the [#rust-questions] or [#rust-beginners] channels of the unofficial community -Discord (invite: ), the [#rust-usage] or -[#beginners] channels of the official Rust Project Discord (invite: -), or the [#general][zulip] stream in Zulip. For -asynchronous, consider the [\[rust\] tag on StackOverflow][stackoverflow], the -[/r/rust] subreddit which has a pinned weekly easy questions post, or the Rust -[Discourse forum][discourse]. It's acceptable to file a support issue in this -repo, but they tend not to get as many eyes as any of the above and may get -closed without a response after some time. 
- -[#rust-questions]: https://discord.com/channels/273534239310479360/274215136414400513 -[#rust-beginners]: https://discord.com/channels/273534239310479360/273541522815713281 -[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848 -[#beginners]: https://discord.com/channels/442252698964721669/448238009733742612 -[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general -[stackoverflow]: https://stackoverflow.com/questions/tagged/rust -[/r/rust]: https://www.reddit.com/r/rust -[discourse]: https://users.rust-lang.org - -## No-std support - -As long as there is a memory allocator, it is possible to use serde_json without -the rest of the Rust standard library. Disable the default "std" feature and -enable the "alloc" feature: - -```toml -[dependencies] -serde_json = { version = "1.0", default-features = false, features = ["alloc"] } -``` - -For JSON support in Serde without a memory allocator, please see the -[`serde-json-core`] crate. - -[`serde-json-core`]: https://github.com/rust-embedded-community/serde-json-core - -[value]: https://docs.rs/serde_json/1/serde_json/value/enum.Value.html -[from_str]: https://docs.rs/serde_json/1/serde_json/de/fn.from_str.html -[from_slice]: https://docs.rs/serde_json/1/serde_json/de/fn.from_slice.html -[from_reader]: https://docs.rs/serde_json/1/serde_json/de/fn.from_reader.html -[to_string]: https://docs.rs/serde_json/1/serde_json/ser/fn.to_string.html -[to_vec]: https://docs.rs/serde_json/1/serde_json/ser/fn.to_vec.html -[to_writer]: https://docs.rs/serde_json/1/serde_json/ser/fn.to_writer.html -[macro]: https://docs.rs/serde_json/1/serde_json/macro.json.html - -
- -#### License - - -Licensed under either of Apache License, Version -2.0 or MIT license at your option. - - -
- - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. - diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/de.rs s390-tools-2.33.1/rust-vendor/serde_json/src/de.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/de.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/de.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2606 +0,0 @@ -//! Deserialize JSON data to a Rust data structure. - -use crate::error::{Error, ErrorCode, Result}; -#[cfg(feature = "float_roundtrip")] -use crate::lexical; -use crate::number::Number; -use crate::read::{self, Fused, Reference}; -use alloc::string::String; -use alloc::vec::Vec; -#[cfg(feature = "float_roundtrip")] -use core::iter; -use core::iter::FusedIterator; -use core::marker::PhantomData; -use core::result; -use core::str::FromStr; -use serde::de::{self, Expected, Unexpected}; -use serde::forward_to_deserialize_any; - -#[cfg(feature = "arbitrary_precision")] -use crate::number::NumberDeserializer; - -pub use crate::read::{Read, SliceRead, StrRead}; - -#[cfg(feature = "std")] -pub use crate::read::IoRead; - -////////////////////////////////////////////////////////////////////////////// - -/// A structure that deserializes JSON into Rust values. -pub struct Deserializer { - read: R, - scratch: Vec, - remaining_depth: u8, - #[cfg(feature = "float_roundtrip")] - single_precision: bool, - #[cfg(feature = "unbounded_depth")] - disable_recursion_limit: bool, -} - -impl<'de, R> Deserializer -where - R: read::Read<'de>, -{ - /// Create a JSON deserializer from one of the possible serde_json input - /// sources. 
- /// - /// Typically it is more convenient to use one of these methods instead: - /// - /// - Deserializer::from_str - /// - Deserializer::from_slice - /// - Deserializer::from_reader - pub fn new(read: R) -> Self { - Deserializer { - read, - scratch: Vec::new(), - remaining_depth: 128, - #[cfg(feature = "float_roundtrip")] - single_precision: false, - #[cfg(feature = "unbounded_depth")] - disable_recursion_limit: false, - } - } -} - -#[cfg(feature = "std")] -impl Deserializer> -where - R: crate::io::Read, -{ - /// Creates a JSON deserializer from an `io::Read`. - /// - /// Reader-based deserializers do not support deserializing borrowed types - /// like `&str`, since the `std::io::Read` trait has no non-copying methods - /// -- everything it does involves copying bytes out of the data source. - pub fn from_reader(reader: R) -> Self { - Deserializer::new(read::IoRead::new(reader)) - } -} - -impl<'a> Deserializer> { - /// Creates a JSON deserializer from a `&[u8]`. - pub fn from_slice(bytes: &'a [u8]) -> Self { - Deserializer::new(read::SliceRead::new(bytes)) - } -} - -impl<'a> Deserializer> { - /// Creates a JSON deserializer from a `&str`. - pub fn from_str(s: &'a str) -> Self { - Deserializer::new(read::StrRead::new(s)) - } -} - -macro_rules! 
overflow { - ($a:ident * 10 + $b:ident, $c:expr) => { - match $c { - c => $a >= c / 10 && ($a > c / 10 || $b > c % 10), - } - }; -} - -pub(crate) enum ParserNumber { - F64(f64), - U64(u64), - I64(i64), - #[cfg(feature = "arbitrary_precision")] - String(String), -} - -impl ParserNumber { - fn visit<'de, V>(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - match self { - ParserNumber::F64(x) => visitor.visit_f64(x), - ParserNumber::U64(x) => visitor.visit_u64(x), - ParserNumber::I64(x) => visitor.visit_i64(x), - #[cfg(feature = "arbitrary_precision")] - ParserNumber::String(x) => visitor.visit_map(NumberDeserializer { number: x.into() }), - } - } - - fn invalid_type(self, exp: &dyn Expected) -> Error { - match self { - ParserNumber::F64(x) => de::Error::invalid_type(Unexpected::Float(x), exp), - ParserNumber::U64(x) => de::Error::invalid_type(Unexpected::Unsigned(x), exp), - ParserNumber::I64(x) => de::Error::invalid_type(Unexpected::Signed(x), exp), - #[cfg(feature = "arbitrary_precision")] - ParserNumber::String(_) => de::Error::invalid_type(Unexpected::Other("number"), exp), - } - } -} - -impl<'de, R: Read<'de>> Deserializer { - /// The `Deserializer::end` method should be called after a value has been fully deserialized. - /// This allows the `Deserializer` to validate that the input stream is at the end or that it - /// only has trailing whitespace. - pub fn end(&mut self) -> Result<()> { - match tri!(self.parse_whitespace()) { - Some(_) => Err(self.peek_error(ErrorCode::TrailingCharacters)), - None => Ok(()), - } - } - - /// Turn a JSON deserializer into an iterator over values of type T. - pub fn into_iter(self) -> StreamDeserializer<'de, R, T> - where - T: de::Deserialize<'de>, - { - // This cannot be an implementation of std::iter::IntoIterator because - // we need the caller to choose what T is. 
- let offset = self.read.byte_offset(); - StreamDeserializer { - de: self, - offset, - failed: false, - output: PhantomData, - lifetime: PhantomData, - } - } - - /// Parse arbitrarily deep JSON structures without any consideration for - /// overflowing the stack. - /// - /// You will want to provide some other way to protect against stack - /// overflows, such as by wrapping your Deserializer in the dynamically - /// growing stack adapter provided by the serde_stacker crate. Additionally - /// you will need to be careful around other recursive operations on the - /// parsed result which may overflow the stack after deserialization has - /// completed, including, but not limited to, Display and Debug and Drop - /// impls. - /// - /// *This method is only available if serde_json is built with the - /// `"unbounded_depth"` feature.* - /// - /// # Examples - /// - /// ``` - /// use serde::Deserialize; - /// use serde_json::Value; - /// - /// fn main() { - /// let mut json = String::new(); - /// for _ in 0..10000 { - /// json = format!("[{}]", json); - /// } - /// - /// let mut deserializer = serde_json::Deserializer::from_str(&json); - /// deserializer.disable_recursion_limit(); - /// let deserializer = serde_stacker::Deserializer::new(&mut deserializer); - /// let value = Value::deserialize(deserializer).unwrap(); - /// - /// carefully_drop_nested_arrays(value); - /// } - /// - /// fn carefully_drop_nested_arrays(value: Value) { - /// let mut stack = vec![value]; - /// while let Some(value) = stack.pop() { - /// if let Value::Array(array) = value { - /// stack.extend(array); - /// } - /// } - /// } - /// ``` - #[cfg(feature = "unbounded_depth")] - #[cfg_attr(docsrs, doc(cfg(feature = "unbounded_depth")))] - pub fn disable_recursion_limit(&mut self) { - self.disable_recursion_limit = true; - } - - fn peek(&mut self) -> Result> { - self.read.peek() - } - - fn peek_or_null(&mut self) -> Result { - Ok(tri!(self.peek()).unwrap_or(b'\x00')) - } - - fn eat_char(&mut self) { 
- self.read.discard(); - } - - fn next_char(&mut self) -> Result> { - self.read.next() - } - - fn next_char_or_null(&mut self) -> Result { - Ok(tri!(self.next_char()).unwrap_or(b'\x00')) - } - - /// Error caused by a byte from next_char(). - #[cold] - fn error(&self, reason: ErrorCode) -> Error { - let position = self.read.position(); - Error::syntax(reason, position.line, position.column) - } - - /// Error caused by a byte from peek(). - #[cold] - fn peek_error(&self, reason: ErrorCode) -> Error { - let position = self.read.peek_position(); - Error::syntax(reason, position.line, position.column) - } - - /// Returns the first non-whitespace byte without consuming it, or `None` if - /// EOF is encountered. - fn parse_whitespace(&mut self) -> Result> { - loop { - match tri!(self.peek()) { - Some(b' ') | Some(b'\n') | Some(b'\t') | Some(b'\r') => { - self.eat_char(); - } - other => { - return Ok(other); - } - } - } - } - - #[cold] - fn peek_invalid_type(&mut self, exp: &dyn Expected) -> Error { - let err = match self.peek_or_null().unwrap_or(b'\x00') { - b'n' => { - self.eat_char(); - if let Err(err) = self.parse_ident(b"ull") { - return err; - } - de::Error::invalid_type(Unexpected::Unit, exp) - } - b't' => { - self.eat_char(); - if let Err(err) = self.parse_ident(b"rue") { - return err; - } - de::Error::invalid_type(Unexpected::Bool(true), exp) - } - b'f' => { - self.eat_char(); - if let Err(err) = self.parse_ident(b"alse") { - return err; - } - de::Error::invalid_type(Unexpected::Bool(false), exp) - } - b'-' => { - self.eat_char(); - match self.parse_any_number(false) { - Ok(n) => n.invalid_type(exp), - Err(err) => return err, - } - } - b'0'..=b'9' => match self.parse_any_number(true) { - Ok(n) => n.invalid_type(exp), - Err(err) => return err, - }, - b'"' => { - self.eat_char(); - self.scratch.clear(); - match self.read.parse_str(&mut self.scratch) { - Ok(s) => de::Error::invalid_type(Unexpected::Str(&s), exp), - Err(err) => return err, - } - } - b'[' => 
de::Error::invalid_type(Unexpected::Seq, exp), - b'{' => de::Error::invalid_type(Unexpected::Map, exp), - _ => self.peek_error(ErrorCode::ExpectedSomeValue), - }; - - self.fix_position(err) - } - - fn deserialize_number(&mut self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b'-' => { - self.eat_char(); - tri!(self.parse_integer(false)).visit(visitor) - } - b'0'..=b'9' => tri!(self.parse_integer(true)).visit(visitor), - _ => Err(self.peek_invalid_type(&visitor)), - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - fn scan_integer128(&mut self, buf: &mut String) -> Result<()> { - match tri!(self.next_char_or_null()) { - b'0' => { - buf.push('0'); - // There can be only one leading '0'. - match tri!(self.peek_or_null()) { - b'0'..=b'9' => Err(self.peek_error(ErrorCode::InvalidNumber)), - _ => Ok(()), - } - } - c @ b'1'..=b'9' => { - buf.push(c as char); - while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - buf.push(c as char); - } - Ok(()) - } - _ => Err(self.error(ErrorCode::InvalidNumber)), - } - } - - #[cold] - fn fix_position(&self, err: Error) -> Error { - err.fix_position(move |code| self.error(code)) - } - - fn parse_ident(&mut self, ident: &[u8]) -> Result<()> { - for expected in ident { - match tri!(self.next_char()) { - None => { - return Err(self.error(ErrorCode::EofWhileParsingValue)); - } - Some(next) => { - if next != *expected { - return Err(self.error(ErrorCode::ExpectedSomeIdent)); - } - } - } - } - - Ok(()) - } - - fn parse_integer(&mut self, positive: bool) -> Result { - let next = match tri!(self.next_char()) { - Some(b) => b, - None => { - return Err(self.error(ErrorCode::EofWhileParsingValue)); - } - }; - - match next { - b'0' => { - // There can be only one leading '0'. 
- match tri!(self.peek_or_null()) { - b'0'..=b'9' => Err(self.peek_error(ErrorCode::InvalidNumber)), - _ => self.parse_number(positive, 0), - } - } - c @ b'1'..=b'9' => { - let mut significand = (c - b'0') as u64; - - loop { - match tri!(self.peek_or_null()) { - c @ b'0'..=b'9' => { - let digit = (c - b'0') as u64; - - // We need to be careful with overflow. If we can, - // try to keep the number as a `u64` until we grow - // too large. At that point, switch to parsing the - // value as a `f64`. - if overflow!(significand * 10 + digit, u64::max_value()) { - return Ok(ParserNumber::F64(tri!( - self.parse_long_integer(positive, significand), - ))); - } - - self.eat_char(); - significand = significand * 10 + digit; - } - _ => { - return self.parse_number(positive, significand); - } - } - } - } - _ => Err(self.error(ErrorCode::InvalidNumber)), - } - } - - fn parse_number(&mut self, positive: bool, significand: u64) -> Result { - Ok(match tri!(self.peek_or_null()) { - b'.' => ParserNumber::F64(tri!(self.parse_decimal(positive, significand, 0))), - b'e' | b'E' => ParserNumber::F64(tri!(self.parse_exponent(positive, significand, 0))), - _ => { - if positive { - ParserNumber::U64(significand) - } else { - let neg = (significand as i64).wrapping_neg(); - - // Convert into a float if we underflow, or on `-0`. 
- if neg >= 0 { - ParserNumber::F64(-(significand as f64)) - } else { - ParserNumber::I64(neg) - } - } - } - }) - } - - fn parse_decimal( - &mut self, - positive: bool, - mut significand: u64, - exponent_before_decimal_point: i32, - ) -> Result { - self.eat_char(); - - let mut exponent_after_decimal_point = 0; - while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { - let digit = (c - b'0') as u64; - - if overflow!(significand * 10 + digit, u64::max_value()) { - let exponent = exponent_before_decimal_point + exponent_after_decimal_point; - return self.parse_decimal_overflow(positive, significand, exponent); - } - - self.eat_char(); - significand = significand * 10 + digit; - exponent_after_decimal_point -= 1; - } - - // Error if there is not at least one digit after the decimal point. - if exponent_after_decimal_point == 0 { - match tri!(self.peek()) { - Some(_) => return Err(self.peek_error(ErrorCode::InvalidNumber)), - None => return Err(self.peek_error(ErrorCode::EofWhileParsingValue)), - } - } - - let exponent = exponent_before_decimal_point + exponent_after_decimal_point; - match tri!(self.peek_or_null()) { - b'e' | b'E' => self.parse_exponent(positive, significand, exponent), - _ => self.f64_from_parts(positive, significand, exponent), - } - } - - fn parse_exponent( - &mut self, - positive: bool, - significand: u64, - starting_exp: i32, - ) -> Result { - self.eat_char(); - - let positive_exp = match tri!(self.peek_or_null()) { - b'+' => { - self.eat_char(); - true - } - b'-' => { - self.eat_char(); - false - } - _ => true, - }; - - let next = match tri!(self.next_char()) { - Some(b) => b, - None => { - return Err(self.error(ErrorCode::EofWhileParsingValue)); - } - }; - - // Make sure a digit follows the exponent place. 
- let mut exp = match next { - c @ b'0'..=b'9' => (c - b'0') as i32, - _ => { - return Err(self.error(ErrorCode::InvalidNumber)); - } - }; - - while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - let digit = (c - b'0') as i32; - - if overflow!(exp * 10 + digit, i32::max_value()) { - let zero_significand = significand == 0; - return self.parse_exponent_overflow(positive, zero_significand, positive_exp); - } - - exp = exp * 10 + digit; - } - - let final_exp = if positive_exp { - starting_exp.saturating_add(exp) - } else { - starting_exp.saturating_sub(exp) - }; - - self.f64_from_parts(positive, significand, final_exp) - } - - #[cfg(feature = "float_roundtrip")] - fn f64_from_parts(&mut self, positive: bool, significand: u64, exponent: i32) -> Result { - let f = if self.single_precision { - lexical::parse_concise_float::(significand, exponent) as f64 - } else { - lexical::parse_concise_float::(significand, exponent) - }; - - if f.is_infinite() { - Err(self.error(ErrorCode::NumberOutOfRange)) - } else { - Ok(if positive { f } else { -f }) - } - } - - #[cfg(not(feature = "float_roundtrip"))] - fn f64_from_parts( - &mut self, - positive: bool, - significand: u64, - mut exponent: i32, - ) -> Result { - let mut f = significand as f64; - loop { - match POW10.get(exponent.wrapping_abs() as usize) { - Some(&pow) => { - if exponent >= 0 { - f *= pow; - if f.is_infinite() { - return Err(self.error(ErrorCode::NumberOutOfRange)); - } - } else { - f /= pow; - } - break; - } - None => { - if f == 0.0 { - break; - } - if exponent >= 0 { - return Err(self.error(ErrorCode::NumberOutOfRange)); - } - f /= 1e308; - exponent += 308; - } - } - } - Ok(if positive { f } else { -f }) - } - - #[cfg(feature = "float_roundtrip")] - #[cold] - #[inline(never)] - fn parse_long_integer(&mut self, positive: bool, partial_significand: u64) -> Result { - // To deserialize floats we'll first push the integer and fraction - // parts, both as byte strings, into the scratch buffer 
and then feed - // both slices to lexical's parser. For example if the input is - // `12.34e5` we'll push b"1234" into scratch and then pass b"12" and - // b"34" to lexical. `integer_end` will be used to track where to split - // the scratch buffer. - // - // Note that lexical expects the integer part to contain *no* leading - // zeroes and the fraction part to contain *no* trailing zeroes. The - // first requirement is already handled by the integer parsing logic. - // The second requirement will be enforced just before passing the - // slices to lexical in f64_long_from_parts. - self.scratch.clear(); - self.scratch - .extend_from_slice(itoa::Buffer::new().format(partial_significand).as_bytes()); - - loop { - match tri!(self.peek_or_null()) { - c @ b'0'..=b'9' => { - self.scratch.push(c); - self.eat_char(); - } - b'.' => { - self.eat_char(); - return self.parse_long_decimal(positive, self.scratch.len()); - } - b'e' | b'E' => { - return self.parse_long_exponent(positive, self.scratch.len()); - } - _ => { - return self.f64_long_from_parts(positive, self.scratch.len(), 0); - } - } - } - } - - #[cfg(not(feature = "float_roundtrip"))] - #[cold] - #[inline(never)] - fn parse_long_integer(&mut self, positive: bool, significand: u64) -> Result { - let mut exponent = 0; - loop { - match tri!(self.peek_or_null()) { - b'0'..=b'9' => { - self.eat_char(); - // This could overflow... if your integer is gigabytes long. - // Ignore that possibility. - exponent += 1; - } - b'.' 
=> { - return self.parse_decimal(positive, significand, exponent); - } - b'e' | b'E' => { - return self.parse_exponent(positive, significand, exponent); - } - _ => { - return self.f64_from_parts(positive, significand, exponent); - } - } - } - } - - #[cfg(feature = "float_roundtrip")] - #[cold] - fn parse_long_decimal(&mut self, positive: bool, integer_end: usize) -> Result { - let mut at_least_one_digit = integer_end < self.scratch.len(); - while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { - self.scratch.push(c); - self.eat_char(); - at_least_one_digit = true; - } - - if !at_least_one_digit { - match tri!(self.peek()) { - Some(_) => return Err(self.peek_error(ErrorCode::InvalidNumber)), - None => return Err(self.peek_error(ErrorCode::EofWhileParsingValue)), - } - } - - match tri!(self.peek_or_null()) { - b'e' | b'E' => self.parse_long_exponent(positive, integer_end), - _ => self.f64_long_from_parts(positive, integer_end, 0), - } - } - - #[cfg(feature = "float_roundtrip")] - fn parse_long_exponent(&mut self, positive: bool, integer_end: usize) -> Result { - self.eat_char(); - - let positive_exp = match tri!(self.peek_or_null()) { - b'+' => { - self.eat_char(); - true - } - b'-' => { - self.eat_char(); - false - } - _ => true, - }; - - let next = match tri!(self.next_char()) { - Some(b) => b, - None => { - return Err(self.error(ErrorCode::EofWhileParsingValue)); - } - }; - - // Make sure a digit follows the exponent place. 
- let mut exp = match next { - c @ b'0'..=b'9' => (c - b'0') as i32, - _ => { - return Err(self.error(ErrorCode::InvalidNumber)); - } - }; - - while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - let digit = (c - b'0') as i32; - - if overflow!(exp * 10 + digit, i32::max_value()) { - let zero_significand = self.scratch.iter().all(|&digit| digit == b'0'); - return self.parse_exponent_overflow(positive, zero_significand, positive_exp); - } - - exp = exp * 10 + digit; - } - - let final_exp = if positive_exp { exp } else { -exp }; - - self.f64_long_from_parts(positive, integer_end, final_exp) - } - - // This cold code should not be inlined into the middle of the hot - // decimal-parsing loop above. - #[cfg(feature = "float_roundtrip")] - #[cold] - #[inline(never)] - fn parse_decimal_overflow( - &mut self, - positive: bool, - significand: u64, - exponent: i32, - ) -> Result { - let mut buffer = itoa::Buffer::new(); - let significand = buffer.format(significand); - let fraction_digits = -exponent as usize; - self.scratch.clear(); - if let Some(zeros) = fraction_digits.checked_sub(significand.len() + 1) { - self.scratch.extend(iter::repeat(b'0').take(zeros + 1)); - } - self.scratch.extend_from_slice(significand.as_bytes()); - let integer_end = self.scratch.len() - fraction_digits; - self.parse_long_decimal(positive, integer_end) - } - - #[cfg(not(feature = "float_roundtrip"))] - #[cold] - #[inline(never)] - fn parse_decimal_overflow( - &mut self, - positive: bool, - significand: u64, - exponent: i32, - ) -> Result { - // The next multiply/add would overflow, so just ignore all further - // digits. 
- while let b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - } - - match tri!(self.peek_or_null()) { - b'e' | b'E' => self.parse_exponent(positive, significand, exponent), - _ => self.f64_from_parts(positive, significand, exponent), - } - } - - // This cold code should not be inlined into the middle of the hot - // exponent-parsing loop above. - #[cold] - #[inline(never)] - fn parse_exponent_overflow( - &mut self, - positive: bool, - zero_significand: bool, - positive_exp: bool, - ) -> Result { - // Error instead of +/- infinity. - if !zero_significand && positive_exp { - return Err(self.error(ErrorCode::NumberOutOfRange)); - } - - while let b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - } - Ok(if positive { 0.0 } else { -0.0 }) - } - - #[cfg(feature = "float_roundtrip")] - fn f64_long_from_parts( - &mut self, - positive: bool, - integer_end: usize, - exponent: i32, - ) -> Result { - let integer = &self.scratch[..integer_end]; - let fraction = &self.scratch[integer_end..]; - - let f = if self.single_precision { - lexical::parse_truncated_float::(integer, fraction, exponent) as f64 - } else { - lexical::parse_truncated_float::(integer, fraction, exponent) - }; - - if f.is_infinite() { - Err(self.error(ErrorCode::NumberOutOfRange)) - } else { - Ok(if positive { f } else { -f }) - } - } - - fn parse_any_signed_number(&mut self) -> Result { - let peek = match tri!(self.peek()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b'-' => { - self.eat_char(); - self.parse_any_number(false) - } - b'0'..=b'9' => self.parse_any_number(true), - _ => Err(self.peek_error(ErrorCode::InvalidNumber)), - }; - - let value = match tri!(self.peek()) { - Some(_) => Err(self.peek_error(ErrorCode::InvalidNumber)), - None => value, - }; - - match value { - Ok(value) => Ok(value), - // The de::Error impl creates errors with unknown line and column. 
- // Fill in the position here by looking at the current index in the - // input. There is no way to tell whether this should call `error` - // or `peek_error` so pick the one that seems correct more often. - // Worst case, the position is off by one character. - Err(err) => Err(self.fix_position(err)), - } - } - - #[cfg(not(feature = "arbitrary_precision"))] - fn parse_any_number(&mut self, positive: bool) -> Result { - self.parse_integer(positive) - } - - #[cfg(feature = "arbitrary_precision")] - fn parse_any_number(&mut self, positive: bool) -> Result { - let mut buf = String::with_capacity(16); - if !positive { - buf.push('-'); - } - self.scan_integer(&mut buf)?; - if positive { - if let Ok(unsigned) = buf.parse() { - return Ok(ParserNumber::U64(unsigned)); - } - } else { - if let Ok(signed) = buf.parse() { - return Ok(ParserNumber::I64(signed)); - } - } - Ok(ParserNumber::String(buf)) - } - - #[cfg(feature = "arbitrary_precision")] - fn scan_or_eof(&mut self, buf: &mut String) -> Result { - match tri!(self.next_char()) { - Some(b) => { - buf.push(b as char); - Ok(b) - } - None => Err(self.error(ErrorCode::EofWhileParsingValue)), - } - } - - #[cfg(feature = "arbitrary_precision")] - fn scan_integer(&mut self, buf: &mut String) -> Result<()> { - match tri!(self.scan_or_eof(buf)) { - b'0' => { - // There can be only one leading '0'. - match tri!(self.peek_or_null()) { - b'0'..=b'9' => Err(self.peek_error(ErrorCode::InvalidNumber)), - _ => self.scan_number(buf), - } - } - b'1'..=b'9' => loop { - match tri!(self.peek_or_null()) { - c @ b'0'..=b'9' => { - self.eat_char(); - buf.push(c as char); - } - _ => { - return self.scan_number(buf); - } - } - }, - _ => Err(self.error(ErrorCode::InvalidNumber)), - } - } - - #[cfg(feature = "arbitrary_precision")] - fn scan_number(&mut self, buf: &mut String) -> Result<()> { - match tri!(self.peek_or_null()) { - b'.' 
=> self.scan_decimal(buf), - e @ b'e' | e @ b'E' => self.scan_exponent(e as char, buf), - _ => Ok(()), - } - } - - #[cfg(feature = "arbitrary_precision")] - fn scan_decimal(&mut self, buf: &mut String) -> Result<()> { - self.eat_char(); - buf.push('.'); - - let mut at_least_one_digit = false; - while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - buf.push(c as char); - at_least_one_digit = true; - } - - if !at_least_one_digit { - match tri!(self.peek()) { - Some(_) => return Err(self.peek_error(ErrorCode::InvalidNumber)), - None => return Err(self.peek_error(ErrorCode::EofWhileParsingValue)), - } - } - - match tri!(self.peek_or_null()) { - e @ b'e' | e @ b'E' => self.scan_exponent(e as char, buf), - _ => Ok(()), - } - } - - #[cfg(feature = "arbitrary_precision")] - fn scan_exponent(&mut self, e: char, buf: &mut String) -> Result<()> { - self.eat_char(); - buf.push(e); - - match tri!(self.peek_or_null()) { - b'+' => { - self.eat_char(); - buf.push('+'); - } - b'-' => { - self.eat_char(); - buf.push('-'); - } - _ => {} - } - - // Make sure a digit follows the exponent place. 
- match tri!(self.scan_or_eof(buf)) { - b'0'..=b'9' => {} - _ => { - return Err(self.error(ErrorCode::InvalidNumber)); - } - } - - while let c @ b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - buf.push(c as char); - } - - Ok(()) - } - - fn parse_object_colon(&mut self) -> Result<()> { - match tri!(self.parse_whitespace()) { - Some(b':') => { - self.eat_char(); - Ok(()) - } - Some(_) => Err(self.peek_error(ErrorCode::ExpectedColon)), - None => Err(self.peek_error(ErrorCode::EofWhileParsingObject)), - } - } - - fn end_seq(&mut self) -> Result<()> { - match tri!(self.parse_whitespace()) { - Some(b']') => { - self.eat_char(); - Ok(()) - } - Some(b',') => { - self.eat_char(); - match self.parse_whitespace() { - Ok(Some(b']')) => Err(self.peek_error(ErrorCode::TrailingComma)), - _ => Err(self.peek_error(ErrorCode::TrailingCharacters)), - } - } - Some(_) => Err(self.peek_error(ErrorCode::TrailingCharacters)), - None => Err(self.peek_error(ErrorCode::EofWhileParsingList)), - } - } - - fn end_map(&mut self) -> Result<()> { - match tri!(self.parse_whitespace()) { - Some(b'}') => { - self.eat_char(); - Ok(()) - } - Some(b',') => Err(self.peek_error(ErrorCode::TrailingComma)), - Some(_) => Err(self.peek_error(ErrorCode::TrailingCharacters)), - None => Err(self.peek_error(ErrorCode::EofWhileParsingObject)), - } - } - - fn ignore_value(&mut self) -> Result<()> { - self.scratch.clear(); - let mut enclosing = None; - - loop { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let frame = match peek { - b'n' => { - self.eat_char(); - tri!(self.parse_ident(b"ull")); - None - } - b't' => { - self.eat_char(); - tri!(self.parse_ident(b"rue")); - None - } - b'f' => { - self.eat_char(); - tri!(self.parse_ident(b"alse")); - None - } - b'-' => { - self.eat_char(); - tri!(self.ignore_integer()); - None - } - b'0'..=b'9' => { - tri!(self.ignore_integer()); - None - } - b'"' 
=> { - self.eat_char(); - tri!(self.read.ignore_str()); - None - } - frame @ b'[' | frame @ b'{' => { - self.scratch.extend(enclosing.take()); - self.eat_char(); - Some(frame) - } - _ => return Err(self.peek_error(ErrorCode::ExpectedSomeValue)), - }; - - let (mut accept_comma, mut frame) = match frame { - Some(frame) => (false, frame), - None => match enclosing.take() { - Some(frame) => (true, frame), - None => match self.scratch.pop() { - Some(frame) => (true, frame), - None => return Ok(()), - }, - }, - }; - - loop { - match tri!(self.parse_whitespace()) { - Some(b',') if accept_comma => { - self.eat_char(); - break; - } - Some(b']') if frame == b'[' => {} - Some(b'}') if frame == b'{' => {} - Some(_) => { - if accept_comma { - return Err(self.peek_error(match frame { - b'[' => ErrorCode::ExpectedListCommaOrEnd, - b'{' => ErrorCode::ExpectedObjectCommaOrEnd, - _ => unreachable!(), - })); - } else { - break; - } - } - None => { - return Err(self.peek_error(match frame { - b'[' => ErrorCode::EofWhileParsingList, - b'{' => ErrorCode::EofWhileParsingObject, - _ => unreachable!(), - })); - } - } - - self.eat_char(); - frame = match self.scratch.pop() { - Some(frame) => frame, - None => return Ok(()), - }; - accept_comma = true; - } - - if frame == b'{' { - match tri!(self.parse_whitespace()) { - Some(b'"') => self.eat_char(), - Some(_) => return Err(self.peek_error(ErrorCode::KeyMustBeAString)), - None => return Err(self.peek_error(ErrorCode::EofWhileParsingObject)), - } - tri!(self.read.ignore_str()); - match tri!(self.parse_whitespace()) { - Some(b':') => self.eat_char(), - Some(_) => return Err(self.peek_error(ErrorCode::ExpectedColon)), - None => return Err(self.peek_error(ErrorCode::EofWhileParsingObject)), - } - } - - enclosing = Some(frame); - } - } - - fn ignore_integer(&mut self) -> Result<()> { - match tri!(self.next_char_or_null()) { - b'0' => { - // There can be only one leading '0'. 
- if let b'0'..=b'9' = tri!(self.peek_or_null()) { - return Err(self.peek_error(ErrorCode::InvalidNumber)); - } - } - b'1'..=b'9' => { - while let b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - } - } - _ => { - return Err(self.error(ErrorCode::InvalidNumber)); - } - } - - match tri!(self.peek_or_null()) { - b'.' => self.ignore_decimal(), - b'e' | b'E' => self.ignore_exponent(), - _ => Ok(()), - } - } - - fn ignore_decimal(&mut self) -> Result<()> { - self.eat_char(); - - let mut at_least_one_digit = false; - while let b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - at_least_one_digit = true; - } - - if !at_least_one_digit { - return Err(self.peek_error(ErrorCode::InvalidNumber)); - } - - match tri!(self.peek_or_null()) { - b'e' | b'E' => self.ignore_exponent(), - _ => Ok(()), - } - } - - fn ignore_exponent(&mut self) -> Result<()> { - self.eat_char(); - - match tri!(self.peek_or_null()) { - b'+' | b'-' => self.eat_char(), - _ => {} - } - - // Make sure a digit follows the exponent place. 
- match tri!(self.next_char_or_null()) { - b'0'..=b'9' => {} - _ => { - return Err(self.error(ErrorCode::InvalidNumber)); - } - } - - while let b'0'..=b'9' = tri!(self.peek_or_null()) { - self.eat_char(); - } - - Ok(()) - } - - #[cfg(feature = "raw_value")] - fn deserialize_raw_value(&mut self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.parse_whitespace()?; - self.read.begin_raw_buffering(); - self.ignore_value()?; - self.read.end_raw_buffering(visitor) - } -} - -impl FromStr for Number { - type Err = Error; - - fn from_str(s: &str) -> result::Result { - Deserializer::from_str(s) - .parse_any_signed_number() - .map(Into::into) - } -} - -#[cfg(not(feature = "float_roundtrip"))] -static POW10: [f64; 309] = [ - 1e000, 1e001, 1e002, 1e003, 1e004, 1e005, 1e006, 1e007, 1e008, 1e009, // - 1e010, 1e011, 1e012, 1e013, 1e014, 1e015, 1e016, 1e017, 1e018, 1e019, // - 1e020, 1e021, 1e022, 1e023, 1e024, 1e025, 1e026, 1e027, 1e028, 1e029, // - 1e030, 1e031, 1e032, 1e033, 1e034, 1e035, 1e036, 1e037, 1e038, 1e039, // - 1e040, 1e041, 1e042, 1e043, 1e044, 1e045, 1e046, 1e047, 1e048, 1e049, // - 1e050, 1e051, 1e052, 1e053, 1e054, 1e055, 1e056, 1e057, 1e058, 1e059, // - 1e060, 1e061, 1e062, 1e063, 1e064, 1e065, 1e066, 1e067, 1e068, 1e069, // - 1e070, 1e071, 1e072, 1e073, 1e074, 1e075, 1e076, 1e077, 1e078, 1e079, // - 1e080, 1e081, 1e082, 1e083, 1e084, 1e085, 1e086, 1e087, 1e088, 1e089, // - 1e090, 1e091, 1e092, 1e093, 1e094, 1e095, 1e096, 1e097, 1e098, 1e099, // - 1e100, 1e101, 1e102, 1e103, 1e104, 1e105, 1e106, 1e107, 1e108, 1e109, // - 1e110, 1e111, 1e112, 1e113, 1e114, 1e115, 1e116, 1e117, 1e118, 1e119, // - 1e120, 1e121, 1e122, 1e123, 1e124, 1e125, 1e126, 1e127, 1e128, 1e129, // - 1e130, 1e131, 1e132, 1e133, 1e134, 1e135, 1e136, 1e137, 1e138, 1e139, // - 1e140, 1e141, 1e142, 1e143, 1e144, 1e145, 1e146, 1e147, 1e148, 1e149, // - 1e150, 1e151, 1e152, 1e153, 1e154, 1e155, 1e156, 1e157, 1e158, 1e159, // - 1e160, 1e161, 1e162, 1e163, 1e164, 1e165, 1e166, 1e167, 
1e168, 1e169, // - 1e170, 1e171, 1e172, 1e173, 1e174, 1e175, 1e176, 1e177, 1e178, 1e179, // - 1e180, 1e181, 1e182, 1e183, 1e184, 1e185, 1e186, 1e187, 1e188, 1e189, // - 1e190, 1e191, 1e192, 1e193, 1e194, 1e195, 1e196, 1e197, 1e198, 1e199, // - 1e200, 1e201, 1e202, 1e203, 1e204, 1e205, 1e206, 1e207, 1e208, 1e209, // - 1e210, 1e211, 1e212, 1e213, 1e214, 1e215, 1e216, 1e217, 1e218, 1e219, // - 1e220, 1e221, 1e222, 1e223, 1e224, 1e225, 1e226, 1e227, 1e228, 1e229, // - 1e230, 1e231, 1e232, 1e233, 1e234, 1e235, 1e236, 1e237, 1e238, 1e239, // - 1e240, 1e241, 1e242, 1e243, 1e244, 1e245, 1e246, 1e247, 1e248, 1e249, // - 1e250, 1e251, 1e252, 1e253, 1e254, 1e255, 1e256, 1e257, 1e258, 1e259, // - 1e260, 1e261, 1e262, 1e263, 1e264, 1e265, 1e266, 1e267, 1e268, 1e269, // - 1e270, 1e271, 1e272, 1e273, 1e274, 1e275, 1e276, 1e277, 1e278, 1e279, // - 1e280, 1e281, 1e282, 1e283, 1e284, 1e285, 1e286, 1e287, 1e288, 1e289, // - 1e290, 1e291, 1e292, 1e293, 1e294, 1e295, 1e296, 1e297, 1e298, 1e299, // - 1e300, 1e301, 1e302, 1e303, 1e304, 1e305, 1e306, 1e307, 1e308, -]; - -macro_rules! deserialize_number { - ($method:ident) => { - fn $method(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_number(visitor) - } - }; -} - -#[cfg(not(feature = "unbounded_depth"))] -macro_rules! if_checking_recursion_limit { - ($($body:tt)*) => { - $($body)* - }; -} - -#[cfg(feature = "unbounded_depth")] -macro_rules! if_checking_recursion_limit { - ($this:ident $($body:tt)*) => { - if !$this.disable_recursion_limit { - $this $($body)* - } - }; -} - -macro_rules! check_recursion { - ($this:ident $($body:tt)*) => { - if_checking_recursion_limit! { - $this.remaining_depth -= 1; - if $this.remaining_depth == 0 { - return Err($this.peek_error(ErrorCode::RecursionLimitExceeded)); - } - } - - $this $($body)* - - if_checking_recursion_limit! 
{ - $this.remaining_depth += 1; - } - }; -} - -impl<'de, 'a, R: Read<'de>> de::Deserializer<'de> for &'a mut Deserializer { - type Error = Error; - - #[inline] - fn deserialize_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b'n' => { - self.eat_char(); - tri!(self.parse_ident(b"ull")); - visitor.visit_unit() - } - b't' => { - self.eat_char(); - tri!(self.parse_ident(b"rue")); - visitor.visit_bool(true) - } - b'f' => { - self.eat_char(); - tri!(self.parse_ident(b"alse")); - visitor.visit_bool(false) - } - b'-' => { - self.eat_char(); - tri!(self.parse_any_number(false)).visit(visitor) - } - b'0'..=b'9' => tri!(self.parse_any_number(true)).visit(visitor), - b'"' => { - self.eat_char(); - self.scratch.clear(); - match tri!(self.read.parse_str(&mut self.scratch)) { - Reference::Borrowed(s) => visitor.visit_borrowed_str(s), - Reference::Copied(s) => visitor.visit_str(s), - } - } - b'[' => { - check_recursion! { - self.eat_char(); - let ret = visitor.visit_seq(SeqAccess::new(self)); - } - - match (ret, self.end_seq()) { - (Ok(ret), Ok(())) => Ok(ret), - (Err(err), _) | (_, Err(err)) => Err(err), - } - } - b'{' => { - check_recursion! { - self.eat_char(); - let ret = visitor.visit_map(MapAccess::new(self)); - } - - match (ret, self.end_map()) { - (Ok(ret), Ok(())) => Ok(ret), - (Err(err), _) | (_, Err(err)) => Err(err), - } - } - _ => Err(self.peek_error(ErrorCode::ExpectedSomeValue)), - }; - - match value { - Ok(value) => Ok(value), - // The de::Error impl creates errors with unknown line and column. - // Fill in the position here by looking at the current index in the - // input. There is no way to tell whether this should call `error` - // or `peek_error` so pick the one that seems correct more often. - // Worst case, the position is off by one character. 
- Err(err) => Err(self.fix_position(err)), - } - } - - fn deserialize_bool(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b't' => { - self.eat_char(); - tri!(self.parse_ident(b"rue")); - visitor.visit_bool(true) - } - b'f' => { - self.eat_char(); - tri!(self.parse_ident(b"alse")); - visitor.visit_bool(false) - } - _ => Err(self.peek_invalid_type(&visitor)), - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - deserialize_number!(deserialize_i8); - deserialize_number!(deserialize_i16); - deserialize_number!(deserialize_i32); - deserialize_number!(deserialize_i64); - deserialize_number!(deserialize_u8); - deserialize_number!(deserialize_u16); - deserialize_number!(deserialize_u32); - deserialize_number!(deserialize_u64); - #[cfg(not(feature = "float_roundtrip"))] - deserialize_number!(deserialize_f32); - deserialize_number!(deserialize_f64); - - #[cfg(feature = "float_roundtrip")] - fn deserialize_f32(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.single_precision = true; - let val = self.deserialize_number(visitor); - self.single_precision = false; - val - } - - fn deserialize_i128(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - let mut buf = String::new(); - - match tri!(self.parse_whitespace()) { - Some(b'-') => { - self.eat_char(); - buf.push('-'); - } - Some(_) => {} - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - tri!(self.scan_integer128(&mut buf)); - - let value = match buf.parse() { - Ok(int) => visitor.visit_i128(int), - Err(_) => { - return Err(self.error(ErrorCode::NumberOutOfRange)); - } - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - fn deserialize_u128(self, visitor: V) -> 
Result - where - V: de::Visitor<'de>, - { - match tri!(self.parse_whitespace()) { - Some(b'-') => { - return Err(self.peek_error(ErrorCode::NumberOutOfRange)); - } - Some(_) => {} - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - } - - let mut buf = String::new(); - tri!(self.scan_integer128(&mut buf)); - - let value = match buf.parse() { - Ok(int) => visitor.visit_u128(int), - Err(_) => { - return Err(self.error(ErrorCode::NumberOutOfRange)); - } - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - fn deserialize_char(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_str(visitor) - } - - fn deserialize_str(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b'"' => { - self.eat_char(); - self.scratch.clear(); - match tri!(self.read.parse_str(&mut self.scratch)) { - Reference::Borrowed(s) => visitor.visit_borrowed_str(s), - Reference::Copied(s) => visitor.visit_str(s), - } - } - _ => Err(self.peek_invalid_type(&visitor)), - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - fn deserialize_string(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_str(visitor) - } - - /// Parses a JSON string as bytes. Note that this function does not check - /// whether the bytes represent a valid UTF-8 string. 
- /// - /// The relevant part of the JSON specification is Section 8.2 of [RFC - /// 7159]: - /// - /// > When all the strings represented in a JSON text are composed entirely - /// > of Unicode characters (however escaped), then that JSON text is - /// > interoperable in the sense that all software implementations that - /// > parse it will agree on the contents of names and of string values in - /// > objects and arrays. - /// > - /// > However, the ABNF in this specification allows member names and string - /// > values to contain bit sequences that cannot encode Unicode characters; - /// > for example, "\uDEAD" (a single unpaired UTF-16 surrogate). Instances - /// > of this have been observed, for example, when a library truncates a - /// > UTF-16 string without checking whether the truncation split a - /// > surrogate pair. The behavior of software that receives JSON texts - /// > containing such values is unpredictable; for example, implementations - /// > might return different values for the length of a string value or even - /// > suffer fatal runtime exceptions. - /// - /// [RFC 7159]: https://tools.ietf.org/html/rfc7159 - /// - /// The behavior of serde_json is specified to fail on non-UTF-8 strings - /// when deserializing into Rust UTF-8 string types such as String, and - /// succeed with non-UTF-8 bytes when deserializing using this method. - /// - /// Escape sequences are processed as usual, and for `\uXXXX` escapes it is - /// still checked if the hex number represents a valid Unicode code point. - /// - /// # Examples - /// - /// You can use this to parse JSON strings containing invalid UTF-8 bytes, - /// or unpaired surrogates. 
- /// - /// ``` - /// use serde_bytes::ByteBuf; - /// - /// fn look_at_bytes() -> Result<(), serde_json::Error> { - /// let json_data = b"\"some bytes: \xe5\x00\xe5\""; - /// let bytes: ByteBuf = serde_json::from_slice(json_data)?; - /// - /// assert_eq!(b'\xe5', bytes[12]); - /// assert_eq!(b'\0', bytes[13]); - /// assert_eq!(b'\xe5', bytes[14]); - /// - /// Ok(()) - /// } - /// # - /// # look_at_bytes().unwrap(); - /// ``` - /// - /// Backslash escape sequences like `\n` are still interpreted and required - /// to be valid. `\u` escape sequences are required to represent a valid - /// Unicode code point or lone surrogate. - /// - /// ``` - /// use serde_bytes::ByteBuf; - /// - /// fn look_at_bytes() -> Result<(), serde_json::Error> { - /// let json_data = b"\"lone surrogate: \\uD801\""; - /// let bytes: ByteBuf = serde_json::from_slice(json_data)?; - /// let expected = b"lone surrogate: \xED\xA0\x81"; - /// assert_eq!(expected, bytes.as_slice()); - /// Ok(()) - /// } - /// # - /// # look_at_bytes(); - /// ``` - fn deserialize_bytes(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b'"' => { - self.eat_char(); - self.scratch.clear(); - match tri!(self.read.parse_str_raw(&mut self.scratch)) { - Reference::Borrowed(b) => visitor.visit_borrowed_bytes(b), - Reference::Copied(b) => visitor.visit_bytes(b), - } - } - b'[' => self.deserialize_seq(visitor), - _ => Err(self.peek_invalid_type(&visitor)), - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - #[inline] - fn deserialize_byte_buf(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_bytes(visitor) - } - - /// Parses a `null` as a None, and any other values as a `Some(...)`. 
- #[inline] - fn deserialize_option(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - match tri!(self.parse_whitespace()) { - Some(b'n') => { - self.eat_char(); - tri!(self.parse_ident(b"ull")); - visitor.visit_none() - } - _ => visitor.visit_some(self), - } - } - - fn deserialize_unit(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b'n' => { - self.eat_char(); - tri!(self.parse_ident(b"ull")); - visitor.visit_unit() - } - _ => Err(self.peek_invalid_type(&visitor)), - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - fn deserialize_unit_struct(self, _name: &'static str, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_unit(visitor) - } - - /// Parses a newtype struct as the underlying value. - #[inline] - fn deserialize_newtype_struct(self, name: &str, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - #[cfg(feature = "raw_value")] - { - if name == crate::raw::TOKEN { - return self.deserialize_raw_value(visitor); - } - } - - let _ = name; - visitor.visit_newtype_struct(self) - } - - fn deserialize_seq(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b'[' => { - check_recursion! 
{ - self.eat_char(); - let ret = visitor.visit_seq(SeqAccess::new(self)); - } - - match (ret, self.end_seq()) { - (Ok(ret), Ok(())) => Ok(ret), - (Err(err), _) | (_, Err(err)) => Err(err), - } - } - _ => Err(self.peek_invalid_type(&visitor)), - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - fn deserialize_tuple(self, _len: usize, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_seq(visitor) - } - - fn deserialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_seq(visitor) - } - - fn deserialize_map(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b'{' => { - check_recursion! { - self.eat_char(); - let ret = visitor.visit_map(MapAccess::new(self)); - } - - match (ret, self.end_map()) { - (Ok(ret), Ok(())) => Ok(ret), - (Err(err), _) | (_, Err(err)) => Err(err), - } - } - _ => Err(self.peek_invalid_type(&visitor)), - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - fn deserialize_struct( - self, - _name: &'static str, - _fields: &'static [&'static str], - visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - let peek = match tri!(self.parse_whitespace()) { - Some(b) => b, - None => { - return Err(self.peek_error(ErrorCode::EofWhileParsingValue)); - } - }; - - let value = match peek { - b'[' => { - check_recursion! { - self.eat_char(); - let ret = visitor.visit_seq(SeqAccess::new(self)); - } - - match (ret, self.end_seq()) { - (Ok(ret), Ok(())) => Ok(ret), - (Err(err), _) | (_, Err(err)) => Err(err), - } - } - b'{' => { - check_recursion! 
{ - self.eat_char(); - let ret = visitor.visit_map(MapAccess::new(self)); - } - - match (ret, self.end_map()) { - (Ok(ret), Ok(())) => Ok(ret), - (Err(err), _) | (_, Err(err)) => Err(err), - } - } - _ => Err(self.peek_invalid_type(&visitor)), - }; - - match value { - Ok(value) => Ok(value), - Err(err) => Err(self.fix_position(err)), - } - } - - /// Parses an enum as an object like `{"$KEY":$VALUE}`, where $VALUE is either a straight - /// value, a `[..]`, or a `{..}`. - #[inline] - fn deserialize_enum( - self, - _name: &str, - _variants: &'static [&'static str], - visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - match tri!(self.parse_whitespace()) { - Some(b'{') => { - check_recursion! { - self.eat_char(); - let value = tri!(visitor.visit_enum(VariantAccess::new(self))); - } - - match tri!(self.parse_whitespace()) { - Some(b'}') => { - self.eat_char(); - Ok(value) - } - Some(_) => Err(self.error(ErrorCode::ExpectedSomeValue)), - None => Err(self.error(ErrorCode::EofWhileParsingObject)), - } - } - Some(b'"') => visitor.visit_enum(UnitVariantAccess::new(self)), - Some(_) => Err(self.peek_error(ErrorCode::ExpectedSomeValue)), - None => Err(self.peek_error(ErrorCode::EofWhileParsingValue)), - } - } - - fn deserialize_identifier(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_str(visitor) - } - - fn deserialize_ignored_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - tri!(self.ignore_value()); - visitor.visit_unit() - } -} - -struct SeqAccess<'a, R: 'a> { - de: &'a mut Deserializer, - first: bool, -} - -impl<'a, R: 'a> SeqAccess<'a, R> { - fn new(de: &'a mut Deserializer) -> Self { - SeqAccess { de, first: true } - } -} - -impl<'de, 'a, R: Read<'de> + 'a> de::SeqAccess<'de> for SeqAccess<'a, R> { - type Error = Error; - - fn next_element_seed(&mut self, seed: T) -> Result> - where - T: de::DeserializeSeed<'de>, - { - let peek = match tri!(self.de.parse_whitespace()) { - Some(b']') => { - return 
Ok(None); - } - Some(b',') if !self.first => { - self.de.eat_char(); - tri!(self.de.parse_whitespace()) - } - Some(b) => { - if self.first { - self.first = false; - Some(b) - } else { - return Err(self.de.peek_error(ErrorCode::ExpectedListCommaOrEnd)); - } - } - None => { - return Err(self.de.peek_error(ErrorCode::EofWhileParsingList)); - } - }; - - match peek { - Some(b']') => Err(self.de.peek_error(ErrorCode::TrailingComma)), - Some(_) => Ok(Some(tri!(seed.deserialize(&mut *self.de)))), - None => Err(self.de.peek_error(ErrorCode::EofWhileParsingValue)), - } - } -} - -struct MapAccess<'a, R: 'a> { - de: &'a mut Deserializer, - first: bool, -} - -impl<'a, R: 'a> MapAccess<'a, R> { - fn new(de: &'a mut Deserializer) -> Self { - MapAccess { de, first: true } - } -} - -impl<'de, 'a, R: Read<'de> + 'a> de::MapAccess<'de> for MapAccess<'a, R> { - type Error = Error; - - fn next_key_seed(&mut self, seed: K) -> Result> - where - K: de::DeserializeSeed<'de>, - { - let peek = match tri!(self.de.parse_whitespace()) { - Some(b'}') => { - return Ok(None); - } - Some(b',') if !self.first => { - self.de.eat_char(); - tri!(self.de.parse_whitespace()) - } - Some(b) => { - if self.first { - self.first = false; - Some(b) - } else { - return Err(self.de.peek_error(ErrorCode::ExpectedObjectCommaOrEnd)); - } - } - None => { - return Err(self.de.peek_error(ErrorCode::EofWhileParsingObject)); - } - }; - - match peek { - Some(b'"') => seed.deserialize(MapKey { de: &mut *self.de }).map(Some), - Some(b'}') => Err(self.de.peek_error(ErrorCode::TrailingComma)), - Some(_) => Err(self.de.peek_error(ErrorCode::KeyMustBeAString)), - None => Err(self.de.peek_error(ErrorCode::EofWhileParsingValue)), - } - } - - fn next_value_seed(&mut self, seed: V) -> Result - where - V: de::DeserializeSeed<'de>, - { - tri!(self.de.parse_object_colon()); - - seed.deserialize(&mut *self.de) - } -} - -struct VariantAccess<'a, R: 'a> { - de: &'a mut Deserializer, -} - -impl<'a, R: 'a> VariantAccess<'a, R> { - fn 
new(de: &'a mut Deserializer) -> Self { - VariantAccess { de } - } -} - -impl<'de, 'a, R: Read<'de> + 'a> de::EnumAccess<'de> for VariantAccess<'a, R> { - type Error = Error; - type Variant = Self; - - fn variant_seed(self, seed: V) -> Result<(V::Value, Self)> - where - V: de::DeserializeSeed<'de>, - { - let val = tri!(seed.deserialize(&mut *self.de)); - tri!(self.de.parse_object_colon()); - Ok((val, self)) - } -} - -impl<'de, 'a, R: Read<'de> + 'a> de::VariantAccess<'de> for VariantAccess<'a, R> { - type Error = Error; - - fn unit_variant(self) -> Result<()> { - de::Deserialize::deserialize(self.de) - } - - fn newtype_variant_seed(self, seed: T) -> Result - where - T: de::DeserializeSeed<'de>, - { - seed.deserialize(self.de) - } - - fn tuple_variant(self, _len: usize, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - de::Deserializer::deserialize_seq(self.de, visitor) - } - - fn struct_variant(self, fields: &'static [&'static str], visitor: V) -> Result - where - V: de::Visitor<'de>, - { - de::Deserializer::deserialize_struct(self.de, "", fields, visitor) - } -} - -struct UnitVariantAccess<'a, R: 'a> { - de: &'a mut Deserializer, -} - -impl<'a, R: 'a> UnitVariantAccess<'a, R> { - fn new(de: &'a mut Deserializer) -> Self { - UnitVariantAccess { de } - } -} - -impl<'de, 'a, R: Read<'de> + 'a> de::EnumAccess<'de> for UnitVariantAccess<'a, R> { - type Error = Error; - type Variant = Self; - - fn variant_seed(self, seed: V) -> Result<(V::Value, Self)> - where - V: de::DeserializeSeed<'de>, - { - let variant = tri!(seed.deserialize(&mut *self.de)); - Ok((variant, self)) - } -} - -impl<'de, 'a, R: Read<'de> + 'a> de::VariantAccess<'de> for UnitVariantAccess<'a, R> { - type Error = Error; - - fn unit_variant(self) -> Result<()> { - Ok(()) - } - - fn newtype_variant_seed(self, _seed: T) -> Result - where - T: de::DeserializeSeed<'de>, - { - Err(de::Error::invalid_type( - Unexpected::UnitVariant, - &"newtype variant", - )) - } - - fn tuple_variant(self, _len: 
usize, _visitor: V) -> Result - where - V: de::Visitor<'de>, - { - Err(de::Error::invalid_type( - Unexpected::UnitVariant, - &"tuple variant", - )) - } - - fn struct_variant(self, _fields: &'static [&'static str], _visitor: V) -> Result - where - V: de::Visitor<'de>, - { - Err(de::Error::invalid_type( - Unexpected::UnitVariant, - &"struct variant", - )) - } -} - -/// Only deserialize from this after peeking a '"' byte! Otherwise it may -/// deserialize invalid JSON successfully. -struct MapKey<'a, R: 'a> { - de: &'a mut Deserializer, -} - -macro_rules! deserialize_integer_key { - ($method:ident => $visit:ident) => { - fn $method(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.de.eat_char(); - self.de.scratch.clear(); - let string = tri!(self.de.read.parse_str(&mut self.de.scratch)); - match (string.parse(), string) { - (Ok(integer), _) => visitor.$visit(integer), - (Err(_), Reference::Borrowed(s)) => visitor.visit_borrowed_str(s), - (Err(_), Reference::Copied(s)) => visitor.visit_str(s), - } - } - }; -} - -impl<'de, 'a, R> de::Deserializer<'de> for MapKey<'a, R> -where - R: Read<'de>, -{ - type Error = Error; - - #[inline] - fn deserialize_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.de.eat_char(); - self.de.scratch.clear(); - match tri!(self.de.read.parse_str(&mut self.de.scratch)) { - Reference::Borrowed(s) => visitor.visit_borrowed_str(s), - Reference::Copied(s) => visitor.visit_str(s), - } - } - - deserialize_integer_key!(deserialize_i8 => visit_i8); - deserialize_integer_key!(deserialize_i16 => visit_i16); - deserialize_integer_key!(deserialize_i32 => visit_i32); - deserialize_integer_key!(deserialize_i64 => visit_i64); - deserialize_integer_key!(deserialize_i128 => visit_i128); - deserialize_integer_key!(deserialize_u8 => visit_u8); - deserialize_integer_key!(deserialize_u16 => visit_u16); - deserialize_integer_key!(deserialize_u32 => visit_u32); - deserialize_integer_key!(deserialize_u64 => visit_u64); - 
deserialize_integer_key!(deserialize_u128 => visit_u128); - - #[inline] - fn deserialize_option(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - // Map keys cannot be null. - visitor.visit_some(self) - } - - #[inline] - fn deserialize_newtype_struct(self, name: &'static str, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - #[cfg(feature = "raw_value")] - { - if name == crate::raw::TOKEN { - return self.de.deserialize_raw_value(visitor); - } - } - - let _ = name; - visitor.visit_newtype_struct(self) - } - - #[inline] - fn deserialize_enum( - self, - name: &'static str, - variants: &'static [&'static str], - visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - self.de.deserialize_enum(name, variants, visitor) - } - - #[inline] - fn deserialize_bytes(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.de.deserialize_bytes(visitor) - } - - #[inline] - fn deserialize_byte_buf(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.de.deserialize_bytes(visitor) - } - - forward_to_deserialize_any! { - bool f32 f64 char str string unit unit_struct seq tuple tuple_struct map - struct identifier ignored_any - } -} - -////////////////////////////////////////////////////////////////////////////// - -/// Iterator that deserializes a stream into multiple JSON values. -/// -/// A stream deserializer can be created from any JSON deserializer using the -/// `Deserializer::into_iter` method. -/// -/// The data can consist of any JSON value. Values need to be a self-delineating value e.g. -/// arrays, objects, or strings, or be followed by whitespace or a self-delineating value. 
-/// -/// ``` -/// use serde_json::{Deserializer, Value}; -/// -/// fn main() { -/// let data = "{\"k\": 3}1\"cool\"\"stuff\" 3{} [0, 1, 2]"; -/// -/// let stream = Deserializer::from_str(data).into_iter::(); -/// -/// for value in stream { -/// println!("{}", value.unwrap()); -/// } -/// } -/// ``` -pub struct StreamDeserializer<'de, R, T> { - de: Deserializer, - offset: usize, - failed: bool, - output: PhantomData, - lifetime: PhantomData<&'de ()>, -} - -impl<'de, R, T> StreamDeserializer<'de, R, T> -where - R: read::Read<'de>, - T: de::Deserialize<'de>, -{ - /// Create a JSON stream deserializer from one of the possible serde_json - /// input sources. - /// - /// Typically it is more convenient to use one of these methods instead: - /// - /// - Deserializer::from_str(...).into_iter() - /// - Deserializer::from_slice(...).into_iter() - /// - Deserializer::from_reader(...).into_iter() - pub fn new(read: R) -> Self { - let offset = read.byte_offset(); - StreamDeserializer { - de: Deserializer::new(read), - offset, - failed: false, - output: PhantomData, - lifetime: PhantomData, - } - } - - /// Returns the number of bytes so far deserialized into a successful `T`. - /// - /// If a stream deserializer returns an EOF error, new data can be joined to - /// `old_data[stream.byte_offset()..]` to try again. - /// - /// ``` - /// let data = b"[0] [1] ["; - /// - /// let de = serde_json::Deserializer::from_slice(data); - /// let mut stream = de.into_iter::>(); - /// assert_eq!(0, stream.byte_offset()); - /// - /// println!("{:?}", stream.next()); // [0] - /// assert_eq!(3, stream.byte_offset()); - /// - /// println!("{:?}", stream.next()); // [1] - /// assert_eq!(7, stream.byte_offset()); - /// - /// println!("{:?}", stream.next()); // error - /// assert_eq!(8, stream.byte_offset()); - /// - /// // If err.is_eof(), can join the remaining data to new data and continue. 
- /// let remaining = &data[stream.byte_offset()..]; - /// ``` - /// - /// *Note:* In the future this method may be changed to return the number of - /// bytes so far deserialized into a successful T *or* syntactically valid - /// JSON skipped over due to a type error. See [serde-rs/json#70] for an - /// example illustrating this. - /// - /// [serde-rs/json#70]: https://github.com/serde-rs/json/issues/70 - pub fn byte_offset(&self) -> usize { - self.offset - } - - fn peek_end_of_value(&mut self) -> Result<()> { - match tri!(self.de.peek()) { - Some(b' ') | Some(b'\n') | Some(b'\t') | Some(b'\r') | Some(b'"') | Some(b'[') - | Some(b']') | Some(b'{') | Some(b'}') | Some(b',') | Some(b':') | None => Ok(()), - Some(_) => { - let position = self.de.read.peek_position(); - Err(Error::syntax( - ErrorCode::TrailingCharacters, - position.line, - position.column, - )) - } - } - } -} - -impl<'de, R, T> Iterator for StreamDeserializer<'de, R, T> -where - R: Read<'de>, - T: de::Deserialize<'de>, -{ - type Item = Result; - - fn next(&mut self) -> Option> { - if R::should_early_return_if_failed && self.failed { - return None; - } - - // skip whitespaces, if any - // this helps with trailing whitespaces, since whitespaces between - // values are handled for us. - match self.de.parse_whitespace() { - Ok(None) => { - self.offset = self.de.read.byte_offset(); - None - } - Ok(Some(b)) => { - // If the value does not have a clear way to show the end of the value - // (like numbers, null, true etc.) we have to look for whitespace or - // the beginning of a self-delineated value. 
- let self_delineated_value = match b { - b'[' | b'"' | b'{' => true, - _ => false, - }; - self.offset = self.de.read.byte_offset(); - let result = de::Deserialize::deserialize(&mut self.de); - - Some(match result { - Ok(value) => { - self.offset = self.de.read.byte_offset(); - if self_delineated_value { - Ok(value) - } else { - self.peek_end_of_value().map(|_| value) - } - } - Err(e) => { - self.de.read.set_failed(&mut self.failed); - Err(e) - } - }) - } - Err(e) => { - self.de.read.set_failed(&mut self.failed); - Some(Err(e)) - } - } - } -} - -impl<'de, R, T> FusedIterator for StreamDeserializer<'de, R, T> -where - R: Read<'de> + Fused, - T: de::Deserialize<'de>, -{ -} - -////////////////////////////////////////////////////////////////////////////// - -fn from_trait<'de, R, T>(read: R) -> Result -where - R: Read<'de>, - T: de::Deserialize<'de>, -{ - let mut de = Deserializer::new(read); - let value = tri!(de::Deserialize::deserialize(&mut de)); - - // Make sure the whole stream has been consumed. - tri!(de.end()); - Ok(value) -} - -/// Deserialize an instance of type `T` from an I/O stream of JSON. -/// -/// The content of the I/O stream is deserialized directly from the stream -/// without being buffered in memory by serde_json. -/// -/// When reading from a source against which short reads are not efficient, such -/// as a [`File`], you will want to apply your own buffering because serde_json -/// will not buffer the input. See [`std::io::BufReader`]. -/// -/// It is expected that the input stream ends after the deserialized object. -/// If the stream does not end, such as in the case of a persistent socket connection, -/// this function will not return. It is possible instead to deserialize from a prefix of an input -/// stream without looking for EOF by managing your own [`Deserializer`]. 
-/// -/// Note that counter to intuition, this function is usually slower than -/// reading a file completely into memory and then applying [`from_str`] -/// or [`from_slice`] on it. See [issue #160]. -/// -/// [`File`]: https://doc.rust-lang.org/std/fs/struct.File.html -/// [`std::io::BufReader`]: https://doc.rust-lang.org/std/io/struct.BufReader.html -/// [`from_str`]: ./fn.from_str.html -/// [`from_slice`]: ./fn.from_slice.html -/// [issue #160]: https://github.com/serde-rs/json/issues/160 -/// -/// # Example -/// -/// Reading the contents of a file. -/// -/// ``` -/// use serde::Deserialize; -/// -/// use std::error::Error; -/// use std::fs::File; -/// use std::io::BufReader; -/// use std::path::Path; -/// -/// #[derive(Deserialize, Debug)] -/// struct User { -/// fingerprint: String, -/// location: String, -/// } -/// -/// fn read_user_from_file>(path: P) -> Result> { -/// // Open the file in read-only mode with buffer. -/// let file = File::open(path)?; -/// let reader = BufReader::new(file); -/// -/// // Read the JSON contents of the file as an instance of `User`. -/// let u = serde_json::from_reader(reader)?; -/// -/// // Return the `User`. -/// Ok(u) -/// } -/// -/// fn main() { -/// # } -/// # fn fake_main() { -/// let u = read_user_from_file("test.json").unwrap(); -/// println!("{:#?}", u); -/// } -/// ``` -/// -/// Reading from a persistent socket connection. 
-/// -/// ``` -/// use serde::Deserialize; -/// -/// use std::error::Error; -/// use std::net::{TcpListener, TcpStream}; -/// -/// #[derive(Deserialize, Debug)] -/// struct User { -/// fingerprint: String, -/// location: String, -/// } -/// -/// fn read_user_from_stream(tcp_stream: TcpStream) -> Result> { -/// let mut de = serde_json::Deserializer::from_reader(tcp_stream); -/// let u = User::deserialize(&mut de)?; -/// -/// Ok(u) -/// } -/// -/// fn main() { -/// # } -/// # fn fake_main() { -/// let listener = TcpListener::bind("127.0.0.1:4000").unwrap(); -/// -/// for stream in listener.incoming() { -/// println!("{:#?}", read_user_from_stream(stream.unwrap())); -/// } -/// } -/// ``` -/// -/// # Errors -/// -/// This conversion can fail if the structure of the input does not match the -/// structure expected by `T`, for example if `T` is a struct type but the input -/// contains something other than a JSON map. It can also fail if the structure -/// is correct but `T`'s implementation of `Deserialize` decides that something -/// is wrong with the data, for example required struct fields are missing from -/// the JSON map or some number is too big to fit in the expected primitive -/// type. -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub fn from_reader(rdr: R) -> Result -where - R: crate::io::Read, - T: de::DeserializeOwned, -{ - from_trait(read::IoRead::new(rdr)) -} - -/// Deserialize an instance of type `T` from bytes of JSON text. 
-/// -/// # Example -/// -/// ``` -/// use serde::Deserialize; -/// -/// #[derive(Deserialize, Debug)] -/// struct User { -/// fingerprint: String, -/// location: String, -/// } -/// -/// fn main() { -/// // The type of `j` is `&[u8]` -/// let j = b" -/// { -/// \"fingerprint\": \"0xF9BA143B95FF6D82\", -/// \"location\": \"Menlo Park, CA\" -/// }"; -/// -/// let u: User = serde_json::from_slice(j).unwrap(); -/// println!("{:#?}", u); -/// } -/// ``` -/// -/// # Errors -/// -/// This conversion can fail if the structure of the input does not match the -/// structure expected by `T`, for example if `T` is a struct type but the input -/// contains something other than a JSON map. It can also fail if the structure -/// is correct but `T`'s implementation of `Deserialize` decides that something -/// is wrong with the data, for example required struct fields are missing from -/// the JSON map or some number is too big to fit in the expected primitive -/// type. -pub fn from_slice<'a, T>(v: &'a [u8]) -> Result -where - T: de::Deserialize<'a>, -{ - from_trait(read::SliceRead::new(v)) -} - -/// Deserialize an instance of type `T` from a string of JSON text. -/// -/// # Example -/// -/// ``` -/// use serde::Deserialize; -/// -/// #[derive(Deserialize, Debug)] -/// struct User { -/// fingerprint: String, -/// location: String, -/// } -/// -/// fn main() { -/// // The type of `j` is `&str` -/// let j = " -/// { -/// \"fingerprint\": \"0xF9BA143B95FF6D82\", -/// \"location\": \"Menlo Park, CA\" -/// }"; -/// -/// let u: User = serde_json::from_str(j).unwrap(); -/// println!("{:#?}", u); -/// } -/// ``` -/// -/// # Errors -/// -/// This conversion can fail if the structure of the input does not match the -/// structure expected by `T`, for example if `T` is a struct type but the input -/// contains something other than a JSON map. 
It can also fail if the structure -/// is correct but `T`'s implementation of `Deserialize` decides that something -/// is wrong with the data, for example required struct fields are missing from -/// the JSON map or some number is too big to fit in the expected primitive -/// type. -pub fn from_str<'a, T>(s: &'a str) -> Result -where - T: de::Deserialize<'a>, -{ - from_trait(read::StrRead::new(s)) -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/error.rs s390-tools-2.33.1/rust-vendor/serde_json/src/error.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/error.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,497 +0,0 @@ -//! When serializing or deserializing JSON goes wrong. - -use crate::io; -use alloc::boxed::Box; -use alloc::string::{String, ToString}; -use core::fmt::{self, Debug, Display}; -use core::result; -use core::str::FromStr; -use serde::{de, ser}; -#[cfg(feature = "std")] -use std::error; -#[cfg(feature = "std")] -use std::io::ErrorKind; - -/// This type represents all possible errors that can occur when serializing or -/// deserializing JSON data. -pub struct Error { - /// This `Box` allows us to keep the size of `Error` as small as possible. A - /// larger `Error` type was substantially slower due to all the functions - /// that pass around `Result`. - err: Box, -} - -/// Alias for a `Result` with the error type `serde_json::Error`. -pub type Result = result::Result; - -impl Error { - /// One-based line number at which the error was detected. - /// - /// Characters in the first line of the input (before the first newline - /// character) are in line 1. - pub fn line(&self) -> usize { - self.err.line - } - - /// One-based column number at which the error was detected. - /// - /// The first character in the input and any characters immediately - /// following a newline character are in column 1. 
- /// - /// Note that errors may occur in column 0, for example if a read from an - /// I/O stream fails immediately following a previously read newline - /// character. - pub fn column(&self) -> usize { - self.err.column - } - - /// Categorizes the cause of this error. - /// - /// - `Category::Io` - failure to read or write bytes on an I/O stream - /// - `Category::Syntax` - input that is not syntactically valid JSON - /// - `Category::Data` - input data that is semantically incorrect - /// - `Category::Eof` - unexpected end of the input data - pub fn classify(&self) -> Category { - match self.err.code { - ErrorCode::Message(_) => Category::Data, - ErrorCode::Io(_) => Category::Io, - ErrorCode::EofWhileParsingList - | ErrorCode::EofWhileParsingObject - | ErrorCode::EofWhileParsingString - | ErrorCode::EofWhileParsingValue => Category::Eof, - ErrorCode::ExpectedColon - | ErrorCode::ExpectedListCommaOrEnd - | ErrorCode::ExpectedObjectCommaOrEnd - | ErrorCode::ExpectedSomeIdent - | ErrorCode::ExpectedSomeValue - | ErrorCode::InvalidEscape - | ErrorCode::InvalidNumber - | ErrorCode::NumberOutOfRange - | ErrorCode::InvalidUnicodeCodePoint - | ErrorCode::ControlCharacterWhileParsingString - | ErrorCode::KeyMustBeAString - | ErrorCode::LoneLeadingSurrogateInHexEscape - | ErrorCode::TrailingComma - | ErrorCode::TrailingCharacters - | ErrorCode::UnexpectedEndOfHexEscape - | ErrorCode::RecursionLimitExceeded => Category::Syntax, - } - } - - /// Returns true if this error was caused by a failure to read or write - /// bytes on an I/O stream. - pub fn is_io(&self) -> bool { - self.classify() == Category::Io - } - - /// Returns true if this error was caused by input that was not - /// syntactically valid JSON. - pub fn is_syntax(&self) -> bool { - self.classify() == Category::Syntax - } - - /// Returns true if this error was caused by input data that was - /// semantically incorrect. 
- /// - /// For example, JSON containing a number is semantically incorrect when the - /// type being deserialized into holds a String. - pub fn is_data(&self) -> bool { - self.classify() == Category::Data - } - - /// Returns true if this error was caused by prematurely reaching the end of - /// the input data. - /// - /// Callers that process streaming input may be interested in retrying the - /// deserialization once more data is available. - pub fn is_eof(&self) -> bool { - self.classify() == Category::Eof - } - - /// The kind reported by the underlying standard library I/O error, if this - /// error was caused by a failure to read or write bytes on an I/O stream. - /// - /// # Example - /// - /// ``` - /// use serde_json::Value; - /// use std::io::{self, ErrorKind, Read}; - /// use std::process; - /// - /// struct ReaderThatWillTimeOut<'a>(&'a [u8]); - /// - /// impl<'a> Read for ReaderThatWillTimeOut<'a> { - /// fn read(&mut self, buf: &mut [u8]) -> io::Result { - /// if self.0.is_empty() { - /// Err(io::Error::new(ErrorKind::TimedOut, "timed out")) - /// } else { - /// self.0.read(buf) - /// } - /// } - /// } - /// - /// fn main() { - /// let reader = ReaderThatWillTimeOut(br#" {"k": "#); - /// - /// let _: Value = match serde_json::from_reader(reader) { - /// Ok(value) => value, - /// Err(error) => { - /// if error.io_error_kind() == Some(ErrorKind::TimedOut) { - /// // Maybe this application needs to retry certain kinds of errors. - /// - /// # return; - /// } else { - /// eprintln!("error: {}", error); - /// process::exit(1); - /// } - /// } - /// }; - /// } - /// ``` - #[cfg(feature = "std")] - pub fn io_error_kind(&self) -> Option { - if let ErrorCode::Io(io_error) = &self.err.code { - Some(io_error.kind()) - } else { - None - } - } -} - -/// Categorizes the cause of a `serde_json::Error`. -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum Category { - /// The error was caused by a failure to read or write bytes on an I/O - /// stream. 
- Io, - - /// The error was caused by input that was not syntactically valid JSON. - Syntax, - - /// The error was caused by input data that was semantically incorrect. - /// - /// For example, JSON containing a number is semantically incorrect when the - /// type being deserialized into holds a String. - Data, - - /// The error was caused by prematurely reaching the end of the input data. - /// - /// Callers that process streaming input may be interested in retrying the - /// deserialization once more data is available. - Eof, -} - -#[cfg(feature = "std")] -#[allow(clippy::fallible_impl_from)] -impl From for io::Error { - /// Convert a `serde_json::Error` into an `io::Error`. - /// - /// JSON syntax and data errors are turned into `InvalidData` I/O errors. - /// EOF errors are turned into `UnexpectedEof` I/O errors. - /// - /// ``` - /// use std::io; - /// - /// enum MyError { - /// Io(io::Error), - /// Json(serde_json::Error), - /// } - /// - /// impl From for MyError { - /// fn from(err: serde_json::Error) -> MyError { - /// use serde_json::error::Category; - /// match err.classify() { - /// Category::Io => { - /// MyError::Io(err.into()) - /// } - /// Category::Syntax | Category::Data | Category::Eof => { - /// MyError::Json(err) - /// } - /// } - /// } - /// } - /// ``` - fn from(j: Error) -> Self { - if let ErrorCode::Io(err) = j.err.code { - err - } else { - match j.classify() { - Category::Io => unreachable!(), - Category::Syntax | Category::Data => io::Error::new(ErrorKind::InvalidData, j), - Category::Eof => io::Error::new(ErrorKind::UnexpectedEof, j), - } - } - } -} - -struct ErrorImpl { - code: ErrorCode, - line: usize, - column: usize, -} - -pub(crate) enum ErrorCode { - /// Catchall for syntax error messages - Message(Box), - - /// Some I/O error occurred while serializing or deserializing. - Io(io::Error), - - /// EOF while parsing a list. - EofWhileParsingList, - - /// EOF while parsing an object. 
- EofWhileParsingObject, - - /// EOF while parsing a string. - EofWhileParsingString, - - /// EOF while parsing a JSON value. - EofWhileParsingValue, - - /// Expected this character to be a `':'`. - ExpectedColon, - - /// Expected this character to be either a `','` or a `']'`. - ExpectedListCommaOrEnd, - - /// Expected this character to be either a `','` or a `'}'`. - ExpectedObjectCommaOrEnd, - - /// Expected to parse either a `true`, `false`, or a `null`. - ExpectedSomeIdent, - - /// Expected this character to start a JSON value. - ExpectedSomeValue, - - /// Invalid hex escape code. - InvalidEscape, - - /// Invalid number. - InvalidNumber, - - /// Number is bigger than the maximum value of its type. - NumberOutOfRange, - - /// Invalid unicode code point. - InvalidUnicodeCodePoint, - - /// Control character found while parsing a string. - ControlCharacterWhileParsingString, - - /// Object key is not a string. - KeyMustBeAString, - - /// Lone leading surrogate in hex escape. - LoneLeadingSurrogateInHexEscape, - - /// JSON has a comma after the last value in an array or map. - TrailingComma, - - /// JSON has non-whitespace trailing characters after the value. - TrailingCharacters, - - /// Unexpected end of hex escape. - UnexpectedEndOfHexEscape, - - /// Encountered nesting of JSON maps and arrays more than 128 layers deep. - RecursionLimitExceeded, -} - -impl Error { - #[cold] - pub(crate) fn syntax(code: ErrorCode, line: usize, column: usize) -> Self { - Error { - err: Box::new(ErrorImpl { code, line, column }), - } - } - - // Not public API. Should be pub(crate). - // - // Update `eager_json` crate when this function changes. 
- #[doc(hidden)] - #[cold] - pub fn io(error: io::Error) -> Self { - Error { - err: Box::new(ErrorImpl { - code: ErrorCode::Io(error), - line: 0, - column: 0, - }), - } - } - - #[cold] - pub(crate) fn fix_position(self, f: F) -> Self - where - F: FnOnce(ErrorCode) -> Error, - { - if self.err.line == 0 { - f(self.err.code) - } else { - self - } - } -} - -impl Display for ErrorCode { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - ErrorCode::Message(msg) => f.write_str(msg), - ErrorCode::Io(err) => Display::fmt(err, f), - ErrorCode::EofWhileParsingList => f.write_str("EOF while parsing a list"), - ErrorCode::EofWhileParsingObject => f.write_str("EOF while parsing an object"), - ErrorCode::EofWhileParsingString => f.write_str("EOF while parsing a string"), - ErrorCode::EofWhileParsingValue => f.write_str("EOF while parsing a value"), - ErrorCode::ExpectedColon => f.write_str("expected `:`"), - ErrorCode::ExpectedListCommaOrEnd => f.write_str("expected `,` or `]`"), - ErrorCode::ExpectedObjectCommaOrEnd => f.write_str("expected `,` or `}`"), - ErrorCode::ExpectedSomeIdent => f.write_str("expected ident"), - ErrorCode::ExpectedSomeValue => f.write_str("expected value"), - ErrorCode::InvalidEscape => f.write_str("invalid escape"), - ErrorCode::InvalidNumber => f.write_str("invalid number"), - ErrorCode::NumberOutOfRange => f.write_str("number out of range"), - ErrorCode::InvalidUnicodeCodePoint => f.write_str("invalid unicode code point"), - ErrorCode::ControlCharacterWhileParsingString => { - f.write_str("control character (\\u0000-\\u001F) found while parsing a string") - } - ErrorCode::KeyMustBeAString => f.write_str("key must be a string"), - ErrorCode::LoneLeadingSurrogateInHexEscape => { - f.write_str("lone leading surrogate in hex escape") - } - ErrorCode::TrailingComma => f.write_str("trailing comma"), - ErrorCode::TrailingCharacters => f.write_str("trailing characters"), - ErrorCode::UnexpectedEndOfHexEscape => f.write_str("unexpected 
end of hex escape"), - ErrorCode::RecursionLimitExceeded => f.write_str("recursion limit exceeded"), - } - } -} - -impl serde::de::StdError for Error { - #[cfg(feature = "std")] - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match &self.err.code { - ErrorCode::Io(err) => err.source(), - _ => None, - } - } -} - -impl Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&*self.err, f) - } -} - -impl Display for ErrorImpl { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.line == 0 { - Display::fmt(&self.code, f) - } else { - write!( - f, - "{} at line {} column {}", - self.code, self.line, self.column - ) - } - } -} - -// Remove two layers of verbosity from the debug representation. Humans often -// end up seeing this representation because it is what unwrap() shows. -impl Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "Error({:?}, line: {}, column: {})", - self.err.code.to_string(), - self.err.line, - self.err.column - ) - } -} - -impl de::Error for Error { - #[cold] - fn custom(msg: T) -> Error { - make_error(msg.to_string()) - } - - #[cold] - fn invalid_type(unexp: de::Unexpected, exp: &dyn de::Expected) -> Self { - if let de::Unexpected::Unit = unexp { - Error::custom(format_args!("invalid type: null, expected {}", exp)) - } else { - Error::custom(format_args!("invalid type: {}, expected {}", unexp, exp)) - } - } -} - -impl ser::Error for Error { - #[cold] - fn custom(msg: T) -> Error { - make_error(msg.to_string()) - } -} - -// Parse our own error message that looks like "{} at line {} column {}" to work -// around erased-serde round-tripping the error through de::Error::custom. 
-fn make_error(mut msg: String) -> Error { - let (line, column) = parse_line_col(&mut msg).unwrap_or((0, 0)); - Error { - err: Box::new(ErrorImpl { - code: ErrorCode::Message(msg.into_boxed_str()), - line, - column, - }), - } -} - -fn parse_line_col(msg: &mut String) -> Option<(usize, usize)> { - let start_of_suffix = match msg.rfind(" at line ") { - Some(index) => index, - None => return None, - }; - - // Find start and end of line number. - let start_of_line = start_of_suffix + " at line ".len(); - let mut end_of_line = start_of_line; - while starts_with_digit(&msg[end_of_line..]) { - end_of_line += 1; - } - - if !msg[end_of_line..].starts_with(" column ") { - return None; - } - - // Find start and end of column number. - let start_of_column = end_of_line + " column ".len(); - let mut end_of_column = start_of_column; - while starts_with_digit(&msg[end_of_column..]) { - end_of_column += 1; - } - - if end_of_column < msg.len() { - return None; - } - - // Parse numbers. - let line = match usize::from_str(&msg[start_of_line..end_of_line]) { - Ok(line) => line, - Err(_) => return None, - }; - let column = match usize::from_str(&msg[start_of_column..end_of_column]) { - Ok(column) => column, - Err(_) => return None, - }; - - msg.truncate(start_of_suffix); - Some((line, column)) -} - -fn starts_with_digit(slice: &str) -> bool { - match slice.as_bytes().first() { - None => false, - Some(&byte) => byte >= b'0' && byte <= b'9', - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/features_check/error.rs s390-tools-2.33.1/rust-vendor/serde_json/src/features_check/error.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/features_check/error.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/features_check/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -"serde_json requires that either `std` (default) or `alloc` feature is enabled" diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/features_check/mod.rs 
s390-tools-2.33.1/rust-vendor/serde_json/src/features_check/mod.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/features_check/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/features_check/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -//! Shows a user-friendly compiler error on incompatible selected features. - -#[allow(unused_macros)] -macro_rules! hide_from_rustfmt { - ($mod:item) => { - $mod - }; -} - -#[cfg(not(any(feature = "std", feature = "alloc")))] -hide_from_rustfmt! { - mod error; -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/io/core.rs s390-tools-2.33.1/rust-vendor/serde_json/src/io/core.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/io/core.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/io/core.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,79 +0,0 @@ -//! Reimplements core logic and types from `std::io` in an `alloc`-friendly -//! fashion. - -use alloc::vec::Vec; -use core::fmt::{self, Display}; -use core::result; - -pub enum ErrorKind { - Other, -} - -// I/O errors can never occur in no-std mode. All our no-std I/O implementations -// are infallible. -pub struct Error; - -impl Display for Error { - fn fmt(&self, _formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - unreachable!() - } -} - -impl Error { - pub(crate) fn new(_kind: ErrorKind, _error: &'static str) -> Error { - Error - } -} - -pub type Result = result::Result; - -pub trait Write { - fn write(&mut self, buf: &[u8]) -> Result; - - fn write_all(&mut self, buf: &[u8]) -> Result<()> { - // All our Write impls in no_std mode always write the whole buffer in - // one call infallibly. 
- let result = self.write(buf); - debug_assert!(result.is_ok()); - debug_assert_eq!(result.unwrap_or(0), buf.len()); - Ok(()) - } - - fn flush(&mut self) -> Result<()>; -} - -impl Write for &mut W { - #[inline] - fn write(&mut self, buf: &[u8]) -> Result { - (*self).write(buf) - } - - #[inline] - fn write_all(&mut self, buf: &[u8]) -> Result<()> { - (*self).write_all(buf) - } - - #[inline] - fn flush(&mut self) -> Result<()> { - (*self).flush() - } -} - -impl Write for Vec { - #[inline] - fn write(&mut self, buf: &[u8]) -> Result { - self.extend_from_slice(buf); - Ok(buf.len()) - } - - #[inline] - fn write_all(&mut self, buf: &[u8]) -> Result<()> { - self.extend_from_slice(buf); - Ok(()) - } - - #[inline] - fn flush(&mut self) -> Result<()> { - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/io/mod.rs s390-tools-2.33.1/rust-vendor/serde_json/src/io/mod.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/io/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/io/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,20 +0,0 @@ -//! A tiny, `no_std`-friendly facade around `std::io`. -//! Reexports types from `std` when available; otherwise reimplements and -//! provides some of the core logic. -//! -//! The main reason that `std::io` hasn't found itself reexported as part of -//! the `core` crate is the `std::io::{Read, Write}` traits' reliance on -//! `std::io::Error`, which may contain internally a heap-allocated `Box` -//! and/or now relying on OS-specific `std::backtrace::Backtrace`. 
- -pub use self::imp::{Error, ErrorKind, Result, Write}; - -#[cfg(not(feature = "std"))] -#[path = "core.rs"] -mod imp; - -#[cfg(feature = "std")] -use std::io as imp; - -#[cfg(feature = "std")] -pub use std::io::{Bytes, Read}; diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/iter.rs s390-tools-2.33.1/rust-vendor/serde_json/src/iter.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/iter.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/iter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,70 +0,0 @@ -use crate::io; - -pub struct LineColIterator { - iter: I, - - /// Index of the current line. Characters in the first line of the input - /// (before the first newline character) are in line 1. - line: usize, - - /// Index of the current column. The first character in the input and any - /// characters immediately following a newline character are in column 1. - /// The column is 0 immediately after a newline character has been read. - col: usize, - - /// Byte offset of the start of the current line. This is the sum of lengths - /// of all previous lines. Keeping track of things this way allows efficient - /// computation of the current line, column, and byte offset while only - /// updating one of the counters in `next()` in the common case. 
- start_of_line: usize, -} - -impl LineColIterator -where - I: Iterator>, -{ - pub fn new(iter: I) -> LineColIterator { - LineColIterator { - iter, - line: 1, - col: 0, - start_of_line: 0, - } - } - - pub fn line(&self) -> usize { - self.line - } - - pub fn col(&self) -> usize { - self.col - } - - pub fn byte_offset(&self) -> usize { - self.start_of_line + self.col - } -} - -impl Iterator for LineColIterator -where - I: Iterator>, -{ - type Item = io::Result; - - fn next(&mut self) -> Option> { - match self.iter.next() { - None => None, - Some(Ok(b'\n')) => { - self.start_of_line += self.col + 1; - self.line += 1; - self.col = 0; - Some(Ok(b'\n')) - } - Some(Ok(c)) => { - self.col += 1; - Some(Ok(c)) - } - Some(Err(e)) => Some(Err(e)), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/algorithm.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/algorithm.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/algorithm.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/algorithm.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,193 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Algorithms to efficiently convert strings to floats. - -use super::bhcomp::*; -use super::cached::*; -use super::errors::*; -use super::float::ExtendedFloat; -use super::num::*; -use super::small_powers::*; - -// FAST -// ---- - -/// Convert mantissa to exact value for a non-base2 power. -/// -/// Returns the resulting float and if the value can be represented exactly. -pub(crate) fn fast_path(mantissa: u64, exponent: i32) -> Option -where - F: Float, -{ - // `mantissa >> (F::MANTISSA_SIZE+1) != 0` effectively checks if the - // value has a no bits above the hidden bit, which is what we want. 
- let (min_exp, max_exp) = F::exponent_limit(); - let shift_exp = F::mantissa_limit(); - let mantissa_size = F::MANTISSA_SIZE + 1; - if mantissa == 0 { - Some(F::ZERO) - } else if mantissa >> mantissa_size != 0 { - // Would require truncation of the mantissa. - None - } else if exponent == 0 { - // 0 exponent, same as value, exact representation. - let float = F::as_cast(mantissa); - Some(float) - } else if exponent >= min_exp && exponent <= max_exp { - // Value can be exactly represented, return the value. - // Do not use powi, since powi can incrementally introduce - // error. - let float = F::as_cast(mantissa); - Some(float.pow10(exponent)) - } else if exponent >= 0 && exponent <= max_exp + shift_exp { - // Check to see if we have a disguised fast-path, where the - // number of digits in the mantissa is very small, but and - // so digits can be shifted from the exponent to the mantissa. - // https://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/ - let small_powers = POW10_64; - let shift = exponent - max_exp; - let power = small_powers[shift as usize]; - - // Compute the product of the power, if it overflows, - // prematurely return early, otherwise, if we didn't overshoot, - // we can get an exact value. - let value = mantissa.checked_mul(power)?; - if value >> mantissa_size != 0 { - None - } else { - // Use powi, since it's correct, and faster on - // the fast-path. - let float = F::as_cast(value); - Some(float.pow10(max_exp)) - } - } else { - // Cannot be exactly represented, exponent too small or too big, - // would require truncation. - None - } -} - -// MODERATE -// -------- - -/// Multiply the floating-point by the exponent. -/// -/// Multiply by pre-calculated powers of the base, modify the extended- -/// float, and return if new value and if the value can be represented -/// accurately. 
-fn multiply_exponent_extended(fp: &mut ExtendedFloat, exponent: i32, truncated: bool) -> bool -where - F: Float, -{ - let powers = ExtendedFloat::get_powers(); - let exponent = exponent.saturating_add(powers.bias); - let small_index = exponent % powers.step; - let large_index = exponent / powers.step; - if exponent < 0 { - // Guaranteed underflow (assign 0). - fp.mant = 0; - true - } else if large_index as usize >= powers.large.len() { - // Overflow (assign infinity) - fp.mant = 1 << 63; - fp.exp = 0x7FF; - true - } else { - // Within the valid exponent range, multiply by the large and small - // exponents and return the resulting value. - - // Track errors to as a factor of unit in last-precision. - let mut errors: u32 = 0; - if truncated { - errors += u64::error_halfscale(); - } - - // Multiply by the small power. - // Check if we can directly multiply by an integer, if not, - // use extended-precision multiplication. - match fp - .mant - .overflowing_mul(powers.get_small_int(small_index as usize)) - { - // Overflow, multiplication unsuccessful, go slow path. - (_, true) => { - fp.normalize(); - fp.imul(&powers.get_small(small_index as usize)); - errors += u64::error_halfscale(); - } - // No overflow, multiplication successful. - (mant, false) => { - fp.mant = mant; - fp.normalize(); - } - } - - // Multiply by the large power - fp.imul(&powers.get_large(large_index as usize)); - if errors > 0 { - errors += 1; - } - errors += u64::error_halfscale(); - - // Normalize the floating point (and the errors). - let shift = fp.normalize(); - errors <<= shift; - - u64::error_is_accurate::(errors, fp) - } -} - -/// Create a precise native float using an intermediate extended-precision float. -/// -/// Return the float approximation and if the value can be accurately -/// represented with mantissa bits of precision. 
-#[inline] -pub(crate) fn moderate_path( - mantissa: u64, - exponent: i32, - truncated: bool, -) -> (ExtendedFloat, bool) -where - F: Float, -{ - let mut fp = ExtendedFloat { - mant: mantissa, - exp: 0, - }; - let valid = multiply_exponent_extended::(&mut fp, exponent, truncated); - (fp, valid) -} - -// FALLBACK -// -------- - -/// Fallback path when the fast path does not work. -/// -/// Uses the moderate path, if applicable, otherwise, uses the slow path -/// as required. -pub(crate) fn fallback_path( - integer: &[u8], - fraction: &[u8], - mantissa: u64, - exponent: i32, - mantissa_exponent: i32, - truncated: bool, -) -> F -where - F: Float, -{ - // Moderate path (use an extended 80-bit representation). - let (fp, valid) = moderate_path::(mantissa, mantissa_exponent, truncated); - if valid { - return fp.into_float::(); - } - - // Slow path, fast path didn't work. - let b = fp.into_downward_float::(); - if b.is_special() { - // We have a non-finite number, we get to leave early. - b - } else { - bhcomp(b, integer, fraction, exponent) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/bhcomp.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/bhcomp.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/bhcomp.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/bhcomp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,218 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Compare the mantissa to the halfway representation of the float. -//! -//! Compares the actual significant digits of the mantissa to the -//! theoretical digits from `b+h`, scaled into the proper range. - -use super::bignum::*; -use super::digit::*; -use super::exponent::*; -use super::float::*; -use super::math::*; -use super::num::*; -use super::rounding::*; -use core::{cmp, mem}; - -// MANTISSA - -/// Parse the full mantissa into a big integer. 
-/// -/// Max digits is the maximum number of digits plus one. -fn parse_mantissa(integer: &[u8], fraction: &[u8]) -> Bigint -where - F: Float, -{ - // Main loop - let small_powers = POW10_LIMB; - let step = small_powers.len() - 2; - let max_digits = F::MAX_DIGITS - 1; - let mut counter = 0; - let mut value: Limb = 0; - let mut i: usize = 0; - let mut result = Bigint::default(); - - // Iteratively process all the data in the mantissa. - for &digit in integer.iter().chain(fraction) { - // We've parsed the max digits using small values, add to bignum - if counter == step { - result.imul_small(small_powers[counter]); - result.iadd_small(value); - counter = 0; - value = 0; - } - - value *= 10; - value += as_limb(to_digit(digit).unwrap()); - - i += 1; - counter += 1; - if i == max_digits { - break; - } - } - - // We will always have a remainder, as long as we entered the loop - // once, or counter % step is 0. - if counter != 0 { - result.imul_small(small_powers[counter]); - result.iadd_small(value); - } - - // If we have any remaining digits after the last value, we need - // to add a 1 after the rest of the array, it doesn't matter where, - // just move it up. This is good for the worst-possible float - // representation. We also need to return an index. - // Since we already trimmed trailing zeros, we know there has - // to be a non-zero digit if there are any left. - if i < integer.len() + fraction.len() { - result.imul_small(10); - result.iadd_small(1); - } - - result -} - -// FLOAT OPS - -/// Calculate `b` from a a representation of `b` as a float. -#[inline] -pub(super) fn b_extended(f: F) -> ExtendedFloat { - ExtendedFloat::from_float(f) -} - -/// Calculate `b+h` from a a representation of `b` as a float. -#[inline] -pub(super) fn bh_extended(f: F) -> ExtendedFloat { - // None of these can overflow. 
- let b = b_extended(f); - ExtendedFloat { - mant: (b.mant << 1) + 1, - exp: b.exp - 1, - } -} - -// ROUNDING - -/// Custom round-nearest, tie-event algorithm for bhcomp. -#[inline] -fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32, is_truncated: bool) { - let (mut is_above, mut is_halfway) = round_nearest(fp, shift); - if is_halfway && is_truncated { - is_above = true; - is_halfway = false; - } - tie_even(fp, is_above, is_halfway); -} - -// BHCOMP - -/// Calculate the mantissa for a big integer with a positive exponent. -fn large_atof(mantissa: Bigint, exponent: i32) -> F -where - F: Float, -{ - let bits = mem::size_of::() * 8; - - // Simple, we just need to multiply by the power of the radix. - // Now, we can calculate the mantissa and the exponent from this. - // The binary exponent is the binary exponent for the mantissa - // shifted to the hidden bit. - let mut bigmant = mantissa; - bigmant.imul_pow10(exponent as u32); - - // Get the exact representation of the float from the big integer. - let (mant, is_truncated) = bigmant.hi64(); - let exp = bigmant.bit_length() as i32 - bits as i32; - let mut fp = ExtendedFloat { mant, exp }; - fp.round_to_native::(|fp, shift| round_nearest_tie_even(fp, shift, is_truncated)); - into_float(fp) -} - -/// Calculate the mantissa for a big integer with a negative exponent. -/// -/// This invokes the comparison with `b+h`. -fn small_atof(mantissa: Bigint, exponent: i32, f: F) -> F -where - F: Float, -{ - // Get the significant digits and radix exponent for the real digits. - let mut real_digits = mantissa; - let real_exp = exponent; - debug_assert!(real_exp < 0); - - // Get the significant digits and the binary exponent for `b+h`. - let theor = bh_extended(f); - let mut theor_digits = Bigint::from_u64(theor.mant); - let theor_exp = theor.exp; - - // We need to scale the real digits and `b+h` digits to be the same - // order. 
We currently have `real_exp`, in `radix`, that needs to be - // shifted to `theor_digits` (since it is negative), and `theor_exp` - // to either `theor_digits` or `real_digits` as a power of 2 (since it - // may be positive or negative). Try to remove as many powers of 2 - // as possible. All values are relative to `theor_digits`, that is, - // reflect the power you need to multiply `theor_digits` by. - - // Can remove a power-of-two, since the radix is 10. - // Both are on opposite-sides of equation, can factor out a - // power of two. - // - // Example: 10^-10, 2^-10 -> ( 0, 10, 0) - // Example: 10^-10, 2^-15 -> (-5, 10, 0) - // Example: 10^-10, 2^-5 -> ( 5, 10, 0) - // Example: 10^-10, 2^5 -> (15, 10, 0) - let binary_exp = theor_exp - real_exp; - let halfradix_exp = -real_exp; - let radix_exp = 0; - - // Carry out our multiplication. - if halfradix_exp != 0 { - theor_digits.imul_pow5(halfradix_exp as u32); - } - if radix_exp != 0 { - theor_digits.imul_pow10(radix_exp as u32); - } - if binary_exp > 0 { - theor_digits.imul_pow2(binary_exp as u32); - } else if binary_exp < 0 { - real_digits.imul_pow2(-binary_exp as u32); - } - - // Compare real digits to theoretical digits and round the float. - match real_digits.compare(&theor_digits) { - cmp::Ordering::Greater => f.next_positive(), - cmp::Ordering::Less => f, - cmp::Ordering::Equal => f.round_positive_even(), - } -} - -/// Calculate the exact value of the float. -/// -/// Note: fraction must not have trailing zeros. -pub(crate) fn bhcomp(b: F, integer: &[u8], mut fraction: &[u8], exponent: i32) -> F -where - F: Float, -{ - // Calculate the number of integer digits and use that to determine - // where the significant digits start in the fraction. 
- let integer_digits = integer.len(); - let fraction_digits = fraction.len(); - let digits_start = if integer_digits == 0 { - let start = fraction.iter().take_while(|&x| *x == b'0').count(); - fraction = &fraction[start..]; - start - } else { - 0 - }; - let sci_exp = scientific_exponent(exponent, integer_digits, digits_start); - let count = F::MAX_DIGITS.min(integer_digits + fraction_digits - digits_start); - let scaled_exponent = sci_exp + 1 - count as i32; - - let mantissa = parse_mantissa::(integer, fraction); - if scaled_exponent >= 0 { - large_atof(mantissa, scaled_exponent) - } else { - small_atof(mantissa, scaled_exponent, b) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/bignum.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/bignum.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/bignum.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/bignum.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,33 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Big integer type definition. - -use super::math::*; -use alloc::vec::Vec; - -/// Storage for a big integer type. -#[derive(Clone, PartialEq, Eq)] -pub(crate) struct Bigint { - /// Internal storage for the Bigint, in little-endian order. 
- pub(crate) data: Vec, -} - -impl Default for Bigint { - fn default() -> Self { - Bigint { - data: Vec::with_capacity(20), - } - } -} - -impl Math for Bigint { - #[inline] - fn data(&self) -> &Vec { - &self.data - } - - #[inline] - fn data_mut(&mut self) -> &mut Vec { - &mut self.data - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/cached_float80.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/cached_float80.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/cached_float80.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/cached_float80.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,206 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Cached exponents for basen values with 80-bit extended floats. -//! -//! Exact versions of base**n as an extended-precision float, with both -//! large and small powers. Use the large powers to minimize the amount -//! of compounded error. -//! -//! These values were calculated using Python, using the arbitrary-precision -//! integer to calculate exact extended-representation of each value. -//! These values are all normalized. 
- -use super::cached::{ExtendedFloatArray, ModeratePathPowers}; - -// LOW-LEVEL -// --------- - -// BASE10 - -const BASE10_SMALL_MANTISSA: [u64; 10] = [ - 9223372036854775808, // 10^0 - 11529215046068469760, // 10^1 - 14411518807585587200, // 10^2 - 18014398509481984000, // 10^3 - 11258999068426240000, // 10^4 - 14073748835532800000, // 10^5 - 17592186044416000000, // 10^6 - 10995116277760000000, // 10^7 - 13743895347200000000, // 10^8 - 17179869184000000000, // 10^9 -]; -const BASE10_SMALL_EXPONENT: [i32; 10] = [ - -63, // 10^0 - -60, // 10^1 - -57, // 10^2 - -54, // 10^3 - -50, // 10^4 - -47, // 10^5 - -44, // 10^6 - -40, // 10^7 - -37, // 10^8 - -34, // 10^9 -]; -const BASE10_LARGE_MANTISSA: [u64; 66] = [ - 11555125961253852697, // 10^-350 - 13451937075301367670, // 10^-340 - 15660115838168849784, // 10^-330 - 18230774251475056848, // 10^-320 - 10611707258198326947, // 10^-310 - 12353653155963782858, // 10^-300 - 14381545078898527261, // 10^-290 - 16742321987285426889, // 10^-280 - 9745314011399999080, // 10^-270 - 11345038669416679861, // 10^-260 - 13207363278391631158, // 10^-250 - 15375394465392026070, // 10^-240 - 17899314949046850752, // 10^-230 - 10418772551374772303, // 10^-220 - 12129047596099288555, // 10^-210 - 14120069793541087484, // 10^-200 - 16437924692338667210, // 10^-190 - 9568131466127621947, // 10^-180 - 11138771039116687545, // 10^-170 - 12967236152753102995, // 10^-160 - 15095849699286165408, // 10^-150 - 17573882009934360870, // 10^-140 - 10229345649675443343, // 10^-130 - 11908525658859223294, // 10^-120 - 13863348470604074297, // 10^-110 - 16139061738043178685, // 10^-100 - 9394170331095332911, // 10^-90 - 10936253623915059621, // 10^-80 - 12731474852090538039, // 10^-70 - 14821387422376473014, // 10^-60 - 17254365866976409468, // 10^-50 - 10043362776618689222, // 10^-40 - 11692013098647223345, // 10^-30 - 13611294676837538538, // 10^-20 - 15845632502852867518, // 10^-10 - 9223372036854775808, // 10^0 - 10737418240000000000, // 10^10 - 
12500000000000000000, // 10^20 - 14551915228366851806, // 10^30 - 16940658945086006781, // 10^40 - 9860761315262647567, // 10^50 - 11479437019748901445, // 10^60 - 13363823550460978230, // 10^70 - 15557538194652854267, // 10^80 - 18111358157653424735, // 10^90 - 10542197943230523224, // 10^100 - 12272733663244316382, // 10^110 - 14287342391028437277, // 10^120 - 16632655625031838749, // 10^130 - 9681479787123295682, // 10^140 - 11270725851789228247, // 10^150 - 13120851772591970218, // 10^160 - 15274681817498023410, // 10^170 - 17782069995880619867, // 10^180 - 10350527006597618960, // 10^190 - 12049599325514420588, // 10^200 - 14027579833653779454, // 10^210 - 16330252207878254650, // 10^220 - 9505457831475799117, // 10^230 - 11065809325636130661, // 10^240 - 12882297539194266616, // 10^250 - 14996968138956309548, // 10^260 - 17458768723248864463, // 10^270 - 10162340898095201970, // 10^280 - 11830521861667747109, // 10^290 - 13772540099066387756, // 10^300 -]; -const BASE10_LARGE_EXPONENT: [i32; 66] = [ - -1226, // 10^-350 - -1193, // 10^-340 - -1160, // 10^-330 - -1127, // 10^-320 - -1093, // 10^-310 - -1060, // 10^-300 - -1027, // 10^-290 - -994, // 10^-280 - -960, // 10^-270 - -927, // 10^-260 - -894, // 10^-250 - -861, // 10^-240 - -828, // 10^-230 - -794, // 10^-220 - -761, // 10^-210 - -728, // 10^-200 - -695, // 10^-190 - -661, // 10^-180 - -628, // 10^-170 - -595, // 10^-160 - -562, // 10^-150 - -529, // 10^-140 - -495, // 10^-130 - -462, // 10^-120 - -429, // 10^-110 - -396, // 10^-100 - -362, // 10^-90 - -329, // 10^-80 - -296, // 10^-70 - -263, // 10^-60 - -230, // 10^-50 - -196, // 10^-40 - -163, // 10^-30 - -130, // 10^-20 - -97, // 10^-10 - -63, // 10^0 - -30, // 10^10 - 3, // 10^20 - 36, // 10^30 - 69, // 10^40 - 103, // 10^50 - 136, // 10^60 - 169, // 10^70 - 202, // 10^80 - 235, // 10^90 - 269, // 10^100 - 302, // 10^110 - 335, // 10^120 - 368, // 10^130 - 402, // 10^140 - 435, // 10^150 - 468, // 10^160 - 501, // 10^170 - 534, // 10^180 - 568, 
// 10^190 - 601, // 10^200 - 634, // 10^210 - 667, // 10^220 - 701, // 10^230 - 734, // 10^240 - 767, // 10^250 - 800, // 10^260 - 833, // 10^270 - 867, // 10^280 - 900, // 10^290 - 933, // 10^300 -]; -const BASE10_SMALL_INT_POWERS: [u64; 10] = [ - 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, -]; -const BASE10_STEP: i32 = 10; -const BASE10_BIAS: i32 = 350; - -// HIGH LEVEL -// ---------- - -const BASE10_POWERS: ModeratePathPowers = ModeratePathPowers { - small: ExtendedFloatArray { - mant: &BASE10_SMALL_MANTISSA, - exp: &BASE10_SMALL_EXPONENT, - }, - large: ExtendedFloatArray { - mant: &BASE10_LARGE_MANTISSA, - exp: &BASE10_LARGE_EXPONENT, - }, - small_int: &BASE10_SMALL_INT_POWERS, - step: BASE10_STEP, - bias: BASE10_BIAS, -}; - -/// Get powers from base. -pub(crate) fn get_powers() -> &'static ModeratePathPowers { - &BASE10_POWERS -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/cached.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/cached.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/cached.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/cached.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,82 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Cached powers trait for extended-precision floats. - -use super::cached_float80; -use super::float::ExtendedFloat; - -// POWERS - -/// Precalculated powers that uses two-separate arrays for memory-efficiency. -#[doc(hidden)] -pub(crate) struct ExtendedFloatArray { - // Pre-calculated mantissa for the powers. - pub mant: &'static [u64], - // Pre-calculated binary exponents for the powers. 
- pub exp: &'static [i32], -} - -/// Allow indexing of values without bounds checking -impl ExtendedFloatArray { - #[inline] - pub fn get_extended_float(&self, index: usize) -> ExtendedFloat { - let mant = self.mant[index]; - let exp = self.exp[index]; - ExtendedFloat { mant, exp } - } - - #[inline] - pub fn len(&self) -> usize { - self.mant.len() - } -} - -// MODERATE PATH POWERS - -/// Precalculated powers of base N for the moderate path. -#[doc(hidden)] -pub(crate) struct ModeratePathPowers { - // Pre-calculated small powers. - pub small: ExtendedFloatArray, - // Pre-calculated large powers. - pub large: ExtendedFloatArray, - /// Pre-calculated small powers as 64-bit integers - pub small_int: &'static [u64], - // Step between large powers and number of small powers. - pub step: i32, - // Exponent bias for the large powers. - pub bias: i32, -} - -/// Allow indexing of values without bounds checking -impl ModeratePathPowers { - #[inline] - pub fn get_small(&self, index: usize) -> ExtendedFloat { - self.small.get_extended_float(index) - } - - #[inline] - pub fn get_large(&self, index: usize) -> ExtendedFloat { - self.large.get_extended_float(index) - } - - #[inline] - pub fn get_small_int(&self, index: usize) -> u64 { - self.small_int[index] - } -} - -// CACHED EXTENDED POWERS - -/// Cached powers as a trait for a floating-point type. -pub(crate) trait ModeratePathCache { - /// Get cached powers. 
- fn get_powers() -> &'static ModeratePathPowers; -} - -impl ModeratePathCache for ExtendedFloat { - #[inline] - fn get_powers() -> &'static ModeratePathPowers { - cached_float80::get_powers() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/digit.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/digit.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/digit.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/digit.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Helpers to convert and add digits from characters. - -// Convert u8 to digit. -#[inline] -pub(crate) fn to_digit(c: u8) -> Option { - (c as char).to_digit(10) -} - -// Add digit to mantissa. -#[inline] -pub(crate) fn add_digit(value: u64, digit: u32) -> Option { - value.checked_mul(10)?.checked_add(digit as u64) -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/errors.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/errors.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/errors.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/errors.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,133 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Estimate the error in an 80-bit approximation of a float. -//! -//! This estimates the error in a floating-point representation. -//! -//! This implementation is loosely based off the Golang implementation, -//! found here: -//! https://golang.org/src/strconv/atof.go - -use super::float::*; -use super::num::*; -use super::rounding::*; - -pub(crate) trait FloatErrors { - /// Get the full error scale. - fn error_scale() -> u32; - /// Get the half error scale. - fn error_halfscale() -> u32; - /// Determine if the number of errors is tolerable for float precision. 
- fn error_is_accurate(count: u32, fp: &ExtendedFloat) -> bool; -} - -/// Check if the error is accurate with a round-nearest rounding scheme. -#[inline] -fn nearest_error_is_accurate(errors: u64, fp: &ExtendedFloat, extrabits: u64) -> bool { - // Round-to-nearest, need to use the halfway point. - if extrabits == 65 { - // Underflow, we have a shift larger than the mantissa. - // Representation is valid **only** if the value is close enough - // overflow to the next bit within errors. If it overflows, - // the representation is **not** valid. - !fp.mant.overflowing_add(errors).1 - } else { - let mask: u64 = lower_n_mask(extrabits); - let extra: u64 = fp.mant & mask; - - // Round-to-nearest, need to check if we're close to halfway. - // IE, b10100 | 100000, where `|` signifies the truncation point. - let halfway: u64 = lower_n_halfway(extrabits); - let cmp1 = halfway.wrapping_sub(errors) < extra; - let cmp2 = extra < halfway.wrapping_add(errors); - - // If both comparisons are true, we have significant rounding error, - // and the value cannot be exactly represented. Otherwise, the - // representation is valid. - !(cmp1 && cmp2) - } -} - -impl FloatErrors for u64 { - #[inline] - fn error_scale() -> u32 { - 8 - } - - #[inline] - fn error_halfscale() -> u32 { - u64::error_scale() / 2 - } - - #[inline] - fn error_is_accurate(count: u32, fp: &ExtendedFloat) -> bool { - // Determine if extended-precision float is a good approximation. - // If the error has affected too many units, the float will be - // inaccurate, or if the representation is too close to halfway - // that any operations could affect this halfway representation. - // See the documentation for dtoa for more information. - let bias = -(F::EXPONENT_BIAS - F::MANTISSA_SIZE); - let denormal_exp = bias - 63; - // This is always a valid u32, since (denormal_exp - fp.exp) - // will always be positive and the significand size is {23, 52}. 
- let extrabits = if fp.exp <= denormal_exp { - 64 - F::MANTISSA_SIZE + denormal_exp - fp.exp - } else { - 63 - F::MANTISSA_SIZE - }; - - // Our logic is as follows: we want to determine if the actual - // mantissa and the errors during calculation differ significantly - // from the rounding point. The rounding point for round-nearest - // is the halfway point, IE, this when the truncated bits start - // with b1000..., while the rounding point for the round-toward - // is when the truncated bits are equal to 0. - // To do so, we can check whether the rounding point +/- the error - // are >/< the actual lower n bits. - // - // For whether we need to use signed or unsigned types for this - // analysis, see this example, using u8 rather than u64 to simplify - // things. - // - // # Comparisons - // cmp1 = (halfway - errors) < extra - // cmp1 = extra < (halfway + errors) - // - // # Large Extrabits, Low Errors - // - // extrabits = 8 - // halfway = 0b10000000 - // extra = 0b10000010 - // errors = 0b00000100 - // halfway - errors = 0b01111100 - // halfway + errors = 0b10000100 - // - // Unsigned: - // halfway - errors = 124 - // halfway + errors = 132 - // extra = 130 - // cmp1 = true - // cmp2 = true - // Signed: - // halfway - errors = 124 - // halfway + errors = -124 - // extra = -126 - // cmp1 = false - // cmp2 = true - // - // # Conclusion - // - // Since errors will always be small, and since we want to detect - // if the representation is accurate, we need to use an **unsigned** - // type for comparisons. - - let extrabits = extrabits as u64; - let errors = count as u64; - if extrabits > 65 { - // Underflow, we have a literal 0. 
- return true; - } - - nearest_error_is_accurate(errors, fp, extrabits) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/exponent.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/exponent.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/exponent.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/exponent.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,50 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Utilities to calculate exponents. - -/// Convert usize into i32 without overflow. -/// -/// This is needed to ensure when adjusting the exponent relative to -/// the mantissa we do not overflow for comically-long exponents. -#[inline] -fn into_i32(value: usize) -> i32 { - if value > i32::max_value() as usize { - i32::max_value() - } else { - value as i32 - } -} - -// EXPONENT CALCULATION - -// Calculate the scientific notation exponent without overflow. -// -// For example, 0.1 would be -1, and 10 would be 1 in base 10. -#[inline] -pub(crate) fn scientific_exponent( - exponent: i32, - integer_digits: usize, - fraction_start: usize, -) -> i32 { - if integer_digits == 0 { - let fraction_start = into_i32(fraction_start); - exponent.saturating_sub(fraction_start).saturating_sub(1) - } else { - let integer_shift = into_i32(integer_digits - 1); - exponent.saturating_add(integer_shift) - } -} - -// Calculate the mantissa exponent without overflow. -// -// Remove the number of digits that contributed to the mantissa past -// the dot, and add the number of truncated digits from the mantissa, -// to calculate the scaling factor for the mantissa from a raw exponent. 
-#[inline] -pub(crate) fn mantissa_exponent(exponent: i32, fraction_digits: usize, truncated: usize) -> i32 { - if fraction_digits > truncated { - exponent.saturating_sub(into_i32(fraction_digits - truncated)) - } else { - exponent.saturating_add(into_i32(truncated - fraction_digits)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/float.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/float.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/float.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/float.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,183 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -// FLOAT TYPE - -use super::num::*; -use super::rounding::*; -use super::shift::*; - -/// Extended precision floating-point type. -/// -/// Private implementation, exposed only for testing purposes. -#[doc(hidden)] -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub(crate) struct ExtendedFloat { - /// Mantissa for the extended-precision float. - pub mant: u64, - /// Binary exponent for the extended-precision float. - pub exp: i32, -} - -impl ExtendedFloat { - // PROPERTIES - - // OPERATIONS - - /// Multiply two normalized extended-precision floats, as if by `a*b`. - /// - /// The precision is maximal when the numbers are normalized, however, - /// decent precision will occur as long as both values have high bits - /// set. The result is not normalized. - /// - /// Algorithm: - /// 1. Non-signed multiplication of mantissas (requires 2x as many bits as input). - /// 2. Normalization of the result (not done here). - /// 3. Addition of exponents. - pub(crate) fn mul(&self, b: &ExtendedFloat) -> ExtendedFloat { - // Logic check, values must be decently normalized prior to multiplication. - debug_assert!((self.mant & u64::HIMASK != 0) && (b.mant & u64::HIMASK != 0)); - - // Extract high-and-low masks. 
- let ah = self.mant >> u64::HALF; - let al = self.mant & u64::LOMASK; - let bh = b.mant >> u64::HALF; - let bl = b.mant & u64::LOMASK; - - // Get our products - let ah_bl = ah * bl; - let al_bh = al * bh; - let al_bl = al * bl; - let ah_bh = ah * bh; - - let mut tmp = (ah_bl & u64::LOMASK) + (al_bh & u64::LOMASK) + (al_bl >> u64::HALF); - // round up - tmp += 1 << (u64::HALF - 1); - - ExtendedFloat { - mant: ah_bh + (ah_bl >> u64::HALF) + (al_bh >> u64::HALF) + (tmp >> u64::HALF), - exp: self.exp + b.exp + u64::FULL, - } - } - - /// Multiply in-place, as if by `a*b`. - /// - /// The result is not normalized. - #[inline] - pub(crate) fn imul(&mut self, b: &ExtendedFloat) { - *self = self.mul(b); - } - - // NORMALIZE - - /// Normalize float-point number. - /// - /// Shift the mantissa so the number of leading zeros is 0, or the value - /// itself is 0. - /// - /// Get the number of bytes shifted. - #[inline] - pub(crate) fn normalize(&mut self) -> u32 { - // Note: - // Using the cltz intrinsic via leading_zeros is way faster (~10x) - // than shifting 1-bit at a time, via while loop, and also way - // faster (~2x) than an unrolled loop that checks at 32, 16, 4, - // 2, and 1 bit. - // - // Using a modulus of pow2 (which will get optimized to a bitwise - // and with 0x3F or faster) is slightly slower than an if/then, - // however, removing the if/then will likely optimize more branched - // code as it removes conditional logic. - - // Calculate the number of leading zeros, and then zero-out - // any overflowing bits, to avoid shl overflow when self.mant == 0. - let shift = if self.mant == 0 { - 0 - } else { - self.mant.leading_zeros() - }; - shl(self, shift as i32); - shift - } - - // ROUND - - /// Lossy round float-point number to native mantissa boundaries. 
- #[inline] - pub(crate) fn round_to_native(&mut self, algorithm: Algorithm) - where - F: Float, - Algorithm: FnOnce(&mut ExtendedFloat, i32), - { - round_to_native::(self, algorithm); - } - - // FROM - - /// Create extended float from native float. - #[inline] - pub fn from_float(f: F) -> ExtendedFloat { - from_float(f) - } - - // INTO - - /// Convert into default-rounded, lower-precision native float. - #[inline] - pub(crate) fn into_float(mut self) -> F { - self.round_to_native::(round_nearest_tie_even); - into_float(self) - } - - /// Convert into downward-rounded, lower-precision native float. - #[inline] - pub(crate) fn into_downward_float(mut self) -> F { - self.round_to_native::(round_downward); - into_float(self) - } -} - -// FROM FLOAT - -// Import ExtendedFloat from native float. -#[inline] -pub(crate) fn from_float(f: F) -> ExtendedFloat -where - F: Float, -{ - ExtendedFloat { - mant: u64::as_cast(f.mantissa()), - exp: f.exponent(), - } -} - -// INTO FLOAT - -// Export extended-precision float to native float. -// -// The extended-precision float must be in native float representation, -// with overflow/underflow appropriately handled. -#[inline] -pub(crate) fn into_float(fp: ExtendedFloat) -> F -where - F: Float, -{ - // Export floating-point number. - if fp.mant == 0 || fp.exp < F::DENORMAL_EXPONENT { - // sub-denormal, underflow - F::ZERO - } else if fp.exp >= F::MAX_EXPONENT { - // overflow - F::from_bits(F::INFINITY_BITS) - } else { - // calculate the exp and fraction bits, and return a float from bits. 
- let exp: u64; - if (fp.exp == F::DENORMAL_EXPONENT) && (fp.mant & F::HIDDEN_BIT_MASK.as_u64()) == 0 { - exp = 0; - } else { - exp = (fp.exp + F::EXPONENT_BIAS) as u64; - } - let exp = exp << F::MANTISSA_SIZE; - let mant = fp.mant & F::MANTISSA_MASK.as_u64(); - F::from_bits(F::Unsigned::as_cast(mant | exp)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/large_powers32.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/large_powers32.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/large_powers32.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/large_powers32.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,183 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Precalculated large powers for 32-bit limbs. - -/// Large powers (&[u32]) for base5 operations. -const POW5_1: [u32; 1] = [5]; -const POW5_2: [u32; 1] = [25]; -const POW5_3: [u32; 1] = [625]; -const POW5_4: [u32; 1] = [390625]; -const POW5_5: [u32; 2] = [2264035265, 35]; -const POW5_6: [u32; 3] = [2242703233, 762134875, 1262]; -const POW5_7: [u32; 5] = [3211403009, 1849224548, 3668416493, 3913284084, 1593091]; -const POW5_8: [u32; 10] = [ - 781532673, 64985353, 253049085, 594863151, 3553621484, 3288652808, 3167596762, 2788392729, - 3911132675, 590, -]; -const POW5_9: [u32; 19] = [ - 2553183233, 3201533787, 3638140786, 303378311, 1809731782, 3477761648, 3583367183, 649228654, - 2915460784, 487929380, 1011012442, 1677677582, 3428152256, 1710878487, 1438394610, 2161952759, - 4100910556, 1608314830, 349175, -]; -const POW5_10: [u32; 38] = [ - 4234999809, 2012377703, 2408924892, 1570150255, 3090844311, 3273530073, 1187251475, 2498123591, - 3364452033, 1148564857, 687371067, 2854068671, 1883165473, 505794538, 2988060450, 3159489326, - 2531348317, 3215191468, 849106862, 3892080979, 3288073877, 2242451748, 4183778142, 2995818208, - 2477501924, 325481258, 2487842652, 1774082830, 1933815724, 2962865281, 
1168579910, 2724829000, - 2360374019, 2315984659, 2360052375, 3251779801, 1664357844, 28, -]; -const POW5_11: [u32; 75] = [ - 689565697, 4116392818, 1853628763, 516071302, 2568769159, 365238920, 336250165, 1283268122, - 3425490969, 248595470, 2305176814, 2111925499, 507770399, 2681111421, 589114268, 591287751, - 1708941527, 4098957707, 475844916, 3378731398, 2452339615, 2817037361, 2678008327, 1656645978, - 2383430340, 73103988, 448667107, 2329420453, 3124020241, 3625235717, 3208634035, 2412059158, - 2981664444, 4117622508, 838560765, 3069470027, 270153238, 1802868219, 3692709886, 2161737865, - 2159912357, 2585798786, 837488486, 4237238160, 2540319504, 3798629246, 3748148874, 1021550776, - 2386715342, 1973637538, 1823520457, 1146713475, 833971519, 3277251466, 905620390, 26278816, - 2680483154, 2294040859, 373297482, 5996609, 4109575006, 512575049, 917036550, 1942311753, - 2816916778, 3248920332, 1192784020, 3537586671, 2456567643, 2925660628, 759380297, 888447942, - 3559939476, 3654687237, 805, -]; -const POW5_12: [u32; 149] = [ - 322166785, 3809044581, 2994556223, 1239584207, 3962455841, 4001882964, 3053876612, 915114683, - 2783289745, 785739093, 4253185907, 3931164994, 1370983858, 2553556126, 3360742076, 2255410929, - 422849554, 2457422215, 3539495362, 1720790602, 1908931983, 1470596141, 592794347, 4219465164, - 4085652704, 941661409, 2534650953, 885063988, 2355909854, 2812815516, 767256131, 3821757683, - 2155151105, 3817418473, 281116564, 2834395026, 2821201622, 2524625843, 1511330880, 2572352493, - 330571332, 2951088579, 2730271766, 4044456479, 4212286644, 2444937588, 3603420843, 2387148597, - 1142537539, 3299235429, 1751012624, 861228086, 2873722519, 230498814, 1023297821, 2553128038, - 3421129895, 2651917435, 2042981258, 1606787143, 2228751918, 447345732, 1930371132, 1784132011, - 3612538790, 2275925090, 2487567871, 1080427616, 2009179183, 3383506781, 3899054063, 1950782960, - 2168622213, 2717674390, 3616636027, 2079341593, 1530129217, 1461057425, 2406264415, 
3674671357, - 2972036238, 2019354295, 1455849819, 1866918619, 1324269294, 424891864, 2722422332, 2641594816, - 1400249021, 3482963993, 3734946379, 225889849, 1891545473, 777383150, 3589824633, 4117601611, - 4220028667, 334453379, 1083130821, 1060342180, 4208163139, 1489826908, 4163762246, 1096580926, - 689301528, 2336054516, 1782865703, 4175148410, 3398369392, 2329412588, 3001580596, 59740741, - 3202189932, 3351895776, 246185302, 718535188, 3772647488, 4151666556, 4055698133, 2461934110, - 2281316281, 3466396836, 3536023465, 1064267812, 2955456354, 2423805422, 3627960790, 1325057500, - 3876919979, 2009959531, 175455101, 184092852, 2358785571, 3842977831, 2485266289, 487121622, - 4159252710, 4075707558, 459389244, 300652075, 2521346588, 3458976673, 888631636, 2076098096, - 3844514585, 2363697580, 3729421522, 3051115477, 649395, -]; -const POW5_13: [u32; 298] = [ - 711442433, 3564261005, 2399042279, 4170849936, 4010295575, 1423987028, 330414929, 1349249065, - 4213813618, 3852031822, 4040843590, 2154565331, 3094013374, 1159028371, 3227065538, 2115927092, - 2085102554, 488590542, 2609619432, 3602898805, 3812736528, 3269439096, 23816114, 253984538, - 1035905997, 2942969204, 3400787671, 338562688, 1637191975, 740509713, 2264962817, 3410753922, - 4162231428, 2282041228, 1759373012, 3155367777, 4278913285, 1420532801, 1981002276, 438054990, - 1006507643, 1142697287, 1332538012, 2029019521, 3949305784, 818392641, 2491288846, 2716584663, - 3648886102, 556814413, 444795339, 4071412999, 1066321706, 4253169466, 2510832316, 672091442, - 4083256000, 2165985028, 1841538484, 3549854235, 364431512, 3707648143, 1162785440, 2268641545, - 281340310, 735693841, 848809228, 1700785200, 2919703985, 4094234344, 58530286, 965505005, - 1000010347, 3381961808, 3040089923, 1973852082, 2890971585, 1019960210, 4292895237, 2821887841, - 3756675650, 3951282907, 3885870583, 1008791145, 503998487, 1881258362, 1949332730, 392996726, - 2012973814, 3970014187, 2461725150, 2942547730, 3728066699, 
2766901132, 3778532841, 1085564064, - 2278673896, 1116879805, 3448726271, 774279411, 157211670, 1506320155, 531168605, 1362654525, - 956967721, 2148871960, 769186085, 4186232894, 2055679604, 3248365487, 3981268013, 3975787984, - 2489510517, 3309046495, 212771124, 933418041, 3371839114, 562115198, 1853601831, 757336096, - 1354633440, 1486083256, 2872126393, 522920738, 1141587749, 3210903262, 1926940553, 3054024853, - 2021162538, 2262742000, 1877899947, 3147002868, 669840763, 4158174590, 4238502559, 1023731922, - 3386840011, 829588074, 3449720188, 2835142880, 2999162007, 813056473, 482949569, 638108879, - 3067201471, 1026714238, 4004452838, 2383667807, 3999477803, 771648919, 630660440, 3827121348, - 176185980, 2878191002, 2666149832, 3909811063, 2429163983, 2665690412, 907266128, 4269332098, - 2022665808, 1527122180, 3072053668, 1072477492, 3006022924, 549664855, 2800340954, 37352654, - 1212772743, 2711280533, 3029527946, 2511120040, 1305308377, 3474662224, 4226330922, 442988428, - 954940108, 3274548099, 4212288177, 2688499880, 3982226758, 3922609956, 1279948029, 1939943640, - 3650489901, 2733364929, 2494263275, 1864579964, 1225941120, 2390465139, 1267503249, 3533240729, - 904410805, 2842550015, 2517736241, 1796069820, 3335274381, 673539835, 1924694759, 3598098235, - 2792633405, 16535707, 3703535497, 3592841791, 2929082877, 1317622811, 294990855, 1396706563, - 2383271770, 3853857605, 277813677, 277580220, 1101318484, 3761974115, 1132150143, 2544692622, - 3419825776, 743770306, 1695464553, 1548693232, 2421159615, 2575672031, 2678971806, 1591267897, - 626546738, 3823443129, 267710932, 1455435162, 2353985540, 3248523795, 335348168, 3872552561, - 2814522612, 2634118860, 3503767026, 1301019273, 1414467789, 722985138, 3070909565, 4253482569, - 3744939841, 558142907, 2229819389, 13833173, 77003966, 2763671364, 3905603970, 2931990126, - 2280419384, 1879090457, 2934846267, 4284933164, 2331863845, 62191163, 3178861020, 1522063815, - 785672270, 1215568492, 2936443917, 
802972489, 2956820173, 3916732783, 2893572089, 1391232801, - 3168640330, 2396859648, 894950918, 1103583736, 961991865, 2807302642, 305977505, 3054505899, - 1048256994, 781017659, 2459278754, 3164823415, 537658277, 905753687, 464963300, 4149131560, - 1029507924, 2278300961, 1231291503, 414073408, 3630740085, 2345841814, 475358196, 3258243317, - 4167625072, 4178911231, 2927355042, 655438830, 3138378018, 623200562, 2785714112, 273403236, - 807993669, 98, -]; -const POW5_14: [u32; 595] = [ - 1691320321, 2671006246, 1682531301, 2072858707, 1240508969, 3108358191, 1125119096, 2470144952, - 1610099978, 1690632660, 1941696884, 2663506355, 1006364675, 3909158537, 4147711374, 1072663936, - 4078768933, 745751659, 4123687570, 471458681, 655028926, 4113407388, 3945524552, 985625313, - 1254424514, 2127508744, 570530434, 945388122, 3194649404, 2589065070, 2731705399, 202030749, - 2090780394, 3348662271, 1481754777, 1130635472, 4025144705, 1924486271, 2578567861, 125491448, - 1558036315, 994248173, 3817216711, 763950077, 1030439870, 959586474, 3845661701, 483795093, - 1637944470, 2275463649, 3398804829, 1758016486, 2665513698, 2004912571, 1094885097, 4223064276, - 3307819021, 651121777, 1757003305, 3603542336, 129917786, 2215974994, 3042386306, 2205352757, - 3944939700, 3710987569, 97967515, 1217242524, 930630949, 3660328512, 1787663098, 1784141600, - 2500542892, 4034561586, 3444961378, 785043562, 3869499367, 885623728, 2625011087, 3053789617, - 1965731793, 3900511934, 2648823592, 3851062028, 3321968688, 799195417, 1011847510, 1369129160, - 1348009103, 2876796955, 2915408967, 3305284948, 263399535, 1715990604, 2645821294, 1587844552, - 2624912049, 3035631499, 2306636348, 3499275462, 675152704, 854794152, 4004972748, 1739996642, - 1333476491, 4012621867, 3658792931, 3297985728, 2864481726, 3066357406, 785287846, 1671499798, - 433044045, 1919608025, 264833858, 3999983367, 1116778570, 1301982149, 4213901070, 4081649357, - 536169226, 1389008649, 188923873, 373495152, 2551132278, 
1800758715, 3951840330, 2632334454, - 3118778225, 1034046547, 1862428410, 3037609062, 1994608505, 29051798, 2571685694, 264151332, - 2260643090, 2717535964, 3508441116, 3283713017, 1903365635, 923575694, 1219598101, 2288281570, - 3676533911, 1014136356, 555142354, 2389170030, 4185108175, 884862419, 836141292, 2957159173, - 1997444768, 4233903127, 2876184692, 3089125070, 1480848293, 1097600237, 299700527, 2507669891, - 2982628312, 2114881043, 2529576251, 2812279824, 2987750993, 4241938954, 2204775591, 1037094060, - 829315638, 1231047149, 52608178, 3735136637, 3455232602, 962039123, 488286513, 50685385, - 3516451821, 843975207, 1572355722, 675489076, 2428445672, 1555117248, 3708476086, 10375249, - 4172112346, 2117510871, 2227658327, 3187664554, 3050656558, 328034318, 3179601324, 1247769761, - 3439263953, 1431538938, 2962525068, 1213366289, 3813013550, 2651093719, 1860661503, 3933716208, - 264320617, 789980519, 2257856172, 102000748, 977269860, 1113845122, 3008928583, 1461738106, - 557786285, 2926560363, 1038106190, 3643478847, 828004507, 457818698, 1933056971, 373408056, - 2076808229, 3160935130, 2781854874, 2519636100, 177606000, 4237103862, 3977834316, 1621936232, - 2599050516, 319893558, 3343370366, 765044144, 976657331, 7026264, 294277429, 3829376742, - 3029627280, 2705178718, 3614653880, 230519152, 3288033233, 293525479, 3805751881, 3227511198, - 2520308544, 3648103003, 1111086184, 437622105, 2232033852, 3239146386, 584244184, 1450926016, - 2462430443, 3226534010, 298582169, 4214576928, 1762099469, 964985185, 1585788148, 1641127666, - 787006566, 2315956284, 3258232694, 2275058964, 2541003317, 1508235863, 2613339827, 4080647514, - 1152057965, 3149266279, 731345410, 914737650, 65395712, 1884566942, 1379520432, 2611027720, - 4163073378, 2619704967, 2746552541, 1388822415, 3005141199, 843440249, 4288674003, 3136174279, - 4051522914, 4144149433, 3427566947, 3419023197, 3758479825, 3893877676, 96899594, 1657725776, - 253618880, 434129337, 1499045748, 2996992534, 
4036042074, 2110713869, 906222950, 928326225, - 2541827893, 1604330202, 226792470, 4022228930, 815850898, 1466012310, 3377712199, 292769859, - 2822055597, 3225701344, 3052947004, 385831222, 705324593, 4030158636, 3540280538, 2982120874, - 2136414455, 255762046, 3852783591, 3262064164, 2358991588, 3756586117, 4143612643, 3326743817, - 2897365738, 807711264, 3719310016, 3721264861, 3627337076, 944539331, 3640975513, 3712525681, - 1162911839, 2008243316, 2179489649, 2867584109, 261861553, 3570253908, 2062868357, 2220328623, - 3857004679, 3744109002, 4138041873, 1451860932, 2364975637, 2802161722, 2680106834, 753401584, - 1223182946, 1245401957, 4163377735, 3565815922, 2216942838, 4036140094, 71979081, 3924559643, - 400477238, 551750683, 1174153235, 859969898, 1185921017, 1711399735, 812991545, 4051735761, - 3549118738, 1631653329, 3631835958, 3648867800, 1206500363, 2155893137, 361030362, 3454286017, - 2505909489, 1083595169, 453595313, 1510564703, 1706163902, 1632924345, 1381875722, 1661526119, - 1082778324, 3571910052, 1140625929, 851544870, 1145546234, 2938573139, 907528924, 1304752338, - 1764668294, 1788942063, 1700368828, 104979467, 1413911959, 3327497828, 1956384744, 1272712474, - 2815637534, 3307809377, 1320574940, 1111968962, 4073107827, 434096622, 169451929, 3201183459, - 3331028877, 2852366972, 3369830128, 2924794558, 3106537952, 3739481231, 1612955817, 4138608722, - 2721281595, 2755775390, 843505117, 982234295, 1157276611, 814674632, 4246504726, 3532006708, - 992340967, 1647538031, 204696133, 193866982, 3899126129, 300851698, 1379496684, 1759463683, - 1354782756, 1374637239, 3410883240, 1073406229, 3038431791, 1053909855, 3607043270, 173719711, - 3733903830, 171820911, 1573050589, 932781534, 4183534770, 2158849555, 372245998, 3573073830, - 841339264, 2759200520, 1610547277, 2603293319, 3890906486, 1557138278, 3964109906, 677238797, - 537994297, 1124184993, 4287078344, 4207654540, 2943022776, 2977947524, 3255359985, 4098397558, - 2274666217, 2915862060, 
243524940, 2467726756, 2869020032, 507521339, 3403121914, 522051455, - 1803903108, 3471254194, 473535371, 1948602036, 3352095732, 3116527002, 1795743673, 775867940, - 2551469548, 3757442064, 3162525227, 3765412747, 3040105484, 1927625810, 48214767, 2997207130, - 1342349989, 2536583992, 1501320191, 3592287317, 887432730, 967585477, 3334212779, 948663609, - 1064513472, 15386372, 2465931737, 3230242590, 3036652803, 2063155087, 1927500726, 2821790499, - 2187774383, 501520074, 3688568496, 3606711121, 2576459247, 3176542345, 378322447, 156541411, - 1400607301, 1406179107, 677848877, 2253753529, 193196070, 4207435024, 4166396241, 509467541, - 2906024136, 1221753746, 3375413222, 431327897, 2749265123, 2848827671, 3412997614, 2051920238, - 1283516885, 1300498239, 1957256104, 2634010560, 3531900395, 360276850, 1461184973, 2012063967, - 2873572430, 2914608609, 4289554777, 1539331673, 1859532928, 4213441063, 538215691, 3512720863, - 4258743698, 3040408445, 982396546, 343095663, 4138069496, 1021581857, 214185242, 1968079460, - 2864275059, 3347192726, 4096783459, 3259169450, 3707808869, 142485006, 399610869, 230556456, - 2219467721, 4191227798, 2242548189, 3136366572, 179755707, 3464881829, 452317775, 3887426070, - 3446430233, 1473370015, 1576807208, 3964523248, 419325089, 2373067114, 1596072055, 1928415752, - 3635452689, 1005598891, 3335462724, 3290848636, 3669078247, 1178176812, 2110774376, 3068593619, - 1253036518, 908857731, 3631223047, 4138506423, 2903592318, 3596915748, 3289036113, 3721512676, - 2704409359, 3386016968, 3676268074, 2185259502, 1096257611, 3360076717, 3548676554, 170167319, - 3360064287, 3899940843, 9640, -]; - -pub(crate) const POW5: [&'static [u32]; 14] = [ - &POW5_1, &POW5_2, &POW5_3, &POW5_4, &POW5_5, &POW5_6, &POW5_7, &POW5_8, &POW5_9, &POW5_10, - &POW5_11, &POW5_12, &POW5_13, &POW5_14, -]; diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/large_powers64.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/large_powers64.rs --- 
s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/large_powers64.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/large_powers64.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,625 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Precalculated large powers for 64-bit limbs. - -/// Large powers (&[u64]) for base5 operations. -const POW5_1: [u64; 1] = [5]; -const POW5_2: [u64; 1] = [25]; -const POW5_3: [u64; 1] = [625]; -const POW5_4: [u64; 1] = [390625]; -const POW5_5: [u64; 1] = [152587890625]; -const POW5_6: [u64; 2] = [3273344365508751233, 1262]; -const POW5_7: [u64; 3] = [7942358959831785217, 16807427164405733357, 1593091]; -const POW5_8: [u64; 5] = [ - 279109966635548161, - 2554917779393558781, - 14124656261812188652, - 11976055582626787546, - 2537941837315, -]; -const POW5_9: [u64; 10] = [ - 13750482914757213185, - 1302999927698857842, - 14936872543252795590, - 2788415840139466767, - 2095640732773017264, - 7205570348933370714, - 7348167152523113408, - 9285516396840364274, - 6907659600622710236, - 349175, -]; -const POW5_10: [u64; 19] = [ - 8643096425819600897, - 6743743997439985372, - 14059704609098336919, - 10729359125898331411, - 4933048501514368705, - 12258131603170554683, - 2172371001088594721, - 13569903330219142946, - 13809142207969578845, - 16716360519037769646, - 9631256923806107285, - 12866941232305103710, - 1397931361048440292, - 7619627737732970332, - 12725409486282665900, - 11703051443360963910, - 9947078370803086083, - 13966287901448440471, - 121923442132, -]; -const POW5_11: [u64; 38] = [ - 17679772531488845825, - 2216509366347768155, - 1568689219195129479, - 5511594616325588277, - 1067709417009240089, - 9070650952098657518, - 11515285870634858015, - 2539561553659505564, - 17604889300961091799, - 14511540856854204724, - 12099083339557485471, - 7115240299237943815, - 313979240050606788, - 10004784664717172195, - 15570268847930131473, - 10359715202835930803, - 
17685054012115162812, - 13183273382855797757, - 7743260039872919062, - 9284593436392572926, - 11105921222066415013, - 18198799323400703846, - 16314988383739458320, - 4387527177871570570, - 8476708682254672590, - 4925096874831034057, - 14075687868072027455, - 112866656203221926, - 9852830467773230418, - 25755239915196746, - 2201493076310172510, - 8342165458688466438, - 13954006576066379050, - 15193819059903295636, - 12565616718911389531, - 3815854855847885129, - 15696762163583540628, - 805, -]; -const POW5_12: [u64; 75] = [ - 16359721904723189761, - 5323973632697650495, - 17187956456762001185, - 3930387638628283780, - 3374723710406992273, - 16884225088663222131, - 10967440051041439154, - 9686916182456720060, - 10554548046311730194, - 7390739362393647554, - 6316162333127736719, - 18122464886584070891, - 4044404959645932768, - 3801320885861987401, - 12080950653257274590, - 16414324262488991299, - 16395687498836410113, - 12173633940896186260, - 10843185433142632150, - 11048169832730399808, - 12674828934734683716, - 17370808310130582550, - 10500926985433408692, - 10252725158410704555, - 14170108270502067523, - 3698946465517688080, - 989984870770509463, - 10965601426733943069, - 11389898658438335655, - 6901098232861256586, - 1921335291173932590, - 7662788640922083388, - 9775023833308395430, - 4640401278902814207, - 14532050972198413359, - 8378549018693130223, - 11672322628395371653, - 8930704142764178555, - 6275193859483102017, - 15782593304269205087, - 8673060659034172558, - 8018354414354334043, - 1824896661540749038, - 11345563346725559868, - 14959216444480821949, - 970189517688324683, - 3338835207603007873, - 17684964260791738489, - 1436466329061721851, - 4554134986752476101, - 6398757850768963907, - 4709779218751158342, - 10033277748582410264, - 17932125878679265063, - 10004750887749091440, - 256584531835386932, - 14396282740722731628, - 3086085133731396950, - 17831272085689600064, - 10573926491412564693, - 14888061047859191737, - 4570995450261499817, - 
10410165022312935266, - 5691078631447480790, - 8632710455805418155, - 790672778942823293, - 16505464105756800547, - 2092171438149740401, - 17505030673829275878, - 1291290830058928444, - 14856191690683232796, - 8916773426496500052, - 10152003807578858265, - 13104441193763861714, - 649395, -]; -const POW5_13: [u64; 149] = [ - 15308384451594534913, - 17913664074042735335, - 6115977719198531863, - 5794980608663993169, - 16544350702855106930, - 9253787637781258566, - 4977988951675168190, - 9087837664087448770, - 2098480401110016986, - 15474332540882100712, - 14042133997396540944, - 1090855284423485362, - 12639956485351058381, - 1454115676006639319, - 3180465001342538023, - 14649076551958697729, - 9801292446545910916, - 13552201410826594004, - 6101141927469189381, - 1881431857880609316, - 4907847477899433595, - 8714572486973123228, - 3514969632331374520, - 11667642286891470094, - 2391499697425323350, - 17486585679659076043, - 18267223761882105642, - 2886610765822313148, - 9302834862968900288, - 15246507846733637044, - 15924227519624562840, - 9743741243284697760, - 3159780987244964246, - 7304816812369628428, - 17584602612559717809, - 4146812420657846766, - 14525415362681041515, - 8477630142371600195, - 4380695748062263745, - 12119915994367943173, - 16970630866565485122, - 4332724980155264503, - 8079943140620527639, - 1687908087554405626, - 17051081099834002166, - 12638146269730763230, - 11883749876933445771, - 4662462156371383785, - 4796962238316531176, - 3325504751659868927, - 6469595803187862550, - 5852556621152583005, - 9229334792448387881, - 17979733373938620709, - 13951623534175792756, - 17075879371091039277, - 14212246479457938037, - 4008999959804158260, - 2414266395366403722, - 3252733766253918247, - 6382678985007829216, - 2245927470982310841, - 13790724502051307301, - 13116936866733148041, - 9718402891306794538, - 13516274400356104875, - 17859223875778049403, - 4396895129099725471, - 3563053650368467915, - 12176845952536972668, - 3492050964335269015, - 
2740656767075170753, - 4409704077614761919, - 10237775279597492710, - 3314206875098230827, - 16437361028114095448, - 12361736225407656572, - 16792510651790145480, - 11449053143229929935, - 18336641737580333136, - 6558939822118891088, - 4606255756908155300, - 2360792578991605004, - 160428430149144538, - 11644861220729221511, - 10785178451159739786, - 14923560618031934681, - 1902620814992781610, - 14064076995338910412, - 11547019064112212657, - 16847481479966225734, - 8331994491163145469, - 11739712981738851885, - 8008309968651120619, - 10266969595459035264, - 15175153381217702033, - 12208659352573720245, - 7714061140750342961, - 2892831567213510541, - 15453714249045017319, - 71020323573871677, - 15431137995750602633, - 5659146884637671933, - 5998809010488554503, - 16552192379299157850, - 1192197967194298797, - 16157555793424861524, - 10929371590994640255, - 3194469143425738352, - 6651586784672005225, - 11062427140788057791, - 6834443579468668318, - 16421563197797455922, - 6251046422506172884, - 13952303462156793860, - 16632486601871393224, - 11313454360291325172, - 5587835232504462834, - 3105197524618514637, - 18268568531031972989, - 2397205535804309313, - 59413027864729597, - 11869878125348715710, - 12592801707270523266, - 8070632061321113656, - 18403647807860650811, - 267109013517069093, - 6537214311028855260, - 5220826919973709902, - 3448740582779163661, - 16822239213112884941, - 5975299384311048185, - 10294433804430712138, - 4739856055412448774, - 12057273038326387897, - 13119002941950056609, - 3354445304051737058, - 13592813067499314594, - 3890182464434078629, - 17820384357466425060, - 9785228118969879380, - 1778431746734556271, - 10075313876350055029, - 13994048489400919028, - 17948287074199726448, - 2815088342305858722, - 2676626035777198370, - 1174257960026283968, - 421714788677, -]; -const POW5_14: [u64; 298] = [ - 11471884475673051137, - 8902860357476377573, - 13350296775839230505, - 10609191786344608888, - 7261211985859587338, - 11439672689354862964, - 
16789708072300570627, - 4607056528866348430, - 3202978990421512997, - 2024899620433984146, - 17666950207239811774, - 4233228489390288200, - 9137580478688460738, - 4060411066587388546, - 11119949806060600124, - 867715462473090103, - 14382394941384869610, - 4856042377419278489, - 8265605599571137921, - 538981667666252469, - 4270263388700786523, - 3281140600308898503, - 4121392524544394174, - 2077884106245940229, - 9773041957329767574, - 7550623316597646685, - 8611033926449791714, - 18137922955420802793, - 2796546741236224013, - 15477096484628446761, - 9517540128113714010, - 9471917970500821378, - 15938570248662483124, - 5228016831978462619, - 15720991252586974501, - 7662829825220776698, - 17328310068068434348, - 3371736428170309730, - 3803724952191098855, - 13115926536504376719, - 16752571196153442257, - 16540185467776259880, - 3432518182450051120, - 5880364967211798870, - 12355748840305392783, - 14196090758536469575, - 7370123524686686319, - 6819740424617592686, - 13037938013537368753, - 15029273671291927100, - 3671312928327205696, - 7473228676544792780, - 17234079691312938123, - 14164740848093544419, - 13169904779481875902, - 7179036968465894054, - 8244653688947194445, - 17179797746073799490, - 5591970751047577674, - 17530550506268329742, - 5965746721852312330, - 1604149463243472865, - 7734199791463116918, - 11305790396015856714, - 4441196105025505137, - 13046431581185664762, - 124776524294606713, - 1134521334706523966, - 11671728093344476434, - 14103440020972933148, - 3966727403013869059, - 9828094508409132821, - 4355682486381147287, - 10261407143988481234, - 3800455155249557199, - 12700901937937547500, - 18184475466894579360, - 13267691151779895412, - 4714157123477697445, - 10770360171308585263, - 9083344917597998040, - 12078649873810212155, - 18218989082046199377, - 4454285072780637351, - 5287307245618354742, - 16042289702059031730, - 4131926574212754010, - 217692071448455473, - 3624845916216282093, - 2901203491797614218, - 6679177724033967080, - 
44561358851332790, - 9094639944041587162, - 13690915012276084311, - 1408896670826320686, - 5359130319612337580, - 6148412925099835601, - 5211368532286409612, - 11386360825549027374, - 16895182466965795071, - 3392940493846427241, - 438089879085393580, - 4783928372776399972, - 6278117363595909959, - 12569481049412674733, - 15648622492570893902, - 1966316336235305115, - 1603775390515993547, - 13576113010204316709, - 10821754650102840474, - 18198222517222903152, - 6966163076615302988, - 1373932372410129684, - 3285839581819684990, - 30177575069719475, - 16447047871247307061, - 11618654126674833808, - 990072222556306872, - 1260682336135768017, - 13862055046689532489, - 15668483092844698432, - 1879572630092764264, - 13912027797058626108, - 6231679788219816920, - 13857858054844167403, - 18101470072534728857, - 4144579812461609229, - 7048589655616599284, - 9946956499532694630, - 9771303850109874038, - 6477823708780339765, - 17526247621747041971, - 13525995675852669549, - 3928768291901239810, - 8094153383078124544, - 11214278667728965552, - 11251547162596832610, - 5964946855123292381, - 3622548288590237903, - 13469765967150053587, - 17798986288523466082, - 14684592818807932259, - 16724077276802963921, - 7119877993753121290, - 1864571304902781632, - 12871984921385213812, - 9065447042604670298, - 3987130777300360550, - 6890545752116901685, - 17275341711601865750, - 6296474927799264658, - 1257436973037243463, - 13854281781965301421, - 1657132483318662716, - 17309399540017292849, - 12808111630089217242, - 1098489625264462071, - 14010458905686364135, - 16134414519481621220, - 14288255900328821475, - 3469093466388187882, - 15982710881468295872, - 4056765540058056052, - 15945176389096104089, - 8625339365793505375, - 12316179968863788913, - 15334123773538054321, - 9536238824220581765, - 16080825720106203271, - 6235695225418121745, - 12035192956458019349, - 3235835166714703698, - 5348960676912581218, - 15315062772709464647, - 17335089708021308662, - 16855855317958414409, - 
2369751139431140406, - 3693542588628609043, - 7350405893393987577, - 17402072586341663801, - 7007897690013647122, - 15671767872059304758, - 9259490518292347915, - 14836045474406130394, - 4654005815464502513, - 6487825998330548401, - 7013356660323385022, - 7136200343936679946, - 15341236858676437716, - 3657357368867197449, - 12621075530054608378, - 5603868621997066972, - 7683447656788439942, - 450883379216880060, - 14291494350184945047, - 5466258454997635048, - 14206933098432772126, - 4775870327277641692, - 1864430798867181939, - 13748978265070608793, - 12250822864261576589, - 12561896977498605296, - 16060949594257359328, - 17775189113543311529, - 11835965177892927035, - 4218664174878121437, - 3499000902478111683, - 15169853304359126294, - 7076121963053575143, - 832652347668916805, - 1292148207755194737, - 7556838978364207852, - 5904021986723518500, - 4610244652288570024, - 4526508363195533871, - 746120481022614726, - 737965197247830486, - 4006266184415762653, - 9272188239892688050, - 15346235246415709678, - 11850675997347533184, - 11181059668610842701, - 6687857983250662774, - 2908718488661492818, - 4828337780126983225, - 18071738646453002184, - 12790187227727197880, - 17602483480871623153, - 12523532189621855977, - 10598805712727696716, - 2179787555896149376, - 2242193929457337594, - 14908923241136742532, - 8369182018012550027, - 13385381554043022324, - 3332327430110633913, - 16138090784046208492, - 16172324607469047339, - 8279089815915615244, - 12872906602736235247, - 10894545290539475621, - 15428756545851905023, - 4155747980686992922, - 4074479178894544043, - 66083965608603584, - 13873786284662268377, - 8861183628277687555, - 12119497911296021430, - 2154012318305274287, - 15490706314503067312, - 13643145488710608367, - 672340241093017103, - 6039493278284091973, - 9679797700977436461, - 18070795828318171174, - 2188146431134935377, - 5247392385741514952, - 1852539214842869734, - 12235621681634112739, - 8812930319623534062, - 5585597406294108629, - 
11312989214475901864, - 1547377291787797995, - 8641748937186208205, - 12518148659168623694, - 6611379197521520985, - 18096591571068008576, - 15087021227100112139, - 13058454842015958418, - 1473584652966833794, - 4387660670140018168, - 8452836916843525402, - 14376083294443363955, - 13998026203969090659, - 611968444648172645, - 990232438801273845, - 18001186324715561929, - 13470591857250177501, - 14881554140239420091, - 16696367836720124495, - 6328076032778459673, - 17027497695968504616, - 10192245646262428833, - 8282482589527318647, - 4319014353374321425, - 14134087271041670980, - 5060230880114618599, - 13179509240430058600, - 3903514232614801894, - 17774749744702165255, - 15448635507030969726, - 15983775238358480209, - 14542832143965487887, - 9385618098039514666, - 14431419612662304843, - 730863073501675978, - 16750118380379734815, - 9640, -]; - -pub(crate) const POW5: [&[u64]; 14] = [ - &POW5_1, &POW5_2, &POW5_3, &POW5_4, &POW5_5, &POW5_6, &POW5_7, &POW5_8, &POW5_9, &POW5_10, - &POW5_11, &POW5_12, &POW5_13, &POW5_14, -]; diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/large_powers.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/large_powers.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/large_powers.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/large_powers.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,9 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Precalculated large powers for limbs. 
- -#[cfg(limb_width_32)] -pub(crate) use super::large_powers32::*; - -#[cfg(limb_width_64)] -pub(crate) use super::large_powers64::*; diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/math.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/math.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/math.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/math.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,886 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Building-blocks for arbitrary-precision math. -//! -//! These algorithms assume little-endian order for the large integer -//! buffers, so for a `vec![0, 1, 2, 3]`, `3` is the most significant limb, -//! and `0` is the least significant limb. - -use super::large_powers; -use super::num::*; -use super::small_powers::*; -use alloc::vec::Vec; -use core::{cmp, iter, mem}; - -// ALIASES -// ------- - -// Type for a single limb of the big integer. -// -// A limb is analogous to a digit in base10, except, it stores 32-bit -// or 64-bit numbers instead. -// -// This should be all-known 64-bit platforms supported by Rust. -// https://forge.rust-lang.org/platform-support.html -// -// Platforms where native 128-bit multiplication is explicitly supported: -// - x86_64 (Supported via `MUL`). -// - mips64 (Supported via `DMULTU`, which `HI` and `LO` can be read-from). -// -// Platforms where native 64-bit multiplication is supported and -// you can extract hi-lo for 64-bit multiplications. -// aarch64 (Requires `UMULH` and `MUL` to capture high and low bits). -// powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits). -// -// Platforms where native 128-bit multiplication is not supported, -// requiring software emulation. -// sparc64 (`UMUL` only supported double-word arguments). 
- -// 32-BIT LIMB -#[cfg(limb_width_32)] -pub type Limb = u32; - -#[cfg(limb_width_32)] -pub const POW5_LIMB: &[Limb] = &POW5_32; - -#[cfg(limb_width_32)] -pub const POW10_LIMB: &[Limb] = &POW10_32; - -#[cfg(limb_width_32)] -type Wide = u64; - -// 64-BIT LIMB -#[cfg(limb_width_64)] -pub type Limb = u64; - -#[cfg(limb_width_64)] -pub const POW5_LIMB: &[Limb] = &POW5_64; - -#[cfg(limb_width_64)] -pub const POW10_LIMB: &[Limb] = &POW10_64; - -#[cfg(limb_width_64)] -type Wide = u128; - -/// Cast to limb type. -#[inline] -pub(crate) fn as_limb(t: T) -> Limb { - Limb::as_cast(t) -} - -/// Cast to wide type. -#[inline] -fn as_wide(t: T) -> Wide { - Wide::as_cast(t) -} - -// SPLIT -// ----- - -/// Split u64 into limbs, in little-endian order. -#[inline] -#[cfg(limb_width_32)] -fn split_u64(x: u64) -> [Limb; 2] { - [as_limb(x), as_limb(x >> 32)] -} - -/// Split u64 into limbs, in little-endian order. -#[inline] -#[cfg(limb_width_64)] -fn split_u64(x: u64) -> [Limb; 1] { - [as_limb(x)] -} - -// HI64 -// ---- - -// NONZERO - -/// Check if any of the remaining bits are non-zero. -#[inline] -pub fn nonzero(x: &[T], rindex: usize) -> bool { - let len = x.len(); - let slc = &x[..len - rindex]; - slc.iter().rev().any(|&x| x != T::ZERO) -} - -/// Shift 64-bit integer to high 64-bits. -#[inline] -fn u64_to_hi64_1(r0: u64) -> (u64, bool) { - debug_assert!(r0 != 0); - let ls = r0.leading_zeros(); - (r0 << ls, false) -} - -/// Shift 2 64-bit integers to high 64-bits. -#[inline] -fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) { - debug_assert!(r0 != 0); - let ls = r0.leading_zeros(); - let rs = 64 - ls; - let v = match ls { - 0 => r0, - _ => (r0 << ls) | (r1 >> rs), - }; - let n = r1 << ls != 0; - (v, n) -} - -/// Trait to export the high 64-bits from a little-endian slice. -trait Hi64: AsRef<[T]> { - /// Get the hi64 bits from a 1-limb slice. - fn hi64_1(&self) -> (u64, bool); - - /// Get the hi64 bits from a 2-limb slice. 
- fn hi64_2(&self) -> (u64, bool); - - /// Get the hi64 bits from a 3-limb slice. - fn hi64_3(&self) -> (u64, bool); - - /// High-level exporter to extract the high 64 bits from a little-endian slice. - #[inline] - fn hi64(&self) -> (u64, bool) { - match self.as_ref().len() { - 0 => (0, false), - 1 => self.hi64_1(), - 2 => self.hi64_2(), - _ => self.hi64_3(), - } - } -} - -impl Hi64 for [u32] { - #[inline] - fn hi64_1(&self) -> (u64, bool) { - debug_assert!(self.len() == 1); - let r0 = self[0] as u64; - u64_to_hi64_1(r0) - } - - #[inline] - fn hi64_2(&self) -> (u64, bool) { - debug_assert!(self.len() == 2); - let r0 = (self[1] as u64) << 32; - let r1 = self[0] as u64; - u64_to_hi64_1(r0 | r1) - } - - #[inline] - fn hi64_3(&self) -> (u64, bool) { - debug_assert!(self.len() >= 3); - let r0 = self[self.len() - 1] as u64; - let r1 = (self[self.len() - 2] as u64) << 32; - let r2 = self[self.len() - 3] as u64; - let (v, n) = u64_to_hi64_2(r0, r1 | r2); - (v, n || nonzero(self, 3)) - } -} - -impl Hi64 for [u64] { - #[inline] - fn hi64_1(&self) -> (u64, bool) { - debug_assert!(self.len() == 1); - let r0 = self[0]; - u64_to_hi64_1(r0) - } - - #[inline] - fn hi64_2(&self) -> (u64, bool) { - debug_assert!(self.len() >= 2); - let r0 = self[self.len() - 1]; - let r1 = self[self.len() - 2]; - let (v, n) = u64_to_hi64_2(r0, r1); - (v, n || nonzero(self, 2)) - } - - #[inline] - fn hi64_3(&self) -> (u64, bool) { - self.hi64_2() - } -} - -// SCALAR -// ------ - -// Scalar-to-scalar operations, for building-blocks for arbitrary-precision -// operations. - -mod scalar { - use super::*; - - // ADDITION - - /// Add two small integers and return the resulting value and if overflow happens. - #[inline] - pub fn add(x: Limb, y: Limb) -> (Limb, bool) { - x.overflowing_add(y) - } - - /// AddAssign two small integers and return if overflow happens. 
- #[inline] - pub fn iadd(x: &mut Limb, y: Limb) -> bool { - let t = add(*x, y); - *x = t.0; - t.1 - } - - // SUBTRACTION - - /// Subtract two small integers and return the resulting value and if overflow happens. - #[inline] - pub fn sub(x: Limb, y: Limb) -> (Limb, bool) { - x.overflowing_sub(y) - } - - /// SubAssign two small integers and return if overflow happens. - #[inline] - pub fn isub(x: &mut Limb, y: Limb) -> bool { - let t = sub(*x, y); - *x = t.0; - t.1 - } - - // MULTIPLICATION - - /// Multiply two small integers (with carry) (and return the overflow contribution). - /// - /// Returns the (low, high) components. - #[inline] - pub fn mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) { - // Cannot overflow, as long as wide is 2x as wide. This is because - // the following is always true: - // `Wide::max_value() - (Narrow::max_value() * Narrow::max_value()) >= Narrow::max_value()` - let z: Wide = as_wide(x) * as_wide(y) + as_wide(carry); - let bits = mem::size_of::() * 8; - (as_limb(z), as_limb(z >> bits)) - } - - /// Multiply two small integers (with carry) (and return if overflow happens). - #[inline] - pub fn imul(x: &mut Limb, y: Limb, carry: Limb) -> Limb { - let t = mul(*x, y, carry); - *x = t.0; - t.1 - } -} // scalar - -// SMALL -// ----- - -// Large-to-small operations, to modify a big integer from a native scalar. - -mod small { - use super::*; - - // MULTIPLICATIION - - /// ADDITION - - /// Implied AddAssign implementation for adding a small integer to bigint. - /// - /// Allows us to choose a start-index in x to store, to allow incrementing - /// from a non-zero start. - #[inline] - pub fn iadd_impl(x: &mut Vec, y: Limb, xstart: usize) { - if x.len() <= xstart { - x.push(y); - } else { - // Initial add - let mut carry = scalar::iadd(&mut x[xstart], y); - - // Increment until overflow stops occurring. 
- let mut size = xstart + 1; - while carry && size < x.len() { - carry = scalar::iadd(&mut x[size], 1); - size += 1; - } - - // If we overflowed the buffer entirely, need to add 1 to the end - // of the buffer. - if carry { - x.push(1); - } - } - } - - /// AddAssign small integer to bigint. - #[inline] - pub fn iadd(x: &mut Vec, y: Limb) { - iadd_impl(x, y, 0); - } - - // SUBTRACTION - - /// SubAssign small integer to bigint. - /// Does not do overflowing subtraction. - #[inline] - pub fn isub_impl(x: &mut Vec, y: Limb, xstart: usize) { - debug_assert!(x.len() > xstart && (x[xstart] >= y || x.len() > xstart + 1)); - - // Initial subtraction - let mut carry = scalar::isub(&mut x[xstart], y); - - // Increment until overflow stops occurring. - let mut size = xstart + 1; - while carry && size < x.len() { - carry = scalar::isub(&mut x[size], 1); - size += 1; - } - normalize(x); - } - - // MULTIPLICATION - - /// MulAssign small integer to bigint. - #[inline] - pub fn imul(x: &mut Vec, y: Limb) { - // Multiply iteratively over all elements, adding the carry each time. - let mut carry: Limb = 0; - for xi in x.iter_mut() { - carry = scalar::imul(xi, y, carry); - } - - // Overflow of value, add to end. - if carry != 0 { - x.push(carry); - } - } - - /// Mul small integer to bigint. - #[inline] - pub fn mul(x: &[Limb], y: Limb) -> Vec { - let mut z = Vec::::default(); - z.extend_from_slice(x); - imul(&mut z, y); - z - } - - /// MulAssign by a power. - /// - /// Theoretically... - /// - /// Use an exponentiation by squaring method, since it reduces the time - /// complexity of the multiplication to ~`O(log(n))` for the squaring, - /// and `O(n*m)` for the result. Since `m` is typically a lower-order - /// factor, this significantly reduces the number of multiplications - /// we need to do. Iteratively multiplying by small powers follows - /// the nth triangular number series, which scales as `O(p^2)`, but - /// where `p` is `n+m`. In short, it scales very poorly. 
- /// - /// Practically.... - /// - /// Exponentiation by Squaring: - /// running 2 tests - /// test bigcomp_f32_lexical ... bench: 1,018 ns/iter (+/- 78) - /// test bigcomp_f64_lexical ... bench: 3,639 ns/iter (+/- 1,007) - /// - /// Exponentiation by Iterative Small Powers: - /// running 2 tests - /// test bigcomp_f32_lexical ... bench: 518 ns/iter (+/- 31) - /// test bigcomp_f64_lexical ... bench: 583 ns/iter (+/- 47) - /// - /// Exponentiation by Iterative Large Powers (of 2): - /// running 2 tests - /// test bigcomp_f32_lexical ... bench: 671 ns/iter (+/- 31) - /// test bigcomp_f64_lexical ... bench: 1,394 ns/iter (+/- 47) - /// - /// Even using worst-case scenarios, exponentiation by squaring is - /// significantly slower for our workloads. Just multiply by small powers, - /// in simple cases, and use precalculated large powers in other cases. - pub fn imul_pow5(x: &mut Vec, n: u32) { - use super::large::KARATSUBA_CUTOFF; - - let small_powers = POW5_LIMB; - let large_powers = large_powers::POW5; - - if n == 0 { - // No exponent, just return. - // The 0-index of the large powers is `2^0`, which is 1, so we want - // to make sure we don't take that path with a literal 0. - return; - } - - // We want to use the asymptotically faster algorithm if we're going - // to be using Karabatsu multiplication sometime during the result, - // otherwise, just use exponentiation by squaring. - let bit_length = 32 - n.leading_zeros() as usize; - debug_assert!(bit_length != 0 && bit_length <= large_powers.len()); - if x.len() + large_powers[bit_length - 1].len() < 2 * KARATSUBA_CUTOFF { - // We can use iterative small powers to make this faster for the - // easy cases. - - // Multiply by the largest small power until n < step. - let step = small_powers.len() - 1; - let power = small_powers[step]; - let mut n = n as usize; - while n >= step { - imul(x, power); - n -= step; - } - - // Multiply by the remainder. 
- imul(x, small_powers[n]); - } else { - // In theory, this code should be asymptotically a lot faster, - // in practice, our small::imul seems to be the limiting step, - // and large imul is slow as well. - - // Multiply by higher order powers. - let mut idx: usize = 0; - let mut bit: usize = 1; - let mut n = n as usize; - while n != 0 { - if n & bit != 0 { - debug_assert!(idx < large_powers.len()); - large::imul(x, large_powers[idx]); - n ^= bit; - } - idx += 1; - bit <<= 1; - } - } - } - - // BIT LENGTH - - /// Get number of leading zero bits in the storage. - #[inline] - pub fn leading_zeros(x: &[Limb]) -> usize { - x.last().map_or(0, |x| x.leading_zeros() as usize) - } - - /// Calculate the bit-length of the big-integer. - #[inline] - pub fn bit_length(x: &[Limb]) -> usize { - let bits = mem::size_of::() * 8; - // Avoid overflowing, calculate via total number of bits - // minus leading zero bits. - let nlz = leading_zeros(x); - bits.checked_mul(x.len()) - .map_or_else(usize::max_value, |v| v - nlz) - } - - // SHL - - /// Shift-left bits inside a buffer. - /// - /// Assumes `n < Limb::BITS`, IE, internally shifting bits. - #[inline] - pub fn ishl_bits(x: &mut Vec, n: usize) { - // Need to shift by the number of `bits % Limb::BITS)`. - let bits = mem::size_of::() * 8; - debug_assert!(n < bits); - if n == 0 { - return; - } - - // Internally, for each item, we shift left by n, and add the previous - // right shifted limb-bits. - // For example, we transform (for u8) shifted left 2, to: - // b10100100 b01000010 - // b10 b10010001 b00001000 - let rshift = bits - n; - let lshift = n; - let mut prev: Limb = 0; - for xi in x.iter_mut() { - let tmp = *xi; - *xi <<= lshift; - *xi |= prev >> rshift; - prev = tmp; - } - - // Always push the carry, even if it creates a non-normal result. - let carry = prev >> rshift; - if carry != 0 { - x.push(carry); - } - } - - /// Shift-left `n` digits inside a buffer. - /// - /// Assumes `n` is not 0. 
- #[inline] - pub fn ishl_limbs(x: &mut Vec, n: usize) { - debug_assert!(n != 0); - if !x.is_empty() { - x.reserve(n); - x.splice(..0, iter::repeat(0).take(n)); - } - } - - /// Shift-left buffer by n bits. - #[inline] - pub fn ishl(x: &mut Vec, n: usize) { - let bits = mem::size_of::() * 8; - // Need to pad with zeros for the number of `bits / Limb::BITS`, - // and shift-left with carry for `bits % Limb::BITS`. - let rem = n % bits; - let div = n / bits; - ishl_bits(x, rem); - if div != 0 { - ishl_limbs(x, div); - } - } - - // NORMALIZE - - /// Normalize the container by popping any leading zeros. - #[inline] - pub fn normalize(x: &mut Vec) { - // Remove leading zero if we cause underflow. Since we're dividing - // by a small power, we have at max 1 int removed. - while x.last() == Some(&0) { - x.pop(); - } - } -} // small - -// LARGE -// ----- - -// Large-to-large operations, to modify a big integer from a native scalar. - -mod large { - use super::*; - - // RELATIVE OPERATORS - - /// Compare `x` to `y`, in little-endian order. - #[inline] - pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering { - if x.len() > y.len() { - cmp::Ordering::Greater - } else if x.len() < y.len() { - cmp::Ordering::Less - } else { - let iter = x.iter().rev().zip(y.iter().rev()); - for (&xi, &yi) in iter { - if xi > yi { - return cmp::Ordering::Greater; - } else if xi < yi { - return cmp::Ordering::Less; - } - } - // Equal case. - cmp::Ordering::Equal - } - } - - /// Check if x is less than y. - #[inline] - pub fn less(x: &[Limb], y: &[Limb]) -> bool { - compare(x, y) == cmp::Ordering::Less - } - - /// Check if x is greater than or equal to y. - #[inline] - pub fn greater_equal(x: &[Limb], y: &[Limb]) -> bool { - !less(x, y) - } - - // ADDITION - - /// Implied AddAssign implementation for bigints. - /// - /// Allows us to choose a start-index in x to store, so we can avoid - /// padding the buffer with zeros when not needed, optimized for vectors. 
- pub fn iadd_impl(x: &mut Vec, y: &[Limb], xstart: usize) { - // The effective x buffer is from `xstart..x.len()`, so we need to treat - // that as the current range. If the effective y buffer is longer, need - // to resize to that, + the start index. - if y.len() > x.len() - xstart { - x.resize(y.len() + xstart, 0); - } - - // Iteratively add elements from y to x. - let mut carry = false; - for (xi, yi) in x[xstart..].iter_mut().zip(y.iter()) { - // Only one op of the two can overflow, since we added at max - // Limb::max_value() + Limb::max_value(). Add the previous carry, - // and store the current carry for the next. - let mut tmp = scalar::iadd(xi, *yi); - if carry { - tmp |= scalar::iadd(xi, 1); - } - carry = tmp; - } - - // Overflow from the previous bit. - if carry { - small::iadd_impl(x, 1, y.len() + xstart); - } - } - - /// AddAssign bigint to bigint. - #[inline] - pub fn iadd(x: &mut Vec, y: &[Limb]) { - iadd_impl(x, y, 0); - } - - /// Add bigint to bigint. - #[inline] - pub fn add(x: &[Limb], y: &[Limb]) -> Vec { - let mut z = Vec::::default(); - z.extend_from_slice(x); - iadd(&mut z, y); - z - } - - // SUBTRACTION - - /// SubAssign bigint to bigint. - pub fn isub(x: &mut Vec, y: &[Limb]) { - // Basic underflow checks. - debug_assert!(greater_equal(x, y)); - - // Iteratively add elements from y to x. - let mut carry = false; - for (xi, yi) in x.iter_mut().zip(y.iter()) { - // Only one op of the two can overflow, since we added at max - // Limb::max_value() + Limb::max_value(). Add the previous carry, - // and store the current carry for the next. - let mut tmp = scalar::isub(xi, *yi); - if carry { - tmp |= scalar::isub(xi, 1); - } - carry = tmp; - } - - if carry { - small::isub_impl(x, 1, y.len()); - } else { - small::normalize(x); - } - } - - // MULTIPLICATION - - /// Number of digits to bottom-out to asymptotically slow algorithms. 
- /// - /// Karatsuba tends to out-perform long-multiplication at ~320-640 bits, - /// so we go halfway, while Newton division tends to out-perform - /// Algorithm D at ~1024 bits. We can toggle this for optimal performance. - pub const KARATSUBA_CUTOFF: usize = 32; - - /// Grade-school multiplication algorithm. - /// - /// Slow, naive algorithm, using limb-bit bases and just shifting left for - /// each iteration. This could be optimized with numerous other algorithms, - /// but it's extremely simple, and works in O(n*m) time, which is fine - /// by me. Each iteration, of which there are `m` iterations, requires - /// `n` multiplications, and `n` additions, or grade-school multiplication. - fn long_mul(x: &[Limb], y: &[Limb]) -> Vec { - // Using the immutable value, multiply by all the scalars in y, using - // the algorithm defined above. Use a single buffer to avoid - // frequent reallocations. Handle the first case to avoid a redundant - // addition, since we know y.len() >= 1. - let mut z: Vec = small::mul(x, y[0]); - z.resize(x.len() + y.len(), 0); - - // Handle the iterative cases. - for (i, &yi) in y[1..].iter().enumerate() { - let zi: Vec = small::mul(x, yi); - iadd_impl(&mut z, &zi, i + 1); - } - - small::normalize(&mut z); - - z - } - - /// Split two buffers into halfway, into (lo, hi). - #[inline] - pub fn karatsuba_split(z: &[Limb], m: usize) -> (&[Limb], &[Limb]) { - (&z[..m], &z[m..]) - } - - /// Karatsuba multiplication algorithm with roughly equal input sizes. - /// - /// Assumes `y.len() >= x.len()`. - fn karatsuba_mul(x: &[Limb], y: &[Limb]) -> Vec { - if y.len() <= KARATSUBA_CUTOFF { - // Bottom-out to long division for small cases. - long_mul(x, y) - } else if x.len() < y.len() / 2 { - karatsuba_uneven_mul(x, y) - } else { - // Do our 3 multiplications. 
- let m = y.len() / 2; - let (xl, xh) = karatsuba_split(x, m); - let (yl, yh) = karatsuba_split(y, m); - let sumx = add(xl, xh); - let sumy = add(yl, yh); - let z0 = karatsuba_mul(xl, yl); - let mut z1 = karatsuba_mul(&sumx, &sumy); - let z2 = karatsuba_mul(xh, yh); - // Properly scale z1, which is `z1 - z2 - zo`. - isub(&mut z1, &z2); - isub(&mut z1, &z0); - - // Create our result, which is equal to, in little-endian order: - // [z0, z1 - z2 - z0, z2] - // z1 must be shifted m digits (2^(32m)) over. - // z2 must be shifted 2*m digits (2^(64m)) over. - let len = z0.len().max(m + z1.len()).max(2 * m + z2.len()); - let mut result = z0; - result.reserve_exact(len - result.len()); - iadd_impl(&mut result, &z1, m); - iadd_impl(&mut result, &z2, 2 * m); - - result - } - } - - /// Karatsuba multiplication algorithm where y is substantially larger than x. - /// - /// Assumes `y.len() >= x.len()`. - fn karatsuba_uneven_mul(x: &[Limb], mut y: &[Limb]) -> Vec { - let mut result = Vec::::default(); - result.resize(x.len() + y.len(), 0); - - // This effectively is like grade-school multiplication between - // two numbers, except we're using splits on `y`, and the intermediate - // step is a Karatsuba multiplication. - let mut start = 0; - while !y.is_empty() { - let m = x.len().min(y.len()); - let (yl, yh) = karatsuba_split(y, m); - let prod = karatsuba_mul(x, yl); - iadd_impl(&mut result, &prod, start); - y = yh; - start += m; - } - small::normalize(&mut result); - - result - } - - /// Forwarder to the proper Karatsuba algorithm. - #[inline] - fn karatsuba_mul_fwd(x: &[Limb], y: &[Limb]) -> Vec { - if x.len() < y.len() { - karatsuba_mul(x, y) - } else { - karatsuba_mul(y, x) - } - } - - /// MulAssign bigint to bigint. - #[inline] - pub fn imul(x: &mut Vec, y: &[Limb]) { - if y.len() == 1 { - small::imul(x, y[0]); - } else { - // We're not really in a condition where using Karatsuba - // multiplication makes sense, so we're just going to use long - // division. 
~20% speedup compared to: - // *x = karatsuba_mul_fwd(x, y); - *x = karatsuba_mul_fwd(x, y); - } - } -} // large - -// TRAITS -// ------ - -/// Traits for shared operations for big integers. -/// -/// None of these are implemented using normal traits, since these -/// are very expensive operations, and we want to deliberately -/// and explicitly use these functions. -pub(crate) trait Math: Clone + Sized + Default { - // DATA - - /// Get access to the underlying data - fn data(&self) -> &Vec; - - /// Get access to the underlying data - fn data_mut(&mut self) -> &mut Vec; - - // RELATIVE OPERATIONS - - /// Compare self to y. - #[inline] - fn compare(&self, y: &Self) -> cmp::Ordering { - large::compare(self.data(), y.data()) - } - - // PROPERTIES - - /// Get the high 64-bits from the bigint and if there are remaining bits. - #[inline] - fn hi64(&self) -> (u64, bool) { - self.data().as_slice().hi64() - } - - /// Calculate the bit-length of the big-integer. - /// Returns usize::max_value() if the value overflows, - /// IE, if `self.data().len() > usize::max_value() / 8`. - #[inline] - fn bit_length(&self) -> usize { - small::bit_length(self.data()) - } - - // INTEGER CONVERSIONS - - /// Create new big integer from u64. - #[inline] - fn from_u64(x: u64) -> Self { - let mut v = Self::default(); - let slc = split_u64(x); - v.data_mut().extend_from_slice(&slc); - v.normalize(); - v - } - - // NORMALIZE - - /// Normalize the integer, so any leading zero values are removed. - #[inline] - fn normalize(&mut self) { - small::normalize(self.data_mut()); - } - - // ADDITION - - /// AddAssign small integer. - #[inline] - fn iadd_small(&mut self, y: Limb) { - small::iadd(self.data_mut(), y); - } - - // MULTIPLICATION - - /// MulAssign small integer. - #[inline] - fn imul_small(&mut self, y: Limb) { - small::imul(self.data_mut(), y); - } - - /// Multiply by a power of 2. - #[inline] - fn imul_pow2(&mut self, n: u32) { - self.ishl(n as usize); - } - - /// Multiply by a power of 5. 
- #[inline] - fn imul_pow5(&mut self, n: u32) { - small::imul_pow5(self.data_mut(), n); - } - - /// MulAssign by a power of 10. - #[inline] - fn imul_pow10(&mut self, n: u32) { - self.imul_pow5(n); - self.imul_pow2(n); - } - - // SHIFTS - - /// Shift-left the entire buffer n bits. - #[inline] - fn ishl(&mut self, n: usize) { - small::ishl(self.data_mut(), n); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/mod.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/mod.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,38 +0,0 @@ -// The code in this module is derived from the `lexical` crate by @Alexhuszagh -// which the author condensed into this minimal subset for use in serde_json. -// For the serde_json use case we care more about reliably round tripping all -// possible floating point values than about parsing any arbitrarily long string -// of digits with perfect accuracy, as the latter would take a high cost in -// compile time and performance. -// -// Dual licensed as MIT and Apache 2.0 just like the rest of serde_json, but -// copyright Alexander Huszagh. - -//! Fast, minimal float-parsing algorithm. 
- -// MODULES -pub(crate) mod algorithm; -mod bhcomp; -mod bignum; -mod cached; -mod cached_float80; -mod digit; -mod errors; -pub(crate) mod exponent; -pub(crate) mod float; -mod large_powers; -pub(crate) mod math; -pub(crate) mod num; -pub(crate) mod parse; -pub(crate) mod rounding; -mod shift; -mod small_powers; - -#[cfg(limb_width_32)] -mod large_powers32; - -#[cfg(limb_width_64)] -mod large_powers64; - -// API -pub use self::parse::{parse_concise_float, parse_truncated_float}; diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/num.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/num.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/num.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/num.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,440 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Utilities for Rust numbers. - -use core::ops; - -/// Precalculated values of radix**i for i in range [0, arr.len()-1]. -/// Each value can be **exactly** represented as that type. -const F32_POW10: [f32; 11] = [ - 1.0, - 10.0, - 100.0, - 1000.0, - 10000.0, - 100000.0, - 1000000.0, - 10000000.0, - 100000000.0, - 1000000000.0, - 10000000000.0, -]; - -/// Precalculated values of radix**i for i in range [0, arr.len()-1]. -/// Each value can be **exactly** represented as that type. -const F64_POW10: [f64; 23] = [ - 1.0, - 10.0, - 100.0, - 1000.0, - 10000.0, - 100000.0, - 1000000.0, - 10000000.0, - 100000000.0, - 1000000000.0, - 10000000000.0, - 100000000000.0, - 1000000000000.0, - 10000000000000.0, - 100000000000000.0, - 1000000000000000.0, - 10000000000000000.0, - 100000000000000000.0, - 1000000000000000000.0, - 10000000000000000000.0, - 100000000000000000000.0, - 1000000000000000000000.0, - 10000000000000000000000.0, -]; - -/// Type that can be converted to primitive with `as`. 
-pub trait AsPrimitive: Sized + Copy + PartialOrd { - fn as_u32(self) -> u32; - fn as_u64(self) -> u64; - fn as_u128(self) -> u128; - fn as_usize(self) -> usize; - fn as_f32(self) -> f32; - fn as_f64(self) -> f64; -} - -macro_rules! as_primitive_impl { - ($($ty:ident)*) => { - $( - impl AsPrimitive for $ty { - #[inline] - fn as_u32(self) -> u32 { - self as u32 - } - - #[inline] - fn as_u64(self) -> u64 { - self as u64 - } - - #[inline] - fn as_u128(self) -> u128 { - self as u128 - } - - #[inline] - fn as_usize(self) -> usize { - self as usize - } - - #[inline] - fn as_f32(self) -> f32 { - self as f32 - } - - #[inline] - fn as_f64(self) -> f64 { - self as f64 - } - } - )* - }; -} - -as_primitive_impl! { u32 u64 u128 usize f32 f64 } - -/// An interface for casting between machine scalars. -pub trait AsCast: AsPrimitive { - /// Creates a number from another value that can be converted into - /// a primitive via the `AsPrimitive` trait. - fn as_cast(n: N) -> Self; -} - -macro_rules! as_cast_impl { - ($ty:ident, $method:ident) => { - impl AsCast for $ty { - #[inline] - fn as_cast(n: N) -> Self { - n.$method() - } - } - }; -} - -as_cast_impl!(u32, as_u32); -as_cast_impl!(u64, as_u64); -as_cast_impl!(u128, as_u128); -as_cast_impl!(usize, as_usize); -as_cast_impl!(f32, as_f32); -as_cast_impl!(f64, as_f64); - -/// Numerical type trait. -pub trait Number: AsCast + ops::Add {} - -macro_rules! number_impl { - ($($ty:ident)*) => { - $( - impl Number for $ty {} - )* - }; -} - -number_impl! { u32 u64 u128 usize f32 f64 } - -/// Defines a trait that supports integral operations. -pub trait Integer: Number + ops::BitAnd + ops::Shr { - const ZERO: Self; -} - -macro_rules! integer_impl { - ($($ty:tt)*) => { - $( - impl Integer for $ty { - const ZERO: Self = 0; - } - )* - }; -} - -integer_impl! { u32 u64 u128 usize } - -/// Type trait for the mantissa type. -pub trait Mantissa: Integer { - /// Mask to extract the high bits from the integer. 
- const HIMASK: Self; - /// Mask to extract the low bits from the integer. - const LOMASK: Self; - /// Full size of the integer, in bits. - const FULL: i32; - /// Half size of the integer, in bits. - const HALF: i32 = Self::FULL / 2; -} - -impl Mantissa for u64 { - const HIMASK: u64 = 0xFFFFFFFF00000000; - const LOMASK: u64 = 0x00000000FFFFFFFF; - const FULL: i32 = 64; -} - -/// Get exact exponent limit for radix. -pub trait Float: Number { - /// Unsigned type of the same size. - type Unsigned: Integer; - - /// Literal zero. - const ZERO: Self; - /// Maximum number of digits that can contribute in the mantissa. - /// - /// We can exactly represent a float in radix `b` from radix 2 if - /// `b` is divisible by 2. This function calculates the exact number of - /// digits required to exactly represent that float. - /// - /// According to the "Handbook of Floating Point Arithmetic", - /// for IEEE754, with emin being the min exponent, p2 being the - /// precision, and b being the radix, the number of digits follows as: - /// - /// `−emin + p2 + ⌊(emin + 1) log(2, b) − log(1 − 2^(−p2), b)⌋` - /// - /// For f32, this follows as: - /// emin = -126 - /// p2 = 24 - /// - /// For f64, this follows as: - /// emin = -1022 - /// p2 = 53 - /// - /// In Python: - /// `-emin + p2 + math.floor((emin+1)*math.log(2, b) - math.log(1-2**(-p2), b))` - /// - /// This was used to calculate the maximum number of digits for [2, 36]. - const MAX_DIGITS: usize; - - // MASKS - - /// Bitmask for the sign bit. - const SIGN_MASK: Self::Unsigned; - /// Bitmask for the exponent, including the hidden bit. - const EXPONENT_MASK: Self::Unsigned; - /// Bitmask for the hidden bit in exponent, which is an implicit 1 in the fraction. - const HIDDEN_BIT_MASK: Self::Unsigned; - /// Bitmask for the mantissa (fraction), excluding the hidden bit. - const MANTISSA_MASK: Self::Unsigned; - - // PROPERTIES - - /// Positive infinity as bits. - const INFINITY_BITS: Self::Unsigned; - /// Positive infinity as bits. 
- const NEGATIVE_INFINITY_BITS: Self::Unsigned; - /// Size of the significand (mantissa) without hidden bit. - const MANTISSA_SIZE: i32; - /// Bias of the exponet - const EXPONENT_BIAS: i32; - /// Exponent portion of a denormal float. - const DENORMAL_EXPONENT: i32; - /// Maximum exponent value in float. - const MAX_EXPONENT: i32; - - // ROUNDING - - /// Default number of bits to shift (or 64 - mantissa size - 1). - const DEFAULT_SHIFT: i32; - /// Mask to determine if a full-carry occurred (1 in bit above hidden bit). - const CARRY_MASK: u64; - - /// Get min and max exponent limits (exact) from radix. - fn exponent_limit() -> (i32, i32); - - /// Get the number of digits that can be shifted from exponent to mantissa. - fn mantissa_limit() -> i32; - - // Re-exported methods from std. - fn pow10(self, n: i32) -> Self; - fn from_bits(u: Self::Unsigned) -> Self; - fn to_bits(self) -> Self::Unsigned; - fn is_sign_positive(self) -> bool; - fn is_sign_negative(self) -> bool; - - /// Returns true if the float is a denormal. - #[inline] - fn is_denormal(self) -> bool { - self.to_bits() & Self::EXPONENT_MASK == Self::Unsigned::ZERO - } - - /// Returns true if the float is a NaN or Infinite. - #[inline] - fn is_special(self) -> bool { - self.to_bits() & Self::EXPONENT_MASK == Self::EXPONENT_MASK - } - - /// Returns true if the float is infinite. - #[inline] - fn is_inf(self) -> bool { - self.is_special() && (self.to_bits() & Self::MANTISSA_MASK) == Self::Unsigned::ZERO - } - - /// Get exponent component from the float. - #[inline] - fn exponent(self) -> i32 { - if self.is_denormal() { - return Self::DENORMAL_EXPONENT; - } - - let bits = self.to_bits(); - let biased_e = ((bits & Self::EXPONENT_MASK) >> Self::MANTISSA_SIZE).as_u32(); - biased_e as i32 - Self::EXPONENT_BIAS - } - - /// Get mantissa (significand) component from float. 
- #[inline] - fn mantissa(self) -> Self::Unsigned { - let bits = self.to_bits(); - let s = bits & Self::MANTISSA_MASK; - if !self.is_denormal() { - s + Self::HIDDEN_BIT_MASK - } else { - s - } - } - - /// Get next greater float for a positive float. - /// Value must be >= 0.0 and < INFINITY. - #[inline] - fn next_positive(self) -> Self { - debug_assert!(self.is_sign_positive() && !self.is_inf()); - Self::from_bits(self.to_bits() + Self::Unsigned::as_cast(1u32)) - } - - /// Round a positive number to even. - #[inline] - fn round_positive_even(self) -> Self { - if self.mantissa() & Self::Unsigned::as_cast(1u32) == Self::Unsigned::as_cast(1u32) { - self.next_positive() - } else { - self - } - } -} - -impl Float for f32 { - type Unsigned = u32; - - const ZERO: f32 = 0.0; - const MAX_DIGITS: usize = 114; - const SIGN_MASK: u32 = 0x80000000; - const EXPONENT_MASK: u32 = 0x7F800000; - const HIDDEN_BIT_MASK: u32 = 0x00800000; - const MANTISSA_MASK: u32 = 0x007FFFFF; - const INFINITY_BITS: u32 = 0x7F800000; - const NEGATIVE_INFINITY_BITS: u32 = Self::INFINITY_BITS | Self::SIGN_MASK; - const MANTISSA_SIZE: i32 = 23; - const EXPONENT_BIAS: i32 = 127 + Self::MANTISSA_SIZE; - const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; - const MAX_EXPONENT: i32 = 0xFF - Self::EXPONENT_BIAS; - const DEFAULT_SHIFT: i32 = u64::FULL - f32::MANTISSA_SIZE - 1; - const CARRY_MASK: u64 = 0x1000000; - - #[inline] - fn exponent_limit() -> (i32, i32) { - (-10, 10) - } - - #[inline] - fn mantissa_limit() -> i32 { - 7 - } - - #[inline] - fn pow10(self, n: i32) -> f32 { - // Check the exponent is within bounds in debug builds. 
- debug_assert!({ - let (min, max) = Self::exponent_limit(); - n >= min && n <= max - }); - - if n > 0 { - self * F32_POW10[n as usize] - } else { - self / F32_POW10[-n as usize] - } - } - - #[inline] - fn from_bits(u: u32) -> f32 { - f32::from_bits(u) - } - - #[inline] - fn to_bits(self) -> u32 { - f32::to_bits(self) - } - - #[inline] - fn is_sign_positive(self) -> bool { - f32::is_sign_positive(self) - } - - #[inline] - fn is_sign_negative(self) -> bool { - f32::is_sign_negative(self) - } -} - -impl Float for f64 { - type Unsigned = u64; - - const ZERO: f64 = 0.0; - const MAX_DIGITS: usize = 769; - const SIGN_MASK: u64 = 0x8000000000000000; - const EXPONENT_MASK: u64 = 0x7FF0000000000000; - const HIDDEN_BIT_MASK: u64 = 0x0010000000000000; - const MANTISSA_MASK: u64 = 0x000FFFFFFFFFFFFF; - const INFINITY_BITS: u64 = 0x7FF0000000000000; - const NEGATIVE_INFINITY_BITS: u64 = Self::INFINITY_BITS | Self::SIGN_MASK; - const MANTISSA_SIZE: i32 = 52; - const EXPONENT_BIAS: i32 = 1023 + Self::MANTISSA_SIZE; - const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; - const MAX_EXPONENT: i32 = 0x7FF - Self::EXPONENT_BIAS; - const DEFAULT_SHIFT: i32 = u64::FULL - f64::MANTISSA_SIZE - 1; - const CARRY_MASK: u64 = 0x20000000000000; - - #[inline] - fn exponent_limit() -> (i32, i32) { - (-22, 22) - } - - #[inline] - fn mantissa_limit() -> i32 { - 15 - } - - #[inline] - fn pow10(self, n: i32) -> f64 { - // Check the exponent is within bounds in debug builds. 
- debug_assert!({ - let (min, max) = Self::exponent_limit(); - n >= min && n <= max - }); - - if n > 0 { - self * F64_POW10[n as usize] - } else { - self / F64_POW10[-n as usize] - } - } - - #[inline] - fn from_bits(u: u64) -> f64 { - f64::from_bits(u) - } - - #[inline] - fn to_bits(self) -> u64 { - f64::to_bits(self) - } - - #[inline] - fn is_sign_positive(self) -> bool { - f64::is_sign_positive(self) - } - - #[inline] - fn is_sign_negative(self) -> bool { - f64::is_sign_negative(self) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/parse.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/parse.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/parse.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/parse.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,83 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -use super::algorithm::*; -use super::bhcomp::*; -use super::digit::*; -use super::exponent::*; -use super::num::*; - -// PARSERS -// ------- - -/// Parse float for which the entire integer and fraction parts fit into a 64 -/// bit mantissa. -pub fn parse_concise_float(mantissa: u64, mant_exp: i32) -> F -where - F: Float, -{ - if let Some(float) = fast_path(mantissa, mant_exp) { - return float; - } - - // Moderate path (use an extended 80-bit representation). - let truncated = false; - let (fp, valid) = moderate_path::(mantissa, mant_exp, truncated); - if valid { - return fp.into_float::(); - } - - let b = fp.into_downward_float::(); - if b.is_special() { - // We have a non-finite number, we get to leave early. - return b; - } - - // Slow path, fast path didn't work. - let mut buffer = itoa::Buffer::new(); - let integer = buffer.format(mantissa).as_bytes(); - let fraction = &[]; - bhcomp(b, integer, fraction, mant_exp) -} - -/// Parse float from extracted float components. -/// -/// * `integer` - Slice containing the integer digits. 
-/// * `fraction` - Slice containing the fraction digits. -/// * `exponent` - Parsed, 32-bit exponent. -/// -/// Precondition: The integer must not have leading zeros. -pub fn parse_truncated_float(integer: &[u8], mut fraction: &[u8], exponent: i32) -> F -where - F: Float, -{ - // Trim trailing zeroes from the fraction part. - while fraction.last() == Some(&b'0') { - fraction = &fraction[..fraction.len() - 1]; - } - - // Calculate the number of truncated digits. - let mut truncated = 0; - let mut mantissa: u64 = 0; - let mut iter = integer.iter().chain(fraction); - for &c in &mut iter { - mantissa = match add_digit(mantissa, to_digit(c).unwrap()) { - Some(v) => v, - None => { - truncated = 1 + iter.count(); - break; - } - }; - } - - let mant_exp = mantissa_exponent(exponent, fraction.len(), truncated); - let is_truncated = true; - - fallback_path( - integer, - fraction, - mantissa, - exponent, - mant_exp, - is_truncated, - ) -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/rounding.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/rounding.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/rounding.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/rounding.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,231 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Defines rounding schemes for floating-point numbers. - -use super::float::ExtendedFloat; -use super::num::*; -use super::shift::*; -use core::mem; - -// MASKS - -/// Calculate a scalar factor of 2 above the halfway point. -#[inline] -pub(crate) fn nth_bit(n: u64) -> u64 { - let bits: u64 = mem::size_of::() as u64 * 8; - debug_assert!(n < bits, "nth_bit() overflow in shl."); - - 1 << n -} - -/// Generate a bitwise mask for the lower `n` bits. 
-#[inline] -pub(crate) fn lower_n_mask(n: u64) -> u64 { - let bits: u64 = mem::size_of::() as u64 * 8; - debug_assert!(n <= bits, "lower_n_mask() overflow in shl."); - - if n == bits { - u64::max_value() - } else { - (1 << n) - 1 - } -} - -/// Calculate the halfway point for the lower `n` bits. -#[inline] -pub(crate) fn lower_n_halfway(n: u64) -> u64 { - let bits: u64 = mem::size_of::() as u64 * 8; - debug_assert!(n <= bits, "lower_n_halfway() overflow in shl."); - - if n == 0 { - 0 - } else { - nth_bit(n - 1) - } -} - -/// Calculate a bitwise mask with `n` 1 bits starting at the `bit` position. -#[inline] -pub(crate) fn internal_n_mask(bit: u64, n: u64) -> u64 { - let bits: u64 = mem::size_of::() as u64 * 8; - debug_assert!(bit <= bits, "internal_n_halfway() overflow in shl."); - debug_assert!(n <= bits, "internal_n_halfway() overflow in shl."); - debug_assert!(bit >= n, "internal_n_halfway() overflow in sub."); - - lower_n_mask(bit) ^ lower_n_mask(bit - n) -} - -// NEAREST ROUNDING - -// Shift right N-bytes and round to the nearest. -// -// Return if we are above halfway and if we are halfway. -#[inline] -pub(crate) fn round_nearest(fp: &mut ExtendedFloat, shift: i32) -> (bool, bool) { - // Extract the truncated bits using mask. - // Calculate if the value of the truncated bits are either above - // the mid-way point, or equal to it. - // - // For example, for 4 truncated bytes, the mask would be b1111 - // and the midway point would be b1000. - let mask: u64 = lower_n_mask(shift as u64); - let halfway: u64 = lower_n_halfway(shift as u64); - - let truncated_bits = fp.mant & mask; - let is_above = truncated_bits > halfway; - let is_halfway = truncated_bits == halfway; - - // Bit shift so the leading bit is in the hidden bit. - overflowing_shr(fp, shift); - - (is_above, is_halfway) -} - -// Tie rounded floating point to event. 
-#[inline] -pub(crate) fn tie_even(fp: &mut ExtendedFloat, is_above: bool, is_halfway: bool) { - // Extract the last bit after shifting (and determine if it is odd). - let is_odd = fp.mant & 1 == 1; - - // Calculate if we need to roundup. - // We need to roundup if we are above halfway, or if we are odd - // and at half-way (need to tie-to-even). - if is_above || (is_odd && is_halfway) { - fp.mant += 1; - } -} - -// Shift right N-bytes and round nearest, tie-to-even. -// -// Floating-point arithmetic uses round to nearest, ties to even, -// which rounds to the nearest value, if the value is halfway in between, -// round to an even value. -#[inline] -pub(crate) fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32) { - let (is_above, is_halfway) = round_nearest(fp, shift); - tie_even(fp, is_above, is_halfway); -} - -// DIRECTED ROUNDING - -// Shift right N-bytes and round towards a direction. -// -// Return if we have any truncated bytes. -#[inline] -fn round_toward(fp: &mut ExtendedFloat, shift: i32) -> bool { - let mask: u64 = lower_n_mask(shift as u64); - let truncated_bits = fp.mant & mask; - - // Bit shift so the leading bit is in the hidden bit. - overflowing_shr(fp, shift); - - truncated_bits != 0 -} - -// Round down. -#[inline] -fn downard(_: &mut ExtendedFloat, _: bool) {} - -// Shift right N-bytes and round toward zero. -// -// Floating-point arithmetic defines round toward zero, which rounds -// towards positive zero. -#[inline] -pub(crate) fn round_downward(fp: &mut ExtendedFloat, shift: i32) { - // Bit shift so the leading bit is in the hidden bit. - // No rounding schemes, so we just ignore everything else. - let is_truncated = round_toward(fp, shift); - downard(fp, is_truncated); -} - -// ROUND TO FLOAT - -// Shift the ExtendedFloat fraction to the fraction bits in a native float. 
-// -// Floating-point arithmetic uses round to nearest, ties to even, -// which rounds to the nearest value, if the value is halfway in between, -// round to an even value. -#[inline] -pub(crate) fn round_to_float(fp: &mut ExtendedFloat, algorithm: Algorithm) -where - F: Float, - Algorithm: FnOnce(&mut ExtendedFloat, i32), -{ - // Calculate the difference to allow a single calculation - // rather than a loop, to minimize the number of ops required. - // This does underflow detection. - let final_exp = fp.exp + F::DEFAULT_SHIFT; - if final_exp < F::DENORMAL_EXPONENT { - // We would end up with a denormal exponent, try to round to more - // digits. Only shift right if we can avoid zeroing out the value, - // which requires the exponent diff to be < M::BITS. The value - // is already normalized, so we shouldn't have any issue zeroing - // out the value. - let diff = F::DENORMAL_EXPONENT - fp.exp; - if diff <= u64::FULL { - // We can avoid underflow, can get a valid representation. - algorithm(fp, diff); - } else { - // Certain underflow, assign literal 0s. - fp.mant = 0; - fp.exp = 0; - } - } else { - algorithm(fp, F::DEFAULT_SHIFT); - } - - if fp.mant & F::CARRY_MASK == F::CARRY_MASK { - // Roundup carried over to 1 past the hidden bit. - shr(fp, 1); - } -} - -// AVOID OVERFLOW/UNDERFLOW - -// Avoid overflow for large values, shift left as needed. -// -// Shift until a 1-bit is in the hidden bit, if the mantissa is not 0. -#[inline] -pub(crate) fn avoid_overflow(fp: &mut ExtendedFloat) -where - F: Float, -{ - // Calculate the difference to allow a single calculation - // rather than a loop, minimizing the number of ops required. - if fp.exp >= F::MAX_EXPONENT { - let diff = fp.exp - F::MAX_EXPONENT; - if diff <= F::MANTISSA_SIZE { - // Our overflow mask needs to start at the hidden bit, or at - // `F::MANTISSA_SIZE+1`, and needs to have `diff+1` bits set, - // to see if our value overflows. 
- let bit = (F::MANTISSA_SIZE + 1) as u64; - let n = (diff + 1) as u64; - let mask = internal_n_mask(bit, n); - if (fp.mant & mask) == 0 { - // If we have no 1-bit in the hidden-bit position, - // which is index 0, we need to shift 1. - let shift = diff + 1; - shl(fp, shift); - } - } - } -} - -// ROUND TO NATIVE - -// Round an extended-precision float to a native float representation. -#[inline] -pub(crate) fn round_to_native(fp: &mut ExtendedFloat, algorithm: Algorithm) -where - F: Float, - Algorithm: FnOnce(&mut ExtendedFloat, i32), -{ - // Shift all the way left, to ensure a consistent representation. - // The following right-shifts do not work for a non-normalized number. - fp.normalize(); - - // Round so the fraction is in a native mantissa representation, - // and avoid overflow/underflow. - round_to_float::(fp, algorithm); - avoid_overflow::(fp); -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/shift.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/shift.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/shift.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/shift.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Bit-shift helpers. - -use super::float::ExtendedFloat; -use core::mem; - -// Shift extended-precision float right `shift` bytes. -#[inline] -pub(crate) fn shr(fp: &mut ExtendedFloat, shift: i32) { - let bits: u64 = mem::size_of::() as u64 * 8; - debug_assert!((shift as u64) < bits, "shr() overflow in shift right."); - - fp.mant >>= shift; - fp.exp += shift; -} - -// Shift extended-precision float right `shift` bytes. -// -// Accepts when the shift is the same as the type size, and -// sets the value to 0. 
-#[inline] -pub(crate) fn overflowing_shr(fp: &mut ExtendedFloat, shift: i32) { - let bits: u64 = mem::size_of::() as u64 * 8; - debug_assert!( - (shift as u64) <= bits, - "overflowing_shr() overflow in shift right." - ); - - fp.mant = if shift as u64 == bits { - 0 - } else { - fp.mant >> shift - }; - fp.exp += shift; -} - -// Shift extended-precision float left `shift` bytes. -#[inline] -pub(crate) fn shl(fp: &mut ExtendedFloat, shift: i32) { - let bits: u64 = mem::size_of::() as u64 * 8; - debug_assert!((shift as u64) < bits, "shl() overflow in shift left."); - - fp.mant <<= shift; - fp.exp -= shift; -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/small_powers.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/small_powers.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lexical/small_powers.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lexical/small_powers.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,70 +0,0 @@ -// Adapted from https://github.com/Alexhuszagh/rust-lexical. - -//! Pre-computed small powers. 
- -// 32 BIT -#[cfg(limb_width_32)] -pub(crate) const POW5_32: [u32; 14] = [ - 1, 5, 25, 125, 625, 3125, 15625, 78125, 390625, 1953125, 9765625, 48828125, 244140625, - 1220703125, -]; - -#[cfg(limb_width_32)] -pub(crate) const POW10_32: [u32; 10] = [ - 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, -]; - -// 64 BIT -#[cfg(limb_width_64)] -pub(crate) const POW5_64: [u64; 28] = [ - 1, - 5, - 25, - 125, - 625, - 3125, - 15625, - 78125, - 390625, - 1953125, - 9765625, - 48828125, - 244140625, - 1220703125, - 6103515625, - 30517578125, - 152587890625, - 762939453125, - 3814697265625, - 19073486328125, - 95367431640625, - 476837158203125, - 2384185791015625, - 11920928955078125, - 59604644775390625, - 298023223876953125, - 1490116119384765625, - 7450580596923828125, -]; -pub(crate) const POW10_64: [u64; 20] = [ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - 10000000000, - 100000000000, - 1000000000000, - 10000000000000, - 100000000000000, - 1000000000000000, - 10000000000000000, - 100000000000000000, - 1000000000000000000, - 10000000000000000000, -]; diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/lib.rs s390-tools-2.33.1/rust-vendor/serde_json/src/lib.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,422 +0,0 @@ -//! # Serde JSON -//! -//! JSON is a ubiquitous open-standard format that uses human-readable text to -//! transmit data objects consisting of key-value pairs. -//! -//! ```json -//! { -//! "name": "John Doe", -//! "age": 43, -//! "address": { -//! "street": "10 Downing Street", -//! "city": "London" -//! }, -//! "phones": [ -//! "+44 1234567", -//! "+44 2345678" -//! ] -//! } -//! ``` -//! -//! There are three common ways that you might find yourself needing to work -//! with JSON data in Rust. -//! -//! 
- **As text data.** An unprocessed string of JSON data that you receive on -//! an HTTP endpoint, read from a file, or prepare to send to a remote -//! server. -//! - **As an untyped or loosely typed representation.** Maybe you want to -//! check that some JSON data is valid before passing it on, but without -//! knowing the structure of what it contains. Or you want to do very basic -//! manipulations like insert a key in a particular spot. -//! - **As a strongly typed Rust data structure.** When you expect all or most -//! of your data to conform to a particular structure and want to get real -//! work done without JSON's loosey-goosey nature tripping you up. -//! -//! Serde JSON provides efficient, flexible, safe ways of converting data -//! between each of these representations. -//! -//! # Operating on untyped JSON values -//! -//! Any valid JSON data can be manipulated in the following recursive enum -//! representation. This data structure is [`serde_json::Value`][value]. -//! -//! ``` -//! # use serde_json::{Number, Map}; -//! # -//! # #[allow(dead_code)] -//! enum Value { -//! Null, -//! Bool(bool), -//! Number(Number), -//! String(String), -//! Array(Vec), -//! Object(Map), -//! } -//! ``` -//! -//! A string of JSON data can be parsed into a `serde_json::Value` by the -//! [`serde_json::from_str`][from_str] function. There is also -//! [`from_slice`][from_slice] for parsing from a byte slice &[u8] and -//! [`from_reader`][from_reader] for parsing from any `io::Read` like a File or -//! a TCP stream. -//! -//! ``` -//! use serde_json::{Result, Value}; -//! -//! fn untyped_example() -> Result<()> { -//! // Some JSON input data as a &str. Maybe this comes from the user. -//! let data = r#" -//! { -//! "name": "John Doe", -//! "age": 43, -//! "phones": [ -//! "+44 1234567", -//! "+44 2345678" -//! ] -//! }"#; -//! -//! // Parse the string of data into serde_json::Value. -//! let v: Value = serde_json::from_str(data)?; -//! -//! 
// Access parts of the data by indexing with square brackets. -//! println!("Please call {} at the number {}", v["name"], v["phones"][0]); -//! -//! Ok(()) -//! } -//! # -//! # fn main() { -//! # untyped_example().unwrap(); -//! # } -//! ``` -//! -//! The result of square bracket indexing like `v["name"]` is a borrow of the -//! data at that index, so the type is `&Value`. A JSON map can be indexed with -//! string keys, while a JSON array can be indexed with integer keys. If the -//! type of the data is not right for the type with which it is being indexed, -//! or if a map does not contain the key being indexed, or if the index into a -//! vector is out of bounds, the returned element is `Value::Null`. -//! -//! When a `Value` is printed, it is printed as a JSON string. So in the code -//! above, the output looks like `Please call "John Doe" at the number "+44 -//! 1234567"`. The quotation marks appear because `v["name"]` is a `&Value` -//! containing a JSON string and its JSON representation is `"John Doe"`. -//! Printing as a plain string without quotation marks involves converting from -//! a JSON string to a Rust string with [`as_str()`] or avoiding the use of -//! `Value` as described in the following section. -//! -//! [`as_str()`]: crate::Value::as_str -//! -//! The `Value` representation is sufficient for very basic tasks but can be -//! tedious to work with for anything more significant. Error handling is -//! verbose to implement correctly, for example imagine trying to detect the -//! presence of unrecognized fields in the input data. The compiler is powerless -//! to help you when you make a mistake, for example imagine typoing `v["name"]` -//! as `v["nmae"]` in one of the dozens of places it is used in your code. -//! -//! # Parsing JSON as strongly typed data structures -//! -//! Serde provides a powerful way of mapping JSON data into Rust data structures -//! largely automatically. -//! -//! ``` -//! use serde::{Deserialize, Serialize}; -//! 
use serde_json::Result; -//! -//! #[derive(Serialize, Deserialize)] -//! struct Person { -//! name: String, -//! age: u8, -//! phones: Vec, -//! } -//! -//! fn typed_example() -> Result<()> { -//! // Some JSON input data as a &str. Maybe this comes from the user. -//! let data = r#" -//! { -//! "name": "John Doe", -//! "age": 43, -//! "phones": [ -//! "+44 1234567", -//! "+44 2345678" -//! ] -//! }"#; -//! -//! // Parse the string of data into a Person object. This is exactly the -//! // same function as the one that produced serde_json::Value above, but -//! // now we are asking it for a Person as output. -//! let p: Person = serde_json::from_str(data)?; -//! -//! // Do things just like with any other Rust data structure. -//! println!("Please call {} at the number {}", p.name, p.phones[0]); -//! -//! Ok(()) -//! } -//! # -//! # fn main() { -//! # typed_example().unwrap(); -//! # } -//! ``` -//! -//! This is the same `serde_json::from_str` function as before, but this time we -//! assign the return value to a variable of type `Person` so Serde will -//! automatically interpret the input data as a `Person` and produce informative -//! error messages if the layout does not conform to what a `Person` is expected -//! to look like. -//! -//! Any type that implements Serde's `Deserialize` trait can be deserialized -//! this way. This includes built-in Rust standard library types like `Vec` -//! and `HashMap`, as well as any structs or enums annotated with -//! `#[derive(Deserialize)]`. -//! -//! Once we have `p` of type `Person`, our IDE and the Rust compiler can help us -//! use it correctly like they do for any other Rust code. The IDE can -//! autocomplete field names to prevent typos, which was impossible in the -//! `serde_json::Value` representation. And the Rust compiler can check that -//! when we write `p.phones[0]`, then `p.phones` is guaranteed to be a -//! `Vec` so indexing into it makes sense and produces a `String`. -//! -//! 
# Constructing JSON values -//! -//! Serde JSON provides a [`json!` macro][macro] to build `serde_json::Value` -//! objects with very natural JSON syntax. -//! -//! ``` -//! use serde_json::json; -//! -//! fn main() { -//! // The type of `john` is `serde_json::Value` -//! let john = json!({ -//! "name": "John Doe", -//! "age": 43, -//! "phones": [ -//! "+44 1234567", -//! "+44 2345678" -//! ] -//! }); -//! -//! println!("first phone number: {}", john["phones"][0]); -//! -//! // Convert to a string of JSON and print it out -//! println!("{}", john.to_string()); -//! } -//! ``` -//! -//! The `Value::to_string()` function converts a `serde_json::Value` into a -//! `String` of JSON text. -//! -//! One neat thing about the `json!` macro is that variables and expressions can -//! be interpolated directly into the JSON value as you are building it. Serde -//! will check at compile time that the value you are interpolating is able to -//! be represented as JSON. -//! -//! ``` -//! # use serde_json::json; -//! # -//! # fn random_phone() -> u16 { 0 } -//! # -//! let full_name = "John Doe"; -//! let age_last_year = 42; -//! -//! // The type of `john` is `serde_json::Value` -//! let john = json!({ -//! "name": full_name, -//! "age": age_last_year + 1, -//! "phones": [ -//! format!("+44 {}", random_phone()) -//! ] -//! }); -//! ``` -//! -//! This is amazingly convenient, but we have the problem we had before with -//! `Value`: the IDE and Rust compiler cannot help us if we get it wrong. Serde -//! JSON provides a better way of serializing strongly-typed data structures -//! into JSON text. -//! -//! # Creating JSON by serializing data structures -//! -//! A data structure can be converted to a JSON string by -//! [`serde_json::to_string`][to_string]. There is also -//! [`serde_json::to_vec`][to_vec] which serializes to a `Vec` and -//! [`serde_json::to_writer`][to_writer] which serializes to any `io::Write` -//! such as a File or a TCP stream. -//! -//! ``` -//! 
use serde::{Deserialize, Serialize}; -//! use serde_json::Result; -//! -//! #[derive(Serialize, Deserialize)] -//! struct Address { -//! street: String, -//! city: String, -//! } -//! -//! fn print_an_address() -> Result<()> { -//! // Some data structure. -//! let address = Address { -//! street: "10 Downing Street".to_owned(), -//! city: "London".to_owned(), -//! }; -//! -//! // Serialize it to a JSON string. -//! let j = serde_json::to_string(&address)?; -//! -//! // Print, write to a file, or send to an HTTP server. -//! println!("{}", j); -//! -//! Ok(()) -//! } -//! # -//! # fn main() { -//! # print_an_address().unwrap(); -//! # } -//! ``` -//! -//! Any type that implements Serde's `Serialize` trait can be serialized this -//! way. This includes built-in Rust standard library types like `Vec` and -//! `HashMap`, as well as any structs or enums annotated with -//! `#[derive(Serialize)]`. -//! -//! # No-std support -//! -//! As long as there is a memory allocator, it is possible to use serde_json -//! without the rest of the Rust standard library. Disable the default "std" -//! feature and enable the "alloc" feature: -//! -//! ```toml -//! [dependencies] -//! serde_json = { version = "1.0", default-features = false, features = ["alloc"] } -//! ``` -//! -//! For JSON support in Serde without a memory allocator, please see the -//! [`serde-json-core`] crate. -//! -//! [value]: crate::value::Value -//! [from_str]: crate::de::from_str -//! [from_slice]: crate::de::from_slice -//! [from_reader]: crate::de::from_reader -//! [to_string]: crate::ser::to_string -//! [to_vec]: crate::ser::to_vec -//! [to_writer]: crate::ser::to_writer -//! [macro]: crate::json -//! 
[`serde-json-core`]: https://github.com/rust-embedded-community/serde-json-core - -#![doc(html_root_url = "https://docs.rs/serde_json/1.0.99")] -// Ignored clippy lints -#![allow( - clippy::collapsible_else_if, - clippy::comparison_chain, - clippy::deprecated_cfg_attr, - clippy::doc_markdown, - clippy::excessive_precision, - clippy::explicit_auto_deref, - clippy::float_cmp, - clippy::manual_range_contains, - clippy::match_like_matches_macro, - clippy::match_single_binding, - clippy::needless_doctest_main, - clippy::needless_late_init, - // clippy bug: https://github.com/rust-lang/rust-clippy/issues/8366 - clippy::ptr_arg, - clippy::return_self_not_must_use, - clippy::transmute_ptr_to_ptr, - clippy::unnecessary_wraps, - // clippy bug: https://github.com/rust-lang/rust-clippy/issues/5704 - clippy::unnested_or_patterns, -)] -// Ignored clippy_pedantic lints -#![allow( - // buggy - clippy::iter_not_returning_iterator, // https://github.com/rust-lang/rust-clippy/issues/8285 - // Deserializer::from_str, into_iter - clippy::should_implement_trait, - // integer and float ser/de requires these sorts of casts - clippy::cast_possible_truncation, - clippy::cast_possible_wrap, - clippy::cast_precision_loss, - clippy::cast_sign_loss, - // correctly used - clippy::enum_glob_use, - clippy::if_not_else, - clippy::integer_division, - clippy::let_underscore_untyped, - clippy::map_err_ignore, - clippy::match_same_arms, - clippy::similar_names, - clippy::unused_self, - clippy::wildcard_imports, - // things are often more readable this way - clippy::cast_lossless, - clippy::module_name_repetitions, - clippy::redundant_else, - clippy::shadow_unrelated, - clippy::single_match_else, - clippy::too_many_lines, - clippy::unreadable_literal, - clippy::unseparated_literal_suffix, - clippy::use_self, - clippy::zero_prefixed_literal, - // we support older compilers - clippy::checked_conversions, - clippy::mem_replace_with_default, - // noisy - clippy::missing_errors_doc, - 
clippy::must_use_candidate, -)] -#![allow(non_upper_case_globals)] -#![deny(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(docsrs, feature(doc_cfg))] - -extern crate alloc; - -#[cfg(feature = "std")] -#[doc(inline)] -pub use crate::de::from_reader; -#[doc(inline)] -pub use crate::de::{from_slice, from_str, Deserializer, StreamDeserializer}; -#[doc(inline)] -pub use crate::error::{Error, Result}; -#[doc(inline)] -pub use crate::ser::{to_string, to_string_pretty, to_vec, to_vec_pretty}; -#[cfg(feature = "std")] -#[doc(inline)] -pub use crate::ser::{to_writer, to_writer_pretty, Serializer}; -#[doc(inline)] -pub use crate::value::{from_value, to_value, Map, Number, Value}; - -// We only use our own error type; no need for From conversions provided by the -// standard library's try! macro. This reduces lines of LLVM IR by 4%. -macro_rules! tri { - ($e:expr $(,)?) => { - match $e { - core::result::Result::Ok(val) => val, - core::result::Result::Err(err) => return core::result::Result::Err(err), - } - }; -} - -#[macro_use] -mod macros; - -pub mod de; -pub mod error; -pub mod map; -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub mod ser; -#[cfg(not(feature = "std"))] -mod ser; -pub mod value; - -mod features_check; - -mod io; -#[cfg(feature = "std")] -mod iter; -#[cfg(feature = "float_roundtrip")] -mod lexical; -mod number; -mod read; - -#[cfg(feature = "raw_value")] -mod raw; diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/macros.rs s390-tools-2.33.1/rust-vendor/serde_json/src/macros.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/macros.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/macros.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,304 +0,0 @@ -/// Construct a `serde_json::Value` from a JSON literal. 
-/// -/// ``` -/// # use serde_json::json; -/// # -/// let value = json!({ -/// "code": 200, -/// "success": true, -/// "payload": { -/// "features": [ -/// "serde", -/// "json" -/// ], -/// "homepage": null -/// } -/// }); -/// ``` -/// -/// Variables or expressions can be interpolated into the JSON literal. Any type -/// interpolated into an array element or object value must implement Serde's -/// `Serialize` trait, while any type interpolated into a object key must -/// implement `Into`. If the `Serialize` implementation of the -/// interpolated type decides to fail, or if the interpolated type contains a -/// map with non-string keys, the `json!` macro will panic. -/// -/// ``` -/// # use serde_json::json; -/// # -/// let code = 200; -/// let features = vec!["serde", "json"]; -/// -/// let value = json!({ -/// "code": code, -/// "success": code == 200, -/// "payload": { -/// features[0]: features[1] -/// } -/// }); -/// ``` -/// -/// Trailing commas are allowed inside both arrays and objects. -/// -/// ``` -/// # use serde_json::json; -/// # -/// let value = json!([ -/// "notice", -/// "the", -/// "trailing", -/// "comma -->", -/// ]); -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! json { - // Hide distracting implementation details from the generated rustdoc. - ($($json:tt)+) => { - json_internal!($($json)+) - }; -} - -// Rocket relies on this because they export their own `json!` with a different -// doc comment than ours, and various Rust bugs prevent them from calling our -// `json!` from their `json!` so they call `json_internal!` directly. Check with -// @SergioBenitez before making breaking changes to this macro. -// -// Changes are fine as long as `json_internal!` does not call any new helper -// macros and can still be invoked as `json_internal!($($json)+)`. -#[macro_export(local_inner_macros)] -#[doc(hidden)] -macro_rules! 
json_internal { - ////////////////////////////////////////////////////////////////////////// - // TT muncher for parsing the inside of an array [...]. Produces a vec![...] - // of the elements. - // - // Must be invoked as: json_internal!(@array [] $($tt)*) - ////////////////////////////////////////////////////////////////////////// - - // Done with trailing comma. - (@array [$($elems:expr,)*]) => { - json_internal_vec![$($elems,)*] - }; - - // Done without trailing comma. - (@array [$($elems:expr),*]) => { - json_internal_vec![$($elems),*] - }; - - // Next element is `null`. - (@array [$($elems:expr,)*] null $($rest:tt)*) => { - json_internal!(@array [$($elems,)* json_internal!(null)] $($rest)*) - }; - - // Next element is `true`. - (@array [$($elems:expr,)*] true $($rest:tt)*) => { - json_internal!(@array [$($elems,)* json_internal!(true)] $($rest)*) - }; - - // Next element is `false`. - (@array [$($elems:expr,)*] false $($rest:tt)*) => { - json_internal!(@array [$($elems,)* json_internal!(false)] $($rest)*) - }; - - // Next element is an array. - (@array [$($elems:expr,)*] [$($array:tt)*] $($rest:tt)*) => { - json_internal!(@array [$($elems,)* json_internal!([$($array)*])] $($rest)*) - }; - - // Next element is a map. - (@array [$($elems:expr,)*] {$($map:tt)*} $($rest:tt)*) => { - json_internal!(@array [$($elems,)* json_internal!({$($map)*})] $($rest)*) - }; - - // Next element is an expression followed by comma. - (@array [$($elems:expr,)*] $next:expr, $($rest:tt)*) => { - json_internal!(@array [$($elems,)* json_internal!($next),] $($rest)*) - }; - - // Last element is an expression with no trailing comma. - (@array [$($elems:expr,)*] $last:expr) => { - json_internal!(@array [$($elems,)* json_internal!($last)]) - }; - - // Comma after the most recent element. - (@array [$($elems:expr),*] , $($rest:tt)*) => { - json_internal!(@array [$($elems,)*] $($rest)*) - }; - - // Unexpected token after most recent element. 
- (@array [$($elems:expr),*] $unexpected:tt $($rest:tt)*) => { - json_unexpected!($unexpected) - }; - - ////////////////////////////////////////////////////////////////////////// - // TT muncher for parsing the inside of an object {...}. Each entry is - // inserted into the given map variable. - // - // Must be invoked as: json_internal!(@object $map () ($($tt)*) ($($tt)*)) - // - // We require two copies of the input tokens so that we can match on one - // copy and trigger errors on the other copy. - ////////////////////////////////////////////////////////////////////////// - - // Done. - (@object $object:ident () () ()) => {}; - - // Insert the current entry followed by trailing comma. - (@object $object:ident [$($key:tt)+] ($value:expr) , $($rest:tt)*) => { - let _ = $object.insert(($($key)+).into(), $value); - json_internal!(@object $object () ($($rest)*) ($($rest)*)); - }; - - // Current entry followed by unexpected token. - (@object $object:ident [$($key:tt)+] ($value:expr) $unexpected:tt $($rest:tt)*) => { - json_unexpected!($unexpected); - }; - - // Insert the last entry without trailing comma. - (@object $object:ident [$($key:tt)+] ($value:expr)) => { - let _ = $object.insert(($($key)+).into(), $value); - }; - - // Next value is `null`. - (@object $object:ident ($($key:tt)+) (: null $($rest:tt)*) $copy:tt) => { - json_internal!(@object $object [$($key)+] (json_internal!(null)) $($rest)*); - }; - - // Next value is `true`. - (@object $object:ident ($($key:tt)+) (: true $($rest:tt)*) $copy:tt) => { - json_internal!(@object $object [$($key)+] (json_internal!(true)) $($rest)*); - }; - - // Next value is `false`. - (@object $object:ident ($($key:tt)+) (: false $($rest:tt)*) $copy:tt) => { - json_internal!(@object $object [$($key)+] (json_internal!(false)) $($rest)*); - }; - - // Next value is an array. 
- (@object $object:ident ($($key:tt)+) (: [$($array:tt)*] $($rest:tt)*) $copy:tt) => { - json_internal!(@object $object [$($key)+] (json_internal!([$($array)*])) $($rest)*); - }; - - // Next value is a map. - (@object $object:ident ($($key:tt)+) (: {$($map:tt)*} $($rest:tt)*) $copy:tt) => { - json_internal!(@object $object [$($key)+] (json_internal!({$($map)*})) $($rest)*); - }; - - // Next value is an expression followed by comma. - (@object $object:ident ($($key:tt)+) (: $value:expr , $($rest:tt)*) $copy:tt) => { - json_internal!(@object $object [$($key)+] (json_internal!($value)) , $($rest)*); - }; - - // Last value is an expression with no trailing comma. - (@object $object:ident ($($key:tt)+) (: $value:expr) $copy:tt) => { - json_internal!(@object $object [$($key)+] (json_internal!($value))); - }; - - // Missing value for last entry. Trigger a reasonable error message. - (@object $object:ident ($($key:tt)+) (:) $copy:tt) => { - // "unexpected end of macro invocation" - json_internal!(); - }; - - // Missing colon and value for last entry. Trigger a reasonable error - // message. - (@object $object:ident ($($key:tt)+) () $copy:tt) => { - // "unexpected end of macro invocation" - json_internal!(); - }; - - // Misplaced colon. Trigger a reasonable error message. - (@object $object:ident () (: $($rest:tt)*) ($colon:tt $($copy:tt)*)) => { - // Takes no arguments so "no rules expected the token `:`". - json_unexpected!($colon); - }; - - // Found a comma inside a key. Trigger a reasonable error message. - (@object $object:ident ($($key:tt)*) (, $($rest:tt)*) ($comma:tt $($copy:tt)*)) => { - // Takes no arguments so "no rules expected the token `,`". - json_unexpected!($comma); - }; - - // Key is fully parenthesized. This avoids clippy double_parens false - // positives because the parenthesization may be necessary here. 
- (@object $object:ident () (($key:expr) : $($rest:tt)*) $copy:tt) => { - json_internal!(@object $object ($key) (: $($rest)*) (: $($rest)*)); - }; - - // Refuse to absorb colon token into key expression. - (@object $object:ident ($($key:tt)*) (: $($unexpected:tt)+) $copy:tt) => { - json_expect_expr_comma!($($unexpected)+); - }; - - // Munch a token into the current key. - (@object $object:ident ($($key:tt)*) ($tt:tt $($rest:tt)*) $copy:tt) => { - json_internal!(@object $object ($($key)* $tt) ($($rest)*) ($($rest)*)); - }; - - ////////////////////////////////////////////////////////////////////////// - // The main implementation. - // - // Must be invoked as: json_internal!($($json)+) - ////////////////////////////////////////////////////////////////////////// - - (null) => { - $crate::Value::Null - }; - - (true) => { - $crate::Value::Bool(true) - }; - - (false) => { - $crate::Value::Bool(false) - }; - - ([]) => { - $crate::Value::Array(json_internal_vec![]) - }; - - ([ $($tt:tt)+ ]) => { - $crate::Value::Array(json_internal!(@array [] $($tt)+)) - }; - - ({}) => { - $crate::Value::Object($crate::Map::new()) - }; - - ({ $($tt:tt)+ }) => { - $crate::Value::Object({ - let mut object = $crate::Map::new(); - json_internal!(@object object () ($($tt)+) ($($tt)+)); - object - }) - }; - - // Any Serialize type: numbers, strings, struct literals, variables etc. - // Must be below every other rule. - ($other:expr) => { - $crate::to_value(&$other).unwrap() - }; -} - -// The json_internal macro above cannot invoke vec directly because it uses -// local_inner_macros. A vec invocation there would resolve to $crate::vec. -// Instead invoke vec here outside of local_inner_macros. -#[macro_export] -#[doc(hidden)] -macro_rules! json_internal_vec { - ($($content:tt)*) => { - vec![$($content)*] - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! json_unexpected { - () => {}; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! 
json_expect_expr_comma { - ($e:expr , $($tt:tt)*) => {}; -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/map.rs s390-tools-2.33.1/rust-vendor/serde_json/src/map.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/map.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,940 +0,0 @@ -//! A map of String to serde_json::Value. -//! -//! By default the map is backed by a [`BTreeMap`]. Enable the `preserve_order` -//! feature of serde_json to use [`IndexMap`] instead. -//! -//! [`BTreeMap`]: https://doc.rust-lang.org/std/collections/struct.BTreeMap.html -//! [`IndexMap`]: https://docs.rs/indexmap/*/indexmap/map/struct.IndexMap.html - -use crate::value::Value; -use alloc::string::String; -use core::borrow::Borrow; -use core::fmt::{self, Debug}; -use core::hash::Hash; -use core::iter::{FromIterator, FusedIterator}; -#[cfg(feature = "preserve_order")] -use core::mem; -use core::ops; -use serde::de; - -#[cfg(not(feature = "preserve_order"))] -use alloc::collections::{btree_map, BTreeMap}; -#[cfg(feature = "preserve_order")] -use indexmap::{self, IndexMap}; - -/// Represents a JSON key/value type. -pub struct Map { - map: MapImpl, -} - -#[cfg(not(feature = "preserve_order"))] -type MapImpl = BTreeMap; -#[cfg(feature = "preserve_order")] -type MapImpl = IndexMap; - -impl Map { - /// Makes a new empty Map. - #[inline] - pub fn new() -> Self { - Map { - map: MapImpl::new(), - } - } - - /// Makes a new empty Map with the given initial capacity. - #[inline] - pub fn with_capacity(capacity: usize) -> Self { - Map { - #[cfg(not(feature = "preserve_order"))] - map: { - // does not support with_capacity - let _ = capacity; - BTreeMap::new() - }, - #[cfg(feature = "preserve_order")] - map: IndexMap::with_capacity(capacity), - } - } - - /// Clears the map, removing all values. 
- #[inline] - pub fn clear(&mut self) { - self.map.clear(); - } - - /// Returns a reference to the value corresponding to the key. - /// - /// The key may be any borrowed form of the map's key type, but the ordering - /// on the borrowed form *must* match the ordering on the key type. - #[inline] - pub fn get(&self, key: &Q) -> Option<&Value> - where - String: Borrow, - Q: ?Sized + Ord + Eq + Hash, - { - self.map.get(key) - } - - /// Returns true if the map contains a value for the specified key. - /// - /// The key may be any borrowed form of the map's key type, but the ordering - /// on the borrowed form *must* match the ordering on the key type. - #[inline] - pub fn contains_key(&self, key: &Q) -> bool - where - String: Borrow, - Q: ?Sized + Ord + Eq + Hash, - { - self.map.contains_key(key) - } - - /// Returns a mutable reference to the value corresponding to the key. - /// - /// The key may be any borrowed form of the map's key type, but the ordering - /// on the borrowed form *must* match the ordering on the key type. - #[inline] - pub fn get_mut(&mut self, key: &Q) -> Option<&mut Value> - where - String: Borrow, - Q: ?Sized + Ord + Eq + Hash, - { - self.map.get_mut(key) - } - - /// Returns the key-value pair matching the given key. - /// - /// The key may be any borrowed form of the map's key type, but the ordering - /// on the borrowed form *must* match the ordering on the key type. - #[inline] - #[cfg(any(feature = "preserve_order", not(no_btreemap_get_key_value)))] - pub fn get_key_value(&self, key: &Q) -> Option<(&String, &Value)> - where - String: Borrow, - Q: ?Sized + Ord + Eq + Hash, - { - self.map.get_key_value(key) - } - - /// Inserts a key-value pair into the map. - /// - /// If the map did not have this key present, `None` is returned. - /// - /// If the map did have this key present, the value is updated, and the old - /// value is returned. 
- #[inline] - pub fn insert(&mut self, k: String, v: Value) -> Option { - self.map.insert(k, v) - } - - /// Removes a key from the map, returning the value at the key if the key - /// was previously in the map. - /// - /// The key may be any borrowed form of the map's key type, but the ordering - /// on the borrowed form *must* match the ordering on the key type. - #[inline] - pub fn remove(&mut self, key: &Q) -> Option - where - String: Borrow, - Q: ?Sized + Ord + Eq + Hash, - { - #[cfg(feature = "preserve_order")] - return self.map.swap_remove(key); - #[cfg(not(feature = "preserve_order"))] - return self.map.remove(key); - } - - /// Removes a key from the map, returning the stored key and value if the - /// key was previously in the map. - /// - /// The key may be any borrowed form of the map's key type, but the ordering - /// on the borrowed form *must* match the ordering on the key type. - pub fn remove_entry(&mut self, key: &Q) -> Option<(String, Value)> - where - String: Borrow, - Q: ?Sized + Ord + Eq + Hash, - { - #[cfg(any(feature = "preserve_order", not(no_btreemap_remove_entry)))] - return self.map.remove_entry(key); - #[cfg(all( - not(feature = "preserve_order"), - no_btreemap_remove_entry, - not(no_btreemap_get_key_value), - ))] - { - let (key, _value) = self.map.get_key_value(key)?; - let key = key.clone(); - let value = self.map.remove::(&key)?; - Some((key, value)) - } - #[cfg(all( - not(feature = "preserve_order"), - no_btreemap_remove_entry, - no_btreemap_get_key_value, - ))] - { - use core::ops::{Bound, RangeBounds}; - - struct Key<'a, Q: ?Sized>(&'a Q); - - impl<'a, Q: ?Sized> RangeBounds for Key<'a, Q> { - fn start_bound(&self) -> Bound<&Q> { - Bound::Included(self.0) - } - fn end_bound(&self) -> Bound<&Q> { - Bound::Included(self.0) - } - } - - let mut range = self.map.range(Key(key)); - let (key, _value) = range.next()?; - let key = key.clone(); - let value = self.map.remove::(&key)?; - Some((key, value)) - } - } - - /// Moves all elements 
from other into self, leaving other empty. - #[inline] - pub fn append(&mut self, other: &mut Self) { - #[cfg(feature = "preserve_order")] - self.map - .extend(mem::replace(&mut other.map, MapImpl::default())); - #[cfg(not(feature = "preserve_order"))] - self.map.append(&mut other.map); - } - - /// Gets the given key's corresponding entry in the map for in-place - /// manipulation. - pub fn entry(&mut self, key: S) -> Entry - where - S: Into, - { - #[cfg(not(feature = "preserve_order"))] - use alloc::collections::btree_map::Entry as EntryImpl; - #[cfg(feature = "preserve_order")] - use indexmap::map::Entry as EntryImpl; - - match self.map.entry(key.into()) { - EntryImpl::Vacant(vacant) => Entry::Vacant(VacantEntry { vacant }), - EntryImpl::Occupied(occupied) => Entry::Occupied(OccupiedEntry { occupied }), - } - } - - /// Returns the number of elements in the map. - #[inline] - pub fn len(&self) -> usize { - self.map.len() - } - - /// Returns true if the map contains no elements. - #[inline] - pub fn is_empty(&self) -> bool { - self.map.is_empty() - } - - /// Gets an iterator over the entries of the map. - #[inline] - pub fn iter(&self) -> Iter { - Iter { - iter: self.map.iter(), - } - } - - /// Gets a mutable iterator over the entries of the map. - #[inline] - pub fn iter_mut(&mut self) -> IterMut { - IterMut { - iter: self.map.iter_mut(), - } - } - - /// Gets an iterator over the keys of the map. - #[inline] - pub fn keys(&self) -> Keys { - Keys { - iter: self.map.keys(), - } - } - - /// Gets an iterator over the values of the map. - #[inline] - pub fn values(&self) -> Values { - Values { - iter: self.map.values(), - } - } - - /// Gets an iterator over mutable values of the map. - #[inline] - pub fn values_mut(&mut self) -> ValuesMut { - ValuesMut { - iter: self.map.values_mut(), - } - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all pairs `(k, v)` such that `f(&k, &mut v)` - /// returns `false`. 
- #[cfg(not(no_btreemap_retain))] - #[inline] - pub fn retain(&mut self, f: F) - where - F: FnMut(&String, &mut Value) -> bool, - { - self.map.retain(f); - } -} - -#[allow(clippy::derivable_impls)] // clippy bug: https://github.com/rust-lang/rust-clippy/issues/7655 -impl Default for Map { - #[inline] - fn default() -> Self { - Map { - map: MapImpl::new(), - } - } -} - -impl Clone for Map { - #[inline] - fn clone(&self) -> Self { - Map { - map: self.map.clone(), - } - } - - #[inline] - fn clone_from(&mut self, source: &Self) { - self.map.clone_from(&source.map); - } -} - -impl PartialEq for Map { - #[inline] - fn eq(&self, other: &Self) -> bool { - self.map.eq(&other.map) - } -} - -impl Eq for Map {} - -/// Access an element of this map. Panics if the given key is not present in the -/// map. -/// -/// ``` -/// # use serde_json::Value; -/// # -/// # let val = &Value::String("".to_owned()); -/// # let _ = -/// match val { -/// Value::String(s) => Some(s.as_str()), -/// Value::Array(arr) => arr[0].as_str(), -/// Value::Object(map) => map["type"].as_str(), -/// _ => None, -/// } -/// # ; -/// ``` -impl<'a, Q> ops::Index<&'a Q> for Map -where - String: Borrow, - Q: ?Sized + Ord + Eq + Hash, -{ - type Output = Value; - - fn index(&self, index: &Q) -> &Value { - self.map.index(index) - } -} - -/// Mutably access an element of this map. Panics if the given key is not -/// present in the map. 
-/// -/// ``` -/// # use serde_json::json; -/// # -/// # let mut map = serde_json::Map::new(); -/// # map.insert("key".to_owned(), serde_json::Value::Null); -/// # -/// map["key"] = json!("value"); -/// ``` -impl<'a, Q> ops::IndexMut<&'a Q> for Map -where - String: Borrow, - Q: ?Sized + Ord + Eq + Hash, -{ - fn index_mut(&mut self, index: &Q) -> &mut Value { - self.map.get_mut(index).expect("no entry found for key") - } -} - -impl Debug for Map { - #[inline] - fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { - self.map.fmt(formatter) - } -} - -#[cfg(any(feature = "std", feature = "alloc"))] -impl serde::ser::Serialize for Map { - #[inline] - fn serialize(&self, serializer: S) -> Result - where - S: serde::ser::Serializer, - { - use serde::ser::SerializeMap; - let mut map = tri!(serializer.serialize_map(Some(self.len()))); - for (k, v) in self { - tri!(map.serialize_entry(k, v)); - } - map.end() - } -} - -impl<'de> de::Deserialize<'de> for Map { - #[inline] - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - struct Visitor; - - impl<'de> de::Visitor<'de> for Visitor { - type Value = Map; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a map") - } - - #[inline] - fn visit_unit(self) -> Result - where - E: de::Error, - { - Ok(Map::new()) - } - - #[cfg(any(feature = "std", feature = "alloc"))] - #[inline] - fn visit_map(self, mut visitor: V) -> Result - where - V: de::MapAccess<'de>, - { - let mut values = Map::new(); - - while let Some((key, value)) = tri!(visitor.next_entry()) { - values.insert(key, value); - } - - Ok(values) - } - } - - deserializer.deserialize_map(Visitor) - } -} - -impl FromIterator<(String, Value)> for Map { - fn from_iter(iter: T) -> Self - where - T: IntoIterator, - { - Map { - map: FromIterator::from_iter(iter), - } - } -} - -impl Extend<(String, Value)> for Map { - fn extend(&mut self, iter: T) - where - T: IntoIterator, - { - 
self.map.extend(iter); - } -} - -macro_rules! delegate_iterator { - (($name:ident $($generics:tt)*) => $item:ty) => { - impl $($generics)* Iterator for $name $($generics)* { - type Item = $item; - #[inline] - fn next(&mut self) -> Option { - self.iter.next() - } - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - } - - impl $($generics)* DoubleEndedIterator for $name $($generics)* { - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back() - } - } - - impl $($generics)* ExactSizeIterator for $name $($generics)* { - #[inline] - fn len(&self) -> usize { - self.iter.len() - } - } - - impl $($generics)* FusedIterator for $name $($generics)* {} - } -} - -////////////////////////////////////////////////////////////////////////////// - -/// A view into a single entry in a map, which may either be vacant or occupied. -/// This enum is constructed from the [`entry`] method on [`Map`]. -/// -/// [`entry`]: struct.Map.html#method.entry -/// [`Map`]: struct.Map.html -pub enum Entry<'a> { - /// A vacant Entry. - Vacant(VacantEntry<'a>), - /// An occupied Entry. - Occupied(OccupiedEntry<'a>), -} - -/// A vacant Entry. It is part of the [`Entry`] enum. -/// -/// [`Entry`]: enum.Entry.html -pub struct VacantEntry<'a> { - vacant: VacantEntryImpl<'a>, -} - -/// An occupied Entry. It is part of the [`Entry`] enum. 
-/// -/// [`Entry`]: enum.Entry.html -pub struct OccupiedEntry<'a> { - occupied: OccupiedEntryImpl<'a>, -} - -#[cfg(not(feature = "preserve_order"))] -type VacantEntryImpl<'a> = btree_map::VacantEntry<'a, String, Value>; -#[cfg(feature = "preserve_order")] -type VacantEntryImpl<'a> = indexmap::map::VacantEntry<'a, String, Value>; - -#[cfg(not(feature = "preserve_order"))] -type OccupiedEntryImpl<'a> = btree_map::OccupiedEntry<'a, String, Value>; -#[cfg(feature = "preserve_order")] -type OccupiedEntryImpl<'a> = indexmap::map::OccupiedEntry<'a, String, Value>; - -impl<'a> Entry<'a> { - /// Returns a reference to this entry's key. - /// - /// # Examples - /// - /// ``` - /// let mut map = serde_json::Map::new(); - /// assert_eq!(map.entry("serde").key(), &"serde"); - /// ``` - pub fn key(&self) -> &String { - match self { - Entry::Vacant(e) => e.key(), - Entry::Occupied(e) => e.key(), - } - } - - /// Ensures a value is in the entry by inserting the default if empty, and - /// returns a mutable reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// let mut map = serde_json::Map::new(); - /// map.entry("serde").or_insert(json!(12)); - /// - /// assert_eq!(map["serde"], 12); - /// ``` - pub fn or_insert(self, default: Value) -> &'a mut Value { - match self { - Entry::Vacant(entry) => entry.insert(default), - Entry::Occupied(entry) => entry.into_mut(), - } - } - - /// Ensures a value is in the entry by inserting the result of the default - /// function if empty, and returns a mutable reference to the value in the - /// entry. 
- /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// let mut map = serde_json::Map::new(); - /// map.entry("serde").or_insert_with(|| json!("hoho")); - /// - /// assert_eq!(map["serde"], "hoho".to_owned()); - /// ``` - pub fn or_insert_with(self, default: F) -> &'a mut Value - where - F: FnOnce() -> Value, - { - match self { - Entry::Vacant(entry) => entry.insert(default()), - Entry::Occupied(entry) => entry.into_mut(), - } - } - - /// Provides in-place mutable access to an occupied entry before any - /// potential inserts into the map. - /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// let mut map = serde_json::Map::new(); - /// map.entry("serde") - /// .and_modify(|e| *e = json!("rust")) - /// .or_insert(json!("cpp")); - /// - /// assert_eq!(map["serde"], "cpp"); - /// - /// map.entry("serde") - /// .and_modify(|e| *e = json!("rust")) - /// .or_insert(json!("cpp")); - /// - /// assert_eq!(map["serde"], "rust"); - /// ``` - pub fn and_modify(self, f: F) -> Self - where - F: FnOnce(&mut Value), - { - match self { - Entry::Occupied(mut entry) => { - f(entry.get_mut()); - Entry::Occupied(entry) - } - Entry::Vacant(entry) => Entry::Vacant(entry), - } - } -} - -impl<'a> VacantEntry<'a> { - /// Gets a reference to the key that would be used when inserting a value - /// through the VacantEntry. - /// - /// # Examples - /// - /// ``` - /// use serde_json::map::Entry; - /// - /// let mut map = serde_json::Map::new(); - /// - /// match map.entry("serde") { - /// Entry::Vacant(vacant) => { - /// assert_eq!(vacant.key(), &"serde"); - /// } - /// Entry::Occupied(_) => unimplemented!(), - /// } - /// ``` - #[inline] - pub fn key(&self) -> &String { - self.vacant.key() - } - - /// Sets the value of the entry with the VacantEntry's key, and returns a - /// mutable reference to it. 
- /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// use serde_json::map::Entry; - /// - /// let mut map = serde_json::Map::new(); - /// - /// match map.entry("serde") { - /// Entry::Vacant(vacant) => { - /// vacant.insert(json!("hoho")); - /// } - /// Entry::Occupied(_) => unimplemented!(), - /// } - /// ``` - #[inline] - pub fn insert(self, value: Value) -> &'a mut Value { - self.vacant.insert(value) - } -} - -impl<'a> OccupiedEntry<'a> { - /// Gets a reference to the key in the entry. - /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// use serde_json::map::Entry; - /// - /// let mut map = serde_json::Map::new(); - /// map.insert("serde".to_owned(), json!(12)); - /// - /// match map.entry("serde") { - /// Entry::Occupied(occupied) => { - /// assert_eq!(occupied.key(), &"serde"); - /// } - /// Entry::Vacant(_) => unimplemented!(), - /// } - /// ``` - #[inline] - pub fn key(&self) -> &String { - self.occupied.key() - } - - /// Gets a reference to the value in the entry. - /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// use serde_json::map::Entry; - /// - /// let mut map = serde_json::Map::new(); - /// map.insert("serde".to_owned(), json!(12)); - /// - /// match map.entry("serde") { - /// Entry::Occupied(occupied) => { - /// assert_eq!(occupied.get(), 12); - /// } - /// Entry::Vacant(_) => unimplemented!(), - /// } - /// ``` - #[inline] - pub fn get(&self) -> &Value { - self.occupied.get() - } - - /// Gets a mutable reference to the value in the entry. 
- /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// use serde_json::map::Entry; - /// - /// let mut map = serde_json::Map::new(); - /// map.insert("serde".to_owned(), json!([1, 2, 3])); - /// - /// match map.entry("serde") { - /// Entry::Occupied(mut occupied) => { - /// occupied.get_mut().as_array_mut().unwrap().push(json!(4)); - /// } - /// Entry::Vacant(_) => unimplemented!(), - /// } - /// - /// assert_eq!(map["serde"].as_array().unwrap().len(), 4); - /// ``` - #[inline] - pub fn get_mut(&mut self) -> &mut Value { - self.occupied.get_mut() - } - - /// Converts the entry into a mutable reference to its value. - /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// use serde_json::map::Entry; - /// - /// let mut map = serde_json::Map::new(); - /// map.insert("serde".to_owned(), json!([1, 2, 3])); - /// - /// match map.entry("serde") { - /// Entry::Occupied(mut occupied) => { - /// occupied.into_mut().as_array_mut().unwrap().push(json!(4)); - /// } - /// Entry::Vacant(_) => unimplemented!(), - /// } - /// - /// assert_eq!(map["serde"].as_array().unwrap().len(), 4); - /// ``` - #[inline] - pub fn into_mut(self) -> &'a mut Value { - self.occupied.into_mut() - } - - /// Sets the value of the entry with the `OccupiedEntry`'s key, and returns - /// the entry's old value. - /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// use serde_json::map::Entry; - /// - /// let mut map = serde_json::Map::new(); - /// map.insert("serde".to_owned(), json!(12)); - /// - /// match map.entry("serde") { - /// Entry::Occupied(mut occupied) => { - /// assert_eq!(occupied.insert(json!(13)), 12); - /// assert_eq!(occupied.get(), 13); - /// } - /// Entry::Vacant(_) => unimplemented!(), - /// } - /// ``` - #[inline] - pub fn insert(&mut self, value: Value) -> Value { - self.occupied.insert(value) - } - - /// Takes the value of the entry out of the map, and returns it. 
- /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// use serde_json::map::Entry; - /// - /// let mut map = serde_json::Map::new(); - /// map.insert("serde".to_owned(), json!(12)); - /// - /// match map.entry("serde") { - /// Entry::Occupied(occupied) => { - /// assert_eq!(occupied.remove(), 12); - /// } - /// Entry::Vacant(_) => unimplemented!(), - /// } - /// ``` - #[inline] - pub fn remove(self) -> Value { - #[cfg(feature = "preserve_order")] - return self.occupied.swap_remove(); - #[cfg(not(feature = "preserve_order"))] - return self.occupied.remove(); - } -} - -////////////////////////////////////////////////////////////////////////////// - -impl<'a> IntoIterator for &'a Map { - type Item = (&'a String, &'a Value); - type IntoIter = Iter<'a>; - #[inline] - fn into_iter(self) -> Self::IntoIter { - Iter { - iter: self.map.iter(), - } - } -} - -/// An iterator over a serde_json::Map's entries. -pub struct Iter<'a> { - iter: IterImpl<'a>, -} - -#[cfg(not(feature = "preserve_order"))] -type IterImpl<'a> = btree_map::Iter<'a, String, Value>; -#[cfg(feature = "preserve_order")] -type IterImpl<'a> = indexmap::map::Iter<'a, String, Value>; - -delegate_iterator!((Iter<'a>) => (&'a String, &'a Value)); - -////////////////////////////////////////////////////////////////////////////// - -impl<'a> IntoIterator for &'a mut Map { - type Item = (&'a String, &'a mut Value); - type IntoIter = IterMut<'a>; - #[inline] - fn into_iter(self) -> Self::IntoIter { - IterMut { - iter: self.map.iter_mut(), - } - } -} - -/// A mutable iterator over a serde_json::Map's entries. 
-pub struct IterMut<'a> { - iter: IterMutImpl<'a>, -} - -#[cfg(not(feature = "preserve_order"))] -type IterMutImpl<'a> = btree_map::IterMut<'a, String, Value>; -#[cfg(feature = "preserve_order")] -type IterMutImpl<'a> = indexmap::map::IterMut<'a, String, Value>; - -delegate_iterator!((IterMut<'a>) => (&'a String, &'a mut Value)); - -////////////////////////////////////////////////////////////////////////////// - -impl IntoIterator for Map { - type Item = (String, Value); - type IntoIter = IntoIter; - #[inline] - fn into_iter(self) -> Self::IntoIter { - IntoIter { - iter: self.map.into_iter(), - } - } -} - -/// An owning iterator over a serde_json::Map's entries. -pub struct IntoIter { - iter: IntoIterImpl, -} - -#[cfg(not(feature = "preserve_order"))] -type IntoIterImpl = btree_map::IntoIter; -#[cfg(feature = "preserve_order")] -type IntoIterImpl = indexmap::map::IntoIter; - -delegate_iterator!((IntoIter) => (String, Value)); - -////////////////////////////////////////////////////////////////////////////// - -/// An iterator over a serde_json::Map's keys. -pub struct Keys<'a> { - iter: KeysImpl<'a>, -} - -#[cfg(not(feature = "preserve_order"))] -type KeysImpl<'a> = btree_map::Keys<'a, String, Value>; -#[cfg(feature = "preserve_order")] -type KeysImpl<'a> = indexmap::map::Keys<'a, String, Value>; - -delegate_iterator!((Keys<'a>) => &'a String); - -////////////////////////////////////////////////////////////////////////////// - -/// An iterator over a serde_json::Map's values. -pub struct Values<'a> { - iter: ValuesImpl<'a>, -} - -#[cfg(not(feature = "preserve_order"))] -type ValuesImpl<'a> = btree_map::Values<'a, String, Value>; -#[cfg(feature = "preserve_order")] -type ValuesImpl<'a> = indexmap::map::Values<'a, String, Value>; - -delegate_iterator!((Values<'a>) => &'a Value); - -////////////////////////////////////////////////////////////////////////////// - -/// A mutable iterator over a serde_json::Map's values. 
-pub struct ValuesMut<'a> { - iter: ValuesMutImpl<'a>, -} - -#[cfg(not(feature = "preserve_order"))] -type ValuesMutImpl<'a> = btree_map::ValuesMut<'a, String, Value>; -#[cfg(feature = "preserve_order")] -type ValuesMutImpl<'a> = indexmap::map::ValuesMut<'a, String, Value>; - -delegate_iterator!((ValuesMut<'a>) => &'a mut Value); diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/number.rs s390-tools-2.33.1/rust-vendor/serde_json/src/number.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/number.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/number.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,774 +0,0 @@ -use crate::de::ParserNumber; -use crate::error::Error; -#[cfg(feature = "arbitrary_precision")] -use crate::error::ErrorCode; -#[cfg(feature = "arbitrary_precision")] -use alloc::borrow::ToOwned; -#[cfg(feature = "arbitrary_precision")] -use alloc::string::{String, ToString}; -use core::fmt::{self, Debug, Display}; -#[cfg(not(feature = "arbitrary_precision"))] -use core::hash::{Hash, Hasher}; -use serde::de::{self, Unexpected, Visitor}; -#[cfg(feature = "arbitrary_precision")] -use serde::de::{IntoDeserializer, MapAccess}; -use serde::{forward_to_deserialize_any, Deserialize, Deserializer, Serialize, Serializer}; - -#[cfg(feature = "arbitrary_precision")] -pub(crate) const TOKEN: &str = "$serde_json::private::Number"; - -/// Represents a JSON number, whether integer or floating point. -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct Number { - n: N, -} - -#[cfg(not(feature = "arbitrary_precision"))] -#[derive(Copy, Clone)] -enum N { - PosInt(u64), - /// Always less than zero. - NegInt(i64), - /// Always finite. 
- Float(f64), -} - -#[cfg(not(feature = "arbitrary_precision"))] -impl PartialEq for N { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (N::PosInt(a), N::PosInt(b)) => a == b, - (N::NegInt(a), N::NegInt(b)) => a == b, - (N::Float(a), N::Float(b)) => a == b, - _ => false, - } - } -} - -// Implementing Eq is fine since any float values are always finite. -#[cfg(not(feature = "arbitrary_precision"))] -impl Eq for N {} - -#[cfg(not(feature = "arbitrary_precision"))] -impl Hash for N { - fn hash(&self, h: &mut H) { - match *self { - N::PosInt(i) => i.hash(h), - N::NegInt(i) => i.hash(h), - N::Float(f) => { - if f == 0.0f64 { - // There are 2 zero representations, +0 and -0, which - // compare equal but have different bits. We use the +0 hash - // for both so that hash(+0) == hash(-0). - 0.0f64.to_bits().hash(h); - } else { - f.to_bits().hash(h); - } - } - } - } -} - -#[cfg(feature = "arbitrary_precision")] -type N = String; - -impl Number { - /// Returns true if the `Number` is an integer between `i64::MIN` and - /// `i64::MAX`. - /// - /// For any Number on which `is_i64` returns true, `as_i64` is guaranteed to - /// return the integer value. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let big = i64::max_value() as u64 + 10; - /// let v = json!({ "a": 64, "b": big, "c": 256.0 }); - /// - /// assert!(v["a"].is_i64()); - /// - /// // Greater than i64::MAX. - /// assert!(!v["b"].is_i64()); - /// - /// // Numbers with a decimal point are not considered integers. - /// assert!(!v["c"].is_i64()); - /// ``` - #[inline] - pub fn is_i64(&self) -> bool { - #[cfg(not(feature = "arbitrary_precision"))] - match self.n { - N::PosInt(v) => v <= i64::max_value() as u64, - N::NegInt(_) => true, - N::Float(_) => false, - } - #[cfg(feature = "arbitrary_precision")] - self.as_i64().is_some() - } - - /// Returns true if the `Number` is an integer between zero and `u64::MAX`. 
- /// - /// For any Number on which `is_u64` returns true, `as_u64` is guaranteed to - /// return the integer value. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": 64, "b": -64, "c": 256.0 }); - /// - /// assert!(v["a"].is_u64()); - /// - /// // Negative integer. - /// assert!(!v["b"].is_u64()); - /// - /// // Numbers with a decimal point are not considered integers. - /// assert!(!v["c"].is_u64()); - /// ``` - #[inline] - pub fn is_u64(&self) -> bool { - #[cfg(not(feature = "arbitrary_precision"))] - match self.n { - N::PosInt(_) => true, - N::NegInt(_) | N::Float(_) => false, - } - #[cfg(feature = "arbitrary_precision")] - self.as_u64().is_some() - } - - /// Returns true if the `Number` can be represented by f64. - /// - /// For any Number on which `is_f64` returns true, `as_f64` is guaranteed to - /// return the floating point value. - /// - /// Currently this function returns true if and only if both `is_i64` and - /// `is_u64` return false but this is not a guarantee in the future. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": 256.0, "b": 64, "c": -64 }); - /// - /// assert!(v["a"].is_f64()); - /// - /// // Integers. - /// assert!(!v["b"].is_f64()); - /// assert!(!v["c"].is_f64()); - /// ``` - #[inline] - pub fn is_f64(&self) -> bool { - #[cfg(not(feature = "arbitrary_precision"))] - match self.n { - N::Float(_) => true, - N::PosInt(_) | N::NegInt(_) => false, - } - #[cfg(feature = "arbitrary_precision")] - { - for c in self.n.chars() { - if c == '.' || c == 'e' || c == 'E' { - return self.n.parse::().ok().map_or(false, f64::is_finite); - } - } - false - } - } - - /// If the `Number` is an integer, represent it as i64 if possible. Returns - /// None otherwise. 
- /// - /// ``` - /// # use serde_json::json; - /// # - /// let big = i64::max_value() as u64 + 10; - /// let v = json!({ "a": 64, "b": big, "c": 256.0 }); - /// - /// assert_eq!(v["a"].as_i64(), Some(64)); - /// assert_eq!(v["b"].as_i64(), None); - /// assert_eq!(v["c"].as_i64(), None); - /// ``` - #[inline] - pub fn as_i64(&self) -> Option { - #[cfg(not(feature = "arbitrary_precision"))] - match self.n { - N::PosInt(n) => { - if n <= i64::max_value() as u64 { - Some(n as i64) - } else { - None - } - } - N::NegInt(n) => Some(n), - N::Float(_) => None, - } - #[cfg(feature = "arbitrary_precision")] - self.n.parse().ok() - } - - /// If the `Number` is an integer, represent it as u64 if possible. Returns - /// None otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": 64, "b": -64, "c": 256.0 }); - /// - /// assert_eq!(v["a"].as_u64(), Some(64)); - /// assert_eq!(v["b"].as_u64(), None); - /// assert_eq!(v["c"].as_u64(), None); - /// ``` - #[inline] - pub fn as_u64(&self) -> Option { - #[cfg(not(feature = "arbitrary_precision"))] - match self.n { - N::PosInt(n) => Some(n), - N::NegInt(_) | N::Float(_) => None, - } - #[cfg(feature = "arbitrary_precision")] - self.n.parse().ok() - } - - /// Represents the number as f64 if possible. Returns None otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": 256.0, "b": 64, "c": -64 }); - /// - /// assert_eq!(v["a"].as_f64(), Some(256.0)); - /// assert_eq!(v["b"].as_f64(), Some(64.0)); - /// assert_eq!(v["c"].as_f64(), Some(-64.0)); - /// ``` - #[inline] - pub fn as_f64(&self) -> Option { - #[cfg(not(feature = "arbitrary_precision"))] - match self.n { - N::PosInt(n) => Some(n as f64), - N::NegInt(n) => Some(n as f64), - N::Float(n) => Some(n), - } - #[cfg(feature = "arbitrary_precision")] - self.n.parse::().ok().filter(|float| float.is_finite()) - } - - /// Converts a finite `f64` to a `Number`. Infinite or NaN values are not JSON - /// numbers. 
- /// - /// ``` - /// # use std::f64; - /// # - /// # use serde_json::Number; - /// # - /// assert!(Number::from_f64(256.0).is_some()); - /// - /// assert!(Number::from_f64(f64::NAN).is_none()); - /// ``` - #[inline] - pub fn from_f64(f: f64) -> Option { - if f.is_finite() { - let n = { - #[cfg(not(feature = "arbitrary_precision"))] - { - N::Float(f) - } - #[cfg(feature = "arbitrary_precision")] - { - ryu::Buffer::new().format_finite(f).to_owned() - } - }; - Some(Number { n }) - } else { - None - } - } - - pub(crate) fn as_f32(&self) -> Option { - #[cfg(not(feature = "arbitrary_precision"))] - match self.n { - N::PosInt(n) => Some(n as f32), - N::NegInt(n) => Some(n as f32), - N::Float(n) => Some(n as f32), - } - #[cfg(feature = "arbitrary_precision")] - self.n.parse::().ok().filter(|float| float.is_finite()) - } - - pub(crate) fn from_f32(f: f32) -> Option { - if f.is_finite() { - let n = { - #[cfg(not(feature = "arbitrary_precision"))] - { - N::Float(f as f64) - } - #[cfg(feature = "arbitrary_precision")] - { - ryu::Buffer::new().format_finite(f).to_owned() - } - }; - Some(Number { n }) - } else { - None - } - } - - #[cfg(feature = "arbitrary_precision")] - /// Not public API. Only tests use this. 
- #[doc(hidden)] - #[inline] - pub fn from_string_unchecked(n: String) -> Self { - Number { n } - } -} - -impl Display for Number { - #[cfg(not(feature = "arbitrary_precision"))] - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match self.n { - N::PosInt(u) => formatter.write_str(itoa::Buffer::new().format(u)), - N::NegInt(i) => formatter.write_str(itoa::Buffer::new().format(i)), - N::Float(f) => formatter.write_str(ryu::Buffer::new().format_finite(f)), - } - } - - #[cfg(feature = "arbitrary_precision")] - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.n, formatter) - } -} - -impl Debug for Number { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "Number({})", self) - } -} - -impl Serialize for Number { - #[cfg(not(feature = "arbitrary_precision"))] - #[inline] - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match self.n { - N::PosInt(u) => serializer.serialize_u64(u), - N::NegInt(i) => serializer.serialize_i64(i), - N::Float(f) => serializer.serialize_f64(f), - } - } - - #[cfg(feature = "arbitrary_precision")] - #[inline] - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - use serde::ser::SerializeStruct; - - let mut s = serializer.serialize_struct(TOKEN, 1)?; - s.serialize_field(TOKEN, &self.n)?; - s.end() - } -} - -impl<'de> Deserialize<'de> for Number { - #[inline] - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct NumberVisitor; - - impl<'de> Visitor<'de> for NumberVisitor { - type Value = Number; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a JSON number") - } - - #[inline] - fn visit_i64(self, value: i64) -> Result { - Ok(value.into()) - } - - #[inline] - fn visit_u64(self, value: u64) -> Result { - Ok(value.into()) - } - - #[inline] - fn visit_f64(self, value: f64) -> Result - where - E: de::Error, - { - 
Number::from_f64(value).ok_or_else(|| de::Error::custom("not a JSON number")) - } - - #[cfg(feature = "arbitrary_precision")] - #[inline] - fn visit_map(self, mut visitor: V) -> Result - where - V: de::MapAccess<'de>, - { - let value = visitor.next_key::()?; - if value.is_none() { - return Err(de::Error::invalid_type(Unexpected::Map, &self)); - } - let v: NumberFromString = visitor.next_value()?; - Ok(v.value) - } - } - - deserializer.deserialize_any(NumberVisitor) - } -} - -#[cfg(feature = "arbitrary_precision")] -struct NumberKey; - -#[cfg(feature = "arbitrary_precision")] -impl<'de> de::Deserialize<'de> for NumberKey { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - struct FieldVisitor; - - impl<'de> de::Visitor<'de> for FieldVisitor { - type Value = (); - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a valid number field") - } - - fn visit_str(self, s: &str) -> Result<(), E> - where - E: de::Error, - { - if s == TOKEN { - Ok(()) - } else { - Err(de::Error::custom("expected field with custom name")) - } - } - } - - deserializer.deserialize_identifier(FieldVisitor)?; - Ok(NumberKey) - } -} - -#[cfg(feature = "arbitrary_precision")] -pub struct NumberFromString { - pub value: Number, -} - -#[cfg(feature = "arbitrary_precision")] -impl<'de> de::Deserialize<'de> for NumberFromString { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - struct Visitor; - - impl<'de> de::Visitor<'de> for Visitor { - type Value = NumberFromString; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("string containing a number") - } - - fn visit_str(self, s: &str) -> Result - where - E: de::Error, - { - let n = tri!(s.parse().map_err(de::Error::custom)); - Ok(NumberFromString { value: n }) - } - } - - deserializer.deserialize_str(Visitor) - } -} - -#[cfg(feature = "arbitrary_precision")] -fn invalid_number() -> Error { - 
Error::syntax(ErrorCode::InvalidNumber, 0, 0) -} - -macro_rules! deserialize_any { - (@expand [$($num_string:tt)*]) => { - #[cfg(not(feature = "arbitrary_precision"))] - #[inline] - fn deserialize_any(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self.n { - N::PosInt(u) => visitor.visit_u64(u), - N::NegInt(i) => visitor.visit_i64(i), - N::Float(f) => visitor.visit_f64(f), - } - } - - #[cfg(feature = "arbitrary_precision")] - #[inline] - fn deserialize_any(self, visitor: V) -> Result - where V: Visitor<'de> - { - if let Some(u) = self.as_u64() { - return visitor.visit_u64(u); - } else if let Some(i) = self.as_i64() { - return visitor.visit_i64(i); - } else if let Some(f) = self.as_f64() { - if ryu::Buffer::new().format_finite(f) == self.n || f.to_string() == self.n { - return visitor.visit_f64(f); - } - } - - visitor.visit_map(NumberDeserializer { - number: Some(self.$($num_string)*), - }) - } - }; - - (owned) => { - deserialize_any!(@expand [n]); - }; - - (ref) => { - deserialize_any!(@expand [n.clone()]); - }; -} - -macro_rules! deserialize_number { - ($deserialize:ident => $visit:ident) => { - #[cfg(not(feature = "arbitrary_precision"))] - fn $deserialize(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_any(visitor) - } - - #[cfg(feature = "arbitrary_precision")] - fn $deserialize(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - visitor.$visit(self.n.parse().map_err(|_| invalid_number())?) 
- } - }; -} - -impl<'de> Deserializer<'de> for Number { - type Error = Error; - - deserialize_any!(owned); - - deserialize_number!(deserialize_i8 => visit_i8); - deserialize_number!(deserialize_i16 => visit_i16); - deserialize_number!(deserialize_i32 => visit_i32); - deserialize_number!(deserialize_i64 => visit_i64); - deserialize_number!(deserialize_i128 => visit_i128); - deserialize_number!(deserialize_u8 => visit_u8); - deserialize_number!(deserialize_u16 => visit_u16); - deserialize_number!(deserialize_u32 => visit_u32); - deserialize_number!(deserialize_u64 => visit_u64); - deserialize_number!(deserialize_u128 => visit_u128); - deserialize_number!(deserialize_f32 => visit_f32); - deserialize_number!(deserialize_f64 => visit_f64); - - forward_to_deserialize_any! { - bool char str string bytes byte_buf option unit unit_struct - newtype_struct seq tuple tuple_struct map struct enum identifier - ignored_any - } -} - -impl<'de, 'a> Deserializer<'de> for &'a Number { - type Error = Error; - - deserialize_any!(ref); - - deserialize_number!(deserialize_i8 => visit_i8); - deserialize_number!(deserialize_i16 => visit_i16); - deserialize_number!(deserialize_i32 => visit_i32); - deserialize_number!(deserialize_i64 => visit_i64); - deserialize_number!(deserialize_i128 => visit_i128); - deserialize_number!(deserialize_u8 => visit_u8); - deserialize_number!(deserialize_u16 => visit_u16); - deserialize_number!(deserialize_u32 => visit_u32); - deserialize_number!(deserialize_u64 => visit_u64); - deserialize_number!(deserialize_u128 => visit_u128); - deserialize_number!(deserialize_f32 => visit_f32); - deserialize_number!(deserialize_f64 => visit_f64); - - forward_to_deserialize_any! 
{ - bool char str string bytes byte_buf option unit unit_struct - newtype_struct seq tuple tuple_struct map struct enum identifier - ignored_any - } -} - -#[cfg(feature = "arbitrary_precision")] -pub(crate) struct NumberDeserializer { - pub number: Option, -} - -#[cfg(feature = "arbitrary_precision")] -impl<'de> MapAccess<'de> for NumberDeserializer { - type Error = Error; - - fn next_key_seed(&mut self, seed: K) -> Result, Error> - where - K: de::DeserializeSeed<'de>, - { - if self.number.is_none() { - return Ok(None); - } - seed.deserialize(NumberFieldDeserializer).map(Some) - } - - fn next_value_seed(&mut self, seed: V) -> Result - where - V: de::DeserializeSeed<'de>, - { - seed.deserialize(self.number.take().unwrap().into_deserializer()) - } -} - -#[cfg(feature = "arbitrary_precision")] -struct NumberFieldDeserializer; - -#[cfg(feature = "arbitrary_precision")] -impl<'de> Deserializer<'de> for NumberFieldDeserializer { - type Error = Error; - - fn deserialize_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_borrowed_str(TOKEN) - } - - forward_to_deserialize_any! 
{ - bool u8 u16 u32 u64 u128 i8 i16 i32 i64 i128 f32 f64 char str string seq - bytes byte_buf map struct option unit newtype_struct ignored_any - unit_struct tuple_struct tuple enum identifier - } -} - -impl From for Number { - fn from(value: ParserNumber) -> Self { - let n = match value { - ParserNumber::F64(f) => { - #[cfg(not(feature = "arbitrary_precision"))] - { - N::Float(f) - } - #[cfg(feature = "arbitrary_precision")] - { - f.to_string() - } - } - ParserNumber::U64(u) => { - #[cfg(not(feature = "arbitrary_precision"))] - { - N::PosInt(u) - } - #[cfg(feature = "arbitrary_precision")] - { - u.to_string() - } - } - ParserNumber::I64(i) => { - #[cfg(not(feature = "arbitrary_precision"))] - { - N::NegInt(i) - } - #[cfg(feature = "arbitrary_precision")] - { - i.to_string() - } - } - #[cfg(feature = "arbitrary_precision")] - ParserNumber::String(s) => s, - }; - Number { n } - } -} - -macro_rules! impl_from_unsigned { - ( - $($ty:ty),* - ) => { - $( - impl From<$ty> for Number { - #[inline] - fn from(u: $ty) -> Self { - let n = { - #[cfg(not(feature = "arbitrary_precision"))] - { N::PosInt(u as u64) } - #[cfg(feature = "arbitrary_precision")] - { - itoa::Buffer::new().format(u).to_owned() - } - }; - Number { n } - } - } - )* - }; -} - -macro_rules! 
impl_from_signed { - ( - $($ty:ty),* - ) => { - $( - impl From<$ty> for Number { - #[inline] - fn from(i: $ty) -> Self { - let n = { - #[cfg(not(feature = "arbitrary_precision"))] - { - if i < 0 { - N::NegInt(i as i64) - } else { - N::PosInt(i as u64) - } - } - #[cfg(feature = "arbitrary_precision")] - { - itoa::Buffer::new().format(i).to_owned() - } - }; - Number { n } - } - } - )* - }; -} - -impl_from_unsigned!(u8, u16, u32, u64, usize); -impl_from_signed!(i8, i16, i32, i64, isize); - -#[cfg(feature = "arbitrary_precision")] -impl_from_unsigned!(u128); -#[cfg(feature = "arbitrary_precision")] -impl_from_signed!(i128); - -impl Number { - #[cfg(not(feature = "arbitrary_precision"))] - #[cold] - pub(crate) fn unexpected(&self) -> Unexpected { - match self.n { - N::PosInt(u) => Unexpected::Unsigned(u), - N::NegInt(i) => Unexpected::Signed(i), - N::Float(f) => Unexpected::Float(f), - } - } - - #[cfg(feature = "arbitrary_precision")] - #[cold] - pub(crate) fn unexpected(&self) -> Unexpected { - Unexpected::Other("number") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/raw.rs s390-tools-2.33.1/rust-vendor/serde_json/src/raw.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/raw.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/raw.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,531 +0,0 @@ -use crate::error::Error; -use alloc::borrow::ToOwned; -use alloc::boxed::Box; -use alloc::string::String; -use core::fmt::{self, Debug, Display}; -use core::mem; -use serde::de::value::BorrowedStrDeserializer; -use serde::de::{ - self, Deserialize, DeserializeSeed, Deserializer, IntoDeserializer, MapAccess, Unexpected, - Visitor, -}; -use serde::forward_to_deserialize_any; -use serde::ser::{Serialize, SerializeStruct, Serializer}; - -/// Reference to a range of bytes encompassing a single valid JSON value in the -/// input data. 
-/// -/// A `RawValue` can be used to defer parsing parts of a payload until later, -/// or to avoid parsing it at all in the case that part of the payload just -/// needs to be transferred verbatim into a different output object. -/// -/// When serializing, a value of this type will retain its original formatting -/// and will not be minified or pretty-printed. -/// -/// # Note -/// -/// `RawValue` is only available if serde\_json is built with the `"raw_value"` -/// feature. -/// -/// ```toml -/// [dependencies] -/// serde_json = { version = "1.0", features = ["raw_value"] } -/// ``` -/// -/// # Example -/// -/// ``` -/// use serde::{Deserialize, Serialize}; -/// use serde_json::{Result, value::RawValue}; -/// -/// #[derive(Deserialize)] -/// struct Input<'a> { -/// code: u32, -/// #[serde(borrow)] -/// payload: &'a RawValue, -/// } -/// -/// #[derive(Serialize)] -/// struct Output<'a> { -/// info: (u32, &'a RawValue), -/// } -/// -/// // Efficiently rearrange JSON input containing separate "code" and "payload" -/// // keys into a single "info" key holding an array of code and payload. -/// // -/// // This could be done equivalently using serde_json::Value as the type for -/// // payload, but &RawValue will perform better because it does not require -/// // memory allocation. The correct range of bytes is borrowed from the input -/// // data and pasted verbatim into the output. 
-/// fn rearrange(input: &str) -> Result { -/// let input: Input = serde_json::from_str(input)?; -/// -/// let output = Output { -/// info: (input.code, input.payload), -/// }; -/// -/// serde_json::to_string(&output) -/// } -/// -/// fn main() -> Result<()> { -/// let out = rearrange(r#" {"code": 200, "payload": {}} "#)?; -/// -/// assert_eq!(out, r#"{"info":[200,{}]}"#); -/// -/// Ok(()) -/// } -/// ``` -/// -/// # Ownership -/// -/// The typical usage of `RawValue` will be in the borrowed form: -/// -/// ``` -/// # use serde::Deserialize; -/// # use serde_json::value::RawValue; -/// # -/// #[derive(Deserialize)] -/// struct SomeStruct<'a> { -/// #[serde(borrow)] -/// raw_value: &'a RawValue, -/// } -/// ``` -/// -/// The borrowed form is suitable when deserializing through -/// [`serde_json::from_str`] and [`serde_json::from_slice`] which support -/// borrowing from the input data without memory allocation. -/// -/// When deserializing through [`serde_json::from_reader`] you will need to use -/// the boxed form of `RawValue` instead. This is almost as efficient but -/// involves buffering the raw value from the I/O stream into memory. 
-/// -/// [`serde_json::from_str`]: ../fn.from_str.html -/// [`serde_json::from_slice`]: ../fn.from_slice.html -/// [`serde_json::from_reader`]: ../fn.from_reader.html -/// -/// ``` -/// # use serde::Deserialize; -/// # use serde_json::value::RawValue; -/// # -/// #[derive(Deserialize)] -/// struct SomeStruct { -/// raw_value: Box, -/// } -/// ``` -#[cfg_attr(not(doc), repr(transparent))] -#[cfg_attr(docsrs, doc(cfg(feature = "raw_value")))] -pub struct RawValue { - json: str, -} - -impl RawValue { - fn from_borrowed(json: &str) -> &Self { - unsafe { mem::transmute::<&str, &RawValue>(json) } - } - - fn from_owned(json: Box) -> Box { - unsafe { mem::transmute::, Box>(json) } - } - - fn into_owned(raw_value: Box) -> Box { - unsafe { mem::transmute::, Box>(raw_value) } - } -} - -impl Clone for Box { - fn clone(&self) -> Self { - (**self).to_owned() - } -} - -impl ToOwned for RawValue { - type Owned = Box; - - fn to_owned(&self) -> Self::Owned { - RawValue::from_owned(self.json.to_owned().into_boxed_str()) - } -} - -impl Default for Box { - fn default() -> Self { - RawValue::from_borrowed("null").to_owned() - } -} - -impl Debug for RawValue { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter - .debug_tuple("RawValue") - .field(&format_args!("{}", &self.json)) - .finish() - } -} - -impl Display for RawValue { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(&self.json) - } -} - -impl RawValue { - /// Convert an owned `String` of JSON data to an owned `RawValue`. - /// - /// This function is equivalent to `serde_json::from_str::>` - /// except that we avoid an allocation and memcpy if both of the following - /// are true: - /// - /// - the input has no leading or trailing whitespace, and - /// - the input has capacity equal to its length. 
- pub fn from_string(json: String) -> Result, Error> { - { - let borrowed = crate::from_str::<&Self>(&json)?; - if borrowed.json.len() < json.len() { - return Ok(borrowed.to_owned()); - } - } - Ok(Self::from_owned(json.into_boxed_str())) - } - - /// Access the JSON text underlying a raw value. - /// - /// # Example - /// - /// ``` - /// use serde::Deserialize; - /// use serde_json::{Result, value::RawValue}; - /// - /// #[derive(Deserialize)] - /// struct Response<'a> { - /// code: u32, - /// #[serde(borrow)] - /// payload: &'a RawValue, - /// } - /// - /// fn process(input: &str) -> Result<()> { - /// let response: Response = serde_json::from_str(input)?; - /// - /// let payload = response.payload.get(); - /// if payload.starts_with('{') { - /// // handle a payload which is a JSON map - /// } else { - /// // handle any other type - /// } - /// - /// Ok(()) - /// } - /// - /// fn main() -> Result<()> { - /// process(r#" {"code": 200, "payload": {}} "#)?; - /// Ok(()) - /// } - /// ``` - pub fn get(&self) -> &str { - &self.json - } -} - -impl From> for Box { - fn from(raw_value: Box) -> Self { - RawValue::into_owned(raw_value) - } -} - -/// Convert a `T` into a boxed `RawValue`. 
-/// -/// # Example -/// -/// ``` -/// // Upstream crate -/// # #[derive(Serialize)] -/// pub struct Thing { -/// foo: String, -/// bar: Option, -/// extra_data: Box, -/// } -/// -/// // Local crate -/// use serde::Serialize; -/// use serde_json::value::{to_raw_value, RawValue}; -/// -/// #[derive(Serialize)] -/// struct MyExtraData { -/// a: u32, -/// b: u32, -/// } -/// -/// let my_thing = Thing { -/// foo: "FooVal".into(), -/// bar: None, -/// extra_data: to_raw_value(&MyExtraData { a: 1, b: 2 }).unwrap(), -/// }; -/// # assert_eq!( -/// # serde_json::to_value(my_thing).unwrap(), -/// # serde_json::json!({ -/// # "foo": "FooVal", -/// # "bar": null, -/// # "extra_data": { "a": 1, "b": 2 } -/// # }) -/// # ); -/// ``` -/// -/// # Errors -/// -/// This conversion can fail if `T`'s implementation of `Serialize` decides to -/// fail, or if `T` contains a map with non-string keys. -/// -/// ``` -/// use std::collections::BTreeMap; -/// -/// // The keys in this map are vectors, not strings. 
-/// let mut map = BTreeMap::new(); -/// map.insert(vec![32, 64], "x86"); -/// -/// println!("{}", serde_json::value::to_raw_value(&map).unwrap_err()); -/// ``` -#[cfg_attr(docsrs, doc(cfg(feature = "raw_value")))] -pub fn to_raw_value(value: &T) -> Result, Error> -where - T: ?Sized + Serialize, -{ - let json_string = crate::to_string(value)?; - Ok(RawValue::from_owned(json_string.into_boxed_str())) -} - -pub const TOKEN: &str = "$serde_json::private::RawValue"; - -impl Serialize for RawValue { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut s = serializer.serialize_struct(TOKEN, 1)?; - s.serialize_field(TOKEN, &self.json)?; - s.end() - } -} - -impl<'de: 'a, 'a> Deserialize<'de> for &'a RawValue { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct ReferenceVisitor; - - impl<'de> Visitor<'de> for ReferenceVisitor { - type Value = &'de RawValue; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "any valid JSON value") - } - - fn visit_map(self, mut visitor: V) -> Result - where - V: MapAccess<'de>, - { - let value = visitor.next_key::()?; - if value.is_none() { - return Err(de::Error::invalid_type(Unexpected::Map, &self)); - } - visitor.next_value_seed(ReferenceFromString) - } - } - - deserializer.deserialize_newtype_struct(TOKEN, ReferenceVisitor) - } -} - -impl<'de> Deserialize<'de> for Box { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct BoxedVisitor; - - impl<'de> Visitor<'de> for BoxedVisitor { - type Value = Box; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "any valid JSON value") - } - - fn visit_map(self, mut visitor: V) -> Result - where - V: MapAccess<'de>, - { - let value = visitor.next_key::()?; - if value.is_none() { - return Err(de::Error::invalid_type(Unexpected::Map, &self)); - } - visitor.next_value_seed(BoxedFromString) - } - } - - 
deserializer.deserialize_newtype_struct(TOKEN, BoxedVisitor) - } -} - -struct RawKey; - -impl<'de> Deserialize<'de> for RawKey { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct FieldVisitor; - - impl<'de> Visitor<'de> for FieldVisitor { - type Value = (); - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("raw value") - } - - fn visit_str(self, s: &str) -> Result<(), E> - where - E: de::Error, - { - if s == TOKEN { - Ok(()) - } else { - Err(de::Error::custom("unexpected raw value")) - } - } - } - - deserializer.deserialize_identifier(FieldVisitor)?; - Ok(RawKey) - } -} - -pub struct ReferenceFromString; - -impl<'de> DeserializeSeed<'de> for ReferenceFromString { - type Value = &'de RawValue; - - fn deserialize(self, deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_str(self) - } -} - -impl<'de> Visitor<'de> for ReferenceFromString { - type Value = &'de RawValue; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("raw value") - } - - fn visit_borrowed_str(self, s: &'de str) -> Result - where - E: de::Error, - { - Ok(RawValue::from_borrowed(s)) - } -} - -pub struct BoxedFromString; - -impl<'de> DeserializeSeed<'de> for BoxedFromString { - type Value = Box; - - fn deserialize(self, deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_str(self) - } -} - -impl<'de> Visitor<'de> for BoxedFromString { - type Value = Box; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("raw value") - } - - fn visit_str(self, s: &str) -> Result - where - E: de::Error, - { - Ok(RawValue::from_owned(s.to_owned().into_boxed_str())) - } - - #[cfg(any(feature = "std", feature = "alloc"))] - fn visit_string(self, s: String) -> Result - where - E: de::Error, - { - Ok(RawValue::from_owned(s.into_boxed_str())) - } -} - -struct RawKeyDeserializer; - 
-impl<'de> Deserializer<'de> for RawKeyDeserializer { - type Error = Error; - - fn deserialize_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_borrowed_str(TOKEN) - } - - forward_to_deserialize_any! { - bool u8 u16 u32 u64 u128 i8 i16 i32 i64 i128 f32 f64 char str string seq - bytes byte_buf map struct option unit newtype_struct ignored_any - unit_struct tuple_struct tuple enum identifier - } -} - -pub struct OwnedRawDeserializer { - pub raw_value: Option, -} - -impl<'de> MapAccess<'de> for OwnedRawDeserializer { - type Error = Error; - - fn next_key_seed(&mut self, seed: K) -> Result, Error> - where - K: de::DeserializeSeed<'de>, - { - if self.raw_value.is_none() { - return Ok(None); - } - seed.deserialize(RawKeyDeserializer).map(Some) - } - - fn next_value_seed(&mut self, seed: V) -> Result - where - V: de::DeserializeSeed<'de>, - { - seed.deserialize(self.raw_value.take().unwrap().into_deserializer()) - } -} - -pub struct BorrowedRawDeserializer<'de> { - pub raw_value: Option<&'de str>, -} - -impl<'de> MapAccess<'de> for BorrowedRawDeserializer<'de> { - type Error = Error; - - fn next_key_seed(&mut self, seed: K) -> Result, Error> - where - K: de::DeserializeSeed<'de>, - { - if self.raw_value.is_none() { - return Ok(None); - } - seed.deserialize(RawKeyDeserializer).map(Some) - } - - fn next_value_seed(&mut self, seed: V) -> Result - where - V: de::DeserializeSeed<'de>, - { - seed.deserialize(BorrowedStrDeserializer::new(self.raw_value.take().unwrap())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/read.rs s390-tools-2.33.1/rust-vendor/serde_json/src/read.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/read.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/read.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1004 +0,0 @@ -use crate::error::{Error, ErrorCode, Result}; -use alloc::vec::Vec; -use core::char; -use core::cmp; -use core::ops::Deref; -use core::str; - 
-#[cfg(feature = "std")] -use crate::io; -#[cfg(feature = "std")] -use crate::iter::LineColIterator; - -#[cfg(feature = "raw_value")] -use crate::raw::BorrowedRawDeserializer; -#[cfg(all(feature = "raw_value", feature = "std"))] -use crate::raw::OwnedRawDeserializer; -#[cfg(feature = "raw_value")] -use serde::de::Visitor; - -/// Trait used by the deserializer for iterating over input. This is manually -/// "specialized" for iterating over &[u8]. Once feature(specialization) is -/// stable we can use actual specialization. -/// -/// This trait is sealed and cannot be implemented for types outside of -/// `serde_json`. -pub trait Read<'de>: private::Sealed { - #[doc(hidden)] - fn next(&mut self) -> Result>; - #[doc(hidden)] - fn peek(&mut self) -> Result>; - - /// Only valid after a call to peek(). Discards the peeked byte. - #[doc(hidden)] - fn discard(&mut self); - - /// Position of the most recent call to next(). - /// - /// The most recent call was probably next() and not peek(), but this method - /// should try to return a sensible result if the most recent call was - /// actually peek() because we don't always know. - /// - /// Only called in case of an error, so performance is not important. - #[doc(hidden)] - fn position(&self) -> Position; - - /// Position of the most recent call to peek(). - /// - /// The most recent call was probably peek() and not next(), but this method - /// should try to return a sensible result if the most recent call was - /// actually next() because we don't always know. - /// - /// Only called in case of an error, so performance is not important. - #[doc(hidden)] - fn peek_position(&self) -> Position; - - /// Offset from the beginning of the input to the next byte that would be - /// returned by next() or peek(). - #[doc(hidden)] - fn byte_offset(&self) -> usize; - - /// Assumes the previous byte was a quotation mark. Parses a JSON-escaped - /// string until the next quotation mark using the given scratch space if - /// necessary. 
The scratch space is initially empty. - #[doc(hidden)] - fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec) -> Result>; - - /// Assumes the previous byte was a quotation mark. Parses a JSON-escaped - /// string until the next quotation mark using the given scratch space if - /// necessary. The scratch space is initially empty. - /// - /// This function returns the raw bytes in the string with escape sequences - /// expanded but without performing unicode validation. - #[doc(hidden)] - fn parse_str_raw<'s>( - &'s mut self, - scratch: &'s mut Vec, - ) -> Result>; - - /// Assumes the previous byte was a quotation mark. Parses a JSON-escaped - /// string until the next quotation mark but discards the data. - #[doc(hidden)] - fn ignore_str(&mut self) -> Result<()>; - - /// Assumes the previous byte was a hex escape sequnce ('\u') in a string. - /// Parses next hexadecimal sequence. - #[doc(hidden)] - fn decode_hex_escape(&mut self) -> Result; - - /// Switch raw buffering mode on. - /// - /// This is used when deserializing `RawValue`. - #[cfg(feature = "raw_value")] - #[doc(hidden)] - fn begin_raw_buffering(&mut self); - - /// Switch raw buffering mode off and provides the raw buffered data to the - /// given visitor. - #[cfg(feature = "raw_value")] - #[doc(hidden)] - fn end_raw_buffering(&mut self, visitor: V) -> Result - where - V: Visitor<'de>; - - /// Whether StreamDeserializer::next needs to check the failed flag. True - /// for IoRead, false for StrRead and SliceRead which can track failure by - /// truncating their input slice to avoid the extra check on every next - /// call. - #[doc(hidden)] - const should_early_return_if_failed: bool; - - /// Mark a persistent failure of StreamDeserializer, either by setting the - /// flag or by truncating the input data. 
- #[doc(hidden)] - fn set_failed(&mut self, failed: &mut bool); -} - -pub struct Position { - pub line: usize, - pub column: usize, -} - -pub enum Reference<'b, 'c, T> -where - T: ?Sized + 'static, -{ - Borrowed(&'b T), - Copied(&'c T), -} - -impl<'b, 'c, T> Deref for Reference<'b, 'c, T> -where - T: ?Sized + 'static, -{ - type Target = T; - - fn deref(&self) -> &Self::Target { - match *self { - Reference::Borrowed(b) => b, - Reference::Copied(c) => c, - } - } -} - -/// JSON input source that reads from a std::io input stream. -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub struct IoRead -where - R: io::Read, -{ - iter: LineColIterator>, - /// Temporary storage of peeked byte. - ch: Option, - #[cfg(feature = "raw_value")] - raw_buffer: Option>, -} - -/// JSON input source that reads from a slice of bytes. -// -// This is more efficient than other iterators because peek() can be read-only -// and we can compute line/col position only if an error happens. -pub struct SliceRead<'a> { - slice: &'a [u8], - /// Index of the *next* byte that will be returned by next() or peek(). - index: usize, - #[cfg(feature = "raw_value")] - raw_buffering_start_index: usize, -} - -/// JSON input source that reads from a UTF-8 string. -// -// Able to elide UTF-8 checks by assuming that the input is valid UTF-8. -pub struct StrRead<'a> { - delegate: SliceRead<'a>, - #[cfg(feature = "raw_value")] - data: &'a str, -} - -// Prevent users from implementing the Read trait. -mod private { - pub trait Sealed {} -} - -////////////////////////////////////////////////////////////////////////////// - -#[cfg(feature = "std")] -impl IoRead -where - R: io::Read, -{ - /// Create a JSON input source to read from a std::io input stream. 
- pub fn new(reader: R) -> Self { - IoRead { - iter: LineColIterator::new(reader.bytes()), - ch: None, - #[cfg(feature = "raw_value")] - raw_buffer: None, - } - } -} - -#[cfg(feature = "std")] -impl private::Sealed for IoRead where R: io::Read {} - -#[cfg(feature = "std")] -impl IoRead -where - R: io::Read, -{ - fn parse_str_bytes<'s, T, F>( - &'s mut self, - scratch: &'s mut Vec, - validate: bool, - result: F, - ) -> Result - where - T: 's, - F: FnOnce(&'s Self, &'s [u8]) -> Result, - { - loop { - let ch = tri!(next_or_eof(self)); - if !ESCAPE[ch as usize] { - scratch.push(ch); - continue; - } - match ch { - b'"' => { - return result(self, scratch); - } - b'\\' => { - tri!(parse_escape(self, validate, scratch)); - } - _ => { - if validate { - return error(self, ErrorCode::ControlCharacterWhileParsingString); - } - scratch.push(ch); - } - } - } - } -} - -#[cfg(feature = "std")] -impl<'de, R> Read<'de> for IoRead -where - R: io::Read, -{ - #[inline] - fn next(&mut self) -> Result> { - match self.ch.take() { - Some(ch) => { - #[cfg(feature = "raw_value")] - { - if let Some(buf) = &mut self.raw_buffer { - buf.push(ch); - } - } - Ok(Some(ch)) - } - None => match self.iter.next() { - Some(Err(err)) => Err(Error::io(err)), - Some(Ok(ch)) => { - #[cfg(feature = "raw_value")] - { - if let Some(buf) = &mut self.raw_buffer { - buf.push(ch); - } - } - Ok(Some(ch)) - } - None => Ok(None), - }, - } - } - - #[inline] - fn peek(&mut self) -> Result> { - match self.ch { - Some(ch) => Ok(Some(ch)), - None => match self.iter.next() { - Some(Err(err)) => Err(Error::io(err)), - Some(Ok(ch)) => { - self.ch = Some(ch); - Ok(self.ch) - } - None => Ok(None), - }, - } - } - - #[cfg(not(feature = "raw_value"))] - #[inline] - fn discard(&mut self) { - self.ch = None; - } - - #[cfg(feature = "raw_value")] - fn discard(&mut self) { - if let Some(ch) = self.ch.take() { - if let Some(buf) = &mut self.raw_buffer { - buf.push(ch); - } - } - } - - fn position(&self) -> Position { - Position { - 
line: self.iter.line(), - column: self.iter.col(), - } - } - - fn peek_position(&self) -> Position { - // The LineColIterator updates its position during peek() so it has the - // right one here. - self.position() - } - - fn byte_offset(&self) -> usize { - match self.ch { - Some(_) => self.iter.byte_offset() - 1, - None => self.iter.byte_offset(), - } - } - - fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec) -> Result> { - self.parse_str_bytes(scratch, true, as_str) - .map(Reference::Copied) - } - - fn parse_str_raw<'s>( - &'s mut self, - scratch: &'s mut Vec, - ) -> Result> { - self.parse_str_bytes(scratch, false, |_, bytes| Ok(bytes)) - .map(Reference::Copied) - } - - fn ignore_str(&mut self) -> Result<()> { - loop { - let ch = tri!(next_or_eof(self)); - if !ESCAPE[ch as usize] { - continue; - } - match ch { - b'"' => { - return Ok(()); - } - b'\\' => { - tri!(ignore_escape(self)); - } - _ => { - return error(self, ErrorCode::ControlCharacterWhileParsingString); - } - } - } - } - - fn decode_hex_escape(&mut self) -> Result { - let mut n = 0; - for _ in 0..4 { - match decode_hex_val(tri!(next_or_eof(self))) { - None => return error(self, ErrorCode::InvalidEscape), - Some(val) => { - n = (n << 4) + val; - } - } - } - Ok(n) - } - - #[cfg(feature = "raw_value")] - fn begin_raw_buffering(&mut self) { - self.raw_buffer = Some(Vec::new()); - } - - #[cfg(feature = "raw_value")] - fn end_raw_buffering(&mut self, visitor: V) -> Result - where - V: Visitor<'de>, - { - let raw = self.raw_buffer.take().unwrap(); - let raw = match String::from_utf8(raw) { - Ok(raw) => raw, - Err(_) => return error(self, ErrorCode::InvalidUnicodeCodePoint), - }; - visitor.visit_map(OwnedRawDeserializer { - raw_value: Some(raw), - }) - } - - const should_early_return_if_failed: bool = true; - - #[inline] - #[cold] - fn set_failed(&mut self, failed: &mut bool) { - *failed = true; - } -} - -////////////////////////////////////////////////////////////////////////////// - -impl<'a> SliceRead<'a> 
{ - /// Create a JSON input source to read from a slice of bytes. - pub fn new(slice: &'a [u8]) -> Self { - SliceRead { - slice, - index: 0, - #[cfg(feature = "raw_value")] - raw_buffering_start_index: 0, - } - } - - fn position_of_index(&self, i: usize) -> Position { - let mut position = Position { line: 1, column: 0 }; - for ch in &self.slice[..i] { - match *ch { - b'\n' => { - position.line += 1; - position.column = 0; - } - _ => { - position.column += 1; - } - } - } - position - } - - /// The big optimization here over IoRead is that if the string contains no - /// backslash escape sequences, the returned &str is a slice of the raw JSON - /// data so we avoid copying into the scratch space. - fn parse_str_bytes<'s, T, F>( - &'s mut self, - scratch: &'s mut Vec, - validate: bool, - result: F, - ) -> Result> - where - T: ?Sized + 's, - F: for<'f> FnOnce(&'s Self, &'f [u8]) -> Result<&'f T>, - { - // Index of the first byte not yet copied into the scratch space. - let mut start = self.index; - - loop { - while self.index < self.slice.len() && !ESCAPE[self.slice[self.index] as usize] { - self.index += 1; - } - if self.index == self.slice.len() { - return error(self, ErrorCode::EofWhileParsingString); - } - match self.slice[self.index] { - b'"' => { - if scratch.is_empty() { - // Fast path: return a slice of the raw JSON without any - // copying. 
- let borrowed = &self.slice[start..self.index]; - self.index += 1; - return result(self, borrowed).map(Reference::Borrowed); - } else { - scratch.extend_from_slice(&self.slice[start..self.index]); - self.index += 1; - return result(self, scratch).map(Reference::Copied); - } - } - b'\\' => { - scratch.extend_from_slice(&self.slice[start..self.index]); - self.index += 1; - tri!(parse_escape(self, validate, scratch)); - start = self.index; - } - _ => { - self.index += 1; - if validate { - return error(self, ErrorCode::ControlCharacterWhileParsingString); - } - } - } - } - } -} - -impl<'a> private::Sealed for SliceRead<'a> {} - -impl<'a> Read<'a> for SliceRead<'a> { - #[inline] - fn next(&mut self) -> Result> { - // `Ok(self.slice.get(self.index).map(|ch| { self.index += 1; *ch }))` - // is about 10% slower. - Ok(if self.index < self.slice.len() { - let ch = self.slice[self.index]; - self.index += 1; - Some(ch) - } else { - None - }) - } - - #[inline] - fn peek(&mut self) -> Result> { - // `Ok(self.slice.get(self.index).map(|ch| *ch))` is about 10% slower - // for some reason. - Ok(if self.index < self.slice.len() { - Some(self.slice[self.index]) - } else { - None - }) - } - - #[inline] - fn discard(&mut self) { - self.index += 1; - } - - fn position(&self) -> Position { - self.position_of_index(self.index) - } - - fn peek_position(&self) -> Position { - // Cap it at slice.len() just in case the most recent call was next() - // and it returned the last byte. 
- self.position_of_index(cmp::min(self.slice.len(), self.index + 1)) - } - - fn byte_offset(&self) -> usize { - self.index - } - - fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec) -> Result> { - self.parse_str_bytes(scratch, true, as_str) - } - - fn parse_str_raw<'s>( - &'s mut self, - scratch: &'s mut Vec, - ) -> Result> { - self.parse_str_bytes(scratch, false, |_, bytes| Ok(bytes)) - } - - fn ignore_str(&mut self) -> Result<()> { - loop { - while self.index < self.slice.len() && !ESCAPE[self.slice[self.index] as usize] { - self.index += 1; - } - if self.index == self.slice.len() { - return error(self, ErrorCode::EofWhileParsingString); - } - match self.slice[self.index] { - b'"' => { - self.index += 1; - return Ok(()); - } - b'\\' => { - self.index += 1; - tri!(ignore_escape(self)); - } - _ => { - return error(self, ErrorCode::ControlCharacterWhileParsingString); - } - } - } - } - - fn decode_hex_escape(&mut self) -> Result { - if self.index + 4 > self.slice.len() { - self.index = self.slice.len(); - return error(self, ErrorCode::EofWhileParsingString); - } - - let mut n = 0; - for _ in 0..4 { - let ch = decode_hex_val(self.slice[self.index]); - self.index += 1; - match ch { - None => return error(self, ErrorCode::InvalidEscape), - Some(val) => { - n = (n << 4) + val; - } - } - } - Ok(n) - } - - #[cfg(feature = "raw_value")] - fn begin_raw_buffering(&mut self) { - self.raw_buffering_start_index = self.index; - } - - #[cfg(feature = "raw_value")] - fn end_raw_buffering(&mut self, visitor: V) -> Result - where - V: Visitor<'a>, - { - let raw = &self.slice[self.raw_buffering_start_index..self.index]; - let raw = match str::from_utf8(raw) { - Ok(raw) => raw, - Err(_) => return error(self, ErrorCode::InvalidUnicodeCodePoint), - }; - visitor.visit_map(BorrowedRawDeserializer { - raw_value: Some(raw), - }) - } - - const should_early_return_if_failed: bool = false; - - #[inline] - #[cold] - fn set_failed(&mut self, _failed: &mut bool) { - self.slice = 
&self.slice[..self.index]; - } -} - -////////////////////////////////////////////////////////////////////////////// - -impl<'a> StrRead<'a> { - /// Create a JSON input source to read from a UTF-8 string. - pub fn new(s: &'a str) -> Self { - StrRead { - delegate: SliceRead::new(s.as_bytes()), - #[cfg(feature = "raw_value")] - data: s, - } - } -} - -impl<'a> private::Sealed for StrRead<'a> {} - -impl<'a> Read<'a> for StrRead<'a> { - #[inline] - fn next(&mut self) -> Result> { - self.delegate.next() - } - - #[inline] - fn peek(&mut self) -> Result> { - self.delegate.peek() - } - - #[inline] - fn discard(&mut self) { - self.delegate.discard(); - } - - fn position(&self) -> Position { - self.delegate.position() - } - - fn peek_position(&self) -> Position { - self.delegate.peek_position() - } - - fn byte_offset(&self) -> usize { - self.delegate.byte_offset() - } - - fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec) -> Result> { - self.delegate.parse_str_bytes(scratch, true, |_, bytes| { - // The deserialization input came in as &str with a UTF-8 guarantee, - // and the \u-escapes are checked along the way, so don't need to - // check here. 
- Ok(unsafe { str::from_utf8_unchecked(bytes) }) - }) - } - - fn parse_str_raw<'s>( - &'s mut self, - scratch: &'s mut Vec, - ) -> Result> { - self.delegate.parse_str_raw(scratch) - } - - fn ignore_str(&mut self) -> Result<()> { - self.delegate.ignore_str() - } - - fn decode_hex_escape(&mut self) -> Result { - self.delegate.decode_hex_escape() - } - - #[cfg(feature = "raw_value")] - fn begin_raw_buffering(&mut self) { - self.delegate.begin_raw_buffering(); - } - - #[cfg(feature = "raw_value")] - fn end_raw_buffering(&mut self, visitor: V) -> Result - where - V: Visitor<'a>, - { - let raw = &self.data[self.delegate.raw_buffering_start_index..self.delegate.index]; - visitor.visit_map(BorrowedRawDeserializer { - raw_value: Some(raw), - }) - } - - const should_early_return_if_failed: bool = false; - - #[inline] - #[cold] - fn set_failed(&mut self, failed: &mut bool) { - self.delegate.set_failed(failed); - } -} - -////////////////////////////////////////////////////////////////////////////// - -impl<'a, 'de, R> private::Sealed for &'a mut R where R: Read<'de> {} - -impl<'a, 'de, R> Read<'de> for &'a mut R -where - R: Read<'de>, -{ - fn next(&mut self) -> Result> { - R::next(self) - } - - fn peek(&mut self) -> Result> { - R::peek(self) - } - - fn discard(&mut self) { - R::discard(self); - } - - fn position(&self) -> Position { - R::position(self) - } - - fn peek_position(&self) -> Position { - R::peek_position(self) - } - - fn byte_offset(&self) -> usize { - R::byte_offset(self) - } - - fn parse_str<'s>(&'s mut self, scratch: &'s mut Vec) -> Result> { - R::parse_str(self, scratch) - } - - fn parse_str_raw<'s>( - &'s mut self, - scratch: &'s mut Vec, - ) -> Result> { - R::parse_str_raw(self, scratch) - } - - fn ignore_str(&mut self) -> Result<()> { - R::ignore_str(self) - } - - fn decode_hex_escape(&mut self) -> Result { - R::decode_hex_escape(self) - } - - #[cfg(feature = "raw_value")] - fn begin_raw_buffering(&mut self) { - R::begin_raw_buffering(self); - } - - 
#[cfg(feature = "raw_value")] - fn end_raw_buffering(&mut self, visitor: V) -> Result - where - V: Visitor<'de>, - { - R::end_raw_buffering(self, visitor) - } - - const should_early_return_if_failed: bool = R::should_early_return_if_failed; - - fn set_failed(&mut self, failed: &mut bool) { - R::set_failed(self, failed); - } -} - -////////////////////////////////////////////////////////////////////////////// - -/// Marker for whether StreamDeserializer can implement FusedIterator. -pub trait Fused: private::Sealed {} -impl<'a> Fused for SliceRead<'a> {} -impl<'a> Fused for StrRead<'a> {} - -// Lookup table of bytes that must be escaped. A value of true at index i means -// that byte i requires an escape sequence in the input. -static ESCAPE: [bool; 256] = { - const CT: bool = true; // control character \x00..=\x1F - const QU: bool = true; // quote \x22 - const BS: bool = true; // backslash \x5C - const __: bool = false; // allow unescaped - [ - // 1 2 3 4 5 6 7 8 9 A B C D E F - CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, // 0 - CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, CT, // 1 - __, __, QU, __, __, __, __, __, __, __, __, __, __, __, __, __, // 2 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 3 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 4 - __, __, __, __, __, __, __, __, __, __, __, __, BS, __, __, __, // 5 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 6 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 7 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 8 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 9 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // A - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // B - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // C - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // D - __, __, __, 
__, __, __, __, __, __, __, __, __, __, __, __, __, // E - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // F - ] -}; - -fn next_or_eof<'de, R>(read: &mut R) -> Result -where - R: ?Sized + Read<'de>, -{ - match tri!(read.next()) { - Some(b) => Ok(b), - None => error(read, ErrorCode::EofWhileParsingString), - } -} - -fn peek_or_eof<'de, R>(read: &mut R) -> Result -where - R: ?Sized + Read<'de>, -{ - match tri!(read.peek()) { - Some(b) => Ok(b), - None => error(read, ErrorCode::EofWhileParsingString), - } -} - -fn error<'de, R, T>(read: &R, reason: ErrorCode) -> Result -where - R: ?Sized + Read<'de>, -{ - let position = read.position(); - Err(Error::syntax(reason, position.line, position.column)) -} - -fn as_str<'de, 's, R: Read<'de>>(read: &R, slice: &'s [u8]) -> Result<&'s str> { - str::from_utf8(slice).or_else(|_| error(read, ErrorCode::InvalidUnicodeCodePoint)) -} - -/// Parses a JSON escape sequence and appends it into the scratch space. Assumes -/// the previous byte read was a backslash. 
-fn parse_escape<'de, R: Read<'de>>( - read: &mut R, - validate: bool, - scratch: &mut Vec, -) -> Result<()> { - let ch = tri!(next_or_eof(read)); - - match ch { - b'"' => scratch.push(b'"'), - b'\\' => scratch.push(b'\\'), - b'/' => scratch.push(b'/'), - b'b' => scratch.push(b'\x08'), - b'f' => scratch.push(b'\x0c'), - b'n' => scratch.push(b'\n'), - b'r' => scratch.push(b'\r'), - b't' => scratch.push(b'\t'), - b'u' => { - fn encode_surrogate(scratch: &mut Vec, n: u16) { - scratch.extend_from_slice(&[ - (n >> 12 & 0b0000_1111) as u8 | 0b1110_0000, - (n >> 6 & 0b0011_1111) as u8 | 0b1000_0000, - (n & 0b0011_1111) as u8 | 0b1000_0000, - ]); - } - - let c = match tri!(read.decode_hex_escape()) { - n @ 0xDC00..=0xDFFF => { - return if validate { - error(read, ErrorCode::LoneLeadingSurrogateInHexEscape) - } else { - encode_surrogate(scratch, n); - Ok(()) - }; - } - - // Non-BMP characters are encoded as a sequence of two hex - // escapes, representing UTF-16 surrogates. If deserializing a - // utf-8 string the surrogates are required to be paired, - // whereas deserializing a byte string accepts lone surrogates. - n1 @ 0xD800..=0xDBFF => { - if tri!(peek_or_eof(read)) == b'\\' { - read.discard(); - } else { - return if validate { - read.discard(); - error(read, ErrorCode::UnexpectedEndOfHexEscape) - } else { - encode_surrogate(scratch, n1); - Ok(()) - }; - } - - if tri!(peek_or_eof(read)) == b'u' { - read.discard(); - } else { - return if validate { - read.discard(); - error(read, ErrorCode::UnexpectedEndOfHexEscape) - } else { - encode_surrogate(scratch, n1); - // The \ prior to this byte started an escape sequence, - // so we need to parse that now. This recursive call - // does not blow the stack on malicious input because - // the escape is not \u, so it will be handled by one - // of the easy nonrecursive cases. 
- parse_escape(read, validate, scratch) - }; - } - - let n2 = tri!(read.decode_hex_escape()); - - if n2 < 0xDC00 || n2 > 0xDFFF { - return error(read, ErrorCode::LoneLeadingSurrogateInHexEscape); - } - - let n = (((n1 - 0xD800) as u32) << 10 | (n2 - 0xDC00) as u32) + 0x1_0000; - - match char::from_u32(n) { - Some(c) => c, - None => { - return error(read, ErrorCode::InvalidUnicodeCodePoint); - } - } - } - - // Every u16 outside of the surrogate ranges above is guaranteed - // to be a legal char. - n => char::from_u32(n as u32).unwrap(), - }; - - scratch.extend_from_slice(c.encode_utf8(&mut [0_u8; 4]).as_bytes()); - } - _ => { - return error(read, ErrorCode::InvalidEscape); - } - } - - Ok(()) -} - -/// Parses a JSON escape sequence and discards the value. Assumes the previous -/// byte read was a backslash. -fn ignore_escape<'de, R>(read: &mut R) -> Result<()> -where - R: ?Sized + Read<'de>, -{ - let ch = tri!(next_or_eof(read)); - - match ch { - b'"' | b'\\' | b'/' | b'b' | b'f' | b'n' | b'r' | b't' => {} - b'u' => { - // At this point we don't care if the codepoint is valid. We just - // want to consume it. We don't actually know what is valid or not - // at this point, because that depends on if this string will - // ultimately be parsed into a string or a byte buffer in the "real" - // parse. 
- - tri!(read.decode_hex_escape()); - } - _ => { - return error(read, ErrorCode::InvalidEscape); - } - } - - Ok(()) -} - -static HEX: [u8; 256] = { - const __: u8 = 255; // not a hex digit - [ - // 1 2 3 4 5 6 7 8 9 A B C D E F - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 0 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 1 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 2 - 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, __, __, __, __, __, __, // 3 - __, 10, 11, 12, 13, 14, 15, __, __, __, __, __, __, __, __, __, // 4 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 5 - __, 10, 11, 12, 13, 14, 15, __, __, __, __, __, __, __, __, __, // 6 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 7 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 8 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 9 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // A - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // B - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // C - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // D - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // E - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // F - ] -}; - -fn decode_hex_val(val: u8) -> Option { - let n = HEX[val as usize] as u16; - if n == 255 { - None - } else { - Some(n) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/ser.rs s390-tools-2.33.1/rust-vendor/serde_json/src/ser.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/ser.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/ser.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2183 +0,0 @@ -//! Serialize a Rust data structure into JSON data. 
- -use crate::error::{Error, ErrorCode, Result}; -use crate::io; -use alloc::string::{String, ToString}; -use alloc::vec::Vec; -use core::fmt::{self, Display}; -use core::num::FpCategory; -use serde::ser::{self, Impossible, Serialize}; - -/// A structure for serializing Rust values into JSON. -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub struct Serializer { - writer: W, - formatter: F, -} - -impl Serializer -where - W: io::Write, -{ - /// Creates a new JSON serializer. - #[inline] - pub fn new(writer: W) -> Self { - Serializer::with_formatter(writer, CompactFormatter) - } -} - -impl<'a, W> Serializer> -where - W: io::Write, -{ - /// Creates a new JSON pretty print serializer. - #[inline] - pub fn pretty(writer: W) -> Self { - Serializer::with_formatter(writer, PrettyFormatter::new()) - } -} - -impl Serializer -where - W: io::Write, - F: Formatter, -{ - /// Creates a new JSON visitor whose output will be written to the writer - /// specified. - #[inline] - pub fn with_formatter(writer: W, formatter: F) -> Self { - Serializer { writer, formatter } - } - - /// Unwrap the `Writer` from the `Serializer`. 
- #[inline] - pub fn into_inner(self) -> W { - self.writer - } -} - -impl<'a, W, F> ser::Serializer for &'a mut Serializer -where - W: io::Write, - F: Formatter, -{ - type Ok = (); - type Error = Error; - - type SerializeSeq = Compound<'a, W, F>; - type SerializeTuple = Compound<'a, W, F>; - type SerializeTupleStruct = Compound<'a, W, F>; - type SerializeTupleVariant = Compound<'a, W, F>; - type SerializeMap = Compound<'a, W, F>; - type SerializeStruct = Compound<'a, W, F>; - type SerializeStructVariant = Compound<'a, W, F>; - - #[inline] - fn serialize_bool(self, value: bool) -> Result<()> { - self.formatter - .write_bool(&mut self.writer, value) - .map_err(Error::io) - } - - #[inline] - fn serialize_i8(self, value: i8) -> Result<()> { - self.formatter - .write_i8(&mut self.writer, value) - .map_err(Error::io) - } - - #[inline] - fn serialize_i16(self, value: i16) -> Result<()> { - self.formatter - .write_i16(&mut self.writer, value) - .map_err(Error::io) - } - - #[inline] - fn serialize_i32(self, value: i32) -> Result<()> { - self.formatter - .write_i32(&mut self.writer, value) - .map_err(Error::io) - } - - #[inline] - fn serialize_i64(self, value: i64) -> Result<()> { - self.formatter - .write_i64(&mut self.writer, value) - .map_err(Error::io) - } - - fn serialize_i128(self, value: i128) -> Result<()> { - self.formatter - .write_i128(&mut self.writer, value) - .map_err(Error::io) - } - - #[inline] - fn serialize_u8(self, value: u8) -> Result<()> { - self.formatter - .write_u8(&mut self.writer, value) - .map_err(Error::io) - } - - #[inline] - fn serialize_u16(self, value: u16) -> Result<()> { - self.formatter - .write_u16(&mut self.writer, value) - .map_err(Error::io) - } - - #[inline] - fn serialize_u32(self, value: u32) -> Result<()> { - self.formatter - .write_u32(&mut self.writer, value) - .map_err(Error::io) - } - - #[inline] - fn serialize_u64(self, value: u64) -> Result<()> { - self.formatter - .write_u64(&mut self.writer, value) - .map_err(Error::io) - } 
- - fn serialize_u128(self, value: u128) -> Result<()> { - self.formatter - .write_u128(&mut self.writer, value) - .map_err(Error::io) - } - - #[inline] - fn serialize_f32(self, value: f32) -> Result<()> { - match value.classify() { - FpCategory::Nan | FpCategory::Infinite => self - .formatter - .write_null(&mut self.writer) - .map_err(Error::io), - _ => self - .formatter - .write_f32(&mut self.writer, value) - .map_err(Error::io), - } - } - - #[inline] - fn serialize_f64(self, value: f64) -> Result<()> { - match value.classify() { - FpCategory::Nan | FpCategory::Infinite => self - .formatter - .write_null(&mut self.writer) - .map_err(Error::io), - _ => self - .formatter - .write_f64(&mut self.writer, value) - .map_err(Error::io), - } - } - - #[inline] - fn serialize_char(self, value: char) -> Result<()> { - // A char encoded as UTF-8 takes 4 bytes at most. - let mut buf = [0; 4]; - self.serialize_str(value.encode_utf8(&mut buf)) - } - - #[inline] - fn serialize_str(self, value: &str) -> Result<()> { - format_escaped_str(&mut self.writer, &mut self.formatter, value).map_err(Error::io) - } - - #[inline] - fn serialize_bytes(self, value: &[u8]) -> Result<()> { - use serde::ser::SerializeSeq; - let mut seq = tri!(self.serialize_seq(Some(value.len()))); - for byte in value { - tri!(seq.serialize_element(byte)); - } - seq.end() - } - - #[inline] - fn serialize_unit(self) -> Result<()> { - self.formatter - .write_null(&mut self.writer) - .map_err(Error::io) - } - - #[inline] - fn serialize_unit_struct(self, _name: &'static str) -> Result<()> { - self.serialize_unit() - } - - #[inline] - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - ) -> Result<()> { - self.serialize_str(variant) - } - - /// Serialize newtypes without an object wrapper. 
- #[inline] - fn serialize_newtype_struct(self, _name: &'static str, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - value.serialize(self) - } - - #[inline] - fn serialize_newtype_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - value: &T, - ) -> Result<()> - where - T: ?Sized + Serialize, - { - tri!(self - .formatter - .begin_object(&mut self.writer) - .map_err(Error::io)); - tri!(self - .formatter - .begin_object_key(&mut self.writer, true) - .map_err(Error::io)); - tri!(self.serialize_str(variant)); - tri!(self - .formatter - .end_object_key(&mut self.writer) - .map_err(Error::io)); - tri!(self - .formatter - .begin_object_value(&mut self.writer) - .map_err(Error::io)); - tri!(value.serialize(&mut *self)); - tri!(self - .formatter - .end_object_value(&mut self.writer) - .map_err(Error::io)); - self.formatter - .end_object(&mut self.writer) - .map_err(Error::io) - } - - #[inline] - fn serialize_none(self) -> Result<()> { - self.serialize_unit() - } - - #[inline] - fn serialize_some(self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - value.serialize(self) - } - - #[inline] - fn serialize_seq(self, len: Option) -> Result { - tri!(self - .formatter - .begin_array(&mut self.writer) - .map_err(Error::io)); - if len == Some(0) { - tri!(self - .formatter - .end_array(&mut self.writer) - .map_err(Error::io)); - Ok(Compound::Map { - ser: self, - state: State::Empty, - }) - } else { - Ok(Compound::Map { - ser: self, - state: State::First, - }) - } - } - - #[inline] - fn serialize_tuple(self, len: usize) -> Result { - self.serialize_seq(Some(len)) - } - - #[inline] - fn serialize_tuple_struct( - self, - _name: &'static str, - len: usize, - ) -> Result { - self.serialize_seq(Some(len)) - } - - #[inline] - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - len: usize, - ) -> Result { - tri!(self - .formatter - .begin_object(&mut self.writer) - 
.map_err(Error::io)); - tri!(self - .formatter - .begin_object_key(&mut self.writer, true) - .map_err(Error::io)); - tri!(self.serialize_str(variant)); - tri!(self - .formatter - .end_object_key(&mut self.writer) - .map_err(Error::io)); - tri!(self - .formatter - .begin_object_value(&mut self.writer) - .map_err(Error::io)); - self.serialize_seq(Some(len)) - } - - #[inline] - fn serialize_map(self, len: Option) -> Result { - tri!(self - .formatter - .begin_object(&mut self.writer) - .map_err(Error::io)); - if len == Some(0) { - tri!(self - .formatter - .end_object(&mut self.writer) - .map_err(Error::io)); - Ok(Compound::Map { - ser: self, - state: State::Empty, - }) - } else { - Ok(Compound::Map { - ser: self, - state: State::First, - }) - } - } - - #[inline] - fn serialize_struct(self, name: &'static str, len: usize) -> Result { - match name { - #[cfg(feature = "arbitrary_precision")] - crate::number::TOKEN => Ok(Compound::Number { ser: self }), - #[cfg(feature = "raw_value")] - crate::raw::TOKEN => Ok(Compound::RawValue { ser: self }), - _ => self.serialize_map(Some(len)), - } - } - - #[inline] - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - len: usize, - ) -> Result { - tri!(self - .formatter - .begin_object(&mut self.writer) - .map_err(Error::io)); - tri!(self - .formatter - .begin_object_key(&mut self.writer, true) - .map_err(Error::io)); - tri!(self.serialize_str(variant)); - tri!(self - .formatter - .end_object_key(&mut self.writer) - .map_err(Error::io)); - tri!(self - .formatter - .begin_object_value(&mut self.writer) - .map_err(Error::io)); - self.serialize_map(Some(len)) - } - - fn collect_str(self, value: &T) -> Result<()> - where - T: ?Sized + Display, - { - use self::fmt::Write; - - struct Adapter<'ser, W: 'ser, F: 'ser> { - writer: &'ser mut W, - formatter: &'ser mut F, - error: Option, - } - - impl<'ser, W, F> Write for Adapter<'ser, W, F> - where - W: io::Write, - F: Formatter, - { - fn 
write_str(&mut self, s: &str) -> fmt::Result { - debug_assert!(self.error.is_none()); - match format_escaped_str_contents(self.writer, self.formatter, s) { - Ok(()) => Ok(()), - Err(err) => { - self.error = Some(err); - Err(fmt::Error) - } - } - } - } - - tri!(self - .formatter - .begin_string(&mut self.writer) - .map_err(Error::io)); - { - let mut adapter = Adapter { - writer: &mut self.writer, - formatter: &mut self.formatter, - error: None, - }; - match write!(adapter, "{}", value) { - Ok(()) => debug_assert!(adapter.error.is_none()), - Err(fmt::Error) => { - return Err(Error::io(adapter.error.expect("there should be an error"))); - } - } - } - self.formatter - .end_string(&mut self.writer) - .map_err(Error::io) - } -} - -// Not public API. Should be pub(crate). -#[doc(hidden)] -#[derive(Eq, PartialEq)] -pub enum State { - Empty, - First, - Rest, -} - -// Not public API. Should be pub(crate). -#[doc(hidden)] -pub enum Compound<'a, W: 'a, F: 'a> { - Map { - ser: &'a mut Serializer, - state: State, - }, - #[cfg(feature = "arbitrary_precision")] - Number { ser: &'a mut Serializer }, - #[cfg(feature = "raw_value")] - RawValue { ser: &'a mut Serializer }, -} - -impl<'a, W, F> ser::SerializeSeq for Compound<'a, W, F> -where - W: io::Write, - F: Formatter, -{ - type Ok = (); - type Error = Error; - - #[inline] - fn serialize_element(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - match self { - Compound::Map { ser, state } => { - tri!(ser - .formatter - .begin_array_value(&mut ser.writer, *state == State::First) - .map_err(Error::io)); - *state = State::Rest; - tri!(value.serialize(&mut **ser)); - ser.formatter - .end_array_value(&mut ser.writer) - .map_err(Error::io) - } - #[cfg(feature = "arbitrary_precision")] - Compound::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - Compound::RawValue { .. 
} => unreachable!(), - } - } - - #[inline] - fn end(self) -> Result<()> { - match self { - Compound::Map { ser, state } => match state { - State::Empty => Ok(()), - _ => ser.formatter.end_array(&mut ser.writer).map_err(Error::io), - }, - #[cfg(feature = "arbitrary_precision")] - Compound::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - Compound::RawValue { .. } => unreachable!(), - } - } -} - -impl<'a, W, F> ser::SerializeTuple for Compound<'a, W, F> -where - W: io::Write, - F: Formatter, -{ - type Ok = (); - type Error = Error; - - #[inline] - fn serialize_element(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - ser::SerializeSeq::serialize_element(self, value) - } - - #[inline] - fn end(self) -> Result<()> { - ser::SerializeSeq::end(self) - } -} - -impl<'a, W, F> ser::SerializeTupleStruct for Compound<'a, W, F> -where - W: io::Write, - F: Formatter, -{ - type Ok = (); - type Error = Error; - - #[inline] - fn serialize_field(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - ser::SerializeSeq::serialize_element(self, value) - } - - #[inline] - fn end(self) -> Result<()> { - ser::SerializeSeq::end(self) - } -} - -impl<'a, W, F> ser::SerializeTupleVariant for Compound<'a, W, F> -where - W: io::Write, - F: Formatter, -{ - type Ok = (); - type Error = Error; - - #[inline] - fn serialize_field(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - ser::SerializeSeq::serialize_element(self, value) - } - - #[inline] - fn end(self) -> Result<()> { - match self { - Compound::Map { ser, state } => { - match state { - State::Empty => {} - _ => tri!(ser.formatter.end_array(&mut ser.writer).map_err(Error::io)), - } - tri!(ser - .formatter - .end_object_value(&mut ser.writer) - .map_err(Error::io)); - ser.formatter.end_object(&mut ser.writer).map_err(Error::io) - } - #[cfg(feature = "arbitrary_precision")] - Compound::Number { .. 
} => unreachable!(), - #[cfg(feature = "raw_value")] - Compound::RawValue { .. } => unreachable!(), - } - } -} - -impl<'a, W, F> ser::SerializeMap for Compound<'a, W, F> -where - W: io::Write, - F: Formatter, -{ - type Ok = (); - type Error = Error; - - #[inline] - fn serialize_key(&mut self, key: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - match self { - Compound::Map { ser, state } => { - tri!(ser - .formatter - .begin_object_key(&mut ser.writer, *state == State::First) - .map_err(Error::io)); - *state = State::Rest; - - tri!(key.serialize(MapKeySerializer { ser: *ser })); - - ser.formatter - .end_object_key(&mut ser.writer) - .map_err(Error::io) - } - #[cfg(feature = "arbitrary_precision")] - Compound::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - Compound::RawValue { .. } => unreachable!(), - } - } - - #[inline] - fn serialize_value(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - match self { - Compound::Map { ser, .. } => { - tri!(ser - .formatter - .begin_object_value(&mut ser.writer) - .map_err(Error::io)); - tri!(value.serialize(&mut **ser)); - ser.formatter - .end_object_value(&mut ser.writer) - .map_err(Error::io) - } - #[cfg(feature = "arbitrary_precision")] - Compound::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - Compound::RawValue { .. } => unreachable!(), - } - } - - #[inline] - fn end(self) -> Result<()> { - match self { - Compound::Map { ser, state } => match state { - State::Empty => Ok(()), - _ => ser.formatter.end_object(&mut ser.writer).map_err(Error::io), - }, - #[cfg(feature = "arbitrary_precision")] - Compound::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - Compound::RawValue { .. 
} => unreachable!(), - } - } -} - -impl<'a, W, F> ser::SerializeStruct for Compound<'a, W, F> -where - W: io::Write, - F: Formatter, -{ - type Ok = (); - type Error = Error; - - #[inline] - fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - match self { - Compound::Map { .. } => ser::SerializeMap::serialize_entry(self, key, value), - #[cfg(feature = "arbitrary_precision")] - Compound::Number { ser, .. } => { - if key == crate::number::TOKEN { - value.serialize(NumberStrEmitter(ser)) - } else { - Err(invalid_number()) - } - } - #[cfg(feature = "raw_value")] - Compound::RawValue { ser, .. } => { - if key == crate::raw::TOKEN { - value.serialize(RawValueStrEmitter(ser)) - } else { - Err(invalid_raw_value()) - } - } - } - } - - #[inline] - fn end(self) -> Result<()> { - match self { - Compound::Map { .. } => ser::SerializeMap::end(self), - #[cfg(feature = "arbitrary_precision")] - Compound::Number { .. } => Ok(()), - #[cfg(feature = "raw_value")] - Compound::RawValue { .. } => Ok(()), - } - } -} - -impl<'a, W, F> ser::SerializeStructVariant for Compound<'a, W, F> -where - W: io::Write, - F: Formatter, -{ - type Ok = (); - type Error = Error; - - #[inline] - fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - match *self { - Compound::Map { .. } => ser::SerializeStruct::serialize_field(self, key, value), - #[cfg(feature = "arbitrary_precision")] - Compound::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - Compound::RawValue { .. 
} => unreachable!(), - } - } - - #[inline] - fn end(self) -> Result<()> { - match self { - Compound::Map { ser, state } => { - match state { - State::Empty => {} - _ => tri!(ser.formatter.end_object(&mut ser.writer).map_err(Error::io)), - } - tri!(ser - .formatter - .end_object_value(&mut ser.writer) - .map_err(Error::io)); - ser.formatter.end_object(&mut ser.writer).map_err(Error::io) - } - #[cfg(feature = "arbitrary_precision")] - Compound::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - Compound::RawValue { .. } => unreachable!(), - } - } -} - -struct MapKeySerializer<'a, W: 'a, F: 'a> { - ser: &'a mut Serializer, -} - -#[cfg(feature = "arbitrary_precision")] -fn invalid_number() -> Error { - Error::syntax(ErrorCode::InvalidNumber, 0, 0) -} - -#[cfg(feature = "raw_value")] -fn invalid_raw_value() -> Error { - Error::syntax(ErrorCode::ExpectedSomeValue, 0, 0) -} - -fn key_must_be_a_string() -> Error { - Error::syntax(ErrorCode::KeyMustBeAString, 0, 0) -} - -impl<'a, W, F> ser::Serializer for MapKeySerializer<'a, W, F> -where - W: io::Write, - F: Formatter, -{ - type Ok = (); - type Error = Error; - - #[inline] - fn serialize_str(self, value: &str) -> Result<()> { - self.ser.serialize_str(value) - } - - #[inline] - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - ) -> Result<()> { - self.ser.serialize_str(variant) - } - - #[inline] - fn serialize_newtype_struct(self, _name: &'static str, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - value.serialize(self) - } - - type SerializeSeq = Impossible<(), Error>; - type SerializeTuple = Impossible<(), Error>; - type SerializeTupleStruct = Impossible<(), Error>; - type SerializeTupleVariant = Impossible<(), Error>; - type SerializeMap = Impossible<(), Error>; - type SerializeStruct = Impossible<(), Error>; - type SerializeStructVariant = Impossible<(), Error>; - - fn serialize_bool(self, _value: bool) -> Result<()> { - 
Err(key_must_be_a_string()) - } - - fn serialize_i8(self, value: i8) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_i8(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut self.ser.writer) - .map_err(Error::io) - } - - fn serialize_i16(self, value: i16) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_i16(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut self.ser.writer) - .map_err(Error::io) - } - - fn serialize_i32(self, value: i32) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_i32(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut self.ser.writer) - .map_err(Error::io) - } - - fn serialize_i64(self, value: i64) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_i64(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut self.ser.writer) - .map_err(Error::io) - } - - fn serialize_i128(self, value: i128) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_i128(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut self.ser.writer) - .map_err(Error::io) - } - - fn serialize_u8(self, value: u8) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_u8(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut 
self.ser.writer) - .map_err(Error::io) - } - - fn serialize_u16(self, value: u16) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_u16(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut self.ser.writer) - .map_err(Error::io) - } - - fn serialize_u32(self, value: u32) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_u32(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut self.ser.writer) - .map_err(Error::io) - } - - fn serialize_u64(self, value: u64) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_u64(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut self.ser.writer) - .map_err(Error::io) - } - - fn serialize_u128(self, value: u128) -> Result<()> { - tri!(self - .ser - .formatter - .begin_string(&mut self.ser.writer) - .map_err(Error::io)); - tri!(self - .ser - .formatter - .write_u128(&mut self.ser.writer, value) - .map_err(Error::io)); - self.ser - .formatter - .end_string(&mut self.ser.writer) - .map_err(Error::io) - } - - fn serialize_f32(self, _value: f32) -> Result<()> { - Err(key_must_be_a_string()) - } - - fn serialize_f64(self, _value: f64) -> Result<()> { - Err(key_must_be_a_string()) - } - - fn serialize_char(self, value: char) -> Result<()> { - self.ser.serialize_str(&value.to_string()) - } - - fn serialize_bytes(self, _value: &[u8]) -> Result<()> { - Err(key_must_be_a_string()) - } - - fn serialize_unit(self) -> Result<()> { - Err(key_must_be_a_string()) - } - - fn serialize_unit_struct(self, _name: &'static str) -> Result<()> { - Err(key_must_be_a_string()) - } - - fn serialize_newtype_variant( - self, - _name: 
&'static str, - _variant_index: u32, - _variant: &'static str, - _value: &T, - ) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(key_must_be_a_string()) - } - - fn serialize_none(self) -> Result<()> { - Err(key_must_be_a_string()) - } - - fn serialize_some(self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - value.serialize(self) - } - - fn serialize_seq(self, _len: Option) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_tuple(self, _len: usize) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_map(self, _len: Option) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(key_must_be_a_string()) - } - - fn collect_str(self, value: &T) -> Result<()> - where - T: ?Sized + Display, - { - self.ser.collect_str(value) - } -} - -#[cfg(feature = "arbitrary_precision")] -struct NumberStrEmitter<'a, W: 'a + io::Write, F: 'a + Formatter>(&'a mut Serializer); - -#[cfg(feature = "arbitrary_precision")] -impl<'a, W: io::Write, F: Formatter> ser::Serializer for NumberStrEmitter<'a, W, F> { - type Ok = (); - type Error = Error; - - type SerializeSeq = Impossible<(), Error>; - type SerializeTuple = Impossible<(), Error>; - type SerializeTupleStruct = Impossible<(), Error>; - type SerializeTupleVariant = Impossible<(), Error>; - type SerializeMap = Impossible<(), Error>; - type SerializeStruct = Impossible<(), Error>; - type SerializeStructVariant = Impossible<(), Error>; - - fn 
serialize_bool(self, _v: bool) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_i8(self, _v: i8) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_i16(self, _v: i16) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_i32(self, _v: i32) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_i64(self, _v: i64) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_i128(self, _v: i128) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_u8(self, _v: u8) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_u16(self, _v: u16) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_u32(self, _v: u32) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_u64(self, _v: u64) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_u128(self, _v: u128) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_f32(self, _v: f32) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_f64(self, _v: f64) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_char(self, _v: char) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_str(self, value: &str) -> Result<()> { - let NumberStrEmitter(serializer) = self; - serializer - .formatter - .write_number_str(&mut serializer.writer, value) - .map_err(Error::io) - } - - fn serialize_bytes(self, _value: &[u8]) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_none(self) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_some(self, _value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(invalid_number()) - } - - fn serialize_unit(self) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_unit_struct(self, _name: &'static str) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - ) -> Result<()> { - Err(invalid_number()) - } - - fn serialize_newtype_struct(self, _name: &'static str, _value: &T) 
-> Result<()> - where - T: ?Sized + Serialize, - { - Err(invalid_number()) - } - - fn serialize_newtype_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _value: &T, - ) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(invalid_number()) - } - - fn serialize_seq(self, _len: Option) -> Result { - Err(invalid_number()) - } - - fn serialize_tuple(self, _len: usize) -> Result { - Err(invalid_number()) - } - - fn serialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(invalid_number()) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(invalid_number()) - } - - fn serialize_map(self, _len: Option) -> Result { - Err(invalid_number()) - } - - fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { - Err(invalid_number()) - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(invalid_number()) - } -} - -#[cfg(feature = "raw_value")] -struct RawValueStrEmitter<'a, W: 'a + io::Write, F: 'a + Formatter>(&'a mut Serializer); - -#[cfg(feature = "raw_value")] -impl<'a, W: io::Write, F: Formatter> ser::Serializer for RawValueStrEmitter<'a, W, F> { - type Ok = (); - type Error = Error; - - type SerializeSeq = Impossible<(), Error>; - type SerializeTuple = Impossible<(), Error>; - type SerializeTupleStruct = Impossible<(), Error>; - type SerializeTupleVariant = Impossible<(), Error>; - type SerializeMap = Impossible<(), Error>; - type SerializeStruct = Impossible<(), Error>; - type SerializeStructVariant = Impossible<(), Error>; - - fn serialize_bool(self, _v: bool) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_i8(self, _v: i8) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_i16(self, _v: i16) -> Result<()> { - 
Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_i32(self, _v: i32) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_i64(self, _v: i64) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_i128(self, _v: i128) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_u8(self, _v: u8) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_u16(self, _v: u16) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_u32(self, _v: u32) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_u64(self, _v: u64) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_u128(self, _v: u128) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_f32(self, _v: f32) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_f64(self, _v: f64) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_char(self, _v: char) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_str(self, value: &str) -> Result<()> { - let RawValueStrEmitter(serializer) = self; - serializer - .formatter - .write_raw_fragment(&mut serializer.writer, value) - .map_err(Error::io) - } - - fn serialize_bytes(self, _value: &[u8]) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_none(self) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_some(self, _value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_unit(self) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_unit_struct(self, _name: &'static str) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_unit_variant( - self, - 
_name: &'static str, - _variant_index: u32, - _variant: &'static str, - ) -> Result<()> { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_newtype_struct(self, _name: &'static str, _value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_newtype_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _value: &T, - ) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_seq(self, _len: Option) -> Result { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_tuple(self, _len: usize) -> Result { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_map(self, _len: Option) -> Result { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { - Err(ser::Error::custom("expected RawValue")) - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(ser::Error::custom("expected RawValue")) - } - - fn collect_str(self, value: &T) -> Result - where - T: ?Sized + Display, - { - self.serialize_str(&value.to_string()) - } -} - -/// Represents a character escape code in a type-safe manner. 
-pub enum CharEscape { - /// An escaped quote `"` - Quote, - /// An escaped reverse solidus `\` - ReverseSolidus, - /// An escaped solidus `/` - Solidus, - /// An escaped backspace character (usually escaped as `\b`) - Backspace, - /// An escaped form feed character (usually escaped as `\f`) - FormFeed, - /// An escaped line feed character (usually escaped as `\n`) - LineFeed, - /// An escaped carriage return character (usually escaped as `\r`) - CarriageReturn, - /// An escaped tab character (usually escaped as `\t`) - Tab, - /// An escaped ASCII plane control character (usually escaped as - /// `\u00XX` where `XX` are two hex characters) - AsciiControl(u8), -} - -impl CharEscape { - #[inline] - fn from_escape_table(escape: u8, byte: u8) -> CharEscape { - match escape { - self::BB => CharEscape::Backspace, - self::TT => CharEscape::Tab, - self::NN => CharEscape::LineFeed, - self::FF => CharEscape::FormFeed, - self::RR => CharEscape::CarriageReturn, - self::QU => CharEscape::Quote, - self::BS => CharEscape::ReverseSolidus, - self::UU => CharEscape::AsciiControl(byte), - _ => unreachable!(), - } - } -} - -/// This trait abstracts away serializing the JSON control characters, which allows the user to -/// optionally pretty print the JSON output. -pub trait Formatter { - /// Writes a `null` value to the specified writer. - #[inline] - fn write_null(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(b"null") - } - - /// Writes a `true` or `false` value to the specified writer. - #[inline] - fn write_bool(&mut self, writer: &mut W, value: bool) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let s = if value { - b"true" as &[u8] - } else { - b"false" as &[u8] - }; - writer.write_all(s) - } - - /// Writes an integer value like `-123` to the specified writer. 
- #[inline] - fn write_i8(&mut self, writer: &mut W, value: i8) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes an integer value like `-123` to the specified writer. - #[inline] - fn write_i16(&mut self, writer: &mut W, value: i16) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes an integer value like `-123` to the specified writer. - #[inline] - fn write_i32(&mut self, writer: &mut W, value: i32) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes an integer value like `-123` to the specified writer. - #[inline] - fn write_i64(&mut self, writer: &mut W, value: i64) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes an integer value like `-123` to the specified writer. - #[inline] - fn write_i128(&mut self, writer: &mut W, value: i128) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes an integer value like `123` to the specified writer. - #[inline] - fn write_u8(&mut self, writer: &mut W, value: u8) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes an integer value like `123` to the specified writer. 
- #[inline] - fn write_u16(&mut self, writer: &mut W, value: u16) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes an integer value like `123` to the specified writer. - #[inline] - fn write_u32(&mut self, writer: &mut W, value: u32) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes an integer value like `123` to the specified writer. - #[inline] - fn write_u64(&mut self, writer: &mut W, value: u64) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes an integer value like `123` to the specified writer. - #[inline] - fn write_u128(&mut self, writer: &mut W, value: u128) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = itoa::Buffer::new(); - let s = buffer.format(value); - writer.write_all(s.as_bytes()) - } - - /// Writes a floating point value like `-31.26e+12` to the specified writer. - #[inline] - fn write_f32(&mut self, writer: &mut W, value: f32) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = ryu::Buffer::new(); - let s = buffer.format_finite(value); - writer.write_all(s.as_bytes()) - } - - /// Writes a floating point value like `-31.26e+12` to the specified writer. - #[inline] - fn write_f64(&mut self, writer: &mut W, value: f64) -> io::Result<()> - where - W: ?Sized + io::Write, - { - let mut buffer = ryu::Buffer::new(); - let s = buffer.format_finite(value); - writer.write_all(s.as_bytes()) - } - - /// Writes a number that has already been rendered to a string. 
- #[inline] - fn write_number_str(&mut self, writer: &mut W, value: &str) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(value.as_bytes()) - } - - /// Called before each series of `write_string_fragment` and - /// `write_char_escape`. Writes a `"` to the specified writer. - #[inline] - fn begin_string(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(b"\"") - } - - /// Called after each series of `write_string_fragment` and - /// `write_char_escape`. Writes a `"` to the specified writer. - #[inline] - fn end_string(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(b"\"") - } - - /// Writes a string fragment that doesn't need any escaping to the - /// specified writer. - #[inline] - fn write_string_fragment(&mut self, writer: &mut W, fragment: &str) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(fragment.as_bytes()) - } - - /// Writes a character escape code to the specified writer. - #[inline] - fn write_char_escape(&mut self, writer: &mut W, char_escape: CharEscape) -> io::Result<()> - where - W: ?Sized + io::Write, - { - use self::CharEscape::*; - - let s = match char_escape { - Quote => b"\\\"", - ReverseSolidus => b"\\\\", - Solidus => b"\\/", - Backspace => b"\\b", - FormFeed => b"\\f", - LineFeed => b"\\n", - CarriageReturn => b"\\r", - Tab => b"\\t", - AsciiControl(byte) => { - static HEX_DIGITS: [u8; 16] = *b"0123456789abcdef"; - let bytes = &[ - b'\\', - b'u', - b'0', - b'0', - HEX_DIGITS[(byte >> 4) as usize], - HEX_DIGITS[(byte & 0xF) as usize], - ]; - return writer.write_all(bytes); - } - }; - - writer.write_all(s) - } - - /// Called before every array. Writes a `[` to the specified - /// writer. - #[inline] - fn begin_array(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(b"[") - } - - /// Called after every array. 
Writes a `]` to the specified - /// writer. - #[inline] - fn end_array(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(b"]") - } - - /// Called before every array value. Writes a `,` if needed to - /// the specified writer. - #[inline] - fn begin_array_value(&mut self, writer: &mut W, first: bool) -> io::Result<()> - where - W: ?Sized + io::Write, - { - if first { - Ok(()) - } else { - writer.write_all(b",") - } - } - - /// Called after every array value. - #[inline] - fn end_array_value(&mut self, _writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - Ok(()) - } - - /// Called before every object. Writes a `{` to the specified - /// writer. - #[inline] - fn begin_object(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(b"{") - } - - /// Called after every object. Writes a `}` to the specified - /// writer. - #[inline] - fn end_object(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(b"}") - } - - /// Called before every object key. - #[inline] - fn begin_object_key(&mut self, writer: &mut W, first: bool) -> io::Result<()> - where - W: ?Sized + io::Write, - { - if first { - Ok(()) - } else { - writer.write_all(b",") - } - } - - /// Called after every object key. A `:` should be written to the - /// specified writer by either this method or - /// `begin_object_value`. - #[inline] - fn end_object_key(&mut self, _writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - Ok(()) - } - - /// Called before every object value. A `:` should be written to - /// the specified writer by either this method or - /// `end_object_key`. - #[inline] - fn begin_object_value(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(b":") - } - - /// Called after every object value. 
- #[inline] - fn end_object_value(&mut self, _writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - Ok(()) - } - - /// Writes a raw JSON fragment that doesn't need any escaping to the - /// specified writer. - #[inline] - fn write_raw_fragment(&mut self, writer: &mut W, fragment: &str) -> io::Result<()> - where - W: ?Sized + io::Write, - { - writer.write_all(fragment.as_bytes()) - } -} - -/// This structure compacts a JSON value with no extra whitespace. -#[derive(Clone, Debug)] -pub struct CompactFormatter; - -impl Formatter for CompactFormatter {} - -/// This structure pretty prints a JSON value to make it human readable. -#[derive(Clone, Debug)] -pub struct PrettyFormatter<'a> { - current_indent: usize, - has_value: bool, - indent: &'a [u8], -} - -impl<'a> PrettyFormatter<'a> { - /// Construct a pretty printer formatter that defaults to using two spaces for indentation. - pub fn new() -> Self { - PrettyFormatter::with_indent(b" ") - } - - /// Construct a pretty printer formatter that uses the `indent` string for indentation. 
- pub fn with_indent(indent: &'a [u8]) -> Self { - PrettyFormatter { - current_indent: 0, - has_value: false, - indent, - } - } -} - -impl<'a> Default for PrettyFormatter<'a> { - fn default() -> Self { - PrettyFormatter::new() - } -} - -impl<'a> Formatter for PrettyFormatter<'a> { - #[inline] - fn begin_array(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - self.current_indent += 1; - self.has_value = false; - writer.write_all(b"[") - } - - #[inline] - fn end_array(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - self.current_indent -= 1; - - if self.has_value { - tri!(writer.write_all(b"\n")); - tri!(indent(writer, self.current_indent, self.indent)); - } - - writer.write_all(b"]") - } - - #[inline] - fn begin_array_value(&mut self, writer: &mut W, first: bool) -> io::Result<()> - where - W: ?Sized + io::Write, - { - tri!(writer.write_all(if first { b"\n" } else { b",\n" })); - indent(writer, self.current_indent, self.indent) - } - - #[inline] - fn end_array_value(&mut self, _writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - self.has_value = true; - Ok(()) - } - - #[inline] - fn begin_object(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - self.current_indent += 1; - self.has_value = false; - writer.write_all(b"{") - } - - #[inline] - fn end_object(&mut self, writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - self.current_indent -= 1; - - if self.has_value { - tri!(writer.write_all(b"\n")); - tri!(indent(writer, self.current_indent, self.indent)); - } - - writer.write_all(b"}") - } - - #[inline] - fn begin_object_key(&mut self, writer: &mut W, first: bool) -> io::Result<()> - where - W: ?Sized + io::Write, - { - tri!(writer.write_all(if first { b"\n" } else { b",\n" })); - indent(writer, self.current_indent, self.indent) - } - - #[inline] - fn begin_object_value(&mut self, writer: &mut W) -> io::Result<()> - where - 
W: ?Sized + io::Write, - { - writer.write_all(b": ") - } - - #[inline] - fn end_object_value(&mut self, _writer: &mut W) -> io::Result<()> - where - W: ?Sized + io::Write, - { - self.has_value = true; - Ok(()) - } -} - -fn format_escaped_str(writer: &mut W, formatter: &mut F, value: &str) -> io::Result<()> -where - W: ?Sized + io::Write, - F: ?Sized + Formatter, -{ - tri!(formatter.begin_string(writer)); - tri!(format_escaped_str_contents(writer, formatter, value)); - formatter.end_string(writer) -} - -fn format_escaped_str_contents( - writer: &mut W, - formatter: &mut F, - value: &str, -) -> io::Result<()> -where - W: ?Sized + io::Write, - F: ?Sized + Formatter, -{ - let bytes = value.as_bytes(); - - let mut start = 0; - - for (i, &byte) in bytes.iter().enumerate() { - let escape = ESCAPE[byte as usize]; - if escape == 0 { - continue; - } - - if start < i { - tri!(formatter.write_string_fragment(writer, &value[start..i])); - } - - let char_escape = CharEscape::from_escape_table(escape, byte); - tri!(formatter.write_char_escape(writer, char_escape)); - - start = i + 1; - } - - if start == bytes.len() { - return Ok(()); - } - - formatter.write_string_fragment(writer, &value[start..]) -} - -const BB: u8 = b'b'; // \x08 -const TT: u8 = b't'; // \x09 -const NN: u8 = b'n'; // \x0A -const FF: u8 = b'f'; // \x0C -const RR: u8 = b'r'; // \x0D -const QU: u8 = b'"'; // \x22 -const BS: u8 = b'\\'; // \x5C -const UU: u8 = b'u'; // \x00...\x1F except the ones above -const __: u8 = 0; - -// Lookup table of escape sequences. A value of b'x' at index i means that byte -// i is escaped as "\x" in JSON. A value of 0 means that byte i is not escaped. 
-static ESCAPE: [u8; 256] = [ - // 1 2 3 4 5 6 7 8 9 A B C D E F - UU, UU, UU, UU, UU, UU, UU, UU, BB, TT, NN, UU, FF, RR, UU, UU, // 0 - UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, // 1 - __, __, QU, __, __, __, __, __, __, __, __, __, __, __, __, __, // 2 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 3 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 4 - __, __, __, __, __, __, __, __, __, __, __, __, BS, __, __, __, // 5 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 6 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 7 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 8 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 9 - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // A - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // B - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // C - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // D - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // E - __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // F -]; - -/// Serialize the given data structure as JSON into the I/O stream. -/// -/// Serialization guarantees it only feeds valid UTF-8 sequences to the writer. -/// -/// # Errors -/// -/// Serialization can fail if `T`'s implementation of `Serialize` decides to -/// fail, or if `T` contains a map with non-string keys. -#[inline] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub fn to_writer(writer: W, value: &T) -> Result<()> -where - W: io::Write, - T: ?Sized + Serialize, -{ - let mut ser = Serializer::new(writer); - value.serialize(&mut ser) -} - -/// Serialize the given data structure as pretty-printed JSON into the I/O -/// stream. -/// -/// Serialization guarantees it only feeds valid UTF-8 sequences to the writer. 
-/// -/// # Errors -/// -/// Serialization can fail if `T`'s implementation of `Serialize` decides to -/// fail, or if `T` contains a map with non-string keys. -#[inline] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub fn to_writer_pretty(writer: W, value: &T) -> Result<()> -where - W: io::Write, - T: ?Sized + Serialize, -{ - let mut ser = Serializer::pretty(writer); - value.serialize(&mut ser) -} - -/// Serialize the given data structure as a JSON byte vector. -/// -/// # Errors -/// -/// Serialization can fail if `T`'s implementation of `Serialize` decides to -/// fail, or if `T` contains a map with non-string keys. -#[inline] -pub fn to_vec(value: &T) -> Result> -where - T: ?Sized + Serialize, -{ - let mut writer = Vec::with_capacity(128); - tri!(to_writer(&mut writer, value)); - Ok(writer) -} - -/// Serialize the given data structure as a pretty-printed JSON byte vector. -/// -/// # Errors -/// -/// Serialization can fail if `T`'s implementation of `Serialize` decides to -/// fail, or if `T` contains a map with non-string keys. -#[inline] -pub fn to_vec_pretty(value: &T) -> Result> -where - T: ?Sized + Serialize, -{ - let mut writer = Vec::with_capacity(128); - tri!(to_writer_pretty(&mut writer, value)); - Ok(writer) -} - -/// Serialize the given data structure as a String of JSON. -/// -/// # Errors -/// -/// Serialization can fail if `T`'s implementation of `Serialize` decides to -/// fail, or if `T` contains a map with non-string keys. -#[inline] -pub fn to_string(value: &T) -> Result -where - T: ?Sized + Serialize, -{ - let vec = tri!(to_vec(value)); - let string = unsafe { - // We do not emit invalid UTF-8. - String::from_utf8_unchecked(vec) - }; - Ok(string) -} - -/// Serialize the given data structure as a pretty-printed String of JSON. -/// -/// # Errors -/// -/// Serialization can fail if `T`'s implementation of `Serialize` decides to -/// fail, or if `T` contains a map with non-string keys. 
-#[inline] -pub fn to_string_pretty(value: &T) -> Result -where - T: ?Sized + Serialize, -{ - let vec = tri!(to_vec_pretty(value)); - let string = unsafe { - // We do not emit invalid UTF-8. - String::from_utf8_unchecked(vec) - }; - Ok(string) -} - -fn indent(wr: &mut W, n: usize, s: &[u8]) -> io::Result<()> -where - W: ?Sized + io::Write, -{ - for _ in 0..n { - tri!(wr.write_all(s)); - } - - Ok(()) -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/value/de.rs s390-tools-2.33.1/rust-vendor/serde_json/src/value/de.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/value/de.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/value/de.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1377 +0,0 @@ -use crate::error::Error; -use crate::map::Map; -use crate::number::Number; -use crate::value::Value; -use alloc::borrow::{Cow, ToOwned}; -use alloc::string::String; -#[cfg(feature = "raw_value")] -use alloc::string::ToString; -use alloc::vec::{self, Vec}; -use core::fmt; -use core::slice; -use core::str::FromStr; -use serde::de::{ - self, Deserialize, DeserializeSeed, EnumAccess, Expected, IntoDeserializer, MapAccess, - SeqAccess, Unexpected, VariantAccess, Visitor, -}; -use serde::forward_to_deserialize_any; - -#[cfg(feature = "arbitrary_precision")] -use crate::number::NumberFromString; - -impl<'de> Deserialize<'de> for Value { - #[inline] - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - struct ValueVisitor; - - impl<'de> Visitor<'de> for ValueVisitor { - type Value = Value; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("any valid JSON value") - } - - #[inline] - fn visit_bool(self, value: bool) -> Result { - Ok(Value::Bool(value)) - } - - #[inline] - fn visit_i64(self, value: i64) -> Result { - Ok(Value::Number(value.into())) - } - - #[inline] - fn visit_u64(self, value: u64) -> Result { - Ok(Value::Number(value.into())) - } - - #[inline] - fn 
visit_f64(self, value: f64) -> Result { - Ok(Number::from_f64(value).map_or(Value::Null, Value::Number)) - } - - #[cfg(any(feature = "std", feature = "alloc"))] - #[inline] - fn visit_str(self, value: &str) -> Result - where - E: serde::de::Error, - { - self.visit_string(String::from(value)) - } - - #[cfg(any(feature = "std", feature = "alloc"))] - #[inline] - fn visit_string(self, value: String) -> Result { - Ok(Value::String(value)) - } - - #[inline] - fn visit_none(self) -> Result { - Ok(Value::Null) - } - - #[inline] - fn visit_some(self, deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - Deserialize::deserialize(deserializer) - } - - #[inline] - fn visit_unit(self) -> Result { - Ok(Value::Null) - } - - #[inline] - fn visit_seq(self, mut visitor: V) -> Result - where - V: SeqAccess<'de>, - { - let mut vec = Vec::new(); - - while let Some(elem) = tri!(visitor.next_element()) { - vec.push(elem); - } - - Ok(Value::Array(vec)) - } - - #[cfg(any(feature = "std", feature = "alloc"))] - fn visit_map(self, mut visitor: V) -> Result - where - V: MapAccess<'de>, - { - match visitor.next_key_seed(KeyClassifier)? 
{ - #[cfg(feature = "arbitrary_precision")] - Some(KeyClass::Number) => { - let number: NumberFromString = visitor.next_value()?; - Ok(Value::Number(number.value)) - } - #[cfg(feature = "raw_value")] - Some(KeyClass::RawValue) => { - let value = visitor.next_value_seed(crate::raw::BoxedFromString)?; - crate::from_str(value.get()).map_err(de::Error::custom) - } - Some(KeyClass::Map(first_key)) => { - let mut values = Map::new(); - - values.insert(first_key, tri!(visitor.next_value())); - while let Some((key, value)) = tri!(visitor.next_entry()) { - values.insert(key, value); - } - - Ok(Value::Object(values)) - } - None => Ok(Value::Object(Map::new())), - } - } - } - - deserializer.deserialize_any(ValueVisitor) - } -} - -impl FromStr for Value { - type Err = Error; - fn from_str(s: &str) -> Result { - super::super::de::from_str(s) - } -} - -macro_rules! deserialize_number { - ($method:ident) => { - #[cfg(not(feature = "arbitrary_precision"))] - fn $method(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Number(n) => n.deserialize_any(visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - #[cfg(feature = "arbitrary_precision")] - fn $method(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Number(n) => n.$method(visitor), - _ => self.deserialize_any(visitor), - } - } - }; -} - -fn visit_array<'de, V>(array: Vec, visitor: V) -> Result -where - V: Visitor<'de>, -{ - let len = array.len(); - let mut deserializer = SeqDeserializer::new(array); - let seq = tri!(visitor.visit_seq(&mut deserializer)); - let remaining = deserializer.iter.len(); - if remaining == 0 { - Ok(seq) - } else { - Err(serde::de::Error::invalid_length( - len, - &"fewer elements in array", - )) - } -} - -fn visit_object<'de, V>(object: Map, visitor: V) -> Result -where - V: Visitor<'de>, -{ - let len = object.len(); - let mut deserializer = MapDeserializer::new(object); - let map = tri!(visitor.visit_map(&mut deserializer)); - 
let remaining = deserializer.iter.len(); - if remaining == 0 { - Ok(map) - } else { - Err(serde::de::Error::invalid_length( - len, - &"fewer elements in map", - )) - } -} - -impl<'de> serde::Deserializer<'de> for Value { - type Error = Error; - - #[inline] - fn deserialize_any(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Null => visitor.visit_unit(), - Value::Bool(v) => visitor.visit_bool(v), - Value::Number(n) => n.deserialize_any(visitor), - #[cfg(any(feature = "std", feature = "alloc"))] - Value::String(v) => visitor.visit_string(v), - Value::Array(v) => visit_array(v, visitor), - Value::Object(v) => visit_object(v, visitor), - } - } - - deserialize_number!(deserialize_i8); - deserialize_number!(deserialize_i16); - deserialize_number!(deserialize_i32); - deserialize_number!(deserialize_i64); - deserialize_number!(deserialize_i128); - deserialize_number!(deserialize_u8); - deserialize_number!(deserialize_u16); - deserialize_number!(deserialize_u32); - deserialize_number!(deserialize_u64); - deserialize_number!(deserialize_u128); - deserialize_number!(deserialize_f32); - deserialize_number!(deserialize_f64); - - #[inline] - fn deserialize_option(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Null => visitor.visit_none(), - _ => visitor.visit_some(self), - } - } - - #[inline] - fn deserialize_enum( - self, - _name: &str, - _variants: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - let (variant, value) = match self { - Value::Object(value) => { - let mut iter = value.into_iter(); - let (variant, value) = match iter.next() { - Some(v) => v, - None => { - return Err(serde::de::Error::invalid_value( - Unexpected::Map, - &"map with a single key", - )); - } - }; - // enums are encoded in json as maps with a single key:value pair - if iter.next().is_some() { - return Err(serde::de::Error::invalid_value( - Unexpected::Map, - &"map with a single key", - )); - } - 
(variant, Some(value)) - } - Value::String(variant) => (variant, None), - other => { - return Err(serde::de::Error::invalid_type( - other.unexpected(), - &"string or map", - )); - } - }; - - visitor.visit_enum(EnumDeserializer { variant, value }) - } - - #[inline] - fn deserialize_newtype_struct( - self, - name: &'static str, - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - #[cfg(feature = "raw_value")] - { - if name == crate::raw::TOKEN { - return visitor.visit_map(crate::raw::OwnedRawDeserializer { - raw_value: Some(self.to_string()), - }); - } - } - - let _ = name; - visitor.visit_newtype_struct(self) - } - - fn deserialize_bool(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Bool(v) => visitor.visit_bool(v), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_char(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_string(visitor) - } - - fn deserialize_str(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_string(visitor) - } - - fn deserialize_string(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - #[cfg(any(feature = "std", feature = "alloc"))] - Value::String(v) => visitor.visit_string(v), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_bytes(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_byte_buf(visitor) - } - - fn deserialize_byte_buf(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - #[cfg(any(feature = "std", feature = "alloc"))] - Value::String(v) => visitor.visit_string(v), - Value::Array(v) => visit_array(v, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_unit(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Null => visitor.visit_unit(), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_unit_struct(self, _name: &'static str, visitor: V) -> Result - where 
- V: Visitor<'de>, - { - self.deserialize_unit(visitor) - } - - fn deserialize_seq(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Array(v) => visit_array(v, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_tuple(self, _len: usize, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_seq(visitor) - } - - fn deserialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - self.deserialize_seq(visitor) - } - - fn deserialize_map(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Object(v) => visit_object(v, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_struct( - self, - _name: &'static str, - _fields: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Array(v) => visit_array(v, visitor), - Value::Object(v) => visit_object(v, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_identifier(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_string(visitor) - } - - fn deserialize_ignored_any(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - drop(self); - visitor.visit_unit() - } -} - -struct EnumDeserializer { - variant: String, - value: Option, -} - -impl<'de> EnumAccess<'de> for EnumDeserializer { - type Error = Error; - type Variant = VariantDeserializer; - - fn variant_seed(self, seed: V) -> Result<(V::Value, VariantDeserializer), Error> - where - V: DeserializeSeed<'de>, - { - let variant = self.variant.into_deserializer(); - let visitor = VariantDeserializer { value: self.value }; - seed.deserialize(variant).map(|v| (v, visitor)) - } -} - -impl<'de> IntoDeserializer<'de, Error> for Value { - type Deserializer = Self; - - fn into_deserializer(self) -> Self::Deserializer { - self - } -} - -struct VariantDeserializer { - value: Option, -} 
- -impl<'de> VariantAccess<'de> for VariantDeserializer { - type Error = Error; - - fn unit_variant(self) -> Result<(), Error> { - match self.value { - Some(value) => Deserialize::deserialize(value), - None => Ok(()), - } - } - - fn newtype_variant_seed(self, seed: T) -> Result - where - T: DeserializeSeed<'de>, - { - match self.value { - Some(value) => seed.deserialize(value), - None => Err(serde::de::Error::invalid_type( - Unexpected::UnitVariant, - &"newtype variant", - )), - } - } - - fn tuple_variant(self, _len: usize, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self.value { - Some(Value::Array(v)) => { - if v.is_empty() { - visitor.visit_unit() - } else { - visit_array(v, visitor) - } - } - Some(other) => Err(serde::de::Error::invalid_type( - other.unexpected(), - &"tuple variant", - )), - None => Err(serde::de::Error::invalid_type( - Unexpected::UnitVariant, - &"tuple variant", - )), - } - } - - fn struct_variant( - self, - _fields: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - match self.value { - Some(Value::Object(v)) => visit_object(v, visitor), - Some(other) => Err(serde::de::Error::invalid_type( - other.unexpected(), - &"struct variant", - )), - None => Err(serde::de::Error::invalid_type( - Unexpected::UnitVariant, - &"struct variant", - )), - } - } -} - -struct SeqDeserializer { - iter: vec::IntoIter, -} - -impl SeqDeserializer { - fn new(vec: Vec) -> Self { - SeqDeserializer { - iter: vec.into_iter(), - } - } -} - -impl<'de> SeqAccess<'de> for SeqDeserializer { - type Error = Error; - - fn next_element_seed(&mut self, seed: T) -> Result, Error> - where - T: DeserializeSeed<'de>, - { - match self.iter.next() { - Some(value) => seed.deserialize(value).map(Some), - None => Ok(None), - } - } - - fn size_hint(&self) -> Option { - match self.iter.size_hint() { - (lower, Some(upper)) if lower == upper => Some(upper), - _ => None, - } - } -} - -struct MapDeserializer { - iter: as 
IntoIterator>::IntoIter, - value: Option, -} - -impl MapDeserializer { - fn new(map: Map) -> Self { - MapDeserializer { - iter: map.into_iter(), - value: None, - } - } -} - -impl<'de> MapAccess<'de> for MapDeserializer { - type Error = Error; - - fn next_key_seed(&mut self, seed: T) -> Result, Error> - where - T: DeserializeSeed<'de>, - { - match self.iter.next() { - Some((key, value)) => { - self.value = Some(value); - let key_de = MapKeyDeserializer { - key: Cow::Owned(key), - }; - seed.deserialize(key_de).map(Some) - } - None => Ok(None), - } - } - - fn next_value_seed(&mut self, seed: T) -> Result - where - T: DeserializeSeed<'de>, - { - match self.value.take() { - Some(value) => seed.deserialize(value), - None => Err(serde::de::Error::custom("value is missing")), - } - } - - fn size_hint(&self) -> Option { - match self.iter.size_hint() { - (lower, Some(upper)) if lower == upper => Some(upper), - _ => None, - } - } -} - -macro_rules! deserialize_value_ref_number { - ($method:ident) => { - #[cfg(not(feature = "arbitrary_precision"))] - fn $method(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Number(n) => n.deserialize_any(visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - #[cfg(feature = "arbitrary_precision")] - fn $method(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Number(n) => n.$method(visitor), - _ => self.deserialize_any(visitor), - } - } - }; -} - -fn visit_array_ref<'de, V>(array: &'de [Value], visitor: V) -> Result -where - V: Visitor<'de>, -{ - let len = array.len(); - let mut deserializer = SeqRefDeserializer::new(array); - let seq = tri!(visitor.visit_seq(&mut deserializer)); - let remaining = deserializer.iter.len(); - if remaining == 0 { - Ok(seq) - } else { - Err(serde::de::Error::invalid_length( - len, - &"fewer elements in array", - )) - } -} - -fn visit_object_ref<'de, V>(object: &'de Map, visitor: V) -> Result -where - V: Visitor<'de>, -{ - let len = 
object.len(); - let mut deserializer = MapRefDeserializer::new(object); - let map = tri!(visitor.visit_map(&mut deserializer)); - let remaining = deserializer.iter.len(); - if remaining == 0 { - Ok(map) - } else { - Err(serde::de::Error::invalid_length( - len, - &"fewer elements in map", - )) - } -} - -impl<'de> serde::Deserializer<'de> for &'de Value { - type Error = Error; - - fn deserialize_any(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Null => visitor.visit_unit(), - Value::Bool(v) => visitor.visit_bool(*v), - Value::Number(n) => n.deserialize_any(visitor), - Value::String(v) => visitor.visit_borrowed_str(v), - Value::Array(v) => visit_array_ref(v, visitor), - Value::Object(v) => visit_object_ref(v, visitor), - } - } - - deserialize_value_ref_number!(deserialize_i8); - deserialize_value_ref_number!(deserialize_i16); - deserialize_value_ref_number!(deserialize_i32); - deserialize_value_ref_number!(deserialize_i64); - deserialize_number!(deserialize_i128); - deserialize_value_ref_number!(deserialize_u8); - deserialize_value_ref_number!(deserialize_u16); - deserialize_value_ref_number!(deserialize_u32); - deserialize_value_ref_number!(deserialize_u64); - deserialize_number!(deserialize_u128); - deserialize_value_ref_number!(deserialize_f32); - deserialize_value_ref_number!(deserialize_f64); - - fn deserialize_option(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match *self { - Value::Null => visitor.visit_none(), - _ => visitor.visit_some(self), - } - } - - fn deserialize_enum( - self, - _name: &str, - _variants: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - let (variant, value) = match self { - Value::Object(value) => { - let mut iter = value.into_iter(); - let (variant, value) = match iter.next() { - Some(v) => v, - None => { - return Err(serde::de::Error::invalid_value( - Unexpected::Map, - &"map with a single key", - )); - } - }; - // enums are encoded in json as 
maps with a single key:value pair - if iter.next().is_some() { - return Err(serde::de::Error::invalid_value( - Unexpected::Map, - &"map with a single key", - )); - } - (variant, Some(value)) - } - Value::String(variant) => (variant, None), - other => { - return Err(serde::de::Error::invalid_type( - other.unexpected(), - &"string or map", - )); - } - }; - - visitor.visit_enum(EnumRefDeserializer { variant, value }) - } - - #[inline] - fn deserialize_newtype_struct( - self, - name: &'static str, - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - #[cfg(feature = "raw_value")] - { - if name == crate::raw::TOKEN { - return visitor.visit_map(crate::raw::OwnedRawDeserializer { - raw_value: Some(self.to_string()), - }); - } - } - - let _ = name; - visitor.visit_newtype_struct(self) - } - - fn deserialize_bool(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match *self { - Value::Bool(v) => visitor.visit_bool(v), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_char(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_str(visitor) - } - - fn deserialize_str(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::String(v) => visitor.visit_borrowed_str(v), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_string(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_str(visitor) - } - - fn deserialize_bytes(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::String(v) => visitor.visit_borrowed_str(v), - Value::Array(v) => visit_array_ref(v, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_byte_buf(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_bytes(visitor) - } - - fn deserialize_unit(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match *self { - Value::Null => visitor.visit_unit(), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn 
deserialize_unit_struct(self, _name: &'static str, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_unit(visitor) - } - - fn deserialize_seq(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Array(v) => visit_array_ref(v, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_tuple(self, _len: usize, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_seq(visitor) - } - - fn deserialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - self.deserialize_seq(visitor) - } - - fn deserialize_map(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Object(v) => visit_object_ref(v, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_struct( - self, - _name: &'static str, - _fields: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - match self { - Value::Array(v) => visit_array_ref(v, visitor), - Value::Object(v) => visit_object_ref(v, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_identifier(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_str(visitor) - } - - fn deserialize_ignored_any(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - visitor.visit_unit() - } -} - -struct EnumRefDeserializer<'de> { - variant: &'de str, - value: Option<&'de Value>, -} - -impl<'de> EnumAccess<'de> for EnumRefDeserializer<'de> { - type Error = Error; - type Variant = VariantRefDeserializer<'de>; - - fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), Error> - where - V: DeserializeSeed<'de>, - { - let variant = self.variant.into_deserializer(); - let visitor = VariantRefDeserializer { value: self.value }; - seed.deserialize(variant).map(|v| (v, visitor)) - } -} - -struct VariantRefDeserializer<'de> { - value: Option<&'de Value>, -} - -impl<'de> 
VariantAccess<'de> for VariantRefDeserializer<'de> { - type Error = Error; - - fn unit_variant(self) -> Result<(), Error> { - match self.value { - Some(value) => Deserialize::deserialize(value), - None => Ok(()), - } - } - - fn newtype_variant_seed(self, seed: T) -> Result - where - T: DeserializeSeed<'de>, - { - match self.value { - Some(value) => seed.deserialize(value), - None => Err(serde::de::Error::invalid_type( - Unexpected::UnitVariant, - &"newtype variant", - )), - } - } - - fn tuple_variant(self, _len: usize, visitor: V) -> Result - where - V: Visitor<'de>, - { - match self.value { - Some(Value::Array(v)) => { - if v.is_empty() { - visitor.visit_unit() - } else { - visit_array_ref(v, visitor) - } - } - Some(other) => Err(serde::de::Error::invalid_type( - other.unexpected(), - &"tuple variant", - )), - None => Err(serde::de::Error::invalid_type( - Unexpected::UnitVariant, - &"tuple variant", - )), - } - } - - fn struct_variant( - self, - _fields: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - match self.value { - Some(Value::Object(v)) => visit_object_ref(v, visitor), - Some(other) => Err(serde::de::Error::invalid_type( - other.unexpected(), - &"struct variant", - )), - None => Err(serde::de::Error::invalid_type( - Unexpected::UnitVariant, - &"struct variant", - )), - } - } -} - -struct SeqRefDeserializer<'de> { - iter: slice::Iter<'de, Value>, -} - -impl<'de> SeqRefDeserializer<'de> { - fn new(slice: &'de [Value]) -> Self { - SeqRefDeserializer { iter: slice.iter() } - } -} - -impl<'de> SeqAccess<'de> for SeqRefDeserializer<'de> { - type Error = Error; - - fn next_element_seed(&mut self, seed: T) -> Result, Error> - where - T: DeserializeSeed<'de>, - { - match self.iter.next() { - Some(value) => seed.deserialize(value).map(Some), - None => Ok(None), - } - } - - fn size_hint(&self) -> Option { - match self.iter.size_hint() { - (lower, Some(upper)) if lower == upper => Some(upper), - _ => None, - } - } -} - -struct 
MapRefDeserializer<'de> { - iter: <&'de Map as IntoIterator>::IntoIter, - value: Option<&'de Value>, -} - -impl<'de> MapRefDeserializer<'de> { - fn new(map: &'de Map) -> Self { - MapRefDeserializer { - iter: map.into_iter(), - value: None, - } - } -} - -impl<'de> MapAccess<'de> for MapRefDeserializer<'de> { - type Error = Error; - - fn next_key_seed(&mut self, seed: T) -> Result, Error> - where - T: DeserializeSeed<'de>, - { - match self.iter.next() { - Some((key, value)) => { - self.value = Some(value); - let key_de = MapKeyDeserializer { - key: Cow::Borrowed(&**key), - }; - seed.deserialize(key_de).map(Some) - } - None => Ok(None), - } - } - - fn next_value_seed(&mut self, seed: T) -> Result - where - T: DeserializeSeed<'de>, - { - match self.value.take() { - Some(value) => seed.deserialize(value), - None => Err(serde::de::Error::custom("value is missing")), - } - } - - fn size_hint(&self) -> Option { - match self.iter.size_hint() { - (lower, Some(upper)) if lower == upper => Some(upper), - _ => None, - } - } -} - -struct MapKeyDeserializer<'de> { - key: Cow<'de, str>, -} - -macro_rules! 
deserialize_integer_key { - ($method:ident => $visit:ident) => { - fn $method(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match (self.key.parse(), self.key) { - (Ok(integer), _) => visitor.$visit(integer), - (Err(_), Cow::Borrowed(s)) => visitor.visit_borrowed_str(s), - #[cfg(any(feature = "std", feature = "alloc"))] - (Err(_), Cow::Owned(s)) => visitor.visit_string(s), - } - } - }; -} - -impl<'de> serde::Deserializer<'de> for MapKeyDeserializer<'de> { - type Error = Error; - - fn deserialize_any(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - BorrowedCowStrDeserializer::new(self.key).deserialize_any(visitor) - } - - deserialize_integer_key!(deserialize_i8 => visit_i8); - deserialize_integer_key!(deserialize_i16 => visit_i16); - deserialize_integer_key!(deserialize_i32 => visit_i32); - deserialize_integer_key!(deserialize_i64 => visit_i64); - deserialize_integer_key!(deserialize_i128 => visit_i128); - deserialize_integer_key!(deserialize_u8 => visit_u8); - deserialize_integer_key!(deserialize_u16 => visit_u16); - deserialize_integer_key!(deserialize_u32 => visit_u32); - deserialize_integer_key!(deserialize_u64 => visit_u64); - deserialize_integer_key!(deserialize_u128 => visit_u128); - - #[inline] - fn deserialize_option(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - // Map keys cannot be null. - visitor.visit_some(self) - } - - #[inline] - fn deserialize_newtype_struct( - self, - _name: &'static str, - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - visitor.visit_newtype_struct(self) - } - - fn deserialize_enum( - self, - name: &'static str, - variants: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - self.key - .into_deserializer() - .deserialize_enum(name, variants, visitor) - } - - forward_to_deserialize_any! 
{ - bool f32 f64 char str string bytes byte_buf unit unit_struct seq tuple - tuple_struct map struct identifier ignored_any - } -} - -struct KeyClassifier; - -enum KeyClass { - Map(String), - #[cfg(feature = "arbitrary_precision")] - Number, - #[cfg(feature = "raw_value")] - RawValue, -} - -impl<'de> DeserializeSeed<'de> for KeyClassifier { - type Value = KeyClass; - - fn deserialize(self, deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - deserializer.deserialize_str(self) - } -} - -impl<'de> Visitor<'de> for KeyClassifier { - type Value = KeyClass; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a string key") - } - - fn visit_str(self, s: &str) -> Result - where - E: de::Error, - { - match s { - #[cfg(feature = "arbitrary_precision")] - crate::number::TOKEN => Ok(KeyClass::Number), - #[cfg(feature = "raw_value")] - crate::raw::TOKEN => Ok(KeyClass::RawValue), - _ => Ok(KeyClass::Map(s.to_owned())), - } - } - - #[cfg(any(feature = "std", feature = "alloc"))] - fn visit_string(self, s: String) -> Result - where - E: de::Error, - { - match s.as_str() { - #[cfg(feature = "arbitrary_precision")] - crate::number::TOKEN => Ok(KeyClass::Number), - #[cfg(feature = "raw_value")] - crate::raw::TOKEN => Ok(KeyClass::RawValue), - _ => Ok(KeyClass::Map(s)), - } - } -} - -impl Value { - #[cold] - fn invalid_type(&self, exp: &dyn Expected) -> E - where - E: serde::de::Error, - { - serde::de::Error::invalid_type(self.unexpected(), exp) - } - - #[cold] - fn unexpected(&self) -> Unexpected { - match self { - Value::Null => Unexpected::Unit, - Value::Bool(b) => Unexpected::Bool(*b), - Value::Number(n) => n.unexpected(), - Value::String(s) => Unexpected::Str(s), - Value::Array(_) => Unexpected::Seq, - Value::Object(_) => Unexpected::Map, - } - } -} - -struct BorrowedCowStrDeserializer<'de> { - value: Cow<'de, str>, -} - -impl<'de> BorrowedCowStrDeserializer<'de> { - fn new(value: Cow<'de, str>) -> Self { - 
BorrowedCowStrDeserializer { value } - } -} - -impl<'de> de::Deserializer<'de> for BorrowedCowStrDeserializer<'de> { - type Error = Error; - - fn deserialize_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - match self.value { - Cow::Borrowed(string) => visitor.visit_borrowed_str(string), - #[cfg(any(feature = "std", feature = "alloc"))] - Cow::Owned(string) => visitor.visit_string(string), - } - } - - fn deserialize_enum( - self, - _name: &str, - _variants: &'static [&'static str], - visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_enum(self) - } - - forward_to_deserialize_any! { - bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string - bytes byte_buf option unit unit_struct newtype_struct seq tuple - tuple_struct map struct identifier ignored_any - } -} - -impl<'de> de::EnumAccess<'de> for BorrowedCowStrDeserializer<'de> { - type Error = Error; - type Variant = UnitOnly; - - fn variant_seed(self, seed: T) -> Result<(T::Value, Self::Variant), Error> - where - T: de::DeserializeSeed<'de>, - { - let value = seed.deserialize(self)?; - Ok((value, UnitOnly)) - } -} - -struct UnitOnly; - -impl<'de> de::VariantAccess<'de> for UnitOnly { - type Error = Error; - - fn unit_variant(self) -> Result<(), Error> { - Ok(()) - } - - fn newtype_variant_seed(self, _seed: T) -> Result - where - T: de::DeserializeSeed<'de>, - { - Err(de::Error::invalid_type( - Unexpected::UnitVariant, - &"newtype variant", - )) - } - - fn tuple_variant(self, _len: usize, _visitor: V) -> Result - where - V: de::Visitor<'de>, - { - Err(de::Error::invalid_type( - Unexpected::UnitVariant, - &"tuple variant", - )) - } - - fn struct_variant( - self, - _fields: &'static [&'static str], - _visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - Err(de::Error::invalid_type( - Unexpected::UnitVariant, - &"struct variant", - )) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/value/from.rs 
s390-tools-2.33.1/rust-vendor/serde_json/src/value/from.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/value/from.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/value/from.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,277 +0,0 @@ -use super::Value; -use crate::map::Map; -use crate::number::Number; -use alloc::borrow::Cow; -use alloc::string::{String, ToString}; -use alloc::vec::Vec; -use core::iter::FromIterator; - -macro_rules! from_integer { - ($($ty:ident)*) => { - $( - impl From<$ty> for Value { - fn from(n: $ty) -> Self { - Value::Number(n.into()) - } - } - )* - }; -} - -from_integer! { - i8 i16 i32 i64 isize - u8 u16 u32 u64 usize -} - -#[cfg(feature = "arbitrary_precision")] -from_integer! { - i128 u128 -} - -impl From for Value { - /// Convert 32-bit floating point number to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let f: f32 = 13.37; - /// let x: Value = f.into(); - /// ``` - fn from(f: f32) -> Self { - Number::from_f32(f).map_or(Value::Null, Value::Number) - } -} - -impl From for Value { - /// Convert 64-bit floating point number to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let f: f64 = 13.37; - /// let x: Value = f.into(); - /// ``` - fn from(f: f64) -> Self { - Number::from_f64(f).map_or(Value::Null, Value::Number) - } -} - -impl From for Value { - /// Convert boolean to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let b = false; - /// let x: Value = b.into(); - /// ``` - fn from(f: bool) -> Self { - Value::Bool(f) - } -} - -impl From for Value { - /// Convert `String` to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let s: String = "lorem".to_string(); - /// let x: Value = s.into(); - /// ``` - fn from(f: String) -> Self { - Value::String(f) - } -} - -impl<'a> From<&'a str> for Value { - /// Convert string slice to `Value` - /// - 
/// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let s: &str = "lorem"; - /// let x: Value = s.into(); - /// ``` - fn from(f: &str) -> Self { - Value::String(f.to_string()) - } -} - -impl<'a> From> for Value { - /// Convert copy-on-write string to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// use std::borrow::Cow; - /// - /// let s: Cow = Cow::Borrowed("lorem"); - /// let x: Value = s.into(); - /// ``` - /// - /// ``` - /// use serde_json::Value; - /// use std::borrow::Cow; - /// - /// let s: Cow = Cow::Owned("lorem".to_string()); - /// let x: Value = s.into(); - /// ``` - fn from(f: Cow<'a, str>) -> Self { - Value::String(f.into_owned()) - } -} - -impl From for Value { - /// Convert `Number` to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::{Number, Value}; - /// - /// let n = Number::from(7); - /// let x: Value = n.into(); - /// ``` - fn from(f: Number) -> Self { - Value::Number(f) - } -} - -impl From> for Value { - /// Convert map (with string keys) to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::{Map, Value}; - /// - /// let mut m = Map::new(); - /// m.insert("Lorem".to_string(), "ipsum".into()); - /// let x: Value = m.into(); - /// ``` - fn from(f: Map) -> Self { - Value::Object(f) - } -} - -impl> From> for Value { - /// Convert a `Vec` to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let v = vec!["lorem", "ipsum", "dolor"]; - /// let x: Value = v.into(); - /// ``` - fn from(f: Vec) -> Self { - Value::Array(f.into_iter().map(Into::into).collect()) - } -} - -impl<'a, T: Clone + Into> From<&'a [T]> for Value { - /// Convert a slice to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let v: &[&str] = &["lorem", "ipsum", "dolor"]; - /// let x: Value = v.into(); - /// ``` - fn from(f: &'a [T]) -> Self { - Value::Array(f.iter().cloned().map(Into::into).collect()) - } -} - -impl> 
FromIterator for Value { - /// Convert an iteratable type to a `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let v = std::iter::repeat(42).take(5); - /// let x: Value = v.collect(); - /// ``` - /// - /// ``` - /// use serde_json::Value; - /// - /// let v: Vec<_> = vec!["lorem", "ipsum", "dolor"]; - /// let x: Value = v.into_iter().collect(); - /// ``` - /// - /// ``` - /// use std::iter::FromIterator; - /// use serde_json::Value; - /// - /// let x: Value = Value::from_iter(vec!["lorem", "ipsum", "dolor"]); - /// ``` - fn from_iter>(iter: I) -> Self { - Value::Array(iter.into_iter().map(Into::into).collect()) - } -} - -impl, V: Into> FromIterator<(K, V)> for Value { - /// Convert an iteratable type to a `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let v: Vec<_> = vec![("lorem", 40), ("ipsum", 2)]; - /// let x: Value = v.into_iter().collect(); - /// ``` - fn from_iter>(iter: I) -> Self { - Value::Object( - iter.into_iter() - .map(|(k, v)| (k.into(), v.into())) - .collect(), - ) - } -} - -impl From<()> for Value { - /// Convert `()` to `Value` - /// - /// # Examples - /// - /// ``` - /// use serde_json::Value; - /// - /// let u = (); - /// let x: Value = u.into(); - /// ``` - fn from((): ()) -> Self { - Value::Null - } -} - -impl From> for Value -where - T: Into, -{ - fn from(opt: Option) -> Self { - match opt { - None => Value::Null, - Some(value) => Into::into(value), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/value/index.rs s390-tools-2.33.1/rust-vendor/serde_json/src/value/index.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/value/index.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/value/index.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,258 +0,0 @@ -use super::Value; -use crate::map::Map; -use alloc::borrow::ToOwned; -use alloc::string::String; -use core::fmt::{self, Display}; -use core::ops; - -/// A type that 
can be used to index into a `serde_json::Value`. -/// -/// The [`get`] and [`get_mut`] methods of `Value` accept any type that -/// implements `Index`, as does the [square-bracket indexing operator]. This -/// trait is implemented for strings which are used as the index into a JSON -/// map, and for `usize` which is used as the index into a JSON array. -/// -/// [`get`]: ../enum.Value.html#method.get -/// [`get_mut`]: ../enum.Value.html#method.get_mut -/// [square-bracket indexing operator]: ../enum.Value.html#impl-Index%3CI%3E -/// -/// This trait is sealed and cannot be implemented for types outside of -/// `serde_json`. -/// -/// # Examples -/// -/// ``` -/// # use serde_json::json; -/// # -/// let data = json!({ "inner": [1, 2, 3] }); -/// -/// // Data is a JSON map so it can be indexed with a string. -/// let inner = &data["inner"]; -/// -/// // Inner is a JSON array so it can be indexed with an integer. -/// let first = &inner[0]; -/// -/// assert_eq!(first, 1); -/// ``` -pub trait Index: private::Sealed { - /// Return None if the key is not already in the array or object. - #[doc(hidden)] - fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value>; - - /// Return None if the key is not already in the array or object. - #[doc(hidden)] - fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value>; - - /// Panic if array index out of bounds. If key is not already in the object, - /// insert it with a value of null. Panic if Value is a type that cannot be - /// indexed into, except if Value is null then it can be treated as an empty - /// object. 
- #[doc(hidden)] - fn index_or_insert<'v>(&self, v: &'v mut Value) -> &'v mut Value; -} - -impl Index for usize { - fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value> { - match v { - Value::Array(vec) => vec.get(*self), - _ => None, - } - } - fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value> { - match v { - Value::Array(vec) => vec.get_mut(*self), - _ => None, - } - } - fn index_or_insert<'v>(&self, v: &'v mut Value) -> &'v mut Value { - match v { - Value::Array(vec) => { - let len = vec.len(); - vec.get_mut(*self).unwrap_or_else(|| { - panic!( - "cannot access index {} of JSON array of length {}", - self, len - ) - }) - } - _ => panic!("cannot access index {} of JSON {}", self, Type(v)), - } - } -} - -impl Index for str { - fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value> { - match v { - Value::Object(map) => map.get(self), - _ => None, - } - } - fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value> { - match v { - Value::Object(map) => map.get_mut(self), - _ => None, - } - } - fn index_or_insert<'v>(&self, v: &'v mut Value) -> &'v mut Value { - if let Value::Null = v { - *v = Value::Object(Map::new()); - } - match v { - Value::Object(map) => map.entry(self.to_owned()).or_insert(Value::Null), - _ => panic!("cannot access key {:?} in JSON {}", self, Type(v)), - } - } -} - -impl Index for String { - fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value> { - self[..].index_into(v) - } - fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value> { - self[..].index_into_mut(v) - } - fn index_or_insert<'v>(&self, v: &'v mut Value) -> &'v mut Value { - self[..].index_or_insert(v) - } -} - -impl<'a, T> Index for &'a T -where - T: ?Sized + Index, -{ - fn index_into<'v>(&self, v: &'v Value) -> Option<&'v Value> { - (**self).index_into(v) - } - fn index_into_mut<'v>(&self, v: &'v mut Value) -> Option<&'v mut Value> { - (**self).index_into_mut(v) - } - fn index_or_insert<'v>(&self, v: &'v mut Value) 
-> &'v mut Value { - (**self).index_or_insert(v) - } -} - -// Prevent users from implementing the Index trait. -mod private { - pub trait Sealed {} - impl Sealed for usize {} - impl Sealed for str {} - impl Sealed for alloc::string::String {} - impl<'a, T> Sealed for &'a T where T: ?Sized + Sealed {} -} - -/// Used in panic messages. -struct Type<'a>(&'a Value); - -impl<'a> Display for Type<'a> { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match *self.0 { - Value::Null => formatter.write_str("null"), - Value::Bool(_) => formatter.write_str("boolean"), - Value::Number(_) => formatter.write_str("number"), - Value::String(_) => formatter.write_str("string"), - Value::Array(_) => formatter.write_str("array"), - Value::Object(_) => formatter.write_str("object"), - } - } -} - -// The usual semantics of Index is to panic on invalid indexing. -// -// That said, the usual semantics are for things like Vec and BTreeMap which -// have different use cases than Value. If you are working with a Vec, you know -// that you are working with a Vec and you can get the len of the Vec and make -// sure your indices are within bounds. The Value use cases are more -// loosey-goosey. You got some JSON from an endpoint and you want to pull values -// out of it. Outside of this Index impl, you already have the option of using -// value.as_array() and working with the Vec directly, or matching on -// Value::Array and getting the Vec directly. The Index impl means you can skip -// that and index directly into the thing using a concise syntax. You don't have -// to check the type, you don't have to check the len, it is all about what you -// expect the Value to look like. -// -// Basically the use cases that would be well served by panicking here are -// better served by using one of the other approaches: get and get_mut, -// as_array, or match. 
The value of this impl is that it adds a way of working -// with Value that is not well served by the existing approaches: concise and -// careless and sometimes that is exactly what you want. -impl ops::Index for Value -where - I: Index, -{ - type Output = Value; - - /// Index into a `serde_json::Value` using the syntax `value[0]` or - /// `value["k"]`. - /// - /// Returns `Value::Null` if the type of `self` does not match the type of - /// the index, for example if the index is a string and `self` is an array - /// or a number. Also returns `Value::Null` if the given key does not exist - /// in the map or the given index is not within the bounds of the array. - /// - /// For retrieving deeply nested values, you should have a look at the - /// `Value::pointer` method. - /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// let data = json!({ - /// "x": { - /// "y": ["z", "zz"] - /// } - /// }); - /// - /// assert_eq!(data["x"]["y"], json!(["z", "zz"])); - /// assert_eq!(data["x"]["y"][0], json!("z")); - /// - /// assert_eq!(data["a"], json!(null)); // returns null for undefined values - /// assert_eq!(data["a"]["b"], json!(null)); // does not panic - /// ``` - fn index(&self, index: I) -> &Value { - static NULL: Value = Value::Null; - index.index_into(self).unwrap_or(&NULL) - } -} - -impl ops::IndexMut for Value -where - I: Index, -{ - /// Write into a `serde_json::Value` using the syntax `value[0] = ...` or - /// `value["k"] = ...`. - /// - /// If the index is a number, the value must be an array of length bigger - /// than the index. Indexing into a value that is not an array or an array - /// that is too small will panic. - /// - /// If the index is a string, the value must be an object or null which is - /// treated like an empty object. If the key is not already present in the - /// object, it will be inserted with a value of null. Indexing into a value - /// that is neither an object nor null will panic. 
- /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// let mut data = json!({ "x": 0 }); - /// - /// // replace an existing key - /// data["x"] = json!(1); - /// - /// // insert a new key - /// data["y"] = json!([false, false, false]); - /// - /// // replace an array value - /// data["y"][0] = json!(true); - /// - /// // inserted a deeply nested key - /// data["a"]["b"]["c"]["d"] = json!(true); - /// - /// println!("{}", data); - /// ``` - fn index_mut(&mut self, index: I) -> &mut Value { - index.index_or_insert(self) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/value/mod.rs s390-tools-2.33.1/rust-vendor/serde_json/src/value/mod.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/value/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/value/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,987 +0,0 @@ -//! The Value enum, a loosely typed way of representing any valid JSON value. -//! -//! # Constructing JSON -//! -//! Serde JSON provides a [`json!` macro][macro] to build `serde_json::Value` -//! objects with very natural JSON syntax. -//! -//! ``` -//! use serde_json::json; -//! -//! fn main() { -//! // The type of `john` is `serde_json::Value` -//! let john = json!({ -//! "name": "John Doe", -//! "age": 43, -//! "phones": [ -//! "+44 1234567", -//! "+44 2345678" -//! ] -//! }); -//! -//! println!("first phone number: {}", john["phones"][0]); -//! -//! // Convert to a string of JSON and print it out -//! println!("{}", john.to_string()); -//! } -//! ``` -//! -//! The `Value::to_string()` function converts a `serde_json::Value` into a -//! `String` of JSON text. -//! -//! One neat thing about the `json!` macro is that variables and expressions can -//! be interpolated directly into the JSON value as you are building it. Serde -//! will check at compile time that the value you are interpolating is able to -//! be represented as JSON. -//! -//! ``` -//! # use serde_json::json; -//! 
# -//! # fn random_phone() -> u16 { 0 } -//! # -//! let full_name = "John Doe"; -//! let age_last_year = 42; -//! -//! // The type of `john` is `serde_json::Value` -//! let john = json!({ -//! "name": full_name, -//! "age": age_last_year + 1, -//! "phones": [ -//! format!("+44 {}", random_phone()) -//! ] -//! }); -//! ``` -//! -//! A string of JSON data can be parsed into a `serde_json::Value` by the -//! [`serde_json::from_str`][from_str] function. There is also -//! [`from_slice`][from_slice] for parsing from a byte slice `&[u8]` and -//! [`from_reader`][from_reader] for parsing from any `io::Read` like a File or -//! a TCP stream. -//! -//! ``` -//! use serde_json::{json, Value, Error}; -//! -//! fn untyped_example() -> Result<(), Error> { -//! // Some JSON input data as a &str. Maybe this comes from the user. -//! let data = r#" -//! { -//! "name": "John Doe", -//! "age": 43, -//! "phones": [ -//! "+44 1234567", -//! "+44 2345678" -//! ] -//! }"#; -//! -//! // Parse the string of data into serde_json::Value. -//! let v: Value = serde_json::from_str(data)?; -//! -//! // Access parts of the data by indexing with square brackets. -//! println!("Please call {} at the number {}", v["name"], v["phones"][0]); -//! -//! Ok(()) -//! } -//! # -//! # untyped_example().unwrap(); -//! ``` -//! -//! [macro]: crate::json -//! [from_str]: crate::de::from_str -//! [from_slice]: crate::de::from_slice -//! [from_reader]: crate::de::from_reader - -use crate::error::Error; -use crate::io; -use alloc::string::String; -use alloc::vec::Vec; -use core::fmt::{self, Debug, Display}; -use core::mem; -use core::str; -use serde::de::DeserializeOwned; -use serde::ser::Serialize; - -pub use self::index::Index; -pub use self::ser::Serializer; -pub use crate::map::Map; -pub use crate::number::Number; - -#[cfg(feature = "raw_value")] -pub use crate::raw::{to_raw_value, RawValue}; - -/// Represents any valid JSON value. 
-/// -/// See the [`serde_json::value` module documentation](self) for usage examples. -#[derive(Clone, Eq, PartialEq)] -pub enum Value { - /// Represents a JSON null value. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!(null); - /// ``` - Null, - - /// Represents a JSON boolean. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!(true); - /// ``` - Bool(bool), - - /// Represents a JSON number, whether integer or floating point. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!(12.5); - /// ``` - Number(Number), - - /// Represents a JSON string. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!("a string"); - /// ``` - String(String), - - /// Represents a JSON array. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!(["an", "array"]); - /// ``` - Array(Vec), - - /// Represents a JSON object. - /// - /// By default the map is backed by a BTreeMap. Enable the `preserve_order` - /// feature of serde_json to use IndexMap instead, which preserves - /// entries in the order they are inserted into the map. In particular, this - /// allows JSON data to be deserialized into a Value and serialized to a - /// string while retaining the order of map keys in the input. 
- /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "an": "object" }); - /// ``` - Object(Map), -} - -impl Debug for Value { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match self { - Value::Null => formatter.write_str("Null"), - Value::Bool(boolean) => write!(formatter, "Bool({})", boolean), - Value::Number(number) => Debug::fmt(number, formatter), - Value::String(string) => write!(formatter, "String({:?})", string), - Value::Array(vec) => { - formatter.write_str("Array ")?; - Debug::fmt(vec, formatter) - } - Value::Object(map) => { - formatter.write_str("Object ")?; - Debug::fmt(map, formatter) - } - } - } -} - -impl Display for Value { - /// Display a JSON value as a string. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let json = json!({ "city": "London", "street": "10 Downing Street" }); - /// - /// // Compact format: - /// // - /// // {"city":"London","street":"10 Downing Street"} - /// let compact = format!("{}", json); - /// assert_eq!(compact, - /// "{\"city\":\"London\",\"street\":\"10 Downing Street\"}"); - /// - /// // Pretty format: - /// // - /// // { - /// // "city": "London", - /// // "street": "10 Downing Street" - /// // } - /// let pretty = format!("{:#}", json); - /// assert_eq!(pretty, - /// "{\n \"city\": \"London\",\n \"street\": \"10 Downing Street\"\n}"); - /// ``` - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - struct WriterFormatter<'a, 'b: 'a> { - inner: &'a mut fmt::Formatter<'b>, - } - - impl<'a, 'b> io::Write for WriterFormatter<'a, 'b> { - fn write(&mut self, buf: &[u8]) -> io::Result { - // Safety: the serializer below only emits valid utf8 when using - // the default formatter. 
- let s = unsafe { str::from_utf8_unchecked(buf) }; - tri!(self.inner.write_str(s).map_err(io_error)); - Ok(buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - } - - fn io_error(_: fmt::Error) -> io::Error { - // Error value does not matter because Display impl just maps it - // back to fmt::Error. - io::Error::new(io::ErrorKind::Other, "fmt error") - } - - let alternate = f.alternate(); - let mut wr = WriterFormatter { inner: f }; - if alternate { - // {:#} - super::ser::to_writer_pretty(&mut wr, self).map_err(|_| fmt::Error) - } else { - // {} - super::ser::to_writer(&mut wr, self).map_err(|_| fmt::Error) - } - } -} - -fn parse_index(s: &str) -> Option { - if s.starts_with('+') || (s.starts_with('0') && s.len() != 1) { - return None; - } - s.parse().ok() -} - -impl Value { - /// Index into a JSON array or map. A string index can be used to access a - /// value in a map, and a usize index can be used to access an element of an - /// array. - /// - /// Returns `None` if the type of `self` does not match the type of the - /// index, for example if the index is a string and `self` is an array or a - /// number. Also returns `None` if the given key does not exist in the map - /// or the given index is not within the bounds of the array. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let object = json!({ "A": 65, "B": 66, "C": 67 }); - /// assert_eq!(*object.get("A").unwrap(), json!(65)); - /// - /// let array = json!([ "A", "B", "C" ]); - /// assert_eq!(*array.get(2).unwrap(), json!("C")); - /// - /// assert_eq!(array.get("A"), None); - /// ``` - /// - /// Square brackets can also be used to index into a value in a more concise - /// way. This returns `Value::Null` in cases where `get` would have returned - /// `None`. 
- /// - /// ``` - /// # use serde_json::json; - /// # - /// let object = json!({ - /// "A": ["a", "á", "à"], - /// "B": ["b", "bÌ"], - /// "C": ["c", "ć", "ć̣", "ḉ"], - /// }); - /// assert_eq!(object["B"][0], json!("b")); - /// - /// assert_eq!(object["D"], json!(null)); - /// assert_eq!(object[0]["x"]["y"]["z"], json!(null)); - /// ``` - pub fn get(&self, index: I) -> Option<&Value> { - index.index_into(self) - } - - /// Mutably index into a JSON array or map. A string index can be used to - /// access a value in a map, and a usize index can be used to access an - /// element of an array. - /// - /// Returns `None` if the type of `self` does not match the type of the - /// index, for example if the index is a string and `self` is an array or a - /// number. Also returns `None` if the given key does not exist in the map - /// or the given index is not within the bounds of the array. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let mut object = json!({ "A": 65, "B": 66, "C": 67 }); - /// *object.get_mut("A").unwrap() = json!(69); - /// - /// let mut array = json!([ "A", "B", "C" ]); - /// *array.get_mut(2).unwrap() = json!("D"); - /// ``` - pub fn get_mut(&mut self, index: I) -> Option<&mut Value> { - index.index_into_mut(self) - } - - /// Returns true if the `Value` is an Object. Returns false otherwise. - /// - /// For any Value on which `is_object` returns true, `as_object` and - /// `as_object_mut` are guaranteed to return the map representation of the - /// object. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let obj = json!({ "a": { "nested": true }, "b": ["an", "array"] }); - /// - /// assert!(obj.is_object()); - /// assert!(obj["a"].is_object()); - /// - /// // array, not an object - /// assert!(!obj["b"].is_object()); - /// ``` - pub fn is_object(&self) -> bool { - self.as_object().is_some() - } - - /// If the `Value` is an Object, returns the associated Map. Returns None - /// otherwise. 
- /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": { "nested": true }, "b": ["an", "array"] }); - /// - /// // The length of `{"nested": true}` is 1 entry. - /// assert_eq!(v["a"].as_object().unwrap().len(), 1); - /// - /// // The array `["an", "array"]` is not an object. - /// assert_eq!(v["b"].as_object(), None); - /// ``` - pub fn as_object(&self) -> Option<&Map> { - match self { - Value::Object(map) => Some(map), - _ => None, - } - } - - /// If the `Value` is an Object, returns the associated mutable Map. - /// Returns None otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let mut v = json!({ "a": { "nested": true } }); - /// - /// v["a"].as_object_mut().unwrap().clear(); - /// assert_eq!(v, json!({ "a": {} })); - /// ``` - pub fn as_object_mut(&mut self) -> Option<&mut Map> { - match self { - Value::Object(map) => Some(map), - _ => None, - } - } - - /// Returns true if the `Value` is an Array. Returns false otherwise. - /// - /// For any Value on which `is_array` returns true, `as_array` and - /// `as_array_mut` are guaranteed to return the vector representing the - /// array. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let obj = json!({ "a": ["an", "array"], "b": { "an": "object" } }); - /// - /// assert!(obj["a"].is_array()); - /// - /// // an object, not an array - /// assert!(!obj["b"].is_array()); - /// ``` - pub fn is_array(&self) -> bool { - self.as_array().is_some() - } - - /// If the `Value` is an Array, returns the associated vector. Returns None - /// otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": ["an", "array"], "b": { "an": "object" } }); - /// - /// // The length of `["an", "array"]` is 2 elements. - /// assert_eq!(v["a"].as_array().unwrap().len(), 2); - /// - /// // The object `{"an": "object"}` is not an array. 
- /// assert_eq!(v["b"].as_array(), None); - /// ``` - pub fn as_array(&self) -> Option<&Vec> { - match self { - Value::Array(array) => Some(array), - _ => None, - } - } - - /// If the `Value` is an Array, returns the associated mutable vector. - /// Returns None otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let mut v = json!({ "a": ["an", "array"] }); - /// - /// v["a"].as_array_mut().unwrap().clear(); - /// assert_eq!(v, json!({ "a": [] })); - /// ``` - pub fn as_array_mut(&mut self) -> Option<&mut Vec> { - match self { - Value::Array(list) => Some(list), - _ => None, - } - } - - /// Returns true if the `Value` is a String. Returns false otherwise. - /// - /// For any Value on which `is_string` returns true, `as_str` is guaranteed - /// to return the string slice. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": "some string", "b": false }); - /// - /// assert!(v["a"].is_string()); - /// - /// // The boolean `false` is not a string. - /// assert!(!v["b"].is_string()); - /// ``` - pub fn is_string(&self) -> bool { - self.as_str().is_some() - } - - /// If the `Value` is a String, returns the associated str. Returns None - /// otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": "some string", "b": false }); - /// - /// assert_eq!(v["a"].as_str(), Some("some string")); - /// - /// // The boolean `false` is not a string. - /// assert_eq!(v["b"].as_str(), None); - /// - /// // JSON values are printed in JSON representation, so strings are in quotes. - /// // - /// // The value is: "some string" - /// println!("The value is: {}", v["a"]); - /// - /// // Rust strings are printed without quotes. - /// // - /// // The value is: some string - /// println!("The value is: {}", v["a"].as_str().unwrap()); - /// ``` - pub fn as_str(&self) -> Option<&str> { - match self { - Value::String(s) => Some(s), - _ => None, - } - } - - /// Returns true if the `Value` is a Number. 
Returns false otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": 1, "b": "2" }); - /// - /// assert!(v["a"].is_number()); - /// - /// // The string `"2"` is a string, not a number. - /// assert!(!v["b"].is_number()); - /// ``` - pub fn is_number(&self) -> bool { - match *self { - Value::Number(_) => true, - _ => false, - } - } - - /// Returns true if the `Value` is an integer between `i64::MIN` and - /// `i64::MAX`. - /// - /// For any Value on which `is_i64` returns true, `as_i64` is guaranteed to - /// return the integer value. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let big = i64::max_value() as u64 + 10; - /// let v = json!({ "a": 64, "b": big, "c": 256.0 }); - /// - /// assert!(v["a"].is_i64()); - /// - /// // Greater than i64::MAX. - /// assert!(!v["b"].is_i64()); - /// - /// // Numbers with a decimal point are not considered integers. - /// assert!(!v["c"].is_i64()); - /// ``` - pub fn is_i64(&self) -> bool { - match self { - Value::Number(n) => n.is_i64(), - _ => false, - } - } - - /// Returns true if the `Value` is an integer between zero and `u64::MAX`. - /// - /// For any Value on which `is_u64` returns true, `as_u64` is guaranteed to - /// return the integer value. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": 64, "b": -64, "c": 256.0 }); - /// - /// assert!(v["a"].is_u64()); - /// - /// // Negative integer. - /// assert!(!v["b"].is_u64()); - /// - /// // Numbers with a decimal point are not considered integers. - /// assert!(!v["c"].is_u64()); - /// ``` - pub fn is_u64(&self) -> bool { - match self { - Value::Number(n) => n.is_u64(), - _ => false, - } - } - - /// Returns true if the `Value` is a number that can be represented by f64. - /// - /// For any Value on which `is_f64` returns true, `as_f64` is guaranteed to - /// return the floating point value. 
- /// - /// Currently this function returns true if and only if both `is_i64` and - /// `is_u64` return false but this is not a guarantee in the future. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": 256.0, "b": 64, "c": -64 }); - /// - /// assert!(v["a"].is_f64()); - /// - /// // Integers. - /// assert!(!v["b"].is_f64()); - /// assert!(!v["c"].is_f64()); - /// ``` - pub fn is_f64(&self) -> bool { - match self { - Value::Number(n) => n.is_f64(), - _ => false, - } - } - - /// If the `Value` is an integer, represent it as i64 if possible. Returns - /// None otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let big = i64::max_value() as u64 + 10; - /// let v = json!({ "a": 64, "b": big, "c": 256.0 }); - /// - /// assert_eq!(v["a"].as_i64(), Some(64)); - /// assert_eq!(v["b"].as_i64(), None); - /// assert_eq!(v["c"].as_i64(), None); - /// ``` - pub fn as_i64(&self) -> Option { - match self { - Value::Number(n) => n.as_i64(), - _ => None, - } - } - - /// If the `Value` is an integer, represent it as u64 if possible. Returns - /// None otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": 64, "b": -64, "c": 256.0 }); - /// - /// assert_eq!(v["a"].as_u64(), Some(64)); - /// assert_eq!(v["b"].as_u64(), None); - /// assert_eq!(v["c"].as_u64(), None); - /// ``` - pub fn as_u64(&self) -> Option { - match self { - Value::Number(n) => n.as_u64(), - _ => None, - } - } - - /// If the `Value` is a number, represent it as f64 if possible. Returns - /// None otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": 256.0, "b": 64, "c": -64 }); - /// - /// assert_eq!(v["a"].as_f64(), Some(256.0)); - /// assert_eq!(v["b"].as_f64(), Some(64.0)); - /// assert_eq!(v["c"].as_f64(), Some(-64.0)); - /// ``` - pub fn as_f64(&self) -> Option { - match self { - Value::Number(n) => n.as_f64(), - _ => None, - } - } - - /// Returns true if the `Value` is a Boolean. 
Returns false otherwise. - /// - /// For any Value on which `is_boolean` returns true, `as_bool` is - /// guaranteed to return the boolean value. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": false, "b": "false" }); - /// - /// assert!(v["a"].is_boolean()); - /// - /// // The string `"false"` is a string, not a boolean. - /// assert!(!v["b"].is_boolean()); - /// ``` - pub fn is_boolean(&self) -> bool { - self.as_bool().is_some() - } - - /// If the `Value` is a Boolean, returns the associated bool. Returns None - /// otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": false, "b": "false" }); - /// - /// assert_eq!(v["a"].as_bool(), Some(false)); - /// - /// // The string `"false"` is a string, not a boolean. - /// assert_eq!(v["b"].as_bool(), None); - /// ``` - pub fn as_bool(&self) -> Option { - match *self { - Value::Bool(b) => Some(b), - _ => None, - } - } - - /// Returns true if the `Value` is a Null. Returns false otherwise. - /// - /// For any Value on which `is_null` returns true, `as_null` is guaranteed - /// to return `Some(())`. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": null, "b": false }); - /// - /// assert!(v["a"].is_null()); - /// - /// // The boolean `false` is not null. - /// assert!(!v["b"].is_null()); - /// ``` - pub fn is_null(&self) -> bool { - self.as_null().is_some() - } - - /// If the `Value` is a Null, returns (). Returns None otherwise. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let v = json!({ "a": null, "b": false }); - /// - /// assert_eq!(v["a"].as_null(), Some(())); - /// - /// // The boolean `false` is not null. - /// assert_eq!(v["b"].as_null(), None); - /// ``` - pub fn as_null(&self) -> Option<()> { - match *self { - Value::Null => Some(()), - _ => None, - } - } - - /// Looks up a value by a JSON Pointer. 
- /// - /// JSON Pointer defines a string syntax for identifying a specific value - /// within a JavaScript Object Notation (JSON) document. - /// - /// A Pointer is a Unicode string with the reference tokens separated by `/`. - /// Inside tokens `/` is replaced by `~1` and `~` is replaced by `~0`. The - /// addressed value is returned and if there is no such value `None` is - /// returned. - /// - /// For more information read [RFC6901](https://tools.ietf.org/html/rfc6901). - /// - /// # Examples - /// - /// ``` - /// # use serde_json::json; - /// # - /// let data = json!({ - /// "x": { - /// "y": ["z", "zz"] - /// } - /// }); - /// - /// assert_eq!(data.pointer("/x/y/1").unwrap(), &json!("zz")); - /// assert_eq!(data.pointer("/a/b/c"), None); - /// ``` - pub fn pointer(&self, pointer: &str) -> Option<&Value> { - if pointer.is_empty() { - return Some(self); - } - if !pointer.starts_with('/') { - return None; - } - pointer - .split('/') - .skip(1) - .map(|x| x.replace("~1", "/").replace("~0", "~")) - .try_fold(self, |target, token| match target { - Value::Object(map) => map.get(&token), - Value::Array(list) => parse_index(&token).and_then(|x| list.get(x)), - _ => None, - }) - } - - /// Looks up a value by a JSON Pointer and returns a mutable reference to - /// that value. - /// - /// JSON Pointer defines a string syntax for identifying a specific value - /// within a JavaScript Object Notation (JSON) document. - /// - /// A Pointer is a Unicode string with the reference tokens separated by `/`. - /// Inside tokens `/` is replaced by `~1` and `~` is replaced by `~0`. The - /// addressed value is returned and if there is no such value `None` is - /// returned. - /// - /// For more information read [RFC6901](https://tools.ietf.org/html/rfc6901). 
- /// - /// # Example of Use - /// - /// ``` - /// use serde_json::Value; - /// - /// fn main() { - /// let s = r#"{"x": 1.0, "y": 2.0}"#; - /// let mut value: Value = serde_json::from_str(s).unwrap(); - /// - /// // Check value using read-only pointer - /// assert_eq!(value.pointer("/x"), Some(&1.0.into())); - /// // Change value with direct assignment - /// *value.pointer_mut("/x").unwrap() = 1.5.into(); - /// // Check that new value was written - /// assert_eq!(value.pointer("/x"), Some(&1.5.into())); - /// // Or change the value only if it exists - /// value.pointer_mut("/x").map(|v| *v = 1.5.into()); - /// - /// // "Steal" ownership of a value. Can replace with any valid Value. - /// let old_x = value.pointer_mut("/x").map(Value::take).unwrap(); - /// assert_eq!(old_x, 1.5); - /// assert_eq!(value.pointer("/x").unwrap(), &Value::Null); - /// } - /// ``` - pub fn pointer_mut(&mut self, pointer: &str) -> Option<&mut Value> { - if pointer.is_empty() { - return Some(self); - } - if !pointer.starts_with('/') { - return None; - } - pointer - .split('/') - .skip(1) - .map(|x| x.replace("~1", "/").replace("~0", "~")) - .try_fold(self, |target, token| match target { - Value::Object(map) => map.get_mut(&token), - Value::Array(list) => parse_index(&token).and_then(move |x| list.get_mut(x)), - _ => None, - }) - } - - /// Takes the value out of the `Value`, leaving a `Null` in its place. - /// - /// ``` - /// # use serde_json::json; - /// # - /// let mut v = json!({ "x": "y" }); - /// assert_eq!(v["x"].take(), json!("y")); - /// assert_eq!(v, json!({ "x": null })); - /// ``` - pub fn take(&mut self) -> Value { - mem::replace(self, Value::Null) - } -} - -/// The default value is `Value::Null`. -/// -/// This is useful for handling omitted `Value` fields when deserializing. 
-/// -/// # Examples -/// -/// ``` -/// # use serde::Deserialize; -/// use serde_json::Value; -/// -/// #[derive(Deserialize)] -/// struct Settings { -/// level: i32, -/// #[serde(default)] -/// extras: Value, -/// } -/// -/// # fn try_main() -> Result<(), serde_json::Error> { -/// let data = r#" { "level": 42 } "#; -/// let s: Settings = serde_json::from_str(data)?; -/// -/// assert_eq!(s.level, 42); -/// assert_eq!(s.extras, Value::Null); -/// # -/// # Ok(()) -/// # } -/// # -/// # try_main().unwrap() -/// ``` -impl Default for Value { - fn default() -> Value { - Value::Null - } -} - -mod de; -mod from; -mod index; -mod partial_eq; -mod ser; - -/// Convert a `T` into `serde_json::Value` which is an enum that can represent -/// any valid JSON data. -/// -/// # Example -/// -/// ``` -/// use serde::Serialize; -/// use serde_json::json; -/// -/// use std::error::Error; -/// -/// #[derive(Serialize)] -/// struct User { -/// fingerprint: String, -/// location: String, -/// } -/// -/// fn compare_json_values() -> Result<(), Box> { -/// let u = User { -/// fingerprint: "0xF9BA143B95FF6D82".to_owned(), -/// location: "Menlo Park, CA".to_owned(), -/// }; -/// -/// // The type of `expected` is `serde_json::Value` -/// let expected = json!({ -/// "fingerprint": "0xF9BA143B95FF6D82", -/// "location": "Menlo Park, CA", -/// }); -/// -/// let v = serde_json::to_value(u).unwrap(); -/// assert_eq!(v, expected); -/// -/// Ok(()) -/// } -/// # -/// # compare_json_values().unwrap(); -/// ``` -/// -/// # Errors -/// -/// This conversion can fail if `T`'s implementation of `Serialize` decides to -/// fail, or if `T` contains a map with non-string keys. -/// -/// ``` -/// use std::collections::BTreeMap; -/// -/// fn main() { -/// // The keys in this map are vectors, not strings. 
-/// let mut map = BTreeMap::new(); -/// map.insert(vec![32, 64], "x86"); -/// -/// println!("{}", serde_json::to_value(map).unwrap_err()); -/// } -/// ``` -// Taking by value is more friendly to iterator adapters, option and result -// consumers, etc. See https://github.com/serde-rs/json/pull/149. -pub fn to_value(value: T) -> Result -where - T: Serialize, -{ - value.serialize(Serializer) -} - -/// Interpret a `serde_json::Value` as an instance of type `T`. -/// -/// # Example -/// -/// ``` -/// use serde::Deserialize; -/// use serde_json::json; -/// -/// #[derive(Deserialize, Debug)] -/// struct User { -/// fingerprint: String, -/// location: String, -/// } -/// -/// fn main() { -/// // The type of `j` is `serde_json::Value` -/// let j = json!({ -/// "fingerprint": "0xF9BA143B95FF6D82", -/// "location": "Menlo Park, CA" -/// }); -/// -/// let u: User = serde_json::from_value(j).unwrap(); -/// println!("{:#?}", u); -/// } -/// ``` -/// -/// # Errors -/// -/// This conversion can fail if the structure of the Value does not match the -/// structure expected by `T`, for example if `T` is a struct type but the Value -/// contains something other than a JSON map. It can also fail if the structure -/// is correct but `T`'s implementation of `Deserialize` decides that something -/// is wrong with the data, for example required struct fields are missing from -/// the JSON map or some number is too big to fit in the expected primitive -/// type. 
-pub fn from_value(value: Value) -> Result -where - T: DeserializeOwned, -{ - T::deserialize(value) -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/value/partial_eq.rs s390-tools-2.33.1/rust-vendor/serde_json/src/value/partial_eq.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/value/partial_eq.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/value/partial_eq.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,103 +0,0 @@ -use super::Value; -use alloc::string::String; - -fn eq_i64(value: &Value, other: i64) -> bool { - value.as_i64().map_or(false, |i| i == other) -} - -fn eq_u64(value: &Value, other: u64) -> bool { - value.as_u64().map_or(false, |i| i == other) -} - -fn eq_f32(value: &Value, other: f32) -> bool { - match value { - Value::Number(n) => n.as_f32().map_or(false, |i| i == other), - _ => false, - } -} - -fn eq_f64(value: &Value, other: f64) -> bool { - value.as_f64().map_or(false, |i| i == other) -} - -fn eq_bool(value: &Value, other: bool) -> bool { - value.as_bool().map_or(false, |i| i == other) -} - -fn eq_str(value: &Value, other: &str) -> bool { - value.as_str().map_or(false, |i| i == other) -} - -impl PartialEq for Value { - fn eq(&self, other: &str) -> bool { - eq_str(self, other) - } -} - -impl<'a> PartialEq<&'a str> for Value { - fn eq(&self, other: &&str) -> bool { - eq_str(self, *other) - } -} - -impl PartialEq for str { - fn eq(&self, other: &Value) -> bool { - eq_str(other, self) - } -} - -impl<'a> PartialEq for &'a str { - fn eq(&self, other: &Value) -> bool { - eq_str(other, *self) - } -} - -impl PartialEq for Value { - fn eq(&self, other: &String) -> bool { - eq_str(self, other.as_str()) - } -} - -impl PartialEq for String { - fn eq(&self, other: &Value) -> bool { - eq_str(other, self.as_str()) - } -} - -macro_rules! 
partialeq_numeric { - ($($eq:ident [$($ty:ty)*])*) => { - $($( - impl PartialEq<$ty> for Value { - fn eq(&self, other: &$ty) -> bool { - $eq(self, *other as _) - } - } - - impl PartialEq for $ty { - fn eq(&self, other: &Value) -> bool { - $eq(other, *self as _) - } - } - - impl<'a> PartialEq<$ty> for &'a Value { - fn eq(&self, other: &$ty) -> bool { - $eq(*self, *other as _) - } - } - - impl<'a> PartialEq<$ty> for &'a mut Value { - fn eq(&self, other: &$ty) -> bool { - $eq(*self, *other as _) - } - } - )*)* - } -} - -partialeq_numeric! { - eq_i64[i8 i16 i32 i64 isize] - eq_u64[u8 u16 u32 u64 usize] - eq_f32[f32] - eq_f64[f64] - eq_bool[bool] -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_json/src/value/ser.rs s390-tools-2.33.1/rust-vendor/serde_json/src/value/ser.rs --- s390-tools-2.31.0/rust-vendor/serde_json/src/value/ser.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_json/src/value/ser.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1043 +0,0 @@ -use crate::error::{Error, ErrorCode, Result}; -use crate::map::Map; -use crate::value::{to_value, Value}; -use alloc::borrow::ToOwned; -use alloc::string::{String, ToString}; -use alloc::vec::Vec; -#[cfg(not(feature = "arbitrary_precision"))] -use core::convert::TryFrom; -use core::fmt::Display; -use core::result; -use serde::ser::{Impossible, Serialize}; - -impl Serialize for Value { - #[inline] - fn serialize(&self, serializer: S) -> result::Result - where - S: ::serde::Serializer, - { - match self { - Value::Null => serializer.serialize_unit(), - Value::Bool(b) => serializer.serialize_bool(*b), - Value::Number(n) => n.serialize(serializer), - Value::String(s) => serializer.serialize_str(s), - Value::Array(v) => v.serialize(serializer), - #[cfg(any(feature = "std", feature = "alloc"))] - Value::Object(m) => { - use serde::ser::SerializeMap; - let mut map = tri!(serializer.serialize_map(Some(m.len()))); - for (k, v) in m { - tri!(map.serialize_entry(k, v)); - } - map.end() - } - } - } -} 
- -/// Serializer whose output is a `Value`. -/// -/// This is the serializer that backs [`serde_json::to_value`][crate::to_value]. -/// Unlike the main serde_json serializer which goes from some serializable -/// value of type `T` to JSON text, this one goes from `T` to -/// `serde_json::Value`. -/// -/// The `to_value` function is implementable as: -/// -/// ``` -/// use serde::Serialize; -/// use serde_json::{Error, Value}; -/// -/// pub fn to_value(input: T) -> Result -/// where -/// T: Serialize, -/// { -/// input.serialize(serde_json::value::Serializer) -/// } -/// ``` -pub struct Serializer; - -impl serde::Serializer for Serializer { - type Ok = Value; - type Error = Error; - - type SerializeSeq = SerializeVec; - type SerializeTuple = SerializeVec; - type SerializeTupleStruct = SerializeVec; - type SerializeTupleVariant = SerializeTupleVariant; - type SerializeMap = SerializeMap; - type SerializeStruct = SerializeMap; - type SerializeStructVariant = SerializeStructVariant; - - #[inline] - fn serialize_bool(self, value: bool) -> Result { - Ok(Value::Bool(value)) - } - - #[inline] - fn serialize_i8(self, value: i8) -> Result { - self.serialize_i64(value as i64) - } - - #[inline] - fn serialize_i16(self, value: i16) -> Result { - self.serialize_i64(value as i64) - } - - #[inline] - fn serialize_i32(self, value: i32) -> Result { - self.serialize_i64(value as i64) - } - - fn serialize_i64(self, value: i64) -> Result { - Ok(Value::Number(value.into())) - } - - fn serialize_i128(self, value: i128) -> Result { - #[cfg(feature = "arbitrary_precision")] - { - Ok(Value::Number(value.into())) - } - - #[cfg(not(feature = "arbitrary_precision"))] - { - if let Ok(value) = u64::try_from(value) { - Ok(Value::Number(value.into())) - } else if let Ok(value) = i64::try_from(value) { - Ok(Value::Number(value.into())) - } else { - Err(Error::syntax(ErrorCode::NumberOutOfRange, 0, 0)) - } - } - } - - #[inline] - fn serialize_u8(self, value: u8) -> Result { - 
self.serialize_u64(value as u64) - } - - #[inline] - fn serialize_u16(self, value: u16) -> Result { - self.serialize_u64(value as u64) - } - - #[inline] - fn serialize_u32(self, value: u32) -> Result { - self.serialize_u64(value as u64) - } - - #[inline] - fn serialize_u64(self, value: u64) -> Result { - Ok(Value::Number(value.into())) - } - - fn serialize_u128(self, value: u128) -> Result { - #[cfg(feature = "arbitrary_precision")] - { - Ok(Value::Number(value.into())) - } - - #[cfg(not(feature = "arbitrary_precision"))] - { - if let Ok(value) = u64::try_from(value) { - Ok(Value::Number(value.into())) - } else { - Err(Error::syntax(ErrorCode::NumberOutOfRange, 0, 0)) - } - } - } - - #[inline] - fn serialize_f32(self, float: f32) -> Result { - Ok(Value::from(float)) - } - - #[inline] - fn serialize_f64(self, float: f64) -> Result { - Ok(Value::from(float)) - } - - #[inline] - fn serialize_char(self, value: char) -> Result { - let mut s = String::new(); - s.push(value); - Ok(Value::String(s)) - } - - #[inline] - fn serialize_str(self, value: &str) -> Result { - Ok(Value::String(value.to_owned())) - } - - fn serialize_bytes(self, value: &[u8]) -> Result { - let vec = value.iter().map(|&b| Value::Number(b.into())).collect(); - Ok(Value::Array(vec)) - } - - #[inline] - fn serialize_unit(self) -> Result { - Ok(Value::Null) - } - - #[inline] - fn serialize_unit_struct(self, _name: &'static str) -> Result { - self.serialize_unit() - } - - #[inline] - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - ) -> Result { - self.serialize_str(variant) - } - - #[inline] - fn serialize_newtype_struct(self, _name: &'static str, value: &T) -> Result - where - T: ?Sized + Serialize, - { - value.serialize(self) - } - - fn serialize_newtype_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - value: &T, - ) -> Result - where - T: ?Sized + Serialize, - { - let mut values = Map::new(); - 
values.insert(String::from(variant), tri!(to_value(value))); - Ok(Value::Object(values)) - } - - #[inline] - fn serialize_none(self) -> Result { - self.serialize_unit() - } - - #[inline] - fn serialize_some(self, value: &T) -> Result - where - T: ?Sized + Serialize, - { - value.serialize(self) - } - - fn serialize_seq(self, len: Option) -> Result { - Ok(SerializeVec { - vec: Vec::with_capacity(len.unwrap_or(0)), - }) - } - - fn serialize_tuple(self, len: usize) -> Result { - self.serialize_seq(Some(len)) - } - - fn serialize_tuple_struct( - self, - _name: &'static str, - len: usize, - ) -> Result { - self.serialize_seq(Some(len)) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - len: usize, - ) -> Result { - Ok(SerializeTupleVariant { - name: String::from(variant), - vec: Vec::with_capacity(len), - }) - } - - fn serialize_map(self, _len: Option) -> Result { - Ok(SerializeMap::Map { - map: Map::new(), - next_key: None, - }) - } - - fn serialize_struct(self, name: &'static str, len: usize) -> Result { - match name { - #[cfg(feature = "arbitrary_precision")] - crate::number::TOKEN => Ok(SerializeMap::Number { out_value: None }), - #[cfg(feature = "raw_value")] - crate::raw::TOKEN => Ok(SerializeMap::RawValue { out_value: None }), - _ => self.serialize_map(Some(len)), - } - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - _len: usize, - ) -> Result { - Ok(SerializeStructVariant { - name: String::from(variant), - map: Map::new(), - }) - } - - fn collect_str(self, value: &T) -> Result - where - T: ?Sized + Display, - { - Ok(Value::String(value.to_string())) - } -} - -pub struct SerializeVec { - vec: Vec, -} - -pub struct SerializeTupleVariant { - name: String, - vec: Vec, -} - -pub enum SerializeMap { - Map { - map: Map, - next_key: Option, - }, - #[cfg(feature = "arbitrary_precision")] - Number { out_value: Option }, - #[cfg(feature = 
"raw_value")] - RawValue { out_value: Option }, -} - -pub struct SerializeStructVariant { - name: String, - map: Map, -} - -impl serde::ser::SerializeSeq for SerializeVec { - type Ok = Value; - type Error = Error; - - fn serialize_element(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - self.vec.push(tri!(to_value(value))); - Ok(()) - } - - fn end(self) -> Result { - Ok(Value::Array(self.vec)) - } -} - -impl serde::ser::SerializeTuple for SerializeVec { - type Ok = Value; - type Error = Error; - - fn serialize_element(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - serde::ser::SerializeSeq::serialize_element(self, value) - } - - fn end(self) -> Result { - serde::ser::SerializeSeq::end(self) - } -} - -impl serde::ser::SerializeTupleStruct for SerializeVec { - type Ok = Value; - type Error = Error; - - fn serialize_field(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - serde::ser::SerializeSeq::serialize_element(self, value) - } - - fn end(self) -> Result { - serde::ser::SerializeSeq::end(self) - } -} - -impl serde::ser::SerializeTupleVariant for SerializeTupleVariant { - type Ok = Value; - type Error = Error; - - fn serialize_field(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - self.vec.push(tri!(to_value(value))); - Ok(()) - } - - fn end(self) -> Result { - let mut object = Map::new(); - - object.insert(self.name, Value::Array(self.vec)); - - Ok(Value::Object(object)) - } -} - -impl serde::ser::SerializeMap for SerializeMap { - type Ok = Value; - type Error = Error; - - fn serialize_key(&mut self, key: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - match self { - SerializeMap::Map { next_key, .. } => { - *next_key = Some(tri!(key.serialize(MapKeySerializer))); - Ok(()) - } - #[cfg(feature = "arbitrary_precision")] - SerializeMap::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - SerializeMap::RawValue { .. 
} => unreachable!(), - } - } - - fn serialize_value(&mut self, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - match self { - SerializeMap::Map { map, next_key } => { - let key = next_key.take(); - // Panic because this indicates a bug in the program rather than an - // expected failure. - let key = key.expect("serialize_value called before serialize_key"); - map.insert(key, tri!(to_value(value))); - Ok(()) - } - #[cfg(feature = "arbitrary_precision")] - SerializeMap::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - SerializeMap::RawValue { .. } => unreachable!(), - } - } - - fn end(self) -> Result { - match self { - SerializeMap::Map { map, .. } => Ok(Value::Object(map)), - #[cfg(feature = "arbitrary_precision")] - SerializeMap::Number { .. } => unreachable!(), - #[cfg(feature = "raw_value")] - SerializeMap::RawValue { .. } => unreachable!(), - } - } -} - -struct MapKeySerializer; - -fn key_must_be_a_string() -> Error { - Error::syntax(ErrorCode::KeyMustBeAString, 0, 0) -} - -impl serde::Serializer for MapKeySerializer { - type Ok = String; - type Error = Error; - - type SerializeSeq = Impossible; - type SerializeTuple = Impossible; - type SerializeTupleStruct = Impossible; - type SerializeTupleVariant = Impossible; - type SerializeMap = Impossible; - type SerializeStruct = Impossible; - type SerializeStructVariant = Impossible; - - #[inline] - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - ) -> Result { - Ok(variant.to_owned()) - } - - #[inline] - fn serialize_newtype_struct(self, _name: &'static str, value: &T) -> Result - where - T: ?Sized + Serialize, - { - value.serialize(self) - } - - fn serialize_bool(self, _value: bool) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_i8(self, value: i8) -> Result { - Ok(value.to_string()) - } - - fn serialize_i16(self, value: i16) -> Result { - Ok(value.to_string()) - } - - fn serialize_i32(self, value: i32) -> 
Result { - Ok(value.to_string()) - } - - fn serialize_i64(self, value: i64) -> Result { - Ok(value.to_string()) - } - - fn serialize_u8(self, value: u8) -> Result { - Ok(value.to_string()) - } - - fn serialize_u16(self, value: u16) -> Result { - Ok(value.to_string()) - } - - fn serialize_u32(self, value: u32) -> Result { - Ok(value.to_string()) - } - - fn serialize_u64(self, value: u64) -> Result { - Ok(value.to_string()) - } - - fn serialize_f32(self, _value: f32) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_f64(self, _value: f64) -> Result { - Err(key_must_be_a_string()) - } - - #[inline] - fn serialize_char(self, value: char) -> Result { - Ok({ - let mut s = String::new(); - s.push(value); - s - }) - } - - #[inline] - fn serialize_str(self, value: &str) -> Result { - Ok(value.to_owned()) - } - - fn serialize_bytes(self, _value: &[u8]) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_unit(self) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_unit_struct(self, _name: &'static str) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_newtype_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _value: &T, - ) -> Result - where - T: ?Sized + Serialize, - { - Err(key_must_be_a_string()) - } - - fn serialize_none(self) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_some(self, _value: &T) -> Result - where - T: ?Sized + Serialize, - { - Err(key_must_be_a_string()) - } - - fn serialize_seq(self, _len: Option) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_tuple(self, _len: usize) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(key_must_be_a_string()) - } - - fn 
serialize_map(self, _len: Option) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { - Err(key_must_be_a_string()) - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(key_must_be_a_string()) - } - - fn collect_str(self, value: &T) -> Result - where - T: ?Sized + Display, - { - Ok(value.to_string()) - } -} - -impl serde::ser::SerializeStruct for SerializeMap { - type Ok = Value; - type Error = Error; - - fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - match self { - SerializeMap::Map { .. } => serde::ser::SerializeMap::serialize_entry(self, key, value), - #[cfg(feature = "arbitrary_precision")] - SerializeMap::Number { out_value } => { - if key == crate::number::TOKEN { - *out_value = Some(value.serialize(NumberValueEmitter)?); - Ok(()) - } else { - Err(invalid_number()) - } - } - #[cfg(feature = "raw_value")] - SerializeMap::RawValue { out_value } => { - if key == crate::raw::TOKEN { - *out_value = Some(value.serialize(RawValueEmitter)?); - Ok(()) - } else { - Err(invalid_raw_value()) - } - } - } - } - - fn end(self) -> Result { - match self { - SerializeMap::Map { .. } => serde::ser::SerializeMap::end(self), - #[cfg(feature = "arbitrary_precision")] - SerializeMap::Number { out_value, .. } => { - Ok(out_value.expect("number value was not emitted")) - } - #[cfg(feature = "raw_value")] - SerializeMap::RawValue { out_value, .. 
} => { - Ok(out_value.expect("raw value was not emitted")) - } - } - } -} - -impl serde::ser::SerializeStructVariant for SerializeStructVariant { - type Ok = Value; - type Error = Error; - - fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - self.map.insert(String::from(key), tri!(to_value(value))); - Ok(()) - } - - fn end(self) -> Result { - let mut object = Map::new(); - - object.insert(self.name, Value::Object(self.map)); - - Ok(Value::Object(object)) - } -} - -#[cfg(feature = "arbitrary_precision")] -struct NumberValueEmitter; - -#[cfg(feature = "arbitrary_precision")] -fn invalid_number() -> Error { - Error::syntax(ErrorCode::InvalidNumber, 0, 0) -} - -#[cfg(feature = "arbitrary_precision")] -impl serde::ser::Serializer for NumberValueEmitter { - type Ok = Value; - type Error = Error; - - type SerializeSeq = Impossible; - type SerializeTuple = Impossible; - type SerializeTupleStruct = Impossible; - type SerializeTupleVariant = Impossible; - type SerializeMap = Impossible; - type SerializeStruct = Impossible; - type SerializeStructVariant = Impossible; - - fn serialize_bool(self, _v: bool) -> Result { - Err(invalid_number()) - } - - fn serialize_i8(self, _v: i8) -> Result { - Err(invalid_number()) - } - - fn serialize_i16(self, _v: i16) -> Result { - Err(invalid_number()) - } - - fn serialize_i32(self, _v: i32) -> Result { - Err(invalid_number()) - } - - fn serialize_i64(self, _v: i64) -> Result { - Err(invalid_number()) - } - - fn serialize_u8(self, _v: u8) -> Result { - Err(invalid_number()) - } - - fn serialize_u16(self, _v: u16) -> Result { - Err(invalid_number()) - } - - fn serialize_u32(self, _v: u32) -> Result { - Err(invalid_number()) - } - - fn serialize_u64(self, _v: u64) -> Result { - Err(invalid_number()) - } - - fn serialize_f32(self, _v: f32) -> Result { - Err(invalid_number()) - } - - fn serialize_f64(self, _v: f64) -> Result { - Err(invalid_number()) - } - - fn serialize_char(self, 
_v: char) -> Result { - Err(invalid_number()) - } - - fn serialize_str(self, value: &str) -> Result { - let n = tri!(value.to_owned().parse()); - Ok(Value::Number(n)) - } - - fn serialize_bytes(self, _value: &[u8]) -> Result { - Err(invalid_number()) - } - - fn serialize_none(self) -> Result { - Err(invalid_number()) - } - - fn serialize_some(self, _value: &T) -> Result - where - T: ?Sized + Serialize, - { - Err(invalid_number()) - } - - fn serialize_unit(self) -> Result { - Err(invalid_number()) - } - - fn serialize_unit_struct(self, _name: &'static str) -> Result { - Err(invalid_number()) - } - - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - ) -> Result { - Err(invalid_number()) - } - - fn serialize_newtype_struct(self, _name: &'static str, _value: &T) -> Result - where - T: ?Sized + Serialize, - { - Err(invalid_number()) - } - - fn serialize_newtype_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _value: &T, - ) -> Result - where - T: ?Sized + Serialize, - { - Err(invalid_number()) - } - - fn serialize_seq(self, _len: Option) -> Result { - Err(invalid_number()) - } - - fn serialize_tuple(self, _len: usize) -> Result { - Err(invalid_number()) - } - - fn serialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(invalid_number()) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(invalid_number()) - } - - fn serialize_map(self, _len: Option) -> Result { - Err(invalid_number()) - } - - fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { - Err(invalid_number()) - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(invalid_number()) - } -} - -#[cfg(feature = "raw_value")] -struct RawValueEmitter; - -#[cfg(feature 
= "raw_value")] -fn invalid_raw_value() -> Error { - Error::syntax(ErrorCode::ExpectedSomeValue, 0, 0) -} - -#[cfg(feature = "raw_value")] -impl serde::ser::Serializer for RawValueEmitter { - type Ok = Value; - type Error = Error; - - type SerializeSeq = Impossible; - type SerializeTuple = Impossible; - type SerializeTupleStruct = Impossible; - type SerializeTupleVariant = Impossible; - type SerializeMap = Impossible; - type SerializeStruct = Impossible; - type SerializeStructVariant = Impossible; - - fn serialize_bool(self, _v: bool) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_i8(self, _v: i8) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_i16(self, _v: i16) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_i32(self, _v: i32) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_i64(self, _v: i64) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_u8(self, _v: u8) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_u16(self, _v: u16) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_u32(self, _v: u32) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_u64(self, _v: u64) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_f32(self, _v: f32) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_f64(self, _v: f64) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_char(self, _v: char) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_str(self, value: &str) -> Result { - crate::from_str(value) - } - - fn serialize_bytes(self, _value: &[u8]) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_none(self) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_some(self, _value: &T) -> Result - where - T: ?Sized + Serialize, - { - Err(invalid_raw_value()) - } - - fn serialize_unit(self) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_unit_struct(self, _name: &'static str) -> Result { - Err(invalid_raw_value()) - } - - fn 
serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - ) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_newtype_struct(self, _name: &'static str, _value: &T) -> Result - where - T: ?Sized + Serialize, - { - Err(invalid_raw_value()) - } - - fn serialize_newtype_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _value: &T, - ) -> Result - where - T: ?Sized + Serialize, - { - Err(invalid_raw_value()) - } - - fn serialize_seq(self, _len: Option) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_tuple(self, _len: usize) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_map(self, _len: Option) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { - Err(invalid_raw_value()) - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(invalid_raw_value()) - } - - fn collect_str(self, value: &T) -> Result - where - T: ?Sized + Display, - { - self.serialize_str(&value.to_string()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/serde_urlencoded/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"} \ No newline at end of file diff -Nru 
s390-tools-2.31.0/rust-vendor/serde_urlencoded/Cargo.toml s390-tools-2.33.1/rust-vendor/serde_urlencoded/Cargo.toml --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "serde_urlencoded" -version = "0.7.1" -authors = ["Anthony Ramine "] -exclude = ["/.travis.yml", "/bors.toml"] -description = "`x-www-form-urlencoded` meets Serde" -documentation = "https://docs.rs/serde_urlencoded/0.7.1/serde_urlencoded/" -keywords = ["serde", "serialization", "urlencoded"] -categories = ["encoding", "web-programming"] -license = "MIT/Apache-2.0" -repository = "https://github.com/nox/serde_urlencoded" - -[lib] -test = false -[dependencies.form_urlencoded] -version = "1" - -[dependencies.itoa] -version = "1" - -[dependencies.ryu] -version = "1" - -[dependencies.serde] -version = "1.0.69" -[dev-dependencies.serde_derive] -version = "1" -[badges.travis-ci] -repository = "nox/serde_urlencoded" diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/serde_urlencoded/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/LICENSE-APACHE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - 
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/LICENSE-MIT s390-tools-2.33.1/rust-vendor/serde_urlencoded/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/LICENSE-MIT 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2016 Anthony Ramine - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/README.md s390-tools-2.33.1/rust-vendor/serde_urlencoded/README.md --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,53 +0,0 @@ -`x-www-form-urlencoded` meets Serde -=================================== - -This crate is a Rust library for serialising to and deserialising from -the [`application/x-www-form-urlencoded`][urlencoded] format. It is built -upon [Serde], a high performance generic serialization framework and [rust-url], -a URL parser for Rust. - -[rust-url]: https://github.com/servo/rust-url -[Serde]: https://github.com/serde-rs/serde -[urlencoded]: https://url.spec.whatwg.org/#application/x-www-form-urlencoded - -Installation -============ - -This crate works with Cargo and can be found on -[crates.io] with a `Cargo.toml` like: - -```toml -[dependencies] -serde_urlencoded = "0.7" -``` - -The documentation is available on [docs.rs]. - -[crates.io]: https://crates.io/crates/serde_urlencoded -[docs.rs]: https://docs.rs/serde_urlencoded/0.7.1/serde_urlencoded/ - -## Getting help - -Serde developers live in the #serde channel on -[`irc.mozilla.org`](https://wiki.mozilla.org/IRC) and most rust-url developers -live in the #servo one. The #rust channel is also a good resource with generally -faster response time but less specific knowledge about Serde, rust-url or this -crate. If IRC is not your thing, we are happy to respond to [GitHub -issues](https://github.com/nox/serde_urlencoded/issues/new) as well. - -## License - -serde_urlencoded is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - http://opensource.org/licenses/MIT) - -at your option. 
- -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in serde_urlencoded by you, as defined in the Apache-2.0 license, -shall be dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/rustfmt.toml s390-tools-2.33.1/rust-vendor/serde_urlencoded/rustfmt.toml --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/rustfmt.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/rustfmt.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,5 +0,0 @@ -match_block_trailing_comma = false -max_width = 80 -newline_style = "Unix" -reorder_imports = true -use_try_shorthand = true diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/de.rs s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/de.rs --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/de.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/de.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,321 +0,0 @@ -//! Deserialization support for the `application/x-www-form-urlencoded` format. - -use form_urlencoded::parse; -use form_urlencoded::Parse as UrlEncodedParse; -use serde::de::value::MapDeserializer; -use serde::de::Error as de_Error; -use serde::de::{self, IntoDeserializer}; -use serde::forward_to_deserialize_any; -use std::borrow::Cow; -use std::io::Read; - -#[doc(inline)] -pub use serde::de::value::Error; - -/// Deserializes a `application/x-www-form-urlencoded` value from a `&[u8]`. 
-/// -/// ``` -/// let meal = vec![ -/// ("bread".to_owned(), "baguette".to_owned()), -/// ("cheese".to_owned(), "comté".to_owned()), -/// ("meat".to_owned(), "ham".to_owned()), -/// ("fat".to_owned(), "butter".to_owned()), -/// ]; -/// -/// assert_eq!( -/// serde_urlencoded::from_bytes::>( -/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter"), -/// Ok(meal)); -/// ``` -pub fn from_bytes<'de, T>(input: &'de [u8]) -> Result -where - T: de::Deserialize<'de>, -{ - T::deserialize(Deserializer::new(parse(input))) -} - -/// Deserializes a `application/x-www-form-urlencoded` value from a `&str`. -/// -/// ``` -/// let meal = vec![ -/// ("bread".to_owned(), "baguette".to_owned()), -/// ("cheese".to_owned(), "comté".to_owned()), -/// ("meat".to_owned(), "ham".to_owned()), -/// ("fat".to_owned(), "butter".to_owned()), -/// ]; -/// -/// assert_eq!( -/// serde_urlencoded::from_str::>( -/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter"), -/// Ok(meal)); -/// ``` -pub fn from_str<'de, T>(input: &'de str) -> Result -where - T: de::Deserialize<'de>, -{ - from_bytes(input.as_bytes()) -} - -/// Convenience function that reads all bytes from `reader` and deserializes -/// them with `from_bytes`. -pub fn from_reader(mut reader: R) -> Result -where - T: de::DeserializeOwned, - R: Read, -{ - let mut buf = vec![]; - reader.read_to_end(&mut buf).map_err(|e| { - de::Error::custom(format_args!("could not read input: {}", e)) - })?; - from_bytes(&buf) -} - -/// A deserializer for the `application/x-www-form-urlencoded` format. -/// -/// * Supported top-level outputs are structs, maps and sequences of pairs, -/// with or without a given length. -/// -/// * Main `deserialize` methods defers to `deserialize_map`. -/// -/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size` -/// defers to `deserialize`. 
-pub struct Deserializer<'de> { - inner: MapDeserializer<'de, PartIterator<'de>, Error>, -} - -impl<'de> Deserializer<'de> { - /// Returns a new `Deserializer`. - pub fn new(parser: UrlEncodedParse<'de>) -> Self { - Deserializer { - inner: MapDeserializer::new(PartIterator(parser)), - } - } -} - -impl<'de> de::Deserializer<'de> for Deserializer<'de> { - type Error = Error; - - fn deserialize_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_map(visitor) - } - - fn deserialize_map(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_map(self.inner) - } - - fn deserialize_seq(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_seq(self.inner) - } - - fn deserialize_unit(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.inner.end()?; - visitor.visit_unit() - } - - forward_to_deserialize_any! { - bool - u8 - u16 - u32 - u64 - i8 - i16 - i32 - i64 - f32 - f64 - char - str - string - option - bytes - byte_buf - unit_struct - newtype_struct - tuple_struct - struct - identifier - tuple - enum - ignored_any - } -} - -struct PartIterator<'de>(UrlEncodedParse<'de>); - -impl<'de> Iterator for PartIterator<'de> { - type Item = (Part<'de>, Part<'de>); - - fn next(&mut self) -> Option { - self.0.next().map(|(k, v)| (Part(k), Part(v))) - } -} - -struct Part<'de>(Cow<'de, str>); - -impl<'de> IntoDeserializer<'de> for Part<'de> { - type Deserializer = Self; - - fn into_deserializer(self) -> Self::Deserializer { - self - } -} - -macro_rules! 
forward_parsed_value { - ($($ty:ident => $method:ident,)*) => { - $( - fn $method(self, visitor: V) -> Result - where V: de::Visitor<'de> - { - match self.0.parse::<$ty>() { - Ok(val) => val.into_deserializer().$method(visitor), - Err(e) => Err(de::Error::custom(e)) - } - } - )* - } -} - -impl<'de> de::Deserializer<'de> for Part<'de> { - type Error = Error; - - fn deserialize_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - match self.0 { - Cow::Borrowed(value) => visitor.visit_borrowed_str(value), - Cow::Owned(value) => visitor.visit_string(value), - } - } - - fn deserialize_option(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_some(self) - } - - fn deserialize_enum( - self, - _name: &'static str, - _variants: &'static [&'static str], - visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_enum(ValueEnumAccess(self.0)) - } - - fn deserialize_newtype_struct( - self, - _name: &'static str, - visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_newtype_struct(self) - } - - forward_to_deserialize_any! { - char - str - string - unit - bytes - byte_buf - unit_struct - tuple_struct - struct - identifier - tuple - ignored_any - seq - map - } - - forward_parsed_value! 
{ - bool => deserialize_bool, - u8 => deserialize_u8, - u16 => deserialize_u16, - u32 => deserialize_u32, - u64 => deserialize_u64, - i8 => deserialize_i8, - i16 => deserialize_i16, - i32 => deserialize_i32, - i64 => deserialize_i64, - f32 => deserialize_f32, - f64 => deserialize_f64, - } -} - -struct ValueEnumAccess<'de>(Cow<'de, str>); - -impl<'de> de::EnumAccess<'de> for ValueEnumAccess<'de> { - type Error = Error; - type Variant = UnitOnlyVariantAccess; - - fn variant_seed( - self, - seed: V, - ) -> Result<(V::Value, Self::Variant), Self::Error> - where - V: de::DeserializeSeed<'de>, - { - let variant = seed.deserialize(self.0.into_deserializer())?; - Ok((variant, UnitOnlyVariantAccess)) - } -} - -struct UnitOnlyVariantAccess; - -impl<'de> de::VariantAccess<'de> for UnitOnlyVariantAccess { - type Error = Error; - - fn unit_variant(self) -> Result<(), Self::Error> { - Ok(()) - } - - fn newtype_variant_seed(self, _seed: T) -> Result - where - T: de::DeserializeSeed<'de>, - { - Err(Error::custom("expected unit variant")) - } - - fn tuple_variant( - self, - _len: usize, - _visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - Err(Error::custom("expected unit variant")) - } - - fn struct_variant( - self, - _fields: &'static [&'static str], - _visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - Err(Error::custom("expected unit variant")) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/lib.rs s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/lib.rs --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -//! 
`x-www-form-urlencoded` meets Serde - -#![warn(unused_extern_crates)] -#![forbid(unsafe_code)] - -pub mod de; -pub mod ser; - -#[doc(inline)] -pub use crate::de::{from_bytes, from_reader, from_str, Deserializer}; -#[doc(inline)] -pub use crate::ser::{to_string, Serializer}; diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/key.rs s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/key.rs --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/key.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/key.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,77 +0,0 @@ -use crate::ser::part::Sink; -use crate::ser::Error; -use serde::Serialize; -use std::borrow::Cow; -use std::ops::Deref; - -pub enum Key<'key> { - Static(&'static str), - Dynamic(Cow<'key, str>), -} - -impl<'key> Deref for Key<'key> { - type Target = str; - - fn deref(&self) -> &str { - match *self { - Key::Static(key) => key, - Key::Dynamic(ref key) => key, - } - } -} - -impl<'key> From> for Cow<'static, str> { - fn from(key: Key<'key>) -> Self { - match key { - Key::Static(key) => key.into(), - Key::Dynamic(key) => key.into_owned().into(), - } - } -} - -pub struct KeySink { - end: End, -} - -impl KeySink -where - End: for<'key> FnOnce(Key<'key>) -> Result, -{ - pub fn new(end: End) -> Self { - KeySink { end } - } -} - -impl Sink for KeySink -where - End: for<'key> FnOnce(Key<'key>) -> Result, -{ - type Ok = Ok; - - fn serialize_static_str(self, value: &'static str) -> Result { - (self.end)(Key::Static(value)) - } - - fn serialize_str(self, value: &str) -> Result { - (self.end)(Key::Dynamic(value.into())) - } - - fn serialize_string(self, value: String) -> Result { - (self.end)(Key::Dynamic(value.into())) - } - - fn serialize_none(self) -> Result { - Err(self.unsupported()) - } - - fn serialize_some( - self, - _value: &T, - ) -> Result { - Err(self.unsupported()) - } - - fn unsupported(self) -> Error { - Error::Custom("unsupported key".into()) - 
} -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/mod.rs s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/mod.rs --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,555 +0,0 @@ -//! Serialization support for the `application/x-www-form-urlencoded` format. - -mod key; -mod pair; -mod part; -mod value; - -use form_urlencoded::Serializer as UrlEncodedSerializer; -use form_urlencoded::Target as UrlEncodedTarget; -use serde::ser; -use std::borrow::Cow; -use std::error; -use std::fmt; -use std::str; - -/// Serializes a value into a `application/x-www-form-urlencoded` `String` buffer. -/// -/// ``` -/// let meal = &[ -/// ("bread", "baguette"), -/// ("cheese", "comté"), -/// ("meat", "ham"), -/// ("fat", "butter"), -/// ]; -/// -/// assert_eq!( -/// serde_urlencoded::to_string(meal), -/// Ok("bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter".to_owned())); -/// ``` -pub fn to_string(input: T) -> Result { - let mut urlencoder = UrlEncodedSerializer::new("".to_owned()); - input.serialize(Serializer::new(&mut urlencoder))?; - Ok(urlencoder.finish()) -} - -/// A serializer for the `application/x-www-form-urlencoded` format. -/// -/// * Supported top-level inputs are structs, maps and sequences of pairs, -/// with or without a given length. -/// -/// * Supported keys and values are integers, bytes (if convertible to strings), -/// unit structs and unit variants. -/// -/// * Newtype structs defer to their inner values. -pub struct Serializer<'input, 'output, Target: UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, -} - -impl<'input, 'output, Target: 'output + UrlEncodedTarget> - Serializer<'input, 'output, Target> -{ - /// Returns a new `Serializer`. 
- pub fn new( - urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, - ) -> Self { - Serializer { urlencoder } - } -} - -/// Errors returned during serializing to `application/x-www-form-urlencoded`. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum Error { - Custom(Cow<'static, str>), - Utf8(str::Utf8Error), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Error::Custom(ref msg) => msg.fmt(f), - Error::Utf8(ref err) => write!(f, "invalid UTF-8: {}", err), - } - } -} - -impl error::Error for Error { - fn description(&self) -> &str { - match *self { - Error::Custom(ref msg) => msg, - Error::Utf8(ref err) => error::Error::description(err), - } - } - - /// The lower-level cause of this error, in the case of a `Utf8` error. - fn cause(&self) -> Option<&dyn error::Error> { - match *self { - Error::Custom(_) => None, - Error::Utf8(ref err) => Some(err), - } - } - - /// The lower-level source of this error, in the case of a `Utf8` error. - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match *self { - Error::Custom(_) => None, - Error::Utf8(ref err) => Some(err), - } - } -} - -impl ser::Error for Error { - fn custom(msg: T) -> Self { - Error::Custom(format!("{}", msg).into()) - } -} - -/// Sequence serializer. -pub struct SeqSerializer<'input, 'output, Target: UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, -} - -/// Tuple serializer. -/// -/// Mostly used for arrays. -pub struct TupleSerializer<'input, 'output, Target: UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, -} - -/// Tuple struct serializer. -/// -/// Never instantiated, tuple structs are not supported. -pub struct TupleStructSerializer<'input, 'output, T: UrlEncodedTarget> { - inner: ser::Impossible<&'output mut UrlEncodedSerializer<'input, T>, Error>, -} - -/// Tuple variant serializer. 
-/// -/// Never instantiated, tuple variants are not supported. -pub struct TupleVariantSerializer<'input, 'output, T: UrlEncodedTarget> { - inner: ser::Impossible<&'output mut UrlEncodedSerializer<'input, T>, Error>, -} - -/// Map serializer. -pub struct MapSerializer<'input, 'output, Target: UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, - key: Option>, -} - -/// Struct serializer. -pub struct StructSerializer<'input, 'output, Target: UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer<'input, Target>, -} - -/// Struct variant serializer. -/// -/// Never instantiated, struct variants are not supported. -pub struct StructVariantSerializer<'input, 'output, T: UrlEncodedTarget> { - inner: ser::Impossible<&'output mut UrlEncodedSerializer<'input, T>, Error>, -} - -impl<'input, 'output, Target> ser::Serializer - for Serializer<'input, 'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer<'input, Target>; - type Error = Error; - type SerializeSeq = SeqSerializer<'input, 'output, Target>; - type SerializeTuple = TupleSerializer<'input, 'output, Target>; - type SerializeTupleStruct = TupleStructSerializer<'input, 'output, Target>; - type SerializeTupleVariant = - TupleVariantSerializer<'input, 'output, Target>; - type SerializeMap = MapSerializer<'input, 'output, Target>; - type SerializeStruct = StructSerializer<'input, 'output, Target>; - type SerializeStructVariant = - StructVariantSerializer<'input, 'output, Target>; - - /// Returns an error. - fn serialize_bool(self, _v: bool) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_i8(self, _v: i8) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_i16(self, _v: i16) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_i32(self, _v: i32) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. 
- fn serialize_i64(self, _v: i64) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_u8(self, _v: u8) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_u16(self, _v: u16) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_u32(self, _v: u32) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_u64(self, _v: u64) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_f32(self, _v: f32) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_f64(self, _v: f64) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_char(self, _v: char) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_str(self, _value: &str) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_bytes(self, _value: &[u8]) -> Result { - Err(Error::top_level()) - } - - /// Returns `Ok`. - fn serialize_unit(self) -> Result { - Ok(self.urlencoder) - } - - /// Returns `Ok`. - fn serialize_unit_struct( - self, - _name: &'static str, - ) -> Result { - Ok(self.urlencoder) - } - - /// Returns an error. - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - ) -> Result { - Err(Error::top_level()) - } - - /// Serializes the inner value, ignoring the newtype name. - fn serialize_newtype_struct( - self, - _name: &'static str, - value: &T, - ) -> Result { - value.serialize(self) - } - - /// Returns an error. - fn serialize_newtype_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _value: &T, - ) -> Result { - Err(Error::top_level()) - } - - /// Returns `Ok`. - fn serialize_none(self) -> Result { - Ok(self.urlencoder) - } - - /// Serializes the given value. 
- fn serialize_some( - self, - value: &T, - ) -> Result { - value.serialize(self) - } - - /// Serialize a sequence, given length (if any) is ignored. - fn serialize_seq( - self, - _len: Option, - ) -> Result { - Ok(SeqSerializer { - urlencoder: self.urlencoder, - }) - } - - /// Returns an error. - fn serialize_tuple( - self, - _len: usize, - ) -> Result { - Ok(TupleSerializer { - urlencoder: self.urlencoder, - }) - } - - /// Returns an error. - fn serialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(Error::top_level()) - } - - /// Serializes a map, given length is ignored. - fn serialize_map( - self, - _len: Option, - ) -> Result { - Ok(MapSerializer { - urlencoder: self.urlencoder, - key: None, - }) - } - - /// Serializes a struct, given length is ignored. - fn serialize_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Ok(StructSerializer { - urlencoder: self.urlencoder, - }) - } - - /// Returns an error. 
- fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(Error::top_level()) - } -} - -impl<'input, 'output, Target> ser::SerializeSeq - for SeqSerializer<'input, 'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer<'input, Target>; - type Error = Error; - - fn serialize_element( - &mut self, - value: &T, - ) -> Result<(), Error> { - value.serialize(pair::PairSerializer::new(self.urlencoder)) - } - - fn end(self) -> Result { - Ok(self.urlencoder) - } -} - -impl<'input, 'output, Target> ser::SerializeTuple - for TupleSerializer<'input, 'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer<'input, Target>; - type Error = Error; - - fn serialize_element( - &mut self, - value: &T, - ) -> Result<(), Error> { - value.serialize(pair::PairSerializer::new(self.urlencoder)) - } - - fn end(self) -> Result { - Ok(self.urlencoder) - } -} - -impl<'input, 'output, Target> ser::SerializeTupleStruct - for TupleStructSerializer<'input, 'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer<'input, Target>; - type Error = Error; - - fn serialize_field( - &mut self, - value: &T, - ) -> Result<(), Error> { - self.inner.serialize_field(value) - } - - fn end(self) -> Result { - self.inner.end() - } -} - -impl<'input, 'output, Target> ser::SerializeTupleVariant - for TupleVariantSerializer<'input, 'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer<'input, Target>; - type Error = Error; - - fn serialize_field( - &mut self, - value: &T, - ) -> Result<(), Error> { - self.inner.serialize_field(value) - } - - fn end(self) -> Result { - self.inner.end() - } -} - -impl<'input, 'output, Target> ser::SerializeMap - for MapSerializer<'input, 'output, Target> -where - Target: 'output 
+ UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer<'input, Target>; - type Error = Error; - - fn serialize_entry< - K: ?Sized + ser::Serialize, - V: ?Sized + ser::Serialize, - >( - &mut self, - key: &K, - value: &V, - ) -> Result<(), Error> { - let key_sink = key::KeySink::new(|key| { - let value_sink = value::ValueSink::new(self.urlencoder, &key); - value.serialize(part::PartSerializer::new(value_sink))?; - self.key = None; - Ok(()) - }); - let entry_serializer = part::PartSerializer::new(key_sink); - key.serialize(entry_serializer) - } - - fn serialize_key( - &mut self, - key: &T, - ) -> Result<(), Error> { - let key_sink = key::KeySink::new(|key| Ok(key.into())); - let key_serializer = part::PartSerializer::new(key_sink); - self.key = Some(key.serialize(key_serializer)?); - Ok(()) - } - - fn serialize_value( - &mut self, - value: &T, - ) -> Result<(), Error> { - { - let key = self.key.as_ref().ok_or_else(Error::no_key)?; - let value_sink = value::ValueSink::new(self.urlencoder, &key); - value.serialize(part::PartSerializer::new(value_sink))?; - } - self.key = None; - Ok(()) - } - - fn end(self) -> Result { - Ok(self.urlencoder) - } -} - -impl<'input, 'output, Target> ser::SerializeStruct - for StructSerializer<'input, 'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer<'input, Target>; - type Error = Error; - - fn serialize_field( - &mut self, - key: &'static str, - value: &T, - ) -> Result<(), Error> { - let value_sink = value::ValueSink::new(self.urlencoder, key); - value.serialize(part::PartSerializer::new(value_sink)) - } - - fn end(self) -> Result { - Ok(self.urlencoder) - } -} - -impl<'input, 'output, Target> ser::SerializeStructVariant - for StructVariantSerializer<'input, 'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer<'input, Target>; - type Error = Error; - - fn serialize_field( - &mut self, - key: &'static str, - 
value: &T, - ) -> Result<(), Error> { - self.inner.serialize_field(key, value) - } - - fn end(self) -> Result { - self.inner.end() - } -} - -impl Error { - fn top_level() -> Self { - let msg = "top-level serializer supports only maps and structs"; - Error::Custom(msg.into()) - } - - fn no_key() -> Self { - let msg = "tried to serialize a value before serializing key"; - Error::Custom(msg.into()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/pair.rs s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/pair.rs --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/pair.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/pair.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,271 +0,0 @@ -use crate::ser::key::KeySink; -use crate::ser::part::PartSerializer; -use crate::ser::value::ValueSink; -use crate::ser::Error; -use form_urlencoded::Serializer as UrlEncodedSerializer; -use form_urlencoded::Target as UrlEncodedTarget; -use serde::ser; -use std::borrow::Cow; -use std::mem; - -pub struct PairSerializer<'input, 'target, Target: UrlEncodedTarget> { - urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, - state: PairState, -} - -impl<'input, 'target, Target> PairSerializer<'input, 'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - pub fn new( - urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, - ) -> Self { - PairSerializer { - urlencoder, - state: PairState::WaitingForKey, - } - } -} - -impl<'input, 'target, Target> ser::Serializer - for PairSerializer<'input, 'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - type Ok = (); - type Error = Error; - type SerializeSeq = ser::Impossible<(), Error>; - type SerializeTuple = Self; - type SerializeTupleStruct = ser::Impossible<(), Error>; - type SerializeTupleVariant = ser::Impossible<(), Error>; - type SerializeMap = ser::Impossible<(), Error>; - type SerializeStruct = ser::Impossible<(), Error>; - 
type SerializeStructVariant = ser::Impossible<(), Error>; - - fn serialize_bool(self, _v: bool) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_i8(self, _v: i8) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_i16(self, _v: i16) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_i32(self, _v: i32) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_i64(self, _v: i64) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_u8(self, _v: u8) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_u16(self, _v: u16) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_u32(self, _v: u32) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_u64(self, _v: u64) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_f32(self, _v: f32) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_f64(self, _v: f64) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_char(self, _v: char) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_str(self, _value: &str) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_bytes(self, _value: &[u8]) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_unit(self) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_unit_struct(self, _name: &'static str) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - ) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_newtype_struct( - self, - _name: &'static str, - value: &T, - ) -> Result<(), Error> { - value.serialize(self) - } - - fn serialize_newtype_variant( - self, - _name: &'static str, - 
_variant_index: u32, - _variant: &'static str, - _value: &T, - ) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_none(self) -> Result<(), Error> { - Ok(()) - } - - fn serialize_some( - self, - value: &T, - ) -> Result<(), Error> { - value.serialize(self) - } - - fn serialize_seq( - self, - _len: Option, - ) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_tuple(self, len: usize) -> Result { - if len == 2 { - Ok(self) - } else { - Err(Error::unsupported_pair()) - } - } - - fn serialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_map( - self, - _len: Option, - ) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(Error::unsupported_pair()) - } -} - -impl<'input, 'target, Target> ser::SerializeTuple - for PairSerializer<'input, 'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - type Ok = (); - type Error = Error; - - fn serialize_element( - &mut self, - value: &T, - ) -> Result<(), Error> { - match mem::replace(&mut self.state, PairState::Done) { - PairState::WaitingForKey => { - let key_sink = KeySink::new(|key| Ok(key.into())); - let key_serializer = PartSerializer::new(key_sink); - self.state = PairState::WaitingForValue { - key: value.serialize(key_serializer)?, - }; - Ok(()) - } - PairState::WaitingForValue { key } => { - let result = { - let value_sink = ValueSink::new(self.urlencoder, &key); - let value_serializer = PartSerializer::new(value_sink); - 
value.serialize(value_serializer) - }; - if result.is_ok() { - self.state = PairState::Done; - } else { - self.state = PairState::WaitingForValue { key }; - } - result - } - PairState::Done => Err(Error::done()), - } - } - - fn end(self) -> Result<(), Error> { - if let PairState::Done = self.state { - Ok(()) - } else { - Err(Error::not_done()) - } - } -} - -enum PairState { - WaitingForKey, - WaitingForValue { key: Cow<'static, str> }, - Done, -} - -impl Error { - fn done() -> Self { - Error::Custom("this pair has already been serialized".into()) - } - - fn not_done() -> Self { - Error::Custom("this pair has not yet been serialized".into()) - } - - fn unsupported_pair() -> Self { - Error::Custom("unsupported pair".into()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/part.rs s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/part.rs --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/part.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/part.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,236 +0,0 @@ -use crate::ser::Error; -use serde::ser; -use std::str; - -pub struct PartSerializer { - sink: S, -} - -impl PartSerializer { - pub fn new(sink: S) -> Self { - PartSerializer { sink } - } -} - -pub trait Sink: Sized { - type Ok; - - fn serialize_static_str( - self, - value: &'static str, - ) -> Result; - - fn serialize_str(self, value: &str) -> Result; - fn serialize_string(self, value: String) -> Result; - fn serialize_none(self) -> Result; - - fn serialize_some( - self, - value: &T, - ) -> Result; - - fn unsupported(self) -> Error; -} - -impl ser::Serializer for PartSerializer { - type Ok = S::Ok; - type Error = Error; - type SerializeSeq = ser::Impossible; - type SerializeTuple = ser::Impossible; - type SerializeTupleStruct = ser::Impossible; - type SerializeTupleVariant = ser::Impossible; - type SerializeMap = ser::Impossible; - type SerializeStruct = ser::Impossible; - type 
SerializeStructVariant = ser::Impossible; - - fn serialize_bool(self, v: bool) -> Result { - self.sink - .serialize_static_str(if v { "true" } else { "false" }) - } - - fn serialize_i8(self, v: i8) -> Result { - self.serialize_integer(v) - } - - fn serialize_i16(self, v: i16) -> Result { - self.serialize_integer(v) - } - - fn serialize_i32(self, v: i32) -> Result { - self.serialize_integer(v) - } - - fn serialize_i64(self, v: i64) -> Result { - self.serialize_integer(v) - } - - fn serialize_u8(self, v: u8) -> Result { - self.serialize_integer(v) - } - - fn serialize_u16(self, v: u16) -> Result { - self.serialize_integer(v) - } - - fn serialize_u32(self, v: u32) -> Result { - self.serialize_integer(v) - } - - fn serialize_u64(self, v: u64) -> Result { - self.serialize_integer(v) - } - - fn serialize_u128(self, v: u128) -> Result { - self.serialize_integer(v) - } - - fn serialize_i128(self, v: i128) -> Result { - self.serialize_integer(v) - } - - fn serialize_f32(self, v: f32) -> Result { - self.serialize_floating(v) - } - - fn serialize_f64(self, v: f64) -> Result { - self.serialize_floating(v) - } - - fn serialize_char(self, v: char) -> Result { - self.sink.serialize_string(v.to_string()) - } - - fn serialize_str(self, value: &str) -> Result { - self.sink.serialize_str(value) - } - - fn serialize_bytes(self, value: &[u8]) -> Result { - match str::from_utf8(value) { - Ok(value) => self.sink.serialize_str(value), - Err(err) => Err(Error::Utf8(err)), - } - } - - fn serialize_unit(self) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_unit_struct(self, name: &'static str) -> Result { - self.sink.serialize_static_str(name) - } - - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - ) -> Result { - self.sink.serialize_static_str(variant) - } - - fn serialize_newtype_struct( - self, - _name: &'static str, - value: &T, - ) -> Result { - value.serialize(self) - } - - fn serialize_newtype_variant( - self, 
- _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _value: &T, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_none(self) -> Result { - self.sink.serialize_none() - } - - fn serialize_some( - self, - value: &T, - ) -> Result { - self.sink.serialize_some(value) - } - - fn serialize_seq( - self, - _len: Option, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_tuple( - self, - _len: usize, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_map( - self, - _len: Option, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_struct( - self, - _name: &'static str, - _len: usize, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - _variant: &'static str, - _len: usize, - ) -> Result { - Err(self.sink.unsupported()) - } -} - -impl PartSerializer { - fn serialize_integer(self, value: I) -> Result - where - I: itoa::Integer, - { - let mut buf = itoa::Buffer::new(); - let part = buf.format(value); - ser::Serializer::serialize_str(self, part) - } - - fn serialize_floating(self, value: F) -> Result - where - F: ryu::Float, - { - let mut buf = ryu::Buffer::new(); - let part = buf.format(value); - ser::Serializer::serialize_str(self, part) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/value.rs s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/value.rs --- s390-tools-2.31.0/rust-vendor/serde_urlencoded/src/ser/value.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/serde_urlencoded/src/ser/value.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,62 +0,0 @@ 
-use crate::ser::part::{PartSerializer, Sink}; -use crate::ser::Error; -use form_urlencoded::Serializer as UrlEncodedSerializer; -use form_urlencoded::Target as UrlEncodedTarget; -use serde::ser::Serialize; -use std::str; - -pub struct ValueSink<'input, 'key, 'target, Target> -where - Target: UrlEncodedTarget, -{ - urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, - key: &'key str, -} - -impl<'input, 'key, 'target, Target> ValueSink<'input, 'key, 'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - pub fn new( - urlencoder: &'target mut UrlEncodedSerializer<'input, Target>, - key: &'key str, - ) -> Self { - ValueSink { urlencoder, key } - } -} - -impl<'input, 'key, 'target, Target> Sink - for ValueSink<'input, 'key, 'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - type Ok = (); - - fn serialize_str(self, value: &str) -> Result<(), Error> { - self.urlencoder.append_pair(self.key, value); - Ok(()) - } - - fn serialize_static_str(self, value: &'static str) -> Result<(), Error> { - self.serialize_str(value) - } - - fn serialize_string(self, value: String) -> Result<(), Error> { - self.serialize_str(&value) - } - - fn serialize_none(self) -> Result { - Ok(()) - } - - fn serialize_some( - self, - value: &T, - ) -> Result { - value.serialize(PartSerializer::new(self)) - } - - fn unsupported(self) -> Error { - Error::Custom("unsupported value".into()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/signal-hook-registry/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/signal-hook-registry/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/signal-hook-registry/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/signal-hook-registry/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"} \ No newline at end of file diff -Nru 
s390-tools-2.31.0/rust-vendor/signal-hook-registry/Cargo.toml s390-tools-2.33.1/rust-vendor/signal-hook-registry/Cargo.toml --- s390-tools-2.31.0/rust-vendor/signal-hook-registry/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/signal-hook-registry/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,40 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -name = "signal-hook-registry" -version = "1.4.1" -authors = [ - "Michal 'vorner' Vaner ", - "Masaki Hara ", -] -description = "Backend crate for signal-hook" -documentation = "https://docs.rs/signal-hook-registry" -readme = "README.md" -keywords = [ - "signal", - "unix", - "daemon", -] -license = "Apache-2.0/MIT" -repository = "https://github.com/vorner/signal-hook" - -[dependencies.libc] -version = "~0.2" - -[dev-dependencies.signal-hook] -version = "~0.3" - -[badges.maintenance] -status = "actively-developed" - -[badges.travis-ci] -repository = "vorner/signal-hook" diff -Nru s390-tools-2.31.0/rust-vendor/signal-hook-registry/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/signal-hook-registry/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/signal-hook-registry/LICENSE-APACHE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/signal-hook-registry/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff -Nru s390-tools-2.31.0/rust-vendor/signal-hook-registry/LICENSE-MIT s390-tools-2.33.1/rust-vendor/signal-hook-registry/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/signal-hook-registry/LICENSE-MIT 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/signal-hook-registry/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2017 tokio-jsonrpc developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/signal-hook-registry/README.md s390-tools-2.33.1/rust-vendor/signal-hook-registry/README.md --- s390-tools-2.31.0/rust-vendor/signal-hook-registry/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/signal-hook-registry/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -# Signal-hook-registry - -[![Travis Build Status](https://api.travis-ci.org/vorner/signal-hook.svg?branch=master)](https://travis-ci.org/vorner/signal-hook) - -This is the backend crate for the -[signal-hook](https://crates.io/crates/signal-hook) crate. The general direct use of -this crate is discouraged. See the -[documentation](https://docs.rs/signal-hook-registry) for further details. - -## License - -Licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally -submitted for inclusion in the work by you, as defined in the Apache-2.0 -license, shall be dual licensed as above, without any additional terms -or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/signal-hook-registry/src/half_lock.rs s390-tools-2.33.1/rust-vendor/signal-hook-registry/src/half_lock.rs --- s390-tools-2.31.0/rust-vendor/signal-hook-registry/src/half_lock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/signal-hook-registry/src/half_lock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,232 +0,0 @@ -//! The half-lock structure -//! -//! We need a way to protect the structure with configured hooks ‒ a signal may happen in arbitrary -//! thread and needs to read them while another thread might be manipulating the structure. -//! -//! Under ordinary circumstances we would be happy to just use `Mutex>`. However, -//! 
as we use it in the signal handler, we are severely limited in what we can or can't use. So we -//! choose to implement kind of spin-look thing with atomics. -//! -//! In the reader it is always simply locked and then unlocked, making sure it doesn't disappear -//! while in use. -//! -//! The writer has a separate mutex (that prevents other writers; this is used outside of the -//! signal handler), makes a copy of the data and swaps an atomic pointer to the data structure. -//! But it waits until everything is unlocked (no signal handler has the old data) for dropping the -//! old instance. There's a generation trick to make sure that new signal locks another instance. -//! -//! The downside is, this is an active spin lock at the writer end. However, we assume than: -//! -//! * Signals are one time setup before we actually have threads. We just need to make *sure* we -//! are safe even if this is not true. -//! * Signals are rare, happening at the same time as the write even rarer. -//! * Signals are short, as there is mostly nothing allowed inside them anyway. -//! * Our tool box is severely limited. -//! -//! Therefore this is hopefully reasonable trade-off. -//! -//! # Atomic orderings -//! -//! The whole code uses SeqCst conservatively. Atomics are not used because of performance here and -//! are the minor price around signals anyway. But the comments state which orderings should be -//! enough in practice in case someone wants to get inspired (but do make your own check through -//! them anyway). 
- -use std::isize; -use std::marker::PhantomData; -use std::ops::Deref; -use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; -use std::sync::{Mutex, MutexGuard, PoisonError}; -use std::thread; - -use libc; - -const YIELD_EVERY: usize = 16; -const MAX_GUARDS: usize = (isize::MAX) as usize; - -pub(crate) struct ReadGuard<'a, T: 'a> { - data: &'a T, - lock: &'a AtomicUsize, -} - -impl<'a, T> Deref for ReadGuard<'a, T> { - type Target = T; - fn deref(&self) -> &T { - self.data - } -} - -impl<'a, T> Drop for ReadGuard<'a, T> { - fn drop(&mut self) { - // We effectively unlock; Release would be enough. - self.lock.fetch_sub(1, Ordering::SeqCst); - } -} - -pub(crate) struct WriteGuard<'a, T: 'a> { - _guard: MutexGuard<'a, ()>, - lock: &'a HalfLock, - data: &'a T, -} - -impl<'a, T> WriteGuard<'a, T> { - pub(crate) fn store(&mut self, val: T) { - // Move to the heap and convert to raw pointer for AtomicPtr. - let new = Box::into_raw(Box::new(val)); - - self.data = unsafe { &*new }; - - // We can just put the new value in here safely, we worry only about dropping the old one. - // Release might (?) be enough, to "upload" the data. - let old = self.lock.data.swap(new, Ordering::SeqCst); - - // Now we make sure there's no reader having the old data. - self.lock.write_barrier(); - - drop(unsafe { Box::from_raw(old) }); - } -} - -impl<'a, T> Deref for WriteGuard<'a, T> { - type Target = T; - fn deref(&self) -> &T { - // Protected by that mutex - self.data - } -} - -pub(crate) struct HalfLock { - // We conceptually contain an instance of T - _t: PhantomData, - // The actual data as a pointer. - data: AtomicPtr, - // The generation of the data. Influences which slot of the lock counter we use. - generation: AtomicUsize, - // How many active locks are there? - lock: [AtomicUsize; 2], - // Mutex for the writers; only one writer. - write_mutex: Mutex<()>, -} - -impl HalfLock { - pub(crate) fn new(data: T) -> Self { - // Move to the heap so we can safely point there. 
Then convert to raw pointer as AtomicPtr - // operates on raw pointers. The AtomicPtr effectively acts like Box for us semantically. - let ptr = Box::into_raw(Box::new(data)); - Self { - _t: PhantomData, - data: AtomicPtr::new(ptr), - generation: AtomicUsize::new(0), - lock: [AtomicUsize::new(0), AtomicUsize::new(0)], - write_mutex: Mutex::new(()), - } - } - - pub(crate) fn read(&self) -> ReadGuard { - // Relaxed should be enough; we only pick one or the other slot and the writer observes - // that both were 0 at some time. So the actual value doesn't really matter for safety, - // only the changing improves the performance. - let gen = self.generation.load(Ordering::SeqCst); - let lock = &self.lock[gen % 2]; - // Effectively locking something, acquire should be enough. - let guard_cnt = lock.fetch_add(1, Ordering::SeqCst); - - // This is to prevent overflowing the counter in some degenerate cases, which could lead to - // UB (freeing data while still in use). However, as this data structure is used only - // internally and it's not possible to leak the guard and the guard itself takes some - // memory, it should be really impossible to trigger this case. Still, we include it from - // abundance of caution. - // - // This technically is not fully correct as enough threads being in between here and the - // abort below could still overflow it and it could get freed for some *other* thread, but - // that would mean having too many active threads to fit into RAM too and is even more - // absurd corner case than the above. - if guard_cnt > MAX_GUARDS { - unsafe { libc::abort() }; - } - - // Acquire should be enough; we need to "download" the data, paired with the swap on the - // same pointer. - let data = self.data.load(Ordering::SeqCst); - // Safe: - // * It did point to valid data when put in. - // * Protected by lock, so still valid. 
- let data = unsafe { &*data }; - - ReadGuard { data, lock } - } - - fn update_seen(&self, seen_zero: &mut [bool; 2]) { - for (seen, slot) in seen_zero.iter_mut().zip(&self.lock) { - *seen = *seen || slot.load(Ordering::SeqCst) == 0; - } - } - - fn write_barrier(&self) { - // Do a first check of seeing zeroes before we switch the generation. At least one of them - // should be zero by now, due to having drained the generation before leaving the previous - // writer. - let mut seen_zero = [false; 2]; - self.update_seen(&mut seen_zero); - // By switching the generation to the other slot, we make sure the currently active starts - // draining while the other will start filling up. - self.generation.fetch_add(1, Ordering::SeqCst); // Overflow is fine. - - let mut iter = 0usize; - while !seen_zero.iter().all(|s| *s) { - iter = iter.wrapping_add(1); - - // Be somewhat less aggressive while looping, switch to the other threads if possible. - if cfg!(not(miri)) { - if iter % YIELD_EVERY == 0 { - thread::yield_now(); - } else { - // Replaced by hint::spin_loop, but we want to support older compiler - #[allow(deprecated)] - atomic::spin_loop_hint(); - } - } - - self.update_seen(&mut seen_zero); - } - } - - pub(crate) fn write(&self) -> WriteGuard { - // While it's possible the user code panics, our code in store doesn't and the data gets - // swapped atomically. So if it panics, nothing gets changed, therefore poisons are of no - // interest here. - let guard = self - .write_mutex - .lock() - .unwrap_or_else(PoisonError::into_inner); - - // Relaxed should be enough, as we are under the same mutex that was used to get the data - // in. - let data = self.data.load(Ordering::SeqCst); - // Safe: - // * Stored as valid data - // * Only this method, protected by mutex, can change the pointer, so it didn't go away. 
- let data = unsafe { &*data }; - - WriteGuard { - data, - _guard: guard, - lock: self, - } - } -} - -impl Drop for HalfLock { - fn drop(&mut self) { - // During drop we are sure there are no other borrows of the data so we are free to just - // drop it. Also, the drop impl won't be called in practice in our case, as it is used - // solely as a global variable, but we provide it for completeness and tests anyway. - // - // unsafe: the pointer in there is always valid, we just take the last instance out. - unsafe { - // Acquire should be enough. - let data = Box::from_raw(self.data.load(Ordering::SeqCst)); - drop(data); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/signal-hook-registry/src/lib.rs s390-tools-2.33.1/rust-vendor/signal-hook-registry/src/lib.rs --- s390-tools-2.31.0/rust-vendor/signal-hook-registry/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/signal-hook-registry/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,789 +0,0 @@ -#![doc(test(attr(deny(warnings))))] -#![warn(missing_docs)] -#![allow(unknown_lints, renamed_and_remove_lints, bare_trait_objects)] - -//! Backend of the [signal-hook] crate. -//! -//! The [signal-hook] crate tries to provide an API to the unix signals, which are a global -//! resource. Therefore, it is desirable an application contains just one version of the crate -//! which manages this global resource. But that makes it impossible to make breaking changes in -//! the API. -//! -//! Therefore, this crate provides very minimal and low level API to the signals that is unlikely -//! to have to change, while there may be multiple versions of the [signal-hook] that all use this -//! low-level API to provide different versions of the high level APIs. -//! -//! It is also possible some other crates might want to build a completely different API. This -//! split allows these crates to still reuse the same low-level routines in this crate instead of -//! 
going to the (much more dangerous) unix calls. -//! -//! # What this crate provides -//! -//! The only thing this crate does is multiplexing the signals. An application or library can add -//! or remove callbacks and have multiple callbacks for the same signal. -//! -//! It handles dispatching the callbacks and managing them in a way that uses only the -//! [async-signal-safe] functions inside the signal handler. Note that the callbacks are still run -//! inside the signal handler, so it is up to the caller to ensure they are also -//! [async-signal-safe]. -//! -//! # What this is for -//! -//! This is a building block for other libraries creating reasonable abstractions on top of -//! signals. The [signal-hook] is the generally preferred way if you need to handle signals in your -//! application and provides several safe patterns of doing so. -//! -//! # Rust version compatibility -//! -//! Currently builds on 1.26.0 an newer and this is very unlikely to change. However, tests -//! require dependencies that don't build there, so tests need newer Rust version (they are run on -//! stable). -//! -//! # Portability -//! -//! This crate includes a limited support for Windows, based on `signal`/`raise` in the CRT. -//! There are differences in both API and behavior: -//! -//! - Due to lack of `siginfo_t`, we don't provide `register_sigaction` or `register_unchecked`. -//! - Due to lack of signal blocking, there's a race condition. -//! After the call to `signal`, there's a moment where we miss a signal. -//! That means when you register a handler, there may be a signal which invokes -//! neither the default handler or the handler you register. -//! - Handlers registered by `signal` in Windows are cleared on first signal. -//! To match behavior in other platforms, we re-register the handler each time the handler is -//! called, but there's a moment where we miss a handler. -//! That means when you receive two signals in a row, there may be a signal which invokes -//! 
the default handler, nevertheless you certainly have registered the handler. -//! -//! [signal-hook]: https://docs.rs/signal-hook -//! [async-signal-safe]: http://www.man7.org/linux/man-pages/man7/signal-safety.7.html - -extern crate libc; - -mod half_lock; - -use std::collections::hash_map::Entry; -use std::collections::{BTreeMap, HashMap}; -use std::io::Error; -use std::mem; -#[cfg(not(windows))] -use std::ptr; -// Once::new is now a const-fn. But it is not stable in all the rustc versions we want to support -// yet. -#[allow(deprecated)] -use std::sync::ONCE_INIT; -use std::sync::{Arc, Once}; - -#[cfg(not(windows))] -use libc::{c_int, c_void, sigaction, siginfo_t}; -#[cfg(windows)] -use libc::{c_int, sighandler_t}; - -#[cfg(not(windows))] -use libc::{SIGFPE, SIGILL, SIGKILL, SIGSEGV, SIGSTOP}; -#[cfg(windows)] -use libc::{SIGFPE, SIGILL, SIGSEGV}; - -use half_lock::HalfLock; - -// These constants are not defined in the current version of libc, but it actually -// exists in Windows CRT. -#[cfg(windows)] -const SIG_DFL: sighandler_t = 0; -#[cfg(windows)] -const SIG_IGN: sighandler_t = 1; -#[cfg(windows)] -const SIG_GET: sighandler_t = 2; -#[cfg(windows)] -const SIG_ERR: sighandler_t = !0; - -// To simplify implementation. Not to be exposed. -#[cfg(windows)] -#[allow(non_camel_case_types)] -struct siginfo_t; - -// # Internal workings -// -// This uses a form of RCU. There's an atomic pointer to the current action descriptors (in the -// form of IndependentArcSwap, to be able to track what, if any, signal handlers still use the -// version). A signal handler takes a copy of the pointer and calls all the relevant actions. -// -// Modifications to that are protected by a mutex, to avoid juggling multiple signal handlers at -// once (eg. not calling sigaction concurrently). This should not be a problem, because modifying -// the signal actions should be initialization only anyway. 
To avoid all allocations and also -// deallocations inside the signal handler, after replacing the pointer, the modification routine -// needs to busy-wait for the reference count on the old pointer to drop to 1 and take ownership ‒ -// that way the one deallocating is the modification routine, outside of the signal handler. - -#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -struct ActionId(u128); - -/// An ID of registered action. -/// -/// This is returned by all the registration routines and can be used to remove the action later on -/// with a call to [`unregister`]. -#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct SigId { - signal: c_int, - action: ActionId, -} - -// This should be dyn Fn(...), but we want to support Rust 1.26.0 and that one doesn't allow dyn -// yet. -#[allow(unknown_lints, bare_trait_objects)] -type Action = Fn(&siginfo_t) + Send + Sync; - -#[derive(Clone)] -struct Slot { - prev: Prev, - // We use BTreeMap here, because we want to run the actions in the order they were inserted. - // This works, because the ActionIds are assigned in an increasing order. - actions: BTreeMap>, -} - -impl Slot { - #[cfg(windows)] - fn new(signal: libc::c_int) -> Result { - let old = unsafe { libc::signal(signal, handler as sighandler_t) }; - if old == SIG_ERR { - return Err(Error::last_os_error()); - } - Ok(Slot { - prev: Prev { signal, info: old }, - actions: BTreeMap::new(), - }) - } - - #[cfg(not(windows))] - fn new(signal: libc::c_int) -> Result { - // C data structure, expected to be zeroed out. - let mut new: libc::sigaction = unsafe { mem::zeroed() }; - #[cfg(not(target_os = "aix"))] - { new.sa_sigaction = handler as usize; } - #[cfg(target_os = "aix")] - { new.sa_union.__su_sigaction = handler; } - // Android is broken and uses different int types than the rest (and different depending on - // the pointer width). 
This converts the flags to the proper type no matter what it is on - // the given platform. - let flags = libc::SA_RESTART; - #[allow(unused_assignments)] - let mut siginfo = flags; - siginfo = libc::SA_SIGINFO as _; - let flags = flags | siginfo; - new.sa_flags = flags as _; - // C data structure, expected to be zeroed out. - let mut old: libc::sigaction = unsafe { mem::zeroed() }; - // FFI ‒ pointers are valid, it doesn't take ownership. - if unsafe { libc::sigaction(signal, &new, &mut old) } != 0 { - return Err(Error::last_os_error()); - } - Ok(Slot { - prev: Prev { signal, info: old }, - actions: BTreeMap::new(), - }) - } -} - -#[derive(Clone)] -struct SignalData { - signals: HashMap, - next_id: u128, -} - -#[derive(Clone)] -struct Prev { - signal: c_int, - #[cfg(windows)] - info: sighandler_t, - #[cfg(not(windows))] - info: sigaction, -} - -impl Prev { - #[cfg(windows)] - fn detect(signal: c_int) -> Result { - let old = unsafe { libc::signal(signal, SIG_GET) }; - if old == SIG_ERR { - return Err(Error::last_os_error()); - } - Ok(Prev { signal, info: old }) - } - - #[cfg(not(windows))] - fn detect(signal: c_int) -> Result { - // C data structure, expected to be zeroed out. - let mut old: libc::sigaction = unsafe { mem::zeroed() }; - // FFI ‒ pointers are valid, it doesn't take ownership. - if unsafe { libc::sigaction(signal, ptr::null(), &mut old) } != 0 { - return Err(Error::last_os_error()); - } - - Ok(Prev { signal, info: old }) - } - - #[cfg(windows)] - fn execute(&self, sig: c_int) { - let fptr = self.info; - if fptr != 0 && fptr != SIG_DFL && fptr != SIG_IGN { - // FFI ‒ calling the original signal handler. 
- unsafe { - let action = mem::transmute::(fptr); - action(sig); - } - } - } - - #[cfg(not(windows))] - unsafe fn execute(&self, sig: c_int, info: *mut siginfo_t, data: *mut c_void) { - #[cfg(not(target_os = "aix"))] - let fptr = self.info.sa_sigaction; - #[cfg(target_os = "aix")] - let fptr = self.info.sa_union.__su_sigaction as usize; - if fptr != 0 && fptr != libc::SIG_DFL && fptr != libc::SIG_IGN { - // Android is broken and uses different int types than the rest (and different - // depending on the pointer width). This converts the flags to the proper type no - // matter what it is on the given platform. - // - // The trick is to create the same-typed variable as the sa_flags first and then - // set it to the proper value (does Rust have a way to copy a type in a different - // way?) - #[allow(unused_assignments)] - let mut siginfo = self.info.sa_flags; - siginfo = libc::SA_SIGINFO as _; - if self.info.sa_flags & siginfo == 0 { - let action = mem::transmute::(fptr); - action(sig); - } else { - type SigAction = extern "C" fn(c_int, *mut siginfo_t, *mut c_void); - let action = mem::transmute::(fptr); - action(sig, info, data); - } - } - } -} - -/// Lazy-initiated data structure with our global variables. -/// -/// Used inside a structure to cut down on boilerplate code to lazy-initialize stuff. We don't dare -/// use anything fancy like lazy-static or once-cell, since we are not sure they are -/// async-signal-safe in their access. Our code uses the [Once], but only on the write end outside -/// of signal handler. The handler assumes it has already been initialized. -struct GlobalData { - /// The data structure describing what needs to be run for each signal. - data: HalfLock, - - /// A fallback to fight/minimize a race condition during signal initialization. - /// - /// See the comment inside [`register_unchecked_impl`]. 
- race_fallback: HalfLock>, -} - -static mut GLOBAL_DATA: Option = None; -#[allow(deprecated)] -static GLOBAL_INIT: Once = ONCE_INIT; - -impl GlobalData { - fn get() -> &'static Self { - unsafe { GLOBAL_DATA.as_ref().unwrap() } - } - fn ensure() -> &'static Self { - GLOBAL_INIT.call_once(|| unsafe { - GLOBAL_DATA = Some(GlobalData { - data: HalfLock::new(SignalData { - signals: HashMap::new(), - next_id: 1, - }), - race_fallback: HalfLock::new(None), - }); - }); - Self::get() - } -} - -#[cfg(windows)] -extern "C" fn handler(sig: c_int) { - if sig != SIGFPE { - // Windows CRT `signal` resets handler every time, unless for SIGFPE. - // Reregister the handler to retain maximal compatibility. - // Problems: - // - It's racy. But this is inevitably racy in Windows. - // - Interacts poorly with handlers outside signal-hook-registry. - let old = unsafe { libc::signal(sig, handler as sighandler_t) }; - if old == SIG_ERR { - // MSDN doesn't describe which errors might occur, - // but we can tell from the Linux manpage that - // EINVAL (invalid signal number) is mostly the only case. - // Therefore, this branch must not occur. - // In any case we can do nothing useful in the signal handler, - // so we're going to abort silently. - unsafe { - libc::abort(); - } - } - } - - let globals = GlobalData::get(); - let fallback = globals.race_fallback.read(); - let sigdata = globals.data.read(); - - if let Some(ref slot) = sigdata.signals.get(&sig) { - slot.prev.execute(sig); - - for action in slot.actions.values() { - action(&siginfo_t); - } - } else if let Some(prev) = fallback.as_ref() { - // In case we get called but don't have the slot for this signal set up yet, we are under - // the race condition. We may have the old signal handler stored in the fallback - // temporarily. 
- if sig == prev.signal { - prev.execute(sig); - } - // else -> probably should not happen, but races with other threads are possible so - // better safe - } -} - -#[cfg(not(windows))] -extern "C" fn handler(sig: c_int, info: *mut siginfo_t, data: *mut c_void) { - let globals = GlobalData::get(); - let fallback = globals.race_fallback.read(); - let sigdata = globals.data.read(); - - if let Some(slot) = sigdata.signals.get(&sig) { - unsafe { slot.prev.execute(sig, info, data) }; - - let info = unsafe { info.as_ref() }; - let info = info.unwrap_or_else(|| { - // The info being null seems to be illegal according to POSIX, but has been observed on - // some probably broken platform. We can't do anything about that, that is just broken, - // but we are not allowed to panic in a signal handler, so we are left only with simply - // aborting. We try to write a message what happens, but using the libc stuff - // (`eprintln` is not guaranteed to be async-signal-safe). - unsafe { - const MSG: &[u8] = - b"Platform broken, got NULL as siginfo to signal handler. Aborting"; - libc::write(2, MSG.as_ptr() as *const _, MSG.len()); - libc::abort(); - } - }); - - for action in slot.actions.values() { - action(info); - } - } else if let Some(prev) = fallback.as_ref() { - // In case we get called but don't have the slot for this signal set up yet, we are under - // the race condition. We may have the old signal handler stored in the fallback - // temporarily. - if prev.signal == sig { - unsafe { prev.execute(sig, info, data) }; - } - // else -> probably should not happen, but races with other threads are possible so - // better safe - } -} - -/// List of forbidden signals. -/// -/// Some signals are impossible to replace according to POSIX and some are so special that this -/// library refuses to handle them (eg. SIGSEGV). The routines panic in case registering one of -/// these signals is attempted. -/// -/// See [`register`]. 
-pub const FORBIDDEN: &[c_int] = FORBIDDEN_IMPL; - -#[cfg(windows)] -const FORBIDDEN_IMPL: &[c_int] = &[SIGILL, SIGFPE, SIGSEGV]; -#[cfg(not(windows))] -const FORBIDDEN_IMPL: &[c_int] = &[SIGKILL, SIGSTOP, SIGILL, SIGFPE, SIGSEGV]; - -/// Registers an arbitrary action for the given signal. -/// -/// This makes sure there's a signal handler for the given signal. It then adds the action to the -/// ones called each time the signal is delivered. If multiple actions are set for the same signal, -/// all are called, in the order of registration. -/// -/// If there was a previous signal handler for the given signal, it is chained ‒ it will be called -/// as part of this library's signal handler, before any actions set through this function. -/// -/// On success, the function returns an ID that can be used to remove the action again with -/// [`unregister`]. -/// -/// # Panics -/// -/// If the signal is one of (see [`FORBIDDEN`]): -/// -/// * `SIGKILL` -/// * `SIGSTOP` -/// * `SIGILL` -/// * `SIGFPE` -/// * `SIGSEGV` -/// -/// The first two are not possible to override (and the underlying C functions simply ignore all -/// requests to do so, which smells of possible bugs, or return errors). The rest can be set, but -/// generally needs very special handling to do so correctly (direct manipulation of the -/// application's address space, `longjmp` and similar). Unless you know very well what you're -/// doing, you'll shoot yourself into the foot and this library won't help you with that. -/// -/// # Errors -/// -/// Since the library manipulates signals using the low-level C functions, all these can return -/// errors. Generally, the errors mean something like the specified signal does not exist on the -/// given platform ‒ after a program is debugged and tested on a given OS, it should never return -/// an error. -/// -/// However, if an error *is* returned, there are no guarantees if the given action was registered -/// or not. 
-/// -/// # Safety -/// -/// This function is unsafe, because the `action` is run inside a signal handler. The set of -/// functions allowed to be called from within is very limited (they are called async-signal-safe -/// functions by POSIX). These specifically do *not* contain mutexes and memory -/// allocation/deallocation. They *do* contain routines to terminate the program, to further -/// manipulate signals (by the low-level functions, not by this library) and to read and write file -/// descriptors. Calling program's own functions consisting only of these is OK, as is manipulating -/// program's variables ‒ however, as the action can be called on any thread that does not have the -/// given signal masked (by default no signal is masked on any thread), and mutexes are a no-go, -/// this is harder than it looks like at first. -/// -/// As panicking from within a signal handler would be a panic across FFI boundary (which is -/// undefined behavior), the passed handler must not panic. -/// -/// If you find these limitations hard to satisfy, choose from the helper functions in the -/// [signal-hook](https://docs.rs/signal-hook) crate ‒ these provide safe interface to use some -/// common signal handling patters. -/// -/// # Race condition -/// -/// Upon registering the first hook for a given signal into this library, there's a short race -/// condition under the following circumstances: -/// -/// * The program already has a signal handler installed for this particular signal (through some -/// other library, possibly). -/// * Concurrently, some other thread installs a different signal handler while it is being -/// installed by this library. -/// * At the same time, the signal is delivered. -/// -/// Under such conditions signal-hook might wrongly "chain" to the older signal handler for a short -/// while (until the registration is fully complete). -/// -/// Note that the exact conditions of the race condition might change in future versions of the -/// library. 
The recommended way to avoid it is to register signals before starting any additional -/// threads, or at least not to register signals concurrently. -/// -/// Alternatively, make sure all signals are handled through this library. -/// -/// # Performance -/// -/// Even when it is possible to repeatedly install and remove actions during the lifetime of a -/// program, the installation and removal is considered a slow operation and should not be done -/// very often. Also, there's limited (though huge) amount of distinct IDs (they are `u128`). -/// -/// # Examples -/// -/// ```rust -/// extern crate signal_hook_registry; -/// -/// use std::io::Error; -/// use std::process; -/// -/// fn main() -> Result<(), Error> { -/// let signal = unsafe { -/// signal_hook_registry::register(signal_hook::consts::SIGTERM, || process::abort()) -/// }?; -/// // Stuff here... -/// signal_hook_registry::unregister(signal); // Not really necessary. -/// Ok(()) -/// } -/// ``` -pub unsafe fn register(signal: c_int, action: F) -> Result -where - F: Fn() + Sync + Send + 'static, -{ - register_sigaction_impl(signal, move |_: &_| action()) -} - -/// Register a signal action. -/// -/// This acts in the same way as [`register`], including the drawbacks, panics and performance -/// characteristics. The only difference is the provided action accepts a [`siginfo_t`] argument, -/// providing information about the received signal. -/// -/// # Safety -/// -/// See the details of [`register`]. 
-#[cfg(not(windows))] -pub unsafe fn register_sigaction(signal: c_int, action: F) -> Result -where - F: Fn(&siginfo_t) + Sync + Send + 'static, -{ - register_sigaction_impl(signal, action) -} - -unsafe fn register_sigaction_impl(signal: c_int, action: F) -> Result -where - F: Fn(&siginfo_t) + Sync + Send + 'static, -{ - assert!( - !FORBIDDEN.contains(&signal), - "Attempted to register forbidden signal {}", - signal, - ); - register_unchecked_impl(signal, action) -} - -/// Register a signal action without checking for forbidden signals. -/// -/// This acts in the same way as [`register_unchecked`], including the drawbacks, panics and -/// performance characteristics. The only difference is the provided action doesn't accept a -/// [`siginfo_t`] argument. -/// -/// # Safety -/// -/// See the details of [`register`]. -pub unsafe fn register_signal_unchecked(signal: c_int, action: F) -> Result -where - F: Fn() + Sync + Send + 'static, -{ - register_unchecked_impl(signal, move |_: &_| action()) -} - -/// Register a signal action without checking for forbidden signals. -/// -/// This acts the same way as [`register_sigaction`], but without checking for the [`FORBIDDEN`] -/// signals. All the signals passed are registered and it is up to the caller to make some sense of -/// them. -/// -/// Note that you really need to know what you're doing if you change eg. the `SIGSEGV` signal -/// handler. Generally, you don't want to do that. But unlike the other functions here, this -/// function still allows you to do it. -/// -/// # Safety -/// -/// See the details of [`register`]. 
-#[cfg(not(windows))] -pub unsafe fn register_unchecked(signal: c_int, action: F) -> Result -where - F: Fn(&siginfo_t) + Sync + Send + 'static, -{ - register_unchecked_impl(signal, action) -} - -unsafe fn register_unchecked_impl(signal: c_int, action: F) -> Result -where - F: Fn(&siginfo_t) + Sync + Send + 'static, -{ - let globals = GlobalData::ensure(); - let action = Arc::from(action); - - let mut lock = globals.data.write(); - - let mut sigdata = SignalData::clone(&lock); - let id = ActionId(sigdata.next_id); - sigdata.next_id += 1; - - match sigdata.signals.entry(signal) { - Entry::Occupied(mut occupied) => { - assert!(occupied.get_mut().actions.insert(id, action).is_none()); - } - Entry::Vacant(place) => { - // While the sigaction/signal exchanges the old one atomically, we are not able to - // atomically store it somewhere a signal handler could read it. That poses a race - // condition where we could lose some signals delivered in between changing it and - // storing it. - // - // Therefore we first store the old one in the fallback storage. The fallback only - // covers the cases where the slot is not yet active and becomes "inert" after that, - // even if not removed (it may get overwritten by some other signal, but for that the - // mutex in globals.data must be unlocked here - and by that time we already stored the - // slot. - // - // And yes, this still leaves a short race condition when some other thread could - // replace the signal handler and we would be calling the outdated one for a short - // time, until we install the slot. - globals - .race_fallback - .write() - .store(Some(Prev::detect(signal)?)); - - let mut slot = Slot::new(signal)?; - slot.actions.insert(id, action); - place.insert(slot); - } - } - - lock.store(sigdata); - - Ok(SigId { signal, action: id }) -} - -/// Removes a previously installed action. -/// -/// This function does nothing if the action was already removed. 
It returns true if it was removed -/// and false if the action wasn't found. -/// -/// It can unregister all the actions installed by [`register`] as well as the ones from downstream -/// crates (like [`signal-hook`](https://docs.rs/signal-hook)). -/// -/// # Warning -/// -/// This does *not* currently return the default/previous signal handler if the last action for a -/// signal was just unregistered. That means that if you replaced for example `SIGTERM` and then -/// removed the action, the program will effectively ignore `SIGTERM` signals from now on, not -/// terminate on them as is the default action. This is OK if you remove it as part of a shutdown, -/// but it is not recommended to remove termination actions during the normal runtime of -/// application (unless the desired effect is to create something that can be terminated only by -/// SIGKILL). -pub fn unregister(id: SigId) -> bool { - let globals = GlobalData::ensure(); - let mut replace = false; - let mut lock = globals.data.write(); - let mut sigdata = SignalData::clone(&lock); - if let Some(slot) = sigdata.signals.get_mut(&id.signal) { - replace = slot.actions.remove(&id.action).is_some(); - } - if replace { - lock.store(sigdata); - } - replace -} - -// We keep this one here for strict backwards compatibility, but the API is kind of bad. One can -// delete actions that don't belong to them, which is kind of against the whole idea of not -// breaking stuff for others. -#[deprecated( - since = "1.3.0", - note = "Don't use. 
Can influence unrelated parts of program / unknown actions" -)] -#[doc(hidden)] -pub fn unregister_signal(signal: c_int) -> bool { - let globals = GlobalData::ensure(); - let mut replace = false; - let mut lock = globals.data.write(); - let mut sigdata = SignalData::clone(&lock); - if let Some(slot) = sigdata.signals.get_mut(&signal) { - if !slot.actions.is_empty() { - slot.actions.clear(); - replace = true; - } - } - if replace { - lock.store(sigdata); - } - replace -} - -#[cfg(test)] -mod tests { - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::thread; - use std::time::Duration; - - #[cfg(not(windows))] - use libc::{pid_t, SIGUSR1, SIGUSR2}; - - #[cfg(windows)] - use libc::SIGTERM as SIGUSR1; - #[cfg(windows)] - use libc::SIGTERM as SIGUSR2; - - use super::*; - - #[test] - #[should_panic] - fn panic_forbidden() { - let _ = unsafe { register(SIGILL, || ()) }; - } - - /// Registering the forbidden signals is allowed in the _unchecked version. - #[test] - #[allow(clippy::redundant_closure)] // Clippy, you're wrong. Because it changes the return value. 
- fn forbidden_raw() { - unsafe { register_signal_unchecked(SIGFPE, || std::process::abort()).unwrap() }; - } - - #[test] - fn signal_without_pid() { - let status = Arc::new(AtomicUsize::new(0)); - let action = { - let status = Arc::clone(&status); - move || { - status.store(1, Ordering::Relaxed); - } - }; - unsafe { - register(SIGUSR2, action).unwrap(); - libc::raise(SIGUSR2); - } - for _ in 0..10 { - thread::sleep(Duration::from_millis(100)); - let current = status.load(Ordering::Relaxed); - match current { - // Not yet - 0 => continue, - // Good, we are done with the correct result - _ if current == 1 => return, - _ => panic!("Wrong result value {}", current), - } - } - panic!("Timed out waiting for the signal"); - } - - #[test] - #[cfg(not(windows))] - fn signal_with_pid() { - let status = Arc::new(AtomicUsize::new(0)); - let action = { - let status = Arc::clone(&status); - move |siginfo: &siginfo_t| { - // Hack: currently, libc exposes only the first 3 fields of siginfo_t. The pid - // comes somewhat later on. Therefore, we do a Really Ugly Hack and define our - // own structure (and hope it is correct on all platforms). But hey, this is - // only the tests, so we are going to get away with this. 
- #[repr(C)] - struct SigInfo { - _fields: [c_int; 3], - #[cfg(all(target_pointer_width = "64", target_os = "linux"))] - _pad: c_int, - pid: pid_t, - } - let s: &SigInfo = unsafe { - (siginfo as *const _ as usize as *const SigInfo) - .as_ref() - .unwrap() - }; - status.store(s.pid as usize, Ordering::Relaxed); - } - }; - let pid; - unsafe { - pid = libc::getpid(); - register_sigaction(SIGUSR2, action).unwrap(); - libc::raise(SIGUSR2); - } - for _ in 0..10 { - thread::sleep(Duration::from_millis(100)); - let current = status.load(Ordering::Relaxed); - match current { - // Not yet (PID == 0 doesn't happen) - 0 => continue, - // Good, we are done with the correct result - _ if current == pid as usize => return, - _ => panic!("Wrong status value {}", current), - } - } - panic!("Timed out waiting for the signal"); - } - - /// Check that registration works as expected and that unregister tells if it did or not. - #[test] - fn register_unregister() { - let signal = unsafe { register(SIGUSR1, || ()).unwrap() }; - // It was there now, so we can unregister - assert!(unregister(signal)); - // The next time unregistering does nothing and tells us so. 
- assert!(!unregister(signal)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/similar/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/similar/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/similar/Cargo.lock s390-tools-2.33.1/rust-vendor/similar/Cargo.lock --- s390-tools-2.31.0/rust-vendor/similar/Cargo.lock 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/Cargo.lock 1970-01-01 01:00:00.000000000 +0100 @@ -1,259 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", -] - -[[package]] -name = "console" -version = "0.15.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" -dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "unicode-width", - "windows-sys", -] - -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - -[[package]] -name = "insta" -version = "1.31.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0770b0a3d4c70567f0d58331f3088b0e4c4f56c9b8d764efe654b4a5d46de3a" -dependencies = [ - "console", - "lazy_static", - "linked-hash-map", - "similar 2.2.1", - "yaml-rust", -] - -[[package]] 
-name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.147" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" - -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "proc-macro2" -version = "1.0.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" - -[[package]] -name = "ryu" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" - -[[package]] -name = "serde" -version = "1.0.130" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.130" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "similar" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" - -[[package]] -name = "similar" -version = "2.3.0" -dependencies = [ - "bstr", - "console", - "insta", - "serde", - "serde_json", - "unicode-segmentation", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "unicode-ident" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" - -[[package]] -name = "unicode-segmentation" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - -[[package]] 
-name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/Cargo.lock.msrv s390-tools-2.33.1/rust-vendor/similar/Cargo.lock.msrv --- s390-tools-2.31.0/rust-vendor/similar/Cargo.lock.msrv 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/Cargo.lock.msrv 1970-01-01 01:00:00.000000000 +0100 @@ -1,259 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", -] - -[[package]] -name = "console" -version = "0.15.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" -dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "unicode-width", - "windows-sys", -] - -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - -[[package]] -name = "insta" -version = "1.31.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0770b0a3d4c70567f0d58331f3088b0e4c4f56c9b8d764efe654b4a5d46de3a" -dependencies = [ - "console", - "lazy_static", - "linked-hash-map", - "similar 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "yaml-rust", -] - -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - -[[package]] -name 
= "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.147" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" - -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "proc-macro2" -version = "1.0.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" - -[[package]] -name = "ryu" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" - -[[package]] -name = "serde" -version = "1.0.130" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.130" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "similar" -version = "2.2.1" -dependencies = [ - "bstr", - "console", - "insta", - "serde", - "serde_json", - "unicode-segmentation", -] - -[[package]] -name = "similar" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "unicode-ident" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" - -[[package]] -name = "unicode-segmentation" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/Cargo.toml s390-tools-2.33.1/rust-vendor/similar/Cargo.toml --- s390-tools-2.31.0/rust-vendor/similar/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,121 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.41" -name = "similar" -version = "2.3.0" -authors = [ - "Armin Ronacher ", - "Pierre-Étienne Meunier ", - "Brandon Williams ", -] -exclude = ["assets/*"] -description = "A diff library for Rust" -readme = "README.md" -keywords = [ - "diff", - "difference", - "patience", - "compare", - "changes", -] -license = "Apache-2.0" -repository = "https://github.com/mitsuhiko/similar" - -[package.metadata.docs.rs] -all-features = true - -[profile.release] -debug = 2 - -[[example]] -name = "patience" -required-features = [ - "text", - "inline", -] - -[[example]] -name = "terminal" -required-features = ["text"] - -[[example]] -name = "terminal-inline" -required-features = [ - "text", - "inline", - "bytes", -] - -[[example]] -name = "original-slices" -required-features = ["text"] - -[[example]] -name = "udiff" -required-features = [ - "text", - "bytes", -] - -[[example]] -name = "close-matches" -required-features = ["text"] - -[[example]] -name = "large" -required-features = ["text"] - -[[example]] -name = "serde" -required-features = [ - "text", - "serde", 
-] - -[dependencies.bstr] -version = "0.2.14" -optional = true -default-features = false - -[dependencies.serde] -version = "1.0.130" -features = ["derive"] -optional = true - -[dependencies.unicode-segmentation] -version = "1.7.1" -optional = true - -[dev-dependencies.console] -version = "0.15.0" - -[dev-dependencies.insta] -version = "1.10.0" - -[dev-dependencies.serde_json] -version = "1.0.68" - -[features] -bytes = [ - "bstr", - "text", -] -default = ["text"] -inline = ["text"] -text = [] -unicode = [ - "text", - "unicode-segmentation", - "bstr/unicode", -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/CHANGELOG.md s390-tools-2.33.1/rust-vendor/similar/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/similar/CHANGELOG.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -# Changelog - -All notable changes to similar are documented here. - -## 2.3.0 - -* Added support for `Change::value_ref` and `Change::value_mut`. - -## 2.2.1 - -* Fixed a panic in LCS diffs on matching input. (#43) - -## 2.2.0 - -* Fixed a panic in text diff generation. (#37) - -## 2.1.0 - -* Removed deprecated alternative slice diffing functions. -* Added `serde` feature to allow serialization with serde. - -## 2.0.0 - -* Change the `Change` type and associated methods to work on any `T: Clone` instead - of `&T`. This makes the `iter_changes` method also work on slices of integers - or other values. - -## 1.3.0 - -* Performance improvements for the LCS algorithm. -* Small performance improvements by adding an early opt-out for and inline highlighting. -* Added `IdentifyDistinct` to convert sequences to ints. -* Small performance improvements for larger text diffs by using `IdentifyDistinct` - automatically above a threshold. -* Added deadlines to all diffing algorithms to bail early. -* Deprecated slice diffing methods in the individual algorithm modules. 
-* Use a default timeout for the inline highlighting feature. -* Added a compacting step to clean up diffs. This results in nicer looking diffs and - fewer edits. This is happening automatically for captured diffs and is exposed - through the `Capture` type. -* Fix incorrect ranges in unified diff output. - -## 1.2.2 - -* Added support for Rust 1.41.0 for better compatibility. - -## 1.2.1 - -* Added support for Rust 1.43.0 for better compatibility. - -## 1.2.0 - -* Make the unicode feature optional for inline diffing. -* Added Hunt–McIlroy LCS algorithm (`lcs`). -* Changed the implementation of Mayer's diff. This has slightly changed the - behavior but resulted in significantly improved performance and more - readable code. -* Added `NoFinishHook` to aid composing of diff hooks. - -## 1.1.0 - -* More generic lifetimes for `iter_changes` and `iter_inline_changes`. -* Added `iter_all_changes` shortcut as this is commonly useful. -* Added `iter_slices` to `DiffOp` to quickly get an iterator over the - encoded slices rather than individual items like `iter_changes` does. -* Added the `utils` module with various text diffing utilities. -* Added `TextDiffRemapper` which helps with working with the original, pre - `TextDiff` tokenization slices. - -## 1.0.0 - -* Add `get_diff_ratio`. -* Add support for byte diffing and change the text interface to abstract - over `DiffableStr`. -* Restructured crate layout greatly. Text diffing is now on the crate root, - some functionality remains in the algorithms. -* The `Change` type now also works for non text diffs. - -## 0.5.0 - -* Add `DiffOp::apply_to_hook` to apply a captured op to a diff hook. -* Added missing newline handling to the `Changes` type. -* Made unified diff support more flexible through the introduction of - the `UnifiedDiff` type. -* Fixed grouped diff operation to return an empty result if the diff - does not show any changes. -* Added inline diff highlighting support. 
-* Changed word splitting to split into words and whitespace. -* Added support for unicode based word splitting (`TextDiff::from_unicode_words`). - -## 0.4.0 - -* Change `get_close_matches` to use Python's quick ratio optimization - and order lexicographically when tied. - -## 0.3.0 - -* Added grapheme and character level diffing utilities. -* `DiffOp::as_tag_tuple` is now taking the argument by reference. -* Added `TextDiff::ratio`. -* Added `get_close_matches`. - -## 0.2.0 - -* Fixed a bug in the patience algorithm causing it not not work. - -## 0.1.0 - -* Initial release. diff -Nru s390-tools-2.31.0/rust-vendor/similar/clippy.toml s390-tools-2.33.1/rust-vendor/similar/clippy.toml --- s390-tools-2.31.0/rust-vendor/similar/clippy.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/clippy.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -msrv = "1.41.0" diff -Nru s390-tools-2.31.0/rust-vendor/similar/examples/close-matches.rs s390-tools-2.33.1/rust-vendor/similar/examples/close-matches.rs --- s390-tools-2.31.0/rust-vendor/similar/examples/close-matches.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/examples/close-matches.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -use similar::get_close_matches; - -fn main() { - let words = vec![ - "apple", - "appu", - "appal", - "apparitor", - "beer", - "beeb", - "beeline", - ]; - println!("{:?}", get_close_matches("app", &words, 3, 0.7)); - println!("{:?}", get_close_matches("bee", &words, 3, 0.7)); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/examples/large.rs s390-tools-2.33.1/rust-vendor/similar/examples/large.rs --- s390-tools-2.31.0/rust-vendor/similar/examples/large.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/examples/large.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -use similar::TextDiff; - -fn main() { - let x = "abc".repeat(2000); - let y = "abd".repeat(2000); - let diff = 
TextDiff::from_chars(&x, &y); - println!("{}", diff.unified_diff()); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/examples/nonstring.rs s390-tools-2.33.1/rust-vendor/similar/examples/nonstring.rs --- s390-tools-2.31.0/rust-vendor/similar/examples/nonstring.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/examples/nonstring.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -use similar::{capture_diff_slices, Algorithm}; - -fn main() { - let old = vec![1, 2, 3]; - let new = vec![1, 2, 4]; - let ops = capture_diff_slices(Algorithm::Myers, &old, &new); - - for op in ops { - for change in op.iter_changes(&old, &new) { - println!("{:?}", change); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/examples/original-slices.rs s390-tools-2.33.1/rust-vendor/similar/examples/original-slices.rs --- s390-tools-2.31.0/rust-vendor/similar/examples/original-slices.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/examples/original-slices.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -use similar::utils::diff_chars; -use similar::Algorithm; - -fn main() { - let old = "1234567890abcdef".to_string(); - let new = "0123456789Oabzdef".to_string(); - - for (change_tag, value) in diff_chars(Algorithm::Myers, &old, &new) { - println!("{}{:?}", change_tag, value); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/examples/patience.rs s390-tools-2.33.1/rust-vendor/similar/examples/patience.rs --- s390-tools-2.31.0/rust-vendor/similar/examples/patience.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/examples/patience.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,48 +0,0 @@ -use similar::{Algorithm, TextDiff}; - -const OLD: &str = r#" -[ - ( - Major, - 2, - ), - ( - Minor, - 20, - ), - ( - Value, - 0, - ), -] -"#; -const NEW: &str = r#" -[ - ( - Major, - 2, - ), - ( - Minor, - 0, - ), - ( - Value, - 0, - ), - ( - Value, - 1, - ), -] -"#; - -fn main() { - println!( - 
"{}", - TextDiff::configure() - .algorithm(Algorithm::Patience) - .diff_lines(OLD, NEW) - .unified_diff() - ); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/examples/serde.rs s390-tools-2.33.1/rust-vendor/similar/examples/serde.rs --- s390-tools-2.31.0/rust-vendor/similar/examples/serde.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/examples/serde.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -use similar::TextDiff; - -fn main() { - let diff = TextDiff::from_lines( - "Hello World\nThis is the second line.\nThis is the third.", - "Hallo Welt\nThis is the second line.\nThis is life.\nMoar and more", - ); - - let all_changes = diff - .ops() - .iter() - .flat_map(|op| diff.iter_changes(op)) - .collect::>(); - println!("{}", serde_json::to_string_pretty(&all_changes).unwrap()); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/examples/terminal-inline.rs s390-tools-2.33.1/rust-vendor/similar/examples/terminal-inline.rs --- s390-tools-2.31.0/rust-vendor/similar/examples/terminal-inline.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/examples/terminal-inline.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -use std::fmt; -use std::fs::read; -use std::process::exit; - -use console::{style, Style}; -use similar::{ChangeTag, TextDiff}; - -struct Line(Option); - -impl fmt::Display for Line { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.0 { - None => write!(f, " "), - Some(idx) => write!(f, "{:<4}", idx + 1), - } - } -} - -fn main() { - let args: Vec<_> = std::env::args_os().collect(); - if args.len() != 3 { - eprintln!("usage: terminal-inline [old] [new]"); - exit(1); - } - - let old = read(&args[1]).unwrap(); - let new = read(&args[2]).unwrap(); - let diff = TextDiff::from_lines(&old, &new); - - for (idx, group) in diff.grouped_ops(3).iter().enumerate() { - if idx > 0 { - println!("{:-^1$}", "-", 80); - } - for op in group { - for change in 
diff.iter_inline_changes(op) { - let (sign, s) = match change.tag() { - ChangeTag::Delete => ("-", Style::new().red()), - ChangeTag::Insert => ("+", Style::new().green()), - ChangeTag::Equal => (" ", Style::new().dim()), - }; - print!( - "{}{} |{}", - style(Line(change.old_index())).dim(), - style(Line(change.new_index())).dim(), - s.apply_to(sign).bold(), - ); - for (emphasized, value) in change.iter_strings_lossy() { - if emphasized { - print!("{}", s.apply_to(value).underlined().on_black()); - } else { - print!("{}", s.apply_to(value)); - } - } - if change.missing_newline() { - println!(); - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/examples/terminal.rs s390-tools-2.33.1/rust-vendor/similar/examples/terminal.rs --- s390-tools-2.31.0/rust-vendor/similar/examples/terminal.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/examples/terminal.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,20 +0,0 @@ -use console::Style; -use similar::{ChangeTag, TextDiff}; - -fn main() { - let diff = TextDiff::from_lines( - "Hello World\nThis is the second line.\nThis is the third.", - "Hallo Welt\nThis is the second line.\nThis is life.\nMoar and more", - ); - - for op in diff.ops() { - for change in diff.iter_changes(op) { - let (sign, style) = match change.tag() { - ChangeTag::Delete => ("-", Style::new().red()), - ChangeTag::Insert => ("+", Style::new().green()), - ChangeTag::Equal => (" ", Style::new()), - }; - print!("{}{}", style.apply_to(sign).bold(), style.apply_to(change)); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/examples/udiff.rs s390-tools-2.33.1/rust-vendor/similar/examples/udiff.rs --- s390-tools-2.31.0/rust-vendor/similar/examples/udiff.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/examples/udiff.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -use std::fs::read; -use std::io; -use std::process::exit; - -use similar::TextDiff; - -fn main() { - let args: 
Vec<_> = std::env::args_os().collect(); - if args.len() != 3 { - eprintln!("usage: udiff [old] [new]"); - exit(1); - } - - let old = read(&args[1]).unwrap(); - let new = read(&args[2]).unwrap(); - TextDiff::from_lines(&old, &new) - .unified_diff() - .header( - &args[1].as_os_str().to_string_lossy(), - &args[2].as_os_str().to_string_lossy(), - ) - .to_writer(io::stdout()) - .unwrap(); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/LICENSE s390-tools-2.33.1/rust-vendor/similar/LICENSE --- s390-tools-2.31.0/rust-vendor/similar/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/similar/Makefile s390-tools-2.33.1/rust-vendor/similar/Makefile --- s390-tools-2.31.0/rust-vendor/similar/Makefile 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/Makefile 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -all: test - -build: - @cargo build --all-features - -doc: - @cargo doc --all-features - -test: - @cargo test - @cargo test --all-features - @cargo test --no-default-features - @cargo test --no-default-features --features bytes - -format: - @rustup component add rustfmt 2> /dev/null - @cargo fmt --all - -format-check: - @rustup component add rustfmt 2> /dev/null - @cargo fmt --all -- --check - -lint: - @rustup component add clippy 2> /dev/null - @cargo clippy - -.PHONY: all doc test format format-check lint diff -Nru s390-tools-2.31.0/rust-vendor/similar/README.md s390-tools-2.33.1/rust-vendor/similar/README.md --- s390-tools-2.31.0/rust-vendor/similar/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -# Similar: A Diffing Library - -[![Build Status](https://github.com/mitsuhiko/similar/workflows/Tests/badge.svg?branch=main)](https://github.com/mitsuhiko/similar/actions?query=workflow%3ATests) -[![Crates.io](https://img.shields.io/crates/d/similar.svg)](https://crates.io/crates/similar) 
-[![License](https://img.shields.io/github/license/mitsuhiko/similar)](https://github.com/mitsuhiko/similar/blob/main/LICENSE) -[![rustc 1.41.0](https://img.shields.io/badge/rust-1.41%2B-orange.svg)](https://img.shields.io/badge/rust-1.41%2B-orange.svg) -[![Documentation](https://docs.rs/similar/badge.svg)](https://docs.rs/similar) - -Similar is a dependency free crate for Rust that implements different diffing -algorithms and high level interfaces for it. It is based on the -[pijul](https://pijul.org/) implementation of the Patience algorithm and -inherits some ideas from there. It also incorporates the Myer's diff -algorithm which was largely written by Brandon Williams. This library was -built for the [insta snapshot testing library](https://insta.rs). - -```rust -use similar::{ChangeTag, TextDiff}; - -fn main() { - let diff = TextDiff::from_lines( - "Hello World\nThis is the second line.\nThis is the third.", - "Hallo Welt\nThis is the second line.\nThis is life.\nMoar and more", - ); - - for change in diff.iter_all_changes() { - let sign = match change.tag() { - ChangeTag::Delete => "-", - ChangeTag::Insert => "+", - ChangeTag::Equal => " ", - }; - print!("{}{}", sign, change); - } -} -``` - -## Screenshot - -![terminal highlighting](https://raw.githubusercontent.com/mitsuhiko/similar/main/assets/terminal-inline.png) - -## What's in the box? 
- -* Myer's diff -* Patience diff -* Hunt–McIlroy / Hunt–Szymanski LCS diff -* Diffing on arbitrary comparable sequences -* Line, word, character and grapheme level diffing -* Text and Byte diffing -* Unified diff generation - -## Related Projects - -* [insta](https://insta.rs) snapshot testing library -* [similar-asserts](https://github.com/mitsuhiko/similar-asserts) assertion library - -## License and Links - -* [Documentation](https://docs.rs/similar/) -* [Issue Tracker](https://github.com/mitsuhiko/similar/issues) -* [Examples](https://github.com/mitsuhiko/similar/tree/main/examples) -* License: [Apache-2.0](https://github.com/mitsuhiko/similar/blob/main/LICENSE) diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/capture.rs s390-tools-2.33.1/rust-vendor/similar/src/algorithms/capture.rs --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/capture.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/capture.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,117 +0,0 @@ -use std::convert::Infallible; - -use crate::algorithms::DiffHook; -use crate::{group_diff_ops, DiffOp}; - -/// A [`DiffHook`] that captures all diff operations. -#[derive(Default, Clone)] -pub struct Capture(Vec); - -impl Capture { - /// Creates a new capture hook. - pub fn new() -> Capture { - Capture::default() - } - - /// Converts the capture hook into a vector of ops. - pub fn into_ops(self) -> Vec { - self.0 - } - - /// Isolate change clusters by eliminating ranges with no changes. - /// - /// This is equivalent to calling [`group_diff_ops`] on [`Capture::into_ops`]. - pub fn into_grouped_ops(self, n: usize) -> Vec> { - group_diff_ops(self.into_ops(), n) - } - - /// Accesses the captured operations. 
- pub fn ops(&self) -> &[DiffOp] { - &self.0 - } -} - -impl DiffHook for Capture { - type Error = Infallible; - - #[inline(always)] - fn equal(&mut self, old_index: usize, new_index: usize, len: usize) -> Result<(), Self::Error> { - self.0.push(DiffOp::Equal { - old_index, - new_index, - len, - }); - Ok(()) - } - - #[inline(always)] - fn delete( - &mut self, - old_index: usize, - old_len: usize, - new_index: usize, - ) -> Result<(), Self::Error> { - self.0.push(DiffOp::Delete { - old_index, - old_len, - new_index, - }); - Ok(()) - } - - #[inline(always)] - fn insert( - &mut self, - old_index: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), Self::Error> { - self.0.push(DiffOp::Insert { - old_index, - new_index, - new_len, - }); - Ok(()) - } - - #[inline(always)] - fn replace( - &mut self, - old_index: usize, - old_len: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), Self::Error> { - self.0.push(DiffOp::Replace { - old_index, - old_len, - new_index, - new_len, - }); - Ok(()) - } -} - -#[test] -fn test_capture_hook_grouping() { - use crate::algorithms::{diff_slices, Algorithm, Replace}; - - let rng = (1..100).collect::>(); - let mut rng_new = rng.clone(); - rng_new[10] = 1000; - rng_new[13] = 1000; - rng_new[16] = 1000; - rng_new[34] = 1000; - - let mut d = Replace::new(Capture::new()); - diff_slices(Algorithm::Myers, &mut d, &rng, &rng_new).unwrap(); - - let ops = d.into_inner().into_grouped_ops(3); - let tags = ops - .iter() - .map(|group| group.iter().map(|x| x.as_tag_tuple()).collect::>()) - .collect::>(); - - insta::assert_debug_snapshot!(ops); - insta::assert_debug_snapshot!(tags); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/compact.rs s390-tools-2.33.1/rust-vendor/similar/src/algorithms/compact.rs --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/compact.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/compact.rs 1970-01-01 01:00:00.000000000 +0100 @@ 
-1,355 +0,0 @@ -//! Implements basic compacting. This is based on the compaction logic from -//! diffy by Brandon Williams. -use std::ops::Index; - -use crate::{DiffOp, DiffTag}; - -use super::utils::{common_prefix_len, common_suffix_len}; -use super::DiffHook; - -/// Performs semantic cleanup operations on a diff. -/// -/// This merges similar ops together but also tries to move hunks up and -/// down the diff with the desire to connect as many hunks as possible. -/// It still needs to be combined with [`Replace`](crate::algorithms::Replace) -/// to get actual replace diff ops out. -#[derive(Debug)] -pub struct Compact<'old, 'new, Old: ?Sized, New: ?Sized, D> { - d: D, - ops: Vec, - old: &'old Old, - new: &'new New, -} - -impl<'old, 'new, Old, New, D> Compact<'old, 'new, Old, New, D> -where - D: DiffHook, - Old: Index + ?Sized + 'old, - New: Index + ?Sized + 'new, - New::Output: PartialEq, -{ - /// Creates a new compact hook wrapping another hook. - pub fn new(d: D, old: &'old Old, new: &'new New) -> Self { - Compact { - d, - ops: Vec::new(), - old, - new, - } - } - - /// Extracts the inner hook. 
- pub fn into_inner(self) -> D { - self.d - } -} - -impl<'old, 'new, Old: ?Sized, New: ?Sized, D: DiffHook> AsRef - for Compact<'old, 'new, Old, New, D> -{ - fn as_ref(&self) -> &D { - &self.d - } -} - -impl<'old, 'new, Old: ?Sized, New: ?Sized, D: DiffHook> AsMut - for Compact<'old, 'new, Old, New, D> -{ - fn as_mut(&mut self) -> &mut D { - &mut self.d - } -} - -impl<'old, 'new, Old, New, D> DiffHook for Compact<'old, 'new, Old, New, D> -where - D: DiffHook, - Old: Index + ?Sized + 'old, - New: Index + ?Sized + 'new, - New::Output: PartialEq, -{ - type Error = D::Error; - - #[inline(always)] - fn equal(&mut self, old_index: usize, new_index: usize, len: usize) -> Result<(), Self::Error> { - self.ops.push(DiffOp::Equal { - old_index, - new_index, - len, - }); - Ok(()) - } - - #[inline(always)] - fn delete( - &mut self, - old_index: usize, - old_len: usize, - new_index: usize, - ) -> Result<(), Self::Error> { - self.ops.push(DiffOp::Delete { - old_index, - old_len, - new_index, - }); - Ok(()) - } - - #[inline(always)] - fn insert( - &mut self, - old_index: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), Self::Error> { - self.ops.push(DiffOp::Insert { - old_index, - new_index, - new_len, - }); - Ok(()) - } - - fn finish(&mut self) -> Result<(), Self::Error> { - cleanup_diff_ops(self.old, self.new, &mut self.ops); - for op in &self.ops { - op.apply_to_hook(&mut self.d)?; - } - self.d.finish() - } -} - -// Walks through all edits and shifts them up and then down, trying to see if -// they run into similar edits which can be merged. 
-pub fn cleanup_diff_ops(old: &Old, new: &New, ops: &mut Vec) -where - Old: Index + ?Sized, - New: Index + ?Sized, - New::Output: PartialEq, -{ - // First attempt to compact all Deletions - let mut pointer = 0; - while let Some(&op) = ops.get(pointer) { - if let DiffTag::Delete = op.tag() { - pointer = shift_diff_ops_up(ops, old, new, pointer); - pointer = shift_diff_ops_down(ops, old, new, pointer); - } - pointer += 1; - } - - // Then attempt to compact all Insertions - let mut pointer = 0; - while let Some(&op) = ops.get(pointer) { - if let DiffTag::Insert = op.tag() { - pointer = shift_diff_ops_up(ops, old, new, pointer); - pointer = shift_diff_ops_down(ops, old, new, pointer); - } - pointer += 1; - } -} - -fn shift_diff_ops_up( - ops: &mut Vec, - old: &Old, - new: &New, - mut pointer: usize, -) -> usize -where - Old: Index + ?Sized, - New: Index + ?Sized, - New::Output: PartialEq, -{ - while let Some(&prev_op) = pointer.checked_sub(1).and_then(|idx| ops.get(idx)) { - let this_op = ops[pointer]; - match (this_op.tag(), prev_op.tag()) { - // Shift Inserts Upwards - (DiffTag::Insert, DiffTag::Equal) => { - let suffix_len = - common_suffix_len(old, prev_op.old_range(), new, this_op.new_range()); - if suffix_len > 0 { - if let Some(DiffTag::Equal) = ops.get(pointer + 1).map(|x| x.tag()) { - ops[pointer + 1].grow_left(suffix_len); - } else { - ops.insert( - pointer + 1, - DiffOp::Equal { - old_index: prev_op.old_range().end - suffix_len, - new_index: this_op.new_range().end - suffix_len, - len: suffix_len, - }, - ); - } - ops[pointer].shift_left(suffix_len); - ops[pointer - 1].shrink_left(suffix_len); - - if ops[pointer - 1].is_empty() { - ops.remove(pointer - 1); - pointer -= 1; - } - } else if ops[pointer - 1].is_empty() { - ops.remove(pointer - 1); - pointer -= 1; - } else { - // We can't shift upwards anymore - break; - } - } - // Shift Deletions Upwards - (DiffTag::Delete, DiffTag::Equal) => { - // check common suffix for the amount we can shift - let suffix_len 
= - common_suffix_len(old, prev_op.old_range(), new, this_op.new_range()); - if suffix_len != 0 { - if let Some(DiffTag::Equal) = ops.get(pointer + 1).map(|x| x.tag()) { - ops[pointer + 1].grow_left(suffix_len); - } else { - let old_range = prev_op.old_range(); - ops.insert( - pointer + 1, - DiffOp::Equal { - old_index: old_range.end - suffix_len, - new_index: this_op.new_range().end - suffix_len, - len: old_range.len() - suffix_len, - }, - ); - } - ops[pointer].shift_left(suffix_len); - ops[pointer - 1].shrink_left(suffix_len); - - if ops[pointer - 1].is_empty() { - ops.remove(pointer - 1); - pointer -= 1; - } - } else if ops[pointer - 1].is_empty() { - ops.remove(pointer - 1); - pointer -= 1; - } else { - // We can't shift upwards anymore - break; - } - } - // Swap the Delete and Insert - (DiffTag::Insert, DiffTag::Delete) | (DiffTag::Delete, DiffTag::Insert) => { - ops.swap(pointer - 1, pointer); - pointer -= 1; - } - // Merge the two ranges - (DiffTag::Insert, DiffTag::Insert) => { - ops[pointer - 1].grow_right(this_op.new_range().len()); - ops.remove(pointer); - pointer -= 1; - } - (DiffTag::Delete, DiffTag::Delete) => { - ops[pointer - 1].grow_right(this_op.old_range().len()); - ops.remove(pointer); - pointer -= 1; - } - _ => unreachable!("unexpected tag"), - } - } - pointer -} - -fn shift_diff_ops_down( - ops: &mut Vec, - old: &Old, - new: &New, - mut pointer: usize, -) -> usize -where - Old: Index + ?Sized, - New: Index + ?Sized, - New::Output: PartialEq, -{ - while let Some(&next_op) = pointer.checked_add(1).and_then(|idx| ops.get(idx)) { - let this_op = ops[pointer]; - match (this_op.tag(), next_op.tag()) { - // Shift Inserts Downwards - (DiffTag::Insert, DiffTag::Equal) => { - let prefix_len = - common_prefix_len(old, next_op.old_range(), new, this_op.new_range()); - if prefix_len > 0 { - if let Some(DiffTag::Equal) = pointer - .checked_sub(1) - .and_then(|x| ops.get(x)) - .map(|x| x.tag()) - { - ops[pointer - 1].grow_right(prefix_len); - } else { - 
ops.insert( - pointer, - DiffOp::Equal { - old_index: next_op.old_range().start, - new_index: this_op.new_range().start, - len: prefix_len, - }, - ); - pointer += 1; - } - ops[pointer].shift_right(prefix_len); - ops[pointer + 1].shrink_right(prefix_len); - - if ops[pointer + 1].is_empty() { - ops.remove(pointer + 1); - } - } else if ops[pointer + 1].is_empty() { - ops.remove(pointer + 1); - } else { - // We can't shift upwards anymore - break; - } - } - // Shift Deletions Downwards - (DiffTag::Delete, DiffTag::Equal) => { - // check common suffix for the amount we can shift - let prefix_len = - common_prefix_len(old, next_op.old_range(), new, this_op.new_range()); - if prefix_len > 0 { - if let Some(DiffTag::Equal) = pointer - .checked_sub(1) - .and_then(|x| ops.get(x)) - .map(|x| x.tag()) - { - ops[pointer - 1].grow_right(prefix_len); - } else { - ops.insert( - pointer, - DiffOp::Equal { - old_index: next_op.old_range().start, - new_index: this_op.new_range().start, - len: prefix_len, - }, - ); - pointer += 1; - } - ops[pointer].shift_right(prefix_len); - ops[pointer + 1].shrink_right(prefix_len); - - if ops[pointer + 1].is_empty() { - ops.remove(pointer + 1); - } - } else if ops[pointer + 1].is_empty() { - ops.remove(pointer + 1); - } else { - // We can't shift downwards anymore - break; - } - } - // Swap the Delete and Insert - (DiffTag::Insert, DiffTag::Delete) | (DiffTag::Delete, DiffTag::Insert) => { - ops.swap(pointer, pointer + 1); - pointer += 1; - } - // Merge the two ranges - (DiffTag::Insert, DiffTag::Insert) => { - ops[pointer].grow_right(next_op.new_range().len()); - ops.remove(pointer + 1); - } - (DiffTag::Delete, DiffTag::Delete) => { - ops[pointer].grow_right(next_op.old_range().len()); - ops.remove(pointer + 1); - } - _ => unreachable!("unexpected tag"), - } - } - pointer -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/hook.rs s390-tools-2.33.1/rust-vendor/similar/src/algorithms/hook.rs --- 
s390-tools-2.31.0/rust-vendor/similar/src/algorithms/hook.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/hook.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,178 +0,0 @@ -/// A trait for reacting to an edit script from the "old" version to -/// the "new" version. -pub trait DiffHook: Sized { - /// The error produced from the hook methods. - type Error; - - /// Called when lines with indices `old_index` (in the old version) and - /// `new_index` (in the new version) start an section equal in both - /// versions, of length `len`. - fn equal(&mut self, old_index: usize, new_index: usize, len: usize) -> Result<(), Self::Error> { - let _ = old_index; - let _ = new_index; - let _ = len; - Ok(()) - } - - /// Called when a section of length `old_len`, starting at `old_index`, - /// needs to be deleted from the old version. - fn delete( - &mut self, - old_index: usize, - old_len: usize, - new_index: usize, - ) -> Result<(), Self::Error> { - let _ = old_index; - let _ = old_len; - let _ = new_index; - Ok(()) - } - - /// Called when a section of the new version, of length `new_len` - /// and starting at `new_index`, needs to be inserted at position `old_index'. - fn insert( - &mut self, - old_index: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), Self::Error> { - let _ = old_index; - let _ = new_index; - let _ = new_len; - Ok(()) - } - - /// Called when a section of the old version, starting at index - /// `old_index` and of length `old_len`, needs to be replaced with a - /// section of length `new_len`, starting at `new_index`, of the new - /// version. - /// - /// The default implementations invokes `delete` and `insert`. - /// - /// You can use the [`Replace`](crate::algorithms::Replace) hook to - /// automatically generate these. 
- #[inline(always)] - fn replace( - &mut self, - old_index: usize, - old_len: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), Self::Error> { - self.delete(old_index, old_len, new_index)?; - self.insert(old_index, new_index, new_len) - } - - /// Always called at the end of the algorithm. - #[inline(always)] - fn finish(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} - -impl<'a, D: DiffHook + 'a> DiffHook for &'a mut D { - type Error = D::Error; - - #[inline(always)] - fn equal(&mut self, old_index: usize, new_index: usize, len: usize) -> Result<(), Self::Error> { - (*self).equal(old_index, new_index, len) - } - - #[inline(always)] - fn delete( - &mut self, - old_index: usize, - old_len: usize, - new_index: usize, - ) -> Result<(), Self::Error> { - (*self).delete(old_index, old_len, new_index) - } - - #[inline(always)] - fn insert( - &mut self, - old_index: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), Self::Error> { - (*self).insert(old_index, new_index, new_len) - } - - #[inline(always)] - fn replace( - &mut self, - old: usize, - old_len: usize, - new: usize, - new_len: usize, - ) -> Result<(), Self::Error> { - (*self).replace(old, old_len, new, new_len) - } - - #[inline(always)] - fn finish(&mut self) -> Result<(), Self::Error> { - (*self).finish() - } -} - -/// Wrapper [`DiffHook`] that prevents calls to [`DiffHook::finish`]. -/// -/// This hook is useful in situations where diff hooks are composed but you -/// want to prevent that the finish hook method is called. -pub struct NoFinishHook(D); - -impl NoFinishHook { - /// Wraps another hook. - pub fn new(d: D) -> NoFinishHook { - NoFinishHook(d) - } - - /// Extracts the inner hook. 
- pub fn into_inner(self) -> D { - self.0 - } -} - -impl DiffHook for NoFinishHook { - type Error = D::Error; - - #[inline(always)] - fn equal(&mut self, old_index: usize, new_index: usize, len: usize) -> Result<(), Self::Error> { - self.0.equal(old_index, new_index, len) - } - - #[inline(always)] - fn delete( - &mut self, - old_index: usize, - old_len: usize, - new_index: usize, - ) -> Result<(), Self::Error> { - self.0.delete(old_index, old_len, new_index) - } - - #[inline(always)] - fn insert( - &mut self, - old_index: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), Self::Error> { - self.0.insert(old_index, new_index, new_len) - } - - #[inline(always)] - fn replace( - &mut self, - old_index: usize, - old_len: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), Self::Error> { - self.0.replace(old_index, old_len, new_index, new_len) - } - - fn finish(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/lcs.rs s390-tools-2.33.1/rust-vendor/similar/src/algorithms/lcs.rs --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/lcs.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/lcs.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,236 +0,0 @@ -//! LCS diff algorithm. -//! -//! * time: `O((NM)D log (M)D)` -//! * space `O(MN)` -use std::collections::BTreeMap; -use std::ops::{Index, Range}; -use std::time::Instant; - -use crate::algorithms::utils::{common_prefix_len, common_suffix_len, is_empty_range}; -use crate::algorithms::DiffHook; - -/// LCS diff algorithm. -/// -/// Diff `old`, between indices `old_range` and `new` between indices `new_range`. -/// -/// This diff is done with an optional deadline that defines the maximal -/// execution time permitted before it bails and falls back to an very bad -/// approximation. Deadlines with LCS do not make a lot of sense and should -/// not be used. 
-pub fn diff( - d: &mut D, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, -) -> Result<(), D::Error> -where - Old: Index + ?Sized, - New: Index + ?Sized, - D: DiffHook, - New::Output: PartialEq, -{ - diff_deadline(d, old, old_range, new, new_range, None) -} - -/// LCS diff algorithm. -/// -/// Diff `old`, between indices `old_range` and `new` between indices `new_range`. -/// -/// This diff is done with an optional deadline that defines the maximal -/// execution time permitted before it bails and falls back to an approximation. -pub fn diff_deadline( - d: &mut D, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, - deadline: Option, -) -> Result<(), D::Error> -where - Old: Index + ?Sized, - New: Index + ?Sized, - D: DiffHook, - New::Output: PartialEq, -{ - if is_empty_range(&new_range) { - d.delete(old_range.start, old_range.len(), new_range.start)?; - return Ok(()); - } else if is_empty_range(&old_range) { - d.insert(old_range.start, new_range.start, new_range.len())?; - return Ok(()); - } - - let common_prefix_len = common_prefix_len(old, old_range.clone(), new, new_range.clone()); - let common_suffix_len = common_suffix_len(old, old_range.clone(), new, new_range.clone()); - - // If the sequences are not different then we're done - if common_prefix_len == old_range.len() && (old_range.len() == new_range.len()) { - d.equal(0, 0, old_range.len())?; - return Ok(()); - } - - let maybe_table = make_table( - old, - common_prefix_len..(old_range.len() - common_suffix_len), - new, - common_prefix_len..(new_range.len() - common_suffix_len), - deadline, - ); - let mut old_idx = 0; - let mut new_idx = 0; - let new_len = new_range.len() - common_prefix_len - common_suffix_len; - let old_len = old_range.len() - common_prefix_len - common_suffix_len; - - if common_prefix_len > 0 { - d.equal(old_range.start, new_range.start, common_prefix_len)?; - } - - if let Some(table) = maybe_table { - while new_idx < new_len && old_idx < old_len { - let 
old_orig_idx = old_range.start + common_prefix_len + old_idx; - let new_orig_idx = new_range.start + common_prefix_len + new_idx; - - if new[new_orig_idx] == old[old_orig_idx] { - d.equal(old_orig_idx, new_orig_idx, 1)?; - old_idx += 1; - new_idx += 1; - } else if table.get(&(new_idx, old_idx + 1)).map_or(0, |&x| x) - >= table.get(&(new_idx + 1, old_idx)).map_or(0, |&x| x) - { - d.delete(old_orig_idx, 1, new_orig_idx)?; - old_idx += 1; - } else { - d.insert(old_orig_idx, new_orig_idx, 1)?; - new_idx += 1; - } - } - } else { - let old_orig_idx = old_range.start + common_prefix_len + old_idx; - let new_orig_idx = new_range.start + common_prefix_len + new_idx; - d.delete(old_orig_idx, old_len, new_orig_idx)?; - d.insert(old_orig_idx, new_orig_idx, new_len)?; - } - - if old_idx < old_len { - d.delete( - old_range.start + common_prefix_len + old_idx, - old_len - old_idx, - new_range.start + common_prefix_len + new_idx, - )?; - old_idx += old_len - old_idx; - } - - if new_idx < new_len { - d.insert( - old_range.start + common_prefix_len + old_idx, - new_range.start + common_prefix_len + new_idx, - new_len - new_idx, - )?; - } - - if common_suffix_len > 0 { - d.equal( - old_range.start + old_len + common_prefix_len, - new_range.start + new_len + common_prefix_len, - common_suffix_len, - )?; - } - - d.finish() -} - -fn make_table( - old: &Old, - old_range: Range, - new: &New, - new_range: Range, - deadline: Option, -) -> Option> -where - Old: Index + ?Sized, - New: Index + ?Sized, - New::Output: PartialEq, -{ - let old_len = old_range.len(); - let new_len = new_range.len(); - let mut table = BTreeMap::new(); - - for i in (0..new_len).rev() { - // are we running for too long? 
give up on the table - if let Some(deadline) = deadline { - if Instant::now() > deadline { - return None; - } - } - - for j in (0..old_len).rev() { - let val = if new[i] == old[j] { - table.get(&(i + 1, j + 1)).map_or(0, |&x| x) + 1 - } else { - table - .get(&(i + 1, j)) - .map_or(0, |&x| x) - .max(table.get(&(i, j + 1)).map_or(0, |&x| x)) - }; - if val > 0 { - table.insert((i, j), val); - } - } - } - - Some(table) -} - -#[test] -fn test_table() { - let table = make_table(&vec![2, 3], 0..2, &vec![0, 1, 2], 0..3, None).unwrap(); - let expected = { - let mut m = BTreeMap::new(); - m.insert((1, 0), 1); - m.insert((0, 0), 1); - m.insert((2, 0), 1); - m - }; - assert_eq!(table, expected); -} - -#[test] -fn test_diff() { - let a: &[usize] = &[0, 1, 2, 3, 4]; - let b: &[usize] = &[0, 1, 2, 9, 4]; - - let mut d = crate::algorithms::Replace::new(crate::algorithms::Capture::new()); - diff(&mut d, a, 0..a.len(), b, 0..b.len()).unwrap(); - insta::assert_debug_snapshot!(d.into_inner().ops()); -} - -#[test] -fn test_contiguous() { - let a: &[usize] = &[0, 1, 2, 3, 4, 4, 4, 5]; - let b: &[usize] = &[0, 1, 2, 8, 9, 4, 4, 7]; - - let mut d = crate::algorithms::Replace::new(crate::algorithms::Capture::new()); - diff(&mut d, a, 0..a.len(), b, 0..b.len()).unwrap(); - insta::assert_debug_snapshot!(d.into_inner().ops()); -} - -#[test] -fn test_pat() { - let a: &[usize] = &[0, 1, 3, 4, 5]; - let b: &[usize] = &[0, 1, 4, 5, 8, 9]; - - let mut d = crate::algorithms::Capture::new(); - diff(&mut d, a, 0..a.len(), b, 0..b.len()).unwrap(); - insta::assert_debug_snapshot!(d.ops()); -} - -#[test] -fn test_same() { - let a: &[usize] = &[0, 1, 2, 3, 4, 4, 4, 5]; - let b: &[usize] = &[0, 1, 2, 3, 4, 4, 4, 5]; - - let mut d = crate::algorithms::Capture::new(); - diff(&mut d, a, 0..a.len(), b, 0..b.len()).unwrap(); - insta::assert_debug_snapshot!(d.ops()); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/mod.rs s390-tools-2.33.1/rust-vendor/similar/src/algorithms/mod.rs --- 
s390-tools-2.31.0/rust-vendor/similar/src/algorithms/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,134 +0,0 @@ -//! Various diff (longest common subsequence) algorithms. -//! -//! The implementations of the algorithms in this module are relatively low -//! level and expose the most generic bounds possible for the algorithm. To -//! use them you would typically use the higher level API if possible but -//! direct access to these algorithms can be useful in some cases. -//! -//! All these algorithms provide a `diff` function which takes two indexable -//! objects (for instance slices) and a [`DiffHook`]. As the -//! diff is generated the diff hook is invoked. Note that the diff hook does -//! not get access to the actual values but only the indexes. This is why the -//! diff hook is not used outside of the raw algorithm implementations as for -//! most situations access to the values is useful of required. -//! -//! The algorithms module really is the most low-level module in similar and -//! generally not the place to start. -//! -//! # Example -//! -//! This is a simple example that shows how you can calculate the difference -//! between two sequences and capture the ops into a vector. -//! -//! ```rust -//! use similar::algorithms::{Algorithm, Replace, Capture, diff_slices}; -//! -//! let a = vec![1, 2, 3, 4, 5]; -//! let b = vec![1, 2, 3, 4, 7]; -//! let mut d = Replace::new(Capture::new()); -//! diff_slices(Algorithm::Myers, &mut d, &a, &b).unwrap(); -//! let ops = d.into_inner().into_ops(); -//! ``` -//! -//! The above example is equivalent to using -//! [`capture_diff_slices`](crate::capture_diff_slices). 
- -mod capture; -mod compact; -mod hook; -mod replace; -pub(crate) mod utils; - -use std::hash::Hash; -use std::ops::{Index, Range}; -use std::time::Instant; - -pub use capture::Capture; -pub use compact::Compact; -pub use hook::{DiffHook, NoFinishHook}; -pub use replace::Replace; -pub use utils::IdentifyDistinct; - -#[doc(no_inline)] -pub use crate::Algorithm; - -pub mod lcs; -pub mod myers; -pub mod patience; - -/// Creates a diff between old and new with the given algorithm. -/// -/// Diffs `old`, between indices `old_range` and `new` between indices `new_range`. -pub fn diff( - alg: Algorithm, - d: &mut D, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, -) -> Result<(), D::Error> -where - Old: Index + ?Sized, - New: Index + ?Sized, - D: DiffHook, - Old::Output: Hash + Eq + Ord, - New::Output: PartialEq + Hash + Eq + Ord, -{ - diff_deadline(alg, d, old, old_range, new, new_range, None) -} - -/// Creates a diff between old and new with the given algorithm with deadline. -/// -/// Diffs `old`, between indices `old_range` and `new` between indices `new_range`. -/// -/// This diff is done with an optional deadline that defines the maximal -/// execution time permitted before it bails and falls back to an approximation. -/// Note that not all algorithms behave well if they reach the deadline (LCS -/// for instance produces a very simplistic diff when the deadline is reached -/// in all cases). 
-pub fn diff_deadline( - alg: Algorithm, - d: &mut D, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, - deadline: Option, -) -> Result<(), D::Error> -where - Old: Index + ?Sized, - New: Index + ?Sized, - D: DiffHook, - Old::Output: Hash + Eq + Ord, - New::Output: PartialEq + Hash + Eq + Ord, -{ - match alg { - Algorithm::Myers => myers::diff_deadline(d, old, old_range, new, new_range, deadline), - Algorithm::Patience => patience::diff_deadline(d, old, old_range, new, new_range, deadline), - Algorithm::Lcs => lcs::diff_deadline(d, old, old_range, new, new_range, deadline), - } -} - -/// Shortcut for diffing slices with a specific algorithm. -pub fn diff_slices(alg: Algorithm, d: &mut D, old: &[T], new: &[T]) -> Result<(), D::Error> -where - D: DiffHook, - T: Eq + Hash + Ord, -{ - diff(alg, d, old, 0..old.len(), new, 0..new.len()) -} - -/// Shortcut for diffing slices with a specific algorithm. -pub fn diff_slices_deadline( - alg: Algorithm, - d: &mut D, - old: &[T], - new: &[T], - deadline: Option, -) -> Result<(), D::Error> -where - D: DiffHook, - T: Eq + Hash + Ord, -{ - diff_deadline(alg, d, old, 0..old.len(), new, 0..new.len(), deadline) -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/myers.rs s390-tools-2.33.1/rust-vendor/similar/src/algorithms/myers.rs --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/myers.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/myers.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,415 +0,0 @@ -//! Myers' diff algorithm. -//! -//! * time: `O((N+M)D)` -//! * space `O(N+M)` -//! -//! See [the original article by Eugene W. Myers](http://www.xmailserver.org/diff2.pdf) -//! describing it. -//! -//! The implementation of this algorithm is based on the implementation by -//! Brandon Williams. -//! -//! # Heuristics -//! -//! At present this implementation of Myers' does not implement any more advanced -//! 
heuristics that would solve some pathological cases. For instance passing two -//! large and completely distinct sequences to the algorithm will make it spin -//! without making reasonable progress. Currently the only protection in the -//! library against this is to pass a deadline to the diffing algorithm. -//! -//! For potential improvements here see [similar#15](https://github.com/mitsuhiko/similar/issues/15). - -use std::ops::{Index, IndexMut, Range}; -use std::time::Instant; - -use crate::algorithms::utils::{common_prefix_len, common_suffix_len, is_empty_range}; -use crate::algorithms::DiffHook; - -/// Myers' diff algorithm. -/// -/// Diff `old`, between indices `old_range` and `new` between indices `new_range`. -pub fn diff( - d: &mut D, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, -) -> Result<(), D::Error> -where - Old: Index + ?Sized, - New: Index + ?Sized, - D: DiffHook, - New::Output: PartialEq, -{ - diff_deadline(d, old, old_range, new, new_range, None) -} - -/// Myers' diff algorithm with deadline. -/// -/// Diff `old`, between indices `old_range` and `new` between indices `new_range`. -/// -/// This diff is done with an optional deadline that defines the maximal -/// execution time permitted before it bails and falls back to an approximation. -pub fn diff_deadline( - d: &mut D, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, - deadline: Option, -) -> Result<(), D::Error> -where - Old: Index + ?Sized, - New: Index + ?Sized, - D: DiffHook, - New::Output: PartialEq, -{ - let max_d = max_d(old_range.len(), new_range.len()); - let mut vb = V::new(max_d); - let mut vf = V::new(max_d); - conquer( - d, old, old_range, new, new_range, &mut vf, &mut vb, deadline, - )?; - d.finish() -} - -// A D-path is a path which starts at (0,0) that has exactly D non-diagonal -// edges. All D-paths consist of a (D - 1)-path followed by a non-diagonal edge -// and then a possibly empty sequence of diagonal edges called a snake. 
- -/// `V` contains the endpoints of the furthest reaching `D-paths`. For each -/// recorded endpoint `(x,y)` in diagonal `k`, we only need to retain `x` because -/// `y` can be computed from `x - k`. In other words, `V` is an array of integers -/// where `V[k]` contains the row index of the endpoint of the furthest reaching -/// path in diagonal `k`. -/// -/// We can't use a traditional Vec to represent `V` since we use `k` as an index -/// and it can take on negative values. So instead `V` is represented as a -/// light-weight wrapper around a Vec plus an `offset` which is the maximum value -/// `k` can take on in order to map negative `k`'s back to a value >= 0. -#[derive(Debug)] -struct V { - offset: isize, - v: Vec, // Look into initializing this to -1 and storing isize -} - -impl V { - fn new(max_d: usize) -> Self { - Self { - offset: max_d as isize, - v: vec![0; 2 * max_d], - } - } - - fn len(&self) -> usize { - self.v.len() - } -} - -impl Index for V { - type Output = usize; - - fn index(&self, index: isize) -> &Self::Output { - &self.v[(index + self.offset) as usize] - } -} - -impl IndexMut for V { - fn index_mut(&mut self, index: isize) -> &mut Self::Output { - &mut self.v[(index + self.offset) as usize] - } -} - -fn max_d(len1: usize, len2: usize) -> usize { - // XXX look into reducing the need to have the additional '+ 1' - (len1 + len2 + 1) / 2 + 1 -} - -#[inline(always)] -fn split_at(range: Range, at: usize) -> (Range, Range) { - (range.start..at, at..range.end) -} - -/// A `Snake` is a sequence of diagonal edges in the edit graph. Normally -/// a snake has a start end end point (and it is possible for a snake to have -/// a length of zero, meaning the start and end points are the same) however -/// we do not need the end point which is why it's not implemented here. -/// -/// The divide part of a divide-and-conquer strategy. A D-path has D+1 snakes -/// some of which may be empty. 
The divide step requires finding the ceil(D/2) + -/// 1 or middle snake of an optimal D-path. The idea for doing so is to -/// simultaneously run the basic algorithm in both the forward and reverse -/// directions until furthest reaching forward and reverse paths starting at -/// opposing corners 'overlap'. -fn find_middle_snake( - old: &Old, - old_range: Range, - new: &New, - new_range: Range, - vf: &mut V, - vb: &mut V, - deadline: Option, -) -> Option<(usize, usize)> -where - Old: Index + ?Sized, - New: Index + ?Sized, - New::Output: PartialEq, -{ - let n = old_range.len(); - let m = new_range.len(); - - // By Lemma 1 in the paper, the optimal edit script length is odd or even as - // `delta` is odd or even. - let delta = n as isize - m as isize; - let odd = delta & 1 == 1; - - // The initial point at (0, -1) - vf[1] = 0; - // The initial point at (N, M+1) - vb[1] = 0; - - // We only need to explore ceil(D/2) + 1 - let d_max = max_d(n, m); - assert!(vf.len() >= d_max); - assert!(vb.len() >= d_max); - - for d in 0..d_max as isize { - // are we running for too long? - if let Some(deadline) = deadline { - if Instant::now() > deadline { - break; - } - } - - // Forward path - for k in (-d..=d).rev().step_by(2) { - let mut x = if k == -d || (k != d && vf[k - 1] < vf[k + 1]) { - vf[k + 1] - } else { - vf[k - 1] + 1 - }; - let y = (x as isize - k) as usize; - - // The coordinate of the start of a snake - let (x0, y0) = (x, y); - // While these sequences are identical, keep moving through the - // graph with no cost - if x < old_range.len() && y < new_range.len() { - let advance = common_prefix_len( - old, - old_range.start + x..old_range.end, - new, - new_range.start + y..new_range.end, - ); - x += advance; - } - - // This is the new best x value - vf[k] = x; - - // Only check for connections from the forward search when N - M is - // odd and when there is a reciprocal k line coming from the other - // direction. 
- if odd && (k - delta).abs() <= (d - 1) { - // TODO optimize this so we don't have to compare against n - if vf[k] + vb[-(k - delta)] >= n { - // Return the snake - return Some((x0 + old_range.start, y0 + new_range.start)); - } - } - } - - // Backward path - for k in (-d..=d).rev().step_by(2) { - let mut x = if k == -d || (k != d && vb[k - 1] < vb[k + 1]) { - vb[k + 1] - } else { - vb[k - 1] + 1 - }; - let mut y = (x as isize - k) as usize; - - // The coordinate of the start of a snake - if x < n && y < m { - let advance = common_suffix_len( - old, - old_range.start..old_range.start + n - x, - new, - new_range.start..new_range.start + m - y, - ); - x += advance; - y += advance; - } - - // This is the new best x value - vb[k] = x; - - if !odd && (k - delta).abs() <= d { - // TODO optimize this so we don't have to compare against n - if vb[k] + vf[-(k - delta)] >= n { - // Return the snake - return Some((n - x + old_range.start, m - y + new_range.start)); - } - } - } - - // TODO: Maybe there's an opportunity to optimize and bail early? 
- } - - // deadline reached - None -} - -#[allow(clippy::too_many_arguments)] -fn conquer( - d: &mut D, - old: &Old, - mut old_range: Range, - new: &New, - mut new_range: Range, - vf: &mut V, - vb: &mut V, - deadline: Option, -) -> Result<(), D::Error> -where - Old: Index + ?Sized, - New: Index + ?Sized, - D: DiffHook, - New::Output: PartialEq, -{ - // Check for common prefix - let common_prefix_len = common_prefix_len(old, old_range.clone(), new, new_range.clone()); - if common_prefix_len > 0 { - d.equal(old_range.start, new_range.start, common_prefix_len)?; - } - old_range.start += common_prefix_len; - new_range.start += common_prefix_len; - - // Check for common suffix - let common_suffix_len = common_suffix_len(old, old_range.clone(), new, new_range.clone()); - let common_suffix = ( - old_range.end - common_suffix_len, - new_range.end - common_suffix_len, - ); - old_range.end -= common_suffix_len; - new_range.end -= common_suffix_len; - - if is_empty_range(&old_range) && is_empty_range(&new_range) { - // Do nothing - } else if is_empty_range(&new_range) { - d.delete(old_range.start, old_range.len(), new_range.start)?; - } else if is_empty_range(&old_range) { - d.insert(old_range.start, new_range.start, new_range.len())?; - } else if let Some((x_start, y_start)) = find_middle_snake( - old, - old_range.clone(), - new, - new_range.clone(), - vf, - vb, - deadline, - ) { - let (old_a, old_b) = split_at(old_range, x_start); - let (new_a, new_b) = split_at(new_range, y_start); - conquer(d, old, old_a, new, new_a, vf, vb, deadline)?; - conquer(d, old, old_b, new, new_b, vf, vb, deadline)?; - } else { - d.delete( - old_range.start, - old_range.end - old_range.start, - new_range.start, - )?; - d.insert( - old_range.start, - new_range.start, - new_range.end - new_range.start, - )?; - } - - if common_suffix_len > 0 { - d.equal(common_suffix.0, common_suffix.1, common_suffix_len)?; - } - - Ok(()) -} - -#[test] -fn test_find_middle_snake() { - let a = &b"ABCABBA"[..]; - let 
b = &b"CBABAC"[..]; - let max_d = max_d(a.len(), b.len()); - let mut vf = V::new(max_d); - let mut vb = V::new(max_d); - let (x_start, y_start) = - find_middle_snake(a, 0..a.len(), b, 0..b.len(), &mut vf, &mut vb, None).unwrap(); - assert_eq!(x_start, 4); - assert_eq!(y_start, 1); -} - -#[test] -fn test_diff() { - let a: &[usize] = &[0, 1, 2, 3, 4]; - let b: &[usize] = &[0, 1, 2, 9, 4]; - - let mut d = crate::algorithms::Replace::new(crate::algorithms::Capture::new()); - diff(&mut d, a, 0..a.len(), b, 0..b.len()).unwrap(); - insta::assert_debug_snapshot!(d.into_inner().ops()); -} - -#[test] -fn test_contiguous() { - let a: &[usize] = &[0, 1, 2, 3, 4, 4, 4, 5]; - let b: &[usize] = &[0, 1, 2, 8, 9, 4, 4, 7]; - - let mut d = crate::algorithms::Replace::new(crate::algorithms::Capture::new()); - diff(&mut d, a, 0..a.len(), b, 0..b.len()).unwrap(); - insta::assert_debug_snapshot!(d.into_inner().ops()); -} - -#[test] -fn test_pat() { - let a: &[usize] = &[0, 1, 3, 4, 5]; - let b: &[usize] = &[0, 1, 4, 5, 8, 9]; - - let mut d = crate::algorithms::Capture::new(); - diff(&mut d, a, 0..a.len(), b, 0..b.len()).unwrap(); - insta::assert_debug_snapshot!(d.ops()); -} - -#[test] -fn test_deadline_reached() { - use std::ops::Index; - use std::time::Duration; - - let a = (0..100).collect::>(); - let mut b = (0..100).collect::>(); - b[10] = 99; - b[50] = 99; - b[25] = 99; - - struct SlowIndex<'a>(&'a [usize]); - - impl<'a> Index for SlowIndex<'a> { - type Output = usize; - - fn index(&self, index: usize) -> &Self::Output { - std::thread::sleep(Duration::from_millis(1)); - &self.0[index] - } - } - - let slow_a = SlowIndex(&a); - let slow_b = SlowIndex(&b); - - // don't give it enough time to do anything interesting - let mut d = crate::algorithms::Replace::new(crate::algorithms::Capture::new()); - diff_deadline( - &mut d, - &slow_a, - 0..a.len(), - &slow_b, - 0..b.len(), - Some(Instant::now() + Duration::from_millis(50)), - ) - .unwrap(); - 
insta::assert_debug_snapshot!(d.into_inner().ops()); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/patience.rs s390-tools-2.33.1/rust-vendor/similar/src/algorithms/patience.rs --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/patience.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/patience.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,169 +0,0 @@ -//! Patience diff algorithm. -//! -//! * time: `O(N log N + M log M + (N+M)D)` -//! * space: `O(N+M)` -//! -//! Tends to give more human-readable outputs. See [Bram Cohen's blog -//! post](https://bramcohen.livejournal.com/73318.html) describing it. -//! -//! This is based on the patience implementation of [pijul](https://pijul.org/) -//! by Pierre-Étienne Meunier. -use std::hash::Hash; -use std::ops::{Index, Range}; -use std::time::Instant; - -use crate::algorithms::{myers, DiffHook, NoFinishHook, Replace}; - -use super::utils::{unique, UniqueItem}; - -/// Patience diff algorithm. -/// -/// Diff `old`, between indices `old_range` and `new` between indices `new_range`. -pub fn diff( - d: &mut D, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, -) -> Result<(), D::Error> -where - Old: Index + ?Sized, - New: Index + ?Sized, - Old::Output: Hash + Eq, - New::Output: PartialEq + Hash + Eq, - D: DiffHook, -{ - diff_deadline(d, old, old_range, new, new_range, None) -} - -/// Patience diff algorithm with deadline. -/// -/// Diff `old`, between indices `old_range` and `new` between indices `new_range`. -/// -/// This diff is done with an optional deadline that defines the maximal -/// execution time permitted before it bails and falls back to an approximation. 
-pub fn diff_deadline( - d: &mut D, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, - deadline: Option, -) -> Result<(), D::Error> -where - Old: Index + ?Sized, - New: Index + ?Sized, - Old::Output: Hash + Eq, - New::Output: PartialEq + Hash + Eq, - D: DiffHook, -{ - let old_indexes = unique(old, old_range.clone()); - let new_indexes = unique(new, new_range.clone()); - - let mut d = Replace::new(Patience { - d, - old, - old_current: old_range.start, - old_end: old_range.end, - old_indexes: &old_indexes, - new, - new_current: new_range.start, - new_end: new_range.end, - new_indexes: &new_indexes, - deadline, - }); - myers::diff_deadline( - &mut d, - &old_indexes, - 0..old_indexes.len(), - &new_indexes, - 0..new_indexes.len(), - deadline, - )?; - Ok(()) -} - -struct Patience<'old, 'new, 'd, Old: ?Sized, New: ?Sized, D> { - d: &'d mut D, - old: &'old Old, - old_current: usize, - old_end: usize, - old_indexes: &'old [UniqueItem<'old, Old>], - new: &'new New, - new_current: usize, - new_end: usize, - new_indexes: &'new [UniqueItem<'new, New>], - deadline: Option, -} - -impl<'old, 'new, 'd, Old, New, D> DiffHook for Patience<'old, 'new, 'd, Old, New, D> -where - D: DiffHook + 'd, - Old: Index + ?Sized + 'old, - New: Index + ?Sized + 'new, - New::Output: PartialEq, -{ - type Error = D::Error; - fn equal(&mut self, old: usize, new: usize, len: usize) -> Result<(), D::Error> { - for (old, new) in (old..old + len).zip(new..new + len) { - let a0 = self.old_current; - let b0 = self.new_current; - while self.old_current < self.old_indexes[old].original_index() - && self.new_current < self.new_indexes[new].original_index() - && self.new[self.new_current] == self.old[self.old_current] - { - self.old_current += 1; - self.new_current += 1; - } - if self.old_current > a0 { - self.d.equal(a0, b0, self.old_current - a0)?; - } - let mut no_finish_d = NoFinishHook::new(&mut self.d); - myers::diff_deadline( - &mut no_finish_d, - self.old, - 
self.old_current..self.old_indexes[old].original_index(), - self.new, - self.new_current..self.new_indexes[new].original_index(), - self.deadline, - )?; - self.old_current = self.old_indexes[old].original_index(); - self.new_current = self.new_indexes[new].original_index(); - } - Ok(()) - } - - fn finish(&mut self) -> Result<(), D::Error> { - myers::diff_deadline( - self.d, - self.old, - self.old_current..self.old_end, - self.new, - self.new_current..self.new_end, - self.deadline, - ) - } -} - -#[test] -fn test_patience() { - let a: &[usize] = &[11, 1, 2, 2, 3, 4, 4, 4, 5, 47, 19]; - let b: &[usize] = &[10, 1, 2, 2, 8, 9, 4, 4, 7, 47, 18]; - - let mut d = Replace::new(crate::algorithms::Capture::new()); - diff(&mut d, a, 0..a.len(), b, 0..b.len()).unwrap(); - - insta::assert_debug_snapshot!(d.into_inner().ops()); -} - -#[test] -fn test_patience_out_of_bounds_bug() { - // this used to be a bug - let a: &[usize] = &[1, 2, 3, 4]; - let b: &[usize] = &[1, 2, 3]; - - let mut d = Replace::new(crate::algorithms::Capture::new()); - diff(&mut d, a, 0..a.len(), b, 0..b.len()).unwrap(); - - insta::assert_debug_snapshot!(d.into_inner().ops()); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/replace.rs s390-tools-2.33.1/rust-vendor/similar/src/algorithms/replace.rs --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/replace.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/replace.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,221 +0,0 @@ -use crate::algorithms::DiffHook; - -/// A [`DiffHook`] that combines deletions and insertions to give blocks -/// of maximal length, and replacements when appropriate. -/// -/// It will replace [`DiffHook::insert`] and [`DiffHook::delete`] events when -/// possible with [`DiffHook::replace`] events. 
Note that even though the -/// text processing in the crate does not use replace events and always resolves -/// then back to delete and insert, it's useful to always use the replacer to -/// ensure a consistent order of inserts and deletes. This is why for instance -/// the text diffing automatically uses this hook internally. -pub struct Replace { - d: D, - del: Option<(usize, usize, usize)>, - ins: Option<(usize, usize, usize)>, - eq: Option<(usize, usize, usize)>, -} - -impl Replace { - /// Creates a new replace hook wrapping another hook. - pub fn new(d: D) -> Self { - Replace { - d, - del: None, - ins: None, - eq: None, - } - } - - /// Extracts the inner hook. - pub fn into_inner(self) -> D { - self.d - } - - fn flush_eq(&mut self) -> Result<(), D::Error> { - if let Some((eq_old_index, eq_new_index, eq_len)) = self.eq.take() { - self.d.equal(eq_old_index, eq_new_index, eq_len)? - } - Ok(()) - } - - fn flush_del_ins(&mut self) -> Result<(), D::Error> { - if let Some((del_old_index, del_old_len, del_new_index)) = self.del.take() { - if let Some((_, ins_new_index, ins_new_len)) = self.ins.take() { - self.d - .replace(del_old_index, del_old_len, ins_new_index, ins_new_len)?; - } else { - self.d.delete(del_old_index, del_old_len, del_new_index)?; - } - } else if let Some((ins_old_index, ins_new_index, ins_new_len)) = self.ins.take() { - self.d.insert(ins_old_index, ins_new_index, ins_new_len)?; - } - Ok(()) - } -} - -impl AsRef for Replace { - fn as_ref(&self) -> &D { - &self.d - } -} - -impl AsMut for Replace { - fn as_mut(&mut self) -> &mut D { - &mut self.d - } -} - -impl DiffHook for Replace { - type Error = D::Error; - - fn equal(&mut self, old_index: usize, new_index: usize, len: usize) -> Result<(), D::Error> { - self.flush_del_ins()?; - - self.eq = if let Some((eq_old_index, eq_new_index, eq_len)) = self.eq.take() { - Some((eq_old_index, eq_new_index, eq_len + len)) - } else { - Some((old_index, new_index, len)) - }; - - Ok(()) - } - - fn delete( - &mut 
self, - old_index: usize, - old_len: usize, - new_index: usize, - ) -> Result<(), D::Error> { - self.flush_eq()?; - if let Some((del_old_index, del_old_len, del_new_index)) = self.del.take() { - debug_assert_eq!(old_index, del_old_index + del_old_len); - self.del = Some((del_old_index, del_old_len + old_len, del_new_index)); - } else { - self.del = Some((old_index, old_len, new_index)); - } - Ok(()) - } - - fn insert( - &mut self, - old_index: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), D::Error> { - self.flush_eq()?; - self.ins = if let Some((ins_old_index, ins_new_index, ins_new_len)) = self.ins.take() { - debug_assert_eq!(ins_new_index + ins_new_len, new_index); - Some((ins_old_index, ins_new_index, new_len + ins_new_len)) - } else { - Some((old_index, new_index, new_len)) - }; - - Ok(()) - } - - fn replace( - &mut self, - old_index: usize, - old_len: usize, - new_index: usize, - new_len: usize, - ) -> Result<(), D::Error> { - self.flush_eq()?; - self.d.replace(old_index, old_len, new_index, new_len) - } - - fn finish(&mut self) -> Result<(), D::Error> { - self.flush_eq()?; - self.flush_del_ins()?; - self.d.finish() - } -} - -#[test] -fn test_mayers_replace() { - use crate::algorithms::{diff_slices, Algorithm}; - let a: &[&str] = &[ - ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n", - "a\n", - "b\n", - "c\n", - "================================\n", - "d\n", - "e\n", - "f\n", - "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n", - ]; - let b: &[&str] = &[ - ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n", - "x\n", - "b\n", - "c\n", - "================================\n", - "y\n", - "e\n", - "f\n", - "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n", - ]; - - let mut d = Replace::new(crate::algorithms::Capture::new()); - diff_slices(Algorithm::Myers, &mut d, a, b).unwrap(); - - insta::assert_debug_snapshot!(&d.into_inner().ops(), @r###" - [ - Equal { - old_index: 0, - new_index: 0, - len: 1, - }, - Replace { - old_index: 1, - old_len: 1, - new_index: 1, - new_len: 1, - }, - Equal { - 
old_index: 2, - new_index: 2, - len: 3, - }, - Replace { - old_index: 5, - old_len: 1, - new_index: 5, - new_len: 1, - }, - Equal { - old_index: 6, - new_index: 6, - len: 3, - }, - ] - "###); -} - -#[test] -fn test_replace() { - use crate::algorithms::{diff_slices, Algorithm}; - - let a: &[usize] = &[0, 1, 2, 3, 4]; - let b: &[usize] = &[0, 1, 2, 7, 8, 9]; - - let mut d = Replace::new(crate::algorithms::Capture::new()); - diff_slices(Algorithm::Myers, &mut d, a, b).unwrap(); - insta::assert_debug_snapshot!(d.into_inner().ops(), @r###" - [ - Equal { - old_index: 0, - new_index: 0, - len: 3, - }, - Replace { - old_index: 3, - old_len: 2, - new_index: 3, - new_len: 3, - }, - ] - "###); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__capture__capture_hook_grouping-2.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__capture__capture_hook_grouping-2.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__capture__capture_hook_grouping-2.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__capture__capture_hook_grouping-2.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ ---- -source: src/algorithms/capture.rs -expression: tags ---- -[ - [ - ( - Equal, - 7..10, - 7..10, - ), - ( - Replace, - 10..11, - 10..11, - ), - ( - Equal, - 11..13, - 11..13, - ), - ( - Replace, - 13..14, - 13..14, - ), - ( - Equal, - 14..16, - 14..16, - ), - ( - Replace, - 16..17, - 16..17, - ), - ( - Equal, - 17..20, - 17..20, - ), - ], - [ - ( - Equal, - 31..34, - 31..34, - ), - ( - Replace, - 34..35, - 34..35, - ), - ( - Equal, - 35..38, - 35..38, - ), - ], -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__capture__capture_hook_grouping.snap 
s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__capture__capture_hook_grouping.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__capture__capture_hook_grouping.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__capture__capture_hook_grouping.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,64 +0,0 @@ ---- -source: src/algorithms/capture.rs -expression: ops ---- -[ - [ - Equal { - old_index: 7, - new_index: 7, - len: 3, - }, - Replace { - old_index: 10, - old_len: 1, - new_index: 10, - new_len: 1, - }, - Equal { - old_index: 11, - new_index: 11, - len: 2, - }, - Replace { - old_index: 13, - old_len: 1, - new_index: 13, - new_len: 1, - }, - Equal { - old_index: 14, - new_index: 14, - len: 2, - }, - Replace { - old_index: 16, - old_len: 1, - new_index: 16, - new_len: 1, - }, - Equal { - old_index: 17, - new_index: 17, - len: 3, - }, - ], - [ - Equal { - old_index: 31, - new_index: 31, - len: 3, - }, - Replace { - old_index: 34, - old_len: 1, - new_index: 34, - new_len: 1, - }, - Equal { - old_index: 35, - new_index: 35, - len: 3, - }, - ], -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__contiguous.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__contiguous.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__contiguous.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__contiguous.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,28 +0,0 @@ ---- -source: src/algorithms/lcs.rs -expression: d.into_inner().ops() ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 3, - }, - Replace { - old_index: 3, - old_len: 2, - new_index: 3, - new_len: 2, - }, - Equal { - old_index: 5, - new_index: 5, - len: 
2, - }, - Replace { - old_index: 7, - old_len: 1, - new_index: 7, - new_len: 1, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__diff.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__diff.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__diff.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__diff.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ ---- -source: src/algorithms/lcs.rs -expression: d.into_inner().ops() ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 3, - }, - Replace { - old_index: 3, - old_len: 1, - new_index: 3, - new_len: 1, - }, - Equal { - old_index: 4, - new_index: 4, - len: 1, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__pat.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__pat.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__pat.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__pat.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ ---- -source: src/algorithms/lcs.rs -expression: d.ops() ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 2, - }, - Delete { - old_index: 2, - old_len: 1, - new_index: 2, - }, - Equal { - old_index: 3, - new_index: 2, - len: 1, - }, - Equal { - old_index: 4, - new_index: 3, - len: 1, - }, - Insert { - old_index: 5, - new_index: 4, - new_len: 2, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__same.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__same.snap --- 
s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__same.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__lcs__same.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ ---- -source: src/algorithms/lcs.rs -assertion_line: 235 -expression: d.ops() ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 8, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__contiguous.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__contiguous.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__contiguous.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__contiguous.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,28 +0,0 @@ ---- -source: src/algorithms/myers.rs -expression: d.into_inner().ops() ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 3, - }, - Replace { - old_index: 3, - old_len: 1, - new_index: 3, - new_len: 2, - }, - Equal { - old_index: 4, - new_index: 5, - len: 2, - }, - Replace { - old_index: 6, - old_len: 2, - new_index: 7, - new_len: 1, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__deadline_reached.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__deadline_reached.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__deadline_reached.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__deadline_reached.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ ---- -source: src/algorithms/myers.rs -expression: d.into_inner().ops() ---- -[ - Equal { - old_index: 0, - 
new_index: 0, - len: 10, - }, - Replace { - old_index: 10, - old_len: 41, - new_index: 10, - new_len: 41, - }, - Equal { - old_index: 51, - new_index: 51, - len: 49, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__diff.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__diff.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__diff.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__diff.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ ---- -source: src/algorithms/myers.rs -expression: d.into_inner().ops() ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 3, - }, - Replace { - old_index: 3, - old_len: 1, - new_index: 3, - new_len: 1, - }, - Equal { - old_index: 4, - new_index: 4, - len: 1, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__pat.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__pat.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__pat.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__myers__pat.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ ---- -source: src/algorithms/myers.rs -expression: d.ops() ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 2, - }, - Delete { - old_index: 2, - old_len: 1, - new_index: 2, - }, - Equal { - old_index: 3, - new_index: 2, - len: 2, - }, - Insert { - old_index: 5, - new_index: 4, - new_len: 1, - }, - Insert { - old_index: 5, - new_index: 5, - new_len: 1, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__patience__patience_out_of_bounds_bug.snap 
s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__patience__patience_out_of_bounds_bug.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__patience__patience_out_of_bounds_bug.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__patience__patience_out_of_bounds_bug.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,16 +0,0 @@ ---- -source: src/algorithms/patience.rs -expression: d.into_inner().ops() ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 3, - }, - Delete { - old_index: 3, - old_len: 1, - new_index: 3, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__patience__patience.snap s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__patience__patience.snap --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__patience__patience.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/snapshots/similar__algorithms__patience__patience.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,45 +0,0 @@ ---- -source: src/algorithms/patience.rs -expression: d.into_inner().ops() ---- -[ - Replace { - old_index: 0, - old_len: 1, - new_index: 0, - new_len: 1, - }, - Equal { - old_index: 1, - new_index: 1, - len: 3, - }, - Replace { - old_index: 4, - old_len: 1, - new_index: 4, - new_len: 2, - }, - Equal { - old_index: 5, - new_index: 6, - len: 2, - }, - Replace { - old_index: 7, - old_len: 2, - new_index: 8, - new_len: 1, - }, - Equal { - old_index: 9, - new_index: 9, - len: 1, - }, - Replace { - old_index: 10, - old_len: 1, - new_index: 10, - new_len: 1, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/algorithms/utils.rs s390-tools-2.33.1/rust-vendor/similar/src/algorithms/utils.rs --- s390-tools-2.31.0/rust-vendor/similar/src/algorithms/utils.rs 2024-02-06 
12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/algorithms/utils.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,379 +0,0 @@ -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::fmt::Debug; -use std::hash::{Hash, Hasher}; -use std::ops::{Add, Index, Range}; - -/// Utility function to check if a range is empty that works on older rust versions -#[inline(always)] -#[allow(clippy::neg_cmp_op_on_partial_ord)] -pub fn is_empty_range>(range: &Range) -> bool { - !(range.start < range.end) -} - -/// Represents an item in the vector returned by [`unique`]. -/// -/// It compares like the underlying item does it was created from but -/// carries the index it was originally created from. -pub struct UniqueItem<'a, Idx: ?Sized> { - lookup: &'a Idx, - index: usize, -} - -impl<'a, Idx: ?Sized> UniqueItem<'a, Idx> -where - Idx: Index, -{ - /// Returns the value. - #[inline(always)] - pub fn value(&self) -> &Idx::Output { - &self.lookup[self.index] - } - - /// Returns the original index. - #[inline(always)] - pub fn original_index(&self) -> usize { - self.index - } -} - -impl<'a, Idx: Index + 'a> Debug for UniqueItem<'a, Idx> -where - Idx::Output: Debug, -{ - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - f.debug_struct("UniqueItem") - .field("value", &self.value()) - .field("original_index", &self.original_index()) - .finish() - } -} - -impl<'a, 'b, A, B> PartialEq> for UniqueItem<'b, B> -where - A: Index + 'b + ?Sized, - B: Index + 'b + ?Sized, - B::Output: PartialEq, -{ - #[inline(always)] - fn eq(&self, other: &UniqueItem<'a, A>) -> bool { - self.value() == other.value() - } -} - -/// Returns only unique items in the sequence as vector. -/// -/// Each item is wrapped in a [`UniqueItem`] so that both the value and the -/// index can be extracted. 
-pub fn unique(lookup: &Idx, range: Range) -> Vec> -where - Idx: Index + ?Sized, - Idx::Output: Hash + Eq, -{ - let mut by_item = HashMap::new(); - for index in range { - match by_item.entry(&lookup[index]) { - Entry::Vacant(entry) => { - entry.insert(Some(index)); - } - Entry::Occupied(mut entry) => { - let entry = entry.get_mut(); - if entry.is_some() { - *entry = None - } - } - } - } - let mut rv = by_item - .into_iter() - .filter_map(|(_, x)| x) - .map(|index| UniqueItem { lookup, index }) - .collect::>(); - rv.sort_by_key(|a| a.original_index()); - rv -} - -/// Given two lookups and ranges calculates the length of the common prefix. -pub fn common_prefix_len( - old: &Old, - old_range: Range, - new: &New, - new_range: Range, -) -> usize -where - Old: Index + ?Sized, - New: Index + ?Sized, - New::Output: PartialEq, -{ - if is_empty_range(&old_range) || is_empty_range(&new_range) { - return 0; - } - new_range - .zip(old_range) - .take_while( - #[inline(always)] - |x| new[x.0] == old[x.1], - ) - .count() -} - -/// Given two lookups and ranges calculates the length of common suffix. -pub fn common_suffix_len( - old: &Old, - old_range: Range, - new: &New, - new_range: Range, -) -> usize -where - Old: Index + ?Sized, - New: Index + ?Sized, - New::Output: PartialEq, -{ - if is_empty_range(&old_range) || is_empty_range(&new_range) { - return 0; - } - new_range - .rev() - .zip(old_range.rev()) - .take_while( - #[inline(always)] - |x| new[x.0] == old[x.1], - ) - .count() -} - -struct OffsetLookup { - offset: usize, - vec: Vec, -} - -impl Index for OffsetLookup { - type Output = Int; - - #[inline(always)] - fn index(&self, index: usize) -> &Self::Output { - &self.vec[index - self.offset] - } -} - -/// A utility struct to convert distinct items to unique integers. -/// -/// This can be helpful on larger inputs to speed up the comparisons -/// performed by doing a first pass where the data set gets reduced -/// to (small) integers. 
-/// -/// The idea is that instead of passing two sequences to a diffling algorithm -/// you first pass it via [`IdentifyDistinct`]: -/// -/// ```rust -/// use similar::capture_diff; -/// use similar::algorithms::{Algorithm, IdentifyDistinct}; -/// -/// let old = &["foo", "bar", "baz"][..]; -/// let new = &["foo", "blah", "baz"][..]; -/// let h = IdentifyDistinct::::new(old, 0..old.len(), new, 0..new.len()); -/// let ops = capture_diff( -/// Algorithm::Myers, -/// h.old_lookup(), -/// h.old_range(), -/// h.new_lookup(), -/// h.new_range(), -/// ); -/// ``` -/// -/// The indexes are the same as with the passed source ranges. -pub struct IdentifyDistinct { - old: OffsetLookup, - new: OffsetLookup, -} - -impl IdentifyDistinct -where - Int: Add + From + Default + Copy, -{ - /// Creates an int hasher for two sequences. - pub fn new( - old: &Old, - old_range: Range, - new: &New, - new_range: Range, - ) -> Self - where - Old: Index + ?Sized, - Old::Output: Eq + Hash, - New: Index + ?Sized, - New::Output: Eq + Hash + PartialEq, - { - enum Key<'old, 'new, Old: ?Sized, New: ?Sized> { - Old(&'old Old), - New(&'new New), - } - - impl<'old, 'new, Old, New> Hash for Key<'old, 'new, Old, New> - where - Old: Hash + ?Sized, - New: Hash + ?Sized, - { - fn hash(&self, state: &mut H) { - match *self { - Key::Old(val) => val.hash(state), - Key::New(val) => val.hash(state), - } - } - } - - impl<'old, 'new, Old, New> PartialEq for Key<'old, 'new, Old, New> - where - Old: Eq + ?Sized, - New: Eq + PartialEq + ?Sized, - { - #[inline(always)] - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Key::Old(a), Key::Old(b)) => a == b, - (Key::New(a), Key::New(b)) => a == b, - (Key::Old(a), Key::New(b)) | (Key::New(b), Key::Old(a)) => b == a, - } - } - } - - impl<'old, 'new, Old, New> Eq for Key<'old, 'new, Old, New> - where - Old: Eq + ?Sized, - New: Eq + PartialEq + ?Sized, - { - } - - let mut map = HashMap::new(); - let mut old_seq = Vec::new(); - let mut new_seq = Vec::new(); - 
let mut next_id = Int::default(); - let step = Int::from(1); - let old_start = old_range.start; - let new_start = new_range.start; - - for idx in old_range { - let item = Key::Old(&old[idx]); - let id = match map.entry(item) { - Entry::Occupied(o) => *o.get(), - Entry::Vacant(v) => { - let id = next_id; - next_id = next_id + step; - *v.insert(id) - } - }; - old_seq.push(id); - } - - for idx in new_range { - let item = Key::New(&new[idx]); - let id = match map.entry(item) { - Entry::Occupied(o) => *o.get(), - Entry::Vacant(v) => { - let id = next_id; - next_id = next_id + step; - *v.insert(id) - } - }; - new_seq.push(id); - } - - IdentifyDistinct { - old: OffsetLookup { - offset: old_start, - vec: old_seq, - }, - new: OffsetLookup { - offset: new_start, - vec: new_seq, - }, - } - } - - /// Returns a lookup for the old side. - pub fn old_lookup(&self) -> &impl Index { - &self.old - } - - /// Returns a lookup for the new side. - pub fn new_lookup(&self) -> &impl Index { - &self.new - } - - /// Convenience method to get back the old range. - pub fn old_range(&self) -> Range { - self.old.offset..self.old.offset + self.old.vec.len() - } - - /// Convenience method to get back the new range. 
- pub fn new_range(&self) -> Range { - self.new.offset..self.new.offset + self.new.vec.len() - } -} - -#[test] -fn test_unique() { - let u = unique(&vec!['a', 'b', 'c', 'd', 'd', 'b'], 0..6) - .into_iter() - .map(|x| (*x.value(), x.original_index())) - .collect::>(); - assert_eq!(u, vec![('a', 0), ('c', 2)]); -} - -#[test] -fn test_int_hasher() { - let ih = IdentifyDistinct::::new( - &["", "foo", "bar", "baz"][..], - 1..4, - &["", "foo", "blah", "baz"][..], - 1..4, - ); - assert_eq!(ih.old_lookup()[1], 0); - assert_eq!(ih.old_lookup()[2], 1); - assert_eq!(ih.old_lookup()[3], 2); - assert_eq!(ih.new_lookup()[1], 0); - assert_eq!(ih.new_lookup()[2], 3); - assert_eq!(ih.new_lookup()[3], 2); - assert_eq!(ih.old_range(), 1..4); - assert_eq!(ih.new_range(), 1..4); -} - -#[test] -fn test_common_prefix_len() { - assert_eq!( - common_prefix_len("".as_bytes(), 0..0, "".as_bytes(), 0..0), - 0 - ); - assert_eq!( - common_prefix_len("foobarbaz".as_bytes(), 0..9, "foobarblah".as_bytes(), 0..10), - 7 - ); - assert_eq!( - common_prefix_len("foobarbaz".as_bytes(), 0..9, "blablabla".as_bytes(), 0..9), - 0 - ); - assert_eq!( - common_prefix_len("foobarbaz".as_bytes(), 3..9, "foobarblah".as_bytes(), 3..10), - 4 - ); -} - -#[test] -fn test_common_suffix_len() { - assert_eq!( - common_suffix_len("".as_bytes(), 0..0, "".as_bytes(), 0..0), - 0 - ); - assert_eq!( - common_suffix_len("1234".as_bytes(), 0..4, "X0001234".as_bytes(), 0..8), - 4 - ); - assert_eq!( - common_suffix_len("1234".as_bytes(), 0..4, "Xxxx".as_bytes(), 0..4), - 0 - ); - assert_eq!( - common_suffix_len("1234".as_bytes(), 2..4, "01234".as_bytes(), 2..5), - 2 - ); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/common.rs s390-tools-2.33.1/rust-vendor/similar/src/common.rs --- s390-tools-2.31.0/rust-vendor/similar/src/common.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/common.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,185 +0,0 @@ -use std::hash::Hash; -use std::ops::{Index, 
Range}; -use std::time::Instant; - -use crate::algorithms::{diff_deadline, Capture, Compact, Replace}; -use crate::{Algorithm, DiffOp}; - -/// Creates a diff between old and new with the given algorithm capturing the ops. -/// -/// This is like [`diff`](crate::algorithms::diff) but instead of using an -/// arbitrary hook this will always use [`Compact`] + [`Replace`] + [`Capture`] -/// and return the captured [`DiffOp`]s. -pub fn capture_diff( - alg: Algorithm, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, -) -> Vec -where - Old: Index + ?Sized, - New: Index + ?Sized, - Old::Output: Hash + Eq + Ord, - New::Output: PartialEq + Hash + Eq + Ord, -{ - capture_diff_deadline(alg, old, old_range, new, new_range, None) -} - -/// Creates a diff between old and new with the given algorithm capturing the ops. -/// -/// Works like [`capture_diff`] but with an optional deadline. -pub fn capture_diff_deadline( - alg: Algorithm, - old: &Old, - old_range: Range, - new: &New, - new_range: Range, - deadline: Option, -) -> Vec -where - Old: Index + ?Sized, - New: Index + ?Sized, - Old::Output: Hash + Eq + Ord, - New::Output: PartialEq + Hash + Eq + Ord, -{ - let mut d = Compact::new(Replace::new(Capture::new()), old, new); - diff_deadline(alg, &mut d, old, old_range, new, new_range, deadline).unwrap(); - d.into_inner().into_inner().into_ops() -} - -/// Creates a diff between old and new with the given algorithm capturing the ops. -pub fn capture_diff_slices(alg: Algorithm, old: &[T], new: &[T]) -> Vec -where - T: Eq + Hash + Ord, -{ - capture_diff_slices_deadline(alg, old, new, None) -} - -/// Creates a diff between old and new with the given algorithm capturing the ops. -/// -/// Works like [`capture_diff_slices`] but with an optional deadline. 
-pub fn capture_diff_slices_deadline( - alg: Algorithm, - old: &[T], - new: &[T], - deadline: Option, -) -> Vec -where - T: Eq + Hash + Ord, -{ - capture_diff_deadline(alg, old, 0..old.len(), new, 0..new.len(), deadline) -} - -/// Return a measure of similarity in the range `0..=1`. -/// -/// A ratio of `1.0` means the two sequences are a complete match, a -/// ratio of `0.0` would indicate completely distinct sequences. The input -/// is the sequence of diff operations and the length of the old and new -/// sequence. -pub fn get_diff_ratio(ops: &[DiffOp], old_len: usize, new_len: usize) -> f32 { - let matches = ops - .iter() - .map(|op| { - if let DiffOp::Equal { len, .. } = *op { - len - } else { - 0 - } - }) - .sum::(); - let len = old_len + new_len; - if len == 0 { - 1.0 - } else { - 2.0 * matches as f32 / len as f32 - } -} - -/// Isolate change clusters by eliminating ranges with no changes. -/// -/// This will leave holes behind in long periods of equal ranges so that -/// you can build things like unified diffs. -pub fn group_diff_ops(mut ops: Vec, n: usize) -> Vec> { - if ops.is_empty() { - return vec![]; - } - - let mut pending_group = Vec::new(); - let mut rv = Vec::new(); - - if let Some(DiffOp::Equal { - old_index, - new_index, - len, - }) = ops.first_mut() - { - let offset = (*len).saturating_sub(n); - *old_index += offset; - *new_index += offset; - *len -= offset; - } - - if let Some(DiffOp::Equal { len, .. }) = ops.last_mut() { - *len -= (*len).saturating_sub(n); - } - - for op in ops.into_iter() { - if let DiffOp::Equal { - old_index, - new_index, - len, - } = op - { - // End the current group and start a new one whenever - // there is a large range with no changes. 
- if len > n * 2 { - pending_group.push(DiffOp::Equal { - old_index, - new_index, - len: n, - }); - rv.push(pending_group); - let offset = len.saturating_sub(n); - pending_group = vec![DiffOp::Equal { - old_index: old_index + offset, - new_index: new_index + offset, - len: len - offset, - }]; - continue; - } - } - pending_group.push(op); - } - - match &pending_group[..] { - &[] | &[DiffOp::Equal { .. }] => {} - _ => rv.push(pending_group), - } - - rv -} - -#[test] -fn test_non_string_iter_change() { - use crate::ChangeTag; - - let old = vec![1, 2, 3]; - let new = vec![1, 2, 4]; - let ops = capture_diff_slices(Algorithm::Myers, &old, &new); - let changes: Vec<_> = ops - .iter() - .flat_map(|x| x.iter_changes(&old, &new)) - .map(|x| (x.tag(), x.value())) - .collect(); - - assert_eq!( - changes, - vec![ - (ChangeTag::Equal, 1), - (ChangeTag::Equal, 2), - (ChangeTag::Delete, 3), - (ChangeTag::Insert, 4), - ] - ); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/iter.rs s390-tools-2.33.1/rust-vendor/similar/src/iter.rs --- s390-tools-2.31.0/rust-vendor/similar/src/iter.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/iter.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,195 +0,0 @@ -//! The various iterators this crate provides. -//! -//! These iterators are not a very stable interface and you really should -//! avoid considering them to be concrete types. A lot of the iterators in -//! this crate use `impl Iterator` for this reason but restrictions in the -//! language don't allow this to be used in all places on the versions of -//! rust this crate wants to compile for. -use std::marker::PhantomData; -use std::ops::{Index, Range}; - -use crate::{Change, ChangeTag, DiffOp, DiffTag}; - -/// Iterator for [`DiffOp::iter_changes`]. 
-pub struct ChangesIter<'lookup, Old: ?Sized, New: ?Sized, T> { - old: &'lookup Old, - new: &'lookup New, - old_range: Range, - new_range: Range, - old_index: usize, - new_index: usize, - old_i: usize, - new_i: usize, - tag: DiffTag, - _marker: PhantomData, -} - -impl<'lookup, Old, New, T> ChangesIter<'lookup, Old, New, T> -where - Old: Index + ?Sized, - New: Index + ?Sized, -{ - pub(crate) fn new(old: &'lookup Old, new: &'lookup New, op: DiffOp) -> Self { - let (tag, old_range, new_range) = op.as_tag_tuple(); - let old_index = old_range.start; - let new_index = new_range.start; - let old_i = old_range.start; - let new_i = new_range.start; - ChangesIter { - old, - new, - old_range, - new_range, - old_index, - new_index, - old_i, - new_i, - tag, - _marker: PhantomData, - } - } -} - -impl<'lookup, Old, New, T> Iterator for ChangesIter<'lookup, Old, New, T> -where - Old: Index + ?Sized, - New: Index + ?Sized, - T: Clone, -{ - type Item = Change; - - fn next(&mut self) -> Option { - match self.tag { - DiffTag::Equal => { - if self.old_i < self.old_range.end { - let value = self.old[self.old_i].clone(); - self.old_i += 1; - self.old_index += 1; - self.new_index += 1; - Some(Change { - tag: ChangeTag::Equal, - old_index: Some(self.old_index - 1), - new_index: Some(self.new_index - 1), - value, - }) - } else { - None - } - } - DiffTag::Delete => { - if self.old_i < self.old_range.end { - let value = self.old[self.old_i].clone(); - self.old_i += 1; - self.old_index += 1; - Some(Change { - tag: ChangeTag::Delete, - old_index: Some(self.old_index - 1), - new_index: None, - value, - }) - } else { - None - } - } - DiffTag::Insert => { - if self.new_i < self.new_range.end { - let value = self.new[self.new_i].clone(); - self.new_i += 1; - self.new_index += 1; - Some(Change { - tag: ChangeTag::Insert, - old_index: None, - new_index: Some(self.new_index - 1), - value, - }) - } else { - None - } - } - DiffTag::Replace => { - if self.old_i < self.old_range.end { - let value = 
self.old[self.old_i].clone(); - self.old_i += 1; - self.old_index += 1; - Some(Change { - tag: ChangeTag::Delete, - old_index: Some(self.old_index - 1), - new_index: None, - value, - }) - } else if self.new_i < self.new_range.end { - let value = self.new[self.new_i].clone(); - self.new_i += 1; - self.new_index += 1; - Some(Change { - tag: ChangeTag::Insert, - old_index: None, - new_index: Some(self.new_index - 1), - value, - }) - } else { - None - } - } - } - } -} - -#[cfg(feature = "text")] -mod text { - use super::*; - - /// Iterator for [`TextDiff::iter_all_changes`](crate::TextDiff::iter_all_changes). - pub struct AllChangesIter<'slf, 'data, T: ?Sized> { - old: &'slf [&'data T], - new: &'slf [&'data T], - ops: &'slf [DiffOp], - current_iter: Option>, - } - - impl<'slf, 'data, T> AllChangesIter<'slf, 'data, T> - where - T: 'data + ?Sized + PartialEq, - { - pub(crate) fn new( - old: &'slf [&'data T], - new: &'slf [&'data T], - ops: &'slf [DiffOp], - ) -> Self { - AllChangesIter { - old, - new, - ops, - current_iter: None, - } - } - } - - impl<'slf, 'data, T> Iterator for AllChangesIter<'slf, 'data, T> - where - T: PartialEq + 'data + ?Sized, - 'data: 'slf, - { - type Item = Change<&'data T>; - - fn next(&mut self) -> Option { - loop { - if let Some(ref mut iter) = self.current_iter { - if let Some(rv) = iter.next() { - return Some(rv); - } - self.current_iter.take(); - } - if let Some((&first, rest)) = self.ops.split_first() { - self.current_iter = Some(ChangesIter::new(self.old, self.new, first)); - self.ops = rest; - } else { - return None; - } - } - } - } -} - -#[cfg(feature = "text")] -pub use self::text::*; diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/lib.rs s390-tools-2.33.1/rust-vendor/similar/src/lib.rs --- s390-tools-2.31.0/rust-vendor/similar/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,163 +0,0 @@ -//! This crate implements diffing utilities. 
It attempts to provide an abstraction -//! interface over different types of diffing algorithms. The design of the -//! library is inspired by pijul's diff library by Pierre-Étienne Meunier and -//! also inherits the patience diff algorithm from there. -//! -//! The API of the crate is split into high and low level functionality. Most -//! of what you probably want to use is available top level. Additionally the -//! following sub modules exist: -//! -//! * [`algorithms`]: This implements the different types of diffing algorithms. -//! It provides both low level access to the algorithms with the minimal -//! trait bounds necessary, as well as a generic interface. -//! * [`udiff`]: Unified diff functionality. -//! * [`utils`]: utilities for common diff related operations. This module -//! provides additional diffing functions for working with text diffs. -//! -//! # Sequence Diffing -//! -//! If you want to diff sequences generally indexable things you can use the -//! [`capture_diff`] and [`capture_diff_slices`] functions. They will directly -//! diff an indexable object or slice and return a vector of [`DiffOp`] objects. -//! -//! ```rust -//! use similar::{Algorithm, capture_diff_slices}; -//! -//! let a = vec![1, 2, 3, 4, 5]; -//! let b = vec![1, 2, 3, 4, 7]; -//! let ops = capture_diff_slices(Algorithm::Myers, &a, &b); -//! ``` -//! -//! # Text Diffing -//! -//! Similar provides helpful utilities for text (and more specifically line) diff -//! operations. The main type you want to work with is [`TextDiff`] which -//! uses the underlying diff algorithms to expose a convenient API to work with -//! texts: -//! -//! ```rust -//! # #[cfg(feature = "text")] { -//! use similar::{ChangeTag, TextDiff}; -//! -//! let diff = TextDiff::from_lines( -//! "Hello World\nThis is the second line.\nThis is the third.", -//! "Hallo Welt\nThis is the second line.\nThis is life.\nMoar and more", -//! ); -//! -//! for change in diff.iter_all_changes() { -//! 
let sign = match change.tag() { -//! ChangeTag::Delete => "-", -//! ChangeTag::Insert => "+", -//! ChangeTag::Equal => " ", -//! }; -//! print!("{}{}", sign, change); -//! } -//! # } -//! ``` -//! -//! ## Trailing Newlines -//! -//! When working with line diffs (and unified diffs in general) there are two -//! "philosophies" to look at lines. One is to diff lines without their newline -//! character, the other is to diff with the newline character. Typically the -//! latter is done because text files do not _have_ to end in a newline character. -//! As a result there is a difference between `foo\n` and `foo` as far as diffs -//! are concerned. -//! -//! In similar this is handled on the [`Change`] or [`InlineChange`] level. If -//! a diff was created via [`TextDiff::from_lines`] the text diffing system is -//! instructed to check if there are missing newlines encountered -//! ([`TextDiff::newline_terminated`] returns true). -//! -//! In any case the [`Change`] object has a convenience method called -//! [`Change::missing_newline`] which returns `true` if the change is missing -//! a trailing newline. Armed with that information the caller knows to handle -//! this by either rendering a virtual newline at that position or to indicate -//! it in different ways. For instance the unified diff code will render the -//! special `\ No newline at end of file` marker. -//! -//! ## Bytes vs Unicode -//! -//! Similar module concerns itself with a looser definition of "text" than you would -//! normally see in Rust. While by default it can only operate on [`str`] types, -//! by enabling the `bytes` feature it gains support for byte slices with some -//! caveats. -//! -//! A lot of text diff functionality assumes that what is being diffed constitutes -//! text, but in the real world it can often be challenging to ensure that this is -//! all valid utf-8. Because of this the crate is built so that most functionality -//! 
also still works with bytes for as long as they are roughly ASCII compatible. -//! -//! This means you will be successful in creating a unified diff from latin1 -//! encoded bytes but if you try to do the same with EBCDIC encoded bytes you -//! will only get garbage. -//! -//! # Ops vs Changes -//! -//! Because very commonly two compared sequences will largely match this module -//! splits its functionality into two layers: -//! -//! Changes are encoded as [diff operations](crate::DiffOp). These are -//! ranges of the differences by index in the source sequence. Because this -//! can be cumbersome to work with, a separate method [`DiffOp::iter_changes`] -//! (and [`TextDiff::iter_changes`] when working with text diffs) is provided -//! which expands all the changes on an item by item level encoded in an operation. -//! -//! As the [`TextDiff::grouped_ops`] method can isolate clusters of changes -//! this even works for very long files if paired with this method. -//! -//! # Deadlines and Performance -//! -//! For large and very distinct inputs the algorithms as implemented can take -//! a very, very long time to execute. Too long to make sense in practice. -//! To work around this issue all diffing algorithms also provide a version -//! that accepts a deadline which is the point in time as defined by an -//! [`Instant`](std::time::Instant) after which the algorithm should give up. -//! What giving up means depends on the algorithm. For instance due to the -//! recursive, divide and conquer nature of Myer's diff you will still get a -//! pretty decent diff in many cases when a deadline is reached. Whereas on the -//! other hand the LCS diff is unlikely to give any decent results in such a -//! situation. -//! -//! The [`TextDiff`] type also lets you configure a deadline and/or timeout -//! when performing a text diff. -//! -//! # Feature Flags -//! -//! The crate by default does not have any dependencies however for some use -//! 
cases it's useful to pull in extra functionality. Likewise you can turn -//! off some functionality. -//! -//! * `text`: this feature is enabled by default and enables the text based -//! diffing types such as [`TextDiff`]. -//! If the crate is used without default features it's removed. -//! * `unicode`: when this feature is enabled the text diffing functionality -//! gains the ability to diff on a grapheme instead of character level. This -//! is particularly useful when working with text containing emojis. This -//! pulls in some relatively complex dependencies for working with the unicode -//! database. -//! * `bytes`: this feature adds support for working with byte slices in text -//! APIs in addition to unicode strings. This pulls in the -//! [`bstr`] dependency. -//! * `inline`: this feature gives access to additional functionality of the -//! text diffing to provide inline information about which values changed -//! in a line diff. This currently also enables the `unicode` feature. -//! * `serde`: this feature enables serialization to some types in this -//! crate. For enums without payload deserialization is then also supported. 
-#![warn(missing_docs)] -pub mod algorithms; -pub mod iter; -#[cfg(feature = "text")] -pub mod udiff; -#[cfg(feature = "text")] -pub mod utils; - -mod common; -#[cfg(feature = "text")] -mod text; -mod types; - -pub use self::common::*; -#[cfg(feature = "text")] -pub use self::text::*; -pub use self::types::*; diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff_newline_hint-2.snap s390-tools-2.33.1/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff_newline_hint-2.snap --- s390-tools-2.31.0/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff_newline_hint-2.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff_newline_hint-2.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,10 +0,0 @@ ---- -source: src/udiff.rs -expression: "&diff.unified_diff().missing_newline_hint(false).header(\"a.txt\",\n \"b.txt\").to_string()" ---- ---- a.txt -+++ b.txt -@@ -1 +1 @@ --a -+b - diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff_newline_hint.snap s390-tools-2.33.1/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff_newline_hint.snap --- s390-tools-2.31.0/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff_newline_hint.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff_newline_hint.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ ---- -source: src/udiff.rs -expression: "&diff.unified_diff().header(\"a.txt\", \"b.txt\").to_string()" ---- ---- a.txt -+++ b.txt -@@ -1 +1 @@ --a -+b -\ No newline at end of file - diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff.snap s390-tools-2.33.1/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff.snap --- s390-tools-2.31.0/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff.snap 2024-02-06 12:28:09.000000000 +0100 
+++ s390-tools-2.33.1/rust-vendor/similar/src/snapshots/similar__udiff__unified_diff.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ ---- -source: src/udiff.rs -expression: "&diff.unified_diff().header(\"a.txt\", \"b.txt\").to_string()" ---- ---- a.txt -+++ b.txt -@@ -16,7 +16,7 @@ - p - q - r --s -+S - t - u - v -@@ -38,7 +38,7 @@ - L - M - N --O -+o - P - Q - R - diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/abstraction.rs s390-tools-2.33.1/rust-vendor/similar/src/text/abstraction.rs --- s390-tools-2.31.0/rust-vendor/similar/src/text/abstraction.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/abstraction.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,450 +0,0 @@ -use std::borrow::Cow; -use std::hash::Hash; -use std::ops::Range; - -/// Reference to a [`DiffableStr`]. -/// -/// This type exists because while the library only really provides ways to -/// work with `&str` and `&[u8]` there are types that deref into those string -/// slices such as `String` and `Vec`. -/// -/// This trait is used in the library whenever it's nice to be able to pass -/// strings of different types in. -/// -/// Requires the `text` feature. -pub trait DiffableStrRef { - /// The type of the resolved [`DiffableStr`]. - type Output: DiffableStr + ?Sized; - - /// Resolves the reference. - fn as_diffable_str(&self) -> &Self::Output; -} - -impl DiffableStrRef for T { - type Output = T; - - fn as_diffable_str(&self) -> &T { - self - } -} - -impl DiffableStrRef for String { - type Output = str; - - fn as_diffable_str(&self) -> &str { - self.as_str() - } -} - -impl<'a, T: DiffableStr + ?Sized> DiffableStrRef for Cow<'a, T> { - type Output = T; - - fn as_diffable_str(&self) -> &T { - self - } -} - -/// All supported diffable strings. -/// -/// The text module can work with different types of strings depending -/// on how the crate is compiled. 
Out of the box `&str` is always supported -/// but with the `bytes` feature one can also work with `[u8]` slices for -/// as long as they are ASCII compatible. -/// -/// Requires the `text` feature. -pub trait DiffableStr: Hash + PartialEq + PartialOrd + Ord + Eq + ToOwned { - /// Splits the value into newlines with newlines attached. - fn tokenize_lines(&self) -> Vec<&Self>; - - /// Splits the value into newlines with newlines separated. - fn tokenize_lines_and_newlines(&self) -> Vec<&Self>; - - /// Tokenizes into words. - fn tokenize_words(&self) -> Vec<&Self>; - - /// Tokenizes the input into characters. - fn tokenize_chars(&self) -> Vec<&Self>; - - /// Tokenizes into unicode words. - #[cfg(feature = "unicode")] - fn tokenize_unicode_words(&self) -> Vec<&Self>; - - /// Tokenizes into unicode graphemes. - #[cfg(feature = "unicode")] - fn tokenize_graphemes(&self) -> Vec<&Self>; - - /// Decodes the string (potentially) lossy. - fn as_str(&self) -> Option<&str>; - - /// Decodes the string (potentially) lossy. - fn to_string_lossy(&self) -> Cow<'_, str>; - - /// Checks if the string ends in a newline. - fn ends_with_newline(&self) -> bool; - - /// The length of the string. - fn len(&self) -> usize; - - /// Slices the string. - fn slice(&self, rng: Range) -> &Self; - - /// Returns the string as slice of raw bytes. - fn as_bytes(&self) -> &[u8]; - - /// Checks if the string is empty. 
- fn is_empty(&self) -> bool { - self.len() == 0 - } -} - -impl DiffableStr for str { - fn tokenize_lines(&self) -> Vec<&Self> { - let mut iter = self.char_indices().peekable(); - let mut last_pos = 0; - let mut lines = vec![]; - - while let Some((idx, c)) = iter.next() { - if c == '\r' { - if iter.peek().map_or(false, |x| x.1 == '\n') { - lines.push(&self[last_pos..=idx + 1]); - iter.next(); - last_pos = idx + 2; - } else { - lines.push(&self[last_pos..=idx]); - last_pos = idx + 1; - } - } else if c == '\n' { - lines.push(&self[last_pos..=idx]); - last_pos = idx + 1; - } - } - - if last_pos < self.len() { - lines.push(&self[last_pos..]); - } - - lines - } - - fn tokenize_lines_and_newlines(&self) -> Vec<&Self> { - let mut rv = vec![]; - let mut iter = self.char_indices().peekable(); - - while let Some((idx, c)) = iter.next() { - let is_newline = c == '\r' || c == '\n'; - let start = idx; - let mut end = idx + c.len_utf8(); - while let Some(&(_, next_char)) = iter.peek() { - if (next_char == '\r' || next_char == '\n') != is_newline { - break; - } - iter.next(); - end += next_char.len_utf8(); - } - rv.push(&self[start..end]); - } - - rv - } - - fn tokenize_words(&self) -> Vec<&Self> { - let mut iter = self.char_indices().peekable(); - let mut rv = vec![]; - - while let Some((idx, c)) = iter.next() { - let is_whitespace = c.is_whitespace(); - let start = idx; - let mut end = idx + c.len_utf8(); - while let Some(&(_, next_char)) = iter.peek() { - if next_char.is_whitespace() != is_whitespace { - break; - } - iter.next(); - end += next_char.len_utf8(); - } - rv.push(&self[start..end]); - } - - rv - } - - fn tokenize_chars(&self) -> Vec<&Self> { - self.char_indices() - .map(move |(i, c)| &self[i..i + c.len_utf8()]) - .collect() - } - - #[cfg(feature = "unicode")] - fn tokenize_unicode_words(&self) -> Vec<&Self> { - unicode_segmentation::UnicodeSegmentation::split_word_bounds(self).collect() - } - - #[cfg(feature = "unicode")] - fn tokenize_graphemes(&self) -> Vec<&Self> 
{ - unicode_segmentation::UnicodeSegmentation::graphemes(self, true).collect() - } - - fn as_str(&self) -> Option<&str> { - Some(self) - } - - fn to_string_lossy(&self) -> Cow<'_, str> { - Cow::Borrowed(self) - } - - fn ends_with_newline(&self) -> bool { - self.ends_with(&['\r', '\n'][..]) - } - - fn len(&self) -> usize { - str::len(self) - } - - fn slice(&self, rng: Range) -> &Self { - &self[rng] - } - - fn as_bytes(&self) -> &[u8] { - str::as_bytes(self) - } -} - -#[cfg(feature = "bytes")] -mod bytes_support { - use super::*; - - use bstr::ByteSlice; - - impl DiffableStrRef for Vec { - type Output = [u8]; - - fn as_diffable_str(&self) -> &[u8] { - self.as_slice() - } - } - - /// Allows viewing ASCII compatible byte slices as strings. - /// - /// Requires the `bytes` feature. - impl DiffableStr for [u8] { - fn tokenize_lines(&self) -> Vec<&Self> { - let mut iter = self.char_indices().peekable(); - let mut last_pos = 0; - let mut lines = vec![]; - - while let Some((_, end, c)) = iter.next() { - if c == '\r' { - if iter.peek().map_or(false, |x| x.2 == '\n') { - lines.push(&self[last_pos..end + 1]); - iter.next(); - last_pos = end + 1; - } else { - lines.push(&self[last_pos..end]); - last_pos = end; - } - } else if c == '\n' { - lines.push(&self[last_pos..end]); - last_pos = end; - } - } - - if last_pos < self.len() { - lines.push(&self[last_pos..]); - } - - lines - } - - fn tokenize_lines_and_newlines(&self) -> Vec<&Self> { - let mut rv = vec![]; - let mut iter = self.char_indices().peekable(); - - while let Some((start, mut end, c)) = iter.next() { - let is_newline = c == '\r' || c == '\n'; - while let Some(&(_, new_end, next_char)) = iter.peek() { - if (next_char == '\r' || next_char == '\n') != is_newline { - break; - } - iter.next(); - end = new_end; - } - rv.push(&self[start..end]); - } - - rv - } - - fn tokenize_words(&self) -> Vec<&Self> { - let mut iter = self.char_indices().peekable(); - let mut rv = vec![]; - - while let Some((start, mut end, c)) = 
iter.next() { - let is_whitespace = c.is_whitespace(); - while let Some(&(_, new_end, next_char)) = iter.peek() { - if next_char.is_whitespace() != is_whitespace { - break; - } - iter.next(); - end = new_end; - } - rv.push(&self[start..end]); - } - - rv - } - - #[cfg(feature = "unicode")] - fn tokenize_unicode_words(&self) -> Vec<&Self> { - self.words_with_breaks().map(|x| x.as_bytes()).collect() - } - - #[cfg(feature = "unicode")] - fn tokenize_graphemes(&self) -> Vec<&Self> { - self.graphemes().map(|x| x.as_bytes()).collect() - } - - fn tokenize_chars(&self) -> Vec<&Self> { - self.char_indices() - .map(move |(start, end, _)| &self[start..end]) - .collect() - } - - fn as_str(&self) -> Option<&str> { - std::str::from_utf8(self).ok() - } - - fn to_string_lossy(&self) -> Cow<'_, str> { - String::from_utf8_lossy(self) - } - - fn ends_with_newline(&self) -> bool { - if let Some(b'\r') | Some(b'\n') = self.last_byte() { - true - } else { - false - } - } - - fn len(&self) -> usize { - <[u8]>::len(self) - } - - fn slice(&self, rng: Range) -> &Self { - &self[rng] - } - - fn as_bytes(&self) -> &[u8] { - self - } - } -} - -#[test] -fn test_split_lines() { - assert_eq!( - DiffableStr::tokenize_lines("first\nsecond\rthird\r\nfourth\nlast"), - vec!["first\n", "second\r", "third\r\n", "fourth\n", "last"] - ); - assert_eq!(DiffableStr::tokenize_lines("\n\n"), vec!["\n", "\n"]); - assert_eq!(DiffableStr::tokenize_lines("\n"), vec!["\n"]); - assert!(DiffableStr::tokenize_lines("").is_empty()); -} - -#[test] -fn test_split_words() { - assert_eq!( - DiffableStr::tokenize_words("foo bar baz\n\n aha"), - ["foo", " ", "bar", " ", "baz", "\n\n ", "aha"] - ); -} - -#[test] -fn test_split_chars() { - assert_eq!( - DiffableStr::tokenize_chars("abcföâ„ï¸"), - vec!["a", "b", "c", "f", "ö", "â„", "\u{fe0f}"] - ); -} - -#[test] -#[cfg(feature = "unicode")] -fn test_split_graphemes() { - assert_eq!( - DiffableStr::tokenize_graphemes("abcföâ„ï¸"), - vec!["a", "b", "c", "f", "ö", "â„ï¸"] - ); -} - 
-#[test] -#[cfg(feature = "bytes")] -fn test_split_lines_bytes() { - assert_eq!( - DiffableStr::tokenize_lines("first\nsecond\rthird\r\nfourth\nlast".as_bytes()), - vec![ - "first\n".as_bytes(), - "second\r".as_bytes(), - "third\r\n".as_bytes(), - "fourth\n".as_bytes(), - "last".as_bytes() - ] - ); - assert_eq!( - DiffableStr::tokenize_lines("\n\n".as_bytes()), - vec!["\n".as_bytes(), "\n".as_bytes()] - ); - assert_eq!( - DiffableStr::tokenize_lines("\n".as_bytes()), - vec!["\n".as_bytes()] - ); - assert!(DiffableStr::tokenize_lines("".as_bytes()).is_empty()); -} - -#[test] -#[cfg(feature = "bytes")] -fn test_split_words_bytes() { - assert_eq!( - DiffableStr::tokenize_words("foo bar baz\n\n aha".as_bytes()), - [ - &b"foo"[..], - &b" "[..], - &b"bar"[..], - &b" "[..], - &b"baz"[..], - &b"\n\n "[..], - &b"aha"[..] - ] - ); -} - -#[test] -#[cfg(feature = "bytes")] -fn test_split_chars_bytes() { - assert_eq!( - DiffableStr::tokenize_chars("abcföâ„ï¸".as_bytes()), - vec![ - &b"a"[..], - &b"b"[..], - &b"c"[..], - &b"f"[..], - "ö".as_bytes(), - "â„".as_bytes(), - "\u{fe0f}".as_bytes() - ] - ); -} - -#[test] -#[cfg(all(feature = "bytes", feature = "unicode"))] -fn test_split_graphemes_bytes() { - assert_eq!( - DiffableStr::tokenize_graphemes("abcföâ„ï¸".as_bytes()), - vec![ - &b"a"[..], - &b"b"[..], - &b"c"[..], - &b"f"[..], - "ö".as_bytes(), - "â„ï¸".as_bytes() - ] - ); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/inline.rs s390-tools-2.33.1/rust-vendor/similar/src/text/inline.rs --- s390-tools-2.31.0/rust-vendor/similar/src/text/inline.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/inline.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,337 +0,0 @@ -#![cfg(feature = "inline")] -use std::borrow::Cow; -use std::fmt; - -use crate::text::{DiffableStr, TextDiff}; -use crate::types::{Algorithm, Change, ChangeTag, DiffOp, DiffTag}; -use crate::{capture_diff_deadline, get_diff_ratio}; - -use std::ops::Index; -use 
std::time::{Duration, Instant}; - -use super::utils::upper_seq_ratio; - -struct MultiLookup<'bufs, 's, T: DiffableStr + ?Sized> { - strings: &'bufs [&'s T], - seqs: Vec<(&'s T, usize, usize)>, -} - -impl<'bufs, 's, T: DiffableStr + ?Sized> MultiLookup<'bufs, 's, T> { - fn new(strings: &'bufs [&'s T]) -> MultiLookup<'bufs, 's, T> { - let mut seqs = Vec::new(); - for (string_idx, string) in strings.iter().enumerate() { - let mut offset = 0; - let iter = { - #[cfg(feature = "unicode")] - { - string.tokenize_unicode_words() - } - #[cfg(not(feature = "unicode"))] - { - string.tokenize_words() - } - }; - for word in iter { - seqs.push((word, string_idx, offset)); - offset += word.len(); - } - } - MultiLookup { strings, seqs } - } - - pub fn len(&self) -> usize { - self.seqs.len() - } - - fn get_original_slices(&self, idx: usize, len: usize) -> Vec<(usize, &'s T)> { - let mut last = None; - let mut rv = Vec::new(); - - for offset in 0..len { - let (s, str_idx, char_idx) = self.seqs[idx + offset]; - last = match last { - None => Some((str_idx, char_idx, s.len())), - Some((last_str_idx, start_char_idx, last_len)) => { - if last_str_idx == str_idx { - Some((str_idx, start_char_idx, last_len + s.len())) - } else { - rv.push(( - last_str_idx, - self.strings[last_str_idx] - .slice(start_char_idx..start_char_idx + last_len), - )); - Some((str_idx, char_idx, s.len())) - } - } - }; - } - - if let Some((str_idx, start_char_idx, len)) = last { - rv.push(( - str_idx, - self.strings[str_idx].slice(start_char_idx..start_char_idx + len), - )); - } - - rv - } -} - -impl<'bufs, 's, T: DiffableStr + ?Sized> Index for MultiLookup<'bufs, 's, T> { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - self.seqs[index].0 - } -} - -fn push_values<'s, T: DiffableStr + ?Sized>( - v: &mut Vec>, - idx: usize, - emphasized: bool, - s: &'s T, -) { - v.resize_with(v.len().max(idx + 1), Vec::new); - // newlines cause all kinds of wacky stuff if they end up highlighted. 
- // because of this we want to unemphasize all newlines we encounter. - if emphasized { - for seg in s.tokenize_lines_and_newlines() { - v[idx].push((!seg.ends_with_newline(), seg)); - } - } else { - v[idx].push((false, s)); - } -} - -/// Represents the expanded textual change with inline highlights. -/// -/// This is like [`Change`] but with inline highlight info. -#[derive(Debug, PartialEq, Eq, Hash, Clone, Ord, PartialOrd)] -#[cfg_attr(feature = "serde", derive(serde::Serialize))] -pub struct InlineChange<'s, T: DiffableStr + ?Sized> { - tag: ChangeTag, - old_index: Option, - new_index: Option, - values: Vec<(bool, &'s T)>, -} - -impl<'s, T: DiffableStr + ?Sized> InlineChange<'s, T> { - /// Returns the change tag. - pub fn tag(&self) -> ChangeTag { - self.tag - } - - /// Returns the old index if available. - pub fn old_index(&self) -> Option { - self.old_index - } - - /// Returns the new index if available. - pub fn new_index(&self) -> Option { - self.new_index - } - - /// Returns the changed values. - /// - /// Each item is a tuple in the form `(emphasized, value)` where `emphasized` - /// is true if it should be highlighted as an inline diff. - /// - /// Depending on the type of the underlying [`DiffableStr`] this value is - /// more or less useful. If you always want to have a utf-8 string it's - /// better to use the [`InlineChange::iter_strings_lossy`] method. - pub fn values(&self) -> &[(bool, &'s T)] { - &self.values - } - - /// Iterates over all (potentially lossy) utf-8 decoded values. - /// - /// Each item is a tuple in the form `(emphasized, value)` where `emphasized` - /// is true if it should be highlighted as an inline diff. - pub fn iter_strings_lossy(&self) -> impl Iterator)> { - self.values() - .iter() - .map(|(emphasized, raw_value)| (*emphasized, raw_value.to_string_lossy())) - } - - /// Returns `true` if this change does not end in a newline and must be - /// followed up by one if line based diffs are used. 
- pub fn missing_newline(&self) -> bool { - !self.values.last().map_or(true, |x| x.1.ends_with_newline()) - } -} - -impl<'s, T: DiffableStr + ?Sized> From> for InlineChange<'s, T> { - fn from(change: Change<&'s T>) -> InlineChange<'s, T> { - InlineChange { - tag: change.tag(), - old_index: change.old_index(), - new_index: change.new_index(), - values: vec![(false, change.value())], - } - } -} - -impl<'s, T: DiffableStr + ?Sized> fmt::Display for InlineChange<'s, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for (emphasized, value) in self.iter_strings_lossy() { - let marker = match (emphasized, self.tag) { - (false, _) | (true, ChangeTag::Equal) => "", - (true, ChangeTag::Delete) => "-", - (true, ChangeTag::Insert) => "+", - }; - write!(f, "{}{}{}", marker, value, marker)?; - } - if self.missing_newline() { - writeln!(f)?; - } - Ok(()) - } -} - -const MIN_RATIO: f32 = 0.5; -const TIMEOUT_MS: u64 = 500; - -pub(crate) fn iter_inline_changes<'x, 'diff, 'old, 'new, 'bufs, T>( - diff: &'diff TextDiff<'old, 'new, 'bufs, T>, - op: &DiffOp, -) -> impl Iterator> + 'diff -where - T: DiffableStr + ?Sized, - 'x: 'diff, - 'old: 'x, - 'new: 'x, -{ - let (tag, old_range, new_range) = op.as_tag_tuple(); - - if let DiffTag::Equal | DiffTag::Insert | DiffTag::Delete = tag { - return Box::new(diff.iter_changes(op).map(|x| x.into())) as Box>; - } - - let mut old_index = old_range.start; - let mut new_index = new_range.start; - let old_slices = &diff.old_slices()[old_range]; - let new_slices = &diff.new_slices()[new_range]; - - if upper_seq_ratio(old_slices, new_slices) < MIN_RATIO { - return Box::new(diff.iter_changes(op).map(|x| x.into())) as Box>; - } - - let old_lookup = MultiLookup::new(old_slices); - let new_lookup = MultiLookup::new(new_slices); - - let ops = capture_diff_deadline( - Algorithm::Patience, - &old_lookup, - 0..old_lookup.len(), - &new_lookup, - 0..new_lookup.len(), - Some(Instant::now() + Duration::from_millis(TIMEOUT_MS)), - ); - - if 
get_diff_ratio(&ops, old_lookup.len(), new_lookup.len()) < MIN_RATIO { - return Box::new(diff.iter_changes(op).map(|x| x.into())) as Box>; - } - - let mut old_values = Vec::>::new(); - let mut new_values = Vec::>::new(); - - for op in ops { - match op { - DiffOp::Equal { - old_index, - len, - new_index, - } => { - for (idx, slice) in old_lookup.get_original_slices(old_index, len) { - push_values(&mut old_values, idx, false, slice); - } - for (idx, slice) in new_lookup.get_original_slices(new_index, len) { - push_values(&mut new_values, idx, false, slice); - } - } - DiffOp::Delete { - old_index, old_len, .. - } => { - for (idx, slice) in old_lookup.get_original_slices(old_index, old_len) { - push_values(&mut old_values, idx, true, slice); - } - } - DiffOp::Insert { - new_index, new_len, .. - } => { - for (idx, slice) in new_lookup.get_original_slices(new_index, new_len) { - push_values(&mut new_values, idx, true, slice); - } - } - DiffOp::Replace { - old_index, - old_len, - new_index, - new_len, - } => { - for (idx, slice) in old_lookup.get_original_slices(old_index, old_len) { - push_values(&mut old_values, idx, true, slice); - } - for (idx, slice) in new_lookup.get_original_slices(new_index, new_len) { - push_values(&mut new_values, idx, true, slice); - } - } - } - } - - let mut rv = Vec::new(); - - for values in old_values { - rv.push(InlineChange { - tag: ChangeTag::Delete, - old_index: Some(old_index), - new_index: None, - values, - }); - old_index += 1; - } - - for values in new_values { - rv.push(InlineChange { - tag: ChangeTag::Insert, - old_index: None, - new_index: Some(new_index), - values, - }); - new_index += 1; - } - - Box::new(rv.into_iter()) as Box> -} - -#[test] -fn test_line_ops_inline() { - let diff = TextDiff::from_lines( - "Hello World\nsome stuff here\nsome more stuff here\n\nAha stuff here\nand more stuff", - "Stuff\nHello World\nsome amazing stuff here\nsome more stuff here\n", - ); - assert!(diff.newline_terminated()); - let changes = diff - 
.ops() - .iter() - .flat_map(|op| diff.iter_inline_changes(op)) - .collect::>(); - insta::assert_debug_snapshot!(&changes); -} - -#[test] -#[cfg(feature = "serde")] -fn test_serde() { - let diff = TextDiff::from_lines( - "Hello World\nsome stuff here\nsome more stuff here\n\nAha stuff here\nand more stuff", - "Stuff\nHello World\nsome amazing stuff here\nsome more stuff here\n", - ); - assert!(diff.newline_terminated()); - let changes = diff - .ops() - .iter() - .flat_map(|op| diff.iter_inline_changes(op)) - .collect::>(); - let json = serde_json::to_string_pretty(&changes).unwrap(); - insta::assert_snapshot!(&json); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/mod.rs s390-tools-2.33.1/rust-vendor/similar/src/text/mod.rs --- s390-tools-2.31.0/rust-vendor/similar/src/text/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,771 +0,0 @@ -//! Text diffing utilities. -use std::borrow::Cow; -use std::cmp::Reverse; -use std::collections::BinaryHeap; -use std::time::{Duration, Instant}; - -mod abstraction; -#[cfg(feature = "inline")] -mod inline; -mod utils; - -pub use self::abstraction::{DiffableStr, DiffableStrRef}; -#[cfg(feature = "inline")] -pub use self::inline::InlineChange; - -use self::utils::{upper_seq_ratio, QuickSeqRatio}; -use crate::algorithms::IdentifyDistinct; -use crate::iter::{AllChangesIter, ChangesIter}; -use crate::udiff::UnifiedDiff; -use crate::{capture_diff_deadline, get_diff_ratio, group_diff_ops, Algorithm, DiffOp}; - -#[derive(Debug, Clone, Copy)] -enum Deadline { - Absolute(Instant), - Relative(Duration), -} - -impl Deadline { - fn into_instant(self) -> Instant { - match self { - Deadline::Absolute(instant) => instant, - Deadline::Relative(duration) => Instant::now() + duration, - } - } -} - -/// A builder type config for more complex uses of [`TextDiff`]. -/// -/// Requires the `text` feature. 
-#[derive(Clone, Debug, Default)] -pub struct TextDiffConfig { - algorithm: Algorithm, - newline_terminated: Option, - deadline: Option, -} - -impl TextDiffConfig { - /// Changes the algorithm. - /// - /// The default algorithm is [`Algorithm::Myers`]. - pub fn algorithm(&mut self, alg: Algorithm) -> &mut Self { - self.algorithm = alg; - self - } - - /// Sets a deadline for the diff operation. - /// - /// By default a diff will take as long as it takes. For certain diff - /// algorithms like Myer's and Patience a maximum running time can be - /// defined after which the algorithm gives up and approximates. - pub fn deadline(&mut self, deadline: Instant) -> &mut Self { - self.deadline = Some(Deadline::Absolute(deadline)); - self - } - - /// Sets a timeout for thediff operation. - /// - /// This is like [`deadline`](Self::deadline) but accepts a duration. - pub fn timeout(&mut self, timeout: Duration) -> &mut Self { - self.deadline = Some(Deadline::Relative(timeout)); - self - } - - /// Changes the newline termination flag. - /// - /// The default is automatic based on input. This flag controls the - /// behavior of [`TextDiff::iter_changes`] and unified diff generation - /// with regards to newlines. When the flag is set to `false` (which - /// is the default) then newlines are added. Otherwise the newlines - /// from the source sequences are reused. - pub fn newline_terminated(&mut self, yes: bool) -> &mut Self { - self.newline_terminated = Some(yes); - self - } - - /// Creates a diff of lines. - /// - /// This splits the text `old` and `new` into lines preserving newlines - /// in the input. Line diffs are very common and because of that enjoy - /// special handling in similar. When a line diff is created with this - /// method the `newline_terminated` flag is flipped to `true` and will - /// influence the behavior of unified diff generation. 
- /// - /// ```rust - /// use similar::{TextDiff, ChangeTag}; - /// - /// let diff = TextDiff::configure().diff_lines("a\nb\nc", "a\nb\nC"); - /// let changes: Vec<_> = diff - /// .iter_all_changes() - /// .map(|x| (x.tag(), x.value())) - /// .collect(); - /// - /// assert_eq!(changes, vec![ - /// (ChangeTag::Equal, "a\n"), - /// (ChangeTag::Equal, "b\n"), - /// (ChangeTag::Delete, "c"), - /// (ChangeTag::Insert, "C"), - /// ]); - /// ``` - pub fn diff_lines<'old, 'new, 'bufs, T: DiffableStrRef + ?Sized>( - &self, - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - self.diff( - Cow::Owned(old.as_diffable_str().tokenize_lines()), - Cow::Owned(new.as_diffable_str().tokenize_lines()), - true, - ) - } - - /// Creates a diff of words. - /// - /// This splits the text into words and whitespace. - /// - /// Note on word diffs: because the text differ will tokenize the strings - /// into small segments it can be inconvenient to work with the results - /// depending on the use case. You might also want to combine word level - /// diffs with the [`TextDiffRemapper`](crate::utils::TextDiffRemapper) - /// which lets you remap the diffs back to the original input strings. 
- /// - /// ```rust - /// use similar::{TextDiff, ChangeTag}; - /// - /// let diff = TextDiff::configure().diff_words("foo bar baz", "foo BAR baz"); - /// let changes: Vec<_> = diff - /// .iter_all_changes() - /// .map(|x| (x.tag(), x.value())) - /// .collect(); - /// - /// assert_eq!(changes, vec![ - /// (ChangeTag::Equal, "foo"), - /// (ChangeTag::Equal, " "), - /// (ChangeTag::Delete, "bar"), - /// (ChangeTag::Insert, "BAR"), - /// (ChangeTag::Equal, " "), - /// (ChangeTag::Equal, "baz"), - /// ]); - /// ``` - pub fn diff_words<'old, 'new, 'bufs, T: DiffableStrRef + ?Sized>( - &self, - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - self.diff( - Cow::Owned(old.as_diffable_str().tokenize_words()), - Cow::Owned(new.as_diffable_str().tokenize_words()), - false, - ) - } - - /// Creates a diff of characters. - /// - /// Note on character diffs: because the text differ will tokenize the strings - /// into small segments it can be inconvenient to work with the results - /// depending on the use case. You might also want to combine word level - /// diffs with the [`TextDiffRemapper`](crate::utils::TextDiffRemapper) - /// which lets you remap the diffs back to the original input strings. 
- /// - /// ```rust - /// use similar::{TextDiff, ChangeTag}; - /// - /// let diff = TextDiff::configure().diff_chars("abcdef", "abcDDf"); - /// let changes: Vec<_> = diff - /// .iter_all_changes() - /// .map(|x| (x.tag(), x.value())) - /// .collect(); - /// - /// assert_eq!(changes, vec![ - /// (ChangeTag::Equal, "a"), - /// (ChangeTag::Equal, "b"), - /// (ChangeTag::Equal, "c"), - /// (ChangeTag::Delete, "d"), - /// (ChangeTag::Delete, "e"), - /// (ChangeTag::Insert, "D"), - /// (ChangeTag::Insert, "D"), - /// (ChangeTag::Equal, "f"), - /// ]); - /// ``` - pub fn diff_chars<'old, 'new, 'bufs, T: DiffableStrRef + ?Sized>( - &self, - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - self.diff( - Cow::Owned(old.as_diffable_str().tokenize_chars()), - Cow::Owned(new.as_diffable_str().tokenize_chars()), - false, - ) - } - - /// Creates a diff of unicode words. - /// - /// This splits the text into words according to unicode rules. This is - /// generally recommended over [`TextDiffConfig::diff_words`] but - /// requires a dependency. - /// - /// This requires the `unicode` feature. - /// - /// Note on word diffs: because the text differ will tokenize the strings - /// into small segments it can be inconvenient to work with the results - /// depending on the use case. You might also want to combine word level - /// diffs with the [`TextDiffRemapper`](crate::utils::TextDiffRemapper) - /// which lets you remap the diffs back to the original input strings. 
- /// - /// ```rust - /// use similar::{TextDiff, ChangeTag}; - /// - /// let diff = TextDiff::configure().diff_unicode_words("ah(be)ce", "ah(ah)ce"); - /// let changes: Vec<_> = diff - /// .iter_all_changes() - /// .map(|x| (x.tag(), x.value())) - /// .collect(); - /// - /// assert_eq!(changes, vec![ - /// (ChangeTag::Equal, "ah"), - /// (ChangeTag::Equal, "("), - /// (ChangeTag::Delete, "be"), - /// (ChangeTag::Insert, "ah"), - /// (ChangeTag::Equal, ")"), - /// (ChangeTag::Equal, "ce"), - /// ]); - /// ``` - #[cfg(feature = "unicode")] - pub fn diff_unicode_words<'old, 'new, 'bufs, T: DiffableStrRef + ?Sized>( - &self, - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - self.diff( - Cow::Owned(old.as_diffable_str().tokenize_unicode_words()), - Cow::Owned(new.as_diffable_str().tokenize_unicode_words()), - false, - ) - } - - /// Creates a diff of graphemes. - /// - /// This requires the `unicode` feature. - /// - /// Note on grapheme diffs: because the text differ will tokenize the strings - /// into small segments it can be inconvenient to work with the results - /// depending on the use case. You might also want to combine word level - /// diffs with the [`TextDiffRemapper`](crate::utils::TextDiffRemapper) - /// which lets you remap the diffs back to the original input strings. 
- /// - /// ```rust - /// use similar::{TextDiff, ChangeTag}; - /// - /// let diff = TextDiff::configure().diff_graphemes("💩🇦🇹🦠", "💩🇦🇱â„ï¸"); - /// let changes: Vec<_> = diff - /// .iter_all_changes() - /// .map(|x| (x.tag(), x.value())) - /// .collect(); - /// - /// assert_eq!(changes, vec![ - /// (ChangeTag::Equal, "💩"), - /// (ChangeTag::Delete, "🇦🇹"), - /// (ChangeTag::Delete, "🦠"), - /// (ChangeTag::Insert, "🇦🇱"), - /// (ChangeTag::Insert, "â„ï¸"), - /// ]); - /// ``` - #[cfg(feature = "unicode")] - pub fn diff_graphemes<'old, 'new, 'bufs, T: DiffableStrRef + ?Sized>( - &self, - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - self.diff( - Cow::Owned(old.as_diffable_str().tokenize_graphemes()), - Cow::Owned(new.as_diffable_str().tokenize_graphemes()), - false, - ) - } - - /// Creates a diff of arbitrary slices. - /// - /// ```rust - /// use similar::{TextDiff, ChangeTag}; - /// - /// let old = &["foo", "bar", "baz"]; - /// let new = &["foo", "BAR", "baz"]; - /// let diff = TextDiff::configure().diff_slices(old, new); - /// let changes: Vec<_> = diff - /// .iter_all_changes() - /// .map(|x| (x.tag(), x.value())) - /// .collect(); - /// - /// assert_eq!(changes, vec![ - /// (ChangeTag::Equal, "foo"), - /// (ChangeTag::Delete, "bar"), - /// (ChangeTag::Insert, "BAR"), - /// (ChangeTag::Equal, "baz"), - /// ]); - /// ``` - pub fn diff_slices<'old, 'new, 'bufs, T: DiffableStr + ?Sized>( - &self, - old: &'bufs [&'old T], - new: &'bufs [&'new T], - ) -> TextDiff<'old, 'new, 'bufs, T> { - self.diff(Cow::Borrowed(old), Cow::Borrowed(new), false) - } - - fn diff<'old, 'new, 'bufs, T: DiffableStr + ?Sized>( - &self, - old: Cow<'bufs, [&'old T]>, - new: Cow<'bufs, [&'new T]>, - newline_terminated: bool, - ) -> TextDiff<'old, 'new, 'bufs, T> { - let deadline = self.deadline.map(|x| x.into_instant()); - let ops = if old.len() > 100 || new.len() > 100 { - let ih = IdentifyDistinct::::new(&old[..], 0..old.len(), &new[..], 0..new.len()); - 
capture_diff_deadline( - self.algorithm, - ih.old_lookup(), - ih.old_range(), - ih.new_lookup(), - ih.new_range(), - deadline, - ) - } else { - capture_diff_deadline( - self.algorithm, - &old[..], - 0..old.len(), - &new[..], - 0..new.len(), - deadline, - ) - }; - TextDiff { - old, - new, - ops, - newline_terminated: self.newline_terminated.unwrap_or(newline_terminated), - algorithm: self.algorithm, - } - } -} - -/// Captures diff op codes for textual diffs. -/// -/// The exact diff behavior is depending on the underlying [`DiffableStr`]. -/// For instance diffs on bytes and strings are slightly different. You can -/// create a text diff from constructors such as [`TextDiff::from_lines`] or -/// the [`TextDiffConfig`] created by [`TextDiff::configure`]. -/// -/// Requires the `text` feature. -pub struct TextDiff<'old, 'new, 'bufs, T: DiffableStr + ?Sized> { - old: Cow<'bufs, [&'old T]>, - new: Cow<'bufs, [&'new T]>, - ops: Vec, - newline_terminated: bool, - algorithm: Algorithm, -} - -impl<'old, 'new, 'bufs> TextDiff<'old, 'new, 'bufs, str> { - /// Configures a text differ before diffing. - pub fn configure() -> TextDiffConfig { - TextDiffConfig::default() - } - - /// Creates a diff of lines. - /// - /// For more information see [`TextDiffConfig::diff_lines`]. - pub fn from_lines( - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - TextDiff::configure().diff_lines(old, new) - } - - /// Creates a diff of words. - /// - /// For more information see [`TextDiffConfig::diff_words`]. - pub fn from_words( - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - TextDiff::configure().diff_words(old, new) - } - - /// Creates a diff of chars. - /// - /// For more information see [`TextDiffConfig::diff_chars`]. - pub fn from_chars( - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - TextDiff::configure().diff_chars(old, new) - } - - /// Creates a diff of unicode words. 
- /// - /// For more information see [`TextDiffConfig::diff_unicode_words`]. - /// - /// This requires the `unicode` feature. - #[cfg(feature = "unicode")] - pub fn from_unicode_words( - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - TextDiff::configure().diff_unicode_words(old, new) - } - - /// Creates a diff of graphemes. - /// - /// For more information see [`TextDiffConfig::diff_graphemes`]. - /// - /// This requires the `unicode` feature. - #[cfg(feature = "unicode")] - pub fn from_graphemes( - old: &'old T, - new: &'new T, - ) -> TextDiff<'old, 'new, 'bufs, T::Output> { - TextDiff::configure().diff_graphemes(old, new) - } -} - -impl<'old, 'new, 'bufs, T: DiffableStr + ?Sized + 'old + 'new> TextDiff<'old, 'new, 'bufs, T> { - /// Creates a diff of arbitrary slices. - /// - /// For more information see [`TextDiffConfig::diff_slices`]. - pub fn from_slices( - old: &'bufs [&'old T], - new: &'bufs [&'new T], - ) -> TextDiff<'old, 'new, 'bufs, T> { - TextDiff::configure().diff_slices(old, new) - } - - /// The name of the algorithm that created the diff. - pub fn algorithm(&self) -> Algorithm { - self.algorithm - } - - /// Returns `true` if items in the slice are newline terminated. - /// - /// This flag is used by the unified diff writer to determine if extra - /// newlines have to be added. - pub fn newline_terminated(&self) -> bool { - self.newline_terminated - } - - /// Returns all old slices. - pub fn old_slices(&self) -> &[&'old T] { - &self.old - } - - /// Returns all new slices. - pub fn new_slices(&self) -> &[&'new T] { - &self.new - } - - /// Return a measure of the sequences' similarity in the range `0..=1`. - /// - /// A ratio of `1.0` means the two sequences are a complete match, a - /// ratio of `0.0` would indicate completely distinct sequences. 
- /// - /// ```rust - /// # use similar::TextDiff; - /// let diff = TextDiff::from_chars("abcd", "bcde"); - /// assert_eq!(diff.ratio(), 0.75); - /// ``` - pub fn ratio(&self) -> f32 { - get_diff_ratio(self.ops(), self.old.len(), self.new.len()) - } - - /// Iterates over the changes the op expands to. - /// - /// This method is a convenient way to automatically resolve the different - /// ways in which a change could be encoded (insert/delete vs replace), look - /// up the value from the appropriate slice and also handle correct index - /// handling. - pub fn iter_changes<'x, 'slf>( - &'slf self, - op: &DiffOp, - ) -> ChangesIter<'slf, [&'x T], [&'x T], &'x T> - where - 'x: 'slf, - 'old: 'x, - 'new: 'x, - { - op.iter_changes(self.old_slices(), self.new_slices()) - } - - /// Returns the captured diff ops. - pub fn ops(&self) -> &[DiffOp] { - &self.ops - } - - /// Isolate change clusters by eliminating ranges with no changes. - /// - /// This is equivalent to calling [`group_diff_ops`] on [`TextDiff::ops`]. - pub fn grouped_ops(&self, n: usize) -> Vec> { - group_diff_ops(self.ops().to_vec(), n) - } - - /// Flattens out the diff into all changes. - /// - /// This is a shortcut for combining [`TextDiff::ops`] with - /// [`TextDiff::iter_changes`]. - pub fn iter_all_changes<'x, 'slf>(&'slf self) -> AllChangesIter<'slf, 'x, T> - where - 'x: 'slf + 'old + 'new, - 'old: 'x, - 'new: 'x, - { - AllChangesIter::new(&self.old[..], &self.new[..], self.ops()) - } - - /// Utility to return a unified diff formatter. - pub fn unified_diff<'diff>(&'diff self) -> UnifiedDiff<'diff, 'old, 'new, 'bufs, T> { - UnifiedDiff::from_text_diff(self) - } - - /// Iterates over the changes the op expands to with inline emphasis. - /// - /// This is very similar to [`TextDiff::iter_changes`] but it performs a second - /// level diff on adjacent line replacements. 
The exact behavior of - /// this function with regards to how it detects those inline changes - /// is currently not defined and will likely change over time. - /// - /// As of similar 1.2.0 the behavior of this function changes depending on - /// if the `unicode` feature is enabled or not. It will prefer unicode word - /// splitting over word splitting depending on the feature flag. - /// - /// Requires the `inline` feature. - #[cfg(feature = "inline")] - pub fn iter_inline_changes<'slf>( - &'slf self, - op: &DiffOp, - ) -> impl Iterator> + '_ - where - 'slf: 'old + 'new, - { - inline::iter_inline_changes(self, op) - } -} - -/// Use the text differ to find `n` close matches. -/// -/// `cutoff` defines the threshold which needs to be reached for a word -/// to be considered similar. See [`TextDiff::ratio`] for more information. -/// -/// ``` -/// # use similar::get_close_matches; -/// let matches = get_close_matches( -/// "appel", -/// &["ape", "apple", "peach", "puppy"][..], -/// 3, -/// 0.6 -/// ); -/// assert_eq!(matches, vec!["apple", "ape"]); -/// ``` -/// -/// Requires the `text` feature. -pub fn get_close_matches<'a, T: DiffableStr + ?Sized>( - word: &T, - possibilities: &[&'a T], - n: usize, - cutoff: f32, -) -> Vec<&'a T> { - let mut matches = BinaryHeap::new(); - let seq1 = word.tokenize_chars(); - let quick_ratio = QuickSeqRatio::new(&seq1); - - for &possibility in possibilities { - let seq2 = possibility.tokenize_chars(); - - if upper_seq_ratio(&seq1, &seq2) < cutoff || quick_ratio.calc(&seq2) < cutoff { - continue; - } - - let diff = TextDiff::from_slices(&seq1, &seq2); - let ratio = diff.ratio(); - if ratio >= cutoff { - // we're putting the word itself in reverse in so that matches with - // the same ratio are ordered lexicographically. 
- matches.push(((ratio * std::u32::MAX as f32) as u32, Reverse(possibility))); - } - } - - let mut rv = vec![]; - for _ in 0..n { - if let Some((_, elt)) = matches.pop() { - rv.push(elt.0); - } else { - break; - } - } - - rv -} - -#[test] -fn test_captured_ops() { - let diff = TextDiff::from_lines( - "Hello World\nsome stuff here\nsome more stuff here\n", - "Hello World\nsome amazing stuff here\nsome more stuff here\n", - ); - insta::assert_debug_snapshot!(&diff.ops()); -} - -#[test] -fn test_captured_word_ops() { - let diff = TextDiff::from_words( - "Hello World\nsome stuff here\nsome more stuff here\n", - "Hello World\nsome amazing stuff here\nsome more stuff here\n", - ); - let changes = diff - .ops() - .iter() - .flat_map(|op| diff.iter_changes(op)) - .collect::>(); - insta::assert_debug_snapshot!(&changes); -} - -#[test] -fn test_unified_diff() { - let diff = TextDiff::from_lines( - "Hello World\nsome stuff here\nsome more stuff here\n", - "Hello World\nsome amazing stuff here\nsome more stuff here\n", - ); - assert!(diff.newline_terminated()); - insta::assert_snapshot!(&diff - .unified_diff() - .context_radius(3) - .header("old", "new") - .to_string()); -} - -#[test] -fn test_line_ops() { - let a = "Hello World\nsome stuff here\nsome more stuff here\n"; - let b = "Hello World\nsome amazing stuff here\nsome more stuff here\n"; - let diff = TextDiff::from_lines(a, b); - assert!(diff.newline_terminated()); - let changes = diff - .ops() - .iter() - .flat_map(|op| diff.iter_changes(op)) - .collect::>(); - insta::assert_debug_snapshot!(&changes); - - #[cfg(feature = "bytes")] - { - let byte_diff = TextDiff::from_lines(a.as_bytes(), b.as_bytes()); - let byte_changes = byte_diff - .ops() - .iter() - .flat_map(|op| byte_diff.iter_changes(op)) - .collect::>(); - for (change, byte_change) in changes.iter().zip(byte_changes.iter()) { - assert_eq!(change.to_string_lossy(), byte_change.to_string_lossy()); - } - } -} - -#[test] -fn test_virtual_newlines() { - let diff = 
TextDiff::from_lines("a\nb", "a\nc\n"); - assert!(diff.newline_terminated()); - let changes = diff - .ops() - .iter() - .flat_map(|op| diff.iter_changes(op)) - .collect::>(); - insta::assert_debug_snapshot!(&changes); -} - -#[test] -fn test_char_diff() { - let diff = TextDiff::from_chars("Hello World", "Hallo Welt"); - insta::assert_debug_snapshot!(diff.ops()); - - #[cfg(feature = "bytes")] - { - let byte_diff = TextDiff::from_chars("Hello World".as_bytes(), "Hallo Welt".as_bytes()); - assert_eq!(diff.ops(), byte_diff.ops()); - } -} - -#[test] -fn test_ratio() { - let diff = TextDiff::from_chars("abcd", "bcde"); - assert_eq!(diff.ratio(), 0.75); - let diff = TextDiff::from_chars("", ""); - assert_eq!(diff.ratio(), 1.0); -} - -#[test] -fn test_get_close_matches() { - let matches = get_close_matches("appel", &["ape", "apple", "peach", "puppy"][..], 3, 0.6); - assert_eq!(matches, vec!["apple", "ape"]); - let matches = get_close_matches( - "hulo", - &[ - "hi", "hulu", "hali", "hoho", "amaz", "zulo", "blah", "hopp", "uulo", "aulo", - ][..], - 5, - 0.7, - ); - assert_eq!(matches, vec!["aulo", "hulu", "uulo", "zulo"]); -} - -#[test] -fn test_lifetimes_on_iter() { - use crate::Change; - - fn diff_lines<'x, T>(old: &'x T, new: &'x T) -> Vec> - where - T: DiffableStrRef + ?Sized, - { - TextDiff::from_lines(old, new).iter_all_changes().collect() - } - - let a = "1\n2\n3\n".to_string(); - let b = "1\n99\n3\n".to_string(); - let changes = diff_lines(&a, &b); - insta::assert_debug_snapshot!(&changes); -} - -#[test] -#[cfg(feature = "serde")] -fn test_serde() { - let diff = TextDiff::from_lines( - "Hello World\nsome stuff here\nsome more stuff here\n\nAha stuff here\nand more stuff", - "Stuff\nHello World\nsome amazing stuff here\nsome more stuff here\n", - ); - let changes = diff - .ops() - .iter() - .flat_map(|op| diff.iter_changes(op)) - .collect::>(); - let json = serde_json::to_string_pretty(&changes).unwrap(); - insta::assert_snapshot!(&json); -} - -#[test] -#[cfg(feature = 
"serde")] -fn test_serde_ops() { - let diff = TextDiff::from_lines( - "Hello World\nsome stuff here\nsome more stuff here\n\nAha stuff here\nand more stuff", - "Stuff\nHello World\nsome amazing stuff here\nsome more stuff here\n", - ); - let changes = diff.ops(); - let json = serde_json::to_string_pretty(&changes).unwrap(); - insta::assert_snapshot!(&json); -} - -#[test] -fn test_regression_issue_37() { - let config = TextDiffConfig::default(); - let diff = config.diff_lines("\u{18}\n\n", "\n\n\r"); - let mut output = diff.unified_diff(); - assert_eq!( - output.context_radius(0).to_string(), - "@@ -1 +1,0 @@\n-\u{18}\n@@ -2,0 +2,2 @@\n+\n+\r" - ); -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__captured_ops.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__captured_ops.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__captured_ops.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__captured_ops.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ ---- -source: src/text/mod.rs -expression: "&diff.ops()" ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 1, - }, - Replace { - old_index: 1, - old_len: 1, - new_index: 1, - new_len: 1, - }, - Equal { - old_index: 2, - new_index: 2, - len: 1, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__captured_word_ops.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__captured_word_ops.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__captured_word_ops.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__captured_word_ops.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,202 +0,0 @@ ---- -source: src/text/mod.rs -expression: "&changes" ---- -[ - Change { - tag: Equal, - old_index: Some( - 0, - ), - 
new_index: Some( - 0, - ), - value: "Hello", - }, - Change { - tag: Equal, - old_index: Some( - 1, - ), - new_index: Some( - 1, - ), - value: " ", - }, - Change { - tag: Equal, - old_index: Some( - 2, - ), - new_index: Some( - 2, - ), - value: "World", - }, - Change { - tag: Equal, - old_index: Some( - 3, - ), - new_index: Some( - 3, - ), - value: "\n", - }, - Change { - tag: Equal, - old_index: Some( - 4, - ), - new_index: Some( - 4, - ), - value: "some", - }, - Change { - tag: Equal, - old_index: Some( - 5, - ), - new_index: Some( - 5, - ), - value: " ", - }, - Change { - tag: Insert, - old_index: None, - new_index: Some( - 6, - ), - value: "amazing", - }, - Change { - tag: Insert, - old_index: None, - new_index: Some( - 7, - ), - value: " ", - }, - Change { - tag: Equal, - old_index: Some( - 6, - ), - new_index: Some( - 8, - ), - value: "stuff", - }, - Change { - tag: Equal, - old_index: Some( - 7, - ), - new_index: Some( - 9, - ), - value: " ", - }, - Change { - tag: Equal, - old_index: Some( - 8, - ), - new_index: Some( - 10, - ), - value: "here", - }, - Change { - tag: Equal, - old_index: Some( - 9, - ), - new_index: Some( - 11, - ), - value: "\n", - }, - Change { - tag: Equal, - old_index: Some( - 10, - ), - new_index: Some( - 12, - ), - value: "some", - }, - Change { - tag: Equal, - old_index: Some( - 11, - ), - new_index: Some( - 13, - ), - value: " ", - }, - Change { - tag: Equal, - old_index: Some( - 12, - ), - new_index: Some( - 14, - ), - value: "more", - }, - Change { - tag: Equal, - old_index: Some( - 13, - ), - new_index: Some( - 15, - ), - value: " ", - }, - Change { - tag: Equal, - old_index: Some( - 14, - ), - new_index: Some( - 16, - ), - value: "stuff", - }, - Change { - tag: Equal, - old_index: Some( - 15, - ), - new_index: Some( - 17, - ), - value: " ", - }, - Change { - tag: Equal, - old_index: Some( - 16, - ), - new_index: Some( - 18, - ), - value: "here", - }, - Change { - tag: Equal, - old_index: Some( - 17, - ), - new_index: Some( - 19, 
- ), - value: "\n", - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__char_diff.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__char_diff.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__char_diff.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__char_diff.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ ---- -source: src/text/mod.rs -expression: diff.ops() ---- -[ - Equal { - old_index: 0, - new_index: 0, - len: 1, - }, - Replace { - old_index: 1, - old_len: 1, - new_index: 1, - new_len: 1, - }, - Equal { - old_index: 2, - new_index: 2, - len: 5, - }, - Replace { - old_index: 7, - old_len: 2, - new_index: 7, - new_len: 1, - }, - Equal { - old_index: 9, - new_index: 8, - len: 1, - }, - Replace { - old_index: 10, - old_len: 1, - new_index: 9, - new_len: 1, - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__inline__line_ops_inline.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__inline__line_ops_inline.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__inline__line_ops_inline.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__inline__line_ops_inline.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,126 +0,0 @@ ---- -source: src/text/inline.rs -expression: "&changes" ---- -[ - InlineChange { - tag: Insert, - old_index: None, - new_index: Some( - 0, - ), - values: [ - ( - false, - "Stuff\n", - ), - ], - }, - InlineChange { - tag: Equal, - old_index: Some( - 0, - ), - new_index: Some( - 1, - ), - values: [ - ( - false, - "Hello World\n", - ), - ], - }, - InlineChange { - tag: Delete, - old_index: Some( - 1, - ), - new_index: None, - values: [ - ( - false, - "some ", - ), - ( - false, - "stuff here\n", - ), - ], - }, - InlineChange { - tag: 
Insert, - old_index: None, - new_index: Some( - 2, - ), - values: [ - ( - false, - "some ", - ), - ( - true, - "amazing ", - ), - ( - false, - "stuff here\n", - ), - ], - }, - InlineChange { - tag: Equal, - old_index: Some( - 2, - ), - new_index: Some( - 3, - ), - values: [ - ( - false, - "some more stuff here\n", - ), - ], - }, - InlineChange { - tag: Delete, - old_index: Some( - 3, - ), - new_index: None, - values: [ - ( - false, - "\n", - ), - ], - }, - InlineChange { - tag: Delete, - old_index: Some( - 4, - ), - new_index: None, - values: [ - ( - false, - "Aha stuff here\n", - ), - ], - }, - InlineChange { - tag: Delete, - old_index: Some( - 5, - ), - new_index: None, - values: [ - ( - false, - "and more stuff", - ), - ], - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__inline__serde.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__inline__serde.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__inline__serde.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__inline__serde.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,107 +0,0 @@ ---- -source: src/text/inline.rs -expression: "&json" - ---- -[ - { - "tag": "insert", - "old_index": null, - "new_index": 0, - "values": [ - [ - false, - "Stuff\n" - ] - ] - }, - { - "tag": "equal", - "old_index": 0, - "new_index": 1, - "values": [ - [ - false, - "Hello World\n" - ] - ] - }, - { - "tag": "delete", - "old_index": 1, - "new_index": null, - "values": [ - [ - false, - "some " - ], - [ - false, - "stuff here\n" - ] - ] - }, - { - "tag": "insert", - "old_index": null, - "new_index": 2, - "values": [ - [ - false, - "some " - ], - [ - true, - "amazing " - ], - [ - false, - "stuff here\n" - ] - ] - }, - { - "tag": "equal", - "old_index": 2, - "new_index": 3, - "values": [ - [ - false, - "some more stuff here\n" - ] - ] - }, - { - "tag": "delete", - "old_index": 3, 
- "new_index": null, - "values": [ - [ - false, - "\n" - ] - ] - }, - { - "tag": "delete", - "old_index": 4, - "new_index": null, - "values": [ - [ - false, - "Aha stuff here\n" - ] - ] - }, - { - "tag": "delete", - "old_index": 5, - "new_index": null, - "values": [ - [ - false, - "and more stuff" - ] - ] - } -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__lifetimes_on_iter.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__lifetimes_on_iter.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__lifetimes_on_iter.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__lifetimes_on_iter.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,42 +0,0 @@ ---- -source: src/text/mod.rs -expression: "&changes" ---- -[ - Change { - tag: Equal, - old_index: Some( - 0, - ), - new_index: Some( - 0, - ), - value: "1\n", - }, - Change { - tag: Delete, - old_index: Some( - 1, - ), - new_index: None, - value: "2\n", - }, - Change { - tag: Insert, - old_index: None, - new_index: Some( - 1, - ), - value: "99\n", - }, - Change { - tag: Equal, - old_index: Some( - 2, - ), - new_index: Some( - 2, - ), - value: "3\n", - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__line_ops.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__line_ops.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__line_ops.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__line_ops.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,42 +0,0 @@ ---- -source: src/text/mod.rs -expression: "&changes" ---- -[ - Change { - tag: Equal, - old_index: Some( - 0, - ), - new_index: Some( - 0, - ), - value: "Hello World\n", - }, - Change { - tag: Delete, - old_index: Some( - 1, - ), - new_index: None, - value: "some stuff here\n", - 
}, - Change { - tag: Insert, - old_index: None, - new_index: Some( - 1, - ), - value: "some amazing stuff here\n", - }, - Change { - tag: Equal, - old_index: Some( - 2, - ), - new_index: Some( - 2, - ), - value: "some more stuff here\n", - }, -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__serde_ops.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__serde_ops.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__serde_ops.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__serde_ops.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,38 +0,0 @@ ---- -source: src/text/mod.rs -expression: "&json" - ---- -[ - { - "op": "insert", - "old_index": 0, - "new_index": 0, - "new_len": 1 - }, - { - "op": "equal", - "old_index": 0, - "new_index": 1, - "len": 1 - }, - { - "op": "replace", - "old_index": 1, - "old_len": 1, - "new_index": 2, - "new_len": 1 - }, - { - "op": "equal", - "old_index": 2, - "new_index": 3, - "len": 1 - }, - { - "op": "delete", - "old_index": 3, - "old_len": 3, - "new_index": 4 - } -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__serde.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__serde.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__serde.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__serde.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ ---- -source: src/text/mod.rs -expression: "&json" - ---- -[ - { - "tag": "insert", - "old_index": null, - "new_index": 0, - "value": "Stuff\n" - }, - { - "tag": "equal", - "old_index": 0, - "new_index": 1, - "value": "Hello World\n" - }, - { - "tag": "delete", - "old_index": 1, - "new_index": null, - "value": "some stuff here\n" - }, - { - "tag": "insert", - "old_index": null, - "new_index": 2, 
- "value": "some amazing stuff here\n" - }, - { - "tag": "equal", - "old_index": 2, - "new_index": 3, - "value": "some more stuff here\n" - }, - { - "tag": "delete", - "old_index": 3, - "new_index": null, - "value": "\n" - }, - { - "tag": "delete", - "old_index": 4, - "new_index": null, - "value": "Aha stuff here\n" - }, - { - "tag": "delete", - "old_index": 5, - "new_index": null, - "value": "and more stuff" - } -] diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__unified_diff.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__unified_diff.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__unified_diff.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__unified_diff.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ ---- -source: src/text/mod.rs -expression: "&diff.unified_diff().context_radius(3).header(\"old\", \"new\").to_string()" ---- ---- old -+++ new -@@ -1,3 +1,3 @@ - Hello World --some stuff here -+some amazing stuff here - some more stuff here - diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__virtual_newlines.snap s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__virtual_newlines.snap --- s390-tools-2.31.0/rust-vendor/similar/src/text/snapshots/similar__text__virtual_newlines.snap 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/snapshots/similar__text__virtual_newlines.snap 1970-01-01 01:00:00.000000000 +0100 @@ -1,32 +0,0 @@ ---- -source: src/text/mod.rs -expression: "&changes" ---- -[ - Change { - tag: Equal, - old_index: Some( - 0, - ), - new_index: Some( - 0, - ), - value: "a\n", - }, - Change { - tag: Delete, - old_index: Some( - 1, - ), - new_index: None, - value: "b", - }, - Change { - tag: Insert, - old_index: None, - new_index: Some( - 1, - ), - value: "c\n", - }, -] diff -Nru 
s390-tools-2.31.0/rust-vendor/similar/src/text/utils.rs s390-tools-2.33.1/rust-vendor/similar/src/text/utils.rs --- s390-tools-2.31.0/rust-vendor/similar/src/text/utils.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/text/utils.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -use std::collections::HashMap; -use std::hash::Hash; - -use super::DiffableStrRef; - -// quick and dirty way to get an upper sequence ratio. -pub fn upper_seq_ratio(seq1: &[T], seq2: &[T]) -> f32 { - let n = seq1.len() + seq2.len(); - if n == 0 { - 1.0 - } else { - 2.0 * seq1.len().min(seq2.len()) as f32 / n as f32 - } -} - -/// Internal utility to calculate an upper bound for a ratio for -/// [`get_close_matches`]. This is based on Python's difflib approach -/// of considering the two sets to be multisets. -/// -/// It counts the number of matches without regard to order, which is an -/// obvious upper bound. -pub struct QuickSeqRatio<'a, T: DiffableStrRef + ?Sized>(HashMap<&'a T, i32>); - -impl<'a, T: DiffableStrRef + Hash + Eq + ?Sized> QuickSeqRatio<'a, T> { - pub fn new(seq: &[&'a T]) -> QuickSeqRatio<'a, T> { - let mut counts = HashMap::new(); - for &word in seq { - *counts.entry(word).or_insert(0) += 1; - } - QuickSeqRatio(counts) - } - - pub fn calc(&self, seq: &[&T]) -> f32 { - let n = self.0.len() + seq.len(); - if n == 0 { - return 1.0; - } - - let mut available = HashMap::new(); - let mut matches = 0; - for &word in seq { - let x = if let Some(count) = available.get(&word) { - *count - } else { - self.0.get(&word).copied().unwrap_or(0) - }; - available.insert(word, x - 1); - if x > 0 { - matches += 1; - } - } - - 2.0 * matches as f32 / n as f32 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/types.rs s390-tools-2.33.1/rust-vendor/similar/src/types.rs --- s390-tools-2.31.0/rust-vendor/similar/src/types.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/types.rs 1970-01-01 01:00:00.000000000 
+0100 @@ -1,502 +0,0 @@ -use std::fmt; -use std::ops::{Index, Range}; - -use crate::algorithms::utils::is_empty_range; -use crate::algorithms::DiffHook; -use crate::iter::ChangesIter; - -/// An enum representing a diffing algorithm. -#[derive(Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Debug)] -#[cfg_attr( - feature = "serde", - derive(serde::Serialize, serde::Deserialize), - serde(rename_all = "snake_case") -)] -pub enum Algorithm { - /// Picks the myers algorithm from [`crate::algorithms::myers`] - Myers, - /// Picks the patience algorithm from [`crate::algorithms::patience`] - Patience, - /// Picks the LCS algorithm from [`crate::algorithms::lcs`] - Lcs, -} - -impl Default for Algorithm { - /// Returns the default algorithm ([`Algorithm::Myers`]). - fn default() -> Algorithm { - Algorithm::Myers - } -} - -/// The tag of a change. -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, Ord, PartialOrd)] -#[cfg_attr( - feature = "serde", - derive(serde::Serialize, serde::Deserialize), - serde(rename_all = "snake_case") -)] -pub enum ChangeTag { - /// The change indicates equality (not a change) - Equal, - /// The change indicates deleted text. - Delete, - /// The change indicates inserted text. - Insert, -} - -impl fmt::Display for ChangeTag { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "{}", - match &self { - ChangeTag::Equal => ' ', - ChangeTag::Delete => '-', - ChangeTag::Insert => '+', - } - ) - } -} - -/// Represents the expanded [`DiffOp`] change. -/// -/// This type is returned from [`DiffOp::iter_changes`] and -/// [`TextDiff::iter_changes`](crate::text::TextDiff::iter_changes). -/// -/// It exists so that it's more convenient to work with textual differences as -/// the underlying [`DiffOp`] encodes a group of changes. -/// -/// This type has additional methods that are only available for types -/// implementing [`DiffableStr`](crate::text::DiffableStr). 
-#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, Ord, PartialOrd)] -#[cfg_attr(feature = "serde", derive(serde::Serialize))] -pub struct Change { - pub(crate) tag: ChangeTag, - pub(crate) old_index: Option, - pub(crate) new_index: Option, - pub(crate) value: T, -} - -/// These methods are available for all change types. -impl Change { - /// Returns the change tag. - pub fn tag(&self) -> ChangeTag { - self.tag - } - - /// Returns the old index if available. - pub fn old_index(&self) -> Option { - self.old_index - } - - /// Returns the new index if available. - pub fn new_index(&self) -> Option { - self.new_index - } - - /// Returns the underlying changed value. - /// - /// Depending on the type of the underlying [`crate::text::DiffableStr`] - /// this value is more or less useful. If you always want to have a utf-8 - /// string it's best to use the [`Change::as_str`] and - /// [`Change::to_string_lossy`] methods. - pub fn value(&self) -> T { - self.value.clone() - } - - /// Returns the underlying changed value as reference. - pub fn value_ref(&self) -> &T { - &self.value - } - - /// Returns the underlying changed value as mutable reference. - pub fn value_mut(&mut self) -> &mut T { - &mut self.value - } -} - -/// Utility enum to capture a diff operation. -/// -/// This is used by [`Capture`](crate::algorithms::Capture). -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -#[cfg_attr( - feature = "serde", - derive(serde::Serialize, serde::Deserialize), - serde(rename_all = "snake_case", tag = "op") -)] -pub enum DiffOp { - /// A segment is equal (see [`DiffHook::equal`]) - Equal { - /// The starting index in the old sequence. - old_index: usize, - /// The starting index in the new sequence. - new_index: usize, - /// The length of the segment. - len: usize, - }, - /// A segment was deleted (see [`DiffHook::delete`]) - Delete { - /// The starting index in the old sequence. - old_index: usize, - /// The length of the old segment. 
- old_len: usize, - /// The starting index in the new sequence. - new_index: usize, - }, - /// A segment was inserted (see [`DiffHook::insert`]) - Insert { - /// The starting index in the old sequence. - old_index: usize, - /// The starting index in the new sequence. - new_index: usize, - /// The length of the new segment. - new_len: usize, - }, - /// A segment was replaced (see [`DiffHook::replace`]) - Replace { - /// The starting index in the old sequence. - old_index: usize, - /// The length of the old segment. - old_len: usize, - /// The starting index in the new sequence. - new_index: usize, - /// The length of the new segment. - new_len: usize, - }, -} - -/// The tag of a diff operation. -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, Ord, PartialOrd)] -#[cfg_attr( - feature = "serde", - derive(serde::Serialize, serde::Deserialize), - serde(rename_all = "snake_case") -)] -pub enum DiffTag { - /// The diff op encodes an equal segment. - Equal, - /// The diff op encodes a deleted segment. - Delete, - /// The diff op encodes an inserted segment. - Insert, - /// The diff op encodes a replaced segment. - Replace, -} - -impl DiffOp { - /// Returns the tag of the operation. - pub fn tag(self) -> DiffTag { - self.as_tag_tuple().0 - } - - /// Returns the old range. - pub fn old_range(&self) -> Range { - self.as_tag_tuple().1 - } - - /// Returns the new range. - pub fn new_range(&self) -> Range { - self.as_tag_tuple().2 - } - - /// Transform the op into a tuple of diff tag and ranges. - /// - /// This is useful when operating on slices. The returned format is - /// `(tag, i1..i2, j1..j2)`: - /// - /// * `Replace`: `a[i1..i2]` should be replaced by `b[j1..j2]` - /// * `Delete`: `a[i1..i2]` should be deleted (`j1 == j2` in this case). - /// * `Insert`: `b[j1..j2]` should be inserted at `a[i1..i2]` (`i1 == i2` in this case). - /// * `Equal`: `a[i1..i2]` is equal to `b[j1..j2]`. 
- pub fn as_tag_tuple(&self) -> (DiffTag, Range, Range) { - match *self { - DiffOp::Equal { - old_index, - new_index, - len, - } => ( - DiffTag::Equal, - old_index..old_index + len, - new_index..new_index + len, - ), - DiffOp::Delete { - old_index, - new_index, - old_len, - } => ( - DiffTag::Delete, - old_index..old_index + old_len, - new_index..new_index, - ), - DiffOp::Insert { - old_index, - new_index, - new_len, - } => ( - DiffTag::Insert, - old_index..old_index, - new_index..new_index + new_len, - ), - DiffOp::Replace { - old_index, - old_len, - new_index, - new_len, - } => ( - DiffTag::Replace, - old_index..old_index + old_len, - new_index..new_index + new_len, - ), - } - } - - /// Apply this operation to a diff hook. - pub fn apply_to_hook(&self, d: &mut D) -> Result<(), D::Error> { - match *self { - DiffOp::Equal { - old_index, - new_index, - len, - } => d.equal(old_index, new_index, len), - DiffOp::Delete { - old_index, - old_len, - new_index, - } => d.delete(old_index, old_len, new_index), - DiffOp::Insert { - old_index, - new_index, - new_len, - } => d.insert(old_index, new_index, new_len), - DiffOp::Replace { - old_index, - old_len, - new_index, - new_len, - } => d.replace(old_index, old_len, new_index, new_len), - } - } - - /// Iterates over all changes encoded in the diff op against old and new - /// sequences. - /// - /// `old` and `new` are two indexable objects like the types you pass to - /// the diffing algorithm functions. 
- /// - /// ```rust - /// use similar::{ChangeTag, Algorithm}; - /// use similar::capture_diff_slices; - /// let old = vec!["foo", "bar", "baz"]; - /// let new = vec!["foo", "bar", "blah"]; - /// let ops = capture_diff_slices(Algorithm::Myers, &old, &new); - /// let changes: Vec<_> = ops - /// .iter() - /// .flat_map(|x| x.iter_changes(&old, &new)) - /// .map(|x| (x.tag(), x.value())) - /// .collect(); - /// assert_eq!(changes, vec![ - /// (ChangeTag::Equal, "foo"), - /// (ChangeTag::Equal, "bar"), - /// (ChangeTag::Delete, "baz"), - /// (ChangeTag::Insert, "blah"), - /// ]); - /// ``` - pub fn iter_changes<'lookup, Old, New, T>( - &self, - old: &'lookup Old, - new: &'lookup New, - ) -> ChangesIter<'lookup, Old, New, T> - where - Old: Index + ?Sized, - New: Index + ?Sized, - { - ChangesIter::new(old, new, *self) - } - - /// Given a diffop yields the changes it encodes against the given slices. - /// - /// This is similar to [`DiffOp::iter_changes`] but instead of yielding the - /// individual changes it yields consequitive changed slices. - /// - /// This will only ever yield a single tuple or two tuples in case a - /// [`DiffOp::Replace`] operation is passed. - /// - /// ```rust - /// use similar::{ChangeTag, Algorithm}; - /// use similar::capture_diff_slices; - /// let old = vec!["foo", "bar", "baz"]; - /// let new = vec!["foo", "bar", "blah"]; - /// let ops = capture_diff_slices(Algorithm::Myers, &old, &new); - /// let changes: Vec<_> = ops.iter().flat_map(|x| x.iter_slices(&old, &new)).collect(); - /// assert_eq!(changes, vec![ - /// (ChangeTag::Equal, &["foo", "bar"][..]), - /// (ChangeTag::Delete, &["baz"][..]), - /// (ChangeTag::Insert, &["blah"][..]), - /// ]); - /// ``` - /// - /// Due to lifetime restrictions it's currently impossible for the - /// returned slices to outlive the lookup. 
- pub fn iter_slices<'lookup, Old, New, T>( - &self, - old: &'lookup Old, - new: &'lookup New, - ) -> impl Iterator - where - T: 'lookup + ?Sized, - Old: Index, Output = T> + ?Sized, - New: Index, Output = T> + ?Sized, - { - match *self { - DiffOp::Equal { old_index, len, .. } => { - Some((ChangeTag::Equal, &old[old_index..old_index + len])) - .into_iter() - .chain(None) - } - DiffOp::Insert { - new_index, new_len, .. - } => Some((ChangeTag::Insert, &new[new_index..new_index + new_len])) - .into_iter() - .chain(None), - DiffOp::Delete { - old_index, old_len, .. - } => Some((ChangeTag::Delete, &old[old_index..old_index + old_len])) - .into_iter() - .chain(None), - DiffOp::Replace { - old_index, - old_len, - new_index, - new_len, - } => Some((ChangeTag::Delete, &old[old_index..old_index + old_len])) - .into_iter() - .chain(Some(( - ChangeTag::Insert, - &new[new_index..new_index + new_len], - ))), - } - } - - pub(crate) fn is_empty(&self) -> bool { - let (_, old, new) = self.as_tag_tuple(); - is_empty_range(&old) && is_empty_range(&new) - } - - pub(crate) fn shift_left(&mut self, adjust: usize) { - self.adjust((adjust, true), (0, false)); - } - - pub(crate) fn shift_right(&mut self, adjust: usize) { - self.adjust((adjust, false), (0, false)); - } - - pub(crate) fn grow_left(&mut self, adjust: usize) { - self.adjust((adjust, true), (adjust, false)); - } - - pub(crate) fn grow_right(&mut self, adjust: usize) { - self.adjust((0, false), (adjust, false)); - } - - pub(crate) fn shrink_left(&mut self, adjust: usize) { - self.adjust((0, false), (adjust, true)); - } - - pub(crate) fn shrink_right(&mut self, adjust: usize) { - self.adjust((adjust, false), (adjust, true)); - } - - fn adjust(&mut self, adjust_offset: (usize, bool), adjust_len: (usize, bool)) { - #[inline(always)] - fn modify(val: &mut usize, adj: (usize, bool)) { - if adj.1 { - *val -= adj.0; - } else { - *val += adj.0; - } - } - - match self { - DiffOp::Equal { - old_index, - new_index, - len, - } => { - 
modify(old_index, adjust_offset); - modify(new_index, adjust_offset); - modify(len, adjust_len); - } - DiffOp::Delete { - old_index, - old_len, - new_index, - } => { - modify(old_index, adjust_offset); - modify(old_len, adjust_len); - modify(new_index, adjust_offset); - } - DiffOp::Insert { - old_index, - new_index, - new_len, - } => { - modify(old_index, adjust_offset); - modify(new_index, adjust_offset); - modify(new_len, adjust_len); - } - DiffOp::Replace { - old_index, - old_len, - new_index, - new_len, - } => { - modify(old_index, adjust_offset); - modify(old_len, adjust_len); - modify(new_index, adjust_offset); - modify(new_len, adjust_len); - } - } - } -} - -#[cfg(feature = "text")] -mod text_additions { - use super::*; - use crate::text::DiffableStr; - use std::borrow::Cow; - - /// The text interface can produce changes over [`DiffableStr`] implementing - /// values. As those are generic interfaces for different types of strings - /// utility methods to make working with standard rust strings more enjoyable. - impl<'s, T: DiffableStr + ?Sized> Change<&'s T> { - /// Returns the value as string if it is utf-8. - pub fn as_str(&self) -> Option<&'s str> { - T::as_str(self.value) - } - - /// Returns the value (lossy) decoded as utf-8 string. - pub fn to_string_lossy(&self) -> Cow<'s, str> { - T::to_string_lossy(self.value) - } - - /// Returns `true` if this change does not end in a newline and must be - /// followed up by one if line based diffs are used. - /// - /// The [`std::fmt::Display`] implementation of [`Change`] will automatically - /// insert a newline after the value if this is true. 
- pub fn missing_newline(&self) -> bool { - !T::ends_with_newline(self.value) - } - } - - impl<'s, T: DiffableStr + ?Sized> fmt::Display for Change<&'s T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{}{}", - self.to_string_lossy(), - if self.missing_newline() { "\n" } else { "" } - ) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/similar/src/udiff.rs s390-tools-2.33.1/rust-vendor/similar/src/udiff.rs --- s390-tools-2.31.0/rust-vendor/similar/src/udiff.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/udiff.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,359 +0,0 @@ -//! This module provides unified diff functionality. -//! -//! It is available for as long as the `text` feature is enabled which -//! is enabled by default: -//! -//! ```rust -//! use similar::TextDiff; -//! # let old_text = ""; -//! # let new_text = ""; -//! let text_diff = TextDiff::from_lines(old_text, new_text); -//! print!("{}", text_diff -//! .unified_diff() -//! .context_radius(10) -//! .header("old_file", "new_file")); -//! ``` -//! -//! # Unicode vs Bytes -//! -//! The [`UnifiedDiff`] type supports both unicode and byte diffs for all -//! types compatible with [`DiffableStr`]. You can pick between the two -//! versions by using the [`Display`](std::fmt::Display) implementation or -//! [`UnifiedDiff`] or [`UnifiedDiff::to_writer`]. -//! -//! The former uses [`DiffableStr::to_string_lossy`], the latter uses -//! [`DiffableStr::as_bytes`] for each line. 
-#[cfg(feature = "text")] -use std::{fmt, io}; - -use crate::iter::AllChangesIter; -use crate::text::{DiffableStr, TextDiff}; -use crate::types::{Algorithm, DiffOp}; - -struct MissingNewlineHint(bool); - -impl fmt::Display for MissingNewlineHint { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.0 { - write!(f, "\n\\ No newline at end of file")?; - } - Ok(()) - } -} - -#[derive(Copy, Clone, Debug)] -struct UnifiedDiffHunkRange(usize, usize); - -impl UnifiedDiffHunkRange { - fn start(&self) -> usize { - self.0 - } - - fn end(&self) -> usize { - self.1 - } -} - -impl fmt::Display for UnifiedDiffHunkRange { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut beginning = self.start() + 1; - let len = self.end().saturating_sub(self.start()); - if len == 1 { - write!(f, "{}", beginning) - } else { - if len == 0 { - // empty ranges begin at line just before the range - beginning -= 1; - } - write!(f, "{},{}", beginning, len) - } - } -} - -/// Unified diff hunk header formatter. -pub struct UnifiedHunkHeader { - old_range: UnifiedDiffHunkRange, - new_range: UnifiedDiffHunkRange, -} - -impl UnifiedHunkHeader { - /// Creates a hunk header from a (non empty) slice of diff ops. - pub fn new(ops: &[DiffOp]) -> UnifiedHunkHeader { - let first = ops[0]; - let last = ops[ops.len() - 1]; - let old_start = first.old_range().start; - let new_start = first.new_range().start; - let old_end = last.old_range().end; - let new_end = last.new_range().end; - UnifiedHunkHeader { - old_range: UnifiedDiffHunkRange(old_start, old_end), - new_range: UnifiedDiffHunkRange(new_start, new_end), - } - } -} - -impl fmt::Display for UnifiedHunkHeader { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "@@ -{} +{} @@", &self.old_range, &self.new_range) - } -} - -/// Unified diff formatter. 
-/// -/// ```rust -/// use similar::TextDiff; -/// # let old_text = ""; -/// # let new_text = ""; -/// let text_diff = TextDiff::from_lines(old_text, new_text); -/// print!("{}", text_diff -/// .unified_diff() -/// .context_radius(10) -/// .header("old_file", "new_file")); -/// ``` -/// -/// ## Unicode vs Bytes -/// -/// The [`UnifiedDiff`] type supports both unicode and byte diffs for all -/// types compatible with [`DiffableStr`]. You can pick between the two -/// versions by using [`UnifiedDiff.to_string`] or [`UnifiedDiff.to_writer`]. -/// The former uses [`DiffableStr::to_string_lossy`], the latter uses -/// [`DiffableStr::as_bytes`] for each line. -pub struct UnifiedDiff<'diff, 'old, 'new, 'bufs, T: DiffableStr + ?Sized> { - diff: &'diff TextDiff<'old, 'new, 'bufs, T>, - context_radius: usize, - missing_newline_hint: bool, - header: Option<(String, String)>, -} - -impl<'diff, 'old, 'new, 'bufs, T: DiffableStr + ?Sized> UnifiedDiff<'diff, 'old, 'new, 'bufs, T> { - /// Creates a formatter from a text diff object. - pub fn from_text_diff(diff: &'diff TextDiff<'old, 'new, 'bufs, T>) -> Self { - UnifiedDiff { - diff, - context_radius: 3, - missing_newline_hint: true, - header: None, - } - } - - /// Changes the context radius. - /// - /// The context radius is the number of lines between changes that should - /// be emitted. This defaults to `3`. - pub fn context_radius(&mut self, n: usize) -> &mut Self { - self.context_radius = n; - self - } - - /// Sets a header to the diff. - /// - /// `a` and `b` are the file names that are added to the top of the unified - /// file format. The names are accepted verbatim which lets you encode - /// a timestamp into it when separated by a tab (`\t`). For more information, - /// see [the unified diff format specification](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/diff.html#tag_20_34_10_07). 
- pub fn header(&mut self, a: &str, b: &str) -> &mut Self { - self.header = Some((a.to_string(), b.to_string())); - self - } - - /// Controls the missing newline hint. - /// - /// By default a special `\ No newline at end of file` marker is added to - /// the output when a file is not terminated with a final newline. This can - /// be disabled with this flag. - pub fn missing_newline_hint(&mut self, yes: bool) -> &mut Self { - self.missing_newline_hint = yes; - self - } - - /// Iterates over all hunks as configured. - pub fn iter_hunks(&self) -> impl Iterator> { - let diff = self.diff; - let missing_newline_hint = self.missing_newline_hint; - self.diff - .grouped_ops(self.context_radius) - .into_iter() - .filter(|ops| !ops.is_empty()) - .map(move |ops| UnifiedDiffHunk::new(ops, diff, missing_newline_hint)) - } - - /// Write the unified diff as bytes to the output stream. - pub fn to_writer(&self, mut w: W) -> Result<(), io::Error> - where - 'diff: 'old + 'new + 'bufs, - { - let mut header = self.header.as_ref(); - for hunk in self.iter_hunks() { - if let Some((old_file, new_file)) = header.take() { - writeln!(w, "--- {}", old_file)?; - writeln!(w, "+++ {}", new_file)?; - } - write!(w, "{}", hunk)?; - } - Ok(()) - } - - fn header_opt(&mut self, header: Option<(&str, &str)>) -> &mut Self { - if let Some((a, b)) = header { - self.header(a, b); - } - self - } -} - -/// Unified diff hunk formatter. -/// -/// The `Display` this renders out a single unified diff's hunk. -pub struct UnifiedDiffHunk<'diff, 'old, 'new, 'bufs, T: DiffableStr + ?Sized> { - diff: &'diff TextDiff<'old, 'new, 'bufs, T>, - ops: Vec, - missing_newline_hint: bool, -} - -impl<'diff, 'old, 'new, 'bufs, T: DiffableStr + ?Sized> - UnifiedDiffHunk<'diff, 'old, 'new, 'bufs, T> -{ - /// Creates a new hunk for some operations. 
- pub fn new( - ops: Vec, - diff: &'diff TextDiff<'old, 'new, 'bufs, T>, - missing_newline_hint: bool, - ) -> UnifiedDiffHunk<'diff, 'old, 'new, 'bufs, T> { - UnifiedDiffHunk { - diff, - ops, - missing_newline_hint, - } - } - - /// Returns the header for the hunk. - pub fn header(&self) -> UnifiedHunkHeader { - UnifiedHunkHeader::new(&self.ops) - } - - /// Returns all operations in the hunk. - pub fn ops(&self) -> &[DiffOp] { - &self.ops - } - - /// Returns the value of the `missing_newline_hint` flag. - pub fn missing_newline_hint(&self) -> bool { - self.missing_newline_hint - } - - /// Iterates over all changes in a hunk. - pub fn iter_changes<'x, 'slf>(&'slf self) -> AllChangesIter<'slf, 'x, T> - where - 'x: 'slf + 'old + 'new, - 'old: 'x, - 'new: 'x, - { - AllChangesIter::new(self.diff.old_slices(), self.diff.new_slices(), self.ops()) - } - - /// Write the hunk as bytes to the output stream. - pub fn to_writer(&self, mut w: W) -> Result<(), io::Error> - where - 'diff: 'old + 'new + 'bufs, - { - for (idx, change) in self.iter_changes().enumerate() { - if idx == 0 { - writeln!(w, "{}", self.header())?; - } - write!(w, "{}", change.tag())?; - w.write_all(change.value().as_bytes())?; - if !self.diff.newline_terminated() { - writeln!(w)?; - } - if self.diff.newline_terminated() && change.missing_newline() { - writeln!(w, "{}", MissingNewlineHint(self.missing_newline_hint))?; - } - } - Ok(()) - } -} - -impl<'diff, 'old, 'new, 'bufs, T: DiffableStr + ?Sized> fmt::Display - for UnifiedDiffHunk<'diff, 'old, 'new, 'bufs, T> -where - 'diff: 'old + 'new + 'bufs, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (idx, change) in self.iter_changes().enumerate() { - if idx == 0 { - writeln!(f, "{}", self.header())?; - } - write!(f, "{}{}", change.tag(), change.to_string_lossy())?; - if !self.diff.newline_terminated() { - writeln!(f)?; - } - if self.diff.newline_terminated() && change.missing_newline() { - writeln!(f, "{}", 
MissingNewlineHint(self.missing_newline_hint))?; - } - } - Ok(()) - } -} - -impl<'diff, 'old, 'new, 'bufs, T: DiffableStr + ?Sized> fmt::Display - for UnifiedDiff<'diff, 'old, 'new, 'bufs, T> -where - 'diff: 'old + 'new + 'bufs, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut header = self.header.as_ref(); - for hunk in self.iter_hunks() { - if let Some((old_file, new_file)) = header.take() { - writeln!(f, "--- {}", old_file)?; - writeln!(f, "+++ {}", new_file)?; - } - write!(f, "{}", hunk)?; - } - Ok(()) - } -} - -/// Quick way to get a unified diff as string. -/// -/// `n` configures [`UnifiedDiff::context_radius`] and -/// `header` configures [`UnifiedDiff::header`] when not `None`. -pub fn unified_diff( - alg: Algorithm, - old: &str, - new: &str, - n: usize, - header: Option<(&str, &str)>, -) -> String { - TextDiff::configure() - .algorithm(alg) - .diff_lines(old, new) - .unified_diff() - .context_radius(n) - .header_opt(header) - .to_string() -} - -#[test] -fn test_unified_diff() { - let diff = TextDiff::from_lines( - "a\nb\nc\nd\ne\nf\ng\nh\ni\nj\nk\nl\nm\nn\no\np\nq\nr\ns\nt\nu\nv\nw\nx\ny\nz\nA\nB\nC\nD\nE\nF\nG\nH\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nU\nV\nW\nX\nY\nZ", - "a\nb\nc\nd\ne\nf\ng\nh\ni\nj\nk\nl\nm\nn\no\np\nq\nr\nS\nt\nu\nv\nw\nx\ny\nz\nA\nB\nC\nD\nE\nF\nG\nH\nI\nJ\nK\nL\nM\nN\no\nP\nQ\nR\nS\nT\nU\nV\nW\nX\nY\nZ", - ); - insta::assert_snapshot!(&diff.unified_diff().header("a.txt", "b.txt").to_string()); -} -#[test] -fn test_empty_unified_diff() { - let diff = TextDiff::from_lines("abc", "abc"); - assert_eq!(diff.unified_diff().header("a.txt", "b.txt").to_string(), ""); -} - -#[test] -fn test_unified_diff_newline_hint() { - let diff = TextDiff::from_lines("a\n", "b"); - insta::assert_snapshot!(&diff.unified_diff().header("a.txt", "b.txt").to_string()); - insta::assert_snapshot!(&diff - .unified_diff() - .missing_newline_hint(false) - .header("a.txt", "b.txt") - .to_string()); -} diff -Nru 
s390-tools-2.31.0/rust-vendor/similar/src/utils.rs s390-tools-2.33.1/rust-vendor/similar/src/utils.rs --- s390-tools-2.31.0/rust-vendor/similar/src/utils.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/similar/src/utils.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,412 +0,0 @@ -//! Utilities for common diff related operations. -//! -//! This module provides specialized utilities and simplified diff operations -//! for common operations. It's useful when you want to work with text diffs -//! and you're interested in getting vectors of these changes directly. -//! -//! # Slice Remapping -//! -//! When working with [`TextDiff`] it's common that one takes advantage of the -//! built-in tokenization of the differ. This for instance lets you do -//! grapheme level diffs. This is implemented by the differ generating rather -//! small slices of strings and running a diff algorithm over them. -//! -//! The downside of this is that all the [`DiffOp`] objects produced by the -//! diffing algorithm encode operations on these rather small slices. For -//! a lot of use cases this is not what one wants which can make this very -//! inconvenient. This module provides a [`TextDiffRemapper`] which lets you -//! map from the ranges that the [`TextDiff`] returns to the original input -//! strings. For more information see [`TextDiffRemapper`]. -//! -//! # Simple Diff Functions -//! -//! This module provides a range of common test diff functions that will -//! produce vectors of `(change_tag, value)` tuples. They will automatically -//! optimize towards returning the most useful slice that one would expect for -//! the type of diff performed. 
- -use std::hash::Hash; -use std::ops::{Index, Range}; - -use crate::{ - capture_diff_slices, Algorithm, ChangeTag, DiffOp, DiffableStr, DiffableStrRef, TextDiff, -}; - -struct SliceRemapper<'x, T: ?Sized> { - source: &'x T, - indexes: Vec>, -} - -impl<'x, T: DiffableStr + ?Sized> SliceRemapper<'x, T> { - fn new(source: &'x T, slices: &[&'x T]) -> SliceRemapper<'x, T> { - let indexes = slices - .iter() - .scan(0, |state, item| { - let start = *state; - let end = start + item.len(); - *state = end; - Some(start..end) - }) - .collect(); - SliceRemapper { source, indexes } - } - - fn slice(&self, range: Range) -> Option<&'x T> { - let start = self.indexes.get(range.start)?.start; - let end = self.indexes.get(range.end - 1)?.end; - Some(self.source.slice(start..end)) - } -} - -impl<'x, T: DiffableStr + ?Sized> Index> for SliceRemapper<'x, T> { - type Output = T; - - fn index(&self, range: Range) -> &Self::Output { - self.slice(range).expect("out of bounds") - } -} - -/// A remapper that can remap diff ops to the original slices. -/// -/// The idea here is that when a [`TextDiff`](crate::TextDiff) is created from -/// two strings and the internal tokenization is used, this remapper can take -/// a range in the tokenized sequences and remap it to the original string. -/// This is particularly useful when you want to do things like character or -/// grapheme level diffs but you want to not have to iterate over small sequences -/// but large consequitive ones from the source. -/// -/// ```rust -/// use similar::{ChangeTag, TextDiff}; -/// use similar::utils::TextDiffRemapper; -/// -/// let old = "yo! foo bar baz"; -/// let new = "yo! foo bor baz"; -/// let diff = TextDiff::from_words(old, new); -/// let remapper = TextDiffRemapper::from_text_diff(&diff, old, new); -/// let changes: Vec<_> = diff.ops() -/// .iter() -/// .flat_map(move |x| remapper.iter_slices(x)) -/// .collect(); -/// -/// assert_eq!(changes, vec![ -/// (ChangeTag::Equal, "yo! 
foo "), -/// (ChangeTag::Delete, "bar"), -/// (ChangeTag::Insert, "bor"), -/// (ChangeTag::Equal, " baz") -/// ]); -pub struct TextDiffRemapper<'x, T: ?Sized> { - old: SliceRemapper<'x, T>, - new: SliceRemapper<'x, T>, -} - -impl<'x, T: DiffableStr + ?Sized> TextDiffRemapper<'x, T> { - /// Creates a new remapper from strings and slices. - pub fn new( - old_slices: &[&'x T], - new_slices: &[&'x T], - old: &'x T, - new: &'x T, - ) -> TextDiffRemapper<'x, T> { - TextDiffRemapper { - old: SliceRemapper::new(old, old_slices), - new: SliceRemapper::new(new, new_slices), - } - } - - /// Creates a new remapper from a text diff and the original strings. - pub fn from_text_diff<'old, 'new, 'bufs>( - diff: &TextDiff<'old, 'new, 'bufs, T>, - old: &'x T, - new: &'x T, - ) -> TextDiffRemapper<'x, T> - where - 'old: 'x, - 'new: 'x, - { - TextDiffRemapper { - old: SliceRemapper::new(old, diff.old_slices()), - new: SliceRemapper::new(new, diff.new_slices()), - } - } - - /// Slices into the old string. - pub fn slice_old(&self, range: Range) -> Option<&'x T> { - self.old.slice(range) - } - - /// Slices into the new string. - pub fn slice_new(&self, range: Range) -> Option<&'x T> { - self.new.slice(range) - } - - /// Given a diffop yields the changes it encodes against the original strings. - /// - /// This is the same as the [`DiffOp::iter_slices`] method. - /// - /// ## Panics - /// - /// This method can panic if the input strings passed to the constructor - /// are incompatible with the input strings passed to the diffing algorithm. - pub fn iter_slices(&self, op: &DiffOp) -> impl Iterator { - // note: this is equivalent to the code in `DiffOp::iter_slices`. It is - // a copy/paste because the slicing currently cannot be well abstracted - // because of lifetime issues caused by the `Index` trait. - match *op { - DiffOp::Equal { old_index, len, .. 
} => { - Some((ChangeTag::Equal, self.old.slice(old_index..old_index + len))) - .into_iter() - .chain(None) - } - DiffOp::Insert { - new_index, new_len, .. - } => Some(( - ChangeTag::Insert, - self.new.slice(new_index..new_index + new_len), - )) - .into_iter() - .chain(None), - DiffOp::Delete { - old_index, old_len, .. - } => Some(( - ChangeTag::Delete, - self.old.slice(old_index..old_index + old_len), - )) - .into_iter() - .chain(None), - DiffOp::Replace { - old_index, - old_len, - new_index, - new_len, - } => Some(( - ChangeTag::Delete, - self.old.slice(old_index..old_index + old_len), - )) - .into_iter() - .chain(Some(( - ChangeTag::Insert, - self.new.slice(new_index..new_index + new_len), - ))), - } - .map(|(tag, opt_val)| (tag, opt_val.expect("slice out of bounds"))) - } -} - -/// Shortcut for diffing two slices. -/// -/// This function produces the diff of two slices and returns a vector -/// with the changes. -/// -/// ```rust -/// use similar::{Algorithm, ChangeTag}; -/// use similar::utils::diff_slices; -/// -/// let old = "foo\nbar\nbaz".lines().collect::>(); -/// let new = "foo\nbar\nBAZ".lines().collect::>(); -/// assert_eq!(diff_slices(Algorithm::Myers, &old, &new), vec![ -/// (ChangeTag::Equal, &["foo", "bar"][..]), -/// (ChangeTag::Delete, &["baz"][..]), -/// (ChangeTag::Insert, &["BAZ"][..]), -/// ]); -/// ``` -pub fn diff_slices<'x, T: PartialEq + Hash + Ord>( - alg: Algorithm, - old: &'x [T], - new: &'x [T], -) -> Vec<(ChangeTag, &'x [T])> { - capture_diff_slices(alg, old, new) - .iter() - .flat_map(|op| op.iter_slices(old, new)) - .collect() -} - -/// Shortcut for making a character level diff. -/// -/// This function produces the diff of two strings and returns a vector -/// with the changes. It returns connected slices into the original string -/// rather than character level slices. 
-/// -/// ```rust -/// use similar::{Algorithm, ChangeTag}; -/// use similar::utils::diff_chars; -/// -/// assert_eq!(diff_chars(Algorithm::Myers, "foobarbaz", "fooBARbaz"), vec![ -/// (ChangeTag::Equal, "foo"), -/// (ChangeTag::Delete, "bar"), -/// (ChangeTag::Insert, "BAR"), -/// (ChangeTag::Equal, "baz"), -/// ]); -/// ``` -pub fn diff_chars<'x, T: DiffableStrRef + ?Sized>( - alg: Algorithm, - old: &'x T, - new: &'x T, -) -> Vec<(ChangeTag, &'x T::Output)> { - let old = old.as_diffable_str(); - let new = new.as_diffable_str(); - let diff = TextDiff::configure().algorithm(alg).diff_chars(old, new); - let remapper = TextDiffRemapper::from_text_diff(&diff, old, new); - diff.ops() - .iter() - .flat_map(move |x| remapper.iter_slices(x)) - .collect() -} - -/// Shortcut for making a word level diff. -/// -/// This function produces the diff of two strings and returns a vector -/// with the changes. It returns connected slices into the original string -/// rather than word level slices. -/// -/// ```rust -/// use similar::{Algorithm, ChangeTag}; -/// use similar::utils::diff_words; -/// -/// assert_eq!(diff_words(Algorithm::Myers, "foo bar baz", "foo bor baz"), vec![ -/// (ChangeTag::Equal, "foo "), -/// (ChangeTag::Delete, "bar"), -/// (ChangeTag::Insert, "bor"), -/// (ChangeTag::Equal, " baz"), -/// ]); -/// ``` -pub fn diff_words<'x, T: DiffableStrRef + ?Sized>( - alg: Algorithm, - old: &'x T, - new: &'x T, -) -> Vec<(ChangeTag, &'x T::Output)> { - let old = old.as_diffable_str(); - let new = new.as_diffable_str(); - let diff = TextDiff::configure().algorithm(alg).diff_words(old, new); - let remapper = TextDiffRemapper::from_text_diff(&diff, old, new); - diff.ops() - .iter() - .flat_map(move |x| remapper.iter_slices(x)) - .collect() -} - -/// Shortcut for making a unicode word level diff. -/// -/// This function produces the diff of two strings and returns a vector -/// with the changes. 
It returns connected slices into the original string -/// rather than word level slices. -/// -/// ```rust -/// use similar::{Algorithm, ChangeTag}; -/// use similar::utils::diff_unicode_words; -/// -/// let old = "The quick (\"brown\") fox can't jump 32.3 feet, right?"; -/// let new = "The quick (\"brown\") fox can't jump 9.84 meters, right?"; -/// assert_eq!(diff_unicode_words(Algorithm::Myers, old, new), vec![ -/// (ChangeTag::Equal, "The quick (\"brown\") fox can\'t jump "), -/// (ChangeTag::Delete, "32.3"), -/// (ChangeTag::Insert, "9.84"), -/// (ChangeTag::Equal, " "), -/// (ChangeTag::Delete, "feet"), -/// (ChangeTag::Insert, "meters"), -/// (ChangeTag::Equal, ", right?") -/// ]); -/// ``` -/// -/// This requires the `unicode` feature. -#[cfg(feature = "unicode")] -pub fn diff_unicode_words<'x, T: DiffableStrRef + ?Sized>( - alg: Algorithm, - old: &'x T, - new: &'x T, -) -> Vec<(ChangeTag, &'x T::Output)> { - let old = old.as_diffable_str(); - let new = new.as_diffable_str(); - let diff = TextDiff::configure() - .algorithm(alg) - .diff_unicode_words(old, new); - let remapper = TextDiffRemapper::from_text_diff(&diff, old, new); - diff.ops() - .iter() - .flat_map(move |x| remapper.iter_slices(x)) - .collect() -} - -/// Shortcut for making a grapheme level diff. -/// -/// This function produces the diff of two strings and returns a vector -/// with the changes. It returns connected slices into the original string -/// rather than grapheme level slices. 
-/// -/// ```rust -/// use similar::{Algorithm, ChangeTag}; -/// use similar::utils::diff_graphemes; -/// -/// let old = "The flag of Austria is 🇦🇹"; -/// let new = "The flag of Albania is 🇦🇱"; -/// assert_eq!(diff_graphemes(Algorithm::Myers, old, new), vec![ -/// (ChangeTag::Equal, "The flag of A"), -/// (ChangeTag::Delete, "ustr"), -/// (ChangeTag::Insert, "lban"), -/// (ChangeTag::Equal, "ia is "), -/// (ChangeTag::Delete, "🇦🇹"), -/// (ChangeTag::Insert, "🇦🇱"), -/// ]); -/// ``` -/// -/// This requires the `unicode` feature. -#[cfg(feature = "unicode")] -pub fn diff_graphemes<'x, T: DiffableStrRef + ?Sized>( - alg: Algorithm, - old: &'x T, - new: &'x T, -) -> Vec<(ChangeTag, &'x T::Output)> { - let old = old.as_diffable_str(); - let new = new.as_diffable_str(); - let diff = TextDiff::configure() - .algorithm(alg) - .diff_graphemes(old, new); - let remapper = TextDiffRemapper::from_text_diff(&diff, old, new); - diff.ops() - .iter() - .flat_map(move |x| remapper.iter_slices(x)) - .collect() -} - -/// Shortcut for making a line diff. -/// -/// This function produces the diff of two slices and returns a vector -/// with the changes. Unlike [`diff_chars`] or [`diff_slices`] it returns a -/// change tag for each line. 
-/// -/// ```rust -/// use similar::{Algorithm, ChangeTag}; -/// use similar::utils::diff_lines; -/// -/// assert_eq!(diff_lines(Algorithm::Myers, "foo\nbar\nbaz\nblah", "foo\nbar\nbaz\nblurgh"), vec![ -/// (ChangeTag::Equal, "foo\n"), -/// (ChangeTag::Equal, "bar\n"), -/// (ChangeTag::Equal, "baz\n"), -/// (ChangeTag::Delete, "blah"), -/// (ChangeTag::Insert, "blurgh"), -/// ]); -/// ``` -pub fn diff_lines<'x, T: DiffableStrRef + ?Sized>( - alg: Algorithm, - old: &'x T, - new: &'x T, -) -> Vec<(ChangeTag, &'x T::Output)> { - TextDiff::configure() - .algorithm(alg) - .diff_lines(old, new) - .iter_all_changes() - .map(|change| (change.tag(), change.value())) - .collect() -} - -#[test] -fn test_remapper() { - let a = "foo bar baz"; - let words = a.tokenize_words(); - dbg!(&words); - let remap = SliceRemapper::new(a, &words); - assert_eq!(remap.slice(0..3), Some("foo bar")); - assert_eq!(remap.slice(1..3), Some(" bar")); - assert_eq!(remap.slice(0..1), Some("foo")); - assert_eq!(remap.slice(0..5), Some("foo bar baz")); - assert_eq!(remap.slice(0..6), None); -} diff -Nru s390-tools-2.31.0/rust-vendor/slab/build.rs s390-tools-2.33.1/rust-vendor/slab/build.rs --- s390-tools-2.31.0/rust-vendor/slab/build.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/slab/build.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -fn main() { - let cfg = match autocfg::AutoCfg::new() { - Ok(cfg) => cfg, - Err(e) => { - // If we couldn't detect the compiler version and features, just - // print a warning. This isn't a fatal error: we can still build - // Slab, we just can't enable cfgs automatically. - println!( - "cargo:warning=slab: failed to detect compiler features: {}", - e - ); - return; - } - }; - // Note that this is `no_`*, not `has_*`. This allows treating as the latest - // stable rustc is used when the build script doesn't run. This is useful - // for non-cargo build systems that don't run the build script. 
- if !cfg.probe_rustc_version(1, 39) { - println!("cargo:rustc-cfg=slab_no_const_vec_new"); - } - if !cfg.probe_rustc_version(1, 46) { - println!("cargo:rustc-cfg=slab_no_track_caller"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/slab/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/slab/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/slab/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/slab/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/slab/Cargo.toml s390-tools-2.33.1/rust-vendor/slab/Cargo.toml --- s390-tools-2.31.0/rust-vendor/slab/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/slab/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -rust-version = "1.31" -name = "slab" -version = "0.4.9" -authors = ["Carl Lerche "] -exclude = ["/.*"] -description = "Pre-allocated storage for a uniform data type" -readme = "README.md" -keywords = [ - "slab", - "allocator", - "no_std", -] -categories = [ - "memory-management", - "data-structures", - "no-std", -] -license = "MIT" -repository = "https://github.com/tokio-rs/slab" - -[dependencies.serde] -version = "1.0.95" -features = ["alloc"] -optional = true -default-features = false - -[dev-dependencies.rustversion] -version = "1" - -[dev-dependencies.serde] -version = "1" -features = ["derive"] - -[dev-dependencies.serde_test] -version = "1" - -[build-dependencies.autocfg] -version = "1" - -[features] -default = ["std"] -std = [] diff -Nru s390-tools-2.31.0/rust-vendor/slab/CHANGELOG.md s390-tools-2.33.1/rust-vendor/slab/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/slab/CHANGELOG.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/slab/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,54 +0,0 @@ -# 0.4.9 (August 22, 2023) - -* Avoid reallocations in `Slab::clone_from` (#137) - -# 0.4.8 (January 20, 2023) - -* Fixed documentation about overflow (#124) -* Document panic in `get2_mut` (#131) -* Refactoring (#129, #132) - -# 0.4.7 (July 19, 2022) - -* Use `#[track_caller]` on Rust 1.46+ (#119) -* Make `Slab::new` const on Rust 1.39+ (#119) - -# 0.4.6 (April 2, 2022) - -* Add `Slab::vacant_key` (#114) -* Fix stacked borrows violation in `Slab::get2_unchecked_mut` (#115) - -# 0.4.5 (October 13, 2021) - -* Add alternate debug output for listing items in the slab (#108) -* Fix typo in debug output of IntoIter (#109) -* Impl 'Clone' for 'Iter' (#110) - -# 0.4.4 (August 06, 2021) - -* Fix panic in `FromIterator` impl (#102) -* Fix compatibility with older clippy versions (#104) -* Add `try_remove` method (#89) -* Implement `ExactSizeIterator` and `FusedIterator` for iterators (#92) - -# 0.4.3 (April 20, 2021) - 
-* Add no_std support for Rust 1.36 and above (#71). -* Add `get2_mut` and `get2_unchecked_mut` methods (#65). -* Make `shrink_to_fit()` remove trailing vacant entries (#62). -* Implement `FromIterator<(usize, T)>` (#62). -* Implement `IntoIterator` (#62). -* Provide `size_hint()` of the iterators (#62). -* Make all iterators reversible (#62). -* Add `key_of()` method (#61) -* Add `compact()` method (#60) -* Add support for serde (#85) - -# 0.4.2 (January 11, 2019) - -* Add `Slab::drain` (#56). - -# 0.4.1 (July 15, 2018) - -* Improve `reserve` and `reserve_exact` (#37). -* Implement `Default` for `Slab` (#43). diff -Nru s390-tools-2.31.0/rust-vendor/slab/LICENSE s390-tools-2.33.1/rust-vendor/slab/LICENSE --- s390-tools-2.31.0/rust-vendor/slab/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/slab/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2019 Carl Lerche - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/slab/README.md s390-tools-2.33.1/rust-vendor/slab/README.md --- s390-tools-2.31.0/rust-vendor/slab/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/slab/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,51 +0,0 @@ -# Slab - -Pre-allocated storage for a uniform data type. - -[![Crates.io][crates-badge]][crates-url] -[![Build Status][ci-badge]][ci-url] - -[crates-badge]: https://img.shields.io/crates/v/slab -[crates-url]: https://crates.io/crates/slab -[ci-badge]: https://img.shields.io/github/actions/workflow/status/tokio-rs/slab/ci.yml?branch=master -[ci-url]: https://github.com/tokio-rs/slab/actions - -[Documentation](https://docs.rs/slab) - -## Usage - -To use `slab`, first add this to your `Cargo.toml`: - -```toml -[dependencies] -slab = "0.4" -``` - -Next, add this to your crate: - -```rust -use slab::Slab; - -let mut slab = Slab::new(); - -let hello = slab.insert("hello"); -let world = slab.insert("world"); - -assert_eq!(slab[hello], "hello"); -assert_eq!(slab[world], "world"); - -slab[world] = "earth"; -assert_eq!(slab[world], "earth"); -``` - -See [documentation](https://docs.rs/slab) for more details. - -## License - -This project is licensed under the [MIT license](LICENSE). - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in `slab` by you, shall be licensed as MIT, without any additional -terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/slab/src/builder.rs s390-tools-2.33.1/rust-vendor/slab/src/builder.rs --- s390-tools-2.31.0/rust-vendor/slab/src/builder.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/slab/src/builder.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,63 +0,0 @@ -use crate::{Entry, Slab}; - -// Building `Slab` from pairs (usize, T). 
-pub(crate) struct Builder { - slab: Slab, - vacant_list_broken: bool, - first_vacant_index: Option, -} - -impl Builder { - pub(crate) fn with_capacity(capacity: usize) -> Self { - Self { - slab: Slab::with_capacity(capacity), - vacant_list_broken: false, - first_vacant_index: None, - } - } - pub(crate) fn pair(&mut self, key: usize, value: T) { - let slab = &mut self.slab; - if key < slab.entries.len() { - // iterator is not sorted, might need to recreate vacant list - if let Entry::Vacant(_) = slab.entries[key] { - self.vacant_list_broken = true; - slab.len += 1; - } - // if an element with this key already exists, replace it. - // This is consistent with HashMap and BtreeMap - slab.entries[key] = Entry::Occupied(value); - } else { - if self.first_vacant_index.is_none() && slab.entries.len() < key { - self.first_vacant_index = Some(slab.entries.len()); - } - // insert holes as necessary - while slab.entries.len() < key { - // add the entry to the start of the vacant list - let next = slab.next; - slab.next = slab.entries.len(); - slab.entries.push(Entry::Vacant(next)); - } - slab.entries.push(Entry::Occupied(value)); - slab.len += 1; - } - } - - pub(crate) fn build(self) -> Slab { - let mut slab = self.slab; - if slab.len == slab.entries.len() { - // no vacant entries, so next might not have been updated - slab.next = slab.entries.len(); - } else if self.vacant_list_broken { - slab.recreate_vacant_list(); - } else if let Some(first_vacant_index) = self.first_vacant_index { - let next = slab.entries.len(); - match &mut slab.entries[first_vacant_index] { - Entry::Vacant(n) => *n = next, - _ => unreachable!(), - } - } else { - unreachable!() - } - slab - } -} diff -Nru s390-tools-2.31.0/rust-vendor/slab/src/lib.rs s390-tools-2.33.1/rust-vendor/slab/src/lib.rs --- s390-tools-2.31.0/rust-vendor/slab/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/slab/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1589 +0,0 @@ 
-#![cfg_attr(not(feature = "std"), no_std)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - unreachable_pub -)] -#![doc(test( - no_crate_inject, - attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) -))] - -//! Pre-allocated storage for a uniform data type. -//! -//! `Slab` provides pre-allocated storage for a single data type. If many values -//! of a single type are being allocated, it can be more efficient to -//! pre-allocate the necessary storage. Since the size of the type is uniform, -//! memory fragmentation can be avoided. Storing, clearing, and lookup -//! operations become very cheap. -//! -//! While `Slab` may look like other Rust collections, it is not intended to be -//! used as a general purpose collection. The primary difference between `Slab` -//! and `Vec` is that `Slab` returns the key when storing the value. -//! -//! It is important to note that keys may be reused. In other words, once a -//! value associated with a given key is removed from a slab, that key may be -//! returned from future calls to `insert`. -//! -//! # Examples -//! -//! Basic storing and retrieval. -//! -//! ``` -//! # use slab::*; -//! let mut slab = Slab::new(); -//! -//! let hello = slab.insert("hello"); -//! let world = slab.insert("world"); -//! -//! assert_eq!(slab[hello], "hello"); -//! assert_eq!(slab[world], "world"); -//! -//! slab[world] = "earth"; -//! assert_eq!(slab[world], "earth"); -//! ``` -//! -//! Sometimes it is useful to be able to associate the key with the value being -//! inserted in the slab. This can be done with the `vacant_entry` API as such: -//! -//! ``` -//! # use slab::*; -//! let mut slab = Slab::new(); -//! -//! let hello = { -//! let entry = slab.vacant_entry(); -//! let key = entry.key(); -//! -//! entry.insert((key, "hello")); -//! key -//! }; -//! -//! assert_eq!(hello, slab[hello].0); -//! assert_eq!("hello", slab[hello].1); -//! ``` -//! -//! 
It is generally a good idea to specify the desired capacity of a slab at -//! creation time. Note that `Slab` will grow the internal capacity when -//! attempting to insert a new value once the existing capacity has been reached. -//! To avoid this, add a check. -//! -//! ``` -//! # use slab::*; -//! let mut slab = Slab::with_capacity(1024); -//! -//! // ... use the slab -//! -//! if slab.len() == slab.capacity() { -//! panic!("slab full"); -//! } -//! -//! slab.insert("the slab is not at capacity yet"); -//! ``` -//! -//! # Capacity and reallocation -//! -//! The capacity of a slab is the amount of space allocated for any future -//! values that will be inserted in the slab. This is not to be confused with -//! the *length* of the slab, which specifies the number of actual values -//! currently being inserted. If a slab's length is equal to its capacity, the -//! next value inserted into the slab will require growing the slab by -//! reallocating. -//! -//! For example, a slab with capacity 10 and length 0 would be an empty slab -//! with space for 10 more stored values. Storing 10 or fewer elements into the -//! slab will not change its capacity or cause reallocation to occur. However, -//! if the slab length is increased to 11 (due to another `insert`), it will -//! have to reallocate, which can be slow. For this reason, it is recommended to -//! use [`Slab::with_capacity`] whenever possible to specify how many values the -//! slab is expected to store. -//! -//! # Implementation -//! -//! `Slab` is backed by a `Vec` of slots. Each slot is either occupied or -//! vacant. `Slab` maintains a stack of vacant slots using a linked list. To -//! find a vacant slot, the stack is popped. When a slot is released, it is -//! pushed onto the stack. -//! -//! If there are no more available slots in the stack, then `Vec::reserve(1)` is -//! called and a new slot is created. -//! -//! 
[`Slab::with_capacity`]: struct.Slab.html#with_capacity - -#[cfg(not(feature = "std"))] -extern crate alloc; -#[cfg(feature = "std")] -extern crate std as alloc; - -#[cfg(feature = "serde")] -mod serde; - -mod builder; - -use alloc::vec::{self, Vec}; -use core::iter::{self, FromIterator, FusedIterator}; -use core::{fmt, mem, ops, slice}; - -/// Pre-allocated storage for a uniform data type -/// -/// See the [module documentation] for more details. -/// -/// [module documentation]: index.html -pub struct Slab { - // Chunk of memory - entries: Vec>, - - // Number of Filled elements currently in the slab - len: usize, - - // Offset of the next available slot in the slab. Set to the slab's - // capacity when the slab is full. - next: usize, -} - -impl Clone for Slab -where - T: Clone, -{ - fn clone(&self) -> Self { - Self { - entries: self.entries.clone(), - len: self.len, - next: self.next, - } - } - - fn clone_from(&mut self, source: &Self) { - self.entries.clone_from(&source.entries); - self.len = source.len; - self.next = source.next; - } -} - -impl Default for Slab { - fn default() -> Self { - Slab::new() - } -} - -/// A handle to a vacant entry in a `Slab`. -/// -/// `VacantEntry` allows constructing values with the key that they will be -/// assigned to. 
-/// -/// # Examples -/// -/// ``` -/// # use slab::*; -/// let mut slab = Slab::new(); -/// -/// let hello = { -/// let entry = slab.vacant_entry(); -/// let key = entry.key(); -/// -/// entry.insert((key, "hello")); -/// key -/// }; -/// -/// assert_eq!(hello, slab[hello].0); -/// assert_eq!("hello", slab[hello].1); -/// ``` -#[derive(Debug)] -pub struct VacantEntry<'a, T> { - slab: &'a mut Slab, - key: usize, -} - -/// A consuming iterator over the values stored in a `Slab` -pub struct IntoIter { - entries: iter::Enumerate>>, - len: usize, -} - -/// An iterator over the values stored in the `Slab` -pub struct Iter<'a, T> { - entries: iter::Enumerate>>, - len: usize, -} - -impl<'a, T> Clone for Iter<'a, T> { - fn clone(&self) -> Self { - Self { - entries: self.entries.clone(), - len: self.len, - } - } -} - -/// A mutable iterator over the values stored in the `Slab` -pub struct IterMut<'a, T> { - entries: iter::Enumerate>>, - len: usize, -} - -/// A draining iterator for `Slab` -pub struct Drain<'a, T> { - inner: vec::Drain<'a, Entry>, - len: usize, -} - -#[derive(Clone)] -enum Entry { - Vacant(usize), - Occupied(T), -} - -impl Slab { - /// Construct a new, empty `Slab`. - /// - /// The function does not allocate and the returned slab will have no - /// capacity until `insert` is called or capacity is explicitly reserved. - /// - /// This is `const fn` on Rust 1.39+. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let slab: Slab = Slab::new(); - /// ``` - #[cfg(not(slab_no_const_vec_new))] - pub const fn new() -> Self { - Self { - entries: Vec::new(), - next: 0, - len: 0, - } - } - /// Construct a new, empty `Slab`. - /// - /// The function does not allocate and the returned slab will have no - /// capacity until `insert` is called or capacity is explicitly reserved. - /// - /// This is `const fn` on Rust 1.39+. 
- #[cfg(slab_no_const_vec_new)] - pub fn new() -> Self { - Self { - entries: Vec::new(), - next: 0, - len: 0, - } - } - - /// Construct a new, empty `Slab` with the specified capacity. - /// - /// The returned slab will be able to store exactly `capacity` without - /// reallocating. If `capacity` is 0, the slab will not allocate. - /// - /// It is important to note that this function does not specify the *length* - /// of the returned slab, but only the capacity. For an explanation of the - /// difference between length and capacity, see [Capacity and - /// reallocation](index.html#capacity-and-reallocation). - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::with_capacity(10); - /// - /// // The slab contains no values, even though it has capacity for more - /// assert_eq!(slab.len(), 0); - /// - /// // These are all done without reallocating... - /// for i in 0..10 { - /// slab.insert(i); - /// } - /// - /// // ...but this may make the slab reallocate - /// slab.insert(11); - /// ``` - pub fn with_capacity(capacity: usize) -> Slab { - Slab { - entries: Vec::with_capacity(capacity), - next: 0, - len: 0, - } - } - - /// Return the number of values the slab can store without reallocating. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let slab: Slab = Slab::with_capacity(10); - /// assert_eq!(slab.capacity(), 10); - /// ``` - pub fn capacity(&self) -> usize { - self.entries.capacity() - } - - /// Reserve capacity for at least `additional` more values to be stored - /// without allocating. - /// - /// `reserve` does nothing if the slab already has sufficient capacity for - /// `additional` more values. If more capacity is required, a new segment of - /// memory will be allocated and all existing values will be copied into it. - /// As such, if the slab is already very large, a call to `reserve` can end - /// up being expensive. 
- /// - /// The slab may reserve more than `additional` extra space in order to - /// avoid frequent reallocations. Use `reserve_exact` instead to guarantee - /// that only the requested space is allocated. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// slab.insert("hello"); - /// slab.reserve(10); - /// assert!(slab.capacity() >= 11); - /// ``` - pub fn reserve(&mut self, additional: usize) { - if self.capacity() - self.len >= additional { - return; - } - let need_add = additional - (self.entries.len() - self.len); - self.entries.reserve(need_add); - } - - /// Reserve the minimum capacity required to store exactly `additional` - /// more values. - /// - /// `reserve_exact` does nothing if the slab already has sufficient capacity - /// for `additional` more values. If more capacity is required, a new segment - /// of memory will be allocated and all existing values will be copied into - /// it. As such, if the slab is already very large, a call to `reserve` can - /// end up being expensive. - /// - /// Note that the allocator may give the slab more space than it requests. - /// Therefore capacity can not be relied upon to be precisely minimal. - /// Prefer `reserve` if future insertions are expected. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// slab.insert("hello"); - /// slab.reserve_exact(10); - /// assert!(slab.capacity() >= 11); - /// ``` - pub fn reserve_exact(&mut self, additional: usize) { - if self.capacity() - self.len >= additional { - return; - } - let need_add = additional - (self.entries.len() - self.len); - self.entries.reserve_exact(need_add); - } - - /// Shrink the capacity of the slab as much as possible without invalidating keys. 
- /// - /// Because values cannot be moved to a different index, the slab cannot - /// shrink past any stored values. - /// It will drop down as close as possible to the length but the allocator may - /// still inform the underlying vector that there is space for a few more elements. - /// - /// This function can take O(n) time even when the capacity cannot be reduced - /// or the allocation is shrunk in place. Repeated calls run in O(1) though. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::with_capacity(10); - /// - /// for i in 0..3 { - /// slab.insert(i); - /// } - /// - /// slab.shrink_to_fit(); - /// assert!(slab.capacity() >= 3 && slab.capacity() < 10); - /// ``` - /// - /// The slab cannot shrink past the last present value even if previous - /// values are removed: - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::with_capacity(10); - /// - /// for i in 0..4 { - /// slab.insert(i); - /// } - /// - /// slab.remove(0); - /// slab.remove(3); - /// - /// slab.shrink_to_fit(); - /// assert!(slab.capacity() >= 3 && slab.capacity() < 10); - /// ``` - pub fn shrink_to_fit(&mut self) { - // Remove all vacant entries after the last occupied one, so that - // the capacity can be reduced to what is actually needed. - // If the slab is empty the vector can simply be cleared, but that - // optimization would not affect time complexity when T: Drop. - let len_before = self.entries.len(); - while let Some(&Entry::Vacant(_)) = self.entries.last() { - self.entries.pop(); - } - - // Removing entries breaks the list of vacant entries, - // so it must be repaired - if self.entries.len() != len_before { - // Some vacant entries were removed, so the list now likely¹ - // either contains references to the removed entries, or has an - // invalid end marker. Fix this by recreating the list. 
- self.recreate_vacant_list(); - // ¹: If the removed entries formed the tail of the list, with the - // most recently popped entry being the head of them, (so that its - // index is now the end marker) the list is still valid. - // Checking for that unlikely scenario of this infrequently called - // is not worth the code complexity. - } - - self.entries.shrink_to_fit(); - } - - /// Iterate through all entries to recreate and repair the vacant list. - /// self.len must be correct and is not modified. - fn recreate_vacant_list(&mut self) { - self.next = self.entries.len(); - // We can stop once we've found all vacant entries - let mut remaining_vacant = self.entries.len() - self.len; - if remaining_vacant == 0 { - return; - } - - // Iterate in reverse order so that lower keys are at the start of - // the vacant list. This way future shrinks are more likely to be - // able to remove vacant entries. - for (i, entry) in self.entries.iter_mut().enumerate().rev() { - if let Entry::Vacant(ref mut next) = *entry { - *next = self.next; - self.next = i; - remaining_vacant -= 1; - if remaining_vacant == 0 { - break; - } - } - } - } - - /// Reduce the capacity as much as possible, changing the key for elements when necessary. - /// - /// To allow updating references to the elements which must be moved to a new key, - /// this function takes a closure which is called before moving each element. - /// The second and third parameters to the closure are the current key and - /// new key respectively. - /// In case changing the key for one element turns out not to be possible, - /// the move can be cancelled by returning `false` from the closure. - /// In that case no further attempts at relocating elements is made. - /// If the closure unwinds, the slab will be left in a consistent state, - /// but the value that the closure panicked on might be removed. 
- /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// - /// let mut slab = Slab::with_capacity(10); - /// let a = slab.insert('a'); - /// slab.insert('b'); - /// slab.insert('c'); - /// slab.remove(a); - /// slab.compact(|&mut value, from, to| { - /// assert_eq!((value, from, to), ('c', 2, 0)); - /// true - /// }); - /// assert!(slab.capacity() >= 2 && slab.capacity() < 10); - /// ``` - /// - /// The value is not moved when the closure returns `Err`: - /// - /// ``` - /// # use slab::*; - /// - /// let mut slab = Slab::with_capacity(100); - /// let a = slab.insert('a'); - /// let b = slab.insert('b'); - /// slab.remove(a); - /// slab.compact(|&mut value, from, to| false); - /// assert_eq!(slab.iter().next(), Some((b, &'b'))); - /// ``` - pub fn compact(&mut self, mut rekey: F) - where - F: FnMut(&mut T, usize, usize) -> bool, - { - // If the closure unwinds, we need to restore a valid list of vacant entries - struct CleanupGuard<'a, T> { - slab: &'a mut Slab, - decrement: bool, - } - impl Drop for CleanupGuard<'_, T> { - fn drop(&mut self) { - if self.decrement { - // Value was popped and not pushed back on - self.slab.len -= 1; - } - self.slab.recreate_vacant_list(); - } - } - let mut guard = CleanupGuard { - slab: self, - decrement: true, - }; - - let mut occupied_until = 0; - // While there are vacant entries - while guard.slab.entries.len() > guard.slab.len { - // Find a value that needs to be moved, - // by popping entries until we find an occupied one. - // (entries cannot be empty because 0 is not greater than anything) - if let Some(Entry::Occupied(mut value)) = guard.slab.entries.pop() { - // Found one, now find a vacant entry to move it to - while let Some(&Entry::Occupied(_)) = guard.slab.entries.get(occupied_until) { - occupied_until += 1; - } - // Let the caller try to update references to the key - if !rekey(&mut value, guard.slab.entries.len(), occupied_until) { - // Changing the key failed, so push the entry back on at its old index. 
- guard.slab.entries.push(Entry::Occupied(value)); - guard.decrement = false; - guard.slab.entries.shrink_to_fit(); - return; - // Guard drop handles cleanup - } - // Put the value in its new spot - guard.slab.entries[occupied_until] = Entry::Occupied(value); - // ... and mark it as occupied (this is optional) - occupied_until += 1; - } - } - guard.slab.next = guard.slab.len; - guard.slab.entries.shrink_to_fit(); - // Normal cleanup is not necessary - mem::forget(guard); - } - - /// Clear the slab of all values. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// for i in 0..3 { - /// slab.insert(i); - /// } - /// - /// slab.clear(); - /// assert!(slab.is_empty()); - /// ``` - pub fn clear(&mut self) { - self.entries.clear(); - self.len = 0; - self.next = 0; - } - - /// Return the number of stored values. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// for i in 0..3 { - /// slab.insert(i); - /// } - /// - /// assert_eq!(3, slab.len()); - /// ``` - pub fn len(&self) -> usize { - self.len - } - - /// Return `true` if there are no values stored in the slab. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// assert!(slab.is_empty()); - /// - /// slab.insert(1); - /// assert!(!slab.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Return an iterator over the slab. - /// - /// This function should generally be **avoided** as it is not efficient. - /// Iterators must iterate over every slot in the slab even if it is - /// vacant. As such, a slab with a capacity of 1 million but only one - /// stored value must still iterate the million slots. 
- /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// for i in 0..3 { - /// slab.insert(i); - /// } - /// - /// let mut iterator = slab.iter(); - /// - /// assert_eq!(iterator.next(), Some((0, &0))); - /// assert_eq!(iterator.next(), Some((1, &1))); - /// assert_eq!(iterator.next(), Some((2, &2))); - /// assert_eq!(iterator.next(), None); - /// ``` - pub fn iter(&self) -> Iter<'_, T> { - Iter { - entries: self.entries.iter().enumerate(), - len: self.len, - } - } - - /// Return an iterator that allows modifying each value. - /// - /// This function should generally be **avoided** as it is not efficient. - /// Iterators must iterate over every slot in the slab even if it is - /// vacant. As such, a slab with a capacity of 1 million but only one - /// stored value must still iterate the million slots. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// let key1 = slab.insert(0); - /// let key2 = slab.insert(1); - /// - /// for (key, val) in slab.iter_mut() { - /// if key == key1 { - /// *val += 2; - /// } - /// } - /// - /// assert_eq!(slab[key1], 2); - /// assert_eq!(slab[key2], 1); - /// ``` - pub fn iter_mut(&mut self) -> IterMut<'_, T> { - IterMut { - entries: self.entries.iter_mut().enumerate(), - len: self.len, - } - } - - /// Return a reference to the value associated with the given key. - /// - /// If the given key is not associated with a value, then `None` is - /// returned. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// let key = slab.insert("hello"); - /// - /// assert_eq!(slab.get(key), Some(&"hello")); - /// assert_eq!(slab.get(123), None); - /// ``` - pub fn get(&self, key: usize) -> Option<&T> { - match self.entries.get(key) { - Some(Entry::Occupied(val)) => Some(val), - _ => None, - } - } - - /// Return a mutable reference to the value associated with the given key. 
- /// - /// If the given key is not associated with a value, then `None` is - /// returned. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// let key = slab.insert("hello"); - /// - /// *slab.get_mut(key).unwrap() = "world"; - /// - /// assert_eq!(slab[key], "world"); - /// assert_eq!(slab.get_mut(123), None); - /// ``` - pub fn get_mut(&mut self, key: usize) -> Option<&mut T> { - match self.entries.get_mut(key) { - Some(&mut Entry::Occupied(ref mut val)) => Some(val), - _ => None, - } - } - - /// Return two mutable references to the values associated with the two - /// given keys simultaneously. - /// - /// If any one of the given keys is not associated with a value, then `None` - /// is returned. - /// - /// This function can be used to get two mutable references out of one slab, - /// so that you can manipulate both of them at the same time, eg. swap them. - /// - /// # Panics - /// - /// This function will panic if `key1` and `key2` are the same. 
- /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// use std::mem; - /// - /// let mut slab = Slab::new(); - /// let key1 = slab.insert(1); - /// let key2 = slab.insert(2); - /// let (value1, value2) = slab.get2_mut(key1, key2).unwrap(); - /// mem::swap(value1, value2); - /// assert_eq!(slab[key1], 2); - /// assert_eq!(slab[key2], 1); - /// ``` - pub fn get2_mut(&mut self, key1: usize, key2: usize) -> Option<(&mut T, &mut T)> { - assert!(key1 != key2); - - let (entry1, entry2); - - if key1 > key2 { - let (slice1, slice2) = self.entries.split_at_mut(key1); - entry1 = slice2.get_mut(0); - entry2 = slice1.get_mut(key2); - } else { - let (slice1, slice2) = self.entries.split_at_mut(key2); - entry1 = slice1.get_mut(key1); - entry2 = slice2.get_mut(0); - } - - match (entry1, entry2) { - ( - Some(&mut Entry::Occupied(ref mut val1)), - Some(&mut Entry::Occupied(ref mut val2)), - ) => Some((val1, val2)), - _ => None, - } - } - - /// Return a reference to the value associated with the given key without - /// performing bounds checking. - /// - /// For a safe alternative see [`get`](Slab::get). - /// - /// This function should be used with care. - /// - /// # Safety - /// - /// The key must be within bounds. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// let key = slab.insert(2); - /// - /// unsafe { - /// assert_eq!(slab.get_unchecked(key), &2); - /// } - /// ``` - pub unsafe fn get_unchecked(&self, key: usize) -> &T { - match *self.entries.get_unchecked(key) { - Entry::Occupied(ref val) => val, - _ => unreachable!(), - } - } - - /// Return a mutable reference to the value associated with the given key - /// without performing bounds checking. - /// - /// For a safe alternative see [`get_mut`](Slab::get_mut). - /// - /// This function should be used with care. - /// - /// # Safety - /// - /// The key must be within bounds. 
- /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// let key = slab.insert(2); - /// - /// unsafe { - /// let val = slab.get_unchecked_mut(key); - /// *val = 13; - /// } - /// - /// assert_eq!(slab[key], 13); - /// ``` - pub unsafe fn get_unchecked_mut(&mut self, key: usize) -> &mut T { - match *self.entries.get_unchecked_mut(key) { - Entry::Occupied(ref mut val) => val, - _ => unreachable!(), - } - } - - /// Return two mutable references to the values associated with the two - /// given keys simultaneously without performing bounds checking and safety - /// condition checking. - /// - /// For a safe alternative see [`get2_mut`](Slab::get2_mut). - /// - /// This function should be used with care. - /// - /// # Safety - /// - /// - Both keys must be within bounds. - /// - The condition `key1 != key2` must hold. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// use std::mem; - /// - /// let mut slab = Slab::new(); - /// let key1 = slab.insert(1); - /// let key2 = slab.insert(2); - /// let (value1, value2) = unsafe { slab.get2_unchecked_mut(key1, key2) }; - /// mem::swap(value1, value2); - /// assert_eq!(slab[key1], 2); - /// assert_eq!(slab[key2], 1); - /// ``` - pub unsafe fn get2_unchecked_mut(&mut self, key1: usize, key2: usize) -> (&mut T, &mut T) { - debug_assert_ne!(key1, key2); - let ptr = self.entries.as_mut_ptr(); - let ptr1 = ptr.add(key1); - let ptr2 = ptr.add(key2); - match (&mut *ptr1, &mut *ptr2) { - (&mut Entry::Occupied(ref mut val1), &mut Entry::Occupied(ref mut val2)) => { - (val1, val2) - } - _ => unreachable!(), - } - } - - /// Get the key for an element in the slab. - /// - /// The reference must point to an element owned by the slab. - /// Otherwise this function will panic. - /// This is a constant-time operation because the key can be calculated - /// from the reference with pointer arithmetic. 
- /// - /// # Panics - /// - /// This function will panic if the reference does not point to an element - /// of the slab. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// - /// let mut slab = Slab::new(); - /// let key = slab.insert(String::from("foo")); - /// let value = &slab[key]; - /// assert_eq!(slab.key_of(value), key); - /// ``` - /// - /// Values are not compared, so passing a reference to a different location - /// will result in a panic: - /// - /// ```should_panic - /// # use slab::*; - /// - /// let mut slab = Slab::new(); - /// let key = slab.insert(0); - /// let bad = &0; - /// slab.key_of(bad); // this will panic - /// unreachable!(); - /// ``` - #[cfg_attr(not(slab_no_track_caller), track_caller)] - pub fn key_of(&self, present_element: &T) -> usize { - let element_ptr = present_element as *const T as usize; - let base_ptr = self.entries.as_ptr() as usize; - // Use wrapping subtraction in case the reference is bad - let byte_offset = element_ptr.wrapping_sub(base_ptr); - // The division rounds away any offset of T inside Entry - // The size of Entry is never zero even if T is due to Vacant(usize) - let key = byte_offset / mem::size_of::>(); - // Prevent returning unspecified (but out of bounds) values - if key >= self.entries.len() { - panic!("The reference points to a value outside this slab"); - } - // The reference cannot point to a vacant entry, because then it would not be valid - key - } - - /// Insert a value in the slab, returning key assigned to the value. - /// - /// The returned key can later be used to retrieve or remove the value using indexed - /// lookup and `remove`. Additional capacity is allocated if needed. See - /// [Capacity and reallocation](index.html#capacity-and-reallocation). - /// - /// # Panics - /// - /// Panics if the new storage in the vector exceeds `isize::MAX` bytes. 
- /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// let key = slab.insert("hello"); - /// assert_eq!(slab[key], "hello"); - /// ``` - pub fn insert(&mut self, val: T) -> usize { - let key = self.next; - - self.insert_at(key, val); - - key - } - - /// Returns the key of the next vacant entry. - /// - /// This function returns the key of the vacant entry which will be used - /// for the next insertion. This is equivalent to - /// `slab.vacant_entry().key()`, but it doesn't require mutable access. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// assert_eq!(slab.vacant_key(), 0); - /// - /// slab.insert(0); - /// assert_eq!(slab.vacant_key(), 1); - /// - /// slab.insert(1); - /// slab.remove(0); - /// assert_eq!(slab.vacant_key(), 0); - /// ``` - pub fn vacant_key(&self) -> usize { - self.next - } - - /// Return a handle to a vacant entry allowing for further manipulation. - /// - /// This function is useful when creating values that must contain their - /// slab key. The returned `VacantEntry` reserves a slot in the slab and is - /// able to query the associated key. 
- /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// let hello = { - /// let entry = slab.vacant_entry(); - /// let key = entry.key(); - /// - /// entry.insert((key, "hello")); - /// key - /// }; - /// - /// assert_eq!(hello, slab[hello].0); - /// assert_eq!("hello", slab[hello].1); - /// ``` - pub fn vacant_entry(&mut self) -> VacantEntry<'_, T> { - VacantEntry { - key: self.next, - slab: self, - } - } - - fn insert_at(&mut self, key: usize, val: T) { - self.len += 1; - - if key == self.entries.len() { - self.entries.push(Entry::Occupied(val)); - self.next = key + 1; - } else { - self.next = match self.entries.get(key) { - Some(&Entry::Vacant(next)) => next, - _ => unreachable!(), - }; - self.entries[key] = Entry::Occupied(val); - } - } - - /// Tries to remove the value associated with the given key, - /// returning the value if the key existed. - /// - /// The key is then released and may be associated with future stored - /// values. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// let hello = slab.insert("hello"); - /// - /// assert_eq!(slab.try_remove(hello), Some("hello")); - /// assert!(!slab.contains(hello)); - /// ``` - pub fn try_remove(&mut self, key: usize) -> Option { - if let Some(entry) = self.entries.get_mut(key) { - // Swap the entry at the provided value - let prev = mem::replace(entry, Entry::Vacant(self.next)); - - match prev { - Entry::Occupied(val) => { - self.len -= 1; - self.next = key; - return val.into(); - } - _ => { - // Woops, the entry is actually vacant, restore the state - *entry = prev; - } - } - } - None - } - - /// Remove and return the value associated with the given key. - /// - /// The key is then released and may be associated with future stored - /// values. - /// - /// # Panics - /// - /// Panics if `key` is not associated with a value. 
- /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// let hello = slab.insert("hello"); - /// - /// assert_eq!(slab.remove(hello), "hello"); - /// assert!(!slab.contains(hello)); - /// ``` - #[cfg_attr(not(slab_no_track_caller), track_caller)] - pub fn remove(&mut self, key: usize) -> T { - self.try_remove(key).expect("invalid key") - } - - /// Return `true` if a value is associated with the given key. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// let hello = slab.insert("hello"); - /// assert!(slab.contains(hello)); - /// - /// slab.remove(hello); - /// - /// assert!(!slab.contains(hello)); - /// ``` - pub fn contains(&self, key: usize) -> bool { - match self.entries.get(key) { - Some(&Entry::Occupied(_)) => true, - _ => false, - } - } - - /// Retain only the elements specified by the predicate. - /// - /// In other words, remove all elements `e` such that `f(usize, &mut e)` - /// returns false. This method operates in place and preserves the key - /// associated with the retained values. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// let k1 = slab.insert(0); - /// let k2 = slab.insert(1); - /// let k3 = slab.insert(2); - /// - /// slab.retain(|key, val| key == k1 || *val == 1); - /// - /// assert!(slab.contains(k1)); - /// assert!(slab.contains(k2)); - /// assert!(!slab.contains(k3)); - /// - /// assert_eq!(2, slab.len()); - /// ``` - pub fn retain(&mut self, mut f: F) - where - F: FnMut(usize, &mut T) -> bool, - { - for i in 0..self.entries.len() { - let keep = match self.entries[i] { - Entry::Occupied(ref mut v) => f(i, v), - _ => true, - }; - - if !keep { - self.remove(i); - } - } - } - - /// Return a draining iterator that removes all elements from the slab and - /// yields the removed items. 
- /// - /// Note: Elements are removed even if the iterator is only partially - /// consumed or not consumed at all. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// let _ = slab.insert(0); - /// let _ = slab.insert(1); - /// let _ = slab.insert(2); - /// - /// { - /// let mut drain = slab.drain(); - /// - /// assert_eq!(Some(0), drain.next()); - /// assert_eq!(Some(1), drain.next()); - /// assert_eq!(Some(2), drain.next()); - /// assert_eq!(None, drain.next()); - /// } - /// - /// assert!(slab.is_empty()); - /// ``` - pub fn drain(&mut self) -> Drain<'_, T> { - let old_len = self.len; - self.len = 0; - self.next = 0; - Drain { - inner: self.entries.drain(..), - len: old_len, - } - } -} - -impl ops::Index for Slab { - type Output = T; - - #[cfg_attr(not(slab_no_track_caller), track_caller)] - fn index(&self, key: usize) -> &T { - match self.entries.get(key) { - Some(Entry::Occupied(v)) => v, - _ => panic!("invalid key"), - } - } -} - -impl ops::IndexMut for Slab { - #[cfg_attr(not(slab_no_track_caller), track_caller)] - fn index_mut(&mut self, key: usize) -> &mut T { - match self.entries.get_mut(key) { - Some(&mut Entry::Occupied(ref mut v)) => v, - _ => panic!("invalid key"), - } - } -} - -impl IntoIterator for Slab { - type Item = (usize, T); - type IntoIter = IntoIter; - - fn into_iter(self) -> IntoIter { - IntoIter { - entries: self.entries.into_iter().enumerate(), - len: self.len, - } - } -} - -impl<'a, T> IntoIterator for &'a Slab { - type Item = (usize, &'a T); - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Iter<'a, T> { - self.iter() - } -} - -impl<'a, T> IntoIterator for &'a mut Slab { - type Item = (usize, &'a mut T); - type IntoIter = IterMut<'a, T>; - - fn into_iter(self) -> IterMut<'a, T> { - self.iter_mut() - } -} - -/// Create a slab from an iterator of key-value pairs. -/// -/// If the iterator produces duplicate keys, the previous value is replaced with the later one. 
-/// The keys does not need to be sorted beforehand, and this function always -/// takes O(n) time. -/// Note that the returned slab will use space proportional to the largest key, -/// so don't use `Slab` with untrusted keys. -/// -/// # Examples -/// -/// ``` -/// # use slab::*; -/// -/// let vec = vec![(2,'a'), (6,'b'), (7,'c')]; -/// let slab = vec.into_iter().collect::>(); -/// assert_eq!(slab.len(), 3); -/// assert!(slab.capacity() >= 8); -/// assert_eq!(slab[2], 'a'); -/// ``` -/// -/// With duplicate and unsorted keys: -/// -/// ``` -/// # use slab::*; -/// -/// let vec = vec![(20,'a'), (10,'b'), (11,'c'), (10,'d')]; -/// let slab = vec.into_iter().collect::>(); -/// assert_eq!(slab.len(), 3); -/// assert_eq!(slab[10], 'd'); -/// ``` -impl FromIterator<(usize, T)> for Slab { - fn from_iter(iterable: I) -> Self - where - I: IntoIterator, - { - let iterator = iterable.into_iter(); - let mut builder = builder::Builder::with_capacity(iterator.size_hint().0); - - for (key, value) in iterator { - builder.pair(key, value) - } - builder.build() - } -} - -impl fmt::Debug for Slab -where - T: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - if fmt.alternate() { - fmt.debug_map().entries(self.iter()).finish() - } else { - fmt.debug_struct("Slab") - .field("len", &self.len) - .field("cap", &self.capacity()) - .finish() - } - } -} - -impl fmt::Debug for IntoIter -where - T: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("IntoIter") - .field("remaining", &self.len) - .finish() - } -} - -impl fmt::Debug for Iter<'_, T> -where - T: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Iter") - .field("remaining", &self.len) - .finish() - } -} - -impl fmt::Debug for IterMut<'_, T> -where - T: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("IterMut") - .field("remaining", &self.len) - .finish() - } -} - 
-impl fmt::Debug for Drain<'_, T> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Drain").finish() - } -} - -// ===== VacantEntry ===== - -impl<'a, T> VacantEntry<'a, T> { - /// Insert a value in the entry, returning a mutable reference to the value. - /// - /// To get the key associated with the value, use `key` prior to calling - /// `insert`. - /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// let hello = { - /// let entry = slab.vacant_entry(); - /// let key = entry.key(); - /// - /// entry.insert((key, "hello")); - /// key - /// }; - /// - /// assert_eq!(hello, slab[hello].0); - /// assert_eq!("hello", slab[hello].1); - /// ``` - pub fn insert(self, val: T) -> &'a mut T { - self.slab.insert_at(self.key, val); - - match self.slab.entries.get_mut(self.key) { - Some(&mut Entry::Occupied(ref mut v)) => v, - _ => unreachable!(), - } - } - - /// Return the key associated with this entry. - /// - /// A value stored in this entry will be associated with this key. 
- /// - /// # Examples - /// - /// ``` - /// # use slab::*; - /// let mut slab = Slab::new(); - /// - /// let hello = { - /// let entry = slab.vacant_entry(); - /// let key = entry.key(); - /// - /// entry.insert((key, "hello")); - /// key - /// }; - /// - /// assert_eq!(hello, slab[hello].0); - /// assert_eq!("hello", slab[hello].1); - /// ``` - pub fn key(&self) -> usize { - self.key - } -} - -// ===== IntoIter ===== - -impl Iterator for IntoIter { - type Item = (usize, T); - - fn next(&mut self) -> Option { - for (key, entry) in &mut self.entries { - if let Entry::Occupied(v) = entry { - self.len -= 1; - return Some((key, v)); - } - } - - debug_assert_eq!(self.len, 0); - None - } - - fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) - } -} - -impl DoubleEndedIterator for IntoIter { - fn next_back(&mut self) -> Option { - while let Some((key, entry)) = self.entries.next_back() { - if let Entry::Occupied(v) = entry { - self.len -= 1; - return Some((key, v)); - } - } - - debug_assert_eq!(self.len, 0); - None - } -} - -impl ExactSizeIterator for IntoIter { - fn len(&self) -> usize { - self.len - } -} - -impl FusedIterator for IntoIter {} - -// ===== Iter ===== - -impl<'a, T> Iterator for Iter<'a, T> { - type Item = (usize, &'a T); - - fn next(&mut self) -> Option { - for (key, entry) in &mut self.entries { - if let Entry::Occupied(ref v) = *entry { - self.len -= 1; - return Some((key, v)); - } - } - - debug_assert_eq!(self.len, 0); - None - } - - fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) - } -} - -impl DoubleEndedIterator for Iter<'_, T> { - fn next_back(&mut self) -> Option { - while let Some((key, entry)) = self.entries.next_back() { - if let Entry::Occupied(ref v) = *entry { - self.len -= 1; - return Some((key, v)); - } - } - - debug_assert_eq!(self.len, 0); - None - } -} - -impl ExactSizeIterator for Iter<'_, T> { - fn len(&self) -> usize { - self.len - } -} - -impl FusedIterator for Iter<'_, T> {} - -// ===== 
IterMut ===== - -impl<'a, T> Iterator for IterMut<'a, T> { - type Item = (usize, &'a mut T); - - fn next(&mut self) -> Option { - for (key, entry) in &mut self.entries { - if let Entry::Occupied(ref mut v) = *entry { - self.len -= 1; - return Some((key, v)); - } - } - - debug_assert_eq!(self.len, 0); - None - } - - fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) - } -} - -impl DoubleEndedIterator for IterMut<'_, T> { - fn next_back(&mut self) -> Option { - while let Some((key, entry)) = self.entries.next_back() { - if let Entry::Occupied(ref mut v) = *entry { - self.len -= 1; - return Some((key, v)); - } - } - - debug_assert_eq!(self.len, 0); - None - } -} - -impl ExactSizeIterator for IterMut<'_, T> { - fn len(&self) -> usize { - self.len - } -} - -impl FusedIterator for IterMut<'_, T> {} - -// ===== Drain ===== - -impl Iterator for Drain<'_, T> { - type Item = T; - - fn next(&mut self) -> Option { - for entry in &mut self.inner { - if let Entry::Occupied(v) = entry { - self.len -= 1; - return Some(v); - } - } - - debug_assert_eq!(self.len, 0); - None - } - - fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) - } -} - -impl DoubleEndedIterator for Drain<'_, T> { - fn next_back(&mut self) -> Option { - while let Some(entry) = self.inner.next_back() { - if let Entry::Occupied(v) = entry { - self.len -= 1; - return Some(v); - } - } - - debug_assert_eq!(self.len, 0); - None - } -} - -impl ExactSizeIterator for Drain<'_, T> { - fn len(&self) -> usize { - self.len - } -} - -impl FusedIterator for Drain<'_, T> {} diff -Nru s390-tools-2.31.0/rust-vendor/slab/src/serde.rs s390-tools-2.33.1/rust-vendor/slab/src/serde.rs --- s390-tools-2.31.0/rust-vendor/slab/src/serde.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/slab/src/serde.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,62 +0,0 @@ -use core::fmt; -use core::marker::PhantomData; - -use serde::de::{Deserialize, Deserializer, MapAccess, Visitor}; -use 
serde::ser::{Serialize, SerializeMap, Serializer}; - -use super::{builder::Builder, Slab}; - -impl Serialize for Slab -where - T: Serialize, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut map_serializer = serializer.serialize_map(Some(self.len()))?; - for (key, value) in self { - map_serializer.serialize_key(&key)?; - map_serializer.serialize_value(value)?; - } - map_serializer.end() - } -} - -struct SlabVisitor(PhantomData); - -impl<'de, T> Visitor<'de> for SlabVisitor -where - T: Deserialize<'de>, -{ - type Value = Slab; - - fn expecting(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "a map") - } - - fn visit_map(self, mut map: A) -> Result - where - A: MapAccess<'de>, - { - let mut builder = Builder::with_capacity(map.size_hint().unwrap_or(0)); - - while let Some((key, value)) = map.next_entry()? { - builder.pair(key, value) - } - - Ok(builder.build()) - } -} - -impl<'de, T> Deserialize<'de> for Slab -where - T: Deserialize<'de>, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_map(SlabVisitor(PhantomData)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/benches/bench.rs s390-tools-2.33.1/rust-vendor/smallvec/benches/bench.rs --- s390-tools-2.31.0/rust-vendor/smallvec/benches/bench.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/benches/bench.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,312 +0,0 @@ -#![feature(test)] -#![allow(deprecated)] - -extern crate test; - -use self::test::Bencher; -use smallvec::{ExtendFromSlice, smallvec, SmallVec}; - -const VEC_SIZE: usize = 16; -const SPILLED_SIZE: usize = 100; - -trait Vector: for<'a> From<&'a [T]> + Extend + ExtendFromSlice { - fn new() -> Self; - fn push(&mut self, val: T); - fn pop(&mut self) -> Option; - fn remove(&mut self, p: usize) -> T; - fn insert(&mut self, n: usize, val: T); - fn from_elem(val: T, n: usize) -> Self; - fn from_elems(val: &[T]) 
-> Self; -} - -impl Vector for Vec { - fn new() -> Self { - Self::with_capacity(VEC_SIZE) - } - - fn push(&mut self, val: T) { - self.push(val) - } - - fn pop(&mut self) -> Option { - self.pop() - } - - fn remove(&mut self, p: usize) -> T { - self.remove(p) - } - - fn insert(&mut self, n: usize, val: T) { - self.insert(n, val) - } - - fn from_elem(val: T, n: usize) -> Self { - vec![val; n] - } - - fn from_elems(val: &[T]) -> Self { - val.to_owned() - } -} - -impl Vector for SmallVec<[T; VEC_SIZE]> { - fn new() -> Self { - Self::new() - } - - fn push(&mut self, val: T) { - self.push(val) - } - - fn pop(&mut self) -> Option { - self.pop() - } - - fn remove(&mut self, p: usize) -> T { - self.remove(p) - } - - fn insert(&mut self, n: usize, val: T) { - self.insert(n, val) - } - - fn from_elem(val: T, n: usize) -> Self { - smallvec![val; n] - } - - fn from_elems(val: &[T]) -> Self { - SmallVec::from_slice(val) - } -} - -macro_rules! make_benches { - ($typ:ty { $($b_name:ident => $g_name:ident($($args:expr),*),)* }) => { - $( - #[bench] - fn $b_name(b: &mut Bencher) { - $g_name::<$typ>($($args,)* b) - } - )* - } -} - -make_benches! 
{ - SmallVec<[u64; VEC_SIZE]> { - bench_push => gen_push(SPILLED_SIZE as _), - bench_push_small => gen_push(VEC_SIZE as _), - bench_insert_push => gen_insert_push(SPILLED_SIZE as _), - bench_insert_push_small => gen_insert_push(VEC_SIZE as _), - bench_insert => gen_insert(SPILLED_SIZE as _), - bench_insert_small => gen_insert(VEC_SIZE as _), - bench_remove => gen_remove(SPILLED_SIZE as _), - bench_remove_small => gen_remove(VEC_SIZE as _), - bench_extend => gen_extend(SPILLED_SIZE as _), - bench_extend_small => gen_extend(VEC_SIZE as _), - bench_from_iter => gen_from_iter(SPILLED_SIZE as _), - bench_from_iter_small => gen_from_iter(VEC_SIZE as _), - bench_from_slice => gen_from_slice(SPILLED_SIZE as _), - bench_from_slice_small => gen_from_slice(VEC_SIZE as _), - bench_extend_from_slice => gen_extend_from_slice(SPILLED_SIZE as _), - bench_extend_from_slice_small => gen_extend_from_slice(VEC_SIZE as _), - bench_macro_from_elem => gen_from_elem(SPILLED_SIZE as _), - bench_macro_from_elem_small => gen_from_elem(VEC_SIZE as _), - bench_pushpop => gen_pushpop(), - } -} - -make_benches! 
{ - Vec { - bench_push_vec => gen_push(SPILLED_SIZE as _), - bench_push_vec_small => gen_push(VEC_SIZE as _), - bench_insert_push_vec => gen_insert_push(SPILLED_SIZE as _), - bench_insert_push_vec_small => gen_insert_push(VEC_SIZE as _), - bench_insert_vec => gen_insert(SPILLED_SIZE as _), - bench_insert_vec_small => gen_insert(VEC_SIZE as _), - bench_remove_vec => gen_remove(SPILLED_SIZE as _), - bench_remove_vec_small => gen_remove(VEC_SIZE as _), - bench_extend_vec => gen_extend(SPILLED_SIZE as _), - bench_extend_vec_small => gen_extend(VEC_SIZE as _), - bench_from_iter_vec => gen_from_iter(SPILLED_SIZE as _), - bench_from_iter_vec_small => gen_from_iter(VEC_SIZE as _), - bench_from_slice_vec => gen_from_slice(SPILLED_SIZE as _), - bench_from_slice_vec_small => gen_from_slice(VEC_SIZE as _), - bench_extend_from_slice_vec => gen_extend_from_slice(SPILLED_SIZE as _), - bench_extend_from_slice_vec_small => gen_extend_from_slice(VEC_SIZE as _), - bench_macro_from_elem_vec => gen_from_elem(SPILLED_SIZE as _), - bench_macro_from_elem_vec_small => gen_from_elem(VEC_SIZE as _), - bench_pushpop_vec => gen_pushpop(), - } -} - -fn gen_push>(n: u64, b: &mut Bencher) { - #[inline(never)] - fn push_noinline>(vec: &mut V, x: u64) { - vec.push(x); - } - - b.iter(|| { - let mut vec = V::new(); - for x in 0..n { - push_noinline(&mut vec, x); - } - vec - }); -} - -fn gen_insert_push>(n: u64, b: &mut Bencher) { - #[inline(never)] - fn insert_push_noinline>(vec: &mut V, x: u64) { - vec.insert(x as usize, x); - } - - b.iter(|| { - let mut vec = V::new(); - for x in 0..n { - insert_push_noinline(&mut vec, x); - } - vec - }); -} - -fn gen_insert>(n: u64, b: &mut Bencher) { - #[inline(never)] - fn insert_noinline>(vec: &mut V, p: usize, x: u64) { - vec.insert(p, x) - } - - b.iter(|| { - let mut vec = V::new(); - // Always insert at position 0 so that we are subject to shifts of - // many different lengths. 
- vec.push(0); - for x in 0..n { - insert_noinline(&mut vec, 0, x); - } - vec - }); -} - -fn gen_remove>(n: usize, b: &mut Bencher) { - #[inline(never)] - fn remove_noinline>(vec: &mut V, p: usize) -> u64 { - vec.remove(p) - } - - b.iter(|| { - let mut vec = V::from_elem(0, n as _); - - for _ in 0..n { - remove_noinline(&mut vec, 0); - } - }); -} - -fn gen_extend>(n: u64, b: &mut Bencher) { - b.iter(|| { - let mut vec = V::new(); - vec.extend(0..n); - vec - }); -} - -fn gen_from_iter>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let vec = V::from(&v); - vec - }); -} - -fn gen_from_slice>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let vec = V::from_elems(&v); - vec - }); -} - -fn gen_extend_from_slice>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let mut vec = V::new(); - vec.extend_from_slice(&v); - vec - }); -} - -fn gen_pushpop>(b: &mut Bencher) { - #[inline(never)] - fn pushpop_noinline>(vec: &mut V, x: u64) -> Option { - vec.push(x); - vec.pop() - } - - b.iter(|| { - let mut vec = V::new(); - for x in 0..SPILLED_SIZE as _ { - pushpop_noinline(&mut vec, x); - } - vec - }); -} - -fn gen_from_elem>(n: usize, b: &mut Bencher) { - b.iter(|| { - let vec = V::from_elem(42, n); - vec - }); -} - -#[bench] -fn bench_insert_many(b: &mut Bencher) { - #[inline(never)] - fn insert_many_noinline>( - vec: &mut SmallVec<[u64; VEC_SIZE]>, - index: usize, - iterable: I, - ) { - vec.insert_many(index, iterable) - } - - b.iter(|| { - let mut vec = SmallVec::<[u64; VEC_SIZE]>::new(); - insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _); - insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _); - vec - }); -} - -#[bench] -fn bench_insert_from_slice(b: &mut Bencher) { - let v: Vec = (0..SPILLED_SIZE as _).collect(); - b.iter(|| { - let mut vec = SmallVec::<[u64; VEC_SIZE]>::new(); - vec.insert_from_slice(0, &v); - vec.insert_from_slice(0, &v); - vec - }); -} - -#[bench] -fn 
bench_macro_from_list(b: &mut Bencher) { - b.iter(|| { - let vec: SmallVec<[u64; 16]> = smallvec![ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80, - 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000, - 0x80000, 0x100000, - ]; - vec - }); -} - -#[bench] -fn bench_macro_from_list_vec(b: &mut Bencher) { - b.iter(|| { - let vec: Vec = vec![ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80, - 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000, - 0x80000, 0x100000, - ]; - vec - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/smallvec/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/smallvec/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/Cargo.toml s390-tools-2.33.1/rust-vendor/smallvec/Cargo.toml --- s390-tools-2.31.0/rust-vendor/smallvec/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -name = "smallvec" -version = "1.11.2" -authors = ["The Servo Project Developers"] -description = "'Small vector' optimization: store up to a small number of items on the stack" -documentation = "https://docs.rs/smallvec/" -readme = "README.md" -keywords = [ - "small", - "vec", - "vector", - "stack", - "no_std", -] -categories = ["data-structures"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/servo/rust-smallvec" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", - "--generate-link-to-definition", -] - -[[test]] -name = "debugger_visualizer" -path = "tests/debugger_visualizer.rs" -test = false -required-features = ["debugger_visualizer"] - -[dependencies.arbitrary] -version = "1" -optional = true - -[dependencies.serde] -version = "1" -optional = true -default-features = false - -[dev-dependencies.bincode] -version = "1.0.1" - -[dev-dependencies.debugger_test] -version = "0.1.0" - -[dev-dependencies.debugger_test_parser] -version = "0.1.0" - -[features] -const_generics = [] -const_new = ["const_generics"] -debugger_visualizer = [] -drain_filter = [] -drain_keep_rest = ["drain_filter"] -may_dangle = [] -specialization = [] -union = [] -write = [] diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/debug_metadata/README.md s390-tools-2.33.1/rust-vendor/smallvec/debug_metadata/README.md --- s390-tools-2.31.0/rust-vendor/smallvec/debug_metadata/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/debug_metadata/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,111 +0,0 @@ -## Debugger Visualizers - -Many languages and debuggers enable developers to control how a type is -displayed in a debugger. These are called "debugger visualizations" or "debugger -views". - -The Windows debuggers (WinDbg\CDB) support defining custom debugger visualizations using -the `Natvis` framework. 
To use Natvis, developers write XML documents using the natvis -schema that describe how debugger types should be displayed with the `.natvis` extension. -(See: https://docs.microsoft.com/en-us/visualstudio/debugger/create-custom-views-of-native-objects?view=vs-2019) -The Natvis files provide patterns which match type names a description of how to display -those types. - -The Natvis schema can be found either online (See: https://code.visualstudio.com/docs/cpp/natvis#_schema) -or locally at `\Xml\Schemas\1033\natvis.xsd`. - -The GNU debugger (GDB) supports defining custom debugger views using Pretty Printers. -Pretty printers are written as python scripts that describe how a type should be displayed -when loaded up in GDB/LLDB. (See: https://sourceware.org/gdb/onlinedocs/gdb/Pretty-Printing.html#Pretty-Printing) -The pretty printers provide patterns, which match type names, and for matching -types, describe how to display those types. (For writing a pretty printer, see: https://sourceware.org/gdb/onlinedocs/gdb/Writing-a-Pretty_002dPrinter.html#Writing-a-Pretty_002dPrinter). - -### Embedding Visualizers - -Through the use of the currently unstable `#[debugger_visualizer]` attribute, the `smallvec` -crate can embed debugger visualizers into the crate metadata. - -Currently the two types of visualizers supported are Natvis and Pretty printers. - -For Natvis files, when linking an executable with a crate that includes Natvis files, -the MSVC linker will embed the contents of all Natvis files into the generated `PDB`. - -For pretty printers, the compiler will encode the contents of the pretty printer -in the `.debug_gdb_scripts` section of the `ELF` generated. - -### Testing Visualizers - -The `smallvec` crate supports testing debugger visualizers defined for this crate. The entry point for -these tests are `tests/debugger_visualizer.rs`. These tests are defined using the `debugger_test` and -`debugger_test_parser` crates. 
The `debugger_test` crate is a proc macro crate which defines a -single proc macro attribute, `#[debugger_test]`. For more detailed information about this crate, -see https://crates.io/crates/debugger_test. The CI pipeline for the `smallvec` crate has been updated -to run the debugger visualizer tests to ensure debugger visualizers do not become broken/stale. - -The `#[debugger_test]` proc macro attribute may only be used on test functions and will run the -function under the debugger specified by the `debugger` meta item. - -This proc macro attribute has 3 required values: - -1. The first required meta item, `debugger`, takes a string value which specifies the debugger to launch. -2. The second required meta item, `commands`, takes a string of new line (`\n`) separated list of debugger -commands to run. -3. The third required meta item, `expected_statements`, takes a string of new line (`\n`) separated list of -statements that must exist in the debugger output. Pattern matching through regular expressions is also -supported by using the `pattern:` prefix for each expected statement. - -#### Example: - -```rust -#[debugger_test( - debugger = "cdb", - commands = "command1\ncommand2\ncommand3", - expected_statements = "statement1\nstatement2\nstatement3")] -fn test() { - -} -``` - -Using a multiline string is also supported, with a single debugger command/expected statement per line: - -```rust -#[debugger_test( - debugger = "cdb", - commands = " -command1 -command2 -command3", - expected_statements = " -statement1 -pattern:statement[0-9]+ -statement3")] -fn test() { - -} -``` - -In the example above, the second expected statement uses pattern matching through a regular expression -by using the `pattern:` prefix. 
- -#### Testing Locally - -Currently, only Natvis visualizations have been defined for the `smallvec` crate via `debug_metadata/smallvec.natvis`, -which means the `tests/debugger_visualizer.rs` tests need to be run on Windows using the `*-pc-windows-msvc` targets. -To run these tests locally, first ensure the debugging tools for Windows are installed or install them following -the steps listed here, [Debugging Tools for Windows](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/). -Once the debugging tools have been installed, the tests can be run in the same manner as they are in the CI -pipeline. - -#### Note - -When running the debugger visualizer tests, `tests/debugger_visualizer.rs`, they need to be run consecutively -and not in parallel. This can be achieved by passing the flag `--test-threads=1` to rustc. This is due to -how the debugger tests are run. Each test marked with the `#[debugger_test]` attribute launches a debugger -and attaches it to the current test process. If tests are running in parallel, the test will try to attach -a debugger to the current process which may already have a debugger attached causing the test to fail. - -For example: - -``` -cargo test --test debugger_visualizer --features debugger_visualizer -- --test-threads=1 -``` diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/debug_metadata/smallvec.natvis s390-tools-2.33.1/rust-vendor/smallvec/debug_metadata/smallvec.natvis --- s390-tools-2.31.0/rust-vendor/smallvec/debug_metadata/smallvec.natvis 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/debug_metadata/smallvec.natvis 1970-01-01 01:00:00.000000000 +0100 @@ -1,35 +0,0 @@ - - - - - - - {{ len={len()} is_inline={is_inline()} }} - - is_inline() ? $T2 : capacity - len() - data_ptr() - - - len() - data_ptr() - - - - - - - - - {{ len={len()} is_inline={is_inline()} }} - - is_inline() ? 
$T2 : capacity - len() - - - len() - data_ptr() - - - - \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/smallvec/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/smallvec/LICENSE-APACHE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/LICENSE-MIT s390-tools-2.33.1/rust-vendor/smallvec/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/smallvec/LICENSE-MIT 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2018 The Servo Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/README.md s390-tools-2.33.1/rust-vendor/smallvec/README.md --- s390-tools-2.31.0/rust-vendor/smallvec/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -rust-smallvec -============= - -[Documentation](https://docs.rs/smallvec/) - -[Release notes](https://github.com/servo/rust-smallvec/releases) - -"Small vector" optimization for Rust: store up to a small number of items on the stack - -## Example - -```rust -use smallvec::{SmallVec, smallvec}; - -// This SmallVec can hold up to 4 items on the stack: -let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4]; - -// It will automatically move its contents to the heap if -// contains more than four items: -v.push(5); - -// SmallVec points to a slice, so you can use normal slice -// indexing and other methods to access its contents: -v[0] = v[1] + v[2]; -v.sort(); -``` diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/scripts/run_miri.sh s390-tools-2.33.1/rust-vendor/smallvec/scripts/run_miri.sh --- s390-tools-2.31.0/rust-vendor/smallvec/scripts/run_miri.sh 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/scripts/run_miri.sh 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -#!/usr/bin/bash - -set -ex - -# Clean out our target dir, which may have artifacts compiled by a version of -# rust different from the one we're about to download. -cargo clean - -# Install and run the latest version of nightly where miri built successfully. 
-# Taken from: https://github.com/rust-lang/miri#running-miri-on-ci - -MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri) -echo "Installing latest nightly with Miri: $MIRI_NIGHTLY" -rustup override unset -rustup default "$MIRI_NIGHTLY" - -rustup component add miri -cargo miri setup - -cargo miri test --verbose -cargo miri test --verbose --features union -cargo miri test --verbose --all-features - -rustup override set nightly diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/src/arbitrary.rs s390-tools-2.33.1/rust-vendor/smallvec/src/arbitrary.rs --- s390-tools-2.31.0/rust-vendor/smallvec/src/arbitrary.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/src/arbitrary.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -use crate::{Array, SmallVec}; -use arbitrary::{Arbitrary, Unstructured}; - -impl<'a, A: Array> Arbitrary<'a> for SmallVec -where - ::Item: Arbitrary<'a>, -{ - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { - u.arbitrary_iter()?.collect() - } - - fn arbitrary_take_rest(u: Unstructured<'a>) -> arbitrary::Result { - u.arbitrary_take_rest_iter()?.collect() - } - - fn size_hint(depth: usize) -> (usize, Option) { - arbitrary::size_hint::and(::size_hint(depth), (0, None)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/src/lib.rs s390-tools-2.33.1/rust-vendor/smallvec/src/lib.rs --- s390-tools-2.31.0/rust-vendor/smallvec/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2457 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Small vectors in various sizes. These store a certain number of elements inline, and fall back -//! to the heap for larger allocations. 
This can be a useful optimization for improving cache -//! locality and reducing allocator traffic for workloads that fit within the inline buffer. -//! -//! ## `no_std` support -//! -//! By default, `smallvec` does not depend on `std`. However, the optional -//! `write` feature implements the `std::io::Write` trait for vectors of `u8`. -//! When this feature is enabled, `smallvec` depends on `std`. -//! -//! ## Optional features -//! -//! ### `serde` -//! -//! When this optional dependency is enabled, `SmallVec` implements the `serde::Serialize` and -//! `serde::Deserialize` traits. -//! -//! ### `write` -//! -//! When this feature is enabled, `SmallVec<[u8; _]>` implements the `std::io::Write` trait. -//! This feature is not compatible with `#![no_std]` programs. -//! -//! ### `union` -//! -//! **This feature requires Rust 1.49.** -//! -//! When the `union` feature is enabled `smallvec` will track its state (inline or spilled) -//! without the use of an enum tag, reducing the size of the `smallvec` by one machine word. -//! This means that there is potentially no space overhead compared to `Vec`. -//! Note that `smallvec` can still be larger than `Vec` if the inline buffer is larger than two -//! machine words. -//! -//! To use this feature add `features = ["union"]` in the `smallvec` section of Cargo.toml. -//! Note that this feature requires Rust 1.49. -//! -//! Tracking issue: [rust-lang/rust#55149](https://github.com/rust-lang/rust/issues/55149) -//! -//! ### `const_generics` -//! -//! **This feature requires Rust 1.51.** -//! -//! When this feature is enabled, `SmallVec` works with any arrays of any size, not just a fixed -//! list of sizes. -//! -//! ### `const_new` -//! -//! **This feature requires Rust 1.51.** -//! -//! This feature exposes the functions [`SmallVec::new_const`], [`SmallVec::from_const`], and [`smallvec_inline`] which enables the `SmallVec` to be initialized from a const context. -//! For details, see the -//! 
[Rust Reference](https://doc.rust-lang.org/reference/const_eval.html#const-functions). -//! -//! ### `drain_filter` -//! -//! **This feature is unstable.** It may change to match the unstable `drain_filter` method in libstd. -//! -//! Enables the `drain_filter` method, which produces an iterator that calls a user-provided -//! closure to determine which elements of the vector to remove and yield from the iterator. -//! -//! ### `drain_keep_rest` -//! -//! **This feature is unstable.** It may change to match the unstable `drain_keep_rest` method in libstd. -//! -//! Enables the `DrainFilter::keep_rest` method. -//! -//! ### `specialization` -//! -//! **This feature is unstable and requires a nightly build of the Rust toolchain.** -//! -//! When this feature is enabled, `SmallVec::from(slice)` has improved performance for slices -//! of `Copy` types. (Without this feature, you can use `SmallVec::from_slice` to get optimal -//! performance for `Copy` types.) -//! -//! Tracking issue: [rust-lang/rust#31844](https://github.com/rust-lang/rust/issues/31844) -//! -//! ### `may_dangle` -//! -//! **This feature is unstable and requires a nightly build of the Rust toolchain.** -//! -//! This feature makes the Rust compiler less strict about use of vectors that contain borrowed -//! references. For details, see the -//! [Rustonomicon](https://doc.rust-lang.org/1.42.0/nomicon/dropck.html#an-escape-hatch). -//! -//! 
Tracking issue: [rust-lang/rust#34761](https://github.com/rust-lang/rust/issues/34761) - -#![no_std] -#![cfg_attr(docsrs, feature(doc_cfg))] -#![cfg_attr(feature = "specialization", allow(incomplete_features))] -#![cfg_attr(feature = "specialization", feature(specialization))] -#![cfg_attr(feature = "may_dangle", feature(dropck_eyepatch))] -#![cfg_attr( - feature = "debugger_visualizer", - feature(debugger_visualizer), - debugger_visualizer(natvis_file = "../debug_metadata/smallvec.natvis") -)] -#![deny(missing_docs)] - -#[doc(hidden)] -pub extern crate alloc; - -#[cfg(any(test, feature = "write"))] -extern crate std; - -#[cfg(test)] -mod tests; - -#[allow(deprecated)] -use alloc::alloc::{Layout, LayoutErr}; -use alloc::boxed::Box; -use alloc::{vec, vec::Vec}; -use core::borrow::{Borrow, BorrowMut}; -use core::cmp; -use core::fmt; -use core::hash::{Hash, Hasher}; -use core::hint::unreachable_unchecked; -use core::iter::{repeat, FromIterator, FusedIterator, IntoIterator}; -use core::mem; -use core::mem::MaybeUninit; -use core::ops::{self, Range, RangeBounds}; -use core::ptr::{self, NonNull}; -use core::slice::{self, SliceIndex}; - -#[cfg(feature = "serde")] -use serde::{ - de::{Deserialize, Deserializer, SeqAccess, Visitor}, - ser::{Serialize, SerializeSeq, Serializer}, -}; - -#[cfg(feature = "serde")] -use core::marker::PhantomData; - -#[cfg(feature = "write")] -use std::io; - -#[cfg(feature = "drain_keep_rest")] -use core::mem::ManuallyDrop; - -/// Creates a [`SmallVec`] containing the arguments. -/// -/// `smallvec!` allows `SmallVec`s to be defined with the same syntax as array expressions. 
-/// There are two forms of this macro: -/// -/// - Create a [`SmallVec`] containing a given list of elements: -/// -/// ``` -/// # use smallvec::{smallvec, SmallVec}; -/// # fn main() { -/// let v: SmallVec<[_; 128]> = smallvec![1, 2, 3]; -/// assert_eq!(v[0], 1); -/// assert_eq!(v[1], 2); -/// assert_eq!(v[2], 3); -/// # } -/// ``` -/// -/// - Create a [`SmallVec`] from a given element and size: -/// -/// ``` -/// # use smallvec::{smallvec, SmallVec}; -/// # fn main() { -/// let v: SmallVec<[_; 0x8000]> = smallvec![1; 3]; -/// assert_eq!(v, SmallVec::from_buf([1, 1, 1])); -/// # } -/// ``` -/// -/// Note that unlike array expressions this syntax supports all elements -/// which implement [`Clone`] and the number of elements doesn't have to be -/// a constant. -/// -/// This will use `clone` to duplicate an expression, so one should be careful -/// using this with types having a nonstandard `Clone` implementation. For -/// example, `smallvec![Rc::new(1); 5]` will create a vector of five references -/// to the same boxed integer value, not five references pointing to independently -/// boxed integers. - -#[macro_export] -macro_rules! smallvec { - // count helper: transform any expression into 1 - (@one $x:expr) => (1usize); - ($elem:expr; $n:expr) => ({ - $crate::SmallVec::from_elem($elem, $n) - }); - ($($x:expr),*$(,)*) => ({ - let count = 0usize $(+ $crate::smallvec!(@one $x))*; - #[allow(unused_mut)] - let mut vec = $crate::SmallVec::new(); - if count <= vec.inline_size() { - $(vec.push($x);)* - vec - } else { - $crate::SmallVec::from_vec($crate::alloc::vec![$($x,)*]) - } - }); -} - -/// Creates an inline [`SmallVec`] containing the arguments. This macro is enabled by the feature `const_new`. -/// -/// `smallvec_inline!` allows `SmallVec`s to be defined with the same syntax as array expressions in `const` contexts. -/// The inline storage `A` will always be an array of the size specified by the arguments. 
-/// There are two forms of this macro: -/// -/// - Create a [`SmallVec`] containing a given list of elements: -/// -/// ``` -/// # use smallvec::{smallvec_inline, SmallVec}; -/// # fn main() { -/// const V: SmallVec<[i32; 3]> = smallvec_inline![1, 2, 3]; -/// assert_eq!(V[0], 1); -/// assert_eq!(V[1], 2); -/// assert_eq!(V[2], 3); -/// # } -/// ``` -/// -/// - Create a [`SmallVec`] from a given element and size: -/// -/// ``` -/// # use smallvec::{smallvec_inline, SmallVec}; -/// # fn main() { -/// const V: SmallVec<[i32; 3]> = smallvec_inline![1; 3]; -/// assert_eq!(V, SmallVec::from_buf([1, 1, 1])); -/// # } -/// ``` -/// -/// Note that the behavior mimics that of array expressions, in contrast to [`smallvec`]. -#[cfg(feature = "const_new")] -#[cfg_attr(docsrs, doc(cfg(feature = "const_new")))] -#[macro_export] -macro_rules! smallvec_inline { - // count helper: transform any expression into 1 - (@one $x:expr) => (1usize); - ($elem:expr; $n:expr) => ({ - $crate::SmallVec::<[_; $n]>::from_const([$elem; $n]) - }); - ($($x:expr),+ $(,)?) => ({ - const N: usize = 0usize $(+ $crate::smallvec_inline!(@one $x))*; - $crate::SmallVec::<[_; N]>::from_const([$($x,)*]) - }); -} - -/// `panic!()` in debug builds, optimization hint in release. -#[cfg(not(feature = "union"))] -macro_rules! 
debug_unreachable { - () => { - debug_unreachable!("entered unreachable code") - }; - ($e:expr) => { - if cfg!(debug_assertions) { - panic!($e); - } else { - unreachable_unchecked(); - } - }; -} - -/// Trait to be implemented by a collection that can be extended from a slice -/// -/// ## Example -/// -/// ```rust -/// use smallvec::{ExtendFromSlice, SmallVec}; -/// -/// fn initialize>(v: &mut V) { -/// v.extend_from_slice(b"Test!"); -/// } -/// -/// let mut vec = Vec::new(); -/// initialize(&mut vec); -/// assert_eq!(&vec, b"Test!"); -/// -/// let mut small_vec = SmallVec::<[u8; 8]>::new(); -/// initialize(&mut small_vec); -/// assert_eq!(&small_vec as &[_], b"Test!"); -/// ``` -#[doc(hidden)] -#[deprecated] -pub trait ExtendFromSlice { - /// Extends a collection from a slice of its element type - fn extend_from_slice(&mut self, other: &[T]); -} - -#[allow(deprecated)] -impl ExtendFromSlice for Vec { - fn extend_from_slice(&mut self, other: &[T]) { - Vec::extend_from_slice(self, other) - } -} - -/// Error type for APIs with fallible heap allocation -#[derive(Debug)] -pub enum CollectionAllocErr { - /// Overflow `usize::MAX` or other error during size computation - CapacityOverflow, - /// The allocator return an error - AllocErr { - /// The layout that was passed to the allocator - layout: Layout, - }, -} - -impl fmt::Display for CollectionAllocErr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Allocation error: {:?}", self) - } -} - -#[allow(deprecated)] -impl From for CollectionAllocErr { - fn from(_: LayoutErr) -> Self { - CollectionAllocErr::CapacityOverflow - } -} - -fn infallible(result: Result) -> T { - match result { - Ok(x) => x, - Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"), - Err(CollectionAllocErr::AllocErr { layout }) => alloc::alloc::handle_alloc_error(layout), - } -} - -/// FIXME: use `Layout::array` when we require a Rust version where it’s stable -/// -fn layout_array(n: usize) -> Result { - 
let size = mem::size_of::() - .checked_mul(n) - .ok_or(CollectionAllocErr::CapacityOverflow)?; - let align = mem::align_of::(); - Layout::from_size_align(size, align).map_err(|_| CollectionAllocErr::CapacityOverflow) -} - -unsafe fn deallocate(ptr: NonNull, capacity: usize) { - // This unwrap should succeed since the same did when allocating. - let layout = layout_array::(capacity).unwrap(); - alloc::alloc::dealloc(ptr.as_ptr() as *mut u8, layout) -} - -/// An iterator that removes the items from a `SmallVec` and yields them by value. -/// -/// Returned from [`SmallVec::drain`][1]. -/// -/// [1]: struct.SmallVec.html#method.drain -pub struct Drain<'a, T: 'a + Array> { - tail_start: usize, - tail_len: usize, - iter: slice::Iter<'a, T::Item>, - vec: NonNull>, -} - -impl<'a, T: 'a + Array> fmt::Debug for Drain<'a, T> -where - T::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Drain").field(&self.iter.as_slice()).finish() - } -} - -unsafe impl<'a, T: Sync + Array> Sync for Drain<'a, T> {} -unsafe impl<'a, T: Send + Array> Send for Drain<'a, T> {} - -impl<'a, T: 'a + Array> Iterator for Drain<'a, T> { - type Item = T::Item; - - #[inline] - fn next(&mut self) -> Option { - self.iter - .next() - .map(|reference| unsafe { ptr::read(reference) }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a, T: 'a + Array> DoubleEndedIterator for Drain<'a, T> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter - .next_back() - .map(|reference| unsafe { ptr::read(reference) }) - } -} - -impl<'a, T: Array> ExactSizeIterator for Drain<'a, T> { - #[inline] - fn len(&self) -> usize { - self.iter.len() - } -} - -impl<'a, T: Array> FusedIterator for Drain<'a, T> {} - -impl<'a, T: 'a + Array> Drop for Drain<'a, T> { - fn drop(&mut self) { - self.for_each(drop); - - if self.tail_len > 0 { - unsafe { - let source_vec = self.vec.as_mut(); - - // memmove back untouched tail, update 
to new length - let start = source_vec.len(); - let tail = self.tail_start; - if tail != start { - // as_mut_ptr creates a &mut, invalidating other pointers. - // This pattern avoids calling it with a pointer already present. - let ptr = source_vec.as_mut_ptr(); - let src = ptr.add(tail); - let dst = ptr.add(start); - ptr::copy(src, dst, self.tail_len); - } - source_vec.set_len(start + self.tail_len); - } - } - } -} - -#[cfg(feature = "drain_filter")] -/// An iterator which uses a closure to determine if an element should be removed. -/// -/// Returned from [`SmallVec::drain_filter`][1]. -/// -/// [1]: struct.SmallVec.html#method.drain_filter -pub struct DrainFilter<'a, T, F> -where - F: FnMut(&mut T::Item) -> bool, - T: Array, -{ - vec: &'a mut SmallVec, - /// The index of the item that will be inspected by the next call to `next`. - idx: usize, - /// The number of items that have been drained (removed) thus far. - del: usize, - /// The original length of `vec` prior to draining. - old_len: usize, - /// The filter test predicate. - pred: F, - /// A flag that indicates a panic has occurred in the filter test predicate. - /// This is used as a hint in the drop implementation to prevent consumption - /// of the remainder of the `DrainFilter`. Any unprocessed items will be - /// backshifted in the `vec`, but no further items will be dropped or - /// tested by the filter predicate. 
- panic_flag: bool, -} - -#[cfg(feature = "drain_filter")] -impl fmt::Debug for DrainFilter<'_, T, F> -where - F: FnMut(&mut T::Item) -> bool, - T: Array, - T::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("DrainFilter").field(&self.vec.as_slice()).finish() - } -} - -#[cfg(feature = "drain_filter")] -impl Iterator for DrainFilter<'_, T, F> -where - F: FnMut(&mut T::Item) -> bool, - T: Array, -{ - type Item = T::Item; - - fn next(&mut self) -> Option - { - unsafe { - while self.idx < self.old_len { - let i = self.idx; - let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len); - self.panic_flag = true; - let drained = (self.pred)(&mut v[i]); - self.panic_flag = false; - // Update the index *after* the predicate is called. If the index - // is updated prior and the predicate panics, the element at this - // index would be leaked. - self.idx += 1; - if drained { - self.del += 1; - return Some(ptr::read(&v[i])); - } else if self.del > 0 { - let del = self.del; - let src: *const Self::Item = &v[i]; - let dst: *mut Self::Item = &mut v[i - del]; - ptr::copy_nonoverlapping(src, dst, 1); - } - } - None - } - } - - fn size_hint(&self) -> (usize, Option) { - (0, Some(self.old_len - self.idx)) - } -} - -#[cfg(feature = "drain_filter")] -impl Drop for DrainFilter<'_, T, F> -where - F: FnMut(&mut T::Item) -> bool, - T: Array, -{ - fn drop(&mut self) { - struct BackshiftOnDrop<'a, 'b, T, F> - where - F: FnMut(&mut T::Item) -> bool, - T: Array - { - drain: &'b mut DrainFilter<'a, T, F>, - } - - impl<'a, 'b, T, F> Drop for BackshiftOnDrop<'a, 'b, T, F> - where - F: FnMut(&mut T::Item) -> bool, - T: Array - { - fn drop(&mut self) { - unsafe { - if self.drain.idx < self.drain.old_len && self.drain.del > 0 { - // This is a pretty messed up state, and there isn't really an - // obviously right thing to do. 
We don't want to keep trying - // to execute `pred`, so we just backshift all the unprocessed - // elements and tell the vec that they still exist. The backshift - // is required to prevent a double-drop of the last successfully - // drained item prior to a panic in the predicate. - let ptr = self.drain.vec.as_mut_ptr(); - let src = ptr.add(self.drain.idx); - let dst = src.sub(self.drain.del); - let tail_len = self.drain.old_len - self.drain.idx; - src.copy_to(dst, tail_len); - } - self.drain.vec.set_len(self.drain.old_len - self.drain.del); - } - } - } - - let backshift = BackshiftOnDrop { drain: self }; - - // Attempt to consume any remaining elements if the filter predicate - // has not yet panicked. We'll backshift any remaining elements - // whether we've already panicked or if the consumption here panics. - if !backshift.drain.panic_flag { - backshift.drain.for_each(drop); - } - } -} - -#[cfg(feature = "drain_keep_rest")] -impl DrainFilter<'_, T, F> -where - F: FnMut(&mut T::Item) -> bool, - T: Array -{ - /// Keep unyielded elements in the source `Vec`. - /// - /// # Examples - /// - /// ``` - /// # use smallvec::{smallvec, SmallVec}; - /// - /// let mut vec: SmallVec<[char; 2]> = smallvec!['a', 'b', 'c']; - /// let mut drain = vec.drain_filter(|_| true); - /// - /// assert_eq!(drain.next().unwrap(), 'a'); - /// - /// // This call keeps 'b' and 'c' in the vec. - /// drain.keep_rest(); - /// - /// // If we wouldn't call `keep_rest()`, - /// // `vec` would be empty. - /// assert_eq!(vec, SmallVec::<[char; 2]>::from_slice(&['b', 'c'])); - /// ``` - pub fn keep_rest(self) - { - // At this moment layout looks like this: - // - // _____________________/-- old_len - // / \ - // [kept] [yielded] [tail] - // \_______/ ^-- idx - // \-- del - // - // Normally `Drop` impl would drop [tail] (via .for_each(drop), ie still calling `pred`) - // - // 1. Move [tail] after [kept] - // 2. Update length of the original vec to `old_len - del` - // a. 
In case of ZST, this is the only thing we want to do - // 3. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do - let mut this = ManuallyDrop::new(self); - - unsafe { - // ZSTs have no identity, so we don't need to move them around. - let needs_move = mem::size_of::() != 0; - - if needs_move && this.idx < this.old_len && this.del > 0 { - let ptr = this.vec.as_mut_ptr(); - let src = ptr.add(this.idx); - let dst = src.sub(this.del); - let tail_len = this.old_len - this.idx; - src.copy_to(dst, tail_len); - } - - let new_len = this.old_len - this.del; - this.vec.set_len(new_len); - } - } -} - -#[cfg(feature = "union")] -union SmallVecData { - inline: core::mem::ManuallyDrop>, - heap: (NonNull, usize), -} - -#[cfg(all(feature = "union", feature = "const_new"))] -impl SmallVecData<[T; N]> { - #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))] - #[inline] - const fn from_const(inline: MaybeUninit<[T; N]>) -> Self { - SmallVecData { - inline: core::mem::ManuallyDrop::new(inline), - } - } -} - -#[cfg(feature = "union")] -impl SmallVecData { - #[inline] - unsafe fn inline(&self) -> ConstNonNull { - ConstNonNull::new(self.inline.as_ptr() as *const A::Item).unwrap() - } - #[inline] - unsafe fn inline_mut(&mut self) -> NonNull { - NonNull::new(self.inline.as_mut_ptr() as *mut A::Item).unwrap() - } - #[inline] - fn from_inline(inline: MaybeUninit) -> SmallVecData { - SmallVecData { - inline: core::mem::ManuallyDrop::new(inline), - } - } - #[inline] - unsafe fn into_inline(self) -> MaybeUninit { - core::mem::ManuallyDrop::into_inner(self.inline) - } - #[inline] - unsafe fn heap(&self) -> (ConstNonNull, usize) { - (ConstNonNull(self.heap.0), self.heap.1) - } - #[inline] - unsafe fn heap_mut(&mut self) -> (NonNull, &mut usize) { - let h = &mut self.heap; - (h.0, &mut h.1) - } - #[inline] - fn from_heap(ptr: NonNull, len: usize) -> SmallVecData { - SmallVecData { heap: (ptr, len) } - } -} - -#[cfg(not(feature = "union"))] -enum 
SmallVecData { - Inline(MaybeUninit), - // Using NonNull and NonZero here allows to reduce size of `SmallVec`. - Heap { - // Since we never allocate on heap - // unless our capacity is bigger than inline capacity - // heap capacity cannot be less than 1. - // Therefore, pointer cannot be null too. - ptr: NonNull, - len: usize, - }, -} - -#[cfg(all(not(feature = "union"), feature = "const_new"))] -impl SmallVecData<[T; N]> { - #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))] - #[inline] - const fn from_const(inline: MaybeUninit<[T; N]>) -> Self { - SmallVecData::Inline(inline) - } -} - -#[cfg(not(feature = "union"))] -impl SmallVecData { - #[inline] - unsafe fn inline(&self) -> ConstNonNull { - match self { - SmallVecData::Inline(a) => ConstNonNull::new(a.as_ptr() as *const A::Item).unwrap(), - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn inline_mut(&mut self) -> NonNull { - match self { - SmallVecData::Inline(a) => NonNull::new(a.as_mut_ptr() as *mut A::Item).unwrap(), - _ => debug_unreachable!(), - } - } - #[inline] - fn from_inline(inline: MaybeUninit) -> SmallVecData { - SmallVecData::Inline(inline) - } - #[inline] - unsafe fn into_inline(self) -> MaybeUninit { - match self { - SmallVecData::Inline(a) => a, - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn heap(&self) -> (ConstNonNull, usize) { - match self { - SmallVecData::Heap { ptr, len } => (ConstNonNull(*ptr), *len), - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn heap_mut(&mut self) -> (NonNull, &mut usize) { - match self { - SmallVecData::Heap { ptr, len } => (*ptr, len), - _ => debug_unreachable!(), - } - } - #[inline] - fn from_heap(ptr: NonNull, len: usize) -> SmallVecData { - SmallVecData::Heap { ptr, len } - } -} - -unsafe impl Send for SmallVecData {} -unsafe impl Sync for SmallVecData {} - -/// A `Vec`-like container that can store a small number of elements inline. 
-/// -/// `SmallVec` acts like a vector, but can store a limited amount of data inline within the -/// `SmallVec` struct rather than in a separate allocation. If the data exceeds this limit, the -/// `SmallVec` will "spill" its data onto the heap, allocating a new buffer to hold it. -/// -/// The amount of data that a `SmallVec` can store inline depends on its backing store. The backing -/// store can be any type that implements the `Array` trait; usually it is a small fixed-sized -/// array. For example a `SmallVec<[u64; 8]>` can hold up to eight 64-bit integers inline. -/// -/// ## Example -/// -/// ```rust -/// use smallvec::SmallVec; -/// let mut v = SmallVec::<[u8; 4]>::new(); // initialize an empty vector -/// -/// // The vector can hold up to 4 items without spilling onto the heap. -/// v.extend(0..4); -/// assert_eq!(v.len(), 4); -/// assert!(!v.spilled()); -/// -/// // Pushing another element will force the buffer to spill: -/// v.push(4); -/// assert_eq!(v.len(), 5); -/// assert!(v.spilled()); -/// ``` -pub struct SmallVec { - // The capacity field is used to determine which of the storage variants is active: - // If capacity <= Self::inline_capacity() then the inline variant is used and capacity holds the current length of the vector (number of elements actually in use). - // If capacity > Self::inline_capacity() then the heap variant is used and capacity holds the size of the memory allocation. - capacity: usize, - data: SmallVecData, -} - -impl SmallVec { - /// Construct an empty vector - #[inline] - pub fn new() -> SmallVec { - // Try to detect invalid custom implementations of `Array`. Hopefully, - // this check should be optimized away entirely for valid ones. 
- assert!( - mem::size_of::() == A::size() * mem::size_of::() - && mem::align_of::() >= mem::align_of::() - ); - SmallVec { - capacity: 0, - data: SmallVecData::from_inline(MaybeUninit::uninit()), - } - } - - /// Construct an empty vector with enough capacity pre-allocated to store at least `n` - /// elements. - /// - /// Will create a heap allocation only if `n` is larger than the inline capacity. - /// - /// ``` - /// # use smallvec::SmallVec; - /// - /// let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(100); - /// - /// assert!(v.is_empty()); - /// assert!(v.capacity() >= 100); - /// ``` - #[inline] - pub fn with_capacity(n: usize) -> Self { - let mut v = SmallVec::new(); - v.reserve_exact(n); - v - } - - /// Construct a new `SmallVec` from a `Vec`. - /// - /// Elements will be copied to the inline buffer if `vec.capacity() <= Self::inline_capacity()`. - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let vec = vec![1, 2, 3, 4, 5]; - /// let small_vec: SmallVec<[_; 3]> = SmallVec::from_vec(vec); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_vec(mut vec: Vec) -> SmallVec { - if vec.capacity() <= Self::inline_capacity() { - // Cannot use Vec with smaller capacity - // because we use value of `Self::capacity` field as indicator. - unsafe { - let mut data = SmallVecData::::from_inline(MaybeUninit::uninit()); - let len = vec.len(); - vec.set_len(0); - ptr::copy_nonoverlapping(vec.as_ptr(), data.inline_mut().as_ptr(), len); - - SmallVec { - capacity: len, - data, - } - } - } else { - let (ptr, cap, len) = (vec.as_mut_ptr(), vec.capacity(), vec.len()); - mem::forget(vec); - let ptr = NonNull::new(ptr) - // See docs: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.as_mut_ptr - .expect("Cannot be null by `Vec` invariant"); - - SmallVec { - capacity: cap, - data: SmallVecData::from_heap(ptr, len), - } - } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. 
- /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5]; - /// let small_vec: SmallVec<_> = SmallVec::from_buf(buf); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_buf(buf: A) -> SmallVec { - SmallVec { - capacity: A::size(), - data: SmallVecData::from_inline(MaybeUninit::new(buf)), - } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. Also sets the length, which must be less or - /// equal to the size of `buf`. - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5, 0, 0, 0]; - /// let small_vec: SmallVec<_> = SmallVec::from_buf_and_len(buf, 5); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_buf_and_len(buf: A, len: usize) -> SmallVec { - assert!(len <= A::size()); - unsafe { SmallVec::from_buf_and_len_unchecked(MaybeUninit::new(buf), len) } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. Also sets the length. The user is responsible - /// for ensuring that `len <= A::size()`. - /// - /// ```rust - /// use smallvec::SmallVec; - /// use std::mem::MaybeUninit; - /// - /// let buf = [1, 2, 3, 4, 5, 0, 0, 0]; - /// let small_vec: SmallVec<_> = unsafe { - /// SmallVec::from_buf_and_len_unchecked(MaybeUninit::new(buf), 5) - /// }; - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub unsafe fn from_buf_and_len_unchecked(buf: MaybeUninit, len: usize) -> SmallVec { - SmallVec { - capacity: len, - data: SmallVecData::from_inline(buf), - } - } - - /// Sets the length of a vector. - /// - /// This will explicitly set the size of the vector, without actually - /// modifying its buffers, so it is up to the caller to ensure that the - /// vector is actually the specified size. 
- pub unsafe fn set_len(&mut self, new_len: usize) { - let (_, len_ptr, _) = self.triple_mut(); - *len_ptr = new_len; - } - - /// The maximum number of elements this vector can hold inline - #[inline] - fn inline_capacity() -> usize { - if mem::size_of::() > 0 { - A::size() - } else { - // For zero-size items code like `ptr.add(offset)` always returns the same pointer. - // Therefore all items are at the same address, - // and any array size has capacity for infinitely many items. - // The capacity is limited by the bit width of the length field. - // - // `Vec` also does this: - // https://github.com/rust-lang/rust/blob/1.44.0/src/liballoc/raw_vec.rs#L186 - // - // In our case, this also ensures that a smallvec of zero-size items never spills, - // and we never try to allocate zero bytes which `std::alloc::alloc` disallows. - core::usize::MAX - } - } - - /// The maximum number of elements this vector can hold inline - #[inline] - pub fn inline_size(&self) -> usize { - Self::inline_capacity() - } - - /// The number of elements stored in the vector - #[inline] - pub fn len(&self) -> usize { - self.triple().1 - } - - /// Returns `true` if the vector is empty - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// The number of items the vector can hold without reallocating - #[inline] - pub fn capacity(&self) -> usize { - self.triple().2 - } - - /// Returns a tuple with (data ptr, len, capacity) - /// Useful to get all `SmallVec` properties with a single check of the current storage variant. 
- #[inline] - fn triple(&self) -> (ConstNonNull, usize, usize) { - unsafe { - if self.spilled() { - let (ptr, len) = self.data.heap(); - (ptr, len, self.capacity) - } else { - (self.data.inline(), self.capacity, Self::inline_capacity()) - } - } - } - - /// Returns a tuple with (data ptr, len ptr, capacity) - #[inline] - fn triple_mut(&mut self) -> (NonNull, &mut usize, usize) { - unsafe { - if self.spilled() { - let (ptr, len_ptr) = self.data.heap_mut(); - (ptr, len_ptr, self.capacity) - } else { - ( - self.data.inline_mut(), - &mut self.capacity, - Self::inline_capacity(), - ) - } - } - } - - /// Returns `true` if the data has spilled into a separate heap-allocated buffer. - #[inline] - pub fn spilled(&self) -> bool { - self.capacity > Self::inline_capacity() - } - - /// Creates a draining iterator that removes the specified range in the vector - /// and yields the removed items. - /// - /// Note 1: The element range is removed even if the iterator is only - /// partially consumed or not consumed at all. - /// - /// Note 2: It is unspecified how many elements are removed from the vector - /// if the `Drain` value is leaked. - /// - /// # Panics - /// - /// Panics if the starting point is greater than the end point or if - /// the end point is greater than the length of the vector. 
- pub fn drain(&mut self, range: R) -> Drain<'_, A> - where - R: RangeBounds, - { - use core::ops::Bound::*; - - let len = self.len(); - let start = match range.start_bound() { - Included(&n) => n, - Excluded(&n) => n.checked_add(1).expect("Range start out of bounds"), - Unbounded => 0, - }; - let end = match range.end_bound() { - Included(&n) => n.checked_add(1).expect("Range end out of bounds"), - Excluded(&n) => n, - Unbounded => len, - }; - - assert!(start <= end); - assert!(end <= len); - - unsafe { - self.set_len(start); - - let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start); - - Drain { - tail_start: end, - tail_len: len - end, - iter: range_slice.iter(), - // Since self is a &mut, passing it to a function would invalidate the slice iterator. - vec: NonNull::new_unchecked(self as *mut _), - } - } - } - - #[cfg(feature = "drain_filter")] - /// Creates an iterator which uses a closure to determine if an element should be removed. - /// - /// If the closure returns true, the element is removed and yielded. If the closure returns - /// false, the element will remain in the vector and will not be yielded by the iterator. - /// - /// Using this method is equivalent to the following code: - /// ``` - /// # use smallvec::SmallVec; - /// # let some_predicate = |x: &mut i32| { *x == 2 || *x == 3 || *x == 6 }; - /// # let mut vec: SmallVec<[i32; 8]> = SmallVec::from_slice(&[1i32, 2, 3, 4, 5, 6]); - /// let mut i = 0; - /// while i < vec.len() { - /// if some_predicate(&mut vec[i]) { - /// let val = vec.remove(i); - /// // your code here - /// } else { - /// i += 1; - /// } - /// } - /// - /// # assert_eq!(vec, SmallVec::<[i32; 8]>::from_slice(&[1i32, 4, 5])); - /// ``` - /// /// - /// But `drain_filter` is easier to use. `drain_filter` is also more efficient, - /// because it can backshift the elements of the array in bulk. 
- /// - /// Note that `drain_filter` also lets you mutate every element in the filter closure, - /// regardless of whether you choose to keep or remove it. - /// - /// # Examples - /// - /// Splitting an array into evens and odds, reusing the original allocation: - /// - /// ``` - /// # use smallvec::SmallVec; - /// let mut numbers: SmallVec<[i32; 16]> = SmallVec::from_slice(&[1i32, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15]); - /// - /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::>(); - /// let odds = numbers; - /// - /// assert_eq!(evens, SmallVec::<[i32; 16]>::from_slice(&[2i32, 4, 6, 8, 14])); - /// assert_eq!(odds, SmallVec::<[i32; 16]>::from_slice(&[1i32, 3, 5, 9, 11, 13, 15])); - /// ``` - pub fn drain_filter(&mut self, filter: F) -> DrainFilter<'_, A, F,> - where - F: FnMut(&mut A::Item) -> bool, - { - let old_len = self.len(); - - // Guard against us getting leaked (leak amplification) - unsafe { - self.set_len(0); - } - - DrainFilter { vec: self, idx: 0, del: 0, old_len, pred: filter, panic_flag: false } - } - - /// Append an item to the vector. - #[inline] - pub fn push(&mut self, value: A::Item) { - unsafe { - let (mut ptr, mut len, cap) = self.triple_mut(); - if *len == cap { - self.reserve_one_unchecked(); - let (heap_ptr, heap_len) = self.data.heap_mut(); - ptr = heap_ptr; - len = heap_len; - } - ptr::write(ptr.as_ptr().add(*len), value); - *len += 1; - } - } - - /// Remove an item from the end of the vector and return it, or None if empty. - #[inline] - pub fn pop(&mut self) -> Option { - unsafe { - let (ptr, len_ptr, _) = self.triple_mut(); - let ptr: *const _ = ptr.as_ptr(); - if *len_ptr == 0 { - return None; - } - let last_index = *len_ptr - 1; - *len_ptr = last_index; - Some(ptr::read(ptr.add(last_index))) - } - } - - /// Moves all the elements of `other` into `self`, leaving `other` empty. 
- /// - /// # Example - /// - /// ``` - /// # use smallvec::{SmallVec, smallvec}; - /// let mut v0: SmallVec<[u8; 16]> = smallvec![1, 2, 3]; - /// let mut v1: SmallVec<[u8; 32]> = smallvec![4, 5, 6]; - /// v0.append(&mut v1); - /// assert_eq!(*v0, [1, 2, 3, 4, 5, 6]); - /// assert_eq!(*v1, []); - /// ``` - pub fn append(&mut self, other: &mut SmallVec) - where - B: Array, - { - self.extend(other.drain(..)) - } - - /// Re-allocate to set the capacity to `max(new_cap, inline_size())`. - /// - /// Panics if `new_cap` is less than the vector's length - /// or if the capacity computation overflows `usize`. - pub fn grow(&mut self, new_cap: usize) { - infallible(self.try_grow(new_cap)) - } - - /// Re-allocate to set the capacity to `max(new_cap, inline_size())`. - /// - /// Panics if `new_cap` is less than the vector's length - pub fn try_grow(&mut self, new_cap: usize) -> Result<(), CollectionAllocErr> { - unsafe { - let unspilled = !self.spilled(); - let (ptr, &mut len, cap) = self.triple_mut(); - assert!(new_cap >= len); - if new_cap <= Self::inline_capacity() { - if unspilled { - return Ok(()); - } - self.data = SmallVecData::from_inline(MaybeUninit::uninit()); - ptr::copy_nonoverlapping(ptr.as_ptr(), self.data.inline_mut().as_ptr(), len); - self.capacity = len; - deallocate(ptr, cap); - } else if new_cap != cap { - let layout = layout_array::(new_cap)?; - debug_assert!(layout.size() > 0); - let new_alloc; - if unspilled { - new_alloc = NonNull::new(alloc::alloc::alloc(layout)) - .ok_or(CollectionAllocErr::AllocErr { layout })? - .cast(); - ptr::copy_nonoverlapping(ptr.as_ptr(), new_alloc.as_ptr(), len); - } else { - // This should never fail since the same succeeded - // when previously allocating `ptr`. - let old_layout = layout_array::(cap)?; - - let new_ptr = - alloc::alloc::realloc(ptr.as_ptr() as *mut u8, old_layout, layout.size()); - new_alloc = NonNull::new(new_ptr) - .ok_or(CollectionAllocErr::AllocErr { layout })? 
- .cast(); - } - self.data = SmallVecData::from_heap(new_alloc, len); - self.capacity = new_cap; - } - Ok(()) - } - } - - /// Reserve capacity for `additional` more elements to be inserted. - /// - /// May reserve more space to avoid frequent reallocations. - /// - /// Panics if the capacity computation overflows `usize`. - #[inline] - pub fn reserve(&mut self, additional: usize) { - infallible(self.try_reserve(additional)) - } - - /// Internal method used to grow in push() and insert(), where we know already we have to grow. - #[cold] - fn reserve_one_unchecked(&mut self) { - debug_assert_eq!(self.len(), self.capacity()); - let new_cap = self.len() - .checked_add(1) - .and_then(usize::checked_next_power_of_two) - .expect("capacity overflow"); - infallible(self.try_grow(new_cap)) - } - - /// Reserve capacity for `additional` more elements to be inserted. - /// - /// May reserve more space to avoid frequent reallocations. - pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { - // prefer triple_mut() even if triple() would work so that the optimizer removes duplicated - // calls to it from callers. - let (_, &mut len, cap) = self.triple_mut(); - if cap - len >= additional { - return Ok(()); - } - let new_cap = len - .checked_add(additional) - .and_then(usize::checked_next_power_of_two) - .ok_or(CollectionAllocErr::CapacityOverflow)?; - self.try_grow(new_cap) - } - - /// Reserve the minimum capacity for `additional` more elements to be inserted. - /// - /// Panics if the new capacity overflows `usize`. - pub fn reserve_exact(&mut self, additional: usize) { - infallible(self.try_reserve_exact(additional)) - } - - /// Reserve the minimum capacity for `additional` more elements to be inserted. 
- pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { - let (_, &mut len, cap) = self.triple_mut(); - if cap - len >= additional { - return Ok(()); - } - let new_cap = len - .checked_add(additional) - .ok_or(CollectionAllocErr::CapacityOverflow)?; - self.try_grow(new_cap) - } - - /// Shrink the capacity of the vector as much as possible. - /// - /// When possible, this will move data from an external heap buffer to the vector's inline - /// storage. - pub fn shrink_to_fit(&mut self) { - if !self.spilled() { - return; - } - let len = self.len(); - if self.inline_size() >= len { - unsafe { - let (ptr, len) = self.data.heap(); - self.data = SmallVecData::from_inline(MaybeUninit::uninit()); - ptr::copy_nonoverlapping(ptr.as_ptr(), self.data.inline_mut().as_ptr(), len); - deallocate(ptr.0, self.capacity); - self.capacity = len; - } - } else if self.capacity() > len { - self.grow(len); - } - } - - /// Shorten the vector, keeping the first `len` elements and dropping the rest. - /// - /// If `len` is greater than or equal to the vector's current length, this has no - /// effect. - /// - /// This does not re-allocate. If you want the vector's capacity to shrink, call - /// `shrink_to_fit` after truncating. - pub fn truncate(&mut self, len: usize) { - unsafe { - let (ptr, len_ptr, _) = self.triple_mut(); - let ptr = ptr.as_ptr(); - while len < *len_ptr { - let last_index = *len_ptr - 1; - *len_ptr = last_index; - ptr::drop_in_place(ptr.add(last_index)); - } - } - } - - /// Extracts a slice containing the entire vector. - /// - /// Equivalent to `&s[..]`. - pub fn as_slice(&self) -> &[A::Item] { - self - } - - /// Extracts a mutable slice of the entire vector. - /// - /// Equivalent to `&mut s[..]`. - pub fn as_mut_slice(&mut self) -> &mut [A::Item] { - self - } - - /// Remove the element at position `index`, replacing it with the last element. - /// - /// This does not preserve ordering, but is O(1). 
- /// - /// Panics if `index` is out of bounds. - #[inline] - pub fn swap_remove(&mut self, index: usize) -> A::Item { - let len = self.len(); - self.swap(len - 1, index); - self.pop() - .unwrap_or_else(|| unsafe { unreachable_unchecked() }) - } - - /// Remove all elements from the vector. - #[inline] - pub fn clear(&mut self) { - self.truncate(0); - } - - /// Remove and return the element at position `index`, shifting all elements after it to the - /// left. - /// - /// Panics if `index` is out of bounds. - pub fn remove(&mut self, index: usize) -> A::Item { - unsafe { - let (ptr, len_ptr, _) = self.triple_mut(); - let len = *len_ptr; - assert!(index < len); - *len_ptr = len - 1; - let ptr = ptr.as_ptr().add(index); - let item = ptr::read(ptr); - ptr::copy(ptr.add(1), ptr, len - index - 1); - item - } - } - - /// Insert an element at position `index`, shifting all elements after it to the right. - /// - /// Panics if `index > len`. - pub fn insert(&mut self, index: usize, element: A::Item) { - unsafe { - let (mut ptr, mut len_ptr, cap) = self.triple_mut(); - if *len_ptr == cap { - self.reserve_one_unchecked(); - let (heap_ptr, heap_len_ptr) = self.data.heap_mut(); - ptr = heap_ptr; - len_ptr = heap_len_ptr; - } - let mut ptr = ptr.as_ptr(); - let len = *len_ptr; - ptr = ptr.add(index); - if index < len { - ptr::copy(ptr, ptr.add(1), len - index); - } else if index == len { - // No elements need shifting. - } else { - panic!("index exceeds length"); - } - *len_ptr = len + 1; - ptr::write(ptr, element); - } - } - - /// Insert multiple elements at position `index`, shifting all following elements toward the - /// back. 
- pub fn insert_many>(&mut self, index: usize, iterable: I) { - let mut iter = iterable.into_iter(); - if index == self.len() { - return self.extend(iter); - } - - let (lower_size_bound, _) = iter.size_hint(); - assert!(lower_size_bound <= core::isize::MAX as usize); // Ensure offset is indexable - assert!(index + lower_size_bound >= index); // Protect against overflow - - let mut num_added = 0; - let old_len = self.len(); - assert!(index <= old_len); - - unsafe { - // Reserve space for `lower_size_bound` elements. - self.reserve(lower_size_bound); - let start = self.as_mut_ptr(); - let ptr = start.add(index); - - // Move the trailing elements. - ptr::copy(ptr, ptr.add(lower_size_bound), old_len - index); - - // In case the iterator panics, don't double-drop the items we just copied above. - self.set_len(0); - let mut guard = DropOnPanic { - start, - skip: index..(index + lower_size_bound), - len: old_len + lower_size_bound, - }; - - // The set_len above invalidates the previous pointers, so we must re-create them. - let start = self.as_mut_ptr(); - let ptr = start.add(index); - - while num_added < lower_size_bound { - let element = match iter.next() { - Some(x) => x, - None => break, - }; - let cur = ptr.add(num_added); - ptr::write(cur, element); - guard.skip.start += 1; - num_added += 1; - } - - if num_added < lower_size_bound { - // Iterator provided fewer elements than the hint. Move the tail backward. - ptr::copy( - ptr.add(lower_size_bound), - ptr.add(num_added), - old_len - index, - ); - } - // There are no more duplicate or uninitialized slots, so the guard is not needed. - self.set_len(old_len + num_added); - mem::forget(guard); - } - - // Insert any remaining elements one-by-one. - for element in iter { - self.insert(index + num_added, element); - num_added += 1; - } - - struct DropOnPanic { - start: *mut T, - skip: Range, // Space we copied-out-of, but haven't written-to yet. 
- len: usize, - } - - impl Drop for DropOnPanic { - fn drop(&mut self) { - for i in 0..self.len { - if !self.skip.contains(&i) { - unsafe { - ptr::drop_in_place(self.start.add(i)); - } - } - } - } - } - } - - /// Convert a `SmallVec` to a `Vec`, without reallocating if the `SmallVec` has already spilled onto - /// the heap. - pub fn into_vec(mut self) -> Vec { - if self.spilled() { - unsafe { - let (ptr, &mut len) = self.data.heap_mut(); - let v = Vec::from_raw_parts(ptr.as_ptr(), len, self.capacity); - mem::forget(self); - v - } - } else { - self.into_iter().collect() - } - } - - /// Converts a `SmallVec` into a `Box<[T]>` without reallocating if the `SmallVec` has already spilled - /// onto the heap. - /// - /// Note that this will drop any excess capacity. - pub fn into_boxed_slice(self) -> Box<[A::Item]> { - self.into_vec().into_boxed_slice() - } - - /// Convert the `SmallVec` into an `A` if possible. Otherwise return `Err(Self)`. - /// - /// This method returns `Err(Self)` if the `SmallVec` is too short (and the `A` contains uninitialized elements), - /// or if the `SmallVec` is too long (and all the elements were spilled to the heap). - pub fn into_inner(self) -> Result { - if self.spilled() || self.len() != A::size() { - // Note: A::size, not Self::inline_capacity - Err(self) - } else { - unsafe { - let data = ptr::read(&self.data); - mem::forget(self); - Ok(data.into_inline().assume_init()) - } - } - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all elements `e` such that `f(&e)` returns `false`. - /// This method operates in place and preserves the order of the retained - /// elements. - pub fn retain bool>(&mut self, mut f: F) { - let mut del = 0; - let len = self.len(); - for i in 0..len { - if !f(&mut self[i]) { - del += 1; - } else if del > 0 { - self.swap(i - del, i); - } - } - self.truncate(len - del); - } - - /// Retains only the elements specified by the predicate. 
- /// - /// This method is identical in behaviour to [`retain`]; it is included only - /// to maintain api-compatability with `std::Vec`, where the methods are - /// separate for historical reasons. - pub fn retain_mut bool>(&mut self, f: F) { - self.retain(f) - } - - /// Removes consecutive duplicate elements. - pub fn dedup(&mut self) - where - A::Item: PartialEq, - { - self.dedup_by(|a, b| a == b); - } - - /// Removes consecutive duplicate elements using the given equality relation. - pub fn dedup_by(&mut self, mut same_bucket: F) - where - F: FnMut(&mut A::Item, &mut A::Item) -> bool, - { - // See the implementation of Vec::dedup_by in the - // standard library for an explanation of this algorithm. - let len = self.len(); - if len <= 1 { - return; - } - - let ptr = self.as_mut_ptr(); - let mut w: usize = 1; - - unsafe { - for r in 1..len { - let p_r = ptr.add(r); - let p_wm1 = ptr.add(w - 1); - if !same_bucket(&mut *p_r, &mut *p_wm1) { - if r != w { - let p_w = p_wm1.add(1); - mem::swap(&mut *p_r, &mut *p_w); - } - w += 1; - } - } - } - - self.truncate(w); - } - - /// Removes consecutive elements that map to the same key. - pub fn dedup_by_key(&mut self, mut key: F) - where - F: FnMut(&mut A::Item) -> K, - K: PartialEq, - { - self.dedup_by(|a, b| key(a) == key(b)); - } - - /// Resizes the `SmallVec` in-place so that `len` is equal to `new_len`. - /// - /// If `new_len` is greater than `len`, the `SmallVec` is extended by the difference, with each - /// additional slot filled with the result of calling the closure `f`. The return values from `f` - /// will end up in the `SmallVec` in the order they have been generated. - /// - /// If `new_len` is less than `len`, the `SmallVec` is simply truncated. - /// - /// This method uses a closure to create new values on every push. If you'd rather `Clone` a given - /// value, use `resize`. If you want to use the `Default` trait to generate values, you can pass - /// `Default::default()` as the second argument. 
- /// - /// Added for `std::vec::Vec` compatibility (added in Rust 1.33.0) - /// - /// ``` - /// # use smallvec::{smallvec, SmallVec}; - /// let mut vec : SmallVec<[_; 4]> = smallvec![1, 2, 3]; - /// vec.resize_with(5, Default::default); - /// assert_eq!(&*vec, &[1, 2, 3, 0, 0]); - /// - /// let mut vec : SmallVec<[_; 4]> = smallvec![]; - /// let mut p = 1; - /// vec.resize_with(4, || { p *= 2; p }); - /// assert_eq!(&*vec, &[2, 4, 8, 16]); - /// ``` - pub fn resize_with(&mut self, new_len: usize, f: F) - where - F: FnMut() -> A::Item, - { - let old_len = self.len(); - if old_len < new_len { - let mut f = f; - let additional = new_len - old_len; - self.reserve(additional); - for _ in 0..additional { - self.push(f()); - } - } else if old_len > new_len { - self.truncate(new_len); - } - } - - /// Creates a `SmallVec` directly from the raw components of another - /// `SmallVec`. - /// - /// # Safety - /// - /// This is highly unsafe, due to the number of invariants that aren't - /// checked: - /// - /// * `ptr` needs to have been previously allocated via `SmallVec` for its - /// spilled storage (at least, it's highly likely to be incorrect if it - /// wasn't). - /// * `ptr`'s `A::Item` type needs to be the same size and alignment that - /// it was allocated with - /// * `length` needs to be less than or equal to `capacity`. - /// * `capacity` needs to be the capacity that the pointer was allocated - /// with. - /// - /// Violating these may cause problems like corrupting the allocator's - /// internal data structures. - /// - /// Additionally, `capacity` must be greater than the amount of inline - /// storage `A` has; that is, the new `SmallVec` must need to spill over - /// into heap allocated storage. This condition is asserted against. - /// - /// The ownership of `ptr` is effectively transferred to the - /// `SmallVec` which may then deallocate, reallocate or change the - /// contents of memory pointed to by the pointer at will. 
Ensure - /// that nothing else uses the pointer after calling this - /// function. - /// - /// # Examples - /// - /// ``` - /// # use smallvec::{smallvec, SmallVec}; - /// use std::mem; - /// use std::ptr; - /// - /// fn main() { - /// let mut v: SmallVec<[_; 1]> = smallvec![1, 2, 3]; - /// - /// // Pull out the important parts of `v`. - /// let p = v.as_mut_ptr(); - /// let len = v.len(); - /// let cap = v.capacity(); - /// let spilled = v.spilled(); - /// - /// unsafe { - /// // Forget all about `v`. The heap allocation that stored the - /// // three values won't be deallocated. - /// mem::forget(v); - /// - /// // Overwrite memory with [4, 5, 6]. - /// // - /// // This is only safe if `spilled` is true! Otherwise, we are - /// // writing into the old `SmallVec`'s inline storage on the - /// // stack. - /// assert!(spilled); - /// for i in 0..len { - /// ptr::write(p.add(i), 4 + i); - /// } - /// - /// // Put everything back together into a SmallVec with a different - /// // amount of inline storage, but which is still less than `cap`. - /// let rebuilt = SmallVec::<[_; 2]>::from_raw_parts(p, len, cap); - /// assert_eq!(&*rebuilt, &[4, 5, 6]); - /// } - /// } - #[inline] - pub unsafe fn from_raw_parts(ptr: *mut A::Item, length: usize, capacity: usize) -> SmallVec { - // SAFETY: We require caller to provide same ptr as we alloc - // and we never alloc null pointer. - let ptr = unsafe { - debug_assert!(!ptr.is_null(), "Called `from_raw_parts` with null pointer."); - NonNull::new_unchecked(ptr) - }; - assert!(capacity > Self::inline_capacity()); - SmallVec { - capacity, - data: SmallVecData::from_heap(ptr, length), - } - } - - /// Returns a raw pointer to the vector's buffer. - pub fn as_ptr(&self) -> *const A::Item { - // We shadow the slice method of the same name to avoid going through - // `deref`, which creates an intermediate reference that may place - // additional safety constraints on the contents of the slice. 
- self.triple().0.as_ptr() - } - - /// Returns a raw mutable pointer to the vector's buffer. - pub fn as_mut_ptr(&mut self) -> *mut A::Item { - // We shadow the slice method of the same name to avoid going through - // `deref_mut`, which creates an intermediate reference that may place - // additional safety constraints on the contents of the slice. - self.triple_mut().0.as_ptr() - } -} - -impl SmallVec -where - A::Item: Copy, -{ - /// Copy the elements from a slice into a new `SmallVec`. - /// - /// For slices of `Copy` types, this is more efficient than `SmallVec::from(slice)`. - pub fn from_slice(slice: &[A::Item]) -> Self { - let len = slice.len(); - if len <= Self::inline_capacity() { - SmallVec { - capacity: len, - data: SmallVecData::from_inline(unsafe { - let mut data: MaybeUninit = MaybeUninit::uninit(); - ptr::copy_nonoverlapping( - slice.as_ptr(), - data.as_mut_ptr() as *mut A::Item, - len, - ); - data - }), - } - } else { - let mut b = slice.to_vec(); - let cap = b.capacity(); - let ptr = NonNull::new(b.as_mut_ptr()).expect("Vec always contain non null pointers."); - mem::forget(b); - SmallVec { - capacity: cap, - data: SmallVecData::from_heap(ptr, len), - } - } - } - - /// Copy elements from a slice into the vector at position `index`, shifting any following - /// elements toward the back. - /// - /// For slices of `Copy` types, this is more efficient than `insert`. - #[inline] - pub fn insert_from_slice(&mut self, index: usize, slice: &[A::Item]) { - self.reserve(slice.len()); - - let len = self.len(); - assert!(index <= len); - - unsafe { - let slice_ptr = slice.as_ptr(); - let ptr = self.as_mut_ptr().add(index); - ptr::copy(ptr, ptr.add(slice.len()), len - index); - ptr::copy_nonoverlapping(slice_ptr, ptr, slice.len()); - self.set_len(len + slice.len()); - } - } - - /// Copy elements from a slice and append them to the vector. - /// - /// For slices of `Copy` types, this is more efficient than `extend`. 
- #[inline] - pub fn extend_from_slice(&mut self, slice: &[A::Item]) { - let len = self.len(); - self.insert_from_slice(len, slice); - } -} - -impl SmallVec -where - A::Item: Clone, -{ - /// Resizes the vector so that its length is equal to `len`. - /// - /// If `len` is less than the current length, the vector simply truncated. - /// - /// If `len` is greater than the current length, `value` is appended to the - /// vector until its length equals `len`. - pub fn resize(&mut self, len: usize, value: A::Item) { - let old_len = self.len(); - - if len > old_len { - self.extend(repeat(value).take(len - old_len)); - } else { - self.truncate(len); - } - } - - /// Creates a `SmallVec` with `n` copies of `elem`. - /// ``` - /// use smallvec::SmallVec; - /// - /// let v = SmallVec::<[char; 128]>::from_elem('d', 2); - /// assert_eq!(v, SmallVec::from_buf(['d', 'd'])); - /// ``` - pub fn from_elem(elem: A::Item, n: usize) -> Self { - if n > Self::inline_capacity() { - vec![elem; n].into() - } else { - let mut v = SmallVec::::new(); - unsafe { - let (ptr, len_ptr, _) = v.triple_mut(); - let ptr = ptr.as_ptr(); - let mut local_len = SetLenOnDrop::new(len_ptr); - - for i in 0..n { - ::core::ptr::write(ptr.add(i), elem.clone()); - local_len.increment_len(1); - } - } - v - } - } -} - -impl ops::Deref for SmallVec { - type Target = [A::Item]; - #[inline] - fn deref(&self) -> &[A::Item] { - unsafe { - let (ptr, len, _) = self.triple(); - slice::from_raw_parts(ptr.as_ptr(), len) - } - } -} - -impl ops::DerefMut for SmallVec { - #[inline] - fn deref_mut(&mut self) -> &mut [A::Item] { - unsafe { - let (ptr, &mut len, _) = self.triple_mut(); - slice::from_raw_parts_mut(ptr.as_ptr(), len) - } - } -} - -impl AsRef<[A::Item]> for SmallVec { - #[inline] - fn as_ref(&self) -> &[A::Item] { - self - } -} - -impl AsMut<[A::Item]> for SmallVec { - #[inline] - fn as_mut(&mut self) -> &mut [A::Item] { - self - } -} - -impl Borrow<[A::Item]> for SmallVec { - #[inline] - fn borrow(&self) -> 
&[A::Item] { - self - } -} - -impl BorrowMut<[A::Item]> for SmallVec { - #[inline] - fn borrow_mut(&mut self) -> &mut [A::Item] { - self - } -} - -#[cfg(feature = "write")] -#[cfg_attr(docsrs, doc(cfg(feature = "write")))] -impl> io::Write for SmallVec { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { - self.extend_from_slice(buf); - Ok(buf.len()) - } - - #[inline] - fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - self.extend_from_slice(buf); - Ok(()) - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -#[cfg(feature = "serde")] -#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] -impl Serialize for SmallVec -where - A::Item: Serialize, -{ - fn serialize(&self, serializer: S) -> Result { - let mut state = serializer.serialize_seq(Some(self.len()))?; - for item in self { - state.serialize_element(&item)?; - } - state.end() - } -} - -#[cfg(feature = "serde")] -#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] -impl<'de, A: Array> Deserialize<'de> for SmallVec -where - A::Item: Deserialize<'de>, -{ - fn deserialize>(deserializer: D) -> Result { - deserializer.deserialize_seq(SmallVecVisitor { - phantom: PhantomData, - }) - } -} - -#[cfg(feature = "serde")] -struct SmallVecVisitor { - phantom: PhantomData, -} - -#[cfg(feature = "serde")] -impl<'de, A: Array> Visitor<'de> for SmallVecVisitor -where - A::Item: Deserialize<'de>, -{ - type Value = SmallVec; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a sequence") - } - - fn visit_seq(self, mut seq: B) -> Result - where - B: SeqAccess<'de>, - { - use serde::de::Error; - let len = seq.size_hint().unwrap_or(0); - let mut values = SmallVec::new(); - values.try_reserve(len).map_err(B::Error::custom)?; - - while let Some(value) = seq.next_element()? 
{ - values.push(value); - } - - Ok(values) - } -} - -#[cfg(feature = "specialization")] -trait SpecFrom { - fn spec_from(slice: S) -> SmallVec; -} - -#[cfg(feature = "specialization")] -mod specialization; - -#[cfg(feature = "arbitrary")] -mod arbitrary; - -#[cfg(feature = "specialization")] -impl<'a, A: Array> SpecFrom for SmallVec -where - A::Item: Copy, -{ - #[inline] - fn spec_from(slice: &'a [A::Item]) -> SmallVec { - SmallVec::from_slice(slice) - } -} - -impl<'a, A: Array> From<&'a [A::Item]> for SmallVec -where - A::Item: Clone, -{ - #[cfg(not(feature = "specialization"))] - #[inline] - fn from(slice: &'a [A::Item]) -> SmallVec { - slice.iter().cloned().collect() - } - - #[cfg(feature = "specialization")] - #[inline] - fn from(slice: &'a [A::Item]) -> SmallVec { - SmallVec::spec_from(slice) - } -} - -impl From> for SmallVec { - #[inline] - fn from(vec: Vec) -> SmallVec { - SmallVec::from_vec(vec) - } -} - -impl From for SmallVec { - #[inline] - fn from(array: A) -> SmallVec { - SmallVec::from_buf(array) - } -} - -impl> ops::Index for SmallVec { - type Output = I::Output; - - fn index(&self, index: I) -> &I::Output { - &(**self)[index] - } -} - -impl> ops::IndexMut for SmallVec { - fn index_mut(&mut self, index: I) -> &mut I::Output { - &mut (&mut **self)[index] - } -} - -#[allow(deprecated)] -impl ExtendFromSlice for SmallVec -where - A::Item: Copy, -{ - fn extend_from_slice(&mut self, other: &[A::Item]) { - SmallVec::extend_from_slice(self, other) - } -} - -impl FromIterator for SmallVec { - #[inline] - fn from_iter>(iterable: I) -> SmallVec { - let mut v = SmallVec::new(); - v.extend(iterable); - v - } -} - -impl Extend for SmallVec { - fn extend>(&mut self, iterable: I) { - let mut iter = iterable.into_iter(); - let (lower_size_bound, _) = iter.size_hint(); - self.reserve(lower_size_bound); - - unsafe { - let (ptr, len_ptr, cap) = self.triple_mut(); - let ptr = ptr.as_ptr(); - let mut len = SetLenOnDrop::new(len_ptr); - while len.get() < cap { - if let 
Some(out) = iter.next() { - ptr::write(ptr.add(len.get()), out); - len.increment_len(1); - } else { - return; - } - } - } - - for elem in iter { - self.push(elem); - } - } -} - -impl fmt::Debug for SmallVec -where - A::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_list().entries(self.iter()).finish() - } -} - -impl Default for SmallVec { - #[inline] - fn default() -> SmallVec { - SmallVec::new() - } -} - -#[cfg(feature = "may_dangle")] -unsafe impl<#[may_dangle] A: Array> Drop for SmallVec { - fn drop(&mut self) { - unsafe { - if self.spilled() { - let (ptr, &mut len) = self.data.heap_mut(); - Vec::from_raw_parts(ptr.as_ptr(), len, self.capacity); - } else { - ptr::drop_in_place(&mut self[..]); - } - } - } -} - -#[cfg(not(feature = "may_dangle"))] -impl Drop for SmallVec { - fn drop(&mut self) { - unsafe { - if self.spilled() { - let (ptr, &mut len) = self.data.heap_mut(); - drop(Vec::from_raw_parts(ptr.as_ptr(), len, self.capacity)); - } else { - ptr::drop_in_place(&mut self[..]); - } - } - } -} - -impl Clone for SmallVec -where - A::Item: Clone, -{ - #[inline] - fn clone(&self) -> SmallVec { - SmallVec::from(self.as_slice()) - } - - fn clone_from(&mut self, source: &Self) { - // Inspired from `impl Clone for Vec`. - - // drop anything that will not be overwritten - self.truncate(source.len()); - - // self.len <= other.len due to the truncate above, so the - // slices here are always in-bounds. - let (init, tail) = source.split_at(self.len()); - - // reuse the contained values' allocations/resources. - self.clone_from_slice(init); - self.extend(tail.iter().cloned()); - } -} - -impl PartialEq> for SmallVec -where - A::Item: PartialEq, -{ - #[inline] - fn eq(&self, other: &SmallVec) -> bool { - self[..] == other[..] 
- } -} - -impl Eq for SmallVec where A::Item: Eq {} - -impl PartialOrd for SmallVec -where - A::Item: PartialOrd, -{ - #[inline] - fn partial_cmp(&self, other: &SmallVec) -> Option { - PartialOrd::partial_cmp(&**self, &**other) - } -} - -impl Ord for SmallVec -where - A::Item: Ord, -{ - #[inline] - fn cmp(&self, other: &SmallVec) -> cmp::Ordering { - Ord::cmp(&**self, &**other) - } -} - -impl Hash for SmallVec -where - A::Item: Hash, -{ - fn hash(&self, state: &mut H) { - (**self).hash(state) - } -} - -unsafe impl Send for SmallVec where A::Item: Send {} - -/// An iterator that consumes a `SmallVec` and yields its items by value. -/// -/// Returned from [`SmallVec::into_iter`][1]. -/// -/// [1]: struct.SmallVec.html#method.into_iter -pub struct IntoIter { - data: SmallVec, - current: usize, - end: usize, -} - -impl fmt::Debug for IntoIter -where - A::Item: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("IntoIter").field(&self.as_slice()).finish() - } -} - -impl Clone for IntoIter -where - A::Item: Clone, -{ - fn clone(&self) -> IntoIter { - SmallVec::from(self.as_slice()).into_iter() - } -} - -impl Drop for IntoIter { - fn drop(&mut self) { - for _ in self {} - } -} - -impl Iterator for IntoIter { - type Item = A::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.current == self.end { - None - } else { - unsafe { - let current = self.current; - self.current += 1; - Some(ptr::read(self.data.as_ptr().add(current))) - } - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let size = self.end - self.current; - (size, Some(size)) - } -} - -impl DoubleEndedIterator for IntoIter { - #[inline] - fn next_back(&mut self) -> Option { - if self.current == self.end { - None - } else { - unsafe { - self.end -= 1; - Some(ptr::read(self.data.as_ptr().add(self.end))) - } - } - } -} - -impl ExactSizeIterator for IntoIter {} -impl FusedIterator for IntoIter {} - -impl IntoIter { - /// Returns the remaining items 
of this iterator as a slice. - pub fn as_slice(&self) -> &[A::Item] { - let len = self.end - self.current; - unsafe { core::slice::from_raw_parts(self.data.as_ptr().add(self.current), len) } - } - - /// Returns the remaining items of this iterator as a mutable slice. - pub fn as_mut_slice(&mut self) -> &mut [A::Item] { - let len = self.end - self.current; - unsafe { core::slice::from_raw_parts_mut(self.data.as_mut_ptr().add(self.current), len) } - } -} - -impl IntoIterator for SmallVec { - type IntoIter = IntoIter; - type Item = A::Item; - fn into_iter(mut self) -> Self::IntoIter { - unsafe { - // Set SmallVec len to zero as `IntoIter` drop handles dropping of the elements - let len = self.len(); - self.set_len(0); - IntoIter { - data: self, - current: 0, - end: len, - } - } - } -} - -impl<'a, A: Array> IntoIterator for &'a SmallVec { - type IntoIter = slice::Iter<'a, A::Item>; - type Item = &'a A::Item; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, A: Array> IntoIterator for &'a mut SmallVec { - type IntoIter = slice::IterMut<'a, A::Item>; - type Item = &'a mut A::Item; - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -/// Types that can be used as the backing store for a [`SmallVec`]. -pub unsafe trait Array { - /// The type of the array's elements. - type Item; - /// Returns the number of items the array can hold. - fn size() -> usize; -} - -/// Set the length of the vec when the `SetLenOnDrop` value goes out of scope. 
-/// -/// Copied from -struct SetLenOnDrop<'a> { - len: &'a mut usize, - local_len: usize, -} - -impl<'a> SetLenOnDrop<'a> { - #[inline] - fn new(len: &'a mut usize) -> Self { - SetLenOnDrop { - local_len: *len, - len, - } - } - - #[inline] - fn get(&self) -> usize { - self.local_len - } - - #[inline] - fn increment_len(&mut self, increment: usize) { - self.local_len += increment; - } -} - -impl<'a> Drop for SetLenOnDrop<'a> { - #[inline] - fn drop(&mut self) { - *self.len = self.local_len; - } -} - -#[cfg(feature = "const_new")] -impl SmallVec<[T; N]> { - /// Construct an empty vector. - /// - /// This is a `const` version of [`SmallVec::new`] that is enabled by the feature `const_new`, with the limitation that it only works for arrays. - #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))] - #[inline] - pub const fn new_const() -> Self { - SmallVec { - capacity: 0, - data: SmallVecData::from_const(MaybeUninit::uninit()), - } - } - - /// The array passed as an argument is moved to be an inline version of `SmallVec`. - /// - /// This is a `const` version of [`SmallVec::from_buf`] that is enabled by the feature `const_new`, with the limitation that it only works for arrays. - #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))] - #[inline] - pub const fn from_const(items: [T; N]) -> Self { - SmallVec { - capacity: N, - data: SmallVecData::from_const(MaybeUninit::new(items)), - } - } -} - -#[cfg(feature = "const_generics")] -#[cfg_attr(docsrs, doc(cfg(feature = "const_generics")))] -unsafe impl Array for [T; N] { - type Item = T; - #[inline] - fn size() -> usize { - N - } -} - -#[cfg(not(feature = "const_generics"))] -macro_rules! 
impl_array( - ($($size:expr),+) => { - $( - unsafe impl Array for [T; $size] { - type Item = T; - #[inline] - fn size() -> usize { $size } - } - )+ - } -); - -#[cfg(not(feature = "const_generics"))] -impl_array!( - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 31, 32, 36, 0x40, 0x60, 0x80, 0x100, 0x200, 0x400, 0x600, 0x800, 0x1000, - 0x2000, 0x4000, 0x6000, 0x8000, 0x10000, 0x20000, 0x40000, 0x60000, 0x80000, 0x10_0000 -); - -/// Convenience trait for constructing a `SmallVec` -pub trait ToSmallVec { - /// Construct a new `SmallVec` from a slice. - fn to_smallvec(&self) -> SmallVec; -} - -impl ToSmallVec for [A::Item] -where - A::Item: Copy, -{ - #[inline] - fn to_smallvec(&self) -> SmallVec { - SmallVec::from_slice(self) - } -} - -// Immutable counterpart for `NonNull`. -#[repr(transparent)] -struct ConstNonNull(NonNull); - -impl ConstNonNull { - #[inline] - fn new(ptr: *const T) -> Option { - NonNull::new(ptr as *mut T).map(Self) - } - #[inline] - fn as_ptr(self) -> *const T { - self.0.as_ptr() - } -} - -impl Clone for ConstNonNull { - #[inline] - fn clone(&self) -> Self { - *self - } -} - -impl Copy for ConstNonNull {} diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/src/specialization.rs s390-tools-2.33.1/rust-vendor/smallvec/src/specialization.rs --- s390-tools-2.31.0/rust-vendor/smallvec/src/specialization.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/src/specialization.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Implementations that require `default fn`. 
- -use super::{Array, SmallVec, SpecFrom}; - -impl<'a, A: Array> SpecFrom for SmallVec -where - A::Item: Clone, -{ - #[inline] - default fn spec_from(slice: &'a [A::Item]) -> SmallVec { - slice.into_iter().cloned().collect() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/smallvec/src/tests.rs s390-tools-2.33.1/rust-vendor/smallvec/src/tests.rs --- s390-tools-2.31.0/rust-vendor/smallvec/src/tests.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/smallvec/src/tests.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1013 +0,0 @@ -use crate::{smallvec, SmallVec}; - -use std::iter::FromIterator; - -use alloc::borrow::ToOwned; -use alloc::boxed::Box; -use alloc::rc::Rc; -use alloc::{vec, vec::Vec}; - -#[test] -pub fn test_zero() { - let mut v = SmallVec::<[_; 0]>::new(); - assert!(!v.spilled()); - v.push(0usize); - assert!(v.spilled()); - assert_eq!(&*v, &[0]); -} - -// We heap allocate all these strings so that double frees will show up under valgrind. - -#[test] -pub fn test_inline() { - let mut v = SmallVec::<[_; 16]>::new(); - v.push("hello".to_owned()); - v.push("there".to_owned()); - assert_eq!(&*v, &["hello".to_owned(), "there".to_owned(),][..]); -} - -#[test] -pub fn test_spill() { - let mut v = SmallVec::<[_; 2]>::new(); - v.push("hello".to_owned()); - assert_eq!(v[0], "hello"); - v.push("there".to_owned()); - v.push("burma".to_owned()); - assert_eq!(v[0], "hello"); - v.push("shave".to_owned()); - assert_eq!( - &*v, - &[ - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - ][..] 
- ); -} - -#[test] -pub fn test_double_spill() { - let mut v = SmallVec::<[_; 2]>::new(); - v.push("hello".to_owned()); - v.push("there".to_owned()); - v.push("burma".to_owned()); - v.push("shave".to_owned()); - v.push("hello".to_owned()); - v.push("there".to_owned()); - v.push("burma".to_owned()); - v.push("shave".to_owned()); - assert_eq!( - &*v, - &[ - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - ][..] - ); -} - -// https://github.com/servo/rust-smallvec/issues/4 -#[test] -fn issue_4() { - SmallVec::<[Box; 2]>::new(); -} - -// https://github.com/servo/rust-smallvec/issues/5 -#[test] -fn issue_5() { - assert!(Some(SmallVec::<[&u32; 2]>::new()).is_some()); -} - -#[test] -fn test_with_capacity() { - let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(1); - assert!(v.is_empty()); - assert!(!v.spilled()); - assert_eq!(v.capacity(), 3); - - let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(10); - assert!(v.is_empty()); - assert!(v.spilled()); - assert_eq!(v.capacity(), 10); -} - -#[test] -fn drain() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.drain(..).collect::>(), &[3]); - - // spilling the vec - v.push(3); - v.push(4); - v.push(5); - let old_capacity = v.capacity(); - assert_eq!(v.drain(1..).collect::>(), &[4, 5]); - // drain should not change the capacity - assert_eq!(v.capacity(), old_capacity); - - // Exercise the tail-shifting code when in the inline state - // This has the potential to produce UB due to aliasing - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(1); - v.push(2); - assert_eq!(v.drain(..1).collect::>(), &[1]); -} - -#[test] -fn drain_rev() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.drain(..).rev().collect::>(), &[3]); - - // spilling the vec - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.drain(..).rev().collect::>(), &[5, 4, 3]); 
-} - -#[test] -fn drain_forget() { - let mut v: SmallVec<[u8; 1]> = smallvec![0, 1, 2, 3, 4, 5, 6, 7]; - std::mem::forget(v.drain(2..5)); - assert_eq!(v.len(), 2); -} - -#[test] -fn into_iter() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.into_iter().collect::>(), &[3]); - - // spilling the vec - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.into_iter().collect::>(), &[3, 4, 5]); -} - -#[test] -fn into_iter_rev() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.into_iter().rev().collect::>(), &[3]); - - // spilling the vec - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.into_iter().rev().collect::>(), &[5, 4, 3]); -} - -#[test] -fn into_iter_drop() { - use std::cell::Cell; - - struct DropCounter<'a>(&'a Cell); - - impl<'a> Drop for DropCounter<'a> { - fn drop(&mut self) { - self.0.set(self.0.get() + 1); - } - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.into_iter(); - assert_eq!(cell.get(), 1); - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - assert!(v.into_iter().next().is_some()); - assert_eq!(cell.get(), 2); - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - assert!(v.into_iter().next().is_some()); - assert_eq!(cell.get(), 3); - } - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - { - let mut it = v.into_iter(); - assert!(it.next().is_some()); - assert!(it.next_back().is_some()); - } - assert_eq!(cell.get(), 3); - } -} - 
-#[test] -fn test_capacity() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.reserve(1); - assert_eq!(v.capacity(), 2); - assert!(!v.spilled()); - - v.reserve_exact(0x100); - assert!(v.capacity() >= 0x100); - - v.push(0); - v.push(1); - v.push(2); - v.push(3); - - v.shrink_to_fit(); - assert!(v.capacity() < 0x100); -} - -#[test] -fn test_truncate() { - let mut v: SmallVec<[Box; 8]> = SmallVec::new(); - - for x in 0..8 { - v.push(Box::new(x)); - } - v.truncate(4); - - assert_eq!(v.len(), 4); - assert!(!v.spilled()); - - assert_eq!(*v.swap_remove(1), 1); - assert_eq!(*v.remove(1), 3); - v.insert(1, Box::new(3)); - - assert_eq!(&v.iter().map(|v| **v).collect::>(), &[0, 3, 2]); -} - -#[test] -fn test_insert_many() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many(1, [5, 6].iter().cloned()); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 5, 6, 1, 2, 3] - ); -} - -struct MockHintIter { - x: T, - hint: usize, -} -impl Iterator for MockHintIter { - type Item = T::Item; - fn next(&mut self) -> Option { - self.x.next() - } - fn size_hint(&self) -> (usize, Option) { - (self.hint, None) - } -} - -#[test] -fn test_insert_many_short_hint() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many( - 1, - MockHintIter { - x: [5, 6].iter().cloned(), - hint: 5, - }, - ); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 5, 6, 1, 2, 3] - ); -} - -#[test] -fn test_insert_many_long_hint() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many( - 1, - MockHintIter { - x: [5, 6].iter().cloned(), - hint: 1, - }, - ); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 5, 6, 1, 2, 3] - ); -} - -// https://github.com/servo/rust-smallvec/issues/96 -mod insert_many_panic { - use crate::{smallvec, SmallVec}; - use alloc::boxed::Box; - - 
struct PanicOnDoubleDrop { - dropped: Box, - } - - impl PanicOnDoubleDrop { - fn new() -> Self { - Self { - dropped: Box::new(false), - } - } - } - - impl Drop for PanicOnDoubleDrop { - fn drop(&mut self) { - assert!(!*self.dropped, "already dropped"); - *self.dropped = true; - } - } - - /// Claims to yield `hint` items, but actually yields `count`, then panics. - struct BadIter { - hint: usize, - count: usize, - } - - impl Iterator for BadIter { - type Item = PanicOnDoubleDrop; - fn size_hint(&self) -> (usize, Option) { - (self.hint, None) - } - fn next(&mut self) -> Option { - if self.count == 0 { - panic!() - } - self.count -= 1; - Some(PanicOnDoubleDrop::new()) - } - } - - #[test] - fn panic_early_at_start() { - let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> = - smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),]; - let result = ::std::panic::catch_unwind(move || { - vec.insert_many(0, BadIter { hint: 1, count: 0 }); - }); - assert!(result.is_err()); - } - - #[test] - fn panic_early_in_middle() { - let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> = - smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),]; - let result = ::std::panic::catch_unwind(move || { - vec.insert_many(1, BadIter { hint: 4, count: 2 }); - }); - assert!(result.is_err()); - } - - #[test] - fn panic_early_at_end() { - let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> = - smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),]; - let result = ::std::panic::catch_unwind(move || { - vec.insert_many(2, BadIter { hint: 3, count: 1 }); - }); - assert!(result.is_err()); - } - - #[test] - fn panic_late_at_start() { - let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> = - smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),]; - let result = ::std::panic::catch_unwind(move || { - vec.insert_many(0, BadIter { hint: 3, count: 5 }); - }); - assert!(result.is_err()); - } - - #[test] - fn panic_late_at_end() { - let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> = - 
smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),]; - let result = ::std::panic::catch_unwind(move || { - vec.insert_many(2, BadIter { hint: 3, count: 5 }); - }); - assert!(result.is_err()); - } -} - -#[test] -#[should_panic] -fn test_invalid_grow() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - v.extend(0..8); - v.grow(5); -} - -#[test] -#[should_panic] -fn drain_overflow() { - let mut v: SmallVec<[u8; 8]> = smallvec![0]; - v.drain(..=std::usize::MAX); -} - -#[test] -fn test_insert_from_slice() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_from_slice(1, &[5, 6]); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 5, 6, 1, 2, 3] - ); -} - -#[test] -fn test_extend_from_slice() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.extend_from_slice(&[5, 6]); - assert_eq!( - &v.iter().map(|v| *v).collect::>(), - &[0, 1, 2, 3, 5, 6] - ); -} - -#[test] -#[should_panic] -fn test_drop_panic_smallvec() { - // This test should only panic once, and not double panic, - // which would mean a double drop - struct DropPanic; - - impl Drop for DropPanic { - fn drop(&mut self) { - panic!("drop"); - } - } - - let mut v = SmallVec::<[_; 1]>::new(); - v.push(DropPanic); -} - -#[test] -fn test_eq() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let mut b: SmallVec<[u32; 2]> = SmallVec::new(); - let mut c: SmallVec<[u32; 2]> = SmallVec::new(); - // a = [1, 2] - a.push(1); - a.push(2); - // b = [1, 2] - b.push(1); - b.push(2); - // c = [3, 4] - c.push(3); - c.push(4); - - assert!(a == b); - assert!(a != c); -} - -#[test] -fn test_ord() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let mut b: SmallVec<[u32; 2]> = SmallVec::new(); - let mut c: SmallVec<[u32; 2]> = SmallVec::new(); - // a = [1] - a.push(1); - // b = [1, 1] - b.push(1); - b.push(1); - // c = [1, 2] - c.push(1); - c.push(2); - - assert!(a < 
b); - assert!(b > a); - assert!(b < c); - assert!(c > b); -} - -#[test] -fn test_hash() { - use std::collections::hash_map::DefaultHasher; - use std::hash::Hash; - - { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let b = [1, 2]; - a.extend(b.iter().cloned()); - let mut hasher = DefaultHasher::new(); - assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher)); - } - { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let b = [1, 2, 11, 12]; - a.extend(b.iter().cloned()); - let mut hasher = DefaultHasher::new(); - assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher)); - } -} - -#[test] -fn test_as_ref() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.as_ref(), [1]); - a.push(2); - assert_eq!(a.as_ref(), [1, 2]); - a.push(3); - assert_eq!(a.as_ref(), [1, 2, 3]); -} - -#[test] -fn test_as_mut() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.as_mut(), [1]); - a.push(2); - assert_eq!(a.as_mut(), [1, 2]); - a.push(3); - assert_eq!(a.as_mut(), [1, 2, 3]); - a.as_mut()[1] = 4; - assert_eq!(a.as_mut(), [1, 4, 3]); -} - -#[test] -fn test_borrow() { - use std::borrow::Borrow; - - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.borrow(), [1]); - a.push(2); - assert_eq!(a.borrow(), [1, 2]); - a.push(3); - assert_eq!(a.borrow(), [1, 2, 3]); -} - -#[test] -fn test_borrow_mut() { - use std::borrow::BorrowMut; - - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.borrow_mut(), [1]); - a.push(2); - assert_eq!(a.borrow_mut(), [1, 2]); - a.push(3); - assert_eq!(a.borrow_mut(), [1, 2, 3]); - BorrowMut::<[u32]>::borrow_mut(&mut a)[1] = 4; - assert_eq!(a.borrow_mut(), [1, 4, 3]); -} - -#[test] -fn test_from() { - assert_eq!(&SmallVec::<[u32; 2]>::from(&[1][..])[..], [1]); - assert_eq!(&SmallVec::<[u32; 2]>::from(&[1, 2, 3][..])[..], [1, 2, 3]); - - let vec = vec![]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[]); - 
drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let array = [1]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from(array); - assert_eq!(&*small_vec, &[1]); - drop(small_vec); - - let array = [99; 128]; - let small_vec: SmallVec<[u8; 128]> = SmallVec::from(array); - assert_eq!(&*small_vec, vec![99u8; 128].as_slice()); - drop(small_vec); -} - -#[test] -fn test_from_slice() { - assert_eq!(&SmallVec::<[u32; 2]>::from_slice(&[1][..])[..], [1]); - assert_eq!( - &SmallVec::<[u32; 2]>::from_slice(&[1, 2, 3][..])[..], - [1, 2, 3] - ); -} - -#[test] -fn test_exact_size_iterator() { - let mut vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]); - assert_eq!(vec.clone().into_iter().len(), 3); - assert_eq!(vec.drain(..2).len(), 2); - assert_eq!(vec.into_iter().len(), 1); -} - -#[test] -fn test_into_iter_as_slice() { - let vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]); - let mut iter = vec.clone().into_iter(); - assert_eq!(iter.as_slice(), &[1, 2, 3]); - assert_eq!(iter.as_mut_slice(), &[1, 2, 3]); - iter.next(); - assert_eq!(iter.as_slice(), &[2, 3]); - assert_eq!(iter.as_mut_slice(), &[2, 3]); - iter.next_back(); - assert_eq!(iter.as_slice(), &[2]); - assert_eq!(iter.as_mut_slice(), &[2]); -} - -#[test] -fn test_into_iter_clone() { - // Test that the cloned iterator yields identical elements and that it owns its own copy - // (i.e. no use after move errors). 
- let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter(); - let mut clone_iter = iter.clone(); - while let Some(x) = iter.next() { - assert_eq!(x, clone_iter.next().unwrap()); - } - assert_eq!(clone_iter.next(), None); -} - -#[test] -fn test_into_iter_clone_partially_consumed_iterator() { - // Test that the cloned iterator only contains the remaining elements of the original iterator. - let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter().skip(1); - let mut clone_iter = iter.clone(); - while let Some(x) = iter.next() { - assert_eq!(x, clone_iter.next().unwrap()); - } - assert_eq!(clone_iter.next(), None); -} - -#[test] -fn test_into_iter_clone_empty_smallvec() { - let mut iter = SmallVec::<[u8; 2]>::new().into_iter(); - let mut clone_iter = iter.clone(); - assert_eq!(iter.next(), None); - assert_eq!(clone_iter.next(), None); -} - -#[test] -fn shrink_to_fit_unspill() { - let mut vec = SmallVec::<[u8; 2]>::from_iter(0..3); - vec.pop(); - assert!(vec.spilled()); - vec.shrink_to_fit(); - assert!(!vec.spilled(), "shrink_to_fit will un-spill if possible"); -} - -#[test] -fn test_into_vec() { - let vec = SmallVec::<[u8; 2]>::from_iter(0..2); - assert_eq!(vec.into_vec(), vec![0, 1]); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..3); - assert_eq!(vec.into_vec(), vec![0, 1, 2]); -} - -#[test] -fn test_into_inner() { - let vec = SmallVec::<[u8; 2]>::from_iter(0..2); - assert_eq!(vec.into_inner(), Ok([0, 1])); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..1); - assert_eq!(vec.clone().into_inner(), Err(vec)); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..3); - assert_eq!(vec.clone().into_inner(), Err(vec)); -} - -#[test] -fn test_from_vec() { - let vec = vec![]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![1]; - let small_vec: 
SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1]); - drop(small_vec); - - let vec = vec![1, 2, 3]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); -} - -#[test] -fn test_retain() { - // Test inline data storate - let mut sv: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]); - sv.retain(|&mut i| i != 3); - assert_eq!(sv.pop(), Some(4)); - assert_eq!(sv.pop(), Some(2)); - assert_eq!(sv.pop(), Some(1)); - assert_eq!(sv.pop(), None); - - // Test spilled data storage - let mut sv: SmallVec<[i32; 3]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]); - sv.retain(|&mut i| i != 3); - assert_eq!(sv.pop(), Some(4)); - assert_eq!(sv.pop(), Some(2)); - assert_eq!(sv.pop(), Some(1)); - assert_eq!(sv.pop(), None); - - // Test that drop implementations are called for inline. - let one = Rc::new(1); - let mut sv: SmallVec<[Rc; 3]> = SmallVec::new(); - sv.push(Rc::clone(&one)); - assert_eq!(Rc::strong_count(&one), 2); - sv.retain(|_| false); - assert_eq!(Rc::strong_count(&one), 1); - - // Test that drop implementations are called for spilled data. 
- let mut sv: SmallVec<[Rc; 1]> = SmallVec::new(); - sv.push(Rc::clone(&one)); - sv.push(Rc::new(2)); - assert_eq!(Rc::strong_count(&one), 2); - sv.retain(|_| false); - assert_eq!(Rc::strong_count(&one), 1); -} - -#[test] -fn test_dedup() { - let mut dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 2, 3, 3]); - dupes.dedup(); - assert_eq!(&*dupes, &[1, 2, 3]); - - let mut empty: SmallVec<[i32; 5]> = SmallVec::new(); - empty.dedup(); - assert!(empty.is_empty()); - - let mut all_ones: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 1, 1, 1]); - all_ones.dedup(); - assert_eq!(all_ones.len(), 1); - - let mut no_dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 4, 5]); - no_dupes.dedup(); - assert_eq!(no_dupes.len(), 5); -} - -#[test] -fn test_resize() { - let mut v: SmallVec<[i32; 8]> = SmallVec::new(); - v.push(1); - v.resize(5, 0); - assert_eq!(v[..], [1, 0, 0, 0, 0][..]); - - v.resize(2, -1); - assert_eq!(v[..], [1, 0][..]); -} - -#[cfg(feature = "write")] -#[test] -fn test_write() { - use std::io::Write; - - let data = [1, 2, 3, 4, 5]; - - let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new(); - let len = small_vec.write(&data[..]).unwrap(); - assert_eq!(len, 5); - assert_eq!(small_vec.as_ref(), data.as_ref()); - - let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new(); - small_vec.write_all(&data[..]).unwrap(); - assert_eq!(small_vec.as_ref(), data.as_ref()); -} - -#[cfg(feature = "serde")] -#[test] -fn test_serde() { - use bincode::{config, deserialize}; - let mut small_vec: SmallVec<[i32; 2]> = SmallVec::new(); - small_vec.push(1); - let encoded = config().limit(100).serialize(&small_vec).unwrap(); - let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap(); - assert_eq!(small_vec, decoded); - small_vec.push(2); - // Spill the vec - small_vec.push(3); - small_vec.push(4); - // Check again after spilling. 
- let encoded = config().limit(100).serialize(&small_vec).unwrap(); - let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap(); - assert_eq!(small_vec, decoded); -} - -#[test] -fn grow_to_shrink() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(1); - v.push(2); - v.push(3); - assert!(v.spilled()); - v.clear(); - // Shrink to inline. - v.grow(2); - assert!(!v.spilled()); - assert_eq!(v.capacity(), 2); - assert_eq!(v.len(), 0); - v.push(4); - assert_eq!(v[..], [4]); -} - -#[test] -fn resumable_extend() { - let s = "a b c"; - // This iterator yields: (Some('a'), None, Some('b'), None, Some('c')), None - let it = s - .chars() - .scan(0, |_, ch| if ch.is_whitespace() { None } else { Some(ch) }); - let mut v: SmallVec<[char; 4]> = SmallVec::new(); - v.extend(it); - assert_eq!(v[..], ['a']); -} - -// #139 -#[test] -fn uninhabited() { - enum Void {} - let _sv = SmallVec::<[Void; 8]>::new(); -} - -#[test] -fn grow_spilled_same_size() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(0); - v.push(1); - v.push(2); - assert!(v.spilled()); - assert_eq!(v.capacity(), 4); - // grow with the same capacity - v.grow(4); - assert_eq!(v.capacity(), 4); - assert_eq!(v[..], [0, 1, 2]); -} - -#[cfg(feature = "const_generics")] -#[test] -fn const_generics() { - let _v = SmallVec::<[i32; 987]>::default(); -} - -#[cfg(feature = "const_new")] -#[test] -fn const_new() { - let v = const_new_inner(); - assert_eq!(v.capacity(), 4); - assert_eq!(v.len(), 0); - let v = const_new_inline_sized(); - assert_eq!(v.capacity(), 4); - assert_eq!(v.len(), 4); - assert_eq!(v[0], 1); - let v = const_new_inline_args(); - assert_eq!(v.capacity(), 2); - assert_eq!(v.len(), 2); - assert_eq!(v[0], 1); - assert_eq!(v[1], 4); -} -#[cfg(feature = "const_new")] -const fn const_new_inner() -> SmallVec<[i32; 4]> { - SmallVec::<[i32; 4]>::new_const() -} -#[cfg(feature = "const_new")] -const fn const_new_inline_sized() -> SmallVec<[i32; 4]> { - crate::smallvec_inline![1; 4] -} 
-#[cfg(feature = "const_new")] -const fn const_new_inline_args() -> SmallVec<[i32; 2]> { - crate::smallvec_inline![1, 4] -} - -#[test] -fn empty_macro() { - let _v: SmallVec<[u8; 1]> = smallvec![]; -} - -#[test] -fn zero_size_items() { - SmallVec::<[(); 0]>::new().push(()); -} - -#[test] -fn test_insert_many_overflow() { - let mut v: SmallVec<[u8; 1]> = SmallVec::new(); - v.push(123); - - // Prepare an iterator with small lower bound - let iter = (0u8..5).filter(|n| n % 2 == 0); - assert_eq!(iter.size_hint().0, 0); - - v.insert_many(0, iter); - assert_eq!(&*v, &[0, 2, 4, 123]); -} - -#[test] -fn test_clone_from() { - let mut a: SmallVec<[u8; 2]> = SmallVec::new(); - a.push(1); - a.push(2); - a.push(3); - - let mut b: SmallVec<[u8; 2]> = SmallVec::new(); - b.push(10); - - let mut c: SmallVec<[u8; 2]> = SmallVec::new(); - c.push(20); - c.push(21); - c.push(22); - - a.clone_from(&b); - assert_eq!(&*a, &[10]); - - b.clone_from(&c); - assert_eq!(&*b, &[20, 21, 22]); -} - -#[test] -fn test_size() { - use core::mem::size_of; - assert_eq!(24, size_of::>()); -} - -#[cfg(feature = "drain_filter")] -#[test] -fn drain_filter() { - let mut a: SmallVec<[u8; 2]> = smallvec![1u8, 2, 3, 4, 5, 6, 7, 8]; - - let b: SmallVec<[u8; 2]> = a.drain_filter(|x| *x % 3 == 0).collect(); - - assert_eq!(a, SmallVec::<[u8; 2]>::from_slice(&[1u8, 2, 4, 5, 7, 8])); - assert_eq!(b, SmallVec::<[u8; 2]>::from_slice(&[3u8, 6])); -} - -#[cfg(feature = "drain_keep_rest")] -#[test] -fn drain_keep_rest() { - let mut a: SmallVec<[i32; 3]> = smallvec![1i32, 2, 3, 4, 5, 6, 7, 8]; - let mut df = a.drain_filter(|x| *x % 2 == 0); - - assert_eq!(df.next().unwrap(), 2); - assert_eq!(df.next().unwrap(), 4); - - df.keep_rest(); - - assert_eq!(a, SmallVec::<[i32; 3]>::from_slice(&[1i32, 3, 5, 6, 7, 8])); -} diff -Nru s390-tools-2.31.0/rust-vendor/socket2/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/socket2/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/socket2/.cargo-checksum.json 2024-02-06 
12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2/.cargo-checksum.json 2024-05-28 11:57:39.000000000 +0200 @@ -1 +1 @@ -{"files":{},"package":"4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e"} \ No newline at end of file +{"files":{},"package":"64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/socket2/Cargo.toml s390-tools-2.33.1/rust-vendor/socket2/Cargo.toml --- s390-tools-2.31.0/rust-vendor/socket2/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2/Cargo.toml 2024-05-28 11:57:36.000000000 +0200 @@ -10,10 +10,9 @@ # See Cargo.toml.orig for the original contents. [package] -edition = "2021" -rust-version = "1.63" +edition = "2018" name = "socket2" -version = "0.5.4" +version = "0.4.9" authors = [ "Alex Crichton ", "Thomas de Zeeuw ", @@ -50,22 +49,6 @@ "--cfg", "docsrs", ] -targets = [ - "aarch64-apple-ios", - "aarch64-linux-android", - "x86_64-apple-darwin", - "x86_64-unknown-fuchsia", - "x86_64-pc-windows-msvc", - "x86_64-pc-solaris", - "x86_64-unknown-freebsd", - "x86_64-unknown-illumos", - "x86_64-unknown-linux-gnu", - "x86_64-unknown-linux-musl", - "x86_64-unknown-netbsd", - "x86_64-unknown-redox", - "armv7-linux-androideabi", - "i686-linux-android", -] [package.metadata.playground] features = ["all"] @@ -74,14 +57,12 @@ all = [] [target."cfg(unix)".dependencies.libc] -version = "0.2.141" +version = "0.2.139" -[target."cfg(windows)".dependencies.windows-sys] -version = "0.48" +[target."cfg(windows)".dependencies.winapi] +version = "0.3.9" features = [ - "Win32_Foundation", - "Win32_Networking_WinSock", - "Win32_System_IO", - "Win32_System_Threading", - "Win32_System_WindowsProgramming", + "handleapi", + "ws2ipdef", + "ws2tcpip", ] diff -Nru s390-tools-2.31.0/rust-vendor/socket2/README.md s390-tools-2.33.1/rust-vendor/socket2/README.md --- s390-tools-2.31.0/rust-vendor/socket2/README.md 2024-02-06 
12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2/README.md 2024-05-28 11:57:36.000000000 +0200 @@ -19,13 +19,12 @@ [API documentation]: https://docs.rs/socket2 -# Branches +# Two branches -Currently Socket2 supports two versions: v0.5 and v0.4. Version 0.5 is being -developed in the master branch. Version 0.4 is developed in the [v0.4.x branch] -branch. +Currently Socket2 supports two versions: v0.4 and v0.3. Version 0.4 is developed +in the master branch, version 0.3 in the [v0.3.x branch]. -[v0.4.x branch]: https://github.com/rust-lang/socket2/tree/v0.4.x +[v0.3.x branch]: https://github.com/rust-lang/socket2/tree/v0.3.x # OS support @@ -65,7 +64,7 @@ # Minimum Supported Rust Version (MSRV) -Socket2 uses 1.63.0 as MSRV. +Socket2 uses 1.46.0 as MSRV. # License diff -Nru s390-tools-2.31.0/rust-vendor/socket2/src/lib.rs s390-tools-2.33.1/rust-vendor/socket2/src/lib.rs --- s390-tools-2.31.0/rust-vendor/socket2/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2/src/lib.rs 2024-05-28 11:57:36.000000000 +0200 @@ -50,6 +50,7 @@ //! This crate has a single feature `all`, which enables all functions even ones //! that are not available on all OSs. +#![doc(html_root_url = "https://docs.rs/socket2/0.4")] #![deny(missing_docs, missing_debug_implementations, rust_2018_idioms)] // Show required OS/features on docs.rs. #![cfg_attr(docsrs, feature(doc_cfg))] @@ -59,12 +60,6 @@ #![doc(test(attr(deny(warnings))))] use std::fmt; -#[cfg(not(target_os = "redox"))] -use std::io::IoSlice; -#[cfg(not(target_os = "redox"))] -use std::marker::PhantomData; -#[cfg(not(target_os = "redox"))] -use std::mem; use std::mem::MaybeUninit; use std::net::SocketAddr; use std::ops::{Deref, DerefMut}; @@ -83,7 +78,7 @@ $(#[$target: meta])* // The flag(s) to check. // Need to specific the libc crate because Windows doesn't use - // `libc` but `windows_sys`. + // `libc` but `winapi`. 
$libc: ident :: $flag: ident ),+ $(,)* ) => { @@ -94,7 +89,7 @@ $(#[$target])* $libc :: $flag => stringify!($flag), )+ - n => return write!(f, "{n}"), + n => return write!(f, "{}", n), }; f.write_str(string) } @@ -120,56 +115,6 @@ }; } -/// Link to online documentation for (almost) all supported OSs. -#[rustfmt::skip] -macro_rules! man_links { - // Links to all OSs. - ($syscall: tt ( $section: tt ) ) => { - concat!( - man_links!(__ intro), - man_links!(__ unix $syscall($section)), - man_links!(__ windows $syscall($section)), - ) - }; - // Links to Unix-like OSs. - (unix: $syscall: tt ( $section: tt ) ) => { - concat!( - man_links!(__ intro), - man_links!(__ unix $syscall($section)), - ) - }; - // Links to Windows only. - (windows: $syscall: tt ( $section: tt ) ) => { - concat!( - man_links!(__ intro), - man_links!(__ windows $syscall($section)), - ) - }; - // Internals. - (__ intro) => { - "\n\nAdditional documentation can be found in manual of the OS:\n\n" - }; - // List for Unix-like OSs. - (__ unix $syscall: tt ( $section: tt ) ) => { - concat!( - " * DragonFly BSD: \n", - " * FreeBSD: \n", - " * Linux: \n", - " * macOS: (archived, actually for iOS)\n", - " * NetBSD: \n", - " * OpenBSD: \n", - " * iOS: (archived)\n", - " * illumos: \n", - ) - }; - // List for Window (so just Windows). - (__ windows $syscall: tt ( $section: tt ) ) => { - concat!( - " * Windows: \n", - ) - }; -} - mod sockaddr; mod socket; mod sockref; @@ -215,9 +160,6 @@ /// Domain for IPv6 communication, corresponding to `AF_INET6`. pub const IPV6: Domain = Domain(sys::AF_INET6); - /// Domain for Unix socket communication, corresponding to `AF_UNIX`. - pub const UNIX: Domain = Domain(sys::AF_UNIX); - /// Returns the correct domain for `address`. pub const fn for_address(address: SocketAddr) -> Domain { match address { @@ -262,24 +204,14 @@ /// Used for protocols such as UDP. pub const DGRAM: Type = Type(sys::SOCK_DGRAM); - /// Type corresponding to `SOCK_DCCP`. 
- /// - /// Used for the DCCP protocol. - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub const DCCP: Type = Type(sys::SOCK_DCCP); - /// Type corresponding to `SOCK_SEQPACKET`. - #[cfg(all(feature = "all", not(target_os = "espidf")))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", not(target_os = "espidf")))))] + #[cfg(feature = "all")] + #[cfg_attr(docsrs, doc(cfg(feature = "all")))] pub const SEQPACKET: Type = Type(sys::SOCK_SEQPACKET); /// Type corresponding to `SOCK_RAW`. - #[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))) - )] + #[cfg(all(feature = "all", not(target_os = "redox")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", not(target_os = "redox")))))] pub const RAW: Type = Type(sys::SOCK_RAW); } @@ -317,35 +249,6 @@ /// Protocol corresponding to `UDP`. pub const UDP: Protocol = Protocol(sys::IPPROTO_UDP); - - #[cfg(target_os = "linux")] - /// Protocol corresponding to `MPTCP`. - pub const MPTCP: Protocol = Protocol(sys::IPPROTO_MPTCP); - - /// Protocol corresponding to `DCCP`. - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub const DCCP: Protocol = Protocol(sys::IPPROTO_DCCP); - - /// Protocol corresponding to `SCTP`. - #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))] - pub const SCTP: Protocol = Protocol(sys::IPPROTO_SCTP); - - /// Protocol corresponding to `UDPLITE`. - #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - ) - ))] - pub const UDPLITE: Protocol = Protocol(sys::IPPROTO_UDPLITE); - - /// Protocol corresponding to `DIVERT`. 
- #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "openbsd")))] - pub const DIVERT: Protocol = Protocol(sys::IPPROTO_DIVERT); } impl From for Protocol { @@ -377,7 +280,6 @@ /// /// On Unix this corresponds to the `MSG_TRUNC` flag. /// On Windows this corresponds to the `WSAEMSGSIZE` error code. - #[cfg(not(target_os = "espidf"))] pub const fn is_truncated(self) -> bool { self.0 & sys::MSG_TRUNC != 0 } @@ -432,7 +334,6 @@ target_os = "redox", target_os = "solaris", target_os = "nto", - target_os = "espidf", )))] interval: Option, #[cfg(not(any( @@ -441,7 +342,6 @@ target_os = "solaris", target_os = "windows", target_os = "nto", - target_os = "espidf", )))] retries: Option, } @@ -456,7 +356,6 @@ target_os = "redox", target_os = "solaris", target_os = "nto", - target_os = "espidf", )))] interval: None, #[cfg(not(any( @@ -465,7 +364,6 @@ target_os = "solaris", target_os = "windows", target_os = "nto", - target_os = "espidf", )))] retries: None, } @@ -496,35 +394,35 @@ /// /// Some platforms specify this value in seconds, so sub-second /// specifications may be omitted. 
- #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "ios", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", - target_os = "windows", - ))] - #[cfg_attr( - docsrs, - doc(cfg(any( + #[cfg(all( + feature = "all", + any( target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", - target_os = "ios", target_os = "linux", - target_os = "macos", target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", - target_os = "windows", + target_vendor = "apple", + windows, + ) + ))] + #[cfg_attr( + docsrs, + doc(cfg(all( + feature = "all", + any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + windows, + ) ))) )] pub const fn with_interval(self, interval: Duration) -> Self { @@ -541,17 +439,15 @@ #[cfg(all( feature = "all", any( + doc, target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", - target_os = "ios", target_os = "linux", - target_os = "macos", target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))] #[cfg_attr( @@ -564,12 +460,9 @@ target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", - target_os = "ios", target_os = "linux", - target_os = "macos", target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))) )] @@ -580,138 +473,3 @@ } } } - -/// Configuration of a `sendmsg(2)` system call. -/// -/// This wraps `msghdr` on Unix and `WSAMSG` on Windows. Also see [`MsgHdrMut`] -/// for the variant used by `recvmsg(2)`. 
-#[cfg(not(target_os = "redox"))] -pub struct MsgHdr<'addr, 'bufs, 'control> { - inner: sys::msghdr, - #[allow(clippy::type_complexity)] - _lifetimes: PhantomData<(&'addr SockAddr, &'bufs IoSlice<'bufs>, &'control [u8])>, -} - -#[cfg(not(target_os = "redox"))] -impl<'addr, 'bufs, 'control> MsgHdr<'addr, 'bufs, 'control> { - /// Create a new `MsgHdr` with all empty/zero fields. - #[allow(clippy::new_without_default)] - pub fn new() -> MsgHdr<'addr, 'bufs, 'control> { - // SAFETY: all zero is valid for `msghdr` and `WSAMSG`. - MsgHdr { - inner: unsafe { mem::zeroed() }, - _lifetimes: PhantomData, - } - } - - /// Set the address (name) of the message. - /// - /// Corresponds to setting `msg_name` and `msg_namelen` on Unix and `name` - /// and `namelen` on Windows. - pub fn with_addr(mut self, addr: &'addr SockAddr) -> Self { - sys::set_msghdr_name(&mut self.inner, addr); - self - } - - /// Set the buffer(s) of the message. - /// - /// Corresponds to setting `msg_iov` and `msg_iovlen` on Unix and `lpBuffers` - /// and `dwBufferCount` on Windows. - pub fn with_buffers(mut self, bufs: &'bufs [IoSlice<'_>]) -> Self { - let ptr = bufs.as_ptr() as *mut _; - sys::set_msghdr_iov(&mut self.inner, ptr, bufs.len()); - self - } - - /// Set the control buffer of the message. - /// - /// Corresponds to setting `msg_control` and `msg_controllen` on Unix and - /// `Control` on Windows. - pub fn with_control(mut self, buf: &'control [u8]) -> Self { - let ptr = buf.as_ptr() as *mut _; - sys::set_msghdr_control(&mut self.inner, ptr, buf.len()); - self - } - - /// Set the flags of the message. - /// - /// Corresponds to setting `msg_flags` on Unix and `dwFlags` on Windows. 
- pub fn with_flags(mut self, flags: sys::c_int) -> Self { - sys::set_msghdr_flags(&mut self.inner, flags); - self - } -} - -#[cfg(not(target_os = "redox"))] -impl<'name, 'bufs, 'control> fmt::Debug for MsgHdr<'name, 'bufs, 'control> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - "MsgHdr".fmt(fmt) - } -} - -/// Configuration of a `recvmsg(2)` system call. -/// -/// This wraps `msghdr` on Unix and `WSAMSG` on Windows. Also see [`MsgHdr`] for -/// the variant used by `sendmsg(2)`. -#[cfg(not(target_os = "redox"))] -pub struct MsgHdrMut<'addr, 'bufs, 'control> { - inner: sys::msghdr, - #[allow(clippy::type_complexity)] - _lifetimes: PhantomData<( - &'addr mut SockAddr, - &'bufs mut MaybeUninitSlice<'bufs>, - &'control mut [u8], - )>, -} - -#[cfg(not(target_os = "redox"))] -impl<'addr, 'bufs, 'control> MsgHdrMut<'addr, 'bufs, 'control> { - /// Create a new `MsgHdrMut` with all empty/zero fields. - #[allow(clippy::new_without_default)] - pub fn new() -> MsgHdrMut<'addr, 'bufs, 'control> { - // SAFETY: all zero is valid for `msghdr` and `WSAMSG`. - MsgHdrMut { - inner: unsafe { mem::zeroed() }, - _lifetimes: PhantomData, - } - } - - /// Set the mutable address (name) of the message. - /// - /// Corresponds to setting `msg_name` and `msg_namelen` on Unix and `name` - /// and `namelen` on Windows. - pub fn with_addr(mut self, addr: &'addr mut SockAddr) -> Self { - sys::set_msghdr_name(&mut self.inner, addr); - self - } - - /// Set the mutable buffer(s) of the message. - /// - /// Corresponds to setting `msg_iov` and `msg_iovlen` on Unix and `lpBuffers` - /// and `dwBufferCount` on Windows. - pub fn with_buffers(mut self, bufs: &'bufs mut [MaybeUninitSlice<'_>]) -> Self { - sys::set_msghdr_iov(&mut self.inner, bufs.as_mut_ptr().cast(), bufs.len()); - self - } - - /// Set the mutable control buffer of the message. - /// - /// Corresponds to setting `msg_control` and `msg_controllen` on Unix and - /// `Control` on Windows. 
- pub fn with_control(mut self, buf: &'control mut [MaybeUninit]) -> Self { - sys::set_msghdr_control(&mut self.inner, buf.as_mut_ptr().cast(), buf.len()); - self - } - - /// Returns the flags of the message. - pub fn flags(&self) -> RecvFlags { - sys::msghdr_flags(&self.inner) - } -} - -#[cfg(not(target_os = "redox"))] -impl<'name, 'bufs, 'control> fmt::Debug for MsgHdrMut<'name, 'bufs, 'control> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - "MsgHdrMut".fmt(fmt) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/socket2/src/sockaddr.rs s390-tools-2.33.1/rust-vendor/socket2/src/sockaddr.rs --- s390-tools-2.31.0/rust-vendor/socket2/src/sockaddr.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2/src/sockaddr.rs 2024-05-28 11:57:36.000000000 +0200 @@ -1,17 +1,13 @@ -use std::hash::Hash; use std::mem::{self, size_of, MaybeUninit}; use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; -use std::path::Path; -use std::{fmt, io, ptr}; - -#[cfg(windows)] -use windows_sys::Win32::Networking::WinSock::SOCKADDR_IN6_0; +use std::{fmt, io}; use crate::sys::{ - c_int, sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, socklen_t, AF_INET, - AF_INET6, AF_UNIX, + sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, socklen_t, AF_INET, + AF_INET6, }; -use crate::Domain; +#[cfg(windows)] +use winapi::shared::ws2ipdef::SOCKADDR_IN6_LH_u; /// The address of a socket. /// @@ -104,7 +100,7 @@ /// /// // Initialise a `SocketAddr` byte calling `getsockname(2)`. /// let (_, address) = unsafe { - /// SockAddr::try_init(|addr_storage, len| { + /// SockAddr::init(|addr_storage, len| { /// // The `getsockname(2)` system call will intiliase `storage` for /// // us, setting `len` to the correct length. 
/// if libc::getsockname(socket.as_raw_fd(), addr_storage.cast(), len) == -1 { @@ -119,7 +115,7 @@ /// # Ok(()) /// # } /// ``` - pub unsafe fn try_init(init: F) -> io::Result<(T, SockAddr)> + pub unsafe fn init(init: F) -> io::Result<(T, SockAddr)> where F: FnOnce(*mut sockaddr_storage, *mut socklen_t) -> io::Result, { @@ -143,36 +139,11 @@ }) } - /// Constructs a `SockAddr` with the family `AF_UNIX` and the provided path. - /// - /// Returns an error if the path is longer than `SUN_LEN`. - pub fn unix

(path: P) -> io::Result - where - P: AsRef, - { - crate::sys::unix_sockaddr(path.as_ref()) - } - - /// Set the length of the address. - /// - /// # Safety - /// - /// Caller must ensure that the address up to `length` bytes are properly - /// initialised. - pub unsafe fn set_length(&mut self, length: socklen_t) { - self.len = length; - } - /// Returns this address's family. pub const fn family(&self) -> sa_family_t { self.storage.ss_family } - /// Returns this address's `Domain`. - pub const fn domain(&self) -> Domain { - Domain(self.storage.ss_family as c_int) - } - /// Returns the size of this address in bytes. pub const fn len(&self) -> socklen_t { self.len @@ -180,45 +151,29 @@ /// Returns a raw pointer to the address. pub const fn as_ptr(&self) -> *const sockaddr { - ptr::addr_of!(self.storage).cast() - } - - /// Retuns the address as the storage. - pub const fn as_storage(self) -> sockaddr_storage { - self.storage - } - - /// Returns true if this address is in the `AF_INET` (IPv4) family, false otherwise. - pub const fn is_ipv4(&self) -> bool { - self.storage.ss_family == AF_INET as sa_family_t + &self.storage as *const _ as *const _ } - /// Returns true if this address is in the `AF_INET6` (IPv6) family, false - /// otherwise. - pub const fn is_ipv6(&self) -> bool { - self.storage.ss_family == AF_INET6 as sa_family_t - } - - /// Returns true if this address is of a unix socket (for local interprocess communication), - /// i.e. it is from the `AF_UNIX` family, false otherwise. - pub fn is_unix(&self) -> bool { - self.storage.ss_family == AF_UNIX as sa_family_t + /// Returns a raw pointer to the address storage. + #[cfg(all(unix, not(target_os = "redox")))] + pub(crate) const fn as_storage_ptr(&self) -> *const sockaddr_storage { + &self.storage } /// Returns this address as a `SocketAddr` if it is in the `AF_INET` (IPv4) /// or `AF_INET6` (IPv6) family, otherwise returns `None`. 
pub fn as_socket(&self) -> Option { if self.storage.ss_family == AF_INET as sa_family_t { - // SAFETY: if the `ss_family` field is `AF_INET` then storage must - // be a `sockaddr_in`. - let addr = unsafe { &*(ptr::addr_of!(self.storage).cast::()) }; + // Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in. + let addr = unsafe { &*(&self.storage as *const _ as *const sockaddr_in) }; + let ip = crate::sys::from_in_addr(addr.sin_addr); let port = u16::from_be(addr.sin_port); Some(SocketAddr::V4(SocketAddrV4::new(ip, port))) } else if self.storage.ss_family == AF_INET6 as sa_family_t { - // SAFETY: if the `ss_family` field is `AF_INET6` then storage must - // be a `sockaddr_in6`. - let addr = unsafe { &*(ptr::addr_of!(self.storage).cast::()) }; + // Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6. + let addr = unsafe { &*(&self.storage as *const _ as *const sockaddr_in6) }; + let ip = crate::sys::from_in6_addr(addr.sin6_addr); let port = u16::from_be(addr.sin6_port); Some(SocketAddr::V6(SocketAddrV6::new( @@ -229,7 +184,7 @@ addr.sin6_scope_id, #[cfg(windows)] unsafe { - addr.Anonymous.sin6_scope_id + *addr.u.sin6_scope_id() }, ))) } else { @@ -254,14 +209,6 @@ _ => None, } } - - /// Returns the initialised storage bytes. - fn as_bytes(&self) -> &[u8] { - // SAFETY: `self.storage` is a C struct which can always be treated a - // slice of bytes. Futhermore we ensure we don't read any unitialised - // bytes by using `self.len`. - unsafe { std::slice::from_raw_parts(self.as_ptr().cast(), self.len as usize) } - } } impl From for SockAddr { @@ -275,77 +222,70 @@ impl From for SockAddr { fn from(addr: SocketAddrV4) -> SockAddr { - // SAFETY: a `sockaddr_storage` of all zeros is valid. 
- let mut storage = unsafe { mem::zeroed::() }; - let len = { - let storage = unsafe { &mut *ptr::addr_of_mut!(storage).cast::() }; - storage.sin_family = AF_INET as sa_family_t; - storage.sin_port = addr.port().to_be(); - storage.sin_addr = crate::sys::to_in_addr(addr.ip()); - storage.sin_zero = Default::default(); - mem::size_of::() as socklen_t + let sockaddr_in = sockaddr_in { + sin_family: AF_INET as sa_family_t, + sin_port: addr.port().to_be(), + sin_addr: crate::sys::to_in_addr(addr.ip()), + sin_zero: Default::default(), + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "haiku", + target_os = "ios", + target_os = "macos", + target_os = "netbsd", + target_os = "openbsd" + ))] + sin_len: 0, }; - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "haiku", - target_os = "hermit", - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "nto", - target_os = "openbsd", - target_os = "tvos", - target_os = "vxworks", - target_os = "watchos", - ))] - { - storage.ss_len = len as u8; + let mut storage = MaybeUninit::::zeroed(); + // Safety: A `sockaddr_in` is memory compatible with a `sockaddr_storage` + unsafe { (storage.as_mut_ptr() as *mut sockaddr_in).write(sockaddr_in) }; + SockAddr { + storage: unsafe { storage.assume_init() }, + len: mem::size_of::() as socklen_t, } - SockAddr { storage, len } } } impl From for SockAddr { fn from(addr: SocketAddrV6) -> SockAddr { - // SAFETY: a `sockaddr_storage` of all zeros is valid. 
- let mut storage = unsafe { mem::zeroed::() }; - let len = { - let storage = unsafe { &mut *ptr::addr_of_mut!(storage).cast::() }; - storage.sin6_family = AF_INET6 as sa_family_t; - storage.sin6_port = addr.port().to_be(); - storage.sin6_addr = crate::sys::to_in6_addr(addr.ip()); - storage.sin6_flowinfo = addr.flowinfo(); + #[cfg(windows)] + let u = unsafe { + let mut u = mem::zeroed::(); + *u.sin6_scope_id_mut() = addr.scope_id(); + u + }; + + let sockaddr_in6 = sockaddr_in6 { + sin6_family: AF_INET6 as sa_family_t, + sin6_port: addr.port().to_be(), + sin6_addr: crate::sys::to_in6_addr(addr.ip()), + sin6_flowinfo: addr.flowinfo(), #[cfg(unix)] - { - storage.sin6_scope_id = addr.scope_id(); - } + sin6_scope_id: addr.scope_id(), #[cfg(windows)] - { - storage.Anonymous = SOCKADDR_IN6_0 { - sin6_scope_id: addr.scope_id(), - }; - } - mem::size_of::() as socklen_t + u, + #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "haiku", + target_os = "ios", + target_os = "macos", + target_os = "netbsd", + target_os = "openbsd" + ))] + sin6_len: 0, + #[cfg(any(target_os = "solaris", target_os = "illumos"))] + __sin6_src_id: 0, }; - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "haiku", - target_os = "hermit", - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "nto", - target_os = "openbsd", - target_os = "tvos", - target_os = "vxworks", - target_os = "watchos", - ))] - { - storage.ss_len = len as u8; + let mut storage = MaybeUninit::::zeroed(); + // Safety: A `sockaddr_in6` is memory compatible with a `sockaddr_storage` + unsafe { (storage.as_mut_ptr() as *mut sockaddr_in6).write(sockaddr_in6) }; + SockAddr { + storage: unsafe { storage.assume_init() }, + len: mem::size_of::() as socklen_t, } - SockAddr { storage, len } } } @@ -360,11 +300,9 @@ target_os = "ios", target_os = "macos", target_os = "netbsd", - target_os = "nto", target_os = "openbsd", - target_os = "tvos", target_os = 
"vxworks", - target_os = "watchos", + target_os = "nto", ))] f.field("ss_len", &self.storage.ss_len); f.field("ss_family", &self.storage.ss_family) @@ -373,200 +311,40 @@ } } -impl PartialEq for SockAddr { - fn eq(&self, other: &Self) -> bool { - self.as_bytes() == other.as_bytes() - } -} - -impl Eq for SockAddr {} - -impl Hash for SockAddr { - fn hash(&self, state: &mut H) { - self.as_bytes().hash(state); - } +#[test] +fn ipv4() { + use std::net::Ipv4Addr; + let std = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876); + let addr = SockAddr::from(std); + assert_eq!(addr.family(), AF_INET as sa_family_t); + assert_eq!(addr.len(), size_of::() as socklen_t); + assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std))); + assert_eq!(addr.as_socket_ipv4(), Some(std)); + assert!(addr.as_socket_ipv6().is_none()); + + let addr = SockAddr::from(SocketAddr::from(std)); + assert_eq!(addr.family(), AF_INET as sa_family_t); + assert_eq!(addr.len(), size_of::() as socklen_t); + assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std))); + assert_eq!(addr.as_socket_ipv4(), Some(std)); + assert!(addr.as_socket_ipv6().is_none()); } -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn ipv4() { - use std::net::Ipv4Addr; - let std = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876); - let addr = SockAddr::from(std); - assert!(addr.is_ipv4()); - assert!(!addr.is_ipv6()); - assert!(!addr.is_unix()); - assert_eq!(addr.family(), AF_INET as sa_family_t); - assert_eq!(addr.domain(), Domain::IPV4); - assert_eq!(addr.len(), size_of::() as socklen_t); - assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std))); - assert_eq!(addr.as_socket_ipv4(), Some(std)); - assert!(addr.as_socket_ipv6().is_none()); - - let addr = SockAddr::from(SocketAddr::from(std)); - assert_eq!(addr.family(), AF_INET as sa_family_t); - assert_eq!(addr.len(), size_of::() as socklen_t); - assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std))); - assert_eq!(addr.as_socket_ipv4(), Some(std)); - 
assert!(addr.as_socket_ipv6().is_none()); - #[cfg(unix)] - { - assert!(addr.as_pathname().is_none()); - assert!(addr.as_abstract_namespace().is_none()); - } - } - - #[test] - fn ipv6() { - use std::net::Ipv6Addr; - let std = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12); - let addr = SockAddr::from(std); - assert!(addr.is_ipv6()); - assert!(!addr.is_ipv4()); - assert!(!addr.is_unix()); - assert_eq!(addr.family(), AF_INET6 as sa_family_t); - assert_eq!(addr.domain(), Domain::IPV6); - assert_eq!(addr.len(), size_of::() as socklen_t); - assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std))); - assert!(addr.as_socket_ipv4().is_none()); - assert_eq!(addr.as_socket_ipv6(), Some(std)); - - let addr = SockAddr::from(SocketAddr::from(std)); - assert_eq!(addr.family(), AF_INET6 as sa_family_t); - assert_eq!(addr.len(), size_of::() as socklen_t); - assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std))); - assert!(addr.as_socket_ipv4().is_none()); - assert_eq!(addr.as_socket_ipv6(), Some(std)); - #[cfg(unix)] - { - assert!(addr.as_pathname().is_none()); - assert!(addr.as_abstract_namespace().is_none()); - } - } - - #[test] - fn ipv4_eq() { - use std::net::Ipv4Addr; - - let std1 = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876); - let std2 = SocketAddrV4::new(Ipv4Addr::new(5, 6, 7, 8), 8765); - - test_eq( - SockAddr::from(std1), - SockAddr::from(std1), - SockAddr::from(std2), - ); - } - - #[test] - fn ipv4_hash() { - use std::net::Ipv4Addr; - - let std1 = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876); - let std2 = SocketAddrV4::new(Ipv4Addr::new(5, 6, 7, 8), 8765); - - test_hash( - SockAddr::from(std1), - SockAddr::from(std1), - SockAddr::from(std2), - ); - } - - #[test] - fn ipv6_eq() { - use std::net::Ipv6Addr; - - let std1 = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12); - let std2 = SocketAddrV6::new(Ipv6Addr::new(3, 4, 5, 6, 7, 8, 9, 0), 7654, 13, 14); - - test_eq( - SockAddr::from(std1), - SockAddr::from(std1), - 
SockAddr::from(std2), - ); - } - - #[test] - fn ipv6_hash() { - use std::net::Ipv6Addr; - - let std1 = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12); - let std2 = SocketAddrV6::new(Ipv6Addr::new(3, 4, 5, 6, 7, 8, 9, 0), 7654, 13, 14); - - test_hash( - SockAddr::from(std1), - SockAddr::from(std1), - SockAddr::from(std2), - ); - } - - #[test] - fn ipv4_ipv6_eq() { - use std::net::Ipv4Addr; - use std::net::Ipv6Addr; - - let std1 = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876); - let std2 = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12); - - test_eq( - SockAddr::from(std1), - SockAddr::from(std1), - SockAddr::from(std2), - ); - - test_eq( - SockAddr::from(std2), - SockAddr::from(std2), - SockAddr::from(std1), - ); - } - - #[test] - fn ipv4_ipv6_hash() { - use std::net::Ipv4Addr; - use std::net::Ipv6Addr; - - let std1 = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876); - let std2 = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12); - - test_hash( - SockAddr::from(std1), - SockAddr::from(std1), - SockAddr::from(std2), - ); - - test_hash( - SockAddr::from(std2), - SockAddr::from(std2), - SockAddr::from(std1), - ); - } - - #[allow(clippy::eq_op)] // allow a0 == a0 check - fn test_eq(a0: SockAddr, a1: SockAddr, b: SockAddr) { - assert!(a0 == a0); - assert!(a0 == a1); - assert!(a1 == a0); - assert!(a0 != b); - assert!(b != a0); - } - - fn test_hash(a0: SockAddr, a1: SockAddr, b: SockAddr) { - assert!(calculate_hash(&a0) == calculate_hash(&a0)); - assert!(calculate_hash(&a0) == calculate_hash(&a1)); - // technically unequal values can have the same hash, in this case x != z and both have different hashes - assert!(calculate_hash(&a0) != calculate_hash(&b)); - } - - fn calculate_hash(x: &SockAddr) -> u64 { - use std::collections::hash_map::DefaultHasher; - use std::hash::Hasher; - - let mut hasher = DefaultHasher::new(); - x.hash(&mut hasher); - hasher.finish() - } +#[test] +fn ipv6() { + use 
std::net::Ipv6Addr; + let std = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12); + let addr = SockAddr::from(std); + assert_eq!(addr.family(), AF_INET6 as sa_family_t); + assert_eq!(addr.len(), size_of::() as socklen_t); + assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std))); + assert!(addr.as_socket_ipv4().is_none()); + assert_eq!(addr.as_socket_ipv6(), Some(std)); + + let addr = SockAddr::from(SocketAddr::from(std)); + assert_eq!(addr.family(), AF_INET6 as sa_family_t); + assert_eq!(addr.len(), size_of::() as socklen_t); + assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std))); + assert!(addr.as_socket_ipv4().is_none()); + assert_eq!(addr.as_socket_ipv6(), Some(std)); } diff -Nru s390-tools-2.31.0/rust-vendor/socket2/src/socket.rs s390-tools-2.33.1/rust-vendor/socket2/src/socket.rs --- s390-tools-2.31.0/rust-vendor/socket2/src/socket.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2/src/socket.rs 2024-05-28 11:57:36.000000000 +0200 @@ -21,11 +21,9 @@ use std::time::Duration; use crate::sys::{self, c_int, getsockopt, setsockopt, Bool}; -#[cfg(all(unix, not(target_os = "redox")))] -use crate::MsgHdrMut; use crate::{Domain, Protocol, SockAddr, TcpKeepalive, Type}; #[cfg(not(target_os = "redox"))] -use crate::{MaybeUninitSlice, MsgHdr, RecvFlags}; +use crate::{MaybeUninitSlice, RecvFlags}; /// Owned wrapper around a system socket. /// @@ -48,8 +46,8 @@ /// # Notes /// /// Some methods that set options on `Socket` require two system calls to set -/// their options without overwriting previously set options. We do this by -/// first getting the current settings, applying the desired changes, and then +/// there options without overwriting previously set options. We do this by +/// first getting the current settings, applying the desired changes and than /// updating the settings. This means that the operation is **not** atomic. This /// can lead to a data race when two threads are changing options in parallel. 
/// @@ -59,12 +57,13 @@ /// use std::net::{SocketAddr, TcpListener}; /// use socket2::{Socket, Domain, Type}; /// -/// // create a TCP listener -/// let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?; +/// // create a TCP listener bound to two addresses +/// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; /// /// let address: SocketAddr = "[::1]:12345".parse().unwrap(); /// let address = address.into(); /// socket.bind(&address)?; +/// socket.bind(&address)?; /// socket.listen(128)?; /// /// let listener: TcpListener = socket.into(); @@ -127,7 +126,6 @@ /// the socket is made non-inheritable. /// /// [`Socket::new_raw`] can be used if you don't want these flags to be set. - #[doc = man_links!(socket(2))] pub fn new(domain: Domain, ty: Type, protocol: Option) -> io::Result { let ty = set_common_type(ty); Socket::new_raw(domain, ty, protocol).and_then(set_common_flags) @@ -138,7 +136,7 @@ /// This function corresponds to `socket(2)` on Unix and `WSASocketW` on /// Windows and simply creates a new socket, no other configuration is done. pub fn new_raw(domain: Domain, ty: Type, protocol: Option) -> io::Result { - let protocol = protocol.map_or(0, |p| p.0); + let protocol = protocol.map(|p| p.0).unwrap_or(0); sys::socket(domain.0, ty.0, protocol).map(Socket::from_raw) } @@ -148,8 +146,7 @@ /// /// This function sets the same flags as in done for [`Socket::new`], /// [`Socket::pair_raw`] can be used if you don't want to set those flags. - #[doc = man_links!(unix: socketpair(2))] - #[cfg(all(feature = "all", unix))] + #[cfg(any(doc, all(feature = "all", unix)))] #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] pub fn pair( domain: Domain, @@ -166,14 +163,14 @@ /// Creates a pair of sockets which are connected to each other. /// /// This function corresponds to `socketpair(2)`. 
- #[cfg(all(feature = "all", unix))] + #[cfg(any(doc, all(feature = "all", unix)))] #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] pub fn pair_raw( domain: Domain, ty: Type, protocol: Option, ) -> io::Result<(Socket, Socket)> { - let protocol = protocol.map_or(0, |p| p.0); + let protocol = protocol.map(|p| p.0).unwrap_or(0); sys::socketpair(domain.0, ty.0, protocol) .map(|[a, b]| (Socket::from_raw(a), Socket::from_raw(b))) } @@ -182,7 +179,6 @@ /// /// This function directly corresponds to the `bind(2)` function on Windows /// and Unix. - #[doc = man_links!(bind(2))] pub fn bind(&self, address: &SockAddr) -> io::Result<()> { sys::bind(self.as_raw(), address) } @@ -194,7 +190,6 @@ /// /// An error will be returned if `listen` or `connect` has already been /// called on this builder. - #[doc = man_links!(connect(2))] /// /// # Notes /// @@ -249,7 +244,6 @@ /// /// An error will be returned if `listen` or `connect` has already been /// called on this builder. - #[doc = man_links!(listen(2))] pub fn listen(&self, backlog: c_int) -> io::Result<()> { sys::listen(self.as_raw(), backlog) } @@ -261,7 +255,6 @@ /// /// This function sets the same flags as in done for [`Socket::new`], /// [`Socket::accept_raw`] can be used if you don't want to set those flags. - #[doc = man_links!(accept(2))] pub fn accept(&self) -> io::Result<(Socket, SockAddr)> { // Use `accept4` on platforms that support it. #[cfg(any( @@ -308,10 +301,6 @@ /// Returns the socket address of the local half of this socket. /// - /// This function directly corresponds to the `getsockname(2)` function on - /// Windows and Unix. - #[doc = man_links!(getsockname(2))] - /// /// # Notes /// /// Depending on the OS this may return an error if the socket is not @@ -324,10 +313,6 @@ /// Returns the socket address of the remote peer of this socket. /// - /// This function directly corresponds to the `getpeername(2)` function on - /// Windows and Unix. 
- #[doc = man_links!(getpeername(2))] - /// /// # Notes /// /// This returns an error if the socket is not [`connect`ed]. @@ -360,21 +345,7 @@ sys::try_clone(self.as_raw()).map(Socket::from_raw) } - /// Returns true if this socket is set to nonblocking mode, false otherwise. - /// - /// # Notes - /// - /// On Unix this corresponds to calling `fcntl` returning the value of - /// `O_NONBLOCK`. - /// - /// On Windows it is not possible retrieve the nonblocking mode status. - #[cfg(all(feature = "all", unix))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] - pub fn nonblocking(&self) -> io::Result { - sys::nonblocking(self.as_raw()) - } - - /// Moves this socket into or out of nonblocking mode. + /// Moves this TCP stream into or out of nonblocking mode. /// /// # Notes /// @@ -390,7 +361,6 @@ /// /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value. - #[doc = man_links!(shutdown(2))] pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { sys::shutdown(self.as_raw(), how) } @@ -400,7 +370,6 @@ /// /// The [`connect`] method will connect this socket to a remote address. /// This method might fail if the socket is not connected. - #[doc = man_links!(recv(2))] /// /// [`connect`]: Socket::connect /// @@ -427,7 +396,6 @@ /// /// [`recv`]: Socket::recv /// [`out_of_band_inline`]: Socket::out_of_band_inline - #[cfg_attr(target_os = "redox", allow(rustdoc::broken_intra_doc_links))] pub fn recv_out_of_band(&self, buf: &mut [MaybeUninit]) -> io::Result { self.recv_with_flags(buf, sys::MSG_OOB) } @@ -453,7 +421,6 @@ /// In addition to the number of bytes read, this function returns the flags /// for the received message. See [`RecvFlags`] for more information about /// the returned flags. - #[doc = man_links!(recvmsg(2))] /// /// [`recv`]: Socket::recv /// [`connect`]: Socket::connect @@ -519,7 +486,6 @@ /// Receives data from the socket. 
On success, returns the number of bytes /// read and the address from whence the data came. - #[doc = man_links!(recvfrom(2))] /// /// # Safety /// @@ -546,7 +512,6 @@ /// Receives data from the socket. Returns the amount of bytes read, the /// [`RecvFlags`] and the remote address from the data is coming. Unlike /// [`recv_from`] this allows passing multiple buffers. - #[doc = man_links!(recvmsg(2))] /// /// [`recv_from`]: Socket::recv_from /// @@ -629,26 +594,12 @@ sys::peek_sender(self.as_raw()) } - /// Receive a message from a socket using a message structure. - /// - /// This is not supported on Windows as calling `WSARecvMsg` (the `recvmsg` - /// equivalent) is not straight forward on Windows. See - /// - /// for an example (in C++). - #[doc = man_links!(recvmsg(2))] - #[cfg(all(unix, not(target_os = "redox")))] - #[cfg_attr(docsrs, doc(cfg(all(unix, not(target_os = "redox")))))] - pub fn recvmsg(&self, msg: &mut MsgHdrMut<'_, '_, '_>, flags: sys::c_int) -> io::Result { - sys::recvmsg(self.as_raw(), msg, flags) - } - /// Sends data on the socket to a connected peer. /// /// This is typically used on TCP sockets or datagram sockets which have /// been connected. /// /// On success returns the number of bytes that were sent. - #[doc = man_links!(send(2))] pub fn send(&self, buf: &[u8]) -> io::Result { self.send_with_flags(buf, 0) } @@ -656,7 +607,7 @@ /// Identical to [`send`] but allows for specification of arbitrary flags to the underlying /// `send` call. /// - /// [`send`]: Socket::send + /// [`send`]: #method.send pub fn send_with_flags(&self, buf: &[u8], flags: c_int) -> io::Result { sys::send(self.as_raw(), buf, flags) } @@ -670,7 +621,6 @@ /// Identical to [`send_vectored`] but allows for specification of arbitrary /// flags to the underlying `sendmsg`/`WSASend` call. 
- #[doc = man_links!(sendmsg(2))] /// /// [`send_vectored`]: Socket::send_vectored #[cfg(not(target_os = "redox"))] @@ -688,9 +638,8 @@ /// /// For more information, see [`send`], [`out_of_band_inline`]. /// - /// [`send`]: Socket::send - /// [`out_of_band_inline`]: Socket::out_of_band_inline - #[cfg_attr(target_os = "redox", allow(rustdoc::broken_intra_doc_links))] + /// [`send`]: #method.send + /// [`out_of_band_inline`]: #method.out_of_band_inline pub fn send_out_of_band(&self, buf: &[u8]) -> io::Result { self.send_with_flags(buf, sys::MSG_OOB) } @@ -699,7 +648,6 @@ /// number of bytes written. /// /// This is typically used on UDP or datagram-oriented sockets. - #[doc = man_links!(sendto(2))] pub fn send_to(&self, buf: &[u8], addr: &SockAddr) -> io::Result { self.send_to_with_flags(buf, addr, 0) } @@ -719,7 +667,6 @@ /// Send data to a peer listening on `addr`. Returns the amount of bytes /// written. - #[doc = man_links!(sendmsg(2))] #[cfg(not(target_os = "redox"))] #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] pub fn send_to_vectored(&self, bufs: &[IoSlice<'_>], addr: &SockAddr) -> io::Result { @@ -740,20 +687,12 @@ ) -> io::Result { sys::send_to_vectored(self.as_raw(), bufs, addr, flags) } - - /// Send a message on a socket using a message structure. - #[doc = man_links!(sendmsg(2))] - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn sendmsg(&self, msg: &MsgHdr<'_, '_, '_>, flags: sys::c_int) -> io::Result { - sys::sendmsg(self.as_raw(), msg, flags) - } } /// Set `SOCK_CLOEXEC` and `NO_HANDLE_INHERIT` on the `ty`pe on platforms that /// support it. #[inline(always)] -const fn set_common_type(ty: Type) -> Type { +fn set_common_type(ty: Type) -> Type { // On platforms that support it set `SOCK_CLOEXEC`. 
#[cfg(any( target_os = "android", @@ -790,18 +729,12 @@ target_os = "linux", target_os = "netbsd", target_os = "openbsd", - target_os = "espidf", )) ))] socket._set_cloexec(true)?; // On Apple platforms set `NOSIGPIPE`. - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] + #[cfg(target_vendor = "apple")] socket._set_nosigpipe(true)?; Ok(socket) @@ -1077,7 +1010,7 @@ } } -const fn from_linger(linger: sys::linger) -> Option { +fn from_linger(linger: sys::linger) -> Option { if linger.l_onoff == 0 { None } else { @@ -1085,7 +1018,7 @@ } } -const fn into_linger(duration: Option) -> sys::linger { +fn into_linger(duration: Option) -> sys::linger { match duration { Some(duration) => sys::linger { l_onoff: 1, @@ -1109,11 +1042,8 @@ /// For more information about this option, see [`set_header_included`]. /// /// [`set_header_included`]: Socket::set_header_included - #[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))) - )] + #[cfg(all(feature = "all", not(target_os = "redox")))] + #[cfg_attr(docsrs, doc(all(feature = "all", not(target_os = "redox"))))] pub fn header_included(&self) -> io::Result { unsafe { getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_HDRINCL) @@ -1132,15 +1062,8 @@ /// [raw(7)]: https://man7.org/linux/man-pages/man7/raw.7.html /// [`IP_TTL`]: Socket::set_ttl /// [`IP_TOS`]: Socket::set_tos - #[cfg_attr( - any(target_os = "fuchsia", target_os = "illumos", target_os = "solaris"), - allow(rustdoc::broken_intra_doc_links) - )] - #[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))) - )] + #[cfg(all(feature = "all", not(target_os = "redox")))] + #[cfg_attr(docsrs, doc(all(feature = "all", not(target_os = "redox"))))] pub fn 
set_header_included(&self, included: bool) -> io::Result<()> { unsafe { setsockopt( @@ -1157,7 +1080,7 @@ /// For more information about this option, see [`set_ip_transparent`]. /// /// [`set_ip_transparent`]: Socket::set_ip_transparent - #[cfg(all(feature = "all", target_os = "linux"))] + #[cfg(any(doc, all(feature = "all", target_os = "linux")))] #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] pub fn ip_transparent(&self) -> io::Result { unsafe { @@ -1181,7 +1104,7 @@ /// /// TProxy redirection with the iptables TPROXY target also /// requires that this option be set on the redirected socket. - #[cfg(all(feature = "all", target_os = "linux"))] + #[cfg(any(doc, all(feature = "all", target_os = "linux")))] #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] pub fn set_ip_transparent(&self, transparent: bool) -> io::Result<()> { unsafe { @@ -1236,7 +1159,6 @@ /// the local interface with which the system should join the multicast /// group. See [`InterfaceIndexOrAddress`]. #[cfg(not(any( - target_os = "aix", target_os = "haiku", target_os = "illumos", target_os = "netbsd", @@ -1244,7 +1166,6 @@ target_os = "redox", target_os = "solaris", target_os = "nto", - target_os = "espidf", )))] pub fn join_multicast_v4_n( &self, @@ -1268,7 +1189,6 @@ /// /// [`join_multicast_v4_n`]: Socket::join_multicast_v4_n #[cfg(not(any( - target_os = "aix", target_os = "haiku", target_os = "illumos", target_os = "netbsd", @@ -1276,7 +1196,6 @@ target_os = "redox", target_os = "solaris", target_os = "nto", - target_os = "espidf", )))] pub fn leave_multicast_v4_n( &self, @@ -1309,7 +1228,6 @@ target_os = "redox", target_os = "fuchsia", target_os = "nto", - target_os = "espidf", )))] pub fn join_ssm_v4( &self, @@ -1345,7 +1263,6 @@ target_os = "redox", target_os = "fuchsia", target_os = "nto", - target_os = "espidf", )))] pub fn leave_ssm_v4( &self, @@ -1509,11 +1426,10 @@ /// Set the value of the `IP_RECVTOS` option for this socket. 
/// - /// If enabled, the `IP_TOS` ancillary message is passed with + /// If enabled, the IP_TOS ancillary message is passed with /// incoming packets. It contains a byte which specifies the /// Type of Service/Precedence field of the packet header. #[cfg(not(any( - target_os = "aix", target_os = "dragonfly", target_os = "fuchsia", target_os = "illumos", @@ -1521,11 +1437,12 @@ target_os = "openbsd", target_os = "redox", target_os = "solaris", - target_os = "haiku", + target_os = "windows", target_os = "nto", - target_os = "espidf", )))] pub fn set_recv_tos(&self, recv_tos: bool) -> io::Result<()> { + let recv_tos = if recv_tos { 1 } else { 0 }; + unsafe { setsockopt( self.as_raw(), @@ -1542,7 +1459,6 @@ /// /// [`set_recv_tos`]: Socket::set_recv_tos #[cfg(not(any( - target_os = "aix", target_os = "dragonfly", target_os = "fuchsia", target_os = "illumos", @@ -1550,9 +1466,8 @@ target_os = "openbsd", target_os = "redox", target_os = "solaris", - target_os = "haiku", + target_os = "windows", target_os = "nto", - target_os = "espidf", )))] pub fn recv_tos(&self) -> io::Result { unsafe { @@ -1753,56 +1668,6 @@ ) } } - - /// Get the value of the `IPV6_RECVTCLASS` option for this socket. - /// - /// For more information about this option, see [`set_recv_tclass_v6`]. - /// - /// [`set_recv_tclass_v6`]: Socket::set_recv_tclass_v6 - #[cfg(not(any( - target_os = "dragonfly", - target_os = "fuchsia", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "haiku", - target_os = "espidf", - )))] - pub fn recv_tclass_v6(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_RECVTCLASS) - .map(|recv_tclass| recv_tclass > 0) - } - } - - /// Set the value of the `IPV6_RECVTCLASS` option for this socket. - /// - /// If enabled, the `IPV6_TCLASS` ancillary message is passed with incoming - /// packets. 
It contains a byte which specifies the traffic class field of - /// the packet header. - #[cfg(not(any( - target_os = "dragonfly", - target_os = "fuchsia", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "haiku", - target_os = "espidf", - )))] - pub fn set_recv_tclass_v6(&self, recv_tclass: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IPV6, - sys::IPV6_RECVTCLASS, - recv_tclass as c_int, - ) - } - } } /// Socket options for TCP sockets, get/set using `IPPROTO_TCP`. @@ -1815,9 +1680,12 @@ /// /// This returns the value of `TCP_KEEPALIVE` on macOS and iOS and `TCP_KEEPIDLE` on all other /// supported Unix operating systems. - #[cfg(all( - feature = "all", - not(any(windows, target_os = "haiku", target_os = "openbsd")) + #[cfg(any( + doc, + all( + feature = "all", + not(any(windows, target_os = "haiku", target_os = "openbsd")) + ) ))] #[cfg_attr( docsrs, @@ -1838,17 +1706,15 @@ #[cfg(all( feature = "all", any( + doc, target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", - target_os = "ios", target_os = "linux", - target_os = "macos", target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))] #[cfg_attr( @@ -1861,12 +1727,9 @@ target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", - target_os = "ios", target_os = "linux", - target_os = "macos", target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))) )] @@ -1885,17 +1748,15 @@ #[cfg(all( feature = "all", any( + doc, target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", - target_os = "ios", target_os = "linux", - target_os = "macos", target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))] #[cfg_attr( @@ -1908,12 +1769,9 @@ 
target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", - target_os = "ios", target_os = "linux", - target_os = "macos", target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))) )] diff -Nru s390-tools-2.31.0/rust-vendor/socket2/src/sockref.rs s390-tools-2.33.1/rust-vendor/socket2/src/sockref.rs --- s390-tools-2.31.0/rust-vendor/socket2/src/sockref.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2/src/sockref.rs 2024-05-28 11:57:36.000000000 +0200 @@ -3,9 +3,9 @@ use std::mem::ManuallyDrop; use std::ops::Deref; #[cfg(unix)] -use std::os::unix::io::{AsFd, AsRawFd, FromRawFd}; +use std::os::unix::io::{AsRawFd, FromRawFd}; #[cfg(windows)] -use std::os::windows::io::{AsRawSocket, AsSocket, FromRawSocket}; +use std::os::windows::io::{AsRawSocket, FromRawSocket}; use crate::Socket; @@ -15,13 +15,14 @@ /// This allows for example a [`TcpStream`], found in the standard library, to /// be configured using all the additional methods found in the [`Socket`] API. /// -/// `SockRef` can be created from any socket type that implements [`AsFd`] -/// (Unix) or [`AsSocket`] (Windows) using the [`From`] implementation. +/// `SockRef` can be created from any socket type that implements [`AsRawFd`] +/// (Unix) or [`AsRawSocket`] (Windows) using the [`From`] implementation, but +/// the caller must ensure the file descriptor/socket is a valid. /// /// [`TcpStream`]: std::net::TcpStream // Don't use intra-doc links because they won't build on every platform. 
-/// [`AsFd`]: https://doc.rust-lang.org/stable/std/os/unix/io/trait.AsFd.html -/// [`AsSocket`]: https://doc.rust-lang.org/stable/std/os/windows/io/trait.AsSocket.html +/// [`AsRawFd`]: https://doc.rust-lang.org/stable/std/os/unix/io/trait.AsRawFd.html +/// [`AsRawSocket`]: https://doc.rust-lang.org/stable/std/os/windows/io/trait.AsRawSocket.html /// /// # Examples /// @@ -58,6 +59,29 @@ /// # Ok(()) /// # } /// ``` +/// +/// Below is an example of **incorrect usage** of `SockRef::from`, which is +/// currently possible (but not intended and will be fixed in future versions). +/// +/// ```compile_fail +/// use socket2::SockRef; +/// +/// # fn main() -> Result<(), Box> { +/// /// THIS USAGE IS NOT VALID! +/// let socket_ref = SockRef::from(&123); +/// // The above line is overseen possibility when using `SockRef::from`, it +/// // uses the `RawFd` (on Unix), which is a type alias for `c_int`/`i32`, +/// // which implements `AsRawFd`. However it may be clear that this usage is +/// // invalid as it doesn't guarantee that `123` is a valid file descriptor. +/// +/// // Using `Socket::set_nodelay` now will call it on a file descriptor we +/// // don't own! We don't even not if the file descriptor is valid or a socket. +/// socket_ref.set_nodelay(true)?; +/// drop(socket_ref); +/// # Ok(()) +/// # } +/// # DO_NOT_COMPILE +/// ``` pub struct SockRef<'s> { /// Because this is a reference we don't own the `Socket`, however `Socket` /// closes itself when dropped, so we use `ManuallyDrop` to prevent it from @@ -76,16 +100,16 @@ } } -/// On Windows, a corresponding `From<&impl AsSocket>` implementation exists. +/// On Windows, a corresponding `From<&impl AsRawSocket>` implementation exists. #[cfg(unix)] #[cfg_attr(docsrs, doc(cfg(unix)))] impl<'s, S> From<&'s S> for SockRef<'s> where - S: AsFd, + S: AsRawFd, { /// The caller must ensure `S` is actually a socket. 
fn from(socket: &'s S) -> Self { - let fd = socket.as_fd().as_raw_fd(); + let fd = socket.as_raw_fd(); assert!(fd >= 0); SockRef { socket: ManuallyDrop::new(unsafe { Socket::from_raw_fd(fd) }), @@ -94,17 +118,17 @@ } } -/// On Unix, a corresponding `From<&impl AsFd>` implementation exists. +/// On Unix, a corresponding `From<&impl AsRawFd>` implementation exists. #[cfg(windows)] #[cfg_attr(docsrs, doc(cfg(windows)))] impl<'s, S> From<&'s S> for SockRef<'s> where - S: AsSocket, + S: AsRawSocket, { - /// See the `From<&impl AsFd>` implementation. + /// See the `From<&impl AsRawFd>` implementation. fn from(socket: &'s S) -> Self { - let socket = socket.as_socket().as_raw_socket(); - assert!(socket != windows_sys::Win32::Networking::WinSock::INVALID_SOCKET as _); + let socket = socket.as_raw_socket(); + assert!(socket != winapi::um::winsock2::INVALID_SOCKET as _); SockRef { socket: ManuallyDrop::new(unsafe { Socket::from_raw_socket(socket) }), _lifetime: PhantomData, diff -Nru s390-tools-2.31.0/rust-vendor/socket2/src/sys/unix.rs s390-tools-2.33.1/rust-vendor/socket2/src/sys/unix.rs --- s390-tools-2.31.0/rust-vendor/socket2/src/sys/unix.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2/src/sys/unix.rs 2024-05-28 11:57:36.000000000 +0200 @@ -7,133 +7,79 @@ // except according to those terms. 
use std::cmp::min; -use std::ffi::OsStr; #[cfg(not(target_os = "redox"))] use std::io::IoSlice; use std::marker::PhantomData; use std::mem::{self, size_of, MaybeUninit}; use std::net::Shutdown; use std::net::{Ipv4Addr, Ipv6Addr}; -#[cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) -))] +#[cfg(all(feature = "all", target_vendor = "apple"))] use std::num::NonZeroU32; #[cfg(all( feature = "all", any( - target_os = "aix", target_os = "android", target_os = "freebsd", - target_os = "ios", target_os = "linux", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))] use std::num::NonZeroUsize; +#[cfg(feature = "all")] use std::os::unix::ffi::OsStrExt; #[cfg(all( feature = "all", any( - target_os = "aix", target_os = "android", target_os = "freebsd", - target_os = "ios", target_os = "linux", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))] use std::os::unix::io::RawFd; -use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd}; +use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd}; #[cfg(feature = "all")] use std::os::unix::net::{UnixDatagram, UnixListener, UnixStream}; +#[cfg(feature = "all")] use std::path::Path; +#[cfg(not(all(target_os = "redox", not(feature = "all"))))] use std::ptr; use std::time::{Duration, Instant}; use std::{io, slice}; -#[cfg(not(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", -)))] +#[cfg(not(target_vendor = "apple"))] use libc::ssize_t; -use libc::{in6_addr, in_addr}; +use libc::{c_void, in6_addr, in_addr}; -use crate::{Domain, Protocol, SockAddr, TcpKeepalive, Type}; #[cfg(not(target_os = "redox"))] -use crate::{MsgHdr, MsgHdrMut, RecvFlags}; +use crate::RecvFlags; +use crate::{Domain, Protocol, SockAddr, TcpKeepalive, Type}; pub(crate) use libc::c_int; // Used in `Domain`. 
-pub(crate) use libc::{AF_INET, AF_INET6, AF_UNIX}; +pub(crate) use libc::{AF_INET, AF_INET6}; // Used in `Type`. -#[cfg(all(feature = "all", target_os = "linux"))] -pub(crate) use libc::SOCK_DCCP; -#[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))] +#[cfg(all(feature = "all", not(target_os = "redox")))] pub(crate) use libc::SOCK_RAW; -#[cfg(all(feature = "all", not(target_os = "espidf")))] +#[cfg(feature = "all")] pub(crate) use libc::SOCK_SEQPACKET; pub(crate) use libc::{SOCK_DGRAM, SOCK_STREAM}; // Used in `Protocol`. -#[cfg(all(feature = "all", target_os = "linux"))] -pub(crate) use libc::IPPROTO_DCCP; -#[cfg(target_os = "linux")] -pub(crate) use libc::IPPROTO_MPTCP; -#[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))] -pub(crate) use libc::IPPROTO_SCTP; -#[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - ) -))] -pub(crate) use libc::IPPROTO_UDPLITE; pub(crate) use libc::{IPPROTO_ICMP, IPPROTO_ICMPV6, IPPROTO_TCP, IPPROTO_UDP}; // Used in `SockAddr`. -#[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "openbsd")))] -pub(crate) use libc::IPPROTO_DIVERT; pub(crate) use libc::{ sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, socklen_t, }; // Used in `RecvFlags`. -#[cfg(not(any(target_os = "redox", target_os = "espidf")))] -pub(crate) use libc::MSG_TRUNC; #[cfg(not(target_os = "redox"))] -pub(crate) use libc::SO_OOBINLINE; +pub(crate) use libc::{MSG_TRUNC, SO_OOBINLINE}; // Used in `Socket`. 
#[cfg(not(target_os = "nto"))] pub(crate) use libc::ipv6_mreq as Ipv6Mreq; -#[cfg(not(any( - target_os = "dragonfly", - target_os = "fuchsia", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "haiku", - target_os = "espidf", -)))] -pub(crate) use libc::IPV6_RECVTCLASS; -#[cfg(all(feature = "all", not(any(target_os = "redox", target_os = "espidf"))))] +#[cfg(all(feature = "all", not(target_os = "redox")))] pub(crate) use libc::IP_HDRINCL; #[cfg(not(any( - target_os = "aix", target_os = "dragonfly", target_os = "fuchsia", target_os = "illumos", @@ -143,7 +89,6 @@ target_os = "solaris", target_os = "haiku", target_os = "nto", - target_os = "espidf", )))] pub(crate) use libc::IP_RECVTOS; #[cfg(not(any( @@ -153,19 +98,9 @@ target_os = "illumos", )))] pub(crate) use libc::IP_TOS; -#[cfg(not(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", -)))] +#[cfg(not(target_vendor = "apple"))] pub(crate) use libc::SO_LINGER; -#[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", -))] +#[cfg(target_vendor = "apple")] pub(crate) use libc::SO_LINGER_SEC as SO_LINGER; pub(crate) use libc::{ ip_mreq as IpMreq, linger, IPPROTO_IP, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, IPV6_MULTICAST_IF, @@ -182,7 +117,6 @@ target_os = "redox", target_os = "fuchsia", target_os = "nto", - target_os = "espidf", )))] pub(crate) use libc::{ ip_mreq_source as IpMreqSource, IP_ADD_SOURCE_MEMBERSHIP, IP_DROP_SOURCE_MEMBERSHIP, @@ -192,14 +126,11 @@ target_os = "freebsd", target_os = "haiku", target_os = "illumos", - target_os = "ios", - target_os = "macos", target_os = "netbsd", - target_os = "nto", target_os = "openbsd", target_os = "solaris", - target_os = "tvos", - target_os = "watchos", + target_os = "nto", + target_vendor = "apple" )))] pub(crate) use libc::{IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP}; #[cfg(any( @@ -207,13 +138,10 
@@ target_os = "freebsd", target_os = "haiku", target_os = "illumos", - target_os = "ios", - target_os = "macos", target_os = "netbsd", target_os = "openbsd", target_os = "solaris", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ))] pub(crate) use libc::{ IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP, IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP, @@ -226,12 +154,9 @@ target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", - target_os = "ios", target_os = "linux", - target_os = "macos", target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))] pub(crate) use libc::{TCP_KEEPCNT, TCP_KEEPINTVL}; @@ -239,22 +164,13 @@ // See this type in the Windows file. pub(crate) type Bool = c_int; -#[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "nto", - target_os = "tvos", - target_os = "watchos", -))] +#[cfg(any(target_vendor = "apple", target_os = "nto"))] use libc::TCP_KEEPALIVE as KEEPALIVE_TIME; #[cfg(not(any( + target_vendor = "apple", target_os = "haiku", - target_os = "ios", - target_os = "macos", - target_os = "nto", target_os = "openbsd", - target_os = "tvos", - target_os = "watchos", + target_os = "nto", )))] use libc::TCP_KEEPIDLE as KEEPALIVE_TIME; @@ -272,13 +188,8 @@ } /// Maximum size of a buffer passed to system call like `recv` and `send`. -#[cfg(not(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", -)))] -const MAX_BUF_LEN: usize = ssize_t::MAX as usize; +#[cfg(not(target_vendor = "apple"))] +const MAX_BUF_LEN: usize = ::max_value() as usize; // The maximum read limit on most posix-like systems is `SSIZE_MAX`, with the // man page quoting that if the count of bytes to read is greater than @@ -288,17 +199,8 @@ // intentionally showing odd behavior by rejecting any read with a size larger // than or equal to INT_MAX. To handle both of these the read size is capped on // both platforms. 
-#[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", -))] -const MAX_BUF_LEN: usize = c_int::MAX as usize - 1; - -// TCP_CA_NAME_MAX isn't defined in user space include files(not in libc) -#[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))] -const TCP_CA_NAME_MAX: usize = 16; +#[cfg(target_vendor = "apple")] +const MAX_BUF_LEN: usize = ::max_value() as usize - 1; #[cfg(any( all( @@ -320,26 +222,25 @@ all(target_env = "uclibc", target_pointer_width = "32") ) ), - target_os = "aix", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "haiku", target_os = "illumos", - target_os = "ios", - target_os = "macos", target_os = "netbsd", - target_os = "nto", target_os = "openbsd", target_os = "solaris", - target_os = "tvos", - target_os = "watchos", - target_os = "espidf", + target_os = "nto", + target_vendor = "apple", ))] type IovLen = c_int; /// Unix only API. impl Domain { + /// Domain for Unix socket communication, corresponding to `AF_UNIX`. + #[cfg_attr(docsrs, doc(cfg(unix)))] + pub const UNIX: Domain = Domain(libc::AF_UNIX); + /// Domain for low-level packet interface, corresponding to `AF_PACKET`. 
#[cfg(all( feature = "all", @@ -427,9 +328,7 @@ target_os = "illumos", target_os = "linux", target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", + target_os = "openbsd" ) ))] #[cfg_attr( @@ -444,9 +343,7 @@ target_os = "illumos", target_os = "linux", target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", + target_os = "openbsd" ) ))) )] @@ -462,9 +359,7 @@ target_os = "illumos", target_os = "linux", target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", + target_os = "openbsd" ))] pub(crate) const fn _cloexec(self) -> Type { Type(self.0 | libc::SOCK_CLOEXEC) @@ -475,13 +370,10 @@ Type, libc::SOCK_STREAM, libc::SOCK_DGRAM, - #[cfg(all(feature = "all", target_os = "linux"))] - libc::SOCK_DCCP, - #[cfg(not(any(target_os = "redox", target_os = "espidf")))] + #[cfg(not(target_os = "redox"))] libc::SOCK_RAW, - #[cfg(not(any(target_os = "redox", target_os = "haiku", target_os = "espidf")))] + #[cfg(not(any(target_os = "redox", target_os = "haiku")))] libc::SOCK_RDM, - #[cfg(not(target_os = "espidf"))] libc::SOCK_SEQPACKET, /* TODO: add these optional bit OR-ed flags: #[cfg(any( @@ -513,24 +405,6 @@ libc::IPPROTO_ICMPV6, libc::IPPROTO_TCP, libc::IPPROTO_UDP, - #[cfg(target_os = "linux")] - libc::IPPROTO_MPTCP, - #[cfg(all(feature = "all", target_os = "linux"))] - libc::IPPROTO_DCCP, - #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))] - libc::IPPROTO_SCTP, - #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - ) - ))] - libc::IPPROTO_UDPLITE, - #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "openbsd")))] - libc::IPPROTO_DIVERT, ); /// Unix-only API. @@ -538,14 +412,11 @@ impl RecvFlags { /// Check if the message terminates a record. /// - /// Not all socket types support the notion of records. 
For socket types - /// that do support it (such as [`SEQPACKET`]), a record is terminated by - /// sending a message with the end-of-record flag set. + /// Not all socket types support the notion of records. + /// For socket types that do support it (such as [`SEQPACKET`][Type::SEQPACKET]), + /// a record is terminated by sending a message with the end-of-record flag set. /// - /// On Unix this corresponds to the `MSG_EOR` flag. - /// - /// [`SEQPACKET`]: Type::SEQPACKET - #[cfg(not(target_os = "espidf"))] + /// On Unix this corresponds to the MSG_EOR flag. pub const fn is_end_of_record(self) -> bool { self.0 & libc::MSG_EOR != 0 } @@ -555,7 +426,7 @@ /// This is useful for protocols where you receive out-of-band data /// mixed in with the normal data stream. /// - /// On Unix this corresponds to the `MSG_OOB` flag. + /// On Unix this corresponds to the MSG_OOB flag. pub const fn is_out_of_band(self) -> bool { self.0 & libc::MSG_OOB != 0 } @@ -564,13 +435,11 @@ #[cfg(not(target_os = "redox"))] impl std::fmt::Debug for RecvFlags { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut s = f.debug_struct("RecvFlags"); - #[cfg(not(target_os = "espidf"))] - s.field("is_end_of_record", &self.is_end_of_record()); - s.field("is_out_of_band", &self.is_out_of_band()); - #[cfg(not(target_os = "espidf"))] - s.field("is_truncated", &self.is_truncated()); - s.finish() + f.debug_struct("RecvFlags") + .field("is_end_of_record", &self.is_end_of_record()) + .field("is_out_of_band", &self.is_out_of_band()) + .field("is_truncated", &self.is_truncated()) + .finish() } } @@ -604,92 +473,72 @@ } } -/// Returns the offset of the `sun_path` member of the passed unix socket address. 
-pub(crate) fn offset_of_path(storage: &libc::sockaddr_un) -> usize { - let base = storage as *const _ as usize; - let path = ptr::addr_of!(storage.sun_path) as usize; - path - base -} - -#[allow(unsafe_op_in_unsafe_fn)] -pub(crate) fn unix_sockaddr(path: &Path) -> io::Result { - // SAFETY: a `sockaddr_storage` of all zeros is valid. - let mut storage = unsafe { mem::zeroed::() }; - let len = { - let storage = unsafe { &mut *ptr::addr_of_mut!(storage).cast::() }; - - let bytes = path.as_os_str().as_bytes(); - let too_long = match bytes.first() { - None => false, - // linux abstract namespaces aren't null-terminated - Some(&0) => bytes.len() > storage.sun_path.len(), - Some(_) => bytes.len() >= storage.sun_path.len(), - }; - if too_long { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "path must be shorter than SUN_LEN", - )); - } - - storage.sun_family = libc::AF_UNIX as sa_family_t; - // SAFETY: `bytes` and `addr.sun_path` are not overlapping and - // both point to valid memory. - // `storage` was initialized to zero above, so the path is - // already NULL terminated. - unsafe { - ptr::copy_nonoverlapping( - bytes.as_ptr(), - storage.sun_path.as_mut_ptr().cast(), - bytes.len(), - ); - } - - let sun_path_offset = offset_of_path(storage); - sun_path_offset - + bytes.len() - + match bytes.first() { - Some(&0) | None => 0, - Some(_) => 1, - } - }; - Ok(unsafe { SockAddr::new(storage, len as socklen_t) }) -} - -// Used in `MsgHdr`. -#[cfg(not(target_os = "redox"))] -pub(crate) use libc::msghdr; - -#[cfg(not(target_os = "redox"))] -pub(crate) fn set_msghdr_name(msg: &mut msghdr, name: &SockAddr) { - msg.msg_name = name.as_ptr() as *mut _; - msg.msg_namelen = name.len(); -} - -#[cfg(not(target_os = "redox"))] -#[allow(clippy::unnecessary_cast)] // IovLen type can be `usize`. 
-pub(crate) fn set_msghdr_iov(msg: &mut msghdr, ptr: *mut libc::iovec, len: usize) { - msg.msg_iov = ptr; - msg.msg_iovlen = min(len, IovLen::MAX as usize) as IovLen; -} - -#[cfg(not(target_os = "redox"))] -pub(crate) fn set_msghdr_control(msg: &mut msghdr, ptr: *mut libc::c_void, len: usize) { - msg.msg_control = ptr; - msg.msg_controllen = len as _; -} +/// Unix only API. +impl SockAddr { + /// Constructs a `SockAddr` with the family `AF_UNIX` and the provided path. + /// + /// # Failure + /// + /// Returns an error if the path is longer than `SUN_LEN`. + #[cfg(feature = "all")] + #[cfg_attr(docsrs, doc(cfg(all(unix, feature = "all"))))] + #[allow(unused_unsafe)] // TODO: replace with `unsafe_op_in_unsafe_fn` once stable. + pub fn unix

(path: P) -> io::Result + where + P: AsRef, + { + unsafe { + SockAddr::init(|storage, len| { + // Safety: `SockAddr::init` zeros the address, which is a valid + // representation. + let storage: &mut libc::sockaddr_un = unsafe { &mut *storage.cast() }; + let len: &mut socklen_t = unsafe { &mut *len }; + + let bytes = path.as_ref().as_os_str().as_bytes(); + let too_long = match bytes.first() { + None => false, + // linux abstract namespaces aren't null-terminated + Some(&0) => bytes.len() > storage.sun_path.len(), + Some(_) => bytes.len() >= storage.sun_path.len(), + }; + if too_long { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "path must be shorter than SUN_LEN", + )); + } -#[cfg(not(target_os = "redox"))] -pub(crate) fn set_msghdr_flags(msg: &mut msghdr, flags: libc::c_int) { - msg.msg_flags = flags; -} + storage.sun_family = libc::AF_UNIX as sa_family_t; + // Safety: `bytes` and `addr.sun_path` are not overlapping and + // both point to valid memory. + // `SockAddr::init` zeroes the memory, so the path is already + // null terminated. + unsafe { + ptr::copy_nonoverlapping( + bytes.as_ptr(), + storage.sun_path.as_mut_ptr() as *mut u8, + bytes.len(), + ) + }; + + let base = storage as *const _ as usize; + let path = &storage.sun_path as *const _ as usize; + let sun_path_offset = path - base; + let length = sun_path_offset + + bytes.len() + + match bytes.first() { + Some(&0) | None => 0, + Some(_) => 1, + }; + *len = length as socklen_t; -#[cfg(not(target_os = "redox"))] -pub(crate) fn msghdr_flags(msg: &msghdr) -> RecvFlags { - RecvFlags(msg.msg_flags) + Ok(()) + }) + } + .map(|(_, addr)| addr) + } } -/// Unix only API. impl SockAddr { /// Constructs a `SockAddr` with the family `AF_VSOCK` and the provided CID/port. /// @@ -697,23 +546,30 @@ /// /// This function can never fail. In a future version of this library it will be made /// infallible. 
- #[allow(unsafe_op_in_unsafe_fn)] + #[allow(unused_unsafe)] // TODO: replace with `unsafe_op_in_unsafe_fn` once stable. #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] #[cfg_attr( docsrs, doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) )] - pub fn vsock(cid: u32, port: u32) -> SockAddr { - // SAFETY: a `sockaddr_storage` of all zeros is valid. - let mut storage = unsafe { mem::zeroed::() }; - { - let storage: &mut libc::sockaddr_vm = - unsafe { &mut *((&mut storage as *mut sockaddr_storage).cast()) }; - storage.svm_family = libc::AF_VSOCK as sa_family_t; - storage.svm_cid = cid; - storage.svm_port = port; + pub fn vsock(cid: u32, port: u32) -> io::Result { + unsafe { + SockAddr::init(|storage, len| { + // Safety: `SockAddr::init` zeros the address, which is a valid + // representation. + let storage: &mut libc::sockaddr_vm = unsafe { &mut *storage.cast() }; + let len: &mut socklen_t = unsafe { &mut *len }; + + storage.svm_family = libc::AF_VSOCK as sa_family_t; + storage.svm_cid = cid; + storage.svm_port = port; + + *len = mem::size_of::() as socklen_t; + + Ok(()) + }) } - unsafe { SockAddr::new(storage, mem::size_of::() as socklen_t) } + .map(|(_, addr)| addr) } /// Returns this address VSOCK CID/port if it is in the `AF_VSOCK` family, @@ -723,7 +579,7 @@ docsrs, doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) )] - pub fn as_vsock_address(&self) -> Option<(u32, u32)> { + pub fn vsock_address(&self) -> Option<(u32, u32)> { if self.family() == libc::AF_VSOCK as sa_family_t { // Safety: if the ss_family field is AF_VSOCK then storage must be a sockaddr_vm. let addr = unsafe { &*(self.as_ptr() as *const libc::sockaddr_vm) }; @@ -732,100 +588,6 @@ None } } - - /// Returns true if this address is an unnamed address from the `AF_UNIX` family (for local - /// interprocess communication), false otherwise. 
- pub fn is_unnamed(&self) -> bool { - self.as_sockaddr_un() - .map(|storage| { - self.len() == offset_of_path(storage) as _ - // On some non-linux platforms a zeroed path is returned for unnamed. - // Abstract addresses only exist on Linux. - // NOTE: although Fuchsia does define `AF_UNIX` it's not actually implemented. - // See https://github.com/rust-lang/socket2/pull/403#discussion_r1123557978 - || (cfg!(not(any(target_os = "linux", target_os = "android"))) - && storage.sun_path[0] == 0) - }) - .unwrap_or_default() - } - - /// Returns the underlying `sockaddr_un` object if this addres is from the `AF_UNIX` family, - /// otherwise returns `None`. - pub(crate) fn as_sockaddr_un(&self) -> Option<&libc::sockaddr_un> { - self.is_unix().then(|| { - // SAFETY: if unix socket, i.e. the `ss_family` field is `AF_UNIX` then storage must be - // a `sockaddr_un`. - unsafe { &*self.as_ptr().cast::() } - }) - } - - /// Get the length of the path bytes of the address, not including the terminating or initial - /// (for abstract names) null byte. - /// - /// Should not be called on unnamed addresses. - fn path_len(&self, storage: &libc::sockaddr_un) -> usize { - debug_assert!(!self.is_unnamed()); - self.len() as usize - offset_of_path(storage) - 1 - } - - /// Get a u8 slice for the bytes of the pathname or abstract name. - /// - /// Should not be called on unnamed addresses. - fn path_bytes(&self, storage: &libc::sockaddr_un, abstract_name: bool) -> &[u8] { - debug_assert!(!self.is_unnamed()); - // SAFETY: the pointed objects of type `i8` have the same memory layout as `u8`. The path is - // the last field in the storage and so its length is equal to - // TOTAL_LENGTH - OFFSET_OF_PATH -1 - // Where the 1 is either a terminating null if we have a pathname address, or the initial - // null byte, if it's an abstract name address. In the latter case, the path bytes start - // after the initial null byte, hence the `offset`. 
- // There is no safe way to convert a `&[i8]` to `&[u8]` - unsafe { - slice::from_raw_parts( - (storage.sun_path.as_ptr() as *const u8).offset(abstract_name as isize), - self.path_len(storage), - ) - } - } - - /// Returns this address as Unix `SocketAddr` if it is an `AF_UNIX` pathname - /// address, otherwise returns `None`. - pub fn as_unix(&self) -> Option { - let path = self.as_pathname()?; - // SAFETY: we can represent this as a valid pathname, then so can the - // standard library. - Some(std::os::unix::net::SocketAddr::from_pathname(path).unwrap()) - } - - /// Returns this address as a `Path` reference if it is an `AF_UNIX` - /// pathname address, otherwise returns `None`. - pub fn as_pathname(&self) -> Option<&Path> { - self.as_sockaddr_un().and_then(|storage| { - (self.len() > offset_of_path(storage) as _ && storage.sun_path[0] != 0).then(|| { - let path_slice = self.path_bytes(storage, false); - Path::new::(OsStrExt::from_bytes(path_slice)) - }) - }) - } - - /// Returns this address as a slice of bytes representing an abstract address if it is an - /// `AF_UNIX` abstract address, otherwise returns `None`. - /// - /// Abstract addresses are a Linux extension, so this method returns `None` on all non-Linux - /// platforms. - pub fn as_abstract_namespace(&self) -> Option<&[u8]> { - // NOTE: although Fuchsia does define `AF_UNIX` it's not actually implemented. 
- // See https://github.com/rust-lang/socket2/pull/403#discussion_r1123557978 - #[cfg(any(target_os = "linux", target_os = "android"))] - { - self.as_sockaddr_un().and_then(|storage| { - (self.len() > offset_of_path(storage) as _ && storage.sun_path[0] == 0) - .then(|| self.path_bytes(storage, true)) - }) - } - #[cfg(not(any(target_os = "linux", target_os = "android")))] - None - } } pub(crate) type Socket = c_int; @@ -846,8 +608,7 @@ syscall!(socket(family, ty, protocol)) } -#[cfg(all(feature = "all", unix))] -#[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] +#[cfg(feature = "all")] pub(crate) fn socketpair(family: c_int, ty: c_int, protocol: c_int) -> io::Result<[Socket; 2]> { let mut fds = [0, 0]; syscall!(socketpair(family, ty, protocol, fds.as_mut_ptr())).map(|_| fds) @@ -877,7 +638,7 @@ } let timeout = (timeout - elapsed).as_millis(); - let timeout = timeout.clamp(1, c_int::MAX as u128) as c_int; + let timeout = clamp(timeout, 1, c_int::max_value() as u128) as c_int; match syscall!(poll(&mut pollfd, 1, timeout)) { Ok(0) => return Err(io::ErrorKind::TimedOut.into()), @@ -885,13 +646,14 @@ // Error or hang up indicates an error (or failure to connect). if (pollfd.revents & libc::POLLHUP) != 0 || (pollfd.revents & libc::POLLERR) != 0 { match socket.take_error() { - Ok(Some(err)) | Err(err) => return Err(err), + Ok(Some(err)) => return Err(err), Ok(None) => { return Err(io::Error::new( io::ErrorKind::Other, "no error set after POLLHUP", )) } + Err(err) => return Err(err), } } return Ok(()); @@ -903,24 +665,38 @@ } } +// TODO: use clamp from std lib, stable since 1.50. +fn clamp(value: T, min: T, max: T) -> T +where + T: Ord, +{ + if value <= min { + min + } else if value >= max { + max + } else { + value + } +} + pub(crate) fn listen(fd: Socket, backlog: c_int) -> io::Result<()> { syscall!(listen(fd, backlog)).map(|_| ()) } pub(crate) fn accept(fd: Socket) -> io::Result<(Socket, SockAddr)> { // Safety: `accept` initialises the `SockAddr` for us. 
- unsafe { SockAddr::try_init(|storage, len| syscall!(accept(fd, storage.cast(), len))) } + unsafe { SockAddr::init(|storage, len| syscall!(accept(fd, storage.cast(), len))) } } pub(crate) fn getsockname(fd: Socket) -> io::Result { // Safety: `accept` initialises the `SockAddr` for us. - unsafe { SockAddr::try_init(|storage, len| syscall!(getsockname(fd, storage.cast(), len))) } + unsafe { SockAddr::init(|storage, len| syscall!(getsockname(fd, storage.cast(), len))) } .map(|(_, addr)| addr) } pub(crate) fn getpeername(fd: Socket) -> io::Result { // Safety: `accept` initialises the `SockAddr` for us. - unsafe { SockAddr::try_init(|storage, len| syscall!(getpeername(fd, storage.cast(), len))) } + unsafe { SockAddr::init(|storage, len| syscall!(getpeername(fd, storage.cast(), len))) } .map(|(_, addr)| addr) } @@ -928,12 +704,6 @@ syscall!(fcntl(fd, libc::F_DUPFD_CLOEXEC, 0)) } -#[cfg(all(feature = "all", unix))] -pub(crate) fn nonblocking(fd: Socket) -> io::Result { - let file_status_flags = fcntl_get(fd, libc::F_GETFL)?; - Ok((file_status_flags & libc::O_NONBLOCK) != 0) -} - pub(crate) fn set_nonblocking(fd: Socket, nonblocking: bool) -> io::Result<()> { if nonblocking { fcntl_add(fd, libc::F_GETFL, libc::F_SETFL, libc::O_NONBLOCK) @@ -968,7 +738,7 @@ ) -> io::Result<(usize, SockAddr)> { // Safety: `recvfrom` initialises the `SockAddr` for us. 
unsafe { - SockAddr::try_init(|addr, addrlen| { + SockAddr::init(|addr, addrlen| { syscall!(recvfrom( fd, buf.as_mut_ptr().cast(), @@ -997,9 +767,7 @@ bufs: &mut [crate::MaybeUninitSlice<'_>], flags: c_int, ) -> io::Result<(usize, RecvFlags)> { - let mut msg = MsgHdrMut::new().with_buffers(bufs); - let n = recvmsg(fd, &mut msg, flags)?; - Ok((n, msg.flags())) + recvmsg(fd, ptr::null_mut(), bufs, flags).map(|(n, _, recv_flags)| (n, recv_flags)) } #[cfg(not(target_os = "redox"))] @@ -1008,29 +776,41 @@ bufs: &mut [crate::MaybeUninitSlice<'_>], flags: c_int, ) -> io::Result<(usize, RecvFlags, SockAddr)> { - let mut msg = MsgHdrMut::new().with_buffers(bufs); - // SAFETY: `recvmsg` initialises the address storage and we set the length + // Safety: `recvmsg` initialises the address storage and we set the length // manually. - let (n, addr) = unsafe { - SockAddr::try_init(|storage, len| { - msg.inner.msg_name = storage.cast(); - msg.inner.msg_namelen = *len; - let n = recvmsg(fd, &mut msg, flags)?; - // Set the correct address length. - *len = msg.inner.msg_namelen; - Ok(n) - })? - }; - Ok((n, msg.flags(), addr)) + unsafe { + SockAddr::init(|storage, len| { + recvmsg(fd, storage, bufs, flags).map(|(n, addrlen, recv_flags)| { + // Set the correct address length. + *len = addrlen; + (n, recv_flags) + }) + }) + } + .map(|((n, recv_flags), addr)| (n, recv_flags, addr)) } +/// Returns the (bytes received, sending address len, `RecvFlags`). #[cfg(not(target_os = "redox"))] -pub(crate) fn recvmsg( +fn recvmsg( fd: Socket, - msg: &mut MsgHdrMut<'_, '_, '_>, + msg_name: *mut sockaddr_storage, + bufs: &mut [crate::MaybeUninitSlice<'_>], flags: c_int, -) -> io::Result { - syscall!(recvmsg(fd, &mut msg.inner, flags)).map(|n| n as usize) +) -> io::Result<(usize, libc::socklen_t, RecvFlags)> { + let msg_namelen = if msg_name.is_null() { + 0 + } else { + size_of::() as libc::socklen_t + }; + // libc::msghdr contains unexported padding fields on Fuchsia. 
+ let mut msg: libc::msghdr = unsafe { mem::zeroed() }; + msg.msg_name = msg_name.cast(); + msg.msg_namelen = msg_namelen; + msg.msg_iov = bufs.as_mut_ptr().cast(); + msg.msg_iovlen = min(bufs.len(), IovLen::MAX as usize) as IovLen; + syscall!(recvmsg(fd, &mut msg, flags)) + .map(|n| (n as usize, msg.msg_namelen, RecvFlags(msg.msg_flags))) } pub(crate) fn send(fd: Socket, buf: &[u8], flags: c_int) -> io::Result { @@ -1045,8 +825,7 @@ #[cfg(not(target_os = "redox"))] pub(crate) fn send_vectored(fd: Socket, bufs: &[IoSlice<'_>], flags: c_int) -> io::Result { - let msg = MsgHdr::new().with_buffers(bufs); - sendmsg(fd, &msg, flags) + sendmsg(fd, ptr::null(), 0, bufs, flags) } pub(crate) fn send_to(fd: Socket, buf: &[u8], addr: &SockAddr, flags: c_int) -> io::Result { @@ -1068,13 +847,29 @@ addr: &SockAddr, flags: c_int, ) -> io::Result { - let msg = MsgHdr::new().with_addr(addr).with_buffers(bufs); - sendmsg(fd, &msg, flags) + sendmsg(fd, addr.as_storage_ptr(), addr.len(), bufs, flags) } +/// Returns the (bytes received, sending address len, `RecvFlags`). #[cfg(not(target_os = "redox"))] -pub(crate) fn sendmsg(fd: Socket, msg: &MsgHdr<'_, '_, '_>, flags: c_int) -> io::Result { - syscall!(sendmsg(fd, &msg.inner, flags)).map(|n| n as usize) +fn sendmsg( + fd: Socket, + msg_name: *const sockaddr_storage, + msg_namelen: socklen_t, + bufs: &[IoSlice<'_>], + flags: c_int, +) -> io::Result { + // libc::msghdr contains unexported padding fields on Fuchsia. + let mut msg: libc::msghdr = unsafe { mem::zeroed() }; + // Safety: we're creating a `*mut` pointer from a reference, which is UB + // once actually used. However the OS should not write to it in the + // `sendmsg` system call. + msg.msg_name = (msg_name as *mut sockaddr_storage).cast(); + msg.msg_namelen = msg_namelen; + // Safety: Same as above about `*const` -> `*mut`. 
+ msg.msg_iov = bufs.as_ptr() as *mut _; + msg.msg_iovlen = min(bufs.len(), IovLen::MAX as usize) as IovLen; + syscall!(sendmsg(fd, &msg, flags)).map(|n| n as usize) } /// Wrapper around `getsockopt` to deal with platform specific timeouts. @@ -1082,7 +877,7 @@ unsafe { getsockopt(fd, opt, val).map(from_timeval) } } -const fn from_timeval(duration: libc::timeval) -> Option { +fn from_timeval(duration: libc::timeval) -> Option { if duration.tv_sec == 0 && duration.tv_usec == 0 { None } else { @@ -1108,7 +903,7 @@ // https://github.com/rust-lang/libc/issues/1848 #[cfg_attr(target_env = "musl", allow(deprecated))] Some(duration) => libc::timeval { - tv_sec: min(duration.as_secs(), libc::time_t::MAX as u64) as libc::time_t, + tv_sec: min(duration.as_secs(), libc::time_t::max_value() as u64) as libc::time_t, tv_usec: duration.subsec_micros() as libc::suseconds_t, }, None => libc::timeval { @@ -1118,11 +913,8 @@ } } -#[cfg(all(feature = "all", not(any(target_os = "haiku", target_os = "openbsd"))))] -#[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", not(any(target_os = "haiku", target_os = "openbsd"))))) -)] +#[cfg(feature = "all")] +#[cfg(not(any(target_os = "haiku", target_os = "openbsd")))] pub(crate) fn keepalive_time(fd: Socket) -> io::Result { unsafe { getsockopt::(fd, IPPROTO_TCP, KEEPALIVE_TIME) @@ -1139,18 +931,14 @@ } #[cfg(any( - target_os = "aix", target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "fuchsia", target_os = "illumos", - target_os = "ios", target_os = "linux", - target_os = "macos", target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ))] { if let Some(interval) = keepalive.interval { @@ -1174,17 +962,12 @@ #[cfg(not(any(target_os = "haiku", target_os = "openbsd", target_os = "nto")))] fn into_secs(duration: Duration) -> c_int { - min(duration.as_secs(), c_int::MAX as u64) as c_int -} - -/// Get the flags using `cmd`. 
-fn fcntl_get(fd: Socket, cmd: c_int) -> io::Result { - syscall!(fcntl(fd, cmd)) + min(duration.as_secs(), c_int::max_value() as u64) as c_int } /// Add `flag` to the current set flags of `F_GETFD`. fn fcntl_add(fd: Socket, get_cmd: c_int, set_cmd: c_int, flag: c_int) -> io::Result<()> { - let previous = fcntl_get(fd, get_cmd)?; + let previous = syscall!(fcntl(fd, get_cmd))?; let new = previous | flag; if new != previous { syscall!(fcntl(fd, set_cmd, new)).map(|_| ()) @@ -1196,7 +979,7 @@ /// Remove `flag` to the current set flags of `F_GETFD`. fn fcntl_remove(fd: Socket, get_cmd: c_int, set_cmd: c_int, flag: c_int) -> io::Result<()> { - let previous = fcntl_get(fd, get_cmd)?; + let previous = syscall!(fcntl(fd, get_cmd))?; let new = previous & !flag; if new != previous { syscall!(fcntl(fd, set_cmd, new)).map(|_| ()) @@ -1231,7 +1014,7 @@ val: c_int, payload: T, ) -> io::Result<()> { - let payload = ptr::addr_of!(payload).cast(); + let payload = &payload as *const T as *const c_void; syscall!(setsockopt( fd, opt, @@ -1242,7 +1025,7 @@ .map(|_| ()) } -pub(crate) const fn to_in_addr(addr: &Ipv4Addr) -> in_addr { +pub(crate) fn to_in_addr(addr: &Ipv4Addr) -> in_addr { // `s_addr` is stored as BE on all machines, and the array is in BE order. // So the native endian conversion method is used so that it's never // swapped. 
@@ -1255,7 +1038,7 @@ Ipv4Addr::from(in_addr.s_addr.to_ne_bytes()) } -pub(crate) const fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr { +pub(crate) fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr { in6_addr { s6_addr: addr.octets(), } @@ -1266,7 +1049,6 @@ } #[cfg(not(any( - target_os = "aix", target_os = "haiku", target_os = "illumos", target_os = "netbsd", @@ -1274,9 +1056,8 @@ target_os = "redox", target_os = "solaris", target_os = "nto", - target_os = "espidf", )))] -pub(crate) const fn to_mreqn( +pub(crate) fn to_mreqn( multiaddr: &Ipv4Addr, interface: &crate::socket::InterfaceIndexOrAddress, ) -> libc::ip_mreqn { @@ -1303,7 +1084,6 @@ /// This function will block the calling thread until a new connection is /// established. When established, the corresponding `Socket` and the remote /// peer's address will be returned. - #[doc = man_links!(unix: accept4(2))] #[cfg(all( feature = "all", any( @@ -1314,7 +1094,7 @@ target_os = "illumos", target_os = "linux", target_os = "netbsd", - target_os = "openbsd", + target_os = "openbsd" ) ))] #[cfg_attr( @@ -1329,7 +1109,7 @@ target_os = "illumos", target_os = "linux", target_os = "netbsd", - target_os = "openbsd", + target_os = "openbsd" ) ))) )] @@ -1345,12 +1125,12 @@ target_os = "illumos", target_os = "linux", target_os = "netbsd", - target_os = "openbsd", + target_os = "openbsd" ))] pub(crate) fn _accept4(&self, flags: c_int) -> io::Result<(crate::Socket, SockAddr)> { // Safety: `accept4` initialises the `SockAddr` for us. unsafe { - SockAddr::try_init(|storage, len| { + SockAddr::init(|storage, len| { syscall!(accept4(self.as_raw(), storage.cast(), len, flags)) .map(crate::Socket::from_raw) }) @@ -1362,15 +1142,6 @@ /// # Notes /// /// On supported platforms you can use [`Type::cloexec`]. 
- #[cfg_attr( - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos" - ), - allow(rustdoc::broken_intra_doc_links) - )] #[cfg(feature = "all")] #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] pub fn set_cloexec(&self, close_on_exec: bool) -> io::Result<()> { @@ -1396,37 +1167,13 @@ } /// Sets `SO_NOSIGPIPE` on the socket. - #[cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))) - )] + #[cfg(all(feature = "all", any(doc, target_vendor = "apple")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_vendor = "apple"))))] pub fn set_nosigpipe(&self, nosigpipe: bool) -> io::Result<()> { self._set_nosigpipe(nosigpipe) } - #[cfg(any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ))] + #[cfg(target_vendor = "apple")] pub(crate) fn _set_nosigpipe(&self, nosigpipe: bool) -> io::Result<()> { unsafe { setsockopt( @@ -1474,7 +1221,6 @@ #[cfg(all( feature = "all", any( - target_os = "aix", target_os = "android", target_os = "freebsd", target_os = "fuchsia", @@ -1486,7 +1232,6 @@ doc(cfg(all( feature = "all", any( - target_os = "aix", target_os = "android", target_os = "freebsd", target_os = "fuchsia", @@ -1617,7 +1362,7 @@ /// /// For more information about this option, see [`set_cork`]. /// - /// [`set_cork`]: crate::Socket::set_cork + /// [`set_cork`]: Socket::set_cork #[cfg(all( feature = "all", any(target_os = "android", target_os = "fuchsia", target_os = "linux") @@ -1668,7 +1413,7 @@ /// /// For more information about this option, see [`set_quickack`]. 
/// - /// [`set_quickack`]: crate::Socket::set_quickack + /// [`set_quickack`]: Socket::set_quickack #[cfg(all( feature = "all", any(target_os = "android", target_os = "fuchsia", target_os = "linux") @@ -1719,7 +1464,7 @@ /// /// For more information about this option, see [`set_thin_linear_timeouts`]. /// - /// [`set_thin_linear_timeouts`]: crate::Socket::set_thin_linear_timeouts + /// [`set_thin_linear_timeouts`]: Socket::set_thin_linear_timeouts #[cfg(all( feature = "all", any(target_os = "android", target_os = "fuchsia", target_os = "linux") @@ -1841,8 +1586,8 @@ /// Sets the value for the `SO_SETFIB` option on this socket. /// /// Bind socket to the specified forwarding table (VRF) on a FreeBSD. - #[cfg(all(feature = "all", target_os = "freebsd"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "freebsd"))))] + #[cfg(all(feature = "all", any(target_os = "freebsd")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", any(target_os = "freebsd")))))] pub fn set_fib(&self, fib: u32) -> io::Result<()> { syscall!(setsockopt( self.as_raw(), @@ -1854,33 +1599,6 @@ .map(|_| ()) } - /// This method is deprecated, use [`crate::Socket::bind_device_by_index_v4`]. - #[cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))) - )] - #[deprecated = "Use `Socket::bind_device_by_index_v4` instead"] - pub fn bind_device_by_index(&self, interface: Option) -> io::Result<()> { - self.bind_device_by_index_v4(interface) - } - /// Sets the value for `IP_BOUND_IF` option on this socket. /// /// If a socket is bound to an interface, only packets received from that @@ -1889,162 +1607,28 @@ /// If `interface` is `None`, the binding is removed. If the `interface` /// index is not valid, an error is returned. 
/// - /// One can use [`libc::if_nametoindex`] to convert an interface alias to an + /// One can use `libc::if_nametoindex` to convert an interface alias to an /// index. - #[cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))) - )] - pub fn bind_device_by_index_v4(&self, interface: Option) -> io::Result<()> { - let index = interface.map_or(0, NonZeroU32::get); + #[cfg(all(feature = "all", target_vendor = "apple"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_vendor = "apple"))))] + pub fn bind_device_by_index(&self, interface: Option) -> io::Result<()> { + let index = interface.map(NonZeroU32::get).unwrap_or(0); unsafe { setsockopt(self.as_raw(), IPPROTO_IP, libc::IP_BOUND_IF, index) } } - /// Sets the value for `IPV6_BOUND_IF` option on this socket. - /// - /// If a socket is bound to an interface, only packets received from that - /// particular interface are processed by the socket. - /// - /// If `interface` is `None`, the binding is removed. If the `interface` - /// index is not valid, an error is returned. - /// - /// One can use [`libc::if_nametoindex`] to convert an interface alias to an - /// index. - #[cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))) - )] - pub fn bind_device_by_index_v6(&self, interface: Option) -> io::Result<()> { - let index = interface.map_or(0, NonZeroU32::get); - unsafe { setsockopt(self.as_raw(), IPPROTO_IPV6, libc::IPV6_BOUND_IF, index) } - } - /// Gets the value for `IP_BOUND_IF` option on this socket, i.e. 
the index /// for the interface to which the socket is bound. /// /// Returns `None` if the socket is not bound to any interface, otherwise /// returns an interface index. - #[cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))) - )] - pub fn device_index_v4(&self) -> io::Result> { + #[cfg(all(feature = "all", target_vendor = "apple"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_vendor = "apple"))))] + pub fn device_index(&self) -> io::Result> { let index = unsafe { getsockopt::(self.as_raw(), IPPROTO_IP, libc::IP_BOUND_IF)? }; Ok(NonZeroU32::new(index)) } - /// This method is deprecated, use [`crate::Socket::device_index_v4`]. - #[cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))) - )] - #[deprecated = "Use `Socket::device_index_v4` instead"] - pub fn device_index(&self) -> io::Result> { - self.device_index_v4() - } - - /// Gets the value for `IPV6_BOUND_IF` option on this socket, i.e. the index - /// for the interface to which the socket is bound. - /// - /// Returns `None` if the socket is not bound to any interface, otherwise - /// returns an interface index. 
- #[cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))) - )] - pub fn device_index_v6(&self) -> io::Result> { - let index = unsafe { - getsockopt::(self.as_raw(), IPPROTO_IPV6, libc::IPV6_BOUND_IF)? - }; - Ok(NonZeroU32::new(index)) - } - /// Get the value of the `SO_INCOMING_CPU` option on this socket. /// /// For more information about this option, see [`set_cpu_affinity`]. @@ -2127,35 +1711,6 @@ } } - /// Get the value of the `SO_REUSEPORT_LB` option on this socket. - /// - /// For more information about this option, see [`set_reuse_port_lb`]. - /// - /// [`set_reuse_port_lb`]: crate::Socket::set_reuse_port_lb - #[cfg(all(feature = "all", target_os = "freebsd"))] - pub fn reuse_port_lb(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_REUSEPORT_LB) - .map(|reuse| reuse != 0) - } - } - - /// Set value for the `SO_REUSEPORT_LB` option on this socket. - /// - /// This allows multiple programs or threads to bind to the same port and - /// incoming connections will be load balanced using a hash function. - #[cfg(all(feature = "all", target_os = "freebsd"))] - pub fn set_reuse_port_lb(&self, reuse: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_SOCKET, - libc::SO_REUSEPORT_LB, - reuse as c_int, - ) - } - } - /// Get the value of the `IP_FREEBIND` option on this socket. /// /// For more information about this option, see [`set_freebind`]. @@ -2273,62 +1828,6 @@ } } - /// Get the value for the `SO_ORIGINAL_DST` option on this socket. - /// - /// This value contains the original destination IPv4 address of the connection - /// redirected using `iptables` `REDIRECT` or `TPROXY`. 
- #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn original_dst(&self) -> io::Result { - // Safety: `getsockopt` initialises the `SockAddr` for us. - unsafe { - SockAddr::try_init(|storage, len| { - syscall!(getsockopt( - self.as_raw(), - libc::SOL_IP, - libc::SO_ORIGINAL_DST, - storage.cast(), - len - )) - }) - } - .map(|(_, addr)| addr) - } - - /// Get the value for the `IP6T_SO_ORIGINAL_DST` option on this socket. - /// - /// This value contains the original destination IPv6 address of the connection - /// redirected using `ip6tables` `REDIRECT` or `TPROXY`. - #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) - )] - pub fn original_dst_ipv6(&self) -> io::Result { - // Safety: `getsockopt` initialises the `SockAddr` for us. - unsafe { - SockAddr::try_init(|storage, len| { - syscall!(getsockopt( - self.as_raw(), - libc::SOL_IPV6, - libc::IP6T_SO_ORIGINAL_DST, - storage.cast(), - len - )) - }) - } - .map(|(_, addr)| addr) - } - /// Copies data between a `file` and this socket using the `sendfile(2)` /// system call. Because this copying is done within the kernel, /// `sendfile()` is more efficient than the combination of `read(2)` and @@ -2338,7 +1837,6 @@ /// Different OSs support different kinds of `file`s, see the OS /// documentation for what kind of files are supported. Generally *regular* /// files are supported by all OSs. - #[doc = man_links!(unix: sendfile(2))] /// /// The `offset` is the absolute offset into the `file` to use as starting /// point. 
@@ -2351,14 +1849,10 @@ #[cfg(all( feature = "all", any( - target_os = "aix", target_os = "android", target_os = "freebsd", - target_os = "ios", target_os = "linux", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))] #[cfg_attr( @@ -2366,14 +1860,10 @@ doc(cfg(all( feature = "all", any( - target_os = "aix", target_os = "android", target_os = "freebsd", - target_os = "ios", target_os = "linux", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", + target_vendor = "apple", ) ))) )] @@ -2389,15 +1879,7 @@ self._sendfile(file.as_raw_fd(), offset as _, length) } - #[cfg(all( - feature = "all", - any( - target_os = "ios", - target_os = "macos", - target_os = "tvos", - target_os = "watchos", - ) - ))] + #[cfg(all(feature = "all", target_vendor = "apple"))] fn _sendfile( &self, file: RawFd, @@ -2463,37 +1945,6 @@ .map(|_| sbytes as usize) } - #[cfg(all(feature = "all", target_os = "aix"))] - fn _sendfile( - &self, - file: RawFd, - offset: libc::off_t, - length: Option, - ) -> io::Result { - let nbytes = match length { - Some(n) => n.get() as i64, - None => -1, - }; - let mut params = libc::sf_parms { - header_data: ptr::null_mut(), - header_length: 0, - file_descriptor: file, - file_size: 0, - file_offset: offset as u64, - file_bytes: nbytes, - trailer_data: ptr::null_mut(), - trailer_length: 0, - bytes_sent: 0, - }; - // AIX doesn't support SF_REUSE, socket will be closed after successful transmission. - syscall!(send_file( - &mut self.as_raw() as *mut _, - &mut params as *mut _, - libc::SF_CLOSE as libc::c_uint, - )) - .map(|_| params.bytes_sent as usize) - } - /// Set the value of the `TCP_USER_TIMEOUT` option on this socket. 
/// /// If set, this specifies the maximum amount of time that transmitted data may remain @@ -2516,9 +1967,9 @@ ))) )] pub fn set_tcp_user_timeout(&self, timeout: Option) -> io::Result<()> { - let timeout = timeout.map_or(0, |to| { - min(to.as_millis(), libc::c_uint::MAX as u128) as libc::c_uint - }); + let timeout = timeout + .map(|to| min(to.as_millis(), libc::c_uint::MAX as u128) as libc::c_uint) + .unwrap_or(0); unsafe { setsockopt( self.as_raw(), @@ -2533,7 +1984,7 @@ /// /// For more information about this option, see [`set_tcp_user_timeout`]. /// - /// [`set_tcp_user_timeout`]: crate::Socket::set_tcp_user_timeout + /// [`set_tcp_user_timeout`]: Socket::set_tcp_user_timeout #[cfg(all( feature = "all", any(target_os = "android", target_os = "fuchsia", target_os = "linux") @@ -2584,414 +2035,10 @@ /// Detach Berkeley Packet Filter(BPF) from this socket. /// /// For more information about this option, see [`attach_filter`] - /// - /// [`attach_filter`]: crate::Socket::attach_filter #[cfg(all(feature = "all", any(target_os = "linux", target_os = "android")))] pub fn detach_filter(&self) -> io::Result<()> { unsafe { setsockopt(self.as_raw(), libc::SOL_SOCKET, libc::SO_DETACH_FILTER, 0) } } - - /// Gets the value for the `SO_COOKIE` option on this socket. - /// - /// The socket cookie is a unique, kernel-managed identifier tied to each socket. - /// Therefore, there is no corresponding `set` helper. - /// - /// For more information about this option, see [Linux patch](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5daab9db7b65df87da26fd8cfa695fb9546a1ddb) - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn cookie(&self) -> io::Result { - unsafe { getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_COOKIE) } - } - - /// Get the value of the `IPV6_TCLASS` option for this socket. 
- /// - /// For more information about this option, see [`set_tclass_v6`]. - /// - /// [`set_tclass_v6`]: crate::Socket::set_tclass_v6 - #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd" - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd" - ) - ))) - )] - pub fn tclass_v6(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), IPPROTO_IPV6, libc::IPV6_TCLASS) - .map(|tclass| tclass as u32) - } - } - - /// Set the value of the `IPV6_TCLASS` option for this socket. - /// - /// Specifies the traffic class field that is used in every packets - /// sent from this socket. - #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd" - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd" - ) - ))) - )] - pub fn set_tclass_v6(&self, tclass: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - IPPROTO_IPV6, - libc::IPV6_TCLASS, - tclass as c_int, - ) - } - } - - /// Get the value of the `TCP_CONGESTION` option for this socket. - /// - /// For more information about this option, see [`set_tcp_congestion`]. 
- /// - /// [`set_tcp_congestion`]: crate::Socket::set_tcp_congestion - #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))) - )] - pub fn tcp_congestion(&self) -> io::Result> { - let mut payload: [u8; TCP_CA_NAME_MAX] = [0; TCP_CA_NAME_MAX]; - let mut len = payload.len() as libc::socklen_t; - syscall!(getsockopt( - self.as_raw(), - IPPROTO_TCP, - libc::TCP_CONGESTION, - payload.as_mut_ptr().cast(), - &mut len, - )) - .map(|_| payload[..len as usize].to_vec()) - } - - /// Set the value of the `TCP_CONGESTION` option for this socket. - /// - /// Specifies the TCP congestion control algorithm to use for this socket. - /// - /// The value must be a valid TCP congestion control algorithm name of the - /// platform. For example, Linux may supports "reno", "cubic". - #[cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", any(target_os = "freebsd", target_os = "linux")))) - )] - pub fn set_tcp_congestion(&self, tcp_ca_name: &[u8]) -> io::Result<()> { - syscall!(setsockopt( - self.as_raw(), - IPPROTO_TCP, - libc::TCP_CONGESTION, - tcp_ca_name.as_ptr() as *const _, - tcp_ca_name.len() as libc::socklen_t, - )) - .map(|_| ()) - } - - /// Set value for the `DCCP_SOCKOPT_SERVICE` option on this socket. - /// - /// Sets the DCCP service. The specification mandates use of service codes. - /// If this socket option is not set, the socket will fall back to 0 (which - /// means that no meaningful service code is present). On active sockets - /// this is set before [`connect`]. 
On passive sockets up to 32 service - /// codes can be set before calling [`bind`] - /// - /// [`connect`]: crate::Socket::connect - /// [`bind`]: crate::Socket::bind - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn set_dccp_service(&self, code: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_DCCP, - libc::DCCP_SOCKOPT_SERVICE, - code, - ) - } - } - - /// Get the value of the `DCCP_SOCKOPT_SERVICE` option on this socket. - /// - /// For more information about this option see [`set_dccp_service`] - /// - /// [`set_dccp_service`]: crate::Socket::set_dccp_service - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn dccp_service(&self) -> io::Result { - unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_SERVICE) } - } - - /// Set value for the `DCCP_SOCKOPT_CCID` option on this socket. - /// - /// This option sets both the TX and RX CCIDs at the same time. - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn set_dccp_ccid(&self, ccid: u8) -> io::Result<()> { - unsafe { setsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_CCID, ccid) } - } - - /// Get the value of the `DCCP_SOCKOPT_TX_CCID` option on this socket. - /// - /// For more information about this option see [`set_dccp_ccid`]. - /// - /// [`set_dccp_ccid`]: crate::Socket::set_dccp_ccid - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn dccp_tx_ccid(&self) -> io::Result { - unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_TX_CCID) } - } - - /// Get the value of the `DCCP_SOCKOPT_RX_CCID` option on this socket. - /// - /// For more information about this option see [`set_dccp_ccid`]. 
- /// - /// [`set_dccp_ccid`]: crate::Socket::set_dccp_ccid - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn dccp_xx_ccid(&self) -> io::Result { - unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_RX_CCID) } - } - - /// Set value for the `DCCP_SOCKOPT_SERVER_TIMEWAIT` option on this socket. - /// - /// Enables a listening socket to hold timewait state when closing the - /// connection. This option must be set after `accept` returns. - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn set_dccp_server_timewait(&self, hold_timewait: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_DCCP, - libc::DCCP_SOCKOPT_SERVER_TIMEWAIT, - hold_timewait as c_int, - ) - } - } - - /// Get the value of the `DCCP_SOCKOPT_SERVER_TIMEWAIT` option on this socket. - /// - /// For more information see [`set_dccp_server_timewait`] - /// - /// [`set_dccp_server_timewait`]: crate::Socket::set_dccp_server_timewait - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn dccp_server_timewait(&self) -> io::Result { - unsafe { - getsockopt( - self.as_raw(), - libc::SOL_DCCP, - libc::DCCP_SOCKOPT_SERVER_TIMEWAIT, - ) - } - } - - /// Set value for the `DCCP_SOCKOPT_SEND_CSCOV` option on this socket. - /// - /// Both this option and `DCCP_SOCKOPT_RECV_CSCOV` are used for setting the - /// partial checksum coverage. The default is that checksums always cover - /// the entire packet and that only fully covered application data is - /// accepted by the receiver. Hence, when using this feature on the sender, - /// it must be enabled at the receiver too, with suitable choice of CsCov. 
- #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn set_dccp_send_cscov(&self, level: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_DCCP, - libc::DCCP_SOCKOPT_SEND_CSCOV, - level, - ) - } - } - - /// Get the value of the `DCCP_SOCKOPT_SEND_CSCOV` option on this socket. - /// - /// For more information on this option see [`set_dccp_send_cscov`]. - /// - /// [`set_dccp_send_cscov`]: crate::Socket::set_dccp_send_cscov - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn dccp_send_cscov(&self) -> io::Result { - unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_SEND_CSCOV) } - } - - /// Set the value of the `DCCP_SOCKOPT_RECV_CSCOV` option on this socket. - /// - /// This option is only useful when combined with [`set_dccp_send_cscov`]. - /// - /// [`set_dccp_send_cscov`]: crate::Socket::set_dccp_send_cscov - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn set_dccp_recv_cscov(&self, level: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_DCCP, - libc::DCCP_SOCKOPT_RECV_CSCOV, - level, - ) - } - } - - /// Get the value of the `DCCP_SOCKOPT_RECV_CSCOV` option on this socket. - /// - /// For more information on this option see [`set_dccp_recv_cscov`]. - /// - /// [`set_dccp_recv_cscov`]: crate::Socket::set_dccp_recv_cscov - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn dccp_recv_cscov(&self) -> io::Result { - unsafe { getsockopt(self.as_raw(), libc::SOL_DCCP, libc::DCCP_SOCKOPT_RECV_CSCOV) } - } - - /// Set value for the `DCCP_SOCKOPT_QPOLICY_TXQLEN` option on this socket. - /// - /// This option sets the maximum length of the output queue. 
A zero value is - /// interpreted as unbounded queue length. - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn set_dccp_qpolicy_txqlen(&self, length: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_DCCP, - libc::DCCP_SOCKOPT_QPOLICY_TXQLEN, - length, - ) - } - } - - /// Get the value of the `DCCP_SOCKOPT_QPOLICY_TXQLEN` on this socket. - /// - /// For more information on this option see [`set_dccp_qpolicy_txqlen`]. - /// - /// [`set_dccp_qpolicy_txqlen`]: crate::Socket::set_dccp_qpolicy_txqlen - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn dccp_qpolicy_txqlen(&self) -> io::Result { - unsafe { - getsockopt( - self.as_raw(), - libc::SOL_DCCP, - libc::DCCP_SOCKOPT_QPOLICY_TXQLEN, - ) - } - } - - /// Get the value of the `DCCP_SOCKOPT_AVAILABLE_CCIDS` option on this socket. - /// - /// Returns the list of CCIDs supported by the endpoint. - /// - /// The parameter `N` is used to get the maximum number of supported - /// endpoints. The [documentation] recommends a minimum of four at the time - /// of writing. - /// - /// [documentation]: https://www.kernel.org/doc/html/latest/networking/dccp.html - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn dccp_available_ccids(&self) -> io::Result> { - let mut endpoints = [0; N]; - let mut length = endpoints.len() as libc::socklen_t; - syscall!(getsockopt( - self.as_raw(), - libc::SOL_DCCP, - libc::DCCP_SOCKOPT_AVAILABLE_CCIDS, - endpoints.as_mut_ptr().cast(), - &mut length, - ))?; - Ok(CcidEndpoints { endpoints, length }) - } - - /// Get the value of the `DCCP_SOCKOPT_GET_CUR_MPS` option on this socket. - /// - /// This option retrieves the current maximum packet size (application - /// payload size) in bytes. 
- #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn dccp_cur_mps(&self) -> io::Result { - unsafe { - getsockopt( - self.as_raw(), - libc::SOL_DCCP, - libc::DCCP_SOCKOPT_GET_CUR_MPS, - ) - } - } -} - -/// See [`Socket::dccp_available_ccids`]. -#[cfg(all(feature = "all", target_os = "linux"))] -#[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] -#[derive(Debug)] -pub struct CcidEndpoints { - endpoints: [u8; N], - length: u32, -} - -#[cfg(all(feature = "all", target_os = "linux"))] -#[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] -impl std::ops::Deref for CcidEndpoints { - type Target = [u8]; - - fn deref(&self) -> &[u8] { - &self.endpoints[0..self.length as usize] - } -} - -#[cfg_attr(docsrs, doc(cfg(unix)))] -impl AsFd for crate::Socket { - fn as_fd(&self) -> BorrowedFd<'_> { - // SAFETY: lifetime is bound by self. - unsafe { BorrowedFd::borrow_raw(self.as_raw()) } - } } #[cfg_attr(docsrs, doc(cfg(unix)))] @@ -3002,28 +2049,12 @@ } #[cfg_attr(docsrs, doc(cfg(unix)))] -impl From for OwnedFd { - fn from(sock: crate::Socket) -> OwnedFd { - // SAFETY: sock.into_raw() always returns a valid fd. - unsafe { OwnedFd::from_raw_fd(sock.into_raw()) } - } -} - -#[cfg_attr(docsrs, doc(cfg(unix)))] impl IntoRawFd for crate::Socket { fn into_raw_fd(self) -> c_int { self.into_raw() } } -#[cfg_attr(docsrs, doc(cfg(unix)))] -impl From for crate::Socket { - fn from(fd: OwnedFd) -> crate::Socket { - // SAFETY: `OwnedFd` ensures the fd is valid. 
- unsafe { crate::Socket::from_raw_fd(fd.into_raw_fd()) } - } -} - #[cfg_attr(docsrs, doc(cfg(unix)))] impl FromRawFd for crate::Socket { unsafe fn from_raw_fd(fd: c_int) -> crate::Socket { diff -Nru s390-tools-2.31.0/rust-vendor/socket2/src/sys/windows.rs s390-tools-2.33.1/rust-vendor/socket2/src/sys/windows.rs --- s390-tools-2.31.0/rust-vendor/socket2/src/sys/windows.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2/src/sys/windows.rs 2024-05-28 11:57:36.000000000 +0200 @@ -11,29 +11,32 @@ use std::marker::PhantomData; use std::mem::{self, size_of, MaybeUninit}; use std::net::{self, Ipv4Addr, Ipv6Addr, Shutdown}; -use std::os::windows::io::{ - AsRawSocket, AsSocket, BorrowedSocket, FromRawSocket, IntoRawSocket, OwnedSocket, RawSocket, -}; -use std::path::Path; +use std::os::windows::prelude::*; use std::sync::Once; use std::time::{Duration, Instant}; -use std::{process, ptr, slice}; +use std::{ptr, slice}; -use windows_sys::Win32::Foundation::{SetHandleInformation, HANDLE, HANDLE_FLAG_INHERIT}; -#[cfg(feature = "all")] -use windows_sys::Win32::Networking::WinSock::SO_PROTOCOL_INFOW; -use windows_sys::Win32::Networking::WinSock::{ - self, tcp_keepalive, FIONBIO, IN6_ADDR, IN6_ADDR_0, INVALID_SOCKET, IN_ADDR, IN_ADDR_0, - POLLERR, POLLHUP, POLLRDNORM, POLLWRNORM, SD_BOTH, SD_RECEIVE, SD_SEND, SIO_KEEPALIVE_VALS, - SOCKET_ERROR, WSABUF, WSAEMSGSIZE, WSAESHUTDOWN, WSAPOLLFD, WSAPROTOCOL_INFOW, - WSA_FLAG_NO_HANDLE_INHERIT, WSA_FLAG_OVERLAPPED, +use winapi::ctypes::c_long; +use winapi::shared::in6addr::*; +use winapi::shared::inaddr::*; +use winapi::shared::minwindef::DWORD; +use winapi::shared::minwindef::ULONG; +use winapi::shared::mstcpip::{tcp_keepalive, SIO_KEEPALIVE_VALS}; +use winapi::shared::ntdef::HANDLE; +use winapi::shared::ws2def; +use winapi::shared::ws2def::WSABUF; +use winapi::um::handleapi::SetHandleInformation; +use winapi::um::processthreadsapi::GetCurrentProcessId; +use winapi::um::winbase::{self, INFINITE}; +use 
winapi::um::winsock2::{ + self as sock, u_long, POLLERR, POLLHUP, POLLRDNORM, POLLWRNORM, SD_BOTH, SD_RECEIVE, SD_SEND, + WSAPOLLFD, }; -use windows_sys::Win32::System::Threading::INFINITE; +use winapi::um::winsock2::{SOCKET_ERROR, WSAEMSGSIZE, WSAESHUTDOWN}; -use crate::{MsgHdr, RecvFlags, SockAddr, TcpKeepalive, Type}; +use crate::{RecvFlags, SockAddr, TcpKeepalive, Type}; -#[allow(non_camel_case_types)] -pub(crate) type c_int = std::os::raw::c_int; +pub(crate) use winapi::ctypes::c_int; /// Fake MSG_TRUNC flag for the [`RecvFlags`] struct. /// @@ -42,45 +45,39 @@ pub(crate) const MSG_TRUNC: c_int = 0x01; // Used in `Domain`. -pub(crate) const AF_INET: c_int = windows_sys::Win32::Networking::WinSock::AF_INET as c_int; -pub(crate) const AF_INET6: c_int = windows_sys::Win32::Networking::WinSock::AF_INET6 as c_int; -pub(crate) const AF_UNIX: c_int = windows_sys::Win32::Networking::WinSock::AF_UNIX as c_int; -pub(crate) const AF_UNSPEC: c_int = windows_sys::Win32::Networking::WinSock::AF_UNSPEC as c_int; +pub(crate) use winapi::shared::ws2def::{AF_INET, AF_INET6}; // Used in `Type`. -pub(crate) const SOCK_STREAM: c_int = windows_sys::Win32::Networking::WinSock::SOCK_STREAM as c_int; -pub(crate) const SOCK_DGRAM: c_int = windows_sys::Win32::Networking::WinSock::SOCK_DGRAM as c_int; -pub(crate) const SOCK_RAW: c_int = windows_sys::Win32::Networking::WinSock::SOCK_RAW as c_int; -const SOCK_RDM: c_int = windows_sys::Win32::Networking::WinSock::SOCK_RDM as c_int; -pub(crate) const SOCK_SEQPACKET: c_int = - windows_sys::Win32::Networking::WinSock::SOCK_SEQPACKET as c_int; +pub(crate) use winapi::shared::ws2def::{SOCK_DGRAM, SOCK_STREAM}; +#[cfg(feature = "all")] +pub(crate) use winapi::shared::ws2def::{SOCK_RAW, SOCK_SEQPACKET}; // Used in `Protocol`. 
-pub(crate) use windows_sys::Win32::Networking::WinSock::{ - IPPROTO_ICMP, IPPROTO_ICMPV6, IPPROTO_TCP, IPPROTO_UDP, -}; +pub(crate) const IPPROTO_ICMP: c_int = winapi::shared::ws2def::IPPROTO_ICMP as c_int; +pub(crate) const IPPROTO_ICMPV6: c_int = winapi::shared::ws2def::IPPROTO_ICMPV6 as c_int; +pub(crate) const IPPROTO_TCP: c_int = winapi::shared::ws2def::IPPROTO_TCP as c_int; +pub(crate) const IPPROTO_UDP: c_int = winapi::shared::ws2def::IPPROTO_UDP as c_int; // Used in `SockAddr`. -pub(crate) use windows_sys::Win32::Networking::WinSock::{ - SOCKADDR as sockaddr, SOCKADDR_IN as sockaddr_in, SOCKADDR_IN6 as sockaddr_in6, +pub(crate) use winapi::shared::ws2def::{ + ADDRESS_FAMILY as sa_family_t, SOCKADDR as sockaddr, SOCKADDR_IN as sockaddr_in, SOCKADDR_STORAGE as sockaddr_storage, }; -#[allow(non_camel_case_types)] -pub(crate) type sa_family_t = windows_sys::Win32::Networking::WinSock::ADDRESS_FAMILY; -#[allow(non_camel_case_types)] -pub(crate) type socklen_t = windows_sys::Win32::Networking::WinSock::socklen_t; +pub(crate) use winapi::shared::ws2ipdef::SOCKADDR_IN6_LH as sockaddr_in6; +pub(crate) use winapi::um::ws2tcpip::socklen_t; // Used in `Socket`. 
+pub(crate) use winapi::shared::ws2def::{ + IPPROTO_IP, SOL_SOCKET, SO_BROADCAST, SO_ERROR, SO_KEEPALIVE, SO_LINGER, SO_OOBINLINE, + SO_RCVBUF, SO_RCVTIMEO, SO_REUSEADDR, SO_SNDBUF, SO_SNDTIMEO, SO_TYPE, TCP_NODELAY, +}; #[cfg(feature = "all")] -pub(crate) use windows_sys::Win32::Networking::WinSock::IP_HDRINCL; -pub(crate) use windows_sys::Win32::Networking::WinSock::{ - IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP, IPV6_MREQ as Ipv6Mreq, - IPV6_MULTICAST_HOPS, IPV6_MULTICAST_IF, IPV6_MULTICAST_LOOP, IPV6_RECVTCLASS, - IPV6_UNICAST_HOPS, IPV6_V6ONLY, IP_ADD_MEMBERSHIP, IP_ADD_SOURCE_MEMBERSHIP, - IP_DROP_MEMBERSHIP, IP_DROP_SOURCE_MEMBERSHIP, IP_MREQ as IpMreq, - IP_MREQ_SOURCE as IpMreqSource, IP_MULTICAST_IF, IP_MULTICAST_LOOP, IP_MULTICAST_TTL, - IP_RECVTOS, IP_TOS, IP_TTL, LINGER as linger, MSG_OOB, MSG_PEEK, SO_BROADCAST, SO_ERROR, - SO_KEEPALIVE, SO_LINGER, SO_OOBINLINE, SO_RCVBUF, SO_RCVTIMEO, SO_REUSEADDR, SO_SNDBUF, - SO_SNDTIMEO, SO_TYPE, TCP_NODELAY, +pub(crate) use winapi::shared::ws2ipdef::IP_HDRINCL; +pub(crate) use winapi::shared::ws2ipdef::{ + IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP, IPV6_MREQ as Ipv6Mreq, IPV6_MULTICAST_HOPS, + IPV6_MULTICAST_IF, IPV6_MULTICAST_LOOP, IPV6_UNICAST_HOPS, IPV6_V6ONLY, IP_ADD_MEMBERSHIP, + IP_ADD_SOURCE_MEMBERSHIP, IP_DROP_MEMBERSHIP, IP_DROP_SOURCE_MEMBERSHIP, IP_MREQ as IpMreq, + IP_MREQ_SOURCE as IpMreqSource, IP_MULTICAST_IF, IP_MULTICAST_LOOP, IP_MULTICAST_TTL, IP_TOS, + IP_TTL, }; -pub(crate) const IPPROTO_IP: c_int = windows_sys::Win32::Networking::WinSock::IPPROTO_IP as c_int; -pub(crate) const SOL_SOCKET: c_int = windows_sys::Win32::Networking::WinSock::SOL_SOCKET as c_int; +pub(crate) use winapi::um::winsock2::{linger, MSG_OOB, MSG_PEEK}; +pub(crate) const IPPROTO_IPV6: c_int = winapi::shared::ws2def::IPPROTO_IPV6 as c_int; /// Type used in set/getsockopt to retrieve the `TCP_NODELAY` option. 
/// @@ -89,16 +86,16 @@ /// `BOOL` (alias for `c_int`, 4 bytes), however in practice this turns out to /// be false (or misleading) as a `BOOLEAN` (`c_uchar`, 1 byte) is returned by /// `getsockopt`. -pub(crate) type Bool = windows_sys::Win32::Foundation::BOOLEAN; +pub(crate) type Bool = winapi::shared::ntdef::BOOLEAN; /// Maximum size of a buffer passed to system call like `recv` and `send`. -const MAX_BUF_LEN: usize = c_int::MAX as usize; +const MAX_BUF_LEN: usize = ::max_value() as usize; /// Helper macro to execute a system call that returns an `io::Result`. macro_rules! syscall { ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{ #[allow(unused_unsafe)] - let res = unsafe { windows_sys::Win32::Networking::WinSock::$fn($($arg, )*) }; + let res = unsafe { sock::$fn($($arg, )*) }; if $err_test(&res, &$err_value) { Err(io::Error::last_os_error()) } else { @@ -109,10 +106,10 @@ impl_debug!( crate::Domain, - self::AF_INET, - self::AF_INET6, - self::AF_UNIX, - self::AF_UNSPEC, + ws2def::AF_INET, + ws2def::AF_INET6, + ws2def::AF_UNIX, + ws2def::AF_UNSPEC, // = 0. ); /// Windows only API. 
@@ -135,19 +132,19 @@ impl_debug!( crate::Type, - self::SOCK_STREAM, - self::SOCK_DGRAM, - self::SOCK_RAW, - self::SOCK_RDM, - self::SOCK_SEQPACKET, + ws2def::SOCK_STREAM, + ws2def::SOCK_DGRAM, + ws2def::SOCK_RAW, + ws2def::SOCK_RDM, + ws2def::SOCK_SEQPACKET, ); impl_debug!( crate::Protocol, - WinSock::IPPROTO_ICMP, - WinSock::IPPROTO_ICMPV6, - WinSock::IPPROTO_TCP, - WinSock::IPPROTO_UDP, + self::IPPROTO_ICMP, + self::IPPROTO_ICMPV6, + self::IPPROTO_TCP, + self::IPPROTO_UDP, ); impl std::fmt::Debug for RecvFlags { @@ -170,10 +167,10 @@ impl<'a> MaybeUninitSlice<'a> { pub fn new(buf: &'a mut [MaybeUninit]) -> MaybeUninitSlice<'a> { - assert!(buf.len() <= u32::MAX as usize); + assert!(buf.len() <= ULONG::MAX as usize); MaybeUninitSlice { vec: WSABUF { - len: buf.len() as u32, + len: buf.len() as ULONG, buf: buf.as_mut_ptr().cast(), }, _lifetime: PhantomData, @@ -189,32 +186,6 @@ } } -// Used in `MsgHdr`. -pub(crate) use windows_sys::Win32::Networking::WinSock::WSAMSG as msghdr; - -pub(crate) fn set_msghdr_name(msg: &mut msghdr, name: &SockAddr) { - msg.name = name.as_ptr() as *mut _; - msg.namelen = name.len(); -} - -pub(crate) fn set_msghdr_iov(msg: &mut msghdr, ptr: *mut WSABUF, len: usize) { - msg.lpBuffers = ptr; - msg.dwBufferCount = min(len, u32::MAX as usize) as u32; -} - -pub(crate) fn set_msghdr_control(msg: &mut msghdr, ptr: *mut u8, len: usize) { - msg.Control.buf = ptr; - msg.Control.len = len as u32; -} - -pub(crate) fn set_msghdr_flags(msg: &mut msghdr, flags: c_int) { - msg.dwFlags = flags as u32; -} - -pub(crate) fn msghdr_flags(msg: &msghdr) -> RecvFlags { - RecvFlags(msg.dwFlags as c_int) -} - fn init() { static INIT: Once = Once::new(); @@ -226,7 +197,7 @@ }); } -pub(crate) type Socket = windows_sys::Win32::Networking::WinSock::SOCKET; +pub(crate) type Socket = sock::SOCKET; pub(crate) unsafe fn socket_from_raw(socket: Socket) -> crate::socket::Inner { crate::socket::Inner::from_raw_socket(socket as RawSocket) @@ -246,7 +217,7 @@ // Check if we 
set our custom flag. let flags = if ty & Type::NO_INHERIT != 0 { ty = ty & !Type::NO_INHERIT; - WSA_FLAG_NO_HANDLE_INHERIT + sock::WSA_FLAG_NO_HANDLE_INHERIT } else { 0 }; @@ -258,10 +229,10 @@ protocol, ptr::null_mut(), 0, - WSA_FLAG_OVERLAPPED | flags, + sock::WSA_FLAG_OVERLAPPED | flags, ), PartialEq::eq, - INVALID_SOCKET + sock::INVALID_SOCKET ) } @@ -278,7 +249,7 @@ let mut fd_array = WSAPOLLFD { fd: socket.as_raw(), - events: (POLLRDNORM | POLLWRNORM) as i16, + events: POLLRDNORM | POLLWRNORM, revents: 0, }; @@ -289,19 +260,17 @@ } let timeout = (timeout - elapsed).as_millis(); - let timeout = clamp(timeout, 1, c_int::MAX as u128) as c_int; + let timeout = clamp(timeout, 1, c_int::max_value() as u128) as c_int; match syscall!( WSAPoll(&mut fd_array, 1, timeout), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) { Ok(0) => return Err(io::ErrorKind::TimedOut.into()), Ok(_) => { // Error or hang up indicates an error (or failure to connect). - if (fd_array.revents & POLLERR as i16) != 0 - || (fd_array.revents & POLLHUP as i16) != 0 - { + if (fd_array.revents & POLLERR) != 0 || (fd_array.revents & POLLHUP) != 0 { match socket.take_error() { Ok(Some(err)) => return Err(err), Ok(None) => { @@ -343,11 +312,11 @@ pub(crate) fn accept(socket: Socket) -> io::Result<(Socket, SockAddr)> { // Safety: `accept` initialises the `SockAddr` for us. unsafe { - SockAddr::try_init(|storage, len| { + SockAddr::init(|storage, len| { syscall!( accept(socket, storage.cast(), len), PartialEq::eq, - INVALID_SOCKET + sock::INVALID_SOCKET ) }) } @@ -356,11 +325,11 @@ pub(crate) fn getsockname(socket: Socket) -> io::Result { // Safety: `getsockname` initialises the `SockAddr` for us. 
unsafe { - SockAddr::try_init(|storage, len| { + SockAddr::init(|storage, len| { syscall!( getsockname(socket, storage.cast(), len), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) }) } @@ -370,11 +339,11 @@ pub(crate) fn getpeername(socket: Socket) -> io::Result { // Safety: `getpeername` initialises the `SockAddr` for us. unsafe { - SockAddr::try_init(|storage, len| { + SockAddr::init(|storage, len| { syscall!( getpeername(socket, storage.cast(), len), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) }) } @@ -382,12 +351,11 @@ } pub(crate) fn try_clone(socket: Socket) -> io::Result { - let mut info: MaybeUninit = MaybeUninit::uninit(); + let mut info: MaybeUninit = MaybeUninit::uninit(); syscall!( - // NOTE: `process.id` is the same as `GetCurrentProcessId`. - WSADuplicateSocketW(socket, process::id(), info.as_mut_ptr()), + WSADuplicateSocketW(socket, GetCurrentProcessId(), info.as_mut_ptr()), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR )?; // Safety: `WSADuplicateSocketW` intialised `info` for us. 
let mut info = unsafe { info.assume_init() }; @@ -399,16 +367,16 @@ info.iProtocol, &mut info, 0, - WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT, + sock::WSA_FLAG_OVERLAPPED | sock::WSA_FLAG_NO_HANDLE_INHERIT, ), PartialEq::eq, - INVALID_SOCKET + sock::INVALID_SOCKET ) } pub(crate) fn set_nonblocking(socket: Socket, nonblocking: bool) -> io::Result<()> { - let mut nonblocking = if nonblocking { 1 } else { 0 }; - ioctlsocket(socket, FIONBIO, &mut nonblocking) + let mut nonblocking = nonblocking as u_long; + ioctlsocket(socket, sock::FIONBIO, &mut nonblocking) } pub(crate) fn shutdown(socket: Socket, how: Shutdown) -> io::Result<()> { @@ -416,8 +384,8 @@ Shutdown::Write => SD_SEND, Shutdown::Read => SD_RECEIVE, Shutdown::Both => SD_BOTH, - } as i32; - syscall!(shutdown(socket, how), PartialEq::eq, SOCKET_ERROR).map(|_| ()) + }; + syscall!(shutdown(socket, how), PartialEq::eq, sock::SOCKET_ERROR).map(|_| ()) } pub(crate) fn recv(socket: Socket, buf: &mut [MaybeUninit], flags: c_int) -> io::Result { @@ -429,11 +397,11 @@ flags, ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ); match res { Ok(n) => Ok(n as usize), - Err(ref err) if err.raw_os_error() == Some(WSAESHUTDOWN as i32) => Ok(0), + Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => Ok(0), Err(err) => Err(err), } } @@ -444,24 +412,26 @@ flags: c_int, ) -> io::Result<(usize, RecvFlags)> { let mut nread = 0; - let mut flags = flags as u32; + let mut flags = flags as DWORD; let res = syscall!( WSARecv( socket, bufs.as_mut_ptr().cast(), - min(bufs.len(), u32::MAX as usize) as u32, + min(bufs.len(), DWORD::max_value() as usize) as DWORD, &mut nread, &mut flags, ptr::null_mut(), None, ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ); match res { Ok(_) => Ok((nread as usize, RecvFlags(0))), - Err(ref err) if err.raw_os_error() == Some(WSAESHUTDOWN as i32) => Ok((0, RecvFlags(0))), - Err(ref err) if err.raw_os_error() == Some(WSAEMSGSIZE as i32) => { + Err(ref err) if 
err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => { + Ok((0, RecvFlags(0))) + } + Err(ref err) if err.raw_os_error() == Some(sock::WSAEMSGSIZE as i32) => { Ok((nread as usize, RecvFlags(MSG_TRUNC))) } Err(err) => Err(err), @@ -475,7 +445,7 @@ ) -> io::Result<(usize, SockAddr)> { // Safety: `recvfrom` initialises the `SockAddr` for us. unsafe { - SockAddr::try_init(|storage, addrlen| { + SockAddr::init(|storage, addrlen| { let res = syscall!( recvfrom( socket, @@ -486,11 +456,11 @@ addrlen, ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ); match res { Ok(n) => Ok(n as usize), - Err(ref err) if err.raw_os_error() == Some(WSAESHUTDOWN as i32) => Ok(0), + Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => Ok(0), Err(err) => Err(err), } }) @@ -500,7 +470,7 @@ pub(crate) fn peek_sender(socket: Socket) -> io::Result { // Safety: `recvfrom` initialises the `SockAddr` for us. let ((), sender) = unsafe { - SockAddr::try_init(|storage, addrlen| { + SockAddr::init(|storage, addrlen| { let res = syscall!( recvfrom( socket, @@ -536,14 +506,14 @@ ) -> io::Result<(usize, RecvFlags, SockAddr)> { // Safety: `recvfrom` initialises the `SockAddr` for us. 
unsafe { - SockAddr::try_init(|storage, addrlen| { + SockAddr::init(|storage, addrlen| { let mut nread = 0; - let mut flags = flags as u32; + let mut flags = flags as DWORD; let res = syscall!( WSARecvFrom( socket, bufs.as_mut_ptr().cast(), - min(bufs.len(), u32::MAX as usize) as u32, + min(bufs.len(), DWORD::max_value() as usize) as DWORD, &mut nread, &mut flags, storage.cast(), @@ -552,14 +522,14 @@ None, ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ); match res { Ok(_) => Ok((nread as usize, RecvFlags(0))), - Err(ref err) if err.raw_os_error() == Some(WSAESHUTDOWN as i32) => { + Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => { Ok((nread as usize, RecvFlags(0))) } - Err(ref err) if err.raw_os_error() == Some(WSAEMSGSIZE as i32) => { + Err(ref err) if err.raw_os_error() == Some(sock::WSAEMSGSIZE as i32) => { Ok((nread as usize, RecvFlags(MSG_TRUNC))) } Err(err) => Err(err), @@ -578,7 +548,7 @@ flags, ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) .map(|n| n as usize) } @@ -606,14 +576,14 @@ // // [1] https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsasend bufs.as_ptr() as *mut _, - min(bufs.len(), u32::MAX as usize) as u32, + min(bufs.len(), DWORD::max_value() as usize) as DWORD, &mut nsent, - flags as u32, + flags as DWORD, std::ptr::null_mut(), None, ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) .map(|_| nsent as usize) } @@ -634,7 +604,7 @@ addr.len(), ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) .map(|n| n as usize) } @@ -651,43 +621,26 @@ socket, // FIXME: Same problem as in `send_vectored`. 
bufs.as_ptr() as *mut _, - bufs.len().min(u32::MAX as usize) as u32, + bufs.len().min(DWORD::MAX as usize) as DWORD, &mut nsent, - flags as u32, + flags as DWORD, addr.as_ptr(), addr.len(), ptr::null_mut(), None, ), PartialEq::eq, - SOCKET_ERROR - ) - .map(|_| nsent as usize) -} - -pub(crate) fn sendmsg(socket: Socket, msg: &MsgHdr<'_, '_, '_>, flags: c_int) -> io::Result { - let mut nsent = 0; - syscall!( - WSASendMsg( - socket, - &msg.inner, - flags as u32, - &mut nsent, - ptr::null_mut(), - None, - ), - PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) .map(|_| nsent as usize) } /// Wrapper around `getsockopt` to deal with platform specific timeouts. -pub(crate) fn timeout_opt(fd: Socket, lvl: c_int, name: i32) -> io::Result> { +pub(crate) fn timeout_opt(fd: Socket, lvl: c_int, name: c_int) -> io::Result> { unsafe { getsockopt(fd, lvl, name).map(from_ms) } } -fn from_ms(duration: u32) -> Option { +fn from_ms(duration: DWORD) -> Option { if duration == 0 { None } else { @@ -699,16 +652,16 @@ /// Wrapper around `setsockopt` to deal with platform specific timeouts. pub(crate) fn set_timeout_opt( - socket: Socket, + fd: Socket, level: c_int, - optname: i32, + optname: c_int, duration: Option, ) -> io::Result<()> { let duration = into_ms(duration); - unsafe { setsockopt(socket, level, optname, duration) } + unsafe { setsockopt(fd, level, optname, duration) } } -fn into_ms(duration: Option) -> u32 { +fn into_ms(duration: Option) -> DWORD { // Note that a duration is a (u64, u32) (seconds, nanoseconds) pair, and the // timeouts in windows APIs are typically u32 milliseconds. To translate, we // have two pieces to take care of: @@ -716,9 +669,9 @@ // * Nanosecond precision is rounded up // * Greater than u32::MAX milliseconds (50 days) is rounded up to // INFINITE (never time out). 
- duration.map_or(0, |duration| { - min(duration.as_millis(), INFINITE as u128) as u32 - }) + duration + .map(|duration| min(duration.as_millis(), INFINITE as u128) as DWORD) + .unwrap_or(0) } pub(crate) fn set_tcp_keepalive(socket: Socket, keepalive: &TcpKeepalive) -> io::Result<()> { @@ -741,26 +694,25 @@ None, ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) .map(|_| ()) } /// Caller must ensure `T` is the correct type for `level` and `optname`. -// NOTE: `optname` is actually `i32`, but all constants are `u32`. -pub(crate) unsafe fn getsockopt(socket: Socket, level: c_int, optname: i32) -> io::Result { +pub(crate) unsafe fn getsockopt(socket: Socket, level: c_int, optname: c_int) -> io::Result { let mut optval: MaybeUninit = MaybeUninit::uninit(); let mut optlen = mem::size_of::() as c_int; syscall!( getsockopt( socket, - level as i32, + level, optname, optval.as_mut_ptr().cast(), &mut optlen, ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) .map(|_| { debug_assert_eq!(optlen as usize, mem::size_of::()); @@ -770,61 +722,57 @@ } /// Caller must ensure `T` is the correct type for `level` and `optname`. -// NOTE: `optname` is actually `i32`, but all constants are `u32`. pub(crate) unsafe fn setsockopt( socket: Socket, level: c_int, - optname: i32, + optname: c_int, optval: T, ) -> io::Result<()> { syscall!( setsockopt( socket, - level as i32, + level, optname, (&optval as *const T).cast(), mem::size_of::() as c_int, ), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) .map(|_| ()) } -fn ioctlsocket(socket: Socket, cmd: i32, payload: &mut u32) -> io::Result<()> { +fn ioctlsocket(socket: Socket, cmd: c_long, payload: &mut u_long) -> io::Result<()> { syscall!( ioctlsocket(socket, cmd, payload), PartialEq::eq, - SOCKET_ERROR + sock::SOCKET_ERROR ) .map(|_| ()) } pub(crate) fn to_in_addr(addr: &Ipv4Addr) -> IN_ADDR { - IN_ADDR { - S_un: IN_ADDR_0 { - // `S_un` is stored as BE on all machines, and the array is in BE - // order. 
So the native endian conversion method is used so that - // it's never swapped. - S_addr: u32::from_ne_bytes(addr.octets()), - }, - } + let mut s_un: in_addr_S_un = unsafe { mem::zeroed() }; + // `S_un` is stored as BE on all machines, and the array is in BE order. So + // the native endian conversion method is used so that it's never swapped. + unsafe { *(s_un.S_addr_mut()) = u32::from_ne_bytes(addr.octets()) }; + IN_ADDR { S_un: s_un } } pub(crate) fn from_in_addr(in_addr: IN_ADDR) -> Ipv4Addr { - Ipv4Addr::from(unsafe { in_addr.S_un.S_addr }.to_ne_bytes()) + Ipv4Addr::from(unsafe { *in_addr.S_un.S_addr() }.to_ne_bytes()) } -pub(crate) fn to_in6_addr(addr: &Ipv6Addr) -> IN6_ADDR { - IN6_ADDR { - u: IN6_ADDR_0 { - Byte: addr.octets(), - }, - } +pub(crate) fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr { + let mut ret_addr: in6_addr_u = unsafe { mem::zeroed() }; + unsafe { *(ret_addr.Byte_mut()) = addr.octets() }; + let mut ret: in6_addr = unsafe { mem::zeroed() }; + ret.u = ret_addr; + ret } -pub(crate) fn from_in6_addr(addr: IN6_ADDR) -> Ipv6Addr { - Ipv6Addr::from(unsafe { addr.u.Byte }) +pub(crate) fn from_in6_addr(addr: in6_addr) -> Ipv6Addr { + Ipv6Addr::from(*unsafe { addr.u.Byte() }) } pub(crate) fn to_mreqn( @@ -853,49 +801,6 @@ } } -#[allow(unsafe_op_in_unsafe_fn)] -pub(crate) fn unix_sockaddr(path: &Path) -> io::Result { - // SAFETY: a `sockaddr_storage` of all zeros is valid. - let mut storage = unsafe { mem::zeroed::() }; - let len = { - let storage: &mut windows_sys::Win32::Networking::WinSock::SOCKADDR_UN = - unsafe { &mut *(&mut storage as *mut sockaddr_storage).cast() }; - - // Windows expects a UTF-8 path here even though Windows paths are - // usually UCS-2 encoded. If Rust exposed OsStr's Wtf8 encoded - // buffer, this could be used directly, relying on Windows to - // validate the path, but Rust hides this implementation detail. - // - // See . 
- let bytes = path - .to_str() - .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "path must be valid UTF-8"))? - .as_bytes(); - - // Windows appears to allow non-null-terminated paths, but this is - // not documented, so do not rely on it yet. - // - // See . - if bytes.len() >= storage.sun_path.len() { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "path must be shorter than SUN_LEN", - )); - } - - storage.sun_family = crate::sys::AF_UNIX as sa_family_t; - // `storage` was initialized to zero above, so the path is - // already null terminated. - storage.sun_path[..bytes.len()].copy_from_slice(bytes); - - let base = storage as *const _ as usize; - let path = &storage.sun_path as *const _ as usize; - let sun_path_offset = path - base; - sun_path_offset + bytes.len() + 1 - }; - Ok(unsafe { SockAddr::new(storage, len as socklen_t) }) -} - /// Windows only API. impl crate::Socket { /// Sets `HANDLE_FLAG_INHERIT` using `SetHandleInformation`. @@ -907,11 +812,11 @@ pub(crate) fn _set_no_inherit(&self, no_inherit: bool) -> io::Result<()> { // NOTE: can't use `syscall!` because it expects the function in the - // `windows_sys::Win32::Networking::WinSock::` path. + // `sock::` path. let res = unsafe { SetHandleInformation( self.as_raw() as HANDLE, - HANDLE_FLAG_INHERIT, + winbase::HANDLE_FLAG_INHERIT, !no_inherit as _, ) }; @@ -922,62 +827,20 @@ Ok(()) } } - - /// Returns the [`Protocol`] of this socket by checking the `SO_PROTOCOL_INFOW` - /// option on this socket. - /// - /// [`Protocol`]: crate::Protocol - #[cfg(feature = "all")] - pub fn protocol(&self) -> io::Result> { - let info = unsafe { - getsockopt::(self.as_raw(), SOL_SOCKET, SO_PROTOCOL_INFOW)? - }; - match info.iProtocol { - 0 => Ok(None), - p => Ok(Some(crate::Protocol::from(p))), - } - } } -#[cfg_attr(docsrs, doc(cfg(windows)))] -impl AsSocket for crate::Socket { - fn as_socket(&self) -> BorrowedSocket<'_> { - // SAFETY: lifetime is bound by self. 
- unsafe { BorrowedSocket::borrow_raw(self.as_raw() as RawSocket) } - } -} - -#[cfg_attr(docsrs, doc(cfg(windows)))] impl AsRawSocket for crate::Socket { fn as_raw_socket(&self) -> RawSocket { self.as_raw() as RawSocket } } -#[cfg_attr(docsrs, doc(cfg(windows)))] -impl From for OwnedSocket { - fn from(sock: crate::Socket) -> OwnedSocket { - // SAFETY: sock.into_raw() always returns a valid fd. - unsafe { OwnedSocket::from_raw_socket(sock.into_raw() as RawSocket) } - } -} - -#[cfg_attr(docsrs, doc(cfg(windows)))] impl IntoRawSocket for crate::Socket { fn into_raw_socket(self) -> RawSocket { self.into_raw() as RawSocket } } -#[cfg_attr(docsrs, doc(cfg(windows)))] -impl From for crate::Socket { - fn from(fd: OwnedSocket) -> crate::Socket { - // SAFETY: `OwnedFd` ensures the fd is valid. - unsafe { crate::Socket::from_raw_socket(fd.into_raw_socket()) } - } -} - -#[cfg_attr(docsrs, doc(cfg(windows)))] impl FromRawSocket for crate::Socket { unsafe fn from_raw_socket(socket: RawSocket) -> crate::Socket { crate::Socket::from_raw(socket as Socket) @@ -988,13 +851,13 @@ fn in_addr_convertion() { let ip = Ipv4Addr::new(127, 0, 0, 1); let raw = to_in_addr(&ip); - assert_eq!(unsafe { raw.S_un.S_addr }, 127 << 0 | 1 << 24); + assert_eq!(unsafe { *raw.S_un.S_addr() }, 127 << 0 | 1 << 24); assert_eq!(from_in_addr(raw), ip); let ip = Ipv4Addr::new(127, 34, 4, 12); let raw = to_in_addr(&ip); assert_eq!( - unsafe { raw.S_un.S_addr }, + unsafe { *raw.S_un.S_addr() }, 127 << 0 | 34 << 8 | 4 << 16 | 12 << 24 ); assert_eq!(from_in_addr(raw), ip); @@ -1014,6 +877,6 @@ 6u16.to_be(), 7u16.to_be(), ]; - assert_eq!(unsafe { raw.u.Word }, want); + assert_eq!(unsafe { *raw.u.Word() }, want); assert_eq!(from_in6_addr(raw), ip); } diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/socket2-0.4.9/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/socket2-0.4.9/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/Cargo.toml s390-tools-2.33.1/rust-vendor/socket2-0.4.9/Cargo.toml --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,68 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "socket2" -version = "0.4.9" -authors = [ - "Alex Crichton ", - "Thomas de Zeeuw ", -] -include = [ - "Cargo.toml", - "LICENSE-APACHE", - "LICENSE-MIT", - "README.md", - "src/**/*.rs", -] -description = """ -Utilities for handling networking sockets with a maximal amount of configuration -possible intended. 
-""" -homepage = "https://github.com/rust-lang/socket2" -documentation = "https://docs.rs/socket2" -readme = "README.md" -keywords = [ - "io", - "socket", - "network", -] -categories = [ - "api-bindings", - "network-programming", -] -license = "MIT OR Apache-2.0" -repository = "https://github.com/rust-lang/socket2" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ - "--cfg", - "docsrs", -] - -[package.metadata.playground] -features = ["all"] - -[features] -all = [] - -[target."cfg(unix)".dependencies.libc] -version = "0.2.139" - -[target."cfg(windows)".dependencies.winapi] -version = "0.3.9" -features = [ - "handleapi", - "ws2ipdef", - "ws2tcpip", -] diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/LICENSE-APACHE s390-tools-2.33.1/rust-vendor/socket2-0.4.9/LICENSE-APACHE --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/LICENSE-APACHE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/LICENSE-APACHE 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/LICENSE-MIT s390-tools-2.33.1/rust-vendor/socket2-0.4.9/LICENSE-MIT --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/LICENSE-MIT 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/LICENSE-MIT 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2014 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/README.md s390-tools-2.33.1/rust-vendor/socket2-0.4.9/README.md --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,84 +0,0 @@ -# Socket2 - -Socket2 is a crate that provides utilities for creating and using sockets. - -The goal of this crate is to create and use a socket using advanced -configuration options (those that are not available in the types in the standard -library) without using any unsafe code. - -This crate provides as direct as possible access to the system's functionality -for sockets, this means little effort to provide cross-platform utilities. It is -up to the user to know how to use sockets when using this crate. *If you don't -know how to create a socket using libc/system calls then this crate is not for -you*. Most, if not all, functions directly relate to the equivalent system call -with no error handling applied, so no handling errors such as `EINTR`. As a -result using this crate can be a little wordy, but it should give you maximal -flexibility over configuration of sockets. - -See the [API documentation] for more. - -[API documentation]: https://docs.rs/socket2 - -# Two branches - -Currently Socket2 supports two versions: v0.4 and v0.3. Version 0.4 is developed -in the master branch, version 0.3 in the [v0.3.x branch]. - -[v0.3.x branch]: https://github.com/rust-lang/socket2/tree/v0.3.x - -# OS support - -Socket2 attempts to support the same OS/architectures as Rust does, see -https://doc.rust-lang.org/nightly/rustc/platform-support.html. However this is -not always possible, below is current list of support OSs. - -*If your favorite OS is not on the list consider contributing it! 
See [issue -#78].* - -[issue #78]: https://github.com/rust-lang/socket2/issues/78 - -### Tier 1 - -These OSs are tested with each commit in the CI and must always pass the tests. -All functions/types/etc., excluding ones behind the `all` feature, must work on -these OSs. - -* Linux -* macOS -* Windows - -### Tier 2 - -These OSs are currently build in the CI, but not tested. Not all -functions/types/etc. may work on these OSs, even ones **not** behind the `all` -feature flag. - -* Android -* FreeBSD -* Fuchsia -* iOS -* illumos -* NetBSD -* Redox -* Solaris - -# Minimum Supported Rust Version (MSRV) - -Socket2 uses 1.46.0 as MSRV. - -# License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - https://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - https://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this project by you, as defined in the Apache-2.0 license, -shall be dual licensed as above, without any additional terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/lib.rs s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/lib.rs --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,475 +0,0 @@ -// Copyright 2015 The Rust Project Developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Utilities for creating and using sockets. -//! -//! The goal of this crate is to create and use a socket using advanced -//! configuration options (those that are not available in the types in the -//! 
standard library) without using any unsafe code. -//! -//! This crate provides as direct as possible access to the system's -//! functionality for sockets, this means little effort to provide -//! cross-platform utilities. It is up to the user to know how to use sockets -//! when using this crate. *If you don't know how to create a socket using -//! libc/system calls then this crate is not for you*. Most, if not all, -//! functions directly relate to the equivalent system call with no error -//! handling applied, so no handling errors such as [`EINTR`]. As a result using -//! this crate can be a little wordy, but it should give you maximal flexibility -//! over configuration of sockets. -//! -//! [`EINTR`]: std::io::ErrorKind::Interrupted -//! -//! # Examples -//! -//! ```no_run -//! # fn main() -> std::io::Result<()> { -//! use std::net::{SocketAddr, TcpListener}; -//! use socket2::{Socket, Domain, Type}; -//! -//! // Create a TCP listener bound to two addresses. -//! let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?; -//! -//! socket.set_only_v6(false)?; -//! let address: SocketAddr = "[::1]:12345".parse().unwrap(); -//! socket.bind(&address.into())?; -//! socket.listen(128)?; -//! -//! let listener: TcpListener = socket.into(); -//! // ... -//! # drop(listener); -//! # Ok(()) } -//! ``` -//! -//! ## Features -//! -//! This crate has a single feature `all`, which enables all functions even ones -//! that are not available on all OSs. - -#![doc(html_root_url = "https://docs.rs/socket2/0.4")] -#![deny(missing_docs, missing_debug_implementations, rust_2018_idioms)] -// Show required OS/features on docs.rs. -#![cfg_attr(docsrs, feature(doc_cfg))] -// Disallow warnings when running tests. -#![cfg_attr(test, deny(warnings))] -// Disallow warnings in examples. 
-#![doc(test(attr(deny(warnings))))] - -use std::fmt; -use std::mem::MaybeUninit; -use std::net::SocketAddr; -use std::ops::{Deref, DerefMut}; -use std::time::Duration; - -/// Macro to implement `fmt::Debug` for a type, printing the constant names -/// rather than a number. -/// -/// Note this is used in the `sys` module and thus must be defined before -/// defining the modules. -macro_rules! impl_debug { - ( - // Type name for which to implement `fmt::Debug`. - $type: path, - $( - $(#[$target: meta])* - // The flag(s) to check. - // Need to specific the libc crate because Windows doesn't use - // `libc` but `winapi`. - $libc: ident :: $flag: ident - ),+ $(,)* - ) => { - impl std::fmt::Debug for $type { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let string = match self.0 { - $( - $(#[$target])* - $libc :: $flag => stringify!($flag), - )+ - n => return write!(f, "{}", n), - }; - f.write_str(string) - } - } - }; -} - -/// Macro to convert from one network type to another. -macro_rules! from { - ($from: ty, $for: ty) => { - impl From<$from> for $for { - fn from(socket: $from) -> $for { - #[cfg(unix)] - unsafe { - <$for>::from_raw_fd(socket.into_raw_fd()) - } - #[cfg(windows)] - unsafe { - <$for>::from_raw_socket(socket.into_raw_socket()) - } - } - } - }; -} - -mod sockaddr; -mod socket; -mod sockref; - -#[cfg_attr(unix, path = "sys/unix.rs")] -#[cfg_attr(windows, path = "sys/windows.rs")] -mod sys; - -#[cfg(not(any(windows, unix)))] -compile_error!("Socket2 doesn't support the compile target"); - -use sys::c_int; - -pub use sockaddr::SockAddr; -pub use socket::Socket; -pub use sockref::SockRef; - -#[cfg(not(any( - target_os = "haiku", - target_os = "illumos", - target_os = "netbsd", - target_os = "redox", - target_os = "solaris", -)))] -pub use socket::InterfaceIndexOrAddress; - -/// Specification of the communication domain for a socket. 
-/// -/// This is a newtype wrapper around an integer which provides a nicer API in -/// addition to an injection point for documentation. Convenience constants such -/// as [`Domain::IPV4`], [`Domain::IPV6`], etc, are provided to avoid reaching -/// into libc for various constants. -/// -/// This type is freely interconvertible with C's `int` type, however, if a raw -/// value needs to be provided. -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct Domain(c_int); - -impl Domain { - /// Domain for IPv4 communication, corresponding to `AF_INET`. - pub const IPV4: Domain = Domain(sys::AF_INET); - - /// Domain for IPv6 communication, corresponding to `AF_INET6`. - pub const IPV6: Domain = Domain(sys::AF_INET6); - - /// Returns the correct domain for `address`. - pub const fn for_address(address: SocketAddr) -> Domain { - match address { - SocketAddr::V4(_) => Domain::IPV4, - SocketAddr::V6(_) => Domain::IPV6, - } - } -} - -impl From for Domain { - fn from(d: c_int) -> Domain { - Domain(d) - } -} - -impl From for c_int { - fn from(d: Domain) -> c_int { - d.0 - } -} - -/// Specification of communication semantics on a socket. -/// -/// This is a newtype wrapper around an integer which provides a nicer API in -/// addition to an injection point for documentation. Convenience constants such -/// as [`Type::STREAM`], [`Type::DGRAM`], etc, are provided to avoid reaching -/// into libc for various constants. -/// -/// This type is freely interconvertible with C's `int` type, however, if a raw -/// value needs to be provided. -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct Type(c_int); - -impl Type { - /// Type corresponding to `SOCK_STREAM`. - /// - /// Used for protocols such as TCP. - pub const STREAM: Type = Type(sys::SOCK_STREAM); - - /// Type corresponding to `SOCK_DGRAM`. - /// - /// Used for protocols such as UDP. - pub const DGRAM: Type = Type(sys::SOCK_DGRAM); - - /// Type corresponding to `SOCK_SEQPACKET`. 
- #[cfg(feature = "all")] - #[cfg_attr(docsrs, doc(cfg(feature = "all")))] - pub const SEQPACKET: Type = Type(sys::SOCK_SEQPACKET); - - /// Type corresponding to `SOCK_RAW`. - #[cfg(all(feature = "all", not(target_os = "redox")))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", not(target_os = "redox")))))] - pub const RAW: Type = Type(sys::SOCK_RAW); -} - -impl From for Type { - fn from(t: c_int) -> Type { - Type(t) - } -} - -impl From for c_int { - fn from(t: Type) -> c_int { - t.0 - } -} - -/// Protocol specification used for creating sockets via `Socket::new`. -/// -/// This is a newtype wrapper around an integer which provides a nicer API in -/// addition to an injection point for documentation. -/// -/// This type is freely interconvertible with C's `int` type, however, if a raw -/// value needs to be provided. -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct Protocol(c_int); - -impl Protocol { - /// Protocol corresponding to `ICMPv4`. - pub const ICMPV4: Protocol = Protocol(sys::IPPROTO_ICMP); - - /// Protocol corresponding to `ICMPv6`. - pub const ICMPV6: Protocol = Protocol(sys::IPPROTO_ICMPV6); - - /// Protocol corresponding to `TCP`. - pub const TCP: Protocol = Protocol(sys::IPPROTO_TCP); - - /// Protocol corresponding to `UDP`. - pub const UDP: Protocol = Protocol(sys::IPPROTO_UDP); -} - -impl From for Protocol { - fn from(p: c_int) -> Protocol { - Protocol(p) - } -} - -impl From for c_int { - fn from(p: Protocol) -> c_int { - p.0 - } -} - -/// Flags for incoming messages. -/// -/// Flags provide additional information about incoming messages. -#[cfg(not(target_os = "redox"))] -#[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct RecvFlags(c_int); - -#[cfg(not(target_os = "redox"))] -impl RecvFlags { - /// Check if the message contains a truncated datagram. - /// - /// This flag is only used for datagram-based sockets, - /// not for stream sockets. 
- /// - /// On Unix this corresponds to the `MSG_TRUNC` flag. - /// On Windows this corresponds to the `WSAEMSGSIZE` error code. - pub const fn is_truncated(self) -> bool { - self.0 & sys::MSG_TRUNC != 0 - } -} - -/// A version of [`IoSliceMut`] that allows the buffer to be uninitialised. -/// -/// [`IoSliceMut`]: std::io::IoSliceMut -#[repr(transparent)] -pub struct MaybeUninitSlice<'a>(sys::MaybeUninitSlice<'a>); - -impl<'a> fmt::Debug for MaybeUninitSlice<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self.0.as_slice(), fmt) - } -} - -impl<'a> MaybeUninitSlice<'a> { - /// Creates a new `MaybeUninitSlice` wrapping a byte slice. - /// - /// # Panics - /// - /// Panics on Windows if the slice is larger than 4GB. - pub fn new(buf: &'a mut [MaybeUninit]) -> MaybeUninitSlice<'a> { - MaybeUninitSlice(sys::MaybeUninitSlice::new(buf)) - } -} - -impl<'a> Deref for MaybeUninitSlice<'a> { - type Target = [MaybeUninit]; - - fn deref(&self) -> &[MaybeUninit] { - self.0.as_slice() - } -} - -impl<'a> DerefMut for MaybeUninitSlice<'a> { - fn deref_mut(&mut self) -> &mut [MaybeUninit] { - self.0.as_mut_slice() - } -} - -/// Configures a socket's TCP keepalive parameters. -/// -/// See [`Socket::set_tcp_keepalive`]. -#[derive(Debug, Clone)] -pub struct TcpKeepalive { - #[cfg_attr(target_os = "openbsd", allow(dead_code))] - time: Option, - #[cfg(not(any( - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "nto", - )))] - interval: Option, - #[cfg(not(any( - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "windows", - target_os = "nto", - )))] - retries: Option, -} - -impl TcpKeepalive { - /// Returns a new, empty set of TCP keepalive parameters. 
- pub const fn new() -> TcpKeepalive { - TcpKeepalive { - time: None, - #[cfg(not(any( - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "nto", - )))] - interval: None, - #[cfg(not(any( - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "windows", - target_os = "nto", - )))] - retries: None, - } - } - - /// Set the amount of time after which TCP keepalive probes will be sent on - /// idle connections. - /// - /// This will set `TCP_KEEPALIVE` on macOS and iOS, and - /// `TCP_KEEPIDLE` on all other Unix operating systems, except - /// OpenBSD and Haiku which don't support any way to set this - /// option. On Windows, this sets the value of the `tcp_keepalive` - /// struct's `keepalivetime` field. - /// - /// Some platforms specify this value in seconds, so sub-second - /// specifications may be omitted. - pub const fn with_time(self, time: Duration) -> Self { - Self { - time: Some(time), - ..self - } - } - - /// Set the value of the `TCP_KEEPINTVL` option. On Windows, this sets the - /// value of the `tcp_keepalive` struct's `keepaliveinterval` field. - /// - /// Sets the time interval between TCP keepalive probes. - /// - /// Some platforms specify this value in seconds, so sub-second - /// specifications may be omitted. 
- #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - windows, - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - windows, - ) - ))) - )] - pub const fn with_interval(self, interval: Duration) -> Self { - Self { - interval: Some(interval), - ..self - } - } - - /// Set the value of the `TCP_KEEPCNT` option. - /// - /// Set the maximum number of TCP keepalive probes that will be sent before - /// dropping a connection, if TCP keepalive is enabled on this socket. - #[cfg(all( - feature = "all", - any( - doc, - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ) - ))) - )] - pub const fn with_retries(self, retries: u32) -> Self { - Self { - retries: Some(retries), - ..self - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/sockaddr.rs s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/sockaddr.rs --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/sockaddr.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/sockaddr.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,350 +0,0 @@ -use std::mem::{self, size_of, MaybeUninit}; -use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; -use std::{fmt, 
io}; - -use crate::sys::{ - sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, socklen_t, AF_INET, - AF_INET6, -}; -#[cfg(windows)] -use winapi::shared::ws2ipdef::SOCKADDR_IN6_LH_u; - -/// The address of a socket. -/// -/// `SockAddr`s may be constructed directly to and from the standard library -/// [`SocketAddr`], [`SocketAddrV4`], and [`SocketAddrV6`] types. -#[derive(Clone)] -pub struct SockAddr { - storage: sockaddr_storage, - len: socklen_t, -} - -#[allow(clippy::len_without_is_empty)] -impl SockAddr { - /// Create a `SockAddr` from the underlying storage and its length. - /// - /// # Safety - /// - /// Caller must ensure that the address family and length match the type of - /// storage address. For example if `storage.ss_family` is set to `AF_INET` - /// the `storage` must be initialised as `sockaddr_in`, setting the content - /// and length appropriately. - /// - /// # Examples - /// - /// ``` - /// # fn main() -> std::io::Result<()> { - /// # #[cfg(unix)] { - /// use std::io; - /// use std::mem; - /// use std::os::unix::io::AsRawFd; - /// - /// use socket2::{SockAddr, Socket, Domain, Type}; - /// - /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; - /// - /// // Initialise a `SocketAddr` byte calling `getsockname(2)`. - /// let mut addr_storage: libc::sockaddr_storage = unsafe { mem::zeroed() }; - /// let mut len = mem::size_of_val(&addr_storage) as libc::socklen_t; - /// - /// // The `getsockname(2)` system call will intiliase `storage` for - /// // us, setting `len` to the correct length. 
- /// let res = unsafe { - /// libc::getsockname( - /// socket.as_raw_fd(), - /// (&mut addr_storage as *mut libc::sockaddr_storage).cast(), - /// &mut len, - /// ) - /// }; - /// if res == -1 { - /// return Err(io::Error::last_os_error()); - /// } - /// - /// let address = unsafe { SockAddr::new(addr_storage, len) }; - /// # drop(address); - /// # } - /// # Ok(()) - /// # } - /// ``` - pub const unsafe fn new(storage: sockaddr_storage, len: socklen_t) -> SockAddr { - SockAddr { storage, len } - } - - /// Initialise a `SockAddr` by calling the function `init`. - /// - /// The type of the address storage and length passed to the function `init` - /// is OS/architecture specific. - /// - /// The address is zeroed before `init` is called and is thus valid to - /// dereference and read from. The length initialised to the maximum length - /// of the storage. - /// - /// # Safety - /// - /// Caller must ensure that the address family and length match the type of - /// storage address. For example if `storage.ss_family` is set to `AF_INET` - /// the `storage` must be initialised as `sockaddr_in`, setting the content - /// and length appropriately. - /// - /// # Examples - /// - /// ``` - /// # fn main() -> std::io::Result<()> { - /// # #[cfg(unix)] { - /// use std::io; - /// use std::os::unix::io::AsRawFd; - /// - /// use socket2::{SockAddr, Socket, Domain, Type}; - /// - /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; - /// - /// // Initialise a `SocketAddr` byte calling `getsockname(2)`. - /// let (_, address) = unsafe { - /// SockAddr::init(|addr_storage, len| { - /// // The `getsockname(2)` system call will intiliase `storage` for - /// // us, setting `len` to the correct length. 
- /// if libc::getsockname(socket.as_raw_fd(), addr_storage.cast(), len) == -1 { - /// Err(io::Error::last_os_error()) - /// } else { - /// Ok(()) - /// } - /// }) - /// }?; - /// # drop(address); - /// # } - /// # Ok(()) - /// # } - /// ``` - pub unsafe fn init(init: F) -> io::Result<(T, SockAddr)> - where - F: FnOnce(*mut sockaddr_storage, *mut socklen_t) -> io::Result, - { - const STORAGE_SIZE: socklen_t = size_of::() as socklen_t; - // NOTE: `SockAddr::unix` depends on the storage being zeroed before - // calling `init`. - // NOTE: calling `recvfrom` with an empty buffer also depends on the - // storage being zeroed before calling `init` as the OS might not - // initialise it. - let mut storage = MaybeUninit::::zeroed(); - let mut len = STORAGE_SIZE; - init(storage.as_mut_ptr(), &mut len).map(|res| { - debug_assert!(len <= STORAGE_SIZE, "overflown address storage"); - let addr = SockAddr { - // Safety: zeroed-out `sockaddr_storage` is valid, caller must - // ensure at least `len` bytes are valid. - storage: storage.assume_init(), - len, - }; - (res, addr) - }) - } - - /// Returns this address's family. - pub const fn family(&self) -> sa_family_t { - self.storage.ss_family - } - - /// Returns the size of this address in bytes. - pub const fn len(&self) -> socklen_t { - self.len - } - - /// Returns a raw pointer to the address. - pub const fn as_ptr(&self) -> *const sockaddr { - &self.storage as *const _ as *const _ - } - - /// Returns a raw pointer to the address storage. - #[cfg(all(unix, not(target_os = "redox")))] - pub(crate) const fn as_storage_ptr(&self) -> *const sockaddr_storage { - &self.storage - } - - /// Returns this address as a `SocketAddr` if it is in the `AF_INET` (IPv4) - /// or `AF_INET6` (IPv6) family, otherwise returns `None`. - pub fn as_socket(&self) -> Option { - if self.storage.ss_family == AF_INET as sa_family_t { - // Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in. 
- let addr = unsafe { &*(&self.storage as *const _ as *const sockaddr_in) }; - - let ip = crate::sys::from_in_addr(addr.sin_addr); - let port = u16::from_be(addr.sin_port); - Some(SocketAddr::V4(SocketAddrV4::new(ip, port))) - } else if self.storage.ss_family == AF_INET6 as sa_family_t { - // Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6. - let addr = unsafe { &*(&self.storage as *const _ as *const sockaddr_in6) }; - - let ip = crate::sys::from_in6_addr(addr.sin6_addr); - let port = u16::from_be(addr.sin6_port); - Some(SocketAddr::V6(SocketAddrV6::new( - ip, - port, - addr.sin6_flowinfo, - #[cfg(unix)] - addr.sin6_scope_id, - #[cfg(windows)] - unsafe { - *addr.u.sin6_scope_id() - }, - ))) - } else { - None - } - } - - /// Returns this address as a [`SocketAddrV4`] if it is in the `AF_INET` - /// family. - pub fn as_socket_ipv4(&self) -> Option { - match self.as_socket() { - Some(SocketAddr::V4(addr)) => Some(addr), - _ => None, - } - } - - /// Returns this address as a [`SocketAddrV6`] if it is in the `AF_INET6` - /// family. 
- pub fn as_socket_ipv6(&self) -> Option { - match self.as_socket() { - Some(SocketAddr::V6(addr)) => Some(addr), - _ => None, - } - } -} - -impl From for SockAddr { - fn from(addr: SocketAddr) -> SockAddr { - match addr { - SocketAddr::V4(addr) => addr.into(), - SocketAddr::V6(addr) => addr.into(), - } - } -} - -impl From for SockAddr { - fn from(addr: SocketAddrV4) -> SockAddr { - let sockaddr_in = sockaddr_in { - sin_family: AF_INET as sa_family_t, - sin_port: addr.port().to_be(), - sin_addr: crate::sys::to_in_addr(addr.ip()), - sin_zero: Default::default(), - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "haiku", - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd" - ))] - sin_len: 0, - }; - let mut storage = MaybeUninit::::zeroed(); - // Safety: A `sockaddr_in` is memory compatible with a `sockaddr_storage` - unsafe { (storage.as_mut_ptr() as *mut sockaddr_in).write(sockaddr_in) }; - SockAddr { - storage: unsafe { storage.assume_init() }, - len: mem::size_of::() as socklen_t, - } - } -} - -impl From for SockAddr { - fn from(addr: SocketAddrV6) -> SockAddr { - #[cfg(windows)] - let u = unsafe { - let mut u = mem::zeroed::(); - *u.sin6_scope_id_mut() = addr.scope_id(); - u - }; - - let sockaddr_in6 = sockaddr_in6 { - sin6_family: AF_INET6 as sa_family_t, - sin6_port: addr.port().to_be(), - sin6_addr: crate::sys::to_in6_addr(addr.ip()), - sin6_flowinfo: addr.flowinfo(), - #[cfg(unix)] - sin6_scope_id: addr.scope_id(), - #[cfg(windows)] - u, - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "haiku", - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd" - ))] - sin6_len: 0, - #[cfg(any(target_os = "solaris", target_os = "illumos"))] - __sin6_src_id: 0, - }; - let mut storage = MaybeUninit::::zeroed(); - // Safety: A `sockaddr_in6` is memory compatible with a `sockaddr_storage` - unsafe { (storage.as_mut_ptr() as *mut 
sockaddr_in6).write(sockaddr_in6) }; - SockAddr { - storage: unsafe { storage.assume_init() }, - len: mem::size_of::() as socklen_t, - } - } -} - -impl fmt::Debug for SockAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut f = fmt.debug_struct("SockAddr"); - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "haiku", - target_os = "hermit", - target_os = "ios", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "vxworks", - target_os = "nto", - ))] - f.field("ss_len", &self.storage.ss_len); - f.field("ss_family", &self.storage.ss_family) - .field("len", &self.len) - .finish() - } -} - -#[test] -fn ipv4() { - use std::net::Ipv4Addr; - let std = SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 9876); - let addr = SockAddr::from(std); - assert_eq!(addr.family(), AF_INET as sa_family_t); - assert_eq!(addr.len(), size_of::() as socklen_t); - assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std))); - assert_eq!(addr.as_socket_ipv4(), Some(std)); - assert!(addr.as_socket_ipv6().is_none()); - - let addr = SockAddr::from(SocketAddr::from(std)); - assert_eq!(addr.family(), AF_INET as sa_family_t); - assert_eq!(addr.len(), size_of::() as socklen_t); - assert_eq!(addr.as_socket(), Some(SocketAddr::V4(std))); - assert_eq!(addr.as_socket_ipv4(), Some(std)); - assert!(addr.as_socket_ipv6().is_none()); -} - -#[test] -fn ipv6() { - use std::net::Ipv6Addr; - let std = SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9876, 11, 12); - let addr = SockAddr::from(std); - assert_eq!(addr.family(), AF_INET6 as sa_family_t); - assert_eq!(addr.len(), size_of::() as socklen_t); - assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std))); - assert!(addr.as_socket_ipv4().is_none()); - assert_eq!(addr.as_socket_ipv6(), Some(std)); - - let addr = SockAddr::from(SocketAddr::from(std)); - assert_eq!(addr.family(), AF_INET6 as sa_family_t); - assert_eq!(addr.len(), size_of::() as socklen_t); - 
assert_eq!(addr.as_socket(), Some(SocketAddr::V6(std))); - assert!(addr.as_socket_ipv4().is_none()); - assert_eq!(addr.as_socket_ipv6(), Some(std)); -} diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/socket.rs s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/socket.rs --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/socket.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/socket.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1938 +0,0 @@ -// Copyright 2015 The Rust Project Developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::fmt; -use std::io::{self, Read, Write}; -#[cfg(not(target_os = "redox"))] -use std::io::{IoSlice, IoSliceMut}; -use std::mem::MaybeUninit; -#[cfg(not(target_os = "nto"))] -use std::net::Ipv6Addr; -use std::net::{self, Ipv4Addr, Shutdown}; -#[cfg(unix)] -use std::os::unix::io::{FromRawFd, IntoRawFd}; -#[cfg(windows)] -use std::os::windows::io::{FromRawSocket, IntoRawSocket}; -use std::time::Duration; - -use crate::sys::{self, c_int, getsockopt, setsockopt, Bool}; -use crate::{Domain, Protocol, SockAddr, TcpKeepalive, Type}; -#[cfg(not(target_os = "redox"))] -use crate::{MaybeUninitSlice, RecvFlags}; - -/// Owned wrapper around a system socket. -/// -/// This type simply wraps an instance of a file descriptor (`c_int`) on Unix -/// and an instance of `SOCKET` on Windows. This is the main type exported by -/// this crate and is intended to mirror the raw semantics of sockets on -/// platforms as closely as possible. Almost all methods correspond to -/// precisely one libc or OS API call which is essentially just a "Rustic -/// translation" of what's below. 
-/// -/// ## Converting to and from other types -/// -/// This type can be freely converted into the network primitives provided by -/// the standard library, such as [`TcpStream`] or [`UdpSocket`], using the -/// [`From`] trait, see the example below. -/// -/// [`TcpStream`]: std::net::TcpStream -/// [`UdpSocket`]: std::net::UdpSocket -/// -/// # Notes -/// -/// Some methods that set options on `Socket` require two system calls to set -/// there options without overwriting previously set options. We do this by -/// first getting the current settings, applying the desired changes and than -/// updating the settings. This means that the operation is **not** atomic. This -/// can lead to a data race when two threads are changing options in parallel. -/// -/// # Examples -/// ```no_run -/// # fn main() -> std::io::Result<()> { -/// use std::net::{SocketAddr, TcpListener}; -/// use socket2::{Socket, Domain, Type}; -/// -/// // create a TCP listener bound to two addresses -/// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; -/// -/// let address: SocketAddr = "[::1]:12345".parse().unwrap(); -/// let address = address.into(); -/// socket.bind(&address)?; -/// socket.bind(&address)?; -/// socket.listen(128)?; -/// -/// let listener: TcpListener = socket.into(); -/// // ... -/// # drop(listener); -/// # Ok(()) } -/// ``` -pub struct Socket { - inner: Inner, -} - -/// Store a `TcpStream` internally to take advantage of its niche optimizations on Unix platforms. -pub(crate) type Inner = std::net::TcpStream; - -impl Socket { - /// # Safety - /// - /// The caller must ensure `raw` is a valid file descriptor/socket. NOTE: - /// this should really be marked `unsafe`, but this being an internal - /// function, often passed as mapping function, it's makes it very - /// inconvenient to mark it as `unsafe`. 
- pub(crate) fn from_raw(raw: sys::Socket) -> Socket { - Socket { - inner: unsafe { - // SAFETY: the caller must ensure that `raw` is a valid file - // descriptor, but when it isn't it could return I/O errors, or - // potentially close a fd it doesn't own. All of that isn't - // memory unsafe, so it's not desired but never memory unsafe or - // causes UB. - // - // However there is one exception. We use `TcpStream` to - // represent the `Socket` internally (see `Inner` type), - // `TcpStream` has a layout optimisation that doesn't allow for - // negative file descriptors (as those are always invalid). - // Violating this assumption (fd never negative) causes UB, - // something we don't want. So check for that we have this - // `assert!`. - #[cfg(unix)] - assert!(raw >= 0, "tried to create a `Socket` with an invalid fd"); - sys::socket_from_raw(raw) - }, - } - } - - pub(crate) fn as_raw(&self) -> sys::Socket { - sys::socket_as_raw(&self.inner) - } - - pub(crate) fn into_raw(self) -> sys::Socket { - sys::socket_into_raw(self.inner) - } - - /// Creates a new socket and sets common flags. - /// - /// This function corresponds to `socket(2)` on Unix and `WSASocketW` on - /// Windows. - /// - /// On Unix-like systems, the close-on-exec flag is set on the new socket. - /// Additionally, on Apple platforms `SOCK_NOSIGPIPE` is set. On Windows, - /// the socket is made non-inheritable. - /// - /// [`Socket::new_raw`] can be used if you don't want these flags to be set. - pub fn new(domain: Domain, ty: Type, protocol: Option) -> io::Result { - let ty = set_common_type(ty); - Socket::new_raw(domain, ty, protocol).and_then(set_common_flags) - } - - /// Creates a new socket ready to be configured. - /// - /// This function corresponds to `socket(2)` on Unix and `WSASocketW` on - /// Windows and simply creates a new socket, no other configuration is done. 
- pub fn new_raw(domain: Domain, ty: Type, protocol: Option) -> io::Result { - let protocol = protocol.map(|p| p.0).unwrap_or(0); - sys::socket(domain.0, ty.0, protocol).map(Socket::from_raw) - } - - /// Creates a pair of sockets which are connected to each other. - /// - /// This function corresponds to `socketpair(2)`. - /// - /// This function sets the same flags as in done for [`Socket::new`], - /// [`Socket::pair_raw`] can be used if you don't want to set those flags. - #[cfg(any(doc, all(feature = "all", unix)))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] - pub fn pair( - domain: Domain, - ty: Type, - protocol: Option, - ) -> io::Result<(Socket, Socket)> { - let ty = set_common_type(ty); - let (a, b) = Socket::pair_raw(domain, ty, protocol)?; - let a = set_common_flags(a)?; - let b = set_common_flags(b)?; - Ok((a, b)) - } - - /// Creates a pair of sockets which are connected to each other. - /// - /// This function corresponds to `socketpair(2)`. - #[cfg(any(doc, all(feature = "all", unix)))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] - pub fn pair_raw( - domain: Domain, - ty: Type, - protocol: Option, - ) -> io::Result<(Socket, Socket)> { - let protocol = protocol.map(|p| p.0).unwrap_or(0); - sys::socketpair(domain.0, ty.0, protocol) - .map(|[a, b]| (Socket::from_raw(a), Socket::from_raw(b))) - } - - /// Binds this socket to the specified address. - /// - /// This function directly corresponds to the `bind(2)` function on Windows - /// and Unix. - pub fn bind(&self, address: &SockAddr) -> io::Result<()> { - sys::bind(self.as_raw(), address) - } - - /// Initiate a connection on this socket to the specified address. - /// - /// This function directly corresponds to the `connect(2)` function on - /// Windows and Unix. - /// - /// An error will be returned if `listen` or `connect` has already been - /// called on this builder. 
- /// - /// # Notes - /// - /// When using a non-blocking connect (by setting the socket into - /// non-blocking mode before calling this function), socket option can't be - /// set *while connecting*. This will cause errors on Windows. Socket - /// options can be safely set before and after connecting the socket. - pub fn connect(&self, address: &SockAddr) -> io::Result<()> { - sys::connect(self.as_raw(), address) - } - - /// Initiate a connection on this socket to the specified address, only - /// only waiting for a certain period of time for the connection to be - /// established. - /// - /// Unlike many other methods on `Socket`, this does *not* correspond to a - /// single C function. It sets the socket to nonblocking mode, connects via - /// connect(2), and then waits for the connection to complete with poll(2) - /// on Unix and select on Windows. When the connection is complete, the - /// socket is set back to blocking mode. On Unix, this will loop over - /// `EINTR` errors. - /// - /// # Warnings - /// - /// The non-blocking state of the socket is overridden by this function - - /// it will be returned in blocking mode on success, and in an indeterminate - /// state on failure. - /// - /// If the connection request times out, it may still be processing in the - /// background - a second call to `connect` or `connect_timeout` may fail. - pub fn connect_timeout(&self, addr: &SockAddr, timeout: Duration) -> io::Result<()> { - self.set_nonblocking(true)?; - let res = self.connect(addr); - self.set_nonblocking(false)?; - - match res { - Ok(()) => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} - #[cfg(unix)] - Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {} - Err(e) => return Err(e), - } - - sys::poll_connect(self, timeout) - } - - /// Mark a socket as ready to accept incoming connection requests using - /// [`Socket::accept()`]. 
- /// - /// This function directly corresponds to the `listen(2)` function on - /// Windows and Unix. - /// - /// An error will be returned if `listen` or `connect` has already been - /// called on this builder. - pub fn listen(&self, backlog: c_int) -> io::Result<()> { - sys::listen(self.as_raw(), backlog) - } - - /// Accept a new incoming connection from this listener. - /// - /// This function uses `accept4(2)` on platforms that support it and - /// `accept(2)` platforms that do not. - /// - /// This function sets the same flags as in done for [`Socket::new`], - /// [`Socket::accept_raw`] can be used if you don't want to set those flags. - pub fn accept(&self) -> io::Result<(Socket, SockAddr)> { - // Use `accept4` on platforms that support it. - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd", - ))] - return self._accept4(libc::SOCK_CLOEXEC); - - // Fall back to `accept` on platforms that do not support `accept4`. - #[cfg(not(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd", - )))] - { - let (socket, addr) = self.accept_raw()?; - let socket = set_common_flags(socket)?; - // `set_common_flags` does not disable inheritance on Windows because `Socket::new` - // unlike `accept` is able to create the socket with inheritance disabled. - #[cfg(windows)] - socket._set_no_inherit(true)?; - Ok((socket, addr)) - } - } - - /// Accept a new incoming connection from this listener. - /// - /// This function directly corresponds to the `accept(2)` function on - /// Windows and Unix. 
- pub fn accept_raw(&self) -> io::Result<(Socket, SockAddr)> { - sys::accept(self.as_raw()).map(|(inner, addr)| (Socket::from_raw(inner), addr)) - } - - /// Returns the socket address of the local half of this socket. - /// - /// # Notes - /// - /// Depending on the OS this may return an error if the socket is not - /// [bound]. - /// - /// [bound]: Socket::bind - pub fn local_addr(&self) -> io::Result { - sys::getsockname(self.as_raw()) - } - - /// Returns the socket address of the remote peer of this socket. - /// - /// # Notes - /// - /// This returns an error if the socket is not [`connect`ed]. - /// - /// [`connect`ed]: Socket::connect - pub fn peer_addr(&self) -> io::Result { - sys::getpeername(self.as_raw()) - } - - /// Returns the [`Type`] of this socket by checking the `SO_TYPE` option on - /// this socket. - pub fn r#type(&self) -> io::Result { - unsafe { getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_TYPE).map(Type) } - } - - /// Creates a new independently owned handle to the underlying socket. - /// - /// # Notes - /// - /// On Unix this uses `F_DUPFD_CLOEXEC` and thus sets the `FD_CLOEXEC` on - /// the returned socket. - /// - /// On Windows this uses `WSA_FLAG_NO_HANDLE_INHERIT` setting inheriting to - /// false. - /// - /// On Windows this can **not** be used function cannot be used on a - /// QOS-enabled socket, see - /// . - pub fn try_clone(&self) -> io::Result { - sys::try_clone(self.as_raw()).map(Socket::from_raw) - } - - /// Moves this TCP stream into or out of nonblocking mode. - /// - /// # Notes - /// - /// On Unix this corresponds to calling `fcntl` (un)setting `O_NONBLOCK`. - /// - /// On Windows this corresponds to calling `ioctlsocket` (un)setting - /// `FIONBIO`. - pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { - sys::set_nonblocking(self.as_raw(), nonblocking) - } - - /// Shuts down the read, write, or both halves of this connection. 
- /// - /// This function will cause all pending and future I/O on the specified - /// portions to return immediately with an appropriate value. - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - sys::shutdown(self.as_raw(), how) - } - - /// Receives data on the socket from the remote address to which it is - /// connected. - /// - /// The [`connect`] method will connect this socket to a remote address. - /// This method might fail if the socket is not connected. - /// - /// [`connect`]: Socket::connect - /// - /// # Safety - /// - /// Normally casting a `&mut [u8]` to `&mut [MaybeUninit]` would be - /// unsound, as that allows us to write uninitialised bytes to the buffer. - /// However this implementation promises to not write uninitialised bytes to - /// the `buf`fer and passes it directly to `recv(2)` system call. This - /// promise ensures that this function can be called using a `buf`fer of - /// type `&mut [u8]`. - /// - /// Note that the [`io::Read::read`] implementation calls this function with - /// a `buf`fer of type `&mut [u8]`, allowing initialised buffers to be used - /// without using `unsafe`. - pub fn recv(&self, buf: &mut [MaybeUninit]) -> io::Result { - self.recv_with_flags(buf, 0) - } - - /// Receives out-of-band (OOB) data on the socket from the remote address to - /// which it is connected by setting the `MSG_OOB` flag for this call. - /// - /// For more information, see [`recv`], [`out_of_band_inline`]. - /// - /// [`recv`]: Socket::recv - /// [`out_of_band_inline`]: Socket::out_of_band_inline - pub fn recv_out_of_band(&self, buf: &mut [MaybeUninit]) -> io::Result { - self.recv_with_flags(buf, sys::MSG_OOB) - } - - /// Identical to [`recv`] but allows for specification of arbitrary flags to - /// the underlying `recv` call. 
- /// - /// [`recv`]: Socket::recv - pub fn recv_with_flags( - &self, - buf: &mut [MaybeUninit], - flags: sys::c_int, - ) -> io::Result { - sys::recv(self.as_raw(), buf, flags) - } - - /// Receives data on the socket from the remote address to which it is - /// connected. Unlike [`recv`] this allows passing multiple buffers. - /// - /// The [`connect`] method will connect this socket to a remote address. - /// This method might fail if the socket is not connected. - /// - /// In addition to the number of bytes read, this function returns the flags - /// for the received message. See [`RecvFlags`] for more information about - /// the returned flags. - /// - /// [`recv`]: Socket::recv - /// [`connect`]: Socket::connect - /// - /// # Safety - /// - /// Normally casting a `IoSliceMut` to `MaybeUninitSlice` would be unsound, - /// as that allows us to write uninitialised bytes to the buffer. However - /// this implementation promises to not write uninitialised bytes to the - /// `bufs` and passes it directly to `recvmsg(2)` system call. This promise - /// ensures that this function can be called using `bufs` of type `&mut - /// [IoSliceMut]`. - /// - /// Note that the [`io::Read::read_vectored`] implementation calls this - /// function with `buf`s of type `&mut [IoSliceMut]`, allowing initialised - /// buffers to be used without using `unsafe`. - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn recv_vectored( - &self, - bufs: &mut [MaybeUninitSlice<'_>], - ) -> io::Result<(usize, RecvFlags)> { - self.recv_vectored_with_flags(bufs, 0) - } - - /// Identical to [`recv_vectored`] but allows for specification of arbitrary - /// flags to the underlying `recvmsg`/`WSARecv` call. - /// - /// [`recv_vectored`]: Socket::recv_vectored - /// - /// # Safety - /// - /// `recv_from_vectored` makes the same safety guarantees regarding `bufs` - /// as [`recv_vectored`]. 
- /// - /// [`recv_vectored`]: Socket::recv_vectored - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn recv_vectored_with_flags( - &self, - bufs: &mut [MaybeUninitSlice<'_>], - flags: c_int, - ) -> io::Result<(usize, RecvFlags)> { - sys::recv_vectored(self.as_raw(), bufs, flags) - } - - /// Receives data on the socket from the remote adress to which it is - /// connected, without removing that data from the queue. On success, - /// returns the number of bytes peeked. - /// - /// Successive calls return the same data. This is accomplished by passing - /// `MSG_PEEK` as a flag to the underlying `recv` system call. - /// - /// # Safety - /// - /// `peek` makes the same safety guarantees regarding the `buf`fer as - /// [`recv`]. - /// - /// [`recv`]: Socket::recv - pub fn peek(&self, buf: &mut [MaybeUninit]) -> io::Result { - self.recv_with_flags(buf, sys::MSG_PEEK) - } - - /// Receives data from the socket. On success, returns the number of bytes - /// read and the address from whence the data came. - /// - /// # Safety - /// - /// `recv_from` makes the same safety guarantees regarding the `buf`fer as - /// [`recv`]. - /// - /// [`recv`]: Socket::recv - pub fn recv_from(&self, buf: &mut [MaybeUninit]) -> io::Result<(usize, SockAddr)> { - self.recv_from_with_flags(buf, 0) - } - - /// Identical to [`recv_from`] but allows for specification of arbitrary - /// flags to the underlying `recvfrom` call. - /// - /// [`recv_from`]: Socket::recv_from - pub fn recv_from_with_flags( - &self, - buf: &mut [MaybeUninit], - flags: c_int, - ) -> io::Result<(usize, SockAddr)> { - sys::recv_from(self.as_raw(), buf, flags) - } - - /// Receives data from the socket. Returns the amount of bytes read, the - /// [`RecvFlags`] and the remote address from the data is coming. Unlike - /// [`recv_from`] this allows passing multiple buffers. 
- /// - /// [`recv_from`]: Socket::recv_from - /// - /// # Safety - /// - /// `recv_from_vectored` makes the same safety guarantees regarding `bufs` - /// as [`recv_vectored`]. - /// - /// [`recv_vectored`]: Socket::recv_vectored - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn recv_from_vectored( - &self, - bufs: &mut [MaybeUninitSlice<'_>], - ) -> io::Result<(usize, RecvFlags, SockAddr)> { - self.recv_from_vectored_with_flags(bufs, 0) - } - - /// Identical to [`recv_from_vectored`] but allows for specification of - /// arbitrary flags to the underlying `recvmsg`/`WSARecvFrom` call. - /// - /// [`recv_from_vectored`]: Socket::recv_from_vectored - /// - /// # Safety - /// - /// `recv_from_vectored` makes the same safety guarantees regarding `bufs` - /// as [`recv_vectored`]. - /// - /// [`recv_vectored`]: Socket::recv_vectored - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn recv_from_vectored_with_flags( - &self, - bufs: &mut [MaybeUninitSlice<'_>], - flags: c_int, - ) -> io::Result<(usize, RecvFlags, SockAddr)> { - sys::recv_from_vectored(self.as_raw(), bufs, flags) - } - - /// Receives data from the socket, without removing it from the queue. - /// - /// Successive calls return the same data. This is accomplished by passing - /// `MSG_PEEK` as a flag to the underlying `recvfrom` system call. - /// - /// On success, returns the number of bytes peeked and the address from - /// whence the data came. - /// - /// # Safety - /// - /// `peek_from` makes the same safety guarantees regarding the `buf`fer as - /// [`recv`]. - /// - /// # Note: Datagram Sockets - /// For datagram sockets, the behavior of this method when `buf` is smaller than - /// the datagram at the head of the receive queue differs between Windows and - /// Unix-like platforms (Linux, macOS, BSDs, etc: colloquially termed "*nix"). 
- /// - /// On *nix platforms, the datagram is truncated to the length of `buf`. - /// - /// On Windows, an error corresponding to `WSAEMSGSIZE` will be returned. - /// - /// For consistency between platforms, be sure to provide a sufficiently large buffer to avoid - /// truncation; the exact size required depends on the underlying protocol. - /// - /// If you just want to know the sender of the data, try [`peek_sender`]. - /// - /// [`recv`]: Socket::recv - /// [`peek_sender`]: Socket::peek_sender - pub fn peek_from(&self, buf: &mut [MaybeUninit]) -> io::Result<(usize, SockAddr)> { - self.recv_from_with_flags(buf, sys::MSG_PEEK) - } - - /// Retrieve the sender for the data at the head of the receive queue. - /// - /// This is equivalent to calling [`peek_from`] with a zero-sized buffer, - /// but suppresses the `WSAEMSGSIZE` error on Windows. - /// - /// [`peek_from`]: Socket::peek_from - pub fn peek_sender(&self) -> io::Result { - sys::peek_sender(self.as_raw()) - } - - /// Sends data on the socket to a connected peer. - /// - /// This is typically used on TCP sockets or datagram sockets which have - /// been connected. - /// - /// On success returns the number of bytes that were sent. - pub fn send(&self, buf: &[u8]) -> io::Result { - self.send_with_flags(buf, 0) - } - - /// Identical to [`send`] but allows for specification of arbitrary flags to the underlying - /// `send` call. - /// - /// [`send`]: #method.send - pub fn send_with_flags(&self, buf: &[u8], flags: c_int) -> io::Result { - sys::send(self.as_raw(), buf, flags) - } - - /// Send data to the connected peer. Returns the amount of bytes written. - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn send_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result { - self.send_vectored_with_flags(bufs, 0) - } - - /// Identical to [`send_vectored`] but allows for specification of arbitrary - /// flags to the underlying `sendmsg`/`WSASend` call. 
- /// - /// [`send_vectored`]: Socket::send_vectored - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn send_vectored_with_flags( - &self, - bufs: &[IoSlice<'_>], - flags: c_int, - ) -> io::Result { - sys::send_vectored(self.as_raw(), bufs, flags) - } - - /// Sends out-of-band (OOB) data on the socket to connected peer - /// by setting the `MSG_OOB` flag for this call. - /// - /// For more information, see [`send`], [`out_of_band_inline`]. - /// - /// [`send`]: #method.send - /// [`out_of_band_inline`]: #method.out_of_band_inline - pub fn send_out_of_band(&self, buf: &[u8]) -> io::Result { - self.send_with_flags(buf, sys::MSG_OOB) - } - - /// Sends data on the socket to the given address. On success, returns the - /// number of bytes written. - /// - /// This is typically used on UDP or datagram-oriented sockets. - pub fn send_to(&self, buf: &[u8], addr: &SockAddr) -> io::Result { - self.send_to_with_flags(buf, addr, 0) - } - - /// Identical to [`send_to`] but allows for specification of arbitrary flags - /// to the underlying `sendto` call. - /// - /// [`send_to`]: Socket::send_to - pub fn send_to_with_flags( - &self, - buf: &[u8], - addr: &SockAddr, - flags: c_int, - ) -> io::Result { - sys::send_to(self.as_raw(), buf, addr, flags) - } - - /// Send data to a peer listening on `addr`. Returns the amount of bytes - /// written. - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn send_to_vectored(&self, bufs: &[IoSlice<'_>], addr: &SockAddr) -> io::Result { - self.send_to_vectored_with_flags(bufs, addr, 0) - } - - /// Identical to [`send_to_vectored`] but allows for specification of - /// arbitrary flags to the underlying `sendmsg`/`WSASendTo` call. 
- /// - /// [`send_to_vectored`]: Socket::send_to_vectored - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn send_to_vectored_with_flags( - &self, - bufs: &[IoSlice<'_>], - addr: &SockAddr, - flags: c_int, - ) -> io::Result { - sys::send_to_vectored(self.as_raw(), bufs, addr, flags) - } -} - -/// Set `SOCK_CLOEXEC` and `NO_HANDLE_INHERIT` on the `ty`pe on platforms that -/// support it. -#[inline(always)] -fn set_common_type(ty: Type) -> Type { - // On platforms that support it set `SOCK_CLOEXEC`. - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd", - ))] - let ty = ty._cloexec(); - - // On windows set `NO_HANDLE_INHERIT`. - #[cfg(windows)] - let ty = ty._no_inherit(); - - ty -} - -/// Set `FD_CLOEXEC` and `NOSIGPIPE` on the `socket` for platforms that need it. -#[inline(always)] -#[allow(clippy::unnecessary_wraps)] -fn set_common_flags(socket: Socket) -> io::Result { - // On platforms that don't have `SOCK_CLOEXEC` use `FD_CLOEXEC`. - #[cfg(all( - unix, - not(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd", - )) - ))] - socket._set_cloexec(true)?; - - // On Apple platforms set `NOSIGPIPE`. - #[cfg(target_vendor = "apple")] - socket._set_nosigpipe(true)?; - - Ok(socket) -} - -/// A local interface specified by its index or an address assigned to it. -/// -/// `Index(0)` and `Address(Ipv4Addr::UNSPECIFIED)` are equivalent and indicate -/// that an appropriate interface should be selected by the system. 
-#[cfg(not(any( - target_os = "haiku", - target_os = "illumos", - target_os = "netbsd", - target_os = "redox", - target_os = "solaris", -)))] -#[derive(Debug)] -pub enum InterfaceIndexOrAddress { - /// An interface index. - Index(u32), - /// An address assigned to an interface. - Address(Ipv4Addr), -} - -/// Socket options get/set using `SOL_SOCKET`. -/// -/// Additional documentation can be found in documentation of the OS. -/// * Linux: -/// * Windows: -impl Socket { - /// Get the value of the `SO_BROADCAST` option for this socket. - /// - /// For more information about this option, see [`set_broadcast`]. - /// - /// [`set_broadcast`]: Socket::set_broadcast - pub fn broadcast(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_BROADCAST) - .map(|broadcast| broadcast != 0) - } - } - - /// Set the value of the `SO_BROADCAST` option for this socket. - /// - /// When enabled, this socket is allowed to send packets to a broadcast - /// address. - pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::SOL_SOCKET, - sys::SO_BROADCAST, - broadcast as c_int, - ) - } - } - - /// Get the value of the `SO_ERROR` option on this socket. - /// - /// This will retrieve the stored error in the underlying socket, clearing - /// the field in the process. This can be useful for checking errors between - /// calls. - pub fn take_error(&self) -> io::Result> { - match unsafe { getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_ERROR) } { - Ok(0) => Ok(None), - Ok(errno) => Ok(Some(io::Error::from_raw_os_error(errno))), - Err(err) => Err(err), - } - } - - /// Get the value of the `SO_KEEPALIVE` option on this socket. - /// - /// For more information about this option, see [`set_keepalive`]. 
- /// - /// [`set_keepalive`]: Socket::set_keepalive - pub fn keepalive(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_KEEPALIVE) - .map(|keepalive| keepalive != 0) - } - } - - /// Set value for the `SO_KEEPALIVE` option on this socket. - /// - /// Enable sending of keep-alive messages on connection-oriented sockets. - pub fn set_keepalive(&self, keepalive: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::SOL_SOCKET, - sys::SO_KEEPALIVE, - keepalive as c_int, - ) - } - } - - /// Get the value of the `SO_LINGER` option on this socket. - /// - /// For more information about this option, see [`set_linger`]. - /// - /// [`set_linger`]: Socket::set_linger - pub fn linger(&self) -> io::Result> { - unsafe { - getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_LINGER) - .map(from_linger) - } - } - - /// Set value for the `SO_LINGER` option on this socket. - /// - /// If `linger` is not `None`, a close(2) or shutdown(2) will not return - /// until all queued messages for the socket have been successfully sent or - /// the linger timeout has been reached. Otherwise, the call returns - /// immediately and the closing is done in the background. When the socket - /// is closed as part of exit(2), it always lingers in the background. - /// - /// # Notes - /// - /// On most OSs the duration only has a precision of seconds and will be - /// silently truncated. - /// - /// On Apple platforms (e.g. macOS, iOS, etc) this uses `SO_LINGER_SEC`. - pub fn set_linger(&self, linger: Option) -> io::Result<()> { - let linger = into_linger(linger); - unsafe { setsockopt(self.as_raw(), sys::SOL_SOCKET, sys::SO_LINGER, linger) } - } - - /// Get value for the `SO_OOBINLINE` option on this socket. - /// - /// For more information about this option, see [`set_out_of_band_inline`]. 
- /// - /// [`set_out_of_band_inline`]: Socket::set_out_of_band_inline - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn out_of_band_inline(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_OOBINLINE) - .map(|oob_inline| oob_inline != 0) - } - } - - /// Set value for the `SO_OOBINLINE` option on this socket. - /// - /// If this option is enabled, out-of-band data is directly placed into the - /// receive data stream. Otherwise, out-of-band data is passed only when the - /// `MSG_OOB` flag is set during receiving. As per RFC6093, TCP sockets - /// using the Urgent mechanism are encouraged to set this flag. - #[cfg(not(target_os = "redox"))] - #[cfg_attr(docsrs, doc(cfg(not(target_os = "redox"))))] - pub fn set_out_of_band_inline(&self, oob_inline: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::SOL_SOCKET, - sys::SO_OOBINLINE, - oob_inline as c_int, - ) - } - } - - /// Get value for the `SO_RCVBUF` option on this socket. - /// - /// For more information about this option, see [`set_recv_buffer_size`]. - /// - /// [`set_recv_buffer_size`]: Socket::set_recv_buffer_size - pub fn recv_buffer_size(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_RCVBUF) - .map(|size| size as usize) - } - } - - /// Set value for the `SO_RCVBUF` option on this socket. - /// - /// Changes the size of the operating system's receive buffer associated - /// with the socket. - pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::SOL_SOCKET, - sys::SO_RCVBUF, - size as c_int, - ) - } - } - - /// Get value for the `SO_RCVTIMEO` option on this socket. - /// - /// If the returned timeout is `None`, then `read` and `recv` calls will - /// block indefinitely. 
- pub fn read_timeout(&self) -> io::Result> { - sys::timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_RCVTIMEO) - } - - /// Set value for the `SO_RCVTIMEO` option on this socket. - /// - /// If `timeout` is `None`, then `read` and `recv` calls will block - /// indefinitely. - pub fn set_read_timeout(&self, duration: Option) -> io::Result<()> { - sys::set_timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_RCVTIMEO, duration) - } - - /// Get the value of the `SO_REUSEADDR` option on this socket. - /// - /// For more information about this option, see [`set_reuse_address`]. - /// - /// [`set_reuse_address`]: Socket::set_reuse_address - pub fn reuse_address(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_REUSEADDR) - .map(|reuse| reuse != 0) - } - } - - /// Set value for the `SO_REUSEADDR` option on this socket. - /// - /// This indicates that futher calls to `bind` may allow reuse of local - /// addresses. For IPv4 sockets this means that a socket may bind even when - /// there's a socket already listening on this port. - pub fn set_reuse_address(&self, reuse: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::SOL_SOCKET, - sys::SO_REUSEADDR, - reuse as c_int, - ) - } - } - - /// Get the value of the `SO_SNDBUF` option on this socket. - /// - /// For more information about this option, see [`set_send_buffer_size`]. - /// - /// [`set_send_buffer_size`]: Socket::set_send_buffer_size - pub fn send_buffer_size(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::SOL_SOCKET, sys::SO_SNDBUF) - .map(|size| size as usize) - } - } - - /// Set value for the `SO_SNDBUF` option on this socket. - /// - /// Changes the size of the operating system's send buffer associated with - /// the socket. 
- pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::SOL_SOCKET, - sys::SO_SNDBUF, - size as c_int, - ) - } - } - - /// Get value for the `SO_SNDTIMEO` option on this socket. - /// - /// If the returned timeout is `None`, then `write` and `send` calls will - /// block indefinitely. - pub fn write_timeout(&self) -> io::Result> { - sys::timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_SNDTIMEO) - } - - /// Set value for the `SO_SNDTIMEO` option on this socket. - /// - /// If `timeout` is `None`, then `write` and `send` calls will block - /// indefinitely. - pub fn set_write_timeout(&self, duration: Option) -> io::Result<()> { - sys::set_timeout_opt(self.as_raw(), sys::SOL_SOCKET, sys::SO_SNDTIMEO, duration) - } -} - -fn from_linger(linger: sys::linger) -> Option { - if linger.l_onoff == 0 { - None - } else { - Some(Duration::from_secs(linger.l_linger as u64)) - } -} - -fn into_linger(duration: Option) -> sys::linger { - match duration { - Some(duration) => sys::linger { - l_onoff: 1, - l_linger: duration.as_secs() as _, - }, - None => sys::linger { - l_onoff: 0, - l_linger: 0, - }, - } -} - -/// Socket options for IPv4 sockets, get/set using `IPPROTO_IP`. -/// -/// Additional documentation can be found in documentation of the OS. -/// * Linux: -/// * Windows: -impl Socket { - /// Get the value of the `IP_HDRINCL` option on this socket. - /// - /// For more information about this option, see [`set_header_included`]. - /// - /// [`set_header_included`]: Socket::set_header_included - #[cfg(all(feature = "all", not(target_os = "redox")))] - #[cfg_attr(docsrs, doc(all(feature = "all", not(target_os = "redox"))))] - pub fn header_included(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_HDRINCL) - .map(|included| included != 0) - } - } - - /// Set the value of the `IP_HDRINCL` option on this socket. 
- /// - /// If enabled, the user supplies an IP header in front of the user data. - /// Valid only for [`SOCK_RAW`] sockets; see [raw(7)] for more information. - /// When this flag is enabled, the values set by `IP_OPTIONS`, [`IP_TTL`], - /// and [`IP_TOS`] are ignored. - /// - /// [`SOCK_RAW`]: Type::RAW - /// [raw(7)]: https://man7.org/linux/man-pages/man7/raw.7.html - /// [`IP_TTL`]: Socket::set_ttl - /// [`IP_TOS`]: Socket::set_tos - #[cfg(all(feature = "all", not(target_os = "redox")))] - #[cfg_attr(docsrs, doc(all(feature = "all", not(target_os = "redox"))))] - pub fn set_header_included(&self, included: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_HDRINCL, - included as c_int, - ) - } - } - - /// Get the value of the `IP_TRANSPARENT` option on this socket. - /// - /// For more information about this option, see [`set_ip_transparent`]. - /// - /// [`set_ip_transparent`]: Socket::set_ip_transparent - #[cfg(any(doc, all(feature = "all", target_os = "linux")))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn ip_transparent(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IP, libc::IP_TRANSPARENT) - .map(|transparent| transparent != 0) - } - } - - /// Set the value of the `IP_TRANSPARENT` option on this socket. - /// - /// Setting this boolean option enables transparent proxying - /// on this socket. This socket option allows the calling - /// application to bind to a nonlocal IP address and operate - /// both as a client and a server with the foreign address as - /// the local endpoint. NOTE: this requires that routing be - /// set up in a way that packets going to the foreign address - /// are routed through the TProxy box (i.e., the system - /// hosting the application that employs the IP_TRANSPARENT - /// socket option). Enabling this socket option requires - /// superuser privileges (the `CAP_NET_ADMIN` capability). 
- /// - /// TProxy redirection with the iptables TPROXY target also - /// requires that this option be set on the redirected socket. - #[cfg(any(doc, all(feature = "all", target_os = "linux")))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn set_ip_transparent(&self, transparent: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - libc::IP_TRANSPARENT, - transparent as c_int, - ) - } - } - - /// Join a multicast group using `IP_ADD_MEMBERSHIP` option on this socket. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// address of the local interface with which the system should join the - /// multicast group. If it's [`Ipv4Addr::UNSPECIFIED`] (`INADDR_ANY`) then - /// an appropriate interface is chosen by the system. - pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - let mreq = sys::IpMreq { - imr_multiaddr: sys::to_in_addr(multiaddr), - imr_interface: sys::to_in_addr(interface), - }; - unsafe { setsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_ADD_MEMBERSHIP, mreq) } - } - - /// Leave a multicast group using `IP_DROP_MEMBERSHIP` option on this socket. - /// - /// For more information about this option, see [`join_multicast_v4`]. - /// - /// [`join_multicast_v4`]: Socket::join_multicast_v4 - pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> { - let mreq = sys::IpMreq { - imr_multiaddr: sys::to_in_addr(multiaddr), - imr_interface: sys::to_in_addr(interface), - }; - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_DROP_MEMBERSHIP, - mreq, - ) - } - } - - /// Join a multicast group using `IP_ADD_MEMBERSHIP` option on this socket. - /// - /// This function specifies a new multicast group for this socket to join. 
- /// The address must be a valid multicast address, and `interface` specifies - /// the local interface with which the system should join the multicast - /// group. See [`InterfaceIndexOrAddress`]. - #[cfg(not(any( - target_os = "haiku", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "nto", - )))] - pub fn join_multicast_v4_n( - &self, - multiaddr: &Ipv4Addr, - interface: &InterfaceIndexOrAddress, - ) -> io::Result<()> { - let mreqn = sys::to_mreqn(multiaddr, interface); - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_ADD_MEMBERSHIP, - mreqn, - ) - } - } - - /// Leave a multicast group using `IP_DROP_MEMBERSHIP` option on this socket. - /// - /// For more information about this option, see [`join_multicast_v4_n`]. - /// - /// [`join_multicast_v4_n`]: Socket::join_multicast_v4_n - #[cfg(not(any( - target_os = "haiku", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "nto", - )))] - pub fn leave_multicast_v4_n( - &self, - multiaddr: &Ipv4Addr, - interface: &InterfaceIndexOrAddress, - ) -> io::Result<()> { - let mreqn = sys::to_mreqn(multiaddr, interface); - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_DROP_MEMBERSHIP, - mreqn, - ) - } - } - - /// Join a multicast SSM channel using `IP_ADD_SOURCE_MEMBERSHIP` option on this socket. - /// - /// This function specifies a new multicast channel for this socket to join. - /// The group must be a valid SSM group address, the source must be the address of the sender - /// and `interface` is the address of the local interface with which the system should join the - /// multicast group. If it's [`Ipv4Addr::UNSPECIFIED`] (`INADDR_ANY`) then - /// an appropriate interface is chosen by the system. 
- #[cfg(not(any( - target_os = "dragonfly", - target_os = "haiku", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "fuchsia", - target_os = "nto", - )))] - pub fn join_ssm_v4( - &self, - source: &Ipv4Addr, - group: &Ipv4Addr, - interface: &Ipv4Addr, - ) -> io::Result<()> { - let mreqs = sys::IpMreqSource { - imr_multiaddr: sys::to_in_addr(group), - imr_interface: sys::to_in_addr(interface), - imr_sourceaddr: sys::to_in_addr(source), - }; - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_ADD_SOURCE_MEMBERSHIP, - mreqs, - ) - } - } - - /// Leave a multicast group using `IP_DROP_SOURCE_MEMBERSHIP` option on this socket. - /// - /// For more information about this option, see [`join_ssm_v4`]. - /// - /// [`join_ssm_v4`]: Socket::join_ssm_v4 - #[cfg(not(any( - target_os = "dragonfly", - target_os = "haiku", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "fuchsia", - target_os = "nto", - )))] - pub fn leave_ssm_v4( - &self, - source: &Ipv4Addr, - group: &Ipv4Addr, - interface: &Ipv4Addr, - ) -> io::Result<()> { - let mreqs = sys::IpMreqSource { - imr_multiaddr: sys::to_in_addr(group), - imr_interface: sys::to_in_addr(interface), - imr_sourceaddr: sys::to_in_addr(source), - }; - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_DROP_SOURCE_MEMBERSHIP, - mreqs, - ) - } - } - - /// Get the value of the `IP_MULTICAST_IF` option for this socket. - /// - /// For more information about this option, see [`set_multicast_if_v4`]. - /// - /// [`set_multicast_if_v4`]: Socket::set_multicast_if_v4 - pub fn multicast_if_v4(&self) -> io::Result { - unsafe { - getsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_MULTICAST_IF).map(sys::from_in_addr) - } - } - - /// Set the value of the `IP_MULTICAST_IF` option for this socket. - /// - /// Specifies the interface to use for routing multicast packets. 
- pub fn set_multicast_if_v4(&self, interface: &Ipv4Addr) -> io::Result<()> { - let interface = sys::to_in_addr(interface); - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_MULTICAST_IF, - interface, - ) - } - } - - /// Get the value of the `IP_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see [`set_multicast_loop_v4`]. - /// - /// [`set_multicast_loop_v4`]: Socket::set_multicast_loop_v4 - pub fn multicast_loop_v4(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_MULTICAST_LOOP) - .map(|loop_v4| loop_v4 != 0) - } - } - - /// Set the value of the `IP_MULTICAST_LOOP` option for this socket. - /// - /// If enabled, multicast packets will be looped back to the local socket. - /// Note that this may not have any affect on IPv6 sockets. - pub fn set_multicast_loop_v4(&self, loop_v4: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_MULTICAST_LOOP, - loop_v4 as c_int, - ) - } - } - - /// Get the value of the `IP_MULTICAST_TTL` option for this socket. - /// - /// For more information about this option, see [`set_multicast_ttl_v4`]. - /// - /// [`set_multicast_ttl_v4`]: Socket::set_multicast_ttl_v4 - pub fn multicast_ttl_v4(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_MULTICAST_TTL) - .map(|ttl| ttl as u32) - } - } - - /// Set the value of the `IP_MULTICAST_TTL` option for this socket. - /// - /// Indicates the time-to-live value of outgoing multicast packets for - /// this socket. The default value is 1 which means that multicast packets - /// don't leave the local network unless explicitly requested. - /// - /// Note that this may not have any affect on IPv6 sockets. 
- pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_MULTICAST_TTL, - ttl as c_int, - ) - } - } - - /// Get the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`]. - /// - /// [`set_ttl`]: Socket::set_ttl - pub fn ttl(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_TTL).map(|ttl| ttl as u32) - } - } - - /// Set the value of the `IP_TTL` option for this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - unsafe { setsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_TTL, ttl as c_int) } - } - - /// Set the value of the `IP_TOS` option for this socket. - /// - /// This value sets the type-of-service field that is used in every packet - /// sent from this socket. - /// - /// NOTE: - /// documents that not all versions of windows support `IP_TOS`. - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - pub fn set_tos(&self, tos: u32) -> io::Result<()> { - unsafe { setsockopt(self.as_raw(), sys::IPPROTO_IP, sys::IP_TOS, tos as c_int) } - } - - /// Get the value of the `IP_TOS` option for this socket. - /// - /// For more information about this option, see [`set_tos`]. - /// - /// NOTE: - /// documents that not all versions of windows support `IP_TOS`. - /// - /// [`set_tos`]: Socket::set_tos - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - pub fn tos(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_TOS).map(|tos| tos as u32) - } - } - - /// Set the value of the `IP_RECVTOS` option for this socket. - /// - /// If enabled, the IP_TOS ancillary message is passed with - /// incoming packets. 
It contains a byte which specifies the - /// Type of Service/Precedence field of the packet header. - #[cfg(not(any( - target_os = "dragonfly", - target_os = "fuchsia", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "windows", - target_os = "nto", - )))] - pub fn set_recv_tos(&self, recv_tos: bool) -> io::Result<()> { - let recv_tos = if recv_tos { 1 } else { 0 }; - - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IP, - sys::IP_RECVTOS, - recv_tos as c_int, - ) - } - } - - /// Get the value of the `IP_RECVTOS` option for this socket. - /// - /// For more information about this option, see [`set_recv_tos`]. - /// - /// [`set_recv_tos`]: Socket::set_recv_tos - #[cfg(not(any( - target_os = "dragonfly", - target_os = "fuchsia", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "windows", - target_os = "nto", - )))] - pub fn recv_tos(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IP, sys::IP_RECVTOS) - .map(|recv_tos| recv_tos > 0) - } - } -} - -/// Socket options for IPv6 sockets, get/set using `IPPROTO_IPV6`. -/// -/// Additional documentation can be found in documentation of the OS. -/// * Linux: -/// * Windows: -impl Socket { - /// Join a multicast group using `IPV6_ADD_MEMBERSHIP` option on this socket. - /// - /// Some OSs use `IPV6_JOIN_GROUP` for this option. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// index of the interface to join/leave (or 0 to indicate any interface). - #[cfg(not(target_os = "nto"))] - pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - let mreq = sys::Ipv6Mreq { - ipv6mr_multiaddr: sys::to_in6_addr(multiaddr), - // NOTE: some OSs use `c_int`, others use `c_uint`. 
- ipv6mr_interface: interface as _, - }; - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IPV6, - sys::IPV6_ADD_MEMBERSHIP, - mreq, - ) - } - } - - /// Leave a multicast group using `IPV6_DROP_MEMBERSHIP` option on this socket. - /// - /// Some OSs use `IPV6_LEAVE_GROUP` for this option. - /// - /// For more information about this option, see [`join_multicast_v6`]. - /// - /// [`join_multicast_v6`]: Socket::join_multicast_v6 - #[cfg(not(target_os = "nto"))] - pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - let mreq = sys::Ipv6Mreq { - ipv6mr_multiaddr: sys::to_in6_addr(multiaddr), - // NOTE: some OSs use `c_int`, others use `c_uint`. - ipv6mr_interface: interface as _, - }; - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IPV6, - sys::IPV6_DROP_MEMBERSHIP, - mreq, - ) - } - } - - /// Get the value of the `IPV6_MULTICAST_HOPS` option for this socket - /// - /// For more information about this option, see [`set_multicast_hops_v6`]. - /// - /// [`set_multicast_hops_v6`]: Socket::set_multicast_hops_v6 - pub fn multicast_hops_v6(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_MULTICAST_HOPS) - .map(|hops| hops as u32) - } - } - - /// Set the value of the `IPV6_MULTICAST_HOPS` option for this socket - /// - /// Indicates the number of "routers" multicast packets will transit for - /// this socket. The default value is 1 which means that multicast packets - /// don't leave the local network unless explicitly requested. - pub fn set_multicast_hops_v6(&self, hops: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IPV6, - sys::IPV6_MULTICAST_HOPS, - hops as c_int, - ) - } - } - - /// Get the value of the `IPV6_MULTICAST_IF` option for this socket. - /// - /// For more information about this option, see [`set_multicast_if_v6`]. 
- /// - /// [`set_multicast_if_v6`]: Socket::set_multicast_if_v6 - pub fn multicast_if_v6(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_MULTICAST_IF) - .map(|interface| interface as u32) - } - } - - /// Set the value of the `IPV6_MULTICAST_IF` option for this socket. - /// - /// Specifies the interface to use for routing multicast packets. Unlike - /// ipv4, this is generally required in ipv6 contexts where network routing - /// prefixes may overlap. - pub fn set_multicast_if_v6(&self, interface: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IPV6, - sys::IPV6_MULTICAST_IF, - interface as c_int, - ) - } - } - - /// Get the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see [`set_multicast_loop_v6`]. - /// - /// [`set_multicast_loop_v6`]: Socket::set_multicast_loop_v6 - pub fn multicast_loop_v6(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_MULTICAST_LOOP) - .map(|loop_v6| loop_v6 != 0) - } - } - - /// Set the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// Controls whether this socket sees the multicast packets it sends itself. - /// Note that this may not have any affect on IPv4 sockets. - pub fn set_multicast_loop_v6(&self, loop_v6: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IPV6, - sys::IPV6_MULTICAST_LOOP, - loop_v6 as c_int, - ) - } - } - - /// Get the value of the `IPV6_UNICAST_HOPS` option for this socket. - /// - /// Specifies the hop limit for ipv6 unicast packets - pub fn unicast_hops_v6(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_UNICAST_HOPS) - .map(|hops| hops as u32) - } - } - - /// Set the value for the `IPV6_UNICAST_HOPS` option on this socket. 
- /// - /// Specifies the hop limit for ipv6 unicast packets - pub fn set_unicast_hops_v6(&self, hops: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IPV6, - sys::IPV6_UNICAST_HOPS, - hops as c_int, - ) - } - } - - /// Get the value of the `IPV6_V6ONLY` option for this socket. - /// - /// For more information about this option, see [`set_only_v6`]. - /// - /// [`set_only_v6`]: Socket::set_only_v6 - pub fn only_v6(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_IPV6, sys::IPV6_V6ONLY) - .map(|only_v6| only_v6 != 0) - } - } - - /// Set the value for the `IPV6_V6ONLY` option on this socket. - /// - /// If this is set to `true` then the socket is restricted to sending and - /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications - /// can bind the same port at the same time. - /// - /// If this is set to `false` then the socket can be used to send and - /// receive packets from an IPv4-mapped IPv6 address. - pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_IPV6, - sys::IPV6_V6ONLY, - only_v6 as c_int, - ) - } - } -} - -/// Socket options for TCP sockets, get/set using `IPPROTO_TCP`. -/// -/// Additional documentation can be found in documentation of the OS. -/// * Linux: -/// * Windows: -impl Socket { - /// Get the value of the `TCP_KEEPIDLE` option on this socket. - /// - /// This returns the value of `TCP_KEEPALIVE` on macOS and iOS and `TCP_KEEPIDLE` on all other - /// supported Unix operating systems. - #[cfg(any( - doc, - all( - feature = "all", - not(any(windows, target_os = "haiku", target_os = "openbsd")) - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - not(any(windows, target_os = "haiku", target_os = "openbsd")) - ))) - )] - pub fn keepalive_time(&self) -> io::Result { - sys::keepalive_time(self.as_raw()) - } - - /// Get the value of the `TCP_KEEPINTVL` option on this socket. 
- /// - /// For more information about this option, see [`set_tcp_keepalive`]. - /// - /// [`set_tcp_keepalive`]: Socket::set_tcp_keepalive - #[cfg(all( - feature = "all", - any( - doc, - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ) - ))) - )] - pub fn keepalive_interval(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_TCP, sys::TCP_KEEPINTVL) - .map(|secs| Duration::from_secs(secs as u64)) - } - } - - /// Get the value of the `TCP_KEEPCNT` option on this socket. - /// - /// For more information about this option, see [`set_tcp_keepalive`]. - /// - /// [`set_tcp_keepalive`]: Socket::set_tcp_keepalive - #[cfg(all( - feature = "all", - any( - doc, - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ) - ))) - )] - pub fn keepalive_retries(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_TCP, sys::TCP_KEEPCNT) - .map(|retries| retries as u32) - } - } - - /// Set parameters configuring TCP keepalive probes for this socket. - /// - /// The supported parameters depend on the operating system, and are - /// configured using the [`TcpKeepalive`] struct. 
At a minimum, all systems - /// support configuring the [keepalive time]: the time after which the OS - /// will start sending keepalive messages on an idle connection. - /// - /// [keepalive time]: TcpKeepalive::with_time - /// - /// # Notes - /// - /// * This will enable `SO_KEEPALIVE` on this socket, if it is not already - /// enabled. - /// * On some platforms, such as Windows, any keepalive parameters *not* - /// configured by the `TcpKeepalive` struct passed to this function may be - /// overwritten with their default values. Therefore, this function should - /// either only be called once per socket, or the same parameters should - /// be passed every time it is called. - /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// - /// use socket2::{Socket, TcpKeepalive, Domain, Type}; - /// - /// # fn main() -> std::io::Result<()> { - /// let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; - /// let keepalive = TcpKeepalive::new() - /// .with_time(Duration::from_secs(4)); - /// // Depending on the target operating system, we may also be able to - /// // configure the keepalive probe interval and/or the number of - /// // retries here as well. - /// - /// socket.set_tcp_keepalive(&keepalive)?; - /// # Ok(()) } - /// ``` - /// - pub fn set_tcp_keepalive(&self, params: &TcpKeepalive) -> io::Result<()> { - self.set_keepalive(true)?; - sys::set_tcp_keepalive(self.as_raw(), params) - } - - /// Get the value of the `TCP_NODELAY` option on this socket. - /// - /// For more information about this option, see [`set_nodelay`]. - /// - /// [`set_nodelay`]: Socket::set_nodelay - pub fn nodelay(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), sys::IPPROTO_TCP, sys::TCP_NODELAY) - .map(|nodelay| nodelay != 0) - } - } - - /// Set the value of the `TCP_NODELAY` option on this socket. - /// - /// If set, this option disables the Nagle algorithm. 
This means that - /// segments are always sent as soon as possible, even if there is only a - /// small amount of data. When not set, data is buffered until there is a - /// sufficient amount to send out, thereby avoiding the frequent sending of - /// small packets. - pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - sys::IPPROTO_TCP, - sys::TCP_NODELAY, - nodelay as c_int, - ) - } - } -} - -impl Read for Socket { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - // Safety: the `recv` implementation promises not to write uninitialised - // bytes to the `buf`fer, so this casting is safe. - let buf = unsafe { &mut *(buf as *mut [u8] as *mut [MaybeUninit]) }; - self.recv(buf) - } - - #[cfg(not(target_os = "redox"))] - fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { - // Safety: both `IoSliceMut` and `MaybeUninitSlice` promise to have the - // same layout, that of `iovec`/`WSABUF`. Furthermore `recv_vectored` - // promises to not write unitialised bytes to the `bufs` and pass it - // directly to the `recvmsg` system call, so this is safe. - let bufs = unsafe { &mut *(bufs as *mut [IoSliceMut<'_>] as *mut [MaybeUninitSlice<'_>]) }; - self.recv_vectored(bufs).map(|(n, _)| n) - } -} - -impl<'a> Read for &'a Socket { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - // Safety: see other `Read::read` impl. - let buf = unsafe { &mut *(buf as *mut [u8] as *mut [MaybeUninit]) }; - self.recv(buf) - } - - #[cfg(not(target_os = "redox"))] - fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result { - // Safety: see other `Read::read` impl. 
- let bufs = unsafe { &mut *(bufs as *mut [IoSliceMut<'_>] as *mut [MaybeUninitSlice<'_>]) }; - self.recv_vectored(bufs).map(|(n, _)| n) - } -} - -impl Write for Socket { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.send(buf) - } - - #[cfg(not(target_os = "redox"))] - fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - self.send_vectored(bufs) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -impl<'a> Write for &'a Socket { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.send(buf) - } - - #[cfg(not(target_os = "redox"))] - fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - self.send_vectored(bufs) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -impl fmt::Debug for Socket { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Socket") - .field("raw", &self.as_raw()) - .field("local_addr", &self.local_addr().ok()) - .field("peer_addr", &self.peer_addr().ok()) - .finish() - } -} - -from!(net::TcpStream, Socket); -from!(net::TcpListener, Socket); -from!(net::UdpSocket, Socket); -from!(Socket, net::TcpStream); -from!(Socket, net::TcpListener); -from!(Socket, net::UdpSocket); diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/sockref.rs s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/sockref.rs --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/sockref.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/sockref.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,147 +0,0 @@ -use std::fmt; -use std::marker::PhantomData; -use std::mem::ManuallyDrop; -use std::ops::Deref; -#[cfg(unix)] -use std::os::unix::io::{AsRawFd, FromRawFd}; -#[cfg(windows)] -use std::os::windows::io::{AsRawSocket, FromRawSocket}; - -use crate::Socket; - -/// A reference to a [`Socket`] that can be used to configure socket types other -/// than the `Socket` type itself. 
-/// -/// This allows for example a [`TcpStream`], found in the standard library, to -/// be configured using all the additional methods found in the [`Socket`] API. -/// -/// `SockRef` can be created from any socket type that implements [`AsRawFd`] -/// (Unix) or [`AsRawSocket`] (Windows) using the [`From`] implementation, but -/// the caller must ensure the file descriptor/socket is a valid. -/// -/// [`TcpStream`]: std::net::TcpStream -// Don't use intra-doc links because they won't build on every platform. -/// [`AsRawFd`]: https://doc.rust-lang.org/stable/std/os/unix/io/trait.AsRawFd.html -/// [`AsRawSocket`]: https://doc.rust-lang.org/stable/std/os/windows/io/trait.AsRawSocket.html -/// -/// # Examples -/// -/// Below is an example of converting a [`TcpStream`] into a [`SockRef`]. -/// -/// ``` -/// use std::net::{TcpStream, SocketAddr}; -/// -/// use socket2::SockRef; -/// -/// # fn main() -> Result<(), Box> { -/// // Create `TcpStream` from the standard library. -/// let address: SocketAddr = "127.0.0.1:1234".parse()?; -/// # let b1 = std::sync::Arc::new(std::sync::Barrier::new(2)); -/// # let b2 = b1.clone(); -/// # let handle = std::thread::spawn(move || { -/// # let listener = std::net::TcpListener::bind(address).unwrap(); -/// # b2.wait(); -/// # let (stream, _) = listener.accept().unwrap(); -/// # std::thread::sleep(std::time::Duration::from_millis(10)); -/// # drop(stream); -/// # }); -/// # b1.wait(); -/// let stream = TcpStream::connect(address)?; -/// -/// // Create a `SockRef`erence to the stream. -/// let socket_ref = SockRef::from(&stream); -/// // Use `Socket::set_nodelay` on the stream. -/// socket_ref.set_nodelay(true)?; -/// drop(socket_ref); -/// -/// assert_eq!(stream.nodelay()?, true); -/// # handle.join().unwrap(); -/// # Ok(()) -/// # } -/// ``` -/// -/// Below is an example of **incorrect usage** of `SockRef::from`, which is -/// currently possible (but not intended and will be fixed in future versions). 
-/// -/// ```compile_fail -/// use socket2::SockRef; -/// -/// # fn main() -> Result<(), Box> { -/// /// THIS USAGE IS NOT VALID! -/// let socket_ref = SockRef::from(&123); -/// // The above line is overseen possibility when using `SockRef::from`, it -/// // uses the `RawFd` (on Unix), which is a type alias for `c_int`/`i32`, -/// // which implements `AsRawFd`. However it may be clear that this usage is -/// // invalid as it doesn't guarantee that `123` is a valid file descriptor. -/// -/// // Using `Socket::set_nodelay` now will call it on a file descriptor we -/// // don't own! We don't even not if the file descriptor is valid or a socket. -/// socket_ref.set_nodelay(true)?; -/// drop(socket_ref); -/// # Ok(()) -/// # } -/// # DO_NOT_COMPILE -/// ``` -pub struct SockRef<'s> { - /// Because this is a reference we don't own the `Socket`, however `Socket` - /// closes itself when dropped, so we use `ManuallyDrop` to prevent it from - /// closing itself. - socket: ManuallyDrop, - /// Because we don't own the socket we need to ensure the socket remains - /// open while we have a "reference" to it, the lifetime `'s` ensures this. - _lifetime: PhantomData<&'s Socket>, -} - -impl<'s> Deref for SockRef<'s> { - type Target = Socket; - - fn deref(&self) -> &Self::Target { - &self.socket - } -} - -/// On Windows, a corresponding `From<&impl AsRawSocket>` implementation exists. -#[cfg(unix)] -#[cfg_attr(docsrs, doc(cfg(unix)))] -impl<'s, S> From<&'s S> for SockRef<'s> -where - S: AsRawFd, -{ - /// The caller must ensure `S` is actually a socket. - fn from(socket: &'s S) -> Self { - let fd = socket.as_raw_fd(); - assert!(fd >= 0); - SockRef { - socket: ManuallyDrop::new(unsafe { Socket::from_raw_fd(fd) }), - _lifetime: PhantomData, - } - } -} - -/// On Unix, a corresponding `From<&impl AsRawFd>` implementation exists. 
-#[cfg(windows)] -#[cfg_attr(docsrs, doc(cfg(windows)))] -impl<'s, S> From<&'s S> for SockRef<'s> -where - S: AsRawSocket, -{ - /// See the `From<&impl AsRawFd>` implementation. - fn from(socket: &'s S) -> Self { - let socket = socket.as_raw_socket(); - assert!(socket != winapi::um::winsock2::INVALID_SOCKET as _); - SockRef { - socket: ManuallyDrop::new(unsafe { Socket::from_raw_socket(socket) }), - _lifetime: PhantomData, - } - } -} - -impl fmt::Debug for SockRef<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SockRef") - .field("raw", &self.socket.as_raw()) - .field("local_addr", &self.socket.local_addr().ok()) - .field("peer_addr", &self.socket.peer_addr().ok()) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/sys/unix.rs s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/sys/unix.rs --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/sys/unix.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/sys/unix.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2101 +0,0 @@ -// Copyright 2015 The Rust Project Developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::cmp::min; -#[cfg(not(target_os = "redox"))] -use std::io::IoSlice; -use std::marker::PhantomData; -use std::mem::{self, size_of, MaybeUninit}; -use std::net::Shutdown; -use std::net::{Ipv4Addr, Ipv6Addr}; -#[cfg(all(feature = "all", target_vendor = "apple"))] -use std::num::NonZeroU32; -#[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "linux", - target_vendor = "apple", - ) -))] -use std::num::NonZeroUsize; -#[cfg(feature = "all")] -use std::os::unix::ffi::OsStrExt; -#[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "linux", - target_vendor = "apple", - ) -))] -use std::os::unix::io::RawFd; -use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd}; -#[cfg(feature = "all")] -use std::os::unix::net::{UnixDatagram, UnixListener, UnixStream}; -#[cfg(feature = "all")] -use std::path::Path; -#[cfg(not(all(target_os = "redox", not(feature = "all"))))] -use std::ptr; -use std::time::{Duration, Instant}; -use std::{io, slice}; - -#[cfg(not(target_vendor = "apple"))] -use libc::ssize_t; -use libc::{c_void, in6_addr, in_addr}; - -#[cfg(not(target_os = "redox"))] -use crate::RecvFlags; -use crate::{Domain, Protocol, SockAddr, TcpKeepalive, Type}; - -pub(crate) use libc::c_int; - -// Used in `Domain`. -pub(crate) use libc::{AF_INET, AF_INET6}; -// Used in `Type`. -#[cfg(all(feature = "all", not(target_os = "redox")))] -pub(crate) use libc::SOCK_RAW; -#[cfg(feature = "all")] -pub(crate) use libc::SOCK_SEQPACKET; -pub(crate) use libc::{SOCK_DGRAM, SOCK_STREAM}; -// Used in `Protocol`. -pub(crate) use libc::{IPPROTO_ICMP, IPPROTO_ICMPV6, IPPROTO_TCP, IPPROTO_UDP}; -// Used in `SockAddr`. -pub(crate) use libc::{ - sa_family_t, sockaddr, sockaddr_in, sockaddr_in6, sockaddr_storage, socklen_t, -}; -// Used in `RecvFlags`. -#[cfg(not(target_os = "redox"))] -pub(crate) use libc::{MSG_TRUNC, SO_OOBINLINE}; -// Used in `Socket`. 
-#[cfg(not(target_os = "nto"))] -pub(crate) use libc::ipv6_mreq as Ipv6Mreq; -#[cfg(all(feature = "all", not(target_os = "redox")))] -pub(crate) use libc::IP_HDRINCL; -#[cfg(not(any( - target_os = "dragonfly", - target_os = "fuchsia", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "haiku", - target_os = "nto", -)))] -pub(crate) use libc::IP_RECVTOS; -#[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", -)))] -pub(crate) use libc::IP_TOS; -#[cfg(not(target_vendor = "apple"))] -pub(crate) use libc::SO_LINGER; -#[cfg(target_vendor = "apple")] -pub(crate) use libc::SO_LINGER_SEC as SO_LINGER; -pub(crate) use libc::{ - ip_mreq as IpMreq, linger, IPPROTO_IP, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, IPV6_MULTICAST_IF, - IPV6_MULTICAST_LOOP, IPV6_UNICAST_HOPS, IPV6_V6ONLY, IP_ADD_MEMBERSHIP, IP_DROP_MEMBERSHIP, - IP_MULTICAST_IF, IP_MULTICAST_LOOP, IP_MULTICAST_TTL, IP_TTL, MSG_OOB, MSG_PEEK, SOL_SOCKET, - SO_BROADCAST, SO_ERROR, SO_KEEPALIVE, SO_RCVBUF, SO_RCVTIMEO, SO_REUSEADDR, SO_SNDBUF, - SO_SNDTIMEO, SO_TYPE, TCP_NODELAY, -}; -#[cfg(not(any( - target_os = "dragonfly", - target_os = "haiku", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "fuchsia", - target_os = "nto", -)))] -pub(crate) use libc::{ - ip_mreq_source as IpMreqSource, IP_ADD_SOURCE_MEMBERSHIP, IP_DROP_SOURCE_MEMBERSHIP, -}; -#[cfg(not(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "haiku", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "solaris", - target_os = "nto", - target_vendor = "apple" -)))] -pub(crate) use libc::{IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP}; -#[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "haiku", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "solaris", - target_vendor 
= "apple", -))] -pub(crate) use libc::{ - IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP, IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP, -}; -#[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ) -))] -pub(crate) use libc::{TCP_KEEPCNT, TCP_KEEPINTVL}; - -// See this type in the Windows file. -pub(crate) type Bool = c_int; - -#[cfg(any(target_vendor = "apple", target_os = "nto"))] -use libc::TCP_KEEPALIVE as KEEPALIVE_TIME; -#[cfg(not(any( - target_vendor = "apple", - target_os = "haiku", - target_os = "openbsd", - target_os = "nto", -)))] -use libc::TCP_KEEPIDLE as KEEPALIVE_TIME; - -/// Helper macro to execute a system call that returns an `io::Result`. -macro_rules! syscall { - ($fn: ident ( $($arg: expr),* $(,)* ) ) => {{ - #[allow(unused_unsafe)] - let res = unsafe { libc::$fn($($arg, )*) }; - if res == -1 { - Err(std::io::Error::last_os_error()) - } else { - Ok(res) - } - }}; -} - -/// Maximum size of a buffer passed to system call like `recv` and `send`. -#[cfg(not(target_vendor = "apple"))] -const MAX_BUF_LEN: usize = ::max_value() as usize; - -// The maximum read limit on most posix-like systems is `SSIZE_MAX`, with the -// man page quoting that if the count of bytes to read is greater than -// `SSIZE_MAX` the result is "unspecified". -// -// On macOS, however, apparently the 64-bit libc is either buggy or -// intentionally showing odd behavior by rejecting any read with a size larger -// than or equal to INT_MAX. To handle both of these the read size is capped on -// both platforms. 
-#[cfg(target_vendor = "apple")] -const MAX_BUF_LEN: usize = ::max_value() as usize - 1; - -#[cfg(any( - all( - target_os = "linux", - any( - target_env = "gnu", - all(target_env = "uclibc", target_pointer_width = "64") - ) - ), - target_os = "android", -))] -type IovLen = usize; - -#[cfg(any( - all( - target_os = "linux", - any( - target_env = "musl", - all(target_env = "uclibc", target_pointer_width = "32") - ) - ), - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "haiku", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "solaris", - target_os = "nto", - target_vendor = "apple", -))] -type IovLen = c_int; - -/// Unix only API. -impl Domain { - /// Domain for Unix socket communication, corresponding to `AF_UNIX`. - #[cfg_attr(docsrs, doc(cfg(unix)))] - pub const UNIX: Domain = Domain(libc::AF_UNIX); - - /// Domain for low-level packet interface, corresponding to `AF_PACKET`. - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub const PACKET: Domain = Domain(libc::AF_PACKET); - - /// Domain for low-level VSOCK interface, corresponding to `AF_VSOCK`. 
- #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) - )] - pub const VSOCK: Domain = Domain(libc::AF_VSOCK); -} - -impl_debug!( - Domain, - libc::AF_INET, - libc::AF_INET6, - libc::AF_UNIX, - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - #[cfg_attr( - docsrs, - doc(cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))) - )] - libc::AF_PACKET, - #[cfg(any(target_os = "android", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(any(target_os = "android", target_os = "linux"))))] - libc::AF_VSOCK, - libc::AF_UNSPEC, // = 0. -); - -/// Unix only API. -impl Type { - /// Set `SOCK_NONBLOCK` on the `Type`. - #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ) - ))) - )] - pub const fn nonblocking(self) -> Type { - Type(self.0 | libc::SOCK_NONBLOCK) - } - - /// Set `SOCK_CLOEXEC` on the `Type`. 
- #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ) - ))) - )] - pub const fn cloexec(self) -> Type { - self._cloexec() - } - - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ))] - pub(crate) const fn _cloexec(self) -> Type { - Type(self.0 | libc::SOCK_CLOEXEC) - } -} - -impl_debug!( - Type, - libc::SOCK_STREAM, - libc::SOCK_DGRAM, - #[cfg(not(target_os = "redox"))] - libc::SOCK_RAW, - #[cfg(not(any(target_os = "redox", target_os = "haiku")))] - libc::SOCK_RDM, - libc::SOCK_SEQPACKET, - /* TODO: add these optional bit OR-ed flags: - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ))] - libc::SOCK_NONBLOCK, - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ))] - libc::SOCK_CLOEXEC, - */ -); - -impl_debug!( - Protocol, - libc::IPPROTO_ICMP, - libc::IPPROTO_ICMPV6, - libc::IPPROTO_TCP, - libc::IPPROTO_UDP, -); - -/// Unix-only API. -#[cfg(not(target_os = "redox"))] -impl RecvFlags { - /// Check if the message terminates a record. - /// - /// Not all socket types support the notion of records. 
- /// For socket types that do support it (such as [`SEQPACKET`][Type::SEQPACKET]), - /// a record is terminated by sending a message with the end-of-record flag set. - /// - /// On Unix this corresponds to the MSG_EOR flag. - pub const fn is_end_of_record(self) -> bool { - self.0 & libc::MSG_EOR != 0 - } - - /// Check if the message contains out-of-band data. - /// - /// This is useful for protocols where you receive out-of-band data - /// mixed in with the normal data stream. - /// - /// On Unix this corresponds to the MSG_OOB flag. - pub const fn is_out_of_band(self) -> bool { - self.0 & libc::MSG_OOB != 0 - } -} - -#[cfg(not(target_os = "redox"))] -impl std::fmt::Debug for RecvFlags { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("RecvFlags") - .field("is_end_of_record", &self.is_end_of_record()) - .field("is_out_of_band", &self.is_out_of_band()) - .field("is_truncated", &self.is_truncated()) - .finish() - } -} - -#[repr(transparent)] -pub struct MaybeUninitSlice<'a> { - vec: libc::iovec, - _lifetime: PhantomData<&'a mut [MaybeUninit]>, -} - -unsafe impl<'a> Send for MaybeUninitSlice<'a> {} - -unsafe impl<'a> Sync for MaybeUninitSlice<'a> {} - -impl<'a> MaybeUninitSlice<'a> { - pub(crate) fn new(buf: &'a mut [MaybeUninit]) -> MaybeUninitSlice<'a> { - MaybeUninitSlice { - vec: libc::iovec { - iov_base: buf.as_mut_ptr().cast(), - iov_len: buf.len(), - }, - _lifetime: PhantomData, - } - } - - pub(crate) fn as_slice(&self) -> &[MaybeUninit] { - unsafe { slice::from_raw_parts(self.vec.iov_base.cast(), self.vec.iov_len) } - } - - pub(crate) fn as_mut_slice(&mut self) -> &mut [MaybeUninit] { - unsafe { slice::from_raw_parts_mut(self.vec.iov_base.cast(), self.vec.iov_len) } - } -} - -/// Unix only API. -impl SockAddr { - /// Constructs a `SockAddr` with the family `AF_UNIX` and the provided path. - /// - /// # Failure - /// - /// Returns an error if the path is longer than `SUN_LEN`. 
- #[cfg(feature = "all")] - #[cfg_attr(docsrs, doc(cfg(all(unix, feature = "all"))))] - #[allow(unused_unsafe)] // TODO: replace with `unsafe_op_in_unsafe_fn` once stable. - pub fn unix

(path: P) -> io::Result - where - P: AsRef, - { - unsafe { - SockAddr::init(|storage, len| { - // Safety: `SockAddr::init` zeros the address, which is a valid - // representation. - let storage: &mut libc::sockaddr_un = unsafe { &mut *storage.cast() }; - let len: &mut socklen_t = unsafe { &mut *len }; - - let bytes = path.as_ref().as_os_str().as_bytes(); - let too_long = match bytes.first() { - None => false, - // linux abstract namespaces aren't null-terminated - Some(&0) => bytes.len() > storage.sun_path.len(), - Some(_) => bytes.len() >= storage.sun_path.len(), - }; - if too_long { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "path must be shorter than SUN_LEN", - )); - } - - storage.sun_family = libc::AF_UNIX as sa_family_t; - // Safety: `bytes` and `addr.sun_path` are not overlapping and - // both point to valid memory. - // `SockAddr::init` zeroes the memory, so the path is already - // null terminated. - unsafe { - ptr::copy_nonoverlapping( - bytes.as_ptr(), - storage.sun_path.as_mut_ptr() as *mut u8, - bytes.len(), - ) - }; - - let base = storage as *const _ as usize; - let path = &storage.sun_path as *const _ as usize; - let sun_path_offset = path - base; - let length = sun_path_offset - + bytes.len() - + match bytes.first() { - Some(&0) | None => 0, - Some(_) => 1, - }; - *len = length as socklen_t; - - Ok(()) - }) - } - .map(|(_, addr)| addr) - } -} - -impl SockAddr { - /// Constructs a `SockAddr` with the family `AF_VSOCK` and the provided CID/port. - /// - /// # Errors - /// - /// This function can never fail. In a future version of this library it will be made - /// infallible. - #[allow(unused_unsafe)] // TODO: replace with `unsafe_op_in_unsafe_fn` once stable. 
- #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) - )] - pub fn vsock(cid: u32, port: u32) -> io::Result { - unsafe { - SockAddr::init(|storage, len| { - // Safety: `SockAddr::init` zeros the address, which is a valid - // representation. - let storage: &mut libc::sockaddr_vm = unsafe { &mut *storage.cast() }; - let len: &mut socklen_t = unsafe { &mut *len }; - - storage.svm_family = libc::AF_VSOCK as sa_family_t; - storage.svm_cid = cid; - storage.svm_port = port; - - *len = mem::size_of::() as socklen_t; - - Ok(()) - }) - } - .map(|(_, addr)| addr) - } - - /// Returns this address VSOCK CID/port if it is in the `AF_VSOCK` family, - /// otherwise return `None`. - #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) - )] - pub fn vsock_address(&self) -> Option<(u32, u32)> { - if self.family() == libc::AF_VSOCK as sa_family_t { - // Safety: if the ss_family field is AF_VSOCK then storage must be a sockaddr_vm. 
- let addr = unsafe { &*(self.as_ptr() as *const libc::sockaddr_vm) }; - Some((addr.svm_cid, addr.svm_port)) - } else { - None - } - } -} - -pub(crate) type Socket = c_int; - -pub(crate) unsafe fn socket_from_raw(socket: Socket) -> crate::socket::Inner { - crate::socket::Inner::from_raw_fd(socket) -} - -pub(crate) fn socket_as_raw(socket: &crate::socket::Inner) -> Socket { - socket.as_raw_fd() -} - -pub(crate) fn socket_into_raw(socket: crate::socket::Inner) -> Socket { - socket.into_raw_fd() -} - -pub(crate) fn socket(family: c_int, ty: c_int, protocol: c_int) -> io::Result { - syscall!(socket(family, ty, protocol)) -} - -#[cfg(feature = "all")] -pub(crate) fn socketpair(family: c_int, ty: c_int, protocol: c_int) -> io::Result<[Socket; 2]> { - let mut fds = [0, 0]; - syscall!(socketpair(family, ty, protocol, fds.as_mut_ptr())).map(|_| fds) -} - -pub(crate) fn bind(fd: Socket, addr: &SockAddr) -> io::Result<()> { - syscall!(bind(fd, addr.as_ptr(), addr.len() as _)).map(|_| ()) -} - -pub(crate) fn connect(fd: Socket, addr: &SockAddr) -> io::Result<()> { - syscall!(connect(fd, addr.as_ptr(), addr.len())).map(|_| ()) -} - -pub(crate) fn poll_connect(socket: &crate::Socket, timeout: Duration) -> io::Result<()> { - let start = Instant::now(); - - let mut pollfd = libc::pollfd { - fd: socket.as_raw(), - events: libc::POLLIN | libc::POLLOUT, - revents: 0, - }; - - loop { - let elapsed = start.elapsed(); - if elapsed >= timeout { - return Err(io::ErrorKind::TimedOut.into()); - } - - let timeout = (timeout - elapsed).as_millis(); - let timeout = clamp(timeout, 1, c_int::max_value() as u128) as c_int; - - match syscall!(poll(&mut pollfd, 1, timeout)) { - Ok(0) => return Err(io::ErrorKind::TimedOut.into()), - Ok(_) => { - // Error or hang up indicates an error (or failure to connect). 
- if (pollfd.revents & libc::POLLHUP) != 0 || (pollfd.revents & libc::POLLERR) != 0 { - match socket.take_error() { - Ok(Some(err)) => return Err(err), - Ok(None) => { - return Err(io::Error::new( - io::ErrorKind::Other, - "no error set after POLLHUP", - )) - } - Err(err) => return Err(err), - } - } - return Ok(()); - } - // Got interrupted, try again. - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Err(err), - } - } -} - -// TODO: use clamp from std lib, stable since 1.50. -fn clamp(value: T, min: T, max: T) -> T -where - T: Ord, -{ - if value <= min { - min - } else if value >= max { - max - } else { - value - } -} - -pub(crate) fn listen(fd: Socket, backlog: c_int) -> io::Result<()> { - syscall!(listen(fd, backlog)).map(|_| ()) -} - -pub(crate) fn accept(fd: Socket) -> io::Result<(Socket, SockAddr)> { - // Safety: `accept` initialises the `SockAddr` for us. - unsafe { SockAddr::init(|storage, len| syscall!(accept(fd, storage.cast(), len))) } -} - -pub(crate) fn getsockname(fd: Socket) -> io::Result { - // Safety: `accept` initialises the `SockAddr` for us. - unsafe { SockAddr::init(|storage, len| syscall!(getsockname(fd, storage.cast(), len))) } - .map(|(_, addr)| addr) -} - -pub(crate) fn getpeername(fd: Socket) -> io::Result { - // Safety: `accept` initialises the `SockAddr` for us. 
- unsafe { SockAddr::init(|storage, len| syscall!(getpeername(fd, storage.cast(), len))) } - .map(|(_, addr)| addr) -} - -pub(crate) fn try_clone(fd: Socket) -> io::Result { - syscall!(fcntl(fd, libc::F_DUPFD_CLOEXEC, 0)) -} - -pub(crate) fn set_nonblocking(fd: Socket, nonblocking: bool) -> io::Result<()> { - if nonblocking { - fcntl_add(fd, libc::F_GETFL, libc::F_SETFL, libc::O_NONBLOCK) - } else { - fcntl_remove(fd, libc::F_GETFL, libc::F_SETFL, libc::O_NONBLOCK) - } -} - -pub(crate) fn shutdown(fd: Socket, how: Shutdown) -> io::Result<()> { - let how = match how { - Shutdown::Write => libc::SHUT_WR, - Shutdown::Read => libc::SHUT_RD, - Shutdown::Both => libc::SHUT_RDWR, - }; - syscall!(shutdown(fd, how)).map(|_| ()) -} - -pub(crate) fn recv(fd: Socket, buf: &mut [MaybeUninit], flags: c_int) -> io::Result { - syscall!(recv( - fd, - buf.as_mut_ptr().cast(), - min(buf.len(), MAX_BUF_LEN), - flags, - )) - .map(|n| n as usize) -} - -pub(crate) fn recv_from( - fd: Socket, - buf: &mut [MaybeUninit], - flags: c_int, -) -> io::Result<(usize, SockAddr)> { - // Safety: `recvfrom` initialises the `SockAddr` for us. - unsafe { - SockAddr::init(|addr, addrlen| { - syscall!(recvfrom( - fd, - buf.as_mut_ptr().cast(), - min(buf.len(), MAX_BUF_LEN), - flags, - addr.cast(), - addrlen - )) - .map(|n| n as usize) - }) - } -} - -pub(crate) fn peek_sender(fd: Socket) -> io::Result { - // Unix-like platforms simply truncate the returned data, so this implementation is trivial. - // However, for Windows this requires suppressing the `WSAEMSGSIZE` error, - // so that requires a different approach. - // NOTE: macOS does not populate `sockaddr` if you pass a zero-sized buffer. 
- let (_, sender) = recv_from(fd, &mut [MaybeUninit::uninit(); 8], MSG_PEEK)?; - Ok(sender) -} - -#[cfg(not(target_os = "redox"))] -pub(crate) fn recv_vectored( - fd: Socket, - bufs: &mut [crate::MaybeUninitSlice<'_>], - flags: c_int, -) -> io::Result<(usize, RecvFlags)> { - recvmsg(fd, ptr::null_mut(), bufs, flags).map(|(n, _, recv_flags)| (n, recv_flags)) -} - -#[cfg(not(target_os = "redox"))] -pub(crate) fn recv_from_vectored( - fd: Socket, - bufs: &mut [crate::MaybeUninitSlice<'_>], - flags: c_int, -) -> io::Result<(usize, RecvFlags, SockAddr)> { - // Safety: `recvmsg` initialises the address storage and we set the length - // manually. - unsafe { - SockAddr::init(|storage, len| { - recvmsg(fd, storage, bufs, flags).map(|(n, addrlen, recv_flags)| { - // Set the correct address length. - *len = addrlen; - (n, recv_flags) - }) - }) - } - .map(|((n, recv_flags), addr)| (n, recv_flags, addr)) -} - -/// Returns the (bytes received, sending address len, `RecvFlags`). -#[cfg(not(target_os = "redox"))] -fn recvmsg( - fd: Socket, - msg_name: *mut sockaddr_storage, - bufs: &mut [crate::MaybeUninitSlice<'_>], - flags: c_int, -) -> io::Result<(usize, libc::socklen_t, RecvFlags)> { - let msg_namelen = if msg_name.is_null() { - 0 - } else { - size_of::() as libc::socklen_t - }; - // libc::msghdr contains unexported padding fields on Fuchsia. 
- let mut msg: libc::msghdr = unsafe { mem::zeroed() }; - msg.msg_name = msg_name.cast(); - msg.msg_namelen = msg_namelen; - msg.msg_iov = bufs.as_mut_ptr().cast(); - msg.msg_iovlen = min(bufs.len(), IovLen::MAX as usize) as IovLen; - syscall!(recvmsg(fd, &mut msg, flags)) - .map(|n| (n as usize, msg.msg_namelen, RecvFlags(msg.msg_flags))) -} - -pub(crate) fn send(fd: Socket, buf: &[u8], flags: c_int) -> io::Result { - syscall!(send( - fd, - buf.as_ptr().cast(), - min(buf.len(), MAX_BUF_LEN), - flags, - )) - .map(|n| n as usize) -} - -#[cfg(not(target_os = "redox"))] -pub(crate) fn send_vectored(fd: Socket, bufs: &[IoSlice<'_>], flags: c_int) -> io::Result { - sendmsg(fd, ptr::null(), 0, bufs, flags) -} - -pub(crate) fn send_to(fd: Socket, buf: &[u8], addr: &SockAddr, flags: c_int) -> io::Result { - syscall!(sendto( - fd, - buf.as_ptr().cast(), - min(buf.len(), MAX_BUF_LEN), - flags, - addr.as_ptr(), - addr.len(), - )) - .map(|n| n as usize) -} - -#[cfg(not(target_os = "redox"))] -pub(crate) fn send_to_vectored( - fd: Socket, - bufs: &[IoSlice<'_>], - addr: &SockAddr, - flags: c_int, -) -> io::Result { - sendmsg(fd, addr.as_storage_ptr(), addr.len(), bufs, flags) -} - -/// Returns the (bytes received, sending address len, `RecvFlags`). -#[cfg(not(target_os = "redox"))] -fn sendmsg( - fd: Socket, - msg_name: *const sockaddr_storage, - msg_namelen: socklen_t, - bufs: &[IoSlice<'_>], - flags: c_int, -) -> io::Result { - // libc::msghdr contains unexported padding fields on Fuchsia. - let mut msg: libc::msghdr = unsafe { mem::zeroed() }; - // Safety: we're creating a `*mut` pointer from a reference, which is UB - // once actually used. However the OS should not write to it in the - // `sendmsg` system call. - msg.msg_name = (msg_name as *mut sockaddr_storage).cast(); - msg.msg_namelen = msg_namelen; - // Safety: Same as above about `*const` -> `*mut`. 
- msg.msg_iov = bufs.as_ptr() as *mut _; - msg.msg_iovlen = min(bufs.len(), IovLen::MAX as usize) as IovLen; - syscall!(sendmsg(fd, &msg, flags)).map(|n| n as usize) -} - -/// Wrapper around `getsockopt` to deal with platform specific timeouts. -pub(crate) fn timeout_opt(fd: Socket, opt: c_int, val: c_int) -> io::Result> { - unsafe { getsockopt(fd, opt, val).map(from_timeval) } -} - -fn from_timeval(duration: libc::timeval) -> Option { - if duration.tv_sec == 0 && duration.tv_usec == 0 { - None - } else { - let sec = duration.tv_sec as u64; - let nsec = (duration.tv_usec as u32) * 1000; - Some(Duration::new(sec, nsec)) - } -} - -/// Wrapper around `setsockopt` to deal with platform specific timeouts. -pub(crate) fn set_timeout_opt( - fd: Socket, - opt: c_int, - val: c_int, - duration: Option, -) -> io::Result<()> { - let duration = into_timeval(duration); - unsafe { setsockopt(fd, opt, val, duration) } -} - -fn into_timeval(duration: Option) -> libc::timeval { - match duration { - // https://github.com/rust-lang/libc/issues/1848 - #[cfg_attr(target_env = "musl", allow(deprecated))] - Some(duration) => libc::timeval { - tv_sec: min(duration.as_secs(), libc::time_t::max_value() as u64) as libc::time_t, - tv_usec: duration.subsec_micros() as libc::suseconds_t, - }, - None => libc::timeval { - tv_sec: 0, - tv_usec: 0, - }, - } -} - -#[cfg(feature = "all")] -#[cfg(not(any(target_os = "haiku", target_os = "openbsd")))] -pub(crate) fn keepalive_time(fd: Socket) -> io::Result { - unsafe { - getsockopt::(fd, IPPROTO_TCP, KEEPALIVE_TIME) - .map(|secs| Duration::from_secs(secs as u64)) - } -} - -#[allow(unused_variables)] -pub(crate) fn set_tcp_keepalive(fd: Socket, keepalive: &TcpKeepalive) -> io::Result<()> { - #[cfg(not(any(target_os = "haiku", target_os = "openbsd", target_os = "nto")))] - if let Some(time) = keepalive.time { - let secs = into_secs(time); - unsafe { setsockopt(fd, libc::IPPROTO_TCP, KEEPALIVE_TIME, secs)? 
} - } - - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_vendor = "apple", - ))] - { - if let Some(interval) = keepalive.interval { - let secs = into_secs(interval); - unsafe { setsockopt(fd, libc::IPPROTO_TCP, libc::TCP_KEEPINTVL, secs)? } - } - - if let Some(retries) = keepalive.retries { - unsafe { setsockopt(fd, libc::IPPROTO_TCP, libc::TCP_KEEPCNT, retries as c_int)? } - } - } - - #[cfg(target_os = "nto")] - if let Some(time) = keepalive.time { - let secs = into_timeval(Some(time)); - unsafe { setsockopt(fd, libc::IPPROTO_TCP, KEEPALIVE_TIME, secs)? } - } - - Ok(()) -} - -#[cfg(not(any(target_os = "haiku", target_os = "openbsd", target_os = "nto")))] -fn into_secs(duration: Duration) -> c_int { - min(duration.as_secs(), c_int::max_value() as u64) as c_int -} - -/// Add `flag` to the current set flags of `F_GETFD`. -fn fcntl_add(fd: Socket, get_cmd: c_int, set_cmd: c_int, flag: c_int) -> io::Result<()> { - let previous = syscall!(fcntl(fd, get_cmd))?; - let new = previous | flag; - if new != previous { - syscall!(fcntl(fd, set_cmd, new)).map(|_| ()) - } else { - // Flag was already set. - Ok(()) - } -} - -/// Remove `flag` to the current set flags of `F_GETFD`. -fn fcntl_remove(fd: Socket, get_cmd: c_int, set_cmd: c_int, flag: c_int) -> io::Result<()> { - let previous = syscall!(fcntl(fd, get_cmd))?; - let new = previous & !flag; - if new != previous { - syscall!(fcntl(fd, set_cmd, new)).map(|_| ()) - } else { - // Flag was already set. - Ok(()) - } -} - -/// Caller must ensure `T` is the correct type for `opt` and `val`. 
-pub(crate) unsafe fn getsockopt(fd: Socket, opt: c_int, val: c_int) -> io::Result { - let mut payload: MaybeUninit = MaybeUninit::uninit(); - let mut len = size_of::() as libc::socklen_t; - syscall!(getsockopt( - fd, - opt, - val, - payload.as_mut_ptr().cast(), - &mut len, - )) - .map(|_| { - debug_assert_eq!(len as usize, size_of::()); - // Safety: `getsockopt` initialised `payload` for us. - payload.assume_init() - }) -} - -/// Caller must ensure `T` is the correct type for `opt` and `val`. -pub(crate) unsafe fn setsockopt( - fd: Socket, - opt: c_int, - val: c_int, - payload: T, -) -> io::Result<()> { - let payload = &payload as *const T as *const c_void; - syscall!(setsockopt( - fd, - opt, - val, - payload, - mem::size_of::() as libc::socklen_t, - )) - .map(|_| ()) -} - -pub(crate) fn to_in_addr(addr: &Ipv4Addr) -> in_addr { - // `s_addr` is stored as BE on all machines, and the array is in BE order. - // So the native endian conversion method is used so that it's never - // swapped. 
- in_addr { - s_addr: u32::from_ne_bytes(addr.octets()), - } -} - -pub(crate) fn from_in_addr(in_addr: in_addr) -> Ipv4Addr { - Ipv4Addr::from(in_addr.s_addr.to_ne_bytes()) -} - -pub(crate) fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr { - in6_addr { - s6_addr: addr.octets(), - } -} - -pub(crate) fn from_in6_addr(addr: in6_addr) -> Ipv6Addr { - Ipv6Addr::from(addr.s6_addr) -} - -#[cfg(not(any( - target_os = "haiku", - target_os = "illumos", - target_os = "netbsd", - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "nto", -)))] -pub(crate) fn to_mreqn( - multiaddr: &Ipv4Addr, - interface: &crate::socket::InterfaceIndexOrAddress, -) -> libc::ip_mreqn { - match interface { - crate::socket::InterfaceIndexOrAddress::Index(interface) => libc::ip_mreqn { - imr_multiaddr: to_in_addr(multiaddr), - imr_address: to_in_addr(&Ipv4Addr::UNSPECIFIED), - imr_ifindex: *interface as _, - }, - crate::socket::InterfaceIndexOrAddress::Address(interface) => libc::ip_mreqn { - imr_multiaddr: to_in_addr(multiaddr), - imr_address: to_in_addr(interface), - imr_ifindex: 0, - }, - } -} - -/// Unix only API. -impl crate::Socket { - /// Accept a new incoming connection from this listener. - /// - /// This function directly corresponds to the `accept4(2)` function. - /// - /// This function will block the calling thread until a new connection is - /// established. When established, the corresponding `Socket` and the remote - /// peer's address will be returned. 
- #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ) - ))) - )] - pub fn accept4(&self, flags: c_int) -> io::Result<(crate::Socket, SockAddr)> { - self._accept4(flags) - } - - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ))] - pub(crate) fn _accept4(&self, flags: c_int) -> io::Result<(crate::Socket, SockAddr)> { - // Safety: `accept4` initialises the `SockAddr` for us. - unsafe { - SockAddr::init(|storage, len| { - syscall!(accept4(self.as_raw(), storage.cast(), len, flags)) - .map(crate::Socket::from_raw) - }) - } - } - - /// Sets `CLOEXEC` on the socket. - /// - /// # Notes - /// - /// On supported platforms you can use [`Type::cloexec`]. - #[cfg(feature = "all")] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix))))] - pub fn set_cloexec(&self, close_on_exec: bool) -> io::Result<()> { - self._set_cloexec(close_on_exec) - } - - pub(crate) fn _set_cloexec(&self, close_on_exec: bool) -> io::Result<()> { - if close_on_exec { - fcntl_add( - self.as_raw(), - libc::F_GETFD, - libc::F_SETFD, - libc::FD_CLOEXEC, - ) - } else { - fcntl_remove( - self.as_raw(), - libc::F_GETFD, - libc::F_SETFD, - libc::FD_CLOEXEC, - ) - } - } - - /// Sets `SO_NOSIGPIPE` on the socket. 
- #[cfg(all(feature = "all", any(doc, target_vendor = "apple")))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_vendor = "apple"))))] - pub fn set_nosigpipe(&self, nosigpipe: bool) -> io::Result<()> { - self._set_nosigpipe(nosigpipe) - } - - #[cfg(target_vendor = "apple")] - pub(crate) fn _set_nosigpipe(&self, nosigpipe: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_SOCKET, - libc::SO_NOSIGPIPE, - nosigpipe as c_int, - ) - } - } - - /// Gets the value of the `TCP_MAXSEG` option on this socket. - /// - /// For more information about this option, see [`set_mss`]. - /// - /// [`set_mss`]: crate::Socket::set_mss - #[cfg(all(feature = "all", not(target_os = "redox")))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix, not(target_os = "redox")))))] - pub fn mss(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_MAXSEG) - .map(|mss| mss as u32) - } - } - - /// Sets the value of the `TCP_MAXSEG` option on this socket. - /// - /// The `TCP_MAXSEG` option denotes the TCP Maximum Segment Size and is only - /// available on TCP sockets. - #[cfg(all(feature = "all", not(target_os = "redox")))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", unix, not(target_os = "redox")))))] - pub fn set_mss(&self, mss: u32) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::IPPROTO_TCP, - libc::TCP_MAXSEG, - mss as c_int, - ) - } - } - - /// Returns `true` if `listen(2)` was called on this socket by checking the - /// `SO_ACCEPTCONN` option on this socket. 
- #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - ) - ))) - )] - pub fn is_listener(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_ACCEPTCONN) - .map(|v| v != 0) - } - } - - /// Returns the [`Domain`] of this socket by checking the `SO_DOMAIN` option - /// on this socket. - #[cfg(all( - feature = "all", - any( - target_os = "android", - // TODO: add FreeBSD. - // target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - ) - ))] - #[cfg_attr(docsrs, doc(cfg(all( - feature = "all", - any( - target_os = "android", - // TODO: add FreeBSD. - // target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - ) - ))))] - pub fn domain(&self) -> io::Result { - unsafe { getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_DOMAIN).map(Domain) } - } - - /// Returns the [`Protocol`] of this socket by checking the `SO_PROTOCOL` - /// option on this socket. - #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "linux", - ) - ))) - )] - pub fn protocol(&self) -> io::Result> { - unsafe { - getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_PROTOCOL).map(|v| match v - { - 0 => None, - p => Some(Protocol(p)), - }) - } - } - - /// Gets the value for the `SO_MARK` option on this socket. - /// - /// This value gets the socket mark field for each packet sent through - /// this socket. - /// - /// On Linux this function requires the `CAP_NET_ADMIN` capability. 
- #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn mark(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_MARK) - .map(|mark| mark as u32) - } - } - - /// Sets the value for the `SO_MARK` option on this socket. - /// - /// This value sets the socket mark field for each packet sent through - /// this socket. Changing the mark can be used for mark-based routing - /// without netfilter or for packet filtering. - /// - /// On Linux this function requires the `CAP_NET_ADMIN` capability. - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn set_mark(&self, mark: u32) -> io::Result<()> { - unsafe { - setsockopt::( - self.as_raw(), - libc::SOL_SOCKET, - libc::SO_MARK, - mark as c_int, - ) - } - } - - /// Get the value of the `TCP_CORK` option on this socket. - /// - /// For more information about this option, see [`set_cork`]. - /// - /// [`set_cork`]: Socket::set_cork - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn cork(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_CORK) - .map(|cork| cork != 0) - } - } - - /// Set the value of the `TCP_CORK` option on this socket. - /// - /// If set, don't send out partial frames. All queued partial frames are - /// sent when the option is cleared again. 
There is a 200 millisecond ceiling on - /// the time for which output is corked by `TCP_CORK`. If this ceiling is reached, - /// then queued data is automatically transmitted. - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn set_cork(&self, cork: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::IPPROTO_TCP, - libc::TCP_CORK, - cork as c_int, - ) - } - } - - /// Get the value of the `TCP_QUICKACK` option on this socket. - /// - /// For more information about this option, see [`set_quickack`]. - /// - /// [`set_quickack`]: Socket::set_quickack - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn quickack(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_QUICKACK) - .map(|quickack| quickack != 0) - } - } - - /// Set the value of the `TCP_QUICKACK` option on this socket. - /// - /// If set, acks are sent immediately, rather than delayed if needed in accordance to normal - /// TCP operation. This flag is not permanent, it only enables a switch to or from quickack mode. - /// Subsequent operation of the TCP protocol will once again enter/leave quickack mode depending on - /// internal protocol processing and factors such as delayed ack timeouts occurring and data transfer. 
- #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn set_quickack(&self, quickack: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::IPPROTO_TCP, - libc::TCP_QUICKACK, - quickack as c_int, - ) - } - } - - /// Get the value of the `TCP_THIN_LINEAR_TIMEOUTS` option on this socket. - /// - /// For more information about this option, see [`set_thin_linear_timeouts`]. - /// - /// [`set_thin_linear_timeouts`]: Socket::set_thin_linear_timeouts - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn thin_linear_timeouts(&self) -> io::Result { - unsafe { - getsockopt::( - self.as_raw(), - libc::IPPROTO_TCP, - libc::TCP_THIN_LINEAR_TIMEOUTS, - ) - .map(|timeouts| timeouts != 0) - } - } - - /// Set the value of the `TCP_THIN_LINEAR_TIMEOUTS` option on this socket. - /// - /// If set, the kernel will dynamically detect a thin-stream connection if there are less than four packets in flight. - /// With less than four packets in flight the normal TCP fast retransmission will not be effective. - /// The kernel will modify the retransmission to avoid the very high latencies that thin stream suffer because of exponential backoff. 
- #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn set_thin_linear_timeouts(&self, timeouts: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::IPPROTO_TCP, - libc::TCP_THIN_LINEAR_TIMEOUTS, - timeouts as c_int, - ) - } - } - - /// Gets the value for the `SO_BINDTODEVICE` option on this socket. - /// - /// This value gets the socket binded device's interface name. - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn device(&self) -> io::Result>> { - // TODO: replace with `MaybeUninit::uninit_array` once stable. - let mut buf: [MaybeUninit; libc::IFNAMSIZ] = - unsafe { MaybeUninit::uninit().assume_init() }; - let mut len = buf.len() as libc::socklen_t; - syscall!(getsockopt( - self.as_raw(), - libc::SOL_SOCKET, - libc::SO_BINDTODEVICE, - buf.as_mut_ptr().cast(), - &mut len, - ))?; - if len == 0 { - Ok(None) - } else { - let buf = &buf[..len as usize - 1]; - // TODO: use `MaybeUninit::slice_assume_init_ref` once stable. - Ok(Some(unsafe { &*(buf as *const [_] as *const [u8]) }.into())) - } - } - - /// Sets the value for the `SO_BINDTODEVICE` option on this socket. - /// - /// If a socket is bound to an interface, only packets received from that - /// particular interface are processed by the socket. Note that this only - /// works for some socket types, particularly `AF_INET` sockets. - /// - /// If `interface` is `None` or an empty string it removes the binding. 
- #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn bind_device(&self, interface: Option<&[u8]>) -> io::Result<()> { - let (value, len) = if let Some(interface) = interface { - (interface.as_ptr(), interface.len()) - } else { - (ptr::null(), 0) - }; - syscall!(setsockopt( - self.as_raw(), - libc::SOL_SOCKET, - libc::SO_BINDTODEVICE, - value.cast(), - len as libc::socklen_t, - )) - .map(|_| ()) - } - - /// Sets the value for the `SO_SETFIB` option on this socket. - /// - /// Bind socket to the specified forwarding table (VRF) on a FreeBSD. - #[cfg(all(feature = "all", any(target_os = "freebsd")))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", any(target_os = "freebsd")))))] - pub fn set_fib(&self, fib: u32) -> io::Result<()> { - syscall!(setsockopt( - self.as_raw(), - libc::SOL_SOCKET, - libc::SO_SETFIB, - (&fib as *const u32).cast(), - mem::size_of::() as libc::socklen_t, - )) - .map(|_| ()) - } - - /// Sets the value for `IP_BOUND_IF` option on this socket. - /// - /// If a socket is bound to an interface, only packets received from that - /// particular interface are processed by the socket. - /// - /// If `interface` is `None`, the binding is removed. If the `interface` - /// index is not valid, an error is returned. - /// - /// One can use `libc::if_nametoindex` to convert an interface alias to an - /// index. - #[cfg(all(feature = "all", target_vendor = "apple"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_vendor = "apple"))))] - pub fn bind_device_by_index(&self, interface: Option) -> io::Result<()> { - let index = interface.map(NonZeroU32::get).unwrap_or(0); - unsafe { setsockopt(self.as_raw(), IPPROTO_IP, libc::IP_BOUND_IF, index) } - } - - /// Gets the value for `IP_BOUND_IF` option on this socket, i.e. 
the index - /// for the interface to which the socket is bound. - /// - /// Returns `None` if the socket is not bound to any interface, otherwise - /// returns an interface index. - #[cfg(all(feature = "all", target_vendor = "apple"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_vendor = "apple"))))] - pub fn device_index(&self) -> io::Result> { - let index = - unsafe { getsockopt::(self.as_raw(), IPPROTO_IP, libc::IP_BOUND_IF)? }; - Ok(NonZeroU32::new(index)) - } - - /// Get the value of the `SO_INCOMING_CPU` option on this socket. - /// - /// For more information about this option, see [`set_cpu_affinity`]. - /// - /// [`set_cpu_affinity`]: crate::Socket::set_cpu_affinity - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn cpu_affinity(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_INCOMING_CPU) - .map(|cpu| cpu as usize) - } - } - - /// Set value for the `SO_INCOMING_CPU` option on this socket. - /// - /// Sets the CPU affinity of the socket. - #[cfg(all(feature = "all", target_os = "linux"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "all", target_os = "linux"))))] - pub fn set_cpu_affinity(&self, cpu: usize) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_SOCKET, - libc::SO_INCOMING_CPU, - cpu as c_int, - ) - } - } - - /// Get the value of the `SO_REUSEPORT` option on this socket. - /// - /// For more information about this option, see [`set_reuse_port`]. 
- /// - /// [`set_reuse_port`]: crate::Socket::set_reuse_port - #[cfg(all( - feature = "all", - not(any(target_os = "solaris", target_os = "illumos")) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - unix, - not(any(target_os = "solaris", target_os = "illumos")) - ))) - )] - pub fn reuse_port(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::SOL_SOCKET, libc::SO_REUSEPORT) - .map(|reuse| reuse != 0) - } - } - - /// Set value for the `SO_REUSEPORT` option on this socket. - /// - /// This indicates that further calls to `bind` may allow reuse of local - /// addresses. For IPv4 sockets this means that a socket may bind even when - /// there's a socket already listening on this port. - #[cfg(all( - feature = "all", - not(any(target_os = "solaris", target_os = "illumos")) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - unix, - not(any(target_os = "solaris", target_os = "illumos")) - ))) - )] - pub fn set_reuse_port(&self, reuse: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_SOCKET, - libc::SO_REUSEPORT, - reuse as c_int, - ) - } - } - - /// Get the value of the `IP_FREEBIND` option on this socket. - /// - /// For more information about this option, see [`set_freebind`]. - /// - /// [`set_freebind`]: crate::Socket::set_freebind - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn freebind(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::SOL_IP, libc::IP_FREEBIND) - .map(|freebind| freebind != 0) - } - } - - /// Set value for the `IP_FREEBIND` option on this socket. - /// - /// If enabled, this boolean option allows binding to an IP address that is - /// nonlocal or does not (yet) exist. 
This permits listening on a socket, - /// without requiring the underlying network interface or the specified - /// dynamic IP address to be up at the time that the application is trying - /// to bind to it. - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn set_freebind(&self, freebind: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_IP, - libc::IP_FREEBIND, - freebind as c_int, - ) - } - } - - /// Get the value of the `IPV6_FREEBIND` option on this socket. - /// - /// This is an IPv6 counterpart of `IP_FREEBIND` socket option on - /// Android/Linux. For more information about this option, see - /// [`set_freebind`]. - /// - /// [`set_freebind`]: crate::Socket::set_freebind - #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) - )] - pub fn freebind_ipv6(&self) -> io::Result { - unsafe { - getsockopt::(self.as_raw(), libc::SOL_IPV6, libc::IPV6_FREEBIND) - .map(|freebind| freebind != 0) - } - } - - /// Set value for the `IPV6_FREEBIND` option on this socket. - /// - /// This is an IPv6 counterpart of `IP_FREEBIND` socket option on - /// Android/Linux. For more information about this option, see - /// [`set_freebind`]. - /// - /// [`set_freebind`]: crate::Socket::set_freebind - /// - /// # Examples - /// - /// On Linux: - /// - /// ``` - /// use socket2::{Domain, Socket, Type}; - /// use std::io::{self, Error, ErrorKind}; - /// - /// fn enable_freebind(socket: &Socket) -> io::Result<()> { - /// match socket.domain()? 
{ - /// Domain::IPV4 => socket.set_freebind(true)?, - /// Domain::IPV6 => socket.set_freebind_ipv6(true)?, - /// _ => return Err(Error::new(ErrorKind::Other, "unsupported domain")), - /// }; - /// Ok(()) - /// } - /// - /// # fn main() -> io::Result<()> { - /// # let socket = Socket::new(Domain::IPV6, Type::STREAM, None)?; - /// # enable_freebind(&socket) - /// # } - /// ``` - #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))) - )] - pub fn set_freebind_ipv6(&self, freebind: bool) -> io::Result<()> { - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_IPV6, - libc::IPV6_FREEBIND, - freebind as c_int, - ) - } - } - - /// Copies data between a `file` and this socket using the `sendfile(2)` - /// system call. Because this copying is done within the kernel, - /// `sendfile()` is more efficient than the combination of `read(2)` and - /// `write(2)`, which would require transferring data to and from user - /// space. - /// - /// Different OSs support different kinds of `file`s, see the OS - /// documentation for what kind of files are supported. Generally *regular* - /// files are supported by all OSs. - /// - /// The `offset` is the absolute offset into the `file` to use as starting - /// point. - /// - /// Depending on the OS this function *may* change the offset of `file`. For - /// the best results reset the offset of the file before using it again. - /// - /// The `length` determines how many bytes to send, where a length of `None` - /// means it will try to send all bytes. 
- #[cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "linux", - target_vendor = "apple", - ) - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any( - target_os = "android", - target_os = "freebsd", - target_os = "linux", - target_vendor = "apple", - ) - ))) - )] - pub fn sendfile( - &self, - file: &F, - offset: usize, - length: Option, - ) -> io::Result - where - F: AsRawFd, - { - self._sendfile(file.as_raw_fd(), offset as _, length) - } - - #[cfg(all(feature = "all", target_vendor = "apple"))] - fn _sendfile( - &self, - file: RawFd, - offset: libc::off_t, - length: Option, - ) -> io::Result { - // On macOS `length` is value-result parameter. It determines the number - // of bytes to write and returns the number of bytes written. - let mut length = match length { - Some(n) => n.get() as libc::off_t, - // A value of `0` means send all bytes. - None => 0, - }; - syscall!(sendfile( - file, - self.as_raw(), - offset, - &mut length, - ptr::null_mut(), - 0, - )) - .map(|_| length as usize) - } - - #[cfg(all(feature = "all", any(target_os = "android", target_os = "linux")))] - fn _sendfile( - &self, - file: RawFd, - offset: libc::off_t, - length: Option, - ) -> io::Result { - let count = match length { - Some(n) => n.get() as libc::size_t, - // The maximum the Linux kernel will write in a single call. - None => 0x7ffff000, // 2,147,479,552 bytes. - }; - let mut offset = offset; - syscall!(sendfile(self.as_raw(), file, &mut offset, count)).map(|n| n as usize) - } - - #[cfg(all(feature = "all", target_os = "freebsd"))] - fn _sendfile( - &self, - file: RawFd, - offset: libc::off_t, - length: Option, - ) -> io::Result { - let nbytes = match length { - Some(n) => n.get() as libc::size_t, - // A value of `0` means send all bytes. 
- None => 0, - }; - let mut sbytes: libc::off_t = 0; - syscall!(sendfile( - file, - self.as_raw(), - offset, - nbytes, - ptr::null_mut(), - &mut sbytes, - 0, - )) - .map(|_| sbytes as usize) - } - - /// Set the value of the `TCP_USER_TIMEOUT` option on this socket. - /// - /// If set, this specifies the maximum amount of time that transmitted data may remain - /// unacknowledged or buffered data may remain untransmitted before TCP will forcibly close the - /// corresponding connection. - /// - /// Setting `timeout` to `None` or a zero duration causes the system default timeouts to - /// be used. If `timeout` in milliseconds is larger than `c_uint::MAX`, the timeout is clamped - /// to `c_uint::MAX`. For example, when `c_uint` is a 32-bit value, this limits the timeout to - /// approximately 49.71 days. - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn set_tcp_user_timeout(&self, timeout: Option) -> io::Result<()> { - let timeout = timeout - .map(|to| min(to.as_millis(), libc::c_uint::MAX as u128) as libc::c_uint) - .unwrap_or(0); - unsafe { - setsockopt( - self.as_raw(), - libc::IPPROTO_TCP, - libc::TCP_USER_TIMEOUT, - timeout, - ) - } - } - - /// Get the value of the `TCP_USER_TIMEOUT` option on this socket. - /// - /// For more information about this option, see [`set_tcp_user_timeout`]. 
- /// - /// [`set_tcp_user_timeout`]: Socket::set_tcp_user_timeout - #[cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))] - #[cfg_attr( - docsrs, - doc(cfg(all( - feature = "all", - any(target_os = "android", target_os = "fuchsia", target_os = "linux") - ))) - )] - pub fn tcp_user_timeout(&self) -> io::Result> { - unsafe { - getsockopt::(self.as_raw(), libc::IPPROTO_TCP, libc::TCP_USER_TIMEOUT) - .map(|millis| { - if millis == 0 { - None - } else { - Some(Duration::from_millis(millis as u64)) - } - }) - } - } - - /// Attach Berkeley Packet Filter(BPF) on this socket. - /// - /// BPF allows a user-space program to attach a filter onto any socket - /// and allow or disallow certain types of data to come through the socket. - /// - /// For more information about this option, see [filter](https://www.kernel.org/doc/html/v5.12/networking/filter.html) - #[cfg(all(feature = "all", any(target_os = "linux", target_os = "android")))] - pub fn attach_filter(&self, filters: &[libc::sock_filter]) -> io::Result<()> { - let prog = libc::sock_fprog { - len: filters.len() as u16, - filter: filters.as_ptr() as *mut _, - }; - - unsafe { - setsockopt( - self.as_raw(), - libc::SOL_SOCKET, - libc::SO_ATTACH_FILTER, - prog, - ) - } - } - - /// Detach Berkeley Packet Filter(BPF) from this socket. 
- /// - /// For more information about this option, see [`attach_filter`] - #[cfg(all(feature = "all", any(target_os = "linux", target_os = "android")))] - pub fn detach_filter(&self) -> io::Result<()> { - unsafe { setsockopt(self.as_raw(), libc::SOL_SOCKET, libc::SO_DETACH_FILTER, 0) } - } -} - -#[cfg_attr(docsrs, doc(cfg(unix)))] -impl AsRawFd for crate::Socket { - fn as_raw_fd(&self) -> c_int { - self.as_raw() - } -} - -#[cfg_attr(docsrs, doc(cfg(unix)))] -impl IntoRawFd for crate::Socket { - fn into_raw_fd(self) -> c_int { - self.into_raw() - } -} - -#[cfg_attr(docsrs, doc(cfg(unix)))] -impl FromRawFd for crate::Socket { - unsafe fn from_raw_fd(fd: c_int) -> crate::Socket { - crate::Socket::from_raw(fd) - } -} - -#[cfg(feature = "all")] -from!(UnixStream, crate::Socket); -#[cfg(feature = "all")] -from!(UnixListener, crate::Socket); -#[cfg(feature = "all")] -from!(UnixDatagram, crate::Socket); -#[cfg(feature = "all")] -from!(crate::Socket, UnixStream); -#[cfg(feature = "all")] -from!(crate::Socket, UnixListener); -#[cfg(feature = "all")] -from!(crate::Socket, UnixDatagram); - -#[test] -fn in_addr_convertion() { - let ip = Ipv4Addr::new(127, 0, 0, 1); - let raw = to_in_addr(&ip); - // NOTE: `in_addr` is packed on NetBSD and it's unsafe to borrow. 
- let a = raw.s_addr; - assert_eq!(a, u32::from_ne_bytes([127, 0, 0, 1])); - assert_eq!(from_in_addr(raw), ip); - - let ip = Ipv4Addr::new(127, 34, 4, 12); - let raw = to_in_addr(&ip); - let a = raw.s_addr; - assert_eq!(a, u32::from_ne_bytes([127, 34, 4, 12])); - assert_eq!(from_in_addr(raw), ip); -} - -#[test] -fn in6_addr_convertion() { - let ip = Ipv6Addr::new(0x2000, 1, 2, 3, 4, 5, 6, 7); - let raw = to_in6_addr(&ip); - let want = [32, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7]; - assert_eq!(raw.s6_addr, want); - assert_eq!(from_in6_addr(raw), ip); -} diff -Nru s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/sys/windows.rs s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/sys/windows.rs --- s390-tools-2.31.0/rust-vendor/socket2-0.4.9/src/sys/windows.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/socket2-0.4.9/src/sys/windows.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,882 +0,0 @@ -// Copyright 2015 The Rust Project Developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::cmp::min; -use std::io::{self, IoSlice}; -use std::marker::PhantomData; -use std::mem::{self, size_of, MaybeUninit}; -use std::net::{self, Ipv4Addr, Ipv6Addr, Shutdown}; -use std::os::windows::prelude::*; -use std::sync::Once; -use std::time::{Duration, Instant}; -use std::{ptr, slice}; - -use winapi::ctypes::c_long; -use winapi::shared::in6addr::*; -use winapi::shared::inaddr::*; -use winapi::shared::minwindef::DWORD; -use winapi::shared::minwindef::ULONG; -use winapi::shared::mstcpip::{tcp_keepalive, SIO_KEEPALIVE_VALS}; -use winapi::shared::ntdef::HANDLE; -use winapi::shared::ws2def; -use winapi::shared::ws2def::WSABUF; -use winapi::um::handleapi::SetHandleInformation; -use winapi::um::processthreadsapi::GetCurrentProcessId; -use winapi::um::winbase::{self, INFINITE}; -use winapi::um::winsock2::{ - self as sock, u_long, POLLERR, POLLHUP, POLLRDNORM, POLLWRNORM, SD_BOTH, SD_RECEIVE, SD_SEND, - WSAPOLLFD, -}; -use winapi::um::winsock2::{SOCKET_ERROR, WSAEMSGSIZE, WSAESHUTDOWN}; - -use crate::{RecvFlags, SockAddr, TcpKeepalive, Type}; - -pub(crate) use winapi::ctypes::c_int; - -/// Fake MSG_TRUNC flag for the [`RecvFlags`] struct. -/// -/// The flag is enabled when a `WSARecv[From]` call returns `WSAEMSGSIZE`. The -/// value of the flag is defined by us. -pub(crate) const MSG_TRUNC: c_int = 0x01; - -// Used in `Domain`. -pub(crate) use winapi::shared::ws2def::{AF_INET, AF_INET6}; -// Used in `Type`. -pub(crate) use winapi::shared::ws2def::{SOCK_DGRAM, SOCK_STREAM}; -#[cfg(feature = "all")] -pub(crate) use winapi::shared::ws2def::{SOCK_RAW, SOCK_SEQPACKET}; -// Used in `Protocol`. -pub(crate) const IPPROTO_ICMP: c_int = winapi::shared::ws2def::IPPROTO_ICMP as c_int; -pub(crate) const IPPROTO_ICMPV6: c_int = winapi::shared::ws2def::IPPROTO_ICMPV6 as c_int; -pub(crate) const IPPROTO_TCP: c_int = winapi::shared::ws2def::IPPROTO_TCP as c_int; -pub(crate) const IPPROTO_UDP: c_int = winapi::shared::ws2def::IPPROTO_UDP as c_int; -// Used in `SockAddr`. 
-pub(crate) use winapi::shared::ws2def::{ - ADDRESS_FAMILY as sa_family_t, SOCKADDR as sockaddr, SOCKADDR_IN as sockaddr_in, - SOCKADDR_STORAGE as sockaddr_storage, -}; -pub(crate) use winapi::shared::ws2ipdef::SOCKADDR_IN6_LH as sockaddr_in6; -pub(crate) use winapi::um::ws2tcpip::socklen_t; -// Used in `Socket`. -pub(crate) use winapi::shared::ws2def::{ - IPPROTO_IP, SOL_SOCKET, SO_BROADCAST, SO_ERROR, SO_KEEPALIVE, SO_LINGER, SO_OOBINLINE, - SO_RCVBUF, SO_RCVTIMEO, SO_REUSEADDR, SO_SNDBUF, SO_SNDTIMEO, SO_TYPE, TCP_NODELAY, -}; -#[cfg(feature = "all")] -pub(crate) use winapi::shared::ws2ipdef::IP_HDRINCL; -pub(crate) use winapi::shared::ws2ipdef::{ - IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP, IPV6_MREQ as Ipv6Mreq, IPV6_MULTICAST_HOPS, - IPV6_MULTICAST_IF, IPV6_MULTICAST_LOOP, IPV6_UNICAST_HOPS, IPV6_V6ONLY, IP_ADD_MEMBERSHIP, - IP_ADD_SOURCE_MEMBERSHIP, IP_DROP_MEMBERSHIP, IP_DROP_SOURCE_MEMBERSHIP, IP_MREQ as IpMreq, - IP_MREQ_SOURCE as IpMreqSource, IP_MULTICAST_IF, IP_MULTICAST_LOOP, IP_MULTICAST_TTL, IP_TOS, - IP_TTL, -}; -pub(crate) use winapi::um::winsock2::{linger, MSG_OOB, MSG_PEEK}; -pub(crate) const IPPROTO_IPV6: c_int = winapi::shared::ws2def::IPPROTO_IPV6 as c_int; - -/// Type used in set/getsockopt to retrieve the `TCP_NODELAY` option. -/// -/// NOTE: -/// documents that options such as `TCP_NODELAY` and `SO_KEEPALIVE` expect a -/// `BOOL` (alias for `c_int`, 4 bytes), however in practice this turns out to -/// be false (or misleading) as a `BOOLEAN` (`c_uchar`, 1 byte) is returned by -/// `getsockopt`. -pub(crate) type Bool = winapi::shared::ntdef::BOOLEAN; - -/// Maximum size of a buffer passed to system call like `recv` and `send`. -const MAX_BUF_LEN: usize = ::max_value() as usize; - -/// Helper macro to execute a system call that returns an `io::Result`. -macro_rules! 
syscall { - ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{ - #[allow(unused_unsafe)] - let res = unsafe { sock::$fn($($arg, )*) }; - if $err_test(&res, &$err_value) { - Err(io::Error::last_os_error()) - } else { - Ok(res) - } - }}; -} - -impl_debug!( - crate::Domain, - ws2def::AF_INET, - ws2def::AF_INET6, - ws2def::AF_UNIX, - ws2def::AF_UNSPEC, // = 0. -); - -/// Windows only API. -impl Type { - /// Our custom flag to set `WSA_FLAG_NO_HANDLE_INHERIT` on socket creation. - /// Trying to mimic `Type::cloexec` on windows. - const NO_INHERIT: c_int = 1 << ((size_of::() * 8) - 1); // Last bit. - - /// Set `WSA_FLAG_NO_HANDLE_INHERIT` on the socket. - #[cfg(feature = "all")] - #[cfg_attr(docsrs, doc(cfg(all(windows, feature = "all"))))] - pub const fn no_inherit(self) -> Type { - self._no_inherit() - } - - pub(crate) const fn _no_inherit(self) -> Type { - Type(self.0 | Type::NO_INHERIT) - } -} - -impl_debug!( - crate::Type, - ws2def::SOCK_STREAM, - ws2def::SOCK_DGRAM, - ws2def::SOCK_RAW, - ws2def::SOCK_RDM, - ws2def::SOCK_SEQPACKET, -); - -impl_debug!( - crate::Protocol, - self::IPPROTO_ICMP, - self::IPPROTO_ICMPV6, - self::IPPROTO_TCP, - self::IPPROTO_UDP, -); - -impl std::fmt::Debug for RecvFlags { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("RecvFlags") - .field("is_truncated", &self.is_truncated()) - .finish() - } -} - -#[repr(transparent)] -pub struct MaybeUninitSlice<'a> { - vec: WSABUF, - _lifetime: PhantomData<&'a mut [MaybeUninit]>, -} - -unsafe impl<'a> Send for MaybeUninitSlice<'a> {} - -unsafe impl<'a> Sync for MaybeUninitSlice<'a> {} - -impl<'a> MaybeUninitSlice<'a> { - pub fn new(buf: &'a mut [MaybeUninit]) -> MaybeUninitSlice<'a> { - assert!(buf.len() <= ULONG::MAX as usize); - MaybeUninitSlice { - vec: WSABUF { - len: buf.len() as ULONG, - buf: buf.as_mut_ptr().cast(), - }, - _lifetime: PhantomData, - } - } - - pub fn as_slice(&self) -> &[MaybeUninit] { - unsafe { 
slice::from_raw_parts(self.vec.buf.cast(), self.vec.len as usize) } - } - - pub fn as_mut_slice(&mut self) -> &mut [MaybeUninit] { - unsafe { slice::from_raw_parts_mut(self.vec.buf.cast(), self.vec.len as usize) } - } -} - -fn init() { - static INIT: Once = Once::new(); - - INIT.call_once(|| { - // Initialize winsock through the standard library by just creating a - // dummy socket. Whether this is successful or not we drop the result as - // libstd will be sure to have initialized winsock. - let _ = net::UdpSocket::bind("127.0.0.1:34254"); - }); -} - -pub(crate) type Socket = sock::SOCKET; - -pub(crate) unsafe fn socket_from_raw(socket: Socket) -> crate::socket::Inner { - crate::socket::Inner::from_raw_socket(socket as RawSocket) -} - -pub(crate) fn socket_as_raw(socket: &crate::socket::Inner) -> Socket { - socket.as_raw_socket() as Socket -} - -pub(crate) fn socket_into_raw(socket: crate::socket::Inner) -> Socket { - socket.into_raw_socket() as Socket -} - -pub(crate) fn socket(family: c_int, mut ty: c_int, protocol: c_int) -> io::Result { - init(); - - // Check if we set our custom flag. 
- let flags = if ty & Type::NO_INHERIT != 0 { - ty = ty & !Type::NO_INHERIT; - sock::WSA_FLAG_NO_HANDLE_INHERIT - } else { - 0 - }; - - syscall!( - WSASocketW( - family, - ty, - protocol, - ptr::null_mut(), - 0, - sock::WSA_FLAG_OVERLAPPED | flags, - ), - PartialEq::eq, - sock::INVALID_SOCKET - ) -} - -pub(crate) fn bind(socket: Socket, addr: &SockAddr) -> io::Result<()> { - syscall!(bind(socket, addr.as_ptr(), addr.len()), PartialEq::ne, 0).map(|_| ()) -} - -pub(crate) fn connect(socket: Socket, addr: &SockAddr) -> io::Result<()> { - syscall!(connect(socket, addr.as_ptr(), addr.len()), PartialEq::ne, 0).map(|_| ()) -} - -pub(crate) fn poll_connect(socket: &crate::Socket, timeout: Duration) -> io::Result<()> { - let start = Instant::now(); - - let mut fd_array = WSAPOLLFD { - fd: socket.as_raw(), - events: POLLRDNORM | POLLWRNORM, - revents: 0, - }; - - loop { - let elapsed = start.elapsed(); - if elapsed >= timeout { - return Err(io::ErrorKind::TimedOut.into()); - } - - let timeout = (timeout - elapsed).as_millis(); - let timeout = clamp(timeout, 1, c_int::max_value() as u128) as c_int; - - match syscall!( - WSAPoll(&mut fd_array, 1, timeout), - PartialEq::eq, - sock::SOCKET_ERROR - ) { - Ok(0) => return Err(io::ErrorKind::TimedOut.into()), - Ok(_) => { - // Error or hang up indicates an error (or failure to connect). - if (fd_array.revents & POLLERR) != 0 || (fd_array.revents & POLLHUP) != 0 { - match socket.take_error() { - Ok(Some(err)) => return Err(err), - Ok(None) => { - return Err(io::Error::new( - io::ErrorKind::Other, - "no error set after POLLHUP", - )) - } - Err(err) => return Err(err), - } - } - return Ok(()); - } - // Got interrupted, try again. - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Err(err), - } - } -} - -// TODO: use clamp from std lib, stable since 1.50. 
-fn clamp(value: T, min: T, max: T) -> T -where - T: Ord, -{ - if value <= min { - min - } else if value >= max { - max - } else { - value - } -} - -pub(crate) fn listen(socket: Socket, backlog: c_int) -> io::Result<()> { - syscall!(listen(socket, backlog), PartialEq::ne, 0).map(|_| ()) -} - -pub(crate) fn accept(socket: Socket) -> io::Result<(Socket, SockAddr)> { - // Safety: `accept` initialises the `SockAddr` for us. - unsafe { - SockAddr::init(|storage, len| { - syscall!( - accept(socket, storage.cast(), len), - PartialEq::eq, - sock::INVALID_SOCKET - ) - }) - } -} - -pub(crate) fn getsockname(socket: Socket) -> io::Result { - // Safety: `getsockname` initialises the `SockAddr` for us. - unsafe { - SockAddr::init(|storage, len| { - syscall!( - getsockname(socket, storage.cast(), len), - PartialEq::eq, - sock::SOCKET_ERROR - ) - }) - } - .map(|(_, addr)| addr) -} - -pub(crate) fn getpeername(socket: Socket) -> io::Result { - // Safety: `getpeername` initialises the `SockAddr` for us. - unsafe { - SockAddr::init(|storage, len| { - syscall!( - getpeername(socket, storage.cast(), len), - PartialEq::eq, - sock::SOCKET_ERROR - ) - }) - } - .map(|(_, addr)| addr) -} - -pub(crate) fn try_clone(socket: Socket) -> io::Result { - let mut info: MaybeUninit = MaybeUninit::uninit(); - syscall!( - WSADuplicateSocketW(socket, GetCurrentProcessId(), info.as_mut_ptr()), - PartialEq::eq, - sock::SOCKET_ERROR - )?; - // Safety: `WSADuplicateSocketW` intialised `info` for us. 
- let mut info = unsafe { info.assume_init() }; - - syscall!( - WSASocketW( - info.iAddressFamily, - info.iSocketType, - info.iProtocol, - &mut info, - 0, - sock::WSA_FLAG_OVERLAPPED | sock::WSA_FLAG_NO_HANDLE_INHERIT, - ), - PartialEq::eq, - sock::INVALID_SOCKET - ) -} - -pub(crate) fn set_nonblocking(socket: Socket, nonblocking: bool) -> io::Result<()> { - let mut nonblocking = nonblocking as u_long; - ioctlsocket(socket, sock::FIONBIO, &mut nonblocking) -} - -pub(crate) fn shutdown(socket: Socket, how: Shutdown) -> io::Result<()> { - let how = match how { - Shutdown::Write => SD_SEND, - Shutdown::Read => SD_RECEIVE, - Shutdown::Both => SD_BOTH, - }; - syscall!(shutdown(socket, how), PartialEq::eq, sock::SOCKET_ERROR).map(|_| ()) -} - -pub(crate) fn recv(socket: Socket, buf: &mut [MaybeUninit], flags: c_int) -> io::Result { - let res = syscall!( - recv( - socket, - buf.as_mut_ptr().cast(), - min(buf.len(), MAX_BUF_LEN) as c_int, - flags, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ); - match res { - Ok(n) => Ok(n as usize), - Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => Ok(0), - Err(err) => Err(err), - } -} - -pub(crate) fn recv_vectored( - socket: Socket, - bufs: &mut [crate::MaybeUninitSlice<'_>], - flags: c_int, -) -> io::Result<(usize, RecvFlags)> { - let mut nread = 0; - let mut flags = flags as DWORD; - let res = syscall!( - WSARecv( - socket, - bufs.as_mut_ptr().cast(), - min(bufs.len(), DWORD::max_value() as usize) as DWORD, - &mut nread, - &mut flags, - ptr::null_mut(), - None, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ); - match res { - Ok(_) => Ok((nread as usize, RecvFlags(0))), - Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => { - Ok((0, RecvFlags(0))) - } - Err(ref err) if err.raw_os_error() == Some(sock::WSAEMSGSIZE as i32) => { - Ok((nread as usize, RecvFlags(MSG_TRUNC))) - } - Err(err) => Err(err), - } -} - -pub(crate) fn recv_from( - socket: Socket, - buf: &mut [MaybeUninit], - flags: 
c_int, -) -> io::Result<(usize, SockAddr)> { - // Safety: `recvfrom` initialises the `SockAddr` for us. - unsafe { - SockAddr::init(|storage, addrlen| { - let res = syscall!( - recvfrom( - socket, - buf.as_mut_ptr().cast(), - min(buf.len(), MAX_BUF_LEN) as c_int, - flags, - storage.cast(), - addrlen, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ); - match res { - Ok(n) => Ok(n as usize), - Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => Ok(0), - Err(err) => Err(err), - } - }) - } -} - -pub(crate) fn peek_sender(socket: Socket) -> io::Result { - // Safety: `recvfrom` initialises the `SockAddr` for us. - let ((), sender) = unsafe { - SockAddr::init(|storage, addrlen| { - let res = syscall!( - recvfrom( - socket, - // Windows *appears* not to care if you pass a null pointer. - ptr::null_mut(), - 0, - MSG_PEEK, - storage.cast(), - addrlen, - ), - PartialEq::eq, - SOCKET_ERROR - ); - match res { - Ok(_n) => Ok(()), - Err(e) => match e.raw_os_error() { - Some(code) if code == (WSAESHUTDOWN as i32) || code == (WSAEMSGSIZE as i32) => { - Ok(()) - } - _ => Err(e), - }, - } - }) - }?; - - Ok(sender) -} - -pub(crate) fn recv_from_vectored( - socket: Socket, - bufs: &mut [crate::MaybeUninitSlice<'_>], - flags: c_int, -) -> io::Result<(usize, RecvFlags, SockAddr)> { - // Safety: `recvfrom` initialises the `SockAddr` for us. 
- unsafe { - SockAddr::init(|storage, addrlen| { - let mut nread = 0; - let mut flags = flags as DWORD; - let res = syscall!( - WSARecvFrom( - socket, - bufs.as_mut_ptr().cast(), - min(bufs.len(), DWORD::max_value() as usize) as DWORD, - &mut nread, - &mut flags, - storage.cast(), - addrlen, - ptr::null_mut(), - None, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ); - match res { - Ok(_) => Ok((nread as usize, RecvFlags(0))), - Err(ref err) if err.raw_os_error() == Some(sock::WSAESHUTDOWN as i32) => { - Ok((nread as usize, RecvFlags(0))) - } - Err(ref err) if err.raw_os_error() == Some(sock::WSAEMSGSIZE as i32) => { - Ok((nread as usize, RecvFlags(MSG_TRUNC))) - } - Err(err) => Err(err), - } - }) - } - .map(|((n, recv_flags), addr)| (n, recv_flags, addr)) -} - -pub(crate) fn send(socket: Socket, buf: &[u8], flags: c_int) -> io::Result { - syscall!( - send( - socket, - buf.as_ptr().cast(), - min(buf.len(), MAX_BUF_LEN) as c_int, - flags, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ) - .map(|n| n as usize) -} - -pub(crate) fn send_vectored( - socket: Socket, - bufs: &[IoSlice<'_>], - flags: c_int, -) -> io::Result { - let mut nsent = 0; - syscall!( - WSASend( - socket, - // FIXME: From the `WSASend` docs [1]: - // > For a Winsock application, once the WSASend function is called, - // > the system owns these buffers and the application may not - // > access them. - // - // So what we're doing is actually UB as `bufs` needs to be `&mut - // [IoSlice<'_>]`. - // - // Tracking issue: https://github.com/rust-lang/socket2-rs/issues/129. - // - // NOTE: `send_to_vectored` has the same problem. 
- // - // [1] https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsasend - bufs.as_ptr() as *mut _, - min(bufs.len(), DWORD::max_value() as usize) as DWORD, - &mut nsent, - flags as DWORD, - std::ptr::null_mut(), - None, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ) - .map(|_| nsent as usize) -} - -pub(crate) fn send_to( - socket: Socket, - buf: &[u8], - addr: &SockAddr, - flags: c_int, -) -> io::Result { - syscall!( - sendto( - socket, - buf.as_ptr().cast(), - min(buf.len(), MAX_BUF_LEN) as c_int, - flags, - addr.as_ptr(), - addr.len(), - ), - PartialEq::eq, - sock::SOCKET_ERROR - ) - .map(|n| n as usize) -} - -pub(crate) fn send_to_vectored( - socket: Socket, - bufs: &[IoSlice<'_>], - addr: &SockAddr, - flags: c_int, -) -> io::Result { - let mut nsent = 0; - syscall!( - WSASendTo( - socket, - // FIXME: Same problem as in `send_vectored`. - bufs.as_ptr() as *mut _, - bufs.len().min(DWORD::MAX as usize) as DWORD, - &mut nsent, - flags as DWORD, - addr.as_ptr(), - addr.len(), - ptr::null_mut(), - None, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ) - .map(|_| nsent as usize) -} - -/// Wrapper around `getsockopt` to deal with platform specific timeouts. -pub(crate) fn timeout_opt(fd: Socket, lvl: c_int, name: c_int) -> io::Result> { - unsafe { getsockopt(fd, lvl, name).map(from_ms) } -} - -fn from_ms(duration: DWORD) -> Option { - if duration == 0 { - None - } else { - let secs = duration / 1000; - let nsec = (duration % 1000) * 1000000; - Some(Duration::new(secs as u64, nsec as u32)) - } -} - -/// Wrapper around `setsockopt` to deal with platform specific timeouts. 
-pub(crate) fn set_timeout_opt( - fd: Socket, - level: c_int, - optname: c_int, - duration: Option, -) -> io::Result<()> { - let duration = into_ms(duration); - unsafe { setsockopt(fd, level, optname, duration) } -} - -fn into_ms(duration: Option) -> DWORD { - // Note that a duration is a (u64, u32) (seconds, nanoseconds) pair, and the - // timeouts in windows APIs are typically u32 milliseconds. To translate, we - // have two pieces to take care of: - // - // * Nanosecond precision is rounded up - // * Greater than u32::MAX milliseconds (50 days) is rounded up to - // INFINITE (never time out). - duration - .map(|duration| min(duration.as_millis(), INFINITE as u128) as DWORD) - .unwrap_or(0) -} - -pub(crate) fn set_tcp_keepalive(socket: Socket, keepalive: &TcpKeepalive) -> io::Result<()> { - let mut keepalive = tcp_keepalive { - onoff: 1, - keepalivetime: into_ms(keepalive.time), - keepaliveinterval: into_ms(keepalive.interval), - }; - let mut out = 0; - syscall!( - WSAIoctl( - socket, - SIO_KEEPALIVE_VALS, - &mut keepalive as *mut _ as *mut _, - size_of::() as _, - ptr::null_mut(), - 0, - &mut out, - ptr::null_mut(), - None, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ) - .map(|_| ()) -} - -/// Caller must ensure `T` is the correct type for `level` and `optname`. -pub(crate) unsafe fn getsockopt(socket: Socket, level: c_int, optname: c_int) -> io::Result { - let mut optval: MaybeUninit = MaybeUninit::uninit(); - let mut optlen = mem::size_of::() as c_int; - syscall!( - getsockopt( - socket, - level, - optname, - optval.as_mut_ptr().cast(), - &mut optlen, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ) - .map(|_| { - debug_assert_eq!(optlen as usize, mem::size_of::()); - // Safety: `getsockopt` initialised `optval` for us. - optval.assume_init() - }) -} - -/// Caller must ensure `T` is the correct type for `level` and `optname`. 
-pub(crate) unsafe fn setsockopt( - socket: Socket, - level: c_int, - optname: c_int, - optval: T, -) -> io::Result<()> { - syscall!( - setsockopt( - socket, - level, - optname, - (&optval as *const T).cast(), - mem::size_of::() as c_int, - ), - PartialEq::eq, - sock::SOCKET_ERROR - ) - .map(|_| ()) -} - -fn ioctlsocket(socket: Socket, cmd: c_long, payload: &mut u_long) -> io::Result<()> { - syscall!( - ioctlsocket(socket, cmd, payload), - PartialEq::eq, - sock::SOCKET_ERROR - ) - .map(|_| ()) -} - -pub(crate) fn to_in_addr(addr: &Ipv4Addr) -> IN_ADDR { - let mut s_un: in_addr_S_un = unsafe { mem::zeroed() }; - // `S_un` is stored as BE on all machines, and the array is in BE order. So - // the native endian conversion method is used so that it's never swapped. - unsafe { *(s_un.S_addr_mut()) = u32::from_ne_bytes(addr.octets()) }; - IN_ADDR { S_un: s_un } -} - -pub(crate) fn from_in_addr(in_addr: IN_ADDR) -> Ipv4Addr { - Ipv4Addr::from(unsafe { *in_addr.S_un.S_addr() }.to_ne_bytes()) -} - -pub(crate) fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr { - let mut ret_addr: in6_addr_u = unsafe { mem::zeroed() }; - unsafe { *(ret_addr.Byte_mut()) = addr.octets() }; - let mut ret: in6_addr = unsafe { mem::zeroed() }; - ret.u = ret_addr; - ret -} - -pub(crate) fn from_in6_addr(addr: in6_addr) -> Ipv6Addr { - Ipv6Addr::from(*unsafe { addr.u.Byte() }) -} - -pub(crate) fn to_mreqn( - multiaddr: &Ipv4Addr, - interface: &crate::socket::InterfaceIndexOrAddress, -) -> IpMreq { - IpMreq { - imr_multiaddr: to_in_addr(multiaddr), - // Per https://docs.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-ip_mreq#members: - // - // imr_interface - // - // The local IPv4 address of the interface or the interface index on - // which the multicast group should be joined or dropped. This value is - // in network byte order. If this member specifies an IPv4 address of - // 0.0.0.0, the default IPv4 multicast interface is used. 
- // - // To use an interface index of 1 would be the same as an IP address of - // 0.0.0.1. - imr_interface: match interface { - crate::socket::InterfaceIndexOrAddress::Index(interface) => { - to_in_addr(&(*interface).into()) - } - crate::socket::InterfaceIndexOrAddress::Address(interface) => to_in_addr(interface), - }, - } -} - -/// Windows only API. -impl crate::Socket { - /// Sets `HANDLE_FLAG_INHERIT` using `SetHandleInformation`. - #[cfg(feature = "all")] - #[cfg_attr(docsrs, doc(cfg(all(windows, feature = "all"))))] - pub fn set_no_inherit(&self, no_inherit: bool) -> io::Result<()> { - self._set_no_inherit(no_inherit) - } - - pub(crate) fn _set_no_inherit(&self, no_inherit: bool) -> io::Result<()> { - // NOTE: can't use `syscall!` because it expects the function in the - // `sock::` path. - let res = unsafe { - SetHandleInformation( - self.as_raw() as HANDLE, - winbase::HANDLE_FLAG_INHERIT, - !no_inherit as _, - ) - }; - if res == 0 { - // Zero means error. - Err(io::Error::last_os_error()) - } else { - Ok(()) - } - } -} - -impl AsRawSocket for crate::Socket { - fn as_raw_socket(&self) -> RawSocket { - self.as_raw() as RawSocket - } -} - -impl IntoRawSocket for crate::Socket { - fn into_raw_socket(self) -> RawSocket { - self.into_raw() as RawSocket - } -} - -impl FromRawSocket for crate::Socket { - unsafe fn from_raw_socket(socket: RawSocket) -> crate::Socket { - crate::Socket::from_raw(socket as Socket) - } -} - -#[test] -fn in_addr_convertion() { - let ip = Ipv4Addr::new(127, 0, 0, 1); - let raw = to_in_addr(&ip); - assert_eq!(unsafe { *raw.S_un.S_addr() }, 127 << 0 | 1 << 24); - assert_eq!(from_in_addr(raw), ip); - - let ip = Ipv4Addr::new(127, 34, 4, 12); - let raw = to_in_addr(&ip); - assert_eq!( - unsafe { *raw.S_un.S_addr() }, - 127 << 0 | 34 << 8 | 4 << 16 | 12 << 24 - ); - assert_eq!(from_in_addr(raw), ip); -} - -#[test] -fn in6_addr_convertion() { - let ip = Ipv6Addr::new(0x2000, 1, 2, 3, 4, 5, 6, 7); - let raw = to_in6_addr(&ip); - let want = [ - 
0x2000u16.to_be(), - 1u16.to_be(), - 2u16.to_be(), - 3u16.to_be(), - 4u16.to_be(), - 5u16.to_be(), - 6u16.to_be(), - 7u16.to_be(), - ]; - assert_eq!(unsafe { *raw.u.Word() }, want); - assert_eq!(from_in6_addr(raw), ip); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/tokio/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/tokio/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/tokio/Cargo.toml s390-tools-2.33.1/rust-vendor/tokio/Cargo.toml --- s390-tools-2.31.0/rust-vendor/tokio/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,230 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.63" -name = "tokio" -version = "1.33.0" -authors = ["Tokio Contributors "] -description = """ -An event-driven, non-blocking I/O platform for writing asynchronous I/O -backed applications. 
-""" -homepage = "https://tokio.rs" -readme = "README.md" -keywords = [ - "io", - "async", - "non-blocking", - "futures", -] -categories = [ - "asynchronous", - "network-programming", -] -license = "MIT" -repository = "https://github.com/tokio-rs/tokio" - -[package.metadata.docs.rs] -all-features = true -rustc-args = [ - "--cfg", - "tokio_unstable", - "--cfg", - "tokio_taskdump", -] -rustdoc-args = [ - "--cfg", - "docsrs", - "--cfg", - "tokio_unstable", - "--cfg", - "tokio_taskdump", -] - -[package.metadata.playground] -features = [ - "full", - "test-util", -] - -[dependencies.bytes] -version = "1.0.0" -optional = true - -[dependencies.mio] -version = "0.8.6" -optional = true -default-features = false - -[dependencies.num_cpus] -version = "1.8.0" -optional = true - -[dependencies.parking_lot] -version = "0.12.0" -optional = true - -[dependencies.pin-project-lite] -version = "0.2.11" - -[dependencies.tokio-macros] -version = "~2.1.0" -optional = true - -[dev-dependencies.async-stream] -version = "0.3" - -[dev-dependencies.futures] -version = "0.3.0" -features = ["async-await"] - -[dev-dependencies.mockall] -version = "0.11.1" - -[dev-dependencies.tokio-stream] -version = "0.1" - -[dev-dependencies.tokio-test] -version = "0.4.0" - -[features] -default = [] -fs = [] -full = [ - "fs", - "io-util", - "io-std", - "macros", - "net", - "parking_lot", - "process", - "rt", - "rt-multi-thread", - "signal", - "sync", - "time", -] -io-std = [] -io-util = ["bytes"] -macros = ["tokio-macros"] -net = [ - "libc", - "mio/os-poll", - "mio/os-ext", - "mio/net", - "socket2", - "windows-sys/Win32_Foundation", - "windows-sys/Win32_Security", - "windows-sys/Win32_Storage_FileSystem", - "windows-sys/Win32_System_Pipes", - "windows-sys/Win32_System_SystemServices", -] -process = [ - "bytes", - "libc", - "mio/os-poll", - "mio/os-ext", - "mio/net", - "signal-hook-registry", - "windows-sys/Win32_Foundation", - "windows-sys/Win32_System_Threading", - 
"windows-sys/Win32_System_WindowsProgramming", -] -rt = [] -rt-multi-thread = [ - "num_cpus", - "rt", -] -signal = [ - "libc", - "mio/os-poll", - "mio/net", - "mio/os-ext", - "signal-hook-registry", - "windows-sys/Win32_Foundation", - "windows-sys/Win32_System_Console", -] -sync = [] -test-util = [ - "rt", - "sync", - "time", -] -time = [] - -[target."cfg(all(target_family = \"wasm\", not(target_os = \"wasi\")))".dev-dependencies.wasm-bindgen-test] -version = "0.3.0" - -[target."cfg(loom)".dev-dependencies.loom] -version = "0.7" -features = [ - "futures", - "checkpoint", -] - -[target."cfg(not(all(target_family = \"wasm\", target_os = \"unknown\")))".dev-dependencies.rand] -version = "0.8.0" - -[target."cfg(not(target_family = \"wasm\"))".dependencies.socket2] -version = "0.5.3" -features = ["all"] -optional = true - -[target."cfg(not(target_family = \"wasm\"))".dev-dependencies.socket2] -version = "0.5.3" - -[target."cfg(not(target_family = \"wasm\"))".dev-dependencies.tempfile] -version = "3.1.0" - -[target."cfg(target_os = \"freebsd\")".dev-dependencies.mio-aio] -version = "0.7.0" -features = ["tokio"] - -[target."cfg(tokio_taskdump)".dependencies.backtrace] -version = "0.3.58" - -[target."cfg(tokio_unstable)".dependencies.tracing] -version = "0.1.25" -features = ["std"] -optional = true -default-features = false - -[target."cfg(unix)".dependencies.libc] -version = "0.2.145" -optional = true - -[target."cfg(unix)".dependencies.signal-hook-registry] -version = "1.1.1" -optional = true - -[target."cfg(unix)".dev-dependencies.libc] -version = "0.2.145" - -[target."cfg(unix)".dev-dependencies.nix] -version = "0.26" -features = [ - "fs", - "socket", -] -default-features = false - -[target."cfg(windows)".dependencies.windows-sys] -version = "0.48" -optional = true - -[target."cfg(windows)".dev-dependencies.windows-sys] -version = "0.48" -features = [ - "Win32_Foundation", - "Win32_Security_Authorization", -] diff -Nru s390-tools-2.31.0/rust-vendor/tokio/CHANGELOG.md 
s390-tools-2.33.1/rust-vendor/tokio/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/tokio/CHANGELOG.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,3090 +0,0 @@ -# 1.33.0 (October 9, 2023) - -### Fixed - -- io: mark `Interest::add` with `#[must_use]` ([#6037]) -- runtime: fix cache line size for RISC-V ([#5994]) -- sync: prevent lock poisoning in `watch::Receiver::wait_for` ([#6021]) -- task: fix `spawn_local` source location ([#5984]) - -### Changed - -- macros: use `::core` imports instead of `::std` in `tokio::test` ([#5973]) -- sync: use Acquire/Release orderings instead of SeqCst in `watch` ([#6018]) - -### Added - -- fs: add vectored writes to `tokio::fs::File` ([#5958]) -- io: add `Interest::remove` method ([#5906]) -- io: add vectored writes to `DuplexStream` ([#5985]) -- net: add Apple tvOS support ([#6045]) -- sync: add `?Sized` bound to `{MutexGuard,OwnedMutexGuard}::map` ([#5997]) -- sync: add `watch::Receiver::mark_unseen` ([#5962], [#6014], [#6017]) -- sync: add `watch::Sender::new` ([#5998]) -- sync: add const fn `OnceCell::from_value` ([#5903]) - -### Removed - -- remove unused `stats` feature ([#5952]) - -### Documented - -- add missing backticks in code examples ([#5938], [#6056]) -- fix typos ([#5988], [#6030]) -- process: document that `Child::wait` is cancel safe ([#5977]) -- sync: add examples for `Semaphore` ([#5939], [#5956], [#5978], [#6031], [#6032], [#6050]) -- sync: document that `broadcast` capacity is a lower bound ([#6042]) -- sync: document that `const_new` is not instrumented ([#6002]) -- sync: improve cancel-safety documentation for `mpsc::Sender::send` ([#5947]) -- sync: improve docs for `watch` channel ([#5954]) -- taskdump: render taskdump documentation on docs.rs ([#5972]) - -### Unstable - -- taskdump: fix potential deadlock ([#6036]) - -[#5903]: https://github.com/tokio-rs/tokio/pull/5903 -[#5906]: https://github.com/tokio-rs/tokio/pull/5906 
-[#5938]: https://github.com/tokio-rs/tokio/pull/5938 -[#5939]: https://github.com/tokio-rs/tokio/pull/5939 -[#5947]: https://github.com/tokio-rs/tokio/pull/5947 -[#5952]: https://github.com/tokio-rs/tokio/pull/5952 -[#5954]: https://github.com/tokio-rs/tokio/pull/5954 -[#5956]: https://github.com/tokio-rs/tokio/pull/5956 -[#5958]: https://github.com/tokio-rs/tokio/pull/5958 -[#5960]: https://github.com/tokio-rs/tokio/pull/5960 -[#5962]: https://github.com/tokio-rs/tokio/pull/5962 -[#5971]: https://github.com/tokio-rs/tokio/pull/5971 -[#5972]: https://github.com/tokio-rs/tokio/pull/5972 -[#5973]: https://github.com/tokio-rs/tokio/pull/5973 -[#5977]: https://github.com/tokio-rs/tokio/pull/5977 -[#5978]: https://github.com/tokio-rs/tokio/pull/5978 -[#5984]: https://github.com/tokio-rs/tokio/pull/5984 -[#5985]: https://github.com/tokio-rs/tokio/pull/5985 -[#5988]: https://github.com/tokio-rs/tokio/pull/5988 -[#5994]: https://github.com/tokio-rs/tokio/pull/5994 -[#5997]: https://github.com/tokio-rs/tokio/pull/5997 -[#5998]: https://github.com/tokio-rs/tokio/pull/5998 -[#6002]: https://github.com/tokio-rs/tokio/pull/6002 -[#6014]: https://github.com/tokio-rs/tokio/pull/6014 -[#6017]: https://github.com/tokio-rs/tokio/pull/6017 -[#6018]: https://github.com/tokio-rs/tokio/pull/6018 -[#6021]: https://github.com/tokio-rs/tokio/pull/6021 -[#6030]: https://github.com/tokio-rs/tokio/pull/6030 -[#6031]: https://github.com/tokio-rs/tokio/pull/6031 -[#6032]: https://github.com/tokio-rs/tokio/pull/6032 -[#6036]: https://github.com/tokio-rs/tokio/pull/6036 -[#6037]: https://github.com/tokio-rs/tokio/pull/6037 -[#6042]: https://github.com/tokio-rs/tokio/pull/6042 -[#6045]: https://github.com/tokio-rs/tokio/pull/6045 -[#6050]: https://github.com/tokio-rs/tokio/pull/6050 -[#6056]: https://github.com/tokio-rs/tokio/pull/6056 -[#6058]: https://github.com/tokio-rs/tokio/pull/6058 - -# 1.32.0 (August 16, 2023) - -### Fixed - -- sync: fix potential quadradic behavior in 
`broadcast::Receiver` ([#5925]) - -### Added - -- process: stabilize `Command::raw_arg` ([#5930]) -- io: enable awaiting error readiness ([#5781]) - -### Unstable - -- rt(alt): improve scalability of alt runtime as the number of cores grows ([#5935]) - -[#5925]: https://github.com/tokio-rs/tokio/pull/5925 -[#5930]: https://github.com/tokio-rs/tokio/pull/5930 -[#5781]: https://github.com/tokio-rs/tokio/pull/5781 -[#5935]: https://github.com/tokio-rs/tokio/pull/5935 - -# 1.31.0 (August 10, 2023) - -### Fixed - -* io: delegate `WriteHalf::poll_write_vectored` ([#5914]) - -### Unstable - -* rt(alt): fix memory leak in unstable next-gen scheduler prototype ([#5911]) -* rt: expose mean task poll time metric ([#5927]) - -[#5914]: https://github.com/tokio-rs/tokio/pull/5914 -[#5911]: https://github.com/tokio-rs/tokio/pull/5911 -[#5927]: https://github.com/tokio-rs/tokio/pull/5927 - -# 1.30.0 (August 9, 2023) - -This release bumps the MSRV of Tokio to 1.63. ([#5887]) - -### Changed - -- tokio: reduce LLVM code generation ([#5859]) -- io: support `--cfg mio_unsupported_force_poll_poll` flag ([#5881]) -- sync: make `const_new` methods always available ([#5885]) -- sync: avoid false sharing in mpsc channel ([#5829]) -- rt: pop at least one task from inject queue ([#5908]) - -### Added - -- sync: add `broadcast::Sender::new` ([#5824]) -- net: implement `UCred` for espidf ([#5868]) -- fs: add `File::options()` ([#5869]) -- time: implement extra reset variants for `Interval` ([#5878]) -- process: add `{ChildStd*}::into_owned_{fd, handle}` ([#5899]) - -### Removed - -- tokio: removed unused `tokio_*` cfgs ([#5890]) -- remove build script to speed up compilation ([#5887]) - -### Documented - -- sync: mention lagging in docs for `broadcast::send` ([#5820]) -- runtime: expand on sharing runtime docs ([#5858]) -- io: use vec in example for `AsyncReadExt::read_exact` ([#5863]) -- time: mark `Sleep` as `!Unpin` in docs ([#5916]) -- process: fix `raw_arg` not showing up in docs ([#5865]) 
- -### Unstable - -- rt: add runtime ID ([#5864]) -- rt: initial implementation of new threaded runtime ([#5823]) - -[#5820]: https://github.com/tokio-rs/tokio/pull/5820 -[#5823]: https://github.com/tokio-rs/tokio/pull/5823 -[#5824]: https://github.com/tokio-rs/tokio/pull/5824 -[#5829]: https://github.com/tokio-rs/tokio/pull/5829 -[#5858]: https://github.com/tokio-rs/tokio/pull/5858 -[#5859]: https://github.com/tokio-rs/tokio/pull/5859 -[#5863]: https://github.com/tokio-rs/tokio/pull/5863 -[#5864]: https://github.com/tokio-rs/tokio/pull/5864 -[#5865]: https://github.com/tokio-rs/tokio/pull/5865 -[#5868]: https://github.com/tokio-rs/tokio/pull/5868 -[#5869]: https://github.com/tokio-rs/tokio/pull/5869 -[#5878]: https://github.com/tokio-rs/tokio/pull/5878 -[#5881]: https://github.com/tokio-rs/tokio/pull/5881 -[#5885]: https://github.com/tokio-rs/tokio/pull/5885 -[#5887]: https://github.com/tokio-rs/tokio/pull/5887 -[#5890]: https://github.com/tokio-rs/tokio/pull/5890 -[#5899]: https://github.com/tokio-rs/tokio/pull/5899 -[#5908]: https://github.com/tokio-rs/tokio/pull/5908 -[#5916]: https://github.com/tokio-rs/tokio/pull/5916 - -# 1.29.1 (June 29, 2023) - -### Fixed - -- rt: fix nesting two `block_in_place` with a `block_on` between ([#5837]) - -[#5837]: https://github.com/tokio-rs/tokio/pull/5837 - -# 1.29.0 (June 27, 2023) - -Technically a breaking change, the `Send` implementation is removed from -`runtime::EnterGuard`. This change fixes a bug and should not impact most users. 
- -### Breaking - -- rt: `EnterGuard` should not be `Send` ([#5766]) - -### Fixed - -- fs: reduce blocking ops in `fs::read_dir` ([#5653]) -- rt: fix possible starvation ([#5686], [#5712]) -- rt: fix stacked borrows issue in `JoinSet` ([#5693]) -- rt: panic if `EnterGuard` dropped incorrect order ([#5772]) -- time: do not overflow to signal value ([#5710]) -- fs: wait for in-flight ops before cloning `File` ([#5803]) - -### Changed - -- rt: reduce time to poll tasks scheduled from outside the runtime ([#5705], [#5720]) - -### Added - -- net: add uds doc alias for unix sockets ([#5659]) -- rt: add metric for number of tasks ([#5628]) -- sync: implement more traits for channel errors ([#5666]) -- net: add nodelay methods on TcpSocket ([#5672]) -- sync: add `broadcast::Receiver::blocking_recv` ([#5690]) -- process: add `raw_arg` method to `Command` ([#5704]) -- io: support PRIORITY epoll events ([#5566]) -- task: add `JoinSet::poll_join_next` ([#5721]) -- net: add support for Redox OS ([#5790]) - - -### Unstable - -- rt: add the ability to dump task backtraces ([#5608], [#5676], [#5708], [#5717]) -- rt: instrument task poll times with a histogram ([#5685]) - -[#5766]: https://github.com/tokio-rs/tokio/pull/5766 -[#5653]: https://github.com/tokio-rs/tokio/pull/5653 -[#5686]: https://github.com/tokio-rs/tokio/pull/5686 -[#5712]: https://github.com/tokio-rs/tokio/pull/5712 -[#5693]: https://github.com/tokio-rs/tokio/pull/5693 -[#5772]: https://github.com/tokio-rs/tokio/pull/5772 -[#5710]: https://github.com/tokio-rs/tokio/pull/5710 -[#5803]: https://github.com/tokio-rs/tokio/pull/5803 -[#5705]: https://github.com/tokio-rs/tokio/pull/5705 -[#5720]: https://github.com/tokio-rs/tokio/pull/5720 -[#5659]: https://github.com/tokio-rs/tokio/pull/5659 -[#5628]: https://github.com/tokio-rs/tokio/pull/5628 -[#5666]: https://github.com/tokio-rs/tokio/pull/5666 -[#5672]: https://github.com/tokio-rs/tokio/pull/5672 -[#5690]: https://github.com/tokio-rs/tokio/pull/5690 -[#5704]: 
https://github.com/tokio-rs/tokio/pull/5704 -[#5566]: https://github.com/tokio-rs/tokio/pull/5566 -[#5721]: https://github.com/tokio-rs/tokio/pull/5721 -[#5790]: https://github.com/tokio-rs/tokio/pull/5790 -[#5608]: https://github.com/tokio-rs/tokio/pull/5608 -[#5676]: https://github.com/tokio-rs/tokio/pull/5676 -[#5708]: https://github.com/tokio-rs/tokio/pull/5708 -[#5717]: https://github.com/tokio-rs/tokio/pull/5717 -[#5685]: https://github.com/tokio-rs/tokio/pull/5685 - -# 1.28.2 (May 28, 2023) - -Forward ports 1.18.6 changes. - -### Fixed - -- deps: disable default features for mio ([#5728]) - -[#5728]: https://github.com/tokio-rs/tokio/pull/5728 - -# 1.28.1 (May 10th, 2023) - -This release fixes a mistake in the build script that makes `AsFd` -implementations unavailable on Rust 1.63. ([#5677]) - -[#5677]: https://github.com/tokio-rs/tokio/pull/5677 - -# 1.28.0 (April 25th, 2023) - -### Added - -- io: add `AsyncFd::async_io` ([#5542]) -- io: impl BufMut for ReadBuf ([#5590]) -- net: add `recv_buf` for `UdpSocket` and `UnixDatagram` ([#5583]) -- sync: add `OwnedSemaphorePermit::semaphore` ([#5618]) -- sync: add `same_channel` to broadcast channel ([#5607]) -- sync: add `watch::Receiver::wait_for` ([#5611]) -- task: add `JoinSet::spawn_blocking` and `JoinSet::spawn_blocking_on` ([#5612]) - -### Changed - -- deps: update windows-sys to 0.48 ([#5591]) -- io: make `read_to_end` not grow unnecessarily ([#5610]) -- macros: make entrypoints more efficient ([#5621]) -- sync: improve Debug impl for `RwLock` ([#5647]) -- sync: reduce contention in `Notify` ([#5503]) - -### Fixed - -- net: support `get_peer_cred` on AIX ([#5065]) -- sync: avoid deadlocks in `broadcast` with custom wakers ([#5578]) - -### Documented - -- sync: fix typo in `Semaphore::MAX_PERMITS` ([#5645]) -- sync: fix typo in `tokio::sync::watch::Sender` docs ([#5587]) - -[#5065]: https://github.com/tokio-rs/tokio/pull/5065 -[#5503]: https://github.com/tokio-rs/tokio/pull/5503 -[#5542]: 
https://github.com/tokio-rs/tokio/pull/5542 -[#5578]: https://github.com/tokio-rs/tokio/pull/5578 -[#5583]: https://github.com/tokio-rs/tokio/pull/5583 -[#5587]: https://github.com/tokio-rs/tokio/pull/5587 -[#5590]: https://github.com/tokio-rs/tokio/pull/5590 -[#5591]: https://github.com/tokio-rs/tokio/pull/5591 -[#5607]: https://github.com/tokio-rs/tokio/pull/5607 -[#5610]: https://github.com/tokio-rs/tokio/pull/5610 -[#5611]: https://github.com/tokio-rs/tokio/pull/5611 -[#5612]: https://github.com/tokio-rs/tokio/pull/5612 -[#5618]: https://github.com/tokio-rs/tokio/pull/5618 -[#5621]: https://github.com/tokio-rs/tokio/pull/5621 -[#5645]: https://github.com/tokio-rs/tokio/pull/5645 -[#5647]: https://github.com/tokio-rs/tokio/pull/5647 - -# 1.27.0 (March 27th, 2023) - -This release bumps the MSRV of Tokio to 1.56. ([#5559]) - -### Added - -- io: add `async_io` helper method to sockets ([#5512]) -- io: add implementations of `AsFd`/`AsHandle`/`AsSocket` ([#5514], [#5540]) -- net: add `UdpSocket::peek_sender()` ([#5520]) -- sync: add `RwLockWriteGuard::{downgrade_map, try_downgrade_map}` ([#5527]) -- task: add `JoinHandle::abort_handle` ([#5543]) - -### Changed - -- io: use `memchr` from `libc` ([#5558]) -- macros: accept path as crate rename in `#[tokio::main]` ([#5557]) -- macros: update to syn 2.0.0 ([#5572]) -- time: don't register for a wakeup when `Interval` returns `Ready` ([#5553]) - -### Fixed - -- fs: fuse std iterator in `ReadDir` ([#5555]) -- tracing: fix `spawn_blocking` location fields ([#5573]) -- time: clean up redundant check in `Wheel::poll()` ([#5574]) - -### Documented - -- macros: define cancellation safety ([#5525]) -- io: add details to docs of `tokio::io::copy[_buf]` ([#5575]) -- io: refer to `ReaderStream` and `StreamReader` in module docs ([#5576]) - -[#5512]: https://github.com/tokio-rs/tokio/pull/5512 -[#5514]: https://github.com/tokio-rs/tokio/pull/5514 -[#5520]: https://github.com/tokio-rs/tokio/pull/5520 -[#5525]: 
https://github.com/tokio-rs/tokio/pull/5525 -[#5527]: https://github.com/tokio-rs/tokio/pull/5527 -[#5540]: https://github.com/tokio-rs/tokio/pull/5540 -[#5543]: https://github.com/tokio-rs/tokio/pull/5543 -[#5553]: https://github.com/tokio-rs/tokio/pull/5553 -[#5555]: https://github.com/tokio-rs/tokio/pull/5555 -[#5557]: https://github.com/tokio-rs/tokio/pull/5557 -[#5558]: https://github.com/tokio-rs/tokio/pull/5558 -[#5559]: https://github.com/tokio-rs/tokio/pull/5559 -[#5572]: https://github.com/tokio-rs/tokio/pull/5572 -[#5573]: https://github.com/tokio-rs/tokio/pull/5573 -[#5574]: https://github.com/tokio-rs/tokio/pull/5574 -[#5575]: https://github.com/tokio-rs/tokio/pull/5575 -[#5576]: https://github.com/tokio-rs/tokio/pull/5576 - -# 1.26.0 (March 1st, 2023) - -### Fixed - -- macros: fix empty `join!` and `try_join!` ([#5504]) -- sync: don't leak tracing spans in mutex guards ([#5469]) -- sync: drop wakers after unlocking the mutex in Notify ([#5471]) -- sync: drop wakers outside lock in semaphore ([#5475]) - -### Added - -- fs: add `fs::try_exists` ([#4299]) -- net: add types for named unix pipes ([#5351]) -- sync: add `MappedOwnedMutexGuard` ([#5474]) - -### Changed - -- chore: update windows-sys to 0.45 ([#5386]) -- net: use Message Read Mode for named pipes ([#5350]) -- sync: mark lock guards with `#[clippy::has_significant_drop]` ([#5422]) -- sync: reduce contention in watch channel ([#5464]) -- time: remove cache padding in timer entries ([#5468]) -- time: Improve `Instant::now()` perf with test-util ([#5513]) - -### Internal Changes - -- io: use `poll_fn` in `copy_bidirectional` ([#5486]) -- net: refactor named pipe builders to not use bitfields ([#5477]) -- rt: remove Arc from Clock ([#5434]) -- sync: make `notify_waiters` calls atomic ([#5458]) -- time: don't store deadline twice in sleep entries ([#5410]) - -### Unstable - -- metrics: add a new metric for budget exhaustion yields ([#5517]) - -### Documented - -- io: improve AsyncFd example 
([#5481]) -- runtime: document the nature of the main future ([#5494]) -- runtime: remove extra period in docs ([#5511]) -- signal: updated Documentation for Signals ([#5459]) -- sync: add doc aliases for `blocking_*` methods ([#5448]) -- sync: fix docs for Send/Sync bounds in broadcast ([#5480]) -- sync: document drop behavior for channels ([#5497]) -- task: clarify what happens to spawned work during runtime shutdown ([#5394]) -- task: clarify `process::Command` docs ([#5413]) -- task: fix wording with 'unsend' ([#5452]) -- time: document immediate completion guarantee for timeouts ([#5509]) -- tokio: document supported platforms ([#5483]) - -[#4299]: https://github.com/tokio-rs/tokio/pull/4299 -[#5350]: https://github.com/tokio-rs/tokio/pull/5350 -[#5351]: https://github.com/tokio-rs/tokio/pull/5351 -[#5386]: https://github.com/tokio-rs/tokio/pull/5386 -[#5394]: https://github.com/tokio-rs/tokio/pull/5394 -[#5410]: https://github.com/tokio-rs/tokio/pull/5410 -[#5413]: https://github.com/tokio-rs/tokio/pull/5413 -[#5422]: https://github.com/tokio-rs/tokio/pull/5422 -[#5434]: https://github.com/tokio-rs/tokio/pull/5434 -[#5448]: https://github.com/tokio-rs/tokio/pull/5448 -[#5452]: https://github.com/tokio-rs/tokio/pull/5452 -[#5458]: https://github.com/tokio-rs/tokio/pull/5458 -[#5459]: https://github.com/tokio-rs/tokio/pull/5459 -[#5464]: https://github.com/tokio-rs/tokio/pull/5464 -[#5468]: https://github.com/tokio-rs/tokio/pull/5468 -[#5469]: https://github.com/tokio-rs/tokio/pull/5469 -[#5471]: https://github.com/tokio-rs/tokio/pull/5471 -[#5474]: https://github.com/tokio-rs/tokio/pull/5474 -[#5475]: https://github.com/tokio-rs/tokio/pull/5475 -[#5477]: https://github.com/tokio-rs/tokio/pull/5477 -[#5480]: https://github.com/tokio-rs/tokio/pull/5480 -[#5481]: https://github.com/tokio-rs/tokio/pull/5481 -[#5483]: https://github.com/tokio-rs/tokio/pull/5483 -[#5486]: https://github.com/tokio-rs/tokio/pull/5486 -[#5494]: 
https://github.com/tokio-rs/tokio/pull/5494 -[#5497]: https://github.com/tokio-rs/tokio/pull/5497 -[#5504]: https://github.com/tokio-rs/tokio/pull/5504 -[#5509]: https://github.com/tokio-rs/tokio/pull/5509 -[#5511]: https://github.com/tokio-rs/tokio/pull/5511 -[#5513]: https://github.com/tokio-rs/tokio/pull/5513 -[#5517]: https://github.com/tokio-rs/tokio/pull/5517 - -# 1.25.2 (September 22, 2023) - -Forward ports 1.20.6 changes. - -### Changed - -- io: use `memchr` from `libc` ([#5960]) - -[#5960]: https://github.com/tokio-rs/tokio/pull/5960 - -# 1.25.1 (May 28, 2023) - -Forward ports 1.18.6 changes. - -### Fixed - -- deps: disable default features for mio ([#5728]) - -[#5728]: https://github.com/tokio-rs/tokio/pull/5728 - -# 1.25.0 (January 28, 2023) - -### Fixed - -- rt: fix runtime metrics reporting ([#5330]) - -### Added - -- sync: add `broadcast::Sender::len` ([#5343]) - -### Changed - -- fs: increase maximum read buffer size to 2MiB ([#5397]) - -[#5330]: https://github.com/tokio-rs/tokio/pull/5330 -[#5343]: https://github.com/tokio-rs/tokio/pull/5343 -[#5397]: https://github.com/tokio-rs/tokio/pull/5397 - -# 1.24.2 (January 17, 2023) - -Forward ports 1.18.5 changes. - -### Fixed - -- io: fix unsoundness in `ReadHalf::unsplit` ([#5375]) - -[#5375]: https://github.com/tokio-rs/tokio/pull/5375 - -# 1.24.1 (January 6, 2022) - -This release fixes a compilation failure on targets without `AtomicU64` when using rustc older than 1.63. 
([#5356]) - -[#5356]: https://github.com/tokio-rs/tokio/pull/5356 - -# 1.24.0 (January 5, 2022) - -### Fixed - - rt: improve native `AtomicU64` support detection ([#5284]) - -### Added - - rt: add configuration option for max number of I/O events polled from the OS - per tick ([#5186]) - - rt: add an environment variable for configuring the default number of worker - threads per runtime instance ([#4250]) - -### Changed - - sync: reduce MPSC channel stack usage ([#5294]) - - io: reduce lock contention in I/O operations ([#5300]) - - fs: speed up `read_dir()` by chunking operations ([#5309]) - - rt: use internal `ThreadId` implementation ([#5329]) - - test: don't auto-advance time when a `spawn_blocking` task is running ([#5115]) - -[#5186]: https://github.com/tokio-rs/tokio/pull/5186 -[#5294]: https://github.com/tokio-rs/tokio/pull/5294 -[#5284]: https://github.com/tokio-rs/tokio/pull/5284 -[#4250]: https://github.com/tokio-rs/tokio/pull/4250 -[#5300]: https://github.com/tokio-rs/tokio/pull/5300 -[#5329]: https://github.com/tokio-rs/tokio/pull/5329 -[#5115]: https://github.com/tokio-rs/tokio/pull/5115 -[#5309]: https://github.com/tokio-rs/tokio/pull/5309 - -# 1.23.1 (January 4, 2022) - -This release forward ports changes from 1.18.4. - -### Fixed - -- net: fix Windows named pipe server builder to maintain option when toggling - pipe mode ([#5336]). 
- -[#5336]: https://github.com/tokio-rs/tokio/pull/5336 - -# 1.23.0 (December 5, 2022) - -### Fixed - - - net: fix Windows named pipe connect ([#5208]) - - io: support vectored writes for `ChildStdin` ([#5216]) - - io: fix `async fn ready()` false positive for OS-specific events ([#5231]) - - ### Changed - - runtime: `yield_now` defers task until after driver poll ([#5223]) - - runtime: reduce amount of codegen needed per spawned task ([#5213]) - - windows: replace `winapi` dependency with `windows-sys` ([#5204]) - - [#5208]: https://github.com/tokio-rs/tokio/pull/5208 - [#5216]: https://github.com/tokio-rs/tokio/pull/5216 - [#5213]: https://github.com/tokio-rs/tokio/pull/5213 - [#5204]: https://github.com/tokio-rs/tokio/pull/5204 - [#5223]: https://github.com/tokio-rs/tokio/pull/5223 - [#5231]: https://github.com/tokio-rs/tokio/pull/5231 - -# 1.22.0 (November 17, 2022) - -### Added - - runtime: add `Handle::runtime_flavor` ([#5138]) - - sync: add `Mutex::blocking_lock_owned` ([#5130]) - - sync: add `Semaphore::MAX_PERMITS` ([#5144]) - - sync: add `merge()` to semaphore permits ([#4948]) - - sync: add `mpsc::WeakUnboundedSender` ([#5189]) - -### Added (unstable) - - - process: add `Command::process_group` ([#5114]) - - runtime: export metrics about the blocking thread pool ([#5161]) - - task: add `task::id()` and `task::try_id()` ([#5171]) - -### Fixed - - macros: don't take ownership of futures in macros ([#5087]) - - runtime: fix Stacked Borrows violation in `LocalOwnedTasks` ([#5099]) - - runtime: mitigate ABA with 32-bit queue indices when possible ([#5042]) - - task: wake local tasks to the local queue when woken by the same thread ([#5095]) - - time: panic in release mode when `mark_pending` called illegally ([#5093]) - - runtime: fix typo in expect message ([#5169]) - - runtime: fix `unsync_load` on atomic types ([#5175]) - - task: elaborate safety comments in task deallocation ([#5172]) - - runtime: fix `LocalSet` drop in thread local ([#5179]) - - net: 
remove libc type leakage in a public API ([#5191]) - - runtime: update the alignment of `CachePadded` ([#5106]) - -### Changed - - io: make `tokio::io::copy` continue filling the buffer when writer stalls ([#5066]) - - runtime: remove `coop::budget` from `LocalSet::run_until` ([#5155]) - - sync: make `Notify` panic safe ([#5154]) - -### Documented - - io: fix doc for `write_i8` to use signed integers ([#5040]) - - net: fix doc typos for TCP and UDP `set_tos` methods ([#5073]) - - net: fix function name in `UdpSocket::recv` documentation ([#5150]) - - sync: typo in `TryLockError` for `RwLock::try_write` ([#5160]) - - task: document that spawned tasks execute immediately ([#5117]) - - time: document return type of `timeout` ([#5118]) - - time: document that `timeout` checks only before poll ([#5126]) - - sync: specify return type of `oneshot::Receiver` in docs ([#5198]) - -### Internal changes - - runtime: use const `Mutex::new` for globals ([#5061]) - - runtime: remove `Option` around `mio::Events` in io driver ([#5078]) - - runtime: remove a conditional compilation clause ([#5104]) - - runtime: remove a reference to internal time handle ([#5107]) - - runtime: misc time driver cleanup ([#5120]) - - runtime: move signal driver to runtime module ([#5121]) - - runtime: signal driver now uses I/O driver directly ([#5125]) - - runtime: start decoupling I/O driver and I/O handle ([#5127]) - - runtime: switch `io::handle` refs with scheduler:Handle ([#5128]) - - runtime: remove Arc from I/O driver ([#5134]) - - runtime: use signal driver handle via `scheduler::Handle` ([#5135]) - - runtime: move internal clock fns out of context ([#5139]) - - runtime: remove `runtime::context` module ([#5140]) - - runtime: keep driver cfgs in `driver.rs` ([#5141]) - - runtime: add `runtime::context` to unify thread-locals ([#5143]) - - runtime: rename some confusing internal variables/fns ([#5151]) - - runtime: move `coop` mod into `runtime` ([#5152]) - - runtime: move budget state to 
context thread-local ([#5157]) - - runtime: move park logic into runtime module ([#5158]) - - runtime: move `Runtime` into its own file ([#5159]) - - runtime: unify entering a runtime with `Handle::enter` ([#5163]) - - runtime: remove handle reference from each scheduler ([#5166]) - - runtime: move `enter` into `context` ([#5167]) - - runtime: combine context and entered thread-locals ([#5168]) - - runtime: fix accidental unsetting of current handle ([#5178]) - - runtime: move `CoreStage` methods to `Core` ([#5182]) - - sync: name mpsc semaphore types ([#5146]) - -[#4948]: https://github.com/tokio-rs/tokio/pull/4948 -[#5040]: https://github.com/tokio-rs/tokio/pull/5040 -[#5042]: https://github.com/tokio-rs/tokio/pull/5042 -[#5061]: https://github.com/tokio-rs/tokio/pull/5061 -[#5066]: https://github.com/tokio-rs/tokio/pull/5066 -[#5073]: https://github.com/tokio-rs/tokio/pull/5073 -[#5078]: https://github.com/tokio-rs/tokio/pull/5078 -[#5087]: https://github.com/tokio-rs/tokio/pull/5087 -[#5093]: https://github.com/tokio-rs/tokio/pull/5093 -[#5095]: https://github.com/tokio-rs/tokio/pull/5095 -[#5099]: https://github.com/tokio-rs/tokio/pull/5099 -[#5104]: https://github.com/tokio-rs/tokio/pull/5104 -[#5106]: https://github.com/tokio-rs/tokio/pull/5106 -[#5107]: https://github.com/tokio-rs/tokio/pull/5107 -[#5114]: https://github.com/tokio-rs/tokio/pull/5114 -[#5117]: https://github.com/tokio-rs/tokio/pull/5117 -[#5118]: https://github.com/tokio-rs/tokio/pull/5118 -[#5120]: https://github.com/tokio-rs/tokio/pull/5120 -[#5121]: https://github.com/tokio-rs/tokio/pull/5121 -[#5125]: https://github.com/tokio-rs/tokio/pull/5125 -[#5126]: https://github.com/tokio-rs/tokio/pull/5126 -[#5127]: https://github.com/tokio-rs/tokio/pull/5127 -[#5128]: https://github.com/tokio-rs/tokio/pull/5128 -[#5130]: https://github.com/tokio-rs/tokio/pull/5130 -[#5134]: https://github.com/tokio-rs/tokio/pull/5134 -[#5135]: https://github.com/tokio-rs/tokio/pull/5135 -[#5138]: 
https://github.com/tokio-rs/tokio/pull/5138 -[#5138]: https://github.com/tokio-rs/tokio/pull/5138 -[#5139]: https://github.com/tokio-rs/tokio/pull/5139 -[#5140]: https://github.com/tokio-rs/tokio/pull/5140 -[#5141]: https://github.com/tokio-rs/tokio/pull/5141 -[#5143]: https://github.com/tokio-rs/tokio/pull/5143 -[#5144]: https://github.com/tokio-rs/tokio/pull/5144 -[#5144]: https://github.com/tokio-rs/tokio/pull/5144 -[#5146]: https://github.com/tokio-rs/tokio/pull/5146 -[#5150]: https://github.com/tokio-rs/tokio/pull/5150 -[#5151]: https://github.com/tokio-rs/tokio/pull/5151 -[#5152]: https://github.com/tokio-rs/tokio/pull/5152 -[#5154]: https://github.com/tokio-rs/tokio/pull/5154 -[#5155]: https://github.com/tokio-rs/tokio/pull/5155 -[#5157]: https://github.com/tokio-rs/tokio/pull/5157 -[#5158]: https://github.com/tokio-rs/tokio/pull/5158 -[#5159]: https://github.com/tokio-rs/tokio/pull/5159 -[#5160]: https://github.com/tokio-rs/tokio/pull/5160 -[#5161]: https://github.com/tokio-rs/tokio/pull/5161 -[#5163]: https://github.com/tokio-rs/tokio/pull/5163 -[#5166]: https://github.com/tokio-rs/tokio/pull/5166 -[#5167]: https://github.com/tokio-rs/tokio/pull/5167 -[#5168]: https://github.com/tokio-rs/tokio/pull/5168 -[#5169]: https://github.com/tokio-rs/tokio/pull/5169 -[#5171]: https://github.com/tokio-rs/tokio/pull/5171 -[#5172]: https://github.com/tokio-rs/tokio/pull/5172 -[#5175]: https://github.com/tokio-rs/tokio/pull/5175 -[#5178]: https://github.com/tokio-rs/tokio/pull/5178 -[#5179]: https://github.com/tokio-rs/tokio/pull/5179 -[#5182]: https://github.com/tokio-rs/tokio/pull/5182 -[#5189]: https://github.com/tokio-rs/tokio/pull/5189 -[#5191]: https://github.com/tokio-rs/tokio/pull/5191 -[#5198]: https://github.com/tokio-rs/tokio/pull/5198 - -# 1.21.2 (September 27, 2022) - -This release removes the dependency on the `once_cell` crate to restore the MSRV -of 1.21.x, which is the latest minor version at the time of release. 
([#5048]) - -[#5048]: https://github.com/tokio-rs/tokio/pull/5048 - -# 1.21.1 (September 13, 2022) - -### Fixed - -- net: fix dependency resolution for socket2 ([#5000]) -- task: ignore failure to set TLS in `LocalSet` Drop ([#4976]) - -[#4976]: https://github.com/tokio-rs/tokio/pull/4976 -[#5000]: https://github.com/tokio-rs/tokio/pull/5000 - -# 1.21.0 (September 2, 2022) - -This release is the first release of Tokio to intentionally support WASM. The -`sync,macros,io-util,rt,time` features are stabilized on WASM. Additionally the -wasm32-wasi target is given unstable support for the `net` feature. - -### Added - -- net: add `device` and `bind_device` methods to TCP/UDP sockets ([#4882]) -- net: add `tos` and `set_tos` methods to TCP and UDP sockets ([#4877]) -- net: add security flags to named pipe `ServerOptions` ([#4845]) -- signal: add more windows signal handlers ([#4924]) -- sync: add `mpsc::Sender::max_capacity` method ([#4904]) -- sync: implement Weak version of `mpsc::Sender` ([#4595]) -- task: add `LocalSet::enter` ([#4765]) -- task: stabilize `JoinSet` and `AbortHandle` ([#4920]) -- tokio: add `track_caller` to public APIs ([#4805], [#4848], [#4852]) -- wasm: initial support for `wasm32-wasi` target ([#4716]) - -### Fixed - -- miri: improve miri compatibility by avoiding temporary references in `linked_list::Link` impls ([#4841]) -- signal: don't register write interest on signal pipe ([#4898]) -- sync: add `#[must_use]` to lock guards ([#4886]) -- sync: fix hang when calling `recv` on closed and reopened broadcast channel ([#4867]) -- task: propagate attributes on task-locals ([#4837]) - -### Changed - -- fs: change panic to error in `File::start_seek` ([#4897]) -- io: reduce syscalls in `poll_read` ([#4840]) -- process: use blocking threadpool for child stdio I/O ([#4824]) -- signal: make `SignalKind` methods const ([#4956]) - -### Internal changes - -- rt: extract `basic_scheduler::Config` ([#4935]) -- rt: move I/O driver into `runtime` module 
([#4942]) -- rt: rename internal scheduler types ([#4945]) - -### Documented - -- chore: fix typos and grammar ([#4858], [#4894], [#4928]) -- io: fix typo in `AsyncSeekExt::rewind` docs ([#4893]) -- net: add documentation to `try_read()` for zero-length buffers ([#4937]) -- runtime: remove incorrect panic section for `Builder::worker_threads` ([#4849]) -- sync: doc of `watch::Sender::send` improved ([#4959]) -- task: add cancel safety docs to `JoinHandle` ([#4901]) -- task: expand on cancellation of `spawn_blocking` ([#4811]) -- time: clarify that the first tick of `Interval::tick` happens immediately ([#4951]) - -### Unstable - -- rt: add unstable option to disable the LIFO slot ([#4936]) -- task: fix incorrect signature in `Builder::spawn_on` ([#4953]) -- task: make `task::Builder::spawn*` methods fallible ([#4823]) - -[#4595]: https://github.com/tokio-rs/tokio/pull/4595 -[#4716]: https://github.com/tokio-rs/tokio/pull/4716 -[#4765]: https://github.com/tokio-rs/tokio/pull/4765 -[#4805]: https://github.com/tokio-rs/tokio/pull/4805 -[#4811]: https://github.com/tokio-rs/tokio/pull/4811 -[#4823]: https://github.com/tokio-rs/tokio/pull/4823 -[#4824]: https://github.com/tokio-rs/tokio/pull/4824 -[#4837]: https://github.com/tokio-rs/tokio/pull/4837 -[#4840]: https://github.com/tokio-rs/tokio/pull/4840 -[#4841]: https://github.com/tokio-rs/tokio/pull/4841 -[#4845]: https://github.com/tokio-rs/tokio/pull/4845 -[#4848]: https://github.com/tokio-rs/tokio/pull/4848 -[#4849]: https://github.com/tokio-rs/tokio/pull/4849 -[#4852]: https://github.com/tokio-rs/tokio/pull/4852 -[#4858]: https://github.com/tokio-rs/tokio/pull/4858 -[#4867]: https://github.com/tokio-rs/tokio/pull/4867 -[#4877]: https://github.com/tokio-rs/tokio/pull/4877 -[#4882]: https://github.com/tokio-rs/tokio/pull/4882 -[#4886]: https://github.com/tokio-rs/tokio/pull/4886 -[#4893]: https://github.com/tokio-rs/tokio/pull/4893 -[#4894]: https://github.com/tokio-rs/tokio/pull/4894 -[#4897]: 
https://github.com/tokio-rs/tokio/pull/4897 -[#4898]: https://github.com/tokio-rs/tokio/pull/4898 -[#4901]: https://github.com/tokio-rs/tokio/pull/4901 -[#4904]: https://github.com/tokio-rs/tokio/pull/4904 -[#4920]: https://github.com/tokio-rs/tokio/pull/4920 -[#4924]: https://github.com/tokio-rs/tokio/pull/4924 -[#4928]: https://github.com/tokio-rs/tokio/pull/4928 -[#4935]: https://github.com/tokio-rs/tokio/pull/4935 -[#4936]: https://github.com/tokio-rs/tokio/pull/4936 -[#4937]: https://github.com/tokio-rs/tokio/pull/4937 -[#4942]: https://github.com/tokio-rs/tokio/pull/4942 -[#4945]: https://github.com/tokio-rs/tokio/pull/4945 -[#4951]: https://github.com/tokio-rs/tokio/pull/4951 -[#4953]: https://github.com/tokio-rs/tokio/pull/4953 -[#4956]: https://github.com/tokio-rs/tokio/pull/4956 -[#4959]: https://github.com/tokio-rs/tokio/pull/4959 - -# 1.20.6 (September 22, 2023) - -This is a backport of a change from 1.27.0. - -### Changed - -- io: use `memchr` from `libc` ([#5960]) - -[#5960]: https://github.com/tokio-rs/tokio/pull/5960 - -# 1.20.5 (May 28, 2023) - -Forward ports 1.18.6 changes. - -### Fixed - -- deps: disable default features for mio ([#5728]) - -[#5728]: https://github.com/tokio-rs/tokio/pull/5728 - -# 1.20.4 (January 17, 2023) - -Forward ports 1.18.5 changes. - -### Fixed - -- io: fix unsoundness in `ReadHalf::unsplit` ([#5375]) - -[#5375]: https://github.com/tokio-rs/tokio/pull/5375 - -# 1.20.3 (January 3, 2023) - -This release forward ports changes from 1.18.4. - -### Fixed - -- net: fix Windows named pipe server builder to maintain option when toggling - pipe mode ([#5336]). - -[#5336]: https://github.com/tokio-rs/tokio/pull/5336 - -# 1.20.2 (September 27, 2022) - -This release removes the dependency on the `once_cell` crate to restore the MSRV -of the 1.20.x LTS release. 
([#5048]) - -[#5048]: https://github.com/tokio-rs/tokio/pull/5048 - -# 1.20.1 (July 25, 2022) - -### Fixed - -- chore: fix version detection in build script ([#4860]) - -[#4860]: https://github.com/tokio-rs/tokio/pull/4860 - -# 1.20.0 (July 12, 2022) - -### Added -- tokio: add `track_caller` to public APIs ([#4772], [#4791], [#4793], [#4806], [#4808]) -- sync: Add `has_changed` method to `watch::Ref` ([#4758]) - -### Changed - -- time: remove `src/time/driver/wheel/stack.rs` ([#4766]) -- rt: clean up arguments passed to basic scheduler ([#4767]) -- net: be more specific about winapi features ([#4764]) -- tokio: use const initialized thread locals where possible ([#4677]) -- task: various small improvements to LocalKey ([#4795]) - -### Documented - -- fs: warn about performance pitfall ([#4762]) -- chore: fix spelling ([#4769]) -- sync: document spurious failures in oneshot ([#4777]) -- sync: add warning for watch in non-Send futures ([#4741]) -- chore: fix typo ([#4798]) - -### Unstable - -- joinset: rename `join_one` to `join_next` ([#4755]) -- rt: unhandled panic config for current thread rt ([#4770]) - -[#4677]: https://github.com/tokio-rs/tokio/pull/4677 -[#4741]: https://github.com/tokio-rs/tokio/pull/4741 -[#4755]: https://github.com/tokio-rs/tokio/pull/4755 -[#4758]: https://github.com/tokio-rs/tokio/pull/4758 -[#4762]: https://github.com/tokio-rs/tokio/pull/4762 -[#4764]: https://github.com/tokio-rs/tokio/pull/4764 -[#4766]: https://github.com/tokio-rs/tokio/pull/4766 -[#4767]: https://github.com/tokio-rs/tokio/pull/4767 -[#4769]: https://github.com/tokio-rs/tokio/pull/4769 -[#4770]: https://github.com/tokio-rs/tokio/pull/4770 -[#4772]: https://github.com/tokio-rs/tokio/pull/4772 -[#4777]: https://github.com/tokio-rs/tokio/pull/4777 -[#4791]: https://github.com/tokio-rs/tokio/pull/4791 -[#4793]: https://github.com/tokio-rs/tokio/pull/4793 -[#4795]: https://github.com/tokio-rs/tokio/pull/4795 -[#4798]: https://github.com/tokio-rs/tokio/pull/4798 -[#4806]: 
https://github.com/tokio-rs/tokio/pull/4806 -[#4808]: https://github.com/tokio-rs/tokio/pull/4808 - -# 1.19.2 (June 6, 2022) - -This release fixes another bug in `Notified::enable`. ([#4751]) - -[#4751]: https://github.com/tokio-rs/tokio/pull/4751 - -# 1.19.1 (June 5, 2022) - -This release fixes a bug in `Notified::enable`. ([#4747]) - -[#4747]: https://github.com/tokio-rs/tokio/pull/4747 - -# 1.19.0 (June 3, 2022) - -### Added - -- runtime: add `is_finished` method for `JoinHandle` and `AbortHandle` ([#4709]) -- runtime: make global queue and event polling intervals configurable ([#4671]) -- sync: add `Notified::enable` ([#4705]) -- sync: add `watch::Sender::send_if_modified` ([#4591]) -- sync: add resubscribe method to broadcast::Receiver ([#4607]) -- net: add `take_error` to `TcpSocket` and `TcpStream` ([#4739]) - -### Changed - -- io: refactor out usage of Weak in the io handle ([#4656]) - -### Fixed - -- macros: avoid starvation in `join!` and `try_join!` ([#4624]) - -### Documented - -- runtime: clarify semantics of tasks outliving `block_on` ([#4729]) -- time: fix example for `MissedTickBehavior::Burst` ([#4713]) - -### Unstable - -- metrics: correctly update atomics in `IoDriverMetrics` ([#4725]) -- metrics: fix compilation with unstable, process, and rt, but without net ([#4682]) -- task: add `#[track_caller]` to `JoinSet`/`JoinMap` ([#4697]) -- task: add `Builder::{spawn_on, spawn_local_on, spawn_blocking_on}` ([#4683]) -- task: add `consume_budget` for cooperative scheduling ([#4498]) -- task: add `join_set::Builder` for configuring `JoinSet` tasks ([#4687]) -- task: update return value of `JoinSet::join_one` ([#4726]) - -[#4498]: https://github.com/tokio-rs/tokio/pull/4498 -[#4591]: https://github.com/tokio-rs/tokio/pull/4591 -[#4607]: https://github.com/tokio-rs/tokio/pull/4607 -[#4624]: https://github.com/tokio-rs/tokio/pull/4624 -[#4656]: https://github.com/tokio-rs/tokio/pull/4656 -[#4671]: https://github.com/tokio-rs/tokio/pull/4671 -[#4682]: 
https://github.com/tokio-rs/tokio/pull/4682 -[#4683]: https://github.com/tokio-rs/tokio/pull/4683 -[#4687]: https://github.com/tokio-rs/tokio/pull/4687 -[#4697]: https://github.com/tokio-rs/tokio/pull/4697 -[#4705]: https://github.com/tokio-rs/tokio/pull/4705 -[#4709]: https://github.com/tokio-rs/tokio/pull/4709 -[#4713]: https://github.com/tokio-rs/tokio/pull/4713 -[#4725]: https://github.com/tokio-rs/tokio/pull/4725 -[#4726]: https://github.com/tokio-rs/tokio/pull/4726 -[#4729]: https://github.com/tokio-rs/tokio/pull/4729 -[#4739]: https://github.com/tokio-rs/tokio/pull/4739 - -# 1.18.6 (May 28, 2023) - -### Fixed - -- deps: disable default features for mio ([#5728]) - -[#5728]: https://github.com/tokio-rs/tokio/pull/5728 - -# 1.18.5 (January 17, 2023) - -### Fixed - -- io: fix unsoundness in `ReadHalf::unsplit` ([#5375]) - -[#5375]: https://github.com/tokio-rs/tokio/pull/5375 - -# 1.18.4 (January 3, 2023) - -### Fixed - -- net: fix Windows named pipe server builder to maintain option when toggling - pipe mode ([#5336]). - -[#5336]: https://github.com/tokio-rs/tokio/pull/5336 - -# 1.18.3 (September 27, 2022) - -This release removes the dependency on the `once_cell` crate to restore the MSRV -of the 1.18.x LTS release. ([#5048]) - -[#5048]: https://github.com/tokio-rs/tokio/pull/5048 - -# 1.18.2 (May 5, 2022) - -Add missing features for the `winapi` dependency. ([#4663]) - -[#4663]: https://github.com/tokio-rs/tokio/pull/4663 - -# 1.18.1 (May 2, 2022) - -The 1.18.0 release broke the build for targets without 64-bit atomics when -building with `tokio_unstable`. This release fixes that. ([#4649]) - -[#4649]: https://github.com/tokio-rs/tokio/pull/4649 - -# 1.18.0 (April 27, 2022) - -This release adds a number of new APIs in `tokio::net`, `tokio::signal`, and -`tokio::sync`. In addition, it adds new unstable APIs to `tokio::task` (`Id`s -for uniquely identifying a task, and `AbortHandle` for remotely cancelling a -task), as well as a number of bugfixes. 
- -### Fixed - -- blocking: add missing `#[track_caller]` for `spawn_blocking` ([#4616]) -- macros: fix `select` macro to process 64 branches ([#4519]) -- net: fix `try_io` methods not calling Mio's `try_io` internally ([#4582]) -- runtime: recover when OS fails to spawn a new thread ([#4485]) - -### Added - -- net: add `UdpSocket::peer_addr` ([#4611]) -- net: add `try_read_buf` method for named pipes ([#4626]) -- signal: add `SignalKind` `Hash`/`Eq` impls and `c_int` conversion ([#4540]) -- signal: add support for signals up to `SIGRTMAX` ([#4555]) -- sync: add `watch::Sender::send_modify` method ([#4310]) -- sync: add `broadcast::Receiver::len` method ([#4542]) -- sync: add `watch::Receiver::same_channel` method ([#4581]) -- sync: implement `Clone` for `RecvError` types ([#4560]) - -### Changed - -- update `mio` to 0.8.1 ([#4582]) -- macros: rename `tokio::select!`'s internal `util` module ([#4543]) -- runtime: use `Vec::with_capacity` when building runtime ([#4553]) - -### Documented - -- improve docs for `tokio_unstable` ([#4524]) -- runtime: include more documentation for thread_pool/worker ([#4511]) -- runtime: update `Handle::current`'s docs to mention `EnterGuard` ([#4567]) -- time: clarify platform specific timer resolution ([#4474]) -- signal: document that `Signal::recv` is cancel-safe ([#4634]) -- sync: `UnboundedReceiver` close docs ([#4548]) - -### Unstable - -The following changes only apply when building with `--cfg tokio_unstable`: - -- task: add `task::Id` type ([#4630]) -- task: add `AbortHandle` type for cancelling tasks in a `JoinSet` ([#4530], - [#4640]) -- task: fix missing `doc(cfg(...))` attributes for `JoinSet` ([#4531]) -- task: fix broken link in `AbortHandle` RustDoc ([#4545]) -- metrics: add initial IO driver metrics ([#4507]) - - -[#4616]: https://github.com/tokio-rs/tokio/pull/4616 -[#4519]: https://github.com/tokio-rs/tokio/pull/4519 -[#4582]: https://github.com/tokio-rs/tokio/pull/4582 -[#4485]: 
https://github.com/tokio-rs/tokio/pull/4485 -[#4613]: https://github.com/tokio-rs/tokio/pull/4613 -[#4611]: https://github.com/tokio-rs/tokio/pull/4611 -[#4626]: https://github.com/tokio-rs/tokio/pull/4626 -[#4540]: https://github.com/tokio-rs/tokio/pull/4540 -[#4555]: https://github.com/tokio-rs/tokio/pull/4555 -[#4310]: https://github.com/tokio-rs/tokio/pull/4310 -[#4542]: https://github.com/tokio-rs/tokio/pull/4542 -[#4581]: https://github.com/tokio-rs/tokio/pull/4581 -[#4560]: https://github.com/tokio-rs/tokio/pull/4560 -[#4631]: https://github.com/tokio-rs/tokio/pull/4631 -[#4582]: https://github.com/tokio-rs/tokio/pull/4582 -[#4543]: https://github.com/tokio-rs/tokio/pull/4543 -[#4553]: https://github.com/tokio-rs/tokio/pull/4553 -[#4524]: https://github.com/tokio-rs/tokio/pull/4524 -[#4511]: https://github.com/tokio-rs/tokio/pull/4511 -[#4567]: https://github.com/tokio-rs/tokio/pull/4567 -[#4474]: https://github.com/tokio-rs/tokio/pull/4474 -[#4634]: https://github.com/tokio-rs/tokio/pull/4634 -[#4548]: https://github.com/tokio-rs/tokio/pull/4548 -[#4630]: https://github.com/tokio-rs/tokio/pull/4630 -[#4530]: https://github.com/tokio-rs/tokio/pull/4530 -[#4640]: https://github.com/tokio-rs/tokio/pull/4640 -[#4531]: https://github.com/tokio-rs/tokio/pull/4531 -[#4545]: https://github.com/tokio-rs/tokio/pull/4545 -[#4507]: https://github.com/tokio-rs/tokio/pull/4507 - -# 1.17.0 (February 16, 2022) - -This release updates the minimum supported Rust version (MSRV) to 1.49, the -`mio` dependency to v0.8, and the (optional) `parking_lot` dependency to v0.12. -Additionally, it contains several bug fixes, as well as internal refactoring and -performance improvements. 
- -### Fixed - -- time: prevent panicking in `sleep` with large durations ([#4495]) -- time: eliminate potential panics in `Instant` arithmetic on platforms where - `Instant::now` is not monotonic ([#4461]) -- io: fix `DuplexStream` not participating in cooperative yielding ([#4478]) -- rt: fix potential double panic when dropping a `JoinHandle` ([#4430]) - -### Changed - -- update minimum supported Rust version to 1.49 ([#4457]) -- update `parking_lot` dependency to v0.12.0 ([#4459]) -- update `mio` dependency to v0.8 ([#4449]) -- rt: remove an unnecessary lock in the blocking pool ([#4436]) -- rt: remove an unnecessary enum in the basic scheduler ([#4462]) -- time: use bit manipulation instead of modulo to improve performance ([#4480]) -- net: use `std::future::Ready` instead of our own `Ready` future ([#4271]) -- replace deprecated `atomic::spin_loop_hint` with `hint::spin_loop` ([#4491]) -- fix miri failures in intrusive linked lists ([#4397]) - -### Documented - -- io: add an example for `tokio::process::ChildStdin` ([#4479]) - -### Unstable - -The following changes only apply when building with `--cfg tokio_unstable`: - -- task: fix missing location information in `tracing` spans generated by - `spawn_local` ([#4483]) -- task: add `JoinSet` for managing sets of tasks ([#4335]) -- metrics: fix compilation error on MIPS ([#4475]) -- metrics: fix compilation error on arm32v7 ([#4453]) - -[#4495]: https://github.com/tokio-rs/tokio/pull/4495 -[#4461]: https://github.com/tokio-rs/tokio/pull/4461 -[#4478]: https://github.com/tokio-rs/tokio/pull/4478 -[#4430]: https://github.com/tokio-rs/tokio/pull/4430 -[#4457]: https://github.com/tokio-rs/tokio/pull/4457 -[#4459]: https://github.com/tokio-rs/tokio/pull/4459 -[#4449]: https://github.com/tokio-rs/tokio/pull/4449 -[#4462]: https://github.com/tokio-rs/tokio/pull/4462 -[#4436]: https://github.com/tokio-rs/tokio/pull/4436 -[#4480]: https://github.com/tokio-rs/tokio/pull/4480 -[#4271]: 
https://github.com/tokio-rs/tokio/pull/4271 -[#4491]: https://github.com/tokio-rs/tokio/pull/4491 -[#4397]: https://github.com/tokio-rs/tokio/pull/4397 -[#4479]: https://github.com/tokio-rs/tokio/pull/4479 -[#4483]: https://github.com/tokio-rs/tokio/pull/4483 -[#4335]: https://github.com/tokio-rs/tokio/pull/4335 -[#4475]: https://github.com/tokio-rs/tokio/pull/4475 -[#4453]: https://github.com/tokio-rs/tokio/pull/4453 - -# 1.16.1 (January 28, 2022) - -This release fixes a bug in [#4428] with the change [#4437]. - -[#4428]: https://github.com/tokio-rs/tokio/pull/4428 -[#4437]: https://github.com/tokio-rs/tokio/pull/4437 - -# 1.16.0 (January 27, 2022) - -Fixes a soundness bug in `io::Take` ([#4428]). The unsoundness is exposed when -leaking memory in the given `AsyncRead` implementation and then overwriting the -supplied buffer: - -```rust -impl AsyncRead for Buggy { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_> - ) -> Poll<io::Result<()>> { - let new_buf = vec![0; 5].leak(); - *buf = ReadBuf::new(new_buf); - buf.put_slice(b"hello"); - Poll::Ready(Ok(())) - } -} -``` - -Also, this release includes improvements to the multi-threaded scheduler that -can increase throughput by up to 20% in some cases ([#4383]). 
 - -### Fixed - -- io: **soundness** don't expose uninitialized memory when using `io::Take` in edge case ([#4428]) -- fs: ensure `File::write` results in a `write` syscall when the runtime shuts down ([#4316]) -- process: drop pipe after child exits in `wait_with_output` ([#4315]) -- rt: improve error message when spawning a thread fails ([#4398]) -- rt: reduce false-positive thread wakeups in the multi-threaded scheduler ([#4383]) -- sync: don't inherit `Send` from `parking_lot::*Guard` ([#4359]) - -### Added - -- net: `TcpSocket::linger()` and `set_linger()` ([#4324]) -- net: impl `UnwindSafe` for socket types ([#4384]) -- rt: impl `UnwindSafe` for `JoinHandle` ([#4418]) -- sync: `watch::Receiver::has_changed()` ([#4342]) -- sync: `oneshot::Receiver::blocking_recv()` ([#4334]) -- sync: `RwLock` blocking operations ([#4425]) - -### Unstable - -The following changes only apply when building with `--cfg tokio_unstable` - -- rt: **breaking change** overhaul runtime metrics API ([#4373]) - -[#4428]: https://github.com/tokio-rs/tokio/pull/4428 -[#4316]: https://github.com/tokio-rs/tokio/pull/4316 -[#4315]: https://github.com/tokio-rs/tokio/pull/4315 -[#4398]: https://github.com/tokio-rs/tokio/pull/4398 -[#4383]: https://github.com/tokio-rs/tokio/pull/4383 -[#4359]: https://github.com/tokio-rs/tokio/pull/4359 -[#4324]: https://github.com/tokio-rs/tokio/pull/4324 -[#4384]: https://github.com/tokio-rs/tokio/pull/4384 -[#4418]: https://github.com/tokio-rs/tokio/pull/4418 -[#4342]: https://github.com/tokio-rs/tokio/pull/4342 -[#4334]: https://github.com/tokio-rs/tokio/pull/4334 -[#4425]: https://github.com/tokio-rs/tokio/pull/4425 -[#4373]: https://github.com/tokio-rs/tokio/pull/4373 - -# 1.15.0 (December 15, 2021) - -### Fixed - -- io: add cooperative yielding support to `io::empty()` ([#4300]) -- time: make timeout robust against budget-depleting tasks ([#4314]) - -### Changed - -- update minimum supported Rust version to 1.46. 
- -### Added - -- time: add `Interval::reset()` ([#4248]) -- io: add explicit lifetimes to `AsyncFdReadyGuard` ([#4267]) -- process: add `Command::as_std()` ([#4295]) - -### Added (unstable) - -- tracing: instrument `tokio::sync` types ([#4302]) - -[#4302]: https://github.com/tokio-rs/tokio/pull/4302 -[#4300]: https://github.com/tokio-rs/tokio/pull/4300 -[#4295]: https://github.com/tokio-rs/tokio/pull/4295 -[#4267]: https://github.com/tokio-rs/tokio/pull/4267 -[#4248]: https://github.com/tokio-rs/tokio/pull/4248 -[#4314]: https://github.com/tokio-rs/tokio/pull/4314 - -# 1.14.0 (November 15, 2021) - -### Fixed - -- macros: fix compiler errors when using `mut` patterns in `select!` ([#4211]) -- sync: fix a data race between `oneshot::Sender::send` and awaiting a - `oneshot::Receiver` when the oneshot has been closed ([#4226]) -- sync: make `AtomicWaker` panic safe ([#3689]) -- runtime: fix basic scheduler dropping tasks outside a runtime context - ([#4213]) - -### Added - -- stats: add `RuntimeStats::busy_duration_total` ([#4179], [#4223]) - -### Changed - -- io: updated `copy` buffer size to match `std::io::copy` ([#4209]) - -### Documented - -- io: rename buffer to file in doc-test ([#4230]) -- sync: fix Notify example ([#4212]) - -[#4211]: https://github.com/tokio-rs/tokio/pull/4211 -[#4226]: https://github.com/tokio-rs/tokio/pull/4226 -[#3689]: https://github.com/tokio-rs/tokio/pull/3689 -[#4213]: https://github.com/tokio-rs/tokio/pull/4213 -[#4179]: https://github.com/tokio-rs/tokio/pull/4179 -[#4223]: https://github.com/tokio-rs/tokio/pull/4223 -[#4209]: https://github.com/tokio-rs/tokio/pull/4209 -[#4230]: https://github.com/tokio-rs/tokio/pull/4230 -[#4212]: https://github.com/tokio-rs/tokio/pull/4212 - -# 1.13.1 (November 15, 2021) - -### Fixed - -- sync: fix a data race between `oneshot::Sender::send` and awaiting a - `oneshot::Receiver` when the oneshot has been closed ([#4226]) - -[#4226]: https://github.com/tokio-rs/tokio/pull/4226 - -# 1.13.0 (October 
29, 2021) - -### Fixed - -- sync: fix `Notify` to clone the waker before locking its waiter list ([#4129]) -- tokio: add riscv32 to non atomic64 architectures ([#4185]) - -### Added - -- net: add `poll_{recv,send}_ready` methods to `udp` and `uds_datagram` ([#4131]) -- net: add `try_*`, `readable`, `writable`, `ready`, and `peer_addr` methods to split halves ([#4120]) -- sync: add `blocking_lock` to `Mutex` ([#4130]) -- sync: add `watch::Sender::send_replace` ([#3962], [#4195]) -- sync: expand `Debug` for `Mutex` impl to unsized `T` ([#4134]) -- tracing: instrument time::Sleep ([#4072]) -- tracing: use structured location fields for spawned tasks ([#4128]) - -### Changed - -- io: add assert in `copy_bidirectional` that `poll_write` is sensible ([#4125]) -- macros: use qualified syntax when polling in `select!` ([#4192]) -- runtime: handle `block_on` wakeups better ([#4157]) -- task: allocate callback on heap immediately in debug mode ([#4203]) -- tokio: assert platform-minimum requirements at build time ([#3797]) - -### Documented - -- docs: conversion of doc comments to indicative mood ([#4174]) -- docs: add returning on the first error example for `try_join!` ([#4133]) -- docs: fixing broken links in `tokio/src/lib.rs` ([#4132]) -- signal: add example with background listener ([#4171]) -- sync: add more oneshot examples ([#4153]) -- time: document `Interval::tick` cancel safety ([#4152]) - -[#3797]: https://github.com/tokio-rs/tokio/pull/3797 -[#3962]: https://github.com/tokio-rs/tokio/pull/3962 -[#4072]: https://github.com/tokio-rs/tokio/pull/4072 -[#4120]: https://github.com/tokio-rs/tokio/pull/4120 -[#4125]: https://github.com/tokio-rs/tokio/pull/4125 -[#4128]: https://github.com/tokio-rs/tokio/pull/4128 -[#4129]: https://github.com/tokio-rs/tokio/pull/4129 -[#4130]: https://github.com/tokio-rs/tokio/pull/4130 -[#4131]: https://github.com/tokio-rs/tokio/pull/4131 -[#4132]: https://github.com/tokio-rs/tokio/pull/4132 -[#4133]: 
https://github.com/tokio-rs/tokio/pull/4133 -[#4134]: https://github.com/tokio-rs/tokio/pull/4134 -[#4152]: https://github.com/tokio-rs/tokio/pull/4152 -[#4153]: https://github.com/tokio-rs/tokio/pull/4153 -[#4157]: https://github.com/tokio-rs/tokio/pull/4157 -[#4171]: https://github.com/tokio-rs/tokio/pull/4171 -[#4174]: https://github.com/tokio-rs/tokio/pull/4174 -[#4185]: https://github.com/tokio-rs/tokio/pull/4185 -[#4192]: https://github.com/tokio-rs/tokio/pull/4192 -[#4195]: https://github.com/tokio-rs/tokio/pull/4195 -[#4203]: https://github.com/tokio-rs/tokio/pull/4203 - -# 1.12.0 (September 21, 2021) - -### Fixed - -- mpsc: ensure `try_reserve` error is consistent with `try_send` ([#4119]) -- mpsc: use `spin_loop_hint` instead of `yield_now` ([#4115]) -- sync: make `SendError` field public ([#4097]) - -### Added - -- io: add POSIX AIO on FreeBSD ([#4054]) -- io: add convenience method `AsyncSeekExt::rewind` ([#4107]) -- runtime: add tracing span for `block_on` futures ([#4094]) -- runtime: callback when a worker parks and unparks ([#4070]) -- sync: implement `try_recv` for mpsc channels ([#4113]) - -### Documented - -- docs: clarify CPU-bound tasks on Tokio ([#4105]) -- mpsc: document spurious failures on `poll_recv` ([#4117]) -- mpsc: document that `PollSender` impls `Sink` ([#4110]) -- task: document non-guarantees of `yield_now` ([#4091]) -- time: document paused time details better ([#4061], [#4103]) - -[#4027]: https://github.com/tokio-rs/tokio/pull/4027 -[#4054]: https://github.com/tokio-rs/tokio/pull/4054 -[#4061]: https://github.com/tokio-rs/tokio/pull/4061 -[#4070]: https://github.com/tokio-rs/tokio/pull/4070 -[#4091]: https://github.com/tokio-rs/tokio/pull/4091 -[#4094]: https://github.com/tokio-rs/tokio/pull/4094 -[#4097]: https://github.com/tokio-rs/tokio/pull/4097 -[#4103]: https://github.com/tokio-rs/tokio/pull/4103 -[#4105]: https://github.com/tokio-rs/tokio/pull/4105 -[#4107]: https://github.com/tokio-rs/tokio/pull/4107 -[#4110]: 
https://github.com/tokio-rs/tokio/pull/4110 -[#4113]: https://github.com/tokio-rs/tokio/pull/4113 -[#4115]: https://github.com/tokio-rs/tokio/pull/4115 -[#4117]: https://github.com/tokio-rs/tokio/pull/4117 -[#4119]: https://github.com/tokio-rs/tokio/pull/4119 - -# 1.11.0 (August 31, 2021) - -### Fixed - - - time: don't panic when Instant is not monotonic ([#4044]) - - io: fix panic in `fill_buf` by not calling `poll_fill_buf` twice ([#4084]) - -### Added - - - watch: add `watch::Sender::subscribe` ([#3800]) - - process: add `from_std` to `ChildStd*` ([#4045]) - - stats: initial work on runtime stats ([#4043]) - -### Changed - - - tracing: change span naming to new console convention ([#4042]) - - io: speed-up waking by using uninitialized array ([#4055], [#4071], [#4075]) - -### Documented - - - time: make Sleep examples easier to find ([#4040]) - -[#3800]: https://github.com/tokio-rs/tokio/pull/3800 -[#4040]: https://github.com/tokio-rs/tokio/pull/4040 -[#4042]: https://github.com/tokio-rs/tokio/pull/4042 -[#4043]: https://github.com/tokio-rs/tokio/pull/4043 -[#4044]: https://github.com/tokio-rs/tokio/pull/4044 -[#4045]: https://github.com/tokio-rs/tokio/pull/4045 -[#4055]: https://github.com/tokio-rs/tokio/pull/4055 -[#4071]: https://github.com/tokio-rs/tokio/pull/4071 -[#4075]: https://github.com/tokio-rs/tokio/pull/4075 -[#4084]: https://github.com/tokio-rs/tokio/pull/4084 - -# 1.10.1 (August 24, 2021) - -### Fixed - - - runtime: fix leak in UnownedTask ([#4063]) - -[#4063]: https://github.com/tokio-rs/tokio/pull/4063 - -# 1.10.0 (August 12, 2021) - -### Added - - - io: add `(read|write)_f(32|64)[_le]` methods ([#4022]) - - io: add `fill_buf` and `consume` to `AsyncBufReadExt` ([#3991]) - - process: add `Child::raw_handle()` on windows ([#3998]) - -### Fixed - - - doc: fix non-doc builds with `--cfg docsrs` ([#4020]) - - io: flush eagerly in `io::copy` ([#4001]) - - runtime: a debug assert was sometimes triggered during shutdown ([#4005]) - - sync: use 
`spin_loop_hint` instead of `yield_now` in mpsc ([#4037]) - - tokio: the test-util feature depends on rt, sync, and time ([#4036]) - -### Changes - - - runtime: reorganize parts of the runtime ([#3979], [#4005]) - - signal: make windows docs for signal module show up on unix builds ([#3770]) - - task: quickly send task to heap on debug mode ([#4009]) - -### Documented - - - io: document cancellation safety of `AsyncBufReadExt` ([#3997]) - - sync: document when `watch::send` fails ([#4021]) - -[#3770]: https://github.com/tokio-rs/tokio/pull/3770 -[#3979]: https://github.com/tokio-rs/tokio/pull/3979 -[#3991]: https://github.com/tokio-rs/tokio/pull/3991 -[#3997]: https://github.com/tokio-rs/tokio/pull/3997 -[#3998]: https://github.com/tokio-rs/tokio/pull/3998 -[#4001]: https://github.com/tokio-rs/tokio/pull/4001 -[#4005]: https://github.com/tokio-rs/tokio/pull/4005 -[#4009]: https://github.com/tokio-rs/tokio/pull/4009 -[#4020]: https://github.com/tokio-rs/tokio/pull/4020 -[#4021]: https://github.com/tokio-rs/tokio/pull/4021 -[#4022]: https://github.com/tokio-rs/tokio/pull/4022 -[#4036]: https://github.com/tokio-rs/tokio/pull/4036 -[#4037]: https://github.com/tokio-rs/tokio/pull/4037 - -# 1.9.0 (July 22, 2021) - -### Added - - - net: allow customized I/O operations for `TcpStream` ([#3888]) - - sync: add getter for the mutex from a guard ([#3928]) - - task: expose nameable future for `TaskLocal::scope` ([#3273]) - -### Fixed - - - Fix leak if output of future panics on drop ([#3967]) - - Fix leak in `LocalSet` ([#3978]) - -### Changes - - - runtime: reorganize parts of the runtime ([#3909], [#3939], [#3950], [#3955], [#3980]) - - sync: clean up `OnceCell` ([#3945]) - - task: remove mutex in `JoinError` ([#3959]) - -[#3273]: https://github.com/tokio-rs/tokio/pull/3273 -[#3888]: https://github.com/tokio-rs/tokio/pull/3888 -[#3909]: https://github.com/tokio-rs/tokio/pull/3909 -[#3928]: https://github.com/tokio-rs/tokio/pull/3928 -[#3934]: 
https://github.com/tokio-rs/tokio/pull/3934 -[#3939]: https://github.com/tokio-rs/tokio/pull/3939 -[#3945]: https://github.com/tokio-rs/tokio/pull/3945 -[#3950]: https://github.com/tokio-rs/tokio/pull/3950 -[#3955]: https://github.com/tokio-rs/tokio/pull/3955 -[#3959]: https://github.com/tokio-rs/tokio/pull/3959 -[#3967]: https://github.com/tokio-rs/tokio/pull/3967 -[#3978]: https://github.com/tokio-rs/tokio/pull/3978 -[#3980]: https://github.com/tokio-rs/tokio/pull/3980 - -# 1.8.3 (July 26, 2021) - -This release backports two fixes from 1.9.0 - -### Fixed - - - Fix leak if output of future panics on drop ([#3967]) - - Fix leak in `LocalSet` ([#3978]) - -[#3967]: https://github.com/tokio-rs/tokio/pull/3967 -[#3978]: https://github.com/tokio-rs/tokio/pull/3978 - -# 1.8.2 (July 19, 2021) - -Fixes a missed edge case from 1.8.1. - -### Fixed - -- runtime: drop canceled future on next poll (#3965) - -# 1.8.1 (July 6, 2021) - -Forward ports 1.5.1 fixes. - -### Fixed - -- runtime: remotely abort tasks on `JoinHandle::abort` ([#3934]) - -[#3934]: https://github.com/tokio-rs/tokio/pull/3934 - -# 1.8.0 (July 2, 2021) - -### Added - -- io: add `get_{ref,mut}` methods to `AsyncFdReadyGuard` and `AsyncFdReadyMutGuard` ([#3807]) -- io: efficient implementation of vectored writes for `BufWriter` ([#3163]) -- net: add ready/try methods to `NamedPipe{Client,Server}` ([#3866], [#3899]) -- sync: add `watch::Receiver::borrow_and_update` ([#3813]) -- sync: implement `From` for `OnceCell` ([#3877]) -- time: allow users to specify Interval behaviour when delayed ([#3721]) - -### Added (unstable) - -- rt: add `tokio::task::Builder` ([#3881]) - -### Fixed - -- net: handle HUP event with `UnixStream` ([#3898]) - -### Documented - -- doc: document cancellation safety ([#3900]) -- time: add wait alias to sleep ([#3897]) -- time: document auto-advancing behaviour of runtime ([#3763]) - -[#3163]: https://github.com/tokio-rs/tokio/pull/3163 -[#3721]: https://github.com/tokio-rs/tokio/pull/3721 
-[#3763]: https://github.com/tokio-rs/tokio/pull/3763 -[#3807]: https://github.com/tokio-rs/tokio/pull/3807 -[#3813]: https://github.com/tokio-rs/tokio/pull/3813 -[#3866]: https://github.com/tokio-rs/tokio/pull/3866 -[#3877]: https://github.com/tokio-rs/tokio/pull/3877 -[#3881]: https://github.com/tokio-rs/tokio/pull/3881 -[#3897]: https://github.com/tokio-rs/tokio/pull/3897 -[#3898]: https://github.com/tokio-rs/tokio/pull/3898 -[#3899]: https://github.com/tokio-rs/tokio/pull/3899 -[#3900]: https://github.com/tokio-rs/tokio/pull/3900 - -# 1.7.2 (July 6, 2021) - -Forward ports 1.5.1 fixes. - -### Fixed - -- runtime: remotely abort tasks on `JoinHandle::abort` ([#3934]) - -[#3934]: https://github.com/tokio-rs/tokio/pull/3934 - -# 1.7.1 (June 18, 2021) - -### Fixed - -- runtime: fix early task shutdown during runtime shutdown ([#3870]) - -[#3870]: https://github.com/tokio-rs/tokio/pull/3870 - -# 1.7.0 (June 15, 2021) - -### Added - -- net: add named pipes on windows ([#3760]) -- net: add `TcpSocket` from `std::net::TcpStream` conversion ([#3838]) -- sync: add `receiver_count` to `watch::Sender` ([#3729]) -- sync: export `sync::notify::Notified` future publicly ([#3840]) -- tracing: instrument task wakers ([#3836]) - -### Fixed - -- macros: suppress `clippy::default_numeric_fallback` lint in generated code ([#3831]) -- runtime: immediately drop new tasks when runtime is shut down ([#3752]) -- sync: deprecate unused `mpsc::RecvError` type ([#3833]) - -### Documented - -- io: clarify EOF condition for `AsyncReadExt::read_buf` ([#3850]) -- io: clarify limits on return values of `AsyncWrite::poll_write` ([#3820]) -- sync: add examples to Semaphore ([#3808]) - -[#3729]: https://github.com/tokio-rs/tokio/pull/3729 -[#3752]: https://github.com/tokio-rs/tokio/pull/3752 -[#3760]: https://github.com/tokio-rs/tokio/pull/3760 -[#3808]: https://github.com/tokio-rs/tokio/pull/3808 -[#3820]: https://github.com/tokio-rs/tokio/pull/3820 -[#3831]: 
https://github.com/tokio-rs/tokio/pull/3831 -[#3833]: https://github.com/tokio-rs/tokio/pull/3833 -[#3836]: https://github.com/tokio-rs/tokio/pull/3836 -[#3838]: https://github.com/tokio-rs/tokio/pull/3838 -[#3840]: https://github.com/tokio-rs/tokio/pull/3840 -[#3850]: https://github.com/tokio-rs/tokio/pull/3850 - -# 1.6.3 (July 6, 2021) - -Forward ports 1.5.1 fixes. - -### Fixed - -- runtime: remotely abort tasks on `JoinHandle::abort` ([#3934]) - -[#3934]: https://github.com/tokio-rs/tokio/pull/3934 - -# 1.6.2 (June 14, 2021) - -### Fixes - -- test: sub-ms `time:advance` regression introduced in 1.6 ([#3852]) - -[#3852]: https://github.com/tokio-rs/tokio/pull/3852 - -# 1.6.1 (May 28, 2021) - -This release reverts [#3518] because it doesn't work on some kernels due to -a kernel bug. ([#3803]) - -[#3518]: https://github.com/tokio-rs/tokio/issues/3518 -[#3803]: https://github.com/tokio-rs/tokio/issues/3803 - -# 1.6.0 (May 14, 2021) - -### Added - -- fs: try doing a non-blocking read before punting to the threadpool ([#3518]) -- io: add `write_all_buf` to `AsyncWriteExt` ([#3737]) -- io: implement `AsyncSeek` for `BufReader`, `BufWriter`, and `BufStream` ([#3491]) -- net: support non-blocking vectored I/O ([#3761]) -- sync: add `mpsc::Sender::{reserve_owned, try_reserve_owned}` ([#3704]) -- sync: add a `MutexGuard::map` method that returns a `MappedMutexGuard` ([#2472]) -- time: add getter for Interval's period ([#3705]) - -### Fixed - -- io: wake pending writers on `DuplexStream` close ([#3756]) -- process: avoid redundant effort to reap orphan processes ([#3743]) -- signal: use `std::os::raw::c_int` instead of `libc::c_int` on public API ([#3774]) -- sync: preserve permit state in `notify_waiters` ([#3660]) -- task: update `JoinHandle` panic message ([#3727]) -- time: prevent `time::advance` from going too far ([#3712]) - -### Documented - -- net: hide `net::unix::datagram` module from docs ([#3775]) -- process: updated example ([#3748]) -- sync: `Barrier` doc 
should use task, not thread ([#3780]) -- task: update documentation on `block_in_place` ([#3753]) - -[#2472]: https://github.com/tokio-rs/tokio/pull/2472 -[#3491]: https://github.com/tokio-rs/tokio/pull/3491 -[#3518]: https://github.com/tokio-rs/tokio/pull/3518 -[#3660]: https://github.com/tokio-rs/tokio/pull/3660 -[#3704]: https://github.com/tokio-rs/tokio/pull/3704 -[#3705]: https://github.com/tokio-rs/tokio/pull/3705 -[#3712]: https://github.com/tokio-rs/tokio/pull/3712 -[#3727]: https://github.com/tokio-rs/tokio/pull/3727 -[#3737]: https://github.com/tokio-rs/tokio/pull/3737 -[#3743]: https://github.com/tokio-rs/tokio/pull/3743 -[#3748]: https://github.com/tokio-rs/tokio/pull/3748 -[#3753]: https://github.com/tokio-rs/tokio/pull/3753 -[#3756]: https://github.com/tokio-rs/tokio/pull/3756 -[#3761]: https://github.com/tokio-rs/tokio/pull/3761 -[#3774]: https://github.com/tokio-rs/tokio/pull/3774 -[#3775]: https://github.com/tokio-rs/tokio/pull/3775 -[#3780]: https://github.com/tokio-rs/tokio/pull/3780 - -# 1.5.1 (July 6, 2021) - -### Fixed - -- runtime: remotely abort tasks on `JoinHandle::abort` ([#3934]) - -[#3934]: https://github.com/tokio-rs/tokio/pull/3934 - -# 1.5.0 (April 12, 2021) - -### Added - -- io: add `AsyncSeekExt::stream_position` ([#3650]) -- io: add `AsyncWriteExt::write_vectored` ([#3678]) -- io: add a `copy_bidirectional` utility ([#3572]) -- net: implement `IntoRawFd` for `TcpSocket` ([#3684]) -- sync: add `OnceCell` ([#3591]) -- sync: add `OwnedRwLockReadGuard` and `OwnedRwLockWriteGuard` ([#3340]) -- sync: add `Semaphore::is_closed` ([#3673]) -- sync: add `mpsc::Sender::capacity` ([#3690]) -- sync: allow configuring `RwLock` max reads ([#3644]) -- task: add `sync_scope` for `LocalKey` ([#3612]) - -### Fixed - -- chore: try to avoid `noalias` attributes on intrusive linked list ([#3654]) -- rt: fix panic in `JoinHandle::abort()` when called from other threads ([#3672]) -- sync: don't panic in `oneshot::try_recv` ([#3674]) -- sync: fix 
notifications getting dropped on receiver drop ([#3652]) -- sync: fix `Semaphore` permit overflow calculation ([#3644]) - -### Documented - -- io: clarify requirements of `AsyncFd` ([#3635]) -- runtime: fix unclear docs for `{Handle,Runtime}::block_on` ([#3628]) -- sync: document that `Semaphore` is fair ([#3693]) -- sync: improve doc on blocking mutex ([#3645]) - -[#3340]: https://github.com/tokio-rs/tokio/pull/3340 -[#3572]: https://github.com/tokio-rs/tokio/pull/3572 -[#3591]: https://github.com/tokio-rs/tokio/pull/3591 -[#3612]: https://github.com/tokio-rs/tokio/pull/3612 -[#3628]: https://github.com/tokio-rs/tokio/pull/3628 -[#3635]: https://github.com/tokio-rs/tokio/pull/3635 -[#3644]: https://github.com/tokio-rs/tokio/pull/3644 -[#3645]: https://github.com/tokio-rs/tokio/pull/3645 -[#3650]: https://github.com/tokio-rs/tokio/pull/3650 -[#3652]: https://github.com/tokio-rs/tokio/pull/3652 -[#3654]: https://github.com/tokio-rs/tokio/pull/3654 -[#3672]: https://github.com/tokio-rs/tokio/pull/3672 -[#3673]: https://github.com/tokio-rs/tokio/pull/3673 -[#3674]: https://github.com/tokio-rs/tokio/pull/3674 -[#3678]: https://github.com/tokio-rs/tokio/pull/3678 -[#3684]: https://github.com/tokio-rs/tokio/pull/3684 -[#3690]: https://github.com/tokio-rs/tokio/pull/3690 -[#3693]: https://github.com/tokio-rs/tokio/pull/3693 - -# 1.4.0 (March 20, 2021) - -### Added - -- macros: introduce biased argument for `select!` ([#3603]) -- runtime: add `Handle::block_on` ([#3569]) - -### Fixed - -- runtime: avoid unnecessary polling of `block_on` future ([#3582]) -- runtime: fix memory leak/growth when creating many runtimes ([#3564]) -- runtime: mark `EnterGuard` with `must_use` ([#3609]) - -### Documented - -- chore: mention fix for building docs in contributing guide ([#3618]) -- doc: add link to `PollSender` ([#3613]) -- doc: alias sleep to delay ([#3604]) -- sync: improve `Mutex` FIFO explanation ([#3615]) -- timer: fix double newline in module docs ([#3617]) - -[#3564]: 
https://github.com/tokio-rs/tokio/pull/3564 -[#3613]: https://github.com/tokio-rs/tokio/pull/3613 -[#3618]: https://github.com/tokio-rs/tokio/pull/3618 -[#3617]: https://github.com/tokio-rs/tokio/pull/3617 -[#3582]: https://github.com/tokio-rs/tokio/pull/3582 -[#3615]: https://github.com/tokio-rs/tokio/pull/3615 -[#3603]: https://github.com/tokio-rs/tokio/pull/3603 -[#3609]: https://github.com/tokio-rs/tokio/pull/3609 -[#3604]: https://github.com/tokio-rs/tokio/pull/3604 -[#3569]: https://github.com/tokio-rs/tokio/pull/3569 - -# 1.3.0 (March 9, 2021) - -### Added - -- coop: expose an `unconstrained()` opt-out ([#3547]) -- net: add `into_std` for net types without it ([#3509]) -- sync: add `same_channel` method to `mpsc::Sender` ([#3532]) -- sync: add `{try_,}acquire_many_owned` to `Semaphore` ([#3535]) -- sync: add back `RwLockWriteGuard::map` and `RwLockWriteGuard::try_map` ([#3348]) - -### Fixed - -- sync: allow `oneshot::Receiver::close` after successful `try_recv` ([#3552]) -- time: do not panic on `timeout(Duration::MAX)` ([#3551]) - -### Documented - -- doc: doc aliases for pre-1.0 function names ([#3523]) -- io: fix typos ([#3541]) -- io: note the EOF behaviour of `read_until` ([#3536]) -- io: update `AsyncRead::poll_read` doc ([#3557]) -- net: update `UdpSocket` splitting doc ([#3517]) -- runtime: add link to `LocalSet` on `new_current_thread` ([#3508]) -- runtime: update documentation of thread limits ([#3527]) -- sync: do not recommend `join_all` for `Barrier` ([#3514]) -- sync: documentation for `oneshot` ([#3592]) -- sync: rename `notify` to `notify_one` ([#3526]) -- time: fix typo in `Sleep` doc ([#3515]) -- time: sync `interval.rs` and `time/mod.rs` docs ([#3533]) - -[#3348]: https://github.com/tokio-rs/tokio/pull/3348 -[#3508]: https://github.com/tokio-rs/tokio/pull/3508 -[#3509]: https://github.com/tokio-rs/tokio/pull/3509 -[#3514]: https://github.com/tokio-rs/tokio/pull/3514 -[#3515]: https://github.com/tokio-rs/tokio/pull/3515 -[#3517]: 
https://github.com/tokio-rs/tokio/pull/3517 -[#3523]: https://github.com/tokio-rs/tokio/pull/3523 -[#3526]: https://github.com/tokio-rs/tokio/pull/3526 -[#3527]: https://github.com/tokio-rs/tokio/pull/3527 -[#3532]: https://github.com/tokio-rs/tokio/pull/3532 -[#3533]: https://github.com/tokio-rs/tokio/pull/3533 -[#3535]: https://github.com/tokio-rs/tokio/pull/3535 -[#3536]: https://github.com/tokio-rs/tokio/pull/3536 -[#3541]: https://github.com/tokio-rs/tokio/pull/3541 -[#3547]: https://github.com/tokio-rs/tokio/pull/3547 -[#3551]: https://github.com/tokio-rs/tokio/pull/3551 -[#3552]: https://github.com/tokio-rs/tokio/pull/3552 -[#3557]: https://github.com/tokio-rs/tokio/pull/3557 -[#3592]: https://github.com/tokio-rs/tokio/pull/3592 - -# 1.2.0 (February 5, 2021) - -### Added - -- signal: make `Signal::poll_recv` method public ([#3383]) - -### Fixed - -- time: make `test-util` paused time fully deterministic ([#3492]) - -### Documented - -- sync: link to new broadcast and watch wrappers ([#3504]) - -[#3383]: https://github.com/tokio-rs/tokio/pull/3383 -[#3492]: https://github.com/tokio-rs/tokio/pull/3492 -[#3504]: https://github.com/tokio-rs/tokio/pull/3504 - -# 1.1.1 (January 29, 2021) - -Forward ports 1.0.3 fix. - -### Fixed -- io: memory leak during shutdown ([#3477]). 
- -# 1.1.0 (January 22, 2021) - -### Added - -- net: add `try_read_buf` and `try_recv_buf` ([#3351]) -- mpsc: Add `Sender::try_reserve` function ([#3418]) -- sync: add `RwLock` `try_read` and `try_write` methods ([#3400]) -- io: add `ReadBuf::inner_mut` ([#3443]) - -### Changed - -- macros: improve `select!` error message ([#3352]) -- io: keep track of initialized bytes in `read_to_end` ([#3426]) -- runtime: consolidate errors for context missing ([#3441]) - -### Fixed - -- task: wake `LocalSet` on `spawn_local` ([#3369]) -- sync: fix panic in broadcast::Receiver drop ([#3434]) - -### Documented -- stream: link to new `Stream` wrappers in `tokio-stream` ([#3343]) -- docs: mention that `test-util` feature is not enabled with full ([#3397]) -- process: add documentation to process::Child fields ([#3437]) -- io: clarify `AsyncFd` docs about changes of the inner fd ([#3430]) -- net: update datagram docs on splitting ([#3448]) -- time: document that `Sleep` is not `Unpin` ([#3457]) -- sync: add link to `PollSemaphore` ([#3456]) -- task: add `LocalSet` example ([#3438]) -- sync: improve bounded `mpsc` documentation ([#3458]) - -[#3343]: https://github.com/tokio-rs/tokio/pull/3343 -[#3351]: https://github.com/tokio-rs/tokio/pull/3351 -[#3352]: https://github.com/tokio-rs/tokio/pull/3352 -[#3369]: https://github.com/tokio-rs/tokio/pull/3369 -[#3397]: https://github.com/tokio-rs/tokio/pull/3397 -[#3400]: https://github.com/tokio-rs/tokio/pull/3400 -[#3418]: https://github.com/tokio-rs/tokio/pull/3418 -[#3426]: https://github.com/tokio-rs/tokio/pull/3426 -[#3430]: https://github.com/tokio-rs/tokio/pull/3430 -[#3434]: https://github.com/tokio-rs/tokio/pull/3434 -[#3437]: https://github.com/tokio-rs/tokio/pull/3437 -[#3438]: https://github.com/tokio-rs/tokio/pull/3438 -[#3441]: https://github.com/tokio-rs/tokio/pull/3441 -[#3443]: https://github.com/tokio-rs/tokio/pull/3443 -[#3448]: https://github.com/tokio-rs/tokio/pull/3448 -[#3456]: 
https://github.com/tokio-rs/tokio/pull/3456 -[#3457]: https://github.com/tokio-rs/tokio/pull/3457 -[#3458]: https://github.com/tokio-rs/tokio/pull/3458 - -# 1.0.3 (January 28, 2021) - -### Fixed -- io: memory leak during shutdown ([#3477]). - -[#3477]: https://github.com/tokio-rs/tokio/pull/3477 - -# 1.0.2 (January 14, 2021) - -### Fixed -- io: soundness in `read_to_end` ([#3428]). - -[#3428]: https://github.com/tokio-rs/tokio/pull/3428 - -# 1.0.1 (December 25, 2020) - -This release fixes a soundness hole caused by the combination of `RwLockWriteGuard::map` -and `RwLockWriteGuard::downgrade` by removing the `map` function. This is a breaking -change, but breaking changes are allowed under our semver policy when they are required -to fix a soundness hole. (See [this RFC][semver] for more.) - -Note that we have chosen not to do a deprecation cycle or similar because Tokio 1.0.0 was -released two days ago, and therefore the impact should be minimal. - -Due to the soundness hole, we have also yanked Tokio version 1.0.0. - -### Removed - -- sync: remove `RwLockWriteGuard::map` and `RwLockWriteGuard::try_map` ([#3345]) - -### Fixed - -- docs: remove stream feature from docs ([#3335]) - -[semver]: https://github.com/rust-lang/rfcs/blob/master/text/1122-language-semver.md#soundness-changes -[#3335]: https://github.com/tokio-rs/tokio/pull/3335 -[#3345]: https://github.com/tokio-rs/tokio/pull/3345 - -# 1.0.0 (December 23, 2020) - -Commit to the API and long-term support. - -### Fixed - -- sync: spurious wakeup in `watch` ([#3234]). - -### Changed - -- io: rename `AsyncFd::with_io()` to `try_io()` ([#3306]) -- fs: avoid OS specific `*Ext` traits in favor of conditionally defining the fn ([#3264]). -- fs: `Sleep` is `!Unpin` ([#3278]). -- net: pass `SocketAddr` by value ([#3125]). -- net: `TcpStream::poll_peek` takes `ReadBuf` ([#3259]). -- rt: rename `runtime::Builder::max_threads()` to `max_blocking_threads()` ([#3287]). 
-- time: require `current_thread` runtime when calling `time::pause()` ([#3289]). - -### Removed - -- remove `tokio::prelude` ([#3299]). -- io: remove `AsyncFd::with_poll()` ([#3306]). -- net: remove `{Tcp,Unix}Stream::shutdown()` in favor of `AsyncWrite::shutdown()` ([#3298]). -- stream: move all stream utilities to `tokio-stream` until `Stream` is added to - `std` ([#3277]). -- sync: mpsc `try_recv()` due to unexpected behavior ([#3263]). -- tracing: make unstable as `tracing-core` is not 1.0 yet ([#3266]). - -### Added - -- fs: `poll_*` fns to `DirEntry` ([#3308]). -- io: `poll_*` fns to `io::Lines`, `io::Split` ([#3308]). -- io: `_mut` method variants to `AsyncFd` ([#3304]). -- net: `poll_*` fns to `UnixDatagram` ([#3223]). -- net: `UnixStream` readiness and non-blocking ops ([#3246]). -- sync: `UnboundedReceiver::blocking_recv()` ([#3262]). -- sync: `watch::Sender::borrow()` ([#3269]). -- sync: `Semaphore::close()` ([#3065]). -- sync: `poll_recv` fns to `mpsc::Receiver`, `mpsc::UnboundedReceiver` ([#3308]). -- time: `poll_tick` fn to `time::Interval` ([#3316]). 
- -[#3065]: https://github.com/tokio-rs/tokio/pull/3065 -[#3125]: https://github.com/tokio-rs/tokio/pull/3125 -[#3223]: https://github.com/tokio-rs/tokio/pull/3223 -[#3234]: https://github.com/tokio-rs/tokio/pull/3234 -[#3246]: https://github.com/tokio-rs/tokio/pull/3246 -[#3259]: https://github.com/tokio-rs/tokio/pull/3259 -[#3262]: https://github.com/tokio-rs/tokio/pull/3262 -[#3263]: https://github.com/tokio-rs/tokio/pull/3263 -[#3264]: https://github.com/tokio-rs/tokio/pull/3264 -[#3266]: https://github.com/tokio-rs/tokio/pull/3266 -[#3269]: https://github.com/tokio-rs/tokio/pull/3269 -[#3277]: https://github.com/tokio-rs/tokio/pull/3277 -[#3278]: https://github.com/tokio-rs/tokio/pull/3278 -[#3287]: https://github.com/tokio-rs/tokio/pull/3287 -[#3289]: https://github.com/tokio-rs/tokio/pull/3289 -[#3298]: https://github.com/tokio-rs/tokio/pull/3298 -[#3299]: https://github.com/tokio-rs/tokio/pull/3299 -[#3304]: https://github.com/tokio-rs/tokio/pull/3304 -[#3306]: https://github.com/tokio-rs/tokio/pull/3306 -[#3308]: https://github.com/tokio-rs/tokio/pull/3308 -[#3316]: https://github.com/tokio-rs/tokio/pull/3316 - -# 0.3.6 (December 14, 2020) - -### Fixed - -- rt: fix deadlock in shutdown ([#3228]) -- rt: fix panic in task abort when off rt ([#3159]) -- sync: make `add_permits` panic with usize::MAX >> 3 permits ([#3188]) -- time: Fix race condition in timer drop ([#3229]) -- watch: fix spurious wakeup ([#3244]) - -### Added - -- example: add back udp-codec example ([#3205]) -- net: add `TcpStream::into_std` ([#3189]) - -[#3159]: https://github.com/tokio-rs/tokio/pull/3159 -[#3188]: https://github.com/tokio-rs/tokio/pull/3188 -[#3189]: https://github.com/tokio-rs/tokio/pull/3189 -[#3205]: https://github.com/tokio-rs/tokio/pull/3205 -[#3228]: https://github.com/tokio-rs/tokio/pull/3228 -[#3229]: https://github.com/tokio-rs/tokio/pull/3229 -[#3244]: https://github.com/tokio-rs/tokio/pull/3244 - -# 0.3.5 (November 30, 2020) - -### Fixed - -- rt: fix 
`shutdown_timeout(0)` ([#3196]). -- time: fixed race condition with small sleeps ([#3069]). - -### Added - -- io: `AsyncFd::with_interest()` ([#3167]). -- signal: `CtrlC` stream on windows ([#3186]). - -[#3069]: https://github.com/tokio-rs/tokio/pull/3069 -[#3167]: https://github.com/tokio-rs/tokio/pull/3167 -[#3186]: https://github.com/tokio-rs/tokio/pull/3186 -[#3196]: https://github.com/tokio-rs/tokio/pull/3196 - -# 0.3.4 (November 18, 2020) - -### Fixed - -- stream: `StreamMap` `Default` impl bound ([#3093]). -- io: `AsyncFd::into_inner()` should deregister the FD ([#3104]). - -### Changed - -- meta: `parking_lot` feature enabled with `full` ([#3119]). - -### Added - -- io: `AsyncWrite` vectored writes ([#3149]). -- net: TCP/UDP readiness and non-blocking ops ([#3130], [#2743], [#3138]). -- net: TCP socket option (linger, send/recv buf size) ([#3145], [#3143]). -- net: PID field in `UCred` with solaris/illumos ([#3085]). -- rt: `runtime::Handle` allows spawning onto a runtime ([#3079]). -- sync: `Notify::notify_waiters()` ([#3098]). -- sync: `acquire_many()`, `try_acquire_many()` to `Semaphore` ([#3067]). - -[#2743]: https://github.com/tokio-rs/tokio/pull/2743 -[#3067]: https://github.com/tokio-rs/tokio/pull/3067 -[#3079]: https://github.com/tokio-rs/tokio/pull/3079 -[#3085]: https://github.com/tokio-rs/tokio/pull/3085 -[#3093]: https://github.com/tokio-rs/tokio/pull/3093 -[#3098]: https://github.com/tokio-rs/tokio/pull/3098 -[#3104]: https://github.com/tokio-rs/tokio/pull/3104 -[#3119]: https://github.com/tokio-rs/tokio/pull/3119 -[#3130]: https://github.com/tokio-rs/tokio/pull/3130 -[#3138]: https://github.com/tokio-rs/tokio/pull/3138 -[#3143]: https://github.com/tokio-rs/tokio/pull/3143 -[#3145]: https://github.com/tokio-rs/tokio/pull/3145 -[#3149]: https://github.com/tokio-rs/tokio/pull/3149 - -# 0.3.3 (November 2, 2020) - -Fixes a soundness hole by adding a missing `Send` bound to -`Runtime::spawn_blocking()`. 
- -### Fixed - -- rt: include missing `Send`, fixing soundness hole ([#3089]). -- tracing: avoid huge trace span names ([#3074]). - -### Added - -- net: `TcpSocket::reuseport()`, `TcpSocket::set_reuseport()` ([#3083]). -- net: `TcpSocket::reuseaddr()` ([#3093]). -- net: `TcpSocket::local_addr()` ([#3093]). -- net: add pid to `UCred` ([#2633]). - -[#2633]: https://github.com/tokio-rs/tokio/pull/2633 -[#3074]: https://github.com/tokio-rs/tokio/pull/3074 -[#3083]: https://github.com/tokio-rs/tokio/pull/3083 -[#3089]: https://github.com/tokio-rs/tokio/pull/3089 -[#3093]: https://github.com/tokio-rs/tokio/pull/3093 - -# 0.3.2 (October 27, 2020) - -Adds `AsyncFd` as a replacement for v0.2's `PollEvented`. - -### Fixed - -- io: fix a potential deadlock when shutting down the I/O driver ([#2903]). -- sync: `RwLockWriteGuard::downgrade()` bug ([#2957]). - -### Added - -- io: `AsyncFd` for receiving readiness events on raw FDs ([#2903]). -- net: `poll_*` function on `UdpSocket` ([#2981]). -- net: `UdpSocket::take_error()` ([#3051]). -- sync: `oneshot::Sender::poll_closed()` ([#3032]). - -[#2903]: https://github.com/tokio-rs/tokio/pull/2903 -[#2957]: https://github.com/tokio-rs/tokio/pull/2957 -[#2981]: https://github.com/tokio-rs/tokio/pull/2981 -[#3032]: https://github.com/tokio-rs/tokio/pull/3032 -[#3051]: https://github.com/tokio-rs/tokio/pull/3051 - -# 0.3.1 (October 21, 2020) - -This release fixes a use-after-free in the IO driver. Additionally, the `read_buf` -and `write_buf` methods have been added back to the IO traits, as the bytes crate -is now on track to reach version 1.0 together with Tokio. - -### Fixed - -- net: fix use-after-free ([#3019]). -- fs: ensure buffered data is written on shutdown ([#3009]). - -### Added - -- io: `copy_buf()` ([#2884]). -- io: `AsyncReadExt::read_buf()`, `AsyncReadExt::write_buf()` for working with - `Buf`/`BufMut` ([#3003]). -- rt: `Runtime::spawn_blocking()` ([#2980]). -- sync: `watch::Sender::is_closed()` ([#2991]). 
- -[#2884]: https://github.com/tokio-rs/tokio/pull/2884 -[#2980]: https://github.com/tokio-rs/tokio/pull/2980 -[#2991]: https://github.com/tokio-rs/tokio/pull/2991 -[#3003]: https://github.com/tokio-rs/tokio/pull/3003 -[#3009]: https://github.com/tokio-rs/tokio/pull/3009 -[#3019]: https://github.com/tokio-rs/tokio/pull/3019 - -# 0.3.0 (October 15, 2020) - -This represents a 1.0 beta release. APIs are polished and future-proofed. APIs -not included for 1.0 stabilization have been removed. - -Biggest changes are: - -- I/O driver internal rewrite. The windows implementation includes significant - changes. -- Runtime API is polished, especially with how it interacts with feature flag - combinations. -- Feature flags are simplified - - `rt-core` and `rt-util` are combined to `rt` - - `rt-threaded` is renamed to `rt-multi-thread` to match builder API - - `tcp`, `udp`, `uds`, `dns` are combined to `net`. - - `parking_lot` is included with `full` - -### Changes - -- meta: Minimum supported Rust version is now 1.45. -- io: `AsyncRead` trait now takes `ReadBuf` in order to safely handle reading - into uninitialized memory ([#2758]). -- io: Internal I/O driver storage is now able to compact ([#2757]). -- rt: `Runtime::block_on` now takes `&self` ([#2782]). -- sync: `watch` reworked to decouple receiving a change notification from - receiving the value ([#2814], [#2806]). -- sync: `Notify::notify` is renamed to `notify_one` ([#2822]). -- process: `Child::kill` is now an `async fn` that cleans zombies ([#2823]). -- sync: use `const fn` constructors as possible ([#2833], [#2790]) -- signal: reduce cross-thread notification ([#2835]). -- net: tcp,udp,uds types support operations with `&self` ([#2828], [#2919], [#2934]). -- sync: blocking `mpsc` channel supports `send` with `&self` ([#2861]). -- time: rename `delay_for` and `delay_until` to `sleep` and `sleep_until` ([#2826]). -- io: upgrade to `mio` 0.7 ([#2893]). -- io: `AsyncSeek` trait is tweaked ([#2885]). 
-- fs: `File` operations take `&self` ([#2930]). -- rt: runtime API, and `#[tokio::main]` macro polish ([#2876]) -- rt: `Runtime::enter` uses an RAII guard instead of a closure ([#2954]). -- net: the `from_std` function on all sockets no longer sets socket into non-blocking mode ([#2893]) - -### Added - -- sync: `map` function to lock guards ([#2445]). -- sync: `blocking_recv` and `blocking_send` fns to `mpsc` for use outside of Tokio ([#2685]). -- rt: `Builder::thread_name_fn` for configuring thread names ([#1921]). -- fs: impl `FromRawFd` and `FromRawHandle` for `File` ([#2792]). -- process: `Child::wait` and `Child::try_wait` ([#2796]). -- rt: support configuring thread keep-alive duration ([#2809]). -- rt: `task::JoinHandle::abort` forcibly cancels a spawned task ([#2474]). -- sync: `RwLock` write guard to read guard downgrading ([#2733]). -- net: add `poll_*` functions that take `&self` to all net types ([#2845]) -- sync: `get_mut()` for `Mutex`, `RwLock` ([#2856]). -- sync: `mpsc::Sender::closed()` waits for `Receiver` half to close ([#2840]). -- sync: `mpsc::Sender::is_closed()` returns true if `Receiver` half is closed ([#2726]). -- stream: `iter` and `iter_mut` to `StreamMap` ([#2890]). -- net: implement `AsRawSocket` on windows ([#2911]). -- net: `TcpSocket` creates a socket without binding or listening ([#2920]). - -### Removed - -- io: vectored ops are removed from `AsyncRead`, `AsyncWrite` traits ([#2882]). -- io: `mio` is removed from the public API. `PollEvented` and `Registration` are - removed ([#2893]). -- io: remove `bytes` from public API. `Buf` and `BufMut` implementation are - removed ([#2908]). -- time: `DelayQueue` is moved to `tokio-util` ([#2897]). - -### Fixed - -- io: `stdout` and `stderr` buffering on windows ([#2734]). 
- -[#1921]: https://github.com/tokio-rs/tokio/pull/1921 -[#2445]: https://github.com/tokio-rs/tokio/pull/2445 -[#2474]: https://github.com/tokio-rs/tokio/pull/2474 -[#2685]: https://github.com/tokio-rs/tokio/pull/2685 -[#2726]: https://github.com/tokio-rs/tokio/pull/2726 -[#2733]: https://github.com/tokio-rs/tokio/pull/2733 -[#2734]: https://github.com/tokio-rs/tokio/pull/2734 -[#2757]: https://github.com/tokio-rs/tokio/pull/2757 -[#2758]: https://github.com/tokio-rs/tokio/pull/2758 -[#2782]: https://github.com/tokio-rs/tokio/pull/2782 -[#2790]: https://github.com/tokio-rs/tokio/pull/2790 -[#2792]: https://github.com/tokio-rs/tokio/pull/2792 -[#2796]: https://github.com/tokio-rs/tokio/pull/2796 -[#2806]: https://github.com/tokio-rs/tokio/pull/2806 -[#2809]: https://github.com/tokio-rs/tokio/pull/2809 -[#2814]: https://github.com/tokio-rs/tokio/pull/2814 -[#2822]: https://github.com/tokio-rs/tokio/pull/2822 -[#2823]: https://github.com/tokio-rs/tokio/pull/2823 -[#2826]: https://github.com/tokio-rs/tokio/pull/2826 -[#2828]: https://github.com/tokio-rs/tokio/pull/2828 -[#2833]: https://github.com/tokio-rs/tokio/pull/2833 -[#2835]: https://github.com/tokio-rs/tokio/pull/2835 -[#2840]: https://github.com/tokio-rs/tokio/pull/2840 -[#2845]: https://github.com/tokio-rs/tokio/pull/2845 -[#2856]: https://github.com/tokio-rs/tokio/pull/2856 -[#2861]: https://github.com/tokio-rs/tokio/pull/2861 -[#2876]: https://github.com/tokio-rs/tokio/pull/2876 -[#2882]: https://github.com/tokio-rs/tokio/pull/2882 -[#2885]: https://github.com/tokio-rs/tokio/pull/2885 -[#2890]: https://github.com/tokio-rs/tokio/pull/2890 -[#2893]: https://github.com/tokio-rs/tokio/pull/2893 -[#2897]: https://github.com/tokio-rs/tokio/pull/2897 -[#2908]: https://github.com/tokio-rs/tokio/pull/2908 -[#2911]: https://github.com/tokio-rs/tokio/pull/2911 -[#2919]: https://github.com/tokio-rs/tokio/pull/2919 -[#2920]: https://github.com/tokio-rs/tokio/pull/2920 -[#2930]: https://github.com/tokio-rs/tokio/pull/2930 
-[#2934]: https://github.com/tokio-rs/tokio/pull/2934 -[#2954]: https://github.com/tokio-rs/tokio/pull/2954 - -# 0.2.22 (July 21, 2020) - -### Fixes - -- docs: misc improvements ([#2572], [#2658], [#2663], [#2656], [#2647], [#2630], [#2487], [#2621], - [#2624], [#2600], [#2623], [#2622], [#2577], [#2569], [#2589], [#2575], [#2540], [#2564], [#2567], - [#2520], [#2521], [#2493]) -- rt: allow calls to `block_on` inside calls to `block_in_place` that are - themselves inside `block_on` ([#2645]) -- net: fix non-portable behavior when dropping `TcpStream` `OwnedWriteHalf` ([#2597]) -- io: improve stack usage by allocating large buffers on directly on the heap - ([#2634]) -- io: fix unsound pin projection in `AsyncReadExt::read_buf` and - `AsyncWriteExt::write_buf` ([#2612]) -- io: fix unnecessary zeroing for `AsyncRead` implementors ([#2525]) -- io: Fix `BufReader` not correctly forwarding `poll_write_buf` ([#2654]) -- io: fix panic in `AsyncReadExt::read_line` ([#2541]) - -### Changes - -- coop: returning `Poll::Pending` no longer decrements the task budget ([#2549]) - -### Added - -- io: little-endian variants of `AsyncReadExt` and `AsyncWriteExt` methods - ([#1915]) -- task: add [`tracing`] instrumentation to spawned tasks ([#2655]) -- sync: allow unsized types in `Mutex` and `RwLock` (via `default` constructors) - ([#2615]) -- net: add `ToSocketAddrs` implementation for `&[SocketAddr]` ([#2604]) -- fs: add `OpenOptionsExt` for `OpenOptions` ([#2515]) -- fs: add `DirBuilder` ([#2524]) - -[`tracing`]: https://crates.io/crates/tracing -[#1915]: https://github.com/tokio-rs/tokio/pull/1915 -[#2487]: https://github.com/tokio-rs/tokio/pull/2487 -[#2493]: https://github.com/tokio-rs/tokio/pull/2493 -[#2515]: https://github.com/tokio-rs/tokio/pull/2515 -[#2520]: https://github.com/tokio-rs/tokio/pull/2520 -[#2521]: https://github.com/tokio-rs/tokio/pull/2521 -[#2524]: https://github.com/tokio-rs/tokio/pull/2524 -[#2525]: https://github.com/tokio-rs/tokio/pull/2525 -[#2540]: 
https://github.com/tokio-rs/tokio/pull/2540 -[#2541]: https://github.com/tokio-rs/tokio/pull/2541 -[#2549]: https://github.com/tokio-rs/tokio/pull/2549 -[#2564]: https://github.com/tokio-rs/tokio/pull/2564 -[#2567]: https://github.com/tokio-rs/tokio/pull/2567 -[#2569]: https://github.com/tokio-rs/tokio/pull/2569 -[#2572]: https://github.com/tokio-rs/tokio/pull/2572 -[#2575]: https://github.com/tokio-rs/tokio/pull/2575 -[#2577]: https://github.com/tokio-rs/tokio/pull/2577 -[#2589]: https://github.com/tokio-rs/tokio/pull/2589 -[#2597]: https://github.com/tokio-rs/tokio/pull/2597 -[#2600]: https://github.com/tokio-rs/tokio/pull/2600 -[#2604]: https://github.com/tokio-rs/tokio/pull/2604 -[#2612]: https://github.com/tokio-rs/tokio/pull/2612 -[#2615]: https://github.com/tokio-rs/tokio/pull/2615 -[#2621]: https://github.com/tokio-rs/tokio/pull/2621 -[#2622]: https://github.com/tokio-rs/tokio/pull/2622 -[#2623]: https://github.com/tokio-rs/tokio/pull/2623 -[#2624]: https://github.com/tokio-rs/tokio/pull/2624 -[#2630]: https://github.com/tokio-rs/tokio/pull/2630 -[#2634]: https://github.com/tokio-rs/tokio/pull/2634 -[#2645]: https://github.com/tokio-rs/tokio/pull/2645 -[#2647]: https://github.com/tokio-rs/tokio/pull/2647 -[#2654]: https://github.com/tokio-rs/tokio/pull/2654 -[#2655]: https://github.com/tokio-rs/tokio/pull/2655 -[#2656]: https://github.com/tokio-rs/tokio/pull/2656 -[#2658]: https://github.com/tokio-rs/tokio/pull/2658 -[#2663]: https://github.com/tokio-rs/tokio/pull/2663 - -# 0.2.21 (May 13, 2020) - -### Fixes - -- macros: disambiguate built-in `#[test]` attribute in macro expansion ([#2503]) -- rt: `LocalSet` and task budgeting ([#2462]). -- rt: task budgeting with `block_in_place` ([#2502]). -- sync: release `broadcast` channel memory without sending a value ([#2509]). -- time: notify when resetting a `Delay` to a time in the past ([#2290]) - -### Added - -- io: `get_mut`, `get_ref`, and `into_inner` to `Lines` ([#2450]). 
-- io: `mio::Ready` argument to `PollEvented` ([#2419]). -- os: illumos support ([#2486]). -- rt: `Handle::spawn_blocking` ([#2501]). -- sync: `OwnedMutexGuard` for `Arc<Mutex<T>>` ([#2455]). - -[#2290]: https://github.com/tokio-rs/tokio/pull/2290 -[#2419]: https://github.com/tokio-rs/tokio/pull/2419 -[#2450]: https://github.com/tokio-rs/tokio/pull/2450 -[#2455]: https://github.com/tokio-rs/tokio/pull/2455 -[#2462]: https://github.com/tokio-rs/tokio/pull/2462 -[#2486]: https://github.com/tokio-rs/tokio/pull/2486 -[#2501]: https://github.com/tokio-rs/tokio/pull/2501 -[#2502]: https://github.com/tokio-rs/tokio/pull/2502 -[#2503]: https://github.com/tokio-rs/tokio/pull/2503 -[#2509]: https://github.com/tokio-rs/tokio/pull/2509 - -# 0.2.20 (April 28, 2020) - -### Fixes - -- sync: `broadcast` closing the channel no longer requires capacity ([#2448]). -- rt: regression when configuring runtime with `max_threads` less than number of CPUs ([#2457]). - -[#2448]: https://github.com/tokio-rs/tokio/pull/2448 -[#2457]: https://github.com/tokio-rs/tokio/pull/2457 - -# 0.2.19 (April 24, 2020) - -### Fixes - -- docs: misc improvements ([#2400], [#2405], [#2414], [#2420], [#2423], [#2426], [#2427], [#2434], [#2436], [#2440]). -- rt: support `block_in_place` in more contexts ([#2409], [#2410]). -- stream: no panic in `merge()` and `chain()` when using `size_hint()` ([#2430]). -- task: include visibility modifier when defining a task-local ([#2416]). - -### Added - -- rt: `runtime::Handle::block_on` ([#2437]). -- sync: owned `Semaphore` permit ([#2421]). -- tcp: owned split ([#2270]). 
- -[#2270]: https://github.com/tokio-rs/tokio/pull/2270 -[#2400]: https://github.com/tokio-rs/tokio/pull/2400 -[#2405]: https://github.com/tokio-rs/tokio/pull/2405 -[#2409]: https://github.com/tokio-rs/tokio/pull/2409 -[#2410]: https://github.com/tokio-rs/tokio/pull/2410 -[#2414]: https://github.com/tokio-rs/tokio/pull/2414 -[#2416]: https://github.com/tokio-rs/tokio/pull/2416 -[#2420]: https://github.com/tokio-rs/tokio/pull/2420 -[#2421]: https://github.com/tokio-rs/tokio/pull/2421 -[#2423]: https://github.com/tokio-rs/tokio/pull/2423 -[#2426]: https://github.com/tokio-rs/tokio/pull/2426 -[#2427]: https://github.com/tokio-rs/tokio/pull/2427 -[#2430]: https://github.com/tokio-rs/tokio/pull/2430 -[#2434]: https://github.com/tokio-rs/tokio/pull/2434 -[#2436]: https://github.com/tokio-rs/tokio/pull/2436 -[#2437]: https://github.com/tokio-rs/tokio/pull/2437 -[#2440]: https://github.com/tokio-rs/tokio/pull/2440 - -# 0.2.18 (April 12, 2020) - -### Fixes - -- task: `LocalSet` was incorrectly marked as `Send` ([#2398]) -- io: correctly report `WriteZero` failure in `write_int` ([#2334]) - -[#2334]: https://github.com/tokio-rs/tokio/pull/2334 -[#2398]: https://github.com/tokio-rs/tokio/pull/2398 - -# 0.2.17 (April 9, 2020) - -### Fixes - -- rt: bug in work-stealing queue ([#2387]) - -### Changes - -- rt: threadpool uses logical CPU count instead of physical by default ([#2391]) - -[#2387]: https://github.com/tokio-rs/tokio/pull/2387 -[#2391]: https://github.com/tokio-rs/tokio/pull/2391 - -# 0.2.16 (April 3, 2020) - -### Fixes - -- sync: fix a regression where `Mutex`, `Semaphore`, and `RwLock` futures no - longer implement `Sync` ([#2375]) -- fs: fix `fs::copy` not copying file permissions ([#2354]) - -### Added - -- time: added `deadline` method to `delay_queue::Expired` ([#2300]) -- io: added `StreamReader` ([#2052]) - -[#2052]: https://github.com/tokio-rs/tokio/pull/2052 -[#2300]: https://github.com/tokio-rs/tokio/pull/2300 -[#2354]: 
https://github.com/tokio-rs/tokio/pull/2354 -[#2375]: https://github.com/tokio-rs/tokio/pull/2375 - -# 0.2.15 (April 2, 2020) - -### Fixes - -- rt: fix queue regression ([#2362]). - -### Added - -- sync: Add disarm to `mpsc::Sender` ([#2358]). - -[#2358]: https://github.com/tokio-rs/tokio/pull/2358 -[#2362]: https://github.com/tokio-rs/tokio/pull/2362 - -# 0.2.14 (April 1, 2020) - -### Fixes - -- rt: concurrency bug in scheduler ([#2273]). -- rt: concurrency bug with shell runtime ([#2333]). -- test-util: correct pause/resume of time ([#2253]). -- time: `DelayQueue` correct wakeup after `insert` ([#2285]). - -### Added - -- io: impl `AsRawFd`, `AsRawHandle` for std io types ([#2335]). -- rt: automatic cooperative task yielding ([#2160], [#2343], [#2349]). -- sync: `RwLock::into_inner` ([#2321]). - -### Changed - -- sync: semaphore, mutex internals rewritten to avoid allocations ([#2325]). - -[#2160]: https://github.com/tokio-rs/tokio/pull/2160 -[#2253]: https://github.com/tokio-rs/tokio/pull/2253 -[#2273]: https://github.com/tokio-rs/tokio/pull/2273 -[#2285]: https://github.com/tokio-rs/tokio/pull/2285 -[#2321]: https://github.com/tokio-rs/tokio/pull/2321 -[#2325]: https://github.com/tokio-rs/tokio/pull/2325 -[#2333]: https://github.com/tokio-rs/tokio/pull/2333 -[#2335]: https://github.com/tokio-rs/tokio/pull/2335 -[#2343]: https://github.com/tokio-rs/tokio/pull/2343 -[#2349]: https://github.com/tokio-rs/tokio/pull/2349 - -# 0.2.13 (February 28, 2020) - -### Fixes - -- macros: unresolved import in `pin!` ([#2281]). - -[#2281]: https://github.com/tokio-rs/tokio/pull/2281 - -# 0.2.12 (February 27, 2020) - -### Fixes - -- net: `UnixStream::poll_shutdown` should call `shutdown(Write)` ([#2245]). -- process: Wake up read and write on `EPOLLERR` ([#2218]). -- rt: potential deadlock when using `block_in_place` and shutting down the - runtime ([#2119]). -- rt: only detect number of CPUs if `core_threads` not specified ([#2238]). 
-- sync: reduce `watch::Receiver` struct size ([#2191]). -- time: succeed when setting delay of `$MAX-1` ([#2184]). -- time: avoid having to poll `DelayQueue` after inserting new delay ([#2217]). - -### Added - -- macros: `pin!` variant that assigns to identifier and pins ([#2274]). -- net: impl `Stream` for `Listener` types ([#2275]). -- rt: `Runtime::shutdown_timeout` waits for runtime to shutdown for specified - duration ([#2186]). -- stream: `StreamMap` merges streams and can insert / remove streams at - runtime ([#2185]). -- stream: `StreamExt::skip()` skips a fixed number of items ([#2204]). -- stream: `StreamExt::skip_while()` skips items based on a predicate ([#2205]). -- sync: `Notify` provides basic `async` / `await` task notification ([#2210]). -- sync: `Mutex::into_inner` retrieves guarded data ([#2250]). -- sync: `mpsc::Sender::send_timeout` sends, waiting for up to specified duration - for channel capacity ([#2227]). -- time: impl `Ord` and `Hash` for `Instant` ([#2239]). - -[#2119]: https://github.com/tokio-rs/tokio/pull/2119 -[#2184]: https://github.com/tokio-rs/tokio/pull/2184 -[#2185]: https://github.com/tokio-rs/tokio/pull/2185 -[#2186]: https://github.com/tokio-rs/tokio/pull/2186 -[#2191]: https://github.com/tokio-rs/tokio/pull/2191 -[#2204]: https://github.com/tokio-rs/tokio/pull/2204 -[#2205]: https://github.com/tokio-rs/tokio/pull/2205 -[#2210]: https://github.com/tokio-rs/tokio/pull/2210 -[#2217]: https://github.com/tokio-rs/tokio/pull/2217 -[#2218]: https://github.com/tokio-rs/tokio/pull/2218 -[#2227]: https://github.com/tokio-rs/tokio/pull/2227 -[#2238]: https://github.com/tokio-rs/tokio/pull/2238 -[#2239]: https://github.com/tokio-rs/tokio/pull/2239 -[#2245]: https://github.com/tokio-rs/tokio/pull/2245 -[#2250]: https://github.com/tokio-rs/tokio/pull/2250 -[#2274]: https://github.com/tokio-rs/tokio/pull/2274 -[#2275]: https://github.com/tokio-rs/tokio/pull/2275 - -# 0.2.11 (January 27, 2020) - -### Fixes - -- docs: misc fixes and tweaks 
([#2155], [#2103], [#2027], [#2167], [#2175]). -- macros: handle generics in `#[tokio::main]` method ([#2177]). -- sync: `broadcast` potential lost notifications ([#2135]). -- rt: improve "no runtime" panic messages ([#2145]). - -### Added - -- optional support for using `parking_lot` internally ([#2164]). -- fs: `fs::copy`, an async version of `std::fs::copy` ([#2079]). -- macros: `select!` waits for the first branch to complete ([#2152]). -- macros: `join!` waits for all branches to complete ([#2158]). -- macros: `try_join!` waits for all branches to complete or the first error ([#2169]). -- macros: `pin!` pins a value to the stack ([#2163]). -- net: `ReadHalf::poll()` and `ReadHalf::poll_peak` ([#2151]) -- stream: `StreamExt::timeout()` sets a per-item max duration ([#2149]). -- stream: `StreamExt::fold()` applies a function, producing a single value. ([#2122]). -- sync: impl `Eq`, `PartialEq` for `oneshot::RecvError` ([#2168]). -- task: methods for inspecting the `JoinError` cause ([#2051]). 
- -[#2027]: https://github.com/tokio-rs/tokio/pull/2027 -[#2051]: https://github.com/tokio-rs/tokio/pull/2051 -[#2079]: https://github.com/tokio-rs/tokio/pull/2079 -[#2103]: https://github.com/tokio-rs/tokio/pull/2103 -[#2122]: https://github.com/tokio-rs/tokio/pull/2122 -[#2135]: https://github.com/tokio-rs/tokio/pull/2135 -[#2145]: https://github.com/tokio-rs/tokio/pull/2145 -[#2149]: https://github.com/tokio-rs/tokio/pull/2149 -[#2151]: https://github.com/tokio-rs/tokio/pull/2151 -[#2152]: https://github.com/tokio-rs/tokio/pull/2152 -[#2155]: https://github.com/tokio-rs/tokio/pull/2155 -[#2158]: https://github.com/tokio-rs/tokio/pull/2158 -[#2163]: https://github.com/tokio-rs/tokio/pull/2163 -[#2164]: https://github.com/tokio-rs/tokio/pull/2164 -[#2167]: https://github.com/tokio-rs/tokio/pull/2167 -[#2168]: https://github.com/tokio-rs/tokio/pull/2168 -[#2169]: https://github.com/tokio-rs/tokio/pull/2169 -[#2175]: https://github.com/tokio-rs/tokio/pull/2175 -[#2177]: https://github.com/tokio-rs/tokio/pull/2177 - -# 0.2.10 (January 21, 2020) - -### Fixes - -- `#[tokio::main]` when `rt-core` feature flag is not enabled ([#2139]). -- remove `AsyncBufRead` from `BufStream` impl block ([#2108]). -- potential undefined behavior when implementing `AsyncRead` incorrectly ([#2030]). - -### Added - -- `BufStream::with_capacity` ([#2125]). -- impl `From` and `Default` for `RwLock` ([#2089]). -- `io::ReadHalf::is_pair_of` checks if provided `WriteHalf` is for the same - underlying object ([#1762], [#2144]). -- `runtime::Handle::try_current()` returns a handle to the current runtime ([#2118]). -- `stream::empty()` returns an immediately ready empty stream ([#2092]). -- `stream::once(val)` returns a stream that yields a single value: `val` ([#2094]). -- `stream::pending()` returns a stream that never becomes ready ([#2092]). -- `StreamExt::chain()` sequences a second stream after the first completes ([#2093]). 
-- `StreamExt::collect()` transform a stream into a collection ([#2109]). -- `StreamExt::fuse` ends the stream after the first `None` ([#2085]). -- `StreamExt::merge` combines two streams, yielding values as they become ready ([#2091]). -- Task-local storage ([#2126]). - -[#1762]: https://github.com/tokio-rs/tokio/pull/1762 -[#2030]: https://github.com/tokio-rs/tokio/pull/2030 -[#2085]: https://github.com/tokio-rs/tokio/pull/2085 -[#2089]: https://github.com/tokio-rs/tokio/pull/2089 -[#2091]: https://github.com/tokio-rs/tokio/pull/2091 -[#2092]: https://github.com/tokio-rs/tokio/pull/2092 -[#2093]: https://github.com/tokio-rs/tokio/pull/2093 -[#2094]: https://github.com/tokio-rs/tokio/pull/2094 -[#2108]: https://github.com/tokio-rs/tokio/pull/2108 -[#2109]: https://github.com/tokio-rs/tokio/pull/2109 -[#2118]: https://github.com/tokio-rs/tokio/pull/2118 -[#2125]: https://github.com/tokio-rs/tokio/pull/2125 -[#2126]: https://github.com/tokio-rs/tokio/pull/2126 -[#2139]: https://github.com/tokio-rs/tokio/pull/2139 -[#2144]: https://github.com/tokio-rs/tokio/pull/2144 - -# 0.2.9 (January 9, 2020) - -### Fixes - -- `AsyncSeek` impl for `File` ([#1986]). -- rt: shutdown deadlock in `threaded_scheduler` ([#2074], [#2082]). -- rt: memory ordering when dropping `JoinHandle` ([#2044]). -- docs: misc API documentation fixes and improvements. - -[#1986]: https://github.com/tokio-rs/tokio/pull/1986 -[#2044]: https://github.com/tokio-rs/tokio/pull/2044 -[#2074]: https://github.com/tokio-rs/tokio/pull/2074 -[#2082]: https://github.com/tokio-rs/tokio/pull/2082 - -# 0.2.8 (January 7, 2020) - -### Fixes - -- depend on new version of `tokio-macros`. - -# 0.2.7 (January 7, 2020) - -### Fixes - -- potential deadlock when dropping `basic_scheduler` Runtime. -- calling `spawn_blocking` from within a `spawn_blocking` ([#2006]). -- storing a `Runtime` instance in a thread-local ([#2011]). -- miscellaneous documentation fixes. 
-- rt: fix `Waker::will_wake` to return true when tasks match ([#2045]). -- test-util: `time::advance` runs pending tasks before changing the time ([#2059]). - -### Added - -- `net::lookup_host` maps a `T: ToSocketAddrs` to a stream of `SocketAddrs` ([#1870]). -- `process::Child` fields are made public to match `std` ([#2014]). -- impl `Stream` for `sync::broadcast::Receiver` ([#2012]). -- `sync::RwLock` provides an asynchronous read-write lock ([#1699]). -- `runtime::Handle::current` returns the handle for the current runtime ([#2040]). -- `StreamExt::filter` filters stream values according to a predicate ([#2001]). -- `StreamExt::filter_map` simultaneously filter and map stream values ([#2001]). -- `StreamExt::try_next` convenience for streams of `Result` ([#2005]). -- `StreamExt::take` limits a stream to a specified number of values ([#2025]). -- `StreamExt::take_while` limits a stream based on a predicate ([#2029]). -- `StreamExt::all` tests if every element of the stream matches a predicate ([#2035]). -- `StreamExt::any` tests if any element of the stream matches a predicate ([#2034]). -- `task::LocalSet.await` runs spawned tasks until the set is idle ([#1971]). -- `time::DelayQueue::len` returns the number entries in the queue ([#1755]). -- expose runtime options from the `#[tokio::main]` and `#[tokio::test]` ([#2022]). 
- -[#1699]: https://github.com/tokio-rs/tokio/pull/1699 -[#1755]: https://github.com/tokio-rs/tokio/pull/1755 -[#1870]: https://github.com/tokio-rs/tokio/pull/1870 -[#1971]: https://github.com/tokio-rs/tokio/pull/1971 -[#2001]: https://github.com/tokio-rs/tokio/pull/2001 -[#2005]: https://github.com/tokio-rs/tokio/pull/2005 -[#2006]: https://github.com/tokio-rs/tokio/pull/2006 -[#2011]: https://github.com/tokio-rs/tokio/pull/2011 -[#2012]: https://github.com/tokio-rs/tokio/pull/2012 -[#2014]: https://github.com/tokio-rs/tokio/pull/2014 -[#2022]: https://github.com/tokio-rs/tokio/pull/2022 -[#2025]: https://github.com/tokio-rs/tokio/pull/2025 -[#2029]: https://github.com/tokio-rs/tokio/pull/2029 -[#2034]: https://github.com/tokio-rs/tokio/pull/2034 -[#2035]: https://github.com/tokio-rs/tokio/pull/2035 -[#2040]: https://github.com/tokio-rs/tokio/pull/2040 -[#2045]: https://github.com/tokio-rs/tokio/pull/2045 -[#2059]: https://github.com/tokio-rs/tokio/pull/2059 - -# 0.2.6 (December 19, 2019) - -### Fixes - -- `fs::File::seek` API regression ([#1991]). - -[#1991]: https://github.com/tokio-rs/tokio/pull/1991 - -# 0.2.5 (December 18, 2019) - -### Added - -- `io::AsyncSeek` trait ([#1924]). -- `Mutex::try_lock` ([#1939]) -- `mpsc::Receiver::try_recv` and `mpsc::UnboundedReceiver::try_recv` ([#1939]). -- `writev` support for `TcpStream` ([#1956]). -- `time::throttle` for throttling streams ([#1949]). -- implement `Stream` for `time::DelayQueue` ([#1975]). -- `sync::broadcast` provides a fan-out channel ([#1943]). -- `sync::Semaphore` provides an async semaphore ([#1973]). -- `stream::StreamExt` provides stream utilities ([#1962]). - -### Fixes - -- deadlock risk while shutting down the runtime ([#1972]). -- panic while shutting down the runtime ([#1978]). -- `sync::MutexGuard` debug output ([#1961]). -- misc doc improvements ([#1933], [#1934], [#1940], [#1942]). 
- -### Changes - -- runtime threads are configured with `runtime::Builder::core_threads` and - `runtime::Builder::max_threads`. `runtime::Builder::num_threads` is - deprecated ([#1977]). - -[#1924]: https://github.com/tokio-rs/tokio/pull/1924 -[#1933]: https://github.com/tokio-rs/tokio/pull/1933 -[#1934]: https://github.com/tokio-rs/tokio/pull/1934 -[#1939]: https://github.com/tokio-rs/tokio/pull/1939 -[#1940]: https://github.com/tokio-rs/tokio/pull/1940 -[#1942]: https://github.com/tokio-rs/tokio/pull/1942 -[#1943]: https://github.com/tokio-rs/tokio/pull/1943 -[#1949]: https://github.com/tokio-rs/tokio/pull/1949 -[#1956]: https://github.com/tokio-rs/tokio/pull/1956 -[#1961]: https://github.com/tokio-rs/tokio/pull/1961 -[#1962]: https://github.com/tokio-rs/tokio/pull/1962 -[#1972]: https://github.com/tokio-rs/tokio/pull/1972 -[#1973]: https://github.com/tokio-rs/tokio/pull/1973 -[#1975]: https://github.com/tokio-rs/tokio/pull/1975 -[#1977]: https://github.com/tokio-rs/tokio/pull/1977 -[#1978]: https://github.com/tokio-rs/tokio/pull/1978 - -# 0.2.4 (December 6, 2019) - -### Fixes - -- `sync::Mutex` deadlock when `lock()` future is dropped early ([#1898]). - -[#1898]: https://github.com/tokio-rs/tokio/pull/1898 - -# 0.2.3 (December 6, 2019) - -### Added - -- read / write integers using `AsyncReadExt` and `AsyncWriteExt` ([#1863]). -- `read_buf` / `write_buf` for reading / writing `Buf` / `BufMut` ([#1881]). -- `TcpStream::poll_peek` - pollable API for performing TCP peek ([#1864]). -- `sync::oneshot::error::TryRecvError` provides variants to detect the error - kind ([#1874]). -- `LocalSet::block_on` accepts `!'static` task ([#1882]). -- `task::JoinError` is now `Sync` ([#1888]). -- impl conversions between `tokio::time::Instant` and - `std::time::Instant` ([#1904]). - -### Fixes - -- calling `spawn_blocking` after runtime shutdown ([#1875]). -- `LocalSet` drop infinite loop ([#1892]). -- `LocalSet` hang under load ([#1905]). 
-- improved documentation ([#1865], [#1866], [#1868], [#1874], [#1876], [#1911]). - -[#1863]: https://github.com/tokio-rs/tokio/pull/1863 -[#1864]: https://github.com/tokio-rs/tokio/pull/1864 -[#1865]: https://github.com/tokio-rs/tokio/pull/1865 -[#1866]: https://github.com/tokio-rs/tokio/pull/1866 -[#1868]: https://github.com/tokio-rs/tokio/pull/1868 -[#1874]: https://github.com/tokio-rs/tokio/pull/1874 -[#1875]: https://github.com/tokio-rs/tokio/pull/1875 -[#1876]: https://github.com/tokio-rs/tokio/pull/1876 -[#1881]: https://github.com/tokio-rs/tokio/pull/1881 -[#1882]: https://github.com/tokio-rs/tokio/pull/1882 -[#1888]: https://github.com/tokio-rs/tokio/pull/1888 -[#1892]: https://github.com/tokio-rs/tokio/pull/1892 -[#1904]: https://github.com/tokio-rs/tokio/pull/1904 -[#1905]: https://github.com/tokio-rs/tokio/pull/1905 -[#1911]: https://github.com/tokio-rs/tokio/pull/1911 - -# 0.2.2 (November 29, 2019) - -### Fixes - -- scheduling with `basic_scheduler` ([#1861]). -- update `spawn` panic message to specify that a task scheduler is required ([#1839]). -- API docs example for `runtime::Builder` to include a task scheduler ([#1841]). -- general documentation ([#1834]). -- building on illumos/solaris ([#1772]). -- panic when dropping `LocalSet` ([#1843]). -- API docs mention the required Cargo features for `Builder::{basic, threaded}_scheduler` ([#1858]). - -### Added - -- impl `Stream` for `signal::unix::Signal` ([#1849]). -- API docs for platform specific behavior of `signal::ctrl_c` and `signal::unix::Signal` ([#1854]). -- API docs for `signal::unix::Signal::{recv, poll_recv}` and `signal::windows::CtrlBreak::{recv, poll_recv}` ([#1854]). -- `File::into_std` and `File::try_into_std` methods ([#1856]). 
- -[#1772]: https://github.com/tokio-rs/tokio/pull/1772 -[#1834]: https://github.com/tokio-rs/tokio/pull/1834 -[#1839]: https://github.com/tokio-rs/tokio/pull/1839 -[#1841]: https://github.com/tokio-rs/tokio/pull/1841 -[#1843]: https://github.com/tokio-rs/tokio/pull/1843 -[#1849]: https://github.com/tokio-rs/tokio/pull/1849 -[#1854]: https://github.com/tokio-rs/tokio/pull/1854 -[#1856]: https://github.com/tokio-rs/tokio/pull/1856 -[#1858]: https://github.com/tokio-rs/tokio/pull/1858 -[#1861]: https://github.com/tokio-rs/tokio/pull/1861 - -# 0.2.1 (November 26, 2019) - -### Fixes - -- API docs for `TcpListener::incoming`, `UnixListener::incoming` ([#1831]). - -### Added - -- `tokio::task::LocalSet` provides a strategy for spawning `!Send` tasks ([#1733]). -- export `tokio::time::Elapsed` ([#1826]). -- impl `AsRawFd`, `AsRawHandle` for `tokio::fs::File` ([#1827]). - -[#1733]: https://github.com/tokio-rs/tokio/pull/1733 -[#1826]: https://github.com/tokio-rs/tokio/pull/1826 -[#1827]: https://github.com/tokio-rs/tokio/pull/1827 -[#1831]: https://github.com/tokio-rs/tokio/pull/1831 - -# 0.2.0 (November 26, 2019) - -A major breaking change. Most implementation and APIs have changed one way or -another. This changelog entry contains a highlight - -### Changed - -- APIs are updated to use `async / await`. -- most `tokio-*` crates are collapsed into this crate. -- Scheduler is rewritten. -- `tokio::spawn` returns a `JoinHandle`. -- A single I/O / timer is used per runtime. -- I/O driver uses a concurrent slab for allocating state. -- components are made available via feature flag. -- Use `bytes` 0.5 -- `tokio::codec` is moved to `tokio-util`. - -### Removed - -- Standalone `timer` and `net` drivers are removed, use `Runtime` instead -- `current_thread` runtime is removed, use `tokio::runtime::Runtime` with - `basic_scheduler` instead. - -# 0.1.21 (May 30, 2019) - -### Changed - -- Bump `tokio-trace-core` version to 0.2 ([#1111]). 
- -[#1111]: https://github.com/tokio-rs/tokio/pull/1111 - -# 0.1.20 (May 14, 2019) - -### Added - -- `tokio::runtime::Builder::panic_handler` allows configuring handling - panics on the runtime ([#1055]). - -[#1055]: https://github.com/tokio-rs/tokio/pull/1055 - -# 0.1.19 (April 22, 2019) - -### Added - -- Re-export `tokio::sync::Mutex` primitive ([#964]). - -[#964]: https://github.com/tokio-rs/tokio/pull/964 - -# 0.1.18 (March 22, 2019) - -### Added - -- `TypedExecutor` re-export and implementations ([#993]). - -[#993]: https://github.com/tokio-rs/tokio/pull/993 - -# 0.1.17 (March 13, 2019) - -### Added - -- Propagate trace subscriber in the runtime ([#966]). - -[#966]: https://github.com/tokio-rs/tokio/pull/966 - -# 0.1.16 (March 1, 2019) - -### Fixed - -- async-await: track latest nightly changes ([#940]). - -### Added - -- `sync::Watch`, a single value broadcast channel ([#922]). -- Async equivalent of read / write file helpers being added to `std` ([#896]). - -[#896]: https://github.com/tokio-rs/tokio/pull/896 -[#922]: https://github.com/tokio-rs/tokio/pull/922 -[#940]: https://github.com/tokio-rs/tokio/pull/940 - -# 0.1.15 (January 24, 2019) - -### Added - -- Re-export tokio-sync APIs ([#839]). -- Stream enumerate combinator ([#832]). - -[#832]: https://github.com/tokio-rs/tokio/pull/832 -[#839]: https://github.com/tokio-rs/tokio/pull/839 - -# 0.1.14 (January 6, 2019) - -- Use feature flags to break up the crate, allowing users to pick & choose - components ([#808]). -- Export `UnixDatagram` and `UnixDatagramFramed` ([#772]). - -[#772]: https://github.com/tokio-rs/tokio/pull/772 -[#808]: https://github.com/tokio-rs/tokio/pull/808 - -# 0.1.13 (November 21, 2018) - -- Fix `Runtime::reactor()` when no tasks are spawned ([#721]). -- `runtime::Builder` no longer uses deprecated methods ([#749]). -- Provide `after_start` and `before_stop` configuration settings for - `Runtime` ([#756]). -- Implement throttle stream combinator ([#736]). 
- -[#721]: https://github.com/tokio-rs/tokio/pull/721 -[#736]: https://github.com/tokio-rs/tokio/pull/736 -[#749]: https://github.com/tokio-rs/tokio/pull/749 -[#756]: https://github.com/tokio-rs/tokio/pull/756 - -# 0.1.12 (October 23, 2018) - -- runtime: expose `keep_alive` on runtime builder ([#676]). -- runtime: create a reactor per worker thread ([#660]). -- codec: fix panic in `LengthDelimitedCodec` ([#682]). -- io: re-export `tokio_io::io::read` function ([#689]). -- runtime: check for executor re-entry in more places ([#708]). - -[#660]: https://github.com/tokio-rs/tokio/pull/660 -[#676]: https://github.com/tokio-rs/tokio/pull/676 -[#682]: https://github.com/tokio-rs/tokio/pull/682 -[#689]: https://github.com/tokio-rs/tokio/pull/689 -[#708]: https://github.com/tokio-rs/tokio/pull/708 - -# 0.1.11 (September 28, 2018) - -- Fix `tokio-async-await` dependency ([#675]). - -[#675]: https://github.com/tokio-rs/tokio/pull/675 - -# 0.1.10 (September 27, 2018) - -- Fix minimal versions - -# 0.1.9 (September 27, 2018) - -- Experimental async/await improvements ([#661]). -- Re-export `TaskExecutor` from `tokio-current-thread` ([#652]). -- Improve `Runtime` builder API ([#645]). -- `tokio::run` panics when called from the context of an executor - ([#646]). -- Introduce `StreamExt` with a `timeout` helper ([#573]). -- Move `length_delimited` into `tokio` ([#575]). -- Re-organize `tokio::net` module ([#548]). -- Re-export `tokio-current-thread::spawn` in current_thread runtime - ([#579]). 
- -[#548]: https://github.com/tokio-rs/tokio/pull/548 -[#573]: https://github.com/tokio-rs/tokio/pull/573 -[#575]: https://github.com/tokio-rs/tokio/pull/575 -[#579]: https://github.com/tokio-rs/tokio/pull/579 -[#645]: https://github.com/tokio-rs/tokio/pull/645 -[#646]: https://github.com/tokio-rs/tokio/pull/646 -[#652]: https://github.com/tokio-rs/tokio/pull/652 -[#661]: https://github.com/tokio-rs/tokio/pull/661 - -# 0.1.8 (August 23, 2018) - -- Extract tokio::executor::current_thread to a sub crate ([#370]) -- Add `Runtime::block_on` ([#398]) -- Add `runtime::current_thread::block_on_all` ([#477]) -- Misc documentation improvements ([#450]) -- Implement `std::error::Error` for error types ([#501]) - -[#370]: https://github.com/tokio-rs/tokio/pull/370 -[#398]: https://github.com/tokio-rs/tokio/pull/398 -[#450]: https://github.com/tokio-rs/tokio/pull/450 -[#477]: https://github.com/tokio-rs/tokio/pull/477 -[#501]: https://github.com/tokio-rs/tokio/pull/501 - -# 0.1.7 (June 6, 2018) - -- Add `Runtime::block_on` for concurrent runtime ([#391]). -- Provide handle to `current_thread::Runtime` that allows spawning tasks from - other threads ([#340]). -- Provide `clock::now()`, a configurable source of time ([#381]). - -[#340]: https://github.com/tokio-rs/tokio/pull/340 -[#381]: https://github.com/tokio-rs/tokio/pull/381 -[#391]: https://github.com/tokio-rs/tokio/pull/391 - -# 0.1.6 (May 2, 2018) - -- Add asynchronous filesystem APIs ([#323]). -- Add "current thread" runtime variant ([#308]). -- `CurrentThread`: Expose inner `Park` instance. -- Improve fairness of `CurrentThread` executor ([#313]). 
- -[#308]: https://github.com/tokio-rs/tokio/pull/308 -[#313]: https://github.com/tokio-rs/tokio/pull/313 -[#323]: https://github.com/tokio-rs/tokio/pull/323 - -# 0.1.5 (March 30, 2018) - -- Provide timer API ([#266]) - -[#266]: https://github.com/tokio-rs/tokio/pull/266 - -# 0.1.4 (March 22, 2018) - -- Fix build on FreeBSD ([#218]) -- Shutdown the Runtime when the handle is dropped ([#214]) -- Set Runtime thread name prefix for worker threads ([#232]) -- Add builder for Runtime ([#234]) -- Extract TCP and UDP types into separate crates ([#224]) -- Optionally support futures 0.2. - -[#214]: https://github.com/tokio-rs/tokio/pull/214 -[#218]: https://github.com/tokio-rs/tokio/pull/218 -[#224]: https://github.com/tokio-rs/tokio/pull/224 -[#232]: https://github.com/tokio-rs/tokio/pull/232 -[#234]: https://github.com/tokio-rs/tokio/pull/234 - -# 0.1.3 (March 09, 2018) - -- Fix `CurrentThread::turn` to block on idle ([#212]). - -[#212]: https://github.com/tokio-rs/tokio/pull/212 - -# 0.1.2 (March 09, 2018) - -- Introduce Tokio Runtime ([#141]) -- Provide `CurrentThread` for more flexible usage of current thread executor ([#141]). -- Add Lio for platforms that support it ([#142]). -- I/O resources now lazily bind to the reactor ([#160]). -- Extract Reactor to dedicated crate ([#169]) -- Add facade to sub crates and add prelude ([#166]). -- Switch TCP/UDP fns to poll\_ -> Poll<...> style ([#175]) - -[#141]: https://github.com/tokio-rs/tokio/pull/141 -[#142]: https://github.com/tokio-rs/tokio/pull/142 -[#160]: https://github.com/tokio-rs/tokio/pull/160 -[#166]: https://github.com/tokio-rs/tokio/pull/166 -[#169]: https://github.com/tokio-rs/tokio/pull/169 -[#175]: https://github.com/tokio-rs/tokio/pull/175 - -# 0.1.1 (February 09, 2018) - -- Doc fixes - -# 0.1.0 (February 07, 2018) - -- Initial crate released based on [RFC](https://github.com/tokio-rs/tokio-rfcs/pull/3). 
diff -Nru s390-tools-2.31.0/rust-vendor/tokio/docs/reactor-refactor.md s390-tools-2.33.1/rust-vendor/tokio/docs/reactor-refactor.md --- s390-tools-2.31.0/rust-vendor/tokio/docs/reactor-refactor.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/docs/reactor-refactor.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,276 +0,0 @@ -# Refactor I/O driver - -Describes changes to the I/O driver for the Tokio 0.3 release. - -## Goals - -* Support `async fn` on I/O types with `&self`. -* Refine the `Registration` API. - -### Non-goals - -* Implement `AsyncRead` / `AsyncWrite` for `&TcpStream` or other reference type. - -## Overview - -Currently, I/O types require `&mut self` for `async` functions. The reason for -this is the task's waker is stored in the I/O resource's internal state -(`ScheduledIo`) instead of in the future returned by the `async` function. -Because of this limitation, I/O types limit the number of wakers to one per -direction (a direction is either read-related events or write-related events). - -Moving the waker from the internal I/O resource's state to the operation's -future enables multiple wakers to be registered per operation. The "intrusive -wake list" strategy used by `Notify` applies to this case, though there are some -concerns unique to the I/O driver. - -## Reworking the `Registration` type - -While `Registration` is made private (per #2728), it remains in Tokio as an -implementation detail backing I/O resources such as `TcpStream`. The API of -`Registration` is updated to support waiting for an arbitrary interest set with -`&self`. This supports concurrent waiters with a different readiness interest. - -```rust -struct Registration { ... } - -// TODO: naming -struct ReadyEvent { - tick: u32, - ready: mio::Ready, -} - -impl Registration { - /// `interest` must be a super set of **all** interest sets specified in - /// the other methods. This is the interest set passed to `mio`. 
- pub fn new(io: &T, interest: mio::Ready) -> io::Result - where T: mio::Evented; - - /// Awaits for any readiness event included in `interest`. Returns a - /// `ReadyEvent` representing the received readiness event. - async fn readiness(&self, interest: mio::Ready) -> io::Result; - - /// Clears resource level readiness represented by the specified `ReadyEvent` - async fn clear_readiness(&self, ready_event: ReadyEvent); -``` - -A new registration is created for a `T: mio::Evented` and a `interest`. This -creates a `ScheduledIo` entry with the I/O driver and registers the resource -with `mio`. - -Because Tokio uses **edge-triggered** notifications, the I/O driver only -receives readiness from the OS once the ready state **changes**. The I/O driver -must track each resource's known readiness state. This helps prevent syscalls -when the process knows the syscall should return with `EWOULDBLOCK`. - -A call to `readiness()` checks if the currently known resource readiness -overlaps with `interest`. If it does, then the `readiness()` immediately -returns. If it does not, then the task waits until the I/O driver receives a -readiness event. - -The pseudocode to perform a TCP read is as follows. - -```rust -async fn read(&self, buf: &mut [u8]) -> io::Result { - loop { - // Await readiness - let event = self.readiness(interest).await?; - - match self.mio_socket.read(buf) { - Ok(v) => return Ok(v), - Err(ref e) if e.kind() == WouldBlock => { - self.clear_readiness(event); - } - Err(e) => return Err(e), - } - } -} -``` - -## Reworking the `ScheduledIo` type - -The `ScheduledIo` type is switched to use an intrusive waker linked list. Each -entry in the linked list includes the `interest` set passed to `readiness()`. - -```rust -#[derive(Debug)] -pub(crate) struct ScheduledIo { - /// Resource's known state packed with other state that must be - /// atomically updated. 
- readiness: AtomicUsize, - - /// Tracks tasks waiting on the resource - waiters: Mutex, -} - -#[derive(Debug)] -struct Waiters { - // List of intrusive waiters. - list: LinkedList, - - /// Waiter used by `AsyncRead` implementations. - reader: Option, - - /// Waiter used by `AsyncWrite` implementations. - writer: Option, -} - -// This struct is contained by the **future** returned by `readiness()`. -#[derive(Debug)] -struct Waiter { - /// Intrusive linked-list pointers - pointers: linked_list::Pointers, - - /// Waker for task waiting on I/O resource - waiter: Option, - - /// Readiness events being waited on. This is - /// the value passed to `readiness()` - interest: mio::Ready, - - /// Should not be `Unpin`. - _p: PhantomPinned, -} -``` - -When an I/O event is received from `mio`, the associated resources' readiness is -updated and the waiter list is iterated. All waiters with `interest` that -overlap the received readiness event are notified. Any waiter with an `interest` -that does not overlap the readiness event remains in the list. - -## Cancel interest on drop - -The future returned by `readiness()` uses an intrusive linked list to store the -waker with `ScheduledIo`. Because `readiness()` can be called concurrently, many -wakers may be stored simultaneously in the list. If the `readiness()` future is -dropped early, it is essential that the waker is removed from the list. This -prevents leaking memory. - -## Race condition - -Consider how many tasks may concurrently attempt I/O operations. This, combined -with how Tokio uses edge-triggered events, can result in a race condition. 
Let's -revisit the TCP read function: - -```rust -async fn read(&self, buf: &mut [u8]) -> io::Result { - loop { - // Await readiness - let event = self.readiness(interest).await?; - - match self.mio_socket.read(buf) { - Ok(v) => return Ok(v), - Err(ref e) if e.kind() == WouldBlock => { - self.clear_readiness(event); - } - Err(e) => return Err(e), - } - } -} -``` - -If care is not taken, if between `mio_socket.read(buf)` returning and -`clear_readiness(event)` is called, a readiness event arrives, the `read()` -function could deadlock. This happens because the readiness event is received, -`clear_readiness()` unsets the readiness event, and on the next iteration, -`readiness().await` will block forever as a new readiness event is not received. - -The current I/O driver handles this condition by always registering the task's -waker before performing the operation. This is not ideal as it will result in -unnecessary task notification. - -Instead, we will use a strategy to prevent clearing readiness if an "unseen" -readiness event has been received. The I/O driver will maintain a "tick" value. -Every time the `mio` `poll()` function is called, the tick is incremented. Each -readiness event has an associated tick. When the I/O driver sets the resource's -readiness, the driver's tick is packed into the atomic `usize`. - -The `ScheduledIo` readiness `AtomicUsize` is structured as: - -``` -| shutdown | generation | driver tick | readiness | -|----------+------------+--------------+-----------| -| 1 bit | 7 bits + 8 bits + 16 bits | -``` - -The `shutdown` and `generation` components exist today. - -The `readiness()` function returns a `ReadyEvent` value. This value includes the -`tick` component read with the resource's readiness value. When -`clear_readiness()` is called, the `ReadyEvent` is provided. Readiness is only -cleared if the current `tick` matches the `tick` included in the `ReadyEvent`. 
-If the tick values do not match, the call to `readiness()` on the next iteration -will not block and the new `tick` is included in the new `ReadyToken.` - -TODO - -## Implementing `AsyncRead` / `AsyncWrite` - -The `AsyncRead` and `AsyncWrite` traits use a "poll" based API. This means that -it is not possible to use an intrusive linked list to track the waker. -Additionally, there is no future associated with the operation which means it is -not possible to cancel interest in the readiness events. - -To implement `AsyncRead` and `AsyncWrite`, `ScheduledIo` includes dedicated -waker values for the read direction and the write direction. These values are -used to store the waker. Specific `interest` is not tracked for `AsyncRead` and -`AsyncWrite` implementations. It is assumed that only events of interest are: - -* Read ready -* Read closed -* Write ready -* Write closed - -Note that "read closed" and "write closed" are only available with Mio 0.7. With -Mio 0.6, things were a bit messy. - -It is only possible to implement `AsyncRead` and `AsyncWrite` for resource types -themselves and not for `&Resource`. Implementing the traits for `&Resource` -would permit concurrent operations to the resource. Because only a single waker -is stored per direction, any concurrent usage would result in deadlocks. An -alternate implementation would call for a `Vec` but this would result in -memory leaks. - -## Enabling reads and writes for `&TcpStream` - -Instead of implementing `AsyncRead` and `AsyncWrite` for `&TcpStream`, a new -function is added to `TcpStream`. - -```rust -impl TcpStream { - /// Naming TBD - fn by_ref(&self) -> TcpStreamRef<'_>; -} - -struct TcpStreamRef<'a> { - stream: &'a TcpStream, - - // `Waiter` is the node in the intrusive waiter linked-list - read_waiter: Waiter, - write_waiter: Waiter, -} -``` - -Now, `AsyncRead` and `AsyncWrite` can be implemented on `TcpStreamRef<'a>`. When -the `TcpStreamRef` is dropped, all associated waker resources are cleaned up. 
- -### Removing all the `split()` functions - -With `TcpStream::by_ref()`, `TcpStream::split()` is no longer needed. Instead, -it is possible to do something as follows. - -```rust -let rd = my_stream.by_ref(); -let wr = my_stream.by_ref(); - -select! { - // use `rd` and `wr` in separate branches. -} -``` - -It is also possible to store a `TcpStream` in an `Arc`. - -```rust -let arc_stream = Arc::new(my_tcp_stream); -let n = arc_stream.by_ref().read(buf).await?; -``` diff -Nru s390-tools-2.31.0/rust-vendor/tokio/external-types.toml s390-tools-2.33.1/rust-vendor/tokio/external-types.toml --- s390-tools-2.31.0/rust-vendor/tokio/external-types.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/external-types.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -# This config file is for the `cargo-check-external-types` tool that is run in CI. - -# The following are types that are allowed to be exposed in Tokio's public API. -# The standard library is allowed by default. 
-allowed_external_types = [ - "bytes::buf::buf_impl::Buf", - "bytes::buf::buf_mut::BufMut", - - "tokio_macros::*", -] - diff -Nru s390-tools-2.31.0/rust-vendor/tokio/LICENSE s390-tools-2.33.1/rust-vendor/tokio/LICENSE --- s390-tools-2.31.0/rust-vendor/tokio/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2023 Tokio Contributors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/tokio/README.md s390-tools-2.33.1/rust-vendor/tokio/README.md --- s390-tools-2.31.0/rust-vendor/tokio/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,250 +0,0 @@ -# Tokio - -A runtime for writing reliable, asynchronous, and slim applications with -the Rust programming language. It is: - -* **Fast**: Tokio's zero-cost abstractions give you bare-metal - performance. 
- -* **Reliable**: Tokio leverages Rust's ownership, type system, and - concurrency model to reduce bugs and ensure thread safety. - -* **Scalable**: Tokio has a minimal footprint, and handles backpressure - and cancellation naturally. - -[![Crates.io][crates-badge]][crates-url] -[![MIT licensed][mit-badge]][mit-url] -[![Build Status][actions-badge]][actions-url] -[![Discord chat][discord-badge]][discord-url] - -[crates-badge]: https://img.shields.io/crates/v/tokio.svg -[crates-url]: https://crates.io/crates/tokio -[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg -[mit-url]: https://github.com/tokio-rs/tokio/blob/master/LICENSE -[actions-badge]: https://github.com/tokio-rs/tokio/workflows/CI/badge.svg -[actions-url]: https://github.com/tokio-rs/tokio/actions?query=workflow%3ACI+branch%3Amaster -[discord-badge]: https://img.shields.io/discord/500028886025895936.svg?logo=discord&style=flat-square -[discord-url]: https://discord.gg/tokio - -[Website](https://tokio.rs) | -[Guides](https://tokio.rs/tokio/tutorial) | -[API Docs](https://docs.rs/tokio/latest/tokio) | -[Chat](https://discord.gg/tokio) - -## Overview - -Tokio is an event-driven, non-blocking I/O platform for writing -asynchronous applications with the Rust programming language. At a high -level, it provides a few major components: - -* A multithreaded, work-stealing based task [scheduler]. -* A reactor backed by the operating system's event queue (epoll, kqueue, - IOCP, etc...). -* Asynchronous [TCP and UDP][net] sockets. - -These components provide the runtime components necessary for building -an asynchronous application. - -[net]: https://docs.rs/tokio/latest/tokio/net/index.html -[scheduler]: https://docs.rs/tokio/latest/tokio/runtime/index.html - -## Example - -A basic TCP echo server with Tokio. 
- -Make sure you activated the full features of the tokio crate on Cargo.toml: - -```toml -[dependencies] -tokio = { version = "1.33.0", features = ["full"] } -``` -Then, on your main.rs: - -```rust,no_run -use tokio::net::TcpListener; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - let listener = TcpListener::bind("127.0.0.1:8080").await?; - - loop { - let (mut socket, _) = listener.accept().await?; - - tokio::spawn(async move { - let mut buf = [0; 1024]; - - // In a loop, read data from the socket and write the data back. - loop { - let n = match socket.read(&mut buf).await { - // socket closed - Ok(n) if n == 0 => return, - Ok(n) => n, - Err(e) => { - eprintln!("failed to read from socket; err = {:?}", e); - return; - } - }; - - // Write the data back - if let Err(e) = socket.write_all(&buf[0..n]).await { - eprintln!("failed to write to socket; err = {:?}", e); - return; - } - } - }); - } -} -``` - -More examples can be found [here][examples]. For a larger "real world" example, see the -[mini-redis] repository. - -[examples]: https://github.com/tokio-rs/tokio/tree/master/examples -[mini-redis]: https://github.com/tokio-rs/mini-redis/ - -To see a list of the available features flags that can be enabled, check our -[docs][feature-flag-docs]. - -## Getting Help - -First, see if the answer to your question can be found in the [Guides] or the -[API documentation]. If the answer is not there, there is an active community in -the [Tokio Discord server][chat]. We would be happy to try to answer your -question. You can also ask your question on [the discussions page][discussions]. - -[Guides]: https://tokio.rs/tokio/tutorial -[API documentation]: https://docs.rs/tokio/latest/tokio -[chat]: https://discord.gg/tokio -[discussions]: https://github.com/tokio-rs/tokio/discussions -[feature-flag-docs]: https://docs.rs/tokio/#feature-flags - -## Contributing - -:balloon: Thanks for your help improving the project! 
We are so happy to have -you! We have a [contributing guide][guide] to help you get involved in the Tokio -project. - -[guide]: https://github.com/tokio-rs/tokio/blob/master/CONTRIBUTING.md - -## Related Projects - -In addition to the crates in this repository, the Tokio project also maintains -several other libraries, including: - -* [`hyper`]: A fast and correct HTTP/1.1 and HTTP/2 implementation for Rust. - -* [`tonic`]: A gRPC over HTTP/2 implementation focused on high performance, interoperability, and flexibility. - -* [`warp`]: A super-easy, composable, web server framework for warp speeds. - -* [`tower`]: A library of modular and reusable components for building robust networking clients and servers. - -* [`tracing`] (formerly `tokio-trace`): A framework for application-level tracing and async-aware diagnostics. - -* [`rdbc`]: A Rust database connectivity library for MySQL, Postgres and SQLite. - -* [`mio`]: A low-level, cross-platform abstraction over OS I/O APIs that powers - `tokio`. - -* [`bytes`]: Utilities for working with bytes, including efficient byte buffers. - -* [`loom`]: A testing tool for concurrent Rust code - -[`warp`]: https://github.com/seanmonstar/warp -[`hyper`]: https://github.com/hyperium/hyper -[`tonic`]: https://github.com/hyperium/tonic -[`tower`]: https://github.com/tower-rs/tower -[`loom`]: https://github.com/tokio-rs/loom -[`rdbc`]: https://github.com/tokio-rs/rdbc -[`tracing`]: https://github.com/tokio-rs/tracing -[`mio`]: https://github.com/tokio-rs/mio -[`bytes`]: https://github.com/tokio-rs/bytes - -## Changelog - -The Tokio repository contains multiple crates. Each crate has its own changelog. 
- - * `tokio` - [view changelog](https://github.com/tokio-rs/tokio/blob/master/tokio/CHANGELOG.md) - * `tokio-util` - [view changelog](https://github.com/tokio-rs/tokio/blob/master/tokio-util/CHANGELOG.md) - * `tokio-stream` - [view changelog](https://github.com/tokio-rs/tokio/blob/master/tokio-stream/CHANGELOG.md) - * `tokio-macros` - [view changelog](https://github.com/tokio-rs/tokio/blob/master/tokio-macros/CHANGELOG.md) - * `tokio-test` - [view changelog](https://github.com/tokio-rs/tokio/blob/master/tokio-test/CHANGELOG.md) - -## Supported Rust Versions - - - -Tokio will keep a rolling MSRV (minimum supported rust version) policy of **at -least** 6 months. When increasing the MSRV, the new Rust version must have been -released at least six months ago. The current MSRV is 1.63. - -Note that the MSRV is not increased automatically, and only as part of a minor -release. The MSRV history for past minor releases can be found below: - - * 1.30 to now - Rust 1.63 - * 1.27 to 1.29 - Rust 1.56 - * 1.17 to 1.26 - Rust 1.49 - * 1.15 to 1.16 - Rust 1.46 - * 1.0 to 1.14 - Rust 1.45 - -Note that although we try to avoid the situation where a dependency transitively -increases the MSRV of Tokio, we do not guarantee that this does not happen. -However, every minor release will have some set of versions of dependencies that -works with the MSRV of that minor release. - -## Release schedule - -Tokio doesn't follow a fixed release schedule, but we typically make one to two -new minor releases each month. We make patch releases for bugfixes as necessary. - -## Bug patching policy - -For the purposes of making patch releases with bugfixes, we have designated -certain minor releases as LTS (long term support) releases. Whenever a bug -warrants a patch release with a fix for the bug, it will be backported and -released as a new patch release for each LTS minor version. Our current LTS -releases are: - - * `1.20.x` - LTS release until September 2023. 
(MSRV 1.49) - * `1.25.x` - LTS release until March 2024. (MSRV 1.49) - * `1.32.x` - LTS release until September 2024 (MSRV 1.63) - -Each LTS release will continue to receive backported fixes for at least a year. -If you wish to use a fixed minor release in your project, we recommend that you -use an LTS release. - -To use a fixed minor version, you can specify the version with a tilde. For -example, to specify that you wish to use the newest `1.25.x` patch release, you -can use the following dependency specification: -```text -tokio = { version = "~1.25", features = [...] } -``` - -### Previous LTS releases - - * `1.8.x` - LTS release until February 2022. - * `1.14.x` - LTS release until June 2022. - * `1.18.x` - LTS release until June 2023. - -## License - -This project is licensed under the [MIT license]. - -[MIT license]: https://github.com/tokio-rs/tokio/blob/master/LICENSE - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in Tokio by you, shall be licensed as MIT, without any additional -terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/blocking.rs s390-tools-2.33.1/rust-vendor/tokio/src/blocking.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/blocking.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/blocking.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,63 +0,0 @@ -cfg_rt! { - pub(crate) use crate::runtime::spawn_blocking; - - cfg_fs! { - #[allow(unused_imports)] - pub(crate) use crate::runtime::spawn_mandatory_blocking; - } - - pub(crate) use crate::task::JoinHandle; -} - -cfg_not_rt! { - use std::fmt; - use std::future::Future; - use std::pin::Pin; - use std::task::{Context, Poll}; - - pub(crate) fn spawn_blocking(_f: F) -> JoinHandle - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - assert_send_sync::>>(); - panic!("requires the `rt` Tokio feature flag") - } - - cfg_fs! 
{ - pub(crate) fn spawn_mandatory_blocking(_f: F) -> Option> - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - panic!("requires the `rt` Tokio feature flag") - } - } - - pub(crate) struct JoinHandle { - _p: std::marker::PhantomData, - } - - unsafe impl Send for JoinHandle {} - unsafe impl Sync for JoinHandle {} - - impl Future for JoinHandle { - type Output = Result; - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - unreachable!() - } - } - - impl fmt::Debug for JoinHandle - where - T: fmt::Debug, - { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("JoinHandle").finish() - } - } - - fn assert_send_sync() { - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/doc/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/doc/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/doc/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/doc/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -//! Types which are documented locally in the Tokio crate, but does not actually -//! live here. -//! -//! **Note** this module is only visible on docs.rs, you cannot use it directly -//! in your own code. - -/// The name of a type which is not defined here. -/// -/// This is typically used as an alias for another type, like so: -/// -/// ```rust,ignore -/// /// See [some::other::location](https://example.com). -/// type DEFINED_ELSEWHERE = crate::doc::NotDefinedHere; -/// ``` -/// -/// This type is uninhabitable like the [`never` type] to ensure that no one -/// will ever accidentally use it. 
-/// -/// [`never` type]: https://doc.rust-lang.org/std/primitive.never.html -#[derive(Debug)] -pub enum NotDefinedHere {} - -pub mod os; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/doc/os.rs s390-tools-2.33.1/rust-vendor/tokio/src/doc/os.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/doc/os.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/doc/os.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,68 +0,0 @@ -//! See [std::os](https://doc.rust-lang.org/std/os/index.html). - -/// Platform-specific extensions to `std` for Windows. -/// -/// See [std::os::windows](https://doc.rust-lang.org/std/os/windows/index.html). -pub mod windows { - /// Windows-specific extensions to general I/O primitives. - /// - /// See [std::os::windows::io](https://doc.rust-lang.org/std/os/windows/io/index.html). - pub mod io { - /// See [std::os::windows::io::RawHandle](https://doc.rust-lang.org/std/os/windows/io/type.RawHandle.html) - pub type RawHandle = crate::doc::NotDefinedHere; - - /// See [std::os::windows::io::OwnedHandle](https://doc.rust-lang.org/std/os/windows/io/struct.OwnedHandle.html) - pub type OwnedHandle = crate::doc::NotDefinedHere; - - /// See [std::os::windows::io::AsRawHandle](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html) - pub trait AsRawHandle { - /// See [std::os::windows::io::AsRawHandle::as_raw_handle](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html#tymethod.as_raw_handle) - fn as_raw_handle(&self) -> RawHandle; - } - - /// See [std::os::windows::io::FromRawHandle](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html) - pub trait FromRawHandle { - /// See [std::os::windows::io::FromRawHandle::from_raw_handle](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html#tymethod.from_raw_handle) - unsafe fn from_raw_handle(handle: RawHandle) -> Self; - } - - /// See [std::os::windows::io::RawSocket](https://doc.rust-lang.org/std/os/windows/io/type.RawSocket.html) - pub 
type RawSocket = crate::doc::NotDefinedHere; - - /// See [std::os::windows::io::AsRawSocket](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html) - pub trait AsRawSocket { - /// See [std::os::windows::io::AsRawSocket::as_raw_socket](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html#tymethod.as_raw_socket) - fn as_raw_socket(&self) -> RawSocket; - } - - /// See [std::os::windows::io::FromRawSocket](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html) - pub trait FromRawSocket { - /// See [std::os::windows::io::FromRawSocket::from_raw_socket](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html#tymethod.from_raw_socket) - unsafe fn from_raw_socket(sock: RawSocket) -> Self; - } - - /// See [std::os::windows::io::IntoRawSocket](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html) - pub trait IntoRawSocket { - /// See [std::os::windows::io::IntoRawSocket::into_raw_socket](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html#tymethod.into_raw_socket) - fn into_raw_socket(self) -> RawSocket; - } - - /// See [std::os::windows::io::BorrowedHandle](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedHandle.html) - pub type BorrowedHandle<'handle> = crate::doc::NotDefinedHere; - - /// See [std::os::windows::io::AsHandle](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html) - pub trait AsHandle { - /// See [std::os::windows::io::AsHandle::as_handle](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html#tymethod.as_handle) - fn as_handle(&self) -> BorrowedHandle<'_>; - } - - /// See [std::os::windows::io::BorrowedSocket](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedSocket.html) - pub type BorrowedSocket<'socket> = crate::doc::NotDefinedHere; - - /// See [std::os::windows::io::AsSocket](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html) - pub trait AsSocket { - /// See 
[std::os::windows::io::AsSocket::as_socket](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html#tymethod.as_socket) - fn as_socket(&self) -> BorrowedSocket<'_>; - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/canonicalize.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/canonicalize.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/canonicalize.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/canonicalize.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,51 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::{Path, PathBuf}; - -/// Returns the canonical, absolute form of a path with all intermediate -/// components normalized and symbolic links resolved. -/// -/// This is an async version of [`std::fs::canonicalize`][std] -/// -/// [std]: std::fs::canonicalize -/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `realpath` function on Unix -/// and the `CreateFile` and `GetFinalPathNameByHandle` functions on Windows. -/// Note that, this [may change in the future][changes]. -/// -/// On Windows, this converts the path to use [extended length path][path] -/// syntax, which allows your program to use longer path names, but means you -/// can only join backslash-delimited paths to it, and it may be incompatible -/// with other applications (if passed to the application on the command-line, -/// or written to a file another application may read). -/// -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// [path]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * `path` does not exist. -/// * A non-final component in path is not a directory. 
-/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// use std::io; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// let path = fs::canonicalize("../a/../foo.txt").await?; -/// Ok(()) -/// } -/// ``` -pub async fn canonicalize(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::canonicalize(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/copy.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/copy.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/copy.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/copy.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -use crate::fs::asyncify; -use std::path::Path; - -/// Copies the contents of one file to another. This function will also copy the permission bits -/// of the original file to the destination file. -/// This function will overwrite the contents of to. -/// -/// This is the async equivalent of [`std::fs::copy`][std]. -/// -/// [std]: fn@std::fs::copy -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// # async fn dox() -> std::io::Result<()> { -/// fs::copy("foo.txt", "bar.txt").await?; -/// # Ok(()) -/// # } -/// ``` - -pub async fn copy(from: impl AsRef, to: impl AsRef) -> Result { - let from = from.as_ref().to_owned(); - let to = to.as_ref().to_owned(); - asyncify(|| std::fs::copy(from, to)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/create_dir_all.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/create_dir_all.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/create_dir_all.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/create_dir_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,53 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Recursively creates a directory and all of its parent components if they -/// are missing. 
-/// -/// This is an async version of [`std::fs::create_dir_all`][std] -/// -/// [std]: std::fs::create_dir_all -/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `mkdir` function on Unix -/// and the `CreateDirectory` function on Windows. -/// Note that, this [may change in the future][changes]. -/// -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * If any directory in the path specified by `path` does not already exist -/// and it could not be created otherwise. The specific error conditions for -/// when a directory is being created (after it is determined to not exist) are -/// outlined by [`fs::create_dir`]. -/// -/// Notable exception is made for situations where any of the directories -/// specified in the `path` could not be created as it was being created concurrently. -/// Such cases are considered to be successful. That is, calling `create_dir_all` -/// concurrently from multiple threads or processes is guaranteed not to fail -/// due to a race condition with itself. 
-/// -/// [`fs::create_dir`]: std::fs::create_dir -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// #[tokio::main] -/// async fn main() -> std::io::Result<()> { -/// fs::create_dir_all("/some/dir").await?; -/// Ok(()) -/// } -/// ``` -pub async fn create_dir_all(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::create_dir_all(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/create_dir.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/create_dir.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/create_dir.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/create_dir.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,52 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new, empty directory at the provided path. -/// -/// This is an async version of [`std::fs::create_dir`][std] -/// -/// [std]: std::fs::create_dir -/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `mkdir` function on Unix -/// and the `CreateDirectory` function on Windows. -/// Note that, this [may change in the future][changes]. -/// -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// -/// **NOTE**: If a parent of the given path doesn't exist, this function will -/// return an error. To create a directory and all its missing parents at the -/// same time, use the [`create_dir_all`] function. -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * User lacks permissions to create directory at `path`. -/// * A parent of the given path doesn't exist. (To create a directory and all -/// its missing parents at the same time, use the [`create_dir_all`] -/// function.) -/// * `path` already exists. 
-/// -/// [`create_dir_all`]: super::create_dir_all() -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// use std::io; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// fs::create_dir("/some/dir").await?; -/// Ok(()) -/// } -/// ``` -pub async fn create_dir(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::create_dir(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/dir_builder.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/dir_builder.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/dir_builder.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/dir_builder.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,137 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// A builder for creating directories in various manners. -/// -/// This is a specialized version of [`std::fs::DirBuilder`] for usage on -/// the Tokio runtime. -/// -/// [std::fs::DirBuilder]: std::fs::DirBuilder -#[derive(Debug, Default)] -pub struct DirBuilder { - /// Indicates whether to create parent directories if they are missing. - recursive: bool, - - /// Sets the Unix mode for newly created directories. - #[cfg(unix)] - pub(super) mode: Option, -} - -impl DirBuilder { - /// Creates a new set of options with default mode/security settings for all - /// platforms and also non-recursive. - /// - /// This is an async version of [`std::fs::DirBuilder::new`][std] - /// - /// [std]: std::fs::DirBuilder::new - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::DirBuilder; - /// - /// let builder = DirBuilder::new(); - /// ``` - pub fn new() -> Self { - Default::default() - } - - /// Indicates whether to create directories recursively (including all parent directories). - /// Parents that do not exist are created with the same security and permissions settings. - /// - /// This option defaults to `false`. 
- /// - /// This is an async version of [`std::fs::DirBuilder::recursive`][std] - /// - /// [std]: std::fs::DirBuilder::recursive - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::DirBuilder; - /// - /// let mut builder = DirBuilder::new(); - /// builder.recursive(true); - /// ``` - pub fn recursive(&mut self, recursive: bool) -> &mut Self { - self.recursive = recursive; - self - } - - /// Creates the specified directory with the configured options. - /// - /// It is considered an error if the directory already exists unless - /// recursive mode is enabled. - /// - /// This is an async version of [`std::fs::DirBuilder::create`][std] - /// - /// [std]: std::fs::DirBuilder::create - /// - /// # Errors - /// - /// An error will be returned under the following circumstances: - /// - /// * Path already points to an existing file. - /// * Path already points to an existing directory and the mode is - /// non-recursive. - /// * The calling process doesn't have permissions to create the directory - /// or its missing parents. - /// * Other I/O error occurred. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::DirBuilder; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// DirBuilder::new() - /// .recursive(true) - /// .create("/tmp/foo/bar/baz") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub async fn create(&self, path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - let mut builder = std::fs::DirBuilder::new(); - builder.recursive(self.recursive); - - #[cfg(unix)] - { - if let Some(mode) = self.mode { - std::os::unix::fs::DirBuilderExt::mode(&mut builder, mode); - } - } - - asyncify(move || builder.create(path)).await - } -} - -feature! { - #![unix] - - impl DirBuilder { - /// Sets the mode to create new directories with. - /// - /// This option defaults to 0o777. 
- /// - /// # Examples - /// - /// - /// ```no_run - /// use tokio::fs::DirBuilder; - /// - /// let mut builder = DirBuilder::new(); - /// builder.mode(0o775); - /// ``` - pub fn mode(&mut self, mode: u32) -> &mut Self { - self.mode = Some(mode); - self - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/file/tests.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/file/tests.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/file/tests.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/file/tests.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,978 +0,0 @@ -use super::*; -use crate::{ - fs::mocks::*, - io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt}, -}; -use mockall::{predicate::eq, Sequence}; -use tokio_test::{assert_pending, assert_ready_err, assert_ready_ok, task}; - -const HELLO: &[u8] = b"hello world..."; -const FOO: &[u8] = b"foo bar baz..."; - -#[test] -fn open_read() { - let mut file = MockFile::default(); - file.expect_inner_read().once().returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - let mut file = File::from_std(file); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_eq!(0, pool::len()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - - pool::run_one(); - - assert!(t.is_woken()); - - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, HELLO.len()); - assert_eq!(&buf[..n], HELLO); -} - -#[test] -fn read_twice_before_dispatch() { - let mut file = MockFile::default(); - file.expect_inner_read().once().returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - let mut file = File::from_std(file); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - assert_pending!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - assert!(t.is_woken()); - - let n = assert_ready_ok!(t.poll()); - assert_eq!(&buf[..n], HELLO); -} - -#[test] 
-fn read_with_smaller_buf() { - let mut file = MockFile::default(); - file.expect_inner_read().once().returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - - let mut file = File::from_std(file); - - { - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - } - - pool::run_one(); - - { - let mut buf = [0; 4]; - let mut t = task::spawn(file.read(&mut buf)); - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, 4); - assert_eq!(&buf[..], &HELLO[..n]); - } - - // Calling again immediately succeeds with the rest of the buffer - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, 10); - assert_eq!(&buf[..n], &HELLO[4..]); - - assert_eq!(0, pool::len()); -} - -#[test] -fn read_with_bigger_buf() { - let mut seq = Sequence::new(); - let mut file = MockFile::default(); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..4].copy_from_slice(&HELLO[..4]); - Ok(4) - }); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..HELLO.len() - 4].copy_from_slice(&HELLO[4..]); - Ok(HELLO.len() - 4) - }); - - let mut file = File::from_std(file); - - { - let mut buf = [0; 4]; - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - } - - pool::run_one(); - - { - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, 4); - assert_eq!(&buf[..n], &HELLO[..n]); - } - - // Calling again immediately succeeds with the rest of the buffer - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, 10); - assert_eq!(&buf[..n], &HELLO[4..]); - - assert_eq!(0, pool::len()); -} - -#[test] -fn 
read_err_then_read_success() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|_| Err(io::ErrorKind::Other.into())); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - - let mut file = File::from_std(file); - - { - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - assert_ready_err!(t.poll()); - } - - { - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - let n = assert_ready_ok!(t.poll()); - - assert_eq!(n, HELLO.len()); - assert_eq!(&buf[..n], HELLO); - } -} - -#[test] -fn open_write() { - let mut file = MockFile::default(); - file.expect_inner_write() - .once() - .with(eq(HELLO)) - .returning(|buf| Ok(buf.len())); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - - assert_eq!(0, pool::len()); - assert_ready_ok!(t.poll()); - - assert_eq!(1, pool::len()); - - pool::run_one(); - - assert!(!t.is_woken()); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn flush_while_idle() { - let file = MockFile::default(); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -#[cfg_attr(miri, ignore)] // takes a really long time with miri -fn read_with_buffer_larger_than_max() { - // Chunks - let chunk_a = crate::io::blocking::MAX_BUF; - let chunk_b = chunk_a * 2; - let chunk_c = chunk_a * 3; - let chunk_d = chunk_a * 4; - - assert_eq!(chunk_d / 1024 / 1024, 8); - - let mut data = vec![]; - for i in 0..(chunk_d - 1) { - data.push((i % 151) as u8); - } - let data = Arc::new(data); - let d0 = data.clone(); - let d1 = data.clone(); - let d2 = data.clone(); - let d3 = data.clone(); - - 
let mut seq = Sequence::new(); - let mut file = MockFile::default(); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(move |buf| { - buf[0..chunk_a].copy_from_slice(&d0[0..chunk_a]); - Ok(chunk_a) - }); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(move |buf| { - buf[..chunk_a].copy_from_slice(&d1[chunk_a..chunk_b]); - Ok(chunk_b - chunk_a) - }); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(move |buf| { - buf[..chunk_a].copy_from_slice(&d2[chunk_b..chunk_c]); - Ok(chunk_c - chunk_b) - }); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(move |buf| { - buf[..chunk_a - 1].copy_from_slice(&d3[chunk_c..]); - Ok(chunk_a - 1) - }); - let mut file = File::from_std(file); - - let mut actual = vec![0; chunk_d]; - let mut pos = 0; - - while pos < data.len() { - let mut t = task::spawn(file.read(&mut actual[pos..])); - - assert_pending!(t.poll()); - pool::run_one(); - assert!(t.is_woken()); - - let n = assert_ready_ok!(t.poll()); - assert!(n <= chunk_a); - - pos += n; - } - - assert_eq!(&data[..], &actual[..data.len()]); -} - -#[test] -#[cfg_attr(miri, ignore)] // takes a really long time with miri -fn write_with_buffer_larger_than_max() { - // Chunks - let chunk_a = crate::io::blocking::MAX_BUF; - let chunk_b = chunk_a * 2; - let chunk_c = chunk_a * 3; - let chunk_d = chunk_a * 4; - - assert_eq!(chunk_d / 1024 / 1024, 8); - - let mut data = vec![]; - for i in 0..(chunk_d - 1) { - data.push((i % 151) as u8); - } - let data = Arc::new(data); - let d0 = data.clone(); - let d1 = data.clone(); - let d2 = data.clone(); - let d3 = data.clone(); - - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .withf(move |buf| buf == &d0[0..chunk_a]) - .returning(|buf| Ok(buf.len())); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .withf(move |buf| buf == &d1[chunk_a..chunk_b]) - 
.returning(|buf| Ok(buf.len())); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .withf(move |buf| buf == &d2[chunk_b..chunk_c]) - .returning(|buf| Ok(buf.len())); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .withf(move |buf| buf == &d3[chunk_c..chunk_d - 1]) - .returning(|buf| Ok(buf.len())); - - let mut file = File::from_std(file); - - let mut rem = &data[..]; - - let mut first = true; - - while !rem.is_empty() { - let mut task = task::spawn(file.write(rem)); - - if !first { - assert_pending!(task.poll()); - pool::run_one(); - assert!(task.is_woken()); - } - - first = false; - - let n = assert_ready_ok!(task.poll()); - - rem = &rem[n..]; - } - - pool::run_one(); -} - -#[test] -fn write_twice_before_dispatch() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(HELLO)) - .returning(|buf| Ok(buf.len())); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(FOO)) - .returning(|buf| Ok(buf.len())); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.write(FOO)); - assert_pending!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - assert!(t.is_woken()); - - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.flush()); - assert_pending!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn incomplete_read_followed_by_write() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - file.expect_inner_seek() - .once() - .with(eq(SeekFrom::Current(-(HELLO.len() as i64)))) - .in_sequence(&mut seq) - .returning(|_| Ok(0)); - file.expect_inner_write() - 
.once() - .with(eq(FOO)) - .returning(|_| Ok(FOO.len())); - - let mut file = File::from_std(file); - - let mut buf = [0; 32]; - - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_ok!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn incomplete_partial_read_followed_by_write() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - file.expect_inner_seek() - .once() - .in_sequence(&mut seq) - .with(eq(SeekFrom::Current(-10))) - .returning(|_| Ok(0)); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(FOO)) - .returning(|_| Ok(FOO.len())); - - let mut file = File::from_std(file); - - let mut buf = [0; 32]; - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - let mut buf = [0; 4]; - let mut t = task::spawn(file.read(&mut buf)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_ok!(t.poll()); - - assert_eq!(pool::len(), 1); - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn incomplete_read_followed_by_flush() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - file.expect_inner_seek() - .once() - .in_sequence(&mut seq) - .with(eq(SeekFrom::Current(-(HELLO.len() as i64)))) - .returning(|_| Ok(0)); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(FOO)) - .returning(|_| Ok(FOO.len())); - - let mut file = File::from_std(file); - - 
let mut buf = [0; 32]; - - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); -} - -#[test] -fn incomplete_flush_followed_by_write() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(HELLO)) - .returning(|_| Ok(HELLO.len())); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(FOO)) - .returning(|_| Ok(FOO.len())); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - let n = assert_ready_ok!(t.poll()); - assert_eq!(n, HELLO.len()); - - let mut t = task::spawn(file.flush()); - assert_pending!(t.poll()); - - // TODO: Move under write - pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn read_err() { - let mut file = MockFile::default(); - file.expect_inner_read() - .once() - .returning(|_| Err(io::ErrorKind::Other.into())); - - let mut file = File::from_std(file); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - - pool::run_one(); - assert!(t.is_woken()); - - assert_ready_err!(t.poll()); -} - -#[test] -fn write_write_err() { - let mut file = MockFile::default(); - file.expect_inner_write() - .once() - .returning(|_| Err(io::ErrorKind::Other.into())); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_err!(t.poll()); -} - -#[test] -fn write_read_write_err() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - 
file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .returning(|_| Err(io::ErrorKind::Other.into())); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - assert_ready_err!(t.poll()); -} - -#[test] -fn write_read_flush_err() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .returning(|_| Err(io::ErrorKind::Other.into())); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - let mut buf = [0; 1024]; - let mut t = task::spawn(file.read(&mut buf)); - - assert_pending!(t.poll()); - - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_err!(t.poll()); -} - -#[test] -fn write_seek_write_err() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .returning(|_| Err(io::ErrorKind::Other.into())); - file.expect_inner_seek() - .once() - .with(eq(SeekFrom::Start(0))) - .in_sequence(&mut seq) - .returning(|_| Ok(0)); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - { - let mut t = task::spawn(file.seek(SeekFrom::Start(0))); - assert_pending!(t.poll()); - } - - pool::run_one(); - - let mut t = task::spawn(file.write(FOO)); - 
assert_ready_err!(t.poll()); -} - -#[test] -fn write_seek_flush_err() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .returning(|_| Err(io::ErrorKind::Other.into())); - file.expect_inner_seek() - .once() - .with(eq(SeekFrom::Start(0))) - .in_sequence(&mut seq) - .returning(|_| Ok(0)); - - let mut file = File::from_std(file); - - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - pool::run_one(); - - { - let mut t = task::spawn(file.seek(SeekFrom::Start(0))); - assert_pending!(t.poll()); - } - - pool::run_one(); - - let mut t = task::spawn(file.flush()); - assert_ready_err!(t.poll()); -} - -#[test] -fn sync_all_ordered_after_write() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(HELLO)) - .returning(|_| Ok(HELLO.len())); - file.expect_sync_all().once().returning(|| Ok(())); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.sync_all()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn sync_all_err_ordered_after_write() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(HELLO)) - .returning(|_| Ok(HELLO.len())); - file.expect_sync_all() - .once() - .returning(|| Err(io::ErrorKind::Other.into())); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.sync_all()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - 
assert!(t.is_woken()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_err!(t.poll()); -} - -#[test] -fn sync_data_ordered_after_write() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(HELLO)) - .returning(|_| Ok(HELLO.len())); - file.expect_sync_data().once().returning(|| Ok(())); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.sync_data()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn sync_data_err_ordered_after_write() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .with(eq(HELLO)) - .returning(|_| Ok(HELLO.len())); - file.expect_sync_data() - .once() - .returning(|| Err(io::ErrorKind::Other.into())); - - let mut file = File::from_std(file); - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - - let mut t = task::spawn(file.sync_data()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_pending!(t.poll()); - - assert_eq!(1, pool::len()); - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_err!(t.poll()); -} - -#[test] -fn open_set_len_ok() { - let mut file = MockFile::default(); - file.expect_set_len().with(eq(123)).returning(|_| Ok(())); - - let file = File::from_std(file); - let mut t = task::spawn(file.set_len(123)); - - assert_pending!(t.poll()); - - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_ok!(t.poll()); -} - -#[test] -fn open_set_len_err() { - let mut file = 
MockFile::default(); - file.expect_set_len() - .with(eq(123)) - .returning(|_| Err(io::ErrorKind::Other.into())); - - let file = File::from_std(file); - let mut t = task::spawn(file.set_len(123)); - - assert_pending!(t.poll()); - - pool::run_one(); - - assert!(t.is_woken()); - assert_ready_err!(t.poll()); -} - -#[test] -fn partial_read_set_len_ok() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..HELLO.len()].copy_from_slice(HELLO); - Ok(HELLO.len()) - }); - file.expect_inner_seek() - .once() - .with(eq(SeekFrom::Current(-(HELLO.len() as i64)))) - .in_sequence(&mut seq) - .returning(|_| Ok(0)); - file.expect_set_len() - .once() - .in_sequence(&mut seq) - .with(eq(123)) - .returning(|_| Ok(())); - file.expect_inner_read() - .once() - .in_sequence(&mut seq) - .returning(|buf| { - buf[0..FOO.len()].copy_from_slice(FOO); - Ok(FOO.len()) - }); - - let mut buf = [0; 32]; - let mut file = File::from_std(file); - - { - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - } - - pool::run_one(); - - { - let mut t = task::spawn(file.set_len(123)); - - assert_pending!(t.poll()); - pool::run_one(); - assert_ready_ok!(t.poll()); - } - - let mut t = task::spawn(file.read(&mut buf)); - assert_pending!(t.poll()); - pool::run_one(); - let n = assert_ready_ok!(t.poll()); - - assert_eq!(n, FOO.len()); - assert_eq!(&buf[..n], FOO); -} - -#[test] -fn busy_file_seek_error() { - let mut file = MockFile::default(); - let mut seq = Sequence::new(); - file.expect_inner_write() - .once() - .in_sequence(&mut seq) - .returning(|_| Err(io::ErrorKind::Other.into())); - - let mut file = crate::io::BufReader::new(File::from_std(file)); - { - let mut t = task::spawn(file.write(HELLO)); - assert_ready_ok!(t.poll()); - } - - pool::run_one(); - - let mut t = task::spawn(file.seek(SeekFrom::Start(0))); - assert_ready_err!(t.poll()); -} diff -Nru 
s390-tools-2.31.0/rust-vendor/tokio/src/fs/file.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/file.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/file.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/file.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,917 +0,0 @@ -//! Types for working with [`File`]. -//! -//! [`File`]: File - -use crate::fs::{asyncify, OpenOptions}; -use crate::io::blocking::Buf; -use crate::io::{AsyncRead, AsyncSeek, AsyncWrite, ReadBuf}; -use crate::sync::Mutex; - -use std::fmt; -use std::fs::{Metadata, Permissions}; -use std::future::Future; -use std::io::{self, Seek, SeekFrom}; -use std::path::Path; -use std::pin::Pin; -use std::sync::Arc; -use std::task::Context; -use std::task::Poll; - -#[cfg(test)] -use super::mocks::JoinHandle; -#[cfg(test)] -use super::mocks::MockFile as StdFile; -#[cfg(test)] -use super::mocks::{spawn_blocking, spawn_mandatory_blocking}; -#[cfg(not(test))] -use crate::blocking::JoinHandle; -#[cfg(not(test))] -use crate::blocking::{spawn_blocking, spawn_mandatory_blocking}; -#[cfg(not(test))] -use std::fs::File as StdFile; - -/// A reference to an open file on the filesystem. -/// -/// This is a specialized version of [`std::fs::File`][std] for usage from the -/// Tokio runtime. -/// -/// An instance of a `File` can be read and/or written depending on what options -/// it was opened with. Files also implement [`AsyncSeek`] to alter the logical -/// cursor that the file contains internally. -/// -/// A file will not be closed immediately when it goes out of scope if there -/// are any IO operations that have not yet completed. To ensure that a file is -/// closed immediately when it is dropped, you should call [`flush`] before -/// dropping it. Note that this does not ensure that the file has been fully -/// written to disk; the operating system might keep the changes around in an -/// in-memory buffer. See the [`sync_all`] method for telling the OS to write -/// the data to disk. 
-/// -/// Reading and writing to a `File` is usually done using the convenience -/// methods found on the [`AsyncReadExt`] and [`AsyncWriteExt`] traits. -/// -/// [std]: struct@std::fs::File -/// [`AsyncSeek`]: trait@crate::io::AsyncSeek -/// [`flush`]: fn@crate::io::AsyncWriteExt::flush -/// [`sync_all`]: fn@crate::fs::File::sync_all -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -/// -/// # Examples -/// -/// Create a new file and asynchronously write bytes to it: -/// -/// ```no_run -/// use tokio::fs::File; -/// use tokio::io::AsyncWriteExt; // for write_all() -/// -/// # async fn dox() -> std::io::Result<()> { -/// let mut file = File::create("foo.txt").await?; -/// file.write_all(b"hello, world!").await?; -/// # Ok(()) -/// # } -/// ``` -/// -/// Read the contents of a file into a buffer: -/// -/// ```no_run -/// use tokio::fs::File; -/// use tokio::io::AsyncReadExt; // for read_to_end() -/// -/// # async fn dox() -> std::io::Result<()> { -/// let mut file = File::open("foo.txt").await?; -/// -/// let mut contents = vec![]; -/// file.read_to_end(&mut contents).await?; -/// -/// println!("len = {}", contents.len()); -/// # Ok(()) -/// # } -/// ``` -pub struct File { - std: Arc, - inner: Mutex, -} - -struct Inner { - state: State, - - /// Errors from writes/flushes are returned in write/flush calls. If a write - /// error is observed while performing a read, it is saved until the next - /// write / flush call. - last_write_err: Option, - - pos: u64, -} - -#[derive(Debug)] -enum State { - Idle(Option), - Busy(JoinHandle<(Operation, Buf)>), -} - -#[derive(Debug)] -enum Operation { - Read(io::Result), - Write(io::Result<()>), - Seek(io::Result), -} - -impl File { - /// Attempts to open a file in read-only mode. - /// - /// See [`OpenOptions`] for more details. 
- /// - /// # Errors - /// - /// This function will return an error if called from outside of the Tokio - /// runtime or if path does not already exist. Other errors may also be - /// returned according to OpenOptions::open. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::AsyncReadExt; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::open("foo.txt").await?; - /// - /// let mut contents = vec![]; - /// file.read_to_end(&mut contents).await?; - /// - /// println!("len = {}", contents.len()); - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`read_to_end`] method is defined on the [`AsyncReadExt`] trait. - /// - /// [`read_to_end`]: fn@crate::io::AsyncReadExt::read_to_end - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - pub async fn open(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - let std = asyncify(|| StdFile::open(path)).await?; - - Ok(File::from_std(std)) - } - - /// Opens a file in write-only mode. - /// - /// This function will create a file if it does not exist, and will truncate - /// it if it does. - /// - /// See [`OpenOptions`] for more details. - /// - /// # Errors - /// - /// Results in an error if called from outside of the Tokio runtime or if - /// the underlying [`create`] call results in an error. - /// - /// [`create`]: std::fs::File::create - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::AsyncWriteExt; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// file.write_all(b"hello, world!").await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. 
- /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn create(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - let std_file = asyncify(move || StdFile::create(path)).await?; - Ok(File::from_std(std_file)) - } - - /// Returns a new [`OpenOptions`] object. - /// - /// This function returns a new `OpenOptions` object that you can use to - /// open or create a file with specific options if `open()` or `create()` - /// are not appropriate. - /// - /// It is equivalent to `OpenOptions::new()`, but allows you to write more - /// readable code. Instead of - /// `OpenOptions::new().append(true).open("example.log")`, - /// you can write `File::options().append(true).open("example.log")`. This - /// also avoids the need to import `OpenOptions`. - /// - /// See the [`OpenOptions::new`] function for more details. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::AsyncWriteExt; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut f = File::options().append(true).open("example.log").await?; - /// f.write_all(b"new line\n").await?; - /// # Ok(()) - /// # } - /// ``` - #[must_use] - pub fn options() -> OpenOptions { - OpenOptions::new() - } - - /// Converts a [`std::fs::File`][std] to a [`tokio::fs::File`][file]. - /// - /// [std]: std::fs::File - /// [file]: File - /// - /// # Examples - /// - /// ```no_run - /// // This line could block. It is not recommended to do this on the Tokio - /// // runtime. - /// let std_file = std::fs::File::open("foo.txt").unwrap(); - /// let file = tokio::fs::File::from_std(std_file); - /// ``` - pub fn from_std(std: StdFile) -> File { - File { - std: Arc::new(std), - inner: Mutex::new(Inner { - state: State::Idle(Some(Buf::with_capacity(0))), - last_write_err: None, - pos: 0, - }), - } - } - - /// Attempts to sync all OS-internal metadata to disk. 
- /// - /// This function will attempt to ensure that all in-core data reaches the - /// filesystem before returning. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::AsyncWriteExt; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// file.write_all(b"hello, world!").await?; - /// file.sync_all().await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. - /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn sync_all(&self) -> io::Result<()> { - let mut inner = self.inner.lock().await; - inner.complete_inflight().await; - - let std = self.std.clone(); - asyncify(move || std.sync_all()).await - } - - /// This function is similar to `sync_all`, except that it may not - /// synchronize file metadata to the filesystem. - /// - /// This is intended for use cases that must synchronize content, but don't - /// need the metadata on disk. The goal of this method is to reduce disk - /// operations. - /// - /// Note that some platforms may simply implement this in terms of `sync_all`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::AsyncWriteExt; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// file.write_all(b"hello, world!").await?; - /// file.sync_data().await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. 
- /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn sync_data(&self) -> io::Result<()> { - let mut inner = self.inner.lock().await; - inner.complete_inflight().await; - - let std = self.std.clone(); - asyncify(move || std.sync_data()).await - } - - /// Truncates or extends the underlying file, updating the size of this file to become size. - /// - /// If the size is less than the current file's size, then the file will be - /// shrunk. If it is greater than the current file's size, then the file - /// will be extended to size and have all of the intermediate data filled in - /// with 0s. - /// - /// # Errors - /// - /// This function will return an error if the file is not opened for - /// writing. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::AsyncWriteExt; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// file.write_all(b"hello, world!").await?; - /// file.set_len(10).await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. 
- /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn set_len(&self, size: u64) -> io::Result<()> { - let mut inner = self.inner.lock().await; - inner.complete_inflight().await; - - let mut buf = match inner.state { - State::Idle(ref mut buf_cell) => buf_cell.take().unwrap(), - _ => unreachable!(), - }; - - let seek = if !buf.is_empty() { - Some(SeekFrom::Current(buf.discard_read())) - } else { - None - }; - - let std = self.std.clone(); - - inner.state = State::Busy(spawn_blocking(move || { - let res = if let Some(seek) = seek { - (&*std).seek(seek).and_then(|_| std.set_len(size)) - } else { - std.set_len(size) - } - .map(|_| 0); // the value is discarded later - - // Return the result as a seek - (Operation::Seek(res), buf) - })); - - let (op, buf) = match inner.state { - State::Idle(_) => unreachable!(), - State::Busy(ref mut rx) => rx.await?, - }; - - inner.state = State::Idle(Some(buf)); - - match op { - Operation::Seek(res) => res.map(|pos| { - inner.pos = pos; - }), - _ => unreachable!(), - } - } - - /// Queries metadata about the underlying file. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let file = File::open("foo.txt").await?; - /// let metadata = file.metadata().await?; - /// - /// println!("{:?}", metadata); - /// # Ok(()) - /// # } - /// ``` - pub async fn metadata(&self) -> io::Result { - let std = self.std.clone(); - asyncify(move || std.metadata()).await - } - - /// Creates a new `File` instance that shares the same underlying file handle - /// as the existing `File` instance. Reads, writes, and seeks will affect both - /// File instances simultaneously. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let file = File::open("foo.txt").await?; - /// let file_clone = file.try_clone().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn try_clone(&self) -> io::Result { - self.inner.lock().await.complete_inflight().await; - let std = self.std.clone(); - let std_file = asyncify(move || std.try_clone()).await?; - Ok(File::from_std(std_file)) - } - - /// Destructures `File` into a [`std::fs::File`][std]. This function is - /// async to allow any in-flight operations to complete. - /// - /// Use `File::try_into_std` to attempt conversion immediately. - /// - /// [std]: std::fs::File - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let tokio_file = File::open("foo.txt").await?; - /// let std_file = tokio_file.into_std().await; - /// # Ok(()) - /// # } - /// ``` - pub async fn into_std(mut self) -> StdFile { - self.inner.get_mut().complete_inflight().await; - Arc::try_unwrap(self.std).expect("Arc::try_unwrap failed") - } - - /// Tries to immediately destructure `File` into a [`std::fs::File`][std]. - /// - /// [std]: std::fs::File - /// - /// # Errors - /// - /// This function will return an error containing the file if some - /// operation is in-flight. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let tokio_file = File::open("foo.txt").await?; - /// let std_file = tokio_file.try_into_std().unwrap(); - /// # Ok(()) - /// # } - /// ``` - pub fn try_into_std(mut self) -> Result { - match Arc::try_unwrap(self.std) { - Ok(file) => Ok(file), - Err(std_file_arc) => { - self.std = std_file_arc; - Err(self) - } - } - } - - /// Changes the permissions on the underlying file. 
- /// - /// # Platform-specific behavior - /// - /// This function currently corresponds to the `fchmod` function on Unix and - /// the `SetFileInformationByHandle` function on Windows. Note that, this - /// [may change in the future][changes]. - /// - /// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior - /// - /// # Errors - /// - /// This function will return an error if the user lacks permission change - /// attributes on the underlying file. It may also return an error in other - /// os-specific unspecified cases. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let file = File::open("foo.txt").await?; - /// let mut perms = file.metadata().await?.permissions(); - /// perms.set_readonly(true); - /// file.set_permissions(perms).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn set_permissions(&self, perm: Permissions) -> io::Result<()> { - let std = self.std.clone(); - asyncify(move || std.set_permissions(perm)).await - } -} - -impl AsyncRead for File { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - dst: &mut ReadBuf<'_>, - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - let me = self.get_mut(); - let inner = me.inner.get_mut(); - - loop { - match inner.state { - State::Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - if !buf.is_empty() { - buf.copy_to(dst); - *buf_cell = Some(buf); - return Poll::Ready(Ok(())); - } - - buf.ensure_capacity_for(dst); - let std = me.std.clone(); - - inner.state = State::Busy(spawn_blocking(move || { - let res = buf.read_from(&mut &*std); - (Operation::Read(res), buf) - })); - } - State::Busy(ref mut rx) => { - let (op, mut buf) = ready!(Pin::new(rx).poll(cx))?; - - match op { - Operation::Read(Ok(_)) => { - buf.copy_to(dst); - inner.state = State::Idle(Some(buf)); - return Poll::Ready(Ok(())); - } - Operation::Read(Err(e)) => { - assert!(buf.is_empty()); - - 
inner.state = State::Idle(Some(buf)); - return Poll::Ready(Err(e)); - } - Operation::Write(Ok(_)) => { - assert!(buf.is_empty()); - inner.state = State::Idle(Some(buf)); - continue; - } - Operation::Write(Err(e)) => { - assert!(inner.last_write_err.is_none()); - inner.last_write_err = Some(e.kind()); - inner.state = State::Idle(Some(buf)); - } - Operation::Seek(result) => { - assert!(buf.is_empty()); - inner.state = State::Idle(Some(buf)); - if let Ok(pos) = result { - inner.pos = pos; - } - continue; - } - } - } - } - } - } -} - -impl AsyncSeek for File { - fn start_seek(self: Pin<&mut Self>, mut pos: SeekFrom) -> io::Result<()> { - let me = self.get_mut(); - let inner = me.inner.get_mut(); - - match inner.state { - State::Busy(_) => Err(io::Error::new( - io::ErrorKind::Other, - "other file operation is pending, call poll_complete before start_seek", - )), - State::Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - // Factor in any unread data from the buf - if !buf.is_empty() { - let n = buf.discard_read(); - - if let SeekFrom::Current(ref mut offset) = pos { - *offset += n; - } - } - - let std = me.std.clone(); - - inner.state = State::Busy(spawn_blocking(move || { - let res = (&*std).seek(pos); - (Operation::Seek(res), buf) - })); - Ok(()) - } - } - } - - fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - let inner = self.inner.get_mut(); - - loop { - match inner.state { - State::Idle(_) => return Poll::Ready(Ok(inner.pos)), - State::Busy(ref mut rx) => { - let (op, buf) = ready!(Pin::new(rx).poll(cx))?; - inner.state = State::Idle(Some(buf)); - - match op { - Operation::Read(_) => {} - Operation::Write(Err(e)) => { - assert!(inner.last_write_err.is_none()); - inner.last_write_err = Some(e.kind()); - } - Operation::Write(_) => {} - Operation::Seek(res) => { - if let Ok(pos) = res { - inner.pos = pos; - } - return Poll::Ready(res); - } - } - } - } - } - } -} - -impl 
AsyncWrite for File { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - src: &[u8], - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - let me = self.get_mut(); - let inner = me.inner.get_mut(); - - if let Some(e) = inner.last_write_err.take() { - return Poll::Ready(Err(e.into())); - } - - loop { - match inner.state { - State::Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - let seek = if !buf.is_empty() { - Some(SeekFrom::Current(buf.discard_read())) - } else { - None - }; - - let n = buf.copy_from(src); - let std = me.std.clone(); - - let blocking_task_join_handle = spawn_mandatory_blocking(move || { - let res = if let Some(seek) = seek { - (&*std).seek(seek).and_then(|_| buf.write_to(&mut &*std)) - } else { - buf.write_to(&mut &*std) - }; - - (Operation::Write(res), buf) - }) - .ok_or_else(|| { - io::Error::new(io::ErrorKind::Other, "background task failed") - })?; - - inner.state = State::Busy(blocking_task_join_handle); - - return Poll::Ready(Ok(n)); - } - State::Busy(ref mut rx) => { - let (op, buf) = ready!(Pin::new(rx).poll(cx))?; - inner.state = State::Idle(Some(buf)); - - match op { - Operation::Read(_) => { - // We don't care about the result here. The fact - // that the cursor has advanced will be reflected in - // the next iteration of the loop - continue; - } - Operation::Write(res) => { - // If the previous write was successful, continue. - // Otherwise, error. 
- res?; - continue; - } - Operation::Seek(_) => { - // Ignore the seek - continue; - } - } - } - } - } - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - let me = self.get_mut(); - let inner = me.inner.get_mut(); - - if let Some(e) = inner.last_write_err.take() { - return Poll::Ready(Err(e.into())); - } - - loop { - match inner.state { - State::Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - let seek = if !buf.is_empty() { - Some(SeekFrom::Current(buf.discard_read())) - } else { - None - }; - - let n = buf.copy_from_bufs(bufs); - let std = me.std.clone(); - - let blocking_task_join_handle = spawn_mandatory_blocking(move || { - let res = if let Some(seek) = seek { - (&*std).seek(seek).and_then(|_| buf.write_to(&mut &*std)) - } else { - buf.write_to(&mut &*std) - }; - - (Operation::Write(res), buf) - }) - .ok_or_else(|| { - io::Error::new(io::ErrorKind::Other, "background task failed") - })?; - - inner.state = State::Busy(blocking_task_join_handle); - - return Poll::Ready(Ok(n)); - } - State::Busy(ref mut rx) => { - let (op, buf) = ready!(Pin::new(rx).poll(cx))?; - inner.state = State::Idle(Some(buf)); - - match op { - Operation::Read(_) => { - // We don't care about the result here. The fact - // that the cursor has advanced will be reflected in - // the next iteration of the loop - continue; - } - Operation::Write(res) => { - // If the previous write was successful, continue. - // Otherwise, error. 
- res?; - continue; - } - Operation::Seek(_) => { - // Ignore the seek - continue; - } - } - } - } - } - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - let inner = self.inner.get_mut(); - inner.poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - self.poll_flush(cx) - } -} - -impl From for File { - fn from(std: StdFile) -> Self { - Self::from_std(std) - } -} - -impl fmt::Debug for File { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("tokio::fs::File") - .field("std", &self.std) - .finish() - } -} - -#[cfg(unix)] -impl std::os::unix::io::AsRawFd for File { - fn as_raw_fd(&self) -> std::os::unix::io::RawFd { - self.std.as_raw_fd() - } -} - -#[cfg(unix)] -impl std::os::unix::io::AsFd for File { - fn as_fd(&self) -> std::os::unix::io::BorrowedFd<'_> { - unsafe { - std::os::unix::io::BorrowedFd::borrow_raw(std::os::unix::io::AsRawFd::as_raw_fd(self)) - } - } -} - -#[cfg(unix)] -impl std::os::unix::io::FromRawFd for File { - unsafe fn from_raw_fd(fd: std::os::unix::io::RawFd) -> Self { - StdFile::from_raw_fd(fd).into() - } -} - -cfg_windows! 
{ - use crate::os::windows::io::{AsRawHandle, FromRawHandle, RawHandle, AsHandle, BorrowedHandle}; - - impl AsRawHandle for File { - fn as_raw_handle(&self) -> RawHandle { - self.std.as_raw_handle() - } - } - - impl AsHandle for File { - fn as_handle(&self) -> BorrowedHandle<'_> { - unsafe { - BorrowedHandle::borrow_raw( - AsRawHandle::as_raw_handle(self), - ) - } - } - } - - impl FromRawHandle for File { - unsafe fn from_raw_handle(handle: RawHandle) -> Self { - StdFile::from_raw_handle(handle).into() - } - } -} - -impl Inner { - async fn complete_inflight(&mut self) { - use crate::future::poll_fn; - - poll_fn(|cx| self.poll_complete_inflight(cx)).await - } - - fn poll_complete_inflight(&mut self, cx: &mut Context<'_>) -> Poll<()> { - ready!(crate::trace::trace_leaf(cx)); - match self.poll_flush(cx) { - Poll::Ready(Err(e)) => { - self.last_write_err = Some(e.kind()); - Poll::Ready(()) - } - Poll::Ready(Ok(())) => Poll::Ready(()), - Poll::Pending => Poll::Pending, - } - } - - fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { - if let Some(e) = self.last_write_err.take() { - return Poll::Ready(Err(e.into())); - } - - let (op, buf) = match self.state { - State::Idle(_) => return Poll::Ready(Ok(())), - State::Busy(ref mut rx) => ready!(Pin::new(rx).poll(cx))?, - }; - - // The buffer is not used here - self.state = State::Idle(Some(buf)); - - match op { - Operation::Read(_) => Poll::Ready(Ok(())), - Operation::Write(res) => Poll::Ready(res), - Operation::Seek(_) => Poll::Ready(Ok(())), - } - } -} - -#[cfg(test)] -mod tests; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/hard_link.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/hard_link.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/hard_link.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/hard_link.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new hard link on the filesystem. 
-/// -/// This is an async version of [`std::fs::hard_link`][std] -/// -/// [std]: std::fs::hard_link -/// -/// The `dst` path will be a link pointing to the `src` path. Note that systems -/// often require these two paths to both be located on the same filesystem. -/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `link` function on Unix -/// and the `CreateHardLink` function on Windows. -/// Note that, this [may change in the future][changes]. -/// -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * The `src` path is not a file or doesn't exist. -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// #[tokio::main] -/// async fn main() -> std::io::Result<()> { -/// fs::hard_link("a.txt", "b.txt").await?; // Hard link a.txt to b.txt -/// Ok(()) -/// } -/// ``` -pub async fn hard_link(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - let src = src.as_ref().to_owned(); - let dst = dst.as_ref().to_owned(); - - asyncify(move || std::fs::hard_link(src, dst)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/metadata.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/metadata.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/metadata.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/metadata.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,47 +0,0 @@ -use crate::fs::asyncify; - -use std::fs::Metadata; -use std::io; -use std::path::Path; - -/// Given a path, queries the file system to get information about a file, -/// directory, etc. -/// -/// This is an async version of [`std::fs::metadata`][std] -/// -/// This function will traverse symbolic links to query information about the -/// destination file. 
-/// -/// # Platform-specific behavior -/// -/// This function currently corresponds to the `stat` function on Unix and the -/// `GetFileAttributesEx` function on Windows. Note that, this [may change in -/// the future][changes]. -/// -/// [std]: std::fs::metadata -/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior -/// -/// # Errors -/// -/// This function will return an error in the following situations, but is not -/// limited to just these cases: -/// -/// * The user lacks permissions to perform `metadata` call on `path`. -/// * `path` does not exist. -/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::fs; -/// -/// #[tokio::main] -/// async fn main() -> std::io::Result<()> { -/// let attr = fs::metadata("/some/file/path.txt").await?; -/// // inspect attr ... -/// Ok(()) -/// } -/// ``` -pub async fn metadata(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(|| std::fs::metadata(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/mocks.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/mocks.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/mocks.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/mocks.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,151 +0,0 @@ -//! Mock version of std::fs::File; -use mockall::mock; - -use crate::sync::oneshot; -use std::{ - cell::RefCell, - collections::VecDeque, - fs::{Metadata, Permissions}, - future::Future, - io::{self, Read, Seek, SeekFrom, Write}, - path::PathBuf, - pin::Pin, - task::{Context, Poll}, -}; - -mock! { - #[derive(Debug)] - pub File { - pub fn create(pb: PathBuf) -> io::Result; - // These inner_ methods exist because std::fs::File has two - // implementations for each of these methods: one on "&mut self" and - // one on "&&self". Defining both of those in terms of an inner_ method - // allows us to specify the expectation the same way, regardless of - // which method is used. 
- pub fn inner_flush(&self) -> io::Result<()>; - pub fn inner_read(&self, dst: &mut [u8]) -> io::Result; - pub fn inner_seek(&self, pos: SeekFrom) -> io::Result; - pub fn inner_write(&self, src: &[u8]) -> io::Result; - pub fn metadata(&self) -> io::Result; - pub fn open(pb: PathBuf) -> io::Result; - pub fn set_len(&self, size: u64) -> io::Result<()>; - pub fn set_permissions(&self, _perm: Permissions) -> io::Result<()>; - pub fn sync_all(&self) -> io::Result<()>; - pub fn sync_data(&self) -> io::Result<()>; - pub fn try_clone(&self) -> io::Result; - } - #[cfg(windows)] - impl std::os::windows::io::AsRawHandle for File { - fn as_raw_handle(&self) -> std::os::windows::io::RawHandle; - } - #[cfg(windows)] - impl std::os::windows::io::FromRawHandle for File { - unsafe fn from_raw_handle(h: std::os::windows::io::RawHandle) -> Self; - } - #[cfg(unix)] - impl std::os::unix::io::AsRawFd for File { - fn as_raw_fd(&self) -> std::os::unix::io::RawFd; - } - - #[cfg(unix)] - impl std::os::unix::io::FromRawFd for File { - unsafe fn from_raw_fd(h: std::os::unix::io::RawFd) -> Self; - } -} - -impl Read for MockFile { - fn read(&mut self, dst: &mut [u8]) -> io::Result { - self.inner_read(dst) - } -} - -impl Read for &'_ MockFile { - fn read(&mut self, dst: &mut [u8]) -> io::Result { - self.inner_read(dst) - } -} - -impl Seek for &'_ MockFile { - fn seek(&mut self, pos: SeekFrom) -> io::Result { - self.inner_seek(pos) - } -} - -impl Write for &'_ MockFile { - fn write(&mut self, src: &[u8]) -> io::Result { - self.inner_write(src) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner_flush() - } -} - -tokio_thread_local! 
{ - static QUEUE: RefCell>> = RefCell::new(VecDeque::new()) -} - -#[derive(Debug)] -pub(super) struct JoinHandle { - rx: oneshot::Receiver, -} - -pub(super) fn spawn_blocking(f: F) -> JoinHandle -where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, -{ - let (tx, rx) = oneshot::channel(); - let task = Box::new(move || { - let _ = tx.send(f()); - }); - - QUEUE.with(|cell| cell.borrow_mut().push_back(task)); - - JoinHandle { rx } -} - -pub(super) fn spawn_mandatory_blocking(f: F) -> Option> -where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, -{ - let (tx, rx) = oneshot::channel(); - let task = Box::new(move || { - let _ = tx.send(f()); - }); - - QUEUE.with(|cell| cell.borrow_mut().push_back(task)); - - Some(JoinHandle { rx }) -} - -impl Future for JoinHandle { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - use std::task::Poll; - - match Pin::new(&mut self.rx).poll(cx) { - Poll::Ready(Ok(v)) => Poll::Ready(Ok(v)), - Poll::Ready(Err(e)) => panic!("error = {:?}", e), - Poll::Pending => Poll::Pending, - } - } -} - -pub(super) mod pool { - use super::*; - - pub(in super::super) fn len() -> usize { - QUEUE.with(|cell| cell.borrow().len()) - } - - pub(in super::super) fn run_one() { - let task = QUEUE - .with(|cell| cell.borrow_mut().pop_front()) - .expect("expected task to run, but none ready"); - - task(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,145 +0,0 @@ -#![cfg(not(loom))] - -//! Asynchronous file and standard stream adaptation. -//! -//! This module contains utility methods and adapter types for input/output to -//! files or standard streams (`Stdin`, `Stdout`, `Stderr`), and -//! 
filesystem manipulation, for use within (and only within) a Tokio runtime. -//! -//! Tasks run by *worker* threads should not block, as this could delay -//! servicing reactor events. Portable filesystem operations are blocking, -//! however. This module offers adapters which use a `blocking` annotation -//! to inform the runtime that a blocking operation is required. When -//! necessary, this allows the runtime to convert the current thread from a -//! *worker* to a *backup* thread, where blocking is acceptable. -//! -//! ## Usage -//! -//! Where possible, users should prefer the provided asynchronous-specific -//! traits such as [`AsyncRead`], or methods returning a `Future` or `Poll` -//! type. Adaptions also extend to traits like `std::io::Read` where methods -//! return `std::io::Result`. Be warned that these adapted methods may return -//! `std::io::ErrorKind::WouldBlock` if a *worker* thread can not be converted -//! to a *backup* thread immediately. -//! -//! **Warning**: These adapters may create a large number of temporary tasks, -//! especially when reading large files. When performing a lot of operations -//! in one batch, it may be significantly faster to use [`spawn_blocking`] -//! directly: -//! -//! ``` -//! use tokio::fs::File; -//! use std::io::{BufReader, BufRead}; -//! async fn count_lines(file: File) -> Result { -//! let file = file.into_std().await; -//! tokio::task::spawn_blocking(move || { -//! let line_count = BufReader::new(file).lines().count(); -//! Ok(line_count) -//! }).await? -//! } -//! ``` -//! -//! [`spawn_blocking`]: fn@crate::task::spawn_blocking -//! 
[`AsyncRead`]: trait@crate::io::AsyncRead - -mod canonicalize; -pub use self::canonicalize::canonicalize; - -mod create_dir; -pub use self::create_dir::create_dir; - -mod create_dir_all; -pub use self::create_dir_all::create_dir_all; - -mod dir_builder; -pub use self::dir_builder::DirBuilder; - -mod file; -pub use self::file::File; - -mod hard_link; -pub use self::hard_link::hard_link; - -mod metadata; -pub use self::metadata::metadata; - -mod open_options; -pub use self::open_options::OpenOptions; - -mod read; -pub use self::read::read; - -mod read_dir; -pub use self::read_dir::{read_dir, DirEntry, ReadDir}; - -mod read_link; -pub use self::read_link::read_link; - -mod read_to_string; -pub use self::read_to_string::read_to_string; - -mod remove_dir; -pub use self::remove_dir::remove_dir; - -mod remove_dir_all; -pub use self::remove_dir_all::remove_dir_all; - -mod remove_file; -pub use self::remove_file::remove_file; - -mod rename; -pub use self::rename::rename; - -mod set_permissions; -pub use self::set_permissions::set_permissions; - -mod symlink_metadata; -pub use self::symlink_metadata::symlink_metadata; - -mod write; -pub use self::write::write; - -mod copy; -pub use self::copy::copy; - -mod try_exists; -pub use self::try_exists::try_exists; - -#[cfg(test)] -mod mocks; - -feature! { - #![unix] - - mod symlink; - pub use self::symlink::symlink; -} - -cfg_windows! 
{ - mod symlink_dir; - pub use self::symlink_dir::symlink_dir; - - mod symlink_file; - pub use self::symlink_file::symlink_file; -} - -use std::io; - -#[cfg(not(test))] -use crate::blocking::spawn_blocking; -#[cfg(test)] -use mocks::spawn_blocking; - -pub(crate) async fn asyncify(f: F) -> io::Result -where - F: FnOnce() -> io::Result + Send + 'static, - T: Send + 'static, -{ - match spawn_blocking(f).await { - Ok(res) => res, - Err(_) => Err(io::Error::new( - io::ErrorKind::Other, - "background task failed", - )), - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/open_options/mock_open_options.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/open_options/mock_open_options.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/open_options/mock_open_options.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/open_options/mock_open_options.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ -#![allow(unreachable_pub)] -//! Mock version of std::fs::OpenOptions; -use mockall::mock; - -use crate::fs::mocks::MockFile; -#[cfg(unix)] -use std::os::unix::fs::OpenOptionsExt; -#[cfg(windows)] -use std::os::windows::fs::OpenOptionsExt; -use std::{io, path::Path}; - -mock! 
{ - #[derive(Debug)] - pub OpenOptions { - pub fn append(&mut self, append: bool) -> &mut Self; - pub fn create(&mut self, create: bool) -> &mut Self; - pub fn create_new(&mut self, create_new: bool) -> &mut Self; - pub fn open + 'static>(&self, path: P) -> io::Result; - pub fn read(&mut self, read: bool) -> &mut Self; - pub fn truncate(&mut self, truncate: bool) -> &mut Self; - pub fn write(&mut self, write: bool) -> &mut Self; - } - impl Clone for OpenOptions { - fn clone(&self) -> Self; - } - #[cfg(unix)] - impl OpenOptionsExt for OpenOptions { - fn custom_flags(&mut self, flags: i32) -> &mut Self; - fn mode(&mut self, mode: u32) -> &mut Self; - } - #[cfg(windows)] - impl OpenOptionsExt for OpenOptions { - fn access_mode(&mut self, access: u32) -> &mut Self; - fn share_mode(&mut self, val: u32) -> &mut Self; - fn custom_flags(&mut self, flags: u32) -> &mut Self; - fn attributes(&mut self, val: u32) -> &mut Self; - fn security_qos_flags(&mut self, flags: u32) -> &mut Self; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/open_options.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/open_options.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/open_options.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/open_options.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,664 +0,0 @@ -use crate::fs::{asyncify, File}; - -use std::io; -use std::path::Path; - -#[cfg(test)] -mod mock_open_options; -#[cfg(test)] -use mock_open_options::MockOpenOptions as StdOpenOptions; -#[cfg(not(test))] -use std::fs::OpenOptions as StdOpenOptions; - -#[cfg(unix)] -use std::os::unix::fs::OpenOptionsExt; -#[cfg(windows)] -use std::os::windows::fs::OpenOptionsExt; - -/// Options and flags which can be used to configure how a file is opened. -/// -/// This builder exposes the ability to configure how a [`File`] is opened and -/// what operations are permitted on the open file. 
The [`File::open`] and -/// [`File::create`] methods are aliases for commonly used options using this -/// builder. -/// -/// Generally speaking, when using `OpenOptions`, you'll first call [`new`], -/// then chain calls to methods to set each option, then call [`open`], passing -/// the path of the file you're trying to open. This will give you a -/// [`io::Result`][result] with a [`File`] inside that you can further operate -/// on. -/// -/// This is a specialized version of [`std::fs::OpenOptions`] for usage from -/// the Tokio runtime. -/// -/// `From` is implemented for more advanced configuration -/// than the methods provided here. -/// -/// [`new`]: OpenOptions::new -/// [`open`]: OpenOptions::open -/// [result]: std::io::Result -/// [`File`]: File -/// [`File::open`]: File::open -/// [`File::create`]: File::create -/// [`std::fs::OpenOptions`]: std::fs::OpenOptions -/// -/// # Examples -/// -/// Opening a file to read: -/// -/// ```no_run -/// use tokio::fs::OpenOptions; -/// use std::io; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// let file = OpenOptions::new() -/// .read(true) -/// .open("foo.txt") -/// .await?; -/// -/// Ok(()) -/// } -/// ``` -/// -/// Opening a file for both reading and writing, as well as creating it if it -/// doesn't exist: -/// -/// ```no_run -/// use tokio::fs::OpenOptions; -/// use std::io; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// let file = OpenOptions::new() -/// .read(true) -/// .write(true) -/// .create(true) -/// .open("foo.txt") -/// .await?; -/// -/// Ok(()) -/// } -/// ``` -#[derive(Clone, Debug)] -pub struct OpenOptions(StdOpenOptions); - -impl OpenOptions { - /// Creates a blank new set of options ready for configuration. - /// - /// All options are initially set to `false`. 
- /// - /// This is an async version of [`std::fs::OpenOptions::new`][std] - /// - /// [std]: std::fs::OpenOptions::new - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// - /// let mut options = OpenOptions::new(); - /// let future = options.read(true).open("foo.txt"); - /// ``` - pub fn new() -> OpenOptions { - OpenOptions(StdOpenOptions::new()) - } - - /// Sets the option for read access. - /// - /// This option, when true, will indicate that the file should be - /// `read`-able if opened. - /// - /// This is an async version of [`std::fs::OpenOptions::read`][std] - /// - /// [std]: std::fs::OpenOptions::read - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .read(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn read(&mut self, read: bool) -> &mut OpenOptions { - self.0.read(read); - self - } - - /// Sets the option for write access. - /// - /// This option, when true, will indicate that the file should be - /// `write`-able if opened. - /// - /// This is an async version of [`std::fs::OpenOptions::write`][std] - /// - /// [std]: std::fs::OpenOptions::write - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn write(&mut self, write: bool) -> &mut OpenOptions { - self.0.write(write); - self - } - - /// Sets the option for the append mode. - /// - /// This option, when true, means that writes will append to a file instead - /// of overwriting previous contents. Note that setting - /// `.write(true).append(true)` has the same effect as setting only - /// `.append(true)`. 
- /// - /// For most filesystems, the operating system guarantees that all writes are - /// atomic: no writes get mangled because another process writes at the same - /// time. - /// - /// One maybe obvious note when using append-mode: make sure that all data - /// that belongs together is written to the file in one operation. This - /// can be done by concatenating strings before passing them to [`write()`], - /// or using a buffered writer (with a buffer of adequate size), - /// and calling [`flush()`] when the message is complete. - /// - /// If a file is opened with both read and append access, beware that after - /// opening, and after every write, the position for reading may be set at the - /// end of the file. So, before writing, save the current position (using - /// [`seek`]`(`[`SeekFrom`]`::`[`Current`]`(0))`), and restore it before the next read. - /// - /// This is an async version of [`std::fs::OpenOptions::append`][std] - /// - /// [std]: std::fs::OpenOptions::append - /// - /// ## Note - /// - /// This function doesn't create the file if it doesn't exist. Use the [`create`] - /// method to do so. - /// - /// [`write()`]: crate::io::AsyncWriteExt::write - /// [`flush()`]: crate::io::AsyncWriteExt::flush - /// [`seek`]: crate::io::AsyncSeekExt::seek - /// [`SeekFrom`]: std::io::SeekFrom - /// [`Current`]: std::io::SeekFrom::Current - /// [`create`]: OpenOptions::create - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .append(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn append(&mut self, append: bool) -> &mut OpenOptions { - self.0.append(append); - self - } - - /// Sets the option for truncating a previous file. - /// - /// If a file is successfully opened with this option set it will truncate - /// the file to 0 length if it already exists. 
- /// - /// The file must be opened with write access for truncate to work. - /// - /// This is an async version of [`std::fs::OpenOptions::truncate`][std] - /// - /// [std]: std::fs::OpenOptions::truncate - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .truncate(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions { - self.0.truncate(truncate); - self - } - - /// Sets the option for creating a new file. - /// - /// This option indicates whether a new file will be created if the file - /// does not yet already exist. - /// - /// In order for the file to be created, [`write`] or [`append`] access must - /// be used. - /// - /// This is an async version of [`std::fs::OpenOptions::create`][std] - /// - /// [std]: std::fs::OpenOptions::create - /// [`write`]: OpenOptions::write - /// [`append`]: OpenOptions::append - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .create(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn create(&mut self, create: bool) -> &mut OpenOptions { - self.0.create(create); - self - } - - /// Sets the option to always create a new file. - /// - /// This option indicates whether a new file will be created. No file is - /// allowed to exist at the target location, also no (dangling) symlink. - /// - /// This option is useful because it is atomic. Otherwise between checking - /// whether a file exists and creating a new one, the file may have been - /// created by another process (a TOCTOU race condition / attack). 
- /// - /// If `.create_new(true)` is set, [`.create()`] and [`.truncate()`] are - /// ignored. - /// - /// The file must be opened with write or append access in order to create a - /// new file. - /// - /// This is an async version of [`std::fs::OpenOptions::create_new`][std] - /// - /// [std]: std::fs::OpenOptions::create_new - /// [`.create()`]: OpenOptions::create - /// [`.truncate()`]: OpenOptions::truncate - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .create_new(true) - /// .open("foo.txt") - /// .await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn create_new(&mut self, create_new: bool) -> &mut OpenOptions { - self.0.create_new(create_new); - self - } - - /// Opens a file at `path` with the options specified by `self`. - /// - /// This is an async version of [`std::fs::OpenOptions::open`][std] - /// - /// [std]: std::fs::OpenOptions::open - /// - /// # Errors - /// - /// This function will return an error under a number of different - /// circumstances. Some of these error conditions are listed here, together - /// with their [`ErrorKind`]. The mapping to [`ErrorKind`]s is not part of - /// the compatibility contract of the function, especially the `Other` kind - /// might change to more specific kinds in the future. - /// - /// * [`NotFound`]: The specified file does not exist and neither `create` - /// or `create_new` is set. - /// * [`NotFound`]: One of the directory components of the file path does - /// not exist. - /// * [`PermissionDenied`]: The user lacks permission to get the specified - /// access rights for the file. - /// * [`PermissionDenied`]: The user lacks permission to open one of the - /// directory components of the specified path. - /// * [`AlreadyExists`]: `create_new` was specified and the file already - /// exists. 
- /// * [`InvalidInput`]: Invalid combinations of open options (truncate - /// without write access, no access mode set, etc.). - /// * [`Other`]: One of the directory components of the specified file path - /// was not, in fact, a directory. - /// * [`Other`]: Filesystem-level errors: full disk, write permission - /// requested on a read-only file system, exceeded disk quota, too many - /// open files, too long filename, too many symbolic links in the - /// specified path (Unix-like systems only), etc. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let file = OpenOptions::new().open("foo.txt").await?; - /// Ok(()) - /// } - /// ``` - /// - /// [`ErrorKind`]: std::io::ErrorKind - /// [`AlreadyExists`]: std::io::ErrorKind::AlreadyExists - /// [`InvalidInput`]: std::io::ErrorKind::InvalidInput - /// [`NotFound`]: std::io::ErrorKind::NotFound - /// [`Other`]: std::io::ErrorKind::Other - /// [`PermissionDenied`]: std::io::ErrorKind::PermissionDenied - pub async fn open(&self, path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - let opts = self.0.clone(); - - let std = asyncify(move || opts.open(path)).await?; - Ok(File::from_std(std)) - } - - /// Returns a mutable reference to the underlying `std::fs::OpenOptions` - pub(super) fn as_inner_mut(&mut self) -> &mut StdOpenOptions { - &mut self.0 - } -} - -feature! { - #![unix] - - impl OpenOptions { - /// Sets the mode bits that a new file will be created with. - /// - /// If a new file is created as part of an `OpenOptions::open` call then this - /// specified `mode` will be used as the permission bits for the new file. - /// If no `mode` is set, the default of `0o666` will be used. - /// The operating system masks out bits with the system's `umask`, to produce - /// the final permissions. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut options = OpenOptions::new(); - /// options.mode(0o644); // Give read/write for owner and read for others. - /// let file = options.open("foo.txt").await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn mode(&mut self, mode: u32) -> &mut OpenOptions { - self.as_inner_mut().mode(mode); - self - } - - /// Passes custom flags to the `flags` argument of `open`. - /// - /// The bits that define the access mode are masked out with `O_ACCMODE`, to - /// ensure they do not interfere with the access mode set by Rusts options. - /// - /// Custom flags can only set flags, not remove flags set by Rusts options. - /// This options overwrites any previously set custom flags. - /// - /// # Examples - /// - /// ```no_run - /// use libc; - /// use tokio::fs::OpenOptions; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut options = OpenOptions::new(); - /// options.write(true); - /// if cfg!(unix) { - /// options.custom_flags(libc::O_NOFOLLOW); - /// } - /// let file = options.open("foo.txt").await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn custom_flags(&mut self, flags: i32) -> &mut OpenOptions { - self.as_inner_mut().custom_flags(flags); - self - } - } -} - -cfg_windows! { - impl OpenOptions { - /// Overrides the `dwDesiredAccess` argument to the call to [`CreateFile`] - /// with the specified value. - /// - /// This will override the `read`, `write`, and `append` flags on the - /// `OpenOptions` structure. This method provides fine-grained control over - /// the permissions to read, write and append data, attributes (like hidden - /// and system), and extended attributes. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// - /// # #[tokio::main] - /// # async fn main() -> std::io::Result<()> { - /// // Open without read and write permission, for example if you only need - /// // to call `stat` on the file - /// let file = OpenOptions::new().access_mode(0).open("foo.txt").await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea - pub fn access_mode(&mut self, access: u32) -> &mut OpenOptions { - self.as_inner_mut().access_mode(access); - self - } - - /// Overrides the `dwShareMode` argument to the call to [`CreateFile`] with - /// the specified value. - /// - /// By default `share_mode` is set to - /// `FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE`. This allows - /// other processes to read, write, and delete/rename the same file - /// while it is open. Removing any of the flags will prevent other - /// processes from performing the corresponding operation until the file - /// handle is closed. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::OpenOptions; - /// - /// # #[tokio::main] - /// # async fn main() -> std::io::Result<()> { - /// // Do not allow others to read or modify this file while we have it open - /// // for writing. - /// let file = OpenOptions::new() - /// .write(true) - /// .share_mode(0) - /// .open("foo.txt").await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea - pub fn share_mode(&mut self, share: u32) -> &mut OpenOptions { - self.as_inner_mut().share_mode(share); - self - } - - /// Sets extra flags for the `dwFileFlags` argument to the call to - /// [`CreateFile2`] to the specified value (or combines it with - /// `attributes` and `security_qos_flags` to set the `dwFlagsAndAttributes` - /// for [`CreateFile`]). 
- /// - /// Custom flags can only set flags, not remove flags set by Rust's options. - /// This option overwrites any previously set custom flags. - /// - /// # Examples - /// - /// ```no_run - /// use windows_sys::Win32::Storage::FileSystem::FILE_FLAG_DELETE_ON_CLOSE; - /// use tokio::fs::OpenOptions; - /// - /// # #[tokio::main] - /// # async fn main() -> std::io::Result<()> { - /// let file = OpenOptions::new() - /// .create(true) - /// .write(true) - /// .custom_flags(FILE_FLAG_DELETE_ON_CLOSE) - /// .open("foo.txt").await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea - /// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2 - pub fn custom_flags(&mut self, flags: u32) -> &mut OpenOptions { - self.as_inner_mut().custom_flags(flags); - self - } - - /// Sets the `dwFileAttributes` argument to the call to [`CreateFile2`] to - /// the specified value (or combines it with `custom_flags` and - /// `security_qos_flags` to set the `dwFlagsAndAttributes` for - /// [`CreateFile`]). - /// - /// If a _new_ file is created because it does not yet exist and - /// `.create(true)` or `.create_new(true)` are specified, the new file is - /// given the attributes declared with `.attributes()`. - /// - /// If an _existing_ file is opened with `.create(true).truncate(true)`, its - /// existing attributes are preserved and combined with the ones declared - /// with `.attributes()`. - /// - /// In all other cases the attributes get ignored. 
- /// - /// # Examples - /// - /// ```no_run - /// use windows_sys::Win32::Storage::FileSystem::FILE_ATTRIBUTE_HIDDEN; - /// use tokio::fs::OpenOptions; - /// - /// # #[tokio::main] - /// # async fn main() -> std::io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .create(true) - /// .attributes(FILE_ATTRIBUTE_HIDDEN) - /// .open("foo.txt").await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea - /// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2 - pub fn attributes(&mut self, attributes: u32) -> &mut OpenOptions { - self.as_inner_mut().attributes(attributes); - self - } - - /// Sets the `dwSecurityQosFlags` argument to the call to [`CreateFile2`] to - /// the specified value (or combines it with `custom_flags` and `attributes` - /// to set the `dwFlagsAndAttributes` for [`CreateFile`]). - /// - /// By default `security_qos_flags` is not set. It should be specified when - /// opening a named pipe, to control to which degree a server process can - /// act on behalf of a client process (security impersonation level). - /// - /// When `security_qos_flags` is not set, a malicious program can gain the - /// elevated privileges of a privileged Rust process when it allows opening - /// user-specified paths, by tricking it into opening a named pipe. So - /// arguably `security_qos_flags` should also be set when opening arbitrary - /// paths. However the bits can then conflict with other flags, specifically - /// `FILE_FLAG_OPEN_NO_RECALL`. - /// - /// For information about possible values, see [Impersonation Levels] on the - /// Windows Dev Center site. The `SECURITY_SQOS_PRESENT` flag is set - /// automatically when using this method. 
- /// - /// # Examples - /// - /// ```no_run - /// use windows_sys::Win32::Storage::FileSystem::SECURITY_IDENTIFICATION; - /// use tokio::fs::OpenOptions; - /// - /// # #[tokio::main] - /// # async fn main() -> std::io::Result<()> { - /// let file = OpenOptions::new() - /// .write(true) - /// .create(true) - /// - /// // Sets the flag value to `SecurityIdentification`. - /// .security_qos_flags(SECURITY_IDENTIFICATION) - /// - /// .open(r"\\.\pipe\MyPipe").await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea - /// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2 - /// [Impersonation Levels]: - /// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level - pub fn security_qos_flags(&mut self, flags: u32) -> &mut OpenOptions { - self.as_inner_mut().security_qos_flags(flags); - self - } - } -} - -impl From for OpenOptions { - fn from(options: StdOpenOptions) -> OpenOptions { - OpenOptions(options) - } -} - -impl Default for OpenOptions { - fn default() -> Self { - Self::new() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/read_dir.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/read_dir.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/read_dir.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/read_dir.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,355 +0,0 @@ -use crate::fs::asyncify; - -use std::collections::VecDeque; -use std::ffi::OsString; -use std::fs::{FileType, Metadata}; -use std::future::Future; -use std::io; -use std::path::{Path, PathBuf}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::Context; -use std::task::Poll; - -#[cfg(test)] -use super::mocks::spawn_blocking; -#[cfg(test)] -use super::mocks::JoinHandle; -#[cfg(not(test))] -use crate::blocking::spawn_blocking; -#[cfg(not(test))] -use crate::blocking::JoinHandle; 
- -const CHUNK_SIZE: usize = 32; - -/// Returns a stream over the entries within a directory. -/// -/// This is an async version of [`std::fs::read_dir`](std::fs::read_dir) -/// -/// This operation is implemented by running the equivalent blocking -/// operation on a separate thread pool using [`spawn_blocking`]. -/// -/// [`spawn_blocking`]: crate::task::spawn_blocking -pub async fn read_dir(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(|| -> io::Result { - let mut std = std::fs::read_dir(path)?; - let mut buf = VecDeque::with_capacity(CHUNK_SIZE); - let remain = ReadDir::next_chunk(&mut buf, &mut std); - - Ok(ReadDir(State::Idle(Some((buf, std, remain))))) - }) - .await -} - -/// Reads the entries in a directory. -/// -/// This struct is returned from the [`read_dir`] function of this module and -/// will yield instances of [`DirEntry`]. Through a [`DirEntry`] information -/// like the entry's path and possibly other metadata can be learned. -/// -/// A `ReadDir` can be turned into a `Stream` with [`ReadDirStream`]. -/// -/// [`ReadDirStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.ReadDirStream.html -/// -/// # Errors -/// -/// This stream will return an [`Err`] if there's some sort of intermittent -/// IO error during iteration. -/// -/// [`read_dir`]: read_dir -/// [`DirEntry`]: DirEntry -/// [`Err`]: std::result::Result::Err -#[derive(Debug)] -#[must_use = "streams do nothing unless polled"] -pub struct ReadDir(State); - -#[derive(Debug)] -enum State { - Idle(Option<(VecDeque>, std::fs::ReadDir, bool)>), - Pending(JoinHandle<(VecDeque>, std::fs::ReadDir, bool)>), -} - -impl ReadDir { - /// Returns the next entry in the directory stream. - /// - /// # Cancel safety - /// - /// This method is cancellation safe. - pub async fn next_entry(&mut self) -> io::Result> { - use crate::future::poll_fn; - poll_fn(|cx| self.poll_next_entry(cx)).await - } - - /// Polls for the next directory entry in the stream. 
- /// - /// This method returns: - /// - /// * `Poll::Pending` if the next directory entry is not yet available. - /// * `Poll::Ready(Ok(Some(entry)))` if the next directory entry is available. - /// * `Poll::Ready(Ok(None))` if there are no more directory entries in this - /// stream. - /// * `Poll::Ready(Err(err))` if an IO error occurred while reading the next - /// directory entry. - /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided - /// `Context` is scheduled to receive a wakeup when the next directory entry - /// becomes available on the underlying IO resource. - /// - /// Note that on multiple calls to `poll_next_entry`, only the `Waker` from - /// the `Context` passed to the most recent call is scheduled to receive a - /// wakeup. - pub fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll>> { - loop { - match self.0 { - State::Idle(ref mut data) => { - let (buf, _, ref remain) = data.as_mut().unwrap(); - - if let Some(ent) = buf.pop_front() { - return Poll::Ready(ent.map(Some)); - } else if !remain { - return Poll::Ready(Ok(None)); - } - - let (mut buf, mut std, _) = data.take().unwrap(); - - self.0 = State::Pending(spawn_blocking(move || { - let remain = ReadDir::next_chunk(&mut buf, &mut std); - (buf, std, remain) - })); - } - State::Pending(ref mut rx) => { - self.0 = State::Idle(Some(ready!(Pin::new(rx).poll(cx))?)); - } - } - } - } - - fn next_chunk(buf: &mut VecDeque>, std: &mut std::fs::ReadDir) -> bool { - for _ in 0..CHUNK_SIZE { - let ret = match std.next() { - Some(ret) => ret, - None => return false, - }; - - let success = ret.is_ok(); - - buf.push_back(ret.map(|std| DirEntry { - #[cfg(not(any( - target_os = "solaris", - target_os = "illumos", - target_os = "haiku", - target_os = "vxworks", - target_os = "nto", - target_os = "vita", - )))] - file_type: std.file_type().ok(), - std: Arc::new(std), - })); - - if !success { - break; - } - } - - true - } -} - -feature! 
{ - #![unix] - - use std::os::unix::fs::DirEntryExt; - - impl DirEntry { - /// Returns the underlying `d_ino` field in the contained `dirent` - /// structure. - /// - /// # Examples - /// - /// ``` - /// use tokio::fs; - /// - /// # #[tokio::main] - /// # async fn main() -> std::io::Result<()> { - /// let mut entries = fs::read_dir(".").await?; - /// while let Some(entry) = entries.next_entry().await? { - /// // Here, `entry` is a `DirEntry`. - /// println!("{:?}: {}", entry.file_name(), entry.ino()); - /// } - /// # Ok(()) - /// # } - /// ``` - pub fn ino(&self) -> u64 { - self.as_inner().ino() - } - } -} - -/// Entries returned by the [`ReadDir`] stream. -/// -/// [`ReadDir`]: struct@ReadDir -/// -/// This is a specialized version of [`std::fs::DirEntry`] for usage from the -/// Tokio runtime. -/// -/// An instance of `DirEntry` represents an entry inside of a directory on the -/// filesystem. Each entry can be inspected via methods to learn about the full -/// path or possibly other metadata through per-platform extension traits. -#[derive(Debug)] -pub struct DirEntry { - #[cfg(not(any( - target_os = "solaris", - target_os = "illumos", - target_os = "haiku", - target_os = "vxworks", - target_os = "nto", - target_os = "vita", - )))] - file_type: Option, - std: Arc, -} - -impl DirEntry { - /// Returns the full path to the file that this entry represents. - /// - /// The full path is created by joining the original path to `read_dir` - /// with the filename of this entry. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut entries = fs::read_dir(".").await?; - /// - /// while let Some(entry) = entries.next_entry().await? 
{ - /// println!("{:?}", entry.path()); - /// } - /// # Ok(()) - /// # } - /// ``` - /// - /// This prints output like: - /// - /// ```text - /// "./whatever.txt" - /// "./foo.html" - /// "./hello_world.rs" - /// ``` - /// - /// The exact text, of course, depends on what files you have in `.`. - pub fn path(&self) -> PathBuf { - self.std.path() - } - - /// Returns the bare file name of this directory entry without any other - /// leading path component. - /// - /// # Examples - /// - /// ``` - /// use tokio::fs; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut entries = fs::read_dir(".").await?; - /// - /// while let Some(entry) = entries.next_entry().await? { - /// println!("{:?}", entry.file_name()); - /// } - /// # Ok(()) - /// # } - /// ``` - pub fn file_name(&self) -> OsString { - self.std.file_name() - } - - /// Returns the metadata for the file that this entry points at. - /// - /// This function will not traverse symlinks if this entry points at a - /// symlink. - /// - /// # Platform-specific behavior - /// - /// On Windows this function is cheap to call (no extra system calls - /// needed), but on Unix platforms this function is the equivalent of - /// calling `symlink_metadata` on the path. - /// - /// # Examples - /// - /// ``` - /// use tokio::fs; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut entries = fs::read_dir(".").await?; - /// - /// while let Some(entry) = entries.next_entry().await? { - /// if let Ok(metadata) = entry.metadata().await { - /// // Now let's show our entry's permissions! - /// println!("{:?}: {:?}", entry.path(), metadata.permissions()); - /// } else { - /// println!("Couldn't get file type for {:?}", entry.path()); - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn metadata(&self) -> io::Result { - let std = self.std.clone(); - asyncify(move || std.metadata()).await - } - - /// Returns the file type for the file that this entry points at. 
- /// - /// This function will not traverse symlinks if this entry points at a - /// symlink. - /// - /// # Platform-specific behavior - /// - /// On Windows and most Unix platforms this function is free (no extra - /// system calls needed), but some Unix platforms may require the equivalent - /// call to `symlink_metadata` to learn about the target file type. - /// - /// # Examples - /// - /// ``` - /// use tokio::fs; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut entries = fs::read_dir(".").await?; - /// - /// while let Some(entry) = entries.next_entry().await? { - /// if let Ok(file_type) = entry.file_type().await { - /// // Now let's show our entry's file type! - /// println!("{:?}: {:?}", entry.path(), file_type); - /// } else { - /// println!("Couldn't get file type for {:?}", entry.path()); - /// } - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn file_type(&self) -> io::Result { - #[cfg(not(any( - target_os = "solaris", - target_os = "illumos", - target_os = "haiku", - target_os = "vxworks", - target_os = "nto", - target_os = "vita", - )))] - if let Some(file_type) = self.file_type { - return Ok(file_type); - } - - let std = self.std.clone(); - asyncify(move || std.file_type()).await - } - - /// Returns a reference to the underlying `std::fs::DirEntry`. - #[cfg(unix)] - pub(super) fn as_inner(&self) -> &std::fs::DirEntry { - &self.std - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/read_link.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/read_link.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/read_link.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/read_link.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,14 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::{Path, PathBuf}; - -/// Reads a symbolic link, returning the file that the link points to. 
-/// -/// This is an async version of [`std::fs::read_link`][std] -/// -/// [std]: std::fs::read_link -pub async fn read_link(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::read_link(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/read.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/read.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/read.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/read.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,51 +0,0 @@ -use crate::fs::asyncify; - -use std::{io, path::Path}; - -/// Reads the entire contents of a file into a bytes vector. -/// -/// This is an async version of [`std::fs::read`][std] -/// -/// [std]: std::fs::read -/// -/// This is a convenience function for using [`File::open`] and [`read_to_end`] -/// with fewer imports and without an intermediate variable. It pre-allocates a -/// buffer based on the file size when available, so it is generally faster than -/// reading into a vector created with `Vec::new()`. -/// -/// This operation is implemented by running the equivalent blocking operation -/// on a separate thread pool using [`spawn_blocking`]. -/// -/// [`File::open`]: super::File::open -/// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end -/// [`spawn_blocking`]: crate::task::spawn_blocking -/// -/// # Errors -/// -/// This function will return an error if `path` does not already exist. -/// Other errors may also be returned according to [`OpenOptions::open`]. -/// -/// [`OpenOptions::open`]: super::OpenOptions::open -/// -/// It will also return an error if it encounters while reading an error -/// of a kind other than [`ErrorKind::Interrupted`]. 
-/// -/// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// use std::net::SocketAddr; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// let contents = fs::read("address.txt").await?; -/// let foo: SocketAddr = String::from_utf8_lossy(&contents).parse()?; -/// Ok(()) -/// } -/// ``` -pub async fn read(path: impl AsRef) -> io::Result> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::read(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/read_to_string.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/read_to_string.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/read_to_string.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/read_to_string.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,30 +0,0 @@ -use crate::fs::asyncify; - -use std::{io, path::Path}; - -/// Creates a future which will open a file for reading and read the entire -/// contents into a string and return said string. -/// -/// This is the async equivalent of [`std::fs::read_to_string`][std]. -/// -/// This operation is implemented by running the equivalent blocking operation -/// on a separate thread pool using [`spawn_blocking`]. 
-/// -/// [`spawn_blocking`]: crate::task::spawn_blocking -/// [std]: fn@std::fs::read_to_string -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// # async fn dox() -> std::io::Result<()> { -/// let contents = fs::read_to_string("foo.txt").await?; -/// println!("foo.txt contains {} bytes", contents.len()); -/// # Ok(()) -/// # } -/// ``` -pub async fn read_to_string(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::read_to_string(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/remove_dir_all.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/remove_dir_all.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/remove_dir_all.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/remove_dir_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,14 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Removes a directory at this path, after removing all its contents. Use carefully! -/// -/// This is an async version of [`std::fs::remove_dir_all`][std] -/// -/// [std]: fn@std::fs::remove_dir_all -pub async fn remove_dir_all(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::remove_dir_all(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/remove_dir.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/remove_dir.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/remove_dir.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/remove_dir.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Removes an existing, empty directory. 
-/// -/// This is an async version of [`std::fs::remove_dir`](std::fs::remove_dir) -pub async fn remove_dir(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::remove_dir(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/remove_file.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/remove_file.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/remove_file.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/remove_file.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,18 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Removes a file from the filesystem. -/// -/// Note that there is no guarantee that the file is immediately deleted (e.g. -/// depending on platform, other open file descriptors may prevent immediate -/// removal). -/// -/// This is an async version of [`std::fs::remove_file`][std] -/// -/// [std]: std::fs::remove_file -pub async fn remove_file(path: impl AsRef) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(move || std::fs::remove_file(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/rename.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/rename.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/rename.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/rename.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,17 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Renames a file or directory to a new name, replacing the original file if -/// `to` already exists. -/// -/// This will not work if the new name is on a different mount point. 
-/// -/// This is an async version of [`std::fs::rename`](std::fs::rename) -pub async fn rename(from: impl AsRef, to: impl AsRef) -> io::Result<()> { - let from = from.as_ref().to_owned(); - let to = to.as_ref().to_owned(); - - asyncify(move || std::fs::rename(from, to)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/set_permissions.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/set_permissions.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/set_permissions.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/set_permissions.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -use crate::fs::asyncify; - -use std::fs::Permissions; -use std::io; -use std::path::Path; - -/// Changes the permissions found on a file or a directory. -/// -/// This is an async version of [`std::fs::set_permissions`][std] -/// -/// [std]: fn@std::fs::set_permissions -pub async fn set_permissions(path: impl AsRef, perm: Permissions) -> io::Result<()> { - let path = path.as_ref().to_owned(); - asyncify(|| std::fs::set_permissions(path, perm)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/symlink_dir.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/symlink_dir.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/symlink_dir.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/symlink_dir.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new directory symlink on the filesystem. -/// -/// The `dst` path will be a directory symbolic link pointing to the `src` -/// path. 
-/// -/// This is an async version of [`std::os::windows::fs::symlink_dir`][std] -/// -/// [std]: https://doc.rust-lang.org/std/os/windows/fs/fn.symlink_dir.html -pub async fn symlink_dir(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - let src = src.as_ref().to_owned(); - let dst = dst.as_ref().to_owned(); - - asyncify(move || std::os::windows::fs::symlink_dir(src, dst)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/symlink_file.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/symlink_file.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/symlink_file.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/symlink_file.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new file symbolic link on the filesystem. -/// -/// The `dst` path will be a file symbolic link pointing to the `src` -/// path. -/// -/// This is an async version of [`std::os::windows::fs::symlink_file`][std] -/// -/// [std]: https://doc.rust-lang.org/std/os/windows/fs/fn.symlink_file.html -pub async fn symlink_file(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - let src = src.as_ref().to_owned(); - let dst = dst.as_ref().to_owned(); - - asyncify(move || std::os::windows::fs::symlink_file(src, dst)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/symlink_metadata.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/symlink_metadata.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/symlink_metadata.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/symlink_metadata.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -use crate::fs::asyncify; - -use std::fs::Metadata; -use std::io; -use std::path::Path; - -/// Queries the file system metadata for a path. 
-/// -/// This is an async version of [`std::fs::symlink_metadata`][std] -/// -/// [std]: fn@std::fs::symlink_metadata -pub async fn symlink_metadata(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - asyncify(|| std::fs::symlink_metadata(path)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/symlink.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/symlink.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/symlink.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/symlink.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,18 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Creates a new symbolic link on the filesystem. -/// -/// The `dst` path will be a symbolic link pointing to the `src` path. -/// -/// This is an async version of [`std::os::unix::fs::symlink`][std] -/// -/// [std]: std::os::unix::fs::symlink -pub async fn symlink(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - let src = src.as_ref().to_owned(); - let dst = dst.as_ref().to_owned(); - - asyncify(move || std::os::unix::fs::symlink(src, dst)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/try_exists.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/try_exists.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/try_exists.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/try_exists.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,34 +0,0 @@ -use crate::fs::asyncify; - -use std::io; -use std::path::Path; - -/// Returns `Ok(true)` if the path points at an existing entity. -/// -/// This function will traverse symbolic links to query information about the -/// destination file. In case of broken symbolic links this will return `Ok(false)`. -/// -/// This is the async equivalent of [`std::path::Path::try_exists`][std]. 
-/// -/// [std]: fn@std::path::Path::try_exists -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// # async fn dox() -> std::io::Result<()> { -/// fs::try_exists("foo.txt").await?; -/// # Ok(()) -/// # } -/// ``` -pub async fn try_exists(path: impl AsRef) -> io::Result { - let path = path.as_ref().to_owned(); - // std's Path::try_exists is not available for current Rust min supported version. - // Current implementation is based on its internal implementation instead. - match asyncify(move || std::fs::metadata(path)).await { - Ok(_) => Ok(true), - Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(false), - Err(error) => Err(error), - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fs/write.rs s390-tools-2.33.1/rust-vendor/tokio/src/fs/write.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fs/write.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fs/write.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use crate::fs::asyncify; - -use std::{io, path::Path}; - -/// Creates a future that will open a file for writing and write the entire -/// contents of `contents` to it. -/// -/// This is the async equivalent of [`std::fs::write`][std]. -/// -/// This operation is implemented by running the equivalent blocking operation -/// on a separate thread pool using [`spawn_blocking`]. 
-/// -/// [`spawn_blocking`]: crate::task::spawn_blocking -/// [std]: fn@std::fs::write -/// -/// # Examples -/// -/// ```no_run -/// use tokio::fs; -/// -/// # async fn dox() -> std::io::Result<()> { -/// fs::write("foo.txt", b"Hello world!").await?; -/// # Ok(()) -/// # } -/// ``` -pub async fn write(path: impl AsRef, contents: impl AsRef<[u8]>) -> io::Result<()> { - let path = path.as_ref().to_owned(); - let contents = contents.as_ref().to_owned(); - - asyncify(move || std::fs::write(path, contents)).await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/future/block_on.rs s390-tools-2.33.1/rust-vendor/tokio/src/future/block_on.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/future/block_on.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/future/block_on.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ -use std::future::Future; - -cfg_rt! { - #[track_caller] - pub(crate) fn block_on(f: F) -> F::Output { - let mut e = crate::runtime::context::try_enter_blocking_region().expect( - "Cannot block the current thread from within a runtime. This \ - happens because a function attempted to block the current \ - thread while the thread is being used to drive asynchronous \ - tasks." - ); - e.block_on(f).unwrap() - } -} - -cfg_not_rt! { - #[track_caller] - pub(crate) fn block_on(f: F) -> F::Output { - let mut park = crate::runtime::park::CachedParkThread::new(); - park.block_on(f).unwrap() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/future/maybe_done.rs s390-tools-2.33.1/rust-vendor/tokio/src/future/maybe_done.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/future/maybe_done.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/future/maybe_done.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -//! Definition of the MaybeDone combinator. - -use std::future::Future; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// A future that may have completed. 
-#[derive(Debug)] -pub enum MaybeDone { - /// A not-yet-completed future. - Future(Fut), - /// The output of the completed future. - Done(Fut::Output), - /// The empty variant after the result of a [`MaybeDone`] has been - /// taken using the [`take_output`](MaybeDone::take_output) method. - Gone, -} - -// Safe because we never generate `Pin<&mut Fut::Output>` -impl Unpin for MaybeDone {} - -/// Wraps a future into a `MaybeDone`. -pub fn maybe_done(future: Fut) -> MaybeDone { - MaybeDone::Future(future) -} - -impl MaybeDone { - /// Returns an [`Option`] containing a mutable reference to the output of the future. - /// The output of this method will be [`Some`] if and only if the inner - /// future has been completed and [`take_output`](MaybeDone::take_output) - /// has not yet been called. - pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Output> { - unsafe { - let this = self.get_unchecked_mut(); - match this { - MaybeDone::Done(res) => Some(res), - _ => None, - } - } - } - - /// Attempts to take the output of a `MaybeDone` without driving it - /// towards completion. 
- #[inline] - pub fn take_output(self: Pin<&mut Self>) -> Option { - unsafe { - let this = self.get_unchecked_mut(); - match this { - MaybeDone::Done(_) => {} - MaybeDone::Future(_) | MaybeDone::Gone => return None, - }; - if let MaybeDone::Done(output) = mem::replace(this, MaybeDone::Gone) { - Some(output) - } else { - unreachable!() - } - } - } -} - -impl Future for MaybeDone { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let res = unsafe { - match self.as_mut().get_unchecked_mut() { - MaybeDone::Future(a) => ready!(Pin::new_unchecked(a).poll(cx)), - MaybeDone::Done(_) => return Poll::Ready(()), - MaybeDone::Gone => panic!("MaybeDone polled after value taken"), - } - }; - self.set(MaybeDone::Done(res)); - Poll::Ready(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/future/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/future/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/future/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/future/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -#![cfg_attr(not(feature = "macros"), allow(unreachable_pub))] - -//! Asynchronous values. - -#[cfg(any(feature = "macros", feature = "process"))] -pub(crate) mod maybe_done; - -mod poll_fn; -pub use poll_fn::poll_fn; - -cfg_process! { - mod try_join; - pub(crate) use try_join::try_join3; -} - -cfg_sync! { - mod block_on; - pub(crate) use block_on::block_on; -} - -cfg_trace! { - mod trace; - #[allow(unused_imports)] - pub(crate) use trace::InstrumentedFuture as Future; -} - -cfg_not_trace! { - cfg_rt! 
{ - pub(crate) use std::future::Future; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/future/poll_fn.rs s390-tools-2.33.1/rust-vendor/tokio/src/future/poll_fn.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/future/poll_fn.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/future/poll_fn.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -#![allow(dead_code)] - -//! Definition of the `PollFn` adapter combinator. - -use std::fmt; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -// This struct is intentionally `!Unpin` when `F` is `!Unpin`. This is to -// mitigate the issue where rust puts noalias on mutable references to the -// `PollFn` type if it is `Unpin`. If the closure has ownership of a future, -// then this "leaks" and the future is affected by noalias too, which we don't -// want. -// -// See this thread for more information: -// -// -// The fact that `PollFn` is not `Unpin` when it shouldn't be is tested in -// `tests/async_send_sync.rs`. - -/// Future for the [`poll_fn`] function. -pub struct PollFn { - f: F, -} - -/// Creates a new future wrapping around a function returning [`Poll`]. -pub fn poll_fn(f: F) -> PollFn -where - F: FnMut(&mut Context<'_>) -> Poll, -{ - PollFn { f } -} - -impl fmt::Debug for PollFn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PollFn").finish() - } -} - -impl Future for PollFn -where - F: FnMut(&mut Context<'_>) -> Poll, -{ - type Output = T; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // Safety: We never construct a `Pin<&mut F>` anywhere, so accessing `f` - // mutably in an unpinned way is sound. - // - // This use of unsafe cannot be replaced with the pin-project macro - // because: - // * If we put `#[pin]` on the field, then it gives us a `Pin<&mut F>`, - // which we can't use to call the closure. 
- // * If we don't put `#[pin]` on the field, then it makes `PollFn` be - // unconditionally `Unpin`, which we also don't want. - let me = unsafe { Pin::into_inner_unchecked(self) }; - (me.f)(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/future/trace.rs s390-tools-2.33.1/rust-vendor/tokio/src/future/trace.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/future/trace.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/future/trace.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -use std::future::Future; - -pub(crate) trait InstrumentedFuture: Future { - fn id(&self) -> Option; -} - -impl InstrumentedFuture for tracing::instrument::Instrumented { - fn id(&self) -> Option { - self.span().id() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/future/try_join.rs s390-tools-2.33.1/rust-vendor/tokio/src/future/try_join.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/future/try_join.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/future/try_join.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,82 +0,0 @@ -use crate::future::maybe_done::{maybe_done, MaybeDone}; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pub(crate) fn try_join3( - future1: F1, - future2: F2, - future3: F3, -) -> TryJoin3 -where - F1: Future>, - F2: Future>, - F3: Future>, -{ - TryJoin3 { - future1: maybe_done(future1), - future2: maybe_done(future2), - future3: maybe_done(future3), - } -} - -pin_project! 
{ - pub(crate) struct TryJoin3 - where - F1: Future, - F2: Future, - F3: Future, - { - #[pin] - future1: MaybeDone, - #[pin] - future2: MaybeDone, - #[pin] - future3: MaybeDone, - } -} - -impl Future for TryJoin3 -where - F1: Future>, - F2: Future>, - F3: Future>, -{ - type Output = Result<(T1, T2, T3), E>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut all_done = true; - - let mut me = self.project(); - - if me.future1.as_mut().poll(cx).is_pending() { - all_done = false; - } else if me.future1.as_mut().output_mut().unwrap().is_err() { - return Poll::Ready(Err(me.future1.take_output().unwrap().err().unwrap())); - } - - if me.future2.as_mut().poll(cx).is_pending() { - all_done = false; - } else if me.future2.as_mut().output_mut().unwrap().is_err() { - return Poll::Ready(Err(me.future2.take_output().unwrap().err().unwrap())); - } - - if me.future3.as_mut().poll(cx).is_pending() { - all_done = false; - } else if me.future3.as_mut().output_mut().unwrap().is_err() { - return Poll::Ready(Err(me.future3.take_output().unwrap().err().unwrap())); - } - - if all_done { - Poll::Ready(Ok(( - me.future1.take_output().unwrap().ok().unwrap(), - me.future2.take_output().unwrap().ok().unwrap(), - me.future3.take_output().unwrap().ok().unwrap(), - ))) - } else { - Poll::Pending - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/fuzz.rs s390-tools-2.33.1/rust-vendor/tokio/src/fuzz.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/fuzz.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/fuzz.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -pub use crate::util::linked_list::tests::fuzz_linked_list; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/async_buf_read.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/async_buf_read.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/async_buf_read.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/async_buf_read.rs 1970-01-01 
01:00:00.000000000 +0100 @@ -1,117 +0,0 @@ -use crate::io::AsyncRead; - -use std::io; -use std::ops::DerefMut; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Reads bytes asynchronously. -/// -/// This trait is analogous to [`std::io::BufRead`], but integrates with -/// the asynchronous task system. In particular, the [`poll_fill_buf`] method, -/// unlike [`BufRead::fill_buf`], will automatically queue the current task for wakeup -/// and return if data is not yet available, rather than blocking the calling -/// thread. -/// -/// Utilities for working with `AsyncBufRead` values are provided by -/// [`AsyncBufReadExt`]. -/// -/// [`std::io::BufRead`]: std::io::BufRead -/// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf -/// [`BufRead::fill_buf`]: std::io::BufRead::fill_buf -/// [`AsyncBufReadExt`]: crate::io::AsyncBufReadExt -pub trait AsyncBufRead: AsyncRead { - /// Attempts to return the contents of the internal buffer, filling it with more data - /// from the inner reader if it is empty. - /// - /// On success, returns `Poll::Ready(Ok(buf))`. - /// - /// If no data is available for reading, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes - /// readable or is closed. - /// - /// This function is a lower-level call. It needs to be paired with the - /// [`consume`] method to function properly. When calling this - /// method, none of the contents will be "read" in the sense that later - /// calling [`poll_read`] may return the same contents. As such, [`consume`] must - /// be called with the number of bytes that are consumed from this buffer to - /// ensure that the bytes are never returned twice. - /// - /// An empty buffer returned indicates that the stream has reached EOF. 
- /// - /// [`poll_read`]: AsyncRead::poll_read - /// [`consume`]: AsyncBufRead::consume - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Tells this buffer that `amt` bytes have been consumed from the buffer, - /// so they should no longer be returned in calls to [`poll_read`]. - /// - /// This function is a lower-level call. It needs to be paired with the - /// [`poll_fill_buf`] method to function properly. This function does - /// not perform any I/O, it simply informs this object that some amount of - /// its buffer, returned from [`poll_fill_buf`], has been consumed and should - /// no longer be returned. As such, this function may do odd things if - /// [`poll_fill_buf`] isn't called before calling it. - /// - /// The `amt` must be `<=` the number of bytes in the buffer returned by - /// [`poll_fill_buf`]. - /// - /// [`poll_read`]: AsyncRead::poll_read - /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf - fn consume(self: Pin<&mut Self>, amt: usize); -} - -macro_rules! deref_async_buf_read { - () => { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self.get_mut()).poll_fill_buf(cx) - } - - fn consume(mut self: Pin<&mut Self>, amt: usize) { - Pin::new(&mut **self).consume(amt) - } - }; -} - -impl AsyncBufRead for Box { - deref_async_buf_read!(); -} - -impl AsyncBufRead for &mut T { - deref_async_buf_read!(); -} - -impl

AsyncBufRead for Pin

-where - P: DerefMut + Unpin, - P::Target: AsyncBufRead, -{ - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.get_mut().as_mut().consume(amt) - } -} - -impl AsyncBufRead for &[u8] { - fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(*self)) - } - - fn consume(mut self: Pin<&mut Self>, amt: usize) { - *self = &self[amt..]; - } -} - -impl + Unpin> AsyncBufRead for io::Cursor { - fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(io::BufRead::fill_buf(self.get_mut())) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - io::BufRead::consume(self.get_mut(), amt) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/async_fd.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/async_fd.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/async_fd.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/async_fd.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1243 +0,0 @@ -use crate::io::{Interest, Ready}; -use crate::runtime::io::{ReadyEvent, Registration}; -use crate::runtime::scheduler; - -use mio::unix::SourceFd; -use std::io; -use std::os::unix::io::{AsRawFd, RawFd}; -use std::{task::Context, task::Poll}; - -/// Associates an IO object backed by a Unix file descriptor with the tokio -/// reactor, allowing for readiness to be polled. The file descriptor must be of -/// a type that can be used with the OS polling facilities (ie, `poll`, `epoll`, -/// `kqueue`, etc), such as a network socket or pipe, and the file descriptor -/// must have the nonblocking mode set to true. -/// -/// Creating an AsyncFd registers the file descriptor with the current tokio -/// Reactor, allowing you to directly await the file descriptor being readable -/// or writable. Once registered, the file descriptor remains registered until -/// the AsyncFd is dropped. 
-/// -/// The AsyncFd takes ownership of an arbitrary object to represent the IO -/// object. It is intended that this object will handle closing the file -/// descriptor when it is dropped, avoiding resource leaks and ensuring that the -/// AsyncFd can clean up the registration before closing the file descriptor. -/// The [`AsyncFd::into_inner`] function can be used to extract the inner object -/// to retake control from the tokio IO reactor. -/// -/// The inner object is required to implement [`AsRawFd`]. This file descriptor -/// must not change while [`AsyncFd`] owns the inner object, i.e. the -/// [`AsRawFd::as_raw_fd`] method on the inner type must always return the same -/// file descriptor when called multiple times. Failure to uphold this results -/// in unspecified behavior in the IO driver, which may include breaking -/// notifications for other sockets/etc. -/// -/// Polling for readiness is done by calling the async functions [`readable`] -/// and [`writable`]. These functions complete when the associated readiness -/// condition is observed. Any number of tasks can query the same `AsyncFd` in -/// parallel, on the same or different conditions. -/// -/// On some platforms, the readiness detecting mechanism relies on -/// edge-triggered notifications. This means that the OS will only notify Tokio -/// when the file descriptor transitions from not-ready to ready. For this to -/// work you should first try to read or write and only poll for readiness -/// if that fails with an error of [`std::io::ErrorKind::WouldBlock`]. -/// -/// Tokio internally tracks when it has received a ready notification, and when -/// readiness checking functions like [`readable`] and [`writable`] are called, -/// if the readiness flag is set, these async functions will complete -/// immediately. This however does mean that it is critical to ensure that this -/// ready flag is cleared when (and only when) the file descriptor ceases to be -/// ready. 
The [`AsyncFdReadyGuard`] returned from readiness checking functions -/// serves this function; after calling a readiness-checking async function, -/// you must use this [`AsyncFdReadyGuard`] to signal to tokio whether the file -/// descriptor is no longer in a ready state. -/// -/// ## Use with to a poll-based API -/// -/// In some cases it may be desirable to use `AsyncFd` from APIs similar to -/// [`TcpStream::poll_read_ready`]. The [`AsyncFd::poll_read_ready`] and -/// [`AsyncFd::poll_write_ready`] functions are provided for this purpose. -/// Because these functions don't create a future to hold their state, they have -/// the limitation that only one task can wait on each direction (read or write) -/// at a time. -/// -/// # Examples -/// -/// This example shows how to turn [`std::net::TcpStream`] asynchronous using -/// `AsyncFd`. It implements the read/write operations both as an `async fn` -/// and using the IO traits [`AsyncRead`] and [`AsyncWrite`]. -/// -/// ```no_run -/// use futures::ready; -/// use std::io::{self, Read, Write}; -/// use std::net::TcpStream; -/// use std::pin::Pin; -/// use std::task::{Context, Poll}; -/// use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -/// use tokio::io::unix::AsyncFd; -/// -/// pub struct AsyncTcpStream { -/// inner: AsyncFd, -/// } -/// -/// impl AsyncTcpStream { -/// pub fn new(tcp: TcpStream) -> io::Result { -/// tcp.set_nonblocking(true)?; -/// Ok(Self { -/// inner: AsyncFd::new(tcp)?, -/// }) -/// } -/// -/// pub async fn read(&self, out: &mut [u8]) -> io::Result { -/// loop { -/// let mut guard = self.inner.readable().await?; -/// -/// match guard.try_io(|inner| inner.get_ref().read(out)) { -/// Ok(result) => return result, -/// Err(_would_block) => continue, -/// } -/// } -/// } -/// -/// pub async fn write(&self, buf: &[u8]) -> io::Result { -/// loop { -/// let mut guard = self.inner.writable().await?; -/// -/// match guard.try_io(|inner| inner.get_ref().write(buf)) { -/// Ok(result) => return result, -/// 
Err(_would_block) => continue, -/// } -/// } -/// } -/// } -/// -/// impl AsyncRead for AsyncTcpStream { -/// fn poll_read( -/// self: Pin<&mut Self>, -/// cx: &mut Context<'_>, -/// buf: &mut ReadBuf<'_> -/// ) -> Poll> { -/// loop { -/// let mut guard = ready!(self.inner.poll_read_ready(cx))?; -/// -/// let unfilled = buf.initialize_unfilled(); -/// match guard.try_io(|inner| inner.get_ref().read(unfilled)) { -/// Ok(Ok(len)) => { -/// buf.advance(len); -/// return Poll::Ready(Ok(())); -/// }, -/// Ok(Err(err)) => return Poll::Ready(Err(err)), -/// Err(_would_block) => continue, -/// } -/// } -/// } -/// } -/// -/// impl AsyncWrite for AsyncTcpStream { -/// fn poll_write( -/// self: Pin<&mut Self>, -/// cx: &mut Context<'_>, -/// buf: &[u8] -/// ) -> Poll> { -/// loop { -/// let mut guard = ready!(self.inner.poll_write_ready(cx))?; -/// -/// match guard.try_io(|inner| inner.get_ref().write(buf)) { -/// Ok(result) => return Poll::Ready(result), -/// Err(_would_block) => continue, -/// } -/// } -/// } -/// -/// fn poll_flush( -/// self: Pin<&mut Self>, -/// cx: &mut Context<'_>, -/// ) -> Poll> { -/// // tcp flush is a no-op -/// Poll::Ready(Ok(())) -/// } -/// -/// fn poll_shutdown( -/// self: Pin<&mut Self>, -/// cx: &mut Context<'_>, -/// ) -> Poll> { -/// self.inner.get_ref().shutdown(std::net::Shutdown::Write)?; -/// Poll::Ready(Ok(())) -/// } -/// } -/// ``` -/// -/// [`readable`]: method@Self::readable -/// [`writable`]: method@Self::writable -/// [`AsyncFdReadyGuard`]: struct@self::AsyncFdReadyGuard -/// [`TcpStream::poll_read_ready`]: struct@crate::net::TcpStream -/// [`AsyncRead`]: trait@crate::io::AsyncRead -/// [`AsyncWrite`]: trait@crate::io::AsyncWrite -pub struct AsyncFd { - registration: Registration, - // The inner value is always present. the Option is required for `drop` and `into_inner`. - // In all other methods `unwrap` is valid, and will never panic. 
- inner: Option, -} - -/// Represents an IO-ready event detected on a particular file descriptor that -/// has not yet been acknowledged. This is a `must_use` structure to help ensure -/// that you do not forget to explicitly clear (or not clear) the event. -/// -/// This type exposes an immutable reference to the underlying IO object. -#[must_use = "You must explicitly choose whether to clear the readiness state by calling a method on ReadyGuard"] -pub struct AsyncFdReadyGuard<'a, T: AsRawFd> { - async_fd: &'a AsyncFd, - event: Option, -} - -/// Represents an IO-ready event detected on a particular file descriptor that -/// has not yet been acknowledged. This is a `must_use` structure to help ensure -/// that you do not forget to explicitly clear (or not clear) the event. -/// -/// This type exposes a mutable reference to the underlying IO object. -#[must_use = "You must explicitly choose whether to clear the readiness state by calling a method on ReadyGuard"] -pub struct AsyncFdReadyMutGuard<'a, T: AsRawFd> { - async_fd: &'a mut AsyncFd, - event: Option, -} - -impl AsyncFd { - /// Creates an AsyncFd backed by (and taking ownership of) an object - /// implementing [`AsRawFd`]. The backing file descriptor is cached at the - /// time of creation. - /// - /// Only configures the [`Interest::READABLE`] and [`Interest::WRITABLE`] interests. For more - /// control, use [`AsyncFd::with_interest`]. - /// - /// This method must be called in the context of a tokio runtime. - /// - /// # Panics - /// - /// This function panics if there is no current reactor set, or if the `rt` - /// feature flag is not enabled. - #[inline] - #[track_caller] - pub fn new(inner: T) -> io::Result - where - T: AsRawFd, - { - Self::with_interest(inner, Interest::READABLE | Interest::WRITABLE) - } - - /// Creates an AsyncFd backed by (and taking ownership of) an object - /// implementing [`AsRawFd`], with a specific [`Interest`]. The backing - /// file descriptor is cached at the time of creation. 
- /// - /// # Panics - /// - /// This function panics if there is no current reactor set, or if the `rt` - /// feature flag is not enabled. - #[inline] - #[track_caller] - pub fn with_interest(inner: T, interest: Interest) -> io::Result - where - T: AsRawFd, - { - Self::new_with_handle_and_interest(inner, scheduler::Handle::current(), interest) - } - - #[track_caller] - pub(crate) fn new_with_handle_and_interest( - inner: T, - handle: scheduler::Handle, - interest: Interest, - ) -> io::Result { - let fd = inner.as_raw_fd(); - - let registration = - Registration::new_with_interest_and_handle(&mut SourceFd(&fd), interest, handle)?; - - Ok(AsyncFd { - registration, - inner: Some(inner), - }) - } - - /// Returns a shared reference to the backing object of this [`AsyncFd`]. - #[inline] - pub fn get_ref(&self) -> &T { - self.inner.as_ref().unwrap() - } - - /// Returns a mutable reference to the backing object of this [`AsyncFd`]. - #[inline] - pub fn get_mut(&mut self) -> &mut T { - self.inner.as_mut().unwrap() - } - - fn take_inner(&mut self) -> Option { - let inner = self.inner.take()?; - let fd = inner.as_raw_fd(); - - let _ = self.registration.deregister(&mut SourceFd(&fd)); - - Some(inner) - } - - /// Deregisters this file descriptor and returns ownership of the backing - /// object. - pub fn into_inner(mut self) -> T { - self.take_inner().unwrap() - } - - /// Polls for read readiness. - /// - /// If the file descriptor is not currently ready for reading, this method - /// will store a clone of the [`Waker`] from the provided [`Context`]. When the - /// file descriptor becomes ready for reading, [`Waker::wake`] will be called. - /// - /// Note that on multiple calls to [`poll_read_ready`] or - /// [`poll_read_ready_mut`], only the `Waker` from the `Context` passed to the - /// most recent call is scheduled to receive a wakeup. (However, - /// [`poll_write_ready`] retains a second, independent waker). 
- /// - /// This method is intended for cases where creating and pinning a future - /// via [`readable`] is not feasible. Where possible, using [`readable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// This method takes `&self`, so it is possible to call this method - /// concurrently with other methods on this struct. This method only - /// provides shared access to the inner IO resource when handling the - /// [`AsyncFdReadyGuard`]. - /// - /// [`poll_read_ready`]: method@Self::poll_read_ready - /// [`poll_read_ready_mut`]: method@Self::poll_read_ready_mut - /// [`poll_write_ready`]: method@Self::poll_write_ready - /// [`readable`]: method@Self::readable - /// [`Context`]: struct@std::task::Context - /// [`Waker`]: struct@std::task::Waker - /// [`Waker::wake`]: method@std::task::Waker::wake - pub fn poll_read_ready<'a>( - &'a self, - cx: &mut Context<'_>, - ) -> Poll>> { - let event = ready!(self.registration.poll_read_ready(cx))?; - - Poll::Ready(Ok(AsyncFdReadyGuard { - async_fd: self, - event: Some(event), - })) - } - - /// Polls for read readiness. - /// - /// If the file descriptor is not currently ready for reading, this method - /// will store a clone of the [`Waker`] from the provided [`Context`]. When the - /// file descriptor becomes ready for reading, [`Waker::wake`] will be called. - /// - /// Note that on multiple calls to [`poll_read_ready`] or - /// [`poll_read_ready_mut`], only the `Waker` from the `Context` passed to the - /// most recent call is scheduled to receive a wakeup. (However, - /// [`poll_write_ready`] retains a second, independent waker). - /// - /// This method is intended for cases where creating and pinning a future - /// via [`readable`] is not feasible. Where possible, using [`readable`] is - /// preferred, as this supports polling from multiple tasks at once. 
- /// - /// This method takes `&mut self`, so it is possible to access the inner IO - /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. - /// - /// [`poll_read_ready`]: method@Self::poll_read_ready - /// [`poll_read_ready_mut`]: method@Self::poll_read_ready_mut - /// [`poll_write_ready`]: method@Self::poll_write_ready - /// [`readable`]: method@Self::readable - /// [`Context`]: struct@std::task::Context - /// [`Waker`]: struct@std::task::Waker - /// [`Waker::wake`]: method@std::task::Waker::wake - pub fn poll_read_ready_mut<'a>( - &'a mut self, - cx: &mut Context<'_>, - ) -> Poll>> { - let event = ready!(self.registration.poll_read_ready(cx))?; - - Poll::Ready(Ok(AsyncFdReadyMutGuard { - async_fd: self, - event: Some(event), - })) - } - - /// Polls for write readiness. - /// - /// If the file descriptor is not currently ready for writing, this method - /// will store a clone of the [`Waker`] from the provided [`Context`]. When the - /// file descriptor becomes ready for writing, [`Waker::wake`] will be called. - /// - /// Note that on multiple calls to [`poll_write_ready`] or - /// [`poll_write_ready_mut`], only the `Waker` from the `Context` passed to the - /// most recent call is scheduled to receive a wakeup. (However, - /// [`poll_read_ready`] retains a second, independent waker). - /// - /// This method is intended for cases where creating and pinning a future - /// via [`writable`] is not feasible. Where possible, using [`writable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// This method takes `&self`, so it is possible to call this method - /// concurrently with other methods on this struct. This method only - /// provides shared access to the inner IO resource when handling the - /// [`AsyncFdReadyGuard`]. 
- /// - /// [`poll_read_ready`]: method@Self::poll_read_ready - /// [`poll_write_ready`]: method@Self::poll_write_ready - /// [`poll_write_ready_mut`]: method@Self::poll_write_ready_mut - /// [`writable`]: method@Self::readable - /// [`Context`]: struct@std::task::Context - /// [`Waker`]: struct@std::task::Waker - /// [`Waker::wake`]: method@std::task::Waker::wake - pub fn poll_write_ready<'a>( - &'a self, - cx: &mut Context<'_>, - ) -> Poll>> { - let event = ready!(self.registration.poll_write_ready(cx))?; - - Poll::Ready(Ok(AsyncFdReadyGuard { - async_fd: self, - event: Some(event), - })) - } - - /// Polls for write readiness. - /// - /// If the file descriptor is not currently ready for writing, this method - /// will store a clone of the [`Waker`] from the provided [`Context`]. When the - /// file descriptor becomes ready for writing, [`Waker::wake`] will be called. - /// - /// Note that on multiple calls to [`poll_write_ready`] or - /// [`poll_write_ready_mut`], only the `Waker` from the `Context` passed to the - /// most recent call is scheduled to receive a wakeup. (However, - /// [`poll_read_ready`] retains a second, independent waker). - /// - /// This method is intended for cases where creating and pinning a future - /// via [`writable`] is not feasible. Where possible, using [`writable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// This method takes `&mut self`, so it is possible to access the inner IO - /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. 
- /// - /// [`poll_read_ready`]: method@Self::poll_read_ready - /// [`poll_write_ready`]: method@Self::poll_write_ready - /// [`poll_write_ready_mut`]: method@Self::poll_write_ready_mut - /// [`writable`]: method@Self::readable - /// [`Context`]: struct@std::task::Context - /// [`Waker`]: struct@std::task::Waker - /// [`Waker::wake`]: method@std::task::Waker::wake - pub fn poll_write_ready_mut<'a>( - &'a mut self, - cx: &mut Context<'_>, - ) -> Poll>> { - let event = ready!(self.registration.poll_write_ready(cx))?; - - Poll::Ready(Ok(AsyncFdReadyMutGuard { - async_fd: self, - event: Some(event), - })) - } - - /// Waits for any of the requested ready states, returning a - /// [`AsyncFdReadyGuard`] that must be dropped to resume - /// polling for the requested ready states. - /// - /// The function may complete without the file descriptor being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// When an IO operation does return `io::ErrorKind::WouldBlock`, the readiness must be cleared. - /// When a combined interest is used, it is important to clear only the readiness - /// that is actually observed to block. For instance when the combined - /// interest `Interest::READABLE | Interest::WRITABLE` is used, and a read blocks, only - /// read readiness should be cleared using the [`AsyncFdReadyGuard::clear_ready_matching`] method: - /// `guard.clear_ready_matching(Ready::READABLE)`. - /// Also clearing the write readiness in this case would be incorrect. The [`AsyncFdReadyGuard::clear_ready`] - /// method clears all readiness flags. - /// - /// This method takes `&self`, so it is possible to call this method - /// concurrently with other methods on this struct. 
This method only - /// provides shared access to the inner IO resource when handling the - /// [`AsyncFdReadyGuard`]. - /// - /// # Examples - /// - /// Concurrently read and write to a [`std::net::TcpStream`] on the same task without - /// splitting. - /// - /// ```no_run - /// use std::error::Error; - /// use std::io; - /// use std::io::{Read, Write}; - /// use std::net::TcpStream; - /// use tokio::io::unix::AsyncFd; - /// use tokio::io::{Interest, Ready}; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080")?; - /// stream.set_nonblocking(true)?; - /// let stream = AsyncFd::new(stream)?; - /// - /// loop { - /// let mut guard = stream - /// .ready(Interest::READABLE | Interest::WRITABLE) - /// .await?; - /// - /// if guard.ready().is_readable() { - /// let mut data = vec![0; 1024]; - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.get_ref().read(&mut data) { - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// // a read has blocked, but a write might still succeed. - /// // clear only the read readiness. - /// guard.clear_ready_matching(Ready::READABLE); - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// if guard.ready().is_writable() { - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.get_ref().write(b"hello world") { - /// Ok(n) => { - /// println!("write {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// // a write has blocked, but a read might still succeed. - /// // clear only the write readiness. 
- /// guard.clear_ready_matching(Ready::WRITABLE); - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub async fn ready(&self, interest: Interest) -> io::Result> { - let event = self.registration.readiness(interest).await?; - - Ok(AsyncFdReadyGuard { - async_fd: self, - event: Some(event), - }) - } - - /// Waits for any of the requested ready states, returning a - /// [`AsyncFdReadyMutGuard`] that must be dropped to resume - /// polling for the requested ready states. - /// - /// The function may complete without the file descriptor being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// When an IO operation does return `io::ErrorKind::WouldBlock`, the readiness must be cleared. - /// When a combined interest is used, it is important to clear only the readiness - /// that is actually observed to block. For instance when the combined - /// interest `Interest::READABLE | Interest::WRITABLE` is used, and a read blocks, only - /// read readiness should be cleared using the [`AsyncFdReadyMutGuard::clear_ready_matching`] method: - /// `guard.clear_ready_matching(Ready::READABLE)`. - /// Also clearing the write readiness in this case would be incorrect. - /// The [`AsyncFdReadyMutGuard::clear_ready`] method clears all readiness flags. - /// - /// This method takes `&mut self`, so it is possible to access the inner IO - /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. - /// - /// # Examples - /// - /// Concurrently read and write to a [`std::net::TcpStream`] on the same task without - /// splitting. 
- /// - /// ```no_run - /// use std::error::Error; - /// use std::io; - /// use std::io::{Read, Write}; - /// use std::net::TcpStream; - /// use tokio::io::unix::AsyncFd; - /// use tokio::io::{Interest, Ready}; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080")?; - /// stream.set_nonblocking(true)?; - /// let mut stream = AsyncFd::new(stream)?; - /// - /// loop { - /// let mut guard = stream - /// .ready_mut(Interest::READABLE | Interest::WRITABLE) - /// .await?; - /// - /// if guard.ready().is_readable() { - /// let mut data = vec![0; 1024]; - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match guard.get_inner_mut().read(&mut data) { - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// // a read has blocked, but a write might still succeed. - /// // clear only the read readiness. - /// guard.clear_ready_matching(Ready::READABLE); - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// if guard.ready().is_writable() { - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match guard.get_inner_mut().write(b"hello world") { - /// Ok(n) => { - /// println!("write {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// // a write has blocked, but a read might still succeed. - /// // clear only the write readiness. 
- /// guard.clear_ready_matching(Ready::WRITABLE); - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub async fn ready_mut( - &mut self, - interest: Interest, - ) -> io::Result> { - let event = self.registration.readiness(interest).await?; - - Ok(AsyncFdReadyMutGuard { - async_fd: self, - event: Some(event), - }) - } - - /// Waits for the file descriptor to become readable, returning a - /// [`AsyncFdReadyGuard`] that must be dropped to resume read-readiness - /// polling. - /// - /// This method takes `&self`, so it is possible to call this method - /// concurrently with other methods on this struct. This method only - /// provides shared access to the inner IO resource when handling the - /// [`AsyncFdReadyGuard`]. - #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering. - pub async fn readable<'a>(&'a self) -> io::Result> { - self.ready(Interest::READABLE).await - } - - /// Waits for the file descriptor to become readable, returning a - /// [`AsyncFdReadyMutGuard`] that must be dropped to resume read-readiness - /// polling. - /// - /// This method takes `&mut self`, so it is possible to access the inner IO - /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. - #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering. - pub async fn readable_mut<'a>(&'a mut self) -> io::Result> { - self.ready_mut(Interest::READABLE).await - } - - /// Waits for the file descriptor to become writable, returning a - /// [`AsyncFdReadyGuard`] that must be dropped to resume write-readiness - /// polling. - /// - /// This method takes `&self`, so it is possible to call this method - /// concurrently with other methods on this struct. This method only - /// provides shared access to the inner IO resource when handling the - /// [`AsyncFdReadyGuard`]. - #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering. 
- pub async fn writable<'a>(&'a self) -> io::Result> { - self.ready(Interest::WRITABLE).await - } - - /// Waits for the file descriptor to become writable, returning a - /// [`AsyncFdReadyMutGuard`] that must be dropped to resume write-readiness - /// polling. - /// - /// This method takes `&mut self`, so it is possible to access the inner IO - /// resource mutably when handling the [`AsyncFdReadyMutGuard`]. - #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering. - pub async fn writable_mut<'a>(&'a mut self) -> io::Result> { - self.ready_mut(Interest::WRITABLE).await - } - - /// Reads or writes from the file descriptor using a user-provided IO operation. - /// - /// The `async_io` method is a convenience utility that waits for the file - /// descriptor to become ready, and then executes the provided IO operation. - /// Since file descriptors may be marked ready spuriously, the closure will - /// be called repeatedly until it returns something other than a - /// [`WouldBlock`] error. This is done using the following loop: - /// - /// ```no_run - /// # use std::io::{self, Result}; - /// # struct Dox { inner: T } - /// # impl Dox { - /// # async fn writable(&self) -> Result<&Self> { - /// # Ok(self) - /// # } - /// # fn try_io(&self, _: impl FnMut(&T) -> Result) -> Result> { - /// # panic!() - /// # } - /// async fn async_io(&self, mut f: impl FnMut(&T) -> io::Result) -> io::Result { - /// loop { - /// // or `readable` if called with the read interest. - /// let guard = self.writable().await?; - /// - /// match guard.try_io(&mut f) { - /// Ok(result) => return result, - /// Err(_would_block) => continue, - /// } - /// } - /// } - /// # } - /// ``` - /// - /// The closure should only return a [`WouldBlock`] error if it has performed - /// an IO operation on the file descriptor that failed due to the file descriptor not being - /// ready. 
Returning a [`WouldBlock`] error in any other situation will - /// incorrectly clear the readiness flag, which can cause the file descriptor to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio [`AsyncFd`] type, as this will mess with the - /// readiness flag and can cause the file descriptor to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. - /// - /// # Examples - /// - /// This example sends some bytes on the inner [`std::net::UdpSocket`]. The `async_io` - /// method waits for readiness, and retries if the send operation does block. This example - /// is equivalent to the one given for [`try_io`]. - /// - /// ```no_run - /// use tokio::io::{Interest, unix::AsyncFd}; - /// - /// use std::io; - /// use std::net::UdpSocket; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let socket = UdpSocket::bind("0.0.0.0:8080")?; - /// socket.set_nonblocking(true)?; - /// let async_fd = AsyncFd::new(socket)?; - /// - /// let written = async_fd - /// .async_io(Interest::WRITABLE, |inner| inner.send(&[1, 2])) - /// .await?; - /// - /// println!("wrote {written} bytes"); - /// - /// Ok(()) - /// } - /// ``` - /// - /// [`try_io`]: AsyncFdReadyGuard::try_io - /// [`WouldBlock`]: std::io::ErrorKind::WouldBlock - pub async fn async_io( - &self, - interest: Interest, - mut f: impl FnMut(&T) -> io::Result, - ) -> io::Result { - self.registration - .async_io(interest, || f(self.get_ref())) - .await - } - - /// Reads or writes from the file descriptor using a user-provided IO operation. - /// - /// The behavior is the same as [`async_io`], except that the closure can mutate the inner - /// value of the [`AsyncFd`]. 
- /// - /// [`async_io`]: AsyncFd::async_io - pub async fn async_io_mut( - &mut self, - interest: Interest, - mut f: impl FnMut(&mut T) -> io::Result, - ) -> io::Result { - self.registration - .async_io(interest, || f(self.inner.as_mut().unwrap())) - .await - } -} - -impl AsRawFd for AsyncFd { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_ref().unwrap().as_raw_fd() - } -} - -impl std::os::unix::io::AsFd for AsyncFd { - fn as_fd(&self) -> std::os::unix::io::BorrowedFd<'_> { - unsafe { std::os::unix::io::BorrowedFd::borrow_raw(self.as_raw_fd()) } - } -} - -impl std::fmt::Debug for AsyncFd { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("AsyncFd") - .field("inner", &self.inner) - .finish() - } -} - -impl Drop for AsyncFd { - fn drop(&mut self) { - let _ = self.take_inner(); - } -} - -impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> { - /// Indicates to tokio that the file descriptor is no longer ready. All - /// internal readiness flags will be cleared, and tokio will wait for the - /// next edge-triggered readiness notification from the OS. - /// - /// This function is commonly used with guards returned by [`AsyncFd::readable`] and - /// [`AsyncFd::writable`]. - /// - /// It is critical that this function not be called unless your code - /// _actually observes_ that the file descriptor is _not_ ready. Do not call - /// it simply because, for example, a read succeeded; it should be called - /// when a read is observed to block. - pub fn clear_ready(&mut self) { - if let Some(event) = self.event.take() { - self.async_fd.registration.clear_readiness(event); - } - } - - /// Indicates to tokio that the file descriptor no longer has a specific readiness. - /// The internal readiness flag will be cleared, and tokio will wait for the - /// next edge-triggered readiness notification from the OS. 
- /// - /// This function is useful in combination with the [`AsyncFd::ready`] method when a - /// combined interest like `Interest::READABLE | Interest::WRITABLE` is used. - /// - /// It is critical that this function not be called unless your code - /// _actually observes_ that the file descriptor is _not_ ready for the provided `Ready`. - /// Do not call it simply because, for example, a read succeeded; it should be called - /// when a read is observed to block. Only clear the specific readiness that is observed to - /// block. For example when a read blocks when using a combined interest, - /// only clear `Ready::READABLE`. - /// - /// # Examples - /// - /// Concurrently read and write to a [`std::net::TcpStream`] on the same task without - /// splitting. - /// - /// ```no_run - /// use std::error::Error; - /// use std::io; - /// use std::io::{Read, Write}; - /// use std::net::TcpStream; - /// use tokio::io::unix::AsyncFd; - /// use tokio::io::{Interest, Ready}; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080")?; - /// stream.set_nonblocking(true)?; - /// let stream = AsyncFd::new(stream)?; - /// - /// loop { - /// let mut guard = stream - /// .ready(Interest::READABLE | Interest::WRITABLE) - /// .await?; - /// - /// if guard.ready().is_readable() { - /// let mut data = vec![0; 1024]; - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.get_ref().read(&mut data) { - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// // a read has blocked, but a write might still succeed. - /// // clear only the read readiness. 
- /// guard.clear_ready_matching(Ready::READABLE); - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// if guard.ready().is_writable() { - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.get_ref().write(b"hello world") { - /// Ok(n) => { - /// println!("write {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// // a write has blocked, but a read might still succeed. - /// // clear only the write readiness. - /// guard.clear_ready_matching(Ready::WRITABLE); - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub fn clear_ready_matching(&mut self, ready: Ready) { - if let Some(mut event) = self.event.take() { - self.async_fd - .registration - .clear_readiness(event.with_ready(ready)); - - // the event is no longer ready for the readiness that was just cleared - event.ready = event.ready - ready; - - if !event.ready.is_empty() { - self.event = Some(event); - } - } - } - - /// This method should be invoked when you intentionally want to keep the - /// ready flag asserted. - /// - /// While this function is itself a no-op, it satisfies the `#[must_use]` - /// constraint on the [`AsyncFdReadyGuard`] type. - pub fn retain_ready(&mut self) { - // no-op - } - - /// Get the [`Ready`] value associated with this guard. - /// - /// This method will return the empty readiness state if - /// [`AsyncFdReadyGuard::clear_ready`] has been called on - /// the guard. - /// - /// [`Ready`]: crate::io::Ready - pub fn ready(&self) -> Ready { - match &self.event { - Some(event) => event.ready, - None => Ready::EMPTY, - } - } - - /// Performs the provided IO operation. 
- /// - /// If `f` returns a [`WouldBlock`] error, the readiness state associated - /// with this file descriptor is cleared, and the method returns - /// `Err(TryIoError::WouldBlock)`. You will typically need to poll the - /// `AsyncFd` again when this happens. - /// - /// This method helps ensure that the readiness state of the underlying file - /// descriptor remains in sync with the tokio-side readiness state, by - /// clearing the tokio-side state only when a [`WouldBlock`] condition - /// occurs. It is the responsibility of the caller to ensure that `f` - /// returns [`WouldBlock`] only if the file descriptor that originated this - /// `AsyncFdReadyGuard` no longer expresses the readiness state that was queried to - /// create this `AsyncFdReadyGuard`. - /// - /// # Examples - /// - /// This example sends some bytes to the inner [`std::net::UdpSocket`]. Waiting - /// for write-readiness and retrying when the send operation does block are explicit. - /// This example can be written more succinctly using [`AsyncFd::async_io`]. 
- /// - /// ```no_run - /// use tokio::io::unix::AsyncFd; - /// - /// use std::io; - /// use std::net::UdpSocket; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let socket = UdpSocket::bind("0.0.0.0:8080")?; - /// socket.set_nonblocking(true)?; - /// let async_fd = AsyncFd::new(socket)?; - /// - /// let written = loop { - /// let mut guard = async_fd.writable().await?; - /// match guard.try_io(|inner| inner.get_ref().send(&[1, 2])) { - /// Ok(result) => { - /// break result?; - /// } - /// Err(_would_block) => { - /// // try_io already cleared the file descriptor's readiness state - /// continue; - /// } - /// } - /// }; - /// - /// println!("wrote {written} bytes"); - /// - /// Ok(()) - /// } - /// ``` - /// - /// [`WouldBlock`]: std::io::ErrorKind::WouldBlock - // Alias for old name in 0.x - #[cfg_attr(docsrs, doc(alias = "with_io"))] - pub fn try_io( - &mut self, - f: impl FnOnce(&'a AsyncFd) -> io::Result, - ) -> Result, TryIoError> { - let result = f(self.async_fd); - - match result { - Err(err) if err.kind() == io::ErrorKind::WouldBlock => { - self.clear_ready(); - Err(TryIoError(())) - } - result => Ok(result), - } - } - - /// Returns a shared reference to the inner [`AsyncFd`]. - pub fn get_ref(&self) -> &'a AsyncFd { - self.async_fd - } - - /// Returns a shared reference to the backing object of the inner [`AsyncFd`]. - pub fn get_inner(&self) -> &'a Inner { - self.get_ref().get_ref() - } -} - -impl<'a, Inner: AsRawFd> AsyncFdReadyMutGuard<'a, Inner> { - /// Indicates to tokio that the file descriptor is no longer ready. All - /// internal readiness flags will be cleared, and tokio will wait for the - /// next edge-triggered readiness notification from the OS. - /// - /// This function is commonly used with guards returned by [`AsyncFd::readable_mut`] and - /// [`AsyncFd::writable_mut`]. - /// - /// It is critical that this function not be called unless your code - /// _actually observes_ that the file descriptor is _not_ ready. 
Do not call - /// it simply because, for example, a read succeeded; it should be called - /// when a read is observed to block. - pub fn clear_ready(&mut self) { - if let Some(event) = self.event.take() { - self.async_fd.registration.clear_readiness(event); - } - } - - /// Indicates to tokio that the file descriptor no longer has a specific readiness. - /// The internal readiness flag will be cleared, and tokio will wait for the - /// next edge-triggered readiness notification from the OS. - /// - /// This function is useful in combination with the [`AsyncFd::ready_mut`] method when a - /// combined interest like `Interest::READABLE | Interest::WRITABLE` is used. - /// - /// It is critical that this function not be called unless your code - /// _actually observes_ that the file descriptor is _not_ ready for the provided `Ready`. - /// Do not call it simply because, for example, a read succeeded; it should be called - /// when a read is observed to block. Only clear the specific readiness that is observed to - /// block. For example when a read blocks when using a combined interest, - /// only clear `Ready::READABLE`. - /// - /// # Examples - /// - /// Concurrently read and write to a [`std::net::TcpStream`] on the same task without - /// splitting. 
- /// - /// ```no_run - /// use std::error::Error; - /// use std::io; - /// use std::io::{Read, Write}; - /// use std::net::TcpStream; - /// use tokio::io::unix::AsyncFd; - /// use tokio::io::{Interest, Ready}; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080")?; - /// stream.set_nonblocking(true)?; - /// let mut stream = AsyncFd::new(stream)?; - /// - /// loop { - /// let mut guard = stream - /// .ready_mut(Interest::READABLE | Interest::WRITABLE) - /// .await?; - /// - /// if guard.ready().is_readable() { - /// let mut data = vec![0; 1024]; - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match guard.get_inner_mut().read(&mut data) { - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// // a read has blocked, but a write might still succeed. - /// // clear only the read readiness. - /// guard.clear_ready_matching(Ready::READABLE); - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// if guard.ready().is_writable() { - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match guard.get_inner_mut().write(b"hello world") { - /// Ok(n) => { - /// println!("write {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// // a write has blocked, but a read might still succeed. - /// // clear only the write readiness. 
- /// guard.clear_ready_matching(Ready::WRITABLE); - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub fn clear_ready_matching(&mut self, ready: Ready) { - if let Some(mut event) = self.event.take() { - self.async_fd - .registration - .clear_readiness(event.with_ready(ready)); - - // the event is no longer ready for the readiness that was just cleared - event.ready = event.ready - ready; - - if !event.ready.is_empty() { - self.event = Some(event); - } - } - } - - /// This method should be invoked when you intentionally want to keep the - /// ready flag asserted. - /// - /// While this function is itself a no-op, it satisfies the `#[must_use]` - /// constraint on the [`AsyncFdReadyGuard`] type. - pub fn retain_ready(&mut self) { - // no-op - } - - /// Get the [`Ready`] value associated with this guard. - /// - /// This method will return the empty readiness state if - /// [`AsyncFdReadyGuard::clear_ready`] has been called on - /// the guard. - /// - /// [`Ready`]: super::Ready - pub fn ready(&self) -> Ready { - match &self.event { - Some(event) => event.ready, - None => Ready::EMPTY, - } - } - - /// Performs the provided IO operation. - /// - /// If `f` returns a [`WouldBlock`] error, the readiness state associated - /// with this file descriptor is cleared, and the method returns - /// `Err(TryIoError::WouldBlock)`. You will typically need to poll the - /// `AsyncFd` again when this happens. - /// - /// This method helps ensure that the readiness state of the underlying file - /// descriptor remains in sync with the tokio-side readiness state, by - /// clearing the tokio-side state only when a [`WouldBlock`] condition - /// occurs. It is the responsibility of the caller to ensure that `f` - /// returns [`WouldBlock`] only if the file descriptor that originated this - /// `AsyncFdReadyGuard` no longer expresses the readiness state that was queried to - /// create this `AsyncFdReadyGuard`. 
- /// - /// [`WouldBlock`]: std::io::ErrorKind::WouldBlock - pub fn try_io( - &mut self, - f: impl FnOnce(&mut AsyncFd) -> io::Result, - ) -> Result, TryIoError> { - let result = f(self.async_fd); - - match result { - Err(err) if err.kind() == io::ErrorKind::WouldBlock => { - self.clear_ready(); - Err(TryIoError(())) - } - result => Ok(result), - } - } - - /// Returns a shared reference to the inner [`AsyncFd`]. - pub fn get_ref(&self) -> &AsyncFd { - self.async_fd - } - - /// Returns a mutable reference to the inner [`AsyncFd`]. - pub fn get_mut(&mut self) -> &mut AsyncFd { - self.async_fd - } - - /// Returns a shared reference to the backing object of the inner [`AsyncFd`]. - pub fn get_inner(&self) -> &Inner { - self.get_ref().get_ref() - } - - /// Returns a mutable reference to the backing object of the inner [`AsyncFd`]. - pub fn get_inner_mut(&mut self) -> &mut Inner { - self.get_mut().get_mut() - } -} - -impl<'a, T: std::fmt::Debug + AsRawFd> std::fmt::Debug for AsyncFdReadyGuard<'a, T> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ReadyGuard") - .field("async_fd", &self.async_fd) - .finish() - } -} - -impl<'a, T: std::fmt::Debug + AsRawFd> std::fmt::Debug for AsyncFdReadyMutGuard<'a, T> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("MutReadyGuard") - .field("async_fd", &self.async_fd) - .finish() - } -} - -/// The error type returned by [`try_io`]. -/// -/// This error indicates that the IO resource returned a [`WouldBlock`] error. 
-/// -/// [`WouldBlock`]: std::io::ErrorKind::WouldBlock -/// [`try_io`]: method@AsyncFdReadyGuard::try_io -#[derive(Debug)] -pub struct TryIoError(()); diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/async_read.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/async_read.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/async_read.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/async_read.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,131 +0,0 @@ -use super::ReadBuf; -use std::io; -use std::ops::DerefMut; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Reads bytes from a source. -/// -/// This trait is analogous to the [`std::io::Read`] trait, but integrates with -/// the asynchronous task system. In particular, the [`poll_read`] method, -/// unlike [`Read::read`], will automatically queue the current task for wakeup -/// and return if data is not yet available, rather than blocking the calling -/// thread. -/// -/// Specifically, this means that the `poll_read` function will return one of -/// the following: -/// -/// * `Poll::Ready(Ok(()))` means that data was immediately read and placed into -/// the output buffer. The amount of data read can be determined by the -/// increase in the length of the slice returned by `ReadBuf::filled`. If the -/// difference is 0, EOF has been reached. -/// -/// * `Poll::Pending` means that no data was read into the buffer -/// provided. The I/O object is not currently readable but may become readable -/// in the future. Most importantly, **the current future's task is scheduled -/// to get unparked when the object is readable**. This means that like -/// `Future::poll` you'll receive a notification when the I/O object is -/// readable again. -/// -/// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the -/// underlying object. -/// -/// This trait importantly means that the `read` method only works in the -/// context of a future's task. 
The object may panic if used outside of a task. -/// -/// Utilities for working with `AsyncRead` values are provided by -/// [`AsyncReadExt`]. -/// -/// [`poll_read`]: AsyncRead::poll_read -/// [`std::io::Read`]: std::io::Read -/// [`Read::read`]: std::io::Read::read -/// [`AsyncReadExt`]: crate::io::AsyncReadExt -pub trait AsyncRead { - /// Attempts to read from the `AsyncRead` into `buf`. - /// - /// On success, returns `Poll::Ready(Ok(()))` and places data in the - /// unfilled portion of `buf`. If no data was read (`buf.filled().len()` is - /// unchanged), it implies that EOF has been reached. - /// - /// If no data is available for reading, the method returns `Poll::Pending` - /// and arranges for the current task (via `cx.waker()`) to receive a - /// notification when the object becomes readable or is closed. - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll>; -} - -macro_rules! deref_async_read { - () => { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - Pin::new(&mut **self).poll_read(cx, buf) - } - }; -} - -impl AsyncRead for Box { - deref_async_read!(); -} - -impl AsyncRead for &mut T { - deref_async_read!(); -} - -impl

AsyncRead for Pin

-where - P: DerefMut + Unpin, - P::Target: AsyncRead, -{ - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.get_mut().as_mut().poll_read(cx, buf) - } -} - -impl AsyncRead for &[u8] { - fn poll_read( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - let amt = std::cmp::min(self.len(), buf.remaining()); - let (a, b) = self.split_at(amt); - buf.put_slice(a); - *self = b; - Poll::Ready(Ok(())) - } -} - -impl + Unpin> AsyncRead for io::Cursor { - fn poll_read( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - let pos = self.position(); - let slice: &[u8] = (*self).get_ref().as_ref(); - - // The position could technically be out of bounds, so don't panic... - if pos > slice.len() as u64 { - return Poll::Ready(Ok(())); - } - - let start = pos as usize; - let amt = std::cmp::min(slice.len() - start, buf.remaining()); - // Add won't overflow because of pos check above. - let end = start + amt; - buf.put_slice(&slice[start..end]); - self.set_position(end as u64); - - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/async_seek.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/async_seek.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/async_seek.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/async_seek.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,90 +0,0 @@ -use std::io::{self, SeekFrom}; -use std::ops::DerefMut; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Seek bytes asynchronously. -/// -/// This trait is analogous to the [`std::io::Seek`] trait, but integrates -/// with the asynchronous task system. In particular, the `start_seek` -/// method, unlike [`Seek::seek`], will not block the calling thread. -/// -/// Utilities for working with `AsyncSeek` values are provided by -/// [`AsyncSeekExt`]. 
-/// -/// [`std::io::Seek`]: std::io::Seek -/// [`Seek::seek`]: std::io::Seek::seek() -/// [`AsyncSeekExt`]: crate::io::AsyncSeekExt -pub trait AsyncSeek { - /// Attempts to seek to an offset, in bytes, in a stream. - /// - /// A seek beyond the end of a stream is allowed, but behavior is defined - /// by the implementation. - /// - /// If this function returns successfully, then the job has been submitted. - /// To find out when it completes, call `poll_complete`. - /// - /// # Errors - /// - /// This function can return [`io::ErrorKind::Other`] in case there is - /// another seek in progress. To avoid this, it is advisable that any call - /// to `start_seek` is preceded by a call to `poll_complete` to ensure all - /// pending seeks have completed. - fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> io::Result<()>; - - /// Waits for a seek operation to complete. - /// - /// If the seek operation completed successfully, - /// this method returns the new position from the start of the stream. - /// That position can be used later with [`SeekFrom::Start`]. Repeatedly - /// calling this function without calling `start_seek` might return the - /// same result. - /// - /// # Errors - /// - /// Seeking to a negative offset is considered an error. - fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; -} - -macro_rules! deref_async_seek { - () => { - fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> { - Pin::new(&mut **self).start_seek(pos) - } - - fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_complete(cx) - } - }; -} - -impl AsyncSeek for Box { - deref_async_seek!(); -} - -impl AsyncSeek for &mut T { - deref_async_seek!(); -} - -impl

AsyncSeek for Pin

-where - P: DerefMut + Unpin, - P::Target: AsyncSeek, -{ - fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> { - self.get_mut().as_mut().start_seek(pos) - } - - fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_complete(cx) - } -} - -impl + Unpin> AsyncSeek for io::Cursor { - fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> { - io::Seek::seek(&mut *self, pos).map(drop) - } - fn poll_complete(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(self.get_mut().position())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/async_write.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/async_write.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/async_write.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/async_write.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,408 +0,0 @@ -use std::io::{self, IoSlice}; -use std::ops::DerefMut; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Writes bytes asynchronously. -/// -/// The trait inherits from [`std::io::Write`] and indicates that an I/O object is -/// **nonblocking**. All non-blocking I/O objects must return an error when -/// bytes cannot be written instead of blocking the current thread. -/// -/// Specifically, this means that the [`poll_write`] function will return one of -/// the following: -/// -/// * `Poll::Ready(Ok(n))` means that `n` bytes of data was immediately -/// written. -/// -/// * `Poll::Pending` means that no data was written from the buffer -/// provided. The I/O object is not currently writable but may become writable -/// in the future. Most importantly, **the current future's task is scheduled -/// to get unparked when the object is writable**. This means that like -/// `Future::poll` you'll receive a notification when the I/O object is -/// writable again. 
-/// -/// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the -/// underlying object. -/// -/// This trait importantly means that the [`write`][stdwrite] method only works in -/// the context of a future's task. The object may panic if used outside of a task. -/// -/// Note that this trait also represents that the [`Write::flush`][stdflush] method -/// works very similarly to the `write` method, notably that `Ok(())` means that the -/// writer has successfully been flushed, a "would block" error means that the -/// current task is ready to receive a notification when flushing can make more -/// progress, and otherwise normal errors can happen as well. -/// -/// Utilities for working with `AsyncWrite` values are provided by -/// [`AsyncWriteExt`]. -/// -/// [`std::io::Write`]: std::io::Write -/// [`poll_write`]: AsyncWrite::poll_write() -/// [stdwrite]: std::io::Write::write() -/// [stdflush]: std::io::Write::flush() -/// [`AsyncWriteExt`]: crate::io::AsyncWriteExt -pub trait AsyncWrite { - /// Attempt to write bytes from `buf` into the object. - /// - /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. If successful, - /// then it must be guaranteed that `n <= buf.len()`. A return value of `0` - /// typically means that the underlying object is no longer able to accept - /// bytes and will likely not be able to in the future as well, or that the - /// buffer provided is empty. - /// - /// If the object is not ready for writing, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker()`) to receive a notification when the object becomes - /// writable or is closed. - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll>; - - /// Attempts to flush the object, ensuring that any buffered data reach - /// their destination. - /// - /// On success, returns `Poll::Ready(Ok(()))`. 
- /// - /// If flushing cannot immediately complete, this method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker()`) to receive a notification when the object can make - /// progress towards flushing. - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Initiates or attempts to shut down this writer, returning success when - /// the I/O connection has completely shut down. - /// - /// This method is intended to be used for asynchronous shutdown of I/O - /// connections. For example this is suitable for implementing shutdown of a - /// TLS connection or calling `TcpStream::shutdown` on a proxied connection. - /// Protocols sometimes need to flush out final pieces of data or otherwise - /// perform a graceful shutdown handshake, reading/writing more data as - /// appropriate. This method is the hook for such protocols to implement the - /// graceful shutdown logic. - /// - /// This `shutdown` method is required by implementers of the - /// `AsyncWrite` trait. Wrappers typically just want to proxy this call - /// through to the wrapped type, and base types will typically implement - /// shutdown logic here or just return `Ok(().into())`. Note that if you're - /// wrapping an underlying `AsyncWrite` a call to `shutdown` implies that - /// transitively the entire stream has been shut down. After your wrapper's - /// shutdown logic has been executed you should shut down the underlying - /// stream. - /// - /// Invocation of a `shutdown` implies an invocation of `flush`. Once this - /// method returns `Ready` it implies that a flush successfully happened - /// before the shutdown happened. That is, callers don't need to call - /// `flush` before calling `shutdown`. They can rely that by calling - /// `shutdown` any pending buffered data will be written out. 
- /// - /// # Return value - /// - /// This function returns a `Poll>` classified as such: - /// - /// * `Poll::Ready(Ok(()))` - indicates that the connection was - /// successfully shut down and is now safe to deallocate/drop/close - /// resources associated with it. This method means that the current task - /// will no longer receive any notifications due to this method and the - /// I/O object itself is likely no longer usable. - /// - /// * `Poll::Pending` - indicates that shutdown is initiated but could - /// not complete just yet. This may mean that more I/O needs to happen to - /// continue this shutdown operation. The current task is scheduled to - /// receive a notification when it's otherwise ready to continue the - /// shutdown operation. When woken up this method should be called again. - /// - /// * `Poll::Ready(Err(e))` - indicates a fatal error has happened with shutdown, - /// indicating that the shutdown operation did not complete successfully. - /// This typically means that the I/O object is no longer usable. - /// - /// # Errors - /// - /// This function can return normal I/O errors through `Err`, described - /// above. Additionally this method may also render the underlying - /// `Write::write` method no longer usable (e.g. will return errors in the - /// future). It's recommended that once `shutdown` is called the - /// `write` method is no longer called. - /// - /// # Panics - /// - /// This function will panic if not called within the context of a future's - /// task. - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; - - /// Like [`poll_write`], except that it writes from a slice of buffers. - /// - /// Data is copied from each buffer in order, with the final buffer - /// read from possibly being only partially consumed. This method must - /// behave as a call to [`write`] with the buffers concatenated would. 
- /// - /// The default implementation calls [`poll_write`] with either the first nonempty - /// buffer provided, or an empty one if none exists. - /// - /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. - /// - /// If the object is not ready for writing, the method returns - /// `Poll::Pending` and arranges for the current task (via - /// `cx.waker()`) to receive a notification when the object becomes - /// writable or is closed. - /// - /// # Note - /// - /// This should be implemented as a single "atomic" write action. If any - /// data has been partially written, it is wrong to return an error or - /// pending. - /// - /// [`poll_write`]: AsyncWrite::poll_write - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - let buf = bufs - .iter() - .find(|b| !b.is_empty()) - .map_or(&[][..], |b| &**b); - self.poll_write(cx, buf) - } - - /// Determines if this writer has an efficient [`poll_write_vectored`] - /// implementation. - /// - /// If a writer does not override the default [`poll_write_vectored`] - /// implementation, code using it may want to avoid the method all together - /// and coalesce writes into a single buffer for higher performance. - /// - /// The default implementation returns `false`. - /// - /// [`poll_write_vectored`]: AsyncWrite::poll_write_vectored - fn is_write_vectored(&self) -> bool { - false - } -} - -macro_rules! 
deref_async_write { - () => { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut **self).poll_write(cx, buf) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Pin::new(&mut **self).poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - (**self).is_write_vectored() - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut **self).poll_shutdown(cx) - } - }; -} - -impl AsyncWrite for Box { - deref_async_write!(); -} - -impl AsyncWrite for &mut T { - deref_async_write!(); -} - -impl

AsyncWrite for Pin

-where - P: DerefMut + Unpin, - P::Target: AsyncWrite, -{ - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.get_mut().as_mut().poll_write(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - self.get_mut().as_mut().poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - (**self).is_write_vectored() - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_mut().as_mut().poll_shutdown(cx) - } -} - -impl AsyncWrite for Vec { - fn poll_write( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.get_mut().extend_from_slice(buf); - Poll::Ready(Ok(buf.len())) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -impl AsyncWrite for io::Cursor<&mut [u8]> { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut *self, buf)) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut *self)) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} - 
-impl AsyncWrite for io::Cursor<&mut Vec> { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut *self, buf)) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut *self)) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} - -impl AsyncWrite for io::Cursor> { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut *self, buf)) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut *self)) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} - -impl AsyncWrite for io::Cursor> { - fn poll_write( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(io::Write::write(&mut *self, buf)) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(io::Write::flush(&mut *self)) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} diff -Nru 
s390-tools-2.31.0/rust-vendor/tokio/src/io/blocking.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/blocking.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/blocking.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/blocking.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,294 +0,0 @@ -use crate::io::sys; -use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; - -use std::cmp; -use std::future::Future; -use std::io; -use std::io::prelude::*; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// `T` should not implement _both_ Read and Write. -#[derive(Debug)] -pub(crate) struct Blocking { - inner: Option, - state: State, - /// `true` if the lower IO layer needs flushing. - need_flush: bool, -} - -#[derive(Debug)] -pub(crate) struct Buf { - buf: Vec, - pos: usize, -} - -pub(crate) const MAX_BUF: usize = 2 * 1024 * 1024; - -#[derive(Debug)] -enum State { - Idle(Option), - Busy(sys::Blocking<(io::Result, Buf, T)>), -} - -cfg_io_blocking! { - impl Blocking { - #[cfg_attr(feature = "fs", allow(dead_code))] - pub(crate) fn new(inner: T) -> Blocking { - Blocking { - inner: Some(inner), - state: State::Idle(Some(Buf::with_capacity(0))), - need_flush: false, - } - } - } -} - -impl AsyncRead for Blocking -where - T: Read + Unpin + Send + 'static, -{ - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - dst: &mut ReadBuf<'_>, - ) -> Poll> { - loop { - match self.state { - State::Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - if !buf.is_empty() { - buf.copy_to(dst); - *buf_cell = Some(buf); - return Poll::Ready(Ok(())); - } - - buf.ensure_capacity_for(dst); - let mut inner = self.inner.take().unwrap(); - - self.state = State::Busy(sys::run(move || { - let res = buf.read_from(&mut inner); - (res, buf, inner) - })); - } - State::Busy(ref mut rx) => { - let (res, mut buf, inner) = ready!(Pin::new(rx).poll(cx))?; - self.inner = Some(inner); - - match res { - Ok(_) => { - buf.copy_to(dst); - self.state = 
State::Idle(Some(buf)); - return Poll::Ready(Ok(())); - } - Err(e) => { - assert!(buf.is_empty()); - - self.state = State::Idle(Some(buf)); - return Poll::Ready(Err(e)); - } - } - } - } - } - } -} - -impl AsyncWrite for Blocking -where - T: Write + Unpin + Send + 'static, -{ - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - src: &[u8], - ) -> Poll> { - loop { - match self.state { - State::Idle(ref mut buf_cell) => { - let mut buf = buf_cell.take().unwrap(); - - assert!(buf.is_empty()); - - let n = buf.copy_from(src); - let mut inner = self.inner.take().unwrap(); - - self.state = State::Busy(sys::run(move || { - let n = buf.len(); - let res = buf.write_to(&mut inner).map(|_| n); - - (res, buf, inner) - })); - self.need_flush = true; - - return Poll::Ready(Ok(n)); - } - State::Busy(ref mut rx) => { - let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?; - self.state = State::Idle(Some(buf)); - self.inner = Some(inner); - - // If error, return - res?; - } - } - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - let need_flush = self.need_flush; - match self.state { - // The buffer is not used here - State::Idle(ref mut buf_cell) => { - if need_flush { - let buf = buf_cell.take().unwrap(); - let mut inner = self.inner.take().unwrap(); - - self.state = State::Busy(sys::run(move || { - let res = inner.flush().map(|_| 0); - (res, buf, inner) - })); - - self.need_flush = false; - } else { - return Poll::Ready(Ok(())); - } - } - State::Busy(ref mut rx) => { - let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?; - self.state = State::Idle(Some(buf)); - self.inner = Some(inner); - - // If error, return - res?; - } - } - } - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -/// Repeats operations that are interrupted. -macro_rules! 
uninterruptibly { - ($e:expr) => {{ - loop { - match $e { - Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} - res => break res, - } - } - }}; -} - -impl Buf { - pub(crate) fn with_capacity(n: usize) -> Buf { - Buf { - buf: Vec::with_capacity(n), - pos: 0, - } - } - - pub(crate) fn is_empty(&self) -> bool { - self.len() == 0 - } - - pub(crate) fn len(&self) -> usize { - self.buf.len() - self.pos - } - - pub(crate) fn copy_to(&mut self, dst: &mut ReadBuf<'_>) -> usize { - let n = cmp::min(self.len(), dst.remaining()); - dst.put_slice(&self.bytes()[..n]); - self.pos += n; - - if self.pos == self.buf.len() { - self.buf.truncate(0); - self.pos = 0; - } - - n - } - - pub(crate) fn copy_from(&mut self, src: &[u8]) -> usize { - assert!(self.is_empty()); - - let n = cmp::min(src.len(), MAX_BUF); - - self.buf.extend_from_slice(&src[..n]); - n - } - - pub(crate) fn bytes(&self) -> &[u8] { - &self.buf[self.pos..] - } - - pub(crate) fn ensure_capacity_for(&mut self, bytes: &ReadBuf<'_>) { - assert!(self.is_empty()); - - let len = cmp::min(bytes.remaining(), MAX_BUF); - - if self.buf.len() < len { - self.buf.reserve(len - self.buf.len()); - } - - unsafe { - self.buf.set_len(len); - } - } - - pub(crate) fn read_from(&mut self, rd: &mut T) -> io::Result { - let res = uninterruptibly!(rd.read(&mut self.buf)); - - if let Ok(n) = res { - self.buf.truncate(n); - } else { - self.buf.clear(); - } - - assert_eq!(self.pos, 0); - - res - } - - pub(crate) fn write_to(&mut self, wr: &mut T) -> io::Result<()> { - assert_eq!(self.pos, 0); - - // `write_all` already ignores interrupts - let res = wr.write_all(&self.buf); - self.buf.clear(); - res - } -} - -cfg_fs! 
{ - impl Buf { - pub(crate) fn discard_read(&mut self) -> i64 { - let ret = -(self.bytes().len() as i64); - self.pos = 0; - self.buf.truncate(0); - ret - } - - pub(crate) fn copy_from_bufs(&mut self, bufs: &[io::IoSlice<'_>]) -> usize { - assert!(self.is_empty()); - - let mut rem = MAX_BUF; - for buf in bufs { - if rem == 0 { - break - } - - let len = buf.len().min(rem); - self.buf.extend_from_slice(&buf[..len]); - rem -= len; - } - - MAX_BUF - rem - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/bsd/poll_aio.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/bsd/poll_aio.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/bsd/poll_aio.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/bsd/poll_aio.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,197 +0,0 @@ -//! Use POSIX AIO futures with Tokio. - -use crate::io::interest::Interest; -use crate::runtime::io::{ReadyEvent, Registration}; -use crate::runtime::scheduler; -use mio::event::Source; -use mio::Registry; -use mio::Token; -use std::fmt; -use std::io; -use std::ops::{Deref, DerefMut}; -use std::os::unix::io::AsRawFd; -use std::os::unix::prelude::RawFd; -use std::task::{Context, Poll}; - -/// Like [`mio::event::Source`], but for POSIX AIO only. -/// -/// Tokio's consumer must pass an implementor of this trait to create a -/// [`Aio`] object. -pub trait AioSource { - /// Registers this AIO event source with Tokio's reactor. - fn register(&mut self, kq: RawFd, token: usize); - - /// Deregisters this AIO event source with Tokio's reactor. - fn deregister(&mut self); -} - -/// Wraps the user's AioSource in order to implement mio::event::Source, which -/// is what the rest of the crate wants. 
-struct MioSource(T); - -impl Source for MioSource { - fn register( - &mut self, - registry: &Registry, - token: Token, - interests: mio::Interest, - ) -> io::Result<()> { - assert!(interests.is_aio() || interests.is_lio()); - self.0.register(registry.as_raw_fd(), usize::from(token)); - Ok(()) - } - - fn deregister(&mut self, _registry: &Registry) -> io::Result<()> { - self.0.deregister(); - Ok(()) - } - - fn reregister( - &mut self, - registry: &Registry, - token: Token, - interests: mio::Interest, - ) -> io::Result<()> { - assert!(interests.is_aio() || interests.is_lio()); - self.0.register(registry.as_raw_fd(), usize::from(token)); - Ok(()) - } -} - -/// Associates a POSIX AIO control block with the reactor that drives it. -/// -/// `Aio`'s wrapped type must implement [`AioSource`] to be driven -/// by the reactor. -/// -/// The wrapped source may be accessed through the `Aio` via the `Deref` and -/// `DerefMut` traits. -/// -/// ## Clearing readiness -/// -/// If [`Aio::poll_ready`] returns ready, but the consumer determines that the -/// Source is not completely ready and must return to the Pending state, -/// [`Aio::clear_ready`] may be used. This can be useful with -/// [`lio_listio`], which may generate a kevent when only a portion of the -/// operations have completed. -/// -/// ## Platforms -/// -/// Only FreeBSD implements POSIX AIO with kqueue notification, so -/// `Aio` is only available for that operating system. -/// -/// [`lio_listio`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html -// Note: Unlike every other kqueue event source, POSIX AIO registers events not -// via kevent(2) but when the aiocb is submitted to the kernel via aio_read, -// aio_write, etc. It needs the kqueue's file descriptor to do that. So -// AsyncFd can't be used for POSIX AIO. -// -// Note that Aio doesn't implement Drop. There's no need. Unlike other -// kqueue sources, simply dropping the object effectively deregisters it. 
-pub struct Aio { - io: MioSource, - registration: Registration, -} - -// ===== impl Aio ===== - -impl Aio { - /// Creates a new `Aio` suitable for use with POSIX AIO functions. - /// - /// It will be associated with the default reactor. The runtime is usually - /// set implicitly when this function is called from a future driven by a - /// Tokio runtime, otherwise runtime can be set explicitly with - /// [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn new_for_aio(io: E) -> io::Result { - Self::new_with_interest(io, Interest::AIO) - } - - /// Creates a new `Aio` suitable for use with [`lio_listio`]. - /// - /// It will be associated with the default reactor. The runtime is usually - /// set implicitly when this function is called from a future driven by a - /// Tokio runtime, otherwise runtime can be set explicitly with - /// [`Runtime::enter`](crate::runtime::Runtime::enter) function. - /// - /// [`lio_listio`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html - pub fn new_for_lio(io: E) -> io::Result { - Self::new_with_interest(io, Interest::LIO) - } - - fn new_with_interest(io: E, interest: Interest) -> io::Result { - let mut io = MioSource(io); - let handle = scheduler::Handle::current(); - let registration = Registration::new_with_interest_and_handle(&mut io, interest, handle)?; - Ok(Self { io, registration }) - } - - /// Indicates to Tokio that the source is no longer ready. The internal - /// readiness flag will be cleared, and tokio will wait for the next - /// edge-triggered readiness notification from the OS. - /// - /// It is critical that this method not be called unless your code - /// _actually observes_ that the source is _not_ ready. The OS must - /// deliver a subsequent notification, or this source will block - /// forever. It is equally critical that you `do` call this method if you - /// resubmit the same structure to the kernel and poll it again. 
- /// - /// This method is not very useful with AIO readiness, since each `aiocb` - /// structure is typically only used once. It's main use with - /// [`lio_listio`], which will sometimes send notification when only a - /// portion of its elements are complete. In that case, the caller must - /// call `clear_ready` before resubmitting it. - /// - /// [`lio_listio`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html - pub fn clear_ready(&self, ev: AioEvent) { - self.registration.clear_readiness(ev.0) - } - - /// Destroy the [`Aio`] and return its inner source. - pub fn into_inner(self) -> E { - self.io.0 - } - - /// Polls for readiness. Either AIO or LIO counts. - /// - /// This method returns: - /// * `Poll::Pending` if the underlying operation is not complete, whether - /// or not it completed successfully. This will be true if the OS is - /// still processing it, or if it has not yet been submitted to the OS. - /// * `Poll::Ready(Ok(_))` if the underlying operation is complete. - /// * `Poll::Ready(Err(_))` if the reactor has been shutdown. This does - /// _not_ indicate that the underlying operation encountered an error. - /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context` - /// is scheduled to receive a wakeup when the underlying operation - /// completes. Note that on multiple calls to `poll_ready`, only the `Waker` from the - /// `Context` passed to the most recent call is scheduled to receive a wakeup. 
- pub fn poll_ready<'a>(&'a self, cx: &mut Context<'_>) -> Poll> { - let ev = ready!(self.registration.poll_read_ready(cx))?; - Poll::Ready(Ok(AioEvent(ev))) - } -} - -impl Deref for Aio { - type Target = E; - - fn deref(&self) -> &E { - &self.io.0 - } -} - -impl DerefMut for Aio { - fn deref_mut(&mut self) -> &mut E { - &mut self.io.0 - } -} - -impl fmt::Debug for Aio { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Aio").field("io", &self.io.0).finish() - } -} - -/// Opaque data returned by [`Aio::poll_ready`]. -/// -/// It can be fed back to [`Aio::clear_ready`]. -#[derive(Debug)] -pub struct AioEvent(ReadyEvent); diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/interest.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/interest.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/interest.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/interest.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,345 +0,0 @@ -#![cfg_attr(not(feature = "net"), allow(dead_code, unreachable_pub))] - -use crate::io::ready::Ready; - -use std::fmt; -use std::ops; - -// These must be unique. -// same as mio -const READABLE: usize = 0b0001; -const WRITABLE: usize = 0b0010; -// The following are not available on all platforms. -#[cfg(target_os = "freebsd")] -const AIO: usize = 0b0100; -#[cfg(target_os = "freebsd")] -const LIO: usize = 0b1000; -#[cfg(any(target_os = "linux", target_os = "android"))] -const PRIORITY: usize = 0b0001_0000; -// error is available on all platforms, but behavior is platform-specific -// mio does not have this interest -const ERROR: usize = 0b0010_0000; - -/// Readiness event interest. -/// -/// Specifies the readiness events the caller is interested in when awaiting on -/// I/O resource readiness states. 
-#[cfg_attr(docsrs, doc(cfg(feature = "net")))] -#[derive(Clone, Copy, Eq, PartialEq)] -pub struct Interest(usize); - -impl Interest { - // The non-FreeBSD definitions in this block are active only when - // building documentation. - cfg_aio! { - /// Interest for POSIX AIO. - #[cfg(target_os = "freebsd")] - pub const AIO: Interest = Interest(AIO); - - /// Interest for POSIX AIO. - #[cfg(not(target_os = "freebsd"))] - pub const AIO: Interest = Interest(READABLE); - - /// Interest for POSIX AIO lio_listio events. - #[cfg(target_os = "freebsd")] - pub const LIO: Interest = Interest(LIO); - - /// Interest for POSIX AIO lio_listio events. - #[cfg(not(target_os = "freebsd"))] - pub const LIO: Interest = Interest(READABLE); - } - - /// Interest in all readable events. - /// - /// Readable interest includes read-closed events. - pub const READABLE: Interest = Interest(READABLE); - - /// Interest in all writable events. - /// - /// Writable interest includes write-closed events. - pub const WRITABLE: Interest = Interest(WRITABLE); - - /// Interest in error events. - /// - /// Passes error interest to the underlying OS selector. - /// Behavior is platform-specific, read your platform's documentation. - pub const ERROR: Interest = Interest(ERROR); - - /// Returns a `Interest` set representing priority completion interests. - #[cfg(any(target_os = "linux", target_os = "android"))] - #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] - pub const PRIORITY: Interest = Interest(PRIORITY); - - /// Returns true if the value includes readable interest. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::io::Interest; - /// - /// assert!(Interest::READABLE.is_readable()); - /// assert!(!Interest::WRITABLE.is_readable()); - /// - /// let both = Interest::READABLE | Interest::WRITABLE; - /// assert!(both.is_readable()); - /// ``` - pub const fn is_readable(self) -> bool { - self.0 & READABLE != 0 - } - - /// Returns true if the value includes writable interest. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Interest; - /// - /// assert!(!Interest::READABLE.is_writable()); - /// assert!(Interest::WRITABLE.is_writable()); - /// - /// let both = Interest::READABLE | Interest::WRITABLE; - /// assert!(both.is_writable()); - /// ``` - pub const fn is_writable(self) -> bool { - self.0 & WRITABLE != 0 - } - - /// Returns true if the value includes error interest. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Interest; - /// - /// assert!(Interest::ERROR.is_error()); - /// assert!(!Interest::WRITABLE.is_error()); - /// - /// let combined = Interest::READABLE | Interest::ERROR; - /// assert!(combined.is_error()); - /// ``` - pub const fn is_error(self) -> bool { - self.0 & ERROR != 0 - } - - #[cfg(target_os = "freebsd")] - const fn is_aio(self) -> bool { - self.0 & AIO != 0 - } - - #[cfg(target_os = "freebsd")] - const fn is_lio(self) -> bool { - self.0 & LIO != 0 - } - - /// Returns true if the value includes priority interest. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Interest; - /// - /// assert!(!Interest::READABLE.is_priority()); - /// assert!(Interest::PRIORITY.is_priority()); - /// - /// let both = Interest::READABLE | Interest::PRIORITY; - /// assert!(both.is_priority()); - /// ``` - #[cfg(any(target_os = "linux", target_os = "android"))] - #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] - pub const fn is_priority(self) -> bool { - self.0 & PRIORITY != 0 - } - - /// Add together two `Interest` values. 
- /// - /// This function works from a `const` context. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Interest; - /// - /// const BOTH: Interest = Interest::READABLE.add(Interest::WRITABLE); - /// - /// assert!(BOTH.is_readable()); - /// assert!(BOTH.is_writable()); - #[must_use = "this returns the result of the operation, without modifying the original"] - pub const fn add(self, other: Interest) -> Interest { - Self(self.0 | other.0) - } - - /// Remove `Interest` from `self`. - /// - /// Interests present in `other` but *not* in `self` are ignored. - /// - /// Returns `None` if the set would be empty after removing `Interest`. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Interest; - /// - /// const RW_INTEREST: Interest = Interest::READABLE.add(Interest::WRITABLE); - /// - /// let w_interest = RW_INTEREST.remove(Interest::READABLE).unwrap(); - /// assert!(!w_interest.is_readable()); - /// assert!(w_interest.is_writable()); - /// - /// // Removing all interests from the set returns `None`. - /// assert_eq!(w_interest.remove(Interest::WRITABLE), None); - /// - /// // Remove all interests at once. - /// assert_eq!(RW_INTEREST.remove(RW_INTEREST), None); - /// ``` - #[must_use = "this returns the result of the operation, without modifying the original"] - pub fn remove(self, other: Interest) -> Option { - let value = self.0 & !other.0; - - if value != 0 { - Some(Self(value)) - } else { - None - } - } - - // This function must be crate-private to avoid exposing a `mio` dependency. 
- pub(crate) fn to_mio(self) -> mio::Interest { - fn mio_add(wrapped: &mut Option, add: mio::Interest) { - match wrapped { - Some(inner) => *inner |= add, - None => *wrapped = Some(add), - } - } - - // mio does not allow and empty interest, so use None for empty - let mut mio = None; - - if self.is_readable() { - mio_add(&mut mio, mio::Interest::READABLE); - } - - if self.is_writable() { - mio_add(&mut mio, mio::Interest::WRITABLE); - } - - #[cfg(any(target_os = "linux", target_os = "android"))] - if self.is_priority() { - mio_add(&mut mio, mio::Interest::PRIORITY); - } - - #[cfg(target_os = "freebsd")] - if self.is_aio() { - mio_add(&mut mio, mio::Interest::AIO); - } - - #[cfg(target_os = "freebsd")] - if self.is_lio() { - mio_add(&mut mio, mio::Interest::LIO); - } - - if self.is_error() { - // There is no error interest in mio, because error events are always reported. - // But mio interests cannot be empty and an interest is needed just for the registeration. - // - // read readiness is filtered out in `Interest::mask` or `Ready::from_interest` if - // the read interest was not specified by the user. - mio_add(&mut mio, mio::Interest::READABLE); - } - - // the default `mio::Interest::READABLE` should never be used in practice. 
Either - // - // - at least one tokio interest with a mio counterpart was used - // - only the error tokio interest was specified - // - // in both cases, `mio` is Some already - mio.unwrap_or(mio::Interest::READABLE) - } - - pub(crate) fn mask(self) -> Ready { - match self { - Interest::READABLE => Ready::READABLE | Ready::READ_CLOSED, - Interest::WRITABLE => Ready::WRITABLE | Ready::WRITE_CLOSED, - #[cfg(any(target_os = "linux", target_os = "android"))] - Interest::PRIORITY => Ready::PRIORITY | Ready::READ_CLOSED, - Interest::ERROR => Ready::ERROR, - _ => Ready::EMPTY, - } - } -} - -impl ops::BitOr for Interest { - type Output = Self; - - #[inline] - fn bitor(self, other: Self) -> Self { - self.add(other) - } -} - -impl ops::BitOrAssign for Interest { - #[inline] - fn bitor_assign(&mut self, other: Self) { - *self = *self | other - } -} - -impl fmt::Debug for Interest { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut separator = false; - - if self.is_readable() { - if separator { - write!(fmt, " | ")?; - } - write!(fmt, "READABLE")?; - separator = true; - } - - if self.is_writable() { - if separator { - write!(fmt, " | ")?; - } - write!(fmt, "WRITABLE")?; - separator = true; - } - - #[cfg(any(target_os = "linux", target_os = "android"))] - if self.is_priority() { - if separator { - write!(fmt, " | ")?; - } - write!(fmt, "PRIORITY")?; - separator = true; - } - - #[cfg(target_os = "freebsd")] - if self.is_aio() { - if separator { - write!(fmt, " | ")?; - } - write!(fmt, "AIO")?; - separator = true; - } - - #[cfg(target_os = "freebsd")] - if self.is_lio() { - if separator { - write!(fmt, " | ")?; - } - write!(fmt, "LIO")?; - separator = true; - } - - if self.is_error() { - if separator { - write!(fmt, " | ")?; - } - write!(fmt, "ERROR")?; - separator = true; - } - - let _ = separator; - - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/mod.rs --- 
s390-tools-2.31.0/rust-vendor/tokio/src/io/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,290 +0,0 @@ -//! Traits, helpers, and type definitions for asynchronous I/O functionality. -//! -//! This module is the asynchronous version of `std::io`. Primarily, it -//! defines two traits, [`AsyncRead`] and [`AsyncWrite`], which are asynchronous -//! versions of the [`Read`] and [`Write`] traits in the standard library. -//! -//! # AsyncRead and AsyncWrite -//! -//! Like the standard library's [`Read`] and [`Write`] traits, [`AsyncRead`] and -//! [`AsyncWrite`] provide the most general interface for reading and writing -//! input and output. Unlike the standard library's traits, however, they are -//! _asynchronous_ — meaning that reading from or writing to a `tokio::io` -//! type will _yield_ to the Tokio scheduler when IO is not ready, rather than -//! blocking. This allows other tasks to run while waiting on IO. -//! -//! Another difference is that `AsyncRead` and `AsyncWrite` only contain -//! core methods needed to provide asynchronous reading and writing -//! functionality. Instead, utility methods are defined in the [`AsyncReadExt`] -//! and [`AsyncWriteExt`] extension traits. These traits are automatically -//! implemented for all values that implement `AsyncRead` and `AsyncWrite` -//! respectively. -//! -//! End users will rarely interact directly with `AsyncRead` and -//! `AsyncWrite`. Instead, they will use the async functions defined in the -//! extension traits. Library authors are expected to implement `AsyncRead` -//! and `AsyncWrite` in order to provide types that behave like byte streams. -//! -//! Even with these differences, Tokio's `AsyncRead` and `AsyncWrite` traits -//! can be used in almost exactly the same manner as the standard library's -//! `Read` and `Write`. Most types in the standard library that implement `Read` -//! 
and `Write` have asynchronous equivalents in `tokio` that implement -//! `AsyncRead` and `AsyncWrite`, such as [`File`] and [`TcpStream`]. -//! -//! For example, the standard library documentation introduces `Read` by -//! [demonstrating][std_example] reading some bytes from a [`std::fs::File`]. We -//! can do the same with [`tokio::fs::File`][`File`]: -//! -//! ```no_run -//! use tokio::io::{self, AsyncReadExt}; -//! use tokio::fs::File; -//! -//! #[tokio::main] -//! async fn main() -> io::Result<()> { -//! let mut f = File::open("foo.txt").await?; -//! let mut buffer = [0; 10]; -//! -//! // read up to 10 bytes -//! let n = f.read(&mut buffer).await?; -//! -//! println!("The bytes: {:?}", &buffer[..n]); -//! Ok(()) -//! } -//! ``` -//! -//! [`File`]: crate::fs::File -//! [`TcpStream`]: crate::net::TcpStream -//! [`std::fs::File`]: std::fs::File -//! [std_example]: std::io#read-and-write -//! -//! ## Buffered Readers and Writers -//! -//! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be -//! making near-constant calls to the operating system. To help with this, -//! `std::io` comes with [support for _buffered_ readers and writers][stdbuf], -//! and therefore, `tokio::io` does as well. -//! -//! Tokio provides an async version of the [`std::io::BufRead`] trait, -//! [`AsyncBufRead`]; and async [`BufReader`] and [`BufWriter`] structs, which -//! wrap readers and writers. These wrappers use a buffer, reducing the number -//! of calls and providing nicer methods for accessing exactly what you want. -//! -//! For example, [`BufReader`] works with the [`AsyncBufRead`] trait to add -//! extra methods to any async reader: -//! -//! ```no_run -//! use tokio::io::{self, BufReader, AsyncBufReadExt}; -//! use tokio::fs::File; -//! -//! #[tokio::main] -//! async fn main() -> io::Result<()> { -//! let f = File::open("foo.txt").await?; -//! let mut reader = BufReader::new(f); -//! let mut buffer = String::new(); -//! -//! 
// read a line into buffer -//! reader.read_line(&mut buffer).await?; -//! -//! println!("{}", buffer); -//! Ok(()) -//! } -//! ``` -//! -//! [`BufWriter`] doesn't add any new ways of writing; it just buffers every call -//! to [`write`](crate::io::AsyncWriteExt::write). However, you **must** flush -//! [`BufWriter`] to ensure that any buffered data is written. -//! -//! ```no_run -//! use tokio::io::{self, BufWriter, AsyncWriteExt}; -//! use tokio::fs::File; -//! -//! #[tokio::main] -//! async fn main() -> io::Result<()> { -//! let f = File::create("foo.txt").await?; -//! { -//! let mut writer = BufWriter::new(f); -//! -//! // Write a byte to the buffer. -//! writer.write(&[42u8]).await?; -//! -//! // Flush the buffer before it goes out of scope. -//! writer.flush().await?; -//! -//! } // Unless flushed or shut down, the contents of the buffer is discarded on drop. -//! -//! Ok(()) -//! } -//! ``` -//! -//! [stdbuf]: std::io#bufreader-and-bufwriter -//! [`std::io::BufRead`]: std::io::BufRead -//! [`AsyncBufRead`]: crate::io::AsyncBufRead -//! [`BufReader`]: crate::io::BufReader -//! [`BufWriter`]: crate::io::BufWriter -//! -//! ## Implementing AsyncRead and AsyncWrite -//! -//! Because they are traits, we can implement [`AsyncRead`] and [`AsyncWrite`] for -//! our own types, as well. Note that these traits must only be implemented for -//! non-blocking I/O types that integrate with the futures type system. In -//! other words, these types must never block the thread, and instead the -//! current task is notified when the I/O resource is ready. -//! -//! ## Conversion to and from Stream/Sink -//! -//! It is often convenient to encapsulate the reading and writing of bytes in a -//! [`Stream`] or [`Sink`] of data. -//! -//! Tokio provides simple wrappers for converting [`AsyncRead`] to [`Stream`] -//! and vice-versa in the [tokio-util] crate, see [`ReaderStream`] and -//! [`StreamReader`]. -//! -//! 
There are also utility traits that abstract the asynchronous buffering -//! necessary to write your own adaptors for encoding and decoding bytes to/from -//! your structured data, allowing to transform something that implements -//! [`AsyncRead`]/[`AsyncWrite`] into a [`Stream`]/[`Sink`], see [`Decoder`] and -//! [`Encoder`] in the [tokio-util::codec] module. -//! -//! [tokio-util]: https://docs.rs/tokio-util -//! [tokio-util::codec]: https://docs.rs/tokio-util/latest/tokio_util/codec/index.html -//! -//! # Standard input and output -//! -//! Tokio provides asynchronous APIs to standard [input], [output], and [error]. -//! These APIs are very similar to the ones provided by `std`, but they also -//! implement [`AsyncRead`] and [`AsyncWrite`]. -//! -//! Note that the standard input / output APIs **must** be used from the -//! context of the Tokio runtime, as they require Tokio-specific features to -//! function. Calling these functions outside of a Tokio runtime will panic. -//! -//! [input]: fn@stdin -//! [output]: fn@stdout -//! [error]: fn@stderr -//! -//! # `std` re-exports -//! -//! Additionally, [`Error`], [`ErrorKind`], [`Result`], and [`SeekFrom`] are -//! re-exported from `std::io` for ease of use. -//! -//! [`AsyncRead`]: trait@AsyncRead -//! [`AsyncWrite`]: trait@AsyncWrite -//! [`AsyncReadExt`]: trait@AsyncReadExt -//! [`AsyncWriteExt`]: trait@AsyncWriteExt -//! ["codec"]: https://docs.rs/tokio-util/latest/tokio_util/codec/index.html -//! [`Encoder`]: https://docs.rs/tokio-util/latest/tokio_util/codec/trait.Encoder.html -//! [`Decoder`]: https://docs.rs/tokio-util/latest/tokio_util/codec/trait.Decoder.html -//! [`ReaderStream`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.ReaderStream.html -//! [`StreamReader`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.StreamReader.html -//! [`Error`]: struct@Error -//! [`ErrorKind`]: enum@ErrorKind -//! [`Result`]: type@Result -//! [`Read`]: std::io::Read -//! [`SeekFrom`]: enum@SeekFrom -//! 
[`Sink`]: https://docs.rs/futures/0.3/futures/sink/trait.Sink.html -//! [`Stream`]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html -//! [`Write`]: std::io::Write - -#![cfg_attr( - not(all(feature = "rt", feature = "net")), - allow(dead_code, unused_imports) -)] - -cfg_io_blocking! { - pub(crate) mod blocking; -} - -mod async_buf_read; -pub use self::async_buf_read::AsyncBufRead; - -mod async_read; -pub use self::async_read::AsyncRead; - -mod async_seek; -pub use self::async_seek::AsyncSeek; - -mod async_write; -pub use self::async_write::AsyncWrite; - -mod read_buf; -pub use self::read_buf::ReadBuf; - -// Re-export some types from `std::io` so that users don't have to deal -// with conflicts when `use`ing `tokio::io` and `std::io`. -#[doc(no_inline)] -pub use std::io::{Error, ErrorKind, Result, SeekFrom}; - -cfg_io_driver_impl! { - pub(crate) mod interest; - pub(crate) mod ready; - - cfg_net! { - pub use interest::Interest; - pub use ready::Ready; - } - - #[cfg_attr(target_os = "wasi", allow(unused_imports))] - mod poll_evented; - - #[cfg(not(loom))] - #[cfg_attr(target_os = "wasi", allow(unused_imports))] - pub(crate) use poll_evented::PollEvented; -} - -cfg_aio! { - /// BSD-specific I/O types. - pub mod bsd { - mod poll_aio; - - pub use poll_aio::{Aio, AioEvent, AioSource}; - } -} - -cfg_net_unix! { - mod async_fd; - - pub mod unix { - //! Asynchronous IO structures specific to Unix-like operating systems. - pub use super::async_fd::{AsyncFd, AsyncFdReadyGuard, AsyncFdReadyMutGuard, TryIoError}; - } -} - -cfg_io_std! { - mod stdio_common; - - mod stderr; - pub use stderr::{stderr, Stderr}; - - mod stdin; - pub use stdin::{stdin, Stdin}; - - mod stdout; - pub use stdout::{stdout, Stdout}; -} - -cfg_io_util! 
{ - mod split; - pub use split::{split, ReadHalf, WriteHalf}; - - pub(crate) mod seek; - pub(crate) mod util; - pub use util::{ - copy, copy_bidirectional, copy_buf, duplex, empty, repeat, sink, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt, - BufReader, BufStream, BufWriter, DuplexStream, Empty, Lines, Repeat, Sink, Split, Take, - }; -} - -cfg_not_io_util! { - cfg_process! { - pub(crate) mod util; - } -} - -cfg_io_blocking! { - /// Types in this module can be mocked out in tests. - mod sys { - // TODO: don't rename - pub(crate) use crate::blocking::spawn_blocking as run; - pub(crate) use crate::blocking::JoinHandle as Blocking; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/poll_evented.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/poll_evented.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/poll_evented.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/poll_evented.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,258 +0,0 @@ -use crate::io::interest::Interest; -use crate::runtime::io::Registration; -use crate::runtime::scheduler; - -use mio::event::Source; -use std::fmt; -use std::io; -use std::ops::Deref; -use std::panic::{RefUnwindSafe, UnwindSafe}; - -cfg_io_driver! { - /// Associates an I/O resource that implements the [`std::io::Read`] and/or - /// [`std::io::Write`] traits with the reactor that drives it. - /// - /// `PollEvented` uses [`Registration`] internally to take a type that - /// implements [`mio::event::Source`] as well as [`std::io::Read`] and/or - /// [`std::io::Write`] and associate it with a reactor that will drive it. - /// - /// Once the [`mio::event::Source`] type is wrapped by `PollEvented`, it can be - /// used from within the future's execution model. As such, the - /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`] - /// implementations using the underlying I/O resource as well as readiness - /// events provided by the reactor. 
- /// - /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is - /// `Sync`), the caller must ensure that there are at most two tasks that - /// use a `PollEvented` instance concurrently. One for reading and one for - /// writing. While violating this requirement is "safe" from a Rust memory - /// model point of view, it will result in unexpected behavior in the form - /// of lost notifications and tasks hanging. - /// - /// ## Readiness events - /// - /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations, - /// this type also supports access to the underlying readiness event stream. - /// While similar in function to what [`Registration`] provides, the - /// semantics are a bit different. - /// - /// Two functions are provided to access the readiness events: - /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the - /// current readiness state of the `PollEvented` instance. If - /// [`poll_read_ready`] indicates read readiness, immediately calling - /// [`poll_read_ready`] again will also indicate read readiness. - /// - /// When the operation is attempted and is unable to succeed due to the I/O - /// resource not being ready, the caller must call [`clear_readiness`]. - /// This clears the readiness state until a new readiness event is received. - /// - /// This allows the caller to implement additional functions. For example, - /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and - /// [`clear_readiness`]. - /// - /// ## Platform-specific events - /// - /// `PollEvented` also allows receiving platform-specific `mio::Ready` events. - /// These events are included as part of the read readiness event stream. The - /// write readiness event stream is only for `Ready::writable()` events. 
- /// - /// [`AsyncRead`]: crate::io::AsyncRead - /// [`AsyncWrite`]: crate::io::AsyncWrite - /// [`TcpListener`]: crate::net::TcpListener - /// [`clear_readiness`]: Registration::clear_readiness - /// [`poll_read_ready`]: Registration::poll_read_ready - /// [`poll_write_ready`]: Registration::poll_write_ready - pub(crate) struct PollEvented { - io: Option, - registration: Registration, - } -} - -// ===== impl PollEvented ===== - -impl PollEvented { - /// Creates a new `PollEvented` associated with the default reactor. - /// - /// The returned `PollEvented` has readable and writable interests. For more control, use - /// [`Self::new_with_interest`]. - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - #[track_caller] - #[cfg_attr(feature = "signal", allow(unused))] - pub(crate) fn new(io: E) -> io::Result { - PollEvented::new_with_interest(io, Interest::READABLE | Interest::WRITABLE) - } - - /// Creates a new `PollEvented` associated with the default reactor, for - /// specific `Interest` state. `new_with_interest` should be used over `new` - /// when you need control over the readiness state, such as when a file - /// descriptor only allows reads. This does not add `hup` or `error` so if - /// you are interested in those states, you will need to add them to the - /// readiness state passed to this function. - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called from - /// a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) - /// function. 
- #[track_caller] - #[cfg_attr(feature = "signal", allow(unused))] - pub(crate) fn new_with_interest(io: E, interest: Interest) -> io::Result { - Self::new_with_interest_and_handle(io, interest, scheduler::Handle::current()) - } - - #[track_caller] - pub(crate) fn new_with_interest_and_handle( - mut io: E, - interest: Interest, - handle: scheduler::Handle, - ) -> io::Result { - let registration = Registration::new_with_interest_and_handle(&mut io, interest, handle)?; - Ok(Self { - io: Some(io), - registration, - }) - } - - /// Returns a reference to the registration. - #[cfg(feature = "net")] - pub(crate) fn registration(&self) -> &Registration { - &self.registration - } - - /// Deregisters the inner io from the registration and returns a Result containing the inner io. - #[cfg(any(feature = "net", feature = "process"))] - pub(crate) fn into_inner(mut self) -> io::Result { - let mut inner = self.io.take().unwrap(); // As io shouldn't ever be None, just unwrap here. - self.registration.deregister(&mut inner)?; - Ok(inner) - } -} - -feature! { - #![any(feature = "net", all(unix, feature = "process"))] - - use crate::io::ReadBuf; - use std::task::{Context, Poll}; - - impl PollEvented { - // Safety: The caller must ensure that `E` can read into uninitialized memory - pub(crate) unsafe fn poll_read<'a>( - &'a self, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> - where - &'a E: io::Read + 'a, - { - use std::io::Read; - - loop { - let evt = ready!(self.registration.poll_read_ready(cx))?; - - let b = &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]); - let len = b.len(); - - match self.io.as_ref().unwrap().read(b) { - Ok(n) => { - // if we read a partially full buffer, this is sufficient on unix to show - // that the socket buffer has been drained. 
Unfortunately this assumption - // fails for level-triggered selectors (like on Windows or poll even for - // UNIX): https://github.com/tokio-rs/tokio/issues/5866 - if n > 0 && (!cfg!(windows) && !cfg!(mio_unsupported_force_poll_poll) && n < len) { - self.registration.clear_readiness(evt); - } - - // Safety: We trust `TcpStream::read` to have filled up `n` bytes in the - // buffer. - buf.assume_init(n); - buf.advance(n); - return Poll::Ready(Ok(())); - }, - Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - self.registration.clear_readiness(evt); - } - Err(e) => return Poll::Ready(Err(e)), - } - } - } - - pub(crate) fn poll_write<'a>(&'a self, cx: &mut Context<'_>, buf: &[u8]) -> Poll> - where - &'a E: io::Write + 'a, - { - use std::io::Write; - - loop { - let evt = ready!(self.registration.poll_write_ready(cx))?; - - match self.io.as_ref().unwrap().write(buf) { - Ok(n) => { - // if we write only part of our buffer, this is sufficient on unix to show - // that the socket buffer is full. 
Unfortunately this assumption - // fails for level-triggered selectors (like on Windows or poll even for - // UNIX): https://github.com/tokio-rs/tokio/issues/5866 - if n > 0 && (!cfg!(windows) && !cfg!(mio_unsupported_force_poll_poll) && n < buf.len()) { - self.registration.clear_readiness(evt); - } - - return Poll::Ready(Ok(n)); - }, - Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - self.registration.clear_readiness(evt); - } - Err(e) => return Poll::Ready(Err(e)), - } - } - } - - #[cfg(any(feature = "net", feature = "process"))] - pub(crate) fn poll_write_vectored<'a>( - &'a self, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> - where - &'a E: io::Write + 'a, - { - use std::io::Write; - self.registration.poll_write_io(cx, || self.io.as_ref().unwrap().write_vectored(bufs)) - } - } -} - -impl UnwindSafe for PollEvented {} - -impl RefUnwindSafe for PollEvented {} - -impl Deref for PollEvented { - type Target = E; - - fn deref(&self) -> &E { - self.io.as_ref().unwrap() - } -} - -impl fmt::Debug for PollEvented { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PollEvented").field("io", &self.io).finish() - } -} - -impl Drop for PollEvented { - fn drop(&mut self) { - if let Some(mut io) = self.io.take() { - // Ignore errors - let _ = self.registration.deregister(&mut io); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/read_buf.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/read_buf.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/read_buf.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/read_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,322 +0,0 @@ -use std::fmt; -use std::mem::MaybeUninit; - -/// A wrapper around a byte buffer that is incrementally filled and initialized. -/// -/// This type is a sort of "double cursor". 
It tracks three regions in the -/// buffer: a region at the beginning of the buffer that has been logically -/// filled with data, a region that has been initialized at some point but not -/// yet logically filled, and a region at the end that may be uninitialized. -/// The filled region is guaranteed to be a subset of the initialized region. -/// -/// In summary, the contents of the buffer can be visualized as: -/// -/// ```not_rust -/// [ capacity ] -/// [ filled | unfilled ] -/// [ initialized | uninitialized ] -/// ``` -/// -/// It is undefined behavior to de-initialize any bytes from the uninitialized -/// region, since it is merely unknown whether this region is uninitialized or -/// not, and if part of it turns out to be initialized, it must stay initialized. -pub struct ReadBuf<'a> { - buf: &'a mut [MaybeUninit], - filled: usize, - initialized: usize, -} - -impl<'a> ReadBuf<'a> { - /// Creates a new `ReadBuf` from a fully initialized buffer. - #[inline] - pub fn new(buf: &'a mut [u8]) -> ReadBuf<'a> { - let initialized = buf.len(); - let buf = unsafe { slice_to_uninit_mut(buf) }; - ReadBuf { - buf, - filled: 0, - initialized, - } - } - - /// Creates a new `ReadBuf` from a fully uninitialized buffer. - /// - /// Use `assume_init` if part of the buffer is known to be already initialized. - #[inline] - pub fn uninit(buf: &'a mut [MaybeUninit]) -> ReadBuf<'a> { - ReadBuf { - buf, - filled: 0, - initialized: 0, - } - } - - /// Returns the total capacity of the buffer. - #[inline] - pub fn capacity(&self) -> usize { - self.buf.len() - } - - /// Returns a shared reference to the filled portion of the buffer. - #[inline] - pub fn filled(&self) -> &[u8] { - let slice = &self.buf[..self.filled]; - // safety: filled describes how far into the buffer that the - // user has filled with bytes, so it's been initialized. - unsafe { slice_assume_init(slice) } - } - - /// Returns a mutable reference to the filled portion of the buffer. 
- #[inline] - pub fn filled_mut(&mut self) -> &mut [u8] { - let slice = &mut self.buf[..self.filled]; - // safety: filled describes how far into the buffer that the - // user has filled with bytes, so it's been initialized. - unsafe { slice_assume_init_mut(slice) } - } - - /// Returns a new `ReadBuf` comprised of the unfilled section up to `n`. - #[inline] - pub fn take(&mut self, n: usize) -> ReadBuf<'_> { - let max = std::cmp::min(self.remaining(), n); - // Safety: We don't set any of the `unfilled_mut` with `MaybeUninit::uninit`. - unsafe { ReadBuf::uninit(&mut self.unfilled_mut()[..max]) } - } - - /// Returns a shared reference to the initialized portion of the buffer. - /// - /// This includes the filled portion. - #[inline] - pub fn initialized(&self) -> &[u8] { - let slice = &self.buf[..self.initialized]; - // safety: initialized describes how far into the buffer that the - // user has at some point initialized with bytes. - unsafe { slice_assume_init(slice) } - } - - /// Returns a mutable reference to the initialized portion of the buffer. - /// - /// This includes the filled portion. - #[inline] - pub fn initialized_mut(&mut self) -> &mut [u8] { - let slice = &mut self.buf[..self.initialized]; - // safety: initialized describes how far into the buffer that the - // user has at some point initialized with bytes. - unsafe { slice_assume_init_mut(slice) } - } - - /// Returns a mutable reference to the entire buffer, without ensuring that it has been fully - /// initialized. - /// - /// The elements between 0 and `self.filled().len()` are filled, and those between 0 and - /// `self.initialized().len()` are initialized (and so can be converted to a `&mut [u8]`). - /// - /// The caller of this method must ensure that these invariants are upheld. For example, if the - /// caller initializes some of the uninitialized section of the buffer, it must call - /// [`assume_init`](Self::assume_init) with the number of bytes initialized. 
- /// - /// # Safety - /// - /// The caller must not de-initialize portions of the buffer that have already been initialized. - /// This includes any bytes in the region marked as uninitialized by `ReadBuf`. - #[inline] - pub unsafe fn inner_mut(&mut self) -> &mut [MaybeUninit] { - self.buf - } - - /// Returns a mutable reference to the unfilled part of the buffer without ensuring that it has been fully - /// initialized. - /// - /// # Safety - /// - /// The caller must not de-initialize portions of the buffer that have already been initialized. - /// This includes any bytes in the region marked as uninitialized by `ReadBuf`. - #[inline] - pub unsafe fn unfilled_mut(&mut self) -> &mut [MaybeUninit] { - &mut self.buf[self.filled..] - } - - /// Returns a mutable reference to the unfilled part of the buffer, ensuring it is fully initialized. - /// - /// Since `ReadBuf` tracks the region of the buffer that has been initialized, this is effectively "free" after - /// the first use. - #[inline] - pub fn initialize_unfilled(&mut self) -> &mut [u8] { - self.initialize_unfilled_to(self.remaining()) - } - - /// Returns a mutable reference to the first `n` bytes of the unfilled part of the buffer, ensuring it is - /// fully initialized. - /// - /// # Panics - /// - /// Panics if `self.remaining()` is less than `n`. - #[inline] - #[track_caller] - pub fn initialize_unfilled_to(&mut self, n: usize) -> &mut [u8] { - assert!(self.remaining() >= n, "n overflows remaining"); - - // This can't overflow, otherwise the assert above would have failed. - let end = self.filled + n; - - if self.initialized < end { - unsafe { - self.buf[self.initialized..end] - .as_mut_ptr() - .write_bytes(0, end - self.initialized); - } - self.initialized = end; - } - - let slice = &mut self.buf[self.filled..end]; - // safety: just above, we checked that the end of the buf has - // been initialized to some value. 
- unsafe { slice_assume_init_mut(slice) } - } - - /// Returns the number of bytes at the end of the slice that have not yet been filled. - #[inline] - pub fn remaining(&self) -> usize { - self.capacity() - self.filled - } - - /// Clears the buffer, resetting the filled region to empty. - /// - /// The number of initialized bytes is not changed, and the contents of the buffer are not modified. - #[inline] - pub fn clear(&mut self) { - self.filled = 0; - } - - /// Advances the size of the filled region of the buffer. - /// - /// The number of initialized bytes is not changed. - /// - /// # Panics - /// - /// Panics if the filled region of the buffer would become larger than the initialized region. - #[inline] - #[track_caller] - pub fn advance(&mut self, n: usize) { - let new = self.filled.checked_add(n).expect("filled overflow"); - self.set_filled(new); - } - - /// Sets the size of the filled region of the buffer. - /// - /// The number of initialized bytes is not changed. - /// - /// Note that this can be used to *shrink* the filled region of the buffer in addition to growing it (for - /// example, by a `AsyncRead` implementation that compresses data in-place). - /// - /// # Panics - /// - /// Panics if the filled region of the buffer would become larger than the initialized region. - #[inline] - #[track_caller] - pub fn set_filled(&mut self, n: usize) { - assert!( - n <= self.initialized, - "filled must not become larger than initialized" - ); - self.filled = n; - } - - /// Asserts that the first `n` unfilled bytes of the buffer are initialized. - /// - /// `ReadBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer - /// bytes than are already known to be initialized. - /// - /// # Safety - /// - /// The caller must ensure that `n` unfilled bytes of the buffer have already been initialized. 
- #[inline] - pub unsafe fn assume_init(&mut self, n: usize) { - let new = self.filled + n; - if new > self.initialized { - self.initialized = new; - } - } - - /// Appends data to the buffer, advancing the written position and possibly also the initialized position. - /// - /// # Panics - /// - /// Panics if `self.remaining()` is less than `buf.len()`. - #[inline] - #[track_caller] - pub fn put_slice(&mut self, buf: &[u8]) { - assert!( - self.remaining() >= buf.len(), - "buf.len() must fit in remaining()" - ); - - let amt = buf.len(); - // Cannot overflow, asserted above - let end = self.filled + amt; - - // Safety: the length is asserted above - unsafe { - self.buf[self.filled..end] - .as_mut_ptr() - .cast::() - .copy_from_nonoverlapping(buf.as_ptr(), amt); - } - - if self.initialized < end { - self.initialized = end; - } - self.filled = end; - } -} - -#[cfg(feature = "io-util")] -#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] -unsafe impl<'a> bytes::BufMut for ReadBuf<'a> { - fn remaining_mut(&self) -> usize { - self.remaining() - } - - // SAFETY: The caller guarantees that at least `cnt` unfilled bytes have been initialized. - unsafe fn advance_mut(&mut self, cnt: usize) { - self.assume_init(cnt); - self.advance(cnt); - } - - fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice { - // SAFETY: No region of `unfilled` will be deinitialized because it is - // exposed as an `UninitSlice`, whose API guarantees that the memory is - // never deinitialized. - let unfilled = unsafe { self.unfilled_mut() }; - let len = unfilled.len(); - let ptr = unfilled.as_mut_ptr() as *mut u8; - - // SAFETY: The pointer is valid for `len` bytes because it comes from a - // slice of that length. 
- unsafe { bytes::buf::UninitSlice::from_raw_parts_mut(ptr, len) } - } -} - -impl fmt::Debug for ReadBuf<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ReadBuf") - .field("filled", &self.filled) - .field("initialized", &self.initialized) - .field("capacity", &self.capacity()) - .finish() - } -} - -unsafe fn slice_to_uninit_mut(slice: &mut [u8]) -> &mut [MaybeUninit] { - &mut *(slice as *mut [u8] as *mut [MaybeUninit]) -} - -// TODO: This could use `MaybeUninit::slice_assume_init` when it is stable. -unsafe fn slice_assume_init(slice: &[MaybeUninit]) -> &[u8] { - &*(slice as *const [MaybeUninit] as *const [u8]) -} - -// TODO: This could use `MaybeUninit::slice_assume_init_mut` when it is stable. -unsafe fn slice_assume_init_mut(slice: &mut [MaybeUninit]) -> &mut [u8] { - &mut *(slice as *mut [MaybeUninit] as *mut [u8]) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/ready.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/ready.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/ready.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/ready.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,322 +0,0 @@ -#![cfg_attr(not(feature = "net"), allow(unreachable_pub))] - -use crate::io::interest::Interest; - -use std::fmt; -use std::ops; - -const READABLE: usize = 0b0_01; -const WRITABLE: usize = 0b0_10; -const READ_CLOSED: usize = 0b0_0100; -const WRITE_CLOSED: usize = 0b0_1000; -#[cfg(any(target_os = "linux", target_os = "android"))] -const PRIORITY: usize = 0b1_0000; -const ERROR: usize = 0b10_0000; - -/// Describes the readiness state of an I/O resources. -/// -/// `Ready` tracks which operation an I/O resource is ready to perform. -#[cfg_attr(docsrs, doc(cfg(feature = "net")))] -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub struct Ready(usize); - -impl Ready { - /// Returns the empty `Ready` set. 
- pub const EMPTY: Ready = Ready(0); - - /// Returns a `Ready` representing readable readiness. - pub const READABLE: Ready = Ready(READABLE); - - /// Returns a `Ready` representing writable readiness. - pub const WRITABLE: Ready = Ready(WRITABLE); - - /// Returns a `Ready` representing read closed readiness. - pub const READ_CLOSED: Ready = Ready(READ_CLOSED); - - /// Returns a `Ready` representing write closed readiness. - pub const WRITE_CLOSED: Ready = Ready(WRITE_CLOSED); - - /// Returns a `Ready` representing priority readiness. - #[cfg(any(target_os = "linux", target_os = "android"))] - #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] - pub const PRIORITY: Ready = Ready(PRIORITY); - - /// Returns a `Ready` representing error readiness. - pub const ERROR: Ready = Ready(ERROR); - - /// Returns a `Ready` representing readiness for all operations. - #[cfg(any(target_os = "linux", target_os = "android"))] - pub const ALL: Ready = - Ready(READABLE | WRITABLE | READ_CLOSED | WRITE_CLOSED | ERROR | PRIORITY); - - /// Returns a `Ready` representing readiness for all operations. - #[cfg(not(any(target_os = "linux", target_os = "android")))] - pub const ALL: Ready = Ready(READABLE | WRITABLE | READ_CLOSED | WRITE_CLOSED | ERROR); - - // Must remain crate-private to avoid adding a public dependency on Mio. 
- pub(crate) fn from_mio(event: &mio::event::Event) -> Ready { - let mut ready = Ready::EMPTY; - - #[cfg(all(target_os = "freebsd", feature = "net"))] - { - if event.is_aio() { - ready |= Ready::READABLE; - } - - if event.is_lio() { - ready |= Ready::READABLE; - } - } - - if event.is_readable() { - ready |= Ready::READABLE; - } - - if event.is_writable() { - ready |= Ready::WRITABLE; - } - - if event.is_read_closed() { - ready |= Ready::READ_CLOSED; - } - - if event.is_write_closed() { - ready |= Ready::WRITE_CLOSED; - } - - if event.is_error() { - ready |= Ready::ERROR; - } - - #[cfg(any(target_os = "linux", target_os = "android"))] - { - if event.is_priority() { - ready |= Ready::PRIORITY; - } - } - - ready - } - - /// Returns true if `Ready` is the empty set. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Ready; - /// - /// assert!(Ready::EMPTY.is_empty()); - /// assert!(!Ready::READABLE.is_empty()); - /// ``` - pub fn is_empty(self) -> bool { - self == Ready::EMPTY - } - - /// Returns `true` if the value includes `readable`. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Ready; - /// - /// assert!(!Ready::EMPTY.is_readable()); - /// assert!(Ready::READABLE.is_readable()); - /// assert!(Ready::READ_CLOSED.is_readable()); - /// assert!(!Ready::WRITABLE.is_readable()); - /// ``` - pub fn is_readable(self) -> bool { - self.contains(Ready::READABLE) || self.is_read_closed() - } - - /// Returns `true` if the value includes writable `readiness`. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Ready; - /// - /// assert!(!Ready::EMPTY.is_writable()); - /// assert!(!Ready::READABLE.is_writable()); - /// assert!(Ready::WRITABLE.is_writable()); - /// assert!(Ready::WRITE_CLOSED.is_writable()); - /// ``` - pub fn is_writable(self) -> bool { - self.contains(Ready::WRITABLE) || self.is_write_closed() - } - - /// Returns `true` if the value includes read-closed `readiness`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::io::Ready; - /// - /// assert!(!Ready::EMPTY.is_read_closed()); - /// assert!(!Ready::READABLE.is_read_closed()); - /// assert!(Ready::READ_CLOSED.is_read_closed()); - /// ``` - pub fn is_read_closed(self) -> bool { - self.contains(Ready::READ_CLOSED) - } - - /// Returns `true` if the value includes write-closed `readiness`. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Ready; - /// - /// assert!(!Ready::EMPTY.is_write_closed()); - /// assert!(!Ready::WRITABLE.is_write_closed()); - /// assert!(Ready::WRITE_CLOSED.is_write_closed()); - /// ``` - pub fn is_write_closed(self) -> bool { - self.contains(Ready::WRITE_CLOSED) - } - - /// Returns `true` if the value includes priority `readiness`. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Ready; - /// - /// assert!(!Ready::EMPTY.is_priority()); - /// assert!(!Ready::WRITABLE.is_priority()); - /// assert!(Ready::PRIORITY.is_priority()); - /// ``` - #[cfg(any(target_os = "linux", target_os = "android"))] - #[cfg_attr(docsrs, doc(cfg(any(target_os = "linux", target_os = "android"))))] - pub fn is_priority(self) -> bool { - self.contains(Ready::PRIORITY) - } - - /// Returns `true` if the value includes error `readiness`. - /// - /// # Examples - /// - /// ``` - /// use tokio::io::Ready; - /// - /// assert!(!Ready::EMPTY.is_error()); - /// assert!(!Ready::WRITABLE.is_error()); - /// assert!(Ready::ERROR.is_error()); - /// ``` - pub fn is_error(self) -> bool { - self.contains(Ready::ERROR) - } - - /// Returns true if `self` is a superset of `other`. - /// - /// `other` may represent more than one readiness operations, in which case - /// the function only returns true if `self` contains all readiness - /// specified in `other`. - pub(crate) fn contains>(self, other: T) -> bool { - let other = other.into(); - (self & other) == other - } - - /// Creates a `Ready` instance using the given `usize` representation. 
- /// - /// The `usize` representation must have been obtained from a call to - /// `Readiness::as_usize`. - /// - /// This function is mainly provided to allow the caller to get a - /// readiness value from an `AtomicUsize`. - pub(crate) fn from_usize(val: usize) -> Ready { - Ready(val & Ready::ALL.as_usize()) - } - - /// Returns a `usize` representation of the `Ready` value. - /// - /// This function is mainly provided to allow the caller to store a - /// readiness value in an `AtomicUsize`. - pub(crate) fn as_usize(self) -> usize { - self.0 - } - - pub(crate) fn from_interest(interest: Interest) -> Ready { - let mut ready = Ready::EMPTY; - - if interest.is_readable() { - ready |= Ready::READABLE; - ready |= Ready::READ_CLOSED; - } - - if interest.is_writable() { - ready |= Ready::WRITABLE; - ready |= Ready::WRITE_CLOSED; - } - - #[cfg(any(target_os = "linux", target_os = "android"))] - if interest.is_priority() { - ready |= Ready::PRIORITY; - ready |= Ready::READ_CLOSED; - } - - if interest.is_error() { - ready |= Ready::ERROR; - } - - ready - } - - pub(crate) fn intersection(self, interest: Interest) -> Ready { - Ready(self.0 & Ready::from_interest(interest).0) - } - - pub(crate) fn satisfies(self, interest: Interest) -> bool { - self.0 & Ready::from_interest(interest).0 != 0 - } -} - -impl ops::BitOr for Ready { - type Output = Ready; - - #[inline] - fn bitor(self, other: Ready) -> Ready { - Ready(self.0 | other.0) - } -} - -impl ops::BitOrAssign for Ready { - #[inline] - fn bitor_assign(&mut self, other: Ready) { - self.0 |= other.0; - } -} - -impl ops::BitAnd for Ready { - type Output = Ready; - - #[inline] - fn bitand(self, other: Ready) -> Ready { - Ready(self.0 & other.0) - } -} - -impl ops::Sub for Ready { - type Output = Ready; - - #[inline] - fn sub(self, other: Ready) -> Ready { - Ready(self.0 & !other.0) - } -} - -impl fmt::Debug for Ready { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut fmt = fmt.debug_struct("Ready"); - - 
fmt.field("is_readable", &self.is_readable()) - .field("is_writable", &self.is_writable()) - .field("is_read_closed", &self.is_read_closed()) - .field("is_write_closed", &self.is_write_closed()) - .field("is_error", &self.is_error()); - - #[cfg(any(target_os = "linux", target_os = "android"))] - fmt.field("is_priority", &self.is_priority()); - - fmt.finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/seek.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/seek.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/seek.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/seek.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,57 +0,0 @@ -use crate::io::AsyncSeek; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io::{self, SeekFrom}; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Future for the [`seek`](crate::io::AsyncSeekExt::seek) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Seek<'a, S: ?Sized> { - seek: &'a mut S, - pos: Option, - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } -} - -pub(crate) fn seek(seek: &mut S, pos: SeekFrom) -> Seek<'_, S> -where - S: AsyncSeek + ?Sized + Unpin, -{ - Seek { - seek, - pos: Some(pos), - _pin: PhantomPinned, - } -} - -impl Future for Seek<'_, S> -where - S: AsyncSeek + ?Sized + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - match me.pos { - Some(pos) => { - // ensure no seek in progress - ready!(Pin::new(&mut *me.seek).poll_complete(cx))?; - match Pin::new(&mut *me.seek).start_seek(*pos) { - Ok(()) => { - *me.pos = None; - Pin::new(&mut *me.seek).poll_complete(cx) - } - Err(e) => Poll::Ready(Err(e)), - } - } - None => Pin::new(&mut *me.seek).poll_complete(cx), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/split.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/split.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/split.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/split.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ -//! Split a single value implementing `AsyncRead + AsyncWrite` into separate -//! `AsyncRead` and `AsyncWrite` handles. -//! -//! To restore this read/write object from its `split::ReadHalf` and -//! `split::WriteHalf` use `unsplit`. - -use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; - -use std::cell::UnsafeCell; -use std::fmt; -use std::io; -use std::pin::Pin; -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering::{Acquire, Release}; -use std::sync::Arc; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// The readable half of a value returned from [`split`](split()). - pub struct ReadHalf { - inner: Arc>, - } - - /// The writable half of a value returned from [`split`](split()). - pub struct WriteHalf { - inner: Arc>, - } - - /// Splits a single value implementing `AsyncRead + AsyncWrite` into separate - /// `AsyncRead` and `AsyncWrite` handles. 
- /// - /// To restore this read/write object from its `ReadHalf` and - /// `WriteHalf` use [`unsplit`](ReadHalf::unsplit()). - pub fn split(stream: T) -> (ReadHalf, WriteHalf) - where - T: AsyncRead + AsyncWrite, - { - let is_write_vectored = stream.is_write_vectored(); - - let inner = Arc::new(Inner { - locked: AtomicBool::new(false), - stream: UnsafeCell::new(stream), - is_write_vectored, - }); - - let rd = ReadHalf { - inner: inner.clone(), - }; - - let wr = WriteHalf { inner }; - - (rd, wr) - } -} - -struct Inner { - locked: AtomicBool, - stream: UnsafeCell, - is_write_vectored: bool, -} - -struct Guard<'a, T> { - inner: &'a Inner, -} - -impl ReadHalf { - /// Checks if this `ReadHalf` and some `WriteHalf` were split from the same - /// stream. - pub fn is_pair_of(&self, other: &WriteHalf) -> bool { - other.is_pair_of(self) - } - - /// Reunites with a previously split `WriteHalf`. - /// - /// # Panics - /// - /// If this `ReadHalf` and the given `WriteHalf` do not originate from the - /// same `split` operation this method will panic. - /// This can be checked ahead of time by comparing the stream ID - /// of the two halves. - #[track_caller] - pub fn unsplit(self, wr: WriteHalf) -> T - where - T: Unpin, - { - if self.is_pair_of(&wr) { - drop(wr); - - let inner = Arc::try_unwrap(self.inner) - .ok() - .expect("`Arc::try_unwrap` failed"); - - inner.stream.into_inner() - } else { - panic!("Unrelated `split::Write` passed to `split::Read::unsplit`.") - } - } -} - -impl WriteHalf { - /// Checks if this `WriteHalf` and some `ReadHalf` were split from the same - /// stream. 
- pub fn is_pair_of(&self, other: &ReadHalf) -> bool { - Arc::ptr_eq(&self.inner, &other.inner) - } -} - -impl AsyncRead for ReadHalf { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_read(cx, buf) - } -} - -impl AsyncWrite for WriteHalf { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_write(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_shutdown(cx) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.inner.is_write_vectored - } -} - -impl Inner { - fn poll_lock(&self, cx: &mut Context<'_>) -> Poll> { - if self - .locked - .compare_exchange(false, true, Acquire, Acquire) - .is_ok() - { - Poll::Ready(Guard { inner: self }) - } else { - // Spin... but investigate a better strategy - - std::thread::yield_now(); - cx.waker().wake_by_ref(); - - Poll::Pending - } - } -} - -impl Guard<'_, T> { - fn stream_pin(&mut self) -> Pin<&mut T> { - // safety: the stream is pinned in `Arc` and the `Guard` ensures mutual - // exclusion. 
- unsafe { Pin::new_unchecked(&mut *self.inner.stream.get()) } - } -} - -impl Drop for Guard<'_, T> { - fn drop(&mut self) { - self.inner.locked.store(false, Release); - } -} - -unsafe impl Send for ReadHalf {} -unsafe impl Send for WriteHalf {} -unsafe impl Sync for ReadHalf {} -unsafe impl Sync for WriteHalf {} - -impl fmt::Debug for ReadHalf { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("split::ReadHalf").finish() - } -} - -impl fmt::Debug for WriteHalf { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("split::WriteHalf").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/stderr.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/stderr.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/stderr.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/stderr.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,130 +0,0 @@ -use crate::io::blocking::Blocking; -use crate::io::stdio_common::SplitByUtf8BoundaryIfWindows; -use crate::io::AsyncWrite; - -use std::io; -use std::pin::Pin; -use std::task::Context; -use std::task::Poll; - -cfg_io_std! { - /// A handle to the standard error stream of a process. - /// - /// Concurrent writes to stderr must be executed with care: Only individual - /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular - /// you should be aware that writes using [`write_all`] are not guaranteed - /// to occur as a single write, so multiple threads writing data with - /// [`write_all`] may result in interleaved output. - /// - /// Created by the [`stderr`] function. 
- /// - /// [`stderr`]: stderr() - /// [`AsyncWrite`]: AsyncWrite - /// [`write_all`]: crate::io::AsyncWriteExt::write_all() - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stderr = io::stdout(); - /// stderr.write_all(b"Print some error here.").await?; - /// Ok(()) - /// } - /// ``` - #[derive(Debug)] - pub struct Stderr { - std: SplitByUtf8BoundaryIfWindows>, - } - - /// Constructs a new handle to the standard error of the current process. - /// - /// The returned handle allows writing to standard error from the within the - /// Tokio runtime. - /// - /// Concurrent writes to stderr must be executed with care: Only individual - /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular - /// you should be aware that writes using [`write_all`] are not guaranteed - /// to occur as a single write, so multiple threads writing data with - /// [`write_all`] may result in interleaved output. - /// - /// [`AsyncWrite`]: AsyncWrite - /// [`write_all`]: crate::io::AsyncWriteExt::write_all() - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stderr = io::stderr(); - /// stderr.write_all(b"Print some error here.").await?; - /// Ok(()) - /// } - /// ``` - pub fn stderr() -> Stderr { - let std = io::stderr(); - Stderr { - std: SplitByUtf8BoundaryIfWindows::new(Blocking::new(std)), - } - } -} - -#[cfg(unix)] -mod sys { - use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd}; - - use super::Stderr; - - impl AsRawFd for Stderr { - fn as_raw_fd(&self) -> RawFd { - std::io::stderr().as_raw_fd() - } - } - - impl AsFd for Stderr { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } - } -} - -cfg_windows! 
{ - use crate::os::windows::io::{AsHandle, BorrowedHandle, AsRawHandle, RawHandle}; - - impl AsRawHandle for Stderr { - fn as_raw_handle(&self) -> RawHandle { - std::io::stderr().as_raw_handle() - } - } - - impl AsHandle for Stderr { - fn as_handle(&self) -> BorrowedHandle<'_> { - unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) } - } - } -} - -impl AsyncWrite for Stderr { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.std).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.std).poll_flush(cx) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut self.std).poll_shutdown(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/stdin.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/stdin.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/stdin.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/stdin.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,94 +0,0 @@ -use crate::io::blocking::Blocking; -use crate::io::{AsyncRead, ReadBuf}; - -use std::io; -use std::pin::Pin; -use std::task::Context; -use std::task::Poll; - -cfg_io_std! { - /// A handle to the standard input stream of a process. - /// - /// The handle implements the [`AsyncRead`] trait, but beware that concurrent - /// reads of `Stdin` must be executed with care. - /// - /// This handle is best used for non-interactive uses, such as when a file - /// is piped into the application. For technical reasons, `stdin` is - /// implemented by using an ordinary blocking read on a separate thread, and - /// it is impossible to cancel that read. This can make shutdown of the - /// runtime hang until the user presses enter. - /// - /// For interactive uses, it is recommended to spawn a thread dedicated to - /// user input and use blocking IO directly in that thread. 
- /// - /// Created by the [`stdin`] function. - /// - /// [`stdin`]: fn@stdin - /// [`AsyncRead`]: trait@AsyncRead - #[derive(Debug)] - pub struct Stdin { - std: Blocking, - } - - /// Constructs a new handle to the standard input of the current process. - /// - /// This handle is best used for non-interactive uses, such as when a file - /// is piped into the application. For technical reasons, `stdin` is - /// implemented by using an ordinary blocking read on a separate thread, and - /// it is impossible to cancel that read. This can make shutdown of the - /// runtime hang until the user presses enter. - /// - /// For interactive uses, it is recommended to spawn a thread dedicated to - /// user input and use blocking IO directly in that thread. - pub fn stdin() -> Stdin { - let std = io::stdin(); - Stdin { - std: Blocking::new(std), - } - } -} - -#[cfg(unix)] -mod sys { - use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd}; - - use super::Stdin; - - impl AsRawFd for Stdin { - fn as_raw_fd(&self) -> RawFd { - std::io::stdin().as_raw_fd() - } - } - - impl AsFd for Stdin { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } - } -} - -cfg_windows! 
{ - use crate::os::windows::io::{AsHandle, BorrowedHandle, AsRawHandle, RawHandle}; - - impl AsRawHandle for Stdin { - fn as_raw_handle(&self) -> RawHandle { - std::io::stdin().as_raw_handle() - } - } - - impl AsHandle for Stdin { - fn as_handle(&self) -> BorrowedHandle<'_> { - unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) } - } - } -} - -impl AsyncRead for Stdin { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - Pin::new(&mut self.std).poll_read(cx, buf) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/stdio_common.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/stdio_common.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/stdio_common.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/stdio_common.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,221 +0,0 @@ -//! Contains utilities for stdout and stderr. -use crate::io::AsyncWrite; -use std::pin::Pin; -use std::task::{Context, Poll}; -/// # Windows -/// AsyncWrite adapter that finds last char boundary in given buffer and does not write the rest, -/// if buffer contents seems to be utf8. Otherwise it only trims buffer down to MAX_BUF. -/// That's why, wrapped writer will always receive well-formed utf-8 bytes. -/// # Other platforms -/// Passes data to `inner` as is. -#[derive(Debug)] -pub(crate) struct SplitByUtf8BoundaryIfWindows { - inner: W, -} - -impl SplitByUtf8BoundaryIfWindows { - pub(crate) fn new(inner: W) -> Self { - Self { inner } - } -} - -// this constant is defined by Unicode standard. 
-const MAX_BYTES_PER_CHAR: usize = 4; - -// Subject for tweaking here -const MAGIC_CONST: usize = 8; - -impl crate::io::AsyncWrite for SplitByUtf8BoundaryIfWindows -where - W: AsyncWrite + Unpin, -{ - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - mut buf: &[u8], - ) -> Poll> { - // just a closure to avoid repetitive code - let mut call_inner = move |buf| Pin::new(&mut self.inner).poll_write(cx, buf); - - // 1. Only windows stdio can suffer from non-utf8. - // We also check for `test` so that we can write some tests - // for further code. Since `AsyncWrite` can always shrink - // buffer at its discretion, excessive (i.e. in tests) shrinking - // does not break correctness. - // 2. If buffer is small, it will not be shrunk. - // That's why, it's "textness" will not change, so we don't have - // to fixup it. - if cfg!(not(any(target_os = "windows", test))) || buf.len() <= crate::io::blocking::MAX_BUF - { - return call_inner(buf); - } - - buf = &buf[..crate::io::blocking::MAX_BUF]; - - // Now there are two possibilities. - // If caller gave is binary buffer, we **should not** shrink it - // anymore, because excessive shrinking hits performance. - // If caller gave as binary buffer, we **must** additionally - // shrink it to strip incomplete char at the end of buffer. - // that's why check we will perform now is allowed to have - // false-positive. - - // Now let's look at the first MAX_BYTES_PER_CHAR * MAGIC_CONST bytes. - // if they are (possibly incomplete) utf8, then we can be quite sure - // that input buffer was utf8. - - let have_to_fix_up = match std::str::from_utf8(&buf[..MAX_BYTES_PER_CHAR * MAGIC_CONST]) { - Ok(_) => true, - Err(err) => { - let incomplete_bytes = MAX_BYTES_PER_CHAR * MAGIC_CONST - err.valid_up_to(); - incomplete_bytes < MAX_BYTES_PER_CHAR - } - }; - - if have_to_fix_up { - // We must pop several bytes at the end which form incomplete - // character. 
To achieve it, we exploit UTF8 encoding: - // for any code point, all bytes except first start with 0b10 prefix. - // see https://en.wikipedia.org/wiki/UTF-8#Encoding for details - let trailing_incomplete_char_size = buf - .iter() - .rev() - .take(MAX_BYTES_PER_CHAR) - .position(|byte| *byte < 0b1000_0000 || *byte >= 0b1100_0000) - .unwrap_or(0) - + 1; - buf = &buf[..buf.len() - trailing_incomplete_char_size]; - } - - call_inner(buf) - } - - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut self.inner).poll_flush(cx) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut self.inner).poll_shutdown(cx) - } -} - -#[cfg(test)] -#[cfg(not(loom))] -mod tests { - use crate::io::blocking::MAX_BUF; - use crate::io::AsyncWriteExt; - use std::io; - use std::pin::Pin; - use std::task::Context; - use std::task::Poll; - - struct TextMockWriter; - - impl crate::io::AsyncWrite for TextMockWriter { - fn poll_write( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - assert!(buf.len() <= MAX_BUF); - assert!(std::str::from_utf8(buf).is_ok()); - Poll::Ready(Ok(buf.len())) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown( - self: Pin<&mut Self>, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - } - - struct LoggingMockWriter { - write_history: Vec, - } - - impl LoggingMockWriter { - fn new() -> Self { - LoggingMockWriter { - write_history: Vec::new(), - } - } - } - - impl crate::io::AsyncWrite for LoggingMockWriter { - fn poll_write( - mut self: Pin<&mut Self>, - _cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - assert!(buf.len() <= MAX_BUF); - self.write_history.push(buf.len()); - Poll::Ready(Ok(buf.len())) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown( - self: Pin<&mut Self>, - _cx: 
&mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_splitter() { - let data = str::repeat("â–ˆ", MAX_BUF); - let mut wr = super::SplitByUtf8BoundaryIfWindows::new(TextMockWriter); - let fut = async move { - wr.write_all(data.as_bytes()).await.unwrap(); - }; - crate::runtime::Builder::new_current_thread() - .build() - .unwrap() - .block_on(fut); - } - - #[test] - #[cfg_attr(miri, ignore)] - fn test_pseudo_text() { - // In this test we write a piece of binary data, whose beginning is - // text though. We then validate that even in this corner case buffer - // was not shrunk too much. - let checked_count = super::MAGIC_CONST * super::MAX_BYTES_PER_CHAR; - let mut data: Vec = str::repeat("a", checked_count).into(); - data.extend(std::iter::repeat(0b1010_1010).take(MAX_BUF - checked_count + 1)); - let mut writer = LoggingMockWriter::new(); - let mut splitter = super::SplitByUtf8BoundaryIfWindows::new(&mut writer); - crate::runtime::Builder::new_current_thread() - .build() - .unwrap() - .block_on(async { - splitter.write_all(&data).await.unwrap(); - }); - // Check that at most two writes were performed - assert!(writer.write_history.len() <= 2); - // Check that all has been written - assert_eq!( - writer.write_history.iter().copied().sum::(), - data.len() - ); - // Check that at most MAX_BYTES_PER_CHAR + 1 (i.e. 5) bytes were shrunk - // from the buffer: one because it was outside of MAX_BUF boundary, and - // up to one "utf8 code point". 
- assert!(data.len() - writer.write_history[0] <= super::MAX_BYTES_PER_CHAR + 1); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/stdout.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/stdout.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/stdout.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/stdout.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,129 +0,0 @@ -use crate::io::blocking::Blocking; -use crate::io::stdio_common::SplitByUtf8BoundaryIfWindows; -use crate::io::AsyncWrite; -use std::io; -use std::pin::Pin; -use std::task::Context; -use std::task::Poll; - -cfg_io_std! { - /// A handle to the standard output stream of a process. - /// - /// Concurrent writes to stdout must be executed with care: Only individual - /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular - /// you should be aware that writes using [`write_all`] are not guaranteed - /// to occur as a single write, so multiple threads writing data with - /// [`write_all`] may result in interleaved output. - /// - /// Created by the [`stdout`] function. - /// - /// [`stdout`]: stdout() - /// [`AsyncWrite`]: AsyncWrite - /// [`write_all`]: crate::io::AsyncWriteExt::write_all() - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stdout = io::stdout(); - /// stdout.write_all(b"Hello world!").await?; - /// Ok(()) - /// } - /// ``` - #[derive(Debug)] - pub struct Stdout { - std: SplitByUtf8BoundaryIfWindows>, - } - - /// Constructs a new handle to the standard output of the current process. - /// - /// The returned handle allows writing to standard out from the within the - /// Tokio runtime. - /// - /// Concurrent writes to stdout must be executed with care: Only individual - /// writes to this [`AsyncWrite`] are guaranteed to be intact. 
In particular - /// you should be aware that writes using [`write_all`] are not guaranteed - /// to occur as a single write, so multiple threads writing data with - /// [`write_all`] may result in interleaved output. - /// - /// [`AsyncWrite`]: AsyncWrite - /// [`write_all`]: crate::io::AsyncWriteExt::write_all() - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stdout = io::stdout(); - /// stdout.write_all(b"Hello world!").await?; - /// Ok(()) - /// } - /// ``` - pub fn stdout() -> Stdout { - let std = io::stdout(); - Stdout { - std: SplitByUtf8BoundaryIfWindows::new(Blocking::new(std)), - } - } -} - -#[cfg(unix)] -mod sys { - use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd}; - - use super::Stdout; - - impl AsRawFd for Stdout { - fn as_raw_fd(&self) -> RawFd { - std::io::stdout().as_raw_fd() - } - } - - impl AsFd for Stdout { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } - } -} - -cfg_windows! 
{ - use crate::os::windows::io::{AsHandle, BorrowedHandle, AsRawHandle, RawHandle}; - - impl AsRawHandle for Stdout { - fn as_raw_handle(&self) -> RawHandle { - std::io::stdout().as_raw_handle() - } - } - - impl AsHandle for Stdout { - fn as_handle(&self) -> BorrowedHandle<'_> { - unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) } - } - } -} - -impl AsyncWrite for Stdout { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.std).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.std).poll_flush(cx) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut self.std).poll_shutdown(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/async_buf_read_ext.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/async_buf_read_ext.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/async_buf_read_ext.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/async_buf_read_ext.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,351 +0,0 @@ -use crate::io::util::fill_buf::{fill_buf, FillBuf}; -use crate::io::util::lines::{lines, Lines}; -use crate::io::util::read_line::{read_line, ReadLine}; -use crate::io::util::read_until::{read_until, ReadUntil}; -use crate::io::util::split::{split, Split}; -use crate::io::AsyncBufRead; - -cfg_io_util! { - /// An extension trait which adds utility methods to [`AsyncBufRead`] types. - /// - /// [`AsyncBufRead`]: crate::io::AsyncBufRead - pub trait AsyncBufReadExt: AsyncBufRead { - /// Reads all bytes into `buf` until the delimiter `byte` or EOF is reached. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_until(&mut self, byte: u8, buf: &mut Vec) -> io::Result; - /// ``` - /// - /// This function will read bytes from the underlying stream until the - /// delimiter or EOF is found. 
Once found, all bytes up to, and including, - /// the delimiter (if found) will be appended to `buf`. - /// - /// If successful, this function will return the total number of bytes read. - /// - /// If this function returns `Ok(0)`, the stream has reached EOF. - /// - /// # Errors - /// - /// This function will ignore all instances of [`ErrorKind::Interrupted`] and - /// will otherwise return any errors returned by [`fill_buf`]. - /// - /// If an I/O error is encountered then all bytes read so far will be - /// present in `buf` and its length will have been adjusted appropriately. - /// - /// [`fill_buf`]: AsyncBufRead::poll_fill_buf - /// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted - /// - /// # Cancel safety - /// - /// If the method is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then some data may have been partially read. Any - /// partially read bytes are appended to `buf`, and the method can be - /// called again to continue reading until `byte`. - /// - /// This method returns the total number of bytes read. If you cancel - /// the call to `read_until` and then call it again to continue reading, - /// the counter is reset. - /// - /// # Examples - /// - /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. 
In - /// this example, we use [`Cursor`] to read all the bytes in a byte slice - /// in hyphen delimited segments: - /// - /// [`Cursor`]: std::io::Cursor - /// - /// ``` - /// use tokio::io::AsyncBufReadExt; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut cursor = Cursor::new(b"lorem-ipsum"); - /// let mut buf = vec![]; - /// - /// // cursor is at 'l' - /// let num_bytes = cursor.read_until(b'-', &mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 6); - /// assert_eq!(buf, b"lorem-"); - /// buf.clear(); - /// - /// // cursor is at 'i' - /// let num_bytes = cursor.read_until(b'-', &mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 5); - /// assert_eq!(buf, b"ipsum"); - /// buf.clear(); - /// - /// // cursor is at EOF - /// let num_bytes = cursor.read_until(b'-', &mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// assert_eq!(num_bytes, 0); - /// assert_eq!(buf, b""); - /// } - /// ``` - fn read_until<'a>(&'a mut self, byte: u8, buf: &'a mut Vec) -> ReadUntil<'a, Self> - where - Self: Unpin, - { - read_until(self, byte, buf) - } - - /// Reads all bytes until a newline (the 0xA byte) is reached, and append - /// them to the provided buffer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_line(&mut self, buf: &mut String) -> io::Result; - /// ``` - /// - /// This function will read bytes from the underlying stream until the - /// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes - /// up to, and including, the delimiter (if found) will be appended to - /// `buf`. - /// - /// If successful, this function will return the total number of bytes read. - /// - /// If this function returns `Ok(0)`, the stream has reached EOF. 
- /// - /// # Errors - /// - /// This function has the same error semantics as [`read_until`] and will - /// also return an error if the read bytes are not valid UTF-8. If an I/O - /// error is encountered then `buf` may contain some bytes already read in - /// the event that all data read so far was valid UTF-8. - /// - /// [`read_until`]: AsyncBufReadExt::read_until - /// - /// # Cancel safety - /// - /// This method is not cancellation safe. If the method is used as the - /// event in a [`tokio::select!`](crate::select) statement and some - /// other branch completes first, then some data may have been partially - /// read, and this data is lost. There are no guarantees regarding the - /// contents of `buf` when the call is cancelled. The current - /// implementation replaces `buf` with the empty string, but this may - /// change in the future. - /// - /// This function does not behave like [`read_until`] because of the - /// requirement that a string contains only valid utf-8. If you need a - /// cancellation safe `read_line`, there are three options: - /// - /// * Call [`read_until`] with a newline character and manually perform the utf-8 check. - /// * The stream returned by [`lines`] has a cancellation safe - /// [`next_line`] method. - /// * Use [`tokio_util::codec::LinesCodec`][LinesCodec]. - /// - /// [LinesCodec]: https://docs.rs/tokio-util/latest/tokio_util/codec/struct.LinesCodec.html - /// [`read_until`]: Self::read_until - /// [`lines`]: Self::lines - /// [`next_line`]: crate::io::Lines::next_line - /// - /// # Examples - /// - /// [`std::io::Cursor`][`Cursor`] is a type that implements - /// `AsyncBufRead`. 
In this example, we use [`Cursor`] to read all the - /// lines in a byte slice: - /// - /// [`Cursor`]: std::io::Cursor - /// - /// ``` - /// use tokio::io::AsyncBufReadExt; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut cursor = Cursor::new(b"foo\nbar"); - /// let mut buf = String::new(); - /// - /// // cursor is at 'f' - /// let num_bytes = cursor.read_line(&mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 4); - /// assert_eq!(buf, "foo\n"); - /// buf.clear(); - /// - /// // cursor is at 'b' - /// let num_bytes = cursor.read_line(&mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 3); - /// assert_eq!(buf, "bar"); - /// buf.clear(); - /// - /// // cursor is at EOF - /// let num_bytes = cursor.read_line(&mut buf) - /// .await - /// .expect("reading from cursor won't fail"); - /// - /// assert_eq!(num_bytes, 0); - /// assert_eq!(buf, ""); - /// } - /// ``` - fn read_line<'a>(&'a mut self, buf: &'a mut String) -> ReadLine<'a, Self> - where - Self: Unpin, - { - read_line(self, buf) - } - - /// Returns a stream of the contents of this reader split on the byte - /// `byte`. - /// - /// This method is the asynchronous equivalent to - /// [`BufRead::split`](std::io::BufRead::split). - /// - /// The stream returned from this function will yield instances of - /// [`io::Result`]`<`[`Option`]`<`[`Vec`]`>>`. Each vector returned will *not* have - /// the delimiter byte at the end. - /// - /// [`io::Result`]: std::io::Result - /// [`Option`]: core::option::Option - /// [`Vec`]: std::vec::Vec - /// - /// # Errors - /// - /// Each item of the stream has the same error semantics as - /// [`AsyncBufReadExt::read_until`](AsyncBufReadExt::read_until). 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncBufRead; - /// use tokio::io::AsyncBufReadExt; - /// - /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { - /// let mut segments = my_buf_read.split(b'f'); - /// - /// while let Some(segment) = segments.next_segment().await? { - /// println!("length = {}", segment.len()) - /// } - /// # Ok(()) - /// # } - /// ``` - fn split(self, byte: u8) -> Split - where - Self: Sized + Unpin, - { - split(self, byte) - } - - /// Returns the contents of the internal buffer, filling it with more - /// data from the inner reader if it is empty. - /// - /// This function is a lower-level call. It needs to be paired with the - /// [`consume`] method to function properly. When calling this method, - /// none of the contents will be "read" in the sense that later calling - /// `read` may return the same contents. As such, [`consume`] must be - /// called with the number of bytes that are consumed from this buffer - /// to ensure that the bytes are never returned twice. - /// - /// An empty buffer returned indicates that the stream has reached EOF. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn fill_buf(&mut self) -> io::Result<&[u8]>; - /// ``` - /// - /// # Errors - /// - /// This function will return an I/O error if the underlying reader was - /// read, but returned an error. - /// - /// [`consume`]: crate::io::AsyncBufReadExt::consume - fn fill_buf(&mut self) -> FillBuf<'_, Self> - where - Self: Unpin, - { - fill_buf(self) - } - - /// Tells this buffer that `amt` bytes have been consumed from the - /// buffer, so they should no longer be returned in calls to [`read`]. - /// - /// This function is a lower-level call. It needs to be paired with the - /// [`fill_buf`] method to function properly. 
This function does not - /// perform any I/O, it simply informs this object that some amount of - /// its buffer, returned from [`fill_buf`], has been consumed and should - /// no longer be returned. As such, this function may do odd things if - /// [`fill_buf`] isn't called before calling it. - /// - /// The `amt` must be less than the number of bytes in the buffer - /// returned by [`fill_buf`]. - /// - /// [`read`]: crate::io::AsyncReadExt::read - /// [`fill_buf`]: crate::io::AsyncBufReadExt::fill_buf - fn consume(&mut self, amt: usize) - where - Self: Unpin, - { - std::pin::Pin::new(self).consume(amt) - } - - /// Returns a stream over the lines of this reader. - /// This method is the async equivalent to [`BufRead::lines`](std::io::BufRead::lines). - /// - /// The stream returned from this function will yield instances of - /// [`io::Result`]`<`[`Option`]`<`[`String`]`>>`. Each string returned will *not* have a newline - /// byte (the 0xA byte) or CRLF (0xD, 0xA bytes) at the end. - /// - /// [`io::Result`]: std::io::Result - /// [`Option`]: core::option::Option - /// [`String`]: String - /// - /// # Errors - /// - /// Each line of the stream has the same error semantics as [`AsyncBufReadExt::read_line`]. - /// - /// # Examples - /// - /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In - /// this example, we use [`Cursor`] to iterate over all the lines in a byte - /// slice. 
- /// - /// [`Cursor`]: std::io::Cursor - /// - /// ``` - /// use tokio::io::AsyncBufReadExt; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() { - /// let cursor = Cursor::new(b"lorem\nipsum\r\ndolor"); - /// - /// let mut lines = cursor.lines(); - /// - /// assert_eq!(lines.next_line().await.unwrap(), Some(String::from("lorem"))); - /// assert_eq!(lines.next_line().await.unwrap(), Some(String::from("ipsum"))); - /// assert_eq!(lines.next_line().await.unwrap(), Some(String::from("dolor"))); - /// assert_eq!(lines.next_line().await.unwrap(), None); - /// } - /// ``` - /// - /// [`AsyncBufReadExt::read_line`]: AsyncBufReadExt::read_line - fn lines(self) -> Lines - where - Self: Sized, - { - lines(self) - } - } -} - -impl AsyncBufReadExt for R {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/async_read_ext.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/async_read_ext.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/async_read_ext.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/async_read_ext.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1298 +0,0 @@ -use crate::io::util::chain::{chain, Chain}; -use crate::io::util::read::{read, Read}; -use crate::io::util::read_buf::{read_buf, ReadBuf}; -use crate::io::util::read_exact::{read_exact, ReadExact}; -use crate::io::util::read_int::{ReadF32, ReadF32Le, ReadF64, ReadF64Le}; -use crate::io::util::read_int::{ - ReadI128, ReadI128Le, ReadI16, ReadI16Le, ReadI32, ReadI32Le, ReadI64, ReadI64Le, ReadI8, -}; -use crate::io::util::read_int::{ - ReadU128, ReadU128Le, ReadU16, ReadU16Le, ReadU32, ReadU32Le, ReadU64, ReadU64Le, ReadU8, -}; -use crate::io::util::read_to_end::{read_to_end, ReadToEnd}; -use crate::io::util::read_to_string::{read_to_string, ReadToString}; -use crate::io::util::take::{take, Take}; -use crate::io::AsyncRead; - -use bytes::BufMut; - -cfg_io_util! { - /// Defines numeric reader - macro_rules! 
read_impl { - ( - $( - $(#[$outer:meta])* - fn $name:ident(&mut self) -> $($fut:ident)*; - )* - ) => { - $( - $(#[$outer])* - fn $name(&mut self) -> $($fut)*<&mut Self> where Self: Unpin { - $($fut)*::new(self) - } - )* - } - } - - /// Reads bytes from a source. - /// - /// Implemented as an extension trait, adding utility methods to all - /// [`AsyncRead`] types. Callers will tend to import this trait instead of - /// [`AsyncRead`]. - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = [0; 10]; - /// - /// // The `read` method is defined by this trait. - /// let n = f.read(&mut buffer[..]).await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// See [module][crate::io] documentation for more details. - /// - /// [`AsyncRead`]: AsyncRead - pub trait AsyncReadExt: AsyncRead { - /// Creates a new `AsyncRead` instance that chains this stream with - /// `next`. - /// - /// The returned `AsyncRead` instance will first read all bytes from this object - /// until EOF is encountered. Afterwards the output is equivalent to the - /// output of `next`. - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `AsyncRead`: - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let f1 = File::open("foo.txt").await?; - /// let f2 = File::open("bar.txt").await?; - /// - /// let mut handle = f1.chain(f2); - /// let mut buffer = String::new(); - /// - /// // read the value into a String. We could use any AsyncRead - /// // method here, this is just one example. 
- /// handle.read_to_string(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - fn chain(self, next: R) -> Chain - where - Self: Sized, - R: AsyncRead, - { - chain(self, next) - } - - /// Pulls some bytes from this source into the specified buffer, - /// returning how many bytes were read. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read(&mut self, buf: &mut [u8]) -> io::Result; - /// ``` - /// - /// This method does not provide any guarantees about whether it - /// completes immediately or asynchronously. - /// - /// # Return - /// - /// If the return value of this method is `Ok(n)`, then it must be - /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates - /// that the buffer `buf` has been filled in with `n` bytes of data from - /// this source. If `n` is `0`, then it can indicate one of two - /// scenarios: - /// - /// 1. This reader has reached its "end of file" and will likely no longer - /// be able to produce bytes. Note that this does not mean that the - /// reader will *always* no longer be able to produce bytes. - /// 2. The buffer specified was 0 bytes in length. - /// - /// No guarantees are provided about the contents of `buf` when this - /// function is called, implementations cannot rely on any property of the - /// contents of `buf` being `true`. It is recommended that *implementations* - /// only write data to `buf` instead of reading its contents. - /// - /// Correspondingly, however, *callers* of this method may not assume - /// any guarantees about how the implementation uses `buf`. It is - /// possible that the code that's supposed to write to the buffer might - /// also read from it. It is your responsibility to make sure that `buf` - /// is initialized before calling `read`. - /// - /// # Errors - /// - /// If this function encounters any form of I/O or other error, an error - /// variant will be returned. If an error is returned then it must be - /// guaranteed that no bytes were read. 
- /// - /// # Cancel safety - /// - /// This method is cancel safe. If you use it as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that no data was read. - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = [0; 10]; - /// - /// // read up to 10 bytes - /// let n = f.read(&mut buffer[..]).await?; - /// - /// println!("The bytes: {:?}", &buffer[..n]); - /// Ok(()) - /// } - /// ``` - fn read<'a>(&'a mut self, buf: &'a mut [u8]) -> Read<'a, Self> - where - Self: Unpin, - { - read(self, buf) - } - - /// Pulls some bytes from this source into the specified buffer, - /// advancing the buffer's internal cursor. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_buf(&mut self, buf: &mut B) -> io::Result; - /// ``` - /// - /// Usually, only a single `read` syscall is issued, even if there is - /// more space in the supplied buffer. - /// - /// This method does not provide any guarantees about whether it - /// completes immediately or asynchronously. - /// - /// # Return - /// - /// A nonzero `n` value indicates that the buffer `buf` has been filled - /// in with `n` bytes of data from this source. If `n` is `0`, then it - /// can indicate one of two scenarios: - /// - /// 1. This reader has reached its "end of file" and will likely no longer - /// be able to produce bytes. Note that this does not mean that the - /// reader will *always* no longer be able to produce bytes. - /// 2. The buffer specified had a remaining capacity of zero. - /// - /// # Errors - /// - /// If this function encounters any form of I/O or other error, an error - /// variant will be returned. 
If an error is returned then it must be - /// guaranteed that no bytes were read. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If you use it as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that no data was read. - /// - /// # Examples - /// - /// [`File`] implements `Read` and [`BytesMut`] implements [`BufMut`]: - /// - /// [`File`]: crate::fs::File - /// [`BytesMut`]: bytes::BytesMut - /// [`BufMut`]: bytes::BufMut - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use bytes::BytesMut; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = BytesMut::with_capacity(10); - /// - /// assert!(buffer.is_empty()); - /// assert!(buffer.capacity() >= 10); - /// - /// // note that the return value is not needed to access the data - /// // that was read as `buffer`'s internal cursor is updated. - /// // - /// // this might read more than 10 bytes if the capacity of `buffer` - /// // is larger than 10. - /// f.read_buf(&mut buffer).await?; - /// - /// println!("The bytes: {:?}", &buffer[..]); - /// Ok(()) - /// } - /// ``` - fn read_buf<'a, B>(&'a mut self, buf: &'a mut B) -> ReadBuf<'a, Self, B> - where - Self: Sized + Unpin, - B: BufMut, - { - read_buf(self, buf) - } - - /// Reads the exact number of bytes required to fill `buf`. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_exact(&mut self, buf: &mut [u8]) -> io::Result; - /// ``` - /// - /// This function reads as many bytes as necessary to completely fill - /// the specified buffer `buf`. - /// - /// # Errors - /// - /// If the operation encounters an "end of file" before completely - /// filling the buffer, it returns an error of the kind - /// [`ErrorKind::UnexpectedEof`]. The contents of `buf` are unspecified - /// in this case. 
- /// - /// If any other read error is encountered then the operation - /// immediately returns. The contents of `buf` are unspecified in this - /// case. - /// - /// If this operation returns an error, it is unspecified how many bytes - /// it has read, but it will never read more than would be necessary to - /// completely fill the buffer. - /// - /// # Cancel safety - /// - /// This method is not cancellation safe. If the method is used as the - /// event in a [`tokio::select!`](crate::select) statement and some - /// other branch completes first, then some data may already have been - /// read into `buf`. - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let len = 10; - /// let mut buffer = vec![0; len]; - /// - /// // read exactly 10 bytes - /// f.read_exact(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - /// - /// [`ErrorKind::UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof - fn read_exact<'a>(&'a mut self, buf: &'a mut [u8]) -> ReadExact<'a, Self> - where - Self: Unpin, - { - read_exact(self, buf) - } - - read_impl! { - /// Reads an unsigned 8 bit integer from the underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u8(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 8 bit integers from an `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![2, 5]); - /// - /// assert_eq!(2, reader.read_u8().await?); - /// assert_eq!(5, reader.read_u8().await?); - /// - /// Ok(()) - /// } - /// ``` - fn read_u8(&mut self) -> ReadU8; - - /// Reads a signed 8 bit integer from the underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i8(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 8 bit integers from an `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x02, 0xfb]); - /// - /// assert_eq!(2, reader.read_i8().await?); - /// assert_eq!(-5, reader.read_i8().await?); - /// - /// Ok(()) - /// } - /// ``` - fn read_i8(&mut self) -> ReadI8; - - /// Reads an unsigned 16-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u16(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 16 bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![2, 5, 3, 0]); - /// - /// assert_eq!(517, reader.read_u16().await?); - /// assert_eq!(768, reader.read_u16().await?); - /// Ok(()) - /// } - /// ``` - fn read_u16(&mut self) -> ReadU16; - - /// Reads a signed 16-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i16(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 16 bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]); - /// - /// assert_eq!(193, reader.read_i16().await?); - /// assert_eq!(-132, reader.read_i16().await?); - /// Ok(()) - /// } - /// ``` - fn read_i16(&mut self) -> ReadI16; - - /// Reads an unsigned 32-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u32(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 32-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]); - /// - /// assert_eq!(267, reader.read_u32().await?); - /// Ok(()) - /// } - /// ``` - fn read_u32(&mut self) -> ReadU32; - - /// Reads a signed 32-bit integer in big-endian order from the - /// underlying reader. - /// - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i32(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 32-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]); - /// - /// assert_eq!(-34253, reader.read_i32().await?); - /// Ok(()) - /// } - /// ``` - fn read_i32(&mut self) -> ReadI32; - - /// Reads an unsigned 64-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u64(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 64-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// - /// assert_eq!(918733457491587, reader.read_u64().await?); - /// Ok(()) - /// } - /// ``` - fn read_u64(&mut self) -> ReadU64; - - /// Reads an signed 64-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i64(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 64-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]); - /// - /// assert_eq!(i64::MIN, reader.read_i64().await?); - /// Ok(()) - /// } - /// ``` - fn read_i64(&mut self) -> ReadI64; - - /// Reads an unsigned 128-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u128(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 128-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// - /// assert_eq!(16947640962301618749969007319746179, reader.read_u128().await?); - /// Ok(()) - /// } - /// ``` - fn read_u128(&mut self) -> ReadU128; - - /// Reads an signed 128-bit integer in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i128(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 128-bit big-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x80, 0, 0, 0, 0, 0, 0, 0, - /// 0, 0, 0, 0, 0, 0, 0, 0 - /// ]); - /// - /// assert_eq!(i128::MIN, reader.read_i128().await?); - /// Ok(()) - /// } - /// ``` - fn read_i128(&mut self) -> ReadI128; - - /// Reads an 32-bit floating point type in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_f32(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. 
- /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read 32-bit floating point type from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0xff, 0x7f, 0xff, 0xff]); - /// - /// assert_eq!(f32::MIN, reader.read_f32().await?); - /// Ok(()) - /// } - /// ``` - fn read_f32(&mut self) -> ReadF32; - - /// Reads an 64-bit floating point type in big-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_f64(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read 64-bit floating point type from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - /// ]); - /// - /// assert_eq!(f64::MIN, reader.read_f64().await?); - /// Ok(()) - /// } - /// ``` - fn read_f64(&mut self) -> ReadF64; - - /// Reads an unsigned 16-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u16_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 16 bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![2, 5, 3, 0]); - /// - /// assert_eq!(1282, reader.read_u16_le().await?); - /// assert_eq!(3, reader.read_u16_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_u16_le(&mut self) -> ReadU16Le; - - /// Reads a signed 16-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i16_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 16 bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]); - /// - /// assert_eq!(-16128, reader.read_i16_le().await?); - /// assert_eq!(31999, reader.read_i16_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_i16_le(&mut self) -> ReadI16Le; - - /// Reads an unsigned 32-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u32_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 32-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]); - /// - /// assert_eq!(184614912, reader.read_u32_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_u32_le(&mut self) -> ReadU32Le; - - /// Reads a signed 32-bit integer in little-endian order from the - /// underlying reader. - /// - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i32_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 32-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]); - /// - /// assert_eq!(863698943, reader.read_i32_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_i32_le(&mut self) -> ReadI32Le; - - /// Reads an unsigned 64-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u64_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 64-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// - /// assert_eq!(9477368352180732672, reader.read_u64_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_u64_le(&mut self) -> ReadU64Le; - - /// Reads an signed 64-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i64_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 64-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]); - /// - /// assert_eq!(128, reader.read_i64_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_i64_le(&mut self) -> ReadI64Le; - - /// Reads an unsigned 128-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_u128_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
- /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read unsigned 128-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// - /// assert_eq!(174826588484952389081207917399662330624, reader.read_u128_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_u128_le(&mut self) -> ReadU128Le; - - /// Reads an signed 128-bit integer in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_i128_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read signed 128-bit little-endian integers from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0x80, 0, 0, 0, 0, 0, 0, 0, - /// 0, 0, 0, 0, 0, 0, 0, 0 - /// ]); - /// - /// assert_eq!(128, reader.read_i128_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_i128_le(&mut self) -> ReadI128Le; - - /// Reads an 32-bit floating point type in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_f32_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. 
- /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read 32-bit floating point type from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7f, 0xff]); - /// - /// assert_eq!(f32::MIN, reader.read_f32_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_f32_le(&mut self) -> ReadF32Le; - - /// Reads an 64-bit floating point type in little-endian order from the - /// underlying reader. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_f64_le(&mut self) -> io::Result; - /// ``` - /// - /// It is recommended to use a buffered reader to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncReadExt::read_exact`]. - /// - /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact - /// - /// # Examples - /// - /// Read 64-bit floating point type from a `AsyncRead`: - /// - /// ```rust - /// use tokio::io::{self, AsyncReadExt}; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut reader = Cursor::new(vec![ - /// 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff - /// ]); - /// - /// assert_eq!(f64::MIN, reader.read_f64_le().await?); - /// Ok(()) - /// } - /// ``` - fn read_f64_le(&mut self) -> ReadF64Le; - } - - /// Reads all bytes until EOF in this source, placing them into `buf`. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_to_end(&mut self, buf: &mut Vec) -> io::Result; - /// ``` - /// - /// All bytes read from this source will be appended to the specified - /// buffer `buf`. 
This function will continuously call [`read()`] to - /// append more data to `buf` until [`read()`] returns `Ok(0)`. - /// - /// If successful, the total number of bytes read is returned. - /// - /// [`read()`]: AsyncReadExt::read - /// - /// # Errors - /// - /// If a read error is encountered then the `read_to_end` operation - /// immediately completes. Any bytes which have already been read will - /// be appended to `buf`. - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::io::{self, AsyncReadExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = Vec::new(); - /// - /// // read the whole file - /// f.read_to_end(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - /// - /// (See also the [`tokio::fs::read`] convenience function for reading from a - /// file.) - /// - /// [`tokio::fs::read`]: fn@crate::fs::read - fn read_to_end<'a>(&'a mut self, buf: &'a mut Vec) -> ReadToEnd<'a, Self> - where - Self: Unpin, - { - read_to_end(self, buf) - } - - /// Reads all bytes until EOF in this source, appending them to `buf`. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn read_to_string(&mut self, buf: &mut String) -> io::Result; - /// ``` - /// - /// If successful, the number of bytes which were read and appended to - /// `buf` is returned. - /// - /// # Errors - /// - /// If the data in this stream is *not* valid UTF-8 then an error is - /// returned and `buf` is unchanged. - /// - /// See [`read_to_end`][AsyncReadExt::read_to_end] for other error semantics. 
- /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::io::{self, AsyncReadExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut f = File::open("foo.txt").await?; - /// let mut buffer = String::new(); - /// - /// f.read_to_string(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - /// - /// (See also the [`crate::fs::read_to_string`] convenience function for - /// reading from a file.) - /// - /// [`crate::fs::read_to_string`]: fn@crate::fs::read_to_string - fn read_to_string<'a>(&'a mut self, dst: &'a mut String) -> ReadToString<'a, Self> - where - Self: Unpin, - { - read_to_string(self, dst) - } - - /// Creates an adaptor which reads at most `limit` bytes from it. - /// - /// This function returns a new instance of `AsyncRead` which will read - /// at most `limit` bytes, after which it will always return EOF - /// (`Ok(0)`). Any read errors will not count towards the number of - /// bytes read and future calls to [`read()`] may succeed. 
- /// - /// [`read()`]: fn@crate::io::AsyncReadExt::read - /// - /// [read]: AsyncReadExt::read - /// - /// # Examples - /// - /// [`File`][crate::fs::File]s implement `Read`: - /// - /// ```no_run - /// use tokio::io::{self, AsyncReadExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let f = File::open("foo.txt").await?; - /// let mut buffer = [0; 5]; - /// - /// // read at most five bytes - /// let mut handle = f.take(5); - /// - /// handle.read(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - fn take(self, limit: u64) -> Take - where - Self: Sized, - { - take(self, limit) - } - } -} - -impl AsyncReadExt for R {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/async_seek_ext.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/async_seek_ext.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/async_seek_ext.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/async_seek_ext.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,93 +0,0 @@ -use crate::io::seek::{seek, Seek}; -use crate::io::AsyncSeek; -use std::io::SeekFrom; - -cfg_io_util! { - /// An extension trait that adds utility methods to [`AsyncSeek`] types. - /// - /// # Examples - /// - /// ``` - /// use std::io::{self, Cursor, SeekFrom}; - /// use tokio::io::{AsyncSeekExt, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut cursor = Cursor::new(b"abcdefg"); - /// - /// // the `seek` method is defined by this trait - /// cursor.seek(SeekFrom::Start(3)).await?; - /// - /// let mut buf = [0; 1]; - /// let n = cursor.read(&mut buf).await?; - /// assert_eq!(n, 1); - /// assert_eq!(buf, [b'd']); - /// - /// Ok(()) - /// } - /// ``` - /// - /// See [module][crate::io] documentation for more details. 
- /// - /// [`AsyncSeek`]: AsyncSeek - pub trait AsyncSeekExt: AsyncSeek { - /// Creates a future which will seek an IO object, and then yield the - /// new position in the object and the object itself. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn seek(&mut self, pos: SeekFrom) -> io::Result; - /// ``` - /// - /// In the case of an error the buffer and the object will be discarded, with - /// the error yielded. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::fs::File; - /// use tokio::io::{AsyncSeekExt, AsyncReadExt}; - /// - /// use std::io::SeekFrom; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut file = File::open("foo.txt").await?; - /// file.seek(SeekFrom::Start(6)).await?; - /// - /// let mut contents = vec![0u8; 10]; - /// file.read_exact(&mut contents).await?; - /// # Ok(()) - /// # } - /// ``` - fn seek(&mut self, pos: SeekFrom) -> Seek<'_, Self> - where - Self: Unpin, - { - seek(self, pos) - } - - /// Creates a future which will rewind to the beginning of the stream. - /// - /// This is convenience method, equivalent to `self.seek(SeekFrom::Start(0))`. - fn rewind(&mut self) -> Seek<'_, Self> - where - Self: Unpin, - { - self.seek(SeekFrom::Start(0)) - } - - /// Creates a future which will return the current seek position from the - /// start of the stream. - /// - /// This is equivalent to `self.seek(SeekFrom::Current(0))`. 
- fn stream_position(&mut self) -> Seek<'_, Self> - where - Self: Unpin, - { - self.seek(SeekFrom::Current(0)) - } - } -} - -impl AsyncSeekExt for S {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/async_write_ext.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/async_write_ext.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/async_write_ext.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/async_write_ext.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1293 +0,0 @@ -use crate::io::util::flush::{flush, Flush}; -use crate::io::util::shutdown::{shutdown, Shutdown}; -use crate::io::util::write::{write, Write}; -use crate::io::util::write_all::{write_all, WriteAll}; -use crate::io::util::write_all_buf::{write_all_buf, WriteAllBuf}; -use crate::io::util::write_buf::{write_buf, WriteBuf}; -use crate::io::util::write_int::{WriteF32, WriteF32Le, WriteF64, WriteF64Le}; -use crate::io::util::write_int::{ - WriteI128, WriteI128Le, WriteI16, WriteI16Le, WriteI32, WriteI32Le, WriteI64, WriteI64Le, - WriteI8, -}; -use crate::io::util::write_int::{ - WriteU128, WriteU128Le, WriteU16, WriteU16Le, WriteU32, WriteU32Le, WriteU64, WriteU64Le, - WriteU8, -}; -use crate::io::util::write_vectored::{write_vectored, WriteVectored}; -use crate::io::AsyncWrite; -use std::io::IoSlice; - -use bytes::Buf; - -cfg_io_util! { - /// Defines numeric writer. - macro_rules! write_impl { - ( - $( - $(#[$outer:meta])* - fn $name:ident(&mut self, n: $ty:ty) -> $($fut:ident)*; - )* - ) => { - $( - $(#[$outer])* - fn $name(&mut self, n: $ty) -> $($fut)*<&mut Self> where Self: Unpin { - $($fut)*::new(self, n) - } - )* - } - } - - /// Writes bytes to a sink. - /// - /// Implemented as an extension trait, adding utility methods to all - /// [`AsyncWrite`] types. Callers will tend to import this trait instead of - /// [`AsyncWrite`]. 
- /// - /// ```no_run - /// use tokio::io::{self, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let data = b"some bytes"; - /// - /// let mut pos = 0; - /// let mut buffer = File::create("foo.txt").await?; - /// - /// while pos < data.len() { - /// let bytes_written = buffer.write(&data[pos..]).await?; - /// pos += bytes_written; - /// } - /// - /// Ok(()) - /// } - /// ``` - /// - /// See [module][crate::io] documentation for more details. - /// - /// [`AsyncWrite`]: AsyncWrite - pub trait AsyncWriteExt: AsyncWrite { - /// Writes a buffer into this writer, returning how many bytes were - /// written. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write(&mut self, buf: &[u8]) -> io::Result; - /// ``` - /// - /// This function will attempt to write the entire contents of `buf`, but - /// the entire write may not succeed, or the write may also generate an - /// error. A call to `write` represents *at most one* attempt to write to - /// any wrapped object. - /// - /// # Return - /// - /// If the return value is `Ok(n)` then it must be guaranteed that `n <= - /// buf.len()`. A return value of `0` typically means that the - /// underlying object is no longer able to accept bytes and will likely - /// not be able to in the future as well, or that the buffer provided is - /// empty. - /// - /// # Errors - /// - /// Each call to `write` may generate an I/O error indicating that the - /// operation could not be completed. If an error is returned then no bytes - /// in the buffer were written to this writer. - /// - /// It is **not** considered an error if the entire buffer could not be - /// written to this writer. 
- /// - /// # Cancel safety - /// - /// This method is cancellation safe in the sense that if it is used as - /// the event in a [`tokio::select!`](crate::select) statement and some - /// other branch completes first, then it is guaranteed that no data was - /// written to this `AsyncWrite`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// - /// // Writes some prefix of the byte string, not necessarily all of it. - /// file.write(b"some bytes").await?; - /// Ok(()) - /// } - /// ``` - fn write<'a>(&'a mut self, src: &'a [u8]) -> Write<'a, Self> - where - Self: Unpin, - { - write(self, src) - } - - /// Like [`write`], except that it writes from a slice of buffers. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result; - /// ``` - /// - /// See [`AsyncWrite::poll_write_vectored`] for more details. - /// - /// # Cancel safety - /// - /// This method is cancellation safe in the sense that if it is used as - /// the event in a [`tokio::select!`](crate::select) statement and some - /// other branch completes first, then it is guaranteed that no data was - /// written to this `AsyncWrite`. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, AsyncWriteExt}; - /// use tokio::fs::File; - /// use std::io::IoSlice; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// - /// let bufs: &[_] = &[ - /// IoSlice::new(b"hello"), - /// IoSlice::new(b" "), - /// IoSlice::new(b"world"), - /// ]; - /// - /// file.write_vectored(&bufs).await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// [`write`]: AsyncWriteExt::write - fn write_vectored<'a, 'b>(&'a mut self, bufs: &'a [IoSlice<'b>]) -> WriteVectored<'a, 'b, Self> - where - Self: Unpin, - { - write_vectored(self, bufs) - } - - /// Writes a buffer into this writer, advancing the buffer's internal - /// cursor. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_buf(&mut self, buf: &mut B) -> io::Result; - /// ``` - /// - /// This function will attempt to write the entire contents of `buf`, but - /// the entire write may not succeed, or the write may also generate an - /// error. After the operation completes, the buffer's - /// internal cursor is advanced by the number of bytes written. A - /// subsequent call to `write_buf` using the **same** `buf` value will - /// resume from the point that the first call to `write_buf` completed. - /// A call to `write_buf` represents *at most one* attempt to write to any - /// wrapped object. - /// - /// # Return - /// - /// If the return value is `Ok(n)` then it must be guaranteed that `n <= - /// buf.len()`. A return value of `0` typically means that the - /// underlying object is no longer able to accept bytes and will likely - /// not be able to in the future as well, or that the buffer provided is - /// empty. - /// - /// # Errors - /// - /// Each call to `write` may generate an I/O error indicating that the - /// operation could not be completed. If an error is returned then no bytes - /// in the buffer were written to this writer. 
- /// - /// It is **not** considered an error if the entire buffer could not be - /// written to this writer. - /// - /// # Cancel safety - /// - /// This method is cancellation safe in the sense that if it is used as - /// the event in a [`tokio::select!`](crate::select) statement and some - /// other branch completes first, then it is guaranteed that no data was - /// written to this `AsyncWrite`. - /// - /// # Examples - /// - /// [`File`] implements [`AsyncWrite`] and [`Cursor`]`<&[u8]>` implements [`Buf`]: - /// - /// [`File`]: crate::fs::File - /// [`Buf`]: bytes::Buf - /// [`Cursor`]: std::io::Cursor - /// - /// ```no_run - /// use tokio::io::{self, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// use bytes::Buf; - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// let mut buffer = Cursor::new(b"data to write"); - /// - /// // Loop until the entire contents of the buffer are written to - /// // the file. - /// while buffer.has_remaining() { - /// // Writes some prefix of the byte string, not necessarily - /// // all of it. - /// file.write_buf(&mut buffer).await?; - /// } - /// - /// Ok(()) - /// } - /// ``` - fn write_buf<'a, B>(&'a mut self, src: &'a mut B) -> WriteBuf<'a, Self, B> - where - Self: Sized + Unpin, - B: Buf, - { - write_buf(self, src) - } - - /// Attempts to write an entire buffer into this writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_all_buf(&mut self, buf: impl Buf) -> Result<(), io::Error> { - /// while buf.has_remaining() { - /// self.write_buf(&mut buf).await?; - /// } - /// Ok(()) - /// } - /// ``` - /// - /// This method will continuously call [`write`] until - /// [`buf.has_remaining()`](bytes::Buf::has_remaining) returns false. This method will not - /// return until the entire buffer has been successfully written or an error occurs. The - /// first error generated will be returned. 
- /// - /// The buffer is advanced after each chunk is successfully written. After failure, - /// `src.chunk()` will return the chunk that failed to write. - /// - /// # Cancel safety - /// - /// If `write_all_buf` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then the data in the provided buffer may have been - /// partially written. However, it is guaranteed that the provided - /// buffer has been [advanced] by the amount of bytes that have been - /// partially written. - /// - /// # Examples - /// - /// [`File`] implements [`AsyncWrite`] and [`Cursor`]`<&[u8]>` implements [`Buf`]: - /// - /// [`File`]: crate::fs::File - /// [`Buf`]: bytes::Buf - /// [`Cursor`]: std::io::Cursor - /// [advanced]: bytes::Buf::advance - /// - /// ```no_run - /// use tokio::io::{self, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// use std::io::Cursor; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// let mut buffer = Cursor::new(b"data to write"); - /// - /// file.write_all_buf(&mut buffer).await?; - /// Ok(()) - /// } - /// ``` - /// - /// [`write`]: AsyncWriteExt::write - fn write_all_buf<'a, B>(&'a mut self, src: &'a mut B) -> WriteAllBuf<'a, Self, B> - where - Self: Sized + Unpin, - B: Buf, - { - write_all_buf(self, src) - } - - /// Attempts to write an entire buffer into this writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_all(&mut self, buf: &[u8]) -> io::Result<()>; - /// ``` - /// - /// This method will continuously call [`write`] until there is no more data - /// to be written. This method will not return until the entire buffer - /// has been successfully written or such an error occurs. The first - /// error generated from this method will be returned. - /// - /// # Cancel safety - /// - /// This method is not cancellation safe. 
If it is used as the event - /// in a [`tokio::select!`](crate::select) statement and some other - /// branch completes first, then the provided buffer may have been - /// partially written, but future calls to `write_all` will start over - /// from the beginning of the buffer. - /// - /// # Errors - /// - /// This function will return the first error that [`write`] returns. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut file = File::create("foo.txt").await?; - /// - /// file.write_all(b"some bytes").await?; - /// Ok(()) - /// } - /// ``` - /// - /// [`write`]: AsyncWriteExt::write - fn write_all<'a>(&'a mut self, src: &'a [u8]) -> WriteAll<'a, Self> - where - Self: Unpin, - { - write_all(self, src) - } - - write_impl! { - /// Writes an unsigned 8-bit integer to the underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u8(&mut self, n: u8) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 8 bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u8(2).await?; - /// writer.write_u8(5).await?; - /// - /// assert_eq!(writer, b"\x02\x05"); - /// Ok(()) - /// } - /// ``` - fn write_u8(&mut self, n: u8) -> WriteU8; - - /// Writes a signed 8-bit integer to the underlying writer. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i8(&mut self, n: i8) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 8 bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i8(-2).await?; - /// writer.write_i8(126).await?; - /// - /// assert_eq!(writer, b"\xFE\x7E"); - /// Ok(()) - /// } - /// ``` - fn write_i8(&mut self, n: i8) -> WriteI8; - - /// Writes an unsigned 16-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u16(&mut self, n: u16) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 16-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u16(517).await?; - /// writer.write_u16(768).await?; - /// - /// assert_eq!(writer, b"\x02\x05\x03\x00"); - /// Ok(()) - /// } - /// ``` - fn write_u16(&mut self, n: u16) -> WriteU16; - - /// Writes a signed 16-bit integer in big-endian order to the - /// underlying writer. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i16(&mut self, n: i16) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 16-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i16(193).await?; - /// writer.write_i16(-132).await?; - /// - /// assert_eq!(writer, b"\x00\xc1\xff\x7c"); - /// Ok(()) - /// } - /// ``` - fn write_i16(&mut self, n: i16) -> WriteI16; - - /// Writes an unsigned 32-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u32(&mut self, n: u32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 32-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u32(267).await?; - /// writer.write_u32(1205419366).await?; - /// - /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66"); - /// Ok(()) - /// } - /// ``` - fn write_u32(&mut self, n: u32) -> WriteU32; - - /// Writes a signed 32-bit integer in big-endian order to the - /// underlying writer. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i32(&mut self, n: i32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 32-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i32(267).await?; - /// writer.write_i32(1205419366).await?; - /// - /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66"); - /// Ok(()) - /// } - /// ``` - fn write_i32(&mut self, n: i32) -> WriteI32; - - /// Writes an unsigned 64-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u64(&mut self, n: u64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 64-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u64(918733457491587).await?; - /// writer.write_u64(143).await?; - /// - /// assert_eq!(writer, b"\x00\x03\x43\x95\x4d\x60\x86\x83\x00\x00\x00\x00\x00\x00\x00\x8f"); - /// Ok(()) - /// } - /// ``` - fn write_u64(&mut self, n: u64) -> WriteU64; - - /// Writes an signed 64-bit integer in big-endian order to the - /// underlying writer. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i64(&mut self, n: i64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 64-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i64(i64::MIN).await?; - /// writer.write_i64(i64::MAX).await?; - /// - /// assert_eq!(writer, b"\x80\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff"); - /// Ok(()) - /// } - /// ``` - fn write_i64(&mut self, n: i64) -> WriteI64; - - /// Writes an unsigned 128-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u128(&mut self, n: u128) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
- /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 128-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u128(16947640962301618749969007319746179).await?; - /// - /// assert_eq!(writer, vec![ - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, - /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_u128(&mut self, n: u128) -> WriteU128; - - /// Writes an signed 128-bit integer in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i128(&mut self, n: i128) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 128-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i128(i128::MIN).await?; - /// - /// assert_eq!(writer, vec![ - /// 0x80, 0, 0, 0, 0, 0, 0, 0, - /// 0, 0, 0, 0, 0, 0, 0, 0 - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_i128(&mut self, n: i128) -> WriteI128; - - /// Writes an 32-bit floating point type in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_f32(&mut self, n: f32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. 
- /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write 32-bit floating point type to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_f32(f32::MIN).await?; - /// - /// assert_eq!(writer, vec![0xff, 0x7f, 0xff, 0xff]); - /// Ok(()) - /// } - /// ``` - fn write_f32(&mut self, n: f32) -> WriteF32; - - /// Writes an 64-bit floating point type in big-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_f64(&mut self, n: f64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write 64-bit floating point type to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_f64(f64::MIN).await?; - /// - /// assert_eq!(writer, vec![ - /// 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_f64(&mut self, n: f64) -> WriteF64; - - /// Writes an unsigned 16-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u16_le(&mut self, n: u16) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
- /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 16-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u16_le(517).await?; - /// writer.write_u16_le(768).await?; - /// - /// assert_eq!(writer, b"\x05\x02\x00\x03"); - /// Ok(()) - /// } - /// ``` - fn write_u16_le(&mut self, n: u16) -> WriteU16Le; - - /// Writes a signed 16-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i16_le(&mut self, n: i16) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 16-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i16_le(193).await?; - /// writer.write_i16_le(-132).await?; - /// - /// assert_eq!(writer, b"\xc1\x00\x7c\xff"); - /// Ok(()) - /// } - /// ``` - fn write_i16_le(&mut self, n: i16) -> WriteI16Le; - - /// Writes an unsigned 32-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u32_le(&mut self, n: u32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
- /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 32-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u32_le(267).await?; - /// writer.write_u32_le(1205419366).await?; - /// - /// assert_eq!(writer, b"\x0b\x01\x00\x00\x66\x3d\xd9\x47"); - /// Ok(()) - /// } - /// ``` - fn write_u32_le(&mut self, n: u32) -> WriteU32Le; - - /// Writes a signed 32-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i32_le(&mut self, n: i32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 32-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i32_le(267).await?; - /// writer.write_i32_le(1205419366).await?; - /// - /// assert_eq!(writer, b"\x0b\x01\x00\x00\x66\x3d\xd9\x47"); - /// Ok(()) - /// } - /// ``` - fn write_i32_le(&mut self, n: i32) -> WriteI32Le; - - /// Writes an unsigned 64-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u64_le(&mut self, n: u64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
- /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 64-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u64_le(918733457491587).await?; - /// writer.write_u64_le(143).await?; - /// - /// assert_eq!(writer, b"\x83\x86\x60\x4d\x95\x43\x03\x00\x8f\x00\x00\x00\x00\x00\x00\x00"); - /// Ok(()) - /// } - /// ``` - fn write_u64_le(&mut self, n: u64) -> WriteU64Le; - - /// Writes an signed 64-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i64_le(&mut self, n: i64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 64-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i64_le(i64::MIN).await?; - /// writer.write_i64_le(i64::MAX).await?; - /// - /// assert_eq!(writer, b"\x00\x00\x00\x00\x00\x00\x00\x80\xff\xff\xff\xff\xff\xff\xff\x7f"); - /// Ok(()) - /// } - /// ``` - fn write_i64_le(&mut self, n: i64) -> WriteI64Le; - - /// Writes an unsigned 128-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_u128_le(&mut self, n: u128) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. 
- /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write unsigned 128-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_u128_le(16947640962301618749969007319746179).await?; - /// - /// assert_eq!(writer, vec![ - /// 0x83, 0x86, 0x60, 0x4d, 0x95, 0x43, 0x03, 0x00, - /// 0x83, 0x86, 0x60, 0x4d, 0x95, 0x43, 0x03, 0x00, - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_u128_le(&mut self, n: u128) -> WriteU128Le; - - /// Writes an signed 128-bit integer in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_i128_le(&mut self, n: i128) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write signed 128-bit integers to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_i128_le(i128::MIN).await?; - /// - /// assert_eq!(writer, vec![ - /// 0, 0, 0, 0, 0, 0, 0, - /// 0, 0, 0, 0, 0, 0, 0, 0, 0x80 - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_i128_le(&mut self, n: i128) -> WriteI128Le; - - /// Writes an 32-bit floating point type in little-endian order to the - /// underlying writer. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_f32_le(&mut self, n: f32) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write 32-bit floating point type to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_f32_le(f32::MIN).await?; - /// - /// assert_eq!(writer, vec![0xff, 0xff, 0x7f, 0xff]); - /// Ok(()) - /// } - /// ``` - fn write_f32_le(&mut self, n: f32) -> WriteF32Le; - - /// Writes an 64-bit floating point type in little-endian order to the - /// underlying writer. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn write_f64_le(&mut self, n: f64) -> io::Result<()>; - /// ``` - /// - /// It is recommended to use a buffered writer to avoid excessive - /// syscalls. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::write_all`]. - /// - /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all - /// - /// # Examples - /// - /// Write 64-bit floating point type to a `AsyncWrite`: - /// - /// ```rust - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut writer = Vec::new(); - /// - /// writer.write_f64_le(f64::MIN).await?; - /// - /// assert_eq!(writer, vec![ - /// 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff - /// ]); - /// Ok(()) - /// } - /// ``` - fn write_f64_le(&mut self, n: f64) -> WriteF64Le; - } - - /// Flushes this output stream, ensuring that all intermediately buffered - /// contents reach their destination. 
- /// - /// Equivalent to: - /// - /// ```ignore - /// async fn flush(&mut self) -> io::Result<()>; - /// ``` - /// - /// # Errors - /// - /// It is considered an error if not all bytes could be written due to - /// I/O errors or EOF being reached. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, BufWriter, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let f = File::create("foo.txt").await?; - /// let mut buffer = BufWriter::new(f); - /// - /// buffer.write_all(b"some bytes").await?; - /// buffer.flush().await?; - /// Ok(()) - /// } - /// ``` - fn flush(&mut self) -> Flush<'_, Self> - where - Self: Unpin, - { - flush(self) - } - - /// Shuts down the output stream, ensuring that the value can be dropped - /// cleanly. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn shutdown(&mut self) -> io::Result<()>; - /// ``` - /// - /// Similar to [`flush`], all intermediately buffered is written to the - /// underlying stream. Once the operation completes, the caller should - /// no longer attempt to write to the stream. For example, the - /// `TcpStream` implementation will issue a `shutdown(Write)` sys call. 
- /// - /// [`flush`]: fn@crate::io::AsyncWriteExt::flush - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, BufWriter, AsyncWriteExt}; - /// use tokio::fs::File; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let f = File::create("foo.txt").await?; - /// let mut buffer = BufWriter::new(f); - /// - /// buffer.write_all(b"some bytes").await?; - /// buffer.shutdown().await?; - /// Ok(()) - /// } - /// ``` - fn shutdown(&mut self) -> Shutdown<'_, Self> - where - Self: Unpin, - { - shutdown(self) - } - } -} - -impl AsyncWriteExt for W {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/buf_reader.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/buf_reader.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/buf_reader.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/buf_reader.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,311 +0,0 @@ -use crate::io::util::DEFAULT_BUF_SIZE; -use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf}; - -use pin_project_lite::pin_project; -use std::io::{self, IoSlice, SeekFrom}; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{cmp, fmt, mem}; - -pin_project! { - /// The `BufReader` struct adds buffering to any reader. - /// - /// It can be excessively inefficient to work directly with a [`AsyncRead`] - /// instance. A `BufReader` performs large, infrequent reads on the underlying - /// [`AsyncRead`] and maintains an in-memory buffer of the results. - /// - /// `BufReader` can improve the speed of programs that make *small* and - /// *repeated* read calls to the same file or network socket. It does not - /// help when reading very large amounts at once, or reading just one or a few - /// times. It also provides no advantage when reading from a source that is - /// already in memory, like a `Vec`. - /// - /// When the `BufReader` is dropped, the contents of its buffer will be - /// discarded. 
Creating multiple instances of a `BufReader` on the same - /// stream can cause data loss. - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct BufReader { - #[pin] - pub(super) inner: R, - pub(super) buf: Box<[u8]>, - pub(super) pos: usize, - pub(super) cap: usize, - pub(super) seek_state: SeekState, - } -} - -impl BufReader { - /// Creates a new `BufReader` with a default buffer capacity. The default is currently 8 KB, - /// but may change in the future. - pub fn new(inner: R) -> Self { - Self::with_capacity(DEFAULT_BUF_SIZE, inner) - } - - /// Creates a new `BufReader` with the specified buffer capacity. - pub fn with_capacity(capacity: usize, inner: R) -> Self { - let buffer = vec![0; capacity]; - Self { - inner, - buf: buffer.into_boxed_slice(), - pos: 0, - cap: 0, - seek_state: SeekState::Init, - } - } - - /// Gets a reference to the underlying reader. - /// - /// It is inadvisable to directly read from the underlying reader. - pub fn get_ref(&self) -> &R { - &self.inner - } - - /// Gets a mutable reference to the underlying reader. - /// - /// It is inadvisable to directly read from the underlying reader. - pub fn get_mut(&mut self) -> &mut R { - &mut self.inner - } - - /// Gets a pinned mutable reference to the underlying reader. - /// - /// It is inadvisable to directly read from the underlying reader. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { - self.project().inner - } - - /// Consumes this `BufReader`, returning the underlying reader. - /// - /// Note that any leftover data in the internal buffer is lost. - pub fn into_inner(self) -> R { - self.inner - } - - /// Returns a reference to the internally buffered data. - /// - /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty. - pub fn buffer(&self) -> &[u8] { - &self.buf[self.pos..self.cap] - } - - /// Invalidates all data in the internal buffer. 
- #[inline] - fn discard_buffer(self: Pin<&mut Self>) { - let me = self.project(); - *me.pos = 0; - *me.cap = 0; - } -} - -impl AsyncRead for BufReader { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - // If we don't have any buffered data and we're doing a massive read - // (larger than our internal buffer), bypass our internal buffer - // entirely. - if self.pos == self.cap && buf.remaining() >= self.buf.len() { - let res = ready!(self.as_mut().get_pin_mut().poll_read(cx, buf)); - self.discard_buffer(); - return Poll::Ready(res); - } - let rem = ready!(self.as_mut().poll_fill_buf(cx))?; - let amt = std::cmp::min(rem.len(), buf.remaining()); - buf.put_slice(&rem[..amt]); - self.consume(amt); - Poll::Ready(Ok(())) - } -} - -impl AsyncBufRead for BufReader { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - - // If we've reached the end of our internal buffer then we need to fetch - // some more data from the underlying reader. - // Branch using `>=` instead of the more correct `==` - // to tell the compiler that the pos..cap slice is always valid. - if *me.pos >= *me.cap { - debug_assert!(*me.pos == *me.cap); - let mut buf = ReadBuf::new(me.buf); - ready!(me.inner.poll_read(cx, &mut buf))?; - *me.cap = buf.filled().len(); - *me.pos = 0; - } - Poll::Ready(Ok(&me.buf[*me.pos..*me.cap])) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - let me = self.project(); - *me.pos = cmp::min(*me.pos + amt, *me.cap); - } -} - -#[derive(Debug, Clone, Copy)] -pub(super) enum SeekState { - /// start_seek has not been called. - Init, - /// start_seek has been called, but poll_complete has not yet been called. - Start(SeekFrom), - /// Waiting for completion of the first poll_complete in the `n.checked_sub(remainder).is_none()` branch. - PendingOverflowed(i64), - /// Waiting for completion of poll_complete. 
- Pending, -} - -/// Seeks to an offset, in bytes, in the underlying reader. -/// -/// The position used for seeking with `SeekFrom::Current(_)` is the -/// position the underlying reader would be at if the `BufReader` had no -/// internal buffer. -/// -/// Seeking always discards the internal buffer, even if the seek position -/// would otherwise fall within it. This guarantees that calling -/// `.into_inner()` immediately after a seek yields the underlying reader -/// at the same position. -/// -/// See [`AsyncSeek`] for more details. -/// -/// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` -/// where `n` minus the internal buffer length overflows an `i64`, two -/// seeks will be performed instead of one. If the second seek returns -/// `Err`, the underlying reader will be left at the same position it would -/// have if you called `seek` with `SeekFrom::Current(0)`. -impl AsyncSeek for BufReader { - fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> { - // We needs to call seek operation multiple times. - // And we should always call both start_seek and poll_complete, - // as start_seek alone cannot guarantee that the operation will be completed. - // poll_complete receives a Context and returns a Poll, so it cannot be called - // inside start_seek. - *self.project().seek_state = SeekState::Start(pos); - Ok(()) - } - - fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let res = match mem::replace(self.as_mut().project().seek_state, SeekState::Init) { - SeekState::Init => { - // 1.x AsyncSeek recommends calling poll_complete before start_seek. - // We don't have to guarantee that the value returned by - // poll_complete called without start_seek is correct, - // so we'll return 0. 
- return Poll::Ready(Ok(0)); - } - SeekState::Start(SeekFrom::Current(n)) => { - let remainder = (self.cap - self.pos) as i64; - // it should be safe to assume that remainder fits within an i64 as the alternative - // means we managed to allocate 8 exbibytes and that's absurd. - // But it's not out of the realm of possibility for some weird underlying reader to - // support seeking by i64::MIN so we need to handle underflow when subtracting - // remainder. - if let Some(offset) = n.checked_sub(remainder) { - self.as_mut() - .get_pin_mut() - .start_seek(SeekFrom::Current(offset))?; - } else { - // seek backwards by our remainder, and then by the offset - self.as_mut() - .get_pin_mut() - .start_seek(SeekFrom::Current(-remainder))?; - if self.as_mut().get_pin_mut().poll_complete(cx)?.is_pending() { - *self.as_mut().project().seek_state = SeekState::PendingOverflowed(n); - return Poll::Pending; - } - - // https://github.com/rust-lang/rust/pull/61157#issuecomment-495932676 - self.as_mut().discard_buffer(); - - self.as_mut() - .get_pin_mut() - .start_seek(SeekFrom::Current(n))?; - } - self.as_mut().get_pin_mut().poll_complete(cx)? - } - SeekState::PendingOverflowed(n) => { - if self.as_mut().get_pin_mut().poll_complete(cx)?.is_pending() { - *self.as_mut().project().seek_state = SeekState::PendingOverflowed(n); - return Poll::Pending; - } - - // https://github.com/rust-lang/rust/pull/61157#issuecomment-495932676 - self.as_mut().discard_buffer(); - - self.as_mut() - .get_pin_mut() - .start_seek(SeekFrom::Current(n))?; - self.as_mut().get_pin_mut().poll_complete(cx)? - } - SeekState::Start(pos) => { - // Seeking with Start/End doesn't care about our buffer length. - self.as_mut().get_pin_mut().start_seek(pos)?; - self.as_mut().get_pin_mut().poll_complete(cx)? 
- } - SeekState::Pending => self.as_mut().get_pin_mut().poll_complete(cx)?, - }; - - match res { - Poll::Ready(res) => { - self.discard_buffer(); - Poll::Ready(Ok(res)) - } - Poll::Pending => { - *self.as_mut().project().seek_state = SeekState::Pending; - Poll::Pending - } - } - } -} - -impl AsyncWrite for BufReader { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.get_pin_mut().poll_write(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - self.get_pin_mut().poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.get_ref().is_write_vectored() - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_pin_mut().poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_pin_mut().poll_shutdown(cx) - } -} - -impl fmt::Debug for BufReader { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BufReader") - .field("reader", &self.inner) - .field( - "buffer", - &format_args!("{}/{}", self.cap - self.pos, self.buf.len()), - ) - .finish() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/buf_stream.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/buf_stream.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/buf_stream.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/buf_stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,207 +0,0 @@ -use crate::io::util::{BufReader, BufWriter}; -use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf}; - -use pin_project_lite::pin_project; -use std::io::{self, IoSlice, SeekFrom}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! 
{ - /// Wraps a type that is [`AsyncWrite`] and [`AsyncRead`], and buffers its input and output. - /// - /// It can be excessively inefficient to work directly with something that implements [`AsyncWrite`] - /// and [`AsyncRead`]. For example, every `write`, however small, has to traverse the syscall - /// interface, and similarly, every read has to do the same. The [`BufWriter`] and [`BufReader`] - /// types aid with these problems respectively, but do so in only one direction. `BufStream` wraps - /// one in the other so that both directions are buffered. See their documentation for details. - #[derive(Debug)] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct BufStream { - #[pin] - inner: BufReader>, - } -} - -impl BufStream { - /// Wraps a type in both [`BufWriter`] and [`BufReader`]. - /// - /// See the documentation for those types and [`BufStream`] for details. - pub fn new(stream: RW) -> BufStream { - BufStream { - inner: BufReader::new(BufWriter::new(stream)), - } - } - - /// Creates a `BufStream` with the specified [`BufReader`] capacity and [`BufWriter`] - /// capacity. - /// - /// See the documentation for those types and [`BufStream`] for details. - pub fn with_capacity( - reader_capacity: usize, - writer_capacity: usize, - stream: RW, - ) -> BufStream { - BufStream { - inner: BufReader::with_capacity( - reader_capacity, - BufWriter::with_capacity(writer_capacity, stream), - ), - } - } - - /// Gets a reference to the underlying I/O object. - /// - /// It is inadvisable to directly read from the underlying I/O object. - pub fn get_ref(&self) -> &RW { - self.inner.get_ref().get_ref() - } - - /// Gets a mutable reference to the underlying I/O object. - /// - /// It is inadvisable to directly read from the underlying I/O object. - pub fn get_mut(&mut self) -> &mut RW { - self.inner.get_mut().get_mut() - } - - /// Gets a pinned mutable reference to the underlying I/O object. 
- /// - /// It is inadvisable to directly read from the underlying I/O object. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut RW> { - self.project().inner.get_pin_mut().get_pin_mut() - } - - /// Consumes this `BufStream`, returning the underlying I/O object. - /// - /// Note that any leftover data in the internal buffer is lost. - pub fn into_inner(self) -> RW { - self.inner.into_inner().into_inner() - } -} - -impl From>> for BufStream { - fn from(b: BufReader>) -> Self { - BufStream { inner: b } - } -} - -impl From>> for BufStream { - fn from(b: BufWriter>) -> Self { - // we need to "invert" the reader and writer - let BufWriter { - inner: - BufReader { - inner, - buf: rbuf, - pos, - cap, - seek_state: rseek_state, - }, - buf: wbuf, - written, - seek_state: wseek_state, - } = b; - - BufStream { - inner: BufReader { - inner: BufWriter { - inner, - buf: wbuf, - written, - seek_state: wseek_state, - }, - buf: rbuf, - pos, - cap, - seek_state: rseek_state, - }, - } - } -} - -impl AsyncWrite for BufStream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.project().inner.poll_write(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - self.project().inner.poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.inner.is_write_vectored() - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_shutdown(cx) - } -} - -impl AsyncRead for BufStream { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.project().inner.poll_read(cx, buf) - } -} - -/// Seek to an offset, in bytes, in the underlying stream. 
-/// -/// The position used for seeking with `SeekFrom::Current(_)` is the -/// position the underlying stream would be at if the `BufStream` had no -/// internal buffer. -/// -/// Seeking always discards the internal buffer, even if the seek position -/// would otherwise fall within it. This guarantees that calling -/// `.into_inner()` immediately after a seek yields the underlying reader -/// at the same position. -/// -/// See [`AsyncSeek`] for more details. -/// -/// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` -/// where `n` minus the internal buffer length overflows an `i64`, two -/// seeks will be performed instead of one. If the second seek returns -/// `Err`, the underlying reader will be left at the same position it would -/// have if you called `seek` with `SeekFrom::Current(0)`. -impl AsyncSeek for BufStream { - fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> io::Result<()> { - self.project().inner.start_seek(position) - } - - fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_complete(cx) - } -} - -impl AsyncBufRead for BufStream { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.project().inner.consume(amt) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/buf_writer.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/buf_writer.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/buf_writer.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/buf_writer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,310 +0,0 @@ -use crate::io::util::DEFAULT_BUF_SIZE; -use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf}; - -use pin_project_lite::pin_project; -use std::fmt; -use 
std::io::{self, IoSlice, SeekFrom, Write}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Wraps a writer and buffers its output. - /// - /// It can be excessively inefficient to work directly with something that - /// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and - /// writes it to an underlying writer in large, infrequent batches. - /// - /// `BufWriter` can improve the speed of programs that make *small* and - /// *repeated* write calls to the same file or network socket. It does not - /// help when writing very large amounts at once, or writing just one or a few - /// times. It also provides no advantage when writing to a destination that is - /// in memory, like a `Vec`. - /// - /// When the `BufWriter` is dropped, the contents of its buffer will be - /// discarded. Creating multiple instances of a `BufWriter` on the same - /// stream can cause data loss. If you need to write out the contents of its - /// buffer, you must manually call flush before the writer is dropped. - /// - /// [`AsyncWrite`]: AsyncWrite - /// [`flush`]: super::AsyncWriteExt::flush - /// - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct BufWriter { - #[pin] - pub(super) inner: W, - pub(super) buf: Vec, - pub(super) written: usize, - pub(super) seek_state: SeekState, - } -} - -impl BufWriter { - /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB, - /// but may change in the future. - pub fn new(inner: W) -> Self { - Self::with_capacity(DEFAULT_BUF_SIZE, inner) - } - - /// Creates a new `BufWriter` with the specified buffer capacity. 
- pub fn with_capacity(cap: usize, inner: W) -> Self { - Self { - inner, - buf: Vec::with_capacity(cap), - written: 0, - seek_state: SeekState::Init, - } - } - - fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut me = self.project(); - - let len = me.buf.len(); - let mut ret = Ok(()); - while *me.written < len { - match ready!(me.inner.as_mut().poll_write(cx, &me.buf[*me.written..])) { - Ok(0) => { - ret = Err(io::Error::new( - io::ErrorKind::WriteZero, - "failed to write the buffered data", - )); - break; - } - Ok(n) => *me.written += n, - Err(e) => { - ret = Err(e); - break; - } - } - } - if *me.written > 0 { - me.buf.drain(..*me.written); - } - *me.written = 0; - Poll::Ready(ret) - } - - /// Gets a reference to the underlying writer. - pub fn get_ref(&self) -> &W { - &self.inner - } - - /// Gets a mutable reference to the underlying writer. - /// - /// It is inadvisable to directly write to the underlying writer. - pub fn get_mut(&mut self) -> &mut W { - &mut self.inner - } - - /// Gets a pinned mutable reference to the underlying writer. - /// - /// It is inadvisable to directly write to the underlying writer. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { - self.project().inner - } - - /// Consumes this `BufWriter`, returning the underlying writer. - /// - /// Note that any leftover data in the internal buffer is lost. - pub fn into_inner(self) -> W { - self.inner - } - - /// Returns a reference to the internally buffered data. 
- pub fn buffer(&self) -> &[u8] { - &self.buf - } -} - -impl AsyncWrite for BufWriter { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - if self.buf.len() + buf.len() > self.buf.capacity() { - ready!(self.as_mut().flush_buf(cx))?; - } - - let me = self.project(); - if buf.len() >= me.buf.capacity() { - me.inner.poll_write(cx, buf) - } else { - Poll::Ready(me.buf.write(buf)) - } - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - mut bufs: &[IoSlice<'_>], - ) -> Poll> { - if self.inner.is_write_vectored() { - let total_len = bufs - .iter() - .fold(0usize, |acc, b| acc.saturating_add(b.len())); - if total_len > self.buf.capacity() - self.buf.len() { - ready!(self.as_mut().flush_buf(cx))?; - } - let me = self.as_mut().project(); - if total_len >= me.buf.capacity() { - // It's more efficient to pass the slices directly to the - // underlying writer than to buffer them. - // The case when the total_len calculation saturates at - // usize::MAX is also handled here. - me.inner.poll_write_vectored(cx, bufs) - } else { - bufs.iter().for_each(|b| me.buf.extend_from_slice(b)); - Poll::Ready(Ok(total_len)) - } - } else { - // Remove empty buffers at the beginning of bufs. - while bufs.first().map(|buf| buf.len()) == Some(0) { - bufs = &bufs[1..]; - } - if bufs.is_empty() { - return Poll::Ready(Ok(0)); - } - // Flush if the first buffer doesn't fit. - let first_len = bufs[0].len(); - if first_len > self.buf.capacity() - self.buf.len() { - ready!(self.as_mut().flush_buf(cx))?; - debug_assert!(self.buf.is_empty()); - } - let me = self.as_mut().project(); - if first_len >= me.buf.capacity() { - // The slice is at least as large as the buffering capacity, - // so it's better to write it directly, bypassing the buffer. 
- debug_assert!(me.buf.is_empty()); - return me.inner.poll_write(cx, &bufs[0]); - } else { - me.buf.extend_from_slice(&bufs[0]); - bufs = &bufs[1..]; - } - let mut total_written = first_len; - debug_assert!(total_written != 0); - // Append the buffers that fit in the internal buffer. - for buf in bufs { - if buf.len() > me.buf.capacity() - me.buf.len() { - break; - } else { - me.buf.extend_from_slice(buf); - total_written += buf.len(); - } - } - Poll::Ready(Ok(total_written)) - } - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().flush_buf(cx))?; - self.get_pin_mut().poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().flush_buf(cx))?; - self.get_pin_mut().poll_shutdown(cx) - } -} - -#[derive(Debug, Clone, Copy)] -pub(super) enum SeekState { - /// start_seek has not been called. - Init, - /// start_seek has been called, but poll_complete has not yet been called. - Start(SeekFrom), - /// Waiting for completion of poll_complete. - Pending, -} - -/// Seek to the offset, in bytes, in the underlying writer. -/// -/// Seeking always writes out the internal buffer before seeking. -impl AsyncSeek for BufWriter { - fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> { - // We need to flush the internal buffer before seeking. - // It receives a `Context` and returns a `Poll`, so it cannot be called - // inside `start_seek`. - *self.project().seek_state = SeekState::Start(pos); - Ok(()) - } - - fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let pos = match self.seek_state { - SeekState::Init => { - return self.project().inner.poll_complete(cx); - } - SeekState::Start(pos) => Some(pos), - SeekState::Pending => None, - }; - - // Flush the internal buffer before seeking. 
- ready!(self.as_mut().flush_buf(cx))?; - - let mut me = self.project(); - if let Some(pos) = pos { - // Ensure previous seeks have finished before starting a new one - ready!(me.inner.as_mut().poll_complete(cx))?; - if let Err(e) = me.inner.as_mut().start_seek(pos) { - *me.seek_state = SeekState::Init; - return Poll::Ready(Err(e)); - } - } - match me.inner.poll_complete(cx) { - Poll::Ready(res) => { - *me.seek_state = SeekState::Init; - Poll::Ready(res) - } - Poll::Pending => { - *me.seek_state = SeekState::Pending; - Poll::Pending - } - } - } -} - -impl AsyncRead for BufWriter { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.get_pin_mut().poll_read(cx, buf) - } -} - -impl AsyncBufRead for BufWriter { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.get_pin_mut().poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.get_pin_mut().consume(amt) - } -} - -impl fmt::Debug for BufWriter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BufWriter") - .field("writer", &self.inner) - .field( - "buffer", - &format_args!("{}/{}", self.buf.len(), self.buf.capacity()), - ) - .field("written", &self.written) - .finish() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/chain.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/chain.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/chain.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/chain.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,144 +0,0 @@ -use crate::io::{AsyncBufRead, AsyncRead, ReadBuf}; - -use pin_project_lite::pin_project; -use std::fmt; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Stream for the [`chain`](super::AsyncReadExt::chain) method. 
- #[must_use = "streams do nothing unless polled"] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct Chain { - #[pin] - first: T, - #[pin] - second: U, - done_first: bool, - } -} - -pub(super) fn chain(first: T, second: U) -> Chain -where - T: AsyncRead, - U: AsyncRead, -{ - Chain { - first, - second, - done_first: false, - } -} - -impl Chain -where - T: AsyncRead, - U: AsyncRead, -{ - /// Gets references to the underlying readers in this `Chain`. - pub fn get_ref(&self) -> (&T, &U) { - (&self.first, &self.second) - } - - /// Gets mutable references to the underlying readers in this `Chain`. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying readers as doing so may corrupt the internal state of this - /// `Chain`. - pub fn get_mut(&mut self) -> (&mut T, &mut U) { - (&mut self.first, &mut self.second) - } - - /// Gets pinned mutable references to the underlying readers in this `Chain`. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying readers as doing so may corrupt the internal state of this - /// `Chain`. - pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut T>, Pin<&mut U>) { - let me = self.project(); - (me.first, me.second) - } - - /// Consumes the `Chain`, returning the wrapped readers. 
- pub fn into_inner(self) -> (T, U) { - (self.first, self.second) - } -} - -impl fmt::Debug for Chain -where - T: fmt::Debug, - U: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Chain") - .field("t", &self.first) - .field("u", &self.second) - .finish() - } -} - -impl AsyncRead for Chain -where - T: AsyncRead, - U: AsyncRead, -{ - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - let me = self.project(); - - if !*me.done_first { - let rem = buf.remaining(); - ready!(me.first.poll_read(cx, buf))?; - if buf.remaining() == rem { - *me.done_first = true; - } else { - return Poll::Ready(Ok(())); - } - } - me.second.poll_read(cx, buf) - } -} - -impl AsyncBufRead for Chain -where - T: AsyncBufRead, - U: AsyncBufRead, -{ - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - - if !*me.done_first { - match ready!(me.first.poll_fill_buf(cx)?) { - buf if buf.is_empty() => { - *me.done_first = true; - } - buf => return Poll::Ready(Ok(buf)), - } - } - me.second.poll_fill_buf(cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - let me = self.project(); - if !*me.done_first { - me.first.consume(amt) - } else { - me.second.consume(amt) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/copy_bidirectional.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/copy_bidirectional.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/copy_bidirectional.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/copy_bidirectional.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ -use super::copy::CopyBuffer; - -use crate::future::poll_fn; -use crate::io::{AsyncRead, AsyncWrite}; - -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -enum TransferState { - 
Running(CopyBuffer), - ShuttingDown(u64), - Done(u64), -} - -fn transfer_one_direction( - cx: &mut Context<'_>, - state: &mut TransferState, - r: &mut A, - w: &mut B, -) -> Poll> -where - A: AsyncRead + AsyncWrite + Unpin + ?Sized, - B: AsyncRead + AsyncWrite + Unpin + ?Sized, -{ - let mut r = Pin::new(r); - let mut w = Pin::new(w); - - loop { - match state { - TransferState::Running(buf) => { - let count = ready!(buf.poll_copy(cx, r.as_mut(), w.as_mut()))?; - *state = TransferState::ShuttingDown(count); - } - TransferState::ShuttingDown(count) => { - ready!(w.as_mut().poll_shutdown(cx))?; - - *state = TransferState::Done(*count); - } - TransferState::Done(count) => return Poll::Ready(Ok(*count)), - } - } -} -/// Copies data in both directions between `a` and `b`. -/// -/// This function returns a future that will read from both streams, -/// writing any data read to the opposing stream. -/// This happens in both directions concurrently. -/// -/// If an EOF is observed on one stream, [`shutdown()`] will be invoked on -/// the other, and reading from that stream will stop. Copying of data in -/// the other direction will continue. -/// -/// The future will complete successfully once both directions of communication has been shut down. -/// A direction is shut down when the reader reports EOF, -/// at which point [`shutdown()`] is called on the corresponding writer. When finished, -/// it will return a tuple of the number of bytes copied from a to b -/// and the number of bytes copied from b to a, in that order. -/// -/// [`shutdown()`]: crate::io::AsyncWriteExt::shutdown -/// -/// # Errors -/// -/// The future will immediately return an error if any IO operation on `a` -/// or `b` returns an error. Some data read from either stream may be lost (not -/// written to the other stream) in this case. -/// -/// # Return value -/// -/// Returns a tuple of bytes copied `a` to `b` and bytes copied `b` to `a`. 
-#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] -pub async fn copy_bidirectional(a: &mut A, b: &mut B) -> Result<(u64, u64), std::io::Error> -where - A: AsyncRead + AsyncWrite + Unpin + ?Sized, - B: AsyncRead + AsyncWrite + Unpin + ?Sized, -{ - let mut a_to_b = TransferState::Running(CopyBuffer::new()); - let mut b_to_a = TransferState::Running(CopyBuffer::new()); - poll_fn(|cx| { - let a_to_b = transfer_one_direction(cx, &mut a_to_b, a, b)?; - let b_to_a = transfer_one_direction(cx, &mut b_to_a, b, a)?; - - // It is not a problem if ready! returns early because transfer_one_direction for the - // other direction will keep returning TransferState::Done(count) in future calls to poll - let a_to_b = ready!(a_to_b); - let b_to_a = ready!(b_to_a); - - Poll::Ready(Ok((a_to_b, b_to_a))) - }) - .await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/copy_buf.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/copy_buf.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/copy_buf.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/copy_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,108 +0,0 @@ -use crate::io::{AsyncBufRead, AsyncWrite}; -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// A future that asynchronously copies the entire contents of a reader into a - /// writer. - /// - /// This struct is generally created by calling [`copy_buf`][copy_buf]. Please - /// see the documentation of `copy_buf()` for more details. - /// - /// [copy_buf]: copy_buf() - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - struct CopyBuf<'a, R: ?Sized, W: ?Sized> { - reader: &'a mut R, - writer: &'a mut W, - amt: u64, - } - - /// Asynchronously copies the entire contents of a reader into a writer. 
- /// - /// This function returns a future that will continuously read data from - /// `reader` and then write it into `writer` in a streaming fashion until - /// `reader` returns EOF or fails. - /// - /// On success, the total number of bytes that were copied from `reader` to - /// `writer` is returned. - /// - /// This is a [`tokio::io::copy`] alternative for [`AsyncBufRead`] readers - /// with no extra buffer allocation, since [`AsyncBufRead`] allow access - /// to the reader's inner buffer. - /// - /// [`tokio::io::copy`]: crate::io::copy - /// [`AsyncBufRead`]: crate::io::AsyncBufRead - /// - /// # Errors - /// - /// The returned future will finish with an error will return an error - /// immediately if any call to `poll_fill_buf` or `poll_write` returns an - /// error. - /// - /// # Examples - /// - /// ``` - /// use tokio::io; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut reader: &[u8] = b"hello"; - /// let mut writer: Vec = vec![]; - /// - /// io::copy_buf(&mut reader, &mut writer).await?; - /// - /// assert_eq!(b"hello", &writer[..]); - /// # Ok(()) - /// # } - /// ``` - pub async fn copy_buf<'a, R, W>(reader: &'a mut R, writer: &'a mut W) -> io::Result - where - R: AsyncBufRead + Unpin + ?Sized, - W: AsyncWrite + Unpin + ?Sized, - { - CopyBuf { - reader, - writer, - amt: 0, - }.await - } -} - -impl Future for CopyBuf<'_, R, W> -where - R: AsyncBufRead + Unpin + ?Sized, - W: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - loop { - let me = &mut *self; - let buffer = ready!(Pin::new(&mut *me.reader).poll_fill_buf(cx))?; - if buffer.is_empty() { - ready!(Pin::new(&mut self.writer).poll_flush(cx))?; - return Poll::Ready(Ok(self.amt)); - } - - let i = ready!(Pin::new(&mut *me.writer).poll_write(cx, buffer))?; - if i == 0 { - return Poll::Ready(Err(std::io::ErrorKind::WriteZero.into())); - } - self.amt += i as u64; - Pin::new(&mut *self.reader).consume(i); 
- } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - use std::marker::PhantomPinned; - crate::is_unpin::>(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/copy.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/copy.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/copy.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/copy.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,219 +0,0 @@ -use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; - -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -#[derive(Debug)] -pub(super) struct CopyBuffer { - read_done: bool, - need_flush: bool, - pos: usize, - cap: usize, - amt: u64, - buf: Box<[u8]>, -} - -impl CopyBuffer { - pub(super) fn new() -> Self { - Self { - read_done: false, - need_flush: false, - pos: 0, - cap: 0, - amt: 0, - buf: vec![0; super::DEFAULT_BUF_SIZE].into_boxed_slice(), - } - } - - fn poll_fill_buf( - &mut self, - cx: &mut Context<'_>, - reader: Pin<&mut R>, - ) -> Poll> - where - R: AsyncRead + ?Sized, - { - let me = &mut *self; - let mut buf = ReadBuf::new(&mut me.buf); - buf.set_filled(me.cap); - - let res = reader.poll_read(cx, &mut buf); - if let Poll::Ready(Ok(_)) = res { - let filled_len = buf.filled().len(); - me.read_done = me.cap == filled_len; - me.cap = filled_len; - } - res - } - - fn poll_write_buf( - &mut self, - cx: &mut Context<'_>, - mut reader: Pin<&mut R>, - mut writer: Pin<&mut W>, - ) -> Poll> - where - R: AsyncRead + ?Sized, - W: AsyncWrite + ?Sized, - { - let me = &mut *self; - match writer.as_mut().poll_write(cx, &me.buf[me.pos..me.cap]) { - Poll::Pending => { - // Top up the buffer towards full if we can read a bit more - // data - this should improve the chances of a large write - if !me.read_done && me.cap < me.buf.len() { - ready!(me.poll_fill_buf(cx, reader.as_mut()))?; - } - Poll::Pending - } - res => res, - } - } - - pub(super) fn poll_copy( 
- &mut self, - cx: &mut Context<'_>, - mut reader: Pin<&mut R>, - mut writer: Pin<&mut W>, - ) -> Poll> - where - R: AsyncRead + ?Sized, - W: AsyncWrite + ?Sized, - { - loop { - // If our buffer is empty, then we need to read some data to - // continue. - if self.pos == self.cap && !self.read_done { - self.pos = 0; - self.cap = 0; - - match self.poll_fill_buf(cx, reader.as_mut()) { - Poll::Ready(Ok(_)) => (), - Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), - Poll::Pending => { - // Try flushing when the reader has no progress to avoid deadlock - // when the reader depends on buffered writer. - if self.need_flush { - ready!(writer.as_mut().poll_flush(cx))?; - self.need_flush = false; - } - - return Poll::Pending; - } - } - } - - // If our buffer has some data, let's write it out! - while self.pos < self.cap { - let i = ready!(self.poll_write_buf(cx, reader.as_mut(), writer.as_mut()))?; - if i == 0 { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::WriteZero, - "write zero byte into writer", - ))); - } else { - self.pos += i; - self.amt += i as u64; - self.need_flush = true; - } - } - - // If pos larger than cap, this loop will never stop. - // In particular, user's wrong poll_write implementation returning - // incorrect written length may lead to thread blocking. - debug_assert!( - self.pos <= self.cap, - "writer returned length larger than input slice" - ); - - // If we've written all the data and we've seen EOF, flush out the - // data and finish the transfer. - if self.pos == self.cap && self.read_done { - ready!(writer.as_mut().poll_flush(cx))?; - return Poll::Ready(Ok(self.amt)); - } - } - } -} - -/// A future that asynchronously copies the entire contents of a reader into a -/// writer. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -struct Copy<'a, R: ?Sized, W: ?Sized> { - reader: &'a mut R, - writer: &'a mut W, - buf: CopyBuffer, -} - -cfg_io_util! 
{ - /// Asynchronously copies the entire contents of a reader into a writer. - /// - /// This function returns a future that will continuously read data from - /// `reader` and then write it into `writer` in a streaming fashion until - /// `reader` returns EOF or fails. - /// - /// On success, the total number of bytes that were copied from `reader` to - /// `writer` is returned. - /// - /// This is an asynchronous version of [`std::io::copy`][std]. - /// - /// A heap-allocated copy buffer with 8 KB is created to take data from the - /// reader to the writer, check [`copy_buf`] if you want an alternative for - /// [`AsyncBufRead`]. You can use `copy_buf` with [`BufReader`] to change the - /// buffer capacity. - /// - /// [std]: std::io::copy - /// [`copy_buf`]: crate::io::copy_buf - /// [`AsyncBufRead`]: crate::io::AsyncBufRead - /// [`BufReader`]: crate::io::BufReader - /// - /// # Errors - /// - /// The returned future will return an error immediately if any call to - /// `poll_read` or `poll_write` returns an error. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::io; - /// - /// # async fn dox() -> std::io::Result<()> { - /// let mut reader: &[u8] = b"hello"; - /// let mut writer: Vec = vec![]; - /// - /// io::copy(&mut reader, &mut writer).await?; - /// - /// assert_eq!(&b"hello"[..], &writer[..]); - /// # Ok(()) - /// # } - /// ``` - pub async fn copy<'a, R, W>(reader: &'a mut R, writer: &'a mut W) -> io::Result - where - R: AsyncRead + Unpin + ?Sized, - W: AsyncWrite + Unpin + ?Sized, - { - Copy { - reader, - writer, - buf: CopyBuffer::new() - }.await - } -} - -impl Future for Copy<'_, R, W> -where - R: AsyncRead + Unpin + ?Sized, - W: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = &mut *self; - - me.buf - .poll_copy(cx, Pin::new(&mut *me.reader), Pin::new(&mut *me.writer)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/empty.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/empty.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/empty.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/empty.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,102 +0,0 @@ -use crate::io::{AsyncBufRead, AsyncRead, ReadBuf}; - -use std::fmt; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// An async reader which is always at EOF. - /// - /// This struct is generally created by calling [`empty`]. Please see - /// the documentation of [`empty()`][`empty`] for more details. - /// - /// This is an asynchronous version of [`std::io::empty`][std]. - /// - /// [`empty`]: fn@empty - /// [std]: std::io::empty - pub struct Empty { - _p: (), - } - - /// Creates a new empty async reader. - /// - /// All reads from the returned reader will return `Poll::Ready(Ok(0))`. - /// - /// This is an asynchronous version of [`std::io::empty`][std]. 
- /// - /// [std]: std::io::empty - /// - /// # Examples - /// - /// A slightly sad example of not reading anything into a buffer: - /// - /// ``` - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut buffer = String::new(); - /// io::empty().read_to_string(&mut buffer).await.unwrap(); - /// assert!(buffer.is_empty()); - /// } - /// ``` - pub fn empty() -> Empty { - Empty { _p: () } - } -} - -impl AsyncRead for Empty { - #[inline] - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - _: &mut ReadBuf<'_>, - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - ready!(poll_proceed_and_make_progress(cx)); - Poll::Ready(Ok(())) - } -} - -impl AsyncBufRead for Empty { - #[inline] - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - ready!(poll_proceed_and_make_progress(cx)); - Poll::Ready(Ok(&[])) - } - - #[inline] - fn consume(self: Pin<&mut Self>, _: usize) {} -} - -impl fmt::Debug for Empty { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Empty { .. }") - } -} - -cfg_coop! { - fn poll_proceed_and_make_progress(cx: &mut Context<'_>) -> Poll<()> { - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - coop.made_progress(); - Poll::Ready(()) - } -} - -cfg_not_coop! 
{ - fn poll_proceed_and_make_progress(_: &mut Context<'_>) -> Poll<()> { - Poll::Ready(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/fill_buf.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/fill_buf.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/fill_buf.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/fill_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,59 +0,0 @@ -use crate::io::AsyncBufRead; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Future for the [`fill_buf`](crate::io::AsyncBufReadExt::fill_buf) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct FillBuf<'a, R: ?Sized> { - reader: Option<&'a mut R>, - #[pin] - _pin: PhantomPinned, - } -} - -pub(crate) fn fill_buf(reader: &mut R) -> FillBuf<'_, R> -where - R: AsyncBufRead + ?Sized + Unpin, -{ - FillBuf { - reader: Some(reader), - _pin: PhantomPinned, - } -} - -impl<'a, R: AsyncBufRead + ?Sized + Unpin> Future for FillBuf<'a, R> { - type Output = io::Result<&'a [u8]>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - let reader = me.reader.take().expect("Polled after completion."); - match Pin::new(&mut *reader).poll_fill_buf(cx) { - Poll::Ready(Ok(slice)) => unsafe { - // Safety: This is necessary only due to a limitation in the - // borrow checker. Once Rust starts using the polonius borrow - // checker, this can be simplified. - // - // The safety of this transmute relies on the fact that the - // value of `reader` is `None` when we return in this branch. 
- // Otherwise the caller could poll us again after - // completion, and access the mutable reference while the - // returned immutable reference still exists. - let slice = std::mem::transmute::<&[u8], &'a [u8]>(slice); - Poll::Ready(Ok(slice)) - }, - Poll::Ready(Err(err)) => Poll::Ready(Err(err)), - Poll::Pending => { - *me.reader = Some(reader); - Poll::Pending - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/flush.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/flush.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/flush.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/flush.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -use crate::io::AsyncWrite; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A future used to fully flush an I/O object. - /// - /// Created by the [`AsyncWriteExt::flush`][flush] function. - /// [flush]: crate::io::AsyncWriteExt::flush - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Flush<'a, A: ?Sized> { - a: &'a mut A, - // Make this future `!Unpin` for compatibility with async trait methods. - #[pin] - _pin: PhantomPinned, - } -} - -/// Creates a future which will entirely flush an I/O object. 
-pub(super) fn flush(a: &mut A) -> Flush<'_, A> -where - A: AsyncWrite + Unpin + ?Sized, -{ - Flush { - a, - _pin: PhantomPinned, - } -} - -impl Future for Flush<'_, A> -where - A: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - Pin::new(&mut *me.a).poll_flush(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/lines.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/lines.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/lines.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/lines.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,145 +0,0 @@ -use crate::io::util::read_line::read_line_internal; -use crate::io::AsyncBufRead; - -use pin_project_lite::pin_project; -use std::io; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Reads lines from an [`AsyncBufRead`]. - /// - /// A `Lines` can be turned into a `Stream` with [`LinesStream`]. - /// - /// This type is usually created using the [`lines`] method. - /// - /// [`AsyncBufRead`]: crate::io::AsyncBufRead - /// [`LinesStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.LinesStream.html - /// [`lines`]: crate::io::AsyncBufReadExt::lines - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct Lines { - #[pin] - reader: R, - buf: String, - bytes: Vec, - read: usize, - } -} - -pub(crate) fn lines(reader: R) -> Lines -where - R: AsyncBufRead, -{ - Lines { - reader, - buf: String::new(), - bytes: Vec::new(), - read: 0, - } -} - -impl Lines -where - R: AsyncBufRead + Unpin, -{ - /// Returns the next line in the stream. - /// - /// # Cancel safety - /// - /// This method is cancellation safe. 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncBufRead; - /// use tokio::io::AsyncBufReadExt; - /// - /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { - /// let mut lines = my_buf_read.lines(); - /// - /// while let Some(line) = lines.next_line().await? { - /// println!("length = {}", line.len()) - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn next_line(&mut self) -> io::Result> { - use crate::future::poll_fn; - - poll_fn(|cx| Pin::new(&mut *self).poll_next_line(cx)).await - } - - /// Obtains a mutable reference to the underlying reader. - pub fn get_mut(&mut self) -> &mut R { - &mut self.reader - } - - /// Obtains a reference to the underlying reader. - pub fn get_ref(&mut self) -> &R { - &self.reader - } - - /// Unwraps this `Lines`, returning the underlying reader. - /// - /// Note that any leftover data in the internal buffer is lost. - /// Therefore, a following read from the underlying reader may lead to data loss. - pub fn into_inner(self) -> R { - self.reader - } -} - -impl Lines -where - R: AsyncBufRead, -{ - /// Polls for the next line in the stream. - /// - /// This method returns: - /// - /// * `Poll::Pending` if the next line is not yet available. - /// * `Poll::Ready(Ok(Some(line)))` if the next line is available. - /// * `Poll::Ready(Ok(None))` if there are no more lines in this stream. - /// * `Poll::Ready(Err(err))` if an IO error occurred while reading the next line. - /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided - /// `Context` is scheduled to receive a wakeup when more bytes become - /// available on the underlying IO resource. Note that on multiple calls to - /// `poll_next_line`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. 
- pub fn poll_next_line( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - let me = self.project(); - - let n = ready!(read_line_internal(me.reader, cx, me.buf, me.bytes, me.read))?; - debug_assert_eq!(*me.read, 0); - - if n == 0 && me.buf.is_empty() { - return Poll::Ready(Ok(None)); - } - - if me.buf.ends_with('\n') { - me.buf.pop(); - - if me.buf.ends_with('\r') { - me.buf.pop(); - } - } - - Poll::Ready(Ok(Some(mem::take(me.buf)))) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/mem.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/mem.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/mem.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/mem.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,374 +0,0 @@ -//! In-process memory IO types. - -use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; -use crate::loom::sync::Mutex; - -use bytes::{Buf, BytesMut}; -use std::{ - pin::Pin, - sync::Arc, - task::{self, Poll, Waker}, -}; - -/// A bidirectional pipe to read and write bytes in memory. -/// -/// A pair of `DuplexStream`s are created together, and they act as a "channel" -/// that can be used as in-memory IO types. Writing to one of the pairs will -/// allow that data to be read from the other, and vice versa. -/// -/// # Closing a `DuplexStream` -/// -/// If one end of the `DuplexStream` channel is dropped, any pending reads on -/// the other side will continue to read data until the buffer is drained, then -/// they will signal EOF by returning 0 bytes. Any writes to the other side, -/// including pending ones (that are waiting for free space in the buffer) will -/// return `Err(BrokenPipe)` immediately. 
-/// -/// # Example -/// -/// ``` -/// # async fn ex() -> std::io::Result<()> { -/// # use tokio::io::{AsyncReadExt, AsyncWriteExt}; -/// let (mut client, mut server) = tokio::io::duplex(64); -/// -/// client.write_all(b"ping").await?; -/// -/// let mut buf = [0u8; 4]; -/// server.read_exact(&mut buf).await?; -/// assert_eq!(&buf, b"ping"); -/// -/// server.write_all(b"pong").await?; -/// -/// client.read_exact(&mut buf).await?; -/// assert_eq!(&buf, b"pong"); -/// # Ok(()) -/// # } -/// ``` -#[derive(Debug)] -#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] -pub struct DuplexStream { - read: Arc>, - write: Arc>, -} - -/// A unidirectional IO over a piece of memory. -/// -/// Data can be written to the pipe, and reading will return that data. -#[derive(Debug)] -struct Pipe { - /// The buffer storing the bytes written, also read from. - /// - /// Using a `BytesMut` because it has efficient `Buf` and `BufMut` - /// functionality already. Additionally, it can try to copy data in the - /// same buffer if there read index has advanced far enough. - buffer: BytesMut, - /// Determines if the write side has been closed. - is_closed: bool, - /// The maximum amount of bytes that can be written before returning - /// `Poll::Pending`. - max_buf_size: usize, - /// If the `read` side has been polled and is pending, this is the waker - /// for that parked task. - read_waker: Option, - /// If the `write` side has filled the `max_buf_size` and returned - /// `Poll::Pending`, this is the waker for that parked task. - write_waker: Option, -} - -// ===== impl DuplexStream ===== - -/// Create a new pair of `DuplexStream`s that act like a pair of connected sockets. -/// -/// The `max_buf_size` argument is the maximum amount of bytes that can be -/// written to a side before the write returns `Poll::Pending`. 
-#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] -pub fn duplex(max_buf_size: usize) -> (DuplexStream, DuplexStream) { - let one = Arc::new(Mutex::new(Pipe::new(max_buf_size))); - let two = Arc::new(Mutex::new(Pipe::new(max_buf_size))); - - ( - DuplexStream { - read: one.clone(), - write: two.clone(), - }, - DuplexStream { - read: two, - write: one, - }, - ) -} - -impl AsyncRead for DuplexStream { - // Previous rustc required this `self` to be `mut`, even though newer - // versions recognize it isn't needed to call `lock()`. So for - // compatibility, we include the `mut` and `allow` the lint. - // - // See https://github.com/rust-lang/rust/issues/73592 - #[allow(unused_mut)] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - Pin::new(&mut *self.read.lock()).poll_read(cx, buf) - } -} - -impl AsyncWrite for DuplexStream { - #[allow(unused_mut)] - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut *self.write.lock()).poll_write(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - bufs: &[std::io::IoSlice<'_>], - ) -> Poll> { - Pin::new(&mut *self.write.lock()).poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - true - } - - #[allow(unused_mut)] - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll> { - Pin::new(&mut *self.write.lock()).poll_flush(cx) - } - - #[allow(unused_mut)] - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll> { - Pin::new(&mut *self.write.lock()).poll_shutdown(cx) - } -} - -impl Drop for DuplexStream { - fn drop(&mut self) { - // notify the other side of the closure - self.write.lock().close_write(); - self.read.lock().close_read(); - } -} - -// ===== impl Pipe ===== - -impl Pipe { - fn new(max_buf_size: usize) -> Self { - Pipe { - buffer: BytesMut::new(), - is_closed: 
false, - max_buf_size, - read_waker: None, - write_waker: None, - } - } - - fn close_write(&mut self) { - self.is_closed = true; - // needs to notify any readers that no more data will come - if let Some(waker) = self.read_waker.take() { - waker.wake(); - } - } - - fn close_read(&mut self) { - self.is_closed = true; - // needs to notify any writers that they have to abort - if let Some(waker) = self.write_waker.take() { - waker.wake(); - } - } - - fn poll_read_internal( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - if self.buffer.has_remaining() { - let max = self.buffer.remaining().min(buf.remaining()); - buf.put_slice(&self.buffer[..max]); - self.buffer.advance(max); - if max > 0 { - // The passed `buf` might have been empty, don't wake up if - // no bytes have been moved. - if let Some(waker) = self.write_waker.take() { - waker.wake(); - } - } - Poll::Ready(Ok(())) - } else if self.is_closed { - Poll::Ready(Ok(())) - } else { - self.read_waker = Some(cx.waker().clone()); - Poll::Pending - } - } - - fn poll_write_internal( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - if self.is_closed { - return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())); - } - let avail = self.max_buf_size - self.buffer.len(); - if avail == 0 { - self.write_waker = Some(cx.waker().clone()); - return Poll::Pending; - } - - let len = buf.len().min(avail); - self.buffer.extend_from_slice(&buf[..len]); - if let Some(waker) = self.read_waker.take() { - waker.wake(); - } - Poll::Ready(Ok(len)) - } - - fn poll_write_vectored_internal( - mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - bufs: &[std::io::IoSlice<'_>], - ) -> Poll> { - if self.is_closed { - return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())); - } - let avail = self.max_buf_size - self.buffer.len(); - if avail == 0 { - self.write_waker = Some(cx.waker().clone()); - return Poll::Pending; - } - - let mut rem = 
avail; - for buf in bufs { - if rem == 0 { - break; - } - - let len = buf.len().min(rem); - self.buffer.extend_from_slice(&buf[..len]); - rem -= len; - } - - if let Some(waker) = self.read_waker.take() { - waker.wake(); - } - Poll::Ready(Ok(avail - rem)) - } -} - -impl AsyncRead for Pipe { - cfg_coop! { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - let ret = self.poll_read_internal(cx, buf); - if ret.is_ready() { - coop.made_progress(); - } - ret - } - } - - cfg_not_coop! { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - self.poll_read_internal(cx, buf) - } - } -} - -impl AsyncWrite for Pipe { - cfg_coop! { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - let ret = self.poll_write_internal(cx, buf); - if ret.is_ready() { - coop.made_progress(); - } - ret - } - } - - cfg_not_coop! { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - buf: &[u8], - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - self.poll_write_internal(cx, buf) - } - } - - cfg_coop! { - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - bufs: &[std::io::IoSlice<'_>], - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - let ret = self.poll_write_vectored_internal(cx, bufs); - if ret.is_ready() { - coop.made_progress(); - } - ret - } - } - - cfg_not_coop! 
{ - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - bufs: &[std::io::IoSlice<'_>], - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - self.poll_write_vectored_internal(cx, bufs) - } - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - _: &mut task::Context<'_>, - ) -> Poll> { - self.close_write(); - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,97 +0,0 @@ -#![allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 - -cfg_io_util! { - mod async_buf_read_ext; - pub use async_buf_read_ext::AsyncBufReadExt; - - mod async_read_ext; - pub use async_read_ext::AsyncReadExt; - - mod async_seek_ext; - pub use async_seek_ext::AsyncSeekExt; - - mod async_write_ext; - pub use async_write_ext::AsyncWriteExt; - - mod buf_reader; - pub use buf_reader::BufReader; - - mod buf_stream; - pub use buf_stream::BufStream; - - mod buf_writer; - pub use buf_writer::BufWriter; - - mod chain; - - mod copy; - pub use copy::copy; - - mod copy_bidirectional; - pub use copy_bidirectional::copy_bidirectional; - - mod copy_buf; - pub use copy_buf::copy_buf; - - mod empty; - pub use empty::{empty, Empty}; - - mod flush; - - mod lines; - pub use lines::Lines; - - mod mem; - pub use mem::{duplex, DuplexStream}; - - mod read; - mod read_buf; - mod read_exact; - mod read_int; - mod read_line; - mod fill_buf; - - mod read_to_end; - mod vec_with_initialized; - cfg_process! 
{ - pub(crate) use read_to_end::read_to_end; - } - - mod read_to_string; - mod read_until; - - mod repeat; - pub use repeat::{repeat, Repeat}; - - mod shutdown; - - mod sink; - pub use sink::{sink, Sink}; - - mod split; - pub use split::Split; - - mod take; - pub use take::Take; - - mod write; - mod write_vectored; - mod write_all; - mod write_buf; - mod write_all_buf; - mod write_int; - - - // used by `BufReader` and `BufWriter` - // https://github.com/rust-lang/rust/blob/master/library/std/src/sys_common/io.rs#L1 - const DEFAULT_BUF_SIZE: usize = 8 * 1024; -} - -cfg_not_io_util! { - cfg_process! { - mod vec_with_initialized; - mod read_to_end; - // Used by process - pub(crate) use read_to_end::read_to_end; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_buf.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_buf.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_buf.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -use crate::io::AsyncRead; - -use bytes::BufMut; -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pub(crate) fn read_buf<'a, R, B>(reader: &'a mut R, buf: &'a mut B) -> ReadBuf<'a, R, B> -where - R: AsyncRead + Unpin, - B: BufMut, -{ - ReadBuf { - reader, - buf, - _pin: PhantomPinned, - } -} - -pin_project! { - /// Future returned by [`read_buf`](crate::io::AsyncReadExt::read_buf). 
- #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadBuf<'a, R, B> { - reader: &'a mut R, - buf: &'a mut B, - #[pin] - _pin: PhantomPinned, - } -} - -impl Future for ReadBuf<'_, R, B> -where - R: AsyncRead + Unpin, - B: BufMut, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - use crate::io::ReadBuf; - use std::mem::MaybeUninit; - - let me = self.project(); - - if !me.buf.has_remaining_mut() { - return Poll::Ready(Ok(0)); - } - - let n = { - let dst = me.buf.chunk_mut(); - let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit]) }; - let mut buf = ReadBuf::uninit(dst); - let ptr = buf.filled().as_ptr(); - ready!(Pin::new(me.reader).poll_read(cx, &mut buf)?); - - // Ensure the pointer does not change from under us - assert_eq!(ptr, buf.filled().as_ptr()); - buf.filled().len() - }; - - // Safety: This is guaranteed to be the number of initialized (and read) - // bytes due to the invariants provided by `ReadBuf::filled`. - unsafe { - me.buf.advance_mut(n); - } - - Poll::Ready(Ok(n)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_exact.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_exact.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_exact.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_exact.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,69 +0,0 @@ -use crate::io::{AsyncRead, ReadBuf}; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::marker::Unpin; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// A future which can be used to easily read exactly enough bytes to fill -/// a buffer. -/// -/// Created by the [`AsyncReadExt::read_exact`][read_exact]. 
-/// [read_exact]: [crate::io::AsyncReadExt::read_exact] -pub(crate) fn read_exact<'a, A>(reader: &'a mut A, buf: &'a mut [u8]) -> ReadExact<'a, A> -where - A: AsyncRead + Unpin + ?Sized, -{ - ReadExact { - reader, - buf: ReadBuf::new(buf), - _pin: PhantomPinned, - } -} - -pin_project! { - /// Creates a future which will read exactly enough bytes to fill `buf`, - /// returning an error if EOF is hit sooner. - /// - /// On success the number of bytes is returned - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadExact<'a, A: ?Sized> { - reader: &'a mut A, - buf: ReadBuf<'a>, - // Make this future `!Unpin` for compatibility with async trait methods. - #[pin] - _pin: PhantomPinned, - } -} - -fn eof() -> io::Error { - io::Error::new(io::ErrorKind::UnexpectedEof, "early eof") -} - -impl Future for ReadExact<'_, A> -where - A: AsyncRead + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - - loop { - // if our buffer is empty, then we need to read some data to continue. 
- let rem = me.buf.remaining(); - if rem != 0 { - ready!(Pin::new(&mut *me.reader).poll_read(cx, me.buf))?; - if me.buf.remaining() == rem { - return Err(eof()).into(); - } - } else { - return Poll::Ready(Ok(me.buf.capacity())); - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_int.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_int.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_int.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_int.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,159 +0,0 @@ -use crate::io::{AsyncRead, ReadBuf}; - -use bytes::Buf; -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::io::ErrorKind::UnexpectedEof; -use std::marker::PhantomPinned; -use std::mem::size_of; -use std::pin::Pin; -use std::task::{Context, Poll}; - -macro_rules! reader { - ($name:ident, $ty:ty, $reader:ident) => { - reader!($name, $ty, $reader, size_of::<$ty>()); - }; - ($name:ident, $ty:ty, $reader:ident, $bytes:expr) => { - pin_project! { - #[doc(hidden)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct $name { - #[pin] - src: R, - buf: [u8; $bytes], - read: u8, - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } - } - - impl $name { - pub(crate) fn new(src: R) -> Self { - $name { - src, - buf: [0; $bytes], - read: 0, - _pin: PhantomPinned, - } - } - } - - impl Future for $name - where - R: AsyncRead, - { - type Output = io::Result<$ty>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut me = self.project(); - - if *me.read == $bytes as u8 { - return Poll::Ready(Ok(Buf::$reader(&mut &me.buf[..]))); - } - - while *me.read < $bytes as u8 { - let mut buf = ReadBuf::new(&mut me.buf[*me.read as usize..]); - - *me.read += match me.src.as_mut().poll_read(cx, &mut buf) { - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), - Poll::Ready(Ok(())) => { - let n = buf.filled().len(); - if n == 0 { - return Poll::Ready(Err(UnexpectedEof.into())); - } - - n as u8 - } - }; - } - - let num = Buf::$reader(&mut &me.buf[..]); - - Poll::Ready(Ok(num)) - } - } - }; -} - -macro_rules! reader8 { - ($name:ident, $ty:ty) => { - pin_project! { - /// Future returned from `read_u8` - #[doc(hidden)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct $name { - #[pin] - reader: R, - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } - } - - impl $name { - pub(crate) fn new(reader: R) -> $name { - $name { - reader, - _pin: PhantomPinned, - } - } - } - - impl Future for $name - where - R: AsyncRead, - { - type Output = io::Result<$ty>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - let mut buf = [0; 1]; - let mut buf = ReadBuf::new(&mut buf); - match me.reader.poll_read(cx, &mut buf) { - Poll::Pending => Poll::Pending, - Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), - Poll::Ready(Ok(())) => { - if buf.filled().len() == 0 { - return Poll::Ready(Err(UnexpectedEof.into())); - } - - Poll::Ready(Ok(buf.filled()[0] as $ty)) - } - } - } - } - }; -} - -reader8!(ReadU8, u8); -reader8!(ReadI8, i8); - -reader!(ReadU16, u16, get_u16); -reader!(ReadU32, u32, get_u32); -reader!(ReadU64, u64, get_u64); -reader!(ReadU128, u128, get_u128); - -reader!(ReadI16, i16, get_i16); -reader!(ReadI32, i32, get_i32); -reader!(ReadI64, i64, get_i64); -reader!(ReadI128, i128, get_i128); - -reader!(ReadF32, f32, get_f32); -reader!(ReadF64, f64, get_f64); - -reader!(ReadU16Le, u16, get_u16_le); -reader!(ReadU32Le, u32, get_u32_le); -reader!(ReadU64Le, u64, get_u64_le); -reader!(ReadU128Le, u128, get_u128_le); - -reader!(ReadI16Le, i16, get_i16_le); -reader!(ReadI32Le, i32, get_i32_le); -reader!(ReadI64Le, i64, get_i64_le); -reader!(ReadI128Le, i128, get_i128_le); - -reader!(ReadF32Le, f32, get_f32_le); -reader!(ReadF64Le, f64, get_f64_le); diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_line.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_line.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_line.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_line.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,119 +0,0 @@ -use crate::io::util::read_until::read_until_internal; -use crate::io::AsyncBufRead; - -use pin_project_lite::pin_project; -use std::future::Future; -use 
std::io; -use std::marker::PhantomPinned; -use std::mem; -use std::pin::Pin; -use std::string::FromUtf8Error; -use std::task::{Context, Poll}; - -pin_project! { - /// Future for the [`read_line`](crate::io::AsyncBufReadExt::read_line) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadLine<'a, R: ?Sized> { - reader: &'a mut R, - // This is the buffer we were provided. It will be replaced with an empty string - // while reading to postpone utf-8 handling until after reading. - output: &'a mut String, - // The actual allocation of the string is moved into this vector instead. - buf: Vec, - // The number of bytes appended to buf. This can be less than buf.len() if - // the buffer was not empty when the operation was started. - read: usize, - // Make this future `!Unpin` for compatibility with async trait methods. - #[pin] - _pin: PhantomPinned, - } -} - -pub(crate) fn read_line<'a, R>(reader: &'a mut R, string: &'a mut String) -> ReadLine<'a, R> -where - R: AsyncBufRead + ?Sized + Unpin, -{ - ReadLine { - reader, - buf: mem::take(string).into_bytes(), - output: string, - read: 0, - _pin: PhantomPinned, - } -} - -fn put_back_original_data(output: &mut String, mut vector: Vec, num_bytes_read: usize) { - let original_len = vector.len() - num_bytes_read; - vector.truncate(original_len); - *output = String::from_utf8(vector).expect("The original data must be valid utf-8."); -} - -/// This handles the various failure cases and puts the string back into `output`. -/// -/// The `truncate_on_io_error` bool is necessary because `read_to_string` and `read_line` -/// disagree on what should happen when an IO error occurs. 
-pub(super) fn finish_string_read( - io_res: io::Result, - utf8_res: Result, - read: usize, - output: &mut String, - truncate_on_io_error: bool, -) -> Poll> { - match (io_res, utf8_res) { - (Ok(num_bytes), Ok(string)) => { - debug_assert_eq!(read, 0); - *output = string; - Poll::Ready(Ok(num_bytes)) - } - (Err(io_err), Ok(string)) => { - *output = string; - if truncate_on_io_error { - let original_len = output.len() - read; - output.truncate(original_len); - } - Poll::Ready(Err(io_err)) - } - (Ok(num_bytes), Err(utf8_err)) => { - debug_assert_eq!(read, 0); - put_back_original_data(output, utf8_err.into_bytes(), num_bytes); - - Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidData, - "stream did not contain valid UTF-8", - ))) - } - (Err(io_err), Err(utf8_err)) => { - put_back_original_data(output, utf8_err.into_bytes(), read); - - Poll::Ready(Err(io_err)) - } - } -} - -pub(super) fn read_line_internal( - reader: Pin<&mut R>, - cx: &mut Context<'_>, - output: &mut String, - buf: &mut Vec, - read: &mut usize, -) -> Poll> { - let io_res = ready!(read_until_internal(reader, cx, b'\n', buf, read)); - let utf8_res = String::from_utf8(mem::take(buf)); - - // At this point both buf and output are empty. The allocation is in utf8_res. 
- - debug_assert!(buf.is_empty()); - debug_assert!(output.is_empty()); - finish_string_read(io_res, utf8_res, *read, output, false) -} - -impl Future for ReadLine<'_, R> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - read_line_internal(Pin::new(*me.reader), cx, me.output, me.buf, me.read) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -use crate::io::{AsyncRead, ReadBuf}; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::marker::Unpin; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Tries to read some bytes directly into the given `buf` in asynchronous -/// manner, returning a future type. -/// -/// The returned future will resolve to both the I/O stream and the buffer -/// as well as the number of bytes read once the read operation is completed. -pub(crate) fn read<'a, R>(reader: &'a mut R, buf: &'a mut [u8]) -> Read<'a, R> -where - R: AsyncRead + Unpin + ?Sized, -{ - Read { - reader, - buf, - _pin: PhantomPinned, - } -} - -pin_project! { - /// A future which can be used to easily read available number of bytes to fill - /// a buffer. - /// - /// Created by the [`read`] function. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Read<'a, R: ?Sized> { - reader: &'a mut R, - buf: &'a mut [u8], - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } -} - -impl Future for Read<'_, R> -where - R: AsyncRead + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - let mut buf = ReadBuf::new(me.buf); - ready!(Pin::new(me.reader).poll_read(cx, &mut buf))?; - Poll::Ready(Ok(buf.filled().len())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_to_end.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_to_end.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_to_end.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_to_end.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,143 +0,0 @@ -use crate::io::util::vec_with_initialized::{into_read_buf_parts, VecU8, VecWithInitialized}; -use crate::io::{AsyncRead, ReadBuf}; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::mem::{self, MaybeUninit}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadToEnd<'a, R: ?Sized> { - reader: &'a mut R, - buf: VecWithInitialized<&'a mut Vec>, - // The number of bytes appended to buf. This can be less than buf.len() if - // the buffer was not empty when the operation was started. - read: usize, - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } -} - -pub(crate) fn read_to_end<'a, R>(reader: &'a mut R, buffer: &'a mut Vec) -> ReadToEnd<'a, R> -where - R: AsyncRead + Unpin + ?Sized, -{ - ReadToEnd { - reader, - buf: VecWithInitialized::new(buffer), - read: 0, - _pin: PhantomPinned, - } -} - -pub(super) fn read_to_end_internal( - buf: &mut VecWithInitialized, - mut reader: Pin<&mut R>, - num_read: &mut usize, - cx: &mut Context<'_>, -) -> Poll> { - loop { - let ret = ready!(poll_read_to_end(buf, reader.as_mut(), cx)); - match ret { - Err(err) => return Poll::Ready(Err(err)), - Ok(0) => return Poll::Ready(Ok(mem::replace(num_read, 0))), - Ok(num) => { - *num_read += num; - } - } - } -} - -/// Tries to read from the provided AsyncRead. -/// -/// The length of the buffer is increased by the number of bytes read. -fn poll_read_to_end( - buf: &mut VecWithInitialized, - read: Pin<&mut R>, - cx: &mut Context<'_>, -) -> Poll> { - // This uses an adaptive system to extend the vector when it fills. We want to - // avoid paying to allocate and zero a huge chunk of memory if the reader only - // has 4 bytes while still making large reads if the reader does have a ton - // of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every - // time is 4,500 times (!) slower than this if the reader has a very small - // amount of data to return. When the vector is full with its starting - // capacity, we first try to read into a small buffer to see if we reached - // an EOF. This only happens when the starting capacity is >= NUM_BYTES, since - // we allocate at least NUM_BYTES each time. This avoids the unnecessary - // allocation that we attempt before reading into the vector. - - const NUM_BYTES: usize = 32; - let try_small_read = buf.try_small_read_first(NUM_BYTES); - - // Get a ReadBuf into the vector. - let mut read_buf; - let poll_result; - - let n = if try_small_read { - // Read some bytes using a small read. 
- let mut small_buf: [MaybeUninit; NUM_BYTES] = [MaybeUninit::uninit(); NUM_BYTES]; - let mut small_read_buf = ReadBuf::uninit(&mut small_buf); - poll_result = read.poll_read(cx, &mut small_read_buf); - let to_write = small_read_buf.filled(); - - // Ensure we have enough space to fill our vector with what we read. - read_buf = buf.get_read_buf(); - if to_write.len() > read_buf.remaining() { - buf.reserve(NUM_BYTES); - read_buf = buf.get_read_buf(); - } - read_buf.put_slice(to_write); - - to_write.len() - } else { - // Ensure we have enough space for reading. - buf.reserve(NUM_BYTES); - read_buf = buf.get_read_buf(); - - // Read data directly into vector. - let filled_before = read_buf.filled().len(); - poll_result = read.poll_read(cx, &mut read_buf); - - // Compute the number of bytes read. - read_buf.filled().len() - filled_before - }; - - // Update the length of the vector using the result of poll_read. - let read_buf_parts = into_read_buf_parts(read_buf); - buf.apply_read_buf(read_buf_parts); - - match poll_result { - Poll::Pending => { - // In this case, nothing should have been read. However we still - // update the vector in case the poll_read call initialized parts of - // the vector's unused capacity. 
- debug_assert_eq!(n, 0); - Poll::Pending - } - Poll::Ready(Err(err)) => { - debug_assert_eq!(n, 0); - Poll::Ready(Err(err)) - } - Poll::Ready(Ok(())) => Poll::Ready(Ok(n)), - } -} - -impl Future for ReadToEnd<'_, A> -where - A: AsyncRead + ?Sized + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - read_to_end_internal(me.buf, Pin::new(*me.reader), me.read, cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_to_string.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_to_string.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_to_string.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_to_string.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,78 +0,0 @@ -use crate::io::util::read_line::finish_string_read; -use crate::io::util::read_to_end::read_to_end_internal; -use crate::io::util::vec_with_initialized::VecWithInitialized; -use crate::io::AsyncRead; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{io, mem}; - -pin_project! { - /// Future for the [`read_to_string`](super::AsyncReadExt::read_to_string) method. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadToString<'a, R: ?Sized> { - reader: &'a mut R, - // This is the buffer we were provided. It will be replaced with an empty string - // while reading to postpone utf-8 handling until after reading. - output: &'a mut String, - // The actual allocation of the string is moved into this vector instead. - buf: VecWithInitialized>, - // The number of bytes appended to buf. This can be less than buf.len() if - // the buffer was not empty when the operation was started. - read: usize, - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } -} - -pub(crate) fn read_to_string<'a, R>( - reader: &'a mut R, - string: &'a mut String, -) -> ReadToString<'a, R> -where - R: AsyncRead + ?Sized + Unpin, -{ - let buf = mem::take(string).into_bytes(); - ReadToString { - reader, - buf: VecWithInitialized::new(buf), - output: string, - read: 0, - _pin: PhantomPinned, - } -} - -fn read_to_string_internal( - reader: Pin<&mut R>, - output: &mut String, - buf: &mut VecWithInitialized>, - read: &mut usize, - cx: &mut Context<'_>, -) -> Poll> { - let io_res = ready!(read_to_end_internal(buf, reader, read, cx)); - let utf8_res = String::from_utf8(buf.take()); - - // At this point both buf and output are empty. The allocation is in utf8_res. - - debug_assert!(buf.is_empty()); - debug_assert!(output.is_empty()); - finish_string_read(io_res, utf8_res, *read, output, true) -} - -impl Future for ReadToString<'_, A> -where - A: AsyncRead + ?Sized + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - read_to_string_internal(Pin::new(*me.reader), me.output, me.buf, me.read, cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_until.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_until.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/read_until.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/read_until.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,80 +0,0 @@ -use crate::io::AsyncBufRead; -use crate::util::memchr; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Future for the [`read_until`](crate::io::AsyncBufReadExt::read_until) method. - /// The delimiter is included in the resulting vector. 
- #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct ReadUntil<'a, R: ?Sized> { - reader: &'a mut R, - delimiter: u8, - buf: &'a mut Vec, - // The number of bytes appended to buf. This can be less than buf.len() if - // the buffer was not empty when the operation was started. - read: usize, - // Make this future `!Unpin` for compatibility with async trait methods. - #[pin] - _pin: PhantomPinned, - } -} - -pub(crate) fn read_until<'a, R>( - reader: &'a mut R, - delimiter: u8, - buf: &'a mut Vec, -) -> ReadUntil<'a, R> -where - R: AsyncBufRead + ?Sized + Unpin, -{ - ReadUntil { - reader, - delimiter, - buf, - read: 0, - _pin: PhantomPinned, - } -} - -pub(super) fn read_until_internal( - mut reader: Pin<&mut R>, - cx: &mut Context<'_>, - delimiter: u8, - buf: &mut Vec, - read: &mut usize, -) -> Poll> { - loop { - let (done, used) = { - let available = ready!(reader.as_mut().poll_fill_buf(cx))?; - if let Some(i) = memchr::memchr(delimiter, available) { - buf.extend_from_slice(&available[..=i]); - (true, i + 1) - } else { - buf.extend_from_slice(available); - (false, available.len()) - } - }; - reader.as_mut().consume(used); - *read += used; - if done || used == 0 { - return Poll::Ready(Ok(mem::replace(read, 0))); - } - } -} - -impl Future for ReadUntil<'_, R> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - read_until_internal(Pin::new(*me.reader), cx, *me.delimiter, me.buf, me.read) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/repeat.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/repeat.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/repeat.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/repeat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -use crate::io::{AsyncRead, ReadBuf}; - -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! 
{ - /// An async reader which yields one byte over and over and over and over and - /// over and... - /// - /// This struct is generally created by calling [`repeat`][repeat]. Please - /// see the documentation of `repeat()` for more details. - /// - /// This is an asynchronous version of [`std::io::Repeat`][std]. - /// - /// [repeat]: fn@repeat - /// [std]: std::io::Repeat - #[derive(Debug)] - pub struct Repeat { - byte: u8, - } - - /// Creates an instance of an async reader that infinitely repeats one byte. - /// - /// All reads from this reader will succeed by filling the specified buffer with - /// the given byte. - /// - /// This is an asynchronous version of [`std::io::repeat`][std]. - /// - /// [std]: std::io::repeat - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncReadExt}; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut buffer = [0; 3]; - /// io::repeat(0b101).read_exact(&mut buffer).await.unwrap(); - /// assert_eq!(buffer, [0b101, 0b101, 0b101]); - /// } - /// ``` - pub fn repeat(byte: u8) -> Repeat { - Repeat { byte } - } -} - -impl AsyncRead for Repeat { - #[inline] - fn poll_read( - self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - // TODO: could be faster, but should we unsafe it? 
- while buf.remaining() != 0 { - buf.put_slice(&[self.byte]); - } - Poll::Ready(Ok(())) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/shutdown.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/shutdown.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/shutdown.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/shutdown.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -use crate::io::AsyncWrite; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A future used to shutdown an I/O object. - /// - /// Created by the [`AsyncWriteExt::shutdown`][shutdown] function. - /// [shutdown]: crate::io::AsyncWriteExt::shutdown - #[must_use = "futures do nothing unless you `.await` or poll them"] - #[derive(Debug)] - pub struct Shutdown<'a, A: ?Sized> { - a: &'a mut A, - // Make this future `!Unpin` for compatibility with async trait methods. - #[pin] - _pin: PhantomPinned, - } -} - -/// Creates a future which will shutdown an I/O object. 
-pub(super) fn shutdown(a: &mut A) -> Shutdown<'_, A> -where - A: AsyncWrite + Unpin + ?Sized, -{ - Shutdown { - a, - _pin: PhantomPinned, - } -} - -impl Future for Shutdown<'_, A> -where - A: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - Pin::new(me.a).poll_shutdown(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/sink.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/sink.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/sink.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/sink.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,87 +0,0 @@ -use crate::io::AsyncWrite; - -use std::fmt; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - /// An async writer which will move data into the void. - /// - /// This struct is generally created by calling [`sink`][sink]. Please - /// see the documentation of `sink()` for more details. - /// - /// This is an asynchronous version of [`std::io::Sink`][std]. - /// - /// [sink]: sink() - /// [std]: std::io::Sink - pub struct Sink { - _p: (), - } - - /// Creates an instance of an async writer which will successfully consume all - /// data. - /// - /// All calls to [`poll_write`] on the returned instance will return - /// `Poll::Ready(Ok(buf.len()))` and the contents of the buffer will not be - /// inspected. - /// - /// This is an asynchronous version of [`std::io::sink`][std]. 
- /// - /// [`poll_write`]: crate::io::AsyncWrite::poll_write() - /// [std]: std::io::sink - /// - /// # Examples - /// - /// ``` - /// use tokio::io::{self, AsyncWriteExt}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let buffer = vec![1, 2, 3, 5, 8]; - /// let num_bytes = io::sink().write(&buffer).await?; - /// assert_eq!(num_bytes, 5); - /// Ok(()) - /// } - /// ``` - pub fn sink() -> Sink { - Sink { _p: () } - } -} - -impl AsyncWrite for Sink { - #[inline] - fn poll_write( - self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Poll::Ready(Ok(buf.len())) - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - #[inline] - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -impl fmt::Debug for Sink { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Sink { .. }") - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/split.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/split.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/split.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/split.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,121 +0,0 @@ -use crate::io::util::read_until::read_until_internal; -use crate::io::AsyncBufRead; - -use pin_project_lite::pin_project; -use std::io; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Splitter for the [`split`](crate::io::AsyncBufReadExt::split) method. - /// - /// A `Split` can be turned into a `Stream` with [`SplitStream`]. 
- /// - /// [`SplitStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.SplitStream.html - #[derive(Debug)] - #[must_use = "streams do nothing unless polled"] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct Split { - #[pin] - reader: R, - buf: Vec, - delim: u8, - read: usize, - } -} - -pub(crate) fn split(reader: R, delim: u8) -> Split -where - R: AsyncBufRead, -{ - Split { - reader, - buf: Vec::new(), - delim, - read: 0, - } -} - -impl Split -where - R: AsyncBufRead + Unpin, -{ - /// Returns the next segment in the stream. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncBufRead; - /// use tokio::io::AsyncBufReadExt; - /// - /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { - /// let mut segments = my_buf_read.split(b'f'); - /// - /// while let Some(segment) = segments.next_segment().await? { - /// println!("length = {}", segment.len()) - /// } - /// # Ok(()) - /// # } - /// ``` - pub async fn next_segment(&mut self) -> io::Result>> { - use crate::future::poll_fn; - - poll_fn(|cx| Pin::new(&mut *self).poll_next_segment(cx)).await - } -} - -impl Split -where - R: AsyncBufRead, -{ - /// Polls for the next segment in the stream. - /// - /// This method returns: - /// - /// * `Poll::Pending` if the next segment is not yet available. - /// * `Poll::Ready(Ok(Some(segment)))` if the next segment is available. - /// * `Poll::Ready(Ok(None))` if there are no more segments in this stream. - /// * `Poll::Ready(Err(err))` if an IO error occurred while reading the - /// next segment. - /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided - /// `Context` is scheduled to receive a wakeup when more bytes become - /// available on the underlying IO resource. - /// - /// Note that on multiple calls to `poll_next_segment`, only the `Waker` - /// from the `Context` passed to the most recent call is scheduled to - /// receive a wakeup. 
- pub fn poll_next_segment( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>>> { - let me = self.project(); - - let n = ready!(read_until_internal( - me.reader, cx, *me.delim, me.buf, me.read, - ))?; - // read_until_internal resets me.read to zero once it finds the delimiter - debug_assert_eq!(*me.read, 0); - - if n == 0 && me.buf.is_empty() { - return Poll::Ready(Ok(None)); - } - - if me.buf.last() == Some(me.delim) { - me.buf.pop(); - } - - Poll::Ready(Ok(Some(mem::take(me.buf)))) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/take.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/take.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/take.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/take.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,137 +0,0 @@ -use crate::io::{AsyncBufRead, AsyncRead, ReadBuf}; - -use pin_project_lite::pin_project; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{cmp, io}; - -pin_project! { - /// Stream for the [`take`](super::AsyncReadExt::take) method. - #[derive(Debug)] - #[must_use = "streams do nothing unless you `.await` or poll them"] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - pub struct Take { - #[pin] - inner: R, - // Add '_' to avoid conflicts with `limit` method. - limit_: u64, - } -} - -pub(super) fn take(inner: R, limit: u64) -> Take { - Take { - inner, - limit_: limit, - } -} - -impl Take { - /// Returns the remaining number of bytes that can be - /// read before this instance will return EOF. - /// - /// # Note - /// - /// This instance may reach `EOF` after reading fewer bytes than indicated by - /// this method if the underlying [`AsyncRead`] instance reaches EOF. - pub fn limit(&self) -> u64 { - self.limit_ - } - - /// Sets the number of bytes that can be read before this instance will - /// return EOF. 
This is the same as constructing a new `Take` instance, so - /// the amount of bytes read and the previous limit value don't matter when - /// calling this method. - pub fn set_limit(&mut self, limit: u64) { - self.limit_ = limit - } - - /// Gets a reference to the underlying reader. - pub fn get_ref(&self) -> &R { - &self.inner - } - - /// Gets a mutable reference to the underlying reader. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying reader as doing so may corrupt the internal limit of this - /// `Take`. - pub fn get_mut(&mut self) -> &mut R { - &mut self.inner - } - - /// Gets a pinned mutable reference to the underlying reader. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying reader as doing so may corrupt the internal limit of this - /// `Take`. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { - self.project().inner - } - - /// Consumes the `Take`, returning the wrapped reader. 
- pub fn into_inner(self) -> R { - self.inner - } -} - -impl AsyncRead for Take { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - if self.limit_ == 0 { - return Poll::Ready(Ok(())); - } - - let me = self.project(); - let mut b = buf.take(*me.limit_ as usize); - - let buf_ptr = b.filled().as_ptr(); - ready!(me.inner.poll_read(cx, &mut b))?; - assert_eq!(b.filled().as_ptr(), buf_ptr); - - let n = b.filled().len(); - - // We need to update the original ReadBuf - unsafe { - buf.assume_init(n); - } - buf.advance(n); - *me.limit_ -= n as u64; - Poll::Ready(Ok(())) - } -} - -impl AsyncBufRead for Take { - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - - // Don't call into inner reader at all at EOF because it may still block - if *me.limit_ == 0 { - return Poll::Ready(Ok(&[])); - } - - let buf = ready!(me.inner.poll_fill_buf(cx)?); - let cap = cmp::min(buf.len() as u64, *me.limit_) as usize; - Poll::Ready(Ok(&buf[..cap])) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - let me = self.project(); - // Don't let callers reset the limit by passing an overlarge value - let amt = cmp::min(amt as u64, *me.limit_) as usize; - *me.limit_ -= amt as u64; - me.inner.consume(amt); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn assert_unpin() { - crate::is_unpin::>(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/vec_with_initialized.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/vec_with_initialized.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/vec_with_initialized.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/vec_with_initialized.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,142 +0,0 @@ -use crate::io::ReadBuf; -use std::mem::MaybeUninit; - -/// Something that looks like a `Vec`. 
-/// -/// # Safety -/// -/// The implementor must guarantee that the vector returned by the -/// `as_mut` and `as_mut` methods do not change from one call to -/// another. -pub(crate) unsafe trait VecU8: AsRef> + AsMut> {} - -unsafe impl VecU8 for Vec {} -unsafe impl VecU8 for &mut Vec {} - -/// This struct wraps a `Vec` or `&mut Vec`, combining it with a -/// `num_initialized`, which keeps track of the number of initialized bytes -/// in the unused capacity. -/// -/// The purpose of this struct is to remember how many bytes were initialized -/// through a `ReadBuf` from call to call. -/// -/// This struct has the safety invariant that the first `num_initialized` of the -/// vector's allocation must be initialized at any time. -#[derive(Debug)] -pub(crate) struct VecWithInitialized { - vec: V, - // The number of initialized bytes in the vector. - // Always between `vec.len()` and `vec.capacity()`. - num_initialized: usize, - starting_capacity: usize, -} - -impl VecWithInitialized> { - #[cfg(feature = "io-util")] - pub(crate) fn take(&mut self) -> Vec { - self.num_initialized = 0; - std::mem::take(&mut self.vec) - } -} - -impl VecWithInitialized -where - V: VecU8, -{ - pub(crate) fn new(mut vec: V) -> Self { - // SAFETY: The safety invariants of vector guarantee that the bytes up - // to its length are initialized. - Self { - num_initialized: vec.as_mut().len(), - starting_capacity: vec.as_ref().capacity(), - vec, - } - } - - pub(crate) fn reserve(&mut self, num_bytes: usize) { - let vec = self.vec.as_mut(); - if vec.capacity() - vec.len() >= num_bytes { - return; - } - // SAFETY: Setting num_initialized to `vec.len()` is correct as - // `reserve` does not change the length of the vector. 
- self.num_initialized = vec.len(); - vec.reserve(num_bytes); - } - - #[cfg(feature = "io-util")] - pub(crate) fn is_empty(&self) -> bool { - self.vec.as_ref().is_empty() - } - - pub(crate) fn get_read_buf<'a>(&'a mut self) -> ReadBuf<'a> { - let num_initialized = self.num_initialized; - - // SAFETY: Creating the slice is safe because of the safety invariants - // on Vec. The safety invariants of `ReadBuf` will further guarantee - // that no bytes in the slice are de-initialized. - let vec = self.vec.as_mut(); - let len = vec.len(); - let cap = vec.capacity(); - let ptr = vec.as_mut_ptr().cast::>(); - let slice = unsafe { std::slice::from_raw_parts_mut::<'a, MaybeUninit>(ptr, cap) }; - - // SAFETY: This is safe because the safety invariants of - // VecWithInitialized say that the first num_initialized bytes must be - // initialized. - let mut read_buf = ReadBuf::uninit(slice); - unsafe { - read_buf.assume_init(num_initialized); - } - read_buf.set_filled(len); - - read_buf - } - - pub(crate) fn apply_read_buf(&mut self, parts: ReadBufParts) { - let vec = self.vec.as_mut(); - assert_eq!(vec.as_ptr(), parts.ptr); - - // SAFETY: - // The ReadBufParts really does point inside `self.vec` due to the above - // check, and the safety invariants of `ReadBuf` guarantee that the - // first `parts.initialized` bytes of `self.vec` really have been - // initialized. Additionally, `ReadBuf` guarantees that `parts.len` is - // at most `parts.initialized`, so the first `parts.len` bytes are also - // initialized. - // - // Note that this relies on the fact that `V` is either `Vec` or - // `&mut Vec`, so the vector returned by `self.vec.as_mut()` cannot - // change from call to call. - unsafe { - self.num_initialized = parts.initialized; - vec.set_len(parts.len); - } - } - - // Returns a boolean telling the caller to try reading into a small local buffer first if true. - // Doing so would avoid overallocating when vec is filled to capacity and we reached EOF. 
- pub(crate) fn try_small_read_first(&self, num_bytes: usize) -> bool { - let vec = self.vec.as_ref(); - vec.capacity() - vec.len() < num_bytes - && self.starting_capacity == vec.capacity() - && self.starting_capacity >= num_bytes - } -} - -pub(crate) struct ReadBufParts { - // Pointer is only used to check that the ReadBuf actually came from the - // right VecWithInitialized. - ptr: *const u8, - len: usize, - initialized: usize, -} - -// This is needed to release the borrow on `VecWithInitialized`. -pub(crate) fn into_read_buf_parts(rb: ReadBuf<'_>) -> ReadBufParts { - ReadBufParts { - ptr: rb.filled().as_ptr(), - len: rb.filled().len(), - initialized: rb.initialized().len(), - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_all_buf.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_all_buf.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_all_buf.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_all_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -use crate::io::AsyncWrite; - -use bytes::Buf; -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A future to write some of the buffer to an `AsyncWrite`. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct WriteAllBuf<'a, W, B> { - writer: &'a mut W, - buf: &'a mut B, - #[pin] - _pin: PhantomPinned, - } -} - -/// Tries to write some bytes from the given `buf` to the writer in an -/// asynchronous manner, returning a future. 
-pub(crate) fn write_all_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteAllBuf<'a, W, B> -where - W: AsyncWrite + Unpin, - B: Buf, -{ - WriteAllBuf { - writer, - buf, - _pin: PhantomPinned, - } -} - -impl Future for WriteAllBuf<'_, W, B> -where - W: AsyncWrite + Unpin, - B: Buf, -{ - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - while me.buf.has_remaining() { - let n = ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf.chunk())?); - me.buf.advance(n); - if n == 0 { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - } - - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_all.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_all.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_all.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_all.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -use crate::io::AsyncWrite; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::mem; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct WriteAll<'a, W: ?Sized> { - writer: &'a mut W, - buf: &'a [u8], - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } -} - -pub(crate) fn write_all<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> WriteAll<'a, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - WriteAll { - writer, - buf, - _pin: PhantomPinned, - } -} - -impl Future for WriteAll<'_, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - while !me.buf.is_empty() { - let n = ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf))?; - { - let (_, rest) = mem::take(&mut *me.buf).split_at(n); - *me.buf = rest; - } - if n == 0 { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - } - - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_buf.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_buf.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_buf.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -use crate::io::AsyncWrite; - -use bytes::Buf; -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A future to write some of the buffer to an `AsyncWrite`. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct WriteBuf<'a, W, B> { - writer: &'a mut W, - buf: &'a mut B, - #[pin] - _pin: PhantomPinned, - } -} - -/// Tries to write some bytes from the given `buf` to the writer in an -/// asynchronous manner, returning a future. 
-pub(crate) fn write_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteBuf<'a, W, B> -where - W: AsyncWrite + Unpin, - B: Buf, -{ - WriteBuf { - writer, - buf, - _pin: PhantomPinned, - } -} - -impl Future for WriteBuf<'_, W, B> -where - W: AsyncWrite + Unpin, - B: Buf, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - - if !me.buf.has_remaining() { - return Poll::Ready(Ok(0)); - } - - let n = ready!(Pin::new(me.writer).poll_write(cx, me.buf.chunk()))?; - me.buf.advance(n); - Poll::Ready(Ok(n)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_int.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_int.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_int.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_int.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,152 +0,0 @@ -use crate::io::AsyncWrite; - -use bytes::BufMut; -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::mem::size_of; -use std::pin::Pin; -use std::task::{Context, Poll}; - -macro_rules! writer { - ($name:ident, $ty:ty, $writer:ident) => { - writer!($name, $ty, $writer, size_of::<$ty>()); - }; - ($name:ident, $ty:ty, $writer:ident, $bytes:expr) => { - pin_project! { - #[doc(hidden)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct $name { - #[pin] - dst: W, - buf: [u8; $bytes], - written: u8, - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } - } - - impl $name { - pub(crate) fn new(w: W, value: $ty) -> Self { - let mut writer = Self { - buf: [0; $bytes], - written: 0, - dst: w, - _pin: PhantomPinned, - }; - BufMut::$writer(&mut &mut writer.buf[..], value); - writer - } - } - - impl Future for $name - where - W: AsyncWrite, - { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut me = self.project(); - - if *me.written == $bytes as u8 { - return Poll::Ready(Ok(())); - } - - while *me.written < $bytes as u8 { - *me.written += match me - .dst - .as_mut() - .poll_write(cx, &me.buf[*me.written as usize..]) - { - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), - Poll::Ready(Ok(0)) => { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - Poll::Ready(Ok(n)) => n as u8, - }; - } - Poll::Ready(Ok(())) - } - } - }; -} - -macro_rules! writer8 { - ($name:ident, $ty:ty) => { - pin_project! { - #[doc(hidden)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct $name { - #[pin] - dst: W, - byte: $ty, - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } - } - - impl $name { - pub(crate) fn new(dst: W, byte: $ty) -> Self { - Self { - dst, - byte, - _pin: PhantomPinned, - } - } - } - - impl Future for $name - where - W: AsyncWrite, - { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - let buf = [*me.byte as u8]; - - match me.dst.poll_write(cx, &buf[..]) { - Poll::Pending => Poll::Pending, - Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), - Poll::Ready(Ok(0)) => Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Poll::Ready(Ok(1)) => Poll::Ready(Ok(())), - Poll::Ready(Ok(_)) => unreachable!(), - } - } - } - }; -} - -writer8!(WriteU8, u8); -writer8!(WriteI8, i8); - -writer!(WriteU16, u16, put_u16); -writer!(WriteU32, u32, put_u32); -writer!(WriteU64, u64, put_u64); -writer!(WriteU128, u128, put_u128); - -writer!(WriteI16, i16, put_i16); -writer!(WriteI32, i32, put_i32); -writer!(WriteI64, i64, put_i64); -writer!(WriteI128, i128, put_i128); - -writer!(WriteF32, f32, put_f32); -writer!(WriteF64, f64, put_f64); - -writer!(WriteU16Le, u16, put_u16_le); -writer!(WriteU32Le, u32, put_u32_le); -writer!(WriteU64Le, u64, put_u64_le); -writer!(WriteU128Le, u128, put_u128_le); - -writer!(WriteI16Le, i16, put_i16_le); -writer!(WriteI32Le, i32, put_i32_le); -writer!(WriteI64Le, i64, put_i64_le); -writer!(WriteI128Le, i128, put_i128_le); - -writer!(WriteF32Le, f32, put_f32_le); -writer!(WriteF64Le, f64, put_f64_le); diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -use crate::io::AsyncWrite; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::io; -use std::marker::PhantomPinned; -use std::pin::Pin; -use 
std::task::{Context, Poll}; - -pin_project! { - /// A future to write some of the buffer to an `AsyncWrite`. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Write<'a, W: ?Sized> { - writer: &'a mut W, - buf: &'a [u8], - // Make this future `!Unpin` for compatibility with async trait methods. - #[pin] - _pin: PhantomPinned, - } -} - -/// Tries to write some bytes from the given `buf` to the writer in an -/// asynchronous manner, returning a future. -pub(crate) fn write<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> Write<'a, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - Write { - writer, - buf, - _pin: PhantomPinned, - } -} - -impl Future for Write<'_, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - Pin::new(&mut *me.writer).poll_write(cx, me.buf) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_vectored.rs s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_vectored.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/io/util/write_vectored.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/io/util/write_vectored.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,47 +0,0 @@ -use crate::io::AsyncWrite; - -use pin_project_lite::pin_project; -use std::io; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{future::Future, io::IoSlice}; - -pin_project! { - /// A future to write a slice of buffers to an `AsyncWrite`. - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct WriteVectored<'a, 'b, W: ?Sized> { - writer: &'a mut W, - bufs: &'a [IoSlice<'b>], - // Make this future `!Unpin` for compatibility with async trait methods. 
- #[pin] - _pin: PhantomPinned, - } -} - -pub(crate) fn write_vectored<'a, 'b, W>( - writer: &'a mut W, - bufs: &'a [IoSlice<'b>], -) -> WriteVectored<'a, 'b, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - WriteVectored { - writer, - bufs, - _pin: PhantomPinned, - } -} - -impl Future for WriteVectored<'_, '_, W> -where - W: AsyncWrite + Unpin + ?Sized, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - Pin::new(&mut *me.writer).poll_write_vectored(cx, me.bufs) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/lib.rs s390-tools-2.33.1/rust-vendor/tokio/src/lib.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,695 +0,0 @@ -#![allow( - clippy::cognitive_complexity, - clippy::large_enum_variant, - clippy::module_inception, - clippy::needless_doctest_main, - clippy::declare_interior_mutable_const -)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - unreachable_pub -)] -#![deny(unused_must_use)] -#![doc(test( - no_crate_inject, - attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) -))] -#![cfg_attr(docsrs, feature(doc_cfg))] -#![cfg_attr(docsrs, allow(unused_attributes))] -#![cfg_attr(loom, allow(dead_code, unreachable_pub))] - -//! A runtime for writing reliable network applications without compromising speed. -//! -//! Tokio is an event-driven, non-blocking I/O platform for writing asynchronous -//! applications with the Rust programming language. At a high level, it -//! provides a few major components: -//! -//! * Tools for [working with asynchronous tasks][tasks], including -//! [synchronization primitives and channels][sync] and [timeouts, sleeps, and -//! intervals][time]. -//! * APIs for [performing asynchronous I/O][io], including [TCP and UDP][net] sockets, -//! 
[filesystem][fs] operations, and [process] and [signal] management. -//! * A [runtime] for executing asynchronous code, including a task scheduler, -//! an I/O driver backed by the operating system's event queue (epoll, kqueue, -//! IOCP, etc...), and a high performance timer. -//! -//! Guide level documentation is found on the [website]. -//! -//! [tasks]: #working-with-tasks -//! [sync]: crate::sync -//! [time]: crate::time -//! [io]: #asynchronous-io -//! [net]: crate::net -//! [fs]: crate::fs -//! [process]: crate::process -//! [signal]: crate::signal -//! [fs]: crate::fs -//! [runtime]: crate::runtime -//! [website]: https://tokio.rs/tokio/tutorial -//! -//! # A Tour of Tokio -//! -//! Tokio consists of a number of modules that provide a range of functionality -//! essential for implementing asynchronous applications in Rust. In this -//! section, we will take a brief tour of Tokio, summarizing the major APIs and -//! their uses. -//! -//! The easiest way to get started is to enable all features. Do this by -//! enabling the `full` feature flag: -//! -//! ```toml -//! tokio = { version = "1", features = ["full"] } -//! ``` -//! -//! ### Authoring applications -//! -//! Tokio is great for writing applications and most users in this case shouldn't -//! worry too much about what features they should pick. If you're unsure, we suggest -//! going with `full` to ensure that you don't run into any road blocks while you're -//! building your application. -//! -//! #### Example -//! -//! This example shows the quickest way to get started with Tokio. -//! -//! ```toml -//! tokio = { version = "1", features = ["full"] } -//! ``` -//! -//! ### Authoring libraries -//! -//! As a library author your goal should be to provide the lightest weight crate -//! that is based on Tokio. To achieve this you should ensure that you only enable -//! the features you need. This allows users to pick up your crate without having -//! to enable unnecessary features. -//! -//! 
#### Example -//! -//! This example shows how you may want to import features for a library that just -//! needs to `tokio::spawn` and use a `TcpStream`. -//! -//! ```toml -//! tokio = { version = "1", features = ["rt", "net"] } -//! ``` -//! -//! ## Working With Tasks -//! -//! Asynchronous programs in Rust are based around lightweight, non-blocking -//! units of execution called [_tasks_][tasks]. The [`tokio::task`] module provides -//! important tools for working with tasks: -//! -//! * The [`spawn`] function and [`JoinHandle`] type, for scheduling a new task -//! on the Tokio runtime and awaiting the output of a spawned task, respectively, -//! * Functions for [running blocking operations][blocking] in an asynchronous -//! task context. -//! -//! The [`tokio::task`] module is present only when the "rt" feature flag -//! is enabled. -//! -//! [tasks]: task/index.html#what-are-tasks -//! [`tokio::task`]: crate::task -//! [`spawn`]: crate::task::spawn() -//! [`JoinHandle`]: crate::task::JoinHandle -//! [blocking]: task/index.html#blocking-and-yielding -//! -//! The [`tokio::sync`] module contains synchronization primitives to use when -//! needing to communicate or share data. These include: -//! -//! * channels ([`oneshot`], [`mpsc`], [`watch`], and [`broadcast`]), for sending values -//! between tasks, -//! * a non-blocking [`Mutex`], for controlling access to a shared, mutable -//! value, -//! * an asynchronous [`Barrier`] type, for multiple tasks to synchronize before -//! beginning a computation. -//! -//! The `tokio::sync` module is present only when the "sync" feature flag is -//! enabled. -//! -//! [`tokio::sync`]: crate::sync -//! [`Mutex`]: crate::sync::Mutex -//! [`Barrier`]: crate::sync::Barrier -//! [`oneshot`]: crate::sync::oneshot -//! [`mpsc`]: crate::sync::mpsc -//! [`watch`]: crate::sync::watch -//! [`broadcast`]: crate::sync::broadcast -//! -//! The [`tokio::time`] module provides utilities for tracking time and -//! scheduling work. 
This includes functions for setting [timeouts][timeout] for -//! tasks, [sleeping][sleep] work to run in the future, or [repeating an operation at an -//! interval][interval]. -//! -//! In order to use `tokio::time`, the "time" feature flag must be enabled. -//! -//! [`tokio::time`]: crate::time -//! [sleep]: crate::time::sleep() -//! [interval]: crate::time::interval() -//! [timeout]: crate::time::timeout() -//! -//! Finally, Tokio provides a _runtime_ for executing asynchronous tasks. Most -//! applications can use the [`#[tokio::main]`][main] macro to run their code on the -//! Tokio runtime. However, this macro provides only basic configuration options. As -//! an alternative, the [`tokio::runtime`] module provides more powerful APIs for configuring -//! and managing runtimes. You should use that module if the `#[tokio::main]` macro doesn't -//! provide the functionality you need. -//! -//! Using the runtime requires the "rt" or "rt-multi-thread" feature flags, to -//! enable the current-thread [single-threaded scheduler][rt] and the [multi-thread -//! scheduler][rt-multi-thread], respectively. See the [`runtime` module -//! documentation][rt-features] for details. In addition, the "macros" feature -//! flag enables the `#[tokio::main]` and `#[tokio::test]` attributes. -//! -//! [main]: attr.main.html -//! [`tokio::runtime`]: crate::runtime -//! [`Builder`]: crate::runtime::Builder -//! [`Runtime`]: crate::runtime::Runtime -//! [rt]: runtime/index.html#current-thread-scheduler -//! [rt-multi-thread]: runtime/index.html#multi-thread-scheduler -//! [rt-features]: runtime/index.html#runtime-scheduler -//! -//! ## CPU-bound tasks and blocking code -//! -//! Tokio is able to concurrently run many tasks on a few threads by repeatedly -//! swapping the currently running task on each thread. However, this kind of -//! swapping can only happen at `.await` points, so code that spends a long time -//! without reaching an `.await` will prevent other tasks from running. 
To -//! combat this, Tokio provides two kinds of threads: Core threads and blocking threads. -//! -//! The core threads are where all asynchronous code runs, and Tokio will by default -//! spawn one for each CPU core. You can use the environment variable `TOKIO_WORKER_THREADS` -//! to override the default value. -//! -//! The blocking threads are spawned on demand, can be used to run blocking code -//! that would otherwise block other tasks from running and are kept alive when -//! not used for a certain amount of time which can be configured with [`thread_keep_alive`]. -//! Since it is not possible for Tokio to swap out blocking tasks, like it -//! can do with asynchronous code, the upper limit on the number of blocking -//! threads is very large. These limits can be configured on the [`Builder`]. -//! -//! To spawn a blocking task, you should use the [`spawn_blocking`] function. -//! -//! [`Builder`]: crate::runtime::Builder -//! [`spawn_blocking`]: crate::task::spawn_blocking() -//! [`thread_keep_alive`]: crate::runtime::Builder::thread_keep_alive() -//! -//! ``` -//! #[tokio::main] -//! async fn main() { -//! // This is running on a core thread. -//! -//! let blocking_task = tokio::task::spawn_blocking(|| { -//! // This is running on a blocking thread. -//! // Blocking here is ok. -//! }); -//! -//! // We can wait for the blocking task like this: -//! // If the blocking task panics, the unwrap below will propagate the -//! // panic. -//! blocking_task.await.unwrap(); -//! } -//! ``` -//! -//! If your code is CPU-bound and you wish to limit the number of threads used -//! to run it, you should use a separate thread pool dedicated to CPU bound tasks. -//! For example, you could consider using the [rayon] library for CPU-bound -//! tasks. It is also possible to create an extra Tokio runtime dedicated to -//! CPU-bound tasks, but if you do this, you should be careful that the extra -//! runtime runs _only_ CPU-bound tasks, as IO-bound tasks on that runtime -//! 
will behave poorly. -//! -//! Hint: If using rayon, you can use a [`oneshot`] channel to send the result back -//! to Tokio when the rayon task finishes. -//! -//! [rayon]: https://docs.rs/rayon -//! [`oneshot`]: crate::sync::oneshot -//! -//! ## Asynchronous IO -//! -//! As well as scheduling and running tasks, Tokio provides everything you need -//! to perform input and output asynchronously. -//! -//! The [`tokio::io`] module provides Tokio's asynchronous core I/O primitives, -//! the [`AsyncRead`], [`AsyncWrite`], and [`AsyncBufRead`] traits. In addition, -//! when the "io-util" feature flag is enabled, it also provides combinators and -//! functions for working with these traits, forming as an asynchronous -//! counterpart to [`std::io`]. -//! -//! Tokio also includes APIs for performing various kinds of I/O and interacting -//! with the operating system asynchronously. These include: -//! -//! * [`tokio::net`], which contains non-blocking versions of [TCP], [UDP], and -//! [Unix Domain Sockets][UDS] (enabled by the "net" feature flag), -//! * [`tokio::fs`], similar to [`std::fs`] but for performing filesystem I/O -//! asynchronously (enabled by the "fs" feature flag), -//! * [`tokio::signal`], for asynchronously handling Unix and Windows OS signals -//! (enabled by the "signal" feature flag), -//! * [`tokio::process`], for spawning and managing child processes (enabled by -//! the "process" feature flag). -//! -//! [`tokio::io`]: crate::io -//! [`AsyncRead`]: crate::io::AsyncRead -//! [`AsyncWrite`]: crate::io::AsyncWrite -//! [`AsyncBufRead`]: crate::io::AsyncBufRead -//! [`std::io`]: std::io -//! [`tokio::net`]: crate::net -//! [TCP]: crate::net::tcp -//! [UDP]: crate::net::UdpSocket -//! [UDS]: crate::net::unix -//! [`tokio::fs`]: crate::fs -//! [`std::fs`]: std::fs -//! [`tokio::signal`]: crate::signal -//! [`tokio::process`]: crate::process -//! -//! # Examples -//! -//! A simple TCP echo server: -//! -//! ```no_run -//! use tokio::net::TcpListener; -//! 
use tokio::io::{AsyncReadExt, AsyncWriteExt}; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let listener = TcpListener::bind("127.0.0.1:8080").await?; -//! -//! loop { -//! let (mut socket, _) = listener.accept().await?; -//! -//! tokio::spawn(async move { -//! let mut buf = [0; 1024]; -//! -//! // In a loop, read data from the socket and write the data back. -//! loop { -//! let n = match socket.read(&mut buf).await { -//! // socket closed -//! Ok(n) if n == 0 => return, -//! Ok(n) => n, -//! Err(e) => { -//! eprintln!("failed to read from socket; err = {:?}", e); -//! return; -//! } -//! }; -//! -//! // Write the data back -//! if let Err(e) = socket.write_all(&buf[0..n]).await { -//! eprintln!("failed to write to socket; err = {:?}", e); -//! return; -//! } -//! } -//! }); -//! } -//! } -//! ``` -//! -//! ## Feature flags -//! -//! Tokio uses a set of [feature flags] to reduce the amount of compiled code. It -//! is possible to just enable certain features over others. By default, Tokio -//! does not enable any features but allows one to enable a subset for their use -//! case. Below is a list of the available feature flags. You may also notice -//! above each function, struct and trait there is listed one or more feature flags -//! that are required for that item to be used. If you are new to Tokio it is -//! recommended that you use the `full` feature flag which will enable all public APIs. -//! Beware though that this will pull in many extra dependencies that you may not -//! need. -//! -//! - `full`: Enables all features listed below except `test-util` and `tracing`. -//! - `rt`: Enables `tokio::spawn`, the current-thread scheduler, -//! and non-scheduler utilities. -//! - `rt-multi-thread`: Enables the heavier, multi-threaded, work-stealing scheduler. -//! - `io-util`: Enables the IO based `Ext` traits. -//! - `io-std`: Enable `Stdout`, `Stdin` and `Stderr` types. -//! 
- `net`: Enables `tokio::net` types such as `TcpStream`, `UnixStream` and -//! `UdpSocket`, as well as (on Unix-like systems) `AsyncFd` and (on -//! FreeBSD) `PollAio`. -//! - `time`: Enables `tokio::time` types and allows the schedulers to enable -//! the built in timer. -//! - `process`: Enables `tokio::process` types. -//! - `macros`: Enables `#[tokio::main]` and `#[tokio::test]` macros. -//! - `sync`: Enables all `tokio::sync` types. -//! - `signal`: Enables all `tokio::signal` types. -//! - `fs`: Enables `tokio::fs` types. -//! - `test-util`: Enables testing based infrastructure for the Tokio runtime. -//! - `parking_lot`: As a potential optimization, use the _parking_lot_ crate's -//! synchronization primitives internally. Also, this -//! dependency is necessary to construct some of our primitives -//! in a const context. MSRV may increase according to the -//! _parking_lot_ release in use. -//! -//! _Note: `AsyncRead` and `AsyncWrite` traits do not require any features and are -//! always available._ -//! -//! ### Unstable features -//! -//! Some feature flags are only available when specifying the `tokio_unstable` flag: -//! -//! - `tracing`: Enables tracing events. -//! -//! Likewise, some parts of the API are only available with the same flag: -//! -//! - [`task::Builder`] -//! - Some methods on [`task::JoinSet`] -//! - [`runtime::RuntimeMetrics`] -//! - [`runtime::Builder::unhandled_panic`] -//! - [`task::Id`] -//! -//! This flag enables **unstable** features. The public API of these features -//! may break in 1.x releases. To enable these features, the `--cfg -//! tokio_unstable` argument must be passed to `rustc` when compiling. This -//! serves to explicitly opt-in to features which may break semver conventions, -//! since Cargo [does not yet directly support such opt-ins][unstable features]. -//! -//! You can specify it in your project's `.cargo/config.toml` file: -//! -//! ```toml -//! [build] -//! rustflags = ["--cfg", "tokio_unstable"] -//! 
``` -//! -//! Alternatively, you can specify it with an environment variable: -//! -//! ```sh -//! ## Many *nix shells: -//! export RUSTFLAGS="--cfg tokio_unstable" -//! cargo build -//! ``` -//! -//! ```powershell -//! ## Windows PowerShell: -//! $Env:RUSTFLAGS="--cfg tokio_unstable" -//! cargo build -//! ``` -//! -//! [unstable features]: https://internals.rust-lang.org/t/feature-request-unstable-opt-in-non-transitive-crate-features/16193#why-not-a-crate-feature-2 -//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section -//! -//! ## Supported platforms -//! -//! Tokio currently guarantees support for the following platforms: -//! -//! * Linux -//! * Windows -//! * Android (API level 21) -//! * macOS -//! * iOS -//! * FreeBSD -//! -//! Tokio will continue to support these platforms in the future. However, -//! future releases may change requirements such as the minimum required libc -//! version on Linux, the API level on Android, or the supported FreeBSD -//! release. -//! -//! Beyond the above platforms, Tokio is intended to work on all platforms -//! supported by the mio crate. You can find a longer list [in mio's -//! documentation][mio-supported]. However, these additional platforms may -//! become unsupported in the future. -//! -//! Note that Wine is considered to be a different platform from Windows. See -//! mio's documentation for more information on Wine support. -//! -//! [mio-supported]: https://crates.io/crates/mio#platforms -//! -//! ### WASM support -//! -//! Tokio has some limited support for the WASM platform. Without the -//! `tokio_unstable` flag, the following features are supported: -//! -//! * `sync` -//! * `macros` -//! * `io-util` -//! * `rt` -//! * `time` -//! -//! Enabling any other feature (including `full`) will cause a compilation -//! failure. -//! -//! The `time` module will only work on WASM platforms that have support for -//! timers (e.g. wasm32-wasi). 
The timing functions will panic if used on a WASM -//! platform that does not support timers. -//! -//! Note also that if the runtime becomes indefinitely idle, it will panic -//! immediately instead of blocking forever. On platforms that don't support -//! time, this means that the runtime can never be idle in any way. -//! -//! ### Unstable WASM support -//! -//! Tokio also has unstable support for some additional WASM features. This -//! requires the use of the `tokio_unstable` flag. -//! -//! Using this flag enables the use of `tokio::net` on the wasm32-wasi target. -//! However, not all methods are available on the networking types as WASI -//! currently does not support the creation of new sockets from within WASM. -//! Because of this, sockets must currently be created via the `FromRawFd` -//! trait. - -// Test that pointer width is compatible. This asserts that e.g. usize is at -// least 32 bits, which a lot of components in Tokio currently assumes. -// -// TODO: improve once we have MSRV access to const eval to make more flexible. -#[cfg(not(any( - target_pointer_width = "32", - target_pointer_width = "64", - target_pointer_width = "128" -)))] -compile_error! { - "Tokio requires the platform pointer width to be 32, 64, or 128 bits" -} - -#[cfg(all( - not(tokio_unstable), - target_family = "wasm", - any( - feature = "fs", - feature = "io-std", - feature = "net", - feature = "process", - feature = "rt-multi-thread", - feature = "signal" - ) -))] -compile_error!("Only features sync,macros,io-util,rt,time are supported on wasm."); - -#[cfg(all(not(tokio_unstable), tokio_taskdump))] -compile_error!("The `tokio_taskdump` feature requires `--cfg tokio_unstable`."); - -#[cfg(all( - tokio_taskdump, - not(all( - target_os = "linux", - any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") - )) -))] -compile_error!( - "The `tokio_taskdump` feature is only currently supported on \ -linux, on `aarch64`, `x86` and `x86_64`." 
-); - -// Includes re-exports used by macros. -// -// This module is not intended to be part of the public API. In general, any -// `doc(hidden)` code is not part of Tokio's public and stable API. -#[macro_use] -#[doc(hidden)] -pub mod macros; - -cfg_fs! { - pub mod fs; -} - -mod future; - -pub mod io; -pub mod net; - -mod loom; - -cfg_process! { - pub mod process; -} - -#[cfg(any( - feature = "fs", - feature = "io-std", - feature = "net", - all(windows, feature = "process"), -))] -mod blocking; - -cfg_rt! { - pub mod runtime; -} -cfg_not_rt! { - pub(crate) mod runtime; -} - -cfg_signal! { - pub mod signal; -} - -cfg_signal_internal! { - #[cfg(not(feature = "signal"))] - #[allow(dead_code)] - #[allow(unreachable_pub)] - pub(crate) mod signal; -} - -cfg_sync! { - pub mod sync; -} -cfg_not_sync! { - mod sync; -} - -pub mod task; -cfg_rt! { - pub use task::spawn; -} - -cfg_time! { - pub mod time; -} - -mod trace { - use std::future::Future; - use std::pin::Pin; - use std::task::{Context, Poll}; - - cfg_taskdump! { - pub(crate) use crate::runtime::task::trace::trace_leaf; - } - - cfg_not_taskdump! { - #[inline(always)] - #[allow(dead_code)] - pub(crate) fn trace_leaf(_: &mut std::task::Context<'_>) -> std::task::Poll<()> { - std::task::Poll::Ready(()) - } - } - - #[cfg_attr(not(feature = "sync"), allow(dead_code))] - pub(crate) fn async_trace_leaf() -> impl Future { - struct Trace; - - impl Future for Trace { - type Output = (); - - #[inline(always)] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - trace_leaf(cx) - } - } - - Trace - } -} - -mod util; - -/// Due to the `Stream` trait's inclusion in `std` landing later than Tokio's 1.0 -/// release, most of the Tokio stream utilities have been moved into the [`tokio-stream`] -/// crate. -/// -/// # Why was `Stream` not included in Tokio 1.0? 
-/// -/// Originally, we had planned to ship Tokio 1.0 with a stable `Stream` type -/// but unfortunately the [RFC] had not been merged in time for `Stream` to -/// reach `std` on a stable compiler in time for the 1.0 release of Tokio. For -/// this reason, the team has decided to move all `Stream` based utilities to -/// the [`tokio-stream`] crate. While this is not ideal, once `Stream` has made -/// it into the standard library and the MSRV period has passed, we will implement -/// stream for our different types. -/// -/// While this may seem unfortunate, not all is lost as you can get much of the -/// `Stream` support with `async/await` and `while let` loops. It is also possible -/// to create a `impl Stream` from `async fn` using the [`async-stream`] crate. -/// -/// [`tokio-stream`]: https://docs.rs/tokio-stream -/// [`async-stream`]: https://docs.rs/async-stream -/// [RFC]: https://github.com/rust-lang/rfcs/pull/2996 -/// -/// # Example -/// -/// Convert a [`sync::mpsc::Receiver`] to an `impl Stream`. -/// -/// ```rust,no_run -/// use tokio::sync::mpsc; -/// -/// let (tx, mut rx) = mpsc::channel::(16); -/// -/// let stream = async_stream::stream! { -/// while let Some(item) = rx.recv().await { -/// yield item; -/// } -/// }; -/// ``` -pub mod stream {} - -// local re-exports of platform specific things, allowing for decent -// documentation to be shimmed in on docs.rs - -#[cfg(docsrs)] -pub mod doc; - -#[cfg(docsrs)] -#[allow(unused)] -pub(crate) use self::doc::os; - -#[cfg(not(docsrs))] -#[allow(unused)] -pub(crate) use std::os; - -cfg_macros! { - /// Implementation detail of the `select!` macro. This macro is **not** - /// intended to be used as part of the public API and is permitted to - /// change. - #[doc(hidden)] - pub use tokio_macros::select_priv_declare_output_enum; - - /// Implementation detail of the `select!` macro. This macro is **not** - /// intended to be used as part of the public API and is permitted to - /// change. 
- #[doc(hidden)] - pub use tokio_macros::select_priv_clean_pattern; - - cfg_rt! { - #[cfg(feature = "rt-multi-thread")] - #[cfg(not(test))] // Work around for rust-lang/rust#62127 - #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] - #[doc(inline)] - pub use tokio_macros::main; - - #[cfg(feature = "rt-multi-thread")] - #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] - #[doc(inline)] - pub use tokio_macros::test; - - cfg_not_rt_multi_thread! { - #[cfg(not(test))] // Work around for rust-lang/rust#62127 - #[doc(inline)] - pub use tokio_macros::main_rt as main; - - #[doc(inline)] - pub use tokio_macros::test_rt as test; - } - } - - // Always fail if rt is not enabled. - cfg_not_rt! { - #[cfg(not(test))] - #[doc(inline)] - pub use tokio_macros::main_fail as main; - - #[doc(inline)] - pub use tokio_macros::test_fail as test; - } -} - -// TODO: rm -#[cfg(feature = "io-util")] -#[cfg(test)] -fn is_unpin() {} - -/// fuzz test (fuzz_linked_list) -#[cfg(fuzzing)] -pub mod fuzz; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/mocked.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/mocked.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/mocked.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/mocked.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,53 +0,0 @@ -pub(crate) use loom::*; - -pub(crate) mod sync { - - pub(crate) use loom::sync::MutexGuard; - - #[derive(Debug)] - pub(crate) struct Mutex(loom::sync::Mutex); - - #[allow(dead_code)] - impl Mutex { - #[inline] - pub(crate) fn new(t: T) -> Mutex { - Mutex(loom::sync::Mutex::new(t)) - } - - #[inline] - #[track_caller] - pub(crate) fn lock(&self) -> MutexGuard<'_, T> { - self.0.lock().unwrap() - } - - #[inline] - pub(crate) fn try_lock(&self) -> Option> { - self.0.try_lock().ok() - } - } - pub(crate) use loom::sync::*; - - pub(crate) mod atomic { - pub(crate) use loom::sync::atomic::*; - - // TODO: implement a loom version - pub(crate) type StaticAtomicU64 = 
std::sync::atomic::AtomicU64; - } -} - -pub(crate) mod rand { - pub(crate) fn seed() -> u64 { - 1 - } -} - -pub(crate) mod sys { - pub(crate) fn num_cpus() -> usize { - 2 - } -} - -pub(crate) mod thread { - pub use loom::lazy_static::AccessError; - pub use loom::thread::*; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,14 +0,0 @@ -//! This module abstracts over `loom` and `std::sync` depending on whether we -//! are running tests or not. - -#![allow(unused)] - -#[cfg(not(all(test, loom)))] -mod std; -#[cfg(not(all(test, loom)))] -pub(crate) use self::std::*; - -#[cfg(all(test, loom))] -mod mocked; -#[cfg(all(test, loom))] -pub(crate) use self::mocked::*; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u16.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u16.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u16.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u16.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,44 +0,0 @@ -use std::cell::UnsafeCell; -use std::fmt; -use std::ops::Deref; - -/// `AtomicU16` providing an additional `unsync_load` function. -pub(crate) struct AtomicU16 { - inner: UnsafeCell, -} - -unsafe impl Send for AtomicU16 {} -unsafe impl Sync for AtomicU16 {} - -impl AtomicU16 { - pub(crate) const fn new(val: u16) -> AtomicU16 { - let inner = UnsafeCell::new(std::sync::atomic::AtomicU16::new(val)); - AtomicU16 { inner } - } - - /// Performs an unsynchronized load. - /// - /// # Safety - /// - /// All mutations must have happened before the unsynchronized load. - /// Additionally, there must be no concurrent mutations. 
- pub(crate) unsafe fn unsync_load(&self) -> u16 { - core::ptr::read(self.inner.get() as *const u16) - } -} - -impl Deref for AtomicU16 { - type Target = std::sync::atomic::AtomicU16; - - fn deref(&self) -> &Self::Target { - // safety: it is always safe to access `&self` fns on the inner value as - // we never perform unsafe mutations. - unsafe { &*self.inner.get() } - } -} - -impl fmt::Debug for AtomicU16 { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.deref().fmt(fmt) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u32.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u32.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u32.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u32.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,44 +0,0 @@ -use std::cell::UnsafeCell; -use std::fmt; -use std::ops::Deref; - -/// `AtomicU32` providing an additional `unsync_load` function. -pub(crate) struct AtomicU32 { - inner: UnsafeCell, -} - -unsafe impl Send for AtomicU32 {} -unsafe impl Sync for AtomicU32 {} - -impl AtomicU32 { - pub(crate) const fn new(val: u32) -> AtomicU32 { - let inner = UnsafeCell::new(std::sync::atomic::AtomicU32::new(val)); - AtomicU32 { inner } - } - - /// Performs an unsynchronized load. - /// - /// # Safety - /// - /// All mutations must have happened before the unsynchronized load. - /// Additionally, there must be no concurrent mutations. - pub(crate) unsafe fn unsync_load(&self) -> u32 { - core::ptr::read(self.inner.get() as *const u32) - } -} - -impl Deref for AtomicU32 { - type Target = std::sync::atomic::AtomicU32; - - fn deref(&self) -> &Self::Target { - // safety: it is always safe to access `&self` fns on the inner value as - // we never perform unsafe mutations. 
- unsafe { &*self.inner.get() } - } -} - -impl fmt::Debug for AtomicU32 { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.deref().fmt(fmt) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64_as_mutex.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64_as_mutex.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64_as_mutex.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64_as_mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -use crate::loom::sync::Mutex; -use std::sync::atomic::Ordering; - -cfg_has_const_mutex_new! { - #[path = "atomic_u64_static_const_new.rs"] - mod static_macro; -} - -cfg_not_has_const_mutex_new! { - #[path = "atomic_u64_static_once_cell.rs"] - mod static_macro; -} - -pub(crate) use static_macro::StaticAtomicU64; - -#[derive(Debug)] -pub(crate) struct AtomicU64 { - inner: Mutex, -} - -impl AtomicU64 { - pub(crate) fn load(&self, _: Ordering) -> u64 { - *self.inner.lock() - } - - pub(crate) fn store(&self, val: u64, _: Ordering) { - *self.inner.lock() = val; - } - - pub(crate) fn fetch_add(&self, val: u64, _: Ordering) -> u64 { - let mut lock = self.inner.lock(); - let prev = *lock; - *lock = prev + val; - prev - } - - pub(crate) fn fetch_or(&self, val: u64, _: Ordering) -> u64 { - let mut lock = self.inner.lock(); - let prev = *lock; - *lock = prev | val; - prev - } - - pub(crate) fn compare_exchange( - &self, - current: u64, - new: u64, - _success: Ordering, - _failure: Ordering, - ) -> Result { - let mut lock = self.inner.lock(); - - if *lock == current { - *lock = new; - Ok(current) - } else { - Err(*lock) - } - } - - pub(crate) fn compare_exchange_weak( - &self, - current: u64, - new: u64, - success: Ordering, - failure: Ordering, - ) -> Result { - self.compare_exchange(current, new, success, failure) - } -} - -impl Default for AtomicU64 { - fn default() -> AtomicU64 { - AtomicU64::new(u64::default()) - } -} 
diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64_native.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64_native.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64_native.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64_native.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,4 +0,0 @@ -pub(crate) use std::sync::atomic::{AtomicU64, Ordering}; - -/// Alias `AtomicU64` to `StaticAtomicU64` -pub(crate) type StaticAtomicU64 = AtomicU64; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,19 +0,0 @@ -//! Implementation of an atomic u64 cell. On 64 bit platforms, this is a -//! re-export of `AtomicU64`. On 32 bit platforms, this is implemented using a -//! `Mutex`. - -// `AtomicU64` can only be used on targets with `target_has_atomic` is 64 or greater. -// Once `cfg_target_has_atomic` feature is stable, we can replace it with -// `#[cfg(target_has_atomic = "64")]`. -// Refs: https://github.com/rust-lang/rust/tree/master/src/librustc_target -cfg_has_atomic_u64! { - #[path = "atomic_u64_native.rs"] - mod imp; -} - -cfg_not_has_atomic_u64! 
{ - #[path = "atomic_u64_as_mutex.rs"] - mod imp; -} - -pub(crate) use imp::{AtomicU64, StaticAtomicU64}; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64_static_const_new.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64_static_const_new.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64_static_const_new.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64_static_const_new.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -use super::AtomicU64; -use crate::loom::sync::Mutex; - -pub(crate) type StaticAtomicU64 = AtomicU64; - -impl AtomicU64 { - pub(crate) const fn new(val: u64) -> Self { - Self { - inner: Mutex::const_new(val), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64_static_once_cell.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64_static_once_cell.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_u64_static_once_cell.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_u64_static_once_cell.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,57 +0,0 @@ -use super::AtomicU64; -use crate::loom::sync::{atomic::Ordering, Mutex}; -use crate::util::once_cell::OnceCell; - -pub(crate) struct StaticAtomicU64 { - init: u64, - cell: OnceCell>, -} - -impl AtomicU64 { - pub(crate) fn new(val: u64) -> Self { - Self { - inner: Mutex::new(val), - } - } -} - -impl StaticAtomicU64 { - pub(crate) const fn new(val: u64) -> StaticAtomicU64 { - StaticAtomicU64 { - init: val, - cell: OnceCell::new(), - } - } - - pub(crate) fn load(&self, order: Ordering) -> u64 { - *self.inner().lock() - } - - pub(crate) fn fetch_add(&self, val: u64, order: Ordering) -> u64 { - let mut lock = self.inner().lock(); - let prev = *lock; - *lock = prev + val; - prev - } - - pub(crate) fn compare_exchange_weak( - &self, - current: u64, - new: u64, - _success: Ordering, - _failure: Ordering, - ) -> 
Result { - let mut lock = self.inner().lock(); - - if *lock == current { - *lock = new; - Ok(current) - } else { - Err(*lock) - } - } - - fn inner(&self) -> &Mutex { - self.cell.get(|| Mutex::new(self.init)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_usize.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_usize.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/atomic_usize.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/atomic_usize.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -use std::cell::UnsafeCell; -use std::fmt; -use std::ops; - -/// `AtomicUsize` providing an additional `unsync_load` function. -pub(crate) struct AtomicUsize { - inner: UnsafeCell, -} - -unsafe impl Send for AtomicUsize {} -unsafe impl Sync for AtomicUsize {} - -impl AtomicUsize { - pub(crate) const fn new(val: usize) -> AtomicUsize { - let inner = UnsafeCell::new(std::sync::atomic::AtomicUsize::new(val)); - AtomicUsize { inner } - } - - /// Performs an unsynchronized load. - /// - /// # Safety - /// - /// All mutations must have happened before the unsynchronized load. - /// Additionally, there must be no concurrent mutations. - pub(crate) unsafe fn unsync_load(&self) -> usize { - core::ptr::read(self.inner.get() as *const usize) - } - - pub(crate) fn with_mut(&mut self, f: impl FnOnce(&mut usize) -> R) -> R { - // safety: we have mutable access - f(unsafe { (*self.inner.get()).get_mut() }) - } -} - -impl ops::Deref for AtomicUsize { - type Target = std::sync::atomic::AtomicUsize; - - fn deref(&self) -> &Self::Target { - // safety: it is always safe to access `&self` fns on the inner value as - // we never perform unsafe mutations. 
- unsafe { &*self.inner.get() } - } -} - -impl ops::DerefMut for AtomicUsize { - fn deref_mut(&mut self) -> &mut Self::Target { - // safety: we hold `&mut self` - unsafe { &mut *self.inner.get() } - } -} - -impl fmt::Debug for AtomicUsize { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(fmt) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/barrier.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/barrier.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/barrier.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/barrier.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,217 +0,0 @@ -//! A `Barrier` that provides `wait_timeout`. -//! -//! This implementation mirrors that of the Rust standard library. - -use crate::loom::sync::{Condvar, Mutex}; -use std::fmt; -use std::time::{Duration, Instant}; - -/// A barrier enables multiple threads to synchronize the beginning -/// of some computation. -/// -/// # Examples -/// -/// ``` -/// use std::sync::{Arc, Barrier}; -/// use std::thread; -/// -/// let mut handles = Vec::with_capacity(10); -/// let barrier = Arc::new(Barrier::new(10)); -/// for _ in 0..10 { -/// let c = Arc::clone(&barrier); -/// // The same messages will be printed together. -/// // You will NOT see any interleaving. -/// handles.push(thread::spawn(move|| { -/// println!("before wait"); -/// c.wait(); -/// println!("after wait"); -/// })); -/// } -/// // Wait for other threads to finish. -/// for handle in handles { -/// handle.join().unwrap(); -/// } -/// ``` -pub(crate) struct Barrier { - lock: Mutex, - cvar: Condvar, - num_threads: usize, -} - -// The inner state of a double barrier -struct BarrierState { - count: usize, - generation_id: usize, -} - -/// A `BarrierWaitResult` is returned by [`Barrier::wait()`] when all threads -/// in the [`Barrier`] have rendezvoused. 
-/// -/// # Examples -/// -/// ``` -/// use std::sync::Barrier; -/// -/// let barrier = Barrier::new(1); -/// let barrier_wait_result = barrier.wait(); -/// ``` -pub(crate) struct BarrierWaitResult(bool); - -impl fmt::Debug for Barrier { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Barrier").finish_non_exhaustive() - } -} - -impl Barrier { - /// Creates a new barrier that can block a given number of threads. - /// - /// A barrier will block `n`-1 threads which call [`wait()`] and then wake - /// up all threads at once when the `n`th thread calls [`wait()`]. - /// - /// [`wait()`]: Barrier::wait - /// - /// # Examples - /// - /// ``` - /// use std::sync::Barrier; - /// - /// let barrier = Barrier::new(10); - /// ``` - #[must_use] - pub(crate) fn new(n: usize) -> Barrier { - Barrier { - lock: Mutex::new(BarrierState { - count: 0, - generation_id: 0, - }), - cvar: Condvar::new(), - num_threads: n, - } - } - - /// Blocks the current thread until all threads have rendezvoused here. - /// - /// Barriers are re-usable after all threads have rendezvoused once, and can - /// be used continuously. - /// - /// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that - /// returns `true` from [`BarrierWaitResult::is_leader()`] when returning - /// from this function, and all other threads will receive a result that - /// will return `false` from [`BarrierWaitResult::is_leader()`]. - /// - /// # Examples - /// - /// ``` - /// use std::sync::{Arc, Barrier}; - /// use std::thread; - /// - /// let mut handles = Vec::with_capacity(10); - /// let barrier = Arc::new(Barrier::new(10)); - /// for _ in 0..10 { - /// let c = Arc::clone(&barrier); - /// // The same messages will be printed together. - /// // You will NOT see any interleaving. - /// handles.push(thread::spawn(move|| { - /// println!("before wait"); - /// c.wait(); - /// println!("after wait"); - /// })); - /// } - /// // Wait for other threads to finish. 
- /// for handle in handles { - /// handle.join().unwrap(); - /// } - /// ``` - pub(crate) fn wait(&self) -> BarrierWaitResult { - let mut lock = self.lock.lock(); - let local_gen = lock.generation_id; - lock.count += 1; - if lock.count < self.num_threads { - // We need a while loop to guard against spurious wakeups. - // https://en.wikipedia.org/wiki/Spurious_wakeup - while local_gen == lock.generation_id { - lock = self.cvar.wait(lock).unwrap(); - } - BarrierWaitResult(false) - } else { - lock.count = 0; - lock.generation_id = lock.generation_id.wrapping_add(1); - self.cvar.notify_all(); - BarrierWaitResult(true) - } - } - - /// Blocks the current thread until all threads have rendezvoused here for - /// at most `timeout` duration. - pub(crate) fn wait_timeout(&self, timeout: Duration) -> Option { - // This implementation mirrors `wait`, but with each blocking operation - // replaced by a timeout-amenable alternative. - - let deadline = Instant::now() + timeout; - - // Acquire `self.lock` with at most `timeout` duration. - let mut lock = loop { - if let Some(guard) = self.lock.try_lock() { - break guard; - } else if Instant::now() > deadline { - return None; - } else { - std::thread::yield_now(); - } - }; - - // Shrink the `timeout` to account for the time taken to acquire `lock`. - let timeout = deadline.saturating_duration_since(Instant::now()); - - let local_gen = lock.generation_id; - lock.count += 1; - if lock.count < self.num_threads { - // We need a while loop to guard against spurious wakeups. 
- // https://en.wikipedia.org/wiki/Spurious_wakeup - while local_gen == lock.generation_id { - let (guard, timeout_result) = self.cvar.wait_timeout(lock, timeout).unwrap(); - lock = guard; - if timeout_result.timed_out() { - return None; - } - } - Some(BarrierWaitResult(false)) - } else { - lock.count = 0; - lock.generation_id = lock.generation_id.wrapping_add(1); - self.cvar.notify_all(); - Some(BarrierWaitResult(true)) - } - } -} - -impl fmt::Debug for BarrierWaitResult { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BarrierWaitResult") - .field("is_leader", &self.is_leader()) - .finish() - } -} - -impl BarrierWaitResult { - /// Returns `true` if this thread is the "leader thread" for the call to - /// [`Barrier::wait()`]. - /// - /// Only one thread will have `true` returned from their result, all other - /// threads will have `false` returned. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Barrier; - /// - /// let barrier = Barrier::new(1); - /// let barrier_wait_result = barrier.wait(); - /// println!("{:?}", barrier_wait_result.is_leader()); - /// ``` - #[must_use] - pub(crate) fn is_leader(&self) -> bool { - self.0 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,127 +0,0 @@ -#![cfg_attr(any(not(feature = "full"), loom), allow(unused_imports, dead_code))] - -mod atomic_u16; -mod atomic_u32; -mod atomic_u64; -mod atomic_usize; -mod barrier; -mod mutex; -#[cfg(all(feature = "parking_lot", not(miri)))] -mod parking_lot; -mod unsafe_cell; - -pub(crate) mod cell { - pub(crate) use super::unsafe_cell::UnsafeCell; -} - -#[cfg(any( - feature = "net", - feature = "process", - feature = "signal", - feature = "sync", -))] -pub(crate) mod future { - 
pub(crate) use crate::sync::AtomicWaker; -} - -pub(crate) mod hint { - pub(crate) use std::hint::spin_loop; -} - -pub(crate) mod rand { - use std::collections::hash_map::RandomState; - use std::hash::{BuildHasher, Hash, Hasher}; - use std::sync::atomic::AtomicU32; - use std::sync::atomic::Ordering::Relaxed; - - static COUNTER: AtomicU32 = AtomicU32::new(1); - - pub(crate) fn seed() -> u64 { - let rand_state = RandomState::new(); - - let mut hasher = rand_state.build_hasher(); - - // Hash some unique-ish data to generate some new state - COUNTER.fetch_add(1, Relaxed).hash(&mut hasher); - - // Get the seed - hasher.finish() - } -} - -pub(crate) mod sync { - pub(crate) use std::sync::{Arc, Weak}; - - // Below, make sure all the feature-influenced types are exported for - // internal use. Note however that some are not _currently_ named by - // consuming code. - - #[cfg(all(feature = "parking_lot", not(miri)))] - #[allow(unused_imports)] - pub(crate) use crate::loom::std::parking_lot::{ - Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, WaitTimeoutResult, - }; - - #[cfg(not(all(feature = "parking_lot", not(miri))))] - #[allow(unused_imports)] - pub(crate) use std::sync::{Condvar, MutexGuard, RwLock, RwLockReadGuard, WaitTimeoutResult}; - - #[cfg(not(all(feature = "parking_lot", not(miri))))] - pub(crate) use crate::loom::std::mutex::Mutex; - - pub(crate) mod atomic { - pub(crate) use crate::loom::std::atomic_u16::AtomicU16; - pub(crate) use crate::loom::std::atomic_u32::AtomicU32; - pub(crate) use crate::loom::std::atomic_u64::{AtomicU64, StaticAtomicU64}; - pub(crate) use crate::loom::std::atomic_usize::AtomicUsize; - - pub(crate) use std::sync::atomic::{fence, AtomicBool, AtomicPtr, AtomicU8, Ordering}; - } - - pub(crate) use super::barrier::Barrier; -} - -pub(crate) mod sys { - #[cfg(feature = "rt-multi-thread")] - pub(crate) fn num_cpus() -> usize { - const ENV_WORKER_THREADS: &str = "TOKIO_WORKER_THREADS"; - - match std::env::var(ENV_WORKER_THREADS) { - Ok(s) 
=> { - let n = s.parse().unwrap_or_else(|e| { - panic!( - "\"{}\" must be usize, error: {}, value: {}", - ENV_WORKER_THREADS, e, s - ) - }); - assert!(n > 0, "\"{}\" cannot be set to 0", ENV_WORKER_THREADS); - n - } - Err(std::env::VarError::NotPresent) => usize::max(1, num_cpus::get()), - Err(std::env::VarError::NotUnicode(e)) => { - panic!( - "\"{}\" must be valid unicode, error: {:?}", - ENV_WORKER_THREADS, e - ) - } - } - } - - #[cfg(not(feature = "rt-multi-thread"))] - pub(crate) fn num_cpus() -> usize { - 1 - } -} - -pub(crate) mod thread { - #[inline] - pub(crate) fn yield_now() { - std::hint::spin_loop(); - } - - #[allow(unused_imports)] - pub(crate) use std::thread::{ - current, panicking, park, park_timeout, sleep, spawn, AccessError, Builder, JoinHandle, - LocalKey, Result, Thread, ThreadId, - }; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/mutex.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/mutex.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/mutex.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,36 +0,0 @@ -use std::sync::{self, MutexGuard, TryLockError}; - -/// Adapter for `std::Mutex` that removes the poisoning aspects -/// from its api. 
-#[derive(Debug)] -pub(crate) struct Mutex(sync::Mutex); - -#[allow(dead_code)] -impl Mutex { - #[inline] - pub(crate) fn new(t: T) -> Mutex { - Mutex(sync::Mutex::new(t)) - } - - #[inline] - pub(crate) const fn const_new(t: T) -> Mutex { - Mutex(sync::Mutex::new(t)) - } - - #[inline] - pub(crate) fn lock(&self) -> MutexGuard<'_, T> { - match self.0.lock() { - Ok(guard) => guard, - Err(p_err) => p_err.into_inner(), - } - } - - #[inline] - pub(crate) fn try_lock(&self) -> Option> { - match self.0.try_lock() { - Ok(guard) => Some(guard), - Err(TryLockError::Poisoned(p_err)) => Some(p_err.into_inner()), - Err(TryLockError::WouldBlock) => None, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/parking_lot.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/parking_lot.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/parking_lot.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/parking_lot.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,183 +0,0 @@ -//! A minimal adaption of the `parking_lot` synchronization primitives to the -//! equivalent `std::sync` types. -//! -//! This can be extended to additional types/methods as required. - -use std::fmt; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut}; -use std::sync::LockResult; -use std::time::Duration; - -// All types in this file are marked with PhantomData to ensure that -// parking_lot's send_guard feature does not leak through and affect when Tokio -// types are Send. -// -// See for more info. 
- -// Types that do not need wrapping -pub(crate) use parking_lot::WaitTimeoutResult; - -#[derive(Debug)] -pub(crate) struct Mutex(PhantomData>, parking_lot::Mutex); - -#[derive(Debug)] -pub(crate) struct RwLock(PhantomData>, parking_lot::RwLock); - -#[derive(Debug)] -pub(crate) struct Condvar(PhantomData, parking_lot::Condvar); - -#[derive(Debug)] -pub(crate) struct MutexGuard<'a, T: ?Sized>( - PhantomData>, - parking_lot::MutexGuard<'a, T>, -); - -#[derive(Debug)] -pub(crate) struct RwLockReadGuard<'a, T: ?Sized>( - PhantomData>, - parking_lot::RwLockReadGuard<'a, T>, -); - -#[derive(Debug)] -pub(crate) struct RwLockWriteGuard<'a, T: ?Sized>( - PhantomData>, - parking_lot::RwLockWriteGuard<'a, T>, -); - -impl Mutex { - #[inline] - pub(crate) fn new(t: T) -> Mutex { - Mutex(PhantomData, parking_lot::Mutex::new(t)) - } - - #[inline] - #[cfg(not(all(loom, test)))] - pub(crate) const fn const_new(t: T) -> Mutex { - Mutex(PhantomData, parking_lot::const_mutex(t)) - } - - #[inline] - pub(crate) fn lock(&self) -> MutexGuard<'_, T> { - MutexGuard(PhantomData, self.1.lock()) - } - - #[inline] - pub(crate) fn try_lock(&self) -> Option> { - self.1 - .try_lock() - .map(|guard| MutexGuard(PhantomData, guard)) - } - - #[inline] - pub(crate) fn get_mut(&mut self) -> &mut T { - self.1.get_mut() - } - - // Note: Additional methods `is_poisoned` and `into_inner`, can be - // provided here as needed. 
-} - -impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> { - type Target = T; - fn deref(&self) -> &T { - self.1.deref() - } -} - -impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { - fn deref_mut(&mut self) -> &mut T { - self.1.deref_mut() - } -} - -impl RwLock { - pub(crate) fn new(t: T) -> RwLock { - RwLock(PhantomData, parking_lot::RwLock::new(t)) - } - - pub(crate) fn read(&self) -> LockResult> { - Ok(RwLockReadGuard(PhantomData, self.1.read())) - } - - pub(crate) fn write(&self) -> LockResult> { - Ok(RwLockWriteGuard(PhantomData, self.1.write())) - } -} - -impl<'a, T: ?Sized> Deref for RwLockReadGuard<'a, T> { - type Target = T; - fn deref(&self) -> &T { - self.1.deref() - } -} - -impl<'a, T: ?Sized> Deref for RwLockWriteGuard<'a, T> { - type Target = T; - fn deref(&self) -> &T { - self.1.deref() - } -} - -impl<'a, T: ?Sized> DerefMut for RwLockWriteGuard<'a, T> { - fn deref_mut(&mut self) -> &mut T { - self.1.deref_mut() - } -} - -impl Condvar { - #[inline] - pub(crate) fn new() -> Condvar { - Condvar(PhantomData, parking_lot::Condvar::new()) - } - - #[inline] - pub(crate) fn notify_one(&self) { - self.1.notify_one(); - } - - #[inline] - pub(crate) fn notify_all(&self) { - self.1.notify_all(); - } - - #[inline] - pub(crate) fn wait<'a, T>( - &self, - mut guard: MutexGuard<'a, T>, - ) -> LockResult> { - self.1.wait(&mut guard.1); - Ok(guard) - } - - #[inline] - pub(crate) fn wait_timeout<'a, T>( - &self, - mut guard: MutexGuard<'a, T>, - timeout: Duration, - ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> { - let wtr = self.1.wait_for(&mut guard.1, timeout); - Ok((guard, wtr)) - } - - // Note: Additional methods `wait_timeout_ms`, `wait_timeout_until`, - // `wait_until` can be provided here as needed. 
-} - -impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&self.1, f) - } -} - -impl<'a, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&self.1, f) - } -} - -impl<'a, T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&self.1, f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/unsafe_cell.rs s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/unsafe_cell.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/loom/std/unsafe_cell.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/loom/std/unsafe_cell.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,18 +0,0 @@ -#[derive(Debug)] -pub(crate) struct UnsafeCell(std::cell::UnsafeCell); - -impl UnsafeCell { - pub(crate) const fn new(data: T) -> UnsafeCell { - UnsafeCell(std::cell::UnsafeCell::new(data)) - } - - #[inline(always)] - pub(crate) fn with(&self, f: impl FnOnce(*const T) -> R) -> R { - f(self.0.get()) - } - - #[inline(always)] - pub(crate) fn with_mut(&self, f: impl FnOnce(*mut T) -> R) -> R { - f(self.0.get()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/addr_of.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/addr_of.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/addr_of.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/addr_of.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ -//! This module defines a macro that lets you go from a raw pointer to a struct -//! to a raw pointer to a field of the struct. - -macro_rules! 
generate_addr_of_methods { - ( - impl<$($gen:ident)*> $struct_name:ty {$( - $(#[$attrs:meta])* - $vis:vis unsafe fn $fn_name:ident(self: NonNull) -> NonNull<$field_type:ty> { - &self$(.$field_name:tt)+ - } - )*} - ) => { - impl<$($gen)*> $struct_name {$( - $(#[$attrs])* - $vis unsafe fn $fn_name(me: ::core::ptr::NonNull) -> ::core::ptr::NonNull<$field_type> { - let me = me.as_ptr(); - let field = ::std::ptr::addr_of_mut!((*me) $(.$field_name)+ ); - ::core::ptr::NonNull::new_unchecked(field) - } - )*} - }; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/cfg.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/cfg.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/cfg.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/cfg.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,575 +0,0 @@ -#![allow(unused_macros)] - -macro_rules! feature { - ( - #![$meta:meta] - $($item:item)* - ) => { - $( - #[cfg($meta)] - #[cfg_attr(docsrs, doc(cfg($meta)))] - $item - )* - } -} - -/// Enables Windows-specific code. -/// Use this macro instead of `cfg(windows)` to generate docs properly. -macro_rules! cfg_windows { - ($($item:item)*) => { - $( - #[cfg(any(all(doc, docsrs), windows))] - #[cfg_attr(docsrs, doc(cfg(windows)))] - $item - )* - } -} - -/// Enables unstable Windows-specific code. -/// Use this macro instead of `cfg(windows)` to generate docs properly. -macro_rules! cfg_unstable_windows { - ($($item:item)*) => { - $( - #[cfg(all(any(all(doc, docsrs), windows), tokio_unstable))] - #[cfg_attr(docsrs, doc(cfg(all(windows, tokio_unstable))))] - $item - )* - } -} - -/// Enables enter::block_on. -macro_rules! cfg_block_on { - ($($item:item)*) => { - $( - #[cfg(any( - feature = "fs", - feature = "net", - feature = "io-std", - feature = "rt", - ))] - $item - )* - } -} - -/// Enables internal `AtomicWaker` impl. -macro_rules! 
cfg_atomic_waker_impl { - ($($item:item)*) => { - $( - #[cfg(any( - feature = "net", - feature = "process", - feature = "rt", - feature = "signal", - feature = "time", - ))] - #[cfg(not(loom))] - $item - )* - } -} - -macro_rules! cfg_aio { - ($($item:item)*) => { - $( - #[cfg(all(any(docsrs, target_os = "freebsd"), feature = "net"))] - #[cfg_attr(docsrs, - doc(cfg(all(target_os = "freebsd", feature = "net"))) - )] - $item - )* - } -} - -macro_rules! cfg_fs { - ($($item:item)*) => { - $( - #[cfg(feature = "fs")] - #[cfg(not(target_os = "wasi"))] - #[cfg_attr(docsrs, doc(cfg(feature = "fs")))] - $item - )* - } -} - -macro_rules! cfg_io_blocking { - ($($item:item)*) => { - $( #[cfg(any( - feature = "io-std", - feature = "fs", - all(windows, feature = "process"), - ))] $item )* - } -} - -macro_rules! cfg_io_driver { - ($($item:item)*) => { - $( - #[cfg(any( - feature = "net", - all(unix, feature = "process"), - all(unix, feature = "signal"), - ))] - #[cfg_attr(docsrs, doc(cfg(any( - feature = "net", - all(unix, feature = "process"), - all(unix, feature = "signal"), - ))))] - $item - )* - } -} - -macro_rules! cfg_io_driver_impl { - ( $( $item:item )* ) => { - $( - #[cfg(any( - feature = "net", - all(unix, feature = "process"), - all(unix, feature = "signal"), - ))] - $item - )* - } -} - -macro_rules! cfg_not_io_driver { - ($($item:item)*) => { - $( - #[cfg(not(any( - feature = "net", - all(unix, feature = "process"), - all(unix, feature = "signal"), - )))] - $item - )* - } -} - -macro_rules! cfg_io_readiness { - ($($item:item)*) => { - $( - #[cfg(feature = "net")] - $item - )* - } -} - -macro_rules! cfg_io_std { - ($($item:item)*) => { - $( - #[cfg(feature = "io-std")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-std")))] - $item - )* - } -} - -macro_rules! cfg_io_util { - ($($item:item)*) => { - $( - #[cfg(feature = "io-util")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - $item - )* - } -} - -macro_rules! 
cfg_not_io_util { - ($($item:item)*) => { - $( #[cfg(not(feature = "io-util"))] $item )* - } -} - -macro_rules! cfg_loom { - ($($item:item)*) => { - $( #[cfg(loom)] $item )* - } -} - -macro_rules! cfg_not_loom { - ($($item:item)*) => { - $( #[cfg(not(loom))] $item )* - } -} - -macro_rules! cfg_macros { - ($($item:item)*) => { - $( - #[cfg(feature = "macros")] - #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] - $item - )* - } -} - -macro_rules! cfg_metrics { - ($($item:item)*) => { - $( - // For now, metrics is only disabled in loom tests. - // When stabilized, it might have a dedicated feature flag. - #[cfg(all(tokio_unstable, not(loom)))] - #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] - $item - )* - } -} - -macro_rules! cfg_not_metrics { - ($($item:item)*) => { - $( - #[cfg(not(all(tokio_unstable, not(loom))))] - $item - )* - } -} - -macro_rules! cfg_not_rt_and_metrics_and_net { - ($($item:item)*) => { - $( #[cfg(not(all(feature = "net", feature = "rt", all(tokio_unstable, not(loom)))))]$item )* - } -} - -macro_rules! cfg_net_or_process { - ($($item:item)*) => { - $( - #[cfg(any(feature = "net", feature = "process"))] - #[cfg_attr(docsrs, doc(cfg(any(feature = "net", feature = "process"))))] - $item - )* - } -} - -macro_rules! cfg_net { - ($($item:item)*) => { - $( - #[cfg(feature = "net")] - #[cfg_attr(docsrs, doc(cfg(feature = "net")))] - $item - )* - } -} - -macro_rules! cfg_net_unix { - ($($item:item)*) => { - $( - #[cfg(all(unix, feature = "net"))] - #[cfg_attr(docsrs, doc(cfg(all(unix, feature = "net"))))] - $item - )* - } -} - -macro_rules! cfg_net_windows { - ($($item:item)*) => { - $( - #[cfg(all(any(all(doc, docsrs), windows), feature = "net"))] - #[cfg_attr(docsrs, doc(cfg(all(windows, feature = "net"))))] - $item - )* - } -} - -macro_rules! 
cfg_process { - ($($item:item)*) => { - $( - #[cfg(feature = "process")] - #[cfg_attr(docsrs, doc(cfg(feature = "process")))] - #[cfg(not(loom))] - #[cfg(not(target_os = "wasi"))] - $item - )* - } -} - -macro_rules! cfg_process_driver { - ($($item:item)*) => { - #[cfg(unix)] - #[cfg(not(loom))] - cfg_process! { $($item)* } - } -} - -macro_rules! cfg_not_process_driver { - ($($item:item)*) => { - $( - #[cfg(not(all(unix, not(loom), feature = "process")))] - $item - )* - } -} - -macro_rules! cfg_signal { - ($($item:item)*) => { - $( - #[cfg(feature = "signal")] - #[cfg_attr(docsrs, doc(cfg(feature = "signal")))] - #[cfg(not(loom))] - #[cfg(not(target_os = "wasi"))] - $item - )* - } -} - -macro_rules! cfg_signal_internal { - ($($item:item)*) => { - $( - #[cfg(any(feature = "signal", all(unix, feature = "process")))] - #[cfg(not(loom))] - $item - )* - } -} - -macro_rules! cfg_signal_internal_and_unix { - ($($item:item)*) => { - #[cfg(unix)] - cfg_signal_internal! { $($item)* } - } -} - -macro_rules! cfg_not_signal_internal { - ($($item:item)*) => { - $( - #[cfg(any(loom, not(unix), not(any(feature = "signal", all(unix, feature = "process")))))] - $item - )* - } -} - -macro_rules! cfg_sync { - ($($item:item)*) => { - $( - #[cfg(feature = "sync")] - #[cfg_attr(docsrs, doc(cfg(feature = "sync")))] - $item - )* - } -} - -macro_rules! cfg_not_sync { - ($($item:item)*) => { - $( #[cfg(not(feature = "sync"))] $item )* - } -} - -macro_rules! cfg_rt { - ($($item:item)*) => { - $( - #[cfg(feature = "rt")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - $item - )* - } -} - -macro_rules! cfg_not_rt { - ($($item:item)*) => { - $( #[cfg(not(feature = "rt"))] $item )* - } -} - -macro_rules! cfg_rt_multi_thread { - ($($item:item)*) => { - $( - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] - $item - )* - } -} - -macro_rules! 
cfg_not_rt_multi_thread { - ($($item:item)*) => { - $( #[cfg(not(feature = "rt-multi-thread"))] $item )* - } -} - -macro_rules! cfg_taskdump { - ($($item:item)*) => { - $( - #[cfg(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any( - target_arch = "aarch64", - target_arch = "x86", - target_arch = "x86_64" - ) - ))] - $item - )* - }; -} - -macro_rules! cfg_not_taskdump { - ($($item:item)*) => { - $( - #[cfg(not(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any( - target_arch = "aarch64", - target_arch = "x86", - target_arch = "x86_64" - ) - )))] - $item - )* - }; -} - -macro_rules! cfg_test_util { - ($($item:item)*) => { - $( - #[cfg(feature = "test-util")] - #[cfg_attr(docsrs, doc(cfg(feature = "test-util")))] - $item - )* - } -} - -macro_rules! cfg_not_test_util { - ($($item:item)*) => { - $( #[cfg(not(feature = "test-util"))] $item )* - } -} - -macro_rules! cfg_time { - ($($item:item)*) => { - $( - #[cfg(feature = "time")] - #[cfg_attr(docsrs, doc(cfg(feature = "time")))] - $item - )* - } -} - -macro_rules! cfg_not_time { - ($($item:item)*) => { - $( #[cfg(not(feature = "time"))] $item )* - } -} - -macro_rules! cfg_trace { - ($($item:item)*) => { - $( - #[cfg(all(tokio_unstable, feature = "tracing"))] - #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] - $item - )* - }; -} - -macro_rules! cfg_unstable { - ($($item:item)*) => { - $( - #[cfg(tokio_unstable)] - #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] - $item - )* - }; -} - -macro_rules! cfg_not_trace { - ($($item:item)*) => { - $( - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - $item - )* - } -} - -macro_rules! cfg_coop { - ($($item:item)*) => { - $( - #[cfg(any( - feature = "fs", - feature = "io-std", - feature = "net", - feature = "process", - feature = "rt", - feature = "signal", - feature = "sync", - feature = "time", - ))] - $item - )* - } -} - -macro_rules! 
cfg_not_coop { - ($($item:item)*) => { - $( - #[cfg(not(any( - feature = "fs", - feature = "io-std", - feature = "net", - feature = "process", - feature = "rt", - feature = "signal", - feature = "sync", - feature = "time", - )))] - $item - )* - } -} - -macro_rules! cfg_has_atomic_u64 { - ($($item:item)*) => { - $( - #[cfg(target_has_atomic = "64")] - $item - )* - } -} - -macro_rules! cfg_not_has_atomic_u64 { - ($($item:item)*) => { - $( - #[cfg(not(target_has_atomic = "64"))] - $item - )* - } -} - -macro_rules! cfg_has_const_mutex_new { - ($($item:item)*) => { - $( - #[cfg(not(all(loom, test)))] - $item - )* - } -} - -macro_rules! cfg_not_has_const_mutex_new { - ($($item:item)*) => { - $( - #[cfg(all(loom, test))] - $item - )* - } -} - -macro_rules! cfg_not_wasi { - ($($item:item)*) => { - $( - #[cfg(not(target_os = "wasi"))] - $item - )* - } -} - -macro_rules! cfg_is_wasm_not_wasi { - ($($item:item)*) => { - $( - #[cfg(all(target_family = "wasm", not(target_os = "wasi")))] - $item - )* - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/join.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/join.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/join.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/join.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,166 +0,0 @@ -/// Waits on multiple concurrent branches, returning when **all** branches -/// complete. -/// -/// The `join!` macro must be used inside of async functions, closures, and -/// blocks. -/// -/// The `join!` macro takes a list of async expressions and evaluates them -/// concurrently on the same task. Each async expression evaluates to a future -/// and the futures from each expression are multiplexed on the current task. -/// -/// When working with async expressions returning `Result`, `join!` will wait -/// for **all** branches complete regardless if any complete with `Err`. Use -/// [`try_join!`] to return early when `Err` is encountered. 
-/// -/// [`try_join!`]: crate::try_join -/// -/// # Notes -/// -/// The supplied futures are stored inline and does not require allocating a -/// `Vec`. -/// -/// ### Runtime characteristics -/// -/// By running all async expressions on the current task, the expressions are -/// able to run **concurrently** but not in **parallel**. This means all -/// expressions are run on the same thread and if one branch blocks the thread, -/// all other expressions will be unable to continue. If parallelism is -/// required, spawn each async expression using [`tokio::spawn`] and pass the -/// join handle to `join!`. -/// -/// [`tokio::spawn`]: crate::spawn -/// -/// # Examples -/// -/// Basic join with two branches -/// -/// ``` -/// async fn do_stuff_async() { -/// // async work -/// } -/// -/// async fn more_async_work() { -/// // more here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let (first, second) = tokio::join!( -/// do_stuff_async(), -/// more_async_work()); -/// -/// // do something with the values -/// } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "macros")))] -macro_rules! join { - (@ { - // One `_` for each branch in the `join!` macro. This is not used once - // normalization is complete. - ( $($count:tt)* ) - - // The expression `0+1+1+ ... +1` equal to the number of branches. - ( $($total:tt)* ) - - // Normalized join! branches - $( ( $($skip:tt)* ) $e:expr, )* - - }) => {{ - use $crate::macros::support::{maybe_done, poll_fn, Future, Pin}; - use $crate::macros::support::Poll::{Ready, Pending}; - - // Safety: nothing must be moved out of `futures`. This is to satisfy - // the requirement of `Pin::new_unchecked` called below. - // - // We can't use the `pin!` macro for this because `futures` is a tuple - // and the standard library provides no way to pin-project to the fields - // of a tuple. 
- let mut futures = ( $( maybe_done($e), )* ); - - // This assignment makes sure that the `poll_fn` closure only has a - // reference to the futures, instead of taking ownership of them. This - // mitigates the issue described in - // - let mut futures = &mut futures; - - // Each time the future created by poll_fn is polled, a different future will be polled first - // to ensure every future passed to join! gets a chance to make progress even if - // one of the futures consumes the whole budget. - // - // This is number of futures that will be skipped in the first loop - // iteration the next time. - let mut skip_next_time: u32 = 0; - - poll_fn(move |cx| { - const COUNT: u32 = $($total)*; - - let mut is_pending = false; - - let mut to_run = COUNT; - - // The number of futures that will be skipped in the first loop iteration. - let mut skip = skip_next_time; - - skip_next_time = if skip + 1 == COUNT { 0 } else { skip + 1 }; - - // This loop runs twice and the first `skip` futures - // are not polled in the first iteration. - loop { - $( - if skip == 0 { - if to_run == 0 { - // Every future has been polled - break; - } - to_run -= 1; - - // Extract the future for this branch from the tuple. - let ( $($skip,)* fut, .. ) = &mut *futures; - - // Safety: future is stored on the stack above - // and never moved. - let mut fut = unsafe { Pin::new_unchecked(fut) }; - - // Try polling - if fut.poll(cx).is_pending() { - is_pending = true; - } - } else { - // Future skipped, one less future to skip in the next iteration - skip -= 1; - } - )* - } - - if is_pending { - Pending - } else { - Ready(($({ - // Extract the future for this branch from the tuple. - let ( $($skip,)* fut, .. ) = &mut futures; - - // Safety: future is stored on the stack above - // and never moved. 
- let mut fut = unsafe { Pin::new_unchecked(fut) }; - - fut.take_output().expect("expected completed future") - },)*)) - } - }).await - }}; - - // ===== Normalize ===== - - (@ { ( $($s:tt)* ) ( $($n:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => { - $crate::join!(@{ ($($s)* _) ($($n)* + 1) $($t)* ($($s)*) $e, } $($r)*) - }; - - // ===== Entry point ===== - - ( $($e:expr),+ $(,)?) => { - $crate::join!(@{ () (0) } $($e,)*) - }; - - () => { async {}.await } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/loom.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/loom.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/loom.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/loom.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -macro_rules! if_loom { - ($($t:tt)*) => {{ - #[cfg(loom)] - { - $($t)* - } - }} -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ -#![cfg_attr(not(feature = "full"), allow(unused_macros))] - -#[macro_use] -mod cfg; - -#[macro_use] -mod loom; - -#[macro_use] -mod pin; - -#[macro_use] -mod ready; - -#[macro_use] -mod thread_local; - -#[macro_use] -mod addr_of; - -cfg_trace! { - #[macro_use] - mod trace; -} - -cfg_macros! 
{ - #[macro_use] - mod select; - - #[macro_use] - mod join; - - #[macro_use] - mod try_join; -} - -// Includes re-exports needed to implement macros -#[doc(hidden)] -pub mod support; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/pin.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/pin.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/pin.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/pin.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,144 +0,0 @@ -/// Pins a value on the stack. -/// -/// Calls to `async fn` return anonymous [`Future`] values that are `!Unpin`. -/// These values must be pinned before they can be polled. Calling `.await` will -/// handle this, but consumes the future. If it is required to call `.await` on -/// a `&mut _` reference, the caller is responsible for pinning the future. -/// -/// Pinning may be done by allocating with [`Box::pin`] or by using the stack -/// with the `pin!` macro. -/// -/// The following will **fail to compile**: -/// -/// ```compile_fail -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut future = my_async_fn(); -/// (&mut future).await; -/// } -/// ``` -/// -/// To make this work requires pinning: -/// -/// ``` -/// use tokio::pin; -/// -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let future = my_async_fn(); -/// pin!(future); -/// -/// (&mut future).await; -/// } -/// ``` -/// -/// Pinning is useful when using `select!` and stream operators that require `T: -/// Stream + Unpin`. -/// -/// [`Future`]: trait@std::future::Future -/// [`Box::pin`]: std::boxed::Box::pin -/// -/// # Usage -/// -/// The `pin!` macro takes **identifiers** as arguments. It does **not** work -/// with expressions. -/// -/// The following does not compile as an expression is passed to `pin!`. 
-/// -/// ```compile_fail -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut future = pin!(my_async_fn()); -/// (&mut future).await; -/// } -/// ``` -/// -/// # Examples -/// -/// Using with select: -/// -/// ``` -/// use tokio::{pin, select}; -/// use tokio_stream::{self as stream, StreamExt}; -/// -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream = stream::iter(vec![1, 2, 3, 4]); -/// -/// let future = my_async_fn(); -/// pin!(future); -/// -/// loop { -/// select! { -/// _ = &mut future => { -/// // Stop looping `future` will be polled after completion -/// break; -/// } -/// Some(val) = stream.next() => { -/// println!("got value = {}", val); -/// } -/// } -/// } -/// } -/// ``` -/// -/// Because assigning to a variable followed by pinning is common, there is also -/// a variant of the macro that supports doing both in one go. -/// -/// ``` -/// use tokio::{pin, select}; -/// -/// async fn my_async_fn() { -/// // async logic here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// pin! { -/// let future1 = my_async_fn(); -/// let future2 = my_async_fn(); -/// } -/// -/// select! { -/// _ = &mut future1 => {} -/// _ = &mut future2 => {} -/// } -/// } -/// ``` -#[macro_export] -macro_rules! pin { - ($($x:ident),*) => { $( - // Move the value to ensure that it is owned - let mut $x = $x; - // Shadow the original binding so that it can't be directly accessed - // ever again. 
- #[allow(unused_mut)] - let mut $x = unsafe { - $crate::macros::support::Pin::new_unchecked(&mut $x) - }; - )* }; - ($( - let $x:ident = $init:expr; - )*) => { - $( - let $x = $init; - $crate::pin!($x); - )* - }; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/ready.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/ready.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/ready.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/ready.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -macro_rules! ready { - ($e:expr $(,)?) => { - match $e { - std::task::Poll::Ready(t) => t, - std::task::Poll::Pending => return std::task::Poll::Pending, - } - }; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/select.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/select.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/select.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/select.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1023 +0,0 @@ -/// Waits on multiple concurrent branches, returning when the **first** branch -/// completes, cancelling the remaining branches. -/// -/// The `select!` macro must be used inside of async functions, closures, and -/// blocks. -/// -/// The `select!` macro accepts one or more branches with the following pattern: -/// -/// ```text -/// = (, if )? => , -/// ``` -/// -/// Additionally, the `select!` macro may include a single, optional `else` -/// branch, which evaluates if none of the other branches match their patterns: -/// -/// ```text -/// else => -/// ``` -/// -/// The macro aggregates all `` expressions and runs them -/// concurrently on the **current** task. Once the **first** expression -/// completes with a value that matches its ``, the `select!` macro -/// returns the result of evaluating the completed branch's `` -/// expression. -/// -/// Additionally, each branch may include an optional `if` precondition. 
If the -/// precondition returns `false`, then the branch is disabled. The provided -/// `` is still evaluated but the resulting future is never -/// polled. This capability is useful when using `select!` within a loop. -/// -/// The complete lifecycle of a `select!` expression is as follows: -/// -/// 1. Evaluate all provided `` expressions. If the precondition -/// returns `false`, disable the branch for the remainder of the current call -/// to `select!`. Re-entering `select!` due to a loop clears the "disabled" -/// state. -/// 2. Aggregate the ``s from each branch, including the -/// disabled ones. If the branch is disabled, `` is still -/// evaluated, but the resulting future is not polled. -/// 3. Concurrently await on the results for all remaining ``s. -/// 4. Once an `` returns a value, attempt to apply the value -/// to the provided ``, if the pattern matches, evaluate `` -/// and return. If the pattern **does not** match, disable the current branch -/// and for the remainder of the current call to `select!`. Continue from step 3. -/// 5. If **all** branches are disabled, evaluate the `else` expression. If no -/// else branch is provided, panic. -/// -/// # Runtime characteristics -/// -/// By running all async expressions on the current task, the expressions are -/// able to run **concurrently** but not in **parallel**. This means all -/// expressions are run on the same thread and if one branch blocks the thread, -/// all other expressions will be unable to continue. If parallelism is -/// required, spawn each async expression using [`tokio::spawn`] and pass the -/// join handle to `select!`. -/// -/// [`tokio::spawn`]: crate::spawn -/// -/// # Fairness -/// -/// By default, `select!` randomly picks a branch to check first. This provides -/// some level of fairness when calling `select!` in a loop with branches that -/// are always ready. -/// -/// This behavior can be overridden by adding `biased;` to the beginning of the -/// macro usage. 
See the examples for details. This will cause `select` to poll -/// the futures in the order they appear from top to bottom. There are a few -/// reasons you may want this: -/// -/// - The random number generation of `tokio::select!` has a non-zero CPU cost -/// - Your futures may interact in a way where known polling order is significant -/// -/// But there is an important caveat to this mode. It becomes your responsibility -/// to ensure that the polling order of your futures is fair. If for example you -/// are selecting between a stream and a shutdown future, and the stream has a -/// huge volume of messages and zero or nearly zero time between them, you should -/// place the shutdown future earlier in the `select!` list to ensure that it is -/// always polled, and will not be ignored due to the stream being constantly -/// ready. -/// -/// # Panics -/// -/// The `select!` macro panics if all branches are disabled **and** there is no -/// provided `else` branch. A branch is disabled when the provided `if` -/// precondition returns `false` **or** when the pattern does not match the -/// result of ``. -/// -/// # Cancellation safety -/// -/// When using `select!` in a loop to receive messages from multiple sources, -/// you should make sure that the receive call is cancellation safe to avoid -/// losing messages. This section goes through various common methods and -/// describes whether they are cancel safe. The lists in this section are not -/// exhaustive. 
-/// -/// The following methods are cancellation safe: -/// -/// * [`tokio::sync::mpsc::Receiver::recv`](crate::sync::mpsc::Receiver::recv) -/// * [`tokio::sync::mpsc::UnboundedReceiver::recv`](crate::sync::mpsc::UnboundedReceiver::recv) -/// * [`tokio::sync::broadcast::Receiver::recv`](crate::sync::broadcast::Receiver::recv) -/// * [`tokio::sync::watch::Receiver::changed`](crate::sync::watch::Receiver::changed) -/// * [`tokio::net::TcpListener::accept`](crate::net::TcpListener::accept) -/// * [`tokio::net::UnixListener::accept`](crate::net::UnixListener::accept) -/// * [`tokio::signal::unix::Signal::recv`](crate::signal::unix::Signal::recv) -/// * [`tokio::io::AsyncReadExt::read`](crate::io::AsyncReadExt::read) on any `AsyncRead` -/// * [`tokio::io::AsyncReadExt::read_buf`](crate::io::AsyncReadExt::read_buf) on any `AsyncRead` -/// * [`tokio::io::AsyncWriteExt::write`](crate::io::AsyncWriteExt::write) on any `AsyncWrite` -/// * [`tokio::io::AsyncWriteExt::write_buf`](crate::io::AsyncWriteExt::write_buf) on any `AsyncWrite` -/// * [`tokio_stream::StreamExt::next`](https://docs.rs/tokio-stream/0.1/tokio_stream/trait.StreamExt.html#method.next) on any `Stream` -/// * [`futures::stream::StreamExt::next`](https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.next) on any `Stream` -/// -/// The following methods are not cancellation safe and can lead to loss of data: -/// -/// * [`tokio::io::AsyncReadExt::read_exact`](crate::io::AsyncReadExt::read_exact) -/// * [`tokio::io::AsyncReadExt::read_to_end`](crate::io::AsyncReadExt::read_to_end) -/// * [`tokio::io::AsyncReadExt::read_to_string`](crate::io::AsyncReadExt::read_to_string) -/// * [`tokio::io::AsyncWriteExt::write_all`](crate::io::AsyncWriteExt::write_all) -/// -/// The following methods are not cancellation safe because they use a queue for -/// fairness and cancellation makes you lose your place in the queue: -/// -/// * [`tokio::sync::Mutex::lock`](crate::sync::Mutex::lock) -/// * 
[`tokio::sync::RwLock::read`](crate::sync::RwLock::read) -/// * [`tokio::sync::RwLock::write`](crate::sync::RwLock::write) -/// * [`tokio::sync::Semaphore::acquire`](crate::sync::Semaphore::acquire) -/// * [`tokio::sync::Notify::notified`](crate::sync::Notify::notified) -/// -/// To determine whether your own methods are cancellation safe, look for the -/// location of uses of `.await`. This is because when an asynchronous method is -/// cancelled, that always happens at an `.await`. If your function behaves -/// correctly even if it is restarted while waiting at an `.await`, then it is -/// cancellation safe. -/// -/// Cancellation safety can be defined in the following way: If you have a -/// future that has not yet completed, then it must be a no-op to drop that -/// future and recreate it. This definition is motivated by the situation where -/// a `select!` is used in a loop. Without this guarantee, you would lose your -/// progress when another branch completes and you restart the `select!` by -/// going around the loop. -/// -/// Be aware that cancelling something that is not cancellation safe is not -/// necessarily wrong. For example, if you are cancelling a task because the -/// application is shutting down, then you probably don't care that partially -/// read data is lost. -/// -/// # Examples -/// -/// Basic select with two branches. -/// -/// ``` -/// async fn do_stuff_async() { -/// // async work -/// } -/// -/// async fn more_async_work() { -/// // more here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// tokio::select! { -/// _ = do_stuff_async() => { -/// println!("do_stuff_async() completed first") -/// } -/// _ = more_async_work() => { -/// println!("more_async_work() completed first") -/// } -/// }; -/// } -/// ``` -/// -/// Basic stream selecting. 
-/// -/// ``` -/// use tokio_stream::{self as stream, StreamExt}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream1 = stream::iter(vec![1, 2, 3]); -/// let mut stream2 = stream::iter(vec![4, 5, 6]); -/// -/// let next = tokio::select! { -/// v = stream1.next() => v.unwrap(), -/// v = stream2.next() => v.unwrap(), -/// }; -/// -/// assert!(next == 1 || next == 4); -/// } -/// ``` -/// -/// Collect the contents of two streams. In this example, we rely on pattern -/// matching and the fact that `stream::iter` is "fused", i.e. once the stream -/// is complete, all calls to `next()` return `None`. -/// -/// ``` -/// use tokio_stream::{self as stream, StreamExt}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream1 = stream::iter(vec![1, 2, 3]); -/// let mut stream2 = stream::iter(vec![4, 5, 6]); -/// -/// let mut values = vec![]; -/// -/// loop { -/// tokio::select! { -/// Some(v) = stream1.next() => values.push(v), -/// Some(v) = stream2.next() => values.push(v), -/// else => break, -/// } -/// } -/// -/// values.sort(); -/// assert_eq!(&[1, 2, 3, 4, 5, 6], &values[..]); -/// } -/// ``` -/// -/// Using the same future in multiple `select!` expressions can be done by passing -/// a reference to the future. Doing so requires the future to be [`Unpin`]. A -/// future can be made [`Unpin`] by either using [`Box::pin`] or stack pinning. -/// -/// [`Unpin`]: std::marker::Unpin -/// [`Box::pin`]: std::boxed::Box::pin -/// -/// Here, a stream is consumed for at most 1 second. -/// -/// ``` -/// use tokio_stream::{self as stream, StreamExt}; -/// use tokio::time::{self, Duration}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream = stream::iter(vec![1, 2, 3]); -/// let sleep = time::sleep(Duration::from_secs(1)); -/// tokio::pin!(sleep); -/// -/// loop { -/// tokio::select! 
{ -/// maybe_v = stream.next() => { -/// if let Some(v) = maybe_v { -/// println!("got = {}", v); -/// } else { -/// break; -/// } -/// } -/// _ = &mut sleep => { -/// println!("timeout"); -/// break; -/// } -/// } -/// } -/// } -/// ``` -/// -/// Joining two values using `select!`. -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx1, mut rx1) = oneshot::channel(); -/// let (tx2, mut rx2) = oneshot::channel(); -/// -/// tokio::spawn(async move { -/// tx1.send("first").unwrap(); -/// }); -/// -/// tokio::spawn(async move { -/// tx2.send("second").unwrap(); -/// }); -/// -/// let mut a = None; -/// let mut b = None; -/// -/// while a.is_none() || b.is_none() { -/// tokio::select! { -/// v1 = (&mut rx1), if a.is_none() => a = Some(v1.unwrap()), -/// v2 = (&mut rx2), if b.is_none() => b = Some(v2.unwrap()), -/// } -/// } -/// -/// let res = (a.unwrap(), b.unwrap()); -/// -/// assert_eq!(res.0, "first"); -/// assert_eq!(res.1, "second"); -/// } -/// ``` -/// -/// Using the `biased;` mode to control polling order. -/// -/// ``` -/// #[tokio::main] -/// async fn main() { -/// let mut count = 0u8; -/// -/// loop { -/// tokio::select! { -/// // If you run this example without `biased;`, the polling order is -/// // pseudo-random, and the assertions on the value of count will -/// // (probably) fail. -/// biased; -/// -/// _ = async {}, if count < 1 => { -/// count += 1; -/// assert_eq!(count, 1); -/// } -/// _ = async {}, if count < 2 => { -/// count += 1; -/// assert_eq!(count, 2); -/// } -/// _ = async {}, if count < 3 => { -/// count += 1; -/// assert_eq!(count, 3); -/// } -/// _ = async {}, if count < 4 => { -/// count += 1; -/// assert_eq!(count, 4); -/// } -/// -/// else => { -/// break; -/// } -/// }; -/// } -/// } -/// ``` -/// -/// ## Avoid racy `if` preconditions -/// -/// Given that `if` preconditions are used to disable `select!` branches, some -/// caution must be used to avoid missing values. 
-/// -/// For example, here is **incorrect** usage of `sleep` with `if`. The objective -/// is to repeatedly run an asynchronous task for up to 50 milliseconds. -/// However, there is a potential for the `sleep` completion to be missed. -/// -/// ```no_run,should_panic -/// use tokio::time::{self, Duration}; -/// -/// async fn some_async_work() { -/// // do work -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let sleep = time::sleep(Duration::from_millis(50)); -/// tokio::pin!(sleep); -/// -/// while !sleep.is_elapsed() { -/// tokio::select! { -/// _ = &mut sleep, if !sleep.is_elapsed() => { -/// println!("operation timed out"); -/// } -/// _ = some_async_work() => { -/// println!("operation completed"); -/// } -/// } -/// } -/// -/// panic!("This example shows how not to do it!"); -/// } -/// ``` -/// -/// In the above example, `sleep.is_elapsed()` may return `true` even if -/// `sleep.poll()` never returned `Ready`. This opens up a potential race -/// condition where `sleep` expires between the `while !sleep.is_elapsed()` -/// check and the call to `select!` resulting in the `some_async_work()` call to -/// run uninterrupted despite the sleep having elapsed. -/// -/// One way to write the above example without the race would be: -/// -/// ``` -/// use tokio::time::{self, Duration}; -/// -/// async fn some_async_work() { -/// # time::sleep(Duration::from_millis(10)).await; -/// // do work -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let sleep = time::sleep(Duration::from_millis(50)); -/// tokio::pin!(sleep); -/// -/// loop { -/// tokio::select! { -/// _ = &mut sleep => { -/// println!("operation timed out"); -/// break; -/// } -/// _ = some_async_work() => { -/// println!("operation completed"); -/// } -/// } -/// } -/// } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "macros")))] -macro_rules! select { - // Uses a declarative macro to do **most** of the work. 
While it is possible - // to implement fully with a declarative macro, a procedural macro is used - // to enable improved error messages. - // - // The macro is structured as a tt-muncher. All branches are processed and - // normalized. Once the input is normalized, it is passed to the top-most - // rule. When entering the macro, `@{ }` is inserted at the front. This is - // used to collect the normalized input. - // - // The macro only recurses once per branch. This allows using `select!` - // without requiring the user to increase the recursion limit. - - // All input is normalized, now transform. - (@ { - // The index of the future to poll first (in bias mode), or the RNG - // expression to use to pick a future to poll first. - start=$start:expr; - - // One `_` for each branch in the `select!` macro. Passing this to - // `count!` converts $skip to an integer. - ( $($count:tt)* ) - - // Normalized select branches. `( $skip )` is a set of `_` characters. - // There is one `_` for each select branch **before** this one. Given - // that all input futures are stored in a tuple, $skip is useful for - // generating a pattern to reference the future for the current branch. - // $skip is also used as an argument to `count!`, returning the index of - // the current select branch. - $( ( $($skip:tt)* ) $bind:pat = $fut:expr, if $c:expr => $handle:expr, )+ - - // Fallback expression used when all select branches have been disabled. - ; $else:expr - - }) => {{ - // Enter a context where stable "function-like" proc macros can be used. - // - // This module is defined within a scope and should not leak out of this - // macro. - #[doc(hidden)] - mod __tokio_select_util { - // Generate an enum with one variant per select branch - $crate::select_priv_declare_output_enum!( ( $($count)* ) ); - } - - // `tokio::macros::support` is a public, but doc(hidden) module - // including a re-export of all types needed by this macro. 
- use $crate::macros::support::Future; - use $crate::macros::support::Pin; - use $crate::macros::support::Poll::{Ready, Pending}; - - const BRANCHES: u32 = $crate::count!( $($count)* ); - - let mut disabled: __tokio_select_util::Mask = Default::default(); - - // First, invoke all the pre-conditions. For any that return true, - // set the appropriate bit in `disabled`. - $( - if !$c { - let mask: __tokio_select_util::Mask = 1 << $crate::count!( $($skip)* ); - disabled |= mask; - } - )* - - // Create a scope to separate polling from handling the output. This - // adds borrow checker flexibility when using the macro. - let mut output = { - // Safety: Nothing must be moved out of `futures`. This is to - // satisfy the requirement of `Pin::new_unchecked` called below. - // - // We can't use the `pin!` macro for this because `futures` is a - // tuple and the standard library provides no way to pin-project to - // the fields of a tuple. - let mut futures = ( $( $fut , )+ ); - - // This assignment makes sure that the `poll_fn` closure only has a - // reference to the futures, instead of taking ownership of them. - // This mitigates the issue described in - // - let mut futures = &mut futures; - - $crate::macros::support::poll_fn(|cx| { - // Track if any branch returns pending. If no branch completes - // **or** returns pending, this implies that all branches are - // disabled. - let mut is_pending = false; - - // Choose a starting index to begin polling the futures at. In - // practice, this will either be a pseudo-randomly generated - // number by default, or the constant 0 if `biased;` is - // supplied. - let start = $start; - - for i in 0..BRANCHES { - let branch; - #[allow(clippy::modulo_one)] - { - branch = (start + i) % BRANCHES; - } - match branch { - $( - #[allow(unreachable_code)] - $crate::count!( $($skip)* ) => { - // First, if the future has previously been - // disabled, do not poll it again. 
This is done - // by checking the associated bit in the - // `disabled` bit field. - let mask = 1 << branch; - - if disabled & mask == mask { - // The future has been disabled. - continue; - } - - // Extract the future for this branch from the - // tuple - let ( $($skip,)* fut, .. ) = &mut *futures; - - // Safety: future is stored on the stack above - // and never moved. - let mut fut = unsafe { Pin::new_unchecked(fut) }; - - // Try polling it - let out = match Future::poll(fut, cx) { - Ready(out) => out, - Pending => { - // Track that at least one future is - // still pending and continue polling. - is_pending = true; - continue; - } - }; - - // Disable the future from future polling. - disabled |= mask; - - // The future returned a value, check if matches - // the specified pattern. - #[allow(unused_variables)] - #[allow(unused_mut)] - match &out { - $crate::select_priv_clean_pattern!($bind) => {} - _ => continue, - } - - // The select is complete, return the value - return Ready($crate::select_variant!(__tokio_select_util::Out, ($($skip)*))(out)); - } - )* - _ => unreachable!("reaching this means there probably is an off by one bug"), - } - } - - if is_pending { - Pending - } else { - // All branches have been disabled. - Ready(__tokio_select_util::Out::Disabled) - } - }).await - }; - - match output { - $( - $crate::select_variant!(__tokio_select_util::Out, ($($skip)*) ($bind)) => $handle, - )* - __tokio_select_util::Out::Disabled => $else, - _ => unreachable!("failed to match bind"), - } - }}; - - // ==== Normalize ===== - - // These rules match a single `select!` branch and normalize it for - // processing by the first rule. - - (@ { start=$start:expr; $($t:tt)* } ) => { - // No `else` branch - $crate::select!(@{ start=$start; $($t)*; panic!("all branches are disabled and there is no else branch") }) - }; - (@ { start=$start:expr; $($t:tt)* } else => $else:expr $(,)?) 
=> { - $crate::select!(@{ start=$start; $($t)*; $else }) - }; - (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block, $($r:tt)* ) => { - $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*) - }; - (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block, $($r:tt)* ) => { - $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*) - }; - (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block $($r:tt)* ) => { - $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*) - }; - (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block $($r:tt)* ) => { - $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*) - }; - (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr ) => { - $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, }) - }; - (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr ) => { - $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, }) - }; - (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr, $($r:tt)* ) => { - $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*) - }; - (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr, $($r:tt)* ) => { - $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*) - }; - - // ===== Entry point ===== - - (biased; $p:pat = $($t:tt)* ) => { - $crate::select!(@{ start=0; () } $p = $($t)*) - }; - - ( $p:pat = $($t:tt)* ) => { - // Randomly generate a starting point. This makes `select!` a bit more - // fair and avoids always polling the first future. 
- $crate::select!(@{ start={ $crate::macros::support::thread_rng_n(BRANCHES) }; () } $p = $($t)*) - }; - () => { - compile_error!("select! requires at least one branch.") - }; -} - -// And here... we manually list out matches for up to 64 branches... I'm not -// happy about it either, but this is how we manage to use a declarative macro! - -#[macro_export] -#[doc(hidden)] -macro_rules! count { - () => { - 0 - }; - (_) => { - 1 - }; - (_ _) => { - 2 - }; - (_ _ _) => { - 3 - }; - (_ _ _ _) => { - 4 - }; - (_ _ _ _ _) => { - 5 - }; - (_ _ _ _ _ _) => { - 6 - }; - (_ _ _ _ _ _ _) => { - 7 - }; - (_ _ _ _ _ _ _ _) => { - 8 - }; - (_ _ _ _ _ _ _ _ _) => { - 9 - }; - (_ _ _ _ _ _ _ _ _ _) => { - 10 - }; - (_ _ _ _ _ _ _ _ _ _ _) => { - 11 - }; - (_ _ _ _ _ _ _ _ _ _ _ _) => { - 12 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _) => { - 13 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 14 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 15 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 16 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 17 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 18 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 19 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 20 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 21 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 22 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 23 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 24 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 25 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 26 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 27 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 28 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 29 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 30 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 31 - }; - (_ _ _ _ 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 32 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 33 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 34 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 35 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 36 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 37 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 38 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 39 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 40 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 41 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 42 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 43 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 44 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 45 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 46 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 47 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 48 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 49 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 50 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _ _ _ _ _) => { - 51 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 52 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 53 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 54 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 55 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 56 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 57 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 58 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 59 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 60 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 61 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 62 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 63 - }; - (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { - 64 - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! 
select_variant { - ($($p:ident)::*, () $($t:tt)*) => { - $($p)::*::_0 $($t)* - }; - ($($p:ident)::*, (_) $($t:tt)*) => { - $($p)::*::_1 $($t)* - }; - ($($p:ident)::*, (_ _) $($t:tt)*) => { - $($p)::*::_2 $($t)* - }; - ($($p:ident)::*, (_ _ _) $($t:tt)*) => { - $($p)::*::_3 $($t)* - }; - ($($p:ident)::*, (_ _ _ _) $($t:tt)*) => { - $($p)::*::_4 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _) $($t:tt)*) => { - $($p)::*::_5 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_6 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_7 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_8 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_9 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_10 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_11 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_12 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_13 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_14 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_15 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_16 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_17 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_18 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_19 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_20 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_21 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_22 
$($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_23 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_24 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_25 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_26 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_27 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_28 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_29 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_30 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_31 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_32 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_33 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_34 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_35 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_36 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_37 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { 
- $($p)::*::_38 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_39 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_40 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_41 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_42 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_43 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_44 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_45 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_46 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_47 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_48 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_49 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_50 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) 
$($t:tt)*) => { - $($p)::*::_51 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_52 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_53 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_54 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_55 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_56 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_57 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_58 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_59 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_60 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_61 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - 
$($p)::*::_62 $($t)* - }; - ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { - $($p)::*::_63 $($t)* - }; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/support.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/support.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/support.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/support.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -cfg_macros! { - pub use crate::future::poll_fn; - pub use crate::future::maybe_done::maybe_done; - - #[doc(hidden)] - pub fn thread_rng_n(n: u32) -> u32 { - crate::runtime::context::thread_rng_n(n) - } -} - -pub use std::future::Future; -pub use std::pin::Pin; -pub use std::task::Poll; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/thread_local.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/thread_local.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/thread_local.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/thread_local.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,18 +0,0 @@ -#[cfg(all(loom, test))] -macro_rules! tokio_thread_local { - ($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty = const { $expr:expr } $(;)?) => { - loom::thread_local! { - $(#[$attrs])* - $vis static $name: $ty = $expr; - } - }; - - ($($tts:tt)+) => { loom::thread_local!{ $($tts)+ } } -} - -#[cfg(not(all(loom, test)))] -macro_rules! tokio_thread_local { - ($($tts:tt)+) => { - ::std::thread_local!{ $($tts)+ } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/trace.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/trace.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/trace.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/trace.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -cfg_trace! { - macro_rules! 
trace_op { - ($name:expr, $readiness:literal) => { - tracing::trace!( - target: "runtime::resource::poll_op", - op_name = $name, - is_ready = $readiness - ); - } - } - - macro_rules! trace_poll_op { - ($name:expr, $poll:expr $(,)*) => { - match $poll { - std::task::Poll::Ready(t) => { - trace_op!($name, true); - std::task::Poll::Ready(t) - } - std::task::Poll::Pending => { - trace_op!($name, false); - return std::task::Poll::Pending; - } - } - }; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/macros/try_join.rs s390-tools-2.33.1/rust-vendor/tokio/src/macros/try_join.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/macros/try_join.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/macros/try_join.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,218 +0,0 @@ -/// Waits on multiple concurrent branches, returning when **all** branches -/// complete with `Ok(_)` or on the first `Err(_)`. -/// -/// The `try_join!` macro must be used inside of async functions, closures, and -/// blocks. -/// -/// Similar to [`join!`], the `try_join!` macro takes a list of async -/// expressions and evaluates them concurrently on the same task. Each async -/// expression evaluates to a future and the futures from each expression are -/// multiplexed on the current task. The `try_join!` macro returns when **all** -/// branches return with `Ok` or when the **first** branch returns with `Err`. -/// -/// [`join!`]: macro@join -/// -/// # Notes -/// -/// The supplied futures are stored inline and does not require allocating a -/// `Vec`. -/// -/// ### Runtime characteristics -/// -/// By running all async expressions on the current task, the expressions are -/// able to run **concurrently** but not in **parallel**. This means all -/// expressions are run on the same thread and if one branch blocks the thread, -/// all other expressions will be unable to continue. 
If parallelism is -/// required, spawn each async expression using [`tokio::spawn`] and pass the -/// join handle to `try_join!`. -/// -/// [`tokio::spawn`]: crate::spawn -/// -/// # Examples -/// -/// Basic try_join with two branches. -/// -/// ``` -/// async fn do_stuff_async() -> Result<(), &'static str> { -/// // async work -/// # Ok(()) -/// } -/// -/// async fn more_async_work() -> Result<(), &'static str> { -/// // more here -/// # Ok(()) -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let res = tokio::try_join!( -/// do_stuff_async(), -/// more_async_work()); -/// -/// match res { -/// Ok((first, second)) => { -/// // do something with the values -/// } -/// Err(err) => { -/// println!("processing failed; error = {}", err); -/// } -/// } -/// } -/// ``` -/// -/// Using `try_join!` with spawned tasks. -/// -/// ``` -/// use tokio::task::JoinHandle; -/// -/// async fn do_stuff_async() -> Result<(), &'static str> { -/// // async work -/// # Err("failed") -/// } -/// -/// async fn more_async_work() -> Result<(), &'static str> { -/// // more here -/// # Ok(()) -/// } -/// -/// async fn flatten(handle: JoinHandle>) -> Result { -/// match handle.await { -/// Ok(Ok(result)) => Ok(result), -/// Ok(Err(err)) => Err(err), -/// Err(err) => Err("handling failed"), -/// } -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let handle1 = tokio::spawn(do_stuff_async()); -/// let handle2 = tokio::spawn(more_async_work()); -/// match tokio::try_join!(flatten(handle1), flatten(handle2)) { -/// Ok(val) => { -/// // do something with the values -/// } -/// Err(err) => { -/// println!("Failed with {}.", err); -/// # assert_eq!(err, "failed"); -/// } -/// } -/// } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "macros")))] -macro_rules! try_join { - (@ { - // One `_` for each branch in the `try_join!` macro. This is not used once - // normalization is complete. - ( $($count:tt)* ) - - // The expression `0+1+1+ ... 
+1` equal to the number of branches. - ( $($total:tt)* ) - - // Normalized try_join! branches - $( ( $($skip:tt)* ) $e:expr, )* - - }) => {{ - use $crate::macros::support::{maybe_done, poll_fn, Future, Pin}; - use $crate::macros::support::Poll::{Ready, Pending}; - - // Safety: nothing must be moved out of `futures`. This is to satisfy - // the requirement of `Pin::new_unchecked` called below. - // - // We can't use the `pin!` macro for this because `futures` is a tuple - // and the standard library provides no way to pin-project to the fields - // of a tuple. - let mut futures = ( $( maybe_done($e), )* ); - - // This assignment makes sure that the `poll_fn` closure only has a - // reference to the futures, instead of taking ownership of them. This - // mitigates the issue described in - // - let mut futures = &mut futures; - - // Each time the future created by poll_fn is polled, a different future will be polled first - // to ensure every future passed to join! gets a chance to make progress even if - // one of the futures consumes the whole budget. - // - // This is number of futures that will be skipped in the first loop - // iteration the next time. - let mut skip_next_time: u32 = 0; - - poll_fn(move |cx| { - const COUNT: u32 = $($total)*; - - let mut is_pending = false; - - let mut to_run = COUNT; - - // The number of futures that will be skipped in the first loop iteration - let mut skip = skip_next_time; - - skip_next_time = if skip + 1 == COUNT { 0 } else { skip + 1 }; - - // This loop runs twice and the first `skip` futures - // are not polled in the first iteration. - loop { - $( - if skip == 0 { - if to_run == 0 { - // Every future has been polled - break; - } - to_run -= 1; - - // Extract the future for this branch from the tuple. - let ( $($skip,)* fut, .. ) = &mut *futures; - - // Safety: future is stored on the stack above - // and never moved. 
- let mut fut = unsafe { Pin::new_unchecked(fut) }; - - // Try polling - if fut.as_mut().poll(cx).is_pending() { - is_pending = true; - } else if fut.as_mut().output_mut().expect("expected completed future").is_err() { - return Ready(Err(fut.take_output().expect("expected completed future").err().unwrap())) - } - } else { - // Future skipped, one less future to skip in the next iteration - skip -= 1; - } - )* - } - - if is_pending { - Pending - } else { - Ready(Ok(($({ - // Extract the future for this branch from the tuple. - let ( $($skip,)* fut, .. ) = &mut futures; - - // Safety: future is stored on the stack above - // and never moved. - let mut fut = unsafe { Pin::new_unchecked(fut) }; - - fut - .take_output() - .expect("expected completed future") - .ok() - .expect("expected Ok(_)") - },)*))) - } - }).await - }}; - - // ===== Normalize ===== - - (@ { ( $($s:tt)* ) ( $($n:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => { - $crate::try_join!(@{ ($($s)* _) ($($n)* + 1) $($t)* ($($s)*) $e, } $($r)*) - }; - - // ===== Entry point ===== - - ( $($e:expr),+ $(,)?) => { - $crate::try_join!(@{ () (0) } $($e,)*) - }; - - () => { async { Ok(()) }.await } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/addr.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/addr.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/addr.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/addr.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,333 +0,0 @@ -use std::future; -use std::io; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; - -/// Converts or resolves without blocking to one or more `SocketAddr` values. -/// -/// # DNS -/// -/// Implementations of `ToSocketAddrs` for string types require a DNS lookup. -/// -/// # Calling -/// -/// Currently, this trait is only used as an argument to Tokio functions that -/// need to reference a target socket address. 
To perform a `SocketAddr` -/// conversion directly, use [`lookup_host()`](super::lookup_host()). -/// -/// This trait is sealed and is intended to be opaque. The details of the trait -/// will change. Stabilization is pending enhancements to the Rust language. -pub trait ToSocketAddrs: sealed::ToSocketAddrsPriv {} - -type ReadyFuture = future::Ready>; - -cfg_net! { - pub(crate) fn to_socket_addrs(arg: T) -> T::Future - where - T: ToSocketAddrs, - { - arg.to_socket_addrs(sealed::Internal) - } -} - -// ===== impl &impl ToSocketAddrs ===== - -impl ToSocketAddrs for &T {} - -impl sealed::ToSocketAddrsPriv for &T -where - T: sealed::ToSocketAddrsPriv + ?Sized, -{ - type Iter = T::Iter; - type Future = T::Future; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - (**self).to_socket_addrs(sealed::Internal) - } -} - -// ===== impl SocketAddr ===== - -impl ToSocketAddrs for SocketAddr {} - -impl sealed::ToSocketAddrsPriv for SocketAddr { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - let iter = Some(*self).into_iter(); - future::ready(Ok(iter)) - } -} - -// ===== impl SocketAddrV4 ===== - -impl ToSocketAddrs for SocketAddrV4 {} - -impl sealed::ToSocketAddrsPriv for SocketAddrV4 { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - SocketAddr::V4(*self).to_socket_addrs(sealed::Internal) - } -} - -// ===== impl SocketAddrV6 ===== - -impl ToSocketAddrs for SocketAddrV6 {} - -impl sealed::ToSocketAddrsPriv for SocketAddrV6 { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - SocketAddr::V6(*self).to_socket_addrs(sealed::Internal) - } -} - -// ===== impl (IpAddr, u16) ===== - -impl ToSocketAddrs for (IpAddr, u16) {} - -impl sealed::ToSocketAddrsPriv for (IpAddr, u16) { - type Iter = 
std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - let iter = Some(SocketAddr::from(*self)).into_iter(); - future::ready(Ok(iter)) - } -} - -// ===== impl (Ipv4Addr, u16) ===== - -impl ToSocketAddrs for (Ipv4Addr, u16) {} - -impl sealed::ToSocketAddrsPriv for (Ipv4Addr, u16) { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - let (ip, port) = *self; - SocketAddrV4::new(ip, port).to_socket_addrs(sealed::Internal) - } -} - -// ===== impl (Ipv6Addr, u16) ===== - -impl ToSocketAddrs for (Ipv6Addr, u16) {} - -impl sealed::ToSocketAddrsPriv for (Ipv6Addr, u16) { - type Iter = std::option::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - let (ip, port) = *self; - SocketAddrV6::new(ip, port, 0, 0).to_socket_addrs(sealed::Internal) - } -} - -// ===== impl &[SocketAddr] ===== - -impl ToSocketAddrs for &[SocketAddr] {} - -impl sealed::ToSocketAddrsPriv for &[SocketAddr] { - type Iter = std::vec::IntoIter; - type Future = ReadyFuture; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - #[inline] - fn slice_to_vec(addrs: &[SocketAddr]) -> Vec { - addrs.to_vec() - } - - // This uses a helper method because clippy doesn't like the `to_vec()` - // call here (it will allocate, whereas `self.iter().copied()` would - // not), but it's actually necessary in order to ensure that the - // returned iterator is valid for the `'static` lifetime, which the - // borrowed `slice::Iter` iterator would not be. - // - // Note that we can't actually add an `allow` attribute for - // `clippy::unnecessary_to_owned` here, as Tokio's CI runs clippy lints - // on Rust 1.52 to avoid breaking LTS releases of Tokio. Users of newer - // Rust versions who see this lint should just ignore it. 
- let iter = slice_to_vec(self).into_iter(); - future::ready(Ok(iter)) - } -} - -cfg_net! { - // ===== impl str ===== - - impl ToSocketAddrs for str {} - - impl sealed::ToSocketAddrsPriv for str { - type Iter = sealed::OneOrMore; - type Future = sealed::MaybeReady; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - use crate::blocking::spawn_blocking; - use sealed::MaybeReady; - - // First check if the input parses as a socket address - let res: Result = self.parse(); - - if let Ok(addr) = res { - return MaybeReady(sealed::State::Ready(Some(addr))); - } - - // Run DNS lookup on the blocking pool - let s = self.to_owned(); - - MaybeReady(sealed::State::Blocking(spawn_blocking(move || { - std::net::ToSocketAddrs::to_socket_addrs(&s) - }))) - } - } - - // ===== impl (&str, u16) ===== - - impl ToSocketAddrs for (&str, u16) {} - - impl sealed::ToSocketAddrsPriv for (&str, u16) { - type Iter = sealed::OneOrMore; - type Future = sealed::MaybeReady; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - use crate::blocking::spawn_blocking; - use sealed::MaybeReady; - - let (host, port) = *self; - - // try to parse the host as a regular IP address first - if let Ok(addr) = host.parse::() { - let addr = SocketAddrV4::new(addr, port); - let addr = SocketAddr::V4(addr); - - return MaybeReady(sealed::State::Ready(Some(addr))); - } - - if let Ok(addr) = host.parse::() { - let addr = SocketAddrV6::new(addr, port, 0, 0); - let addr = SocketAddr::V6(addr); - - return MaybeReady(sealed::State::Ready(Some(addr))); - } - - let host = host.to_owned(); - - MaybeReady(sealed::State::Blocking(spawn_blocking(move || { - std::net::ToSocketAddrs::to_socket_addrs(&(&host[..], port)) - }))) - } - } - - // ===== impl (String, u16) ===== - - impl ToSocketAddrs for (String, u16) {} - - impl sealed::ToSocketAddrsPriv for (String, u16) { - type Iter = sealed::OneOrMore; - type Future = sealed::MaybeReady; - - fn to_socket_addrs(&self, _: sealed::Internal) -> 
Self::Future { - (self.0.as_str(), self.1).to_socket_addrs(sealed::Internal) - } - } - - // ===== impl String ===== - - impl ToSocketAddrs for String {} - - impl sealed::ToSocketAddrsPriv for String { - type Iter = ::Iter; - type Future = ::Future; - - fn to_socket_addrs(&self, _: sealed::Internal) -> Self::Future { - self[..].to_socket_addrs(sealed::Internal) - } - } -} - -pub(crate) mod sealed { - //! The contents of this trait are intended to remain private and __not__ - //! part of the `ToSocketAddrs` public API. The details will change over - //! time. - - use std::future::Future; - use std::io; - use std::net::SocketAddr; - - #[doc(hidden)] - pub trait ToSocketAddrsPriv { - type Iter: Iterator + Send + 'static; - type Future: Future> + Send + 'static; - - fn to_socket_addrs(&self, internal: Internal) -> Self::Future; - } - - #[allow(missing_debug_implementations)] - pub struct Internal; - - cfg_net! { - use crate::blocking::JoinHandle; - - use std::option; - use std::pin::Pin; - use std::task::{Context, Poll}; - use std::vec; - - #[doc(hidden)] - #[derive(Debug)] - pub struct MaybeReady(pub(super) State); - - #[derive(Debug)] - pub(super) enum State { - Ready(Option), - Blocking(JoinHandle>>), - } - - #[doc(hidden)] - #[derive(Debug)] - pub enum OneOrMore { - One(option::IntoIter), - More(vec::IntoIter), - } - - impl Future for MaybeReady { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.0 { - State::Ready(ref mut i) => { - let iter = OneOrMore::One(i.take().into_iter()); - Poll::Ready(Ok(iter)) - } - State::Blocking(ref mut rx) => { - let res = ready!(Pin::new(rx).poll(cx))?.map(OneOrMore::More); - - Poll::Ready(res) - } - } - } - } - - impl Iterator for OneOrMore { - type Item = SocketAddr; - - fn next(&mut self) -> Option { - match self { - OneOrMore::One(i) => i.next(), - OneOrMore::More(i) => i.next(), - } - } - - fn size_hint(&self) -> (usize, Option) { - match self { - OneOrMore::One(i) => 
i.size_hint(), - OneOrMore::More(i) => i.size_hint(), - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/lookup_host.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/lookup_host.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/lookup_host.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/lookup_host.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,38 +0,0 @@ -cfg_net! { - use crate::net::addr::{self, ToSocketAddrs}; - - use std::io; - use std::net::SocketAddr; - - /// Performs a DNS resolution. - /// - /// The returned iterator may not actually yield any values depending on the - /// outcome of any resolution performed. - /// - /// This API is not intended to cover all DNS use cases. Anything beyond the - /// basic use case should be done with a specialized library. - /// - /// # Examples - /// - /// To resolve a DNS entry: - /// - /// ```no_run - /// use tokio::net; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// for addr in net::lookup_host("localhost:3000").await? { - /// println!("socket address is {}", addr); - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn lookup_host(host: T) -> io::Result> - where - T: ToSocketAddrs - { - addr::to_socket_addrs(host).await - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -#![cfg(not(loom))] - -//! TCP/UDP/Unix bindings for `tokio`. -//! -//! This module contains the TCP/UDP/Unix networking types, similar to the standard -//! library, which can be used to implement networking protocols. -//! -//! # Organization -//! -//! * [`TcpListener`] and [`TcpStream`] provide functionality for communication over TCP -//! 
* [`UdpSocket`] provides functionality for communication over UDP -//! * [`UnixListener`] and [`UnixStream`] provide functionality for communication over a -//! Unix Domain Stream Socket **(available on Unix only)** -//! * [`UnixDatagram`] provides functionality for communication -//! over Unix Domain Datagram Socket **(available on Unix only)** - -//! -//! [`TcpListener`]: TcpListener -//! [`TcpStream`]: TcpStream -//! [`UdpSocket`]: UdpSocket -//! [`UnixListener`]: UnixListener -//! [`UnixStream`]: UnixStream -//! [`UnixDatagram`]: UnixDatagram - -mod addr; -cfg_not_wasi! { - #[cfg(feature = "net")] - pub(crate) use addr::to_socket_addrs; -} -pub use addr::ToSocketAddrs; - -cfg_net! { - mod lookup_host; - pub use lookup_host::lookup_host; - - pub mod tcp; - pub use tcp::listener::TcpListener; - pub use tcp::stream::TcpStream; - cfg_not_wasi! { - pub use tcp::socket::TcpSocket; - - mod udp; - pub use udp::UdpSocket; - } -} - -cfg_net_unix! { - pub mod unix; - pub use unix::datagram::socket::UnixDatagram; - pub use unix::listener::UnixListener; - pub use unix::stream::UnixStream; -} - -cfg_net_windows! { - pub mod windows; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/listener.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/listener.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/listener.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/listener.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,451 +0,0 @@ -use crate::io::{Interest, PollEvented}; -use crate::net::tcp::TcpStream; - -cfg_not_wasi! { - use crate::net::{to_socket_addrs, ToSocketAddrs}; -} - -use std::fmt; -use std::io; -use std::net::{self, SocketAddr}; -use std::task::{Context, Poll}; - -cfg_net! { - /// A TCP socket server, listening for connections. - /// - /// You can accept a new connection by using the [`accept`](`TcpListener::accept`) - /// method. - /// - /// A `TcpListener` can be turned into a `Stream` with [`TcpListenerStream`]. 
- /// - /// [`TcpListenerStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.TcpListenerStream.html - /// - /// # Errors - /// - /// Note that accepting a connection can lead to various errors and not all - /// of them are necessarily fatal ‒ for example having too many open file - /// descriptors or the other side closing the connection while it waits in - /// an accept queue. These would terminate the stream if not handled in any - /// way. - /// - /// # Examples - /// - /// Using `accept`: - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// async fn process_socket(socket: T) { - /// # drop(socket); - /// // do work with socket here - /// } - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:8080").await?; - /// - /// loop { - /// let (socket, _) = listener.accept().await?; - /// process_socket(socket).await; - /// } - /// } - /// ``` - pub struct TcpListener { - io: PollEvented, - } -} - -impl TcpListener { - cfg_not_wasi! { - /// Creates a new TcpListener, which will be bound to the specified address. - /// - /// The returned listener is ready for accepting connections. - /// - /// Binding with a port number of 0 will request that the OS assigns a port - /// to this listener. The port allocated can be queried via the `local_addr` - /// method. - /// - /// The address type can be any implementor of the [`ToSocketAddrs`] trait. - /// If `addr` yields multiple addresses, bind will be attempted with each of - /// the addresses until one succeeds and returns the listener. If none of - /// the addresses succeed in creating a listener, the error returned from - /// the last attempt (the last address) is returned. - /// - /// This function sets the `SO_REUSEADDR` option on the socket. - /// - /// To configure the socket before binding, you can use the [`TcpSocket`] - /// type. 
- /// - /// [`ToSocketAddrs`]: trait@crate::net::ToSocketAddrs - /// [`TcpSocket`]: struct@crate::net::TcpSocket - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:2345").await?; - /// - /// // use the listener - /// - /// # let _ = listener; - /// Ok(()) - /// } - /// ``` - pub async fn bind(addr: A) -> io::Result { - let addrs = to_socket_addrs(addr).await?; - - let mut last_err = None; - - for addr in addrs { - match TcpListener::bind_addr(addr) { - Ok(listener) => return Ok(listener), - Err(e) => last_err = Some(e), - } - } - - Err(last_err.unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve to any address", - ) - })) - } - - fn bind_addr(addr: SocketAddr) -> io::Result { - let listener = mio::net::TcpListener::bind(addr)?; - TcpListener::new(listener) - } - } - - /// Accepts a new incoming connection from this listener. - /// - /// This function will yield once a new TCP connection is established. When - /// established, the corresponding [`TcpStream`] and the remote peer's - /// address will be returned. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If the method is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that no new connections were - /// accepted by this method. 
- /// - /// [`TcpStream`]: struct@crate::net::TcpStream - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:8080").await?; - /// - /// match listener.accept().await { - /// Ok((_socket, addr)) => println!("new client: {:?}", addr), - /// Err(e) => println!("couldn't get client: {:?}", e), - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { - let (mio, addr) = self - .io - .registration() - .async_io(Interest::READABLE, || self.io.accept()) - .await?; - - let stream = TcpStream::new(mio)?; - Ok((stream, addr)) - } - - /// Polls to accept a new incoming connection to this listener. - /// - /// If there is no connection to accept, `Poll::Pending` is returned and the - /// current task will be notified by a waker. Note that on multiple calls - /// to `poll_accept`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. - pub fn poll_accept(&self, cx: &mut Context<'_>) -> Poll> { - loop { - let ev = ready!(self.io.registration().poll_read_ready(cx))?; - - match self.io.accept() { - Ok((io, addr)) => { - let io = TcpStream::new(io)?; - return Poll::Ready(Ok((io, addr))); - } - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.registration().clear_readiness(ev); - } - Err(e) => return Poll::Ready(Err(e)), - } - } - } - - /// Creates new `TcpListener` from a `std::net::TcpListener`. - /// - /// This function is intended to be used to wrap a TCP listener from the - /// standard library in the Tokio equivalent. - /// - /// This API is typically paired with the `socket2` crate and the `Socket` - /// type to build up and customize a listener before it's shipped off to the - /// backing event loop. 
This allows configuration of options like - /// `SO_REUSEPORT`, binding to multiple addresses, etc. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the listener is in - /// non-blocking mode. Otherwise all I/O operations on the listener - /// will block the thread, which will cause unexpected behavior. - /// Non-blocking mode can be set using [`set_nonblocking`]. - /// - /// [`set_nonblocking`]: std::net::TcpListener::set_nonblocking - /// - /// # Examples - /// - /// ```rust,no_run - /// use std::error::Error; - /// use tokio::net::TcpListener; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let std_listener = std::net::TcpListener::bind("127.0.0.1:0")?; - /// std_listener.set_nonblocking(true)?; - /// let listener = TcpListener::from_std(std_listener)?; - /// Ok(()) - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - #[track_caller] - pub fn from_std(listener: net::TcpListener) -> io::Result { - let io = mio::net::TcpListener::from_std(listener); - let io = PollEvented::new(io)?; - Ok(TcpListener { io }) - } - - /// Turns a [`tokio::net::TcpListener`] into a [`std::net::TcpListener`]. - /// - /// The returned [`std::net::TcpListener`] will have nonblocking mode set as - /// `true`. Use [`set_nonblocking`] to change the blocking mode if needed. 
- /// - /// # Examples - /// - /// ```rust,no_run - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let tokio_listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?; - /// let std_listener = tokio_listener.into_std()?; - /// std_listener.set_nonblocking(false)?; - /// Ok(()) - /// } - /// ``` - /// - /// [`tokio::net::TcpListener`]: TcpListener - /// [`std::net::TcpListener`]: std::net::TcpListener - /// [`set_nonblocking`]: fn@std::net::TcpListener::set_nonblocking - pub fn into_std(self) -> io::Result { - #[cfg(unix)] - { - use std::os::unix::io::{FromRawFd, IntoRawFd}; - self.io - .into_inner() - .map(|io| io.into_raw_fd()) - .map(|raw_fd| unsafe { std::net::TcpListener::from_raw_fd(raw_fd) }) - } - - #[cfg(windows)] - { - use std::os::windows::io::{FromRawSocket, IntoRawSocket}; - self.io - .into_inner() - .map(|io| io.into_raw_socket()) - .map(|raw_socket| unsafe { std::net::TcpListener::from_raw_socket(raw_socket) }) - } - - #[cfg(target_os = "wasi")] - { - use std::os::wasi::io::{FromRawFd, IntoRawFd}; - self.io - .into_inner() - .map(|io| io.into_raw_fd()) - .map(|raw_fd| unsafe { std::net::TcpListener::from_raw_fd(raw_fd) }) - } - } - - cfg_not_wasi! { - pub(crate) fn new(listener: mio::net::TcpListener) -> io::Result { - let io = PollEvented::new(listener)?; - Ok(TcpListener { io }) - } - } - - /// Returns the local address that this listener is bound to. - /// - /// This can be useful, for example, when binding to port 0 to figure out - /// which port was actually bound. 
- /// - /// # Examples - /// - /// ```rust,no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:8080").await?; - /// - /// assert_eq!(listener.local_addr()?, - /// SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080))); - /// - /// Ok(()) - /// } - /// ``` - pub fn local_addr(&self) -> io::Result { - self.io.local_addr() - } - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`]. - /// - /// [`set_ttl`]: method@Self::set_ttl - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:0").await?; - /// - /// listener.set_ttl(100).expect("could not set TTL"); - /// assert_eq!(listener.ttl()?, 100); - /// - /// Ok(()) - /// } - /// ``` - pub fn ttl(&self) -> io::Result { - self.io.ttl() - } - - /// Sets the value for the `IP_TTL` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpListener; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:0").await?; - /// - /// listener.set_ttl(100).expect("could not set TTL"); - /// - /// Ok(()) - /// } - /// ``` - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.io.set_ttl(ttl) - } -} - -impl TryFrom for TcpListener { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`TcpListener::from_std(stream)`](TcpListener::from_std). 
- fn try_from(stream: net::TcpListener) -> Result { - Self::from_std(stream) - } -} - -impl fmt::Debug for TcpListener { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.fmt(f) - } -} - -#[cfg(unix)] -mod sys { - use super::TcpListener; - use std::os::unix::prelude::*; - - impl AsRawFd for TcpListener { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } - } - - impl AsFd for TcpListener { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } - } -} - -cfg_unstable! { - #[cfg(target_os = "wasi")] - mod sys { - use super::TcpListener; - use std::os::wasi::prelude::*; - - impl AsRawFd for TcpListener { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } - } - - impl AsFd for TcpListener { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } - } - } -} - -cfg_windows! { - use crate::os::windows::io::{AsRawSocket, RawSocket, AsSocket, BorrowedSocket}; - - impl AsRawSocket for TcpListener { - fn as_raw_socket(&self) -> RawSocket { - self.io.as_raw_socket() - } - } - - impl AsSocket for TcpListener { - fn as_socket(&self) -> BorrowedSocket<'_> { - unsafe { BorrowedSocket::borrow_raw(self.as_raw_socket()) } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,16 +0,0 @@ -//! TCP utility types. - -pub(crate) mod listener; - -cfg_not_wasi! 
{ - pub(crate) mod socket; -} - -mod split; -pub use split::{ReadHalf, WriteHalf}; - -mod split_owned; -pub use split_owned::{OwnedReadHalf, OwnedWriteHalf, ReuniteError}; - -pub(crate) mod stream; -pub(crate) use stream::TcpStream; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/socket.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/socket.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/socket.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/socket.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,846 +0,0 @@ -use crate::net::{TcpListener, TcpStream}; - -use std::fmt; -use std::io; -use std::net::SocketAddr; - -#[cfg(unix)] -use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; -use std::time::Duration; - -cfg_windows! { - use crate::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket, AsSocket, BorrowedSocket}; -} - -cfg_net! { - /// A TCP socket that has not yet been converted to a `TcpStream` or - /// `TcpListener`. - /// - /// `TcpSocket` wraps an operating system socket and enables the caller to - /// configure the socket before establishing a TCP connection or accepting - /// inbound connections. The caller is able to set socket option and explicitly - /// bind the socket with a socket address. - /// - /// The underlying socket is closed when the `TcpSocket` value is dropped. - /// - /// `TcpSocket` should only be used directly if the default configuration used - /// by `TcpStream::connect` and `TcpListener::bind` does not meet the required - /// use case. 
- /// - /// Calling `TcpStream::connect("127.0.0.1:8080")` is equivalent to: - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// let stream = socket.connect(addr).await?; - /// # drop(stream); - /// - /// Ok(()) - /// } - /// ``` - /// - /// Calling `TcpListener::bind("127.0.0.1:8080")` is equivalent to: - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// // On platforms with Berkeley-derived sockets, this allows to quickly - /// // rebind a socket, without needing to wait for the OS to clean up the - /// // previous one. - /// // - /// // On Windows, this allows rebinding sockets which are actively in use, - /// // which allows “socket hijackingâ€, so we explicitly don't set it here. - /// // https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse - /// socket.set_reuseaddr(true)?; - /// socket.bind(addr)?; - /// - /// let listener = socket.listen(1024)?; - /// # drop(listener); - /// - /// Ok(()) - /// } - /// ``` - /// - /// Setting socket options not explicitly provided by `TcpSocket` may be done by - /// accessing the `RawFd`/`RawSocket` using [`AsRawFd`]/[`AsRawSocket`] and - /// setting the option with a crate like [`socket2`]. 
- /// - /// [`RawFd`]: https://doc.rust-lang.org/std/os/unix/io/type.RawFd.html - /// [`RawSocket`]: https://doc.rust-lang.org/std/os/windows/io/type.RawSocket.html - /// [`AsRawFd`]: https://doc.rust-lang.org/std/os/unix/io/trait.AsRawFd.html - /// [`AsRawSocket`]: https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html - /// [`socket2`]: https://docs.rs/socket2/ - #[cfg_attr(docsrs, doc(alias = "connect_std"))] - pub struct TcpSocket { - inner: socket2::Socket, - } -} - -impl TcpSocket { - /// Creates a new socket configured for IPv4. - /// - /// Calls `socket(2)` with `AF_INET` and `SOCK_STREAM`. - /// - /// # Returns - /// - /// On success, the newly created `TcpSocket` is returned. If an error is - /// encountered, it is returned instead. - /// - /// # Examples - /// - /// Create a new IPv4 socket and start listening. - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// let socket = TcpSocket::new_v4()?; - /// socket.bind(addr)?; - /// - /// let listener = socket.listen(128)?; - /// # drop(listener); - /// Ok(()) - /// } - /// ``` - pub fn new_v4() -> io::Result { - TcpSocket::new(socket2::Domain::IPV4) - } - - /// Creates a new socket configured for IPv6. - /// - /// Calls `socket(2)` with `AF_INET6` and `SOCK_STREAM`. - /// - /// # Returns - /// - /// On success, the newly created `TcpSocket` is returned. If an error is - /// encountered, it is returned instead. - /// - /// # Examples - /// - /// Create a new IPv6 socket and start listening. 
- /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "[::1]:8080".parse().unwrap(); - /// let socket = TcpSocket::new_v6()?; - /// socket.bind(addr)?; - /// - /// let listener = socket.listen(128)?; - /// # drop(listener); - /// Ok(()) - /// } - /// ``` - pub fn new_v6() -> io::Result { - TcpSocket::new(socket2::Domain::IPV6) - } - - fn new(domain: socket2::Domain) -> io::Result { - let ty = socket2::Type::STREAM; - #[cfg(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - ))] - let ty = ty.nonblocking(); - let inner = socket2::Socket::new(domain, ty, Some(socket2::Protocol::TCP))?; - #[cfg(not(any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "linux", - target_os = "netbsd", - target_os = "openbsd" - )))] - inner.set_nonblocking(true)?; - Ok(TcpSocket { inner }) - } - - /// Allows the socket to bind to an in-use address. - /// - /// Behavior is platform specific. Refer to the target platform's - /// documentation for more details. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// socket.set_reuseaddr(true)?; - /// socket.bind(addr)?; - /// - /// let listener = socket.listen(1024)?; - /// # drop(listener); - /// - /// Ok(()) - /// } - /// ``` - pub fn set_reuseaddr(&self, reuseaddr: bool) -> io::Result<()> { - self.inner.set_reuse_address(reuseaddr) - } - - /// Retrieves the value set for `SO_REUSEADDR` on this socket. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// socket.set_reuseaddr(true)?; - /// assert!(socket.reuseaddr().unwrap()); - /// socket.bind(addr)?; - /// - /// let listener = socket.listen(1024)?; - /// Ok(()) - /// } - /// ``` - pub fn reuseaddr(&self) -> io::Result { - self.inner.reuse_address() - } - - /// Allows the socket to bind to an in-use port. Only available for unix systems - /// (excluding Solaris & Illumos). - /// - /// Behavior is platform specific. Refer to the target platform's - /// documentation for more details. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// socket.set_reuseport(true)?; - /// socket.bind(addr)?; - /// - /// let listener = socket.listen(1024)?; - /// Ok(()) - /// } - /// ``` - #[cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))] - #[cfg_attr( - docsrs, - doc(cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))) - )] - pub fn set_reuseport(&self, reuseport: bool) -> io::Result<()> { - self.inner.set_reuse_port(reuseport) - } - - /// Allows the socket to bind to an in-use port. Only available for unix systems - /// (excluding Solaris & Illumos). - /// - /// Behavior is platform specific. Refer to the target platform's - /// documentation for more details. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// socket.set_reuseport(true)?; - /// assert!(socket.reuseport().unwrap()); - /// socket.bind(addr)?; - /// - /// let listener = socket.listen(1024)?; - /// Ok(()) - /// } - /// ``` - #[cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))] - #[cfg_attr( - docsrs, - doc(cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))) - )] - pub fn reuseport(&self) -> io::Result { - self.inner.reuse_port() - } - - /// Sets the size of the TCP send buffer on this socket. - /// - /// On most operating systems, this sets the `SO_SNDBUF` socket option. - pub fn set_send_buffer_size(&self, size: u32) -> io::Result<()> { - self.inner.set_send_buffer_size(size as usize) - } - - /// Returns the size of the TCP send buffer for this socket. - /// - /// On most operating systems, this is the value of the `SO_SNDBUF` socket - /// option. - /// - /// Note that if [`set_send_buffer_size`] has been called on this socket - /// previously, the value returned by this function may not be the same as - /// the argument provided to `set_send_buffer_size`. This is for the - /// following reasons: - /// - /// * Most operating systems have minimum and maximum allowed sizes for the - /// send buffer, and will clamp the provided value if it is below the - /// minimum or above the maximum. The minimum and maximum buffer sizes are - /// OS-dependent. - /// * Linux will double the buffer size to account for internal bookkeeping - /// data, and returns the doubled value from `getsockopt(2)`. As per `man - /// 7 socket`: - /// > Sets or gets the maximum socket send buffer in bytes. 
The - /// > kernel doubles this value (to allow space for bookkeeping - /// > overhead) when it is set using `setsockopt(2)`, and this doubled - /// > value is returned by `getsockopt(2)`. - /// - /// [`set_send_buffer_size`]: #method.set_send_buffer_size - pub fn send_buffer_size(&self) -> io::Result { - self.inner.send_buffer_size().map(|n| n as u32) - } - - /// Sets the size of the TCP receive buffer on this socket. - /// - /// On most operating systems, this sets the `SO_RCVBUF` socket option. - pub fn set_recv_buffer_size(&self, size: u32) -> io::Result<()> { - self.inner.set_recv_buffer_size(size as usize) - } - - /// Returns the size of the TCP receive buffer for this socket. - /// - /// On most operating systems, this is the value of the `SO_RCVBUF` socket - /// option. - /// - /// Note that if [`set_recv_buffer_size`] has been called on this socket - /// previously, the value returned by this function may not be the same as - /// the argument provided to `set_send_buffer_size`. This is for the - /// following reasons: - /// - /// * Most operating systems have minimum and maximum allowed sizes for the - /// receive buffer, and will clamp the provided value if it is below the - /// minimum or above the maximum. The minimum and maximum buffer sizes are - /// OS-dependent. - /// * Linux will double the buffer size to account for internal bookkeeping - /// data, and returns the doubled value from `getsockopt(2)`. As per `man - /// 7 socket`: - /// > Sets or gets the maximum socket send buffer in bytes. The - /// > kernel doubles this value (to allow space for bookkeeping - /// > overhead) when it is set using `setsockopt(2)`, and this doubled - /// > value is returned by `getsockopt(2)`. - /// - /// [`set_recv_buffer_size`]: #method.set_recv_buffer_size - pub fn recv_buffer_size(&self) -> io::Result { - self.inner.recv_buffer_size().map(|n| n as u32) - } - - /// Sets the linger duration of this socket by setting the SO_LINGER option. 
- /// - /// This option controls the action taken when a stream has unsent messages and the stream is - /// closed. If SO_LINGER is set, the system shall block the process until it can transmit the - /// data or until the time expires. - /// - /// If SO_LINGER is not specified, and the socket is closed, the system handles the call in a - /// way that allows the process to continue as quickly as possible. - pub fn set_linger(&self, dur: Option) -> io::Result<()> { - self.inner.set_linger(dur) - } - - /// Reads the linger duration for this socket by getting the `SO_LINGER` - /// option. - /// - /// For more information about this option, see [`set_linger`]. - /// - /// [`set_linger`]: TcpSocket::set_linger - pub fn linger(&self) -> io::Result> { - self.inner.linger() - } - - /// Sets the value of the `TCP_NODELAY` option on this socket. - /// - /// If set, this option disables the Nagle algorithm. This means that segments are always - /// sent as soon as possible, even if there is only a small amount of data. When not set, - /// data is buffered until there is a sufficient amount to send out, thereby avoiding - /// the frequent sending of small packets. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// # async fn dox() -> Result<(), Box> { - /// let socket = TcpSocket::new_v4()?; - /// - /// println!("{:?}", socket.nodelay()?); - /// # Ok(()) - /// # } - /// ``` - pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { - self.inner.set_nodelay(nodelay) - } - - /// Gets the value of the `TCP_NODELAY` option on this socket. - /// - /// For more information about this option, see [`set_nodelay`]. 
- /// - /// [`set_nodelay`]: TcpSocket::set_nodelay - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpSocket::new_v4()?; - /// - /// stream.set_nodelay(true)?; - /// # Ok(()) - /// # } - /// ``` - pub fn nodelay(&self) -> io::Result { - self.inner.nodelay() - } - - /// Gets the value of the `IP_TOS` option for this socket. - /// - /// For more information about this option, see [`set_tos`]. - /// - /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or - /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) - /// - /// [`set_tos`]: Self::set_tos - // https://docs.rs/socket2/0.5.3/src/socket2/socket.rs.html#1464 - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - #[cfg_attr( - docsrs, - doc(cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))) - )] - pub fn tos(&self) -> io::Result { - self.inner.tos() - } - - /// Sets the value for the `IP_TOS` option on this socket. - /// - /// This value sets the type-of-service field that is used in every packet - /// sent from this socket. 
- /// - /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or - /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) - // https://docs.rs/socket2/0.5.3/src/socket2/socket.rs.html#1446 - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - #[cfg_attr( - docsrs, - doc(cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))) - )] - pub fn set_tos(&self, tos: u32) -> io::Result<()> { - self.inner.set_tos(tos) - } - - /// Gets the value for the `SO_BINDTODEVICE` option on this socket - /// - /// This value gets the socket binded device's interface name. - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux",))] - #[cfg_attr( - docsrs, - doc(cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux",))) - )] - pub fn device(&self) -> io::Result>> { - self.inner.device() - } - - /// Sets the value for the `SO_BINDTODEVICE` option on this socket - /// - /// If a socket is bound to an interface, only packets received from that - /// particular interface are processed by the socket. Note that this only - /// works for some socket types, particularly `AF_INET` sockets. - /// - /// If `interface` is `None` or an empty string it removes the binding. - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - #[cfg_attr( - docsrs, - doc(cfg(all(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))) - )] - pub fn bind_device(&self, interface: Option<&[u8]>) -> io::Result<()> { - self.inner.bind_device(interface) - } - - /// Gets the local address of this socket. - /// - /// Will fail on windows if called before `bind`. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// socket.bind(addr)?; - /// assert_eq!(socket.local_addr().unwrap().to_string(), "127.0.0.1:8080"); - /// let listener = socket.listen(1024)?; - /// Ok(()) - /// } - /// ``` - pub fn local_addr(&self) -> io::Result { - self.inner.local_addr().and_then(convert_address) - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.inner.take_error() - } - - /// Binds the socket to the given address. - /// - /// This calls the `bind(2)` operating-system function. Behavior is - /// platform specific. Refer to the target platform's documentation for more - /// details. - /// - /// # Examples - /// - /// Bind a socket before listening. - /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// socket.bind(addr)?; - /// - /// let listener = socket.listen(1024)?; - /// # drop(listener); - /// - /// Ok(()) - /// } - /// ``` - pub fn bind(&self, addr: SocketAddr) -> io::Result<()> { - self.inner.bind(&addr.into()) - } - - /// Establishes a TCP connection with a peer at the specified socket address. - /// - /// The `TcpSocket` is consumed. Once the connection is established, a - /// connected [`TcpStream`] is returned. If the connection fails, the - /// encountered error is returned. - /// - /// [`TcpStream`]: TcpStream - /// - /// This calls the `connect(2)` operating-system function. Behavior is - /// platform specific. Refer to the target platform's documentation for more - /// details. - /// - /// # Examples - /// - /// Connecting to a peer. 
- /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// let stream = socket.connect(addr).await?; - /// # drop(stream); - /// - /// Ok(()) - /// } - /// ``` - pub async fn connect(self, addr: SocketAddr) -> io::Result { - if let Err(err) = self.inner.connect(&addr.into()) { - #[cfg(unix)] - if err.raw_os_error() != Some(libc::EINPROGRESS) { - return Err(err); - } - #[cfg(windows)] - if err.kind() != io::ErrorKind::WouldBlock { - return Err(err); - } - } - #[cfg(unix)] - let mio = { - use std::os::unix::io::{FromRawFd, IntoRawFd}; - - let raw_fd = self.inner.into_raw_fd(); - unsafe { mio::net::TcpStream::from_raw_fd(raw_fd) } - }; - - #[cfg(windows)] - let mio = { - use std::os::windows::io::{FromRawSocket, IntoRawSocket}; - - let raw_socket = self.inner.into_raw_socket(); - unsafe { mio::net::TcpStream::from_raw_socket(raw_socket) } - }; - - TcpStream::connect_mio(mio).await - } - - /// Converts the socket into a `TcpListener`. - /// - /// `backlog` defines the maximum number of pending connections are queued - /// by the operating system at any given time. Connection are removed from - /// the queue with [`TcpListener::accept`]. When the queue is full, the - /// operating-system will start rejecting connections. - /// - /// [`TcpListener::accept`]: TcpListener::accept - /// - /// This calls the `listen(2)` operating-system function, marking the socket - /// as a passive socket. Behavior is platform specific. Refer to the target - /// platform's documentation for more details. - /// - /// # Examples - /// - /// Create a `TcpListener`. 
- /// - /// ```no_run - /// use tokio::net::TcpSocket; - /// - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let addr = "127.0.0.1:8080".parse().unwrap(); - /// - /// let socket = TcpSocket::new_v4()?; - /// socket.bind(addr)?; - /// - /// let listener = socket.listen(1024)?; - /// # drop(listener); - /// - /// Ok(()) - /// } - /// ``` - pub fn listen(self, backlog: u32) -> io::Result { - self.inner.listen(backlog as i32)?; - #[cfg(unix)] - let mio = { - use std::os::unix::io::{FromRawFd, IntoRawFd}; - - let raw_fd = self.inner.into_raw_fd(); - unsafe { mio::net::TcpListener::from_raw_fd(raw_fd) } - }; - - #[cfg(windows)] - let mio = { - use std::os::windows::io::{FromRawSocket, IntoRawSocket}; - - let raw_socket = self.inner.into_raw_socket(); - unsafe { mio::net::TcpListener::from_raw_socket(raw_socket) } - }; - - TcpListener::new(mio) - } - - /// Converts a [`std::net::TcpStream`] into a `TcpSocket`. The provided - /// socket must not have been connected prior to calling this function. This - /// function is typically used together with crates such as [`socket2`] to - /// configure socket options that are not available on `TcpSocket`. - /// - /// [`std::net::TcpStream`]: struct@std::net::TcpStream - /// [`socket2`]: https://docs.rs/socket2/ - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. Otherwise all I/O operations on the socket - /// will block the thread, which will cause unexpected behavior. - /// Non-blocking mode can be set using [`set_nonblocking`]. 
- /// - /// [`set_nonblocking`]: std::net::TcpStream::set_nonblocking - /// - /// # Examples - /// - /// ``` - /// use tokio::net::TcpSocket; - /// use socket2::{Domain, Socket, Type}; - /// - /// #[tokio::main] - /// async fn main() -> std::io::Result<()> { - /// let socket2_socket = Socket::new(Domain::IPV4, Type::STREAM, None)?; - /// socket2_socket.set_nonblocking(true)?; - /// - /// let socket = TcpSocket::from_std_stream(socket2_socket.into()); - /// - /// Ok(()) - /// } - /// ``` - pub fn from_std_stream(std_stream: std::net::TcpStream) -> TcpSocket { - #[cfg(unix)] - { - use std::os::unix::io::{FromRawFd, IntoRawFd}; - - let raw_fd = std_stream.into_raw_fd(); - unsafe { TcpSocket::from_raw_fd(raw_fd) } - } - - #[cfg(windows)] - { - use std::os::windows::io::{FromRawSocket, IntoRawSocket}; - - let raw_socket = std_stream.into_raw_socket(); - unsafe { TcpSocket::from_raw_socket(raw_socket) } - } - } -} - -fn convert_address(address: socket2::SockAddr) -> io::Result { - match address.as_socket() { - Some(address) => Ok(address), - None => Err(io::Error::new( - io::ErrorKind::InvalidInput, - "invalid address family (not IPv4 or IPv6)", - )), - } -} - -impl fmt::Debug for TcpSocket { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(fmt) - } -} - -#[cfg(unix)] -impl AsRawFd for TcpSocket { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -#[cfg(unix)] -impl AsFd for TcpSocket { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } -} - -#[cfg(unix)] -impl FromRawFd for TcpSocket { - /// Converts a `RawFd` to a `TcpSocket`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. 
- unsafe fn from_raw_fd(fd: RawFd) -> TcpSocket { - let inner = socket2::Socket::from_raw_fd(fd); - TcpSocket { inner } - } -} - -#[cfg(unix)] -impl IntoRawFd for TcpSocket { - fn into_raw_fd(self) -> RawFd { - self.inner.into_raw_fd() - } -} - -cfg_windows! { - impl IntoRawSocket for TcpSocket { - fn into_raw_socket(self) -> RawSocket { - self.inner.into_raw_socket() - } - } - - impl AsRawSocket for TcpSocket { - fn as_raw_socket(&self) -> RawSocket { - self.inner.as_raw_socket() - } - } - - impl AsSocket for TcpSocket { - fn as_socket(&self) -> BorrowedSocket<'_> { - unsafe { BorrowedSocket::borrow_raw(self.as_raw_socket()) } - } - } - - impl FromRawSocket for TcpSocket { - /// Converts a `RawSocket` to a `TcpStream`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. - unsafe fn from_raw_socket(socket: RawSocket) -> TcpSocket { - let inner = socket2::Socket::from_raw_socket(socket); - TcpSocket { inner } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/split_owned.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/split_owned.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/split_owned.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/split_owned.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,507 +0,0 @@ -//! `TcpStream` owned split support. -//! -//! A `TcpStream` can be split into an `OwnedReadHalf` and a `OwnedWriteHalf` -//! with the `TcpStream::into_split` method. `OwnedReadHalf` implements -//! `AsyncRead` while `OwnedWriteHalf` implements `AsyncWrite`. -//! -//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized -//! split has no associated overhead and enforces all invariants at the type -//! level. 
- -use crate::future::poll_fn; -use crate::io::{AsyncRead, AsyncWrite, Interest, ReadBuf, Ready}; -use crate::net::TcpStream; - -use std::error::Error; -use std::net::{Shutdown, SocketAddr}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::{fmt, io}; - -cfg_io_util! { - use bytes::BufMut; -} - -/// Owned read half of a [`TcpStream`], created by [`into_split`]. -/// -/// Reading from an `OwnedReadHalf` is usually done using the convenience methods found -/// on the [`AsyncReadExt`] trait. -/// -/// [`TcpStream`]: TcpStream -/// [`into_split`]: TcpStream::into_split() -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -#[derive(Debug)] -pub struct OwnedReadHalf { - inner: Arc, -} - -/// Owned write half of a [`TcpStream`], created by [`into_split`]. -/// -/// Note that in the [`AsyncWrite`] implementation of this type, [`poll_shutdown`] will -/// shut down the TCP stream in the write direction. Dropping the write half -/// will also shut down the write half of the TCP stream. -/// -/// Writing to an `OwnedWriteHalf` is usually done using the convenience methods found -/// on the [`AsyncWriteExt`] trait. 
-/// -/// [`TcpStream`]: TcpStream -/// [`into_split`]: TcpStream::into_split() -/// [`AsyncWrite`]: trait@crate::io::AsyncWrite -/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -#[derive(Debug)] -pub struct OwnedWriteHalf { - inner: Arc, - shutdown_on_drop: bool, -} - -pub(crate) fn split_owned(stream: TcpStream) -> (OwnedReadHalf, OwnedWriteHalf) { - let arc = Arc::new(stream); - let read = OwnedReadHalf { - inner: Arc::clone(&arc), - }; - let write = OwnedWriteHalf { - inner: arc, - shutdown_on_drop: true, - }; - (read, write) -} - -pub(crate) fn reunite( - read: OwnedReadHalf, - write: OwnedWriteHalf, -) -> Result { - if Arc::ptr_eq(&read.inner, &write.inner) { - write.forget(); - // This unwrap cannot fail as the api does not allow creating more than two Arcs, - // and we just dropped the other half. - Ok(Arc::try_unwrap(read.inner).expect("TcpStream: try_unwrap failed in reunite")) - } else { - Err(ReuniteError(read, write)) - } -} - -/// Error indicating that two halves were not from the same socket, and thus could -/// not be reunited. -#[derive(Debug)] -pub struct ReuniteError(pub OwnedReadHalf, pub OwnedWriteHalf); - -impl fmt::Display for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "tried to reunite halves that are not from the same socket" - ) - } -} - -impl Error for ReuniteError {} - -impl OwnedReadHalf { - /// Attempts to put the two halves of a `TcpStream` back together and - /// recover the original socket. Succeeds only if the two halves - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: TcpStream::into_split() - pub fn reunite(self, other: OwnedWriteHalf) -> Result { - reunite(self, other) - } - - /// Attempt to receive data on the socket, without removing that data from - /// the queue, registering the current task for wakeup if data is not yet - /// available. 
- /// - /// Note that on multiple calls to `poll_peek` or `poll_read`, only the - /// `Waker` from the `Context` passed to the most recent call is scheduled - /// to receive a wakeup. - /// - /// See the [`TcpStream::poll_peek`] level documentation for more details. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, ReadBuf}; - /// use tokio::net::TcpStream; - /// - /// use futures::future::poll_fn; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let stream = TcpStream::connect("127.0.0.1:8000").await?; - /// let (mut read_half, _) = stream.into_split(); - /// let mut buf = [0; 10]; - /// let mut buf = ReadBuf::new(&mut buf); - /// - /// poll_fn(|cx| { - /// read_half.poll_peek(cx, &mut buf) - /// }).await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// [`TcpStream::poll_peek`]: TcpStream::poll_peek - pub fn poll_peek( - &mut self, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.inner.poll_peek(cx, buf) - } - - /// Receives data on the socket from the remote address to which it is - /// connected, without removing that data from the queue. On success, - /// returns the number of bytes peeked. - /// - /// See the [`TcpStream::peek`] level documentation for more details. 
- /// - /// [`TcpStream::peek`]: TcpStream::peek - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::io::AsyncReadExt; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// let (mut read_half, _) = stream.into_split(); - /// - /// let mut b1 = [0; 10]; - /// let mut b2 = [0; 10]; - /// - /// // Peek at the data - /// let n = read_half.peek(&mut b1).await?; - /// - /// // Read the data - /// assert_eq!(n, read_half.read(&mut b2[..n]).await?); - /// assert_eq!(&b1[..n], &b2[..n]); - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`read`] method is defined on the [`AsyncReadExt`] trait. - /// - /// [`read`]: fn@crate::io::AsyncReadExt::read - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result { - let mut buf = ReadBuf::new(buf); - poll_fn(|cx| self.poll_peek(cx, &mut buf)).await - } - - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with [`try_read()`]. It can be used instead - /// of [`readable()`] to check the returned ready set for [`Ready::READABLE`] - /// and [`Ready::READ_CLOSED`] events. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// This function is equivalent to [`TcpStream::ready`]. - /// - /// [`try_read()`]: Self::try_read - /// [`readable()`]: Self::readable - /// - /// # Cancel safety - /// - /// This method is cancel safe. 
Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn ready(&self, interest: Interest) -> io::Result { - self.inner.ready(interest).await - } - - /// Waits for the socket to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_read()`. - /// - /// This function is also equivalent to [`TcpStream::ready`]. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn readable(&self) -> io::Result<()> { - self.inner.readable().await - } - - /// Tries to read data from the stream into the provided buffer, returning how - /// many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. The stream's read half is closed and will no longer yield data. - /// 2. The specified buffer was 0 bytes in length. - /// - /// If the stream is not ready to read data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - self.inner.try_read(buf) - } - - /// Tries to read data from the stream into the provided buffers, returning - /// how many bytes were read. - /// - /// Data is copied to fill each buffer in order, with the final buffer - /// written to possibly being only partially filled. This method behaves - /// equivalently to a single call to [`try_read()`] with concatenated - /// buffers. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_vectored()` is non-blocking, the buffer does not have to be - /// stored by the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`try_read()`]: Self::try_read() - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { - self.inner.try_read_vectored(bufs) - } - - cfg_io_util! { - /// Tries to read data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. 
- /// - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read_buf(&self, buf: &mut B) -> io::Result { - self.inner.try_read_buf(buf) - } - } - - /// Returns the remote address that this stream is connected to. - pub fn peer_addr(&self) -> io::Result { - self.inner.peer_addr() - } - - /// Returns the local address that this stream is bound to. - pub fn local_addr(&self) -> io::Result { - self.inner.local_addr() - } -} - -impl AsyncRead for OwnedReadHalf { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.inner.poll_read_priv(cx, buf) - } -} - -impl OwnedWriteHalf { - /// Attempts to put the two halves of a `TcpStream` back together and - /// recover the original socket. Succeeds only if the two halves - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: TcpStream::into_split() - pub fn reunite(self, other: OwnedReadHalf) -> Result { - reunite(other, self) - } - - /// Destroys the write half, but don't close the write half of the stream - /// until the read half is dropped. If the read half has already been - /// dropped, this closes the stream. - pub fn forget(mut self) { - self.shutdown_on_drop = false; - drop(self); - } - - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with [`try_write()`]. It can be used instead - /// of [`writable()`] to check the returned ready set for [`Ready::WRITABLE`] - /// and [`Ready::WRITE_CLOSED`] events. - /// - /// The function may complete without the socket being ready. 
This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// This function is equivalent to [`TcpStream::ready`]. - /// - /// [`try_write()`]: Self::try_write - /// [`writable()`]: Self::writable - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn ready(&self, interest: Interest) -> io::Result { - self.inner.ready(interest).await - } - - /// Waits for the socket to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually - /// paired with `try_write()`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn writable(&self) -> io::Result<()> { - self.inner.writable().await - } - - /// Tries to write a buffer to the stream, returning how many bytes were - /// written. - /// - /// The function will attempt to write the entire contents of `buf`, but - /// only part of the buffer may be written. - /// - /// This function is usually paired with `writable()`. - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- pub fn try_write(&self, buf: &[u8]) -> io::Result { - self.inner.try_write(buf) - } - - /// Tries to write several buffers to the stream, returning how many bytes - /// were written. - /// - /// Data is written from each buffer in order, with the final buffer read - /// from possible being only partially consumed. This method behaves - /// equivalently to a single call to [`try_write()`] with concatenated - /// buffers. - /// - /// This function is usually paired with `writable()`. - /// - /// [`try_write()`]: Self::try_write() - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_write_vectored(&self, bufs: &[io::IoSlice<'_>]) -> io::Result { - self.inner.try_write_vectored(bufs) - } - - /// Returns the remote address that this stream is connected to. - pub fn peer_addr(&self) -> io::Result { - self.inner.peer_addr() - } - - /// Returns the local address that this stream is bound to. - pub fn local_addr(&self) -> io::Result { - self.inner.local_addr() - } -} - -impl Drop for OwnedWriteHalf { - fn drop(&mut self) { - if self.shutdown_on_drop { - let _ = self.inner.shutdown_std(Shutdown::Write); - } - } -} - -impl AsyncWrite for OwnedWriteHalf { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.inner.poll_write_priv(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.inner.poll_write_vectored_priv(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.inner.is_write_vectored() - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // tcp flush is a no-op - Poll::Ready(Ok(())) - } - - // `poll_shutdown` on a write half shutdowns the stream in the "write" direction. 
- fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - let res = self.inner.shutdown_std(Shutdown::Write); - if res.is_ok() { - Pin::into_inner(self).shutdown_on_drop = false; - } - res.into() - } -} - -impl AsRef for OwnedReadHalf { - fn as_ref(&self) -> &TcpStream { - &self.inner - } -} - -impl AsRef for OwnedWriteHalf { - fn as_ref(&self) -> &TcpStream { - &self.inner - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/split.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/split.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/split.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/split.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,423 +0,0 @@ -//! `TcpStream` split support. -//! -//! A `TcpStream` can be split into a `ReadHalf` and a -//! `WriteHalf` with the `TcpStream::split` method. `ReadHalf` -//! implements `AsyncRead` while `WriteHalf` implements `AsyncWrite`. -//! -//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized -//! split has no associated overhead and enforces all invariants at the type -//! level. - -use crate::future::poll_fn; -use crate::io::{AsyncRead, AsyncWrite, Interest, ReadBuf, Ready}; -use crate::net::TcpStream; - -use std::io; -use std::net::{Shutdown, SocketAddr}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - use bytes::BufMut; -} - -/// Borrowed read half of a [`TcpStream`], created by [`split`]. -/// -/// Reading from a `ReadHalf` is usually done using the convenience methods found on the -/// [`AsyncReadExt`] trait. -/// -/// [`TcpStream`]: TcpStream -/// [`split`]: TcpStream::split() -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -#[derive(Debug)] -pub struct ReadHalf<'a>(&'a TcpStream); - -/// Borrowed write half of a [`TcpStream`], created by [`split`]. 
-/// -/// Note that in the [`AsyncWrite`] implementation of this type, [`poll_shutdown`] will -/// shut down the TCP stream in the write direction. -/// -/// Writing to an `WriteHalf` is usually done using the convenience methods found -/// on the [`AsyncWriteExt`] trait. -/// -/// [`TcpStream`]: TcpStream -/// [`split`]: TcpStream::split() -/// [`AsyncWrite`]: trait@crate::io::AsyncWrite -/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -#[derive(Debug)] -pub struct WriteHalf<'a>(&'a TcpStream); - -pub(crate) fn split(stream: &mut TcpStream) -> (ReadHalf<'_>, WriteHalf<'_>) { - (ReadHalf(&*stream), WriteHalf(&*stream)) -} - -impl ReadHalf<'_> { - /// Attempts to receive data on the socket, without removing that data from - /// the queue, registering the current task for wakeup if data is not yet - /// available. - /// - /// Note that on multiple calls to `poll_peek` or `poll_read`, only the - /// `Waker` from the `Context` passed to the most recent call is scheduled - /// to receive a wakeup. - /// - /// See the [`TcpStream::poll_peek`] level documentation for more details. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, ReadBuf}; - /// use tokio::net::TcpStream; - /// - /// use futures::future::poll_fn; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let mut stream = TcpStream::connect("127.0.0.1:8000").await?; - /// let (mut read_half, _) = stream.split(); - /// let mut buf = [0; 10]; - /// let mut buf = ReadBuf::new(&mut buf); - /// - /// poll_fn(|cx| { - /// read_half.poll_peek(cx, &mut buf) - /// }).await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// [`TcpStream::poll_peek`]: TcpStream::poll_peek - pub fn poll_peek( - &mut self, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.0.poll_peek(cx, buf) - } - - /// Receives data on the socket from the remote address to which it is - /// connected, without removing that data from the queue. On success, - /// returns the number of bytes peeked. - /// - /// See the [`TcpStream::peek`] level documentation for more details. - /// - /// [`TcpStream::peek`]: TcpStream::peek - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::io::AsyncReadExt; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; - /// let (mut read_half, _) = stream.split(); - /// - /// let mut b1 = [0; 10]; - /// let mut b2 = [0; 10]; - /// - /// // Peek at the data - /// let n = read_half.peek(&mut b1).await?; - /// - /// // Read the data - /// assert_eq!(n, read_half.read(&mut b2[..n]).await?); - /// assert_eq!(&b1[..n], &b2[..n]); - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`read`] method is defined on the [`AsyncReadExt`] trait. 
- /// - /// [`read`]: fn@crate::io::AsyncReadExt::read - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result { - let mut buf = ReadBuf::new(buf); - poll_fn(|cx| self.poll_peek(cx, &mut buf)).await - } - - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with [`try_read()`]. It can be used instead - /// of [`readable()`] to check the returned ready set for [`Ready::READABLE`] - /// and [`Ready::READ_CLOSED`] events. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// This function is equivalent to [`TcpStream::ready`]. - /// - /// [`try_read()`]: Self::try_read - /// [`readable()`]: Self::readable - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn ready(&self, interest: Interest) -> io::Result { - self.0.ready(interest).await - } - - /// Waits for the socket to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_read()`. - /// - /// This function is also equivalent to [`TcpStream::ready`]. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read that fails with `WouldBlock` or - /// `Poll::Pending`. 
- pub async fn readable(&self) -> io::Result<()> { - self.0.readable().await - } - - /// Tries to read data from the stream into the provided buffer, returning how - /// many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. The stream's read half is closed and will no longer yield data. - /// 2. The specified buffer was 0 bytes in length. - /// - /// If the stream is not ready to read data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - self.0.try_read(buf) - } - - /// Tries to read data from the stream into the provided buffers, returning - /// how many bytes were read. - /// - /// Data is copied to fill each buffer in order, with the final buffer - /// written to possibly being only partially filled. This method behaves - /// equivalently to a single call to [`try_read()`] with concatenated - /// buffers. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_vectored()` is non-blocking, the buffer does not have to be - /// stored by the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. 
- /// - /// [`try_read()`]: Self::try_read() - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { - self.0.try_read_vectored(bufs) - } - - cfg_io_util! { - /// Tries to read data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read_buf(&self, buf: &mut B) -> io::Result { - self.0.try_read_buf(buf) - } - } - - /// Returns the remote address that this stream is connected to. - pub fn peer_addr(&self) -> io::Result { - self.0.peer_addr() - } - - /// Returns the local address that this stream is bound to. - pub fn local_addr(&self) -> io::Result { - self.0.local_addr() - } -} - -impl WriteHalf<'_> { - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with [`try_write()`]. 
It can be used instead - /// of [`writable()`] to check the returned ready set for [`Ready::WRITABLE`] - /// and [`Ready::WRITE_CLOSED`] events. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// This function is equivalent to [`TcpStream::ready`]. - /// - /// [`try_write()`]: Self::try_write - /// [`writable()`]: Self::writable - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn ready(&self, interest: Interest) -> io::Result { - self.0.ready(interest).await - } - - /// Waits for the socket to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually - /// paired with `try_write()`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn writable(&self) -> io::Result<()> { - self.0.writable().await - } - - /// Tries to write a buffer to the stream, returning how many bytes were - /// written. - /// - /// The function will attempt to write the entire contents of `buf`, but - /// only part of the buffer may be written. - /// - /// This function is usually paired with `writable()`. - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. 
If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_write(&self, buf: &[u8]) -> io::Result { - self.0.try_write(buf) - } - - /// Tries to write several buffers to the stream, returning how many bytes - /// were written. - /// - /// Data is written from each buffer in order, with the final buffer read - /// from possible being only partially consumed. This method behaves - /// equivalently to a single call to [`try_write()`] with concatenated - /// buffers. - /// - /// This function is usually paired with `writable()`. - /// - /// [`try_write()`]: Self::try_write() - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_write_vectored(&self, bufs: &[io::IoSlice<'_>]) -> io::Result { - self.0.try_write_vectored(bufs) - } - - /// Returns the remote address that this stream is connected to. - pub fn peer_addr(&self) -> io::Result { - self.0.peer_addr() - } - - /// Returns the local address that this stream is bound to. 
- pub fn local_addr(&self) -> io::Result { - self.0.local_addr() - } -} - -impl AsyncRead for ReadHalf<'_> { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.0.poll_read_priv(cx, buf) - } -} - -impl AsyncWrite for WriteHalf<'_> { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.0.poll_write_priv(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.0.poll_write_vectored_priv(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.0.is_write_vectored() - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // tcp flush is a no-op - Poll::Ready(Ok(())) - } - - // `poll_shutdown` on a write half shutdowns the stream in the "write" direction. - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.0.shutdown_std(Shutdown::Write).into() - } -} - -impl AsRef for ReadHalf<'_> { - fn as_ref(&self) -> &TcpStream { - self.0 - } -} - -impl AsRef for WriteHalf<'_> { - fn as_ref(&self) -> &TcpStream { - self.0 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/stream.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/stream.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/tcp/stream.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/tcp/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1420 +0,0 @@ -cfg_not_wasi! 
{ - use crate::future::poll_fn; - use crate::net::{to_socket_addrs, ToSocketAddrs}; - use std::time::Duration; -} - -use crate::io::{AsyncRead, AsyncWrite, Interest, PollEvented, ReadBuf, Ready}; -use crate::net::tcp::split::{split, ReadHalf, WriteHalf}; -use crate::net::tcp::split_owned::{split_owned, OwnedReadHalf, OwnedWriteHalf}; - -use std::fmt; -use std::io; -use std::net::{Shutdown, SocketAddr}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - use bytes::BufMut; -} - -cfg_net! { - /// A TCP stream between a local and a remote socket. - /// - /// A TCP stream can either be created by connecting to an endpoint, via the - /// [`connect`] method, or by [accepting] a connection from a [listener]. A - /// TCP stream can also be created via the [`TcpSocket`] type. - /// - /// Reading and writing to a `TcpStream` is usually done using the - /// convenience methods found on the [`AsyncReadExt`] and [`AsyncWriteExt`] - /// traits. - /// - /// [`connect`]: method@TcpStream::connect - /// [accepting]: method@crate::net::TcpListener::accept - /// [listener]: struct@crate::net::TcpListener - /// [`TcpSocket`]: struct@crate::net::TcpSocket - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::io::AsyncWriteExt; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// // Write some data. - /// stream.write_all(b"hello world!").await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. 
- /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - /// - /// To shut down the stream in the write direction, you can call the - /// [`shutdown()`] method. This will cause the other peer to receive a read of - /// length 0, indicating that no more data will be sent. This only closes - /// the stream in one direction. - /// - /// [`shutdown()`]: fn@crate::io::AsyncWriteExt::shutdown - pub struct TcpStream { - io: PollEvented, - } -} - -impl TcpStream { - cfg_not_wasi! { - /// Opens a TCP connection to a remote host. - /// - /// `addr` is an address of the remote host. Anything which implements the - /// [`ToSocketAddrs`] trait can be supplied as the address. If `addr` - /// yields multiple addresses, connect will be attempted with each of the - /// addresses until a connection is successful. If none of the addresses - /// result in a successful connection, the error returned from the last - /// connection attempt (the last address) is returned. - /// - /// To configure the socket before connecting, you can use the [`TcpSocket`] - /// type. - /// - /// [`ToSocketAddrs`]: trait@crate::net::ToSocketAddrs - /// [`TcpSocket`]: struct@crate::net::TcpSocket - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::io::AsyncWriteExt; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// // Write some data. - /// stream.write_all(b"hello world!").await?; - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. 
- /// - /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all - /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt - pub async fn connect(addr: A) -> io::Result { - let addrs = to_socket_addrs(addr).await?; - - let mut last_err = None; - - for addr in addrs { - match TcpStream::connect_addr(addr).await { - Ok(stream) => return Ok(stream), - Err(e) => last_err = Some(e), - } - } - - Err(last_err.unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve to any address", - ) - })) - } - - /// Establishes a connection to the specified `addr`. - async fn connect_addr(addr: SocketAddr) -> io::Result { - let sys = mio::net::TcpStream::connect(addr)?; - TcpStream::connect_mio(sys).await - } - - pub(crate) async fn connect_mio(sys: mio::net::TcpStream) -> io::Result { - let stream = TcpStream::new(sys)?; - - // Once we've connected, wait for the stream to be writable as - // that's when the actual connection has been initiated. Once we're - // writable we check for `take_socket_error` to see if the connect - // actually hit an error or not. - // - // If all that succeeded then we ship everything on up. - poll_fn(|cx| stream.io.registration().poll_write_ready(cx)).await?; - - if let Some(e) = stream.io.take_error()? { - return Err(e); - } - - Ok(stream) - } - } - - pub(crate) fn new(connected: mio::net::TcpStream) -> io::Result { - let io = PollEvented::new(connected)?; - Ok(TcpStream { io }) - } - - /// Creates new `TcpStream` from a `std::net::TcpStream`. - /// - /// This function is intended to be used to wrap a TCP stream from the - /// standard library in the Tokio equivalent. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the stream is in - /// non-blocking mode. Otherwise all I/O operations on the stream - /// will block the thread, which will cause unexpected behavior. - /// Non-blocking mode can be set using [`set_nonblocking`]. 
- /// - /// [`set_nonblocking`]: std::net::TcpStream::set_nonblocking - /// - /// # Examples - /// - /// ```rust,no_run - /// use std::error::Error; - /// use tokio::net::TcpStream; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let std_stream = std::net::TcpStream::connect("127.0.0.1:34254")?; - /// std_stream.set_nonblocking(true)?; - /// let stream = TcpStream::from_std(std_stream)?; - /// Ok(()) - /// } - /// ``` - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - #[track_caller] - pub fn from_std(stream: std::net::TcpStream) -> io::Result { - let io = mio::net::TcpStream::from_std(stream); - let io = PollEvented::new(io)?; - Ok(TcpStream { io }) - } - - /// Turns a [`tokio::net::TcpStream`] into a [`std::net::TcpStream`]. - /// - /// The returned [`std::net::TcpStream`] will have nonblocking mode set as `true`. - /// Use [`set_nonblocking`] to change the blocking mode if needed. 
- /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::io::Read; - /// use tokio::net::TcpListener; - /// # use tokio::net::TcpStream; - /// # use tokio::io::AsyncWriteExt; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let mut data = [0u8; 12]; - /// let listener = TcpListener::bind("127.0.0.1:34254").await?; - /// # let handle = tokio::spawn(async { - /// # let mut stream: TcpStream = TcpStream::connect("127.0.0.1:34254").await.unwrap(); - /// # stream.write(b"Hello world!").await.unwrap(); - /// # }); - /// let (tokio_tcp_stream, _) = listener.accept().await?; - /// let mut std_tcp_stream = tokio_tcp_stream.into_std()?; - /// # handle.await.expect("The task being joined has panicked"); - /// std_tcp_stream.set_nonblocking(false)?; - /// std_tcp_stream.read_exact(&mut data)?; - /// # assert_eq!(b"Hello world!", &data); - /// Ok(()) - /// } - /// ``` - /// [`tokio::net::TcpStream`]: TcpStream - /// [`std::net::TcpStream`]: std::net::TcpStream - /// [`set_nonblocking`]: fn@std::net::TcpStream::set_nonblocking - pub fn into_std(self) -> io::Result { - #[cfg(unix)] - { - use std::os::unix::io::{FromRawFd, IntoRawFd}; - self.io - .into_inner() - .map(|io| io.into_raw_fd()) - .map(|raw_fd| unsafe { std::net::TcpStream::from_raw_fd(raw_fd) }) - } - - #[cfg(windows)] - { - use std::os::windows::io::{FromRawSocket, IntoRawSocket}; - self.io - .into_inner() - .map(|io| io.into_raw_socket()) - .map(|raw_socket| unsafe { std::net::TcpStream::from_raw_socket(raw_socket) }) - } - - #[cfg(target_os = "wasi")] - { - use std::os::wasi::io::{FromRawFd, IntoRawFd}; - self.io - .into_inner() - .map(|io| io.into_raw_fd()) - .map(|raw_fd| unsafe { std::net::TcpStream::from_raw_fd(raw_fd) }) - } - } - - /// Returns the local address that this stream is bound to. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.local_addr()?); - /// # Ok(()) - /// # } - /// ``` - pub fn local_addr(&self) -> io::Result { - self.io.local_addr() - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.io.take_error() - } - - /// Returns the remote address that this stream is connected to. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.peer_addr()?); - /// # Ok(()) - /// # } - /// ``` - pub fn peer_addr(&self) -> io::Result { - self.io.peer_addr() - } - - /// Attempts to receive data on the socket, without removing that data from - /// the queue, registering the current task for wakeup if data is not yet - /// available. - /// - /// Note that on multiple calls to `poll_peek`, `poll_read` or - /// `poll_read_ready`, only the `Waker` from the `Context` passed to the - /// most recent call is scheduled to receive a wakeup. (However, - /// `poll_write` retains a second, independent waker.) - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if data is not yet available. - /// * `Poll::Ready(Ok(n))` if data is available. `n` is the number of bytes peeked. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::io::{self, ReadBuf}; - /// use tokio::net::TcpStream; - /// - /// use futures::future::poll_fn; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let stream = TcpStream::connect("127.0.0.1:8000").await?; - /// let mut buf = [0; 10]; - /// let mut buf = ReadBuf::new(&mut buf); - /// - /// poll_fn(|cx| { - /// stream.poll_peek(cx, &mut buf) - /// }).await?; - /// - /// Ok(()) - /// } - /// ``` - pub fn poll_peek( - &self, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - loop { - let ev = ready!(self.io.registration().poll_read_ready(cx))?; - - let b = unsafe { - &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]) - }; - - match self.io.peek(b) { - Ok(ret) => { - unsafe { buf.assume_init(ret) }; - buf.advance(ret); - return Poll::Ready(Ok(ret)); - } - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.registration().clear_readiness(ev); - } - Err(e) => return Poll::Ready(Err(e)), - } - } - } - - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with `try_read()` or `try_write()`. It - /// can be used to concurrently read / write to the same socket on a single - /// task without splitting the socket. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. 
- /// - /// # Examples - /// - /// Concurrently read and write to the stream on the same task without - /// splitting. - /// - /// ```no_run - /// use tokio::io::Interest; - /// use tokio::net::TcpStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// loop { - /// let ready = stream.ready(Interest::READABLE | Interest::WRITABLE).await?; - /// - /// if ready.is_readable() { - /// let mut data = vec![0; 1024]; - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_read(&mut data) { - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// - /// } - /// - /// if ready.is_writable() { - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_write(b"hello world") { - /// Ok(n) => { - /// println!("write {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub async fn ready(&self, interest: Interest) -> io::Result { - let event = self.io.registration().readiness(interest).await?; - Ok(event.ready) - } - - /// Waits for the socket to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_read()`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read that fails with `WouldBlock` or - /// `Poll::Pending`. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// let mut msg = vec![0; 1024]; - /// - /// loop { - /// // Wait for the socket to be readable - /// stream.readable().await?; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_read(&mut msg) { - /// Ok(n) => { - /// msg.truncate(n); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// println!("GOT = {:?}", msg); - /// Ok(()) - /// } - /// ``` - pub async fn readable(&self) -> io::Result<()> { - self.ready(Interest::READABLE).await?; - Ok(()) - } - - /// Polls for read readiness. - /// - /// If the tcp stream is not currently ready for reading, this method will - /// store a clone of the `Waker` from the provided `Context`. When the tcp - /// stream becomes ready for reading, `Waker::wake` will be called on the - /// waker. - /// - /// Note that on multiple calls to `poll_read_ready`, `poll_read` or - /// `poll_peek`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. (However, - /// `poll_write_ready` retains a second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`readable`] is not feasible. Where possible, using [`readable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the tcp stream is not ready for reading. - /// * `Poll::Ready(Ok(()))` if the tcp stream is ready for reading. 
- /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`readable`]: method@Self::readable - pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_read_ready(cx).map_ok(|_| ()) - } - - /// Tries to read data from the stream into the provided buffer, returning how - /// many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: TcpStream::readable() - /// [`ready()`]: TcpStream::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. The stream's read half is closed and will no longer yield data. - /// 2. The specified buffer was 0 bytes in length. - /// - /// If the stream is not ready to read data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// stream.readable().await?; - /// - /// // Creating the buffer **after** the `await` prevents it from - /// // being stored in the async task. - /// let mut buf = [0; 4096]; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match stream.try_read(&mut buf) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - use std::io::Read; - - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read(buf)) - } - - /// Tries to read data from the stream into the provided buffers, returning - /// how many bytes were read. - /// - /// Data is copied to fill each buffer in order, with the final buffer - /// written to possibly being only partially filled. This method behaves - /// equivalently to a single call to [`try_read()`] with concatenated - /// buffers. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_vectored()` is non-blocking, the buffer does not have to be - /// stored by the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`try_read()`]: TcpStream::try_read() - /// [`readable()`]: TcpStream::readable() - /// [`ready()`]: TcpStream::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use std::error::Error; - /// use std::io::{self, IoSliceMut}; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// stream.readable().await?; - /// - /// // Creating the buffer **after** the `await` prevents it from - /// // being stored in the async task. - /// let mut buf_a = [0; 512]; - /// let mut buf_b = [0; 1024]; - /// let mut bufs = [ - /// IoSliceMut::new(&mut buf_a), - /// IoSliceMut::new(&mut buf_b), - /// ]; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_read_vectored(&mut bufs) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { - use std::io::Read; - - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs)) - } - - cfg_io_util! { - /// Tries to read data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. 
- /// - /// [`readable()`]: TcpStream::readable() - /// [`ready()`]: TcpStream::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// stream.readable().await?; - /// - /// let mut buf = Vec::with_capacity(4096); - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_read_buf(&mut buf) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_buf(&self, buf: &mut B) -> io::Result { - self.io.registration().try_io(Interest::READABLE, || { - use std::io::Read; - - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - // Safety: We trust `TcpStream::read` to have filled up `n` bytes in the - // buffer. - let n = (&*self.io).read(dst)?; - - unsafe { - buf.advance_mut(n); - } - - Ok(n) - }) - } - } - - /// Waits for the socket to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually - /// paired with `try_write()`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. 
Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to write that fails with `WouldBlock` or - /// `Poll::Pending`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// loop { - /// // Wait for the socket to be writable - /// stream.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn writable(&self) -> io::Result<()> { - self.ready(Interest::WRITABLE).await?; - Ok(()) - } - - /// Polls for write readiness. - /// - /// If the tcp stream is not currently ready for writing, this method will - /// store a clone of the `Waker` from the provided `Context`. When the tcp - /// stream becomes ready for writing, `Waker::wake` will be called on the - /// waker. - /// - /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. (However, `poll_read_ready` retains a - /// second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`writable`] is not feasible. Where possible, using [`writable`] is - /// preferred, as this supports polling from multiple tasks at once. 
- /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the tcp stream is not ready for writing. - /// * `Poll::Ready(Ok(()))` if the tcp stream is ready for writing. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`writable`]: method@Self::writable - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_write_ready(cx).map_ok(|_| ()) - } - - /// Try to write a buffer to the stream, returning how many bytes were - /// written. - /// - /// The function will attempt to write the entire contents of `buf`, but - /// only part of the buffer may be written. - /// - /// This function is usually paired with `writable()`. - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// loop { - /// // Wait for the socket to be writable - /// stream.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match stream.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write(&self, buf: &[u8]) -> io::Result { - use std::io::Write; - - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) - } - - /// Tries to write several buffers to the stream, returning how many bytes - /// were written. - /// - /// Data is written from each buffer in order, with the final buffer read - /// from possible being only partially consumed. This method behaves - /// equivalently to a single call to [`try_write()`] with concatenated - /// buffers. - /// - /// This function is usually paired with `writable()`. - /// - /// [`try_write()`]: TcpStream::try_write() - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// let bufs = [io::IoSlice::new(b"hello "), io::IoSlice::new(b"world")]; - /// - /// loop { - /// // Wait for the socket to be writable - /// stream.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match stream.try_write_vectored(&bufs) { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write_vectored(&self, bufs: &[io::IoSlice<'_>]) -> io::Result { - use std::io::Write; - - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(bufs)) - } - - /// Tries to read or write from the socket using a user-provided IO operation. - /// - /// If the socket is ready, the provided closure is called. The closure - /// should attempt to perform IO operation on the socket by manually - /// calling the appropriate syscall. If the operation fails because the - /// socket is not actually ready, then the closure should return a - /// `WouldBlock` error and the readiness flag is cleared. The return value - /// of the closure is then returned by `try_io`. - /// - /// If the socket is not ready, then the closure is not called - /// and a `WouldBlock` error is returned. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the socket that failed due to the socket not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the socket to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `TcpStream` type, as this will mess with the - /// readiness flag and can cause the socket to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. 
- /// - /// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: TcpStream::readable() - /// [`writable()`]: TcpStream::writable() - /// [`ready()`]: TcpStream::ready() - pub fn try_io( - &self, - interest: Interest, - f: impl FnOnce() -> io::Result, - ) -> io::Result { - self.io - .registration() - .try_io(interest, || self.io.try_io(f)) - } - - /// Reads or writes from the socket using a user-provided IO operation. - /// - /// The readiness of the socket is awaited and when the socket is ready, - /// the provided closure is called. The closure should attempt to perform - /// IO operation on the socket by manually calling the appropriate syscall. - /// If the operation fails because the socket is not actually ready, - /// then the closure should return a `WouldBlock` error. In such case the - /// readiness flag is cleared and the socket readiness is awaited again. - /// This loop is repeated until the closure returns an `Ok` or an error - /// other than `WouldBlock`. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the socket that failed due to the socket not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the socket to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `TcpStream` type, as this will mess with the - /// readiness flag and can cause the socket to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. 
- pub async fn async_io( - &self, - interest: Interest, - mut f: impl FnMut() -> io::Result, - ) -> io::Result { - self.io - .registration() - .async_io(interest, || self.io.try_io(&mut f)) - .await - } - - /// Receives data on the socket from the remote address to which it is - /// connected, without removing that data from the queue. On success, - /// returns the number of bytes peeked. - /// - /// Successive calls return the same data. This is accomplished by passing - /// `MSG_PEEK` as a flag to the underlying recv system call. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// use tokio::io::AsyncReadExt; - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// let mut b1 = [0; 10]; - /// let mut b2 = [0; 10]; - /// - /// // Peek at the data - /// let n = stream.peek(&mut b1).await?; - /// - /// // Read the data - /// assert_eq!(n, stream.read(&mut b2[..n]).await?); - /// assert_eq!(&b1[..n], &b2[..n]); - /// - /// Ok(()) - /// } - /// ``` - /// - /// The [`read`] method is defined on the [`AsyncReadExt`] trait. - /// - /// [`read`]: fn@crate::io::AsyncReadExt::read - /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt - pub async fn peek(&self, buf: &mut [u8]) -> io::Result { - self.io - .registration() - .async_io(Interest::READABLE, || self.io.peek(buf)) - .await - } - - /// Shuts down the read, write, or both halves of this connection. - /// - /// This function will cause all pending and future I/O on the specified - /// portions to return immediately with an appropriate value (see the - /// documentation of `Shutdown`). - pub(super) fn shutdown_std(&self, how: Shutdown) -> io::Result<()> { - self.io.shutdown(how) - } - - /// Gets the value of the `TCP_NODELAY` option on this socket. - /// - /// For more information about this option, see [`set_nodelay`]. 
- /// - /// [`set_nodelay`]: TcpStream::set_nodelay - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.nodelay()?); - /// # Ok(()) - /// # } - /// ``` - pub fn nodelay(&self) -> io::Result { - self.io.nodelay() - } - - /// Sets the value of the `TCP_NODELAY` option on this socket. - /// - /// If set, this option disables the Nagle algorithm. This means that - /// segments are always sent as soon as possible, even if there is only a - /// small amount of data. When not set, data is buffered until there is a - /// sufficient amount to send out, thereby avoiding the frequent sending of - /// small packets. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// stream.set_nodelay(true)?; - /// # Ok(()) - /// # } - /// ``` - pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { - self.io.set_nodelay(nodelay) - } - - cfg_not_wasi! { - /// Reads the linger duration for this socket by getting the `SO_LINGER` - /// option. - /// - /// For more information about this option, see [`set_linger`]. - /// - /// [`set_linger`]: TcpStream::set_linger - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.linger()?); - /// # Ok(()) - /// # } - /// ``` - pub fn linger(&self) -> io::Result> { - socket2::SockRef::from(self).linger() - } - - /// Sets the linger duration of this socket by setting the SO_LINGER option. - /// - /// This option controls the action taken when a stream has unsent messages and the stream is - /// closed. 
If SO_LINGER is set, the system shall block the process until it can transmit the - /// data or until the time expires. - /// - /// If SO_LINGER is not specified, and the stream is closed, the system handles the call in a - /// way that allows the process to continue as quickly as possible. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// stream.set_linger(None)?; - /// # Ok(()) - /// # } - /// ``` - pub fn set_linger(&self, dur: Option) -> io::Result<()> { - socket2::SockRef::from(self).set_linger(dur) - } - } - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`]. - /// - /// [`set_ttl`]: TcpStream::set_ttl - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// println!("{:?}", stream.ttl()?); - /// # Ok(()) - /// # } - /// ``` - pub fn ttl(&self) -> io::Result { - self.io.ttl() - } - - /// Sets the value for the `IP_TTL` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::TcpStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let stream = TcpStream::connect("127.0.0.1:8080").await?; - /// - /// stream.set_ttl(123)?; - /// # Ok(()) - /// # } - /// ``` - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.io.set_ttl(ttl) - } - - // These lifetime markers also appear in the generated documentation, and make - // it more clear that this is a *borrowed* split. - #[allow(clippy::needless_lifetimes)] - /// Splits a `TcpStream` into a read half and a write half, which can be used - /// to read and write the stream concurrently. 
- /// - /// This method is more efficient than [`into_split`], but the halves cannot be - /// moved into independently spawned tasks. - /// - /// [`into_split`]: TcpStream::into_split() - pub fn split<'a>(&'a mut self) -> (ReadHalf<'a>, WriteHalf<'a>) { - split(self) - } - - /// Splits a `TcpStream` into a read half and a write half, which can be used - /// to read and write the stream concurrently. - /// - /// Unlike [`split`], the owned halves can be moved to separate tasks, however - /// this comes at the cost of a heap allocation. - /// - /// **Note:** Dropping the write half will shut down the write half of the TCP - /// stream. This is equivalent to calling [`shutdown()`] on the `TcpStream`. - /// - /// [`split`]: TcpStream::split() - /// [`shutdown()`]: fn@crate::io::AsyncWriteExt::shutdown - pub fn into_split(self) -> (OwnedReadHalf, OwnedWriteHalf) { - split_owned(self) - } - - // == Poll IO functions that takes `&self` == - // - // To read or write without mutable access to the `UnixStream`, combine the - // `poll_read_ready` or `poll_write_ready` methods with the `try_read` or - // `try_write` methods. - - pub(crate) fn poll_read_priv( - &self, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - // Safety: `TcpStream::read` correctly handles reads into uninitialized memory - unsafe { self.io.poll_read(cx, buf) } - } - - pub(super) fn poll_write_priv( - &self, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.io.poll_write(cx, buf) - } - - pub(super) fn poll_write_vectored_priv( - &self, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.io.poll_write_vectored(cx, bufs) - } -} - -impl TryFrom for TcpStream { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`TcpStream::from_std(stream)`](TcpStream::from_std). 
- fn try_from(stream: std::net::TcpStream) -> Result { - Self::from_std(stream) - } -} - -// ===== impl Read / Write ===== - -impl AsyncRead for TcpStream { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.poll_read_priv(cx, buf) - } -} - -impl AsyncWrite for TcpStream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.poll_write_priv(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.poll_write_vectored_priv(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - true - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // tcp flush is a no-op - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.shutdown_std(std::net::Shutdown::Write)?; - Poll::Ready(Ok(())) - } -} - -impl fmt::Debug for TcpStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.fmt(f) - } -} - -#[cfg(unix)] -mod sys { - use super::TcpStream; - use std::os::unix::prelude::*; - - impl AsRawFd for TcpStream { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } - } - - impl AsFd for TcpStream { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } - } -} - -cfg_windows! 
{ - use crate::os::windows::io::{AsRawSocket, RawSocket, AsSocket, BorrowedSocket}; - - impl AsRawSocket for TcpStream { - fn as_raw_socket(&self) -> RawSocket { - self.io.as_raw_socket() - } - } - - impl AsSocket for TcpStream { - fn as_socket(&self) -> BorrowedSocket<'_> { - unsafe { BorrowedSocket::borrow_raw(self.as_raw_socket()) } - } - } -} - -#[cfg(all(tokio_unstable, target_os = "wasi"))] -mod sys { - use super::TcpStream; - use std::os::wasi::prelude::*; - - impl AsRawFd for TcpStream { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } - } - - impl AsFd for TcpStream { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/udp.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/udp.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/udp.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/udp.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2046 +0,0 @@ -use crate::io::{Interest, PollEvented, ReadBuf, Ready}; -use crate::net::{to_socket_addrs, ToSocketAddrs}; - -use std::fmt; -use std::io; -use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::task::{Context, Poll}; - -cfg_io_util! { - use bytes::BufMut; -} - -cfg_net! { - /// A UDP socket. - /// - /// UDP is "connectionless", unlike TCP. Meaning, regardless of what address you've bound to, a `UdpSocket` - /// is free to communicate with many different remotes. 
In tokio there are basically two main ways to use `UdpSocket`: - /// - /// * one to many: [`bind`](`UdpSocket::bind`) and use [`send_to`](`UdpSocket::send_to`) - /// and [`recv_from`](`UdpSocket::recv_from`) to communicate with many different addresses - /// * one to one: [`connect`](`UdpSocket::connect`) and associate with a single address, using [`send`](`UdpSocket::send`) - /// and [`recv`](`UdpSocket::recv`) to communicate only with that remote address - /// - /// This type does not provide a `split` method, because this functionality - /// can be achieved by instead wrapping the socket in an [`Arc`]. Note that - /// you do not need a `Mutex` to share the `UdpSocket` — an `Arc` - /// is enough. This is because all of the methods take `&self` instead of - /// `&mut self`. Once you have wrapped it in an `Arc`, you can call - /// `.clone()` on the `Arc` to get multiple shared handles to the - /// same socket. An example of such usage can be found further down. - /// - /// [`Arc`]: std::sync::Arc - /// - /// # Streams - /// - /// If you need to listen over UDP and produce a [`Stream`], you can look - /// at [`UdpFramed`]. 
- /// - /// [`UdpFramed`]: https://docs.rs/tokio-util/latest/tokio_util/udp/struct.UdpFramed.html - /// [`Stream`]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html - /// - /// # Example: one to many (bind) - /// - /// Using `bind` we can create a simple echo server that sends and recv's with many different clients: - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let sock = UdpSocket::bind("0.0.0.0:8080").await?; - /// let mut buf = [0; 1024]; - /// loop { - /// let (len, addr) = sock.recv_from(&mut buf).await?; - /// println!("{:?} bytes received from {:?}", len, addr); - /// - /// let len = sock.send_to(&buf[..len], addr).await?; - /// println!("{:?} bytes sent", len); - /// } - /// } - /// ``` - /// - /// # Example: one to one (connect) - /// - /// Or using `connect` we can echo with a single remote address using `send` and `recv`: - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let sock = UdpSocket::bind("0.0.0.0:8080").await?; - /// - /// let remote_addr = "127.0.0.1:59611"; - /// sock.connect(remote_addr).await?; - /// let mut buf = [0; 1024]; - /// loop { - /// let len = sock.recv(&mut buf).await?; - /// println!("{:?} bytes received from {:?}", len, remote_addr); - /// - /// let len = sock.send(&buf[..len]).await?; - /// println!("{:?} bytes sent", len); - /// } - /// } - /// ``` - /// - /// # Example: Splitting with `Arc` - /// - /// Because `send_to` and `recv_from` take `&self`. It's perfectly alright - /// to use an `Arc` and share the references to multiple tasks. 
- /// Here is a similar "echo" example that supports concurrent - /// sending/receiving: - /// - /// ```no_run - /// use tokio::{net::UdpSocket, sync::mpsc}; - /// use std::{io, net::SocketAddr, sync::Arc}; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let sock = UdpSocket::bind("0.0.0.0:8080".parse::().unwrap()).await?; - /// let r = Arc::new(sock); - /// let s = r.clone(); - /// let (tx, mut rx) = mpsc::channel::<(Vec, SocketAddr)>(1_000); - /// - /// tokio::spawn(async move { - /// while let Some((bytes, addr)) = rx.recv().await { - /// let len = s.send_to(&bytes, &addr).await.unwrap(); - /// println!("{:?} bytes sent", len); - /// } - /// }); - /// - /// let mut buf = [0; 1024]; - /// loop { - /// let (len, addr) = r.recv_from(&mut buf).await?; - /// println!("{:?} bytes received from {:?}", len, addr); - /// tx.send((buf[..len].to_vec(), addr)).await.unwrap(); - /// } - /// } - /// ``` - /// - pub struct UdpSocket { - io: PollEvented, - } -} - -impl UdpSocket { - /// This function will create a new UDP socket and attempt to bind it to - /// the `addr` provided. - /// - /// Binding with a port number of 0 will request that the OS assigns a port - /// to this listener. The port allocated can be queried via the `local_addr` - /// method. 
- /// - /// # Example - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let sock = UdpSocket::bind("0.0.0.0:8080").await?; - /// // use `sock` - /// # let _ = sock; - /// Ok(()) - /// } - /// ``` - pub async fn bind(addr: A) -> io::Result { - let addrs = to_socket_addrs(addr).await?; - let mut last_err = None; - - for addr in addrs { - match UdpSocket::bind_addr(addr) { - Ok(socket) => return Ok(socket), - Err(e) => last_err = Some(e), - } - } - - Err(last_err.unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve to any address", - ) - })) - } - - fn bind_addr(addr: SocketAddr) -> io::Result { - let sys = mio::net::UdpSocket::bind(addr)?; - UdpSocket::new(sys) - } - - #[track_caller] - fn new(socket: mio::net::UdpSocket) -> io::Result { - let io = PollEvented::new(socket)?; - Ok(UdpSocket { io }) - } - - /// Creates new `UdpSocket` from a previously bound `std::net::UdpSocket`. - /// - /// This function is intended to be used to wrap a UDP socket from the - /// standard library in the Tokio equivalent. - /// - /// This can be used in conjunction with socket2's `Socket` interface to - /// configure a socket before it's handed off, such as setting options like - /// `reuse_address` or binding to multiple addresses. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. Otherwise all I/O operations on the socket - /// will block the thread, which will cause unexpected behavior. - /// Non-blocking mode can be set using [`set_nonblocking`]. - /// - /// [`set_nonblocking`]: std::net::UdpSocket::set_nonblocking - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. 
- /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - /// - /// # Example - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// # use std::{io, net::SocketAddr}; - /// - /// # #[tokio::main] - /// # async fn main() -> io::Result<()> { - /// let addr = "0.0.0.0:8080".parse::().unwrap(); - /// let std_sock = std::net::UdpSocket::bind(addr)?; - /// std_sock.set_nonblocking(true)?; - /// let sock = UdpSocket::from_std(std_sock)?; - /// // use `sock` - /// # Ok(()) - /// # } - /// ``` - #[track_caller] - pub fn from_std(socket: net::UdpSocket) -> io::Result { - let io = mio::net::UdpSocket::from_std(socket); - UdpSocket::new(io) - } - - /// Turns a [`tokio::net::UdpSocket`] into a [`std::net::UdpSocket`]. - /// - /// The returned [`std::net::UdpSocket`] will have nonblocking mode set as - /// `true`. Use [`set_nonblocking`] to change the blocking mode if needed. 
- /// - /// # Examples - /// - /// ```rust,no_run - /// use std::error::Error; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let tokio_socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await?; - /// let std_socket = tokio_socket.into_std()?; - /// std_socket.set_nonblocking(false)?; - /// Ok(()) - /// } - /// ``` - /// - /// [`tokio::net::UdpSocket`]: UdpSocket - /// [`std::net::UdpSocket`]: std::net::UdpSocket - /// [`set_nonblocking`]: fn@std::net::UdpSocket::set_nonblocking - pub fn into_std(self) -> io::Result { - #[cfg(unix)] - { - use std::os::unix::io::{FromRawFd, IntoRawFd}; - self.io - .into_inner() - .map(|io| io.into_raw_fd()) - .map(|raw_fd| unsafe { std::net::UdpSocket::from_raw_fd(raw_fd) }) - } - - #[cfg(windows)] - { - use std::os::windows::io::{FromRawSocket, IntoRawSocket}; - self.io - .into_inner() - .map(|io| io.into_raw_socket()) - .map(|raw_socket| unsafe { std::net::UdpSocket::from_raw_socket(raw_socket) }) - } - } - - fn as_socket(&self) -> socket2::SockRef<'_> { - socket2::SockRef::from(self) - } - - /// Returns the local address that this socket is bound to. - /// - /// # Example - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// # use std::{io, net::SocketAddr}; - /// - /// # #[tokio::main] - /// # async fn main() -> io::Result<()> { - /// let addr = "0.0.0.0:8080".parse::().unwrap(); - /// let sock = UdpSocket::bind(addr).await?; - /// // the address the socket is bound to - /// let local_addr = sock.local_addr()?; - /// # Ok(()) - /// # } - /// ``` - pub fn local_addr(&self) -> io::Result { - self.io.local_addr() - } - - /// Returns the socket address of the remote peer this socket was connected to. 
- /// - /// # Example - /// - /// ``` - /// use tokio::net::UdpSocket; - /// - /// # use std::{io, net::SocketAddr}; - /// # #[tokio::main] - /// # async fn main() -> io::Result<()> { - /// let addr = "0.0.0.0:8080".parse::().unwrap(); - /// let peer = "127.0.0.1:11100".parse::().unwrap(); - /// let sock = UdpSocket::bind(addr).await?; - /// sock.connect(peer).await?; - /// assert_eq!(peer, sock.peer_addr()?); - /// # Ok(()) - /// # } - /// ``` - pub fn peer_addr(&self) -> io::Result { - self.io.peer_addr() - } - - /// Connects the UDP socket setting the default destination for send() and - /// limiting packets that are read via recv from the address specified in - /// `addr`. - /// - /// # Example - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// # use std::{io, net::SocketAddr}; - /// - /// # #[tokio::main] - /// # async fn main() -> io::Result<()> { - /// let sock = UdpSocket::bind("0.0.0.0:8080".parse::().unwrap()).await?; - /// - /// let remote_addr = "127.0.0.1:59600".parse::().unwrap(); - /// sock.connect(remote_addr).await?; - /// let mut buf = [0u8; 32]; - /// // recv from remote_addr - /// let len = sock.recv(&mut buf).await?; - /// // send to remote_addr - /// let _len = sock.send(&buf[..len]).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn connect(&self, addr: A) -> io::Result<()> { - let addrs = to_socket_addrs(addr).await?; - let mut last_err = None; - - for addr in addrs { - match self.io.connect(addr) { - Ok(_) => return Ok(()), - Err(e) => last_err = Some(e), - } - } - - Err(last_err.unwrap_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "could not resolve to any address", - ) - })) - } - - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with `try_recv()` or `try_send()`. It - /// can be used to concurrently recv / send to the same socket on a single - /// task without splitting the socket. - /// - /// The function may complete without the socket being ready. 
This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - /// - /// # Examples - /// - /// Concurrently receive from and send to the socket on the same task - /// without splitting. - /// - /// ```no_run - /// use tokio::io::{self, Interest}; - /// use tokio::net::UdpSocket; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// socket.connect("127.0.0.1:8081").await?; - /// - /// loop { - /// let ready = socket.ready(Interest::READABLE | Interest::WRITABLE).await?; - /// - /// if ready.is_readable() { - /// // The buffer is **not** included in the async task and will only exist - /// // on the stack. 
- /// let mut data = [0; 1024]; - /// match socket.try_recv(&mut data[..]) { - /// Ok(n) => { - /// println!("received {:?}", &data[..n]); - /// } - /// // False-positive, continue - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// if ready.is_writable() { - /// // Write some data - /// match socket.try_send(b"hello world") { - /// Ok(n) => { - /// println!("sent {} bytes", n); - /// } - /// // False-positive, continue - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub async fn ready(&self, interest: Interest) -> io::Result { - let event = self.io.registration().readiness(interest).await?; - Ok(event.ready) - } - - /// Waits for the socket to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is - /// usually paired with `try_send()` or `try_send_to()`. - /// - /// The function may complete without the socket being writable. This is a - /// false-positive and attempting a `try_send()` will return with - /// `io::ErrorKind::WouldBlock`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to write that fails with `WouldBlock` or - /// `Poll::Pending`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Bind socket - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// socket.connect("127.0.0.1:8081").await?; - /// - /// loop { - /// // Wait for the socket to be writable - /// socket.writable().await?; - /// - /// // Try to send data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match socket.try_send(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn writable(&self) -> io::Result<()> { - self.ready(Interest::WRITABLE).await?; - Ok(()) - } - - /// Polls for write/send readiness. - /// - /// If the udp stream is not currently ready for sending, this method will - /// store a clone of the `Waker` from the provided `Context`. When the udp - /// stream becomes ready for sending, `Waker::wake` will be called on the - /// waker. - /// - /// Note that on multiple calls to `poll_send_ready` or `poll_send`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. (However, `poll_recv_ready` retains a - /// second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`writable`] is not feasible. Where possible, using [`writable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the udp stream is not ready for writing. - /// * `Poll::Ready(Ok(()))` if the udp stream is ready for writing. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`writable`]: method@Self::writable - pub fn poll_send_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_write_ready(cx).map_ok(|_| ()) - } - - /// Sends data on the socket to the remote address that the socket is - /// connected to. - /// - /// The [`connect`] method will connect this socket to a remote address. - /// This method will fail if the socket is not connected. 
- /// - /// [`connect`]: method@Self::connect - /// - /// # Return - /// - /// On success, the number of bytes sent is returned, otherwise, the - /// encountered error is returned. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `send` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that the message was not sent. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::io; - /// use tokio::net::UdpSocket; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Bind socket - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// socket.connect("127.0.0.1:8081").await?; - /// - /// // Send a message - /// socket.send(b"hello world").await?; - /// - /// Ok(()) - /// } - /// ``` - pub async fn send(&self, buf: &[u8]) -> io::Result { - self.io - .registration() - .async_io(Interest::WRITABLE, || self.io.send(buf)) - .await - } - - /// Attempts to send data on the socket to the remote address to which it - /// was previously `connect`ed. - /// - /// The [`connect`] method will connect this socket to a remote address. - /// This method will fail if the socket is not connected. - /// - /// Note that on multiple calls to a `poll_*` method in the send direction, - /// only the `Waker` from the `Context` passed to the most recent call will - /// be scheduled to receive a wakeup. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not available to write - /// * `Poll::Ready(Ok(n))` `n` is the number of bytes sent - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. 
- /// - /// [`connect`]: method@Self::connect - pub fn poll_send(&self, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - self.io - .registration() - .poll_write_io(cx, || self.io.send(buf)) - } - - /// Tries to send data on the socket to the remote address to which it is - /// connected. - /// - /// When the socket buffer is full, `Err(io::ErrorKind::WouldBlock)` is - /// returned. This function is usually paired with `writable()`. - /// - /// # Returns - /// - /// If successful, `Ok(n)` is returned, where `n` is the number of bytes - /// sent. If the socket is not ready to send data, - /// `Err(ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Bind a UDP socket - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// - /// // Connect to a peer - /// socket.connect("127.0.0.1:8081").await?; - /// - /// loop { - /// // Wait for the socket to be writable - /// socket.writable().await?; - /// - /// // Try to send data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_send(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_send(&self, buf: &[u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || self.io.send(buf)) - } - - /// Waits for the socket to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_recv()`. - /// - /// The function may complete without the socket being readable. This is a - /// false-positive and attempting a `try_recv()` will return with - /// `io::ErrorKind::WouldBlock`. 
- /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read that fails with `WouldBlock` or - /// `Poll::Pending`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// socket.connect("127.0.0.1:8081").await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// // The buffer is **not** included in the async task and will - /// // only exist on the stack. - /// let mut buf = [0; 1024]; - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_recv(&mut buf) { - /// Ok(n) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn readable(&self) -> io::Result<()> { - self.ready(Interest::READABLE).await?; - Ok(()) - } - - /// Polls for read/receive readiness. - /// - /// If the udp stream is not currently ready for receiving, this method will - /// store a clone of the `Waker` from the provided `Context`. When the udp - /// socket becomes ready for reading, `Waker::wake` will be called on the - /// waker. - /// - /// Note that on multiple calls to `poll_recv_ready`, `poll_recv` or - /// `poll_peek`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. (However, - /// `poll_send_ready` retains a second, independent waker.) 
- /// - /// This function is intended for cases where creating and pinning a future - /// via [`readable`] is not feasible. Where possible, using [`readable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the udp stream is not ready for reading. - /// * `Poll::Ready(Ok(()))` if the udp stream is ready for reading. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`readable`]: method@Self::readable - pub fn poll_recv_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_read_ready(cx).map_ok(|_| ()) - } - - /// Receives a single datagram message on the socket from the remote address - /// to which it is connected. On success, returns the number of bytes read. - /// - /// The function must be called with valid byte array `buf` of sufficient - /// size to hold the message bytes. If a message is too long to fit in the - /// supplied buffer, excess bytes may be discarded. - /// - /// The [`connect`] method will connect this socket to a remote address. - /// This method will fail if the socket is not connected. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `recv` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, it is guaranteed that no messages were received on this - /// socket. 
- /// - /// [`connect`]: method@Self::connect - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Bind socket - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// socket.connect("127.0.0.1:8081").await?; - /// - /// let mut buf = vec![0; 10]; - /// let n = socket.recv(&mut buf).await?; - /// - /// println!("received {} bytes {:?}", n, &buf[..n]); - /// - /// Ok(()) - /// } - /// ``` - pub async fn recv(&self, buf: &mut [u8]) -> io::Result { - self.io - .registration() - .async_io(Interest::READABLE, || self.io.recv(buf)) - .await - } - - /// Attempts to receive a single datagram message on the socket from the remote - /// address to which it is `connect`ed. - /// - /// The [`connect`] method will connect this socket to a remote address. This method - /// resolves to an error if the socket is not connected. - /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the - /// `Waker` from the `Context` passed to the most recent call will be scheduled to - /// receive a wakeup. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not ready to read - /// * `Poll::Ready(Ok(()))` reads data `ReadBuf` if the socket is ready - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`connect`]: method@Self::connect - pub fn poll_recv(&self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { - let n = ready!(self.io.registration().poll_read_io(cx, || { - // Safety: will not read the maybe uninitialized bytes. - let b = unsafe { - &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]) - }; - - self.io.recv(b) - }))?; - - // Safety: We trust `recv` to have filled up `n` bytes in the buffer. 
- unsafe { - buf.assume_init(n); - } - buf.advance(n); - Poll::Ready(Ok(())) - } - - /// Tries to receive a single datagram message on the socket from the remote - /// address to which it is connected. On success, returns the number of - /// bytes read. - /// - /// This method must be called with valid byte array buf of sufficient size - /// to hold the message bytes. If a message is too long to fit in the - /// supplied buffer, excess bytes may be discarded. - /// - /// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is - /// returned. This function is usually paired with `readable()`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// socket.connect("127.0.0.1:8081").await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// // The buffer is **not** included in the async task and will - /// // only exist on the stack. - /// let mut buf = [0; 1024]; - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_recv(&mut buf) { - /// Ok(n) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_recv(&self, buf: &mut [u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || self.io.recv(buf)) - } - - cfg_io_util! { - /// Tries to receive data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. 
- /// - /// This method must be called with valid byte array buf of sufficient size - /// to hold the message bytes. If a message is too long to fit in the - /// supplied buffer, excess bytes may be discarded. - /// - /// This method can be used even if `buf` is uninitialized. - /// - /// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is - /// returned. This function is usually paired with `readable()`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// socket.connect("127.0.0.1:8081").await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// let mut buf = Vec::with_capacity(1024); - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_recv_buf(&mut buf) { - /// Ok(n) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_recv_buf(&self, buf: &mut B) -> io::Result { - self.io.registration().try_io(Interest::READABLE, || { - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - let n = (*self.io).recv(dst)?; - - // Safety: We trust `UdpSocket::recv` to have filled up `n` bytes in the - // buffer. - unsafe { - buf.advance_mut(n); - } - - Ok(n) - }) - } - - /// Receives a single datagram message on the socket from the remote address - /// to which it is connected, advancing the buffer's internal cursor, - /// returning how many bytes were read. 
- /// - /// This method must be called with valid byte array buf of sufficient size - /// to hold the message bytes. If a message is too long to fit in the - /// supplied buffer, excess bytes may be discarded. - /// - /// This method can be used even if `buf` is uninitialized. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// socket.connect("127.0.0.1:8081").await?; - /// - /// let mut buf = Vec::with_capacity(512); - /// let len = socket.recv_buf(&mut buf).await?; - /// - /// println!("received {} bytes {:?}", len, &buf[..len]); - /// - /// Ok(()) - /// } - /// ``` - pub async fn recv_buf(&self, buf: &mut B) -> io::Result { - self.io.registration().async_io(Interest::READABLE, || { - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - let n = (*self.io).recv(dst)?; - - // Safety: We trust `UdpSocket::recv` to have filled up `n` bytes in the - // buffer. - unsafe { - buf.advance_mut(n); - } - - Ok(n) - }).await - } - - /// Tries to receive a single datagram message on the socket. On success, - /// returns the number of bytes read and the origin. - /// - /// This method must be called with valid byte array buf of sufficient size - /// to hold the message bytes. If a message is too long to fit in the - /// supplied buffer, excess bytes may be discarded. - /// - /// This method can be used even if `buf` is uninitialized. - /// - /// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is - /// returned. This function is usually paired with `readable()`. - /// - /// # Notes - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. 
- /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// let mut buf = Vec::with_capacity(1024); - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_recv_buf_from(&mut buf) { - /// Ok((n, _addr)) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_recv_buf_from(&self, buf: &mut B) -> io::Result<(usize, SocketAddr)> { - self.io.registration().try_io(Interest::READABLE, || { - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - let (n, addr) = (*self.io).recv_from(dst)?; - - // Safety: We trust `UdpSocket::recv_from` to have filled up `n` bytes in the - // buffer. - unsafe { - buf.advance_mut(n); - } - - Ok((n, addr)) - }) - } - - /// Receives a single datagram message on the socket, advancing the - /// buffer's internal cursor, returning how many bytes were read and the origin. - /// - /// This method must be called with valid byte array buf of sufficient size - /// to hold the message bytes. 
If a message is too long to fit in the - /// supplied buffer, excess bytes may be discarded. - /// - /// This method can be used even if `buf` is uninitialized. - /// - /// # Notes - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// socket.connect("127.0.0.1:8081").await?; - /// - /// let mut buf = Vec::with_capacity(512); - /// let (len, addr) = socket.recv_buf_from(&mut buf).await?; - /// - /// println!("received {:?} bytes from {:?}", len, addr); - /// - /// Ok(()) - /// } - /// ``` - pub async fn recv_buf_from(&self, buf: &mut B) -> io::Result<(usize, SocketAddr)> { - self.io.registration().async_io(Interest::READABLE, || { - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - let (n, addr) = (*self.io).recv_from(dst)?; - - // Safety: We trust `UdpSocket::recv_from` to have filled up `n` bytes in the - // buffer. - unsafe { - buf.advance_mut(n); - } - - Ok((n,addr)) - }).await - } - } - - /// Sends data on the socket to the given address. On success, returns the - /// number of bytes written. - /// - /// Address type can be any implementor of [`ToSocketAddrs`] trait. See its - /// documentation for concrete examples. 
- /// - /// It is possible for `addr` to yield multiple addresses, but `send_to` - /// will only send data to the first address yielded by `addr`. - /// - /// This will return an error when the IP version of the local socket does - /// not match that returned from [`ToSocketAddrs`]. - /// - /// [`ToSocketAddrs`]: crate::net::ToSocketAddrs - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `send_to` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that the message was not sent. - /// - /// # Example - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// let len = socket.send_to(b"hello world", "127.0.0.1:8081").await?; - /// - /// println!("Sent {} bytes", len); - /// - /// Ok(()) - /// } - /// ``` - pub async fn send_to(&self, buf: &[u8], target: A) -> io::Result { - let mut addrs = to_socket_addrs(target).await?; - - match addrs.next() { - Some(target) => self.send_to_addr(buf, target).await, - None => Err(io::Error::new( - io::ErrorKind::InvalidInput, - "no addresses to send data to", - )), - } - } - - /// Attempts to send data on the socket to a given address. - /// - /// Note that on multiple calls to a `poll_*` method in the send direction, only the - /// `Waker` from the `Context` passed to the most recent call will be scheduled to - /// receive a wakeup. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not ready to write - /// * `Poll::Ready(Ok(n))` `n` is the number of bytes sent. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. 
- pub fn poll_send_to( - &self, - cx: &mut Context<'_>, - buf: &[u8], - target: SocketAddr, - ) -> Poll> { - self.io - .registration() - .poll_write_io(cx, || self.io.send_to(buf, target)) - } - - /// Tries to send data on the socket to the given address, but if the send is - /// blocked this will return right away. - /// - /// This function is usually paired with `writable()`. - /// - /// # Returns - /// - /// If successful, returns the number of bytes sent - /// - /// Users should ensure that when the remote cannot receive, the - /// [`ErrorKind::WouldBlock`] is properly handled. An error can also occur - /// if the IP version of the socket does not match that of `target`. - /// - /// [`ErrorKind::WouldBlock`]: std::io::ErrorKind::WouldBlock - /// - /// # Example - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// - /// let dst = "127.0.0.1:8081".parse()?; - /// - /// loop { - /// socket.writable().await?; - /// - /// match socket.try_send_to(&b"hello world"[..], dst) { - /// Ok(sent) => { - /// println!("sent {} bytes", sent); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// // Writable false positive. - /// continue; - /// } - /// Err(e) => return Err(e.into()), - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || self.io.send_to(buf, target)) - } - - async fn send_to_addr(&self, buf: &[u8], target: SocketAddr) -> io::Result { - self.io - .registration() - .async_io(Interest::WRITABLE, || self.io.send_to(buf, target)) - .await - } - - /// Receives a single datagram message on the socket. On success, returns - /// the number of bytes read and the origin. 
- /// - /// The function must be called with valid byte array `buf` of sufficient - /// size to hold the message bytes. If a message is too long to fit in the - /// supplied buffer, excess bytes may be discarded. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `recv_from` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, it is guaranteed that no messages were received on this - /// socket. - /// - /// # Example - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// - /// let mut buf = vec![0u8; 32]; - /// let (len, addr) = socket.recv_from(&mut buf).await?; - /// - /// println!("received {:?} bytes from {:?}", len, addr); - /// - /// Ok(()) - /// } - /// ``` - /// - /// # Notes - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - self.io - .registration() - .async_io(Interest::READABLE, || self.io.recv_from(buf)) - .await - } - - /// Attempts to receive a single datagram on the socket. - /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the - /// `Waker` from the `Context` passed to the most recent call will be scheduled to - /// receive a wakeup. 
- /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not ready to read - /// * `Poll::Ready(Ok(addr))` reads data from `addr` into `ReadBuf` if the socket is ready - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// # Notes - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - pub fn poll_recv_from( - &self, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - let (n, addr) = ready!(self.io.registration().poll_read_io(cx, || { - // Safety: will not read the maybe uninitialized bytes. - let b = unsafe { - &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]) - }; - - self.io.recv_from(b) - }))?; - - // Safety: We trust `recv` to have filled up `n` bytes in the buffer. - unsafe { - buf.assume_init(n); - } - buf.advance(n); - Poll::Ready(Ok(addr)) - } - - /// Tries to receive a single datagram message on the socket. On success, - /// returns the number of bytes read and the origin. - /// - /// This method must be called with valid byte array buf of sufficient size - /// to hold the message bytes. If a message is too long to fit in the - /// supplied buffer, excess bytes may be discarded. - /// - /// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is - /// returned. This function is usually paired with `readable()`. 
- /// - /// # Notes - /// - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// // The buffer is **not** included in the async task and will - /// // only exist on the stack. - /// let mut buf = [0; 1024]; - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_recv_from(&mut buf) { - /// Ok((n, _addr)) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - self.io - .registration() - .try_io(Interest::READABLE, || self.io.recv_from(buf)) - } - - /// Tries to read or write from the socket using a user-provided IO operation. - /// - /// If the socket is ready, the provided closure is called. The closure - /// should attempt to perform IO operation on the socket by manually - /// calling the appropriate syscall. 
If the operation fails because the - /// socket is not actually ready, then the closure should return a - /// `WouldBlock` error and the readiness flag is cleared. The return value - /// of the closure is then returned by `try_io`. - /// - /// If the socket is not ready, then the closure is not called - /// and a `WouldBlock` error is returned. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the socket that failed due to the socket not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the socket to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `UdpSocket` type, as this will mess with the - /// readiness flag and can cause the socket to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. - /// - /// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: UdpSocket::readable() - /// [`writable()`]: UdpSocket::writable() - /// [`ready()`]: UdpSocket::ready() - pub fn try_io( - &self, - interest: Interest, - f: impl FnOnce() -> io::Result, - ) -> io::Result { - self.io - .registration() - .try_io(interest, || self.io.try_io(f)) - } - - /// Reads or writes from the socket using a user-provided IO operation. - /// - /// The readiness of the socket is awaited and when the socket is ready, - /// the provided closure is called. The closure should attempt to perform - /// IO operation on the socket by manually calling the appropriate syscall. 
- /// If the operation fails because the socket is not actually ready, - /// then the closure should return a `WouldBlock` error. In such case the - /// readiness flag is cleared and the socket readiness is awaited again. - /// This loop is repeated until the closure returns an `Ok` or an error - /// other than `WouldBlock`. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the socket that failed due to the socket not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the socket to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `UdpSocket` type, as this will mess with the - /// readiness flag and can cause the socket to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. - pub async fn async_io( - &self, - interest: Interest, - mut f: impl FnMut() -> io::Result, - ) -> io::Result { - self.io - .registration() - .async_io(interest, || self.io.try_io(&mut f)) - .await - } - - /// Receives data from the socket, without removing it from the input queue. - /// On success, returns the number of bytes read and the address from whence - /// the data came. - /// - /// # Notes - /// - /// On Windows, if the data is larger than the buffer specified, the buffer - /// is filled with the first part of the data, and peek_from returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. - /// Make sure to always use a sufficiently large buffer to hold the - /// maximum UDP packet size, which can be up to 65536 bytes in size. 
- /// - /// MacOS will return an error if you pass a zero-sized buffer. - /// - /// If you're merely interested in learning the sender of the data at the head of the queue, - /// try [`peek_sender`]. - /// - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let socket = UdpSocket::bind("127.0.0.1:8080").await?; - /// - /// let mut buf = vec![0u8; 32]; - /// let (len, addr) = socket.peek_from(&mut buf).await?; - /// - /// println!("peeked {:?} bytes from {:?}", len, addr); - /// - /// Ok(()) - /// } - /// ``` - /// - /// [`peek_sender`]: method@Self::peek_sender - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - pub async fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - self.io - .registration() - .async_io(Interest::READABLE, || self.io.peek_from(buf)) - .await - } - - /// Receives data from the socket, without removing it from the input queue. - /// On success, returns the sending address of the datagram. - /// - /// # Notes - /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the - /// `Waker` from the `Context` passed to the most recent call will be scheduled to - /// receive a wakeup - /// - /// On Windows, if the data is larger than the buffer specified, the buffer - /// is filled with the first part of the data, and peek returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. 
- /// Make sure to always use a sufficiently large buffer to hold the - /// maximum UDP packet size, which can be up to 65536 bytes in size. - /// - /// MacOS will return an error if you pass a zero-sized buffer. - /// - /// If you're merely interested in learning the sender of the data at the head of the queue, - /// try [`poll_peek_sender`]. - /// - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not ready to read - /// * `Poll::Ready(Ok(addr))` reads data from `addr` into `ReadBuf` if the socket is ready - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`poll_peek_sender`]: method@Self::poll_peek_sender - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - pub fn poll_peek_from( - &self, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - let (n, addr) = ready!(self.io.registration().poll_read_io(cx, || { - // Safety: will not read the maybe uninitialized bytes. - let b = unsafe { - &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]) - }; - - self.io.peek_from(b) - }))?; - - // Safety: We trust `recv` to have filled up `n` bytes in the buffer. - unsafe { - buf.assume_init(n); - } - buf.advance(n); - Poll::Ready(Ok(addr)) - } - - /// Tries to receive data on the socket without removing it from the input queue. 
- /// On success, returns the number of bytes read and the sending address of the - /// datagram. - /// - /// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is - /// returned. This function is usually paired with `readable()`. - /// - /// # Notes - /// - /// On Windows, if the data is larger than the buffer specified, the buffer - /// is filled with the first part of the data, and peek returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. - /// Make sure to always use a sufficiently large buffer to hold the - /// maximum UDP packet size, which can be up to 65536 bytes in size. - /// - /// MacOS will return an error if you pass a zero-sized buffer. - /// - /// If you're merely interested in learning the sender of the data at the head of the queue, - /// try [`try_peek_sender`]. - /// - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// [`try_peek_sender`]: method@Self::try_peek_sender - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - pub fn try_peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - self.io - .registration() - .try_io(Interest::READABLE, || self.io.peek_from(buf)) - } - - /// Retrieve the sender of the data at the head of the input queue, waiting if empty. - /// - /// This is equivalent to calling [`peek_from`] with a zero-sized buffer, - /// but suppresses the `WSAEMSGSIZE` error on Windows and the "invalid argument" error on macOS. 
- /// - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// [`peek_from`]: method@Self::peek_from - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - pub async fn peek_sender(&self) -> io::Result { - self.io - .registration() - .async_io(Interest::READABLE, || self.peek_sender_inner()) - .await - } - - /// Retrieve the sender of the data at the head of the input queue, - /// scheduling a wakeup if empty. - /// - /// This is equivalent to calling [`poll_peek_from`] with a zero-sized buffer, - /// but suppresses the `WSAEMSGSIZE` error on Windows and the "invalid argument" error on macOS. - /// - /// # Notes - /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the - /// `Waker` from the `Context` passed to the most recent call will be scheduled to - /// receive a wakeup. - /// - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. 
- /// - /// [`poll_peek_from`]: method@Self::poll_peek_from - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - pub fn poll_peek_sender(&self, cx: &mut Context<'_>) -> Poll> { - self.io - .registration() - .poll_read_io(cx, || self.peek_sender_inner()) - } - - /// Try to retrieve the sender of the data at the head of the input queue. - /// - /// When there is no pending data, `Err(io::ErrorKind::WouldBlock)` is - /// returned. This function is usually paired with `readable()`. - /// - /// Note that the socket address **cannot** be implicitly trusted, because it is relatively - /// trivial to send a UDP datagram with a spoofed origin in a [packet injection attack]. - /// Because UDP is stateless and does not validate the origin of a packet, - /// the attacker does not need to be able to intercept traffic in order to interfere. - /// It is important to be aware of this when designing your application-level protocol. - /// - /// [packet injection attack]: https://en.wikipedia.org/wiki/Packet_injection - pub fn try_peek_sender(&self) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || self.peek_sender_inner()) - } - - #[inline] - fn peek_sender_inner(&self) -> io::Result { - self.io.try_io(|| { - self.as_socket() - .peek_sender()? - // May be `None` if the platform doesn't populate the sender for some reason. - // In testing, that only occurred on macOS if you pass a zero-sized buffer, - // but the implementation of `Socket::peek_sender()` covers that. - .as_socket() - .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "sender not available")) - }) - } - - /// Gets the value of the `SO_BROADCAST` option for this socket. - /// - /// For more information about this option, see [`set_broadcast`]. - /// - /// [`set_broadcast`]: method@Self::set_broadcast - pub fn broadcast(&self) -> io::Result { - self.io.broadcast() - } - - /// Sets the value of the `SO_BROADCAST` option for this socket. 
- /// - /// When enabled, this socket is allowed to send packets to a broadcast - /// address. - pub fn set_broadcast(&self, on: bool) -> io::Result<()> { - self.io.set_broadcast(on) - } - - /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see [`set_multicast_loop_v4`]. - /// - /// [`set_multicast_loop_v4`]: method@Self::set_multicast_loop_v4 - pub fn multicast_loop_v4(&self) -> io::Result { - self.io.multicast_loop_v4() - } - - /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket. - /// - /// If enabled, multicast packets will be looped back to the local socket. - /// - /// # Note - /// - /// This may not have any affect on IPv6 sockets. - pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> { - self.io.set_multicast_loop_v4(on) - } - - /// Gets the value of the `IP_MULTICAST_TTL` option for this socket. - /// - /// For more information about this option, see [`set_multicast_ttl_v4`]. - /// - /// [`set_multicast_ttl_v4`]: method@Self::set_multicast_ttl_v4 - pub fn multicast_ttl_v4(&self) -> io::Result { - self.io.multicast_ttl_v4() - } - - /// Sets the value of the `IP_MULTICAST_TTL` option for this socket. - /// - /// Indicates the time-to-live value of outgoing multicast packets for - /// this socket. The default value is 1 which means that multicast packets - /// don't leave the local network unless explicitly requested. - /// - /// # Note - /// - /// This may not have any affect on IPv6 sockets. - pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> { - self.io.set_multicast_ttl_v4(ttl) - } - - /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// For more information about this option, see [`set_multicast_loop_v6`]. 
- /// - /// [`set_multicast_loop_v6`]: method@Self::set_multicast_loop_v6 - pub fn multicast_loop_v6(&self) -> io::Result { - self.io.multicast_loop_v6() - } - - /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket. - /// - /// Controls whether this socket sees the multicast packets it sends itself. - /// - /// # Note - /// - /// This may not have any affect on IPv4 sockets. - pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> { - self.io.set_multicast_loop_v6(on) - } - - /// Gets the value of the `IP_TTL` option for this socket. - /// - /// For more information about this option, see [`set_ttl`]. - /// - /// [`set_ttl`]: method@Self::set_ttl - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// # use std::io; - /// - /// # async fn dox() -> io::Result<()> { - /// let sock = UdpSocket::bind("127.0.0.1:8080").await?; - /// - /// println!("{:?}", sock.ttl()?); - /// # Ok(()) - /// # } - /// ``` - pub fn ttl(&self) -> io::Result { - self.io.ttl() - } - - /// Sets the value for the `IP_TTL` option on this socket. - /// - /// This value sets the time-to-live field that is used in every packet sent - /// from this socket. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UdpSocket; - /// # use std::io; - /// - /// # async fn dox() -> io::Result<()> { - /// let sock = UdpSocket::bind("127.0.0.1:8080").await?; - /// sock.set_ttl(60)?; - /// - /// # Ok(()) - /// # } - /// ``` - pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { - self.io.set_ttl(ttl) - } - - /// Gets the value of the `IP_TOS` option for this socket. - /// - /// For more information about this option, see [`set_tos`]. 
- /// - /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or - /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) - /// - /// [`set_tos`]: Self::set_tos - // https://docs.rs/socket2/0.5.3/src/socket2/socket.rs.html#1464 - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - #[cfg_attr( - docsrs, - doc(cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))) - )] - pub fn tos(&self) -> io::Result { - self.as_socket().tos() - } - - /// Sets the value for the `IP_TOS` option on this socket. - /// - /// This value sets the type-of-service field that is used in every packet - /// sent from this socket. - /// - /// **NOTE:** On Windows, `IP_TOS` is only supported on [Windows 8+ or - /// Windows Server 2012+.](https://docs.microsoft.com/en-us/windows/win32/winsock/ipproto-ip-socket-options) - // https://docs.rs/socket2/0.5.3/src/socket2/socket.rs.html#1446 - #[cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))] - #[cfg_attr( - docsrs, - doc(cfg(not(any( - target_os = "fuchsia", - target_os = "redox", - target_os = "solaris", - target_os = "illumos", - )))) - )] - pub fn set_tos(&self, tos: u32) -> io::Result<()> { - self.as_socket().set_tos(tos) - } - - /// Gets the value for the `SO_BINDTODEVICE` option on this socket - /// - /// This value gets the socket-bound device's interface name. 
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux",))] - #[cfg_attr( - docsrs, - doc(cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux",))) - )] - pub fn device(&self) -> io::Result>> { - self.as_socket().device() - } - - /// Sets the value for the `SO_BINDTODEVICE` option on this socket - /// - /// If a socket is bound to an interface, only packets received from that - /// particular interface are processed by the socket. Note that this only - /// works for some socket types, particularly `AF_INET` sockets. - /// - /// If `interface` is `None` or an empty string it removes the binding. - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - #[cfg_attr( - docsrs, - doc(cfg(all(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))) - )] - pub fn bind_device(&self, interface: Option<&[u8]>) -> io::Result<()> { - self.as_socket().bind_device(interface) - } - - /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// address of the local interface with which the system should join the - /// multicast group. If it's equal to `INADDR_ANY` then an appropriate - /// interface is chosen by the system. - pub fn join_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> { - self.io.join_multicast_v4(&multiaddr, &interface) - } - - /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type. - /// - /// This function specifies a new multicast group for this socket to join. - /// The address must be a valid multicast address, and `interface` is the - /// index of the interface to join/leave (or 0 to indicate any interface). 
- pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - self.io.join_multicast_v6(multiaddr, interface) - } - - /// Executes an operation of the `IP_DROP_MEMBERSHIP` type. - /// - /// For more information about this option, see [`join_multicast_v4`]. - /// - /// [`join_multicast_v4`]: method@Self::join_multicast_v4 - pub fn leave_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> { - self.io.leave_multicast_v4(&multiaddr, &interface) - } - - /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type. - /// - /// For more information about this option, see [`join_multicast_v6`]. - /// - /// [`join_multicast_v6`]: method@Self::join_multicast_v6 - pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { - self.io.leave_multicast_v6(multiaddr, interface) - } - - /// Returns the value of the `SO_ERROR` option. - /// - /// # Examples - /// ``` - /// use tokio::net::UdpSocket; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Create a socket - /// let socket = UdpSocket::bind("0.0.0.0:8080").await?; - /// - /// if let Ok(Some(err)) = socket.take_error() { - /// println!("Got error: {:?}", err); - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn take_error(&self) -> io::Result> { - self.io.take_error() - } -} - -impl TryFrom for UdpSocket { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`UdpSocket::from_std(stream)`](UdpSocket::from_std). 
- fn try_from(stream: std::net::UdpSocket) -> Result { - Self::from_std(stream) - } -} - -impl fmt::Debug for UdpSocket { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.fmt(f) - } -} - -#[cfg(unix)] -mod sys { - use super::UdpSocket; - use std::os::unix::prelude::*; - - impl AsRawFd for UdpSocket { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } - } - - impl AsFd for UdpSocket { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } - } -} - -cfg_windows! { - use crate::os::windows::io::{AsRawSocket, RawSocket}; - use crate::os::windows::io::{AsSocket, BorrowedSocket}; - - impl AsRawSocket for UdpSocket { - fn as_raw_socket(&self) -> RawSocket { - self.io.as_raw_socket() - } - } - - impl AsSocket for UdpSocket { - fn as_socket(&self) -> BorrowedSocket<'_> { - unsafe { BorrowedSocket::borrow_raw(self.as_raw_socket()) } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/datagram/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/datagram/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/datagram/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/datagram/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,3 +0,0 @@ -//! Unix datagram types. 
- -pub(crate) mod socket; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/datagram/socket.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/datagram/socket.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/datagram/socket.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/datagram/socket.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1583 +0,0 @@ -use crate::io::{Interest, PollEvented, ReadBuf, Ready}; -use crate::net::unix::SocketAddr; - -use std::fmt; -use std::io; -use std::net::Shutdown; -use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; -use std::os::unix::net; -use std::path::Path; -use std::task::{Context, Poll}; - -cfg_io_util! { - use bytes::BufMut; -} - -cfg_net_unix! { - /// An I/O object representing a Unix datagram socket. - /// - /// A socket can be either named (associated with a filesystem path) or - /// unnamed. - /// - /// This type does not provide a `split` method, because this functionality - /// can be achieved by wrapping the socket in an [`Arc`]. Note that you do - /// not need a `Mutex` to share the `UnixDatagram` — an `Arc` - /// is enough. This is because all of the methods take `&self` instead of - /// `&mut self`. - /// - /// **Note:** named sockets are persisted even after the object is dropped - /// and the program has exited, and cannot be reconnected. It is advised - /// that you either check for and unlink the existing socket if it exists, - /// or use a temporary file that is guaranteed to not already exist. - /// - /// [`Arc`]: std::sync::Arc - /// - /// # Examples - /// Using named sockets, associated with a filesystem path: - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use tempfile::tempdir; - /// - /// // We use a temporary directory so that the socket - /// // files left by the bound sockets will get cleaned up. 
- /// let tmp = tempdir()?; - /// - /// // Bind each socket to a filesystem path - /// let tx_path = tmp.path().join("tx"); - /// let tx = UnixDatagram::bind(&tx_path)?; - /// let rx_path = tmp.path().join("rx"); - /// let rx = UnixDatagram::bind(&rx_path)?; - /// - /// let bytes = b"hello world"; - /// tx.send_to(bytes, &rx_path).await?; - /// - /// let mut buf = vec![0u8; 24]; - /// let (size, addr) = rx.recv_from(&mut buf).await?; - /// - /// let dgram = &buf[..size]; - /// assert_eq!(dgram, bytes); - /// assert_eq!(addr.as_pathname().unwrap(), &tx_path); - /// - /// # Ok(()) - /// # } - /// ``` - /// - /// Using unnamed sockets, created as a pair - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// - /// // Create the pair of sockets - /// let (sock1, sock2) = UnixDatagram::pair()?; - /// - /// // Since the sockets are paired, the paired send/recv - /// // functions can be used - /// let bytes = b"hello world"; - /// sock1.send(bytes).await?; - /// - /// let mut buff = vec![0u8; 24]; - /// let size = sock2.recv(&mut buff).await?; - /// - /// let dgram = &buff[..size]; - /// assert_eq!(dgram, bytes); - /// - /// # Ok(()) - /// # } - /// ``` - #[cfg_attr(docsrs, doc(alias = "uds"))] - pub struct UnixDatagram { - io: PollEvented, - } -} - -impl UnixDatagram { - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with `try_recv()` or `try_send()`. It - /// can be used to concurrently recv / send to the same socket on a single - /// task without splitting the socket. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. 
- /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - /// - /// # Examples - /// - /// Concurrently receive from and send to the socket on the same task - /// without splitting. - /// - /// ```no_run - /// use tokio::io::Interest; - /// use tokio::net::UnixDatagram; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let dir = tempfile::tempdir().unwrap(); - /// let client_path = dir.path().join("client.sock"); - /// let server_path = dir.path().join("server.sock"); - /// let socket = UnixDatagram::bind(&client_path)?; - /// socket.connect(&server_path)?; - /// - /// loop { - /// let ready = socket.ready(Interest::READABLE | Interest::WRITABLE).await?; - /// - /// if ready.is_readable() { - /// let mut data = [0; 1024]; - /// match socket.try_recv(&mut data[..]) { - /// Ok(n) => { - /// println!("received {:?}", &data[..n]); - /// } - /// // False-positive, continue - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// if ready.is_writable() { - /// // Write some data - /// match socket.try_send(b"hello world") { - /// Ok(n) => { - /// println!("sent {} bytes", n); - /// } - /// // False-positive, continue - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub async fn ready(&self, interest: Interest) -> io::Result { - let event = self.io.registration().readiness(interest).await?; - Ok(event.ready) - } - - /// Waits for the socket to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is - /// usually paired with `try_send()` or `try_send_to()`. 
- /// - /// The function may complete without the socket being writable. This is a - /// false-positive and attempting a `try_send()` will return with - /// `io::ErrorKind::WouldBlock`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to write that fails with `WouldBlock` or - /// `Poll::Pending`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixDatagram; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let dir = tempfile::tempdir().unwrap(); - /// let client_path = dir.path().join("client.sock"); - /// let server_path = dir.path().join("server.sock"); - /// let socket = UnixDatagram::bind(&client_path)?; - /// socket.connect(&server_path)?; - /// - /// loop { - /// // Wait for the socket to be writable - /// socket.writable().await?; - /// - /// // Try to send data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_send(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn writable(&self) -> io::Result<()> { - self.ready(Interest::WRITABLE).await?; - Ok(()) - } - - /// Polls for write/send readiness. - /// - /// If the socket is not currently ready for sending, this method will - /// store a clone of the `Waker` from the provided `Context`. When the socket - /// becomes ready for sending, `Waker::wake` will be called on the - /// waker. - /// - /// Note that on multiple calls to `poll_send_ready` or `poll_send`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. 
(However, `poll_recv_ready` retains a - /// second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`writable`] is not feasible. Where possible, using [`writable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not ready for writing. - /// * `Poll::Ready(Ok(()))` if the socket is ready for writing. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`writable`]: method@Self::writable - pub fn poll_send_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_write_ready(cx).map_ok(|_| ()) - } - - /// Waits for the socket to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_recv()`. - /// - /// The function may complete without the socket being readable. This is a - /// false-positive and attempting a `try_recv()` will return with - /// `io::ErrorKind::WouldBlock`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read that fails with `WouldBlock` or - /// `Poll::Pending`. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixDatagram; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let client_path = dir.path().join("client.sock"); - /// let server_path = dir.path().join("server.sock"); - /// let socket = UnixDatagram::bind(&client_path)?; - /// socket.connect(&server_path)?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// // The buffer is **not** included in the async task and will - /// // only exist on the stack. - /// let mut buf = [0; 1024]; - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_recv(&mut buf) { - /// Ok(n) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn readable(&self) -> io::Result<()> { - self.ready(Interest::READABLE).await?; - Ok(()) - } - - /// Polls for read/receive readiness. - /// - /// If the socket is not currently ready for receiving, this method will - /// store a clone of the `Waker` from the provided `Context`. When the - /// socket becomes ready for reading, `Waker::wake` will be called on the - /// waker. - /// - /// Note that on multiple calls to `poll_recv_ready`, `poll_recv` or - /// `poll_peek`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. (However, - /// `poll_send_ready` retains a second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`readable`] is not feasible. 
Where possible, using [`readable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not ready for reading. - /// * `Poll::Ready(Ok(()))` if the socket is ready for reading. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`readable`]: method@Self::readable - pub fn poll_recv_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_read_ready(cx).map_ok(|_| ()) - } - - /// Creates a new `UnixDatagram` bound to the specified path. - /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use tempfile::tempdir; - /// - /// // We use a temporary directory so that the socket - /// // files left by the bound sockets will get cleaned up. - /// let tmp = tempdir()?; - /// - /// // Bind the socket to a filesystem path - /// let socket_path = tmp.path().join("socket"); - /// let socket = UnixDatagram::bind(&socket_path)?; - /// - /// # Ok(()) - /// # } - /// ``` - pub fn bind

(path: P) -> io::Result - where - P: AsRef, - { - let socket = mio::net::UnixDatagram::bind(path)?; - UnixDatagram::new(socket) - } - - /// Creates an unnamed pair of connected sockets. - /// - /// This function will create a pair of interconnected Unix sockets for - /// communicating back and forth between one another. - /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// - /// // Create the pair of sockets - /// let (sock1, sock2) = UnixDatagram::pair()?; - /// - /// // Since the sockets are paired, the paired send/recv - /// // functions can be used - /// let bytes = b"hail eris"; - /// sock1.send(bytes).await?; - /// - /// let mut buff = vec![0u8; 24]; - /// let size = sock2.recv(&mut buff).await?; - /// - /// let dgram = &buff[..size]; - /// assert_eq!(dgram, bytes); - /// - /// # Ok(()) - /// # } - /// ``` - pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> { - let (a, b) = mio::net::UnixDatagram::pair()?; - let a = UnixDatagram::new(a)?; - let b = UnixDatagram::new(b)?; - - Ok((a, b)) - } - - /// Creates new `UnixDatagram` from a `std::os::unix::net::UnixDatagram`. - /// - /// This function is intended to be used to wrap a UnixDatagram from the - /// standard library in the Tokio equivalent. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socker is in - /// non-blocking mode. Otherwise all I/O operations on the socket - /// will block the thread, which will cause unexpected behavior. - /// Non-blocking mode can be set using [`set_nonblocking`]. - /// - /// [`set_nonblocking`]: std::os::unix::net::UnixDatagram::set_nonblocking - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. 
- /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a Tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use std::os::unix::net::UnixDatagram as StdUDS; - /// use tempfile::tempdir; - /// - /// // We use a temporary directory so that the socket - /// // files left by the bound sockets will get cleaned up. - /// let tmp = tempdir()?; - /// - /// // Bind the socket to a filesystem path - /// let socket_path = tmp.path().join("socket"); - /// let std_socket = StdUDS::bind(&socket_path)?; - /// std_socket.set_nonblocking(true)?; - /// let tokio_socket = UnixDatagram::from_std(std_socket)?; - /// - /// # Ok(()) - /// # } - /// ``` - #[track_caller] - pub fn from_std(datagram: net::UnixDatagram) -> io::Result { - let socket = mio::net::UnixDatagram::from_std(datagram); - let io = PollEvented::new(socket)?; - Ok(UnixDatagram { io }) - } - - /// Turns a [`tokio::net::UnixDatagram`] into a [`std::os::unix::net::UnixDatagram`]. - /// - /// The returned [`std::os::unix::net::UnixDatagram`] will have nonblocking - /// mode set as `true`. Use [`set_nonblocking`] to change the blocking mode - /// if needed. 
- /// - /// # Examples - /// - /// ```rust,no_run - /// # use std::error::Error; - /// # async fn dox() -> Result<(), Box> { - /// let tokio_socket = tokio::net::UnixDatagram::bind("/path/to/the/socket")?; - /// let std_socket = tokio_socket.into_std()?; - /// std_socket.set_nonblocking(false)?; - /// # Ok(()) - /// # } - /// ``` - /// - /// [`tokio::net::UnixDatagram`]: UnixDatagram - /// [`std::os::unix::net::UnixDatagram`]: std::os::unix::net::UnixDatagram - /// [`set_nonblocking`]: fn@std::os::unix::net::UnixDatagram::set_nonblocking - pub fn into_std(self) -> io::Result { - self.io - .into_inner() - .map(|io| io.into_raw_fd()) - .map(|raw_fd| unsafe { std::os::unix::net::UnixDatagram::from_raw_fd(raw_fd) }) - } - - fn new(socket: mio::net::UnixDatagram) -> io::Result { - let io = PollEvented::new(socket)?; - Ok(UnixDatagram { io }) - } - - /// Creates a new `UnixDatagram` which is not bound to any address. - /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use tempfile::tempdir; - /// - /// // Create an unbound socket - /// let tx = UnixDatagram::unbound()?; - /// - /// // Create another, bound socket - /// let tmp = tempdir()?; - /// let rx_path = tmp.path().join("rx"); - /// let rx = UnixDatagram::bind(&rx_path)?; - /// - /// // Send to the bound socket - /// let bytes = b"hello world"; - /// tx.send_to(bytes, &rx_path).await?; - /// - /// let mut buf = vec![0u8; 24]; - /// let (size, addr) = rx.recv_from(&mut buf).await?; - /// - /// let dgram = &buf[..size]; - /// assert_eq!(dgram, bytes); - /// - /// # Ok(()) - /// # } - /// ``` - pub fn unbound() -> io::Result { - let socket = mio::net::UnixDatagram::unbound()?; - UnixDatagram::new(socket) - } - - /// Connects the socket to the specified address. - /// - /// The `send` method may be used to send data to the specified address. 
- /// `recv` and `recv_from` will only receive data from that address. - /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use tempfile::tempdir; - /// - /// // Create an unbound socket - /// let tx = UnixDatagram::unbound()?; - /// - /// // Create another, bound socket - /// let tmp = tempdir()?; - /// let rx_path = tmp.path().join("rx"); - /// let rx = UnixDatagram::bind(&rx_path)?; - /// - /// // Connect to the bound socket - /// tx.connect(&rx_path)?; - /// - /// // Send to the bound socket - /// let bytes = b"hello world"; - /// tx.send(bytes).await?; - /// - /// let mut buf = vec![0u8; 24]; - /// let (size, addr) = rx.recv_from(&mut buf).await?; - /// - /// let dgram = &buf[..size]; - /// assert_eq!(dgram, bytes); - /// - /// # Ok(()) - /// # } - /// ``` - pub fn connect>(&self, path: P) -> io::Result<()> { - self.io.connect(path) - } - - /// Sends data on the socket to the socket's peer. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `send` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that the message was not sent. 
- /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// - /// // Create the pair of sockets - /// let (sock1, sock2) = UnixDatagram::pair()?; - /// - /// // Since the sockets are paired, the paired send/recv - /// // functions can be used - /// let bytes = b"hello world"; - /// sock1.send(bytes).await?; - /// - /// let mut buff = vec![0u8; 24]; - /// let size = sock2.recv(&mut buff).await?; - /// - /// let dgram = &buff[..size]; - /// assert_eq!(dgram, bytes); - /// - /// # Ok(()) - /// # } - /// ``` - pub async fn send(&self, buf: &[u8]) -> io::Result { - self.io - .registration() - .async_io(Interest::WRITABLE, || self.io.send(buf)) - .await - } - - /// Tries to send a datagram to the peer without waiting. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixDatagram; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let dir = tempfile::tempdir().unwrap(); - /// let client_path = dir.path().join("client.sock"); - /// let server_path = dir.path().join("server.sock"); - /// let socket = UnixDatagram::bind(&client_path)?; - /// socket.connect(&server_path)?; - /// - /// loop { - /// // Wait for the socket to be writable - /// socket.writable().await?; - /// - /// // Try to send data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_send(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_send(&self, buf: &[u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || self.io.send(buf)) - } - - /// Tries to send a datagram to the peer without waiting. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixDatagram; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let dir = tempfile::tempdir().unwrap(); - /// let client_path = dir.path().join("client.sock"); - /// let server_path = dir.path().join("server.sock"); - /// let socket = UnixDatagram::bind(&client_path)?; - /// - /// loop { - /// // Wait for the socket to be writable - /// socket.writable().await?; - /// - /// // Try to send data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_send_to(b"hello world", &server_path) { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_send_to

(&self, buf: &[u8], target: P) -> io::Result - where - P: AsRef, - { - self.io - .registration() - .try_io(Interest::WRITABLE, || self.io.send_to(buf, target)) - } - - /// Receives data from the socket. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `recv` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, it is guaranteed that no messages were received on this - /// socket. - /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// - /// // Create the pair of sockets - /// let (sock1, sock2) = UnixDatagram::pair()?; - /// - /// // Since the sockets are paired, the paired send/recv - /// // functions can be used - /// let bytes = b"hello world"; - /// sock1.send(bytes).await?; - /// - /// let mut buff = vec![0u8; 24]; - /// let size = sock2.recv(&mut buff).await?; - /// - /// let dgram = &buff[..size]; - /// assert_eq!(dgram, bytes); - /// - /// # Ok(()) - /// # } - /// ``` - pub async fn recv(&self, buf: &mut [u8]) -> io::Result { - self.io - .registration() - .async_io(Interest::READABLE, || self.io.recv(buf)) - .await - } - - /// Tries to receive a datagram from the peer without waiting. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixDatagram; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let client_path = dir.path().join("client.sock"); - /// let server_path = dir.path().join("server.sock"); - /// let socket = UnixDatagram::bind(&client_path)?; - /// socket.connect(&server_path)?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// // The buffer is **not** included in the async task and will - /// // only exist on the stack. 
- /// let mut buf = [0; 1024]; - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_recv(&mut buf) { - /// Ok(n) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_recv(&self, buf: &mut [u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || self.io.recv(buf)) - } - - cfg_io_util! { - /// Tries to receive data from the socket without waiting. - /// - /// This method can be used even if `buf` is uninitialized. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixDatagram; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let client_path = dir.path().join("client.sock"); - /// let server_path = dir.path().join("server.sock"); - /// let socket = UnixDatagram::bind(&client_path)?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// let mut buf = Vec::with_capacity(1024); - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match socket.try_recv_buf_from(&mut buf) { - /// Ok((n, _addr)) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_recv_buf_from(&self, buf: &mut B) -> io::Result<(usize, SocketAddr)> { - let (n, addr) = self.io.registration().try_io(Interest::READABLE, || { - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - // Safety: We trust `UnixDatagram::recv_from` to have filled up `n` bytes in the - // buffer. - let (n, addr) = (*self.io).recv_from(dst)?; - - unsafe { - buf.advance_mut(n); - } - - Ok((n, addr)) - })?; - - Ok((n, SocketAddr(addr))) - } - - /// Receives from the socket, advances the - /// buffer's internal cursor and returns how many bytes were read and the origin. - /// - /// This method can be used even if `buf` is uninitialized. - /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use tempfile::tempdir; - /// - /// // We use a temporary directory so that the socket - /// // files left by the bound sockets will get cleaned up. 
- /// let tmp = tempdir()?; - /// - /// // Bind each socket to a filesystem path - /// let tx_path = tmp.path().join("tx"); - /// let tx = UnixDatagram::bind(&tx_path)?; - /// let rx_path = tmp.path().join("rx"); - /// let rx = UnixDatagram::bind(&rx_path)?; - /// - /// let bytes = b"hello world"; - /// tx.send_to(bytes, &rx_path).await?; - /// - /// let mut buf = Vec::with_capacity(24); - /// let (size, addr) = rx.recv_buf_from(&mut buf).await?; - /// - /// let dgram = &buf[..size]; - /// assert_eq!(dgram, bytes); - /// assert_eq!(addr.as_pathname().unwrap(), &tx_path); - /// - /// # Ok(()) - /// # } - /// ``` - pub async fn recv_buf_from(&self, buf: &mut B) -> io::Result<(usize, SocketAddr)> { - self.io.registration().async_io(Interest::READABLE, || { - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - // Safety: We trust `UnixDatagram::recv_from` to have filled up `n` bytes in the - // buffer. - let (n, addr) = (*self.io).recv_from(dst)?; - - unsafe { - buf.advance_mut(n); - } - Ok((n,SocketAddr(addr))) - }).await - } - - /// Tries to read data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// This method can be used even if `buf` is uninitialized. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixDatagram; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let client_path = dir.path().join("client.sock"); - /// let server_path = dir.path().join("server.sock"); - /// let socket = UnixDatagram::bind(&client_path)?; - /// socket.connect(&server_path)?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// let mut buf = Vec::with_capacity(1024); - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match socket.try_recv_buf(&mut buf) { - /// Ok(n) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_recv_buf(&self, buf: &mut B) -> io::Result { - self.io.registration().try_io(Interest::READABLE, || { - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - // Safety: We trust `UnixDatagram::recv` to have filled up `n` bytes in the - // buffer. - let n = (*self.io).recv(dst)?; - - unsafe { - buf.advance_mut(n); - } - - Ok(n) - }) - } - - /// Receives data from the socket from the address to which it is connected, - /// advancing the buffer's internal cursor, returning how many bytes were read. - /// - /// This method can be used even if `buf` is uninitialized. 
- /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// - /// // Create the pair of sockets - /// let (sock1, sock2) = UnixDatagram::pair()?; - /// - /// // Since the sockets are paired, the paired send/recv - /// // functions can be used - /// let bytes = b"hello world"; - /// sock1.send(bytes).await?; - /// - /// let mut buff = Vec::with_capacity(24); - /// let size = sock2.recv_buf(&mut buff).await?; - /// - /// let dgram = &buff[..size]; - /// assert_eq!(dgram, bytes); - /// - /// # Ok(()) - /// # } - /// ``` - pub async fn recv_buf(&self, buf: &mut B) -> io::Result { - self.io.registration().async_io(Interest::READABLE, || { - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - // Safety: We trust `UnixDatagram::recv_from` to have filled up `n` bytes in the - // buffer. - let n = (*self.io).recv(dst)?; - - unsafe { - buf.advance_mut(n); - } - Ok(n) - }).await - } - } - - /// Sends data on the socket to the specified address. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `send_to` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that the message was not sent. - /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use tempfile::tempdir; - /// - /// // We use a temporary directory so that the socket - /// // files left by the bound sockets will get cleaned up. 
- /// let tmp = tempdir()?; - /// - /// // Bind each socket to a filesystem path - /// let tx_path = tmp.path().join("tx"); - /// let tx = UnixDatagram::bind(&tx_path)?; - /// let rx_path = tmp.path().join("rx"); - /// let rx = UnixDatagram::bind(&rx_path)?; - /// - /// let bytes = b"hello world"; - /// tx.send_to(bytes, &rx_path).await?; - /// - /// let mut buf = vec![0u8; 24]; - /// let (size, addr) = rx.recv_from(&mut buf).await?; - /// - /// let dgram = &buf[..size]; - /// assert_eq!(dgram, bytes); - /// assert_eq!(addr.as_pathname().unwrap(), &tx_path); - /// - /// # Ok(()) - /// # } - /// ``` - pub async fn send_to

(&self, buf: &[u8], target: P) -> io::Result - where - P: AsRef, - { - self.io - .registration() - .async_io(Interest::WRITABLE, || self.io.send_to(buf, target.as_ref())) - .await - } - - /// Receives data from the socket. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `recv_from` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, it is guaranteed that no messages were received on this - /// socket. - /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use tempfile::tempdir; - /// - /// // We use a temporary directory so that the socket - /// // files left by the bound sockets will get cleaned up. - /// let tmp = tempdir()?; - /// - /// // Bind each socket to a filesystem path - /// let tx_path = tmp.path().join("tx"); - /// let tx = UnixDatagram::bind(&tx_path)?; - /// let rx_path = tmp.path().join("rx"); - /// let rx = UnixDatagram::bind(&rx_path)?; - /// - /// let bytes = b"hello world"; - /// tx.send_to(bytes, &rx_path).await?; - /// - /// let mut buf = vec![0u8; 24]; - /// let (size, addr) = rx.recv_from(&mut buf).await?; - /// - /// let dgram = &buf[..size]; - /// assert_eq!(dgram, bytes); - /// assert_eq!(addr.as_pathname().unwrap(), &tx_path); - /// - /// # Ok(()) - /// # } - /// ``` - pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - let (n, addr) = self - .io - .registration() - .async_io(Interest::READABLE, || self.io.recv_from(buf)) - .await?; - - Ok((n, SocketAddr(addr))) - } - - /// Attempts to receive a single datagram on the specified address. - /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the - /// `Waker` from the `Context` passed to the most recent call will be scheduled to - /// receive a wakeup. 
- /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not ready to read - /// * `Poll::Ready(Ok(addr))` reads data from `addr` into `ReadBuf` if the socket is ready - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - pub fn poll_recv_from( - &self, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - let (n, addr) = ready!(self.io.registration().poll_read_io(cx, || { - // Safety: will not read the maybe uninitialized bytes. - let b = unsafe { - &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]) - }; - - self.io.recv_from(b) - }))?; - - // Safety: We trust `recv` to have filled up `n` bytes in the buffer. - unsafe { - buf.assume_init(n); - } - buf.advance(n); - Poll::Ready(Ok(SocketAddr(addr))) - } - - /// Attempts to send data to the specified address. - /// - /// Note that on multiple calls to a `poll_*` method in the send direction, only the - /// `Waker` from the `Context` passed to the most recent call will be scheduled to - /// receive a wakeup. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not ready to write - /// * `Poll::Ready(Ok(n))` `n` is the number of bytes sent. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - pub fn poll_send_to

( - &self, - cx: &mut Context<'_>, - buf: &[u8], - target: P, - ) -> Poll> - where - P: AsRef, - { - self.io - .registration() - .poll_write_io(cx, || self.io.send_to(buf, target.as_ref())) - } - - /// Attempts to send data on the socket to the remote address to which it - /// was previously `connect`ed. - /// - /// The [`connect`] method will connect this socket to a remote address. - /// This method will fail if the socket is not connected. - /// - /// Note that on multiple calls to a `poll_*` method in the send direction, - /// only the `Waker` from the `Context` passed to the most recent call will - /// be scheduled to receive a wakeup. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not available to write - /// * `Poll::Ready(Ok(n))` `n` is the number of bytes sent - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`connect`]: method@Self::connect - pub fn poll_send(&self, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - self.io - .registration() - .poll_write_io(cx, || self.io.send(buf)) - } - - /// Attempts to receive a single datagram message on the socket from the remote - /// address to which it is `connect`ed. - /// - /// The [`connect`] method will connect this socket to a remote address. This method - /// resolves to an error if the socket is not connected. - /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the - /// `Waker` from the `Context` passed to the most recent call will be scheduled to - /// receive a wakeup. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the socket is not ready to read - /// * `Poll::Ready(Ok(()))` reads data `ReadBuf` if the socket is ready - /// * `Poll::Ready(Err(e))` if an error is encountered. 
- /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`connect`]: method@Self::connect - pub fn poll_recv(&self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { - let n = ready!(self.io.registration().poll_read_io(cx, || { - // Safety: will not read the maybe uninitialized bytes. - let b = unsafe { - &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]) - }; - - self.io.recv(b) - }))?; - - // Safety: We trust `recv` to have filled up `n` bytes in the buffer. - unsafe { - buf.assume_init(n); - } - buf.advance(n); - Poll::Ready(Ok(())) - } - - /// Tries to receive data from the socket without waiting. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixDatagram; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let client_path = dir.path().join("client.sock"); - /// let server_path = dir.path().join("server.sock"); - /// let socket = UnixDatagram::bind(&client_path)?; - /// - /// loop { - /// // Wait for the socket to be readable - /// socket.readable().await?; - /// - /// // The buffer is **not** included in the async task and will - /// // only exist on the stack. - /// let mut buf = [0; 1024]; - /// - /// // Try to recv data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match socket.try_recv_from(&mut buf) { - /// Ok((n, _addr)) => { - /// println!("GOT {:?}", &buf[..n]); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - let (n, addr) = self - .io - .registration() - .try_io(Interest::READABLE, || self.io.recv_from(buf))?; - - Ok((n, SocketAddr(addr))) - } - - /// Tries to read or write from the socket using a user-provided IO operation. - /// - /// If the socket is ready, the provided closure is called. The closure - /// should attempt to perform IO operation on the socket by manually - /// calling the appropriate syscall. If the operation fails because the - /// socket is not actually ready, then the closure should return a - /// `WouldBlock` error and the readiness flag is cleared. The return value - /// of the closure is then returned by `try_io`. - /// - /// If the socket is not ready, then the closure is not called - /// and a `WouldBlock` error is returned. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the socket that failed due to the socket not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the socket to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `UnixDatagram` type, as this will mess with the - /// readiness flag and can cause the socket to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. 
This method may panic or sleep forever - /// if it is called with a combined interest. - /// - /// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: UnixDatagram::readable() - /// [`writable()`]: UnixDatagram::writable() - /// [`ready()`]: UnixDatagram::ready() - pub fn try_io( - &self, - interest: Interest, - f: impl FnOnce() -> io::Result, - ) -> io::Result { - self.io - .registration() - .try_io(interest, || self.io.try_io(f)) - } - - /// Reads or writes from the socket using a user-provided IO operation. - /// - /// The readiness of the socket is awaited and when the socket is ready, - /// the provided closure is called. The closure should attempt to perform - /// IO operation on the socket by manually calling the appropriate syscall. - /// If the operation fails because the socket is not actually ready, - /// then the closure should return a `WouldBlock` error. In such case the - /// readiness flag is cleared and the socket readiness is awaited again. - /// This loop is repeated until the closure returns an `Ok` or an error - /// other than `WouldBlock`. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the socket that failed due to the socket not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the socket to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `UnixDatagram` type, as this will mess with the - /// readiness flag and can cause the socket to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. 
- pub async fn async_io( - &self, - interest: Interest, - mut f: impl FnMut() -> io::Result, - ) -> io::Result { - self.io - .registration() - .async_io(interest, || self.io.try_io(&mut f)) - .await - } - - /// Returns the local address that this socket is bound to. - /// - /// # Examples - /// For a socket bound to a local path - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use tempfile::tempdir; - /// - /// // We use a temporary directory so that the socket - /// // files left by the bound sockets will get cleaned up. - /// let tmp = tempdir()?; - /// - /// // Bind socket to a filesystem path - /// let socket_path = tmp.path().join("socket"); - /// let socket = UnixDatagram::bind(&socket_path)?; - /// - /// assert_eq!(socket.local_addr()?.as_pathname().unwrap(), &socket_path); - /// - /// # Ok(()) - /// # } - /// ``` - /// - /// For an unbound socket - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// - /// // Create an unbound socket - /// let socket = UnixDatagram::unbound()?; - /// - /// assert!(socket.local_addr()?.is_unnamed()); - /// - /// # Ok(()) - /// # } - /// ``` - pub fn local_addr(&self) -> io::Result { - self.io.local_addr().map(SocketAddr) - } - - /// Returns the address of this socket's peer. - /// - /// The `connect` method will connect the socket to a peer. 
- /// - /// # Examples - /// For a peer with a local path - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use tempfile::tempdir; - /// - /// // Create an unbound socket - /// let tx = UnixDatagram::unbound()?; - /// - /// // Create another, bound socket - /// let tmp = tempdir()?; - /// let rx_path = tmp.path().join("rx"); - /// let rx = UnixDatagram::bind(&rx_path)?; - /// - /// // Connect to the bound socket - /// tx.connect(&rx_path)?; - /// - /// assert_eq!(tx.peer_addr()?.as_pathname().unwrap(), &rx_path); - /// - /// # Ok(()) - /// # } - /// ``` - /// - /// For an unbound peer - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// - /// // Create the pair of sockets - /// let (sock1, sock2) = UnixDatagram::pair()?; - /// - /// assert!(sock1.peer_addr()?.is_unnamed()); - /// - /// # Ok(()) - /// # } - /// ``` - pub fn peer_addr(&self) -> io::Result { - self.io.peer_addr().map(SocketAddr) - } - - /// Returns the value of the `SO_ERROR` option. - /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// - /// // Create an unbound socket - /// let socket = UnixDatagram::unbound()?; - /// - /// if let Ok(Some(err)) = socket.take_error() { - /// println!("Got error: {:?}", err); - /// } - /// - /// # Ok(()) - /// # } - /// ``` - pub fn take_error(&self) -> io::Result> { - self.io.take_error() - } - - /// Shuts down the read, write, or both halves of this connection. - /// - /// This function will cause all pending and future I/O calls on the - /// specified portions to immediately return with an appropriate value - /// (see the documentation of `Shutdown`). 
- /// - /// # Examples - /// ``` - /// # use std::error::Error; - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use tokio::net::UnixDatagram; - /// use std::net::Shutdown; - /// - /// // Create an unbound socket - /// let (socket, other) = UnixDatagram::pair()?; - /// - /// socket.shutdown(Shutdown::Both)?; - /// - /// // NOTE: the following commented out code does NOT work as expected. - /// // Due to an underlying issue, the recv call will block indefinitely. - /// // See: https://github.com/tokio-rs/tokio/issues/1679 - /// //let mut buff = vec![0u8; 24]; - /// //let size = socket.recv(&mut buff).await?; - /// //assert_eq!(size, 0); - /// - /// let send_result = socket.send(b"hello world").await; - /// assert!(send_result.is_err()); - /// - /// # Ok(()) - /// # } - /// ``` - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.io.shutdown(how) - } -} - -impl TryFrom for UnixDatagram { - type Error = io::Error; - - /// Consumes stream, returning the Tokio I/O object. - /// - /// This is equivalent to - /// [`UnixDatagram::from_std(stream)`](UnixDatagram::from_std). 
- fn try_from(stream: std::os::unix::net::UnixDatagram) -> Result { - Self::from_std(stream) - } -} - -impl fmt::Debug for UnixDatagram { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.fmt(f) - } -} - -impl AsRawFd for UnixDatagram { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } -} - -impl AsFd for UnixDatagram { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/listener.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/listener.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/listener.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/listener.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,216 +0,0 @@ -use crate::io::{Interest, PollEvented}; -use crate::net::unix::{SocketAddr, UnixStream}; - -use std::fmt; -use std::io; -use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; -use std::os::unix::net; -use std::path::Path; -use std::task::{Context, Poll}; - -cfg_net_unix! { - /// A Unix socket which can accept connections from other Unix sockets. - /// - /// You can accept a new connection by using the [`accept`](`UnixListener::accept`) method. - /// - /// A `UnixListener` can be turned into a `Stream` with [`UnixListenerStream`]. - /// - /// [`UnixListenerStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.UnixListenerStream.html - /// - /// # Errors - /// - /// Note that accepting a connection can lead to various errors and not all - /// of them are necessarily fatal ‒ for example having too many open file - /// descriptors or the other side closing the connection while it waits in - /// an accept queue. These would terminate the stream if not handled in any - /// way. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixListener; - /// - /// #[tokio::main] - /// async fn main() { - /// let listener = UnixListener::bind("/path/to/the/socket").unwrap(); - /// loop { - /// match listener.accept().await { - /// Ok((stream, _addr)) => { - /// println!("new client!"); - /// } - /// Err(e) => { /* connection failed */ } - /// } - /// } - /// } - /// ``` - #[cfg_attr(docsrs, doc(alias = "uds"))] - pub struct UnixListener { - io: PollEvented, - } -} - -impl UnixListener { - /// Creates a new `UnixListener` bound to the specified path. - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - #[track_caller] - pub fn bind

(path: P) -> io::Result - where - P: AsRef, - { - let listener = mio::net::UnixListener::bind(path)?; - let io = PollEvented::new(listener)?; - Ok(UnixListener { io }) - } - - /// Creates new `UnixListener` from a `std::os::unix::net::UnixListener `. - /// - /// This function is intended to be used to wrap a UnixListener from the - /// standard library in the Tokio equivalent. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the listener is in - /// non-blocking mode. Otherwise all I/O operations on the listener - /// will block the thread, which will cause unexpected behavior. - /// Non-blocking mode can be set using [`set_nonblocking`]. - /// - /// [`set_nonblocking`]: std::os::unix::net::UnixListener::set_nonblocking - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixListener; - /// use std::os::unix::net::UnixListener as StdUnixListener; - /// # use std::error::Error; - /// - /// # async fn dox() -> Result<(), Box> { - /// let std_listener = StdUnixListener::bind("/path/to/the/socket")?; - /// std_listener.set_nonblocking(true)?; - /// let listener = UnixListener::from_std(std_listener)?; - /// # Ok(()) - /// # } - /// ``` - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - #[track_caller] - pub fn from_std(listener: net::UnixListener) -> io::Result { - let listener = mio::net::UnixListener::from_std(listener); - let io = PollEvented::new(listener)?; - Ok(UnixListener { io }) - } - - /// Turns a [`tokio::net::UnixListener`] into a [`std::os::unix::net::UnixListener`]. - /// - /// The returned [`std::os::unix::net::UnixListener`] will have nonblocking mode - /// set as `true`. 
Use [`set_nonblocking`] to change the blocking mode if needed. - /// - /// # Examples - /// - /// ```rust,no_run - /// # use std::error::Error; - /// # async fn dox() -> Result<(), Box> { - /// let tokio_listener = tokio::net::UnixListener::bind("/path/to/the/socket")?; - /// let std_listener = tokio_listener.into_std()?; - /// std_listener.set_nonblocking(false)?; - /// # Ok(()) - /// # } - /// ``` - /// - /// [`tokio::net::UnixListener`]: UnixListener - /// [`std::os::unix::net::UnixListener`]: std::os::unix::net::UnixListener - /// [`set_nonblocking`]: fn@std::os::unix::net::UnixListener::set_nonblocking - pub fn into_std(self) -> io::Result { - self.io - .into_inner() - .map(|io| io.into_raw_fd()) - .map(|raw_fd| unsafe { net::UnixListener::from_raw_fd(raw_fd) }) - } - - /// Returns the local socket address of this listener. - pub fn local_addr(&self) -> io::Result { - self.io.local_addr().map(SocketAddr) - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.io.take_error() - } - - /// Accepts a new incoming connection to this listener. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If the method is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that no new connections were - /// accepted by this method. - pub async fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> { - let (mio, addr) = self - .io - .registration() - .async_io(Interest::READABLE, || self.io.accept()) - .await?; - - let addr = SocketAddr(addr); - let stream = UnixStream::new(mio)?; - Ok((stream, addr)) - } - - /// Polls to accept a new incoming connection to this listener. - /// - /// If there is no connection to accept, `Poll::Pending` is returned and the - /// current task will be notified by a waker. 
Note that on multiple calls - /// to `poll_accept`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. - pub fn poll_accept(&self, cx: &mut Context<'_>) -> Poll> { - let (sock, addr) = ready!(self.io.registration().poll_read_io(cx, || self.io.accept()))?; - let addr = SocketAddr(addr); - let sock = UnixStream::new(sock)?; - Poll::Ready(Ok((sock, addr))) - } -} - -impl TryFrom for UnixListener { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`UnixListener::from_std(stream)`](UnixListener::from_std). - fn try_from(stream: std::os::unix::net::UnixListener) -> io::Result { - Self::from_std(stream) - } -} - -impl fmt::Debug for UnixListener { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.fmt(f) - } -} - -impl AsRawFd for UnixListener { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } -} - -impl AsFd for UnixListener { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -//! Unix specific network types. -// This module does not currently provide any public API, but it was -// unintentionally defined as a public module. Hide it from the documentation -// instead of changing it to a private module to avoid breakage. 
-#[doc(hidden)] -pub mod datagram; - -pub(crate) mod listener; - -mod split; -pub use split::{ReadHalf, WriteHalf}; - -mod split_owned; -pub use split_owned::{OwnedReadHalf, OwnedWriteHalf, ReuniteError}; - -mod socketaddr; -pub use socketaddr::SocketAddr; - -pub(crate) mod stream; -pub(crate) use stream::UnixStream; - -mod ucred; -pub use ucred::UCred; - -pub mod pipe; - -/// A type representing process and process group IDs. -#[allow(non_camel_case_types)] -pub type uid_t = u32; - -/// A type representing user ID. -#[allow(non_camel_case_types)] -pub type gid_t = u32; - -/// A type representing group ID. -#[allow(non_camel_case_types)] -pub type pid_t = i32; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/pipe.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/pipe.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/pipe.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/pipe.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1217 +0,0 @@ -//! Unix pipe types. - -use crate::io::interest::Interest; -use crate::io::{AsyncRead, AsyncWrite, PollEvented, ReadBuf, Ready}; - -use mio::unix::pipe as mio_pipe; -use std::fs::File; -use std::io::{self, Read, Write}; -use std::os::unix::fs::{FileTypeExt, OpenOptionsExt}; -use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; -use std::path::Path; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - use bytes::BufMut; -} - -/// Options and flags which can be used to configure how a FIFO file is opened. -/// -/// This builder allows configuring how to create a pipe end from a FIFO file. -/// Generally speaking, when using `OpenOptions`, you'll first call [`new`], -/// then chain calls to methods to set each option, then call either -/// [`open_receiver`] or [`open_sender`], passing the path of the FIFO file you -/// are trying to open. This will give you a [`io::Result`] with a pipe end -/// inside that you can further operate on. 
-/// -/// [`new`]: OpenOptions::new -/// [`open_receiver`]: OpenOptions::open_receiver -/// [`open_sender`]: OpenOptions::open_sender -/// -/// # Examples -/// -/// Opening a pair of pipe ends from a FIFO file: -/// -/// ```no_run -/// use tokio::net::unix::pipe; -/// # use std::error::Error; -/// -/// const FIFO_NAME: &str = "path/to/a/fifo"; -/// -/// # async fn dox() -> Result<(), Box> { -/// let rx = pipe::OpenOptions::new().open_receiver(FIFO_NAME)?; -/// let tx = pipe::OpenOptions::new().open_sender(FIFO_NAME)?; -/// # Ok(()) -/// # } -/// ``` -/// -/// Opening a [`Sender`] on Linux when you are sure the file is a FIFO: -/// -/// ```ignore -/// use tokio::net::unix::pipe; -/// use nix::{unistd::mkfifo, sys::stat::Mode}; -/// # use std::error::Error; -/// -/// // Our program has exclusive access to this path. -/// const FIFO_NAME: &str = "path/to/a/new/fifo"; -/// -/// # async fn dox() -> Result<(), Box> { -/// mkfifo(FIFO_NAME, Mode::S_IRWXU)?; -/// let tx = pipe::OpenOptions::new() -/// .read_write(true) -/// .unchecked(true) -/// .open_sender(FIFO_NAME)?; -/// # Ok(()) -/// # } -/// ``` -#[derive(Clone, Debug)] -pub struct OpenOptions { - #[cfg(target_os = "linux")] - read_write: bool, - unchecked: bool, -} - -impl OpenOptions { - /// Creates a blank new set of options ready for configuration. - /// - /// All options are initially set to `false`. - pub fn new() -> OpenOptions { - OpenOptions { - #[cfg(target_os = "linux")] - read_write: false, - unchecked: false, - } - } - - /// Sets the option for read-write access. - /// - /// This option, when true, will indicate that a FIFO file will be opened - /// in read-write access mode. This operation is not defined by the POSIX - /// standard and is only guaranteed to work on Linux. 
- /// - /// # Examples - /// - /// Opening a [`Sender`] even if there are no open reading ends: - /// - /// ```ignore - /// use tokio::net::unix::pipe; - /// - /// let tx = pipe::OpenOptions::new() - /// .read_write(true) - /// .open_sender("path/to/a/fifo"); - /// ``` - /// - /// Opening a resilient [`Receiver`] i.e. a reading pipe end which will not - /// fail with [`UnexpectedEof`] during reading if all writing ends of the - /// pipe close the FIFO file. - /// - /// [`UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof - /// - /// ```ignore - /// use tokio::net::unix::pipe; - /// - /// let tx = pipe::OpenOptions::new() - /// .read_write(true) - /// .open_receiver("path/to/a/fifo"); - /// ``` - #[cfg(target_os = "linux")] - #[cfg_attr(docsrs, doc(cfg(target_os = "linux")))] - pub fn read_write(&mut self, value: bool) -> &mut Self { - self.read_write = value; - self - } - - /// Sets the option to skip the check for FIFO file type. - /// - /// By default, [`open_receiver`] and [`open_sender`] functions will check - /// if the opened file is a FIFO file. Set this option to `true` if you are - /// sure the file is a FIFO file. - /// - /// [`open_receiver`]: OpenOptions::open_receiver - /// [`open_sender`]: OpenOptions::open_sender - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use nix::{unistd::mkfifo, sys::stat::Mode}; - /// # use std::error::Error; - /// - /// // Our program has exclusive access to this path. - /// const FIFO_NAME: &str = "path/to/a/new/fifo"; - /// - /// # async fn dox() -> Result<(), Box> { - /// mkfifo(FIFO_NAME, Mode::S_IRWXU)?; - /// let rx = pipe::OpenOptions::new() - /// .unchecked(true) - /// .open_receiver(FIFO_NAME)?; - /// # Ok(()) - /// # } - /// ``` - pub fn unchecked(&mut self, value: bool) -> &mut Self { - self.unchecked = value; - self - } - - /// Creates a [`Receiver`] from a FIFO file with the options specified by `self`. 
- /// - /// This function will open the FIFO file at the specified path, possibly - /// check if it is a pipe, and associate the pipe with the default event - /// loop for reading. - /// - /// # Errors - /// - /// If the file type check fails, this function will fail with `io::ErrorKind::InvalidInput`. - /// This function may also fail with other standard OS errors. - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn open_receiver>(&self, path: P) -> io::Result { - let file = self.open(path.as_ref(), PipeEnd::Receiver)?; - Receiver::from_file_unchecked(file) - } - - /// Creates a [`Sender`] from a FIFO file with the options specified by `self`. - /// - /// This function will open the FIFO file at the specified path, possibly - /// check if it is a pipe, and associate the pipe with the default event - /// loop for writing. - /// - /// # Errors - /// - /// If the file type check fails, this function will fail with `io::ErrorKind::InvalidInput`. - /// If the file is not opened in read-write access mode and the file is not - /// currently open for reading, this function will fail with `ENXIO`. - /// This function may also fail with other standard OS errors. - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. 
- pub fn open_sender>(&self, path: P) -> io::Result { - let file = self.open(path.as_ref(), PipeEnd::Sender)?; - Sender::from_file_unchecked(file) - } - - fn open(&self, path: &Path, pipe_end: PipeEnd) -> io::Result { - let mut options = std::fs::OpenOptions::new(); - options - .read(pipe_end == PipeEnd::Receiver) - .write(pipe_end == PipeEnd::Sender) - .custom_flags(libc::O_NONBLOCK); - - #[cfg(target_os = "linux")] - if self.read_write { - options.read(true).write(true); - } - - let file = options.open(path)?; - - if !self.unchecked && !is_fifo(&file)? { - return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); - } - - Ok(file) - } -} - -impl Default for OpenOptions { - fn default() -> OpenOptions { - OpenOptions::new() - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -enum PipeEnd { - Sender, - Receiver, -} - -/// Writing end of a Unix pipe. -/// -/// It can be constructed from a FIFO file with [`OpenOptions::open_sender`]. -/// -/// Opening a named pipe for writing involves a few steps. -/// Call to [`OpenOptions::open_sender`] might fail with an error indicating -/// different things: -/// -/// * [`io::ErrorKind::NotFound`] - There is no file at the specified path. -/// * [`io::ErrorKind::InvalidInput`] - The file exists, but it is not a FIFO. -/// * [`ENXIO`] - The file is a FIFO, but no process has it open for reading. -/// Sleep for a while and try again. -/// * Other OS errors not specific to opening FIFO files. -/// -/// Opening a `Sender` from a FIFO file should look like this: -/// -/// ```no_run -/// use tokio::net::unix::pipe; -/// use tokio::time::{self, Duration}; -/// -/// const FIFO_NAME: &str = "path/to/a/fifo"; -/// -/// # async fn dox() -> Result<(), Box> { -/// // Wait for a reader to open the file. 
-/// let tx = loop { -/// match pipe::OpenOptions::new().open_sender(FIFO_NAME) { -/// Ok(tx) => break tx, -/// Err(e) if e.raw_os_error() == Some(libc::ENXIO) => {}, -/// Err(e) => return Err(e.into()), -/// } -/// -/// time::sleep(Duration::from_millis(50)).await; -/// }; -/// # Ok(()) -/// # } -/// ``` -/// -/// On Linux, it is possible to create a `Sender` without waiting in a sleeping -/// loop. This is done by opening a named pipe in read-write access mode with -/// `OpenOptions::read_write`. This way, a `Sender` can at the same time hold -/// both a writing end and a reading end, and the latter allows to open a FIFO -/// without [`ENXIO`] error since the pipe is open for reading as well. -/// -/// `Sender` cannot be used to read from a pipe, so in practice the read access -/// is only used when a FIFO is opened. However, using a `Sender` in read-write -/// mode **may lead to lost data**, because written data will be dropped by the -/// system as soon as all pipe ends are closed. To avoid lost data you have to -/// make sure that a reading end has been opened before dropping a `Sender`. -/// -/// Note that using read-write access mode with FIFO files is not defined by -/// the POSIX standard and it is only guaranteed to work on Linux. -/// -/// ```ignore -/// use tokio::io::AsyncWriteExt; -/// use tokio::net::unix::pipe; -/// -/// const FIFO_NAME: &str = "path/to/a/fifo"; -/// -/// # async fn dox() -> Result<(), Box> { -/// let mut tx = pipe::OpenOptions::new() -/// .read_write(true) -/// .open_sender(FIFO_NAME)?; -/// -/// // Asynchronously write to the pipe before a reader. 
-/// tx.write_all(b"hello world").await?; -/// # Ok(()) -/// # } -/// ``` -/// -/// [`ENXIO`]: https://docs.rs/libc/latest/libc/constant.ENXIO.html -#[derive(Debug)] -pub struct Sender { - io: PollEvented, -} - -impl Sender { - fn from_mio(mio_tx: mio_pipe::Sender) -> io::Result { - let io = PollEvented::new_with_interest(mio_tx, Interest::WRITABLE)?; - Ok(Sender { io }) - } - - /// Creates a new `Sender` from a [`File`]. - /// - /// This function is intended to construct a pipe from a [`File`] representing - /// a special FIFO file. It will check if the file is a pipe and has write access, - /// set it in non-blocking mode and perform the conversion. - /// - /// # Errors - /// - /// Fails with `io::ErrorKind::InvalidInput` if the file is not a pipe or it - /// does not have write access. Also fails with any standard OS error if it occurs. - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn from_file(mut file: File) -> io::Result { - if !is_fifo(&file)? { - return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); - } - - let flags = get_file_flags(&file)?; - if has_write_access(flags) { - set_nonblocking(&mut file, flags)?; - Sender::from_file_unchecked(file) - } else { - Err(io::Error::new( - io::ErrorKind::InvalidInput, - "not in O_WRONLY or O_RDWR access mode", - )) - } - } - - /// Creates a new `Sender` from a [`File`] without checking pipe properties. - /// - /// This function is intended to construct a pipe from a File representing - /// a special FIFO file. 
The conversion assumes nothing about the underlying - /// file; it is left up to the user to make sure it is opened with write access, - /// represents a pipe and is set in non-blocking mode. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use std::fs::OpenOptions; - /// use std::os::unix::fs::{FileTypeExt, OpenOptionsExt}; - /// # use std::error::Error; - /// - /// const FIFO_NAME: &str = "path/to/a/fifo"; - /// - /// # async fn dox() -> Result<(), Box> { - /// let file = OpenOptions::new() - /// .write(true) - /// .custom_flags(libc::O_NONBLOCK) - /// .open(FIFO_NAME)?; - /// if file.metadata()?.file_type().is_fifo() { - /// let tx = pipe::Sender::from_file_unchecked(file)?; - /// /* use the Sender */ - /// } - /// # Ok(()) - /// # } - /// ``` - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn from_file_unchecked(file: File) -> io::Result { - let raw_fd = file.into_raw_fd(); - let mio_tx = unsafe { mio_pipe::Sender::from_raw_fd(raw_fd) }; - Sender::from_mio(mio_tx) - } - - /// Waits for any of the requested ready states. - /// - /// This function can be used instead of [`writable()`] to check the returned - /// ready set for [`Ready::WRITABLE`] and [`Ready::WRITE_CLOSED`] events. - /// - /// The function may complete without the pipe being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. 
- /// - /// [`writable()`]: Self::writable - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn ready(&self, interest: Interest) -> io::Result { - let event = self.io.registration().readiness(interest).await?; - Ok(event.ready) - } - - /// Waits for the pipe to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually - /// paired with [`try_write()`]. - /// - /// [`try_write()`]: Self::try_write - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Open a writing end of a fifo - /// let tx = pipe::OpenOptions::new().open_sender("path/to/a/fifo")?; - /// - /// loop { - /// // Wait for the pipe to be writable - /// tx.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match tx.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn writable(&self) -> io::Result<()> { - self.ready(Interest::WRITABLE).await?; - Ok(()) - } - - /// Polls for write readiness. - /// - /// If the pipe is not currently ready for writing, this method will - /// store a clone of the `Waker` from the provided `Context`. When the pipe - /// becomes ready for writing, `Waker::wake` will be called on the waker. 
- /// - /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. - /// - /// This function is intended for cases where creating and pinning a future - /// via [`writable`] is not feasible. Where possible, using [`writable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// [`writable`]: Self::writable - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the pipe is not ready for writing. - /// * `Poll::Ready(Ok(()))` if the pipe is ready for writing. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_write_ready(cx).map_ok(|_| ()) - } - - /// Tries to write a buffer to the pipe, returning how many bytes were - /// written. - /// - /// The function will attempt to write the entire contents of `buf`, but - /// only part of the buffer may be written. If the length of `buf` is not - /// greater than `PIPE_BUF` (an OS constant, 4096 under Linux), then the - /// write is guaranteed to be atomic, i.e. either the entire content of - /// `buf` will be written or this method will fail with `WouldBlock`. There - /// is no such guarantee if `buf` is larger than `PIPE_BUF`. - /// - /// This function is usually paired with [`writable`]. - /// - /// [`writable`]: Self::writable - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the pipe is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Open a writing end of a fifo - /// let tx = pipe::OpenOptions::new().open_sender("path/to/a/fifo")?; - /// - /// loop { - /// // Wait for the pipe to be writable - /// tx.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match tx.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write(&self, buf: &[u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) - } - - /// Tries to write several buffers to the pipe, returning how many bytes - /// were written. - /// - /// Data is written from each buffer in order, with the final buffer read - /// from possible being only partially consumed. This method behaves - /// equivalently to a single call to [`try_write()`] with concatenated - /// buffers. - /// - /// If the total length of buffers is not greater than `PIPE_BUF` (an OS - /// constant, 4096 under Linux), then the write is guaranteed to be atomic, - /// i.e. either the entire contents of buffers will be written or this - /// method will fail with `WouldBlock`. There is no such guarantee if the - /// total length of buffers is greater than `PIPE_BUF`. - /// - /// This function is usually paired with [`writable`]. - /// - /// [`try_write()`]: Self::try_write() - /// [`writable`]: Self::writable - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the pipe is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Open a writing end of a fifo - /// let tx = pipe::OpenOptions::new().open_sender("path/to/a/fifo")?; - /// - /// let bufs = [io::IoSlice::new(b"hello "), io::IoSlice::new(b"world")]; - /// - /// loop { - /// // Wait for the pipe to be writable - /// tx.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match tx.try_write_vectored(&bufs) { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf)) - } -} - -impl AsyncWrite for Sender { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.io.poll_write(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.io.poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -impl AsRawFd for Sender { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } -} - -impl AsFd for Sender { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } -} - -/// Reading end of a Unix pipe. -/// -/// It can be constructed from a FIFO file with [`OpenOptions::open_receiver`]. 
-/// -/// # Examples -/// -/// Receiving messages from a named pipe in a loop: -/// -/// ```no_run -/// use tokio::net::unix::pipe; -/// use tokio::io::{self, AsyncReadExt}; -/// -/// const FIFO_NAME: &str = "path/to/a/fifo"; -/// -/// # async fn dox() -> Result<(), Box> { -/// let mut rx = pipe::OpenOptions::new().open_receiver(FIFO_NAME)?; -/// loop { -/// let mut msg = vec![0; 256]; -/// match rx.read_exact(&mut msg).await { -/// Ok(_) => { -/// /* handle the message */ -/// } -/// Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => { -/// // Writing end has been closed, we should reopen the pipe. -/// rx = pipe::OpenOptions::new().open_receiver(FIFO_NAME)?; -/// } -/// Err(e) => return Err(e.into()), -/// } -/// } -/// # } -/// ``` -/// -/// On Linux, you can use a `Receiver` in read-write access mode to implement -/// resilient reading from a named pipe. Unlike `Receiver` opened in read-only -/// mode, read from a pipe in read-write mode will not fail with `UnexpectedEof` -/// when the writing end is closed. This way, a `Receiver` can asynchronously -/// wait for the next writer to open the pipe. -/// -/// You should not use functions waiting for EOF such as [`read_to_end`] with -/// a `Receiver` in read-write access mode, since it **may wait forever**. -/// `Receiver` in this mode also holds an open writing end, which prevents -/// receiving EOF. -/// -/// To set the read-write access mode you can use `OpenOptions::read_write`. -/// Note that using read-write access mode with FIFO files is not defined by -/// the POSIX standard and it is only guaranteed to work on Linux. 
-/// -/// ```ignore -/// use tokio::net::unix::pipe; -/// use tokio::io::AsyncReadExt; -/// # use std::error::Error; -/// -/// const FIFO_NAME: &str = "path/to/a/fifo"; -/// -/// # async fn dox() -> Result<(), Box> { -/// let mut rx = pipe::OpenOptions::new() -/// .read_write(true) -/// .open_receiver(FIFO_NAME)?; -/// loop { -/// let mut msg = vec![0; 256]; -/// rx.read_exact(&mut msg).await?; -/// /* handle the message */ -/// } -/// # } -/// ``` -/// -/// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end -#[derive(Debug)] -pub struct Receiver { - io: PollEvented, -} - -impl Receiver { - fn from_mio(mio_rx: mio_pipe::Receiver) -> io::Result { - let io = PollEvented::new_with_interest(mio_rx, Interest::READABLE)?; - Ok(Receiver { io }) - } - - /// Creates a new `Receiver` from a [`File`]. - /// - /// This function is intended to construct a pipe from a [`File`] representing - /// a special FIFO file. It will check if the file is a pipe and has read access, - /// set it in non-blocking mode and perform the conversion. - /// - /// # Errors - /// - /// Fails with `io::ErrorKind::InvalidInput` if the file is not a pipe or it - /// does not have read access. Also fails with any standard OS error if it occurs. - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn from_file(mut file: File) -> io::Result { - if !is_fifo(&file)? 
{ - return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); - } - - let flags = get_file_flags(&file)?; - if has_read_access(flags) { - set_nonblocking(&mut file, flags)?; - Receiver::from_file_unchecked(file) - } else { - Err(io::Error::new( - io::ErrorKind::InvalidInput, - "not in O_RDONLY or O_RDWR access mode", - )) - } - } - - /// Creates a new `Receiver` from a [`File`] without checking pipe properties. - /// - /// This function is intended to construct a pipe from a File representing - /// a special FIFO file. The conversion assumes nothing about the underlying - /// file; it is left up to the user to make sure it is opened with read access, - /// represents a pipe and is set in non-blocking mode. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use std::fs::OpenOptions; - /// use std::os::unix::fs::{FileTypeExt, OpenOptionsExt}; - /// # use std::error::Error; - /// - /// const FIFO_NAME: &str = "path/to/a/fifo"; - /// - /// # async fn dox() -> Result<(), Box> { - /// let file = OpenOptions::new() - /// .read(true) - /// .custom_flags(libc::O_NONBLOCK) - /// .open(FIFO_NAME)?; - /// if file.metadata()?.file_type().is_fifo() { - /// let rx = pipe::Receiver::from_file_unchecked(file)?; - /// /* use the Receiver */ - /// } - /// # Ok(()) - /// # } - /// ``` - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn from_file_unchecked(file: File) -> io::Result { - let raw_fd = file.into_raw_fd(); - let mio_rx = unsafe { mio_pipe::Receiver::from_raw_fd(raw_fd) }; - Receiver::from_mio(mio_rx) - } - - /// Waits for any of the requested ready states. 
- /// - /// This function can be used instead of [`readable()`] to check the returned - /// ready set for [`Ready::READABLE`] and [`Ready::READ_CLOSED`] events. - /// - /// The function may complete without the pipe being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// [`readable()`]: Self::readable - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn ready(&self, interest: Interest) -> io::Result { - let event = self.io.registration().readiness(interest).await?; - Ok(event.ready) - } - - /// Waits for the pipe to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with [`try_read()`]. - /// - /// [`try_read()`]: Self::try_read() - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Open a reading end of a fifo - /// let rx = pipe::OpenOptions::new().open_receiver("path/to/a/fifo")?; - /// - /// let mut msg = vec![0; 1024]; - /// - /// loop { - /// // Wait for the pipe to be readable - /// rx.readable().await?; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match rx.try_read(&mut msg) { - /// Ok(n) => { - /// msg.truncate(n); - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// println!("GOT = {:?}", msg); - /// Ok(()) - /// } - /// ``` - pub async fn readable(&self) -> io::Result<()> { - self.ready(Interest::READABLE).await?; - Ok(()) - } - - /// Polls for read readiness. - /// - /// If the pipe is not currently ready for reading, this method will - /// store a clone of the `Waker` from the provided `Context`. When the pipe - /// becomes ready for reading, `Waker::wake` will be called on the waker. - /// - /// Note that on multiple calls to `poll_read_ready` or `poll_read`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. - /// - /// This function is intended for cases where creating and pinning a future - /// via [`readable`] is not feasible. Where possible, using [`readable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// [`readable`]: Self::readable - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the pipe is not ready for reading. - /// * `Poll::Ready(Ok(()))` if the pipe is ready for reading. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_read_ready(cx).map_ok(|_| ()) - } - - /// Tries to read data from the pipe into the provided buffer, returning how - /// many bytes were read. - /// - /// Reads any pending data from the pipe but does not wait for new data - /// to arrive. On success, returns the number of bytes read. 
Because - /// `try_read()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually [`readable()`] is used with this function. - /// - /// [`readable()`]: Self::readable() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. The pipe's writing end is closed and will no longer write data. - /// 2. The specified buffer was 0 bytes in length. - /// - /// If the pipe is not ready to read data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Open a reading end of a fifo - /// let rx = pipe::OpenOptions::new().open_receiver("path/to/a/fifo")?; - /// - /// let mut msg = vec![0; 1024]; - /// - /// loop { - /// // Wait for the pipe to be readable - /// rx.readable().await?; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match rx.try_read(&mut msg) { - /// Ok(n) => { - /// msg.truncate(n); - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// println!("GOT = {:?}", msg); - /// Ok(()) - /// } - /// ``` - pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read(buf)) - } - - /// Tries to read data from the pipe into the provided buffers, returning - /// how many bytes were read. - /// - /// Data is copied to fill each buffer in order, with the final buffer - /// written to possibly being only partially filled. 
This method behaves - /// equivalently to a single call to [`try_read()`] with concatenated - /// buffers. - /// - /// Reads any pending data from the pipe but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_vectored()` is non-blocking, the buffer does not have to be - /// stored by the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] is used with this function. - /// - /// [`try_read()`]: Self::try_read() - /// [`readable()`]: Self::readable() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the pipe's writing end is - /// closed and will no longer write data. If the pipe is not ready to read - /// data `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Open a reading end of a fifo - /// let rx = pipe::OpenOptions::new().open_receiver("path/to/a/fifo")?; - /// - /// loop { - /// // Wait for the pipe to be readable - /// rx.readable().await?; - /// - /// // Creating the buffer **after** the `await` prevents it from - /// // being stored in the async task. - /// let mut buf_a = [0; 512]; - /// let mut buf_b = [0; 1024]; - /// let mut bufs = [ - /// io::IoSliceMut::new(&mut buf_a), - /// io::IoSliceMut::new(&mut buf_b), - /// ]; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match rx.try_read_vectored(&mut bufs) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs)) - } - - cfg_io_util! { - /// Tries to read data from the pipe into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// Reads any pending data from the pipe but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: Self::readable - /// [`ready()`]: Self::ready - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the pipe's writing end is - /// closed and will no longer write data. If the pipe is not ready to read - /// data `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::unix::pipe; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// // Open a reading end of a fifo - /// let rx = pipe::OpenOptions::new().open_receiver("path/to/a/fifo")?; - /// - /// loop { - /// // Wait for the pipe to be readable - /// rx.readable().await?; - /// - /// let mut buf = Vec::with_capacity(4096); - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match rx.try_read_buf(&mut buf) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_buf(&self, buf: &mut B) -> io::Result { - self.io.registration().try_io(Interest::READABLE, || { - use std::io::Read; - - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - // Safety: `mio_pipe::Receiver` uses a `std::fs::File` underneath, - // which correctly handles reads into uninitialized memory. - let n = (&*self.io).read(dst)?; - - unsafe { - buf.advance_mut(n); - } - - Ok(n) - }) - } - } -} - -impl AsyncRead for Receiver { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - // Safety: `mio_pipe::Receiver` uses a `std::fs::File` underneath, - // which correctly handles reads into uninitialized memory. - unsafe { self.io.poll_read(cx, buf) } - } -} - -impl AsRawFd for Receiver { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } -} - -impl AsFd for Receiver { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } -} - -/// Checks if file is a FIFO -fn is_fifo(file: &File) -> io::Result { - Ok(file.metadata()?.file_type().is_fifo()) -} - -/// Gets file descriptor's flags by fcntl. -fn get_file_flags(file: &File) -> io::Result { - let fd = file.as_raw_fd(); - let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) }; - if flags < 0 { - Err(io::Error::last_os_error()) - } else { - Ok(flags) - } -} - -/// Checks for O_RDONLY or O_RDWR access mode. -fn has_read_access(flags: libc::c_int) -> bool { - let mode = flags & libc::O_ACCMODE; - mode == libc::O_RDONLY || mode == libc::O_RDWR -} - -/// Checks for O_WRONLY or O_RDWR access mode. 
-fn has_write_access(flags: libc::c_int) -> bool { - let mode = flags & libc::O_ACCMODE; - mode == libc::O_WRONLY || mode == libc::O_RDWR -} - -/// Sets file's flags with O_NONBLOCK by fcntl. -fn set_nonblocking(file: &mut File, current_flags: libc::c_int) -> io::Result<()> { - let fd = file.as_raw_fd(); - - let flags = current_flags | libc::O_NONBLOCK; - - if flags != current_flags { - let ret = unsafe { libc::fcntl(fd, libc::F_SETFL, flags) }; - if ret < 0 { - return Err(io::Error::last_os_error()); - } - } - - Ok(()) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/socketaddr.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/socketaddr.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/socketaddr.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/socketaddr.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use std::fmt; -use std::path::Path; - -/// An address associated with a Tokio Unix socket. -pub struct SocketAddr(pub(super) mio::net::SocketAddr); - -impl SocketAddr { - /// Returns `true` if the address is unnamed. - /// - /// Documentation reflected in [`SocketAddr`] - /// - /// [`SocketAddr`]: std::os::unix::net::SocketAddr - pub fn is_unnamed(&self) -> bool { - self.0.is_unnamed() - } - - /// Returns the contents of this address if it is a `pathname` address. 
- /// - /// Documentation reflected in [`SocketAddr`] - /// - /// [`SocketAddr`]: std::os::unix::net::SocketAddr - pub fn as_pathname(&self) -> Option<&Path> { - self.0.as_pathname() - } -} - -impl fmt::Debug for SocketAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(fmt) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/split_owned.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/split_owned.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/split_owned.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/split_owned.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,419 +0,0 @@ -//! `UnixStream` owned split support. -//! -//! A `UnixStream` can be split into an `OwnedReadHalf` and a `OwnedWriteHalf` -//! with the `UnixStream::into_split` method. `OwnedReadHalf` implements -//! `AsyncRead` while `OwnedWriteHalf` implements `AsyncWrite`. -//! -//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized -//! split has no associated overhead and enforces all invariants at the type -//! level. - -use crate::io::{AsyncRead, AsyncWrite, Interest, ReadBuf, Ready}; -use crate::net::UnixStream; - -use crate::net::unix::SocketAddr; -use std::error::Error; -use std::net::Shutdown; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::{fmt, io}; - -cfg_io_util! { - use bytes::BufMut; -} - -/// Owned read half of a [`UnixStream`], created by [`into_split`]. -/// -/// Reading from an `OwnedReadHalf` is usually done using the convenience methods found -/// on the [`AsyncReadExt`] trait. -/// -/// [`UnixStream`]: crate::net::UnixStream -/// [`into_split`]: crate::net::UnixStream::into_split() -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -#[derive(Debug)] -pub struct OwnedReadHalf { - inner: Arc, -} - -/// Owned write half of a [`UnixStream`], created by [`into_split`]. 
-/// -/// Note that in the [`AsyncWrite`] implementation of this type, -/// [`poll_shutdown`] will shut down the stream in the write direction. -/// Dropping the write half will also shut down the write half of the stream. -/// -/// Writing to an `OwnedWriteHalf` is usually done using the convenience methods -/// found on the [`AsyncWriteExt`] trait. -/// -/// [`UnixStream`]: crate::net::UnixStream -/// [`into_split`]: crate::net::UnixStream::into_split() -/// [`AsyncWrite`]: trait@crate::io::AsyncWrite -/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -#[derive(Debug)] -pub struct OwnedWriteHalf { - inner: Arc, - shutdown_on_drop: bool, -} - -pub(crate) fn split_owned(stream: UnixStream) -> (OwnedReadHalf, OwnedWriteHalf) { - let arc = Arc::new(stream); - let read = OwnedReadHalf { - inner: Arc::clone(&arc), - }; - let write = OwnedWriteHalf { - inner: arc, - shutdown_on_drop: true, - }; - (read, write) -} - -pub(crate) fn reunite( - read: OwnedReadHalf, - write: OwnedWriteHalf, -) -> Result { - if Arc::ptr_eq(&read.inner, &write.inner) { - write.forget(); - // This unwrap cannot fail as the api does not allow creating more than two Arcs, - // and we just dropped the other half. - Ok(Arc::try_unwrap(read.inner).expect("UnixStream: try_unwrap failed in reunite")) - } else { - Err(ReuniteError(read, write)) - } -} - -/// Error indicating that two halves were not from the same socket, and thus could -/// not be reunited. -#[derive(Debug)] -pub struct ReuniteError(pub OwnedReadHalf, pub OwnedWriteHalf); - -impl fmt::Display for ReuniteError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "tried to reunite halves that are not from the same socket" - ) - } -} - -impl Error for ReuniteError {} - -impl OwnedReadHalf { - /// Attempts to put the two halves of a `UnixStream` back together and - /// recover the original socket. 
Succeeds only if the two halves - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: crate::net::UnixStream::into_split() - pub fn reunite(self, other: OwnedWriteHalf) -> Result { - reunite(self, other) - } - - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with [`try_read()`]. It can be used instead - /// of [`readable()`] to check the returned ready set for [`Ready::READABLE`] - /// and [`Ready::READ_CLOSED`] events. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// This function is equivalent to [`UnixStream::ready`]. - /// - /// [`try_read()`]: Self::try_read - /// [`readable()`]: Self::readable - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn ready(&self, interest: Interest) -> io::Result { - self.inner.ready(interest).await - } - - /// Waits for the socket to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_read()`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read that fails with `WouldBlock` or - /// `Poll::Pending`. 
- pub async fn readable(&self) -> io::Result<()> { - self.inner.readable().await - } - - /// Tries to read data from the stream into the provided buffer, returning how - /// many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. The stream's read half is closed and will no longer yield data. - /// 2. The specified buffer was 0 bytes in length. - /// - /// If the stream is not ready to read data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - self.inner.try_read(buf) - } - - cfg_io_util! { - /// Tries to read data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. 
If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read_buf(&self, buf: &mut B) -> io::Result { - self.inner.try_read_buf(buf) - } - } - - /// Tries to read data from the stream into the provided buffers, returning - /// how many bytes were read. - /// - /// Data is copied to fill each buffer in order, with the final buffer - /// written to possibly being only partially filled. This method behaves - /// equivalently to a single call to [`try_read()`] with concatenated - /// buffers. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_vectored()` is non-blocking, the buffer does not have to be - /// stored by the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`try_read()`]: Self::try_read() - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { - self.inner.try_read_vectored(bufs) - } - - /// Returns the socket address of the remote half of this connection. - pub fn peer_addr(&self) -> io::Result { - self.inner.peer_addr() - } - - /// Returns the socket address of the local half of this connection. 
- pub fn local_addr(&self) -> io::Result { - self.inner.local_addr() - } -} - -impl AsyncRead for OwnedReadHalf { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.inner.poll_read_priv(cx, buf) - } -} - -impl OwnedWriteHalf { - /// Attempts to put the two halves of a `UnixStream` back together and - /// recover the original socket. Succeeds only if the two halves - /// originated from the same call to [`into_split`]. - /// - /// [`into_split`]: crate::net::UnixStream::into_split() - pub fn reunite(self, other: OwnedReadHalf) -> Result { - reunite(other, self) - } - - /// Destroys the write half, but don't close the write half of the stream - /// until the read half is dropped. If the read half has already been - /// dropped, this closes the stream. - pub fn forget(mut self) { - self.shutdown_on_drop = false; - drop(self); - } - - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with [`try_write()`]. It can be used instead - /// of [`writable()`] to check the returned ready set for [`Ready::WRITABLE`] - /// and [`Ready::WRITE_CLOSED`] events. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// This function is equivalent to [`UnixStream::ready`]. - /// - /// [`try_write()`]: Self::try_write - /// [`writable()`]: Self::writable - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. 
- pub async fn ready(&self, interest: Interest) -> io::Result { - self.inner.ready(interest).await - } - - /// Waits for the socket to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually - /// paired with `try_write()`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn writable(&self) -> io::Result<()> { - self.inner.writable().await - } - - /// Tries to write a buffer to the stream, returning how many bytes were - /// written. - /// - /// The function will attempt to write the entire contents of `buf`, but - /// only part of the buffer may be written. - /// - /// This function is usually paired with `writable()`. - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_write(&self, buf: &[u8]) -> io::Result { - self.inner.try_write(buf) - } - - /// Tries to write several buffers to the stream, returning how many bytes - /// were written. - /// - /// Data is written from each buffer in order, with the final buffer read - /// from possible being only partially consumed. This method behaves - /// equivalently to a single call to [`try_write()`] with concatenated - /// buffers. - /// - /// This function is usually paired with `writable()`. - /// - /// [`try_write()`]: Self::try_write() - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result { - self.inner.try_write_vectored(buf) - } - - /// Returns the socket address of the remote half of this connection. - pub fn peer_addr(&self) -> io::Result { - self.inner.peer_addr() - } - - /// Returns the socket address of the local half of this connection. - pub fn local_addr(&self) -> io::Result { - self.inner.local_addr() - } -} - -impl Drop for OwnedWriteHalf { - fn drop(&mut self) { - if self.shutdown_on_drop { - let _ = self.inner.shutdown_std(Shutdown::Write); - } - } -} - -impl AsyncWrite for OwnedWriteHalf { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.inner.poll_write_priv(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.inner.poll_write_vectored_priv(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.inner.is_write_vectored() - } - - #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // flush is a no-op - Poll::Ready(Ok(())) - } - - // `poll_shutdown` on a write half shutdowns the stream in the "write" direction. - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - let res = self.inner.shutdown_std(Shutdown::Write); - if res.is_ok() { - Pin::into_inner(self).shutdown_on_drop = false; - } - res.into() - } -} - -impl AsRef for OwnedReadHalf { - fn as_ref(&self) -> &UnixStream { - &self.inner - } -} - -impl AsRef for OwnedWriteHalf { - fn as_ref(&self) -> &UnixStream { - &self.inner - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/split.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/split.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/split.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/split.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,331 +0,0 @@ -//! `UnixStream` split support. -//! -//! 
A `UnixStream` can be split into a read half and a write half with -//! `UnixStream::split`. The read half implements `AsyncRead` while the write -//! half implements `AsyncWrite`. -//! -//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized -//! split has no associated overhead and enforces all invariants at the type -//! level. - -use crate::io::{AsyncRead, AsyncWrite, Interest, ReadBuf, Ready}; -use crate::net::UnixStream; - -use crate::net::unix::SocketAddr; -use std::io; -use std::net::Shutdown; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - use bytes::BufMut; -} - -/// Borrowed read half of a [`UnixStream`], created by [`split`]. -/// -/// Reading from a `ReadHalf` is usually done using the convenience methods found on the -/// [`AsyncReadExt`] trait. -/// -/// [`UnixStream`]: UnixStream -/// [`split`]: UnixStream::split() -/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt -#[derive(Debug)] -pub struct ReadHalf<'a>(&'a UnixStream); - -/// Borrowed write half of a [`UnixStream`], created by [`split`]. -/// -/// Note that in the [`AsyncWrite`] implementation of this type, [`poll_shutdown`] will -/// shut down the UnixStream stream in the write direction. -/// -/// Writing to an `WriteHalf` is usually done using the convenience methods found -/// on the [`AsyncWriteExt`] trait. -/// -/// [`UnixStream`]: UnixStream -/// [`split`]: UnixStream::split() -/// [`AsyncWrite`]: trait@crate::io::AsyncWrite -/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown -/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt -#[derive(Debug)] -pub struct WriteHalf<'a>(&'a UnixStream); - -pub(crate) fn split(stream: &mut UnixStream) -> (ReadHalf<'_>, WriteHalf<'_>) { - (ReadHalf(stream), WriteHalf(stream)) -} - -impl ReadHalf<'_> { - /// Wait for any of the requested ready states. - /// - /// This function is usually paired with [`try_read()`]. 
It can be used instead - /// of [`readable()`] to check the returned ready set for [`Ready::READABLE`] - /// and [`Ready::READ_CLOSED`] events. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// This function is equivalent to [`UnixStream::ready`]. - /// - /// [`try_read()`]: Self::try_read - /// [`readable()`]: Self::readable - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn ready(&self, interest: Interest) -> io::Result { - self.0.ready(interest).await - } - - /// Waits for the socket to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_read()`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn readable(&self) -> io::Result<()> { - self.0.readable().await - } - - /// Tries to read data from the stream into the provided buffer, returning how - /// many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. 
- /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. The stream's read half is closed and will no longer yield data. - /// 2. The specified buffer was 0 bytes in length. - /// - /// If the stream is not ready to read data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - self.0.try_read(buf) - } - - cfg_io_util! { - /// Tries to read data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - pub fn try_read_buf(&self, buf: &mut B) -> io::Result { - self.0.try_read_buf(buf) - } - } - - /// Tries to read data from the stream into the provided buffers, returning - /// how many bytes were read. - /// - /// Data is copied to fill each buffer in order, with the final buffer - /// written to possibly being only partially filled. This method behaves - /// equivalently to a single call to [`try_read()`] with concatenated - /// buffers. 
- /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_vectored()` is non-blocking, the buffer does not have to be - /// stored by the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`try_read()`]: Self::try_read() - /// [`readable()`]: Self::readable() - /// [`ready()`]: Self::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { - self.0.try_read_vectored(bufs) - } - - /// Returns the socket address of the remote half of this connection. - pub fn peer_addr(&self) -> io::Result { - self.0.peer_addr() - } - - /// Returns the socket address of the local half of this connection. - pub fn local_addr(&self) -> io::Result { - self.0.local_addr() - } -} - -impl WriteHalf<'_> { - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with [`try_write()`]. It can be used instead - /// of [`writable()`] to check the returned ready set for [`Ready::WRITABLE`] - /// and [`Ready::WRITE_CLOSED`] events. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// This function is equivalent to [`UnixStream::ready`]. 
- /// - /// [`try_write()`]: Self::try_write - /// [`writable()`]: Self::writable - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn ready(&self, interest: Interest) -> io::Result { - self.0.ready(interest).await - } - - /// Waits for the socket to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually - /// paired with `try_write()`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to write that fails with `WouldBlock` or - /// `Poll::Pending`. - pub async fn writable(&self) -> io::Result<()> { - self.0.writable().await - } - - /// Tries to write a buffer to the stream, returning how many bytes were - /// written. - /// - /// The function will attempt to write the entire contents of `buf`, but - /// only part of the buffer may be written. - /// - /// This function is usually paired with `writable()`. - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_write(&self, buf: &[u8]) -> io::Result { - self.0.try_write(buf) - } - - /// Tries to write several buffers to the stream, returning how many bytes - /// were written. - /// - /// Data is written from each buffer in order, with the final buffer read - /// from possible being only partially consumed. This method behaves - /// equivalently to a single call to [`try_write()`] with concatenated - /// buffers. - /// - /// This function is usually paired with `writable()`. 
- /// - /// [`try_write()`]: Self::try_write() - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result { - self.0.try_write_vectored(buf) - } - - /// Returns the socket address of the remote half of this connection. - pub fn peer_addr(&self) -> io::Result { - self.0.peer_addr() - } - - /// Returns the socket address of the local half of this connection. - pub fn local_addr(&self) -> io::Result { - self.0.local_addr() - } -} - -impl AsyncRead for ReadHalf<'_> { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.0.poll_read_priv(cx, buf) - } -} - -impl AsyncWrite for WriteHalf<'_> { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.0.poll_write_priv(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.0.poll_write_vectored_priv(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.0.is_write_vectored() - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.0.shutdown_std(Shutdown::Write).into() - } -} - -impl AsRef for ReadHalf<'_> { - fn as_ref(&self) -> &UnixStream { - self.0 - } -} - -impl AsRef for WriteHalf<'_> { - fn as_ref(&self) -> &UnixStream { - self.0 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/stream.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/stream.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/stream.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ 
-1,1044 +0,0 @@ -use crate::future::poll_fn; -use crate::io::{AsyncRead, AsyncWrite, Interest, PollEvented, ReadBuf, Ready}; -use crate::net::unix::split::{split, ReadHalf, WriteHalf}; -use crate::net::unix::split_owned::{split_owned, OwnedReadHalf, OwnedWriteHalf}; -use crate::net::unix::ucred::{self, UCred}; -use crate::net::unix::SocketAddr; - -use std::fmt; -use std::io::{self, Read, Write}; -use std::net::Shutdown; -use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; -use std::os::unix::net; -use std::path::Path; -use std::pin::Pin; -use std::task::{Context, Poll}; - -cfg_io_util! { - use bytes::BufMut; -} - -cfg_net_unix! { - /// A structure representing a connected Unix socket. - /// - /// This socket can be connected directly with [`UnixStream::connect`] or accepted - /// from a listener with [`UnixListener::accept`]. Additionally, a pair of - /// anonymous Unix sockets can be created with `UnixStream::pair`. - /// - /// To shut down the stream in the write direction, you can call the - /// [`shutdown()`] method. This will cause the other peer to receive a read of - /// length 0, indicating that no more data will be sent. This only closes - /// the stream in one direction. - /// - /// [`shutdown()`]: fn@crate::io::AsyncWriteExt::shutdown - /// [`UnixListener::accept`]: crate::net::UnixListener::accept - #[cfg_attr(docsrs, doc(alias = "uds"))] - pub struct UnixStream { - io: PollEvented, - } -} - -impl UnixStream { - /// Connects to the socket named by `path`. - /// - /// This function will create a new Unix socket and connect to the path - /// specified, associating the returned stream with the default event loop's - /// handle. - pub async fn connect

(path: P) -> io::Result - where - P: AsRef, - { - let stream = mio::net::UnixStream::connect(path)?; - let stream = UnixStream::new(stream)?; - - poll_fn(|cx| stream.io.registration().poll_write_ready(cx)).await?; - - if let Some(e) = stream.io.take_error()? { - return Err(e); - } - - Ok(stream) - } - - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with `try_read()` or `try_write()`. It - /// can be used to concurrently read / write to the same socket on a single - /// task without splitting the socket. - /// - /// The function may complete without the socket being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read or write that fails with `WouldBlock` or - /// `Poll::Pending`. - /// - /// # Examples - /// - /// Concurrently read and write to the stream on the same task without - /// splitting. - /// - /// ```no_run - /// use tokio::io::Interest; - /// use tokio::net::UnixStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// loop { - /// let ready = stream.ready(Interest::READABLE | Interest::WRITABLE).await?; - /// - /// if ready.is_readable() { - /// let mut data = vec![0; 1024]; - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match stream.try_read(&mut data) { - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// - /// } - /// - /// if ready.is_writable() { - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_write(b"hello world") { - /// Ok(n) => { - /// println!("write {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub async fn ready(&self, interest: Interest) -> io::Result { - let event = self.io.registration().readiness(interest).await?; - Ok(event.ready) - } - - /// Waits for the socket to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_read()`. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to read that fails with `WouldBlock` or - /// `Poll::Pending`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// let mut msg = vec![0; 1024]; - /// - /// loop { - /// // Wait for the socket to be readable - /// stream.readable().await?; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match stream.try_read(&mut msg) { - /// Ok(n) => { - /// msg.truncate(n); - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// println!("GOT = {:?}", msg); - /// Ok(()) - /// } - /// ``` - pub async fn readable(&self) -> io::Result<()> { - self.ready(Interest::READABLE).await?; - Ok(()) - } - - /// Polls for read readiness. - /// - /// If the unix stream is not currently ready for reading, this method will - /// store a clone of the `Waker` from the provided `Context`. When the unix - /// stream becomes ready for reading, `Waker::wake` will be called on the - /// waker. - /// - /// Note that on multiple calls to `poll_read_ready` or `poll_read`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. (However, `poll_write_ready` retains a - /// second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`readable`] is not feasible. Where possible, using [`readable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the unix stream is not ready for reading. - /// * `Poll::Ready(Ok(()))` if the unix stream is ready for reading. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`readable`]: method@Self::readable - pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_read_ready(cx).map_ok(|_| ()) - } - - /// Try to read data from the stream into the provided buffer, returning how - /// many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. 
On success, returns the number of bytes read. Because - /// `try_read()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: UnixStream::readable() - /// [`ready()`]: UnixStream::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. The stream's read half is closed and will no longer yield data. - /// 2. The specified buffer was 0 bytes in length. - /// - /// If the stream is not ready to read data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// stream.readable().await?; - /// - /// // Creating the buffer **after** the `await` prevents it from - /// // being stored in the async task. - /// let mut buf = [0; 4096]; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match stream.try_read(&mut buf) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read(buf)) - } - - /// Tries to read data from the stream into the provided buffers, returning - /// how many bytes were read. - /// - /// Data is copied to fill each buffer in order, with the final buffer - /// written to possibly being only partially filled. This method behaves - /// equivalently to a single call to [`try_read()`] with concatenated - /// buffers. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_vectored()` is non-blocking, the buffer does not have to be - /// stored by the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`try_read()`]: UnixStream::try_read() - /// [`readable()`]: UnixStream::readable() - /// [`ready()`]: UnixStream::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// use std::error::Error; - /// use std::io::{self, IoSliceMut}; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// stream.readable().await?; - /// - /// // Creating the buffer **after** the `await` prevents it from - /// // being stored in the async task. - /// let mut buf_a = [0; 512]; - /// let mut buf_b = [0; 1024]; - /// let mut bufs = [ - /// IoSliceMut::new(&mut buf_a), - /// IoSliceMut::new(&mut buf_b), - /// ]; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_read_vectored(&mut bufs) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs)) - } - - cfg_io_util! { - /// Tries to read data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// Receives any pending data from the socket but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. 
- /// - /// [`readable()`]: UnixStream::readable() - /// [`ready()`]: UnixStream::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// loop { - /// // Wait for the socket to be readable - /// stream.readable().await?; - /// - /// let mut buf = Vec::with_capacity(4096); - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_read_buf(&mut buf) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_buf(&self, buf: &mut B) -> io::Result { - self.io.registration().try_io(Interest::READABLE, || { - use std::io::Read; - - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - // Safety: We trust `UnixStream::read` to have filled up `n` bytes in the - // buffer. - let n = (&*self.io).read(dst)?; - - unsafe { - buf.advance_mut(n); - } - - Ok(n) - }) - } - } - - /// Waits for the socket to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually - /// paired with `try_write()`. 
- /// - /// # Cancel safety - /// - /// This method is cancel safe. Once a readiness event occurs, the method - /// will continue to return immediately until the readiness event is - /// consumed by an attempt to write that fails with `WouldBlock` or - /// `Poll::Pending`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// loop { - /// // Wait for the socket to be writable - /// stream.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match stream.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn writable(&self) -> io::Result<()> { - self.ready(Interest::WRITABLE).await?; - Ok(()) - } - - /// Polls for write readiness. - /// - /// If the unix stream is not currently ready for writing, this method will - /// store a clone of the `Waker` from the provided `Context`. When the unix - /// stream becomes ready for writing, `Waker::wake` will be called on the - /// waker. - /// - /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. (However, `poll_read_ready` retains a - /// second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`writable`] is not feasible. 
Where possible, using [`writable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the unix stream is not ready for writing. - /// * `Poll::Ready(Ok(()))` if the unix stream is ready for writing. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`writable`]: method@Self::writable - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_write_ready(cx).map_ok(|_| ()) - } - - /// Tries to write a buffer to the stream, returning how many bytes were - /// written. - /// - /// The function will attempt to write the entire contents of `buf`, but - /// only part of the buffer may be written. - /// - /// This function is usually paired with `writable()`. - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// loop { - /// // Wait for the socket to be writable - /// stream.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match stream.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write(&self, buf: &[u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) - } - - /// Tries to write several buffers to the stream, returning how many bytes - /// were written. - /// - /// Data is written from each buffer in order, with the final buffer read - /// from possible being only partially consumed. This method behaves - /// equivalently to a single call to [`try_write()`] with concatenated - /// buffers. - /// - /// This function is usually paired with `writable()`. - /// - /// [`try_write()`]: UnixStream::try_write() - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the stream is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// use std::error::Error; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // Connect to a peer - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// let bufs = [io::IoSlice::new(b"hello "), io::IoSlice::new(b"world")]; - /// - /// loop { - /// // Wait for the socket to be writable - /// stream.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match stream.try_write_vectored(&bufs) { - /// Ok(n) => { - /// break; - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf)) - } - - /// Tries to read or write from the socket using a user-provided IO operation. - /// - /// If the socket is ready, the provided closure is called. The closure - /// should attempt to perform IO operation on the socket by manually - /// calling the appropriate syscall. If the operation fails because the - /// socket is not actually ready, then the closure should return a - /// `WouldBlock` error and the readiness flag is cleared. The return value - /// of the closure is then returned by `try_io`. - /// - /// If the socket is not ready, then the closure is not called - /// and a `WouldBlock` error is returned. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the socket that failed due to the socket not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the socket to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `UnixStream` type, as this will mess with the - /// readiness flag and can cause the socket to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. 
- /// - /// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: UnixStream::readable() - /// [`writable()`]: UnixStream::writable() - /// [`ready()`]: UnixStream::ready() - pub fn try_io( - &self, - interest: Interest, - f: impl FnOnce() -> io::Result, - ) -> io::Result { - self.io - .registration() - .try_io(interest, || self.io.try_io(f)) - } - - /// Reads or writes from the socket using a user-provided IO operation. - /// - /// The readiness of the socket is awaited and when the socket is ready, - /// the provided closure is called. The closure should attempt to perform - /// IO operation on the socket by manually calling the appropriate syscall. - /// If the operation fails because the socket is not actually ready, - /// then the closure should return a `WouldBlock` error. In such case the - /// readiness flag is cleared and the socket readiness is awaited again. - /// This loop is repeated until the closure returns an `Ok` or an error - /// other than `WouldBlock`. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the socket that failed due to the socket not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the socket to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `UnixStream` type, as this will mess with the - /// readiness flag and can cause the socket to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. 
- pub async fn async_io( - &self, - interest: Interest, - mut f: impl FnMut() -> io::Result, - ) -> io::Result { - self.io - .registration() - .async_io(interest, || self.io.try_io(&mut f)) - .await - } - - /// Creates new `UnixStream` from a `std::os::unix::net::UnixStream`. - /// - /// This function is intended to be used to wrap a UnixStream from the - /// standard library in the Tokio equivalent. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the stream is in - /// non-blocking mode. Otherwise all I/O operations on the stream - /// will block the thread, which will cause unexpected behavior. - /// Non-blocking mode can be set using [`set_nonblocking`]. - /// - /// [`set_nonblocking`]: std::os::unix::net::UnixStream::set_nonblocking - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// use std::os::unix::net::UnixStream as StdUnixStream; - /// # use std::error::Error; - /// - /// # async fn dox() -> Result<(), Box> { - /// let std_stream = StdUnixStream::connect("/path/to/the/socket")?; - /// std_stream.set_nonblocking(true)?; - /// let stream = UnixStream::from_std(std_stream)?; - /// # Ok(()) - /// # } - /// ``` - /// - /// # Panics - /// - /// This function panics if it is not called from within a runtime with - /// IO enabled. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - #[track_caller] - pub fn from_std(stream: net::UnixStream) -> io::Result { - let stream = mio::net::UnixStream::from_std(stream); - let io = PollEvented::new(stream)?; - - Ok(UnixStream { io }) - } - - /// Turns a [`tokio::net::UnixStream`] into a [`std::os::unix::net::UnixStream`]. - /// - /// The returned [`std::os::unix::net::UnixStream`] will have nonblocking - /// mode set as `true`. 
Use [`set_nonblocking`] to change the blocking - /// mode if needed. - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::io::Read; - /// use tokio::net::UnixListener; - /// # use tokio::net::UnixStream; - /// # use tokio::io::AsyncWriteExt; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// - /// let mut data = [0u8; 12]; - /// let listener = UnixListener::bind(&bind_path)?; - /// # let handle = tokio::spawn(async { - /// # let mut stream = UnixStream::connect(bind_path).await.unwrap(); - /// # stream.write(b"Hello world!").await.unwrap(); - /// # }); - /// let (tokio_unix_stream, _) = listener.accept().await?; - /// let mut std_unix_stream = tokio_unix_stream.into_std()?; - /// # handle.await.expect("The task being joined has panicked"); - /// std_unix_stream.set_nonblocking(false)?; - /// std_unix_stream.read_exact(&mut data)?; - /// # assert_eq!(b"Hello world!", &data); - /// Ok(()) - /// } - /// ``` - /// [`tokio::net::UnixStream`]: UnixStream - /// [`std::os::unix::net::UnixStream`]: std::os::unix::net::UnixStream - /// [`set_nonblocking`]: fn@std::os::unix::net::UnixStream::set_nonblocking - pub fn into_std(self) -> io::Result { - self.io - .into_inner() - .map(|io| io.into_raw_fd()) - .map(|raw_fd| unsafe { std::os::unix::net::UnixStream::from_raw_fd(raw_fd) }) - } - - /// Creates an unnamed pair of connected sockets. - /// - /// This function will create a pair of interconnected Unix sockets for - /// communicating back and forth between one another. Each socket will - /// be associated with the default event loop's handle. 
- pub fn pair() -> io::Result<(UnixStream, UnixStream)> { - let (a, b) = mio::net::UnixStream::pair()?; - let a = UnixStream::new(a)?; - let b = UnixStream::new(b)?; - - Ok((a, b)) - } - - pub(crate) fn new(stream: mio::net::UnixStream) -> io::Result { - let io = PollEvented::new(stream)?; - Ok(UnixStream { io }) - } - - /// Returns the socket address of the local half of this connection. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// println!("{:?}", stream.local_addr()?); - /// # Ok(()) - /// # } - /// ``` - pub fn local_addr(&self) -> io::Result { - self.io.local_addr().map(SocketAddr) - } - - /// Returns the socket address of the remote half of this connection. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::UnixStream; - /// - /// # async fn dox() -> Result<(), Box> { - /// let dir = tempfile::tempdir().unwrap(); - /// let bind_path = dir.path().join("bind_path"); - /// let stream = UnixStream::connect(bind_path).await?; - /// - /// println!("{:?}", stream.peer_addr()?); - /// # Ok(()) - /// # } - /// ``` - pub fn peer_addr(&self) -> io::Result { - self.io.peer_addr().map(SocketAddr) - } - - /// Returns effective credentials of the process which called `connect` or `pair`. - pub fn peer_cred(&self) -> io::Result { - ucred::get_peer_cred(self) - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.io.take_error() - } - - /// Shuts down the read, write, or both halves of this connection. - /// - /// This function will cause all pending and future I/O calls on the - /// specified portions to immediately return with an appropriate value - /// (see the documentation of `Shutdown`). 
- pub(super) fn shutdown_std(&self, how: Shutdown) -> io::Result<()> { - self.io.shutdown(how) - } - - // These lifetime markers also appear in the generated documentation, and make - // it more clear that this is a *borrowed* split. - #[allow(clippy::needless_lifetimes)] - /// Splits a `UnixStream` into a read half and a write half, which can be used - /// to read and write the stream concurrently. - /// - /// This method is more efficient than [`into_split`], but the halves cannot be - /// moved into independently spawned tasks. - /// - /// [`into_split`]: Self::into_split() - pub fn split<'a>(&'a mut self) -> (ReadHalf<'a>, WriteHalf<'a>) { - split(self) - } - - /// Splits a `UnixStream` into a read half and a write half, which can be used - /// to read and write the stream concurrently. - /// - /// Unlike [`split`], the owned halves can be moved to separate tasks, however - /// this comes at the cost of a heap allocation. - /// - /// **Note:** Dropping the write half will shut down the write half of the - /// stream. This is equivalent to calling [`shutdown()`] on the `UnixStream`. - /// - /// [`split`]: Self::split() - /// [`shutdown()`]: fn@crate::io::AsyncWriteExt::shutdown - pub fn into_split(self) -> (OwnedReadHalf, OwnedWriteHalf) { - split_owned(self) - } -} - -impl TryFrom for UnixStream { - type Error = io::Error; - - /// Consumes stream, returning the tokio I/O object. - /// - /// This is equivalent to - /// [`UnixStream::from_std(stream)`](UnixStream::from_std). 
- fn try_from(stream: net::UnixStream) -> io::Result { - Self::from_std(stream) - } -} - -impl AsyncRead for UnixStream { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.poll_read_priv(cx, buf) - } -} - -impl AsyncWrite for UnixStream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.poll_write_priv(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.poll_write_vectored_priv(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - true - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - self.shutdown_std(std::net::Shutdown::Write)?; - Poll::Ready(Ok(())) - } -} - -impl UnixStream { - // == Poll IO functions that takes `&self` == - // - // To read or write without mutable access to the `UnixStream`, combine the - // `poll_read_ready` or `poll_write_ready` methods with the `try_read` or - // `try_write` methods. 
- - pub(crate) fn poll_read_priv( - &self, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - // Safety: `UnixStream::read` correctly handles reads into uninitialized memory - unsafe { self.io.poll_read(cx, buf) } - } - - pub(crate) fn poll_write_priv( - &self, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.io.poll_write(cx, buf) - } - - pub(super) fn poll_write_vectored_priv( - &self, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.io.poll_write_vectored(cx, bufs) - } -} - -impl fmt::Debug for UnixStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.io.fmt(f) - } -} - -impl AsRawFd for UnixStream { - fn as_raw_fd(&self) -> RawFd { - self.io.as_raw_fd() - } -} - -impl AsFd for UnixStream { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/ucred.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/ucred.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/unix/ucred.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/unix/ucred.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,310 +0,0 @@ -use crate::net::unix; - -/// Credentials of a process. -#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] -pub struct UCred { - /// PID (process ID) of the process. - pid: Option, - /// UID (user ID) of the process. - uid: unix::uid_t, - /// GID (group ID) of the process. - gid: unix::gid_t, -} - -impl UCred { - /// Gets UID (user ID) of the process. - pub fn uid(&self) -> unix::uid_t { - self.uid - } - - /// Gets GID (group ID) of the process. - pub fn gid(&self) -> unix::gid_t { - self.gid - } - - /// Gets PID (process ID) of the process. - /// - /// This is only implemented under Linux, Android, iOS, macOS, Solaris and - /// Illumos. On other platforms this will always return `None`. 
- pub fn pid(&self) -> Option { - self.pid - } -} - -#[cfg(any( - target_os = "linux", - target_os = "redox", - target_os = "android", - target_os = "openbsd" -))] -pub(crate) use self::impl_linux::get_peer_cred; - -#[cfg(target_os = "netbsd")] -pub(crate) use self::impl_netbsd::get_peer_cred; - -#[cfg(any(target_os = "dragonfly", target_os = "freebsd"))] -pub(crate) use self::impl_bsd::get_peer_cred; - -#[cfg(any(target_os = "macos", target_os = "ios", target_os = "tvos"))] -pub(crate) use self::impl_macos::get_peer_cred; - -#[cfg(any(target_os = "solaris", target_os = "illumos"))] -pub(crate) use self::impl_solaris::get_peer_cred; - -#[cfg(target_os = "aix")] -pub(crate) use self::impl_aix::get_peer_cred; - -#[cfg(target_os = "espidf")] -pub(crate) use self::impl_noproc::get_peer_cred; - -#[cfg(any( - target_os = "linux", - target_os = "redox", - target_os = "android", - target_os = "openbsd" -))] -pub(crate) mod impl_linux { - use crate::net::unix::{self, UnixStream}; - - use libc::{c_void, getsockopt, socklen_t, SOL_SOCKET, SO_PEERCRED}; - use std::{io, mem}; - - #[cfg(target_os = "openbsd")] - use libc::sockpeercred as ucred; - #[cfg(any(target_os = "linux", target_os = "redox", target_os = "android"))] - use libc::ucred; - - pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result { - use std::os::unix::io::AsRawFd; - - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut ucred = ucred { - pid: 0, - uid: 0, - gid: 0, - }; - - let ucred_size = mem::size_of::(); - - // These paranoid checks should be optimized-out - assert!(mem::size_of::() <= mem::size_of::()); - assert!(ucred_size <= u32::MAX as usize); - - let mut ucred_size = ucred_size as socklen_t; - - let ret = getsockopt( - raw_fd, - SOL_SOCKET, - SO_PEERCRED, - &mut ucred as *mut ucred as *mut c_void, - &mut ucred_size, - ); - if ret == 0 && ucred_size as usize == mem::size_of::() { - Ok(super::UCred { - uid: ucred.uid as unix::uid_t, - gid: ucred.gid as unix::gid_t, - pid: Some(ucred.pid as 
unix::pid_t), - }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(target_os = "netbsd")] -pub(crate) mod impl_netbsd { - use crate::net::unix::{self, UnixStream}; - - use libc::{c_void, getsockopt, socklen_t, unpcbid, LOCAL_PEEREID, SOL_SOCKET}; - use std::io; - use std::mem::size_of; - use std::os::unix::io::AsRawFd; - - pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result { - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut unpcbid = unpcbid { - unp_pid: 0, - unp_euid: 0, - unp_egid: 0, - }; - - let unpcbid_size = size_of::(); - let mut unpcbid_size = unpcbid_size as socklen_t; - - let ret = getsockopt( - raw_fd, - SOL_SOCKET, - LOCAL_PEEREID, - &mut unpcbid as *mut unpcbid as *mut c_void, - &mut unpcbid_size, - ); - if ret == 0 && unpcbid_size as usize == size_of::() { - Ok(super::UCred { - uid: unpcbid.unp_euid as unix::uid_t, - gid: unpcbid.unp_egid as unix::gid_t, - pid: Some(unpcbid.unp_pid as unix::pid_t), - }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(any(target_os = "dragonfly", target_os = "freebsd"))] -pub(crate) mod impl_bsd { - use crate::net::unix::{self, UnixStream}; - - use libc::getpeereid; - use std::io; - use std::mem::MaybeUninit; - use std::os::unix::io::AsRawFd; - - pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result { - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut uid = MaybeUninit::uninit(); - let mut gid = MaybeUninit::uninit(); - - let ret = getpeereid(raw_fd, uid.as_mut_ptr(), gid.as_mut_ptr()); - - if ret == 0 { - Ok(super::UCred { - uid: uid.assume_init() as unix::uid_t, - gid: gid.assume_init() as unix::gid_t, - pid: None, - }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(any(target_os = "macos", target_os = "ios", target_os = "tvos"))] -pub(crate) mod impl_macos { - use crate::net::unix::{self, UnixStream}; - - use libc::{c_void, getpeereid, getsockopt, pid_t, LOCAL_PEEREPID, SOL_LOCAL}; - use std::io; - use std::mem::size_of; - 
use std::mem::MaybeUninit; - use std::os::unix::io::AsRawFd; - - pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result { - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut uid = MaybeUninit::uninit(); - let mut gid = MaybeUninit::uninit(); - let mut pid: MaybeUninit = MaybeUninit::uninit(); - let mut pid_size: MaybeUninit = MaybeUninit::new(size_of::() as u32); - - if getsockopt( - raw_fd, - SOL_LOCAL, - LOCAL_PEEREPID, - pid.as_mut_ptr() as *mut c_void, - pid_size.as_mut_ptr(), - ) != 0 - { - return Err(io::Error::last_os_error()); - } - - assert!(pid_size.assume_init() == (size_of::() as u32)); - - let ret = getpeereid(raw_fd, uid.as_mut_ptr(), gid.as_mut_ptr()); - - if ret == 0 { - Ok(super::UCred { - uid: uid.assume_init() as unix::uid_t, - gid: gid.assume_init() as unix::gid_t, - pid: Some(pid.assume_init() as unix::pid_t), - }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(any(target_os = "solaris", target_os = "illumos"))] -pub(crate) mod impl_solaris { - use crate::net::unix::{self, UnixStream}; - use std::io; - use std::os::unix::io::AsRawFd; - use std::ptr; - - pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result { - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut cred = ptr::null_mut(); - let ret = libc::getpeerucred(raw_fd, &mut cred); - - if ret == 0 { - let uid = libc::ucred_geteuid(cred); - let gid = libc::ucred_getegid(cred); - let pid = libc::ucred_getpid(cred); - - libc::ucred_free(cred); - - Ok(super::UCred { - uid: uid as unix::uid_t, - gid: gid as unix::gid_t, - pid: Some(pid as unix::pid_t), - }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(target_os = "aix")] -pub(crate) mod impl_aix { - use crate::net::unix::UnixStream; - use std::io; - use std::os::unix::io::AsRawFd; - - pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result { - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut uid = std::mem::MaybeUninit::uninit(); - let mut gid = 
std::mem::MaybeUninit::uninit(); - - let ret = libc::getpeereid(raw_fd, uid.as_mut_ptr(), gid.as_mut_ptr()); - - if ret == 0 { - Ok(super::UCred { - uid: uid.assume_init(), - gid: gid.assume_init(), - pid: None, - }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(target_os = "espidf")] -pub(crate) mod impl_noproc { - use crate::net::unix::UnixStream; - use std::io; - - pub(crate) fn get_peer_cred(_sock: &UnixStream) -> io::Result { - Ok(super::UCred { - uid: 0, - gid: 0, - pid: None, - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/windows/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/windows/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/windows/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/windows/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,3 +0,0 @@ -//! Windows specific network types. - -pub mod named_pipe; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/net/windows/named_pipe.rs s390-tools-2.33.1/rust-vendor/tokio/src/net/windows/named_pipe.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/net/windows/named_pipe.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/net/windows/named_pipe.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,2690 +0,0 @@ -//! Tokio support for [Windows named pipes]. -//! -//! [Windows named pipes]: https://docs.microsoft.com/en-us/windows/win32/ipc/named-pipes - -use std::ffi::c_void; -use std::ffi::OsStr; -use std::io::{self, Read, Write}; -use std::pin::Pin; -use std::ptr; -use std::task::{Context, Poll}; - -use crate::io::{AsyncRead, AsyncWrite, Interest, PollEvented, ReadBuf, Ready}; -use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, RawHandle}; - -cfg_io_util! { - use bytes::BufMut; -} - -// Hide imports which are not used when generating documentation. 
-#[cfg(not(docsrs))] -mod doc { - pub(super) use crate::os::windows::ffi::OsStrExt; - pub(super) mod windows_sys { - pub(crate) use windows_sys::{ - Win32::Foundation::*, Win32::Storage::FileSystem::*, Win32::System::Pipes::*, - Win32::System::SystemServices::*, - }; - } - pub(super) use mio::windows as mio_windows; -} - -// NB: none of these shows up in public API, so don't document them. -#[cfg(docsrs)] -mod doc { - pub(super) mod mio_windows { - pub type NamedPipe = crate::doc::NotDefinedHere; - } -} - -use self::doc::*; - -/// A [Windows named pipe] server. -/// -/// Accepting client connections involves creating a server with -/// [`ServerOptions::create`] and waiting for clients to connect using -/// [`NamedPipeServer::connect`]. -/// -/// To avoid having clients sporadically fail with -/// [`std::io::ErrorKind::NotFound`] when they connect to a server, we must -/// ensure that at least one server instance is available at all times. This -/// means that the typical listen loop for a server is a bit involved, because -/// we have to ensure that we never drop a server accidentally while a client -/// might connect. -/// -/// So a correctly implemented server looks like this: -/// -/// ```no_run -/// use std::io; -/// use tokio::net::windows::named_pipe::ServerOptions; -/// -/// const PIPE_NAME: &str = r"\\.\pipe\named-pipe-idiomatic-server"; -/// -/// # #[tokio::main] async fn main() -> std::io::Result<()> { -/// // The first server needs to be constructed early so that clients can -/// // be correctly connected. Otherwise calling .wait will cause the client to -/// // error. -/// // -/// // Here we also make use of `first_pipe_instance`, which will ensure that -/// // there are no other servers up and running already. -/// let mut server = ServerOptions::new() -/// .first_pipe_instance(true) -/// .create(PIPE_NAME)?; -/// -/// // Spawn the server loop. -/// let server = tokio::spawn(async move { -/// loop { -/// // Wait for a client to connect. 
-/// let connected = server.connect().await?; -/// -/// // Construct the next server to be connected before sending the one -/// // we already have of onto a task. This ensures that the server -/// // isn't closed (after it's done in the task) before a new one is -/// // available. Otherwise the client might error with -/// // `io::ErrorKind::NotFound`. -/// server = ServerOptions::new().create(PIPE_NAME)?; -/// -/// let client = tokio::spawn(async move { -/// /* use the connected client */ -/// # Ok::<_, std::io::Error>(()) -/// }); -/// # if true { break } // needed for type inference to work -/// } -/// -/// Ok::<_, io::Error>(()) -/// }); -/// -/// /* do something else not server related here */ -/// # Ok(()) } -/// ``` -/// -/// [Windows named pipe]: https://docs.microsoft.com/en-us/windows/win32/ipc/named-pipes -#[derive(Debug)] -pub struct NamedPipeServer { - io: PollEvented, -} - -impl NamedPipeServer { - /// Constructs a new named pipe server from the specified raw handle. - /// - /// This function will consume ownership of the handle given, passing - /// responsibility for closing the handle to the returned object. - /// - /// This function is also unsafe as the primitives currently returned have - /// the contract that they are the sole owner of the file descriptor they - /// are wrapping. Usage of this function could accidentally allow violating - /// this contract which can cause memory unsafety in code that relies on it - /// being true. - /// - /// # Errors - /// - /// This errors if called outside of a [Tokio Runtime], or in a runtime that - /// has not [enabled I/O], or if any OS-specific I/O errors occur. 
- /// - /// [Tokio Runtime]: crate::runtime::Runtime - /// [enabled I/O]: crate::runtime::Builder::enable_io - pub unsafe fn from_raw_handle(handle: RawHandle) -> io::Result { - let named_pipe = mio_windows::NamedPipe::from_raw_handle(handle); - - Ok(Self { - io: PollEvented::new(named_pipe)?, - }) - } - - /// Retrieves information about the named pipe the server is associated - /// with. - /// - /// ```no_run - /// use tokio::net::windows::named_pipe::{PipeEnd, PipeMode, ServerOptions}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-info"; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// let server = ServerOptions::new() - /// .pipe_mode(PipeMode::Message) - /// .max_instances(5) - /// .create(PIPE_NAME)?; - /// - /// let server_info = server.info()?; - /// - /// assert_eq!(server_info.end, PipeEnd::Server); - /// assert_eq!(server_info.mode, PipeMode::Message); - /// assert_eq!(server_info.max_instances, 5); - /// # Ok(()) } - /// ``` - pub fn info(&self) -> io::Result { - // Safety: we're ensuring the lifetime of the named pipe. - unsafe { named_pipe_info(self.io.as_raw_handle()) } - } - - /// Enables a named pipe server process to wait for a client process to - /// connect to an instance of a named pipe. A client process connects by - /// creating a named pipe with the same name. - /// - /// This corresponds to the [`ConnectNamedPipe`] system call. - /// - /// # Cancel safety - /// - /// This method is cancellation safe in the sense that if it is used as the - /// event in a [`select!`](crate::select) statement and some other branch - /// completes first, then no connection events have been lost. 
- /// - /// [`ConnectNamedPipe`]: https://docs.microsoft.com/en-us/windows/win32/api/namedpipeapi/nf-namedpipeapi-connectnamedpipe - /// - /// # Example - /// - /// ```no_run - /// use tokio::net::windows::named_pipe::ServerOptions; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\mynamedpipe"; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// let pipe = ServerOptions::new().create(PIPE_NAME)?; - /// - /// // Wait for a client to connect. - /// pipe.connect().await?; - /// - /// // Use the connected client... - /// # Ok(()) } - /// ``` - pub async fn connect(&self) -> io::Result<()> { - match self.io.connect() { - Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io - .registration() - .async_io(Interest::WRITABLE, || self.io.connect()) - .await - } - x => x, - } - } - - /// Disconnects the server end of a named pipe instance from a client - /// process. - /// - /// ``` - /// use tokio::io::AsyncWriteExt; - /// use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; - /// use windows_sys::Win32::Foundation::ERROR_PIPE_NOT_CONNECTED; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-disconnect"; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// let server = ServerOptions::new() - /// .create(PIPE_NAME)?; - /// - /// let mut client = ClientOptions::new() - /// .open(PIPE_NAME)?; - /// - /// // Wait for a client to become connected. - /// server.connect().await?; - /// - /// // Forcibly disconnect the client. - /// server.disconnect()?; - /// - /// // Write fails with an OS-specific error after client has been - /// // disconnected. - /// let e = client.write(b"ping").await.unwrap_err(); - /// assert_eq!(e.raw_os_error(), Some(ERROR_PIPE_NOT_CONNECTED as i32)); - /// # Ok(()) } - /// ``` - pub fn disconnect(&self) -> io::Result<()> { - self.io.disconnect() - } - - /// Waits for any of the requested ready states. 
- /// - /// This function is usually paired with `try_read()` or `try_write()`. It - /// can be used to concurrently read / write to the same pipe on a single - /// task without splitting the pipe. - /// - /// The function may complete without the pipe being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// # Examples - /// - /// Concurrently read and write to the pipe on the same task without - /// splitting. - /// - /// ```no_run - /// use tokio::io::Interest; - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-ready"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let server = named_pipe::ServerOptions::new() - /// .create(PIPE_NAME)?; - /// - /// loop { - /// let ready = server.ready(Interest::READABLE | Interest::WRITABLE).await?; - /// - /// if ready.is_readable() { - /// let mut data = vec![0; 1024]; - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match server.try_read(&mut data) { - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// if ready.is_writable() { - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match server.try_write(b"hello world") { - /// Ok(n) => { - /// println!("write {} bytes", n); - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub async fn ready(&self, interest: Interest) -> io::Result { - let event = self.io.registration().readiness(interest).await?; - Ok(event.ready) - } - - /// Waits for the pipe to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_read()`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-readable"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let server = named_pipe::ServerOptions::new() - /// .create(PIPE_NAME)?; - /// - /// let mut msg = vec![0; 1024]; - /// - /// loop { - /// // Wait for the pipe to be readable - /// server.readable().await?; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match server.try_read(&mut msg) { - /// Ok(n) => { - /// msg.truncate(n); - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// println!("GOT = {:?}", msg); - /// Ok(()) - /// } - /// ``` - pub async fn readable(&self) -> io::Result<()> { - self.ready(Interest::READABLE).await?; - Ok(()) - } - - /// Polls for read readiness. - /// - /// If the pipe is not currently ready for reading, this method will - /// store a clone of the `Waker` from the provided `Context`. When the pipe - /// becomes ready for reading, `Waker::wake` will be called on the waker. 
- /// - /// Note that on multiple calls to `poll_read_ready` or `poll_read`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. (However, `poll_write_ready` retains a - /// second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`readable`] is not feasible. Where possible, using [`readable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the pipe is not ready for reading. - /// * `Poll::Ready(Ok(()))` if the pipe is ready for reading. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`readable`]: method@Self::readable - pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_read_ready(cx).map_ok(|_| ()) - } - - /// Tries to read data from the pipe into the provided buffer, returning how - /// many bytes were read. - /// - /// Receives any pending data from the pipe but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: NamedPipeServer::readable() - /// [`ready()`]: NamedPipeServer::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. The pipe's read half is closed and will no longer yield data. - /// 2. The specified buffer was 0 bytes in length. 
- /// - /// If the pipe is not ready to read data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-try-read"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let server = named_pipe::ServerOptions::new() - /// .create(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be readable - /// server.readable().await?; - /// - /// // Creating the buffer **after** the `await` prevents it from - /// // being stored in the async task. - /// let mut buf = [0; 4096]; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match server.try_read(&mut buf) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read(buf)) - } - - /// Tries to read data from the pipe into the provided buffers, returning - /// how many bytes were read. - /// - /// Data is copied to fill each buffer in order, with the final buffer - /// written to possibly being only partially filled. This method behaves - /// equivalently to a single call to [`try_read()`] with concatenated - /// buffers. - /// - /// Receives any pending data from the pipe but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_vectored()` is non-blocking, the buffer does not have to be - /// stored by the async task and can exist entirely on the stack. 
- /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`try_read()`]: NamedPipeServer::try_read() - /// [`readable()`]: NamedPipeServer::readable() - /// [`ready()`]: NamedPipeServer::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the pipe's read half is closed - /// and will no longer yield data. If the pipe is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io::{self, IoSliceMut}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-try-read-vectored"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let server = named_pipe::ServerOptions::new() - /// .create(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be readable - /// server.readable().await?; - /// - /// // Creating the buffer **after** the `await` prevents it from - /// // being stored in the async task. - /// let mut buf_a = [0; 512]; - /// let mut buf_b = [0; 1024]; - /// let mut bufs = [ - /// IoSliceMut::new(&mut buf_a), - /// IoSliceMut::new(&mut buf_b), - /// ]; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match server.try_read_vectored(&mut bufs) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs)) - } - - cfg_io_util! 
{ - /// Tries to read data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// Receives any pending data from the pipe but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: NamedPipeServer::readable() - /// [`ready()`]: NamedPipeServer::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-readable"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let server = named_pipe::ServerOptions::new().create(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be readable - /// server.readable().await?; - /// - /// let mut buf = Vec::with_capacity(4096); - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. 
- /// match server.try_read_buf(&mut buf) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_buf(&self, buf: &mut B) -> io::Result { - self.io.registration().try_io(Interest::READABLE, || { - use std::io::Read; - - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - // Safety: We trust `NamedPipeServer::read` to have filled up `n` bytes in the - // buffer. - let n = (&*self.io).read(dst)?; - - unsafe { - buf.advance_mut(n); - } - - Ok(n) - }) - } - } - - /// Waits for the pipe to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually - /// paired with `try_write()`. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-writable"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let server = named_pipe::ServerOptions::new() - /// .create(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be writable - /// server.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match server.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn writable(&self) -> io::Result<()> { - self.ready(Interest::WRITABLE).await?; - Ok(()) - } - - /// Polls for write readiness. 
- /// - /// If the pipe is not currently ready for writing, this method will - /// store a clone of the `Waker` from the provided `Context`. When the pipe - /// becomes ready for writing, `Waker::wake` will be called on the waker. - /// - /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. (However, `poll_read_ready` retains a - /// second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`writable`] is not feasible. Where possible, using [`writable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the pipe is not ready for writing. - /// * `Poll::Ready(Ok(()))` if the pipe is ready for writing. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`writable`]: method@Self::writable - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_write_ready(cx).map_ok(|_| ()) - } - - /// Tries to write a buffer to the pipe, returning how many bytes were - /// written. - /// - /// The function will attempt to write the entire contents of `buf`, but - /// only part of the buffer may be written. - /// - /// This function is usually paired with `writable()`. - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the pipe is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-try-write"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let server = named_pipe::ServerOptions::new() - /// .create(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be writable - /// server.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match server.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write(&self, buf: &[u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) - } - - /// Tries to write several buffers to the pipe, returning how many bytes - /// were written. - /// - /// Data is written from each buffer in order, with the final buffer read - /// from possible being only partially consumed. This method behaves - /// equivalently to a single call to [`try_write()`] with concatenated - /// buffers. - /// - /// This function is usually paired with `writable()`. - /// - /// [`try_write()`]: NamedPipeServer::try_write() - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the pipe is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-server-try-write-vectored"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let server = named_pipe::ServerOptions::new() - /// .create(PIPE_NAME)?; - /// - /// let bufs = [io::IoSlice::new(b"hello "), io::IoSlice::new(b"world")]; - /// - /// loop { - /// // Wait for the pipe to be writable - /// server.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match server.try_write_vectored(&bufs) { - /// Ok(n) => { - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf)) - } - - /// Tries to read or write from the pipe using a user-provided IO operation. - /// - /// If the pipe is ready, the provided closure is called. The closure - /// should attempt to perform IO operation from the pipe by manually - /// calling the appropriate syscall. If the operation fails because the - /// pipe is not actually ready, then the closure should return a - /// `WouldBlock` error and the readiness flag is cleared. The return value - /// of the closure is then returned by `try_io`. - /// - /// If the pipe is not ready, then the closure is not called - /// and a `WouldBlock` error is returned. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the pipe that failed due to the pipe not being - /// ready. 
Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the pipe to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the - /// methods defined on the Tokio `NamedPipeServer` type, as this will mess with - /// the readiness flag and can cause the pipe to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. - /// - /// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: NamedPipeServer::readable() - /// [`writable()`]: NamedPipeServer::writable() - /// [`ready()`]: NamedPipeServer::ready() - pub fn try_io( - &self, - interest: Interest, - f: impl FnOnce() -> io::Result, - ) -> io::Result { - self.io.registration().try_io(interest, f) - } - - /// Reads or writes from the pipe using a user-provided IO operation. - /// - /// The readiness of the pipe is awaited and when the pipe is ready, - /// the provided closure is called. The closure should attempt to perform - /// IO operation on the pipe by manually calling the appropriate syscall. - /// If the operation fails because the pipe is not actually ready, - /// then the closure should return a `WouldBlock` error. In such case the - /// readiness flag is cleared and the pipe readiness is awaited again. - /// This loop is repeated until the closure returns an `Ok` or an error - /// other than `WouldBlock`. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the pipe that failed due to the pipe not being - /// ready. 
Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the pipe to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `NamedPipeServer` type, as this will mess with the - /// readiness flag and can cause the pipe to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. - pub async fn async_io( - &self, - interest: Interest, - f: impl FnMut() -> io::Result, - ) -> io::Result { - self.io.registration().async_io(interest, f).await - } -} - -impl AsyncRead for NamedPipeServer { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - unsafe { self.io.poll_read(cx, buf) } - } -} - -impl AsyncWrite for NamedPipeServer { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.io.poll_write(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.io.poll_write_vectored(cx, bufs) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} - -impl AsRawHandle for NamedPipeServer { - fn as_raw_handle(&self) -> RawHandle { - self.io.as_raw_handle() - } -} - -impl AsHandle for NamedPipeServer { - fn as_handle(&self) -> BorrowedHandle<'_> { - unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) } - } -} - -/// A [Windows named pipe] client. -/// -/// Constructed using [`ClientOptions::open`]. -/// -/// Connecting a client correctly involves a few steps. 
When connecting through -/// [`ClientOptions::open`], it might error indicating one of two things: -/// -/// * [`std::io::ErrorKind::NotFound`] - There is no server available. -/// * [`ERROR_PIPE_BUSY`] - There is a server available, but it is busy. Sleep -/// for a while and try again. -/// -/// So a correctly implemented client looks like this: -/// -/// ```no_run -/// use std::time::Duration; -/// use tokio::net::windows::named_pipe::ClientOptions; -/// use tokio::time; -/// use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY; -/// -/// const PIPE_NAME: &str = r"\\.\pipe\named-pipe-idiomatic-client"; -/// -/// # #[tokio::main] async fn main() -> std::io::Result<()> { -/// let client = loop { -/// match ClientOptions::new().open(PIPE_NAME) { -/// Ok(client) => break client, -/// Err(e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (), -/// Err(e) => return Err(e), -/// } -/// -/// time::sleep(Duration::from_millis(50)).await; -/// }; -/// -/// /* use the connected client */ -/// # Ok(()) } -/// ``` -/// -/// [`ERROR_PIPE_BUSY`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/Foundation/constant.ERROR_PIPE_BUSY.html -/// [Windows named pipe]: https://docs.microsoft.com/en-us/windows/win32/ipc/named-pipes -#[derive(Debug)] -pub struct NamedPipeClient { - io: PollEvented, -} - -impl NamedPipeClient { - /// Constructs a new named pipe client from the specified raw handle. - /// - /// This function will consume ownership of the handle given, passing - /// responsibility for closing the handle to the returned object. - /// - /// This function is also unsafe as the primitives currently returned have - /// the contract that they are the sole owner of the file descriptor they - /// are wrapping. Usage of this function could accidentally allow violating - /// this contract which can cause memory unsafety in code that relies on it - /// being true. 
- /// - /// # Errors - /// - /// This errors if called outside of a [Tokio Runtime], or in a runtime that - /// has not [enabled I/O], or if any OS-specific I/O errors occur. - /// - /// [Tokio Runtime]: crate::runtime::Runtime - /// [enabled I/O]: crate::runtime::Builder::enable_io - pub unsafe fn from_raw_handle(handle: RawHandle) -> io::Result { - let named_pipe = mio_windows::NamedPipe::from_raw_handle(handle); - - Ok(Self { - io: PollEvented::new(named_pipe)?, - }) - } - - /// Retrieves information about the named pipe the client is associated - /// with. - /// - /// ```no_run - /// use tokio::net::windows::named_pipe::{ClientOptions, PipeEnd, PipeMode}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-info"; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// let client = ClientOptions::new() - /// .open(PIPE_NAME)?; - /// - /// let client_info = client.info()?; - /// - /// assert_eq!(client_info.end, PipeEnd::Client); - /// assert_eq!(client_info.mode, PipeMode::Message); - /// assert_eq!(client_info.max_instances, 5); - /// # Ok(()) } - /// ``` - pub fn info(&self) -> io::Result { - // Safety: we're ensuring the lifetime of the named pipe. - unsafe { named_pipe_info(self.io.as_raw_handle()) } - } - - /// Waits for any of the requested ready states. - /// - /// This function is usually paired with `try_read()` or `try_write()`. It - /// can be used to concurrently read / write to the same pipe on a single - /// task without splitting the pipe. - /// - /// The function may complete without the pipe being ready. This is a - /// false-positive and attempting an operation will return with - /// `io::ErrorKind::WouldBlock`. The function can also return with an empty - /// [`Ready`] set, so you should always check the returned value and possibly - /// wait again if the requested states are not set. - /// - /// # Examples - /// - /// Concurrently read and write to the pipe on the same task without - /// splitting. 
- /// - /// ```no_run - /// use tokio::io::Interest; - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-ready"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let client = named_pipe::ClientOptions::new().open(PIPE_NAME)?; - /// - /// loop { - /// let ready = client.ready(Interest::READABLE | Interest::WRITABLE).await?; - /// - /// if ready.is_readable() { - /// let mut data = vec![0; 1024]; - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match client.try_read(&mut data) { - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// if ready.is_writable() { - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match client.try_write(b"hello world") { - /// Ok(n) => { - /// println!("write {} bytes", n); - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// } - /// } - /// ``` - pub async fn ready(&self, interest: Interest) -> io::Result { - let event = self.io.registration().readiness(interest).await?; - Ok(event.ready) - } - - /// Waits for the pipe to become readable. - /// - /// This function is equivalent to `ready(Interest::READABLE)` and is usually - /// paired with `try_read()`. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-readable"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let client = named_pipe::ClientOptions::new().open(PIPE_NAME)?; - /// - /// let mut msg = vec![0; 1024]; - /// - /// loop { - /// // Wait for the pipe to be readable - /// client.readable().await?; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match client.try_read(&mut msg) { - /// Ok(n) => { - /// msg.truncate(n); - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// println!("GOT = {:?}", msg); - /// Ok(()) - /// } - /// ``` - pub async fn readable(&self) -> io::Result<()> { - self.ready(Interest::READABLE).await?; - Ok(()) - } - - /// Polls for read readiness. - /// - /// If the pipe is not currently ready for reading, this method will - /// store a clone of the `Waker` from the provided `Context`. When the pipe - /// becomes ready for reading, `Waker::wake` will be called on the waker. - /// - /// Note that on multiple calls to `poll_read_ready` or `poll_read`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. (However, `poll_write_ready` retains a - /// second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`readable`] is not feasible. Where possible, using [`readable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the pipe is not ready for reading. 
- /// * `Poll::Ready(Ok(()))` if the pipe is ready for reading. - /// * `Poll::Ready(Err(e))` if an error is encountered. - /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`readable`]: method@Self::readable - pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_read_ready(cx).map_ok(|_| ()) - } - - /// Tries to read data from the pipe into the provided buffer, returning how - /// many bytes were read. - /// - /// Receives any pending data from the pipe but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: NamedPipeClient::readable() - /// [`ready()`]: NamedPipeClient::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. The pipe's read half is closed and will no longer yield data. - /// 2. The specified buffer was 0 bytes in length. - /// - /// If the pipe is not ready to read data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-try-read"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let client = named_pipe::ClientOptions::new().open(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be readable - /// client.readable().await?; - /// - /// // Creating the buffer **after** the `await` prevents it from - /// // being stored in the async task. 
- /// let mut buf = [0; 4096]; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match client.try_read(&mut buf) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read(&self, buf: &mut [u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read(buf)) - } - - /// Tries to read data from the pipe into the provided buffers, returning - /// how many bytes were read. - /// - /// Data is copied to fill each buffer in order, with the final buffer - /// written to possibly being only partially filled. This method behaves - /// equivalently to a single call to [`try_read()`] with concatenated - /// buffers. - /// - /// Receives any pending data from the pipe but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_vectored()` is non-blocking, the buffer does not have to be - /// stored by the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. - /// - /// [`try_read()`]: NamedPipeClient::try_read() - /// [`readable()`]: NamedPipeClient::readable() - /// [`ready()`]: NamedPipeClient::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the pipe's read half is closed - /// and will no longer yield data. If the pipe is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io::{self, IoSliceMut}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-try-read-vectored"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let client = named_pipe::ClientOptions::new().open(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be readable - /// client.readable().await?; - /// - /// // Creating the buffer **after** the `await` prevents it from - /// // being stored in the async task. - /// let mut buf_a = [0; 512]; - /// let mut buf_b = [0; 1024]; - /// let mut bufs = [ - /// IoSliceMut::new(&mut buf_a), - /// IoSliceMut::new(&mut buf_b), - /// ]; - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match client.try_read_vectored(&mut bufs) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result { - self.io - .registration() - .try_io(Interest::READABLE, || (&*self.io).read_vectored(bufs)) - } - - cfg_io_util! { - /// Tries to read data from the stream into the provided buffer, advancing the - /// buffer's internal cursor, returning how many bytes were read. - /// - /// Receives any pending data from the pipe but does not wait for new data - /// to arrive. On success, returns the number of bytes read. Because - /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by - /// the async task and can exist entirely on the stack. - /// - /// Usually, [`readable()`] or [`ready()`] is used with this function. 
- /// - /// [`readable()`]: NamedPipeClient::readable() - /// [`ready()`]: NamedPipeClient::ready() - /// - /// # Return - /// - /// If data is successfully read, `Ok(n)` is returned, where `n` is the - /// number of bytes read. `Ok(0)` indicates the stream's read half is closed - /// and will no longer yield data. If the stream is not ready to read data - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-readable"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let client = named_pipe::ClientOptions::new().open(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be readable - /// client.readable().await?; - /// - /// let mut buf = Vec::with_capacity(4096); - /// - /// // Try to read data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match client.try_read_buf(&mut buf) { - /// Ok(0) => break, - /// Ok(n) => { - /// println!("read {} bytes", n); - /// } - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_read_buf(&self, buf: &mut B) -> io::Result { - self.io.registration().try_io(Interest::READABLE, || { - use std::io::Read; - - let dst = buf.chunk_mut(); - let dst = - unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - - // Safety: We trust `NamedPipeClient::read` to have filled up `n` bytes in the - // buffer. - let n = (&*self.io).read(dst)?; - - unsafe { - buf.advance_mut(n); - } - - Ok(n) - }) - } - } - - /// Waits for the pipe to become writable. - /// - /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually - /// paired with `try_write()`. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-writable"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let client = named_pipe::ClientOptions::new().open(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be writable - /// client.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match client.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn writable(&self) -> io::Result<()> { - self.ready(Interest::WRITABLE).await?; - Ok(()) - } - - /// Polls for write readiness. - /// - /// If the pipe is not currently ready for writing, this method will - /// store a clone of the `Waker` from the provided `Context`. When the pipe - /// becomes ready for writing, `Waker::wake` will be called on the waker. - /// - /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. (However, `poll_read_ready` retains a - /// second, independent waker.) - /// - /// This function is intended for cases where creating and pinning a future - /// via [`writable`] is not feasible. Where possible, using [`writable`] is - /// preferred, as this supports polling from multiple tasks at once. - /// - /// # Return value - /// - /// The function returns: - /// - /// * `Poll::Pending` if the pipe is not ready for writing. - /// * `Poll::Ready(Ok(()))` if the pipe is ready for writing. - /// * `Poll::Ready(Err(e))` if an error is encountered. 
- /// - /// # Errors - /// - /// This function may encounter any standard I/O error except `WouldBlock`. - /// - /// [`writable`]: method@Self::writable - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.io.registration().poll_write_ready(cx).map_ok(|_| ()) - } - - /// Tries to write a buffer to the pipe, returning how many bytes were - /// written. - /// - /// The function will attempt to write the entire contents of `buf`, but - /// only part of the buffer may be written. - /// - /// This function is usually paired with `writable()`. - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the pipe is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-try-write"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let client = named_pipe::ClientOptions::new().open(PIPE_NAME)?; - /// - /// loop { - /// // Wait for the pipe to be writable - /// client.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match client.try_write(b"hello world") { - /// Ok(n) => { - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write(&self, buf: &[u8]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) - } - - /// Tries to write several buffers to the pipe, returning how many bytes - /// were written. 
- /// - /// Data is written from each buffer in order, with the final buffer read - /// from possible being only partially consumed. This method behaves - /// equivalently to a single call to [`try_write()`] with concatenated - /// buffers. - /// - /// This function is usually paired with `writable()`. - /// - /// [`try_write()`]: NamedPipeClient::try_write() - /// - /// # Return - /// - /// If data is successfully written, `Ok(n)` is returned, where `n` is the - /// number of bytes written. If the pipe is not ready to write data, - /// `Err(io::ErrorKind::WouldBlock)` is returned. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::net::windows::named_pipe; - /// use std::error::Error; - /// use std::io; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-try-write-vectored"; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let client = named_pipe::ClientOptions::new().open(PIPE_NAME)?; - /// - /// let bufs = [io::IoSlice::new(b"hello "), io::IoSlice::new(b"world")]; - /// - /// loop { - /// // Wait for the pipe to be writable - /// client.writable().await?; - /// - /// // Try to write data, this may still fail with `WouldBlock` - /// // if the readiness event is a false positive. - /// match client.try_write_vectored(&bufs) { - /// Ok(n) => { - /// break; - /// } - /// Err(e) if e.kind() == io::ErrorKind::WouldBlock => { - /// continue; - /// } - /// Err(e) => { - /// return Err(e.into()); - /// } - /// } - /// } - /// - /// Ok(()) - /// } - /// ``` - pub fn try_write_vectored(&self, buf: &[io::IoSlice<'_>]) -> io::Result { - self.io - .registration() - .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf)) - } - - /// Tries to read or write from the pipe using a user-provided IO operation. - /// - /// If the pipe is ready, the provided closure is called. The closure - /// should attempt to perform IO operation from the pipe by manually - /// calling the appropriate syscall. 
If the operation fails because the - /// pipe is not actually ready, then the closure should return a - /// `WouldBlock` error and the readiness flag is cleared. The return value - /// of the closure is then returned by `try_io`. - /// - /// If the pipe is not ready, then the closure is not called - /// and a `WouldBlock` error is returned. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the pipe that failed due to the pipe not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the pipe to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `NamedPipeClient` type, as this will mess with the - /// readiness flag and can cause the pipe to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. - /// - /// Usually, [`readable()`], [`writable()`] or [`ready()`] is used with this function. - /// - /// [`readable()`]: NamedPipeClient::readable() - /// [`writable()`]: NamedPipeClient::writable() - /// [`ready()`]: NamedPipeClient::ready() - pub fn try_io( - &self, - interest: Interest, - f: impl FnOnce() -> io::Result, - ) -> io::Result { - self.io.registration().try_io(interest, f) - } - - /// Reads or writes from the pipe using a user-provided IO operation. - /// - /// The readiness of the pipe is awaited and when the pipe is ready, - /// the provided closure is called. The closure should attempt to perform - /// IO operation on the pipe by manually calling the appropriate syscall. 
- /// If the operation fails because the pipe is not actually ready, - /// then the closure should return a `WouldBlock` error. In such case the - /// readiness flag is cleared and the pipe readiness is awaited again. - /// This loop is repeated until the closure returns an `Ok` or an error - /// other than `WouldBlock`. - /// - /// The closure should only return a `WouldBlock` error if it has performed - /// an IO operation on the pipe that failed due to the pipe not being - /// ready. Returning a `WouldBlock` error in any other situation will - /// incorrectly clear the readiness flag, which can cause the pipe to - /// behave incorrectly. - /// - /// The closure should not perform the IO operation using any of the methods - /// defined on the Tokio `NamedPipeClient` type, as this will mess with the - /// readiness flag and can cause the pipe to behave incorrectly. - /// - /// This method is not intended to be used with combined interests. - /// The closure should perform only one type of IO operation, so it should not - /// require more than one ready state. This method may panic or sleep forever - /// if it is called with a combined interest. 
- pub async fn async_io( - &self, - interest: Interest, - f: impl FnMut() -> io::Result, - ) -> io::Result { - self.io.registration().async_io(interest, f).await - } -} - -impl AsyncRead for NamedPipeClient { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - unsafe { self.io.poll_read(cx, buf) } - } -} - -impl AsyncWrite for NamedPipeClient { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.io.poll_write(cx, buf) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.io.poll_write_vectored(cx, bufs) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.poll_flush(cx) - } -} - -impl AsRawHandle for NamedPipeClient { - fn as_raw_handle(&self) -> RawHandle { - self.io.as_raw_handle() - } -} - -impl AsHandle for NamedPipeClient { - fn as_handle(&self) -> BorrowedHandle<'_> { - unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) } - } -} - -/// A builder structure for construct a named pipe with named pipe-specific -/// options. This is required to use for named pipe servers who wants to modify -/// pipe-related options. -/// -/// See [`ServerOptions::create`]. -#[derive(Debug, Clone)] -pub struct ServerOptions { - // dwOpenMode - access_inbound: bool, - access_outbound: bool, - first_pipe_instance: bool, - write_dac: bool, - write_owner: bool, - access_system_security: bool, - // dwPipeMode - pipe_mode: PipeMode, - reject_remote_clients: bool, - // other options - max_instances: u32, - out_buffer_size: u32, - in_buffer_size: u32, - default_timeout: u32, -} - -impl ServerOptions { - /// Creates a new named pipe builder with the default settings. 
- /// - /// ``` - /// use tokio::net::windows::named_pipe::ServerOptions; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-new"; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// let server = ServerOptions::new().create(PIPE_NAME)?; - /// # Ok(()) } - /// ``` - pub fn new() -> ServerOptions { - ServerOptions { - access_inbound: true, - access_outbound: true, - first_pipe_instance: false, - write_dac: false, - write_owner: false, - access_system_security: false, - pipe_mode: PipeMode::Byte, - reject_remote_clients: true, - max_instances: windows_sys::PIPE_UNLIMITED_INSTANCES, - out_buffer_size: 65536, - in_buffer_size: 65536, - default_timeout: 0, - } - } - - /// The pipe mode. - /// - /// The default pipe mode is [`PipeMode::Byte`]. See [`PipeMode`] for - /// documentation of what each mode means. - /// - /// This corresponds to specifying `PIPE_TYPE_` and `PIPE_READMODE_` in [`dwPipeMode`]. - /// - /// [`dwPipeMode`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea - pub fn pipe_mode(&mut self, pipe_mode: PipeMode) -> &mut Self { - self.pipe_mode = pipe_mode; - self - } - - /// The flow of data in the pipe goes from client to server only. - /// - /// This corresponds to setting [`PIPE_ACCESS_INBOUND`]. - /// - /// [`PIPE_ACCESS_INBOUND`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea#pipe_access_inbound - /// - /// # Errors - /// - /// Server side prevents connecting by denying inbound access, client errors - /// with [`std::io::ErrorKind::PermissionDenied`] when attempting to create - /// the connection. 
- /// - /// ``` - /// use std::io; - /// use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-access-inbound-err1"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let _server = ServerOptions::new() - /// .access_inbound(false) - /// .create(PIPE_NAME)?; - /// - /// let e = ClientOptions::new() - /// .open(PIPE_NAME) - /// .unwrap_err(); - /// - /// assert_eq!(e.kind(), io::ErrorKind::PermissionDenied); - /// # Ok(()) } - /// ``` - /// - /// Disabling writing allows a client to connect, but errors with - /// [`std::io::ErrorKind::PermissionDenied`] if a write is attempted. - /// - /// ``` - /// use std::io; - /// use tokio::io::AsyncWriteExt; - /// use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-access-inbound-err2"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let server = ServerOptions::new() - /// .access_inbound(false) - /// .create(PIPE_NAME)?; - /// - /// let mut client = ClientOptions::new() - /// .write(false) - /// .open(PIPE_NAME)?; - /// - /// server.connect().await?; - /// - /// let e = client.write(b"ping").await.unwrap_err(); - /// assert_eq!(e.kind(), io::ErrorKind::PermissionDenied); - /// # Ok(()) } - /// ``` - /// - /// # Examples - /// - /// A unidirectional named pipe that only supports server-to-client - /// communication. 
- /// - /// ``` - /// use std::io; - /// use tokio::io::{AsyncReadExt, AsyncWriteExt}; - /// use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-access-inbound"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let mut server = ServerOptions::new() - /// .access_inbound(false) - /// .create(PIPE_NAME)?; - /// - /// let mut client = ClientOptions::new() - /// .write(false) - /// .open(PIPE_NAME)?; - /// - /// server.connect().await?; - /// - /// let write = server.write_all(b"ping"); - /// - /// let mut buf = [0u8; 4]; - /// let read = client.read_exact(&mut buf); - /// - /// let ((), read) = tokio::try_join!(write, read)?; - /// - /// assert_eq!(read, 4); - /// assert_eq!(&buf[..], b"ping"); - /// # Ok(()) } - /// ``` - pub fn access_inbound(&mut self, allowed: bool) -> &mut Self { - self.access_inbound = allowed; - self - } - - /// The flow of data in the pipe goes from server to client only. - /// - /// This corresponds to setting [`PIPE_ACCESS_OUTBOUND`]. - /// - /// [`PIPE_ACCESS_OUTBOUND`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea#pipe_access_outbound - /// - /// # Errors - /// - /// Server side prevents connecting by denying outbound access, client - /// errors with [`std::io::ErrorKind::PermissionDenied`] when attempting to - /// create the connection. 
- /// - /// ``` - /// use std::io; - /// use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-access-outbound-err1"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let server = ServerOptions::new() - /// .access_outbound(false) - /// .create(PIPE_NAME)?; - /// - /// let e = ClientOptions::new() - /// .open(PIPE_NAME) - /// .unwrap_err(); - /// - /// assert_eq!(e.kind(), io::ErrorKind::PermissionDenied); - /// # Ok(()) } - /// ``` - /// - /// Disabling reading allows a client to connect, but attempting to read - /// will error with [`std::io::ErrorKind::PermissionDenied`]. - /// - /// ``` - /// use std::io; - /// use tokio::io::AsyncReadExt; - /// use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-access-outbound-err2"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let server = ServerOptions::new() - /// .access_outbound(false) - /// .create(PIPE_NAME)?; - /// - /// let mut client = ClientOptions::new() - /// .read(false) - /// .open(PIPE_NAME)?; - /// - /// server.connect().await?; - /// - /// let mut buf = [0u8; 4]; - /// let e = client.read(&mut buf).await.unwrap_err(); - /// assert_eq!(e.kind(), io::ErrorKind::PermissionDenied); - /// # Ok(()) } - /// ``` - /// - /// # Examples - /// - /// A unidirectional named pipe that only supports client-to-server - /// communication. 
- /// - /// ``` - /// use tokio::io::{AsyncReadExt, AsyncWriteExt}; - /// use tokio::net::windows::named_pipe::{ClientOptions, ServerOptions}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-access-outbound"; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// let mut server = ServerOptions::new() - /// .access_outbound(false) - /// .create(PIPE_NAME)?; - /// - /// let mut client = ClientOptions::new() - /// .read(false) - /// .open(PIPE_NAME)?; - /// - /// server.connect().await?; - /// - /// let write = client.write_all(b"ping"); - /// - /// let mut buf = [0u8; 4]; - /// let read = server.read_exact(&mut buf); - /// - /// let ((), read) = tokio::try_join!(write, read)?; - /// - /// println!("done reading and writing"); - /// - /// assert_eq!(read, 4); - /// assert_eq!(&buf[..], b"ping"); - /// # Ok(()) } - /// ``` - pub fn access_outbound(&mut self, allowed: bool) -> &mut Self { - self.access_outbound = allowed; - self - } - - /// If you attempt to create multiple instances of a pipe with this flag - /// set, creation of the first server instance succeeds, but creation of any - /// subsequent instances will fail with - /// [`std::io::ErrorKind::PermissionDenied`]. - /// - /// This option is intended to be used with servers that want to ensure that - /// they are the only process listening for clients on a given named pipe. - /// This is accomplished by enabling it for the first server instance - /// created in a process. - /// - /// This corresponds to setting [`FILE_FLAG_FIRST_PIPE_INSTANCE`]. - /// - /// # Errors - /// - /// If this option is set and more than one instance of the server for a - /// given named pipe exists, calling [`create`] will fail with - /// [`std::io::ErrorKind::PermissionDenied`]. 
- /// - /// ``` - /// use std::io; - /// use tokio::net::windows::named_pipe::ServerOptions; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-first-instance-error"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let server1 = ServerOptions::new() - /// .first_pipe_instance(true) - /// .create(PIPE_NAME)?; - /// - /// // Second server errs, since it's not the first instance. - /// let e = ServerOptions::new() - /// .first_pipe_instance(true) - /// .create(PIPE_NAME) - /// .unwrap_err(); - /// - /// assert_eq!(e.kind(), io::ErrorKind::PermissionDenied); - /// # Ok(()) } - /// ``` - /// - /// # Examples - /// - /// ``` - /// use std::io; - /// use tokio::net::windows::named_pipe::ServerOptions; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-first-instance"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let mut builder = ServerOptions::new(); - /// builder.first_pipe_instance(true); - /// - /// let server = builder.create(PIPE_NAME)?; - /// let e = builder.create(PIPE_NAME).unwrap_err(); - /// assert_eq!(e.kind(), io::ErrorKind::PermissionDenied); - /// drop(server); - /// - /// // OK: since, we've closed the other instance. - /// let _server2 = builder.create(PIPE_NAME)?; - /// # Ok(()) } - /// ``` - /// - /// [`create`]: ServerOptions::create - /// [`FILE_FLAG_FIRST_PIPE_INSTANCE`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea#pipe_first_pipe_instance - pub fn first_pipe_instance(&mut self, first: bool) -> &mut Self { - self.first_pipe_instance = first; - self - } - - /// Requests permission to modify the pipe's discretionary access control list. - /// - /// This corresponds to setting [`WRITE_DAC`] in dwOpenMode. 
- /// - /// # Examples - /// - /// ``` - /// use std::{io, os::windows::prelude::AsRawHandle, ptr}; - // - /// use tokio::net::windows::named_pipe::ServerOptions; - /// use windows_sys::{ - /// Win32::Foundation::ERROR_SUCCESS, - /// Win32::Security::DACL_SECURITY_INFORMATION, - /// Win32::Security::Authorization::{SetSecurityInfo, SE_KERNEL_OBJECT}, - /// }; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\write_dac_pipe"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let mut pipe_template = ServerOptions::new(); - /// pipe_template.write_dac(true); - /// let pipe = pipe_template.create(PIPE_NAME)?; - /// - /// unsafe { - /// assert_eq!( - /// ERROR_SUCCESS, - /// SetSecurityInfo( - /// pipe.as_raw_handle() as _, - /// SE_KERNEL_OBJECT, - /// DACL_SECURITY_INFORMATION, - /// ptr::null_mut(), - /// ptr::null_mut(), - /// ptr::null_mut(), - /// ptr::null_mut(), - /// ) - /// ); - /// } - /// - /// # Ok(()) } - /// ``` - /// - /// ``` - /// use std::{io, os::windows::prelude::AsRawHandle, ptr}; - // - /// use tokio::net::windows::named_pipe::ServerOptions; - /// use windows_sys::{ - /// Win32::Foundation::ERROR_ACCESS_DENIED, - /// Win32::Security::DACL_SECURITY_INFORMATION, - /// Win32::Security::Authorization::{SetSecurityInfo, SE_KERNEL_OBJECT}, - /// }; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\write_dac_pipe_fail"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let mut pipe_template = ServerOptions::new(); - /// pipe_template.write_dac(false); - /// let pipe = pipe_template.create(PIPE_NAME)?; - /// - /// unsafe { - /// assert_eq!( - /// ERROR_ACCESS_DENIED, - /// SetSecurityInfo( - /// pipe.as_raw_handle() as _, - /// SE_KERNEL_OBJECT, - /// DACL_SECURITY_INFORMATION, - /// ptr::null_mut(), - /// ptr::null_mut(), - /// ptr::null_mut(), - /// ptr::null_mut(), - /// ) - /// ); - /// } - /// - /// # Ok(()) } - /// ``` - /// - /// [`WRITE_DAC`]: 
https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea - pub fn write_dac(&mut self, requested: bool) -> &mut Self { - self.write_dac = requested; - self - } - - /// Requests permission to modify the pipe's owner. - /// - /// This corresponds to setting [`WRITE_OWNER`] in dwOpenMode. - /// - /// [`WRITE_OWNER`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea - pub fn write_owner(&mut self, requested: bool) -> &mut Self { - self.write_owner = requested; - self - } - - /// Requests permission to modify the pipe's system access control list. - /// - /// This corresponds to setting [`ACCESS_SYSTEM_SECURITY`] in dwOpenMode. - /// - /// [`ACCESS_SYSTEM_SECURITY`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea - pub fn access_system_security(&mut self, requested: bool) -> &mut Self { - self.access_system_security = requested; - self - } - - /// Indicates whether this server can accept remote clients or not. Remote - /// clients are disabled by default. - /// - /// This corresponds to setting [`PIPE_REJECT_REMOTE_CLIENTS`]. - /// - /// [`PIPE_REJECT_REMOTE_CLIENTS`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea#pipe_reject_remote_clients - pub fn reject_remote_clients(&mut self, reject: bool) -> &mut Self { - self.reject_remote_clients = reject; - self - } - - /// The maximum number of instances that can be created for this pipe. The - /// first instance of the pipe can specify this value; the same number must - /// be specified for other instances of the pipe. Acceptable values are in - /// the range 1 through 254. The default value is unlimited. - /// - /// This corresponds to specifying [`nMaxInstances`]. 
- /// - /// [`nMaxInstances`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea - /// - /// # Errors - /// - /// The same numbers of `max_instances` have to be used by all servers. Any - /// additional servers trying to be built which uses a mismatching value - /// might error. - /// - /// ``` - /// use std::io; - /// use tokio::net::windows::named_pipe::{ServerOptions, ClientOptions}; - /// use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-max-instances"; - /// - /// # #[tokio::main] async fn main() -> io::Result<()> { - /// let mut server = ServerOptions::new(); - /// server.max_instances(2); - /// - /// let s1 = server.create(PIPE_NAME)?; - /// let c1 = ClientOptions::new().open(PIPE_NAME); - /// - /// let s2 = server.create(PIPE_NAME)?; - /// let c2 = ClientOptions::new().open(PIPE_NAME); - /// - /// // Too many servers! - /// let e = server.create(PIPE_NAME).unwrap_err(); - /// assert_eq!(e.raw_os_error(), Some(ERROR_PIPE_BUSY as i32)); - /// - /// // Still too many servers even if we specify a higher value! - /// let e = server.max_instances(100).create(PIPE_NAME).unwrap_err(); - /// assert_eq!(e.raw_os_error(), Some(ERROR_PIPE_BUSY as i32)); - /// # Ok(()) } - /// ``` - /// - /// # Panics - /// - /// This function will panic if more than 254 instances are specified. If - /// you do not wish to set an instance limit, leave it unspecified. - /// - /// ```should_panic - /// use tokio::net::windows::named_pipe::ServerOptions; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// let builder = ServerOptions::new().max_instances(255); - /// # Ok(()) } - /// ``` - #[track_caller] - pub fn max_instances(&mut self, instances: usize) -> &mut Self { - assert!(instances < 255, "cannot specify more than 254 instances"); - self.max_instances = instances as u32; - self - } - - /// The number of bytes to reserve for the output buffer. 
- /// - /// This corresponds to specifying [`nOutBufferSize`]. - /// - /// [`nOutBufferSize`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea - pub fn out_buffer_size(&mut self, buffer: u32) -> &mut Self { - self.out_buffer_size = buffer; - self - } - - /// The number of bytes to reserve for the input buffer. - /// - /// This corresponds to specifying [`nInBufferSize`]. - /// - /// [`nInBufferSize`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea - pub fn in_buffer_size(&mut self, buffer: u32) -> &mut Self { - self.in_buffer_size = buffer; - self - } - - /// Creates the named pipe identified by `addr` for use as a server. - /// - /// This uses the [`CreateNamedPipe`] function. - /// - /// [`CreateNamedPipe`]: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-createnamedpipea - /// - /// # Errors - /// - /// This errors if called outside of a [Tokio Runtime], or in a runtime that - /// has not [enabled I/O], or if any OS-specific I/O errors occur. - /// - /// [Tokio Runtime]: crate::runtime::Runtime - /// [enabled I/O]: crate::runtime::Builder::enable_io - /// - /// # Examples - /// - /// ``` - /// use tokio::net::windows::named_pipe::ServerOptions; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-create"; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// let server = ServerOptions::new().create(PIPE_NAME)?; - /// # Ok(()) } - /// ``` - pub fn create(&self, addr: impl AsRef) -> io::Result { - // Safety: We're calling create_with_security_attributes_raw w/ a null - // pointer which disables it. - unsafe { self.create_with_security_attributes_raw(addr, ptr::null_mut()) } - } - - /// Creates the named pipe identified by `addr` for use as a server. 
- /// - /// This is the same as [`create`] except that it supports providing the raw - /// pointer to a structure of [`SECURITY_ATTRIBUTES`] which will be passed - /// as the `lpSecurityAttributes` argument to [`CreateFile`]. - /// - /// # Errors - /// - /// This errors if called outside of a [Tokio Runtime], or in a runtime that - /// has not [enabled I/O], or if any OS-specific I/O errors occur. - /// - /// [Tokio Runtime]: crate::runtime::Runtime - /// [enabled I/O]: crate::runtime::Builder::enable_io - /// - /// # Safety - /// - /// The `attrs` argument must either be null or point at a valid instance of - /// the [`SECURITY_ATTRIBUTES`] structure. If the argument is null, the - /// behavior is identical to calling the [`create`] method. - /// - /// [`create`]: ServerOptions::create - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew - /// [`SECURITY_ATTRIBUTES`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/Security/struct.SECURITY_ATTRIBUTES.html - pub unsafe fn create_with_security_attributes_raw( - &self, - addr: impl AsRef, - attrs: *mut c_void, - ) -> io::Result { - let addr = encode_addr(addr); - - let pipe_mode = { - let mut mode = if matches!(self.pipe_mode, PipeMode::Message) { - windows_sys::PIPE_TYPE_MESSAGE | windows_sys::PIPE_READMODE_MESSAGE - } else { - windows_sys::PIPE_TYPE_BYTE | windows_sys::PIPE_READMODE_BYTE - }; - if self.reject_remote_clients { - mode |= windows_sys::PIPE_REJECT_REMOTE_CLIENTS; - } else { - mode |= windows_sys::PIPE_ACCEPT_REMOTE_CLIENTS; - } - mode - }; - let open_mode = { - let mut mode = windows_sys::FILE_FLAG_OVERLAPPED; - if self.access_inbound { - mode |= windows_sys::PIPE_ACCESS_INBOUND; - } - if self.access_outbound { - mode |= windows_sys::PIPE_ACCESS_OUTBOUND; - } - if self.first_pipe_instance { - mode |= windows_sys::FILE_FLAG_FIRST_PIPE_INSTANCE; - } - if self.write_dac { - mode |= windows_sys::WRITE_DAC; - } - if self.write_owner { - mode |= 
windows_sys::WRITE_OWNER; - } - if self.access_system_security { - mode |= windows_sys::ACCESS_SYSTEM_SECURITY; - } - mode - }; - - let h = windows_sys::CreateNamedPipeW( - addr.as_ptr(), - open_mode, - pipe_mode, - self.max_instances, - self.out_buffer_size, - self.in_buffer_size, - self.default_timeout, - attrs as *mut _, - ); - - if h == windows_sys::INVALID_HANDLE_VALUE { - return Err(io::Error::last_os_error()); - } - - NamedPipeServer::from_raw_handle(h as _) - } -} - -/// A builder suitable for building and interacting with named pipes from the -/// client side. -/// -/// See [`ClientOptions::open`]. -#[derive(Debug, Clone)] -pub struct ClientOptions { - generic_read: bool, - generic_write: bool, - security_qos_flags: u32, - pipe_mode: PipeMode, -} - -impl ClientOptions { - /// Creates a new named pipe builder with the default settings. - /// - /// ``` - /// use tokio::net::windows::named_pipe::{ServerOptions, ClientOptions}; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\tokio-named-pipe-client-new"; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// // Server must be created in order for the client creation to succeed. - /// let server = ServerOptions::new().create(PIPE_NAME)?; - /// let client = ClientOptions::new().open(PIPE_NAME)?; - /// # Ok(()) } - /// ``` - pub fn new() -> Self { - Self { - generic_read: true, - generic_write: true, - security_qos_flags: windows_sys::SECURITY_IDENTIFICATION - | windows_sys::SECURITY_SQOS_PRESENT, - pipe_mode: PipeMode::Byte, - } - } - - /// If the client supports reading data. This is enabled by default. - /// - /// This corresponds to setting [`GENERIC_READ`] in the call to [`CreateFile`]. 
- /// - /// [`GENERIC_READ`]: https://docs.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew - pub fn read(&mut self, allowed: bool) -> &mut Self { - self.generic_read = allowed; - self - } - - /// If the created pipe supports writing data. This is enabled by default. - /// - /// This corresponds to setting [`GENERIC_WRITE`] in the call to [`CreateFile`]. - /// - /// [`GENERIC_WRITE`]: https://docs.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew - pub fn write(&mut self, allowed: bool) -> &mut Self { - self.generic_write = allowed; - self - } - - /// Sets qos flags which are combined with other flags and attributes in the - /// call to [`CreateFile`]. - /// - /// By default `security_qos_flags` is set to [`SECURITY_IDENTIFICATION`], - /// calling this function would override that value completely with the - /// argument specified. - /// - /// When `security_qos_flags` is not set, a malicious program can gain the - /// elevated privileges of a privileged Rust process when it allows opening - /// user-specified paths, by tricking it into opening a named pipe. So - /// arguably `security_qos_flags` should also be set when opening arbitrary - /// paths. However the bits can then conflict with other flags, specifically - /// `FILE_FLAG_OPEN_NO_RECALL`. - /// - /// For information about possible values, see [Impersonation Levels] on the - /// Windows Dev Center site. The `SECURITY_SQOS_PRESENT` flag is set - /// automatically when using this method. 
- /// - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea - /// [`SECURITY_IDENTIFICATION`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/Storage/FileSystem/constant.SECURITY_IDENTIFICATION.html - /// [Impersonation Levels]: https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level - pub fn security_qos_flags(&mut self, flags: u32) -> &mut Self { - // See: https://github.com/rust-lang/rust/pull/58216 - self.security_qos_flags = flags | windows_sys::SECURITY_SQOS_PRESENT; - self - } - - /// The pipe mode. - /// - /// The default pipe mode is [`PipeMode::Byte`]. See [`PipeMode`] for - /// documentation of what each mode means. - pub fn pipe_mode(&mut self, pipe_mode: PipeMode) -> &mut Self { - self.pipe_mode = pipe_mode; - self - } - - /// Opens the named pipe identified by `addr`. - /// - /// This opens the client using [`CreateFile`] with the - /// `dwCreationDisposition` option set to `OPEN_EXISTING`. - /// - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea - /// - /// # Errors - /// - /// This errors if called outside of a [Tokio Runtime], or in a runtime that - /// has not [enabled I/O], or if any OS-specific I/O errors occur. - /// - /// There are a few errors you need to take into account when creating a - /// named pipe on the client side: - /// - /// * [`std::io::ErrorKind::NotFound`] - This indicates that the named pipe - /// does not exist. Presumably the server is not up. - /// * [`ERROR_PIPE_BUSY`] - This error is raised when the named pipe exists, - /// but the server is not currently waiting for a connection. Please see the - /// examples for how to check for this error. 
- /// - /// [`ERROR_PIPE_BUSY`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/Foundation/constant.ERROR_PIPE_BUSY.html - /// [enabled I/O]: crate::runtime::Builder::enable_io - /// [Tokio Runtime]: crate::runtime::Runtime - /// - /// A connect loop that waits until a pipe becomes available looks like - /// this: - /// - /// ```no_run - /// use std::time::Duration; - /// use tokio::net::windows::named_pipe::ClientOptions; - /// use tokio::time; - /// use windows_sys::Win32::Foundation::ERROR_PIPE_BUSY; - /// - /// const PIPE_NAME: &str = r"\\.\pipe\mynamedpipe"; - /// - /// # #[tokio::main] async fn main() -> std::io::Result<()> { - /// let client = loop { - /// match ClientOptions::new().open(PIPE_NAME) { - /// Ok(client) => break client, - /// Err(e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (), - /// Err(e) => return Err(e), - /// } - /// - /// time::sleep(Duration::from_millis(50)).await; - /// }; - /// - /// // use the connected client. - /// # Ok(()) } - /// ``` - pub fn open(&self, addr: impl AsRef) -> io::Result { - // Safety: We're calling open_with_security_attributes_raw w/ a null - // pointer which disables it. - unsafe { self.open_with_security_attributes_raw(addr, ptr::null_mut()) } - } - - /// Opens the named pipe identified by `addr`. - /// - /// This is the same as [`open`] except that it supports providing the raw - /// pointer to a structure of [`SECURITY_ATTRIBUTES`] which will be passed - /// as the `lpSecurityAttributes` argument to [`CreateFile`]. - /// - /// # Safety - /// - /// The `attrs` argument must either be null or point at a valid instance of - /// the [`SECURITY_ATTRIBUTES`] structure. If the argument is null, the - /// behavior is identical to calling the [`open`] method. 
- /// - /// [`open`]: ClientOptions::open - /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew - /// [`SECURITY_ATTRIBUTES`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/Security/struct.SECURITY_ATTRIBUTES.html - pub unsafe fn open_with_security_attributes_raw( - &self, - addr: impl AsRef, - attrs: *mut c_void, - ) -> io::Result { - let addr = encode_addr(addr); - - let desired_access = { - let mut access = 0; - if self.generic_read { - access |= windows_sys::GENERIC_READ; - } - if self.generic_write { - access |= windows_sys::GENERIC_WRITE; - } - access - }; - - // NB: We could use a platform specialized `OpenOptions` here, but since - // we have access to windows_sys it ultimately doesn't hurt to use - // `CreateFile` explicitly since it allows the use of our already - // well-structured wide `addr` to pass into CreateFileW. - let h = windows_sys::CreateFileW( - addr.as_ptr(), - desired_access, - 0, - attrs as *mut _, - windows_sys::OPEN_EXISTING, - self.get_flags(), - 0, - ); - - if h == windows_sys::INVALID_HANDLE_VALUE { - return Err(io::Error::last_os_error()); - } - - if matches!(self.pipe_mode, PipeMode::Message) { - let mode = windows_sys::PIPE_READMODE_MESSAGE; - let result = - windows_sys::SetNamedPipeHandleState(h, &mode, ptr::null_mut(), ptr::null_mut()); - - if result == 0 { - return Err(io::Error::last_os_error()); - } - } - - NamedPipeClient::from_raw_handle(h as _) - } - - fn get_flags(&self) -> u32 { - self.security_qos_flags | windows_sys::FILE_FLAG_OVERLAPPED - } -} - -/// The pipe mode of a named pipe. -/// -/// Set through [`ServerOptions::pipe_mode`]. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum PipeMode { - /// Data is written to the pipe as a stream of bytes. The pipe does not - /// distinguish bytes written during different write operations. - /// - /// Corresponds to [`PIPE_TYPE_BYTE`]. 
- /// - /// [`PIPE_TYPE_BYTE`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/System/Pipes/constant.PIPE_TYPE_BYTE.html - Byte, - /// Data is written to the pipe as a stream of messages. The pipe treats the - /// bytes written during each write operation as a message unit. Any reading - /// on a named pipe returns [`ERROR_MORE_DATA`] when a message is not read - /// completely. - /// - /// Corresponds to [`PIPE_TYPE_MESSAGE`]. - /// - /// [`ERROR_MORE_DATA`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/Foundation/constant.ERROR_MORE_DATA.html - /// [`PIPE_TYPE_MESSAGE`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/System/Pipes/constant.PIPE_TYPE_MESSAGE.html - Message, -} - -/// Indicates the end of a named pipe. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[non_exhaustive] -pub enum PipeEnd { - /// The named pipe refers to the client end of a named pipe instance. - /// - /// Corresponds to [`PIPE_CLIENT_END`]. - /// - /// [`PIPE_CLIENT_END`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/System/Pipes/constant.PIPE_CLIENT_END.html - Client, - /// The named pipe refers to the server end of a named pipe instance. - /// - /// Corresponds to [`PIPE_SERVER_END`]. - /// - /// [`PIPE_SERVER_END`]: https://docs.rs/windows-sys/latest/windows_sys/Win32/System/Pipes/constant.PIPE_SERVER_END.html - Server, -} - -/// Information about a named pipe. -/// -/// Constructed through [`NamedPipeServer::info`] or [`NamedPipeClient::info`]. -#[derive(Debug)] -#[non_exhaustive] -pub struct PipeInfo { - /// Indicates the mode of a named pipe. - pub mode: PipeMode, - /// Indicates the end of a named pipe. - pub end: PipeEnd, - /// The maximum number of instances that can be created for this pipe. - pub max_instances: u32, - /// The number of bytes to reserve for the output buffer. - pub out_buffer_size: u32, - /// The number of bytes to reserve for the input buffer. 
- pub in_buffer_size: u32, -} - -/// Encodes an address so that it is a null-terminated wide string. -fn encode_addr(addr: impl AsRef) -> Box<[u16]> { - let len = addr.as_ref().encode_wide().count(); - let mut vec = Vec::with_capacity(len + 1); - vec.extend(addr.as_ref().encode_wide()); - vec.push(0); - vec.into_boxed_slice() -} - -/// Internal function to get the info out of a raw named pipe. -unsafe fn named_pipe_info(handle: RawHandle) -> io::Result { - let mut flags = 0; - let mut out_buffer_size = 0; - let mut in_buffer_size = 0; - let mut max_instances = 0; - - let result = windows_sys::GetNamedPipeInfo( - handle as _, - &mut flags, - &mut out_buffer_size, - &mut in_buffer_size, - &mut max_instances, - ); - - if result == 0 { - return Err(io::Error::last_os_error()); - } - - let mut end = PipeEnd::Client; - let mut mode = PipeMode::Byte; - - if flags & windows_sys::PIPE_SERVER_END != 0 { - end = PipeEnd::Server; - } - - if flags & windows_sys::PIPE_TYPE_MESSAGE != 0 { - mode = PipeMode::Message; - } - - Ok(PipeInfo { - end, - mode, - out_buffer_size, - in_buffer_size, - max_instances, - }) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/process/kill.rs s390-tools-2.33.1/rust-vendor/tokio/src/process/kill.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/process/kill.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/process/kill.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -use std::io; - -/// An interface for killing a running process. -pub(crate) trait Kill { - /// Forcefully kills the process. 
- fn kill(&mut self) -> io::Result<()>; -} - -impl Kill for &mut T { - fn kill(&mut self) -> io::Result<()> { - (**self).kill() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/process/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/process/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/process/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/process/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1678 +0,0 @@ -//! An implementation of asynchronous process management for Tokio. -//! -//! This module provides a [`Command`] struct that imitates the interface of the -//! [`std::process::Command`] type in the standard library, but provides asynchronous versions of -//! functions that create processes. These functions (`spawn`, `status`, `output` and their -//! variants) return "future aware" types that interoperate with Tokio. The asynchronous process -//! support is provided through signal handling on Unix and system APIs on Windows. -//! -//! [`std::process::Command`]: std::process::Command -//! -//! # Examples -//! -//! Here's an example program which will spawn `echo hello world` and then wait -//! for it complete. -//! -//! ```no_run -//! use tokio::process::Command; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! // The usage is similar as with the standard library's `Command` type -//! let mut child = Command::new("echo") -//! .arg("hello") -//! .arg("world") -//! .spawn() -//! .expect("failed to spawn"); -//! -//! // Await until the command completes -//! let status = child.wait().await?; -//! println!("the command exited with: {}", status); -//! Ok(()) -//! } -//! ``` -//! -//! Next, let's take a look at an example where we not only spawn `echo hello -//! world` but we also capture its output. -//! -//! ```no_run -//! use tokio::process::Command; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! // Like above, but use `output` which returns a future instead of -//! 
// immediately returning the `Child`. -//! let output = Command::new("echo").arg("hello").arg("world") -//! .output(); -//! -//! let output = output.await?; -//! -//! assert!(output.status.success()); -//! assert_eq!(output.stdout, b"hello world\n"); -//! Ok(()) -//! } -//! ``` -//! -//! We can also read input line by line. -//! -//! ```no_run -//! use tokio::io::{BufReader, AsyncBufReadExt}; -//! use tokio::process::Command; -//! -//! use std::process::Stdio; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let mut cmd = Command::new("cat"); -//! -//! // Specify that we want the command's standard output piped back to us. -//! // By default, standard input/output/error will be inherited from the -//! // current process (for example, this means that standard input will -//! // come from the keyboard and standard output/error will go directly to -//! // the terminal if this process is invoked from the command line). -//! cmd.stdout(Stdio::piped()); -//! -//! let mut child = cmd.spawn() -//! .expect("failed to spawn command"); -//! -//! let stdout = child.stdout.take() -//! .expect("child did not have a handle to stdout"); -//! -//! let mut reader = BufReader::new(stdout).lines(); -//! -//! // Ensure the child process is spawned in the runtime so it can -//! // make progress on its own while we await for any output. -//! tokio::spawn(async move { -//! let status = child.wait().await -//! .expect("child process encountered an error"); -//! -//! println!("child status was: {}", status); -//! }); -//! -//! while let Some(line) = reader.next_line().await? { -//! println!("Line: {}", line); -//! } -//! -//! Ok(()) -//! } -//! ``` -//! -//! Here is another example using `sort` writing into the child process -//! standard input, capturing the output of the sorted text. -//! -//! ```no_run -//! use tokio::io::AsyncWriteExt; -//! use tokio::process::Command; -//! -//! use std::process::Stdio; -//! -//! #[tokio::main] -//! 
async fn main() -> Result<(), Box> { -//! let mut cmd = Command::new("sort"); -//! -//! // Specifying that we want pipe both the output and the input. -//! // Similarly to capturing the output, by configuring the pipe -//! // to stdin it can now be used as an asynchronous writer. -//! cmd.stdout(Stdio::piped()); -//! cmd.stdin(Stdio::piped()); -//! -//! let mut child = cmd.spawn().expect("failed to spawn command"); -//! -//! // These are the animals we want to sort -//! let animals: &[&str] = &["dog", "bird", "frog", "cat", "fish"]; -//! -//! let mut stdin = child -//! .stdin -//! .take() -//! .expect("child did not have a handle to stdin"); -//! -//! // Write our animals to the child process -//! // Note that the behavior of `sort` is to buffer _all input_ before writing any output. -//! // In the general sense, it is recommended to write to the child in a separate task as -//! // awaiting its exit (or output) to avoid deadlocks (for example, the child tries to write -//! // some output but gets stuck waiting on the parent to read from it, meanwhile the parent -//! // is stuck waiting to write its input completely before reading the output). -//! stdin -//! .write(animals.join("\n").as_bytes()) -//! .await -//! .expect("could not write to stdin"); -//! -//! // We drop the handle here which signals EOF to the child process. -//! // This tells the child process that it there is no more data on the pipe. -//! drop(stdin); -//! -//! let op = child.wait_with_output().await?; -//! -//! // Results should come back in sorted order -//! assert_eq!(op.stdout, "bird\ncat\ndog\nfish\nfrog\n".as_bytes()); -//! -//! Ok(()) -//! } -//! ``` -//! -//! With some coordination, we can also pipe the output of one command into -//! another. -//! -//! ```no_run -//! use tokio::join; -//! use tokio::process::Command; -//! use std::process::Stdio; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let mut echo = Command::new("echo") -//! .arg("hello world!") -//! 
.stdout(Stdio::piped()) -//! .spawn() -//! .expect("failed to spawn echo"); -//! -//! let tr_stdin: Stdio = echo -//! .stdout -//! .take() -//! .unwrap() -//! .try_into() -//! .expect("failed to convert to Stdio"); -//! -//! let tr = Command::new("tr") -//! .arg("a-z") -//! .arg("A-Z") -//! .stdin(tr_stdin) -//! .stdout(Stdio::piped()) -//! .spawn() -//! .expect("failed to spawn tr"); -//! -//! let (echo_result, tr_output) = join!(echo.wait(), tr.wait_with_output()); -//! -//! assert!(echo_result.unwrap().success()); -//! -//! let tr_output = tr_output.expect("failed to await tr"); -//! assert!(tr_output.status.success()); -//! -//! assert_eq!(tr_output.stdout, b"HELLO WORLD!\n"); -//! -//! Ok(()) -//! } -//! ``` -//! -//! # Caveats -//! -//! ## Dropping/Cancellation -//! -//! Similar to the behavior to the standard library, and unlike the futures -//! paradigm of dropping-implies-cancellation, a spawned process will, by -//! default, continue to execute even after the `Child` handle has been dropped. -//! -//! The [`Command::kill_on_drop`] method can be used to modify this behavior -//! and kill the child process if the `Child` wrapper is dropped before it -//! has exited. -//! -//! ## Unix Processes -//! -//! On Unix platforms processes must be "reaped" by their parent process after -//! they have exited in order to release all OS resources. A child process which -//! has exited, but has not yet been reaped by its parent is considered a "zombie" -//! process. Such processes continue to count against limits imposed by the system, -//! and having too many zombie processes present can prevent additional processes -//! from being spawned. -//! -//! The tokio runtime will, on a best-effort basis, attempt to reap and clean up -//! any process which it has spawned. No additional guarantees are made with regard to -//! how quickly or how often this procedure will take place. -//! -//! It is recommended to avoid dropping a [`Child`] process handle before it has been -//! 
fully `await`ed if stricter cleanup guarantees are required. -//! -//! [`Command`]: crate::process::Command -//! [`Command::kill_on_drop`]: crate::process::Command::kill_on_drop -//! [`Child`]: crate::process::Child - -#[path = "unix/mod.rs"] -#[cfg(unix)] -mod imp; - -#[cfg(unix)] -pub(crate) mod unix { - pub(crate) use super::imp::*; -} - -#[path = "windows.rs"] -#[cfg(windows)] -mod imp; - -mod kill; - -use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; -use crate::process::kill::Kill; - -use std::ffi::OsStr; -use std::future::Future; -use std::io; -use std::path::Path; -use std::pin::Pin; -use std::process::{Command as StdCommand, ExitStatus, Output, Stdio}; -use std::task::Context; -use std::task::Poll; - -#[cfg(unix)] -use std::os::unix::process::CommandExt; -#[cfg(windows)] -use std::os::windows::process::CommandExt; - -cfg_windows! { - use crate::os::windows::io::{AsRawHandle, RawHandle}; -} - -/// This structure mimics the API of [`std::process::Command`] found in the standard library, but -/// replaces functions that create a process with an asynchronous variant. The main provided -/// asynchronous functions are [spawn](Command::spawn), [status](Command::status), and -/// [output](Command::output). -/// -/// `Command` uses asynchronous versions of some `std` types (for example [`Child`]). 
-/// -/// [`std::process::Command`]: std::process::Command -/// [`Child`]: struct@Child -#[derive(Debug)] -pub struct Command { - std: StdCommand, - kill_on_drop: bool, -} - -pub(crate) struct SpawnedChild { - child: imp::Child, - stdin: Option, - stdout: Option, - stderr: Option, -} - -impl Command { - /// Constructs a new `Command` for launching the program at - /// path `program`, with the following default configuration: - /// - /// * No arguments to the program - /// * Inherit the current process's environment - /// * Inherit the current process's working directory - /// * Inherit stdin/stdout/stderr for `spawn` or `status`, but create pipes for `output` - /// - /// Builder methods are provided to change these defaults and - /// otherwise configure the process. - /// - /// If `program` is not an absolute path, the `PATH` will be searched in - /// an OS-defined way. - /// - /// The search path to be used may be controlled by setting the - /// `PATH` environment variable on the Command, - /// but this has some implementation limitations on Windows - /// (see issue [rust-lang/rust#37519]). - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// let mut command = Command::new("sh"); - /// # let _ = command.output(); // assert borrow checker - /// ``` - /// - /// [rust-lang/rust#37519]: https://github.com/rust-lang/rust/issues/37519 - pub fn new>(program: S) -> Command { - Self::from(StdCommand::new(program)) - } - - /// Cheaply convert to a `&std::process::Command` for places where the type from the standard - /// library is expected. - pub fn as_std(&self) -> &StdCommand { - &self.std - } - - /// Adds an argument to pass to the program. - /// - /// Only one argument can be passed per use. 
So instead of: - /// - /// ```no_run - /// let mut command = tokio::process::Command::new("sh"); - /// command.arg("-C /path/to/repo"); - /// - /// # let _ = command.output(); // assert borrow checker - /// ``` - /// - /// usage would be: - /// - /// ```no_run - /// let mut command = tokio::process::Command::new("sh"); - /// command.arg("-C"); - /// command.arg("/path/to/repo"); - /// - /// # let _ = command.output(); // assert borrow checker - /// ``` - /// - /// To pass multiple arguments see [`args`]. - /// - /// [`args`]: method@Self::args - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// - /// let output = Command::new("ls") - /// .arg("-l") - /// .arg("-a") - /// .output().await.unwrap(); - /// # } - /// - /// ``` - pub fn arg>(&mut self, arg: S) -> &mut Command { - self.std.arg(arg); - self - } - - /// Adds multiple arguments to pass to the program. - /// - /// To pass a single argument see [`arg`]. - /// - /// [`arg`]: method@Self::arg - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// - /// let output = Command::new("ls") - /// .args(&["-l", "-a"]) - /// .output().await.unwrap(); - /// # } - /// ``` - pub fn args(&mut self, args: I) -> &mut Command - where - I: IntoIterator, - S: AsRef, - { - self.std.args(args); - self - } - - cfg_windows! { - /// Append literal text to the command line without any quoting or escaping. - /// - /// This is useful for passing arguments to `cmd.exe /c`, which doesn't follow - /// `CommandLineToArgvW` escaping rules. - pub fn raw_arg>(&mut self, text_to_append_as_is: S) -> &mut Command { - self.std.raw_arg(text_to_append_as_is); - self - } - } - - /// Inserts or updates an environment variable mapping. 
- /// - /// Note that environment variable names are case-insensitive (but case-preserving) on Windows, - /// and case-sensitive on all other platforms. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// - /// let output = Command::new("ls") - /// .env("PATH", "/bin") - /// .output().await.unwrap(); - /// # } - /// ``` - pub fn env(&mut self, key: K, val: V) -> &mut Command - where - K: AsRef, - V: AsRef, - { - self.std.env(key, val); - self - } - - /// Adds or updates multiple environment variable mappings. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// use std::process::{Stdio}; - /// use std::env; - /// use std::collections::HashMap; - /// - /// let filtered_env : HashMap = - /// env::vars().filter(|&(ref k, _)| - /// k == "TERM" || k == "TZ" || k == "LANG" || k == "PATH" - /// ).collect(); - /// - /// let output = Command::new("printenv") - /// .stdin(Stdio::null()) - /// .stdout(Stdio::inherit()) - /// .env_clear() - /// .envs(&filtered_env) - /// .output().await.unwrap(); - /// # } - /// ``` - pub fn envs(&mut self, vars: I) -> &mut Command - where - I: IntoIterator, - K: AsRef, - V: AsRef, - { - self.std.envs(vars); - self - } - - /// Removes an environment variable mapping. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// - /// let output = Command::new("ls") - /// .env_remove("PATH") - /// .output().await.unwrap(); - /// # } - /// ``` - pub fn env_remove>(&mut self, key: K) -> &mut Command { - self.std.env_remove(key); - self - } - - /// Clears the entire environment map for the child process. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// - /// let output = Command::new("ls") - /// .env_clear() - /// .output().await.unwrap(); - /// # } - /// ``` - pub fn env_clear(&mut self) -> &mut Command { - self.std.env_clear(); - self - } - - /// Sets the working directory for the child process. - /// - /// # Platform-specific behavior - /// - /// If the program path is relative (e.g., `"./script.sh"`), it's ambiguous - /// whether it should be interpreted relative to the parent's working - /// directory or relative to `current_dir`. The behavior in this case is - /// platform specific and unstable, and it's recommended to use - /// [`canonicalize`] to get an absolute program path instead. - /// - /// [`canonicalize`]: crate::fs::canonicalize() - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// - /// let output = Command::new("ls") - /// .current_dir("/bin") - /// .output().await.unwrap(); - /// # } - /// ``` - pub fn current_dir>(&mut self, dir: P) -> &mut Command { - self.std.current_dir(dir); - self - } - - /// Sets configuration for the child process's standard input (stdin) handle. - /// - /// Defaults to [`inherit`] when used with `spawn` or `status`, and - /// defaults to [`piped`] when used with `output`. 
- /// - /// [`inherit`]: std::process::Stdio::inherit - /// [`piped`]: std::process::Stdio::piped - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use std::process::{Stdio}; - /// use tokio::process::Command; - /// - /// let output = Command::new("ls") - /// .stdin(Stdio::null()) - /// .output().await.unwrap(); - /// # } - /// ``` - pub fn stdin>(&mut self, cfg: T) -> &mut Command { - self.std.stdin(cfg); - self - } - - /// Sets configuration for the child process's standard output (stdout) handle. - /// - /// Defaults to [`inherit`] when used with `spawn` or `status`, and - /// defaults to [`piped`] when used with `output`. - /// - /// [`inherit`]: std::process::Stdio::inherit - /// [`piped`]: std::process::Stdio::piped - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// use std::process::Stdio; - /// - /// let output = Command::new("ls") - /// .stdout(Stdio::null()) - /// .output().await.unwrap(); - /// # } - /// ``` - pub fn stdout>(&mut self, cfg: T) -> &mut Command { - self.std.stdout(cfg); - self - } - - /// Sets configuration for the child process's standard error (stderr) handle. - /// - /// Defaults to [`inherit`] when used with `spawn` or `status`, and - /// defaults to [`piped`] when used with `output`. 
- /// - /// [`inherit`]: std::process::Stdio::inherit - /// [`piped`]: std::process::Stdio::piped - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// use std::process::{Stdio}; - /// - /// let output = Command::new("ls") - /// .stderr(Stdio::null()) - /// .output().await.unwrap(); - /// # } - /// ``` - pub fn stderr>(&mut self, cfg: T) -> &mut Command { - self.std.stderr(cfg); - self - } - - /// Controls whether a `kill` operation should be invoked on a spawned child - /// process when its corresponding `Child` handle is dropped. - /// - /// By default, this value is assumed to be `false`, meaning the next spawned - /// process will not be killed on drop, similar to the behavior of the standard - /// library. - /// - /// # Caveats - /// - /// On Unix platforms processes must be "reaped" by their parent process after - /// they have exited in order to release all OS resources. A child process which - /// has exited, but has not yet been reaped by its parent is considered a "zombie" - /// process. Such processes continue to count against limits imposed by the system, - /// and having too many zombie processes present can prevent additional processes - /// from being spawned. - /// - /// Although issuing a `kill` signal to the child process is a synchronous - /// operation, the resulting zombie process cannot be `.await`ed inside of the - /// destructor to avoid blocking other tasks. The tokio runtime will, on a - /// best-effort basis, attempt to reap and clean up such processes in the - /// background, but no additional guarantees are made with regard to - /// how quickly or how often this procedure will take place. - /// - /// If stronger guarantees are required, it is recommended to avoid dropping - /// a [`Child`] handle where possible, and instead utilize `child.wait().await` - /// or `child.kill().await` where possible. 
- pub fn kill_on_drop(&mut self, kill_on_drop: bool) -> &mut Command { - self.kill_on_drop = kill_on_drop; - self - } - - cfg_windows! { - /// Sets the [process creation flags][1] to be passed to `CreateProcess`. - /// - /// These will always be ORed with `CREATE_UNICODE_ENVIRONMENT`. - /// - /// [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863(v=vs.85).aspx - pub fn creation_flags(&mut self, flags: u32) -> &mut Command { - self.std.creation_flags(flags); - self - } - } - - /// Sets the child process's user ID. This translates to a - /// `setuid` call in the child process. Failure in the `setuid` - /// call will cause the spawn to fail. - #[cfg(unix)] - #[cfg_attr(docsrs, doc(cfg(unix)))] - pub fn uid(&mut self, id: u32) -> &mut Command { - self.std.uid(id); - self - } - - /// Similar to `uid` but sets the group ID of the child process. This has - /// the same semantics as the `uid` field. - #[cfg(unix)] - #[cfg_attr(docsrs, doc(cfg(unix)))] - pub fn gid(&mut self, id: u32) -> &mut Command { - self.std.gid(id); - self - } - - /// Sets executable argument. - /// - /// Set the first process argument, `argv[0]`, to something other than the - /// default executable path. - #[cfg(unix)] - #[cfg_attr(docsrs, doc(cfg(unix)))] - pub fn arg0(&mut self, arg: S) -> &mut Command - where - S: AsRef, - { - self.std.arg0(arg); - self - } - - /// Schedules a closure to be run just before the `exec` function is - /// invoked. - /// - /// The closure is allowed to return an I/O error whose OS error code will - /// be communicated back to the parent and returned as an error from when - /// the spawn was requested. - /// - /// Multiple closures can be registered and they will be called in order of - /// their registration. If a closure returns `Err` then no further closures - /// will be called and the spawn operation will immediately return with a - /// failure. 
- /// - /// # Safety - /// - /// This closure will be run in the context of the child process after a - /// `fork`. This primarily means that any modifications made to memory on - /// behalf of this closure will **not** be visible to the parent process. - /// This is often a very constrained environment where normal operations - /// like `malloc` or acquiring a mutex are not guaranteed to work (due to - /// other threads perhaps still running when the `fork` was run). - /// - /// This also means that all resources such as file descriptors and - /// memory-mapped regions got duplicated. It is your responsibility to make - /// sure that the closure does not violate library invariants by making - /// invalid use of these duplicates. - /// - /// When this closure is run, aspects such as the stdio file descriptors and - /// working directory have successfully been changed, so output to these - /// locations may not appear where intended. - #[cfg(unix)] - #[cfg_attr(docsrs, doc(cfg(unix)))] - pub unsafe fn pre_exec(&mut self, f: F) -> &mut Command - where - F: FnMut() -> io::Result<()> + Send + Sync + 'static, - { - self.std.pre_exec(f); - self - } - - /// Sets the process group ID (PGID) of the child process. Equivalent to a - /// setpgid call in the child process, but may be more efficient. - /// - /// Process groups determine which processes receive signals. - /// - /// **Note**: This is an [unstable API][unstable] but will be stabilised once - /// tokio's MSRV is sufficiently new. See [the documentation on - /// unstable features][unstable] for details about using unstable features. - /// - /// If you want similar behaviour without using this unstable feature you can - /// create a [`std::process::Command`] and convert that into a - /// [`tokio::process::Command`] using the `From` trait. 
- /// - /// [unstable]: crate#unstable-features - /// [`tokio::process::Command`]: crate::process::Command - /// - /// ```no_run - /// # async fn test() { // allow using await - /// use tokio::process::Command; - /// - /// let output = Command::new("ls") - /// .process_group(0) - /// .output().await.unwrap(); - /// # } - /// ``` - #[cfg(unix)] - #[cfg(tokio_unstable)] - #[cfg_attr(docsrs, doc(cfg(all(unix, tokio_unstable))))] - pub fn process_group(&mut self, pgroup: i32) -> &mut Command { - self.std.process_group(pgroup); - self - } - - /// Executes the command as a child process, returning a handle to it. - /// - /// By default, stdin, stdout and stderr are inherited from the parent. - /// - /// This method will spawn the child process synchronously and return a - /// handle to a future-aware child process. The `Child` returned implements - /// `Future` itself to acquire the `ExitStatus` of the child, and otherwise - /// the `Child` has methods to acquire handles to the stdin, stdout, and - /// stderr streams. - /// - /// All I/O this child does will be associated with the current default - /// event loop. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// async fn run_ls() -> std::process::ExitStatus { - /// Command::new("ls") - /// .spawn() - /// .expect("ls command failed to start") - /// .wait() - /// .await - /// .expect("ls command failed to run") - /// } - /// ``` - /// - /// # Caveats - /// - /// ## Dropping/Cancellation - /// - /// Similar to the behavior to the standard library, and unlike the futures - /// paradigm of dropping-implies-cancellation, a spawned process will, by - /// default, continue to execute even after the `Child` handle has been dropped. - /// - /// The [`Command::kill_on_drop`] method can be used to modify this behavior - /// and kill the child process if the `Child` wrapper is dropped before it - /// has exited. 
- /// - /// ## Unix Processes - /// - /// On Unix platforms processes must be "reaped" by their parent process after - /// they have exited in order to release all OS resources. A child process which - /// has exited, but has not yet been reaped by its parent is considered a "zombie" - /// process. Such processes continue to count against limits imposed by the system, - /// and having too many zombie processes present can prevent additional processes - /// from being spawned. - /// - /// The tokio runtime will, on a best-effort basis, attempt to reap and clean up - /// any process which it has spawned. No additional guarantees are made with regard to - /// how quickly or how often this procedure will take place. - /// - /// It is recommended to avoid dropping a [`Child`] process handle before it has been - /// fully `await`ed if stricter cleanup guarantees are required. - /// - /// [`Command`]: crate::process::Command - /// [`Command::kill_on_drop`]: crate::process::Command::kill_on_drop - /// [`Child`]: crate::process::Child - /// - /// # Errors - /// - /// On Unix platforms this method will fail with `std::io::ErrorKind::WouldBlock` - /// if the system process limit is reached (which includes other applications - /// running on the system). - pub fn spawn(&mut self) -> io::Result { - imp::spawn_child(&mut self.std).map(|spawned_child| Child { - child: FusedChild::Child(ChildDropGuard { - inner: spawned_child.child, - kill_on_drop: self.kill_on_drop, - }), - stdin: spawned_child.stdin.map(|inner| ChildStdin { inner }), - stdout: spawned_child.stdout.map(|inner| ChildStdout { inner }), - stderr: spawned_child.stderr.map(|inner| ChildStderr { inner }), - }) - } - - /// Executes the command as a child process, waiting for it to finish and - /// collecting its exit status. - /// - /// By default, stdin, stdout and stderr are inherited from the parent. 
- /// If any input/output handles are set to a pipe then they will be immediately - /// closed after the child is spawned. - /// - /// All I/O this child does will be associated with the current default - /// event loop. - /// - /// The destructor of the future returned by this function will kill - /// the child if [`kill_on_drop`] is set to true. - /// - /// [`kill_on_drop`]: fn@Self::kill_on_drop - /// - /// # Errors - /// - /// This future will return an error if the child process cannot be spawned - /// or if there is an error while awaiting its status. - /// - /// On Unix platforms this method will fail with `std::io::ErrorKind::WouldBlock` - /// if the system process limit is reached (which includes other applications - /// running on the system). - /// - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// async fn run_ls() -> std::process::ExitStatus { - /// Command::new("ls") - /// .status() - /// .await - /// .expect("ls command failed to run") - /// } - /// ``` - pub fn status(&mut self) -> impl Future> { - let child = self.spawn(); - - async { - let mut child = child?; - - // Ensure we close any stdio handles so we can't deadlock - // waiting on the child which may be waiting to read/write - // to a pipe we're holding. - child.stdin.take(); - child.stdout.take(); - child.stderr.take(); - - child.wait().await - } - } - - /// Executes the command as a child process, waiting for it to finish and - /// collecting all of its output. - /// - /// > **Note**: this method, unlike the standard library, will - /// > unconditionally configure the stdout/stderr handles to be pipes, even - /// > if they have been previously configured. If this is not desired then - /// > the `spawn` method should be used in combination with the - /// > `wait_with_output` method on child. - /// - /// This method will return a future representing the collection of the - /// child process's stdout/stderr. 
It will resolve to - /// the `Output` type in the standard library, containing `stdout` and - /// `stderr` as `Vec` along with an `ExitStatus` representing how the - /// process exited. - /// - /// All I/O this child does will be associated with the current default - /// event loop. - /// - /// The destructor of the future returned by this function will kill - /// the child if [`kill_on_drop`] is set to true. - /// - /// [`kill_on_drop`]: fn@Self::kill_on_drop - /// - /// # Errors - /// - /// This future will return an error if the child process cannot be spawned - /// or if there is an error while awaiting its status. - /// - /// On Unix platforms this method will fail with `std::io::ErrorKind::WouldBlock` - /// if the system process limit is reached (which includes other applications - /// running on the system). - /// # Examples - /// - /// Basic usage: - /// - /// ```no_run - /// use tokio::process::Command; - /// - /// async fn run_ls() { - /// let output: std::process::Output = Command::new("ls") - /// .output() - /// .await - /// .expect("ls command failed to run"); - /// println!("stderr of ls: {:?}", output.stderr); - /// } - /// ``` - pub fn output(&mut self) -> impl Future> { - self.std.stdout(Stdio::piped()); - self.std.stderr(Stdio::piped()); - - let child = self.spawn(); - - async { child?.wait_with_output().await } - } -} - -impl From for Command { - fn from(std: StdCommand) -> Command { - Command { - std, - kill_on_drop: false, - } - } -} - -/// A drop guard which can ensure the child process is killed on drop if specified. 
-#[derive(Debug)] -struct ChildDropGuard { - inner: T, - kill_on_drop: bool, -} - -impl Kill for ChildDropGuard { - fn kill(&mut self) -> io::Result<()> { - let ret = self.inner.kill(); - - if ret.is_ok() { - self.kill_on_drop = false; - } - - ret - } -} - -impl Drop for ChildDropGuard { - fn drop(&mut self) { - if self.kill_on_drop { - drop(self.kill()); - } - } -} - -impl Future for ChildDropGuard -where - F: Future> + Kill + Unpin, -{ - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - ready!(crate::trace::trace_leaf(cx)); - // Keep track of task budget - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - let ret = Pin::new(&mut self.inner).poll(cx); - - if let Poll::Ready(Ok(_)) = ret { - // Avoid the overhead of trying to kill a reaped process - self.kill_on_drop = false; - } - - if ret.is_ready() { - coop.made_progress(); - } - - ret - } -} - -/// Keeps track of the exit status of a child process without worrying about -/// polling the underlying futures even after they have completed. -#[derive(Debug)] -enum FusedChild { - Child(ChildDropGuard), - Done(ExitStatus), -} - -/// Representation of a child process spawned onto an event loop. -/// -/// # Caveats -/// Similar to the behavior to the standard library, and unlike the futures -/// paradigm of dropping-implies-cancellation, a spawned process will, by -/// default, continue to execute even after the `Child` handle has been dropped. -/// -/// The `Command::kill_on_drop` method can be used to modify this behavior -/// and kill the child process if the `Child` wrapper is dropped before it -/// has exited. -#[derive(Debug)] -pub struct Child { - child: FusedChild, - - /// The handle for writing to the child's standard input (stdin), if it has - /// been captured. 
To avoid partially moving the `child` and thus blocking - /// yourself from calling functions on `child` while using `stdin`, you might - /// find it helpful to do: - /// - /// ```no_run - /// # let mut child = tokio::process::Command::new("echo").spawn().unwrap(); - /// let stdin = child.stdin.take().unwrap(); - /// ``` - pub stdin: Option, - - /// The handle for reading from the child's standard output (stdout), if it - /// has been captured. You might find it helpful to do - /// - /// ```no_run - /// # let mut child = tokio::process::Command::new("echo").spawn().unwrap(); - /// let stdout = child.stdout.take().unwrap(); - /// ``` - /// - /// to avoid partially moving the `child` and thus blocking yourself from calling - /// functions on `child` while using `stdout`. - pub stdout: Option, - - /// The handle for reading from the child's standard error (stderr), if it - /// has been captured. You might find it helpful to do - /// - /// ```no_run - /// # let mut child = tokio::process::Command::new("echo").spawn().unwrap(); - /// let stderr = child.stderr.take().unwrap(); - /// ``` - /// - /// to avoid partially moving the `child` and thus blocking yourself from calling - /// functions on `child` while using `stderr`. - pub stderr: Option, -} - -impl Child { - /// Returns the OS-assigned process identifier associated with this child - /// while it is still running. - /// - /// Once the child has been polled to completion this will return `None`. - /// This is done to avoid confusion on platforms like Unix where the OS - /// identifier could be reused once the process has completed. - pub fn id(&self) -> Option { - match &self.child { - FusedChild::Child(child) => Some(child.inner.id()), - FusedChild::Done(_) => None, - } - } - - cfg_windows! { - /// Extracts the raw handle of the process associated with this child while - /// it is still running. Returns `None` if the child has exited. 
- pub fn raw_handle(&self) -> Option { - match &self.child { - FusedChild::Child(c) => Some(c.inner.as_raw_handle()), - FusedChild::Done(_) => None, - } - } - } - - /// Attempts to force the child to exit, but does not wait for the request - /// to take effect. - /// - /// On Unix platforms, this is the equivalent to sending a SIGKILL. Note - /// that on Unix platforms it is possible for a zombie process to remain - /// after a kill is sent; to avoid this, the caller should ensure that either - /// `child.wait().await` or `child.try_wait()` is invoked successfully. - pub fn start_kill(&mut self) -> io::Result<()> { - match &mut self.child { - FusedChild::Child(child) => child.kill(), - FusedChild::Done(_) => Err(io::Error::new( - io::ErrorKind::InvalidInput, - "invalid argument: can't kill an exited process", - )), - } - } - - /// Forces the child to exit. - /// - /// This is equivalent to sending a SIGKILL on unix platforms. - /// - /// If the child has to be killed remotely, it is possible to do it using - /// a combination of the select! macro and a oneshot channel. In the following - /// example, the child will run until completion unless a message is sent on - /// the oneshot channel. If that happens, the child is killed immediately - /// using the `.kill()` method. - /// - /// ```no_run - /// use tokio::process::Command; - /// use tokio::sync::oneshot::channel; - /// - /// #[tokio::main] - /// async fn main() { - /// let (send, recv) = channel::<()>(); - /// let mut child = Command::new("sleep").arg("1").spawn().unwrap(); - /// tokio::spawn(async move { send.send(()) }); - /// tokio::select! { - /// _ = child.wait() => {} - /// _ = recv => child.kill().await.expect("kill failed"), - /// } - /// } - /// ``` - pub async fn kill(&mut self) -> io::Result<()> { - self.start_kill()?; - self.wait().await?; - Ok(()) - } - - /// Waits for the child to exit completely, returning the status that it - /// exited with. 
This function will continue to have the same return value - /// after it has been called at least once. - /// - /// The stdin handle to the child process, if any, will be closed - /// before waiting. This helps avoid deadlock: it ensures that the - /// child does not block waiting for input from the parent, while - /// the parent waits for the child to exit. - /// - /// If the caller wishes to explicitly control when the child's stdin - /// handle is closed, they may `.take()` it before calling `.wait()`: - /// - /// # Cancel safety - /// - /// This function is cancel safe. - /// - /// ``` - /// # #[cfg(not(unix))]fn main(){} - /// # #[cfg(unix)] - /// use tokio::io::AsyncWriteExt; - /// # #[cfg(unix)] - /// use tokio::process::Command; - /// # #[cfg(unix)] - /// use std::process::Stdio; - /// - /// # #[cfg(unix)] - /// #[tokio::main] - /// async fn main() { - /// let mut child = Command::new("cat") - /// .stdin(Stdio::piped()) - /// .spawn() - /// .unwrap(); - /// - /// let mut stdin = child.stdin.take().unwrap(); - /// tokio::spawn(async move { - /// // do something with stdin here... - /// stdin.write_all(b"hello world\n").await.unwrap(); - /// - /// // then drop when finished - /// drop(stdin); - /// }); - /// - /// // wait for the process to complete - /// let _ = child.wait().await; - /// } - /// ``` - pub async fn wait(&mut self) -> io::Result { - // Ensure stdin is closed so the child isn't stuck waiting on - // input while the parent is waiting for it to exit. - drop(self.stdin.take()); - - match &mut self.child { - FusedChild::Done(exit) => Ok(*exit), - FusedChild::Child(child) => { - let ret = child.await; - - if let Ok(exit) = ret { - self.child = FusedChild::Done(exit); - } - - ret - } - } - } - - /// Attempts to collect the exit status of the child if it has already - /// exited. - /// - /// This function will not block the calling thread and will only - /// check to see if the child process has exited or not. 
If the child has - /// exited then on Unix the process ID is reaped. This function is - /// guaranteed to repeatedly return a successful exit status so long as the - /// child has already exited. - /// - /// If the child has exited, then `Ok(Some(status))` is returned. If the - /// exit status is not available at this time then `Ok(None)` is returned. - /// If an error occurs, then that error is returned. - /// - /// Note that unlike `wait`, this function will not attempt to drop stdin, - /// nor will it wake the current task if the child exits. - pub fn try_wait(&mut self) -> io::Result> { - match &mut self.child { - FusedChild::Done(exit) => Ok(Some(*exit)), - FusedChild::Child(guard) => { - let ret = guard.inner.try_wait(); - - if let Ok(Some(exit)) = ret { - // Avoid the overhead of trying to kill a reaped process - guard.kill_on_drop = false; - self.child = FusedChild::Done(exit); - } - - ret - } - } - } - - /// Returns a future that will resolve to an `Output`, containing the exit - /// status, stdout, and stderr of the child process. - /// - /// The returned future will simultaneously waits for the child to exit and - /// collect all remaining output on the stdout/stderr handles, returning an - /// `Output` instance. - /// - /// The stdin handle to the child process, if any, will be closed before - /// waiting. This helps avoid deadlock: it ensures that the child does not - /// block waiting for input from the parent, while the parent waits for the - /// child to exit. - /// - /// By default, stdin, stdout and stderr are inherited from the parent. In - /// order to capture the output into this `Output` it is necessary to create - /// new pipes between parent and child. Use `stdout(Stdio::piped())` or - /// `stderr(Stdio::piped())`, respectively, when creating a `Command`. 
- pub async fn wait_with_output(mut self) -> io::Result { - use crate::future::try_join3; - - async fn read_to_end(io: &mut Option) -> io::Result> { - let mut vec = Vec::new(); - if let Some(io) = io.as_mut() { - crate::io::util::read_to_end(io, &mut vec).await?; - } - Ok(vec) - } - - let mut stdout_pipe = self.stdout.take(); - let mut stderr_pipe = self.stderr.take(); - - let stdout_fut = read_to_end(&mut stdout_pipe); - let stderr_fut = read_to_end(&mut stderr_pipe); - - let (status, stdout, stderr) = try_join3(self.wait(), stdout_fut, stderr_fut).await?; - - // Drop happens after `try_join` due to - drop(stdout_pipe); - drop(stderr_pipe); - - Ok(Output { - status, - stdout, - stderr, - }) - } -} - -/// The standard input stream for spawned children. -/// -/// This type implements the `AsyncWrite` trait to pass data to the stdin handle of -/// handle of a child process asynchronously. -#[derive(Debug)] -pub struct ChildStdin { - inner: imp::ChildStdio, -} - -/// The standard output stream for spawned children. -/// -/// This type implements the `AsyncRead` trait to read data from the stdout -/// handle of a child process asynchronously. -#[derive(Debug)] -pub struct ChildStdout { - inner: imp::ChildStdio, -} - -/// The standard error stream for spawned children. -/// -/// This type implements the `AsyncRead` trait to read data from the stderr -/// handle of a child process asynchronously. -#[derive(Debug)] -pub struct ChildStderr { - inner: imp::ChildStdio, -} - -impl ChildStdin { - /// Creates an asynchronous `ChildStdin` from a synchronous one. - /// - /// # Errors - /// - /// This method may fail if an error is encountered when setting the pipe to - /// non-blocking mode, or when registering the pipe with the runtime's IO - /// driver. - pub fn from_std(inner: std::process::ChildStdin) -> io::Result { - Ok(Self { - inner: imp::stdio(inner)?, - }) - } -} - -impl ChildStdout { - /// Creates an asynchronous `ChildStdout` from a synchronous one. 
- /// - /// # Errors - /// - /// This method may fail if an error is encountered when setting the pipe to - /// non-blocking mode, or when registering the pipe with the runtime's IO - /// driver. - pub fn from_std(inner: std::process::ChildStdout) -> io::Result { - Ok(Self { - inner: imp::stdio(inner)?, - }) - } -} - -impl ChildStderr { - /// Creates an asynchronous `ChildStderr` from a synchronous one. - /// - /// # Errors - /// - /// This method may fail if an error is encountered when setting the pipe to - /// non-blocking mode, or when registering the pipe with the runtime's IO - /// driver. - pub fn from_std(inner: std::process::ChildStderr) -> io::Result { - Ok(Self { - inner: imp::stdio(inner)?, - }) - } -} - -impl AsyncWrite for ChildStdin { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.inner).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_shutdown(cx) - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.inner.is_write_vectored() - } -} - -impl AsyncRead for ChildStdout { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - Pin::new(&mut self.inner).poll_read(cx, buf) - } -} - -impl AsyncRead for ChildStderr { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - Pin::new(&mut self.inner).poll_read(cx, buf) - } -} - -impl TryInto for ChildStdin { - type Error = io::Error; - - fn try_into(self) -> Result { - imp::convert_to_stdio(self.inner) - } -} - -impl TryInto for ChildStdout { - 
type Error = io::Error; - - fn try_into(self) -> Result { - imp::convert_to_stdio(self.inner) - } -} - -impl TryInto for ChildStderr { - type Error = io::Error; - - fn try_into(self) -> Result { - imp::convert_to_stdio(self.inner) - } -} - -#[cfg(unix)] -#[cfg_attr(docsrs, doc(cfg(unix)))] -mod sys { - use std::{ - io, - os::unix::io::{AsFd, AsRawFd, BorrowedFd, OwnedFd, RawFd}, - }; - - use super::{ChildStderr, ChildStdin, ChildStdout}; - - macro_rules! impl_traits { - ($type:ty) => { - impl $type { - /// Convert into [`OwnedFd`]. - pub fn into_owned_fd(self) -> io::Result { - self.inner.into_owned_fd() - } - } - - impl AsRawFd for $type { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } - } - - impl AsFd for $type { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } - } - }; - } - - impl_traits!(ChildStdin); - impl_traits!(ChildStdout); - impl_traits!(ChildStderr); -} - -#[cfg(any(windows, docsrs))] -#[cfg_attr(docsrs, doc(cfg(windows)))] -mod windows { - use super::*; - use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, OwnedHandle, RawHandle}; - - #[cfg(not(docsrs))] - macro_rules! impl_traits { - ($type:ty) => { - impl $type { - /// Convert into [`OwnedHandle`]. - pub fn into_owned_handle(self) -> io::Result { - self.inner.into_owned_handle() - } - } - - impl AsRawHandle for $type { - fn as_raw_handle(&self) -> RawHandle { - self.inner.as_raw_handle() - } - } - - impl AsHandle for $type { - fn as_handle(&self) -> BorrowedHandle<'_> { - unsafe { BorrowedHandle::borrow_raw(self.as_raw_handle()) } - } - } - }; - } - - #[cfg(docsrs)] - macro_rules! impl_traits { - ($type:ty) => { - impl $type { - /// Convert into [`OwnedHandle`]. 
- pub fn into_owned_handle(self) -> io::Result { - todo!("For doc generation only") - } - } - - impl AsRawHandle for $type { - fn as_raw_handle(&self) -> RawHandle { - todo!("For doc generation only") - } - } - - impl AsHandle for $type { - fn as_handle(&self) -> BorrowedHandle<'_> { - todo!("For doc generation only") - } - } - }; - } - - impl_traits!(ChildStdin); - impl_traits!(ChildStdout); - impl_traits!(ChildStderr); -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::kill::Kill; - use super::ChildDropGuard; - - use futures::future::FutureExt; - use std::future::Future; - use std::io; - use std::pin::Pin; - use std::task::{Context, Poll}; - - struct Mock { - num_kills: usize, - num_polls: usize, - poll_result: Poll>, - } - - impl Mock { - fn new() -> Self { - Self::with_result(Poll::Pending) - } - - fn with_result(result: Poll>) -> Self { - Self { - num_kills: 0, - num_polls: 0, - poll_result: result, - } - } - } - - impl Kill for Mock { - fn kill(&mut self) -> io::Result<()> { - self.num_kills += 1; - Ok(()) - } - } - - impl Future for Mock { - type Output = Result<(), ()>; - - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - let inner = Pin::get_mut(self); - inner.num_polls += 1; - inner.poll_result - } - } - - #[test] - fn kills_on_drop_if_specified() { - let mut mock = Mock::new(); - - { - let guard = ChildDropGuard { - inner: &mut mock, - kill_on_drop: true, - }; - drop(guard); - } - - assert_eq!(1, mock.num_kills); - assert_eq!(0, mock.num_polls); - } - - #[test] - fn no_kill_on_drop_by_default() { - let mut mock = Mock::new(); - - { - let guard = ChildDropGuard { - inner: &mut mock, - kill_on_drop: false, - }; - drop(guard); - } - - assert_eq!(0, mock.num_kills); - assert_eq!(0, mock.num_polls); - } - - #[test] - fn no_kill_if_already_killed() { - let mut mock = Mock::new(); - - { - let mut guard = ChildDropGuard { - inner: &mut mock, - kill_on_drop: true, - }; - let _ = guard.kill(); - drop(guard); - } - - assert_eq!(1, 
mock.num_kills); - assert_eq!(0, mock.num_polls); - } - - #[test] - fn no_kill_if_reaped() { - let mut mock_pending = Mock::with_result(Poll::Pending); - let mut mock_reaped = Mock::with_result(Poll::Ready(Ok(()))); - let mut mock_err = Mock::with_result(Poll::Ready(Err(()))); - - let waker = futures::task::noop_waker(); - let mut context = Context::from_waker(&waker); - { - let mut guard = ChildDropGuard { - inner: &mut mock_pending, - kill_on_drop: true, - }; - let _ = guard.poll_unpin(&mut context); - - let mut guard = ChildDropGuard { - inner: &mut mock_reaped, - kill_on_drop: true, - }; - let _ = guard.poll_unpin(&mut context); - - let mut guard = ChildDropGuard { - inner: &mut mock_err, - kill_on_drop: true, - }; - let _ = guard.poll_unpin(&mut context); - } - - assert_eq!(1, mock_pending.num_kills); - assert_eq!(1, mock_pending.num_polls); - - assert_eq!(0, mock_reaped.num_kills); - assert_eq!(1, mock_reaped.num_polls); - - assert_eq!(1, mock_err.num_kills); - assert_eq!(1, mock_err.num_polls); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/process/unix/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/process/unix/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/process/unix/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/process/unix/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,344 +0,0 @@ -//! Unix handling of child processes. -//! -//! Right now the only "fancy" thing about this is how we implement the -//! `Future` implementation on `Child` to get the exit status. Unix offers -//! no way to register a child with epoll, and the only real way to get a -//! notification when a process exits is the SIGCHLD signal. -//! -//! Signal handling in general is *super* hairy and complicated, and it's even -//! more complicated here with the fact that signals are coalesced, so we may -//! not get a SIGCHLD-per-child. -//! -//! Our best approximation here is to check *all spawned processes* for all -//! 
SIGCHLD signals received. To do that we create a `Signal`, implemented in -//! the `tokio-net` crate, which is a stream over signals being received. -//! -//! Later when we poll the process's exit status we simply check to see if a -//! SIGCHLD has happened since we last checked, and while that returns "yes" we -//! keep trying. -//! -//! Note that this means that this isn't really scalable, but then again -//! processes in general aren't scalable (e.g. millions) so it shouldn't be that -//! bad in theory... - -pub(crate) mod orphan; -use orphan::{OrphanQueue, OrphanQueueImpl, Wait}; - -mod reap; -use reap::Reaper; - -use crate::io::{AsyncRead, AsyncWrite, PollEvented, ReadBuf}; -use crate::process::kill::Kill; -use crate::process::SpawnedChild; -use crate::runtime::signal::Handle as SignalHandle; -use crate::signal::unix::{signal, Signal, SignalKind}; - -use mio::event::Source; -use mio::unix::SourceFd; -use std::fmt; -use std::fs::File; -use std::future::Future; -use std::io; -use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd}; -use std::pin::Pin; -use std::process::{Child as StdChild, ExitStatus, Stdio}; -use std::task::Context; -use std::task::Poll; - -impl Wait for StdChild { - fn id(&self) -> u32 { - self.id() - } - - fn try_wait(&mut self) -> io::Result> { - self.try_wait() - } -} - -impl Kill for StdChild { - fn kill(&mut self) -> io::Result<()> { - self.kill() - } -} - -cfg_not_has_const_mutex_new! { - fn get_orphan_queue() -> &'static OrphanQueueImpl { - use crate::util::once_cell::OnceCell; - - static ORPHAN_QUEUE: OnceCell> = OnceCell::new(); - - ORPHAN_QUEUE.get(OrphanQueueImpl::new) - } -} - -cfg_has_const_mutex_new! 
{ - fn get_orphan_queue() -> &'static OrphanQueueImpl { - static ORPHAN_QUEUE: OrphanQueueImpl = OrphanQueueImpl::new(); - - &ORPHAN_QUEUE - } -} - -pub(crate) struct GlobalOrphanQueue; - -impl fmt::Debug for GlobalOrphanQueue { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - get_orphan_queue().fmt(fmt) - } -} - -impl GlobalOrphanQueue { - pub(crate) fn reap_orphans(handle: &SignalHandle) { - get_orphan_queue().reap_orphans(handle) - } -} - -impl OrphanQueue for GlobalOrphanQueue { - fn push_orphan(&self, orphan: StdChild) { - get_orphan_queue().push_orphan(orphan) - } -} - -#[must_use = "futures do nothing unless polled"] -pub(crate) struct Child { - inner: Reaper, -} - -impl fmt::Debug for Child { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Child") - .field("pid", &self.inner.id()) - .finish() - } -} - -pub(crate) fn spawn_child(cmd: &mut std::process::Command) -> io::Result { - let mut child = cmd.spawn()?; - let stdin = child.stdin.take().map(stdio).transpose()?; - let stdout = child.stdout.take().map(stdio).transpose()?; - let stderr = child.stderr.take().map(stdio).transpose()?; - - let signal = signal(SignalKind::child())?; - - Ok(SpawnedChild { - child: Child { - inner: Reaper::new(child, GlobalOrphanQueue, signal), - }, - stdin, - stdout, - stderr, - }) -} - -impl Child { - pub(crate) fn id(&self) -> u32 { - self.inner.id() - } - - pub(crate) fn try_wait(&mut self) -> io::Result> { - self.inner.inner_mut().try_wait() - } -} - -impl Kill for Child { - fn kill(&mut self) -> io::Result<()> { - self.inner.kill() - } -} - -impl Future for Child { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.inner).poll(cx) - } -} - -#[derive(Debug)] -pub(crate) struct Pipe { - // Actually a pipe is not a File. However, we are reusing `File` to get - // close on drop. This is a similar trick as `mio`. 
- fd: File, -} - -impl From for Pipe { - fn from(fd: T) -> Self { - let fd = unsafe { File::from_raw_fd(fd.into_raw_fd()) }; - Self { fd } - } -} - -impl<'a> io::Read for &'a Pipe { - fn read(&mut self, bytes: &mut [u8]) -> io::Result { - (&self.fd).read(bytes) - } -} - -impl<'a> io::Write for &'a Pipe { - fn write(&mut self, bytes: &[u8]) -> io::Result { - (&self.fd).write(bytes) - } - - fn flush(&mut self) -> io::Result<()> { - (&self.fd).flush() - } - - fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result { - (&self.fd).write_vectored(bufs) - } -} - -impl AsRawFd for Pipe { - fn as_raw_fd(&self) -> RawFd { - self.fd.as_raw_fd() - } -} - -impl AsFd for Pipe { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } -} - -fn convert_to_blocking_file(io: ChildStdio) -> io::Result { - let mut fd = io.inner.into_inner()?.fd; - - // Ensure that the fd to be inherited is set to *blocking* mode, as this - // is the default that virtually all programs expect to have. Those - // programs that know how to work with nonblocking stdio will know how to - // change it to nonblocking mode. 
- set_nonblocking(&mut fd, false)?; - - Ok(fd) -} - -pub(crate) fn convert_to_stdio(io: ChildStdio) -> io::Result { - convert_to_blocking_file(io).map(Stdio::from) -} - -impl Source for Pipe { - fn register( - &mut self, - registry: &mio::Registry, - token: mio::Token, - interest: mio::Interest, - ) -> io::Result<()> { - SourceFd(&self.as_raw_fd()).register(registry, token, interest) - } - - fn reregister( - &mut self, - registry: &mio::Registry, - token: mio::Token, - interest: mio::Interest, - ) -> io::Result<()> { - SourceFd(&self.as_raw_fd()).reregister(registry, token, interest) - } - - fn deregister(&mut self, registry: &mio::Registry) -> io::Result<()> { - SourceFd(&self.as_raw_fd()).deregister(registry) - } -} - -pub(crate) struct ChildStdio { - inner: PollEvented, -} - -impl ChildStdio { - pub(super) fn into_owned_fd(self) -> io::Result { - convert_to_blocking_file(self).map(OwnedFd::from) - } -} - -impl fmt::Debug for ChildStdio { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(fmt) - } -} - -impl AsRawFd for ChildStdio { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() - } -} - -impl AsFd for ChildStdio { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } - } -} - -impl AsyncWrite for ChildStdio { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.inner.poll_write(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[io::IoSlice<'_>], - ) -> Poll> { - self.inner.poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - true - } -} - -impl AsyncRead for ChildStdio { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> 
Poll> { - // Safety: pipes support reading into uninitialized memory - unsafe { self.inner.poll_read(cx, buf) } - } -} - -fn set_nonblocking(fd: &mut T, nonblocking: bool) -> io::Result<()> { - unsafe { - let fd = fd.as_raw_fd(); - let previous = libc::fcntl(fd, libc::F_GETFL); - if previous == -1 { - return Err(io::Error::last_os_error()); - } - - let new = if nonblocking { - previous | libc::O_NONBLOCK - } else { - previous & !libc::O_NONBLOCK - }; - - let r = libc::fcntl(fd, libc::F_SETFL, new); - if r == -1 { - return Err(io::Error::last_os_error()); - } - } - - Ok(()) -} - -pub(super) fn stdio(io: T) -> io::Result -where - T: IntoRawFd, -{ - // Set the fd to nonblocking before we pass it to the event loop - let mut pipe = Pipe::from(io); - set_nonblocking(&mut pipe, true)?; - - PollEvented::new(pipe).map(|inner| ChildStdio { inner }) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/process/unix/orphan.rs s390-tools-2.33.1/rust-vendor/tokio/src/process/unix/orphan.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/process/unix/orphan.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/process/unix/orphan.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,333 +0,0 @@ -use crate::loom::sync::{Mutex, MutexGuard}; -use crate::runtime::signal::Handle as SignalHandle; -use crate::signal::unix::{signal_with_handle, SignalKind}; -use crate::sync::watch; -use std::io; -use std::process::ExitStatus; - -/// An interface for waiting on a process to exit. -pub(crate) trait Wait { - /// Get the identifier for this process or diagnostics. - fn id(&self) -> u32; - /// Try waiting for a process to exit in a non-blocking manner. - fn try_wait(&mut self) -> io::Result>; -} - -impl Wait for &mut T { - fn id(&self) -> u32 { - (**self).id() - } - - fn try_wait(&mut self) -> io::Result> { - (**self).try_wait() - } -} - -/// An interface for queueing up an orphaned process so that it can be reaped. 
-pub(crate) trait OrphanQueue { - /// Adds an orphan to the queue. - fn push_orphan(&self, orphan: T); -} - -impl> OrphanQueue for &O { - fn push_orphan(&self, orphan: T) { - (**self).push_orphan(orphan); - } -} - -/// An implementation of `OrphanQueue`. -#[derive(Debug)] -pub(crate) struct OrphanQueueImpl { - sigchild: Mutex>>, - queue: Mutex>, -} - -impl OrphanQueueImpl { - cfg_not_has_const_mutex_new! { - pub(crate) fn new() -> Self { - Self { - sigchild: Mutex::new(None), - queue: Mutex::new(Vec::new()), - } - } - } - - cfg_has_const_mutex_new! { - pub(crate) const fn new() -> Self { - Self { - sigchild: Mutex::const_new(None), - queue: Mutex::const_new(Vec::new()), - } - } - } - - #[cfg(test)] - fn len(&self) -> usize { - self.queue.lock().len() - } - - pub(crate) fn push_orphan(&self, orphan: T) - where - T: Wait, - { - self.queue.lock().push(orphan) - } - - /// Attempts to reap every process in the queue, ignoring any errors and - /// enqueueing any orphans which have not yet exited. - pub(crate) fn reap_orphans(&self, handle: &SignalHandle) - where - T: Wait, - { - // If someone else is holding the lock, they will be responsible for draining - // the queue as necessary, so we can safely bail if that happens - if let Some(mut sigchild_guard) = self.sigchild.try_lock() { - match &mut *sigchild_guard { - Some(sigchild) => { - if sigchild.try_has_changed().and_then(Result::ok).is_some() { - drain_orphan_queue(self.queue.lock()); - } - } - None => { - let queue = self.queue.lock(); - - // Be lazy and only initialize the SIGCHLD listener if there - // are any orphaned processes in the queue. 
- if !queue.is_empty() { - // An errors shouldn't really happen here, but if it does it - // means that the signal driver isn't running, in - // which case there isn't anything we can - // register/initialize here, so we can try again later - if let Ok(sigchild) = signal_with_handle(SignalKind::child(), handle) { - *sigchild_guard = Some(sigchild); - drain_orphan_queue(queue); - } - } - } - } - } - } -} - -fn drain_orphan_queue(mut queue: MutexGuard<'_, Vec>) -where - T: Wait, -{ - for i in (0..queue.len()).rev() { - match queue[i].try_wait() { - Ok(None) => {} - Ok(Some(_)) | Err(_) => { - // The stdlib handles interruption errors (EINTR) when polling a child process. - // All other errors represent invalid inputs or pids that have already been - // reaped, so we can drop the orphan in case an error is raised. - queue.swap_remove(i); - } - } - } - - drop(queue); -} - -#[cfg(all(test, not(loom)))] -pub(crate) mod test { - use super::*; - use crate::runtime::io::Driver as IoDriver; - use crate::runtime::signal::{Driver as SignalDriver, Handle as SignalHandle}; - use crate::sync::watch; - use std::cell::{Cell, RefCell}; - use std::io; - use std::os::unix::process::ExitStatusExt; - use std::process::ExitStatus; - use std::rc::Rc; - - pub(crate) struct MockQueue { - pub(crate) all_enqueued: RefCell>, - } - - impl MockQueue { - pub(crate) fn new() -> Self { - Self { - all_enqueued: RefCell::new(Vec::new()), - } - } - } - - impl OrphanQueue for MockQueue { - fn push_orphan(&self, orphan: W) { - self.all_enqueued.borrow_mut().push(orphan); - } - } - - struct MockWait { - total_waits: Rc>, - num_wait_until_status: usize, - return_err: bool, - } - - impl MockWait { - fn new(num_wait_until_status: usize) -> Self { - Self { - total_waits: Rc::new(Cell::new(0)), - num_wait_until_status, - return_err: false, - } - } - - fn with_err() -> Self { - Self { - total_waits: Rc::new(Cell::new(0)), - num_wait_until_status: 0, - return_err: true, - } - } - } - - impl Wait for MockWait { 
- fn id(&self) -> u32 { - 42 - } - - fn try_wait(&mut self) -> io::Result> { - let waits = self.total_waits.get(); - - let ret = if self.num_wait_until_status == waits { - if self.return_err { - Ok(Some(ExitStatus::from_raw(0))) - } else { - Err(io::Error::new(io::ErrorKind::Other, "mock err")) - } - } else { - Ok(None) - }; - - self.total_waits.set(waits + 1); - ret - } - } - - #[test] - fn drain_attempts_a_single_reap_of_all_queued_orphans() { - let first_orphan = MockWait::new(0); - let second_orphan = MockWait::new(1); - let third_orphan = MockWait::new(2); - let fourth_orphan = MockWait::with_err(); - - let first_waits = first_orphan.total_waits.clone(); - let second_waits = second_orphan.total_waits.clone(); - let third_waits = third_orphan.total_waits.clone(); - let fourth_waits = fourth_orphan.total_waits.clone(); - - let orphanage = OrphanQueueImpl::new(); - orphanage.push_orphan(first_orphan); - orphanage.push_orphan(third_orphan); - orphanage.push_orphan(second_orphan); - orphanage.push_orphan(fourth_orphan); - - assert_eq!(orphanage.len(), 4); - - drain_orphan_queue(orphanage.queue.lock()); - assert_eq!(orphanage.len(), 2); - assert_eq!(first_waits.get(), 1); - assert_eq!(second_waits.get(), 1); - assert_eq!(third_waits.get(), 1); - assert_eq!(fourth_waits.get(), 1); - - drain_orphan_queue(orphanage.queue.lock()); - assert_eq!(orphanage.len(), 1); - assert_eq!(first_waits.get(), 1); - assert_eq!(second_waits.get(), 2); - assert_eq!(third_waits.get(), 2); - assert_eq!(fourth_waits.get(), 1); - - drain_orphan_queue(orphanage.queue.lock()); - assert_eq!(orphanage.len(), 0); - assert_eq!(first_waits.get(), 1); - assert_eq!(second_waits.get(), 2); - assert_eq!(third_waits.get(), 3); - assert_eq!(fourth_waits.get(), 1); - - // Safe to reap when empty - drain_orphan_queue(orphanage.queue.lock()); - } - - #[test] - fn no_reap_if_no_signal_received() { - let (tx, rx) = watch::channel(()); - - let handle = SignalHandle::default(); - - let orphanage = 
OrphanQueueImpl::new(); - *orphanage.sigchild.lock() = Some(rx); - - let orphan = MockWait::new(2); - let waits = orphan.total_waits.clone(); - orphanage.push_orphan(orphan); - - orphanage.reap_orphans(&handle); - assert_eq!(waits.get(), 0); - - orphanage.reap_orphans(&handle); - assert_eq!(waits.get(), 0); - - tx.send(()).unwrap(); - orphanage.reap_orphans(&handle); - assert_eq!(waits.get(), 1); - } - - #[test] - fn no_reap_if_signal_lock_held() { - let handle = SignalHandle::default(); - - let orphanage = OrphanQueueImpl::new(); - let signal_guard = orphanage.sigchild.lock(); - - let orphan = MockWait::new(2); - let waits = orphan.total_waits.clone(); - orphanage.push_orphan(orphan); - - orphanage.reap_orphans(&handle); - assert_eq!(waits.get(), 0); - - drop(signal_guard); - } - - #[cfg_attr(miri, ignore)] // Miri does not support epoll. - #[test] - fn does_not_register_signal_if_queue_empty() { - let (io_driver, io_handle) = IoDriver::new(1024).unwrap(); - let signal_driver = SignalDriver::new(io_driver, &io_handle).unwrap(); - let handle = signal_driver.handle(); - - let orphanage = OrphanQueueImpl::new(); - assert!(orphanage.sigchild.lock().is_none()); // Sanity - - // No register when queue empty - orphanage.reap_orphans(&handle); - assert!(orphanage.sigchild.lock().is_none()); - - let orphan = MockWait::new(2); - let waits = orphan.total_waits.clone(); - orphanage.push_orphan(orphan); - - orphanage.reap_orphans(&handle); - assert!(orphanage.sigchild.lock().is_some()); - assert_eq!(waits.get(), 1); // Eager reap when registering listener - } - - #[test] - fn does_nothing_if_signal_could_not_be_registered() { - let handle = SignalHandle::default(); - - let orphanage = OrphanQueueImpl::new(); - assert!(orphanage.sigchild.lock().is_none()); - - let orphan = MockWait::new(2); - let waits = orphan.total_waits.clone(); - orphanage.push_orphan(orphan); - - // Signal handler has "gone away", nothing to register or reap - orphanage.reap_orphans(&handle); - 
assert!(orphanage.sigchild.lock().is_none()); - assert_eq!(waits.get(), 0); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/process/unix/reap.rs s390-tools-2.33.1/rust-vendor/tokio/src/process/unix/reap.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/process/unix/reap.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/process/unix/reap.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,298 +0,0 @@ -use crate::process::imp::orphan::{OrphanQueue, Wait}; -use crate::process::kill::Kill; -use crate::signal::unix::InternalStream; - -use std::future::Future; -use std::io; -use std::ops::Deref; -use std::pin::Pin; -use std::process::ExitStatus; -use std::task::Context; -use std::task::Poll; - -/// Orchestrates between registering interest for receiving signals when a -/// child process has exited, and attempting to poll for process completion. -#[derive(Debug)] -pub(crate) struct Reaper -where - W: Wait, - Q: OrphanQueue, -{ - inner: Option, - orphan_queue: Q, - signal: S, -} - -impl Deref for Reaper -where - W: Wait, - Q: OrphanQueue, -{ - type Target = W; - - fn deref(&self) -> &Self::Target { - self.inner() - } -} - -impl Reaper -where - W: Wait, - Q: OrphanQueue, -{ - pub(crate) fn new(inner: W, orphan_queue: Q, signal: S) -> Self { - Self { - inner: Some(inner), - orphan_queue, - signal, - } - } - - fn inner(&self) -> &W { - self.inner.as_ref().expect("inner has gone away") - } - - pub(crate) fn inner_mut(&mut self) -> &mut W { - self.inner.as_mut().expect("inner has gone away") - } -} - -impl Future for Reaper -where - W: Wait + Unpin, - Q: OrphanQueue + Unpin, - S: InternalStream + Unpin, -{ - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - loop { - // If the child hasn't exited yet, then it's our responsibility to - // ensure the current task gets notified when it might be able to - // make progress. 
We can use the delivery of a SIGCHLD signal as a - // sign that we can potentially make progress. - // - // However, we will register for a notification on the next signal - // BEFORE we poll the child. Otherwise it is possible that the child - // can exit and the signal can arrive after we last polled the child, - // but before we've registered for a notification on the next signal - // (this can cause a deadlock if there are no more spawned children - // which can generate a different signal for us). A side effect of - // pre-registering for signal notifications is that when the child - // exits, we will have already registered for an additional - // notification we don't need to consume. If another signal arrives, - // this future's task will be notified/woken up again. Since the - // futures model allows for spurious wake ups this extra wakeup - // should not cause significant issues with parent futures. - let registered_interest = self.signal.poll_recv(cx).is_pending(); - - if let Some(status) = self.inner_mut().try_wait()? { - return Poll::Ready(Ok(status)); - } - - // If our attempt to poll for the next signal was not ready, then - // we've arranged for our task to get notified and we can bail out. - if registered_interest { - return Poll::Pending; - } else { - // Otherwise, if the signal stream delivered a signal to us, we - // won't get notified at the next signal, so we'll loop and try - // again. 
- continue; - } - } - } -} - -impl Kill for Reaper -where - W: Kill + Wait, - Q: OrphanQueue, -{ - fn kill(&mut self) -> io::Result<()> { - self.inner_mut().kill() - } -} - -impl Drop for Reaper -where - W: Wait, - Q: OrphanQueue, -{ - fn drop(&mut self) { - if let Ok(Some(_)) = self.inner_mut().try_wait() { - return; - } - - let orphan = self.inner.take().unwrap(); - self.orphan_queue.push_orphan(orphan); - } -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::*; - - use crate::process::unix::orphan::test::MockQueue; - use futures::future::FutureExt; - use std::os::unix::process::ExitStatusExt; - use std::process::ExitStatus; - use std::task::Context; - use std::task::Poll; - - #[derive(Debug)] - struct MockWait { - total_kills: usize, - total_waits: usize, - num_wait_until_status: usize, - status: ExitStatus, - } - - impl MockWait { - fn new(status: ExitStatus, num_wait_until_status: usize) -> Self { - Self { - total_kills: 0, - total_waits: 0, - num_wait_until_status, - status, - } - } - } - - impl Wait for MockWait { - fn id(&self) -> u32 { - 0 - } - - fn try_wait(&mut self) -> io::Result> { - let ret = if self.num_wait_until_status == self.total_waits { - Some(self.status) - } else { - None - }; - - self.total_waits += 1; - Ok(ret) - } - } - - impl Kill for MockWait { - fn kill(&mut self) -> io::Result<()> { - self.total_kills += 1; - Ok(()) - } - } - - struct MockStream { - total_polls: usize, - values: Vec>, - } - - impl MockStream { - fn new(values: Vec>) -> Self { - Self { - total_polls: 0, - values, - } - } - } - - impl InternalStream for MockStream { - fn poll_recv(&mut self, _cx: &mut Context<'_>) -> Poll> { - self.total_polls += 1; - match self.values.remove(0) { - Some(()) => Poll::Ready(Some(())), - None => Poll::Pending, - } - } - } - - #[test] - fn reaper() { - let exit = ExitStatus::from_raw(0); - let mock = MockWait::new(exit, 3); - let mut grim = Reaper::new( - mock, - MockQueue::new(), - MockStream::new(vec![None, Some(()), None, None, 
None]), - ); - - let waker = futures::task::noop_waker(); - let mut context = Context::from_waker(&waker); - - // Not yet exited, interest registered - assert!(grim.poll_unpin(&mut context).is_pending()); - assert_eq!(1, grim.signal.total_polls); - assert_eq!(1, grim.total_waits); - assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); - - // Not yet exited, couldn't register interest the first time - // but managed to register interest the second time around - assert!(grim.poll_unpin(&mut context).is_pending()); - assert_eq!(3, grim.signal.total_polls); - assert_eq!(3, grim.total_waits); - assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); - - // Exited - if let Poll::Ready(r) = grim.poll_unpin(&mut context) { - assert!(r.is_ok()); - let exit_code = r.unwrap(); - assert_eq!(exit_code, exit); - } else { - unreachable!(); - } - assert_eq!(4, grim.signal.total_polls); - assert_eq!(4, grim.total_waits); - assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); - } - - #[test] - fn kill() { - let exit = ExitStatus::from_raw(0); - let mut grim = Reaper::new( - MockWait::new(exit, 0), - MockQueue::new(), - MockStream::new(vec![None]), - ); - - grim.kill().unwrap(); - assert_eq!(1, grim.total_kills); - assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); - } - - #[test] - fn drop_reaps_if_possible() { - let exit = ExitStatus::from_raw(0); - let mut mock = MockWait::new(exit, 0); - - { - let queue = MockQueue::new(); - - let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![])); - - drop(grim); - - assert!(queue.all_enqueued.borrow().is_empty()); - } - - assert_eq!(1, mock.total_waits); - assert_eq!(0, mock.total_kills); - } - - #[test] - fn drop_enqueues_orphan_if_wait_fails() { - let exit = ExitStatus::from_raw(0); - let mut mock = MockWait::new(exit, 2); - - { - let queue = MockQueue::<&mut MockWait>::new(); - let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![])); - drop(grim); - - assert_eq!(1, 
queue.all_enqueued.borrow().len()); - } - - assert_eq!(1, mock.total_waits); - assert_eq!(0, mock.total_kills); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/process/windows.rs s390-tools-2.33.1/rust-vendor/tokio/src/process/windows.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/process/windows.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/process/windows.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,283 +0,0 @@ -//! Windows asynchronous process handling. -//! -//! Like with Unix we don't actually have a way of registering a process with an -//! IOCP object. As a result we similarly need another mechanism for getting a -//! signal when a process has exited. For now this is implemented with the -//! `RegisterWaitForSingleObject` function in the kernel32.dll. -//! -//! This strategy is the same that libuv takes and essentially just queues up a -//! wait for the process in a kernel32-specific thread pool. Once the object is -//! notified (e.g. the process exits) then we have a callback that basically -//! just completes a `Oneshot`. -//! -//! The `poll_exit` implementation will attempt to wait for the process in a -//! nonblocking fashion, but failing that it'll fire off a -//! `RegisterWaitForSingleObject` and then wait on the other end of the oneshot -//! from then on out. 
- -use crate::io::{blocking::Blocking, AsyncRead, AsyncWrite, ReadBuf}; -use crate::process::kill::Kill; -use crate::process::SpawnedChild; -use crate::sync::oneshot; - -use std::fmt; -use std::fs::File as StdFile; -use std::future::Future; -use std::io; -use std::os::windows::prelude::{AsRawHandle, IntoRawHandle, OwnedHandle, RawHandle}; -use std::pin::Pin; -use std::process::Stdio; -use std::process::{Child as StdChild, Command as StdCommand, ExitStatus}; -use std::sync::Arc; -use std::task::{Context, Poll}; - -use windows_sys::{ - Win32::Foundation::{ - DuplicateHandle, BOOLEAN, DUPLICATE_SAME_ACCESS, HANDLE, INVALID_HANDLE_VALUE, - }, - Win32::System::Threading::{ - GetCurrentProcess, RegisterWaitForSingleObject, UnregisterWaitEx, INFINITE, - WT_EXECUTEINWAITTHREAD, WT_EXECUTEONLYONCE, - }, -}; - -#[must_use = "futures do nothing unless polled"] -pub(crate) struct Child { - child: StdChild, - waiting: Option, -} - -impl fmt::Debug for Child { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Child") - .field("pid", &self.id()) - .field("child", &self.child) - .field("waiting", &"..") - .finish() - } -} - -struct Waiting { - rx: oneshot::Receiver<()>, - wait_object: HANDLE, - tx: *mut Option>, -} - -unsafe impl Sync for Waiting {} -unsafe impl Send for Waiting {} - -pub(crate) fn spawn_child(cmd: &mut StdCommand) -> io::Result { - let mut child = cmd.spawn()?; - let stdin = child.stdin.take().map(stdio).transpose()?; - let stdout = child.stdout.take().map(stdio).transpose()?; - let stderr = child.stderr.take().map(stdio).transpose()?; - - Ok(SpawnedChild { - child: Child { - child, - waiting: None, - }, - stdin, - stdout, - stderr, - }) -} - -impl Child { - pub(crate) fn id(&self) -> u32 { - self.child.id() - } - - pub(crate) fn try_wait(&mut self) -> io::Result> { - self.child.try_wait() - } -} - -impl Kill for Child { - fn kill(&mut self) -> io::Result<()> { - self.child.kill() - } -} - -impl Future for Child { - type Output = 
io::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let inner = Pin::get_mut(self); - loop { - if let Some(ref mut w) = inner.waiting { - match Pin::new(&mut w.rx).poll(cx) { - Poll::Ready(Ok(())) => {} - Poll::Ready(Err(_)) => panic!("should not be canceled"), - Poll::Pending => return Poll::Pending, - } - let status = inner.try_wait()?.expect("not ready yet"); - return Poll::Ready(Ok(status)); - } - - if let Some(e) = inner.try_wait()? { - return Poll::Ready(Ok(e)); - } - let (tx, rx) = oneshot::channel(); - let ptr = Box::into_raw(Box::new(Some(tx))); - let mut wait_object = 0; - let rc = unsafe { - RegisterWaitForSingleObject( - &mut wait_object, - inner.child.as_raw_handle() as _, - Some(callback), - ptr as *mut _, - INFINITE, - WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE, - ) - }; - if rc == 0 { - let err = io::Error::last_os_error(); - drop(unsafe { Box::from_raw(ptr) }); - return Poll::Ready(Err(err)); - } - inner.waiting = Some(Waiting { - rx, - wait_object, - tx: ptr, - }); - } - } -} - -impl AsRawHandle for Child { - fn as_raw_handle(&self) -> RawHandle { - self.child.as_raw_handle() - } -} - -impl Drop for Waiting { - fn drop(&mut self) { - unsafe { - let rc = UnregisterWaitEx(self.wait_object, INVALID_HANDLE_VALUE); - if rc == 0 { - panic!("failed to unregister: {}", io::Error::last_os_error()); - } - drop(Box::from_raw(self.tx)); - } - } -} - -unsafe extern "system" fn callback(ptr: *mut std::ffi::c_void, _timer_fired: BOOLEAN) { - let complete = &mut *(ptr as *mut Option>); - let _ = complete.take().unwrap().send(()); -} - -#[derive(Debug)] -struct ArcFile(Arc); - -impl io::Read for ArcFile { - fn read(&mut self, bytes: &mut [u8]) -> io::Result { - (&*self.0).read(bytes) - } -} - -impl io::Write for ArcFile { - fn write(&mut self, bytes: &[u8]) -> io::Result { - (&*self.0).write(bytes) - } - - fn flush(&mut self) -> io::Result<()> { - (&*self.0).flush() - } -} - -#[derive(Debug)] -pub(crate) struct ChildStdio { - // Used 
for accessing the raw handle, even if the io version is busy - raw: Arc, - // For doing I/O operations asynchronously - io: Blocking, -} - -impl ChildStdio { - pub(super) fn into_owned_handle(self) -> io::Result { - convert_to_file(self).map(OwnedHandle::from) - } -} - -impl AsRawHandle for ChildStdio { - fn as_raw_handle(&self) -> RawHandle { - self.raw.as_raw_handle() - } -} - -impl AsyncRead for ChildStdio { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - Pin::new(&mut self.io).poll_read(cx, buf) - } -} - -impl AsyncWrite for ChildStdio { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.io).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.io).poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.io).poll_shutdown(cx) - } -} - -pub(super) fn stdio(io: T) -> io::Result -where - T: IntoRawHandle, -{ - use std::os::windows::prelude::FromRawHandle; - - let raw = Arc::new(unsafe { StdFile::from_raw_handle(io.into_raw_handle()) }); - let io = Blocking::new(ArcFile(raw.clone())); - Ok(ChildStdio { raw, io }) -} - -fn convert_to_file(child_stdio: ChildStdio) -> io::Result { - let ChildStdio { raw, io } = child_stdio; - drop(io); // Try to drop the Arc count here - - Arc::try_unwrap(raw).or_else(|raw| duplicate_handle(&*raw)) -} - -pub(crate) fn convert_to_stdio(child_stdio: ChildStdio) -> io::Result { - convert_to_file(child_stdio).map(Stdio::from) -} - -fn duplicate_handle(io: &T) -> io::Result { - use std::os::windows::prelude::FromRawHandle; - - unsafe { - let mut dup_handle = INVALID_HANDLE_VALUE; - let cur_proc = GetCurrentProcess(); - - let status = DuplicateHandle( - cur_proc, - io.as_raw_handle() as _, - cur_proc, - &mut dup_handle, - 0, - 0, - DUPLICATE_SAME_ACCESS, - ); - - if status == 0 { - 
return Err(io::Error::last_os_error()); - } - - Ok(StdFile::from_raw_handle(dup_handle as _)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -//! Abstracts out the APIs necessary to `Runtime` for integrating the blocking -//! pool. When the `blocking` feature flag is **not** enabled, these APIs are -//! shells. This isolates the complexity of dealing with conditional -//! compilation. - -mod pool; -pub(crate) use pool::{spawn_blocking, BlockingPool, Spawner}; - -cfg_fs! { - pub(crate) use pool::spawn_mandatory_blocking; -} - -cfg_trace! { - pub(crate) use pool::Mandatory; -} - -mod schedule; -mod shutdown; -mod task; -pub(crate) use task::BlockingTask; - -use crate::runtime::Builder; - -pub(crate) fn create_blocking_pool(builder: &Builder, thread_cap: usize) -> BlockingPool { - BlockingPool::new(builder, thread_cap) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/pool.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/pool.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/pool.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/pool.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,602 +0,0 @@ -//! 
Thread pool for blocking operations - -use crate::loom::sync::{Arc, Condvar, Mutex}; -use crate::loom::thread; -use crate::runtime::blocking::schedule::BlockingSchedule; -use crate::runtime::blocking::{shutdown, BlockingTask}; -use crate::runtime::builder::ThreadNameFn; -use crate::runtime::task::{self, JoinHandle}; -use crate::runtime::{Builder, Callback, Handle}; - -use std::collections::{HashMap, VecDeque}; -use std::fmt; -use std::io; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::time::Duration; - -pub(crate) struct BlockingPool { - spawner: Spawner, - shutdown_rx: shutdown::Receiver, -} - -#[derive(Clone)] -pub(crate) struct Spawner { - inner: Arc, -} - -#[derive(Default)] -pub(crate) struct SpawnerMetrics { - num_threads: AtomicUsize, - num_idle_threads: AtomicUsize, - queue_depth: AtomicUsize, -} - -impl SpawnerMetrics { - fn num_threads(&self) -> usize { - self.num_threads.load(Ordering::Relaxed) - } - - fn num_idle_threads(&self) -> usize { - self.num_idle_threads.load(Ordering::Relaxed) - } - - cfg_metrics! { - fn queue_depth(&self) -> usize { - self.queue_depth.load(Ordering::Relaxed) - } - } - - fn inc_num_threads(&self) { - self.num_threads.fetch_add(1, Ordering::Relaxed); - } - - fn dec_num_threads(&self) { - self.num_threads.fetch_sub(1, Ordering::Relaxed); - } - - fn inc_num_idle_threads(&self) { - self.num_idle_threads.fetch_add(1, Ordering::Relaxed); - } - - fn dec_num_idle_threads(&self) -> usize { - self.num_idle_threads.fetch_sub(1, Ordering::Relaxed) - } - - fn inc_queue_depth(&self) { - self.queue_depth.fetch_add(1, Ordering::Relaxed); - } - - fn dec_queue_depth(&self) { - self.queue_depth.fetch_sub(1, Ordering::Relaxed); - } -} - -struct Inner { - /// State shared between worker threads. - shared: Mutex, - - /// Pool threads wait on this. - condvar: Condvar, - - /// Spawned threads use this name. - thread_name: ThreadNameFn, - - /// Spawned thread stack size. - stack_size: Option, - - /// Call after a thread starts. 
- after_start: Option, - - /// Call before a thread stops. - before_stop: Option, - - // Maximum number of threads. - thread_cap: usize, - - // Customizable wait timeout. - keep_alive: Duration, - - // Metrics about the pool. - metrics: SpawnerMetrics, -} - -struct Shared { - queue: VecDeque, - num_notify: u32, - shutdown: bool, - shutdown_tx: Option, - /// Prior to shutdown, we clean up JoinHandles by having each timed-out - /// thread join on the previous timed-out thread. This is not strictly - /// necessary but helps avoid Valgrind false positives, see - /// - /// for more information. - last_exiting_thread: Option>, - /// This holds the JoinHandles for all running threads; on shutdown, the thread - /// calling shutdown handles joining on these. - worker_threads: HashMap>, - /// This is a counter used to iterate worker_threads in a consistent order (for loom's - /// benefit). - worker_thread_index: usize, -} - -pub(crate) struct Task { - task: task::UnownedTask, - mandatory: Mandatory, -} - -#[derive(PartialEq, Eq)] -pub(crate) enum Mandatory { - #[cfg_attr(not(fs), allow(dead_code))] - Mandatory, - NonMandatory, -} - -pub(crate) enum SpawnError { - /// Pool is shutting down and the task was not scheduled - ShuttingDown, - /// There are no worker threads available to take the task - /// and the OS failed to spawn a new one - NoThreads(io::Error), -} - -impl From for io::Error { - fn from(e: SpawnError) -> Self { - match e { - SpawnError::ShuttingDown => { - io::Error::new(io::ErrorKind::Other, "blocking pool shutting down") - } - SpawnError::NoThreads(e) => e, - } - } -} - -impl Task { - pub(crate) fn new(task: task::UnownedTask, mandatory: Mandatory) -> Task { - Task { task, mandatory } - } - - fn run(self) { - self.task.run(); - } - - fn shutdown_or_run_if_mandatory(self) { - match self.mandatory { - Mandatory::NonMandatory => self.task.shutdown(), - Mandatory::Mandatory => self.task.run(), - } - } -} - -const KEEP_ALIVE: Duration = Duration::from_secs(10); - 
-/// Runs the provided function on an executor dedicated to blocking operations. -/// Tasks will be scheduled as non-mandatory, meaning they may not get executed -/// in case of runtime shutdown. -#[track_caller] -#[cfg_attr(target_os = "wasi", allow(dead_code))] -pub(crate) fn spawn_blocking(func: F) -> JoinHandle -where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, -{ - let rt = Handle::current(); - rt.spawn_blocking(func) -} - -cfg_fs! { - #[cfg_attr(any( - all(loom, not(test)), // the function is covered by loom tests - test - ), allow(dead_code))] - /// Runs the provided function on an executor dedicated to blocking - /// operations. Tasks will be scheduled as mandatory, meaning they are - /// guaranteed to run unless a shutdown is already taking place. In case a - /// shutdown is already taking place, `None` will be returned. - pub(crate) fn spawn_mandatory_blocking(func: F) -> Option> - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - let rt = Handle::current(); - rt.inner.blocking_spawner().spawn_mandatory_blocking(&rt, func) - } -} - -// ===== impl BlockingPool ===== - -impl BlockingPool { - pub(crate) fn new(builder: &Builder, thread_cap: usize) -> BlockingPool { - let (shutdown_tx, shutdown_rx) = shutdown::channel(); - let keep_alive = builder.keep_alive.unwrap_or(KEEP_ALIVE); - - BlockingPool { - spawner: Spawner { - inner: Arc::new(Inner { - shared: Mutex::new(Shared { - queue: VecDeque::new(), - num_notify: 0, - shutdown: false, - shutdown_tx: Some(shutdown_tx), - last_exiting_thread: None, - worker_threads: HashMap::new(), - worker_thread_index: 0, - }), - condvar: Condvar::new(), - thread_name: builder.thread_name.clone(), - stack_size: builder.thread_stack_size, - after_start: builder.after_start.clone(), - before_stop: builder.before_stop.clone(), - thread_cap, - keep_alive, - metrics: Default::default(), - }), - }, - shutdown_rx, - } - } - - pub(crate) fn spawner(&self) -> &Spawner { - &self.spawner - } - - 
pub(crate) fn shutdown(&mut self, timeout: Option) { - let mut shared = self.spawner.inner.shared.lock(); - - // The function can be called multiple times. First, by explicitly - // calling `shutdown` then by the drop handler calling `shutdown`. This - // prevents shutting down twice. - if shared.shutdown { - return; - } - - shared.shutdown = true; - shared.shutdown_tx = None; - self.spawner.inner.condvar.notify_all(); - - let last_exited_thread = std::mem::take(&mut shared.last_exiting_thread); - let workers = std::mem::take(&mut shared.worker_threads); - - drop(shared); - - if self.shutdown_rx.wait(timeout) { - let _ = last_exited_thread.map(|th| th.join()); - - // Loom requires that execution be deterministic, so sort by thread ID before joining. - // (HashMaps use a randomly-seeded hash function, so the order is nondeterministic) - let mut workers: Vec<(usize, thread::JoinHandle<()>)> = workers.into_iter().collect(); - workers.sort_by_key(|(id, _)| *id); - - for (_id, handle) in workers.into_iter() { - let _ = handle.join(); - } - } - } -} - -impl Drop for BlockingPool { - fn drop(&mut self) { - self.shutdown(None); - } -} - -impl fmt::Debug for BlockingPool { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("BlockingPool").finish() - } -} - -// ===== impl Spawner ===== - -impl Spawner { - #[track_caller] - pub(crate) fn spawn_blocking(&self, rt: &Handle, func: F) -> JoinHandle - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - let (join_handle, spawn_result) = - if cfg!(debug_assertions) && std::mem::size_of::() > 2048 { - self.spawn_blocking_inner(Box::new(func), Mandatory::NonMandatory, None, rt) - } else { - self.spawn_blocking_inner(func, Mandatory::NonMandatory, None, rt) - }; - - match spawn_result { - Ok(()) => join_handle, - // Compat: do not panic here, return the join_handle even though it will never resolve - Err(SpawnError::ShuttingDown) => join_handle, - Err(SpawnError::NoThreads(e)) => { - 
panic!("OS can't spawn worker thread: {}", e) - } - } - } - - cfg_fs! { - #[track_caller] - #[cfg_attr(any( - all(loom, not(test)), // the function is covered by loom tests - test - ), allow(dead_code))] - pub(crate) fn spawn_mandatory_blocking(&self, rt: &Handle, func: F) -> Option> - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - let (join_handle, spawn_result) = if cfg!(debug_assertions) && std::mem::size_of::() > 2048 { - self.spawn_blocking_inner( - Box::new(func), - Mandatory::Mandatory, - None, - rt, - ) - } else { - self.spawn_blocking_inner( - func, - Mandatory::Mandatory, - None, - rt, - ) - }; - - if spawn_result.is_ok() { - Some(join_handle) - } else { - None - } - } - } - - #[track_caller] - pub(crate) fn spawn_blocking_inner( - &self, - func: F, - is_mandatory: Mandatory, - name: Option<&str>, - rt: &Handle, - ) -> (JoinHandle, Result<(), SpawnError>) - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - let fut = BlockingTask::new(func); - let id = task::Id::next(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let fut = { - use tracing::Instrument; - let location = std::panic::Location::caller(); - let span = tracing::trace_span!( - target: "tokio::task::blocking", - "runtime.spawn", - kind = %"blocking", - task.name = %name.unwrap_or_default(), - task.id = id.as_u64(), - "fn" = %std::any::type_name::(), - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ); - fut.instrument(span) - }; - - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let _ = name; - - let (task, handle) = task::unowned(fut, BlockingSchedule::new(rt), id); - - let spawned = self.spawn_task(Task::new(task, is_mandatory), rt); - (handle, spawned) - } - - fn spawn_task(&self, task: Task, rt: &Handle) -> Result<(), SpawnError> { - let mut shared = self.inner.shared.lock(); - - if shared.shutdown { - // Shutdown the task: it's fine to shutdown this task (even if - // mandatory) because it 
was scheduled after the shutdown of the - // runtime began. - task.task.shutdown(); - - // no need to even push this task; it would never get picked up - return Err(SpawnError::ShuttingDown); - } - - shared.queue.push_back(task); - self.inner.metrics.inc_queue_depth(); - - if self.inner.metrics.num_idle_threads() == 0 { - // No threads are able to process the task. - - if self.inner.metrics.num_threads() == self.inner.thread_cap { - // At max number of threads - } else { - assert!(shared.shutdown_tx.is_some()); - let shutdown_tx = shared.shutdown_tx.clone(); - - if let Some(shutdown_tx) = shutdown_tx { - let id = shared.worker_thread_index; - - match self.spawn_thread(shutdown_tx, rt, id) { - Ok(handle) => { - self.inner.metrics.inc_num_threads(); - shared.worker_thread_index += 1; - shared.worker_threads.insert(id, handle); - } - Err(ref e) - if is_temporary_os_thread_error(e) - && self.inner.metrics.num_threads() > 0 => - { - // OS temporarily failed to spawn a new thread. - // The task will be picked up eventually by a currently - // busy thread. - } - Err(e) => { - // The OS refused to spawn the thread and there is no thread - // to pick up the task that has just been pushed to the queue. - return Err(SpawnError::NoThreads(e)); - } - } - } - } - } else { - // Notify an idle worker thread. The notification counter - // is used to count the needed amount of notifications - // exactly. Thread libraries may generate spurious - // wakeups, this counter is used to keep us in a - // consistent state. 
- self.inner.metrics.dec_num_idle_threads(); - shared.num_notify += 1; - self.inner.condvar.notify_one(); - } - - Ok(()) - } - - fn spawn_thread( - &self, - shutdown_tx: shutdown::Sender, - rt: &Handle, - id: usize, - ) -> std::io::Result> { - let mut builder = thread::Builder::new().name((self.inner.thread_name)()); - - if let Some(stack_size) = self.inner.stack_size { - builder = builder.stack_size(stack_size); - } - - let rt = rt.clone(); - - builder.spawn(move || { - // Only the reference should be moved into the closure - let _enter = rt.enter(); - rt.inner.blocking_spawner().inner.run(id); - drop(shutdown_tx); - }) - } -} - -cfg_metrics! { - impl Spawner { - pub(crate) fn num_threads(&self) -> usize { - self.inner.metrics.num_threads() - } - - pub(crate) fn num_idle_threads(&self) -> usize { - self.inner.metrics.num_idle_threads() - } - - pub(crate) fn queue_depth(&self) -> usize { - self.inner.metrics.queue_depth() - } - } -} - -// Tells whether the error when spawning a thread is temporary. -#[inline] -fn is_temporary_os_thread_error(error: &std::io::Error) -> bool { - matches!(error.kind(), std::io::ErrorKind::WouldBlock) -} - -impl Inner { - fn run(&self, worker_thread_id: usize) { - if let Some(f) = &self.after_start { - f() - } - - let mut shared = self.shared.lock(); - let mut join_on_thread = None; - - 'main: loop { - // BUSY - while let Some(task) = shared.queue.pop_front() { - self.metrics.dec_queue_depth(); - drop(shared); - task.run(); - - shared = self.shared.lock(); - } - - // IDLE - self.metrics.inc_num_idle_threads(); - - while !shared.shutdown { - let lock_result = self.condvar.wait_timeout(shared, self.keep_alive).unwrap(); - - shared = lock_result.0; - let timeout_result = lock_result.1; - - if shared.num_notify != 0 { - // We have received a legitimate wakeup, - // acknowledge it by decrementing the counter - // and transition to the BUSY state. 
- shared.num_notify -= 1; - break; - } - - // Even if the condvar "timed out", if the pool is entering the - // shutdown phase, we want to perform the cleanup logic. - if !shared.shutdown && timeout_result.timed_out() { - // We'll join the prior timed-out thread's JoinHandle after dropping the lock. - // This isn't done when shutting down, because the thread calling shutdown will - // handle joining everything. - let my_handle = shared.worker_threads.remove(&worker_thread_id); - join_on_thread = std::mem::replace(&mut shared.last_exiting_thread, my_handle); - - break 'main; - } - - // Spurious wakeup detected, go back to sleep. - } - - if shared.shutdown { - // Drain the queue - while let Some(task) = shared.queue.pop_front() { - self.metrics.dec_queue_depth(); - drop(shared); - - task.shutdown_or_run_if_mandatory(); - - shared = self.shared.lock(); - } - - // Work was produced, and we "took" it (by decrementing num_notify). - // This means that num_idle was decremented once for our wakeup. - // But, since we are exiting, we need to "undo" that, as we'll stay idle. - self.metrics.inc_num_idle_threads(); - // NOTE: Technically we should also do num_notify++ and notify again, - // but since we're shutting down anyway, that won't be necessary. - break; - } - } - - // Thread exit - self.metrics.dec_num_threads(); - - // num_idle should now be tracked exactly, panic - // with a descriptive message if it is not the - // case. 
- let prev_idle = self.metrics.dec_num_idle_threads(); - if prev_idle < self.metrics.num_idle_threads() { - panic!("num_idle_threads underflowed on thread exit") - } - - if shared.shutdown && self.metrics.num_threads() == 0 { - self.condvar.notify_one(); - } - - drop(shared); - - if let Some(f) = &self.before_stop { - f() - } - - if let Some(handle) = join_on_thread { - let _ = handle.join(); - } - } -} - -impl fmt::Debug for Spawner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("blocking::Spawner").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/schedule.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/schedule.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/schedule.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/schedule.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -#[cfg(feature = "test-util")] -use crate::runtime::scheduler; -use crate::runtime::task::{self, Task}; -use crate::runtime::Handle; - -/// `task::Schedule` implementation that does nothing (except some bookkeeping -/// in test-util builds). This is unique to the blocking scheduler as tasks -/// scheduled are not really futures but blocking operations. -/// -/// We avoid storing the task by forgetting it in `bind` and re-materializing it -/// in `release`. 
-pub(crate) struct BlockingSchedule { - #[cfg(feature = "test-util")] - handle: Handle, -} - -impl BlockingSchedule { - #[cfg_attr(not(feature = "test-util"), allow(unused_variables))] - pub(crate) fn new(handle: &Handle) -> Self { - #[cfg(feature = "test-util")] - { - match &handle.inner { - scheduler::Handle::CurrentThread(handle) => { - handle.driver.clock.inhibit_auto_advance(); - } - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThread(_) => {} - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThreadAlt(_) => {} - } - } - BlockingSchedule { - #[cfg(feature = "test-util")] - handle: handle.clone(), - } - } -} - -impl task::Schedule for BlockingSchedule { - fn release(&self, _task: &Task) -> Option> { - #[cfg(feature = "test-util")] - { - match &self.handle.inner { - scheduler::Handle::CurrentThread(handle) => { - handle.driver.clock.allow_auto_advance(); - handle.driver.unpark(); - } - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThread(_) => {} - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThreadAlt(_) => {} - } - } - None - } - - fn schedule(&self, _task: task::Notified) { - unreachable!(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/shutdown.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/shutdown.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/shutdown.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/shutdown.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,71 +0,0 @@ -//! A shutdown channel. -//! -//! Each worker holds the `Sender` half. When all the `Sender` halves are -//! dropped, the `Receiver` receives a notification. 
- -use crate::loom::sync::Arc; -use crate::sync::oneshot; - -use std::time::Duration; - -#[derive(Debug, Clone)] -pub(super) struct Sender { - _tx: Arc>, -} - -#[derive(Debug)] -pub(super) struct Receiver { - rx: oneshot::Receiver<()>, -} - -pub(super) fn channel() -> (Sender, Receiver) { - let (tx, rx) = oneshot::channel(); - let tx = Sender { _tx: Arc::new(tx) }; - let rx = Receiver { rx }; - - (tx, rx) -} - -impl Receiver { - /// Blocks the current thread until all `Sender` handles drop. - /// - /// If `timeout` is `Some`, the thread is blocked for **at most** `timeout` - /// duration. If `timeout` is `None`, then the thread is blocked until the - /// shutdown signal is received. - /// - /// If the timeout has elapsed, it returns `false`, otherwise it returns `true`. - pub(crate) fn wait(&mut self, timeout: Option) -> bool { - use crate::runtime::context::try_enter_blocking_region; - - if timeout == Some(Duration::from_nanos(0)) { - return false; - } - - let mut e = match try_enter_blocking_region() { - Some(enter) => enter, - _ => { - if std::thread::panicking() { - // Don't panic in a panic - return false; - } else { - panic!( - "Cannot drop a runtime in a context where blocking is not allowed. \ - This happens when a runtime is dropped from within an asynchronous context." - ); - } - } - }; - - // The oneshot completes with an Err - // - // If blocking fails to wait, this indicates a problem parking the - // current thread (usually, shutting down a runtime stored in a - // thread-local). 
- if let Some(timeout) = timeout { - e.block_on_timeout(&mut self.rx, timeout).is_ok() - } else { - let _ = e.block_on(&mut self.rx); - true - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/task.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/task.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/blocking/task.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/blocking/task.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,44 +0,0 @@ -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Converts a function to a future that completes on poll. -pub(crate) struct BlockingTask { - func: Option, -} - -impl BlockingTask { - /// Initializes a new blocking task from the given function. - pub(crate) fn new(func: T) -> BlockingTask { - BlockingTask { func: Some(func) } - } -} - -// The closure `F` is never pinned -impl Unpin for BlockingTask {} - -impl Future for BlockingTask -where - T: FnOnce() -> R + Send + 'static, - R: Send + 'static, -{ - type Output = R; - - fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - let me = &mut *self; - let func = me - .func - .take() - .expect("[internal exception] blocking task ran twice."); - - // This is a little subtle: - // For convenience, we'd like _every_ call tokio ever makes to Task::poll() to be budgeted - // using coop. However, the way things are currently modeled, even running a blocking task - // currently goes through Task::poll(), and so is subject to budgeting. That isn't really - // what we want; a blocking task may itself want to run tasks (it might be a Worker!), so - // we want it to start without any budgeting. 
- crate::runtime::coop::stop(); - - Poll::Ready(func()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/builder.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/builder.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/builder.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/builder.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1336 +0,0 @@ -use crate::runtime::handle::Handle; -use crate::runtime::{blocking, driver, Callback, HistogramBuilder, Runtime}; -use crate::util::rand::{RngSeed, RngSeedGenerator}; - -use std::fmt; -use std::io; -use std::time::Duration; - -/// Builds Tokio Runtime with custom configuration values. -/// -/// Methods can be chained in order to set the configuration values. The -/// Runtime is constructed by calling [`build`]. -/// -/// New instances of `Builder` are obtained via [`Builder::new_multi_thread`] -/// or [`Builder::new_current_thread`]. -/// -/// See function level documentation for details on the various configuration -/// settings. -/// -/// [`build`]: method@Self::build -/// [`Builder::new_multi_thread`]: method@Self::new_multi_thread -/// [`Builder::new_current_thread`]: method@Self::new_current_thread -/// -/// # Examples -/// -/// ``` -/// use tokio::runtime::Builder; -/// -/// fn main() { -/// // build runtime -/// let runtime = Builder::new_multi_thread() -/// .worker_threads(4) -/// .thread_name("my-custom-name") -/// .thread_stack_size(3 * 1024 * 1024) -/// .build() -/// .unwrap(); -/// -/// // use runtime ... -/// } -/// ``` -pub struct Builder { - /// Runtime type - kind: Kind, - - /// Whether or not to enable the I/O driver - enable_io: bool, - nevents: usize, - - /// Whether or not to enable the time driver - enable_time: bool, - - /// Whether or not the clock should start paused. - start_paused: bool, - - /// The number of worker threads, used by Runtime. - /// - /// Only used when not using the current-thread executor. 
- worker_threads: Option, - - /// Cap on thread usage. - max_blocking_threads: usize, - - /// Name fn used for threads spawned by the runtime. - pub(super) thread_name: ThreadNameFn, - - /// Stack size used for threads spawned by the runtime. - pub(super) thread_stack_size: Option, - - /// Callback to run after each thread starts. - pub(super) after_start: Option, - - /// To run before each worker thread stops - pub(super) before_stop: Option, - - /// To run before each worker thread is parked. - pub(super) before_park: Option, - - /// To run after each thread is unparked. - pub(super) after_unpark: Option, - - /// Customizable keep alive timeout for BlockingPool - pub(super) keep_alive: Option, - - /// How many ticks before pulling a task from the global/remote queue? - /// - /// When `None`, the value is unspecified and behavior details are left to - /// the scheduler. Each scheduler flavor could choose to either pick its own - /// default value or use some other strategy to decide when to poll from the - /// global queue. For example, the multi-threaded scheduler uses a - /// self-tuning strategy based on mean task poll times. - pub(super) global_queue_interval: Option, - - /// How many ticks before yielding to the driver for timer and I/O events? - pub(super) event_interval: u32, - - pub(super) local_queue_capacity: usize, - - /// When true, the multi-threade scheduler LIFO slot should not be used. - /// - /// This option should only be exposed as unstable. - pub(super) disable_lifo_slot: bool, - - /// Specify a random number generator seed to provide deterministic results - pub(super) seed_generator: RngSeedGenerator, - - /// When true, enables task poll count histogram instrumentation. - pub(super) metrics_poll_count_histogram_enable: bool, - - /// Configures the task poll count histogram - pub(super) metrics_poll_count_histogram: HistogramBuilder, - - #[cfg(tokio_unstable)] - pub(super) unhandled_panic: UnhandledPanic, -} - -cfg_unstable! 
{ - /// How the runtime should respond to unhandled panics. - /// - /// Instances of `UnhandledPanic` are passed to `Builder::unhandled_panic` - /// to configure the runtime behavior when a spawned task panics. - /// - /// See [`Builder::unhandled_panic`] for more details. - #[derive(Debug, Clone)] - #[non_exhaustive] - pub enum UnhandledPanic { - /// The runtime should ignore panics on spawned tasks. - /// - /// The panic is forwarded to the task's [`JoinHandle`] and all spawned - /// tasks continue running normally. - /// - /// This is the default behavior. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, UnhandledPanic}; - /// - /// # pub fn main() { - /// let rt = runtime::Builder::new_current_thread() - /// .unhandled_panic(UnhandledPanic::Ignore) - /// .build() - /// .unwrap(); - /// - /// let task1 = rt.spawn(async { panic!("boom"); }); - /// let task2 = rt.spawn(async { - /// // This task completes normally - /// "done" - /// }); - /// - /// rt.block_on(async { - /// // The panic on the first task is forwarded to the `JoinHandle` - /// assert!(task1.await.is_err()); - /// - /// // The second task completes normally - /// assert!(task2.await.is_ok()); - /// }) - /// # } - /// ``` - /// - /// [`JoinHandle`]: struct@crate::task::JoinHandle - Ignore, - - /// The runtime should immediately shutdown if a spawned task panics. - /// - /// The runtime will immediately shutdown even if the panicked task's - /// [`JoinHandle`] is still available. All further spawned tasks will be - /// immediately dropped and call to [`Runtime::block_on`] will panic. - /// - /// # Examples - /// - /// ```should_panic - /// use tokio::runtime::{self, UnhandledPanic}; - /// - /// # pub fn main() { - /// let rt = runtime::Builder::new_current_thread() - /// .unhandled_panic(UnhandledPanic::ShutdownRuntime) - /// .build() - /// .unwrap(); - /// - /// rt.spawn(async { panic!("boom"); }); - /// rt.spawn(async { - /// // This task never completes. 
- /// }); - /// - /// rt.block_on(async { - /// // Do some work - /// # loop { tokio::task::yield_now().await; } - /// }) - /// # } - /// ``` - /// - /// [`JoinHandle`]: struct@crate::task::JoinHandle - ShutdownRuntime, - } -} - -pub(crate) type ThreadNameFn = std::sync::Arc String + Send + Sync + 'static>; - -#[derive(Clone, Copy)] -pub(crate) enum Kind { - CurrentThread, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - MultiThread, - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - MultiThreadAlt, -} - -impl Builder { - /// Returns a new builder with the current thread scheduler selected. - /// - /// Configuration methods can be chained on the return value. - /// - /// To spawn non-`Send` tasks on the resulting runtime, combine it with a - /// [`LocalSet`]. - /// - /// [`LocalSet`]: crate::task::LocalSet - pub fn new_current_thread() -> Builder { - #[cfg(loom)] - const EVENT_INTERVAL: u32 = 4; - // The number `61` is fairly arbitrary. I believe this value was copied from golang. - #[cfg(not(loom))] - const EVENT_INTERVAL: u32 = 61; - - Builder::new(Kind::CurrentThread, EVENT_INTERVAL) - } - - cfg_not_wasi! { - /// Returns a new builder with the multi thread scheduler selected. - /// - /// Configuration methods can be chained on the return value. - #[cfg(feature = "rt-multi-thread")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] - pub fn new_multi_thread() -> Builder { - // The number `61` is fairly arbitrary. I believe this value was copied from golang. - Builder::new(Kind::MultiThread, 61) - } - - cfg_unstable! { - /// Returns a new builder with the alternate multi thread scheduler - /// selected. - /// - /// The alternate multi threaded scheduler is an in-progress - /// candidate to replace the existing multi threaded scheduler. It - /// currently does not scale as well to 16+ processors. - /// - /// This runtime flavor is currently **not considered production - /// ready**. 
- /// - /// Configuration methods can be chained on the return value. - #[cfg(feature = "rt-multi-thread")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] - pub fn new_multi_thread_alt() -> Builder { - // The number `61` is fairly arbitrary. I believe this value was copied from golang. - Builder::new(Kind::MultiThreadAlt, 61) - } - } - } - - /// Returns a new runtime builder initialized with default configuration - /// values. - /// - /// Configuration methods can be chained on the return value. - pub(crate) fn new(kind: Kind, event_interval: u32) -> Builder { - Builder { - kind, - - // I/O defaults to "off" - enable_io: false, - nevents: 1024, - - // Time defaults to "off" - enable_time: false, - - // The clock starts not-paused - start_paused: false, - - // Read from environment variable first in multi-threaded mode. - // Default to lazy auto-detection (one thread per CPU core) - worker_threads: None, - - max_blocking_threads: 512, - - // Default thread name - thread_name: std::sync::Arc::new(|| "tokio-runtime-worker".into()), - - // Do not set a stack size by default - thread_stack_size: None, - - // No worker thread callbacks - after_start: None, - before_stop: None, - before_park: None, - after_unpark: None, - - keep_alive: None, - - // Defaults for these values depend on the scheduler kind, so we get them - // as parameters. - global_queue_interval: None, - event_interval, - - #[cfg(not(loom))] - local_queue_capacity: 256, - - #[cfg(loom)] - local_queue_capacity: 4, - - seed_generator: RngSeedGenerator::new(RngSeed::new()), - - #[cfg(tokio_unstable)] - unhandled_panic: UnhandledPanic::Ignore, - - metrics_poll_count_histogram_enable: false, - - metrics_poll_count_histogram: Default::default(), - - disable_lifo_slot: false, - } - } - - /// Enables both I/O and time drivers. - /// - /// Doing this is a shorthand for calling `enable_io` and `enable_time` - /// individually. 
If additional components are added to Tokio in the future, - /// `enable_all` will include these future components. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new_multi_thread() - /// .enable_all() - /// .build() - /// .unwrap(); - /// ``` - pub fn enable_all(&mut self) -> &mut Self { - #[cfg(any( - feature = "net", - all(unix, feature = "process"), - all(unix, feature = "signal") - ))] - self.enable_io(); - #[cfg(feature = "time")] - self.enable_time(); - - self - } - - /// Sets the number of worker threads the `Runtime` will use. - /// - /// This can be any number above 0 though it is advised to keep this value - /// on the smaller side. - /// - /// This will override the value read from environment variable `TOKIO_WORKER_THREADS`. - /// - /// # Default - /// - /// The default value is the number of cores available to the system. - /// - /// When using the `current_thread` runtime this method has no effect. - /// - /// # Examples - /// - /// ## Multi threaded runtime with 4 threads - /// - /// ``` - /// use tokio::runtime; - /// - /// // This will spawn a work-stealing runtime with 4 worker threads. - /// let rt = runtime::Builder::new_multi_thread() - /// .worker_threads(4) - /// .build() - /// .unwrap(); - /// - /// rt.spawn(async move {}); - /// ``` - /// - /// ## Current thread runtime (will only run on the current thread via `Runtime::block_on`) - /// - /// ``` - /// use tokio::runtime; - /// - /// // Create a runtime that _must_ be driven from a call - /// // to `Runtime::block_on`. - /// let rt = runtime::Builder::new_current_thread() - /// .build() - /// .unwrap(); - /// - /// // This will run the runtime and future on the current thread - /// rt.block_on(async move {}); - /// ``` - /// - /// # Panics - /// - /// This will panic if `val` is not larger than `0`. 
- #[track_caller] - pub fn worker_threads(&mut self, val: usize) -> &mut Self { - assert!(val > 0, "Worker threads cannot be set to 0"); - self.worker_threads = Some(val); - self - } - - /// Specifies the limit for additional threads spawned by the Runtime. - /// - /// These threads are used for blocking operations like tasks spawned - /// through [`spawn_blocking`], this includes but is not limited to: - /// - [`fs`] operations - /// - dns resolution through [`ToSocketAddrs`] - /// - writing to [`Stdout`] or [`Stderr`] - /// - reading from [`Stdin`] - /// - /// Unlike the [`worker_threads`], they are not always active and will exit - /// if left idle for too long. You can change this timeout duration with [`thread_keep_alive`]. - /// - /// It's recommended to not set this limit too low in order to avoid hanging on operations - /// requiring [`spawn_blocking`]. - /// - /// The default value is 512. - /// - /// # Panics - /// - /// This will panic if `val` is not larger than `0`. - /// - /// # Upgrading from 0.x - /// - /// In old versions `max_threads` limited both blocking and worker threads, but the - /// current `max_blocking_threads` does not include async worker threads in the count. - /// - /// [`spawn_blocking`]: fn@crate::task::spawn_blocking - /// [`fs`]: mod@crate::fs - /// [`ToSocketAddrs`]: trait@crate::net::ToSocketAddrs - /// [`Stdout`]: struct@crate::io::Stdout - /// [`Stdin`]: struct@crate::io::Stdin - /// [`Stderr`]: struct@crate::io::Stderr - /// [`worker_threads`]: Self::worker_threads - /// [`thread_keep_alive`]: Self::thread_keep_alive - #[track_caller] - #[cfg_attr(docsrs, doc(alias = "max_threads"))] - pub fn max_blocking_threads(&mut self, val: usize) -> &mut Self { - assert!(val > 0, "Max blocking threads cannot be set to 0"); - self.max_blocking_threads = val; - self - } - - /// Sets name of threads spawned by the `Runtime`'s thread pool. - /// - /// The default name is "tokio-runtime-worker". 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// - /// # pub fn main() { - /// let rt = runtime::Builder::new_multi_thread() - /// .thread_name("my-pool") - /// .build(); - /// # } - /// ``` - pub fn thread_name(&mut self, val: impl Into) -> &mut Self { - let val = val.into(); - self.thread_name = std::sync::Arc::new(move || val.clone()); - self - } - - /// Sets a function used to generate the name of threads spawned by the `Runtime`'s thread pool. - /// - /// The default name fn is `|| "tokio-runtime-worker".into()`. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// # use std::sync::atomic::{AtomicUsize, Ordering}; - /// # pub fn main() { - /// let rt = runtime::Builder::new_multi_thread() - /// .thread_name_fn(|| { - /// static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); - /// let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst); - /// format!("my-pool-{}", id) - /// }) - /// .build(); - /// # } - /// ``` - pub fn thread_name_fn(&mut self, f: F) -> &mut Self - where - F: Fn() -> String + Send + Sync + 'static, - { - self.thread_name = std::sync::Arc::new(f); - self - } - - /// Sets the stack size (in bytes) for worker threads. - /// - /// The actual stack size may be greater than this value if the platform - /// specifies minimal stack size. - /// - /// The default stack size for spawned threads is 2 MiB, though this - /// particular stack size is subject to change in the future. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// - /// # pub fn main() { - /// let rt = runtime::Builder::new_multi_thread() - /// .thread_stack_size(32 * 1024) - /// .build(); - /// # } - /// ``` - pub fn thread_stack_size(&mut self, val: usize) -> &mut Self { - self.thread_stack_size = Some(val); - self - } - - /// Executes function `f` after each thread is started but before it starts - /// doing work. - /// - /// This is intended for bookkeeping and monitoring use cases. 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// # pub fn main() { - /// let runtime = runtime::Builder::new_multi_thread() - /// .on_thread_start(|| { - /// println!("thread started"); - /// }) - /// .build(); - /// # } - /// ``` - #[cfg(not(loom))] - pub fn on_thread_start(&mut self, f: F) -> &mut Self - where - F: Fn() + Send + Sync + 'static, - { - self.after_start = Some(std::sync::Arc::new(f)); - self - } - - /// Executes function `f` before each thread stops. - /// - /// This is intended for bookkeeping and monitoring use cases. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// # pub fn main() { - /// let runtime = runtime::Builder::new_multi_thread() - /// .on_thread_stop(|| { - /// println!("thread stopping"); - /// }) - /// .build(); - /// # } - /// ``` - #[cfg(not(loom))] - pub fn on_thread_stop(&mut self, f: F) -> &mut Self - where - F: Fn() + Send + Sync + 'static, - { - self.before_stop = Some(std::sync::Arc::new(f)); - self - } - - /// Executes function `f` just before a thread is parked (goes idle). - /// `f` is called within the Tokio context, so functions like [`tokio::spawn`](crate::spawn) - /// can be called, and may result in this thread being unparked immediately. - /// - /// This can be used to start work only when the executor is idle, or for bookkeeping - /// and monitoring purposes. - /// - /// Note: There can only be one park callback for a runtime; calling this function - /// more than once replaces the last callback defined, rather than adding to it. 
- /// - /// # Examples - /// - /// ## Multithreaded executor - /// ``` - /// # use std::sync::Arc; - /// # use std::sync::atomic::{AtomicBool, Ordering}; - /// # use tokio::runtime; - /// # use tokio::sync::Barrier; - /// # pub fn main() { - /// let once = AtomicBool::new(true); - /// let barrier = Arc::new(Barrier::new(2)); - /// - /// let runtime = runtime::Builder::new_multi_thread() - /// .worker_threads(1) - /// .on_thread_park({ - /// let barrier = barrier.clone(); - /// move || { - /// let barrier = barrier.clone(); - /// if once.swap(false, Ordering::Relaxed) { - /// tokio::spawn(async move { barrier.wait().await; }); - /// } - /// } - /// }) - /// .build() - /// .unwrap(); - /// - /// runtime.block_on(async { - /// barrier.wait().await; - /// }) - /// # } - /// ``` - /// ## Current thread executor - /// ``` - /// # use std::sync::Arc; - /// # use std::sync::atomic::{AtomicBool, Ordering}; - /// # use tokio::runtime; - /// # use tokio::sync::Barrier; - /// # pub fn main() { - /// let once = AtomicBool::new(true); - /// let barrier = Arc::new(Barrier::new(2)); - /// - /// let runtime = runtime::Builder::new_current_thread() - /// .on_thread_park({ - /// let barrier = barrier.clone(); - /// move || { - /// let barrier = barrier.clone(); - /// if once.swap(false, Ordering::Relaxed) { - /// tokio::spawn(async move { barrier.wait().await; }); - /// } - /// } - /// }) - /// .build() - /// .unwrap(); - /// - /// runtime.block_on(async { - /// barrier.wait().await; - /// }) - /// # } - /// ``` - #[cfg(not(loom))] - pub fn on_thread_park(&mut self, f: F) -> &mut Self - where - F: Fn() + Send + Sync + 'static, - { - self.before_park = Some(std::sync::Arc::new(f)); - self - } - - /// Executes function `f` just after a thread unparks (starts executing tasks). - /// - /// This is intended for bookkeeping and monitoring use cases; note that work - /// in this callback will increase latencies when the application has allowed one or - /// more runtime threads to go idle. 
- /// - /// Note: There can only be one unpark callback for a runtime; calling this function - /// more than once replaces the last callback defined, rather than adding to it. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// # pub fn main() { - /// let runtime = runtime::Builder::new_multi_thread() - /// .on_thread_unpark(|| { - /// println!("thread unparking"); - /// }) - /// .build(); - /// - /// runtime.unwrap().block_on(async { - /// tokio::task::yield_now().await; - /// println!("Hello from Tokio!"); - /// }) - /// # } - /// ``` - #[cfg(not(loom))] - pub fn on_thread_unpark(&mut self, f: F) -> &mut Self - where - F: Fn() + Send + Sync + 'static, - { - self.after_unpark = Some(std::sync::Arc::new(f)); - self - } - - /// Creates the configured `Runtime`. - /// - /// The returned `Runtime` instance is ready to spawn tasks. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Builder; - /// - /// let rt = Builder::new_multi_thread().build().unwrap(); - /// - /// rt.block_on(async { - /// println!("Hello from the Tokio runtime"); - /// }); - /// ``` - pub fn build(&mut self) -> io::Result { - match &self.kind { - Kind::CurrentThread => self.build_current_thread_runtime(), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - Kind::MultiThread => self.build_threaded_runtime(), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - Kind::MultiThreadAlt => self.build_alt_threaded_runtime(), - } - } - - fn get_cfg(&self) -> driver::Cfg { - driver::Cfg { - enable_pause_time: match self.kind { - Kind::CurrentThread => true, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - Kind::MultiThread => false, - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - Kind::MultiThreadAlt => false, - }, - enable_io: self.enable_io, - enable_time: self.enable_time, - start_paused: self.start_paused, - nevents: self.nevents, - } - } - - /// Sets a custom 
timeout for a thread in the blocking pool. - /// - /// By default, the timeout for a thread is set to 10 seconds. This can - /// be overridden using .thread_keep_alive(). - /// - /// # Example - /// - /// ``` - /// # use tokio::runtime; - /// # use std::time::Duration; - /// # pub fn main() { - /// let rt = runtime::Builder::new_multi_thread() - /// .thread_keep_alive(Duration::from_millis(100)) - /// .build(); - /// # } - /// ``` - pub fn thread_keep_alive(&mut self, duration: Duration) -> &mut Self { - self.keep_alive = Some(duration); - self - } - - /// Sets the number of scheduler ticks after which the scheduler will poll the global - /// task queue. - /// - /// A scheduler "tick" roughly corresponds to one `poll` invocation on a task. - /// - /// By default the global queue interval is: - /// - /// * `31` for the current-thread scheduler. - /// * `61` for the multithreaded scheduler. - /// - /// Schedulers have a local queue of already-claimed tasks, and a global queue of incoming - /// tasks. Setting the interval to a smaller value increases the fairness of the scheduler, - /// at the cost of more synchronization overhead. That can be beneficial for prioritizing - /// getting started on new work, especially if tasks frequently yield rather than complete - /// or await on further I/O. Conversely, a higher value prioritizes existing work, and - /// is a good choice when most tasks quickly complete polling. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// # pub fn main() { - /// let rt = runtime::Builder::new_multi_thread() - /// .global_queue_interval(31) - /// .build(); - /// # } - /// ``` - pub fn global_queue_interval(&mut self, val: u32) -> &mut Self { - self.global_queue_interval = Some(val); - self - } - - /// Sets the number of scheduler ticks after which the scheduler will poll for - /// external events (timers, I/O, and so on). - /// - /// A scheduler "tick" roughly corresponds to one `poll` invocation on a task. 
- /// - /// By default, the event interval is `61` for all scheduler types. - /// - /// Setting the event interval determines the effective "priority" of delivering - /// these external events (which may wake up additional tasks), compared to - /// executing tasks that are currently ready to run. A smaller value is useful - /// when tasks frequently spend a long time in polling, or frequently yield, - /// which can result in overly long delays picking up I/O events. Conversely, - /// picking up new events requires extra synchronization and syscall overhead, - /// so if tasks generally complete their polling quickly, a higher event interval - /// will minimize that overhead while still keeping the scheduler responsive to - /// events. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime; - /// # pub fn main() { - /// let rt = runtime::Builder::new_multi_thread() - /// .event_interval(31) - /// .build(); - /// # } - /// ``` - pub fn event_interval(&mut self, val: u32) -> &mut Self { - self.event_interval = val; - self - } - - cfg_unstable! { - /// Configure how the runtime responds to an unhandled panic on a - /// spawned task. - /// - /// By default, an unhandled panic (i.e. a panic not caught by - /// [`std::panic::catch_unwind`]) has no impact on the runtime's - /// execution. The panic is error value is forwarded to the task's - /// [`JoinHandle`] and all other spawned tasks continue running. - /// - /// The `unhandled_panic` option enables configuring this behavior. - /// - /// * `UnhandledPanic::Ignore` is the default behavior. Panics on - /// spawned tasks have no impact on the runtime's execution. - /// * `UnhandledPanic::ShutdownRuntime` will force the runtime to - /// shutdown immediately when a spawned task panics even if that - /// task's `JoinHandle` has not been dropped. All other spawned tasks - /// will immediately terminate and further calls to - /// [`Runtime::block_on`] will panic. 
- /// - /// # Unstable - /// - /// This option is currently unstable and its implementation is - /// incomplete. The API may change or be removed in the future. See - /// tokio-rs/tokio#4516 for more details. - /// - /// # Examples - /// - /// The following demonstrates a runtime configured to shutdown on - /// panic. The first spawned task panics and results in the runtime - /// shutting down. The second spawned task never has a chance to - /// execute. The call to `block_on` will panic due to the runtime being - /// forcibly shutdown. - /// - /// ```should_panic - /// use tokio::runtime::{self, UnhandledPanic}; - /// - /// # pub fn main() { - /// let rt = runtime::Builder::new_current_thread() - /// .unhandled_panic(UnhandledPanic::ShutdownRuntime) - /// .build() - /// .unwrap(); - /// - /// rt.spawn(async { panic!("boom"); }); - /// rt.spawn(async { - /// // This task never completes. - /// }); - /// - /// rt.block_on(async { - /// // Do some work - /// # loop { tokio::task::yield_now().await; } - /// }) - /// # } - /// ``` - /// - /// [`JoinHandle`]: struct@crate::task::JoinHandle - pub fn unhandled_panic(&mut self, behavior: UnhandledPanic) -> &mut Self { - self.unhandled_panic = behavior; - self - } - - /// Disables the LIFO task scheduler heuristic. - /// - /// The multi-threaded scheduler includes a heuristic for optimizing - /// message-passing patterns. This heuristic results in the **last** - /// scheduled task being polled first. - /// - /// To implement this heuristic, each worker thread has a slot which - /// holds the task that should be polled next. However, this slot cannot - /// be stolen by other worker threads, which can result in lower total - /// throughput when tasks tend to have longer poll times. - /// - /// This configuration option will disable this heuristic resulting in - /// all scheduled tasks being pushed into the worker-local queue, which - /// is stealable. 
- /// - /// Consider trying this option when the task "scheduled" time is high - /// but the runtime is underutilized. Use tokio-rs/tokio-metrics to - /// collect this data. - /// - /// # Unstable - /// - /// This configuration option is considered a workaround for the LIFO - /// slot not being stealable. When the slot becomes stealable, we will - /// revisit whether or not this option is necessary. See - /// tokio-rs/tokio#4941. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new_multi_thread() - /// .disable_lifo_slot() - /// .build() - /// .unwrap(); - /// ``` - pub fn disable_lifo_slot(&mut self) -> &mut Self { - self.disable_lifo_slot = true; - self - } - - /// Specifies the random number generation seed to use within all - /// threads associated with the runtime being built. - /// - /// This option is intended to make certain parts of the runtime - /// deterministic (e.g. the [`tokio::select!`] macro). In the case of - /// [`tokio::select!`] it will ensure that the order that branches are - /// polled is deterministic. - /// - /// In addition to the code specifying `rng_seed` and interacting with - /// the runtime, the internals of Tokio and the Rust compiler may affect - /// the sequences of random numbers. In order to ensure repeatable - /// results, the version of Tokio, the versions of all other - /// dependencies that interact with Tokio, and the Rust compiler version - /// should also all remain constant. - /// - /// # Examples - /// - /// ``` - /// # use tokio::runtime::{self, RngSeed}; - /// # pub fn main() { - /// let seed = RngSeed::from_bytes(b"place your seed here"); - /// let rt = runtime::Builder::new_current_thread() - /// .rng_seed(seed) - /// .build(); - /// # } - /// ``` - /// - /// [`tokio::select!`]: crate::select - pub fn rng_seed(&mut self, seed: RngSeed) -> &mut Self { - self.seed_generator = RngSeedGenerator::new(seed); - self - } - } - - cfg_metrics! 
{ - /// Enables tracking the distribution of task poll times. - /// - /// Task poll times are not instrumented by default as doing so requires - /// calling [`Instant::now()`] twice per task poll, which could add - /// measurable overhead. Use the [`Handle::metrics()`] to access the - /// metrics data. - /// - /// The histogram uses fixed bucket sizes. In other words, the histogram - /// buckets are not dynamic based on input values. Use the - /// `metrics_poll_count_histogram_` builder methods to configure the - /// histogram details. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new_multi_thread() - /// .enable_metrics_poll_count_histogram() - /// .build() - /// .unwrap(); - /// # // Test default values here - /// # fn us(n: u64) -> std::time::Duration { std::time::Duration::from_micros(n) } - /// # let m = rt.handle().metrics(); - /// # assert_eq!(m.poll_count_histogram_num_buckets(), 10); - /// # assert_eq!(m.poll_count_histogram_bucket_range(0), us(0)..us(100)); - /// # assert_eq!(m.poll_count_histogram_bucket_range(1), us(100)..us(200)); - /// ``` - /// - /// [`Handle::metrics()`]: crate::runtime::Handle::metrics - /// [`Instant::now()`]: std::time::Instant::now - pub fn enable_metrics_poll_count_histogram(&mut self) -> &mut Self { - self.metrics_poll_count_histogram_enable = true; - self - } - - /// Sets the histogram scale for tracking the distribution of task poll - /// times. - /// - /// Tracking the distribution of task poll times can be done using a - /// linear or log scale. When using linear scale, each histogram bucket - /// will represent the same range of poll times. When using log scale, - /// each histogram bucket will cover a range twice as big as the - /// previous bucket. - /// - /// **Default:** linear scale. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, HistogramScale}; - /// - /// let rt = runtime::Builder::new_multi_thread() - /// .enable_metrics_poll_count_histogram() - /// .metrics_poll_count_histogram_scale(HistogramScale::Log) - /// .build() - /// .unwrap(); - /// ``` - pub fn metrics_poll_count_histogram_scale(&mut self, histogram_scale: crate::runtime::HistogramScale) -> &mut Self { - self.metrics_poll_count_histogram.scale = histogram_scale; - self - } - - /// Sets the histogram resolution for tracking the distribution of task - /// poll times. - /// - /// The resolution is the histogram's first bucket's range. When using a - /// linear histogram scale, each bucket will cover the same range. When - /// using a log scale, each bucket will cover a range twice as big as - /// the previous bucket. In the log case, the resolution represents the - /// smallest bucket range. - /// - /// Note that, when using log scale, the resolution is rounded up to the - /// nearest power of 2 in nanoseconds. - /// - /// **Default:** 100 microseconds. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// use std::time::Duration; - /// - /// let rt = runtime::Builder::new_multi_thread() - /// .enable_metrics_poll_count_histogram() - /// .metrics_poll_count_histogram_resolution(Duration::from_micros(100)) - /// .build() - /// .unwrap(); - /// ``` - pub fn metrics_poll_count_histogram_resolution(&mut self, resolution: Duration) -> &mut Self { - assert!(resolution > Duration::from_secs(0)); - // Sanity check the argument and also make the cast below safe. - assert!(resolution <= Duration::from_secs(1)); - - let resolution = resolution.as_nanos() as u64; - self.metrics_poll_count_histogram.resolution = resolution; - self - } - - /// Sets the number of buckets for the histogram tracking the - /// distribution of task poll times. - /// - /// The last bucket tracks all greater values that fall out of other - /// ranges. 
So, configuring the histogram using a linear scale, - /// resolution of 50ms, and 10 buckets, the 10th bucket will track task - /// polls that take more than 450ms to complete. - /// - /// **Default:** 10 - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new_multi_thread() - /// .enable_metrics_poll_count_histogram() - /// .metrics_poll_count_histogram_buckets(15) - /// .build() - /// .unwrap(); - /// ``` - pub fn metrics_poll_count_histogram_buckets(&mut self, buckets: usize) -> &mut Self { - self.metrics_poll_count_histogram.num_buckets = buckets; - self - } - } - - cfg_loom! { - pub(crate) fn local_queue_capacity(&mut self, value: usize) -> &mut Self { - assert!(value.is_power_of_two()); - self.local_queue_capacity = value; - self - } - } - - fn build_current_thread_runtime(&mut self) -> io::Result { - use crate::runtime::scheduler::{self, CurrentThread}; - use crate::runtime::{runtime::Scheduler, Config}; - - let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; - - // Blocking pool - let blocking_pool = blocking::create_blocking_pool(self, self.max_blocking_threads); - let blocking_spawner = blocking_pool.spawner().clone(); - - // Generate a rng seed for this runtime. - let seed_generator_1 = self.seed_generator.next_generator(); - let seed_generator_2 = self.seed_generator.next_generator(); - - // And now put a single-threaded scheduler on top of the timer. When - // there are no futures ready to do something, it'll let the timer or - // the reactor to generate some new stimuli for the futures to continue - // in their life. 
- let (scheduler, handle) = CurrentThread::new( - driver, - driver_handle, - blocking_spawner, - seed_generator_2, - Config { - before_park: self.before_park.clone(), - after_unpark: self.after_unpark.clone(), - global_queue_interval: self.global_queue_interval, - event_interval: self.event_interval, - local_queue_capacity: self.local_queue_capacity, - #[cfg(tokio_unstable)] - unhandled_panic: self.unhandled_panic.clone(), - disable_lifo_slot: self.disable_lifo_slot, - seed_generator: seed_generator_1, - metrics_poll_count_histogram: self.metrics_poll_count_histogram_builder(), - }, - ); - - let handle = Handle { - inner: scheduler::Handle::CurrentThread(handle), - }; - - Ok(Runtime::from_parts( - Scheduler::CurrentThread(scheduler), - handle, - blocking_pool, - )) - } - - fn metrics_poll_count_histogram_builder(&self) -> Option { - if self.metrics_poll_count_histogram_enable { - Some(self.metrics_poll_count_histogram.clone()) - } else { - None - } - } -} - -cfg_io_driver! { - impl Builder { - /// Enables the I/O driver. - /// - /// Doing this enables using net, process, signal, and some I/O types on - /// the runtime. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new_multi_thread() - /// .enable_io() - /// .build() - /// .unwrap(); - /// ``` - pub fn enable_io(&mut self) -> &mut Self { - self.enable_io = true; - self - } - - /// Enables the I/O driver and configures the max number of events to be - /// processed per tick. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new_current_thread() - /// .enable_io() - /// .max_io_events_per_tick(1024) - /// .build() - /// .unwrap(); - /// ``` - pub fn max_io_events_per_tick(&mut self, capacity: usize) -> &mut Self { - self.nevents = capacity; - self - } - } -} - -cfg_time! { - impl Builder { - /// Enables the time driver. - /// - /// Doing this enables using `tokio::time` on the runtime. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new_multi_thread() - /// .enable_time() - /// .build() - /// .unwrap(); - /// ``` - pub fn enable_time(&mut self) -> &mut Self { - self.enable_time = true; - self - } - } -} - -cfg_test_util! { - impl Builder { - /// Controls if the runtime's clock starts paused or advancing. - /// - /// Pausing time requires the current-thread runtime; construction of - /// the runtime will panic otherwise. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime; - /// - /// let rt = runtime::Builder::new_current_thread() - /// .enable_time() - /// .start_paused(true) - /// .build() - /// .unwrap(); - /// ``` - pub fn start_paused(&mut self, start_paused: bool) -> &mut Self { - self.start_paused = start_paused; - self - } - } -} - -cfg_rt_multi_thread! { - impl Builder { - fn build_threaded_runtime(&mut self) -> io::Result { - use crate::loom::sys::num_cpus; - use crate::runtime::{Config, runtime::Scheduler}; - use crate::runtime::scheduler::{self, MultiThread}; - - let core_threads = self.worker_threads.unwrap_or_else(num_cpus); - - let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; - - // Create the blocking pool - let blocking_pool = - blocking::create_blocking_pool(self, self.max_blocking_threads + core_threads); - let blocking_spawner = blocking_pool.spawner().clone(); - - // Generate a rng seed for this runtime. 
- let seed_generator_1 = self.seed_generator.next_generator(); - let seed_generator_2 = self.seed_generator.next_generator(); - - let (scheduler, handle, launch) = MultiThread::new( - core_threads, - driver, - driver_handle, - blocking_spawner, - seed_generator_2, - Config { - before_park: self.before_park.clone(), - after_unpark: self.after_unpark.clone(), - global_queue_interval: self.global_queue_interval, - event_interval: self.event_interval, - local_queue_capacity: self.local_queue_capacity, - #[cfg(tokio_unstable)] - unhandled_panic: self.unhandled_panic.clone(), - disable_lifo_slot: self.disable_lifo_slot, - seed_generator: seed_generator_1, - metrics_poll_count_histogram: self.metrics_poll_count_histogram_builder(), - }, - ); - - let handle = Handle { inner: scheduler::Handle::MultiThread(handle) }; - - // Spawn the thread pool workers - let _enter = handle.enter(); - launch.launch(); - - Ok(Runtime::from_parts(Scheduler::MultiThread(scheduler), handle, blocking_pool)) - } - - cfg_unstable! { - fn build_alt_threaded_runtime(&mut self) -> io::Result { - use crate::loom::sys::num_cpus; - use crate::runtime::{Config, runtime::Scheduler}; - use crate::runtime::scheduler::MultiThreadAlt; - - let core_threads = self.worker_threads.unwrap_or_else(num_cpus); - - let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; - - // Create the blocking pool - let blocking_pool = - blocking::create_blocking_pool(self, self.max_blocking_threads + core_threads); - let blocking_spawner = blocking_pool.spawner().clone(); - - // Generate a rng seed for this runtime. 
- let seed_generator_1 = self.seed_generator.next_generator(); - let seed_generator_2 = self.seed_generator.next_generator(); - - let (scheduler, handle) = MultiThreadAlt::new( - core_threads, - driver, - driver_handle, - blocking_spawner, - seed_generator_2, - Config { - before_park: self.before_park.clone(), - after_unpark: self.after_unpark.clone(), - global_queue_interval: self.global_queue_interval, - event_interval: self.event_interval, - local_queue_capacity: self.local_queue_capacity, - #[cfg(tokio_unstable)] - unhandled_panic: self.unhandled_panic.clone(), - disable_lifo_slot: self.disable_lifo_slot, - seed_generator: seed_generator_1, - metrics_poll_count_histogram: self.metrics_poll_count_histogram_builder(), - }, - ); - - Ok(Runtime::from_parts(Scheduler::MultiThreadAlt(scheduler), handle, blocking_pool)) - } - } - } -} - -impl fmt::Debug for Builder { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Builder") - .field("worker_threads", &self.worker_threads) - .field("max_blocking_threads", &self.max_blocking_threads) - .field( - "thread_name", - &" String + Send + Sync + 'static>", - ) - .field("thread_stack_size", &self.thread_stack_size) - .field("after_start", &self.after_start.as_ref().map(|_| "...")) - .field("before_stop", &self.before_stop.as_ref().map(|_| "...")) - .field("before_park", &self.before_park.as_ref().map(|_| "...")) - .field("after_unpark", &self.after_unpark.as_ref().map(|_| "...")) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/config.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/config.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/config.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/config.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -#![cfg_attr( - any(not(all(tokio_unstable, feature = "full")), target_family = "wasm"), - allow(dead_code) -)] -use crate::runtime::Callback; -use 
crate::util::RngSeedGenerator; - -pub(crate) struct Config { - /// How many ticks before pulling a task from the global/remote queue? - pub(crate) global_queue_interval: Option, - - /// How many ticks before yielding to the driver for timer and I/O events? - pub(crate) event_interval: u32, - - /// How big to make each worker's local queue - pub(crate) local_queue_capacity: usize, - - /// Callback for a worker parking itself - pub(crate) before_park: Option, - - /// Callback for a worker unparking itself - pub(crate) after_unpark: Option, - - /// The multi-threaded scheduler includes a per-worker LIFO slot used to - /// store the last scheduled task. This can improve certain usage patterns, - /// especially message passing between tasks. However, this LIFO slot is not - /// currently stealable. - /// - /// Eventually, the LIFO slot **will** become stealable, however as a - /// stop-gap, this unstable option lets users disable the LIFO task. - pub(crate) disable_lifo_slot: bool, - - /// Random number generator seed to configure runtimes to act in a - /// deterministic way. - pub(crate) seed_generator: RngSeedGenerator, - - /// How to build poll time histograms - pub(crate) metrics_poll_count_histogram: Option, - - #[cfg(tokio_unstable)] - /// How to respond to unhandled task panics. - pub(crate) unhandled_panic: crate::runtime::UnhandledPanic, -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/blocking.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/blocking.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/blocking.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/blocking.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,121 +0,0 @@ -use super::{EnterRuntime, CONTEXT}; - -use crate::loom::thread::AccessError; -use crate::util::markers::NotSendOrSync; - -use std::marker::PhantomData; -use std::time::Duration; - -/// Guard tracking that a caller has entered a blocking region. 
-#[must_use] -pub(crate) struct BlockingRegionGuard { - _p: PhantomData, -} - -pub(crate) struct DisallowBlockInPlaceGuard(bool); - -pub(crate) fn try_enter_blocking_region() -> Option { - CONTEXT - .try_with(|c| { - if c.runtime.get().is_entered() { - None - } else { - Some(BlockingRegionGuard::new()) - } - // If accessing the thread-local fails, the thread is terminating - // and thread-locals are being destroyed. Because we don't know if - // we are currently in a runtime or not, we default to being - // permissive. - }) - .unwrap_or_else(|_| Some(BlockingRegionGuard::new())) -} - -/// Disallows blocking in the current runtime context until the guard is dropped. -pub(crate) fn disallow_block_in_place() -> DisallowBlockInPlaceGuard { - let reset = CONTEXT.with(|c| { - if let EnterRuntime::Entered { - allow_block_in_place: true, - } = c.runtime.get() - { - c.runtime.set(EnterRuntime::Entered { - allow_block_in_place: false, - }); - true - } else { - false - } - }); - - DisallowBlockInPlaceGuard(reset) -} - -impl BlockingRegionGuard { - pub(super) fn new() -> BlockingRegionGuard { - BlockingRegionGuard { _p: PhantomData } - } - - /// Blocks the thread on the specified future, returning the value with - /// which that future completes. - pub(crate) fn block_on(&mut self, f: F) -> Result - where - F: std::future::Future, - { - use crate::runtime::park::CachedParkThread; - - let mut park = CachedParkThread::new(); - park.block_on(f) - } - - /// Blocks the thread on the specified future for **at most** `timeout` - /// - /// If the future completes before `timeout`, the result is returned. If - /// `timeout` elapses, then `Err` is returned. 
- pub(crate) fn block_on_timeout(&mut self, f: F, timeout: Duration) -> Result - where - F: std::future::Future, - { - use crate::runtime::park::CachedParkThread; - use std::task::Context; - use std::task::Poll::Ready; - use std::time::Instant; - - let mut park = CachedParkThread::new(); - let waker = park.waker().map_err(|_| ())?; - let mut cx = Context::from_waker(&waker); - - pin!(f); - let when = Instant::now() + timeout; - - loop { - if let Ready(v) = crate::runtime::coop::budget(|| f.as_mut().poll(&mut cx)) { - return Ok(v); - } - - let now = Instant::now(); - - if now >= when { - return Err(()); - } - - park.park_timeout(when - now); - } - } -} - -impl Drop for DisallowBlockInPlaceGuard { - fn drop(&mut self) { - if self.0 { - // XXX: Do we want some kind of assertion here, or is "best effort" okay? - CONTEXT.with(|c| { - if let EnterRuntime::Entered { - allow_block_in_place: false, - } = c.runtime.get() - { - c.runtime.set(EnterRuntime::Entered { - allow_block_in_place: true, - }); - } - }) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/current.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/current.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/current.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/current.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,99 +0,0 @@ -use super::{Context, CONTEXT}; - -use crate::runtime::{scheduler, TryCurrentError}; -use crate::util::markers::SyncNotSend; - -use std::cell::{Cell, RefCell}; -use std::marker::PhantomData; - -#[derive(Debug)] -#[must_use] -pub(crate) struct SetCurrentGuard { - // The previous handle - prev: Option, - - // The depth for this guard - depth: usize, - - // Don't let the type move across threads. - _p: PhantomData, -} - -pub(super) struct HandleCell { - /// Current handle - handle: RefCell>, - - /// Tracks the number of nested calls to `try_set_current`. 
- depth: Cell, -} - -/// Sets this [`Handle`] as the current active [`Handle`]. -/// -/// [`Handle`]: crate::runtime::scheduler::Handle -pub(crate) fn try_set_current(handle: &scheduler::Handle) -> Option { - CONTEXT.try_with(|ctx| ctx.set_current(handle)).ok() -} - -pub(crate) fn with_current(f: F) -> Result -where - F: FnOnce(&scheduler::Handle) -> R, -{ - match CONTEXT.try_with(|ctx| ctx.current.handle.borrow().as_ref().map(f)) { - Ok(Some(ret)) => Ok(ret), - Ok(None) => Err(TryCurrentError::new_no_context()), - Err(_access_error) => Err(TryCurrentError::new_thread_local_destroyed()), - } -} - -impl Context { - pub(super) fn set_current(&self, handle: &scheduler::Handle) -> SetCurrentGuard { - let old_handle = self.current.handle.borrow_mut().replace(handle.clone()); - let depth = self.current.depth.get(); - - if depth == usize::MAX { - panic!("reached max `enter` depth"); - } - - let depth = depth + 1; - self.current.depth.set(depth); - - SetCurrentGuard { - prev: old_handle, - depth, - _p: PhantomData, - } - } -} - -impl HandleCell { - pub(super) const fn new() -> HandleCell { - HandleCell { - handle: RefCell::new(None), - depth: Cell::new(0), - } - } -} - -impl Drop for SetCurrentGuard { - fn drop(&mut self) { - CONTEXT.with(|ctx| { - let depth = ctx.current.depth.get(); - - if depth != self.depth { - if !std::thread::panicking() { - panic!( - "`EnterGuard` values dropped out of order. Guards returned by \ - `tokio::runtime::Handle::enter()` must be dropped in the reverse \ - order as they were acquired." - ); - } else { - // Just return... this will leave handles in a wonky state though... 
- return; - } - } - - *ctx.current.handle.borrow_mut() = self.prev.take(); - ctx.current.depth.set(depth - 1); - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/runtime_mt.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/runtime_mt.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/runtime_mt.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/runtime_mt.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,36 +0,0 @@ -use super::{EnterRuntime, CONTEXT}; - -/// Returns true if in a runtime context. -pub(crate) fn current_enter_context() -> EnterRuntime { - CONTEXT.with(|c| c.runtime.get()) -} - -/// Forces the current "entered" state to be cleared while the closure -/// is executed. -pub(crate) fn exit_runtime R, R>(f: F) -> R { - // Reset in case the closure panics - struct Reset(EnterRuntime); - - impl Drop for Reset { - fn drop(&mut self) { - CONTEXT.with(|c| { - assert!( - !c.runtime.get().is_entered(), - "closure claimed permanent executor" - ); - c.runtime.set(self.0); - }); - } - } - - let was = CONTEXT.with(|c| { - let e = c.runtime.get(); - assert!(e.is_entered(), "asked to exit when not entered"); - c.runtime.set(EnterRuntime::NotEntered); - e - }); - - let _reset = Reset(was); - // dropping _reset after f() will reset ENTERED - f() -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/runtime.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/runtime.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/runtime.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/runtime.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,99 +0,0 @@ -use super::{BlockingRegionGuard, SetCurrentGuard, CONTEXT}; - -use crate::runtime::scheduler; -use crate::util::rand::{FastRand, RngSeed}; - -use std::fmt; - -#[derive(Debug, Clone, Copy)] -#[must_use] -pub(crate) enum EnterRuntime { - /// Currently in a runtime 
context. - #[cfg_attr(not(feature = "rt"), allow(dead_code))] - Entered { allow_block_in_place: bool }, - - /// Not in a runtime context **or** a blocking region. - NotEntered, -} - -/// Guard tracking that a caller has entered a runtime context. -#[must_use] -pub(crate) struct EnterRuntimeGuard { - /// Tracks that the current thread has entered a blocking function call. - pub(crate) blocking: BlockingRegionGuard, - - #[allow(dead_code)] // Only tracking the guard. - pub(crate) handle: SetCurrentGuard, - - // Tracks the previous random number generator seed - old_seed: RngSeed, -} - -/// Marks the current thread as being within the dynamic extent of an -/// executor. -#[track_caller] -pub(crate) fn enter_runtime(handle: &scheduler::Handle, allow_block_in_place: bool, f: F) -> R -where - F: FnOnce(&mut BlockingRegionGuard) -> R, -{ - let maybe_guard = CONTEXT.with(|c| { - if c.runtime.get().is_entered() { - None - } else { - // Set the entered flag - c.runtime.set(EnterRuntime::Entered { - allow_block_in_place, - }); - - // Generate a new seed - let rng_seed = handle.seed_generator().next_seed(); - - // Swap the RNG seed - let mut rng = c.rng.get().unwrap_or_else(FastRand::new); - let old_seed = rng.replace_seed(rng_seed); - c.rng.set(Some(rng)); - - Some(EnterRuntimeGuard { - blocking: BlockingRegionGuard::new(), - handle: c.set_current(handle), - old_seed, - }) - } - }); - - if let Some(mut guard) = maybe_guard { - return f(&mut guard.blocking); - } - - panic!( - "Cannot start a runtime from within a runtime. This happens \ - because a function (like `block_on`) attempted to block the \ - current thread while the thread is being used to drive \ - asynchronous tasks." 
- ); -} - -impl fmt::Debug for EnterRuntimeGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Enter").finish() - } -} - -impl Drop for EnterRuntimeGuard { - fn drop(&mut self) { - CONTEXT.with(|c| { - assert!(c.runtime.get().is_entered()); - c.runtime.set(EnterRuntime::NotEntered); - // Replace the previous RNG seed - let mut rng = c.rng.get().unwrap_or_else(FastRand::new); - rng.replace_seed(self.old_seed.clone()); - c.rng.set(Some(rng)); - }); - } -} - -impl EnterRuntime { - pub(crate) fn is_entered(self) -> bool { - matches!(self, EnterRuntime::Entered { .. }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/scoped.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/scoped.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context/scoped.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context/scoped.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -use std::cell::Cell; -use std::ptr; - -/// Scoped thread-local storage -pub(super) struct Scoped { - pub(super) inner: Cell<*const T>, -} - -impl Scoped { - pub(super) const fn new() -> Scoped { - Scoped { - inner: Cell::new(ptr::null()), - } - } - - /// Inserts a value into the scoped cell for the duration of the closure - pub(super) fn set(&self, t: &T, f: F) -> R - where - F: FnOnce() -> R, - { - struct Reset<'a, T> { - cell: &'a Cell<*const T>, - prev: *const T, - } - - impl Drop for Reset<'_, T> { - fn drop(&mut self) { - self.cell.set(self.prev); - } - } - - let prev = self.inner.get(); - self.inner.set(t as *const _); - - let _reset = Reset { - cell: &self.inner, - prev, - }; - - f() - } - - /// Gets the value out of the scoped cell; - pub(super) fn with(&self, f: F) -> R - where - F: FnOnce(Option<&T>) -> R, - { - let val = self.inner.get(); - - if val.is_null() { - f(None) - } else { - unsafe { f(Some(&*val)) } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context.rs 
s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/context.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/context.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,191 +0,0 @@ -use crate::loom::thread::AccessError; -use crate::runtime::coop; - -use std::cell::Cell; - -#[cfg(any(feature = "rt", feature = "macros"))] -use crate::util::rand::FastRand; - -cfg_rt! { - mod blocking; - pub(crate) use blocking::{disallow_block_in_place, try_enter_blocking_region, BlockingRegionGuard}; - - mod current; - pub(crate) use current::{with_current, try_set_current, SetCurrentGuard}; - - mod runtime; - pub(crate) use runtime::{EnterRuntime, enter_runtime}; - - mod scoped; - use scoped::Scoped; - - use crate::runtime::{scheduler, task::Id}; - - use std::task::Waker; - - cfg_taskdump! { - use crate::runtime::task::trace; - } -} - -cfg_rt_multi_thread! { - mod runtime_mt; - pub(crate) use runtime_mt::{current_enter_context, exit_runtime}; -} - -struct Context { - /// Uniquely identifies the current thread - #[cfg(feature = "rt")] - thread_id: Cell>, - - /// Handle to the runtime scheduler running on the current thread. - #[cfg(feature = "rt")] - current: current::HandleCell, - - /// Handle to the scheduler's internal "context" - #[cfg(feature = "rt")] - scheduler: Scoped, - - #[cfg(feature = "rt")] - current_task_id: Cell>, - - /// Tracks if the current thread is currently driving a runtime. - /// Note, that if this is set to "entered", the current scheduler - /// handle may not reference the runtime currently executing. This - /// is because other runtime handles may be set to current from - /// within a runtime. 
- #[cfg(feature = "rt")] - runtime: Cell, - - #[cfg(any(feature = "rt", feature = "macros"))] - rng: Cell>, - - /// Tracks the amount of "work" a task may still do before yielding back to - /// the sheduler - budget: Cell, - - #[cfg(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") - ))] - trace: trace::Context, -} - -tokio_thread_local! { - static CONTEXT: Context = const { - Context { - #[cfg(feature = "rt")] - thread_id: Cell::new(None), - - // Tracks the current runtime handle to use when spawning, - // accessing drivers, etc... - #[cfg(feature = "rt")] - current: current::HandleCell::new(), - - // Tracks the current scheduler internal context - #[cfg(feature = "rt")] - scheduler: Scoped::new(), - - #[cfg(feature = "rt")] - current_task_id: Cell::new(None), - - // Tracks if the current thread is currently driving a runtime. - // Note, that if this is set to "entered", the current scheduler - // handle may not reference the runtime currently executing. This - // is because other runtime handles may be set to current from - // within a runtime. - #[cfg(feature = "rt")] - runtime: Cell::new(EnterRuntime::NotEntered), - - #[cfg(any(feature = "rt", feature = "macros"))] - rng: Cell::new(None), - - budget: Cell::new(coop::Budget::unconstrained()), - - #[cfg(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any( - target_arch = "aarch64", - target_arch = "x86", - target_arch = "x86_64" - ) - ))] - trace: trace::Context::new(), - } - } -} - -#[cfg(any(feature = "macros", all(feature = "sync", feature = "rt")))] -pub(crate) fn thread_rng_n(n: u32) -> u32 { - CONTEXT.with(|ctx| { - let mut rng = ctx.rng.get().unwrap_or_else(FastRand::new); - let ret = rng.fastrand_n(n); - ctx.rng.set(Some(rng)); - ret - }) -} - -pub(super) fn budget(f: impl FnOnce(&Cell) -> R) -> Result { - CONTEXT.try_with(|ctx| f(&ctx.budget)) -} - -cfg_rt! 
{ - use crate::runtime::ThreadId; - - pub(crate) fn thread_id() -> Result { - CONTEXT.try_with(|ctx| { - match ctx.thread_id.get() { - Some(id) => id, - None => { - let id = ThreadId::next(); - ctx.thread_id.set(Some(id)); - id - } - } - }) - } - - pub(crate) fn set_current_task_id(id: Option) -> Option { - CONTEXT.try_with(|ctx| ctx.current_task_id.replace(id)).unwrap_or(None) - } - - pub(crate) fn current_task_id() -> Option { - CONTEXT.try_with(|ctx| ctx.current_task_id.get()).unwrap_or(None) - } - - #[track_caller] - pub(crate) fn defer(waker: &Waker) { - with_scheduler(|maybe_scheduler| { - if let Some(scheduler) = maybe_scheduler { - scheduler.defer(waker); - } else { - // Called from outside of the runtime, immediately wake the - // task. - waker.wake_by_ref(); - } - }); - } - - pub(super) fn set_scheduler(v: &scheduler::Context, f: impl FnOnce() -> R) -> R { - CONTEXT.with(|c| c.scheduler.set(v, f)) - } - - #[track_caller] - pub(super) fn with_scheduler(f: impl FnOnce(Option<&scheduler::Context>) -> R) -> R { - CONTEXT.with(|c| c.scheduler.with(f)) - } - - cfg_taskdump! { - /// SAFETY: Callers of this function must ensure that trace frames always - /// form a valid linked list. - pub(crate) unsafe fn with_trace(f: impl FnOnce(&trace::Context) -> R) -> Option { - CONTEXT.try_with(|c| f(&c.trace)).ok() - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/coop.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/coop.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/coop.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/coop.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,323 +0,0 @@ -#![cfg_attr(not(feature = "full"), allow(dead_code))] - -//! Yield points for improved cooperative scheduling. -//! -//! Documentation for this can be found in the [`tokio::task`] module. -//! -//! [`tokio::task`]: crate::task. 
- -// ```ignore -// # use tokio_stream::{Stream, StreamExt}; -// async fn drop_all(mut input: I) { -// while let Some(_) = input.next().await { -// tokio::coop::proceed().await; -// } -// } -// ``` -// -// The `proceed` future will coordinate with the executor to make sure that -// every so often control is yielded back to the executor so it can run other -// tasks. -// -// # Placing yield points -// -// Voluntary yield points should be placed _after_ at least some work has been -// done. If they are not, a future sufficiently deep in the task hierarchy may -// end up _never_ getting to run because of the number of yield points that -// inevitably appear before it is reached. In general, you will want yield -// points to only appear in "leaf" futures -- those that do not themselves poll -// other futures. By doing this, you avoid double-counting each iteration of -// the outer future against the cooperating budget. - -use crate::runtime::context; - -/// Opaque type tracking the amount of "work" a task may still do before -/// yielding back to the scheduler. -#[derive(Debug, Copy, Clone)] -pub(crate) struct Budget(Option); - -pub(crate) struct BudgetDecrement { - success: bool, - hit_zero: bool, -} - -impl Budget { - /// Budget assigned to a task on each poll. - /// - /// The value itself is chosen somewhat arbitrarily. It needs to be high - /// enough to amortize wakeup and scheduling costs, but low enough that we - /// do not starve other tasks for too long. The value also needs to be high - /// enough that particularly deep tasks are able to do at least some useful - /// work at all. - /// - /// Note that as more yield points are added in the ecosystem, this value - /// will probably also have to be raised. - const fn initial() -> Budget { - Budget(Some(128)) - } - - /// Returns an unconstrained budget. Operations will not be limited. 
- pub(super) const fn unconstrained() -> Budget { - Budget(None) - } - - fn has_remaining(self) -> bool { - self.0.map(|budget| budget > 0).unwrap_or(true) - } -} - -/// Runs the given closure with a cooperative task budget. When the function -/// returns, the budget is reset to the value prior to calling the function. -#[inline(always)] -pub(crate) fn budget(f: impl FnOnce() -> R) -> R { - with_budget(Budget::initial(), f) -} - -/// Runs the given closure with an unconstrained task budget. When the function returns, the budget -/// is reset to the value prior to calling the function. -#[inline(always)] -pub(crate) fn with_unconstrained(f: impl FnOnce() -> R) -> R { - with_budget(Budget::unconstrained(), f) -} - -#[inline(always)] -fn with_budget(budget: Budget, f: impl FnOnce() -> R) -> R { - struct ResetGuard { - prev: Budget, - } - - impl Drop for ResetGuard { - fn drop(&mut self) { - let _ = context::budget(|cell| { - cell.set(self.prev); - }); - } - } - - #[allow(unused_variables)] - let maybe_guard = context::budget(|cell| { - let prev = cell.get(); - cell.set(budget); - - ResetGuard { prev } - }); - - // The function is called regardless even if the budget is not successfully - // set due to the thread-local being destroyed. - f() -} - -#[inline(always)] -pub(crate) fn has_budget_remaining() -> bool { - // If the current budget cannot be accessed due to the thread-local being - // shutdown, then we assume there is budget remaining. - context::budget(|cell| cell.get().has_remaining()).unwrap_or(true) -} - -cfg_rt_multi_thread! { - /// Sets the current task's budget. - pub(crate) fn set(budget: Budget) { - let _ = context::budget(|cell| cell.set(budget)); - } -} - -cfg_rt! { - /// Forcibly removes the budgeting constraints early. - /// - /// Returns the remaining budget - pub(crate) fn stop() -> Budget { - context::budget(|cell| { - let prev = cell.get(); - cell.set(Budget::unconstrained()); - prev - }).unwrap_or(Budget::unconstrained()) - } -} - -cfg_coop! 
{ - use std::cell::Cell; - use std::task::{Context, Poll}; - - #[must_use] - pub(crate) struct RestoreOnPending(Cell); - - impl RestoreOnPending { - pub(crate) fn made_progress(&self) { - self.0.set(Budget::unconstrained()); - } - } - - impl Drop for RestoreOnPending { - fn drop(&mut self) { - // Don't reset if budget was unconstrained or if we made progress. - // They are both represented as the remembered budget being unconstrained. - let budget = self.0.get(); - if !budget.is_unconstrained() { - let _ = context::budget(|cell| { - cell.set(budget); - }); - } - } - } - - /// Returns `Poll::Pending` if the current task has exceeded its budget and should yield. - /// - /// When you call this method, the current budget is decremented. However, to ensure that - /// progress is made every time a task is polled, the budget is automatically restored to its - /// former value if the returned `RestoreOnPending` is dropped. It is the caller's - /// responsibility to call `RestoreOnPending::made_progress` if it made progress, to ensure - /// that the budget empties appropriately. - /// - /// Note that `RestoreOnPending` restores the budget **as it was before `poll_proceed`**. - /// Therefore, if the budget is _further_ adjusted between when `poll_proceed` returns and - /// `RestRestoreOnPending` is dropped, those adjustments are erased unless the caller indicates - /// that progress was made. - #[inline] - pub(crate) fn poll_proceed(cx: &mut Context<'_>) -> Poll { - context::budget(|cell| { - let mut budget = cell.get(); - - let decrement = budget.decrement(); - - if decrement.success { - let restore = RestoreOnPending(Cell::new(cell.get())); - cell.set(budget); - - // avoid double counting - if decrement.hit_zero { - inc_budget_forced_yield_count(); - } - - Poll::Ready(restore) - } else { - cx.waker().wake_by_ref(); - Poll::Pending - } - }).unwrap_or(Poll::Ready(RestoreOnPending(Cell::new(Budget::unconstrained())))) - } - - cfg_rt! { - cfg_metrics! 
{ - #[inline(always)] - fn inc_budget_forced_yield_count() { - let _ = context::with_current(|handle| { - handle.scheduler_metrics().inc_budget_forced_yield_count(); - }); - } - } - - cfg_not_metrics! { - #[inline(always)] - fn inc_budget_forced_yield_count() {} - } - } - - cfg_not_rt! { - #[inline(always)] - fn inc_budget_forced_yield_count() {} - } - - impl Budget { - /// Decrements the budget. Returns `true` if successful. Decrementing fails - /// when there is not enough remaining budget. - fn decrement(&mut self) -> BudgetDecrement { - if let Some(num) = &mut self.0 { - if *num > 0 { - *num -= 1; - - let hit_zero = *num == 0; - - BudgetDecrement { success: true, hit_zero } - } else { - BudgetDecrement { success: false, hit_zero: false } - } - } else { - BudgetDecrement { success: true, hit_zero: false } - } - } - - fn is_unconstrained(self) -> bool { - self.0.is_none() - } - } -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::*; - - #[cfg(all(target_family = "wasm", not(target_os = "wasi")))] - use wasm_bindgen_test::wasm_bindgen_test as test; - - fn get() -> Budget { - context::budget(|cell| cell.get()).unwrap_or(Budget::unconstrained()) - } - - #[test] - fn budgeting() { - use futures::future::poll_fn; - use tokio_test::*; - - assert!(get().0.is_none()); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - - assert!(get().0.is_none()); - drop(coop); - assert!(get().0.is_none()); - - budget(|| { - assert_eq!(get().0, Budget::initial().0); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1); - drop(coop); - // we didn't make progress - assert_eq!(get().0, Budget::initial().0); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1); - coop.made_progress(); - drop(coop); - // we _did_ make progress - assert_eq!(get().0.unwrap(), 
Budget::initial().0.unwrap() - 1); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 2); - coop.made_progress(); - drop(coop); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 2); - - budget(|| { - assert_eq!(get().0, Budget::initial().0); - - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1); - coop.made_progress(); - drop(coop); - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 1); - }); - - assert_eq!(get().0.unwrap(), Budget::initial().0.unwrap() - 2); - }); - - assert!(get().0.is_none()); - - budget(|| { - let n = get().0.unwrap(); - - for _ in 0..n { - let coop = assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); - coop.made_progress(); - } - - let mut task = task::spawn(poll_fn(|cx| { - let coop = ready!(poll_proceed(cx)); - coop.made_progress(); - Poll::Ready(()) - })); - - assert_pending!(task.poll()); - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/driver.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/driver.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/driver.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/driver.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,367 +0,0 @@ -//! Abstracts out the entire chain of runtime sub-drivers into common types. - -// Eventually, this file will see significant refactoring / cleanup. For now, we -// don't need to worry much about dead code with certain feature permutations. 
-#![cfg_attr( - any(not(all(tokio_unstable, feature = "full")), target_family = "wasm"), - allow(dead_code) -)] - -use crate::runtime::park::{ParkThread, UnparkThread}; - -use std::io; -use std::time::Duration; - -#[derive(Debug)] -pub(crate) struct Driver { - inner: TimeDriver, -} - -#[derive(Debug)] -pub(crate) struct Handle { - /// IO driver handle - pub(crate) io: IoHandle, - - /// Signal driver handle - #[cfg_attr(any(not(unix), loom), allow(dead_code))] - pub(crate) signal: SignalHandle, - - /// Time driver handle - pub(crate) time: TimeHandle, - - /// Source of `Instant::now()` - #[cfg_attr(not(all(feature = "time", feature = "test-util")), allow(dead_code))] - pub(crate) clock: Clock, -} - -pub(crate) struct Cfg { - pub(crate) enable_io: bool, - pub(crate) enable_time: bool, - pub(crate) enable_pause_time: bool, - pub(crate) start_paused: bool, - pub(crate) nevents: usize, -} - -impl Driver { - pub(crate) fn new(cfg: Cfg) -> io::Result<(Self, Handle)> { - let (io_stack, io_handle, signal_handle) = create_io_stack(cfg.enable_io, cfg.nevents)?; - - let clock = create_clock(cfg.enable_pause_time, cfg.start_paused); - - let (time_driver, time_handle) = create_time_driver(cfg.enable_time, io_stack, &clock); - - Ok(( - Self { inner: time_driver }, - Handle { - io: io_handle, - signal: signal_handle, - time: time_handle, - clock, - }, - )) - } - - pub(crate) fn is_enabled(&self) -> bool { - self.inner.is_enabled() - } - - pub(crate) fn park(&mut self, handle: &Handle) { - self.inner.park(handle) - } - - pub(crate) fn park_timeout(&mut self, handle: &Handle, duration: Duration) { - self.inner.park_timeout(handle, duration) - } - - pub(crate) fn shutdown(&mut self, handle: &Handle) { - self.inner.shutdown(handle) - } -} - -impl Handle { - pub(crate) fn unpark(&self) { - #[cfg(feature = "time")] - if let Some(handle) = &self.time { - handle.unpark(); - } - - self.io.unpark(); - } - - cfg_io_driver! 
{ - #[track_caller] - pub(crate) fn io(&self) -> &crate::runtime::io::Handle { - self.io - .as_ref() - .expect("A Tokio 1.x context was found, but IO is disabled. Call `enable_io` on the runtime builder to enable IO.") - } - } - - cfg_signal_internal_and_unix! { - #[track_caller] - pub(crate) fn signal(&self) -> &crate::runtime::signal::Handle { - self.signal - .as_ref() - .expect("there is no signal driver running, must be called from the context of Tokio runtime") - } - } - - cfg_time! { - /// Returns a reference to the time driver handle. - /// - /// Panics if no time driver is present. - #[track_caller] - pub(crate) fn time(&self) -> &crate::runtime::time::Handle { - self.time - .as_ref() - .expect("A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers.") - } - - pub(crate) fn clock(&self) -> &Clock { - &self.clock - } - } -} - -// ===== io driver ===== - -cfg_io_driver! { - pub(crate) type IoDriver = crate::runtime::io::Driver; - - #[derive(Debug)] - pub(crate) enum IoStack { - Enabled(ProcessDriver), - Disabled(ParkThread), - } - - #[derive(Debug)] - pub(crate) enum IoHandle { - Enabled(crate::runtime::io::Handle), - Disabled(UnparkThread), - } - - fn create_io_stack(enabled: bool, nevents: usize) -> io::Result<(IoStack, IoHandle, SignalHandle)> { - #[cfg(loom)] - assert!(!enabled); - - let ret = if enabled { - let (io_driver, io_handle) = crate::runtime::io::Driver::new(nevents)?; - - let (signal_driver, signal_handle) = create_signal_driver(io_driver, &io_handle)?; - let process_driver = create_process_driver(signal_driver); - - (IoStack::Enabled(process_driver), IoHandle::Enabled(io_handle), signal_handle) - } else { - let park_thread = ParkThread::new(); - let unpark_thread = park_thread.unpark(); - (IoStack::Disabled(park_thread), IoHandle::Disabled(unpark_thread), Default::default()) - }; - - Ok(ret) - } - - impl IoStack { - pub(crate) fn is_enabled(&self) -> bool { - match self { - 
IoStack::Enabled(..) => true, - IoStack::Disabled(..) => false, - } - } - - pub(crate) fn park(&mut self, handle: &Handle) { - match self { - IoStack::Enabled(v) => v.park(handle), - IoStack::Disabled(v) => v.park(), - } - } - - pub(crate) fn park_timeout(&mut self, handle: &Handle, duration: Duration) { - match self { - IoStack::Enabled(v) => v.park_timeout(handle, duration), - IoStack::Disabled(v) => v.park_timeout(duration), - } - } - - pub(crate) fn shutdown(&mut self, handle: &Handle) { - match self { - IoStack::Enabled(v) => v.shutdown(handle), - IoStack::Disabled(v) => v.shutdown(), - } - } - } - - impl IoHandle { - pub(crate) fn unpark(&self) { - match self { - IoHandle::Enabled(handle) => handle.unpark(), - IoHandle::Disabled(handle) => handle.unpark(), - } - } - - pub(crate) fn as_ref(&self) -> Option<&crate::runtime::io::Handle> { - match self { - IoHandle::Enabled(v) => Some(v), - IoHandle::Disabled(..) => None, - } - } - } -} - -cfg_not_io_driver! { - pub(crate) type IoHandle = UnparkThread; - - #[derive(Debug)] - pub(crate) struct IoStack(ParkThread); - - fn create_io_stack(_enabled: bool, _nevents: usize) -> io::Result<(IoStack, IoHandle, SignalHandle)> { - let park_thread = ParkThread::new(); - let unpark_thread = park_thread.unpark(); - Ok((IoStack(park_thread), unpark_thread, Default::default())) - } - - impl IoStack { - pub(crate) fn park(&mut self, _handle: &Handle) { - self.0.park(); - } - - pub(crate) fn park_timeout(&mut self, _handle: &Handle, duration: Duration) { - self.0.park_timeout(duration); - } - - pub(crate) fn shutdown(&mut self, _handle: &Handle) { - self.0.shutdown(); - } - - /// This is not a "real" driver, so it is not considered enabled. - pub(crate) fn is_enabled(&self) -> bool { - false - } - } -} - -// ===== signal driver ===== - -cfg_signal_internal_and_unix! 
{ - type SignalDriver = crate::runtime::signal::Driver; - pub(crate) type SignalHandle = Option; - - fn create_signal_driver(io_driver: IoDriver, io_handle: &crate::runtime::io::Handle) -> io::Result<(SignalDriver, SignalHandle)> { - let driver = crate::runtime::signal::Driver::new(io_driver, io_handle)?; - let handle = driver.handle(); - Ok((driver, Some(handle))) - } -} - -cfg_not_signal_internal! { - pub(crate) type SignalHandle = (); - - cfg_io_driver! { - type SignalDriver = IoDriver; - - fn create_signal_driver(io_driver: IoDriver, _io_handle: &crate::runtime::io::Handle) -> io::Result<(SignalDriver, SignalHandle)> { - Ok((io_driver, ())) - } - } -} - -// ===== process driver ===== - -cfg_process_driver! { - type ProcessDriver = crate::runtime::process::Driver; - - fn create_process_driver(signal_driver: SignalDriver) -> ProcessDriver { - ProcessDriver::new(signal_driver) - } -} - -cfg_not_process_driver! { - cfg_io_driver! { - type ProcessDriver = SignalDriver; - - fn create_process_driver(signal_driver: SignalDriver) -> ProcessDriver { - signal_driver - } - } -} - -// ===== time driver ===== - -cfg_time! { - #[derive(Debug)] - pub(crate) enum TimeDriver { - Enabled { - driver: crate::runtime::time::Driver, - }, - Disabled(IoStack), - } - - pub(crate) type Clock = crate::time::Clock; - pub(crate) type TimeHandle = Option; - - fn create_clock(enable_pausing: bool, start_paused: bool) -> Clock { - crate::time::Clock::new(enable_pausing, start_paused) - } - - fn create_time_driver( - enable: bool, - io_stack: IoStack, - clock: &Clock, - ) -> (TimeDriver, TimeHandle) { - if enable { - let (driver, handle) = crate::runtime::time::Driver::new(io_stack, clock); - - (TimeDriver::Enabled { driver }, Some(handle)) - } else { - (TimeDriver::Disabled(io_stack), None) - } - } - - impl TimeDriver { - pub(crate) fn is_enabled(&self) -> bool { - match self { - TimeDriver::Enabled { .. 
} => true, - TimeDriver::Disabled(inner) => inner.is_enabled(), - } - } - - pub(crate) fn park(&mut self, handle: &Handle) { - match self { - TimeDriver::Enabled { driver, .. } => driver.park(handle), - TimeDriver::Disabled(v) => v.park(handle), - } - } - - pub(crate) fn park_timeout(&mut self, handle: &Handle, duration: Duration) { - match self { - TimeDriver::Enabled { driver } => driver.park_timeout(handle, duration), - TimeDriver::Disabled(v) => v.park_timeout(handle, duration), - } - } - - pub(crate) fn shutdown(&mut self, handle: &Handle) { - match self { - TimeDriver::Enabled { driver } => driver.shutdown(handle), - TimeDriver::Disabled(v) => v.shutdown(handle), - } - } - } -} - -cfg_not_time! { - type TimeDriver = IoStack; - - pub(crate) type Clock = (); - pub(crate) type TimeHandle = (); - - fn create_clock(_enable_pausing: bool, _start_paused: bool) -> Clock { - () - } - - fn create_time_driver( - _enable: bool, - io_stack: IoStack, - _clock: &Clock, - ) -> (TimeDriver, TimeHandle) { - (io_stack, ()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/dump.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/dump.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/dump.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/dump.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -//! Snapshots of runtime state. -//! -//! See [Handle::dump][crate::runtime::Handle::dump]. - -use std::fmt; - -/// A snapshot of a runtime's state. -/// -/// See [Handle::dump][crate::runtime::Handle::dump]. -#[derive(Debug)] -pub struct Dump { - tasks: Tasks, -} - -/// Snapshots of tasks. -/// -/// See [Handle::dump][crate::runtime::Handle::dump]. -#[derive(Debug)] -pub struct Tasks { - tasks: Vec, -} - -/// A snapshot of a task. -/// -/// See [Handle::dump][crate::runtime::Handle::dump]. -#[derive(Debug)] -pub struct Task { - trace: Trace, -} - -/// An execution trace of a task's last poll. 
-/// -/// See [Handle::dump][crate::runtime::Handle::dump]. -#[derive(Debug)] -pub struct Trace { - inner: super::task::trace::Trace, -} - -impl Dump { - pub(crate) fn new(tasks: Vec) -> Self { - Self { - tasks: Tasks { tasks }, - } - } - - /// Tasks in this snapshot. - pub fn tasks(&self) -> &Tasks { - &self.tasks - } -} - -impl Tasks { - /// Iterate over tasks. - pub fn iter(&self) -> impl Iterator { - self.tasks.iter() - } -} - -impl Task { - pub(crate) fn new(trace: super::task::trace::Trace) -> Self { - Self { - trace: Trace { inner: trace }, - } - } - - /// A trace of this task's state. - pub fn trace(&self) -> &Trace { - &self.trace - } -} - -impl fmt::Display for Trace { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/handle.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/handle.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/handle.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/handle.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,632 +0,0 @@ -#[cfg(tokio_unstable)] -use crate::runtime; -use crate::runtime::{context, scheduler, RuntimeFlavor}; - -/// Handle to the runtime. -/// -/// The handle is internally reference-counted and can be freely cloned. A handle can be -/// obtained using the [`Runtime::handle`] method. -/// -/// [`Runtime::handle`]: crate::runtime::Runtime::handle() -#[derive(Debug, Clone)] -// When the `rt` feature is *not* enabled, this type is still defined, but not -// included in the public API. -pub struct Handle { - pub(crate) inner: scheduler::Handle, -} - -use crate::runtime::task::JoinHandle; -use crate::util::error::{CONTEXT_MISSING_ERROR, THREAD_LOCAL_DESTROYED_ERROR}; - -use std::future::Future; -use std::marker::PhantomData; -use std::{error, fmt}; - -/// Runtime context guard. 
-/// -/// Returned by [`Runtime::enter`] and [`Handle::enter`], the context guard exits -/// the runtime context on drop. -/// -/// [`Runtime::enter`]: fn@crate::runtime::Runtime::enter -#[derive(Debug)] -#[must_use = "Creating and dropping a guard does nothing"] -pub struct EnterGuard<'a> { - _guard: context::SetCurrentGuard, - _handle_lifetime: PhantomData<&'a Handle>, -} - -impl Handle { - /// Enters the runtime context. This allows you to construct types that must - /// have an executor available on creation such as [`Sleep`] or - /// [`TcpStream`]. It will also allow you to call methods such as - /// [`tokio::spawn`] and [`Handle::current`] without panicking. - /// - /// # Panics - /// - /// When calling `Handle::enter` multiple times, the returned guards - /// **must** be dropped in the reverse order that they were acquired. - /// Failure to do so will result in a panic and possible memory leaks. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// let rt = Runtime::new().unwrap(); - /// - /// let _guard = rt.enter(); - /// tokio::spawn(async { - /// println!("Hello world!"); - /// }); - /// ``` - /// - /// Do **not** do the following, this shows a scenario that will result in a - /// panic and possible memory leak. 
- /// - /// ```should_panic - /// use tokio::runtime::Runtime; - /// - /// let rt1 = Runtime::new().unwrap(); - /// let rt2 = Runtime::new().unwrap(); - /// - /// let enter1 = rt1.enter(); - /// let enter2 = rt2.enter(); - /// - /// drop(enter1); - /// drop(enter2); - /// ``` - /// - /// [`Sleep`]: struct@crate::time::Sleep - /// [`TcpStream`]: struct@crate::net::TcpStream - /// [`tokio::spawn`]: fn@crate::spawn - pub fn enter(&self) -> EnterGuard<'_> { - EnterGuard { - _guard: match context::try_set_current(&self.inner) { - Some(guard) => guard, - None => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR), - }, - _handle_lifetime: PhantomData, - } - } - - /// Returns a `Handle` view over the currently running `Runtime`. - /// - /// # Panics - /// - /// This will panic if called outside the context of a Tokio runtime. That means that you must - /// call this on one of the threads **being run by the runtime**, or from a thread with an active - /// `EnterGuard`. Calling this from within a thread created by `std::thread::spawn` (for example) - /// will cause a panic unless that thread has an active `EnterGuard`. - /// - /// # Examples - /// - /// This can be used to obtain the handle of the surrounding runtime from an async - /// block or function running on that runtime. - /// - /// ``` - /// # use std::thread; - /// # use tokio::runtime::Runtime; - /// # fn dox() { - /// # let rt = Runtime::new().unwrap(); - /// # rt.spawn(async { - /// use tokio::runtime::Handle; - /// - /// // Inside an async block or function. - /// let handle = Handle::current(); - /// handle.spawn(async { - /// println!("now running in the existing Runtime"); - /// }); - /// - /// # let handle = - /// thread::spawn(move || { - /// // Notice that the handle is created outside of this thread and then moved in - /// handle.spawn(async { /* ... 
*/ }); - /// // This next line would cause a panic because we haven't entered the runtime - /// // and created an EnterGuard - /// // let handle2 = Handle::current(); // panic - /// // So we create a guard here with Handle::enter(); - /// let _guard = handle.enter(); - /// // Now we can call Handle::current(); - /// let handle2 = Handle::current(); - /// }); - /// # handle.join().unwrap(); - /// # }); - /// # } - /// ``` - #[track_caller] - pub fn current() -> Self { - Handle { - inner: scheduler::Handle::current(), - } - } - - /// Returns a Handle view over the currently running Runtime - /// - /// Returns an error if no Runtime has been started - /// - /// Contrary to `current`, this never panics - pub fn try_current() -> Result { - context::with_current(|inner| Handle { - inner: inner.clone(), - }) - } - - /// Spawns a future onto the Tokio runtime. - /// - /// This spawns the given future onto the runtime's executor, usually a - /// thread pool. The thread pool is then responsible for polling the future - /// until it completes. - /// - /// The provided future will start running in the background immediately - /// when `spawn` is called, even if you don't await the returned - /// `JoinHandle`. - /// - /// See [module level][mod] documentation for more details. - /// - /// [mod]: index.html - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// # fn dox() { - /// // Create the runtime - /// let rt = Runtime::new().unwrap(); - /// // Get a handle from this runtime - /// let handle = rt.handle(); - /// - /// // Spawn a future onto the runtime using the handle - /// handle.spawn(async { - /// println!("now running on a worker thread"); - /// }); - /// # } - /// ``` - #[track_caller] - pub fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - self.spawn_named(future, None) - } - - /// Runs the provided function on an executor dedicated to blocking - /// operations. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// # fn dox() { - /// // Create the runtime - /// let rt = Runtime::new().unwrap(); - /// // Get a handle from this runtime - /// let handle = rt.handle(); - /// - /// // Spawn a blocking function onto the runtime using the handle - /// handle.spawn_blocking(|| { - /// println!("now running on a worker thread"); - /// }); - /// # } - #[track_caller] - pub fn spawn_blocking(&self, func: F) -> JoinHandle - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - self.inner.blocking_spawner().spawn_blocking(self, func) - } - - /// Runs a future to completion on this `Handle`'s associated `Runtime`. - /// - /// This runs the given future on the current thread, blocking until it is - /// complete, and yielding its resolved result. Any tasks or timers which - /// the future spawns internally will be executed on the runtime. - /// - /// When this is used on a `current_thread` runtime, only the - /// [`Runtime::block_on`] method can drive the IO and timer drivers, but the - /// `Handle::block_on` method cannot drive them. This means that, when using - /// this method on a current_thread runtime, anything that relies on IO or - /// timers will not work unless there is another thread currently calling - /// [`Runtime::block_on`] on the same runtime. - /// - /// # If the runtime has been shut down - /// - /// If the `Handle`'s associated `Runtime` has been shut down (through - /// [`Runtime::shutdown_background`], [`Runtime::shutdown_timeout`], or by - /// dropping it) and `Handle::block_on` is used it might return an error or - /// panic. Specifically IO resources will return an error and timers will - /// panic. Runtime independent futures will run as normal. - /// - /// # Panics - /// - /// This function panics if the provided future panics, if called within an - /// asynchronous execution context, or if a timer future is executed on a - /// runtime that has been shut down. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// // Create the runtime - /// let rt = Runtime::new().unwrap(); - /// - /// // Get a handle from this runtime - /// let handle = rt.handle(); - /// - /// // Execute the future, blocking the current thread until completion - /// handle.block_on(async { - /// println!("hello"); - /// }); - /// ``` - /// - /// Or using `Handle::current`: - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main () { - /// let handle = Handle::current(); - /// std::thread::spawn(move || { - /// // Using Handle::block_on to run async code in the new thread. - /// handle.block_on(async { - /// println!("hello"); - /// }); - /// }); - /// } - /// ``` - /// - /// [`JoinError`]: struct@crate::task::JoinError - /// [`JoinHandle`]: struct@crate::task::JoinHandle - /// [`Runtime::block_on`]: fn@crate::runtime::Runtime::block_on - /// [`Runtime::shutdown_background`]: fn@crate::runtime::Runtime::shutdown_background - /// [`Runtime::shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout - /// [`spawn_blocking`]: crate::task::spawn_blocking - /// [`tokio::fs`]: crate::fs - /// [`tokio::net`]: crate::net - /// [`tokio::time`]: crate::time - #[track_caller] - pub fn block_on(&self, future: F) -> F::Output { - #[cfg(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") - ))] - let future = super::task::trace::Trace::root(future); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let future = - crate::util::trace::task(future, "block_on", None, super::task::Id::next().as_u64()); - - // Enter the runtime context. This sets the current driver handles and - // prevents blocking an existing runtime. 
- context::enter_runtime(&self.inner, true, |blocking| { - blocking.block_on(future).expect("failed to park thread") - }) - } - - #[track_caller] - pub(crate) fn spawn_named(&self, future: F, _name: Option<&str>) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - let id = crate::runtime::task::Id::next(); - #[cfg(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") - ))] - let future = super::task::trace::Trace::root(future); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let future = crate::util::trace::task(future, "task", _name, id.as_u64()); - self.inner.spawn(future, id) - } - - /// Returns the flavor of the current `Runtime`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{Handle, RuntimeFlavor}; - /// - /// #[tokio::main(flavor = "current_thread")] - /// async fn main() { - /// assert_eq!(RuntimeFlavor::CurrentThread, Handle::current().runtime_flavor()); - /// } - /// ``` - /// - /// ``` - /// use tokio::runtime::{Handle, RuntimeFlavor}; - /// - /// #[tokio::main(flavor = "multi_thread", worker_threads = 4)] - /// async fn main() { - /// assert_eq!(RuntimeFlavor::MultiThread, Handle::current().runtime_flavor()); - /// } - /// ``` - pub fn runtime_flavor(&self) -> RuntimeFlavor { - match self.inner { - scheduler::Handle::CurrentThread(_) => RuntimeFlavor::CurrentThread, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThread(_) => RuntimeFlavor::MultiThread, - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThreadAlt(_) => RuntimeFlavor::MultiThreadAlt, - } - } - - cfg_unstable! { - /// Returns the [`Id`] of the current `Runtime`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main(flavor = "current_thread")] - /// async fn main() { - /// println!("Current runtime id: {}", Handle::current().id()); - /// } - /// ``` - /// - /// **Note**: This is an [unstable API][unstable]. The public API of this type - /// may break in 1.x releases. See [the documentation on unstable - /// features][unstable] for details. - /// - /// [unstable]: crate#unstable-features - /// [`Id`]: struct@crate::runtime::Id - pub fn id(&self) -> runtime::Id { - let owned_id = match &self.inner { - scheduler::Handle::CurrentThread(handle) => handle.owned_id(), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThread(handle) => handle.owned_id(), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThreadAlt(handle) => handle.owned_id(), - }; - owned_id.into() - } - } -} - -cfg_metrics! { - use crate::runtime::RuntimeMetrics; - - impl Handle { - /// Returns a view that lets you get information about how the runtime - /// is performing. - pub fn metrics(&self) -> RuntimeMetrics { - RuntimeMetrics::new(self.clone()) - } - } -} - -cfg_taskdump! { - impl Handle { - /// Captures a snapshot of the runtime's state. - /// - /// This functionality is experimental, and comes with a number of - /// requirements and limitations. - /// - /// # Examples - /// - /// This can be used to get call traces of each task in the runtime. - /// Calls to `Handle::dump` should usually be enclosed in a - /// [timeout][crate::time::timeout], so that dumping does not escalate a - /// single blocked runtime thread into an entirely blocked runtime. - /// - /// ``` - /// # use tokio::runtime::Runtime; - /// # fn dox() { - /// # let rt = Runtime::new().unwrap(); - /// # rt.spawn(async { - /// use tokio::runtime::Handle; - /// use tokio::time::{timeout, Duration}; - /// - /// // Inside an async block or function. 
- /// let handle = Handle::current(); - /// if let Ok(dump) = timeout(Duration::from_secs(2), handle.dump()).await { - /// for (i, task) in dump.tasks().iter().enumerate() { - /// let trace = task.trace(); - /// println!("TASK {i}:"); - /// println!("{trace}\n"); - /// } - /// } - /// # }); - /// # } - /// ``` - /// - /// This produces highly detailed traces of tasks; e.g.: - /// - /// ```plain - /// TASK 0: - /// ╼ dump::main::{{closure}}::a::{{closure}} at /tokio/examples/dump.rs:18:20 - /// └╼ dump::main::{{closure}}::b::{{closure}} at /tokio/examples/dump.rs:23:20 - /// └╼ dump::main::{{closure}}::c::{{closure}} at /tokio/examples/dump.rs:28:24 - /// └╼ tokio::sync::barrier::Barrier::wait::{{closure}} at /tokio/tokio/src/sync/barrier.rs:129:10 - /// └╼ as core::future::future::Future>::poll at /tokio/tokio/src/util/trace.rs:77:46 - /// └╼ tokio::sync::barrier::Barrier::wait_internal::{{closure}} at /tokio/tokio/src/sync/barrier.rs:183:36 - /// └╼ tokio::sync::watch::Receiver::changed::{{closure}} at /tokio/tokio/src/sync/watch.rs:604:55 - /// └╼ tokio::sync::watch::changed_impl::{{closure}} at /tokio/tokio/src/sync/watch.rs:755:18 - /// └╼ ::poll at /tokio/tokio/src/sync/notify.rs:1103:9 - /// └╼ tokio::sync::notify::Notified::poll_notified at /tokio/tokio/src/sync/notify.rs:996:32 - /// ``` - /// - /// # Requirements - /// - /// ## Debug Info Must Be Available - /// - /// To produce task traces, the application must **not** be compiled - /// with split debuginfo. On Linux, including debuginfo within the - /// application binary is the (correct) default. You can further ensure - /// this behavior with the following directive in your `Cargo.toml`: - /// - /// ```toml - /// [profile.*] - /// split-debuginfo = "off" - /// ``` - /// - /// ## Unstable Features - /// - /// This functionality is **unstable**, and requires both the - /// `tokio_unstable` and `tokio_taskdump` cfg flags to be set. 
- /// - /// You can do this by setting the `RUSTFLAGS` environment variable - /// before invoking `cargo`; e.g.: - /// ```bash - /// RUSTFLAGS="--cfg tokio_unstable --cfg tokio_taskdump" cargo run --example dump - /// ``` - /// - /// Or by [configuring][cargo-config] `rustflags` in - /// `.cargo/config.toml`: - /// ```text - /// [build] - /// rustflags = ["--cfg tokio_unstable", "--cfg tokio_taskdump"] - /// ``` - /// - /// [cargo-config]: - /// https://doc.rust-lang.org/cargo/reference/config.html - /// - /// ## Platform Requirements - /// - /// Task dumps are supported on Linux atop aarch64, x86 and x86_64. - /// - /// ## Current Thread Runtime Requirements - /// - /// On the `current_thread` runtime, task dumps may only be requested - /// from *within* the context of the runtime being dumped. Do not, for - /// example, await `Handle::dump()` on a different runtime. - /// - /// # Limitations - /// - /// ## Performance - /// - /// Although enabling the `tokio_taskdump` feature imposes virtually no - /// additional runtime overhead, actually calling `Handle::dump` is - /// expensive. The runtime must synchronize and pause its workers, then - /// re-poll every task in a special tracing mode. Avoid requesting dumps - /// often. - /// - /// ## Local Executors - /// - /// Tasks managed by local executors (e.g., `FuturesUnordered` and - /// [`LocalSet`][crate::task::LocalSet]) may not appear in task dumps. - /// - /// ## Non-Termination When Workers Are Blocked - /// - /// The future produced by `Handle::dump` may never produce `Ready` if - /// another runtime worker is blocked for more than 250ms. This may - /// occur if a dump is requested during shutdown, or if another runtime - /// worker is infinite looping or synchronously deadlocked. For these - /// reasons, task dumping should usually be paired with an explicit - /// [timeout][crate::time::timeout]. 
- pub async fn dump(&self) -> crate::runtime::Dump { - match &self.inner { - scheduler::Handle::CurrentThread(handle) => handle.dump(), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThread(handle) => { - // perform the trace in a separate thread so that the - // trace itself does not appear in the taskdump. - let handle = handle.clone(); - spawn_thread(async { - let handle = handle; - handle.dump().await - }).await - }, - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Handle::MultiThreadAlt(_) => panic!("task dump not implemented for this runtime flavor"), - } - } - - /// Produces `true` if the current task is being traced for a dump; - /// otherwise false. This function is only public for integration - /// testing purposes. Do not rely on it. - #[doc(hidden)] - pub fn is_tracing() -> bool { - super::task::trace::Context::is_tracing() - } - } - - cfg_rt_multi_thread! { - /// Spawn a new thread and asynchronously await on its result. - async fn spawn_thread(f: F) -> ::Output - where - F: Future + Send + 'static, - ::Output: Send + 'static - { - let (tx, rx) = crate::sync::oneshot::channel(); - crate::loom::thread::spawn(|| { - let rt = crate::runtime::Builder::new_current_thread().build().unwrap(); - rt.block_on(async { - let _ = tx.send(f.await); - }); - }); - rx.await.unwrap() - } - } -} - -/// Error returned by `try_current` when no Runtime has been started -#[derive(Debug)] -pub struct TryCurrentError { - kind: TryCurrentErrorKind, -} - -impl TryCurrentError { - pub(crate) fn new_no_context() -> Self { - Self { - kind: TryCurrentErrorKind::NoContext, - } - } - - pub(crate) fn new_thread_local_destroyed() -> Self { - Self { - kind: TryCurrentErrorKind::ThreadLocalDestroyed, - } - } - - /// Returns true if the call failed because there is currently no runtime in - /// the Tokio context. 
- pub fn is_missing_context(&self) -> bool { - matches!(self.kind, TryCurrentErrorKind::NoContext) - } - - /// Returns true if the call failed because the Tokio context thread-local - /// had been destroyed. This can usually only happen if in the destructor of - /// other thread-locals. - pub fn is_thread_local_destroyed(&self) -> bool { - matches!(self.kind, TryCurrentErrorKind::ThreadLocalDestroyed) - } -} - -enum TryCurrentErrorKind { - NoContext, - ThreadLocalDestroyed, -} - -impl fmt::Debug for TryCurrentErrorKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TryCurrentErrorKind::NoContext => f.write_str("NoContext"), - TryCurrentErrorKind::ThreadLocalDestroyed => f.write_str("ThreadLocalDestroyed"), - } - } -} - -impl fmt::Display for TryCurrentError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use TryCurrentErrorKind as E; - match self.kind { - E::NoContext => f.write_str(CONTEXT_MISSING_ERROR), - E::ThreadLocalDestroyed => f.write_str(THREAD_LOCAL_DESTROYED_ERROR), - } - } -} - -impl error::Error for TryCurrentError {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/id.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/id.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/id.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/id.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -use std::fmt; -use std::num::NonZeroU64; - -/// An opaque ID that uniquely identifies a runtime relative to all other currently -/// running runtimes. -/// -/// # Notes -/// -/// - Runtime IDs are unique relative to other *currently running* runtimes. -/// When a runtime completes, the same ID may be used for another runtime. -/// - Runtime IDs are *not* sequential, and do not indicate the order in which -/// runtimes are started or any other data. -/// - The runtime ID of the currently running task can be obtained from the -/// Handle. 
-/// -/// # Examples -/// -/// ``` -/// use tokio::runtime::Handle; -/// -/// #[tokio::main(flavor = "multi_thread", worker_threads = 4)] -/// async fn main() { -/// println!("Current runtime id: {}", Handle::current().id()); -/// } -/// ``` -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. -/// -/// [unstable]: crate#unstable-features -#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] -#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq)] -pub struct Id(NonZeroU64); - -impl From for Id { - fn from(value: NonZeroU64) -> Self { - Id(value) - } -} - -impl fmt::Display for Id { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/driver/signal.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/driver/signal.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/driver/signal.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/driver/signal.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,22 +0,0 @@ -use super::{Driver, Handle, TOKEN_SIGNAL}; - -use std::io; - -impl Handle { - pub(crate) fn register_signal_receiver( - &self, - receiver: &mut mio::net::UnixStream, - ) -> io::Result<()> { - self.registry - .register(receiver, TOKEN_SIGNAL, mio::Interest::READABLE)?; - Ok(()) - } -} - -impl Driver { - pub(crate) fn consume_signal_ready(&mut self) -> bool { - let ret = self.signal_ready; - self.signal_ready = false; - ret - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/driver.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/driver.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/driver.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/driver.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,280 +0,0 @@ -// Signal handling 
-cfg_signal_internal_and_unix! { - mod signal; -} - -use crate::io::interest::Interest; -use crate::io::ready::Ready; -use crate::loom::sync::Mutex; -use crate::runtime::driver; -use crate::runtime::io::registration_set; -use crate::runtime::io::{IoDriverMetrics, RegistrationSet, ScheduledIo}; - -use mio::event::Source; -use std::fmt; -use std::io; -use std::sync::Arc; -use std::time::Duration; - -/// I/O driver, backed by Mio. -pub(crate) struct Driver { - /// Tracks the number of times `turn` is called. It is safe for this to wrap - /// as it is mostly used to determine when to call `compact()`. - tick: u8, - - /// True when an event with the signal token is received - signal_ready: bool, - - /// Reuse the `mio::Events` value across calls to poll. - events: mio::Events, - - /// The system event queue. - poll: mio::Poll, -} - -/// A reference to an I/O driver. -pub(crate) struct Handle { - /// Registers I/O resources. - registry: mio::Registry, - - /// Tracks all registrations - registrations: RegistrationSet, - - /// State that should be synchronized - synced: Mutex, - - /// Used to wake up the reactor from a call to `turn`. - /// Not supported on Wasi due to lack of threading support. 
- #[cfg(not(target_os = "wasi"))] - waker: mio::Waker, - - pub(crate) metrics: IoDriverMetrics, -} - -#[derive(Debug)] -pub(crate) struct ReadyEvent { - pub(super) tick: u8, - pub(crate) ready: Ready, - pub(super) is_shutdown: bool, -} - -cfg_net_unix!( - impl ReadyEvent { - pub(crate) fn with_ready(&self, ready: Ready) -> Self { - Self { - ready, - tick: self.tick, - is_shutdown: self.is_shutdown, - } - } - } -); - -#[derive(Debug, Eq, PartialEq, Clone, Copy)] -pub(super) enum Direction { - Read, - Write, -} - -pub(super) enum Tick { - Set(u8), - Clear(u8), -} - -const TOKEN_WAKEUP: mio::Token = mio::Token(0); -const TOKEN_SIGNAL: mio::Token = mio::Token(1); - -fn _assert_kinds() { - fn _assert() {} - - _assert::(); -} - -// ===== impl Driver ===== - -impl Driver { - /// Creates a new event loop, returning any error that happened during the - /// creation. - pub(crate) fn new(nevents: usize) -> io::Result<(Driver, Handle)> { - let poll = mio::Poll::new()?; - #[cfg(not(target_os = "wasi"))] - let waker = mio::Waker::new(poll.registry(), TOKEN_WAKEUP)?; - let registry = poll.registry().try_clone()?; - - let driver = Driver { - tick: 0, - signal_ready: false, - events: mio::Events::with_capacity(nevents), - poll, - }; - - let (registrations, synced) = RegistrationSet::new(); - - let handle = Handle { - registry, - registrations, - synced: Mutex::new(synced), - #[cfg(not(target_os = "wasi"))] - waker, - metrics: IoDriverMetrics::default(), - }; - - Ok((driver, handle)) - } - - pub(crate) fn park(&mut self, rt_handle: &driver::Handle) { - let handle = rt_handle.io(); - self.turn(handle, None); - } - - pub(crate) fn park_timeout(&mut self, rt_handle: &driver::Handle, duration: Duration) { - let handle = rt_handle.io(); - self.turn(handle, Some(duration)); - } - - pub(crate) fn shutdown(&mut self, rt_handle: &driver::Handle) { - let handle = rt_handle.io(); - let ios = handle.registrations.shutdown(&mut handle.synced.lock()); - - // `shutdown()` must be called without 
holding the lock. - for io in ios { - io.shutdown(); - } - } - - fn turn(&mut self, handle: &Handle, max_wait: Option) { - debug_assert!(!handle.registrations.is_shutdown(&handle.synced.lock())); - - self.tick = self.tick.wrapping_add(1); - - handle.release_pending_registrations(); - - let events = &mut self.events; - - // Block waiting for an event to happen, peeling out how many events - // happened. - match self.poll.poll(events, max_wait) { - Ok(_) => {} - Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} - #[cfg(target_os = "wasi")] - Err(e) if e.kind() == io::ErrorKind::InvalidInput => { - // In case of wasm32_wasi this error happens, when trying to poll without subscriptions - // just return from the park, as there would be nothing, which wakes us up. - } - Err(e) => panic!("unexpected error when polling the I/O driver: {:?}", e), - } - - // Process all the events that came in, dispatching appropriately - let mut ready_count = 0; - for event in events.iter() { - let token = event.token(); - - if token == TOKEN_WAKEUP { - // Nothing to do, the event is used to unblock the I/O driver - } else if token == TOKEN_SIGNAL { - self.signal_ready = true; - } else { - let ready = Ready::from_mio(event); - // Use std::ptr::from_exposed_addr when stable - let ptr: *const ScheduledIo = token.0 as *const _; - - // Safety: we ensure that the pointers used as tokens are not freed - // until they are both deregistered from mio **and** we know the I/O - // driver is not concurrently polling. The I/O driver holds ownership of - // an `Arc` so we can safely cast this to a ref. 
- let io: &ScheduledIo = unsafe { &*ptr }; - - io.set_readiness(Tick::Set(self.tick), |curr| curr | ready); - io.wake(ready); - - ready_count += 1; - } - } - - handle.metrics.incr_ready_count_by(ready_count); - } -} - -impl fmt::Debug for Driver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Driver") - } -} - -impl Handle { - /// Forces a reactor blocked in a call to `turn` to wakeup, or otherwise - /// makes the next call to `turn` return immediately. - /// - /// This method is intended to be used in situations where a notification - /// needs to otherwise be sent to the main reactor. If the reactor is - /// currently blocked inside of `turn` then it will wake up and soon return - /// after this method has been called. If the reactor is not currently - /// blocked in `turn`, then the next call to `turn` will not block and - /// return immediately. - pub(crate) fn unpark(&self) { - #[cfg(not(target_os = "wasi"))] - self.waker.wake().expect("failed to wake I/O driver"); - } - - /// Registers an I/O resource with the reactor for a given `mio::Ready` state. - /// - /// The registration token is returned. - pub(super) fn add_source( - &self, - source: &mut impl mio::event::Source, - interest: Interest, - ) -> io::Result> { - let scheduled_io = self.registrations.allocate(&mut self.synced.lock())?; - let token = scheduled_io.token(); - - // TODO: if this returns an err, the `ScheduledIo` leaks... - self.registry.register(source, token, interest.to_mio())?; - - // TODO: move this logic to `RegistrationSet` and use a `CountedLinkedList` - self.metrics.incr_fd_count(); - - Ok(scheduled_io) - } - - /// Deregisters an I/O resource from the reactor. 
- pub(super) fn deregister_source( - &self, - registration: &Arc, - source: &mut impl Source, - ) -> io::Result<()> { - // Deregister the source with the OS poller **first** - self.registry.deregister(source)?; - - if self - .registrations - .deregister(&mut self.synced.lock(), registration) - { - self.unpark(); - } - - self.metrics.dec_fd_count(); - - Ok(()) - } - - fn release_pending_registrations(&self) { - if self.registrations.needs_release() { - self.registrations.release(&mut self.synced.lock()); - } - } -} - -impl fmt::Debug for Handle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Handle") - } -} - -impl Direction { - pub(super) fn mask(self) -> Ready { - match self { - Direction::Read => Ready::READABLE | Ready::READ_CLOSED, - Direction::Write => Ready::WRITABLE | Ready::WRITE_CLOSED, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/metrics.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/metrics.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/metrics.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/metrics.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -//! This file contains mocks of the metrics types used in the I/O driver. -//! -//! The reason these mocks don't live in `src/runtime/mock.rs` is because -//! these need to be available in the case when `net` is enabled but -//! `rt` is not. - -cfg_not_rt_and_metrics_and_net! { - #[derive(Default)] - pub(crate) struct IoDriverMetrics {} - - impl IoDriverMetrics { - pub(crate) fn incr_fd_count(&self) {} - pub(crate) fn dec_fd_count(&self) {} - pub(crate) fn incr_ready_count_by(&self, _amt: u64) {} - } -} - -cfg_net! { - cfg_rt! { - cfg_metrics! 
{ - pub(crate) use crate::runtime::IoDriverMetrics; - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,16 +0,0 @@ -#![cfg_attr(not(all(feature = "rt", feature = "net")), allow(dead_code))] -mod driver; -use driver::{Direction, Tick}; -pub(crate) use driver::{Driver, Handle, ReadyEvent}; - -mod registration; -pub(crate) use registration::Registration; - -mod registration_set; -use registration_set::RegistrationSet; - -mod scheduled_io; -use scheduled_io::ScheduledIo; - -mod metrics; -use metrics::IoDriverMetrics; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/registration.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/registration.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/registration.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/registration.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,254 +0,0 @@ -#![cfg_attr(not(feature = "net"), allow(dead_code))] - -use crate::io::interest::Interest; -use crate::runtime::io::{Direction, Handle, ReadyEvent, ScheduledIo}; -use crate::runtime::scheduler; - -use mio::event::Source; -use std::io; -use std::sync::Arc; -use std::task::{Context, Poll}; - -cfg_io_driver! { - /// Associates an I/O resource with the reactor instance that drives it. - /// - /// A registration represents an I/O resource registered with a Reactor such - /// that it will receive task notifications on readiness. This is the lowest - /// level API for integrating with a reactor. - /// - /// The association between an I/O resource is made by calling - /// [`new_with_interest_and_handle`]. 
- /// Once the association is established, it remains established until the - /// registration instance is dropped. - /// - /// A registration instance represents two separate readiness streams. One - /// for the read readiness and one for write readiness. These streams are - /// independent and can be consumed from separate tasks. - /// - /// **Note**: while `Registration` is `Sync`, the caller must ensure that - /// there are at most two tasks that use a registration instance - /// concurrently. One task for [`poll_read_ready`] and one task for - /// [`poll_write_ready`]. While violating this requirement is "safe" from a - /// Rust memory safety point of view, it will result in unexpected behavior - /// in the form of lost notifications and tasks hanging. - /// - /// ## Platform-specific events - /// - /// `Registration` also allows receiving platform-specific `mio::Ready` - /// events. These events are included as part of the read readiness event - /// stream. The write readiness event stream is only for `Ready::writable()` - /// events. - /// - /// [`new_with_interest_and_handle`]: method@Self::new_with_interest_and_handle - /// [`poll_read_ready`]: method@Self::poll_read_ready` - /// [`poll_write_ready`]: method@Self::poll_write_ready` - #[derive(Debug)] - pub(crate) struct Registration { - /// Handle to the associated runtime. - /// - /// TODO: this can probably be moved into `ScheduledIo`. - handle: scheduler::Handle, - - /// Reference to state stored by the driver. - shared: Arc, - } -} - -unsafe impl Send for Registration {} -unsafe impl Sync for Registration {} - -// ===== impl Registration ===== - -impl Registration { - /// Registers the I/O resource with the reactor for the provided handle, for - /// a specific `Interest`. This does not add `hup` or `error` so if you are - /// interested in those states, you will need to add them to the readiness - /// state passed to this function. 
- /// - /// # Return - /// - /// - `Ok` if the registration happened successfully - /// - `Err` if an error was encountered during registration - #[track_caller] - pub(crate) fn new_with_interest_and_handle( - io: &mut impl Source, - interest: Interest, - handle: scheduler::Handle, - ) -> io::Result { - let shared = handle.driver().io().add_source(io, interest)?; - - Ok(Registration { handle, shared }) - } - - /// Deregisters the I/O resource from the reactor it is associated with. - /// - /// This function must be called before the I/O resource associated with the - /// registration is dropped. - /// - /// Note that deregistering does not guarantee that the I/O resource can be - /// registered with a different reactor. Some I/O resource types can only be - /// associated with a single reactor instance for their lifetime. - /// - /// # Return - /// - /// If the deregistration was successful, `Ok` is returned. Any calls to - /// `Reactor::turn` that happen after a successful call to `deregister` will - /// no longer result in notifications getting sent for this registration. - /// - /// `Err` is returned if an error is encountered. - pub(crate) fn deregister(&mut self, io: &mut impl Source) -> io::Result<()> { - self.handle().deregister_source(&self.shared, io) - } - - pub(crate) fn clear_readiness(&self, event: ReadyEvent) { - self.shared.clear_readiness(event); - } - - // Uses the poll path, requiring the caller to ensure mutual exclusion for - // correctness. Only the last task to call this function is notified. - pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.poll_ready(cx, Direction::Read) - } - - // Uses the poll path, requiring the caller to ensure mutual exclusion for - // correctness. Only the last task to call this function is notified. 
- pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - self.poll_ready(cx, Direction::Write) - } - - // Uses the poll path, requiring the caller to ensure mutual exclusion for - // correctness. Only the last task to call this function is notified. - #[cfg(not(target_os = "wasi"))] - pub(crate) fn poll_read_io( - &self, - cx: &mut Context<'_>, - f: impl FnMut() -> io::Result, - ) -> Poll> { - self.poll_io(cx, Direction::Read, f) - } - - // Uses the poll path, requiring the caller to ensure mutual exclusion for - // correctness. Only the last task to call this function is notified. - pub(crate) fn poll_write_io( - &self, - cx: &mut Context<'_>, - f: impl FnMut() -> io::Result, - ) -> Poll> { - self.poll_io(cx, Direction::Write, f) - } - - /// Polls for events on the I/O resource's `direction` readiness stream. - /// - /// If called with a task context, notify the task when a new event is - /// received. - fn poll_ready( - &self, - cx: &mut Context<'_>, - direction: Direction, - ) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - // Keep track of task budget - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - let ev = ready!(self.shared.poll_readiness(cx, direction)); - - if ev.is_shutdown { - return Poll::Ready(Err(gone())); - } - - coop.made_progress(); - Poll::Ready(Ok(ev)) - } - - fn poll_io( - &self, - cx: &mut Context<'_>, - direction: Direction, - mut f: impl FnMut() -> io::Result, - ) -> Poll> { - loop { - let ev = ready!(self.poll_ready(cx, direction))?; - - match f() { - Ok(ret) => { - return Poll::Ready(Ok(ret)); - } - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.clear_readiness(ev); - } - Err(e) => return Poll::Ready(Err(e)), - } - } - } - - pub(crate) fn try_io( - &self, - interest: Interest, - f: impl FnOnce() -> io::Result, - ) -> io::Result { - let ev = self.shared.ready_event(interest); - - // Don't attempt the operation if the resource is not ready. 
- if ev.ready.is_empty() { - return Err(io::ErrorKind::WouldBlock.into()); - } - - match f() { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.clear_readiness(ev); - Err(io::ErrorKind::WouldBlock.into()) - } - res => res, - } - } - - pub(crate) async fn readiness(&self, interest: Interest) -> io::Result { - let ev = self.shared.readiness(interest).await; - - if ev.is_shutdown { - return Err(gone()); - } - - Ok(ev) - } - - pub(crate) async fn async_io( - &self, - interest: Interest, - mut f: impl FnMut() -> io::Result, - ) -> io::Result { - loop { - let event = self.readiness(interest).await?; - - match f() { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.clear_readiness(event); - } - x => return x, - } - } - } - - fn handle(&self) -> &Handle { - self.handle.driver().io() - } -} - -impl Drop for Registration { - fn drop(&mut self) { - // It is possible for a cycle to be created between wakers stored in - // `ScheduledIo` instances and `Arc`. To break this - // cycle, wakers are cleared. This is an imperfect solution as it is - // possible to store a `Registration` in a waker. In this case, the - // cycle would remain. - // - // See tokio-rs/tokio#3481 for more details. 
- self.shared.clear_wakers(); - } -} - -fn gone() -> io::Error { - io::Error::new( - io::ErrorKind::Other, - crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR, - ) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/registration_set.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/registration_set.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/registration_set.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/registration_set.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,134 +0,0 @@ -use crate::loom::sync::atomic::AtomicUsize; -use crate::runtime::io::ScheduledIo; -use crate::util::linked_list::{self, LinkedList}; - -use std::io; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::{Acquire, Release}; -use std::sync::Arc; - -pub(super) struct RegistrationSet { - num_pending_release: AtomicUsize, -} - -pub(super) struct Synced { - // True when the I/O driver shutdown. At this point, no more registrations - // should be added to the set. - is_shutdown: bool, - - // List of all registrations tracked by the set - registrations: LinkedList, ScheduledIo>, - - // Registrations that are pending drop. When a `Registration` is dropped, it - // stores its `ScheduledIo` in this list. The I/O driver is responsible for - // dropping it. This ensures the `ScheduledIo` is not freed while it can - // still be included in an I/O event. 
- pending_release: Vec>, -} - -impl RegistrationSet { - pub(super) fn new() -> (RegistrationSet, Synced) { - let set = RegistrationSet { - num_pending_release: AtomicUsize::new(0), - }; - - let synced = Synced { - is_shutdown: false, - registrations: LinkedList::new(), - pending_release: Vec::with_capacity(16), - }; - - (set, synced) - } - - pub(super) fn is_shutdown(&self, synced: &Synced) -> bool { - synced.is_shutdown - } - - /// Returns `true` if there are registrations that need to be released - pub(super) fn needs_release(&self) -> bool { - self.num_pending_release.load(Acquire) != 0 - } - - pub(super) fn allocate(&self, synced: &mut Synced) -> io::Result> { - if synced.is_shutdown { - return Err(io::Error::new( - io::ErrorKind::Other, - crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR, - )); - } - - let ret = Arc::new(ScheduledIo::default()); - - // Push a ref into the list of all resources. - synced.registrations.push_front(ret.clone()); - - Ok(ret) - } - - // Returns `true` if the caller should unblock the I/O driver to purge - // registrations pending release. - pub(super) fn deregister(&self, synced: &mut Synced, registration: &Arc) -> bool { - // Kind of arbitrary, but buffering 16 `ScheduledIo`s doesn't seem like much - const NOTIFY_AFTER: usize = 16; - - synced.pending_release.push(registration.clone()); - - let len = synced.pending_release.len(); - self.num_pending_release.store(len, Release); - - len == NOTIFY_AFTER - } - - pub(super) fn shutdown(&self, synced: &mut Synced) -> Vec> { - if synced.is_shutdown { - return vec![]; - } - - synced.is_shutdown = true; - synced.pending_release.clear(); - - // Building a vec of all outstanding I/O handles could be expensive, but - // this is the shutdown operation. In theory, shutdowns should be - // "clean" with no outstanding I/O resources. Even if it is slow, we - // aren't optimizing for shutdown. 
- let mut ret = vec![]; - - while let Some(io) = synced.registrations.pop_back() { - ret.push(io); - } - - ret - } - - pub(super) fn release(&self, synced: &mut Synced) { - for io in synced.pending_release.drain(..) { - // safety: the registration is part of our list - let _ = unsafe { synced.registrations.remove(io.as_ref().into()) }; - } - - self.num_pending_release.store(0, Release); - } -} - -// Safety: `Arc` pins the inner data -unsafe impl linked_list::Link for Arc { - type Handle = Arc; - type Target = ScheduledIo; - - fn as_raw(handle: &Self::Handle) -> NonNull { - // safety: Arc::as_ptr never returns null - unsafe { NonNull::new_unchecked(Arc::as_ptr(handle) as *mut _) } - } - - unsafe fn from_raw(ptr: NonNull) -> Arc { - // safety: the linked list currently owns a ref count - unsafe { Arc::from_raw(ptr.as_ptr() as *const _) } - } - - unsafe fn pointers( - target: NonNull, - ) -> NonNull> { - NonNull::new_unchecked(target.as_ref().linked_list_pointers.get()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/scheduled_io.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/scheduled_io.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/io/scheduled_io.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/io/scheduled_io.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,594 +0,0 @@ -use crate::io::interest::Interest; -use crate::io::ready::Ready; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::Mutex; -use crate::runtime::io::{Direction, ReadyEvent, Tick}; -use crate::util::bit; -use crate::util::linked_list::{self, LinkedList}; -use crate::util::WakeList; - -use std::cell::UnsafeCell; -use std::future::Future; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::{AcqRel, Acquire}; -use std::task::{Context, Poll, Waker}; - -/// Stored in the I/O driver resource slab. 
-#[derive(Debug)] -// # This struct should be cache padded to avoid false sharing. The cache padding rules are copied -// from crossbeam-utils/src/cache_padded.rs -// -// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache -// lines at a time, so we have to align to 128 bytes rather than 64. -// -// Sources: -// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf -// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107 -// -// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size. -// -// Sources: -// - https://www.mono-project.com/news/2016/09/12/arm64-icache/ -// -// powerpc64 has 128-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9 -#[cfg_attr( - any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - ), - repr(align(128)) -)] -// arm, mips, mips64, sparc, and hexagon have 32-byte cache line size. 
-// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L17 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/hexagon/include/asm/cache.h#L12 -#[cfg_attr( - any( - target_arch = "arm", - target_arch = "mips", - target_arch = "mips64", - target_arch = "sparc", - target_arch = "hexagon", - ), - repr(align(32)) -)] -// m68k has 16-byte cache line size. -// -// Sources: -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/m68k/include/asm/cache.h#L9 -#[cfg_attr(target_arch = "m68k", repr(align(16)))] -// s390x has 256-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/s390/include/asm/cache.h#L13 -#[cfg_attr(target_arch = "s390x", repr(align(256)))] -// x86, riscv, wasm, and sparc64 have 64-byte cache line size. 
-// -// Sources: -// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L19 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10 -// -// All others are assumed to have 64-byte cache line size. -#[cfg_attr( - not(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "arm", - target_arch = "mips", - target_arch = "mips64", - target_arch = "sparc", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "s390x", - )), - repr(align(64)) -)] -pub(crate) struct ScheduledIo { - pub(super) linked_list_pointers: UnsafeCell>, - - /// Packs the resource's readiness and I/O driver latest tick. - readiness: AtomicUsize, - - waiters: Mutex, -} - -type WaitList = LinkedList::Target>; - -#[derive(Debug, Default)] -struct Waiters { - /// List of all current waiters. - list: WaitList, - - /// Waker used for AsyncRead. - reader: Option, - - /// Waker used for AsyncWrite. - writer: Option, -} - -#[derive(Debug)] -struct Waiter { - pointers: linked_list::Pointers, - - /// The waker for this task. - waker: Option, - - /// The interest this waiter is waiting on. - interest: Interest, - - is_ready: bool, - - /// Should never be `!Unpin`. - _p: PhantomPinned, -} - -generate_addr_of_methods! { - impl<> Waiter { - unsafe fn addr_of_pointers(self: NonNull) -> NonNull> { - &self.pointers - } - } -} - -/// Future returned by `readiness()`. -struct Readiness<'a> { - scheduled_io: &'a ScheduledIo, - - state: State, - - /// Entry in the waiter `LinkedList`. 
- waiter: UnsafeCell, -} - -enum State { - Init, - Waiting, - Done, -} - -// The `ScheduledIo::readiness` (`AtomicUsize`) is packed full of goodness. -// -// | shutdown | driver tick | readiness | -// |----------+-------------+-----------| -// | 1 bit | 8 bits + 16 bits | - -const READINESS: bit::Pack = bit::Pack::least_significant(16); - -const TICK: bit::Pack = READINESS.then(8); - -const SHUTDOWN: bit::Pack = TICK.then(1); - -// ===== impl ScheduledIo ===== - -impl Default for ScheduledIo { - fn default() -> ScheduledIo { - ScheduledIo { - linked_list_pointers: UnsafeCell::new(linked_list::Pointers::new()), - readiness: AtomicUsize::new(0), - waiters: Mutex::new(Default::default()), - } - } -} - -impl ScheduledIo { - pub(crate) fn token(&self) -> mio::Token { - // use `expose_addr` when stable - mio::Token(self as *const _ as usize) - } - - /// Invoked when the IO driver is shut down; forces this ScheduledIo into a - /// permanently shutdown state. - pub(super) fn shutdown(&self) { - let mask = SHUTDOWN.pack(1, 0); - self.readiness.fetch_or(mask, AcqRel); - self.wake(Ready::ALL); - } - - /// Sets the readiness on this `ScheduledIo` by invoking the given closure on - /// the current value, returning the previous readiness value. - /// - /// # Arguments - /// - `tick`: whether setting the tick or trying to clear readiness for a - /// specific tick. - /// - `f`: a closure returning a new readiness value given the previous - /// readiness. - pub(super) fn set_readiness(&self, tick: Tick, f: impl Fn(Ready) -> Ready) { - let mut current = self.readiness.load(Acquire); - - // The shutdown bit should not be set - debug_assert_eq!(0, SHUTDOWN.unpack(current)); - - loop { - // Mask out the tick bits so that the modifying function doesn't see - // them. 
- let current_readiness = Ready::from_usize(current); - let new = f(current_readiness); - - let next = match tick { - Tick::Set(t) => TICK.pack(t as usize, new.as_usize()), - Tick::Clear(t) => { - if TICK.unpack(current) as u8 != t { - // Trying to clear readiness with an old event! - return; - } - - TICK.pack(t as usize, new.as_usize()) - } - }; - - match self - .readiness - .compare_exchange(current, next, AcqRel, Acquire) - { - Ok(_) => return, - // we lost the race, retry! - Err(actual) => current = actual, - } - } - } - - /// Notifies all pending waiters that have registered interest in `ready`. - /// - /// There may be many waiters to notify. Waking the pending task **must** be - /// done from outside of the lock otherwise there is a potential for a - /// deadlock. - /// - /// A stack array of wakers is created and filled with wakers to notify, the - /// lock is released, and the wakers are notified. Because there may be more - /// than 32 wakers to notify, if the stack array fills up, the lock is - /// released, the array is cleared, and the iteration continues. - pub(super) fn wake(&self, ready: Ready) { - let mut wakers = WakeList::new(); - - let mut waiters = self.waiters.lock(); - - // check for AsyncRead slot - if ready.is_readable() { - if let Some(waker) = waiters.reader.take() { - wakers.push(waker); - } - } - - // check for AsyncWrite slot - if ready.is_writable() { - if let Some(waker) = waiters.writer.take() { - wakers.push(waker); - } - } - - 'outer: loop { - let mut iter = waiters.list.drain_filter(|w| ready.satisfies(w.interest)); - - while wakers.can_push() { - match iter.next() { - Some(waiter) => { - let waiter = unsafe { &mut *waiter.as_ptr() }; - - if let Some(waker) = waiter.waker.take() { - waiter.is_ready = true; - wakers.push(waker); - } - } - None => { - break 'outer; - } - } - } - - drop(waiters); - - wakers.wake_all(); - - // Acquire the lock again. 
- waiters = self.waiters.lock(); - } - - // Release the lock before notifying - drop(waiters); - - wakers.wake_all(); - } - - pub(super) fn ready_event(&self, interest: Interest) -> ReadyEvent { - let curr = self.readiness.load(Acquire); - - ReadyEvent { - tick: TICK.unpack(curr) as u8, - ready: interest.mask() & Ready::from_usize(READINESS.unpack(curr)), - is_shutdown: SHUTDOWN.unpack(curr) != 0, - } - } - - /// Polls for readiness events in a given direction. - /// - /// These are to support `AsyncRead` and `AsyncWrite` polling methods, - /// which cannot use the `async fn` version. This uses reserved reader - /// and writer slots. - pub(super) fn poll_readiness( - &self, - cx: &mut Context<'_>, - direction: Direction, - ) -> Poll { - let curr = self.readiness.load(Acquire); - - let ready = direction.mask() & Ready::from_usize(READINESS.unpack(curr)); - let is_shutdown = SHUTDOWN.unpack(curr) != 0; - - if ready.is_empty() && !is_shutdown { - // Update the task info - let mut waiters = self.waiters.lock(); - let slot = match direction { - Direction::Read => &mut waiters.reader, - Direction::Write => &mut waiters.writer, - }; - - // Avoid cloning the waker if one is already stored that matches the - // current task. 
- match slot { - Some(existing) => { - if !existing.will_wake(cx.waker()) { - *existing = cx.waker().clone(); - } - } - None => { - *slot = Some(cx.waker().clone()); - } - } - - // Try again, in case the readiness was changed while we were - // taking the waiters lock - let curr = self.readiness.load(Acquire); - let ready = direction.mask() & Ready::from_usize(READINESS.unpack(curr)); - let is_shutdown = SHUTDOWN.unpack(curr) != 0; - if is_shutdown { - Poll::Ready(ReadyEvent { - tick: TICK.unpack(curr) as u8, - ready: direction.mask(), - is_shutdown, - }) - } else if ready.is_empty() { - Poll::Pending - } else { - Poll::Ready(ReadyEvent { - tick: TICK.unpack(curr) as u8, - ready, - is_shutdown, - }) - } - } else { - Poll::Ready(ReadyEvent { - tick: TICK.unpack(curr) as u8, - ready, - is_shutdown, - }) - } - } - - pub(crate) fn clear_readiness(&self, event: ReadyEvent) { - // This consumes the current readiness state **except** for closed - // states. Closed states are excluded because they are final states. - let mask_no_closed = event.ready - Ready::READ_CLOSED - Ready::WRITE_CLOSED; - self.set_readiness(Tick::Clear(event.tick), |curr| curr - mask_no_closed); - } - - pub(crate) fn clear_wakers(&self) { - let mut waiters = self.waiters.lock(); - waiters.reader.take(); - waiters.writer.take(); - } -} - -impl Drop for ScheduledIo { - fn drop(&mut self) { - self.wake(Ready::ALL); - } -} - -unsafe impl Send for ScheduledIo {} -unsafe impl Sync for ScheduledIo {} - -impl ScheduledIo { - /// An async version of `poll_readiness` which uses a linked list of wakers. - pub(crate) async fn readiness(&self, interest: Interest) -> ReadyEvent { - self.readiness_fut(interest).await - } - - // This is in a separate function so that the borrow checker doesn't think - // we are borrowing the `UnsafeCell` possibly over await boundaries. - // - // Go figure. 
- fn readiness_fut(&self, interest: Interest) -> Readiness<'_> { - Readiness { - scheduled_io: self, - state: State::Init, - waiter: UnsafeCell::new(Waiter { - pointers: linked_list::Pointers::new(), - waker: None, - is_ready: false, - interest, - _p: PhantomPinned, - }), - } - } -} - -unsafe impl linked_list::Link for Waiter { - type Handle = NonNull; - type Target = Waiter; - - fn as_raw(handle: &NonNull) -> NonNull { - *handle - } - - unsafe fn from_raw(ptr: NonNull) -> NonNull { - ptr - } - - unsafe fn pointers(target: NonNull) -> NonNull> { - Waiter::addr_of_pointers(target) - } -} - -// ===== impl Readiness ===== - -impl Future for Readiness<'_> { - type Output = ReadyEvent; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - use std::sync::atomic::Ordering::SeqCst; - - let (scheduled_io, state, waiter) = unsafe { - let me = self.get_unchecked_mut(); - (&me.scheduled_io, &mut me.state, &me.waiter) - }; - - loop { - match *state { - State::Init => { - // Optimistically check existing readiness - let curr = scheduled_io.readiness.load(SeqCst); - let ready = Ready::from_usize(READINESS.unpack(curr)); - let is_shutdown = SHUTDOWN.unpack(curr) != 0; - - // Safety: `waiter.interest` never changes - let interest = unsafe { (*waiter.get()).interest }; - let ready = ready.intersection(interest); - - if !ready.is_empty() || is_shutdown { - // Currently ready! - let tick = TICK.unpack(curr) as u8; - *state = State::Done; - return Poll::Ready(ReadyEvent { - tick, - ready, - is_shutdown, - }); - } - - // Wasn't ready, take the lock (and check again while locked). - let mut waiters = scheduled_io.waiters.lock(); - - let curr = scheduled_io.readiness.load(SeqCst); - let mut ready = Ready::from_usize(READINESS.unpack(curr)); - let is_shutdown = SHUTDOWN.unpack(curr) != 0; - - if is_shutdown { - ready = Ready::ALL; - } - - let ready = ready.intersection(interest); - - if !ready.is_empty() || is_shutdown { - // Currently ready! 
- let tick = TICK.unpack(curr) as u8; - *state = State::Done; - return Poll::Ready(ReadyEvent { - tick, - ready, - is_shutdown, - }); - } - - // Not ready even after locked, insert into list... - - // Safety: called while locked - unsafe { - (*waiter.get()).waker = Some(cx.waker().clone()); - } - - // Insert the waiter into the linked list - // - // safety: pointers from `UnsafeCell` are never null. - waiters - .list - .push_front(unsafe { NonNull::new_unchecked(waiter.get()) }); - *state = State::Waiting; - } - State::Waiting => { - // Currently in the "Waiting" state, implying the caller has - // a waiter stored in the waiter list (guarded by - // `notify.waiters`). In order to access the waker fields, - // we must hold the lock. - - let waiters = scheduled_io.waiters.lock(); - - // Safety: called while locked - let w = unsafe { &mut *waiter.get() }; - - if w.is_ready { - // Our waker has been notified. - *state = State::Done; - } else { - // Update the waker, if necessary. - if !w.waker.as_ref().unwrap().will_wake(cx.waker()) { - w.waker = Some(cx.waker().clone()); - } - - return Poll::Pending; - } - - // Explicit drop of the lock to indicate the scope that the - // lock is held. Because holding the lock is required to - // ensure safe access to fields not held within the lock, it - // is helpful to visualize the scope of the critical - // section. - drop(waiters); - } - State::Done => { - // Safety: State::Done means it is no longer shared - let w = unsafe { &mut *waiter.get() }; - - let curr = scheduled_io.readiness.load(Acquire); - let is_shutdown = SHUTDOWN.unpack(curr) != 0; - - // The returned tick might be newer than the event - // which notified our waker. This is ok because the future - // still didn't return `Poll::Ready`. - let tick = TICK.unpack(curr) as u8; - - // The readiness state could have been cleared in the meantime, - // but we allow the returned ready set to be empty. 
- let curr_ready = Ready::from_usize(READINESS.unpack(curr)); - let ready = curr_ready.intersection(w.interest); - - return Poll::Ready(ReadyEvent { - tick, - ready, - is_shutdown, - }); - } - } - } - } -} - -impl Drop for Readiness<'_> { - fn drop(&mut self) { - let mut waiters = self.scheduled_io.waiters.lock(); - - // Safety: `waiter` is only ever stored in `waiters` - unsafe { - waiters - .list - .remove(NonNull::new_unchecked(self.waiter.get())) - }; - } -} - -unsafe impl Send for Readiness<'_> {} -unsafe impl Sync for Readiness<'_> {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/batch.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/batch.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/batch.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/batch.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,163 +0,0 @@ -use crate::runtime::metrics::{HistogramBatch, WorkerMetrics}; - -use std::sync::atomic::Ordering::Relaxed; -use std::time::{Duration, Instant}; - -pub(crate) struct MetricsBatch { - /// Number of times the worker parked. - park_count: u64, - - /// Number of times the worker woke w/o doing work. - noop_count: u64, - - /// Number of tasks stolen. - steal_count: u64, - - /// Number of times tasks where stolen. - steal_operations: u64, - - /// Number of tasks that were polled by the worker. - poll_count: u64, - - /// Number of tasks polled when the worker entered park. This is used to - /// track the noop count. - poll_count_on_last_park: u64, - - /// Number of tasks that were scheduled locally on this worker. - local_schedule_count: u64, - - /// Number of tasks moved to the global queue to make space in the local - /// queue - overflow_count: u64, - - /// The total busy duration in nanoseconds. - busy_duration_total: u64, - - /// Instant at which work last resumed (continued after park). 
- processing_scheduled_tasks_started_at: Instant, - - /// If `Some`, tracks poll times in nanoseconds - poll_timer: Option, -} - -struct PollTimer { - /// Histogram of poll counts within each band. - poll_counts: HistogramBatch, - - /// Instant when the most recent task started polling. - poll_started_at: Instant, -} - -impl MetricsBatch { - pub(crate) fn new(worker_metrics: &WorkerMetrics) -> MetricsBatch { - let now = Instant::now(); - - MetricsBatch { - park_count: 0, - noop_count: 0, - steal_count: 0, - steal_operations: 0, - poll_count: 0, - poll_count_on_last_park: 0, - local_schedule_count: 0, - overflow_count: 0, - busy_duration_total: 0, - processing_scheduled_tasks_started_at: now, - poll_timer: worker_metrics - .poll_count_histogram - .as_ref() - .map(|worker_poll_counts| PollTimer { - poll_counts: HistogramBatch::from_histogram(worker_poll_counts), - poll_started_at: now, - }), - } - } - - pub(crate) fn submit(&mut self, worker: &WorkerMetrics, mean_poll_time: u64) { - worker.mean_poll_time.store(mean_poll_time, Relaxed); - worker.park_count.store(self.park_count, Relaxed); - worker.noop_count.store(self.noop_count, Relaxed); - worker.steal_count.store(self.steal_count, Relaxed); - worker - .steal_operations - .store(self.steal_operations, Relaxed); - worker.poll_count.store(self.poll_count, Relaxed); - - worker - .busy_duration_total - .store(self.busy_duration_total, Relaxed); - - worker - .local_schedule_count - .store(self.local_schedule_count, Relaxed); - worker.overflow_count.store(self.overflow_count, Relaxed); - - if let Some(poll_timer) = &self.poll_timer { - let dst = worker.poll_count_histogram.as_ref().unwrap(); - poll_timer.poll_counts.submit(dst); - } - } - - /// The worker is about to park. 
- pub(crate) fn about_to_park(&mut self) { - self.park_count += 1; - - if self.poll_count_on_last_park == self.poll_count { - self.noop_count += 1; - } else { - self.poll_count_on_last_park = self.poll_count; - } - } - - /// Start processing a batch of tasks - pub(crate) fn start_processing_scheduled_tasks(&mut self) { - self.processing_scheduled_tasks_started_at = Instant::now(); - } - - /// Stop processing a batch of tasks - pub(crate) fn end_processing_scheduled_tasks(&mut self) { - let busy_duration = self.processing_scheduled_tasks_started_at.elapsed(); - self.busy_duration_total += duration_as_u64(busy_duration); - } - - /// Start polling an individual task - pub(crate) fn start_poll(&mut self) { - self.poll_count += 1; - - if let Some(poll_timer) = &mut self.poll_timer { - poll_timer.poll_started_at = Instant::now(); - } - } - - /// Stop polling an individual task - pub(crate) fn end_poll(&mut self) { - if let Some(poll_timer) = &mut self.poll_timer { - let elapsed = duration_as_u64(poll_timer.poll_started_at.elapsed()); - poll_timer.poll_counts.measure(elapsed, 1); - } - } - - pub(crate) fn inc_local_schedule_count(&mut self) { - self.local_schedule_count += 1; - } -} - -cfg_rt_multi_thread! 
{ - impl MetricsBatch { - pub(crate) fn incr_steal_count(&mut self, by: u16) { - self.steal_count += by as u64; - } - - pub(crate) fn incr_steal_operations(&mut self) { - self.steal_operations += 1; - } - - pub(crate) fn incr_overflow_count(&mut self) { - self.overflow_count += 1; - } - } -} - -fn duration_as_u64(dur: Duration) -> u64 { - u64::try_from(dur.as_nanos()).unwrap_or(u64::MAX) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/histogram.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/histogram.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/histogram.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/histogram.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,502 +0,0 @@ -use crate::loom::sync::atomic::{AtomicU64, Ordering::Relaxed}; - -use std::cmp; -use std::ops::Range; - -#[derive(Debug)] -pub(crate) struct Histogram { - /// The histogram buckets - buckets: Box<[AtomicU64]>, - - /// Bucket scale, linear or log - scale: HistogramScale, - - /// Minimum resolution - resolution: u64, -} - -#[derive(Debug, Clone)] -pub(crate) struct HistogramBuilder { - /// Histogram scale - pub(crate) scale: HistogramScale, - - /// Must be a power of 2 - pub(crate) resolution: u64, - - /// Number of buckets - pub(crate) num_buckets: usize, -} - -#[derive(Debug)] -pub(crate) struct HistogramBatch { - buckets: Box<[u64]>, - scale: HistogramScale, - resolution: u64, -} - -cfg_unstable! { - /// Whether the histogram used to aggregate a metric uses a linear or - /// logarithmic scale. 
- #[derive(Debug, Copy, Clone, Eq, PartialEq)] - #[non_exhaustive] - pub enum HistogramScale { - /// Linear bucket scale - Linear, - - /// Logarithmic bucket scale - Log, - } -} - -impl Histogram { - pub(crate) fn num_buckets(&self) -> usize { - self.buckets.len() - } - - pub(crate) fn get(&self, bucket: usize) -> u64 { - self.buckets[bucket].load(Relaxed) - } - - pub(crate) fn bucket_range(&self, bucket: usize) -> Range { - match self.scale { - HistogramScale::Log => Range { - start: if bucket == 0 { - 0 - } else { - self.resolution << (bucket - 1) - }, - end: if bucket == self.buckets.len() - 1 { - u64::MAX - } else { - self.resolution << bucket - }, - }, - HistogramScale::Linear => Range { - start: self.resolution * bucket as u64, - end: if bucket == self.buckets.len() - 1 { - u64::MAX - } else { - self.resolution * (bucket as u64 + 1) - }, - }, - } - } -} - -impl HistogramBatch { - pub(crate) fn from_histogram(histogram: &Histogram) -> HistogramBatch { - let buckets = vec![0; histogram.buckets.len()].into_boxed_slice(); - - HistogramBatch { - buckets, - scale: histogram.scale, - resolution: histogram.resolution, - } - } - - pub(crate) fn measure(&mut self, value: u64, count: u64) { - self.buckets[self.value_to_bucket(value)] += count; - } - - pub(crate) fn submit(&self, histogram: &Histogram) { - debug_assert_eq!(self.scale, histogram.scale); - debug_assert_eq!(self.resolution, histogram.resolution); - debug_assert_eq!(self.buckets.len(), histogram.buckets.len()); - - for i in 0..self.buckets.len() { - histogram.buckets[i].store(self.buckets[i], Relaxed); - } - } - - fn value_to_bucket(&self, value: u64) -> usize { - match self.scale { - HistogramScale::Linear => { - let max = self.buckets.len() - 1; - cmp::min(value / self.resolution, max as u64) as usize - } - HistogramScale::Log => { - let max = self.buckets.len() - 1; - - if value < self.resolution { - 0 - } else { - let significant_digits = 64 - value.leading_zeros(); - let bucket_digits = 64 - 
(self.resolution - 1).leading_zeros(); - cmp::min(significant_digits as usize - bucket_digits as usize, max) - } - } - } - } -} - -impl HistogramBuilder { - pub(crate) fn new() -> HistogramBuilder { - HistogramBuilder { - scale: HistogramScale::Linear, - // Resolution is in nanoseconds. - resolution: 100_000, - num_buckets: 10, - } - } - - pub(crate) fn build(&self) -> Histogram { - let mut resolution = self.resolution; - - assert!(resolution > 0); - - if matches!(self.scale, HistogramScale::Log) { - resolution = resolution.next_power_of_two(); - } - - Histogram { - buckets: (0..self.num_buckets) - .map(|_| AtomicU64::new(0)) - .collect::>() - .into_boxed_slice(), - resolution, - scale: self.scale, - } - } -} - -impl Default for HistogramBuilder { - fn default() -> HistogramBuilder { - HistogramBuilder::new() - } -} - -#[cfg(test)] -mod test { - use super::*; - - macro_rules! assert_bucket_eq { - ($h:expr, $bucket:expr, $val:expr) => {{ - assert_eq!($h.buckets[$bucket], $val); - }}; - } - - #[test] - fn log_scale_resolution_1() { - let h = HistogramBuilder { - scale: HistogramScale::Log, - resolution: 1, - num_buckets: 10, - } - .build(); - - assert_eq!(h.bucket_range(0), 0..1); - assert_eq!(h.bucket_range(1), 1..2); - assert_eq!(h.bucket_range(2), 2..4); - assert_eq!(h.bucket_range(3), 4..8); - assert_eq!(h.bucket_range(9), 256..u64::MAX); - - let mut b = HistogramBatch::from_histogram(&h); - - b.measure(0, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 0); - - b.measure(1, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 1); - assert_bucket_eq!(b, 2, 0); - - b.measure(2, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 1); - assert_bucket_eq!(b, 2, 1); - - b.measure(3, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 1); - assert_bucket_eq!(b, 2, 2); - - b.measure(4, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 1); - assert_bucket_eq!(b, 2, 2); - assert_bucket_eq!(b, 3, 1); - - b.measure(100, 1); - 
assert_bucket_eq!(b, 7, 1); - - b.measure(128, 1); - assert_bucket_eq!(b, 8, 1); - - b.measure(4096, 1); - assert_bucket_eq!(b, 9, 1); - } - - #[test] - fn log_scale_resolution_2() { - let h = HistogramBuilder { - scale: HistogramScale::Log, - resolution: 2, - num_buckets: 10, - } - .build(); - - assert_eq!(h.bucket_range(0), 0..2); - assert_eq!(h.bucket_range(1), 2..4); - assert_eq!(h.bucket_range(2), 4..8); - assert_eq!(h.bucket_range(3), 8..16); - assert_eq!(h.bucket_range(9), 512..u64::MAX); - - let mut b = HistogramBatch::from_histogram(&h); - - b.measure(0, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 0); - - b.measure(1, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 0); - - b.measure(2, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 1); - assert_bucket_eq!(b, 2, 0); - - b.measure(3, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 0); - - b.measure(4, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 1); - - b.measure(5, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 2); - - b.measure(6, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 3); - - b.measure(7, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 4); - - b.measure(8, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 4); - assert_bucket_eq!(b, 3, 1); - - b.measure(100, 1); - assert_bucket_eq!(b, 6, 1); - - b.measure(128, 1); - assert_bucket_eq!(b, 7, 1); - - b.measure(4096, 1); - assert_bucket_eq!(b, 9, 1); - - for bucket in h.buckets.iter() { - assert_eq!(bucket.load(Relaxed), 0); - } - - b.submit(&h); - - for i in 0..h.buckets.len() { - assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]); - } - - b.submit(&h); - - for i in 0..h.buckets.len() { - assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]); - } - } - - 
#[test] - fn linear_scale_resolution_1() { - let h = HistogramBuilder { - scale: HistogramScale::Linear, - resolution: 1, - num_buckets: 10, - } - .build(); - - assert_eq!(h.bucket_range(0), 0..1); - assert_eq!(h.bucket_range(1), 1..2); - assert_eq!(h.bucket_range(2), 2..3); - assert_eq!(h.bucket_range(3), 3..4); - assert_eq!(h.bucket_range(9), 9..u64::MAX); - - let mut b = HistogramBatch::from_histogram(&h); - - b.measure(0, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 0); - - b.measure(1, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 1); - assert_bucket_eq!(b, 2, 0); - - b.measure(2, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 1); - assert_bucket_eq!(b, 2, 1); - assert_bucket_eq!(b, 3, 0); - - b.measure(3, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 1); - assert_bucket_eq!(b, 2, 1); - assert_bucket_eq!(b, 3, 1); - - b.measure(5, 1); - assert_bucket_eq!(b, 5, 1); - - b.measure(4096, 1); - assert_bucket_eq!(b, 9, 1); - - for bucket in h.buckets.iter() { - assert_eq!(bucket.load(Relaxed), 0); - } - - b.submit(&h); - - for i in 0..h.buckets.len() { - assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]); - } - - b.submit(&h); - - for i in 0..h.buckets.len() { - assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]); - } - } - - #[test] - fn linear_scale_resolution_100() { - let h = HistogramBuilder { - scale: HistogramScale::Linear, - resolution: 100, - num_buckets: 10, - } - .build(); - - assert_eq!(h.bucket_range(0), 0..100); - assert_eq!(h.bucket_range(1), 100..200); - assert_eq!(h.bucket_range(2), 200..300); - assert_eq!(h.bucket_range(3), 300..400); - assert_eq!(h.bucket_range(9), 900..u64::MAX); - - let mut b = HistogramBatch::from_histogram(&h); - - b.measure(0, 1); - assert_bucket_eq!(b, 0, 1); - assert_bucket_eq!(b, 1, 0); - - b.measure(50, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 0); - - b.measure(100, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 1); - 
assert_bucket_eq!(b, 2, 0); - - b.measure(101, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 0); - - b.measure(200, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 1); - - b.measure(299, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 2); - - b.measure(222, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 3); - - b.measure(300, 1); - assert_bucket_eq!(b, 0, 2); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 3); - assert_bucket_eq!(b, 3, 1); - - b.measure(888, 1); - assert_bucket_eq!(b, 8, 1); - - b.measure(4096, 1); - assert_bucket_eq!(b, 9, 1); - - for bucket in h.buckets.iter() { - assert_eq!(bucket.load(Relaxed), 0); - } - - b.submit(&h); - - for i in 0..h.buckets.len() { - assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]); - } - - b.submit(&h); - - for i in 0..h.buckets.len() { - assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]); - } - } - - #[test] - fn inc_by_more_than_one() { - let h = HistogramBuilder { - scale: HistogramScale::Linear, - resolution: 100, - num_buckets: 10, - } - .build(); - - let mut b = HistogramBatch::from_histogram(&h); - - b.measure(0, 3); - assert_bucket_eq!(b, 0, 3); - assert_bucket_eq!(b, 1, 0); - - b.measure(50, 5); - assert_bucket_eq!(b, 0, 8); - assert_bucket_eq!(b, 1, 0); - - b.measure(100, 2); - assert_bucket_eq!(b, 0, 8); - assert_bucket_eq!(b, 1, 2); - assert_bucket_eq!(b, 2, 0); - - b.measure(101, 19); - assert_bucket_eq!(b, 0, 8); - assert_bucket_eq!(b, 1, 21); - assert_bucket_eq!(b, 2, 0); - - for bucket in h.buckets.iter() { - assert_eq!(bucket.load(Relaxed), 0); - } - - b.submit(&h); - - for i in 0..h.buckets.len() { - assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]); - } - - b.submit(&h); - - for i in 0..h.buckets.len() { - assert_eq!(h.buckets[i].load(Relaxed), b.buckets[i]); - } - } -} diff -Nru 
s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/io.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/io.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/io.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/io.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -#![cfg_attr(not(feature = "net"), allow(dead_code))] - -use crate::loom::sync::atomic::{AtomicU64, Ordering::Relaxed}; - -#[derive(Default)] -pub(crate) struct IoDriverMetrics { - pub(super) fd_registered_count: AtomicU64, - pub(super) fd_deregistered_count: AtomicU64, - pub(super) ready_count: AtomicU64, -} - -impl IoDriverMetrics { - pub(crate) fn incr_fd_count(&self) { - self.fd_registered_count.fetch_add(1, Relaxed); - } - - pub(crate) fn dec_fd_count(&self) { - self.fd_deregistered_count.fetch_add(1, Relaxed); - } - - pub(crate) fn incr_ready_count_by(&self, amt: u64) { - self.ready_count.fetch_add(amt, Relaxed); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/mock.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/mock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/mock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/mock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -//! 
This file contains mocks of the types in src/runtime/metrics - -pub(crate) struct SchedulerMetrics {} - -pub(crate) struct WorkerMetrics {} - -pub(crate) struct MetricsBatch {} - -#[derive(Clone, Default)] -pub(crate) struct HistogramBuilder {} - -impl SchedulerMetrics { - pub(crate) fn new() -> Self { - Self {} - } - - /// Increment the number of tasks scheduled externally - pub(crate) fn inc_remote_schedule_count(&self) {} -} - -impl WorkerMetrics { - pub(crate) fn new() -> Self { - Self {} - } - - pub(crate) fn from_config(config: &crate::runtime::Config) -> Self { - // Prevent the dead-code warning from being triggered - let _ = &config.metrics_poll_count_histogram; - Self::new() - } - - pub(crate) fn set_queue_depth(&self, _len: usize) {} -} - -impl MetricsBatch { - pub(crate) fn new(_: &WorkerMetrics) -> Self { - Self {} - } - - pub(crate) fn submit(&mut self, _to: &WorkerMetrics, _mean_poll_time: u64) {} - pub(crate) fn about_to_park(&mut self) {} - pub(crate) fn inc_local_schedule_count(&mut self) {} - pub(crate) fn start_processing_scheduled_tasks(&mut self) {} - pub(crate) fn end_processing_scheduled_tasks(&mut self) {} - pub(crate) fn start_poll(&mut self) {} - pub(crate) fn end_poll(&mut self) {} -} - -cfg_rt_multi_thread! { - impl MetricsBatch { - pub(crate) fn incr_steal_count(&mut self, _by: u16) {} - pub(crate) fn incr_steal_operations(&mut self) {} - pub(crate) fn incr_overflow_count(&mut self) {} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,40 +0,0 @@ -//! This module contains information need to view information about how the -//! runtime is performing. -//! -//! **Note**: This is an [unstable API][unstable]. The public API of types in -//! 
this module may break in 1.x releases. See [the documentation on unstable -//! features][unstable] for details. -//! -//! [unstable]: crate#unstable-features -#![allow(clippy::module_inception)] - -cfg_metrics! { - mod batch; - pub(crate) use batch::MetricsBatch; - - mod histogram; - pub(crate) use histogram::{Histogram, HistogramBatch, HistogramBuilder}; - #[allow(unreachable_pub)] // rust-lang/rust#57411 - pub use histogram::HistogramScale; - - mod runtime; - #[allow(unreachable_pub)] // rust-lang/rust#57411 - pub use runtime::RuntimeMetrics; - - mod scheduler; - pub(crate) use scheduler::SchedulerMetrics; - - mod worker; - pub(crate) use worker::WorkerMetrics; - - cfg_net! { - mod io; - pub(crate) use io::IoDriverMetrics; - } -} - -cfg_not_metrics! { - mod mock; - - pub(crate) use mock::{SchedulerMetrics, WorkerMetrics, MetricsBatch, HistogramBuilder}; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/runtime.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/runtime.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/runtime.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/runtime.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,924 +0,0 @@ -use crate::runtime::Handle; - -use std::ops::Range; -use std::sync::atomic::Ordering::Relaxed; -use std::time::Duration; - -/// Handle to the runtime's metrics. -/// -/// This handle is internally reference-counted and can be freely cloned. A -/// `RuntimeMetrics` handle is obtained using the [`Runtime::metrics`] method. -/// -/// [`Runtime::metrics`]: crate::runtime::Runtime::metrics() -#[derive(Clone, Debug)] -pub struct RuntimeMetrics { - handle: Handle, -} - -impl RuntimeMetrics { - pub(crate) fn new(handle: Handle) -> RuntimeMetrics { - RuntimeMetrics { handle } - } - - /// Returns the number of worker threads used by the runtime. - /// - /// The number of workers is set by configuring `worker_threads` on - /// `runtime::Builder`. 
When using the `current_thread` runtime, the return - /// value is always `1`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.num_workers(); - /// println!("Runtime is using {} workers", n); - /// } - /// ``` - pub fn num_workers(&self) -> usize { - self.handle.inner.num_workers() - } - - /// Returns the number of additional threads spawned by the runtime. - /// - /// The number of workers is set by configuring `max_blocking_threads` on - /// `runtime::Builder`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let _ = tokio::task::spawn_blocking(move || { - /// // Stand-in for compute-heavy work or using synchronous APIs - /// 1 + 1 - /// }).await; - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.num_blocking_threads(); - /// println!("Runtime has created {} threads", n); - /// } - /// ``` - pub fn num_blocking_threads(&self) -> usize { - self.handle.inner.num_blocking_threads() - } - - /// Returns the number of active tasks in the runtime. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.active_tasks_count(); - /// println!("Runtime has {} active tasks", n); - /// } - /// ``` - pub fn active_tasks_count(&self) -> usize { - self.handle.inner.active_tasks_count() - } - - /// Returns the number of idle threads, which have spawned by the runtime - /// for `spawn_blocking` calls. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let _ = tokio::task::spawn_blocking(move || { - /// // Stand-in for compute-heavy work or using synchronous APIs - /// 1 + 1 - /// }).await; - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.num_idle_blocking_threads(); - /// println!("Runtime has {} idle blocking thread pool threads", n); - /// } - /// ``` - pub fn num_idle_blocking_threads(&self) -> usize { - self.handle.inner.num_idle_blocking_threads() - } - - /// Returns the number of tasks scheduled from **outside** of the runtime. - /// - /// The remote schedule count starts at zero when the runtime is created and - /// increases by one each time a task is woken from **outside** of the - /// runtime. This usually means that a task is spawned or notified from a - /// non-runtime thread and must be queued using the Runtime's injection - /// queue, which tends to be slower. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.remote_schedule_count(); - /// println!("{} tasks were scheduled from outside the runtime", n); - /// } - /// ``` - pub fn remote_schedule_count(&self) -> u64 { - self.handle - .inner - .scheduler_metrics() - .remote_schedule_count - .load(Relaxed) - } - - /// Returns the number of times that tasks have been forced to yield back to the scheduler - /// after exhausting their task budgets. - /// - /// This count starts at zero when the runtime is created and increases by one each time a task yields due to exhausting its budget. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. 
- pub fn budget_forced_yield_count(&self) -> u64 { - self.handle - .inner - .scheduler_metrics() - .budget_forced_yield_count - .load(Relaxed) - } - - /// Returns the total number of times the given worker thread has parked. - /// - /// The worker park count starts at zero when the runtime is created and - /// increases by one each time the worker parks the thread waiting for new - /// inbound events to process. This usually means the worker has processed - /// all pending work and is currently idle. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_park_count(0); - /// println!("worker 0 parked {} times", n); - /// } - /// ``` - pub fn worker_park_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .park_count - .load(Relaxed) - } - - /// Returns the number of times the given worker thread unparked but - /// performed no work before parking again. - /// - /// The worker no-op count starts at zero when the runtime is created and - /// increases by one each time the worker unparks the thread but finds no - /// new work and goes back to sleep. This indicates a false-positive wake up. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. 
- /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_noop_count(0); - /// println!("worker 0 had {} no-op unparks", n); - /// } - /// ``` - pub fn worker_noop_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .noop_count - .load(Relaxed) - } - - /// Returns the number of tasks the given worker thread stole from - /// another worker thread. - /// - /// This metric only applies to the **multi-threaded** runtime and will - /// always return `0` when using the current thread runtime. - /// - /// The worker steal count starts at zero when the runtime is created and - /// increases by `N` each time the worker has processed its scheduled queue - /// and successfully steals `N` more pending tasks from another worker. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_steal_count(0); - /// println!("worker 0 has stolen {} tasks", n); - /// } - /// ``` - pub fn worker_steal_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .steal_count - .load(Relaxed) - } - - /// Returns the number of times the given worker thread stole tasks from - /// another worker thread. - /// - /// This metric only applies to the **multi-threaded** runtime and will - /// always return `0` when using the current thread runtime. - /// - /// The worker steal count starts at zero when the runtime is created and - /// increases by one each time the worker has processed its scheduled queue - /// and successfully steals more pending tasks from another worker. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_steal_operations(0); - /// println!("worker 0 has stolen tasks {} times", n); - /// } - /// ``` - pub fn worker_steal_operations(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .steal_operations - .load(Relaxed) - } - - /// Returns the number of tasks the given worker thread has polled. 
- /// - /// The worker poll count starts at zero when the runtime is created and - /// increases by one each time the worker polls a scheduled task. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_poll_count(0); - /// println!("worker 0 has polled {} tasks", n); - /// } - /// ``` - pub fn worker_poll_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .poll_count - .load(Relaxed) - } - - /// Returns the amount of time the given worker thread has been busy. - /// - /// The worker busy duration starts at zero when the runtime is created and - /// increases whenever the worker is spending time processing work. Using - /// this value can indicate the load of the given worker. If a lot of time - /// is spent busy, then the worker is under load and will check for inbound - /// events less often. - /// - /// The timer is monotonically increasing. It is never decremented or reset - /// to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. 
- /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_total_busy_duration(0); - /// println!("worker 0 was busy for a total of {:?}", n); - /// } - /// ``` - pub fn worker_total_busy_duration(&self, worker: usize) -> Duration { - let nanos = self - .handle - .inner - .worker_metrics(worker) - .busy_duration_total - .load(Relaxed); - Duration::from_nanos(nanos) - } - - /// Returns the number of tasks scheduled from **within** the runtime on the - /// given worker's local queue. - /// - /// The local schedule count starts at zero when the runtime is created and - /// increases by one each time a task is woken from **inside** of the - /// runtime on the given worker. This usually means that a task is spawned - /// or notified from within a runtime thread and will be queued on the - /// worker-local queue. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_local_schedule_count(0); - /// println!("{} tasks were scheduled on the worker's local queue", n); - /// } - /// ``` - pub fn worker_local_schedule_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .local_schedule_count - .load(Relaxed) - } - - /// Returns the number of times the given worker thread saturated its local - /// queue. - /// - /// This metric only applies to the **multi-threaded** scheduler. - /// - /// The worker overflow count starts at zero when the runtime is created and - /// increases by one each time the worker attempts to schedule a task - /// locally, but its local queue is full. When this happens, half of the - /// local queue is moved to the injection queue. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_overflow_count(0); - /// println!("worker 0 has overflowed its queue {} times", n); - /// } - /// ``` - pub fn worker_overflow_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .overflow_count - .load(Relaxed) - } - - /// Returns the number of tasks currently scheduled in the runtime's - /// injection queue. - /// - /// Tasks that are spawned or notified from a non-runtime thread are - /// scheduled using the runtime's injection queue. This metric returns the - /// **current** number of tasks pending in the injection queue. As such, the - /// returned value may increase or decrease as new tasks are scheduled and - /// processed. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.injection_queue_depth(); - /// println!("{} tasks currently pending in the runtime's injection queue", n); - /// } - /// ``` - pub fn injection_queue_depth(&self) -> usize { - self.handle.inner.injection_queue_depth() - } - - /// Returns the number of tasks currently scheduled in the given worker's - /// local queue. - /// - /// Tasks that are spawned or notified from within a runtime thread are - /// scheduled using that worker's local queue. This metric returns the - /// **current** number of tasks pending in the worker's local queue. As - /// such, the returned value may increase or decrease as new tasks are - /// scheduled and processed. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. 
The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_local_queue_depth(0); - /// println!("{} tasks currently pending in worker 0's local queue", n); - /// } - /// ``` - pub fn worker_local_queue_depth(&self, worker: usize) -> usize { - self.handle.inner.worker_local_queue_depth(worker) - } - - /// Returns `true` if the runtime is tracking the distribution of task poll - /// times. - /// - /// Task poll times are not instrumented by default as doing so requires - /// calling [`Instant::now()`] twice per task poll. The feature is enabled - /// by calling [`enable_metrics_poll_count_histogram()`] when building the - /// runtime. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, Handle}; - /// - /// fn main() { - /// runtime::Builder::new_current_thread() - /// .enable_metrics_poll_count_histogram() - /// .build() - /// .unwrap() - /// .block_on(async { - /// let metrics = Handle::current().metrics(); - /// let enabled = metrics.poll_count_histogram_enabled(); - /// - /// println!("Tracking task poll time distribution: {:?}", enabled); - /// }); - /// } - /// ``` - /// - /// [`enable_metrics_poll_count_histogram()`]: crate::runtime::Builder::enable_metrics_poll_count_histogram - /// [`Instant::now()`]: std::time::Instant::now - pub fn poll_count_histogram_enabled(&self) -> bool { - self.handle - .inner - .worker_metrics(0) - .poll_count_histogram - .is_some() - } - - /// Returns the number of histogram buckets tracking the distribution of - /// task poll times. 
- /// - /// This value is configured by calling - /// [`metrics_poll_count_histogram_buckets()`] when building the runtime. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, Handle}; - /// - /// fn main() { - /// runtime::Builder::new_current_thread() - /// .enable_metrics_poll_count_histogram() - /// .build() - /// .unwrap() - /// .block_on(async { - /// let metrics = Handle::current().metrics(); - /// let buckets = metrics.poll_count_histogram_num_buckets(); - /// - /// println!("Histogram buckets: {:?}", buckets); - /// }); - /// } - /// ``` - /// - /// [`metrics_poll_count_histogram_buckets()`]: - /// crate::runtime::Builder::metrics_poll_count_histogram_buckets - pub fn poll_count_histogram_num_buckets(&self) -> usize { - self.handle - .inner - .worker_metrics(0) - .poll_count_histogram - .as_ref() - .map(|histogram| histogram.num_buckets()) - .unwrap_or_default() - } - - /// Returns the range of task poll times tracked by the given bucket. - /// - /// This value is configured by calling - /// [`metrics_poll_count_histogram_resolution()`] when building the runtime. - /// - /// # Panics - /// - /// The method panics if `bucket` represents an invalid bucket index, i.e. - /// is greater than or equal to `poll_count_histogram_num_buckets()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, Handle}; - /// - /// fn main() { - /// runtime::Builder::new_current_thread() - /// .enable_metrics_poll_count_histogram() - /// .build() - /// .unwrap() - /// .block_on(async { - /// let metrics = Handle::current().metrics(); - /// let buckets = metrics.poll_count_histogram_num_buckets(); - /// - /// for i in 0..buckets { - /// let range = metrics.poll_count_histogram_bucket_range(i); - /// println!("Histogram bucket {} range: {:?}", i, range); - /// } - /// }); - /// } - /// ``` - /// - /// [`metrics_poll_count_histogram_resolution()`]: - /// crate::runtime::Builder::metrics_poll_count_histogram_resolution - #[track_caller] - pub fn poll_count_histogram_bucket_range(&self, bucket: usize) -> Range { - self.handle - .inner - .worker_metrics(0) - .poll_count_histogram - .as_ref() - .map(|histogram| { - let range = histogram.bucket_range(bucket); - std::ops::Range { - start: Duration::from_nanos(range.start), - end: Duration::from_nanos(range.end), - } - }) - .unwrap_or_default() - } - - /// Returns the number of times the given worker polled tasks with a poll - /// duration within the given bucket's range. - /// - /// Each worker maintains its own histogram and the counts for each bucket - /// starts at zero when the runtime is created. Each time the worker polls a - /// task, it tracks the duration the task poll time took and increments the - /// associated bucket by 1. - /// - /// Each bucket is a monotonically increasing counter. It is never - /// decremented or reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// `bucket` is the index of the bucket being queried. The bucket is scoped - /// to the worker. 
The range represented by the bucket can be queried by - /// calling [`poll_count_histogram_bucket_range()`]. Each worker maintains - /// identical bucket ranges. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()` or if `bucket` represents an - /// invalid bucket. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, Handle}; - /// - /// fn main() { - /// runtime::Builder::new_current_thread() - /// .enable_metrics_poll_count_histogram() - /// .build() - /// .unwrap() - /// .block_on(async { - /// let metrics = Handle::current().metrics(); - /// let buckets = metrics.poll_count_histogram_num_buckets(); - /// - /// for worker in 0..metrics.num_workers() { - /// for i in 0..buckets { - /// let count = metrics.poll_count_histogram_bucket_count(worker, i); - /// println!("Poll count {}", count); - /// } - /// } - /// }); - /// } - /// ``` - /// - /// [`poll_count_histogram_bucket_range()`]: crate::runtime::RuntimeMetrics::poll_count_histogram_bucket_range - #[track_caller] - pub fn poll_count_histogram_bucket_count(&self, worker: usize, bucket: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .poll_count_histogram - .as_ref() - .map(|histogram| histogram.get(bucket)) - .unwrap_or_default() - } - - /// Returns the mean duration of task polls, in nanoseconds. - /// - /// This is an exponentially weighted moving average. Currently, this metric - /// is only provided by the multi-threaded runtime. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_mean_poll_time(0); - /// println!("worker 0 has a mean poll time of {:?}", n); - /// } - /// ``` - #[track_caller] - pub fn worker_mean_poll_time(&self, worker: usize) -> Duration { - let nanos = self - .handle - .inner - .worker_metrics(worker) - .mean_poll_time - .load(Relaxed); - Duration::from_nanos(nanos) - } - - /// Returns the number of tasks currently scheduled in the blocking - /// thread pool, spawned using `spawn_blocking`. - /// - /// This metric returns the **current** number of tasks pending in - /// blocking thread pool. As such, the returned value may increase - /// or decrease as new tasks are scheduled and processed. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.blocking_queue_depth(); - /// println!("{} tasks currently pending in the blocking thread pool", n); - /// } - /// ``` - pub fn blocking_queue_depth(&self) -> usize { - self.handle.inner.blocking_queue_depth() - } -} - -cfg_net! { - impl RuntimeMetrics { - /// Returns the number of file descriptors that have been registered with the - /// runtime's I/O driver. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let registered_fds = metrics.io_driver_fd_registered_count(); - /// println!("{} fds have been registered with the runtime's I/O driver.", registered_fds); - /// - /// let deregistered_fds = metrics.io_driver_fd_deregistered_count(); - /// - /// let current_fd_count = registered_fds - deregistered_fds; - /// println!("{} fds are currently registered by the runtime's I/O driver.", current_fd_count); - /// } - /// ``` - pub fn io_driver_fd_registered_count(&self) -> u64 { - self.with_io_driver_metrics(|m| { - m.fd_registered_count.load(Relaxed) - }) - } - - /// Returns the number of file descriptors that have been deregistered by the - /// runtime's I/O driver. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.io_driver_fd_deregistered_count(); - /// println!("{} fds have been deregistered by the runtime's I/O driver.", n); - /// } - /// ``` - pub fn io_driver_fd_deregistered_count(&self) -> u64 { - self.with_io_driver_metrics(|m| { - m.fd_deregistered_count.load(Relaxed) - }) - } - - /// Returns the number of ready events processed by the runtime's - /// I/O driver. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.io_driver_ready_count(); - /// println!("{} ready events processed by the runtime's I/O driver.", n); - /// } - /// ``` - pub fn io_driver_ready_count(&self) -> u64 { - self.with_io_driver_metrics(|m| m.ready_count.load(Relaxed)) - } - - fn with_io_driver_metrics(&self, f: F) -> u64 - where - F: Fn(&super::IoDriverMetrics) -> u64, - { - // TODO: Investigate if this should return 0, most of our metrics always increase - // thus this breaks that guarantee. - self.handle - .inner - .driver() - .io - .as_ref() - .map(|h| f(&h.metrics)) - .unwrap_or(0) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/scheduler.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/scheduler.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/scheduler.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/scheduler.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,34 +0,0 @@ -use crate::loom::sync::atomic::{AtomicU64, Ordering::Relaxed}; - -/// Retrieves metrics from the Tokio runtime. -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. -/// -/// [unstable]: crate#unstable-features -#[derive(Debug)] -pub(crate) struct SchedulerMetrics { - /// Number of tasks that are scheduled from outside the runtime. 
- pub(super) remote_schedule_count: AtomicU64, - pub(super) budget_forced_yield_count: AtomicU64, -} - -impl SchedulerMetrics { - pub(crate) fn new() -> SchedulerMetrics { - SchedulerMetrics { - remote_schedule_count: AtomicU64::new(0), - budget_forced_yield_count: AtomicU64::new(0), - } - } - - /// Increment the number of tasks scheduled externally - pub(crate) fn inc_remote_schedule_count(&self) { - self.remote_schedule_count.fetch_add(1, Relaxed); - } - - /// Increment the number of tasks forced to yield due to budget exhaustion - pub(crate) fn inc_budget_forced_yield_count(&self) { - self.budget_forced_yield_count.fetch_add(1, Relaxed); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/worker.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/worker.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/metrics/worker.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/metrics/worker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,84 +0,0 @@ -use crate::loom::sync::atomic::Ordering::Relaxed; -use crate::loom::sync::atomic::{AtomicU64, AtomicUsize}; -use crate::runtime::metrics::Histogram; -use crate::runtime::Config; - -/// Retrieve runtime worker metrics. -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. -/// -/// [unstable]: crate#unstable-features -#[derive(Debug)] -#[repr(align(128))] -pub(crate) struct WorkerMetrics { - /// Number of times the worker parked. - pub(crate) park_count: AtomicU64, - - /// Number of times the worker woke then parked again without doing work. - pub(crate) noop_count: AtomicU64, - - /// Number of tasks the worker stole. - pub(crate) steal_count: AtomicU64, - - /// Number of times the worker stole - pub(crate) steal_operations: AtomicU64, - - /// Number of tasks the worker polled. 
- pub(crate) poll_count: AtomicU64, - - /// EWMA task poll time, in nanoseconds. - pub(crate) mean_poll_time: AtomicU64, - - /// Amount of time the worker spent doing work vs. parking. - pub(crate) busy_duration_total: AtomicU64, - - /// Number of tasks scheduled for execution on the worker's local queue. - pub(crate) local_schedule_count: AtomicU64, - - /// Number of tasks moved from the local queue to the global queue to free space. - pub(crate) overflow_count: AtomicU64, - - /// Number of tasks currently in the local queue. Used only by the - /// current-thread scheduler. - pub(crate) queue_depth: AtomicUsize, - - /// If `Some`, tracks the the number of polls by duration range. - pub(super) poll_count_histogram: Option, -} - -impl WorkerMetrics { - pub(crate) fn from_config(config: &Config) -> WorkerMetrics { - let mut worker_metrics = WorkerMetrics::new(); - worker_metrics.poll_count_histogram = config - .metrics_poll_count_histogram - .as_ref() - .map(|histogram_builder| histogram_builder.build()); - worker_metrics - } - - pub(crate) fn new() -> WorkerMetrics { - WorkerMetrics { - park_count: AtomicU64::new(0), - noop_count: AtomicU64::new(0), - steal_count: AtomicU64::new(0), - steal_operations: AtomicU64::new(0), - poll_count: AtomicU64::new(0), - mean_poll_time: AtomicU64::new(0), - overflow_count: AtomicU64::new(0), - busy_duration_total: AtomicU64::new(0), - local_schedule_count: AtomicU64::new(0), - queue_depth: AtomicUsize::new(0), - poll_count_histogram: None, - } - } - - pub(crate) fn queue_depth(&self) -> usize { - self.queue_depth.load(Relaxed) - } - - pub(crate) fn set_queue_depth(&self, len: usize) { - self.queue_depth.store(len, Relaxed); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/mod.rs 1970-01-01 01:00:00.000000000 
+0100 @@ -1,269 +0,0 @@ -//! The Tokio runtime. -//! -//! Unlike other Rust programs, asynchronous applications require runtime -//! support. In particular, the following runtime services are necessary: -//! -//! * An **I/O event loop**, called the driver, which drives I/O resources and -//! dispatches I/O events to tasks that depend on them. -//! * A **scheduler** to execute [tasks] that use these I/O resources. -//! * A **timer** for scheduling work to run after a set period of time. -//! -//! Tokio's [`Runtime`] bundles all of these services as a single type, allowing -//! them to be started, shut down, and configured together. However, often it is -//! not required to configure a [`Runtime`] manually, and a user may just use the -//! [`tokio::main`] attribute macro, which creates a [`Runtime`] under the hood. -//! -//! # Usage -//! -//! When no fine tuning is required, the [`tokio::main`] attribute macro can be -//! used. -//! -//! ```no_run -//! use tokio::net::TcpListener; -//! use tokio::io::{AsyncReadExt, AsyncWriteExt}; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let listener = TcpListener::bind("127.0.0.1:8080").await?; -//! -//! loop { -//! let (mut socket, _) = listener.accept().await?; -//! -//! tokio::spawn(async move { -//! let mut buf = [0; 1024]; -//! -//! // In a loop, read data from the socket and write the data back. -//! loop { -//! let n = match socket.read(&mut buf).await { -//! // socket closed -//! Ok(n) if n == 0 => return, -//! Ok(n) => n, -//! Err(e) => { -//! println!("failed to read from socket; err = {:?}", e); -//! return; -//! } -//! }; -//! -//! // Write the data back -//! if let Err(e) = socket.write_all(&buf[0..n]).await { -//! println!("failed to write to socket; err = {:?}", e); -//! return; -//! } -//! } -//! }); -//! } -//! } -//! ``` -//! -//! From within the context of the runtime, additional tasks are spawned using -//! the [`tokio::spawn`] function. 
Futures spawned using this function will be -//! executed on the same thread pool used by the [`Runtime`]. -//! -//! A [`Runtime`] instance can also be used directly. -//! -//! ```no_run -//! use tokio::net::TcpListener; -//! use tokio::io::{AsyncReadExt, AsyncWriteExt}; -//! use tokio::runtime::Runtime; -//! -//! fn main() -> Result<(), Box> { -//! // Create the runtime -//! let rt = Runtime::new()?; -//! -//! // Spawn the root task -//! rt.block_on(async { -//! let listener = TcpListener::bind("127.0.0.1:8080").await?; -//! -//! loop { -//! let (mut socket, _) = listener.accept().await?; -//! -//! tokio::spawn(async move { -//! let mut buf = [0; 1024]; -//! -//! // In a loop, read data from the socket and write the data back. -//! loop { -//! let n = match socket.read(&mut buf).await { -//! // socket closed -//! Ok(n) if n == 0 => return, -//! Ok(n) => n, -//! Err(e) => { -//! println!("failed to read from socket; err = {:?}", e); -//! return; -//! } -//! }; -//! -//! // Write the data back -//! if let Err(e) = socket.write_all(&buf[0..n]).await { -//! println!("failed to write to socket; err = {:?}", e); -//! return; -//! } -//! } -//! }); -//! } -//! }) -//! } -//! ``` -//! -//! ## Runtime Configurations -//! -//! Tokio provides multiple task scheduling strategies, suitable for different -//! applications. The [runtime builder] or `#[tokio::main]` attribute may be -//! used to select which scheduler to use. -//! -//! #### Multi-Thread Scheduler -//! -//! The multi-thread scheduler executes futures on a _thread pool_, using a -//! work-stealing strategy. By default, it will start a worker thread for each -//! CPU core available on the system. This tends to be the ideal configuration -//! for most applications. The multi-thread scheduler requires the `rt-multi-thread` -//! feature flag, and is selected by default: -//! ``` -//! use tokio::runtime; -//! -//! # fn main() -> Result<(), Box> { -//! let threaded_rt = runtime::Runtime::new()?; -//! # Ok(()) } -//! 
``` -//! -//! Most applications should use the multi-thread scheduler, except in some -//! niche use-cases, such as when running only a single thread is required. -//! -//! #### Current-Thread Scheduler -//! -//! The current-thread scheduler provides a _single-threaded_ future executor. -//! All tasks will be created and executed on the current thread. This requires -//! the `rt` feature flag. -//! ``` -//! use tokio::runtime; -//! -//! # fn main() -> Result<(), Box> { -//! let rt = runtime::Builder::new_current_thread() -//! .build()?; -//! # Ok(()) } -//! ``` -//! -//! #### Resource drivers -//! -//! When configuring a runtime by hand, no resource drivers are enabled by -//! default. In this case, attempting to use networking types or time types will -//! fail. In order to enable these types, the resource drivers must be enabled. -//! This is done with [`Builder::enable_io`] and [`Builder::enable_time`]. As a -//! shorthand, [`Builder::enable_all`] enables both resource drivers. -//! -//! ## Lifetime of spawned threads -//! -//! The runtime may spawn threads depending on its configuration and usage. The -//! multi-thread scheduler spawns threads to schedule tasks and for `spawn_blocking` -//! calls. -//! -//! While the `Runtime` is active, threads may shut down after periods of being -//! idle. Once `Runtime` is dropped, all runtime threads have usually been -//! terminated, but in the presence of unstoppable spawned work are not -//! guaranteed to have been terminated. See the -//! [struct level documentation](Runtime#shutdown) for more details. -//! -//! [tasks]: crate::task -//! [`Runtime`]: Runtime -//! [`tokio::spawn`]: crate::spawn -//! [`tokio::main`]: ../attr.main.html -//! [runtime builder]: crate::runtime::Builder -//! [`Runtime::new`]: crate::runtime::Runtime::new -//! [`Builder::threaded_scheduler`]: crate::runtime::Builder::threaded_scheduler -//! [`Builder::enable_io`]: crate::runtime::Builder::enable_io -//! 
[`Builder::enable_time`]: crate::runtime::Builder::enable_time -//! [`Builder::enable_all`]: crate::runtime::Builder::enable_all - -// At the top due to macros -#[cfg(test)] -#[cfg(not(target_family = "wasm"))] -#[macro_use] -mod tests; - -pub(crate) mod context; - -pub(crate) mod coop; - -pub(crate) mod park; - -mod driver; - -pub(crate) mod scheduler; - -cfg_io_driver_impl! { - pub(crate) mod io; -} - -cfg_process_driver! { - mod process; -} - -cfg_time! { - pub(crate) mod time; -} - -cfg_signal_internal_and_unix! { - pub(crate) mod signal; -} - -cfg_rt! { - pub(crate) mod task; - - mod config; - use config::Config; - - mod blocking; - #[cfg_attr(target_os = "wasi", allow(unused_imports))] - pub(crate) use blocking::spawn_blocking; - - cfg_trace! { - pub(crate) use blocking::Mandatory; - } - - cfg_fs! { - pub(crate) use blocking::spawn_mandatory_blocking; - } - - mod builder; - pub use self::builder::Builder; - cfg_unstable! { - mod id; - #[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] - pub use id::Id; - - pub use self::builder::UnhandledPanic; - pub use crate::util::rand::RngSeed; - } - - cfg_taskdump! { - pub mod dump; - pub use dump::Dump; - } - - mod handle; - pub use handle::{EnterGuard, Handle, TryCurrentError}; - - mod runtime; - pub use runtime::{Runtime, RuntimeFlavor}; - - mod thread_id; - pub(crate) use thread_id::ThreadId; - - cfg_metrics! { - mod metrics; - pub use metrics::{RuntimeMetrics, HistogramScale}; - - pub(crate) use metrics::{MetricsBatch, SchedulerMetrics, WorkerMetrics, HistogramBuilder}; - - cfg_net! { - pub(crate) use metrics::IoDriverMetrics; - } - } - - cfg_not_metrics! 
{ - pub(crate) mod metrics; - pub(crate) use metrics::{SchedulerMetrics, WorkerMetrics, MetricsBatch, HistogramBuilder}; - } - - /// After thread starts / before thread stops - type Callback = std::sync::Arc; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/park.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/park.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/park.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/park.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,340 +0,0 @@ -#![cfg_attr(not(feature = "full"), allow(dead_code))] - -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::{Arc, Condvar, Mutex}; - -use std::sync::atomic::Ordering::SeqCst; -use std::time::Duration; - -#[derive(Debug)] -pub(crate) struct ParkThread { - inner: Arc, -} - -/// Unblocks a thread that was blocked by `ParkThread`. -#[derive(Clone, Debug)] -pub(crate) struct UnparkThread { - inner: Arc, -} - -#[derive(Debug)] -struct Inner { - state: AtomicUsize, - mutex: Mutex<()>, - condvar: Condvar, -} - -const EMPTY: usize = 0; -const PARKED: usize = 1; -const NOTIFIED: usize = 2; - -tokio_thread_local! { - static CURRENT_PARKER: ParkThread = ParkThread::new(); -} - -// Bit of a hack, but it is only for loom -#[cfg(loom)] -tokio_thread_local! 
{ - static CURRENT_THREAD_PARK_COUNT: AtomicUsize = AtomicUsize::new(0); -} - -// ==== impl ParkThread ==== - -impl ParkThread { - pub(crate) fn new() -> Self { - Self { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - mutex: Mutex::new(()), - condvar: Condvar::new(), - }), - } - } - - pub(crate) fn unpark(&self) -> UnparkThread { - let inner = self.inner.clone(); - UnparkThread { inner } - } - - pub(crate) fn park(&mut self) { - #[cfg(loom)] - CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst)); - self.inner.park(); - } - - pub(crate) fn park_timeout(&mut self, duration: Duration) { - #[cfg(loom)] - CURRENT_THREAD_PARK_COUNT.with(|count| count.fetch_add(1, SeqCst)); - - // Wasm doesn't have threads, so just sleep. - #[cfg(not(target_family = "wasm"))] - self.inner.park_timeout(duration); - #[cfg(target_family = "wasm")] - std::thread::sleep(duration); - } - - pub(crate) fn shutdown(&mut self) { - self.inner.shutdown(); - } -} - -// ==== impl Inner ==== - -impl Inner { - /// Parks the current thread for at most `dur`. - fn park(&self) { - // If we were previously notified then we consume this notification and - // return quickly. - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - return; - } - - // Otherwise we need to coordinate going to sleep - let mut m = self.mutex.lock(); - - match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. 
- let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park state; actual = {}", actual), - } - - loop { - m = self.condvar.wait(m).unwrap(); - - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - // got a notification - return; - } - - // spurious wakeup, go back to sleep - } - } - - fn park_timeout(&self, dur: Duration) { - // Like `park` above we have a fast path for an already-notified thread, - // and afterwards we start coordinating for a sleep. Return quickly. - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - return; - } - - if dur == Duration::from_millis(0) { - return; - } - - let m = self.mutex.lock(); - - match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read again here, see `park`. - let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park_timeout state; actual = {}", actual), - } - - // Wait with a timeout, and if we spuriously wake up or otherwise wake up - // from a notification, we just want to unconditionally set the state back to - // empty, either consuming a notification or un-flagging ourselves as - // parked. - let (_m, _result) = self.condvar.wait_timeout(m, dur).unwrap(); - - match self.state.swap(EMPTY, SeqCst) { - NOTIFIED => {} // got a notification, hurray! - PARKED => {} // no notification, alas - n => panic!("inconsistent park_timeout state: {}", n), - } - } - - fn unpark(&self) { - // To ensure the unparked thread will observe any writes we made before - // this call, we must perform a release operation that `park` can - // synchronize with. To do that we must write `NOTIFIED` even if `state` - // is already `NOTIFIED`. 
That is why this must be a swap rather than a - // compare-and-swap that returns if it reads `NOTIFIED` on failure. - match self.state.swap(NOTIFIED, SeqCst) { - EMPTY => return, // no one was waiting - NOTIFIED => return, // already unparked - PARKED => {} // gotta go wake someone up - _ => panic!("inconsistent state in unpark"), - } - - // There is a period between when the parked thread sets `state` to - // `PARKED` (or last checked `state` in the case of a spurious wake - // up) and when it actually waits on `cvar`. If we were to notify - // during this period it would be ignored and then when the parked - // thread went to sleep it would never wake up. Fortunately, it has - // `lock` locked at this stage so we can acquire `lock` to wait until - // it is ready to receive the notification. - // - // Releasing `lock` before the call to `notify_one` means that when the - // parked thread wakes it doesn't get woken only to have to wait for us - // to release `lock`. - drop(self.mutex.lock()); - - self.condvar.notify_one() - } - - fn shutdown(&self) { - self.condvar.notify_all(); - } -} - -impl Default for ParkThread { - fn default() -> Self { - Self::new() - } -} - -// ===== impl UnparkThread ===== - -impl UnparkThread { - pub(crate) fn unpark(&self) { - self.inner.unpark(); - } -} - -use crate::loom::thread::AccessError; -use std::future::Future; -use std::marker::PhantomData; -use std::rc::Rc; -use std::task::{RawWaker, RawWakerVTable, Waker}; - -/// Blocks the current thread using a condition variable. -#[derive(Debug)] -pub(crate) struct CachedParkThread { - _anchor: PhantomData>, -} - -impl CachedParkThread { - /// Creates a new `ParkThread` handle for the current thread. - /// - /// This type cannot be moved to other threads, so it should be created on - /// the thread that the caller intends to park. 
- pub(crate) fn new() -> CachedParkThread { - CachedParkThread { - _anchor: PhantomData, - } - } - - pub(crate) fn waker(&self) -> Result { - self.unpark().map(|unpark| unpark.into_waker()) - } - - fn unpark(&self) -> Result { - self.with_current(|park_thread| park_thread.unpark()) - } - - pub(crate) fn park(&mut self) { - self.with_current(|park_thread| park_thread.inner.park()) - .unwrap(); - } - - pub(crate) fn park_timeout(&mut self, duration: Duration) { - self.with_current(|park_thread| park_thread.inner.park_timeout(duration)) - .unwrap(); - } - - /// Gets a reference to the `ParkThread` handle for this thread. - fn with_current(&self, f: F) -> Result - where - F: FnOnce(&ParkThread) -> R, - { - CURRENT_PARKER.try_with(|inner| f(inner)) - } - - pub(crate) fn block_on(&mut self, f: F) -> Result { - use std::task::Context; - use std::task::Poll::Ready; - - // `get_unpark()` should not return a Result - let waker = self.waker()?; - let mut cx = Context::from_waker(&waker); - - pin!(f); - - loop { - if let Ready(v) = crate::runtime::coop::budget(|| f.as_mut().poll(&mut cx)) { - return Ok(v); - } - - self.park(); - } - } -} - -impl UnparkThread { - pub(crate) fn into_waker(self) -> Waker { - unsafe { - let raw = unparker_to_raw_waker(self.inner); - Waker::from_raw(raw) - } - } -} - -impl Inner { - #[allow(clippy::wrong_self_convention)] - fn into_raw(this: Arc) -> *const () { - Arc::into_raw(this) as *const () - } - - unsafe fn from_raw(ptr: *const ()) -> Arc { - Arc::from_raw(ptr as *const Inner) - } -} - -unsafe fn unparker_to_raw_waker(unparker: Arc) -> RawWaker { - RawWaker::new( - Inner::into_raw(unparker), - &RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker), - ) -} - -unsafe fn clone(raw: *const ()) -> RawWaker { - Arc::increment_strong_count(raw as *const Inner); - unparker_to_raw_waker(Inner::from_raw(raw)) -} - -unsafe fn drop_waker(raw: *const ()) { - drop(Inner::from_raw(raw)); -} - -unsafe fn wake(raw: *const ()) { - let unparker = 
Inner::from_raw(raw); - unparker.unpark(); -} - -unsafe fn wake_by_ref(raw: *const ()) { - let raw = raw as *const Inner; - (*raw).unpark(); -} - -#[cfg(loom)] -pub(crate) fn current_thread_park_count() -> usize { - CURRENT_THREAD_PARK_COUNT.with(|count| count.load(SeqCst)) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/process.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/process.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/process.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/process.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,44 +0,0 @@ -#![cfg_attr(not(feature = "rt"), allow(dead_code))] - -//! Process driver. - -use crate::process::unix::GlobalOrphanQueue; -use crate::runtime::driver; -use crate::runtime::signal::{Driver as SignalDriver, Handle as SignalHandle}; - -use std::time::Duration; - -/// Responsible for cleaning up orphaned child processes on Unix platforms. -#[derive(Debug)] -pub(crate) struct Driver { - park: SignalDriver, - signal_handle: SignalHandle, -} - -// ===== impl Driver ===== - -impl Driver { - /// Creates a new signal `Driver` instance that delegates wakeups to `park`. 
- pub(crate) fn new(park: SignalDriver) -> Self { - let signal_handle = park.handle(); - - Self { - park, - signal_handle, - } - } - - pub(crate) fn park(&mut self, handle: &driver::Handle) { - self.park.park(handle); - GlobalOrphanQueue::reap_orphans(&self.signal_handle); - } - - pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) { - self.park.park_timeout(handle, duration); - GlobalOrphanQueue::reap_orphans(&self.signal_handle); - } - - pub(crate) fn shutdown(&mut self, handle: &driver::Handle) { - self.park.shutdown(handle) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/runtime.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/runtime.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/runtime.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/runtime.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,490 +0,0 @@ -use crate::runtime::blocking::BlockingPool; -use crate::runtime::scheduler::CurrentThread; -use crate::runtime::{context, EnterGuard, Handle}; -use crate::task::JoinHandle; - -use std::future::Future; -use std::time::Duration; - -cfg_rt_multi_thread! { - use crate::runtime::Builder; - use crate::runtime::scheduler::MultiThread; - - cfg_unstable! { - use crate::runtime::scheduler::MultiThreadAlt; - } -} - -/// The Tokio runtime. -/// -/// The runtime provides an I/O driver, task scheduler, [timer], and -/// blocking pool, necessary for running asynchronous tasks. -/// -/// Instances of `Runtime` can be created using [`new`], or [`Builder`]. -/// However, most users will use the `#[tokio::main]` annotation on their -/// entry point instead. -/// -/// See [module level][mod] documentation for more details. -/// -/// # Shutdown -/// -/// Shutting down the runtime is done by dropping the value, or calling -/// [`shutdown_background`] or [`shutdown_timeout`]. -/// -/// Tasks spawned through [`Runtime::spawn`] keep running until they yield. -/// Then they are dropped. 
They are not *guaranteed* to run to completion, but -/// *might* do so if they do not yield until completion. -/// -/// Blocking functions spawned through [`Runtime::spawn_blocking`] keep running -/// until they return. -/// -/// The thread initiating the shutdown blocks until all spawned work has been -/// stopped. This can take an indefinite amount of time. The `Drop` -/// implementation waits forever for this. -/// -/// The [`shutdown_background`] and [`shutdown_timeout`] methods can be used if -/// waiting forever is undesired. When the timeout is reached, spawned work that -/// did not stop in time and threads running it are leaked. The work continues -/// to run until one of the stopping conditions is fulfilled, but the thread -/// initiating the shutdown is unblocked. -/// -/// Once the runtime has been dropped, any outstanding I/O resources bound to -/// it will no longer function. Calling any method on them will result in an -/// error. -/// -/// # Sharing -/// -/// There are several ways to establish shared access to a Tokio runtime: -/// -/// * Using an [Arc]\. -/// * Using a [`Handle`]. -/// * Entering the runtime context. -/// -/// Using an [Arc]\ or [`Handle`] allows you to do various -/// things with the runtime such as spawning new tasks or entering the runtime -/// context. Both types can be cloned to create a new handle that allows access -/// to the same runtime. By passing clones into different tasks or threads, you -/// will be able to access the runtime from those tasks or threads. -/// -/// The difference between [Arc]\ and [`Handle`] is that -/// an [Arc]\ will prevent the runtime from shutting down, -/// whereas a [`Handle`] does not prevent that. This is because shutdown of the -/// runtime happens when the destructor of the `Runtime` object runs. -/// -/// Calls to [`shutdown_background`] and [`shutdown_timeout`] require exclusive -/// ownership of the `Runtime` type. 
When using an [Arc]\, -/// this can be achieved via [`Arc::try_unwrap`] when only one strong count -/// reference is left over. -/// -/// The runtime context is entered using the [`Runtime::enter`] or -/// [`Handle::enter`] methods, which use a thread-local variable to store the -/// current runtime. Whenever you are inside the runtime context, methods such -/// as [`tokio::spawn`] will use the runtime whose context you are inside. -/// -/// [timer]: crate::time -/// [mod]: index.html -/// [`new`]: method@Self::new -/// [`Builder`]: struct@Builder -/// [`Handle`]: struct@Handle -/// [`tokio::spawn`]: crate::spawn -/// [`Arc::try_unwrap`]: std::sync::Arc::try_unwrap -/// [Arc]: std::sync::Arc -/// [`shutdown_background`]: method@Runtime::shutdown_background -/// [`shutdown_timeout`]: method@Runtime::shutdown_timeout -#[derive(Debug)] -pub struct Runtime { - /// Task scheduler - scheduler: Scheduler, - - /// Handle to runtime, also contains driver handles - handle: Handle, - - /// Blocking pool handle, used to signal shutdown - blocking_pool: BlockingPool, -} - -/// The flavor of a `Runtime`. -/// -/// This is the return type for [`Handle::runtime_flavor`](crate::runtime::Handle::runtime_flavor()). -#[derive(Debug, PartialEq, Eq)] -#[non_exhaustive] -pub enum RuntimeFlavor { - /// The flavor that executes all tasks on the current thread. - CurrentThread, - /// The flavor that executes tasks across multiple threads. - MultiThread, - /// The flavor that executes tasks across multiple threads. - #[cfg(tokio_unstable)] - MultiThreadAlt, -} - -/// The runtime scheduler is either a multi-thread or a current-thread executor. -#[derive(Debug)] -pub(super) enum Scheduler { - /// Execute all tasks on the current-thread. - CurrentThread(CurrentThread), - - /// Execute tasks across multiple threads. - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - MultiThread(MultiThread), - - /// Execute tasks across multiple threads. 
- #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - MultiThreadAlt(MultiThreadAlt), -} - -impl Runtime { - pub(super) fn from_parts( - scheduler: Scheduler, - handle: Handle, - blocking_pool: BlockingPool, - ) -> Runtime { - Runtime { - scheduler, - handle, - blocking_pool, - } - } - - cfg_not_wasi! { - /// Creates a new runtime instance with default configuration values. - /// - /// This results in the multi threaded scheduler, I/O driver, and time driver being - /// initialized. - /// - /// Most applications will not need to call this function directly. Instead, - /// they will use the [`#[tokio::main]` attribute][main]. When a more complex - /// configuration is necessary, the [runtime builder] may be used. - /// - /// See [module level][mod] documentation for more details. - /// - /// # Examples - /// - /// Creating a new `Runtime` with default configuration values. - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// let rt = Runtime::new() - /// .unwrap(); - /// - /// // Use the runtime... - /// ``` - /// - /// [mod]: index.html - /// [main]: ../attr.main.html - /// [threaded scheduler]: index.html#threaded-scheduler - /// [runtime builder]: crate::runtime::Builder - #[cfg(feature = "rt-multi-thread")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] - pub fn new() -> std::io::Result { - Builder::new_multi_thread().enable_all().build() - } - } - - /// Returns a handle to the runtime's spawner. - /// - /// The returned handle can be used to spawn tasks that run on this runtime, and can - /// be cloned to allow moving the `Handle` to other threads. - /// - /// Calling [`Handle::block_on`] on a handle to a `current_thread` runtime is error-prone. - /// Refer to the documentation of [`Handle::block_on`] for more. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// let rt = Runtime::new() - /// .unwrap(); - /// - /// let handle = rt.handle(); - /// - /// // Use the handle... 
- /// ``` - pub fn handle(&self) -> &Handle { - &self.handle - } - - /// Spawns a future onto the Tokio runtime. - /// - /// This spawns the given future onto the runtime's executor, usually a - /// thread pool. The thread pool is then responsible for polling the future - /// until it completes. - /// - /// The provided future will start running in the background immediately - /// when `spawn` is called, even if you don't await the returned - /// `JoinHandle`. - /// - /// See [module level][mod] documentation for more details. - /// - /// [mod]: index.html - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// # fn dox() { - /// // Create the runtime - /// let rt = Runtime::new().unwrap(); - /// - /// // Spawn a future onto the runtime - /// rt.spawn(async { - /// println!("now running on a worker thread"); - /// }); - /// # } - /// ``` - #[track_caller] - pub fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - self.handle.spawn(future) - } - - /// Runs the provided function on an executor dedicated to blocking operations. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// # fn dox() { - /// // Create the runtime - /// let rt = Runtime::new().unwrap(); - /// - /// // Spawn a blocking function onto the runtime - /// rt.spawn_blocking(|| { - /// println!("now running on a worker thread"); - /// }); - /// # } - /// ``` - #[track_caller] - pub fn spawn_blocking(&self, func: F) -> JoinHandle - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - self.handle.spawn_blocking(func) - } - - /// Runs a future to completion on the Tokio runtime. This is the - /// runtime's entry point. - /// - /// This runs the given future on the current thread, blocking until it is - /// complete, and yielding its resolved result. Any tasks or timers - /// which the future spawns internally will be executed on the runtime. 
- /// - /// # Non-worker future - /// - /// Note that the future required by this function does not run as a - /// worker. The expectation is that other tasks are spawned by the future here. - /// Awaiting on other futures from the future provided here will not - /// perform as fast as those spawned as workers. - /// - /// # Multi thread scheduler - /// - /// When the multi thread scheduler is used this will allow futures - /// to run within the io driver and timer context of the overall runtime. - /// - /// Any spawned tasks will continue running after `block_on` returns. - /// - /// # Current thread scheduler - /// - /// When the current thread scheduler is enabled `block_on` - /// can be called concurrently from multiple threads. The first call - /// will take ownership of the io and timer drivers. This means - /// other threads which do not own the drivers will hook into that one. - /// When the first `block_on` completes, other threads will be able to - /// "steal" the driver to allow continued execution of their futures. - /// - /// Any spawned tasks will be suspended after `block_on` returns. Calling - /// `block_on` again will resume previously spawned tasks. - /// - /// # Panics - /// - /// This function panics if the provided future panics, or if called within an - /// asynchronous execution context. 
- /// - /// # Examples - /// - /// ```no_run - /// use tokio::runtime::Runtime; - /// - /// // Create the runtime - /// let rt = Runtime::new().unwrap(); - /// - /// // Execute the future, blocking the current thread until completion - /// rt.block_on(async { - /// println!("hello"); - /// }); - /// ``` - /// - /// [handle]: fn@Handle::block_on - #[track_caller] - pub fn block_on(&self, future: F) -> F::Output { - #[cfg(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") - ))] - let future = super::task::trace::Trace::root(future); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let future = crate::util::trace::task( - future, - "block_on", - None, - crate::runtime::task::Id::next().as_u64(), - ); - - let _enter = self.enter(); - - match &self.scheduler { - Scheduler::CurrentThread(exec) => exec.block_on(&self.handle.inner, future), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - Scheduler::MultiThread(exec) => exec.block_on(&self.handle.inner, future), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - Scheduler::MultiThreadAlt(exec) => exec.block_on(&self.handle.inner, future), - } - } - - /// Enters the runtime context. - /// - /// This allows you to construct types that must have an executor - /// available on creation such as [`Sleep`] or [`TcpStream`]. It will - /// also allow you to call methods such as [`tokio::spawn`]. - /// - /// [`Sleep`]: struct@crate::time::Sleep - /// [`TcpStream`]: struct@crate::net::TcpStream - /// [`tokio::spawn`]: fn@crate::spawn - /// - /// # Example - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// fn function_that_spawns(msg: String) { - /// // Had we not used `rt.enter` below, this would panic. 
- /// tokio::spawn(async move { - /// println!("{}", msg); - /// }); - /// } - /// - /// fn main() { - /// let rt = Runtime::new().unwrap(); - /// - /// let s = "Hello World!".to_string(); - /// - /// // By entering the context, we tie `tokio::spawn` to this executor. - /// let _guard = rt.enter(); - /// function_that_spawns(s); - /// } - /// ``` - pub fn enter(&self) -> EnterGuard<'_> { - self.handle.enter() - } - - /// Shuts down the runtime, waiting for at most `duration` for all spawned - /// work to stop. - /// - /// See the [struct level documentation](Runtime#shutdown) for more details. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Runtime; - /// use tokio::task; - /// - /// use std::thread; - /// use std::time::Duration; - /// - /// fn main() { - /// let runtime = Runtime::new().unwrap(); - /// - /// runtime.block_on(async move { - /// task::spawn_blocking(move || { - /// thread::sleep(Duration::from_secs(10_000)); - /// }); - /// }); - /// - /// runtime.shutdown_timeout(Duration::from_millis(100)); - /// } - /// ``` - pub fn shutdown_timeout(mut self, duration: Duration) { - // Wakeup and shutdown all the worker threads - self.handle.inner.shutdown(); - self.blocking_pool.shutdown(Some(duration)); - } - - /// Shuts down the runtime, without waiting for any spawned work to stop. - /// - /// This can be useful if you want to drop a runtime from within another runtime. - /// Normally, dropping a runtime will block indefinitely for spawned blocking tasks - /// to complete, which would normally not be permitted within an asynchronous context. - /// By calling `shutdown_background()`, you can drop the runtime from such a context. - /// - /// Note however, that because we do not wait for any blocking tasks to complete, this - /// may result in a resource leak (in that any blocking tasks are still running until they - /// return. - /// - /// See the [struct level documentation](Runtime#shutdown) for more details. 
- /// - /// This function is equivalent to calling `shutdown_timeout(Duration::from_nanos(0))`. - /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// fn main() { - /// let runtime = Runtime::new().unwrap(); - /// - /// runtime.block_on(async move { - /// let inner_runtime = Runtime::new().unwrap(); - /// // ... - /// inner_runtime.shutdown_background(); - /// }); - /// } - /// ``` - pub fn shutdown_background(self) { - self.shutdown_timeout(Duration::from_nanos(0)) - } -} - -#[allow(clippy::single_match)] // there are comments in the error branch, so we don't want if-let -impl Drop for Runtime { - fn drop(&mut self) { - match &mut self.scheduler { - Scheduler::CurrentThread(current_thread) => { - // This ensures that tasks spawned on the current-thread - // runtime are dropped inside the runtime's context. - let _guard = context::try_set_current(&self.handle.inner); - current_thread.shutdown(&self.handle.inner); - } - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - Scheduler::MultiThread(multi_thread) => { - // The threaded scheduler drops its tasks on its worker threads, which is - // already in the runtime's context. - multi_thread.shutdown(&self.handle.inner); - } - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - Scheduler::MultiThreadAlt(multi_thread) => { - // The threaded scheduler drops its tasks on its worker threads, which is - // already in the runtime's context. - multi_thread.shutdown(&self.handle.inner); - } - } - } -} - -cfg_metrics! 
{ - impl Runtime { - /// TODO - pub fn metrics(&self) -> crate::runtime::RuntimeMetrics { - self.handle.metrics() - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/block_in_place.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/block_in_place.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/block_in_place.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/block_in_place.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -use crate::runtime::scheduler; - -#[track_caller] -pub(crate) fn block_in_place(f: F) -> R -where - F: FnOnce() -> R, -{ - #[cfg(tokio_unstable)] - { - use crate::runtime::{Handle, RuntimeFlavor::MultiThreadAlt}; - - match Handle::try_current().map(|h| h.runtime_flavor()) { - Ok(MultiThreadAlt) => { - return scheduler::multi_thread_alt::block_in_place(f); - } - _ => {} - } - } - - scheduler::multi_thread::block_in_place(f) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/current_thread/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/current_thread/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/current_thread/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/current_thread/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,764 +0,0 @@ -use crate::future::poll_fn; -use crate::loom::sync::atomic::AtomicBool; -use crate::loom::sync::Arc; -use crate::runtime::driver::{self, Driver}; -use crate::runtime::scheduler::{self, Defer, Inject}; -use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task}; -use crate::runtime::{blocking, context, Config, MetricsBatch, SchedulerMetrics, WorkerMetrics}; -use crate::sync::notify::Notify; -use crate::util::atomic_cell::AtomicCell; -use crate::util::{waker_ref, RngSeedGenerator, Wake, WakerRef}; - -use std::cell::RefCell; -use std::collections::VecDeque; -use std::fmt; -use 
std::future::Future; -use std::sync::atomic::Ordering::{AcqRel, Release}; -use std::task::Poll::{Pending, Ready}; -use std::task::Waker; -use std::time::Duration; - -/// Executes tasks on the current thread -pub(crate) struct CurrentThread { - /// Core scheduler data is acquired by a thread entering `block_on`. - core: AtomicCell, - - /// Notifier for waking up other threads to steal the - /// driver. - notify: Notify, -} - -/// Handle to the current thread scheduler -pub(crate) struct Handle { - /// Scheduler state shared across threads - shared: Shared, - - /// Resource driver handles - pub(crate) driver: driver::Handle, - - /// Blocking pool spawner - pub(crate) blocking_spawner: blocking::Spawner, - - /// Current random number generator seed - pub(crate) seed_generator: RngSeedGenerator, -} - -/// Data required for executing the scheduler. The struct is passed around to -/// a function that will perform the scheduling work and acts as a capability token. -struct Core { - /// Scheduler run queue - tasks: VecDeque, - - /// Current tick - tick: u32, - - /// Runtime driver - /// - /// The driver is removed before starting to park the thread - driver: Option, - - /// Metrics batch - metrics: MetricsBatch, - - /// How often to check the global queue - global_queue_interval: u32, - - /// True if a task panicked without being handled and the runtime is - /// configured to shutdown on unhandled panic. - unhandled_panic: bool, -} - -/// Scheduler state shared between threads. -struct Shared { - /// Remote run queue - inject: Inject>, - - /// Collection of all active tasks spawned onto this executor. - owned: OwnedTasks>, - - /// Indicates whether the blocked on thread was woken. - woken: AtomicBool, - - /// Scheduler configuration options - config: Config, - - /// Keeps track of various runtime metrics. - scheduler_metrics: SchedulerMetrics, - - /// This scheduler only has one worker. - worker_metrics: WorkerMetrics, -} - -/// Thread-local context. 
-/// -/// pub(crate) to store in `runtime::context`. -pub(crate) struct Context { - /// Scheduler handle - handle: Arc, - - /// Scheduler core, enabling the holder of `Context` to execute the - /// scheduler. - core: RefCell>>, - - /// Deferred tasks, usually ones that called `task::yield_now()`. - pub(crate) defer: Defer, -} - -type Notified = task::Notified>; - -/// Initial queue capacity. -const INITIAL_CAPACITY: usize = 64; - -/// Used if none is specified. This is a temporary constant and will be removed -/// as we unify tuning logic between the multi-thread and current-thread -/// schedulers. -const DEFAULT_GLOBAL_QUEUE_INTERVAL: u32 = 31; - -impl CurrentThread { - pub(crate) fn new( - driver: Driver, - driver_handle: driver::Handle, - blocking_spawner: blocking::Spawner, - seed_generator: RngSeedGenerator, - config: Config, - ) -> (CurrentThread, Arc) { - let worker_metrics = WorkerMetrics::from_config(&config); - - // Get the configured global queue interval, or use the default. 
- let global_queue_interval = config - .global_queue_interval - .unwrap_or(DEFAULT_GLOBAL_QUEUE_INTERVAL); - - let handle = Arc::new(Handle { - shared: Shared { - inject: Inject::new(), - owned: OwnedTasks::new(), - woken: AtomicBool::new(false), - config, - scheduler_metrics: SchedulerMetrics::new(), - worker_metrics, - }, - driver: driver_handle, - blocking_spawner, - seed_generator, - }); - - let core = AtomicCell::new(Some(Box::new(Core { - tasks: VecDeque::with_capacity(INITIAL_CAPACITY), - tick: 0, - driver: Some(driver), - metrics: MetricsBatch::new(&handle.shared.worker_metrics), - global_queue_interval, - unhandled_panic: false, - }))); - - let scheduler = CurrentThread { - core, - notify: Notify::new(), - }; - - (scheduler, handle) - } - - #[track_caller] - pub(crate) fn block_on(&self, handle: &scheduler::Handle, future: F) -> F::Output { - pin!(future); - - crate::runtime::context::enter_runtime(handle, false, |blocking| { - let handle = handle.as_current_thread(); - - // Attempt to steal the scheduler core and block_on the future if we can - // there, otherwise, lets select on a notification that the core is - // available or the future is complete. 
- loop { - if let Some(core) = self.take_core(handle) { - return core.block_on(future); - } else { - let notified = self.notify.notified(); - pin!(notified); - - if let Some(out) = blocking - .block_on(poll_fn(|cx| { - if notified.as_mut().poll(cx).is_ready() { - return Ready(None); - } - - if let Ready(out) = future.as_mut().poll(cx) { - return Ready(Some(out)); - } - - Pending - })) - .expect("Failed to `Enter::block_on`") - { - return out; - } - } - } - }) - } - - fn take_core(&self, handle: &Arc) -> Option> { - let core = self.core.take()?; - - Some(CoreGuard { - context: scheduler::Context::CurrentThread(Context { - handle: handle.clone(), - core: RefCell::new(Some(core)), - defer: Defer::new(), - }), - scheduler: self, - }) - } - - pub(crate) fn shutdown(&mut self, handle: &scheduler::Handle) { - let handle = handle.as_current_thread(); - - // Avoid a double panic if we are currently panicking and - // the lock may be poisoned. - - let core = match self.take_core(handle) { - Some(core) => core, - None if std::thread::panicking() => return, - None => panic!("Oh no! We never placed the Core back, this is a bug!"), - }; - - // Check that the thread-local is not being destroyed - let tls_available = context::with_current(|_| ()).is_ok(); - - if tls_available { - core.enter(|core, _context| { - let core = shutdown2(core, handle); - (core, ()) - }); - } else { - // Shutdown without setting the context. `tokio::spawn` calls will - // fail, but those will fail either way because the thread-local is - // not available anymore. - let context = core.context.expect_current_thread(); - let core = context.core.borrow_mut().take().unwrap(); - - let core = shutdown2(core, handle); - *context.core.borrow_mut() = Some(core); - } - } -} - -fn shutdown2(mut core: Box, handle: &Handle) -> Box { - // Drain the OwnedTasks collection. This call also closes the - // collection, ensuring that no tasks are ever pushed after this - // call returns. 
- handle.shared.owned.close_and_shutdown_all(); - - // Drain local queue - // We already shut down every task, so we just need to drop the task. - while let Some(task) = core.next_local_task(handle) { - drop(task); - } - - // Close the injection queue - handle.shared.inject.close(); - - // Drain remote queue - while let Some(task) = handle.shared.inject.pop() { - drop(task); - } - - assert!(handle.shared.owned.is_empty()); - - // Submit metrics - core.submit_metrics(handle); - - // Shutdown the resource drivers - if let Some(driver) = core.driver.as_mut() { - driver.shutdown(&handle.driver); - } - - core -} - -impl fmt::Debug for CurrentThread { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("CurrentThread").finish() - } -} - -// ===== impl Core ===== - -impl Core { - /// Get and increment the current tick - fn tick(&mut self) { - self.tick = self.tick.wrapping_add(1); - } - - fn next_task(&mut self, handle: &Handle) -> Option { - if self.tick % self.global_queue_interval == 0 { - handle - .next_remote_task() - .or_else(|| self.next_local_task(handle)) - } else { - self.next_local_task(handle) - .or_else(|| handle.next_remote_task()) - } - } - - fn next_local_task(&mut self, handle: &Handle) -> Option { - let ret = self.tasks.pop_front(); - handle - .shared - .worker_metrics - .set_queue_depth(self.tasks.len()); - ret - } - - fn push_task(&mut self, handle: &Handle, task: Notified) { - self.tasks.push_back(task); - self.metrics.inc_local_schedule_count(); - handle - .shared - .worker_metrics - .set_queue_depth(self.tasks.len()); - } - - fn submit_metrics(&mut self, handle: &Handle) { - self.metrics.submit(&handle.shared.worker_metrics, 0); - } -} - -#[cfg(tokio_taskdump)] -fn wake_deferred_tasks_and_free(context: &Context) { - let wakers = context.defer.take_deferred(); - for waker in wakers { - waker.wake(); - } -} - -// ===== impl Context ===== - -impl Context { - /// Execute the closure with the given scheduler core stored in 
the - /// thread-local context. - fn run_task(&self, mut core: Box, f: impl FnOnce() -> R) -> (Box, R) { - core.metrics.start_poll(); - let mut ret = self.enter(core, || crate::runtime::coop::budget(f)); - ret.0.metrics.end_poll(); - ret - } - - /// Blocks the current thread until an event is received by the driver, - /// including I/O events, timer events, ... - fn park(&self, mut core: Box, handle: &Handle) -> Box { - let mut driver = core.driver.take().expect("driver missing"); - - if let Some(f) = &handle.shared.config.before_park { - // Incorrect lint, the closures are actually different types so `f` - // cannot be passed as an argument to `enter`. - #[allow(clippy::redundant_closure)] - let (c, _) = self.enter(core, || f()); - core = c; - } - - // This check will fail if `before_park` spawns a task for us to run - // instead of parking the thread - if core.tasks.is_empty() { - // Park until the thread is signaled - core.metrics.about_to_park(); - core.submit_metrics(handle); - - let (c, _) = self.enter(core, || { - driver.park(&handle.driver); - self.defer.wake(); - }); - - core = c; - } - - if let Some(f) = &handle.shared.config.after_unpark { - // Incorrect lint, the closures are actually different types so `f` - // cannot be passed as an argument to `enter`. - #[allow(clippy::redundant_closure)] - let (c, _) = self.enter(core, || f()); - core = c; - } - - core.driver = Some(driver); - core - } - - /// Checks the driver for new events without blocking the thread. 
- fn park_yield(&self, mut core: Box, handle: &Handle) -> Box { - let mut driver = core.driver.take().expect("driver missing"); - - core.submit_metrics(handle); - - let (mut core, _) = self.enter(core, || { - driver.park_timeout(&handle.driver, Duration::from_millis(0)); - self.defer.wake(); - }); - - core.driver = Some(driver); - core - } - - fn enter(&self, core: Box, f: impl FnOnce() -> R) -> (Box, R) { - // Store the scheduler core in the thread-local context - // - // A drop-guard is employed at a higher level. - *self.core.borrow_mut() = Some(core); - - // Execute the closure while tracking the execution budget - let ret = f(); - - // Take the scheduler core back - let core = self.core.borrow_mut().take().expect("core missing"); - (core, ret) - } - - pub(crate) fn defer(&self, waker: &Waker) { - self.defer.defer(waker); - } -} - -// ===== impl Handle ===== - -impl Handle { - /// Spawns a future onto the `CurrentThread` scheduler - pub(crate) fn spawn( - me: &Arc, - future: F, - id: crate::runtime::task::Id, - ) -> JoinHandle - where - F: crate::future::Future + Send + 'static, - F::Output: Send + 'static, - { - let (handle, notified) = me.shared.owned.bind(future, me.clone(), id); - - if let Some(notified) = notified { - me.schedule(notified); - } - - handle - } - - /// Capture a snapshot of this runtime's state. - #[cfg(all( - tokio_unstable, - tokio_taskdump, - target_os = "linux", - any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") - ))] - pub(crate) fn dump(&self) -> crate::runtime::Dump { - use crate::runtime::dump; - use task::trace::trace_current_thread; - - let mut traces = vec![]; - - // todo: how to make this work outside of a runtime context? 
- context::with_scheduler(|maybe_context| { - // drain the local queue - let context = if let Some(context) = maybe_context { - context.expect_current_thread() - } else { - return; - }; - let mut maybe_core = context.core.borrow_mut(); - let core = if let Some(core) = maybe_core.as_mut() { - core - } else { - return; - }; - let local = &mut core.tasks; - - if self.shared.inject.is_closed() { - return; - } - - traces = trace_current_thread(&self.shared.owned, local, &self.shared.inject) - .into_iter() - .map(dump::Task::new) - .collect(); - - // Avoid double borrow panic - drop(maybe_core); - - // Taking a taskdump could wakes every task, but we probably don't want - // the `yield_now` vector to be that large under normal circumstances. - // Therefore, we free its allocation. - wake_deferred_tasks_and_free(context); - }); - - dump::Dump::new(traces) - } - - fn next_remote_task(&self) -> Option { - self.shared.inject.pop() - } - - fn waker_ref(me: &Arc) -> WakerRef<'_> { - // Set woken to true when enter block_on, ensure outer future - // be polled for the first time when enter loop - me.shared.woken.store(true, Release); - waker_ref(me) - } - - // reset woken to false and return original value - pub(crate) fn reset_woken(&self) -> bool { - self.shared.woken.swap(false, AcqRel) - } -} - -cfg_metrics! 
{ - impl Handle { - pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { - &self.shared.scheduler_metrics - } - - pub(crate) fn injection_queue_depth(&self) -> usize { - self.shared.inject.len() - } - - pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { - assert_eq!(0, worker); - &self.shared.worker_metrics - } - - pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { - self.worker_metrics(worker).queue_depth() - } - - pub(crate) fn num_blocking_threads(&self) -> usize { - self.blocking_spawner.num_threads() - } - - pub(crate) fn num_idle_blocking_threads(&self) -> usize { - self.blocking_spawner.num_idle_threads() - } - - pub(crate) fn blocking_queue_depth(&self) -> usize { - self.blocking_spawner.queue_depth() - } - - pub(crate) fn active_tasks_count(&self) -> usize { - self.shared.owned.active_tasks_count() - } - } -} - -cfg_unstable! { - use std::num::NonZeroU64; - - impl Handle { - pub(crate) fn owned_id(&self) -> NonZeroU64 { - self.shared.owned.id - } - } -} - -impl fmt::Debug for Handle { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("current_thread::Handle { ... }").finish() - } -} - -// ===== impl Shared ===== - -impl Schedule for Arc { - fn release(&self, task: &Task) -> Option> { - self.shared.owned.remove(task) - } - - fn schedule(&self, task: task::Notified) { - use scheduler::Context::CurrentThread; - - context::with_scheduler(|maybe_cx| match maybe_cx { - Some(CurrentThread(cx)) if Arc::ptr_eq(self, &cx.handle) => { - let mut core = cx.core.borrow_mut(); - - // If `None`, the runtime is shutting down, so there is no need - // to schedule the task. - if let Some(core) = core.as_mut() { - core.push_task(self, task); - } - } - _ => { - // Track that a task was scheduled from **outside** of the runtime. 
- self.shared.scheduler_metrics.inc_remote_schedule_count(); - - // Schedule the task - self.shared.inject.push(task); - self.driver.unpark(); - } - }); - } - - cfg_unstable! { - fn unhandled_panic(&self) { - use crate::runtime::UnhandledPanic; - - match self.shared.config.unhandled_panic { - UnhandledPanic::Ignore => { - // Do nothing - } - UnhandledPanic::ShutdownRuntime => { - use scheduler::Context::CurrentThread; - - // This hook is only called from within the runtime, so - // `context::with_scheduler` should match with `&self`, i.e. - // there is no opportunity for a nested scheduler to be - // called. - context::with_scheduler(|maybe_cx| match maybe_cx { - Some(CurrentThread(cx)) if Arc::ptr_eq(self, &cx.handle) => { - let mut core = cx.core.borrow_mut(); - - // If `None`, the runtime is shutting down, so there is no need to signal shutdown - if let Some(core) = core.as_mut() { - core.unhandled_panic = true; - self.shared.owned.close_and_shutdown_all(); - } - } - _ => unreachable!("runtime core not set in CURRENT thread-local"), - }) - } - } - } - } -} - -impl Wake for Handle { - fn wake(arc_self: Arc) { - Wake::wake_by_ref(&arc_self) - } - - /// Wake by reference - fn wake_by_ref(arc_self: &Arc) { - arc_self.shared.woken.store(true, Release); - arc_self.driver.unpark(); - } -} - -// ===== CoreGuard ===== - -/// Used to ensure we always place the `Core` value back into its slot in -/// `CurrentThread`, even if the future panics. 
-struct CoreGuard<'a> { - context: scheduler::Context, - scheduler: &'a CurrentThread, -} - -impl CoreGuard<'_> { - #[track_caller] - fn block_on(self, future: F) -> F::Output { - let ret = self.enter(|mut core, context| { - let waker = Handle::waker_ref(&context.handle); - let mut cx = std::task::Context::from_waker(&waker); - - pin!(future); - - core.metrics.start_processing_scheduled_tasks(); - - 'outer: loop { - let handle = &context.handle; - - if handle.reset_woken() { - let (c, res) = context.enter(core, || { - crate::runtime::coop::budget(|| future.as_mut().poll(&mut cx)) - }); - - core = c; - - if let Ready(v) = res { - return (core, Some(v)); - } - } - - for _ in 0..handle.shared.config.event_interval { - // Make sure we didn't hit an unhandled_panic - if core.unhandled_panic { - return (core, None); - } - - core.tick(); - - let entry = core.next_task(handle); - - let task = match entry { - Some(entry) => entry, - None => { - core.metrics.end_processing_scheduled_tasks(); - - core = if !context.defer.is_empty() { - context.park_yield(core, handle) - } else { - context.park(core, handle) - }; - - core.metrics.start_processing_scheduled_tasks(); - - // Try polling the `block_on` future next - continue 'outer; - } - }; - - let task = context.handle.shared.owned.assert_owner(task); - - let (c, _) = context.run_task(core, || { - task.run(); - }); - - core = c; - } - - core.metrics.end_processing_scheduled_tasks(); - - // Yield to the driver, this drives the timer and pulls any - // pending I/O events. - core = context.park_yield(core, handle); - - core.metrics.start_processing_scheduled_tasks(); - } - }); - - match ret { - Some(ret) => ret, - None => { - // `block_on` panicked. - panic!("a spawned task panicked and the runtime is configured to shut down on unhandled panic"); - } - } - } - - /// Enters the scheduler context. This sets the queue and other necessary - /// scheduler state in the thread-local. 
- fn enter(self, f: F) -> R - where - F: FnOnce(Box, &Context) -> (Box, R), - { - let context = self.context.expect_current_thread(); - - // Remove `core` from `context` to pass into the closure. - let core = context.core.borrow_mut().take().expect("core missing"); - - // Call the closure and place `core` back - let (core, ret) = context::set_scheduler(&self.context, || f(core, context)); - - *context.core.borrow_mut() = Some(core); - - ret - } -} - -impl Drop for CoreGuard<'_> { - fn drop(&mut self) { - let context = self.context.expect_current_thread(); - - if let Some(core) = context.core.borrow_mut().take() { - // Replace old scheduler back into the state to allow - // other threads to pick it up and drive it. - self.scheduler.core.set(core); - - // Wake up other possible threads that could steal the driver. - self.scheduler.notify.notify_one() - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/defer.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/defer.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/defer.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/defer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -use std::cell::RefCell; -use std::task::Waker; - -pub(crate) struct Defer { - deferred: RefCell>, -} - -impl Defer { - pub(crate) fn new() -> Defer { - Defer { - deferred: Default::default(), - } - } - - pub(crate) fn defer(&self, waker: &Waker) { - let mut deferred = self.deferred.borrow_mut(); - - // If the same task adds itself a bunch of times, then only add it once. 
- if let Some(last) = deferred.last() { - if last.will_wake(waker) { - return; - } - } - - deferred.push(waker.clone()); - } - - pub(crate) fn is_empty(&self) -> bool { - self.deferred.borrow().is_empty() - } - - pub(crate) fn wake(&self) { - while let Some(waker) = self.deferred.borrow_mut().pop() { - waker.wake(); - } - } - - #[cfg(tokio_taskdump)] - pub(crate) fn take_deferred(&self) -> Vec { - let mut deferred = self.deferred.borrow_mut(); - std::mem::take(&mut *deferred) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/metrics.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/metrics.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/metrics.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/metrics.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,7 +0,0 @@ -use super::Inject; - -impl Inject { - pub(crate) fn len(&self) -> usize { - self.shared.len() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/pop.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/pop.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/pop.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/pop.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -use super::Synced; - -use crate::runtime::task; - -use std::marker::PhantomData; - -pub(crate) struct Pop<'a, T: 'static> { - len: usize, - synced: &'a mut Synced, - _p: PhantomData, -} - -impl<'a, T: 'static> Pop<'a, T> { - pub(super) fn new(len: usize, synced: &'a mut Synced) -> Pop<'a, T> { - Pop { - len, - synced, - _p: PhantomData, - } - } -} - -impl<'a, T: 'static> Iterator for Pop<'a, T> { - type Item = task::Notified; - - fn next(&mut self) -> Option { - if self.len == 0 { - return None; - } - - let ret = self.synced.pop(); - - // Should be `Some` when `len > 0` - 
debug_assert!(ret.is_some()); - - self.len -= 1; - ret - } - - fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) - } -} - -impl<'a, T: 'static> ExactSizeIterator for Pop<'a, T> { - fn len(&self) -> usize { - self.len - } -} - -impl<'a, T: 'static> Drop for Pop<'a, T> { - fn drop(&mut self) { - for _ in self.by_ref() {} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/rt_multi_thread.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/rt_multi_thread.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/rt_multi_thread.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/rt_multi_thread.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,113 +0,0 @@ -use super::{Shared, Synced}; - -use crate::runtime::scheduler::Lock; -use crate::runtime::task; - -use std::sync::atomic::Ordering::Release; - -impl<'a> Lock for &'a mut Synced { - type Handle = &'a mut Synced; - - fn lock(self) -> Self::Handle { - self - } -} - -impl AsMut for Synced { - fn as_mut(&mut self) -> &mut Synced { - self - } -} - -impl Shared { - /// Pushes several values into the queue. - /// - /// # Safety - /// - /// Must be called with the same `Synced` instance returned by `Inject::new` - #[inline] - pub(crate) unsafe fn push_batch(&self, shared: L, mut iter: I) - where - L: Lock, - I: Iterator>, - { - let first = match iter.next() { - Some(first) => first.into_raw(), - None => return, - }; - - // Link up all the tasks. - let mut prev = first; - let mut counter = 1; - - // We are going to be called with an `std::iter::Chain`, and that - // iterator overrides `for_each` to something that is easier for the - // compiler to optimize than a loop. - iter.for_each(|next| { - let next = next.into_raw(); - - // safety: Holding the Notified for a task guarantees exclusive - // access to the `queue_next` field. 
- unsafe { prev.set_queue_next(Some(next)) }; - prev = next; - counter += 1; - }); - - // Now that the tasks are linked together, insert them into the - // linked list. - self.push_batch_inner(shared, first, prev, counter); - } - - /// Inserts several tasks that have been linked together into the queue. - /// - /// The provided head and tail may be be the same task. In this case, a - /// single task is inserted. - #[inline] - unsafe fn push_batch_inner( - &self, - shared: L, - batch_head: task::RawTask, - batch_tail: task::RawTask, - num: usize, - ) where - L: Lock, - { - debug_assert!(unsafe { batch_tail.get_queue_next().is_none() }); - - let mut synced = shared.lock(); - - if synced.as_mut().is_closed { - drop(synced); - - let mut curr = Some(batch_head); - - while let Some(task) = curr { - curr = task.get_queue_next(); - - let _ = unsafe { task::Notified::::from_raw(task) }; - } - - return; - } - - let synced = synced.as_mut(); - - if let Some(tail) = synced.tail { - unsafe { - tail.set_queue_next(Some(batch_head)); - } - } else { - synced.head = Some(batch_head); - } - - synced.tail = Some(batch_tail); - - // Increment the count. - // - // safety: All updates to the len atomic are guarded by the mutex. As - // such, a non-atomic load followed by a store is safe. 
- let len = self.len.unsync_load(); - - self.len.store(len + num, Release); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/shared.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/shared.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/shared.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/shared.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,124 +0,0 @@ -use super::{Pop, Synced}; - -use crate::loom::sync::atomic::AtomicUsize; -use crate::runtime::task; - -use std::marker::PhantomData; -use std::sync::atomic::Ordering::{Acquire, Release}; - -pub(crate) struct Shared { - /// Number of pending tasks in the queue. This helps prevent unnecessary - /// locking in the hot path. - pub(super) len: AtomicUsize, - - _p: PhantomData, -} - -unsafe impl Send for Shared {} -unsafe impl Sync for Shared {} - -impl Shared { - pub(crate) fn new() -> (Shared, Synced) { - let inject = Shared { - len: AtomicUsize::new(0), - _p: PhantomData, - }; - - let synced = Synced { - is_closed: false, - head: None, - tail: None, - }; - - (inject, synced) - } - - pub(crate) fn is_empty(&self) -> bool { - self.len() == 0 - } - - // Kind of annoying to have to include the cfg here - #[cfg(any( - tokio_taskdump, - all(feature = "rt-multi-thread", not(target_os = "wasi")) - ))] - pub(crate) fn is_closed(&self, synced: &Synced) -> bool { - synced.is_closed - } - - /// Closes the injection queue, returns `true` if the queue is open when the - /// transition is made. - pub(crate) fn close(&self, synced: &mut Synced) -> bool { - if synced.is_closed { - return false; - } - - synced.is_closed = true; - true - } - - pub(crate) fn len(&self) -> usize { - self.len.load(Acquire) - } - - /// Pushes a value into the queue. - /// - /// This does nothing if the queue is closed. 
- /// - /// # Safety - /// - /// Must be called with the same `Synced` instance returned by `Inject::new` - pub(crate) unsafe fn push(&self, synced: &mut Synced, task: task::Notified) { - if synced.is_closed { - return; - } - - // safety: only mutated with the lock held - let len = self.len.unsync_load(); - let task = task.into_raw(); - - // The next pointer should already be null - debug_assert!(unsafe { task.get_queue_next().is_none() }); - - if let Some(tail) = synced.tail { - // safety: Holding the Notified for a task guarantees exclusive - // access to the `queue_next` field. - unsafe { tail.set_queue_next(Some(task)) }; - } else { - synced.head = Some(task); - } - - synced.tail = Some(task); - self.len.store(len + 1, Release); - } - - /// Pop a value from the queue. - /// - /// # Safety - /// - /// Must be called with the same `Synced` instance returned by `Inject::new` - pub(crate) unsafe fn pop(&self, synced: &mut Synced) -> Option> { - self.pop_n(synced, 1).next() - } - - /// Pop `n` values from the queue - /// - /// # Safety - /// - /// Must be called with the same `Synced` instance returned by `Inject::new` - pub(crate) unsafe fn pop_n<'a>(&'a self, synced: &'a mut Synced, n: usize) -> Pop<'a, T> { - use std::cmp; - - debug_assert!(n > 0); - - // safety: All updates to the len atomic are guarded by the mutex. As - // such, a non-atomic load followed by a store is safe. - let len = self.len.unsync_load(); - let n = cmp::min(n, len); - - // Decrement the count. 
- self.len.store(len - n, Release); - - Pop::new(n, synced) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/synced.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/synced.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject/synced.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject/synced.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -#![cfg_attr( - any(not(all(tokio_unstable, feature = "full")), target_family = "wasm"), - allow(dead_code) -)] - -use crate::runtime::task; - -pub(crate) struct Synced { - /// True if the queue is closed. - pub(super) is_closed: bool, - - /// Linked-list head. - pub(super) head: Option, - - /// Linked-list tail. - pub(super) tail: Option, -} - -unsafe impl Send for Synced {} -unsafe impl Sync for Synced {} - -impl Synced { - pub(super) fn pop(&mut self) -> Option> { - let task = self.head?; - - self.head = unsafe { task.get_queue_next() }; - - if self.head.is_none() { - self.tail = None; - } - - unsafe { task.set_queue_next(None) }; - - // safety: a `Notified` is pushed into the queue and now it is popped! - Some(unsafe { task::Notified::from_raw(task) }) - } - - pub(crate) fn is_empty(&self) -> bool { - self.head.is_none() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/inject.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/inject.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -//! Inject queue used to send wakeups to a work-stealing scheduler - -use crate::loom::sync::Mutex; -use crate::runtime::task; - -mod pop; -pub(crate) use pop::Pop; - -mod shared; -pub(crate) use shared::Shared; - -mod synced; -pub(crate) use synced::Synced; - -cfg_rt_multi_thread! 
{ - mod rt_multi_thread; -} - -cfg_metrics! { - mod metrics; -} - -/// Growable, MPMC queue used to inject new tasks into the scheduler and as an -/// overflow queue when the local, fixed-size, array queue overflows. -pub(crate) struct Inject { - shared: Shared, - synced: Mutex, -} - -impl Inject { - pub(crate) fn new() -> Inject { - let (shared, synced) = Shared::new(); - - Inject { - shared, - synced: Mutex::new(synced), - } - } - - // Kind of annoying to have to include the cfg here - #[cfg(tokio_taskdump)] - pub(crate) fn is_closed(&self) -> bool { - let synced = self.synced.lock(); - self.shared.is_closed(&synced) - } - - /// Closes the injection queue, returns `true` if the queue is open when the - /// transition is made. - pub(crate) fn close(&self) -> bool { - let mut synced = self.synced.lock(); - self.shared.close(&mut synced) - } - - /// Pushes a value into the queue. - /// - /// This does nothing if the queue is closed. - pub(crate) fn push(&self, task: task::Notified) { - let mut synced = self.synced.lock(); - // safety: passing correct `Synced` - unsafe { self.shared.push(&mut synced, task) } - } - - pub(crate) fn pop(&self) -> Option> { - if self.shared.is_empty() { - return None; - } - - let mut synced = self.synced.lock(); - // safety: passing correct `Synced` - unsafe { self.shared.pop(&mut synced) } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/lock.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/lock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/lock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/lock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,6 +0,0 @@ -/// A lock (mutex) yielding generic data. 
-pub(crate) trait Lock { - type Handle: AsMut; - - fn lock(self) -> Self::Handle; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,263 +0,0 @@ -cfg_rt! { - pub(crate) mod current_thread; - pub(crate) use current_thread::CurrentThread; - - mod defer; - use defer::Defer; - - pub(crate) mod inject; - pub(crate) use inject::Inject; -} - -cfg_rt_multi_thread! { - mod block_in_place; - pub(crate) use block_in_place::block_in_place; - - mod lock; - use lock::Lock; - - pub(crate) mod multi_thread; - pub(crate) use multi_thread::MultiThread; - - cfg_unstable! { - pub(crate) mod multi_thread_alt; - pub(crate) use multi_thread_alt::MultiThread as MultiThreadAlt; - } -} - -use crate::runtime::driver; - -#[derive(Debug, Clone)] -pub(crate) enum Handle { - #[cfg(feature = "rt")] - CurrentThread(Arc), - - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - MultiThread(Arc), - - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - MultiThreadAlt(Arc), - - // TODO: This is to avoid triggering "dead code" warnings many other places - // in the codebase. 
Remove this during a later cleanup - #[cfg(not(feature = "rt"))] - #[allow(dead_code)] - Disabled, -} - -#[cfg(feature = "rt")] -pub(super) enum Context { - CurrentThread(current_thread::Context), - - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - MultiThread(multi_thread::Context), - - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - MultiThreadAlt(multi_thread_alt::Context), -} - -impl Handle { - #[cfg_attr(not(feature = "full"), allow(dead_code))] - pub(crate) fn driver(&self) -> &driver::Handle { - match *self { - #[cfg(feature = "rt")] - Handle::CurrentThread(ref h) => &h.driver, - - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - Handle::MultiThread(ref h) => &h.driver, - - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - Handle::MultiThreadAlt(ref h) => &h.driver, - - #[cfg(not(feature = "rt"))] - Handle::Disabled => unreachable!(), - } - } -} - -cfg_rt! { - use crate::future::Future; - use crate::loom::sync::Arc; - use crate::runtime::{blocking, task::Id}; - use crate::runtime::context; - use crate::task::JoinHandle; - use crate::util::RngSeedGenerator; - use std::task::Waker; - - macro_rules! 
match_flavor { - ($self:expr, $ty:ident($h:ident) => $e:expr) => { - match $self { - $ty::CurrentThread($h) => $e, - - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - $ty::MultiThread($h) => $e, - - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - $ty::MultiThreadAlt($h) => $e, - } - } - } - - impl Handle { - #[track_caller] - pub(crate) fn current() -> Handle { - match context::with_current(Clone::clone) { - Ok(handle) => handle, - Err(e) => panic!("{}", e), - } - } - - pub(crate) fn blocking_spawner(&self) -> &blocking::Spawner { - match_flavor!(self, Handle(h) => &h.blocking_spawner) - } - - pub(crate) fn spawn(&self, future: F, id: Id) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - match self { - Handle::CurrentThread(h) => current_thread::Handle::spawn(h, future, id), - - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - Handle::MultiThread(h) => multi_thread::Handle::spawn(h, future, id), - - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - Handle::MultiThreadAlt(h) => multi_thread_alt::Handle::spawn(h, future, id), - } - } - - pub(crate) fn shutdown(&self) { - match *self { - Handle::CurrentThread(_) => {}, - - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - Handle::MultiThread(ref h) => h.shutdown(), - - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - Handle::MultiThreadAlt(ref h) => h.shutdown(), - } - } - - pub(crate) fn seed_generator(&self) -> &RngSeedGenerator { - match_flavor!(self, Handle(h) => &h.seed_generator) - } - - pub(crate) fn as_current_thread(&self) -> &Arc { - match self { - Handle::CurrentThread(handle) => handle, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - _ => panic!("not a CurrentThread handle"), - } - } - - cfg_rt_multi_thread! { - cfg_unstable! 
{ - pub(crate) fn expect_multi_thread_alt(&self) -> &Arc { - match self { - Handle::MultiThreadAlt(handle) => handle, - _ => panic!("not a `MultiThreadAlt` handle"), - } - } - } - } - } - - cfg_metrics! { - use crate::runtime::{SchedulerMetrics, WorkerMetrics}; - - impl Handle { - pub(crate) fn num_workers(&self) -> usize { - match self { - Handle::CurrentThread(_) => 1, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - Handle::MultiThread(handle) => handle.num_workers(), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] - Handle::MultiThreadAlt(handle) => handle.num_workers(), - } - } - - pub(crate) fn num_blocking_threads(&self) -> usize { - match_flavor!(self, Handle(handle) => handle.num_blocking_threads()) - } - - pub(crate) fn num_idle_blocking_threads(&self) -> usize { - match_flavor!(self, Handle(handle) => handle.num_idle_blocking_threads()) - } - - pub(crate) fn active_tasks_count(&self) -> usize { - match_flavor!(self, Handle(handle) => handle.active_tasks_count()) - } - - pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { - match_flavor!(self, Handle(handle) => handle.scheduler_metrics()) - } - - pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { - match_flavor!(self, Handle(handle) => handle.worker_metrics(worker)) - } - - pub(crate) fn injection_queue_depth(&self) -> usize { - match_flavor!(self, Handle(handle) => handle.injection_queue_depth()) - } - - pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { - match_flavor!(self, Handle(handle) => handle.worker_local_queue_depth(worker)) - } - - pub(crate) fn blocking_queue_depth(&self) -> usize { - match_flavor!(self, Handle(handle) => handle.blocking_queue_depth()) - } - } - } - - impl Context { - #[track_caller] - pub(crate) fn expect_current_thread(&self) -> ¤t_thread::Context { - match self { - Context::CurrentThread(context) => context, - #[cfg(all(feature = "rt-multi-thread", not(target_os = 
"wasi")))] - _ => panic!("expected `CurrentThread::Context`") - } - } - - pub(crate) fn defer(&self, waker: &Waker) { - match_flavor!(self, Context(context) => context.defer(waker)) - } - - cfg_rt_multi_thread! { - #[track_caller] - pub(crate) fn expect_multi_thread(&self) -> &multi_thread::Context { - match self { - Context::MultiThread(context) => context, - _ => panic!("expected `MultiThread::Context`") - } - } - - cfg_unstable! { - #[track_caller] - pub(crate) fn expect_multi_thread_alt(&self) -> &multi_thread_alt::Context { - match self { - Context::MultiThreadAlt(context) => context, - _ => panic!("expected `MultiThreadAlt::Context`") - } - } - } - } - } -} - -cfg_not_rt! { - #[cfg(any( - feature = "net", - all(unix, feature = "process"), - all(unix, feature = "signal"), - feature = "time", - ))] - impl Handle { - #[track_caller] - pub(crate) fn current() -> Handle { - panic!("{}", crate::util::error::CONTEXT_MISSING_ERROR) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/counters.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/counters.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/counters.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/counters.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,62 +0,0 @@ -#[cfg(tokio_internal_mt_counters)] -mod imp { - use std::sync::atomic::AtomicUsize; - use std::sync::atomic::Ordering::Relaxed; - - static NUM_MAINTENANCE: AtomicUsize = AtomicUsize::new(0); - static NUM_NOTIFY_LOCAL: AtomicUsize = AtomicUsize::new(0); - static NUM_UNPARKS_LOCAL: AtomicUsize = AtomicUsize::new(0); - static NUM_LIFO_SCHEDULES: AtomicUsize = AtomicUsize::new(0); - static NUM_LIFO_CAPPED: AtomicUsize = AtomicUsize::new(0); - - impl Drop for super::Counters { - fn drop(&mut self) { - let notifies_local = NUM_NOTIFY_LOCAL.load(Relaxed); - let unparks_local = 
NUM_UNPARKS_LOCAL.load(Relaxed); - let maintenance = NUM_MAINTENANCE.load(Relaxed); - let lifo_scheds = NUM_LIFO_SCHEDULES.load(Relaxed); - let lifo_capped = NUM_LIFO_CAPPED.load(Relaxed); - - println!("---"); - println!("notifies (local): {}", notifies_local); - println!(" unparks (local): {}", unparks_local); - println!(" maintenance: {}", maintenance); - println!(" LIFO schedules: {}", lifo_scheds); - println!(" LIFO capped: {}", lifo_capped); - } - } - - pub(crate) fn inc_num_inc_notify_local() { - NUM_NOTIFY_LOCAL.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_unparks_local() { - NUM_UNPARKS_LOCAL.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_maintenance() { - NUM_MAINTENANCE.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_lifo_schedules() { - NUM_LIFO_SCHEDULES.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_lifo_capped() { - NUM_LIFO_CAPPED.fetch_add(1, Relaxed); - } -} - -#[cfg(not(tokio_internal_mt_counters))] -mod imp { - pub(crate) fn inc_num_inc_notify_local() {} - pub(crate) fn inc_num_unparks_local() {} - pub(crate) fn inc_num_maintenance() {} - pub(crate) fn inc_lifo_schedules() {} - pub(crate) fn inc_lifo_capped() {} -} - -#[derive(Debug)] -pub(crate) struct Counters; - -pub(super) use imp::*; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -use super::Handle; - -use crate::runtime::{SchedulerMetrics, WorkerMetrics}; - -impl Handle { - pub(crate) fn num_workers(&self) -> usize { - self.shared.worker_metrics.len() - } - - pub(crate) fn num_blocking_threads(&self) -> usize { - self.blocking_spawner.num_threads() - } - - 
pub(crate) fn num_idle_blocking_threads(&self) -> usize { - self.blocking_spawner.num_idle_threads() - } - - pub(crate) fn active_tasks_count(&self) -> usize { - self.shared.owned.active_tasks_count() - } - - pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { - &self.shared.scheduler_metrics - } - - pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { - &self.shared.worker_metrics[worker] - } - - pub(crate) fn injection_queue_depth(&self) -> usize { - self.shared.injection_queue_depth() - } - - pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { - self.shared.worker_local_queue_depth(worker) - } - - pub(crate) fn blocking_queue_depth(&self) -> usize { - self.blocking_spawner.queue_depth() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle/taskdump.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle/taskdump.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle/taskdump.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle/taskdump.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -use super::Handle; - -use crate::runtime::Dump; - -impl Handle { - pub(crate) async fn dump(&self) -> Dump { - let trace_status = &self.shared.trace_status; - - // If a dump is in progress, block. - trace_status.start_trace_request(&self).await; - - let result = loop { - if let Some(result) = trace_status.take_result() { - break result; - } else { - self.notify_all(); - trace_status.result_ready.notified().await; - } - }; - - // Allow other queued dumps to proceed. 
- trace_status.end_trace_request(&self).await; - - result - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/handle.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -use crate::future::Future; -use crate::loom::sync::Arc; -use crate::runtime::scheduler::multi_thread::worker; -use crate::runtime::{ - blocking, driver, - task::{self, JoinHandle}, -}; -use crate::util::RngSeedGenerator; - -use std::fmt; - -cfg_metrics! { - mod metrics; -} - -cfg_taskdump! { - mod taskdump; -} - -/// Handle to the multi thread scheduler -pub(crate) struct Handle { - /// Task spawner - pub(super) shared: worker::Shared, - - /// Resource driver handles - pub(crate) driver: driver::Handle, - - /// Blocking pool spawner - pub(crate) blocking_spawner: blocking::Spawner, - - /// Current random number generator seed - pub(crate) seed_generator: RngSeedGenerator, -} - -impl Handle { - /// Spawns a future onto the thread pool - pub(crate) fn spawn(me: &Arc, future: F, id: task::Id) -> JoinHandle - where - F: crate::future::Future + Send + 'static, - F::Output: Send + 'static, - { - Self::bind_new_task(me, future, id) - } - - pub(crate) fn shutdown(&self) { - self.close(); - } - - pub(super) fn bind_new_task(me: &Arc, future: T, id: task::Id) -> JoinHandle - where - T: Future + Send + 'static, - T::Output: Send + 'static, - { - let (handle, notified) = me.shared.owned.bind(future, me.clone(), id); - - me.schedule_option_task_without_yield(notified); - - handle - } -} - -cfg_unstable! 
{ - use std::num::NonZeroU64; - - impl Handle { - pub(crate) fn owned_id(&self) -> NonZeroU64 { - self.shared.owned.id - } - } -} - -impl fmt::Debug for Handle { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("multi_thread::Handle { ... }").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/idle.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/idle.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/idle.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/idle.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,240 +0,0 @@ -//! Coordinates idling workers - -use crate::loom::sync::atomic::AtomicUsize; -use crate::runtime::scheduler::multi_thread::Shared; - -use std::fmt; -use std::sync::atomic::Ordering::{self, SeqCst}; - -pub(super) struct Idle { - /// Tracks both the number of searching workers and the number of unparked - /// workers. - /// - /// Used as a fast-path to avoid acquiring the lock when needed. - state: AtomicUsize, - - /// Total number of workers. - num_workers: usize, -} - -/// Data synchronized by the scheduler mutex -pub(super) struct Synced { - /// Sleeping workers - sleepers: Vec, -} - -const UNPARK_SHIFT: usize = 16; -const UNPARK_MASK: usize = !SEARCH_MASK; -const SEARCH_MASK: usize = (1 << UNPARK_SHIFT) - 1; - -#[derive(Copy, Clone)] -struct State(usize); - -impl Idle { - pub(super) fn new(num_workers: usize) -> (Idle, Synced) { - let init = State::new(num_workers); - - let idle = Idle { - state: AtomicUsize::new(init.into()), - num_workers, - }; - - let synced = Synced { - sleepers: Vec::with_capacity(num_workers), - }; - - (idle, synced) - } - - /// If there are no workers actively searching, returns the index of a - /// worker currently sleeping. 
- pub(super) fn worker_to_notify(&self, shared: &Shared) -> Option { - // If at least one worker is spinning, work being notified will - // eventually be found. A searching thread will find **some** work and - // notify another worker, eventually leading to our work being found. - // - // For this to happen, this load must happen before the thread - // transitioning `num_searching` to zero. Acquire / Release does not - // provide sufficient guarantees, so this load is done with `SeqCst` and - // will pair with the `fetch_sub(1)` when transitioning out of - // searching. - if !self.notify_should_wakeup() { - return None; - } - - // Acquire the lock - let mut lock = shared.synced.lock(); - - // Check again, now that the lock is acquired - if !self.notify_should_wakeup() { - return None; - } - - // A worker should be woken up, atomically increment the number of - // searching workers as well as the number of unparked workers. - State::unpark_one(&self.state, 1); - - // Get the worker to unpark - let ret = lock.idle.sleepers.pop(); - debug_assert!(ret.is_some()); - - ret - } - - /// Returns `true` if the worker needs to do a final check for submitted - /// work. - pub(super) fn transition_worker_to_parked( - &self, - shared: &Shared, - worker: usize, - is_searching: bool, - ) -> bool { - // Acquire the lock - let mut lock = shared.synced.lock(); - - // Decrement the number of unparked threads - let ret = State::dec_num_unparked(&self.state, is_searching); - - // Track the sleeping worker - lock.idle.sleepers.push(worker); - - ret - } - - pub(super) fn transition_worker_to_searching(&self) -> bool { - let state = State::load(&self.state, SeqCst); - if 2 * state.num_searching() >= self.num_workers { - return false; - } - - // It is possible for this routine to allow more than 50% of the workers - // to search. That is OK. Limiting searchers is only an optimization to - // prevent too much contention. 
- State::inc_num_searching(&self.state, SeqCst); - true - } - - /// A lightweight transition from searching -> running. - /// - /// Returns `true` if this is the final searching worker. The caller - /// **must** notify a new worker. - pub(super) fn transition_worker_from_searching(&self) -> bool { - State::dec_num_searching(&self.state) - } - - /// Unpark a specific worker. This happens if tasks are submitted from - /// within the worker's park routine. - /// - /// Returns `true` if the worker was parked before calling the method. - pub(super) fn unpark_worker_by_id(&self, shared: &Shared, worker_id: usize) -> bool { - let mut lock = shared.synced.lock(); - let sleepers = &mut lock.idle.sleepers; - - for index in 0..sleepers.len() { - if sleepers[index] == worker_id { - sleepers.swap_remove(index); - - // Update the state accordingly while the lock is held. - State::unpark_one(&self.state, 0); - - return true; - } - } - - false - } - - /// Returns `true` if `worker_id` is contained in the sleep set. 
- pub(super) fn is_parked(&self, shared: &Shared, worker_id: usize) -> bool { - let lock = shared.synced.lock(); - lock.idle.sleepers.contains(&worker_id) - } - - fn notify_should_wakeup(&self) -> bool { - let state = State(self.state.fetch_add(0, SeqCst)); - state.num_searching() == 0 && state.num_unparked() < self.num_workers - } -} - -impl State { - fn new(num_workers: usize) -> State { - // All workers start in the unparked state - let ret = State(num_workers << UNPARK_SHIFT); - debug_assert_eq!(num_workers, ret.num_unparked()); - debug_assert_eq!(0, ret.num_searching()); - ret - } - - fn load(cell: &AtomicUsize, ordering: Ordering) -> State { - State(cell.load(ordering)) - } - - fn unpark_one(cell: &AtomicUsize, num_searching: usize) { - cell.fetch_add(num_searching | (1 << UNPARK_SHIFT), SeqCst); - } - - fn inc_num_searching(cell: &AtomicUsize, ordering: Ordering) { - cell.fetch_add(1, ordering); - } - - /// Returns `true` if this is the final searching worker - fn dec_num_searching(cell: &AtomicUsize) -> bool { - let state = State(cell.fetch_sub(1, SeqCst)); - state.num_searching() == 1 - } - - /// Track a sleeping worker - /// - /// Returns `true` if this is the final searching worker. 
- fn dec_num_unparked(cell: &AtomicUsize, is_searching: bool) -> bool { - let mut dec = 1 << UNPARK_SHIFT; - - if is_searching { - dec += 1; - } - - let prev = State(cell.fetch_sub(dec, SeqCst)); - is_searching && prev.num_searching() == 1 - } - - /// Number of workers currently searching - fn num_searching(self) -> usize { - self.0 & SEARCH_MASK - } - - /// Number of workers currently unparked - fn num_unparked(self) -> usize { - (self.0 & UNPARK_MASK) >> UNPARK_SHIFT - } -} - -impl From for State { - fn from(src: usize) -> State { - State(src) - } -} - -impl From for usize { - fn from(src: State) -> usize { - src.0 - } -} - -impl fmt::Debug for State { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("worker::State") - .field("num_unparked", &self.num_unparked()) - .field("num_searching", &self.num_searching()) - .finish() - } -} - -#[test] -fn test_state() { - assert_eq!(0, UNPARK_MASK & SEARCH_MASK); - assert_eq!(0, !(UNPARK_MASK | SEARCH_MASK)); - - let state = State::new(10); - assert_eq!(10, state.num_unparked()); - assert_eq!(0, state.num_searching()); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,103 +0,0 @@ -//! Multi-threaded runtime - -mod counters; -use counters::Counters; - -mod handle; -pub(crate) use handle::Handle; - -mod overflow; -pub(crate) use overflow::Overflow; - -mod idle; -use self::idle::Idle; - -mod stats; -pub(crate) use stats::Stats; - -mod park; -pub(crate) use park::{Parker, Unparker}; - -pub(crate) mod queue; - -mod worker; -pub(crate) use worker::{Context, Launch, Shared}; - -cfg_taskdump! 
{ - mod trace; - use trace::TraceStatus; - - pub(crate) use worker::Synced; -} - -cfg_not_taskdump! { - mod trace_mock; - use trace_mock::TraceStatus; -} - -pub(crate) use worker::block_in_place; - -use crate::loom::sync::Arc; -use crate::runtime::{ - blocking, - driver::{self, Driver}, - scheduler, Config, -}; -use crate::util::RngSeedGenerator; - -use std::fmt; -use std::future::Future; - -/// Work-stealing based thread pool for executing futures. -pub(crate) struct MultiThread; - -// ===== impl MultiThread ===== - -impl MultiThread { - pub(crate) fn new( - size: usize, - driver: Driver, - driver_handle: driver::Handle, - blocking_spawner: blocking::Spawner, - seed_generator: RngSeedGenerator, - config: Config, - ) -> (MultiThread, Arc, Launch) { - let parker = Parker::new(driver); - let (handle, launch) = worker::create( - size, - parker, - driver_handle, - blocking_spawner, - seed_generator, - config, - ); - - (MultiThread, handle, launch) - } - - /// Blocks the current thread waiting for the future to complete. - /// - /// The future will execute on the current thread, but all spawned tasks - /// will be executed on the thread pool. 
- pub(crate) fn block_on(&self, handle: &scheduler::Handle, future: F) -> F::Output - where - F: Future, - { - crate::runtime::context::enter_runtime(handle, true, |blocking| { - blocking.block_on(future).expect("failed to park thread") - }) - } - - pub(crate) fn shutdown(&mut self, handle: &scheduler::Handle) { - match handle { - scheduler::Handle::MultiThread(handle) => handle.shutdown(), - _ => panic!("expected MultiThread scheduler"), - } - } -} - -impl fmt::Debug for MultiThread { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("MultiThread").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/overflow.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/overflow.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/overflow.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/overflow.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -use crate::runtime::task; - -#[cfg(test)] -use std::cell::RefCell; - -pub(crate) trait Overflow { - fn push(&self, task: task::Notified); - - fn push_batch(&self, iter: I) - where - I: Iterator>; -} - -#[cfg(test)] -impl Overflow for RefCell>> { - fn push(&self, task: task::Notified) { - self.borrow_mut().push(task); - } - - fn push_batch(&self, iter: I) - where - I: Iterator>, - { - self.borrow_mut().extend(iter); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/park.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/park.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/park.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/park.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,232 +0,0 @@ -//! Parks the runtime. -//! -//! A combination of the various resource driver park handles. 
- -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::{Arc, Condvar, Mutex}; -use crate::runtime::driver::{self, Driver}; -use crate::util::TryLock; - -use std::sync::atomic::Ordering::SeqCst; -use std::time::Duration; - -pub(crate) struct Parker { - inner: Arc, -} - -pub(crate) struct Unparker { - inner: Arc, -} - -struct Inner { - /// Avoids entering the park if possible - state: AtomicUsize, - - /// Used to coordinate access to the driver / condvar - mutex: Mutex<()>, - - /// Condvar to block on if the driver is unavailable. - condvar: Condvar, - - /// Resource (I/O, time, ...) driver - shared: Arc, -} - -const EMPTY: usize = 0; -const PARKED_CONDVAR: usize = 1; -const PARKED_DRIVER: usize = 2; -const NOTIFIED: usize = 3; - -/// Shared across multiple Parker handles -struct Shared { - /// Shared driver. Only one thread at a time can use this - driver: TryLock, -} - -impl Parker { - pub(crate) fn new(driver: Driver) -> Parker { - Parker { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - mutex: Mutex::new(()), - condvar: Condvar::new(), - shared: Arc::new(Shared { - driver: TryLock::new(driver), - }), - }), - } - } - - pub(crate) fn unpark(&self) -> Unparker { - Unparker { - inner: self.inner.clone(), - } - } - - pub(crate) fn park(&mut self, handle: &driver::Handle) { - self.inner.park(handle); - } - - pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) { - // Only parking with zero is supported... 
- assert_eq!(duration, Duration::from_millis(0)); - - if let Some(mut driver) = self.inner.shared.driver.try_lock() { - driver.park_timeout(handle, duration) - } - } - - pub(crate) fn shutdown(&mut self, handle: &driver::Handle) { - self.inner.shutdown(handle); - } -} - -impl Clone for Parker { - fn clone(&self) -> Parker { - Parker { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - mutex: Mutex::new(()), - condvar: Condvar::new(), - shared: self.inner.shared.clone(), - }), - } - } -} - -impl Unparker { - pub(crate) fn unpark(&self, driver: &driver::Handle) { - self.inner.unpark(driver); - } -} - -impl Inner { - /// Parks the current thread for at most `dur`. - fn park(&self, handle: &driver::Handle) { - // If we were previously notified then we consume this notification and - // return quickly. - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - return; - } - - if let Some(mut driver) = self.shared.driver.try_lock() { - self.park_driver(&mut driver, handle); - } else { - self.park_condvar(); - } - } - - fn park_condvar(&self) { - // Otherwise we need to coordinate going to sleep - let mut m = self.mutex.lock(); - - match self - .state - .compare_exchange(EMPTY, PARKED_CONDVAR, SeqCst, SeqCst) - { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. 
- let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park state; actual = {}", actual), - } - - loop { - m = self.condvar.wait(m).unwrap(); - - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - // got a notification - return; - } - - // spurious wakeup, go back to sleep - } - } - - fn park_driver(&self, driver: &mut Driver, handle: &driver::Handle) { - match self - .state - .compare_exchange(EMPTY, PARKED_DRIVER, SeqCst, SeqCst) - { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. - let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park state; actual = {}", actual), - } - - driver.park(handle); - - match self.state.swap(EMPTY, SeqCst) { - NOTIFIED => {} // got a notification, hurray! - PARKED_DRIVER => {} // no notification, alas - n => panic!("inconsistent park_timeout state: {}", n), - } - } - - fn unpark(&self, driver: &driver::Handle) { - // To ensure the unparked thread will observe any writes we made before - // this call, we must perform a release operation that `park` can - // synchronize with. To do that we must write `NOTIFIED` even if `state` - // is already `NOTIFIED`. That is why this must be a swap rather than a - // compare-and-swap that returns if it reads `NOTIFIED` on failure. 
- match self.state.swap(NOTIFIED, SeqCst) { - EMPTY => {} // no one was waiting - NOTIFIED => {} // already unparked - PARKED_CONDVAR => self.unpark_condvar(), - PARKED_DRIVER => driver.unpark(), - actual => panic!("inconsistent state in unpark; actual = {}", actual), - } - } - - fn unpark_condvar(&self) { - // There is a period between when the parked thread sets `state` to - // `PARKED` (or last checked `state` in the case of a spurious wake - // up) and when it actually waits on `cvar`. If we were to notify - // during this period it would be ignored and then when the parked - // thread went to sleep it would never wake up. Fortunately, it has - // `lock` locked at this stage so we can acquire `lock` to wait until - // it is ready to receive the notification. - // - // Releasing `lock` before the call to `notify_one` means that when the - // parked thread wakes it doesn't get woken only to have to wait for us - // to release `lock`. - drop(self.mutex.lock()); - - self.condvar.notify_one() - } - - fn shutdown(&self, handle: &driver::Handle) { - if let Some(mut driver) = self.shared.driver.try_lock() { - driver.shutdown(handle); - } - - self.condvar.notify_all(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/queue.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/queue.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/queue.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/queue.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,608 +0,0 @@ -//! 
Run-queue structures to support a work-stealing scheduler - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::Arc; -use crate::runtime::scheduler::multi_thread::{Overflow, Stats}; -use crate::runtime::task; - -use std::mem::{self, MaybeUninit}; -use std::ptr; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; - -// Use wider integers when possible to increase ABA resilience. -// -// See issue #5041: . -cfg_has_atomic_u64! { - type UnsignedShort = u32; - type UnsignedLong = u64; - type AtomicUnsignedShort = crate::loom::sync::atomic::AtomicU32; - type AtomicUnsignedLong = crate::loom::sync::atomic::AtomicU64; -} -cfg_not_has_atomic_u64! { - type UnsignedShort = u16; - type UnsignedLong = u32; - type AtomicUnsignedShort = crate::loom::sync::atomic::AtomicU16; - type AtomicUnsignedLong = crate::loom::sync::atomic::AtomicU32; -} - -/// Producer handle. May only be used from a single thread. -pub(crate) struct Local { - inner: Arc>, -} - -/// Consumer handle. May be used from many threads. -pub(crate) struct Steal(Arc>); - -pub(crate) struct Inner { - /// Concurrently updated by many threads. - /// - /// Contains two `UnsignedShort` values. The LSB byte is the "real" head of - /// the queue. The `UnsignedShort` in the MSB is set by a stealer in process - /// of stealing values. It represents the first value being stolen in the - /// batch. The `UnsignedShort` indices are intentionally wider than strictly - /// required for buffer indexing in order to provide ABA mitigation and make - /// it possible to distinguish between full and empty buffers. - /// - /// When both `UnsignedShort` values are the same, there is no active - /// stealer. - /// - /// Tracking an in-progress stealer prevents a wrapping scenario. - head: AtomicUnsignedLong, - - /// Only updated by producer thread but read by many threads. 
- tail: AtomicUnsignedShort, - - /// Elements - buffer: Box<[UnsafeCell>>; LOCAL_QUEUE_CAPACITY]>, -} - -unsafe impl Send for Inner {} -unsafe impl Sync for Inner {} - -#[cfg(not(loom))] -const LOCAL_QUEUE_CAPACITY: usize = 256; - -// Shrink the size of the local queue when using loom. This shouldn't impact -// logic, but allows loom to test more edge cases in a reasonable a mount of -// time. -#[cfg(loom)] -const LOCAL_QUEUE_CAPACITY: usize = 4; - -const MASK: usize = LOCAL_QUEUE_CAPACITY - 1; - -// Constructing the fixed size array directly is very awkward. The only way to -// do it is to repeat `UnsafeCell::new(MaybeUninit::uninit())` 256 times, as -// the contents are not Copy. The trick with defining a const doesn't work for -// generic types. -fn make_fixed_size(buffer: Box<[T]>) -> Box<[T; LOCAL_QUEUE_CAPACITY]> { - assert_eq!(buffer.len(), LOCAL_QUEUE_CAPACITY); - - // safety: We check that the length is correct. - unsafe { Box::from_raw(Box::into_raw(buffer).cast()) } -} - -/// Create a new local run-queue -pub(crate) fn local() -> (Steal, Local) { - let mut buffer = Vec::with_capacity(LOCAL_QUEUE_CAPACITY); - - for _ in 0..LOCAL_QUEUE_CAPACITY { - buffer.push(UnsafeCell::new(MaybeUninit::uninit())); - } - - let inner = Arc::new(Inner { - head: AtomicUnsignedLong::new(0), - tail: AtomicUnsignedShort::new(0), - buffer: make_fixed_size(buffer.into_boxed_slice()), - }); - - let local = Local { - inner: inner.clone(), - }; - - let remote = Steal(inner); - - (remote, local) -} - -impl Local { - /// Returns the number of entries in the queue - pub(crate) fn len(&self) -> usize { - self.inner.len() as usize - } - - /// How many tasks can be pushed into the queue - pub(crate) fn remaining_slots(&self) -> usize { - self.inner.remaining_slots() - } - - pub(crate) fn max_capacity(&self) -> usize { - LOCAL_QUEUE_CAPACITY - } - - /// Returns false if there are any entries in the queue - /// - /// Separate to is_stealable so that refactors of is_stealable to "protect" - 
/// some tasks from stealing won't affect this - pub(crate) fn has_tasks(&self) -> bool { - !self.inner.is_empty() - } - - /// Pushes a batch of tasks to the back of the queue. All tasks must fit in - /// the local queue. - /// - /// # Panics - /// - /// The method panics if there is not enough capacity to fit in the queue. - pub(crate) fn push_back(&mut self, tasks: impl ExactSizeIterator>) { - let len = tasks.len(); - assert!(len <= LOCAL_QUEUE_CAPACITY); - - if len == 0 { - // Nothing to do - return; - } - - let head = self.inner.head.load(Acquire); - let (steal, _) = unpack(head); - - // safety: this is the **only** thread that updates this cell. - let mut tail = unsafe { self.inner.tail.unsync_load() }; - - if tail.wrapping_sub(steal) <= (LOCAL_QUEUE_CAPACITY - len) as UnsignedShort { - // Yes, this if condition is structured a bit weird (first block - // does nothing, second returns an error). It is this way to match - // `push_back_or_overflow`. - } else { - panic!() - } - - for task in tasks { - let idx = tail as usize & MASK; - - self.inner.buffer[idx].with_mut(|ptr| { - // Write the task to the slot - // - // Safety: There is only one producer and the above `if` - // condition ensures we don't touch a cell if there is a - // value, thus no consumer. - unsafe { - ptr::write((*ptr).as_mut_ptr(), task); - } - }); - - tail = tail.wrapping_add(1); - } - - self.inner.tail.store(tail, Release); - } - - /// Pushes a task to the back of the local queue, if there is not enough - /// capacity in the queue, this triggers the overflow operation. - /// - /// When the queue overflows, half of the curent contents of the queue is - /// moved to the given Injection queue. This frees up capacity for more - /// tasks to be pushed into the local queue. 
- pub(crate) fn push_back_or_overflow>( - &mut self, - mut task: task::Notified, - overflow: &O, - stats: &mut Stats, - ) { - let tail = loop { - let head = self.inner.head.load(Acquire); - let (steal, real) = unpack(head); - - // safety: this is the **only** thread that updates this cell. - let tail = unsafe { self.inner.tail.unsync_load() }; - - if tail.wrapping_sub(steal) < LOCAL_QUEUE_CAPACITY as UnsignedShort { - // There is capacity for the task - break tail; - } else if steal != real { - // Concurrently stealing, this will free up capacity, so only - // push the task onto the inject queue - overflow.push(task); - return; - } else { - // Push the current task and half of the queue into the - // inject queue. - match self.push_overflow(task, real, tail, overflow, stats) { - Ok(_) => return, - // Lost the race, try again - Err(v) => { - task = v; - } - } - } - }; - - self.push_back_finish(task, tail); - } - - // Second half of `push_back` - fn push_back_finish(&self, task: task::Notified, tail: UnsignedShort) { - // Map the position to a slot index. - let idx = tail as usize & MASK; - - self.inner.buffer[idx].with_mut(|ptr| { - // Write the task to the slot - // - // Safety: There is only one producer and the above `if` - // condition ensures we don't touch a cell if there is a - // value, thus no consumer. - unsafe { - ptr::write((*ptr).as_mut_ptr(), task); - } - }); - - // Make the task available. Synchronizes with a load in - // `steal_into2`. - self.inner.tail.store(tail.wrapping_add(1), Release); - } - - /// Moves a batch of tasks into the inject queue. - /// - /// This will temporarily make some of the tasks unavailable to stealers. - /// Once `push_overflow` is done, a notification is sent out, so if other - /// workers "missed" some of the tasks during a steal, they will get - /// another opportunity. 
- #[inline(never)] - fn push_overflow>( - &mut self, - task: task::Notified, - head: UnsignedShort, - tail: UnsignedShort, - overflow: &O, - stats: &mut Stats, - ) -> Result<(), task::Notified> { - /// How many elements are we taking from the local queue. - /// - /// This is one less than the number of tasks pushed to the inject - /// queue as we are also inserting the `task` argument. - const NUM_TASKS_TAKEN: UnsignedShort = (LOCAL_QUEUE_CAPACITY / 2) as UnsignedShort; - - assert_eq!( - tail.wrapping_sub(head) as usize, - LOCAL_QUEUE_CAPACITY, - "queue is not full; tail = {}; head = {}", - tail, - head - ); - - let prev = pack(head, head); - - // Claim a bunch of tasks - // - // We are claiming the tasks **before** reading them out of the buffer. - // This is safe because only the **current** thread is able to push new - // tasks. - // - // There isn't really any need for memory ordering... Relaxed would - // work. This is because all tasks are pushed into the queue from the - // current thread (or memory has been acquired if the local queue handle - // moved). - if self - .inner - .head - .compare_exchange( - prev, - pack( - head.wrapping_add(NUM_TASKS_TAKEN), - head.wrapping_add(NUM_TASKS_TAKEN), - ), - Release, - Relaxed, - ) - .is_err() - { - // We failed to claim the tasks, losing the race. Return out of - // this function and try the full `push` routine again. The queue - // may not be full anymore. - return Err(task); - } - - /// An iterator that takes elements out of the run queue. 
- struct BatchTaskIter<'a, T: 'static> { - buffer: &'a [UnsafeCell>>; LOCAL_QUEUE_CAPACITY], - head: UnsignedLong, - i: UnsignedLong, - } - impl<'a, T: 'static> Iterator for BatchTaskIter<'a, T> { - type Item = task::Notified; - - #[inline] - fn next(&mut self) -> Option> { - if self.i == UnsignedLong::from(NUM_TASKS_TAKEN) { - None - } else { - let i_idx = self.i.wrapping_add(self.head) as usize & MASK; - let slot = &self.buffer[i_idx]; - - // safety: Our CAS from before has assumed exclusive ownership - // of the task pointers in this range. - let task = slot.with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); - - self.i += 1; - Some(task) - } - } - } - - // safety: The CAS above ensures that no consumer will look at these - // values again, and we are the only producer. - let batch_iter = BatchTaskIter { - buffer: &self.inner.buffer, - head: head as UnsignedLong, - i: 0, - }; - overflow.push_batch(batch_iter.chain(std::iter::once(task))); - - // Add 1 to factor in the task currently being scheduled. - stats.incr_overflow_count(); - - Ok(()) - } - - /// Pops a task from the local queue. - pub(crate) fn pop(&mut self) -> Option> { - let mut head = self.inner.head.load(Acquire); - - let idx = loop { - let (steal, real) = unpack(head); - - // safety: this is the **only** thread that updates this cell. - let tail = unsafe { self.inner.tail.unsync_load() }; - - if real == tail { - // queue is empty - return None; - } - - let next_real = real.wrapping_add(1); - - // If `steal == real` there are no concurrent stealers. Both `steal` - // and `real` are updated. - let next = if steal == real { - pack(next_real, next_real) - } else { - assert_ne!(steal, next_real); - pack(steal, next_real) - }; - - // Attempt to claim a task. 
- let res = self - .inner - .head - .compare_exchange(head, next, AcqRel, Acquire); - - match res { - Ok(_) => break real as usize & MASK, - Err(actual) => head = actual, - } - }; - - Some(self.inner.buffer[idx].with(|ptr| unsafe { ptr::read(ptr).assume_init() })) - } -} - -impl Steal { - pub(crate) fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Steals half the tasks from self and place them into `dst`. - pub(crate) fn steal_into( - &self, - dst: &mut Local, - dst_stats: &mut Stats, - ) -> Option> { - // Safety: the caller is the only thread that mutates `dst.tail` and - // holds a mutable reference. - let dst_tail = unsafe { dst.inner.tail.unsync_load() }; - - // To the caller, `dst` may **look** empty but still have values - // contained in the buffer. If another thread is concurrently stealing - // from `dst` there may not be enough capacity to steal. - let (steal, _) = unpack(dst.inner.head.load(Acquire)); - - if dst_tail.wrapping_sub(steal) > LOCAL_QUEUE_CAPACITY as UnsignedShort / 2 { - // we *could* try to steal less here, but for simplicity, we're just - // going to abort. - return None; - } - - // Steal the tasks into `dst`'s buffer. This does not yet expose the - // tasks in `dst`. - let mut n = self.steal_into2(dst, dst_tail); - - if n == 0 { - // No tasks were stolen - return None; - } - - dst_stats.incr_steal_count(n as u16); - dst_stats.incr_steal_operations(); - - // We are returning a task here - n -= 1; - - let ret_pos = dst_tail.wrapping_add(n); - let ret_idx = ret_pos as usize & MASK; - - // safety: the value was written as part of `steal_into2` and not - // exposed to stealers, so no other thread can access it. 
- let ret = dst.inner.buffer[ret_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); - - if n == 0 { - // The `dst` queue is empty, but a single task was stolen - return Some(ret); - } - - // Make the stolen items available to consumers - dst.inner.tail.store(dst_tail.wrapping_add(n), Release); - - Some(ret) - } - - // Steal tasks from `self`, placing them into `dst`. Returns the number of - // tasks that were stolen. - fn steal_into2(&self, dst: &mut Local, dst_tail: UnsignedShort) -> UnsignedShort { - let mut prev_packed = self.0.head.load(Acquire); - let mut next_packed; - - let n = loop { - let (src_head_steal, src_head_real) = unpack(prev_packed); - let src_tail = self.0.tail.load(Acquire); - - // If these two do not match, another thread is concurrently - // stealing from the queue. - if src_head_steal != src_head_real { - return 0; - } - - // Number of available tasks to steal - let n = src_tail.wrapping_sub(src_head_real); - let n = n - n / 2; - - if n == 0 { - // No tasks available to steal - return 0; - } - - // Update the real head index to acquire the tasks. - let steal_to = src_head_real.wrapping_add(n); - assert_ne!(src_head_steal, steal_to); - next_packed = pack(src_head_steal, steal_to); - - // Claim all those tasks. This is done by incrementing the "real" - // head but not the steal. By doing this, no other thread is able to - // steal from this queue until the current thread completes. 
- let res = self - .0 - .head - .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); - - match res { - Ok(_) => break n, - Err(actual) => prev_packed = actual, - } - }; - - assert!( - n <= LOCAL_QUEUE_CAPACITY as UnsignedShort / 2, - "actual = {}", - n - ); - - let (first, _) = unpack(next_packed); - - // Take all the tasks - for i in 0..n { - // Compute the positions - let src_pos = first.wrapping_add(i); - let dst_pos = dst_tail.wrapping_add(i); - - // Map to slots - let src_idx = src_pos as usize & MASK; - let dst_idx = dst_pos as usize & MASK; - - // Read the task - // - // safety: We acquired the task with the atomic exchange above. - let task = self.0.buffer[src_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); - - // Write the task to the new slot - // - // safety: `dst` queue is empty and we are the only producer to - // this queue. - dst.inner.buffer[dst_idx] - .with_mut(|ptr| unsafe { ptr::write((*ptr).as_mut_ptr(), task) }); - } - - let mut prev_packed = next_packed; - - // Update `src_head_steal` to match `src_head_real` signalling that the - // stealing routine is complete. - loop { - let head = unpack(prev_packed).1; - next_packed = pack(head, head); - - let res = self - .0 - .head - .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); - - match res { - Ok(_) => return n, - Err(actual) => { - let (actual_steal, actual_real) = unpack(actual); - - assert_ne!(actual_steal, actual_real); - - prev_packed = actual; - } - } - } - } -} - -cfg_metrics! 
{ - impl Steal { - pub(crate) fn len(&self) -> usize { - self.0.len() as _ - } - } -} - -impl Clone for Steal { - fn clone(&self) -> Steal { - Steal(self.0.clone()) - } -} - -impl Drop for Local { - fn drop(&mut self) { - if !std::thread::panicking() { - assert!(self.pop().is_none(), "queue not empty"); - } - } -} - -impl Inner { - fn remaining_slots(&self) -> usize { - let (steal, _) = unpack(self.head.load(Acquire)); - let tail = self.tail.load(Acquire); - - LOCAL_QUEUE_CAPACITY - (tail.wrapping_sub(steal) as usize) - } - - fn len(&self) -> UnsignedShort { - let (_, head) = unpack(self.head.load(Acquire)); - let tail = self.tail.load(Acquire); - - tail.wrapping_sub(head) - } - - fn is_empty(&self) -> bool { - self.len() == 0 - } -} - -/// Split the head value into the real head and the index a stealer is working -/// on. -fn unpack(n: UnsignedLong) -> (UnsignedShort, UnsignedShort) { - let real = n & UnsignedShort::MAX as UnsignedLong; - let steal = n >> (mem::size_of::() * 8); - - (steal as UnsignedShort, real as UnsignedShort) -} - -/// Join the two head values -fn pack(steal: UnsignedShort, real: UnsignedShort) -> UnsignedLong { - (real as UnsignedLong) | ((steal as UnsignedLong) << (mem::size_of::() * 8)) -} - -#[test] -fn test_local_queue_capacity() { - assert!(LOCAL_QUEUE_CAPACITY - 1 <= u8::MAX as usize); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/stats.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/stats.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/stats.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/stats.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,140 +0,0 @@ -use crate::runtime::{Config, MetricsBatch, WorkerMetrics}; - -use std::cmp; -use std::time::{Duration, Instant}; - -/// Per-worker statistics. 
This is used for both tuning the scheduler and -/// reporting runtime-level metrics/stats. -pub(crate) struct Stats { - /// The metrics batch used to report runtime-level metrics/stats to the - /// user. - batch: MetricsBatch, - - /// Instant at which work last resumed (continued after park). - /// - /// This duplicates the value stored in `MetricsBatch`. We will unify - /// `Stats` and `MetricsBatch` when we stabilize metrics. - processing_scheduled_tasks_started_at: Instant, - - /// Number of tasks polled in the batch of scheduled tasks - tasks_polled_in_batch: usize, - - /// Exponentially-weighted moving average of time spent polling scheduled a - /// task. - /// - /// Tracked in nanoseconds, stored as a f64 since that is what we use with - /// the EWMA calculations - task_poll_time_ewma: f64, -} - -/// How to weigh each individual poll time, value is plucked from thin air. -const TASK_POLL_TIME_EWMA_ALPHA: f64 = 0.1; - -/// Ideally, we wouldn't go above this, value is plucked from thin air. -const TARGET_GLOBAL_QUEUE_INTERVAL: f64 = Duration::from_micros(200).as_nanos() as f64; - -/// Max value for the global queue interval. This is 2x the previous default -const MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 127; - -/// This is the previous default -const TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 61; - -impl Stats { - pub(crate) fn new(worker_metrics: &WorkerMetrics) -> Stats { - // Seed the value with what we hope to see. - let task_poll_time_ewma = - TARGET_GLOBAL_QUEUE_INTERVAL / TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL as f64; - - Stats { - batch: MetricsBatch::new(worker_metrics), - processing_scheduled_tasks_started_at: Instant::now(), - tasks_polled_in_batch: 0, - task_poll_time_ewma, - } - } - - pub(crate) fn tuned_global_queue_interval(&self, config: &Config) -> u32 { - // If an interval is explicitly set, don't tune. 
- if let Some(configured) = config.global_queue_interval { - return configured; - } - - // As of Rust 1.45, casts from f64 -> u32 are saturating, which is fine here. - let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / self.task_poll_time_ewma) as u32; - - cmp::max( - // We don't want to return less than 2 as that would result in the - // global queue always getting checked first. - 2, - cmp::min( - MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL, - tasks_per_interval, - ), - ) - } - - pub(crate) fn submit(&mut self, to: &WorkerMetrics) { - self.batch.submit(to, self.task_poll_time_ewma as u64); - } - - pub(crate) fn about_to_park(&mut self) { - self.batch.about_to_park(); - } - - pub(crate) fn inc_local_schedule_count(&mut self) { - self.batch.inc_local_schedule_count(); - } - - pub(crate) fn start_processing_scheduled_tasks(&mut self) { - self.batch.start_processing_scheduled_tasks(); - - self.processing_scheduled_tasks_started_at = Instant::now(); - self.tasks_polled_in_batch = 0; - } - - pub(crate) fn end_processing_scheduled_tasks(&mut self) { - self.batch.end_processing_scheduled_tasks(); - - // Update the EWMA task poll time - if self.tasks_polled_in_batch > 0 { - let now = Instant::now(); - - // If we "overflow" this conversion, we have bigger problems than - // slightly off stats. - let elapsed = (now - self.processing_scheduled_tasks_started_at).as_nanos() as f64; - let num_polls = self.tasks_polled_in_batch as f64; - - // Calculate the mean poll duration for a single task in the batch - let mean_poll_duration = elapsed / num_polls; - - // Compute the alpha weighted by the number of tasks polled this batch. - let weighted_alpha = 1.0 - (1.0 - TASK_POLL_TIME_EWMA_ALPHA).powf(num_polls); - - // Now compute the new weighted average task poll time. 
- self.task_poll_time_ewma = weighted_alpha * mean_poll_duration - + (1.0 - weighted_alpha) * self.task_poll_time_ewma; - } - } - - pub(crate) fn start_poll(&mut self) { - self.batch.start_poll(); - - self.tasks_polled_in_batch += 1; - } - - pub(crate) fn end_poll(&mut self) { - self.batch.end_poll(); - } - - pub(crate) fn incr_steal_count(&mut self, by: u16) { - self.batch.incr_steal_count(by); - } - - pub(crate) fn incr_steal_operations(&mut self) { - self.batch.incr_steal_operations(); - } - - pub(crate) fn incr_overflow_count(&mut self) { - self.batch.incr_overflow_count(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/trace_mock.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/trace_mock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/trace_mock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/trace_mock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -pub(super) struct TraceStatus {} - -impl TraceStatus { - pub(super) fn new(_: usize) -> Self { - Self {} - } - - pub(super) fn trace_requested(&self) -> bool { - false - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/trace.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/trace.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/trace.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/trace.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,61 +0,0 @@ -use crate::loom::sync::atomic::{AtomicBool, Ordering}; -use crate::loom::sync::{Barrier, Mutex}; -use crate::runtime::dump::Dump; -use crate::runtime::scheduler::multi_thread::Handle; -use crate::sync::notify::Notify; - -/// Tracing status of the worker. 
-pub(super) struct TraceStatus { - pub(super) trace_requested: AtomicBool, - pub(super) trace_start: Barrier, - pub(super) trace_end: Barrier, - pub(super) result_ready: Notify, - pub(super) trace_result: Mutex>, -} - -impl TraceStatus { - pub(super) fn new(remotes_len: usize) -> Self { - Self { - trace_requested: AtomicBool::new(false), - trace_start: Barrier::new(remotes_len), - trace_end: Barrier::new(remotes_len), - result_ready: Notify::new(), - trace_result: Mutex::new(None), - } - } - - pub(super) fn trace_requested(&self) -> bool { - self.trace_requested.load(Ordering::Relaxed) - } - - pub(super) async fn start_trace_request(&self, handle: &Handle) { - while self - .trace_requested - .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) - .is_err() - { - handle.notify_all(); - crate::task::yield_now().await; - } - } - - pub(super) fn stash_result(&self, dump: Dump) { - let _ = self.trace_result.lock().insert(dump); - self.result_ready.notify_one(); - } - - pub(super) fn take_result(&self) -> Option { - self.trace_result.lock().take() - } - - pub(super) async fn end_trace_request(&self, handle: &Handle) { - while self - .trace_requested - .compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed) - .is_err() - { - handle.notify_all(); - crate::task::yield_now().await; - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/metrics.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/metrics.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/metrics.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/metrics.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -use super::Shared; - -impl Shared { - pub(crate) fn injection_queue_depth(&self) -> usize { - self.inject.len() - } - - pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { - 
self.remotes[worker].steal.len() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/taskdump_mock.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/taskdump_mock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/taskdump_mock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/taskdump_mock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,7 +0,0 @@ -use super::{Core, Handle}; - -impl Handle { - pub(super) fn trace_core(&self, core: Box) -> Box { - core - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,79 +0,0 @@ -use super::{Core, Handle, Shared}; - -use crate::loom::sync::Arc; -use crate::runtime::scheduler::multi_thread::Stats; -use crate::runtime::task::trace::trace_multi_thread; -use crate::runtime::{dump, WorkerMetrics}; - -use std::time::Duration; - -impl Handle { - pub(super) fn trace_core(&self, mut core: Box) -> Box { - core.is_traced = false; - - if core.is_shutdown { - return core; - } - - // wait for other workers, or timeout without tracing - let timeout = Duration::from_millis(250); // a _very_ generous timeout - let barrier = - if let Some(barrier) = self.shared.trace_status.trace_start.wait_timeout(timeout) { - barrier - } else { - // don't attempt to trace - return core; - }; - - if !barrier.is_leader() { - // wait for leader to finish tracing - self.shared.trace_status.trace_end.wait(); - return core; - } - - // trace - - let owned = &self.shared.owned; - let mut local = 
self.shared.steal_all(); - let synced = &self.shared.synced; - let injection = &self.shared.inject; - - // safety: `trace_multi_thread` is invoked with the same `synced` that `injection` - // was created with. - let traces = unsafe { trace_multi_thread(owned, &mut local, synced, injection) } - .into_iter() - .map(dump::Task::new) - .collect(); - - let result = dump::Dump::new(traces); - - // stash the result - self.shared.trace_status.stash_result(result); - - // allow other workers to proceed - self.shared.trace_status.trace_end.wait(); - - core - } -} - -impl Shared { - /// Steal all tasks from remotes into a single local queue. - pub(super) fn steal_all(&self) -> super::queue::Local> { - let (_steal, mut local) = super::queue::local(); - - let worker_metrics = WorkerMetrics::new(); - let mut stats = Stats::new(&worker_metrics); - - for remote in self.remotes.iter() { - let steal = &remote.steal; - while !steal.is_empty() { - if let Some(task) = steal.steal_into(&mut local, &mut stats) { - local.push_back([task].into_iter()); - } - } - } - - local - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread/worker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1226 +0,0 @@ -//! A scheduler is initialized with a fixed number of workers. Each worker is -//! driven by a thread. Each worker has a "core" which contains data such as the -//! run queue and other state. When `block_in_place` is called, the worker's -//! "core" is handed off to a new thread allowing the scheduler to continue to -//! make progress while the originating thread blocks. -//! -//! # Shutdown -//! -//! Shutting down the runtime involves the following steps: -//! -//! 1. 
The Shared::close method is called. This closes the inject queue and -//! OwnedTasks instance and wakes up all worker threads. -//! -//! 2. Each worker thread observes the close signal next time it runs -//! Core::maintenance by checking whether the inject queue is closed. -//! The Core::is_shutdown flag is set to true. -//! -//! 3. The worker thread calls `pre_shutdown` in parallel. Here, the worker -//! will keep removing tasks from OwnedTasks until it is empty. No new -//! tasks can be pushed to the OwnedTasks during or after this step as it -//! was closed in step 1. -//! -//! 5. The workers call Shared::shutdown to enter the single-threaded phase of -//! shutdown. These calls will push their core to Shared::shutdown_cores, -//! and the last thread to push its core will finish the shutdown procedure. -//! -//! 6. The local run queue of each core is emptied, then the inject queue is -//! emptied. -//! -//! At this point, shutdown has completed. It is not possible for any of the -//! collections to contain any tasks at this point, as each collection was -//! closed first, then emptied afterwards. -//! -//! ## Spawns during shutdown -//! -//! When spawning tasks during shutdown, there are two cases: -//! -//! * The spawner observes the OwnedTasks being open, and the inject queue is -//! closed. -//! * The spawner observes the OwnedTasks being closed and doesn't check the -//! inject queue. -//! -//! The first case can only happen if the OwnedTasks::bind call happens before -//! or during step 1 of shutdown. In this case, the runtime will clean up the -//! task in step 3 of shutdown. -//! -//! In the latter case, the task was not spawned and the task is immediately -//! cancelled by the spawner. -//! -//! The correctness of shutdown requires both the inject queue and OwnedTasks -//! collection to have a closed bit. With a close bit on only the inject queue, -//! spawning could run in to a situation where a task is successfully bound long -//! 
after the runtime has shut down. With a close bit on only the OwnedTasks, -//! the first spawning situation could result in the notification being pushed -//! to the inject queue after step 6 of shutdown, which would leave a task in -//! the inject queue indefinitely. This would be a ref-count cycle and a memory -//! leak. - -use crate::loom::sync::{Arc, Mutex}; -use crate::runtime; -use crate::runtime::context; -use crate::runtime::scheduler::multi_thread::{ - idle, queue, Counters, Handle, Idle, Overflow, Parker, Stats, TraceStatus, Unparker, -}; -use crate::runtime::scheduler::{inject, Defer, Lock}; -use crate::runtime::task::OwnedTasks; -use crate::runtime::{ - blocking, coop, driver, scheduler, task, Config, SchedulerMetrics, WorkerMetrics, -}; -use crate::util::atomic_cell::AtomicCell; -use crate::util::rand::{FastRand, RngSeedGenerator}; - -use std::cell::RefCell; -use std::task::Waker; -use std::time::Duration; - -cfg_metrics! { - mod metrics; -} - -cfg_taskdump! { - mod taskdump; -} - -cfg_not_taskdump! { - mod taskdump_mock; -} - -/// A scheduler worker -pub(super) struct Worker { - /// Reference to scheduler's handle - handle: Arc, - - /// Index holding this worker's remote state - index: usize, - - /// Used to hand-off a worker's core to another thread. - core: AtomicCell, -} - -/// Core data -struct Core { - /// Used to schedule bookkeeping tasks every so often. - tick: u32, - - /// When a task is scheduled from a worker, it is stored in this slot. The - /// worker will check this slot for a task **before** checking the run - /// queue. This effectively results in the **last** scheduled task to be run - /// next (LIFO). This is an optimization for improving locality which - /// benefits message passing patterns and helps to reduce latency. - lifo_slot: Option, - - /// When `true`, locally scheduled tasks go to the LIFO slot. When `false`, - /// they go to the back of the `run_queue`. - lifo_enabled: bool, - - /// The worker-local run queue. 
- run_queue: queue::Local>, - - /// True if the worker is currently searching for more work. Searching - /// involves attempting to steal from other workers. - is_searching: bool, - - /// True if the scheduler is being shutdown - is_shutdown: bool, - - /// True if the scheduler is being traced - is_traced: bool, - - /// Parker - /// - /// Stored in an `Option` as the parker is added / removed to make the - /// borrow checker happy. - park: Option, - - /// Per-worker runtime stats - stats: Stats, - - /// How often to check the global queue - global_queue_interval: u32, - - /// Fast random number generator. - rand: FastRand, -} - -/// State shared across all workers -pub(crate) struct Shared { - /// Per-worker remote state. All other workers have access to this and is - /// how they communicate between each other. - remotes: Box<[Remote]>, - - /// Global task queue used for: - /// 1. Submit work to the scheduler while **not** currently on a worker thread. - /// 2. Submit work to the scheduler when a worker run queue is saturated - pub(super) inject: inject::Shared>, - - /// Coordinates idle workers - idle: Idle, - - /// Collection of all active tasks spawned onto this executor. - pub(crate) owned: OwnedTasks>, - - /// Data synchronized by the scheduler mutex - pub(super) synced: Mutex, - - /// Cores that have observed the shutdown signal - /// - /// The core is **not** placed back in the worker to avoid it from being - /// stolen by a thread that was spawned as part of `block_in_place`. - #[allow(clippy::vec_box)] // we're moving an already-boxed value - shutdown_cores: Mutex>>, - - /// The number of cores that have observed the trace signal. - pub(super) trace_status: TraceStatus, - - /// Scheduler configuration options - config: Config, - - /// Collects metrics from the runtime. - pub(super) scheduler_metrics: SchedulerMetrics, - - pub(super) worker_metrics: Box<[WorkerMetrics]>, - - /// Only held to trigger some code on drop. 
This is used to get internal - /// runtime metrics that can be useful when doing performance - /// investigations. This does nothing (empty struct, no drop impl) unless - /// the `tokio_internal_mt_counters` cfg flag is set. - _counters: Counters, -} - -/// Data synchronized by the scheduler mutex -pub(crate) struct Synced { - /// Synchronized state for `Idle`. - pub(super) idle: idle::Synced, - - /// Synchronized state for `Inject`. - pub(crate) inject: inject::Synced, -} - -/// Used to communicate with a worker from other threads. -struct Remote { - /// Steals tasks from this worker. - pub(super) steal: queue::Steal>, - - /// Unparks the associated worker thread - unpark: Unparker, -} - -/// Thread-local context -pub(crate) struct Context { - /// Worker - worker: Arc, - - /// Core data - core: RefCell>>, - - /// Tasks to wake after resource drivers are polled. This is mostly to - /// handle yielded tasks. - pub(crate) defer: Defer, -} - -/// Starts the workers -pub(crate) struct Launch(Vec>); - -/// Running a task may consume the core. If the core is still available when -/// running the task completes, it is returned. Otherwise, the worker will need -/// to stop processing. -type RunResult = Result, ()>; - -/// A task handle -type Task = task::Task>; - -/// A notified task handle -type Notified = task::Notified>; - -/// Value picked out of thin-air. Running the LIFO slot a handful of times -/// seemms sufficient to benefit from locality. More than 3 times probably is -/// overweighing. The value can be tuned in the future with data that shows -/// improvements. 
-const MAX_LIFO_POLLS_PER_TICK: usize = 3; - -pub(super) fn create( - size: usize, - park: Parker, - driver_handle: driver::Handle, - blocking_spawner: blocking::Spawner, - seed_generator: RngSeedGenerator, - config: Config, -) -> (Arc, Launch) { - let mut cores = Vec::with_capacity(size); - let mut remotes = Vec::with_capacity(size); - let mut worker_metrics = Vec::with_capacity(size); - - // Create the local queues - for _ in 0..size { - let (steal, run_queue) = queue::local(); - - let park = park.clone(); - let unpark = park.unpark(); - let metrics = WorkerMetrics::from_config(&config); - let stats = Stats::new(&metrics); - - cores.push(Box::new(Core { - tick: 0, - lifo_slot: None, - lifo_enabled: !config.disable_lifo_slot, - run_queue, - is_searching: false, - is_shutdown: false, - is_traced: false, - park: Some(park), - global_queue_interval: stats.tuned_global_queue_interval(&config), - stats, - rand: FastRand::from_seed(config.seed_generator.next_seed()), - })); - - remotes.push(Remote { steal, unpark }); - worker_metrics.push(metrics); - } - - let (idle, idle_synced) = Idle::new(size); - let (inject, inject_synced) = inject::Shared::new(); - - let remotes_len = remotes.len(); - let handle = Arc::new(Handle { - shared: Shared { - remotes: remotes.into_boxed_slice(), - inject, - idle, - owned: OwnedTasks::new(), - synced: Mutex::new(Synced { - idle: idle_synced, - inject: inject_synced, - }), - shutdown_cores: Mutex::new(vec![]), - trace_status: TraceStatus::new(remotes_len), - config, - scheduler_metrics: SchedulerMetrics::new(), - worker_metrics: worker_metrics.into_boxed_slice(), - _counters: Counters, - }, - driver: driver_handle, - blocking_spawner, - seed_generator, - }); - - let mut launch = Launch(vec![]); - - for (index, core) in cores.drain(..).enumerate() { - launch.0.push(Arc::new(Worker { - handle: handle.clone(), - index, - core: AtomicCell::new(Some(core)), - })); - } - - (handle, launch) -} - -#[track_caller] -pub(crate) fn block_in_place(f: 
F) -> R -where - F: FnOnce() -> R, -{ - // Try to steal the worker core back - struct Reset { - take_core: bool, - budget: coop::Budget, - } - - impl Drop for Reset { - fn drop(&mut self) { - with_current(|maybe_cx| { - if let Some(cx) = maybe_cx { - if self.take_core { - let core = cx.worker.core.take(); - let mut cx_core = cx.core.borrow_mut(); - assert!(cx_core.is_none()); - *cx_core = core; - } - - // Reset the task budget as we are re-entering the - // runtime. - coop::set(self.budget); - } - }); - } - } - - let mut had_entered = false; - let mut take_core = false; - - let setup_result = with_current(|maybe_cx| { - match ( - crate::runtime::context::current_enter_context(), - maybe_cx.is_some(), - ) { - (context::EnterRuntime::Entered { .. }, true) => { - // We are on a thread pool runtime thread, so we just need to - // set up blocking. - had_entered = true; - } - ( - context::EnterRuntime::Entered { - allow_block_in_place, - }, - false, - ) => { - // We are on an executor, but _not_ on the thread pool. That is - // _only_ okay if we are in a thread pool runtime's block_on - // method: - if allow_block_in_place { - had_entered = true; - return Ok(()); - } else { - // This probably means we are on the current_thread runtime or in a - // LocalSet, where it is _not_ okay to block. - return Err( - "can call blocking only when running on the multi-threaded runtime", - ); - } - } - (context::EnterRuntime::NotEntered, true) => { - // This is a nested call to block_in_place (we already exited). - // All the necessary setup has already been done. - return Ok(()); - } - (context::EnterRuntime::NotEntered, false) => { - // We are outside of the tokio runtime, so blocking is fine. - // We can also skip all of the thread pool blocking setup steps. - return Ok(()); - } - } - - let cx = maybe_cx.expect("no .is_some() == false cases above should lead here"); - - // Get the worker core. If none is set, then blocking is fine! 
- let core = match cx.core.borrow_mut().take() { - Some(core) => core, - None => return Ok(()), - }; - - // We are taking the core from the context and sending it to another - // thread. - take_core = true; - - // The parker should be set here - assert!(core.park.is_some()); - - // In order to block, the core must be sent to another thread for - // execution. - // - // First, move the core back into the worker's shared core slot. - cx.worker.core.set(core); - - // Next, clone the worker handle and send it to a new thread for - // processing. - // - // Once the blocking task is done executing, we will attempt to - // steal the core back. - let worker = cx.worker.clone(); - runtime::spawn_blocking(move || run(worker)); - Ok(()) - }); - - if let Err(panic_message) = setup_result { - panic!("{}", panic_message); - } - - if had_entered { - // Unset the current task's budget. Blocking sections are not - // constrained by task budgets. - let _reset = Reset { - take_core, - budget: coop::stop(), - }; - - crate::runtime::context::exit_runtime(f) - } else { - f() - } -} - -impl Launch { - pub(crate) fn launch(mut self) { - for worker in self.0.drain(..) { - runtime::spawn_blocking(move || run(worker)); - } - } -} - -fn run(worker: Arc) { - struct AbortOnPanic; - - impl Drop for AbortOnPanic { - fn drop(&mut self) { - if std::thread::panicking() { - eprintln!("worker thread panicking; aborting process"); - std::process::abort(); - } - } - } - - // Catching panics on worker threads in tests is quite tricky. Instead, when - // debug assertions are enabled, we just abort the process. - #[cfg(debug_assertions)] - let _abort_on_panic = AbortOnPanic; - - // Acquire a core. If this fails, then another thread is running this - // worker and there is nothing further to do. 
- let core = match worker.core.take() { - Some(core) => core, - None => return, - }; - - let handle = scheduler::Handle::MultiThread(worker.handle.clone()); - - crate::runtime::context::enter_runtime(&handle, true, |_| { - // Set the worker context. - let cx = scheduler::Context::MultiThread(Context { - worker, - core: RefCell::new(None), - defer: Defer::new(), - }); - - context::set_scheduler(&cx, || { - let cx = cx.expect_multi_thread(); - - // This should always be an error. It only returns a `Result` to support - // using `?` to short circuit. - assert!(cx.run(core).is_err()); - - // Check if there are any deferred tasks to notify. This can happen when - // the worker core is lost due to `block_in_place()` being called from - // within the task. - cx.defer.wake(); - }); - }); -} - -impl Context { - fn run(&self, mut core: Box) -> RunResult { - // Reset `lifo_enabled` here in case the core was previously stolen from - // a task that had the LIFO slot disabled. - self.reset_lifo_enabled(&mut core); - - // Start as "processing" tasks as polling tasks from the local queue - // will be one of the first things we do. - core.stats.start_processing_scheduled_tasks(); - - while !core.is_shutdown { - self.assert_lifo_enabled_is_correct(&core); - - if core.is_traced { - core = self.worker.handle.trace_core(core); - } - - // Increment the tick - core.tick(); - - // Run maintenance, if needed - core = self.maintenance(core); - - // First, check work available to the current worker. - if let Some(task) = core.next_task(&self.worker) { - core = self.run_task(task, core)?; - continue; - } - - // We consumed all work in the queues and will start searching for work. - core.stats.end_processing_scheduled_tasks(); - - // There is no more **local** work to process, try to steal work - // from other workers. 
- if let Some(task) = core.steal_work(&self.worker) { - // Found work, switch back to processing - core.stats.start_processing_scheduled_tasks(); - core = self.run_task(task, core)?; - } else { - // Wait for work - core = if !self.defer.is_empty() { - self.park_timeout(core, Some(Duration::from_millis(0))) - } else { - self.park(core) - }; - } - } - - core.pre_shutdown(&self.worker); - - // Signal shutdown - self.worker.handle.shutdown_core(core); - Err(()) - } - - fn run_task(&self, task: Notified, mut core: Box) -> RunResult { - let task = self.worker.handle.shared.owned.assert_owner(task); - - // Make sure the worker is not in the **searching** state. This enables - // another idle worker to try to steal work. - core.transition_from_searching(&self.worker); - - self.assert_lifo_enabled_is_correct(&core); - - // Measure the poll start time. Note that we may end up polling other - // tasks under this measurement. In this case, the tasks came from the - // LIFO slot and are considered part of the current task for scheduling - // purposes. These tasks inherent the "parent"'s limits. - core.stats.start_poll(); - - // Make the core available to the runtime context - *self.core.borrow_mut() = Some(core); - - // Run the task - coop::budget(|| { - task.run(); - let mut lifo_polls = 0; - - // As long as there is budget remaining and a task exists in the - // `lifo_slot`, then keep running. - loop { - // Check if we still have the core. If not, the core was stolen - // by another worker. - let mut core = match self.core.borrow_mut().take() { - Some(core) => core, - None => { - // In this case, we cannot call `reset_lifo_enabled()` - // because the core was stolen. 
The stealer will handle - // that at the top of `Context::run` - return Err(()); - } - }; - - // Check for a task in the LIFO slot - let task = match core.lifo_slot.take() { - Some(task) => task, - None => { - self.reset_lifo_enabled(&mut core); - core.stats.end_poll(); - return Ok(core); - } - }; - - if !coop::has_budget_remaining() { - core.stats.end_poll(); - - // Not enough budget left to run the LIFO task, push it to - // the back of the queue and return. - core.run_queue.push_back_or_overflow( - task, - &*self.worker.handle, - &mut core.stats, - ); - // If we hit this point, the LIFO slot should be enabled. - // There is no need to reset it. - debug_assert!(core.lifo_enabled); - return Ok(core); - } - - // Track that we are about to run a task from the LIFO slot. - lifo_polls += 1; - super::counters::inc_lifo_schedules(); - - // Disable the LIFO slot if we reach our limit - // - // In ping-ping style workloads where task A notifies task B, - // which notifies task A again, continuously prioritizing the - // LIFO slot can cause starvation as these two tasks will - // repeatedly schedule the other. To mitigate this, we limit the - // number of times the LIFO slot is prioritized. 
- if lifo_polls >= MAX_LIFO_POLLS_PER_TICK { - core.lifo_enabled = false; - super::counters::inc_lifo_capped(); - } - - // Run the LIFO task, then loop - *self.core.borrow_mut() = Some(core); - let task = self.worker.handle.shared.owned.assert_owner(task); - task.run(); - } - }) - } - - fn reset_lifo_enabled(&self, core: &mut Core) { - core.lifo_enabled = !self.worker.handle.shared.config.disable_lifo_slot; - } - - fn assert_lifo_enabled_is_correct(&self, core: &Core) { - debug_assert_eq!( - core.lifo_enabled, - !self.worker.handle.shared.config.disable_lifo_slot - ); - } - - fn maintenance(&self, mut core: Box) -> Box { - if core.tick % self.worker.handle.shared.config.event_interval == 0 { - super::counters::inc_num_maintenance(); - - core.stats.end_processing_scheduled_tasks(); - - // Call `park` with a 0 timeout. This enables the I/O driver, timer, ... - // to run without actually putting the thread to sleep. - core = self.park_timeout(core, Some(Duration::from_millis(0))); - - // Run regularly scheduled maintenance - core.maintenance(&self.worker); - - core.stats.start_processing_scheduled_tasks(); - } - - core - } - - /// Parks the worker thread while waiting for tasks to execute. - /// - /// This function checks if indeed there's no more work left to be done before parking. - /// Also important to notice that, before parking, the worker thread will try to take - /// ownership of the Driver (IO/Time) and dispatch any events that might have fired. - /// Whenever a worker thread executes the Driver loop, all waken tasks are scheduled - /// in its own local queue until the queue saturates (ntasks > LOCAL_QUEUE_CAPACITY). - /// When the local queue is saturated, the overflow tasks are added to the injection queue - /// from where other workers can pick them up. 
- /// Also, we rely on the workstealing algorithm to spread the tasks amongst workers - /// after all the IOs get dispatched - fn park(&self, mut core: Box) -> Box { - if let Some(f) = &self.worker.handle.shared.config.before_park { - f(); - } - - if core.transition_to_parked(&self.worker) { - while !core.is_shutdown && !core.is_traced { - core.stats.about_to_park(); - core = self.park_timeout(core, None); - - // Run regularly scheduled maintenance - core.maintenance(&self.worker); - - if core.transition_from_parked(&self.worker) { - break; - } - } - } - - if let Some(f) = &self.worker.handle.shared.config.after_unpark { - f(); - } - core - } - - fn park_timeout(&self, mut core: Box, duration: Option) -> Box { - self.assert_lifo_enabled_is_correct(&core); - - // Take the parker out of core - let mut park = core.park.take().expect("park missing"); - - // Store `core` in context - *self.core.borrow_mut() = Some(core); - - // Park thread - if let Some(timeout) = duration { - park.park_timeout(&self.worker.handle.driver, timeout); - } else { - park.park(&self.worker.handle.driver); - } - - self.defer.wake(); - - // Remove `core` from context - core = self.core.borrow_mut().take().expect("core missing"); - - // Place `park` back in `core` - core.park = Some(park); - - if core.should_notify_others() { - self.worker.handle.notify_parked_local(); - } - - core - } - - pub(crate) fn defer(&self, waker: &Waker) { - self.defer.defer(waker); - } -} - -impl Core { - /// Increment the tick - fn tick(&mut self) { - self.tick = self.tick.wrapping_add(1); - } - - /// Return the next notified task available to this worker. 
- fn next_task(&mut self, worker: &Worker) -> Option { - if self.tick % self.global_queue_interval == 0 { - // Update the global queue interval, if needed - self.tune_global_queue_interval(worker); - - worker - .handle - .next_remote_task() - .or_else(|| self.next_local_task()) - } else { - let maybe_task = self.next_local_task(); - - if maybe_task.is_some() { - return maybe_task; - } - - if worker.inject().is_empty() { - return None; - } - - // Other threads can only **remove** tasks from the current worker's - // `run_queue`. So, we can be confident that by the time we call - // `run_queue.push_back` below, there will be *at least* `cap` - // available slots in the queue. - let cap = usize::min( - self.run_queue.remaining_slots(), - self.run_queue.max_capacity() / 2, - ); - - // The worker is currently idle, pull a batch of work from the - // injection queue. We don't want to pull *all* the work so other - // workers can also get some. - let n = usize::min( - worker.inject().len() / worker.handle.shared.remotes.len() + 1, - cap, - ); - - // Take at least one task since the first task is returned directly - // and nto pushed onto the local queue. - let n = usize::max(1, n); - - let mut synced = worker.handle.shared.synced.lock(); - // safety: passing in the correct `inject::Synced`. - let mut tasks = unsafe { worker.inject().pop_n(&mut synced.inject, n) }; - - // Pop the first task to return immedietly - let ret = tasks.next(); - - // Push the rest of the on the run queue - self.run_queue.push_back(tasks); - - ret - } - } - - fn next_local_task(&mut self) -> Option { - self.lifo_slot.take().or_else(|| self.run_queue.pop()) - } - - /// Function responsible for stealing tasks from another worker - /// - /// Note: Only if less than half the workers are searching for tasks to steal - /// a new worker will actually try to steal. The idea is to make sure not all - /// workers will be trying to steal at the same time. 
- fn steal_work(&mut self, worker: &Worker) -> Option { - if !self.transition_to_searching(worker) { - return None; - } - - let num = worker.handle.shared.remotes.len(); - // Start from a random worker - let start = self.rand.fastrand_n(num as u32) as usize; - - for i in 0..num { - let i = (start + i) % num; - - // Don't steal from ourself! We know we don't have work. - if i == worker.index { - continue; - } - - let target = &worker.handle.shared.remotes[i]; - if let Some(task) = target - .steal - .steal_into(&mut self.run_queue, &mut self.stats) - { - return Some(task); - } - } - - // Fallback on checking the global queue - worker.handle.next_remote_task() - } - - fn transition_to_searching(&mut self, worker: &Worker) -> bool { - if !self.is_searching { - self.is_searching = worker.handle.shared.idle.transition_worker_to_searching(); - } - - self.is_searching - } - - fn transition_from_searching(&mut self, worker: &Worker) { - if !self.is_searching { - return; - } - - self.is_searching = false; - worker.handle.transition_worker_from_searching(); - } - - fn has_tasks(&self) -> bool { - self.lifo_slot.is_some() || self.run_queue.has_tasks() - } - - fn should_notify_others(&self) -> bool { - // If there are tasks available to steal, but this worker is not - // looking for tasks to steal, notify another worker. - if self.is_searching { - return false; - } - self.lifo_slot.is_some() as usize + self.run_queue.len() > 1 - } - - /// Prepares the worker state for parking. - /// - /// Returns true if the transition happened, false if there is work to do first. - fn transition_to_parked(&mut self, worker: &Worker) -> bool { - // Workers should not park if they have work to do - if self.has_tasks() || self.is_traced { - return false; - } - - // When the final worker transitions **out** of searching to parked, it - // must check all the queues one last time in case work materialized - // between the last work scan and transitioning out of searching. 
- let is_last_searcher = worker.handle.shared.idle.transition_worker_to_parked( - &worker.handle.shared, - worker.index, - self.is_searching, - ); - - // The worker is no longer searching. Setting this is the local cache - // only. - self.is_searching = false; - - if is_last_searcher { - worker.handle.notify_if_work_pending(); - } - - true - } - - /// Returns `true` if the transition happened. - fn transition_from_parked(&mut self, worker: &Worker) -> bool { - // If a task is in the lifo slot/run queue, then we must unpark regardless of - // being notified - if self.has_tasks() { - // When a worker wakes, it should only transition to the "searching" - // state when the wake originates from another worker *or* a new task - // is pushed. We do *not* want the worker to transition to "searching" - // when it wakes when the I/O driver receives new events. - self.is_searching = !worker - .handle - .shared - .idle - .unpark_worker_by_id(&worker.handle.shared, worker.index); - return true; - } - - if worker - .handle - .shared - .idle - .is_parked(&worker.handle.shared, worker.index) - { - return false; - } - - // When unparked, the worker is in the searching state. - self.is_searching = true; - true - } - - /// Runs maintenance work such as checking the pool's state. - fn maintenance(&mut self, worker: &Worker) { - self.stats - .submit(&worker.handle.shared.worker_metrics[worker.index]); - - if !self.is_shutdown { - // Check if the scheduler has been shutdown - let synced = worker.handle.shared.synced.lock(); - self.is_shutdown = worker.inject().is_closed(&synced.inject); - } - - if !self.is_traced { - // Check if the worker should be tracing. - self.is_traced = worker.handle.shared.trace_status.trace_requested(); - } - } - - /// Signals all tasks to shut down, and waits for them to complete. Must run - /// before we enter the single-threaded phase of shutdown processing. - fn pre_shutdown(&mut self, worker: &Worker) { - // Signal to all tasks to shut down. 
- worker.handle.shared.owned.close_and_shutdown_all(); - - self.stats - .submit(&worker.handle.shared.worker_metrics[worker.index]); - } - - /// Shuts down the core. - fn shutdown(&mut self, handle: &Handle) { - // Take the core - let mut park = self.park.take().expect("park missing"); - - // Drain the queue - while self.next_local_task().is_some() {} - - park.shutdown(&handle.driver); - } - - fn tune_global_queue_interval(&mut self, worker: &Worker) { - let next = self - .stats - .tuned_global_queue_interval(&worker.handle.shared.config); - - debug_assert!(next > 1); - - // Smooth out jitter - if abs_diff(self.global_queue_interval, next) > 2 { - self.global_queue_interval = next; - } - } -} - -impl Worker { - /// Returns a reference to the scheduler's injection queue. - fn inject(&self) -> &inject::Shared> { - &self.handle.shared.inject - } -} - -// TODO: Move `Handle` impls into handle.rs -impl task::Schedule for Arc { - fn release(&self, task: &Task) -> Option { - self.shared.owned.remove(task) - } - - fn schedule(&self, task: Notified) { - self.schedule_task(task, false); - } - - fn yield_now(&self, task: Notified) { - self.schedule_task(task, true); - } -} - -impl Handle { - pub(super) fn schedule_task(&self, task: Notified, is_yield: bool) { - with_current(|maybe_cx| { - if let Some(cx) = maybe_cx { - // Make sure the task is part of the **current** scheduler. - if self.ptr_eq(&cx.worker.handle) { - // And the current thread still holds a core - if let Some(core) = cx.core.borrow_mut().as_mut() { - self.schedule_local(core, task, is_yield); - return; - } - } - } - - // Otherwise, use the inject queue. 
- self.push_remote_task(task); - self.notify_parked_remote(); - }) - } - - pub(super) fn schedule_option_task_without_yield(&self, task: Option) { - if let Some(task) = task { - self.schedule_task(task, false); - } - } - - fn schedule_local(&self, core: &mut Core, task: Notified, is_yield: bool) { - core.stats.inc_local_schedule_count(); - - // Spawning from the worker thread. If scheduling a "yield" then the - // task must always be pushed to the back of the queue, enabling other - // tasks to be executed. If **not** a yield, then there is more - // flexibility and the task may go to the front of the queue. - let should_notify = if is_yield || !core.lifo_enabled { - core.run_queue - .push_back_or_overflow(task, self, &mut core.stats); - true - } else { - // Push to the LIFO slot - let prev = core.lifo_slot.take(); - let ret = prev.is_some(); - - if let Some(prev) = prev { - core.run_queue - .push_back_or_overflow(prev, self, &mut core.stats); - } - - core.lifo_slot = Some(task); - - ret - }; - - // Only notify if not currently parked. If `park` is `None`, then the - // scheduling is from a resource driver. As notifications often come in - // batches, the notification is delayed until the park is complete. 
- if should_notify && core.park.is_some() { - self.notify_parked_local(); - } - } - - fn next_remote_task(&self) -> Option { - if self.shared.inject.is_empty() { - return None; - } - - let mut synced = self.shared.synced.lock(); - // safety: passing in correct `idle::Synced` - unsafe { self.shared.inject.pop(&mut synced.inject) } - } - - fn push_remote_task(&self, task: Notified) { - self.shared.scheduler_metrics.inc_remote_schedule_count(); - - let mut synced = self.shared.synced.lock(); - // safety: passing in correct `idle::Synced` - unsafe { - self.shared.inject.push(&mut synced.inject, task); - } - } - - pub(super) fn close(&self) { - if self - .shared - .inject - .close(&mut self.shared.synced.lock().inject) - { - self.notify_all(); - } - } - - fn notify_parked_local(&self) { - super::counters::inc_num_inc_notify_local(); - - if let Some(index) = self.shared.idle.worker_to_notify(&self.shared) { - super::counters::inc_num_unparks_local(); - self.shared.remotes[index].unpark.unpark(&self.driver); - } - } - - fn notify_parked_remote(&self) { - if let Some(index) = self.shared.idle.worker_to_notify(&self.shared) { - self.shared.remotes[index].unpark.unpark(&self.driver); - } - } - - pub(super) fn notify_all(&self) { - for remote in &self.shared.remotes[..] { - remote.unpark.unpark(&self.driver); - } - } - - fn notify_if_work_pending(&self) { - for remote in &self.shared.remotes[..] { - if !remote.steal.is_empty() { - self.notify_parked_local(); - return; - } - } - - if !self.shared.inject.is_empty() { - self.notify_parked_local(); - } - } - - fn transition_worker_from_searching(&self) { - if self.shared.idle.transition_worker_from_searching() { - // We are the final searching worker. Because work was found, we - // need to notify another worker. - self.notify_parked_local(); - } - } - - /// Signals that a worker has observed the shutdown signal and has replaced - /// its core back into its handle. 
- /// - /// If all workers have reached this point, the final cleanup is performed. - fn shutdown_core(&self, core: Box) { - let mut cores = self.shared.shutdown_cores.lock(); - cores.push(core); - - if cores.len() != self.shared.remotes.len() { - return; - } - - debug_assert!(self.shared.owned.is_empty()); - - for mut core in cores.drain(..) { - core.shutdown(self); - } - - // Drain the injection queue - // - // We already shut down every task, so we can simply drop the tasks. - while let Some(task) = self.next_remote_task() { - drop(task); - } - } - - fn ptr_eq(&self, other: &Handle) -> bool { - std::ptr::eq(self, other) - } -} - -impl Overflow> for Handle { - fn push(&self, task: task::Notified>) { - self.push_remote_task(task); - } - - fn push_batch(&self, iter: I) - where - I: Iterator>>, - { - unsafe { - self.shared.inject.push_batch(self, iter); - } - } -} - -pub(crate) struct InjectGuard<'a> { - lock: crate::loom::sync::MutexGuard<'a, Synced>, -} - -impl<'a> AsMut for InjectGuard<'a> { - fn as_mut(&mut self) -> &mut inject::Synced { - &mut self.lock.inject - } -} - -impl<'a> Lock for &'a Handle { - type Handle = InjectGuard<'a>; - - fn lock(self) -> Self::Handle { - InjectGuard { - lock: self.shared.synced.lock(), - } - } -} - -#[track_caller] -fn with_current(f: impl FnOnce(Option<&Context>) -> R) -> R { - use scheduler::Context::MultiThread; - - context::with_scheduler(|ctx| match ctx { - Some(MultiThread(ctx)) => f(Some(ctx)), - _ => f(None), - }) -} - -// `u32::abs_diff` is not available on Tokio's MSRV. 
-fn abs_diff(a: u32, b: u32) -> u32 { - if a > b { - a - b - } else { - b - a - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/counters.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/counters.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/counters.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/counters.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,166 +0,0 @@ -#[cfg(tokio_internal_mt_counters)] -mod imp { - use std::sync::atomic::AtomicUsize; - use std::sync::atomic::Ordering::Relaxed; - - static NUM_MAINTENANCE: AtomicUsize = AtomicUsize::new(0); - static NUM_NOTIFY_LOCAL: AtomicUsize = AtomicUsize::new(0); - static NUM_NOTIFY_REMOTE: AtomicUsize = AtomicUsize::new(0); - static NUM_UNPARKS_LOCAL: AtomicUsize = AtomicUsize::new(0); - static NUM_UNPARKS_REMOTE: AtomicUsize = AtomicUsize::new(0); - static NUM_LIFO_SCHEDULES: AtomicUsize = AtomicUsize::new(0); - static NUM_LIFO_CAPPED: AtomicUsize = AtomicUsize::new(0); - static NUM_STEALS: AtomicUsize = AtomicUsize::new(0); - static NUM_OVERFLOW: AtomicUsize = AtomicUsize::new(0); - static NUM_PARK: AtomicUsize = AtomicUsize::new(0); - static NUM_POLLS: AtomicUsize = AtomicUsize::new(0); - static NUM_LIFO_POLLS: AtomicUsize = AtomicUsize::new(0); - static NUM_REMOTE_BATCH: AtomicUsize = AtomicUsize::new(0); - static NUM_GLOBAL_QUEUE_INTERVAL: AtomicUsize = AtomicUsize::new(0); - static NUM_NO_AVAIL_CORE: AtomicUsize = AtomicUsize::new(0); - static NUM_RELAY_SEARCH: AtomicUsize = AtomicUsize::new(0); - static NUM_SPIN_STALL: AtomicUsize = AtomicUsize::new(0); - static NUM_NO_LOCAL_WORK: AtomicUsize = AtomicUsize::new(0); - - impl Drop for super::Counters { - fn drop(&mut self) { - let notifies_local = NUM_NOTIFY_LOCAL.load(Relaxed); - let notifies_remote = NUM_NOTIFY_REMOTE.load(Relaxed); - let unparks_local = NUM_UNPARKS_LOCAL.load(Relaxed); 
- let unparks_remote = NUM_UNPARKS_REMOTE.load(Relaxed); - let maintenance = NUM_MAINTENANCE.load(Relaxed); - let lifo_scheds = NUM_LIFO_SCHEDULES.load(Relaxed); - let lifo_capped = NUM_LIFO_CAPPED.load(Relaxed); - let num_steals = NUM_STEALS.load(Relaxed); - let num_overflow = NUM_OVERFLOW.load(Relaxed); - let num_park = NUM_PARK.load(Relaxed); - let num_polls = NUM_POLLS.load(Relaxed); - let num_lifo_polls = NUM_LIFO_POLLS.load(Relaxed); - let num_remote_batch = NUM_REMOTE_BATCH.load(Relaxed); - let num_global_queue_interval = NUM_GLOBAL_QUEUE_INTERVAL.load(Relaxed); - let num_no_avail_core = NUM_NO_AVAIL_CORE.load(Relaxed); - let num_relay_search = NUM_RELAY_SEARCH.load(Relaxed); - let num_spin_stall = NUM_SPIN_STALL.load(Relaxed); - let num_no_local_work = NUM_NO_LOCAL_WORK.load(Relaxed); - - println!("---"); - println!("notifies (remote): {}", notifies_remote); - println!(" notifies (local): {}", notifies_local); - println!(" unparks (local): {}", unparks_local); - println!(" unparks (remote): {}", unparks_remote); - println!(" notify, no core: {}", num_no_avail_core); - println!(" maintenance: {}", maintenance); - println!(" LIFO schedules: {}", lifo_scheds); - println!(" LIFO capped: {}", lifo_capped); - println!(" steals: {}", num_steals); - println!(" queue overflows: {}", num_overflow); - println!(" parks: {}", num_park); - println!(" polls: {}", num_polls); - println!(" polls (LIFO): {}", num_lifo_polls); - println!("remote task batch: {}", num_remote_batch); - println!("global Q interval: {}", num_global_queue_interval); - println!(" relay search: {}", num_relay_search); - println!(" spin stall: {}", num_spin_stall); - println!(" no local work: {}", num_no_local_work); - } - } - - pub(crate) fn inc_num_inc_notify_local() { - NUM_NOTIFY_LOCAL.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_notify_remote() { - NUM_NOTIFY_REMOTE.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_unparks_local() { - NUM_UNPARKS_LOCAL.fetch_add(1, Relaxed); - } - - 
pub(crate) fn inc_num_unparks_remote() { - NUM_UNPARKS_REMOTE.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_maintenance() { - NUM_MAINTENANCE.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_lifo_schedules() { - NUM_LIFO_SCHEDULES.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_lifo_capped() { - NUM_LIFO_CAPPED.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_steals() { - NUM_STEALS.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_overflows() { - NUM_OVERFLOW.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_parks() { - NUM_PARK.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_polls() { - NUM_POLLS.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_lifo_polls() { - NUM_LIFO_POLLS.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_remote_batch() { - NUM_REMOTE_BATCH.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_global_queue_interval() { - NUM_GLOBAL_QUEUE_INTERVAL.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_notify_no_core() { - NUM_NO_AVAIL_CORE.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_relay_search() { - NUM_RELAY_SEARCH.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_spin_stall() { - NUM_SPIN_STALL.fetch_add(1, Relaxed); - } - - pub(crate) fn inc_num_no_local_work() { - NUM_NO_LOCAL_WORK.fetch_add(1, Relaxed); - } -} - -#[cfg(not(tokio_internal_mt_counters))] -mod imp { - pub(crate) fn inc_num_inc_notify_local() {} - pub(crate) fn inc_num_notify_remote() {} - pub(crate) fn inc_num_unparks_local() {} - pub(crate) fn inc_num_unparks_remote() {} - pub(crate) fn inc_num_maintenance() {} - pub(crate) fn inc_lifo_schedules() {} - pub(crate) fn inc_lifo_capped() {} - pub(crate) fn inc_num_steals() {} - pub(crate) fn inc_num_overflows() {} - pub(crate) fn inc_num_parks() {} - pub(crate) fn inc_num_polls() {} - pub(crate) fn inc_num_lifo_polls() {} - pub(crate) fn inc_num_remote_batch() {} - pub(crate) fn inc_global_queue_interval() {} - pub(crate) fn inc_notify_no_core() {} - pub(crate) fn inc_num_relay_search() {} - 
pub(crate) fn inc_num_spin_stall() {} - pub(crate) fn inc_num_no_local_work() {} -} - -#[derive(Debug)] -pub(crate) struct Counters; - -pub(super) use imp::*; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle/metrics.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle/metrics.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle/metrics.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle/metrics.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,41 +0,0 @@ -use super::Handle; - -use crate::runtime::{SchedulerMetrics, WorkerMetrics}; - -impl Handle { - pub(crate) fn num_workers(&self) -> usize { - self.shared.worker_metrics.len() - } - - pub(crate) fn num_blocking_threads(&self) -> usize { - self.blocking_spawner.num_threads() - } - - pub(crate) fn num_idle_blocking_threads(&self) -> usize { - self.blocking_spawner.num_idle_threads() - } - - pub(crate) fn active_tasks_count(&self) -> usize { - self.shared.owned.active_tasks_count() - } - - pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { - &self.shared.scheduler_metrics - } - - pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { - &self.shared.worker_metrics[worker] - } - - pub(crate) fn injection_queue_depth(&self) -> usize { - self.shared.injection_queue_depth() - } - - pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { - self.shared.worker_local_queue_depth(worker) - } - - pub(crate) fn blocking_queue_depth(&self) -> usize { - self.blocking_spawner.queue_depth() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle/taskdump.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle/taskdump.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle/taskdump.rs 2024-02-06 
12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle/taskdump.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -use super::Handle; - -use crate::runtime::Dump; - -impl Handle { - pub(crate) async fn dump(&self) -> Dump { - let trace_status = &self.shared.trace_status; - - // If a dump is in progress, block. - trace_status.start_trace_request(&self).await; - - let result = loop { - if let Some(result) = trace_status.take_result() { - break result; - } else { - self.notify_all(); - trace_status.result_ready.notified().await; - } - }; - - // Allow other queued dumps to proceed. - trace_status.end_trace_request(&self).await; - - result - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/handle.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,75 +0,0 @@ -use crate::future::Future; -use crate::loom::sync::Arc; -use crate::runtime::scheduler::multi_thread_alt::worker; -use crate::runtime::{ - blocking, driver, - task::{self, JoinHandle}, -}; -use crate::util::RngSeedGenerator; - -use std::fmt; - -cfg_metrics! 
{ - mod metrics; -} - -/// Handle to the multi thread scheduler -pub(crate) struct Handle { - /// Task spawner - pub(super) shared: worker::Shared, - - /// Resource driver handles - pub(crate) driver: driver::Handle, - - /// Blocking pool spawner - pub(crate) blocking_spawner: blocking::Spawner, - - /// Current random number generator seed - pub(crate) seed_generator: RngSeedGenerator, -} - -impl Handle { - /// Spawns a future onto the thread pool - pub(crate) fn spawn(me: &Arc, future: F, id: task::Id) -> JoinHandle - where - F: crate::future::Future + Send + 'static, - F::Output: Send + 'static, - { - Self::bind_new_task(me, future, id) - } - - pub(crate) fn shutdown(&self) { - self.shared.close(self); - self.driver.unpark(); - } - - pub(super) fn bind_new_task(me: &Arc, future: T, id: task::Id) -> JoinHandle - where - T: Future + Send + 'static, - T::Output: Send + 'static, - { - let (handle, notified) = me.shared.owned.bind(future, me.clone(), id); - - if let Some(notified) = notified { - me.shared.schedule_task(notified, false); - } - - handle - } -} - -cfg_unstable! { - use std::num::NonZeroU64; - - impl Handle { - pub(crate) fn owned_id(&self) -> NonZeroU64 { - self.shared.owned.id - } - } -} - -impl fmt::Debug for Handle { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("multi_thread::Handle { ... }").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/idle.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/idle.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/idle.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/idle.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,423 +0,0 @@ -//! 
Coordinates idling workers - -#![allow(dead_code)] - -use crate::loom::sync::atomic::{AtomicBool, AtomicUsize}; -use crate::loom::sync::MutexGuard; -use crate::runtime::scheduler::multi_thread_alt::{worker, Core, Handle, Shared}; - -use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; - -pub(super) struct Idle { - /// Number of searching cores - num_searching: AtomicUsize, - - /// Number of idle cores - num_idle: AtomicUsize, - - /// Map of idle cores - idle_map: IdleMap, - - /// Used to catch false-negatives when waking workers - needs_searching: AtomicBool, - - /// Total number of cores - num_cores: usize, -} - -pub(super) struct IdleMap { - chunks: Vec, -} - -pub(super) struct Snapshot { - chunks: Vec, -} - -/// Data synchronized by the scheduler mutex -pub(super) struct Synced { - /// Worker IDs that are currently sleeping - sleepers: Vec, - - /// Cores available for workers - available_cores: Vec>, -} - -impl Idle { - pub(super) fn new(cores: Vec>, num_workers: usize) -> (Idle, Synced) { - let idle = Idle { - num_searching: AtomicUsize::new(0), - num_idle: AtomicUsize::new(cores.len()), - idle_map: IdleMap::new(&cores), - needs_searching: AtomicBool::new(false), - num_cores: cores.len(), - }; - - let synced = Synced { - sleepers: Vec::with_capacity(num_workers), - available_cores: cores, - }; - - (idle, synced) - } - - pub(super) fn needs_searching(&self) -> bool { - self.needs_searching.load(Acquire) - } - - pub(super) fn num_idle(&self, synced: &Synced) -> usize { - #[cfg(not(loom))] - debug_assert_eq!(synced.available_cores.len(), self.num_idle.load(Acquire)); - synced.available_cores.len() - } - - pub(super) fn num_searching(&self) -> usize { - self.num_searching.load(Acquire) - } - - pub(super) fn snapshot(&self, snapshot: &mut Snapshot) { - snapshot.update(&self.idle_map) - } - - /// Try to acquire an available core - pub(super) fn try_acquire_available_core(&self, synced: &mut Synced) -> Option> { - let ret = synced.available_cores.pop(); - - if 
let Some(core) = &ret { - // Decrement the number of idle cores - let num_idle = self.num_idle.load(Acquire) - 1; - debug_assert_eq!(num_idle, synced.available_cores.len()); - self.num_idle.store(num_idle, Release); - - self.idle_map.unset(core.index); - debug_assert!(self.idle_map.matches(&synced.available_cores)); - } - - ret - } - - /// We need at least one searching worker - pub(super) fn notify_local(&self, shared: &Shared) { - if self.num_searching.load(Acquire) != 0 { - // There already is a searching worker. Note, that this could be a - // false positive. However, because this method is called **from** a - // worker, we know that there is at least one worker currently - // awake, so the scheduler won't deadlock. - return; - } - - if self.num_idle.load(Acquire) == 0 { - self.needs_searching.store(true, Release); - return; - } - - // There aren't any searching workers. Try to initialize one - if self - .num_searching - .compare_exchange(0, 1, AcqRel, Acquire) - .is_err() - { - // Failing the compare_exchange means another thread concurrently - // launched a searching worker. 
- return; - } - - super::counters::inc_num_unparks_local(); - - // Acquire the lock - let synced = shared.synced.lock(); - self.notify_synced(synced, shared); - } - - /// Notifies a single worker - pub(super) fn notify_remote(&self, synced: MutexGuard<'_, worker::Synced>, shared: &Shared) { - if synced.idle.sleepers.is_empty() { - self.needs_searching.store(true, Release); - return; - } - - // We need to establish a stronger barrier than with `notify_local` - self.num_searching.fetch_add(1, AcqRel); - - self.notify_synced(synced, shared); - } - - /// Notify a worker while synced - fn notify_synced(&self, mut synced: MutexGuard<'_, worker::Synced>, shared: &Shared) { - // Find a sleeping worker - if let Some(worker) = synced.idle.sleepers.pop() { - // Find an available core - if let Some(mut core) = self.try_acquire_available_core(&mut synced.idle) { - debug_assert!(!core.is_searching); - core.is_searching = true; - - // Assign the core to the worker - synced.assigned_cores[worker] = Some(core); - - // Drop the lock before notifying the condvar. - drop(synced); - - super::counters::inc_num_unparks_remote(); - - // Notify the worker - shared.condvars[worker].notify_one(); - return; - } else { - synced.idle.sleepers.push(worker); - } - } - - super::counters::inc_notify_no_core(); - - // Set the `needs_searching` flag, this happens *while* the lock is held. - self.needs_searching.store(true, Release); - self.num_searching.fetch_sub(1, Release); - - // Explicit mutex guard drop to show that holding the guard to this - // point is significant. `needs_searching` and `num_searching` must be - // updated in the critical section. - drop(synced); - } - - pub(super) fn notify_mult( - &self, - synced: &mut worker::Synced, - workers: &mut Vec, - num: usize, - ) { - debug_assert!(workers.is_empty()); - - for _ in 0..num { - if let Some(worker) = synced.idle.sleepers.pop() { - // TODO: can this be switched to use next_available_core? 
- if let Some(core) = synced.idle.available_cores.pop() { - debug_assert!(!core.is_searching); - - self.idle_map.unset(core.index); - - synced.assigned_cores[worker] = Some(core); - - workers.push(worker); - - continue; - } else { - synced.idle.sleepers.push(worker); - } - } - - break; - } - - if !workers.is_empty() { - debug_assert!(self.idle_map.matches(&synced.idle.available_cores)); - let num_idle = synced.idle.available_cores.len(); - self.num_idle.store(num_idle, Release); - } else { - #[cfg(not(loom))] - debug_assert_eq!( - synced.idle.available_cores.len(), - self.num_idle.load(Acquire) - ); - self.needs_searching.store(true, Release); - } - } - - pub(super) fn shutdown(&self, synced: &mut worker::Synced, shared: &Shared) { - // Wake every sleeping worker and assign a core to it. There may not be - // enough sleeping workers for all cores, but other workers will - // eventually find the cores and shut them down. - while !synced.idle.sleepers.is_empty() && !synced.idle.available_cores.is_empty() { - let worker = synced.idle.sleepers.pop().unwrap(); - let core = self.try_acquire_available_core(&mut synced.idle).unwrap(); - - synced.assigned_cores[worker] = Some(core); - shared.condvars[worker].notify_one(); - } - - debug_assert!(self.idle_map.matches(&synced.idle.available_cores)); - - // Wake up any other workers - while let Some(index) = synced.idle.sleepers.pop() { - shared.condvars[index].notify_one(); - } - } - - pub(super) fn shutdown_unassigned_cores(&self, handle: &Handle, shared: &Shared) { - // If there are any remaining cores, shut them down here. - // - // This code is a bit convoluted to avoid lock-reentry. - while let Some(core) = { - let mut synced = shared.synced.lock(); - self.try_acquire_available_core(&mut synced.idle) - } { - shared.shutdown_core(handle, core); - } - } - - /// The worker releases the given core, making it available to other workers - /// that are waiting. 
- pub(super) fn release_core(&self, synced: &mut worker::Synced, core: Box) { - // The core should not be searching at this point - debug_assert!(!core.is_searching); - - // Check that there are no pending tasks in the global queue - debug_assert!(synced.inject.is_empty()); - - let num_idle = synced.idle.available_cores.len(); - #[cfg(not(loom))] - debug_assert_eq!(num_idle, self.num_idle.load(Acquire)); - - self.idle_map.set(core.index); - - // Store the core in the list of available cores - synced.idle.available_cores.push(core); - - debug_assert!(self.idle_map.matches(&synced.idle.available_cores)); - - // Update `num_idle` - self.num_idle.store(num_idle + 1, Release); - } - - pub(super) fn transition_worker_to_parked(&self, synced: &mut worker::Synced, index: usize) { - // Store the worker index in the list of sleepers - synced.idle.sleepers.push(index); - - // The worker's assigned core slot should be empty - debug_assert!(synced.assigned_cores[index].is_none()); - } - - pub(super) fn try_transition_worker_to_searching(&self, core: &mut Core) { - debug_assert!(!core.is_searching); - - let num_searching = self.num_searching.load(Acquire); - let num_idle = self.num_idle.load(Acquire); - - if 2 * num_searching >= self.num_cores - num_idle { - return; - } - - self.transition_worker_to_searching(core); - } - - /// Needs to happen while synchronized in order to avoid races - pub(super) fn transition_worker_to_searching_if_needed( - &self, - _synced: &mut Synced, - core: &mut Core, - ) -> bool { - if self.needs_searching.load(Acquire) { - // Needs to be called while holding the lock - self.transition_worker_to_searching(core); - true - } else { - false - } - } - - pub(super) fn transition_worker_to_searching(&self, core: &mut Core) { - core.is_searching = true; - self.num_searching.fetch_add(1, AcqRel); - self.needs_searching.store(false, Release); - } - - /// A lightweight transition from searching -> running. 
- /// - /// Returns `true` if this is the final searching worker. The caller - /// **must** notify a new worker. - pub(super) fn transition_worker_from_searching(&self) -> bool { - let prev = self.num_searching.fetch_sub(1, AcqRel); - debug_assert!(prev > 0); - - prev == 1 - } -} - -const BITS: usize = usize::BITS as usize; -const BIT_MASK: usize = (usize::BITS - 1) as usize; - -impl IdleMap { - fn new(cores: &[Box]) -> IdleMap { - let ret = IdleMap::new_n(num_chunks(cores.len())); - ret.set_all(cores); - - ret - } - - fn new_n(n: usize) -> IdleMap { - let chunks = (0..n).map(|_| AtomicUsize::new(0)).collect(); - IdleMap { chunks } - } - - fn set(&self, index: usize) { - let (chunk, mask) = index_to_mask(index); - let prev = self.chunks[chunk].load(Acquire); - let next = prev | mask; - self.chunks[chunk].store(next, Release); - } - - fn set_all(&self, cores: &[Box]) { - for core in cores { - self.set(core.index); - } - } - - fn unset(&self, index: usize) { - let (chunk, mask) = index_to_mask(index); - let prev = self.chunks[chunk].load(Acquire); - let next = prev & !mask; - self.chunks[chunk].store(next, Release); - } - - fn matches(&self, idle_cores: &[Box]) -> bool { - let expect = IdleMap::new_n(self.chunks.len()); - expect.set_all(idle_cores); - - for (i, chunk) in expect.chunks.iter().enumerate() { - if chunk.load(Acquire) != self.chunks[i].load(Acquire) { - return false; - } - } - - true - } -} - -impl Snapshot { - pub(crate) fn new(idle: &Idle) -> Snapshot { - let chunks = vec![0; idle.idle_map.chunks.len()]; - let mut ret = Snapshot { chunks }; - ret.update(&idle.idle_map); - ret - } - - fn update(&mut self, idle_map: &IdleMap) { - for i in 0..self.chunks.len() { - self.chunks[i] = idle_map.chunks[i].load(Acquire); - } - } - - pub(super) fn is_idle(&self, index: usize) -> bool { - let (chunk, mask) = index_to_mask(index); - debug_assert!( - chunk < self.chunks.len(), - "index={}; chunks={}", - index, - self.chunks.len() - ); - self.chunks[chunk] & mask == 
mask - } -} - -fn num_chunks(max_cores: usize) -> usize { - (max_cores / BITS) + 1 -} - -fn index_to_mask(index: usize) -> (usize, usize) { - let mask = 1 << (index & BIT_MASK); - let chunk = index / BITS; - - (chunk, mask) -} - -fn num_active_workers(synced: &Synced) -> usize { - synced.available_cores.capacity() - synced.available_cores.len() -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,91 +0,0 @@ -//! Multi-threaded runtime - -mod counters; -use counters::Counters; - -mod handle; -pub(crate) use handle::Handle; - -mod overflow; -pub(crate) use overflow::Overflow; - -mod idle; -use self::idle::Idle; - -mod stats; -pub(crate) use stats::Stats; - -pub(crate) mod queue; - -mod worker; -use worker::Core; -pub(crate) use worker::{Context, Shared}; - -// TODO: implement task dump -mod trace_mock; -use trace_mock::TraceStatus; - -pub(crate) use worker::block_in_place; - -use crate::runtime::{ - self, blocking, - driver::{self, Driver}, - scheduler, Config, -}; -use crate::util::RngSeedGenerator; - -use std::fmt; -use std::future::Future; - -/// Work-stealing based thread pool for executing futures. 
-pub(crate) struct MultiThread; - -// ===== impl MultiThread ===== - -impl MultiThread { - pub(crate) fn new( - size: usize, - driver: Driver, - driver_handle: driver::Handle, - blocking_spawner: blocking::Spawner, - seed_generator: RngSeedGenerator, - config: Config, - ) -> (MultiThread, runtime::Handle) { - let handle = worker::create( - size, - driver, - driver_handle, - blocking_spawner, - seed_generator, - config, - ); - - (MultiThread, handle) - } - - /// Blocks the current thread waiting for the future to complete. - /// - /// The future will execute on the current thread, but all spawned tasks - /// will be executed on the thread pool. - pub(crate) fn block_on(&self, handle: &scheduler::Handle, future: F) -> F::Output - where - F: Future, - { - crate::runtime::context::enter_runtime(handle, true, |blocking| { - blocking.block_on(future).expect("failed to park thread") - }) - } - - pub(crate) fn shutdown(&mut self, handle: &scheduler::Handle) { - match handle { - scheduler::Handle::MultiThreadAlt(handle) => handle.shutdown(), - _ => panic!("expected MultiThread scheduler"), - } - } -} - -impl fmt::Debug for MultiThread { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("MultiThread").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/overflow.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/overflow.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/overflow.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/overflow.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -use crate::runtime::task; - -#[cfg(test)] -use std::cell::RefCell; - -pub(crate) trait Overflow { - fn push(&self, task: task::Notified); - - fn push_batch(&self, iter: I) - where - I: Iterator>; -} - -#[cfg(test)] -impl Overflow for RefCell>> { - fn push(&self, task: task::Notified) 
{ - self.borrow_mut().push(task); - } - - fn push_batch(&self, iter: I) - where - I: Iterator>, - { - self.borrow_mut().extend(iter); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/park.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/park.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/park.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/park.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,232 +0,0 @@ -//! Parks the runtime. -//! -//! A combination of the various resource driver park handles. - -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::{Arc, Condvar, Mutex}; -use crate::runtime::driver::{self, Driver}; -use crate::util::TryLock; - -use std::sync::atomic::Ordering::SeqCst; -use std::time::Duration; - -pub(crate) struct Parker { - inner: Arc, -} - -pub(crate) struct Unparker { - inner: Arc, -} - -struct Inner { - /// Avoids entering the park if possible - state: AtomicUsize, - - /// Used to coordinate access to the driver / condvar - mutex: Mutex<()>, - - /// Condvar to block on if the driver is unavailable. - condvar: Condvar, - - /// Resource (I/O, time, ...) driver - shared: Arc, -} - -const EMPTY: usize = 0; -const PARKED_CONDVAR: usize = 1; -const PARKED_DRIVER: usize = 2; -const NOTIFIED: usize = 3; - -/// Shared across multiple Parker handles -struct Shared { - /// Shared driver. 
Only one thread at a time can use this - driver: TryLock, -} - -impl Parker { - pub(crate) fn new(driver: Driver) -> Parker { - Parker { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - mutex: Mutex::new(()), - condvar: Condvar::new(), - shared: Arc::new(Shared { - driver: TryLock::new(driver), - }), - }), - } - } - - pub(crate) fn unpark(&self) -> Unparker { - Unparker { - inner: self.inner.clone(), - } - } - - pub(crate) fn park(&mut self, handle: &driver::Handle) { - self.inner.park(handle); - } - - pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) { - // Only parking with zero is supported... - assert_eq!(duration, Duration::from_millis(0)); - - if let Some(mut driver) = self.inner.shared.driver.try_lock() { - driver.park_timeout(handle, duration) - } - } - - pub(crate) fn shutdown(&mut self, handle: &driver::Handle) { - self.inner.shutdown(handle); - } -} - -impl Clone for Parker { - fn clone(&self) -> Parker { - Parker { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - mutex: Mutex::new(()), - condvar: Condvar::new(), - shared: self.inner.shared.clone(), - }), - } - } -} - -impl Unparker { - pub(crate) fn unpark(&self, driver: &driver::Handle) { - self.inner.unpark(driver); - } -} - -impl Inner { - /// Parks the current thread for at most `dur`. - fn park(&self, handle: &driver::Handle) { - // If we were previously notified then we consume this notification and - // return quickly. 
- if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - return; - } - - if let Some(mut driver) = self.shared.driver.try_lock() { - self.park_driver(&mut driver, handle); - } else { - self.park_condvar(); - } - } - - fn park_condvar(&self) { - // Otherwise we need to coordinate going to sleep - let mut m = self.mutex.lock(); - - match self - .state - .compare_exchange(EMPTY, PARKED_CONDVAR, SeqCst, SeqCst) - { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. - let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park state; actual = {}", actual), - } - - loop { - m = self.condvar.wait(m).unwrap(); - - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - // got a notification - return; - } - - // spurious wakeup, go back to sleep - } - } - - fn park_driver(&self, driver: &mut Driver, handle: &driver::Handle) { - match self - .state - .compare_exchange(EMPTY, PARKED_DRIVER, SeqCst, SeqCst) - { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. 
- let old = self.state.swap(EMPTY, SeqCst); - debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - - return; - } - Err(actual) => panic!("inconsistent park state; actual = {}", actual), - } - - driver.park(handle); - - match self.state.swap(EMPTY, SeqCst) { - NOTIFIED => {} // got a notification, hurray! - PARKED_DRIVER => {} // no notification, alas - n => panic!("inconsistent park_timeout state: {}", n), - } - } - - fn unpark(&self, driver: &driver::Handle) { - // To ensure the unparked thread will observe any writes we made before - // this call, we must perform a release operation that `park` can - // synchronize with. To do that we must write `NOTIFIED` even if `state` - // is already `NOTIFIED`. That is why this must be a swap rather than a - // compare-and-swap that returns if it reads `NOTIFIED` on failure. - match self.state.swap(NOTIFIED, SeqCst) { - EMPTY => {} // no one was waiting - NOTIFIED => {} // already unparked - PARKED_CONDVAR => self.unpark_condvar(), - PARKED_DRIVER => driver.unpark(), - actual => panic!("inconsistent state in unpark; actual = {}", actual), - } - } - - fn unpark_condvar(&self) { - // There is a period between when the parked thread sets `state` to - // `PARKED` (or last checked `state` in the case of a spurious wake - // up) and when it actually waits on `cvar`. If we were to notify - // during this period it would be ignored and then when the parked - // thread went to sleep it would never wake up. Fortunately, it has - // `lock` locked at this stage so we can acquire `lock` to wait until - // it is ready to receive the notification. - // - // Releasing `lock` before the call to `notify_one` means that when the - // parked thread wakes it doesn't get woken only to have to wait for us - // to release `lock`. 
- drop(self.mutex.lock()); - - self.condvar.notify_one() - } - - fn shutdown(&self, handle: &driver::Handle) { - if let Some(mut driver) = self.shared.driver.try_lock() { - driver.shutdown(handle); - } - - self.condvar.notify_all(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,595 +0,0 @@ -//! Run-queue structures to support a work-stealing scheduler - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::Arc; -use crate::runtime::scheduler::multi_thread_alt::{Overflow, Stats}; -use crate::runtime::task; - -use std::mem::{self, MaybeUninit}; -use std::ptr; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; - -// Use wider integers when possible to increase ABA resilience. -// -// See issue #5041: . -cfg_has_atomic_u64! { - type UnsignedShort = u32; - type UnsignedLong = u64; - type AtomicUnsignedShort = crate::loom::sync::atomic::AtomicU32; - type AtomicUnsignedLong = crate::loom::sync::atomic::AtomicU64; -} -cfg_not_has_atomic_u64! { - type UnsignedShort = u16; - type UnsignedLong = u32; - type AtomicUnsignedShort = crate::loom::sync::atomic::AtomicU16; - type AtomicUnsignedLong = crate::loom::sync::atomic::AtomicU32; -} - -/// Producer handle. May only be used from a single thread. -pub(crate) struct Local { - inner: Arc>, -} - -/// Consumer handle. May be used from many threads. -pub(crate) struct Steal(Arc>); - -#[repr(align(128))] -pub(crate) struct Inner { - /// Concurrently updated by many threads. - /// - /// Contains two `UnsignedShort` values. The LSB byte is the "real" head of - /// the queue. 
The `UnsignedShort` in the MSB is set by a stealer in process - /// of stealing values. It represents the first value being stolen in the - /// batch. The `UnsignedShort` indices are intentionally wider than strictly - /// required for buffer indexing in order to provide ABA mitigation and make - /// it possible to distinguish between full and empty buffers. - /// - /// When both `UnsignedShort` values are the same, there is no active - /// stealer. - /// - /// Tracking an in-progress stealer prevents a wrapping scenario. - head: AtomicUnsignedLong, - - /// Only updated by producer thread but read by many threads. - tail: AtomicUnsignedShort, - - /// Elements - buffer: Box<[UnsafeCell>>]>, - - mask: usize, -} - -unsafe impl Send for Inner {} -unsafe impl Sync for Inner {} - -/// Create a new local run-queue -pub(crate) fn local(capacity: usize) -> (Steal, Local) { - assert!(capacity <= 4096); - assert!(capacity >= 1); - - let mut buffer = Vec::with_capacity(capacity); - - for _ in 0..capacity { - buffer.push(UnsafeCell::new(MaybeUninit::uninit())); - } - - let inner = Arc::new(Inner { - head: AtomicUnsignedLong::new(0), - tail: AtomicUnsignedShort::new(0), - buffer: buffer.into_boxed_slice(), - mask: capacity - 1, - }); - - let local = Local { - inner: inner.clone(), - }; - - let remote = Steal(inner); - - (remote, local) -} - -impl Local { - /// How many tasks can be pushed into the queue - pub(crate) fn remaining_slots(&self) -> usize { - self.inner.remaining_slots() - } - - pub(crate) fn max_capacity(&self) -> usize { - self.inner.buffer.len() - } - - /// Returns `true` if there are no entries in the queue - pub(crate) fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - pub(crate) fn can_steal(&self) -> bool { - self.remaining_slots() >= self.max_capacity() - self.max_capacity() / 2 - } - - /// Pushes a batch of tasks to the back of the queue. All tasks must fit in - /// the local queue. 
- /// - /// # Panics - /// - /// The method panics if there is not enough capacity to fit in the queue. - pub(crate) fn push_back(&mut self, tasks: impl ExactSizeIterator>) { - let len = tasks.len(); - assert!(len <= self.inner.buffer.len()); - - if len == 0 { - // Nothing to do - return; - } - - let head = self.inner.head.load(Acquire); - let (steal, real) = unpack(head); - - // safety: this is the **only** thread that updates this cell. - let mut tail = unsafe { self.inner.tail.unsync_load() }; - - if tail.wrapping_sub(steal) <= (self.inner.buffer.len() - len) as UnsignedShort { - // Yes, this if condition is structured a bit weird (first block - // does nothing, second returns an error). It is this way to match - // `push_back_or_overflow`. - } else { - panic!( - "not enough capacity; len={}; tail={}; steal={}; real={}", - len, tail, steal, real - ); - } - - for task in tasks { - let idx = tail as usize & self.inner.mask; - - self.inner.buffer[idx].with_mut(|ptr| { - // Write the task to the slot - // - // Safety: There is only one producer and the above `if` - // condition ensures we don't touch a cell if there is a - // value, thus no consumer. - unsafe { - ptr::write((*ptr).as_mut_ptr(), task); - } - }); - - tail = tail.wrapping_add(1); - } - - self.inner.tail.store(tail, Release); - } - - /// Pushes a task to the back of the local queue, if there is not enough - /// capacity in the queue, this triggers the overflow operation. - /// - /// When the queue overflows, half of the curent contents of the queue is - /// moved to the given Injection queue. This frees up capacity for more - /// tasks to be pushed into the local queue. - pub(crate) fn push_back_or_overflow>( - &mut self, - mut task: task::Notified, - overflow: &O, - stats: &mut Stats, - ) { - let tail = loop { - let head = self.inner.head.load(Acquire); - let (steal, real) = unpack(head); - - // safety: this is the **only** thread that updates this cell. 
- let tail = unsafe { self.inner.tail.unsync_load() }; - - if tail.wrapping_sub(steal) < self.inner.buffer.len() as UnsignedShort { - // There is capacity for the task - break tail; - } else if steal != real { - super::counters::inc_num_overflows(); - // Concurrently stealing, this will free up capacity, so only - // push the task onto the inject queue - overflow.push(task); - return; - } else { - super::counters::inc_num_overflows(); - // Push the current task and half of the queue into the - // inject queue. - match self.push_overflow(task, real, tail, overflow, stats) { - Ok(_) => return, - // Lost the race, try again - Err(v) => { - task = v; - } - } - } - }; - - self.push_back_finish(task, tail); - } - - // Second half of `push_back` - fn push_back_finish(&self, task: task::Notified, tail: UnsignedShort) { - // Map the position to a slot index. - let idx = tail as usize & self.inner.mask; - - self.inner.buffer[idx].with_mut(|ptr| { - // Write the task to the slot - // - // Safety: There is only one producer and the above `if` - // condition ensures we don't touch a cell if there is a - // value, thus no consumer. - unsafe { - ptr::write((*ptr).as_mut_ptr(), task); - } - }); - - // Make the task available. Synchronizes with a load in - // `steal_into2`. - self.inner.tail.store(tail.wrapping_add(1), Release); - } - - /// Moves a batch of tasks into the inject queue. - /// - /// This will temporarily make some of the tasks unavailable to stealers. - /// Once `push_overflow` is done, a notification is sent out, so if other - /// workers "missed" some of the tasks during a steal, they will get - /// another opportunity. - #[inline(never)] - fn push_overflow>( - &mut self, - task: task::Notified, - head: UnsignedShort, - tail: UnsignedShort, - overflow: &O, - stats: &mut Stats, - ) -> Result<(), task::Notified> { - // How many elements are we taking from the local queue. 
- // - // This is one less than the number of tasks pushed to the inject - // queue as we are also inserting the `task` argument. - let num_tasks_taken: UnsignedShort = (self.inner.buffer.len() / 2) as UnsignedShort; - - assert_eq!( - tail.wrapping_sub(head) as usize, - self.inner.buffer.len(), - "queue is not full; tail = {}; head = {}", - tail, - head - ); - - let prev = pack(head, head); - - // Claim a bunch of tasks - // - // We are claiming the tasks **before** reading them out of the buffer. - // This is safe because only the **current** thread is able to push new - // tasks. - // - // There isn't really any need for memory ordering... Relaxed would - // work. This is because all tasks are pushed into the queue from the - // current thread (or memory has been acquired if the local queue handle - // moved). - if self - .inner - .head - .compare_exchange( - prev, - pack( - head.wrapping_add(num_tasks_taken), - head.wrapping_add(num_tasks_taken), - ), - Release, - Relaxed, - ) - .is_err() - { - // We failed to claim the tasks, losing the race. Return out of - // this function and try the full `push` routine again. The queue - // may not be full anymore. - return Err(task); - } - - /// An iterator that takes elements out of the run queue. - struct BatchTaskIter<'a, T: 'static> { - buffer: &'a [UnsafeCell>>], - mask: usize, - head: UnsignedLong, - i: UnsignedLong, - num: UnsignedShort, - } - impl<'a, T: 'static> Iterator for BatchTaskIter<'a, T> { - type Item = task::Notified; - - #[inline] - fn next(&mut self) -> Option> { - if self.i == UnsignedLong::from(self.num) { - None - } else { - let i_idx = self.i.wrapping_add(self.head) as usize & self.mask; - let slot = &self.buffer[i_idx]; - - // safety: Our CAS from before has assumed exclusive ownership - // of the task pointers in this range. 
- let task = slot.with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); - - self.i += 1; - Some(task) - } - } - } - - // safety: The CAS above ensures that no consumer will look at these - // values again, and we are the only producer. - let batch_iter = BatchTaskIter { - buffer: &self.inner.buffer, - mask: self.inner.mask, - head: head as UnsignedLong, - i: 0, - num: num_tasks_taken, - }; - overflow.push_batch(batch_iter.chain(std::iter::once(task))); - - // Add 1 to factor in the task currently being scheduled. - stats.incr_overflow_count(); - - Ok(()) - } - - /// Pops a task from the local queue. - pub(crate) fn pop(&mut self) -> Option> { - let mut head = self.inner.head.load(Acquire); - - let idx = loop { - let (steal, real) = unpack(head); - - // safety: this is the **only** thread that updates this cell. - let tail = unsafe { self.inner.tail.unsync_load() }; - - if real == tail { - // queue is empty - return None; - } - - let next_real = real.wrapping_add(1); - - // If `steal == real` there are no concurrent stealers. Both `steal` - // and `real` are updated. - let next = if steal == real { - pack(next_real, next_real) - } else { - assert_ne!(steal, next_real); - pack(steal, next_real) - }; - - // Attempt to claim a task. - let res = self - .inner - .head - .compare_exchange(head, next, AcqRel, Acquire); - - match res { - Ok(_) => break real as usize & self.inner.mask, - Err(actual) => head = actual, - } - }; - - Some(self.inner.buffer[idx].with(|ptr| unsafe { ptr::read(ptr).assume_init() })) - } -} - -impl Steal { - pub(crate) fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Steals half the tasks from self and place them into `dst`. - pub(crate) fn steal_into( - &self, - dst: &mut Local, - dst_stats: &mut Stats, - ) -> Option> { - // Safety: the caller is the only thread that mutates `dst.tail` and - // holds a mutable reference. 
- let dst_tail = unsafe { dst.inner.tail.unsync_load() }; - - // To the caller, `dst` may **look** empty but still have values - // contained in the buffer. If another thread is concurrently stealing - // from `dst` there may not be enough capacity to steal. - let (steal, _) = unpack(dst.inner.head.load(Acquire)); - - if dst_tail.wrapping_sub(steal) > self.0.buffer.len() as UnsignedShort / 2 { - // we *could* try to steal less here, but for simplicity, we're just - // going to abort. - return None; - } - - // Steal the tasks into `dst`'s buffer. This does not yet expose the - // tasks in `dst`. - let mut n = self.steal_into2(dst, dst_tail); - - if n == 0 { - // No tasks were stolen - return None; - } - - super::counters::inc_num_steals(); - - dst_stats.incr_steal_count(n as u16); - dst_stats.incr_steal_operations(); - - // We are returning a task here - n -= 1; - - let ret_pos = dst_tail.wrapping_add(n); - let ret_idx = ret_pos as usize & dst.inner.mask; - - // safety: the value was written as part of `steal_into2` and not - // exposed to stealers, so no other thread can access it. - let ret = dst.inner.buffer[ret_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); - - if n == 0 { - // The `dst` queue is empty, but a single task was stolen - return Some(ret); - } - - // Make the stolen items available to consumers - dst.inner.tail.store(dst_tail.wrapping_add(n), Release); - - Some(ret) - } - - // Steal tasks from `self`, placing them into `dst`. Returns the number of - // tasks that were stolen. - fn steal_into2(&self, dst: &mut Local, dst_tail: UnsignedShort) -> UnsignedShort { - let mut prev_packed = self.0.head.load(Acquire); - let mut next_packed; - - let n = loop { - let (src_head_steal, src_head_real) = unpack(prev_packed); - let src_tail = self.0.tail.load(Acquire); - - // If these two do not match, another thread is concurrently - // stealing from the queue. 
- if src_head_steal != src_head_real { - return 0; - } - - // Number of available tasks to steal - let n = src_tail.wrapping_sub(src_head_real); - let n = n - n / 2; - - if n == 0 { - // No tasks available to steal - return 0; - } - - // Update the real head index to acquire the tasks. - let steal_to = src_head_real.wrapping_add(n); - assert_ne!(src_head_steal, steal_to); - next_packed = pack(src_head_steal, steal_to); - - // Claim all those tasks. This is done by incrementing the "real" - // head but not the steal. By doing this, no other thread is able to - // steal from this queue until the current thread completes. - let res = self - .0 - .head - .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); - - match res { - Ok(_) => break n, - Err(actual) => prev_packed = actual, - } - }; - - debug_assert!( - n <= (self.0.buffer.len() - self.0.buffer.len() / 2) as UnsignedShort, - "actual = {}", - n - ); - - let (first, _) = unpack(next_packed); - - // Take all the tasks - for i in 0..n { - // Compute the positions - let src_pos = first.wrapping_add(i); - let dst_pos = dst_tail.wrapping_add(i); - - // Map to slots - let src_idx = src_pos as usize & self.0.mask; - let dst_idx = dst_pos as usize & self.0.mask; - - // Read the task - // - // safety: We acquired the task with the atomic exchange above. - let task = self.0.buffer[src_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); - - // Write the task to the new slot - // - // safety: `dst` queue is empty and we are the only producer to - // this queue. - dst.inner.buffer[dst_idx] - .with_mut(|ptr| unsafe { ptr::write((*ptr).as_mut_ptr(), task) }); - } - - let mut prev_packed = next_packed; - - // Update `src_head_steal` to match `src_head_real` signalling that the - // stealing routine is complete. 
- loop { - let head = unpack(prev_packed).1; - next_packed = pack(head, head); - - let res = self - .0 - .head - .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); - - match res { - Ok(_) => return n, - Err(actual) => { - let (actual_steal, actual_real) = unpack(actual); - - assert_ne!(actual_steal, actual_real); - - prev_packed = actual; - } - } - } - } -} - -cfg_metrics! { - impl Steal { - pub(crate) fn len(&self) -> usize { - self.0.len() as _ - } - } -} - -impl Clone for Steal { - fn clone(&self) -> Steal { - Steal(self.0.clone()) - } -} - -impl Drop for Local { - fn drop(&mut self) { - if !std::thread::panicking() { - assert!(self.pop().is_none(), "queue not empty"); - } - } -} - -impl Inner { - fn remaining_slots(&self) -> usize { - let (steal, _) = unpack(self.head.load(Acquire)); - let tail = self.tail.load(Acquire); - - self.buffer.len() - (tail.wrapping_sub(steal) as usize) - } - - fn len(&self) -> UnsignedShort { - let (_, head) = unpack(self.head.load(Acquire)); - let tail = self.tail.load(Acquire); - - tail.wrapping_sub(head) - } - - fn is_empty(&self) -> bool { - self.len() == 0 - } -} - -/// Split the head value into the real head and the index a stealer is working -/// on. 
-fn unpack(n: UnsignedLong) -> (UnsignedShort, UnsignedShort) { - let real = n & UnsignedShort::MAX as UnsignedLong; - let steal = n >> (mem::size_of::() * 8); - - (steal as UnsignedShort, real as UnsignedShort) -} - -/// Join the two head values -fn pack(steal: UnsignedShort, real: UnsignedShort) -> UnsignedLong { - (real as UnsignedLong) | ((steal as UnsignedLong) << (mem::size_of::() * 8)) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,171 +0,0 @@ -use crate::runtime::{Config, MetricsBatch, WorkerMetrics}; - -use std::cmp; -use std::time::{Duration, Instant}; - -/// Per-worker statistics. This is used for both tuning the scheduler and -/// reporting runtime-level metrics/stats. -pub(crate) struct Stats { - /// The metrics batch used to report runtime-level metrics/stats to the - /// user. - batch: MetricsBatch, - - /// Exponentially-weighted moving average of time spent polling scheduled a - /// task. - /// - /// Tracked in nanoseconds, stored as a f64 since that is what we use with - /// the EWMA calculations - task_poll_time_ewma: f64, -} - -/// Transient state -pub(crate) struct Ephemeral { - /// Instant at which work last resumed (continued after park). - /// - /// This duplicates the value stored in `MetricsBatch`. We will unify - /// `Stats` and `MetricsBatch` when we stabilize metrics. 
- processing_scheduled_tasks_started_at: Instant, - - /// Number of tasks polled in the batch of scheduled tasks - tasks_polled_in_batch: usize, - - /// Used to ensure calls to start / stop batch are paired - #[cfg(debug_assertions)] - batch_started: bool, -} - -impl Ephemeral { - pub(crate) fn new() -> Ephemeral { - Ephemeral { - processing_scheduled_tasks_started_at: Instant::now(), - tasks_polled_in_batch: 0, - #[cfg(debug_assertions)] - batch_started: false, - } - } -} - -/// How to weigh each individual poll time, value is plucked from thin air. -const TASK_POLL_TIME_EWMA_ALPHA: f64 = 0.1; - -/// Ideally, we wouldn't go above this, value is plucked from thin air. -const TARGET_GLOBAL_QUEUE_INTERVAL: f64 = Duration::from_micros(200).as_nanos() as f64; - -/// Max value for the global queue interval. This is 2x the previous default -const MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 127; - -/// This is the previous default -const TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 61; - -impl Stats { - pub(crate) const DEFAULT_GLOBAL_QUEUE_INTERVAL: u32 = - TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL; - - pub(crate) fn new(worker_metrics: &WorkerMetrics) -> Stats { - // Seed the value with what we hope to see. - let task_poll_time_ewma = - TARGET_GLOBAL_QUEUE_INTERVAL / TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL as f64; - - Stats { - batch: MetricsBatch::new(worker_metrics), - task_poll_time_ewma, - } - } - - pub(crate) fn tuned_global_queue_interval(&self, config: &Config) -> u32 { - // If an interval is explicitly set, don't tune. - if let Some(configured) = config.global_queue_interval { - return configured; - } - - // As of Rust 1.45, casts from f64 -> u32 are saturating, which is fine here. - let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / self.task_poll_time_ewma) as u32; - - cmp::max( - // We don't want to return less than 2 as that would result in the - // global queue always getting checked first. 
- 2, - cmp::min( - MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL, - tasks_per_interval, - ), - ) - } - - pub(crate) fn submit(&mut self, to: &WorkerMetrics) { - self.batch.submit(to, self.task_poll_time_ewma as u64); - } - - pub(crate) fn about_to_park(&mut self) { - self.batch.about_to_park(); - } - - pub(crate) fn inc_local_schedule_count(&mut self) { - self.batch.inc_local_schedule_count(); - } - - pub(crate) fn start_processing_scheduled_tasks(&mut self, ephemeral: &mut Ephemeral) { - self.batch.start_processing_scheduled_tasks(); - - #[cfg(debug_assertions)] - { - debug_assert!(!ephemeral.batch_started); - ephemeral.batch_started = true; - } - - ephemeral.processing_scheduled_tasks_started_at = Instant::now(); - ephemeral.tasks_polled_in_batch = 0; - } - - pub(crate) fn end_processing_scheduled_tasks(&mut self, ephemeral: &mut Ephemeral) { - self.batch.end_processing_scheduled_tasks(); - - #[cfg(debug_assertions)] - { - debug_assert!(ephemeral.batch_started); - ephemeral.batch_started = false; - } - - // Update the EWMA task poll time - if ephemeral.tasks_polled_in_batch > 0 { - let now = Instant::now(); - - // If we "overflow" this conversion, we have bigger problems than - // slightly off stats. - let elapsed = (now - ephemeral.processing_scheduled_tasks_started_at).as_nanos() as f64; - let num_polls = ephemeral.tasks_polled_in_batch as f64; - - // Calculate the mean poll duration for a single task in the batch - let mean_poll_duration = elapsed / num_polls; - - // Compute the alpha weighted by the number of tasks polled this batch. - let weighted_alpha = 1.0 - (1.0 - TASK_POLL_TIME_EWMA_ALPHA).powf(num_polls); - - // Now compute the new weighted average task poll time. 
- self.task_poll_time_ewma = weighted_alpha * mean_poll_duration - + (1.0 - weighted_alpha) * self.task_poll_time_ewma; - } - } - - pub(crate) fn start_poll(&mut self, ephemeral: &mut Ephemeral) { - self.batch.start_poll(); - - ephemeral.tasks_polled_in_batch += 1; - } - - pub(crate) fn end_poll(&mut self) { - self.batch.end_poll(); - } - - pub(crate) fn incr_steal_count(&mut self, by: u16) { - self.batch.incr_steal_count(by); - } - - pub(crate) fn incr_steal_operations(&mut self) { - self.batch.incr_steal_operations(); - } - - pub(crate) fn incr_overflow_count(&mut self) { - self.batch.incr_overflow_count(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/trace_mock.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/trace_mock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/trace_mock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/trace_mock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -pub(super) struct TraceStatus {} - -impl TraceStatus { - pub(super) fn new(_: usize) -> Self { - Self {} - } - - pub(super) fn trace_requested(&self) -> bool { - false - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/trace.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/trace.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/trace.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/trace.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,61 +0,0 @@ -use crate::loom::sync::atomic::{AtomicBool, Ordering}; -use crate::loom::sync::{Barrier, Mutex}; -use crate::runtime::dump::Dump; -use crate::runtime::scheduler::multi_thread_alt::Handle; -use crate::sync::notify::Notify; - -/// Tracing status of the worker. 
-pub(super) struct TraceStatus { - pub(super) trace_requested: AtomicBool, - pub(super) trace_start: Barrier, - pub(super) trace_end: Barrier, - pub(super) result_ready: Notify, - pub(super) trace_result: Mutex>, -} - -impl TraceStatus { - pub(super) fn new(remotes_len: usize) -> Self { - Self { - trace_requested: AtomicBool::new(false), - trace_start: Barrier::new(remotes_len), - trace_end: Barrier::new(remotes_len), - result_ready: Notify::new(), - trace_result: Mutex::new(None), - } - } - - pub(super) fn trace_requested(&self) -> bool { - self.trace_requested.load(Ordering::Relaxed) - } - - pub(super) async fn start_trace_request(&self, handle: &Handle) { - while self - .trace_requested - .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) - .is_err() - { - handle.notify_all(); - crate::task::yield_now().await; - } - } - - pub(super) fn stash_result(&self, dump: Dump) { - let _ = self.trace_result.lock().insert(dump); - self.result_ready.notify_one(); - } - - pub(super) fn take_result(&self) -> Option { - self.trace_result.lock().take() - } - - pub(super) async fn end_trace_request(&self, handle: &Handle) { - while self - .trace_requested - .compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed) - .is_err() - { - handle.notify_all(); - crate::task::yield_now().await; - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/metrics.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/metrics.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/metrics.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/metrics.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -use super::Shared; - -impl Shared { - pub(crate) fn injection_queue_depth(&self) -> usize { - self.inject.len() - } - - pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> 
usize { - self.remotes[worker].steal.len() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/taskdump_mock.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/taskdump_mock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/taskdump_mock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/taskdump_mock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,7 +0,0 @@ -use super::{Core, Handle}; - -impl Handle { - pub(super) fn trace_core(&self, core: Box) -> Box { - core - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/taskdump.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/taskdump.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/taskdump.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker/taskdump.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,79 +0,0 @@ -use super::{Core, Handle, Shared}; - -use crate::loom::sync::Arc; -use crate::runtime::scheduler::multi_thread_alt::Stats; -use crate::runtime::task::trace::trace_multi_thread; -use crate::runtime::{dump, WorkerMetrics}; - -use std::time::Duration; - -impl Handle { - pub(super) fn trace_core(&self, mut core: Box) -> Box { - core.is_traced = false; - - if core.is_shutdown { - return core; - } - - // wait for other workers, or timeout without tracing - let timeout = Duration::from_millis(250); // a _very_ generous timeout - let barrier = - if let Some(barrier) = self.shared.trace_status.trace_start.wait_timeout(timeout) { - barrier - } else { - // don't attempt to trace - return core; - }; - - if !barrier.is_leader() { - // wait for leader to finish tracing - self.shared.trace_status.trace_end.wait(); - return core; - } - - // trace - - let 
owned = &self.shared.owned; - let mut local = self.shared.steal_all(); - let synced = &self.shared.synced; - let injection = &self.shared.inject; - - // safety: `trace_multi_thread` is invoked with the same `synced` that `injection` - // was created with. - let traces = unsafe { trace_multi_thread(owned, &mut local, synced, injection) } - .into_iter() - .map(dump::Task::new) - .collect(); - - let result = dump::Dump::new(traces); - - // stash the result - self.shared.trace_status.stash_result(result); - - // allow other workers to proceed - self.shared.trace_status.trace_end.wait(); - - core - } -} - -impl Shared { - /// Steal all tasks from remotes into a single local queue. - pub(super) fn steal_all(&self) -> super::queue::Local> { - let (_steal, mut local) = super::queue::local(); - - let worker_metrics = WorkerMetrics::new(); - let mut stats = Stats::new(&worker_metrics); - - for remote in self.remotes.iter() { - let steal = &remote.steal; - while !steal.is_empty() { - if let Some(task) = steal.steal_into(&mut local, &mut stats) { - local.push_back([task].into_iter()); - } - } - } - - local - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1595 +0,0 @@ -//! A scheduler is initialized with a fixed number of workers. Each worker is -//! driven by a thread. Each worker has a "core" which contains data such as the -//! run queue and other state. When `block_in_place` is called, the worker's -//! "core" is handed off to a new thread allowing the scheduler to continue to -//! make progress while the originating thread blocks. -//! -//! # Shutdown -//! -//! 
Shutting down the runtime involves the following steps: -//! -//! 1. The Shared::close method is called. This closes the inject queue and -//! OwnedTasks instance and wakes up all worker threads. -//! -//! 2. Each worker thread observes the close signal next time it runs -//! Core::maintenance by checking whether the inject queue is closed. -//! The Core::is_shutdown flag is set to true. -//! -//! 3. The worker thread calls `pre_shutdown` in parallel. Here, the worker -//! will keep removing tasks from OwnedTasks until it is empty. No new -//! tasks can be pushed to the OwnedTasks during or after this step as it -//! was closed in step 1. -//! -//! 5. The workers call Shared::shutdown to enter the single-threaded phase of -//! shutdown. These calls will push their core to Shared::shutdown_cores, -//! and the last thread to push its core will finish the shutdown procedure. -//! -//! 6. The local run queue of each core is emptied, then the inject queue is -//! emptied. -//! -//! At this point, shutdown has completed. It is not possible for any of the -//! collections to contain any tasks at this point, as each collection was -//! closed first, then emptied afterwards. -//! -//! ## Spawns during shutdown -//! -//! When spawning tasks during shutdown, there are two cases: -//! -//! * The spawner observes the OwnedTasks being open, and the inject queue is -//! closed. -//! * The spawner observes the OwnedTasks being closed and doesn't check the -//! inject queue. -//! -//! The first case can only happen if the OwnedTasks::bind call happens before -//! or during step 1 of shutdown. In this case, the runtime will clean up the -//! task in step 3 of shutdown. -//! -//! In the latter case, the task was not spawned and the task is immediately -//! cancelled by the spawner. -//! -//! The correctness of shutdown requires both the inject queue and OwnedTasks -//! collection to have a closed bit. With a close bit on only the inject queue, -//! 
spawning could run in to a situation where a task is successfully bound long -//! after the runtime has shut down. With a close bit on only the OwnedTasks, -//! the first spawning situation could result in the notification being pushed -//! to the inject queue after step 6 of shutdown, which would leave a task in -//! the inject queue indefinitely. This would be a ref-count cycle and a memory -//! leak. - -use crate::loom::sync::{Arc, Condvar, Mutex, MutexGuard}; -use crate::runtime; -use crate::runtime::context; -use crate::runtime::driver::Driver; -use crate::runtime::scheduler::multi_thread_alt::{ - idle, queue, stats, Counters, Handle, Idle, Overflow, Stats, TraceStatus, -}; -use crate::runtime::scheduler::{self, inject, Lock}; -use crate::runtime::task::OwnedTasks; -use crate::runtime::{blocking, coop, driver, task, Config, SchedulerMetrics, WorkerMetrics}; -use crate::util::atomic_cell::AtomicCell; -use crate::util::rand::{FastRand, RngSeedGenerator}; - -use std::cell::{Cell, RefCell}; -use std::cmp; -use std::task::Waker; -use std::time::Duration; - -cfg_metrics! { - mod metrics; -} - -mod taskdump_mock; - -/// A scheduler worker -/// -/// Data is stack-allocated and never migrates threads -pub(super) struct Worker { - /// Used to schedule bookkeeping tasks every so often. - tick: u32, - - /// True if the scheduler is being shutdown - pub(super) is_shutdown: bool, - - /// True if the scheduler is being traced - is_traced: bool, - - /// Counter used to track when to poll from the local queue vs. the - /// injection queue - num_seq_local_queue_polls: u32, - - /// How often to check the global queue - global_queue_interval: u32, - - /// Used to collect a list of workers to notify - workers_to_notify: Vec, - - /// Snapshot of idle core list. This helps speedup stealing - idle_snapshot: idle::Snapshot, - - stats: stats::Ephemeral, -} - -/// Core data -/// -/// Data is heap-allocated and migrates threads. 
-#[repr(align(128))] -pub(super) struct Core { - /// Index holding this core's remote/shared state. - pub(super) index: usize, - - lifo_slot: Option, - - /// The worker-local run queue. - run_queue: queue::Local>, - - /// True if the worker is currently searching for more work. Searching - /// involves attempting to steal from other workers. - pub(super) is_searching: bool, - - /// Per-worker runtime stats - stats: Stats, - - /// Fast random number generator. - rand: FastRand, -} - -/// State shared across all workers -pub(crate) struct Shared { - /// Per-core remote state. - remotes: Box<[Remote]>, - - /// Global task queue used for: - /// 1. Submit work to the scheduler while **not** currently on a worker thread. - /// 2. Submit work to the scheduler when a worker run queue is saturated - pub(super) inject: inject::Shared>, - - /// Coordinates idle workers - idle: Idle, - - /// Collection of all active tasks spawned onto this executor. - pub(super) owned: OwnedTasks>, - - /// Data synchronized by the scheduler mutex - pub(super) synced: Mutex, - - /// Power's Tokio's I/O, timers, etc... the responsibility of polling the - /// driver is shared across workers. - driver: AtomicCell, - - /// Condition variables used to unblock worker threads. Each worker thread - /// has its own condvar it waits on. - pub(super) condvars: Vec, - - /// The number of cores that have observed the trace signal. - pub(super) trace_status: TraceStatus, - - /// Scheduler configuration options - config: Config, - - /// Collects metrics from the runtime. - pub(super) scheduler_metrics: SchedulerMetrics, - - pub(super) worker_metrics: Box<[WorkerMetrics]>, - - /// Only held to trigger some code on drop. This is used to get internal - /// runtime metrics that can be useful when doing performance - /// investigations. This does nothing (empty struct, no drop impl) unless - /// the `tokio_internal_mt_counters` cfg flag is set. 
- _counters: Counters, -} - -/// Data synchronized by the scheduler mutex -pub(crate) struct Synced { - /// When worker is notified, it is assigned a core. The core is placed here - /// until the worker wakes up to take it. - pub(super) assigned_cores: Vec>>, - - /// Cores that have observed the shutdown signal - /// - /// The core is **not** placed back in the worker to avoid it from being - /// stolen by a thread that was spawned as part of `block_in_place`. - shutdown_cores: Vec>, - - /// The driver goes here when shutting down - shutdown_driver: Option>, - - /// Synchronized state for `Idle`. - pub(super) idle: idle::Synced, - - /// Synchronized state for `Inject`. - pub(crate) inject: inject::Synced, -} - -/// Used to communicate with a worker from other threads. -struct Remote { - /// When a task is scheduled from a worker, it is stored in this slot. The - /// worker will check this slot for a task **before** checking the run - /// queue. This effectively results in the **last** scheduled task to be run - /// next (LIFO). This is an optimization for improving locality which - /// benefits message passing patterns and helps to reduce latency. - // lifo_slot: Lifo, - - /// Steals tasks from this worker. - pub(super) steal: queue::Steal>, -} - -/// Thread-local context -pub(crate) struct Context { - // Current scheduler's handle - handle: Arc, - - /// Worker index - index: usize, - - /// True when the LIFO slot is enabled - lifo_enabled: Cell, - - /// Core data - core: RefCell>>, - - /// Used to pass cores to other threads when `block_in_place` is called - handoff_core: Arc>, - - /// Tasks to wake after resource drivers are polled. This is mostly to - /// handle yielded tasks. - pub(crate) defer: RefCell>, -} - -/// Running a task may consume the core. If the core is still available when -/// running the task completes, it is returned. Otherwise, the worker will need -/// to stop processing. 
-type RunResult = Result, ()>; -type NextTaskResult = Result<(Option, Box), ()>; - -/// A task handle -type Task = task::Task>; - -/// A notified task handle -type Notified = task::Notified>; - -/// Value picked out of thin-air. Running the LIFO slot a handful of times -/// seemms sufficient to benefit from locality. More than 3 times probably is -/// overweighing. The value can be tuned in the future with data that shows -/// improvements. -const MAX_LIFO_POLLS_PER_TICK: usize = 3; - -pub(super) fn create( - num_cores: usize, - driver: Driver, - driver_handle: driver::Handle, - blocking_spawner: blocking::Spawner, - seed_generator: RngSeedGenerator, - config: Config, -) -> runtime::Handle { - let mut num_workers = num_cores; - - // If the driver is enabled, we need an extra thread to handle polling the - // driver when all cores are busy. - if driver.is_enabled() { - num_workers += 1; - } - - let mut cores = Vec::with_capacity(num_cores); - let mut remotes = Vec::with_capacity(num_cores); - // Worker metrics are actually core based - let mut worker_metrics = Vec::with_capacity(num_cores); - - // Create the local queues - for i in 0..num_cores { - let (steal, run_queue) = queue::local(config.local_queue_capacity); - - let metrics = WorkerMetrics::from_config(&config); - let stats = Stats::new(&metrics); - - cores.push(Box::new(Core { - index: i, - lifo_slot: None, - run_queue, - is_searching: false, - stats, - rand: FastRand::from_seed(config.seed_generator.next_seed()), - })); - - remotes.push(Remote { - steal, - // lifo_slot: Lifo::new(), - }); - worker_metrics.push(metrics); - } - - // Allocate num-cores + 1 workers, so one worker can handle the I/O driver, - // if needed. 
- let (idle, idle_synced) = Idle::new(cores, num_workers); - let (inject, inject_synced) = inject::Shared::new(); - - let handle = Arc::new(Handle { - shared: Shared { - remotes: remotes.into_boxed_slice(), - inject, - idle, - owned: OwnedTasks::new(), - synced: Mutex::new(Synced { - assigned_cores: (0..num_workers).map(|_| None).collect(), - shutdown_cores: Vec::with_capacity(num_cores), - shutdown_driver: None, - idle: idle_synced, - inject: inject_synced, - }), - driver: AtomicCell::new(Some(Box::new(driver))), - condvars: (0..num_workers).map(|_| Condvar::new()).collect(), - trace_status: TraceStatus::new(num_cores), - config, - scheduler_metrics: SchedulerMetrics::new(), - worker_metrics: worker_metrics.into_boxed_slice(), - _counters: Counters, - }, - driver: driver_handle, - blocking_spawner, - seed_generator, - }); - - let rt_handle = runtime::Handle { - inner: scheduler::Handle::MultiThreadAlt(handle), - }; - - // Eagerly start worker threads - for index in 0..num_workers { - let handle = rt_handle.inner.expect_multi_thread_alt(); - let h2 = handle.clone(); - let handoff_core = Arc::new(AtomicCell::new(None)); - - handle - .blocking_spawner - .spawn_blocking(&rt_handle, move || run(index, h2, handoff_core, false)); - } - - rt_handle -} - -#[track_caller] -pub(crate) fn block_in_place(f: F) -> R -where - F: FnOnce() -> R, -{ - // Try to steal the worker core back - struct Reset(coop::Budget); - - impl Drop for Reset { - fn drop(&mut self) { - with_current(|maybe_cx| { - if let Some(cx) = maybe_cx { - let core = cx.handoff_core.take(); - let mut cx_core = cx.core.borrow_mut(); - assert!(cx_core.is_none()); - *cx_core = core; - - // Reset the task budget as we are re-entering the - // runtime. - coop::set(self.0); - } - }); - } - } - - let mut had_entered = false; - - let setup_result = with_current(|maybe_cx| { - match ( - crate::runtime::context::current_enter_context(), - maybe_cx.is_some(), - ) { - (context::EnterRuntime::Entered { .. 
}, true) => { - // We are on a thread pool runtime thread, so we just need to - // set up blocking. - had_entered = true; - } - ( - context::EnterRuntime::Entered { - allow_block_in_place, - }, - false, - ) => { - // We are on an executor, but _not_ on the thread pool. That is - // _only_ okay if we are in a thread pool runtime's block_on - // method: - if allow_block_in_place { - had_entered = true; - return Ok(()); - } else { - // This probably means we are on the current_thread runtime or in a - // LocalSet, where it is _not_ okay to block. - return Err( - "can call blocking only when running on the multi-threaded runtime", - ); - } - } - (context::EnterRuntime::NotEntered, true) => { - // This is a nested call to block_in_place (we already exited). - // All the necessary setup has already been done. - return Ok(()); - } - (context::EnterRuntime::NotEntered, false) => { - // We are outside of the tokio runtime, so blocking is fine. - // We can also skip all of the thread pool blocking setup steps. - return Ok(()); - } - } - - let cx = maybe_cx.expect("no .is_some() == false cases above should lead here"); - - // Get the worker core. If none is set, then blocking is fine! - let core = match cx.core.borrow_mut().take() { - Some(core) => core, - None => return Ok(()), - }; - - // In order to block, the core must be sent to another thread for - // execution. - // - // First, move the core back into the worker's shared core slot. - cx.handoff_core.set(core); - - // Next, clone the worker handle and send it to a new thread for - // processing. - // - // Once the blocking task is done executing, we will attempt to - // steal the core back. - let index = cx.index; - let handle = cx.handle.clone(); - let handoff_core = cx.handoff_core.clone(); - runtime::spawn_blocking(move || run(index, handle, handoff_core, true)); - Ok(()) - }); - - if let Err(panic_message) = setup_result { - panic!("{}", panic_message); - } - - if had_entered { - // Unset the current task's budget. 
Blocking sections are not - // constrained by task budgets. - let _reset = Reset(coop::stop()); - - crate::runtime::context::exit_runtime(f) - } else { - f() - } -} - -fn run( - index: usize, - handle: Arc, - handoff_core: Arc>, - blocking_in_place: bool, -) { - struct AbortOnPanic; - - impl Drop for AbortOnPanic { - fn drop(&mut self) { - if std::thread::panicking() { - eprintln!("worker thread panicking; aborting process"); - std::process::abort(); - } - } - } - - // Catching panics on worker threads in tests is quite tricky. Instead, when - // debug assertions are enabled, we just abort the process. - #[cfg(debug_assertions)] - let _abort_on_panic = AbortOnPanic; - - let num_workers = handle.shared.condvars.len(); - - let mut worker = Worker { - tick: 0, - num_seq_local_queue_polls: 0, - global_queue_interval: Stats::DEFAULT_GLOBAL_QUEUE_INTERVAL, - is_shutdown: false, - is_traced: false, - workers_to_notify: Vec::with_capacity(num_workers - 1), - idle_snapshot: idle::Snapshot::new(&handle.shared.idle), - stats: stats::Ephemeral::new(), - }; - - let sched_handle = scheduler::Handle::MultiThreadAlt(handle.clone()); - - crate::runtime::context::enter_runtime(&sched_handle, true, |_| { - // Set the worker context. - let cx = scheduler::Context::MultiThreadAlt(Context { - index, - lifo_enabled: Cell::new(!handle.shared.config.disable_lifo_slot), - handle, - core: RefCell::new(None), - handoff_core, - defer: RefCell::new(Vec::with_capacity(64)), - }); - - context::set_scheduler(&cx, || { - let cx = cx.expect_multi_thread_alt(); - - // Run the worker - let res = worker.run(&cx, blocking_in_place); - // `err` here signifies the core was lost, this is an expected end - // state for a worker. - debug_assert!(res.is_err()); - - // Check if there are any deferred tasks to notify. This can happen when - // the worker core is lost due to `block_in_place()` being called from - // within the task. 
- if !cx.defer.borrow().is_empty() { - worker.schedule_deferred_without_core(&cx, &mut cx.shared().synced.lock()); - } - }); - }); -} - -macro_rules! try_task { - ($e:expr) => {{ - let (task, core) = $e?; - if task.is_some() { - return Ok((task, core)); - } - core - }}; -} - -macro_rules! try_task_new_batch { - ($w:expr, $e:expr) => {{ - let (task, mut core) = $e?; - if task.is_some() { - core.stats.start_processing_scheduled_tasks(&mut $w.stats); - return Ok((task, core)); - } - core - }}; -} - -impl Worker { - fn run(&mut self, cx: &Context, blocking_in_place: bool) -> RunResult { - let (maybe_task, mut core) = { - if blocking_in_place { - if let Some(core) = cx.handoff_core.take() { - (None, core) - } else { - // Just shutdown - return Err(()); - } - } else { - let mut synced = cx.shared().synced.lock(); - - // First try to acquire an available core - if let Some(core) = self.try_acquire_available_core(cx, &mut synced) { - // Try to poll a task from the global queue - let maybe_task = cx.shared().next_remote_task_synced(&mut synced); - (maybe_task, core) - } else { - // block the thread to wait for a core to be assinged to us - self.wait_for_core(cx, synced)? - } - } - }; - - core.stats.start_processing_scheduled_tasks(&mut self.stats); - - if let Some(task) = maybe_task { - core = self.run_task(cx, core, task)?; - } - - while !self.is_shutdown { - let (maybe_task, c) = self.next_task(cx, core)?; - core = c; - - if let Some(task) = maybe_task { - core = self.run_task(cx, core, task)?; - } else { - // The only reason to get `None` from `next_task` is we have - // entered the shutdown phase. - assert!(self.is_shutdown); - break; - } - } - - cx.shared().shutdown_core(&cx.handle, core); - - // It is possible that tasks wake others during drop, so we need to - // clear the defer list. 
- self.shutdown_clear_defer(cx); - - Err(()) - } - - // Try to acquire an available core, but do not block the thread - fn try_acquire_available_core( - &mut self, - cx: &Context, - synced: &mut Synced, - ) -> Option> { - if let Some(mut core) = cx - .shared() - .idle - .try_acquire_available_core(&mut synced.idle) - { - self.reset_acquired_core(cx, synced, &mut core); - Some(core) - } else { - None - } - } - - // Block the current thread, waiting for an available core - fn wait_for_core( - &mut self, - cx: &Context, - mut synced: MutexGuard<'_, Synced>, - ) -> NextTaskResult { - if cx.shared().idle.needs_searching() { - if let Some(mut core) = self.try_acquire_available_core(cx, &mut synced) { - cx.shared().idle.transition_worker_to_searching(&mut core); - return Ok((None, core)); - } - } - - cx.shared() - .idle - .transition_worker_to_parked(&mut synced, cx.index); - - // Wait until a core is available, then exit the loop. - let mut core = loop { - if let Some(core) = synced.assigned_cores[cx.index].take() { - break core; - } - - // If shutting down, abort - if cx.shared().inject.is_closed(&synced.inject) { - self.shutdown_clear_defer(cx); - return Err(()); - } - - synced = cx.shared().condvars[cx.index].wait(synced).unwrap(); - }; - - self.reset_acquired_core(cx, &mut synced, &mut core); - - if self.is_shutdown { - // Currently shutting down, don't do any more work - return Ok((None, core)); - } - - let n = cmp::max(core.run_queue.remaining_slots() / 2, 1); - let maybe_task = self.next_remote_task_batch_synced(cx, &mut synced, &mut core, n); - - Ok((maybe_task, core)) - } - - /// Ensure core's state is set correctly for the worker to start using. 
- fn reset_acquired_core(&mut self, cx: &Context, synced: &mut Synced, core: &mut Core) { - self.global_queue_interval = core.stats.tuned_global_queue_interval(&cx.shared().config); - debug_assert!(self.global_queue_interval > 1); - - // Reset `lifo_enabled` here in case the core was previously stolen from - // a task that had the LIFO slot disabled. - self.reset_lifo_enabled(cx); - - // At this point, the local queue should be empty - #[cfg(not(loom))] - debug_assert!(core.run_queue.is_empty()); - - // Update shutdown state while locked - self.update_global_flags(cx, synced); - } - - /// Finds the next task to run, this could be from a queue or stealing. If - /// none are available, the thread sleeps and tries again. - fn next_task(&mut self, cx: &Context, mut core: Box) -> NextTaskResult { - self.assert_lifo_enabled_is_correct(cx); - - if self.is_traced { - core = cx.handle.trace_core(core); - } - - // Increment the tick - self.tick = self.tick.wrapping_add(1); - - // Runs maintenance every so often. When maintenance is run, the - // driver is checked, which may result in a task being found. - core = try_task!(self.maybe_maintenance(&cx, core)); - - // Check the LIFO slot, local run queue, and the injection queue for - // a notified task. - core = try_task!(self.next_notified_task(cx, core)); - - // We consumed all work in the queues and will start searching for work. - core.stats.end_processing_scheduled_tasks(&mut self.stats); - - super::counters::inc_num_no_local_work(); - - if !cx.defer.borrow().is_empty() { - // We are deferring tasks, so poll the resource driver and schedule - // the deferred tasks. - try_task_new_batch!(self, self.park_yield(cx, core)); - - panic!("what happened to the deferred tasks? 🤔"); - } - - while !self.is_shutdown { - // Search for more work, this involves trying to poll the resource - // driver, steal from other workers, and check the global queue - // again. 
- core = try_task_new_batch!(self, self.search_for_work(cx, core)); - - debug_assert!(cx.defer.borrow().is_empty()); - core = try_task_new_batch!(self, self.park(cx, core)); - } - - // Shutting down, drop any deferred tasks - self.shutdown_clear_defer(cx); - - Ok((None, core)) - } - - fn next_notified_task(&mut self, cx: &Context, mut core: Box) -> NextTaskResult { - self.num_seq_local_queue_polls += 1; - - if self.num_seq_local_queue_polls % self.global_queue_interval == 0 { - super::counters::inc_global_queue_interval(); - - self.num_seq_local_queue_polls = 0; - - // Update the global queue interval, if needed - self.tune_global_queue_interval(cx, &mut core); - - if let Some(task) = self.next_remote_task(cx) { - return Ok((Some(task), core)); - } - } - - if let Some(task) = core.next_local_task() { - return Ok((Some(task), core)); - } - - self.next_remote_task_batch(cx, core) - } - - fn next_remote_task(&self, cx: &Context) -> Option { - if cx.shared().inject.is_empty() { - return None; - } - - let mut synced = cx.shared().synced.lock(); - cx.shared().next_remote_task_synced(&mut synced) - } - - fn next_remote_task_batch(&self, cx: &Context, mut core: Box) -> NextTaskResult { - if cx.shared().inject.is_empty() { - return Ok((None, core)); - } - - // Other threads can only **remove** tasks from the current worker's - // `run_queue`. So, we can be confident that by the time we call - // `run_queue.push_back` below, there will be *at least* `cap` - // available slots in the queue. 
- let cap = usize::min( - core.run_queue.remaining_slots(), - usize::max(core.run_queue.max_capacity() / 2, 1), - ); - - let mut synced = cx.shared().synced.lock(); - let maybe_task = self.next_remote_task_batch_synced(cx, &mut synced, &mut core, cap); - Ok((maybe_task, core)) - } - - fn next_remote_task_batch_synced( - &self, - cx: &Context, - synced: &mut Synced, - core: &mut Core, - max: usize, - ) -> Option { - super::counters::inc_num_remote_batch(); - - // The worker is currently idle, pull a batch of work from the - // injection queue. We don't want to pull *all* the work so other - // workers can also get some. - let n = if core.is_searching { - cx.shared().inject.len() / cx.shared().idle.num_searching() + 1 - } else { - cx.shared().inject.len() / cx.shared().remotes.len() + 1 - }; - - let n = usize::min(n, max) + 1; - - // safety: passing in the correct `inject::Synced`. - let mut tasks = unsafe { cx.shared().inject.pop_n(&mut synced.inject, n) }; - - // Pop the first task to return immedietly - let ret = tasks.next(); - - // Push the rest of the on the run queue - core.run_queue.push_back(tasks); - - ret - } - - /// Function responsible for stealing tasks from another worker - /// - /// Note: Only if less than half the workers are searching for tasks to steal - /// a new worker will actually try to steal. The idea is to make sure not all - /// workers will be trying to steal at the same time. 
- fn search_for_work(&mut self, cx: &Context, mut core: Box) -> NextTaskResult { - #[cfg(not(loom))] - const ROUNDS: usize = 4; - - #[cfg(loom)] - const ROUNDS: usize = 1; - - debug_assert!(core.lifo_slot.is_none()); - #[cfg(not(loom))] - debug_assert!(core.run_queue.is_empty()); - - if !core.run_queue.can_steal() { - return Ok((None, core)); - } - - if !self.transition_to_searching(cx, &mut core) { - return Ok((None, core)); - } - - // core = try_task!(self, self.poll_driver(cx, core)); - - // Get a snapshot of which workers are idle - cx.shared().idle.snapshot(&mut self.idle_snapshot); - - let num = cx.shared().remotes.len(); - - for i in 0..ROUNDS { - // Start from a random worker - let start = core.rand.fastrand_n(num as u32) as usize; - - if let Some(task) = self.steal_one_round(cx, &mut core, start) { - return Ok((Some(task), core)); - } - - core = try_task!(self.next_remote_task_batch(cx, core)); - - if i > 0 { - super::counters::inc_num_spin_stall(); - std::thread::sleep(std::time::Duration::from_micros(i as u64)); - } - } - - Ok((None, core)) - } - - fn steal_one_round(&self, cx: &Context, core: &mut Core, start: usize) -> Option { - let num = cx.shared().remotes.len(); - - for i in 0..num { - let i = (start + i) % num; - - // Don't steal from ourself! We know we don't have work. - if i == core.index { - continue; - } - - // If the core is currently idle, then there is nothing to steal. - if self.idle_snapshot.is_idle(i) { - continue; - } - - let target = &cx.shared().remotes[i]; - - if let Some(task) = target - .steal - .steal_into(&mut core.run_queue, &mut core.stats) - { - return Some(task); - } - } - - None - } - - fn run_task(&mut self, cx: &Context, mut core: Box, task: Notified) -> RunResult { - let task = cx.shared().owned.assert_owner(task); - - // Make sure the worker is not in the **searching** state. This enables - // another idle worker to try to steal work. 
- if self.transition_from_searching(cx, &mut core) { - super::counters::inc_num_relay_search(); - cx.shared().notify_parked_local(); - } - - self.assert_lifo_enabled_is_correct(cx); - - // Measure the poll start time. Note that we may end up polling other - // tasks under this measurement. In this case, the tasks came from the - // LIFO slot and are considered part of the current task for scheduling - // purposes. These tasks inherent the "parent"'s limits. - core.stats.start_poll(&mut self.stats); - - // Make the core available to the runtime context - *cx.core.borrow_mut() = Some(core); - - // Run the task - coop::budget(|| { - super::counters::inc_num_polls(); - task.run(); - let mut lifo_polls = 0; - - // As long as there is budget remaining and a task exists in the - // `lifo_slot`, then keep running. - loop { - // Check if we still have the core. If not, the core was stolen - // by another worker. - let mut core = match cx.core.borrow_mut().take() { - Some(core) => core, - None => { - // In this case, we cannot call `reset_lifo_enabled()` - // because the core was stolen. The stealer will handle - // that at the top of `Context::run` - return Err(()); - } - }; - - // Check for a task in the LIFO slot - let task = match core.next_lifo_task() { - Some(task) => task, - None => { - self.reset_lifo_enabled(cx); - core.stats.end_poll(); - return Ok(core); - } - }; - - if !coop::has_budget_remaining() { - core.stats.end_poll(); - - // Not enough budget left to run the LIFO task, push it to - // the back of the queue and return. - core.run_queue - .push_back_or_overflow(task, cx.shared(), &mut core.stats); - // If we hit this point, the LIFO slot should be enabled. - // There is no need to reset it. - debug_assert!(cx.lifo_enabled.get()); - return Ok(core); - } - - // Track that we are about to run a task from the LIFO slot. 
- lifo_polls += 1; - super::counters::inc_lifo_schedules(); - - // Disable the LIFO slot if we reach our limit - // - // In ping-ping style workloads where task A notifies task B, - // which notifies task A again, continuously prioritizing the - // LIFO slot can cause starvation as these two tasks will - // repeatedly schedule the other. To mitigate this, we limit the - // number of times the LIFO slot is prioritized. - if lifo_polls >= MAX_LIFO_POLLS_PER_TICK { - cx.lifo_enabled.set(false); - super::counters::inc_lifo_capped(); - } - - // Run the LIFO task, then loop - *cx.core.borrow_mut() = Some(core); - let task = cx.shared().owned.assert_owner(task); - super::counters::inc_num_lifo_polls(); - task.run(); - } - }) - } - - fn schedule_deferred_with_core<'a>( - &mut self, - cx: &'a Context, - mut core: Box, - synced: impl FnOnce() -> MutexGuard<'a, Synced>, - ) -> NextTaskResult { - let mut defer = cx.defer.borrow_mut(); - - // Grab a task to run next - let task = defer.pop(); - - if task.is_none() { - return Ok((None, core)); - } - - if !defer.is_empty() { - let mut synced = synced(); - - // Number of tasks we want to try to spread across idle workers - let num_fanout = cmp::min(defer.len(), cx.shared().idle.num_idle(&synced.idle)); - - // Cap the number of threads woken up at one time. This is to limit - // the number of no-op wakes and reduce mutext contention. - // - // This number was picked after some basic benchmarks, but it can - // probably be tuned using the mean poll time value (slower task - // polls can leverage more woken workers). - let num_fanout = cmp::min(2, num_fanout); - - if num_fanout > 0 { - cx.shared() - .push_remote_task_batch_synced(&mut synced, defer.drain(..num_fanout)); - - cx.shared() - .idle - .notify_mult(&mut synced, &mut self.workers_to_notify, num_fanout); - } - - // Do not run the task while holding the lock... - drop(synced); - } - - // Notify any workers - for worker in self.workers_to_notify.drain(..) 
{ - cx.shared().condvars[worker].notify_one() - } - - if !defer.is_empty() { - // Push the rest of the tasks on the local queue - for task in defer.drain(..) { - core.run_queue - .push_back_or_overflow(task, cx.shared(), &mut core.stats); - } - - cx.shared().notify_parked_local(); - } - - Ok((task, core)) - } - - fn schedule_deferred_without_core<'a>(&mut self, cx: &Context, synced: &mut Synced) { - let mut defer = cx.defer.borrow_mut(); - let num = defer.len(); - - if num > 0 { - // Push all tasks to the injection queue - cx.shared() - .push_remote_task_batch_synced(synced, defer.drain(..)); - - debug_assert!(self.workers_to_notify.is_empty()); - - // Notify workers - cx.shared() - .idle - .notify_mult(synced, &mut self.workers_to_notify, num); - - // Notify any workers - for worker in self.workers_to_notify.drain(..) { - cx.shared().condvars[worker].notify_one() - } - } - } - - fn maybe_maintenance(&mut self, cx: &Context, mut core: Box) -> NextTaskResult { - if self.tick % cx.shared().config.event_interval == 0 { - super::counters::inc_num_maintenance(); - - core.stats.end_processing_scheduled_tasks(&mut self.stats); - - // Run regularly scheduled maintenance - core = try_task_new_batch!(self, self.park_yield(cx, core)); - - core.stats.start_processing_scheduled_tasks(&mut self.stats); - } - - Ok((None, core)) - } - - fn flush_metrics(&self, cx: &Context, core: &mut Core) { - core.stats.submit(&cx.shared().worker_metrics[core.index]); - } - - fn update_global_flags(&mut self, cx: &Context, synced: &mut Synced) { - if !self.is_shutdown { - self.is_shutdown = cx.shared().inject.is_closed(&synced.inject); - } - - if !self.is_traced { - self.is_traced = cx.shared().trace_status.trace_requested(); - } - } - - fn park_yield(&mut self, cx: &Context, core: Box) -> NextTaskResult { - // Call `park` with a 0 timeout. This enables the I/O driver, timer, ... - // to run without actually putting the thread to sleep. 
- if let Some(mut driver) = cx.shared().driver.take() { - driver.park_timeout(&cx.handle.driver, Duration::from_millis(0)); - - cx.shared().driver.set(driver); - } - - // If there are more I/O events, schedule them. - let (maybe_task, mut core) = - self.schedule_deferred_with_core(cx, core, || cx.shared().synced.lock())?; - - self.flush_metrics(cx, &mut core); - self.update_global_flags(cx, &mut cx.shared().synced.lock()); - - Ok((maybe_task, core)) - } - - /* - fn poll_driver(&mut self, cx: &Context, core: Box) -> NextTaskResult { - // Call `park` with a 0 timeout. This enables the I/O driver, timer, ... - // to run without actually putting the thread to sleep. - if let Some(mut driver) = cx.shared().driver.take() { - driver.park_timeout(&cx.handle.driver, Duration::from_millis(0)); - - cx.shared().driver.set(driver); - - // If there are more I/O events, schedule them. - self.schedule_deferred_with_core(cx, core, || cx.shared().synced.lock()) - } else { - Ok((None, core)) - } - } - */ - - fn park(&mut self, cx: &Context, mut core: Box) -> NextTaskResult { - if let Some(f) = &cx.shared().config.before_park { - f(); - } - - if self.can_transition_to_parked(&mut core) { - debug_assert!(!self.is_shutdown); - debug_assert!(!self.is_traced); - - core = try_task!(self.do_park(cx, core)); - } - - if let Some(f) = &cx.shared().config.after_unpark { - f(); - } - - Ok((None, core)) - } - - fn do_park(&mut self, cx: &Context, mut core: Box) -> NextTaskResult { - let was_searching = core.is_searching; - - // Acquire the lock - let mut synced = cx.shared().synced.lock(); - - // The local queue should be empty at this point - #[cfg(not(loom))] - debug_assert!(core.run_queue.is_empty()); - - // Try one last time to get tasks - let n = cmp::max(core.run_queue.remaining_slots() / 2, 1); - if let Some(task) = self.next_remote_task_batch_synced(cx, &mut synced, &mut core, n) { - return Ok((Some(task), core)); - } - - if !was_searching { - if cx - .shared() - .idle - 
.transition_worker_to_searching_if_needed(&mut synced.idle, &mut core) - { - // Skip parking, go back to searching - return Ok((None, core)); - } - } - - super::counters::inc_num_parks(); - core.stats.about_to_park(); - // Flush metrics to the runtime metrics aggregator - self.flush_metrics(cx, &mut core); - - // If the runtime is shutdown, skip parking - self.update_global_flags(cx, &mut synced); - - if self.is_shutdown { - return Ok((None, core)); - } - - // Release the core - core.is_searching = false; - cx.shared().idle.release_core(&mut synced, core); - - drop(synced); - - if was_searching { - if cx.shared().idle.transition_worker_from_searching() { - // cx.shared().idle.snapshot(&mut self.idle_snapshot); - // We were the last searching worker, we need to do one last check - for i in 0..cx.shared().remotes.len() { - if !cx.shared().remotes[i].steal.is_empty() { - let mut synced = cx.shared().synced.lock(); - - // Try to get a core - if let Some(mut core) = self.try_acquire_available_core(cx, &mut synced) { - cx.shared().idle.transition_worker_to_searching(&mut core); - return Ok((None, core)); - } else { - // Fall back to the park routine - break; - } - } - } - } - } - - if let Some(mut driver) = cx.shared().take_driver() { - // Wait for driver events - driver.park(&cx.handle.driver); - - synced = cx.shared().synced.lock(); - - if cx.shared().inject.is_closed(&mut synced.inject) { - synced.shutdown_driver = Some(driver); - self.shutdown_clear_defer(cx); - cx.shared().shutdown_finalize(&cx.handle, &mut synced); - return Err(()); - } - - // Put the driver back - cx.shared().driver.set(driver); - - // Try to acquire an available core to schedule I/O events - if let Some(core) = self.try_acquire_available_core(cx, &mut synced) { - // This may result in a task being run - self.schedule_deferred_with_core(cx, core, move || synced) - } else { - // Schedule any deferred tasks - self.schedule_deferred_without_core(cx, &mut synced); - - // Wait for a core. 
- self.wait_for_core(cx, synced) - } - } else { - synced = cx.shared().synced.lock(); - - // Wait for a core to be assigned to us - self.wait_for_core(cx, synced) - } - } - - fn transition_to_searching(&self, cx: &Context, core: &mut Core) -> bool { - if !core.is_searching { - cx.shared().idle.try_transition_worker_to_searching(core); - } - - core.is_searching - } - - /// Returns `true` if another worker must be notified - fn transition_from_searching(&self, cx: &Context, core: &mut Core) -> bool { - if !core.is_searching { - return false; - } - - core.is_searching = false; - cx.shared().idle.transition_worker_from_searching() - } - - fn can_transition_to_parked(&self, core: &mut Core) -> bool { - !self.has_tasks(core) && !self.is_shutdown && !self.is_traced - } - - fn has_tasks(&self, core: &Core) -> bool { - core.lifo_slot.is_some() || !core.run_queue.is_empty() - } - - fn reset_lifo_enabled(&self, cx: &Context) { - cx.lifo_enabled - .set(!cx.handle.shared.config.disable_lifo_slot); - } - - fn assert_lifo_enabled_is_correct(&self, cx: &Context) { - debug_assert_eq!( - cx.lifo_enabled.get(), - !cx.handle.shared.config.disable_lifo_slot - ); - } - - fn tune_global_queue_interval(&mut self, cx: &Context, core: &mut Core) { - let next = core.stats.tuned_global_queue_interval(&cx.shared().config); - - debug_assert!(next > 1); - - // Smooth out jitter - if abs_diff(self.global_queue_interval, next) > 2 { - self.global_queue_interval = next; - } - } - - fn shutdown_clear_defer(&self, cx: &Context) { - let mut defer = cx.defer.borrow_mut(); - - for task in defer.drain(..) 
{ - drop(task); - } - } -} - -impl Context { - pub(crate) fn defer(&self, waker: &Waker) { - // TODO: refactor defer across all runtimes - waker.wake_by_ref(); - } - - fn shared(&self) -> &Shared { - &self.handle.shared - } -} - -impl Core { - fn next_local_task(&mut self) -> Option { - self.next_lifo_task().or_else(|| self.run_queue.pop()) - } - - fn next_lifo_task(&mut self) -> Option { - self.lifo_slot.take() - } -} - -impl Shared { - fn next_remote_task_synced(&self, synced: &mut Synced) -> Option { - // safety: we only have access to a valid `Synced` in this file. - unsafe { self.inject.pop(&mut synced.inject) } - } - - pub(super) fn schedule_task(&self, task: Notified, is_yield: bool) { - use std::ptr; - - with_current(|maybe_cx| { - if let Some(cx) = maybe_cx { - // Make sure the task is part of the **current** scheduler. - if ptr::eq(self, &cx.handle.shared) { - // And the current thread still holds a core - if let Some(core) = cx.core.borrow_mut().as_mut() { - if is_yield { - cx.defer.borrow_mut().push(task); - } else { - self.schedule_local(cx, core, task); - } - } else { - // This can happen if either the core was stolen - // (`block_in_place`) or the notification happens from - // the driver. - cx.defer.borrow_mut().push(task); - } - return; - } - } - - // Otherwise, use the inject queue. 
- self.schedule_remote(task); - }) - } - - fn schedule_local(&self, cx: &Context, core: &mut Core, task: Notified) { - core.stats.inc_local_schedule_count(); - - if cx.lifo_enabled.get() { - // Push to the LIFO slot - let prev = std::mem::replace(&mut core.lifo_slot, Some(task)); - // let prev = cx.shared().remotes[core.index].lifo_slot.swap_local(task); - - if let Some(prev) = prev { - core.run_queue - .push_back_or_overflow(prev, self, &mut core.stats); - } else { - return; - } - } else { - core.run_queue - .push_back_or_overflow(task, self, &mut core.stats); - } - - self.notify_parked_local(); - } - - fn notify_parked_local(&self) { - super::counters::inc_num_inc_notify_local(); - self.idle.notify_local(self); - } - - fn schedule_remote(&self, task: Notified) { - super::counters::inc_num_notify_remote(); - self.scheduler_metrics.inc_remote_schedule_count(); - - let mut synced = self.synced.lock(); - // Push the task in the - self.push_remote_task(&mut synced, task); - - // Notify a worker. The mutex is passed in and will be released as part - // of the method call. 
- self.idle.notify_remote(synced, self); - } - - pub(super) fn close(&self, handle: &Handle) { - { - let mut synced = self.synced.lock(); - - if let Some(driver) = self.driver.take() { - synced.shutdown_driver = Some(driver); - } - - if !self.inject.close(&mut synced.inject) { - return; - } - - // Set the shutdown flag on all available cores - self.idle.shutdown(&mut synced, self); - } - - // Any unassigned cores need to be shutdown, but we have to first drop - // the lock - self.idle.shutdown_unassigned_cores(handle, self); - } - - fn push_remote_task(&self, synced: &mut Synced, task: Notified) { - // safety: passing in correct `idle::Synced` - unsafe { - self.inject.push(&mut synced.inject, task); - } - } - - fn push_remote_task_batch(&self, iter: I) - where - I: Iterator>>, - { - unsafe { - self.inject.push_batch(self, iter); - } - } - - fn push_remote_task_batch_synced(&self, synced: &mut Synced, iter: I) - where - I: Iterator>>, - { - unsafe { - self.inject.push_batch(&mut synced.inject, iter); - } - } - - fn take_driver(&self) -> Option> { - if !self.driver_enabled() { - return None; - } - - self.driver.take() - } - - fn driver_enabled(&self) -> bool { - self.condvars.len() > self.remotes.len() - } - - pub(super) fn shutdown_core(&self, handle: &Handle, mut core: Box) { - self.owned.close_and_shutdown_all(); - - core.stats.submit(&self.worker_metrics[core.index]); - - let mut synced = self.synced.lock(); - synced.shutdown_cores.push(core); - - self.shutdown_finalize(handle, &mut synced); - } - - pub(super) fn shutdown_finalize(&self, handle: &Handle, synced: &mut Synced) { - // Wait for all cores - if synced.shutdown_cores.len() != self.remotes.len() { - return; - } - - let driver = synced.shutdown_driver.take(); - - if self.driver_enabled() && driver.is_none() { - return; - } - - debug_assert!(self.owned.is_empty()); - - for mut core in synced.shutdown_cores.drain(..) 
{ - // Drain tasks from the local queue - while core.next_local_task().is_some() {} - } - - // Shutdown the driver - if let Some(mut driver) = driver { - driver.shutdown(&handle.driver); - } - - // Drain the injection queue - // - // We already shut down every task, so we can simply drop the tasks. We - // cannot call `next_remote_task()` because we already hold the lock. - // - // safety: passing in correct `idle::Synced` - while let Some(task) = self.next_remote_task_synced(synced) { - drop(task); - } - } -} - -impl Overflow> for Shared { - fn push(&self, task: task::Notified>) { - self.push_remote_task(&mut self.synced.lock(), task); - } - - fn push_batch(&self, iter: I) - where - I: Iterator>>, - { - self.push_remote_task_batch(iter) - } -} - -impl<'a> Lock for &'a Shared { - type Handle = SyncedGuard<'a>; - - fn lock(self) -> Self::Handle { - SyncedGuard { - lock: self.synced.lock(), - } - } -} - -impl<'a> Lock for &'a Shared { - type Handle = SyncedGuard<'a>; - - fn lock(self) -> Self::Handle { - SyncedGuard { - lock: self.synced.lock(), - } - } -} - -impl task::Schedule for Arc { - fn release(&self, task: &Task) -> Option { - self.shared.owned.remove(task) - } - - fn schedule(&self, task: Notified) { - self.shared.schedule_task(task, false); - } - - fn yield_now(&self, task: Notified) { - self.shared.schedule_task(task, true); - } -} - -impl AsMut for Synced { - fn as_mut(&mut self) -> &mut Synced { - self - } -} - -pub(crate) struct SyncedGuard<'a> { - lock: crate::loom::sync::MutexGuard<'a, Synced>, -} - -impl<'a> AsMut for SyncedGuard<'a> { - fn as_mut(&mut self) -> &mut inject::Synced { - &mut self.lock.inject - } -} - -impl<'a> AsMut for SyncedGuard<'a> { - fn as_mut(&mut self) -> &mut Synced { - &mut self.lock - } -} - -#[track_caller] -fn with_current(f: impl FnOnce(Option<&Context>) -> R) -> R { - use scheduler::Context::MultiThreadAlt; - - context::with_scheduler(|ctx| match ctx { - Some(MultiThreadAlt(ctx)) => f(Some(ctx)), - _ => f(None), - }) -} 
- -// `u32::abs_diff` is not available on Tokio's MSRV. -fn abs_diff(a: u32, b: u32) -> u32 { - if a > b { - a - b - } else { - b - a - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/signal/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/signal/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/signal/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/signal/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,142 +0,0 @@ -#![cfg_attr(not(feature = "rt"), allow(dead_code))] - -//! Signal driver - -use crate::runtime::{driver, io}; -use crate::signal::registry::globals; - -use mio::net::UnixStream; -use std::io::{self as std_io, Read}; -use std::sync::{Arc, Weak}; -use std::time::Duration; - -/// Responsible for registering wakeups when an OS signal is received, and -/// subsequently dispatching notifications to any signal listeners as appropriate. -/// -/// Note: this driver relies on having an enabled IO driver in order to listen to -/// pipe write wakeups. -#[derive(Debug)] -pub(crate) struct Driver { - /// Thread parker. The `Driver` park implementation delegates to this. - io: io::Driver, - - /// A pipe for receiving wake events from the signal handler - receiver: UnixStream, - - /// Shared state. The driver keeps a strong ref and the handle keeps a weak - /// ref. The weak ref is used to check if the driver is still active before - /// trying to register a signal handler. - inner: Arc<()>, -} - -#[derive(Debug, Default)] -pub(crate) struct Handle { - /// Paired w/ the `Arc` above and is used to check if the driver is still - /// around before attempting to register a signal handler. - inner: Weak<()>, -} - -// ===== impl Driver ===== - -impl Driver { - /// Creates a new signal `Driver` instance that delegates wakeups to `park`. 
- pub(crate) fn new(io: io::Driver, io_handle: &io::Handle) -> std_io::Result { - use std::mem::ManuallyDrop; - use std::os::unix::io::{AsRawFd, FromRawFd}; - - // NB: We give each driver a "fresh" receiver file descriptor to avoid - // the issues described in alexcrichton/tokio-process#42. - // - // In the past we would reuse the actual receiver file descriptor and - // swallow any errors around double registration of the same descriptor. - // I'm not sure if the second (failed) registration simply doesn't end - // up receiving wake up notifications, or there could be some race - // condition when consuming readiness events, but having distinct - // descriptors appears to mitigate this. - // - // Unfortunately we cannot just use a single global UnixStream instance - // either, since we can't assume they will always be registered with the - // exact same reactor. - // - // Mio 0.7 removed `try_clone()` as an API due to unexpected behavior - // with registering dups with the same reactor. In this case, duping is - // safe as each dup is registered with separate reactors **and** we - // only expect at least one dup to receive the notification. - - // Manually drop as we don't actually own this instance of UnixStream. - let receiver_fd = globals().receiver.as_raw_fd(); - - // safety: there is nothing unsafe about this, but the `from_raw_fd` fn is marked as unsafe. - let original = - ManuallyDrop::new(unsafe { std::os::unix::net::UnixStream::from_raw_fd(receiver_fd) }); - let mut receiver = UnixStream::from_std(original.try_clone()?); - - io_handle.register_signal_receiver(&mut receiver)?; - - Ok(Self { - io, - receiver, - inner: Arc::new(()), - }) - } - - /// Returns a handle to this event loop which can be sent across threads - /// and can be used as a proxy to the event loop itself. 
- pub(crate) fn handle(&self) -> Handle { - Handle { - inner: Arc::downgrade(&self.inner), - } - } - - pub(crate) fn park(&mut self, handle: &driver::Handle) { - self.io.park(handle); - self.process(); - } - - pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) { - self.io.park_timeout(handle, duration); - self.process(); - } - - pub(crate) fn shutdown(&mut self, handle: &driver::Handle) { - self.io.shutdown(handle) - } - - fn process(&mut self) { - // If the signal pipe has not received a readiness event, then there is - // nothing else to do. - if !self.io.consume_signal_ready() { - return; - } - - // Drain the pipe completely so we can receive a new readiness event - // if another signal has come in. - let mut buf = [0; 128]; - loop { - match self.receiver.read(&mut buf) { - Ok(0) => panic!("EOF on self-pipe"), - Ok(_) => continue, // Keep reading - Err(e) if e.kind() == std_io::ErrorKind::WouldBlock => break, - Err(e) => panic!("Bad read on self-pipe: {}", e), - } - } - - // Broadcast any signals which were received - globals().broadcast(); - } -} - -// ===== impl Handle ===== - -impl Handle { - pub(crate) fn check_inner(&self) -> std_io::Result<()> { - if self.inner.strong_count() > 0 { - Ok(()) - } else { - Err(std_io::Error::new( - std_io::ErrorKind::Other, - "signal driver gone", - )) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/abort.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/abort.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/abort.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/abort.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,87 +0,0 @@ -use crate::runtime::task::{Header, RawTask}; -use std::fmt; -use std::panic::{RefUnwindSafe, UnwindSafe}; - -/// An owned permission to abort a spawned task, without awaiting its completion. 
-/// -/// Unlike a [`JoinHandle`], an `AbortHandle` does *not* represent the -/// permission to await the task's completion, only to terminate it. -/// -/// The task may be aborted by calling the [`AbortHandle::abort`] method. -/// Dropping an `AbortHandle` releases the permission to terminate the task -/// --- it does *not* abort the task. -/// -/// [`JoinHandle`]: crate::task::JoinHandle -#[cfg_attr(docsrs, doc(cfg(feature = "rt")))] -pub struct AbortHandle { - raw: RawTask, -} - -impl AbortHandle { - pub(super) fn new(raw: RawTask) -> Self { - Self { raw } - } - - /// Abort the task associated with the handle. - /// - /// Awaiting a cancelled task might complete as usual if the task was - /// already completed at the time it was cancelled, but most likely it - /// will fail with a [cancelled] `JoinError`. - /// - /// If the task was already cancelled, such as by [`JoinHandle::abort`], - /// this method will do nothing. - /// - /// [cancelled]: method@super::error::JoinError::is_cancelled - /// [`JoinHandle::abort`]: method@super::JoinHandle::abort - pub fn abort(&self) { - self.raw.remote_abort(); - } - - /// Checks if the task associated with this `AbortHandle` has finished. - /// - /// Please note that this method can return `false` even if `abort` has been - /// called on the task. This is because the cancellation process may take - /// some time, and this method does not return `true` until it has - /// completed. - pub fn is_finished(&self) -> bool { - let state = self.raw.state().load(); - state.is_complete() - } - - /// Returns a [task ID] that uniquely identifies this task relative to other - /// currently spawned tasks. - /// - /// **Note**: This is an [unstable API][unstable]. The public API of this type - /// may break in 1.x releases. See [the documentation on unstable - /// features][unstable] for details. 
- /// - /// [task ID]: crate::task::Id - /// [unstable]: crate#unstable-features - #[cfg(tokio_unstable)] - #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] - pub fn id(&self) -> super::Id { - // Safety: The header pointer is valid. - unsafe { Header::get_id(self.raw.header_ptr()) } - } -} - -unsafe impl Send for AbortHandle {} -unsafe impl Sync for AbortHandle {} - -impl UnwindSafe for AbortHandle {} -impl RefUnwindSafe for AbortHandle {} - -impl fmt::Debug for AbortHandle { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - // Safety: The header pointer is valid. - let id_ptr = unsafe { Header::get_id_ptr(self.raw.header_ptr()) }; - let id = unsafe { id_ptr.as_ref() }; - fmt.debug_struct("AbortHandle").field("id", id).finish() - } -} - -impl Drop for AbortHandle { - fn drop(&mut self) { - self.raw.drop_abort_handle(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/core.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/core.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/core.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/core.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,494 +0,0 @@ -//! Core task module. -//! -//! # Safety -//! -//! The functions in this module are private to the `task` module. All of them -//! should be considered `unsafe` to use, but are not marked as such since it -//! would be too noisy. -//! -//! Make sure to consult the relevant safety section of each function before -//! use. - -use crate::future::Future; -use crate::loom::cell::UnsafeCell; -use crate::runtime::context; -use crate::runtime::task::raw::{self, Vtable}; -use crate::runtime::task::state::State; -use crate::runtime::task::{Id, Schedule}; -use crate::util::linked_list; - -use std::num::NonZeroU64; -use std::pin::Pin; -use std::ptr::NonNull; -use std::task::{Context, Poll, Waker}; - -/// The task cell. Contains the components of the task. 
-/// -/// It is critical for `Header` to be the first field as the task structure will -/// be referenced by both *mut Cell and *mut Header. -/// -/// Any changes to the layout of this struct _must_ also be reflected in the -/// const fns in raw.rs. -/// -// # This struct should be cache padded to avoid false sharing. The cache padding rules are copied -// from crossbeam-utils/src/cache_padded.rs -// -// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache -// lines at a time, so we have to align to 128 bytes rather than 64. -// -// Sources: -// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf -// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107 -// -// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size. -// -// Sources: -// - https://www.mono-project.com/news/2016/09/12/arm64-icache/ -// -// powerpc64 has 128-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9 -#[cfg_attr( - any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - ), - repr(align(128)) -)] -// arm, mips, mips64, sparc, and hexagon have 32-byte cache line size. 
-// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L17 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/hexagon/include/asm/cache.h#L12 -#[cfg_attr( - any( - target_arch = "arm", - target_arch = "mips", - target_arch = "mips64", - target_arch = "sparc", - target_arch = "hexagon", - ), - repr(align(32)) -)] -// m68k has 16-byte cache line size. -// -// Sources: -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/m68k/include/asm/cache.h#L9 -#[cfg_attr(target_arch = "m68k", repr(align(16)))] -// s390x has 256-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/s390/include/asm/cache.h#L13 -#[cfg_attr(target_arch = "s390x", repr(align(256)))] -// x86, riscv, wasm, and sparc64 have 64-byte cache line size. 
-// -// Sources: -// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L19 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10 -// -// All others are assumed to have 64-byte cache line size. -#[cfg_attr( - not(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "arm", - target_arch = "mips", - target_arch = "mips64", - target_arch = "sparc", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "s390x", - )), - repr(align(64)) -)] -#[repr(C)] -pub(super) struct Cell { - /// Hot task state data - pub(super) header: Header, - - /// Either the future or output, depending on the execution stage. - pub(super) core: Core, - - /// Cold data - pub(super) trailer: Trailer, -} - -pub(super) struct CoreStage { - stage: UnsafeCell>, -} - -/// The core of the task. -/// -/// Holds the future or output, depending on the stage of execution. -/// -/// Any changes to the layout of this struct _must_ also be reflected in the -/// const fns in raw.rs. -#[repr(C)] -pub(super) struct Core { - /// Scheduler used to drive this future. - pub(super) scheduler: S, - - /// The task's ID, used for populating `JoinError`s. - pub(super) task_id: Id, - - /// Either the future or the output. - pub(super) stage: CoreStage, -} - -/// Crate public as this is also needed by the pool. -#[repr(C)] -pub(crate) struct Header { - /// Task state. - pub(super) state: State, - - /// Pointer to next task, used with the injection queue. - pub(super) queue_next: UnsafeCell>>, - - /// Table of function pointers for executing actions on the task. 
- pub(super) vtable: &'static Vtable, - - /// This integer contains the id of the OwnedTasks or LocalOwnedTasks that - /// this task is stored in. If the task is not in any list, should be the - /// id of the list that it was previously in, or `None` if it has never been - /// in any list. - /// - /// Once a task has been bound to a list, it can never be bound to another - /// list, even if removed from the first list. - /// - /// The id is not unset when removed from a list because we want to be able - /// to read the id without synchronization, even if it is concurrently being - /// removed from the list. - pub(super) owner_id: UnsafeCell>, - - /// The tracing ID for this instrumented task. - #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(super) tracing_id: Option, -} - -unsafe impl Send for Header {} -unsafe impl Sync for Header {} - -/// Cold data is stored after the future. Data is considered cold if it is only -/// used during creation or shutdown of the task. -pub(super) struct Trailer { - /// Pointers for the linked list in the `OwnedTasks` that owns this task. - pub(super) owned: linked_list::Pointers

, - /// Consumer task waiting on completion of this task. - pub(super) waker: UnsafeCell>, -} - -generate_addr_of_methods! { - impl<> Trailer { - pub(super) unsafe fn addr_of_owned(self: NonNull) -> NonNull> { - &self.owned - } - } -} - -/// Either the future or the output. -pub(super) enum Stage { - Running(T), - Finished(super::Result), - Consumed, -} - -impl Cell { - /// Allocates a new task cell, containing the header, trailer, and core - /// structures. - pub(super) fn new(future: T, scheduler: S, state: State, task_id: Id) -> Box> { - // Separated into a non-generic function to reduce LLVM codegen - fn new_header( - state: State, - vtable: &'static Vtable, - #[cfg(all(tokio_unstable, feature = "tracing"))] tracing_id: Option, - ) -> Header { - Header { - state, - queue_next: UnsafeCell::new(None), - vtable, - owner_id: UnsafeCell::new(None), - #[cfg(all(tokio_unstable, feature = "tracing"))] - tracing_id, - } - } - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let tracing_id = future.id(); - let vtable = raw::vtable::(); - let result = Box::new(Cell { - header: new_header( - state, - vtable, - #[cfg(all(tokio_unstable, feature = "tracing"))] - tracing_id, - ), - core: Core { - scheduler, - stage: CoreStage { - stage: UnsafeCell::new(Stage::Running(future)), - }, - task_id, - }, - trailer: Trailer::new(), - }); - - #[cfg(debug_assertions)] - { - // Using a separate function for this code avoids instantiating it separately for every `T`. 
- unsafe fn check(header: &Header, trailer: &Trailer, scheduler: &S, task_id: &Id) { - let trailer_addr = trailer as *const Trailer as usize; - let trailer_ptr = unsafe { Header::get_trailer(NonNull::from(header)) }; - assert_eq!(trailer_addr, trailer_ptr.as_ptr() as usize); - - let scheduler_addr = scheduler as *const S as usize; - let scheduler_ptr = unsafe { Header::get_scheduler::(NonNull::from(header)) }; - assert_eq!(scheduler_addr, scheduler_ptr.as_ptr() as usize); - - let id_addr = task_id as *const Id as usize; - let id_ptr = unsafe { Header::get_id_ptr(NonNull::from(header)) }; - assert_eq!(id_addr, id_ptr.as_ptr() as usize); - } - unsafe { - check( - &result.header, - &result.trailer, - &result.core.scheduler, - &result.core.task_id, - ); - } - } - - result - } -} - -impl CoreStage { - pub(super) fn with_mut(&self, f: impl FnOnce(*mut Stage) -> R) -> R { - self.stage.with_mut(f) - } -} - -/// Set and clear the task id in the context when the future is executed or -/// dropped, or when the output produced by the future is dropped. -pub(crate) struct TaskIdGuard { - parent_task_id: Option, -} - -impl TaskIdGuard { - fn enter(id: Id) -> Self { - TaskIdGuard { - parent_task_id: context::set_current_task_id(Some(id)), - } - } -} - -impl Drop for TaskIdGuard { - fn drop(&mut self) { - context::set_current_task_id(self.parent_task_id); - } -} - -impl Core { - /// Polls the future. - /// - /// # Safety - /// - /// The caller must ensure it is safe to mutate the `state` field. This - /// requires ensuring mutual exclusion between any concurrent thread that - /// might modify the future or output field. - /// - /// The mutual exclusion is implemented by `Harness` and the `Lifecycle` - /// component of the task state. - /// - /// `self` must also be pinned. This is handled by storing the task on the - /// heap. 
- pub(super) fn poll(&self, mut cx: Context<'_>) -> Poll { - let res = { - self.stage.stage.with_mut(|ptr| { - // Safety: The caller ensures mutual exclusion to the field. - let future = match unsafe { &mut *ptr } { - Stage::Running(future) => future, - _ => unreachable!("unexpected stage"), - }; - - // Safety: The caller ensures the future is pinned. - let future = unsafe { Pin::new_unchecked(future) }; - - let _guard = TaskIdGuard::enter(self.task_id); - future.poll(&mut cx) - }) - }; - - if res.is_ready() { - self.drop_future_or_output(); - } - - res - } - - /// Drops the future. - /// - /// # Safety - /// - /// The caller must ensure it is safe to mutate the `stage` field. - pub(super) fn drop_future_or_output(&self) { - // Safety: the caller ensures mutual exclusion to the field. - unsafe { - self.set_stage(Stage::Consumed); - } - } - - /// Stores the task output. - /// - /// # Safety - /// - /// The caller must ensure it is safe to mutate the `stage` field. - pub(super) fn store_output(&self, output: super::Result) { - // Safety: the caller ensures mutual exclusion to the field. - unsafe { - self.set_stage(Stage::Finished(output)); - } - } - - /// Takes the task output. - /// - /// # Safety - /// - /// The caller must ensure it is safe to mutate the `stage` field. - pub(super) fn take_output(&self) -> super::Result { - use std::mem; - - self.stage.stage.with_mut(|ptr| { - // Safety:: the caller ensures mutual exclusion to the field. 
- match mem::replace(unsafe { &mut *ptr }, Stage::Consumed) { - Stage::Finished(output) => output, - _ => panic!("JoinHandle polled after completion"), - } - }) - } - - unsafe fn set_stage(&self, stage: Stage) { - let _guard = TaskIdGuard::enter(self.task_id); - self.stage.stage.with_mut(|ptr| *ptr = stage) - } -} - -impl Header { - pub(super) unsafe fn set_next(&self, next: Option>) { - self.queue_next.with_mut(|ptr| *ptr = next); - } - - // safety: The caller must guarantee exclusive access to this field, and - // must ensure that the id is either `None` or the id of the OwnedTasks - // containing this task. - pub(super) unsafe fn set_owner_id(&self, owner: NonZeroU64) { - self.owner_id.with_mut(|ptr| *ptr = Some(owner)); - } - - pub(super) fn get_owner_id(&self) -> Option { - // safety: If there are concurrent writes, then that write has violated - // the safety requirements on `set_owner_id`. - unsafe { self.owner_id.with(|ptr| *ptr) } - } - - /// Gets a pointer to the `Trailer` of the task containing this `Header`. - /// - /// # Safety - /// - /// The provided raw pointer must point at the header of a task. - pub(super) unsafe fn get_trailer(me: NonNull
) -> NonNull { - let offset = me.as_ref().vtable.trailer_offset; - let trailer = me.as_ptr().cast::().add(offset).cast::(); - NonNull::new_unchecked(trailer) - } - - /// Gets a pointer to the scheduler of the task containing this `Header`. - /// - /// # Safety - /// - /// The provided raw pointer must point at the header of a task. - /// - /// The generic type S must be set to the correct scheduler type for this - /// task. - pub(super) unsafe fn get_scheduler(me: NonNull
) -> NonNull { - let offset = me.as_ref().vtable.scheduler_offset; - let scheduler = me.as_ptr().cast::().add(offset).cast::(); - NonNull::new_unchecked(scheduler) - } - - /// Gets a pointer to the id of the task containing this `Header`. - /// - /// # Safety - /// - /// The provided raw pointer must point at the header of a task. - pub(super) unsafe fn get_id_ptr(me: NonNull
) -> NonNull { - let offset = me.as_ref().vtable.id_offset; - let id = me.as_ptr().cast::().add(offset).cast::(); - NonNull::new_unchecked(id) - } - - /// Gets the id of the task containing this `Header`. - /// - /// # Safety - /// - /// The provided raw pointer must point at the header of a task. - pub(super) unsafe fn get_id(me: NonNull
) -> Id { - let ptr = Header::get_id_ptr(me).as_ptr(); - *ptr - } - - /// Gets the tracing id of the task containing this `Header`. - /// - /// # Safety - /// - /// The provided raw pointer must point at the header of a task. - #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(super) unsafe fn get_tracing_id(me: &NonNull
) -> Option<&tracing::Id> { - me.as_ref().tracing_id.as_ref() - } -} - -impl Trailer { - fn new() -> Self { - Trailer { - waker: UnsafeCell::new(None), - owned: linked_list::Pointers::new(), - } - } - - pub(super) unsafe fn set_waker(&self, waker: Option) { - self.waker.with_mut(|ptr| { - *ptr = waker; - }); - } - - pub(super) unsafe fn will_wake(&self, waker: &Waker) -> bool { - self.waker - .with(|ptr| (*ptr).as_ref().unwrap().will_wake(waker)) - } - - pub(super) fn wake_join(&self) { - self.waker.with(|ptr| match unsafe { &*ptr } { - Some(waker) => waker.wake_by_ref(), - None => panic!("waker missing"), - }); - } -} - -#[test] -#[cfg(not(loom))] -fn header_lte_cache_line() { - use std::mem::size_of; - - assert!(size_of::
() <= 8 * size_of::<*const ()>()); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/error.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/error.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/error.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,165 +0,0 @@ -use std::any::Any; -use std::fmt; -use std::io; - -use super::Id; -use crate::util::SyncWrapper; -cfg_rt! { - /// Task failed to execute to completion. - pub struct JoinError { - repr: Repr, - id: Id, - } -} - -enum Repr { - Cancelled, - Panic(SyncWrapper>), -} - -impl JoinError { - pub(crate) fn cancelled(id: Id) -> JoinError { - JoinError { - repr: Repr::Cancelled, - id, - } - } - - pub(crate) fn panic(id: Id, err: Box) -> JoinError { - JoinError { - repr: Repr::Panic(SyncWrapper::new(err)), - id, - } - } - - /// Returns true if the error was caused by the task being cancelled. - pub fn is_cancelled(&self) -> bool { - matches!(&self.repr, Repr::Cancelled) - } - - /// Returns true if the error was caused by the task panicking. - /// - /// # Examples - /// - /// ``` - /// use std::panic; - /// - /// #[tokio::main] - /// async fn main() { - /// let err = tokio::spawn(async { - /// panic!("boom"); - /// }).await.unwrap_err(); - /// - /// assert!(err.is_panic()); - /// } - /// ``` - pub fn is_panic(&self) -> bool { - matches!(&self.repr, Repr::Panic(_)) - } - - /// Consumes the join error, returning the object with which the task panicked. - /// - /// # Panics - /// - /// `into_panic()` panics if the `Error` does not represent the underlying - /// task terminating with a panic. Use `is_panic` to check the error reason - /// or `try_into_panic` for a variant that does not panic. 
- /// - /// # Examples - /// - /// ```should_panic - /// use std::panic; - /// - /// #[tokio::main] - /// async fn main() { - /// let err = tokio::spawn(async { - /// panic!("boom"); - /// }).await.unwrap_err(); - /// - /// if err.is_panic() { - /// // Resume the panic on the main task - /// panic::resume_unwind(err.into_panic()); - /// } - /// } - /// ``` - #[track_caller] - pub fn into_panic(self) -> Box { - self.try_into_panic() - .expect("`JoinError` reason is not a panic.") - } - - /// Consumes the join error, returning the object with which the task - /// panicked if the task terminated due to a panic. Otherwise, `self` is - /// returned. - /// - /// # Examples - /// - /// ```should_panic - /// use std::panic; - /// - /// #[tokio::main] - /// async fn main() { - /// let err = tokio::spawn(async { - /// panic!("boom"); - /// }).await.unwrap_err(); - /// - /// if let Ok(reason) = err.try_into_panic() { - /// // Resume the panic on the main task - /// panic::resume_unwind(reason); - /// } - /// } - /// ``` - pub fn try_into_panic(self) -> Result, JoinError> { - match self.repr { - Repr::Panic(p) => Ok(p.into_inner()), - _ => Err(self), - } - } - - /// Returns a [task ID] that identifies the task which errored relative to - /// other currently spawned tasks. - /// - /// **Note**: This is an [unstable API][unstable]. The public API of this type - /// may break in 1.x releases. See [the documentation on unstable - /// features][unstable] for details. 
- /// - /// [task ID]: crate::task::Id - /// [unstable]: crate#unstable-features - #[cfg(tokio_unstable)] - #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] - pub fn id(&self) -> Id { - self.id - } -} - -impl fmt::Display for JoinError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match &self.repr { - Repr::Cancelled => write!(fmt, "task {} was cancelled", self.id), - Repr::Panic(_) => write!(fmt, "task {} panicked", self.id), - } - } -} - -impl fmt::Debug for JoinError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match &self.repr { - Repr::Cancelled => write!(fmt, "JoinError::Cancelled({:?})", self.id), - Repr::Panic(_) => write!(fmt, "JoinError::Panic({:?}, ...)", self.id), - } - } -} - -impl std::error::Error for JoinError {} - -impl From for io::Error { - fn from(src: JoinError) -> io::Error { - io::Error::new( - io::ErrorKind::Other, - match src.repr { - Repr::Cancelled => "task was cancelled", - Repr::Panic(_) => "task panicked", - }, - ) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/harness.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/harness.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/harness.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/harness.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,517 +0,0 @@ -use crate::future::Future; -use crate::runtime::task::core::{Cell, Core, Header, Trailer}; -use crate::runtime::task::state::{Snapshot, State}; -use crate::runtime::task::waker::waker_ref; -use crate::runtime::task::{Id, JoinError, Notified, RawTask, Schedule, Task}; - -use std::any::Any; -use std::mem; -use std::mem::ManuallyDrop; -use std::panic; -use std::ptr::NonNull; -use std::task::{Context, Poll, Waker}; - -/// Typed raw task handle. -pub(super) struct Harness { - cell: NonNull>, -} - -impl Harness -where - T: Future, - S: 'static, -{ - pub(super) unsafe fn from_raw(ptr: NonNull
) -> Harness { - Harness { - cell: ptr.cast::>(), - } - } - - fn header_ptr(&self) -> NonNull
{ - self.cell.cast() - } - - fn header(&self) -> &Header { - unsafe { &*self.header_ptr().as_ptr() } - } - - fn state(&self) -> &State { - &self.header().state - } - - fn trailer(&self) -> &Trailer { - unsafe { &self.cell.as_ref().trailer } - } - - fn core(&self) -> &Core { - unsafe { &self.cell.as_ref().core } - } -} - -/// Task operations that can be implemented without being generic over the -/// scheduler or task. Only one version of these methods should exist in the -/// final binary. -impl RawTask { - pub(super) fn drop_reference(self) { - if self.state().ref_dec() { - self.dealloc(); - } - } - - /// This call consumes a ref-count and notifies the task. This will create a - /// new Notified and submit it if necessary. - /// - /// The caller does not need to hold a ref-count besides the one that was - /// passed to this call. - pub(super) fn wake_by_val(&self) { - use super::state::TransitionToNotifiedByVal; - - match self.state().transition_to_notified_by_val() { - TransitionToNotifiedByVal::Submit => { - // The caller has given us a ref-count, and the transition has - // created a new ref-count, so we now hold two. We turn the new - // ref-count Notified and pass it to the call to `schedule`. - // - // The old ref-count is retained for now to ensure that the task - // is not dropped during the call to `schedule` if the call - // drops the task it was given. - self.schedule(); - - // Now that we have completed the call to schedule, we can - // release our ref-count. - self.drop_reference(); - } - TransitionToNotifiedByVal::Dealloc => { - self.dealloc(); - } - TransitionToNotifiedByVal::DoNothing => {} - } - } - - /// This call notifies the task. It will not consume any ref-counts, but the - /// caller should hold a ref-count. This will create a new Notified and - /// submit it if necessary. 
- pub(super) fn wake_by_ref(&self) { - use super::state::TransitionToNotifiedByRef; - - match self.state().transition_to_notified_by_ref() { - TransitionToNotifiedByRef::Submit => { - // The transition above incremented the ref-count for a new task - // and the caller also holds a ref-count. The caller's ref-count - // ensures that the task is not destroyed even if the new task - // is dropped before `schedule` returns. - self.schedule(); - } - TransitionToNotifiedByRef::DoNothing => {} - } - } - - /// Remotely aborts the task. - /// - /// The caller should hold a ref-count, but we do not consume it. - /// - /// This is similar to `shutdown` except that it asks the runtime to perform - /// the shutdown. This is necessary to avoid the shutdown happening in the - /// wrong thread for non-Send tasks. - pub(super) fn remote_abort(&self) { - if self.state().transition_to_notified_and_cancel() { - // The transition has created a new ref-count, which we turn into - // a Notified and pass to the task. - // - // Since the caller holds a ref-count, the task cannot be destroyed - // before the call to `schedule` returns even if the call drops the - // `Notified` internally. - self.schedule(); - } - } - - /// Try to set the waker notified when the task is complete. Returns true if - /// the task has already completed. If this call returns false, then the - /// waker will not be notified. - pub(super) fn try_set_join_waker(&self, waker: &Waker) -> bool { - can_read_output(self.header(), self.trailer(), waker) - } -} - -impl Harness -where - T: Future, - S: Schedule, -{ - pub(super) fn drop_reference(self) { - if self.state().ref_dec() { - self.dealloc(); - } - } - - /// Polls the inner future. A ref-count is consumed. - /// - /// All necessary state checks and transitions are performed. - /// Panics raised while polling the future are handled. - pub(super) fn poll(self) { - // We pass our ref-count to `poll_inner`. 
- match self.poll_inner() { - PollFuture::Notified => { - // The `poll_inner` call has given us two ref-counts back. - // We give one of them to a new task and call `yield_now`. - self.core() - .scheduler - .yield_now(Notified(self.get_new_task())); - - // The remaining ref-count is now dropped. We kept the extra - // ref-count until now to ensure that even if the `yield_now` - // call drops the provided task, the task isn't deallocated - // before after `yield_now` returns. - self.drop_reference(); - } - PollFuture::Complete => { - self.complete(); - } - PollFuture::Dealloc => { - self.dealloc(); - } - PollFuture::Done => (), - } - } - - /// Polls the task and cancel it if necessary. This takes ownership of a - /// ref-count. - /// - /// If the return value is Notified, the caller is given ownership of two - /// ref-counts. - /// - /// If the return value is Complete, the caller is given ownership of a - /// single ref-count, which should be passed on to `complete`. - /// - /// If the return value is Dealloc, then this call consumed the last - /// ref-count and the caller should call `dealloc`. - /// - /// Otherwise the ref-count is consumed and the caller should not access - /// `self` again. - fn poll_inner(&self) -> PollFuture { - use super::state::{TransitionToIdle, TransitionToRunning}; - - match self.state().transition_to_running() { - TransitionToRunning::Success => { - // Separated to reduce LLVM codegen - fn transition_result_to_poll_future(result: TransitionToIdle) -> PollFuture { - match result { - TransitionToIdle::Ok => PollFuture::Done, - TransitionToIdle::OkNotified => PollFuture::Notified, - TransitionToIdle::OkDealloc => PollFuture::Dealloc, - TransitionToIdle::Cancelled => PollFuture::Complete, - } - } - let header_ptr = self.header_ptr(); - let waker_ref = waker_ref::(&header_ptr); - let cx = Context::from_waker(&waker_ref); - let res = poll_future(self.core(), cx); - - if res == Poll::Ready(()) { - // The future completed. 
Move on to complete the task. - return PollFuture::Complete; - } - - let transition_res = self.state().transition_to_idle(); - if let TransitionToIdle::Cancelled = transition_res { - // The transition to idle failed because the task was - // cancelled during the poll. - cancel_task(self.core()); - } - transition_result_to_poll_future(transition_res) - } - TransitionToRunning::Cancelled => { - cancel_task(self.core()); - PollFuture::Complete - } - TransitionToRunning::Failed => PollFuture::Done, - TransitionToRunning::Dealloc => PollFuture::Dealloc, - } - } - - /// Forcibly shuts down the task. - /// - /// Attempt to transition to `Running` in order to forcibly shutdown the - /// task. If the task is currently running or in a state of completion, then - /// there is nothing further to do. When the task completes running, it will - /// notice the `CANCELLED` bit and finalize the task. - pub(super) fn shutdown(self) { - if !self.state().transition_to_shutdown() { - // The task is concurrently running. No further work needed. - self.drop_reference(); - return; - } - - // By transitioning the lifecycle to `Running`, we have permission to - // drop the future. - cancel_task(self.core()); - self.complete(); - } - - pub(super) fn dealloc(self) { - // Release the join waker, if there is one. - self.trailer().waker.with_mut(drop); - - // Check causality - self.core().stage.with_mut(drop); - - // Safety: The caller of this method just transitioned our ref-count to - // zero, so it is our responsibility to release the allocation. - // - // We don't hold any references into the allocation at this point, but - // it is possible for another thread to still hold a `&State` into the - // allocation if that other thread has decremented its last ref-count, - // but has not yet returned from the relevant method on `State`. - // - // However, the `State` type consists of just an `AtomicUsize`, and an - // `AtomicUsize` wraps the entirety of its contents in an `UnsafeCell`. 
- // As explained in the documentation for `UnsafeCell`, such references - // are allowed to be dangling after their last use, even if the - // reference has not yet gone out of scope. - unsafe { - drop(Box::from_raw(self.cell.as_ptr())); - } - } - - // ===== join handle ===== - - /// Read the task output into `dst`. - pub(super) fn try_read_output(self, dst: &mut Poll>, waker: &Waker) { - if can_read_output(self.header(), self.trailer(), waker) { - *dst = Poll::Ready(self.core().take_output()); - } - } - - pub(super) fn drop_join_handle_slow(self) { - // Try to unset `JOIN_INTEREST`. This must be done as a first step in - // case the task concurrently completed. - if self.state().unset_join_interested().is_err() { - // It is our responsibility to drop the output. This is critical as - // the task output may not be `Send` and as such must remain with - // the scheduler or `JoinHandle`. i.e. if the output remains in the - // task structure until the task is deallocated, it may be dropped - // by a Waker on any arbitrary thread. - // - // Panics are delivered to the user via the `JoinHandle`. Given that - // they are dropping the `JoinHandle`, we assume they are not - // interested in the panic and swallow it. - let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { - self.core().drop_future_or_output(); - })); - } - - // Drop the `JoinHandle` reference, possibly deallocating the task - self.drop_reference(); - } - - // ====== internal ====== - - /// Completes the task. This method assumes that the state is RUNNING. - fn complete(self) { - // The future has completed and its output has been written to the task - // stage. We transition from running to complete. - - let snapshot = self.state().transition_to_complete(); - - // We catch panics here in case dropping the future or waking the - // JoinHandle panics. 
- let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { - if !snapshot.is_join_interested() { - // The `JoinHandle` is not interested in the output of - // this task. It is our responsibility to drop the - // output. - self.core().drop_future_or_output(); - } else if snapshot.is_join_waker_set() { - // Notify the waker. Reading the waker field is safe per rule 4 - // in task/mod.rs, since the JOIN_WAKER bit is set and the call - // to transition_to_complete() above set the COMPLETE bit. - self.trailer().wake_join(); - } - })); - - // The task has completed execution and will no longer be scheduled. - let num_release = self.release(); - - if self.state().transition_to_terminal(num_release) { - self.dealloc(); - } - } - - /// Releases the task from the scheduler. Returns the number of ref-counts - /// that should be decremented. - fn release(&self) -> usize { - // We don't actually increment the ref-count here, but the new task is - // never destroyed, so that's ok. - let me = ManuallyDrop::new(self.get_new_task()); - - if let Some(task) = self.core().scheduler.release(&me) { - mem::forget(task); - 2 - } else { - 1 - } - } - - /// Creates a new task that holds its own ref-count. - /// - /// # Safety - /// - /// Any use of `self` after this call must ensure that a ref-count to the - /// task holds the task alive until after the use of `self`. Passing the - /// returned Task to any method on `self` is unsound if dropping the Task - /// could drop `self` before the call on `self` returned. - fn get_new_task(&self) -> Task { - // safety: The header is at the beginning of the cell, so this cast is - // safe. 
- unsafe { Task::from_raw(self.cell.cast()) } - } -} - -fn can_read_output(header: &Header, trailer: &Trailer, waker: &Waker) -> bool { - // Load a snapshot of the current task state - let snapshot = header.state.load(); - - debug_assert!(snapshot.is_join_interested()); - - if !snapshot.is_complete() { - // If the task is not complete, try storing the provided waker in the - // task's waker field. - - let res = if snapshot.is_join_waker_set() { - // If JOIN_WAKER is set, then JoinHandle has previously stored a - // waker in the waker field per step (iii) of rule 5 in task/mod.rs. - - // Optimization: if the stored waker and the provided waker wake the - // same task, then return without touching the waker field. (Reading - // the waker field below is safe per rule 3 in task/mod.rs.) - if unsafe { trailer.will_wake(waker) } { - return false; - } - - // Otherwise swap the stored waker with the provided waker by - // following the rule 5 in task/mod.rs. - header - .state - .unset_waker() - .and_then(|snapshot| set_join_waker(header, trailer, waker.clone(), snapshot)) - } else { - // If JOIN_WAKER is unset, then JoinHandle has mutable access to the - // waker field per rule 2 in task/mod.rs; therefore, skip step (i) - // of rule 5 and try to store the provided waker in the waker field. - set_join_waker(header, trailer, waker.clone(), snapshot) - }; - - match res { - Ok(_) => return false, - Err(snapshot) => { - assert!(snapshot.is_complete()); - } - } - } - true -} - -fn set_join_waker( - header: &Header, - trailer: &Trailer, - waker: Waker, - snapshot: Snapshot, -) -> Result { - assert!(snapshot.is_join_interested()); - assert!(!snapshot.is_join_waker_set()); - - // Safety: Only the `JoinHandle` may set the `waker` field. When - // `JOIN_INTEREST` is **not** set, nothing else will touch the field. 
- unsafe { - trailer.set_waker(Some(waker)); - } - - // Update the `JoinWaker` state accordingly - let res = header.state.set_join_waker(); - - // If the state could not be updated, then clear the join waker - if res.is_err() { - unsafe { - trailer.set_waker(None); - } - } - - res -} - -enum PollFuture { - Complete, - Notified, - Done, - Dealloc, -} - -/// Cancels the task and store the appropriate error in the stage field. -fn cancel_task(core: &Core) { - // Drop the future from a panic guard. - let res = panic::catch_unwind(panic::AssertUnwindSafe(|| { - core.drop_future_or_output(); - })); - - core.store_output(Err(panic_result_to_join_error(core.task_id, res))); -} - -fn panic_result_to_join_error( - task_id: Id, - res: Result<(), Box>, -) -> JoinError { - match res { - Ok(()) => JoinError::cancelled(task_id), - Err(panic) => JoinError::panic(task_id, panic), - } -} - -/// Polls the future. If the future completes, the output is written to the -/// stage field. -fn poll_future(core: &Core, cx: Context<'_>) -> Poll<()> { - // Poll the future. - let output = panic::catch_unwind(panic::AssertUnwindSafe(|| { - struct Guard<'a, T: Future, S: Schedule> { - core: &'a Core, - } - impl<'a, T: Future, S: Schedule> Drop for Guard<'a, T, S> { - fn drop(&mut self) { - // If the future panics on poll, we drop it inside the panic - // guard. - self.core.drop_future_or_output(); - } - } - let guard = Guard { core }; - let res = guard.core.poll(cx); - mem::forget(guard); - res - })); - - // Prepare output for being placed in the core stage. - let output = match output { - Ok(Poll::Pending) => return Poll::Pending, - Ok(Poll::Ready(output)) => Ok(output), - Err(panic) => Err(panic_to_error(&core.scheduler, core.task_id, panic)), - }; - - // Catch and ignore panics if the future panics on drop. 
- let res = panic::catch_unwind(panic::AssertUnwindSafe(|| { - core.store_output(output); - })); - - if res.is_err() { - core.scheduler.unhandled_panic(); - } - - Poll::Ready(()) -} - -#[cold] -fn panic_to_error( - scheduler: &S, - task_id: Id, - panic: Box, -) -> JoinError { - scheduler.unhandled_panic(); - JoinError::panic(task_id, panic) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/id.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/id.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/id.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/id.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,87 +0,0 @@ -use crate::runtime::context; - -use std::fmt; - -/// An opaque ID that uniquely identifies a task relative to all other currently -/// running tasks. -/// -/// # Notes -/// -/// - Task IDs are unique relative to other *currently running* tasks. When a -/// task completes, the same ID may be used for another task. -/// - Task IDs are *not* sequential, and do not indicate the order in which -/// tasks are spawned, what runtime a task is spawned on, or any other data. -/// - The task ID of the currently running task can be obtained from inside the -/// task via the [`task::try_id()`](crate::task::try_id()) and -/// [`task::id()`](crate::task::id()) functions and from outside the task via -/// the [`JoinHandle::id()`](crate::task::JoinHandle::id()) function. -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. -/// -/// [unstable]: crate#unstable-features -#[cfg_attr(docsrs, doc(cfg(all(feature = "rt", tokio_unstable))))] -#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] -#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq)] -pub struct Id(u64); - -/// Returns the [`Id`] of the currently running task. 
-/// -/// # Panics -/// -/// This function panics if called from outside a task. Please note that calls -/// to `block_on` do not have task IDs, so the method will panic if called from -/// within a call to `block_on`. For a version of this function that doesn't -/// panic, see [`task::try_id()`](crate::runtime::task::try_id()). -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. -/// -/// [task ID]: crate::task::Id -/// [unstable]: crate#unstable-features -#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] -#[track_caller] -pub fn id() -> Id { - context::current_task_id().expect("Can't get a task id when not inside a task") -} - -/// Returns the [`Id`] of the currently running task, or `None` if called outside -/// of a task. -/// -/// This function is similar to [`task::id()`](crate::runtime::task::id()), except -/// that it returns `None` rather than panicking if called outside of a task -/// context. -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. 
-/// -/// [task ID]: crate::task::Id -/// [unstable]: crate#unstable-features -#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] -#[track_caller] -pub fn try_id() -> Option { - context::current_task_id() -} - -impl fmt::Display for Id { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -impl Id { - pub(crate) fn next() -> Self { - use crate::loom::sync::atomic::{Ordering::Relaxed, StaticAtomicU64}; - - static NEXT_ID: StaticAtomicU64 = StaticAtomicU64::new(1); - - Self(NEXT_ID.fetch_add(1, Relaxed)) - } - - pub(crate) fn as_u64(&self) -> u64 { - self.0 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/join.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/join.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/join.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/join.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,366 +0,0 @@ -use crate::runtime::task::{Header, RawTask}; - -use std::fmt; -use std::future::Future; -use std::marker::PhantomData; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::pin::Pin; -use std::task::{Context, Poll, Waker}; - -cfg_rt! { - /// An owned permission to join on a task (await its termination). - /// - /// This can be thought of as the equivalent of [`std::thread::JoinHandle`] - /// for a Tokio task rather than a thread. Note that the background task - /// associated with this `JoinHandle` started running immediately when you - /// called spawn, even if you have not yet awaited the `JoinHandle`. - /// - /// A `JoinHandle` *detaches* the associated task when it is dropped, which - /// means that there is no longer any handle to the task, and no way to `join` - /// on it. - /// - /// This `struct` is created by the [`task::spawn`] and [`task::spawn_blocking`] - /// functions. - /// - /// # Cancel safety - /// - /// The `&mut JoinHandle` type is cancel safe. 
If it is used as the event - /// in a `tokio::select!` statement and some other branch completes first, - /// then it is guaranteed that the output of the task is not lost. - /// - /// If a `JoinHandle` is dropped, then the task continues running in the - /// background and its return value is lost. - /// - /// # Examples - /// - /// Creation from [`task::spawn`]: - /// - /// ``` - /// use tokio::task; - /// - /// # async fn doc() { - /// let join_handle: task::JoinHandle<_> = task::spawn(async { - /// // some work here - /// }); - /// # } - /// ``` - /// - /// Creation from [`task::spawn_blocking`]: - /// - /// ``` - /// use tokio::task; - /// - /// # async fn doc() { - /// let join_handle: task::JoinHandle<_> = task::spawn_blocking(|| { - /// // some blocking work here - /// }); - /// # } - /// ``` - /// - /// The generic parameter `T` in `JoinHandle` is the return type of the spawned task. - /// If the return value is an i32, the join handle has type `JoinHandle`: - /// - /// ``` - /// use tokio::task; - /// - /// # async fn doc() { - /// let join_handle: task::JoinHandle = task::spawn(async { - /// 5 + 3 - /// }); - /// # } - /// - /// ``` - /// - /// If the task does not have a return value, the join handle has type `JoinHandle<()>`: - /// - /// ``` - /// use tokio::task; - /// - /// # async fn doc() { - /// let join_handle: task::JoinHandle<()> = task::spawn(async { - /// println!("I return nothing."); - /// }); - /// # } - /// ``` - /// - /// Note that `handle.await` doesn't give you the return type directly. It is wrapped in a - /// `Result` because panics in the spawned task are caught by Tokio. 
The `?` operator has - /// to be double chained to extract the returned value: - /// - /// ``` - /// use tokio::task; - /// use std::io; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let join_handle: task::JoinHandle> = tokio::spawn(async { - /// Ok(5 + 3) - /// }); - /// - /// let result = join_handle.await??; - /// assert_eq!(result, 8); - /// Ok(()) - /// } - /// ``` - /// - /// If the task panics, the error is a [`JoinError`] that contains the panic: - /// - /// ``` - /// use tokio::task; - /// use std::io; - /// use std::panic; - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let join_handle: task::JoinHandle> = tokio::spawn(async { - /// panic!("boom"); - /// }); - /// - /// let err = join_handle.await.unwrap_err(); - /// assert!(err.is_panic()); - /// Ok(()) - /// } - /// - /// ``` - /// Child being detached and outliving its parent: - /// - /// ```no_run - /// use tokio::task; - /// use tokio::time; - /// use std::time::Duration; - /// - /// # #[tokio::main] async fn main() { - /// let original_task = task::spawn(async { - /// let _detached_task = task::spawn(async { - /// // Here we sleep to make sure that the first task returns before. - /// time::sleep(Duration::from_millis(10)).await; - /// // This will be called, even though the JoinHandle is dropped. - /// println!("♫ Still alive ♫"); - /// }); - /// }); - /// - /// original_task.await.expect("The task being joined has panicked"); - /// println!("Original task is joined."); - /// - /// // We make sure that the new task has time to run, before the main - /// // task returns. 
- /// - /// time::sleep(Duration::from_millis(1000)).await; - /// # } - /// ``` - /// - /// [`task::spawn`]: crate::task::spawn() - /// [`task::spawn_blocking`]: crate::task::spawn_blocking - /// [`std::thread::JoinHandle`]: std::thread::JoinHandle - /// [`JoinError`]: crate::task::JoinError - pub struct JoinHandle { - raw: RawTask, - _p: PhantomData, - } -} - -unsafe impl Send for JoinHandle {} -unsafe impl Sync for JoinHandle {} - -impl UnwindSafe for JoinHandle {} -impl RefUnwindSafe for JoinHandle {} - -impl JoinHandle { - pub(super) fn new(raw: RawTask) -> JoinHandle { - JoinHandle { - raw, - _p: PhantomData, - } - } - - /// Abort the task associated with the handle. - /// - /// Awaiting a cancelled task might complete as usual if the task was - /// already completed at the time it was cancelled, but most likely it - /// will fail with a [cancelled] `JoinError`. - /// - /// ```rust - /// use tokio::time; - /// - /// # #[tokio::main(flavor = "current_thread", start_paused = true)] - /// # async fn main() { - /// let mut handles = Vec::new(); - /// - /// handles.push(tokio::spawn(async { - /// time::sleep(time::Duration::from_secs(10)).await; - /// true - /// })); - /// - /// handles.push(tokio::spawn(async { - /// time::sleep(time::Duration::from_secs(10)).await; - /// false - /// })); - /// - /// for handle in &handles { - /// handle.abort(); - /// } - /// - /// for handle in handles { - /// assert!(handle.await.unwrap_err().is_cancelled()); - /// } - /// # } - /// ``` - /// [cancelled]: method@super::error::JoinError::is_cancelled - pub fn abort(&self) { - self.raw.remote_abort(); - } - - /// Checks if the task associated with this `JoinHandle` has finished. - /// - /// Please note that this method can return `false` even if [`abort`] has been - /// called on the task. This is because the cancellation process may take - /// some time, and this method does not return `true` until it has - /// completed. 
- /// - /// ```rust - /// use tokio::time; - /// - /// # #[tokio::main(flavor = "current_thread", start_paused = true)] - /// # async fn main() { - /// let handle1 = tokio::spawn(async { - /// // do some stuff here - /// }); - /// let handle2 = tokio::spawn(async { - /// // do some other stuff here - /// time::sleep(time::Duration::from_secs(10)).await; - /// }); - /// // Wait for the task to finish - /// handle2.abort(); - /// time::sleep(time::Duration::from_secs(1)).await; - /// assert!(handle1.is_finished()); - /// assert!(handle2.is_finished()); - /// # } - /// ``` - /// [`abort`]: method@JoinHandle::abort - pub fn is_finished(&self) -> bool { - let state = self.raw.header().state.load(); - state.is_complete() - } - - /// Set the waker that is notified when the task completes. - pub(crate) fn set_join_waker(&mut self, waker: &Waker) { - if self.raw.try_set_join_waker(waker) { - // In this case the task has already completed. We wake the waker immediately. - waker.wake_by_ref(); - } - } - - /// Returns a new `AbortHandle` that can be used to remotely abort this task. - /// - /// Awaiting a task cancelled by the `AbortHandle` might complete as usual if the task was - /// already completed at the time it was cancelled, but most likely it - /// will fail with a [cancelled] `JoinError`. 
- /// - /// ```rust - /// use tokio::{time, task}; - /// - /// # #[tokio::main(flavor = "current_thread", start_paused = true)] - /// # async fn main() { - /// let mut handles = Vec::new(); - /// - /// handles.push(tokio::spawn(async { - /// time::sleep(time::Duration::from_secs(10)).await; - /// true - /// })); - /// - /// handles.push(tokio::spawn(async { - /// time::sleep(time::Duration::from_secs(10)).await; - /// false - /// })); - /// - /// let abort_handles: Vec = handles.iter().map(|h| h.abort_handle()).collect(); - /// - /// for handle in abort_handles { - /// handle.abort(); - /// } - /// - /// for handle in handles { - /// assert!(handle.await.unwrap_err().is_cancelled()); - /// } - /// # } - /// ``` - /// [cancelled]: method@super::error::JoinError::is_cancelled - pub fn abort_handle(&self) -> super::AbortHandle { - self.raw.ref_inc(); - super::AbortHandle::new(self.raw) - } - - /// Returns a [task ID] that uniquely identifies this task relative to other - /// currently spawned tasks. - /// - /// **Note**: This is an [unstable API][unstable]. The public API of this type - /// may break in 1.x releases. See [the documentation on unstable - /// features][unstable] for details. - /// - /// [task ID]: crate::task::Id - /// [unstable]: crate#unstable-features - #[cfg(tokio_unstable)] - #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] - pub fn id(&self) -> super::Id { - // Safety: The header pointer is valid. - unsafe { Header::get_id(self.raw.header_ptr()) } - } -} - -impl Unpin for JoinHandle {} - -impl Future for JoinHandle { - type Output = super::Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - ready!(crate::trace::trace_leaf(cx)); - let mut ret = Poll::Pending; - - // Keep track of task budget - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - // Try to read the task output. If the task is not yet complete, the - // waker is stored and is notified once the task does complete. 
- // - // The function must go via the vtable, which requires erasing generic - // types. To do this, the function "return" is placed on the stack - // **before** calling the function and is passed into the function using - // `*mut ()`. - // - // Safety: - // - // The type of `T` must match the task's output type. - unsafe { - self.raw - .try_read_output(&mut ret as *mut _ as *mut (), cx.waker()); - } - - if ret.is_ready() { - coop.made_progress(); - } - - ret - } -} - -impl Drop for JoinHandle { - fn drop(&mut self) { - if self.raw.state().drop_join_handle_fast().is_ok() { - return; - } - - self.raw.drop_join_handle_slow(); - } -} - -impl fmt::Debug for JoinHandle -where - T: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - // Safety: The header pointer is valid. - let id_ptr = unsafe { Header::get_id_ptr(self.raw.header_ptr()) }; - let id = unsafe { id_ptr.as_ref() }; - fmt.debug_struct("JoinHandle").field("id", id).finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/list.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/list.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/list.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/list.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,322 +0,0 @@ -//! This module has containers for storing the tasks spawned on a scheduler. The -//! `OwnedTasks` container is thread-safe but can only store tasks that -//! implement Send. The `LocalOwnedTasks` container is not thread safe, but can -//! store non-Send tasks. -//! -//! The collections can be closed to prevent adding new tasks during shutdown of -//! the scheduler with the collection. 
- -use crate::future::Future; -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::Mutex; -use crate::runtime::task::{JoinHandle, LocalNotified, Notified, Schedule, Task}; -use crate::util::linked_list::{CountedLinkedList, Link, LinkedList}; - -use std::marker::PhantomData; -use std::num::NonZeroU64; - -// The id from the module below is used to verify whether a given task is stored -// in this OwnedTasks, or some other task. The counter starts at one so we can -// use `None` for tasks not owned by any list. -// -// The safety checks in this file can technically be violated if the counter is -// overflown, but the checks are not supposed to ever fail unless there is a -// bug in Tokio, so we accept that certain bugs would not be caught if the two -// mixed up runtimes happen to have the same id. - -cfg_has_atomic_u64! { - use std::sync::atomic::{AtomicU64, Ordering}; - - static NEXT_OWNED_TASKS_ID: AtomicU64 = AtomicU64::new(1); - - fn get_next_id() -> NonZeroU64 { - loop { - let id = NEXT_OWNED_TASKS_ID.fetch_add(1, Ordering::Relaxed); - if let Some(id) = NonZeroU64::new(id) { - return id; - } - } - } -} - -cfg_not_has_atomic_u64! 
{ - use std::sync::atomic::{AtomicU32, Ordering}; - - static NEXT_OWNED_TASKS_ID: AtomicU32 = AtomicU32::new(1); - - fn get_next_id() -> NonZeroU64 { - loop { - let id = NEXT_OWNED_TASKS_ID.fetch_add(1, Ordering::Relaxed); - if let Some(id) = NonZeroU64::new(u64::from(id)) { - return id; - } - } - } -} - -pub(crate) struct OwnedTasks { - inner: Mutex>, - pub(crate) id: NonZeroU64, -} -struct CountedOwnedTasksInner { - list: CountedLinkedList, as Link>::Target>, - closed: bool, -} -pub(crate) struct LocalOwnedTasks { - inner: UnsafeCell>, - pub(crate) id: NonZeroU64, - _not_send_or_sync: PhantomData<*const ()>, -} -struct OwnedTasksInner { - list: LinkedList, as Link>::Target>, - closed: bool, -} - -impl OwnedTasks { - pub(crate) fn new() -> Self { - Self { - inner: Mutex::new(CountedOwnedTasksInner { - list: CountedLinkedList::new(), - closed: false, - }), - id: get_next_id(), - } - } - - /// Binds the provided task to this OwnedTasks instance. This fails if the - /// OwnedTasks has been closed. - pub(crate) fn bind( - &self, - task: T, - scheduler: S, - id: super::Id, - ) -> (JoinHandle, Option>) - where - S: Schedule, - T: Future + Send + 'static, - T::Output: Send + 'static, - { - let (task, notified, join) = super::new_task(task, scheduler, id); - let notified = unsafe { self.bind_inner(task, notified) }; - (join, notified) - } - - /// The part of `bind` that's the same for every type of future. - unsafe fn bind_inner(&self, task: Task, notified: Notified) -> Option> - where - S: Schedule, - { - unsafe { - // safety: We just created the task, so we have exclusive access - // to the field. - task.header().set_owner_id(self.id); - } - - let mut lock = self.inner.lock(); - if lock.closed { - drop(lock); - drop(notified); - task.shutdown(); - None - } else { - lock.list.push_front(task); - Some(notified) - } - } - - /// Asserts that the given task is owned by this OwnedTasks and convert it to - /// a LocalNotified, giving the thread permission to poll this task. 
- #[inline] - pub(crate) fn assert_owner(&self, task: Notified) -> LocalNotified { - debug_assert_eq!(task.header().get_owner_id(), Some(self.id)); - - // safety: All tasks bound to this OwnedTasks are Send, so it is safe - // to poll it on this thread no matter what thread we are on. - LocalNotified { - task: task.0, - _not_send: PhantomData, - } - } - - /// Shuts down all tasks in the collection. This call also closes the - /// collection, preventing new items from being added. - pub(crate) fn close_and_shutdown_all(&self) - where - S: Schedule, - { - // The first iteration of the loop was unrolled so it can set the - // closed bool. - let first_task = { - let mut lock = self.inner.lock(); - lock.closed = true; - lock.list.pop_back() - }; - match first_task { - Some(task) => task.shutdown(), - None => return, - } - - loop { - let task = match self.inner.lock().list.pop_back() { - Some(task) => task, - None => return, - }; - - task.shutdown(); - } - } - - pub(crate) fn active_tasks_count(&self) -> usize { - self.inner.lock().list.count() - } - - pub(crate) fn remove(&self, task: &Task) -> Option> { - // If the task's owner ID is `None` then it is not part of any list and - // doesn't need removing. - let task_id = task.header().get_owner_id()?; - - assert_eq!(task_id, self.id); - - // safety: We just checked that the provided task is not in some other - // linked list. - unsafe { self.inner.lock().list.remove(task.header_ptr()) } - } - - pub(crate) fn is_empty(&self) -> bool { - self.inner.lock().list.is_empty() - } -} - -cfg_taskdump! { - impl OwnedTasks { - /// Locks the tasks, and calls `f` on an iterator over them. 
- pub(crate) fn for_each(&self, f: F) - where - F: FnMut(&Task) - { - self.inner.lock().list.for_each(f) - } - } -} - -impl LocalOwnedTasks { - pub(crate) fn new() -> Self { - Self { - inner: UnsafeCell::new(OwnedTasksInner { - list: LinkedList::new(), - closed: false, - }), - id: get_next_id(), - _not_send_or_sync: PhantomData, - } - } - - pub(crate) fn bind( - &self, - task: T, - scheduler: S, - id: super::Id, - ) -> (JoinHandle, Option>) - where - S: Schedule, - T: Future + 'static, - T::Output: 'static, - { - let (task, notified, join) = super::new_task(task, scheduler, id); - - unsafe { - // safety: We just created the task, so we have exclusive access - // to the field. - task.header().set_owner_id(self.id); - } - - if self.is_closed() { - drop(notified); - task.shutdown(); - (join, None) - } else { - self.with_inner(|inner| { - inner.list.push_front(task); - }); - (join, Some(notified)) - } - } - - /// Shuts down all tasks in the collection. This call also closes the - /// collection, preventing new items from being added. - pub(crate) fn close_and_shutdown_all(&self) - where - S: Schedule, - { - self.with_inner(|inner| inner.closed = true); - - while let Some(task) = self.with_inner(|inner| inner.list.pop_back()) { - task.shutdown(); - } - } - - pub(crate) fn remove(&self, task: &Task) -> Option> { - // If the task's owner ID is `None` then it is not part of any list and - // doesn't need removing. - let task_id = task.header().get_owner_id()?; - - assert_eq!(task_id, self.id); - - self.with_inner(|inner| - // safety: We just checked that the provided task is not in some - // other linked list. - unsafe { inner.list.remove(task.header_ptr()) }) - } - - /// Asserts that the given task is owned by this LocalOwnedTasks and convert - /// it to a LocalNotified, giving the thread permission to poll this task. 
- #[inline] - pub(crate) fn assert_owner(&self, task: Notified) -> LocalNotified { - assert_eq!(task.header().get_owner_id(), Some(self.id)); - - // safety: The task was bound to this LocalOwnedTasks, and the - // LocalOwnedTasks is not Send or Sync, so we are on the right thread - // for polling this task. - LocalNotified { - task: task.0, - _not_send: PhantomData, - } - } - - #[inline] - fn with_inner(&self, f: F) -> T - where - F: FnOnce(&mut OwnedTasksInner) -> T, - { - // safety: This type is not Sync, so concurrent calls of this method - // can't happen. Furthermore, all uses of this method in this file make - // sure that they don't call `with_inner` recursively. - self.inner.with_mut(|ptr| unsafe { f(&mut *ptr) }) - } - - pub(crate) fn is_closed(&self) -> bool { - self.with_inner(|inner| inner.closed) - } - - pub(crate) fn is_empty(&self) -> bool { - self.with_inner(|inner| inner.list.is_empty()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // This test may run in parallel with other tests, so we only test that ids - // come in increasing order. - #[test] - fn test_id_not_broken() { - let mut last_id = get_next_id(); - - for _ in 0..1000 { - let next_id = get_next_id(); - assert!(last_id < next_id); - last_id = next_id; - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,505 +0,0 @@ -//! The task module. -//! -//! The task module contains the code that manages spawned tasks and provides a -//! safe API for the rest of the runtime to use. Each task in a runtime is -//! stored in an OwnedTasks or LocalOwnedTasks object. -//! -//! # Task reference types -//! -//! A task is usually referenced by multiple handles, and there are several -//! types of handles. 
-//! -//! * OwnedTask - tasks stored in an OwnedTasks or LocalOwnedTasks are of this -//! reference type. -//! -//! * JoinHandle - each task has a JoinHandle that allows access to the output -//! of the task. -//! -//! * Waker - every waker for a task has this reference type. There can be any -//! number of waker references. -//! -//! * Notified - tracks whether the task is notified. -//! -//! * Unowned - this task reference type is used for tasks not stored in any -//! runtime. Mainly used for blocking tasks, but also in tests. -//! -//! The task uses a reference count to keep track of how many active references -//! exist. The Unowned reference type takes up two ref-counts. All other -//! reference types take up a single ref-count. -//! -//! Besides the waker type, each task has at most one of each reference type. -//! -//! # State -//! -//! The task stores its state in an atomic usize with various bitfields for the -//! necessary information. The state has the following bitfields: -//! -//! * RUNNING - Tracks whether the task is currently being polled or cancelled. -//! This bit functions as a lock around the task. -//! -//! * COMPLETE - Is one once the future has fully completed and has been -//! dropped. Never unset once set. Never set together with RUNNING. -//! -//! * NOTIFIED - Tracks whether a Notified object currently exists. -//! -//! * CANCELLED - Is set to one for tasks that should be cancelled as soon as -//! possible. May take any value for completed tasks. -//! -//! * JOIN_INTEREST - Is set to one if there exists a JoinHandle. -//! -//! * JOIN_WAKER - Acts as an access control bit for the join handle waker. The -//! protocol for its usage is described below. -//! -//! The rest of the bits are used for the ref-count. -//! -//! # Fields in the task -//! -//! The task has various fields. This section describes how and when it is safe -//! to access a field. -//! -//! * The state field is accessed with atomic instructions. -//! -//! 
* The OwnedTask reference has exclusive access to the `owned` field. -//! -//! * The Notified reference has exclusive access to the `queue_next` field. -//! -//! * The `owner_id` field can be set as part of construction of the task, but -//! is otherwise immutable and anyone can access the field immutably without -//! synchronization. -//! -//! * If COMPLETE is one, then the JoinHandle has exclusive access to the -//! stage field. If COMPLETE is zero, then the RUNNING bitfield functions as -//! a lock for the stage field, and it can be accessed only by the thread -//! that set RUNNING to one. -//! -//! * The waker field may be concurrently accessed by different threads: in one -//! thread the runtime may complete a task and *read* the waker field to -//! invoke the waker, and in another thread the task's JoinHandle may be -//! polled, and if the task hasn't yet completed, the JoinHandle may *write* -//! a waker to the waker field. The JOIN_WAKER bit ensures safe access by -//! multiple threads to the waker field using the following rules: -//! -//! 1. JOIN_WAKER is initialized to zero. -//! -//! 2. If JOIN_WAKER is zero, then the JoinHandle has exclusive (mutable) -//! access to the waker field. -//! -//! 3. If JOIN_WAKER is one, then the JoinHandle has shared (read-only) -//! access to the waker field. -//! -//! 4. If JOIN_WAKER is one and COMPLETE is one, then the runtime has shared -//! (read-only) access to the waker field. -//! -//! 5. If the JoinHandle needs to write to the waker field, then the -//! JoinHandle needs to (i) successfully set JOIN_WAKER to zero if it is -//! not already zero to gain exclusive access to the waker field per rule -//! 2, (ii) write a waker, and (iii) successfully set JOIN_WAKER to one. -//! -//! 6. The JoinHandle can change JOIN_WAKER only if COMPLETE is zero (i.e. -//! the task hasn't yet completed). -//! -//! Rule 6 implies that the steps (i) or (iii) of rule 5 may fail due to a -//! race. 
If step (i) fails, then the attempt to write a waker is aborted. If -//! step (iii) fails because COMPLETE is set to one by another thread after -//! step (i), then the waker field is cleared. Once COMPLETE is one (i.e. -//! task has completed), the JoinHandle will not modify JOIN_WAKER. After the -//! runtime sets COMPLETE to one, it invokes the waker if there is one. -//! -//! All other fields are immutable and can be accessed immutably without -//! synchronization by anyone. -//! -//! # Safety -//! -//! This section goes through various situations and explains why the API is -//! safe in that situation. -//! -//! ## Polling or dropping the future -//! -//! Any mutable access to the future happens after obtaining a lock by modifying -//! the RUNNING field, so exclusive access is ensured. -//! -//! When the task completes, exclusive access to the output is transferred to -//! the JoinHandle. If the JoinHandle is already dropped when the transition to -//! complete happens, the thread performing that transition retains exclusive -//! access to the output and should immediately drop it. -//! -//! ## Non-Send futures -//! -//! If a future is not Send, then it is bound to a LocalOwnedTasks. The future -//! will only ever be polled or dropped given a LocalNotified or inside a call -//! to LocalOwnedTasks::shutdown_all. In either case, it is guaranteed that the -//! future is on the right thread. -//! -//! If the task is never removed from the LocalOwnedTasks, then it is leaked, so -//! there is no risk that the task is dropped on some other thread when the last -//! ref-count drops. -//! -//! ## Non-Send output -//! -//! When a task completes, the output is placed in the stage of the task. Then, -//! a transition that sets COMPLETE to true is performed, and the value of -//! JOIN_INTEREST when this transition happens is read. -//! -//! If JOIN_INTEREST is zero when the transition to COMPLETE happens, then the -//! output is immediately dropped. -//! -//! 
If JOIN_INTEREST is one when the transition to COMPLETE happens, then the -//! JoinHandle is responsible for cleaning up the output. If the output is not -//! Send, then this happens: -//! -//! 1. The output is created on the thread that the future was polled on. Since -//! only non-Send futures can have non-Send output, the future was polled on -//! the thread that the future was spawned from. -//! 2. Since `JoinHandle` is not Send if Output is not Send, the -//! JoinHandle is also on the thread that the future was spawned from. -//! 3. Thus, the JoinHandle will not move the output across threads when it -//! takes or drops the output. -//! -//! ## Recursive poll/shutdown -//! -//! Calling poll from inside a shutdown call or vice-versa is not prevented by -//! the API exposed by the task module, so this has to be safe. In either case, -//! the lock in the RUNNING bitfield makes the inner call return immediately. If -//! the inner call is a `shutdown` call, then the CANCELLED bit is set, and the -//! poll call will notice it when the poll finishes, and the task is cancelled -//! at that point. - -// Some task infrastructure is here to support `JoinSet`, which is currently -// unstable. This should be removed once `JoinSet` is stabilized. -#![cfg_attr(not(tokio_unstable), allow(dead_code))] - -mod core; -use self::core::Cell; -use self::core::Header; - -mod error; -pub use self::error::JoinError; - -mod harness; -use self::harness::Harness; - -mod id; -#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] -pub use id::{id, try_id, Id}; - -#[cfg(feature = "rt")] -mod abort; -mod join; - -#[cfg(feature = "rt")] -pub use self::abort::AbortHandle; - -pub use self::join::JoinHandle; - -mod list; -pub(crate) use self::list::{LocalOwnedTasks, OwnedTasks}; - -mod raw; -pub(crate) use self::raw::RawTask; - -mod state; -use self::state::State; - -mod waker; - -cfg_taskdump! 
{ - pub(crate) mod trace; -} - -use crate::future::Future; -use crate::util::linked_list; - -use std::marker::PhantomData; -use std::ptr::NonNull; -use std::{fmt, mem}; - -/// An owned handle to the task, tracked by ref count. -#[repr(transparent)] -pub(crate) struct Task { - raw: RawTask, - _p: PhantomData, -} - -unsafe impl Send for Task {} -unsafe impl Sync for Task {} - -/// A task was notified. -#[repr(transparent)] -pub(crate) struct Notified(Task); - -// safety: This type cannot be used to touch the task without first verifying -// that the value is on a thread where it is safe to poll the task. -unsafe impl Send for Notified {} -unsafe impl Sync for Notified {} - -/// A non-Send variant of Notified with the invariant that it is on a thread -/// where it is safe to poll it. -#[repr(transparent)] -pub(crate) struct LocalNotified { - task: Task, - _not_send: PhantomData<*const ()>, -} - -/// A task that is not owned by any OwnedTasks. Used for blocking tasks. -/// This type holds two ref-counts. -pub(crate) struct UnownedTask { - raw: RawTask, - _p: PhantomData, -} - -// safety: This type can only be created given a Send task. -unsafe impl Send for UnownedTask {} -unsafe impl Sync for UnownedTask {} - -/// Task result sent back. -pub(crate) type Result = std::result::Result; - -pub(crate) trait Schedule: Sync + Sized + 'static { - /// The task has completed work and is ready to be released. The scheduler - /// should release it immediately and return it. The task module will batch - /// the ref-dec with setting other options. - /// - /// If the scheduler has already released the task, then None is returned. - fn release(&self, task: &Task) -> Option>; - - /// Schedule the task - fn schedule(&self, task: Notified); - - /// Schedule the task to run in the near future, yielding the thread to - /// other tasks. - fn yield_now(&self, task: Notified) { - self.schedule(task); - } - - /// Polling the task resulted in a panic. Should the runtime shutdown? 
- fn unhandled_panic(&self) { - // By default, do nothing. This maintains the 1.0 behavior. - } -} - -cfg_rt! { - /// This is the constructor for a new task. Three references to the task are - /// created. The first task reference is usually put into an OwnedTasks - /// immediately. The Notified is sent to the scheduler as an ordinary - /// notification. - fn new_task( - task: T, - scheduler: S, - id: Id, - ) -> (Task, Notified, JoinHandle) - where - S: Schedule, - T: Future + 'static, - T::Output: 'static, - { - let raw = RawTask::new::(task, scheduler, id); - let task = Task { - raw, - _p: PhantomData, - }; - let notified = Notified(Task { - raw, - _p: PhantomData, - }); - let join = JoinHandle::new(raw); - - (task, notified, join) - } - - /// Creates a new task with an associated join handle. This method is used - /// only when the task is not going to be stored in an `OwnedTasks` list. - /// - /// Currently only blocking tasks use this method. - pub(crate) fn unowned(task: T, scheduler: S, id: Id) -> (UnownedTask, JoinHandle) - where - S: Schedule, - T: Send + Future + 'static, - T::Output: Send + 'static, - { - let (task, notified, join) = new_task(task, scheduler, id); - - // This transfers the ref-count of task and notified into an UnownedTask. - // This is valid because an UnownedTask holds two ref-counts. - let unowned = UnownedTask { - raw: task.raw, - _p: PhantomData, - }; - std::mem::forget(task); - std::mem::forget(notified); - - (unowned, join) - } -} - -impl Task { - unsafe fn new(raw: RawTask) -> Task { - Task { - raw, - _p: PhantomData, - } - } - - unsafe fn from_raw(ptr: NonNull
) -> Task { - Task::new(RawTask::from_raw(ptr)) - } - - #[cfg(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") - ))] - pub(super) fn as_raw(&self) -> RawTask { - self.raw - } - - fn header(&self) -> &Header { - self.raw.header() - } - - fn header_ptr(&self) -> NonNull
{ - self.raw.header_ptr() - } - - cfg_taskdump! { - pub(super) fn notify_for_tracing(&self) -> Notified { - self.as_raw().state().transition_to_notified_for_tracing(); - // SAFETY: `transition_to_notified_for_tracing` increments the refcount. - unsafe { Notified(Task::new(self.raw)) } - } - } -} - -impl Notified { - fn header(&self) -> &Header { - self.0.header() - } -} - -impl Notified { - pub(crate) unsafe fn from_raw(ptr: RawTask) -> Notified { - Notified(Task::new(ptr)) - } -} - -impl Notified { - pub(crate) fn into_raw(self) -> RawTask { - let raw = self.0.raw; - mem::forget(self); - raw - } -} - -impl Task { - /// Preemptively cancels the task as part of the shutdown process. - pub(crate) fn shutdown(self) { - let raw = self.raw; - mem::forget(self); - raw.shutdown(); - } -} - -impl LocalNotified { - /// Runs the task. - pub(crate) fn run(self) { - let raw = self.task.raw; - mem::forget(self); - raw.poll(); - } -} - -impl UnownedTask { - // Used in test of the inject queue. - #[cfg(test)] - #[cfg_attr(target_family = "wasm", allow(dead_code))] - pub(super) fn into_notified(self) -> Notified { - Notified(self.into_task()) - } - - fn into_task(self) -> Task { - // Convert into a task. - let task = Task { - raw: self.raw, - _p: PhantomData, - }; - mem::forget(self); - - // Drop a ref-count since an UnownedTask holds two. - task.header().state.ref_dec(); - - task - } - - pub(crate) fn run(self) { - let raw = self.raw; - mem::forget(self); - - // Transfer one ref-count to a Task object. - let task = Task:: { - raw, - _p: PhantomData, - }; - - // Use the other ref-count to poll the task. 
- raw.poll(); - // Decrement our extra ref-count - drop(task); - } - - pub(crate) fn shutdown(self) { - self.into_task().shutdown() - } -} - -impl Drop for Task { - fn drop(&mut self) { - // Decrement the ref count - if self.header().state.ref_dec() { - // Deallocate if this is the final ref count - self.raw.dealloc(); - } - } -} - -impl Drop for UnownedTask { - fn drop(&mut self) { - // Decrement the ref count - if self.raw.header().state.ref_dec_twice() { - // Deallocate if this is the final ref count - self.raw.dealloc(); - } - } -} - -impl fmt::Debug for Task { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "Task({:p})", self.header()) - } -} - -impl fmt::Debug for Notified { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "task::Notified({:p})", self.0.header()) - } -} - -/// # Safety -/// -/// Tasks are pinned. -unsafe impl linked_list::Link for Task { - type Handle = Task; - type Target = Header; - - fn as_raw(handle: &Task) -> NonNull
{ - handle.raw.header_ptr() - } - - unsafe fn from_raw(ptr: NonNull
) -> Task { - Task::from_raw(ptr) - } - - unsafe fn pointers(target: NonNull
) -> NonNull> { - self::core::Trailer::addr_of_owned(Header::get_trailer(target)) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/raw.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/raw.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/raw.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/raw.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,317 +0,0 @@ -use crate::future::Future; -use crate::runtime::task::core::{Core, Trailer}; -use crate::runtime::task::{Cell, Harness, Header, Id, Schedule, State}; - -use std::ptr::NonNull; -use std::task::{Poll, Waker}; - -/// Raw task handle -pub(crate) struct RawTask { - ptr: NonNull
, -} - -pub(super) struct Vtable { - /// Polls the future. - pub(super) poll: unsafe fn(NonNull
), - - /// Schedules the task for execution on the runtime. - pub(super) schedule: unsafe fn(NonNull
), - - /// Deallocates the memory. - pub(super) dealloc: unsafe fn(NonNull
), - - /// Reads the task output, if complete. - pub(super) try_read_output: unsafe fn(NonNull
, *mut (), &Waker), - - /// The join handle has been dropped. - pub(super) drop_join_handle_slow: unsafe fn(NonNull
), - - /// An abort handle has been dropped. - pub(super) drop_abort_handle: unsafe fn(NonNull
), - - /// Scheduler is being shutdown. - pub(super) shutdown: unsafe fn(NonNull
), - - /// The number of bytes that the `trailer` field is offset from the header. - pub(super) trailer_offset: usize, - - /// The number of bytes that the `scheduler` field is offset from the header. - pub(super) scheduler_offset: usize, - - /// The number of bytes that the `id` field is offset from the header. - pub(super) id_offset: usize, -} - -/// Get the vtable for the requested `T` and `S` generics. -pub(super) fn vtable() -> &'static Vtable { - &Vtable { - poll: poll::, - schedule: schedule::, - dealloc: dealloc::, - try_read_output: try_read_output::, - drop_join_handle_slow: drop_join_handle_slow::, - drop_abort_handle: drop_abort_handle::, - shutdown: shutdown::, - trailer_offset: OffsetHelper::::TRAILER_OFFSET, - scheduler_offset: OffsetHelper::::SCHEDULER_OFFSET, - id_offset: OffsetHelper::::ID_OFFSET, - } -} - -/// Calling `get_trailer_offset` directly in vtable doesn't work because it -/// prevents the vtable from being promoted to a static reference. -/// -/// See this thread for more info: -/// -struct OffsetHelper(T, S); -impl OffsetHelper { - // Pass `size_of`/`align_of` as arguments rather than calling them directly - // inside `get_trailer_offset` because trait bounds on generic parameters - // of const fn are unstable on our MSRV. - const TRAILER_OFFSET: usize = get_trailer_offset( - std::mem::size_of::
(), - std::mem::size_of::>(), - std::mem::align_of::>(), - std::mem::align_of::(), - ); - - // The `scheduler` is the first field of `Core`, so it has the same - // offset as `Core`. - const SCHEDULER_OFFSET: usize = get_core_offset( - std::mem::size_of::
(), - std::mem::align_of::>(), - ); - - const ID_OFFSET: usize = get_id_offset( - std::mem::size_of::
(), - std::mem::align_of::>(), - std::mem::size_of::(), - std::mem::align_of::(), - ); -} - -/// Compute the offset of the `Trailer` field in `Cell` using the -/// `#[repr(C)]` algorithm. -/// -/// Pseudo-code for the `#[repr(C)]` algorithm can be found here: -/// -const fn get_trailer_offset( - header_size: usize, - core_size: usize, - core_align: usize, - trailer_align: usize, -) -> usize { - let mut offset = header_size; - - let core_misalign = offset % core_align; - if core_misalign > 0 { - offset += core_align - core_misalign; - } - offset += core_size; - - let trailer_misalign = offset % trailer_align; - if trailer_misalign > 0 { - offset += trailer_align - trailer_misalign; - } - - offset -} - -/// Compute the offset of the `Core` field in `Cell` using the -/// `#[repr(C)]` algorithm. -/// -/// Pseudo-code for the `#[repr(C)]` algorithm can be found here: -/// -const fn get_core_offset(header_size: usize, core_align: usize) -> usize { - let mut offset = header_size; - - let core_misalign = offset % core_align; - if core_misalign > 0 { - offset += core_align - core_misalign; - } - - offset -} - -/// Compute the offset of the `Id` field in `Cell` using the -/// `#[repr(C)]` algorithm. -/// -/// Pseudo-code for the `#[repr(C)]` algorithm can be found here: -/// -const fn get_id_offset( - header_size: usize, - core_align: usize, - scheduler_size: usize, - id_align: usize, -) -> usize { - let mut offset = get_core_offset(header_size, core_align); - offset += scheduler_size; - - let id_misalign = offset % id_align; - if id_misalign > 0 { - offset += id_align - id_misalign; - } - - offset -} - -impl RawTask { - pub(super) fn new(task: T, scheduler: S, id: Id) -> RawTask - where - T: Future, - S: Schedule, - { - let ptr = Box::into_raw(Cell::<_, S>::new(task, scheduler, State::new(), id)); - let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) }; - - RawTask { ptr } - } - - pub(super) unsafe fn from_raw(ptr: NonNull
) -> RawTask { - RawTask { ptr } - } - - pub(super) fn header_ptr(&self) -> NonNull
{ - self.ptr - } - - pub(super) fn trailer_ptr(&self) -> NonNull { - unsafe { Header::get_trailer(self.ptr) } - } - - /// Returns a reference to the task's header. - pub(super) fn header(&self) -> &Header { - unsafe { self.ptr.as_ref() } - } - - /// Returns a reference to the task's trailer. - pub(super) fn trailer(&self) -> &Trailer { - unsafe { &*self.trailer_ptr().as_ptr() } - } - - /// Returns a reference to the task's state. - pub(super) fn state(&self) -> &State { - &self.header().state - } - - /// Safety: mutual exclusion is required to call this function. - pub(crate) fn poll(self) { - let vtable = self.header().vtable; - unsafe { (vtable.poll)(self.ptr) } - } - - pub(super) fn schedule(self) { - let vtable = self.header().vtable; - unsafe { (vtable.schedule)(self.ptr) } - } - - pub(super) fn dealloc(self) { - let vtable = self.header().vtable; - unsafe { - (vtable.dealloc)(self.ptr); - } - } - - /// Safety: `dst` must be a `*mut Poll>` where `T` - /// is the future stored by the task. - pub(super) unsafe fn try_read_output(self, dst: *mut (), waker: &Waker) { - let vtable = self.header().vtable; - (vtable.try_read_output)(self.ptr, dst, waker); - } - - pub(super) fn drop_join_handle_slow(self) { - let vtable = self.header().vtable; - unsafe { (vtable.drop_join_handle_slow)(self.ptr) } - } - - pub(super) fn drop_abort_handle(self) { - let vtable = self.header().vtable; - unsafe { (vtable.drop_abort_handle)(self.ptr) } - } - - pub(super) fn shutdown(self) { - let vtable = self.header().vtable; - unsafe { (vtable.shutdown)(self.ptr) } - } - - /// Increment the task's reference count. - /// - /// Currently, this is used only when creating an `AbortHandle`. - pub(super) fn ref_inc(self) { - self.header().state.ref_inc(); - } - - /// Get the queue-next pointer - /// - /// This is for usage by the injection queue - /// - /// Safety: make sure only one queue uses this and access is synchronized. 
- pub(crate) unsafe fn get_queue_next(self) -> Option { - self.header() - .queue_next - .with(|ptr| *ptr) - .map(|p| RawTask::from_raw(p)) - } - - /// Sets the queue-next pointer - /// - /// This is for usage by the injection queue - /// - /// Safety: make sure only one queue uses this and access is synchronized. - pub(crate) unsafe fn set_queue_next(self, val: Option) { - self.header().set_next(val.map(|task| task.ptr)); - } -} - -impl Clone for RawTask { - fn clone(&self) -> Self { - RawTask { ptr: self.ptr } - } -} - -impl Copy for RawTask {} - -unsafe fn poll(ptr: NonNull
) { - let harness = Harness::::from_raw(ptr); - harness.poll(); -} - -unsafe fn schedule(ptr: NonNull
) { - use crate::runtime::task::{Notified, Task}; - - let scheduler = Header::get_scheduler::(ptr); - scheduler - .as_ref() - .schedule(Notified(Task::from_raw(ptr.cast()))); -} - -unsafe fn dealloc(ptr: NonNull
) { - let harness = Harness::::from_raw(ptr); - harness.dealloc(); -} - -unsafe fn try_read_output( - ptr: NonNull
, - dst: *mut (), - waker: &Waker, -) { - let out = &mut *(dst as *mut Poll>); - - let harness = Harness::::from_raw(ptr); - harness.try_read_output(out, waker); -} - -unsafe fn drop_join_handle_slow(ptr: NonNull
) { - let harness = Harness::::from_raw(ptr); - harness.drop_join_handle_slow() -} - -unsafe fn drop_abort_handle(ptr: NonNull
) { - let harness = Harness::::from_raw(ptr); - harness.drop_reference(); -} - -unsafe fn shutdown(ptr: NonNull
) { - let harness = Harness::::from_raw(ptr); - harness.shutdown() -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/state.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/state.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/state.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/state.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,611 +0,0 @@ -use crate::loom::sync::atomic::AtomicUsize; - -use std::fmt; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; -use std::usize; - -pub(super) struct State { - val: AtomicUsize, -} - -/// Current state value. -#[derive(Copy, Clone)] -pub(super) struct Snapshot(usize); - -type UpdateResult = Result; - -/// The task is currently being run. -const RUNNING: usize = 0b0001; - -/// The task is complete. -/// -/// Once this bit is set, it is never unset. -const COMPLETE: usize = 0b0010; - -/// Extracts the task's lifecycle value from the state. -const LIFECYCLE_MASK: usize = 0b11; - -/// Flag tracking if the task has been pushed into a run queue. -const NOTIFIED: usize = 0b100; - -/// The join handle is still around. -#[allow(clippy::unusual_byte_groupings)] // https://github.com/rust-lang/rust-clippy/issues/6556 -const JOIN_INTEREST: usize = 0b1_000; - -/// A join handle waker has been set. -#[allow(clippy::unusual_byte_groupings)] // https://github.com/rust-lang/rust-clippy/issues/6556 -const JOIN_WAKER: usize = 0b10_000; - -/// The task has been forcibly cancelled. -#[allow(clippy::unusual_byte_groupings)] // https://github.com/rust-lang/rust-clippy/issues/6556 -const CANCELLED: usize = 0b100_000; - -/// All bits. -const STATE_MASK: usize = LIFECYCLE_MASK | NOTIFIED | JOIN_INTEREST | JOIN_WAKER | CANCELLED; - -/// Bits used by the ref count portion of the state. -const REF_COUNT_MASK: usize = !STATE_MASK; - -/// Number of positions to shift the ref count. 
-const REF_COUNT_SHIFT: usize = REF_COUNT_MASK.count_zeros() as usize; - -/// One ref count. -const REF_ONE: usize = 1 << REF_COUNT_SHIFT; - -/// State a task is initialized with. -/// -/// A task is initialized with three references: -/// -/// * A reference that will be stored in an OwnedTasks or LocalOwnedTasks. -/// * A reference that will be sent to the scheduler as an ordinary notification. -/// * A reference for the JoinHandle. -/// -/// As the task starts with a `JoinHandle`, `JOIN_INTEREST` is set. -/// As the task starts with a `Notified`, `NOTIFIED` is set. -const INITIAL_STATE: usize = (REF_ONE * 3) | JOIN_INTEREST | NOTIFIED; - -#[must_use] -pub(super) enum TransitionToRunning { - Success, - Cancelled, - Failed, - Dealloc, -} - -#[must_use] -pub(super) enum TransitionToIdle { - Ok, - OkNotified, - OkDealloc, - Cancelled, -} - -#[must_use] -pub(super) enum TransitionToNotifiedByVal { - DoNothing, - Submit, - Dealloc, -} - -#[must_use] -pub(crate) enum TransitionToNotifiedByRef { - DoNothing, - Submit, -} - -/// All transitions are performed via RMW operations. This establishes an -/// unambiguous modification order. -impl State { - /// Returns a task's initial state. - pub(super) fn new() -> State { - // The raw task returned by this method has a ref-count of three. See - // the comment on INITIAL_STATE for more. - State { - val: AtomicUsize::new(INITIAL_STATE), - } - } - - /// Loads the current state, establishes `Acquire` ordering. - pub(super) fn load(&self) -> Snapshot { - Snapshot(self.val.load(Acquire)) - } - - /// Attempts to transition the lifecycle to `Running`. This sets the - /// notified bit to false so notifications during the poll can be detected. - pub(super) fn transition_to_running(&self) -> TransitionToRunning { - self.fetch_update_action(|mut next| { - let action; - assert!(next.is_notified()); - - if !next.is_idle() { - // This happens if the task is either currently running or if it - // has already completed, e.g. 
if it was cancelled during - // shutdown. Consume the ref-count and return. - next.ref_dec(); - if next.ref_count() == 0 { - action = TransitionToRunning::Dealloc; - } else { - action = TransitionToRunning::Failed; - } - } else { - // We are able to lock the RUNNING bit. - next.set_running(); - next.unset_notified(); - - if next.is_cancelled() { - action = TransitionToRunning::Cancelled; - } else { - action = TransitionToRunning::Success; - } - } - (action, Some(next)) - }) - } - - /// Transitions the task from `Running` -> `Idle`. - /// - /// Returns `true` if the transition to `Idle` is successful, `false` otherwise. - /// The transition to `Idle` fails if the task has been flagged to be - /// cancelled. - pub(super) fn transition_to_idle(&self) -> TransitionToIdle { - self.fetch_update_action(|curr| { - assert!(curr.is_running()); - - if curr.is_cancelled() { - return (TransitionToIdle::Cancelled, None); - } - - let mut next = curr; - let action; - next.unset_running(); - - if !next.is_notified() { - // Polling the future consumes the ref-count of the Notified. - next.ref_dec(); - if next.ref_count() == 0 { - action = TransitionToIdle::OkDealloc; - } else { - action = TransitionToIdle::Ok; - } - } else { - // The caller will schedule a new notification, so we create a - // new ref-count for the notification. Our own ref-count is kept - // for now, and the caller will drop it shortly. - next.ref_inc(); - action = TransitionToIdle::OkNotified; - } - - (action, Some(next)) - }) - } - - /// Transitions the task from `Running` -> `Complete`. - pub(super) fn transition_to_complete(&self) -> Snapshot { - const DELTA: usize = RUNNING | COMPLETE; - - let prev = Snapshot(self.val.fetch_xor(DELTA, AcqRel)); - assert!(prev.is_running()); - assert!(!prev.is_complete()); - - Snapshot(prev.0 ^ DELTA) - } - - /// Transitions from `Complete` -> `Terminal`, decrementing the reference - /// count the specified number of times. 
- /// - /// Returns true if the task should be deallocated. - pub(super) fn transition_to_terminal(&self, count: usize) -> bool { - let prev = Snapshot(self.val.fetch_sub(count * REF_ONE, AcqRel)); - assert!( - prev.ref_count() >= count, - "current: {}, sub: {}", - prev.ref_count(), - count - ); - prev.ref_count() == count - } - - /// Transitions the state to `NOTIFIED`. - /// - /// If no task needs to be submitted, a ref-count is consumed. - /// - /// If a task needs to be submitted, the ref-count is incremented for the - /// new Notified. - pub(super) fn transition_to_notified_by_val(&self) -> TransitionToNotifiedByVal { - self.fetch_update_action(|mut snapshot| { - let action; - - if snapshot.is_running() { - // If the task is running, we mark it as notified, but we should - // not submit anything as the thread currently running the - // future is responsible for that. - snapshot.set_notified(); - snapshot.ref_dec(); - - // The thread that set the running bit also holds a ref-count. - assert!(snapshot.ref_count() > 0); - - action = TransitionToNotifiedByVal::DoNothing; - } else if snapshot.is_complete() || snapshot.is_notified() { - // We do not need to submit any notifications, but we have to - // decrement the ref-count. - snapshot.ref_dec(); - - if snapshot.ref_count() == 0 { - action = TransitionToNotifiedByVal::Dealloc; - } else { - action = TransitionToNotifiedByVal::DoNothing; - } - } else { - // We create a new notified that we can submit. The caller - // retains ownership of the ref-count they passed in. - snapshot.set_notified(); - snapshot.ref_inc(); - action = TransitionToNotifiedByVal::Submit; - } - - (action, Some(snapshot)) - }) - } - - /// Transitions the state to `NOTIFIED`. - pub(super) fn transition_to_notified_by_ref(&self) -> TransitionToNotifiedByRef { - self.fetch_update_action(|mut snapshot| { - if snapshot.is_complete() || snapshot.is_notified() { - // There is nothing to do in this case. 
- (TransitionToNotifiedByRef::DoNothing, None) - } else if snapshot.is_running() { - // If the task is running, we mark it as notified, but we should - // not submit as the thread currently running the future is - // responsible for that. - snapshot.set_notified(); - (TransitionToNotifiedByRef::DoNothing, Some(snapshot)) - } else { - // The task is idle and not notified. We should submit a - // notification. - snapshot.set_notified(); - snapshot.ref_inc(); - (TransitionToNotifiedByRef::Submit, Some(snapshot)) - } - }) - } - - /// Transitions the state to `NOTIFIED`, unconditionally increasing the ref count. - #[cfg(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") - ))] - pub(super) fn transition_to_notified_for_tracing(&self) { - self.fetch_update_action(|mut snapshot| { - snapshot.set_notified(); - snapshot.ref_inc(); - ((), Some(snapshot)) - }); - } - - /// Sets the cancelled bit and transitions the state to `NOTIFIED` if idle. - /// - /// Returns `true` if the task needs to be submitted to the pool for - /// execution. - pub(super) fn transition_to_notified_and_cancel(&self) -> bool { - self.fetch_update_action(|mut snapshot| { - if snapshot.is_cancelled() || snapshot.is_complete() { - // Aborts to completed or cancelled tasks are no-ops. - (false, None) - } else if snapshot.is_running() { - // If the task is running, we mark it as cancelled. The thread - // running the task will notice the cancelled bit when it - // stops polling and it will kill the task. - // - // The set_notified() call is not strictly necessary but it will - // in some cases let a wake_by_ref call return without having - // to perform a compare_exchange. - snapshot.set_notified(); - snapshot.set_cancelled(); - (false, Some(snapshot)) - } else { - // The task is idle. We set the cancelled and notified bits and - // submit a notification if the notified bit was not already - // set. 
- snapshot.set_cancelled(); - if !snapshot.is_notified() { - snapshot.set_notified(); - snapshot.ref_inc(); - (true, Some(snapshot)) - } else { - (false, Some(snapshot)) - } - } - }) - } - - /// Sets the `CANCELLED` bit and attempts to transition to `Running`. - /// - /// Returns `true` if the transition to `Running` succeeded. - pub(super) fn transition_to_shutdown(&self) -> bool { - let mut prev = Snapshot(0); - - let _ = self.fetch_update(|mut snapshot| { - prev = snapshot; - - if snapshot.is_idle() { - snapshot.set_running(); - } - - // If the task was not idle, the thread currently running the task - // will notice the cancelled bit and cancel it once the poll - // completes. - snapshot.set_cancelled(); - Some(snapshot) - }); - - prev.is_idle() - } - - /// Optimistically tries to swap the state assuming the join handle is - /// __immediately__ dropped on spawn. - pub(super) fn drop_join_handle_fast(&self) -> Result<(), ()> { - use std::sync::atomic::Ordering::Relaxed; - - // Relaxed is acceptable as if this function is called and succeeds, - // then nothing has been done w/ the join handle. - // - // The moment the join handle is used (polled), the `JOIN_WAKER` flag is - // set, at which point the CAS will fail. - // - // Given this, there is no risk if this operation is reordered. - self.val - .compare_exchange_weak( - INITIAL_STATE, - (INITIAL_STATE - REF_ONE) & !JOIN_INTEREST, - Release, - Relaxed, - ) - .map(|_| ()) - .map_err(|_| ()) - } - - /// Tries to unset the JOIN_INTEREST flag. - /// - /// Returns `Ok` if the operation happens before the task transitions to a - /// completed state, `Err` otherwise. - pub(super) fn unset_join_interested(&self) -> UpdateResult { - self.fetch_update(|curr| { - assert!(curr.is_join_interested()); - - if curr.is_complete() { - return None; - } - - let mut next = curr; - next.unset_join_interested(); - - Some(next) - }) - } - - /// Sets the `JOIN_WAKER` bit. - /// - /// Returns `Ok` if the bit is set, `Err` otherwise. 
This operation fails if - /// the task has completed. - pub(super) fn set_join_waker(&self) -> UpdateResult { - self.fetch_update(|curr| { - assert!(curr.is_join_interested()); - assert!(!curr.is_join_waker_set()); - - if curr.is_complete() { - return None; - } - - let mut next = curr; - next.set_join_waker(); - - Some(next) - }) - } - - /// Unsets the `JOIN_WAKER` bit. - /// - /// Returns `Ok` has been unset, `Err` otherwise. This operation fails if - /// the task has completed. - pub(super) fn unset_waker(&self) -> UpdateResult { - self.fetch_update(|curr| { - assert!(curr.is_join_interested()); - assert!(curr.is_join_waker_set()); - - if curr.is_complete() { - return None; - } - - let mut next = curr; - next.unset_join_waker(); - - Some(next) - }) - } - - pub(super) fn ref_inc(&self) { - use std::process; - use std::sync::atomic::Ordering::Relaxed; - - // Using a relaxed ordering is alright here, as knowledge of the - // original reference prevents other threads from erroneously deleting - // the object. - // - // As explained in the [Boost documentation][1], Increasing the - // reference counter can always be done with memory_order_relaxed: New - // references to an object can only be formed from an existing - // reference, and passing an existing reference from one thread to - // another must already provide any required synchronization. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - let prev = self.val.fetch_add(REF_ONE, Relaxed); - - // If the reference count overflowed, abort. - if prev > isize::MAX as usize { - process::abort(); - } - } - - /// Returns `true` if the task should be released. - pub(super) fn ref_dec(&self) -> bool { - let prev = Snapshot(self.val.fetch_sub(REF_ONE, AcqRel)); - assert!(prev.ref_count() >= 1); - prev.ref_count() == 1 - } - - /// Returns `true` if the task should be released. 
- pub(super) fn ref_dec_twice(&self) -> bool { - let prev = Snapshot(self.val.fetch_sub(2 * REF_ONE, AcqRel)); - assert!(prev.ref_count() >= 2); - prev.ref_count() == 2 - } - - fn fetch_update_action(&self, mut f: F) -> T - where - F: FnMut(Snapshot) -> (T, Option), - { - let mut curr = self.load(); - - loop { - let (output, next) = f(curr); - let next = match next { - Some(next) => next, - None => return output, - }; - - let res = self.val.compare_exchange(curr.0, next.0, AcqRel, Acquire); - - match res { - Ok(_) => return output, - Err(actual) => curr = Snapshot(actual), - } - } - } - - fn fetch_update(&self, mut f: F) -> Result - where - F: FnMut(Snapshot) -> Option, - { - let mut curr = self.load(); - - loop { - let next = match f(curr) { - Some(next) => next, - None => return Err(curr), - }; - - let res = self.val.compare_exchange(curr.0, next.0, AcqRel, Acquire); - - match res { - Ok(_) => return Ok(next), - Err(actual) => curr = Snapshot(actual), - } - } - } -} - -// ===== impl Snapshot ===== - -impl Snapshot { - /// Returns `true` if the task is in an idle state. - pub(super) fn is_idle(self) -> bool { - self.0 & (RUNNING | COMPLETE) == 0 - } - - /// Returns `true` if the task has been flagged as notified. - pub(super) fn is_notified(self) -> bool { - self.0 & NOTIFIED == NOTIFIED - } - - fn unset_notified(&mut self) { - self.0 &= !NOTIFIED - } - - fn set_notified(&mut self) { - self.0 |= NOTIFIED - } - - pub(super) fn is_running(self) -> bool { - self.0 & RUNNING == RUNNING - } - - fn set_running(&mut self) { - self.0 |= RUNNING; - } - - fn unset_running(&mut self) { - self.0 &= !RUNNING; - } - - pub(super) fn is_cancelled(self) -> bool { - self.0 & CANCELLED == CANCELLED - } - - fn set_cancelled(&mut self) { - self.0 |= CANCELLED; - } - - /// Returns `true` if the task's future has completed execution. 
- pub(super) fn is_complete(self) -> bool { - self.0 & COMPLETE == COMPLETE - } - - pub(super) fn is_join_interested(self) -> bool { - self.0 & JOIN_INTEREST == JOIN_INTEREST - } - - fn unset_join_interested(&mut self) { - self.0 &= !JOIN_INTEREST - } - - pub(super) fn is_join_waker_set(self) -> bool { - self.0 & JOIN_WAKER == JOIN_WAKER - } - - fn set_join_waker(&mut self) { - self.0 |= JOIN_WAKER; - } - - fn unset_join_waker(&mut self) { - self.0 &= !JOIN_WAKER - } - - pub(super) fn ref_count(self) -> usize { - (self.0 & REF_COUNT_MASK) >> REF_COUNT_SHIFT - } - - fn ref_inc(&mut self) { - assert!(self.0 <= isize::MAX as usize); - self.0 += REF_ONE; - } - - pub(super) fn ref_dec(&mut self) { - assert!(self.ref_count() > 0); - self.0 -= REF_ONE - } -} - -impl fmt::Debug for State { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let snapshot = self.load(); - snapshot.fmt(fmt) - } -} - -impl fmt::Debug for Snapshot { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Snapshot") - .field("is_running", &self.is_running()) - .field("is_complete", &self.is_complete()) - .field("is_notified", &self.is_notified()) - .field("is_cancelled", &self.is_cancelled()) - .field("is_join_interested", &self.is_join_interested()) - .field("is_join_waker_set", &self.is_join_waker_set()) - .field("ref_count", &self.ref_count()) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/trace/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/trace/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/trace/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/trace/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,345 +0,0 @@ -use crate::loom::sync::Arc; -use crate::runtime::context; -use crate::runtime::scheduler::{self, current_thread, Inject}; - -use backtrace::BacktraceFrame; -use std::cell::Cell; -use std::collections::VecDeque; -use 
std::ffi::c_void; -use std::fmt; -use std::future::Future; -use std::pin::Pin; -use std::ptr::{self, NonNull}; -use std::task::{self, Poll}; - -mod symbol; -mod tree; - -use symbol::Symbol; -use tree::Tree; - -use super::{Notified, OwnedTasks, Schedule}; - -type Backtrace = Vec; -type SymbolTrace = Vec; - -/// The ambiant backtracing context. -pub(crate) struct Context { - /// The address of [`Trace::root`] establishes an upper unwinding bound on - /// the backtraces in `Trace`. - active_frame: Cell>>, - /// The place to stash backtraces. - collector: Cell>, -} - -/// A [`Frame`] in an intrusive, doubly-linked tree of [`Frame`]s. -struct Frame { - /// The location associated with this frame. - inner_addr: *const c_void, - - /// The parent frame, if any. - parent: Option>, -} - -/// An tree execution trace. -/// -/// Traces are captured with [`Trace::capture`], rooted with [`Trace::root`] -/// and leaved with [`trace_leaf`]. -#[derive(Clone, Debug)] -pub(crate) struct Trace { - // The linear backtraces that comprise this trace. These linear traces can - // be re-knitted into a tree. - backtraces: Vec, -} - -pin_project_lite::pin_project! { - #[derive(Debug, Clone)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub(crate) struct Root { - #[pin] - future: T, - } -} - -const FAIL_NO_THREAD_LOCAL: &str = "The Tokio thread-local has been destroyed \ - as part of shutting down the current \ - thread, so collecting a taskdump is not \ - possible."; - -impl Context { - pub(crate) const fn new() -> Self { - Context { - active_frame: Cell::new(None), - collector: Cell::new(None), - } - } - - /// SAFETY: Callers of this function must ensure that trace frames always - /// form a valid linked list. 
- unsafe fn try_with_current(f: F) -> Option - where - F: FnOnce(&Self) -> R, - { - crate::runtime::context::with_trace(f) - } - - unsafe fn with_current_frame(f: F) -> R - where - F: FnOnce(&Cell>>) -> R, - { - Self::try_with_current(|context| f(&context.active_frame)).expect(FAIL_NO_THREAD_LOCAL) - } - - fn with_current_collector(f: F) -> R - where - F: FnOnce(&Cell>) -> R, - { - // SAFETY: This call can only access the collector field, so it cannot - // break the trace frame linked list. - unsafe { - Self::try_with_current(|context| f(&context.collector)).expect(FAIL_NO_THREAD_LOCAL) - } - } - - /// Produces `true` if the current task is being traced; otherwise false. - pub(crate) fn is_tracing() -> bool { - Self::with_current_collector(|maybe_collector| { - let collector = maybe_collector.take(); - let result = collector.is_some(); - maybe_collector.set(collector); - result - }) - } -} - -impl Trace { - /// Invokes `f`, returning both its result and the collection of backtraces - /// captured at each sub-invocation of [`trace_leaf`]. - #[inline(never)] - pub(crate) fn capture(f: F) -> (R, Trace) - where - F: FnOnce() -> R, - { - let collector = Trace { backtraces: vec![] }; - - let previous = Context::with_current_collector(|current| current.replace(Some(collector))); - - let result = f(); - - let collector = - Context::with_current_collector(|current| current.replace(previous)).unwrap(); - - (result, collector) - } - - /// The root of a trace. - #[inline(never)] - pub(crate) fn root(future: F) -> Root { - Root { future } - } -} - -/// If this is a sub-invocation of [`Trace::capture`], capture a backtrace. -/// -/// The captured backtrace will be returned by [`Trace::capture`]. -/// -/// Invoking this function does nothing when it is not a sub-invocation -/// [`Trace::capture`]. 
-// This function is marked `#[inline(never)]` to ensure that it gets a distinct `Frame` in the -// backtrace, below which frames should not be included in the backtrace (since they reflect the -// internal implementation details of this crate). -#[inline(never)] -pub(crate) fn trace_leaf(cx: &mut task::Context<'_>) -> Poll<()> { - // Safety: We don't manipulate the current context's active frame. - let did_trace = unsafe { - Context::try_with_current(|context_cell| { - if let Some(mut collector) = context_cell.collector.take() { - let mut frames = vec![]; - let mut above_leaf = false; - - if let Some(active_frame) = context_cell.active_frame.get() { - let active_frame = active_frame.as_ref(); - - backtrace::trace(|frame| { - let below_root = !ptr::eq(frame.symbol_address(), active_frame.inner_addr); - - // only capture frames above `Trace::leaf` and below - // `Trace::root`. - if above_leaf && below_root { - frames.push(frame.to_owned().into()); - } - - if ptr::eq(frame.symbol_address(), trace_leaf as *const _) { - above_leaf = true; - } - - // only continue unwinding if we're below `Trace::root` - below_root - }); - } - collector.backtraces.push(frames); - context_cell.collector.set(Some(collector)); - true - } else { - false - } - }) - .unwrap_or(false) - }; - - if did_trace { - // Use the same logic that `yield_now` uses to send out wakeups after - // the task yields. 
- context::with_scheduler(|scheduler| { - if let Some(scheduler) = scheduler { - match scheduler { - scheduler::Context::CurrentThread(s) => s.defer.defer(cx.waker()), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] - scheduler::Context::MultiThread(s) => s.defer.defer(cx.waker()), - #[cfg(all( - tokio_unstable, - feature = "rt-multi-thread", - not(target_os = "wasi") - ))] - scheduler::Context::MultiThreadAlt(_) => unimplemented!(), - } - } - }); - - Poll::Pending - } else { - Poll::Ready(()) - } -} - -impl fmt::Display for Trace { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - Tree::from_trace(self.clone()).fmt(f) - } -} - -fn defer R, R>(f: F) -> impl Drop { - use std::mem::ManuallyDrop; - - struct Defer R, R>(ManuallyDrop); - - impl R, R> Drop for Defer { - #[inline(always)] - fn drop(&mut self) { - unsafe { - ManuallyDrop::take(&mut self.0)(); - } - } - } - - Defer(ManuallyDrop::new(f)) -} - -impl Future for Root { - type Output = T::Output; - - #[inline(never)] - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - // SAFETY: The context's current frame is restored to its original state - // before `frame` is dropped. - unsafe { - let mut frame = Frame { - inner_addr: Self::poll as *const c_void, - parent: None, - }; - - Context::with_current_frame(|current| { - frame.parent = current.take(); - current.set(Some(NonNull::from(&frame))); - }); - - let _restore = defer(|| { - Context::with_current_frame(|current| { - current.set(frame.parent); - }); - }); - - let this = self.project(); - this.future.poll(cx) - } - } -} - -/// Trace and poll all tasks of the current_thread runtime. -pub(in crate::runtime) fn trace_current_thread( - owned: &OwnedTasks>, - local: &mut VecDeque>>, - injection: &Inject>, -) -> Vec { - // clear the local and injection queues - local.clear(); - - while let Some(task) = injection.pop() { - drop(task); - } - - // precondition: We have drained the tasks from the injection queue. 
- trace_owned(owned) -} - -cfg_rt_multi_thread! { - use crate::loom::sync::Mutex; - use crate::runtime::scheduler::multi_thread; - use crate::runtime::scheduler::multi_thread::Synced; - use crate::runtime::scheduler::inject::Shared; - - /// Trace and poll all tasks of the current_thread runtime. - /// - /// ## Safety - /// - /// Must be called with the same `synced` that `injection` was created with. - pub(in crate::runtime) unsafe fn trace_multi_thread( - owned: &OwnedTasks>, - local: &mut multi_thread::queue::Local>, - synced: &Mutex, - injection: &Shared>, - ) -> Vec { - // clear the local queue - while let Some(notified) = local.pop() { - drop(notified); - } - - // clear the injection queue - let mut synced = synced.lock(); - while let Some(notified) = injection.pop(&mut synced.inject) { - drop(notified); - } - - drop(synced); - - // precondition: we have drained the tasks from the local and injection - // queues. - trace_owned(owned) - } -} - -/// Trace the `OwnedTasks`. -/// -/// # Preconditions -/// -/// This helper presumes exclusive access to each task. The tasks must not exist -/// in any other queue. -fn trace_owned(owned: &OwnedTasks) -> Vec { - // notify each task - let mut tasks = vec![]; - owned.for_each(|task| { - // notify the task (and thus make it poll-able) and stash it - tasks.push(task.notify_for_tracing()); - // we do not poll it here since we hold a lock on `owned` and the task - // may complete and need to remove itself from `owned`. 
- }); - - tasks - .into_iter() - .map(|task| { - let local_notified = owned.assert_owner(task); - let ((), trace) = Trace::capture(|| local_notified.run()); - trace - }) - .collect() -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/trace/symbol.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/trace/symbol.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/trace/symbol.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/trace/symbol.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,92 +0,0 @@ -use backtrace::BacktraceSymbol; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::ptr; - -/// A symbol in a backtrace. -/// -/// This wrapper type serves two purposes. The first is that it provides a -/// representation of a symbol that can be inserted into hashmaps and hashsets; -/// the [`backtrace`] crate does not define [`Hash`], [`PartialEq`], or [`Eq`] -/// on [`BacktraceSymbol`], and recommends that users define their own wrapper -/// which implements these traits. -/// -/// Second, this wrapper includes a `parent_hash` field that uniquely -/// identifies this symbol's position in its trace. Otherwise, e.g., our code -/// would not be able to distinguish between recursive calls of a function at -/// different depths. 
-#[derive(Clone)] -pub(super) struct Symbol { - pub(super) symbol: BacktraceSymbol, - pub(super) parent_hash: u64, -} - -impl Hash for Symbol { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - if let Some(name) = self.symbol.name() { - name.as_bytes().hash(state); - } - - if let Some(addr) = self.symbol.addr() { - ptr::hash(addr, state); - } - - self.symbol.filename().hash(state); - self.symbol.lineno().hash(state); - self.symbol.colno().hash(state); - self.parent_hash.hash(state); - } -} - -impl PartialEq for Symbol { - fn eq(&self, other: &Self) -> bool { - (self.parent_hash == other.parent_hash) - && match (self.symbol.name(), other.symbol.name()) { - (None, None) => true, - (Some(lhs_name), Some(rhs_name)) => lhs_name.as_bytes() == rhs_name.as_bytes(), - _ => false, - } - && match (self.symbol.addr(), other.symbol.addr()) { - (None, None) => true, - (Some(lhs_addr), Some(rhs_addr)) => ptr::eq(lhs_addr, rhs_addr), - _ => false, - } - && (self.symbol.filename() == other.symbol.filename()) - && (self.symbol.lineno() == other.symbol.lineno()) - && (self.symbol.colno() == other.symbol.colno()) - } -} - -impl Eq for Symbol {} - -impl fmt::Display for Symbol { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(name) = self.symbol.name() { - let name = name.to_string(); - let name = if let Some((name, _)) = name.rsplit_once("::") { - name - } else { - &name - }; - fmt::Display::fmt(&name, f)?; - } - - if let Some(filename) = self.symbol.filename() { - f.write_str(" at ")?; - filename.to_string_lossy().fmt(f)?; - if let Some(lineno) = self.symbol.lineno() { - f.write_str(":")?; - fmt::Display::fmt(&lineno, f)?; - if let Some(colno) = self.symbol.colno() { - f.write_str(":")?; - fmt::Display::fmt(&colno, f)?; - } - } - } - - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/trace/tree.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/trace/tree.rs --- 
s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/trace/tree.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/trace/tree.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,126 +0,0 @@ -use std::collections::{hash_map::DefaultHasher, HashMap, HashSet}; -use std::fmt; -use std::hash::{Hash, Hasher}; - -use super::{Backtrace, Symbol, SymbolTrace, Trace}; - -/// An adjacency list representation of an execution tree. -/// -/// This tree provides a convenient intermediate representation for formatting -/// [`Trace`] as a tree. -pub(super) struct Tree { - /// The roots of the trees. - /// - /// There should only be one root, but the code is robust to multiple roots. - roots: HashSet, - - /// The adjacency list of symbols in the execution tree(s). - edges: HashMap>, -} - -impl Tree { - /// Constructs a [`Tree`] from [`Trace`] - pub(super) fn from_trace(trace: Trace) -> Self { - let mut roots: HashSet = HashSet::default(); - let mut edges: HashMap> = HashMap::default(); - - for trace in trace.backtraces { - let trace = to_symboltrace(trace); - - if let Some(first) = trace.first() { - roots.insert(first.to_owned()); - } - - let mut trace = trace.into_iter().peekable(); - while let Some(frame) = trace.next() { - let subframes = edges.entry(frame).or_default(); - if let Some(subframe) = trace.peek() { - subframes.insert(subframe.clone()); - } - } - } - - Tree { roots, edges } - } - - /// Produces the sub-symbols of a given symbol. - fn consequences(&self, frame: &Symbol) -> Option> { - Some(self.edges.get(frame)?.iter()) - } - - /// Format this [`Tree`] as a textual tree. 
- fn display( - &self, - f: &mut W, - root: &Symbol, - is_last: bool, - prefix: &str, - ) -> fmt::Result { - let root_fmt = format!("{}", root); - - let current; - let next; - - if is_last { - current = format!("{prefix}└╼\u{a0}{root_fmt}"); - next = format!("{}\u{a0}\u{a0}\u{a0}", prefix); - } else { - current = format!("{prefix}├╼\u{a0}{root_fmt}"); - next = format!("{}│\u{a0}\u{a0}", prefix); - } - - write!(f, "{}", { - let mut current = current.chars(); - current.next().unwrap(); - current.next().unwrap(); - ¤t.as_str() - })?; - - if let Some(consequences) = self.consequences(root) { - let len = consequences.len(); - for (i, consequence) in consequences.enumerate() { - let is_last = i == len - 1; - writeln!(f)?; - self.display(f, consequence, is_last, &next)?; - } - } - - Ok(()) - } -} - -impl fmt::Display for Tree { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for root in &self.roots { - self.display(f, root, true, " ")?; - } - Ok(()) - } -} - -/// Resolve a sequence of [`backtrace::BacktraceFrame`]s into a sequence of -/// [`Symbol`]s. -fn to_symboltrace(backtrace: Backtrace) -> SymbolTrace { - // Resolve the backtrace frames to symbols. - let backtrace: Backtrace = { - let mut backtrace = backtrace::Backtrace::from(backtrace); - backtrace.resolve(); - backtrace.into() - }; - - // Accumulate the symbols in descending order into `symboltrace`. 
- let mut symboltrace: SymbolTrace = vec![]; - let mut state = DefaultHasher::new(); - for frame in backtrace.into_iter().rev() { - for symbol in frame.symbols().iter().rev() { - let symbol = Symbol { - symbol: symbol.clone(), - parent_hash: state.finish(), - }; - symbol.hash(&mut state); - symboltrace.push(symbol); - } - } - - symboltrace -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/waker.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/waker.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/task/waker.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/task/waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,102 +0,0 @@ -use crate::runtime::task::{Header, RawTask, Schedule}; - -use std::marker::PhantomData; -use std::mem::ManuallyDrop; -use std::ops; -use std::ptr::NonNull; -use std::task::{RawWaker, RawWakerVTable, Waker}; - -pub(super) struct WakerRef<'a, S: 'static> { - waker: ManuallyDrop, - _p: PhantomData<(&'a Header, S)>, -} - -/// Returns a `WakerRef` which avoids having to preemptively increase the -/// refcount if there is no need to do so. -pub(super) fn waker_ref(header: &NonNull
) -> WakerRef<'_, S> -where - S: Schedule, -{ - // `Waker::will_wake` uses the VTABLE pointer as part of the check. This - // means that `will_wake` will always return false when using the current - // task's waker. (discussion at rust-lang/rust#66281). - // - // To fix this, we use a single vtable. Since we pass in a reference at this - // point and not an *owned* waker, we must ensure that `drop` is never - // called on this waker instance. This is done by wrapping it with - // `ManuallyDrop` and then never calling drop. - let waker = unsafe { ManuallyDrop::new(Waker::from_raw(raw_waker(*header))) }; - - WakerRef { - waker, - _p: PhantomData, - } -} - -impl ops::Deref for WakerRef<'_, S> { - type Target = Waker; - - fn deref(&self) -> &Waker { - &self.waker - } -} - -cfg_trace! { - macro_rules! trace { - ($header:expr, $op:expr) => { - if let Some(id) = Header::get_tracing_id(&$header) { - tracing::trace!( - target: "tokio::task::waker", - op = $op, - task.id = id.into_u64(), - ); - } - } - } -} - -cfg_not_trace! { - macro_rules! 
trace { - ($header:expr, $op:expr) => { - // noop - let _ = &$header; - } - } -} - -unsafe fn clone_waker(ptr: *const ()) -> RawWaker { - let header = NonNull::new_unchecked(ptr as *mut Header); - trace!(header, "waker.clone"); - header.as_ref().state.ref_inc(); - raw_waker(header) -} - -unsafe fn drop_waker(ptr: *const ()) { - let ptr = NonNull::new_unchecked(ptr as *mut Header); - trace!(ptr, "waker.drop"); - let raw = RawTask::from_raw(ptr); - raw.drop_reference(); -} - -unsafe fn wake_by_val(ptr: *const ()) { - let ptr = NonNull::new_unchecked(ptr as *mut Header); - trace!(ptr, "waker.wake"); - let raw = RawTask::from_raw(ptr); - raw.wake_by_val(); -} - -// Wake without consuming the waker -unsafe fn wake_by_ref(ptr: *const ()) { - let ptr = NonNull::new_unchecked(ptr as *mut Header); - trace!(ptr, "waker.wake_by_ref"); - let raw = RawTask::from_raw(ptr); - raw.wake_by_ref(); -} - -static WAKER_VTABLE: RawWakerVTable = - RawWakerVTable::new(clone_waker, wake_by_val, wake_by_ref, drop_waker); - -fn raw_waker(header: NonNull
) -> RawWaker { - let ptr = header.as_ptr() as *const (); - RawWaker::new(ptr, &WAKER_VTABLE) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/inject.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/inject.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/inject.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/inject.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,54 +0,0 @@ -use crate::runtime::scheduler::inject; - -#[test] -fn push_and_pop() { - const N: usize = 2; - - let (inject, mut synced) = inject::Shared::new(); - - for i in 0..N { - assert_eq!(inject.len(), i); - let (task, _) = super::unowned(async {}); - unsafe { inject.push(&mut synced, task) }; - } - - for i in 0..N { - assert_eq!(inject.len(), N - i); - assert!(unsafe { inject.pop(&mut synced) }.is_some()); - } - - println!("--------------"); - - assert!(unsafe { inject.pop(&mut synced) }.is_none()); -} - -#[test] -fn push_batch_and_pop() { - let (inject, mut inject_synced) = inject::Shared::new(); - - unsafe { - inject.push_batch( - &mut inject_synced, - (0..10).map(|_| super::unowned(async {}).0), - ); - - assert_eq!(5, inject.pop_n(&mut inject_synced, 5).count()); - assert_eq!(5, inject.pop_n(&mut inject_synced, 5).count()); - assert_eq!(0, inject.pop_n(&mut inject_synced, 5).count()); - } -} - -#[test] -fn pop_n_drains_on_drop() { - let (inject, mut inject_synced) = inject::Shared::new(); - - unsafe { - inject.push_batch( - &mut inject_synced, - (0..10).map(|_| super::unowned(async {}).0), - ); - let _ = inject.pop_n(&mut inject_synced, 10); - - assert_eq!(inject.len(), 0); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_blocking.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_blocking.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_blocking.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_blocking.rs 
1970-01-01 01:00:00.000000000 +0100 @@ -1,102 +0,0 @@ -use crate::runtime::{self, Runtime}; - -use std::sync::Arc; - -#[test] -fn blocking_shutdown() { - loom::model(|| { - let v = Arc::new(()); - - let rt = mk_runtime(1); - { - let _enter = rt.enter(); - for _ in 0..2 { - let v = v.clone(); - crate::task::spawn_blocking(move || { - assert!(1 < Arc::strong_count(&v)); - }); - } - } - - drop(rt); - assert_eq!(1, Arc::strong_count(&v)); - }); -} - -#[test] -fn spawn_mandatory_blocking_should_always_run() { - use crate::runtime::tests::loom_oneshot; - loom::model(|| { - let rt = runtime::Builder::new_current_thread().build().unwrap(); - - let (tx, rx) = loom_oneshot::channel(); - let _enter = rt.enter(); - runtime::spawn_blocking(|| {}); - runtime::spawn_mandatory_blocking(move || { - let _ = tx.send(()); - }) - .unwrap(); - - drop(rt); - - // This call will deadlock if `spawn_mandatory_blocking` doesn't run. - let () = rx.recv(); - }); -} - -#[test] -fn spawn_mandatory_blocking_should_run_even_when_shutting_down_from_other_thread() { - use crate::runtime::tests::loom_oneshot; - loom::model(|| { - let rt = runtime::Builder::new_current_thread().build().unwrap(); - let handle = rt.handle().clone(); - - // Drop the runtime in a different thread - { - loom::thread::spawn(move || { - drop(rt); - }); - } - - let _enter = handle.enter(); - let (tx, rx) = loom_oneshot::channel(); - let handle = runtime::spawn_mandatory_blocking(move || { - let _ = tx.send(()); - }); - - // handle.is_some() means that `spawn_mandatory_blocking` - // promised us to run the blocking task - if handle.is_some() { - // This call will deadlock if `spawn_mandatory_blocking` doesn't run. 
- let () = rx.recv(); - } - }); -} - -#[test] -fn spawn_blocking_when_paused() { - use std::time::Duration; - loom::model(|| { - let rt = crate::runtime::Builder::new_current_thread() - .enable_time() - .start_paused(true) - .build() - .unwrap(); - let handle = rt.handle(); - let _enter = handle.enter(); - let a = crate::task::spawn_blocking(|| {}); - let b = crate::task::spawn_blocking(|| {}); - rt.block_on(crate::time::timeout(Duration::from_millis(1), async move { - a.await.expect("blocking task should finish"); - b.await.expect("blocking task should finish"); - })) - .expect("timeout should not trigger"); - }); -} - -fn mk_runtime(num_threads: usize) -> Runtime { - runtime::Builder::new_multi_thread() - .worker_threads(num_threads) - .build() - .unwrap() -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_current_thread/yield_now.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_current_thread/yield_now.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_current_thread/yield_now.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_current_thread/yield_now.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -use crate::runtime::park; -use crate::runtime::tests::loom_oneshot as oneshot; -use crate::runtime::{self, Runtime}; - -#[test] -fn yield_calls_park_before_scheduling_again() { - // Don't need to check all permutations - let mut loom = loom::model::Builder::default(); - loom.max_permutations = Some(1); - loom.check(|| { - let rt = mk_runtime(2); - let (tx, rx) = oneshot::channel::<()>(); - - rt.spawn(async { - let tid = loom::thread::current().id(); - let park_count = park::current_thread_park_count(); - - crate::task::yield_now().await; - - if tid == loom::thread::current().id() { - let new_park_count = park::current_thread_park_count(); - assert_eq!(park_count + 1, new_park_count); - } - - tx.send(()); - }); - - rx.recv(); - }); -} - -fn 
mk_runtime(num_threads: usize) -> Runtime { - runtime::Builder::new_multi_thread() - .worker_threads(num_threads) - .build() - .unwrap() -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_current_thread.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_current_thread.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_current_thread.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_current_thread.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,144 +0,0 @@ -mod yield_now; - -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::Arc; -use crate::loom::thread; -use crate::runtime::{Builder, Runtime}; -use crate::sync::oneshot::{self, Receiver}; -use crate::task; -use std::future::Future; -use std::pin::Pin; -use std::sync::atomic::Ordering::{Acquire, Release}; -use std::task::{Context, Poll}; - -fn assert_at_most_num_polls(rt: Arc, at_most_polls: usize) { - let (tx, rx) = oneshot::channel(); - let num_polls = Arc::new(AtomicUsize::new(0)); - rt.spawn(async move { - for _ in 0..12 { - task::yield_now().await; - } - tx.send(()).unwrap(); - }); - - rt.block_on(async { - BlockedFuture { - rx, - num_polls: num_polls.clone(), - } - .await; - }); - - let polls = num_polls.load(Acquire); - assert!(polls <= at_most_polls); -} - -#[test] -fn block_on_num_polls() { - loom::model(|| { - // we expect at most 4 number of polls because there are three points at - // which we poll the future and an opportunity for a false-positive.. At - // any of these points it can be ready: - // - // - when we fail to steal the parker and we block on a notification - // that it is available. - // - // - when we steal the parker and we schedule the future - // - // - when the future is woken up and we have ran the max number of tasks - // for the current tick or there are no more tasks to run. 
- // - // - a thread is notified that the parker is available but a third - // thread acquires it before the notified thread can. - // - let at_most = 4; - - let rt1 = Arc::new(Builder::new_current_thread().build().unwrap()); - let rt2 = rt1.clone(); - let rt3 = rt1.clone(); - - let th1 = thread::spawn(move || assert_at_most_num_polls(rt1, at_most)); - let th2 = thread::spawn(move || assert_at_most_num_polls(rt2, at_most)); - let th3 = thread::spawn(move || assert_at_most_num_polls(rt3, at_most)); - - th1.join().unwrap(); - th2.join().unwrap(); - th3.join().unwrap(); - }); -} - -#[test] -fn assert_no_unnecessary_polls() { - loom::model(|| { - // // After we poll outer future, woken should reset to false - let rt = Builder::new_current_thread().build().unwrap(); - let (tx, rx) = oneshot::channel(); - let pending_cnt = Arc::new(AtomicUsize::new(0)); - - rt.spawn(async move { - for _ in 0..24 { - task::yield_now().await; - } - tx.send(()).unwrap(); - }); - - let pending_cnt_clone = pending_cnt.clone(); - rt.block_on(async move { - // use task::yield_now() to ensure woken set to true - // ResetFuture will be polled at most once - // Here comes two cases - // 1. recv no message from channel, ResetFuture will be polled - // but get Pending and we record ResetFuture.pending_cnt ++. - // Then when message arrive, ResetFuture returns Ready. So we - // expect ResetFuture.pending_cnt = 1 - // 2. recv message from channel, ResetFuture returns Ready immediately. 
- // We expect ResetFuture.pending_cnt = 0 - task::yield_now().await; - ResetFuture { - rx, - pending_cnt: pending_cnt_clone, - } - .await; - }); - - let pending_cnt = pending_cnt.load(Acquire); - assert!(pending_cnt <= 1); - }); -} - -struct BlockedFuture { - rx: Receiver<()>, - num_polls: Arc, -} - -impl Future for BlockedFuture { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.num_polls.fetch_add(1, Release); - - match Pin::new(&mut self.rx).poll(cx) { - Poll::Pending => Poll::Pending, - _ => Poll::Ready(()), - } - } -} - -struct ResetFuture { - rx: Receiver<()>, - pending_cnt: Arc, -} - -impl Future for ResetFuture { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match Pin::new(&mut self.rx).poll(cx) { - Poll::Pending => { - self.pending_cnt.fetch_add(1, Release); - Poll::Pending - } - _ => Poll::Ready(()), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_join_set.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_join_set.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_join_set.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_join_set.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,82 +0,0 @@ -use crate::runtime::Builder; -use crate::task::JoinSet; - -#[test] -fn test_join_set() { - loom::model(|| { - let rt = Builder::new_multi_thread() - .worker_threads(1) - .build() - .unwrap(); - let mut set = JoinSet::new(); - - rt.block_on(async { - assert_eq!(set.len(), 0); - set.spawn(async { () }); - assert_eq!(set.len(), 1); - set.spawn(async { () }); - assert_eq!(set.len(), 2); - let () = set.join_next().await.unwrap().unwrap(); - assert_eq!(set.len(), 1); - set.spawn(async { () }); - assert_eq!(set.len(), 2); - let () = set.join_next().await.unwrap().unwrap(); - assert_eq!(set.len(), 1); - let () = set.join_next().await.unwrap().unwrap(); - assert_eq!(set.len(), 
0); - set.spawn(async { () }); - assert_eq!(set.len(), 1); - }); - - drop(set); - drop(rt); - }); -} - -#[test] -fn abort_all_during_completion() { - use std::sync::{ - atomic::{AtomicBool, Ordering::SeqCst}, - Arc, - }; - - // These booleans assert that at least one execution had the task complete first, and that at - // least one execution had the task be cancelled before it completed. - let complete_happened = Arc::new(AtomicBool::new(false)); - let cancel_happened = Arc::new(AtomicBool::new(false)); - - { - let complete_happened = complete_happened.clone(); - let cancel_happened = cancel_happened.clone(); - loom::model(move || { - let rt = Builder::new_multi_thread() - .worker_threads(1) - .build() - .unwrap(); - - let mut set = JoinSet::new(); - - rt.block_on(async { - set.spawn(async { () }); - set.abort_all(); - - match set.join_next().await { - Some(Ok(())) => complete_happened.store(true, SeqCst), - Some(Err(err)) if err.is_cancelled() => cancel_happened.store(true, SeqCst), - Some(Err(err)) => panic!("fail: {}", err), - None => { - unreachable!("Aborting the task does not remove it from the JoinSet.") - } - } - - assert!(matches!(set.join_next().await, None)); - }); - - drop(set); - drop(rt); - }); - } - - assert!(complete_happened.load(SeqCst)); - assert!(cancel_happened.load(SeqCst)); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_local.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_local.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_local.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_local.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,47 +0,0 @@ -use crate::runtime::tests::loom_oneshot as oneshot; -use crate::runtime::Builder; -use crate::task::LocalSet; - -use std::task::Poll; - -/// Waking a runtime will attempt to push a task into a queue of notifications -/// in the runtime, however the tasks in such a queue usually have a reference 
-/// to the runtime itself. This means that if they are not properly removed at -/// runtime shutdown, this will cause a memory leak. -/// -/// This test verifies that waking something during shutdown of a LocalSet does -/// not result in tasks lingering in the queue once shutdown is complete. This -/// is verified using loom's leak finder. -#[test] -fn wake_during_shutdown() { - loom::model(|| { - let rt = Builder::new_current_thread().build().unwrap(); - let ls = LocalSet::new(); - - let (send, recv) = oneshot::channel(); - - ls.spawn_local(async move { - let mut send = Some(send); - - let () = futures::future::poll_fn(|cx| { - if let Some(send) = send.take() { - send.send(cx.waker().clone()); - } - - Poll::Pending - }) - .await; - }); - - let handle = loom::thread::spawn(move || { - let waker = recv.recv(); - waker.wake(); - }); - - ls.block_on(&rt, crate::task::yield_now()); - - drop(ls); - handle.join().unwrap(); - drop(rt); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/queue.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/queue.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/queue.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/queue.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,205 +0,0 @@ -use crate::runtime::scheduler::multi_thread::{queue, Stats}; -use crate::runtime::tests::{unowned, NoopSchedule}; - -use loom::thread; -use std::cell::RefCell; - -fn new_stats() -> Stats { - Stats::new(&crate::runtime::WorkerMetrics::new()) -} - -#[test] -fn basic() { - loom::model(|| { - let (steal, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - let mut stats = new_stats(); - - let th = thread::spawn(move || { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - let mut n = 0; - - for _ in 0..3 { - if steal.steal_into(&mut local, &mut stats).is_some() { - n += 1; - 
} - - while local.pop().is_some() { - n += 1; - } - } - - n - }); - - let mut n = 0; - - for _ in 0..2 { - for _ in 0..2 { - let (task, _) = unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - } - - if local.pop().is_some() { - n += 1; - } - - // Push another task - let (task, _) = unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - - while local.pop().is_some() { - n += 1; - } - } - - n += inject.borrow_mut().drain(..).count(); - - n += th.join().unwrap(); - - assert_eq!(6, n); - }); -} - -#[test] -fn steal_overflow() { - loom::model(|| { - let (steal, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - let mut stats = new_stats(); - - let th = thread::spawn(move || { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - let mut n = 0; - - if steal.steal_into(&mut local, &mut stats).is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - - n - }); - - let mut n = 0; - - // push a task, pop a task - let (task, _) = unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - - if local.pop().is_some() { - n += 1; - } - - for _ in 0..6 { - let (task, _) = unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - } - - n += th.join().unwrap(); - - while local.pop().is_some() { - n += 1; - } - - n += inject.borrow_mut().drain(..).count(); - - assert_eq!(7, n); - }); -} - -#[test] -fn multi_stealer() { - const NUM_TASKS: usize = 5; - - fn steal_tasks(steal: queue::Steal) -> usize { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - - if steal.steal_into(&mut local, &mut stats).is_none() { - return 0; - } - - let mut n = 1; - - while local.pop().is_some() { - n += 1; - } - - n - } - - loom::model(|| { - let (steal, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - let mut stats = new_stats(); - - // Push work - for _ in 0..NUM_TASKS { - let (task, _) = unowned(async {}); - 
local.push_back_or_overflow(task, &inject, &mut stats); - } - - let th1 = { - let steal = steal.clone(); - thread::spawn(move || steal_tasks(steal)) - }; - - let th2 = thread::spawn(move || steal_tasks(steal)); - - let mut n = 0; - - while local.pop().is_some() { - n += 1; - } - - n += inject.borrow_mut().drain(..).count(); - - n += th1.join().unwrap(); - n += th2.join().unwrap(); - - assert_eq!(n, NUM_TASKS); - }); -} - -#[test] -fn chained_steal() { - loom::model(|| { - let mut stats = new_stats(); - let (s1, mut l1) = queue::local(); - let (s2, mut l2) = queue::local(); - let inject = RefCell::new(vec![]); - - // Load up some tasks - for _ in 0..4 { - let (task, _) = unowned(async {}); - l1.push_back_or_overflow(task, &inject, &mut stats); - - let (task, _) = unowned(async {}); - l2.push_back_or_overflow(task, &inject, &mut stats); - } - - // Spawn a task to steal from **our** queue - let th = thread::spawn(move || { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - s1.steal_into(&mut local, &mut stats); - - while local.pop().is_some() {} - }); - - // Drain our tasks, then attempt to steal - while l1.pop().is_some() {} - - s2.steal_into(&mut l1, &mut stats); - - th.join().unwrap(); - - while l1.pop().is_some() {} - while l2.pop().is_some() {} - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/shutdown.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/shutdown.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/shutdown.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/shutdown.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,28 +0,0 @@ -use crate::runtime::{Builder, Handle}; - -#[test] -fn join_handle_cancel_on_shutdown() { - let mut builder = loom::model::Builder::new(); - builder.preemption_bound = Some(2); - builder.check(|| { - use futures::future::FutureExt; - - let rt = 
Builder::new_multi_thread() - .worker_threads(2) - .build() - .unwrap(); - - let handle = rt.block_on(async move { Handle::current() }); - - let jh1 = handle.spawn(futures::future::pending::<()>()); - - drop(rt); - - let jh2 = handle.spawn(futures::future::pending::<()>()); - - let err1 = jh1.now_or_never().unwrap().unwrap_err(); - let err2 = jh2.now_or_never().unwrap().unwrap_err(); - assert!(err1.is_cancelled()); - assert!(err2.is_cancelled()); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/yield_now.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/yield_now.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/yield_now.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread/yield_now.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -use crate::runtime::park; -use crate::runtime::tests::loom_oneshot as oneshot; -use crate::runtime::{self, Runtime}; - -#[test] -fn yield_calls_park_before_scheduling_again() { - // Don't need to check all permutations - let mut loom = loom::model::Builder::default(); - loom.max_permutations = Some(1); - loom.check(|| { - let rt = mk_runtime(2); - let (tx, rx) = oneshot::channel::<()>(); - - rt.spawn(async { - let tid = loom::thread::current().id(); - let park_count = park::current_thread_park_count(); - - crate::task::yield_now().await; - - if tid == loom::thread::current().id() { - let new_park_count = park::current_thread_park_count(); - assert_eq!(park_count + 1, new_park_count); - } - - tx.send(()); - }); - - rx.recv(); - }); -} - -fn mk_runtime(num_threads: usize) -> Runtime { - runtime::Builder::new_multi_thread() - .worker_threads(num_threads) - .build() - .unwrap() -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/queue.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/queue.rs --- 
s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/queue.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/queue.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,205 +0,0 @@ -use crate::runtime::scheduler::multi_thread::{queue, Stats}; -use crate::runtime::tests::{unowned, NoopSchedule}; - -use loom::thread; -use std::cell::RefCell; - -fn new_stats() -> Stats { - Stats::new(&crate::runtime::WorkerMetrics::new()) -} - -#[test] -fn basic() { - loom::model(|| { - let (steal, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - let mut stats = new_stats(); - - let th = thread::spawn(move || { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - let mut n = 0; - - for _ in 0..3 { - if steal.steal_into(&mut local, &mut stats).is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - } - - n - }); - - let mut n = 0; - - for _ in 0..2 { - for _ in 0..2 { - let (task, _) = unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - } - - if local.pop().is_some() { - n += 1; - } - - // Push another task - let (task, _) = unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - - while local.pop().is_some() { - n += 1; - } - } - - n += inject.borrow_mut().drain(..).count(); - - n += th.join().unwrap(); - - assert_eq!(6, n); - }); -} - -#[test] -fn steal_overflow() { - loom::model(|| { - let (steal, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - let mut stats = new_stats(); - - let th = thread::spawn(move || { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - let mut n = 0; - - if steal.steal_into(&mut local, &mut stats).is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - - n - }); - - let mut n = 0; - - // push a task, pop a task - let (task, _) = unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - - if 
local.pop().is_some() { - n += 1; - } - - for _ in 0..6 { - let (task, _) = unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - } - - n += th.join().unwrap(); - - while local.pop().is_some() { - n += 1; - } - - n += inject.borrow_mut().drain(..).count(); - - assert_eq!(7, n); - }); -} - -#[test] -fn multi_stealer() { - const NUM_TASKS: usize = 5; - - fn steal_tasks(steal: queue::Steal) -> usize { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - - if steal.steal_into(&mut local, &mut stats).is_none() { - return 0; - } - - let mut n = 1; - - while local.pop().is_some() { - n += 1; - } - - n - } - - loom::model(|| { - let (steal, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - let mut stats = new_stats(); - - // Push work - for _ in 0..NUM_TASKS { - let (task, _) = unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - } - - let th1 = { - let steal = steal.clone(); - thread::spawn(move || steal_tasks(steal)) - }; - - let th2 = thread::spawn(move || steal_tasks(steal)); - - let mut n = 0; - - while local.pop().is_some() { - n += 1; - } - - n += inject.borrow_mut().drain(..).count(); - - n += th1.join().unwrap(); - n += th2.join().unwrap(); - - assert_eq!(n, NUM_TASKS); - }); -} - -#[test] -fn chained_steal() { - loom::model(|| { - let mut stats = new_stats(); - let (s1, mut l1) = queue::local(); - let (s2, mut l2) = queue::local(); - let inject = RefCell::new(vec![]); - - // Load up some tasks - for _ in 0..4 { - let (task, _) = unowned(async {}); - l1.push_back_or_overflow(task, &inject, &mut stats); - - let (task, _) = unowned(async {}); - l2.push_back_or_overflow(task, &inject, &mut stats); - } - - // Spawn a task to steal from **our** queue - let th = thread::spawn(move || { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - s1.steal_into(&mut local, &mut stats); - - while local.pop().is_some() {} - }); - - // Drain our tasks, then attempt to steal - 
while l1.pop().is_some() {} - - s2.steal_into(&mut l1, &mut stats); - - th.join().unwrap(); - - while l1.pop().is_some() {} - while l2.pop().is_some() {} - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/shutdown.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/shutdown.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/shutdown.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/shutdown.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,28 +0,0 @@ -use crate::runtime::{Builder, Handle}; - -#[test] -fn join_handle_cancel_on_shutdown() { - let mut builder = loom::model::Builder::new(); - builder.preemption_bound = Some(2); - builder.check(|| { - use futures::future::FutureExt; - - let rt = Builder::new_multi_thread() - .worker_threads(2) - .build() - .unwrap(); - - let handle = rt.block_on(async move { Handle::current() }); - - let jh1 = handle.spawn(futures::future::pending::<()>()); - - drop(rt); - - let jh2 = handle.spawn(futures::future::pending::<()>()); - - let err1 = jh1.now_or_never().unwrap().unwrap_err(); - let err2 = jh2.now_or_never().unwrap().unwrap_err(); - assert!(err1.is_cancelled()); - assert!(err2.is_cancelled()); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/yield_now.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/yield_now.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/yield_now.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt/yield_now.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,37 +0,0 @@ -use crate::runtime::park; -use crate::runtime::tests::loom_oneshot as oneshot; -use crate::runtime::{self, Runtime}; - -#[test] -fn yield_calls_park_before_scheduling_again() { - // Don't need to check all 
permutations - let mut loom = loom::model::Builder::default(); - loom.max_permutations = Some(1); - loom.check(|| { - let rt = mk_runtime(2); - let (tx, rx) = oneshot::channel::<()>(); - - rt.spawn(async { - let tid = loom::thread::current().id(); - let park_count = park::current_thread_park_count(); - - crate::task::yield_now().await; - - if tid == loom::thread::current().id() { - let new_park_count = park::current_thread_park_count(); - assert_eq!(park_count + 1, new_park_count); - } - - tx.send(()); - }); - - rx.recv(); - }); -} - -fn mk_runtime(num_threads: usize) -> Runtime { - runtime::Builder::new_multi_thread() - .worker_threads(num_threads) - .build() - .unwrap() -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread_alt.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,570 +0,0 @@ -mod queue; -mod shutdown; -mod yield_now; - -/// Full runtime loom tests. These are heavy tests and take significant time to -/// run on CI. -/// -/// Use `LOOM_MAX_PREEMPTIONS=1` to do a "quick" run as a smoke test. 
-/// -/// In order to speed up the C -use crate::future::poll_fn; -use crate::runtime::tests::loom_oneshot as oneshot; -use crate::runtime::{self, Runtime}; -use crate::{spawn, task}; -use tokio_test::assert_ok; - -use loom::sync::atomic::{AtomicBool, AtomicUsize}; -use loom::sync::Arc; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::pin::Pin; -use std::sync::atomic::Ordering::{Relaxed, SeqCst}; -use std::task::{Context, Poll}; - -mod atomic_take { - use loom::sync::atomic::AtomicBool; - use std::mem::MaybeUninit; - use std::sync::atomic::Ordering::SeqCst; - - pub(super) struct AtomicTake { - inner: MaybeUninit, - taken: AtomicBool, - } - - impl AtomicTake { - pub(super) fn new(value: T) -> Self { - Self { - inner: MaybeUninit::new(value), - taken: AtomicBool::new(false), - } - } - - pub(super) fn take(&self) -> Option { - // safety: Only one thread will see the boolean change from false - // to true, so that thread is able to take the value. - match self.taken.fetch_or(true, SeqCst) { - false => unsafe { Some(std::ptr::read(self.inner.as_ptr())) }, - true => None, - } - } - } - - impl Drop for AtomicTake { - fn drop(&mut self) { - drop(self.take()); - } - } -} - -#[derive(Clone)] -struct AtomicOneshot { - value: std::sync::Arc>>, -} -impl AtomicOneshot { - fn new(sender: oneshot::Sender) -> Self { - Self { - value: std::sync::Arc::new(atomic_take::AtomicTake::new(sender)), - } - } - - fn assert_send(&self, value: T) { - self.value.take().unwrap().send(value); - } -} - -/// Tests are divided into groups to make the runs faster on CI. -mod group_a { - use super::*; - - #[test] - fn racy_shutdown() { - loom::model(|| { - let pool = mk_pool(1); - - // here's the case we want to exercise: - // - // a worker that still has tasks in its local queue gets sent to the blocking pool (due to - // block_in_place). the blocking pool is shut down, so drops the worker. the worker's - // shutdown method never gets run. 
- // - // we do this by spawning two tasks on one worker, the first of which does block_in_place, - // and then immediately drop the pool. - - pool.spawn(track(async { - crate::task::block_in_place(|| {}); - })); - pool.spawn(track(async {})); - drop(pool); - }); - } - - #[test] - fn pool_multi_spawn() { - loom::model(|| { - let pool = mk_pool(2); - let c1 = Arc::new(AtomicUsize::new(0)); - - let (tx, rx) = oneshot::channel(); - let tx1 = AtomicOneshot::new(tx); - - // Spawn a task - let c2 = c1.clone(); - let tx2 = tx1.clone(); - pool.spawn(track(async move { - spawn(track(async move { - if 1 == c1.fetch_add(1, Relaxed) { - tx1.assert_send(()); - } - })); - })); - - // Spawn a second task - pool.spawn(track(async move { - spawn(track(async move { - if 1 == c2.fetch_add(1, Relaxed) { - tx2.assert_send(()); - } - })); - })); - - rx.recv(); - }); - } - - fn only_blocking_inner(first_pending: bool) { - loom::model(move || { - let pool = mk_pool(1); - let (block_tx, block_rx) = oneshot::channel(); - - pool.spawn(track(async move { - crate::task::block_in_place(move || { - block_tx.send(()); - }); - if first_pending { - task::yield_now().await - } - })); - - block_rx.recv(); - drop(pool); - }); - } - - #[test] - fn only_blocking_without_pending() { - only_blocking_inner(false) - } - - #[test] - fn only_blocking_with_pending() { - only_blocking_inner(true) - } -} - -mod group_b { - use super::*; - - fn blocking_and_regular_inner(first_pending: bool) { - const NUM: usize = 3; - loom::model(move || { - let pool = mk_pool(1); - let cnt = Arc::new(AtomicUsize::new(0)); - - let (block_tx, block_rx) = oneshot::channel(); - let (done_tx, done_rx) = oneshot::channel(); - let done_tx = AtomicOneshot::new(done_tx); - - pool.spawn(track(async move { - crate::task::block_in_place(move || { - block_tx.send(()); - }); - if first_pending { - task::yield_now().await - } - })); - - for _ in 0..NUM { - let cnt = cnt.clone(); - let done_tx = done_tx.clone(); - - pool.spawn(track(async move 
{ - if NUM == cnt.fetch_add(1, Relaxed) + 1 { - done_tx.assert_send(()); - } - })); - } - - done_rx.recv(); - block_rx.recv(); - - drop(pool); - }); - } - - #[test] - #[ignore] // TODO: uncomment - fn blocking_and_regular_without_pending() { - blocking_and_regular_inner(false); - } - - #[test] - fn blocking_and_regular_with_pending() { - blocking_and_regular_inner(true); - } - - #[test] - fn join_output() { - loom::model(|| { - let rt = mk_pool(1); - - rt.block_on(async { - let t = crate::spawn(track(async { "hello" })); - - let out = assert_ok!(t.await); - assert_eq!("hello", out.into_inner()); - }); - }); - } - - #[test] - fn poll_drop_handle_then_drop() { - loom::model(|| { - let rt = mk_pool(1); - - rt.block_on(async move { - let mut t = crate::spawn(track(async { "hello" })); - - poll_fn(|cx| { - let _ = Pin::new(&mut t).poll(cx); - Poll::Ready(()) - }) - .await; - }); - }) - } - - #[test] - fn complete_block_on_under_load() { - loom::model(|| { - let pool = mk_pool(1); - - pool.block_on(async { - // Trigger a re-schedule - crate::spawn(track(async { - for _ in 0..2 { - task::yield_now().await; - } - })); - - gated2(true).await - }); - }); - } - - #[test] - fn shutdown_with_notification() { - use crate::sync::oneshot; - - loom::model(|| { - let rt = mk_pool(2); - let (done_tx, done_rx) = oneshot::channel::<()>(); - - rt.spawn(track(async move { - let (tx, rx) = oneshot::channel::<()>(); - - crate::spawn(async move { - crate::task::spawn_blocking(move || { - let _ = tx.send(()); - }); - - let _ = done_rx.await; - }); - - let _ = rx.await; - - let _ = done_tx.send(()); - })); - }); - } -} - -mod group_c { - use super::*; - - #[test] - fn pool_shutdown() { - loom::model(|| { - let pool = mk_pool(2); - - pool.spawn(track(async move { - gated2(true).await; - })); - - pool.spawn(track(async move { - gated2(false).await; - })); - - drop(pool); - }); - } - - #[test] - fn fill_local_queue() { - const NUM_SPAWNS: usize = 3; - loom::model(|| { - // using std versions 
here as it is just to control shutdown. - let cnt = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0)); - let (tx, rx) = oneshot::channel(); - let tx = AtomicOneshot::new(tx); - - let pool = runtime::Builder::new_multi_thread_alt() - .worker_threads(2) - // Set the intervals to avoid tuning logic - .global_queue_interval(61) - .local_queue_capacity(1) - .build() - .unwrap(); - - for _ in 0..NUM_SPAWNS { - let cnt = cnt.clone(); - let tx = tx.clone(); - pool.spawn(track(async move { - if NUM_SPAWNS == 1 + cnt.fetch_add(1, Relaxed) { - tx.assert_send(()); - } - })); - } - - rx.recv(); - }); - } - - // This tests a very specific case that happened when a worker has no more - // available work to process because a peer is in the process of stealing - // (but does not finish stealing), and the worker happens to find more work - // from the injection queue *right* before parking. - #[test] - fn pool_concurrent_park_with_steal_with_inject() { - const DEPTH: usize = 4; - - let mut model = loom::model::Builder::new(); - model.expect_explicit_explore = true; - model.preemption_bound = Some(3); - - model.check(|| { - let pool = runtime::Builder::new_multi_thread_alt() - .worker_threads(2) - // Set the intervals to avoid tuning logic - .global_queue_interval(61) - .local_queue_capacity(DEPTH) - .build() - .unwrap(); - - // Use std types to avoid adding backtracking. 
- type Flag = std::sync::Arc; - let flag: Flag = Default::default(); - let flag1 = flag.clone(); - - let (tx1, rx1) = oneshot::channel(); - - async fn task(expect: isize, flag: Flag) { - if expect == flag.load(Relaxed) { - flag.store(expect + 1, Relaxed); - } else { - flag.store(-1, Relaxed); - loom::skip_branch(); - } - } - - pool.spawn(track(async move { - let flag = flag1; - // First 2 spawned task should be stolen - crate::spawn(task(1, flag.clone())); - crate::spawn(task(2, flag.clone())); - crate::spawn(async move { - task(0, flag.clone()).await; - tx1.send(()); - }); - - // One to fill the LIFO slot - crate::spawn(async move {}); - - loom::explore(); - })); - - rx1.recv(); - - if 1 == flag.load(Relaxed) { - loom::stop_exploring(); - - let (tx3, rx3) = oneshot::channel(); - pool.spawn(async move { - loom::skip_branch(); - tx3.send(()); - }); - - pool.spawn(async {}); - pool.spawn(async {}); - - loom::explore(); - - rx3.recv(); - } else { - loom::skip_branch(); - } - }); - } -} - -mod group_d { - use super::*; - - #[test] - fn pool_multi_notify() { - loom::model(|| { - let pool = mk_pool(2); - - let c1 = Arc::new(AtomicUsize::new(0)); - - let (done_tx, done_rx) = oneshot::channel(); - let done_tx1 = AtomicOneshot::new(done_tx); - let done_tx2 = done_tx1.clone(); - - // Spawn a task - let c2 = c1.clone(); - pool.spawn(track(async move { - multi_gated().await; - - if 1 == c1.fetch_add(1, Relaxed) { - done_tx1.assert_send(()); - } - })); - - // Spawn a second task - pool.spawn(track(async move { - multi_gated().await; - - if 1 == c2.fetch_add(1, Relaxed) { - done_tx2.assert_send(()); - } - })); - - done_rx.recv(); - }); - } -} - -fn mk_pool(num_threads: usize) -> Runtime { - runtime::Builder::new_multi_thread_alt() - .worker_threads(num_threads) - // Set the intervals to avoid tuning logic - .global_queue_interval(61) - .build() - .unwrap() -} - -fn gated2(thread: bool) -> impl Future { - use loom::thread; - use std::sync::Arc; - - let gate = 
Arc::new(AtomicBool::new(false)); - let mut fired = false; - - poll_fn(move |cx| { - if !fired { - let gate = gate.clone(); - let waker = cx.waker().clone(); - - if thread { - thread::spawn(move || { - gate.store(true, SeqCst); - waker.wake_by_ref(); - }); - } else { - spawn(track(async move { - gate.store(true, SeqCst); - waker.wake_by_ref(); - })); - } - - fired = true; - - return Poll::Pending; - } - - if gate.load(SeqCst) { - Poll::Ready("hello world") - } else { - Poll::Pending - } - }) -} - -async fn multi_gated() { - struct Gate { - waker: loom::future::AtomicWaker, - count: AtomicUsize, - } - - let gate = Arc::new(Gate { - waker: loom::future::AtomicWaker::new(), - count: AtomicUsize::new(0), - }); - - { - let gate = gate.clone(); - spawn(track(async move { - for i in 1..3 { - gate.count.store(i, SeqCst); - gate.waker.wake(); - } - })); - } - - poll_fn(move |cx| { - gate.waker.register_by_ref(cx.waker()); - if gate.count.load(SeqCst) < 2 { - Poll::Pending - } else { - Poll::Ready(()) - } - }) - .await; -} - -fn track(f: T) -> Track { - Track { - inner: f, - arc: Arc::new(()), - } -} - -pin_project! { - struct Track { - #[pin] - inner: T, - // Arc is used to hook into loom's leak tracking. 
- arc: Arc<()>, - } -} - -impl Track { - fn into_inner(self) -> T { - self.inner - } -} - -impl Future for Track { - type Output = Track; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - Poll::Ready(Track { - inner: ready!(me.inner.poll(cx)), - arc: me.arc.clone(), - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_multi_thread.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_multi_thread.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,462 +0,0 @@ -mod queue; -mod shutdown; -mod yield_now; - -/// Full runtime loom tests. These are heavy tests and take significant time to -/// run on CI. -/// -/// Use `LOOM_MAX_PREEMPTIONS=1` to do a "quick" run as a smoke test. -/// -/// In order to speed up the C -use crate::future::poll_fn; -use crate::runtime::tests::loom_oneshot as oneshot; -use crate::runtime::{self, Runtime}; -use crate::{spawn, task}; -use tokio_test::assert_ok; - -use loom::sync::atomic::{AtomicBool, AtomicUsize}; -use loom::sync::Arc; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::pin::Pin; -use std::sync::atomic::Ordering::{Relaxed, SeqCst}; -use std::task::{Context, Poll}; - -mod atomic_take { - use loom::sync::atomic::AtomicBool; - use std::mem::MaybeUninit; - use std::sync::atomic::Ordering::SeqCst; - - pub(super) struct AtomicTake { - inner: MaybeUninit, - taken: AtomicBool, - } - - impl AtomicTake { - pub(super) fn new(value: T) -> Self { - Self { - inner: MaybeUninit::new(value), - taken: AtomicBool::new(false), - } - } - - pub(super) fn take(&self) -> Option { - // safety: Only one thread will see the boolean change from false - // to true, so that thread is able to take the value. 
- match self.taken.fetch_or(true, SeqCst) { - false => unsafe { Some(std::ptr::read(self.inner.as_ptr())) }, - true => None, - } - } - } - - impl Drop for AtomicTake { - fn drop(&mut self) { - drop(self.take()); - } - } -} - -#[derive(Clone)] -struct AtomicOneshot { - value: std::sync::Arc>>, -} -impl AtomicOneshot { - fn new(sender: oneshot::Sender) -> Self { - Self { - value: std::sync::Arc::new(atomic_take::AtomicTake::new(sender)), - } - } - - fn assert_send(&self, value: T) { - self.value.take().unwrap().send(value); - } -} - -/// Tests are divided into groups to make the runs faster on CI. -mod group_a { - use super::*; - - #[test] - fn racy_shutdown() { - loom::model(|| { - let pool = mk_pool(1); - - // here's the case we want to exercise: - // - // a worker that still has tasks in its local queue gets sent to the blocking pool (due to - // block_in_place). the blocking pool is shut down, so drops the worker. the worker's - // shutdown method never gets run. - // - // we do this by spawning two tasks on one worker, the first of which does block_in_place, - // and then immediately drop the pool. 
- - pool.spawn(track(async { - crate::task::block_in_place(|| {}); - })); - pool.spawn(track(async {})); - drop(pool); - }); - } - - #[test] - fn pool_multi_spawn() { - loom::model(|| { - let pool = mk_pool(2); - let c1 = Arc::new(AtomicUsize::new(0)); - - let (tx, rx) = oneshot::channel(); - let tx1 = AtomicOneshot::new(tx); - - // Spawn a task - let c2 = c1.clone(); - let tx2 = tx1.clone(); - pool.spawn(track(async move { - spawn(track(async move { - if 1 == c1.fetch_add(1, Relaxed) { - tx1.assert_send(()); - } - })); - })); - - // Spawn a second task - pool.spawn(track(async move { - spawn(track(async move { - if 1 == c2.fetch_add(1, Relaxed) { - tx2.assert_send(()); - } - })); - })); - - rx.recv(); - }); - } - - fn only_blocking_inner(first_pending: bool) { - loom::model(move || { - let pool = mk_pool(1); - let (block_tx, block_rx) = oneshot::channel(); - - pool.spawn(track(async move { - crate::task::block_in_place(move || { - block_tx.send(()); - }); - if first_pending { - task::yield_now().await - } - })); - - block_rx.recv(); - drop(pool); - }); - } - - #[test] - fn only_blocking_without_pending() { - only_blocking_inner(false) - } - - #[test] - fn only_blocking_with_pending() { - only_blocking_inner(true) - } -} - -mod group_b { - use super::*; - - fn blocking_and_regular_inner(first_pending: bool) { - const NUM: usize = 3; - loom::model(move || { - let pool = mk_pool(1); - let cnt = Arc::new(AtomicUsize::new(0)); - - let (block_tx, block_rx) = oneshot::channel(); - let (done_tx, done_rx) = oneshot::channel(); - let done_tx = AtomicOneshot::new(done_tx); - - pool.spawn(track(async move { - crate::task::block_in_place(move || { - block_tx.send(()); - }); - if first_pending { - task::yield_now().await - } - })); - - for _ in 0..NUM { - let cnt = cnt.clone(); - let done_tx = done_tx.clone(); - - pool.spawn(track(async move { - if NUM == cnt.fetch_add(1, Relaxed) + 1 { - done_tx.assert_send(()); - } - })); - } - - done_rx.recv(); - block_rx.recv(); - - 
drop(pool); - }); - } - - #[test] - fn blocking_and_regular() { - blocking_and_regular_inner(false); - } - - #[test] - fn blocking_and_regular_with_pending() { - blocking_and_regular_inner(true); - } - - #[test] - fn join_output() { - loom::model(|| { - let rt = mk_pool(1); - - rt.block_on(async { - let t = crate::spawn(track(async { "hello" })); - - let out = assert_ok!(t.await); - assert_eq!("hello", out.into_inner()); - }); - }); - } - - #[test] - fn poll_drop_handle_then_drop() { - loom::model(|| { - let rt = mk_pool(1); - - rt.block_on(async move { - let mut t = crate::spawn(track(async { "hello" })); - - poll_fn(|cx| { - let _ = Pin::new(&mut t).poll(cx); - Poll::Ready(()) - }) - .await; - }); - }) - } - - #[test] - fn complete_block_on_under_load() { - loom::model(|| { - let pool = mk_pool(1); - - pool.block_on(async { - // Trigger a re-schedule - crate::spawn(track(async { - for _ in 0..2 { - task::yield_now().await; - } - })); - - gated2(true).await - }); - }); - } - - #[test] - fn shutdown_with_notification() { - use crate::sync::oneshot; - - loom::model(|| { - let rt = mk_pool(2); - let (done_tx, done_rx) = oneshot::channel::<()>(); - - rt.spawn(track(async move { - let (tx, rx) = oneshot::channel::<()>(); - - crate::spawn(async move { - crate::task::spawn_blocking(move || { - let _ = tx.send(()); - }); - - let _ = done_rx.await; - }); - - let _ = rx.await; - - let _ = done_tx.send(()); - })); - }); - } -} - -mod group_c { - use super::*; - - #[test] - fn pool_shutdown() { - loom::model(|| { - let pool = mk_pool(2); - - pool.spawn(track(async move { - gated2(true).await; - })); - - pool.spawn(track(async move { - gated2(false).await; - })); - - drop(pool); - }); - } -} - -mod group_d { - use super::*; - - #[test] - fn pool_multi_notify() { - loom::model(|| { - let pool = mk_pool(2); - - let c1 = Arc::new(AtomicUsize::new(0)); - - let (done_tx, done_rx) = oneshot::channel(); - let done_tx1 = AtomicOneshot::new(done_tx); - let done_tx2 = done_tx1.clone(); 
- - // Spawn a task - let c2 = c1.clone(); - pool.spawn(track(async move { - multi_gated().await; - - if 1 == c1.fetch_add(1, Relaxed) { - done_tx1.assert_send(()); - } - })); - - // Spawn a second task - pool.spawn(track(async move { - multi_gated().await; - - if 1 == c2.fetch_add(1, Relaxed) { - done_tx2.assert_send(()); - } - })); - - done_rx.recv(); - }); - } -} - -fn mk_pool(num_threads: usize) -> Runtime { - runtime::Builder::new_multi_thread() - .worker_threads(num_threads) - // Set the intervals to avoid tuning logic - .event_interval(2) - .build() - .unwrap() -} - -fn gated2(thread: bool) -> impl Future { - use loom::thread; - use std::sync::Arc; - - let gate = Arc::new(AtomicBool::new(false)); - let mut fired = false; - - poll_fn(move |cx| { - if !fired { - let gate = gate.clone(); - let waker = cx.waker().clone(); - - if thread { - thread::spawn(move || { - gate.store(true, SeqCst); - waker.wake_by_ref(); - }); - } else { - spawn(track(async move { - gate.store(true, SeqCst); - waker.wake_by_ref(); - })); - } - - fired = true; - - return Poll::Pending; - } - - if gate.load(SeqCst) { - Poll::Ready("hello world") - } else { - Poll::Pending - } - }) -} - -async fn multi_gated() { - struct Gate { - waker: loom::future::AtomicWaker, - count: AtomicUsize, - } - - let gate = Arc::new(Gate { - waker: loom::future::AtomicWaker::new(), - count: AtomicUsize::new(0), - }); - - { - let gate = gate.clone(); - spawn(track(async move { - for i in 1..3 { - gate.count.store(i, SeqCst); - gate.waker.wake(); - } - })); - } - - poll_fn(move |cx| { - gate.waker.register_by_ref(cx.waker()); - if gate.count.load(SeqCst) < 2 { - Poll::Pending - } else { - Poll::Ready(()) - } - }) - .await; -} - -fn track(f: T) -> Track { - Track { - inner: f, - arc: Arc::new(()), - } -} - -pin_project! { - struct Track { - #[pin] - inner: T, - // Arc is used to hook into loom's leak tracking. 
- arc: Arc<()>, - } -} - -impl Track { - fn into_inner(self) -> T { - self.inner - } -} - -impl Future for Track { - type Output = Track; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - - Poll::Ready(Track { - inner: ready!(me.inner.poll(cx)), - arc: me.arc.clone(), - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_oneshot.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_oneshot.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/loom_oneshot.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/loom_oneshot.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,48 +0,0 @@ -use crate::loom::sync::{Arc, Mutex}; -use loom::sync::Notify; - -pub(crate) fn channel() -> (Sender, Receiver) { - let inner = Arc::new(Inner { - notify: Notify::new(), - value: Mutex::new(None), - }); - - let tx = Sender { - inner: inner.clone(), - }; - let rx = Receiver { inner }; - - (tx, rx) -} - -pub(crate) struct Sender { - inner: Arc>, -} - -pub(crate) struct Receiver { - inner: Arc>, -} - -struct Inner { - notify: Notify, - value: Mutex>, -} - -impl Sender { - pub(crate) fn send(self, value: T) { - *self.inner.value.lock() = Some(value); - self.inner.notify.notify(); - } -} - -impl Receiver { - pub(crate) fn recv(self) -> T { - loop { - if let Some(v) = self.inner.value.lock().take() { - return v; - } - - self.inner.notify.wait(); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -// Enable dead_code / unreachable_pub here. It has been disabled in lib.rs for -// other code when running loom tests. 
-#![cfg_attr(loom, warn(dead_code, unreachable_pub))] - -use self::noop_scheduler::NoopSchedule; -use self::unowned_wrapper::unowned; - -mod noop_scheduler { - use crate::runtime::task::{self, Task}; - - /// `task::Schedule` implementation that does nothing, for testing. - pub(crate) struct NoopSchedule; - - impl task::Schedule for NoopSchedule { - fn release(&self, _task: &Task) -> Option> { - None - } - - fn schedule(&self, _task: task::Notified) { - unreachable!(); - } - } -} - -mod unowned_wrapper { - use crate::runtime::task::{Id, JoinHandle, Notified}; - use crate::runtime::tests::NoopSchedule; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(crate) fn unowned(task: T) -> (Notified, JoinHandle) - where - T: std::future::Future + Send + 'static, - T::Output: Send + 'static, - { - use tracing::Instrument; - let span = tracing::trace_span!("test_span"); - let task = task.instrument(span); - let (task, handle) = crate::runtime::task::unowned(task, NoopSchedule, Id::next()); - (task.into_notified(), handle) - } - - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - pub(crate) fn unowned(task: T) -> (Notified, JoinHandle) - where - T: std::future::Future + Send + 'static, - T::Output: Send + 'static, - { - let (task, handle) = crate::runtime::task::unowned(task, NoopSchedule, Id::next()); - (task.into_notified(), handle) - } -} - -cfg_loom! { - mod loom_blocking; - mod loom_current_thread; - mod loom_join_set; - mod loom_local; - mod loom_multi_thread; - mod loom_multi_thread_alt; - mod loom_oneshot; - - // Make sure debug assertions are enabled - #[cfg(not(debug_assertions))] - compile_error!("these tests require debug assertions to be enabled"); -} - -cfg_not_loom! 
{ - mod inject; - mod queue; - - #[cfg(not(miri))] - mod task_combinations; - - #[cfg(miri)] - mod task; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/queue.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/queue.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/queue.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/queue.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,283 +0,0 @@ -use crate::runtime::scheduler::multi_thread::{queue, Stats}; -use crate::runtime::task::{self, Schedule, Task}; - -use std::cell::RefCell; -use std::thread; -use std::time::Duration; - -#[allow(unused)] -macro_rules! assert_metrics { - ($stats:ident, $field:ident == $v:expr) => {{ - use crate::runtime::WorkerMetrics; - use std::sync::atomic::Ordering::Relaxed; - - let worker = WorkerMetrics::new(); - $stats.submit(&worker); - - let expect = $v; - let actual = worker.$field.load(Relaxed); - - assert!(actual == expect, "expect = {}; actual = {}", expect, actual) - }}; -} - -fn new_stats() -> Stats { - use crate::runtime::WorkerMetrics; - Stats::new(&WorkerMetrics::new()) -} - -#[test] -fn fits_256_one_at_a_time() { - let (_, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - let mut stats = new_stats(); - - for _ in 0..256 { - let (task, _) = super::unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - } - - cfg_metrics! 
{ - assert_metrics!(stats, overflow_count == 0); - } - - assert!(inject.borrow_mut().pop().is_none()); - - while local.pop().is_some() {} -} - -#[test] -fn fits_256_all_at_once() { - let (_, mut local) = queue::local(); - - let mut tasks = (0..256) - .map(|_| super::unowned(async {}).0) - .collect::>(); - local.push_back(tasks.drain(..)); - - let mut i = 0; - while local.pop().is_some() { - i += 1; - } - - assert_eq!(i, 256); -} - -#[test] -fn fits_256_all_in_chunks() { - let (_, mut local) = queue::local(); - - let mut tasks = (0..256) - .map(|_| super::unowned(async {}).0) - .collect::>(); - - local.push_back(tasks.drain(..10)); - local.push_back(tasks.drain(..100)); - local.push_back(tasks.drain(..46)); - local.push_back(tasks.drain(..100)); - - let mut i = 0; - while local.pop().is_some() { - i += 1; - } - - assert_eq!(i, 256); -} - -#[test] -fn overflow() { - let (_, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - let mut stats = new_stats(); - - for _ in 0..257 { - let (task, _) = super::unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - } - - cfg_metrics! { - assert_metrics!(stats, overflow_count == 1); - } - - let mut n = 0; - - n += inject.borrow_mut().drain(..).count(); - - while local.pop().is_some() { - n += 1; - } - - assert_eq!(n, 257); -} - -#[test] -fn steal_batch() { - let mut stats = new_stats(); - - let (steal1, mut local1) = queue::local(); - let (_, mut local2) = queue::local(); - let inject = RefCell::new(vec![]); - - for _ in 0..4 { - let (task, _) = super::unowned(async {}); - local1.push_back_or_overflow(task, &inject, &mut stats); - } - - assert!(steal1.steal_into(&mut local2, &mut stats).is_some()); - - cfg_metrics! 
{ - assert_metrics!(stats, steal_count == 2); - } - - for _ in 0..1 { - assert!(local2.pop().is_some()); - } - - assert!(local2.pop().is_none()); - - for _ in 0..2 { - assert!(local1.pop().is_some()); - } - - assert!(local1.pop().is_none()); -} - -const fn normal_or_miri(normal: usize, miri: usize) -> usize { - if cfg!(miri) { - miri - } else { - normal - } -} - -#[test] -fn stress1() { - const NUM_ITER: usize = 5; - const NUM_STEAL: usize = normal_or_miri(1_000, 10); - const NUM_LOCAL: usize = normal_or_miri(1_000, 10); - const NUM_PUSH: usize = normal_or_miri(500, 10); - const NUM_POP: usize = normal_or_miri(250, 10); - - let mut stats = new_stats(); - - for _ in 0..NUM_ITER { - let (steal, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - - let th = thread::spawn(move || { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - let mut n = 0; - - for _ in 0..NUM_STEAL { - if steal.steal_into(&mut local, &mut stats).is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - - thread::yield_now(); - } - - cfg_metrics! 
{ - assert_metrics!(stats, steal_count == n as _); - } - - n - }); - - let mut n = 0; - - for _ in 0..NUM_LOCAL { - for _ in 0..NUM_PUSH { - let (task, _) = super::unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - } - - for _ in 0..NUM_POP { - if local.pop().is_some() { - n += 1; - } else { - break; - } - } - } - - n += inject.borrow_mut().drain(..).count(); - - n += th.join().unwrap(); - - assert_eq!(n, NUM_LOCAL * NUM_PUSH); - } -} - -#[test] -fn stress2() { - const NUM_ITER: usize = 1; - const NUM_TASKS: usize = normal_or_miri(1_000_000, 50); - const NUM_STEAL: usize = normal_or_miri(1_000, 10); - - let mut stats = new_stats(); - - for _ in 0..NUM_ITER { - let (steal, mut local) = queue::local(); - let inject = RefCell::new(vec![]); - - let th = thread::spawn(move || { - let mut stats = new_stats(); - let (_, mut local) = queue::local(); - let mut n = 0; - - for _ in 0..NUM_STEAL { - if steal.steal_into(&mut local, &mut stats).is_some() { - n += 1; - } - - while local.pop().is_some() { - n += 1; - } - - thread::sleep(Duration::from_micros(10)); - } - - n - }); - - let mut num_pop = 0; - - for i in 0..NUM_TASKS { - let (task, _) = super::unowned(async {}); - local.push_back_or_overflow(task, &inject, &mut stats); - - if i % 128 == 0 && local.pop().is_some() { - num_pop += 1; - } - - num_pop += inject.borrow_mut().drain(..).count(); - } - - num_pop += th.join().unwrap(); - - while local.pop().is_some() { - num_pop += 1; - } - - num_pop += inject.borrow_mut().drain(..).count(); - - assert_eq!(num_pop, NUM_TASKS); - } -} - -struct Runtime; - -impl Schedule for Runtime { - fn release(&self, _task: &Task) -> Option> { - None - } - - fn schedule(&self, _task: task::Notified) { - unreachable!(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/task_combinations.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/task_combinations.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/task_combinations.rs 
2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/task_combinations.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,487 +0,0 @@ -use std::fmt; -use std::future::Future; -use std::panic; -use std::pin::Pin; -use std::task::{Context, Poll}; - -use crate::runtime::task::AbortHandle; -use crate::runtime::Builder; -use crate::sync::oneshot; -use crate::task::JoinHandle; - -use futures::future::FutureExt; - -// Enums for each option in the combinations being tested - -#[derive(Copy, Clone, Debug, PartialEq)] -enum CombiRuntime { - CurrentThread, - Multi1, - Multi2, -} -#[derive(Copy, Clone, Debug, PartialEq)] -enum CombiLocalSet { - Yes, - No, -} -#[derive(Copy, Clone, Debug, PartialEq)] -enum CombiTask { - PanicOnRun, - PanicOnDrop, - PanicOnRunAndDrop, - NoPanic, -} -#[derive(Copy, Clone, Debug, PartialEq)] -enum CombiOutput { - PanicOnDrop, - NoPanic, -} -#[derive(Copy, Clone, Debug, PartialEq)] -enum CombiJoinInterest { - Polled, - NotPolled, -} -#[allow(clippy::enum_variant_names)] // we aren't using glob imports -#[derive(Copy, Clone, Debug, PartialEq)] -enum CombiJoinHandle { - DropImmediately = 1, - DropFirstPoll = 2, - DropAfterNoConsume = 3, - DropAfterConsume = 4, -} -#[derive(Copy, Clone, Debug, PartialEq)] -enum CombiAbort { - NotAborted = 0, - AbortedImmediately = 1, - AbortedFirstPoll = 2, - AbortedAfterFinish = 3, - AbortedAfterConsumeOutput = 4, -} - -#[derive(Copy, Clone, Debug, PartialEq)] -enum CombiAbortSource { - JoinHandle, - AbortHandle, -} - -#[test] -fn test_combinations() { - let mut rt = &[ - CombiRuntime::CurrentThread, - CombiRuntime::Multi1, - CombiRuntime::Multi2, - ][..]; - - if cfg!(miri) { - rt = &[CombiRuntime::CurrentThread]; - } - - let ls = [CombiLocalSet::Yes, CombiLocalSet::No]; - let task = [ - CombiTask::NoPanic, - CombiTask::PanicOnRun, - CombiTask::PanicOnDrop, - CombiTask::PanicOnRunAndDrop, - ]; - let output = [CombiOutput::NoPanic, CombiOutput::PanicOnDrop]; - let ji = 
[CombiJoinInterest::Polled, CombiJoinInterest::NotPolled]; - let jh = [ - CombiJoinHandle::DropImmediately, - CombiJoinHandle::DropFirstPoll, - CombiJoinHandle::DropAfterNoConsume, - CombiJoinHandle::DropAfterConsume, - ]; - let abort = [ - CombiAbort::NotAborted, - CombiAbort::AbortedImmediately, - CombiAbort::AbortedFirstPoll, - CombiAbort::AbortedAfterFinish, - CombiAbort::AbortedAfterConsumeOutput, - ]; - let ah = [ - None, - Some(CombiJoinHandle::DropImmediately), - Some(CombiJoinHandle::DropFirstPoll), - Some(CombiJoinHandle::DropAfterNoConsume), - Some(CombiJoinHandle::DropAfterConsume), - ]; - - for rt in rt.iter().copied() { - for ls in ls.iter().copied() { - for task in task.iter().copied() { - for output in output.iter().copied() { - for ji in ji.iter().copied() { - for jh in jh.iter().copied() { - for abort in abort.iter().copied() { - // abort via join handle --- abort handles - // may be dropped at any point - for ah in ah.iter().copied() { - test_combination( - rt, - ls, - task, - output, - ji, - jh, - ah, - abort, - CombiAbortSource::JoinHandle, - ); - } - // if aborting via AbortHandle, it will - // never be dropped. 
- test_combination( - rt, - ls, - task, - output, - ji, - jh, - None, - abort, - CombiAbortSource::AbortHandle, - ); - } - } - } - } - } - } - } -} - -fn is_debug(_: &T) {} - -#[allow(clippy::too_many_arguments)] -fn test_combination( - rt: CombiRuntime, - ls: CombiLocalSet, - task: CombiTask, - output: CombiOutput, - ji: CombiJoinInterest, - jh: CombiJoinHandle, - ah: Option, - abort: CombiAbort, - abort_src: CombiAbortSource, -) { - match (abort_src, ah) { - (CombiAbortSource::JoinHandle, _) if (jh as usize) < (abort as usize) => { - // join handle dropped prior to abort - return; - } - (CombiAbortSource::AbortHandle, Some(_)) => { - // abort handle dropped, we can't abort through the - // abort handle - return; - } - - _ => {} - } - - if (task == CombiTask::PanicOnDrop) && (output == CombiOutput::PanicOnDrop) { - // this causes double panic - return; - } - if (task == CombiTask::PanicOnRunAndDrop) && (abort != CombiAbort::AbortedImmediately) { - // this causes double panic - return; - } - - is_debug(&rt); - is_debug(&ls); - is_debug(&task); - is_debug(&output); - is_debug(&ji); - is_debug(&jh); - is_debug(&ah); - is_debug(&abort); - is_debug(&abort_src); - - // A runtime optionally with a LocalSet - struct Rt { - rt: crate::runtime::Runtime, - ls: Option, - } - impl Rt { - fn new(rt: CombiRuntime, ls: CombiLocalSet) -> Self { - let rt = match rt { - CombiRuntime::CurrentThread => Builder::new_current_thread().build().unwrap(), - CombiRuntime::Multi1 => Builder::new_multi_thread() - .worker_threads(1) - .build() - .unwrap(), - CombiRuntime::Multi2 => Builder::new_multi_thread() - .worker_threads(2) - .build() - .unwrap(), - }; - - let ls = match ls { - CombiLocalSet::Yes => Some(crate::task::LocalSet::new()), - CombiLocalSet::No => None, - }; - - Self { rt, ls } - } - fn block_on(&self, task: T) -> T::Output - where - T: Future, - { - match &self.ls { - Some(ls) => ls.block_on(&self.rt, task), - None => self.rt.block_on(task), - } - } - fn spawn(&self, task: T) 
-> JoinHandle - where - T: Future + Send + 'static, - T::Output: Send + 'static, - { - match &self.ls { - Some(ls) => ls.spawn_local(task), - None => self.rt.spawn(task), - } - } - } - - // The type used for the output of the future - struct Output { - panic_on_drop: bool, - on_drop: Option>, - } - impl Output { - fn disarm(&mut self) { - self.panic_on_drop = false; - } - } - impl Drop for Output { - fn drop(&mut self) { - let _ = self.on_drop.take().unwrap().send(()); - if self.panic_on_drop { - panic!("Panicking in Output"); - } - } - } - - // A wrapper around the future that is spawned - struct FutWrapper { - inner: F, - on_drop: Option>, - panic_on_drop: bool, - } - impl Future for FutWrapper { - type Output = F::Output; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - unsafe { - let me = Pin::into_inner_unchecked(self); - let inner = Pin::new_unchecked(&mut me.inner); - inner.poll(cx) - } - } - } - impl Drop for FutWrapper { - fn drop(&mut self) { - let _: Result<(), ()> = self.on_drop.take().unwrap().send(()); - if self.panic_on_drop { - panic!("Panicking in FutWrapper"); - } - } - } - - // The channels passed to the task - struct Signals { - on_first_poll: Option>, - wait_complete: Option>, - on_output_drop: Option>, - } - - // The task we will spawn - async fn my_task(mut signal: Signals, task: CombiTask, out: CombiOutput) -> Output { - // Signal that we have been polled once - let _ = signal.on_first_poll.take().unwrap().send(()); - - // Wait for a signal, then complete the future - let _ = signal.wait_complete.take().unwrap().await; - - // If the task gets past wait_complete without yielding, then aborts - // may not be caught without this yield_now. 
- crate::task::yield_now().await; - - if task == CombiTask::PanicOnRun || task == CombiTask::PanicOnRunAndDrop { - panic!("Panicking in my_task on {:?}", std::thread::current().id()); - } - - Output { - panic_on_drop: out == CombiOutput::PanicOnDrop, - on_drop: signal.on_output_drop.take(), - } - } - - let rt = Rt::new(rt, ls); - - let (on_first_poll, wait_first_poll) = oneshot::channel(); - let (on_complete, wait_complete) = oneshot::channel(); - let (on_future_drop, wait_future_drop) = oneshot::channel(); - let (on_output_drop, wait_output_drop) = oneshot::channel(); - let signal = Signals { - on_first_poll: Some(on_first_poll), - wait_complete: Some(wait_complete), - on_output_drop: Some(on_output_drop), - }; - - // === Spawn task === - let mut handle = Some(rt.spawn(FutWrapper { - inner: my_task(signal, task, output), - on_drop: Some(on_future_drop), - panic_on_drop: task == CombiTask::PanicOnDrop || task == CombiTask::PanicOnRunAndDrop, - })); - - // Keep track of whether the task has been killed with an abort - let mut aborted = false; - - // If we want to poll the JoinHandle, do it now - if ji == CombiJoinInterest::Polled { - assert!( - handle.as_mut().unwrap().now_or_never().is_none(), - "Polling handle succeeded" - ); - } - - // If we are either aborting the task via an abort handle, or dropping via - // an abort handle, do that now. 
- let mut abort_handle = if ah.is_some() || abort_src == CombiAbortSource::AbortHandle { - handle.as_ref().map(JoinHandle::abort_handle) - } else { - None - }; - - let do_abort = |abort_handle: &mut Option, - join_handle: Option<&mut JoinHandle<_>>| { - match abort_src { - CombiAbortSource::AbortHandle => abort_handle.take().unwrap().abort(), - CombiAbortSource::JoinHandle => join_handle.unwrap().abort(), - } - }; - - if abort == CombiAbort::AbortedImmediately { - do_abort(&mut abort_handle, handle.as_mut()); - aborted = true; - } - if jh == CombiJoinHandle::DropImmediately { - drop(handle.take().unwrap()); - } - - // === Wait for first poll === - let got_polled = rt.block_on(wait_first_poll).is_ok(); - if !got_polled { - // it's possible that we are aborted but still got polled - assert!( - aborted, - "Task completed without ever being polled but was not aborted." - ); - } - - if abort == CombiAbort::AbortedFirstPoll { - do_abort(&mut abort_handle, handle.as_mut()); - aborted = true; - } - if jh == CombiJoinHandle::DropFirstPoll { - drop(handle.take().unwrap()); - } - if ah == Some(CombiJoinHandle::DropFirstPoll) { - drop(abort_handle.take().unwrap()); - } - - // Signal the future that it can return now - let _ = on_complete.send(()); - // === Wait for future to be dropped === - assert!( - rt.block_on(wait_future_drop).is_ok(), - "The future should always be dropped." - ); - - if abort == CombiAbort::AbortedAfterFinish { - // Don't set aborted to true here as the task already finished - do_abort(&mut abort_handle, handle.as_mut()); - } - if jh == CombiJoinHandle::DropAfterNoConsume { - if ah == Some(CombiJoinHandle::DropAfterNoConsume) { - drop(handle.take().unwrap()); - // The runtime will usually have dropped every ref-count at this point, - // in which case dropping the AbortHandle drops the output. 
- // - // (But it might race and still hold a ref-count) - let panic = panic::catch_unwind(panic::AssertUnwindSafe(|| { - drop(abort_handle.take().unwrap()); - })); - if panic.is_err() { - assert!( - (output == CombiOutput::PanicOnDrop) - && (!matches!(task, CombiTask::PanicOnRun | CombiTask::PanicOnRunAndDrop)) - && !aborted, - "Dropping AbortHandle shouldn't panic here" - ); - } - } else { - // The runtime will usually have dropped every ref-count at this point, - // in which case dropping the JoinHandle drops the output. - // - // (But it might race and still hold a ref-count) - let panic = panic::catch_unwind(panic::AssertUnwindSafe(|| { - drop(handle.take().unwrap()); - })); - if panic.is_err() { - assert!( - (output == CombiOutput::PanicOnDrop) - && (!matches!(task, CombiTask::PanicOnRun | CombiTask::PanicOnRunAndDrop)) - && !aborted, - "Dropping JoinHandle shouldn't panic here" - ); - } - } - } - - // Check whether we drop after consuming the output - if jh == CombiJoinHandle::DropAfterConsume { - // Using as_mut() to not immediately drop the handle - let result = rt.block_on(handle.as_mut().unwrap()); - - match result { - Ok(mut output) => { - // Don't panic here. - output.disarm(); - assert!(!aborted, "Task was aborted but returned output"); - } - Err(err) if err.is_cancelled() => assert!(aborted, "Cancelled output but not aborted"), - Err(err) if err.is_panic() => { - assert!( - (task == CombiTask::PanicOnRun) - || (task == CombiTask::PanicOnDrop) - || (task == CombiTask::PanicOnRunAndDrop) - || (output == CombiOutput::PanicOnDrop), - "Panic but nothing should panic" - ); - } - _ => unreachable!(), - } - - let mut handle = handle.take().unwrap(); - if abort == CombiAbort::AbortedAfterConsumeOutput { - do_abort(&mut abort_handle, Some(&mut handle)); - } - drop(handle); - - if ah == Some(CombiJoinHandle::DropAfterConsume) { - drop(abort_handle.take()); - } - } - - // The output should have been dropped now. 
Check whether the output - // object was created at all. - let output_created = rt.block_on(wait_output_drop).is_ok(); - assert_eq!( - output_created, - (!matches!(task, CombiTask::PanicOnRun | CombiTask::PanicOnRunAndDrop)) && !aborted, - "Creation of output object" - ); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/task.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/task.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/tests/task.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/tests/task.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,331 +0,0 @@ -use crate::runtime::task::{self, unowned, Id, JoinHandle, OwnedTasks, Schedule, Task}; -use crate::runtime::tests::NoopSchedule; - -use std::collections::VecDeque; -use std::future::Future; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, Mutex}; - -struct AssertDropHandle { - is_dropped: Arc, -} -impl AssertDropHandle { - #[track_caller] - fn assert_dropped(&self) { - assert!(self.is_dropped.load(Ordering::SeqCst)); - } - - #[track_caller] - fn assert_not_dropped(&self) { - assert!(!self.is_dropped.load(Ordering::SeqCst)); - } -} - -struct AssertDrop { - is_dropped: Arc, -} -impl AssertDrop { - fn new() -> (Self, AssertDropHandle) { - let shared = Arc::new(AtomicBool::new(false)); - ( - AssertDrop { - is_dropped: shared.clone(), - }, - AssertDropHandle { - is_dropped: shared.clone(), - }, - ) - } -} -impl Drop for AssertDrop { - fn drop(&mut self) { - self.is_dropped.store(true, Ordering::SeqCst); - } -} - -// A Notified does not shut down on drop, but it is dropped once the ref-count -// hits zero. 
-#[test] -fn create_drop1() { - let (ad, handle) = AssertDrop::new(); - let (notified, join) = unowned( - async { - drop(ad); - unreachable!() - }, - NoopSchedule, - Id::next(), - ); - drop(notified); - handle.assert_not_dropped(); - drop(join); - handle.assert_dropped(); -} - -#[test] -fn create_drop2() { - let (ad, handle) = AssertDrop::new(); - let (notified, join) = unowned( - async { - drop(ad); - unreachable!() - }, - NoopSchedule, - Id::next(), - ); - drop(join); - handle.assert_not_dropped(); - drop(notified); - handle.assert_dropped(); -} - -#[test] -fn drop_abort_handle1() { - let (ad, handle) = AssertDrop::new(); - let (notified, join) = unowned( - async { - drop(ad); - unreachable!() - }, - NoopSchedule, - Id::next(), - ); - let abort = join.abort_handle(); - drop(join); - handle.assert_not_dropped(); - drop(notified); - handle.assert_not_dropped(); - drop(abort); - handle.assert_dropped(); -} - -#[test] -fn drop_abort_handle2() { - let (ad, handle) = AssertDrop::new(); - let (notified, join) = unowned( - async { - drop(ad); - unreachable!() - }, - NoopSchedule, - Id::next(), - ); - let abort = join.abort_handle(); - drop(notified); - handle.assert_not_dropped(); - drop(abort); - handle.assert_not_dropped(); - drop(join); - handle.assert_dropped(); -} - -// Shutting down through Notified works -#[test] -fn create_shutdown1() { - let (ad, handle) = AssertDrop::new(); - let (notified, join) = unowned( - async { - drop(ad); - unreachable!() - }, - NoopSchedule, - Id::next(), - ); - drop(join); - handle.assert_not_dropped(); - notified.shutdown(); - handle.assert_dropped(); -} - -#[test] -fn create_shutdown2() { - let (ad, handle) = AssertDrop::new(); - let (notified, join) = unowned( - async { - drop(ad); - unreachable!() - }, - NoopSchedule, - Id::next(), - ); - handle.assert_not_dropped(); - notified.shutdown(); - handle.assert_dropped(); - drop(join); -} - -#[test] -fn unowned_poll() { - let (task, _) = unowned(async {}, NoopSchedule, Id::next()); - 
task.run(); -} - -#[test] -fn schedule() { - with(|rt| { - rt.spawn(async { - crate::task::yield_now().await; - }); - - assert_eq!(2, rt.tick()); - rt.shutdown(); - }) -} - -#[test] -fn shutdown() { - with(|rt| { - rt.spawn(async { - loop { - crate::task::yield_now().await; - } - }); - - rt.tick_max(1); - - rt.shutdown(); - }) -} - -#[test] -fn shutdown_immediately() { - with(|rt| { - rt.spawn(async { - loop { - crate::task::yield_now().await; - } - }); - - rt.shutdown(); - }) -} - -#[test] -fn spawn_during_shutdown() { - static DID_SPAWN: AtomicBool = AtomicBool::new(false); - - struct SpawnOnDrop(Runtime); - impl Drop for SpawnOnDrop { - fn drop(&mut self) { - DID_SPAWN.store(true, Ordering::SeqCst); - self.0.spawn(async {}); - } - } - - with(|rt| { - let rt2 = rt.clone(); - rt.spawn(async move { - let _spawn_on_drop = SpawnOnDrop(rt2); - - loop { - crate::task::yield_now().await; - } - }); - - rt.tick_max(1); - rt.shutdown(); - }); - - assert!(DID_SPAWN.load(Ordering::SeqCst)); -} - -fn with(f: impl FnOnce(Runtime)) { - struct Reset; - - impl Drop for Reset { - fn drop(&mut self) { - let _rt = CURRENT.try_lock().unwrap().take(); - } - } - - let _reset = Reset; - - let rt = Runtime(Arc::new(Inner { - owned: OwnedTasks::new(), - core: Mutex::new(Core { - queue: VecDeque::new(), - }), - })); - - *CURRENT.try_lock().unwrap() = Some(rt.clone()); - f(rt) -} - -#[derive(Clone)] -struct Runtime(Arc); - -struct Inner { - core: Mutex, - owned: OwnedTasks, -} - -struct Core { - queue: VecDeque>, -} - -static CURRENT: Mutex> = Mutex::new(None); - -impl Runtime { - fn spawn(&self, future: T) -> JoinHandle - where - T: 'static + Send + Future, - T::Output: 'static + Send, - { - let (handle, notified) = self.0.owned.bind(future, self.clone(), Id::next()); - - if let Some(notified) = notified { - self.schedule(notified); - } - - handle - } - - fn tick(&self) -> usize { - self.tick_max(usize::MAX) - } - - fn tick_max(&self, max: usize) -> usize { - let mut n = 0; - - while 
!self.is_empty() && n < max { - let task = self.next_task(); - n += 1; - let task = self.0.owned.assert_owner(task); - task.run(); - } - - n - } - - fn is_empty(&self) -> bool { - self.0.core.try_lock().unwrap().queue.is_empty() - } - - fn next_task(&self) -> task::Notified { - self.0.core.try_lock().unwrap().queue.pop_front().unwrap() - } - - fn shutdown(&self) { - let mut core = self.0.core.try_lock().unwrap(); - - self.0.owned.close_and_shutdown_all(); - - while let Some(task) = core.queue.pop_back() { - drop(task); - } - - drop(core); - - assert!(self.0.owned.is_empty()); - } -} - -impl Schedule for Runtime { - fn release(&self, task: &Task) -> Option> { - self.0.owned.remove(task) - } - - fn schedule(&self, task: task::Notified) { - self.0.core.try_lock().unwrap().queue.push_back(task); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/thread_id.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/thread_id.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/thread_id.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/thread_id.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use std::num::NonZeroU64; - -#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)] -pub(crate) struct ThreadId(NonZeroU64); - -impl ThreadId { - pub(crate) fn next() -> Self { - use crate::loom::sync::atomic::{Ordering::Relaxed, StaticAtomicU64}; - - static NEXT_ID: StaticAtomicU64 = StaticAtomicU64::new(0); - - let mut last = NEXT_ID.load(Relaxed); - loop { - let id = match last.checked_add(1) { - Some(id) => id, - None => exhausted(), - }; - - match NEXT_ID.compare_exchange_weak(last, id, Relaxed, Relaxed) { - Ok(_) => return ThreadId(NonZeroU64::new(id).unwrap()), - Err(id) => last = id, - } - } - } -} - -#[cold] -#[allow(dead_code)] -fn exhausted() -> ! 
{ - panic!("failed to generate unique thread ID: bitspace exhausted") -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/entry.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/entry.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/entry.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/entry.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,644 +0,0 @@ -//! Timer state structures. -//! -//! This module contains the heart of the intrusive timer implementation, and as -//! such the structures inside are full of tricky concurrency and unsafe code. -//! -//! # Ground rules -//! -//! The heart of the timer implementation here is the [`TimerShared`] structure, -//! shared between the [`TimerEntry`] and the driver. Generally, we permit access -//! to [`TimerShared`] ONLY via either 1) a mutable reference to [`TimerEntry`] or -//! 2) a held driver lock. -//! -//! It follows from this that any changes made while holding BOTH 1 and 2 will -//! be reliably visible, regardless of ordering. This is because of the acq/rel -//! fences on the driver lock ensuring ordering with 2, and rust mutable -//! reference rules for 1 (a mutable reference to an object can't be passed -//! between threads without an acq/rel barrier, and same-thread we have local -//! happens-before ordering). -//! -//! # State field -//! -//! Each timer has a state field associated with it. This field contains either -//! the current scheduled time, or a special flag value indicating its state. -//! This state can either indicate that the timer is on the 'pending' queue (and -//! thus will be fired with an `Ok(())` result soon) or that it has already been -//! fired/deregistered. -//! -//! This single state field allows for code that is firing the timer to -//! synchronize with any racing `reset` calls reliably. -//! -//! # Cached vs true timeouts -//! -//! To allow for the use case of a timeout that is periodically reset before -//! 
expiration to be as lightweight as possible, we support optimistically -//! lock-free timer resets, in the case where a timer is rescheduled to a later -//! point than it was originally scheduled for. -//! -//! This is accomplished by lazily rescheduling timers. That is, we update the -//! state field with the true expiration of the timer from the holder of -//! the [`TimerEntry`]. When the driver services timers (ie, whenever it's -//! walking lists of timers), it checks this "true when" value, and reschedules -//! based on it. -//! -//! We do, however, also need to track what the expiration time was when we -//! originally registered the timer; this is used to locate the right linked -//! list when the timer is being cancelled. This is referred to as the "cached -//! when" internally. -//! -//! There is of course a race condition between timer reset and timer -//! expiration. If the driver fails to observe the updated expiration time, it -//! could trigger expiration of the timer too early. However, because -//! [`mark_pending`][mark_pending] performs a compare-and-swap, it will identify this race and -//! refuse to mark the timer as pending. -//! -//! [mark_pending]: TimerHandle::mark_pending - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::AtomicU64; -use crate::loom::sync::atomic::Ordering; - -use crate::runtime::scheduler; -use crate::sync::AtomicWaker; -use crate::time::Instant; -use crate::util::linked_list; - -use std::cell::UnsafeCell as StdUnsafeCell; -use std::task::{Context, Poll, Waker}; -use std::{marker::PhantomPinned, pin::Pin, ptr::NonNull}; - -type TimerResult = Result<(), crate::time::error::Error>; - -const STATE_DEREGISTERED: u64 = u64::MAX; -const STATE_PENDING_FIRE: u64 = STATE_DEREGISTERED - 1; -const STATE_MIN_VALUE: u64 = STATE_PENDING_FIRE; -/// The largest safe integer to use for ticks. -/// -/// This value should be updated if any other signal values are added above. 
-pub(super) const MAX_SAFE_MILLIS_DURATION: u64 = u64::MAX - 2; - -/// This structure holds the current shared state of the timer - its scheduled -/// time (if registered), or otherwise the result of the timer completing, as -/// well as the registered waker. -/// -/// Generally, the StateCell is only permitted to be accessed from two contexts: -/// Either a thread holding the corresponding &mut TimerEntry, or a thread -/// holding the timer driver lock. The write actions on the StateCell amount to -/// passing "ownership" of the StateCell between these contexts; moving a timer -/// from the TimerEntry to the driver requires _both_ holding the &mut -/// TimerEntry and the driver lock, while moving it back (firing the timer) -/// requires only the driver lock. -pub(super) struct StateCell { - /// Holds either the scheduled expiration time for this timer, or (if the - /// timer has been fired and is unregistered), `u64::MAX`. - state: AtomicU64, - /// If the timer is fired (an Acquire order read on state shows - /// `u64::MAX`), holds the result that should be returned from - /// polling the timer. Otherwise, the contents are unspecified and reading - /// without holding the driver lock is undefined behavior. - result: UnsafeCell, - /// The currently-registered waker - waker: AtomicWaker, -} - -impl Default for StateCell { - fn default() -> Self { - Self::new() - } -} - -impl std::fmt::Debug for StateCell { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "StateCell({:?})", self.read_state()) - } -} - -impl StateCell { - fn new() -> Self { - Self { - state: AtomicU64::new(STATE_DEREGISTERED), - result: UnsafeCell::new(Ok(())), - waker: AtomicWaker::new(), - } - } - - fn is_pending(&self) -> bool { - self.state.load(Ordering::Relaxed) == STATE_PENDING_FIRE - } - - /// Returns the current expiration time, or None if not currently scheduled. 
- fn when(&self) -> Option { - let cur_state = self.state.load(Ordering::Relaxed); - - if cur_state == STATE_DEREGISTERED { - None - } else { - Some(cur_state) - } - } - - /// If the timer is completed, returns the result of the timer. Otherwise, - /// returns None and registers the waker. - fn poll(&self, waker: &Waker) -> Poll { - // We must register first. This ensures that either `fire` will - // observe the new waker, or we will observe a racing fire to have set - // the state, or both. - self.waker.register_by_ref(waker); - - self.read_state() - } - - fn read_state(&self) -> Poll { - let cur_state = self.state.load(Ordering::Acquire); - - if cur_state == STATE_DEREGISTERED { - // SAFETY: The driver has fired this timer; this involves writing - // the result, and then writing (with release ordering) the state - // field. - Poll::Ready(unsafe { self.result.with(|p| *p) }) - } else { - Poll::Pending - } - } - - /// Marks this timer as being moved to the pending list, if its scheduled - /// time is not after `not_after`. - /// - /// If the timer is scheduled for a time after not_after, returns an Err - /// containing the current scheduled time. - /// - /// SAFETY: Must hold the driver lock. - unsafe fn mark_pending(&self, not_after: u64) -> Result<(), u64> { - // Quick initial debug check to see if the timer is already fired. Since - // firing the timer can only happen with the driver lock held, we know - // we shouldn't be able to "miss" a transition to a fired state, even - // with relaxed ordering. 
- let mut cur_state = self.state.load(Ordering::Relaxed); - - loop { - // improve the error message for things like - // https://github.com/tokio-rs/tokio/issues/3675 - assert!( - cur_state < STATE_MIN_VALUE, - "mark_pending called when the timer entry is in an invalid state" - ); - - if cur_state > not_after { - break Err(cur_state); - } - - match self.state.compare_exchange( - cur_state, - STATE_PENDING_FIRE, - Ordering::AcqRel, - Ordering::Acquire, - ) { - Ok(_) => { - break Ok(()); - } - Err(actual_state) => { - cur_state = actual_state; - } - } - } - } - - /// Fires the timer, setting the result to the provided result. - /// - /// Returns: - /// * `Some(waker) - if fired and a waker needs to be invoked once the - /// driver lock is released - /// * `None` - if fired and a waker does not need to be invoked, or if - /// already fired - /// - /// SAFETY: The driver lock must be held. - unsafe fn fire(&self, result: TimerResult) -> Option { - // Quick initial check to see if the timer is already fired. Since - // firing the timer can only happen with the driver lock held, we know - // we shouldn't be able to "miss" a transition to a fired state, even - // with relaxed ordering. - let cur_state = self.state.load(Ordering::Relaxed); - if cur_state == STATE_DEREGISTERED { - return None; - } - - // SAFETY: We assume the driver lock is held and the timer is not - // fired, so only the driver is accessing this field. - // - // We perform a release-ordered store to state below, to ensure this - // write is visible before the state update is visible. - unsafe { self.result.with_mut(|p| *p = result) }; - - self.state.store(STATE_DEREGISTERED, Ordering::Release); - - self.waker.take_waker() - } - - /// Marks the timer as registered (poll will return None) and sets the - /// expiration time. - /// - /// While this function is memory-safe, it should only be called from a - /// context holding both `&mut TimerEntry` and the driver lock. 
- fn set_expiration(&self, timestamp: u64) { - debug_assert!(timestamp < STATE_MIN_VALUE); - - // We can use relaxed ordering because we hold the driver lock and will - // fence when we release the lock. - self.state.store(timestamp, Ordering::Relaxed); - } - - /// Attempts to adjust the timer to a new timestamp. - /// - /// If the timer has already been fired, is pending firing, or the new - /// timestamp is earlier than the old timestamp, (or occasionally - /// spuriously) returns Err without changing the timer's state. In this - /// case, the timer must be deregistered and re-registered. - fn extend_expiration(&self, new_timestamp: u64) -> Result<(), ()> { - let mut prior = self.state.load(Ordering::Relaxed); - loop { - if new_timestamp < prior || prior >= STATE_MIN_VALUE { - return Err(()); - } - - match self.state.compare_exchange_weak( - prior, - new_timestamp, - Ordering::AcqRel, - Ordering::Acquire, - ) { - Ok(_) => { - return Ok(()); - } - Err(true_prior) => { - prior = true_prior; - } - } - } - } - - /// Returns true if the state of this timer indicates that the timer might - /// be registered with the driver. This check is performed with relaxed - /// ordering, but is conservative - if it returns false, the timer is - /// definitely _not_ registered. - pub(super) fn might_be_registered(&self) -> bool { - self.state.load(Ordering::Relaxed) != u64::MAX - } -} - -/// A timer entry. -/// -/// This is the handle to a timer that is controlled by the requester of the -/// timer. As this participates in intrusive data structures, it must be pinned -/// before polling. -#[derive(Debug)] -pub(crate) struct TimerEntry { - /// Arc reference to the runtime handle. We can only free the driver after - /// deregistering everything from their respective timer wheels. - driver: scheduler::Handle, - /// Shared inner structure; this is part of an intrusive linked list, and - /// therefore other references can exist to it while mutable references to - /// Entry exist. 
- /// - /// This is manipulated only under the inner mutex. TODO: Can we use loom - /// cells for this? - inner: StdUnsafeCell, - /// Deadline for the timer. This is used to register on the first - /// poll, as we can't register prior to being pinned. - deadline: Instant, - /// Whether the deadline has been registered. - registered: bool, - /// Ensure the type is !Unpin - _m: std::marker::PhantomPinned, -} - -unsafe impl Send for TimerEntry {} -unsafe impl Sync for TimerEntry {} - -/// An TimerHandle is the (non-enforced) "unique" pointer from the driver to the -/// timer entry. Generally, at most one TimerHandle exists for a timer at a time -/// (enforced by the timer state machine). -/// -/// SAFETY: An TimerHandle is essentially a raw pointer, and the usual caveats -/// of pointer safety apply. In particular, TimerHandle does not itself enforce -/// that the timer does still exist; however, normally an TimerHandle is created -/// immediately before registering the timer, and is consumed when firing the -/// timer, to help minimize mistakes. Still, because TimerHandle cannot enforce -/// memory safety, all operations are unsafe. -#[derive(Debug)] -pub(crate) struct TimerHandle { - inner: NonNull, -} - -pub(super) type EntryList = crate::util::linked_list::LinkedList; - -/// The shared state structure of a timer. This structure is shared between the -/// frontend (`Entry`) and driver backend. -/// -/// Note that this structure is located inside the `TimerEntry` structure. -pub(crate) struct TimerShared { - /// A link within the doubly-linked list of timers on a particular level and - /// slot. Valid only if state is equal to Registered. - /// - /// Only accessed under the entry lock. - pointers: linked_list::Pointers, - - /// The expiration time for which this entry is currently registered. - /// Generally owned by the driver, but is accessed by the entry when not - /// registered. - cached_when: AtomicU64, - - /// The true expiration time. 
Set by the timer future, read by the driver. - true_when: AtomicU64, - - /// Current state. This records whether the timer entry is currently under - /// the ownership of the driver, and if not, its current state (not - /// complete, fired, error, etc). - state: StateCell, - - _p: PhantomPinned, -} - -unsafe impl Send for TimerShared {} -unsafe impl Sync for TimerShared {} - -impl std::fmt::Debug for TimerShared { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TimerShared") - .field("when", &self.true_when.load(Ordering::Relaxed)) - .field("cached_when", &self.cached_when.load(Ordering::Relaxed)) - .field("state", &self.state) - .finish() - } -} - -generate_addr_of_methods! { - impl<> TimerShared { - unsafe fn addr_of_pointers(self: NonNull) -> NonNull> { - &self.pointers - } - } -} - -impl TimerShared { - pub(super) fn new() -> Self { - Self { - cached_when: AtomicU64::new(0), - true_when: AtomicU64::new(0), - pointers: linked_list::Pointers::new(), - state: StateCell::default(), - _p: PhantomPinned, - } - } - - /// Gets the cached time-of-expiration value. - pub(super) fn cached_when(&self) -> u64 { - // Cached-when is only accessed under the driver lock, so we can use relaxed - self.cached_when.load(Ordering::Relaxed) - } - - /// Gets the true time-of-expiration value, and copies it into the cached - /// time-of-expiration value. - /// - /// SAFETY: Must be called with the driver lock held, and when this entry is - /// not in any timer wheel lists. - pub(super) unsafe fn sync_when(&self) -> u64 { - let true_when = self.true_when(); - - self.cached_when.store(true_when, Ordering::Relaxed); - - true_when - } - - /// Sets the cached time-of-expiration value. - /// - /// SAFETY: Must be called with the driver lock held, and when this entry is - /// not in any timer wheel lists. 
- unsafe fn set_cached_when(&self, when: u64) { - self.cached_when.store(when, Ordering::Relaxed); - } - - /// Returns the true time-of-expiration value, with relaxed memory ordering. - pub(super) fn true_when(&self) -> u64 { - self.state.when().expect("Timer already fired") - } - - /// Sets the true time-of-expiration value, even if it is less than the - /// current expiration or the timer is deregistered. - /// - /// SAFETY: Must only be called with the driver lock held and the entry not - /// in the timer wheel. - pub(super) unsafe fn set_expiration(&self, t: u64) { - self.state.set_expiration(t); - self.cached_when.store(t, Ordering::Relaxed); - } - - /// Sets the true time-of-expiration only if it is after the current. - pub(super) fn extend_expiration(&self, t: u64) -> Result<(), ()> { - self.state.extend_expiration(t) - } - - /// Returns a TimerHandle for this timer. - pub(super) fn handle(&self) -> TimerHandle { - TimerHandle { - inner: NonNull::from(self), - } - } - - /// Returns true if the state of this timer indicates that the timer might - /// be registered with the driver. This check is performed with relaxed - /// ordering, but is conservative - if it returns false, the timer is - /// definitely _not_ registered. 
- pub(super) fn might_be_registered(&self) -> bool { - self.state.might_be_registered() - } -} - -unsafe impl linked_list::Link for TimerShared { - type Handle = TimerHandle; - - type Target = TimerShared; - - fn as_raw(handle: &Self::Handle) -> NonNull { - handle.inner - } - - unsafe fn from_raw(ptr: NonNull) -> Self::Handle { - TimerHandle { inner: ptr } - } - - unsafe fn pointers( - target: NonNull, - ) -> NonNull> { - TimerShared::addr_of_pointers(target) - } -} - -// ===== impl Entry ===== - -impl TimerEntry { - #[track_caller] - pub(crate) fn new(handle: &scheduler::Handle, deadline: Instant) -> Self { - // Panic if the time driver is not enabled - let _ = handle.driver().time(); - - let driver = handle.clone(); - - Self { - driver, - inner: StdUnsafeCell::new(TimerShared::new()), - deadline, - registered: false, - _m: std::marker::PhantomPinned, - } - } - - fn inner(&self) -> &TimerShared { - unsafe { &*self.inner.get() } - } - - pub(crate) fn deadline(&self) -> Instant { - self.deadline - } - - pub(crate) fn is_elapsed(&self) -> bool { - !self.inner().state.might_be_registered() && self.registered - } - - /// Cancels and deregisters the timer. This operation is irreversible. - pub(crate) fn cancel(self: Pin<&mut Self>) { - // We need to perform an acq/rel fence with the driver thread, and the - // simplest way to do so is to grab the driver lock. - // - // Why is this necessary? We're about to release this timer's memory for - // some other non-timer use. However, we've been doing a bunch of - // relaxed (or even non-atomic) writes from the driver thread, and we'll - // be doing more from _this thread_ (as this memory is interpreted as - // something else). - // - // It is critical to ensure that, from the point of view of the driver, - // those future non-timer writes happen-after the timer is fully fired, - // and from the purpose of this thread, the driver's writes all - // happen-before we drop the timer. 
This in turn requires us to perform - // an acquire-release barrier in _both_ directions between the driver - // and dropping thread. - // - // The lock acquisition in clear_entry serves this purpose. All of the - // driver manipulations happen with the lock held, so we can just take - // the lock and be sure that this drop happens-after everything the - // driver did so far and happens-before everything the driver does in - // the future. While we have the lock held, we also go ahead and - // deregister the entry if necessary. - unsafe { self.driver().clear_entry(NonNull::from(self.inner())) }; - } - - pub(crate) fn reset(mut self: Pin<&mut Self>, new_time: Instant, reregister: bool) { - unsafe { self.as_mut().get_unchecked_mut() }.deadline = new_time; - unsafe { self.as_mut().get_unchecked_mut() }.registered = reregister; - - let tick = self.driver().time_source().deadline_to_tick(new_time); - - if self.inner().extend_expiration(tick).is_ok() { - return; - } - - if reregister { - unsafe { - self.driver() - .reregister(&self.driver.driver().io, tick, self.inner().into()); - } - } - } - - pub(crate) fn poll_elapsed( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - if self.driver().is_shutdown() { - panic!("{}", crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR); - } - - if !self.registered { - let deadline = self.deadline; - self.as_mut().reset(deadline, true); - } - - let this = unsafe { self.get_unchecked_mut() }; - - this.inner().state.poll(cx.waker()) - } - - pub(crate) fn driver(&self) -> &super::Handle { - self.driver.driver().time() - } - - #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(crate) fn clock(&self) -> &super::Clock { - self.driver.driver().clock() - } -} - -impl TimerHandle { - pub(super) unsafe fn cached_when(&self) -> u64 { - unsafe { self.inner.as_ref().cached_when() } - } - - pub(super) unsafe fn sync_when(&self) -> u64 { - unsafe { self.inner.as_ref().sync_when() } - } - - pub(super) unsafe fn is_pending(&self) -> 
bool { - unsafe { self.inner.as_ref().state.is_pending() } - } - - /// Forcibly sets the true and cached expiration times to the given tick. - /// - /// SAFETY: The caller must ensure that the handle remains valid, the driver - /// lock is held, and that the timer is not in any wheel linked lists. - pub(super) unsafe fn set_expiration(&self, tick: u64) { - self.inner.as_ref().set_expiration(tick); - } - - /// Attempts to mark this entry as pending. If the expiration time is after - /// `not_after`, however, returns an Err with the current expiration time. - /// - /// If an `Err` is returned, the `cached_when` value will be updated to this - /// new expiration time. - /// - /// SAFETY: The caller must ensure that the handle remains valid, the driver - /// lock is held, and that the timer is not in any wheel linked lists. - /// After returning Ok, the entry must be added to the pending list. - pub(super) unsafe fn mark_pending(&self, not_after: u64) -> Result<(), u64> { - match self.inner.as_ref().state.mark_pending(not_after) { - Ok(()) => { - // mark this as being on the pending queue in cached_when - self.inner.as_ref().set_cached_when(u64::MAX); - Ok(()) - } - Err(tick) => { - self.inner.as_ref().set_cached_when(tick); - Err(tick) - } - } - } - - /// Attempts to transition to a terminal state. If the state is already a - /// terminal state, does nothing. - /// - /// Because the entry might be dropped after the state is moved to a - /// terminal state, this function consumes the handle to ensure we don't - /// access the entry afterwards. - /// - /// Returns the last-registered waker, if any. - /// - /// SAFETY: The driver lock must be held while invoking this function, and - /// the entry must not be in any wheel linked lists. 
- pub(super) unsafe fn fire(self, completed_state: TimerResult) -> Option { - self.inner.as_ref().state.fire(completed_state) - } -} - -impl Drop for TimerEntry { - fn drop(&mut self) { - unsafe { Pin::new_unchecked(self) }.as_mut().cancel() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/handle.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/handle.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/handle.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/handle.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,62 +0,0 @@ -use crate::runtime::time::TimeSource; -use std::fmt; - -/// Handle to time driver instance. -pub(crate) struct Handle { - pub(super) time_source: TimeSource, - pub(super) inner: super::Inner, -} - -impl Handle { - /// Returns the time source associated with this handle. - pub(crate) fn time_source(&self) -> &TimeSource { - &self.time_source - } - - /// Checks whether the driver has been shutdown. - pub(super) fn is_shutdown(&self) -> bool { - self.inner.is_shutdown() - } - - /// Track that the driver is being unparked - pub(crate) fn unpark(&self) { - #[cfg(feature = "test-util")] - self.inner - .did_wake - .store(true, std::sync::atomic::Ordering::SeqCst); - } -} - -cfg_not_rt! { - impl Handle { - /// Tries to get a handle to the current timer. - /// - /// # Panics - /// - /// This function panics if there is no current timer set. - /// - /// It can be triggered when [`Builder::enable_time`] or - /// [`Builder::enable_all`] are not included in the builder. - /// - /// It can also panic whenever a timer is created outside of a - /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, - /// since the function is executed outside of the runtime. - /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. 
- /// And this is because wrapping the function on an async makes it lazy, - /// and so gets executed inside the runtime successfully without - /// panicking. - /// - /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time - /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all - #[track_caller] - pub(crate) fn current() -> Self { - panic!("{}", crate::util::error::CONTEXT_MISSING_ERROR) - } - } -} - -impl fmt::Debug for Handle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Handle") - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,424 +0,0 @@ -// Currently, rust warns when an unsafe fn contains an unsafe {} block. However, -// in the future, this will change to the reverse. For now, suppress this -// warning and generally stick with being explicit about unsafety. -#![allow(unused_unsafe)] -#![cfg_attr(not(feature = "rt"), allow(dead_code))] - -//! Time driver. - -mod entry; -pub(crate) use entry::TimerEntry; -use entry::{EntryList, TimerHandle, TimerShared, MAX_SAFE_MILLIS_DURATION}; - -mod handle; -pub(crate) use self::handle::Handle; - -mod source; -pub(crate) use source::TimeSource; - -mod wheel; - -use crate::loom::sync::atomic::{AtomicBool, Ordering}; -use crate::loom::sync::Mutex; -use crate::runtime::driver::{self, IoHandle, IoStack}; -use crate::time::error::Error; -use crate::time::{Clock, Duration}; - -use std::fmt; -use std::{num::NonZeroU64, ptr::NonNull, task::Waker}; - -/// Time implementation that drives [`Sleep`][sleep], [`Interval`][interval], and [`Timeout`][timeout]. 
-/// -/// A `Driver` instance tracks the state necessary for managing time and -/// notifying the [`Sleep`][sleep] instances once their deadlines are reached. -/// -/// It is expected that a single instance manages many individual [`Sleep`][sleep] -/// instances. The `Driver` implementation is thread-safe and, as such, is able -/// to handle callers from across threads. -/// -/// After creating the `Driver` instance, the caller must repeatedly call `park` -/// or `park_timeout`. The time driver will perform no work unless `park` or -/// `park_timeout` is called repeatedly. -/// -/// The driver has a resolution of one millisecond. Any unit of time that falls -/// between milliseconds are rounded up to the next millisecond. -/// -/// When an instance is dropped, any outstanding [`Sleep`][sleep] instance that has not -/// elapsed will be notified with an error. At this point, calling `poll` on the -/// [`Sleep`][sleep] instance will result in panic. -/// -/// # Implementation -/// -/// The time driver is based on the [paper by Varghese and Lauck][paper]. -/// -/// A hashed timing wheel is a vector of slots, where each slot handles a time -/// slice. As time progresses, the timer walks over the slot for the current -/// instant, and processes each entry for that slot. When the timer reaches the -/// end of the wheel, it starts again at the beginning. -/// -/// The implementation maintains six wheels arranged in a set of levels. As the -/// levels go up, the slots of the associated wheel represent larger intervals -/// of time. At each level, the wheel has 64 slots. Each slot covers a range of -/// time equal to the wheel at the lower level. At level zero, each slot -/// represents one millisecond of time. -/// -/// The wheels are: -/// -/// * Level 0: 64 x 1 millisecond slots. -/// * Level 1: 64 x 64 millisecond slots. -/// * Level 2: 64 x ~4 second slots. -/// * Level 3: 64 x ~4 minute slots. -/// * Level 4: 64 x ~4 hour slots. -/// * Level 5: 64 x ~12 day slots. 
-/// -/// When the timer processes entries at level zero, it will notify all the -/// `Sleep` instances as their deadlines have been reached. For all higher -/// levels, all entries will be redistributed across the wheel at the next level -/// down. Eventually, as time progresses, entries with [`Sleep`][sleep] instances will -/// either be canceled (dropped) or their associated entries will reach level -/// zero and be notified. -/// -/// [paper]: http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf -/// [sleep]: crate::time::Sleep -/// [timeout]: crate::time::Timeout -/// [interval]: crate::time::Interval -#[derive(Debug)] -pub(crate) struct Driver { - /// Parker to delegate to. - park: IoStack, -} - -/// Timer state shared between `Driver`, `Handle`, and `Registration`. -struct Inner { - // The state is split like this so `Handle` can access `is_shutdown` without locking the mutex - pub(super) state: Mutex, - - /// True if the driver is being shutdown. - pub(super) is_shutdown: AtomicBool, - - // When `true`, a call to `park_timeout` should immediately return and time - // should not advance. One reason for this to be `true` is if the task - // passed to `Runtime::block_on` called `task::yield_now()`. - // - // While it may look racy, it only has any effect when the clock is paused - // and pausing the clock is restricted to a single-threaded runtime. - #[cfg(feature = "test-util")] - did_wake: AtomicBool, -} - -/// Time state shared which must be protected by a `Mutex` -struct InnerState { - /// The last published timer `elapsed` value. - elapsed: u64, - - /// The earliest time at which we promise to wake up without unparking. - next_wake: Option, - - /// Timer wheel. - wheel: wheel::Wheel, -} - -// ===== impl Driver ===== - -impl Driver { - /// Creates a new `Driver` instance that uses `park` to block the current - /// thread and `time_source` to get the current time and convert to ticks. 
- /// - /// Specifying the source of time is useful when testing. - pub(crate) fn new(park: IoStack, clock: &Clock) -> (Driver, Handle) { - let time_source = TimeSource::new(clock); - - let handle = Handle { - time_source, - inner: Inner { - state: Mutex::new(InnerState { - elapsed: 0, - next_wake: None, - wheel: wheel::Wheel::new(), - }), - is_shutdown: AtomicBool::new(false), - - #[cfg(feature = "test-util")] - did_wake: AtomicBool::new(false), - }, - }; - - let driver = Driver { park }; - - (driver, handle) - } - - pub(crate) fn park(&mut self, handle: &driver::Handle) { - self.park_internal(handle, None) - } - - pub(crate) fn park_timeout(&mut self, handle: &driver::Handle, duration: Duration) { - self.park_internal(handle, Some(duration)) - } - - pub(crate) fn shutdown(&mut self, rt_handle: &driver::Handle) { - let handle = rt_handle.time(); - - if handle.is_shutdown() { - return; - } - - handle.inner.is_shutdown.store(true, Ordering::SeqCst); - - // Advance time forward to the end of time. - - handle.process_at_time(u64::MAX); - - self.park.shutdown(rt_handle); - } - - fn park_internal(&mut self, rt_handle: &driver::Handle, limit: Option) { - let handle = rt_handle.time(); - let mut lock = handle.inner.state.lock(); - - assert!(!handle.is_shutdown()); - - let next_wake = lock.wheel.next_expiration_time(); - lock.next_wake = - next_wake.map(|t| NonZeroU64::new(t).unwrap_or_else(|| NonZeroU64::new(1).unwrap())); - - drop(lock); - - match next_wake { - Some(when) => { - let now = handle.time_source.now(rt_handle.clock()); - // Note that we effectively round up to 1ms here - this avoids - // very short-duration microsecond-resolution sleeps that the OS - // might treat as zero-length. 
- let mut duration = handle - .time_source - .tick_to_duration(when.saturating_sub(now)); - - if duration > Duration::from_millis(0) { - if let Some(limit) = limit { - duration = std::cmp::min(limit, duration); - } - - self.park_thread_timeout(rt_handle, duration); - } else { - self.park.park_timeout(rt_handle, Duration::from_secs(0)); - } - } - None => { - if let Some(duration) = limit { - self.park_thread_timeout(rt_handle, duration); - } else { - self.park.park(rt_handle); - } - } - } - - // Process pending timers after waking up - handle.process(rt_handle.clock()); - } - - cfg_test_util! { - fn park_thread_timeout(&mut self, rt_handle: &driver::Handle, duration: Duration) { - let handle = rt_handle.time(); - let clock = rt_handle.clock(); - - if clock.can_auto_advance() { - self.park.park_timeout(rt_handle, Duration::from_secs(0)); - - // If the time driver was woken, then the park completed - // before the "duration" elapsed (usually caused by a - // yield in `Runtime::block_on`). In this case, we don't - // advance the clock. - if !handle.did_wake() { - // Simulate advancing time - if let Err(msg) = clock.advance(duration) { - panic!("{}", msg); - } - } - } else { - self.park.park_timeout(rt_handle, duration); - } - } - } - - cfg_not_test_util! { - fn park_thread_timeout(&mut self, rt_handle: &driver::Handle, duration: Duration) { - self.park.park_timeout(rt_handle, duration); - } - } -} - -impl Handle { - /// Runs timer related logic, and returns the next wakeup time - pub(self) fn process(&self, clock: &Clock) { - let now = self.time_source().now(clock); - - self.process_at_time(now) - } - - pub(self) fn process_at_time(&self, mut now: u64) { - let mut waker_list: [Option; 32] = Default::default(); - let mut waker_idx = 0; - - let mut lock = self.inner.lock(); - - if now < lock.elapsed { - // Time went backwards! 
This normally shouldn't happen as the Rust language - // guarantees that an Instant is monotonic, but can happen when running - // Linux in a VM on a Windows host due to std incorrectly trusting the - // hardware clock to be monotonic. - // - // See for more information. - now = lock.elapsed; - } - - while let Some(entry) = lock.wheel.poll(now) { - debug_assert!(unsafe { entry.is_pending() }); - - // SAFETY: We hold the driver lock, and just removed the entry from any linked lists. - if let Some(waker) = unsafe { entry.fire(Ok(())) } { - waker_list[waker_idx] = Some(waker); - - waker_idx += 1; - - if waker_idx == waker_list.len() { - // Wake a batch of wakers. To avoid deadlock, we must do this with the lock temporarily dropped. - drop(lock); - - for waker in waker_list.iter_mut() { - waker.take().unwrap().wake(); - } - - waker_idx = 0; - - lock = self.inner.lock(); - } - } - } - - // Update the elapsed cache - lock.elapsed = lock.wheel.elapsed(); - lock.next_wake = lock - .wheel - .poll_at() - .map(|t| NonZeroU64::new(t).unwrap_or_else(|| NonZeroU64::new(1).unwrap())); - - drop(lock); - - for waker in waker_list[0..waker_idx].iter_mut() { - waker.take().unwrap().wake(); - } - } - - /// Removes a registered timer from the driver. - /// - /// The timer will be moved to the cancelled state. Wakers will _not_ be - /// invoked. If the timer is already completed, this function is a no-op. - /// - /// This function always acquires the driver lock, even if the entry does - /// not appear to be registered. - /// - /// SAFETY: The timer must not be registered with some other driver, and - /// `add_entry` must not be called concurrently. - pub(self) unsafe fn clear_entry(&self, entry: NonNull) { - unsafe { - let mut lock = self.inner.lock(); - - if entry.as_ref().might_be_registered() { - lock.wheel.remove(entry); - } - - entry.as_ref().handle().fire(Ok(())); - } - } - - /// Removes and re-adds an entry to the driver. 
- /// - /// SAFETY: The timer must be either unregistered, or registered with this - /// driver. No other threads are allowed to concurrently manipulate the - /// timer at all (the current thread should hold an exclusive reference to - /// the `TimerEntry`) - pub(self) unsafe fn reregister( - &self, - unpark: &IoHandle, - new_tick: u64, - entry: NonNull, - ) { - let waker = unsafe { - let mut lock = self.inner.lock(); - - // We may have raced with a firing/deregistration, so check before - // deregistering. - if unsafe { entry.as_ref().might_be_registered() } { - lock.wheel.remove(entry); - } - - // Now that we have exclusive control of this entry, mint a handle to reinsert it. - let entry = entry.as_ref().handle(); - - if self.is_shutdown() { - unsafe { entry.fire(Err(crate::time::error::Error::shutdown())) } - } else { - entry.set_expiration(new_tick); - - // Note: We don't have to worry about racing with some other resetting - // thread, because add_entry and reregister require exclusive control of - // the timer entry. - match unsafe { lock.wheel.insert(entry) } { - Ok(when) => { - if lock - .next_wake - .map(|next_wake| when < next_wake.get()) - .unwrap_or(true) - { - unpark.unpark(); - } - - None - } - Err((entry, crate::time::error::InsertError::Elapsed)) => unsafe { - entry.fire(Ok(())) - }, - } - } - - // Must release lock before invoking waker to avoid the risk of deadlock. - }; - - // The timer was fired synchronously as a result of the reregistration. - // Wake the waker; this is needed because we might reset _after_ a poll, - // and otherwise the task won't be awoken to poll again. - if let Some(waker) = waker { - waker.wake(); - } - } - - cfg_test_util! 
{ - fn did_wake(&self) -> bool { - self.inner.did_wake.swap(false, Ordering::SeqCst) - } - } -} - -// ===== impl Inner ===== - -impl Inner { - /// Locks the driver's inner structure - pub(super) fn lock(&self) -> crate::loom::sync::MutexGuard<'_, InnerState> { - self.state.lock() - } - - // Check whether the driver has been shutdown - pub(super) fn is_shutdown(&self) -> bool { - self.is_shutdown.load(Ordering::SeqCst) - } -} - -impl fmt::Debug for Inner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Inner").finish() - } -} - -#[cfg(test)] -mod tests; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/source.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/source.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/source.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/source.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,39 +0,0 @@ -use super::MAX_SAFE_MILLIS_DURATION; -use crate::time::{Clock, Duration, Instant}; - -/// A structure which handles conversion from Instants to u64 timestamps. 
-#[derive(Debug)] -pub(crate) struct TimeSource { - start_time: Instant, -} - -impl TimeSource { - pub(crate) fn new(clock: &Clock) -> Self { - Self { - start_time: clock.now(), - } - } - - pub(crate) fn deadline_to_tick(&self, t: Instant) -> u64 { - // Round up to the end of a ms - self.instant_to_tick(t + Duration::from_nanos(999_999)) - } - - pub(crate) fn instant_to_tick(&self, t: Instant) -> u64 { - // round up - let dur: Duration = t - .checked_duration_since(self.start_time) - .unwrap_or_else(|| Duration::from_secs(0)); - let ms = dur.as_millis(); - - ms.try_into().unwrap_or(MAX_SAFE_MILLIS_DURATION) - } - - pub(crate) fn tick_to_duration(&self, t: u64) -> Duration { - Duration::from_millis(t) - } - - pub(crate) fn now(&self, clock: &Clock) -> u64 { - self.instant_to_tick(clock.now()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/tests/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/tests/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/tests/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/tests/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,267 +0,0 @@ -#![cfg(not(target_os = "wasi"))] - -use std::{task::Context, time::Duration}; - -#[cfg(not(loom))] -use futures::task::noop_waker_ref; - -use crate::loom::sync::atomic::{AtomicBool, Ordering}; -use crate::loom::sync::Arc; -use crate::loom::thread; - -use super::TimerEntry; - -fn block_on(f: impl std::future::Future) -> T { - #[cfg(loom)] - return loom::future::block_on(f); - - #[cfg(not(loom))] - { - let rt = crate::runtime::Builder::new_current_thread() - .build() - .unwrap(); - rt.block_on(f) - } -} - -fn model(f: impl Fn() + Send + Sync + 'static) { - #[cfg(loom)] - loom::model(f); - - #[cfg(not(loom))] - f(); -} - -fn rt(start_paused: bool) -> crate::runtime::Runtime { - crate::runtime::Builder::new_current_thread() - .enable_time() - .start_paused(start_paused) - .build() - .unwrap() -} - -#[test] 
-fn single_timer() { - model(|| { - let rt = rt(false); - let handle = rt.handle(); - - let handle_ = handle.clone(); - let jh = thread::spawn(move || { - let entry = TimerEntry::new( - &handle_.inner, - handle_.inner.driver().clock().now() + Duration::from_secs(1), - ); - pin!(entry); - - block_on(futures::future::poll_fn(|cx| { - entry.as_mut().poll_elapsed(cx) - })) - .unwrap(); - }); - - thread::yield_now(); - - let time = handle.inner.driver().time(); - let clock = handle.inner.driver().clock(); - - // This may or may not return Some (depending on how it races with the - // thread). If it does return None, however, the timer should complete - // synchronously. - time.process_at_time(time.time_source().now(clock) + 2_000_000_000); - - jh.join().unwrap(); - }) -} - -#[test] -fn drop_timer() { - model(|| { - let rt = rt(false); - let handle = rt.handle(); - - let handle_ = handle.clone(); - let jh = thread::spawn(move || { - let entry = TimerEntry::new( - &handle_.inner, - handle_.inner.driver().clock().now() + Duration::from_secs(1), - ); - pin!(entry); - - let _ = entry - .as_mut() - .poll_elapsed(&mut Context::from_waker(futures::task::noop_waker_ref())); - let _ = entry - .as_mut() - .poll_elapsed(&mut Context::from_waker(futures::task::noop_waker_ref())); - }); - - thread::yield_now(); - - let time = handle.inner.driver().time(); - let clock = handle.inner.driver().clock(); - - // advance 2s in the future. 
- time.process_at_time(time.time_source().now(clock) + 2_000_000_000); - - jh.join().unwrap(); - }) -} - -#[test] -fn change_waker() { - model(|| { - let rt = rt(false); - let handle = rt.handle(); - - let handle_ = handle.clone(); - let jh = thread::spawn(move || { - let entry = TimerEntry::new( - &handle_.inner, - handle_.inner.driver().clock().now() + Duration::from_secs(1), - ); - pin!(entry); - - let _ = entry - .as_mut() - .poll_elapsed(&mut Context::from_waker(futures::task::noop_waker_ref())); - - block_on(futures::future::poll_fn(|cx| { - entry.as_mut().poll_elapsed(cx) - })) - .unwrap(); - }); - - thread::yield_now(); - - let time = handle.inner.driver().time(); - let clock = handle.inner.driver().clock(); - - // advance 2s - time.process_at_time(time.time_source().now(clock) + 2_000_000_000); - - jh.join().unwrap(); - }) -} - -#[test] -fn reset_future() { - model(|| { - let finished_early = Arc::new(AtomicBool::new(false)); - - let rt = rt(false); - let handle = rt.handle(); - - let handle_ = handle.clone(); - let finished_early_ = finished_early.clone(); - let start = handle.inner.driver().clock().now(); - - let jh = thread::spawn(move || { - let entry = TimerEntry::new(&handle_.inner, start + Duration::from_secs(1)); - pin!(entry); - - let _ = entry - .as_mut() - .poll_elapsed(&mut Context::from_waker(futures::task::noop_waker_ref())); - - entry.as_mut().reset(start + Duration::from_secs(2), true); - - // shouldn't complete before 2s - block_on(futures::future::poll_fn(|cx| { - entry.as_mut().poll_elapsed(cx) - })) - .unwrap(); - - finished_early_.store(true, Ordering::Relaxed); - }); - - thread::yield_now(); - - let handle = handle.inner.driver().time(); - - // This may or may not return a wakeup time. 
- handle.process_at_time( - handle - .time_source() - .instant_to_tick(start + Duration::from_millis(1500)), - ); - - assert!(!finished_early.load(Ordering::Relaxed)); - - handle.process_at_time( - handle - .time_source() - .instant_to_tick(start + Duration::from_millis(2500)), - ); - - jh.join().unwrap(); - - assert!(finished_early.load(Ordering::Relaxed)); - }) -} - -#[cfg(not(loom))] -fn normal_or_miri(normal: T, miri: T) -> T { - if cfg!(miri) { - miri - } else { - normal - } -} - -#[test] -#[cfg(not(loom))] -fn poll_process_levels() { - let rt = rt(true); - let handle = rt.handle(); - - let mut entries = vec![]; - - for i in 0..normal_or_miri(1024, 64) { - let mut entry = Box::pin(TimerEntry::new( - &handle.inner, - handle.inner.driver().clock().now() + Duration::from_millis(i), - )); - - let _ = entry - .as_mut() - .poll_elapsed(&mut Context::from_waker(noop_waker_ref())); - - entries.push(entry); - } - - for t in 1..normal_or_miri(1024, 64) { - handle.inner.driver().time().process_at_time(t as u64); - - for (deadline, future) in entries.iter_mut().enumerate() { - let mut context = Context::from_waker(noop_waker_ref()); - if deadline <= t { - assert!(future.as_mut().poll_elapsed(&mut context).is_ready()); - } else { - assert!(future.as_mut().poll_elapsed(&mut context).is_pending()); - } - } - } -} - -#[test] -#[cfg(not(loom))] -fn poll_process_levels_targeted() { - let mut context = Context::from_waker(noop_waker_ref()); - - let rt = rt(true); - let handle = rt.handle(); - - let e1 = TimerEntry::new( - &handle.inner, - handle.inner.driver().clock().now() + Duration::from_millis(193), - ); - pin!(e1); - - let handle = handle.inner.driver().time(); - - handle.process_at_time(62); - assert!(e1.as_mut().poll_elapsed(&mut context).is_pending()); - handle.process_at_time(192); - handle.process_at_time(192); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/wheel/level.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/wheel/level.rs --- 
s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/wheel/level.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/wheel/level.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,274 +0,0 @@ -use crate::runtime::time::{EntryList, TimerHandle, TimerShared}; - -use std::{fmt, ptr::NonNull}; - -/// Wheel for a single level in the timer. This wheel contains 64 slots. -pub(crate) struct Level { - level: usize, - - /// Bit field tracking which slots currently contain entries. - /// - /// Using a bit field to track slots that contain entries allows avoiding a - /// scan to find entries. This field is updated when entries are added or - /// removed from a slot. - /// - /// The least-significant bit represents slot zero. - occupied: u64, - - /// Slots. We access these via the EntryInner `current_list` as well, so this needs to be an UnsafeCell. - slot: [EntryList; LEVEL_MULT], -} - -/// Indicates when a slot must be processed next. -#[derive(Debug)] -pub(crate) struct Expiration { - /// The level containing the slot. - pub(crate) level: usize, - - /// The slot index. - pub(crate) slot: usize, - - /// The instant at which the slot needs to be processed. - pub(crate) deadline: u64, -} - -/// Level multiplier. -/// -/// Being a power of 2 is very important. -const LEVEL_MULT: usize = 64; - -impl Level { - pub(crate) fn new(level: usize) -> Level { - // A value has to be Copy in order to use syntax like: - // let stack = Stack::default(); - // ... - // slots: [stack; 64], - // - // Alternatively, since Stack is Default one can - // use syntax like: - // let slots: [Stack; 64] = Default::default(); - // - // However, that is only supported for arrays of size - // 32 or fewer. So in our case we have to explicitly - // invoke the constructor for each array element. 
- let ctor = EntryList::default; - - Level { - level, - occupied: 0, - slot: [ - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ], - } - } - - /// Finds the slot that needs to be processed next and returns the slot and - /// `Instant` at which this slot must be processed. - pub(crate) fn next_expiration(&self, now: u64) -> Option { - // Use the `occupied` bit field to get the index of the next slot that - // needs to be processed. - let slot = match self.next_occupied_slot(now) { - Some(slot) => slot, - None => return None, - }; - - // From the slot index, calculate the `Instant` at which it needs to be - // processed. This value *must* be in the future with respect to `now`. - - let level_range = level_range(self.level); - let slot_range = slot_range(self.level); - - // Compute the start date of the current level by masking the low bits - // of `now` (`level_range` is a power of 2). - let level_start = now & !(level_range - 1); - let mut deadline = level_start + slot as u64 * slot_range; - - if deadline <= now { - // A timer is in a slot "prior" to the current time. This can occur - // because we do not have an infinite hierarchy of timer levels, and - // eventually a timer scheduled for a very distant time might end up - // being placed in a slot that is beyond the end of all of the - // arrays. 
- // - // To deal with this, we first limit timers to being scheduled no - // more than MAX_DURATION ticks in the future; that is, they're at - // most one rotation of the top level away. Then, we force timers - // that logically would go into the top+1 level, to instead go into - // the top level's slots. - // - // What this means is that the top level's slots act as a - // pseudo-ring buffer, and we rotate around them indefinitely. If we - // compute a deadline before now, and it's the top level, it - // therefore means we're actually looking at a slot in the future. - debug_assert_eq!(self.level, super::NUM_LEVELS - 1); - - deadline += level_range; - } - - debug_assert!( - deadline >= now, - "deadline={:016X}; now={:016X}; level={}; lr={:016X}, sr={:016X}, slot={}; occupied={:b}", - deadline, - now, - self.level, - level_range, - slot_range, - slot, - self.occupied - ); - - Some(Expiration { - level: self.level, - slot, - deadline, - }) - } - - fn next_occupied_slot(&self, now: u64) -> Option { - if self.occupied == 0 { - return None; - } - - // Get the slot for now using Maths - let now_slot = (now / slot_range(self.level)) as usize; - let occupied = self.occupied.rotate_right(now_slot as u32); - let zeros = occupied.trailing_zeros() as usize; - let slot = (zeros + now_slot) % 64; - - Some(slot) - } - - pub(crate) unsafe fn add_entry(&mut self, item: TimerHandle) { - let slot = slot_for(item.cached_when(), self.level); - - self.slot[slot].push_front(item); - - self.occupied |= occupied_bit(slot); - } - - pub(crate) unsafe fn remove_entry(&mut self, item: NonNull) { - let slot = slot_for(unsafe { item.as_ref().cached_when() }, self.level); - - unsafe { self.slot[slot].remove(item) }; - if self.slot[slot].is_empty() { - // The bit is currently set - debug_assert!(self.occupied & occupied_bit(slot) != 0); - - // Unset the bit - self.occupied ^= occupied_bit(slot); - } - } - - pub(crate) fn take_slot(&mut self, slot: usize) -> EntryList { - self.occupied &= 
!occupied_bit(slot); - - std::mem::take(&mut self.slot[slot]) - } -} - -impl fmt::Debug for Level { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Level") - .field("occupied", &self.occupied) - .finish() - } -} - -fn occupied_bit(slot: usize) -> u64 { - 1 << slot -} - -fn slot_range(level: usize) -> u64 { - LEVEL_MULT.pow(level as u32) as u64 -} - -fn level_range(level: usize) -> u64 { - LEVEL_MULT as u64 * slot_range(level) -} - -/// Converts a duration (milliseconds) and a level to a slot position. -fn slot_for(duration: u64, level: usize) -> usize { - ((duration >> (level * 6)) % LEVEL_MULT as u64) as usize -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::*; - - #[test] - fn test_slot_for() { - for pos in 0..64 { - assert_eq!(pos as usize, slot_for(pos, 0)); - } - - for level in 1..5 { - for pos in level..64 { - let a = pos * 64_usize.pow(level as u32); - assert_eq!(pos as usize, slot_for(a as u64, level)); - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/wheel/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/wheel/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/runtime/time/wheel/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/runtime/time/wheel/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,349 +0,0 @@ -use crate::runtime::time::{TimerHandle, TimerShared}; -use crate::time::error::InsertError; - -mod level; -pub(crate) use self::level::Expiration; -use self::level::Level; - -use std::ptr::NonNull; - -use super::EntryList; - -/// Timing wheel implementation. -/// -/// This type provides the hashed timing wheel implementation that backs `Timer` -/// and `DelayQueue`. -/// -/// The structure is generic over `T: Stack`. This allows handling timeout data -/// being stored on the heap or in a slab. 
In order to support the latter case, -/// the slab must be passed into each function allowing the implementation to -/// lookup timer entries. -/// -/// See `Timer` documentation for some implementation notes. -#[derive(Debug)] -pub(crate) struct Wheel { - /// The number of milliseconds elapsed since the wheel started. - elapsed: u64, - - /// Timer wheel. - /// - /// Levels: - /// - /// * 1 ms slots / 64 ms range - /// * 64 ms slots / ~ 4 sec range - /// * ~ 4 sec slots / ~ 4 min range - /// * ~ 4 min slots / ~ 4 hr range - /// * ~ 4 hr slots / ~ 12 day range - /// * ~ 12 day slots / ~ 2 yr range - levels: Vec, - - /// Entries queued for firing - pending: EntryList, -} - -/// Number of levels. Each level has 64 slots. By using 6 levels with 64 slots -/// each, the timer is able to track time up to 2 years into the future with a -/// precision of 1 millisecond. -const NUM_LEVELS: usize = 6; - -/// The maximum duration of a `Sleep`. -pub(super) const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1; - -impl Wheel { - /// Creates a new timing wheel. - pub(crate) fn new() -> Wheel { - let levels = (0..NUM_LEVELS).map(Level::new).collect(); - - Wheel { - elapsed: 0, - levels, - pending: EntryList::new(), - } - } - - /// Returns the number of milliseconds that have elapsed since the timing - /// wheel's creation. - pub(crate) fn elapsed(&self) -> u64 { - self.elapsed - } - - /// Inserts an entry into the timing wheel. - /// - /// # Arguments - /// - /// * `item`: The item to insert into the wheel. - /// - /// # Return - /// - /// Returns `Ok` when the item is successfully inserted, `Err` otherwise. - /// - /// `Err(Elapsed)` indicates that `when` represents an instant that has - /// already passed. In this case, the caller should fire the timeout - /// immediately. - /// - /// `Err(Invalid)` indicates an invalid `when` argument as been supplied. - /// - /// # Safety - /// - /// This function registers item into an intrusive linked list. 
The caller - /// must ensure that `item` is pinned and will not be dropped without first - /// being deregistered. - pub(crate) unsafe fn insert( - &mut self, - item: TimerHandle, - ) -> Result { - let when = item.sync_when(); - - if when <= self.elapsed { - return Err((item, InsertError::Elapsed)); - } - - // Get the level at which the entry should be stored - let level = self.level_for(when); - - unsafe { - self.levels[level].add_entry(item); - } - - debug_assert!({ - self.levels[level] - .next_expiration(self.elapsed) - .map(|e| e.deadline >= self.elapsed) - .unwrap_or(true) - }); - - Ok(when) - } - - /// Removes `item` from the timing wheel. - pub(crate) unsafe fn remove(&mut self, item: NonNull) { - unsafe { - let when = item.as_ref().cached_when(); - if when == u64::MAX { - self.pending.remove(item); - } else { - debug_assert!( - self.elapsed <= when, - "elapsed={}; when={}", - self.elapsed, - when - ); - - let level = self.level_for(when); - - self.levels[level].remove_entry(item); - } - } - } - - /// Instant at which to poll. - pub(crate) fn poll_at(&self) -> Option { - self.next_expiration().map(|expiration| expiration.deadline) - } - - /// Advances the timer up to the instant represented by `now`. - pub(crate) fn poll(&mut self, now: u64) -> Option { - loop { - if let Some(handle) = self.pending.pop_back() { - return Some(handle); - } - - match self.next_expiration() { - Some(ref expiration) if expiration.deadline <= now => { - self.process_expiration(expiration); - - self.set_elapsed(expiration.deadline); - } - _ => { - // in this case the poll did not indicate an expiration - // _and_ we were not able to find a next expiration in - // the current list of timers. advance to the poll's - // current time and do nothing else. - self.set_elapsed(now); - break; - } - } - } - - self.pending.pop_back() - } - - /// Returns the instant at which the next timeout expires. 
- fn next_expiration(&self) -> Option { - if !self.pending.is_empty() { - // Expire immediately as we have things pending firing - return Some(Expiration { - level: 0, - slot: 0, - deadline: self.elapsed, - }); - } - - // Check all levels - for level in 0..NUM_LEVELS { - if let Some(expiration) = self.levels[level].next_expiration(self.elapsed) { - // There cannot be any expirations at a higher level that happen - // before this one. - debug_assert!(self.no_expirations_before(level + 1, expiration.deadline)); - - return Some(expiration); - } - } - - None - } - - /// Returns the tick at which this timer wheel next needs to perform some - /// processing, or None if there are no timers registered. - pub(super) fn next_expiration_time(&self) -> Option { - self.next_expiration().map(|ex| ex.deadline) - } - - /// Used for debug assertions - fn no_expirations_before(&self, start_level: usize, before: u64) -> bool { - let mut res = true; - - for l2 in start_level..NUM_LEVELS { - if let Some(e2) = self.levels[l2].next_expiration(self.elapsed) { - if e2.deadline < before { - res = false; - } - } - } - - res - } - - /// iteratively find entries that are between the wheel's current - /// time and the expiration time. for each in that population either - /// queue it for notification (in the case of the last level) or tier - /// it down to the next level (in all other cases). - pub(crate) fn process_expiration(&mut self, expiration: &Expiration) { - // Note that we need to take _all_ of the entries off the list before - // processing any of them. This is important because it's possible that - // those entries might need to be reinserted into the same slot. - // - // This happens only on the highest level, when an entry is inserted - // more than MAX_DURATION into the future. When this happens, we wrap - // around, and process some entries a multiple of MAX_DURATION before - // they actually need to be dropped down a level. 
We then reinsert them - // back into the same position; we must make sure we don't then process - // those entries again or we'll end up in an infinite loop. - let mut entries = self.take_entries(expiration); - - while let Some(item) = entries.pop_back() { - if expiration.level == 0 { - debug_assert_eq!(unsafe { item.cached_when() }, expiration.deadline); - } - - // Try to expire the entry; this is cheap (doesn't synchronize) if - // the timer is not expired, and updates cached_when. - match unsafe { item.mark_pending(expiration.deadline) } { - Ok(()) => { - // Item was expired - self.pending.push_front(item); - } - Err(expiration_tick) => { - let level = level_for(expiration.deadline, expiration_tick); - unsafe { - self.levels[level].add_entry(item); - } - } - } - } - } - - fn set_elapsed(&mut self, when: u64) { - assert!( - self.elapsed <= when, - "elapsed={:?}; when={:?}", - self.elapsed, - when - ); - - if when > self.elapsed { - self.elapsed = when; - } - } - - /// Obtains the list of entries that need processing for the given expiration. 
- /// - fn take_entries(&mut self, expiration: &Expiration) -> EntryList { - self.levels[expiration.level].take_slot(expiration.slot) - } - - fn level_for(&self, when: u64) -> usize { - level_for(self.elapsed, when) - } -} - -fn level_for(elapsed: u64, when: u64) -> usize { - const SLOT_MASK: u64 = (1 << 6) - 1; - - // Mask in the trailing bits ignored by the level calculation in order to cap - // the possible leading zeros - let mut masked = elapsed ^ when | SLOT_MASK; - - if masked >= MAX_DURATION { - // Fudge the timer into the top level - masked = MAX_DURATION - 1; - } - - let leading_zeros = masked.leading_zeros() as usize; - let significant = 63 - leading_zeros; - - significant / 6 -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::*; - - #[test] - fn test_level_for() { - for pos in 0..64 { - assert_eq!( - 0, - level_for(0, pos), - "level_for({}) -- binary = {:b}", - pos, - pos - ); - } - - for level in 1..5 { - for pos in level..64 { - let a = pos * 64_usize.pow(level as u32); - assert_eq!( - level, - level_for(0, a as u64), - "level_for({}) -- binary = {:b}", - a, - a - ); - - if pos > level { - let a = a - 1; - assert_eq!( - level, - level_for(0, a as u64), - "level_for({}) -- binary = {:b}", - a, - a - ); - } - - if pos < 64 { - let a = a + 1; - assert_eq!( - level, - level_for(0, a as u64), - "level_for({}) -- binary = {:b}", - a, - a - ); - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/signal/ctrl_c.rs s390-tools-2.33.1/rust-vendor/tokio/src/signal/ctrl_c.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/signal/ctrl_c.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/signal/ctrl_c.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,62 +0,0 @@ -#[cfg(unix)] -use super::unix::{self as os_impl}; -#[cfg(windows)] -use super::windows::{self as os_impl}; - -use std::io; - -/// Completes when a "ctrl-c" notification is sent to the process. 
-/// -/// While signals are handled very differently between Unix and Windows, both -/// platforms support receiving a signal on "ctrl-c". This function provides a -/// portable API for receiving this notification. -/// -/// Once the returned future is polled, a listener is registered. The future -/// will complete on the first received `ctrl-c` **after** the initial call to -/// either `Future::poll` or `.await`. -/// -/// # Caveats -/// -/// On Unix platforms, the first time that a `Signal` instance is registered for a -/// particular signal kind, an OS signal-handler is installed which replaces the -/// default platform behavior when that signal is received, **for the duration of -/// the entire process**. -/// -/// For example, Unix systems will terminate a process by default when it -/// receives a signal generated by "CTRL+C" on the terminal. But, when a -/// `ctrl_c` stream is created to listen for this signal, the time it arrives, -/// it will be translated to a stream event, and the process will continue to -/// execute. **Even if this `Signal` instance is dropped, subsequent SIGINT -/// deliveries will end up captured by Tokio, and the default platform behavior -/// will NOT be reset**. -/// -/// Thus, applications should take care to ensure the expected signal behavior -/// occurs as expected after listening for specific signals. 
-/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::signal; -/// -/// #[tokio::main] -/// async fn main() { -/// println!("waiting for ctrl-c"); -/// -/// signal::ctrl_c().await.expect("failed to listen for event"); -/// -/// println!("received ctrl-c event"); -/// } -/// ``` -/// -/// Listen in the background: -/// -/// ```rust,no_run -/// tokio::spawn(async move { -/// tokio::signal::ctrl_c().await.unwrap(); -/// // Your handler here -/// }); -/// ``` -pub async fn ctrl_c() -> io::Result<()> { - os_impl::ctrl_c()?.recv().await; - Ok(()) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/signal/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/signal/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/signal/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/signal/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,100 +0,0 @@ -//! Asynchronous signal handling for Tokio. -//! -//! Note that signal handling is in general a very tricky topic and should be -//! used with great care. This crate attempts to implement 'best practice' for -//! signal handling, but it should be evaluated for your own applications' needs -//! to see if it's suitable. -//! -//! There are some fundamental limitations of this crate documented on the OS -//! specific structures, as well. -//! -//! # Examples -//! -//! Print on "ctrl-c" notification. -//! -//! ```rust,no_run -//! use tokio::signal; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! signal::ctrl_c().await?; -//! println!("ctrl-c received!"); -//! Ok(()) -//! } -//! ``` -//! -//! Wait for SIGHUP on Unix -//! -//! ```rust,no_run -//! # #[cfg(unix)] { -//! use tokio::signal::unix::{signal, SignalKind}; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! // An infinite stream of hangup signals. -//! let mut stream = signal(SignalKind::hangup())?; -//! -//! // Print whenever a HUP signal is received -//! loop { -//! stream.recv().await; -//! 
println!("got signal HUP"); -//! } -//! } -//! # } -//! ``` -use crate::sync::watch::Receiver; -use std::task::{Context, Poll}; - -mod ctrl_c; -pub use ctrl_c::ctrl_c; - -pub(crate) mod registry; - -mod os { - #[cfg(unix)] - pub(crate) use super::unix::{OsExtraData, OsStorage}; - - #[cfg(windows)] - pub(crate) use super::windows::{OsExtraData, OsStorage}; -} - -pub mod unix; -pub mod windows; - -mod reusable_box; -use self::reusable_box::ReusableBoxFuture; - -#[derive(Debug)] -struct RxFuture { - inner: ReusableBoxFuture>, -} - -async fn make_future(mut rx: Receiver<()>) -> Receiver<()> { - match rx.changed().await { - Ok(()) => rx, - Err(_) => panic!("signal sender went away"), - } -} - -impl RxFuture { - fn new(rx: Receiver<()>) -> Self { - Self { - inner: ReusableBoxFuture::new(make_future(rx)), - } - } - - async fn recv(&mut self) -> Option<()> { - use crate::future::poll_fn; - poll_fn(|cx| self.poll_recv(cx)).await - } - - fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - match self.inner.poll(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(rx) => { - self.inner.set(make_future(rx)); - Poll::Ready(Some(())) - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/signal/registry.rs s390-tools-2.33.1/rust-vendor/tokio/src/signal/registry.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/signal/registry.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/signal/registry.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,283 +0,0 @@ -#![allow(clippy::unit_arg)] - -use crate::signal::os::{OsExtraData, OsStorage}; -use crate::sync::watch; -use crate::util::once_cell::OnceCell; - -use std::ops; -use std::sync::atomic::{AtomicBool, Ordering}; - -pub(crate) type EventId = usize; - -/// State for a specific event, whether a notification is pending delivery, -/// and what listeners are registered. 
-#[derive(Debug)] -pub(crate) struct EventInfo { - pending: AtomicBool, - tx: watch::Sender<()>, -} - -impl Default for EventInfo { - fn default() -> Self { - let (tx, _rx) = watch::channel(()); - - Self { - pending: AtomicBool::new(false), - tx, - } - } -} - -/// An interface for retrieving the `EventInfo` for a particular eventId. -pub(crate) trait Storage { - /// Gets the `EventInfo` for `id` if it exists. - fn event_info(&self, id: EventId) -> Option<&EventInfo>; - - /// Invokes `f` once for each defined `EventInfo` in this storage. - fn for_each<'a, F>(&'a self, f: F) - where - F: FnMut(&'a EventInfo); -} - -impl Storage for Vec { - fn event_info(&self, id: EventId) -> Option<&EventInfo> { - self.get(id) - } - - fn for_each<'a, F>(&'a self, f: F) - where - F: FnMut(&'a EventInfo), - { - self.iter().for_each(f) - } -} - -/// An interface for initializing a type. Useful for situations where we cannot -/// inject a configured instance in the constructor of another type. -pub(crate) trait Init { - fn init() -> Self; -} - -/// Manages and distributes event notifications to any registered listeners. -/// -/// Generic over the underlying storage to allow for domain specific -/// optimizations (e.g. eventIds may or may not be contiguous). -#[derive(Debug)] -pub(crate) struct Registry { - storage: S, -} - -impl Registry { - fn new(storage: S) -> Self { - Self { storage } - } -} - -impl Registry { - /// Registers a new listener for `event_id`. - fn register_listener(&self, event_id: EventId) -> watch::Receiver<()> { - self.storage - .event_info(event_id) - .unwrap_or_else(|| panic!("invalid event_id: {}", event_id)) - .tx - .subscribe() - } - - /// Marks `event_id` as having been delivered, without broadcasting it to - /// any listeners. 
- fn record_event(&self, event_id: EventId) { - if let Some(event_info) = self.storage.event_info(event_id) { - event_info.pending.store(true, Ordering::SeqCst) - } - } - - /// Broadcasts all previously recorded events to their respective listeners. - /// - /// Returns `true` if an event was delivered to at least one listener. - fn broadcast(&self) -> bool { - let mut did_notify = false; - self.storage.for_each(|event_info| { - // Any signal of this kind arrived since we checked last? - if !event_info.pending.swap(false, Ordering::SeqCst) { - return; - } - - // Ignore errors if there are no listeners - if event_info.tx.send(()).is_ok() { - did_notify = true; - } - }); - - did_notify - } -} - -pub(crate) struct Globals { - extra: OsExtraData, - registry: Registry, -} - -impl ops::Deref for Globals { - type Target = OsExtraData; - - fn deref(&self) -> &Self::Target { - &self.extra - } -} - -impl Globals { - /// Registers a new listener for `event_id`. - pub(crate) fn register_listener(&self, event_id: EventId) -> watch::Receiver<()> { - self.registry.register_listener(event_id) - } - - /// Marks `event_id` as having been delivered, without broadcasting it to - /// any listeners. - pub(crate) fn record_event(&self, event_id: EventId) { - self.registry.record_event(event_id); - } - - /// Broadcasts all previously recorded events to their respective listeners. - /// - /// Returns `true` if an event was delivered to at least one listener. 
- pub(crate) fn broadcast(&self) -> bool { - self.registry.broadcast() - } - - #[cfg(unix)] - pub(crate) fn storage(&self) -> &OsStorage { - &self.registry.storage - } -} - -fn globals_init() -> Globals -where - OsExtraData: 'static + Send + Sync + Init, - OsStorage: 'static + Send + Sync + Init, -{ - Globals { - extra: OsExtraData::init(), - registry: Registry::new(OsStorage::init()), - } -} - -pub(crate) fn globals() -> &'static Globals -where - OsExtraData: 'static + Send + Sync + Init, - OsStorage: 'static + Send + Sync + Init, -{ - static GLOBALS: OnceCell = OnceCell::new(); - - GLOBALS.get(globals_init) -} - -#[cfg(all(test, not(loom)))] -mod tests { - use super::*; - use crate::runtime::{self, Runtime}; - use crate::sync::{oneshot, watch}; - - use futures::future; - - #[test] - fn smoke() { - let rt = rt(); - rt.block_on(async move { - let registry = Registry::new(vec![ - EventInfo::default(), - EventInfo::default(), - EventInfo::default(), - ]); - - let first = registry.register_listener(0); - let second = registry.register_listener(1); - let third = registry.register_listener(2); - - let (fire, wait) = oneshot::channel(); - - crate::spawn(async { - wait.await.expect("wait failed"); - - // Record some events which should get coalesced - registry.record_event(0); - registry.record_event(0); - registry.record_event(1); - registry.record_event(1); - registry.broadcast(); - - // Yield so the previous broadcast can get received - // - // This yields many times since the block_on task is only polled every 61 - // ticks. 
- for _ in 0..100 { - crate::task::yield_now().await; - } - - // Send subsequent signal - registry.record_event(0); - registry.broadcast(); - - drop(registry); - }); - - let _ = fire.send(()); - let all = future::join3(collect(first), collect(second), collect(third)); - - let (first_results, second_results, third_results) = all.await; - assert_eq!(2, first_results.len()); - assert_eq!(1, second_results.len()); - assert_eq!(0, third_results.len()); - }); - } - - #[test] - #[should_panic = "invalid event_id: 1"] - fn register_panics_on_invalid_input() { - let registry = Registry::new(vec![EventInfo::default()]); - - registry.register_listener(1); - } - - #[test] - fn record_invalid_event_does_nothing() { - let registry = Registry::new(vec![EventInfo::default()]); - registry.record_event(1302); - } - - #[test] - fn broadcast_returns_if_at_least_one_event_fired() { - let registry = Registry::new(vec![EventInfo::default(), EventInfo::default()]); - - registry.record_event(0); - assert!(!registry.broadcast()); - - let first = registry.register_listener(0); - let second = registry.register_listener(1); - - registry.record_event(0); - assert!(registry.broadcast()); - - drop(first); - registry.record_event(0); - assert!(!registry.broadcast()); - - drop(second); - } - - fn rt() -> Runtime { - runtime::Builder::new_current_thread() - .enable_time() - .build() - .unwrap() - } - - async fn collect(mut rx: watch::Receiver<()>) -> Vec<()> { - let mut ret = vec![]; - - while let Ok(v) = rx.changed().await { - ret.push(v); - } - - ret - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/signal/reusable_box.rs s390-tools-2.33.1/rust-vendor/tokio/src/signal/reusable_box.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/signal/reusable_box.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/signal/reusable_box.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,227 +0,0 @@ -use std::alloc::Layout; -use std::future::Future; -use std::panic::AssertUnwindSafe; 
-use std::pin::Pin; -use std::ptr::{self, NonNull}; -use std::task::{Context, Poll}; -use std::{fmt, panic}; - -/// A reusable `Pin + Send>>`. -/// -/// This type lets you replace the future stored in the box without -/// reallocating when the size and alignment permits this. -pub(crate) struct ReusableBoxFuture { - boxed: NonNull + Send>, -} - -impl ReusableBoxFuture { - /// Create a new `ReusableBoxFuture` containing the provided future. - pub(crate) fn new(future: F) -> Self - where - F: Future + Send + 'static, - { - let boxed: Box + Send> = Box::new(future); - - let boxed = Box::into_raw(boxed); - - // SAFETY: Box::into_raw does not return null pointers. - let boxed = unsafe { NonNull::new_unchecked(boxed) }; - - Self { boxed } - } - - /// Replaces the future currently stored in this box. - /// - /// This reallocates if and only if the layout of the provided future is - /// different from the layout of the currently stored future. - pub(crate) fn set(&mut self, future: F) - where - F: Future + Send + 'static, - { - if let Err(future) = self.try_set(future) { - *self = Self::new(future); - } - } - - /// Replaces the future currently stored in this box. - /// - /// This function never reallocates, but returns an error if the provided - /// future has a different size or alignment from the currently stored - /// future. - pub(crate) fn try_set(&mut self, future: F) -> Result<(), F> - where - F: Future + Send + 'static, - { - // SAFETY: The pointer is not dangling. - let self_layout = { - let dyn_future: &(dyn Future + Send) = unsafe { self.boxed.as_ref() }; - Layout::for_value(dyn_future) - }; - - if Layout::new::() == self_layout { - // SAFETY: We just checked that the layout of F is correct. - unsafe { - self.set_same_layout(future); - } - - Ok(()) - } else { - Err(future) - } - } - - /// Sets the current future. - /// - /// # Safety - /// - /// This function requires that the layout of the provided future is the - /// same as `self.layout`. 
- unsafe fn set_same_layout(&mut self, future: F) - where - F: Future + Send + 'static, - { - // Drop the existing future, catching any panics. - let result = panic::catch_unwind(AssertUnwindSafe(|| { - ptr::drop_in_place(self.boxed.as_ptr()); - })); - - // Overwrite the future behind the pointer. This is safe because the - // allocation was allocated with the same size and alignment as the type F. - let self_ptr: *mut F = self.boxed.as_ptr() as *mut F; - ptr::write(self_ptr, future); - - // Update the vtable of self.boxed. The pointer is not null because we - // just got it from self.boxed, which is not null. - self.boxed = NonNull::new_unchecked(self_ptr); - - // If the old future's destructor panicked, resume unwinding. - match result { - Ok(()) => {} - Err(payload) => { - panic::resume_unwind(payload); - } - } - } - - /// Gets a pinned reference to the underlying future. - pub(crate) fn get_pin(&mut self) -> Pin<&mut (dyn Future + Send)> { - // SAFETY: The user of this box cannot move the box, and we do not move it - // either. - unsafe { Pin::new_unchecked(self.boxed.as_mut()) } - } - - /// Polls the future stored inside this box. - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { - self.get_pin().poll(cx) - } -} - -impl Future for ReusableBoxFuture { - type Output = T; - - /// Polls the future stored inside this box. - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::into_inner(self).get_pin().poll(cx) - } -} - -// The future stored inside ReusableBoxFuture must be Send. -unsafe impl Send for ReusableBoxFuture {} - -// The only method called on self.boxed is poll, which takes &mut self, so this -// struct being Sync does not permit any invalid access to the Future, even if -// the future is not Sync. -unsafe impl Sync for ReusableBoxFuture {} - -// Just like a Pin> is always Unpin, so is this type. 
-impl Unpin for ReusableBoxFuture {} - -impl Drop for ReusableBoxFuture { - fn drop(&mut self) { - unsafe { - drop(Box::from_raw(self.boxed.as_ptr())); - } - } -} - -impl fmt::Debug for ReusableBoxFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ReusableBoxFuture").finish() - } -} - -#[cfg(test)] -mod test { - use super::ReusableBoxFuture; - use futures::future::FutureExt; - use std::alloc::Layout; - use std::future::Future; - use std::pin::Pin; - use std::task::{Context, Poll}; - - #[test] - fn test_different_futures() { - let fut = async move { 10 }; - // Not zero sized! - assert_eq!(Layout::for_value(&fut).size(), 1); - - let mut b = ReusableBoxFuture::new(fut); - - assert_eq!(b.get_pin().now_or_never(), Some(10)); - - b.try_set(async move { 20 }) - .unwrap_or_else(|_| panic!("incorrect size")); - - assert_eq!(b.get_pin().now_or_never(), Some(20)); - - b.try_set(async move { 30 }) - .unwrap_or_else(|_| panic!("incorrect size")); - - assert_eq!(b.get_pin().now_or_never(), Some(30)); - } - - #[test] - fn test_different_sizes() { - let fut1 = async move { 10 }; - let val = [0u32; 1000]; - let fut2 = async move { val[0] }; - let fut3 = ZeroSizedFuture {}; - - assert_eq!(Layout::for_value(&fut1).size(), 1); - assert_eq!(Layout::for_value(&fut2).size(), 4004); - assert_eq!(Layout::for_value(&fut3).size(), 0); - - let mut b = ReusableBoxFuture::new(fut1); - assert_eq!(b.get_pin().now_or_never(), Some(10)); - b.set(fut2); - assert_eq!(b.get_pin().now_or_never(), Some(0)); - b.set(fut3); - assert_eq!(b.get_pin().now_or_never(), Some(5)); - } - - struct ZeroSizedFuture {} - impl Future for ZeroSizedFuture { - type Output = u32; - fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { - Poll::Ready(5) - } - } - - #[test] - fn test_zero_sized() { - let fut = ZeroSizedFuture {}; - // Zero sized! 
- assert_eq!(Layout::for_value(&fut).size(), 0); - - let mut b = ReusableBoxFuture::new(fut); - - assert_eq!(b.get_pin().now_or_never(), Some(5)); - assert_eq!(b.get_pin().now_or_never(), Some(5)); - - b.try_set(ZeroSizedFuture {}) - .unwrap_or_else(|_| panic!("incorrect size")); - - assert_eq!(b.get_pin().now_or_never(), Some(5)); - assert_eq!(b.get_pin().now_or_never(), Some(5)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/signal/unix.rs s390-tools-2.33.1/rust-vendor/tokio/src/signal/unix.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/signal/unix.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/signal/unix.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,530 +0,0 @@ -//! Unix-specific types for signal handling. -//! -//! This module is only defined on Unix platforms and contains the primary -//! `Signal` type for receiving notifications of signals. - -#![cfg(unix)] -#![cfg_attr(docsrs, doc(cfg(all(unix, feature = "signal"))))] - -use crate::runtime::scheduler; -use crate::runtime::signal::Handle; -use crate::signal::registry::{globals, EventId, EventInfo, Globals, Init, Storage}; -use crate::signal::RxFuture; -use crate::sync::watch; - -use mio::net::UnixStream; -use std::io::{self, Error, ErrorKind, Write}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Once; -use std::task::{Context, Poll}; - -pub(crate) type OsStorage = Vec; - -impl Init for OsStorage { - fn init() -> Self { - // There are reliable signals ranging from 1 to 33 available on every Unix platform. - #[cfg(not(target_os = "linux"))] - let possible = 0..=33; - - // On Linux, there are additional real-time signals available. 
- #[cfg(target_os = "linux")] - let possible = 0..=libc::SIGRTMAX(); - - possible.map(|_| SignalInfo::default()).collect() - } -} - -impl Storage for OsStorage { - fn event_info(&self, id: EventId) -> Option<&EventInfo> { - self.get(id).map(|si| &si.event_info) - } - - fn for_each<'a, F>(&'a self, f: F) - where - F: FnMut(&'a EventInfo), - { - self.iter().map(|si| &si.event_info).for_each(f) - } -} - -#[derive(Debug)] -pub(crate) struct OsExtraData { - sender: UnixStream, - pub(crate) receiver: UnixStream, -} - -impl Init for OsExtraData { - fn init() -> Self { - let (receiver, sender) = UnixStream::pair().expect("failed to create UnixStream"); - - Self { sender, receiver } - } -} - -/// Represents the specific kind of signal to listen for. -#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] -pub struct SignalKind(libc::c_int); - -impl SignalKind { - /// Allows for listening to any valid OS signal. - /// - /// For example, this can be used for listening for platform-specific - /// signals. - /// ```rust,no_run - /// # use tokio::signal::unix::SignalKind; - /// # let signum = -1; - /// // let signum = libc::OS_SPECIFIC_SIGNAL; - /// let kind = SignalKind::from_raw(signum); - /// ``` - // Use `std::os::raw::c_int` on public API to prevent leaking a non-stable - // type alias from libc. - // `libc::c_int` and `std::os::raw::c_int` are currently the same type, and are - // unlikely to change to other types, but technically libc can change this - // in the future minor version. - // See https://github.com/tokio-rs/tokio/issues/3767 for more. - pub const fn from_raw(signum: std::os::raw::c_int) -> Self { - Self(signum as libc::c_int) - } - - /// Get the signal's numeric value. - /// - /// ```rust - /// # use tokio::signal::unix::SignalKind; - /// let kind = SignalKind::interrupt(); - /// assert_eq!(kind.as_raw_value(), libc::SIGINT); - /// ``` - pub const fn as_raw_value(&self) -> std::os::raw::c_int { - self.0 - } - - /// Represents the SIGALRM signal. 
- /// - /// On Unix systems this signal is sent when a real-time timer has expired. - /// By default, the process is terminated by this signal. - pub const fn alarm() -> Self { - Self(libc::SIGALRM) - } - - /// Represents the SIGCHLD signal. - /// - /// On Unix systems this signal is sent when the status of a child process - /// has changed. By default, this signal is ignored. - pub const fn child() -> Self { - Self(libc::SIGCHLD) - } - - /// Represents the SIGHUP signal. - /// - /// On Unix systems this signal is sent when the terminal is disconnected. - /// By default, the process is terminated by this signal. - pub const fn hangup() -> Self { - Self(libc::SIGHUP) - } - - /// Represents the SIGINFO signal. - /// - /// On Unix systems this signal is sent to request a status update from the - /// process. By default, this signal is ignored. - #[cfg(any( - target_os = "dragonfly", - target_os = "freebsd", - target_os = "macos", - target_os = "netbsd", - target_os = "openbsd" - ))] - pub const fn info() -> Self { - Self(libc::SIGINFO) - } - - /// Represents the SIGINT signal. - /// - /// On Unix systems this signal is sent to interrupt a program. - /// By default, the process is terminated by this signal. - pub const fn interrupt() -> Self { - Self(libc::SIGINT) - } - - /// Represents the SIGIO signal. - /// - /// On Unix systems this signal is sent when I/O operations are possible - /// on some file descriptor. By default, this signal is ignored. - pub const fn io() -> Self { - Self(libc::SIGIO) - } - - /// Represents the SIGPIPE signal. - /// - /// On Unix systems this signal is sent when the process attempts to write - /// to a pipe which has no reader. By default, the process is terminated by - /// this signal. - pub const fn pipe() -> Self { - Self(libc::SIGPIPE) - } - - /// Represents the SIGQUIT signal. - /// - /// On Unix systems this signal is sent to issue a shutdown of the - /// process, after which the OS will dump the process core. 
- /// By default, the process is terminated by this signal. - pub const fn quit() -> Self { - Self(libc::SIGQUIT) - } - - /// Represents the SIGTERM signal. - /// - /// On Unix systems this signal is sent to issue a shutdown of the - /// process. By default, the process is terminated by this signal. - pub const fn terminate() -> Self { - Self(libc::SIGTERM) - } - - /// Represents the SIGUSR1 signal. - /// - /// On Unix systems this is a user defined signal. - /// By default, the process is terminated by this signal. - pub const fn user_defined1() -> Self { - Self(libc::SIGUSR1) - } - - /// Represents the SIGUSR2 signal. - /// - /// On Unix systems this is a user defined signal. - /// By default, the process is terminated by this signal. - pub const fn user_defined2() -> Self { - Self(libc::SIGUSR2) - } - - /// Represents the SIGWINCH signal. - /// - /// On Unix systems this signal is sent when the terminal window is resized. - /// By default, this signal is ignored. - pub const fn window_change() -> Self { - Self(libc::SIGWINCH) - } -} - -impl From for SignalKind { - fn from(signum: std::os::raw::c_int) -> Self { - Self::from_raw(signum as libc::c_int) - } -} - -impl From for std::os::raw::c_int { - fn from(kind: SignalKind) -> Self { - kind.as_raw_value() - } -} - -pub(crate) struct SignalInfo { - event_info: EventInfo, - init: Once, - initialized: AtomicBool, -} - -impl Default for SignalInfo { - fn default() -> SignalInfo { - SignalInfo { - event_info: Default::default(), - init: Once::new(), - initialized: AtomicBool::new(false), - } - } -} - -/// Our global signal handler for all signals registered by this module. -/// -/// The purpose of this signal handler is to primarily: -/// -/// 1. Flag that our specific signal was received (e.g. store an atomic flag) -/// 2. Wake up the driver by writing a byte to a pipe -/// -/// Those two operations should both be async-signal safe. 
-fn action(globals: &'static Globals, signal: libc::c_int) { - globals.record_event(signal as EventId); - - // Send a wakeup, ignore any errors (anything reasonably possible is - // full pipe and then it will wake up anyway). - let mut sender = &globals.sender; - drop(sender.write(&[1])); -} - -/// Enables this module to receive signal notifications for the `signal` -/// provided. -/// -/// This will register the signal handler if it hasn't already been registered, -/// returning any error along the way if that fails. -fn signal_enable(signal: SignalKind, handle: &Handle) -> io::Result<()> { - let signal = signal.0; - if signal < 0 || signal_hook_registry::FORBIDDEN.contains(&signal) { - return Err(Error::new( - ErrorKind::Other, - format!("Refusing to register signal {}", signal), - )); - } - - // Check that we have a signal driver running - handle.check_inner()?; - - let globals = globals(); - let siginfo = match globals.storage().get(signal as EventId) { - Some(slot) => slot, - None => return Err(io::Error::new(io::ErrorKind::Other, "signal too large")), - }; - let mut registered = Ok(()); - siginfo.init.call_once(|| { - registered = unsafe { - signal_hook_registry::register(signal, move || action(globals, signal)).map(|_| ()) - }; - if registered.is_ok() { - siginfo.initialized.store(true, Ordering::Relaxed); - } - }); - registered?; - // If the call_once failed, it won't be retried on the next attempt to register the signal. In - // such case it is not run, registered is still `Ok(())`, initialized is still `false`. - if siginfo.initialized.load(Ordering::Relaxed) { - Ok(()) - } else { - Err(Error::new( - ErrorKind::Other, - "Failed to register signal handler", - )) - } -} - -/// An listener for receiving a particular type of OS signal. -/// -/// The listener can be turned into a `Stream` using [`SignalStream`]. 
-/// -/// [`SignalStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.SignalStream.html -/// -/// In general signal handling on Unix is a pretty tricky topic, and this -/// structure is no exception! There are some important limitations to keep in -/// mind when using `Signal` streams: -/// -/// * Signals handling in Unix already necessitates coalescing signals -/// together sometimes. This `Signal` stream is also no exception here in -/// that it will also coalesce signals. That is, even if the signal handler -/// for this process runs multiple times, the `Signal` stream may only return -/// one signal notification. Specifically, before `poll` is called, all -/// signal notifications are coalesced into one item returned from `poll`. -/// Once `poll` has been called, however, a further signal is guaranteed to -/// be yielded as an item. -/// -/// Put another way, any element pulled off the returned listener corresponds to -/// *at least one* signal, but possibly more. -/// -/// * Signal handling in general is relatively inefficient. Although some -/// improvements are possible in this crate, it's recommended to not plan on -/// having millions of signal channels open. -/// -/// If you've got any questions about this feel free to open an issue on the -/// repo! New approaches to alleviate some of these limitations are always -/// appreciated! -/// -/// # Caveats -/// -/// The first time that a `Signal` instance is registered for a particular -/// signal kind, an OS signal-handler is installed which replaces the default -/// platform behavior when that signal is received, **for the duration of the -/// entire process**. -/// -/// For example, Unix systems will terminate a process by default when it -/// receives SIGINT. But, when a `Signal` instance is created to listen for -/// this signal, the next SIGINT that arrives will be translated to a stream -/// event, and the process will continue to execute. 
**Even if this `Signal` -/// instance is dropped, subsequent SIGINT deliveries will end up captured by -/// Tokio, and the default platform behavior will NOT be reset**. -/// -/// Thus, applications should take care to ensure the expected signal behavior -/// occurs as expected after listening for specific signals. -/// -/// # Examples -/// -/// Wait for SIGHUP -/// -/// ```rust,no_run -/// use tokio::signal::unix::{signal, SignalKind}; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// // An infinite stream of hangup signals. -/// let mut sig = signal(SignalKind::hangup())?; -/// -/// // Print whenever a HUP signal is received -/// loop { -/// sig.recv().await; -/// println!("got signal HUP"); -/// } -/// } -/// ``` -#[must_use = "streams do nothing unless polled"] -#[derive(Debug)] -pub struct Signal { - inner: RxFuture, -} - -/// Creates a new listener which will receive notifications when the current -/// process receives the specified signal `kind`. -/// -/// This function will create a new stream which binds to the default reactor. -/// The `Signal` stream is an infinite stream which will receive -/// notifications whenever a signal is received. More documentation can be -/// found on `Signal` itself, but to reiterate: -/// -/// * Signals may be coalesced beyond what the kernel already does. -/// * Once a signal handler is registered with the process the underlying -/// libc signal handler is never unregistered. -/// -/// A `Signal` stream can be created for a particular signal number -/// multiple times. When a signal is received then all the associated -/// channels will receive the signal notification. -/// -/// # Errors -/// -/// * If the lower-level C functions fail for some reason. -/// * If the previous initialization of this specific signal failed. 
-/// * If the signal is one of -/// [`signal_hook::FORBIDDEN`](fn@signal_hook_registry::register#panics) -/// -/// # Panics -/// -/// This function panics if there is no current reactor set, or if the `rt` -/// feature flag is not enabled. -#[track_caller] -pub fn signal(kind: SignalKind) -> io::Result { - let handle = scheduler::Handle::current(); - let rx = signal_with_handle(kind, handle.driver().signal())?; - - Ok(Signal { - inner: RxFuture::new(rx), - }) -} - -pub(crate) fn signal_with_handle( - kind: SignalKind, - handle: &Handle, -) -> io::Result> { - // Turn the signal delivery on once we are ready for it - signal_enable(kind, handle)?; - - Ok(globals().register_listener(kind.0 as EventId)) -} - -impl Signal { - /// Receives the next signal notification event. - /// - /// `None` is returned if no more events can be received by this stream. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If you use it as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that no signal is lost. - /// - /// # Examples - /// - /// Wait for SIGHUP - /// - /// ```rust,no_run - /// use tokio::signal::unix::{signal, SignalKind}; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // An infinite stream of hangup signals. - /// let mut stream = signal(SignalKind::hangup())?; - /// - /// // Print whenever a HUP signal is received - /// loop { - /// stream.recv().await; - /// println!("got signal HUP"); - /// } - /// } - /// ``` - pub async fn recv(&mut self) -> Option<()> { - self.inner.recv().await - } - - /// Polls to receive the next signal notification event, outside of an - /// `async` context. - /// - /// This method returns: - /// - /// * `Poll::Pending` if no signals are available but the channel is not - /// closed. - /// * `Poll::Ready(Some(()))` if a signal is available. 
- /// * `Poll::Ready(None)` if the channel has been closed and all signals - /// sent before it was closed have been received. - /// - /// # Examples - /// - /// Polling from a manually implemented future - /// - /// ```rust,no_run - /// use std::pin::Pin; - /// use std::future::Future; - /// use std::task::{Context, Poll}; - /// use tokio::signal::unix::Signal; - /// - /// struct MyFuture { - /// signal: Signal, - /// } - /// - /// impl Future for MyFuture { - /// type Output = Option<()>; - /// - /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - /// println!("polling MyFuture"); - /// self.signal.poll_recv(cx) - /// } - /// } - /// ``` - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_recv(cx) - } -} - -// Work around for abstracting streams internally -pub(crate) trait InternalStream { - fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll>; -} - -impl InternalStream for Signal { - fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.poll_recv(cx) - } -} - -pub(crate) fn ctrl_c() -> io::Result { - signal(SignalKind::interrupt()) -} - -#[cfg(all(test, not(loom)))] -mod tests { - use super::*; - - #[test] - fn signal_enable_error_on_invalid_input() { - signal_enable(SignalKind::from_raw(-1), &Handle::default()).unwrap_err(); - } - - #[test] - fn signal_enable_error_on_forbidden_input() { - signal_enable( - SignalKind::from_raw(signal_hook_registry::FORBIDDEN[0]), - &Handle::default(), - ) - .unwrap_err(); - } - - #[test] - fn from_c_int() { - assert_eq!(SignalKind::from(2), SignalKind::interrupt()); - } - - #[test] - fn into_c_int() { - let value: std::os::raw::c_int = SignalKind::interrupt().into(); - assert_eq!(value, libc::SIGINT as _); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/signal/windows/stub.rs s390-tools-2.33.1/rust-vendor/tokio/src/signal/windows/stub.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/signal/windows/stub.rs 2024-02-06 12:28:09.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/tokio/src/signal/windows/stub.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -//! Stub implementations for the platform API so that rustdoc can build linkable -//! documentation on non-windows platforms. - -use crate::signal::RxFuture; -use std::io; - -pub(super) fn ctrl_break() -> io::Result { - panic!() -} - -pub(super) fn ctrl_close() -> io::Result { - panic!() -} - -pub(super) fn ctrl_c() -> io::Result { - panic!() -} - -pub(super) fn ctrl_logoff() -> io::Result { - panic!() -} - -pub(super) fn ctrl_shutdown() -> io::Result { - panic!() -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/signal/windows/sys.rs s390-tools-2.33.1/rust-vendor/tokio/src/signal/windows/sys.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/signal/windows/sys.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/signal/windows/sys.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,229 +0,0 @@ -use std::io; -use std::sync::Once; - -use crate::signal::registry::{globals, EventId, EventInfo, Init, Storage}; -use crate::signal::RxFuture; - -use windows_sys::Win32::Foundation::BOOL; -use windows_sys::Win32::System::Console as console; - -pub(super) fn ctrl_break() -> io::Result { - new(console::CTRL_BREAK_EVENT) -} - -pub(super) fn ctrl_close() -> io::Result { - new(console::CTRL_CLOSE_EVENT) -} - -pub(super) fn ctrl_c() -> io::Result { - new(console::CTRL_C_EVENT) -} - -pub(super) fn ctrl_logoff() -> io::Result { - new(console::CTRL_LOGOFF_EVENT) -} - -pub(super) fn ctrl_shutdown() -> io::Result { - new(console::CTRL_SHUTDOWN_EVENT) -} - -fn new(signum: u32) -> io::Result { - global_init()?; - let rx = globals().register_listener(signum as EventId); - Ok(RxFuture::new(rx)) -} - -#[derive(Debug)] -pub(crate) struct OsStorage { - ctrl_break: EventInfo, - ctrl_close: EventInfo, - ctrl_c: EventInfo, - ctrl_logoff: EventInfo, - ctrl_shutdown: EventInfo, -} - -impl Init for OsStorage { - fn init() -> Self { - Self { - ctrl_break: 
Default::default(), - ctrl_close: Default::default(), - ctrl_c: Default::default(), - ctrl_logoff: Default::default(), - ctrl_shutdown: Default::default(), - } - } -} - -impl Storage for OsStorage { - fn event_info(&self, id: EventId) -> Option<&EventInfo> { - match u32::try_from(id) { - Ok(console::CTRL_BREAK_EVENT) => Some(&self.ctrl_break), - Ok(console::CTRL_CLOSE_EVENT) => Some(&self.ctrl_close), - Ok(console::CTRL_C_EVENT) => Some(&self.ctrl_c), - Ok(console::CTRL_LOGOFF_EVENT) => Some(&self.ctrl_logoff), - Ok(console::CTRL_SHUTDOWN_EVENT) => Some(&self.ctrl_shutdown), - _ => None, - } - } - - fn for_each<'a, F>(&'a self, mut f: F) - where - F: FnMut(&'a EventInfo), - { - f(&self.ctrl_break); - f(&self.ctrl_close); - f(&self.ctrl_c); - f(&self.ctrl_logoff); - f(&self.ctrl_shutdown); - } -} - -#[derive(Debug)] -pub(crate) struct OsExtraData {} - -impl Init for OsExtraData { - fn init() -> Self { - Self {} - } -} - -fn global_init() -> io::Result<()> { - static INIT: Once = Once::new(); - - let mut init = None; - - INIT.call_once(|| unsafe { - let rc = console::SetConsoleCtrlHandler(Some(handler), 1); - let ret = if rc == 0 { - Err(io::Error::last_os_error()) - } else { - Ok(()) - }; - - init = Some(ret); - }); - - init.unwrap_or_else(|| Ok(())) -} - -unsafe extern "system" fn handler(ty: u32) -> BOOL { - let globals = globals(); - globals.record_event(ty as EventId); - - // According to https://docs.microsoft.com/en-us/windows/console/handlerroutine - // the handler routine is always invoked in a new thread, thus we don't - // have the same restrictions as in Unix signal handlers, meaning we can - // go ahead and perform the broadcast here. - if globals.broadcast() { - 1 - } else { - // No one is listening for this notification any more - // let the OS fire the next (possibly the default) handler. 
- 0 - } -} - -#[cfg(all(test, not(loom)))] -mod tests { - use super::*; - use crate::runtime::Runtime; - - use tokio_test::{assert_ok, assert_pending, assert_ready_ok, task}; - - #[test] - fn ctrl_c() { - let rt = rt(); - let _enter = rt.enter(); - - let mut ctrl_c = task::spawn(crate::signal::ctrl_c()); - - assert_pending!(ctrl_c.poll()); - - // Windows doesn't have a good programmatic way of sending events - // like sending signals on Unix, so we'll stub out the actual OS - // integration and test that our handling works. - unsafe { - super::handler(console::CTRL_C_EVENT); - } - - assert_ready_ok!(ctrl_c.poll()); - } - - #[test] - fn ctrl_break() { - let rt = rt(); - - rt.block_on(async { - let mut ctrl_break = assert_ok!(crate::signal::windows::ctrl_break()); - - // Windows doesn't have a good programmatic way of sending events - // like sending signals on Unix, so we'll stub out the actual OS - // integration and test that our handling works. - unsafe { - super::handler(console::CTRL_BREAK_EVENT); - } - - ctrl_break.recv().await.unwrap(); - }); - } - - #[test] - fn ctrl_close() { - let rt = rt(); - - rt.block_on(async { - let mut ctrl_close = assert_ok!(crate::signal::windows::ctrl_close()); - - // Windows doesn't have a good programmatic way of sending events - // like sending signals on Unix, so we'll stub out the actual OS - // integration and test that our handling works. - unsafe { - super::handler(console::CTRL_CLOSE_EVENT); - } - - ctrl_close.recv().await.unwrap(); - }); - } - - #[test] - fn ctrl_shutdown() { - let rt = rt(); - - rt.block_on(async { - let mut ctrl_shutdown = assert_ok!(crate::signal::windows::ctrl_shutdown()); - - // Windows doesn't have a good programmatic way of sending events - // like sending signals on Unix, so we'll stub out the actual OS - // integration and test that our handling works. 
- unsafe { - super::handler(console::CTRL_SHUTDOWN_EVENT); - } - - ctrl_shutdown.recv().await.unwrap(); - }); - } - - #[test] - fn ctrl_logoff() { - let rt = rt(); - - rt.block_on(async { - let mut ctrl_logoff = assert_ok!(crate::signal::windows::ctrl_logoff()); - - // Windows doesn't have a good programmatic way of sending events - // like sending signals on Unix, so we'll stub out the actual OS - // integration and test that our handling works. - unsafe { - super::handler(console::CTRL_LOGOFF_EVENT); - } - - ctrl_logoff.recv().await.unwrap(); - }); - } - - fn rt() -> Runtime { - crate::runtime::Builder::new_current_thread() - .build() - .unwrap() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/signal/windows.rs s390-tools-2.33.1/rust-vendor/tokio/src/signal/windows.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/signal/windows.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/signal/windows.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,524 +0,0 @@ -//! Windows-specific types for signal handling. -//! -//! This module is only defined on Windows and allows receiving "ctrl-c", -//! "ctrl-break", "ctrl-logoff", "ctrl-shutdown", and "ctrl-close" -//! notifications. These events are listened for via the `SetConsoleCtrlHandler` -//! function which receives the corresponding windows_sys event type. - -#![cfg(any(windows, docsrs))] -#![cfg_attr(docsrs, doc(cfg(all(windows, feature = "signal"))))] - -use crate::signal::RxFuture; -use std::io; -use std::task::{Context, Poll}; - -#[cfg(not(docsrs))] -#[path = "windows/sys.rs"] -mod imp; -#[cfg(not(docsrs))] -pub(crate) use self::imp::{OsExtraData, OsStorage}; - -#[cfg(docsrs)] -#[path = "windows/stub.rs"] -mod imp; - -/// Creates a new listener which receives "ctrl-c" notifications sent to the -/// process. 
-/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::signal::windows::ctrl_c; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// // A listener of CTRL-C events. -/// let mut signal = ctrl_c()?; -/// -/// // Print whenever a CTRL-C event is received. -/// for countdown in (0..3).rev() { -/// signal.recv().await; -/// println!("got CTRL-C. {} more to exit", countdown); -/// } -/// -/// Ok(()) -/// } -/// ``` -pub fn ctrl_c() -> io::Result { - Ok(CtrlC { - inner: self::imp::ctrl_c()?, - }) -} - -/// Represents a listener which receives "ctrl-c" notifications sent to the process -/// via `SetConsoleCtrlHandler`. -/// -/// This event can be turned into a `Stream` using [`CtrlCStream`]. -/// -/// [`CtrlCStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.CtrlCStream.html -/// -/// A notification to this process notifies *all* receivers for -/// this event. Moreover, the notifications **are coalesced** if they aren't processed -/// quickly enough. This means that if two notifications are received back-to-back, -/// then the listener may only receive one item about the two notifications. -#[must_use = "listeners do nothing unless polled"] -#[derive(Debug)] -pub struct CtrlC { - inner: RxFuture, -} - -impl CtrlC { - /// Receives the next signal notification event. - /// - /// `None` is returned if no more events can be received by the listener. - /// - /// # Examples - /// - /// ```rust,no_run - /// use tokio::signal::windows::ctrl_c; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// let mut signal = ctrl_c()?; - /// - /// // Print whenever a CTRL-C event is received. - /// for countdown in (0..3).rev() { - /// signal.recv().await; - /// println!("got CTRL-C. 
{} more to exit", countdown); - /// } - /// - /// Ok(()) - /// } - /// ``` - pub async fn recv(&mut self) -> Option<()> { - self.inner.recv().await - } - - /// Polls to receive the next signal notification event, outside of an - /// `async` context. - /// - /// `None` is returned if no more events can be received. - /// - /// # Examples - /// - /// Polling from a manually implemented future - /// - /// ```rust,no_run - /// use std::pin::Pin; - /// use std::future::Future; - /// use std::task::{Context, Poll}; - /// use tokio::signal::windows::CtrlC; - /// - /// struct MyFuture { - /// ctrl_c: CtrlC, - /// } - /// - /// impl Future for MyFuture { - /// type Output = Option<()>; - /// - /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - /// println!("polling MyFuture"); - /// self.ctrl_c.poll_recv(cx) - /// } - /// } - /// ``` - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_recv(cx) - } -} - -/// Represents a listener which receives "ctrl-break" notifications sent to the process -/// via `SetConsoleCtrlHandler`. -/// -/// This listener can be turned into a `Stream` using [`CtrlBreakStream`]. -/// -/// [`CtrlBreakStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.CtrlBreakStream.html -/// -/// A notification to this process notifies *all* receivers for -/// this event. Moreover, the notifications **are coalesced** if they aren't processed -/// quickly enough. This means that if two notifications are received back-to-back, -/// then the listener may only receive one item about the two notifications. -#[must_use = "listeners do nothing unless polled"] -#[derive(Debug)] -pub struct CtrlBreak { - inner: RxFuture, -} - -impl CtrlBreak { - /// Receives the next signal notification event. - /// - /// `None` is returned if no more events can be received by this listener. 
- /// - /// # Examples - /// - /// ```rust,no_run - /// use tokio::signal::windows::ctrl_break; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // A listener of CTRL-BREAK events. - /// let mut signal = ctrl_break()?; - /// - /// // Print whenever a CTRL-BREAK event is received. - /// loop { - /// signal.recv().await; - /// println!("got signal CTRL-BREAK"); - /// } - /// } - /// ``` - pub async fn recv(&mut self) -> Option<()> { - self.inner.recv().await - } - - /// Polls to receive the next signal notification event, outside of an - /// `async` context. - /// - /// `None` is returned if no more events can be received by this listener. - /// - /// # Examples - /// - /// Polling from a manually implemented future - /// - /// ```rust,no_run - /// use std::pin::Pin; - /// use std::future::Future; - /// use std::task::{Context, Poll}; - /// use tokio::signal::windows::CtrlBreak; - /// - /// struct MyFuture { - /// ctrl_break: CtrlBreak, - /// } - /// - /// impl Future for MyFuture { - /// type Output = Option<()>; - /// - /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - /// println!("polling MyFuture"); - /// self.ctrl_break.poll_recv(cx) - /// } - /// } - /// ``` - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_recv(cx) - } -} - -/// Creates a new listener which receives "ctrl-break" notifications sent to the -/// process. -/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::signal::windows::ctrl_break; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// // A listener of CTRL-BREAK events. -/// let mut signal = ctrl_break()?; -/// -/// // Print whenever a CTRL-BREAK event is received. 
-/// loop { -/// signal.recv().await; -/// println!("got signal CTRL-BREAK"); -/// } -/// } -/// ``` -pub fn ctrl_break() -> io::Result { - Ok(CtrlBreak { - inner: self::imp::ctrl_break()?, - }) -} - -/// Creates a new listener which receives "ctrl-close" notifications sent to the -/// process. -/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::signal::windows::ctrl_close; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// // A listener of CTRL-CLOSE events. -/// let mut signal = ctrl_close()?; -/// -/// // Print whenever a CTRL-CLOSE event is received. -/// for countdown in (0..3).rev() { -/// signal.recv().await; -/// println!("got CTRL-CLOSE. {} more to exit", countdown); -/// } -/// -/// Ok(()) -/// } -/// ``` -pub fn ctrl_close() -> io::Result { - Ok(CtrlClose { - inner: self::imp::ctrl_close()?, - }) -} - -/// Represents a listener which receives "ctrl-close" notitifications sent to the process -/// via 'SetConsoleCtrlHandler'. -/// -/// A notification to this process notifies *all* listeners listening for -/// this event. Moreover, the notifications **are coalesced** if they aren't processed -/// quickly enough. This means that if two notifications are received back-to-back, -/// then the listener may only receive one item about the two notifications. -#[must_use = "listeners do nothing unless polled"] -#[derive(Debug)] -pub struct CtrlClose { - inner: RxFuture, -} - -impl CtrlClose { - /// Receives the next signal notification event. - /// - /// `None` is returned if no more events can be received by this listener. - /// - /// # Examples - /// - /// ```rust,no_run - /// use tokio::signal::windows::ctrl_close; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // A listener of CTRL-CLOSE events. - /// let mut signal = ctrl_close()?; - /// - /// // Print whenever a CTRL-CLOSE event is received. - /// signal.recv().await; - /// println!("got CTRL-CLOSE. 
Cleaning up before exiting"); - /// - /// Ok(()) - /// } - /// ``` - pub async fn recv(&mut self) -> Option<()> { - self.inner.recv().await - } - - /// Polls to receive the next signal notification event, outside of an - /// `async` context. - /// - /// `None` is returned if no more events can be received by this listener. - /// - /// # Examples - /// - /// Polling from a manually implemented future - /// - /// ```rust,no_run - /// use std::pin::Pin; - /// use std::future::Future; - /// use std::task::{Context, Poll}; - /// use tokio::signal::windows::CtrlClose; - /// - /// struct MyFuture { - /// ctrl_close: CtrlClose, - /// } - /// - /// impl Future for MyFuture { - /// type Output = Option<()>; - /// - /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - /// println!("polling MyFuture"); - /// self.ctrl_close.poll_recv(cx) - /// } - /// } - /// ``` - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_recv(cx) - } -} - -/// Creates a new listener which receives "ctrl-shutdown" notifications sent to the -/// process. -/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::signal::windows::ctrl_shutdown; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// // A listener of CTRL-SHUTDOWN events. -/// let mut signal = ctrl_shutdown()?; -/// -/// signal.recv().await; -/// println!("got CTRL-SHUTDOWN. Cleaning up before exiting"); -/// -/// Ok(()) -/// } -/// ``` -pub fn ctrl_shutdown() -> io::Result { - Ok(CtrlShutdown { - inner: self::imp::ctrl_shutdown()?, - }) -} - -/// Represents a listener which receives "ctrl-shutdown" notitifications sent to the process -/// via 'SetConsoleCtrlHandler'. -/// -/// A notification to this process notifies *all* listeners listening for -/// this event. Moreover, the notifications **are coalesced** if they aren't processed -/// quickly enough. 
This means that if two notifications are received back-to-back, -/// then the listener may only receive one item about the two notifications. -#[must_use = "listeners do nothing unless polled"] -#[derive(Debug)] -pub struct CtrlShutdown { - inner: RxFuture, -} - -impl CtrlShutdown { - /// Receives the next signal notification event. - /// - /// `None` is returned if no more events can be received by this listener. - /// - /// # Examples - /// - /// ```rust,no_run - /// use tokio::signal::windows::ctrl_shutdown; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // A listener of CTRL-SHUTDOWN events. - /// let mut signal = ctrl_shutdown()?; - /// - /// // Print whenever a CTRL-SHUTDOWN event is received. - /// signal.recv().await; - /// println!("got CTRL-SHUTDOWN. Cleaning up before exiting"); - /// - /// Ok(()) - /// } - /// ``` - pub async fn recv(&mut self) -> Option<()> { - self.inner.recv().await - } - - /// Polls to receive the next signal notification event, outside of an - /// `async` context. - /// - /// `None` is returned if no more events can be received by this listener. - /// - /// # Examples - /// - /// Polling from a manually implemented future - /// - /// ```rust,no_run - /// use std::pin::Pin; - /// use std::future::Future; - /// use std::task::{Context, Poll}; - /// use tokio::signal::windows::CtrlShutdown; - /// - /// struct MyFuture { - /// ctrl_shutdown: CtrlShutdown, - /// } - /// - /// impl Future for MyFuture { - /// type Output = Option<()>; - /// - /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - /// println!("polling MyFuture"); - /// self.ctrl_shutdown.poll_recv(cx) - /// } - /// } - /// ``` - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_recv(cx) - } -} - -/// Creates a new listener which receives "ctrl-logoff" notifications sent to the -/// process. 
-/// -/// # Examples -/// -/// ```rust,no_run -/// use tokio::signal::windows::ctrl_logoff; -/// -/// #[tokio::main] -/// async fn main() -> Result<(), Box> { -/// // A listener of CTRL-LOGOFF events. -/// let mut signal = ctrl_logoff()?; -/// -/// signal.recv().await; -/// println!("got CTRL-LOGOFF. Cleaning up before exiting"); -/// -/// Ok(()) -/// } -/// ``` -pub fn ctrl_logoff() -> io::Result { - Ok(CtrlLogoff { - inner: self::imp::ctrl_logoff()?, - }) -} - -/// Represents a listener which receives "ctrl-logoff" notitifications sent to the process -/// via 'SetConsoleCtrlHandler'. -/// -/// A notification to this process notifies *all* listeners listening for -/// this event. Moreover, the notifications **are coalesced** if they aren't processed -/// quickly enough. This means that if two notifications are received back-to-back, -/// then the listener may only receive one item about the two notifications. -#[must_use = "listeners do nothing unless polled"] -#[derive(Debug)] -pub struct CtrlLogoff { - inner: RxFuture, -} - -impl CtrlLogoff { - /// Receives the next signal notification event. - /// - /// `None` is returned if no more events can be received by this listener. - /// - /// # Examples - /// - /// ```rust,no_run - /// use tokio::signal::windows::ctrl_logoff; - /// - /// #[tokio::main] - /// async fn main() -> Result<(), Box> { - /// // An listener of CTRL-LOGOFF events. - /// let mut signal = ctrl_logoff()?; - /// - /// // Print whenever a CTRL-LOGOFF event is received. - /// signal.recv().await; - /// println!("got CTRL-LOGOFF. Cleaning up before exiting"); - /// - /// Ok(()) - /// } - /// ``` - pub async fn recv(&mut self) -> Option<()> { - self.inner.recv().await - } - - /// Polls to receive the next signal notification event, outside of an - /// `async` context. - /// - /// `None` is returned if no more events can be received by this listener. 
- /// - /// # Examples - /// - /// Polling from a manually implemented future - /// - /// ```rust,no_run - /// use std::pin::Pin; - /// use std::future::Future; - /// use std::task::{Context, Poll}; - /// use tokio::signal::windows::CtrlLogoff; - /// - /// struct MyFuture { - /// ctrl_logoff: CtrlLogoff, - /// } - /// - /// impl Future for MyFuture { - /// type Output = Option<()>; - /// - /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - /// println!("polling MyFuture"); - /// self.ctrl_logoff.poll_recv(cx) - /// } - /// } - /// ``` - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_recv(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/barrier.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/barrier.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/barrier.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/barrier.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,208 +0,0 @@ -use crate::loom::sync::Mutex; -use crate::sync::watch; -#[cfg(all(tokio_unstable, feature = "tracing"))] -use crate::util::trace; - -/// A barrier enables multiple tasks to synchronize the beginning of some computation. -/// -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use tokio::sync::Barrier; -/// use std::sync::Arc; -/// -/// let mut handles = Vec::with_capacity(10); -/// let barrier = Arc::new(Barrier::new(10)); -/// for _ in 0..10 { -/// let c = barrier.clone(); -/// // The same messages will be printed together. -/// // You will NOT see any interleaving. 
-/// handles.push(tokio::spawn(async move { -/// println!("before wait"); -/// let wait_result = c.wait().await; -/// println!("after wait"); -/// wait_result -/// })); -/// } -/// -/// // Will not resolve until all "after wait" messages have been printed -/// let mut num_leaders = 0; -/// for handle in handles { -/// let wait_result = handle.await.unwrap(); -/// if wait_result.is_leader() { -/// num_leaders += 1; -/// } -/// } -/// -/// // Exactly one barrier will resolve as the "leader" -/// assert_eq!(num_leaders, 1); -/// # } -/// ``` -#[derive(Debug)] -pub struct Barrier { - state: Mutex, - wait: watch::Receiver, - n: usize, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, -} - -#[derive(Debug)] -struct BarrierState { - waker: watch::Sender, - arrived: usize, - generation: usize, -} - -impl Barrier { - /// Creates a new barrier that can block a given number of tasks. - /// - /// A barrier will block `n`-1 tasks which call [`Barrier::wait`] and then wake up all - /// tasks at once when the `n`th task calls `wait`. - #[track_caller] - pub fn new(mut n: usize) -> Barrier { - let (waker, wait) = crate::sync::watch::channel(0); - - if n == 0 { - // if n is 0, it's not clear what behavior the user wants. - // in std::sync::Barrier, an n of 0 exhibits the same behavior as n == 1, where every - // .wait() immediately unblocks, so we adopt that here as well. 
- n = 1; - } - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = { - let location = std::panic::Location::caller(); - let resource_span = tracing::trace_span!( - "runtime.resource", - concrete_type = "Barrier", - kind = "Sync", - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ); - - resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - size = n, - ); - - tracing::trace!( - target: "runtime::resource::state_update", - arrived = 0, - ) - }); - resource_span - }; - - Barrier { - state: Mutex::new(BarrierState { - waker, - arrived: 0, - generation: 1, - }), - n, - wait, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } - } - - /// Does not resolve until all tasks have rendezvoused here. - /// - /// Barriers are re-usable after all tasks have rendezvoused once, and can - /// be used continuously. - /// - /// A single (arbitrary) future will receive a [`BarrierWaitResult`] that returns `true` from - /// [`BarrierWaitResult::is_leader`] when returning from this function, and all other tasks - /// will receive a result that will return `false` from `is_leader`. - pub async fn wait(&self) -> BarrierWaitResult { - #[cfg(all(tokio_unstable, feature = "tracing"))] - return trace::async_op( - || self.wait_internal(), - self.resource_span.clone(), - "Barrier::wait", - "poll", - false, - ) - .await; - - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - return self.wait_internal().await; - } - async fn wait_internal(&self) -> BarrierWaitResult { - crate::trace::async_trace_leaf().await; - - // NOTE: we are taking a _synchronous_ lock here. - // It is okay to do so because the critical section is fast and never yields, so it cannot - // deadlock even if another future is concurrently holding the lock. 
- // It is _desirable_ to do so as synchronous Mutexes are, at least in theory, faster than - // the asynchronous counter-parts, so we should use them where possible [citation needed]. - // NOTE: the extra scope here is so that the compiler doesn't think `state` is held across - // a yield point, and thus marks the returned future as !Send. - let generation = { - let mut state = self.state.lock(); - let generation = state.generation; - state.arrived += 1; - #[cfg(all(tokio_unstable, feature = "tracing"))] - tracing::trace!( - target: "runtime::resource::state_update", - arrived = 1, - arrived.op = "add", - ); - #[cfg(all(tokio_unstable, feature = "tracing"))] - tracing::trace!( - target: "runtime::resource::async_op::state_update", - arrived = true, - ); - if state.arrived == self.n { - #[cfg(all(tokio_unstable, feature = "tracing"))] - tracing::trace!( - target: "runtime::resource::async_op::state_update", - is_leader = true, - ); - // we are the leader for this generation - // wake everyone, increment the generation, and return - state - .waker - .send(state.generation) - .expect("there is at least one receiver"); - state.arrived = 0; - state.generation += 1; - return BarrierWaitResult(true); - } - - generation - }; - - // we're going to have to wait for the last of the generation to arrive - let mut wait = self.wait.clone(); - - loop { - let _ = wait.changed().await; - - // note that the first time through the loop, this _will_ yield a generation - // immediately, since we cloned a receiver that has never seen any values. - if *wait.borrow() >= generation { - break; - } - } - - BarrierWaitResult(false) - } -} - -/// A `BarrierWaitResult` is returned by `wait` when all tasks in the `Barrier` have rendezvoused. -#[derive(Debug, Clone)] -pub struct BarrierWaitResult(bool); - -impl BarrierWaitResult { - /// Returns `true` if this task from wait is the "leader task". 
- /// - /// Only one task will have `true` returned from their result, all other tasks will have - /// `false` returned. - pub fn is_leader(&self) -> bool { - self.0 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/batch_semaphore.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/batch_semaphore.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/batch_semaphore.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/batch_semaphore.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,753 +0,0 @@ -#![cfg_attr(not(feature = "sync"), allow(unreachable_pub, dead_code))] -//! # Implementation Details. -//! -//! The semaphore is implemented using an intrusive linked list of waiters. An -//! atomic counter tracks the number of available permits. If the semaphore does -//! not contain the required number of permits, the task attempting to acquire -//! permits places its waker at the end of a queue. When new permits are made -//! available (such as by releasing an initial acquisition), they are assigned -//! to the task at the front of the queue, waking that task if its requested -//! number of permits is met. -//! -//! Because waiters are enqueued at the back of the linked list and dequeued -//! from the front, the semaphore is fair. Tasks trying to acquire large numbers -//! of permits at a time will always be woken eventually, even if many other -//! tasks are acquiring smaller numbers of permits. This means that in a -//! use-case like tokio's read-write lock, writers will not be starved by -//! readers. 
-use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::{Mutex, MutexGuard}; -use crate::util::linked_list::{self, LinkedList}; -#[cfg(all(tokio_unstable, feature = "tracing"))] -use crate::util::trace; -use crate::util::WakeList; - -use std::future::Future; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::*; -use std::task::{Context, Poll, Waker}; -use std::{cmp, fmt}; - -/// An asynchronous counting semaphore which permits waiting on multiple permits at once. -pub(crate) struct Semaphore { - waiters: Mutex, - /// The current number of available permits in the semaphore. - permits: AtomicUsize, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, -} - -struct Waitlist { - queue: LinkedList::Target>, - closed: bool, -} - -/// Error returned from the [`Semaphore::try_acquire`] function. -/// -/// [`Semaphore::try_acquire`]: crate::sync::Semaphore::try_acquire -#[derive(Debug, PartialEq, Eq)] -pub enum TryAcquireError { - /// The semaphore has been [closed] and cannot issue new permits. - /// - /// [closed]: crate::sync::Semaphore::close - Closed, - - /// The semaphore has no available permits. - NoPermits, -} -/// Error returned from the [`Semaphore::acquire`] function. -/// -/// An `acquire` operation can only fail if the semaphore has been -/// [closed]. -/// -/// [closed]: crate::sync::Semaphore::close -/// [`Semaphore::acquire`]: crate::sync::Semaphore::acquire -#[derive(Debug)] -pub struct AcquireError(()); - -pub(crate) struct Acquire<'a> { - node: Waiter, - semaphore: &'a Semaphore, - num_permits: u32, - queued: bool, -} - -/// An entry in the wait queue. -struct Waiter { - /// The current state of the waiter. - /// - /// This is either the number of remaining permits required by - /// the waiter, or a flag indicating that the waiter is not yet queued. 
- state: AtomicUsize, - - /// The waker to notify the task awaiting permits. - /// - /// # Safety - /// - /// This may only be accessed while the wait queue is locked. - waker: UnsafeCell>, - - /// Intrusive linked-list pointers. - /// - /// # Safety - /// - /// This may only be accessed while the wait queue is locked. - /// - /// TODO: Ideally, we would be able to use loom to enforce that - /// this isn't accessed concurrently. However, it is difficult to - /// use a `UnsafeCell` here, since the `Link` trait requires _returning_ - /// references to `Pointers`, and `UnsafeCell` requires that checked access - /// take place inside a closure. We should consider changing `Pointers` to - /// use `UnsafeCell` internally. - pointers: linked_list::Pointers, - - #[cfg(all(tokio_unstable, feature = "tracing"))] - ctx: trace::AsyncOpTracingCtx, - - /// Should not be `Unpin`. - _p: PhantomPinned, -} - -generate_addr_of_methods! { - impl<> Waiter { - unsafe fn addr_of_pointers(self: NonNull) -> NonNull> { - &self.pointers - } - } -} - -impl Semaphore { - /// The maximum number of permits which a semaphore can hold. - /// - /// Note that this reserves three bits of flags in the permit counter, but - /// we only actually use one of them. However, the previous semaphore - /// implementation used three bits, so we will continue to reserve them to - /// avoid a breaking change if additional flags need to be added in the - /// future. - pub(crate) const MAX_PERMITS: usize = std::usize::MAX >> 3; - const CLOSED: usize = 1; - // The least-significant bit in the number of permits is reserved to use - // as a flag indicating that the semaphore has been closed. Consequently - // PERMIT_SHIFT is used to leave that bit for that purpose. - const PERMIT_SHIFT: usize = 1; - - /// Creates a new semaphore with the initial number of permits - /// - /// Maximum number of permits on 32-bit platforms is `1<<29`. 
- pub(crate) fn new(permits: usize) -> Self { - assert!( - permits <= Self::MAX_PERMITS, - "a semaphore may not have more than MAX_PERMITS permits ({})", - Self::MAX_PERMITS - ); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = { - let resource_span = tracing::trace_span!( - "runtime.resource", - concrete_type = "Semaphore", - kind = "Sync", - is_internal = true - ); - - resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - permits = permits, - permits.op = "override", - ) - }); - resource_span - }; - - Self { - permits: AtomicUsize::new(permits << Self::PERMIT_SHIFT), - waiters: Mutex::new(Waitlist { - queue: LinkedList::new(), - closed: false, - }), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } - } - - /// Creates a new semaphore with the initial number of permits. - /// - /// Maximum number of permits on 32-bit platforms is `1<<29`. - #[cfg(not(all(loom, test)))] - pub(crate) const fn const_new(permits: usize) -> Self { - assert!(permits <= Self::MAX_PERMITS); - - Self { - permits: AtomicUsize::new(permits << Self::PERMIT_SHIFT), - waiters: Mutex::const_new(Waitlist { - queue: LinkedList::new(), - closed: false, - }), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span::none(), - } - } - - /// Creates a new closed semaphore with 0 permits. - pub(crate) fn new_closed() -> Self { - Self { - permits: AtomicUsize::new(Self::CLOSED), - waiters: Mutex::new(Waitlist { - queue: LinkedList::new(), - closed: true, - }), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span::none(), - } - } - - /// Creates a new closed semaphore with 0 permits. 
- #[cfg(not(all(loom, test)))] - pub(crate) const fn const_new_closed() -> Self { - Self { - permits: AtomicUsize::new(Self::CLOSED), - waiters: Mutex::const_new(Waitlist { - queue: LinkedList::new(), - closed: true, - }), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span::none(), - } - } - - /// Returns the current number of available permits. - pub(crate) fn available_permits(&self) -> usize { - self.permits.load(Acquire) >> Self::PERMIT_SHIFT - } - - /// Adds `added` new permits to the semaphore. - /// - /// The maximum number of permits is `usize::MAX >> 3`, and this function will panic if the limit is exceeded. - pub(crate) fn release(&self, added: usize) { - if added == 0 { - return; - } - - // Assign permits to the wait queue - self.add_permits_locked(added, self.waiters.lock()); - } - - /// Closes the semaphore. This prevents the semaphore from issuing new - /// permits and notifies all pending waiters. - pub(crate) fn close(&self) { - let mut waiters = self.waiters.lock(); - // If the semaphore's permits counter has enough permits for an - // unqueued waiter to acquire all the permits it needs immediately, - // it won't touch the wait list. Therefore, we have to set a bit on - // the permit counter as well. However, we must do this while - // holding the lock --- otherwise, if we set the bit and then wait - // to acquire the lock we'll enter an inconsistent state where the - // permit counter is closed, but the wait list is not. - self.permits.fetch_or(Self::CLOSED, Release); - waiters.closed = true; - while let Some(mut waiter) = waiters.queue.pop_back() { - let waker = unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) }; - if let Some(waker) = waker { - waker.wake(); - } - } - } - - /// Returns true if the semaphore is closed. 
- pub(crate) fn is_closed(&self) -> bool { - self.permits.load(Acquire) & Self::CLOSED == Self::CLOSED - } - - pub(crate) fn try_acquire(&self, num_permits: u32) -> Result<(), TryAcquireError> { - assert!( - num_permits as usize <= Self::MAX_PERMITS, - "a semaphore may not have more than MAX_PERMITS permits ({})", - Self::MAX_PERMITS - ); - let num_permits = (num_permits as usize) << Self::PERMIT_SHIFT; - let mut curr = self.permits.load(Acquire); - loop { - // Has the semaphore closed? - if curr & Self::CLOSED == Self::CLOSED { - return Err(TryAcquireError::Closed); - } - - // Are there enough permits remaining? - if curr < num_permits { - return Err(TryAcquireError::NoPermits); - } - - let next = curr - num_permits; - - match self.permits.compare_exchange(curr, next, AcqRel, Acquire) { - Ok(_) => { - // TODO: Instrument once issue has been solved - return Ok(()); - } - Err(actual) => curr = actual, - } - } - } - - pub(crate) fn acquire(&self, num_permits: u32) -> Acquire<'_> { - Acquire::new(self, num_permits) - } - - /// Release `rem` permits to the semaphore's wait list, starting from the - /// end of the queue. - /// - /// If `rem` exceeds the number of permits needed by the wait list, the - /// remainder are assigned back to the semaphore. - fn add_permits_locked(&self, mut rem: usize, waiters: MutexGuard<'_, Waitlist>) { - let mut wakers = WakeList::new(); - let mut lock = Some(waiters); - let mut is_empty = false; - while rem > 0 { - let mut waiters = lock.take().unwrap_or_else(|| self.waiters.lock()); - 'inner: while wakers.can_push() { - // Was the waiter assigned enough permits to wake it? - match waiters.queue.last() { - Some(waiter) => { - if !waiter.assign_permits(&mut rem) { - break 'inner; - } - } - None => { - is_empty = true; - // If we assigned permits to all the waiters in the queue, and there are - // still permits left over, assign them back to the semaphore. 
- break 'inner; - } - }; - let mut waiter = waiters.queue.pop_back().unwrap(); - if let Some(waker) = - unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) } - { - wakers.push(waker); - } - } - - if rem > 0 && is_empty { - let permits = rem; - assert!( - permits <= Self::MAX_PERMITS, - "cannot add more than MAX_PERMITS permits ({})", - Self::MAX_PERMITS - ); - let prev = self.permits.fetch_add(rem << Self::PERMIT_SHIFT, Release); - let prev = prev >> Self::PERMIT_SHIFT; - assert!( - prev + permits <= Self::MAX_PERMITS, - "number of added permits ({}) would overflow MAX_PERMITS ({})", - rem, - Self::MAX_PERMITS - ); - - // add remaining permits back - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - permits = rem, - permits.op = "add", - ) - }); - - rem = 0; - } - - drop(waiters); // release the lock - - wakers.wake_all(); - } - - assert_eq!(rem, 0); - } - - fn poll_acquire( - &self, - cx: &mut Context<'_>, - num_permits: u32, - node: Pin<&mut Waiter>, - queued: bool, - ) -> Poll> { - let mut acquired = 0; - - let needed = if queued { - node.state.load(Acquire) << Self::PERMIT_SHIFT - } else { - (num_permits as usize) << Self::PERMIT_SHIFT - }; - - let mut lock = None; - // First, try to take the requested number of permits from the - // semaphore. - let mut curr = self.permits.load(Acquire); - let mut waiters = loop { - // Has the semaphore closed? 
- if curr & Self::CLOSED > 0 { - return Poll::Ready(Err(AcquireError::closed())); - } - - let mut remaining = 0; - let total = curr - .checked_add(acquired) - .expect("number of permits must not overflow"); - let (next, acq) = if total >= needed { - let next = curr - (needed - acquired); - (next, needed >> Self::PERMIT_SHIFT) - } else { - remaining = (needed - acquired) - curr; - (0, curr >> Self::PERMIT_SHIFT) - }; - - if remaining > 0 && lock.is_none() { - // No permits were immediately available, so this permit will - // (probably) need to wait. We'll need to acquire a lock on the - // wait queue before continuing. We need to do this _before_ the - // CAS that sets the new value of the semaphore's `permits` - // counter. Otherwise, if we subtract the permits and then - // acquire the lock, we might miss additional permits being - // added while waiting for the lock. - lock = Some(self.waiters.lock()); - } - - match self.permits.compare_exchange(curr, next, AcqRel, Acquire) { - Ok(_) => { - acquired += acq; - if remaining == 0 { - if !queued { - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - permits = acquired, - permits.op = "sub", - ); - tracing::trace!( - target: "runtime::resource::async_op::state_update", - permits_obtained = acquired, - permits.op = "add", - ) - }); - - return Poll::Ready(Ok(())); - } else if lock.is_none() { - break self.waiters.lock(); - } - } - break lock.expect("lock must be acquired before waiting"); - } - Err(actual) => curr = actual, - } - }; - - if waiters.closed { - return Poll::Ready(Err(AcquireError::closed())); - } - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - permits = acquired, - permits.op = "sub", - ) - }); - - if node.assign_permits(&mut acquired) { - self.add_permits_locked(acquired, waiters); - return 
Poll::Ready(Ok(())); - } - - assert_eq!(acquired, 0); - let mut old_waker = None; - - // Otherwise, register the waker & enqueue the node. - node.waker.with_mut(|waker| { - // Safety: the wait list is locked, so we may modify the waker. - let waker = unsafe { &mut *waker }; - // Do we need to register the new waker? - if waker - .as_ref() - .map(|waker| !waker.will_wake(cx.waker())) - .unwrap_or(true) - { - old_waker = std::mem::replace(waker, Some(cx.waker().clone())); - } - }); - - // If the waiter is not already in the wait queue, enqueue it. - if !queued { - let node = unsafe { - let node = Pin::into_inner_unchecked(node) as *mut _; - NonNull::new_unchecked(node) - }; - - waiters.queue.push_front(node); - } - drop(waiters); - drop(old_waker); - - Poll::Pending - } -} - -impl fmt::Debug for Semaphore { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Semaphore") - .field("permits", &self.available_permits()) - .finish() - } -} - -impl Waiter { - fn new( - num_permits: u32, - #[cfg(all(tokio_unstable, feature = "tracing"))] ctx: trace::AsyncOpTracingCtx, - ) -> Self { - Waiter { - waker: UnsafeCell::new(None), - state: AtomicUsize::new(num_permits as usize), - pointers: linked_list::Pointers::new(), - #[cfg(all(tokio_unstable, feature = "tracing"))] - ctx, - _p: PhantomPinned, - } - } - - /// Assign permits to the waiter. 
- /// - /// Returns `true` if the waiter should be removed from the queue - fn assign_permits(&self, n: &mut usize) -> bool { - let mut curr = self.state.load(Acquire); - loop { - let assign = cmp::min(curr, *n); - let next = curr - assign; - match self.state.compare_exchange(curr, next, AcqRel, Acquire) { - Ok(_) => { - *n -= assign; - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.ctx.async_op_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::async_op::state_update", - permits_obtained = assign, - permits.op = "add", - ); - }); - return next == 0; - } - Err(actual) => curr = actual, - } - } - } -} - -impl Future for Acquire<'_> { - type Output = Result<(), AcquireError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let _resource_span = self.node.ctx.resource_span.clone().entered(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let _async_op_span = self.node.ctx.async_op_span.clone().entered(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let _async_op_poll_span = self.node.ctx.async_op_poll_span.clone().entered(); - - let (node, semaphore, needed, queued) = self.project(); - - // First, ensure the current task has enough budget to proceed. 
- #[cfg(all(tokio_unstable, feature = "tracing"))] - let coop = ready!(trace_poll_op!( - "poll_acquire", - crate::runtime::coop::poll_proceed(cx), - )); - - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - let result = match semaphore.poll_acquire(cx, needed, node, *queued) { - Poll::Pending => { - *queued = true; - Poll::Pending - } - Poll::Ready(r) => { - coop.made_progress(); - r?; - *queued = false; - Poll::Ready(Ok(())) - } - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - return trace_poll_op!("poll_acquire", result); - - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - return result; - } -} - -impl<'a> Acquire<'a> { - fn new(semaphore: &'a Semaphore, num_permits: u32) -> Self { - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - return Self { - node: Waiter::new(num_permits), - semaphore, - num_permits, - queued: false, - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - return semaphore.resource_span.in_scope(|| { - let async_op_span = - tracing::trace_span!("runtime.resource.async_op", source = "Acquire::new"); - let async_op_poll_span = async_op_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::async_op::state_update", - permits_requested = num_permits, - permits.op = "override", - ); - - tracing::trace!( - target: "runtime::resource::async_op::state_update", - permits_obtained = 0usize, - permits.op = "override", - ); - - tracing::trace_span!("runtime.resource.async_op.poll") - }); - - let ctx = trace::AsyncOpTracingCtx { - async_op_span, - async_op_poll_span, - resource_span: semaphore.resource_span.clone(), - }; - - Self { - node: Waiter::new(num_permits, ctx), - semaphore, - num_permits, - queued: false, - } - }); - } - - fn project(self: Pin<&mut Self>) -> (Pin<&mut Waiter>, &Semaphore, u32, &mut bool) { - fn is_unpin() {} - unsafe { - // Safety: all fields other than `node` are `Unpin` - - is_unpin::<&Semaphore>(); - 
is_unpin::<&mut bool>(); - is_unpin::(); - - let this = self.get_unchecked_mut(); - ( - Pin::new_unchecked(&mut this.node), - this.semaphore, - this.num_permits, - &mut this.queued, - ) - } - } -} - -impl Drop for Acquire<'_> { - fn drop(&mut self) { - // If the future is completed, there is no node in the wait list, so we - // can skip acquiring the lock. - if !self.queued { - return; - } - - // This is where we ensure safety. The future is being dropped, - // which means we must ensure that the waiter entry is no longer stored - // in the linked list. - let mut waiters = self.semaphore.waiters.lock(); - - // remove the entry from the list - let node = NonNull::from(&mut self.node); - // Safety: we have locked the wait list. - unsafe { waiters.queue.remove(node) }; - - let acquired_permits = self.num_permits as usize - self.node.state.load(Acquire); - if acquired_permits > 0 { - self.semaphore.add_permits_locked(acquired_permits, waiters); - } - } -} - -// Safety: the `Acquire` future is not `Sync` automatically because it contains -// a `Waiter`, which, in turn, contains an `UnsafeCell`. However, the -// `UnsafeCell` is only accessed when the future is borrowed mutably (either in -// `poll` or in `drop`). Therefore, it is safe (although not particularly -// _useful_) for the future to be borrowed immutably across threads. -unsafe impl Sync for Acquire<'_> {} - -// ===== impl AcquireError ==== - -impl AcquireError { - fn closed() -> AcquireError { - AcquireError(()) - } -} - -impl fmt::Display for AcquireError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "semaphore closed") - } -} - -impl std::error::Error for AcquireError {} - -// ===== impl TryAcquireError ===== - -impl TryAcquireError { - /// Returns `true` if the error was caused by a closed semaphore. - #[allow(dead_code)] // may be used later! 
- pub(crate) fn is_closed(&self) -> bool { - matches!(self, TryAcquireError::Closed) - } - - /// Returns `true` if the error was caused by calling `try_acquire` on a - /// semaphore with no available permits. - #[allow(dead_code)] // may be used later! - pub(crate) fn is_no_permits(&self) -> bool { - matches!(self, TryAcquireError::NoPermits) - } -} - -impl fmt::Display for TryAcquireError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TryAcquireError::Closed => write!(fmt, "semaphore closed"), - TryAcquireError::NoPermits => write!(fmt, "no permits available"), - } - } -} - -impl std::error::Error for TryAcquireError {} - -/// # Safety -/// -/// `Waiter` is forced to be !Unpin. -unsafe impl linked_list::Link for Waiter { - type Handle = NonNull; - type Target = Waiter; - - fn as_raw(handle: &Self::Handle) -> NonNull { - *handle - } - - unsafe fn from_raw(ptr: NonNull) -> NonNull { - ptr - } - - unsafe fn pointers(target: NonNull) -> NonNull> { - Waiter::addr_of_pointers(target) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/broadcast.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/broadcast.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/broadcast.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/broadcast.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1517 +0,0 @@ -//! A multi-producer, multi-consumer broadcast queue. Each sent value is seen by -//! all consumers. -//! -//! A [`Sender`] is used to broadcast values to **all** connected [`Receiver`] -//! values. [`Sender`] handles are clone-able, allowing concurrent send and -//! receive actions. [`Sender`] and [`Receiver`] are both `Send` and `Sync` as -//! long as `T` is `Send`. -//! -//! When a value is sent, **all** [`Receiver`] handles are notified and will -//! receive the value. The value is stored once inside the channel and cloned on -//! demand for each receiver. Once all receivers have received a clone of the -//! 
value, the value is released from the channel. -//! -//! A channel is created by calling [`channel`], specifying the maximum number -//! of messages the channel can retain at any given time. -//! -//! New [`Receiver`] handles are created by calling [`Sender::subscribe`]. The -//! returned [`Receiver`] will receive values sent **after** the call to -//! `subscribe`. -//! -//! This channel is also suitable for the single-producer multi-consumer -//! use-case, where a single sender broadcasts values to many receivers. -//! -//! ## Lagging -//! -//! As sent messages must be retained until **all** [`Receiver`] handles receive -//! a clone, broadcast channels are susceptible to the "slow receiver" problem. -//! In this case, all but one receiver are able to receive values at the rate -//! they are sent. Because one receiver is stalled, the channel starts to fill -//! up. -//! -//! This broadcast channel implementation handles this case by setting a hard -//! upper bound on the number of values the channel may retain at any given -//! time. This upper bound is passed to the [`channel`] function as an argument. -//! -//! If a value is sent when the channel is at capacity, the oldest value -//! currently held by the channel is released. This frees up space for the new -//! value. Any receiver that has not yet seen the released value will return -//! [`RecvError::Lagged`] the next time [`recv`] is called. -//! -//! Once [`RecvError::Lagged`] is returned, the lagging receiver's position is -//! updated to the oldest value contained by the channel. The next call to -//! [`recv`] will return this value. -//! -//! This behavior enables a receiver to detect when it has lagged so far behind -//! that data has been dropped. The caller may decide how to respond to this: -//! either by aborting its task or by tolerating lost messages and resuming -//! consumption of the channel. -//! -//! ## Closing -//! -//! 
When **all** [`Sender`] handles have been dropped, no new values may be -//! sent. At this point, the channel is "closed". Once a receiver has received -//! all values retained by the channel, the next call to [`recv`] will return -//! with [`RecvError::Closed`]. -//! -//! When a [`Receiver`] handle is dropped, any messages not read by the receiver -//! will be marked as read. If this receiver was the only one not to have read -//! that message, the message will be dropped at this point. -//! -//! [`Sender`]: crate::sync::broadcast::Sender -//! [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe -//! [`Receiver`]: crate::sync::broadcast::Receiver -//! [`channel`]: crate::sync::broadcast::channel -//! [`RecvError::Lagged`]: crate::sync::broadcast::error::RecvError::Lagged -//! [`RecvError::Closed`]: crate::sync::broadcast::error::RecvError::Closed -//! [`recv`]: crate::sync::broadcast::Receiver::recv -//! -//! # Examples -//! -//! Basic usage -//! -//! ``` -//! use tokio::sync::broadcast; -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, mut rx1) = broadcast::channel(16); -//! let mut rx2 = tx.subscribe(); -//! -//! tokio::spawn(async move { -//! assert_eq!(rx1.recv().await.unwrap(), 10); -//! assert_eq!(rx1.recv().await.unwrap(), 20); -//! }); -//! -//! tokio::spawn(async move { -//! assert_eq!(rx2.recv().await.unwrap(), 10); -//! assert_eq!(rx2.recv().await.unwrap(), 20); -//! }); -//! -//! tx.send(10).unwrap(); -//! tx.send(20).unwrap(); -//! } -//! ``` -//! -//! Handling lag -//! -//! ``` -//! use tokio::sync::broadcast; -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, mut rx) = broadcast::channel(2); -//! -//! tx.send(10).unwrap(); -//! tx.send(20).unwrap(); -//! tx.send(30).unwrap(); -//! -//! // The receiver lagged behind -//! assert!(rx.recv().await.is_err()); -//! -//! // At this point, we can abort or continue with lost messages -//! -//! assert_eq!(20, rx.recv().await.unwrap()); -//! 
assert_eq!(30, rx.recv().await.unwrap()); -//! } -//! ``` - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard}; -use crate::util::linked_list::{self, GuardedLinkedList, LinkedList}; -use crate::util::WakeList; - -use std::fmt; -use std::future::Future; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::SeqCst; -use std::task::{Context, Poll, Waker}; -use std::usize; - -/// Sending-half of the [`broadcast`] channel. -/// -/// May be used from many threads. Messages can be sent with -/// [`send`][Sender::send]. -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::broadcast; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, mut rx1) = broadcast::channel(16); -/// let mut rx2 = tx.subscribe(); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx1.recv().await.unwrap(), 10); -/// assert_eq!(rx1.recv().await.unwrap(), 20); -/// }); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx2.recv().await.unwrap(), 10); -/// assert_eq!(rx2.recv().await.unwrap(), 20); -/// }); -/// -/// tx.send(10).unwrap(); -/// tx.send(20).unwrap(); -/// } -/// ``` -/// -/// [`broadcast`]: crate::sync::broadcast -pub struct Sender { - shared: Arc>, -} - -/// Receiving-half of the [`broadcast`] channel. -/// -/// Must not be used concurrently. Messages may be retrieved using -/// [`recv`][Receiver::recv]. -/// -/// To turn this receiver into a `Stream`, you can use the [`BroadcastStream`] -/// wrapper. 
-/// -/// [`BroadcastStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.BroadcastStream.html -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::broadcast; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, mut rx1) = broadcast::channel(16); -/// let mut rx2 = tx.subscribe(); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx1.recv().await.unwrap(), 10); -/// assert_eq!(rx1.recv().await.unwrap(), 20); -/// }); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx2.recv().await.unwrap(), 10); -/// assert_eq!(rx2.recv().await.unwrap(), 20); -/// }); -/// -/// tx.send(10).unwrap(); -/// tx.send(20).unwrap(); -/// } -/// ``` -/// -/// [`broadcast`]: crate::sync::broadcast -pub struct Receiver { - /// State shared with all receivers and senders. - shared: Arc>, - - /// Next position to read from - next: u64, -} - -pub mod error { - //! Broadcast error types - - use std::fmt; - - /// Error returned by from the [`send`] function on a [`Sender`]. - /// - /// A **send** operation can only fail if there are no active receivers, - /// implying that the message could never be received. The error contains the - /// message being sent as a payload so it can be recovered. - /// - /// [`send`]: crate::sync::broadcast::Sender::send - /// [`Sender`]: crate::sync::broadcast::Sender - #[derive(Debug)] - pub struct SendError(pub T); - - impl fmt::Display for SendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "channel closed") - } - } - - impl std::error::Error for SendError {} - - /// An error returned from the [`recv`] function on a [`Receiver`]. - /// - /// [`recv`]: crate::sync::broadcast::Receiver::recv - /// [`Receiver`]: crate::sync::broadcast::Receiver - #[derive(Debug, PartialEq, Eq, Clone)] - pub enum RecvError { - /// There are no more active senders implying no further messages will ever - /// be sent. - Closed, - - /// The receiver lagged too far behind. 
Attempting to receive again will - /// return the oldest message still retained by the channel. - /// - /// Includes the number of skipped messages. - Lagged(u64), - } - - impl fmt::Display for RecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - RecvError::Closed => write!(f, "channel closed"), - RecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt), - } - } - } - - impl std::error::Error for RecvError {} - - /// An error returned from the [`try_recv`] function on a [`Receiver`]. - /// - /// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv - /// [`Receiver`]: crate::sync::broadcast::Receiver - #[derive(Debug, PartialEq, Eq, Clone)] - pub enum TryRecvError { - /// The channel is currently empty. There are still active - /// [`Sender`] handles, so data may yet become available. - /// - /// [`Sender`]: crate::sync::broadcast::Sender - Empty, - - /// There are no more active senders implying no further messages will ever - /// be sent. - Closed, - - /// The receiver lagged too far behind and has been forcibly disconnected. - /// Attempting to receive again will return the oldest message still - /// retained by the channel. - /// - /// Includes the number of skipped messages. - Lagged(u64), - } - - impl fmt::Display for TryRecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TryRecvError::Empty => write!(f, "channel empty"), - TryRecvError::Closed => write!(f, "channel closed"), - TryRecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt), - } - } - } - - impl std::error::Error for TryRecvError {} -} - -use self::error::*; - -/// Data shared between senders and receivers. -struct Shared { - /// slots in the channel. - buffer: Box<[RwLock>]>, - - /// Mask a position -> index. - mask: usize, - - /// Tail of the queue. Includes the rx wait list. - tail: Mutex, - - /// Number of outstanding Sender handles. - num_tx: AtomicUsize, -} - -/// Next position to write a value. 
-struct Tail { - /// Next position to write to. - pos: u64, - - /// Number of active receivers. - rx_cnt: usize, - - /// True if the channel is closed. - closed: bool, - - /// Receivers waiting for a value. - waiters: LinkedList::Target>, -} - -/// Slot in the buffer. -struct Slot { - /// Remaining number of receivers that are expected to see this value. - /// - /// When this goes to zero, the value is released. - /// - /// An atomic is used as it is mutated concurrently with the slot read lock - /// acquired. - rem: AtomicUsize, - - /// Uniquely identifies the `send` stored in the slot. - pos: u64, - - /// The value being broadcast. - /// - /// The value is set by `send` when the write lock is held. When a reader - /// drops, `rem` is decremented. When it hits zero, the value is dropped. - val: UnsafeCell>, -} - -/// An entry in the wait queue. -struct Waiter { - /// True if queued. - queued: bool, - - /// Task waiting on the broadcast channel. - waker: Option, - - /// Intrusive linked-list pointers. - pointers: linked_list::Pointers, - - /// Should not be `Unpin`. - _p: PhantomPinned, -} - -impl Waiter { - fn new() -> Self { - Self { - queued: false, - waker: None, - pointers: linked_list::Pointers::new(), - _p: PhantomPinned, - } - } -} - -generate_addr_of_methods! { - impl<> Waiter { - unsafe fn addr_of_pointers(self: NonNull) -> NonNull> { - &self.pointers - } - } -} - -struct RecvGuard<'a, T> { - slot: RwLockReadGuard<'a, Slot>, -} - -/// Receive a value future. -struct Recv<'a, T> { - /// Receiver being waited on. - receiver: &'a mut Receiver, - - /// Entry in the waiter `LinkedList`. - waiter: UnsafeCell, -} - -unsafe impl<'a, T: Send> Send for Recv<'a, T> {} -unsafe impl<'a, T: Send> Sync for Recv<'a, T> {} - -/// Max number of receivers. Reserve space to lock. -const MAX_RECEIVERS: usize = usize::MAX >> 2; - -/// Create a bounded, multi-producer, multi-consumer channel where each sent -/// value is broadcasted to all active receivers. 
-/// -/// **Note:** The actual capacity may be greater than the provided `capacity`. -/// -/// All data sent on [`Sender`] will become available on every active -/// [`Receiver`] in the same order as it was sent. -/// -/// The `Sender` can be cloned to `send` to the same channel from multiple -/// points in the process or it can be used concurrently from an `Arc`. New -/// `Receiver` handles are created by calling [`Sender::subscribe`]. -/// -/// If all [`Receiver`] handles are dropped, the `send` method will return a -/// [`SendError`]. Similarly, if all [`Sender`] handles are dropped, the [`recv`] -/// method will return a [`RecvError`]. -/// -/// [`Sender`]: crate::sync::broadcast::Sender -/// [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe -/// [`Receiver`]: crate::sync::broadcast::Receiver -/// [`recv`]: crate::sync::broadcast::Receiver::recv -/// [`SendError`]: crate::sync::broadcast::error::SendError -/// [`RecvError`]: crate::sync::broadcast::error::RecvError -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::broadcast; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, mut rx1) = broadcast::channel(16); -/// let mut rx2 = tx.subscribe(); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx1.recv().await.unwrap(), 10); -/// assert_eq!(rx1.recv().await.unwrap(), 20); -/// }); -/// -/// tokio::spawn(async move { -/// assert_eq!(rx2.recv().await.unwrap(), 10); -/// assert_eq!(rx2.recv().await.unwrap(), 20); -/// }); -/// -/// tx.send(10).unwrap(); -/// tx.send(20).unwrap(); -/// } -/// ``` -/// -/// # Panics -/// -/// This will panic if `capacity` is equal to `0` or larger -/// than `usize::MAX / 2`. -#[track_caller] -pub fn channel(capacity: usize) -> (Sender, Receiver) { - // SAFETY: In the line below we are creating one extra receiver, so there will be 1 in total. 
- let tx = unsafe { Sender::new_with_receiver_count(1, capacity) }; - let rx = Receiver { - shared: tx.shared.clone(), - next: 0, - }; - (tx, rx) -} - -unsafe impl Send for Sender {} -unsafe impl Sync for Sender {} - -unsafe impl Send for Receiver {} -unsafe impl Sync for Receiver {} - -impl Sender { - /// Creates the sending-half of the [`broadcast`] channel. - /// - /// See the documentation of [`broadcast::channel`] for more information on this method. - /// - /// [`broadcast`]: crate::sync::broadcast - /// [`broadcast::channel`]: crate::sync::broadcast - #[track_caller] - pub fn new(capacity: usize) -> Self { - // SAFETY: We don't create extra receivers, so there are 0. - unsafe { Self::new_with_receiver_count(0, capacity) } - } - - /// Creates the sending-half of the [`broadcast`](self) channel, and provide the receiver - /// count. - /// - /// See the documentation of [`broadcast::channel`](self::channel) for more errors when - /// calling this function. - /// - /// # Safety: - /// - /// The caller must ensure that the amount of receivers for this Sender is correct before - /// the channel functionalities are used, the count is zero by default, as this function - /// does not create any receivers by itself. 
- #[track_caller] - unsafe fn new_with_receiver_count(receiver_count: usize, mut capacity: usize) -> Self { - assert!(capacity > 0, "broadcast channel capacity cannot be zero"); - assert!( - capacity <= usize::MAX >> 1, - "broadcast channel capacity exceeded `usize::MAX / 2`" - ); - - // Round to a power of two - capacity = capacity.next_power_of_two(); - - let mut buffer = Vec::with_capacity(capacity); - - for i in 0..capacity { - buffer.push(RwLock::new(Slot { - rem: AtomicUsize::new(0), - pos: (i as u64).wrapping_sub(capacity as u64), - val: UnsafeCell::new(None), - })); - } - - let shared = Arc::new(Shared { - buffer: buffer.into_boxed_slice(), - mask: capacity - 1, - tail: Mutex::new(Tail { - pos: 0, - rx_cnt: receiver_count, - closed: false, - waiters: LinkedList::new(), - }), - num_tx: AtomicUsize::new(1), - }); - - Sender { shared } - } - - /// Attempts to send a value to all active [`Receiver`] handles, returning - /// it back if it could not be sent. - /// - /// A successful send occurs when there is at least one active [`Receiver`] - /// handle. An unsuccessful send would be one where all associated - /// [`Receiver`] handles have already been dropped. - /// - /// # Return - /// - /// On success, the number of subscribed [`Receiver`] handles is returned. - /// This does not mean that this number of receivers will see the message as - /// a receiver may drop or lag ([see lagging](self#lagging)) before receiving - /// the message. - /// - /// # Note - /// - /// A return value of `Ok` **does not** mean that the sent value will be - /// observed by all or any of the active [`Receiver`] handles. [`Receiver`] - /// handles may be dropped before receiving the sent message. - /// - /// A return value of `Err` **does not** mean that future calls to `send` - /// will fail. New [`Receiver`] handles may be created by calling - /// [`subscribe`]. 
- /// - /// [`Receiver`]: crate::sync::broadcast::Receiver - /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx1) = broadcast::channel(16); - /// let mut rx2 = tx.subscribe(); - /// - /// tokio::spawn(async move { - /// assert_eq!(rx1.recv().await.unwrap(), 10); - /// assert_eq!(rx1.recv().await.unwrap(), 20); - /// }); - /// - /// tokio::spawn(async move { - /// assert_eq!(rx2.recv().await.unwrap(), 10); - /// assert_eq!(rx2.recv().await.unwrap(), 20); - /// }); - /// - /// tx.send(10).unwrap(); - /// tx.send(20).unwrap(); - /// } - /// ``` - pub fn send(&self, value: T) -> Result> { - let mut tail = self.shared.tail.lock(); - - if tail.rx_cnt == 0 { - return Err(SendError(value)); - } - - // Position to write into - let pos = tail.pos; - let rem = tail.rx_cnt; - let idx = (pos & self.shared.mask as u64) as usize; - - // Update the tail position - tail.pos = tail.pos.wrapping_add(1); - - // Get the slot - let mut slot = self.shared.buffer[idx].write().unwrap(); - - // Track the position - slot.pos = pos; - - // Set remaining receivers - slot.rem.with_mut(|v| *v = rem); - - // Write the value - slot.val = UnsafeCell::new(Some(value)); - - // Release the slot lock before notifying the receivers. - drop(slot); - - // Notify and release the mutex. This must happen after the slot lock is - // released, otherwise the writer lock bit could be cleared while another - // thread is in the critical section. - self.shared.notify_rx(tail); - - Ok(rem) - } - - /// Creates a new [`Receiver`] handle that will receive values sent **after** - /// this call to `subscribe`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, _rx) = broadcast::channel(16); - /// - /// // Will not be seen - /// tx.send(10).unwrap(); - /// - /// let mut rx = tx.subscribe(); - /// - /// tx.send(20).unwrap(); - /// - /// let value = rx.recv().await.unwrap(); - /// assert_eq!(20, value); - /// } - /// ``` - pub fn subscribe(&self) -> Receiver { - let shared = self.shared.clone(); - new_receiver(shared) - } - - /// Returns the number of queued values. - /// - /// A value is queued until it has either been seen by all receivers that were alive at the time - /// it was sent, or has been evicted from the queue by subsequent sends that exceeded the - /// queue's capacity. - /// - /// # Note - /// - /// In contrast to [`Receiver::len`], this method only reports queued values and not values that - /// have been evicted from the queue before being seen by all receivers. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx1) = broadcast::channel(16); - /// let mut rx2 = tx.subscribe(); - /// - /// tx.send(10).unwrap(); - /// tx.send(20).unwrap(); - /// tx.send(30).unwrap(); - /// - /// assert_eq!(tx.len(), 3); - /// - /// rx1.recv().await.unwrap(); - /// - /// // The len is still 3 since rx2 hasn't seen the first value yet. 
- /// assert_eq!(tx.len(), 3); - /// - /// rx2.recv().await.unwrap(); - /// - /// assert_eq!(tx.len(), 2); - /// } - /// ``` - pub fn len(&self) -> usize { - let tail = self.shared.tail.lock(); - - let base_idx = (tail.pos & self.shared.mask as u64) as usize; - let mut low = 0; - let mut high = self.shared.buffer.len(); - while low < high { - let mid = low + (high - low) / 2; - let idx = base_idx.wrapping_add(mid) & self.shared.mask; - if self.shared.buffer[idx].read().unwrap().rem.load(SeqCst) == 0 { - low = mid + 1; - } else { - high = mid; - } - } - - self.shared.buffer.len() - low - } - - /// Returns true if there are no queued values. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx1) = broadcast::channel(16); - /// let mut rx2 = tx.subscribe(); - /// - /// assert!(tx.is_empty()); - /// - /// tx.send(10).unwrap(); - /// - /// assert!(!tx.is_empty()); - /// - /// rx1.recv().await.unwrap(); - /// - /// // The queue is still not empty since rx2 hasn't seen the value. - /// assert!(!tx.is_empty()); - /// - /// rx2.recv().await.unwrap(); - /// - /// assert!(tx.is_empty()); - /// } - /// ``` - pub fn is_empty(&self) -> bool { - let tail = self.shared.tail.lock(); - - let idx = (tail.pos.wrapping_sub(1) & self.shared.mask as u64) as usize; - self.shared.buffer[idx].read().unwrap().rem.load(SeqCst) == 0 - } - - /// Returns the number of active receivers - /// - /// An active receiver is a [`Receiver`] handle returned from [`channel`] or - /// [`subscribe`]. These are the handles that will receive values sent on - /// this [`Sender`]. - /// - /// # Note - /// - /// It is not guaranteed that a sent message will reach this number of - /// receivers. Active receivers may never call [`recv`] again before - /// dropping. 
- /// - /// [`recv`]: crate::sync::broadcast::Receiver::recv - /// [`Receiver`]: crate::sync::broadcast::Receiver - /// [`Sender`]: crate::sync::broadcast::Sender - /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe - /// [`channel`]: crate::sync::broadcast::channel - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, _rx1) = broadcast::channel(16); - /// - /// assert_eq!(1, tx.receiver_count()); - /// - /// let mut _rx2 = tx.subscribe(); - /// - /// assert_eq!(2, tx.receiver_count()); - /// - /// tx.send(10).unwrap(); - /// } - /// ``` - pub fn receiver_count(&self) -> usize { - let tail = self.shared.tail.lock(); - tail.rx_cnt - } - - /// Returns `true` if senders belong to the same channel. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, _rx) = broadcast::channel::<()>(16); - /// let tx2 = tx.clone(); - /// - /// assert!(tx.same_channel(&tx2)); - /// - /// let (tx3, _rx3) = broadcast::channel::<()>(16); - /// - /// assert!(!tx3.same_channel(&tx2)); - /// } - /// ``` - pub fn same_channel(&self, other: &Self) -> bool { - Arc::ptr_eq(&self.shared, &other.shared) - } - - fn close_channel(&self) { - let mut tail = self.shared.tail.lock(); - tail.closed = true; - - self.shared.notify_rx(tail); - } -} - -/// Create a new `Receiver` which reads starting from the tail. -fn new_receiver(shared: Arc>) -> Receiver { - let mut tail = shared.tail.lock(); - - if tail.rx_cnt == MAX_RECEIVERS { - panic!("max receivers"); - } - - tail.rx_cnt = tail.rx_cnt.checked_add(1).expect("overflow"); - - let next = tail.pos; - - drop(tail); - - Receiver { shared, next } -} - -/// List used in `Shared::notify_rx`. It wraps a guarded linked list -/// and gates the access to it on the `Shared.tail` mutex. It also empties -/// the list on drop. 
-struct WaitersList<'a, T> { - list: GuardedLinkedList::Target>, - is_empty: bool, - shared: &'a Shared, -} - -impl<'a, T> Drop for WaitersList<'a, T> { - fn drop(&mut self) { - // If the list is not empty, we unlink all waiters from it. - // We do not wake the waiters to avoid double panics. - if !self.is_empty { - let _lock_guard = self.shared.tail.lock(); - while self.list.pop_back().is_some() {} - } - } -} - -impl<'a, T> WaitersList<'a, T> { - fn new( - unguarded_list: LinkedList::Target>, - guard: Pin<&'a Waiter>, - shared: &'a Shared, - ) -> Self { - let guard_ptr = NonNull::from(guard.get_ref()); - let list = unguarded_list.into_guarded(guard_ptr); - WaitersList { - list, - is_empty: false, - shared, - } - } - - /// Removes the last element from the guarded list. Modifying this list - /// requires an exclusive access to the main list in `Notify`. - fn pop_back_locked(&mut self, _tail: &mut Tail) -> Option> { - let result = self.list.pop_back(); - if result.is_none() { - // Save information about emptiness to avoid waiting for lock - // in the destructor. - self.is_empty = true; - } - result - } -} - -impl Shared { - fn notify_rx<'a, 'b: 'a>(&'b self, mut tail: MutexGuard<'a, Tail>) { - // It is critical for `GuardedLinkedList` safety that the guard node is - // pinned in memory and is not dropped until the guarded list is dropped. - let guard = Waiter::new(); - pin!(guard); - - // We move all waiters to a secondary list. It uses a `GuardedLinkedList` - // underneath to allow every waiter to safely remove itself from it. - // - // * This list will be still guarded by the `waiters` lock. - // `NotifyWaitersList` wrapper makes sure we hold the lock to modify it. - // * This wrapper will empty the list on drop. It is critical for safety - // that we will not leave any list entry with a pointer to the local - // guard node after this function returns / panics. 
- let mut list = WaitersList::new(std::mem::take(&mut tail.waiters), guard.as_ref(), self); - - let mut wakers = WakeList::new(); - 'outer: loop { - while wakers.can_push() { - match list.pop_back_locked(&mut tail) { - Some(mut waiter) => { - // Safety: `tail` lock is still held. - let waiter = unsafe { waiter.as_mut() }; - - assert!(waiter.queued); - waiter.queued = false; - - if let Some(waker) = waiter.waker.take() { - wakers.push(waker); - } - } - None => { - break 'outer; - } - } - } - - // Release the lock before waking. - drop(tail); - - // Before we acquire the lock again all sorts of things can happen: - // some waiters may remove themselves from the list and new waiters - // may be added. This is fine since at worst we will unnecessarily - // wake up waiters which will then queue themselves again. - - wakers.wake_all(); - - // Acquire the lock again. - tail = self.tail.lock(); - } - - // Release the lock before waking. - drop(tail); - - wakers.wake_all(); - } -} - -impl Clone for Sender { - fn clone(&self) -> Sender { - let shared = self.shared.clone(); - shared.num_tx.fetch_add(1, SeqCst); - - Sender { shared } - } -} - -impl Drop for Sender { - fn drop(&mut self) { - if 1 == self.shared.num_tx.fetch_sub(1, SeqCst) { - self.close_channel(); - } - } -} - -impl Receiver { - /// Returns the number of messages that were sent into the channel and that - /// this [`Receiver`] has yet to receive. - /// - /// If the returned value from `len` is larger than the next largest power of 2 - /// of the capacity of the channel any call to [`recv`] will return an - /// `Err(RecvError::Lagged)` and any call to [`try_recv`] will return an - /// `Err(TryRecvError::Lagged)`, e.g. if the capacity of the channel is 10, - /// [`recv`] will start to return `Err(RecvError::Lagged)` once `len` returns - /// values larger than 16. 
- /// - /// [`Receiver`]: crate::sync::broadcast::Receiver - /// [`recv`]: crate::sync::broadcast::Receiver::recv - /// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx1) = broadcast::channel(16); - /// - /// tx.send(10).unwrap(); - /// tx.send(20).unwrap(); - /// - /// assert_eq!(rx1.len(), 2); - /// assert_eq!(rx1.recv().await.unwrap(), 10); - /// assert_eq!(rx1.len(), 1); - /// assert_eq!(rx1.recv().await.unwrap(), 20); - /// assert_eq!(rx1.len(), 0); - /// } - /// ``` - pub fn len(&self) -> usize { - let next_send_pos = self.shared.tail.lock().pos; - (next_send_pos - self.next) as usize - } - - /// Returns true if there aren't any messages in the channel that the [`Receiver`] - /// has yet to receive. - /// - /// [`Receiver]: create::sync::broadcast::Receiver - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx1) = broadcast::channel(16); - /// - /// assert!(rx1.is_empty()); - /// - /// tx.send(10).unwrap(); - /// tx.send(20).unwrap(); - /// - /// assert!(!rx1.is_empty()); - /// assert_eq!(rx1.recv().await.unwrap(), 10); - /// assert_eq!(rx1.recv().await.unwrap(), 20); - /// assert!(rx1.is_empty()); - /// } - /// ``` - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns `true` if receivers belong to the same channel. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx) = broadcast::channel::<()>(16); - /// let rx2 = tx.subscribe(); - /// - /// assert!(rx.same_channel(&rx2)); - /// - /// let (_tx3, rx3) = broadcast::channel::<()>(16); - /// - /// assert!(!rx3.same_channel(&rx2)); - /// } - /// ``` - pub fn same_channel(&self, other: &Self) -> bool { - Arc::ptr_eq(&self.shared, &other.shared) - } - - /// Locks the next value if there is one. - fn recv_ref( - &mut self, - waiter: Option<(&UnsafeCell, &Waker)>, - ) -> Result, TryRecvError> { - let idx = (self.next & self.shared.mask as u64) as usize; - - // The slot holding the next value to read - let mut slot = self.shared.buffer[idx].read().unwrap(); - - if slot.pos != self.next { - // Release the `slot` lock before attempting to acquire the `tail` - // lock. This is required because `send2` acquires the tail lock - // first followed by the slot lock. Acquiring the locks in reverse - // order here would result in a potential deadlock: `recv_ref` - // acquires the `slot` lock and attempts to acquire the `tail` lock - // while `send2` acquired the `tail` lock and attempts to acquire - // the slot lock. - drop(slot); - - let mut old_waker = None; - - let mut tail = self.shared.tail.lock(); - - // Acquire slot lock again - slot = self.shared.buffer[idx].read().unwrap(); - - // Make sure the position did not change. This could happen in the - // unlikely event that the buffer is wrapped between dropping the - // read lock and acquiring the tail lock. - if slot.pos != self.next { - let next_pos = slot.pos.wrapping_add(self.shared.buffer.len() as u64); - - if next_pos == self.next { - // At this point the channel is empty for *this* receiver. If - // it's been closed, then that's what we return, otherwise we - // set a waker and return empty. 
- if tail.closed { - return Err(TryRecvError::Closed); - } - - // Store the waker - if let Some((waiter, waker)) = waiter { - // Safety: called while locked. - unsafe { - // Only queue if not already queued - waiter.with_mut(|ptr| { - // If there is no waker **or** if the currently - // stored waker references a **different** task, - // track the tasks' waker to be notified on - // receipt of a new value. - match (*ptr).waker { - Some(ref w) if w.will_wake(waker) => {} - _ => { - old_waker = std::mem::replace( - &mut (*ptr).waker, - Some(waker.clone()), - ); - } - } - - if !(*ptr).queued { - (*ptr).queued = true; - tail.waiters.push_front(NonNull::new_unchecked(&mut *ptr)); - } - }); - } - } - - // Drop the old waker after releasing the locks. - drop(slot); - drop(tail); - drop(old_waker); - - return Err(TryRecvError::Empty); - } - - // At this point, the receiver has lagged behind the sender by - // more than the channel capacity. The receiver will attempt to - // catch up by skipping dropped messages and setting the - // internal cursor to the **oldest** message stored by the - // channel. - let next = tail.pos.wrapping_sub(self.shared.buffer.len() as u64); - - let missed = next.wrapping_sub(self.next); - - drop(tail); - - // The receiver is slow but no values have been missed - if missed == 0 { - self.next = self.next.wrapping_add(1); - - return Ok(RecvGuard { slot }); - } - - self.next = next; - - return Err(TryRecvError::Lagged(missed)); - } - } - - self.next = self.next.wrapping_add(1); - - Ok(RecvGuard { slot }) - } -} - -impl Receiver { - /// Re-subscribes to the channel starting from the current tail element. - /// - /// This [`Receiver`] handle will receive a clone of all values sent - /// **after** it has resubscribed. This will not include elements that are - /// in the queue of the current receiver. Consider the following example. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = broadcast::channel(2); - /// - /// tx.send(1).unwrap(); - /// let mut rx2 = rx.resubscribe(); - /// tx.send(2).unwrap(); - /// - /// assert_eq!(rx2.recv().await.unwrap(), 2); - /// assert_eq!(rx.recv().await.unwrap(), 1); - /// } - /// ``` - pub fn resubscribe(&self) -> Self { - let shared = self.shared.clone(); - new_receiver(shared) - } - /// Receives the next value for this receiver. - /// - /// Each [`Receiver`] handle will receive a clone of all values sent - /// **after** it has subscribed. - /// - /// `Err(RecvError::Closed)` is returned when all `Sender` halves have - /// dropped, indicating that no further values can be sent on the channel. - /// - /// If the [`Receiver`] handle falls behind, once the channel is full, newly - /// sent values will overwrite old values. At this point, a call to [`recv`] - /// will return with `Err(RecvError::Lagged)` and the [`Receiver`]'s - /// internal cursor is updated to point to the oldest value still held by - /// the channel. A subsequent call to [`recv`] will return this value - /// **unless** it has been since overwritten. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `recv` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, it is guaranteed that no messages were received on this - /// channel. 
- /// - /// [`Receiver`]: crate::sync::broadcast::Receiver - /// [`recv`]: crate::sync::broadcast::Receiver::recv - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx1) = broadcast::channel(16); - /// let mut rx2 = tx.subscribe(); - /// - /// tokio::spawn(async move { - /// assert_eq!(rx1.recv().await.unwrap(), 10); - /// assert_eq!(rx1.recv().await.unwrap(), 20); - /// }); - /// - /// tokio::spawn(async move { - /// assert_eq!(rx2.recv().await.unwrap(), 10); - /// assert_eq!(rx2.recv().await.unwrap(), 20); - /// }); - /// - /// tx.send(10).unwrap(); - /// tx.send(20).unwrap(); - /// } - /// ``` - /// - /// Handling lag - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = broadcast::channel(2); - /// - /// tx.send(10).unwrap(); - /// tx.send(20).unwrap(); - /// tx.send(30).unwrap(); - /// - /// // The receiver lagged behind - /// assert!(rx.recv().await.is_err()); - /// - /// // At this point, we can abort or continue with lost messages - /// - /// assert_eq!(20, rx.recv().await.unwrap()); - /// assert_eq!(30, rx.recv().await.unwrap()); - /// } - /// ``` - pub async fn recv(&mut self) -> Result { - let fut = Recv::new(self); - fut.await - } - - /// Attempts to return a pending value on this receiver without awaiting. - /// - /// This is useful for a flavor of "optimistic check" before deciding to - /// await on a receiver. - /// - /// Compared with [`recv`], this function has three failure cases instead of two - /// (one for closed, one for an empty buffer, one for a lagging receiver). - /// - /// `Err(TryRecvError::Closed)` is returned when all `Sender` halves have - /// dropped, indicating that no further values can be sent on the channel. - /// - /// If the [`Receiver`] handle falls behind, once the channel is full, newly - /// sent values will overwrite old values. 
At this point, a call to [`recv`] - /// will return with `Err(TryRecvError::Lagged)` and the [`Receiver`]'s - /// internal cursor is updated to point to the oldest value still held by - /// the channel. A subsequent call to [`try_recv`] will return this value - /// **unless** it has been since overwritten. If there are no values to - /// receive, `Err(TryRecvError::Empty)` is returned. - /// - /// [`recv`]: crate::sync::broadcast::Receiver::recv - /// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv - /// [`Receiver`]: crate::sync::broadcast::Receiver - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = broadcast::channel(16); - /// - /// assert!(rx.try_recv().is_err()); - /// - /// tx.send(10).unwrap(); - /// - /// let value = rx.try_recv().unwrap(); - /// assert_eq!(10, value); - /// } - /// ``` - pub fn try_recv(&mut self) -> Result { - let guard = self.recv_ref(None)?; - guard.clone_value().ok_or(TryRecvError::Closed) - } - - /// Blocking receive to call outside of asynchronous contexts. - /// - /// # Panics - /// - /// This function panics if called within an asynchronous execution - /// context. 
- /// - /// # Examples - /// ``` - /// use std::thread; - /// use tokio::sync::broadcast; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = broadcast::channel(16); - /// - /// let sync_code = thread::spawn(move || { - /// assert_eq!(rx.blocking_recv(), Ok(10)); - /// }); - /// - /// let _ = tx.send(10); - /// sync_code.join().unwrap(); - /// } - /// ``` - pub fn blocking_recv(&mut self) -> Result { - crate::future::block_on(self.recv()) - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - let mut tail = self.shared.tail.lock(); - - tail.rx_cnt -= 1; - let until = tail.pos; - - drop(tail); - - while self.next < until { - match self.recv_ref(None) { - Ok(_) => {} - // The channel is closed - Err(TryRecvError::Closed) => break, - // Ignore lagging, we will catch up - Err(TryRecvError::Lagged(..)) => {} - // Can't be empty - Err(TryRecvError::Empty) => panic!("unexpected empty broadcast channel"), - } - } - } -} - -impl<'a, T> Recv<'a, T> { - fn new(receiver: &'a mut Receiver) -> Recv<'a, T> { - Recv { - receiver, - waiter: UnsafeCell::new(Waiter { - queued: false, - waker: None, - pointers: linked_list::Pointers::new(), - _p: PhantomPinned, - }), - } - } - - /// A custom `project` implementation is used in place of `pin-project-lite` - /// as a custom drop implementation is needed. 
- fn project(self: Pin<&mut Self>) -> (&mut Receiver, &UnsafeCell) { - unsafe { - // Safety: Receiver is Unpin - is_unpin::<&mut Receiver>(); - - let me = self.get_unchecked_mut(); - (me.receiver, &me.waiter) - } - } -} - -impl<'a, T> Future for Recv<'a, T> -where - T: Clone, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - - let (receiver, waiter) = self.project(); - - let guard = match receiver.recv_ref(Some((waiter, cx.waker()))) { - Ok(value) => value, - Err(TryRecvError::Empty) => return Poll::Pending, - Err(TryRecvError::Lagged(n)) => return Poll::Ready(Err(RecvError::Lagged(n))), - Err(TryRecvError::Closed) => return Poll::Ready(Err(RecvError::Closed)), - }; - - Poll::Ready(guard.clone_value().ok_or(RecvError::Closed)) - } -} - -impl<'a, T> Drop for Recv<'a, T> { - fn drop(&mut self) { - // Acquire the tail lock. This is required for safety before accessing - // the waiter node. - let mut tail = self.receiver.shared.tail.lock(); - - // safety: tail lock is held - let queued = self.waiter.with(|ptr| unsafe { (*ptr).queued }); - - if queued { - // Remove the node - // - // safety: tail lock is held and the wait node is verified to be in - // the list. - unsafe { - self.waiter.with_mut(|ptr| { - tail.waiters.remove((&mut *ptr).into()); - }); - } - } - } -} - -/// # Safety -/// -/// `Waiter` is forced to be !Unpin. 
-unsafe impl linked_list::Link for Waiter { - type Handle = NonNull; - type Target = Waiter; - - fn as_raw(handle: &NonNull) -> NonNull { - *handle - } - - unsafe fn from_raw(ptr: NonNull) -> NonNull { - ptr - } - - unsafe fn pointers(target: NonNull) -> NonNull> { - Waiter::addr_of_pointers(target) - } -} - -impl fmt::Debug for Sender { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "broadcast::Sender") - } -} - -impl fmt::Debug for Receiver { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "broadcast::Receiver") - } -} - -impl<'a, T> RecvGuard<'a, T> { - fn clone_value(&self) -> Option - where - T: Clone, - { - self.slot.val.with(|ptr| unsafe { (*ptr).clone() }) - } -} - -impl<'a, T> Drop for RecvGuard<'a, T> { - fn drop(&mut self) { - // Decrement the remaining counter - if 1 == self.slot.rem.fetch_sub(1, SeqCst) { - // Safety: Last receiver, drop the value - self.slot.val.with_mut(|ptr| unsafe { *ptr = None }); - } - } -} - -fn is_unpin() {} - -#[cfg(not(loom))] -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn receiver_count_on_sender_constructor() { - let sender = Sender::::new(16); - assert_eq!(sender.receiver_count(), 0); - - let rx_1 = sender.subscribe(); - assert_eq!(sender.receiver_count(), 1); - - let rx_2 = rx_1.resubscribe(); - assert_eq!(sender.receiver_count(), 2); - - let rx_3 = sender.subscribe(); - assert_eq!(sender.receiver_count(), 3); - - drop(rx_3); - drop(rx_1); - assert_eq!(sender.receiver_count(), 1); - - drop(rx_2); - assert_eq!(sender.receiver_count(), 0); - } - - #[cfg(not(loom))] - #[test] - fn receiver_count_on_channel_constructor() { - let (sender, rx) = channel::(16); - assert_eq!(sender.receiver_count(), 1); - - let _rx_2 = rx.resubscribe(); - assert_eq!(sender.receiver_count(), 2); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/mod.rs 
2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,504 +0,0 @@ -#![cfg_attr(loom, allow(dead_code, unreachable_pub, unused_imports))] - -//! Synchronization primitives for use in asynchronous contexts. -//! -//! Tokio programs tend to be organized as a set of [tasks] where each task -//! operates independently and may be executed on separate physical threads. The -//! synchronization primitives provided in this module permit these independent -//! tasks to communicate together. -//! -//! [tasks]: crate::task -//! -//! # Message passing -//! -//! The most common form of synchronization in a Tokio program is message -//! passing. Two tasks operate independently and send messages to each other to -//! synchronize. Doing so has the advantage of avoiding shared state. -//! -//! Message passing is implemented using channels. A channel supports sending a -//! message from one producer task to one or more consumer tasks. There are a -//! few flavors of channels provided by Tokio. Each channel flavor supports -//! different message passing patterns. When a channel supports multiple -//! producers, many separate tasks may **send** messages. When a channel -//! supports multiple consumers, many different separate tasks may **receive** -//! messages. -//! -//! Tokio provides many different channel flavors as different message passing -//! patterns are best handled with different implementations. -//! -//! ## `oneshot` channel -//! -//! The [`oneshot` channel][oneshot] supports sending a **single** value from a -//! single producer to a single consumer. This channel is usually used to send -//! the result of a computation to a waiter. -//! -//! **Example:** using a [`oneshot` channel][oneshot] to receive the result of a -//! computation. -//! -//! ``` -//! use tokio::sync::oneshot; -//! -//! async fn some_computation() -> String { -//! "represents the result of the computation".to_string() -//! 
} -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, rx) = oneshot::channel(); -//! -//! tokio::spawn(async move { -//! let res = some_computation().await; -//! tx.send(res).unwrap(); -//! }); -//! -//! // Do other work while the computation is happening in the background -//! -//! // Wait for the computation result -//! let res = rx.await.unwrap(); -//! } -//! ``` -//! -//! Note, if the task produces a computation result as its final -//! action before terminating, the [`JoinHandle`] can be used to -//! receive that value instead of allocating resources for the -//! `oneshot` channel. Awaiting on [`JoinHandle`] returns `Result`. If -//! the task panics, the `Joinhandle` yields `Err` with the panic -//! cause. -//! -//! **Example:** -//! -//! ``` -//! async fn some_computation() -> String { -//! "the result of the computation".to_string() -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! let join_handle = tokio::spawn(async move { -//! some_computation().await -//! }); -//! -//! // Do other work while the computation is happening in the background -//! -//! // Wait for the computation result -//! let res = join_handle.await.unwrap(); -//! } -//! ``` -//! -//! [`JoinHandle`]: crate::task::JoinHandle -//! -//! ## `mpsc` channel -//! -//! The [`mpsc` channel][mpsc] supports sending **many** values from **many** -//! producers to a single consumer. This channel is often used to send work to a -//! task or to receive the result of many computations. -//! -//! This is also the channel you should use if you want to send many messages -//! from a single producer to a single consumer. There is no dedicated spsc -//! channel. -//! -//! **Example:** using an mpsc to incrementally stream the results of a series -//! of computations. -//! -//! ``` -//! use tokio::sync::mpsc; -//! -//! async fn some_computation(input: u32) -> String { -//! format!("the result of computation {}", input) -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! 
let (tx, mut rx) = mpsc::channel(100); -//! -//! tokio::spawn(async move { -//! for i in 0..10 { -//! let res = some_computation(i).await; -//! tx.send(res).await.unwrap(); -//! } -//! }); -//! -//! while let Some(res) = rx.recv().await { -//! println!("got = {}", res); -//! } -//! } -//! ``` -//! -//! The argument to `mpsc::channel` is the channel capacity. This is the maximum -//! number of values that can be stored in the channel pending receipt at any -//! given time. Properly setting this value is key in implementing robust -//! programs as the channel capacity plays a critical part in handling back -//! pressure. -//! -//! A common concurrency pattern for resource management is to spawn a task -//! dedicated to managing that resource and using message passing between other -//! tasks to interact with the resource. The resource may be anything that may -//! not be concurrently used. Some examples include a socket and program state. -//! For example, if multiple tasks need to send data over a single socket, spawn -//! a task to manage the socket and use a channel to synchronize. -//! -//! **Example:** sending data from many tasks over a single socket using message -//! passing. -//! -//! ```no_run -//! use tokio::io::{self, AsyncWriteExt}; -//! use tokio::net::TcpStream; -//! use tokio::sync::mpsc; -//! -//! #[tokio::main] -//! async fn main() -> io::Result<()> { -//! let mut socket = TcpStream::connect("www.example.com:1234").await?; -//! let (tx, mut rx) = mpsc::channel(100); -//! -//! for _ in 0..10 { -//! // Each task needs its own `tx` handle. This is done by cloning the -//! // original handle. -//! let tx = tx.clone(); -//! -//! tokio::spawn(async move { -//! tx.send(&b"data to write"[..]).await.unwrap(); -//! }); -//! } -//! -//! // The `rx` half of the channel returns `None` once **all** `tx` clones -//! // drop. To ensure `None` is returned, drop the handle owned by the -//! // current task. If this `tx` handle is not dropped, there will always -//! 
// be a single outstanding `tx` handle. -//! drop(tx); -//! -//! while let Some(res) = rx.recv().await { -//! socket.write_all(res).await?; -//! } -//! -//! Ok(()) -//! } -//! ``` -//! -//! The [`mpsc`] and [`oneshot`] channels can be combined to provide a request / -//! response type synchronization pattern with a shared resource. A task is -//! spawned to synchronize a resource and waits on commands received on a -//! [`mpsc`] channel. Each command includes a [`oneshot`] `Sender` on which the -//! result of the command is sent. -//! -//! **Example:** use a task to synchronize a `u64` counter. Each task sends an -//! "fetch and increment" command. The counter value **before** the increment is -//! sent over the provided `oneshot` channel. -//! -//! ``` -//! use tokio::sync::{oneshot, mpsc}; -//! use Command::Increment; -//! -//! enum Command { -//! Increment, -//! // Other commands can be added here -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! let (cmd_tx, mut cmd_rx) = mpsc::channel::<(Command, oneshot::Sender)>(100); -//! -//! // Spawn a task to manage the counter -//! tokio::spawn(async move { -//! let mut counter: u64 = 0; -//! -//! while let Some((cmd, response)) = cmd_rx.recv().await { -//! match cmd { -//! Increment => { -//! let prev = counter; -//! counter += 1; -//! response.send(prev).unwrap(); -//! } -//! } -//! } -//! }); -//! -//! let mut join_handles = vec![]; -//! -//! // Spawn tasks that will send the increment command. -//! for _ in 0..10 { -//! let cmd_tx = cmd_tx.clone(); -//! -//! join_handles.push(tokio::spawn(async move { -//! let (resp_tx, resp_rx) = oneshot::channel(); -//! -//! cmd_tx.send((Increment, resp_tx)).await.ok().unwrap(); -//! let res = resp_rx.await.unwrap(); -//! -//! println!("previous value = {}", res); -//! })); -//! } -//! -//! // Wait for all tasks to complete -//! for join_handle in join_handles.drain(..) { -//! join_handle.await.unwrap(); -//! } -//! } -//! ``` -//! -//! ## `broadcast` channel -//! -//! 
The [`broadcast` channel] supports sending **many** values from -//! **many** producers to **many** consumers. Each consumer will receive -//! **each** value. This channel can be used to implement "fan out" style -//! patterns common with pub / sub or "chat" systems. -//! -//! This channel tends to be used less often than `oneshot` and `mpsc` but still -//! has its use cases. -//! -//! This is also the channel you should use if you want to broadcast values from -//! a single producer to many consumers. There is no dedicated spmc broadcast -//! channel. -//! -//! Basic usage -//! -//! ``` -//! use tokio::sync::broadcast; -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, mut rx1) = broadcast::channel(16); -//! let mut rx2 = tx.subscribe(); -//! -//! tokio::spawn(async move { -//! assert_eq!(rx1.recv().await.unwrap(), 10); -//! assert_eq!(rx1.recv().await.unwrap(), 20); -//! }); -//! -//! tokio::spawn(async move { -//! assert_eq!(rx2.recv().await.unwrap(), 10); -//! assert_eq!(rx2.recv().await.unwrap(), 20); -//! }); -//! -//! tx.send(10).unwrap(); -//! tx.send(20).unwrap(); -//! } -//! ``` -//! -//! [`broadcast` channel]: crate::sync::broadcast -//! -//! ## `watch` channel -//! -//! The [`watch` channel] supports sending **many** values from a **single** -//! producer to **many** consumers. However, only the **most recent** value is -//! stored in the channel. Consumers are notified when a new value is sent, but -//! there is no guarantee that consumers will see **all** values. -//! -//! The [`watch` channel] is similar to a [`broadcast` channel] with capacity 1. -//! -//! Use cases for the [`watch` channel] include broadcasting configuration -//! changes or signalling program state changes, such as transitioning to -//! shutdown. -//! -//! **Example:** use a [`watch` channel] to notify tasks of configuration -//! changes. In this example, a configuration file is checked periodically. When -//! 
the file changes, the configuration changes are signalled to consumers. -//! -//! ``` -//! use tokio::sync::watch; -//! use tokio::time::{self, Duration, Instant}; -//! -//! use std::io; -//! -//! #[derive(Debug, Clone, Eq, PartialEq)] -//! struct Config { -//! timeout: Duration, -//! } -//! -//! impl Config { -//! async fn load_from_file() -> io::Result { -//! // file loading and deserialization logic here -//! # Ok(Config { timeout: Duration::from_secs(1) }) -//! } -//! } -//! -//! async fn my_async_operation() { -//! // Do something here -//! } -//! -//! #[tokio::main] -//! async fn main() { -//! // Load initial configuration value -//! let mut config = Config::load_from_file().await.unwrap(); -//! -//! // Create the watch channel, initialized with the loaded configuration -//! let (tx, rx) = watch::channel(config.clone()); -//! -//! // Spawn a task to monitor the file. -//! tokio::spawn(async move { -//! loop { -//! // Wait 10 seconds between checks -//! time::sleep(Duration::from_secs(10)).await; -//! -//! // Load the configuration file -//! let new_config = Config::load_from_file().await.unwrap(); -//! -//! // If the configuration changed, send the new config value -//! // on the watch channel. -//! if new_config != config { -//! tx.send(new_config.clone()).unwrap(); -//! config = new_config; -//! } -//! } -//! }); -//! -//! let mut handles = vec![]; -//! -//! // Spawn tasks that runs the async operation for at most `timeout`. If -//! // the timeout elapses, restart the operation. -//! // -//! // The task simultaneously watches the `Config` for changes. When the -//! // timeout duration changes, the timeout is updated without restarting -//! // the in-flight operation. -//! for _ in 0..5 { -//! // Clone a config watch handle for use in this task -//! let mut rx = rx.clone(); -//! -//! let handle = tokio::spawn(async move { -//! // Start the initial operation and pin the future to the stack. -//! // Pinning to the stack is required to resume the operation -//! 
// across multiple calls to `select!` -//! let op = my_async_operation(); -//! tokio::pin!(op); -//! -//! // Get the initial config value -//! let mut conf = rx.borrow().clone(); -//! -//! let mut op_start = Instant::now(); -//! let sleep = time::sleep_until(op_start + conf.timeout); -//! tokio::pin!(sleep); -//! -//! loop { -//! tokio::select! { -//! _ = &mut sleep => { -//! // The operation elapsed. Restart it -//! op.set(my_async_operation()); -//! -//! // Track the new start time -//! op_start = Instant::now(); -//! -//! // Restart the timeout -//! sleep.set(time::sleep_until(op_start + conf.timeout)); -//! } -//! _ = rx.changed() => { -//! conf = rx.borrow_and_update().clone(); -//! -//! // The configuration has been updated. Update the -//! // `sleep` using the new `timeout` value. -//! sleep.as_mut().reset(op_start + conf.timeout); -//! } -//! _ = &mut op => { -//! // The operation completed! -//! return -//! } -//! } -//! } -//! }); -//! -//! handles.push(handle); -//! } -//! -//! for handle in handles.drain(..) { -//! handle.await.unwrap(); -//! } -//! } -//! ``` -//! -//! [`watch` channel]: mod@crate::sync::watch -//! [`broadcast` channel]: mod@crate::sync::broadcast -//! -//! # State synchronization -//! -//! The remaining synchronization primitives focus on synchronizing state. -//! These are asynchronous equivalents to versions provided by `std`. They -//! operate in a similar way as their `std` counterparts but will wait -//! asynchronously instead of blocking the thread. -//! -//! * [`Barrier`] Ensures multiple tasks will wait for each other to reach a -//! point in the program, before continuing execution all together. -//! -//! * [`Mutex`] Mutual Exclusion mechanism, which ensures that at most one -//! thread at a time is able to access some data. -//! -//! * [`Notify`] Basic task notification. `Notify` supports notifying a -//! receiving task without sending data. In this case, the task wakes up and -//! resumes processing. -//! -//! 
* [`RwLock`] Provides a mutual exclusion mechanism which allows multiple -//! readers at the same time, while allowing only one writer at a time. In -//! some cases, this can be more efficient than a mutex. -//! -//! * [`Semaphore`] Limits the amount of concurrency. A semaphore holds a -//! number of permits, which tasks may request in order to enter a critical -//! section. Semaphores are useful for implementing limiting or bounding of -//! any kind. - -cfg_sync! { - /// Named future types. - pub mod futures { - pub use super::notify::Notified; - } - - mod barrier; - pub use barrier::{Barrier, BarrierWaitResult}; - - pub mod broadcast; - - pub mod mpsc; - - mod mutex; - pub use mutex::{Mutex, MutexGuard, TryLockError, OwnedMutexGuard, MappedMutexGuard, OwnedMappedMutexGuard}; - - pub(crate) mod notify; - pub use notify::Notify; - - pub mod oneshot; - - pub(crate) mod batch_semaphore; - pub use batch_semaphore::{AcquireError, TryAcquireError}; - - mod semaphore; - pub use semaphore::{Semaphore, SemaphorePermit, OwnedSemaphorePermit}; - - mod rwlock; - pub use rwlock::RwLock; - pub use rwlock::owned_read_guard::OwnedRwLockReadGuard; - pub use rwlock::owned_write_guard::OwnedRwLockWriteGuard; - pub use rwlock::owned_write_guard_mapped::OwnedRwLockMappedWriteGuard; - pub use rwlock::read_guard::RwLockReadGuard; - pub use rwlock::write_guard::RwLockWriteGuard; - pub use rwlock::write_guard_mapped::RwLockMappedWriteGuard; - - mod task; - pub(crate) use task::AtomicWaker; - - mod once_cell; - pub use self::once_cell::{OnceCell, SetError}; - - pub mod watch; -} - -cfg_not_sync! { - cfg_fs! { - pub(crate) mod batch_semaphore; - mod mutex; - pub(crate) use mutex::Mutex; - } - - #[cfg(any(feature = "rt", feature = "signal", all(unix, feature = "process")))] - pub(crate) mod notify; - - #[cfg(any(feature = "rt", all(windows, feature = "process")))] - pub(crate) mod oneshot; - - cfg_atomic_waker_impl! 
{ - mod task; - pub(crate) use task::AtomicWaker; - } - - #[cfg(any(feature = "signal", all(unix, feature = "process")))] - pub(crate) mod watch; -} - -/// Unit tests -#[cfg(test)] -mod tests; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/block.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/block.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/block.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/block.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,448 +0,0 @@ -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize}; - -use std::alloc::Layout; -use std::mem::MaybeUninit; -use std::ops; -use std::ptr::{self, NonNull}; -use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Release}; - -/// A block in a linked list. -/// -/// Each block in the list can hold up to `BLOCK_CAP` messages. -pub(crate) struct Block { - /// The header fields. - header: BlockHeader, - - /// Array containing values pushed into the block. Values are stored in a - /// continuous array in order to improve cache line behavior when reading. - /// The values must be manually dropped. - values: Values, -} - -/// Extra fields for a `Block`. -struct BlockHeader { - /// The start index of this block. - /// - /// Slots in this block have indices in `start_index .. start_index + BLOCK_CAP`. - start_index: usize, - - /// The next block in the linked list. - next: AtomicPtr>, - - /// Bitfield tracking slots that are ready to have their values consumed. - ready_slots: AtomicUsize, - - /// The observed `tail_position` value *after* the block has been passed by - /// `block_tail`. - observed_tail_position: UnsafeCell, -} - -pub(crate) enum Read { - Value(T), - Closed, -} - -#[repr(transparent)] -struct Values([UnsafeCell>; BLOCK_CAP]); - -use super::BLOCK_CAP; - -/// Masks an index to get the block identifier. 
-const BLOCK_MASK: usize = !(BLOCK_CAP - 1); - -/// Masks an index to get the value offset in a block. -const SLOT_MASK: usize = BLOCK_CAP - 1; - -/// Flag tracking that a block has gone through the sender's release routine. -/// -/// When this is set, the receiver may consider freeing the block. -const RELEASED: usize = 1 << BLOCK_CAP; - -/// Flag tracking all senders dropped. -/// -/// When this flag is set, the send half of the channel has closed. -const TX_CLOSED: usize = RELEASED << 1; - -/// Mask covering all bits used to track slot readiness. -const READY_MASK: usize = RELEASED - 1; - -/// Returns the index of the first slot in the block referenced by `slot_index`. -#[inline(always)] -pub(crate) fn start_index(slot_index: usize) -> usize { - BLOCK_MASK & slot_index -} - -/// Returns the offset into the block referenced by `slot_index`. -#[inline(always)] -pub(crate) fn offset(slot_index: usize) -> usize { - SLOT_MASK & slot_index -} - -generate_addr_of_methods! { - impl Block { - unsafe fn addr_of_header(self: NonNull) -> NonNull> { - &self.header - } - - unsafe fn addr_of_values(self: NonNull) -> NonNull> { - &self.values - } - } -} - -impl Block { - pub(crate) fn new(start_index: usize) -> Box> { - unsafe { - // Allocate the block on the heap. - // SAFETY: The size of the Block is non-zero, since it is at least the size of the header. - let block = std::alloc::alloc(Layout::new::>()) as *mut Block; - let block = match NonNull::new(block) { - Some(block) => block, - None => std::alloc::handle_alloc_error(Layout::new::>()), - }; - - // Write the header to the block. - Block::addr_of_header(block).as_ptr().write(BlockHeader { - // The absolute index in the channel of the first slot in the block. - start_index, - - // Pointer to the next block in the linked list. - next: AtomicPtr::new(ptr::null_mut()), - - ready_slots: AtomicUsize::new(0), - - observed_tail_position: UnsafeCell::new(0), - }); - - // Initialize the values array. 
- Values::initialize(Block::addr_of_values(block)); - - // Convert the pointer to a `Box`. - // Safety: The raw pointer was allocated using the global allocator, and with - // the layout for a `Block`, so it's valid to convert it to box. - Box::from_raw(block.as_ptr()) - } - } - - /// Returns `true` if the block matches the given index. - pub(crate) fn is_at_index(&self, index: usize) -> bool { - debug_assert!(offset(index) == 0); - self.header.start_index == index - } - - /// Returns the number of blocks between `self` and the block at the - /// specified index. - /// - /// `start_index` must represent a block *after* `self`. - pub(crate) fn distance(&self, other_index: usize) -> usize { - debug_assert!(offset(other_index) == 0); - other_index.wrapping_sub(self.header.start_index) / BLOCK_CAP - } - - /// Reads the value at the given offset. - /// - /// Returns `None` if the slot is empty. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * No concurrent access to the slot. - pub(crate) unsafe fn read(&self, slot_index: usize) -> Option> { - let offset = offset(slot_index); - - let ready_bits = self.header.ready_slots.load(Acquire); - - if !is_ready(ready_bits, offset) { - if is_tx_closed(ready_bits) { - return Some(Read::Closed); - } - - return None; - } - - // Get the value - let value = self.values[offset].with(|ptr| ptr::read(ptr)); - - Some(Read::Value(value.assume_init())) - } - - /// Writes a value to the block at the given offset. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * The slot is empty. - /// * No concurrent access to the slot. - pub(crate) unsafe fn write(&self, slot_index: usize, value: T) { - // Get the offset into the block - let slot_offset = offset(slot_index); - - self.values[slot_offset].with_mut(|ptr| { - ptr::write(ptr, MaybeUninit::new(value)); - }); - - // Release the value. After this point, the slot ref may no longer - // be used. 
It is possible for the receiver to free the memory at - // any point. - self.set_ready(slot_offset); - } - - /// Signal to the receiver that the sender half of the list is closed. - pub(crate) unsafe fn tx_close(&self) { - self.header.ready_slots.fetch_or(TX_CLOSED, Release); - } - - /// Resets the block to a blank state. This enables reusing blocks in the - /// channel. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * All slots are empty. - /// * The caller holds a unique pointer to the block. - pub(crate) unsafe fn reclaim(&mut self) { - self.header.start_index = 0; - self.header.next = AtomicPtr::new(ptr::null_mut()); - self.header.ready_slots = AtomicUsize::new(0); - } - - /// Releases the block to the rx half for freeing. - /// - /// This function is called by the tx half once it can be guaranteed that no - /// more senders will attempt to access the block. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * The block will no longer be accessed by any sender. - pub(crate) unsafe fn tx_release(&self, tail_position: usize) { - // Track the observed tail_position. Any sender targeting a greater - // tail_position is guaranteed to not access this block. - self.header - .observed_tail_position - .with_mut(|ptr| *ptr = tail_position); - - // Set the released bit, signalling to the receiver that it is safe to - // free the block's memory as soon as all slots **prior** to - // `observed_tail_position` have been filled. - self.header.ready_slots.fetch_or(RELEASED, Release); - } - - /// Mark a slot as ready - fn set_ready(&self, slot: usize) { - let mask = 1 << slot; - self.header.ready_slots.fetch_or(mask, Release); - } - - /// Returns `true` when all slots have their `ready` bits set. - /// - /// This indicates that the block is in its final state and will no longer - /// be mutated. - /// - /// # Implementation - /// - /// The implementation walks each slot checking the `ready` flag. 
It might - /// be that it would make more sense to coalesce ready flags as bits in a - /// single atomic cell. However, this could have negative impact on cache - /// behavior as there would be many more mutations to a single slot. - pub(crate) fn is_final(&self) -> bool { - self.header.ready_slots.load(Acquire) & READY_MASK == READY_MASK - } - - /// Returns the `observed_tail_position` value, if set - pub(crate) fn observed_tail_position(&self) -> Option { - if 0 == RELEASED & self.header.ready_slots.load(Acquire) { - None - } else { - Some( - self.header - .observed_tail_position - .with(|ptr| unsafe { *ptr }), - ) - } - } - - /// Loads the next block - pub(crate) fn load_next(&self, ordering: Ordering) -> Option>> { - let ret = NonNull::new(self.header.next.load(ordering)); - - debug_assert!(unsafe { - ret.map(|block| { - block.as_ref().header.start_index == self.header.start_index.wrapping_add(BLOCK_CAP) - }) - .unwrap_or(true) - }); - - ret - } - - /// Pushes `block` as the next block in the link. - /// - /// Returns Ok if successful, otherwise, a pointer to the next block in - /// the list is returned. - /// - /// This requires that the next pointer is null. - /// - /// # Ordering - /// - /// This performs a compare-and-swap on `next` using AcqRel ordering. - /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * `block` is not freed until it has been removed from the list. - pub(crate) unsafe fn try_push( - &self, - block: &mut NonNull>, - success: Ordering, - failure: Ordering, - ) -> Result<(), NonNull>> { - block.as_mut().header.start_index = self.header.start_index.wrapping_add(BLOCK_CAP); - - let next_ptr = self - .header - .next - .compare_exchange(ptr::null_mut(), block.as_ptr(), success, failure) - .unwrap_or_else(|x| x); - - match NonNull::new(next_ptr) { - Some(next_ptr) => Err(next_ptr), - None => Ok(()), - } - } - - /// Grows the `Block` linked list by allocating and appending a new block. 
- /// - /// The next block in the linked list is returned. This may or may not be - /// the one allocated by the function call. - /// - /// # Implementation - /// - /// It is assumed that `self.next` is null. A new block is allocated with - /// `start_index` set to be the next block. A compare-and-swap is performed - /// with AcqRel memory ordering. If the compare-and-swap is successful, the - /// newly allocated block is released to other threads walking the block - /// linked list. If the compare-and-swap fails, the current thread acquires - /// the next block in the linked list, allowing the current thread to access - /// the slots. - pub(crate) fn grow(&self) -> NonNull> { - // Create the new block. It is assumed that the block will become the - // next one after `&self`. If this turns out to not be the case, - // `start_index` is updated accordingly. - let new_block = Block::new(self.header.start_index + BLOCK_CAP); - - let mut new_block = unsafe { NonNull::new_unchecked(Box::into_raw(new_block)) }; - - // Attempt to store the block. The first compare-and-swap attempt is - // "unrolled" due to minor differences in logic - // - // `AcqRel` is used as the ordering **only** when attempting the - // compare-and-swap on self.next. - // - // If the compare-and-swap fails, then the actual value of the cell is - // returned from this function and accessed by the caller. Given this, - // the memory must be acquired. - // - // `Release` ensures that the newly allocated block is available to - // other threads acquiring the next pointer. - let next = NonNull::new( - self.header - .next - .compare_exchange(ptr::null_mut(), new_block.as_ptr(), AcqRel, Acquire) - .unwrap_or_else(|x| x), - ); - - let next = match next { - Some(next) => next, - None => { - // The compare-and-swap succeeded and the newly allocated block - // is successfully pushed. - return new_block; - } - }; - - // There already is a next block in the linked list. 
The newly allocated - // block could be dropped and the discovered next block returned; - // however, that would be wasteful. Instead, the linked list is walked - // by repeatedly attempting to compare-and-swap the pointer into the - // `next` register until the compare-and-swap succeed. - // - // Care is taken to update new_block's start_index field as appropriate. - - let mut curr = next; - - // TODO: Should this iteration be capped? - loop { - let actual = unsafe { curr.as_ref().try_push(&mut new_block, AcqRel, Acquire) }; - - curr = match actual { - Ok(_) => { - return next; - } - Err(curr) => curr, - }; - - crate::loom::thread::yield_now(); - } - } -} - -/// Returns `true` if the specified slot has a value ready to be consumed. -fn is_ready(bits: usize, slot: usize) -> bool { - let mask = 1 << slot; - mask == mask & bits -} - -/// Returns `true` if the closed flag has been set. -fn is_tx_closed(bits: usize) -> bool { - TX_CLOSED == bits & TX_CLOSED -} - -impl Values { - /// Initialize a `Values` struct from a pointer. - /// - /// # Safety - /// - /// The raw pointer must be valid for writing a `Values`. - unsafe fn initialize(_value: NonNull>) { - // When fuzzing, `UnsafeCell` needs to be initialized. - if_loom! 
{ - let p = _value.as_ptr() as *mut UnsafeCell>; - for i in 0..BLOCK_CAP { - p.add(i) - .write(UnsafeCell::new(MaybeUninit::uninit())); - } - } - } -} - -impl ops::Index for Values { - type Output = UnsafeCell>; - - fn index(&self, index: usize) -> &Self::Output { - self.0.index(index) - } -} - -#[cfg(all(test, not(loom)))] -#[test] -fn assert_no_stack_overflow() { - // https://github.com/tokio-rs/tokio/issues/5293 - - struct Foo { - _a: [u8; 2_000_000], - } - - assert_eq!( - Layout::new::>>(), - Layout::new::>() - ); - - let _block = Block::::new(0); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/bounded.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/bounded.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/bounded.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/bounded.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1323 +0,0 @@ -use crate::loom::sync::Arc; -use crate::sync::batch_semaphore::{self as semaphore, TryAcquireError}; -use crate::sync::mpsc::chan; -use crate::sync::mpsc::error::{SendError, TryRecvError, TrySendError}; - -cfg_time! { - use crate::sync::mpsc::error::SendTimeoutError; - use crate::time::Duration; -} - -use std::fmt; -use std::task::{Context, Poll}; - -/// Sends values to the associated `Receiver`. -/// -/// Instances are created by the [`channel`](channel) function. -/// -/// To convert the `Sender` into a `Sink` or use it in a poll function, you can -/// use the [`PollSender`] utility. -/// -/// [`PollSender`]: https://docs.rs/tokio-util/latest/tokio_util/sync/struct.PollSender.html -pub struct Sender { - chan: chan::Tx, -} - -/// A sender that does not prevent the channel from being closed. -/// -/// If all [`Sender`] instances of a channel were dropped and only `WeakSender` -/// instances remain, the channel is closed. -/// -/// In order to send messages, the `WeakSender` needs to be upgraded using -/// [`WeakSender::upgrade`], which returns `Option`. 
It returns `None` -/// if all `Sender`s have been dropped, and otherwise it returns a `Sender`. -/// -/// [`Sender`]: Sender -/// [`WeakSender::upgrade`]: WeakSender::upgrade -/// -/// #Examples -/// -/// ``` -/// use tokio::sync::mpsc::channel; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, _rx) = channel::(15); -/// let tx_weak = tx.downgrade(); -/// -/// // Upgrading will succeed because `tx` still exists. -/// assert!(tx_weak.upgrade().is_some()); -/// -/// // If we drop `tx`, then it will fail. -/// drop(tx); -/// assert!(tx_weak.clone().upgrade().is_none()); -/// } -/// ``` -pub struct WeakSender { - chan: Arc>, -} - -/// Permits to send one value into the channel. -/// -/// `Permit` values are returned by [`Sender::reserve()`] and [`Sender::try_reserve()`] -/// and are used to guarantee channel capacity before generating a message to send. -/// -/// [`Sender::reserve()`]: Sender::reserve -/// [`Sender::try_reserve()`]: Sender::try_reserve -pub struct Permit<'a, T> { - chan: &'a chan::Tx, -} - -/// Owned permit to send one value into the channel. -/// -/// This is identical to the [`Permit`] type, except that it moves the sender -/// rather than borrowing it. -/// -/// `OwnedPermit` values are returned by [`Sender::reserve_owned()`] and -/// [`Sender::try_reserve_owned()`] and are used to guarantee channel capacity -/// before generating a message to send. -/// -/// [`Permit`]: Permit -/// [`Sender::reserve_owned()`]: Sender::reserve_owned -/// [`Sender::try_reserve_owned()`]: Sender::try_reserve_owned -pub struct OwnedPermit { - chan: Option>, -} - -/// Receives values from the associated `Sender`. -/// -/// Instances are created by the [`channel`](channel) function. -/// -/// This receiver can be turned into a `Stream` using [`ReceiverStream`]. -/// -/// [`ReceiverStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.ReceiverStream.html -pub struct Receiver { - /// The channel receiver. 
- chan: chan::Rx, -} - -/// Creates a bounded mpsc channel for communicating between asynchronous tasks -/// with backpressure. -/// -/// The channel will buffer up to the provided number of messages. Once the -/// buffer is full, attempts to send new messages will wait until a message is -/// received from the channel. The provided buffer capacity must be at least 1. -/// -/// All data sent on `Sender` will become available on `Receiver` in the same -/// order as it was sent. -/// -/// The `Sender` can be cloned to `send` to the same channel from multiple code -/// locations. Only one `Receiver` is supported. -/// -/// If the `Receiver` is disconnected while trying to `send`, the `send` method -/// will return a `SendError`. Similarly, if `Sender` is disconnected while -/// trying to `recv`, the `recv` method will return `None`. -/// -/// # Panics -/// -/// Panics if the buffer capacity is 0. -/// -/// # Examples -/// -/// ```rust -/// use tokio::sync::mpsc; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, mut rx) = mpsc::channel(100); -/// -/// tokio::spawn(async move { -/// for i in 0..10 { -/// if let Err(_) = tx.send(i).await { -/// println!("receiver dropped"); -/// return; -/// } -/// } -/// }); -/// -/// while let Some(i) = rx.recv().await { -/// println!("got = {}", i); -/// } -/// } -/// ``` -#[track_caller] -pub fn channel(buffer: usize) -> (Sender, Receiver) { - assert!(buffer > 0, "mpsc bounded channel requires buffer > 0"); - let semaphore = Semaphore { - semaphore: semaphore::Semaphore::new(buffer), - bound: buffer, - }; - let (tx, rx) = chan::channel(semaphore); - - let tx = Sender::new(tx); - let rx = Receiver::new(rx); - - (tx, rx) -} - -/// Channel semaphore is a tuple of the semaphore implementation and a `usize` -/// representing the channel bound. 
-#[derive(Debug)] -pub(crate) struct Semaphore { - pub(crate) semaphore: semaphore::Semaphore, - pub(crate) bound: usize, -} - -impl Receiver { - pub(crate) fn new(chan: chan::Rx) -> Receiver { - Receiver { chan } - } - - /// Receives the next value for this receiver. - /// - /// This method returns `None` if the channel has been closed and there are - /// no remaining messages in the channel's buffer. This indicates that no - /// further values can ever be received from this `Receiver`. The channel is - /// closed when all senders have been dropped, or when [`close`] is called. - /// - /// If there are no messages in the channel's buffer, but the channel has - /// not yet been closed, this method will sleep until a message is sent or - /// the channel is closed. Note that if [`close`] is called, but there are - /// still outstanding [`Permits`] from before it was closed, the channel is - /// not considered closed by `recv` until the permits are released. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `recv` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, it is guaranteed that no messages were received on this - /// channel. 
- /// - /// [`close`]: Self::close - /// [`Permits`]: struct@crate::sync::mpsc::Permit - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(100); - /// - /// tokio::spawn(async move { - /// tx.send("hello").await.unwrap(); - /// }); - /// - /// assert_eq!(Some("hello"), rx.recv().await); - /// assert_eq!(None, rx.recv().await); - /// } - /// ``` - /// - /// Values are buffered: - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(100); - /// - /// tx.send("hello").await.unwrap(); - /// tx.send("world").await.unwrap(); - /// - /// assert_eq!(Some("hello"), rx.recv().await); - /// assert_eq!(Some("world"), rx.recv().await); - /// } - /// ``` - pub async fn recv(&mut self) -> Option { - use crate::future::poll_fn; - poll_fn(|cx| self.chan.recv(cx)).await - } - - /// Tries to receive the next value for this receiver. - /// - /// This method returns the [`Empty`] error if the channel is currently - /// empty, but there are still outstanding [senders] or [permits]. - /// - /// This method returns the [`Disconnected`] error if the channel is - /// currently empty, and there are no outstanding [senders] or [permits]. - /// - /// Unlike the [`poll_recv`] method, this method will never return an - /// [`Empty`] error spuriously. 
- /// - /// [`Empty`]: crate::sync::mpsc::error::TryRecvError::Empty - /// [`Disconnected`]: crate::sync::mpsc::error::TryRecvError::Disconnected - /// [`poll_recv`]: Self::poll_recv - /// [senders]: crate::sync::mpsc::Sender - /// [permits]: crate::sync::mpsc::Permit - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// use tokio::sync::mpsc::error::TryRecvError; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(100); - /// - /// tx.send("hello").await.unwrap(); - /// - /// assert_eq!(Ok("hello"), rx.try_recv()); - /// assert_eq!(Err(TryRecvError::Empty), rx.try_recv()); - /// - /// tx.send("hello").await.unwrap(); - /// // Drop the last sender, closing the channel. - /// drop(tx); - /// - /// assert_eq!(Ok("hello"), rx.try_recv()); - /// assert_eq!(Err(TryRecvError::Disconnected), rx.try_recv()); - /// } - /// ``` - pub fn try_recv(&mut self) -> Result { - self.chan.try_recv() - } - - /// Blocking receive to call outside of asynchronous contexts. - /// - /// This method returns `None` if the channel has been closed and there are - /// no remaining messages in the channel's buffer. This indicates that no - /// further values can ever be received from this `Receiver`. The channel is - /// closed when all senders have been dropped, or when [`close`] is called. - /// - /// If there are no messages in the channel's buffer, but the channel has - /// not yet been closed, this method will block until a message is sent or - /// the channel is closed. - /// - /// This method is intended for use cases where you are sending from - /// asynchronous code to synchronous code, and will work even if the sender - /// is not using [`blocking_send`] to send the message. - /// - /// Note that if [`close`] is called, but there are still outstanding - /// [`Permits`] from before it was closed, the channel is not considered - /// closed by `blocking_recv` until the permits are released. 
- /// - /// [`close`]: Self::close - /// [`Permits`]: struct@crate::sync::mpsc::Permit - /// [`blocking_send`]: fn@crate::sync::mpsc::Sender::blocking_send - /// - /// # Panics - /// - /// This function panics if called within an asynchronous execution - /// context. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use tokio::runtime::Runtime; - /// use tokio::sync::mpsc; - /// - /// fn main() { - /// let (tx, mut rx) = mpsc::channel::(10); - /// - /// let sync_code = thread::spawn(move || { - /// assert_eq!(Some(10), rx.blocking_recv()); - /// }); - /// - /// Runtime::new() - /// .unwrap() - /// .block_on(async move { - /// let _ = tx.send(10).await; - /// }); - /// sync_code.join().unwrap() - /// } - /// ``` - #[track_caller] - #[cfg(feature = "sync")] - #[cfg_attr(docsrs, doc(alias = "recv_blocking"))] - pub fn blocking_recv(&mut self) -> Option { - crate::future::block_on(self.recv()) - } - - /// Closes the receiving half of a channel without dropping it. - /// - /// This prevents any further messages from being sent on the channel while - /// still enabling the receiver to drain messages that are buffered. Any - /// outstanding [`Permit`] values will still be able to send messages. - /// - /// To guarantee that no messages are dropped, after calling `close()`, - /// `recv()` must be called until `None` is returned. If there are - /// outstanding [`Permit`] or [`OwnedPermit`] values, the `recv` method will - /// not return `None` until those are released. 
- /// - /// [`Permit`]: Permit - /// [`OwnedPermit`]: OwnedPermit - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(20); - /// - /// tokio::spawn(async move { - /// let mut i = 0; - /// while let Ok(permit) = tx.reserve().await { - /// permit.send(i); - /// i += 1; - /// } - /// }); - /// - /// rx.close(); - /// - /// while let Some(msg) = rx.recv().await { - /// println!("got {}", msg); - /// } - /// - /// // Channel closed and no messages are lost. - /// } - /// ``` - pub fn close(&mut self) { - self.chan.close(); - } - - /// Polls to receive the next message on this channel. - /// - /// This method returns: - /// - /// * `Poll::Pending` if no messages are available but the channel is not - /// closed, or if a spurious failure happens. - /// * `Poll::Ready(Some(message))` if a message is available. - /// * `Poll::Ready(None)` if the channel has been closed and all messages - /// sent before it was closed have been received. - /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided - /// `Context` is scheduled to receive a wakeup when a message is sent on any - /// receiver, or when the channel is closed. Note that on multiple calls to - /// `poll_recv`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. - /// - /// If this method returns `Poll::Pending` due to a spurious failure, then - /// the `Waker` will be notified when the situation causing the spurious - /// failure has been resolved. Note that receiving such a wakeup does not - /// guarantee that the next call will succeed — it could fail with another - /// spurious failure. 
- pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.chan.recv(cx) - } -} - -impl fmt::Debug for Receiver { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Receiver") - .field("chan", &self.chan) - .finish() - } -} - -impl Unpin for Receiver {} - -impl Sender { - pub(crate) fn new(chan: chan::Tx) -> Sender { - Sender { chan } - } - - /// Sends a value, waiting until there is capacity. - /// - /// A successful send occurs when it is determined that the other end of the - /// channel has not hung up already. An unsuccessful send would be one where - /// the corresponding receiver has already been closed. Note that a return - /// value of `Err` means that the data will never be received, but a return - /// value of `Ok` does not mean that the data will be received. It is - /// possible for the corresponding receiver to hang up immediately after - /// this function returns `Ok`. - /// - /// # Errors - /// - /// If the receive half of the channel is closed, either due to [`close`] - /// being called or the [`Receiver`] handle dropping, the function returns - /// an error. The error includes the value passed to `send`. - /// - /// [`close`]: Receiver::close - /// [`Receiver`]: Receiver - /// - /// # Cancel safety - /// - /// If `send` is used as the event in a [`tokio::select!`](crate::select) - /// statement and some other branch completes first, then it is guaranteed - /// that the message was not sent. **However, in that case, the message - /// is dropped and will be lost.** - /// - /// To avoid losing messages, use [`reserve`](Self::reserve) to reserve - /// capacity, then use the returned [`Permit`] to send the message. - /// - /// This channel uses a queue to ensure that calls to `send` and `reserve` - /// complete in the order they were requested. Cancelling a call to - /// `send` makes you lose your place in the queue. 
- /// - /// # Examples - /// - /// In the following example, each call to `send` will block until the - /// previously sent value was received. - /// - /// ```rust - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(1); - /// - /// tokio::spawn(async move { - /// for i in 0..10 { - /// if let Err(_) = tx.send(i).await { - /// println!("receiver dropped"); - /// return; - /// } - /// } - /// }); - /// - /// while let Some(i) = rx.recv().await { - /// println!("got = {}", i); - /// } - /// } - /// ``` - pub async fn send(&self, value: T) -> Result<(), SendError> { - match self.reserve().await { - Ok(permit) => { - permit.send(value); - Ok(()) - } - Err(_) => Err(SendError(value)), - } - } - - /// Completes when the receiver has dropped. - /// - /// This allows the producers to get notified when interest in the produced - /// values is canceled and immediately stop doing work. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once the channel is closed, it stays closed - /// forever and all future calls to `closed` will return immediately. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx1, rx) = mpsc::channel::<()>(1); - /// let tx2 = tx1.clone(); - /// let tx3 = tx1.clone(); - /// let tx4 = tx1.clone(); - /// let tx5 = tx1.clone(); - /// tokio::spawn(async move { - /// drop(rx); - /// }); - /// - /// futures::join!( - /// tx1.closed(), - /// tx2.closed(), - /// tx3.closed(), - /// tx4.closed(), - /// tx5.closed() - /// ); - /// println!("Receiver dropped"); - /// } - /// ``` - pub async fn closed(&self) { - self.chan.closed().await - } - - /// Attempts to immediately send a message on this `Sender` - /// - /// This method differs from [`send`] by returning immediately if the channel's - /// buffer is full or no receiver is waiting to acquire some data. 
Compared - /// with [`send`], this function has two failure cases instead of one (one for - /// disconnection, one for a full buffer). - /// - /// # Errors - /// - /// If the channel capacity has been reached, i.e., the channel has `n` - /// buffered values where `n` is the argument passed to [`channel`], then an - /// error is returned. - /// - /// If the receive half of the channel is closed, either due to [`close`] - /// being called or the [`Receiver`] handle dropping, the function returns - /// an error. The error includes the value passed to `send`. - /// - /// [`send`]: Sender::send - /// [`channel`]: channel - /// [`close`]: Receiver::close - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// // Create a channel with buffer size 1 - /// let (tx1, mut rx) = mpsc::channel(1); - /// let tx2 = tx1.clone(); - /// - /// tokio::spawn(async move { - /// tx1.send(1).await.unwrap(); - /// tx1.send(2).await.unwrap(); - /// // task waits until the receiver receives a value. 
- /// }); - /// - /// tokio::spawn(async move { - /// // This will return an error and send - /// // no message if the buffer is full - /// let _ = tx2.try_send(3); - /// }); - /// - /// let mut msg; - /// msg = rx.recv().await.unwrap(); - /// println!("message {} received", msg); - /// - /// msg = rx.recv().await.unwrap(); - /// println!("message {} received", msg); - /// - /// // Third message may have never been sent - /// match rx.recv().await { - /// Some(msg) => println!("message {} received", msg), - /// None => println!("the third message was never sent"), - /// } - /// } - /// ``` - pub fn try_send(&self, message: T) -> Result<(), TrySendError> { - match self.chan.semaphore().semaphore.try_acquire(1) { - Ok(_) => {} - Err(TryAcquireError::Closed) => return Err(TrySendError::Closed(message)), - Err(TryAcquireError::NoPermits) => return Err(TrySendError::Full(message)), - } - - // Send the message - self.chan.send(message); - Ok(()) - } - - /// Sends a value, waiting until there is capacity, but only for a limited time. - /// - /// Shares the same success and error conditions as [`send`], adding one more - /// condition for an unsuccessful send, which is when the provided timeout has - /// elapsed, and there is no capacity available. - /// - /// [`send`]: Sender::send - /// - /// # Errors - /// - /// If the receive half of the channel is closed, either due to [`close`] - /// being called or the [`Receiver`] having been dropped, - /// the function returns an error. The error includes the value passed to `send`. - /// - /// [`close`]: Receiver::close - /// [`Receiver`]: Receiver - /// - /// # Panics - /// - /// This function panics if it is called outside the context of a Tokio - /// runtime [with time enabled](crate::runtime::Builder::enable_time). - /// - /// # Examples - /// - /// In the following example, each call to `send_timeout` will block until the - /// previously sent value was received, unless the timeout has elapsed. 
- /// - /// ```rust - /// use tokio::sync::mpsc; - /// use tokio::time::{sleep, Duration}; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(1); - /// - /// tokio::spawn(async move { - /// for i in 0..10 { - /// if let Err(e) = tx.send_timeout(i, Duration::from_millis(100)).await { - /// println!("send error: #{:?}", e); - /// return; - /// } - /// } - /// }); - /// - /// while let Some(i) = rx.recv().await { - /// println!("got = {}", i); - /// sleep(Duration::from_millis(200)).await; - /// } - /// } - /// ``` - #[cfg(feature = "time")] - #[cfg_attr(docsrs, doc(cfg(feature = "time")))] - pub async fn send_timeout( - &self, - value: T, - timeout: Duration, - ) -> Result<(), SendTimeoutError> { - let permit = match crate::time::timeout(timeout, self.reserve()).await { - Err(_) => { - return Err(SendTimeoutError::Timeout(value)); - } - Ok(Err(_)) => { - return Err(SendTimeoutError::Closed(value)); - } - Ok(Ok(permit)) => permit, - }; - - permit.send(value); - Ok(()) - } - - /// Blocking send to call outside of asynchronous contexts. - /// - /// This method is intended for use cases where you are sending from - /// synchronous code to asynchronous code, and will work even if the - /// receiver is not using [`blocking_recv`] to receive the message. - /// - /// [`blocking_recv`]: fn@crate::sync::mpsc::Receiver::blocking_recv - /// - /// # Panics - /// - /// This function panics if called within an asynchronous execution - /// context. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use tokio::runtime::Runtime; - /// use tokio::sync::mpsc; - /// - /// fn main() { - /// let (tx, mut rx) = mpsc::channel::(1); - /// - /// let sync_code = thread::spawn(move || { - /// tx.blocking_send(10).unwrap(); - /// }); - /// - /// Runtime::new().unwrap().block_on(async move { - /// assert_eq!(Some(10), rx.recv().await); - /// }); - /// sync_code.join().unwrap() - /// } - /// ``` - #[track_caller] - #[cfg(feature = "sync")] - #[cfg_attr(docsrs, doc(alias = "send_blocking"))] - pub fn blocking_send(&self, value: T) -> Result<(), SendError> { - crate::future::block_on(self.send(value)) - } - - /// Checks if the channel has been closed. This happens when the - /// [`Receiver`] is dropped, or when the [`Receiver::close`] method is - /// called. - /// - /// [`Receiver`]: crate::sync::mpsc::Receiver - /// [`Receiver::close`]: crate::sync::mpsc::Receiver::close - /// - /// ``` - /// let (tx, rx) = tokio::sync::mpsc::channel::<()>(42); - /// assert!(!tx.is_closed()); - /// - /// let tx2 = tx.clone(); - /// assert!(!tx2.is_closed()); - /// - /// drop(rx); - /// assert!(tx.is_closed()); - /// assert!(tx2.is_closed()); - /// ``` - pub fn is_closed(&self) -> bool { - self.chan.is_closed() - } - - /// Waits for channel capacity. Once capacity to send one message is - /// available, it is reserved for the caller. - /// - /// If the channel is full, the function waits for the number of unreceived - /// messages to become less than the channel capacity. Capacity to send one - /// message is reserved for the caller. A [`Permit`] is returned to track - /// the reserved capacity. The [`send`] function on [`Permit`] consumes the - /// reserved capacity. - /// - /// Dropping [`Permit`] without sending a message releases the capacity back - /// to the channel. 
- /// - /// [`Permit`]: Permit - /// [`send`]: Permit::send - /// - /// # Cancel safety - /// - /// This channel uses a queue to ensure that calls to `send` and `reserve` - /// complete in the order they were requested. Cancelling a call to - /// `reserve` makes you lose your place in the queue. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(1); - /// - /// // Reserve capacity - /// let permit = tx.reserve().await.unwrap(); - /// - /// // Trying to send directly on the `tx` will fail due to no - /// // available capacity. - /// assert!(tx.try_send(123).is_err()); - /// - /// // Sending on the permit succeeds - /// permit.send(456); - /// - /// // The value sent on the permit is received - /// assert_eq!(rx.recv().await.unwrap(), 456); - /// } - /// ``` - pub async fn reserve(&self) -> Result, SendError<()>> { - self.reserve_inner().await?; - Ok(Permit { chan: &self.chan }) - } - - /// Waits for channel capacity, moving the `Sender` and returning an owned - /// permit. Once capacity to send one message is available, it is reserved - /// for the caller. - /// - /// This moves the sender _by value_, and returns an owned permit that can - /// be used to send a message into the channel. Unlike [`Sender::reserve`], - /// this method may be used in cases where the permit must be valid for the - /// `'static` lifetime. `Sender`s may be cloned cheaply (`Sender::clone` is - /// essentially a reference count increment, comparable to [`Arc::clone`]), - /// so when multiple [`OwnedPermit`]s are needed or the `Sender` cannot be - /// moved, it can be cloned prior to calling `reserve_owned`. - /// - /// If the channel is full, the function waits for the number of unreceived - /// messages to become less than the channel capacity. Capacity to send one - /// message is reserved for the caller. An [`OwnedPermit`] is returned to - /// track the reserved capacity. 
The [`send`] function on [`OwnedPermit`] - /// consumes the reserved capacity. - /// - /// Dropping the [`OwnedPermit`] without sending a message releases the - /// capacity back to the channel. - /// - /// # Cancel safety - /// - /// This channel uses a queue to ensure that calls to `send` and `reserve` - /// complete in the order they were requested. Cancelling a call to - /// `reserve_owned` makes you lose your place in the queue. - /// - /// # Examples - /// Sending a message using an [`OwnedPermit`]: - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(1); - /// - /// // Reserve capacity, moving the sender. - /// let permit = tx.reserve_owned().await.unwrap(); - /// - /// // Send a message, consuming the permit and returning - /// // the moved sender. - /// let tx = permit.send(123); - /// - /// // The value sent on the permit is received. - /// assert_eq!(rx.recv().await.unwrap(), 123); - /// - /// // The sender can now be used again. - /// tx.send(456).await.unwrap(); - /// } - /// ``` - /// - /// When multiple [`OwnedPermit`]s are needed, or the sender cannot be moved - /// by value, it can be inexpensively cloned before calling `reserve_owned`: - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(1); - /// - /// // Clone the sender and reserve capacity. - /// let permit = tx.clone().reserve_owned().await.unwrap(); - /// - /// // Trying to send directly on the `tx` will fail due to no - /// // available capacity. - /// assert!(tx.try_send(123).is_err()); - /// - /// // Sending on the permit succeeds. 
- /// permit.send(456); - /// - /// // The value sent on the permit is received - /// assert_eq!(rx.recv().await.unwrap(), 456); - /// } - /// ``` - /// - /// [`Sender::reserve`]: Sender::reserve - /// [`OwnedPermit`]: OwnedPermit - /// [`send`]: OwnedPermit::send - /// [`Arc::clone`]: std::sync::Arc::clone - pub async fn reserve_owned(self) -> Result, SendError<()>> { - self.reserve_inner().await?; - Ok(OwnedPermit { - chan: Some(self.chan), - }) - } - - async fn reserve_inner(&self) -> Result<(), SendError<()>> { - crate::trace::async_trace_leaf().await; - - match self.chan.semaphore().semaphore.acquire(1).await { - Ok(_) => Ok(()), - Err(_) => Err(SendError(())), - } - } - - /// Tries to acquire a slot in the channel without waiting for the slot to become - /// available. - /// - /// If the channel is full this function will return [`TrySendError`], otherwise - /// if there is a slot available it will return a [`Permit`] that will then allow you - /// to [`send`] on the channel with a guaranteed slot. This function is similar to - /// [`reserve`] except it does not await for the slot to become available. - /// - /// Dropping [`Permit`] without sending a message releases the capacity back - /// to the channel. - /// - /// [`Permit`]: Permit - /// [`send`]: Permit::send - /// [`reserve`]: Sender::reserve - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(1); - /// - /// // Reserve capacity - /// let permit = tx.try_reserve().unwrap(); - /// - /// // Trying to send directly on the `tx` will fail due to no - /// // available capacity. - /// assert!(tx.try_send(123).is_err()); - /// - /// // Trying to reserve an additional slot on the `tx` will - /// // fail because there is no capacity. 
- /// assert!(tx.try_reserve().is_err()); - /// - /// // Sending on the permit succeeds - /// permit.send(456); - /// - /// // The value sent on the permit is received - /// assert_eq!(rx.recv().await.unwrap(), 456); - /// - /// } - /// ``` - pub fn try_reserve(&self) -> Result, TrySendError<()>> { - match self.chan.semaphore().semaphore.try_acquire(1) { - Ok(_) => {} - Err(TryAcquireError::Closed) => return Err(TrySendError::Closed(())), - Err(TryAcquireError::NoPermits) => return Err(TrySendError::Full(())), - } - - Ok(Permit { chan: &self.chan }) - } - - /// Tries to acquire a slot in the channel without waiting for the slot to become - /// available, returning an owned permit. - /// - /// This moves the sender _by value_, and returns an owned permit that can - /// be used to send a message into the channel. Unlike [`Sender::try_reserve`], - /// this method may be used in cases where the permit must be valid for the - /// `'static` lifetime. `Sender`s may be cloned cheaply (`Sender::clone` is - /// essentially a reference count increment, comparable to [`Arc::clone`]), - /// so when multiple [`OwnedPermit`]s are needed or the `Sender` cannot be - /// moved, it can be cloned prior to calling `try_reserve_owned`. - /// - /// If the channel is full this function will return a [`TrySendError`]. - /// Since the sender is taken by value, the `TrySendError` returned in this - /// case contains the sender, so that it may be used again. Otherwise, if - /// there is a slot available, this method will return an [`OwnedPermit`] - /// that can then be used to [`send`] on the channel with a guaranteed slot. - /// This function is similar to [`reserve_owned`] except it does not await - /// for the slot to become available. - /// - /// Dropping the [`OwnedPermit`] without sending a message releases the capacity back - /// to the channel. 
- /// - /// [`OwnedPermit`]: OwnedPermit - /// [`send`]: OwnedPermit::send - /// [`reserve_owned`]: Sender::reserve_owned - /// [`Arc::clone`]: std::sync::Arc::clone - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(1); - /// - /// // Reserve capacity - /// let permit = tx.clone().try_reserve_owned().unwrap(); - /// - /// // Trying to send directly on the `tx` will fail due to no - /// // available capacity. - /// assert!(tx.try_send(123).is_err()); - /// - /// // Trying to reserve an additional slot on the `tx` will - /// // fail because there is no capacity. - /// assert!(tx.try_reserve().is_err()); - /// - /// // Sending on the permit succeeds - /// permit.send(456); - /// - /// // The value sent on the permit is received - /// assert_eq!(rx.recv().await.unwrap(), 456); - /// - /// } - /// ``` - pub fn try_reserve_owned(self) -> Result, TrySendError> { - match self.chan.semaphore().semaphore.try_acquire(1) { - Ok(_) => {} - Err(TryAcquireError::Closed) => return Err(TrySendError::Closed(self)), - Err(TryAcquireError::NoPermits) => return Err(TrySendError::Full(self)), - } - - Ok(OwnedPermit { - chan: Some(self.chan), - }) - } - - /// Returns `true` if senders belong to the same channel. - /// - /// # Examples - /// - /// ``` - /// let (tx, rx) = tokio::sync::mpsc::channel::<()>(1); - /// let tx2 = tx.clone(); - /// assert!(tx.same_channel(&tx2)); - /// - /// let (tx3, rx3) = tokio::sync::mpsc::channel::<()>(1); - /// assert!(!tx3.same_channel(&tx2)); - /// ``` - pub fn same_channel(&self, other: &Self) -> bool { - self.chan.same_channel(&other.chan) - } - - /// Returns the current capacity of the channel. - /// - /// The capacity goes down when sending a value by calling [`send`] or by reserving capacity - /// with [`reserve`]. The capacity goes up when values are received by the [`Receiver`]. 
- /// This is distinct from [`max_capacity`], which always returns buffer capacity initially - /// specified when calling [`channel`] - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel::<()>(5); - /// - /// assert_eq!(tx.capacity(), 5); - /// - /// // Making a reservation drops the capacity by one. - /// let permit = tx.reserve().await.unwrap(); - /// assert_eq!(tx.capacity(), 4); - /// - /// // Sending and receiving a value increases the capacity by one. - /// permit.send(()); - /// rx.recv().await.unwrap(); - /// assert_eq!(tx.capacity(), 5); - /// } - /// ``` - /// - /// [`send`]: Sender::send - /// [`reserve`]: Sender::reserve - /// [`channel`]: channel - /// [`max_capacity`]: Sender::max_capacity - pub fn capacity(&self) -> usize { - self.chan.semaphore().semaphore.available_permits() - } - - /// Converts the `Sender` to a [`WeakSender`] that does not count - /// towards RAII semantics, i.e. if all `Sender` instances of the - /// channel were dropped and only `WeakSender` instances remain, - /// the channel is closed. - pub fn downgrade(&self) -> WeakSender { - WeakSender { - chan: self.chan.downgrade(), - } - } - - /// Returns the maximum buffer capacity of the channel. - /// - /// The maximum capacity is the buffer capacity initially specified when calling - /// [`channel`]. This is distinct from [`capacity`], which returns the *current* - /// available buffer capacity: as messages are sent and received, the - /// value returned by [`capacity`] will go up or down, whereas the value - /// returned by `max_capacity` will remain constant. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, _rx) = mpsc::channel::<()>(5); - /// - /// // both max capacity and capacity are the same at first - /// assert_eq!(tx.max_capacity(), 5); - /// assert_eq!(tx.capacity(), 5); - /// - /// // Making a reservation doesn't change the max capacity. - /// let permit = tx.reserve().await.unwrap(); - /// assert_eq!(tx.max_capacity(), 5); - /// // but drops the capacity by one - /// assert_eq!(tx.capacity(), 4); - /// } - /// ``` - /// - /// [`channel`]: channel - /// [`max_capacity`]: Sender::max_capacity - /// [`capacity`]: Sender::capacity - pub fn max_capacity(&self) -> usize { - self.chan.semaphore().bound - } -} - -impl Clone for Sender { - fn clone(&self) -> Self { - Sender { - chan: self.chan.clone(), - } - } -} - -impl fmt::Debug for Sender { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Sender") - .field("chan", &self.chan) - .finish() - } -} - -impl Clone for WeakSender { - fn clone(&self) -> Self { - WeakSender { - chan: self.chan.clone(), - } - } -} - -impl WeakSender { - /// Tries to convert a WeakSender into a [`Sender`]. This will return `Some` - /// if there are other `Sender` instances alive and the channel wasn't - /// previously dropped, otherwise `None` is returned. - pub fn upgrade(&self) -> Option> { - chan::Tx::upgrade(self.chan.clone()).map(Sender::new) - } -} - -impl fmt::Debug for WeakSender { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("WeakSender").finish() - } -} - -// ===== impl Permit ===== - -impl Permit<'_, T> { - /// Sends a value using the reserved capacity. - /// - /// Capacity for the message has already been reserved. The message is sent - /// to the receiver and the permit is consumed. The operation will succeed - /// even if the receiver half has been closed. 
See [`Receiver::close`] for - /// more details on performing a clean shutdown. - /// - /// [`Receiver::close`]: Receiver::close - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(1); - /// - /// // Reserve capacity - /// let permit = tx.reserve().await.unwrap(); - /// - /// // Trying to send directly on the `tx` will fail due to no - /// // available capacity. - /// assert!(tx.try_send(123).is_err()); - /// - /// // Send a message on the permit - /// permit.send(456); - /// - /// // The value sent on the permit is received - /// assert_eq!(rx.recv().await.unwrap(), 456); - /// } - /// ``` - pub fn send(self, value: T) { - use std::mem; - - self.chan.send(value); - - // Avoid the drop logic - mem::forget(self); - } -} - -impl Drop for Permit<'_, T> { - fn drop(&mut self) { - use chan::Semaphore; - - let semaphore = self.chan.semaphore(); - - // Add the permit back to the semaphore - semaphore.add_permit(); - - // If this is the last sender for this channel, wake the receiver so - // that it can be notified that the channel is closed. - if semaphore.is_closed() && semaphore.is_idle() { - self.chan.wake_rx(); - } - } -} - -impl fmt::Debug for Permit<'_, T> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Permit") - .field("chan", &self.chan) - .finish() - } -} - -// ===== impl Permit ===== - -impl OwnedPermit { - /// Sends a value using the reserved capacity. - /// - /// Capacity for the message has already been reserved. The message is sent - /// to the receiver and the permit is consumed. The operation will succeed - /// even if the receiver half has been closed. See [`Receiver::close`] for - /// more details on performing a clean shutdown. - /// - /// Unlike [`Permit::send`], this method returns the [`Sender`] from which - /// the `OwnedPermit` was reserved. 
- /// - /// [`Receiver::close`]: Receiver::close - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::channel(1); - /// - /// // Reserve capacity - /// let permit = tx.reserve_owned().await.unwrap(); - /// - /// // Send a message on the permit, returning the sender. - /// let tx = permit.send(456); - /// - /// // The value sent on the permit is received - /// assert_eq!(rx.recv().await.unwrap(), 456); - /// - /// // We may now reuse `tx` to send another message. - /// tx.send(789).await.unwrap(); - /// } - /// ``` - pub fn send(mut self, value: T) -> Sender { - let chan = self.chan.take().unwrap_or_else(|| { - unreachable!("OwnedPermit channel is only taken when the permit is moved") - }); - chan.send(value); - - Sender { chan } - } - - /// Releases the reserved capacity *without* sending a message, returning the - /// [`Sender`]. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx) = mpsc::channel(1); - /// - /// // Clone the sender and reserve capacity - /// let permit = tx.clone().reserve_owned().await.unwrap(); - /// - /// // Trying to send on the original `tx` will fail, since the `permit` - /// // has reserved all the available capacity. - /// assert!(tx.try_send(123).is_err()); - /// - /// // Release the permit without sending a message, returning the clone - /// // of the sender. - /// let tx2 = permit.release(); - /// - /// // We may now reuse `tx` to send another message. 
- /// tx.send(789).await.unwrap(); - /// # drop(rx); drop(tx2); - /// } - /// ``` - /// - /// [`Sender`]: Sender - pub fn release(mut self) -> Sender { - use chan::Semaphore; - - let chan = self.chan.take().unwrap_or_else(|| { - unreachable!("OwnedPermit channel is only taken when the permit is moved") - }); - - // Add the permit back to the semaphore - chan.semaphore().add_permit(); - Sender { chan } - } -} - -impl Drop for OwnedPermit { - fn drop(&mut self) { - use chan::Semaphore; - - // Are we still holding onto the sender? - if let Some(chan) = self.chan.take() { - let semaphore = chan.semaphore(); - - // Add the permit back to the semaphore - semaphore.add_permit(); - - // If this `OwnedPermit` is holding the last sender for this - // channel, wake the receiver so that it can be notified that the - // channel is closed. - if semaphore.is_closed() && semaphore.is_idle() { - chan.wake_rx(); - } - } - - // Otherwise, do nothing. - } -} - -impl fmt::Debug for OwnedPermit { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("OwnedPermit") - .field("chan", &self.chan) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/chan.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/chan.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/chan.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/chan.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,428 +0,0 @@ -use crate::loom::cell::UnsafeCell; -use crate::loom::future::AtomicWaker; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::Arc; -use crate::runtime::park::CachedParkThread; -use crate::sync::mpsc::error::TryRecvError; -use crate::sync::mpsc::{bounded, list, unbounded}; -use crate::sync::notify::Notify; -use crate::util::cacheline::CachePadded; - -use std::fmt; -use std::process; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; -use std::task::Poll::{Pending, Ready}; -use 
std::task::{Context, Poll}; - -/// Channel sender. -pub(crate) struct Tx { - inner: Arc>, -} - -impl fmt::Debug for Tx { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Tx").field("inner", &self.inner).finish() - } -} - -/// Channel receiver. -pub(crate) struct Rx { - inner: Arc>, -} - -impl fmt::Debug for Rx { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Rx").field("inner", &self.inner).finish() - } -} - -pub(crate) trait Semaphore { - fn is_idle(&self) -> bool; - - fn add_permit(&self); - - fn close(&self); - - fn is_closed(&self) -> bool; -} - -pub(super) struct Chan { - /// Handle to the push half of the lock-free list. - tx: CachePadded>, - - /// Receiver waker. Notified when a value is pushed into the channel. - rx_waker: CachePadded, - - /// Notifies all tasks listening for the receiver being dropped. - notify_rx_closed: Notify, - - /// Coordinates access to channel's capacity. - semaphore: S, - - /// Tracks the number of outstanding sender handles. - /// - /// When this drops to zero, the send half of the channel is closed. - tx_count: AtomicUsize, - - /// Only accessed by `Rx` handle. - rx_fields: UnsafeCell>, -} - -impl fmt::Debug for Chan -where - S: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Chan") - .field("tx", &*self.tx) - .field("semaphore", &self.semaphore) - .field("rx_waker", &*self.rx_waker) - .field("tx_count", &self.tx_count) - .field("rx_fields", &"...") - .finish() - } -} - -/// Fields only accessed by `Rx` handle. -struct RxFields { - /// Channel receiver. This field is only accessed by the `Receiver` type. - list: list::Rx, - - /// `true` if `Rx::close` is called. 
- rx_closed: bool, -} - -impl fmt::Debug for RxFields { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("RxFields") - .field("list", &self.list) - .field("rx_closed", &self.rx_closed) - .finish() - } -} - -unsafe impl Send for Chan {} -unsafe impl Sync for Chan {} - -pub(crate) fn channel(semaphore: S) -> (Tx, Rx) { - let (tx, rx) = list::channel(); - - let chan = Arc::new(Chan { - notify_rx_closed: Notify::new(), - tx: CachePadded::new(tx), - semaphore, - rx_waker: CachePadded::new(AtomicWaker::new()), - tx_count: AtomicUsize::new(1), - rx_fields: UnsafeCell::new(RxFields { - list: rx, - rx_closed: false, - }), - }); - - (Tx::new(chan.clone()), Rx::new(chan)) -} - -// ===== impl Tx ===== - -impl Tx { - fn new(chan: Arc>) -> Tx { - Tx { inner: chan } - } - - pub(super) fn downgrade(&self) -> Arc> { - self.inner.clone() - } - - // Returns the upgraded channel or None if the upgrade failed. - pub(super) fn upgrade(chan: Arc>) -> Option { - let mut tx_count = chan.tx_count.load(Acquire); - - loop { - if tx_count == 0 { - // channel is closed - return None; - } - - match chan - .tx_count - .compare_exchange_weak(tx_count, tx_count + 1, AcqRel, Acquire) - { - Ok(_) => return Some(Tx { inner: chan }), - Err(prev_count) => tx_count = prev_count, - } - } - } - - pub(super) fn semaphore(&self) -> &S { - &self.inner.semaphore - } - - /// Send a message and notify the receiver. - pub(crate) fn send(&self, value: T) { - self.inner.send(value); - } - - /// Wake the receive half - pub(crate) fn wake_rx(&self) { - self.inner.rx_waker.wake(); - } - - /// Returns `true` if senders belong to the same channel. 
- pub(crate) fn same_channel(&self, other: &Self) -> bool { - Arc::ptr_eq(&self.inner, &other.inner) - } -} - -impl Tx { - pub(crate) fn is_closed(&self) -> bool { - self.inner.semaphore.is_closed() - } - - pub(crate) async fn closed(&self) { - // In order to avoid a race condition, we first request a notification, - // **then** check whether the semaphore is closed. If the semaphore is - // closed the notification request is dropped. - let notified = self.inner.notify_rx_closed.notified(); - - if self.inner.semaphore.is_closed() { - return; - } - notified.await; - } -} - -impl Clone for Tx { - fn clone(&self) -> Tx { - // Using a Relaxed ordering here is sufficient as the caller holds a - // strong ref to `self`, preventing a concurrent decrement to zero. - self.inner.tx_count.fetch_add(1, Relaxed); - - Tx { - inner: self.inner.clone(), - } - } -} - -impl Drop for Tx { - fn drop(&mut self) { - if self.inner.tx_count.fetch_sub(1, AcqRel) != 1 { - return; - } - - // Close the list, which sends a `Close` message - self.inner.tx.close(); - - // Notify the receiver - self.wake_rx(); - } -} - -// ===== impl Rx ===== - -impl Rx { - fn new(chan: Arc>) -> Rx { - Rx { inner: chan } - } - - pub(crate) fn close(&mut self) { - self.inner.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - - if rx_fields.rx_closed { - return; - } - - rx_fields.rx_closed = true; - }); - - self.inner.semaphore.close(); - self.inner.notify_rx_closed.notify_waiters(); - } - - /// Receive the next value - pub(crate) fn recv(&mut self, cx: &mut Context<'_>) -> Poll> { - use super::block::Read; - - ready!(crate::trace::trace_leaf(cx)); - - // Keep track of task budget - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - self.inner.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - - macro_rules! 
try_recv { - () => { - match rx_fields.list.pop(&self.inner.tx) { - Some(Read::Value(value)) => { - self.inner.semaphore.add_permit(); - coop.made_progress(); - return Ready(Some(value)); - } - Some(Read::Closed) => { - // TODO: This check may not be required as it most - // likely can only return `true` at this point. A - // channel is closed when all tx handles are - // dropped. Dropping a tx handle releases memory, - // which ensures that if dropping the tx handle is - // visible, then all messages sent are also visible. - assert!(self.inner.semaphore.is_idle()); - coop.made_progress(); - return Ready(None); - } - None => {} // fall through - } - }; - } - - try_recv!(); - - self.inner.rx_waker.register_by_ref(cx.waker()); - - // It is possible that a value was pushed between attempting to read - // and registering the task, so we have to check the channel a - // second time here. - try_recv!(); - - if rx_fields.rx_closed && self.inner.semaphore.is_idle() { - coop.made_progress(); - Ready(None) - } else { - Pending - } - }) - } - - /// Try to receive the next value. - pub(crate) fn try_recv(&mut self) -> Result { - use super::list::TryPopResult; - - self.inner.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - - macro_rules! try_recv { - () => { - match rx_fields.list.try_pop(&self.inner.tx) { - TryPopResult::Ok(value) => { - self.inner.semaphore.add_permit(); - return Ok(value); - } - TryPopResult::Closed => return Err(TryRecvError::Disconnected), - TryPopResult::Empty => return Err(TryRecvError::Empty), - TryPopResult::Busy => {} // fall through - } - }; - } - - try_recv!(); - - // If a previous `poll_recv` call has set a waker, we wake it here. - // This allows us to put our own CachedParkThread waker in the - // AtomicWaker slot instead. - // - // This is not a spurious wakeup to `poll_recv` since we just got a - // Busy from `try_pop`, which only happens if there are messages in - // the queue. 
- self.inner.rx_waker.wake(); - - // Park the thread until the problematic send has completed. - let mut park = CachedParkThread::new(); - let waker = park.waker().unwrap(); - loop { - self.inner.rx_waker.register_by_ref(&waker); - // It is possible that the problematic send has now completed, - // so we have to check for messages again. - try_recv!(); - park.park(); - } - }) - } -} - -impl Drop for Rx { - fn drop(&mut self) { - use super::block::Read::Value; - - self.close(); - - self.inner.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - - while let Some(Value(_)) = rx_fields.list.pop(&self.inner.tx) { - self.inner.semaphore.add_permit(); - } - }) - } -} - -// ===== impl Chan ===== - -impl Chan { - fn send(&self, value: T) { - // Push the value - self.tx.push(value); - - // Notify the rx task - self.rx_waker.wake(); - } -} - -impl Drop for Chan { - fn drop(&mut self) { - use super::block::Read::Value; - - // Safety: the only owner of the rx fields is Chan, and being - // inside its own Drop means we're the last ones to touch it. 
- self.rx_fields.with_mut(|rx_fields_ptr| { - let rx_fields = unsafe { &mut *rx_fields_ptr }; - - while let Some(Value(_)) = rx_fields.list.pop(&self.tx) {} - unsafe { rx_fields.list.free_blocks() }; - }); - } -} - -// ===== impl Semaphore for (::Semaphore, capacity) ===== - -impl Semaphore for bounded::Semaphore { - fn add_permit(&self) { - self.semaphore.release(1) - } - - fn is_idle(&self) -> bool { - self.semaphore.available_permits() == self.bound - } - - fn close(&self) { - self.semaphore.close(); - } - - fn is_closed(&self) -> bool { - self.semaphore.is_closed() - } -} - -// ===== impl Semaphore for AtomicUsize ===== - -impl Semaphore for unbounded::Semaphore { - fn add_permit(&self) { - let prev = self.0.fetch_sub(2, Release); - - if prev >> 1 == 0 { - // Something went wrong - process::abort(); - } - } - - fn is_idle(&self) -> bool { - self.0.load(Acquire) >> 1 == 0 - } - - fn close(&self) { - self.0.fetch_or(1, Release); - } - - fn is_closed(&self) -> bool { - self.0.load(Acquire) & 1 == 1 - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/error.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/error.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/error.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,149 +0,0 @@ -//! Channel error types. - -use std::error::Error; -use std::fmt; - -/// Error returned by the `Sender`. 
-#[derive(PartialEq, Eq, Clone, Copy)] -pub struct SendError(pub T); - -impl fmt::Debug for SendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SendError").finish_non_exhaustive() - } -} - -impl fmt::Display for SendError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } -} - -impl Error for SendError {} - -// ===== TrySendError ===== - -/// This enumeration is the list of the possible error outcomes for the -/// [try_send](super::Sender::try_send) method. -#[derive(PartialEq, Eq, Clone, Copy)] -pub enum TrySendError { - /// The data could not be sent on the channel because the channel is - /// currently full and sending would require blocking. - Full(T), - - /// The receive half of the channel was explicitly closed or has been - /// dropped. - Closed(T), -} - -impl fmt::Debug for TrySendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - TrySendError::Full(..) => "Full(..)".fmt(f), - TrySendError::Closed(..) => "Closed(..)".fmt(f), - } - } -} - -impl fmt::Display for TrySendError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - fmt, - "{}", - match self { - TrySendError::Full(..) => "no available capacity", - TrySendError::Closed(..) => "channel closed", - } - ) - } -} - -impl Error for TrySendError {} - -impl From> for TrySendError { - fn from(src: SendError) -> TrySendError { - TrySendError::Closed(src.0) - } -} - -// ===== TryRecvError ===== - -/// Error returned by `try_recv`. -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub enum TryRecvError { - /// This **channel** is currently empty, but the **Sender**(s) have not yet - /// disconnected, so data may yet become available. - Empty, - /// The **channel**'s sending half has become disconnected, and there will - /// never be any more data received on it. 
- Disconnected, -} - -impl fmt::Display for TryRecvError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - TryRecvError::Empty => "receiving on an empty channel".fmt(fmt), - TryRecvError::Disconnected => "receiving on a closed channel".fmt(fmt), - } - } -} - -impl Error for TryRecvError {} - -// ===== RecvError ===== - -/// Error returned by `Receiver`. -#[derive(Debug, Clone)] -#[doc(hidden)] -#[deprecated(note = "This type is unused because recv returns an Option.")] -pub struct RecvError(()); - -#[allow(deprecated)] -impl fmt::Display for RecvError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } -} - -#[allow(deprecated)] -impl Error for RecvError {} - -cfg_time! { - // ===== SendTimeoutError ===== - - #[derive(PartialEq, Eq, Clone, Copy)] - /// Error returned by [`Sender::send_timeout`](super::Sender::send_timeout)]. - pub enum SendTimeoutError { - /// The data could not be sent on the channel because the channel is - /// full, and the timeout to send has elapsed. - Timeout(T), - - /// The receive half of the channel was explicitly closed or has been - /// dropped. - Closed(T), - } - - impl fmt::Debug for SendTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - SendTimeoutError::Timeout(..) => "Timeout(..)".fmt(f), - SendTimeoutError::Closed(..) => "Closed(..)".fmt(f), - } - } - } - - impl fmt::Display for SendTimeoutError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - fmt, - "{}", - match self { - SendTimeoutError::Timeout(..) => "timed out waiting on send operation", - SendTimeoutError::Closed(..) 
=> "channel closed", - } - ) - } - } - - impl Error for SendTimeoutError {} -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/list.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/list.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/list.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/list.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,371 +0,0 @@ -//! A concurrent, lock-free, FIFO list. - -use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize}; -use crate::loom::thread; -use crate::sync::mpsc::block::{self, Block}; - -use std::fmt; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; - -/// List queue transmit handle. -pub(crate) struct Tx { - /// Tail in the `Block` mpmc list. - block_tail: AtomicPtr>, - - /// Position to push the next message. This references a block and offset - /// into the block. - tail_position: AtomicUsize, -} - -/// List queue receive handle -pub(crate) struct Rx { - /// Pointer to the block being processed. - head: NonNull>, - - /// Next slot index to process. - index: usize, - - /// Pointer to the next block pending release. - free_head: NonNull>, -} - -/// Return value of `Rx::try_pop`. -pub(crate) enum TryPopResult { - /// Successfully popped a value. - Ok(T), - /// The channel is empty. - Empty, - /// The channel is empty and closed. - Closed, - /// The channel is not empty, but the first value is being written. - Busy, -} - -pub(crate) fn channel() -> (Tx, Rx) { - // Create the initial block shared between the tx and rx halves. - let initial_block = Block::new(0); - let initial_block_ptr = Box::into_raw(initial_block); - - let tx = Tx { - block_tail: AtomicPtr::new(initial_block_ptr), - tail_position: AtomicUsize::new(0), - }; - - let head = NonNull::new(initial_block_ptr).unwrap(); - - let rx = Rx { - head, - index: 0, - free_head: head, - }; - - (tx, rx) -} - -impl Tx { - /// Pushes a value into the list. 
- pub(crate) fn push(&self, value: T) { - // First, claim a slot for the value. `Acquire` is used here to - // synchronize with the `fetch_add` in `reclaim_blocks`. - let slot_index = self.tail_position.fetch_add(1, Acquire); - - // Load the current block and write the value - let block = self.find_block(slot_index); - - unsafe { - // Write the value to the block - block.as_ref().write(slot_index, value); - } - } - - /// Closes the send half of the list. - /// - /// Similar process as pushing a value, but instead of writing the value & - /// setting the ready flag, the TX_CLOSED flag is set on the block. - pub(crate) fn close(&self) { - // First, claim a slot for the value. This is the last slot that will be - // claimed. - let slot_index = self.tail_position.fetch_add(1, Acquire); - - let block = self.find_block(slot_index); - - unsafe { block.as_ref().tx_close() } - } - - fn find_block(&self, slot_index: usize) -> NonNull> { - // The start index of the block that contains `index`. - let start_index = block::start_index(slot_index); - - // The index offset into the block - let offset = block::offset(slot_index); - - // Load the current head of the block - let mut block_ptr = self.block_tail.load(Acquire); - - let block = unsafe { &*block_ptr }; - - // Calculate the distance between the tail ptr and the target block - let distance = block.distance(start_index); - - // Decide if this call to `find_block` should attempt to update the - // `block_tail` pointer. - // - // Updating `block_tail` is not always performed in order to reduce - // contention. - // - // When set, as the routine walks the linked list, it attempts to update - // `block_tail`. If the update cannot be performed, `try_updating_tail` - // is unset. - let mut try_updating_tail = distance > offset; - - // Walk the linked list of blocks until the block with `start_index` is - // found. 
- loop { - let block = unsafe { &(*block_ptr) }; - - if block.is_at_index(start_index) { - return unsafe { NonNull::new_unchecked(block_ptr) }; - } - - let next_block = block - .load_next(Acquire) - // There is no allocated next block, grow the linked list. - .unwrap_or_else(|| block.grow()); - - // If the block is **not** final, then the tail pointer cannot be - // advanced any more. - try_updating_tail &= block.is_final(); - - if try_updating_tail { - // Advancing `block_tail` must happen when walking the linked - // list. `block_tail` may not advance passed any blocks that are - // not "final". At the point a block is finalized, it is unknown - // if there are any prior blocks that are unfinalized, which - // makes it impossible to advance `block_tail`. - // - // While walking the linked list, `block_tail` can be advanced - // as long as finalized blocks are traversed. - // - // Release ordering is used to ensure that any subsequent reads - // are able to see the memory pointed to by `block_tail`. - // - // Acquire is not needed as any "actual" value is not accessed. - // At this point, the linked list is walked to acquire blocks. - if self - .block_tail - .compare_exchange(block_ptr, next_block.as_ptr(), Release, Relaxed) - .is_ok() - { - // Synchronize with any senders - let tail_position = self.tail_position.fetch_add(0, Release); - - unsafe { - block.tx_release(tail_position); - } - } else { - // A concurrent sender is also working on advancing - // `block_tail` and this thread is falling behind. - // - // Stop trying to advance the tail pointer - try_updating_tail = false; - } - } - - block_ptr = next_block.as_ptr(); - - thread::yield_now(); - } - } - - pub(crate) unsafe fn reclaim_block(&self, mut block: NonNull>) { - // The block has been removed from the linked list and ownership - // is reclaimed. - // - // Before dropping the block, see if it can be reused by - // inserting it back at the end of the linked list. 
- // - // First, reset the data - block.as_mut().reclaim(); - - let mut reused = false; - - // Attempt to insert the block at the end - // - // Walk at most three times - // - let curr_ptr = self.block_tail.load(Acquire); - - // The pointer can never be null - debug_assert!(!curr_ptr.is_null()); - - let mut curr = NonNull::new_unchecked(curr_ptr); - - // TODO: Unify this logic with Block::grow - for _ in 0..3 { - match curr.as_ref().try_push(&mut block, AcqRel, Acquire) { - Ok(_) => { - reused = true; - break; - } - Err(next) => { - curr = next; - } - } - } - - if !reused { - let _ = Box::from_raw(block.as_ptr()); - } - } -} - -impl fmt::Debug for Tx { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Tx") - .field("block_tail", &self.block_tail.load(Relaxed)) - .field("tail_position", &self.tail_position.load(Relaxed)) - .finish() - } -} - -impl Rx { - /// Pops the next value off the queue. - pub(crate) fn pop(&mut self, tx: &Tx) -> Option> { - // Advance `head`, if needed - if !self.try_advancing_head() { - return None; - } - - self.reclaim_blocks(tx); - - unsafe { - let block = self.head.as_ref(); - - let ret = block.read(self.index); - - if let Some(block::Read::Value(..)) = ret { - self.index = self.index.wrapping_add(1); - } - - ret - } - } - - /// Pops the next value off the queue, detecting whether the block - /// is busy or empty on failure. - /// - /// This function exists because `Rx::pop` can return `None` even if the - /// channel's queue contains a message that has been completely written. - /// This can happen if the fully delivered message is behind another message - /// that is in the middle of being written to the block, since the channel - /// can't return the messages out of order. 
- pub(crate) fn try_pop(&mut self, tx: &Tx) -> TryPopResult { - let tail_position = tx.tail_position.load(Acquire); - let result = self.pop(tx); - - match result { - Some(block::Read::Value(t)) => TryPopResult::Ok(t), - Some(block::Read::Closed) => TryPopResult::Closed, - None if tail_position == self.index => TryPopResult::Empty, - None => TryPopResult::Busy, - } - } - - /// Tries advancing the block pointer to the block referenced by `self.index`. - /// - /// Returns `true` if successful, `false` if there is no next block to load. - fn try_advancing_head(&mut self) -> bool { - let block_index = block::start_index(self.index); - - loop { - let next_block = { - let block = unsafe { self.head.as_ref() }; - - if block.is_at_index(block_index) { - return true; - } - - block.load_next(Acquire) - }; - - let next_block = match next_block { - Some(next_block) => next_block, - None => { - return false; - } - }; - - self.head = next_block; - - thread::yield_now(); - } - } - - fn reclaim_blocks(&mut self, tx: &Tx) { - while self.free_head != self.head { - unsafe { - // Get a handle to the block that will be freed and update - // `free_head` to point to the next block. - let block = self.free_head; - - let observed_tail_position = block.as_ref().observed_tail_position(); - - let required_index = match observed_tail_position { - Some(i) => i, - None => return, - }; - - if required_index > self.index { - return; - } - - // We may read the next pointer with `Relaxed` ordering as it is - // guaranteed that the `reclaim_blocks` routine trails the `recv` - // routine. Any memory accessed by `reclaim_blocks` has already - // been acquired by `recv`. - let next_block = block.as_ref().load_next(Relaxed); - - // Update the free list head - self.free_head = next_block.unwrap(); - - // Push the emptied block onto the back of the queue, making it - // available to senders. - tx.reclaim_block(block); - } - - thread::yield_now(); - } - } - - /// Effectively `Drop` all the blocks. 
Should only be called once, when - /// the list is dropping. - pub(super) unsafe fn free_blocks(&mut self) { - debug_assert_ne!(self.free_head, NonNull::dangling()); - - let mut cur = Some(self.free_head); - - #[cfg(debug_assertions)] - { - // to trigger the debug assert above so as to catch that we - // don't call `free_blocks` more than once. - self.free_head = NonNull::dangling(); - self.head = NonNull::dangling(); - } - - while let Some(block) = cur { - cur = block.as_ref().load_next(Relaxed); - drop(Box::from_raw(block.as_ptr())); - } - } -} - -impl fmt::Debug for Rx { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Rx") - .field("head", &self.head) - .field("index", &self.index) - .field("free_head", &self.free_head) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,122 +0,0 @@ -#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))] - -//! A multi-producer, single-consumer queue for sending values between -//! asynchronous tasks. -//! -//! This module provides two variants of the channel: bounded and unbounded. The -//! bounded variant has a limit on the number of messages that the channel can -//! store, and if this limit is reached, trying to send another message will -//! wait until a message is received from the channel. An unbounded channel has -//! an infinite capacity, so the `send` method will always complete immediately. -//! This makes the [`UnboundedSender`] usable from both synchronous and -//! asynchronous code. -//! -//! Similar to the `mpsc` channels provided by `std`, the channel constructor -//! functions provide separate send and receive handles, [`Sender`] and -//! 
[`Receiver`] for the bounded channel, [`UnboundedSender`] and -//! [`UnboundedReceiver`] for the unbounded channel. If there is no message to read, -//! the current task will be notified when a new value is sent. [`Sender`] and -//! [`UnboundedSender`] allow sending values into the channel. If the bounded -//! channel is at capacity, the send is rejected and the task will be notified -//! when additional capacity is available. In other words, the channel provides -//! backpressure. -//! -//! This channel is also suitable for the single-producer single-consumer -//! use-case. (Unless you only need to send one message, in which case you -//! should use the [oneshot] channel.) -//! -//! # Disconnection -//! -//! When all [`Sender`] handles have been dropped, it is no longer -//! possible to send values into the channel. This is considered the termination -//! event of the stream. As such, `Receiver::poll` returns `Ok(Ready(None))`. -//! -//! If the [`Receiver`] handle is dropped, then messages can no longer -//! be read out of the channel. In this case, all further attempts to send will -//! result in an error. Additionally, all unread messages will be drained from the -//! channel and dropped. -//! -//! # Clean Shutdown -//! -//! When the [`Receiver`] is dropped, it is possible for unprocessed messages to -//! remain in the channel. Instead, it is usually desirable to perform a "clean" -//! shutdown. To do this, the receiver first calls `close`, which will prevent -//! any further messages to be sent into the channel. Then, the receiver -//! consumes the channel to completion, at which point the receiver can be -//! dropped. -//! -//! # Communicating between sync and async code -//! -//! When you want to communicate between synchronous and asynchronous code, there -//! are two situations to consider: -//! -//! **Bounded channel**: If you need a bounded channel, you should use a bounded -//! Tokio `mpsc` channel for both directions of communication. 
Instead of calling -//! the async [`send`][bounded-send] or [`recv`][bounded-recv] methods, in -//! synchronous code you will need to use the [`blocking_send`][blocking-send] or -//! [`blocking_recv`][blocking-recv] methods. -//! -//! **Unbounded channel**: You should use the kind of channel that matches where -//! the receiver is. So for sending a message _from async to sync_, you should -//! use [the standard library unbounded channel][std-unbounded] or -//! [crossbeam][crossbeam-unbounded]. Similarly, for sending a message _from sync -//! to async_, you should use an unbounded Tokio `mpsc` channel. -//! -//! Please be aware that the above remarks were written with the `mpsc` channel -//! in mind, but they can also be generalized to other kinds of channels. In -//! general, any channel method that isn't marked async can be called anywhere, -//! including outside of the runtime. For example, sending a message on a -//! [oneshot] channel from outside the runtime is perfectly fine. -//! -//! # Multiple runtimes -//! -//! The mpsc channel does not care about which runtime you use it in, and can be -//! used to send messages from one runtime to another. It can also be used in -//! non-Tokio runtimes. -//! -//! There is one exception to the above: the [`send_timeout`] must be used from -//! within a Tokio runtime, however it is still not tied to one specific Tokio -//! runtime, and the sender may be moved from one Tokio runtime to another. -//! -//! [`Sender`]: crate::sync::mpsc::Sender -//! [`Receiver`]: crate::sync::mpsc::Receiver -//! [bounded-send]: crate::sync::mpsc::Sender::send() -//! [bounded-recv]: crate::sync::mpsc::Receiver::recv() -//! [blocking-send]: crate::sync::mpsc::Sender::blocking_send() -//! [blocking-recv]: crate::sync::mpsc::Receiver::blocking_recv() -//! [`UnboundedSender`]: crate::sync::mpsc::UnboundedSender -//! [`UnboundedReceiver`]: crate::sync::mpsc::UnboundedReceiver -//! [oneshot]: crate::sync::oneshot -//! 
[`Handle::block_on`]: crate::runtime::Handle::block_on() -//! [std-unbounded]: std::sync::mpsc::channel -//! [crossbeam-unbounded]: https://docs.rs/crossbeam/*/crossbeam/channel/fn.unbounded.html -//! [`send_timeout`]: crate::sync::mpsc::Sender::send_timeout - -pub(super) mod block; - -mod bounded; -pub use self::bounded::{channel, OwnedPermit, Permit, Receiver, Sender, WeakSender}; - -mod chan; - -pub(super) mod list; - -mod unbounded; -pub use self::unbounded::{ - unbounded_channel, UnboundedReceiver, UnboundedSender, WeakUnboundedSender, -}; - -pub mod error; - -/// The number of values a block can contain. -/// -/// This value must be a power of 2. It also must be smaller than the number of -/// bits in `usize`. -#[cfg(all(target_pointer_width = "64", not(loom)))] -const BLOCK_CAP: usize = 32; - -#[cfg(all(not(target_pointer_width = "64"), not(loom)))] -const BLOCK_CAP: usize = 16; - -#[cfg(loom)] -const BLOCK_CAP: usize = 2; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/unbounded.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/unbounded.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/mpsc/unbounded.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/mpsc/unbounded.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,455 +0,0 @@ -use crate::loom::sync::{atomic::AtomicUsize, Arc}; -use crate::sync::mpsc::chan; -use crate::sync::mpsc::error::{SendError, TryRecvError}; - -use std::fmt; -use std::task::{Context, Poll}; - -/// Send values to the associated `UnboundedReceiver`. -/// -/// Instances are created by the -/// [`unbounded_channel`](unbounded_channel) function. -pub struct UnboundedSender { - chan: chan::Tx, -} - -/// An unbounded sender that does not prevent the channel from being closed. -/// -/// If all [`UnboundedSender`] instances of a channel were dropped and only -/// `WeakUnboundedSender` instances remain, the channel is closed. 
-/// -/// In order to send messages, the `WeakUnboundedSender` needs to be upgraded using -/// [`WeakUnboundedSender::upgrade`], which returns `Option`. It returns `None` -/// if all `UnboundedSender`s have been dropped, and otherwise it returns an `UnboundedSender`. -/// -/// [`UnboundedSender`]: UnboundedSender -/// [`WeakUnboundedSender::upgrade`]: WeakUnboundedSender::upgrade -/// -/// #Examples -/// -/// ``` -/// use tokio::sync::mpsc::unbounded_channel; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, _rx) = unbounded_channel::(); -/// let tx_weak = tx.downgrade(); -/// -/// // Upgrading will succeed because `tx` still exists. -/// assert!(tx_weak.upgrade().is_some()); -/// -/// // If we drop `tx`, then it will fail. -/// drop(tx); -/// assert!(tx_weak.clone().upgrade().is_none()); -/// } -/// ``` -pub struct WeakUnboundedSender { - chan: Arc>, -} - -impl Clone for UnboundedSender { - fn clone(&self) -> Self { - UnboundedSender { - chan: self.chan.clone(), - } - } -} - -impl fmt::Debug for UnboundedSender { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("UnboundedSender") - .field("chan", &self.chan) - .finish() - } -} - -/// Receive values from the associated `UnboundedSender`. -/// -/// Instances are created by the -/// [`unbounded_channel`](unbounded_channel) function. -/// -/// This receiver can be turned into a `Stream` using [`UnboundedReceiverStream`]. -/// -/// [`UnboundedReceiverStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.UnboundedReceiverStream.html -pub struct UnboundedReceiver { - /// The channel receiver - chan: chan::Rx, -} - -impl fmt::Debug for UnboundedReceiver { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("UnboundedReceiver") - .field("chan", &self.chan) - .finish() - } -} - -/// Creates an unbounded mpsc channel for communicating between asynchronous -/// tasks without backpressure. 
-/// -/// A `send` on this channel will always succeed as long as the receive half has -/// not been closed. If the receiver falls behind, messages will be arbitrarily -/// buffered. -/// -/// **Note** that the amount of available system memory is an implicit bound to -/// the channel. Using an `unbounded` channel has the ability of causing the -/// process to run out of memory. In this case, the process will be aborted. -pub fn unbounded_channel() -> (UnboundedSender, UnboundedReceiver) { - let (tx, rx) = chan::channel(Semaphore(AtomicUsize::new(0))); - - let tx = UnboundedSender::new(tx); - let rx = UnboundedReceiver::new(rx); - - (tx, rx) -} - -/// No capacity -#[derive(Debug)] -pub(crate) struct Semaphore(pub(crate) AtomicUsize); - -impl UnboundedReceiver { - pub(crate) fn new(chan: chan::Rx) -> UnboundedReceiver { - UnboundedReceiver { chan } - } - - /// Receives the next value for this receiver. - /// - /// This method returns `None` if the channel has been closed and there are - /// no remaining messages in the channel's buffer. This indicates that no - /// further values can ever be received from this `Receiver`. The channel is - /// closed when all senders have been dropped, or when [`close`] is called. - /// - /// If there are no messages in the channel's buffer, but the channel has - /// not yet been closed, this method will sleep until a message is sent or - /// the channel is closed. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If `recv` is used as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, it is guaranteed that no messages were received on this - /// channel. 
- /// - /// [`close`]: Self::close - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::unbounded_channel(); - /// - /// tokio::spawn(async move { - /// tx.send("hello").unwrap(); - /// }); - /// - /// assert_eq!(Some("hello"), rx.recv().await); - /// assert_eq!(None, rx.recv().await); - /// } - /// ``` - /// - /// Values are buffered: - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::unbounded_channel(); - /// - /// tx.send("hello").unwrap(); - /// tx.send("world").unwrap(); - /// - /// assert_eq!(Some("hello"), rx.recv().await); - /// assert_eq!(Some("world"), rx.recv().await); - /// } - /// ``` - pub async fn recv(&mut self) -> Option { - use crate::future::poll_fn; - - poll_fn(|cx| self.poll_recv(cx)).await - } - - /// Tries to receive the next value for this receiver. - /// - /// This method returns the [`Empty`] error if the channel is currently - /// empty, but there are still outstanding [senders] or [permits]. - /// - /// This method returns the [`Disconnected`] error if the channel is - /// currently empty, and there are no outstanding [senders] or [permits]. - /// - /// Unlike the [`poll_recv`] method, this method will never return an - /// [`Empty`] error spuriously. 
- /// - /// [`Empty`]: crate::sync::mpsc::error::TryRecvError::Empty - /// [`Disconnected`]: crate::sync::mpsc::error::TryRecvError::Disconnected - /// [`poll_recv`]: Self::poll_recv - /// [senders]: crate::sync::mpsc::Sender - /// [permits]: crate::sync::mpsc::Permit - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// use tokio::sync::mpsc::error::TryRecvError; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::unbounded_channel(); - /// - /// tx.send("hello").unwrap(); - /// - /// assert_eq!(Ok("hello"), rx.try_recv()); - /// assert_eq!(Err(TryRecvError::Empty), rx.try_recv()); - /// - /// tx.send("hello").unwrap(); - /// // Drop the last sender, closing the channel. - /// drop(tx); - /// - /// assert_eq!(Ok("hello"), rx.try_recv()); - /// assert_eq!(Err(TryRecvError::Disconnected), rx.try_recv()); - /// } - /// ``` - pub fn try_recv(&mut self) -> Result { - self.chan.try_recv() - } - - /// Blocking receive to call outside of asynchronous contexts. - /// - /// # Panics - /// - /// This function panics if called within an asynchronous execution - /// context. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = mpsc::unbounded_channel::(); - /// - /// let sync_code = thread::spawn(move || { - /// assert_eq!(Some(10), rx.blocking_recv()); - /// }); - /// - /// let _ = tx.send(10); - /// sync_code.join().unwrap(); - /// } - /// ``` - #[track_caller] - #[cfg(feature = "sync")] - #[cfg_attr(docsrs, doc(alias = "recv_blocking"))] - pub fn blocking_recv(&mut self) -> Option { - crate::future::block_on(self.recv()) - } - - /// Closes the receiving half of a channel, without dropping it. - /// - /// This prevents any further messages from being sent on the channel while - /// still enabling the receiver to drain messages that are buffered. 
- /// - /// To guarantee that no messages are dropped, after calling `close()`, - /// `recv()` must be called until `None` is returned. - pub fn close(&mut self) { - self.chan.close(); - } - - /// Polls to receive the next message on this channel. - /// - /// This method returns: - /// - /// * `Poll::Pending` if no messages are available but the channel is not - /// closed, or if a spurious failure happens. - /// * `Poll::Ready(Some(message))` if a message is available. - /// * `Poll::Ready(None)` if the channel has been closed and all messages - /// sent before it was closed have been received. - /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided - /// `Context` is scheduled to receive a wakeup when a message is sent on any - /// receiver, or when the channel is closed. Note that on multiple calls to - /// `poll_recv`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. - /// - /// If this method returns `Poll::Pending` due to a spurious failure, then - /// the `Waker` will be notified when the situation causing the spurious - /// failure has been resolved. Note that receiving such a wakeup does not - /// guarantee that the next call will succeed — it could fail with another - /// spurious failure. - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.chan.recv(cx) - } -} - -impl UnboundedSender { - pub(crate) fn new(chan: chan::Tx) -> UnboundedSender { - UnboundedSender { chan } - } - - /// Attempts to send a message on this `UnboundedSender` without blocking. - /// - /// This method is not marked async because sending a message to an unbounded channel - /// never requires any form of waiting. Because of this, the `send` method can be - /// used in both synchronous and asynchronous code without problems. 
- /// - /// If the receive half of the channel is closed, either due to [`close`] - /// being called or the [`UnboundedReceiver`] having been dropped, this - /// function returns an error. The error includes the value passed to `send`. - /// - /// [`close`]: UnboundedReceiver::close - /// [`UnboundedReceiver`]: UnboundedReceiver - pub fn send(&self, message: T) -> Result<(), SendError> { - if !self.inc_num_messages() { - return Err(SendError(message)); - } - - self.chan.send(message); - Ok(()) - } - - fn inc_num_messages(&self) -> bool { - use std::process; - use std::sync::atomic::Ordering::{AcqRel, Acquire}; - - let mut curr = self.chan.semaphore().0.load(Acquire); - - loop { - if curr & 1 == 1 { - return false; - } - - if curr == usize::MAX ^ 1 { - // Overflowed the ref count. There is no safe way to recover, so - // abort the process. In practice, this should never happen. - process::abort() - } - - match self - .chan - .semaphore() - .0 - .compare_exchange(curr, curr + 2, AcqRel, Acquire) - { - Ok(_) => return true, - Err(actual) => { - curr = actual; - } - } - } - } - - /// Completes when the receiver has dropped. - /// - /// This allows the producers to get notified when interest in the produced - /// values is canceled and immediately stop doing work. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once the channel is closed, it stays closed - /// forever and all future calls to `closed` will return immediately. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::mpsc; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx1, rx) = mpsc::unbounded_channel::<()>(); - /// let tx2 = tx1.clone(); - /// let tx3 = tx1.clone(); - /// let tx4 = tx1.clone(); - /// let tx5 = tx1.clone(); - /// tokio::spawn(async move { - /// drop(rx); - /// }); - /// - /// futures::join!( - /// tx1.closed(), - /// tx2.closed(), - /// tx3.closed(), - /// tx4.closed(), - /// tx5.closed() - /// ); - //// println!("Receiver dropped"); - /// } - /// ``` - pub async fn closed(&self) { - self.chan.closed().await - } - - /// Checks if the channel has been closed. This happens when the - /// [`UnboundedReceiver`] is dropped, or when the - /// [`UnboundedReceiver::close`] method is called. - /// - /// [`UnboundedReceiver`]: crate::sync::mpsc::UnboundedReceiver - /// [`UnboundedReceiver::close`]: crate::sync::mpsc::UnboundedReceiver::close - /// - /// ``` - /// let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<()>(); - /// assert!(!tx.is_closed()); - /// - /// let tx2 = tx.clone(); - /// assert!(!tx2.is_closed()); - /// - /// drop(rx); - /// assert!(tx.is_closed()); - /// assert!(tx2.is_closed()); - /// ``` - pub fn is_closed(&self) -> bool { - self.chan.is_closed() - } - - /// Returns `true` if senders belong to the same channel. - /// - /// # Examples - /// - /// ``` - /// let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<()>(); - /// let tx2 = tx.clone(); - /// assert!(tx.same_channel(&tx2)); - /// - /// let (tx3, rx3) = tokio::sync::mpsc::unbounded_channel::<()>(); - /// assert!(!tx3.same_channel(&tx2)); - /// ``` - pub fn same_channel(&self, other: &Self) -> bool { - self.chan.same_channel(&other.chan) - } - - /// Converts the `UnboundedSender` to a [`WeakUnboundedSender`] that does not count - /// towards RAII semantics, i.e. if all `UnboundedSender` instances of the - /// channel were dropped and only `WeakUnboundedSender` instances remain, - /// the channel is closed. 
- pub fn downgrade(&self) -> WeakUnboundedSender { - WeakUnboundedSender { - chan: self.chan.downgrade(), - } - } -} - -impl Clone for WeakUnboundedSender { - fn clone(&self) -> Self { - WeakUnboundedSender { - chan: self.chan.clone(), - } - } -} - -impl WeakUnboundedSender { - /// Tries to convert a WeakUnboundedSender into an [`UnboundedSender`]. - /// This will return `Some` if there are other `Sender` instances alive and - /// the channel wasn't previously dropped, otherwise `None` is returned. - pub fn upgrade(&self) -> Option> { - chan::Tx::upgrade(self.chan.clone()).map(UnboundedSender::new) - } -} - -impl fmt::Debug for WeakUnboundedSender { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("WeakUnboundedSender").finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/mutex.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/mutex.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/mutex.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1382 +0,0 @@ -#![cfg_attr(not(feature = "sync"), allow(unreachable_pub, dead_code))] - -use crate::sync::batch_semaphore as semaphore; -#[cfg(all(tokio_unstable, feature = "tracing"))] -use crate::util::trace; - -use std::cell::UnsafeCell; -use std::error::Error; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut}; -use std::sync::Arc; -use std::{fmt, mem, ptr}; - -/// An asynchronous `Mutex`-like type. -/// -/// This type acts similarly to [`std::sync::Mutex`], with two major -/// differences: [`lock`] is an async method so does not block, and the lock -/// guard is designed to be held across `.await` points. -/// -/// # Which kind of mutex should you use? -/// -/// Contrary to popular belief, it is ok and often preferred to use the ordinary -/// [`Mutex`][std] from the standard library in asynchronous code. 
-/// -/// The feature that the async mutex offers over the blocking mutex is the -/// ability to keep it locked across an `.await` point. This makes the async -/// mutex more expensive than the blocking mutex, so the blocking mutex should -/// be preferred in the cases where it can be used. The primary use case for the -/// async mutex is to provide shared mutable access to IO resources such as a -/// database connection. If the value behind the mutex is just data, it's -/// usually appropriate to use a blocking mutex such as the one in the standard -/// library or [`parking_lot`]. -/// -/// Note that, although the compiler will not prevent the std `Mutex` from holding -/// its guard across `.await` points in situations where the task is not movable -/// between threads, this virtually never leads to correct concurrent code in -/// practice as it can easily lead to deadlocks. -/// -/// A common pattern is to wrap the `Arc>` in a struct that provides -/// non-async methods for performing operations on the data within, and only -/// lock the mutex inside these methods. The [mini-redis] example provides an -/// illustration of this pattern. -/// -/// Additionally, when you _do_ want shared access to an IO resource, it is -/// often better to spawn a task to manage the IO resource, and to use message -/// passing to communicate with that task. 
-/// -/// [std]: std::sync::Mutex -/// [`parking_lot`]: https://docs.rs/parking_lot -/// [mini-redis]: https://github.com/tokio-rs/mini-redis/blob/master/src/db.rs -/// -/// # Examples: -/// -/// ```rust,no_run -/// use tokio::sync::Mutex; -/// use std::sync::Arc; -/// -/// #[tokio::main] -/// async fn main() { -/// let data1 = Arc::new(Mutex::new(0)); -/// let data2 = Arc::clone(&data1); -/// -/// tokio::spawn(async move { -/// let mut lock = data2.lock().await; -/// *lock += 1; -/// }); -/// -/// let mut lock = data1.lock().await; -/// *lock += 1; -/// } -/// ``` -/// -/// -/// ```rust,no_run -/// use tokio::sync::Mutex; -/// use std::sync::Arc; -/// -/// #[tokio::main] -/// async fn main() { -/// let count = Arc::new(Mutex::new(0)); -/// -/// for i in 0..5 { -/// let my_count = Arc::clone(&count); -/// tokio::spawn(async move { -/// for j in 0..10 { -/// let mut lock = my_count.lock().await; -/// *lock += 1; -/// println!("{} {} {}", i, j, lock); -/// } -/// }); -/// } -/// -/// loop { -/// if *count.lock().await >= 50 { -/// break; -/// } -/// } -/// println!("Count hit 50."); -/// } -/// ``` -/// There are a few things of note here to pay attention to in this example. -/// 1. The mutex is wrapped in an [`Arc`] to allow it to be shared across -/// threads. -/// 2. Each spawned task obtains a lock and releases it on every iteration. -/// 3. Mutation of the data protected by the Mutex is done by de-referencing -/// the obtained lock as seen on lines 13 and 20. -/// -/// Tokio's Mutex works in a simple FIFO (first in, first out) style where all -/// calls to [`lock`] complete in the order they were performed. In that way the -/// Mutex is "fair" and predictable in how it distributes the locks to inner -/// data. Locks are released and reacquired after every iteration, so basically, -/// each thread goes to the back of the line after it increments the value once. 
-/// Note that there's some unpredictability to the timing between when the -/// threads are started, but once they are going they alternate predictably. -/// Finally, since there is only a single valid lock at any given time, there is -/// no possibility of a race condition when mutating the inner value. -/// -/// Note that in contrast to [`std::sync::Mutex`], this implementation does not -/// poison the mutex when a thread holding the [`MutexGuard`] panics. In such a -/// case, the mutex will be unlocked. If the panic is caught, this might leave -/// the data protected by the mutex in an inconsistent state. -/// -/// [`Mutex`]: struct@Mutex -/// [`MutexGuard`]: struct@MutexGuard -/// [`Arc`]: struct@std::sync::Arc -/// [`std::sync::Mutex`]: struct@std::sync::Mutex -/// [`Send`]: trait@std::marker::Send -/// [`lock`]: method@Mutex::lock -pub struct Mutex { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - s: semaphore::Semaphore, - c: UnsafeCell, -} - -/// A handle to a held `Mutex`. The guard can be held across any `.await` point -/// as it is [`Send`]. -/// -/// As long as you have this guard, you have exclusive access to the underlying -/// `T`. The guard internally borrows the `Mutex`, so the mutex will not be -/// dropped while a guard exists. -/// -/// The lock is automatically released whenever the guard is dropped, at which -/// point `lock` will succeed yet again. -#[clippy::has_significant_drop] -#[must_use = "if unused the Mutex will immediately unlock"] -pub struct MutexGuard<'a, T: ?Sized> { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - lock: &'a Mutex, -} - -/// An owned handle to a held `Mutex`. -/// -/// This guard is only available from a `Mutex` that is wrapped in an [`Arc`]. 
It -/// is identical to `MutexGuard`, except that rather than borrowing the `Mutex`, -/// it clones the `Arc`, incrementing the reference count. This means that -/// unlike `MutexGuard`, it will have the `'static` lifetime. -/// -/// As long as you have this guard, you have exclusive access to the underlying -/// `T`. The guard internally keeps a reference-counted pointer to the original -/// `Mutex`, so even if the lock goes away, the guard remains valid. -/// -/// The lock is automatically released whenever the guard is dropped, at which -/// point `lock` will succeed yet again. -/// -/// [`Arc`]: std::sync::Arc -#[clippy::has_significant_drop] -pub struct OwnedMutexGuard { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - lock: Arc>, -} - -/// A handle to a held `Mutex` that has had a function applied to it via [`MutexGuard::map`]. -/// -/// This can be used to hold a subfield of the protected data. -/// -/// [`MutexGuard::map`]: method@MutexGuard::map -#[clippy::has_significant_drop] -#[must_use = "if unused the Mutex will immediately unlock"] -pub struct MappedMutexGuard<'a, T: ?Sized> { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - s: &'a semaphore::Semaphore, - data: *mut T, - // Needed to tell the borrow checker that we are holding a `&mut T` - marker: PhantomData<&'a mut T>, -} - -/// A owned handle to a held `Mutex` that has had a function applied to it via -/// [`OwnedMutexGuard::map`]. -/// -/// This can be used to hold a subfield of the protected data. 
-/// -/// [`OwnedMutexGuard::map`]: method@OwnedMutexGuard::map -#[clippy::has_significant_drop] -#[must_use = "if unused the Mutex will immediately unlock"] -pub struct OwnedMappedMutexGuard { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - data: *mut U, - lock: Arc>, -} - -/// A helper type used when taking apart a `MutexGuard` without running its -/// Drop implementation. -#[allow(dead_code)] // Unused fields are still used in Drop. -struct MutexGuardInner<'a, T: ?Sized> { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - lock: &'a Mutex, -} - -/// A helper type used when taking apart a `OwnedMutexGuard` without running -/// its Drop implementation. -struct OwnedMutexGuardInner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - lock: Arc>, -} - -/// A helper type used when taking apart a `MappedMutexGuard` without running -/// its Drop implementation. -#[allow(dead_code)] // Unused fields are still used in Drop. -struct MappedMutexGuardInner<'a, T: ?Sized> { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - s: &'a semaphore::Semaphore, - data: *mut T, -} - -/// A helper type used when taking apart a `OwnedMappedMutexGuard` without running -/// its Drop implementation. -#[allow(dead_code)] // Unused fields are still used in Drop. -struct OwnedMappedMutexGuardInner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - data: *mut U, - lock: Arc>, -} - -// As long as T: Send, it's fine to send and share Mutex between threads. -// If T was not Send, sending and sharing a Mutex would be bad, since you can -// access T through Mutex. 
-unsafe impl Send for Mutex where T: ?Sized + Send {} -unsafe impl Sync for Mutex where T: ?Sized + Send {} -unsafe impl Sync for MutexGuard<'_, T> where T: ?Sized + Send + Sync {} -unsafe impl Sync for OwnedMutexGuard where T: ?Sized + Send + Sync {} -unsafe impl<'a, T> Sync for MappedMutexGuard<'a, T> where T: ?Sized + Sync + 'a {} -unsafe impl<'a, T> Send for MappedMutexGuard<'a, T> where T: ?Sized + Send + 'a {} - -unsafe impl Sync for OwnedMappedMutexGuard -where - T: ?Sized + Send + Sync, - U: ?Sized + Send + Sync, -{ -} -unsafe impl Send for OwnedMappedMutexGuard -where - T: ?Sized + Send, - U: ?Sized + Send, -{ -} - -/// Error returned from the [`Mutex::try_lock`], [`RwLock::try_read`] and -/// [`RwLock::try_write`] functions. -/// -/// `Mutex::try_lock` operation will only fail if the mutex is already locked. -/// -/// `RwLock::try_read` operation will only fail if the lock is currently held -/// by an exclusive writer. -/// -/// `RwLock::try_write` operation will only fail if the lock is currently held -/// by any reader or by an exclusive writer. -/// -/// [`Mutex::try_lock`]: Mutex::try_lock -/// [`RwLock::try_read`]: fn@super::RwLock::try_read -/// [`RwLock::try_write`]: fn@super::RwLock::try_write -#[derive(Debug)] -pub struct TryLockError(pub(super) ()); - -impl fmt::Display for TryLockError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "operation would block") - } -} - -impl Error for TryLockError {} - -#[test] -#[cfg(not(loom))] -fn bounds() { - fn check_send() {} - fn check_unpin() {} - // This has to take a value, since the async fn's return type is unnameable. 
- fn check_send_sync_val(_t: T) {} - fn check_send_sync() {} - fn check_static() {} - fn check_static_val(_t: T) {} - - check_send::>(); - check_send::>(); - check_unpin::>(); - check_send_sync::>(); - check_static::>(); - - let mutex = Mutex::new(1); - check_send_sync_val(mutex.lock()); - let arc_mutex = Arc::new(Mutex::new(1)); - check_send_sync_val(arc_mutex.clone().lock_owned()); - check_static_val(arc_mutex.lock_owned()); -} - -impl Mutex { - /// Creates a new lock in an unlocked state ready for use. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// - /// let lock = Mutex::new(5); - /// ``` - #[track_caller] - pub fn new(t: T) -> Self - where - T: Sized, - { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = { - let location = std::panic::Location::caller(); - - tracing::trace_span!( - "runtime.resource", - concrete_type = "Mutex", - kind = "Sync", - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ) - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let s = resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - locked = false, - ); - semaphore::Semaphore::new(1) - }); - - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - let s = semaphore::Semaphore::new(1); - - Self { - c: UnsafeCell::new(t), - s, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } - } - - /// Creates a new lock in an unlocked state ready for use. - /// - /// When using the `tracing` [unstable feature], a `Mutex` created with - /// `const_new` will not be instrumented. As such, it will not be visible - /// in [`tokio-console`]. Instead, [`Mutex::new`] should be used to create - /// an instrumented object if that is needed. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// - /// static LOCK: Mutex = Mutex::const_new(5); - /// ``` - /// - /// [`tokio-console`]: https://github.com/tokio-rs/console - /// [unstable feature]: crate#unstable-features - #[cfg(not(all(loom, test)))] - pub const fn const_new(t: T) -> Self - where - T: Sized, - { - Self { - c: UnsafeCell::new(t), - s: semaphore::Semaphore::const_new(1), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span::none(), - } - } - - /// Locks this mutex, causing the current task to yield until the lock has - /// been acquired. When the lock has been acquired, function returns a - /// [`MutexGuard`]. - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute locks in the order they - /// were requested. Cancelling a call to `lock` makes you lose your place in - /// the queue. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// - /// #[tokio::main] - /// async fn main() { - /// let mutex = Mutex::new(1); - /// - /// let mut n = mutex.lock().await; - /// *n = 2; - /// } - /// ``` - pub async fn lock(&self) -> MutexGuard<'_, T> { - let acquire_fut = async { - self.acquire().await; - - MutexGuard { - lock: self, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - } - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let acquire_fut = trace::async_op( - move || acquire_fut, - self.resource_span.clone(), - "Mutex::lock", - "poll", - false, - ); - - #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing - let guard = acquire_fut.await; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - locked = true, - ); - }); - - guard - } - - /// Blockingly locks this `Mutex`. When the lock has been acquired, function returns a - /// [`MutexGuard`]. 
- /// - /// This method is intended for use cases where you - /// need to use this mutex in asynchronous code as well as in synchronous code. - /// - /// # Panics - /// - /// This function panics if called within an asynchronous execution context. - /// - /// - If you find yourself in an asynchronous execution context and needing - /// to call some (synchronous) function which performs one of these - /// `blocking_` operations, then consider wrapping that call inside - /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking] - /// (or [`block_in_place()`][crate::task::block_in_place]). - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::Mutex; - /// - /// #[tokio::main] - /// async fn main() { - /// let mutex = Arc::new(Mutex::new(1)); - /// let lock = mutex.lock().await; - /// - /// let mutex1 = Arc::clone(&mutex); - /// let blocking_task = tokio::task::spawn_blocking(move || { - /// // This shall block until the `lock` is released. - /// let mut n = mutex1.blocking_lock(); - /// *n = 2; - /// }); - /// - /// assert_eq!(*lock, 1); - /// // Release the lock. - /// drop(lock); - /// - /// // Await the completion of the blocking task. - /// blocking_task.await.unwrap(); - /// - /// // Assert uncontended. - /// let n = mutex.try_lock().unwrap(); - /// assert_eq!(*n, 2); - /// } - /// - /// ``` - #[track_caller] - #[cfg(feature = "sync")] - #[cfg_attr(docsrs, doc(alias = "lock_blocking"))] - pub fn blocking_lock(&self) -> MutexGuard<'_, T> { - crate::future::block_on(self.lock()) - } - - /// Blockingly locks this `Mutex`. When the lock has been acquired, function returns an - /// [`OwnedMutexGuard`]. - /// - /// This method is identical to [`Mutex::blocking_lock`], except that the returned - /// guard references the `Mutex` with an [`Arc`] rather than by borrowing - /// it. 
Therefore, the `Mutex` must be wrapped in an `Arc` to call this - /// method, and the guard will live for the `'static` lifetime, as it keeps - /// the `Mutex` alive by holding an `Arc`. - /// - /// # Panics - /// - /// This function panics if called within an asynchronous execution context. - /// - /// - If you find yourself in an asynchronous execution context and needing - /// to call some (synchronous) function which performs one of these - /// `blocking_` operations, then consider wrapping that call inside - /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking] - /// (or [`block_in_place()`][crate::task::block_in_place]). - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::Mutex; - /// - /// #[tokio::main] - /// async fn main() { - /// let mutex = Arc::new(Mutex::new(1)); - /// let lock = mutex.lock().await; - /// - /// let mutex1 = Arc::clone(&mutex); - /// let blocking_task = tokio::task::spawn_blocking(move || { - /// // This shall block until the `lock` is released. - /// let mut n = mutex1.blocking_lock_owned(); - /// *n = 2; - /// }); - /// - /// assert_eq!(*lock, 1); - /// // Release the lock. - /// drop(lock); - /// - /// // Await the completion of the blocking task. - /// blocking_task.await.unwrap(); - /// - /// // Assert uncontended. - /// let n = mutex.try_lock().unwrap(); - /// assert_eq!(*n, 2); - /// } - /// - /// ``` - #[track_caller] - #[cfg(feature = "sync")] - pub fn blocking_lock_owned(self: Arc) -> OwnedMutexGuard { - crate::future::block_on(self.lock_owned()) - } - - /// Locks this mutex, causing the current task to yield until the lock has - /// been acquired. When the lock has been acquired, this returns an - /// [`OwnedMutexGuard`]. - /// - /// This method is identical to [`Mutex::lock`], except that the returned - /// guard references the `Mutex` with an [`Arc`] rather than by borrowing - /// it. 
Therefore, the `Mutex` must be wrapped in an `Arc` to call this - /// method, and the guard will live for the `'static` lifetime, as it keeps - /// the `Mutex` alive by holding an `Arc`. - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute locks in the order they - /// were requested. Cancelling a call to `lock_owned` makes you lose your - /// place in the queue. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// use std::sync::Arc; - /// - /// #[tokio::main] - /// async fn main() { - /// let mutex = Arc::new(Mutex::new(1)); - /// - /// let mut n = mutex.clone().lock_owned().await; - /// *n = 2; - /// } - /// ``` - /// - /// [`Arc`]: std::sync::Arc - pub async fn lock_owned(self: Arc) -> OwnedMutexGuard { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - - let acquire_fut = async { - self.acquire().await; - - OwnedMutexGuard { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - lock: self, - } - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let acquire_fut = trace::async_op( - move || acquire_fut, - resource_span, - "Mutex::lock_owned", - "poll", - false, - ); - - #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing - let guard = acquire_fut.await; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - locked = true, - ); - }); - - guard - } - - async fn acquire(&self) { - crate::trace::async_trace_leaf().await; - - self.s.acquire(1).await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and - // we own it exclusively, which means that this can never happen. - unreachable!() - }); - } - - /// Attempts to acquire the lock, and returns [`TryLockError`] if the - /// lock is currently held somewhere else. 
- /// - /// [`TryLockError`]: TryLockError - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// # async fn dox() -> Result<(), tokio::sync::TryLockError> { - /// - /// let mutex = Mutex::new(1); - /// - /// let n = mutex.try_lock()?; - /// assert_eq!(*n, 1); - /// # Ok(()) - /// # } - /// ``` - pub fn try_lock(&self) -> Result, TryLockError> { - match self.s.try_acquire(1) { - Ok(_) => { - let guard = MutexGuard { - lock: self, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - locked = true, - ); - }); - - Ok(guard) - } - Err(_) => Err(TryLockError(())), - } - } - - /// Returns a mutable reference to the underlying data. - /// - /// Since this call borrows the `Mutex` mutably, no actual locking needs to - /// take place -- the mutable borrow statically guarantees no locks exist. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// - /// fn main() { - /// let mut mutex = Mutex::new(1); - /// - /// let n = mutex.get_mut(); - /// *n = 2; - /// } - /// ``` - pub fn get_mut(&mut self) -> &mut T { - unsafe { - // Safety: This is https://github.com/rust-lang/rust/pull/76936 - &mut *self.c.get() - } - } - - /// Attempts to acquire the lock, and returns [`TryLockError`] if the lock - /// is currently held somewhere else. - /// - /// This method is identical to [`Mutex::try_lock`], except that the - /// returned guard references the `Mutex` with an [`Arc`] rather than by - /// borrowing it. Therefore, the `Mutex` must be wrapped in an `Arc` to call - /// this method, and the guard will live for the `'static` lifetime, as it - /// keeps the `Mutex` alive by holding an `Arc`. 
- /// - /// [`TryLockError`]: TryLockError - /// [`Arc`]: std::sync::Arc - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// use std::sync::Arc; - /// # async fn dox() -> Result<(), tokio::sync::TryLockError> { - /// - /// let mutex = Arc::new(Mutex::new(1)); - /// - /// let n = mutex.clone().try_lock_owned()?; - /// assert_eq!(*n, 1); - /// # Ok(()) - /// # } - pub fn try_lock_owned(self: Arc) -> Result, TryLockError> { - match self.s.try_acquire(1) { - Ok(_) => { - let guard = OwnedMutexGuard { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - lock: self, - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - locked = true, - ); - }); - - Ok(guard) - } - Err(_) => Err(TryLockError(())), - } - } - - /// Consumes the mutex, returning the underlying data. - /// # Examples - /// - /// ``` - /// use tokio::sync::Mutex; - /// - /// #[tokio::main] - /// async fn main() { - /// let mutex = Mutex::new(1); - /// - /// let n = mutex.into_inner(); - /// assert_eq!(n, 1); - /// } - /// ``` - pub fn into_inner(self) -> T - where - T: Sized, - { - self.c.into_inner() - } -} - -impl From for Mutex { - fn from(s: T) -> Self { - Self::new(s) - } -} - -impl Default for Mutex -where - T: Default, -{ - fn default() -> Self { - Self::new(T::default()) - } -} - -impl std::fmt::Debug for Mutex -where - T: std::fmt::Debug, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut d = f.debug_struct("Mutex"); - match self.try_lock() { - Ok(inner) => d.field("data", &&*inner), - Err(_) => d.field("data", &format_args!("")), - }; - d.finish() - } -} - -// === impl MutexGuard === - -impl<'a, T: ?Sized> MutexGuard<'a, T> { - fn skip_drop(self) -> MutexGuardInner<'a, T> { - let me = mem::ManuallyDrop::new(self); - // SAFETY: This duplicates the `resource_span` and then forgets the - // original. 
In the end, we have not duplicated or forgotten any values. - MutexGuardInner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: unsafe { std::ptr::read(&me.resource_span) }, - lock: me.lock, - } - } - - /// Makes a new [`MappedMutexGuard`] for a component of the locked data. - /// - /// This operation cannot fail as the [`MutexGuard`] passed in already locked the mutex. - /// - /// This is an associated function that needs to be used as `MutexGuard::map(...)`. A method - /// would interfere with methods of the same name on the contents of the locked data. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{Mutex, MutexGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let foo = Mutex::new(Foo(1)); - /// - /// { - /// let mut mapped = MutexGuard::map(foo.lock().await, |f| &mut f.0); - /// *mapped = 2; - /// } - /// - /// assert_eq!(Foo(2), *foo.lock().await); - /// # } - /// ``` - /// - /// [`MutexGuard`]: struct@MutexGuard - /// [`MappedMutexGuard`]: struct@MappedMutexGuard - #[inline] - pub fn map(mut this: Self, f: F) -> MappedMutexGuard<'a, U> - where - U: ?Sized, - F: FnOnce(&mut T) -> &mut U, - { - let data = f(&mut *this) as *mut U; - let inner = this.skip_drop(); - MappedMutexGuard { - s: &inner.lock.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: inner.resource_span, - } - } - - /// Attempts to make a new [`MappedMutexGuard`] for a component of the locked data. The - /// original guard is returned if the closure returns `None`. - /// - /// This operation cannot fail as the [`MutexGuard`] passed in already locked the mutex. - /// - /// This is an associated function that needs to be used as `MutexGuard::try_map(...)`. A - /// method would interfere with methods of the same name on the contents of the locked data. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{Mutex, MutexGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let foo = Mutex::new(Foo(1)); - /// - /// { - /// let mut mapped = MutexGuard::try_map(foo.lock().await, |f| Some(&mut f.0)) - /// .expect("should not fail"); - /// *mapped = 2; - /// } - /// - /// assert_eq!(Foo(2), *foo.lock().await); - /// # } - /// ``` - /// - /// [`MutexGuard`]: struct@MutexGuard - /// [`MappedMutexGuard`]: struct@MappedMutexGuard - #[inline] - pub fn try_map(mut this: Self, f: F) -> Result, Self> - where - U: ?Sized, - F: FnOnce(&mut T) -> Option<&mut U>, - { - let data = match f(&mut *this) { - Some(data) => data as *mut U, - None => return Err(this), - }; - let inner = this.skip_drop(); - Ok(MappedMutexGuard { - s: &inner.lock.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: inner.resource_span, - }) - } - - /// Returns a reference to the original `Mutex`. - /// - /// ``` - /// use tokio::sync::{Mutex, MutexGuard}; - /// - /// async fn unlock_and_relock<'l>(guard: MutexGuard<'l, u32>) -> MutexGuard<'l, u32> { - /// println!("1. contains: {:?}", *guard); - /// let mutex = MutexGuard::mutex(&guard); - /// drop(guard); - /// let guard = mutex.lock().await; - /// println!("2. 
contains: {:?}", *guard); - /// guard - /// } - /// # - /// # #[tokio::main] - /// # async fn main() { - /// # let mutex = Mutex::new(0u32); - /// # let guard = mutex.lock().await; - /// # let _guard = unlock_and_relock(guard).await; - /// # } - /// ``` - #[inline] - pub fn mutex(this: &Self) -> &'a Mutex { - this.lock - } -} - -impl Drop for MutexGuard<'_, T> { - fn drop(&mut self) { - self.lock.s.release(1); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - locked = false, - ); - }); - } -} - -impl Deref for MutexGuard<'_, T> { - type Target = T; - fn deref(&self) -> &Self::Target { - unsafe { &*self.lock.c.get() } - } -} - -impl DerefMut for MutexGuard<'_, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *self.lock.c.get() } - } -} - -impl fmt::Debug for MutexGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl fmt::Display for MutexGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -// === impl OwnedMutexGuard === - -impl OwnedMutexGuard { - fn skip_drop(self) -> OwnedMutexGuardInner { - let me = mem::ManuallyDrop::new(self); - // SAFETY: This duplicates the values in every field of the guard, then - // forgets the originals, so in the end no value is duplicated. - unsafe { - OwnedMutexGuardInner { - lock: ptr::read(&me.lock), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: ptr::read(&me.resource_span), - } - } - } - - /// Makes a new [`OwnedMappedMutexGuard`] for a component of the locked data. - /// - /// This operation cannot fail as the [`OwnedMutexGuard`] passed in already locked the mutex. - /// - /// This is an associated function that needs to be used as `OwnedMutexGuard::map(...)`. A method - /// would interfere with methods of the same name on the contents of the locked data. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{Mutex, OwnedMutexGuard}; - /// use std::sync::Arc; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let foo = Arc::new(Mutex::new(Foo(1))); - /// - /// { - /// let mut mapped = OwnedMutexGuard::map(foo.clone().lock_owned().await, |f| &mut f.0); - /// *mapped = 2; - /// } - /// - /// assert_eq!(Foo(2), *foo.lock().await); - /// # } - /// ``` - /// - /// [`OwnedMutexGuard`]: struct@OwnedMutexGuard - /// [`OwnedMappedMutexGuard`]: struct@OwnedMappedMutexGuard - #[inline] - pub fn map(mut this: Self, f: F) -> OwnedMappedMutexGuard - where - U: ?Sized, - F: FnOnce(&mut T) -> &mut U, - { - let data = f(&mut *this) as *mut U; - let inner = this.skip_drop(); - OwnedMappedMutexGuard { - data, - lock: inner.lock, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: inner.resource_span, - } - } - - /// Attempts to make a new [`OwnedMappedMutexGuard`] for a component of the locked data. The - /// original guard is returned if the closure returns `None`. - /// - /// This operation cannot fail as the [`OwnedMutexGuard`] passed in already locked the mutex. - /// - /// This is an associated function that needs to be used as `OwnedMutexGuard::try_map(...)`. A - /// method would interfere with methods of the same name on the contents of the locked data. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{Mutex, OwnedMutexGuard}; - /// use std::sync::Arc; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let foo = Arc::new(Mutex::new(Foo(1))); - /// - /// { - /// let mut mapped = OwnedMutexGuard::try_map(foo.clone().lock_owned().await, |f| Some(&mut f.0)) - /// .expect("should not fail"); - /// *mapped = 2; - /// } - /// - /// assert_eq!(Foo(2), *foo.lock().await); - /// # } - /// ``` - /// - /// [`OwnedMutexGuard`]: struct@OwnedMutexGuard - /// [`OwnedMappedMutexGuard`]: struct@OwnedMappedMutexGuard - #[inline] - pub fn try_map(mut this: Self, f: F) -> Result, Self> - where - U: ?Sized, - F: FnOnce(&mut T) -> Option<&mut U>, - { - let data = match f(&mut *this) { - Some(data) => data as *mut U, - None => return Err(this), - }; - let inner = this.skip_drop(); - Ok(OwnedMappedMutexGuard { - data, - lock: inner.lock, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: inner.resource_span, - }) - } - - /// Returns a reference to the original `Arc`. - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{Mutex, OwnedMutexGuard}; - /// - /// async fn unlock_and_relock(guard: OwnedMutexGuard) -> OwnedMutexGuard { - /// println!("1. contains: {:?}", *guard); - /// let mutex: Arc> = OwnedMutexGuard::mutex(&guard).clone(); - /// drop(guard); - /// let guard = mutex.lock_owned().await; - /// println!("2. 
contains: {:?}", *guard); - /// guard - /// } - /// # - /// # #[tokio::main] - /// # async fn main() { - /// # let mutex = Arc::new(Mutex::new(0u32)); - /// # let guard = mutex.lock_owned().await; - /// # unlock_and_relock(guard).await; - /// # } - /// ``` - #[inline] - pub fn mutex(this: &Self) -> &Arc> { - &this.lock - } -} - -impl Drop for OwnedMutexGuard { - fn drop(&mut self) { - self.lock.s.release(1); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - locked = false, - ); - }); - } -} - -impl Deref for OwnedMutexGuard { - type Target = T; - fn deref(&self) -> &Self::Target { - unsafe { &*self.lock.c.get() } - } -} - -impl DerefMut for OwnedMutexGuard { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *self.lock.c.get() } - } -} - -impl fmt::Debug for OwnedMutexGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl fmt::Display for OwnedMutexGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -// === impl MappedMutexGuard === - -impl<'a, T: ?Sized> MappedMutexGuard<'a, T> { - fn skip_drop(self) -> MappedMutexGuardInner<'a, T> { - let me = mem::ManuallyDrop::new(self); - MappedMutexGuardInner { - s: me.s, - data: me.data, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: unsafe { std::ptr::read(&me.resource_span) }, - } - } - - /// Makes a new [`MappedMutexGuard`] for a component of the locked data. - /// - /// This operation cannot fail as the [`MappedMutexGuard`] passed in already locked the mutex. - /// - /// This is an associated function that needs to be used as `MappedMutexGuard::map(...)`. A - /// method would interfere with methods of the same name on the contents of the locked data. 
- /// - /// [`MappedMutexGuard`]: struct@MappedMutexGuard - #[inline] - pub fn map(mut this: Self, f: F) -> MappedMutexGuard<'a, U> - where - F: FnOnce(&mut T) -> &mut U, - { - let data = f(&mut *this) as *mut U; - let inner = this.skip_drop(); - MappedMutexGuard { - s: inner.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: inner.resource_span, - } - } - - /// Attempts to make a new [`MappedMutexGuard`] for a component of the locked data. The - /// original guard is returned if the closure returns `None`. - /// - /// This operation cannot fail as the [`MappedMutexGuard`] passed in already locked the mutex. - /// - /// This is an associated function that needs to be used as `MappedMutexGuard::try_map(...)`. A - /// method would interfere with methods of the same name on the contents of the locked data. - /// - /// [`MappedMutexGuard`]: struct@MappedMutexGuard - #[inline] - pub fn try_map(mut this: Self, f: F) -> Result, Self> - where - F: FnOnce(&mut T) -> Option<&mut U>, - { - let data = match f(&mut *this) { - Some(data) => data as *mut U, - None => return Err(this), - }; - let inner = this.skip_drop(); - Ok(MappedMutexGuard { - s: inner.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: inner.resource_span, - }) - } -} - -impl<'a, T: ?Sized> Drop for MappedMutexGuard<'a, T> { - fn drop(&mut self) { - self.s.release(1); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - locked = false, - ); - }); - } -} - -impl<'a, T: ?Sized> Deref for MappedMutexGuard<'a, T> { - type Target = T; - fn deref(&self) -> &Self::Target { - unsafe { &*self.data } - } -} - -impl<'a, T: ?Sized> DerefMut for MappedMutexGuard<'a, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *self.data } - } -} - -impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for 
MappedMutexGuard<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized + fmt::Display> fmt::Display for MappedMutexGuard<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -// === impl OwnedMappedMutexGuard === - -impl OwnedMappedMutexGuard { - fn skip_drop(self) -> OwnedMappedMutexGuardInner { - let me = mem::ManuallyDrop::new(self); - // SAFETY: This duplicates the values in every field of the guard, then - // forgets the originals, so in the end no value is duplicated. - unsafe { - OwnedMappedMutexGuardInner { - data: me.data, - lock: ptr::read(&me.lock), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: ptr::read(&me.resource_span), - } - } - } - - /// Makes a new [`OwnedMappedMutexGuard`] for a component of the locked data. - /// - /// This operation cannot fail as the [`OwnedMappedMutexGuard`] passed in already locked the mutex. - /// - /// This is an associated function that needs to be used as `OwnedMappedMutexGuard::map(...)`. A method - /// would interfere with methods of the same name on the contents of the locked data. - /// - /// [`OwnedMappedMutexGuard`]: struct@OwnedMappedMutexGuard - #[inline] - pub fn map(mut this: Self, f: F) -> OwnedMappedMutexGuard - where - F: FnOnce(&mut U) -> &mut S, - { - let data = f(&mut *this) as *mut S; - let inner = this.skip_drop(); - OwnedMappedMutexGuard { - data, - lock: inner.lock, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: inner.resource_span, - } - } - - /// Attempts to make a new [`OwnedMappedMutexGuard`] for a component of the locked data. The - /// original guard is returned if the closure returns `None`. - /// - /// This operation cannot fail as the [`OwnedMutexGuard`] passed in already locked the mutex. - /// - /// This is an associated function that needs to be used as `OwnedMutexGuard::try_map(...)`. 
A - /// method would interfere with methods of the same name on the contents of the locked data. - /// - /// [`OwnedMutexGuard`]: struct@OwnedMutexGuard - /// [`OwnedMappedMutexGuard`]: struct@OwnedMappedMutexGuard - #[inline] - pub fn try_map(mut this: Self, f: F) -> Result, Self> - where - F: FnOnce(&mut U) -> Option<&mut S>, - { - let data = match f(&mut *this) { - Some(data) => data as *mut S, - None => return Err(this), - }; - let inner = this.skip_drop(); - Ok(OwnedMappedMutexGuard { - data, - lock: inner.lock, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: inner.resource_span, - }) - } -} - -impl Drop for OwnedMappedMutexGuard { - fn drop(&mut self) { - self.lock.s.release(1); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - locked = false, - ); - }); - } -} - -impl Deref for OwnedMappedMutexGuard { - type Target = U; - fn deref(&self) -> &Self::Target { - unsafe { &*self.data } - } -} - -impl DerefMut for OwnedMappedMutexGuard { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *self.data } - } -} - -impl fmt::Debug for OwnedMappedMutexGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl fmt::Display for OwnedMappedMutexGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/notify.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/notify.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/notify.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/notify.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1173 +0,0 @@ -// Allow `unreachable_pub` warnings when sync is not enabled -// due to the usage of `Notify` within the `rt` feature set. -// When this module is compiled with `sync` enabled we will warn on -// this lint. 
When `rt` is enabled we use `pub(crate)` which -// triggers this warning but it is safe to ignore in this case. -#![cfg_attr(not(feature = "sync"), allow(unreachable_pub, dead_code))] - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::Mutex; -use crate::util::linked_list::{self, GuardedLinkedList, LinkedList}; -use crate::util::WakeList; - -use std::future::Future; -use std::marker::PhantomPinned; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::atomic::Ordering::{self, Acquire, Relaxed, Release, SeqCst}; -use std::task::{Context, Poll, Waker}; - -type WaitList = LinkedList::Target>; -type GuardedWaitList = GuardedLinkedList::Target>; - -/// Notifies a single task to wake up. -/// -/// `Notify` provides a basic mechanism to notify a single task of an event. -/// `Notify` itself does not carry any data. Instead, it is to be used to signal -/// another task to perform an operation. -/// -/// A `Notify` can be thought of as a [`Semaphore`] starting with 0 permits. The -/// [`notified().await`] method waits for a permit to become available, and -/// [`notify_one()`] sets a permit **if there currently are no available -/// permits**. -/// -/// The synchronization details of `Notify` are similar to -/// [`thread::park`][park] and [`Thread::unpark`][unpark] from std. A [`Notify`] -/// value contains a single permit. [`notified().await`] waits for the permit to -/// be made available, consumes the permit, and resumes. [`notify_one()`] sets -/// the permit, waking a pending task if there is one. -/// -/// If `notify_one()` is called **before** `notified().await`, then the next -/// call to `notified().await` will complete immediately, consuming the permit. -/// Any subsequent calls to `notified().await` will wait for a new permit. -/// -/// If `notify_one()` is called **multiple** times before `notified().await`, -/// only a **single** permit is stored. 
The next call to `notified().await` will -/// complete immediately, but the one after will wait for a new permit. -/// -/// # Examples -/// -/// Basic usage. -/// -/// ``` -/// use tokio::sync::Notify; -/// use std::sync::Arc; -/// -/// #[tokio::main] -/// async fn main() { -/// let notify = Arc::new(Notify::new()); -/// let notify2 = notify.clone(); -/// -/// let handle = tokio::spawn(async move { -/// notify2.notified().await; -/// println!("received notification"); -/// }); -/// -/// println!("sending notification"); -/// notify.notify_one(); -/// -/// // Wait for task to receive notification. -/// handle.await.unwrap(); -/// } -/// ``` -/// -/// Unbound multi-producer single-consumer (mpsc) channel. -/// -/// No wakeups can be lost when using this channel because the call to -/// `notify_one()` will store a permit in the `Notify`, which the following call -/// to `notified()` will consume. -/// -/// ``` -/// use tokio::sync::Notify; -/// -/// use std::collections::VecDeque; -/// use std::sync::Mutex; -/// -/// struct Channel { -/// values: Mutex>, -/// notify: Notify, -/// } -/// -/// impl Channel { -/// pub fn send(&self, value: T) { -/// self.values.lock().unwrap() -/// .push_back(value); -/// -/// // Notify the consumer a value is available -/// self.notify.notify_one(); -/// } -/// -/// // This is a single-consumer channel, so several concurrent calls to -/// // `recv` are not allowed. -/// pub async fn recv(&self) -> T { -/// loop { -/// // Drain values -/// if let Some(value) = self.values.lock().unwrap().pop_front() { -/// return value; -/// } -/// -/// // Wait for values to be available -/// self.notify.notified().await; -/// } -/// } -/// } -/// ``` -/// -/// Unbound multi-producer multi-consumer (mpmc) channel. -/// -/// The call to [`enable`] is important because otherwise if you have two -/// calls to `recv` and two calls to `send` in parallel, the following could -/// happen: -/// -/// 1. Both calls to `try_recv` return `None`. -/// 2. 
Both new elements are added to the vector. -/// 3. The `notify_one` method is called twice, adding only a single -/// permit to the `Notify`. -/// 4. Both calls to `recv` reach the `Notified` future. One of them -/// consumes the permit, and the other sleeps forever. -/// -/// By adding the `Notified` futures to the list by calling `enable` before -/// `try_recv`, the `notify_one` calls in step three would remove the -/// futures from the list and mark them notified instead of adding a permit -/// to the `Notify`. This ensures that both futures are woken. -/// -/// Notice that this failure can only happen if there are two concurrent calls -/// to `recv`. This is why the mpsc example above does not require a call to -/// `enable`. -/// -/// ``` -/// use tokio::sync::Notify; -/// -/// use std::collections::VecDeque; -/// use std::sync::Mutex; -/// -/// struct Channel { -/// messages: Mutex>, -/// notify_on_sent: Notify, -/// } -/// -/// impl Channel { -/// pub fn send(&self, msg: T) { -/// let mut locked_queue = self.messages.lock().unwrap(); -/// locked_queue.push_back(msg); -/// drop(locked_queue); -/// -/// // Send a notification to one of the calls currently -/// // waiting in a call to `recv`. -/// self.notify_on_sent.notify_one(); -/// } -/// -/// pub fn try_recv(&self) -> Option { -/// let mut locked_queue = self.messages.lock().unwrap(); -/// locked_queue.pop_front() -/// } -/// -/// pub async fn recv(&self) -> T { -/// let future = self.notify_on_sent.notified(); -/// tokio::pin!(future); -/// -/// loop { -/// // Make sure that no wakeup is lost if we get -/// // `None` from `try_recv`. -/// future.as_mut().enable(); -/// -/// if let Some(msg) = self.try_recv() { -/// return msg; -/// } -/// -/// // Wait for a call to `notify_one`. -/// // -/// // This uses `.as_mut()` to avoid consuming the future, -/// // which lets us call `Pin::set` below. 
-/// future.as_mut().await; -/// -/// // Reset the future in case another call to -/// // `try_recv` got the message before us. -/// future.set(self.notify_on_sent.notified()); -/// } -/// } -/// } -/// ``` -/// -/// [park]: std::thread::park -/// [unpark]: std::thread::Thread::unpark -/// [`notified().await`]: Notify::notified() -/// [`notify_one()`]: Notify::notify_one() -/// [`enable`]: Notified::enable() -/// [`Semaphore`]: crate::sync::Semaphore -#[derive(Debug)] -pub struct Notify { - // `state` uses 2 bits to store one of `EMPTY`, - // `WAITING` or `NOTIFIED`. The rest of the bits - // are used to store the number of times `notify_waiters` - // was called. - // - // Throughout the code there are two assumptions: - // - state can be transitioned *from* `WAITING` only if - // `waiters` lock is held - // - number of times `notify_waiters` was called can - // be modified only if `waiters` lock is held - state: AtomicUsize, - waiters: Mutex, -} - -#[derive(Debug)] -struct Waiter { - /// Intrusive linked-list pointers. - pointers: linked_list::Pointers, - - /// Waiting task's waker. Depending on the value of `notification`, - /// this field is either protected by the `waiters` lock in - /// `Notify`, or it is exclusively owned by the enclosing `Waiter`. - waker: UnsafeCell>, - - /// Notification for this waiter. - /// * if it's `None`, then `waker` is protected by the `waiters` lock. - /// * if it's `Some`, then `waker` is exclusively owned by the - /// enclosing `Waiter` and can be accessed without locking. - notification: AtomicNotification, - - /// Should not be `Unpin`. - _p: PhantomPinned, -} - -impl Waiter { - fn new() -> Waiter { - Waiter { - pointers: linked_list::Pointers::new(), - waker: UnsafeCell::new(None), - notification: AtomicNotification::none(), - _p: PhantomPinned, - } - } -} - -generate_addr_of_methods! { - impl<> Waiter { - unsafe fn addr_of_pointers(self: NonNull) -> NonNull> { - &self.pointers - } - } -} - -// No notification. 
-const NOTIFICATION_NONE: usize = 0; - -// Notification type used by `notify_one`. -const NOTIFICATION_ONE: usize = 1; - -// Notification type used by `notify_waiters`. -const NOTIFICATION_ALL: usize = 2; - -/// Notification for a `Waiter`. -/// This struct is equivalent to `Option`, but uses -/// `AtomicUsize` inside for atomic operations. -#[derive(Debug)] -struct AtomicNotification(AtomicUsize); - -impl AtomicNotification { - fn none() -> Self { - AtomicNotification(AtomicUsize::new(NOTIFICATION_NONE)) - } - - /// Store-release a notification. - /// This method should be called exactly once. - fn store_release(&self, notification: Notification) { - self.0.store(notification as usize, Release); - } - - fn load(&self, ordering: Ordering) -> Option { - match self.0.load(ordering) { - NOTIFICATION_NONE => None, - NOTIFICATION_ONE => Some(Notification::One), - NOTIFICATION_ALL => Some(Notification::All), - _ => unreachable!(), - } - } - - /// Clears the notification. - /// This method is used by a `Notified` future to consume the - /// notification. It uses relaxed ordering and should be only - /// used once the atomic notification is no longer shared. - fn clear(&self) { - self.0.store(NOTIFICATION_NONE, Relaxed); - } -} - -#[derive(Debug, PartialEq, Eq)] -#[repr(usize)] -enum Notification { - One = NOTIFICATION_ONE, - All = NOTIFICATION_ALL, -} - -/// List used in `Notify::notify_waiters`. It wraps a guarded linked list -/// and gates the access to it on `notify.waiters` mutex. It also empties -/// the list on drop. 
-struct NotifyWaitersList<'a> { - list: GuardedWaitList, - is_empty: bool, - notify: &'a Notify, -} - -impl<'a> NotifyWaitersList<'a> { - fn new( - unguarded_list: WaitList, - guard: Pin<&'a Waiter>, - notify: &'a Notify, - ) -> NotifyWaitersList<'a> { - let guard_ptr = NonNull::from(guard.get_ref()); - let list = unguarded_list.into_guarded(guard_ptr); - NotifyWaitersList { - list, - is_empty: false, - notify, - } - } - - /// Removes the last element from the guarded list. Modifying this list - /// requires an exclusive access to the main list in `Notify`. - fn pop_back_locked(&mut self, _waiters: &mut WaitList) -> Option> { - let result = self.list.pop_back(); - if result.is_none() { - // Save information about emptiness to avoid waiting for lock - // in the destructor. - self.is_empty = true; - } - result - } -} - -impl Drop for NotifyWaitersList<'_> { - fn drop(&mut self) { - // If the list is not empty, we unlink all waiters from it. - // We do not wake the waiters to avoid double panics. - if !self.is_empty { - let _lock_guard = self.notify.waiters.lock(); - while let Some(waiter) = self.list.pop_back() { - // Safety: we never make mutable references to waiters. - let waiter = unsafe { waiter.as_ref() }; - waiter.notification.store_release(Notification::All); - } - } - } -} - -/// Future returned from [`Notify::notified()`]. -/// -/// This future is fused, so once it has completed, any future calls to poll -/// will immediately return `Poll::Ready`. -#[derive(Debug)] -pub struct Notified<'a> { - /// The `Notify` being received on. - notify: &'a Notify, - - /// The current state of the receiving process. - state: State, - - /// Number of calls to `notify_waiters` at the time of creation. - notify_waiters_calls: usize, - - /// Entry in the waiter `LinkedList`. 
- waiter: Waiter, -} - -unsafe impl<'a> Send for Notified<'a> {} -unsafe impl<'a> Sync for Notified<'a> {} - -#[derive(Debug)] -enum State { - Init, - Waiting, - Done, -} - -const NOTIFY_WAITERS_SHIFT: usize = 2; -const STATE_MASK: usize = (1 << NOTIFY_WAITERS_SHIFT) - 1; -const NOTIFY_WAITERS_CALLS_MASK: usize = !STATE_MASK; - -/// Initial "idle" state. -const EMPTY: usize = 0; - -/// One or more threads are currently waiting to be notified. -const WAITING: usize = 1; - -/// Pending notification. -const NOTIFIED: usize = 2; - -fn set_state(data: usize, state: usize) -> usize { - (data & NOTIFY_WAITERS_CALLS_MASK) | (state & STATE_MASK) -} - -fn get_state(data: usize) -> usize { - data & STATE_MASK -} - -fn get_num_notify_waiters_calls(data: usize) -> usize { - (data & NOTIFY_WAITERS_CALLS_MASK) >> NOTIFY_WAITERS_SHIFT -} - -fn inc_num_notify_waiters_calls(data: usize) -> usize { - data + (1 << NOTIFY_WAITERS_SHIFT) -} - -fn atomic_inc_num_notify_waiters_calls(data: &AtomicUsize) { - data.fetch_add(1 << NOTIFY_WAITERS_SHIFT, SeqCst); -} - -impl Notify { - /// Create a new `Notify`, initialized without a permit. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Notify; - /// - /// let notify = Notify::new(); - /// ``` - pub fn new() -> Notify { - Notify { - state: AtomicUsize::new(0), - waiters: Mutex::new(LinkedList::new()), - } - } - - /// Create a new `Notify`, initialized without a permit. - /// - /// When using the `tracing` [unstable feature], a `Notify` created with - /// `const_new` will not be instrumented. As such, it will not be visible - /// in [`tokio-console`]. Instead, [`Notify::new`] should be used to create - /// an instrumented object if that is needed. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Notify; - /// - /// static NOTIFY: Notify = Notify::const_new(); - /// ``` - /// - /// [`tokio-console`]: https://github.com/tokio-rs/console - /// [unstable feature]: crate#unstable-features - #[cfg(not(all(loom, test)))] - pub const fn const_new() -> Notify { - Notify { - state: AtomicUsize::new(0), - waiters: Mutex::const_new(LinkedList::new()), - } - } - - /// Wait for a notification. - /// - /// Equivalent to: - /// - /// ```ignore - /// async fn notified(&self); - /// ``` - /// - /// Each `Notify` value holds a single permit. If a permit is available from - /// an earlier call to [`notify_one()`], then `notified().await` will complete - /// immediately, consuming that permit. Otherwise, `notified().await` waits - /// for a permit to be made available by the next call to `notify_one()`. - /// - /// The `Notified` future is not guaranteed to receive wakeups from calls to - /// `notify_one()` if it has not yet been polled. See the documentation for - /// [`Notified::enable()`] for more details. - /// - /// The `Notified` future is guaranteed to receive wakeups from - /// `notify_waiters()` as soon as it has been created, even if it has not - /// yet been polled. - /// - /// [`notify_one()`]: Notify::notify_one - /// [`Notified::enable()`]: Notified::enable - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute notifications in the order - /// they were requested. Cancelling a call to `notified` makes you lose your - /// place in the queue. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Notify; - /// use std::sync::Arc; - /// - /// #[tokio::main] - /// async fn main() { - /// let notify = Arc::new(Notify::new()); - /// let notify2 = notify.clone(); - /// - /// tokio::spawn(async move { - /// notify2.notified().await; - /// println!("received notification"); - /// }); - /// - /// println!("sending notification"); - /// notify.notify_one(); - /// } - /// ``` - pub fn notified(&self) -> Notified<'_> { - // we load the number of times notify_waiters - // was called and store that in the future. - let state = self.state.load(SeqCst); - Notified { - notify: self, - state: State::Init, - notify_waiters_calls: get_num_notify_waiters_calls(state), - waiter: Waiter::new(), - } - } - - /// Notifies a waiting task. - /// - /// If a task is currently waiting, that task is notified. Otherwise, a - /// permit is stored in this `Notify` value and the **next** call to - /// [`notified().await`] will complete immediately consuming the permit made - /// available by this call to `notify_one()`. - /// - /// At most one permit may be stored by `Notify`. Many sequential calls to - /// `notify_one` will result in a single permit being stored. The next call to - /// `notified().await` will complete immediately, but the one after that - /// will wait. 
- /// - /// [`notified().await`]: Notify::notified() - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Notify; - /// use std::sync::Arc; - /// - /// #[tokio::main] - /// async fn main() { - /// let notify = Arc::new(Notify::new()); - /// let notify2 = notify.clone(); - /// - /// tokio::spawn(async move { - /// notify2.notified().await; - /// println!("received notification"); - /// }); - /// - /// println!("sending notification"); - /// notify.notify_one(); - /// } - /// ``` - // Alias for old name in 0.x - #[cfg_attr(docsrs, doc(alias = "notify"))] - pub fn notify_one(&self) { - // Load the current state - let mut curr = self.state.load(SeqCst); - - // If the state is `EMPTY`, transition to `NOTIFIED` and return. - while let EMPTY | NOTIFIED = get_state(curr) { - // The compare-exchange from `NOTIFIED` -> `NOTIFIED` is intended. A - // happens-before synchronization must happen between this atomic - // operation and a task calling `notified().await`. - let new = set_state(curr, NOTIFIED); - let res = self.state.compare_exchange(curr, new, SeqCst, SeqCst); - - match res { - // No waiters, no further work to do - Ok(_) => return, - Err(actual) => { - curr = actual; - } - } - } - - // There are waiters, the lock must be acquired to notify. - let mut waiters = self.waiters.lock(); - - // The state must be reloaded while the lock is held. The state may only - // transition out of WAITING while the lock is held. - curr = self.state.load(SeqCst); - - if let Some(waker) = notify_locked(&mut waiters, &self.state, curr) { - drop(waiters); - waker.wake(); - } - } - - /// Notifies all waiting tasks. - /// - /// If a task is currently waiting, that task is notified. Unlike with - /// `notify_one()`, no permit is stored to be used by the next call to - /// `notified().await`. The purpose of this method is to notify all - /// already registered waiters. 
Registering for notification is done by - /// acquiring an instance of the `Notified` future via calling `notified()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Notify; - /// use std::sync::Arc; - /// - /// #[tokio::main] - /// async fn main() { - /// let notify = Arc::new(Notify::new()); - /// let notify2 = notify.clone(); - /// - /// let notified1 = notify.notified(); - /// let notified2 = notify.notified(); - /// - /// let handle = tokio::spawn(async move { - /// println!("sending notifications"); - /// notify2.notify_waiters(); - /// }); - /// - /// notified1.await; - /// notified2.await; - /// println!("received notifications"); - /// } - /// ``` - pub fn notify_waiters(&self) { - let mut waiters = self.waiters.lock(); - - // The state must be loaded while the lock is held. The state may only - // transition out of WAITING while the lock is held. - let curr = self.state.load(SeqCst); - - if matches!(get_state(curr), EMPTY | NOTIFIED) { - // There are no waiting tasks. All we need to do is increment the - // number of times this method was called. - atomic_inc_num_notify_waiters_calls(&self.state); - return; - } - - // Increment the number of times this method was called - // and transition to empty. - let new_state = set_state(inc_num_notify_waiters_calls(curr), EMPTY); - self.state.store(new_state, SeqCst); - - // It is critical for `GuardedLinkedList` safety that the guard node is - // pinned in memory and is not dropped until the guarded list is dropped. - let guard = Waiter::new(); - pin!(guard); - - // We move all waiters to a secondary list. It uses a `GuardedLinkedList` - // underneath to allow every waiter to safely remove itself from it. - // - // * This list will be still guarded by the `waiters` lock. - // `NotifyWaitersList` wrapper makes sure we hold the lock to modify it. - // * This wrapper will empty the list on drop. 
It is critical for safety - // that we will not leave any list entry with a pointer to the local - // guard node after this function returns / panics. - let mut list = NotifyWaitersList::new(std::mem::take(&mut *waiters), guard.as_ref(), self); - - let mut wakers = WakeList::new(); - 'outer: loop { - while wakers.can_push() { - match list.pop_back_locked(&mut waiters) { - Some(waiter) => { - // Safety: we never make mutable references to waiters. - let waiter = unsafe { waiter.as_ref() }; - - // Safety: we hold the lock, so we can access the waker. - if let Some(waker) = - unsafe { waiter.waker.with_mut(|waker| (*waker).take()) } - { - wakers.push(waker); - } - - // This waiter is unlinked and will not be shared ever again, release it. - waiter.notification.store_release(Notification::All); - } - None => { - break 'outer; - } - } - } - - // Release the lock before notifying. - drop(waiters); - - // One of the wakers may panic, but the remaining waiters will still - // be unlinked from the list in `NotifyWaitersList` destructor. - wakers.wake_all(); - - // Acquire the lock again. 
- waiters = self.waiters.lock(); - } - - // Release the lock before notifying - drop(waiters); - - wakers.wake_all(); - } -} - -impl Default for Notify { - fn default() -> Notify { - Notify::new() - } -} - -impl UnwindSafe for Notify {} -impl RefUnwindSafe for Notify {} - -fn notify_locked(waiters: &mut WaitList, state: &AtomicUsize, curr: usize) -> Option { - loop { - match get_state(curr) { - EMPTY | NOTIFIED => { - let res = state.compare_exchange(curr, set_state(curr, NOTIFIED), SeqCst, SeqCst); - - match res { - Ok(_) => return None, - Err(actual) => { - let actual_state = get_state(actual); - assert!(actual_state == EMPTY || actual_state == NOTIFIED); - state.store(set_state(actual, NOTIFIED), SeqCst); - return None; - } - } - } - WAITING => { - // At this point, it is guaranteed that the state will not - // concurrently change as holding the lock is required to - // transition **out** of `WAITING`. - // - // Get a pending waiter - let waiter = waiters.pop_back().unwrap(); - - // Safety: we never make mutable references to waiters. - let waiter = unsafe { waiter.as_ref() }; - - // Safety: we hold the lock, so we can access the waker. - let waker = unsafe { waiter.waker.with_mut(|waker| (*waker).take()) }; - - // This waiter is unlinked and will not be shared ever again, release it. - waiter.notification.store_release(Notification::One); - - if waiters.is_empty() { - // As this the **final** waiter in the list, the state - // must be transitioned to `EMPTY`. As transitioning - // **from** `WAITING` requires the lock to be held, a - // `store` is sufficient. - state.store(set_state(curr, EMPTY), SeqCst); - } - - return waker; - } - _ => unreachable!(), - } - } -} - -// ===== impl Notified ===== - -impl Notified<'_> { - /// Adds this future to the list of futures that are ready to receive - /// wakeups from calls to [`notify_one`]. 
- /// - /// Polling the future also adds it to the list, so this method should only - /// be used if you want to add the future to the list before the first call - /// to `poll`. (In fact, this method is equivalent to calling `poll` except - /// that no `Waker` is registered.) - /// - /// This has no effect on notifications sent using [`notify_waiters`], which - /// are received as long as they happen after the creation of the `Notified` - /// regardless of whether `enable` or `poll` has been called. - /// - /// This method returns true if the `Notified` is ready. This happens in the - /// following situations: - /// - /// 1. The `notify_waiters` method was called between the creation of the - /// `Notified` and the call to this method. - /// 2. This is the first call to `enable` or `poll` on this future, and the - /// `Notify` was holding a permit from a previous call to `notify_one`. - /// The call consumes the permit in that case. - /// 3. The future has previously been enabled or polled, and it has since - /// then been marked ready by either consuming a permit from the - /// `Notify`, or by a call to `notify_one` or `notify_waiters` that - /// removed it from the list of futures ready to receive wakeups. - /// - /// If this method returns true, any future calls to poll on the same future - /// will immediately return `Poll::Ready`. - /// - /// # Examples - /// - /// Unbound multi-producer multi-consumer (mpmc) channel. - /// - /// The call to `enable` is important because otherwise if you have two - /// calls to `recv` and two calls to `send` in parallel, the following could - /// happen: - /// - /// 1. Both calls to `try_recv` return `None`. - /// 2. Both new elements are added to the vector. - /// 3. The `notify_one` method is called twice, adding only a single - /// permit to the `Notify`. - /// 4. Both calls to `recv` reach the `Notified` future. One of them - /// consumes the permit, and the other sleeps forever. 
- /// - /// By adding the `Notified` futures to the list by calling `enable` before - /// `try_recv`, the `notify_one` calls in step three would remove the - /// futures from the list and mark them notified instead of adding a permit - /// to the `Notify`. This ensures that both futures are woken. - /// - /// ``` - /// use tokio::sync::Notify; - /// - /// use std::collections::VecDeque; - /// use std::sync::Mutex; - /// - /// struct Channel { - /// messages: Mutex>, - /// notify_on_sent: Notify, - /// } - /// - /// impl Channel { - /// pub fn send(&self, msg: T) { - /// let mut locked_queue = self.messages.lock().unwrap(); - /// locked_queue.push_back(msg); - /// drop(locked_queue); - /// - /// // Send a notification to one of the calls currently - /// // waiting in a call to `recv`. - /// self.notify_on_sent.notify_one(); - /// } - /// - /// pub fn try_recv(&self) -> Option { - /// let mut locked_queue = self.messages.lock().unwrap(); - /// locked_queue.pop_front() - /// } - /// - /// pub async fn recv(&self) -> T { - /// let future = self.notify_on_sent.notified(); - /// tokio::pin!(future); - /// - /// loop { - /// // Make sure that no wakeup is lost if we get - /// // `None` from `try_recv`. - /// future.as_mut().enable(); - /// - /// if let Some(msg) = self.try_recv() { - /// return msg; - /// } - /// - /// // Wait for a call to `notify_one`. - /// // - /// // This uses `.as_mut()` to avoid consuming the future, - /// // which lets us call `Pin::set` below. - /// future.as_mut().await; - /// - /// // Reset the future in case another call to - /// // `try_recv` got the message before us. 
- /// future.set(self.notify_on_sent.notified()); - /// } - /// } - /// } - /// ``` - /// - /// [`notify_one`]: Notify::notify_one() - /// [`notify_waiters`]: Notify::notify_waiters() - pub fn enable(self: Pin<&mut Self>) -> bool { - self.poll_notified(None).is_ready() - } - - /// A custom `project` implementation is used in place of `pin-project-lite` - /// as a custom drop implementation is needed. - fn project(self: Pin<&mut Self>) -> (&Notify, &mut State, &usize, &Waiter) { - unsafe { - // Safety: `notify`, `state` and `notify_waiters_calls` are `Unpin`. - - is_unpin::<&Notify>(); - is_unpin::(); - is_unpin::(); - - let me = self.get_unchecked_mut(); - ( - me.notify, - &mut me.state, - &me.notify_waiters_calls, - &me.waiter, - ) - } - } - - fn poll_notified(self: Pin<&mut Self>, waker: Option<&Waker>) -> Poll<()> { - let (notify, state, notify_waiters_calls, waiter) = self.project(); - - 'outer_loop: loop { - match *state { - State::Init => { - let curr = notify.state.load(SeqCst); - - // Optimistically try acquiring a pending notification - let res = notify.state.compare_exchange( - set_state(curr, NOTIFIED), - set_state(curr, EMPTY), - SeqCst, - SeqCst, - ); - - if res.is_ok() { - // Acquired the notification - *state = State::Done; - continue 'outer_loop; - } - - // Clone the waker before locking, a waker clone can be - // triggering arbitrary code. - let waker = waker.cloned(); - - // Acquire the lock and attempt to transition to the waiting - // state. - let mut waiters = notify.waiters.lock(); - - // Reload the state with the lock held - let mut curr = notify.state.load(SeqCst); - - // if notify_waiters has been called after the future - // was created, then we are done - if get_num_notify_waiters_calls(curr) != *notify_waiters_calls { - *state = State::Done; - continue 'outer_loop; - } - - // Transition the state to WAITING. 
- loop { - match get_state(curr) { - EMPTY => { - // Transition to WAITING - let res = notify.state.compare_exchange( - set_state(curr, EMPTY), - set_state(curr, WAITING), - SeqCst, - SeqCst, - ); - - if let Err(actual) = res { - assert_eq!(get_state(actual), NOTIFIED); - curr = actual; - } else { - break; - } - } - WAITING => break, - NOTIFIED => { - // Try consuming the notification - let res = notify.state.compare_exchange( - set_state(curr, NOTIFIED), - set_state(curr, EMPTY), - SeqCst, - SeqCst, - ); - - match res { - Ok(_) => { - // Acquired the notification - *state = State::Done; - continue 'outer_loop; - } - Err(actual) => { - assert_eq!(get_state(actual), EMPTY); - curr = actual; - } - } - } - _ => unreachable!(), - } - } - - let mut old_waker = None; - if waker.is_some() { - // Safety: called while locked. - // - // The use of `old_waiter` here is not necessary, as the field is always - // None when we reach this line. - unsafe { - old_waker = - waiter.waker.with_mut(|v| std::mem::replace(&mut *v, waker)); - } - } - - // Insert the waiter into the linked list - waiters.push_front(NonNull::from(waiter)); - - *state = State::Waiting; - - drop(waiters); - drop(old_waker); - - return Poll::Pending; - } - State::Waiting => { - #[cfg(tokio_taskdump)] - if let Some(waker) = waker { - let mut ctx = Context::from_waker(waker); - ready!(crate::trace::trace_leaf(&mut ctx)); - } - - if waiter.notification.load(Acquire).is_some() { - // Safety: waiter is already unlinked and will not be shared again, - // so we have an exclusive access to `waker`. - drop(unsafe { waiter.waker.with_mut(|waker| (*waker).take()) }); - - waiter.notification.clear(); - *state = State::Done; - return Poll::Ready(()); - } - - // Our waiter was not notified, implying it is still stored in a waiter - // list (guarded by `notify.waiters`). In order to access the waker - // fields, we must acquire the lock. 
- - let mut old_waker = None; - let mut waiters = notify.waiters.lock(); - - // We hold the lock and notifications are set only with the lock held, - // so this can be relaxed, because the happens-before relationship is - // established through the mutex. - if waiter.notification.load(Relaxed).is_some() { - // Safety: waiter is already unlinked and will not be shared again, - // so we have an exclusive access to `waker`. - old_waker = unsafe { waiter.waker.with_mut(|waker| (*waker).take()) }; - - waiter.notification.clear(); - - // Drop the old waker after releasing the lock. - drop(waiters); - drop(old_waker); - - *state = State::Done; - return Poll::Ready(()); - } - - // Load the state with the lock held. - let curr = notify.state.load(SeqCst); - - if get_num_notify_waiters_calls(curr) != *notify_waiters_calls { - // Before we add a waiter to the list we check if these numbers are - // different while holding the lock. If these numbers are different now, - // it means that there is a call to `notify_waiters` in progress and this - // waiter must be contained by a guarded list used in `notify_waiters`. - // We can treat the waiter as notified and remove it from the list, as - // it would have been notified in the `notify_waiters` call anyways. - - // Safety: we hold the lock, so we can modify the waker. - old_waker = unsafe { waiter.waker.with_mut(|waker| (*waker).take()) }; - - // Safety: we hold the lock, so we have an exclusive access to the list. - // The list is used in `notify_waiters`, so it must be guarded. - unsafe { waiters.remove(NonNull::from(waiter)) }; - - *state = State::Done; - } else { - // Safety: we hold the lock, so we can modify the waker. 
- unsafe { - waiter.waker.with_mut(|v| { - if let Some(waker) = waker { - let should_update = match &*v { - Some(current_waker) => !current_waker.will_wake(waker), - None => true, - }; - if should_update { - old_waker = std::mem::replace(&mut *v, Some(waker.clone())); - } - } - }); - } - - // Drop the old waker after releasing the lock. - drop(waiters); - drop(old_waker); - - return Poll::Pending; - } - - // Explicit drop of the lock to indicate the scope that the - // lock is held. Because holding the lock is required to - // ensure safe access to fields not held within the lock, it - // is helpful to visualize the scope of the critical - // section. - drop(waiters); - - // Drop the old waker after releasing the lock. - drop(old_waker); - } - State::Done => { - #[cfg(tokio_taskdump)] - if let Some(waker) = waker { - let mut ctx = Context::from_waker(waker); - ready!(crate::trace::trace_leaf(&mut ctx)); - } - return Poll::Ready(()); - } - } - } - } -} - -impl Future for Notified<'_> { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - self.poll_notified(Some(cx.waker())) - } -} - -impl Drop for Notified<'_> { - fn drop(&mut self) { - // Safety: The type only transitions to a "Waiting" state when pinned. - let (notify, state, _, waiter) = unsafe { Pin::new_unchecked(self).project() }; - - // This is where we ensure safety. The `Notified` value is being - // dropped, which means we must ensure that the waiter entry is no - // longer stored in the linked list. - if matches!(*state, State::Waiting) { - let mut waiters = notify.waiters.lock(); - let mut notify_state = notify.state.load(SeqCst); - - // We hold the lock, so this field is not concurrently accessed by - // `notify_*` functions and we can use the relaxed ordering. 
- let notification = waiter.notification.load(Relaxed); - - // remove the entry from the list (if not already removed) - // - // Safety: we hold the lock, so we have an exclusive access to every list the - // waiter may be contained in. If the node is not contained in the `waiters` - // list, then it is contained by a guarded list used by `notify_waiters`. - unsafe { waiters.remove(NonNull::from(waiter)) }; - - if waiters.is_empty() && get_state(notify_state) == WAITING { - notify_state = set_state(notify_state, EMPTY); - notify.state.store(notify_state, SeqCst); - } - - // See if the node was notified but not received. In this case, if - // the notification was triggered via `notify_one`, it must be sent - // to the next waiter. - if notification == Some(Notification::One) { - if let Some(waker) = notify_locked(&mut waiters, ¬ify.state, notify_state) { - drop(waiters); - waker.wake(); - } - } - } - } -} - -/// # Safety -/// -/// `Waiter` is forced to be !Unpin. -unsafe impl linked_list::Link for Waiter { - type Handle = NonNull; - type Target = Waiter; - - fn as_raw(handle: &NonNull) -> NonNull { - *handle - } - - unsafe fn from_raw(ptr: NonNull) -> NonNull { - ptr - } - - unsafe fn pointers(target: NonNull) -> NonNull> { - Waiter::addr_of_pointers(target) - } -} - -fn is_unpin() {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/once_cell.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/once_cell.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/once_cell.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/once_cell.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,508 +0,0 @@ -use super::{Semaphore, SemaphorePermit, TryAcquireError}; -use crate::loom::cell::UnsafeCell; -use std::error::Error; -use std::fmt; -use std::future::Future; -use std::mem::MaybeUninit; -use std::ops::Drop; -use std::ptr; -use std::sync::atomic::{AtomicBool, Ordering}; - -// This file contains an implementation of an OnceCell. 
The principle -// behind the safety the of the cell is that any thread with an `&OnceCell` may -// access the `value` field according the following rules: -// -// 1. When `value_set` is false, the `value` field may be modified by the -// thread holding the permit on the semaphore. -// 2. When `value_set` is true, the `value` field may be accessed immutably by -// any thread. -// -// It is an invariant that if the semaphore is closed, then `value_set` is true. -// The reverse does not necessarily hold — but if not, the semaphore may not -// have any available permits. -// -// A thread with a `&mut OnceCell` may modify the value in any way it wants as -// long as the invariants are upheld. - -/// A thread-safe cell that can be written to only once. -/// -/// A `OnceCell` is typically used for global variables that need to be -/// initialized once on first use, but need no further changes. The `OnceCell` -/// in Tokio allows the initialization procedure to be asynchronous. -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::OnceCell; -/// -/// async fn some_computation() -> u32 { -/// 1 + 1 -/// } -/// -/// static ONCE: OnceCell = OnceCell::const_new(); -/// -/// #[tokio::main] -/// async fn main() { -/// let result = ONCE.get_or_init(some_computation).await; -/// assert_eq!(*result, 2); -/// } -/// ``` -/// -/// It is often useful to write a wrapper method for accessing the value. 
-/// -/// ``` -/// use tokio::sync::OnceCell; -/// -/// static ONCE: OnceCell = OnceCell::const_new(); -/// -/// async fn get_global_integer() -> &'static u32 { -/// ONCE.get_or_init(|| async { -/// 1 + 1 -/// }).await -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let result = get_global_integer().await; -/// assert_eq!(*result, 2); -/// } -/// ``` -pub struct OnceCell { - value_set: AtomicBool, - value: UnsafeCell>, - semaphore: Semaphore, -} - -impl Default for OnceCell { - fn default() -> OnceCell { - OnceCell::new() - } -} - -impl fmt::Debug for OnceCell { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("OnceCell") - .field("value", &self.get()) - .finish() - } -} - -impl Clone for OnceCell { - fn clone(&self) -> OnceCell { - OnceCell::new_with(self.get().cloned()) - } -} - -impl PartialEq for OnceCell { - fn eq(&self, other: &OnceCell) -> bool { - self.get() == other.get() - } -} - -impl Eq for OnceCell {} - -impl Drop for OnceCell { - fn drop(&mut self) { - if self.initialized_mut() { - unsafe { - self.value - .with_mut(|ptr| ptr::drop_in_place((*ptr).as_mut_ptr())); - }; - } - } -} - -impl From for OnceCell { - fn from(value: T) -> Self { - OnceCell { - value_set: AtomicBool::new(true), - value: UnsafeCell::new(MaybeUninit::new(value)), - semaphore: Semaphore::new_closed(), - } - } -} - -impl OnceCell { - /// Creates a new empty `OnceCell` instance. - pub fn new() -> Self { - OnceCell { - value_set: AtomicBool::new(false), - value: UnsafeCell::new(MaybeUninit::uninit()), - semaphore: Semaphore::new(1), - } - } - - /// Creates a new `OnceCell` that contains the provided value, if any. - /// - /// If the `Option` is `None`, this is equivalent to `OnceCell::new`. 
- /// - /// [`OnceCell::new`]: crate::sync::OnceCell::new - // Once https://github.com/rust-lang/rust/issues/73255 lands - // and tokio MSRV is bumped to the rustc version with it stablised, - // we can made this function available in const context, - // by creating `Semaphore::const_new_closed`. - pub fn new_with(value: Option) -> Self { - if let Some(v) = value { - OnceCell::from(v) - } else { - OnceCell::new() - } - } - - /// Creates a new `OnceCell` that contains the provided value. - /// - /// # Example - /// - /// When using the `tracing` [unstable feature], a `OnceCell` created with - /// `const_new_with` will not be instrumented. As such, it will not be - /// visible in [`tokio-console`]. Instead, [`OnceCell::new_with`] should be - /// used to create an instrumented object if that is needed. - /// - /// ``` - /// use tokio::sync::OnceCell; - /// - /// static ONCE: OnceCell = OnceCell::const_new_with(1); - /// - /// async fn get_global_integer() -> &'static u32 { - /// ONCE.get_or_init(|| async { - /// 1 + 1 - /// }).await - /// } - /// - /// #[tokio::main] - /// async fn main() { - /// let result = get_global_integer().await; - /// assert_eq!(*result, 1); - /// } - /// ``` - /// - /// [`tokio-console`]: https://github.com/tokio-rs/console - /// [unstable feature]: crate#unstable-features - #[cfg(not(all(loom, test)))] - pub const fn const_new_with(value: T) -> Self { - OnceCell { - value_set: AtomicBool::new(true), - value: UnsafeCell::new(MaybeUninit::new(value)), - semaphore: Semaphore::const_new_closed(), - } - } - - /// Creates a new empty `OnceCell` instance. - /// - /// Equivalent to `OnceCell::new`, except that it can be used in static - /// variables. - /// - /// When using the `tracing` [unstable feature], a `OnceCell` created with - /// `const_new` will not be instrumented. As such, it will not be visible - /// in [`tokio-console`]. Instead, [`OnceCell::new`] should be used to - /// create an instrumented object if that is needed. 
- /// - /// # Example - /// - /// ``` - /// use tokio::sync::OnceCell; - /// - /// static ONCE: OnceCell = OnceCell::const_new(); - /// - /// async fn get_global_integer() -> &'static u32 { - /// ONCE.get_or_init(|| async { - /// 1 + 1 - /// }).await - /// } - /// - /// #[tokio::main] - /// async fn main() { - /// let result = get_global_integer().await; - /// assert_eq!(*result, 2); - /// } - /// ``` - /// - /// [`tokio-console`]: https://github.com/tokio-rs/console - /// [unstable feature]: crate#unstable-features - #[cfg(not(all(loom, test)))] - pub const fn const_new() -> Self { - OnceCell { - value_set: AtomicBool::new(false), - value: UnsafeCell::new(MaybeUninit::uninit()), - semaphore: Semaphore::const_new(1), - } - } - - /// Returns `true` if the `OnceCell` currently contains a value, and `false` - /// otherwise. - pub fn initialized(&self) -> bool { - // Using acquire ordering so any threads that read a true from this - // atomic is able to read the value. - self.value_set.load(Ordering::Acquire) - } - - /// Returns `true` if the `OnceCell` currently contains a value, and `false` - /// otherwise. - fn initialized_mut(&mut self) -> bool { - *self.value_set.get_mut() - } - - // SAFETY: The OnceCell must not be empty. - unsafe fn get_unchecked(&self) -> &T { - &*self.value.with(|ptr| (*ptr).as_ptr()) - } - - // SAFETY: The OnceCell must not be empty. - unsafe fn get_unchecked_mut(&mut self) -> &mut T { - &mut *self.value.with_mut(|ptr| (*ptr).as_mut_ptr()) - } - - fn set_value(&self, value: T, permit: SemaphorePermit<'_>) -> &T { - // SAFETY: We are holding the only permit on the semaphore. - unsafe { - self.value.with_mut(|ptr| (*ptr).as_mut_ptr().write(value)); - } - - // Using release ordering so any threads that read a true from this - // atomic is able to read the value we just stored. - self.value_set.store(true, Ordering::Release); - self.semaphore.close(); - permit.forget(); - - // SAFETY: We just initialized the cell. 
- unsafe { self.get_unchecked() } - } - - /// Returns a reference to the value currently stored in the `OnceCell`, or - /// `None` if the `OnceCell` is empty. - pub fn get(&self) -> Option<&T> { - if self.initialized() { - Some(unsafe { self.get_unchecked() }) - } else { - None - } - } - - /// Returns a mutable reference to the value currently stored in the - /// `OnceCell`, or `None` if the `OnceCell` is empty. - /// - /// Since this call borrows the `OnceCell` mutably, it is safe to mutate the - /// value inside the `OnceCell` — the mutable borrow statically guarantees - /// no other references exist. - pub fn get_mut(&mut self) -> Option<&mut T> { - if self.initialized_mut() { - Some(unsafe { self.get_unchecked_mut() }) - } else { - None - } - } - - /// Sets the value of the `OnceCell` to the given value if the `OnceCell` is - /// empty. - /// - /// If the `OnceCell` already has a value, this call will fail with an - /// [`SetError::AlreadyInitializedError`]. - /// - /// If the `OnceCell` is empty, but some other task is currently trying to - /// set the value, this call will fail with [`SetError::InitializingError`]. - /// - /// [`SetError::AlreadyInitializedError`]: crate::sync::SetError::AlreadyInitializedError - /// [`SetError::InitializingError`]: crate::sync::SetError::InitializingError - pub fn set(&self, value: T) -> Result<(), SetError> { - if self.initialized() { - return Err(SetError::AlreadyInitializedError(value)); - } - - // Another task might be initializing the cell, in which case - // `try_acquire` will return an error. If we succeed to acquire the - // permit, then we can set the value. - match self.semaphore.try_acquire() { - Ok(permit) => { - debug_assert!(!self.initialized()); - self.set_value(value, permit); - Ok(()) - } - Err(TryAcquireError::NoPermits) => { - // Some other task is holding the permit. That task is - // currently trying to initialize the value. 
- Err(SetError::InitializingError(value)) - } - Err(TryAcquireError::Closed) => { - // The semaphore was closed. Some other task has initialized - // the value. - Err(SetError::AlreadyInitializedError(value)) - } - } - } - - /// Gets the value currently in the `OnceCell`, or initialize it with the - /// given asynchronous operation. - /// - /// If some other task is currently working on initializing the `OnceCell`, - /// this call will wait for that other task to finish, then return the value - /// that the other task produced. - /// - /// If the provided operation is cancelled or panics, the initialization - /// attempt is cancelled. If there are other tasks waiting for the value to - /// be initialized, one of them will start another attempt at initializing - /// the value. - /// - /// This will deadlock if `f` tries to initialize the cell recursively. - pub async fn get_or_init(&self, f: F) -> &T - where - F: FnOnce() -> Fut, - Fut: Future, - { - crate::trace::async_trace_leaf().await; - - if self.initialized() { - // SAFETY: The OnceCell has been fully initialized. - unsafe { self.get_unchecked() } - } else { - // Here we try to acquire the semaphore permit. Holding the permit - // will allow us to set the value of the OnceCell, and prevents - // other tasks from initializing the OnceCell while we are holding - // it. - match self.semaphore.acquire().await { - Ok(permit) => { - debug_assert!(!self.initialized()); - - // If `f()` panics or `select!` is called, this - // `get_or_init` call is aborted and the semaphore permit is - // dropped. - let value = f().await; - - self.set_value(value, permit) - } - Err(_) => { - debug_assert!(self.initialized()); - - // SAFETY: The semaphore has been closed. This only happens - // when the OnceCell is fully initialized. - unsafe { self.get_unchecked() } - } - } - } - } - - /// Gets the value currently in the `OnceCell`, or initialize it with the - /// given asynchronous operation. 
- /// - /// If some other task is currently working on initializing the `OnceCell`, - /// this call will wait for that other task to finish, then return the value - /// that the other task produced. - /// - /// If the provided operation returns an error, is cancelled or panics, the - /// initialization attempt is cancelled. If there are other tasks waiting - /// for the value to be initialized, one of them will start another attempt - /// at initializing the value. - /// - /// This will deadlock if `f` tries to initialize the cell recursively. - pub async fn get_or_try_init(&self, f: F) -> Result<&T, E> - where - F: FnOnce() -> Fut, - Fut: Future>, - { - crate::trace::async_trace_leaf().await; - - if self.initialized() { - // SAFETY: The OnceCell has been fully initialized. - unsafe { Ok(self.get_unchecked()) } - } else { - // Here we try to acquire the semaphore permit. Holding the permit - // will allow us to set the value of the OnceCell, and prevents - // other tasks from initializing the OnceCell while we are holding - // it. - match self.semaphore.acquire().await { - Ok(permit) => { - debug_assert!(!self.initialized()); - - // If `f()` panics or `select!` is called, this - // `get_or_try_init` call is aborted and the semaphore - // permit is dropped. - let value = f().await; - - match value { - Ok(value) => Ok(self.set_value(value, permit)), - Err(e) => Err(e), - } - } - Err(_) => { - debug_assert!(self.initialized()); - - // SAFETY: The semaphore has been closed. This only happens - // when the OnceCell is fully initialized. - unsafe { Ok(self.get_unchecked()) } - } - } - } - } - - /// Takes the value from the cell, destroying the cell in the process. - /// Returns `None` if the cell is empty. 
- pub fn into_inner(mut self) -> Option { - if self.initialized_mut() { - // Set to uninitialized for the destructor of `OnceCell` to work properly - *self.value_set.get_mut() = false; - Some(unsafe { self.value.with(|ptr| ptr::read(ptr).assume_init()) }) - } else { - None - } - } - - /// Takes ownership of the current value, leaving the cell empty. Returns - /// `None` if the cell is empty. - pub fn take(&mut self) -> Option { - std::mem::take(self).into_inner() - } -} - -// Since `get` gives us access to immutable references of the OnceCell, OnceCell -// can only be Sync if T is Sync, otherwise OnceCell would allow sharing -// references of !Sync values across threads. We need T to be Send in order for -// OnceCell to by Sync because we can use `set` on `&OnceCell` to send values -// (of type T) across threads. -unsafe impl Sync for OnceCell {} - -// Access to OnceCell's value is guarded by the semaphore permit -// and atomic operations on `value_set`, so as long as T itself is Send -// it's safe to send it to another thread -unsafe impl Send for OnceCell {} - -/// Errors that can be returned from [`OnceCell::set`]. -/// -/// [`OnceCell::set`]: crate::sync::OnceCell::set -#[derive(Debug, PartialEq, Eq)] -pub enum SetError { - /// The cell was already initialized when [`OnceCell::set`] was called. - /// - /// [`OnceCell::set`]: crate::sync::OnceCell::set - AlreadyInitializedError(T), - - /// The cell is currently being initialized. - InitializingError(T), -} - -impl fmt::Display for SetError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SetError::AlreadyInitializedError(_) => write!(f, "AlreadyInitializedError"), - SetError::InitializingError(_) => write!(f, "InitializingError"), - } - } -} - -impl Error for SetError {} - -impl SetError { - /// Whether `SetError` is `SetError::AlreadyInitializedError`. 
- pub fn is_already_init_err(&self) -> bool { - match self { - SetError::AlreadyInitializedError(_) => true, - SetError::InitializingError(_) => false, - } - } - - /// Whether `SetError` is `SetError::InitializingError` - pub fn is_initializing_err(&self) -> bool { - match self { - SetError::AlreadyInitializedError(_) => false, - SetError::InitializingError(_) => true, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/oneshot.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/oneshot.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/oneshot.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/oneshot.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1389 +0,0 @@ -#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))] - -//! A one-shot channel is used for sending a single message between -//! asynchronous tasks. The [`channel`] function is used to create a -//! [`Sender`] and [`Receiver`] handle pair that form the channel. -//! -//! The `Sender` handle is used by the producer to send the value. -//! The `Receiver` handle is used by the consumer to receive the value. -//! -//! Each handle can be used on separate tasks. -//! -//! Since the `send` method is not async, it can be used anywhere. This includes -//! sending between two runtimes, and using it from non-async code. -//! -//! If the [`Receiver`] is closed before receiving a message which has already -//! been sent, the message will remain in the channel until the receiver is -//! dropped, at which point the message will be dropped immediately. -//! -//! # Examples -//! -//! ``` -//! use tokio::sync::oneshot; -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, rx) = oneshot::channel(); -//! -//! tokio::spawn(async move { -//! if let Err(_) = tx.send(3) { -//! println!("the receiver dropped"); -//! } -//! }); -//! -//! match rx.await { -//! Ok(v) => println!("got = {:?}", v), -//! Err(_) => println!("the sender dropped"), -//! } -//! } -//! 
``` -//! -//! If the sender is dropped without sending, the receiver will fail with -//! [`error::RecvError`]: -//! -//! ``` -//! use tokio::sync::oneshot; -//! -//! #[tokio::main] -//! async fn main() { -//! let (tx, rx) = oneshot::channel::(); -//! -//! tokio::spawn(async move { -//! drop(tx); -//! }); -//! -//! match rx.await { -//! Ok(_) => panic!("This doesn't happen"), -//! Err(_) => println!("the sender dropped"), -//! } -//! } -//! ``` -//! -//! To use a oneshot channel in a `tokio::select!` loop, add `&mut` in front of -//! the channel. -//! -//! ``` -//! use tokio::sync::oneshot; -//! use tokio::time::{interval, sleep, Duration}; -//! -//! #[tokio::main] -//! # async fn _doc() {} -//! # #[tokio::main(flavor = "current_thread", start_paused = true)] -//! async fn main() { -//! let (send, mut recv) = oneshot::channel(); -//! let mut interval = interval(Duration::from_millis(100)); -//! -//! # let handle = -//! tokio::spawn(async move { -//! sleep(Duration::from_secs(1)).await; -//! send.send("shut down").unwrap(); -//! }); -//! -//! loop { -//! tokio::select! { -//! _ = interval.tick() => println!("Another 100ms"), -//! msg = &mut recv => { -//! println!("Got message: {}", msg.unwrap()); -//! break; -//! } -//! } -//! } -//! # handle.await.unwrap(); -//! } -//! ``` -//! -//! To use a `Sender` from a destructor, put it in an [`Option`] and call -//! [`Option::take`]. -//! -//! ``` -//! use tokio::sync::oneshot; -//! -//! struct SendOnDrop { -//! sender: Option>, -//! } -//! impl Drop for SendOnDrop { -//! fn drop(&mut self) { -//! if let Some(sender) = self.sender.take() { -//! // Using `let _ =` to ignore send errors. -//! let _ = sender.send("I got dropped!"); -//! } -//! } -//! } -//! -//! #[tokio::main] -//! # async fn _doc() {} -//! # #[tokio::main(flavor = "current_thread")] -//! async fn main() { -//! let (send, recv) = oneshot::channel(); -//! -//! let send_on_drop = SendOnDrop { sender: Some(send) }; -//! drop(send_on_drop); -//! -//! 
assert_eq!(recv.await, Ok("I got dropped!")); -//! } -//! ``` - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::Arc; -#[cfg(all(tokio_unstable, feature = "tracing"))] -use crate::util::trace; - -use std::fmt; -use std::future::Future; -use std::mem::MaybeUninit; -use std::pin::Pin; -use std::sync::atomic::Ordering::{self, AcqRel, Acquire}; -use std::task::Poll::{Pending, Ready}; -use std::task::{Context, Poll, Waker}; - -/// Sends a value to the associated [`Receiver`]. -/// -/// A pair of both a [`Sender`] and a [`Receiver`] are created by the -/// [`channel`](fn@channel) function. -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, rx) = oneshot::channel(); -/// -/// tokio::spawn(async move { -/// if let Err(_) = tx.send(3) { -/// println!("the receiver dropped"); -/// } -/// }); -/// -/// match rx.await { -/// Ok(v) => println!("got = {:?}", v), -/// Err(_) => println!("the sender dropped"), -/// } -/// } -/// ``` -/// -/// If the sender is dropped without sending, the receiver will fail with -/// [`error::RecvError`]: -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, rx) = oneshot::channel::(); -/// -/// tokio::spawn(async move { -/// drop(tx); -/// }); -/// -/// match rx.await { -/// Ok(_) => panic!("This doesn't happen"), -/// Err(_) => println!("the sender dropped"), -/// } -/// } -/// ``` -/// -/// To use a `Sender` from a destructor, put it in an [`Option`] and call -/// [`Option::take`]. -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// struct SendOnDrop { -/// sender: Option>, -/// } -/// impl Drop for SendOnDrop { -/// fn drop(&mut self) { -/// if let Some(sender) = self.sender.take() { -/// // Using `let _ =` to ignore send errors. 
-/// let _ = sender.send("I got dropped!"); -/// } -/// } -/// } -/// -/// #[tokio::main] -/// # async fn _doc() {} -/// # #[tokio::main(flavor = "current_thread")] -/// async fn main() { -/// let (send, recv) = oneshot::channel(); -/// -/// let send_on_drop = SendOnDrop { sender: Some(send) }; -/// drop(send_on_drop); -/// -/// assert_eq!(recv.await, Ok("I got dropped!")); -/// } -/// ``` -/// -/// [`Option`]: std::option::Option -/// [`Option::take`]: std::option::Option::take -#[derive(Debug)] -pub struct Sender { - inner: Option>>, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, -} - -/// Receives a value from the associated [`Sender`]. -/// -/// A pair of both a [`Sender`] and a [`Receiver`] are created by the -/// [`channel`](fn@channel) function. -/// -/// This channel has no `recv` method because the receiver itself implements the -/// [`Future`] trait. To receive a `Result`, `.await` the `Receiver` object directly. -/// -/// The `poll` method on the `Future` trait is allowed to spuriously return -/// `Poll::Pending` even if the message has been sent. If such a spurious -/// failure happens, then the caller will be woken when the spurious failure has -/// been resolved so that the caller can attempt to receive the message again. -/// Note that receiving such a wakeup does not guarantee that the next call will -/// succeed — it could fail with another spurious failure. (A spurious failure -/// does not mean that the message is lost. It is just delayed.) 
-/// -/// [`Future`]: trait@std::future::Future -/// -/// # Examples -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, rx) = oneshot::channel(); -/// -/// tokio::spawn(async move { -/// if let Err(_) = tx.send(3) { -/// println!("the receiver dropped"); -/// } -/// }); -/// -/// match rx.await { -/// Ok(v) => println!("got = {:?}", v), -/// Err(_) => println!("the sender dropped"), -/// } -/// } -/// ``` -/// -/// If the sender is dropped without sending, the receiver will fail with -/// [`error::RecvError`]: -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, rx) = oneshot::channel::(); -/// -/// tokio::spawn(async move { -/// drop(tx); -/// }); -/// -/// match rx.await { -/// Ok(_) => panic!("This doesn't happen"), -/// Err(_) => println!("the sender dropped"), -/// } -/// } -/// ``` -/// -/// To use a `Receiver` in a `tokio::select!` loop, add `&mut` in front of the -/// channel. -/// -/// ``` -/// use tokio::sync::oneshot; -/// use tokio::time::{interval, sleep, Duration}; -/// -/// #[tokio::main] -/// # async fn _doc() {} -/// # #[tokio::main(flavor = "current_thread", start_paused = true)] -/// async fn main() { -/// let (send, mut recv) = oneshot::channel(); -/// let mut interval = interval(Duration::from_millis(100)); -/// -/// # let handle = -/// tokio::spawn(async move { -/// sleep(Duration::from_secs(1)).await; -/// send.send("shut down").unwrap(); -/// }); -/// -/// loop { -/// tokio::select! 
{ -/// _ = interval.tick() => println!("Another 100ms"), -/// msg = &mut recv => { -/// println!("Got message: {}", msg.unwrap()); -/// break; -/// } -/// } -/// } -/// # handle.await.unwrap(); -/// } -/// ``` -#[derive(Debug)] -pub struct Receiver { - inner: Option>>, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - #[cfg(all(tokio_unstable, feature = "tracing"))] - async_op_span: tracing::Span, - #[cfg(all(tokio_unstable, feature = "tracing"))] - async_op_poll_span: tracing::Span, -} - -pub mod error { - //! Oneshot error types. - - use std::fmt; - - /// Error returned by the `Future` implementation for `Receiver`. - /// - /// This error is returned by the receiver when the sender is dropped without sending. - #[derive(Debug, Eq, PartialEq, Clone)] - pub struct RecvError(pub(super) ()); - - /// Error returned by the `try_recv` function on `Receiver`. - #[derive(Debug, Eq, PartialEq, Clone)] - pub enum TryRecvError { - /// The send half of the channel has not yet sent a value. - Empty, - - /// The send half of the channel was dropped without sending a value. - Closed, - } - - // ===== impl RecvError ===== - - impl fmt::Display for RecvError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } - } - - impl std::error::Error for RecvError {} - - // ===== impl TryRecvError ===== - - impl fmt::Display for TryRecvError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TryRecvError::Empty => write!(fmt, "channel empty"), - TryRecvError::Closed => write!(fmt, "channel closed"), - } - } - } - - impl std::error::Error for TryRecvError {} -} - -use self::error::*; - -struct Inner { - /// Manages the state of the inner cell. - state: AtomicUsize, - - /// The value. This is set by `Sender` and read by `Receiver`. The state of - /// the cell is tracked by `state`. 
- value: UnsafeCell>, - - /// The task to notify when the receiver drops without consuming the value. - /// - /// ## Safety - /// - /// The `TX_TASK_SET` bit in the `state` field is set if this field is - /// initialized. If that bit is unset, this field may be uninitialized. - tx_task: Task, - - /// The task to notify when the value is sent. - /// - /// ## Safety - /// - /// The `RX_TASK_SET` bit in the `state` field is set if this field is - /// initialized. If that bit is unset, this field may be uninitialized. - rx_task: Task, -} - -struct Task(UnsafeCell>); - -impl Task { - unsafe fn will_wake(&self, cx: &mut Context<'_>) -> bool { - self.with_task(|w| w.will_wake(cx.waker())) - } - - unsafe fn with_task(&self, f: F) -> R - where - F: FnOnce(&Waker) -> R, - { - self.0.with(|ptr| { - let waker: *const Waker = (*ptr).as_ptr(); - f(&*waker) - }) - } - - unsafe fn drop_task(&self) { - self.0.with_mut(|ptr| { - let ptr: *mut Waker = (*ptr).as_mut_ptr(); - ptr.drop_in_place(); - }); - } - - unsafe fn set_task(&self, cx: &mut Context<'_>) { - self.0.with_mut(|ptr| { - let ptr: *mut Waker = (*ptr).as_mut_ptr(); - ptr.write(cx.waker().clone()); - }); - } -} - -#[derive(Clone, Copy)] -struct State(usize); - -/// Creates a new one-shot channel for sending single values across asynchronous -/// tasks. -/// -/// The function returns separate "send" and "receive" handles. The `Sender` -/// handle is used by the producer to send the value. The `Receiver` handle is -/// used by the consumer to receive the value. -/// -/// Each handle can be used on separate tasks. 
-/// -/// # Examples -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx, rx) = oneshot::channel(); -/// -/// tokio::spawn(async move { -/// if let Err(_) = tx.send(3) { -/// println!("the receiver dropped"); -/// } -/// }); -/// -/// match rx.await { -/// Ok(v) => println!("got = {:?}", v), -/// Err(_) => println!("the sender dropped"), -/// } -/// } -/// ``` -#[track_caller] -pub fn channel() -> (Sender, Receiver) { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = { - let location = std::panic::Location::caller(); - - let resource_span = tracing::trace_span!( - "runtime.resource", - concrete_type = "Sender|Receiver", - kind = "Sync", - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ); - - resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - tx_dropped = false, - tx_dropped.op = "override", - ) - }); - - resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - rx_dropped = false, - rx_dropped.op = "override", - ) - }); - - resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - value_sent = false, - value_sent.op = "override", - ) - }); - - resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - value_received = false, - value_received.op = "override", - ) - }); - - resource_span - }; - - let inner = Arc::new(Inner { - state: AtomicUsize::new(State::new().as_usize()), - value: UnsafeCell::new(None), - tx_task: Task(UnsafeCell::new(MaybeUninit::uninit())), - rx_task: Task(UnsafeCell::new(MaybeUninit::uninit())), - }); - - let tx = Sender { - inner: Some(inner.clone()), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: resource_span.clone(), - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let async_op_span = resource_span - .in_scope(|| 
tracing::trace_span!("runtime.resource.async_op", source = "Receiver::await")); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let async_op_poll_span = - async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll")); - - let rx = Receiver { - inner: Some(inner), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - #[cfg(all(tokio_unstable, feature = "tracing"))] - async_op_span, - #[cfg(all(tokio_unstable, feature = "tracing"))] - async_op_poll_span, - }; - - (tx, rx) -} - -impl Sender { - /// Attempts to send a value on this channel, returning it back if it could - /// not be sent. - /// - /// This method consumes `self` as only one value may ever be sent on a oneshot - /// channel. It is not marked async because sending a message to an oneshot - /// channel never requires any form of waiting. Because of this, the `send` - /// method can be used in both synchronous and asynchronous code without - /// problems. - /// - /// A successful send occurs when it is determined that the other end of the - /// channel has not hung up already. An unsuccessful send would be one where - /// the corresponding receiver has already been deallocated. Note that a - /// return value of `Err` means that the data will never be received, but - /// a return value of `Ok` does *not* mean that the data will be received. - /// It is possible for the corresponding receiver to hang up immediately - /// after this function returns `Ok`. 
- /// - /// # Examples - /// - /// Send a value to another task - /// - /// ``` - /// use tokio::sync::oneshot; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx) = oneshot::channel(); - /// - /// tokio::spawn(async move { - /// if let Err(_) = tx.send(3) { - /// println!("the receiver dropped"); - /// } - /// }); - /// - /// match rx.await { - /// Ok(v) => println!("got = {:?}", v), - /// Err(_) => println!("the sender dropped"), - /// } - /// } - /// ``` - pub fn send(mut self, t: T) -> Result<(), T> { - let inner = self.inner.take().unwrap(); - - inner.value.with_mut(|ptr| unsafe { - // SAFETY: The receiver will not access the `UnsafeCell` unless the - // channel has been marked as "complete" (the `VALUE_SENT` state bit - // is set). - // That bit is only set by the sender later on in this method, and - // calling this method consumes `self`. Therefore, if it was possible to - // call this method, we know that the `VALUE_SENT` bit is unset, and - // the receiver is not currently accessing the `UnsafeCell`. - *ptr = Some(t); - }); - - if !inner.complete() { - unsafe { - // SAFETY: The receiver will not access the `UnsafeCell` unless - // the channel has been marked as "complete". Calling - // `complete()` will return true if this bit is set, and false - // if it is not set. Thus, if `complete()` returned false, it is - // safe for us to access the value, because we know that the - // receiver will not. - return Err(inner.consume_value().unwrap()); - } - } - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - value_sent = true, - value_sent.op = "override", - ) - }); - - Ok(()) - } - - /// Waits for the associated [`Receiver`] handle to close. - /// - /// A [`Receiver`] is closed by either calling [`close`] explicitly or the - /// [`Receiver`] value is dropped. 
- /// - /// This function is useful when paired with `select!` to abort a - /// computation when the receiver is no longer interested in the result. - /// - /// # Return - /// - /// Returns a `Future` which must be awaited on. - /// - /// [`Receiver`]: Receiver - /// [`close`]: Receiver::close - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// use tokio::sync::oneshot; - /// - /// #[tokio::main] - /// async fn main() { - /// let (mut tx, rx) = oneshot::channel::<()>(); - /// - /// tokio::spawn(async move { - /// drop(rx); - /// }); - /// - /// tx.closed().await; - /// println!("the receiver dropped"); - /// } - /// ``` - /// - /// Paired with select - /// - /// ``` - /// use tokio::sync::oneshot; - /// use tokio::time::{self, Duration}; - /// - /// async fn compute() -> String { - /// // Complex computation returning a `String` - /// # "hello".to_string() - /// } - /// - /// #[tokio::main] - /// async fn main() { - /// let (mut tx, rx) = oneshot::channel(); - /// - /// tokio::spawn(async move { - /// tokio::select! { - /// _ = tx.closed() => { - /// // The receiver dropped, no need to do any further work - /// } - /// value = compute() => { - /// // The send can fail if the channel was closed at the exact same - /// // time as when compute() finished, so just ignore the failure. 
- /// let _ = tx.send(value); - /// } - /// } - /// }); - /// - /// // Wait for up to 10 seconds - /// let _ = time::timeout(Duration::from_secs(10), rx).await; - /// } - /// ``` - pub async fn closed(&mut self) { - use crate::future::poll_fn; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let closed = trace::async_op( - || poll_fn(|cx| self.poll_closed(cx)), - resource_span, - "Sender::closed", - "poll_closed", - false, - ); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let closed = poll_fn(|cx| self.poll_closed(cx)); - - closed.await - } - - /// Returns `true` if the associated [`Receiver`] handle has been dropped. - /// - /// A [`Receiver`] is closed by either calling [`close`] explicitly or the - /// [`Receiver`] value is dropped. - /// - /// If `true` is returned, a call to `send` will always result in an error. - /// - /// [`Receiver`]: Receiver - /// [`close`]: Receiver::close - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::oneshot; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx) = oneshot::channel(); - /// - /// assert!(!tx.is_closed()); - /// - /// drop(rx); - /// - /// assert!(tx.is_closed()); - /// assert!(tx.send("never received").is_err()); - /// } - /// ``` - pub fn is_closed(&self) -> bool { - let inner = self.inner.as_ref().unwrap(); - - let state = State::load(&inner.state, Acquire); - state.is_closed() - } - - /// Checks whether the oneshot channel has been closed, and if not, schedules the - /// `Waker` in the provided `Context` to receive a notification when the channel is - /// closed. - /// - /// A [`Receiver`] is closed by either calling [`close`] explicitly, or when the - /// [`Receiver`] value is dropped. - /// - /// Note that on multiple calls to poll, only the `Waker` from the `Context` passed - /// to the most recent call will be scheduled to receive a wakeup. 
- /// - /// [`Receiver`]: struct@crate::sync::oneshot::Receiver - /// [`close`]: fn@crate::sync::oneshot::Receiver::close - /// - /// # Return value - /// - /// This function returns: - /// - /// * `Poll::Pending` if the channel is still open. - /// * `Poll::Ready(())` if the channel is closed. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::oneshot; - /// - /// use futures::future::poll_fn; - /// - /// #[tokio::main] - /// async fn main() { - /// let (mut tx, mut rx) = oneshot::channel::<()>(); - /// - /// tokio::spawn(async move { - /// rx.close(); - /// }); - /// - /// poll_fn(|cx| tx.poll_closed(cx)).await; - /// - /// println!("the receiver dropped"); - /// } - /// ``` - pub fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()> { - ready!(crate::trace::trace_leaf(cx)); - - // Keep track of task budget - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - let inner = self.inner.as_ref().unwrap(); - - let mut state = State::load(&inner.state, Acquire); - - if state.is_closed() { - coop.made_progress(); - return Ready(()); - } - - if state.is_tx_task_set() { - let will_notify = unsafe { inner.tx_task.will_wake(cx) }; - - if !will_notify { - state = State::unset_tx_task(&inner.state); - - if state.is_closed() { - // Set the flag again so that the waker is released in drop - State::set_tx_task(&inner.state); - coop.made_progress(); - return Ready(()); - } else { - unsafe { inner.tx_task.drop_task() }; - } - } - } - - if !state.is_tx_task_set() { - // Attempt to set the task - unsafe { - inner.tx_task.set_task(cx); - } - - // Update the state - state = State::set_tx_task(&inner.state); - - if state.is_closed() { - coop.made_progress(); - return Ready(()); - } - } - - Pending - } -} - -impl Drop for Sender { - fn drop(&mut self) { - if let Some(inner) = self.inner.as_ref() { - inner.complete(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: 
"runtime::resource::state_update", - tx_dropped = true, - tx_dropped.op = "override", - ) - }); - } - } -} - -impl Receiver { - /// Prevents the associated [`Sender`] handle from sending a value. - /// - /// Any `send` operation which happens after calling `close` is guaranteed - /// to fail. After calling `close`, [`try_recv`] should be called to - /// receive a value if one was sent **before** the call to `close` - /// completed. - /// - /// This function is useful to perform a graceful shutdown and ensure that a - /// value will not be sent into the channel and never received. - /// - /// `close` is no-op if a message is already received or the channel - /// is already closed. - /// - /// [`Sender`]: Sender - /// [`try_recv`]: Receiver::try_recv - /// - /// # Examples - /// - /// Prevent a value from being sent - /// - /// ``` - /// use tokio::sync::oneshot; - /// use tokio::sync::oneshot::error::TryRecvError; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = oneshot::channel(); - /// - /// assert!(!tx.is_closed()); - /// - /// rx.close(); - /// - /// assert!(tx.is_closed()); - /// assert!(tx.send("never received").is_err()); - /// - /// match rx.try_recv() { - /// Err(TryRecvError::Closed) => {} - /// _ => unreachable!(), - /// } - /// } - /// ``` - /// - /// Receive a value sent **before** calling `close` - /// - /// ``` - /// use tokio::sync::oneshot; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = oneshot::channel(); - /// - /// assert!(tx.send("will receive").is_ok()); - /// - /// rx.close(); - /// - /// let msg = rx.try_recv().unwrap(); - /// assert_eq!(msg, "will receive"); - /// } - /// ``` - pub fn close(&mut self) { - if let Some(inner) = self.inner.as_ref() { - inner.close(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - rx_dropped = true, - rx_dropped.op = "override", - ) - }); - } - } - - /// 
Attempts to receive a value. - /// - /// If a pending value exists in the channel, it is returned. If no value - /// has been sent, the current task **will not** be registered for - /// future notification. - /// - /// This function is useful to call from outside the context of an - /// asynchronous task. - /// - /// Note that unlike the `poll` method, the `try_recv` method cannot fail - /// spuriously. Any send or close event that happens before this call to - /// `try_recv` will be correctly returned to the caller. - /// - /// # Return - /// - /// - `Ok(T)` if a value is pending in the channel. - /// - `Err(TryRecvError::Empty)` if no value has been sent yet. - /// - `Err(TryRecvError::Closed)` if the sender has dropped without sending - /// a value, or if the message has already been received. - /// - /// # Examples - /// - /// `try_recv` before a value is sent, then after. - /// - /// ``` - /// use tokio::sync::oneshot; - /// use tokio::sync::oneshot::error::TryRecvError; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = oneshot::channel(); - /// - /// match rx.try_recv() { - /// // The channel is currently empty - /// Err(TryRecvError::Empty) => {} - /// _ => unreachable!(), - /// } - /// - /// // Send a value - /// tx.send("hello").unwrap(); - /// - /// match rx.try_recv() { - /// Ok(value) => assert_eq!(value, "hello"), - /// _ => unreachable!(), - /// } - /// } - /// ``` - /// - /// `try_recv` when the sender dropped before sending a value - /// - /// ``` - /// use tokio::sync::oneshot; - /// use tokio::sync::oneshot::error::TryRecvError; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = oneshot::channel::<()>(); - /// - /// drop(tx); - /// - /// match rx.try_recv() { - /// // The channel will never receive a value. 
- /// Err(TryRecvError::Closed) => {} - /// _ => unreachable!(), - /// } - /// } - /// ``` - pub fn try_recv(&mut self) -> Result { - let result = if let Some(inner) = self.inner.as_ref() { - let state = State::load(&inner.state, Acquire); - - if state.is_complete() { - // SAFETY: If `state.is_complete()` returns true, then the - // `VALUE_SENT` bit has been set and the sender side of the - // channel will no longer attempt to access the inner - // `UnsafeCell`. Therefore, it is now safe for us to access the - // cell. - match unsafe { inner.consume_value() } { - Some(value) => { - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - value_received = true, - value_received.op = "override", - ) - }); - Ok(value) - } - None => Err(TryRecvError::Closed), - } - } else if state.is_closed() { - Err(TryRecvError::Closed) - } else { - // Not ready, this does not clear `inner` - return Err(TryRecvError::Empty); - } - } else { - Err(TryRecvError::Closed) - }; - - self.inner = None; - result - } - - /// Blocking receive to call outside of asynchronous contexts. - /// - /// # Panics - /// - /// This function panics if called within an asynchronous execution - /// context. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use tokio::sync::oneshot; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx) = oneshot::channel::(); - /// - /// let sync_code = thread::spawn(move || { - /// assert_eq!(Ok(10), rx.blocking_recv()); - /// }); - /// - /// let _ = tx.send(10); - /// sync_code.join().unwrap(); - /// } - /// ``` - #[track_caller] - #[cfg(feature = "sync")] - #[cfg_attr(docsrs, doc(alias = "recv_blocking"))] - pub fn blocking_recv(self) -> Result { - crate::future::block_on(self) - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - if let Some(inner) = self.inner.as_ref() { - inner.close(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - rx_dropped = true, - rx_dropped.op = "override", - ) - }); - } - } -} - -impl Future for Receiver { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // If `inner` is `None`, then `poll()` has already completed. 
- #[cfg(all(tokio_unstable, feature = "tracing"))] - let _res_span = self.resource_span.clone().entered(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let _ao_span = self.async_op_span.clone().entered(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let _ao_poll_span = self.async_op_poll_span.clone().entered(); - - let ret = if let Some(inner) = self.as_ref().get_ref().inner.as_ref() { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let res = ready!(trace_poll_op!("poll_recv", inner.poll_recv(cx)))?; - - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - let res = ready!(inner.poll_recv(cx))?; - - res - } else { - panic!("called after complete"); - }; - - self.inner = None; - Ready(Ok(ret)) - } -} - -impl Inner { - fn complete(&self) -> bool { - let prev = State::set_complete(&self.state); - - if prev.is_closed() { - return false; - } - - if prev.is_rx_task_set() { - // TODO: Consume waker? - unsafe { - self.rx_task.with_task(Waker::wake_by_ref); - } - } - - true - } - - fn poll_recv(&self, cx: &mut Context<'_>) -> Poll> { - ready!(crate::trace::trace_leaf(cx)); - // Keep track of task budget - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - // Load the state - let mut state = State::load(&self.state, Acquire); - - if state.is_complete() { - coop.made_progress(); - match unsafe { self.consume_value() } { - Some(value) => Ready(Ok(value)), - None => Ready(Err(RecvError(()))), - } - } else if state.is_closed() { - coop.made_progress(); - Ready(Err(RecvError(()))) - } else { - if state.is_rx_task_set() { - let will_notify = unsafe { self.rx_task.will_wake(cx) }; - - // Check if the task is still the same - if !will_notify { - // Unset the task - state = State::unset_rx_task(&self.state); - if state.is_complete() { - // Set the flag again so that the waker is released in drop - State::set_rx_task(&self.state); - - coop.made_progress(); - // SAFETY: If `state.is_complete()` returns true, then the - // `VALUE_SENT` bit has 
been set and the sender side of the - // channel will no longer attempt to access the inner - // `UnsafeCell`. Therefore, it is now safe for us to access the - // cell. - return match unsafe { self.consume_value() } { - Some(value) => Ready(Ok(value)), - None => Ready(Err(RecvError(()))), - }; - } else { - unsafe { self.rx_task.drop_task() }; - } - } - } - - if !state.is_rx_task_set() { - // Attempt to set the task - unsafe { - self.rx_task.set_task(cx); - } - - // Update the state - state = State::set_rx_task(&self.state); - - if state.is_complete() { - coop.made_progress(); - match unsafe { self.consume_value() } { - Some(value) => Ready(Ok(value)), - None => Ready(Err(RecvError(()))), - } - } else { - Pending - } - } else { - Pending - } - } - } - - /// Called by `Receiver` to indicate that the value will never be received. - fn close(&self) { - let prev = State::set_closed(&self.state); - - if prev.is_tx_task_set() && !prev.is_complete() { - unsafe { - self.tx_task.with_task(Waker::wake_by_ref); - } - } - } - - /// Consumes the value. This function does not check `state`. - /// - /// # Safety - /// - /// Calling this method concurrently on multiple threads will result in a - /// data race. The `VALUE_SENT` state bit is used to ensure that only the - /// sender *or* the receiver will call this method at a given point in time. - /// If `VALUE_SENT` is not set, then only the sender may call this method; - /// if it is set, then only the receiver may call this method. 
- unsafe fn consume_value(&self) -> Option { - self.value.with_mut(|ptr| (*ptr).take()) - } -} - -unsafe impl Send for Inner {} -unsafe impl Sync for Inner {} - -fn mut_load(this: &mut AtomicUsize) -> usize { - this.with_mut(|v| *v) -} - -impl Drop for Inner { - fn drop(&mut self) { - let state = State(mut_load(&mut self.state)); - - if state.is_rx_task_set() { - unsafe { - self.rx_task.drop_task(); - } - } - - if state.is_tx_task_set() { - unsafe { - self.tx_task.drop_task(); - } - } - } -} - -impl fmt::Debug for Inner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use std::sync::atomic::Ordering::Relaxed; - - fmt.debug_struct("Inner") - .field("state", &State::load(&self.state, Relaxed)) - .finish() - } -} - -/// Indicates that a waker for the receiving task has been set. -/// -/// # Safety -/// -/// If this bit is not set, the `rx_task` field may be uninitialized. -const RX_TASK_SET: usize = 0b00001; -/// Indicates that a value has been stored in the channel's inner `UnsafeCell`. -/// -/// # Safety -/// -/// This bit controls which side of the channel is permitted to access the -/// `UnsafeCell`. If it is set, the `UnsafeCell` may ONLY be accessed by the -/// receiver. If this bit is NOT set, the `UnsafeCell` may ONLY be accessed by -/// the sender. -const VALUE_SENT: usize = 0b00010; -const CLOSED: usize = 0b00100; - -/// Indicates that a waker for the sending task has been set. -/// -/// # Safety -/// -/// If this bit is not set, the `tx_task` field may be uninitialized. -const TX_TASK_SET: usize = 0b01000; - -impl State { - fn new() -> State { - State(0) - } - - fn is_complete(self) -> bool { - self.0 & VALUE_SENT == VALUE_SENT - } - - fn set_complete(cell: &AtomicUsize) -> State { - // This method is a compare-and-swap loop rather than a fetch-or like - // other `set_$WHATEVER` methods on `State`. This is because we must - // check if the state has been closed before setting the `VALUE_SENT` - // bit. 
- // - // We don't want to set both the `VALUE_SENT` bit if the `CLOSED` - // bit is already set, because `VALUE_SENT` will tell the receiver that - // it's okay to access the inner `UnsafeCell`. Immediately after calling - // `set_complete`, if the channel was closed, the sender will _also_ - // access the `UnsafeCell` to take the value back out, so if a - // `poll_recv` or `try_recv` call is occurring concurrently, both - // threads may try to access the `UnsafeCell` if we were to set the - // `VALUE_SENT` bit on a closed channel. - let mut state = cell.load(Ordering::Relaxed); - loop { - if State(state).is_closed() { - break; - } - // TODO: This could be `Release`, followed by an `Acquire` fence *if* - // the `RX_TASK_SET` flag is set. However, `loom` does not support - // fences yet. - match cell.compare_exchange_weak( - state, - state | VALUE_SENT, - Ordering::AcqRel, - Ordering::Acquire, - ) { - Ok(_) => break, - Err(actual) => state = actual, - } - } - State(state) - } - - fn is_rx_task_set(self) -> bool { - self.0 & RX_TASK_SET == RX_TASK_SET - } - - fn set_rx_task(cell: &AtomicUsize) -> State { - let val = cell.fetch_or(RX_TASK_SET, AcqRel); - State(val | RX_TASK_SET) - } - - fn unset_rx_task(cell: &AtomicUsize) -> State { - let val = cell.fetch_and(!RX_TASK_SET, AcqRel); - State(val & !RX_TASK_SET) - } - - fn is_closed(self) -> bool { - self.0 & CLOSED == CLOSED - } - - fn set_closed(cell: &AtomicUsize) -> State { - // Acquire because we want all later writes (attempting to poll) to be - // ordered after this. 
- let val = cell.fetch_or(CLOSED, Acquire); - State(val) - } - - fn set_tx_task(cell: &AtomicUsize) -> State { - let val = cell.fetch_or(TX_TASK_SET, AcqRel); - State(val | TX_TASK_SET) - } - - fn unset_tx_task(cell: &AtomicUsize) -> State { - let val = cell.fetch_and(!TX_TASK_SET, AcqRel); - State(val & !TX_TASK_SET) - } - - fn is_tx_task_set(self) -> bool { - self.0 & TX_TASK_SET == TX_TASK_SET - } - - fn as_usize(self) -> usize { - self.0 - } - - fn load(cell: &AtomicUsize, order: Ordering) -> State { - let val = cell.load(order); - State(val) - } -} - -impl fmt::Debug for State { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("State") - .field("is_complete", &self.is_complete()) - .field("is_closed", &self.is_closed()) - .field("is_rx_task_set", &self.is_rx_task_set()) - .field("is_tx_task_set", &self.is_tx_task_set()) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/owned_read_guard.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/owned_read_guard.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/owned_read_guard.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/owned_read_guard.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,182 +0,0 @@ -use crate::sync::rwlock::RwLock; -use std::marker::PhantomData; -use std::sync::Arc; -use std::{fmt, mem, ops, ptr}; - -/// Owned RAII structure used to release the shared read access of a lock when -/// dropped. -/// -/// This structure is created by the [`read_owned`] method on -/// [`RwLock`]. -/// -/// [`read_owned`]: method@crate::sync::RwLock::read_owned -/// [`RwLock`]: struct@crate::sync::RwLock -#[clippy::has_significant_drop] -pub struct OwnedRwLockReadGuard { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. 
- #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(super) resource_span: tracing::Span, - pub(super) lock: Arc>, - pub(super) data: *const U, - pub(super) _p: PhantomData, -} - -#[allow(dead_code)] // Unused fields are still used in Drop. -struct Inner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - lock: Arc>, - data: *const U, -} - -impl OwnedRwLockReadGuard { - fn skip_drop(self) -> Inner { - let me = mem::ManuallyDrop::new(self); - // SAFETY: This duplicates the values in every field of the guard, then - // forgets the originals, so in the end no value is duplicated. - unsafe { - Inner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: ptr::read(&me.resource_span), - lock: ptr::read(&me.lock), - data: me.data, - } - } - } - - /// Makes a new `OwnedRwLockReadGuard` for a component of the locked data. - /// This operation cannot fail as the `OwnedRwLockReadGuard` passed in - /// already locked the data. - /// - /// This is an associated function that needs to be - /// used as `OwnedRwLockReadGuard::map(...)`. A method would interfere with - /// methods of the same name on the contents of the locked data. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{RwLock, OwnedRwLockReadGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(Foo(1))); - /// - /// let guard = lock.read_owned().await; - /// let guard = OwnedRwLockReadGuard::map(guard, |f| &f.0); - /// - /// assert_eq!(1, *guard); - /// # } - /// ``` - #[inline] - pub fn map(this: Self, f: F) -> OwnedRwLockReadGuard - where - F: FnOnce(&U) -> &V, - { - let data = f(&*this) as *const V; - let this = this.skip_drop(); - - OwnedRwLockReadGuard { - lock: this.lock, - data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - } - } - - /// Attempts to make a new [`OwnedRwLockReadGuard`] for a component of the - /// locked data. The original guard is returned if the closure returns - /// `None`. - /// - /// This operation cannot fail as the `OwnedRwLockReadGuard` passed in - /// already locked the data. - /// - /// This is an associated function that needs to be used as - /// `OwnedRwLockReadGuard::try_map(..)`. A method would interfere with - /// methods of the same name on the contents of the locked data. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{RwLock, OwnedRwLockReadGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(Foo(1))); - /// - /// let guard = lock.read_owned().await; - /// let guard = OwnedRwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail"); - /// - /// assert_eq!(1, *guard); - /// # } - /// ``` - #[inline] - pub fn try_map(this: Self, f: F) -> Result, Self> - where - F: FnOnce(&U) -> Option<&V>, - { - let data = match f(&*this) { - Some(data) => data as *const V, - None => return Err(this), - }; - let this = this.skip_drop(); - - Ok(OwnedRwLockReadGuard { - lock: this.lock, - data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }) - } -} - -impl ops::Deref for OwnedRwLockReadGuard { - type Target = U; - - fn deref(&self) -> &U { - unsafe { &*self.data } - } -} - -impl fmt::Debug for OwnedRwLockReadGuard -where - U: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl fmt::Display for OwnedRwLockReadGuard -where - U: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -impl Drop for OwnedRwLockReadGuard { - fn drop(&mut self) { - self.lock.s.release(1); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "sub", - ) - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/owned_write_guard_mapped.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/owned_write_guard_mapped.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/owned_write_guard_mapped.rs 2024-02-06 12:28:09.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/owned_write_guard_mapped.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,205 +0,0 @@ -use crate::sync::rwlock::RwLock; -use std::marker::PhantomData; -use std::sync::Arc; -use std::{fmt, mem, ops, ptr}; - -/// Owned RAII structure used to release the exclusive write access of a lock when -/// dropped. -/// -/// This structure is created by [mapping] an [`OwnedRwLockWriteGuard`]. It is a -/// separate type from `OwnedRwLockWriteGuard` to disallow downgrading a mapped -/// guard, since doing so can cause undefined behavior. -/// -/// [mapping]: method@crate::sync::OwnedRwLockWriteGuard::map -/// [`OwnedRwLockWriteGuard`]: struct@crate::sync::OwnedRwLockWriteGuard -#[clippy::has_significant_drop] -pub struct OwnedRwLockMappedWriteGuard { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. - #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(super) resource_span: tracing::Span, - pub(super) permits_acquired: u32, - pub(super) lock: Arc>, - pub(super) data: *mut U, - pub(super) _p: PhantomData, -} - -#[allow(dead_code)] // Unused fields are still used in Drop. -struct Inner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - permits_acquired: u32, - lock: Arc>, - data: *const U, -} - -impl OwnedRwLockMappedWriteGuard { - fn skip_drop(self) -> Inner { - let me = mem::ManuallyDrop::new(self); - // SAFETY: This duplicates the values in every field of the guard, then - // forgets the originals, so in the end no value is duplicated. - unsafe { - Inner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: ptr::read(&me.resource_span), - permits_acquired: me.permits_acquired, - lock: ptr::read(&me.lock), - data: me.data, - } - } - } - - /// Makes a new `OwnedRwLockMappedWriteGuard` for a component of the locked - /// data. 
- /// - /// This operation cannot fail as the `OwnedRwLockMappedWriteGuard` passed - /// in already locked the data. - /// - /// This is an associated function that needs to be used as - /// `OwnedRwLockWriteGuard::map(..)`. A method would interfere with methods - /// of the same name on the contents of the locked data. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(Foo(1))); - /// - /// { - /// let lock = Arc::clone(&lock); - /// let mut mapped = OwnedRwLockWriteGuard::map(lock.write_owned().await, |f| &mut f.0); - /// *mapped = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn map(mut this: Self, f: F) -> OwnedRwLockMappedWriteGuard - where - F: FnOnce(&mut U) -> &mut V, - { - let data = f(&mut *this) as *mut V; - let this = this.skip_drop(); - - OwnedRwLockMappedWriteGuard { - permits_acquired: this.permits_acquired, - lock: this.lock, - data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - } - } - - /// Attempts to make a new `OwnedRwLockMappedWriteGuard` for a component - /// of the locked data. The original guard is returned if the closure - /// returns `None`. - /// - /// This operation cannot fail as the `OwnedRwLockMappedWriteGuard` passed - /// in already locked the data. - /// - /// This is an associated function that needs to be - /// used as `OwnedRwLockMappedWriteGuard::try_map(...)`. A method would interfere with - /// methods of the same name on the contents of the locked data. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(Foo(1))); - /// - /// { - /// let guard = Arc::clone(&lock).write_owned().await; - /// let mut guard = OwnedRwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail"); - /// *guard = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn try_map( - mut this: Self, - f: F, - ) -> Result, Self> - where - F: FnOnce(&mut U) -> Option<&mut V>, - { - let data = match f(&mut *this) { - Some(data) => data as *mut V, - None => return Err(this), - }; - let this = this.skip_drop(); - - Ok(OwnedRwLockMappedWriteGuard { - permits_acquired: this.permits_acquired, - lock: this.lock, - data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }) - } -} - -impl ops::Deref for OwnedRwLockMappedWriteGuard { - type Target = U; - - fn deref(&self) -> &U { - unsafe { &*self.data } - } -} - -impl ops::DerefMut for OwnedRwLockMappedWriteGuard { - fn deref_mut(&mut self) -> &mut U { - unsafe { &mut *self.data } - } -} - -impl fmt::Debug for OwnedRwLockMappedWriteGuard -where - U: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl fmt::Display for OwnedRwLockMappedWriteGuard -where - U: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -impl Drop for OwnedRwLockMappedWriteGuard { - fn drop(&mut self) { - self.lock.s.release(self.permits_acquired as usize); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - 
write_locked.op = "override", - ) - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/owned_write_guard.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/owned_write_guard.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/owned_write_guard.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/owned_write_guard.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,440 +0,0 @@ -use crate::sync::rwlock::owned_read_guard::OwnedRwLockReadGuard; -use crate::sync::rwlock::owned_write_guard_mapped::OwnedRwLockMappedWriteGuard; -use crate::sync::rwlock::RwLock; -use std::marker::PhantomData; -use std::sync::Arc; -use std::{fmt, mem, ops, ptr}; - -/// Owned RAII structure used to release the exclusive write access of a lock when -/// dropped. -/// -/// This structure is created by the [`write_owned`] method -/// on [`RwLock`]. -/// -/// [`write_owned`]: method@crate::sync::RwLock::write_owned -/// [`RwLock`]: struct@crate::sync::RwLock -#[clippy::has_significant_drop] -pub struct OwnedRwLockWriteGuard { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. - #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(super) resource_span: tracing::Span, - pub(super) permits_acquired: u32, - pub(super) lock: Arc>, - pub(super) data: *mut T, - pub(super) _p: PhantomData, -} - -#[allow(dead_code)] // Unused fields are still used in Drop. -struct Inner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - permits_acquired: u32, - lock: Arc>, - data: *const T, -} - -impl OwnedRwLockWriteGuard { - fn skip_drop(self) -> Inner { - let me = mem::ManuallyDrop::new(self); - // SAFETY: This duplicates the values in every field of the guard, then - // forgets the originals, so in the end no value is duplicated. 
- unsafe { - Inner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: ptr::read(&me.resource_span), - permits_acquired: me.permits_acquired, - lock: ptr::read(&me.lock), - data: me.data, - } - } - } - - /// Makes a new [`OwnedRwLockMappedWriteGuard`] for a component of the locked - /// data. - /// - /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in - /// already locked the data. - /// - /// This is an associated function that needs to be used as - /// `OwnedRwLockWriteGuard::map(..)`. A method would interfere with methods - /// of the same name on the contents of the locked data. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(Foo(1))); - /// - /// { - /// let lock = Arc::clone(&lock); - /// let mut mapped = OwnedRwLockWriteGuard::map(lock.write_owned().await, |f| &mut f.0); - /// *mapped = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn map(mut this: Self, f: F) -> OwnedRwLockMappedWriteGuard - where - F: FnOnce(&mut T) -> &mut U, - { - let data = f(&mut *this) as *mut U; - let this = this.skip_drop(); - - OwnedRwLockMappedWriteGuard { - permits_acquired: this.permits_acquired, - lock: this.lock, - data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - } - } - - /// Makes a new [`OwnedRwLockReadGuard`] for a component of the locked data. - /// - /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be used as - /// `OwnedRwLockWriteGuard::downgrade_map(..)`. A method would interfere with methods of - /// the same name on the contents of the locked data. 
- /// - /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. Handing out a - /// `&mut T` would result in unsoundness, as you could use interior mutability. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(Foo(1))); - /// - /// let guard = Arc::clone(&lock).write_owned().await; - /// let mapped = OwnedRwLockWriteGuard::downgrade_map(guard, |f| &f.0); - /// let foo = lock.read_owned().await; - /// assert_eq!(foo.0, *mapped); - /// # } - /// ``` - #[inline] - pub fn downgrade_map(this: Self, f: F) -> OwnedRwLockReadGuard - where - F: FnOnce(&T) -> &U, - { - let data = f(&*this) as *const U; - let this = this.skip_drop(); - let guard = OwnedRwLockReadGuard { - lock: this.lock, - data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }; - - // Release all but one of the permits held by the write guard - let to_release = (this.permits_acquired - 1) as usize; - guard.lock.s.release(to_release); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - write_locked.op = "override", - ) - }); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - guard - } - - /// Attempts to make a new [`OwnedRwLockMappedWriteGuard`] for a component - /// of the locked data. The original guard is returned if the closure - /// returns `None`. - /// - /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in - /// already locked the data. 
- /// - /// This is an associated function that needs to be - /// used as `OwnedRwLockWriteGuard::try_map(...)`. A method would interfere - /// with methods of the same name on the contents of the locked data. - /// - /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(Foo(1))); - /// - /// { - /// let guard = Arc::clone(&lock).write_owned().await; - /// let mut guard = OwnedRwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail"); - /// *guard = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn try_map( - mut this: Self, - f: F, - ) -> Result, Self> - where - F: FnOnce(&mut T) -> Option<&mut U>, - { - let data = match f(&mut *this) { - Some(data) => data as *mut U, - None => return Err(this), - }; - let this = this.skip_drop(); - - Ok(OwnedRwLockMappedWriteGuard { - permits_acquired: this.permits_acquired, - lock: this.lock, - data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }) - } - - /// Attempts to make a new [`OwnedRwLockReadGuard`] for a component of - /// the locked data. The original guard is returned if the closure returns - /// `None`. - /// - /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be - /// used as `OwnedRwLockWriteGuard::try_downgrade_map(...)`. A method would interfere with - /// methods of the same name on the contents of the locked data. - /// - /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. 
Handing out a - /// `&mut T` would result in unsoundness, as you could use interior mutability. - /// - /// If this function returns `Err(...)`, the lock is never unlocked nor downgraded. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(Foo(1))); - /// - /// let guard = Arc::clone(&lock).write_owned().await; - /// let guard = OwnedRwLockWriteGuard::try_downgrade_map(guard, |f| Some(&f.0)).expect("should not fail"); - /// let foo = lock.read_owned().await; - /// assert_eq!(foo.0, *guard); - /// # } - /// ``` - #[inline] - pub fn try_downgrade_map( - this: Self, - f: F, - ) -> Result, Self> - where - F: FnOnce(&T) -> Option<&U>, - { - let data = match f(&*this) { - Some(data) => data as *const U, - None => return Err(this), - }; - let this = this.skip_drop(); - let guard = OwnedRwLockReadGuard { - lock: this.lock, - data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }; - - // Release all but one of the permits held by the write guard - let to_release = (this.permits_acquired - 1) as usize; - guard.lock.s.release(to_release); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - write_locked.op = "override", - ) - }); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - Ok(guard) - } - - /// Converts this `OwnedRwLockWriteGuard` into an - /// `OwnedRwLockMappedWriteGuard`. This method can be used to store a - /// non-mapped guard in a struct field that expects a mapped guard. 
- /// - /// This is equivalent to calling `OwnedRwLockWriteGuard::map(guard, |me| me)`. - #[inline] - pub fn into_mapped(this: Self) -> OwnedRwLockMappedWriteGuard { - Self::map(this, |me| me) - } - - /// Atomically downgrades a write lock into a read lock without allowing - /// any writers to take exclusive access of the lock in the meantime. - /// - /// **Note:** This won't *necessarily* allow any additional readers to acquire - /// locks, since [`RwLock`] is fair and it is possible that a writer is next - /// in line. - /// - /// Returns an RAII guard which will drop this read access of the `RwLock` - /// when dropped. - /// - /// # Examples - /// - /// ``` - /// # use tokio::sync::RwLock; - /// # use std::sync::Arc; - /// # - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(1)); - /// - /// let n = lock.clone().write_owned().await; - /// - /// let cloned_lock = lock.clone(); - /// let handle = tokio::spawn(async move { - /// *cloned_lock.write_owned().await = 2; - /// }); - /// - /// let n = n.downgrade(); - /// assert_eq!(*n, 1, "downgrade is atomic"); - /// - /// drop(n); - /// handle.await.unwrap(); - /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock"); - /// # } - /// ``` - pub fn downgrade(self) -> OwnedRwLockReadGuard { - let this = self.skip_drop(); - let guard = OwnedRwLockReadGuard { - lock: this.lock, - data: this.data, - _p: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }; - - // Release all but one of the permits held by the write guard - let to_release = (this.permits_acquired - 1) as usize; - guard.lock.s.release(to_release); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - write_locked.op = "override", - ) - }); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - 
tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - guard - } -} - -impl ops::Deref for OwnedRwLockWriteGuard { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl ops::DerefMut for OwnedRwLockWriteGuard { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.data } - } -} - -impl fmt::Debug for OwnedRwLockWriteGuard -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl fmt::Display for OwnedRwLockWriteGuard -where - T: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -impl Drop for OwnedRwLockWriteGuard { - fn drop(&mut self) { - self.lock.s.release(self.permits_acquired as usize); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - write_locked.op = "override", - ) - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/read_guard.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/read_guard.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/read_guard.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/read_guard.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,191 +0,0 @@ -use crate::sync::batch_semaphore::Semaphore; -use std::marker::PhantomData; -use std::{fmt, mem, ops}; - -/// RAII structure used to release the shared read access of a lock when -/// dropped. -/// -/// This structure is created by the [`read`] method on -/// [`RwLock`]. 
-/// -/// [`read`]: method@crate::sync::RwLock::read -/// [`RwLock`]: struct@crate::sync::RwLock -#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct RwLockReadGuard<'a, T: ?Sized> { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. - #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(super) resource_span: tracing::Span, - pub(super) s: &'a Semaphore, - pub(super) data: *const T, - pub(super) marker: PhantomData<&'a T>, -} - -#[allow(dead_code)] // Unused fields are still used in Drop. -struct Inner<'a, T: ?Sized> { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - s: &'a Semaphore, - data: *const T, -} - -impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { - fn skip_drop(self) -> Inner<'a, T> { - let me = mem::ManuallyDrop::new(self); - // SAFETY: This duplicates the values in every field of the guard, then - // forgets the originals, so in the end no value is duplicated. - Inner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: unsafe { std::ptr::read(&me.resource_span) }, - s: me.s, - data: me.data, - } - } - - /// Makes a new `RwLockReadGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `RwLockReadGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be - /// used as `RwLockReadGuard::map(...)`. A method would interfere with - /// methods of the same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockReadGuard::map`] from the - /// [`parking_lot` crate]. 
- /// - /// [`RwLockReadGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockReadGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// let guard = lock.read().await; - /// let guard = RwLockReadGuard::map(guard, |f| &f.0); - /// - /// assert_eq!(1, *guard); - /// # } - /// ``` - #[inline] - pub fn map(this: Self, f: F) -> RwLockReadGuard<'a, U> - where - F: FnOnce(&T) -> &U, - { - let data = f(&*this) as *const U; - let this = this.skip_drop(); - - RwLockReadGuard { - s: this.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - } - } - - /// Attempts to make a new [`RwLockReadGuard`] for a component of the - /// locked data. The original guard is returned if the closure returns - /// `None`. - /// - /// This operation cannot fail as the `RwLockReadGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be used as - /// `RwLockReadGuard::try_map(..)`. A method would interfere with methods of the - /// same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockReadGuard::try_map`] from the - /// [`parking_lot` crate]. 
- /// - /// [`RwLockReadGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.try_map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockReadGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// let guard = lock.read().await; - /// let guard = RwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail"); - /// - /// assert_eq!(1, *guard); - /// # } - /// ``` - #[inline] - pub fn try_map(this: Self, f: F) -> Result, Self> - where - F: FnOnce(&T) -> Option<&U>, - { - let data = match f(&*this) { - Some(data) => data as *const U, - None => return Err(this), - }; - let this = this.skip_drop(); - - Ok(RwLockReadGuard { - s: this.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }) - } -} - -impl ops::Deref for RwLockReadGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl<'a, T: ?Sized> fmt::Debug for RwLockReadGuard<'a, T> -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> fmt::Display for RwLockReadGuard<'a, T> -where - T: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> { - fn drop(&mut self) { - self.s.release(1); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "sub", - ) - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/write_guard_mapped.rs 
s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/write_guard_mapped.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/write_guard_mapped.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/write_guard_mapped.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,213 +0,0 @@ -use crate::sync::batch_semaphore::Semaphore; -use std::marker::PhantomData; -use std::{fmt, mem, ops}; - -/// RAII structure used to release the exclusive write access of a lock when -/// dropped. -/// -/// This structure is created by [mapping] an [`RwLockWriteGuard`]. It is a -/// separate type from `RwLockWriteGuard` to disallow downgrading a mapped -/// guard, since doing so can cause undefined behavior. -/// -/// [mapping]: method@crate::sync::RwLockWriteGuard::map -/// [`RwLockWriteGuard`]: struct@crate::sync::RwLockWriteGuard -#[clippy::has_significant_drop] -pub struct RwLockMappedWriteGuard<'a, T: ?Sized> { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. - #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(super) resource_span: tracing::Span, - pub(super) permits_acquired: u32, - pub(super) s: &'a Semaphore, - pub(super) data: *mut T, - pub(super) marker: PhantomData<&'a mut T>, -} - -#[allow(dead_code)] // Unused fields are still used in Drop. -struct Inner<'a, T: ?Sized> { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - permits_acquired: u32, - s: &'a Semaphore, - data: *mut T, -} - -impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> { - fn skip_drop(self) -> Inner<'a, T> { - let me = mem::ManuallyDrop::new(self); - // SAFETY: This duplicates the values in every field of the guard, then - // forgets the originals, so in the end no value is duplicated. 
- Inner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: unsafe { std::ptr::read(&me.resource_span) }, - permits_acquired: me.permits_acquired, - s: me.s, - data: me.data, - } - } - - /// Makes a new `RwLockMappedWriteGuard` for a component of the locked data. - /// - /// This operation cannot fail as the `RwLockMappedWriteGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be used as - /// `RwLockMappedWriteGuard::map(..)`. A method would interfere with methods - /// of the same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockWriteGuard::map`] from the - /// [`parking_lot` crate]. - /// - /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// { - /// let mut mapped = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0); - /// *mapped = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn map(mut this: Self, f: F) -> RwLockMappedWriteGuard<'a, U> - where - F: FnOnce(&mut T) -> &mut U, - { - let data = f(&mut *this) as *mut U; - let this = this.skip_drop(); - - RwLockMappedWriteGuard { - permits_acquired: this.permits_acquired, - s: this.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - } - } - - /// Attempts to make a new [`RwLockMappedWriteGuard`] for a component of - /// the locked data. The original guard is returned if the closure returns - /// `None`. 
- /// - /// This operation cannot fail as the `RwLockMappedWriteGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be - /// used as `RwLockMappedWriteGuard::try_map(...)`. A method would interfere - /// with methods of the same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockWriteGuard::try_map`] from - /// the [`parking_lot` crate]. - /// - /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// { - /// let guard = lock.write().await; - /// let mut guard = RwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail"); - /// *guard = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn try_map( - mut this: Self, - f: F, - ) -> Result, Self> - where - F: FnOnce(&mut T) -> Option<&mut U>, - { - let data = match f(&mut *this) { - Some(data) => data as *mut U, - None => return Err(this), - }; - let this = this.skip_drop(); - - Ok(RwLockMappedWriteGuard { - permits_acquired: this.permits_acquired, - s: this.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }) - } - - // Note: No `downgrade`, `downgrade_map` nor `try_downgrade_map` because they would be unsound, as we're already - // potentially been mapped with internal mutability. 
-} - -impl ops::Deref for RwLockMappedWriteGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl ops::DerefMut for RwLockMappedWriteGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.data } - } -} - -impl<'a, T: ?Sized> fmt::Debug for RwLockMappedWriteGuard<'a, T> -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> fmt::Display for RwLockMappedWriteGuard<'a, T> -where - T: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> Drop for RwLockMappedWriteGuard<'a, T> { - fn drop(&mut self) { - self.s.release(self.permits_acquired as usize); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - write_locked.op = "override", - ) - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/write_guard.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/write_guard.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock/write_guard.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock/write_guard.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,457 +0,0 @@ -use crate::sync::batch_semaphore::Semaphore; -use crate::sync::rwlock::read_guard::RwLockReadGuard; -use crate::sync::rwlock::write_guard_mapped::RwLockMappedWriteGuard; -use std::marker::PhantomData; -use std::{fmt, mem, ops}; - -/// RAII structure used to release the exclusive write access of a lock when -/// dropped. -/// -/// This structure is created by the [`write`] method -/// on [`RwLock`]. 
-/// -/// [`write`]: method@crate::sync::RwLock::write -/// [`RwLock`]: struct@crate::sync::RwLock -#[clippy::has_significant_drop] -#[must_use = "if unused the RwLock will immediately unlock"] -pub struct RwLockWriteGuard<'a, T: ?Sized> { - // When changing the fields in this struct, make sure to update the - // `skip_drop` method. - #[cfg(all(tokio_unstable, feature = "tracing"))] - pub(super) resource_span: tracing::Span, - pub(super) permits_acquired: u32, - pub(super) s: &'a Semaphore, - pub(super) data: *mut T, - pub(super) marker: PhantomData<&'a mut T>, -} - -#[allow(dead_code)] // Unused fields are still used in Drop. -struct Inner<'a, T: ?Sized> { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - permits_acquired: u32, - s: &'a Semaphore, - data: *mut T, -} - -impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { - fn skip_drop(self) -> Inner<'a, T> { - let me = mem::ManuallyDrop::new(self); - // SAFETY: This duplicates the values in every field of the guard, then - // forgets the originals, so in the end no value is duplicated. - Inner { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: unsafe { std::ptr::read(&me.resource_span) }, - permits_acquired: me.permits_acquired, - s: me.s, - data: me.data, - } - } - - /// Makes a new [`RwLockMappedWriteGuard`] for a component of the locked data. - /// - /// This operation cannot fail as the `RwLockWriteGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be used as - /// `RwLockWriteGuard::map(..)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockWriteGuard::map`] from the - /// [`parking_lot` crate]. 
- /// - /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard - /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// { - /// let mut mapped = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0); - /// *mapped = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn map(mut this: Self, f: F) -> RwLockMappedWriteGuard<'a, U> - where - F: FnOnce(&mut T) -> &mut U, - { - let data = f(&mut *this) as *mut U; - let this = this.skip_drop(); - - RwLockMappedWriteGuard { - permits_acquired: this.permits_acquired, - s: this.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - } - } - - /// Makes a new [`RwLockReadGuard`] for a component of the locked data. - /// - /// This operation cannot fail as the `RwLockWriteGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be used as - /// `RwLockWriteGuard::downgrade_map(..)`. A method would interfere with methods of - /// the same name on the contents of the locked data. - /// - /// This is equivalent to a combination of asynchronous [`RwLockWriteGuard::map`] and [`RwLockWriteGuard::downgrade`] - /// from the [`parking_lot` crate]. - /// - /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. Handing out a - /// `&mut T` would result in unsoundness, as you could use interior mutability. 
- /// - /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard - /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map - /// [`RwLockWriteGuard::downgrade`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.downgrade - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// let mapped = RwLockWriteGuard::downgrade_map(lock.write().await, |f| &f.0); - /// let foo = lock.read().await; - /// assert_eq!(foo.0, *mapped); - /// # } - /// ``` - #[inline] - pub fn downgrade_map(this: Self, f: F) -> RwLockReadGuard<'a, U> - where - F: FnOnce(&T) -> &U, - { - let data = f(&*this) as *const U; - let this = this.skip_drop(); - let guard = RwLockReadGuard { - s: this.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }; - - // Release all but one of the permits held by the write guard - let to_release = (this.permits_acquired - 1) as usize; - this.s.release(to_release); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - write_locked.op = "override", - ) - }); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - guard - } - - /// Attempts to make a new [`RwLockMappedWriteGuard`] for a component of - /// the locked data. The original guard is returned if the closure returns - /// `None`. 
- /// - /// This operation cannot fail as the `RwLockWriteGuard` passed in already - /// locked the data. - /// - /// This is an associated function that needs to be - /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with - /// methods of the same name on the contents of the locked data. - /// - /// This is an asynchronous version of [`RwLockWriteGuard::try_map`] from - /// the [`parking_lot` crate]. - /// - /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard - /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// { - /// let guard = lock.write().await; - /// let mut guard = RwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail"); - /// *guard = 2; - /// } - /// - /// assert_eq!(Foo(2), *lock.read().await); - /// # } - /// ``` - #[inline] - pub fn try_map( - mut this: Self, - f: F, - ) -> Result, Self> - where - F: FnOnce(&mut T) -> Option<&mut U>, - { - let data = match f(&mut *this) { - Some(data) => data as *mut U, - None => return Err(this), - }; - let this = this.skip_drop(); - - Ok(RwLockMappedWriteGuard { - permits_acquired: this.permits_acquired, - s: this.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }) - } - - /// Attempts to make a new [`RwLockReadGuard`] for a component of - /// the locked data. The original guard is returned if the closure returns - /// `None`. - /// - /// This operation cannot fail as the `RwLockWriteGuard` passed in already - /// locked the data. 
- /// - /// This is an associated function that needs to be - /// used as `RwLockWriteGuard::try_downgrade_map(...)`. A method would interfere with - /// methods of the same name on the contents of the locked data. - /// - /// This is equivalent to a combination of asynchronous [`RwLockWriteGuard::try_map`] and [`RwLockWriteGuard::downgrade`] - /// from the [`parking_lot` crate]. - /// - /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. Handing out a - /// `&mut T` would result in unsoundness, as you could use interior mutability. - /// - /// If this function returns `Err(...)`, the lock is never unlocked nor downgraded. - /// - /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard - /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map - /// [`RwLockWriteGuard::downgrade`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.downgrade - /// [`parking_lot` crate]: https://crates.io/crates/parking_lot - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{RwLock, RwLockWriteGuard}; - /// - /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] - /// struct Foo(u32); - /// - /// # #[tokio::main] - /// # async fn main() { - /// let lock = RwLock::new(Foo(1)); - /// - /// let guard = RwLockWriteGuard::try_downgrade_map(lock.write().await, |f| Some(&f.0)).expect("should not fail"); - /// let foo = lock.read().await; - /// assert_eq!(foo.0, *guard); - /// # } - /// ``` - #[inline] - pub fn try_downgrade_map(this: Self, f: F) -> Result, Self> - where - F: FnOnce(&T) -> Option<&U>, - { - let data = match f(&*this) { - Some(data) => data as *const U, - None => return Err(this), - }; - let this = this.skip_drop(); - let guard = RwLockReadGuard { - s: this.s, - data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }; - - // Release all but one of the permits held by 
the write guard - let to_release = (this.permits_acquired - 1) as usize; - this.s.release(to_release); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - write_locked.op = "override", - ) - }); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - Ok(guard) - } - - /// Converts this `RwLockWriteGuard` into an `RwLockMappedWriteGuard`. This - /// method can be used to store a non-mapped guard in a struct field that - /// expects a mapped guard. - /// - /// This is equivalent to calling `RwLockWriteGuard::map(guard, |me| me)`. - #[inline] - pub fn into_mapped(this: Self) -> RwLockMappedWriteGuard<'a, T> { - RwLockWriteGuard::map(this, |me| me) - } - - /// Atomically downgrades a write lock into a read lock without allowing - /// any writers to take exclusive access of the lock in the meantime. - /// - /// **Note:** This won't *necessarily* allow any additional readers to acquire - /// locks, since [`RwLock`] is fair and it is possible that a writer is next - /// in line. - /// - /// Returns an RAII guard which will drop this read access of the `RwLock` - /// when dropped. 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::sync::RwLock; - /// # use std::sync::Arc; - /// # - /// # #[tokio::main] - /// # async fn main() { - /// let lock = Arc::new(RwLock::new(1)); - /// - /// let n = lock.write().await; - /// - /// let cloned_lock = lock.clone(); - /// let handle = tokio::spawn(async move { - /// *cloned_lock.write().await = 2; - /// }); - /// - /// let n = n.downgrade(); - /// assert_eq!(*n, 1, "downgrade is atomic"); - /// - /// drop(n); - /// handle.await.unwrap(); - /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock"); - /// # } - /// ``` - /// - /// [`RwLock`]: struct@crate::sync::RwLock - pub fn downgrade(self) -> RwLockReadGuard<'a, T> { - let this = self.skip_drop(); - let guard = RwLockReadGuard { - s: this.s, - data: this.data, - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: this.resource_span, - }; - - // Release all but one of the permits held by the write guard - let to_release = (this.permits_acquired - 1) as usize; - this.s.release(to_release); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - write_locked.op = "override", - ) - }); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - guard - } -} - -impl ops::Deref for RwLockWriteGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.data } - } -} - -impl ops::DerefMut for RwLockWriteGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.data } - } -} - -impl<'a, T: ?Sized> fmt::Debug for RwLockWriteGuard<'a, T> -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> 
fmt::Display for RwLockWriteGuard<'a, T> -where - T: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> { - fn drop(&mut self) { - self.s.release(self.permits_acquired as usize); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - write_locked.op = "override", - ) - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/rwlock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/rwlock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1117 +0,0 @@ -use crate::sync::batch_semaphore::{Semaphore, TryAcquireError}; -use crate::sync::mutex::TryLockError; -#[cfg(all(tokio_unstable, feature = "tracing"))] -use crate::util::trace; -use std::cell::UnsafeCell; -use std::marker; -use std::marker::PhantomData; -use std::sync::Arc; - -pub(crate) mod owned_read_guard; -pub(crate) mod owned_write_guard; -pub(crate) mod owned_write_guard_mapped; -pub(crate) mod read_guard; -pub(crate) mod write_guard; -pub(crate) mod write_guard_mapped; -pub(crate) use owned_read_guard::OwnedRwLockReadGuard; -pub(crate) use owned_write_guard::OwnedRwLockWriteGuard; -pub(crate) use owned_write_guard_mapped::OwnedRwLockMappedWriteGuard; -pub(crate) use read_guard::RwLockReadGuard; -pub(crate) use write_guard::RwLockWriteGuard; -pub(crate) use write_guard_mapped::RwLockMappedWriteGuard; - -#[cfg(not(loom))] -const MAX_READS: u32 = std::u32::MAX >> 3; - -#[cfg(loom)] -const MAX_READS: u32 = 10; - -/// An asynchronous reader-writer lock. -/// -/// This type of lock allows a number of readers or at most one writer at any -/// point in time. 
The write portion of this lock typically allows modification -/// of the underlying data (exclusive access) and the read portion of this lock -/// typically allows for read-only access (shared access). -/// -/// In comparison, a [`Mutex`] does not distinguish between readers or writers -/// that acquire the lock, therefore causing any tasks waiting for the lock to -/// become available to yield. An `RwLock` will allow any number of readers to -/// acquire the lock as long as a writer is not holding the lock. -/// -/// The priority policy of Tokio's read-write lock is _fair_ (or -/// [_write-preferring_]), in order to ensure that readers cannot starve -/// writers. Fairness is ensured using a first-in, first-out queue for the tasks -/// awaiting the lock; if a task that wishes to acquire the write lock is at the -/// head of the queue, read locks will not be given out until the write lock has -/// been released. This is in contrast to the Rust standard library's -/// `std::sync::RwLock`, where the priority policy is dependent on the -/// operating system's implementation. -/// -/// The type parameter `T` represents the data that this lock protects. It is -/// required that `T` satisfies [`Send`] to be shared across threads. The RAII guards -/// returned from the locking methods implement [`Deref`](trait@std::ops::Deref) -/// (and [`DerefMut`](trait@std::ops::DerefMut) -/// for the `write` methods) to allow access to the content of the lock. 
-/// -/// # Examples -/// -/// ``` -/// use tokio::sync::RwLock; -/// -/// #[tokio::main] -/// async fn main() { -/// let lock = RwLock::new(5); -/// -/// // many reader locks can be held at once -/// { -/// let r1 = lock.read().await; -/// let r2 = lock.read().await; -/// assert_eq!(*r1, 5); -/// assert_eq!(*r2, 5); -/// } // read locks are dropped at this point -/// -/// // only one write lock may be held, however -/// { -/// let mut w = lock.write().await; -/// *w += 1; -/// assert_eq!(*w, 6); -/// } // write lock is dropped here -/// } -/// ``` -/// -/// [`Mutex`]: struct@super::Mutex -/// [`RwLock`]: struct@RwLock -/// [`RwLockReadGuard`]: struct@RwLockReadGuard -/// [`RwLockWriteGuard`]: struct@RwLockWriteGuard -/// [`Send`]: trait@std::marker::Send -/// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies -pub struct RwLock { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, - - // maximum number of concurrent readers - mr: u32, - - //semaphore to coordinate read and write access to T - s: Semaphore, - - //inner data T - c: UnsafeCell, -} - -#[test] -#[cfg(not(loom))] -fn bounds() { - fn check_send() {} - fn check_sync() {} - fn check_unpin() {} - // This has to take a value, since the async fn's return type is unnameable. 
- fn check_send_sync_val(_t: T) {} - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - check_send::>(); - check_sync::>(); - check_unpin::>(); - - let rwlock = Arc::new(RwLock::new(0)); - check_send_sync_val(rwlock.read()); - check_send_sync_val(Arc::clone(&rwlock).read_owned()); - check_send_sync_val(rwlock.write()); - check_send_sync_val(Arc::clone(&rwlock).write_owned()); -} - -// As long as T: Send + Sync, it's fine to send and share RwLock between threads. -// If T were not Send, sending and sharing a RwLock would be bad, since you can access T through -// RwLock. -unsafe impl Send for RwLock where T: ?Sized + Send {} -unsafe impl Sync for RwLock where T: ?Sized + Send + Sync {} -// NB: These impls need to be explicit since we're storing a raw pointer. -// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over -// `T` is `Send`. -unsafe impl Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {} -unsafe impl Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {} -// T is required to be `Send` because an OwnedRwLockReadGuard can be used to drop the value held in -// the RwLock, unlike RwLockReadGuard. 
-unsafe impl Send for OwnedRwLockReadGuard -where - T: ?Sized + Send + Sync, - U: ?Sized + Sync, -{ -} -unsafe impl Sync for OwnedRwLockReadGuard -where - T: ?Sized + Send + Sync, - U: ?Sized + Send + Sync, -{ -} -unsafe impl Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {} -unsafe impl Sync for OwnedRwLockWriteGuard where T: ?Sized + Send + Sync {} -unsafe impl Sync for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {} -unsafe impl Sync for OwnedRwLockMappedWriteGuard -where - T: ?Sized + Send + Sync, - U: ?Sized + Send + Sync, -{ -} -// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over -// `T` is `Send` - but since this is also provides mutable access, we need to -// make sure that `T` is `Send` since its value can be sent across thread -// boundaries. -unsafe impl Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {} -unsafe impl Send for OwnedRwLockWriteGuard where T: ?Sized + Send + Sync {} -unsafe impl Send for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {} -unsafe impl Send for OwnedRwLockMappedWriteGuard -where - T: ?Sized + Send + Sync, - U: ?Sized + Send + Sync, -{ -} - -impl RwLock { - /// Creates a new instance of an `RwLock` which is unlocked. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::RwLock; - /// - /// let lock = RwLock::new(5); - /// ``` - #[track_caller] - pub fn new(value: T) -> RwLock - where - T: Sized, - { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = { - let location = std::panic::Location::caller(); - let resource_span = tracing::trace_span!( - "runtime.resource", - concrete_type = "RwLock", - kind = "Sync", - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ); - - resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - max_readers = MAX_READS, - ); - - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - ); - - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 0, - ); - }); - - resource_span - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let s = resource_span.in_scope(|| Semaphore::new(MAX_READS as usize)); - - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - let s = Semaphore::new(MAX_READS as usize); - - RwLock { - mr: MAX_READS, - c: UnsafeCell::new(value), - s, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } - } - - /// Creates a new instance of an `RwLock` which is unlocked - /// and allows a maximum of `max_reads` concurrent readers. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::RwLock; - /// - /// let lock = RwLock::with_max_readers(5, 1024); - /// ``` - /// - /// # Panics - /// - /// Panics if `max_reads` is more than `u32::MAX >> 3`. 
- #[track_caller] - pub fn with_max_readers(value: T, max_reads: u32) -> RwLock - where - T: Sized, - { - assert!( - max_reads <= MAX_READS, - "a RwLock may not be created with more than {} readers", - MAX_READS - ); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = { - let location = std::panic::Location::caller(); - - let resource_span = tracing::trace_span!( - "runtime.resource", - concrete_type = "RwLock", - kind = "Sync", - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ); - - resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - max_readers = max_reads, - ); - - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = false, - ); - - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 0, - ); - }); - - resource_span - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let s = resource_span.in_scope(|| Semaphore::new(max_reads as usize)); - - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - let s = Semaphore::new(max_reads as usize); - - RwLock { - mr: max_reads, - c: UnsafeCell::new(value), - s, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } - } - - /// Creates a new instance of an `RwLock` which is unlocked. - /// - /// When using the `tracing` [unstable feature], a `RwLock` created with - /// `const_new` will not be instrumented. As such, it will not be visible - /// in [`tokio-console`]. Instead, [`RwLock::new`] should be used to create - /// an instrumented object if that is needed. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::RwLock; - /// - /// static LOCK: RwLock = RwLock::const_new(5); - /// ``` - /// - /// [`tokio-console`]: https://github.com/tokio-rs/console - /// [unstable feature]: crate#unstable-features - #[cfg(not(all(loom, test)))] - pub const fn const_new(value: T) -> RwLock - where - T: Sized, - { - RwLock { - mr: MAX_READS, - c: UnsafeCell::new(value), - s: Semaphore::const_new(MAX_READS as usize), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span::none(), - } - } - - /// Creates a new instance of an `RwLock` which is unlocked - /// and allows a maximum of `max_reads` concurrent readers. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::RwLock; - /// - /// static LOCK: RwLock = RwLock::const_with_max_readers(5, 1024); - /// ``` - #[cfg(not(all(loom, test)))] - pub const fn const_with_max_readers(value: T, max_reads: u32) -> RwLock - where - T: Sized, - { - assert!(max_reads <= MAX_READS); - - RwLock { - mr: max_reads, - c: UnsafeCell::new(value), - s: Semaphore::const_new(max_reads as usize), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span::none(), - } - } - - /// Locks this `RwLock` with shared read access, causing the current task - /// to yield until the lock has been acquired. - /// - /// The calling task will yield until there are no writers which hold the - /// lock. There may be other readers inside the lock when the task resumes. - /// - /// Note that under the priority policy of [`RwLock`], read locks are not - /// granted until prior write locks, to prevent starvation. Therefore - /// deadlock may occur if a read lock is held by the current task, a write - /// lock attempt is made, and then a subsequent read lock attempt is made - /// by the current task. - /// - /// Returns an RAII guard which will drop this read access of the `RwLock` - /// when dropped. 
- /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute locks in the order they - /// were requested. Cancelling a call to `read` makes you lose your place in - /// the queue. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let lock = Arc::new(RwLock::new(1)); - /// let c_lock = lock.clone(); - /// - /// let n = lock.read().await; - /// assert_eq!(*n, 1); - /// - /// tokio::spawn(async move { - /// // While main has an active read lock, we acquire one too. - /// let r = c_lock.read().await; - /// assert_eq!(*r, 1); - /// }).await.expect("The spawned task has panicked"); - /// - /// // Drop the guard after the spawned task finishes. - /// drop(n); - /// } - /// ``` - pub async fn read(&self) -> RwLockReadGuard<'_, T> { - let acquire_fut = async { - self.s.acquire(1).await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. - unreachable!() - }); - - RwLockReadGuard { - s: &self.s, - data: self.c.get(), - marker: PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - } - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let acquire_fut = trace::async_op( - move || acquire_fut, - self.resource_span.clone(), - "RwLock::read", - "poll", - false, - ); - - #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing - let guard = acquire_fut.await; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - guard - } - - /// Blockingly locks this `RwLock` with shared read access. 
- /// - /// This method is intended for use cases where you - /// need to use this rwlock in asynchronous code as well as in synchronous code. - /// - /// Returns an RAII guard which will drop the read access of this `RwLock` when dropped. - /// - /// # Panics - /// - /// This function panics if called within an asynchronous execution context. - /// - /// - If you find yourself in an asynchronous execution context and needing - /// to call some (synchronous) function which performs one of these - /// `blocking_` operations, then consider wrapping that call inside - /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking] - /// (or [`block_in_place()`][crate::task::block_in_place]). - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let rwlock = Arc::new(RwLock::new(1)); - /// let mut write_lock = rwlock.write().await; - /// - /// let blocking_task = tokio::task::spawn_blocking({ - /// let rwlock = Arc::clone(&rwlock); - /// move || { - /// // This shall block until the `write_lock` is released. - /// let read_lock = rwlock.blocking_read(); - /// assert_eq!(*read_lock, 0); - /// } - /// }); - /// - /// *write_lock -= 1; - /// drop(write_lock); // release the lock. - /// - /// // Await the completion of the blocking task. - /// blocking_task.await.unwrap(); - /// - /// // Assert uncontended. - /// assert!(rwlock.try_write().is_ok()); - /// } - /// ``` - #[track_caller] - #[cfg(feature = "sync")] - pub fn blocking_read(&self) -> RwLockReadGuard<'_, T> { - crate::future::block_on(self.read()) - } - - /// Locks this `RwLock` with shared read access, causing the current task - /// to yield until the lock has been acquired. - /// - /// The calling task will yield until there are no writers which hold the - /// lock. There may be other readers inside the lock when the task resumes. 
- /// - /// This method is identical to [`RwLock::read`], except that the returned - /// guard references the `RwLock` with an [`Arc`] rather than by borrowing - /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this - /// method, and the guard will live for the `'static` lifetime, as it keeps - /// the `RwLock` alive by holding an `Arc`. - /// - /// Note that under the priority policy of [`RwLock`], read locks are not - /// granted until prior write locks, to prevent starvation. Therefore - /// deadlock may occur if a read lock is held by the current task, a write - /// lock attempt is made, and then a subsequent read lock attempt is made - /// by the current task. - /// - /// Returns an RAII guard which will drop this read access of the `RwLock` - /// when dropped. - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute locks in the order they - /// were requested. Cancelling a call to `read_owned` makes you lose your - /// place in the queue. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let lock = Arc::new(RwLock::new(1)); - /// let c_lock = lock.clone(); - /// - /// let n = lock.read_owned().await; - /// assert_eq!(*n, 1); - /// - /// tokio::spawn(async move { - /// // While main has an active read lock, we acquire one too. - /// let r = c_lock.read_owned().await; - /// assert_eq!(*r, 1); - /// }).await.expect("The spawned task has panicked"); - /// - /// // Drop the guard after the spawned task finishes. - /// drop(n); - ///} - /// ``` - pub async fn read_owned(self: Arc) -> OwnedRwLockReadGuard { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - - let acquire_fut = async { - self.s.acquire(1).await.unwrap_or_else(|_| { - // The semaphore was closed. 
but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. - unreachable!() - }); - - OwnedRwLockReadGuard { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - data: self.c.get(), - lock: self, - _p: PhantomData, - } - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let acquire_fut = trace::async_op( - move || acquire_fut, - resource_span, - "RwLock::read_owned", - "poll", - false, - ); - - #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing - let guard = acquire_fut.await; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - guard - } - - /// Attempts to acquire this `RwLock` with shared read access. - /// - /// If the access couldn't be acquired immediately, returns [`TryLockError`]. - /// Otherwise, an RAII guard is returned which will release read access - /// when dropped. - /// - /// [`TryLockError`]: TryLockError - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let lock = Arc::new(RwLock::new(1)); - /// let c_lock = lock.clone(); - /// - /// let v = lock.try_read().unwrap(); - /// assert_eq!(*v, 1); - /// - /// tokio::spawn(async move { - /// // While main has an active read lock, we acquire one too. - /// let n = c_lock.read().await; - /// assert_eq!(*n, 1); - /// }).await.expect("The spawned task has panicked"); - /// - /// // Drop the guard when spawned task finishes. 
- /// drop(v); - /// } - /// ``` - pub fn try_read(&self) -> Result, TryLockError> { - match self.s.try_acquire(1) { - Ok(permit) => permit, - Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), - Err(TryAcquireError::Closed) => unreachable!(), - } - - let guard = RwLockReadGuard { - s: &self.s, - data: self.c.get(), - marker: marker::PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - Ok(guard) - } - - /// Attempts to acquire this `RwLock` with shared read access. - /// - /// If the access couldn't be acquired immediately, returns [`TryLockError`]. - /// Otherwise, an RAII guard is returned which will release read access - /// when dropped. - /// - /// This method is identical to [`RwLock::try_read`], except that the - /// returned guard references the `RwLock` with an [`Arc`] rather than by - /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to - /// call this method, and the guard will live for the `'static` lifetime, - /// as it keeps the `RwLock` alive by holding an `Arc`. - /// - /// [`TryLockError`]: TryLockError - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let lock = Arc::new(RwLock::new(1)); - /// let c_lock = lock.clone(); - /// - /// let v = lock.try_read_owned().unwrap(); - /// assert_eq!(*v, 1); - /// - /// tokio::spawn(async move { - /// // While main has an active read lock, we acquire one too. - /// let n = c_lock.read_owned().await; - /// assert_eq!(*n, 1); - /// }).await.expect("The spawned task has panicked"); - /// - /// // Drop the guard when spawned task finishes. 
- /// drop(v); - /// } - /// ``` - pub fn try_read_owned(self: Arc) -> Result, TryLockError> { - match self.s.try_acquire(1) { - Ok(permit) => permit, - Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), - Err(TryAcquireError::Closed) => unreachable!(), - } - - let guard = OwnedRwLockReadGuard { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - data: self.c.get(), - lock: self, - _p: PhantomData, - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - current_readers = 1, - current_readers.op = "add", - ) - }); - - Ok(guard) - } - - /// Locks this `RwLock` with exclusive write access, causing the current - /// task to yield until the lock has been acquired. - /// - /// The calling task will yield while other writers or readers currently - /// have access to the lock. - /// - /// Returns an RAII guard which will drop the write access of this `RwLock` - /// when dropped. - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute locks in the order they - /// were requested. Cancelling a call to `write` makes you lose your place - /// in the queue. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let lock = RwLock::new(1); - /// - /// let mut n = lock.write().await; - /// *n = 2; - ///} - /// ``` - pub async fn write(&self) -> RwLockWriteGuard<'_, T> { - let acquire_fut = async { - self.s.acquire(self.mr).await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. 
- unreachable!() - }); - - RwLockWriteGuard { - permits_acquired: self.mr, - s: &self.s, - data: self.c.get(), - marker: marker::PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - } - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let acquire_fut = trace::async_op( - move || acquire_fut, - self.resource_span.clone(), - "RwLock::write", - "poll", - false, - ); - - #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing - let guard = acquire_fut.await; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = true, - write_locked.op = "override", - ) - }); - - guard - } - - /// Blockingly locks this `RwLock` with exclusive write access. - /// - /// This method is intended for use cases where you - /// need to use this rwlock in asynchronous code as well as in synchronous code. - /// - /// Returns an RAII guard which will drop the write access of this `RwLock` when dropped. - /// - /// # Panics - /// - /// This function panics if called within an asynchronous execution context. - /// - /// - If you find yourself in an asynchronous execution context and needing - /// to call some (synchronous) function which performs one of these - /// `blocking_` operations, then consider wrapping that call inside - /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking] - /// (or [`block_in_place()`][crate::task::block_in_place]). - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::{sync::RwLock}; - /// - /// #[tokio::main] - /// async fn main() { - /// let rwlock = Arc::new(RwLock::new(1)); - /// let read_lock = rwlock.read().await; - /// - /// let blocking_task = tokio::task::spawn_blocking({ - /// let rwlock = Arc::clone(&rwlock); - /// move || { - /// // This shall block until the `read_lock` is released. 
- /// let mut write_lock = rwlock.blocking_write(); - /// *write_lock = 2; - /// } - /// }); - /// - /// assert_eq!(*read_lock, 1); - /// // Release the last outstanding read lock. - /// drop(read_lock); - /// - /// // Await the completion of the blocking task. - /// blocking_task.await.unwrap(); - /// - /// // Assert uncontended. - /// let read_lock = rwlock.try_read().unwrap(); - /// assert_eq!(*read_lock, 2); - /// } - /// ``` - #[track_caller] - #[cfg(feature = "sync")] - pub fn blocking_write(&self) -> RwLockWriteGuard<'_, T> { - crate::future::block_on(self.write()) - } - - /// Locks this `RwLock` with exclusive write access, causing the current - /// task to yield until the lock has been acquired. - /// - /// The calling task will yield while other writers or readers currently - /// have access to the lock. - /// - /// This method is identical to [`RwLock::write`], except that the returned - /// guard references the `RwLock` with an [`Arc`] rather than by borrowing - /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this - /// method, and the guard will live for the `'static` lifetime, as it keeps - /// the `RwLock` alive by holding an `Arc`. - /// - /// Returns an RAII guard which will drop the write access of this `RwLock` - /// when dropped. - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute locks in the order they - /// were requested. Cancelling a call to `write_owned` makes you lose your - /// place in the queue. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let lock = Arc::new(RwLock::new(1)); - /// - /// let mut n = lock.write_owned().await; - /// *n = 2; - ///} - /// ``` - pub async fn write_owned(self: Arc) -> OwnedRwLockWriteGuard { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - - let acquire_fut = async { - self.s.acquire(self.mr).await.unwrap_or_else(|_| { - // The semaphore was closed. but, we never explicitly close it, and we have a - // handle to it through the Arc, which means that this can never happen. - unreachable!() - }); - - OwnedRwLockWriteGuard { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - permits_acquired: self.mr, - data: self.c.get(), - lock: self, - _p: PhantomData, - } - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let acquire_fut = trace::async_op( - move || acquire_fut, - resource_span, - "RwLock::write_owned", - "poll", - false, - ); - - #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing - let guard = acquire_fut.await; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = true, - write_locked.op = "override", - ) - }); - - guard - } - - /// Attempts to acquire this `RwLock` with exclusive write access. - /// - /// If the access couldn't be acquired immediately, returns [`TryLockError`]. - /// Otherwise, an RAII guard is returned which will release write access - /// when dropped. 
- /// - /// [`TryLockError`]: TryLockError - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let rw = RwLock::new(1); - /// - /// let v = rw.read().await; - /// assert_eq!(*v, 1); - /// - /// assert!(rw.try_write().is_err()); - /// } - /// ``` - pub fn try_write(&self) -> Result, TryLockError> { - match self.s.try_acquire(self.mr) { - Ok(permit) => permit, - Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), - Err(TryAcquireError::Closed) => unreachable!(), - } - - let guard = RwLockWriteGuard { - permits_acquired: self.mr, - s: &self.s, - data: self.c.get(), - marker: marker::PhantomData, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - self.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = true, - write_locked.op = "override", - ) - }); - - Ok(guard) - } - - /// Attempts to acquire this `RwLock` with exclusive write access. - /// - /// If the access couldn't be acquired immediately, returns [`TryLockError`]. - /// Otherwise, an RAII guard is returned which will release write access - /// when dropped. - /// - /// This method is identical to [`RwLock::try_write`], except that the - /// returned guard references the `RwLock` with an [`Arc`] rather than by - /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to - /// call this method, and the guard will live for the `'static` lifetime, - /// as it keeps the `RwLock` alive by holding an `Arc`. 
- /// - /// [`TryLockError`]: TryLockError - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::RwLock; - /// - /// #[tokio::main] - /// async fn main() { - /// let rw = Arc::new(RwLock::new(1)); - /// - /// let v = Arc::clone(&rw).read_owned().await; - /// assert_eq!(*v, 1); - /// - /// assert!(rw.try_write_owned().is_err()); - /// } - /// ``` - pub fn try_write_owned(self: Arc) -> Result, TryLockError> { - match self.s.try_acquire(self.mr) { - Ok(permit) => permit, - Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), - Err(TryAcquireError::Closed) => unreachable!(), - } - - let guard = OwnedRwLockWriteGuard { - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: self.resource_span.clone(), - permits_acquired: self.mr, - data: self.c.get(), - lock: self, - _p: PhantomData, - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - guard.resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - write_locked = true, - write_locked.op = "override", - ) - }); - - Ok(guard) - } - - /// Returns a mutable reference to the underlying data. - /// - /// Since this call borrows the `RwLock` mutably, no actual locking needs to - /// take place -- the mutable borrow statically guarantees no locks exist. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::RwLock; - /// - /// fn main() { - /// let mut lock = RwLock::new(1); - /// - /// let n = lock.get_mut(); - /// *n = 2; - /// } - /// ``` - pub fn get_mut(&mut self) -> &mut T { - unsafe { - // Safety: This is https://github.com/rust-lang/rust/pull/76936 - &mut *self.c.get() - } - } - - /// Consumes the lock, returning the underlying data. 
- pub fn into_inner(self) -> T - where - T: Sized, - { - self.c.into_inner() - } -} - -impl From for RwLock { - fn from(s: T) -> Self { - Self::new(s) - } -} - -impl Default for RwLock -where - T: Default, -{ - fn default() -> Self { - Self::new(T::default()) - } -} - -impl std::fmt::Debug for RwLock -where - T: std::fmt::Debug, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut d = f.debug_struct("RwLock"); - match self.try_read() { - Ok(inner) => d.field("data", &&*inner), - Err(_) => d.field("data", &format_args!("")), - }; - d.finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/semaphore.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/semaphore.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/semaphore.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/semaphore.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,976 +0,0 @@ -use super::batch_semaphore as ll; // low level implementation -use super::{AcquireError, TryAcquireError}; -#[cfg(all(tokio_unstable, feature = "tracing"))] -use crate::util::trace; -use std::sync::Arc; - -/// Counting semaphore performing asynchronous permit acquisition. -/// -/// A semaphore maintains a set of permits. Permits are used to synchronize -/// access to a shared resource. A semaphore differs from a mutex in that it -/// can allow more than one concurrent caller to access the shared resource at a -/// time. -/// -/// When `acquire` is called and the semaphore has remaining permits, the -/// function immediately returns a permit. However, if no remaining permits are -/// available, `acquire` (asynchronously) waits until an outstanding permit is -/// dropped. At this point, the freed permit is assigned to the caller. -/// -/// This `Semaphore` is fair, which means that permits are given out in the order -/// they were requested. 
This fairness is also applied when `acquire_many` gets -/// involved, so if a call to `acquire_many` at the front of the queue requests -/// more permits than currently available, this can prevent a call to `acquire` -/// from completing, even if the semaphore has enough permits complete the call -/// to `acquire`. -/// -/// To use the `Semaphore` in a poll function, you can use the [`PollSemaphore`] -/// utility. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use tokio::sync::{Semaphore, TryAcquireError}; -/// -/// #[tokio::main] -/// async fn main() { -/// let semaphore = Semaphore::new(3); -/// -/// let a_permit = semaphore.acquire().await.unwrap(); -/// let two_permits = semaphore.acquire_many(2).await.unwrap(); -/// -/// assert_eq!(semaphore.available_permits(), 0); -/// -/// let permit_attempt = semaphore.try_acquire(); -/// assert_eq!(permit_attempt.err(), Some(TryAcquireError::NoPermits)); -/// } -/// ``` -/// -/// ## Limit the number of simultaneously opened files in your program -/// -/// Most operating systems have limits on the number of open file -/// handles. Even in systems without explicit limits, resource constraints -/// implicitly set an upper bound on the number of open files. If your -/// program attempts to open a large number of files and exceeds this -/// limit, it will result in an error. -/// -/// This example uses a Semaphore with 100 permits. By acquiring a permit from -/// the Semaphore before accessing a file, you ensure that your program opens -/// no more than 100 files at a time. When trying to open the 101st -/// file, the program will wait until a permit becomes available before -/// proceeding to open another file. 
-/// ``` -/// use std::io::Result; -/// use tokio::fs::File; -/// use tokio::sync::Semaphore; -/// use tokio::io::AsyncWriteExt; -/// -/// static PERMITS: Semaphore = Semaphore::const_new(100); -/// -/// async fn write_to_file(message: &[u8]) -> Result<()> { -/// let _permit = PERMITS.acquire().await.unwrap(); -/// let mut buffer = File::create("example.txt").await?; -/// buffer.write_all(message).await?; -/// Ok(()) // Permit goes out of scope here, and is available again for acquisition -/// } -/// ``` -/// -/// ## Limit the number of incoming requests being handled at the same time -/// -/// Similar to limiting the number of simultaneously opened files, network handles -/// are a limited resource. Allowing an unbounded amount of requests to be processed -/// could result in a denial-of-service, among many other issues. -/// -/// This example uses an `Arc` instead of a global variable. -/// To limit the number of requests that can be processed at the time, -/// we acquire a permit for each task before spawning it. Once acquired, -/// a new task is spawned; and once finished, the permit is dropped inside -/// of the task to allow others to spawn. Permits must be acquired via -/// [`Semaphore::acquire_owned`] to be movable across the task boundary. -/// (Since our semaphore is not a global variable — if it was, then `acquire` would be enough.) -/// -/// ```no_run -/// use std::sync::Arc; -/// use tokio::sync::Semaphore; -/// use tokio::net::TcpListener; -/// -/// #[tokio::main] -/// async fn main() -> std::io::Result<()> { -/// let semaphore = Arc::new(Semaphore::new(3)); -/// let listener = TcpListener::bind("127.0.0.1:8080").await?; -/// -/// loop { -/// // Acquire permit before accepting the next socket. -/// // -/// // We use `acquire_owned` so that we can move `permit` into -/// // other tasks. 
-/// let permit = semaphore.clone().acquire_owned().await.unwrap(); -/// let (mut socket, _) = listener.accept().await?; -/// -/// tokio::spawn(async move { -/// // Do work using the socket. -/// handle_connection(&mut socket).await; -/// // Drop socket while the permit is still live. -/// drop(socket); -/// // Drop the permit, so more tasks can be created. -/// drop(permit); -/// }); -/// } -/// } -/// # async fn handle_connection(_socket: &mut tokio::net::TcpStream) { -/// # // Do work -/// # } -/// ``` -/// -/// ## Prevent tests from running in parallel -/// -/// By default, Rust runs tests in the same file in parallel. However, in some -/// cases, running two tests in parallel may lead to problems. For example, this -/// can happen when tests use the same database. -/// -/// Consider the following scenario: -/// 1. `test_insert`: Inserts a key-value pair into the database, then retrieves -/// the value using the same key to verify the insertion. -/// 2. `test_update`: Inserts a key, then updates the key to a new value and -/// verifies that the value has been accurately updated. -/// 3. `test_others`: A third test that doesn't modify the database state. It -/// can run in parallel with the other tests. -/// -/// In this example, `test_insert` and `test_update` need to run in sequence to -/// work, but it doesn't matter which test runs first. We can leverage a -/// semaphore with a single permit to address this challenge. 
-/// -/// ``` -/// # use tokio::sync::Mutex; -/// # use std::collections::BTreeMap; -/// # struct Database { -/// # map: Mutex>, -/// # } -/// # impl Database { -/// # pub const fn setup() -> Database { -/// # Database { -/// # map: Mutex::const_new(BTreeMap::new()), -/// # } -/// # } -/// # pub async fn insert(&self, key: &str, value: i32) { -/// # self.map.lock().await.insert(key.to_string(), value); -/// # } -/// # pub async fn update(&self, key: &str, value: i32) { -/// # self.map.lock().await -/// # .entry(key.to_string()) -/// # .and_modify(|origin| *origin = value); -/// # } -/// # pub async fn delete(&self, key: &str) { -/// # self.map.lock().await.remove(key); -/// # } -/// # pub async fn get(&self, key: &str) -> i32 { -/// # *self.map.lock().await.get(key).unwrap() -/// # } -/// # } -/// use tokio::sync::Semaphore; -/// -/// // Initialize a static semaphore with only one permit, which is used to -/// // prevent test_insert and test_update from running in parallel. -/// static PERMIT: Semaphore = Semaphore::const_new(1); -/// -/// // Initialize the database that will be used by the subsequent tests. -/// static DB: Database = Database::setup(); -/// -/// #[tokio::test] -/// # async fn fake_test_insert() {} -/// async fn test_insert() { -/// // Acquire permit before proceeding. Since the semaphore has only one permit, -/// // the test will wait if the permit is already acquired by other tests. -/// let permit = PERMIT.acquire().await.unwrap(); -/// -/// // Do the actual test stuff with database -/// -/// // Insert a key-value pair to database -/// let (key, value) = ("name", 0); -/// DB.insert(key, value).await; -/// -/// // Verify that the value has been inserted correctly. -/// assert_eq!(DB.get(key).await, value); -/// -/// // Undo the insertion, so the database is empty at the end of the test. -/// DB.delete(key).await; -/// -/// // Drop permit. This allows the other test to start running. 
-/// drop(permit); -/// } -/// -/// #[tokio::test] -/// # async fn fake_test_update() {} -/// async fn test_update() { -/// // Acquire permit before proceeding. Since the semaphore has only one permit, -/// // the test will wait if the permit is already acquired by other tests. -/// let permit = PERMIT.acquire().await.unwrap(); -/// -/// // Do the same insert. -/// let (key, value) = ("name", 0); -/// DB.insert(key, value).await; -/// -/// // Update the existing value with a new one. -/// let new_value = 1; -/// DB.update(key, new_value).await; -/// -/// // Verify that the value has been updated correctly. -/// assert_eq!(DB.get(key).await, new_value); -/// -/// // Undo any modificattion. -/// DB.delete(key).await; -/// -/// // Drop permit. This allows the other test to start running. -/// drop(permit); -/// } -/// -/// #[tokio::test] -/// # async fn fake_test_others() {} -/// async fn test_others() { -/// // This test can run in parallel with test_insert and test_update, -/// // so it does not use PERMIT. -/// } -/// # #[tokio::main(flavor = "current_thread")] -/// # async fn main() { -/// # test_insert().await; -/// # test_update().await; -/// # test_others().await; -/// # } -/// ``` -/// -/// ## Rate limiting using a token bucket -/// -/// This example showcases the [`add_permits`] and [`SemaphorePermit::forget`] methods. -/// -/// Many applications and systems have constraints on the rate at which certain -/// operations should occur. Exceeding this rate can result in suboptimal -/// performance or even errors. -/// -/// This example implements rate limiting using a [token bucket]. A token bucket is a form of rate -/// limiting that doesn't kick in immediately, to allow for short bursts of incoming requests that -/// arrive at the same time. -/// -/// With a token bucket, each incoming request consumes a token, and the tokens are refilled at a -/// certain rate that defines the rate limit. 
When a burst of requests arrives, tokens are -/// immediately given out until the bucket is empty. Once the bucket is empty, requests will have to -/// wait for new tokens to be added. -/// -/// Unlike the example that limits how many requests can be handled at the same time, we do not add -/// tokens back when we finish handling a request. Instead, tokens are added only by a timer task. -/// -/// Note that this implementation is suboptimal when the duration is small, because it consumes a -/// lot of cpu constantly looping and sleeping. -/// -/// [token bucket]: https://en.wikipedia.org/wiki/Token_bucket -/// [`add_permits`]: crate::sync::Semaphore::add_permits -/// [`SemaphorePermit::forget`]: crate::sync::SemaphorePermit::forget -/// ``` -/// use std::sync::Arc; -/// use tokio::sync::Semaphore; -/// use tokio::time::{interval, Duration}; -/// -/// struct TokenBucket { -/// sem: Arc, -/// jh: tokio::task::JoinHandle<()>, -/// } -/// -/// impl TokenBucket { -/// fn new(duration: Duration, capacity: usize) -> Self { -/// let sem = Arc::new(Semaphore::new(capacity)); -/// -/// // refills the tokens at the end of each interval -/// let jh = tokio::spawn({ -/// let sem = sem.clone(); -/// let mut interval = interval(duration); -/// interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); -/// -/// async move { -/// loop { -/// interval.tick().await; -/// -/// if sem.available_permits() < capacity { -/// sem.add_permits(1); -/// } -/// } -/// } -/// }); -/// -/// Self { jh, sem } -/// } -/// -/// async fn acquire(&self) { -/// // This can return an error if the semaphore is closed, but we -/// // never close it, so this error can never happen. -/// let permit = self.sem.acquire().await.unwrap(); -/// // To avoid releasing the permit back to the semaphore, we use -/// // the `SemaphorePermit::forget` method. 
-/// permit.forget(); -/// } -/// } -/// -/// impl Drop for TokenBucket { -/// fn drop(&mut self) { -/// // Kill the background task so it stops taking up resources when we -/// // don't need it anymore. -/// self.jh.abort(); -/// } -/// } -/// -/// #[tokio::main] -/// # async fn _hidden() {} -/// # #[tokio::main(flavor = "current_thread", start_paused = true)] -/// async fn main() { -/// let capacity = 5; -/// let update_interval = Duration::from_secs_f32(1.0 / capacity as f32); -/// let bucket = TokenBucket::new(update_interval, capacity); -/// -/// for _ in 0..5 { -/// bucket.acquire().await; -/// -/// // do the operation -/// } -/// } -/// ``` -/// -/// [`PollSemaphore`]: https://docs.rs/tokio-util/latest/tokio_util/sync/struct.PollSemaphore.html -/// [`Semaphore::acquire_owned`]: crate::sync::Semaphore::acquire_owned -#[derive(Debug)] -pub struct Semaphore { - /// The low level semaphore - ll_sem: ll::Semaphore, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, -} - -/// A permit from the semaphore. -/// -/// This type is created by the [`acquire`] method. -/// -/// [`acquire`]: crate::sync::Semaphore::acquire() -#[must_use] -#[clippy::has_significant_drop] -#[derive(Debug)] -pub struct SemaphorePermit<'a> { - sem: &'a Semaphore, - permits: u32, -} - -/// An owned permit from the semaphore. -/// -/// This type is created by the [`acquire_owned`] method. -/// -/// [`acquire_owned`]: crate::sync::Semaphore::acquire_owned() -#[must_use] -#[clippy::has_significant_drop] -#[derive(Debug)] -pub struct OwnedSemaphorePermit { - sem: Arc, - permits: u32, -} - -#[test] -#[cfg(not(loom))] -fn bounds() { - fn check_unpin() {} - // This has to take a value, since the async fn's return type is unnameable. 
- fn check_send_sync_val(_t: T) {} - fn check_send_sync() {} - check_unpin::(); - check_unpin::>(); - check_send_sync::(); - - let semaphore = Semaphore::new(0); - check_send_sync_val(semaphore.acquire()); -} - -impl Semaphore { - /// The maximum number of permits which a semaphore can hold. It is `usize::MAX >> 3`. - /// - /// Exceeding this limit typically results in a panic. - pub const MAX_PERMITS: usize = super::batch_semaphore::Semaphore::MAX_PERMITS; - - /// Creates a new semaphore with the initial number of permits. - /// - /// Panics if `permits` exceeds [`Semaphore::MAX_PERMITS`]. - #[track_caller] - pub fn new(permits: usize) -> Self { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = { - let location = std::panic::Location::caller(); - - tracing::trace_span!( - "runtime.resource", - concrete_type = "Semaphore", - kind = "Sync", - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - inherits_child_attrs = true, - ) - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let ll_sem = resource_span.in_scope(|| ll::Semaphore::new(permits)); - - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - let ll_sem = ll::Semaphore::new(permits); - - Self { - ll_sem, - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } - } - - /// Creates a new semaphore with the initial number of permits. - /// - /// When using the `tracing` [unstable feature], a `Semaphore` created with - /// `const_new` will not be instrumented. As such, it will not be visible - /// in [`tokio-console`]. Instead, [`Semaphore::new`] should be used to - /// create an instrumented object if that is needed. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Semaphore; - /// - /// static SEM: Semaphore = Semaphore::const_new(10); - /// ``` - /// - /// [`tokio-console`]: https://github.com/tokio-rs/console - /// [unstable feature]: crate#unstable-features - #[cfg(not(all(loom, test)))] - pub const fn const_new(permits: usize) -> Self { - Self { - ll_sem: ll::Semaphore::const_new(permits), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span::none(), - } - } - - /// Creates a new closed semaphore with 0 permits. - pub(crate) fn new_closed() -> Self { - Self { - ll_sem: ll::Semaphore::new_closed(), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span::none(), - } - } - - /// Creates a new closed semaphore with 0 permits. - #[cfg(not(all(loom, test)))] - pub(crate) const fn const_new_closed() -> Self { - Self { - ll_sem: ll::Semaphore::const_new_closed(), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span::none(), - } - } - - /// Returns the current number of available permits. - pub fn available_permits(&self) -> usize { - self.ll_sem.available_permits() - } - - /// Adds `n` new permits to the semaphore. - /// - /// The maximum number of permits is [`Semaphore::MAX_PERMITS`], and this function will panic if the limit is exceeded. - pub fn add_permits(&self, n: usize) { - self.ll_sem.release(n); - } - - /// Acquires a permit from the semaphore. - /// - /// If the semaphore has been closed, this returns an [`AcquireError`]. - /// Otherwise, this returns a [`SemaphorePermit`] representing the - /// acquired permit. - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute permits in the order they - /// were requested. Cancelling a call to `acquire` makes you lose your place - /// in the queue. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Semaphore; - /// - /// #[tokio::main] - /// async fn main() { - /// let semaphore = Semaphore::new(2); - /// - /// let permit_1 = semaphore.acquire().await.unwrap(); - /// assert_eq!(semaphore.available_permits(), 1); - /// - /// let permit_2 = semaphore.acquire().await.unwrap(); - /// assert_eq!(semaphore.available_permits(), 0); - /// - /// drop(permit_1); - /// assert_eq!(semaphore.available_permits(), 1); - /// } - /// ``` - /// - /// [`AcquireError`]: crate::sync::AcquireError - /// [`SemaphorePermit`]: crate::sync::SemaphorePermit - pub async fn acquire(&self) -> Result, AcquireError> { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let inner = trace::async_op( - || self.ll_sem.acquire(1), - self.resource_span.clone(), - "Semaphore::acquire", - "poll", - true, - ); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = self.ll_sem.acquire(1); - - inner.await?; - Ok(SemaphorePermit { - sem: self, - permits: 1, - }) - } - - /// Acquires `n` permits from the semaphore. - /// - /// If the semaphore has been closed, this returns an [`AcquireError`]. - /// Otherwise, this returns a [`SemaphorePermit`] representing the - /// acquired permits. - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute permits in the order they - /// were requested. Cancelling a call to `acquire_many` makes you lose your - /// place in the queue. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Semaphore; - /// - /// #[tokio::main] - /// async fn main() { - /// let semaphore = Semaphore::new(5); - /// - /// let permit = semaphore.acquire_many(3).await.unwrap(); - /// assert_eq!(semaphore.available_permits(), 2); - /// } - /// ``` - /// - /// [`AcquireError`]: crate::sync::AcquireError - /// [`SemaphorePermit`]: crate::sync::SemaphorePermit - pub async fn acquire_many(&self, n: u32) -> Result, AcquireError> { - #[cfg(all(tokio_unstable, feature = "tracing"))] - trace::async_op( - || self.ll_sem.acquire(n), - self.resource_span.clone(), - "Semaphore::acquire_many", - "poll", - true, - ) - .await?; - - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - self.ll_sem.acquire(n).await?; - - Ok(SemaphorePermit { - sem: self, - permits: n, - }) - } - - /// Tries to acquire a permit from the semaphore. - /// - /// If the semaphore has been closed, this returns a [`TryAcquireError::Closed`] - /// and a [`TryAcquireError::NoPermits`] if there are no permits left. Otherwise, - /// this returns a [`SemaphorePermit`] representing the acquired permits. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{Semaphore, TryAcquireError}; - /// - /// # fn main() { - /// let semaphore = Semaphore::new(2); - /// - /// let permit_1 = semaphore.try_acquire().unwrap(); - /// assert_eq!(semaphore.available_permits(), 1); - /// - /// let permit_2 = semaphore.try_acquire().unwrap(); - /// assert_eq!(semaphore.available_permits(), 0); - /// - /// let permit_3 = semaphore.try_acquire(); - /// assert_eq!(permit_3.err(), Some(TryAcquireError::NoPermits)); - /// # } - /// ``` - /// - /// [`TryAcquireError::Closed`]: crate::sync::TryAcquireError::Closed - /// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits - /// [`SemaphorePermit`]: crate::sync::SemaphorePermit - pub fn try_acquire(&self) -> Result, TryAcquireError> { - match self.ll_sem.try_acquire(1) { - Ok(_) => Ok(SemaphorePermit { - sem: self, - permits: 1, - }), - Err(e) => Err(e), - } - } - - /// Tries to acquire `n` permits from the semaphore. - /// - /// If the semaphore has been closed, this returns a [`TryAcquireError::Closed`] - /// and a [`TryAcquireError::NoPermits`] if there are not enough permits left. - /// Otherwise, this returns a [`SemaphorePermit`] representing the acquired permits. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::{Semaphore, TryAcquireError}; - /// - /// # fn main() { - /// let semaphore = Semaphore::new(4); - /// - /// let permit_1 = semaphore.try_acquire_many(3).unwrap(); - /// assert_eq!(semaphore.available_permits(), 1); - /// - /// let permit_2 = semaphore.try_acquire_many(2); - /// assert_eq!(permit_2.err(), Some(TryAcquireError::NoPermits)); - /// # } - /// ``` - /// - /// [`TryAcquireError::Closed`]: crate::sync::TryAcquireError::Closed - /// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits - /// [`SemaphorePermit`]: crate::sync::SemaphorePermit - pub fn try_acquire_many(&self, n: u32) -> Result, TryAcquireError> { - match self.ll_sem.try_acquire(n) { - Ok(_) => Ok(SemaphorePermit { - sem: self, - permits: n, - }), - Err(e) => Err(e), - } - } - - /// Acquires a permit from the semaphore. - /// - /// The semaphore must be wrapped in an [`Arc`] to call this method. - /// If the semaphore has been closed, this returns an [`AcquireError`]. - /// Otherwise, this returns a [`OwnedSemaphorePermit`] representing the - /// acquired permit. - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute permits in the order they - /// were requested. Cancelling a call to `acquire_owned` makes you lose your - /// place in the queue. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::Semaphore; - /// - /// #[tokio::main] - /// async fn main() { - /// let semaphore = Arc::new(Semaphore::new(3)); - /// let mut join_handles = Vec::new(); - /// - /// for _ in 0..5 { - /// let permit = semaphore.clone().acquire_owned().await.unwrap(); - /// join_handles.push(tokio::spawn(async move { - /// // perform task... 
- /// // explicitly own `permit` in the task - /// drop(permit); - /// })); - /// } - /// - /// for handle in join_handles { - /// handle.await.unwrap(); - /// } - /// } - /// ``` - /// - /// [`Arc`]: std::sync::Arc - /// [`AcquireError`]: crate::sync::AcquireError - /// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit - pub async fn acquire_owned(self: Arc) -> Result { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let inner = trace::async_op( - || self.ll_sem.acquire(1), - self.resource_span.clone(), - "Semaphore::acquire_owned", - "poll", - true, - ); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = self.ll_sem.acquire(1); - - inner.await?; - Ok(OwnedSemaphorePermit { - sem: self, - permits: 1, - }) - } - - /// Acquires `n` permits from the semaphore. - /// - /// The semaphore must be wrapped in an [`Arc`] to call this method. - /// If the semaphore has been closed, this returns an [`AcquireError`]. - /// Otherwise, this returns a [`OwnedSemaphorePermit`] representing the - /// acquired permit. - /// - /// # Cancel safety - /// - /// This method uses a queue to fairly distribute permits in the order they - /// were requested. Cancelling a call to `acquire_many_owned` makes you lose - /// your place in the queue. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::Semaphore; - /// - /// #[tokio::main] - /// async fn main() { - /// let semaphore = Arc::new(Semaphore::new(10)); - /// let mut join_handles = Vec::new(); - /// - /// for _ in 0..5 { - /// let permit = semaphore.clone().acquire_many_owned(2).await.unwrap(); - /// join_handles.push(tokio::spawn(async move { - /// // perform task... 
- /// // explicitly own `permit` in the task - /// drop(permit); - /// })); - /// } - /// - /// for handle in join_handles { - /// handle.await.unwrap(); - /// } - /// } - /// ``` - /// - /// [`Arc`]: std::sync::Arc - /// [`AcquireError`]: crate::sync::AcquireError - /// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit - pub async fn acquire_many_owned( - self: Arc, - n: u32, - ) -> Result { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let inner = trace::async_op( - || self.ll_sem.acquire(n), - self.resource_span.clone(), - "Semaphore::acquire_many_owned", - "poll", - true, - ); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = self.ll_sem.acquire(n); - - inner.await?; - Ok(OwnedSemaphorePermit { - sem: self, - permits: n, - }) - } - - /// Tries to acquire a permit from the semaphore. - /// - /// The semaphore must be wrapped in an [`Arc`] to call this method. If - /// the semaphore has been closed, this returns a [`TryAcquireError::Closed`] - /// and a [`TryAcquireError::NoPermits`] if there are no permits left. - /// Otherwise, this returns a [`OwnedSemaphorePermit`] representing the - /// acquired permit. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{Semaphore, TryAcquireError}; - /// - /// # fn main() { - /// let semaphore = Arc::new(Semaphore::new(2)); - /// - /// let permit_1 = Arc::clone(&semaphore).try_acquire_owned().unwrap(); - /// assert_eq!(semaphore.available_permits(), 1); - /// - /// let permit_2 = Arc::clone(&semaphore).try_acquire_owned().unwrap(); - /// assert_eq!(semaphore.available_permits(), 0); - /// - /// let permit_3 = semaphore.try_acquire_owned(); - /// assert_eq!(permit_3.err(), Some(TryAcquireError::NoPermits)); - /// # } - /// ``` - /// - /// [`Arc`]: std::sync::Arc - /// [`TryAcquireError::Closed`]: crate::sync::TryAcquireError::Closed - /// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits - /// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit - pub fn try_acquire_owned(self: Arc) -> Result { - match self.ll_sem.try_acquire(1) { - Ok(_) => Ok(OwnedSemaphorePermit { - sem: self, - permits: 1, - }), - Err(e) => Err(e), - } - } - - /// Tries to acquire `n` permits from the semaphore. - /// - /// The semaphore must be wrapped in an [`Arc`] to call this method. If - /// the semaphore has been closed, this returns a [`TryAcquireError::Closed`] - /// and a [`TryAcquireError::NoPermits`] if there are no permits left. - /// Otherwise, this returns a [`OwnedSemaphorePermit`] representing the - /// acquired permit. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use tokio::sync::{Semaphore, TryAcquireError}; - /// - /// # fn main() { - /// let semaphore = Arc::new(Semaphore::new(4)); - /// - /// let permit_1 = Arc::clone(&semaphore).try_acquire_many_owned(3).unwrap(); - /// assert_eq!(semaphore.available_permits(), 1); - /// - /// let permit_2 = semaphore.try_acquire_many_owned(2); - /// assert_eq!(permit_2.err(), Some(TryAcquireError::NoPermits)); - /// # } - /// ``` - /// - /// [`Arc`]: std::sync::Arc - /// [`TryAcquireError::Closed`]: crate::sync::TryAcquireError::Closed - /// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits - /// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit - pub fn try_acquire_many_owned( - self: Arc, - n: u32, - ) -> Result { - match self.ll_sem.try_acquire(n) { - Ok(_) => Ok(OwnedSemaphorePermit { - sem: self, - permits: n, - }), - Err(e) => Err(e), - } - } - - /// Closes the semaphore. - /// - /// This prevents the semaphore from issuing new permits and notifies all pending waiters. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::Semaphore; - /// use std::sync::Arc; - /// use tokio::sync::TryAcquireError; - /// - /// #[tokio::main] - /// async fn main() { - /// let semaphore = Arc::new(Semaphore::new(1)); - /// let semaphore2 = semaphore.clone(); - /// - /// tokio::spawn(async move { - /// let permit = semaphore.acquire_many(2).await; - /// assert!(permit.is_err()); - /// println!("waiter received error"); - /// }); - /// - /// println!("closing semaphore"); - /// semaphore2.close(); - /// - /// // Cannot obtain more permits - /// assert_eq!(semaphore2.try_acquire().err(), Some(TryAcquireError::Closed)) - /// } - /// ``` - pub fn close(&self) { - self.ll_sem.close(); - } - - /// Returns true if the semaphore is closed - pub fn is_closed(&self) -> bool { - self.ll_sem.is_closed() - } -} - -impl<'a> SemaphorePermit<'a> { - /// Forgets the permit **without** releasing it back to the semaphore. - /// This can be used to reduce the amount of permits available from a - /// semaphore. - pub fn forget(mut self) { - self.permits = 0; - } - - /// Merge two [`SemaphorePermit`] instances together, consuming `other` - /// without releasing the permits it holds. - /// - /// Permits held by both `self` and `other` are released when `self` drops. - /// - /// # Panics - /// - /// This function panics if permits from different [`Semaphore`] instances - /// are merged. - #[track_caller] - pub fn merge(&mut self, mut other: Self) { - assert!( - std::ptr::eq(self.sem, other.sem), - "merging permits from different semaphore instances" - ); - self.permits += other.permits; - other.permits = 0; - } -} - -impl OwnedSemaphorePermit { - /// Forgets the permit **without** releasing it back to the semaphore. - /// This can be used to reduce the amount of permits available from a - /// semaphore. 
- pub fn forget(mut self) { - self.permits = 0; - } - - /// Merge two [`OwnedSemaphorePermit`] instances together, consuming `other` - /// without releasing the permits it holds. - /// - /// Permits held by both `self` and `other` are released when `self` drops. - /// - /// # Panics - /// - /// This function panics if permits from different [`Semaphore`] instances - /// are merged. - #[track_caller] - pub fn merge(&mut self, mut other: Self) { - assert!( - Arc::ptr_eq(&self.sem, &other.sem), - "merging permits from different semaphore instances" - ); - self.permits += other.permits; - other.permits = 0; - } - - /// Returns the [`Semaphore`] from which this permit was acquired. - pub fn semaphore(&self) -> &Arc { - &self.sem - } -} - -impl Drop for SemaphorePermit<'_> { - fn drop(&mut self) { - self.sem.add_permits(self.permits as usize); - } -} - -impl Drop for OwnedSemaphorePermit { - fn drop(&mut self) { - self.sem.add_permits(self.permits as usize); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/task/atomic_waker.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/task/atomic_waker.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/task/atomic_waker.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/task/atomic_waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,382 +0,0 @@ -#![cfg_attr(any(loom, not(feature = "sync")), allow(dead_code, unreachable_pub))] - -use crate::loom::cell::UnsafeCell; -use crate::loom::hint; -use crate::loom::sync::atomic::AtomicUsize; - -use std::fmt; -use std::panic::{resume_unwind, AssertUnwindSafe, RefUnwindSafe, UnwindSafe}; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; -use std::task::Waker; - -/// A synchronization primitive for task waking. -/// -/// `AtomicWaker` will coordinate concurrent wakes with the consumer -/// potentially "waking" the underlying task. 
This is useful in scenarios -/// where a computation completes in another thread and wants to wake the -/// consumer, but the consumer is in the process of being migrated to a new -/// logical task. -/// -/// Consumers should call `register` before checking the result of a computation -/// and producers should call `wake` after producing the computation (this -/// differs from the usual `thread::park` pattern). It is also permitted for -/// `wake` to be called **before** `register`. This results in a no-op. -/// -/// A single `AtomicWaker` may be reused for any number of calls to `register` or -/// `wake`. -pub(crate) struct AtomicWaker { - state: AtomicUsize, - waker: UnsafeCell>, -} - -impl RefUnwindSafe for AtomicWaker {} -impl UnwindSafe for AtomicWaker {} - -// `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell -// stores a `Waker` value produced by calls to `register` and many threads can -// race to take the waker by calling `wake`. -// -// If a new `Waker` instance is produced by calling `register` before an existing -// one is consumed, then the existing one is overwritten. -// -// While `AtomicWaker` is single-producer, the implementation ensures memory -// safety. In the event of concurrent calls to `register`, there will be a -// single winner whose waker will get stored in the cell. The losers will not -// have their tasks woken. As such, callers should ensure to add synchronization -// to calls to `register`. -// -// The implementation uses a single `AtomicUsize` value to coordinate access to -// the `Waker` cell. There are two bits that are operated on independently. These -// are represented by `REGISTERING` and `WAKING`. -// -// The `REGISTERING` bit is set when a producer enters the critical section. The -// `WAKING` bit is set when a consumer enters the critical section. Neither -// bit being set is represented by `WAITING`. 
-// -// A thread obtains an exclusive lock on the waker cell by transitioning the -// state from `WAITING` to `REGISTERING` or `WAKING`, depending on the -// operation the thread wishes to perform. When this transition is made, it is -// guaranteed that no other thread will access the waker cell. -// -// # Registering -// -// On a call to `register`, an attempt to transition the state from WAITING to -// REGISTERING is made. On success, the caller obtains a lock on the waker cell. -// -// If the lock is obtained, then the thread sets the waker cell to the waker -// provided as an argument. Then it attempts to transition the state back from -// `REGISTERING` -> `WAITING`. -// -// If this transition is successful, then the registering process is complete -// and the next call to `wake` will observe the waker. -// -// If the transition fails, then there was a concurrent call to `wake` that -// was unable to access the waker cell (due to the registering thread holding the -// lock). To handle this, the registering thread removes the waker it just set -// from the cell and calls `wake` on it. This call to wake represents the -// attempt to wake by the other thread (that set the `WAKING` bit). The -// state is then transitioned from `REGISTERING | WAKING` back to `WAITING`. -// This transition must succeed because, at this point, the state cannot be -// transitioned by another thread. -// -// # Waking -// -// On a call to `wake`, an attempt to transition the state from `WAITING` to -// `WAKING` is made. On success, the caller obtains a lock on the waker cell. -// -// If the lock is obtained, then the thread takes ownership of the current value -// in the waker cell, and calls `wake` on it. The state is then transitioned -// back to `WAITING`. This transition must succeed as, at this point, the state -// cannot be transitioned by another thread. -// -// If the thread is unable to obtain the lock, the `WAKING` bit is still set. 
-// This is because it has either been set by the current thread but the previous -// value included the `REGISTERING` bit **or** a concurrent thread is in the -// `WAKING` critical section. Either way, no action must be taken. -// -// If the current thread is the only concurrent call to `wake` and another -// thread is in the `register` critical section, when the other thread **exits** -// the `register` critical section, it will observe the `WAKING` bit and -// handle the waker itself. -// -// If another thread is in the `waker` critical section, then it will handle -// waking the caller task. -// -// # A potential race (is safely handled). -// -// Imagine the following situation: -// -// * Thread A obtains the `wake` lock and wakes a task. -// -// * Before thread A releases the `wake` lock, the woken task is scheduled. -// -// * Thread B attempts to wake the task. In theory this should result in the -// task being woken, but it cannot because thread A still holds the wake -// lock. -// -// This case is handled by requiring users of `AtomicWaker` to call `register` -// **before** attempting to observe the application state change that resulted -// in the task being woken. The wakers also change the application state -// before calling wake. -// -// Because of this, the task will do one of two things. -// -// 1) Observe the application state change that Thread B is waking on. In -// this case, it is OK for Thread B's wake to be lost. -// -// 2) Call register before attempting to observe the application state. Since -// Thread A still holds the `wake` lock, the call to `register` will result -// in the task waking itself and get scheduled again. - -/// Idle state. -const WAITING: usize = 0; - -/// A new waker value is being registered with the `AtomicWaker` cell. -const REGISTERING: usize = 0b01; - -/// The task currently registered with the `AtomicWaker` cell is being woken. 
-const WAKING: usize = 0b10; - -impl AtomicWaker { - /// Create an `AtomicWaker` - pub(crate) fn new() -> AtomicWaker { - AtomicWaker { - state: AtomicUsize::new(WAITING), - waker: UnsafeCell::new(None), - } - } - - /* - /// Registers the current waker to be notified on calls to `wake`. - pub(crate) fn register(&self, waker: Waker) { - self.do_register(waker); - } - */ - - /// Registers the provided waker to be notified on calls to `wake`. - /// - /// The new waker will take place of any previous wakers that were registered - /// by previous calls to `register`. Any calls to `wake` that happen after - /// a call to `register` (as defined by the memory ordering rules), will - /// wake the `register` caller's task. - /// - /// It is safe to call `register` with multiple other threads concurrently - /// calling `wake`. This will result in the `register` caller's current - /// task being woken once. - /// - /// This function is safe to call concurrently, but this is generally a bad - /// idea. Concurrent calls to `register` will attempt to register different - /// tasks to be woken. One of the callers will win and have its task set, - /// but there is no guarantee as to which caller will succeed. - pub(crate) fn register_by_ref(&self, waker: &Waker) { - self.do_register(waker); - } - - fn do_register(&self, waker: W) - where - W: WakerRef, - { - fn catch_unwind R, R>(f: F) -> std::thread::Result { - std::panic::catch_unwind(AssertUnwindSafe(f)) - } - - match self - .state - .compare_exchange(WAITING, REGISTERING, Acquire, Acquire) - .unwrap_or_else(|x| x) - { - WAITING => { - unsafe { - // If `into_waker` panics (because it's code outside of - // AtomicWaker) we need to prime a guard that is called on - // unwind to restore the waker to a WAITING state. Otherwise - // any future calls to register will incorrectly be stuck - // believing it's being updated by someone else. 
- let new_waker_or_panic = catch_unwind(move || waker.into_waker()); - - // Set the field to contain the new waker, or if - // `into_waker` panicked, leave the old value. - let mut maybe_panic = None; - let mut old_waker = None; - match new_waker_or_panic { - Ok(new_waker) => { - old_waker = self.waker.with_mut(|t| (*t).take()); - self.waker.with_mut(|t| *t = Some(new_waker)); - } - Err(panic) => maybe_panic = Some(panic), - } - - // Release the lock. If the state transitioned to include - // the `WAKING` bit, this means that a wake has been - // called concurrently, so we have to remove the waker and - // wake it.` - // - // Start by assuming that the state is `REGISTERING` as this - // is what we jut set it to. - let res = self - .state - .compare_exchange(REGISTERING, WAITING, AcqRel, Acquire); - - match res { - Ok(_) => { - // We don't want to give the caller the panic if it - // was someone else who put in that waker. - let _ = catch_unwind(move || { - drop(old_waker); - }); - } - Err(actual) => { - // This branch can only be reached if a - // concurrent thread called `wake`. In this - // case, `actual` **must** be `REGISTERING | - // WAKING`. - debug_assert_eq!(actual, REGISTERING | WAKING); - - // Take the waker to wake once the atomic operation has - // completed. - let mut waker = self.waker.with_mut(|t| (*t).take()); - - // Just swap, because no one could change state - // while state == `Registering | `Waking` - self.state.swap(WAITING, AcqRel); - - // If `into_waker` panicked, then the waker in the - // waker slot is actually the old waker. - if maybe_panic.is_some() { - old_waker = waker.take(); - } - - // We don't want to give the caller the panic if it - // was someone else who put in that waker. - if let Some(old_waker) = old_waker { - let _ = catch_unwind(move || { - old_waker.wake(); - }); - } - - // The atomic swap was complete, now wake the waker - // and return. 
- // - // If this panics, we end up in a consumed state and - // return the panic to the caller. - if let Some(waker) = waker { - debug_assert!(maybe_panic.is_none()); - waker.wake(); - } - } - } - - if let Some(panic) = maybe_panic { - // If `into_waker` panicked, return the panic to the caller. - resume_unwind(panic); - } - } - } - WAKING => { - // Currently in the process of waking the task, i.e., - // `wake` is currently being called on the old waker. - // So, we call wake on the new waker. - // - // If this panics, someone else is responsible for restoring the - // state of the waker. - waker.wake(); - - // This is equivalent to a spin lock, so use a spin hint. - hint::spin_loop(); - } - state => { - // In this case, a concurrent thread is holding the - // "registering" lock. This probably indicates a bug in the - // caller's code as racing to call `register` doesn't make much - // sense. - // - // We just want to maintain memory safety. It is ok to drop the - // call to `register`. - debug_assert!(state == REGISTERING || state == REGISTERING | WAKING); - } - } - } - - /// Wakes the task that last called `register`. - /// - /// If `register` has not been called yet, then this does nothing. - pub(crate) fn wake(&self) { - if let Some(waker) = self.take_waker() { - // If wake panics, we've consumed the waker which is a legitimate - // outcome. - waker.wake(); - } - } - - /// Attempts to take the `Waker` value out of the `AtomicWaker` with the - /// intention that the caller will wake the task later. - pub(crate) fn take_waker(&self) -> Option { - // AcqRel ordering is used in order to acquire the value of the `waker` - // cell as well as to establish a `release` ordering with whatever - // memory the `AtomicWaker` is associated with. - match self.state.fetch_or(WAKING, AcqRel) { - WAITING => { - // The waking lock has been acquired. 
- let waker = unsafe { self.waker.with_mut(|t| (*t).take()) }; - - // Release the lock - self.state.fetch_and(!WAKING, Release); - - waker - } - state => { - // There is a concurrent thread currently updating the - // associated waker. - // - // Nothing more to do as the `WAKING` bit has been set. It - // doesn't matter if there are concurrent registering threads or - // not. - // - debug_assert!( - state == REGISTERING || state == REGISTERING | WAKING || state == WAKING - ); - None - } - } - } -} - -impl Default for AtomicWaker { - fn default() -> Self { - AtomicWaker::new() - } -} - -impl fmt::Debug for AtomicWaker { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "AtomicWaker") - } -} - -unsafe impl Send for AtomicWaker {} -unsafe impl Sync for AtomicWaker {} - -trait WakerRef { - fn wake(self); - fn into_waker(self) -> Waker; -} - -impl WakerRef for Waker { - fn wake(self) { - self.wake() - } - - fn into_waker(self) -> Waker { - self - } -} - -impl WakerRef for &Waker { - fn wake(self) { - self.wake_by_ref() - } - - fn into_waker(self) -> Waker { - self.clone() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/task/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/task/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/task/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/task/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,4 +0,0 @@ -//! Thread-safe task notification primitives. 
- -mod atomic_waker; -pub(crate) use self::atomic_waker::AtomicWaker; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/atomic_waker.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/atomic_waker.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/atomic_waker.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/atomic_waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,77 +0,0 @@ -use crate::sync::AtomicWaker; -use tokio_test::task; - -use std::task::Waker; - -trait AssertSend: Send {} -trait AssertSync: Sync {} - -impl AssertSend for AtomicWaker {} -impl AssertSync for AtomicWaker {} - -impl AssertSend for Waker {} -impl AssertSync for Waker {} - -#[cfg(all(target_family = "wasm", not(target_os = "wasi")))] -use wasm_bindgen_test::wasm_bindgen_test as test; - -#[test] -fn basic_usage() { - let mut waker = task::spawn(AtomicWaker::new()); - - waker.enter(|cx, waker| waker.register_by_ref(cx.waker())); - waker.wake(); - - assert!(waker.is_woken()); -} - -#[test] -fn wake_without_register() { - let mut waker = task::spawn(AtomicWaker::new()); - waker.wake(); - - // Registering should not result in a notification - waker.enter(|cx, waker| waker.register_by_ref(cx.waker())); - - assert!(!waker.is_woken()); -} - -#[test] -#[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding -fn atomic_waker_panic_safe() { - use std::panic; - use std::ptr; - use std::task::{RawWaker, RawWakerVTable, Waker}; - - static PANICKING_VTABLE: RawWakerVTable = RawWakerVTable::new( - |_| panic!("clone"), - |_| unimplemented!("wake"), - |_| unimplemented!("wake_by_ref"), - |_| (), - ); - - static NONPANICKING_VTABLE: RawWakerVTable = RawWakerVTable::new( - |_| RawWaker::new(ptr::null(), &NONPANICKING_VTABLE), - |_| unimplemented!("wake"), - |_| unimplemented!("wake_by_ref"), - |_| (), - ); - - let panicking = unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &PANICKING_VTABLE)) }; - let nonpanicking = unsafe 
{ Waker::from_raw(RawWaker::new(ptr::null(), &NONPANICKING_VTABLE)) }; - - let atomic_waker = AtomicWaker::new(); - - let panicking = panic::AssertUnwindSafe(&panicking); - - let result = panic::catch_unwind(|| { - let panic::AssertUnwindSafe(panicking) = panicking; - atomic_waker.register_by_ref(panicking); - }); - - assert!(result.is_err()); - assert!(atomic_waker.take_waker().is_none()); - - atomic_waker.register_by_ref(&nonpanicking); - assert!(atomic_waker.take_waker().is_some()); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_atomic_waker.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_atomic_waker.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_atomic_waker.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_atomic_waker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,100 +0,0 @@ -use crate::sync::task::AtomicWaker; - -use futures::future::poll_fn; -use loom::future::block_on; -use loom::sync::atomic::AtomicUsize; -use loom::thread; -use std::sync::atomic::Ordering::Relaxed; -use std::sync::Arc; -use std::task::Poll::{Pending, Ready}; - -struct Chan { - num: AtomicUsize, - task: AtomicWaker, -} - -#[test] -fn basic_notification() { - const NUM_NOTIFY: usize = 2; - - loom::model(|| { - let chan = Arc::new(Chan { - num: AtomicUsize::new(0), - task: AtomicWaker::new(), - }); - - for _ in 0..NUM_NOTIFY { - let chan = chan.clone(); - - thread::spawn(move || { - chan.num.fetch_add(1, Relaxed); - chan.task.wake(); - }); - } - - block_on(poll_fn(move |cx| { - chan.task.register_by_ref(cx.waker()); - - if NUM_NOTIFY == chan.num.load(Relaxed) { - return Ready(()); - } - - Pending - })); - }); -} - -#[test] -fn test_panicky_waker() { - use std::panic; - use std::ptr; - use std::task::{RawWaker, RawWakerVTable, Waker}; - - static PANICKING_VTABLE: RawWakerVTable = - RawWakerVTable::new(|_| panic!("clone"), |_| (), |_| (), |_| ()); - - let panicking = unsafe { 
Waker::from_raw(RawWaker::new(ptr::null(), &PANICKING_VTABLE)) }; - - // If you're working with this test (and I sure hope you never have to!), - // uncomment the following section because there will be a lot of panics - // which would otherwise log. - // - // We can't however leaved it uncommented, because it's global. - // panic::set_hook(Box::new(|_| ())); - - const NUM_NOTIFY: usize = 2; - - loom::model(move || { - let chan = Arc::new(Chan { - num: AtomicUsize::new(0), - task: AtomicWaker::new(), - }); - - for _ in 0..NUM_NOTIFY { - let chan = chan.clone(); - - thread::spawn(move || { - chan.num.fetch_add(1, Relaxed); - chan.task.wake(); - }); - } - - // Note: this panic should have no effect on the overall state of the - // waker and it should proceed as normal. - // - // A thread above might race to flag a wakeup, and a WAKING state will - // be preserved if this expected panic races with that so the below - // procedure should be allowed to continue uninterrupted. - let _ = panic::catch_unwind(|| chan.task.register_by_ref(&panicking)); - - block_on(poll_fn(move |cx| { - chan.task.register_by_ref(cx.waker()); - - if NUM_NOTIFY == chan.num.load(Relaxed) { - return Ready(()); - } - - Pending - })); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_broadcast.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_broadcast.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_broadcast.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_broadcast.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,207 +0,0 @@ -use crate::sync::broadcast; -use crate::sync::broadcast::error::RecvError::{Closed, Lagged}; - -use loom::future::block_on; -use loom::sync::Arc; -use loom::thread; -use tokio_test::{assert_err, assert_ok}; - -#[test] -fn broadcast_send() { - loom::model(|| { - let (tx1, mut rx) = broadcast::channel(2); - let tx1 = Arc::new(tx1); - let tx2 = tx1.clone(); - - let th1 = 
thread::spawn(move || { - block_on(async { - assert_ok!(tx1.send("one")); - assert_ok!(tx1.send("two")); - assert_ok!(tx1.send("three")); - }); - }); - - let th2 = thread::spawn(move || { - block_on(async { - assert_ok!(tx2.send("eins")); - assert_ok!(tx2.send("zwei")); - assert_ok!(tx2.send("drei")); - }); - }); - - block_on(async { - let mut num = 0; - loop { - match rx.recv().await { - Ok(_) => num += 1, - Err(Closed) => break, - Err(Lagged(n)) => num += n as usize, - } - } - assert_eq!(num, 6); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} - -// An `Arc` is used as the value in order to detect memory leaks. -#[test] -fn broadcast_two() { - loom::model(|| { - let (tx, mut rx1) = broadcast::channel::>(16); - let mut rx2 = tx.subscribe(); - - let th1 = thread::spawn(move || { - block_on(async { - let v = assert_ok!(rx1.recv().await); - assert_eq!(*v, "hello"); - - let v = assert_ok!(rx1.recv().await); - assert_eq!(*v, "world"); - - match assert_err!(rx1.recv().await) { - Closed => {} - _ => panic!(), - } - }); - }); - - let th2 = thread::spawn(move || { - block_on(async { - let v = assert_ok!(rx2.recv().await); - assert_eq!(*v, "hello"); - - let v = assert_ok!(rx2.recv().await); - assert_eq!(*v, "world"); - - match assert_err!(rx2.recv().await) { - Closed => {} - _ => panic!(), - } - }); - }); - - assert_ok!(tx.send(Arc::new("hello"))); - assert_ok!(tx.send(Arc::new("world"))); - drop(tx); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} - -#[test] -fn broadcast_wrap() { - loom::model(|| { - let (tx, mut rx1) = broadcast::channel(2); - let mut rx2 = tx.subscribe(); - - let th1 = thread::spawn(move || { - block_on(async { - let mut num = 0; - - loop { - match rx1.recv().await { - Ok(_) => num += 1, - Err(Closed) => break, - Err(Lagged(n)) => num += n as usize, - } - } - - assert_eq!(num, 3); - }); - }); - - let th2 = thread::spawn(move || { - block_on(async { - let mut num = 0; - - loop { - match rx2.recv().await { - Ok(_) => 
num += 1, - Err(Closed) => break, - Err(Lagged(n)) => num += n as usize, - } - } - - assert_eq!(num, 3); - }); - }); - - assert_ok!(tx.send("one")); - assert_ok!(tx.send("two")); - assert_ok!(tx.send("three")); - - drop(tx); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} - -#[test] -fn drop_rx() { - loom::model(|| { - let (tx, mut rx1) = broadcast::channel(16); - let rx2 = tx.subscribe(); - - let th1 = thread::spawn(move || { - block_on(async { - let v = assert_ok!(rx1.recv().await); - assert_eq!(v, "one"); - - let v = assert_ok!(rx1.recv().await); - assert_eq!(v, "two"); - - let v = assert_ok!(rx1.recv().await); - assert_eq!(v, "three"); - - match assert_err!(rx1.recv().await) { - Closed => {} - _ => panic!(), - } - }); - }); - - let th2 = thread::spawn(move || { - drop(rx2); - }); - - assert_ok!(tx.send("one")); - assert_ok!(tx.send("two")); - assert_ok!(tx.send("three")); - drop(tx); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} - -#[test] -fn drop_multiple_rx_with_overflow() { - loom::model(move || { - // It is essential to have multiple senders and receivers in this test case. 
- let (tx, mut rx) = broadcast::channel(1); - let _rx2 = tx.subscribe(); - - let _ = tx.send(()); - let tx2 = tx.clone(); - let th1 = thread::spawn(move || { - block_on(async { - for _ in 0..100 { - let _ = tx2.send(()); - } - }); - }); - let _ = tx.send(()); - - let th2 = thread::spawn(move || { - block_on(async { while let Ok(_) = rx.recv().await {} }); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_list.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_list.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_list.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_list.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,48 +0,0 @@ -use crate::sync::mpsc::list; - -use loom::thread; -use std::sync::Arc; - -#[test] -fn smoke() { - use crate::sync::mpsc::block::Read; - - const NUM_TX: usize = 2; - const NUM_MSG: usize = 2; - - loom::model(|| { - let (tx, mut rx) = list::channel(); - let tx = Arc::new(tx); - - for th in 0..NUM_TX { - let tx = tx.clone(); - - thread::spawn(move || { - for i in 0..NUM_MSG { - tx.push((th, i)); - } - }); - } - - let mut next = vec![0; NUM_TX]; - - loop { - match rx.pop(&tx) { - Some(Read::Value((th, v))) => { - assert_eq!(v, next[th]); - next[th] += 1; - - if next.iter().all(|&i| i == NUM_MSG) { - break; - } - } - Some(Read::Closed) => { - panic!(); - } - None => { - thread::yield_now(); - } - } - } - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_mpsc.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_mpsc.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_mpsc.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_mpsc.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,190 +0,0 @@ -use crate::sync::mpsc; - -use futures::future::poll_fn; -use loom::future::block_on; -use loom::sync::Arc; -use loom::thread; 
-use tokio_test::assert_ok; - -#[test] -fn closing_tx() { - loom::model(|| { - let (tx, mut rx) = mpsc::channel(16); - - thread::spawn(move || { - tx.try_send(()).unwrap(); - drop(tx); - }); - - let v = block_on(rx.recv()); - assert!(v.is_some()); - - let v = block_on(rx.recv()); - assert!(v.is_none()); - }); -} - -#[test] -fn closing_unbounded_tx() { - loom::model(|| { - let (tx, mut rx) = mpsc::unbounded_channel(); - - thread::spawn(move || { - tx.send(()).unwrap(); - drop(tx); - }); - - let v = block_on(rx.recv()); - assert!(v.is_some()); - - let v = block_on(rx.recv()); - assert!(v.is_none()); - }); -} - -#[test] -fn closing_bounded_rx() { - loom::model(|| { - let (tx1, rx) = mpsc::channel::<()>(16); - let tx2 = tx1.clone(); - thread::spawn(move || { - drop(rx); - }); - - block_on(tx1.closed()); - block_on(tx2.closed()); - }); -} - -#[test] -fn closing_and_sending() { - loom::model(|| { - let (tx1, mut rx) = mpsc::channel::<()>(16); - let tx1 = Arc::new(tx1); - let tx2 = tx1.clone(); - - let th1 = thread::spawn(move || { - tx1.try_send(()).unwrap(); - }); - - let th2 = thread::spawn(move || { - block_on(tx2.closed()); - }); - - let th3 = thread::spawn(move || { - let v = block_on(rx.recv()); - assert!(v.is_some()); - drop(rx); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - assert_ok!(th3.join()); - }); -} - -#[test] -fn closing_unbounded_rx() { - loom::model(|| { - let (tx1, rx) = mpsc::unbounded_channel::<()>(); - let tx2 = tx1.clone(); - thread::spawn(move || { - drop(rx); - }); - - block_on(tx1.closed()); - block_on(tx2.closed()); - }); -} - -#[test] -fn dropping_tx() { - loom::model(|| { - let (tx, mut rx) = mpsc::channel::<()>(16); - - for _ in 0..2 { - let tx = tx.clone(); - thread::spawn(move || { - drop(tx); - }); - } - drop(tx); - - let v = block_on(rx.recv()); - assert!(v.is_none()); - }); -} - -#[test] -fn dropping_unbounded_tx() { - loom::model(|| { - let (tx, mut rx) = mpsc::unbounded_channel::<()>(); - - for _ in 0..2 { - let tx = 
tx.clone(); - thread::spawn(move || { - drop(tx); - }); - } - drop(tx); - - let v = block_on(rx.recv()); - assert!(v.is_none()); - }); -} - -#[test] -fn try_recv() { - loom::model(|| { - use crate::sync::{mpsc, Semaphore}; - use loom::sync::{Arc, Mutex}; - - const PERMITS: usize = 2; - const TASKS: usize = 2; - const CYCLES: usize = 1; - - struct Context { - sem: Arc, - tx: mpsc::Sender<()>, - rx: Mutex>, - } - - fn run(ctx: &Context) { - block_on(async { - let permit = ctx.sem.acquire().await; - assert_ok!(ctx.rx.lock().unwrap().try_recv()); - crate::task::yield_now().await; - assert_ok!(ctx.tx.clone().try_send(())); - drop(permit); - }); - } - - let (tx, rx) = mpsc::channel(PERMITS); - let sem = Arc::new(Semaphore::new(PERMITS)); - let ctx = Arc::new(Context { - sem, - tx, - rx: Mutex::new(rx), - }); - - for _ in 0..PERMITS { - assert_ok!(ctx.tx.clone().try_send(())); - } - - let mut ths = Vec::new(); - - for _ in 0..TASKS { - let ctx = ctx.clone(); - - ths.push(thread::spawn(move || { - run(&ctx); - })); - } - - run(&ctx); - - for th in ths { - th.join().unwrap(); - } - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_notify.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_notify.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_notify.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_notify.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,331 +0,0 @@ -use crate::sync::Notify; - -use loom::future::block_on; -use loom::sync::Arc; -use loom::thread; - -use tokio_test::{assert_pending, assert_ready}; - -/// `util::wake_list::NUM_WAKERS` -const WAKE_LIST_SIZE: usize = 32; - -#[test] -fn notify_one() { - loom::model(|| { - let tx = Arc::new(Notify::new()); - let rx = tx.clone(); - - let th = thread::spawn(move || { - block_on(async { - rx.notified().await; - }); - }); - - tx.notify_one(); - th.join().unwrap(); - }); -} - -#[test] -fn notify_waiters() { - loom::model(|| { - let 
notify = Arc::new(Notify::new()); - let tx = notify.clone(); - let notified1 = notify.notified(); - let notified2 = notify.notified(); - - let th = thread::spawn(move || { - tx.notify_waiters(); - }); - - block_on(async { - notified1.await; - notified2.await; - }); - - th.join().unwrap(); - }); -} - -#[test] -fn notify_waiters_and_one() { - loom::model(|| { - let notify = Arc::new(Notify::new()); - let tx1 = notify.clone(); - let tx2 = notify.clone(); - - let th1 = thread::spawn(move || { - tx1.notify_waiters(); - }); - - let th2 = thread::spawn(move || { - tx2.notify_one(); - }); - - let th3 = thread::spawn(move || { - let notified = notify.notified(); - - block_on(async { - notified.await; - }); - }); - - th1.join().unwrap(); - th2.join().unwrap(); - th3.join().unwrap(); - }); -} - -#[test] -fn notify_multi() { - loom::model(|| { - let notify = Arc::new(Notify::new()); - - let mut ths = vec![]; - - for _ in 0..2 { - let notify = notify.clone(); - - ths.push(thread::spawn(move || { - block_on(async { - notify.notified().await; - notify.notify_one(); - }) - })); - } - - notify.notify_one(); - - for th in ths.drain(..) 
{ - th.join().unwrap(); - } - - block_on(async { - notify.notified().await; - }); - }); -} - -#[test] -fn notify_drop() { - use crate::future::poll_fn; - use std::future::Future; - use std::task::Poll; - - loom::model(|| { - let notify = Arc::new(Notify::new()); - let rx1 = notify.clone(); - let rx2 = notify.clone(); - - let th1 = thread::spawn(move || { - let mut recv = Box::pin(rx1.notified()); - - block_on(poll_fn(|cx| { - if recv.as_mut().poll(cx).is_ready() { - rx1.notify_one(); - } - Poll::Ready(()) - })); - }); - - let th2 = thread::spawn(move || { - block_on(async { - rx2.notified().await; - // Trigger second notification - rx2.notify_one(); - rx2.notified().await; - }); - }); - - notify.notify_one(); - - th1.join().unwrap(); - th2.join().unwrap(); - }); -} - -/// Polls two `Notified` futures and checks if poll results are consistent -/// with each other. If the first future is notified by a `notify_waiters` -/// call, then the second one must be notified as well. -#[test] -fn notify_waiters_poll_consistency() { - fn notify_waiters_poll_consistency_variant(poll_setting: [bool; 2]) { - let notify = Arc::new(Notify::new()); - let mut notified = [ - tokio_test::task::spawn(notify.notified()), - tokio_test::task::spawn(notify.notified()), - ]; - for i in 0..2 { - if poll_setting[i] { - assert_pending!(notified[i].poll()); - } - } - - let tx = notify.clone(); - let th = thread::spawn(move || { - tx.notify_waiters(); - }); - - let res1 = notified[0].poll(); - let res2 = notified[1].poll(); - - // If res1 is ready, then res2 must also be ready. - assert!(res1.is_pending() || res2.is_ready()); - - th.join().unwrap(); - } - - // We test different scenarios in which pending futures had or had not - // been polled before the call to `notify_waiters`. 
- loom::model(|| notify_waiters_poll_consistency_variant([false, false])); - loom::model(|| notify_waiters_poll_consistency_variant([true, false])); - loom::model(|| notify_waiters_poll_consistency_variant([false, true])); - loom::model(|| notify_waiters_poll_consistency_variant([true, true])); -} - -/// Polls two `Notified` futures and checks if poll results are consistent -/// with each other. If the first future is notified by a `notify_waiters` -/// call, then the second one must be notified as well. -/// -/// Here we also add other `Notified` futures in between to force the two -/// tested futures to end up in different chunks. -#[test] -fn notify_waiters_poll_consistency_many() { - fn notify_waiters_poll_consistency_many_variant(order: [usize; 2]) { - let notify = Arc::new(Notify::new()); - - let mut futs = (0..WAKE_LIST_SIZE + 1) - .map(|_| tokio_test::task::spawn(notify.notified())) - .collect::>(); - - assert_pending!(futs[order[0]].poll()); - for i in 2..futs.len() { - assert_pending!(futs[i].poll()); - } - assert_pending!(futs[order[1]].poll()); - - let tx = notify.clone(); - let th = thread::spawn(move || { - tx.notify_waiters(); - }); - - let res1 = futs[0].poll(); - let res2 = futs[1].poll(); - - // If res1 is ready, then res2 must also be ready. - assert!(res1.is_pending() || res2.is_ready()); - - th.join().unwrap(); - } - - // We test different scenarios in which futures are polled in different order. - loom::model(|| notify_waiters_poll_consistency_many_variant([0, 1])); - loom::model(|| notify_waiters_poll_consistency_many_variant([1, 0])); -} - -/// Checks if a call to `notify_waiters` is observed as atomic when combined -/// with a concurrent call to `notify_one`. 
-#[test] -fn notify_waiters_is_atomic() { - fn notify_waiters_is_atomic_variant(tested_fut_index: usize) { - let notify = Arc::new(Notify::new()); - - let mut futs = (0..WAKE_LIST_SIZE + 1) - .map(|_| tokio_test::task::spawn(notify.notified())) - .collect::>(); - - for fut in &mut futs { - assert_pending!(fut.poll()); - } - - let tx = notify.clone(); - let th = thread::spawn(move || { - tx.notify_waiters(); - }); - - block_on(async { - // If awaiting one of the futures completes, then we should be - // able to assume that all pending futures are notified. Therefore - // a notification from a subsequent `notify_one` call should not - // be consumed by an old future. - futs.remove(tested_fut_index).await; - - let mut new_fut = tokio_test::task::spawn(notify.notified()); - assert_pending!(new_fut.poll()); - - notify.notify_one(); - - // `new_fut` must consume the notification from `notify_one`. - assert_ready!(new_fut.poll()); - }); - - th.join().unwrap(); - } - - // We test different scenarios in which the tested future is at the beginning - // or at the end of the waiters queue used by `Notify`. - loom::model(|| notify_waiters_is_atomic_variant(0)); - loom::model(|| notify_waiters_is_atomic_variant(32)); -} - -/// Checks if a single call to `notify_waiters` does not get through two `Notified` -/// futures created and awaited sequentially like this: -/// ```ignore -/// notify.notified().await; -/// notify.notified().await; -/// ``` -#[test] -fn notify_waiters_sequential_notified_await() { - use crate::sync::oneshot; - - loom::model(|| { - let notify = Arc::new(Notify::new()); - - let (tx_fst, rx_fst) = oneshot::channel(); - let (tx_snd, rx_snd) = oneshot::channel(); - - let receiver = thread::spawn({ - let notify = notify.clone(); - move || { - block_on(async { - // Poll the first `Notified` to put it as the first waiter - // in the queue. 
- let mut first_notified = tokio_test::task::spawn(notify.notified()); - assert_pending!(first_notified.poll()); - - // Create additional waiters to force `notify_waiters` to - // release the lock at least once. - let _task_pile = (0..WAKE_LIST_SIZE + 1) - .map(|_| { - let mut fut = tokio_test::task::spawn(notify.notified()); - assert_pending!(fut.poll()); - fut - }) - .collect::>(); - - // We are ready for the notify_waiters call. - tx_fst.send(()).unwrap(); - - first_notified.await; - - // Poll the second `Notified` future to try to insert - // it to the waiters queue. - let mut second_notified = tokio_test::task::spawn(notify.notified()); - assert_pending!(second_notified.poll()); - - // Wait for the `notify_waiters` to end and check if we - // are woken up. - rx_snd.await.unwrap(); - assert_pending!(second_notified.poll()); - }); - } - }); - - // Wait for the signal and call `notify_waiters`. - block_on(rx_fst).unwrap(); - notify.notify_waiters(); - tx_snd.send(()).unwrap(); - - receiver.join().unwrap(); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_oneshot.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_oneshot.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_oneshot.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_oneshot.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,140 +0,0 @@ -use crate::sync::oneshot; - -use futures::future::poll_fn; -use loom::future::block_on; -use loom::thread; -use std::task::Poll::{Pending, Ready}; - -#[test] -fn smoke() { - loom::model(|| { - let (tx, rx) = oneshot::channel(); - - thread::spawn(move || { - tx.send(1).unwrap(); - }); - - let value = block_on(rx).unwrap(); - assert_eq!(1, value); - }); -} - -#[test] -fn changing_rx_task() { - loom::model(|| { - let (tx, mut rx) = oneshot::channel(); - - thread::spawn(move || { - tx.send(1).unwrap(); - }); - - let rx = thread::spawn(move || { - let ready = block_on(poll_fn(|cx| match 
Pin::new(&mut rx).poll(cx) { - Ready(Ok(value)) => { - assert_eq!(1, value); - Ready(true) - } - Ready(Err(_)) => unimplemented!(), - Pending => Ready(false), - })); - - if ready { - None - } else { - Some(rx) - } - }) - .join() - .unwrap(); - - if let Some(rx) = rx { - // Previous task parked, use a new task... - let value = block_on(rx).unwrap(); - assert_eq!(1, value); - } - }); -} - -#[test] -fn try_recv_close() { - // reproduces https://github.com/tokio-rs/tokio/issues/4225 - loom::model(|| { - let (tx, mut rx) = oneshot::channel(); - thread::spawn(move || { - let _ = tx.send(()); - }); - - rx.close(); - let _ = rx.try_recv(); - }) -} - -#[test] -fn recv_closed() { - // reproduces https://github.com/tokio-rs/tokio/issues/4225 - loom::model(|| { - let (tx, mut rx) = oneshot::channel(); - - thread::spawn(move || { - let _ = tx.send(1); - }); - - rx.close(); - let _ = block_on(rx); - }); -} - -// TODO: Move this into `oneshot` proper. - -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -struct OnClose<'a> { - tx: &'a mut oneshot::Sender, -} - -impl<'a> OnClose<'a> { - fn new(tx: &'a mut oneshot::Sender) -> Self { - OnClose { tx } - } -} - -impl Future for OnClose<'_> { - type Output = bool; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let fut = self.get_mut().tx.closed(); - crate::pin!(fut); - - Ready(fut.poll(cx).is_ready()) - } -} - -#[test] -fn changing_tx_task() { - loom::model(|| { - let (mut tx, rx) = oneshot::channel::(); - - thread::spawn(move || { - drop(rx); - }); - - let tx = thread::spawn(move || { - let t1 = block_on(OnClose::new(&mut tx)); - - if t1 { - None - } else { - Some(tx) - } - }) - .join() - .unwrap(); - - if let Some(mut tx) = tx { - // Previous task parked, use a new task... 
- block_on(OnClose::new(&mut tx)); - } - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_rwlock.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_rwlock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_rwlock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_rwlock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,105 +0,0 @@ -use crate::sync::rwlock::*; - -use loom::future::block_on; -use loom::thread; -use std::sync::Arc; - -#[test] -fn concurrent_write() { - let b = loom::model::Builder::new(); - - b.check(|| { - let rwlock = Arc::new(RwLock::::new(0)); - - let rwclone = rwlock.clone(); - let t1 = thread::spawn(move || { - block_on(async { - let mut guard = rwclone.write().await; - *guard += 5; - }); - }); - - let rwclone = rwlock.clone(); - let t2 = thread::spawn(move || { - block_on(async { - let mut guard = rwclone.write_owned().await; - *guard += 5; - }); - }); - - t1.join().expect("thread 1 write should not panic"); - t2.join().expect("thread 2 write should not panic"); - //when all threads have finished the value on the lock should be 10 - let guard = block_on(rwlock.read()); - assert_eq!(10, *guard); - }); -} - -#[test] -fn concurrent_read_write() { - let b = loom::model::Builder::new(); - - b.check(|| { - let rwlock = Arc::new(RwLock::::new(0)); - - let rwclone = rwlock.clone(); - let t1 = thread::spawn(move || { - block_on(async { - let mut guard = rwclone.write().await; - *guard += 5; - }); - }); - - let rwclone = rwlock.clone(); - let t2 = thread::spawn(move || { - block_on(async { - let mut guard = rwclone.write_owned().await; - *guard += 5; - }); - }); - - let rwclone = rwlock.clone(); - let t3 = thread::spawn(move || { - block_on(async { - let guard = rwclone.read().await; - //at this state the value on the lock may either be 0, 5, or 10 - assert!(*guard == 0 || *guard == 5 || *guard == 10); - }); - }); - - { - let guard = 
block_on(rwlock.clone().read_owned()); - //at this state the value on the lock may either be 0, 5, or 10 - assert!(*guard == 0 || *guard == 5 || *guard == 10); - } - - t1.join().expect("thread 1 write should not panic"); - t2.join().expect("thread 2 write should not panic"); - t3.join().expect("thread 3 read should not panic"); - - let guard = block_on(rwlock.read()); - //when all threads have finished the value on the lock should be 10 - assert_eq!(10, *guard); - }); -} -#[test] -fn downgrade() { - loom::model(|| { - let lock = Arc::new(RwLock::new(1)); - - let n = block_on(lock.write()); - - let cloned_lock = lock.clone(); - let handle = thread::spawn(move || { - let mut guard = block_on(cloned_lock.write()); - *guard = 2; - }); - - let n = n.downgrade(); - assert_eq!(*n, 1); - - drop(n); - handle.join().unwrap(); - assert_eq!(*block_on(lock.read()), 2); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_semaphore_batch.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_semaphore_batch.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_semaphore_batch.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_semaphore_batch.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,215 +0,0 @@ -use crate::sync::batch_semaphore::*; - -use futures::future::poll_fn; -use loom::future::block_on; -use loom::sync::atomic::AtomicUsize; -use loom::thread; -use std::future::Future; -use std::pin::Pin; -use std::sync::atomic::Ordering::SeqCst; -use std::sync::Arc; -use std::task::Poll::Ready; -use std::task::{Context, Poll}; - -#[test] -fn basic_usage() { - const NUM: usize = 2; - - struct Shared { - semaphore: Semaphore, - active: AtomicUsize, - } - - async fn actor(shared: Arc) { - shared.semaphore.acquire(1).await.unwrap(); - let actual = shared.active.fetch_add(1, SeqCst); - assert!(actual <= NUM - 1); - - let actual = shared.active.fetch_sub(1, SeqCst); - assert!(actual <= NUM); - 
shared.semaphore.release(1); - } - - loom::model(|| { - let shared = Arc::new(Shared { - semaphore: Semaphore::new(NUM), - active: AtomicUsize::new(0), - }); - - for _ in 0..NUM { - let shared = shared.clone(); - - thread::spawn(move || { - block_on(actor(shared)); - }); - } - - block_on(actor(shared)); - }); -} - -#[test] -fn release() { - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(1)); - - { - let semaphore = semaphore.clone(); - thread::spawn(move || { - block_on(semaphore.acquire(1)).unwrap(); - semaphore.release(1); - }); - } - - block_on(semaphore.acquire(1)).unwrap(); - - semaphore.release(1); - }); -} - -#[test] -fn basic_closing() { - const NUM: usize = 2; - - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(1)); - - for _ in 0..NUM { - let semaphore = semaphore.clone(); - - thread::spawn(move || { - for _ in 0..2 { - block_on(semaphore.acquire(1)).map_err(|_| ())?; - - semaphore.release(1); - } - - Ok::<(), ()>(()) - }); - } - - semaphore.close(); - }); -} - -#[test] -fn concurrent_close() { - const NUM: usize = 3; - - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(1)); - - for _ in 0..NUM { - let semaphore = semaphore.clone(); - - thread::spawn(move || { - block_on(semaphore.acquire(1)).map_err(|_| ())?; - semaphore.release(1); - semaphore.close(); - - Ok::<(), ()>(()) - }); - } - }); -} - -#[test] -fn concurrent_cancel() { - async fn poll_and_cancel(semaphore: Arc) { - let mut acquire1 = Some(semaphore.acquire(1)); - let mut acquire2 = Some(semaphore.acquire(1)); - poll_fn(|cx| { - // poll the acquire future once, and then immediately throw - // it away. this simulates a situation where a future is - // polled and then cancelled, such as by a timeout. 
- if let Some(acquire) = acquire1.take() { - pin!(acquire); - let _ = acquire.poll(cx); - } - if let Some(acquire) = acquire2.take() { - pin!(acquire); - let _ = acquire.poll(cx); - } - Poll::Ready(()) - }) - .await - } - - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(0)); - let t1 = { - let semaphore = semaphore.clone(); - thread::spawn(move || block_on(poll_and_cancel(semaphore))) - }; - let t2 = { - let semaphore = semaphore.clone(); - thread::spawn(move || block_on(poll_and_cancel(semaphore))) - }; - let t3 = { - let semaphore = semaphore.clone(); - thread::spawn(move || block_on(poll_and_cancel(semaphore))) - }; - - t1.join().unwrap(); - semaphore.release(10); - t2.join().unwrap(); - t3.join().unwrap(); - }); -} - -#[test] -fn batch() { - let mut b = loom::model::Builder::new(); - b.preemption_bound = Some(1); - - b.check(|| { - let semaphore = Arc::new(Semaphore::new(10)); - let active = Arc::new(AtomicUsize::new(0)); - let mut ths = vec![]; - - for _ in 0..2 { - let semaphore = semaphore.clone(); - let active = active.clone(); - - ths.push(thread::spawn(move || { - for n in &[4, 10, 8] { - block_on(semaphore.acquire(*n)).unwrap(); - - active.fetch_add(*n as usize, SeqCst); - - let num_active = active.load(SeqCst); - assert!(num_active <= 10); - - thread::yield_now(); - - active.fetch_sub(*n as usize, SeqCst); - - semaphore.release(*n as usize); - } - })); - } - - for th in ths.into_iter() { - th.join().unwrap(); - } - - assert_eq!(10, semaphore.available_permits()); - }); -} - -#[test] -fn release_during_acquire() { - loom::model(|| { - let semaphore = Arc::new(Semaphore::new(10)); - semaphore - .try_acquire(8) - .expect("try_acquire should succeed; semaphore uncontended"); - let semaphore2 = semaphore.clone(); - let thread = thread::spawn(move || block_on(semaphore2.acquire(4)).unwrap()); - - semaphore.release(8); - thread.join().unwrap(); - semaphore.release(4); - assert_eq!(10, semaphore.available_permits()); - }) -} diff -Nru 
s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_watch.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_watch.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/loom_watch.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/loom_watch.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,90 +0,0 @@ -use crate::sync::watch; - -use loom::future::block_on; -use loom::thread; -use std::sync::Arc; - -#[test] -fn smoke() { - loom::model(|| { - let (tx, mut rx1) = watch::channel(1); - let mut rx2 = rx1.clone(); - let mut rx3 = rx1.clone(); - let mut rx4 = rx1.clone(); - let mut rx5 = rx1.clone(); - - let th = thread::spawn(move || { - tx.send(2).unwrap(); - }); - - block_on(rx1.changed()).unwrap(); - assert_eq!(*rx1.borrow(), 2); - - block_on(rx2.changed()).unwrap(); - assert_eq!(*rx2.borrow(), 2); - - block_on(rx3.changed()).unwrap(); - assert_eq!(*rx3.borrow(), 2); - - block_on(rx4.changed()).unwrap(); - assert_eq!(*rx4.borrow(), 2); - - block_on(rx5.changed()).unwrap(); - assert_eq!(*rx5.borrow(), 2); - - th.join().unwrap(); - }) -} - -#[test] -fn wait_for_test() { - loom::model(move || { - let (tx, mut rx) = watch::channel(false); - - let tx_arc = Arc::new(tx); - let tx1 = tx_arc.clone(); - let tx2 = tx_arc.clone(); - - let th1 = thread::spawn(move || { - for _ in 0..2 { - tx1.send_modify(|_x| {}); - } - }); - - let th2 = thread::spawn(move || { - tx2.send(true).unwrap(); - }); - - assert_eq!(*block_on(rx.wait_for(|x| *x)).unwrap(), true); - - th1.join().unwrap(); - th2.join().unwrap(); - }); -} - -#[test] -fn wait_for_returns_correct_value() { - loom::model(move || { - let (tx, mut rx) = watch::channel(0); - - let jh = thread::spawn(move || { - tx.send(1).unwrap(); - tx.send(2).unwrap(); - tx.send(3).unwrap(); - }); - - // Stop at the first value we are called at. 
- let mut stopped_at = usize::MAX; - let returned = *block_on(rx.wait_for(|x| { - stopped_at = *x; - true - })) - .unwrap(); - - // Check that it returned the same value as the one we returned - // `true` for. - assert_eq!(stopped_at, returned); - - jh.join().unwrap(); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,17 +0,0 @@ -cfg_not_loom! { - mod atomic_waker; - mod notify; - mod semaphore_batch; -} - -cfg_loom! { - mod loom_atomic_waker; - mod loom_broadcast; - mod loom_list; - mod loom_mpsc; - mod loom_notify; - mod loom_oneshot; - mod loom_semaphore_batch; - mod loom_watch; - mod loom_rwlock; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/notify.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/notify.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/notify.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/notify.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,119 +0,0 @@ -use crate::sync::Notify; -use std::future::Future; -use std::sync::Arc; -use std::task::{Context, RawWaker, RawWakerVTable, Waker}; - -#[cfg(all(target_family = "wasm", not(target_os = "wasi")))] -use wasm_bindgen_test::wasm_bindgen_test as test; - -#[test] -fn notify_clones_waker_before_lock() { - const VTABLE: &RawWakerVTable = &RawWakerVTable::new(clone_w, wake, wake_by_ref, drop_w); - - unsafe fn clone_w(data: *const ()) -> RawWaker { - let ptr = data as *const Notify; - Arc::::increment_strong_count(ptr); - // Or some other arbitrary code that shouldn't be executed while the - // Notify wait list is locked. 
- (*ptr).notify_one(); - RawWaker::new(data, VTABLE) - } - - unsafe fn drop_w(data: *const ()) { - drop(Arc::::from_raw(data as *const Notify)); - } - - unsafe fn wake(_data: *const ()) { - unreachable!() - } - - unsafe fn wake_by_ref(_data: *const ()) { - unreachable!() - } - - let notify = Arc::new(Notify::new()); - let notify2 = notify.clone(); - - let waker = - unsafe { Waker::from_raw(RawWaker::new(Arc::into_raw(notify2) as *const _, VTABLE)) }; - let mut cx = Context::from_waker(&waker); - - let future = notify.notified(); - pin!(future); - - // The result doesn't matter, we're just testing that we don't deadlock. - let _ = future.poll(&mut cx); -} - -#[cfg(panic = "unwind")] -#[test] -fn notify_waiters_handles_panicking_waker() { - use futures::task::ArcWake; - - let notify = Arc::new(Notify::new()); - - struct PanickingWaker(Arc); - - impl ArcWake for PanickingWaker { - fn wake_by_ref(_arc_self: &Arc) { - panic!("waker panicked"); - } - } - - let bad_fut = notify.notified(); - pin!(bad_fut); - - let waker = futures::task::waker(Arc::new(PanickingWaker(notify.clone()))); - let mut cx = Context::from_waker(&waker); - let _ = bad_fut.poll(&mut cx); - - let mut futs = Vec::new(); - for _ in 0..32 { - let mut fut = tokio_test::task::spawn(notify.notified()); - assert!(fut.poll().is_pending()); - futs.push(fut); - } - - assert!(std::panic::catch_unwind(|| { - notify.notify_waiters(); - }) - .is_err()); - - for mut fut in futs { - assert!(fut.poll().is_ready()); - } -} - -#[test] -fn notify_simple() { - let notify = Notify::new(); - - let mut fut1 = tokio_test::task::spawn(notify.notified()); - assert!(fut1.poll().is_pending()); - - let mut fut2 = tokio_test::task::spawn(notify.notified()); - assert!(fut2.poll().is_pending()); - - notify.notify_waiters(); - - assert!(fut1.poll().is_ready()); - assert!(fut2.poll().is_ready()); -} - -#[test] -#[cfg(not(target_family = "wasm"))] -fn watch_test() { - let rt = crate::runtime::Builder::new_current_thread() - .build() - 
.unwrap(); - - rt.block_on(async { - let (tx, mut rx) = crate::sync::watch::channel(()); - - crate::spawn(async move { - let _ = tx.send(()); - }); - - let _ = rx.changed().await; - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/semaphore_batch.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/semaphore_batch.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/tests/semaphore_batch.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/tests/semaphore_batch.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,289 +0,0 @@ -use crate::sync::batch_semaphore::Semaphore; -use tokio_test::*; - -const MAX_PERMITS: usize = crate::sync::Semaphore::MAX_PERMITS; - -#[cfg(all(target_family = "wasm", not(target_os = "wasi")))] -use wasm_bindgen_test::wasm_bindgen_test as test; - -#[test] -fn poll_acquire_one_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - // Polling for a permit succeeds immediately - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - assert_eq!(s.available_permits(), 99); -} - -#[test] -fn poll_acquire_many_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - // Polling for a permit succeeds immediately - assert_ready_ok!(task::spawn(s.acquire(5)).poll()); - assert_eq!(s.available_permits(), 95); - - assert_ready_ok!(task::spawn(s.acquire(5)).poll()); - assert_eq!(s.available_permits(), 90); -} - -#[test] -fn try_acquire_one_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - assert_ok!(s.try_acquire(1)); - assert_eq!(s.available_permits(), 99); - - assert_ok!(s.try_acquire(1)); - assert_eq!(s.available_permits(), 98); -} - -#[test] -fn try_acquire_many_available() { - let s = Semaphore::new(100); - assert_eq!(s.available_permits(), 100); - - assert_ok!(s.try_acquire(5)); - assert_eq!(s.available_permits(), 95); - - assert_ok!(s.try_acquire(5)); - assert_eq!(s.available_permits(), 90); -} - 
-#[test] -fn poll_acquire_one_unavailable() { - let s = Semaphore::new(1); - - // Acquire the first permit - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - assert_eq!(s.available_permits(), 0); - - let mut acquire_2 = task::spawn(s.acquire(1)); - // Try to acquire the second permit - assert_pending!(acquire_2.poll()); - assert_eq!(s.available_permits(), 0); - - s.release(1); - - assert_eq!(s.available_permits(), 0); - assert!(acquire_2.is_woken()); - assert_ready_ok!(acquire_2.poll()); - assert_eq!(s.available_permits(), 0); - - s.release(1); - assert_eq!(s.available_permits(), 1); -} - -#[test] -fn poll_acquire_many_unavailable() { - let s = Semaphore::new(5); - - // Acquire the first permit - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - assert_eq!(s.available_permits(), 4); - - // Try to acquire the second permit - let mut acquire_2 = task::spawn(s.acquire(5)); - assert_pending!(acquire_2.poll()); - assert_eq!(s.available_permits(), 0); - - // Try to acquire the third permit - let mut acquire_3 = task::spawn(s.acquire(3)); - assert_pending!(acquire_3.poll()); - assert_eq!(s.available_permits(), 0); - - s.release(1); - - assert_eq!(s.available_permits(), 0); - assert!(acquire_2.is_woken()); - assert_ready_ok!(acquire_2.poll()); - - assert!(!acquire_3.is_woken()); - assert_eq!(s.available_permits(), 0); - - s.release(1); - assert!(!acquire_3.is_woken()); - assert_eq!(s.available_permits(), 0); - - s.release(2); - assert!(acquire_3.is_woken()); - - assert_ready_ok!(acquire_3.poll()); -} - -#[test] -fn try_acquire_one_unavailable() { - let s = Semaphore::new(1); - - // Acquire the first permit - assert_ok!(s.try_acquire(1)); - assert_eq!(s.available_permits(), 0); - - assert_err!(s.try_acquire(1)); - - s.release(1); - - assert_eq!(s.available_permits(), 1); - assert_ok!(s.try_acquire(1)); - - s.release(1); - assert_eq!(s.available_permits(), 1); -} - -#[test] -fn try_acquire_many_unavailable() { - let s = Semaphore::new(5); - - // Acquire the first 
permit - assert_ok!(s.try_acquire(1)); - assert_eq!(s.available_permits(), 4); - - assert_err!(s.try_acquire(5)); - - s.release(1); - assert_eq!(s.available_permits(), 5); - - assert_ok!(s.try_acquire(5)); - - s.release(1); - assert_eq!(s.available_permits(), 1); - - s.release(1); - assert_eq!(s.available_permits(), 2); -} - -#[test] -fn poll_acquire_one_zero_permits() { - let s = Semaphore::new(0); - assert_eq!(s.available_permits(), 0); - - // Try to acquire the permit - let mut acquire = task::spawn(s.acquire(1)); - assert_pending!(acquire.poll()); - - s.release(1); - - assert!(acquire.is_woken()); - assert_ready_ok!(acquire.poll()); -} - -#[test] -fn max_permits_doesnt_panic() { - Semaphore::new(MAX_PERMITS); -} - -#[test] -#[should_panic] -#[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding -fn validates_max_permits() { - Semaphore::new(MAX_PERMITS + 1); -} - -#[test] -fn close_semaphore_prevents_acquire() { - let s = Semaphore::new(5); - s.close(); - - assert_eq!(5, s.available_permits()); - - assert_ready_err!(task::spawn(s.acquire(1)).poll()); - assert_eq!(5, s.available_permits()); - - assert_ready_err!(task::spawn(s.acquire(1)).poll()); - assert_eq!(5, s.available_permits()); -} - -#[test] -fn close_semaphore_notifies_permit1() { - let s = Semaphore::new(0); - let mut acquire = task::spawn(s.acquire(1)); - - assert_pending!(acquire.poll()); - - s.close(); - - assert!(acquire.is_woken()); - assert_ready_err!(acquire.poll()); -} - -#[test] -fn close_semaphore_notifies_permit2() { - let s = Semaphore::new(2); - - // Acquire a couple of permits - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - assert_ready_ok!(task::spawn(s.acquire(1)).poll()); - - let mut acquire3 = task::spawn(s.acquire(1)); - let mut acquire4 = task::spawn(s.acquire(1)); - assert_pending!(acquire3.poll()); - assert_pending!(acquire4.poll()); - - s.close(); - - assert!(acquire3.is_woken()); - assert!(acquire4.is_woken()); - - 
assert_ready_err!(acquire3.poll()); - assert_ready_err!(acquire4.poll()); - - assert_eq!(0, s.available_permits()); - - s.release(1); - - assert_eq!(1, s.available_permits()); - - assert_ready_err!(task::spawn(s.acquire(1)).poll()); - - s.release(1); - - assert_eq!(2, s.available_permits()); -} - -#[test] -fn cancel_acquire_releases_permits() { - let s = Semaphore::new(10); - s.try_acquire(4).expect("uncontended try_acquire succeeds"); - assert_eq!(6, s.available_permits()); - - let mut acquire = task::spawn(s.acquire(8)); - assert_pending!(acquire.poll()); - - assert_eq!(0, s.available_permits()); - drop(acquire); - - assert_eq!(6, s.available_permits()); - assert_ok!(s.try_acquire(6)); -} - -#[test] -fn release_permits_at_drop() { - use crate::sync::semaphore::*; - use futures::task::ArcWake; - use std::future::Future; - use std::sync::Arc; - - let sem = Arc::new(Semaphore::new(1)); - - struct ReleaseOnDrop(Option); - - impl ArcWake for ReleaseOnDrop { - fn wake_by_ref(_arc_self: &Arc) {} - } - - let mut fut = Box::pin(async { - let _permit = sem.acquire().await.unwrap(); - }); - - // Second iteration shouldn't deadlock. - for _ in 0..=1 { - let waker = futures::task::waker(Arc::new(ReleaseOnDrop( - sem.clone().try_acquire_owned().ok(), - ))); - let mut cx = std::task::Context::from_waker(&waker); - assert!(fut.as_mut().poll(&mut cx).is_pending()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/sync/watch.rs s390-tools-2.33.1/rust-vendor/tokio/src/sync/watch.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/sync/watch.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/sync/watch.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1388 +0,0 @@ -#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))] - -//! A single-producer, multi-consumer channel that only retains the *last* sent -//! value. -//! -//! This channel is useful for watching for changes to a value from multiple -//! 
points in the code base, for example, changes to configuration values. -//! -//! # Usage -//! -//! [`channel`] returns a [`Sender`] / [`Receiver`] pair. These are the producer -//! and consumer halves of the channel. The channel is created with an initial -//! value. -//! -//! Each [`Receiver`] independently tracks the last value *seen* by its caller. -//! -//! To access the **current** value stored in the channel and mark it as *seen* -//! by a given [`Receiver`], use [`Receiver::borrow_and_update()`]. -//! -//! To access the current value **without** marking it as *seen*, use -//! [`Receiver::borrow()`]. (If the value has already been marked *seen*, -//! [`Receiver::borrow()`] is equivalent to [`Receiver::borrow_and_update()`].) -//! -//! For more information on when to use these methods, see -//! [here](#borrow_and_update-versus-borrow). -//! -//! ## Change notifications -//! -//! The [`Receiver`] half provides an asynchronous [`changed`] method. This -//! method is ready when a new, *unseen* value is sent via the [`Sender`] half. -//! -//! * [`Receiver::changed()`] returns `Ok(())` on receiving a new value, or -//! `Err(`[`error::RecvError`]`)` if the [`Sender`] has been dropped. -//! * If the current value is *unseen* when calling [`changed`], then -//! [`changed`] will return immediately. If the current value is *seen*, then -//! it will sleep until either a new message is sent via the [`Sender`] half, -//! or the [`Sender`] is dropped. -//! * On completion, the [`changed`] method marks the new value as *seen*. -//! * At creation, the initial value is considered *seen*. In other words, -//! [`Receiver::changed()`] will not return until a subsequent value is sent. -//! * New [`Receiver`] instances can be created with [`Sender::subscribe()`]. -//! The current value at the time the [`Receiver`] is created is considered -//! *seen*. -//! -//! ## `borrow_and_update` versus `borrow` -//! -//! 
If the receiver intends to await notifications from [`changed`] in a loop, -//! [`Receiver::borrow_and_update()`] should be preferred over -//! [`Receiver::borrow()`]. This avoids a potential race where a new value is -//! sent between [`changed`] being ready and the value being read. (If -//! [`Receiver::borrow()`] is used, the loop may run twice with the same value.) -//! -//! If the receiver is only interested in the current value, and does not intend -//! to wait for changes, then [`Receiver::borrow()`] can be used. It may be more -//! convenient to use [`borrow`](Receiver::borrow) since it's an `&self` -//! method---[`borrow_and_update`](Receiver::borrow_and_update) requires `&mut -//! self`. -//! -//! # Examples -//! -//! The following example prints `hello! world! `. -//! -//! ``` -//! use tokio::sync::watch; -//! use tokio::time::{Duration, sleep}; -//! -//! # async fn dox() -> Result<(), Box> { -//! let (tx, mut rx) = watch::channel("hello"); -//! -//! tokio::spawn(async move { -//! // Use the equivalent of a "do-while" loop so the initial value is -//! // processed before awaiting the `changed()` future. -//! loop { -//! println!("{}! ", *rx.borrow_and_update()); -//! if rx.changed().await.is_err() { -//! break; -//! } -//! } -//! }); -//! -//! sleep(Duration::from_millis(100)).await; -//! tx.send("world")?; -//! # Ok(()) -//! # } -//! ``` -//! -//! # Closing -//! -//! [`Sender::is_closed`] and [`Sender::closed`] allow the producer to detect -//! when all [`Receiver`] handles have been dropped. This indicates that there -//! is no further interest in the values being produced and work can be stopped. -//! -//! The value in the channel will not be dropped until the sender and all -//! receivers have been dropped. -//! -//! # Thread safety -//! -//! Both [`Sender`] and [`Receiver`] are thread safe. They can be moved to other -//! threads and can be used in a concurrent environment. Clones of [`Receiver`] -//! 
handles may be moved to separate threads and also used concurrently. -//! -//! [`Sender`]: crate::sync::watch::Sender -//! [`Receiver`]: crate::sync::watch::Receiver -//! [`changed`]: crate::sync::watch::Receiver::changed -//! [`Receiver::changed()`]: crate::sync::watch::Receiver::changed -//! [`Receiver::borrow()`]: crate::sync::watch::Receiver::borrow -//! [`Receiver::borrow_and_update()`]: -//! crate::sync::watch::Receiver::borrow_and_update -//! [`channel`]: crate::sync::watch::channel -//! [`Sender::is_closed`]: crate::sync::watch::Sender::is_closed -//! [`Sender::closed`]: crate::sync::watch::Sender::closed -//! [`Sender::subscribe()`]: crate::sync::watch::Sender::subscribe - -use crate::sync::notify::Notify; - -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::atomic::Ordering::Relaxed; -use crate::loom::sync::{Arc, RwLock, RwLockReadGuard}; -use std::fmt; -use std::mem; -use std::ops; -use std::panic; - -/// Receives values from the associated [`Sender`](struct@Sender). -/// -/// Instances are created by the [`channel`](fn@channel) function. -/// -/// To turn this receiver into a `Stream`, you can use the [`WatchStream`] -/// wrapper. -/// -/// [`WatchStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.WatchStream.html -#[derive(Debug)] -pub struct Receiver { - /// Pointer to the shared state - shared: Arc>, - - /// Last observed version - version: Version, -} - -/// Sends values to the associated [`Receiver`](struct@Receiver). -/// -/// Instances are created by the [`channel`](fn@channel) function. -#[derive(Debug)] -pub struct Sender { - shared: Arc>, -} - -/// Returns a reference to the inner value. -/// -/// Outstanding borrows hold a read lock on the inner value. This means that -/// long-lived borrows could cause the producer half to block. It is recommended -/// to keep the borrow as short-lived as possible. 
Additionally, if you are -/// running in an environment that allows `!Send` futures, you must ensure that -/// the returned `Ref` type is never held alive across an `.await` point, -/// otherwise, it can lead to a deadlock. -/// -/// The priority policy of the lock is dependent on the underlying lock -/// implementation, and this type does not guarantee that any particular policy -/// will be used. In particular, a producer which is waiting to acquire the lock -/// in `send` might or might not block concurrent calls to `borrow`, e.g.: -/// -///
Potential deadlock example -/// -/// ```text -/// // Task 1 (on thread A) | // Task 2 (on thread B) -/// let _ref1 = rx.borrow(); | -/// | // will block -/// | let _ = tx.send(()); -/// // may deadlock | -/// let _ref2 = rx.borrow(); | -/// ``` -///
-#[derive(Debug)] -pub struct Ref<'a, T> { - inner: RwLockReadGuard<'a, T>, - has_changed: bool, -} - -impl<'a, T> Ref<'a, T> { - /// Indicates if the borrowed value is considered as _changed_ since the last - /// time it has been marked as seen. - /// - /// Unlike [`Receiver::has_changed()`], this method does not fail if the channel is closed. - /// - /// When borrowed from the [`Sender`] this function will always return `false`. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = watch::channel("hello"); - /// - /// tx.send("goodbye").unwrap(); - /// // The sender does never consider the value as changed. - /// assert!(!tx.borrow().has_changed()); - /// - /// // Drop the sender immediately, just for testing purposes. - /// drop(tx); - /// - /// // Even if the sender has already been dropped... - /// assert!(rx.has_changed().is_err()); - /// // ...the modified value is still readable and detected as changed. - /// assert_eq!(*rx.borrow(), "goodbye"); - /// assert!(rx.borrow().has_changed()); - /// - /// // Read the changed value and mark it as seen. - /// { - /// let received = rx.borrow_and_update(); - /// assert_eq!(*received, "goodbye"); - /// assert!(received.has_changed()); - /// // Release the read lock when leaving this scope. - /// } - /// - /// // Now the value has already been marked as seen and could - /// // never be modified again (after the sender has been dropped). - /// assert!(!rx.borrow().has_changed()); - /// } - /// ``` - pub fn has_changed(&self) -> bool { - self.has_changed - } -} - -struct Shared { - /// The most recent value. - value: RwLock, - - /// The current version. - /// - /// The lowest bit represents a "closed" state. The rest of the bits - /// represent the current version. - state: AtomicState, - - /// Tracks the number of `Receiver` instances. - ref_count_rx: AtomicUsize, - - /// Notifies waiting receivers that the value changed. 
- notify_rx: big_notify::BigNotify, - - /// Notifies any task listening for `Receiver` dropped events. - notify_tx: Notify, -} - -impl fmt::Debug for Shared { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let state = self.state.load(); - f.debug_struct("Shared") - .field("value", &self.value) - .field("version", &state.version()) - .field("is_closed", &state.is_closed()) - .field("ref_count_rx", &self.ref_count_rx) - .finish() - } -} - -pub mod error { - //! Watch error types. - - use std::error::Error; - use std::fmt; - - /// Error produced when sending a value fails. - #[derive(PartialEq, Eq, Clone, Copy)] - pub struct SendError(pub T); - - // ===== impl SendError ===== - - impl fmt::Debug for SendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SendError").finish_non_exhaustive() - } - } - - impl fmt::Display for SendError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } - } - - impl Error for SendError {} - - /// Error produced when receiving a change notification. - #[derive(Debug, Clone)] - pub struct RecvError(pub(super) ()); - - // ===== impl RecvError ===== - - impl fmt::Display for RecvError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } - } - - impl Error for RecvError {} -} - -mod big_notify { - use super::*; - use crate::sync::notify::Notified; - - // To avoid contention on the lock inside the `Notify`, we store multiple - // copies of it. Then, we use either circular access or randomness to spread - // out threads over different `Notify` objects. - // - // Some simple benchmarks show that randomness performs slightly better than - // circular access (probably due to contention on `next`), so we prefer to - // use randomness when Tokio is compiled with a random number generator. - // - // When the random number generator is not available, we fall back to - // circular access. 
- - pub(super) struct BigNotify { - #[cfg(not(all(not(loom), feature = "sync", any(feature = "rt", feature = "macros"))))] - next: AtomicUsize, - inner: [Notify; 8], - } - - impl BigNotify { - pub(super) fn new() -> Self { - Self { - #[cfg(not(all( - not(loom), - feature = "sync", - any(feature = "rt", feature = "macros") - )))] - next: AtomicUsize::new(0), - inner: Default::default(), - } - } - - pub(super) fn notify_waiters(&self) { - for notify in &self.inner { - notify.notify_waiters(); - } - } - - /// This function implements the case where randomness is not available. - #[cfg(not(all(not(loom), feature = "sync", any(feature = "rt", feature = "macros"))))] - pub(super) fn notified(&self) -> Notified<'_> { - let i = self.next.fetch_add(1, Relaxed) % 8; - self.inner[i].notified() - } - - /// This function implements the case where randomness is available. - #[cfg(all(not(loom), feature = "sync", any(feature = "rt", feature = "macros")))] - pub(super) fn notified(&self) -> Notified<'_> { - let i = crate::runtime::context::thread_rng_n(8) as usize; - self.inner[i].notified() - } - } -} - -use self::state::{AtomicState, Version}; -mod state { - use crate::loom::sync::atomic::AtomicUsize; - use crate::loom::sync::atomic::Ordering; - - const CLOSED_BIT: usize = 1; - - // Using 2 as the step size preserves the `CLOSED_BIT`. - const STEP_SIZE: usize = 2; - - /// The version part of the state. The lowest bit is always zero. - #[derive(Copy, Clone, Debug, Eq, PartialEq)] - pub(super) struct Version(usize); - - /// Snapshot of the state. The first bit is used as the CLOSED bit. - /// The remaining bits are used as the version. - /// - /// The CLOSED bit tracks whether the Sender has been dropped. Dropping all - /// receivers does not set it. - #[derive(Copy, Clone, Debug)] - pub(super) struct StateSnapshot(usize); - - /// The state stored in an atomic integer. 
- /// - /// The `Sender` uses `Release` ordering for storing a new state - /// and the `Receiver`s use `Acquire` ordering for loading the - /// current state. This ensures that written values are seen by - /// the `Receiver`s for a proper handover. - #[derive(Debug)] - pub(super) struct AtomicState(AtomicUsize); - - impl Version { - /// Decrements the version. - pub(super) fn decrement(&mut self) { - // Using a wrapping decrement here is required to ensure that the - // operation is consistent with `std::sync::atomic::AtomicUsize::fetch_add()` - // which wraps on overflow. - self.0 = self.0.wrapping_sub(STEP_SIZE); - } - - pub(super) const INITIAL: Self = Version(0); - } - - impl StateSnapshot { - /// Extract the version from the state. - pub(super) fn version(self) -> Version { - Version(self.0 & !CLOSED_BIT) - } - - /// Is the closed bit set? - pub(super) fn is_closed(self) -> bool { - (self.0 & CLOSED_BIT) == CLOSED_BIT - } - } - - impl AtomicState { - /// Create a new `AtomicState` that is not closed and which has the - /// version set to `Version::INITIAL`. - pub(super) fn new() -> Self { - AtomicState(AtomicUsize::new(Version::INITIAL.0)) - } - - /// Load the current value of the state. - /// - /// Only used by the receiver and for debugging purposes. - /// - /// The receiver side (read-only) uses `Acquire` ordering for a proper handover - /// of the shared value with the sender side (single writer). The state is always - /// updated after modifying and before releasing the (exclusive) lock on the - /// shared value. - pub(super) fn load(&self) -> StateSnapshot { - StateSnapshot(self.0.load(Ordering::Acquire)) - } - - /// Increment the version counter. - pub(super) fn increment_version_while_locked(&self) { - // Use `Release` ordering to ensure that the shared value - // has been written before updating the version. The shared - // value is still protected by an exclusive lock during this - // method. 
- self.0.fetch_add(STEP_SIZE, Ordering::Release); - } - - /// Set the closed bit in the state. - pub(super) fn set_closed(&self) { - self.0.fetch_or(CLOSED_BIT, Ordering::Release); - } - } -} - -/// Creates a new watch channel, returning the "send" and "receive" handles. -/// -/// All values sent by [`Sender`] will become visible to the [`Receiver`] handles. -/// Only the last value sent is made available to the [`Receiver`] half. All -/// intermediate values are dropped. -/// -/// # Examples -/// -/// The following example prints `hello! world! `. -/// -/// ``` -/// use tokio::sync::watch; -/// use tokio::time::{Duration, sleep}; -/// -/// # async fn dox() -> Result<(), Box> { -/// let (tx, mut rx) = watch::channel("hello"); -/// -/// tokio::spawn(async move { -/// // Use the equivalent of a "do-while" loop so the initial value is -/// // processed before awaiting the `changed()` future. -/// loop { -/// println!("{}! ", *rx.borrow_and_update()); -/// if rx.changed().await.is_err() { -/// break; -/// } -/// } -/// }); -/// -/// sleep(Duration::from_millis(100)).await; -/// tx.send("world")?; -/// # Ok(()) -/// # } -/// ``` -/// -/// [`Sender`]: struct@Sender -/// [`Receiver`]: struct@Receiver -pub fn channel(init: T) -> (Sender, Receiver) { - let shared = Arc::new(Shared { - value: RwLock::new(init), - state: AtomicState::new(), - ref_count_rx: AtomicUsize::new(1), - notify_rx: big_notify::BigNotify::new(), - notify_tx: Notify::new(), - }); - - let tx = Sender { - shared: shared.clone(), - }; - - let rx = Receiver { - shared, - version: Version::INITIAL, - }; - - (tx, rx) -} - -impl Receiver { - fn from_shared(version: Version, shared: Arc>) -> Self { - // No synchronization necessary as this is only used as a counter and - // not memory access. - shared.ref_count_rx.fetch_add(1, Relaxed); - - Self { shared, version } - } - - /// Returns a reference to the most recently sent value. 
- /// - /// This method does not mark the returned value as seen, so future calls to - /// [`changed`] may return immediately even if you have already seen the - /// value with a call to `borrow`. - /// - /// Outstanding borrows hold a read lock on the inner value. This means that - /// long-lived borrows could cause the producer half to block. It is recommended - /// to keep the borrow as short-lived as possible. Additionally, if you are - /// running in an environment that allows `!Send` futures, you must ensure that - /// the returned `Ref` type is never held alive across an `.await` point, - /// otherwise, it can lead to a deadlock. - /// - /// The priority policy of the lock is dependent on the underlying lock - /// implementation, and this type does not guarantee that any particular policy - /// will be used. In particular, a producer which is waiting to acquire the lock - /// in `send` might or might not block concurrent calls to `borrow`, e.g.: - /// - ///
Potential deadlock example - /// - /// ```text - /// // Task 1 (on thread A) | // Task 2 (on thread B) - /// let _ref1 = rx.borrow(); | - /// | // will block - /// | let _ = tx.send(()); - /// // may deadlock | - /// let _ref2 = rx.borrow(); | - /// ``` - ///
- /// - /// For more information on when to use this method versus - /// [`borrow_and_update`], see [here](self#borrow_and_update-versus-borrow). - /// - /// [`changed`]: Receiver::changed - /// [`borrow_and_update`]: Receiver::borrow_and_update - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// let (_, rx) = watch::channel("hello"); - /// assert_eq!(*rx.borrow(), "hello"); - /// ``` - pub fn borrow(&self) -> Ref<'_, T> { - let inner = self.shared.value.read().unwrap(); - - // After obtaining a read-lock no concurrent writes could occur - // and the loaded version matches that of the borrowed reference. - let new_version = self.shared.state.load().version(); - let has_changed = self.version != new_version; - - Ref { inner, has_changed } - } - - /// Returns a reference to the most recently sent value and marks that value - /// as seen. - /// - /// This method marks the current value as seen. Subsequent calls to [`changed`] - /// will not return immediately until the [`Sender`] has modified the shared - /// value again. - /// - /// Outstanding borrows hold a read lock on the inner value. This means that - /// long-lived borrows could cause the producer half to block. It is recommended - /// to keep the borrow as short-lived as possible. Additionally, if you are - /// running in an environment that allows `!Send` futures, you must ensure that - /// the returned `Ref` type is never held alive across an `.await` point, - /// otherwise, it can lead to a deadlock. - /// - /// The priority policy of the lock is dependent on the underlying lock - /// implementation, and this type does not guarantee that any particular policy - /// will be used. In particular, a producer which is waiting to acquire the lock - /// in `send` might or might not block concurrent calls to `borrow`, e.g.: - /// - ///
Potential deadlock example - /// - /// ```text - /// // Task 1 (on thread A) | // Task 2 (on thread B) - /// let _ref1 = rx1.borrow_and_update(); | - /// | // will block - /// | let _ = tx.send(()); - /// // may deadlock | - /// let _ref2 = rx2.borrow_and_update(); | - /// ``` - ///
- /// - /// For more information on when to use this method versus [`borrow`], see - /// [here](self#borrow_and_update-versus-borrow). - /// - /// [`changed`]: Receiver::changed - /// [`borrow`]: Receiver::borrow - pub fn borrow_and_update(&mut self) -> Ref<'_, T> { - let inner = self.shared.value.read().unwrap(); - - // After obtaining a read-lock no concurrent writes could occur - // and the loaded version matches that of the borrowed reference. - let new_version = self.shared.state.load().version(); - let has_changed = self.version != new_version; - - // Mark the shared value as seen by updating the version - self.version = new_version; - - Ref { inner, has_changed } - } - - /// Checks if this channel contains a message that this receiver has not yet - /// seen. The new value is not marked as seen. - /// - /// Although this method is called `has_changed`, it does not check new - /// messages for equality, so this call will return true even if the new - /// message is equal to the old message. - /// - /// Returns an error if the channel has been closed. - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = watch::channel("hello"); - /// - /// tx.send("goodbye").unwrap(); - /// - /// assert!(rx.has_changed().unwrap()); - /// assert_eq!(*rx.borrow_and_update(), "goodbye"); - /// - /// // The value has been marked as seen - /// assert!(!rx.has_changed().unwrap()); - /// - /// drop(tx); - /// // The `tx` handle has been dropped - /// assert!(rx.has_changed().is_err()); - /// } - /// ``` - pub fn has_changed(&self) -> Result { - // Load the version from the state - let state = self.shared.state.load(); - if state.is_closed() { - // The sender has dropped. - return Err(error::RecvError(())); - } - let new_version = state.version(); - - Ok(self.version != new_version) - } - - /// Marks the state as changed. 
- /// - /// After invoking this method [`has_changed()`](Self::has_changed) - /// returns `true` and [`changed()`](Self::changed) returns - /// immediately, regardless of whether a new value has been sent. - /// - /// This is useful for triggering an initial change notification after - /// subscribing to synchronize new receivers. - pub fn mark_changed(&mut self) { - self.version.decrement(); - } - - /// Waits for a change notification, then marks the newest value as seen. - /// - /// If the newest value in the channel has not yet been marked seen when - /// this method is called, the method marks that value seen and returns - /// immediately. If the newest value has already been marked seen, then the - /// method sleeps until a new message is sent by the [`Sender`] connected to - /// this `Receiver`, or until the [`Sender`] is dropped. - /// - /// This method returns an error if and only if the [`Sender`] is dropped. - /// - /// For more information, see - /// [*Change notifications*](self#change-notifications) in the module-level documentation. - /// - /// # Cancel safety - /// - /// This method is cancel safe. If you use it as the event in a - /// [`tokio::select!`](crate::select) statement and some other branch - /// completes first, then it is guaranteed that no values have been marked - /// seen by this call to `changed`. 
- /// - /// [`Sender`]: struct@Sender - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, mut rx) = watch::channel("hello"); - /// - /// tokio::spawn(async move { - /// tx.send("goodbye").unwrap(); - /// }); - /// - /// assert!(rx.changed().await.is_ok()); - /// assert_eq!(*rx.borrow_and_update(), "goodbye"); - /// - /// // The `tx` handle has been dropped - /// assert!(rx.changed().await.is_err()); - /// } - /// ``` - pub async fn changed(&mut self) -> Result<(), error::RecvError> { - changed_impl(&self.shared, &mut self.version).await - } - - /// Waits for a value that satisfies the provided condition. - /// - /// This method will call the provided closure whenever something is sent on - /// the channel. Once the closure returns `true`, this method will return a - /// reference to the value that was passed to the closure. - /// - /// Before `wait_for` starts waiting for changes, it will call the closure - /// on the current value. If the closure returns `true` when given the - /// current value, then `wait_for` will immediately return a reference to - /// the current value. This is the case even if the current value is already - /// considered seen. - /// - /// The watch channel only keeps track of the most recent value, so if - /// several messages are sent faster than `wait_for` is able to call the - /// closure, then it may skip some updates. Whenever the closure is called, - /// it will be called with the most recent value. - /// - /// When this function returns, the value that was passed to the closure - /// when it returned `true` will be considered seen. - /// - /// If the channel is closed, then `wait_for` will return a `RecvError`. - /// Once this happens, no more messages can ever be sent on the channel. - /// When an error is returned, it is guaranteed that the closure has been - /// called on the last value, and that it returned `false` for that value. 
- /// (If the closure returned `true`, then the last value would have been - /// returned instead of the error.) - /// - /// Like the `borrow` method, the returned borrow holds a read lock on the - /// inner value. This means that long-lived borrows could cause the producer - /// half to block. It is recommended to keep the borrow as short-lived as - /// possible. See the documentation of `borrow` for more information on - /// this. - /// - /// [`Receiver::changed()`]: crate::sync::watch::Receiver::changed - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// #[tokio::main] - /// - /// async fn main() { - /// let (tx, _rx) = watch::channel("hello"); - /// - /// tx.send("goodbye").unwrap(); - /// - /// // here we subscribe to a second receiver - /// // now in case of using `changed` we would have - /// // to first check the current value and then wait - /// // for changes or else `changed` would hang. - /// let mut rx2 = tx.subscribe(); - /// - /// // in place of changed we have use `wait_for` - /// // which would automatically check the current value - /// // and wait for changes until the closure returns true. - /// assert!(rx2.wait_for(|val| *val == "goodbye").await.is_ok()); - /// assert_eq!(*rx2.borrow(), "goodbye"); - /// } - /// ``` - pub async fn wait_for( - &mut self, - mut f: impl FnMut(&T) -> bool, - ) -> Result, error::RecvError> { - let mut closed = false; - loop { - { - let inner = self.shared.value.read().unwrap(); - - let new_version = self.shared.state.load().version(); - let has_changed = self.version != new_version; - self.version = new_version; - - if !closed || has_changed { - let result = panic::catch_unwind(panic::AssertUnwindSafe(|| f(&inner))); - match result { - Ok(true) => { - return Ok(Ref { inner, has_changed }); - } - Ok(false) => { - // Skip the value. - } - Err(panicked) => { - // Drop the read-lock to avoid poisoning it. - drop(inner); - // Forward the panic to the caller. 
- panic::resume_unwind(panicked); - // Unreachable - } - }; - } - } - - if closed { - return Err(error::RecvError(())); - } - - // Wait for the value to change. - closed = changed_impl(&self.shared, &mut self.version).await.is_err(); - } - } - - /// Returns `true` if receivers belong to the same channel. - /// - /// # Examples - /// - /// ``` - /// let (tx, rx) = tokio::sync::watch::channel(true); - /// let rx2 = rx.clone(); - /// assert!(rx.same_channel(&rx2)); - /// - /// let (tx3, rx3) = tokio::sync::watch::channel(true); - /// assert!(!rx3.same_channel(&rx2)); - /// ``` - pub fn same_channel(&self, other: &Self) -> bool { - Arc::ptr_eq(&self.shared, &other.shared) - } - - cfg_process_driver! { - pub(crate) fn try_has_changed(&mut self) -> Option> { - maybe_changed(&self.shared, &mut self.version) - } - } -} - -fn maybe_changed( - shared: &Shared, - version: &mut Version, -) -> Option> { - // Load the version from the state - let state = shared.state.load(); - let new_version = state.version(); - - if *version != new_version { - // Observe the new version and return - *version = new_version; - return Some(Ok(())); - } - - if state.is_closed() { - // The sender has been dropped. - return Some(Err(error::RecvError(()))); - } - - None -} - -async fn changed_impl( - shared: &Shared, - version: &mut Version, -) -> Result<(), error::RecvError> { - crate::trace::async_trace_leaf().await; - - loop { - // In order to avoid a race condition, we first request a notification, - // **then** check the current value's version. If a new version exists, - // the notification request is dropped. 
- let notified = shared.notify_rx.notified(); - - if let Some(ret) = maybe_changed(shared, version) { - return ret; - } - - notified.await; - // loop around again in case the wake-up was spurious - } -} - -impl Clone for Receiver { - fn clone(&self) -> Self { - let version = self.version; - let shared = self.shared.clone(); - - Self::from_shared(version, shared) - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - // No synchronization necessary as this is only used as a counter and - // not memory access. - if 1 == self.shared.ref_count_rx.fetch_sub(1, Relaxed) { - // This is the last `Receiver` handle, tasks waiting on `Sender::closed()` - self.shared.notify_tx.notify_waiters(); - } - } -} - -impl Sender { - /// Creates the sending-half of the [`watch`] channel. - /// - /// See documentation of [`watch::channel`] for errors when calling this function. - /// Beware that attempting to send a value when there are no receivers will - /// return an error. - /// - /// [`watch`]: crate::sync::watch - /// [`watch::channel`]: crate::sync::watch - /// - /// # Examples - /// ``` - /// let sender = tokio::sync::watch::Sender::new(0u8); - /// assert!(sender.send(3).is_err()); - /// let _rec = sender.subscribe(); - /// assert!(sender.send(4).is_ok()); - /// ``` - pub fn new(init: T) -> Self { - let (tx, _) = channel(init); - tx - } - - /// Sends a new value via the channel, notifying all receivers. - /// - /// This method fails if the channel is closed, which is the case when - /// every receiver has been dropped. It is possible to reopen the channel - /// using the [`subscribe`] method. However, when `send` fails, the value - /// isn't made available for future receivers (but returned with the - /// [`SendError`]). - /// - /// To always make a new value available for future receivers, even if no - /// receiver currently exists, one of the other send methods - /// ([`send_if_modified`], [`send_modify`], or [`send_replace`]) can be - /// used instead. 
- /// - /// [`subscribe`]: Sender::subscribe - /// [`SendError`]: error::SendError - /// [`send_if_modified`]: Sender::send_if_modified - /// [`send_modify`]: Sender::send_modify - /// [`send_replace`]: Sender::send_replace - pub fn send(&self, value: T) -> Result<(), error::SendError> { - // This is pretty much only useful as a hint anyway, so synchronization isn't critical. - if 0 == self.receiver_count() { - return Err(error::SendError(value)); - } - - self.send_replace(value); - Ok(()) - } - - /// Modifies the watched value **unconditionally** in-place, - /// notifying all receivers. - /// - /// This can be useful for modifying the watched value, without - /// having to allocate a new instance. Additionally, this - /// method permits sending values even when there are no receivers. - /// - /// Prefer to use the more versatile function [`Self::send_if_modified()`] - /// if the value is only modified conditionally during the mutable borrow - /// to prevent unneeded change notifications for unmodified values. - /// - /// # Panics - /// - /// This function panics when the invocation of the `modify` closure panics. - /// No receivers are notified when panicking. All changes of the watched - /// value applied by the closure before panicking will be visible in - /// subsequent calls to `borrow`. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// struct State { - /// counter: usize, - /// } - /// let (state_tx, state_rx) = watch::channel(State { counter: 0 }); - /// state_tx.send_modify(|state| state.counter += 1); - /// assert_eq!(state_rx.borrow().counter, 1); - /// ``` - pub fn send_modify(&self, modify: F) - where - F: FnOnce(&mut T), - { - self.send_if_modified(|value| { - modify(value); - true - }); - } - - /// Modifies the watched value **conditionally** in-place, - /// notifying all receivers only if modified. - /// - /// This can be useful for modifying the watched value, without - /// having to allocate a new instance. 
Additionally, this - /// method permits sending values even when there are no receivers. - /// - /// The `modify` closure must return `true` if the value has actually - /// been modified during the mutable borrow. It should only return `false` - /// if the value is guaranteed to be unmodified despite the mutable - /// borrow. - /// - /// Receivers are only notified if the closure returned `true`. If the - /// closure has modified the value but returned `false` this results - /// in a *silent modification*, i.e. the modified value will be visible - /// in subsequent calls to `borrow`, but receivers will not receive - /// a change notification. - /// - /// Returns the result of the closure, i.e. `true` if the value has - /// been modified and `false` otherwise. - /// - /// # Panics - /// - /// This function panics when the invocation of the `modify` closure panics. - /// No receivers are notified when panicking. All changes of the watched - /// value applied by the closure before panicking will be visible in - /// subsequent calls to `borrow`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// struct State { - /// counter: usize, - /// } - /// let (state_tx, mut state_rx) = watch::channel(State { counter: 1 }); - /// let inc_counter_if_odd = |state: &mut State| { - /// if state.counter % 2 == 1 { - /// state.counter += 1; - /// return true; - /// } - /// false - /// }; - /// - /// assert_eq!(state_rx.borrow().counter, 1); - /// - /// assert!(!state_rx.has_changed().unwrap()); - /// assert!(state_tx.send_if_modified(inc_counter_if_odd)); - /// assert!(state_rx.has_changed().unwrap()); - /// assert_eq!(state_rx.borrow_and_update().counter, 2); - /// - /// assert!(!state_rx.has_changed().unwrap()); - /// assert!(!state_tx.send_if_modified(inc_counter_if_odd)); - /// assert!(!state_rx.has_changed().unwrap()); - /// assert_eq!(state_rx.borrow_and_update().counter, 2); - /// ``` - pub fn send_if_modified(&self, modify: F) -> bool - where - F: FnOnce(&mut T) -> bool, - { - { - // Acquire the write lock and update the value. - let mut lock = self.shared.value.write().unwrap(); - - // Update the value and catch possible panic inside func. - let result = panic::catch_unwind(panic::AssertUnwindSafe(|| modify(&mut lock))); - match result { - Ok(modified) => { - if !modified { - // Abort, i.e. don't notify receivers if unmodified - return false; - } - // Continue if modified - } - Err(panicked) => { - // Drop the lock to avoid poisoning it. - drop(lock); - // Forward the panic to the caller. - panic::resume_unwind(panicked); - // Unreachable - } - }; - - self.shared.state.increment_version_while_locked(); - - // Release the write lock. - // - // Incrementing the version counter while holding the lock ensures - // that receivers are able to figure out the version number of the - // value they are currently looking at. 
- drop(lock); - } - - self.shared.notify_rx.notify_waiters(); - - true - } - - /// Sends a new value via the channel, notifying all receivers and returning - /// the previous value in the channel. - /// - /// This can be useful for reusing the buffers inside a watched value. - /// Additionally, this method permits sending values even when there are no - /// receivers. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// let (tx, _rx) = watch::channel(1); - /// assert_eq!(tx.send_replace(2), 1); - /// assert_eq!(tx.send_replace(3), 2); - /// ``` - pub fn send_replace(&self, mut value: T) -> T { - // swap old watched value with the new one - self.send_modify(|old| mem::swap(old, &mut value)); - - value - } - - /// Returns a reference to the most recently sent value - /// - /// Outstanding borrows hold a read lock on the inner value. This means that - /// long-lived borrows could cause the producer half to block. It is recommended - /// to keep the borrow as short-lived as possible. Additionally, if you are - /// running in an environment that allows `!Send` futures, you must ensure that - /// the returned `Ref` type is never held alive across an `.await` point, - /// otherwise, it can lead to a deadlock. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// let (tx, _) = watch::channel("hello"); - /// assert_eq!(*tx.borrow(), "hello"); - /// ``` - pub fn borrow(&self) -> Ref<'_, T> { - let inner = self.shared.value.read().unwrap(); - - // The sender/producer always sees the current version - let has_changed = false; - - Ref { inner, has_changed } - } - - /// Checks if the channel has been closed. This happens when all receivers - /// have dropped. 
- /// - /// # Examples - /// - /// ``` - /// let (tx, rx) = tokio::sync::watch::channel(()); - /// assert!(!tx.is_closed()); - /// - /// drop(rx); - /// assert!(tx.is_closed()); - /// ``` - pub fn is_closed(&self) -> bool { - self.receiver_count() == 0 - } - - /// Completes when all receivers have dropped. - /// - /// This allows the producer to get notified when interest in the produced - /// values is canceled and immediately stop doing work. - /// - /// # Cancel safety - /// - /// This method is cancel safe. Once the channel is closed, it stays closed - /// forever and all future calls to `closed` will return immediately. - /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx) = watch::channel("hello"); - /// - /// tokio::spawn(async move { - /// // use `rx` - /// drop(rx); - /// }); - /// - /// // Waits for `rx` to drop - /// tx.closed().await; - /// println!("the `rx` handles dropped") - /// } - /// ``` - pub async fn closed(&self) { - crate::trace::async_trace_leaf().await; - - while self.receiver_count() > 0 { - let notified = self.shared.notify_tx.notified(); - - if self.receiver_count() == 0 { - return; - } - - notified.await; - // The channel could have been reopened in the meantime by calling - // `subscribe`, so we loop again. - } - } - - /// Creates a new [`Receiver`] connected to this `Sender`. - /// - /// All messages sent before this call to `subscribe` are initially marked - /// as seen by the new `Receiver`. - /// - /// This method can be called even if there are no other receivers. In this - /// case, the channel is reopened. - /// - /// # Examples - /// - /// The new channel will receive messages sent on this `Sender`. 
- /// - /// ``` - /// use tokio::sync::watch; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, _rx) = watch::channel(0u64); - /// - /// tx.send(5).unwrap(); - /// - /// let rx = tx.subscribe(); - /// assert_eq!(5, *rx.borrow()); - /// - /// tx.send(10).unwrap(); - /// assert_eq!(10, *rx.borrow()); - /// } - /// ``` - /// - /// The most recent message is considered seen by the channel, so this test - /// is guaranteed to pass. - /// - /// ``` - /// use tokio::sync::watch; - /// use tokio::time::Duration; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, _rx) = watch::channel(0u64); - /// tx.send(5).unwrap(); - /// let mut rx = tx.subscribe(); - /// - /// tokio::spawn(async move { - /// // by spawning and sleeping, the message is sent after `main` - /// // hits the call to `changed`. - /// # if false { - /// tokio::time::sleep(Duration::from_millis(10)).await; - /// # } - /// tx.send(100).unwrap(); - /// }); - /// - /// rx.changed().await.unwrap(); - /// assert_eq!(100, *rx.borrow()); - /// } - /// ``` - pub fn subscribe(&self) -> Receiver { - let shared = self.shared.clone(); - let version = shared.state.load().version(); - - // The CLOSED bit in the state tracks only whether the sender is - // dropped, so we do not need to unset it if this reopens the channel. - Receiver::from_shared(version, shared) - } - - /// Returns the number of receivers that currently exist. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::sync::watch; - /// - /// #[tokio::main] - /// async fn main() { - /// let (tx, rx1) = watch::channel("hello"); - /// - /// assert_eq!(1, tx.receiver_count()); - /// - /// let mut _rx2 = rx1.clone(); - /// - /// assert_eq!(2, tx.receiver_count()); - /// } - /// ``` - pub fn receiver_count(&self) -> usize { - self.shared.ref_count_rx.load(Relaxed) - } -} - -impl Drop for Sender { - fn drop(&mut self) { - self.shared.state.set_closed(); - self.shared.notify_rx.notify_waiters(); - } -} - -// ===== impl Ref ===== - -impl ops::Deref for Ref<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - self.inner.deref() - } -} - -#[cfg(all(test, loom))] -mod tests { - use futures::future::FutureExt; - use loom::thread; - - // test for https://github.com/tokio-rs/tokio/issues/3168 - #[test] - fn watch_spurious_wakeup() { - loom::model(|| { - let (send, mut recv) = crate::sync::watch::channel(0i32); - - send.send(1).unwrap(); - - let send_thread = thread::spawn(move || { - send.send(2).unwrap(); - send - }); - - recv.changed().now_or_never(); - - let send = send_thread.join().unwrap(); - let recv_thread = thread::spawn(move || { - recv.changed().now_or_never(); - recv.changed().now_or_never(); - recv - }); - - send.send(3).unwrap(); - - let mut recv = recv_thread.join().unwrap(); - let send_thread = thread::spawn(move || { - send.send(2).unwrap(); - }); - - recv.changed().now_or_never(); - - send_thread.join().unwrap(); - }); - } - - #[test] - fn watch_borrow() { - loom::model(|| { - let (send, mut recv) = crate::sync::watch::channel(0i32); - - assert!(send.borrow().eq(&0)); - assert!(recv.borrow().eq(&0)); - - send.send(1).unwrap(); - assert!(send.borrow().eq(&1)); - - let send_thread = thread::spawn(move || { - send.send(2).unwrap(); - send - }); - - recv.changed().now_or_never(); - - let send = send_thread.join().unwrap(); - let recv_thread = thread::spawn(move || { - recv.changed().now_or_never(); - 
recv.changed().now_or_never(); - recv - }); - - send.send(3).unwrap(); - - let recv = recv_thread.join().unwrap(); - assert!(recv.borrow().eq(&3)); - assert!(send.borrow().eq(&3)); - - send.send(2).unwrap(); - - thread::spawn(move || { - assert!(recv.borrow().eq(&2)); - }); - assert!(send.borrow().eq(&2)); - }); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/blocking.rs s390-tools-2.33.1/rust-vendor/tokio/src/task/blocking.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/blocking.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/blocking.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,211 +0,0 @@ -use crate::task::JoinHandle; - -cfg_rt_multi_thread! { - /// Runs the provided blocking function on the current thread without - /// blocking the executor. - /// - /// In general, issuing a blocking call or performing a lot of compute in a - /// future without yielding is problematic, as it may prevent the executor - /// from driving other tasks forward. Calling this function informs the - /// executor that the currently executing task is about to block the thread, - /// so the executor is able to hand off any other tasks it has to a new - /// worker thread before that happens. See the [CPU-bound tasks and blocking - /// code][blocking] section for more information. - /// - /// Be aware that although this function avoids starving other independently - /// spawned tasks, any other code running concurrently in the same task will - /// be suspended during the call to `block_in_place`. This can happen e.g. - /// when using the [`join!`] macro. To avoid this issue, use - /// [`spawn_blocking`] instead of `block_in_place`. - /// - /// Note that this function cannot be used within a [`current_thread`] runtime - /// because in this case there are no other worker threads to hand off tasks - /// to. On the other hand, calling the function outside a runtime is - /// allowed. 
In this case, `block_in_place` just calls the provided closure - /// normally. - /// - /// Code running behind `block_in_place` cannot be cancelled. When you shut - /// down the executor, it will wait indefinitely for all blocking operations - /// to finish. You can use [`shutdown_timeout`] to stop waiting for them - /// after a certain timeout. Be aware that this will still not cancel the - /// tasks — they are simply allowed to keep running after the method - /// returns. - /// - /// [blocking]: ../index.html#cpu-bound-tasks-and-blocking-code - /// [`spawn_blocking`]: fn@crate::task::spawn_blocking - /// [`join!`]: macro@join - /// [`thread::spawn`]: fn@std::thread::spawn - /// [`shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout - /// - /// # Examples - /// - /// ``` - /// use tokio::task; - /// - /// # async fn docs() { - /// task::block_in_place(move || { - /// // do some compute-heavy work or call synchronous code - /// }); - /// # } - /// ``` - /// - /// Code running inside `block_in_place` may use `block_on` to reenter the - /// async context. - /// - /// ``` - /// use tokio::task; - /// use tokio::runtime::Handle; - /// - /// # async fn docs() { - /// task::block_in_place(move || { - /// Handle::current().block_on(async move { - /// // do something async - /// }); - /// }); - /// # } - /// ``` - /// - /// # Panics - /// - /// This function panics if called from a [`current_thread`] runtime. - /// - /// [`current_thread`]: fn@crate::runtime::Builder::new_current_thread - #[track_caller] - pub fn block_in_place(f: F) -> R - where - F: FnOnce() -> R, - { - crate::runtime::scheduler::block_in_place(f) - } -} - -cfg_rt! { - /// Runs the provided closure on a thread where blocking is acceptable. - /// - /// In general, issuing a blocking call or performing a lot of compute in a - /// future without yielding is problematic, as it may prevent the executor from - /// driving other futures forward. 
This function runs the provided closure on a - /// thread dedicated to blocking operations. See the [CPU-bound tasks and - /// blocking code][blocking] section for more information. - /// - /// Tokio will spawn more blocking threads when they are requested through this - /// function until the upper limit configured on the [`Builder`] is reached. - /// After reaching the upper limit, the tasks are put in a queue. - /// The thread limit is very large by default, because `spawn_blocking` is often - /// used for various kinds of IO operations that cannot be performed - /// asynchronously. When you run CPU-bound code using `spawn_blocking`, you - /// should keep this large upper limit in mind. When running many CPU-bound - /// computations, a semaphore or some other synchronization primitive should be - /// used to limit the number of computation executed in parallel. Specialized - /// CPU-bound executors, such as [rayon], may also be a good fit. - /// - /// This function is intended for non-async operations that eventually finish on - /// their own. If you want to spawn an ordinary thread, you should use - /// [`thread::spawn`] instead. - /// - /// Closures spawned using `spawn_blocking` cannot be cancelled abruptly; there - /// is no standard low level API to cause a thread to stop running. However, - /// a useful pattern is to pass some form of "cancellation token" into - /// the thread. This could be an [`AtomicBool`] that the task checks periodically. - /// Another approach is to have the thread primarily read or write from a channel, - /// and to exit when the channel closes; assuming the other side of the channel is dropped - /// when cancellation occurs, this will cause the blocking task thread to exit - /// soon after as well. - /// - /// When you shut down the executor, it will wait indefinitely for all blocking operations to - /// finish. You can use [`shutdown_timeout`] to stop waiting for them after a - /// certain timeout. 
Be aware that this will still not cancel the tasks — they - /// are simply allowed to keep running after the method returns. It is possible - /// for a blocking task to be cancelled if it has not yet started running, but this - /// is not guaranteed. - /// - /// Note that if you are using the single threaded runtime, this function will - /// still spawn additional threads for blocking operations. The current-thread - /// scheduler's single thread is only used for asynchronous code. - /// - /// # Related APIs and patterns for bridging asynchronous and blocking code - /// - /// In simple cases, it is sufficient to have the closure accept input - /// parameters at creation time and return a single value (or struct/tuple, etc.). - /// - /// For more complex situations in which it is desirable to stream data to or from - /// the synchronous context, the [`mpsc channel`] has `blocking_send` and - /// `blocking_recv` methods for use in non-async code such as the thread created - /// by `spawn_blocking`. - /// - /// Another option is [`SyncIoBridge`] for cases where the synchronous context - /// is operating on byte streams. For example, you might use an asynchronous - /// HTTP client such as [hyper] to fetch data, but perform complex parsing - /// of the payload body using a library written for synchronous I/O. - /// - /// Finally, see also [Bridging with sync code][bridgesync] for discussions - /// around the opposite case of using Tokio as part of a larger synchronous - /// codebase. 
- /// - /// [`Builder`]: struct@crate::runtime::Builder - /// [blocking]: ../index.html#cpu-bound-tasks-and-blocking-code - /// [rayon]: https://docs.rs/rayon - /// [`mpsc channel`]: crate::sync::mpsc - /// [`SyncIoBridge`]: https://docs.rs/tokio-util/latest/tokio_util/io/struct.SyncIoBridge.html - /// [hyper]: https://docs.rs/hyper - /// [`thread::spawn`]: fn@std::thread::spawn - /// [`shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout - /// [bridgesync]: https://tokio.rs/tokio/topics/bridging - /// [`AtomicBool`]: struct@std::sync::atomic::AtomicBool - /// - /// # Examples - /// - /// Pass an input value and receive result of computation: - /// - /// ``` - /// use tokio::task; - /// - /// # async fn docs() -> Result<(), Box>{ - /// // Initial input - /// let mut v = "Hello, ".to_string(); - /// let res = task::spawn_blocking(move || { - /// // Stand-in for compute-heavy work or using synchronous APIs - /// v.push_str("world"); - /// // Pass ownership of the value back to the asynchronous context - /// v - /// }).await?; - /// - /// // `res` is the value returned from the thread - /// assert_eq!(res.as_str(), "Hello, world"); - /// # Ok(()) - /// # } - /// ``` - /// - /// Use a channel: - /// - /// ``` - /// use tokio::task; - /// use tokio::sync::mpsc; - /// - /// # async fn docs() { - /// let (tx, mut rx) = mpsc::channel(2); - /// let start = 5; - /// let worker = task::spawn_blocking(move || { - /// for x in 0..10 { - /// // Stand in for complex computation - /// tx.blocking_send(start + x).unwrap(); - /// } - /// }); - /// - /// let mut acc = 0; - /// while let Some(v) = rx.recv().await { - /// acc += v; - /// } - /// assert_eq!(acc, 95); - /// worker.await.unwrap(); - /// # } - /// ``` - #[track_caller] - pub fn spawn_blocking(f: F) -> JoinHandle - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - crate::runtime::spawn_blocking(f) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/builder.rs 
s390-tools-2.33.1/rust-vendor/tokio/src/task/builder.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/builder.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/builder.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,201 +0,0 @@ -#![allow(unreachable_pub)] -use crate::{ - runtime::Handle, - task::{JoinHandle, LocalSet}, -}; -use std::{future::Future, io}; - -/// Factory which is used to configure the properties of a new task. -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. -/// -/// Methods can be chained in order to configure it. -/// -/// Currently, there is only one configuration option: -/// -/// - [`name`], which specifies an associated name for -/// the task -/// -/// There are three types of task that can be spawned from a Builder: -/// - [`spawn_local`] for executing futures on the current thread -/// - [`spawn`] for executing [`Send`] futures on the runtime -/// - [`spawn_blocking`] for executing blocking code in the -/// blocking thread pool. -/// -/// ## Example -/// -/// ```no_run -/// use tokio::net::{TcpListener, TcpStream}; -/// -/// use std::io; -/// -/// async fn process(socket: TcpStream) { -/// // ... -/// # drop(socket); -/// } -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// let listener = TcpListener::bind("127.0.0.1:8080").await?; -/// -/// loop { -/// let (socket, _) = listener.accept().await?; -/// -/// tokio::task::Builder::new() -/// .name("tcp connection handler") -/// .spawn(async move { -/// // Process each socket concurrently. 
-/// process(socket).await -/// })?; -/// } -/// } -/// ``` -/// [unstable]: crate#unstable-features -/// [`name`]: Builder::name -/// [`spawn_local`]: Builder::spawn_local -/// [`spawn`]: Builder::spawn -/// [`spawn_blocking`]: Builder::spawn_blocking -#[derive(Default, Debug)] -#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] -pub struct Builder<'a> { - name: Option<&'a str>, -} - -impl<'a> Builder<'a> { - /// Creates a new task builder. - pub fn new() -> Self { - Self::default() - } - - /// Assigns a name to the task which will be spawned. - pub fn name(&self, name: &'a str) -> Self { - Self { name: Some(name) } - } - - /// Spawns a task with this builder's settings on the current runtime. - /// - /// # Panics - /// - /// This method panics if called outside of a Tokio runtime. - /// - /// See [`task::spawn`](crate::task::spawn()) for - /// more details. - #[track_caller] - pub fn spawn(self, future: Fut) -> io::Result> - where - Fut: Future + Send + 'static, - Fut::Output: Send + 'static, - { - Ok(super::spawn::spawn_inner(future, self.name)) - } - - /// Spawn a task with this builder's settings on the provided [runtime - /// handle]. - /// - /// See [`Handle::spawn`] for more details. - /// - /// [runtime handle]: crate::runtime::Handle - /// [`Handle::spawn`]: crate::runtime::Handle::spawn - #[track_caller] - pub fn spawn_on(self, future: Fut, handle: &Handle) -> io::Result> - where - Fut: Future + Send + 'static, - Fut::Output: Send + 'static, - { - Ok(handle.spawn_named(future, self.name)) - } - - /// Spawns `!Send` a task on the current [`LocalSet`] with this builder's - /// settings. - /// - /// The spawned future will be run on the same thread that called `spawn_local`. - /// This may only be called from the context of a [local task set][`LocalSet`]. - /// - /// # Panics - /// - /// This function panics if called outside of a [local task set][`LocalSet`]. - /// - /// See [`task::spawn_local`] for more details. 
- /// - /// [`task::spawn_local`]: crate::task::spawn_local - /// [`LocalSet`]: crate::task::LocalSet - #[track_caller] - pub fn spawn_local(self, future: Fut) -> io::Result> - where - Fut: Future + 'static, - Fut::Output: 'static, - { - Ok(super::local::spawn_local_inner(future, self.name)) - } - - /// Spawns `!Send` a task on the provided [`LocalSet`] with this builder's - /// settings. - /// - /// See [`LocalSet::spawn_local`] for more details. - /// - /// [`LocalSet::spawn_local`]: crate::task::LocalSet::spawn_local - /// [`LocalSet`]: crate::task::LocalSet - #[track_caller] - pub fn spawn_local_on( - self, - future: Fut, - local_set: &LocalSet, - ) -> io::Result> - where - Fut: Future + 'static, - Fut::Output: 'static, - { - Ok(local_set.spawn_named(future, self.name)) - } - - /// Spawns blocking code on the blocking threadpool. - /// - /// # Panics - /// - /// This method panics if called outside of a Tokio runtime. - /// - /// See [`task::spawn_blocking`](crate::task::spawn_blocking) - /// for more details. - #[track_caller] - pub fn spawn_blocking( - self, - function: Function, - ) -> io::Result> - where - Function: FnOnce() -> Output + Send + 'static, - Output: Send + 'static, - { - let handle = Handle::current(); - self.spawn_blocking_on(function, &handle) - } - - /// Spawns blocking code on the provided [runtime handle]'s blocking threadpool. - /// - /// See [`Handle::spawn_blocking`] for more details. 
- /// - /// [runtime handle]: crate::runtime::Handle - /// [`Handle::spawn_blocking`]: crate::runtime::Handle::spawn_blocking - #[track_caller] - pub fn spawn_blocking_on( - self, - function: Function, - handle: &Handle, - ) -> io::Result> - where - Function: FnOnce() -> Output + Send + 'static, - Output: Send + 'static, - { - use crate::runtime::Mandatory; - let (join_handle, spawn_result) = handle.inner.blocking_spawner().spawn_blocking_inner( - function, - Mandatory::NonMandatory, - self.name, - handle, - ); - - spawn_result?; - Ok(join_handle) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/consume_budget.rs s390-tools-2.33.1/rust-vendor/tokio/src/task/consume_budget.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/consume_budget.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/consume_budget.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -use std::task::Poll; - -/// Consumes a unit of budget and returns the execution back to the Tokio -/// runtime *if* the task's coop budget was exhausted. -/// -/// The task will only yield if its entire coop budget has been exhausted. -/// This function can be used in order to insert optional yield points into long -/// computations that do not use Tokio resources like sockets or semaphores, -/// without redundantly yielding to the runtime each time. -/// -/// **Note**: This is an [unstable API][unstable]. The public API of this type -/// may break in 1.x releases. See [the documentation on unstable -/// features][unstable] for details. -/// -/// # Examples -/// -/// Make sure that a function which returns a sum of (potentially lots of) -/// iterated values is cooperative. 
-/// -/// ``` -/// async fn sum_iterator(input: &mut impl std::iter::Iterator) -> i64 { -/// let mut sum: i64 = 0; -/// while let Some(i) = input.next() { -/// sum += i; -/// tokio::task::consume_budget().await -/// } -/// sum -/// } -/// ``` -/// [unstable]: crate#unstable-features -#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "rt"))))] -pub async fn consume_budget() { - let mut status = Poll::Pending; - - crate::future::poll_fn(move |cx| { - ready!(crate::trace::trace_leaf(cx)); - if status.is_ready() { - return status; - } - status = crate::runtime::coop::poll_proceed(cx).map(|restore| { - restore.made_progress(); - }); - status - }) - .await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/join_set.rs s390-tools-2.33.1/rust-vendor/tokio/src/task/join_set.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/join_set.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/join_set.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,584 +0,0 @@ -//! A collection of tasks spawned on a Tokio runtime. -//! -//! This module provides the [`JoinSet`] type, a collection which stores a set -//! of spawned tasks and allows asynchronously awaiting the output of those -//! tasks as they complete. See the documentation for the [`JoinSet`] type for -//! details. -use std::fmt; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -use crate::runtime::Handle; -#[cfg(tokio_unstable)] -use crate::task::Id; -use crate::task::{AbortHandle, JoinError, JoinHandle, LocalSet}; -use crate::util::IdleNotifiedSet; - -/// A collection of tasks spawned on a Tokio runtime. -/// -/// A `JoinSet` can be used to await the completion of some or all of the tasks -/// in the set. The set is not ordered, and the tasks will be returned in the -/// order they complete. -/// -/// All of the tasks must have the same return type `T`. -/// -/// When the `JoinSet` is dropped, all tasks in the `JoinSet` are immediately aborted. 
-/// -/// # Examples -/// -/// Spawn multiple tasks and wait for them. -/// -/// ``` -/// use tokio::task::JoinSet; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut set = JoinSet::new(); -/// -/// for i in 0..10 { -/// set.spawn(async move { i }); -/// } -/// -/// let mut seen = [false; 10]; -/// while let Some(res) = set.join_next().await { -/// let idx = res.unwrap(); -/// seen[idx] = true; -/// } -/// -/// for i in 0..10 { -/// assert!(seen[i]); -/// } -/// } -/// ``` -#[cfg_attr(docsrs, doc(cfg(feature = "rt")))] -pub struct JoinSet { - inner: IdleNotifiedSet>, -} - -/// A variant of [`task::Builder`] that spawns tasks on a [`JoinSet`] rather -/// than on the current default runtime. -/// -/// [`task::Builder`]: crate::task::Builder -#[cfg(all(tokio_unstable, feature = "tracing"))] -#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] -#[must_use = "builders do nothing unless used to spawn a task"] -pub struct Builder<'a, T> { - joinset: &'a mut JoinSet, - builder: super::Builder<'a>, -} - -impl JoinSet { - /// Create a new `JoinSet`. - pub fn new() -> Self { - Self { - inner: IdleNotifiedSet::new(), - } - } - - /// Returns the number of tasks currently in the `JoinSet`. - pub fn len(&self) -> usize { - self.inner.len() - } - - /// Returns whether the `JoinSet` is empty. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } -} - -impl JoinSet { - /// Returns a [`Builder`] that can be used to configure a task prior to - /// spawning it on this `JoinSet`. - /// - /// # Examples - /// - /// ``` - /// use tokio::task::JoinSet; - /// - /// #[tokio::main] - /// async fn main() -> std::io::Result<()> { - /// let mut set = JoinSet::new(); - /// - /// // Use the builder to configure a task's name before spawning it. - /// set.build_task() - /// .name("my_task") - /// .spawn(async { /* ... 
*/ })?; - /// - /// Ok(()) - /// } - /// ``` - #[cfg(all(tokio_unstable, feature = "tracing"))] - #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] - pub fn build_task(&mut self) -> Builder<'_, T> { - Builder { - builder: super::Builder::new(), - joinset: self, - } - } - - /// Spawn the provided task on the `JoinSet`, returning an [`AbortHandle`] - /// that can be used to remotely cancel the task. - /// - /// The provided future will start running in the background immediately - /// when this method is called, even if you don't await anything on this - /// `JoinSet`. - /// - /// # Panics - /// - /// This method panics if called outside of a Tokio runtime. - /// - /// [`AbortHandle`]: crate::task::AbortHandle - #[track_caller] - pub fn spawn(&mut self, task: F) -> AbortHandle - where - F: Future, - F: Send + 'static, - T: Send, - { - self.insert(crate::spawn(task)) - } - - /// Spawn the provided task on the provided runtime and store it in this - /// `JoinSet` returning an [`AbortHandle`] that can be used to remotely - /// cancel the task. - /// - /// The provided future will start running in the background immediately - /// when this method is called, even if you don't await anything on this - /// `JoinSet`. - /// - /// [`AbortHandle`]: crate::task::AbortHandle - #[track_caller] - pub fn spawn_on(&mut self, task: F, handle: &Handle) -> AbortHandle - where - F: Future, - F: Send + 'static, - T: Send, - { - self.insert(handle.spawn(task)) - } - - /// Spawn the provided task on the current [`LocalSet`] and store it in this - /// `JoinSet`, returning an [`AbortHandle`] that can be used to remotely - /// cancel the task. - /// - /// The provided future will start running in the background immediately - /// when this method is called, even if you don't await anything on this - /// `JoinSet`. - /// - /// # Panics - /// - /// This method panics if it is called outside of a `LocalSet`. 
- /// - /// [`LocalSet`]: crate::task::LocalSet - /// [`AbortHandle`]: crate::task::AbortHandle - #[track_caller] - pub fn spawn_local(&mut self, task: F) -> AbortHandle - where - F: Future, - F: 'static, - { - self.insert(crate::task::spawn_local(task)) - } - - /// Spawn the provided task on the provided [`LocalSet`] and store it in - /// this `JoinSet`, returning an [`AbortHandle`] that can be used to - /// remotely cancel the task. - /// - /// Unlike the [`spawn_local`] method, this method may be used to spawn local - /// tasks on a `LocalSet` that is _not_ currently running. The provided - /// future will start running whenever the `LocalSet` is next started. - /// - /// [`LocalSet`]: crate::task::LocalSet - /// [`AbortHandle`]: crate::task::AbortHandle - /// [`spawn_local`]: Self::spawn_local - #[track_caller] - pub fn spawn_local_on(&mut self, task: F, local_set: &LocalSet) -> AbortHandle - where - F: Future, - F: 'static, - { - self.insert(local_set.spawn_local(task)) - } - - /// Spawn the blocking code on the blocking threadpool and store - /// it in this `JoinSet`, returning an [`AbortHandle`] that can be - /// used to remotely cancel the task. - /// - /// # Examples - /// - /// Spawn multiple blocking tasks and wait for them. - /// - /// ``` - /// use tokio::task::JoinSet; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut set = JoinSet::new(); - /// - /// for i in 0..10 { - /// set.spawn_blocking(move || { i }); - /// } - /// - /// let mut seen = [false; 10]; - /// while let Some(res) = set.join_next().await { - /// let idx = res.unwrap(); - /// seen[idx] = true; - /// } - /// - /// for i in 0..10 { - /// assert!(seen[i]); - /// } - /// } - /// ``` - /// - /// # Panics - /// - /// This method panics if called outside of a Tokio runtime. 
- /// - /// [`AbortHandle`]: crate::task::AbortHandle - #[track_caller] - pub fn spawn_blocking(&mut self, f: F) -> AbortHandle - where - F: FnOnce() -> T, - F: Send + 'static, - T: Send, - { - self.insert(crate::runtime::spawn_blocking(f)) - } - - /// Spawn the blocking code on the blocking threadpool of the - /// provided runtime and store it in this `JoinSet`, returning an - /// [`AbortHandle`] that can be used to remotely cancel the task. - /// - /// [`AbortHandle`]: crate::task::AbortHandle - #[track_caller] - pub fn spawn_blocking_on(&mut self, f: F, handle: &Handle) -> AbortHandle - where - F: FnOnce() -> T, - F: Send + 'static, - T: Send, - { - self.insert(handle.spawn_blocking(f)) - } - - fn insert(&mut self, jh: JoinHandle) -> AbortHandle { - let abort = jh.abort_handle(); - let mut entry = self.inner.insert_idle(jh); - - // Set the waker that is notified when the task completes. - entry.with_value_and_context(|jh, ctx| jh.set_join_waker(ctx.waker())); - abort - } - - /// Waits until one of the tasks in the set completes and returns its output. - /// - /// Returns `None` if the set is empty. - /// - /// # Cancel Safety - /// - /// This method is cancel safe. If `join_next` is used as the event in a `tokio::select!` - /// statement and some other branch completes first, it is guaranteed that no tasks were - /// removed from this `JoinSet`. - pub async fn join_next(&mut self) -> Option> { - crate::future::poll_fn(|cx| self.poll_join_next(cx)).await - } - - /// Waits until one of the tasks in the set completes and returns its - /// output, along with the [task ID] of the completed task. - /// - /// Returns `None` if the set is empty. - /// - /// When this method returns an error, then the id of the task that failed can be accessed - /// using the [`JoinError::id`] method. - /// - /// # Cancel Safety - /// - /// This method is cancel safe. 
If `join_next_with_id` is used as the event in a `tokio::select!` - /// statement and some other branch completes first, it is guaranteed that no tasks were - /// removed from this `JoinSet`. - /// - /// [task ID]: crate::task::Id - /// [`JoinError::id`]: fn@crate::task::JoinError::id - #[cfg(tokio_unstable)] - #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] - pub async fn join_next_with_id(&mut self) -> Option> { - crate::future::poll_fn(|cx| self.poll_join_next_with_id(cx)).await - } - - /// Aborts all tasks and waits for them to finish shutting down. - /// - /// Calling this method is equivalent to calling [`abort_all`] and then calling [`join_next`] in - /// a loop until it returns `None`. - /// - /// This method ignores any panics in the tasks shutting down. When this call returns, the - /// `JoinSet` will be empty. - /// - /// [`abort_all`]: fn@Self::abort_all - /// [`join_next`]: fn@Self::join_next - pub async fn shutdown(&mut self) { - self.abort_all(); - while self.join_next().await.is_some() {} - } - - /// Aborts all tasks on this `JoinSet`. - /// - /// This does not remove the tasks from the `JoinSet`. To wait for the tasks to complete - /// cancellation, you should call `join_next` in a loop until the `JoinSet` is empty. - pub fn abort_all(&mut self) { - self.inner.for_each(|jh| jh.abort()); - } - - /// Removes all tasks from this `JoinSet` without aborting them. - /// - /// The tasks removed by this call will continue to run in the background even if the `JoinSet` - /// is dropped. - pub fn detach_all(&mut self) { - self.inner.drain(drop); - } - - /// Polls for one of the tasks in the set to complete. - /// - /// If this returns `Poll::Ready(Some(_))`, then the task that completed is removed from the set. - /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context` is scheduled - /// to receive a wakeup when a task in the `JoinSet` completes. 
Note that on multiple calls to - /// `poll_join_next`, only the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. - /// - /// # Returns - /// - /// This function returns: - /// - /// * `Poll::Pending` if the `JoinSet` is not empty but there is no task whose output is - /// available right now. - /// * `Poll::Ready(Some(Ok(value)))` if one of the tasks in this `JoinSet` has completed. - /// The `value` is the return value of one of the tasks that completed. - /// * `Poll::Ready(Some(Err(err)))` if one of the tasks in this `JoinSet` has panicked or been - /// aborted. The `err` is the `JoinError` from the panicked/aborted task. - /// * `Poll::Ready(None)` if the `JoinSet` is empty. - /// - /// Note that this method may return `Poll::Pending` even if one of the tasks has completed. - /// This can happen if the [coop budget] is reached. - /// - /// [coop budget]: crate::task#cooperative-scheduling - pub fn poll_join_next(&mut self, cx: &mut Context<'_>) -> Poll>> { - // The call to `pop_notified` moves the entry to the `idle` list. It is moved back to - // the `notified` list if the waker is notified in the `poll` call below. - let mut entry = match self.inner.pop_notified(cx.waker()) { - Some(entry) => entry, - None => { - if self.is_empty() { - return Poll::Ready(None); - } else { - // The waker was set by `pop_notified`. - return Poll::Pending; - } - } - }; - - let res = entry.with_value_and_context(|jh, ctx| Pin::new(jh).poll(ctx)); - - if let Poll::Ready(res) = res { - let _entry = entry.remove(); - Poll::Ready(Some(res)) - } else { - // A JoinHandle generally won't emit a wakeup without being ready unless - // the coop limit has been reached. We yield to the executor in this - // case. - cx.waker().wake_by_ref(); - Poll::Pending - } - } - - /// Polls for one of the tasks in the set to complete. - /// - /// If this returns `Poll::Ready(Some(_))`, then the task that completed is removed from the set. 
- /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided `Context` is scheduled - /// to receive a wakeup when a task in the `JoinSet` completes. Note that on multiple calls to - /// `poll_join_next`, only the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. - /// - /// # Returns - /// - /// This function returns: - /// - /// * `Poll::Pending` if the `JoinSet` is not empty but there is no task whose output is - /// available right now. - /// * `Poll::Ready(Some(Ok((id, value))))` if one of the tasks in this `JoinSet` has completed. - /// The `value` is the return value of one of the tasks that completed, and - /// `id` is the [task ID] of that task. - /// * `Poll::Ready(Some(Err(err)))` if one of the tasks in this `JoinSet` has panicked or been - /// aborted. The `err` is the `JoinError` from the panicked/aborted task. - /// * `Poll::Ready(None)` if the `JoinSet` is empty. - /// - /// Note that this method may return `Poll::Pending` even if one of the tasks has completed. - /// This can happen if the [coop budget] is reached. - /// - /// [coop budget]: crate::task#cooperative-scheduling - /// [task ID]: crate::task::Id - #[cfg(tokio_unstable)] - #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] - pub fn poll_join_next_with_id( - &mut self, - cx: &mut Context<'_>, - ) -> Poll>> { - // The call to `pop_notified` moves the entry to the `idle` list. It is moved back to - // the `notified` list if the waker is notified in the `poll` call below. - let mut entry = match self.inner.pop_notified(cx.waker()) { - Some(entry) => entry, - None => { - if self.is_empty() { - return Poll::Ready(None); - } else { - // The waker was set by `pop_notified`. - return Poll::Pending; - } - } - }; - - let res = entry.with_value_and_context(|jh, ctx| Pin::new(jh).poll(ctx)); - - if let Poll::Ready(res) = res { - let entry = entry.remove(); - // If the task succeeded, add the task ID to the output. 
Otherwise, the - // `JoinError` will already have the task's ID. - Poll::Ready(Some(res.map(|output| (entry.id(), output)))) - } else { - // A JoinHandle generally won't emit a wakeup without being ready unless - // the coop limit has been reached. We yield to the executor in this - // case. - cx.waker().wake_by_ref(); - Poll::Pending - } - } -} - -impl Drop for JoinSet { - fn drop(&mut self) { - self.inner.drain(|join_handle| join_handle.abort()); - } -} - -impl fmt::Debug for JoinSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("JoinSet").field("len", &self.len()).finish() - } -} - -impl Default for JoinSet { - fn default() -> Self { - Self::new() - } -} - -// === impl Builder === - -#[cfg(all(tokio_unstable, feature = "tracing"))] -#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] -impl<'a, T: 'static> Builder<'a, T> { - /// Assigns a name to the task which will be spawned. - pub fn name(self, name: &'a str) -> Self { - let builder = self.builder.name(name); - Self { builder, ..self } - } - - /// Spawn the provided task with this builder's settings and store it in the - /// [`JoinSet`], returning an [`AbortHandle`] that can be used to remotely - /// cancel the task. - /// - /// # Returns - /// - /// An [`AbortHandle`] that can be used to remotely cancel the task. - /// - /// # Panics - /// - /// This method panics if called outside of a Tokio runtime. - /// - /// [`AbortHandle`]: crate::task::AbortHandle - #[track_caller] - pub fn spawn(self, future: F) -> std::io::Result - where - F: Future, - F: Send + 'static, - T: Send, - { - Ok(self.joinset.insert(self.builder.spawn(future)?)) - } - - /// Spawn the provided task on the provided [runtime handle] with this - /// builder's settings, and store it in the [`JoinSet`]. - /// - /// # Returns - /// - /// An [`AbortHandle`] that can be used to remotely cancel the task. 
- /// - /// - /// [`AbortHandle`]: crate::task::AbortHandle - /// [runtime handle]: crate::runtime::Handle - #[track_caller] - pub fn spawn_on(self, future: F, handle: &Handle) -> std::io::Result - where - F: Future, - F: Send + 'static, - T: Send, - { - Ok(self.joinset.insert(self.builder.spawn_on(future, handle)?)) - } - - /// Spawn the provided task on the current [`LocalSet`] with this builder's - /// settings, and store it in the [`JoinSet`]. - /// - /// # Returns - /// - /// An [`AbortHandle`] that can be used to remotely cancel the task. - /// - /// # Panics - /// - /// This method panics if it is called outside of a `LocalSet`. - /// - /// [`LocalSet`]: crate::task::LocalSet - /// [`AbortHandle`]: crate::task::AbortHandle - #[track_caller] - pub fn spawn_local(self, future: F) -> std::io::Result - where - F: Future, - F: 'static, - { - Ok(self.joinset.insert(self.builder.spawn_local(future)?)) - } - - /// Spawn the provided task on the provided [`LocalSet`] with this builder's - /// settings, and store it in the [`JoinSet`]. - /// - /// # Returns - /// - /// An [`AbortHandle`] that can be used to remotely cancel the task. - /// - /// [`LocalSet`]: crate::task::LocalSet - /// [`AbortHandle`]: crate::task::AbortHandle - #[track_caller] - pub fn spawn_local_on(self, future: F, local_set: &LocalSet) -> std::io::Result - where - F: Future, - F: 'static, - { - Ok(self - .joinset - .insert(self.builder.spawn_local_on(future, local_set)?)) - } -} - -// Manual `Debug` impl so that `Builder` is `Debug` regardless of whether `T` is -// `Debug`. 
-#[cfg(all(tokio_unstable, feature = "tracing"))] -#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "tracing"))))] -impl<'a, T> fmt::Debug for Builder<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("join_set::Builder") - .field("joinset", &self.joinset) - .field("builder", &self.builder) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/local.rs s390-tools-2.33.1/rust-vendor/tokio/src/task/local.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/local.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/local.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1202 +0,0 @@ -//! Runs `!Send` futures on the current thread. -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::{Arc, Mutex}; -#[cfg(tokio_unstable)] -use crate::runtime; -use crate::runtime::task::{self, JoinHandle, LocalOwnedTasks, Task}; -use crate::runtime::{context, ThreadId}; -use crate::sync::AtomicWaker; -use crate::util::RcCell; - -use std::cell::Cell; -use std::collections::VecDeque; -use std::fmt; -use std::future::Future; -use std::marker::PhantomData; -use std::pin::Pin; -use std::rc::Rc; -use std::task::Poll; - -use pin_project_lite::pin_project; - -cfg_rt! { - /// A set of tasks which are executed on the same thread. - /// - /// In some cases, it is necessary to run one or more futures that do not - /// implement [`Send`] and thus are unsafe to send between threads. In these - /// cases, a [local task set] may be used to schedule one or more `!Send` - /// futures to run together on the same thread. - /// - /// For example, the following code will not compile: - /// - /// ```rust,compile_fail - /// use std::rc::Rc; - /// - /// #[tokio::main] - /// async fn main() { - /// // `Rc` does not implement `Send`, and thus may not be sent between - /// // threads safely. 
- /// let nonsend_data = Rc::new("my nonsend data..."); - /// - /// let nonsend_data = nonsend_data.clone(); - /// // Because the `async` block here moves `nonsend_data`, the future is `!Send`. - /// // Since `tokio::spawn` requires the spawned future to implement `Send`, this - /// // will not compile. - /// tokio::spawn(async move { - /// println!("{}", nonsend_data); - /// // ... - /// }).await.unwrap(); - /// } - /// ``` - /// - /// # Use with `run_until` - /// - /// To spawn `!Send` futures, we can use a local task set to schedule them - /// on the thread calling [`Runtime::block_on`]. When running inside of the - /// local task set, we can use [`task::spawn_local`], which can spawn - /// `!Send` futures. For example: - /// - /// ```rust - /// use std::rc::Rc; - /// use tokio::task; - /// - /// #[tokio::main] - /// async fn main() { - /// let nonsend_data = Rc::new("my nonsend data..."); - /// - /// // Construct a local task set that can run `!Send` futures. - /// let local = task::LocalSet::new(); - /// - /// // Run the local task set. - /// local.run_until(async move { - /// let nonsend_data = nonsend_data.clone(); - /// // `spawn_local` ensures that the future is spawned on the local - /// // task set. - /// task::spawn_local(async move { - /// println!("{}", nonsend_data); - /// // ... - /// }).await.unwrap(); - /// }).await; - /// } - /// ``` - /// **Note:** The `run_until` method can only be used in `#[tokio::main]`, - /// `#[tokio::test]` or directly inside a call to [`Runtime::block_on`]. It - /// cannot be used inside a task spawned with `tokio::spawn`. - /// - /// ## Awaiting a `LocalSet` - /// - /// Additionally, a `LocalSet` itself implements `Future`, completing when - /// *all* tasks spawned on the `LocalSet` complete. This can be used to run - /// several futures on a `LocalSet` and drive the whole set until they - /// complete. 
For example, - /// - /// ```rust - /// use tokio::{task, time}; - /// use std::rc::Rc; - /// - /// #[tokio::main] - /// async fn main() { - /// let nonsend_data = Rc::new("world"); - /// let local = task::LocalSet::new(); - /// - /// let nonsend_data2 = nonsend_data.clone(); - /// local.spawn_local(async move { - /// // ... - /// println!("hello {}", nonsend_data2) - /// }); - /// - /// local.spawn_local(async move { - /// time::sleep(time::Duration::from_millis(100)).await; - /// println!("goodbye {}", nonsend_data) - /// }); - /// - /// // ... - /// - /// local.await; - /// } - /// ``` - /// **Note:** Awaiting a `LocalSet` can only be done inside - /// `#[tokio::main]`, `#[tokio::test]` or directly inside a call to - /// [`Runtime::block_on`]. It cannot be used inside a task spawned with - /// `tokio::spawn`. - /// - /// ## Use inside `tokio::spawn` - /// - /// The two methods mentioned above cannot be used inside `tokio::spawn`, so - /// to spawn `!Send` futures from inside `tokio::spawn`, we need to do - /// something else. The solution is to create the `LocalSet` somewhere else, - /// and communicate with it using an [`mpsc`] channel. - /// - /// The following example puts the `LocalSet` inside a new thread. - /// ``` - /// use tokio::runtime::Builder; - /// use tokio::sync::{mpsc, oneshot}; - /// use tokio::task::LocalSet; - /// - /// // This struct describes the task you want to spawn. Here we include - /// // some simple examples. The oneshot channel allows sending a response - /// // to the spawner. 
- /// #[derive(Debug)] - /// enum Task { - /// PrintNumber(u32), - /// AddOne(u32, oneshot::Sender), - /// } - /// - /// #[derive(Clone)] - /// struct LocalSpawner { - /// send: mpsc::UnboundedSender, - /// } - /// - /// impl LocalSpawner { - /// pub fn new() -> Self { - /// let (send, mut recv) = mpsc::unbounded_channel(); - /// - /// let rt = Builder::new_current_thread() - /// .enable_all() - /// .build() - /// .unwrap(); - /// - /// std::thread::spawn(move || { - /// let local = LocalSet::new(); - /// - /// local.spawn_local(async move { - /// while let Some(new_task) = recv.recv().await { - /// tokio::task::spawn_local(run_task(new_task)); - /// } - /// // If the while loop returns, then all the LocalSpawner - /// // objects have been dropped. - /// }); - /// - /// // This will return once all senders are dropped and all - /// // spawned tasks have returned. - /// rt.block_on(local); - /// }); - /// - /// Self { - /// send, - /// } - /// } - /// - /// pub fn spawn(&self, task: Task) { - /// self.send.send(task).expect("Thread with LocalSet has shut down."); - /// } - /// } - /// - /// // This task may do !Send stuff. We use printing a number as an example, - /// // but it could be anything. - /// // - /// // The Task struct is an enum to support spawning many different kinds - /// // of operations. - /// async fn run_task(task: Task) { - /// match task { - /// Task::PrintNumber(n) => { - /// println!("{}", n); - /// }, - /// Task::AddOne(n, response) => { - /// // We ignore failures to send the response. 
- /// let _ = response.send(n + 1); - /// }, - /// } - /// } - /// - /// #[tokio::main] - /// async fn main() { - /// let spawner = LocalSpawner::new(); - /// - /// let (send, response) = oneshot::channel(); - /// spawner.spawn(Task::AddOne(10, send)); - /// let eleven = response.await.unwrap(); - /// assert_eq!(eleven, 11); - /// } - /// ``` - /// - /// [`Send`]: trait@std::marker::Send - /// [local task set]: struct@LocalSet - /// [`Runtime::block_on`]: method@crate::runtime::Runtime::block_on - /// [`task::spawn_local`]: fn@spawn_local - /// [`mpsc`]: mod@crate::sync::mpsc - pub struct LocalSet { - /// Current scheduler tick. - tick: Cell, - - /// State available from thread-local. - context: Rc, - - /// This type should not be Send. - _not_send: PhantomData<*const ()>, - } -} - -/// State available from the thread-local. -struct Context { - /// State shared between threads. - shared: Arc, - - /// True if a task panicked without being handled and the local set is - /// configured to shutdown on unhandled panic. - unhandled_panic: Cell, -} - -/// LocalSet state shared between threads. -struct Shared { - /// # Safety - /// - /// This field must *only* be accessed from the thread that owns the - /// `LocalSet` (i.e., `Thread::current().id() == owner`). - local_state: LocalState, - - /// Remote run queue sender. - queue: Mutex>>>>, - - /// Wake the `LocalSet` task. - waker: AtomicWaker, - - /// How to respond to unhandled task panics. - #[cfg(tokio_unstable)] - pub(crate) unhandled_panic: crate::runtime::UnhandledPanic, -} - -/// Tracks the `LocalSet` state that must only be accessed from the thread that -/// created the `LocalSet`. -struct LocalState { - /// The `ThreadId` of the thread that owns the `LocalSet`. - owner: ThreadId, - - /// Local run queue sender and receiver. - local_queue: UnsafeCell>>>, - - /// Collection of all active tasks spawned onto this executor. - owned: LocalOwnedTasks>, -} - -pin_project! 
{ - #[derive(Debug)] - struct RunUntil<'a, F> { - local_set: &'a LocalSet, - #[pin] - future: F, - } -} - -tokio_thread_local!(static CURRENT: LocalData = const { LocalData { - ctx: RcCell::new(), -} }); - -struct LocalData { - ctx: RcCell, -} - -cfg_rt! { - /// Spawns a `!Send` future on the current [`LocalSet`]. - /// - /// The spawned future will run on the same thread that called `spawn_local`. - /// - /// The provided future will start running in the background immediately - /// when `spawn_local` is called, even if you don't await the returned - /// `JoinHandle`. - /// - /// # Panics - /// - /// This function panics if called outside of a [`LocalSet`]. - /// - /// Note that if [`tokio::spawn`] is used from within a `LocalSet`, the - /// resulting new task will _not_ be inside the `LocalSet`, so you must use - /// `spawn_local` if you want to stay within the `LocalSet`. - /// - /// # Examples - /// - /// ```rust - /// use std::rc::Rc; - /// use tokio::task; - /// - /// #[tokio::main] - /// async fn main() { - /// let nonsend_data = Rc::new("my nonsend data..."); - /// - /// let local = task::LocalSet::new(); - /// - /// // Run the local task set. - /// local.run_until(async move { - /// let nonsend_data = nonsend_data.clone(); - /// task::spawn_local(async move { - /// println!("{}", nonsend_data); - /// // ... - /// }).await.unwrap(); - /// }).await; - /// } - /// ``` - /// - /// [`LocalSet`]: struct@crate::task::LocalSet - /// [`tokio::spawn`]: fn@crate::task::spawn - #[track_caller] - pub fn spawn_local(future: F) -> JoinHandle - where - F: Future + 'static, - F::Output: 'static, - { - spawn_local_inner(future, None) - } - - - #[track_caller] - pub(super) fn spawn_local_inner(future: F, name: Option<&str>) -> JoinHandle - where F: Future + 'static, - F::Output: 'static - { - match CURRENT.with(|LocalData { ctx, .. 
}| ctx.get()) { - None => panic!("`spawn_local` called from outside of a `task::LocalSet`"), - Some(cx) => cx.spawn(future, name) - } - } -} - -/// Initial queue capacity. -const INITIAL_CAPACITY: usize = 64; - -/// Max number of tasks to poll per tick. -const MAX_TASKS_PER_TICK: usize = 61; - -/// How often it check the remote queue first. -const REMOTE_FIRST_INTERVAL: u8 = 31; - -/// Context guard for LocalSet -pub struct LocalEnterGuard(Option>); - -impl Drop for LocalEnterGuard { - fn drop(&mut self) { - CURRENT.with(|LocalData { ctx, .. }| { - ctx.set(self.0.take()); - }) - } -} - -impl fmt::Debug for LocalEnterGuard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LocalEnterGuard").finish() - } -} - -impl LocalSet { - /// Returns a new local task set. - pub fn new() -> LocalSet { - let owner = context::thread_id().expect("cannot create LocalSet during thread shutdown"); - - LocalSet { - tick: Cell::new(0), - context: Rc::new(Context { - shared: Arc::new(Shared { - local_state: LocalState { - owner, - owned: LocalOwnedTasks::new(), - local_queue: UnsafeCell::new(VecDeque::with_capacity(INITIAL_CAPACITY)), - }, - queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))), - waker: AtomicWaker::new(), - #[cfg(tokio_unstable)] - unhandled_panic: crate::runtime::UnhandledPanic::Ignore, - }), - unhandled_panic: Cell::new(false), - }), - _not_send: PhantomData, - } - } - - /// Enters the context of this `LocalSet`. - /// - /// The [`spawn_local`] method will spawn tasks on the `LocalSet` whose - /// context you are inside. - /// - /// [`spawn_local`]: fn@crate::task::spawn_local - pub fn enter(&self) -> LocalEnterGuard { - CURRENT.with(|LocalData { ctx, .. }| { - let old = ctx.replace(Some(self.context.clone())); - LocalEnterGuard(old) - }) - } - - /// Spawns a `!Send` task onto the local task set. - /// - /// This task is guaranteed to be run on the current thread. 
- /// - /// Unlike the free function [`spawn_local`], this method may be used to - /// spawn local tasks when the `LocalSet` is _not_ running. The provided - /// future will start running once the `LocalSet` is next started, even if - /// you don't await the returned `JoinHandle`. - /// - /// # Examples - /// - /// ```rust - /// use tokio::task; - /// - /// #[tokio::main] - /// async fn main() { - /// let local = task::LocalSet::new(); - /// - /// // Spawn a future on the local set. This future will be run when - /// // we call `run_until` to drive the task set. - /// local.spawn_local(async { - /// // ... - /// }); - /// - /// // Run the local task set. - /// local.run_until(async move { - /// // ... - /// }).await; - /// - /// // When `run` finishes, we can spawn _more_ futures, which will - /// // run in subsequent calls to `run_until`. - /// local.spawn_local(async { - /// // ... - /// }); - /// - /// local.run_until(async move { - /// // ... - /// }).await; - /// } - /// ``` - /// [`spawn_local`]: fn@spawn_local - #[track_caller] - pub fn spawn_local(&self, future: F) -> JoinHandle - where - F: Future + 'static, - F::Output: 'static, - { - self.spawn_named(future, None) - } - - /// Runs a future to completion on the provided runtime, driving any local - /// futures spawned on this task set on the current thread. - /// - /// This runs the given future on the runtime, blocking until it is - /// complete, and yielding its resolved result. Any tasks or timers which - /// the future spawns internally will be executed on the runtime. The future - /// may also call [`spawn_local`] to spawn_local additional local futures on the - /// current thread. - /// - /// This method should not be called from an asynchronous context. - /// - /// # Panics - /// - /// This function panics if the executor is at capacity, if the provided - /// future panics, or if called within an asynchronous execution context. 
- /// - /// # Notes - /// - /// Since this function internally calls [`Runtime::block_on`], and drives - /// futures in the local task set inside that call to `block_on`, the local - /// futures may not use [in-place blocking]. If a blocking call needs to be - /// issued from a local task, the [`spawn_blocking`] API may be used instead. - /// - /// For example, this will panic: - /// ```should_panic - /// use tokio::runtime::Runtime; - /// use tokio::task; - /// - /// let rt = Runtime::new().unwrap(); - /// let local = task::LocalSet::new(); - /// local.block_on(&rt, async { - /// let join = task::spawn_local(async { - /// let blocking_result = task::block_in_place(|| { - /// // ... - /// }); - /// // ... - /// }); - /// join.await.unwrap(); - /// }) - /// ``` - /// This, however, will not panic: - /// ``` - /// use tokio::runtime::Runtime; - /// use tokio::task; - /// - /// let rt = Runtime::new().unwrap(); - /// let local = task::LocalSet::new(); - /// local.block_on(&rt, async { - /// let join = task::spawn_local(async { - /// let blocking_result = task::spawn_blocking(|| { - /// // ... - /// }).await; - /// // ... - /// }); - /// join.await.unwrap(); - /// }) - /// ``` - /// - /// [`spawn_local`]: fn@spawn_local - /// [`Runtime::block_on`]: method@crate::runtime::Runtime::block_on - /// [in-place blocking]: fn@crate::task::block_in_place - /// [`spawn_blocking`]: fn@crate::task::spawn_blocking - #[track_caller] - #[cfg(feature = "rt")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - pub fn block_on(&self, rt: &crate::runtime::Runtime, future: F) -> F::Output - where - F: Future, - { - rt.block_on(self.run_until(future)) - } - - /// Runs a future to completion on the local set, returning its output. - /// - /// This returns a future that runs the given future with a local set, - /// allowing it to call [`spawn_local`] to spawn additional `!Send` futures. 
- /// Any local futures spawned on the local set will be driven in the - /// background until the future passed to `run_until` completes. When the future - /// passed to `run` finishes, any local futures which have not completed - /// will remain on the local set, and will be driven on subsequent calls to - /// `run_until` or when [awaiting the local set] itself. - /// - /// # Examples - /// - /// ```rust - /// use tokio::task; - /// - /// #[tokio::main] - /// async fn main() { - /// task::LocalSet::new().run_until(async { - /// task::spawn_local(async move { - /// // ... - /// }).await.unwrap(); - /// // ... - /// }).await; - /// } - /// ``` - /// - /// [`spawn_local`]: fn@spawn_local - /// [awaiting the local set]: #awaiting-a-localset - pub async fn run_until(&self, future: F) -> F::Output - where - F: Future, - { - let run_until = RunUntil { - future, - local_set: self, - }; - run_until.await - } - - #[track_caller] - pub(in crate::task) fn spawn_named( - &self, - future: F, - name: Option<&str>, - ) -> JoinHandle - where - F: Future + 'static, - F::Output: 'static, - { - let handle = self.context.spawn(future, name); - - // Because a task was spawned from *outside* the `LocalSet`, wake the - // `LocalSet` future to execute the new task, if it hasn't been woken. - // - // Spawning via the free fn `spawn` does not require this, as it can - // only be called from *within* a future executing on the `LocalSet` — - // in that case, the `LocalSet` must already be awake. - self.context.shared.waker.wake(); - handle - } - - /// Ticks the scheduler, returning whether the local future needs to be - /// notified again. 
- fn tick(&self) -> bool { - for _ in 0..MAX_TASKS_PER_TICK { - // Make sure we didn't hit an unhandled panic - if self.context.unhandled_panic.get() { - panic!("a spawned task panicked and the LocalSet is configured to shutdown on unhandled panic"); - } - - match self.next_task() { - // Run the task - // - // Safety: As spawned tasks are `!Send`, `run_unchecked` must be - // used. We are responsible for maintaining the invariant that - // `run_unchecked` is only called on threads that spawned the - // task initially. Because `LocalSet` itself is `!Send`, and - // `spawn_local` spawns into the `LocalSet` on the current - // thread, the invariant is maintained. - Some(task) => crate::runtime::coop::budget(|| task.run()), - // We have fully drained the queue of notified tasks, so the - // local future doesn't need to be notified again — it can wait - // until something else wakes a task in the local set. - None => return false, - } - } - - true - } - - fn next_task(&self) -> Option>> { - let tick = self.tick.get(); - self.tick.set(tick.wrapping_add(1)); - - let task = if tick % REMOTE_FIRST_INTERVAL == 0 { - self.context - .shared - .queue - .lock() - .as_mut() - .and_then(|queue| queue.pop_front()) - .or_else(|| self.pop_local()) - } else { - self.pop_local().or_else(|| { - self.context - .shared - .queue - .lock() - .as_mut() - .and_then(|queue| queue.pop_front()) - }) - }; - - task.map(|task| unsafe { - // Safety: because the `LocalSet` itself is `!Send`, we know we are - // on the same thread if we have access to the `LocalSet`, and can - // therefore access the local run queue. - self.context.shared.local_state.assert_owner(task) - }) - } - - fn pop_local(&self) -> Option>> { - unsafe { - // Safety: because the `LocalSet` itself is `!Send`, we know we are - // on the same thread if we have access to the `LocalSet`, and can - // therefore access the local run queue. 
- self.context.shared.local_state.task_pop_front() - } - } - - fn with(&self, f: impl FnOnce() -> T) -> T { - CURRENT.with(|LocalData { ctx, .. }| { - struct Reset<'a> { - ctx_ref: &'a RcCell, - val: Option>, - } - impl<'a> Drop for Reset<'a> { - fn drop(&mut self) { - self.ctx_ref.set(self.val.take()); - } - } - let old = ctx.replace(Some(self.context.clone())); - - let _reset = Reset { - ctx_ref: ctx, - val: old, - }; - - f() - }) - } - - /// This method is like `with`, but it just calls `f` without setting the thread-local if that - /// fails. - fn with_if_possible(&self, f: impl FnOnce() -> T) -> T { - let mut f = Some(f); - - let res = CURRENT.try_with(|LocalData { ctx, .. }| { - struct Reset<'a> { - ctx_ref: &'a RcCell, - val: Option>, - } - impl<'a> Drop for Reset<'a> { - fn drop(&mut self) { - self.ctx_ref.replace(self.val.take()); - } - } - let old = ctx.replace(Some(self.context.clone())); - - let _reset = Reset { - ctx_ref: ctx, - val: old, - }; - - (f.take().unwrap())() - }); - - match res { - Ok(res) => res, - Err(_access_error) => (f.take().unwrap())(), - } - } -} - -cfg_unstable! { - impl LocalSet { - /// Configure how the `LocalSet` responds to an unhandled panic on a - /// spawned task. - /// - /// By default, an unhandled panic (i.e. a panic not caught by - /// [`std::panic::catch_unwind`]) has no impact on the `LocalSet`'s - /// execution. The panic is error value is forwarded to the task's - /// [`JoinHandle`] and all other spawned tasks continue running. - /// - /// The `unhandled_panic` option enables configuring this behavior. - /// - /// * `UnhandledPanic::Ignore` is the default behavior. Panics on - /// spawned tasks have no impact on the `LocalSet`'s execution. - /// * `UnhandledPanic::ShutdownRuntime` will force the `LocalSet` to - /// shutdown immediately when a spawned task panics even if that - /// task's `JoinHandle` has not been dropped. 
All other spawned tasks - /// will immediately terminate and further calls to - /// [`LocalSet::block_on`] and [`LocalSet::run_until`] will panic. - /// - /// # Panics - /// - /// This method panics if called after the `LocalSet` has started - /// running. - /// - /// # Unstable - /// - /// This option is currently unstable and its implementation is - /// incomplete. The API may change or be removed in the future. See - /// tokio-rs/tokio#4516 for more details. - /// - /// # Examples - /// - /// The following demonstrates a `LocalSet` configured to shutdown on - /// panic. The first spawned task panics and results in the `LocalSet` - /// shutting down. The second spawned task never has a chance to - /// execute. The call to `run_until` will panic due to the runtime being - /// forcibly shutdown. - /// - /// ```should_panic - /// use tokio::runtime::UnhandledPanic; - /// - /// # #[tokio::main] - /// # async fn main() { - /// tokio::task::LocalSet::new() - /// .unhandled_panic(UnhandledPanic::ShutdownRuntime) - /// .run_until(async { - /// tokio::task::spawn_local(async { panic!("boom"); }); - /// tokio::task::spawn_local(async { - /// // This task never completes - /// }); - /// - /// // Do some work, but `run_until` will panic before it completes - /// # loop { tokio::task::yield_now().await; } - /// }) - /// .await; - /// # } - /// ``` - /// - /// [`JoinHandle`]: struct@crate::task::JoinHandle - pub fn unhandled_panic(&mut self, behavior: crate::runtime::UnhandledPanic) -> &mut Self { - // TODO: This should be set as a builder - Rc::get_mut(&mut self.context) - .and_then(|ctx| Arc::get_mut(&mut ctx.shared)) - .expect("Unhandled Panic behavior modified after starting LocalSet") - .unhandled_panic = behavior; - self - } - - /// Returns the [`Id`] of the current `LocalSet` runtime. 
- /// - /// # Examples - /// - /// ```rust - /// use tokio::task; - /// - /// #[tokio::main] - /// async fn main() { - /// let local_set = task::LocalSet::new(); - /// println!("Local set id: {}", local_set.id()); - /// } - /// ``` - /// - /// **Note**: This is an [unstable API][unstable]. The public API of this type - /// may break in 1.x releases. See [the documentation on unstable - /// features][unstable] for details. - /// - /// [unstable]: crate#unstable-features - /// [`Id`]: struct@crate::runtime::Id - pub fn id(&self) -> runtime::Id { - self.context.shared.local_state.owned.id.into() - } - } -} - -impl fmt::Debug for LocalSet { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("LocalSet").finish() - } -} - -impl Future for LocalSet { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - // Register the waker before starting to work - self.context.shared.waker.register_by_ref(cx.waker()); - - if self.with(|| self.tick()) { - // If `tick` returns true, we need to notify the local future again: - // there are still tasks remaining in the run queue. - cx.waker().wake_by_ref(); - Poll::Pending - - // Safety: called from the thread that owns `LocalSet`. Because - // `LocalSet` is `!Send`, this is safe. - } else if unsafe { self.context.shared.local_state.owned_is_empty() } { - // If the scheduler has no remaining futures, we're done! - Poll::Ready(()) - } else { - // There are still futures in the local set, but we've polled all the - // futures in the run queue. Therefore, we can just return Pending - // since the remaining futures will be woken from somewhere else. - Poll::Pending - } - } -} - -impl Default for LocalSet { - fn default() -> LocalSet { - LocalSet::new() - } -} - -impl Drop for LocalSet { - fn drop(&mut self) { - self.with_if_possible(|| { - // Shut down all tasks in the LocalOwnedTasks and close it to - // prevent new tasks from ever being added. 
- unsafe { - // Safety: called from the thread that owns `LocalSet` - self.context.shared.local_state.close_and_shutdown_all(); - } - - // We already called shutdown on all tasks above, so there is no - // need to call shutdown. - - // Safety: note that this *intentionally* bypasses the unsafe - // `Shared::local_queue()` method. This is in order to avoid the - // debug assertion that we are on the thread that owns the - // `LocalSet`, because on some systems (e.g. at least some macOS - // versions), attempting to get the current thread ID can panic due - // to the thread's local data that stores the thread ID being - // dropped *before* the `LocalSet`. - // - // Despite avoiding the assertion here, it is safe for us to access - // the local queue in `Drop`, because the `LocalSet` itself is - // `!Send`, so we can reasonably guarantee that it will not be - // `Drop`ped from another thread. - let local_queue = unsafe { - // Safety: called from the thread that owns `LocalSet` - self.context.shared.local_state.take_local_queue() - }; - for task in local_queue { - drop(task); - } - - // Take the queue from the Shared object to prevent pushing - // notifications to it in the future. 
- let queue = self.context.shared.queue.lock().take().unwrap(); - for task in queue { - drop(task); - } - - // Safety: called from the thread that owns `LocalSet` - assert!(unsafe { self.context.shared.local_state.owned_is_empty() }); - }); - } -} - -// === impl Context === - -impl Context { - #[track_caller] - fn spawn(&self, future: F, name: Option<&str>) -> JoinHandle - where - F: Future + 'static, - F::Output: 'static, - { - let id = crate::runtime::task::Id::next(); - let future = crate::util::trace::task(future, "local", name, id.as_u64()); - - // Safety: called from the thread that owns the `LocalSet` - let (handle, notified) = { - self.shared.local_state.assert_called_from_owner_thread(); - self.shared - .local_state - .owned - .bind(future, self.shared.clone(), id) - }; - - if let Some(notified) = notified { - self.shared.schedule(notified); - } - - handle - } -} - -// === impl LocalFuture === - -impl Future for RunUntil<'_, T> { - type Output = T::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - let me = self.project(); - - me.local_set.with(|| { - me.local_set - .context - .shared - .waker - .register_by_ref(cx.waker()); - - let _no_blocking = crate::runtime::context::disallow_block_in_place(); - let f = me.future; - - if let Poll::Ready(output) = f.poll(cx) { - return Poll::Ready(output); - } - - if me.local_set.tick() { - // If `tick` returns `true`, we need to notify the local future again: - // there are still tasks remaining in the run queue. - cx.waker().wake_by_ref(); - } - - Poll::Pending - }) - } -} - -impl Shared { - /// Schedule the provided task on the scheduler. - fn schedule(&self, task: task::Notified>) { - CURRENT.with(|localdata| { - match localdata.ctx.get() { - Some(cx) if cx.shared.ptr_eq(self) => unsafe { - // Safety: if the current `LocalSet` context points to this - // `LocalSet`, then we are on the thread that owns it. 
- cx.shared.local_state.task_push_back(task); - }, - - // We are on the thread that owns the `LocalSet`, so we can - // wake to the local queue. - _ if context::thread_id().ok() == Some(self.local_state.owner) => { - unsafe { - // Safety: we just checked that the thread ID matches - // the localset's owner, so this is safe. - self.local_state.task_push_back(task); - } - // We still have to wake the `LocalSet`, because it isn't - // currently being polled. - self.waker.wake(); - } - - // We are *not* on the thread that owns the `LocalSet`, so we - // have to wake to the remote queue. - _ => { - // First, check whether the queue is still there (if not, the - // LocalSet is dropped). Then push to it if so, and if not, - // do nothing. - let mut lock = self.queue.lock(); - - if let Some(queue) = lock.as_mut() { - queue.push_back(task); - drop(lock); - self.waker.wake(); - } - } - } - }); - } - - fn ptr_eq(&self, other: &Shared) -> bool { - std::ptr::eq(self, other) - } -} - -// This is safe because (and only because) we *pinky pwomise* to never touch the -// local run queue except from the thread that owns the `LocalSet`. -unsafe impl Sync for Shared {} - -impl task::Schedule for Arc { - fn release(&self, task: &Task) -> Option> { - // Safety, this is always called from the thread that owns `LocalSet` - unsafe { self.local_state.task_remove(task) } - } - - fn schedule(&self, task: task::Notified) { - Shared::schedule(self, task); - } - - cfg_unstable! { - fn unhandled_panic(&self) { - use crate::runtime::UnhandledPanic; - - match self.unhandled_panic { - UnhandledPanic::Ignore => { - // Do nothing - } - UnhandledPanic::ShutdownRuntime => { - // This hook is only called from within the runtime, so - // `CURRENT` should match with `&self`, i.e. there is no - // opportunity for a nested scheduler to be called. - CURRENT.with(|LocalData { ctx, .. 
}| match ctx.get() { - Some(cx) if Arc::ptr_eq(self, &cx.shared) => { - cx.unhandled_panic.set(true); - // Safety: this is always called from the thread that owns `LocalSet` - unsafe { cx.shared.local_state.close_and_shutdown_all(); } - } - _ => unreachable!("runtime core not set in CURRENT thread-local"), - }) - } - } - } - } -} - -impl LocalState { - unsafe fn task_pop_front(&self) -> Option>> { - // The caller ensures it is called from the same thread that owns - // the LocalSet. - self.assert_called_from_owner_thread(); - - self.local_queue.with_mut(|ptr| (*ptr).pop_front()) - } - - unsafe fn task_push_back(&self, task: task::Notified>) { - // The caller ensures it is called from the same thread that owns - // the LocalSet. - self.assert_called_from_owner_thread(); - - self.local_queue.with_mut(|ptr| (*ptr).push_back(task)) - } - - unsafe fn take_local_queue(&self) -> VecDeque>> { - // The caller ensures it is called from the same thread that owns - // the LocalSet. - self.assert_called_from_owner_thread(); - - self.local_queue.with_mut(|ptr| std::mem::take(&mut (*ptr))) - } - - unsafe fn task_remove(&self, task: &Task>) -> Option>> { - // The caller ensures it is called from the same thread that owns - // the LocalSet. - self.assert_called_from_owner_thread(); - - self.owned.remove(task) - } - - /// Returns true if the `LocalSet` does not have any spawned tasks - unsafe fn owned_is_empty(&self) -> bool { - // The caller ensures it is called from the same thread that owns - // the LocalSet. - self.assert_called_from_owner_thread(); - - self.owned.is_empty() - } - - unsafe fn assert_owner( - &self, - task: task::Notified>, - ) -> task::LocalNotified> { - // The caller ensures it is called from the same thread that owns - // the LocalSet. - self.assert_called_from_owner_thread(); - - self.owned.assert_owner(task) - } - - unsafe fn close_and_shutdown_all(&self) { - // The caller ensures it is called from the same thread that owns - // the LocalSet. 
- self.assert_called_from_owner_thread(); - - self.owned.close_and_shutdown_all() - } - - #[track_caller] - fn assert_called_from_owner_thread(&self) { - // FreeBSD has some weirdness around thread-local destruction. - // TODO: remove this hack when thread id is cleaned up - #[cfg(not(any(target_os = "openbsd", target_os = "freebsd")))] - debug_assert!( - // if we couldn't get the thread ID because we're dropping the local - // data, skip the assertion --- the `Drop` impl is not going to be - // called from another thread, because `LocalSet` is `!Send` - context::thread_id() - .map(|id| id == self.owner) - .unwrap_or(true), - "`LocalSet`'s local run queue must not be accessed by another thread!" - ); - } -} - -// This is `Send` because it is stored in `Shared`. It is up to the caller to -// ensure they are on the same thread that owns the `LocalSet`. -unsafe impl Send for LocalState {} - -#[cfg(all(test, not(loom)))] -mod tests { - use super::*; - - // Does a `LocalSet` running on a current-thread runtime...basically work? - // - // This duplicates a test in `tests/task_local_set.rs`, but because this is - // a lib test, it wil run under Miri, so this is necessary to catch stacked - // borrows violations in the `LocalSet` implementation. - #[test] - fn local_current_thread_scheduler() { - let f = async { - LocalSet::new() - .run_until(async { - spawn_local(async {}).await.unwrap(); - }) - .await; - }; - crate::runtime::Builder::new_current_thread() - .build() - .expect("rt") - .block_on(f) - } - - // Tests that when a task on a `LocalSet` is woken by an io driver on the - // same thread, the task is woken to the localset's local queue rather than - // its remote queue. - // - // This test has to be defined in the `local.rs` file as a lib test, rather - // than in `tests/`, because it makes assertions about the local set's - // internal state. 
- #[test] - fn wakes_to_local_queue() { - use super::*; - use crate::sync::Notify; - let rt = crate::runtime::Builder::new_current_thread() - .build() - .expect("rt"); - rt.block_on(async { - let local = LocalSet::new(); - let notify = Arc::new(Notify::new()); - let task = local.spawn_local({ - let notify = notify.clone(); - async move { - notify.notified().await; - } - }); - let mut run_until = Box::pin(local.run_until(async move { - task.await.unwrap(); - })); - - // poll the run until future once - crate::future::poll_fn(|cx| { - let _ = run_until.as_mut().poll(cx); - Poll::Ready(()) - }) - .await; - - notify.notify_one(); - let task = unsafe { local.context.shared.local_state.task_pop_front() }; - // TODO(eliza): it would be nice to be able to assert that this is - // the local task. - assert!( - task.is_some(), - "task should have been notified to the LocalSet's local queue" - ); - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/task/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,333 +0,0 @@ -//! Asynchronous green-threads. -//! -//! ## What are Tasks? -//! -//! A _task_ is a light weight, non-blocking unit of execution. A task is similar -//! to an OS thread, but rather than being managed by the OS scheduler, they are -//! managed by the [Tokio runtime][rt]. Another name for this general pattern is -//! [green threads]. If you are familiar with [Go's goroutines], [Kotlin's -//! coroutines], or [Erlang's processes], you can think of Tokio's tasks as -//! something similar. -//! -//! Key points about tasks include: -//! -//! * Tasks are **light weight**. Because tasks are scheduled by the Tokio -//! runtime rather than the operating system, creating new tasks or switching -//! between tasks does not require a context switch and has fairly low -//! 
overhead. Creating, running, and destroying large numbers of tasks is -//! quite cheap, especially compared to OS threads. -//! -//! * Tasks are scheduled **cooperatively**. Most operating systems implement -//! _preemptive multitasking_. This is a scheduling technique where the -//! operating system allows each thread to run for a period of time, and then -//! _preempts_ it, temporarily pausing that thread and switching to another. -//! Tasks, on the other hand, implement _cooperative multitasking_. In -//! cooperative multitasking, a task is allowed to run until it _yields_, -//! indicating to the Tokio runtime's scheduler that it cannot currently -//! continue executing. When a task yields, the Tokio runtime switches to -//! executing the next task. -//! -//! * Tasks are **non-blocking**. Typically, when an OS thread performs I/O or -//! must synchronize with another thread, it _blocks_, allowing the OS to -//! schedule another thread. When a task cannot continue executing, it must -//! yield instead, allowing the Tokio runtime to schedule another task. Tasks -//! should generally not perform system calls or other operations that could -//! block a thread, as this would prevent other tasks running on the same -//! thread from executing as well. Instead, this module provides APIs for -//! running blocking operations in an asynchronous context. -//! -//! [rt]: crate::runtime -//! [green threads]: https://en.wikipedia.org/wiki/Green_threads -//! [Go's goroutines]: https://tour.golang.org/concurrency/1 -//! [Kotlin's coroutines]: https://kotlinlang.org/docs/reference/coroutines-overview.html -//! [Erlang's processes]: http://erlang.org/doc/getting_started/conc_prog.html#processes -//! -//! ## Working with Tasks -//! -//! This module provides the following APIs for working with tasks: -//! -//! ### Spawning -//! -//! Perhaps the most important function in this module is [`task::spawn`]. This -//! 
function can be thought of as an async equivalent to the standard library's -//! [`thread::spawn`][`std::thread::spawn`]. It takes an `async` block or other -//! [future], and creates a new task to run that work concurrently: -//! -//! ``` -//! use tokio::task; -//! -//! # async fn doc() { -//! task::spawn(async { -//! // perform some work here... -//! }); -//! # } -//! ``` -//! -//! Like [`std::thread::spawn`], `task::spawn` returns a [`JoinHandle`] struct. -//! A `JoinHandle` is itself a future which may be used to await the output of -//! the spawned task. For example: -//! -//! ``` -//! use tokio::task; -//! -//! # #[tokio::main] async fn main() -> Result<(), Box> { -//! let join = task::spawn(async { -//! // ... -//! "hello world!" -//! }); -//! -//! // ... -//! -//! // Await the result of the spawned task. -//! let result = join.await?; -//! assert_eq!(result, "hello world!"); -//! # Ok(()) -//! # } -//! ``` -//! -//! Again, like `std::thread`'s [`JoinHandle` type][thread_join], if the spawned -//! task panics, awaiting its `JoinHandle` will return a [`JoinError`]. For -//! example: -//! -//! ``` -//! use tokio::task; -//! -//! # #[tokio::main] async fn main() { -//! let join = task::spawn(async { -//! panic!("something bad happened!") -//! }); -//! -//! // The returned result indicates that the task failed. -//! assert!(join.await.is_err()); -//! # } -//! ``` -//! -//! `spawn`, `JoinHandle`, and `JoinError` are present when the "rt" -//! feature flag is enabled. -//! -//! [`task::spawn`]: crate::task::spawn() -//! [future]: std::future::Future -//! [`std::thread::spawn`]: std::thread::spawn -//! [`JoinHandle`]: crate::task::JoinHandle -//! [thread_join]: std::thread::JoinHandle -//! [`JoinError`]: crate::task::JoinError -//! -//! ### Blocking and Yielding -//! -//! As we discussed above, code running in asynchronous tasks should not perform -//! operations that can block. A blocking operation performed in a task running -//! 
on a thread that is also running other tasks would block the entire thread, -//! preventing other tasks from running. -//! -//! Instead, Tokio provides two APIs for running blocking operations in an -//! asynchronous context: [`task::spawn_blocking`] and [`task::block_in_place`]. -//! -//! Be aware that if you call a non-async method from async code, that non-async -//! method is still inside the asynchronous context, so you should also avoid -//! blocking operations there. This includes destructors of objects destroyed in -//! async code. -//! -//! #### spawn_blocking -//! -//! The `task::spawn_blocking` function is similar to the `task::spawn` function -//! discussed in the previous section, but rather than spawning an -//! _non-blocking_ future on the Tokio runtime, it instead spawns a -//! _blocking_ function on a dedicated thread pool for blocking tasks. For -//! example: -//! -//! ``` -//! use tokio::task; -//! -//! # async fn docs() { -//! task::spawn_blocking(|| { -//! // do some compute-heavy work or call synchronous code -//! }); -//! # } -//! ``` -//! -//! Just like `task::spawn`, `task::spawn_blocking` returns a `JoinHandle` -//! which we can use to await the result of the blocking operation: -//! -//! ```rust -//! # use tokio::task; -//! # async fn docs() -> Result<(), Box>{ -//! let join = task::spawn_blocking(|| { -//! // do some compute-heavy work or call synchronous code -//! "blocking completed" -//! }); -//! -//! let result = join.await?; -//! assert_eq!(result, "blocking completed"); -//! # Ok(()) -//! # } -//! ``` -//! -//! #### block_in_place -//! -//! When using the [multi-threaded runtime][rt-multi-thread], the [`task::block_in_place`] -//! function is also available. Like `task::spawn_blocking`, this function -//! allows running a blocking operation from an asynchronous context. Unlike -//! `spawn_blocking`, however, `block_in_place` works by transitioning the -//! 
_current_ worker thread to a blocking thread, moving other tasks running on -//! that thread to another worker thread. This can improve performance by avoiding -//! context switches. -//! -//! For example: -//! -//! ``` -//! use tokio::task; -//! -//! # async fn docs() { -//! let result = task::block_in_place(|| { -//! // do some compute-heavy work or call synchronous code -//! "blocking completed" -//! }); -//! -//! assert_eq!(result, "blocking completed"); -//! # } -//! ``` -//! -//! #### yield_now -//! -//! In addition, this module provides a [`task::yield_now`] async function -//! that is analogous to the standard library's [`thread::yield_now`]. Calling -//! and `await`ing this function will cause the current task to yield to the -//! Tokio runtime's scheduler, allowing other tasks to be -//! scheduled. Eventually, the yielding task will be polled again, allowing it -//! to execute. For example: -//! -//! ```rust -//! use tokio::task; -//! -//! # #[tokio::main] async fn main() { -//! async { -//! task::spawn(async { -//! // ... -//! println!("spawned task done!") -//! }); -//! -//! // Yield, allowing the newly-spawned task to execute first. -//! task::yield_now().await; -//! println!("main task done!"); -//! } -//! # .await; -//! # } -//! ``` -//! -//! ### Cooperative scheduling -//! -//! A single call to [`poll`] on a top-level task may potentially do a lot of -//! work before it returns `Poll::Pending`. If a task runs for a long period of -//! time without yielding back to the executor, it can starve other tasks -//! waiting on that executor to execute them, or drive underlying resources. -//! Since Rust does not have a runtime, it is difficult to forcibly preempt a -//! long-running task. Instead, this module provides an opt-in mechanism for -//! futures to collaborate with the executor to avoid starvation. -//! -//! Consider a future like this one: -//! -//! ``` -//! # use tokio_stream::{Stream, StreamExt}; -//! async fn drop_all(mut input: I) { -//! 
while let Some(_) = input.next().await {} -//! } -//! ``` -//! -//! It may look harmless, but consider what happens under heavy load if the -//! input stream is _always_ ready. If we spawn `drop_all`, the task will never -//! yield, and will starve other tasks and resources on the same executor. -//! -//! To account for this, Tokio has explicit yield points in a number of library -//! functions, which force tasks to return to the executor periodically. -//! -//! -//! #### unconstrained -//! -//! If necessary, [`task::unconstrained`] lets you opt a future out of of Tokio's cooperative -//! scheduling. When a future is wrapped with `unconstrained`, it will never be forced to yield to -//! Tokio. For example: -//! -//! ``` -//! # #[tokio::main] -//! # async fn main() { -//! use tokio::{task, sync::mpsc}; -//! -//! let fut = async { -//! let (tx, mut rx) = mpsc::unbounded_channel(); -//! -//! for i in 0..1000 { -//! let _ = tx.send(()); -//! // This will always be ready. If coop was in effect, this code would be forced to yield -//! // periodically. However, if left unconstrained, then this code will never yield. -//! rx.recv().await; -//! } -//! }; -//! -//! task::unconstrained(fut).await; -//! # } -//! ``` -//! -//! [`task::spawn_blocking`]: crate::task::spawn_blocking -//! [`task::block_in_place`]: crate::task::block_in_place -//! [rt-multi-thread]: ../runtime/index.html#threaded-scheduler -//! [`task::yield_now`]: crate::task::yield_now() -//! [`thread::yield_now`]: std::thread::yield_now -//! [`task::unconstrained`]: crate::task::unconstrained() -//! [`poll`]: method@std::future::Future::poll - -cfg_rt! { - pub use crate::runtime::task::{JoinError, JoinHandle}; - - cfg_not_wasi! { - mod blocking; - pub use blocking::spawn_blocking; - } - - mod spawn; - pub use spawn::spawn; - - cfg_rt_multi_thread! { - pub use blocking::block_in_place; - } - - mod yield_now; - pub use yield_now::yield_now; - - cfg_unstable! 
{ - mod consume_budget; - pub use consume_budget::consume_budget; - } - - mod local; - pub use local::{spawn_local, LocalSet, LocalEnterGuard}; - - mod task_local; - pub use task_local::LocalKey; - - mod unconstrained; - pub use unconstrained::{unconstrained, Unconstrained}; - - #[doc(inline)] - pub use join_set::JoinSet; - pub use crate::runtime::task::AbortHandle; - - // Uses #[cfg(...)] instead of macro since the macro adds docsrs annotations. - #[cfg(not(tokio_unstable))] - mod join_set; - #[cfg(tokio_unstable)] - pub mod join_set; - - cfg_unstable! { - pub use crate::runtime::task::{Id, id, try_id}; - } - - cfg_trace! { - mod builder; - pub use builder::Builder; - } - - /// Task-related futures. - pub mod futures { - pub use super::task_local::TaskLocalFuture; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/spawn.rs s390-tools-2.33.1/rust-vendor/tokio/src/task/spawn.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/spawn.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/spawn.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,206 +0,0 @@ -use crate::task::JoinHandle; - -use std::future::Future; - -cfg_rt! { - /// Spawns a new asynchronous task, returning a - /// [`JoinHandle`](super::JoinHandle) for it. - /// - /// The provided future will start running in the background immediately - /// when `spawn` is called, even if you don't await the returned - /// `JoinHandle`. - /// - /// Spawning a task enables the task to execute concurrently to other tasks. The - /// spawned task may execute on the current thread, or it may be sent to a - /// different thread to be executed. The specifics depend on the current - /// [`Runtime`](crate::runtime::Runtime) configuration. - /// - /// It is guaranteed that spawn will not synchronously poll the task being spawned. - /// This means that calling spawn while holding a lock does not pose a risk of - /// deadlocking with the spawned task. 
- /// - /// There is no guarantee that a spawned task will execute to completion. - /// When a runtime is shutdown, all outstanding tasks are dropped, - /// regardless of the lifecycle of that task. - /// - /// This function must be called from the context of a Tokio runtime. Tasks running on - /// the Tokio runtime are always inside its context, but you can also enter the context - /// using the [`Runtime::enter`](crate::runtime::Runtime::enter()) method. - /// - /// # Examples - /// - /// In this example, a server is started and `spawn` is used to start a new task - /// that processes each received connection. - /// - /// ```no_run - /// use tokio::net::{TcpListener, TcpStream}; - /// - /// use std::io; - /// - /// async fn process(socket: TcpStream) { - /// // ... - /// # drop(socket); - /// } - /// - /// #[tokio::main] - /// async fn main() -> io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:8080").await?; - /// - /// loop { - /// let (socket, _) = listener.accept().await?; - /// - /// tokio::spawn(async move { - /// // Process each socket concurrently. - /// process(socket).await - /// }); - /// } - /// } - /// ``` - /// - /// To run multiple tasks in parallel and receive their results, join - /// handles can be stored in a vector. - /// ``` - /// # #[tokio::main(flavor = "current_thread")] async fn main() { - /// async fn my_background_op(id: i32) -> String { - /// let s = format!("Starting background task {}.", id); - /// println!("{}", s); - /// s - /// } - /// - /// let ops = vec![1, 2, 3]; - /// let mut tasks = Vec::with_capacity(ops.len()); - /// for op in ops { - /// // This call will make them start running in the background - /// // immediately. 
- /// tasks.push(tokio::spawn(my_background_op(op))); - /// } - /// - /// let mut outputs = Vec::with_capacity(tasks.len()); - /// for task in tasks { - /// outputs.push(task.await.unwrap()); - /// } - /// println!("{:?}", outputs); - /// # } - /// ``` - /// This example pushes the tasks to `outputs` in the order they were - /// started in. If you do not care about the ordering of the outputs, then - /// you can also use a [`JoinSet`]. - /// - /// [`JoinSet`]: struct@crate::task::JoinSet - /// - /// # Panics - /// - /// Panics if called from **outside** of the Tokio runtime. - /// - /// # Using `!Send` values from a task - /// - /// The task supplied to `spawn` must implement `Send`. However, it is - /// possible to **use** `!Send` values from the task as long as they only - /// exist between calls to `.await`. - /// - /// For example, this will work: - /// - /// ``` - /// use tokio::task; - /// - /// use std::rc::Rc; - /// - /// fn use_rc(rc: Rc<()>) { - /// // Do stuff w/ rc - /// # drop(rc); - /// } - /// - /// #[tokio::main] - /// async fn main() { - /// tokio::spawn(async { - /// // Force the `Rc` to stay in a scope with no `.await` - /// { - /// let rc = Rc::new(()); - /// use_rc(rc.clone()); - /// } - /// - /// task::yield_now().await; - /// }).await.unwrap(); - /// } - /// ``` - /// - /// This will **not** work: - /// - /// ```compile_fail - /// use tokio::task; - /// - /// use std::rc::Rc; - /// - /// fn use_rc(rc: Rc<()>) { - /// // Do stuff w/ rc - /// # drop(rc); - /// } - /// - /// #[tokio::main] - /// async fn main() { - /// tokio::spawn(async { - /// let rc = Rc::new(()); - /// - /// task::yield_now().await; - /// - /// use_rc(rc.clone()); - /// }).await.unwrap(); - /// } - /// ``` - /// - /// Holding on to a `!Send` value across calls to `.await` will result in - /// an unfriendly compile error message similar to: - /// - /// ```text - /// `[... 
some type ...]` cannot be sent between threads safely - /// ``` - /// - /// or: - /// - /// ```text - /// error[E0391]: cycle detected when processing `main` - /// ``` - #[track_caller] - pub fn spawn(future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - // preventing stack overflows on debug mode, by quickly sending the - // task to the heap. - if cfg!(debug_assertions) && std::mem::size_of::() > 2048 { - spawn_inner(Box::pin(future), None) - } else { - spawn_inner(future, None) - } - } - - #[track_caller] - pub(super) fn spawn_inner(future: T, name: Option<&str>) -> JoinHandle - where - T: Future + Send + 'static, - T::Output: Send + 'static, - { - use crate::runtime::{context, task}; - - #[cfg(all( - tokio_unstable, - tokio_taskdump, - feature = "rt", - target_os = "linux", - any( - target_arch = "aarch64", - target_arch = "x86", - target_arch = "x86_64" - ) - ))] - let future = task::trace::Trace::root(future); - let id = task::Id::next(); - let task = crate::util::trace::task(future, "task", name, id.as_u64()); - - match context::with_current(|handle| handle.spawn(task, id)) { - Ok(join_handle) => join_handle, - Err(e) => panic!("{}", e), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/task_local.rs s390-tools-2.33.1/rust-vendor/tokio/src/task/task_local.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/task_local.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/task_local.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,434 +0,0 @@ -use pin_project_lite::pin_project; -use std::cell::RefCell; -use std::error::Error; -use std::future::Future; -use std::marker::PhantomPinned; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{fmt, mem, thread}; - -/// Declares a new task-local key of type [`tokio::task::LocalKey`]. -/// -/// # Syntax -/// -/// The macro wraps any number of static declarations and makes them local to the current task. 
-/// Publicity and attributes for each static is preserved. For example: -/// -/// # Examples -/// -/// ``` -/// # use tokio::task_local; -/// task_local! { -/// pub static ONE: u32; -/// -/// #[allow(unused)] -/// static TWO: f32; -/// } -/// # fn main() {} -/// ``` -/// -/// See [LocalKey documentation][`tokio::task::LocalKey`] for more -/// information. -/// -/// [`tokio::task::LocalKey`]: struct@crate::task::LocalKey -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "rt")))] -macro_rules! task_local { - // empty (base case for the recursion) - () => {}; - - ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty; $($rest:tt)*) => { - $crate::__task_local_inner!($(#[$attr])* $vis $name, $t); - $crate::task_local!($($rest)*); - }; - - ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty) => { - $crate::__task_local_inner!($(#[$attr])* $vis $name, $t); - } -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __task_local_inner { - ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty) => { - $(#[$attr])* - $vis static $name: $crate::task::LocalKey<$t> = { - std::thread_local! { - static __KEY: std::cell::RefCell> = const { std::cell::RefCell::new(None) }; - } - - $crate::task::LocalKey { inner: __KEY } - }; - }; -} - -/// A key for task-local data. -/// -/// This type is generated by the [`task_local!`] macro. -/// -/// Unlike [`std::thread::LocalKey`], `tokio::task::LocalKey` will -/// _not_ lazily initialize the value on first access. Instead, the -/// value is first initialized when the future containing -/// the task-local is first polled by a futures executor, like Tokio. -/// -/// # Examples -/// -/// ``` -/// # async fn dox() { -/// tokio::task_local! 
{ -/// static NUMBER: u32; -/// } -/// -/// NUMBER.scope(1, async move { -/// assert_eq!(NUMBER.get(), 1); -/// }).await; -/// -/// NUMBER.scope(2, async move { -/// assert_eq!(NUMBER.get(), 2); -/// -/// NUMBER.scope(3, async move { -/// assert_eq!(NUMBER.get(), 3); -/// }).await; -/// }).await; -/// # } -/// ``` -/// -/// [`std::thread::LocalKey`]: struct@std::thread::LocalKey -/// [`task_local!`]: ../macro.task_local.html -#[cfg_attr(docsrs, doc(cfg(feature = "rt")))] -pub struct LocalKey { - #[doc(hidden)] - pub inner: thread::LocalKey>>, -} - -impl LocalKey { - /// Sets a value `T` as the task-local value for the future `F`. - /// - /// On completion of `scope`, the task-local will be dropped. - /// - /// ### Panics - /// - /// If you poll the returned future inside a call to [`with`] or - /// [`try_with`] on the same `LocalKey`, then the call to `poll` will panic. - /// - /// ### Examples - /// - /// ``` - /// # async fn dox() { - /// tokio::task_local! { - /// static NUMBER: u32; - /// } - /// - /// NUMBER.scope(1, async move { - /// println!("task local value: {}", NUMBER.get()); - /// }).await; - /// # } - /// ``` - /// - /// [`with`]: fn@Self::with - /// [`try_with`]: fn@Self::try_with - pub fn scope(&'static self, value: T, f: F) -> TaskLocalFuture - where - F: Future, - { - TaskLocalFuture { - local: self, - slot: Some(value), - future: Some(f), - _pinned: PhantomPinned, - } - } - - /// Sets a value `T` as the task-local value for the closure `F`. - /// - /// On completion of `sync_scope`, the task-local will be dropped. - /// - /// ### Panics - /// - /// This method panics if called inside a call to [`with`] or [`try_with`] - /// on the same `LocalKey`. - /// - /// ### Examples - /// - /// ``` - /// # async fn dox() { - /// tokio::task_local! 
{ - /// static NUMBER: u32; - /// } - /// - /// NUMBER.sync_scope(1, || { - /// println!("task local value: {}", NUMBER.get()); - /// }); - /// # } - /// ``` - /// - /// [`with`]: fn@Self::with - /// [`try_with`]: fn@Self::try_with - #[track_caller] - pub fn sync_scope(&'static self, value: T, f: F) -> R - where - F: FnOnce() -> R, - { - let mut value = Some(value); - match self.scope_inner(&mut value, f) { - Ok(res) => res, - Err(err) => err.panic(), - } - } - - fn scope_inner(&'static self, slot: &mut Option, f: F) -> Result - where - F: FnOnce() -> R, - { - struct Guard<'a, T: 'static> { - local: &'static LocalKey, - slot: &'a mut Option, - } - - impl<'a, T: 'static> Drop for Guard<'a, T> { - fn drop(&mut self) { - // This should not panic. - // - // We know that the RefCell was not borrowed before the call to - // `scope_inner`, so the only way for this to panic is if the - // closure has created but not destroyed a RefCell guard. - // However, we never give user-code access to the guards, so - // there's no way for user-code to forget to destroy a guard. - // - // The call to `with` also should not panic, since the - // thread-local wasn't destroyed when we first called - // `scope_inner`, and it shouldn't have gotten destroyed since - // then. - self.local.inner.with(|inner| { - let mut ref_mut = inner.borrow_mut(); - mem::swap(self.slot, &mut *ref_mut); - }); - } - } - - self.inner.try_with(|inner| { - inner - .try_borrow_mut() - .map(|mut ref_mut| mem::swap(slot, &mut *ref_mut)) - })??; - - let guard = Guard { local: self, slot }; - - let res = f(); - - drop(guard); - - Ok(res) - } - - /// Accesses the current task-local and runs the provided closure. - /// - /// # Panics - /// - /// This function will panic if the task local doesn't have a value set. 
- #[track_caller] - pub fn with(&'static self, f: F) -> R - where - F: FnOnce(&T) -> R, - { - match self.try_with(f) { - Ok(res) => res, - Err(_) => panic!("cannot access a task-local storage value without setting it first"), - } - } - - /// Accesses the current task-local and runs the provided closure. - /// - /// If the task-local with the associated key is not present, this - /// method will return an `AccessError`. For a panicking variant, - /// see `with`. - pub fn try_with(&'static self, f: F) -> Result - where - F: FnOnce(&T) -> R, - { - // If called after the thread-local storing the task-local is destroyed, - // then we are outside of a closure where the task-local is set. - // - // Therefore, it is correct to return an AccessError if `try_with` - // returns an error. - let try_with_res = self.inner.try_with(|v| { - // This call to `borrow` cannot panic because no user-defined code - // runs while a `borrow_mut` call is active. - v.borrow().as_ref().map(f) - }); - - match try_with_res { - Ok(Some(res)) => Ok(res), - Ok(None) | Err(_) => Err(AccessError { _private: () }), - } - } -} - -impl LocalKey { - /// Returns a copy of the task-local value - /// if the task-local value implements `Copy`. - /// - /// # Panics - /// - /// This function will panic if the task local doesn't have a value set. - #[track_caller] - pub fn get(&'static self) -> T { - self.with(|v| *v) - } -} - -impl fmt::Debug for LocalKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("LocalKey { .. }") - } -} - -pin_project! { - /// A future that sets a value `T` of a task local for the future `F` during - /// its execution. - /// - /// The value of the task-local must be `'static` and will be dropped on the - /// completion of the future. - /// - /// Created by the function [`LocalKey::scope`](self::LocalKey::scope). - /// - /// ### Examples - /// - /// ``` - /// # async fn dox() { - /// tokio::task_local! 
{ - /// static NUMBER: u32; - /// } - /// - /// NUMBER.scope(1, async move { - /// println!("task local value: {}", NUMBER.get()); - /// }).await; - /// # } - /// ``` - pub struct TaskLocalFuture - where - T: 'static, - { - local: &'static LocalKey, - slot: Option, - #[pin] - future: Option, - #[pin] - _pinned: PhantomPinned, - } - - impl PinnedDrop for TaskLocalFuture { - fn drop(this: Pin<&mut Self>) { - let this = this.project(); - if mem::needs_drop::() && this.future.is_some() { - // Drop the future while the task-local is set, if possible. Otherwise - // the future is dropped normally when the `Option` field drops. - let mut future = this.future; - let _ = this.local.scope_inner(this.slot, || { - future.set(None); - }); - } - } - } -} - -impl Future for TaskLocalFuture { - type Output = F::Output; - - #[track_caller] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); - let mut future_opt = this.future; - - let res = this - .local - .scope_inner(this.slot, || match future_opt.as_mut().as_pin_mut() { - Some(fut) => { - let res = fut.poll(cx); - if res.is_ready() { - future_opt.set(None); - } - Some(res) - } - None => None, - }); - - match res { - Ok(Some(res)) => res, - Ok(None) => panic!("`TaskLocalFuture` polled after completion"), - Err(err) => err.panic(), - } - } -} - -impl fmt::Debug for TaskLocalFuture -where - T: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - /// Format the Option without Some. - struct TransparentOption<'a, T> { - value: &'a Option, - } - impl<'a, T: fmt::Debug> fmt::Debug for TransparentOption<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.value.as_ref() { - Some(value) => value.fmt(f), - // Hitting the None branch should not be possible. 
- None => f.pad(""), - } - } - } - - f.debug_struct("TaskLocalFuture") - .field("value", &TransparentOption { value: &self.slot }) - .finish() - } -} - -/// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with). -#[derive(Clone, Copy, Eq, PartialEq)] -pub struct AccessError { - _private: (), -} - -impl fmt::Debug for AccessError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AccessError").finish() - } -} - -impl fmt::Display for AccessError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt("task-local value not set", f) - } -} - -impl Error for AccessError {} - -enum ScopeInnerErr { - BorrowError, - AccessError, -} - -impl ScopeInnerErr { - #[track_caller] - fn panic(&self) -> ! { - match self { - Self::BorrowError => panic!("cannot enter a task-local scope while the task-local storage is borrowed"), - Self::AccessError => panic!("cannot enter a task-local scope during or after destruction of the underlying thread-local"), - } - } -} - -impl From for ScopeInnerErr { - fn from(_: std::cell::BorrowMutError) -> Self { - Self::BorrowError - } -} - -impl From for ScopeInnerErr { - fn from(_: std::thread::AccessError) -> Self { - Self::AccessError - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/unconstrained.rs s390-tools-2.33.1/rust-vendor/tokio/src/task/unconstrained.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/unconstrained.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/unconstrained.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,45 +0,0 @@ -use pin_project_lite::pin_project; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// Future for the [`unconstrained`](unconstrained) method. 
- #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - #[must_use = "Unconstrained does nothing unless polled"] - pub struct Unconstrained { - #[pin] - inner: F, - } -} - -impl Future for Unconstrained -where - F: Future, -{ - type Output = ::Output; - - cfg_coop! { - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let inner = self.project().inner; - crate::runtime::coop::with_unconstrained(|| inner.poll(cx)) - } - } - - cfg_not_coop! { - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let inner = self.project().inner; - inner.poll(cx) - } - } -} - -/// Turn off cooperative scheduling for a future. The future will never be forced to yield by -/// Tokio. Using this exposes your service to starvation if the unconstrained future never yields -/// otherwise. -/// -/// See also the usage example in the [task module](index.html#unconstrained). -#[cfg_attr(docsrs, doc(cfg(feature = "rt")))] -pub fn unconstrained(inner: F) -> Unconstrained { - Unconstrained { inner } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/task/yield_now.rs s390-tools-2.33.1/rust-vendor/tokio/src/task/yield_now.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/task/yield_now.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/task/yield_now.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,64 +0,0 @@ -use crate::runtime::context; - -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Yields execution back to the Tokio runtime. -/// -/// A task yields by awaiting on `yield_now()`, and may resume when that future -/// completes (with no output.) The current task will be re-added as a pending -/// task at the _back_ of the pending queue. Any other pending tasks will be -/// scheduled. No other waking is required for the task to continue. -/// -/// See also the usage example in the [task module](index.html#yield_now). 
-/// -/// ## Non-guarantees -/// -/// This function may not yield all the way up to the executor if there are any -/// special combinators above it in the call stack. For example, if a -/// [`tokio::select!`] has another branch complete during the same poll as the -/// `yield_now()`, then the yield is not propagated all the way up to the -/// runtime. -/// -/// It is generally not guaranteed that the runtime behaves like you expect it -/// to when deciding which task to schedule next after a call to `yield_now()`. -/// In particular, the runtime may choose to poll the task that just ran -/// `yield_now()` again immediately without polling any other tasks first. For -/// example, the runtime will not drive the IO driver between every poll of a -/// task, and this could result in the runtime polling the current task again -/// immediately even if there is another task that could make progress if that -/// other task is waiting for a notification from the IO driver. -/// -/// In general, changes to the order in which the runtime polls tasks is not -/// considered a breaking change, and your program should be correct no matter -/// which order the runtime polls your tasks in. 
-/// -/// [`tokio::select!`]: macro@crate::select -#[cfg_attr(docsrs, doc(cfg(feature = "rt")))] -pub async fn yield_now() { - /// Yield implementation - struct YieldNow { - yielded: bool, - } - - impl Future for YieldNow { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - ready!(crate::trace::trace_leaf(cx)); - - if self.yielded { - return Poll::Ready(()); - } - - self.yielded = true; - - context::defer(cx.waker()); - - Poll::Pending - } - } - - YieldNow { yielded: false }.await -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/time/clock.rs s390-tools-2.33.1/rust-vendor/tokio/src/time/clock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/time/clock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/time/clock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,311 +0,0 @@ -#![cfg_attr(not(feature = "rt"), allow(dead_code))] - -//! Source of time abstraction. -//! -//! By default, `std::time::Instant::now()` is used. However, when the -//! `test-util` feature flag is enabled, the values returned for `now()` are -//! configurable. - -cfg_not_test_util! { - use crate::time::{Instant}; - - #[derive(Debug, Clone)] - pub(crate) struct Clock {} - - pub(crate) fn now() -> Instant { - Instant::from_std(std::time::Instant::now()) - } - - impl Clock { - pub(crate) fn new(_enable_pausing: bool, _start_paused: bool) -> Clock { - Clock {} - } - - pub(crate) fn now(&self) -> Instant { - now() - } - } -} - -cfg_test_util! { - use crate::time::{Duration, Instant}; - use crate::loom::sync::Mutex; - use crate::loom::sync::atomic::Ordering; - use std::sync::atomic::AtomicBool as StdAtomicBool; - - cfg_rt! 
{ - #[track_caller] - fn with_clock(f: impl FnOnce(Option<&Clock>) -> Result) -> R { - use crate::runtime::Handle; - - let res = match Handle::try_current() { - Ok(handle) => f(Some(handle.inner.driver().clock())), - Err(ref e) if e.is_missing_context() => f(None), - Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR), - }; - - match res { - Ok(ret) => ret, - Err(msg) => panic!("{}", msg), - } - } - } - - cfg_not_rt! { - #[track_caller] - fn with_clock(f: impl FnOnce(Option<&Clock>) -> Result) -> R { - match f(None) { - Ok(ret) => ret, - Err(msg) => panic!("{}", msg), - } - } - } - - /// A handle to a source of time. - #[derive(Debug)] - pub(crate) struct Clock { - inner: Mutex, - } - - // Used to track if the clock was ever paused. This is an optimization to - // avoid touching the mutex if `test-util` was accidentally enabled in - // release mode. - // - // A static is used so we can avoid accessing the thread-local as well. The - // `std` AtomicBool is used directly because loom does not support static - // atomics. - static DID_PAUSE_CLOCK: StdAtomicBool = StdAtomicBool::new(false); - - #[derive(Debug)] - struct Inner { - /// True if the ability to pause time is enabled. - enable_pausing: bool, - - /// Instant to use as the clock's base instant. - base: std::time::Instant, - - /// Instant at which the clock was last unfrozen. - unfrozen: Option, - - /// Number of `inhibit_auto_advance` calls still in effect. - auto_advance_inhibit_count: usize, - } - - /// Pauses time. - /// - /// The current value of `Instant::now()` is saved and all subsequent calls - /// to `Instant::now()` will return the saved value. The saved value can be - /// changed by [`advance`] or by the time auto-advancing once the runtime - /// has no work to do. This only affects the `Instant` type in Tokio, and - /// the `Instant` in std continues to work as normal. - /// - /// Pausing time requires the `current_thread` Tokio runtime. 
This is the - /// default runtime used by `#[tokio::test]`. The runtime can be initialized - /// with time in a paused state using the `Builder::start_paused` method. - /// - /// For cases where time is immediately paused, it is better to pause - /// the time using the `main` or `test` macro: - /// ``` - /// #[tokio::main(flavor = "current_thread", start_paused = true)] - /// async fn main() { - /// println!("Hello world"); - /// } - /// ``` - /// - /// # Panics - /// - /// Panics if time is already frozen or if called from outside of a - /// `current_thread` Tokio runtime. - /// - /// # Auto-advance - /// - /// If time is paused and the runtime has no work to do, the clock is - /// auto-advanced to the next pending timer. This means that [`Sleep`] or - /// other timer-backed primitives can cause the runtime to advance the - /// current time when awaited. - /// - /// [`Sleep`]: crate::time::Sleep - /// [`advance`]: crate::time::advance - #[track_caller] - pub fn pause() { - with_clock(|maybe_clock| { - match maybe_clock { - Some(clock) => clock.pause(), - None => Err("time cannot be frozen from outside the Tokio runtime"), - } - }) - } - - /// Resumes time. - /// - /// Clears the saved `Instant::now()` value. Subsequent calls to - /// `Instant::now()` will return the value returned by the system call. - /// - /// # Panics - /// - /// Panics if time is not frozen or if called from outside of the Tokio - /// runtime. - #[track_caller] - pub fn resume() { - with_clock(|maybe_clock| { - let clock = match maybe_clock { - Some(clock) => clock, - None => return Err("time cannot be frozen from outside the Tokio runtime"), - }; - - let mut inner = clock.inner.lock(); - - if inner.unfrozen.is_some() { - return Err("time is not frozen"); - } - - inner.unfrozen = Some(std::time::Instant::now()); - Ok(()) - }) - } - - /// Advances time. - /// - /// Increments the saved `Instant::now()` value by `duration`. 
Subsequent - /// calls to `Instant::now()` will return the result of the increment. - /// - /// This function will make the current time jump forward by the given - /// duration in one jump. This means that all `sleep` calls with a deadline - /// before the new time will immediately complete "at the same time", and - /// the runtime is free to poll them in any order. Additionally, this - /// method will not wait for the `sleep` calls it advanced past to complete. - /// If you want to do that, you should instead call [`sleep`] and rely on - /// the runtime's auto-advance feature. - /// - /// Note that calls to `sleep` are not guaranteed to complete the first time - /// they are polled after a call to `advance`. For example, this can happen - /// if the runtime has not yet touched the timer driver after the call to - /// `advance`. However if they don't, the runtime will poll the task again - /// shortly. - /// - /// # Panics - /// - /// Panics if time is not frozen or if called from outside of the Tokio - /// runtime. - /// - /// # Auto-advance - /// - /// If the time is paused and there is no work to do, the runtime advances - /// time to the next timer. See [`pause`](pause#auto-advance) for more - /// details. - /// - /// [`sleep`]: fn@crate::time::sleep - pub async fn advance(duration: Duration) { - with_clock(|maybe_clock| { - let clock = match maybe_clock { - Some(clock) => clock, - None => return Err("time cannot be frozen from outside the Tokio runtime"), - }; - - clock.advance(duration) - }); - - crate::task::yield_now().await; - } - - /// Returns the current instant, factoring in frozen time. 
- pub(crate) fn now() -> Instant { - if !DID_PAUSE_CLOCK.load(Ordering::Acquire) { - return Instant::from_std(std::time::Instant::now()); - } - - with_clock(|maybe_clock| { - Ok(if let Some(clock) = maybe_clock { - clock.now() - } else { - Instant::from_std(std::time::Instant::now()) - }) - }) - } - - impl Clock { - /// Returns a new `Clock` instance that uses the current execution context's - /// source of time. - pub(crate) fn new(enable_pausing: bool, start_paused: bool) -> Clock { - let now = std::time::Instant::now(); - - let clock = Clock { - inner: Mutex::new(Inner { - enable_pausing, - base: now, - unfrozen: Some(now), - auto_advance_inhibit_count: 0, - }), - }; - - if start_paused { - if let Err(msg) = clock.pause() { - panic!("{}", msg); - } - } - - clock - } - - pub(crate) fn pause(&self) -> Result<(), &'static str> { - let mut inner = self.inner.lock(); - - if !inner.enable_pausing { - drop(inner); // avoid poisoning the lock - return Err("`time::pause()` requires the `current_thread` Tokio runtime. \ - This is the default Runtime used by `#[tokio::test]."); - } - - // Track that we paused the clock - DID_PAUSE_CLOCK.store(true, Ordering::Release); - - let elapsed = match inner.unfrozen.as_ref() { - Some(v) => v.elapsed(), - None => return Err("time is already frozen") - }; - inner.base += elapsed; - inner.unfrozen = None; - - Ok(()) - } - - /// Temporarily stop auto-advancing the clock (see `tokio::time::pause`). 
- pub(crate) fn inhibit_auto_advance(&self) { - let mut inner = self.inner.lock(); - inner.auto_advance_inhibit_count += 1; - } - - pub(crate) fn allow_auto_advance(&self) { - let mut inner = self.inner.lock(); - inner.auto_advance_inhibit_count -= 1; - } - - pub(crate) fn can_auto_advance(&self) -> bool { - let inner = self.inner.lock(); - inner.unfrozen.is_none() && inner.auto_advance_inhibit_count == 0 - } - - pub(crate) fn advance(&self, duration: Duration) -> Result<(), &'static str> { - let mut inner = self.inner.lock(); - - if inner.unfrozen.is_some() { - return Err("time is not frozen"); - } - - inner.base += duration; - Ok(()) - } - - pub(crate) fn now(&self) -> Instant { - let inner = self.inner.lock(); - - let mut ret = inner.base; - - if let Some(unfrozen) = inner.unfrozen { - ret += unfrozen.elapsed(); - } - - Instant::from_std(ret) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/time/error.rs s390-tools-2.33.1/rust-vendor/tokio/src/time/error.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/time/error.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/time/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,123 +0,0 @@ -//! Time error types. - -use std::error; -use std::fmt; - -/// Errors encountered by the timer implementation. -/// -/// Currently, there are two different errors that can occur: -/// -/// * `shutdown` occurs when a timer operation is attempted, but the timer -/// instance has been dropped. In this case, the operation will never be able -/// to complete and the `shutdown` error is returned. This is a permanent -/// error, i.e., once this error is observed, timer operations will never -/// succeed in the future. -/// -/// * `at_capacity` occurs when a timer operation is attempted, but the timer -/// instance is currently handling its maximum number of outstanding sleep instances. 
-/// In this case, the operation is not able to be performed at the current -/// moment, and `at_capacity` is returned. This is a transient error, i.e., at -/// some point in the future, if the operation is attempted again, it might -/// succeed. Callers that observe this error should attempt to [shed load]. One -/// way to do this would be dropping the future that issued the timer operation. -/// -/// [shed load]: https://en.wikipedia.org/wiki/Load_Shedding -#[derive(Debug, Copy, Clone)] -pub struct Error(Kind); - -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -#[repr(u8)] -pub(crate) enum Kind { - Shutdown = 1, - AtCapacity = 2, - Invalid = 3, -} - -impl From for Error { - fn from(k: Kind) -> Self { - Error(k) - } -} - -/// Errors returned by `Timeout`. -/// -/// This error is returned when a timeout expires before the function was able -/// to finish. -#[derive(Debug, PartialEq, Eq)] -pub struct Elapsed(()); - -#[derive(Debug)] -pub(crate) enum InsertError { - Elapsed, -} - -// ===== impl Error ===== - -impl Error { - /// Creates an error representing a shutdown timer. - pub fn shutdown() -> Error { - Error(Kind::Shutdown) - } - - /// Returns `true` if the error was caused by the timer being shutdown. - pub fn is_shutdown(&self) -> bool { - matches!(self.0, Kind::Shutdown) - } - - /// Creates an error representing a timer at capacity. - pub fn at_capacity() -> Error { - Error(Kind::AtCapacity) - } - - /// Returns `true` if the error was caused by the timer being at capacity. - pub fn is_at_capacity(&self) -> bool { - matches!(self.0, Kind::AtCapacity) - } - - /// Creates an error representing a misconfigured timer. - pub fn invalid() -> Error { - Error(Kind::Invalid) - } - - /// Returns `true` if the error was caused by the timer being misconfigured. 
- pub fn is_invalid(&self) -> bool { - matches!(self.0, Kind::Invalid) - } -} - -impl error::Error for Error {} - -impl fmt::Display for Error { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let descr = match self.0 { - Kind::Shutdown => { - "the timer is shutdown, must be called from the context of Tokio runtime" - } - Kind::AtCapacity => "timer is at capacity and cannot create a new entry", - Kind::Invalid => "timer duration exceeds maximum duration", - }; - write!(fmt, "{}", descr) - } -} - -// ===== impl Elapsed ===== - -impl Elapsed { - pub(crate) fn new() -> Self { - Elapsed(()) - } -} - -impl fmt::Display for Elapsed { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - "deadline has elapsed".fmt(fmt) - } -} - -impl std::error::Error for Elapsed {} - -impl From for std::io::Error { - fn from(_err: Elapsed) -> std::io::Error { - std::io::ErrorKind::TimedOut.into() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/time/instant.rs s390-tools-2.33.1/rust-vendor/tokio/src/time/instant.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/time/instant.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/time/instant.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,223 +0,0 @@ -#![allow(clippy::trivially_copy_pass_by_ref)] - -use std::fmt; -use std::ops; -use std::time::Duration; - -/// A measurement of a monotonically nondecreasing clock. -/// Opaque and useful only with `Duration`. -/// -/// Instants are always guaranteed to be no less than any previously measured -/// instant when created, and are often useful for tasks such as measuring -/// benchmarks or timing how long an operation takes. -/// -/// Note, however, that instants are not guaranteed to be **steady**. In other -/// words, each tick of the underlying clock may not be the same length (e.g. -/// some seconds may be longer than others). 
An instant may jump forwards or -/// experience time dilation (slow down or speed up), but it will never go -/// backwards. -/// -/// Instants are opaque types that can only be compared to one another. There is -/// no method to get "the number of seconds" from an instant. Instead, it only -/// allows measuring the duration between two instants (or comparing two -/// instants). -/// -/// The size of an `Instant` struct may vary depending on the target operating -/// system. -/// -/// # Note -/// -/// This type wraps the inner `std` variant and is used to align the Tokio -/// clock for uses of `now()`. This can be useful for testing where you can -/// take advantage of `time::pause()` and `time::advance()`. -#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)] -pub struct Instant { - std: std::time::Instant, -} - -impl Instant { - /// Returns an instant corresponding to "now". - /// - /// # Examples - /// - /// ``` - /// use tokio::time::Instant; - /// - /// let now = Instant::now(); - /// ``` - pub fn now() -> Instant { - variant::now() - } - - /// Create a `tokio::time::Instant` from a `std::time::Instant`. - pub fn from_std(std: std::time::Instant) -> Instant { - Instant { std } - } - - pub(crate) fn far_future() -> Instant { - // Roughly 30 years from now. - // API does not provide a way to obtain max `Instant` - // or convert specific date in the future to instant. - // 1000 years overflows on macOS, 100 years overflows on FreeBSD. - Self::now() + Duration::from_secs(86400 * 365 * 30) - } - - /// Convert the value into a `std::time::Instant`. - pub fn into_std(self) -> std::time::Instant { - self.std - } - - /// Returns the amount of time elapsed from another instant to this one, or - /// zero duration if that instant is later than this one. 
- pub fn duration_since(&self, earlier: Instant) -> Duration { - self.std.saturating_duration_since(earlier.std) - } - - /// Returns the amount of time elapsed from another instant to this one, or - /// None if that instant is later than this one. - /// - /// # Examples - /// - /// ``` - /// use tokio::time::{Duration, Instant, sleep}; - /// - /// #[tokio::main] - /// async fn main() { - /// let now = Instant::now(); - /// sleep(Duration::new(1, 0)).await; - /// let new_now = Instant::now(); - /// println!("{:?}", new_now.checked_duration_since(now)); - /// println!("{:?}", now.checked_duration_since(new_now)); // None - /// } - /// ``` - pub fn checked_duration_since(&self, earlier: Instant) -> Option { - self.std.checked_duration_since(earlier.std) - } - - /// Returns the amount of time elapsed from another instant to this one, or - /// zero duration if that instant is later than this one. - /// - /// # Examples - /// - /// ``` - /// use tokio::time::{Duration, Instant, sleep}; - /// - /// #[tokio::main] - /// async fn main() { - /// let now = Instant::now(); - /// sleep(Duration::new(1, 0)).await; - /// let new_now = Instant::now(); - /// println!("{:?}", new_now.saturating_duration_since(now)); - /// println!("{:?}", now.saturating_duration_since(new_now)); // 0ns - /// } - /// ``` - pub fn saturating_duration_since(&self, earlier: Instant) -> Duration { - self.std.saturating_duration_since(earlier.std) - } - - /// Returns the amount of time elapsed since this instant was created, - /// or zero duration if that this instant is in the future. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::time::{Duration, Instant, sleep}; - /// - /// #[tokio::main] - /// async fn main() { - /// let instant = Instant::now(); - /// let three_secs = Duration::from_secs(3); - /// sleep(three_secs).await; - /// assert!(instant.elapsed() >= three_secs); - /// } - /// ``` - pub fn elapsed(&self) -> Duration { - Instant::now().saturating_duration_since(*self) - } - - /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be - /// represented as `Instant` (which means it's inside the bounds of the - /// underlying data structure), `None` otherwise. - pub fn checked_add(&self, duration: Duration) -> Option { - self.std.checked_add(duration).map(Instant::from_std) - } - - /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be - /// represented as `Instant` (which means it's inside the bounds of the - /// underlying data structure), `None` otherwise. - pub fn checked_sub(&self, duration: Duration) -> Option { - self.std.checked_sub(duration).map(Instant::from_std) - } -} - -impl From for Instant { - fn from(time: std::time::Instant) -> Instant { - Instant::from_std(time) - } -} - -impl From for std::time::Instant { - fn from(time: Instant) -> std::time::Instant { - time.into_std() - } -} - -impl ops::Add for Instant { - type Output = Instant; - - fn add(self, other: Duration) -> Instant { - Instant::from_std(self.std + other) - } -} - -impl ops::AddAssign for Instant { - fn add_assign(&mut self, rhs: Duration) { - *self = *self + rhs; - } -} - -impl ops::Sub for Instant { - type Output = Duration; - - fn sub(self, rhs: Instant) -> Duration { - self.std.saturating_duration_since(rhs.std) - } -} - -impl ops::Sub for Instant { - type Output = Instant; - - fn sub(self, rhs: Duration) -> Instant { - Instant::from_std(std::time::Instant::sub(self.std, rhs)) - } -} - -impl ops::SubAssign for Instant { - fn sub_assign(&mut self, rhs: Duration) { - *self = *self - rhs; - } -} - -impl 
fmt::Debug for Instant { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - self.std.fmt(fmt) - } -} - -#[cfg(not(feature = "test-util"))] -mod variant { - use super::Instant; - - pub(super) fn now() -> Instant { - Instant::from_std(std::time::Instant::now()) - } -} - -#[cfg(feature = "test-util")] -mod variant { - use super::Instant; - - pub(super) fn now() -> Instant { - crate::time::clock::now() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/time/interval.rs s390-tools-2.33.1/rust-vendor/tokio/src/time/interval.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/time/interval.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/time/interval.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,641 +0,0 @@ -use crate::future::poll_fn; -use crate::time::{sleep_until, Duration, Instant, Sleep}; -use crate::util::trace; - -use std::future::Future; -use std::panic::Location; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Creates new [`Interval`] that yields with interval of `period`. The first -/// tick completes immediately. The default [`MissedTickBehavior`] is -/// [`Burst`](MissedTickBehavior::Burst), but this can be configured -/// by calling [`set_missed_tick_behavior`](Interval::set_missed_tick_behavior). -/// -/// An interval will tick indefinitely. At any time, the [`Interval`] value can -/// be dropped. This cancels the interval. -/// -/// This function is equivalent to -/// [`interval_at(Instant::now(), period)`](interval_at). -/// -/// # Panics -/// -/// This function panics if `period` is zero. -/// -/// # Examples -/// -/// ``` -/// use tokio::time::{self, Duration}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut interval = time::interval(Duration::from_millis(10)); -/// -/// interval.tick().await; // ticks immediately -/// interval.tick().await; // ticks after 10ms -/// interval.tick().await; // ticks after 10ms -/// -/// // approximately 20ms have elapsed. 
-/// } -/// ``` -/// -/// A simple example using `interval` to execute a task every two seconds. -/// -/// The difference between `interval` and [`sleep`] is that an [`Interval`] -/// measures the time since the last tick, which means that [`.tick().await`] -/// may wait for a shorter time than the duration specified for the interval -/// if some time has passed between calls to [`.tick().await`]. -/// -/// If the tick in the example below was replaced with [`sleep`], the task -/// would only be executed once every three seconds, and not every two -/// seconds. -/// -/// ``` -/// use tokio::time; -/// -/// async fn task_that_takes_a_second() { -/// println!("hello"); -/// time::sleep(time::Duration::from_secs(1)).await -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let mut interval = time::interval(time::Duration::from_secs(2)); -/// for _i in 0..5 { -/// interval.tick().await; -/// task_that_takes_a_second().await; -/// } -/// } -/// ``` -/// -/// [`sleep`]: crate::time::sleep() -/// [`.tick().await`]: Interval::tick -#[track_caller] -pub fn interval(period: Duration) -> Interval { - assert!(period > Duration::new(0, 0), "`period` must be non-zero."); - internal_interval_at(Instant::now(), period, trace::caller_location()) -} - -/// Creates new [`Interval`] that yields with interval of `period` with the -/// first tick completing at `start`. The default [`MissedTickBehavior`] is -/// [`Burst`](MissedTickBehavior::Burst), but this can be configured -/// by calling [`set_missed_tick_behavior`](Interval::set_missed_tick_behavior). -/// -/// An interval will tick indefinitely. At any time, the [`Interval`] value can -/// be dropped. This cancels the interval. -/// -/// # Panics -/// -/// This function panics if `period` is zero. 
-/// -/// # Examples -/// -/// ``` -/// use tokio::time::{interval_at, Duration, Instant}; -/// -/// #[tokio::main] -/// async fn main() { -/// let start = Instant::now() + Duration::from_millis(50); -/// let mut interval = interval_at(start, Duration::from_millis(10)); -/// -/// interval.tick().await; // ticks after 50ms -/// interval.tick().await; // ticks after 10ms -/// interval.tick().await; // ticks after 10ms -/// -/// // approximately 70ms have elapsed. -/// } -/// ``` -#[track_caller] -pub fn interval_at(start: Instant, period: Duration) -> Interval { - assert!(period > Duration::new(0, 0), "`period` must be non-zero."); - internal_interval_at(start, period, trace::caller_location()) -} - -#[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_variables))] -fn internal_interval_at( - start: Instant, - period: Duration, - location: Option<&'static Location<'static>>, -) -> Interval { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = { - let location = location.expect("should have location if tracing"); - - tracing::trace_span!( - "runtime.resource", - concrete_type = "Interval", - kind = "timer", - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ) - }; - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let delay = resource_span.in_scope(|| Box::pin(sleep_until(start))); - - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let delay = Box::pin(sleep_until(start)); - - Interval { - delay, - period, - missed_tick_behavior: Default::default(), - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span, - } -} - -/// Defines the behavior of an [`Interval`] when it misses a tick. -/// -/// Sometimes, an [`Interval`]'s tick is missed. 
For example, consider the -/// following: -/// -/// ``` -/// use tokio::time::{self, Duration}; -/// # async fn task_that_takes_one_to_three_millis() {} -/// -/// #[tokio::main] -/// async fn main() { -/// // ticks every 2 milliseconds -/// let mut interval = time::interval(Duration::from_millis(2)); -/// for _ in 0..5 { -/// interval.tick().await; -/// // if this takes more than 2 milliseconds, a tick will be delayed -/// task_that_takes_one_to_three_millis().await; -/// } -/// } -/// ``` -/// -/// Generally, a tick is missed if too much time is spent without calling -/// [`Interval::tick()`]. -/// -/// By default, when a tick is missed, [`Interval`] fires ticks as quickly as it -/// can until it is "caught up" in time to where it should be. -/// `MissedTickBehavior` can be used to specify a different behavior for -/// [`Interval`] to exhibit. Each variant represents a different strategy. -/// -/// Note that because the executor cannot guarantee exact precision with timers, -/// these strategies will only apply when the delay is greater than 5 -/// milliseconds. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum MissedTickBehavior { - /// Ticks as fast as possible until caught up. - /// - /// When this strategy is used, [`Interval`] schedules ticks "normally" (the - /// same as it would have if the ticks hadn't been delayed), which results - /// in it firing ticks as fast as possible until it is caught up in time to - /// where it should be. Unlike [`Delay`] and [`Skip`], the ticks yielded - /// when `Burst` is used (the [`Instant`]s that [`tick`](Interval::tick) - /// yields) aren't different than they would have been if a tick had not - /// been missed. Like [`Skip`], and unlike [`Delay`], the ticks may be - /// shortened. 
- /// - /// This looks something like this: - /// ```text - /// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 | - /// Actual ticks: | work -----| delay | work | work | work -| work -----| - /// ``` - /// - /// In code: - /// - /// ``` - /// use tokio::time::{interval, Duration}; - /// # async fn task_that_takes_200_millis() {} - /// - /// # #[tokio::main(flavor = "current_thread")] - /// # async fn main() { - /// let mut interval = interval(Duration::from_millis(50)); - /// - /// // First tick resolves immediately after creation - /// interval.tick().await; - /// - /// task_that_takes_200_millis().await; - /// // The `Interval` has missed a tick - /// - /// // Since we have exceeded our timeout, this will resolve immediately - /// interval.tick().await; - /// - /// // Since we are more than 100ms after the start of `interval`, this will - /// // also resolve immediately. - /// interval.tick().await; - /// - /// // Also resolves immediately, because it was supposed to resolve at - /// // 150ms after the start of `interval` - /// interval.tick().await; - /// - /// // Resolves immediately - /// interval.tick().await; - /// - /// // Since we have gotten to 200ms after the start of `interval`, this - /// // will resolve after 50ms - /// interval.tick().await; - /// # } - /// ``` - /// - /// This is the default behavior when [`Interval`] is created with - /// [`interval`] and [`interval_at`]. - /// - /// [`Delay`]: MissedTickBehavior::Delay - /// [`Skip`]: MissedTickBehavior::Skip - Burst, - - /// Tick at multiples of `period` from when [`tick`] was called, rather than - /// from `start`. - /// - /// When this strategy is used and [`Interval`] has missed a tick, instead - /// of scheduling ticks to fire at multiples of `period` from `start` (the - /// time when the first tick was fired), it schedules all future ticks to - /// happen at a regular `period` from the point when [`tick`] was called. 
- /// Unlike [`Burst`] and [`Skip`], ticks are not shortened, and they aren't - /// guaranteed to happen at a multiple of `period` from `start` any longer. - /// - /// This looks something like this: - /// ```text - /// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 | - /// Actual ticks: | work -----| delay | work -----| work -----| work -----| - /// ``` - /// - /// In code: - /// - /// ``` - /// use tokio::time::{interval, Duration, MissedTickBehavior}; - /// # async fn task_that_takes_more_than_50_millis() {} - /// - /// # #[tokio::main(flavor = "current_thread")] - /// # async fn main() { - /// let mut interval = interval(Duration::from_millis(50)); - /// interval.set_missed_tick_behavior(MissedTickBehavior::Delay); - /// - /// task_that_takes_more_than_50_millis().await; - /// // The `Interval` has missed a tick - /// - /// // Since we have exceeded our timeout, this will resolve immediately - /// interval.tick().await; - /// - /// // But this one, rather than also resolving immediately, as might happen - /// // with the `Burst` or `Skip` behaviors, will not resolve until - /// // 50ms after the call to `tick` up above. That is, in `tick`, when we - /// // recognize that we missed a tick, we schedule the next tick to happen - /// // 50ms (or whatever the `period` is) from right then, not from when - /// // were *supposed* to tick - /// interval.tick().await; - /// # } - /// ``` - /// - /// [`Burst`]: MissedTickBehavior::Burst - /// [`Skip`]: MissedTickBehavior::Skip - /// [`tick`]: Interval::tick - Delay, - - /// Skips missed ticks and tick on the next multiple of `period` from - /// `start`. - /// - /// When this strategy is used, [`Interval`] schedules the next tick to fire - /// at the next-closest tick that is a multiple of `period` away from - /// `start` (the point where [`Interval`] first ticked). 
Like [`Burst`], all - /// ticks remain multiples of `period` away from `start`, but unlike - /// [`Burst`], the ticks may not be *one* multiple of `period` away from the - /// last tick. Like [`Delay`], the ticks are no longer the same as they - /// would have been if ticks had not been missed, but unlike [`Delay`], and - /// like [`Burst`], the ticks may be shortened to be less than one `period` - /// away from each other. - /// - /// This looks something like this: - /// ```text - /// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 | - /// Actual ticks: | work -----| delay | work ---| work -----| work -----| - /// ``` - /// - /// In code: - /// - /// ``` - /// use tokio::time::{interval, Duration, MissedTickBehavior}; - /// # async fn task_that_takes_75_millis() {} - /// - /// # #[tokio::main(flavor = "current_thread")] - /// # async fn main() { - /// let mut interval = interval(Duration::from_millis(50)); - /// interval.set_missed_tick_behavior(MissedTickBehavior::Skip); - /// - /// task_that_takes_75_millis().await; - /// // The `Interval` has missed a tick - /// - /// // Since we have exceeded our timeout, this will resolve immediately - /// interval.tick().await; - /// - /// // This one will resolve after 25ms, 100ms after the start of - /// // `interval`, which is the closest multiple of `period` from the start - /// // of `interval` after the call to `tick` up above. - /// interval.tick().await; - /// # } - /// ``` - /// - /// [`Burst`]: MissedTickBehavior::Burst - /// [`Delay`]: MissedTickBehavior::Delay - Skip, -} - -impl MissedTickBehavior { - /// If a tick is missed, this method is called to determine when the next tick should happen. 
- fn next_timeout(&self, timeout: Instant, now: Instant, period: Duration) -> Instant { - match self { - Self::Burst => timeout + period, - Self::Delay => now + period, - Self::Skip => { - now + period - - Duration::from_nanos( - ((now - timeout).as_nanos() % period.as_nanos()) - .try_into() - // This operation is practically guaranteed not to - // fail, as in order for it to fail, `period` would - // have to be longer than `now - timeout`, and both - // would have to be longer than 584 years. - // - // If it did fail, there's not a good way to pass - // the error along to the user, so we just panic. - .expect( - "too much time has elapsed since the interval was supposed to tick", - ), - ) - } - } - } -} - -impl Default for MissedTickBehavior { - /// Returns [`MissedTickBehavior::Burst`]. - /// - /// For most usecases, the [`Burst`] strategy is what is desired. - /// Additionally, to preserve backwards compatibility, the [`Burst`] - /// strategy must be the default. For these reasons, - /// [`MissedTickBehavior::Burst`] is the default for [`MissedTickBehavior`]. - /// See [`Burst`] for more details. - /// - /// [`Burst`]: MissedTickBehavior::Burst - fn default() -> Self { - Self::Burst - } -} - -/// Interval returned by [`interval`] and [`interval_at`]. -/// -/// This type allows you to wait on a sequence of instants with a certain -/// duration between each instant. Unlike calling [`sleep`] in a loop, this lets -/// you count the time spent between the calls to [`sleep`] as well. -/// -/// An `Interval` can be turned into a `Stream` with [`IntervalStream`]. -/// -/// [`IntervalStream`]: https://docs.rs/tokio-stream/latest/tokio_stream/wrappers/struct.IntervalStream.html -/// [`sleep`]: crate::time::sleep() -#[derive(Debug)] -pub struct Interval { - /// Future that completes the next time the `Interval` yields a value. - delay: Pin>, - - /// The duration between values yielded by `Interval`. 
- period: Duration, - - /// The strategy `Interval` should use when a tick is missed. - missed_tick_behavior: MissedTickBehavior, - - #[cfg(all(tokio_unstable, feature = "tracing"))] - resource_span: tracing::Span, -} - -impl Interval { - /// Completes when the next instant in the interval has been reached. - /// - /// # Cancel safety - /// - /// This method is cancellation safe. If `tick` is used as the branch in a `tokio::select!` and - /// another branch completes first, then no tick has been consumed. - /// - /// # Examples - /// - /// ``` - /// use tokio::time; - /// - /// use std::time::Duration; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut interval = time::interval(Duration::from_millis(10)); - /// - /// interval.tick().await; - /// // approximately 0ms have elapsed. The first tick completes immediately. - /// interval.tick().await; - /// interval.tick().await; - /// - /// // approximately 20ms have elapsed. - /// } - /// ``` - pub async fn tick(&mut self) -> Instant { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let resource_span = self.resource_span.clone(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let instant = trace::async_op( - || poll_fn(|cx| self.poll_tick(cx)), - resource_span, - "Interval::tick", - "poll_tick", - false, - ); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let instant = poll_fn(|cx| self.poll_tick(cx)); - - instant.await - } - - /// Polls for the next instant in the interval to be reached. - /// - /// This method can return the following values: - /// - /// * `Poll::Pending` if the next instant has not yet been reached. - /// * `Poll::Ready(instant)` if the next instant has been reached. - /// - /// When this method returns `Poll::Pending`, the current task is scheduled - /// to receive a wakeup when the instant has elapsed. 
Note that on multiple - /// calls to `poll_tick`, only the [`Waker`](std::task::Waker) from the - /// [`Context`] passed to the most recent call is scheduled to receive a - /// wakeup. - pub fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll { - // Wait for the delay to be done - ready!(Pin::new(&mut self.delay).poll(cx)); - - // Get the time when we were scheduled to tick - let timeout = self.delay.deadline(); - - let now = Instant::now(); - - // If a tick was not missed, and thus we are being called before the - // next tick is due, just schedule the next tick normally, one `period` - // after `timeout` - // - // However, if a tick took excessively long and we are now behind, - // schedule the next tick according to how the user specified with - // `MissedTickBehavior` - let next = if now > timeout + Duration::from_millis(5) { - self.missed_tick_behavior - .next_timeout(timeout, now, self.period) - } else { - timeout + self.period - }; - - // When we arrive here, the internal delay returned `Poll::Ready`. - // Reset the delay but do not register it. It should be registered with - // the next call to [`poll_tick`]. - self.delay.as_mut().reset_without_reregister(next); - - // Return the time when we were scheduled to tick - Poll::Ready(timeout) - } - - /// Resets the interval to complete one period after the current time. - /// - /// This method ignores [`MissedTickBehavior`] strategy. - /// - /// This is equivalent to calling `reset_at(Instant::now() + period)`. - /// - /// # Examples - /// - /// ``` - /// use tokio::time; - /// - /// use std::time::Duration; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut interval = time::interval(Duration::from_millis(100)); - /// - /// interval.tick().await; - /// - /// time::sleep(Duration::from_millis(50)).await; - /// interval.reset(); - /// - /// interval.tick().await; - /// interval.tick().await; - /// - /// // approximately 250ms have elapsed. 
- /// } - /// ``` - pub fn reset(&mut self) { - self.delay.as_mut().reset(Instant::now() + self.period); - } - - /// Resets the interval immediately. - /// - /// This method ignores [`MissedTickBehavior`] strategy. - /// - /// This is equivalent to calling `reset_at(Instant::now())`. - /// - /// # Examples - /// - /// ``` - /// use tokio::time; - /// - /// use std::time::Duration; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut interval = time::interval(Duration::from_millis(100)); - /// - /// interval.tick().await; - /// - /// time::sleep(Duration::from_millis(50)).await; - /// interval.reset_immediately(); - /// - /// interval.tick().await; - /// interval.tick().await; - /// - /// // approximately 150ms have elapsed. - /// } - /// ``` - pub fn reset_immediately(&mut self) { - self.delay.as_mut().reset(Instant::now()); - } - - /// Resets the interval after the specified [`std::time::Duration`]. - /// - /// This method ignores [`MissedTickBehavior`] strategy. - /// - /// This is equivalent to calling `reset_at(Instant::now() + after)`. - /// - /// # Examples - /// - /// ``` - /// use tokio::time; - /// - /// use std::time::Duration; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut interval = time::interval(Duration::from_millis(100)); - /// interval.tick().await; - /// - /// time::sleep(Duration::from_millis(50)).await; - /// - /// let after = Duration::from_millis(20); - /// interval.reset_after(after); - /// - /// interval.tick().await; - /// interval.tick().await; - /// - /// // approximately 170ms have elapsed. - /// } - /// ``` - pub fn reset_after(&mut self, after: Duration) { - self.delay.as_mut().reset(Instant::now() + after); - } - - /// Resets the interval to a [`crate::time::Instant`] deadline. - /// - /// Sets the next tick to expire at the given instant. If the instant is in - /// the past, then the [`MissedTickBehavior`] strategy will be used to - /// catch up. 
If the instant is in the future, then the next tick will - /// complete at the given instant, even if that means that it will sleep for - /// longer than the duration of this [`Interval`]. If the [`Interval`] had - /// any missed ticks before calling this method, then those are discarded. - /// - /// # Examples - /// - /// ``` - /// use tokio::time::{self, Instant}; - /// - /// use std::time::Duration; - /// - /// #[tokio::main] - /// async fn main() { - /// let mut interval = time::interval(Duration::from_millis(100)); - /// interval.tick().await; - /// - /// time::sleep(Duration::from_millis(50)).await; - /// - /// let deadline = Instant::now() + Duration::from_millis(30); - /// interval.reset_at(deadline); - /// - /// interval.tick().await; - /// interval.tick().await; - /// - /// // approximately 180ms have elapsed. - /// } - /// ``` - pub fn reset_at(&mut self, deadline: Instant) { - self.delay.as_mut().reset(deadline); - } - - /// Returns the [`MissedTickBehavior`] strategy currently being used. - pub fn missed_tick_behavior(&self) -> MissedTickBehavior { - self.missed_tick_behavior - } - - /// Sets the [`MissedTickBehavior`] strategy that should be used. - pub fn set_missed_tick_behavior(&mut self, behavior: MissedTickBehavior) { - self.missed_tick_behavior = behavior; - } - - /// Returns the period of the interval. - pub fn period(&self) -> Duration { - self.period - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/time/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/time/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/time/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/time/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -//! Utilities for tracking time. -//! -//! This module provides a number of types for executing code after a set period -//! of time. -//! -//! * [`Sleep`] is a future that does no work and completes at a specific [`Instant`] -//! in time. -//! -//! 
* [`Interval`] is a stream yielding a value at a fixed period. It is -//! initialized with a [`Duration`] and repeatedly yields each time the duration -//! elapses. -//! -//! * [`Timeout`]: Wraps a future or stream, setting an upper bound to the amount -//! of time it is allowed to execute. If the future or stream does not -//! complete in time, then it is canceled and an error is returned. -//! -//! These types are sufficient for handling a large number of scenarios -//! involving time. -//! -//! These types must be used from within the context of the [`Runtime`](crate::runtime::Runtime). -//! -//! # Examples -//! -//! Wait 100ms and print "100 ms have elapsed" -//! -//! ``` -//! use std::time::Duration; -//! use tokio::time::sleep; -//! -//! #[tokio::main] -//! async fn main() { -//! sleep(Duration::from_millis(100)).await; -//! println!("100 ms have elapsed"); -//! } -//! ``` -//! -//! Require that an operation takes no more than 1s. -//! -//! ``` -//! use tokio::time::{timeout, Duration}; -//! -//! async fn long_future() { -//! // do work here -//! } -//! -//! # async fn dox() { -//! let res = timeout(Duration::from_secs(1), long_future()).await; -//! -//! if res.is_err() { -//! println!("operation timed out"); -//! } -//! # } -//! ``` -//! -//! A simple example using [`interval`] to execute a task every two seconds. -//! -//! The difference between [`interval`] and [`sleep`] is that an [`interval`] -//! measures the time since the last tick, which means that `.tick().await` may -//! wait for a shorter time than the duration specified for the interval -//! if some time has passed between calls to `.tick().await`. -//! -//! If the tick in the example below was replaced with [`sleep`], the task -//! would only be executed once every three seconds, and not every two -//! seconds. -//! -//! ``` -//! use tokio::time; -//! -//! async fn task_that_takes_a_second() { -//! println!("hello"); -//! time::sleep(time::Duration::from_secs(1)).await -//! } -//! -//! 
#[tokio::main] -//! async fn main() { -//! let mut interval = time::interval(time::Duration::from_secs(2)); -//! for _i in 0..5 { -//! interval.tick().await; -//! task_that_takes_a_second().await; -//! } -//! } -//! ``` -//! -//! [`interval`]: crate::time::interval() -//! [`sleep`]: sleep() - -mod clock; -pub(crate) use self::clock::Clock; -#[cfg(feature = "test-util")] -pub use clock::{advance, pause, resume}; - -pub mod error; - -mod instant; -pub use self::instant::Instant; - -mod interval; -pub use interval::{interval, interval_at, Interval, MissedTickBehavior}; - -mod sleep; -pub use sleep::{sleep, sleep_until, Sleep}; - -mod timeout; -#[doc(inline)] -pub use timeout::{timeout, timeout_at, Timeout}; - -// Re-export for convenience -#[doc(no_inline)] -pub use std::time::Duration; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/time/sleep.rs s390-tools-2.33.1/rust-vendor/tokio/src/time/sleep.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/time/sleep.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/time/sleep.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,453 +0,0 @@ -use crate::runtime::time::TimerEntry; -use crate::time::{error::Error, Duration, Instant}; -use crate::util::trace; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::panic::Location; -use std::pin::Pin; -use std::task::{self, Poll}; - -/// Waits until `deadline` is reached. -/// -/// No work is performed while awaiting on the sleep future to complete. `Sleep` -/// operates at millisecond granularity and should not be used for tasks that -/// require high-resolution timers. -/// -/// To run something regularly on a schedule, see [`interval`]. -/// -/// # Cancellation -/// -/// Canceling a sleep instance is done by dropping the returned future. No additional -/// cleanup work is required. -/// -/// # Examples -/// -/// Wait 100ms and print "100 ms have elapsed". 
-/// -/// ``` -/// use tokio::time::{sleep_until, Instant, Duration}; -/// -/// #[tokio::main] -/// async fn main() { -/// sleep_until(Instant::now() + Duration::from_millis(100)).await; -/// println!("100 ms have elapsed"); -/// } -/// ``` -/// -/// See the documentation for the [`Sleep`] type for more examples. -/// -/// # Panics -/// -/// This function panics if there is no current timer set. -/// -/// It can be triggered when [`Builder::enable_time`] or -/// [`Builder::enable_all`] are not included in the builder. -/// -/// It can also panic whenever a timer is created outside of a -/// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, -/// since the function is executed outside of the runtime. -/// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. -/// And this is because wrapping the function on an async makes it lazy, -/// and so gets executed inside the runtime successfully without -/// panicking. -/// -/// [`Sleep`]: struct@crate::time::Sleep -/// [`interval`]: crate::time::interval() -/// [`Builder::enable_time`]: crate::runtime::Builder::enable_time -/// [`Builder::enable_all`]: crate::runtime::Builder::enable_all -// Alias for old name in 0.x -#[cfg_attr(docsrs, doc(alias = "delay_until"))] -#[track_caller] -pub fn sleep_until(deadline: Instant) -> Sleep { - return Sleep::new_timeout(deadline, trace::caller_location()); -} - -/// Waits until `duration` has elapsed. -/// -/// Equivalent to `sleep_until(Instant::now() + duration)`. An asynchronous -/// analog to `std::thread::sleep`. -/// -/// No work is performed while awaiting on the sleep future to complete. `Sleep` -/// operates at millisecond granularity and should not be used for tasks that -/// require high-resolution timers. The implementation is platform specific, -/// and some platforms (specifically Windows) will provide timers with a -/// larger resolution than 1 ms. -/// -/// To run something regularly on a schedule, see [`interval`]. 
-/// -/// The maximum duration for a sleep is 68719476734 milliseconds (approximately 2.2 years). -/// -/// # Cancellation -/// -/// Canceling a sleep instance is done by dropping the returned future. No additional -/// cleanup work is required. -/// -/// # Examples -/// -/// Wait 100ms and print "100 ms have elapsed". -/// -/// ``` -/// use tokio::time::{sleep, Duration}; -/// -/// #[tokio::main] -/// async fn main() { -/// sleep(Duration::from_millis(100)).await; -/// println!("100 ms have elapsed"); -/// } -/// ``` -/// -/// See the documentation for the [`Sleep`] type for more examples. -/// -/// # Panics -/// -/// This function panics if there is no current timer set. -/// -/// It can be triggered when [`Builder::enable_time`] or -/// [`Builder::enable_all`] are not included in the builder. -/// -/// It can also panic whenever a timer is created outside of a -/// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, -/// since the function is executed outside of the runtime. -/// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. -/// And this is because wrapping the function on an async makes it lazy, -/// and so gets executed inside the runtime successfully without -/// panicking. -/// -/// [`Sleep`]: struct@crate::time::Sleep -/// [`interval`]: crate::time::interval() -/// [`Builder::enable_time`]: crate::runtime::Builder::enable_time -/// [`Builder::enable_all`]: crate::runtime::Builder::enable_all -// Alias for old name in 0.x -#[cfg_attr(docsrs, doc(alias = "delay_for"))] -#[cfg_attr(docsrs, doc(alias = "wait"))] -#[track_caller] -pub fn sleep(duration: Duration) -> Sleep { - let location = trace::caller_location(); - - match Instant::now().checked_add(duration) { - Some(deadline) => Sleep::new_timeout(deadline, location), - None => Sleep::new_timeout(Instant::far_future(), location), - } -} - -pin_project! { - /// Future returned by [`sleep`](sleep) and [`sleep_until`](sleep_until). 
- /// - /// This type does not implement the `Unpin` trait, which means that if you - /// use it with [`select!`] or by calling `poll`, you have to pin it first. - /// If you use it with `.await`, this does not apply. - /// - /// # Examples - /// - /// Wait 100ms and print "100 ms have elapsed". - /// - /// ``` - /// use tokio::time::{sleep, Duration}; - /// - /// #[tokio::main] - /// async fn main() { - /// sleep(Duration::from_millis(100)).await; - /// println!("100 ms have elapsed"); - /// } - /// ``` - /// - /// Use with [`select!`]. Pinning the `Sleep` with [`tokio::pin!`] is - /// necessary when the same `Sleep` is selected on multiple times. - /// ```no_run - /// use tokio::time::{self, Duration, Instant}; - /// - /// #[tokio::main] - /// async fn main() { - /// let sleep = time::sleep(Duration::from_millis(10)); - /// tokio::pin!(sleep); - /// - /// loop { - /// tokio::select! { - /// () = &mut sleep => { - /// println!("timer elapsed"); - /// sleep.as_mut().reset(Instant::now() + Duration::from_millis(50)); - /// }, - /// } - /// } - /// } - /// ``` - /// Use in a struct with boxing. By pinning the `Sleep` with a `Box`, the - /// `HasSleep` struct implements `Unpin`, even though `Sleep` does not. - /// ``` - /// use std::future::Future; - /// use std::pin::Pin; - /// use std::task::{Context, Poll}; - /// use tokio::time::Sleep; - /// - /// struct HasSleep { - /// sleep: Pin>, - /// } - /// - /// impl Future for HasSleep { - /// type Output = (); - /// - /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - /// self.sleep.as_mut().poll(cx) - /// } - /// } - /// ``` - /// Use in a struct with pin projection. This method avoids the `Box`, but - /// the `HasSleep` struct will not be `Unpin` as a consequence. - /// ``` - /// use std::future::Future; - /// use std::pin::Pin; - /// use std::task::{Context, Poll}; - /// use tokio::time::Sleep; - /// use pin_project_lite::pin_project; - /// - /// pin_project! 
{ - /// struct HasSleep { - /// #[pin] - /// sleep: Sleep, - /// } - /// } - /// - /// impl Future for HasSleep { - /// type Output = (); - /// - /// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - /// self.project().sleep.poll(cx) - /// } - /// } - /// ``` - /// - /// [`select!`]: ../macro.select.html - /// [`tokio::pin!`]: ../macro.pin.html - // Alias for old name in 0.2 - #[project(!Unpin)] - #[cfg_attr(docsrs, doc(alias = "Delay"))] - #[derive(Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Sleep { - inner: Inner, - - // The link between the `Sleep` instance and the timer that drives it. - #[pin] - entry: TimerEntry, - } -} - -cfg_trace! { - #[derive(Debug)] - struct Inner { - ctx: trace::AsyncOpTracingCtx, - } -} - -cfg_not_trace! { - #[derive(Debug)] - struct Inner { - } -} - -impl Sleep { - #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_variables))] - #[track_caller] - pub(crate) fn new_timeout( - deadline: Instant, - location: Option<&'static Location<'static>>, - ) -> Sleep { - use crate::runtime::scheduler; - - let handle = scheduler::Handle::current(); - let entry = TimerEntry::new(&handle, deadline); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let inner = { - let clock = handle.driver().clock(); - let handle = &handle.driver().time(); - let time_source = handle.time_source(); - let deadline_tick = time_source.deadline_to_tick(deadline); - let duration = deadline_tick.saturating_sub(time_source.now(clock)); - - let location = location.expect("should have location if tracing"); - let resource_span = tracing::trace_span!( - "runtime.resource", - concrete_type = "Sleep", - kind = "timer", - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ); - - let async_op_span = resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - duration = duration, - duration.unit = "ms", - duration.op = 
"override", - ); - - tracing::trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout") - }); - - let async_op_poll_span = - async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll")); - - let ctx = trace::AsyncOpTracingCtx { - async_op_span, - async_op_poll_span, - resource_span, - }; - - Inner { ctx } - }; - - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = Inner {}; - - Sleep { inner, entry } - } - - pub(crate) fn far_future(location: Option<&'static Location<'static>>) -> Sleep { - Self::new_timeout(Instant::far_future(), location) - } - - /// Returns the instant at which the future will complete. - pub fn deadline(&self) -> Instant { - self.entry.deadline() - } - - /// Returns `true` if `Sleep` has elapsed. - /// - /// A `Sleep` instance is elapsed when the requested duration has elapsed. - pub fn is_elapsed(&self) -> bool { - self.entry.is_elapsed() - } - - /// Resets the `Sleep` instance to a new deadline. - /// - /// Calling this function allows changing the instant at which the `Sleep` - /// future completes without having to create new associated state. - /// - /// This function can be called both before and after the future has - /// completed. - /// - /// To call this method, you will usually combine the call with - /// [`Pin::as_mut`], which lets you call the method without consuming the - /// `Sleep` itself. - /// - /// # Example - /// - /// ``` - /// use tokio::time::{Duration, Instant}; - /// - /// # #[tokio::main(flavor = "current_thread")] - /// # async fn main() { - /// let sleep = tokio::time::sleep(Duration::from_millis(10)); - /// tokio::pin!(sleep); - /// - /// sleep.as_mut().reset(Instant::now() + Duration::from_millis(20)); - /// # } - /// ``` - /// - /// See also the top-level examples. 
- /// - /// [`Pin::as_mut`]: fn@std::pin::Pin::as_mut - pub fn reset(self: Pin<&mut Self>, deadline: Instant) { - self.reset_inner(deadline) - } - - /// Resets the `Sleep` instance to a new deadline without reregistering it - /// to be woken up. - /// - /// Calling this function allows changing the instant at which the `Sleep` - /// future completes without having to create new associated state and - /// without having it registered. This is required in e.g. the - /// [crate::time::Interval] where we want to reset the internal [Sleep] - /// without having it wake up the last task that polled it. - pub(crate) fn reset_without_reregister(self: Pin<&mut Self>, deadline: Instant) { - let mut me = self.project(); - me.entry.as_mut().reset(deadline, false); - } - - fn reset_inner(self: Pin<&mut Self>, deadline: Instant) { - let mut me = self.project(); - me.entry.as_mut().reset(deadline, true); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - { - let _resource_enter = me.inner.ctx.resource_span.enter(); - me.inner.ctx.async_op_span = - tracing::trace_span!("runtime.resource.async_op", source = "Sleep::reset"); - let _async_op_enter = me.inner.ctx.async_op_span.enter(); - - me.inner.ctx.async_op_poll_span = - tracing::trace_span!("runtime.resource.async_op.poll"); - - let duration = { - let clock = me.entry.clock(); - let time_source = me.entry.driver().time_source(); - let now = time_source.now(clock); - let deadline_tick = time_source.deadline_to_tick(deadline); - deadline_tick.saturating_sub(now) - }; - - tracing::trace!( - target: "runtime::resource::state_update", - duration = duration, - duration.unit = "ms", - duration.op = "override", - ); - } - } - - fn poll_elapsed(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let me = self.project(); - - ready!(crate::trace::trace_leaf(cx)); - - // Keep track of task budget - #[cfg(all(tokio_unstable, feature = "tracing"))] - let coop = ready!(trace_poll_op!( - "poll_elapsed", - 
crate::runtime::coop::poll_proceed(cx), - )); - - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - - let result = me.entry.poll_elapsed(cx).map(move |r| { - coop.made_progress(); - r - }); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - return trace_poll_op!("poll_elapsed", result); - - #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] - return result; - } -} - -impl Future for Sleep { - type Output = (); - - // `poll_elapsed` can return an error in two cases: - // - // - AtCapacity: this is a pathological case where far too many - // sleep instances have been scheduled. - // - Shutdown: No timer has been setup, which is a mis-use error. - // - // Both cases are extremely rare, and pretty accurately fit into - // "logic errors", so we just panic in this case. A user couldn't - // really do much better if we passed the error onwards. - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - #[cfg(all(tokio_unstable, feature = "tracing"))] - let _res_span = self.inner.ctx.resource_span.clone().entered(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let _ao_span = self.inner.ctx.async_op_span.clone().entered(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let _ao_poll_span = self.inner.ctx.async_op_poll_span.clone().entered(); - match ready!(self.as_mut().poll_elapsed(cx)) { - Ok(()) => Poll::Ready(()), - Err(e) => panic!("timer error: {}", e), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/time/timeout.rs s390-tools-2.33.1/rust-vendor/tokio/src/time/timeout.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/time/timeout.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/time/timeout.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,228 +0,0 @@ -//! Allows a future to execute for a maximum amount of time. -//! -//! See [`Timeout`] documentation for more details. -//! -//! 
[`Timeout`]: struct@Timeout - -use crate::{ - runtime::coop, - time::{error::Elapsed, sleep_until, Duration, Instant, Sleep}, - util::trace, -}; - -use pin_project_lite::pin_project; -use std::future::Future; -use std::pin::Pin; -use std::task::{self, Poll}; - -/// Requires a `Future` to complete before the specified duration has elapsed. -/// -/// If the future completes before the duration has elapsed, then the completed -/// value is returned. Otherwise, an error is returned and the future is -/// canceled. -/// -/// Note that the timeout is checked before polling the future, so if the future -/// does not yield during execution then it is possible for the future to complete -/// and exceed the timeout _without_ returning an error. -/// -/// This function returns a future whose return type is [`Result`]``, where `T` is the -/// return type of the provided future. -/// -/// If the provided future completes immediately, then the future returned from -/// this function is guaranteed to complete immediately with an [`Ok`] variant -/// no matter the provided duration. -/// -/// [`Ok`]: std::result::Result::Ok -/// [`Result`]: std::result::Result -/// [`Elapsed`]: crate::time::error::Elapsed -/// -/// # Cancellation -/// -/// Cancelling a timeout is done by dropping the future. No additional cleanup -/// or other work is required. -/// -/// The original future may be obtained by calling [`Timeout::into_inner`]. This -/// consumes the `Timeout`. -/// -/// # Examples -/// -/// Create a new `Timeout` set to expire in 10 milliseconds. -/// -/// ```rust -/// use tokio::time::timeout; -/// use tokio::sync::oneshot; -/// -/// use std::time::Duration; -/// -/// # async fn dox() { -/// let (tx, rx) = oneshot::channel(); -/// # tx.send(()).unwrap(); -/// -/// // Wrap the future with a `Timeout` set to expire in 10 milliseconds. 
-/// if let Err(_) = timeout(Duration::from_millis(10), rx).await { -/// println!("did not receive value within 10 ms"); -/// } -/// # } -/// ``` -/// -/// # Panics -/// -/// This function panics if there is no current timer set. -/// -/// It can be triggered when [`Builder::enable_time`] or -/// [`Builder::enable_all`] are not included in the builder. -/// -/// It can also panic whenever a timer is created outside of a -/// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, -/// since the function is executed outside of the runtime. -/// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. -/// And this is because wrapping the function on an async makes it lazy, -/// and so gets executed inside the runtime successfully without -/// panicking. -/// -/// [`Builder::enable_time`]: crate::runtime::Builder::enable_time -/// [`Builder::enable_all`]: crate::runtime::Builder::enable_all -#[track_caller] -pub fn timeout(duration: Duration, future: F) -> Timeout -where - F: Future, -{ - let location = trace::caller_location(); - - let deadline = Instant::now().checked_add(duration); - let delay = match deadline { - Some(deadline) => Sleep::new_timeout(deadline, location), - None => Sleep::far_future(location), - }; - Timeout::new_with_delay(future, delay) -} - -/// Requires a `Future` to complete before the specified instant in time. -/// -/// If the future completes before the instant is reached, then the completed -/// value is returned. Otherwise, an error is returned. -/// -/// This function returns a future whose return type is [`Result`]``, where `T` is the -/// return type of the provided future. -/// -/// If the provided future completes immediately, then the future returned from -/// this function is guaranteed to complete immediately with an [`Ok`] variant -/// no matter the provided deadline. 
-/// -/// [`Ok`]: std::result::Result::Ok -/// [`Result`]: std::result::Result -/// [`Elapsed`]: crate::time::error::Elapsed -/// -/// # Cancellation -/// -/// Cancelling a timeout is done by dropping the future. No additional cleanup -/// or other work is required. -/// -/// The original future may be obtained by calling [`Timeout::into_inner`]. This -/// consumes the `Timeout`. -/// -/// # Examples -/// -/// Create a new `Timeout` set to expire in 10 milliseconds. -/// -/// ```rust -/// use tokio::time::{Instant, timeout_at}; -/// use tokio::sync::oneshot; -/// -/// use std::time::Duration; -/// -/// # async fn dox() { -/// let (tx, rx) = oneshot::channel(); -/// # tx.send(()).unwrap(); -/// -/// // Wrap the future with a `Timeout` set to expire 10 milliseconds into the -/// // future. -/// if let Err(_) = timeout_at(Instant::now() + Duration::from_millis(10), rx).await { -/// println!("did not receive value within 10 ms"); -/// } -/// # } -/// ``` -pub fn timeout_at(deadline: Instant, future: F) -> Timeout -where - F: Future, -{ - let delay = sleep_until(deadline); - - Timeout { - value: future, - delay, - } -} - -pin_project! { - /// Future returned by [`timeout`](timeout) and [`timeout_at`](timeout_at). - #[must_use = "futures do nothing unless you `.await` or poll them"] - #[derive(Debug)] - pub struct Timeout { - #[pin] - value: T, - #[pin] - delay: Sleep, - } -} - -impl Timeout { - pub(crate) fn new_with_delay(value: T, delay: Sleep) -> Timeout { - Timeout { value, delay } - } - - /// Gets a reference to the underlying value in this timeout. - pub fn get_ref(&self) -> &T { - &self.value - } - - /// Gets a mutable reference to the underlying value in this timeout. - pub fn get_mut(&mut self) -> &mut T { - &mut self.value - } - - /// Consumes this timeout, returning the underlying value. 
- pub fn into_inner(self) -> T { - self.value - } -} - -impl Future for Timeout -where - T: Future, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let me = self.project(); - - let had_budget_before = coop::has_budget_remaining(); - - // First, try polling the future - if let Poll::Ready(v) = me.value.poll(cx) { - return Poll::Ready(Ok(v)); - } - - let has_budget_now = coop::has_budget_remaining(); - - let delay = me.delay; - - let poll_delay = || -> Poll { - match delay.poll(cx) { - Poll::Ready(()) => Poll::Ready(Err(Elapsed::new())), - Poll::Pending => Poll::Pending, - } - }; - - if let (true, false) = (had_budget_before, has_budget_now) { - // if it is the underlying future that exhausted the budget, we poll - // the `delay` with an unconstrained one. This prevents pathological - // cases where the underlying future always exhausts the budget and - // we never get a chance to evaluate whether the timeout was hit or - // not. - coop::with_unconstrained(poll_delay) - } else { - poll_delay() - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/atomic_cell.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/atomic_cell.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/atomic_cell.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/atomic_cell.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,51 +0,0 @@ -use crate::loom::sync::atomic::AtomicPtr; - -use std::ptr; -use std::sync::atomic::Ordering::AcqRel; - -pub(crate) struct AtomicCell { - data: AtomicPtr, -} - -unsafe impl Send for AtomicCell {} -unsafe impl Sync for AtomicCell {} - -impl AtomicCell { - pub(crate) fn new(data: Option>) -> AtomicCell { - AtomicCell { - data: AtomicPtr::new(to_raw(data)), - } - } - - pub(crate) fn swap(&self, val: Option>) -> Option> { - let old = self.data.swap(to_raw(val), AcqRel); - from_raw(old) - } - - pub(crate) fn set(&self, val: Box) { - let _ = self.swap(Some(val)); - } - - 
pub(crate) fn take(&self) -> Option> { - self.swap(None) - } -} - -fn to_raw(data: Option>) -> *mut T { - data.map(Box::into_raw).unwrap_or(ptr::null_mut()) -} - -fn from_raw(val: *mut T) -> Option> { - if val.is_null() { - None - } else { - Some(unsafe { Box::from_raw(val) }) - } -} - -impl Drop for AtomicCell { - fn drop(&mut self) { - // Free any data still held by the cell - let _ = self.take(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/bit.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/bit.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/bit.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/bit.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,69 +0,0 @@ -use std::fmt; - -#[derive(Clone, Copy, PartialEq)] -pub(crate) struct Pack { - mask: usize, - shift: u32, -} - -impl Pack { - /// Value is packed in the `width` least-significant bits. - pub(crate) const fn least_significant(width: u32) -> Pack { - let mask = mask_for(width); - - Pack { mask, shift: 0 } - } - - /// Value is packed in the `width` more-significant bits. - pub(crate) const fn then(&self, width: u32) -> Pack { - let shift = pointer_width() - self.mask.leading_zeros(); - let mask = mask_for(width) << shift; - - Pack { mask, shift } - } - - /// Width, in bits, dedicated to storing the value. - pub(crate) const fn width(&self) -> u32 { - pointer_width() - (self.mask >> self.shift).leading_zeros() - } - - /// Max representable value. 
- pub(crate) const fn max_value(&self) -> usize { - (1 << self.width()) - 1 - } - - pub(crate) fn pack(&self, value: usize, base: usize) -> usize { - assert!(value <= self.max_value()); - (base & !self.mask) | (value << self.shift) - } - - pub(crate) fn unpack(&self, src: usize) -> usize { - unpack(src, self.mask, self.shift) - } -} - -impl fmt::Debug for Pack { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - fmt, - "Pack {{ mask: {:b}, shift: {} }}", - self.mask, self.shift - ) - } -} - -/// Returns the width of a pointer in bits. -pub(crate) const fn pointer_width() -> u32 { - std::mem::size_of::() as u32 * 8 -} - -/// Returns a `usize` with the right-most `n` bits set. -pub(crate) const fn mask_for(n: u32) -> usize { - let shift = 1usize.wrapping_shl(n - 1); - shift | (shift - 1) -} - -/// Unpacks a value using a mask & shift. -pub(crate) const fn unpack(src: usize, mask: usize, shift: u32) -> usize { - (src & mask) >> shift -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/cacheline.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/cacheline.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/cacheline.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/cacheline.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,89 +0,0 @@ -#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))] -use std::ops::{Deref, DerefMut}; - -/// Pads and aligns a value to the length of a cache line. -#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)] -// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache -// lines at a time, so we have to align to 128 bytes rather than 64. 
-// -// Sources: -// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf -// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107 -// -// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size. -// -// Sources: -// - https://www.mono-project.com/news/2016/09/12/arm64-icache/ -// -// powerpc64 has 128-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9 -#[cfg_attr( - any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - ), - repr(align(128)) -)] -// arm, mips and mips64 have 32-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9 -#[cfg_attr( - any(target_arch = "arm", target_arch = "mips", target_arch = "mips64",), - repr(align(32)) -)] -// s390x has 256-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7 -#[cfg_attr(target_arch = "s390x", repr(align(256)))] -// x86, riscv and wasm have 64-byte cache line size. 
-// -// Sources: -// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10 -// -// All others are assumed to have 64-byte cache line size. -#[cfg_attr( - not(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "arm", - target_arch = "mips", - target_arch = "mips64", - target_arch = "s390x", - )), - repr(align(64)) -)] -pub(crate) struct CachePadded { - value: T, -} - -impl CachePadded { - /// Pads and aligns a value to the length of a cache line. - pub(crate) fn new(value: T) -> CachePadded { - CachePadded:: { value } - } -} - -impl Deref for CachePadded { - type Target = T; - - fn deref(&self) -> &T { - &self.value - } -} - -impl DerefMut for CachePadded { - fn deref_mut(&mut self) -> &mut T { - &mut self.value - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/error.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/error.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/error.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/error.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,16 +0,0 @@ -// Some combinations of features may not use these constants. -#![cfg_attr(not(feature = "full"), allow(dead_code))] - -/// Error string explaining that the Tokio context hasn't been instantiated. -pub(crate) const CONTEXT_MISSING_ERROR: &str = - "there is no reactor running, must be called from the context of a Tokio 1.x runtime"; - -/// Error string explaining that the Tokio context is shutting down and cannot drive timers. 
-pub(crate) const RUNTIME_SHUTTING_DOWN_ERROR: &str = - "A Tokio 1.x context was found, but it is being shutdown."; - -/// Error string explaining that the Tokio context is not available because the -/// thread-local storing it has been destroyed. This usually only happens during -/// destructors of other thread-locals. -pub(crate) const THREAD_LOCAL_DESTROYED_ERROR: &str = - "The Tokio context thread-local variable has been destroyed."; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/idle_notified_set.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/idle_notified_set.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/idle_notified_set.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/idle_notified_set.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,481 +0,0 @@ -//! This module defines an `IdleNotifiedSet`, which is a collection of elements. -//! Each element is intended to correspond to a task, and the collection will -//! keep track of which tasks have had their waker notified, and which have not. -//! -//! Each entry in the set holds some user-specified value. The value's type is -//! specified using the `T` parameter. It will usually be a `JoinHandle` or -//! similar. - -use std::marker::PhantomPinned; -use std::mem::ManuallyDrop; -use std::ptr::NonNull; -use std::task::{Context, Waker}; - -use crate::loom::cell::UnsafeCell; -use crate::loom::sync::{Arc, Mutex}; -use crate::util::linked_list::{self, Link}; -use crate::util::{waker_ref, Wake}; - -type LinkedList = - linked_list::LinkedList, as linked_list::Link>::Target>; - -/// This is the main handle to the collection. -pub(crate) struct IdleNotifiedSet { - lists: Arc>, - length: usize, -} - -/// A handle to an entry that is guaranteed to be stored in the idle or notified -/// list of its `IdleNotifiedSet`. This value borrows the `IdleNotifiedSet` -/// mutably to prevent the entry from being moved to the `Neither` list, which -/// only the `IdleNotifiedSet` may do. 
-/// -/// The main consequence of being stored in one of the lists is that the `value` -/// field has not yet been consumed. -/// -/// Note: This entry can be moved from the idle to the notified list while this -/// object exists by waking its waker. -pub(crate) struct EntryInOneOfTheLists<'a, T> { - entry: Arc>, - set: &'a mut IdleNotifiedSet, -} - -type Lists = Mutex>; - -/// The linked lists hold strong references to the ListEntry items, and the -/// ListEntry items also hold a strong reference back to the Lists object, but -/// the destructor of the `IdleNotifiedSet` will clear the two lists, so once -/// that object is destroyed, no ref-cycles will remain. -struct ListsInner { - notified: LinkedList, - idle: LinkedList, - /// Whenever an element in the `notified` list is woken, this waker will be - /// notified and consumed, if it exists. - waker: Option, -} - -/// Which of the two lists in the shared Lists object is this entry stored in? -/// -/// If the value is `Idle`, then an entry's waker may move it to the notified -/// list. Otherwise, only the `IdleNotifiedSet` may move it. -/// -/// If the value is `Neither`, then it is still possible that the entry is in -/// some third external list (this happens in `drain`). -#[derive(Copy, Clone, Eq, PartialEq)] -enum List { - Notified, - Idle, - Neither, -} - -/// An entry in the list. -/// -/// # Safety -/// -/// The `my_list` field must only be accessed while holding the mutex in -/// `parent`. It is an invariant that the value of `my_list` corresponds to -/// which linked list in the `parent` holds this entry. Once this field takes -/// the value `Neither`, then it may never be modified again. -/// -/// If the value of `my_list` is `Notified` or `Idle`, then the `pointers` field -/// must only be accessed while holding the mutex. If the value of `my_list` is -/// `Neither`, then the `pointers` field may be accessed by the -/// `IdleNotifiedSet` (this happens inside `drain`). 
-/// -/// The `value` field is owned by the `IdleNotifiedSet` and may only be accessed -/// by the `IdleNotifiedSet`. The operation that sets the value of `my_list` to -/// `Neither` assumes ownership of the `value`, and it must either drop it or -/// move it out from this entry to prevent it from getting leaked. (Since the -/// two linked lists are emptied in the destructor of `IdleNotifiedSet`, the -/// value should not be leaked.) -struct ListEntry { - /// The linked list pointers of the list this entry is in. - pointers: linked_list::Pointers>, - /// Pointer to the shared `Lists` struct. - parent: Arc>, - /// The value stored in this entry. - value: UnsafeCell>, - /// Used to remember which list this entry is in. - my_list: UnsafeCell, - /// Required by the `linked_list::Pointers` field. - _pin: PhantomPinned, -} - -generate_addr_of_methods! { - impl ListEntry { - unsafe fn addr_of_pointers(self: NonNull) -> NonNull>> { - &self.pointers - } - } -} - -// With mutable access to the `IdleNotifiedSet`, you can get mutable access to -// the values. -unsafe impl Send for IdleNotifiedSet {} -// With the current API we strictly speaking don't even need `T: Sync`, but we -// require it anyway to support adding &self APIs that access the values in the -// future. -unsafe impl Sync for IdleNotifiedSet {} - -// These impls control when it is safe to create a Waker. Since the waker does -// not allow access to the value in any way (including its destructor), it is -// not necessary for `T` to be Send or Sync. -unsafe impl Send for ListEntry {} -unsafe impl Sync for ListEntry {} - -impl IdleNotifiedSet { - /// Create a new IdleNotifiedSet. 
- pub(crate) fn new() -> Self { - let lists = Mutex::new(ListsInner { - notified: LinkedList::new(), - idle: LinkedList::new(), - waker: None, - }); - - IdleNotifiedSet { - lists: Arc::new(lists), - length: 0, - } - } - - pub(crate) fn len(&self) -> usize { - self.length - } - - pub(crate) fn is_empty(&self) -> bool { - self.length == 0 - } - - /// Insert the given value into the `idle` list. - pub(crate) fn insert_idle(&mut self, value: T) -> EntryInOneOfTheLists<'_, T> { - self.length += 1; - - let entry = Arc::new(ListEntry { - parent: self.lists.clone(), - value: UnsafeCell::new(ManuallyDrop::new(value)), - my_list: UnsafeCell::new(List::Idle), - pointers: linked_list::Pointers::new(), - _pin: PhantomPinned, - }); - - { - let mut lock = self.lists.lock(); - lock.idle.push_front(entry.clone()); - } - - // Safety: We just put the entry in the idle list, so it is in one of the lists. - EntryInOneOfTheLists { entry, set: self } - } - - /// Pop an entry from the notified list to poll it. The entry is moved to - /// the idle list atomically. - pub(crate) fn pop_notified(&mut self, waker: &Waker) -> Option> { - // We don't decrement the length because this call moves the entry to - // the idle list rather than removing it. - if self.length == 0 { - // Fast path. - return None; - } - - let mut lock = self.lists.lock(); - - let should_update_waker = match lock.waker.as_mut() { - Some(cur_waker) => !waker.will_wake(cur_waker), - None => true, - }; - if should_update_waker { - lock.waker = Some(waker.clone()); - } - - // Pop the entry, returning None if empty. - let entry = lock.notified.pop_back()?; - - lock.idle.push_front(entry.clone()); - - // Safety: We are holding the lock. - entry.my_list.with_mut(|ptr| unsafe { - *ptr = List::Idle; - }); - - drop(lock); - - // Safety: We just put the entry in the idle list, so it is in one of the lists. - Some(EntryInOneOfTheLists { entry, set: self }) - } - - /// Call a function on every element in this list. 
- pub(crate) fn for_each(&mut self, mut func: F) { - fn get_ptrs(list: &mut LinkedList, ptrs: &mut Vec<*mut T>) { - let mut node = list.last(); - - while let Some(entry) = node { - ptrs.push(entry.value.with_mut(|ptr| { - let ptr: *mut ManuallyDrop = ptr; - let ptr: *mut T = ptr.cast(); - ptr - })); - - let prev = entry.pointers.get_prev(); - node = prev.map(|prev| unsafe { &*prev.as_ptr() }); - } - } - - // Atomically get a raw pointer to the value of every entry. - // - // Since this only locks the mutex once, it is not possible for a value - // to get moved from the idle list to the notified list during the - // operation, which would otherwise result in some value being listed - // twice. - let mut ptrs = Vec::with_capacity(self.len()); - { - let mut lock = self.lists.lock(); - - get_ptrs(&mut lock.idle, &mut ptrs); - get_ptrs(&mut lock.notified, &mut ptrs); - } - debug_assert_eq!(ptrs.len(), ptrs.capacity()); - - for ptr in ptrs { - // Safety: When we grabbed the pointers, the entries were in one of - // the two lists. This means that their value was valid at the time, - // and it must still be valid because we are the IdleNotifiedSet, - // and only we can remove an entry from the two lists. (It's - // possible that an entry is moved from one list to the other during - // this loop, but that is ok.) - func(unsafe { &mut *ptr }); - } - } - - /// Remove all entries in both lists, applying some function to each element. - /// - /// The closure is called on all elements even if it panics. Having it panic - /// twice is a double-panic, and will abort the application. - pub(crate) fn drain(&mut self, func: F) { - if self.length == 0 { - // Fast path. - return; - } - self.length = 0; - - // The LinkedList is not cleared on panic, so we use a bomb to clear it. - // - // This value has the invariant that any entry in its `all_entries` list - // has `my_list` set to `Neither` and that the value has not yet been - // dropped. 
- struct AllEntries { - all_entries: LinkedList, - func: F, - } - - impl AllEntries { - fn pop_next(&mut self) -> bool { - if let Some(entry) = self.all_entries.pop_back() { - // Safety: We just took this value from the list, so we can - // destroy the value in the entry. - entry - .value - .with_mut(|ptr| unsafe { (self.func)(ManuallyDrop::take(&mut *ptr)) }); - true - } else { - false - } - } - } - - impl Drop for AllEntries { - fn drop(&mut self) { - while self.pop_next() {} - } - } - - let mut all_entries = AllEntries { - all_entries: LinkedList::new(), - func, - }; - - // Atomically move all entries to the new linked list in the AllEntries - // object. - { - let mut lock = self.lists.lock(); - unsafe { - // Safety: We are holding the lock and `all_entries` is a new - // LinkedList. - move_to_new_list(&mut lock.idle, &mut all_entries.all_entries); - move_to_new_list(&mut lock.notified, &mut all_entries.all_entries); - } - } - - // Keep destroying entries in the list until it is empty. - // - // If the closure panics, then the destructor of the `AllEntries` bomb - // ensures that we keep running the destructor on the remaining values. - // A second panic will abort the program. - while all_entries.pop_next() {} - } -} - -/// # Safety -/// -/// The mutex for the entries must be held, and the target list must be such -/// that setting `my_list` to `Neither` is ok. -unsafe fn move_to_new_list(from: &mut LinkedList, to: &mut LinkedList) { - while let Some(entry) = from.pop_back() { - entry.my_list.with_mut(|ptr| { - *ptr = List::Neither; - }); - to.push_front(entry); - } -} - -impl<'a, T> EntryInOneOfTheLists<'a, T> { - /// Remove this entry from the list it is in, returning the value associated - /// with the entry. - /// - /// This consumes the value, since it is no longer guaranteed to be in a - /// list. 
- pub(crate) fn remove(self) -> T { - self.set.length -= 1; - - { - let mut lock = self.set.lists.lock(); - - // Safety: We are holding the lock so there is no race, and we will - // remove the entry afterwards to uphold invariants. - let old_my_list = self.entry.my_list.with_mut(|ptr| unsafe { - let old_my_list = *ptr; - *ptr = List::Neither; - old_my_list - }); - - let list = match old_my_list { - List::Idle => &mut lock.idle, - List::Notified => &mut lock.notified, - // An entry in one of the lists is in one of the lists. - List::Neither => unreachable!(), - }; - - unsafe { - // Safety: We just checked that the entry is in this particular - // list. - list.remove(ListEntry::as_raw(&self.entry)).unwrap(); - } - } - - // By setting `my_list` to `Neither`, we have taken ownership of the - // value. We return it to the caller. - // - // Safety: We have a mutable reference to the `IdleNotifiedSet` that - // owns this entry, so we can use its permission to access the value. - self.entry - .value - .with_mut(|ptr| unsafe { ManuallyDrop::take(&mut *ptr) }) - } - - /// Access the value in this entry together with a context for its waker. - pub(crate) fn with_value_and_context(&mut self, func: F) -> U - where - F: FnOnce(&mut T, &mut Context<'_>) -> U, - T: 'static, - { - let waker = waker_ref(&self.entry); - - let mut context = Context::from_waker(&waker); - - // Safety: We have a mutable reference to the `IdleNotifiedSet` that - // owns this entry, so we can use its permission to access the value. - self.entry - .value - .with_mut(|ptr| unsafe { func(&mut *ptr, &mut context) }) - } -} - -impl Drop for IdleNotifiedSet { - fn drop(&mut self) { - // Clear both lists. 
- self.drain(drop); - - #[cfg(debug_assertions)] - if !std::thread::panicking() { - let lock = self.lists.lock(); - assert!(lock.idle.is_empty()); - assert!(lock.notified.is_empty()); - } - } -} - -impl Wake for ListEntry { - fn wake_by_ref(me: &Arc) { - let mut lock = me.parent.lock(); - - // Safety: We are holding the lock and we will update the lists to - // maintain invariants. - let old_my_list = me.my_list.with_mut(|ptr| unsafe { - let old_my_list = *ptr; - if old_my_list == List::Idle { - *ptr = List::Notified; - } - old_my_list - }); - - if old_my_list == List::Idle { - // We move ourself to the notified list. - let me = unsafe { - // Safety: We just checked that we are in this particular list. - lock.idle.remove(ListEntry::as_raw(me)).unwrap() - }; - lock.notified.push_front(me); - - if let Some(waker) = lock.waker.take() { - drop(lock); - waker.wake(); - } - } - } - - fn wake(me: Arc) { - Self::wake_by_ref(&me) - } -} - -/// # Safety -/// -/// `ListEntry` is forced to be !Unpin. -unsafe impl linked_list::Link for ListEntry { - type Handle = Arc>; - type Target = ListEntry; - - fn as_raw(handle: &Self::Handle) -> NonNull> { - let ptr: *const ListEntry = Arc::as_ptr(handle); - // Safety: We can't get a null pointer from `Arc::as_ptr`. - unsafe { NonNull::new_unchecked(ptr as *mut ListEntry) } - } - - unsafe fn from_raw(ptr: NonNull>) -> Arc> { - Arc::from_raw(ptr.as_ptr()) - } - - unsafe fn pointers( - target: NonNull>, - ) -> NonNull>> { - ListEntry::addr_of_pointers(target) - } -} - -#[cfg(all(test, not(loom)))] -mod tests { - use crate::runtime::Builder; - use crate::task::JoinSet; - - // A test that runs under miri. 
- // - // https://github.com/tokio-rs/tokio/pull/5693 - #[test] - fn join_set_test() { - let rt = Builder::new_current_thread().build().unwrap(); - - let mut set = JoinSet::new(); - set.spawn_on(futures::future::ready(()), rt.handle()); - - rt.block_on(set.join_next()).unwrap().unwrap(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/linked_list.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/linked_list.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/linked_list.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/linked_list.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,878 +0,0 @@ -#![cfg_attr(not(feature = "full"), allow(dead_code))] - -//! An intrusive double linked list of data. -//! -//! The data structure supports tracking pinned nodes. Most of the data -//! structure's APIs are `unsafe` as they require the caller to ensure the -//! specified node is actually contained by the list. - -use core::cell::UnsafeCell; -use core::fmt; -use core::marker::{PhantomData, PhantomPinned}; -use core::mem::ManuallyDrop; -use core::ptr::{self, NonNull}; - -/// An intrusive linked list. -/// -/// Currently, the list is not emptied on drop. It is the caller's -/// responsibility to ensure the list is empty before dropping it. -pub(crate) struct LinkedList { - /// Linked list head - head: Option>, - - /// Linked list tail - tail: Option>, - - /// Node type marker. - _marker: PhantomData<*const L>, -} - -unsafe impl Send for LinkedList where L::Target: Send {} -unsafe impl Sync for LinkedList where L::Target: Sync {} - -/// Defines how a type is tracked within a linked list. -/// -/// In order to support storing a single type within multiple lists, accessing -/// the list pointers is decoupled from the entry type. -/// -/// # Safety -/// -/// Implementations must guarantee that `Target` types are pinned in memory. 
In -/// other words, when a node is inserted, the value will not be moved as long as -/// it is stored in the list. -pub(crate) unsafe trait Link { - /// Handle to the list entry. - /// - /// This is usually a pointer-ish type. - type Handle; - - /// Node type. - type Target; - - /// Convert the handle to a raw pointer without consuming the handle. - #[allow(clippy::wrong_self_convention)] - fn as_raw(handle: &Self::Handle) -> NonNull; - - /// Convert the raw pointer to a handle - unsafe fn from_raw(ptr: NonNull) -> Self::Handle; - - /// Return the pointers for a node - /// - /// # Safety - /// - /// The resulting pointer should have the same tag in the stacked-borrows - /// stack as the argument. In particular, the method may not create an - /// intermediate reference in the process of creating the resulting raw - /// pointer. - unsafe fn pointers(target: NonNull) -> NonNull>; -} - -/// Previous / next pointers. -pub(crate) struct Pointers { - inner: UnsafeCell>, -} -/// We do not want the compiler to put the `noalias` attribute on mutable -/// references to this type, so the type has been made `!Unpin` with a -/// `PhantomPinned` field. -/// -/// Additionally, we never access the `prev` or `next` fields directly, as any -/// such access would implicitly involve the creation of a reference to the -/// field, which we want to avoid since the fields are not `!Unpin`, and would -/// hence be given the `noalias` attribute if we were to do such an access. -/// As an alternative to accessing the fields directly, the `Pointers` type -/// provides getters and setters for the two fields, and those are implemented -/// using raw pointer casts and offsets, which is valid since the struct is -/// #[repr(C)]. -/// -/// See this link for more information: -/// -#[repr(C)] -struct PointersInner { - /// The previous node in the list. null if there is no previous node. - /// - /// This field is accessed through pointer manipulation, so it is not dead code. 
- #[allow(dead_code)] - prev: Option>, - - /// The next node in the list. null if there is no previous node. - /// - /// This field is accessed through pointer manipulation, so it is not dead code. - #[allow(dead_code)] - next: Option>, - - /// This type is !Unpin due to the heuristic from: - /// - _pin: PhantomPinned, -} - -unsafe impl Send for Pointers {} -unsafe impl Sync for Pointers {} - -// ===== impl LinkedList ===== - -impl LinkedList { - /// Creates an empty linked list. - pub(crate) const fn new() -> LinkedList { - LinkedList { - head: None, - tail: None, - _marker: PhantomData, - } - } -} - -impl LinkedList { - /// Adds an element first in the list. - pub(crate) fn push_front(&mut self, val: L::Handle) { - // The value should not be dropped, it is being inserted into the list - let val = ManuallyDrop::new(val); - let ptr = L::as_raw(&val); - assert_ne!(self.head, Some(ptr)); - unsafe { - L::pointers(ptr).as_mut().set_next(self.head); - L::pointers(ptr).as_mut().set_prev(None); - - if let Some(head) = self.head { - L::pointers(head).as_mut().set_prev(Some(ptr)); - } - - self.head = Some(ptr); - - if self.tail.is_none() { - self.tail = Some(ptr); - } - } - } - - /// Removes the last element from a list and returns it, or None if it is - /// empty. 
- pub(crate) fn pop_back(&mut self) -> Option { - unsafe { - let last = self.tail?; - self.tail = L::pointers(last).as_ref().get_prev(); - - if let Some(prev) = L::pointers(last).as_ref().get_prev() { - L::pointers(prev).as_mut().set_next(None); - } else { - self.head = None - } - - L::pointers(last).as_mut().set_prev(None); - L::pointers(last).as_mut().set_next(None); - - Some(L::from_raw(last)) - } - } - - /// Returns whether the linked list does not contain any node - pub(crate) fn is_empty(&self) -> bool { - if self.head.is_some() { - return false; - } - - assert!(self.tail.is_none()); - true - } - - /// Removes the specified node from the list - /// - /// # Safety - /// - /// The caller **must** ensure that exactly one of the following is true: - /// - `node` is currently contained by `self`, - /// - `node` is not contained by any list, - /// - `node` is currently contained by some other `GuardedLinkedList` **and** - /// the caller has an exclusive access to that list. This condition is - /// used by the linked list in `sync::Notify`. 
- pub(crate) unsafe fn remove(&mut self, node: NonNull) -> Option { - if let Some(prev) = L::pointers(node).as_ref().get_prev() { - debug_assert_eq!(L::pointers(prev).as_ref().get_next(), Some(node)); - L::pointers(prev) - .as_mut() - .set_next(L::pointers(node).as_ref().get_next()); - } else { - if self.head != Some(node) { - return None; - } - - self.head = L::pointers(node).as_ref().get_next(); - } - - if let Some(next) = L::pointers(node).as_ref().get_next() { - debug_assert_eq!(L::pointers(next).as_ref().get_prev(), Some(node)); - L::pointers(next) - .as_mut() - .set_prev(L::pointers(node).as_ref().get_prev()); - } else { - // This might be the last item in the list - if self.tail != Some(node) { - return None; - } - - self.tail = L::pointers(node).as_ref().get_prev(); - } - - L::pointers(node).as_mut().set_next(None); - L::pointers(node).as_mut().set_prev(None); - - Some(L::from_raw(node)) - } -} - -impl fmt::Debug for LinkedList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LinkedList") - .field("head", &self.head) - .field("tail", &self.tail) - .finish() - } -} - -// ===== impl CountedLinkedList ==== - -// Delegates operations to the base LinkedList implementation, and adds a counter to the elements -// in the list. 
-pub(crate) struct CountedLinkedList { - list: LinkedList, - count: usize, -} - -impl CountedLinkedList { - pub(crate) fn new() -> CountedLinkedList { - CountedLinkedList { - list: LinkedList::new(), - count: 0, - } - } - - pub(crate) fn push_front(&mut self, val: L::Handle) { - self.list.push_front(val); - self.count += 1; - } - - pub(crate) fn pop_back(&mut self) -> Option { - let val = self.list.pop_back(); - if val.is_some() { - self.count -= 1; - } - val - } - - pub(crate) fn is_empty(&self) -> bool { - self.list.is_empty() - } - - pub(crate) unsafe fn remove(&mut self, node: NonNull) -> Option { - let val = self.list.remove(node); - if val.is_some() { - self.count -= 1; - } - val - } - - pub(crate) fn count(&self) -> usize { - self.count - } -} - -#[cfg(any( - feature = "fs", - feature = "rt", - all(unix, feature = "process"), - feature = "signal", - feature = "sync", -))] -impl LinkedList { - pub(crate) fn last(&self) -> Option<&L::Target> { - let tail = self.tail.as_ref()?; - unsafe { Some(&*tail.as_ptr()) } - } -} - -impl Default for LinkedList { - fn default() -> Self { - Self::new() - } -} - -// ===== impl DrainFilter ===== - -cfg_io_driver_impl! { - pub(crate) struct DrainFilter<'a, T: Link, F> { - list: &'a mut LinkedList, - filter: F, - curr: Option>, - } - - impl LinkedList { - pub(crate) fn drain_filter(&mut self, filter: F) -> DrainFilter<'_, T, F> - where - F: FnMut(&T::Target) -> bool, - { - let curr = self.head; - DrainFilter { - curr, - filter, - list: self, - } - } - } - - impl<'a, T, F> Iterator for DrainFilter<'a, T, F> - where - T: Link, - F: FnMut(&T::Target) -> bool, - { - type Item = T::Handle; - - fn next(&mut self) -> Option { - while let Some(curr) = self.curr { - // safety: the pointer references data contained by the list - self.curr = unsafe { T::pointers(curr).as_ref() }.get_next(); - - // safety: the value is still owned by the linked list. 
- if (self.filter)(unsafe { &mut *curr.as_ptr() }) { - return unsafe { self.list.remove(curr) }; - } - } - - None - } - } -} - -cfg_taskdump! { - impl CountedLinkedList { - pub(crate) fn for_each(&mut self, f: F) - where - F: FnMut(&T::Handle), - { - self.list.for_each(f) - } - } - - impl LinkedList { - pub(crate) fn for_each(&mut self, mut f: F) - where - F: FnMut(&T::Handle), - { - use std::mem::ManuallyDrop; - - let mut next = self.head; - - while let Some(curr) = next { - unsafe { - let handle = ManuallyDrop::new(T::from_raw(curr)); - f(&handle); - next = T::pointers(curr).as_ref().get_next(); - } - } - } - } -} - -// ===== impl GuardedLinkedList ===== - -feature! { - #![any( - feature = "process", - feature = "sync", - feature = "rt", - feature = "signal", - )] - - /// An intrusive linked list, but instead of keeping pointers to the head - /// and tail nodes, it uses a special guard node linked with those nodes. - /// It means that the list is circular and every pointer of a node from - /// the list is not `None`, including pointers from the guard node. - /// - /// If a list is empty, then both pointers of the guard node are pointing - /// at the guard node itself. - pub(crate) struct GuardedLinkedList { - /// Pointer to the guard node. - guard: NonNull, - - /// Node type marker. - _marker: PhantomData<*const L>, - } - - impl>> LinkedList { - /// Turns a linked list into the guarded version by linking the guard node - /// with the head and tail nodes. Like with other nodes, you should guarantee - /// that the guard node is pinned in memory. - pub(crate) fn into_guarded(self, guard_handle: L::Handle) -> GuardedLinkedList { - // `guard_handle` is a NonNull pointer, we don't have to care about dropping it. 
- let guard = L::as_raw(&guard_handle); - - unsafe { - if let Some(head) = self.head { - debug_assert!(L::pointers(head).as_ref().get_prev().is_none()); - L::pointers(head).as_mut().set_prev(Some(guard)); - L::pointers(guard).as_mut().set_next(Some(head)); - - // The list is not empty, so the tail cannot be `None`. - let tail = self.tail.unwrap(); - debug_assert!(L::pointers(tail).as_ref().get_next().is_none()); - L::pointers(tail).as_mut().set_next(Some(guard)); - L::pointers(guard).as_mut().set_prev(Some(tail)); - } else { - // The list is empty. - L::pointers(guard).as_mut().set_prev(Some(guard)); - L::pointers(guard).as_mut().set_next(Some(guard)); - } - } - - GuardedLinkedList { guard, _marker: PhantomData } - } - } - - impl GuardedLinkedList { - fn tail(&self) -> Option> { - let tail_ptr = unsafe { - L::pointers(self.guard).as_ref().get_prev().unwrap() - }; - - // Compare the tail pointer with the address of the guard node itself. - // If the guard points at itself, then there are no other nodes and - // the list is considered empty. - if tail_ptr != self.guard { - Some(tail_ptr) - } else { - None - } - } - - /// Removes the last element from a list and returns it, or None if it is - /// empty. 
- pub(crate) fn pop_back(&mut self) -> Option { - unsafe { - let last = self.tail()?; - let before_last = L::pointers(last).as_ref().get_prev().unwrap(); - - L::pointers(self.guard).as_mut().set_prev(Some(before_last)); - L::pointers(before_last).as_mut().set_next(Some(self.guard)); - - L::pointers(last).as_mut().set_prev(None); - L::pointers(last).as_mut().set_next(None); - - Some(L::from_raw(last)) - } - } - } -} - -// ===== impl Pointers ===== - -impl Pointers { - /// Create a new set of empty pointers - pub(crate) fn new() -> Pointers { - Pointers { - inner: UnsafeCell::new(PointersInner { - prev: None, - next: None, - _pin: PhantomPinned, - }), - } - } - - pub(crate) fn get_prev(&self) -> Option> { - // SAFETY: prev is the first field in PointersInner, which is #[repr(C)]. - unsafe { - let inner = self.inner.get(); - let prev = inner as *const Option>; - ptr::read(prev) - } - } - pub(crate) fn get_next(&self) -> Option> { - // SAFETY: next is the second field in PointersInner, which is #[repr(C)]. - unsafe { - let inner = self.inner.get(); - let prev = inner as *const Option>; - let next = prev.add(1); - ptr::read(next) - } - } - - fn set_prev(&mut self, value: Option>) { - // SAFETY: prev is the first field in PointersInner, which is #[repr(C)]. - unsafe { - let inner = self.inner.get(); - let prev = inner as *mut Option>; - ptr::write(prev, value); - } - } - fn set_next(&mut self, value: Option>) { - // SAFETY: next is the second field in PointersInner, which is #[repr(C)]. 
- unsafe { - let inner = self.inner.get(); - let prev = inner as *mut Option>; - let next = prev.add(1); - ptr::write(next, value); - } - } -} - -impl fmt::Debug for Pointers { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let prev = self.get_prev(); - let next = self.get_next(); - f.debug_struct("Pointers") - .field("prev", &prev) - .field("next", &next) - .finish() - } -} - -#[cfg(any(test, fuzzing))] -#[cfg(not(loom))] -pub(crate) mod tests { - use super::*; - - use std::pin::Pin; - - #[derive(Debug)] - #[repr(C)] - struct Entry { - pointers: Pointers, - val: i32, - } - - unsafe impl<'a> Link for &'a Entry { - type Handle = Pin<&'a Entry>; - type Target = Entry; - - fn as_raw(handle: &Pin<&'_ Entry>) -> NonNull { - NonNull::from(handle.get_ref()) - } - - unsafe fn from_raw(ptr: NonNull) -> Pin<&'a Entry> { - Pin::new_unchecked(&*ptr.as_ptr()) - } - - unsafe fn pointers(target: NonNull) -> NonNull> { - target.cast() - } - } - - fn entry(val: i32) -> Pin> { - Box::pin(Entry { - pointers: Pointers::new(), - val, - }) - } - - fn ptr(r: &Pin>) -> NonNull { - r.as_ref().get_ref().into() - } - - fn collect_list(list: &mut LinkedList<&'_ Entry, <&'_ Entry as Link>::Target>) -> Vec { - let mut ret = vec![]; - - while let Some(entry) = list.pop_back() { - ret.push(entry.val); - } - - ret - } - - fn push_all<'a>( - list: &mut LinkedList<&'a Entry, <&'_ Entry as Link>::Target>, - entries: &[Pin<&'a Entry>], - ) { - for entry in entries.iter() { - list.push_front(*entry); - } - } - - #[cfg(test)] - macro_rules! assert_clean { - ($e:ident) => {{ - assert!($e.pointers.get_next().is_none()); - assert!($e.pointers.get_prev().is_none()); - }}; - } - - #[cfg(test)] - macro_rules! 
assert_ptr_eq { - ($a:expr, $b:expr) => {{ - // Deal with mapping a Pin<&mut T> -> Option> - assert_eq!(Some($a.as_ref().get_ref().into()), $b) - }}; - } - - #[test] - fn const_new() { - const _: LinkedList<&Entry, <&Entry as Link>::Target> = LinkedList::new(); - } - - #[test] - fn push_and_drain() { - let a = entry(5); - let b = entry(7); - let c = entry(31); - - let mut list = LinkedList::new(); - assert!(list.is_empty()); - - list.push_front(a.as_ref()); - assert!(!list.is_empty()); - list.push_front(b.as_ref()); - list.push_front(c.as_ref()); - - let items: Vec = collect_list(&mut list); - assert_eq!([5, 7, 31].to_vec(), items); - - assert!(list.is_empty()); - } - - #[test] - fn push_pop_push_pop() { - let a = entry(5); - let b = entry(7); - - let mut list = LinkedList::<&Entry, <&Entry as Link>::Target>::new(); - - list.push_front(a.as_ref()); - - let entry = list.pop_back().unwrap(); - assert_eq!(5, entry.val); - assert!(list.is_empty()); - - list.push_front(b.as_ref()); - - let entry = list.pop_back().unwrap(); - assert_eq!(7, entry.val); - - assert!(list.is_empty()); - assert!(list.pop_back().is_none()); - } - - #[test] - fn remove_by_address() { - let a = entry(5); - let b = entry(7); - let c = entry(31); - - unsafe { - // Remove first - let mut list = LinkedList::new(); - - push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]); - assert!(list.remove(ptr(&a)).is_some()); - assert_clean!(a); - // `a` should be no longer there and can't be removed twice - assert!(list.remove(ptr(&a)).is_none()); - assert!(!list.is_empty()); - - assert!(list.remove(ptr(&b)).is_some()); - assert_clean!(b); - // `b` should be no longer there and can't be removed twice - assert!(list.remove(ptr(&b)).is_none()); - assert!(!list.is_empty()); - - assert!(list.remove(ptr(&c)).is_some()); - assert_clean!(c); - // `b` should be no longer there and can't be removed twice - assert!(list.remove(ptr(&c)).is_none()); - assert!(list.is_empty()); - } - - unsafe { - // Remove middle - 
let mut list = LinkedList::new(); - - push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&a)).is_some()); - assert_clean!(a); - - assert_ptr_eq!(b, list.head); - assert_ptr_eq!(c, b.pointers.get_next()); - assert_ptr_eq!(b, c.pointers.get_prev()); - - let items = collect_list(&mut list); - assert_eq!([31, 7].to_vec(), items); - } - - unsafe { - // Remove middle - let mut list = LinkedList::new(); - - push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&b)).is_some()); - assert_clean!(b); - - assert_ptr_eq!(c, a.pointers.get_next()); - assert_ptr_eq!(a, c.pointers.get_prev()); - - let items = collect_list(&mut list); - assert_eq!([31, 5].to_vec(), items); - } - - unsafe { - // Remove last - // Remove middle - let mut list = LinkedList::new(); - - push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&c)).is_some()); - assert_clean!(c); - - assert!(b.pointers.get_next().is_none()); - assert_ptr_eq!(b, list.tail); - - let items = collect_list(&mut list); - assert_eq!([7, 5].to_vec(), items); - } - - unsafe { - // Remove first of two - let mut list = LinkedList::new(); - - push_all(&mut list, &[b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&a)).is_some()); - - assert_clean!(a); - - // a should be no longer there and can't be removed twice - assert!(list.remove(ptr(&a)).is_none()); - - assert_ptr_eq!(b, list.head); - assert_ptr_eq!(b, list.tail); - - assert!(b.pointers.get_next().is_none()); - assert!(b.pointers.get_prev().is_none()); - - let items = collect_list(&mut list); - assert_eq!([7].to_vec(), items); - } - - unsafe { - // Remove last of two - let mut list = LinkedList::new(); - - push_all(&mut list, &[b.as_ref(), a.as_ref()]); - - assert!(list.remove(ptr(&b)).is_some()); - - assert_clean!(b); - - assert_ptr_eq!(a, list.head); - assert_ptr_eq!(a, list.tail); - - assert!(a.pointers.get_next().is_none()); - assert!(a.pointers.get_prev().is_none()); - 
- let items = collect_list(&mut list); - assert_eq!([5].to_vec(), items); - } - - unsafe { - // Remove last item - let mut list = LinkedList::new(); - - push_all(&mut list, &[a.as_ref()]); - - assert!(list.remove(ptr(&a)).is_some()); - assert_clean!(a); - - assert!(list.head.is_none()); - assert!(list.tail.is_none()); - let items = collect_list(&mut list); - assert!(items.is_empty()); - } - - unsafe { - // Remove missing - let mut list = LinkedList::<&Entry, <&Entry as Link>::Target>::new(); - - list.push_front(b.as_ref()); - list.push_front(a.as_ref()); - - assert!(list.remove(ptr(&c)).is_none()); - } - } - - #[test] - fn count() { - let mut list = CountedLinkedList::<&Entry, <&Entry as Link>::Target>::new(); - assert_eq!(0, list.count()); - - let a = entry(5); - let b = entry(7); - list.push_front(a.as_ref()); - list.push_front(b.as_ref()); - assert_eq!(2, list.count()); - - list.pop_back(); - assert_eq!(1, list.count()); - - unsafe { - list.remove(ptr(&b)); - } - assert_eq!(0, list.count()); - } - - /// This is a fuzz test. You run it by entering `cargo fuzz run fuzz_linked_list` in CLI in `/tokio/` module. 
- #[cfg(fuzzing)] - pub fn fuzz_linked_list(ops: &[u8]) { - enum Op { - Push, - Pop, - Remove(usize), - } - use std::collections::VecDeque; - - let ops = ops - .iter() - .map(|i| match i % 3u8 { - 0 => Op::Push, - 1 => Op::Pop, - 2 => Op::Remove((i / 3u8) as usize), - _ => unreachable!(), - }) - .collect::>(); - - let mut ll = LinkedList::<&Entry, <&Entry as Link>::Target>::new(); - let mut reference = VecDeque::new(); - - let entries: Vec<_> = (0..ops.len()).map(|i| entry(i as i32)).collect(); - - for (i, op) in ops.iter().enumerate() { - match op { - Op::Push => { - reference.push_front(i as i32); - assert_eq!(entries[i].val, i as i32); - - ll.push_front(entries[i].as_ref()); - } - Op::Pop => { - if reference.is_empty() { - assert!(ll.is_empty()); - continue; - } - - let v = reference.pop_back(); - assert_eq!(v, ll.pop_back().map(|v| v.val)); - } - Op::Remove(n) => { - if reference.is_empty() { - assert!(ll.is_empty()); - continue; - } - - let idx = n % reference.len(); - let expect = reference.remove(idx).unwrap(); - - unsafe { - let entry = ll.remove(ptr(&entries[expect as usize])).unwrap(); - assert_eq!(expect, entry.val); - } - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/markers.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/markers.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/markers.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/markers.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -/// Marker for types that are `Sync` but not `Send` -pub(crate) struct SyncNotSend(*mut ()); - -unsafe impl Sync for SyncNotSend {} - -cfg_rt! 
{ - pub(crate) struct NotSendOrSync(*mut ()); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/memchr.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/memchr.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/memchr.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/memchr.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,74 +0,0 @@ -//! Search for a byte in a byte array using libc. -//! -//! When nothing pulls in libc, then just use a trivial implementation. Note -//! that we only depend on libc on unix. - -#[cfg(not(all(unix, feature = "libc")))] -pub(crate) fn memchr(needle: u8, haystack: &[u8]) -> Option { - haystack.iter().position(|val| needle == *val) -} - -#[cfg(all(unix, feature = "libc"))] -pub(crate) fn memchr(needle: u8, haystack: &[u8]) -> Option { - let start = haystack.as_ptr(); - - // SAFETY: `start` is valid for `haystack.len()` bytes. - let ptr = unsafe { libc::memchr(start.cast(), needle as _, haystack.len()) }; - - if ptr.is_null() { - None - } else { - Some(ptr as usize - start as usize) - } -} - -#[cfg(test)] -mod tests { - use super::memchr; - - #[test] - fn memchr_test() { - let haystack = b"123abc456\0\xffabc\n"; - - assert_eq!(memchr(b'1', haystack), Some(0)); - assert_eq!(memchr(b'2', haystack), Some(1)); - assert_eq!(memchr(b'3', haystack), Some(2)); - assert_eq!(memchr(b'4', haystack), Some(6)); - assert_eq!(memchr(b'5', haystack), Some(7)); - assert_eq!(memchr(b'6', haystack), Some(8)); - assert_eq!(memchr(b'7', haystack), None); - assert_eq!(memchr(b'a', haystack), Some(3)); - assert_eq!(memchr(b'b', haystack), Some(4)); - assert_eq!(memchr(b'c', haystack), Some(5)); - assert_eq!(memchr(b'd', haystack), None); - assert_eq!(memchr(b'A', haystack), None); - assert_eq!(memchr(0, haystack), Some(9)); - assert_eq!(memchr(0xff, haystack), Some(10)); - assert_eq!(memchr(0xfe, haystack), None); - assert_eq!(memchr(1, haystack), None); - assert_eq!(memchr(b'\n', haystack), Some(14)); - 
assert_eq!(memchr(b'\r', haystack), None); - } - - #[test] - fn memchr_all() { - let mut arr = Vec::new(); - for b in 0..=255 { - arr.push(b); - } - for b in 0..=255 { - assert_eq!(memchr(b, &arr), Some(b as usize)); - } - arr.reverse(); - for b in 0..=255 { - assert_eq!(memchr(b, &arr), Some(255 - b as usize)); - } - } - - #[test] - fn memchr_empty() { - for b in 0..=255 { - assert_eq!(memchr(b, b""), None); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/mod.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,79 +0,0 @@ -cfg_io_driver! { - pub(crate) mod bit; -} - -#[cfg(feature = "rt")] -pub(crate) mod atomic_cell; - -#[cfg(any(feature = "rt", feature = "signal", feature = "process"))] -pub(crate) mod once_cell; - -#[cfg(any( - // io driver uses `WakeList` directly - feature = "net", - feature = "process", - // `sync` enables `Notify` and `batch_semaphore`, which require `WakeList`. - feature = "sync", - // `fs` uses `batch_semaphore`, which requires `WakeList`. - feature = "fs", - // rt and signal use `Notify`, which requires `WakeList`. - feature = "rt", - feature = "signal", -))] -mod wake_list; -#[cfg(any( - feature = "net", - feature = "process", - feature = "sync", - feature = "fs", - feature = "rt", - feature = "signal", -))] -pub(crate) use wake_list::WakeList; - -#[cfg(any( - feature = "fs", - feature = "net", - feature = "process", - feature = "rt", - feature = "sync", - feature = "signal", - feature = "time", -))] -pub(crate) mod linked_list; - -#[cfg(any(feature = "rt", feature = "macros"))] -pub(crate) mod rand; - -cfg_rt! 
{ - mod idle_notified_set; - pub(crate) use idle_notified_set::IdleNotifiedSet; - - pub(crate) use self::rand::RngSeedGenerator; - - mod wake; - pub(crate) use wake::WakerRef; - pub(crate) use wake::{waker_ref, Wake}; - - mod sync_wrapper; - pub(crate) use sync_wrapper::SyncWrapper; - - mod rc_cell; - pub(crate) use rc_cell::RcCell; -} - -cfg_rt_multi_thread! { - mod try_lock; - pub(crate) use try_lock::TryLock; -} - -pub(crate) mod trace; - -pub(crate) mod error; - -#[cfg(feature = "io-util")] -pub(crate) mod memchr; - -pub(crate) mod markers; - -pub(crate) mod cacheline; diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/once_cell.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/once_cell.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/once_cell.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/once_cell.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,70 +0,0 @@ -#![allow(dead_code)] -use std::cell::UnsafeCell; -use std::mem::MaybeUninit; -use std::sync::Once; - -pub(crate) struct OnceCell { - once: Once, - value: UnsafeCell>, -} - -unsafe impl Send for OnceCell {} -unsafe impl Sync for OnceCell {} - -impl OnceCell { - pub(crate) const fn new() -> Self { - Self { - once: Once::new(), - value: UnsafeCell::new(MaybeUninit::uninit()), - } - } - - /// Get the value inside this cell, initializing it using the provided - /// function if necessary. - /// - /// If the `init` closure panics, then the `OnceCell` is poisoned and all - /// future calls to `get` will panic. - #[inline] - pub(crate) fn get(&self, init: impl FnOnce() -> T) -> &T { - if !self.once.is_completed() { - self.do_init(init); - } - - // Safety: The `std::sync::Once` guarantees that we can only reach this - // line if a `call_once` closure has been run exactly once and without - // panicking. Thus, the value is not uninitialized. 
- // - // There is also no race because the only `&self` method that modifies - // `value` is `do_init`, but if the `call_once` closure is still - // running, then no thread has gotten past the `call_once`. - unsafe { &*(self.value.get() as *const T) } - } - - #[cold] - fn do_init(&self, init: impl FnOnce() -> T) { - let value_ptr = self.value.get() as *mut T; - - self.once.call_once(|| { - let set_to = init(); - - // Safety: The `std::sync::Once` guarantees that this initialization - // will run at most once, and that no thread can get past the - // `call_once` until it has run exactly once. Thus, we have - // exclusive access to `value`. - unsafe { - std::ptr::write(value_ptr, set_to); - } - }); - } -} - -impl Drop for OnceCell { - fn drop(&mut self) { - if self.once.is_completed() { - let value_ptr = self.value.get() as *mut T; - unsafe { - std::ptr::drop_in_place(value_ptr); - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/rand/rt.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/rand/rt.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/rand/rt.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/rand/rt.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,61 +0,0 @@ -use super::{FastRand, RngSeed}; - -use std::sync::Mutex; - -/// A deterministic generator for seeds (and other generators). -/// -/// Given the same initial seed, the generator will output the same sequence of seeds. -/// -/// Since the seed generator will be kept in a runtime handle, we need to wrap `FastRand` -/// in a Mutex to make it thread safe. Different to the `FastRand` that we keep in a -/// thread local store, the expectation is that seed generation will not need to happen -/// very frequently, so the cost of the mutex should be minimal. -#[derive(Debug)] -pub(crate) struct RngSeedGenerator { - /// Internal state for the seed generator. We keep it in a Mutex so that we can safely - /// use it across multiple threads. 
- state: Mutex, -} - -impl RngSeedGenerator { - /// Returns a new generator from the provided seed. - pub(crate) fn new(seed: RngSeed) -> Self { - Self { - state: Mutex::new(FastRand::from_seed(seed)), - } - } - - /// Returns the next seed in the sequence. - pub(crate) fn next_seed(&self) -> RngSeed { - let mut rng = self - .state - .lock() - .expect("RNG seed generator is internally corrupt"); - - let s = rng.fastrand(); - let r = rng.fastrand(); - - RngSeed::from_pair(s, r) - } - - /// Directly creates a generator using the next seed. - pub(crate) fn next_generator(&self) -> Self { - RngSeedGenerator::new(self.next_seed()) - } -} - -impl FastRand { - /// Replaces the state of the random number generator with the provided seed, returning - /// the seed that represents the previous state of the random number generator. - /// - /// The random number generator will become equivalent to one created with - /// the same seed. - pub(crate) fn replace_seed(&mut self, seed: RngSeed) -> RngSeed { - let old_seed = RngSeed::from_pair(self.one, self.two); - - self.one = seed.s; - self.two = seed.r; - - old_seed - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/rand/rt_unstable.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/rand/rt_unstable.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/rand/rt_unstable.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/rand/rt_unstable.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,20 +0,0 @@ -use super::RngSeed; - -use std::collections::hash_map::DefaultHasher; -use std::hash::Hasher; - -impl RngSeed { - /// Generates a seed from the provided byte slice. 
- /// - /// # Example - /// - /// ``` - /// # use tokio::runtime::RngSeed; - /// let seed = RngSeed::from_bytes(b"make me a seed"); - /// ``` - pub fn from_bytes(bytes: &[u8]) -> Self { - let mut hasher = DefaultHasher::default(); - hasher.write(bytes); - Self::from_u64(hasher.finish()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/rand.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/rand.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/rand.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/rand.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,95 +0,0 @@ -cfg_rt! { - mod rt; - pub(crate) use rt::RngSeedGenerator; - - cfg_unstable! { - mod rt_unstable; - } -} - -/// A seed for random number generation. -/// -/// In order to make certain functions within a runtime deterministic, a seed -/// can be specified at the time of creation. -#[allow(unreachable_pub)] -#[derive(Clone, Debug)] -pub struct RngSeed { - s: u32, - r: u32, -} - -/// Fast random number generate. -/// -/// Implement xorshift64+: 2 32-bit xorshift sequences added together. -/// Shift triplet `[17,7,16]` was calculated as indicated in Marsaglia's -/// Xorshift paper: -/// This generator passes the SmallCrush suite, part of TestU01 framework: -/// -#[derive(Clone, Copy, Debug)] -pub(crate) struct FastRand { - one: u32, - two: u32, -} - -impl RngSeed { - /// Creates a random seed using loom internally. - pub(crate) fn new() -> Self { - Self::from_u64(crate::loom::rand::seed()) - } - - fn from_u64(seed: u64) -> Self { - let one = (seed >> 32) as u32; - let mut two = seed as u32; - - if two == 0 { - // This value cannot be zero - two = 1; - } - - Self::from_pair(one, two) - } - - fn from_pair(s: u32, r: u32) -> Self { - Self { s, r } - } -} - -impl FastRand { - /// Initialize a new fast random number generator using the default source of entropy. 
- pub(crate) fn new() -> FastRand { - FastRand::from_seed(RngSeed::new()) - } - - /// Initializes a new, thread-local, fast random number generator. - pub(crate) fn from_seed(seed: RngSeed) -> FastRand { - FastRand { - one: seed.s, - two: seed.r, - } - } - - #[cfg(any( - feature = "macros", - feature = "rt-multi-thread", - all(feature = "sync", feature = "rt") - ))] - pub(crate) fn fastrand_n(&mut self, n: u32) -> u32 { - // This is similar to fastrand() % n, but faster. - // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - let mul = (self.fastrand() as u64).wrapping_mul(n as u64); - (mul >> 32) as u32 - } - - fn fastrand(&mut self) -> u32 { - let mut s1 = self.one; - let s0 = self.two; - - s1 ^= s1 << 17; - s1 = s1 ^ s0 ^ s1 >> 7 ^ s0 >> 16; - - self.one = s0; - self.two = s1; - - s0.wrapping_add(s1) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/rc_cell.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/rc_cell.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/rc_cell.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/rc_cell.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,57 +0,0 @@ -use crate::loom::cell::UnsafeCell; - -use std::rc::Rc; - -/// This is exactly like `Cell>>`, except that it provides a `get` -/// method even though `Rc` is not `Copy`. -pub(crate) struct RcCell { - inner: UnsafeCell>>, -} - -impl RcCell { - #[cfg(not(all(loom, test)))] - pub(crate) const fn new() -> Self { - Self { - inner: UnsafeCell::new(None), - } - } - - // The UnsafeCell in loom does not have a const `new` fn. - #[cfg(all(loom, test))] - pub(crate) fn new() -> Self { - Self { - inner: UnsafeCell::new(None), - } - } - - /// Safety: This method may not be called recursively. - #[inline] - unsafe fn with_inner(&self, f: F) -> R - where - F: FnOnce(&mut Option>) -> R, - { - // safety: This type is not Sync, so concurrent calls of this method - // cannot happen. 
Furthermore, the caller guarantees that the method is - // not called recursively. Finally, this is the only place that can - // create mutable references to the inner Rc. This ensures that any - // mutable references created here are exclusive. - self.inner.with_mut(|ptr| f(&mut *ptr)) - } - - pub(crate) fn get(&self) -> Option> { - // safety: The `Rc::clone` method will not call any unknown user-code, - // so it will not result in a recursive call to `with_inner`. - unsafe { self.with_inner(|rc| rc.clone()) } - } - - pub(crate) fn replace(&self, val: Option>) -> Option> { - // safety: No destructors or other unknown user-code will run inside the - // `with_inner` call, so no recursive call to `with_inner` can happen. - unsafe { self.with_inner(|rc| std::mem::replace(rc, val)) } - } - - pub(crate) fn set(&self, val: Option>) { - let old = self.replace(val); - drop(old); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/sync_wrapper.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/sync_wrapper.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/sync_wrapper.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/sync_wrapper.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,26 +0,0 @@ -//! This module contains a type that can make `Send + !Sync` types `Sync` by -//! disallowing all immutable access to the value. -//! -//! A similar primitive is provided in the `sync_wrapper` crate. - -pub(crate) struct SyncWrapper { - value: T, -} - -// safety: The SyncWrapper being send allows you to send the inner value across -// thread boundaries. -unsafe impl Send for SyncWrapper {} - -// safety: An immutable reference to a SyncWrapper is useless, so moving such an -// immutable reference across threads is safe. 
-unsafe impl Sync for SyncWrapper {} - -impl SyncWrapper { - pub(crate) fn new(value: T) -> Self { - Self { value } - } - - pub(crate) fn into_inner(self) -> T { - self.value - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/trace.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/trace.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/trace.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/trace.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,104 +0,0 @@ -cfg_trace! { - cfg_rt! { - use core::{ - pin::Pin, - task::{Context, Poll}, - }; - use pin_project_lite::pin_project; - use std::future::Future; - pub(crate) use tracing::instrument::Instrumented; - - #[inline] - #[track_caller] - pub(crate) fn task(task: F, kind: &'static str, name: Option<&str>, id: u64) -> Instrumented { - #[track_caller] - fn get_span(kind: &'static str, name: Option<&str>, id: u64) -> tracing::Span { - let location = std::panic::Location::caller(); - tracing::trace_span!( - target: "tokio::task", - "runtime.spawn", - %kind, - task.name = %name.unwrap_or_default(), - task.id = id, - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ) - } - use tracing::instrument::Instrument; - let span = get_span(kind, name, id); - task.instrument(span) - } - - pub(crate) fn async_op(inner: P, resource_span: tracing::Span, source: &str, poll_op_name: &'static str, inherits_child_attrs: bool) -> InstrumentedAsyncOp - where P: FnOnce() -> F { - resource_span.in_scope(|| { - let async_op_span = tracing::trace_span!("runtime.resource.async_op", source = source, inherits_child_attrs = inherits_child_attrs); - let enter = async_op_span.enter(); - let async_op_poll_span = tracing::trace_span!("runtime.resource.async_op.poll"); - let inner = inner(); - drop(enter); - let tracing_ctx = AsyncOpTracingCtx { - async_op_span, - async_op_poll_span, - resource_span: resource_span.clone(), - }; - InstrumentedAsyncOp { - inner, - 
tracing_ctx, - poll_op_name, - } - }) - } - - #[derive(Debug, Clone)] - pub(crate) struct AsyncOpTracingCtx { - pub(crate) async_op_span: tracing::Span, - pub(crate) async_op_poll_span: tracing::Span, - pub(crate) resource_span: tracing::Span, - } - - - pin_project! { - #[derive(Debug, Clone)] - pub(crate) struct InstrumentedAsyncOp { - #[pin] - pub(crate) inner: F, - pub(crate) tracing_ctx: AsyncOpTracingCtx, - pub(crate) poll_op_name: &'static str - } - } - - impl Future for InstrumentedAsyncOp { - type Output = F::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); - let poll_op_name = &*this.poll_op_name; - let _res_enter = this.tracing_ctx.resource_span.enter(); - let _async_op_enter = this.tracing_ctx.async_op_span.enter(); - let _async_op_poll_enter = this.tracing_ctx.async_op_poll_span.enter(); - trace_poll_op!(poll_op_name, this.inner.poll(cx)) - } - } - } -} -cfg_time! { - #[track_caller] - pub(crate) fn caller_location() -> Option<&'static std::panic::Location<'static>> { - #[cfg(all(tokio_unstable, feature = "tracing"))] - return Some(std::panic::Location::caller()); - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - None - } -} - -cfg_not_trace! { - cfg_rt! 
{ - #[inline] - pub(crate) fn task(task: F, _: &'static str, _name: Option<&str>, _: u64) -> F { - // nop - task - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/try_lock.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/try_lock.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/try_lock.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/try_lock.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,80 +0,0 @@ -use crate::loom::sync::atomic::AtomicBool; - -use std::cell::UnsafeCell; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut}; -use std::sync::atomic::Ordering::SeqCst; - -pub(crate) struct TryLock { - locked: AtomicBool, - data: UnsafeCell, -} - -pub(crate) struct LockGuard<'a, T> { - lock: &'a TryLock, - _p: PhantomData>, -} - -unsafe impl Send for TryLock {} -unsafe impl Sync for TryLock {} - -unsafe impl Sync for LockGuard<'_, T> {} - -macro_rules! new { - ($data:ident) => { - TryLock { - locked: AtomicBool::new(false), - data: UnsafeCell::new($data), - } - }; -} - -impl TryLock { - #[cfg(not(loom))] - /// Create a new `TryLock` - pub(crate) const fn new(data: T) -> TryLock { - new!(data) - } - - #[cfg(loom)] - /// Create a new `TryLock` - pub(crate) fn new(data: T) -> TryLock { - new!(data) - } - - /// Attempt to acquire lock - pub(crate) fn try_lock(&self) -> Option> { - if self - .locked - .compare_exchange(false, true, SeqCst, SeqCst) - .is_err() - { - return None; - } - - Some(LockGuard { - lock: self, - _p: PhantomData, - }) - } -} - -impl Deref for LockGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.lock.data.get() } - } -} - -impl DerefMut for LockGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.lock.data.get() } - } -} - -impl Drop for LockGuard<'_, T> { - fn drop(&mut self) { - self.lock.locked.store(false, SeqCst); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/wake_list.rs 
s390-tools-2.33.1/rust-vendor/tokio/src/util/wake_list.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/wake_list.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/wake_list.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,53 +0,0 @@ -use core::mem::MaybeUninit; -use core::ptr; -use std::task::Waker; - -const NUM_WAKERS: usize = 32; - -pub(crate) struct WakeList { - inner: [MaybeUninit; NUM_WAKERS], - curr: usize, -} - -impl WakeList { - pub(crate) fn new() -> Self { - Self { - inner: unsafe { - // safety: Create an uninitialized array of `MaybeUninit`. The - // `assume_init` is safe because the type we are claiming to - // have initialized here is a bunch of `MaybeUninit`s, which do - // not require initialization. - MaybeUninit::uninit().assume_init() - }, - curr: 0, - } - } - - #[inline] - pub(crate) fn can_push(&self) -> bool { - self.curr < NUM_WAKERS - } - - pub(crate) fn push(&mut self, val: Waker) { - debug_assert!(self.can_push()); - - self.inner[self.curr] = MaybeUninit::new(val); - self.curr += 1; - } - - pub(crate) fn wake_all(&mut self) { - assert!(self.curr <= NUM_WAKERS); - while self.curr > 0 { - self.curr -= 1; - let waker = unsafe { ptr::read(self.inner[self.curr].as_mut_ptr()) }; - waker.wake(); - } - } -} - -impl Drop for WakeList { - fn drop(&mut self) { - let slice = ptr::slice_from_raw_parts_mut(self.inner.as_mut_ptr() as *mut Waker, self.curr); - unsafe { ptr::drop_in_place(slice) }; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio/src/util/wake.rs s390-tools-2.33.1/rust-vendor/tokio/src/util/wake.rs --- s390-tools-2.31.0/rust-vendor/tokio/src/util/wake.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio/src/util/wake.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,72 +0,0 @@ -use crate::loom::sync::Arc; - -use std::marker::PhantomData; -use std::mem::ManuallyDrop; -use std::ops::Deref; -use std::task::{RawWaker, RawWakerVTable, Waker}; - -/// Simplified waking interface based 
on Arcs. -pub(crate) trait Wake: Send + Sync + Sized + 'static { - /// Wake by value. - fn wake(arc_self: Arc); - - /// Wake by reference. - fn wake_by_ref(arc_self: &Arc); -} - -/// A `Waker` that is only valid for a given lifetime. -#[derive(Debug)] -pub(crate) struct WakerRef<'a> { - waker: ManuallyDrop, - _p: PhantomData<&'a ()>, -} - -impl Deref for WakerRef<'_> { - type Target = Waker; - - fn deref(&self) -> &Waker { - &self.waker - } -} - -/// Creates a reference to a `Waker` from a reference to `Arc`. -pub(crate) fn waker_ref(wake: &Arc) -> WakerRef<'_> { - let ptr = Arc::as_ptr(wake) as *const (); - - let waker = unsafe { Waker::from_raw(RawWaker::new(ptr, waker_vtable::())) }; - - WakerRef { - waker: ManuallyDrop::new(waker), - _p: PhantomData, - } -} - -fn waker_vtable() -> &'static RawWakerVTable { - &RawWakerVTable::new( - clone_arc_raw::, - wake_arc_raw::, - wake_by_ref_arc_raw::, - drop_arc_raw::, - ) -} - -unsafe fn clone_arc_raw(data: *const ()) -> RawWaker { - Arc::::increment_strong_count(data as *const T); - RawWaker::new(data, waker_vtable::()) -} - -unsafe fn wake_arc_raw(data: *const ()) { - let arc: Arc = Arc::from_raw(data as *const T); - Wake::wake(arc); -} - -// used by `waker_ref` -unsafe fn wake_by_ref_arc_raw(data: *const ()) { - // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop - let arc = ManuallyDrop::new(Arc::::from_raw(data as *const T)); - Wake::wake_by_ref(&arc); -} - -unsafe fn drop_arc_raw(data: *const ()) { - drop(Arc::::from_raw(data as *const T)) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-macros/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/tokio-macros/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/tokio-macros/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-macros/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"} \ No newline 
at end of file diff -Nru s390-tools-2.31.0/rust-vendor/tokio-macros/Cargo.toml s390-tools-2.33.1/rust-vendor/tokio-macros/Cargo.toml --- s390-tools-2.31.0/rust-vendor/tokio-macros/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-macros/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,47 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.56" -name = "tokio-macros" -version = "2.1.0" -authors = ["Tokio Contributors "] -description = """ -Tokio's proc macros. -""" -homepage = "https://tokio.rs" -readme = "README.md" -categories = ["asynchronous"] -license = "MIT" -repository = "https://github.com/tokio-rs/tokio" - -[package.metadata.docs.rs] -all-features = true - -[lib] -proc-macro = true - -[dependencies.proc-macro2] -version = "1.0.7" - -[dependencies.quote] -version = "1" - -[dependencies.syn] -version = "2.0" -features = ["full"] - -[dev-dependencies.tokio] -version = "1.0.0" -features = ["full"] - -[features] diff -Nru s390-tools-2.31.0/rust-vendor/tokio-macros/CHANGELOG.md s390-tools-2.33.1/rust-vendor/tokio-macros/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/tokio-macros/CHANGELOG.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-macros/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,172 +0,0 @@ -# 2.1.0 (April 25th, 2023) - -- macros: fix typo in `#[tokio::test]` docs ([#5636]) -- macros: make entrypoints more efficient ([#5621]) - -[#5621]: https://github.com/tokio-rs/tokio/pull/5621 -[#5636]: 
https://github.com/tokio-rs/tokio/pull/5636 - -# 2.0.0 (March 24th, 2023) - -This major release updates the dependency on the syn crate to 2.0.0, and -increases the MSRV to 1.56. - -As part of this release, we are adopting a policy of depending on a specific minor -release of tokio-macros. This prevents Tokio from being able to pull in many different -versions of tokio-macros. - -- macros: update `syn` ([#5572]) -- macros: accept path as crate rename ([#5557]) - -[#5572]: https://github.com/tokio-rs/tokio/pull/5572 -[#5557]: https://github.com/tokio-rs/tokio/pull/5557 - -# 1.8.2 (November 30th, 2022) - -- fix a regression introduced in 1.8.1 ([#5244]) - -[#5244]: https://github.com/tokio-rs/tokio/pull/5244 - -# 1.8.1 (November 29th, 2022) - -(yanked) - -- macros: Pin Futures in `#[tokio::test]` to stack ([#5205]) -- macros: Reduce usage of last statement spans in proc-macros ([#5092]) -- macros: Improve the documentation for `#[tokio::test]` ([#4761]) - -[#5205]: https://github.com/tokio-rs/tokio/pull/5205 -[#5092]: https://github.com/tokio-rs/tokio/pull/5092 -[#4761]: https://github.com/tokio-rs/tokio/pull/4761 - -# 1.8.0 (June 4th, 2022) - -- macros: always emit return statement ([#4636]) -- macros: support setting a custom crate name for `#[tokio::main]` and `#[tokio::test]` ([#4613]) - -[#4613]: https://github.com/tokio-rs/tokio/pull/4613 -[#4636]: https://github.com/tokio-rs/tokio/pull/4636 - -# 1.7.0 (December 15th, 2021) - -- macros: address remaining `clippy::semicolon_if_nothing_returned` warning ([#4252]) - -[#4252]: https://github.com/tokio-rs/tokio/pull/4252 - -# 1.6.0 (November 16th, 2021) - -- macros: fix mut patterns in `select!` macro ([#4211]) - -[#4211]: https://github.com/tokio-rs/tokio/pull/4211 - -# 1.5.1 (October 29th, 2021) - -- macros: fix type resolution error in `#[tokio::main]` ([#4176]) - -[#4176]: https://github.com/tokio-rs/tokio/pull/4176 - -# 1.5.0 (October 13th, 2021) - -- macros: make tokio-macros attributes more IDE friendly 
([#4162]) - -[#4162]: https://github.com/tokio-rs/tokio/pull/4162 - -# 1.4.1 (September 30th, 2021) - -Reverted: run `current_thread` inside `LocalSet` ([#4027]) - -# 1.4.0 (September 29th, 2021) - -(yanked) - -### Changed - -- macros: run `current_thread` inside `LocalSet` ([#4027]) -- macros: explicitly relaxed clippy lint for `.expect()` in runtime entry macro ([#4030]) - -### Fixed - -- macros: fix invalid error messages in functions wrapped with `#[main]` or `#[test]` ([#4067]) - -[#4027]: https://github.com/tokio-rs/tokio/pull/4027 -[#4030]: https://github.com/tokio-rs/tokio/pull/4030 -[#4067]: https://github.com/tokio-rs/tokio/pull/4067 - -# 1.3.0 (July 7, 2021) - -- macros: don't trigger `clippy::unwrap_used` ([#3926]) - -[#3926]: https://github.com/tokio-rs/tokio/pull/3926 - -# 1.2.0 (May 14, 2021) - -- macros: forward input arguments in `#[tokio::test]` ([#3691]) -- macros: improve diagnostics on type mismatch ([#3766]) -- macros: various error message improvements ([#3677]) - -[#3677]: https://github.com/tokio-rs/tokio/pull/3677 -[#3691]: https://github.com/tokio-rs/tokio/pull/3691 -[#3766]: https://github.com/tokio-rs/tokio/pull/3766 - -# 1.1.0 (February 5, 2021) - -- add `start_paused` option to macros ([#3492]) - -# 1.0.0 (December 23, 2020) - -- track `tokio` 1.0 release. - -# 0.3.1 (October 25, 2020) - -### Fixed - -- fix incorrect docs regarding `max_threads` option ([#3038]) - -# 0.3.0 (October 15, 2020) - -- Track `tokio` 0.3 release. - -### Changed -- options are renamed to track `tokio` runtime builder fn names. -- `#[tokio::main]` macro requires `rt-multi-thread` when no `flavor` is specified. - -# 0.2.5 (February 27, 2019) - -### Fixed -- doc improvements ([#2225]). - -# 0.2.4 (January 27, 2019) - -### Fixed -- generics on `#[tokio::main]` function ([#2177]). - -### Added -- support for `tokio::select!` ([#2152]). - -# 0.2.3 (January 7, 2019) - -### Fixed -- Revert breaking change. 
- -# 0.2.2 (January 7, 2019) - -### Added -- General refactoring and inclusion of additional runtime options ([#2022] and [#2038]) - -# 0.2.1 (December 18, 2019) - -### Fixes -- inherit visibility when wrapping async fn ([#1954]). - -# 0.2.0 (November 26, 2019) - -- Initial release - -[#1954]: https://github.com/tokio-rs/tokio/pull/1954 -[#2022]: https://github.com/tokio-rs/tokio/pull/2022 -[#2038]: https://github.com/tokio-rs/tokio/pull/2038 -[#2152]: https://github.com/tokio-rs/tokio/pull/2152 -[#2177]: https://github.com/tokio-rs/tokio/pull/2177 -[#2225]: https://github.com/tokio-rs/tokio/pull/2225 -[#3038]: https://github.com/tokio-rs/tokio/pull/3038 -[#3492]: https://github.com/tokio-rs/tokio/pull/3492 diff -Nru s390-tools-2.31.0/rust-vendor/tokio-macros/LICENSE s390-tools-2.33.1/rust-vendor/tokio-macros/LICENSE --- s390-tools-2.31.0/rust-vendor/tokio-macros/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-macros/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,47 +0,0 @@ -Copyright (c) 2023 Tokio Contributors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - -The MIT License (MIT) - -Copyright (c) 2019 Yoshua Wuyts - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/tokio-macros/README.md s390-tools-2.33.1/rust-vendor/tokio-macros/README.md --- s390-tools-2.31.0/rust-vendor/tokio-macros/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-macros/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -# Tokio Macros - -Procedural macros for use with Tokio - -## License - -This project is licensed under the [MIT license](LICENSE). 
- -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in Tokio by you, shall be licensed as MIT, without any additional -terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/tokio-macros/src/entry.rs s390-tools-2.33.1/rust-vendor/tokio-macros/src/entry.rs --- s390-tools-2.31.0/rust-vendor/tokio-macros/src/entry.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-macros/src/entry.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,591 +0,0 @@ -use proc_macro2::{Span, TokenStream, TokenTree}; -use quote::{quote, quote_spanned, ToTokens}; -use syn::parse::{Parse, ParseStream, Parser}; -use syn::{braced, Attribute, Ident, Path, Signature, Visibility}; - -// syn::AttributeArgs does not implement syn::Parse -type AttributeArgs = syn::punctuated::Punctuated; - -#[derive(Clone, Copy, PartialEq)] -enum RuntimeFlavor { - CurrentThread, - Threaded, -} - -impl RuntimeFlavor { - fn from_str(s: &str) -> Result { - match s { - "current_thread" => Ok(RuntimeFlavor::CurrentThread), - "multi_thread" => Ok(RuntimeFlavor::Threaded), - "single_thread" => Err("The single threaded runtime flavor is called `current_thread`.".to_string()), - "basic_scheduler" => Err("The `basic_scheduler` runtime flavor has been renamed to `current_thread`.".to_string()), - "threaded_scheduler" => Err("The `threaded_scheduler` runtime flavor has been renamed to `multi_thread`.".to_string()), - _ => Err(format!("No such runtime flavor `{}`. 
The runtime flavors are `current_thread` and `multi_thread`.", s)), - } - } -} - -struct FinalConfig { - flavor: RuntimeFlavor, - worker_threads: Option, - start_paused: Option, - crate_name: Option, -} - -/// Config used in case of the attribute not being able to build a valid config -const DEFAULT_ERROR_CONFIG: FinalConfig = FinalConfig { - flavor: RuntimeFlavor::CurrentThread, - worker_threads: None, - start_paused: None, - crate_name: None, -}; - -struct Configuration { - rt_multi_thread_available: bool, - default_flavor: RuntimeFlavor, - flavor: Option, - worker_threads: Option<(usize, Span)>, - start_paused: Option<(bool, Span)>, - is_test: bool, - crate_name: Option, -} - -impl Configuration { - fn new(is_test: bool, rt_multi_thread: bool) -> Self { - Configuration { - rt_multi_thread_available: rt_multi_thread, - default_flavor: match is_test { - true => RuntimeFlavor::CurrentThread, - false => RuntimeFlavor::Threaded, - }, - flavor: None, - worker_threads: None, - start_paused: None, - is_test, - crate_name: None, - } - } - - fn set_flavor(&mut self, runtime: syn::Lit, span: Span) -> Result<(), syn::Error> { - if self.flavor.is_some() { - return Err(syn::Error::new(span, "`flavor` set multiple times.")); - } - - let runtime_str = parse_string(runtime, span, "flavor")?; - let runtime = - RuntimeFlavor::from_str(&runtime_str).map_err(|err| syn::Error::new(span, err))?; - self.flavor = Some(runtime); - Ok(()) - } - - fn set_worker_threads( - &mut self, - worker_threads: syn::Lit, - span: Span, - ) -> Result<(), syn::Error> { - if self.worker_threads.is_some() { - return Err(syn::Error::new( - span, - "`worker_threads` set multiple times.", - )); - } - - let worker_threads = parse_int(worker_threads, span, "worker_threads")?; - if worker_threads == 0 { - return Err(syn::Error::new(span, "`worker_threads` may not be 0.")); - } - self.worker_threads = Some((worker_threads, span)); - Ok(()) - } - - fn set_start_paused(&mut self, start_paused: syn::Lit, span: 
Span) -> Result<(), syn::Error> { - if self.start_paused.is_some() { - return Err(syn::Error::new(span, "`start_paused` set multiple times.")); - } - - let start_paused = parse_bool(start_paused, span, "start_paused")?; - self.start_paused = Some((start_paused, span)); - Ok(()) - } - - fn set_crate_name(&mut self, name: syn::Lit, span: Span) -> Result<(), syn::Error> { - if self.crate_name.is_some() { - return Err(syn::Error::new(span, "`crate` set multiple times.")); - } - let name_path = parse_path(name, span, "crate")?; - self.crate_name = Some(name_path); - Ok(()) - } - - fn macro_name(&self) -> &'static str { - if self.is_test { - "tokio::test" - } else { - "tokio::main" - } - } - - fn build(&self) -> Result { - let flavor = self.flavor.unwrap_or(self.default_flavor); - use RuntimeFlavor::*; - - let worker_threads = match (flavor, self.worker_threads) { - (CurrentThread, Some((_, worker_threads_span))) => { - let msg = format!( - "The `worker_threads` option requires the `multi_thread` runtime flavor. Use `#[{}(flavor = \"multi_thread\")]`", - self.macro_name(), - ); - return Err(syn::Error::new(worker_threads_span, msg)); - } - (CurrentThread, None) => None, - (Threaded, worker_threads) if self.rt_multi_thread_available => { - worker_threads.map(|(val, _span)| val) - } - (Threaded, _) => { - let msg = if self.flavor.is_none() { - "The default runtime flavor is `multi_thread`, but the `rt-multi-thread` feature is disabled." - } else { - "The runtime flavor `multi_thread` requires the `rt-multi-thread` feature." - }; - return Err(syn::Error::new(Span::call_site(), msg)); - } - }; - - let start_paused = match (flavor, self.start_paused) { - (Threaded, Some((_, start_paused_span))) => { - let msg = format!( - "The `start_paused` option requires the `current_thread` runtime flavor. 
Use `#[{}(flavor = \"current_thread\")]`", - self.macro_name(), - ); - return Err(syn::Error::new(start_paused_span, msg)); - } - (CurrentThread, Some((start_paused, _))) => Some(start_paused), - (_, None) => None, - }; - - Ok(FinalConfig { - crate_name: self.crate_name.clone(), - flavor, - worker_threads, - start_paused, - }) - } -} - -fn parse_int(int: syn::Lit, span: Span, field: &str) -> Result { - match int { - syn::Lit::Int(lit) => match lit.base10_parse::() { - Ok(value) => Ok(value), - Err(e) => Err(syn::Error::new( - span, - format!("Failed to parse value of `{}` as integer: {}", field, e), - )), - }, - _ => Err(syn::Error::new( - span, - format!("Failed to parse value of `{}` as integer.", field), - )), - } -} - -fn parse_string(int: syn::Lit, span: Span, field: &str) -> Result { - match int { - syn::Lit::Str(s) => Ok(s.value()), - syn::Lit::Verbatim(s) => Ok(s.to_string()), - _ => Err(syn::Error::new( - span, - format!("Failed to parse value of `{}` as string.", field), - )), - } -} - -fn parse_path(lit: syn::Lit, span: Span, field: &str) -> Result { - match lit { - syn::Lit::Str(s) => { - let err = syn::Error::new( - span, - format!( - "Failed to parse value of `{}` as path: \"{}\"", - field, - s.value() - ), - ); - s.parse::().map_err(|_| err.clone()) - } - _ => Err(syn::Error::new( - span, - format!("Failed to parse value of `{}` as path.", field), - )), - } -} - -fn parse_bool(bool: syn::Lit, span: Span, field: &str) -> Result { - match bool { - syn::Lit::Bool(b) => Ok(b.value), - _ => Err(syn::Error::new( - span, - format!("Failed to parse value of `{}` as bool.", field), - )), - } -} - -fn build_config( - input: &ItemFn, - args: AttributeArgs, - is_test: bool, - rt_multi_thread: bool, -) -> Result { - if input.sig.asyncness.is_none() { - let msg = "the `async` keyword is missing from the function declaration"; - return Err(syn::Error::new_spanned(input.sig.fn_token, msg)); - } - - let mut config = Configuration::new(is_test, rt_multi_thread); - let 
macro_name = config.macro_name(); - - for arg in args { - match arg { - syn::Meta::NameValue(namevalue) => { - let ident = namevalue - .path - .get_ident() - .ok_or_else(|| { - syn::Error::new_spanned(&namevalue, "Must have specified ident") - })? - .to_string() - .to_lowercase(); - let lit = match &namevalue.value { - syn::Expr::Lit(syn::ExprLit { lit, .. }) => lit, - expr => return Err(syn::Error::new_spanned(expr, "Must be a literal")), - }; - match ident.as_str() { - "worker_threads" => { - config.set_worker_threads(lit.clone(), syn::spanned::Spanned::span(lit))?; - } - "flavor" => { - config.set_flavor(lit.clone(), syn::spanned::Spanned::span(lit))?; - } - "start_paused" => { - config.set_start_paused(lit.clone(), syn::spanned::Spanned::span(lit))?; - } - "core_threads" => { - let msg = "Attribute `core_threads` is renamed to `worker_threads`"; - return Err(syn::Error::new_spanned(namevalue, msg)); - } - "crate" => { - config.set_crate_name(lit.clone(), syn::spanned::Spanned::span(lit))?; - } - name => { - let msg = format!( - "Unknown attribute {} is specified; expected one of: `flavor`, `worker_threads`, `start_paused`, `crate`", - name, - ); - return Err(syn::Error::new_spanned(namevalue, msg)); - } - } - } - syn::Meta::Path(path) => { - let name = path - .get_ident() - .ok_or_else(|| syn::Error::new_spanned(&path, "Must have specified ident"))? 
- .to_string() - .to_lowercase(); - let msg = match name.as_str() { - "threaded_scheduler" | "multi_thread" => { - format!( - "Set the runtime flavor with #[{}(flavor = \"multi_thread\")].", - macro_name - ) - } - "basic_scheduler" | "current_thread" | "single_threaded" => { - format!( - "Set the runtime flavor with #[{}(flavor = \"current_thread\")].", - macro_name - ) - } - "flavor" | "worker_threads" | "start_paused" => { - format!("The `{}` attribute requires an argument.", name) - } - name => { - format!("Unknown attribute {} is specified; expected one of: `flavor`, `worker_threads`, `start_paused`, `crate`", name) - } - }; - return Err(syn::Error::new_spanned(path, msg)); - } - other => { - return Err(syn::Error::new_spanned( - other, - "Unknown attribute inside the macro", - )); - } - } - } - - config.build() -} - -fn parse_knobs(mut input: ItemFn, is_test: bool, config: FinalConfig) -> TokenStream { - input.sig.asyncness = None; - - // If type mismatch occurs, the current rustc points to the last statement. - let (last_stmt_start_span, last_stmt_end_span) = { - let mut last_stmt = input.stmts.last().cloned().unwrap_or_default().into_iter(); - - // `Span` on stable Rust has a limitation that only points to the first - // token, not the whole tokens. We can work around this limitation by - // using the first/last span of the tokens like - // `syn::Error::new_spanned` does. - let start = last_stmt.next().map_or_else(Span::call_site, |t| t.span()); - let end = last_stmt.last().map_or(start, |t| t.span()); - (start, end) - }; - - let crate_path = config - .crate_name - .map(ToTokens::into_token_stream) - .unwrap_or_else(|| Ident::new("tokio", last_stmt_start_span).into_token_stream()); - - let mut rt = match config.flavor { - RuntimeFlavor::CurrentThread => quote_spanned! {last_stmt_start_span=> - #crate_path::runtime::Builder::new_current_thread() - }, - RuntimeFlavor::Threaded => quote_spanned! 
{last_stmt_start_span=> - #crate_path::runtime::Builder::new_multi_thread() - }, - }; - if let Some(v) = config.worker_threads { - rt = quote! { #rt.worker_threads(#v) }; - } - if let Some(v) = config.start_paused { - rt = quote! { #rt.start_paused(#v) }; - } - - let header = if is_test { - quote! { - #[::core::prelude::v1::test] - } - } else { - quote! {} - }; - - let body_ident = quote! { body }; - let last_block = quote_spanned! {last_stmt_end_span=> - #[allow(clippy::expect_used, clippy::diverging_sub_expression)] - { - return #rt - .enable_all() - .build() - .expect("Failed building the Runtime") - .block_on(#body_ident); - } - }; - - let body = input.body(); - - // For test functions pin the body to the stack and use `Pin<&mut dyn - // Future>` to reduce the amount of `Runtime::block_on` (and related - // functions) copies we generate during compilation due to the generic - // parameter `F` (the future to block on). This could have an impact on - // performance, but because it's only for testing it's unlikely to be very - // large. - // - // We don't do this for the main function as it should only be used once so - // there will be no benefit. - let body = if is_test { - let output_type = match &input.sig.output { - // For functions with no return value syn doesn't print anything, - // but that doesn't work as `Output` for our boxed `Future`, so - // default to `()` (the same type as the function output). - syn::ReturnType::Default => quote! { () }, - syn::ReturnType::Type(_, ret_type) => quote! { #ret_type }, - }; - quote! { - let body = async #body; - #crate_path::pin!(body); - let body: ::std::pin::Pin<&mut dyn ::std::future::Future> = body; - } - } else { - quote! 
{ - let body = async #body; - } - }; - - input.into_tokens(header, body, last_block) -} - -fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenStream { - tokens.extend(error.into_compile_error()); - tokens -} - -#[cfg(not(test))] // Work around for rust-lang/rust#62127 -pub(crate) fn main(args: TokenStream, item: TokenStream, rt_multi_thread: bool) -> TokenStream { - // If any of the steps for this macro fail, we still want to expand to an item that is as close - // to the expected output as possible. This helps out IDEs such that completions and other - // related features keep working. - let input: ItemFn = match syn::parse2(item.clone()) { - Ok(it) => it, - Err(e) => return token_stream_with_error(item, e), - }; - - let config = if input.sig.ident == "main" && !input.sig.inputs.is_empty() { - let msg = "the main function cannot accept arguments"; - Err(syn::Error::new_spanned(&input.sig.ident, msg)) - } else { - AttributeArgs::parse_terminated - .parse2(args) - .and_then(|args| build_config(&input, args, false, rt_multi_thread)) - }; - - match config { - Ok(config) => parse_knobs(input, false, config), - Err(e) => token_stream_with_error(parse_knobs(input, false, DEFAULT_ERROR_CONFIG), e), - } -} - -pub(crate) fn test(args: TokenStream, item: TokenStream, rt_multi_thread: bool) -> TokenStream { - // If any of the steps for this macro fail, we still want to expand to an item that is as close - // to the expected output as possible. This helps out IDEs such that completions and other - // related features keep working. 
- let input: ItemFn = match syn::parse2(item.clone()) { - Ok(it) => it, - Err(e) => return token_stream_with_error(item, e), - }; - let config = if let Some(attr) = input.attrs().find(|attr| attr.meta.path().is_ident("test")) { - let msg = "second test attribute is supplied"; - Err(syn::Error::new_spanned(attr, msg)) - } else { - AttributeArgs::parse_terminated - .parse2(args) - .and_then(|args| build_config(&input, args, true, rt_multi_thread)) - }; - - match config { - Ok(config) => parse_knobs(input, true, config), - Err(e) => token_stream_with_error(parse_knobs(input, true, DEFAULT_ERROR_CONFIG), e), - } -} - -struct ItemFn { - outer_attrs: Vec, - vis: Visibility, - sig: Signature, - brace_token: syn::token::Brace, - inner_attrs: Vec, - stmts: Vec, -} - -impl ItemFn { - /// Access all attributes of the function item. - fn attrs(&self) -> impl Iterator { - self.outer_attrs.iter().chain(self.inner_attrs.iter()) - } - - /// Get the body of the function item in a manner so that it can be - /// conveniently used with the `quote!` macro. - fn body(&self) -> Body<'_> { - Body { - brace_token: self.brace_token, - stmts: &self.stmts, - } - } - - /// Convert our local function item into a token stream. - fn into_tokens( - self, - header: proc_macro2::TokenStream, - body: proc_macro2::TokenStream, - last_block: proc_macro2::TokenStream, - ) -> TokenStream { - let mut tokens = proc_macro2::TokenStream::new(); - header.to_tokens(&mut tokens); - - // Outer attributes are simply streamed as-is. - for attr in self.outer_attrs { - attr.to_tokens(&mut tokens); - } - - // Inner attributes require extra care, since they're not supported on - // blocks (which is what we're expanded into) we instead lift them - // outside of the function. This matches the behaviour of `syn`. 
- for mut attr in self.inner_attrs { - attr.style = syn::AttrStyle::Outer; - attr.to_tokens(&mut tokens); - } - - self.vis.to_tokens(&mut tokens); - self.sig.to_tokens(&mut tokens); - - self.brace_token.surround(&mut tokens, |tokens| { - body.to_tokens(tokens); - last_block.to_tokens(tokens); - }); - - tokens - } -} - -impl Parse for ItemFn { - #[inline] - fn parse(input: ParseStream<'_>) -> syn::Result { - // This parse implementation has been largely lifted from `syn`, with - // the exception of: - // * We don't have access to the plumbing necessary to parse inner - // attributes in-place. - // * We do our own statements parsing to avoid recursively parsing - // entire statements and only look for the parts we're interested in. - - let outer_attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let sig: Signature = input.parse()?; - - let content; - let brace_token = braced!(content in input); - let inner_attrs = Attribute::parse_inner(&content)?; - - let mut buf = proc_macro2::TokenStream::new(); - let mut stmts = Vec::new(); - - while !content.is_empty() { - if let Some(semi) = content.parse::>()? { - semi.to_tokens(&mut buf); - stmts.push(buf); - buf = proc_macro2::TokenStream::new(); - continue; - } - - // Parse a single token tree and extend our current buffer with it. - // This avoids parsing the entire content of the sub-tree. - buf.extend([content.parse::()?]); - } - - if !buf.is_empty() { - stmts.push(buf); - } - - Ok(Self { - outer_attrs, - vis, - sig, - brace_token, - inner_attrs, - stmts, - }) - } -} - -struct Body<'a> { - brace_token: syn::token::Brace, - // Statements, with terminating `;`. 
- stmts: &'a [TokenStream], -} - -impl ToTokens for Body<'_> { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - self.brace_token.surround(tokens, |tokens| { - for stmt in self.stmts { - stmt.to_tokens(tokens); - } - }) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-macros/src/lib.rs s390-tools-2.33.1/rust-vendor/tokio-macros/src/lib.rs --- s390-tools-2.31.0/rust-vendor/tokio-macros/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-macros/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,489 +0,0 @@ -#![allow(clippy::needless_doctest_main)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - unreachable_pub -)] -#![doc(test( - no_crate_inject, - attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) -))] - -//! Macros for use with Tokio - -// This `extern` is required for older `rustc` versions but newer `rustc` -// versions warn about the unused `extern crate`. -#[allow(unused_extern_crates)] -extern crate proc_macro; - -mod entry; -mod select; - -use proc_macro::TokenStream; - -/// Marks async function to be executed by the selected runtime. This macro -/// helps set up a `Runtime` without requiring the user to use -/// [Runtime](../tokio/runtime/struct.Runtime.html) or -/// [Builder](../tokio/runtime/struct.Builder.html) directly. -/// -/// Note: This macro is designed to be simplistic and targets applications that -/// do not require a complex setup. If the provided functionality is not -/// sufficient, you may be interested in using -/// [Builder](../tokio/runtime/struct.Builder.html), which provides a more -/// powerful interface. -/// -/// Note: This macro can be used on any function and not just the `main` -/// function. Using it on a non-main function makes the function behave as if it -/// was synchronous by starting a new runtime each time it is called. 
If the -/// function is called often, it is preferable to create the runtime using the -/// runtime builder so the runtime can be reused across calls. -/// -/// # Non-worker async function -/// -/// Note that the async function marked with this macro does not run as a -/// worker. The expectation is that other tasks are spawned by the function here. -/// Awaiting on other futures from the function provided here will not -/// perform as fast as those spawned as workers. -/// -/// # Multi-threaded runtime -/// -/// To use the multi-threaded runtime, the macro can be configured using -/// -/// ``` -/// #[tokio::main(flavor = "multi_thread", worker_threads = 10)] -/// # async fn main() {} -/// ``` -/// -/// The `worker_threads` option configures the number of worker threads, and -/// defaults to the number of cpus on the system. This is the default flavor. -/// -/// Note: The multi-threaded runtime requires the `rt-multi-thread` feature -/// flag. -/// -/// # Current thread runtime -/// -/// To use the single-threaded runtime known as the `current_thread` runtime, -/// the macro can be configured using -/// -/// ``` -/// #[tokio::main(flavor = "current_thread")] -/// # async fn main() {} -/// ``` -/// -/// ## Function arguments: -/// -/// Arguments are allowed for any functions aside from `main` which is special -/// -/// ## Usage -/// -/// ### Using the multi-thread runtime -/// -/// ```rust -/// #[tokio::main] -/// async fn main() { -/// println!("Hello world"); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::main]` -/// -/// ```rust -/// fn main() { -/// tokio::runtime::Builder::new_multi_thread() -/// .enable_all() -/// .build() -/// .unwrap() -/// .block_on(async { -/// println!("Hello world"); -/// }) -/// } -/// ``` -/// -/// ### Using current thread runtime -/// -/// The basic scheduler is single-threaded. 
-/// -/// ```rust -/// #[tokio::main(flavor = "current_thread")] -/// async fn main() { -/// println!("Hello world"); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::main]` -/// -/// ```rust -/// fn main() { -/// tokio::runtime::Builder::new_current_thread() -/// .enable_all() -/// .build() -/// .unwrap() -/// .block_on(async { -/// println!("Hello world"); -/// }) -/// } -/// ``` -/// -/// ### Set number of worker threads -/// -/// ```rust -/// #[tokio::main(worker_threads = 2)] -/// async fn main() { -/// println!("Hello world"); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::main]` -/// -/// ```rust -/// fn main() { -/// tokio::runtime::Builder::new_multi_thread() -/// .worker_threads(2) -/// .enable_all() -/// .build() -/// .unwrap() -/// .block_on(async { -/// println!("Hello world"); -/// }) -/// } -/// ``` -/// -/// ### Configure the runtime to start with time paused -/// -/// ```rust -/// #[tokio::main(flavor = "current_thread", start_paused = true)] -/// async fn main() { -/// println!("Hello world"); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::main]` -/// -/// ```rust -/// fn main() { -/// tokio::runtime::Builder::new_current_thread() -/// .enable_all() -/// .start_paused(true) -/// .build() -/// .unwrap() -/// .block_on(async { -/// println!("Hello world"); -/// }) -/// } -/// ``` -/// -/// Note that `start_paused` requires the `test-util` feature to be enabled. 
-/// -/// ### Rename package -/// -/// ```rust -/// use tokio as tokio1; -/// -/// #[tokio1::main(crate = "tokio1")] -/// async fn main() { -/// println!("Hello world"); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::main]` -/// -/// ```rust -/// use tokio as tokio1; -/// -/// fn main() { -/// tokio1::runtime::Builder::new_multi_thread() -/// .enable_all() -/// .build() -/// .unwrap() -/// .block_on(async { -/// println!("Hello world"); -/// }) -/// } -/// ``` -#[proc_macro_attribute] -#[cfg(not(test))] // Work around for rust-lang/rust#62127 -pub fn main(args: TokenStream, item: TokenStream) -> TokenStream { - entry::main(args.into(), item.into(), true).into() -} - -/// Marks async function to be executed by selected runtime. This macro helps set up a `Runtime` -/// without requiring the user to use [Runtime](../tokio/runtime/struct.Runtime.html) or -/// [Builder](../tokio/runtime/struct.builder.html) directly. -/// -/// ## Function arguments: -/// -/// Arguments are allowed for any functions aside from `main` which is special -/// -/// ## Usage -/// -/// ### Using default -/// -/// ```rust -/// #[tokio::main(flavor = "current_thread")] -/// async fn main() { -/// println!("Hello world"); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::main]` -/// -/// ```rust -/// fn main() { -/// tokio::runtime::Builder::new_current_thread() -/// .enable_all() -/// .build() -/// .unwrap() -/// .block_on(async { -/// println!("Hello world"); -/// }) -/// } -/// ``` -/// -/// ### Rename package -/// -/// ```rust -/// use tokio as tokio1; -/// -/// #[tokio1::main(crate = "tokio1")] -/// async fn main() { -/// println!("Hello world"); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::main]` -/// -/// ```rust -/// use tokio as tokio1; -/// -/// fn main() { -/// tokio1::runtime::Builder::new_multi_thread() -/// .enable_all() -/// .build() -/// .unwrap() -/// .block_on(async { -/// println!("Hello world"); -/// }) -/// } -/// ``` 
-#[proc_macro_attribute] -#[cfg(not(test))] // Work around for rust-lang/rust#62127 -pub fn main_rt(args: TokenStream, item: TokenStream) -> TokenStream { - entry::main(args.into(), item.into(), false).into() -} - -/// Marks async function to be executed by runtime, suitable to test environment. -/// This macro helps set up a `Runtime` without requiring the user to use -/// [Runtime](../tokio/runtime/struct.Runtime.html) or -/// [Builder](../tokio/runtime/struct.Builder.html) directly. -/// -/// Note: This macro is designed to be simplistic and targets applications that -/// do not require a complex setup. If the provided functionality is not -/// sufficient, you may be interested in using -/// [Builder](../tokio/runtime/struct.Builder.html), which provides a more -/// powerful interface. -/// -/// # Multi-threaded runtime -/// -/// To use the multi-threaded runtime, the macro can be configured using -/// -/// ```no_run -/// #[tokio::test(flavor = "multi_thread", worker_threads = 1)] -/// async fn my_test() { -/// assert!(true); -/// } -/// ``` -/// -/// The `worker_threads` option configures the number of worker threads, and -/// defaults to the number of cpus on the system. -/// -/// Note: The multi-threaded runtime requires the `rt-multi-thread` feature -/// flag. -/// -/// # Current thread runtime -/// -/// The default test runtime is single-threaded. Each test gets a -/// separate current-thread runtime. 
-/// -/// ```no_run -/// #[tokio::test] -/// async fn my_test() { -/// assert!(true); -/// } -/// ``` -/// -/// ## Usage -/// -/// ### Using the multi-thread runtime -/// -/// ```no_run -/// #[tokio::test(flavor = "multi_thread")] -/// async fn my_test() { -/// assert!(true); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::test]` -/// -/// ```no_run -/// #[test] -/// fn my_test() { -/// tokio::runtime::Builder::new_multi_thread() -/// .enable_all() -/// .build() -/// .unwrap() -/// .block_on(async { -/// assert!(true); -/// }) -/// } -/// ``` -/// -/// ### Using current thread runtime -/// -/// ```no_run -/// #[tokio::test] -/// async fn my_test() { -/// assert!(true); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::test]` -/// -/// ```no_run -/// #[test] -/// fn my_test() { -/// tokio::runtime::Builder::new_current_thread() -/// .enable_all() -/// .build() -/// .unwrap() -/// .block_on(async { -/// assert!(true); -/// }) -/// } -/// ``` -/// -/// ### Set number of worker threads -/// -/// ```no_run -/// #[tokio::test(flavor ="multi_thread", worker_threads = 2)] -/// async fn my_test() { -/// assert!(true); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::test]` -/// -/// ```no_run -/// #[test] -/// fn my_test() { -/// tokio::runtime::Builder::new_multi_thread() -/// .worker_threads(2) -/// .enable_all() -/// .build() -/// .unwrap() -/// .block_on(async { -/// assert!(true); -/// }) -/// } -/// ``` -/// -/// ### Configure the runtime to start with time paused -/// -/// ```no_run -/// #[tokio::test(start_paused = true)] -/// async fn my_test() { -/// assert!(true); -/// } -/// ``` -/// -/// Equivalent code not using `#[tokio::test]` -/// -/// ```no_run -/// #[test] -/// fn my_test() { -/// tokio::runtime::Builder::new_current_thread() -/// .enable_all() -/// .start_paused(true) -/// .build() -/// .unwrap() -/// .block_on(async { -/// assert!(true); -/// }) -/// } -/// ``` -/// -/// Note that `start_paused` requires the 
`test-util` feature to be enabled. -/// -/// ### Rename package -/// -/// ```rust -/// use tokio as tokio1; -/// -/// #[tokio1::test(crate = "tokio1")] -/// async fn my_test() { -/// println!("Hello world"); -/// } -/// ``` -#[proc_macro_attribute] -pub fn test(args: TokenStream, item: TokenStream) -> TokenStream { - entry::test(args.into(), item.into(), true).into() -} - -/// Marks async function to be executed by runtime, suitable to test environment -/// -/// ## Usage -/// -/// ```no_run -/// #[tokio::test] -/// async fn my_test() { -/// assert!(true); -/// } -/// ``` -#[proc_macro_attribute] -pub fn test_rt(args: TokenStream, item: TokenStream) -> TokenStream { - entry::test(args.into(), item.into(), false).into() -} - -/// Always fails with the error message below. -/// ```text -/// The #[tokio::main] macro requires rt or rt-multi-thread. -/// ``` -#[proc_macro_attribute] -pub fn main_fail(_args: TokenStream, _item: TokenStream) -> TokenStream { - syn::Error::new( - proc_macro2::Span::call_site(), - "The #[tokio::main] macro requires rt or rt-multi-thread.", - ) - .to_compile_error() - .into() -} - -/// Always fails with the error message below. -/// ```text -/// The #[tokio::test] macro requires rt or rt-multi-thread. -/// ``` -#[proc_macro_attribute] -pub fn test_fail(_args: TokenStream, _item: TokenStream) -> TokenStream { - syn::Error::new( - proc_macro2::Span::call_site(), - "The #[tokio::test] macro requires rt or rt-multi-thread.", - ) - .to_compile_error() - .into() -} - -/// Implementation detail of the `select!` macro. This macro is **not** intended -/// to be used as part of the public API and is permitted to change. -#[proc_macro] -#[doc(hidden)] -pub fn select_priv_declare_output_enum(input: TokenStream) -> TokenStream { - select::declare_output_enum(input) -} - -/// Implementation detail of the `select!` macro. This macro is **not** intended -/// to be used as part of the public API and is permitted to change. 
-#[proc_macro] -#[doc(hidden)] -pub fn select_priv_clean_pattern(input: TokenStream) -> TokenStream { - select::clean_pattern_macro(input) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-macros/src/select.rs s390-tools-2.33.1/rust-vendor/tokio-macros/src/select.rs --- s390-tools-2.31.0/rust-vendor/tokio-macros/src/select.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-macros/src/select.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,109 +0,0 @@ -use proc_macro::{TokenStream, TokenTree}; -use proc_macro2::Span; -use quote::quote; -use syn::{parse::Parser, Ident}; - -pub(crate) fn declare_output_enum(input: TokenStream) -> TokenStream { - // passed in is: `(_ _ _)` with one `_` per branch - let branches = match input.into_iter().next() { - Some(TokenTree::Group(group)) => group.stream().into_iter().count(), - _ => panic!("unexpected macro input"), - }; - - let variants = (0..branches) - .map(|num| Ident::new(&format!("_{}", num), Span::call_site())) - .collect::>(); - - // Use a bitfield to track which futures completed - let mask = Ident::new( - if branches <= 8 { - "u8" - } else if branches <= 16 { - "u16" - } else if branches <= 32 { - "u32" - } else if branches <= 64 { - "u64" - } else { - panic!("up to 64 branches supported"); - }, - Span::call_site(), - ); - - TokenStream::from(quote! { - pub(super) enum Out<#( #variants ),*> { - #( #variants(#variants), )* - // Include a `Disabled` variant signifying that all select branches - // failed to resolve. - Disabled, - } - - pub(super) type Mask = #mask; - }) -} - -pub(crate) fn clean_pattern_macro(input: TokenStream) -> TokenStream { - // If this isn't a pattern, we return the token stream as-is. The select! - // macro is using it in a location requiring a pattern, so an error will be - // emitted there. 
- let mut input: syn::Pat = match syn::Pat::parse_single.parse(input.clone()) { - Ok(it) => it, - Err(_) => return input, - }; - - clean_pattern(&mut input); - quote::ToTokens::into_token_stream(input).into() -} - -// Removes any occurrences of ref or mut in the provided pattern. -fn clean_pattern(pat: &mut syn::Pat) { - match pat { - syn::Pat::Lit(_literal) => {} - syn::Pat::Macro(_macro) => {} - syn::Pat::Path(_path) => {} - syn::Pat::Range(_range) => {} - syn::Pat::Rest(_rest) => {} - syn::Pat::Verbatim(_tokens) => {} - syn::Pat::Wild(_underscore) => {} - syn::Pat::Ident(ident) => { - ident.by_ref = None; - ident.mutability = None; - if let Some((_at, pat)) = &mut ident.subpat { - clean_pattern(&mut *pat); - } - } - syn::Pat::Or(or) => { - for case in or.cases.iter_mut() { - clean_pattern(case); - } - } - syn::Pat::Slice(slice) => { - for elem in slice.elems.iter_mut() { - clean_pattern(elem); - } - } - syn::Pat::Struct(struct_pat) => { - for field in struct_pat.fields.iter_mut() { - clean_pattern(&mut field.pat); - } - } - syn::Pat::Tuple(tuple) => { - for elem in tuple.elems.iter_mut() { - clean_pattern(elem); - } - } - syn::Pat::TupleStruct(tuple) => { - for elem in tuple.elems.iter_mut() { - clean_pattern(elem); - } - } - syn::Pat::Reference(reference) => { - reference.mutability = None; - clean_pattern(&mut reference.pat); - } - syn::Pat::Type(type_pat) => { - clean_pattern(&mut type_pat.pat); - } - _ => {} - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/tokio-util/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/tokio-util/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/Cargo.toml 
s390-tools-2.33.1/rust-vendor/tokio-util/Cargo.toml --- s390-tools-2.31.0/rust-vendor/tokio-util/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,134 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.56" -name = "tokio-util" -version = "0.7.10" -authors = ["Tokio Contributors "] -description = """ -Additional utilities for working with Tokio. -""" -homepage = "https://tokio.rs" -readme = "README.md" -categories = ["asynchronous"] -license = "MIT" -repository = "https://github.com/tokio-rs/tokio" - -[package.metadata.docs.rs] -all-features = true -rustc-args = [ - "--cfg", - "docsrs", - "--cfg", - "tokio_unstable", -] -rustdoc-args = [ - "--cfg", - "docsrs", - "--cfg", - "tokio_unstable", -] - -[dependencies.bytes] -version = "1.0.0" - -[dependencies.futures-core] -version = "0.3.0" - -[dependencies.futures-io] -version = "0.3.0" -optional = true - -[dependencies.futures-sink] -version = "0.3.0" - -[dependencies.futures-util] -version = "0.3.0" -optional = true - -[dependencies.pin-project-lite] -version = "0.2.11" - -[dependencies.slab] -version = "0.4.4" -optional = true - -[dependencies.tokio] -version = "1.28.0" -features = ["sync"] - -[dependencies.tracing] -version = "0.1.25" -features = ["std"] -optional = true -default-features = false - -[dev-dependencies.async-stream] -version = "0.3.0" - -[dev-dependencies.futures] -version = "0.3.0" - -[dev-dependencies.futures-test] -version = "0.3.5" 
- -[dev-dependencies.parking_lot] -version = "0.12.0" - -[dev-dependencies.tempfile] -version = "3.1.0" - -[dev-dependencies.tokio] -version = "1.0.0" -features = ["full"] - -[dev-dependencies.tokio-stream] -version = "0.1" - -[dev-dependencies.tokio-test] -version = "0.4.0" - -[features] -__docs_rs = ["futures-util"] -codec = ["tracing"] -compat = ["futures-io"] -default = [] -full = [ - "codec", - "compat", - "io-util", - "time", - "net", - "rt", -] -io = [] -io-util = [ - "io", - "tokio/rt", - "tokio/io-util", -] -net = ["tokio/net"] -rt = [ - "tokio/rt", - "tokio/sync", - "futures-util", - "hashbrown", -] -time = [ - "tokio/time", - "slab", -] - -[target."cfg(tokio_unstable)".dependencies.hashbrown] -version = "0.14.0" -optional = true diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/CHANGELOG.md s390-tools-2.33.1/rust-vendor/tokio-util/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/tokio-util/CHANGELOG.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,406 +0,0 @@ -# 0.7.10 (October 24th, 2023) - -### Added - -- task: add `TaskTracker` ([#6033]) -- task: add `JoinMap::keys` ([#6046]) -- io: implement `Seek` for `SyncIoBridge` ([#6058]) - -### Changed - -- deps: update hashbrown to 0.14 ([#6102]) - -[#6033]: https://github.com/tokio-rs/tokio/pull/6033 -[#6046]: https://github.com/tokio-rs/tokio/pull/6046 -[#6058]: https://github.com/tokio-rs/tokio/pull/6058 -[#6102]: https://github.com/tokio-rs/tokio/pull/6102 - -# 0.7.9 (September 20th, 2023) - -### Added - -- io: add passthrough `AsyncRead`/`AsyncWrite` to `InspectWriter`/`InspectReader` ([#5739]) -- task: add spawn blocking methods to `JoinMap` ([#5797]) -- io: pass through traits for `StreamReader` and `SinkWriter` ([#5941]) -- io: add `SyncIoBridge::into_inner` ([#5971]) - -### Fixed - -- sync: handle possibly dangling reference safely ([#5812]) -- util: fix broken intra-doc link ([#5849]) -- compat: fix clippy 
warnings ([#5891]) - -### Documented - -- codec: Specify the line ending of `LinesCodec` ([#5982]) - -[#5739]: https://github.com/tokio-rs/tokio/pull/5739 -[#5797]: https://github.com/tokio-rs/tokio/pull/5797 -[#5941]: https://github.com/tokio-rs/tokio/pull/5941 -[#5971]: https://github.com/tokio-rs/tokio/pull/5971 -[#5812]: https://github.com/tokio-rs/tokio/pull/5812 -[#5849]: https://github.com/tokio-rs/tokio/pull/5849 -[#5891]: https://github.com/tokio-rs/tokio/pull/5891 -[#5982]: https://github.com/tokio-rs/tokio/pull/5982 - -# 0.7.8 (April 25th, 2023) - -This release bumps the MSRV of tokio-util to 1.56. - -### Added - -- time: add `DelayQueue::peek` ([#5569]) - -### Changed - -This release contains one performance improvement: - -- sync: try to lock the parent first in `CancellationToken` ([#5561]) - -### Fixed - -- time: fix panic in `DelayQueue` ([#5630]) - -### Documented - -- sync: improve `CancellationToken` doc on child tokens ([#5632]) - -[#5561]: https://github.com/tokio-rs/tokio/pull/5561 -[#5569]: https://github.com/tokio-rs/tokio/pull/5569 -[#5630]: https://github.com/tokio-rs/tokio/pull/5630 -[#5632]: https://github.com/tokio-rs/tokio/pull/5632 - -# 0.7.7 (February 12, 2023) - -This release reverts the removal of the `Encoder` bound on the `FramedParts` -constructor from [#5280] since it turned out to be a breaking change. ([#5450]) - -[#5450]: https://github.com/tokio-rs/tokio/pull/5450 - -# 0.7.6 (February 10, 2023) - -This release fixes a compilation failure in 0.7.5 when it is used together with -Tokio version 1.21 and unstable features are enabled. ([#5445]) - -[#5445]: https://github.com/tokio-rs/tokio/pull/5445 - -# 0.7.5 (February 9, 2023) - -This release fixes an accidental breaking change where `UnwindSafe` was -accidentally removed from `CancellationToken`. 
- -### Added -- codec: add `Framed::backpressure_boundary` ([#5124]) -- io: add `InspectReader` and `InspectWriter` ([#5033]) -- io: add `tokio_util::io::{CopyToBytes, SinkWriter}` ([#5070], [#5436]) -- io: impl `std::io::BufRead` on `SyncIoBridge` ([#5265]) -- sync: add `PollSemaphore::poll_acquire_many` ([#5137]) -- sync: add owned future for `CancellationToken` ([#5153]) -- time: add `DelayQueue::try_remove` ([#5052]) - -### Fixed -- codec: fix `LengthDelimitedCodec` buffer over-reservation ([#4997]) -- sync: impl `UnwindSafe` on `CancellationToken` ([#5438]) -- util: remove `Encoder` bound on `FramedParts` constructor ([#5280]) - -### Documented -- io: add lines example for `StreamReader` ([#5145]) - -[#4997]: https://github.com/tokio-rs/tokio/pull/4997 -[#5033]: https://github.com/tokio-rs/tokio/pull/5033 -[#5052]: https://github.com/tokio-rs/tokio/pull/5052 -[#5070]: https://github.com/tokio-rs/tokio/pull/5070 -[#5124]: https://github.com/tokio-rs/tokio/pull/5124 -[#5137]: https://github.com/tokio-rs/tokio/pull/5137 -[#5145]: https://github.com/tokio-rs/tokio/pull/5145 -[#5153]: https://github.com/tokio-rs/tokio/pull/5153 -[#5265]: https://github.com/tokio-rs/tokio/pull/5265 -[#5280]: https://github.com/tokio-rs/tokio/pull/5280 -[#5436]: https://github.com/tokio-rs/tokio/pull/5436 -[#5438]: https://github.com/tokio-rs/tokio/pull/5438 - -# 0.7.4 (September 8, 2022) - -### Added - -- io: add `SyncIoBridge::shutdown()` ([#4938]) -- task: improve `LocalPoolHandle` ([#4680]) - -### Fixed - -- util: add `track_caller` to public APIs ([#4785]) - -### Unstable - -- task: fix compilation errors in `JoinMap` with Tokio v1.21.0 ([#4755]) -- task: remove the unstable, deprecated `JoinMap::join_one` ([#4920]) - -[#4680]: https://github.com/tokio-rs/tokio/pull/4680 -[#4755]: https://github.com/tokio-rs/tokio/pull/4755 -[#4785]: https://github.com/tokio-rs/tokio/pull/4785 -[#4920]: https://github.com/tokio-rs/tokio/pull/4920 -[#4938]: 
https://github.com/tokio-rs/tokio/pull/4938 - -# 0.7.3 (June 4, 2022) - -### Changed - -- tracing: don't require default tracing features ([#4592]) -- util: simplify implementation of `ReusableBoxFuture` ([#4675]) - -### Added (unstable) - -- task: add `JoinMap` ([#4640], [#4697]) - -[#4592]: https://github.com/tokio-rs/tokio/pull/4592 -[#4640]: https://github.com/tokio-rs/tokio/pull/4640 -[#4675]: https://github.com/tokio-rs/tokio/pull/4675 -[#4697]: https://github.com/tokio-rs/tokio/pull/4697 - -# 0.7.2 (May 14, 2022) - -This release contains a rewrite of `CancellationToken` that fixes a memory leak. ([#4652]) - -[#4652]: https://github.com/tokio-rs/tokio/pull/4652 - -# 0.7.1 (February 21, 2022) - -### Added - -- codec: add `length_field_type` to `LengthDelimitedCodec` builder ([#4508]) -- io: add `StreamReader::into_inner_with_chunk()` ([#4559]) - -### Changed - -- switch from log to tracing ([#4539]) - -### Fixed - -- sync: fix waker update condition in `CancellationToken` ([#4497]) -- bumped tokio dependency to 1.6 to satisfy minimum requirements ([#4490]) - -[#4490]: https://github.com/tokio-rs/tokio/pull/4490 -[#4497]: https://github.com/tokio-rs/tokio/pull/4497 -[#4508]: https://github.com/tokio-rs/tokio/pull/4508 -[#4539]: https://github.com/tokio-rs/tokio/pull/4539 -[#4559]: https://github.com/tokio-rs/tokio/pull/4559 - -# 0.7.0 (February 9, 2022) - -### Added - -- task: add `spawn_pinned` ([#3370]) -- time: add `shrink_to_fit` and `compact` methods to `DelayQueue` ([#4170]) -- codec: improve `Builder::max_frame_length` docs ([#4352]) -- codec: add mutable reference getters for codecs to pinned `Framed` ([#4372]) -- net: add generic trait to combine `UnixListener` and `TcpListener` ([#4385]) -- codec: implement `Framed::map_codec` ([#4427]) -- codec: implement `Encoder` for `BytesCodec` ([#4465]) - -### Changed - -- sync: add lifetime parameter to `ReusableBoxFuture` ([#3762]) -- sync: refactored `PollSender` to fix a subtly broken `Sink` implementation 
([#4214]) -- time: remove error case from the infallible `DelayQueue::poll_elapsed` ([#4241]) - -[#3370]: https://github.com/tokio-rs/tokio/pull/3370 -[#4170]: https://github.com/tokio-rs/tokio/pull/4170 -[#4352]: https://github.com/tokio-rs/tokio/pull/4352 -[#4372]: https://github.com/tokio-rs/tokio/pull/4372 -[#4385]: https://github.com/tokio-rs/tokio/pull/4385 -[#4427]: https://github.com/tokio-rs/tokio/pull/4427 -[#4465]: https://github.com/tokio-rs/tokio/pull/4465 -[#3762]: https://github.com/tokio-rs/tokio/pull/3762 -[#4214]: https://github.com/tokio-rs/tokio/pull/4214 -[#4241]: https://github.com/tokio-rs/tokio/pull/4241 - -# 0.6.10 (May 14, 2021) - -This is a backport for the memory leak in `CancellationToken` that was originally fixed in 0.7.2. ([#4652]) - -[#4652]: https://github.com/tokio-rs/tokio/pull/4652 - -# 0.6.9 (October 29, 2021) - -### Added - -- codec: implement `Clone` for `LengthDelimitedCodec` ([#4089]) -- io: add `SyncIoBridge` ([#4146]) - -### Fixed - -- time: update deadline on removal in `DelayQueue` ([#4178]) -- codec: Update stream impl for Framed to return None after Err ([#4166]) - -[#4089]: https://github.com/tokio-rs/tokio/pull/4089 -[#4146]: https://github.com/tokio-rs/tokio/pull/4146 -[#4166]: https://github.com/tokio-rs/tokio/pull/4166 -[#4178]: https://github.com/tokio-rs/tokio/pull/4178 - -# 0.6.8 (September 3, 2021) - -### Added - -- sync: add drop guard for `CancellationToken` ([#3839]) -- compact: added `AsyncSeek` compat ([#4078]) -- time: expose `Key` used in `DelayQueue`'s `Expired` ([#4081]) -- io: add `with_capacity` to `ReaderStream` ([#4086]) - -### Fixed - -- codec: remove unnecessary `doc(cfg(...))` ([#3989]) - -[#3839]: https://github.com/tokio-rs/tokio/pull/3839 -[#4078]: https://github.com/tokio-rs/tokio/pull/4078 -[#4081]: https://github.com/tokio-rs/tokio/pull/4081 -[#4086]: https://github.com/tokio-rs/tokio/pull/4086 -[#3989]: https://github.com/tokio-rs/tokio/pull/3989 - -# 0.6.7 (May 14, 2021) - -### Added - 
-- udp: make `UdpFramed` take `Borrow` ([#3451]) -- compat: implement `AsRawFd`/`AsRawHandle` for `Compat` ([#3765]) - -[#3451]: https://github.com/tokio-rs/tokio/pull/3451 -[#3765]: https://github.com/tokio-rs/tokio/pull/3765 - -# 0.6.6 (April 12, 2021) - -### Added - -- util: makes `Framed` and `FramedStream` resumable after eof ([#3272]) -- util: add `PollSemaphore::{add_permits, available_permits}` ([#3683]) - -### Fixed - -- chore: avoid allocation if `PollSemaphore` is unused ([#3634]) - -[#3272]: https://github.com/tokio-rs/tokio/pull/3272 -[#3634]: https://github.com/tokio-rs/tokio/pull/3634 -[#3683]: https://github.com/tokio-rs/tokio/pull/3683 - -# 0.6.5 (March 20, 2021) - -### Fixed - -- util: annotate time module as requiring `time` feature ([#3606]) - -[#3606]: https://github.com/tokio-rs/tokio/pull/3606 - -# 0.6.4 (March 9, 2021) - -### Added - -- codec: `AnyDelimiter` codec ([#3406]) -- sync: add pollable `mpsc::Sender` ([#3490]) - -### Fixed - -- codec: `LinesCodec` should only return `MaxLineLengthExceeded` once per line ([#3556]) -- sync: fuse PollSemaphore ([#3578]) - -[#3406]: https://github.com/tokio-rs/tokio/pull/3406 -[#3490]: https://github.com/tokio-rs/tokio/pull/3490 -[#3556]: https://github.com/tokio-rs/tokio/pull/3556 -[#3578]: https://github.com/tokio-rs/tokio/pull/3578 - -# 0.6.3 (January 31, 2021) - -### Added - -- sync: add `ReusableBoxFuture` utility ([#3464]) - -### Changed - -- sync: use `ReusableBoxFuture` for `PollSemaphore` ([#3463]) -- deps: remove `async-stream` dependency ([#3463]) -- deps: remove `tokio-stream` dependency ([#3487]) - -# 0.6.2 (January 21, 2021) - -### Added - -- sync: add pollable `Semaphore` ([#3444]) - -### Fixed - -- time: fix panics on updating `DelayQueue` entries ([#3270]) - -# 0.6.1 (January 12, 2021) - -### Added - -- codec: `get_ref()`, `get_mut()`, `get_pin_mut()` and `into_inner()` for - `Framed`, `FramedRead`, `FramedWrite` and `StreamReader` ([#3364]). 
-- codec: `write_buffer()` and `write_buffer_mut()` for `Framed` and - `FramedWrite` ([#3387]). - -# 0.6.0 (December 23, 2020) - -### Changed -- depend on `tokio` 1.0. - -### Added -- rt: add constructors to `TokioContext` (#3221). - -# 0.5.1 (December 3, 2020) - -### Added -- io: `poll_read_buf` util fn (#2972). -- io: `poll_write_buf` util fn with vectored write support (#3156). - -# 0.5.0 (October 30, 2020) - -### Changed -- io: update `bytes` to 0.6 (#3071). - -# 0.4.0 (October 15, 2020) - -### Added -- sync: `CancellationToken` for coordinating task cancellation (#2747). -- rt: `TokioContext` sets the Tokio runtime for the duration of a future (#2791) -- io: `StreamReader`/`ReaderStream` map between `AsyncRead` values and `Stream` - of bytes (#2788). -- time: `DelayQueue` to manage many delays (#2897). - -# 0.3.1 (March 18, 2020) - -### Fixed - -- Adjust minimum-supported Tokio version to v0.2.5 to account for an internal - dependency on features in that version of Tokio. ([#2326]) - -# 0.3.0 (March 4, 2020) - -### Changed - -- **Breaking Change**: Change `Encoder` trait to take a generic `Item` parameter, which allows - codec writers to pass references into `Framed` and `FramedWrite` types. ([#1746]) - -### Added - -- Add futures-io/tokio::io compatibility layer. ([#2117]) -- Add `Framed::with_capacity`. ([#2215]) - -### Fixed - -- Use advance over split_to when data is not needed. 
([#2198]) - -# 0.2.0 (November 26, 2019) - -- Initial release - -[#3487]: https://github.com/tokio-rs/tokio/pull/3487 -[#3464]: https://github.com/tokio-rs/tokio/pull/3464 -[#3463]: https://github.com/tokio-rs/tokio/pull/3463 -[#3444]: https://github.com/tokio-rs/tokio/pull/3444 -[#3387]: https://github.com/tokio-rs/tokio/pull/3387 -[#3364]: https://github.com/tokio-rs/tokio/pull/3364 -[#3270]: https://github.com/tokio-rs/tokio/pull/3270 -[#2326]: https://github.com/tokio-rs/tokio/pull/2326 -[#2215]: https://github.com/tokio-rs/tokio/pull/2215 -[#2198]: https://github.com/tokio-rs/tokio/pull/2198 -[#2117]: https://github.com/tokio-rs/tokio/pull/2117 -[#1746]: https://github.com/tokio-rs/tokio/pull/1746 diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/LICENSE s390-tools-2.33.1/rust-vendor/tokio-util/LICENSE --- s390-tools-2.31.0/rust-vendor/tokio-util/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2023 Tokio Contributors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/README.md s390-tools-2.33.1/rust-vendor/tokio-util/README.md --- s390-tools-2.31.0/rust-vendor/tokio-util/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -# tokio-util - -Utilities for working with Tokio. - -## License - -This project is licensed under the [MIT license](LICENSE). - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in Tokio by you, shall be licensed as MIT, without any additional -terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/cfg.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/cfg.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/cfg.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/cfg.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,71 +0,0 @@ -macro_rules! cfg_codec { - ($($item:item)*) => { - $( - #[cfg(feature = "codec")] - #[cfg_attr(docsrs, doc(cfg(feature = "codec")))] - $item - )* - } -} - -macro_rules! cfg_compat { - ($($item:item)*) => { - $( - #[cfg(feature = "compat")] - #[cfg_attr(docsrs, doc(cfg(feature = "compat")))] - $item - )* - } -} - -macro_rules! cfg_net { - ($($item:item)*) => { - $( - #[cfg(all(feature = "net", feature = "codec"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "net", feature = "codec"))))] - $item - )* - } -} - -macro_rules! cfg_io { - ($($item:item)*) => { - $( - #[cfg(feature = "io")] - #[cfg_attr(docsrs, doc(cfg(feature = "io")))] - $item - )* - } -} - -cfg_io! { - macro_rules! 
cfg_io_util { - ($($item:item)*) => { - $( - #[cfg(feature = "io-util")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] - $item - )* - } - } -} - -macro_rules! cfg_rt { - ($($item:item)*) => { - $( - #[cfg(feature = "rt")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - $item - )* - } -} - -macro_rules! cfg_time { - ($($item:item)*) => { - $( - #[cfg(feature = "time")] - #[cfg_attr(docsrs, doc(cfg(feature = "time")))] - $item - )* - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/any_delimiter_codec.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/any_delimiter_codec.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/any_delimiter_codec.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/any_delimiter_codec.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,263 +0,0 @@ -use crate::codec::decoder::Decoder; -use crate::codec::encoder::Encoder; - -use bytes::{Buf, BufMut, Bytes, BytesMut}; -use std::{cmp, fmt, io, str, usize}; - -const DEFAULT_SEEK_DELIMITERS: &[u8] = b",;\n\r"; -const DEFAULT_SEQUENCE_WRITER: &[u8] = b","; -/// A simple [`Decoder`] and [`Encoder`] implementation that splits up data into chunks based on any character in the given delimiter string. -/// -/// [`Decoder`]: crate::codec::Decoder -/// [`Encoder`]: crate::codec::Encoder -/// -/// # Example -/// Decode string of bytes containing various different delimiters. 
-/// -/// [`BytesMut`]: bytes::BytesMut -/// [`Error`]: std::io::Error -/// -/// ``` -/// use tokio_util::codec::{AnyDelimiterCodec, Decoder}; -/// use bytes::{BufMut, BytesMut}; -/// -/// # -/// # #[tokio::main(flavor = "current_thread")] -/// # async fn main() -> Result<(), std::io::Error> { -/// let mut codec = AnyDelimiterCodec::new(b",;\r\n".to_vec(),b";".to_vec()); -/// let buf = &mut BytesMut::new(); -/// buf.reserve(200); -/// buf.put_slice(b"chunk 1,chunk 2;chunk 3\n\r"); -/// assert_eq!("chunk 1", codec.decode(buf).unwrap().unwrap()); -/// assert_eq!("chunk 2", codec.decode(buf).unwrap().unwrap()); -/// assert_eq!("chunk 3", codec.decode(buf).unwrap().unwrap()); -/// assert_eq!("", codec.decode(buf).unwrap().unwrap()); -/// assert_eq!(None, codec.decode(buf).unwrap()); -/// # Ok(()) -/// # } -/// ``` -/// -#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct AnyDelimiterCodec { - // Stored index of the next index to examine for the delimiter character. - // This is used to optimize searching. - // For example, if `decode` was called with `abc` and the delimiter is '{}', it would hold `3`, - // because that is the next index to examine. - // The next time `decode` is called with `abcde}`, the method will - // only look at `de}` before returning. - next_index: usize, - - /// The maximum length for a given chunk. If `usize::MAX`, chunks will be - /// read until a delimiter character is reached. - max_length: usize, - - /// Are we currently discarding the remainder of a chunk which was over - /// the length limit? - is_discarding: bool, - - /// The bytes that are using for search during decode - seek_delimiters: Vec, - - /// The bytes that are using for encoding - sequence_writer: Vec, -} - -impl AnyDelimiterCodec { - /// Returns a `AnyDelimiterCodec` for splitting up data into chunks. - /// - /// # Note - /// - /// The returned `AnyDelimiterCodec` will not have an upper bound on the length - /// of a buffered chunk. 
See the documentation for [`new_with_max_length`] - /// for information on why this could be a potential security risk. - /// - /// [`new_with_max_length`]: crate::codec::AnyDelimiterCodec::new_with_max_length() - pub fn new(seek_delimiters: Vec, sequence_writer: Vec) -> AnyDelimiterCodec { - AnyDelimiterCodec { - next_index: 0, - max_length: usize::MAX, - is_discarding: false, - seek_delimiters, - sequence_writer, - } - } - - /// Returns a `AnyDelimiterCodec` with a maximum chunk length limit. - /// - /// If this is set, calls to `AnyDelimiterCodec::decode` will return a - /// [`AnyDelimiterCodecError`] when a chunk exceeds the length limit. Subsequent calls - /// will discard up to `limit` bytes from that chunk until a delimiter - /// character is reached, returning `None` until the delimiter over the limit - /// has been fully discarded. After that point, calls to `decode` will - /// function as normal. - /// - /// # Note - /// - /// Setting a length limit is highly recommended for any `AnyDelimiterCodec` which - /// will be exposed to untrusted input. Otherwise, the size of the buffer - /// that holds the chunk currently being read is unbounded. An attacker could - /// exploit this unbounded buffer by sending an unbounded amount of input - /// without any delimiter characters, causing unbounded memory consumption. - /// - /// [`AnyDelimiterCodecError`]: crate::codec::AnyDelimiterCodecError - pub fn new_with_max_length( - seek_delimiters: Vec, - sequence_writer: Vec, - max_length: usize, - ) -> Self { - AnyDelimiterCodec { - max_length, - ..AnyDelimiterCodec::new(seek_delimiters, sequence_writer) - } - } - - /// Returns the maximum chunk length when decoding. 
- /// - /// ``` - /// use std::usize; - /// use tokio_util::codec::AnyDelimiterCodec; - /// - /// let codec = AnyDelimiterCodec::new(b",;\n".to_vec(), b";".to_vec()); - /// assert_eq!(codec.max_length(), usize::MAX); - /// ``` - /// ``` - /// use tokio_util::codec::AnyDelimiterCodec; - /// - /// let codec = AnyDelimiterCodec::new_with_max_length(b",;\n".to_vec(), b";".to_vec(), 256); - /// assert_eq!(codec.max_length(), 256); - /// ``` - pub fn max_length(&self) -> usize { - self.max_length - } -} - -impl Decoder for AnyDelimiterCodec { - type Item = Bytes; - type Error = AnyDelimiterCodecError; - - fn decode(&mut self, buf: &mut BytesMut) -> Result, AnyDelimiterCodecError> { - loop { - // Determine how far into the buffer we'll search for a delimiter. If - // there's no max_length set, we'll read to the end of the buffer. - let read_to = cmp::min(self.max_length.saturating_add(1), buf.len()); - - let new_chunk_offset = buf[self.next_index..read_to].iter().position(|b| { - self.seek_delimiters - .iter() - .any(|delimiter| *b == *delimiter) - }); - - match (self.is_discarding, new_chunk_offset) { - (true, Some(offset)) => { - // If we found a new chunk, discard up to that offset and - // then stop discarding. On the next iteration, we'll try - // to read a chunk normally. - buf.advance(offset + self.next_index + 1); - self.is_discarding = false; - self.next_index = 0; - } - (true, None) => { - // Otherwise, we didn't find a new chunk, so we'll discard - // everything we read. On the next iteration, we'll continue - // discarding up to max_len bytes unless we find a new chunk. - buf.advance(read_to); - self.next_index = 0; - if buf.is_empty() { - return Ok(None); - } - } - (false, Some(offset)) => { - // Found a chunk! 
- let new_chunk_index = offset + self.next_index; - self.next_index = 0; - let mut chunk = buf.split_to(new_chunk_index + 1); - chunk.truncate(chunk.len() - 1); - let chunk = chunk.freeze(); - return Ok(Some(chunk)); - } - (false, None) if buf.len() > self.max_length => { - // Reached the maximum length without finding a - // new chunk, return an error and start discarding on the - // next call. - self.is_discarding = true; - return Err(AnyDelimiterCodecError::MaxChunkLengthExceeded); - } - (false, None) => { - // We didn't find a chunk or reach the length limit, so the next - // call will resume searching at the current offset. - self.next_index = read_to; - return Ok(None); - } - } - } - } - - fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, AnyDelimiterCodecError> { - Ok(match self.decode(buf)? { - Some(frame) => Some(frame), - None => { - // return remaining data, if any - if buf.is_empty() { - None - } else { - let chunk = buf.split_to(buf.len()); - self.next_index = 0; - Some(chunk.freeze()) - } - } - }) - } -} - -impl Encoder for AnyDelimiterCodec -where - T: AsRef, -{ - type Error = AnyDelimiterCodecError; - - fn encode(&mut self, chunk: T, buf: &mut BytesMut) -> Result<(), AnyDelimiterCodecError> { - let chunk = chunk.as_ref(); - buf.reserve(chunk.len() + 1); - buf.put(chunk.as_bytes()); - buf.put(self.sequence_writer.as_ref()); - - Ok(()) - } -} - -impl Default for AnyDelimiterCodec { - fn default() -> Self { - Self::new( - DEFAULT_SEEK_DELIMITERS.to_vec(), - DEFAULT_SEQUENCE_WRITER.to_vec(), - ) - } -} - -/// An error occurred while encoding or decoding a chunk. -#[derive(Debug)] -pub enum AnyDelimiterCodecError { - /// The maximum chunk length was exceeded. - MaxChunkLengthExceeded, - /// An IO error occurred. 
- Io(io::Error), -} - -impl fmt::Display for AnyDelimiterCodecError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - AnyDelimiterCodecError::MaxChunkLengthExceeded => { - write!(f, "max chunk length exceeded") - } - AnyDelimiterCodecError::Io(e) => write!(f, "{}", e), - } - } -} - -impl From for AnyDelimiterCodecError { - fn from(e: io::Error) -> AnyDelimiterCodecError { - AnyDelimiterCodecError::Io(e) - } -} - -impl std::error::Error for AnyDelimiterCodecError {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/bytes_codec.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/bytes_codec.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/bytes_codec.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/bytes_codec.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,86 +0,0 @@ -use crate::codec::decoder::Decoder; -use crate::codec::encoder::Encoder; - -use bytes::{BufMut, Bytes, BytesMut}; -use std::io; - -/// A simple [`Decoder`] and [`Encoder`] implementation that just ships bytes around. -/// -/// [`Decoder`]: crate::codec::Decoder -/// [`Encoder`]: crate::codec::Encoder -/// -/// # Example -/// -/// Turn an [`AsyncRead`] into a stream of `Result<`[`BytesMut`]`, `[`Error`]`>`. 
-/// -/// [`AsyncRead`]: tokio::io::AsyncRead -/// [`BytesMut`]: bytes::BytesMut -/// [`Error`]: std::io::Error -/// -/// ``` -/// # mod hidden { -/// # #[allow(unused_imports)] -/// use tokio::fs::File; -/// # } -/// use tokio::io::AsyncRead; -/// use tokio_util::codec::{FramedRead, BytesCodec}; -/// -/// # enum File {} -/// # impl File { -/// # async fn open(_name: &str) -> Result { -/// # use std::io::Cursor; -/// # Ok(Cursor::new(vec![0, 1, 2, 3, 4, 5])) -/// # } -/// # } -/// # -/// # #[tokio::main(flavor = "current_thread")] -/// # async fn main() -> Result<(), std::io::Error> { -/// let my_async_read = File::open("filename.txt").await?; -/// let my_stream_of_bytes = FramedRead::new(my_async_read, BytesCodec::new()); -/// # Ok(()) -/// # } -/// ``` -/// -#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Default)] -pub struct BytesCodec(()); - -impl BytesCodec { - /// Creates a new `BytesCodec` for shipping around raw bytes. - pub fn new() -> BytesCodec { - BytesCodec(()) - } -} - -impl Decoder for BytesCodec { - type Item = BytesMut; - type Error = io::Error; - - fn decode(&mut self, buf: &mut BytesMut) -> Result, io::Error> { - if !buf.is_empty() { - let len = buf.len(); - Ok(Some(buf.split_to(len))) - } else { - Ok(None) - } - } -} - -impl Encoder for BytesCodec { - type Error = io::Error; - - fn encode(&mut self, data: Bytes, buf: &mut BytesMut) -> Result<(), io::Error> { - buf.reserve(data.len()); - buf.put(data); - Ok(()) - } -} - -impl Encoder for BytesCodec { - type Error = io::Error; - - fn encode(&mut self, data: BytesMut, buf: &mut BytesMut) -> Result<(), io::Error> { - buf.reserve(data.len()); - buf.put(data); - Ok(()) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/decoder.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/decoder.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/decoder.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/decoder.rs 
1970-01-01 01:00:00.000000000 +0100 @@ -1,184 +0,0 @@ -use crate::codec::Framed; - -use tokio::io::{AsyncRead, AsyncWrite}; - -use bytes::BytesMut; -use std::io; - -/// Decoding of frames via buffers. -/// -/// This trait is used when constructing an instance of [`Framed`] or -/// [`FramedRead`]. An implementation of `Decoder` takes a byte stream that has -/// already been buffered in `src` and decodes the data into a stream of -/// `Self::Item` frames. -/// -/// Implementations are able to track state on `self`, which enables -/// implementing stateful streaming parsers. In many cases, though, this type -/// will simply be a unit struct (e.g. `struct HttpDecoder`). -/// -/// For some underlying data-sources, namely files and FIFOs, -/// it's possible to temporarily read 0 bytes by reaching EOF. -/// -/// In these cases `decode_eof` will be called until it signals -/// fulfillment of all closing frames by returning `Ok(None)`. -/// After that, repeated attempts to read from the [`Framed`] or [`FramedRead`] -/// will not invoke `decode` or `decode_eof` again, until data can be read -/// during a retry. -/// -/// It is up to the Decoder to keep track of a restart after an EOF, -/// and to decide how to handle such an event by, for example, -/// allowing frames to cross EOF boundaries, re-emitting opening frames, or -/// resetting the entire internal state. -/// -/// [`Framed`]: crate::codec::Framed -/// [`FramedRead`]: crate::codec::FramedRead -pub trait Decoder { - /// The type of decoded frames. - type Item; - - /// The type of unrecoverable frame decoding errors. - /// - /// If an individual message is ill-formed but can be ignored without - /// interfering with the processing of future messages, it may be more - /// useful to report the failure as an `Item`. 
- /// - /// `From` is required in the interest of making `Error` suitable - /// for returning directly from a [`FramedRead`], and to enable the default - /// implementation of `decode_eof` to yield an `io::Error` when the decoder - /// fails to consume all available data. - /// - /// Note that implementors of this trait can simply indicate `type Error = - /// io::Error` to use I/O errors as this type. - /// - /// [`FramedRead`]: crate::codec::FramedRead - type Error: From; - - /// Attempts to decode a frame from the provided buffer of bytes. - /// - /// This method is called by [`FramedRead`] whenever bytes are ready to be - /// parsed. The provided buffer of bytes is what's been read so far, and - /// this instance of `Decode` can determine whether an entire frame is in - /// the buffer and is ready to be returned. - /// - /// If an entire frame is available, then this instance will remove those - /// bytes from the buffer provided and return them as a decoded - /// frame. Note that removing bytes from the provided buffer doesn't always - /// necessarily copy the bytes, so this should be an efficient operation in - /// most circumstances. - /// - /// If the bytes look valid, but a frame isn't fully available yet, then - /// `Ok(None)` is returned. This indicates to the [`Framed`] instance that - /// it needs to read some more bytes before calling this method again. - /// - /// Note that the bytes provided may be empty. If a previous call to - /// `decode` consumed all the bytes in the buffer then `decode` will be - /// called again until it returns `Ok(None)`, indicating that more bytes need to - /// be read. - /// - /// Finally, if the bytes in the buffer are malformed then an error is - /// returned indicating why. This informs [`Framed`] that the stream is now - /// corrupt and should be terminated. 
- /// - /// [`Framed`]: crate::codec::Framed - /// [`FramedRead`]: crate::codec::FramedRead - /// - /// # Buffer management - /// - /// Before returning from the function, implementations should ensure that - /// the buffer has appropriate capacity in anticipation of future calls to - /// `decode`. Failing to do so leads to inefficiency. - /// - /// For example, if frames have a fixed length, or if the length of the - /// current frame is known from a header, a possible buffer management - /// strategy is: - /// - /// ```no_run - /// # use std::io; - /// # - /// # use bytes::BytesMut; - /// # use tokio_util::codec::Decoder; - /// # - /// # struct MyCodec; - /// # - /// impl Decoder for MyCodec { - /// // ... - /// # type Item = BytesMut; - /// # type Error = io::Error; - /// - /// fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - /// // ... - /// - /// // Reserve enough to complete decoding of the current frame. - /// let current_frame_len: usize = 1000; // Example. - /// // And to start decoding the next frame. - /// let next_frame_header_len: usize = 10; // Example. - /// src.reserve(current_frame_len + next_frame_header_len); - /// - /// return Ok(None); - /// } - /// } - /// ``` - /// - /// An optimal buffer management strategy minimizes reallocations and - /// over-allocations. - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error>; - - /// A default method available to be called when there are no more bytes - /// available to be read from the underlying I/O. - /// - /// This method defaults to calling `decode` and returns an error if - /// `Ok(None)` is returned while there is unconsumed data in `buf`. - /// Typically this doesn't need to be implemented unless the framing - /// protocol differs near the end of the stream, or if you need to construct - /// frames _across_ eof boundaries on sources that can be resumed. - /// - /// Note that the `buf` argument may be empty. 
If a previous call to - /// `decode_eof` consumed all the bytes in the buffer, `decode_eof` will be - /// called again until it returns `None`, indicating that there are no more - /// frames to yield. This behavior enables returning finalization frames - /// that may not be based on inbound data. - /// - /// Once `None` has been returned, `decode_eof` won't be called again until - /// an attempt to resume the stream has been made, where the underlying stream - /// actually returned more data. - fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { - match self.decode(buf)? { - Some(frame) => Ok(Some(frame)), - None => { - if buf.is_empty() { - Ok(None) - } else { - Err(io::Error::new(io::ErrorKind::Other, "bytes remaining on stream").into()) - } - } - } - } - - /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this - /// `Io` object, using `Decode` and `Encode` to read and write the raw data. - /// - /// Raw I/O objects work with byte sequences, but higher-level code usually - /// wants to batch these into meaningful chunks, called "frames". This - /// method layers framing on top of an I/O object, by using the `Codec` - /// traits to handle encoding and decoding of messages frames. Note that - /// the incoming and outgoing frame types may be distinct. - /// - /// This function returns a *single* object that is both `Stream` and - /// `Sink`; grouping this into a single object is often useful for layering - /// things like gzip or TLS, which require both read and write access to the - /// underlying object. - /// - /// If you want to work more directly with the streams and sink, consider - /// calling `split` on the [`Framed`] returned by this method, which will - /// break them into separate objects, allowing them to interact more easily. 
- /// - /// [`Stream`]: futures_core::Stream - /// [`Sink`]: futures_sink::Sink - /// [`Framed`]: crate::codec::Framed - fn framed(self, io: T) -> Framed - where - Self: Sized, - { - Framed::new(io, self) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/encoder.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/encoder.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/encoder.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/encoder.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -use bytes::BytesMut; -use std::io; - -/// Trait of helper objects to write out messages as bytes, for use with -/// [`FramedWrite`]. -/// -/// [`FramedWrite`]: crate::codec::FramedWrite -pub trait Encoder { - /// The type of encoding errors. - /// - /// [`FramedWrite`] requires `Encoder`s errors to implement `From` - /// in the interest letting it return `Error`s directly. - /// - /// [`FramedWrite`]: crate::codec::FramedWrite - type Error: From; - - /// Encodes a frame into the buffer provided. - /// - /// This method will encode `item` into the byte buffer provided by `dst`. - /// The `dst` provided is an internal buffer of the [`FramedWrite`] instance and - /// will be written out when possible. 
- /// - /// [`FramedWrite`]: crate::codec::FramedWrite - fn encode(&mut self, item: Item, dst: &mut BytesMut) -> Result<(), Self::Error>; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/framed_impl.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/framed_impl.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/framed_impl.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/framed_impl.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,312 +0,0 @@ -use crate::codec::decoder::Decoder; -use crate::codec::encoder::Encoder; - -use futures_core::Stream; -use tokio::io::{AsyncRead, AsyncWrite}; - -use bytes::BytesMut; -use futures_core::ready; -use futures_sink::Sink; -use pin_project_lite::pin_project; -use std::borrow::{Borrow, BorrowMut}; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use tracing::trace; - -pin_project! { - #[derive(Debug)] - pub(crate) struct FramedImpl { - #[pin] - pub(crate) inner: T, - pub(crate) state: State, - pub(crate) codec: U, - } -} - -const INITIAL_CAPACITY: usize = 8 * 1024; - -#[derive(Debug)] -pub(crate) struct ReadFrame { - pub(crate) eof: bool, - pub(crate) is_readable: bool, - pub(crate) buffer: BytesMut, - pub(crate) has_errored: bool, -} - -pub(crate) struct WriteFrame { - pub(crate) buffer: BytesMut, - pub(crate) backpressure_boundary: usize, -} - -#[derive(Default)] -pub(crate) struct RWFrames { - pub(crate) read: ReadFrame, - pub(crate) write: WriteFrame, -} - -impl Default for ReadFrame { - fn default() -> Self { - Self { - eof: false, - is_readable: false, - buffer: BytesMut::with_capacity(INITIAL_CAPACITY), - has_errored: false, - } - } -} - -impl Default for WriteFrame { - fn default() -> Self { - Self { - buffer: BytesMut::with_capacity(INITIAL_CAPACITY), - backpressure_boundary: INITIAL_CAPACITY, - } - } -} - -impl From for ReadFrame { - fn from(mut buffer: BytesMut) -> Self { - let size = buffer.capacity(); - if size < INITIAL_CAPACITY { - 
buffer.reserve(INITIAL_CAPACITY - size); - } - - Self { - buffer, - is_readable: size > 0, - eof: false, - has_errored: false, - } - } -} - -impl From for WriteFrame { - fn from(mut buffer: BytesMut) -> Self { - let size = buffer.capacity(); - if size < INITIAL_CAPACITY { - buffer.reserve(INITIAL_CAPACITY - size); - } - - Self { - buffer, - backpressure_boundary: INITIAL_CAPACITY, - } - } -} - -impl Borrow for RWFrames { - fn borrow(&self) -> &ReadFrame { - &self.read - } -} -impl BorrowMut for RWFrames { - fn borrow_mut(&mut self) -> &mut ReadFrame { - &mut self.read - } -} -impl Borrow for RWFrames { - fn borrow(&self) -> &WriteFrame { - &self.write - } -} -impl BorrowMut for RWFrames { - fn borrow_mut(&mut self) -> &mut WriteFrame { - &mut self.write - } -} -impl Stream for FramedImpl -where - T: AsyncRead, - U: Decoder, - R: BorrowMut, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - use crate::util::poll_read_buf; - - let mut pinned = self.project(); - let state: &mut ReadFrame = pinned.state.borrow_mut(); - // The following loops implements a state machine with each state corresponding - // to a combination of the `is_readable` and `eof` flags. States persist across - // loop entries and most state transitions occur with a return. - // - // The initial state is `reading`. 
- // - // | state | eof | is_readable | has_errored | - // |---------|-------|-------------|-------------| - // | reading | false | false | false | - // | framing | false | true | false | - // | pausing | true | true | false | - // | paused | true | false | false | - // | errored | | | true | - // `decode_eof` returns Err - // ┌────────────────────────────────────────────────────────┠- // `decode_eof` returns │ │ - // `Ok(Some)` │ │ - // ┌─────┠│ `decode_eof` returns After returning │ - // Read 0 bytes ├─────▼──┴┠`Ok(None)` ┌────────┠◄───┠`None` ┌───▼─────┠- // ┌────────────────►│ Pausing ├───────────────────────►│ Paused ├─┠└───────────┤ Errored │ - // │ └─────────┘ └─┬──▲───┘ │ └───▲───▲─┘ - // Pending read │ │ │ │ │ │ - // ┌──────┠│ `decode` returns `Some` │ └─────┘ │ │ - // │ │ │ ┌──────┠│ Pending │ │ - // │ ┌────▼──┴─┠Read n>0 bytes ┌┴──────▼─┠read n>0 bytes │ read │ │ - // └─┤ Reading ├───────────────►│ Framing │◄────────────────────────┘ │ │ - // └──┬─▲────┘ └─────┬──┬┘ │ │ - // │ │ │ │ `decode` returns Err │ │ - // │ └───decode` returns `None`──┘ └───────────────────────────────────────────────────────┘ │ - // │ read returns Err │ - // └────────────────────────────────────────────────────────────────────────────────────────────┘ - loop { - // Return `None` if we have encountered an error from the underlying decoder - // See: https://github.com/tokio-rs/tokio/issues/3976 - if state.has_errored { - // preparing has_errored -> paused - trace!("Returning None and setting paused"); - state.is_readable = false; - state.has_errored = false; - return Poll::Ready(None); - } - - // Repeatedly call `decode` or `decode_eof` while the buffer is "readable", - // i.e. it _might_ contain data consumable as a frame or closing frame. - // Both signal that there is no such data by returning `None`. - // - // If `decode` couldn't read a frame and the upstream source has returned eof, - // `decode_eof` will attempt to decode the remaining bytes as closing frames. 
- // - // If the underlying AsyncRead is resumable, we may continue after an EOF, - // but must finish emitting all of it's associated `decode_eof` frames. - // Furthermore, we don't want to emit any `decode_eof` frames on retried - // reads after an EOF unless we've actually read more data. - if state.is_readable { - // pausing or framing - if state.eof { - // pausing - let frame = pinned.codec.decode_eof(&mut state.buffer).map_err(|err| { - trace!("Got an error, going to errored state"); - state.has_errored = true; - err - })?; - if frame.is_none() { - state.is_readable = false; // prepare pausing -> paused - } - // implicit pausing -> pausing or pausing -> paused - return Poll::Ready(frame.map(Ok)); - } - - // framing - trace!("attempting to decode a frame"); - - if let Some(frame) = pinned.codec.decode(&mut state.buffer).map_err(|op| { - trace!("Got an error, going to errored state"); - state.has_errored = true; - op - })? { - trace!("frame decoded from buffer"); - // implicit framing -> framing - return Poll::Ready(Some(Ok(frame))); - } - - // framing -> reading - state.is_readable = false; - } - // reading or paused - // If we can't build a frame yet, try to read more data and try again. - // Make sure we've got room for at least one byte to read to ensure - // that we don't get a spurious 0 that looks like EOF. - state.buffer.reserve(1); - let bytect = match poll_read_buf(pinned.inner.as_mut(), cx, &mut state.buffer).map_err( - |err| { - trace!("Got an error, going to errored state"); - state.has_errored = true; - err - }, - )? { - Poll::Ready(ct) => ct, - // implicit reading -> reading or implicit paused -> paused - Poll::Pending => return Poll::Pending, - }; - if bytect == 0 { - if state.eof { - // We're already at an EOF, and since we've reached this path - // we're also not readable. This implies that we've already finished - // our `decode_eof` handling, so we can simply return `None`. 
- // implicit paused -> paused - return Poll::Ready(None); - } - // prepare reading -> paused - state.eof = true; - } else { - // prepare paused -> framing or noop reading -> framing - state.eof = false; - } - - // paused -> framing or reading -> framing or reading -> pausing - state.is_readable = true; - } - } -} - -impl Sink for FramedImpl -where - T: AsyncWrite, - U: Encoder, - U::Error: From, - W: BorrowMut, -{ - type Error = U::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if self.state.borrow().buffer.len() >= self.state.borrow().backpressure_boundary { - self.as_mut().poll_flush(cx) - } else { - Poll::Ready(Ok(())) - } - } - - fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - let pinned = self.project(); - pinned - .codec - .encode(item, &mut pinned.state.borrow_mut().buffer)?; - Ok(()) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - use crate::util::poll_write_buf; - trace!("flushing framed transport"); - let mut pinned = self.project(); - - while !pinned.state.borrow_mut().buffer.is_empty() { - let WriteFrame { buffer, .. 
} = pinned.state.borrow_mut(); - trace!(remaining = buffer.len(), "writing;"); - - let n = ready!(poll_write_buf(pinned.inner.as_mut(), cx, buffer))?; - - if n == 0 { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::WriteZero, - "failed to \ - write frame to transport", - ) - .into())); - } - } - - // Try flushing the underlying IO - ready!(pinned.inner.poll_flush(cx))?; - - trace!("framed transport flushed"); - Poll::Ready(Ok(())) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll_flush(cx))?; - ready!(self.project().inner.poll_shutdown(cx))?; - - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/framed_read.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/framed_read.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/framed_read.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/framed_read.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,199 +0,0 @@ -use crate::codec::framed_impl::{FramedImpl, ReadFrame}; -use crate::codec::Decoder; - -use futures_core::Stream; -use tokio::io::AsyncRead; - -use bytes::BytesMut; -use futures_sink::Sink; -use pin_project_lite::pin_project; -use std::fmt; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A [`Stream`] of messages decoded from an [`AsyncRead`]. - /// - /// [`Stream`]: futures_core::Stream - /// [`AsyncRead`]: tokio::io::AsyncRead - pub struct FramedRead { - #[pin] - inner: FramedImpl, - } -} - -// ===== impl FramedRead ===== - -impl FramedRead -where - T: AsyncRead, - D: Decoder, -{ - /// Creates a new `FramedRead` with the given `decoder`. - pub fn new(inner: T, decoder: D) -> FramedRead { - FramedRead { - inner: FramedImpl { - inner, - codec: decoder, - state: Default::default(), - }, - } - } - - /// Creates a new `FramedRead` with the given `decoder` and a buffer of `capacity` - /// initial size. 
- pub fn with_capacity(inner: T, decoder: D, capacity: usize) -> FramedRead { - FramedRead { - inner: FramedImpl { - inner, - codec: decoder, - state: ReadFrame { - eof: false, - is_readable: false, - buffer: BytesMut::with_capacity(capacity), - has_errored: false, - }, - }, - } - } -} - -impl FramedRead { - /// Returns a reference to the underlying I/O stream wrapped by - /// `FramedRead`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn get_ref(&self) -> &T { - &self.inner.inner - } - - /// Returns a mutable reference to the underlying I/O stream wrapped by - /// `FramedRead`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner.inner - } - - /// Returns a pinned mutable reference to the underlying I/O stream wrapped by - /// `FramedRead`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { - self.project().inner.project().inner - } - - /// Consumes the `FramedRead`, returning its underlying I/O stream. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn into_inner(self) -> T { - self.inner.inner - } - - /// Returns a reference to the underlying decoder. - pub fn decoder(&self) -> &D { - &self.inner.codec - } - - /// Returns a mutable reference to the underlying decoder. 
- pub fn decoder_mut(&mut self) -> &mut D { - &mut self.inner.codec - } - - /// Maps the decoder `D` to `C`, preserving the read buffer - /// wrapped by `Framed`. - pub fn map_decoder(self, map: F) -> FramedRead - where - F: FnOnce(D) -> C, - { - // This could be potentially simplified once rust-lang/rust#86555 hits stable - let FramedImpl { - inner, - state, - codec, - } = self.inner; - FramedRead { - inner: FramedImpl { - inner, - state, - codec: map(codec), - }, - } - } - - /// Returns a mutable reference to the underlying decoder. - pub fn decoder_pin_mut(self: Pin<&mut Self>) -> &mut D { - self.project().inner.project().codec - } - - /// Returns a reference to the read buffer. - pub fn read_buffer(&self) -> &BytesMut { - &self.inner.state.buffer - } - - /// Returns a mutable reference to the read buffer. - pub fn read_buffer_mut(&mut self) -> &mut BytesMut { - &mut self.inner.state.buffer - } -} - -// This impl just defers to the underlying FramedImpl -impl Stream for FramedRead -where - T: AsyncRead, - D: Decoder, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_next(cx) - } -} - -// This impl just defers to the underlying T: Sink -impl Sink for FramedRead -where - T: Sink, -{ - type Error = T::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.project().inner.poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - self.project().inner.project().inner.start_send(item) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.project().inner.poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.project().inner.poll_close(cx) - } -} - -impl fmt::Debug for FramedRead -where - T: fmt::Debug, - D: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - 
f.debug_struct("FramedRead") - .field("inner", &self.get_ref()) - .field("decoder", &self.decoder()) - .field("eof", &self.inner.state.eof) - .field("is_readable", &self.inner.state.is_readable) - .field("buffer", &self.read_buffer()) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/framed.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/framed.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/framed.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/framed.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,383 +0,0 @@ -use crate::codec::decoder::Decoder; -use crate::codec::encoder::Encoder; -use crate::codec::framed_impl::{FramedImpl, RWFrames, ReadFrame, WriteFrame}; - -use futures_core::Stream; -use tokio::io::{AsyncRead, AsyncWrite}; - -use bytes::BytesMut; -use futures_sink::Sink; -use pin_project_lite::pin_project; -use std::fmt; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A unified [`Stream`] and [`Sink`] interface to an underlying I/O object, using - /// the `Encoder` and `Decoder` traits to encode and decode frames. - /// - /// You can create a `Framed` instance by using the [`Decoder::framed`] adapter, or - /// by using the `new` function seen below. - /// - /// [`Stream`]: futures_core::Stream - /// [`Sink`]: futures_sink::Sink - /// [`AsyncRead`]: tokio::io::AsyncRead - /// [`Decoder::framed`]: crate::codec::Decoder::framed() - pub struct Framed { - #[pin] - inner: FramedImpl - } -} - -impl Framed -where - T: AsyncRead + AsyncWrite, -{ - /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this - /// I/O object, using [`Decoder`] and [`Encoder`] to read and write the raw data. - /// - /// Raw I/O objects work with byte sequences, but higher-level code usually - /// wants to batch these into meaningful chunks, called "frames". 
This - /// method layers framing on top of an I/O object, by using the codec - /// traits to handle encoding and decoding of messages frames. Note that - /// the incoming and outgoing frame types may be distinct. - /// - /// This function returns a *single* object that is both [`Stream`] and - /// [`Sink`]; grouping this into a single object is often useful for layering - /// things like gzip or TLS, which require both read and write access to the - /// underlying object. - /// - /// If you want to work more directly with the streams and sink, consider - /// calling [`split`] on the `Framed` returned by this method, which will - /// break them into separate objects, allowing them to interact more easily. - /// - /// Note that, for some byte sources, the stream can be resumed after an EOF - /// by reading from it, even after it has returned `None`. Repeated attempts - /// to do so, without new data available, continue to return `None` without - /// creating more (closing) frames. - /// - /// [`Stream`]: futures_core::Stream - /// [`Sink`]: futures_sink::Sink - /// [`Decode`]: crate::codec::Decoder - /// [`Encoder`]: crate::codec::Encoder - /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split - pub fn new(inner: T, codec: U) -> Framed { - Framed { - inner: FramedImpl { - inner, - codec, - state: Default::default(), - }, - } - } - - /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this - /// I/O object, using [`Decoder`] and [`Encoder`] to read and write the raw data, - /// with a specific read buffer initial capacity. - /// - /// Raw I/O objects work with byte sequences, but higher-level code usually - /// wants to batch these into meaningful chunks, called "frames". This - /// method layers framing on top of an I/O object, by using the codec - /// traits to handle encoding and decoding of messages frames. Note that - /// the incoming and outgoing frame types may be distinct. 
- /// - /// This function returns a *single* object that is both [`Stream`] and - /// [`Sink`]; grouping this into a single object is often useful for layering - /// things like gzip or TLS, which require both read and write access to the - /// underlying object. - /// - /// If you want to work more directly with the streams and sink, consider - /// calling [`split`] on the `Framed` returned by this method, which will - /// break them into separate objects, allowing them to interact more easily. - /// - /// [`Stream`]: futures_core::Stream - /// [`Sink`]: futures_sink::Sink - /// [`Decode`]: crate::codec::Decoder - /// [`Encoder`]: crate::codec::Encoder - /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split - pub fn with_capacity(inner: T, codec: U, capacity: usize) -> Framed { - Framed { - inner: FramedImpl { - inner, - codec, - state: RWFrames { - read: ReadFrame { - eof: false, - is_readable: false, - buffer: BytesMut::with_capacity(capacity), - has_errored: false, - }, - write: WriteFrame::default(), - }, - }, - } - } -} - -impl Framed { - /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this - /// I/O object, using [`Decoder`] and [`Encoder`] to read and write the raw data. - /// - /// Raw I/O objects work with byte sequences, but higher-level code usually - /// wants to batch these into meaningful chunks, called "frames". This - /// method layers framing on top of an I/O object, by using the `Codec` - /// traits to handle encoding and decoding of messages frames. Note that - /// the incoming and outgoing frame types may be distinct. - /// - /// This function returns a *single* object that is both [`Stream`] and - /// [`Sink`]; grouping this into a single object is often useful for layering - /// things like gzip or TLS, which require both read and write access to the - /// underlying object. - /// - /// This objects takes a stream and a readbuffer and a writebuffer. 
These field - /// can be obtained from an existing `Framed` with the [`into_parts`] method. - /// - /// If you want to work more directly with the streams and sink, consider - /// calling [`split`] on the `Framed` returned by this method, which will - /// break them into separate objects, allowing them to interact more easily. - /// - /// [`Stream`]: futures_core::Stream - /// [`Sink`]: futures_sink::Sink - /// [`Decoder`]: crate::codec::Decoder - /// [`Encoder`]: crate::codec::Encoder - /// [`into_parts`]: crate::codec::Framed::into_parts() - /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split - pub fn from_parts(parts: FramedParts) -> Framed { - Framed { - inner: FramedImpl { - inner: parts.io, - codec: parts.codec, - state: RWFrames { - read: parts.read_buf.into(), - write: parts.write_buf.into(), - }, - }, - } - } - - /// Returns a reference to the underlying I/O stream wrapped by - /// `Framed`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn get_ref(&self) -> &T { - &self.inner.inner - } - - /// Returns a mutable reference to the underlying I/O stream wrapped by - /// `Framed`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner.inner - } - - /// Returns a pinned mutable reference to the underlying I/O stream wrapped by - /// `Framed`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { - self.project().inner.project().inner - } - - /// Returns a reference to the underlying codec wrapped by - /// `Framed`. 
- /// - /// Note that care should be taken to not tamper with the underlying codec - /// as it may corrupt the stream of frames otherwise being worked with. - pub fn codec(&self) -> &U { - &self.inner.codec - } - - /// Returns a mutable reference to the underlying codec wrapped by - /// `Framed`. - /// - /// Note that care should be taken to not tamper with the underlying codec - /// as it may corrupt the stream of frames otherwise being worked with. - pub fn codec_mut(&mut self) -> &mut U { - &mut self.inner.codec - } - - /// Maps the codec `U` to `C`, preserving the read and write buffers - /// wrapped by `Framed`. - /// - /// Note that care should be taken to not tamper with the underlying codec - /// as it may corrupt the stream of frames otherwise being worked with. - pub fn map_codec(self, map: F) -> Framed - where - F: FnOnce(U) -> C, - { - // This could be potentially simplified once rust-lang/rust#86555 hits stable - let parts = self.into_parts(); - Framed::from_parts(FramedParts { - io: parts.io, - codec: map(parts.codec), - read_buf: parts.read_buf, - write_buf: parts.write_buf, - _priv: (), - }) - } - - /// Returns a mutable reference to the underlying codec wrapped by - /// `Framed`. - /// - /// Note that care should be taken to not tamper with the underlying codec - /// as it may corrupt the stream of frames otherwise being worked with. - pub fn codec_pin_mut(self: Pin<&mut Self>) -> &mut U { - self.project().inner.project().codec - } - - /// Returns a reference to the read buffer. - pub fn read_buffer(&self) -> &BytesMut { - &self.inner.state.read.buffer - } - - /// Returns a mutable reference to the read buffer. - pub fn read_buffer_mut(&mut self) -> &mut BytesMut { - &mut self.inner.state.read.buffer - } - - /// Returns a reference to the write buffer. - pub fn write_buffer(&self) -> &BytesMut { - &self.inner.state.write.buffer - } - - /// Returns a mutable reference to the write buffer. 
- pub fn write_buffer_mut(&mut self) -> &mut BytesMut { - &mut self.inner.state.write.buffer - } - - /// Returns backpressure boundary - pub fn backpressure_boundary(&self) -> usize { - self.inner.state.write.backpressure_boundary - } - - /// Updates backpressure boundary - pub fn set_backpressure_boundary(&mut self, boundary: usize) { - self.inner.state.write.backpressure_boundary = boundary; - } - - /// Consumes the `Framed`, returning its underlying I/O stream. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn into_inner(self) -> T { - self.inner.inner - } - - /// Consumes the `Framed`, returning its underlying I/O stream, the buffer - /// with unprocessed data, and the codec. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. 
- pub fn into_parts(self) -> FramedParts { - FramedParts { - io: self.inner.inner, - codec: self.inner.codec, - read_buf: self.inner.state.read.buffer, - write_buf: self.inner.state.write.buffer, - _priv: (), - } - } -} - -// This impl just defers to the underlying FramedImpl -impl Stream for Framed -where - T: AsyncRead, - U: Decoder, -{ - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_next(cx) - } -} - -// This impl just defers to the underlying FramedImpl -impl Sink for Framed -where - T: AsyncWrite, - U: Encoder, - U::Error: From, -{ - type Error = U::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - self.project().inner.start_send(item) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_close(cx) - } -} - -impl fmt::Debug for Framed -where - T: fmt::Debug, - U: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Framed") - .field("io", self.get_ref()) - .field("codec", self.codec()) - .finish() - } -} - -/// `FramedParts` contains an export of the data of a Framed transport. -/// It can be used to construct a new [`Framed`] with a different codec. -/// It contains all current buffers and the inner transport. -/// -/// [`Framed`]: crate::codec::Framed -#[derive(Debug)] -#[allow(clippy::manual_non_exhaustive)] -pub struct FramedParts { - /// The inner transport used to read bytes to and write bytes to - pub io: T, - - /// The codec - pub codec: U, - - /// The buffer with read but unprocessed data. - pub read_buf: BytesMut, - - /// A buffer with unprocessed data which are not written yet. 
- pub write_buf: BytesMut, - - /// This private field allows us to add additional fields in the future in a - /// backwards compatible way. - _priv: (), -} - -impl FramedParts { - /// Create a new, default, `FramedParts` - pub fn new(io: T, codec: U) -> FramedParts - where - U: Encoder, - { - FramedParts { - io, - codec, - read_buf: BytesMut::new(), - write_buf: BytesMut::new(), - _priv: (), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/framed_write.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/framed_write.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/framed_write.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/framed_write.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,188 +0,0 @@ -use crate::codec::encoder::Encoder; -use crate::codec::framed_impl::{FramedImpl, WriteFrame}; - -use futures_core::Stream; -use tokio::io::AsyncWrite; - -use bytes::BytesMut; -use futures_sink::Sink; -use pin_project_lite::pin_project; -use std::fmt; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A [`Sink`] of frames encoded to an `AsyncWrite`. - /// - /// [`Sink`]: futures_sink::Sink - pub struct FramedWrite { - #[pin] - inner: FramedImpl, - } -} - -impl FramedWrite -where - T: AsyncWrite, -{ - /// Creates a new `FramedWrite` with the given `encoder`. - pub fn new(inner: T, encoder: E) -> FramedWrite { - FramedWrite { - inner: FramedImpl { - inner, - codec: encoder, - state: WriteFrame::default(), - }, - } - } -} - -impl FramedWrite { - /// Returns a reference to the underlying I/O stream wrapped by - /// `FramedWrite`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn get_ref(&self) -> &T { - &self.inner.inner - } - - /// Returns a mutable reference to the underlying I/O stream wrapped by - /// `FramedWrite`. 
- /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner.inner - } - - /// Returns a pinned mutable reference to the underlying I/O stream wrapped by - /// `FramedWrite`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { - self.project().inner.project().inner - } - - /// Consumes the `FramedWrite`, returning its underlying I/O stream. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise - /// being worked with. - pub fn into_inner(self) -> T { - self.inner.inner - } - - /// Returns a reference to the underlying encoder. - pub fn encoder(&self) -> &E { - &self.inner.codec - } - - /// Returns a mutable reference to the underlying encoder. - pub fn encoder_mut(&mut self) -> &mut E { - &mut self.inner.codec - } - - /// Maps the encoder `E` to `C`, preserving the write buffer - /// wrapped by `Framed`. - pub fn map_encoder(self, map: F) -> FramedWrite - where - F: FnOnce(E) -> C, - { - // This could be potentially simplified once rust-lang/rust#86555 hits stable - let FramedImpl { - inner, - state, - codec, - } = self.inner; - FramedWrite { - inner: FramedImpl { - inner, - state, - codec: map(codec), - }, - } - } - - /// Returns a mutable reference to the underlying encoder. - pub fn encoder_pin_mut(self: Pin<&mut Self>) -> &mut E { - self.project().inner.project().codec - } - - /// Returns a reference to the write buffer. - pub fn write_buffer(&self) -> &BytesMut { - &self.inner.state.buffer - } - - /// Returns a mutable reference to the write buffer. 
- pub fn write_buffer_mut(&mut self) -> &mut BytesMut { - &mut self.inner.state.buffer - } - - /// Returns backpressure boundary - pub fn backpressure_boundary(&self) -> usize { - self.inner.state.backpressure_boundary - } - - /// Updates backpressure boundary - pub fn set_backpressure_boundary(&mut self, boundary: usize) { - self.inner.state.backpressure_boundary = boundary; - } -} - -// This impl just defers to the underlying FramedImpl -impl Sink for FramedWrite -where - T: AsyncWrite, - E: Encoder, - E::Error: From, -{ - type Error = E::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - self.project().inner.start_send(item) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_close(cx) - } -} - -// This impl just defers to the underlying T: Stream -impl Stream for FramedWrite -where - T: Stream, -{ - type Item = T::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.project().inner.poll_next(cx) - } -} - -impl fmt::Debug for FramedWrite -where - T: fmt::Debug, - U: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FramedWrite") - .field("inner", &self.get_ref()) - .field("encoder", &self.encoder()) - .field("buffer", &self.inner.state.buffer) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/length_delimited.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/length_delimited.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/length_delimited.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/length_delimited.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1043 +0,0 @@ -//! 
Frame a stream of bytes based on a length prefix -//! -//! Many protocols delimit their frames by prefacing frame data with a -//! frame head that specifies the length of the frame. The -//! `length_delimited` module provides utilities for handling the length -//! based framing. This allows the consumer to work with entire frames -//! without having to worry about buffering or other framing logic. -//! -//! # Getting started -//! -//! If implementing a protocol from scratch, using length delimited framing -//! is an easy way to get started. [`LengthDelimitedCodec::new()`] will -//! return a length delimited codec using default configuration values. -//! This can then be used to construct a framer to adapt a full-duplex -//! byte stream into a stream of frames. -//! -//! ``` -//! use tokio::io::{AsyncRead, AsyncWrite}; -//! use tokio_util::codec::{Framed, LengthDelimitedCodec}; -//! -//! fn bind_transport(io: T) -//! -> Framed -//! { -//! Framed::new(io, LengthDelimitedCodec::new()) -//! } -//! # pub fn main() {} -//! ``` -//! -//! The returned transport implements `Sink + Stream` for `BytesMut`. It -//! encodes the frame with a big-endian `u32` header denoting the frame -//! payload length: -//! -//! ```text -//! +----------+--------------------------------+ -//! | len: u32 | frame payload | -//! +----------+--------------------------------+ -//! ``` -//! -//! Specifically, given the following: -//! -//! ``` -//! use tokio::io::{AsyncRead, AsyncWrite}; -//! use tokio_util::codec::{Framed, LengthDelimitedCodec}; -//! -//! use futures::SinkExt; -//! use bytes::Bytes; -//! -//! async fn write_frame(io: T) -> Result<(), Box> -//! where -//! T: AsyncRead + AsyncWrite + Unpin, -//! { -//! let mut transport = Framed::new(io, LengthDelimitedCodec::new()); -//! let frame = Bytes::from("hello world"); -//! -//! transport.send(frame).await?; -//! Ok(()) -//! } -//! ``` -//! -//! The encoded frame will look like this: -//! -//! ```text -//! 
+---- len: u32 ----+---- data ----+ -//! | \x00\x00\x00\x0b | hello world | -//! +------------------+--------------+ -//! ``` -//! -//! # Decoding -//! -//! [`FramedRead`] adapts an [`AsyncRead`] into a `Stream` of [`BytesMut`], -//! such that each yielded [`BytesMut`] value contains the contents of an -//! entire frame. There are many configuration parameters enabling -//! [`FramedRead`] to handle a wide range of protocols. Here are some -//! examples that will cover the various options at a high level. -//! -//! ## Example 1 -//! -//! The following will parse a `u16` length field at offset 0, including the -//! frame head in the yielded `BytesMut`. -//! -//! ``` -//! # use tokio::io::AsyncRead; -//! # use tokio_util::codec::LengthDelimitedCodec; -//! # fn bind_read(io: T) { -//! LengthDelimitedCodec::builder() -//! .length_field_offset(0) // default value -//! .length_field_type::() -//! .length_adjustment(0) // default value -//! .num_skip(0) // Do not strip frame header -//! .new_read(io); -//! # } -//! # pub fn main() {} -//! ``` -//! -//! The following frame will be decoded as such: -//! -//! ```text -//! INPUT DECODED -//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+ -//! | \x00\x0B | Hello world | --> | \x00\x0B | Hello world | -//! +----------+---------------+ +----------+---------------+ -//! ``` -//! -//! The value of the length field is 11 (`\x0B`) which represents the length -//! of the payload, `hello world`. By default, [`FramedRead`] assumes that -//! the length field represents the number of bytes that **follows** the -//! length field. Thus, the entire frame has a length of 13: 2 bytes for the -//! frame head + 11 bytes for the payload. -//! -//! ## Example 2 -//! -//! The following will parse a `u16` length field at offset 0, omitting the -//! frame head in the yielded `BytesMut`. -//! -//! ``` -//! # use tokio::io::AsyncRead; -//! # use tokio_util::codec::LengthDelimitedCodec; -//! # fn bind_read(io: T) { -//! 
LengthDelimitedCodec::builder() -//! .length_field_offset(0) // default value -//! .length_field_type::() -//! .length_adjustment(0) // default value -//! // `num_skip` is not needed, the default is to skip -//! .new_read(io); -//! # } -//! # pub fn main() {} -//! ``` -//! -//! The following frame will be decoded as such: -//! -//! ```text -//! INPUT DECODED -//! +-- len ---+--- Payload ---+ +--- Payload ---+ -//! | \x00\x0B | Hello world | --> | Hello world | -//! +----------+---------------+ +---------------+ -//! ``` -//! -//! This is similar to the first example, the only difference is that the -//! frame head is **not** included in the yielded `BytesMut` value. -//! -//! ## Example 3 -//! -//! The following will parse a `u16` length field at offset 0, including the -//! frame head in the yielded `BytesMut`. In this case, the length field -//! **includes** the frame head length. -//! -//! ``` -//! # use tokio::io::AsyncRead; -//! # use tokio_util::codec::LengthDelimitedCodec; -//! # fn bind_read(io: T) { -//! LengthDelimitedCodec::builder() -//! .length_field_offset(0) // default value -//! .length_field_type::() -//! .length_adjustment(-2) // size of head -//! .num_skip(0) -//! .new_read(io); -//! # } -//! # pub fn main() {} -//! ``` -//! -//! The following frame will be decoded as such: -//! -//! ```text -//! INPUT DECODED -//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+ -//! | \x00\x0D | Hello world | --> | \x00\x0D | Hello world | -//! +----------+---------------+ +----------+---------------+ -//! ``` -//! -//! In most cases, the length field represents the length of the payload -//! only, as shown in the previous examples. However, in some protocols the -//! length field represents the length of the whole frame, including the -//! head. In such cases, we specify a negative `length_adjustment` to adjust -//! the value provided in the frame head to represent the payload length. -//! -//! ## Example 4 -//! -//! 
The following will parse a 3 byte length field at offset 0 in a 5 byte -//! frame head, including the frame head in the yielded `BytesMut`. -//! -//! ``` -//! # use tokio::io::AsyncRead; -//! # use tokio_util::codec::LengthDelimitedCodec; -//! # fn bind_read(io: T) { -//! LengthDelimitedCodec::builder() -//! .length_field_offset(0) // default value -//! .length_field_length(3) -//! .length_adjustment(2) // remaining head -//! .num_skip(0) -//! .new_read(io); -//! # } -//! # pub fn main() {} -//! ``` -//! -//! The following frame will be decoded as such: -//! -//! ```text -//! INPUT -//! +---- len -----+- head -+--- Payload ---+ -//! | \x00\x00\x0B | \xCAFE | Hello world | -//! +--------------+--------+---------------+ -//! -//! DECODED -//! +---- len -----+- head -+--- Payload ---+ -//! | \x00\x00\x0B | \xCAFE | Hello world | -//! +--------------+--------+---------------+ -//! ``` -//! -//! A more advanced example that shows a case where there is extra frame -//! head data between the length field and the payload. In such cases, it is -//! usually desirable to include the frame head as part of the yielded -//! `BytesMut`. This lets consumers of the length delimited framer to -//! process the frame head as needed. -//! -//! The positive `length_adjustment` value lets `FramedRead` factor in the -//! additional head into the frame length calculation. -//! -//! ## Example 5 -//! -//! The following will parse a `u16` length field at offset 1 of a 4 byte -//! frame head. The first byte and the length field will be omitted from the -//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be -//! included. -//! -//! ``` -//! # use tokio::io::AsyncRead; -//! # use tokio_util::codec::LengthDelimitedCodec; -//! # fn bind_read(io: T) { -//! LengthDelimitedCodec::builder() -//! .length_field_offset(1) // length of hdr1 -//! .length_field_type::() -//! .length_adjustment(1) // length of hdr2 -//! .num_skip(3) // length of hdr1 + LEN -//! .new_read(io); -//! 
# } -//! # pub fn main() {} -//! ``` -//! -//! The following frame will be decoded as such: -//! -//! ```text -//! INPUT -//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+ -//! | \xCA | \x00\x0B | \xFE | Hello world | -//! +--------+----------+--------+---------------+ -//! -//! DECODED -//! +- hdr2 -+--- Payload ---+ -//! | \xFE | Hello world | -//! +--------+---------------+ -//! ``` -//! -//! The length field is situated in the middle of the frame head. In this -//! case, the first byte in the frame head could be a version or some other -//! identifier that is not needed for processing. On the other hand, the -//! second half of the head is needed. -//! -//! `length_field_offset` indicates how many bytes to skip before starting -//! to read the length field. `length_adjustment` is the number of bytes to -//! skip starting at the end of the length field. In this case, it is the -//! second half of the head. -//! -//! ## Example 6 -//! -//! The following will parse a `u16` length field at offset 1 of a 4 byte -//! frame head. The first byte and the length field will be omitted from the -//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be -//! included. In this case, the length field **includes** the frame head -//! length. -//! -//! ``` -//! # use tokio::io::AsyncRead; -//! # use tokio_util::codec::LengthDelimitedCodec; -//! # fn bind_read(io: T) { -//! LengthDelimitedCodec::builder() -//! .length_field_offset(1) // length of hdr1 -//! .length_field_type::() -//! .length_adjustment(-3) // length of hdr1 + LEN, negative -//! .num_skip(3) -//! .new_read(io); -//! # } -//! ``` -//! -//! The following frame will be decoded as such: -//! -//! ```text -//! INPUT -//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+ -//! | \xCA | \x00\x0F | \xFE | Hello world | -//! +--------+----------+--------+---------------+ -//! -//! DECODED -//! +- hdr2 -+--- Payload ---+ -//! | \xFE | Hello world | -//! +--------+---------------+ -//! ``` -//! -//! 
Similar to the example above, the difference is that the length field -//! represents the length of the entire frame instead of just the payload. -//! The length of `hdr1` and `len` must be counted in `length_adjustment`. -//! Note that the length of `hdr2` does **not** need to be explicitly set -//! anywhere because it already is factored into the total frame length that -//! is read from the byte stream. -//! -//! ## Example 7 -//! -//! The following will parse a 3 byte length field at offset 0 in a 4 byte -//! frame head, excluding the 4th byte from the yielded `BytesMut`. -//! -//! ``` -//! # use tokio::io::AsyncRead; -//! # use tokio_util::codec::LengthDelimitedCodec; -//! # fn bind_read(io: T) { -//! LengthDelimitedCodec::builder() -//! .length_field_offset(0) // default value -//! .length_field_length(3) -//! .length_adjustment(0) // default value -//! .num_skip(4) // skip the first 4 bytes -//! .new_read(io); -//! # } -//! # pub fn main() {} -//! ``` -//! -//! The following frame will be decoded as such: -//! -//! ```text -//! INPUT DECODED -//! +------- len ------+--- Payload ---+ +--- Payload ---+ -//! | \x00\x00\x0B\xFF | Hello world | => | Hello world | -//! +------------------+---------------+ +---------------+ -//! ``` -//! -//! A simple example where there are unused bytes between the length field -//! and the payload. -//! -//! # Encoding -//! -//! [`FramedWrite`] adapts an [`AsyncWrite`] into a `Sink` of [`BytesMut`], -//! such that each submitted [`BytesMut`] is prefaced by a length field. -//! There are fewer configuration options than [`FramedRead`]. Given -//! protocols that have more complex frame heads, an encoder should probably -//! be written by hand using [`Encoder`]. -//! -//! Here is a simple example, given a `FramedWrite` with the following -//! configuration: -//! -//! ``` -//! # use tokio::io::AsyncWrite; -//! # use tokio_util::codec::LengthDelimitedCodec; -//! # fn write_frame(io: T) { -//! # let _ = -//! 
LengthDelimitedCodec::builder() -//! .length_field_type::() -//! .new_write(io); -//! # } -//! # pub fn main() {} -//! ``` -//! -//! A payload of `hello world` will be encoded as: -//! -//! ```text -//! +- len: u16 -+---- data ----+ -//! | \x00\x0b | hello world | -//! +------------+--------------+ -//! ``` -//! -//! [`LengthDelimitedCodec::new()`]: method@LengthDelimitedCodec::new -//! [`FramedRead`]: struct@FramedRead -//! [`FramedWrite`]: struct@FramedWrite -//! [`AsyncRead`]: trait@tokio::io::AsyncRead -//! [`AsyncWrite`]: trait@tokio::io::AsyncWrite -//! [`Encoder`]: trait@Encoder -//! [`BytesMut`]: bytes::BytesMut - -use crate::codec::{Decoder, Encoder, Framed, FramedRead, FramedWrite}; - -use tokio::io::{AsyncRead, AsyncWrite}; - -use bytes::{Buf, BufMut, Bytes, BytesMut}; -use std::error::Error as StdError; -use std::io::{self, Cursor}; -use std::{cmp, fmt, mem}; - -/// Configure length delimited `LengthDelimitedCodec`s. -/// -/// `Builder` enables constructing configured length delimited codecs. Note -/// that not all configuration settings apply to both encoding and decoding. See -/// the documentation for specific methods for more detail. -#[derive(Debug, Clone, Copy)] -pub struct Builder { - // Maximum frame length - max_frame_len: usize, - - // Number of bytes representing the field length - length_field_len: usize, - - // Number of bytes in the header before the length field - length_field_offset: usize, - - // Adjust the length specified in the header field by this amount - length_adjustment: isize, - - // Total number of bytes to skip before reading the payload, if not set, - // `length_field_len + length_field_offset` - num_skip: Option, - - // Length field byte order (little or big endian) - length_field_is_big_endian: bool, -} - -/// An error when the number of bytes read is more than max frame length. -pub struct LengthDelimitedCodecError { - _priv: (), -} - -/// A codec for frames delimited by a frame head specifying their lengths. 
-/// -/// This allows the consumer to work with entire frames without having to worry -/// about buffering or other framing logic. -/// -/// See [module level] documentation for more detail. -/// -/// [module level]: index.html -#[derive(Debug, Clone)] -pub struct LengthDelimitedCodec { - // Configuration values - builder: Builder, - - // Read state - state: DecodeState, -} - -#[derive(Debug, Clone, Copy)] -enum DecodeState { - Head, - Data(usize), -} - -// ===== impl LengthDelimitedCodec ====== - -impl LengthDelimitedCodec { - /// Creates a new `LengthDelimitedCodec` with the default configuration values. - pub fn new() -> Self { - Self { - builder: Builder::new(), - state: DecodeState::Head, - } - } - - /// Creates a new length delimited codec builder with default configuration - /// values. - pub fn builder() -> Builder { - Builder::new() - } - - /// Returns the current max frame setting - /// - /// This is the largest size this codec will accept from the wire. Larger - /// frames will be rejected. - pub fn max_frame_length(&self) -> usize { - self.builder.max_frame_len - } - - /// Updates the max frame setting. - /// - /// The change takes effect the next time a frame is decoded. In other - /// words, if a frame is currently in process of being decoded with a frame - /// size greater than `val` but less than the max frame length in effect - /// before calling this function, then the frame will be allowed. 
- pub fn set_max_frame_length(&mut self, val: usize) { - self.builder.max_frame_length(val); - } - - fn decode_head(&mut self, src: &mut BytesMut) -> io::Result> { - let head_len = self.builder.num_head_bytes(); - let field_len = self.builder.length_field_len; - - if src.len() < head_len { - // Not enough data - return Ok(None); - } - - let n = { - let mut src = Cursor::new(&mut *src); - - // Skip the required bytes - src.advance(self.builder.length_field_offset); - - // match endianness - let n = if self.builder.length_field_is_big_endian { - src.get_uint(field_len) - } else { - src.get_uint_le(field_len) - }; - - if n > self.builder.max_frame_len as u64 { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - LengthDelimitedCodecError { _priv: () }, - )); - } - - // The check above ensures there is no overflow - let n = n as usize; - - // Adjust `n` with bounds checking - let n = if self.builder.length_adjustment < 0 { - n.checked_sub(-self.builder.length_adjustment as usize) - } else { - n.checked_add(self.builder.length_adjustment as usize) - }; - - // Error handling - match n { - Some(n) => n, - None => { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "provided length would overflow after adjustment", - )); - } - } - }; - - src.advance(self.builder.get_num_skip()); - - // Ensure that the buffer has enough space to read the incoming - // payload - src.reserve(n.saturating_sub(src.len())); - - Ok(Some(n)) - } - - fn decode_data(&self, n: usize, src: &mut BytesMut) -> Option { - // At this point, the buffer has already had the required capacity - // reserved. All there is to do is read. - if src.len() < n { - return None; - } - - Some(src.split_to(n)) - } -} - -impl Decoder for LengthDelimitedCodec { - type Item = BytesMut; - type Error = io::Error; - - fn decode(&mut self, src: &mut BytesMut) -> io::Result> { - let n = match self.state { - DecodeState::Head => match self.decode_head(src)? 
{ - Some(n) => { - self.state = DecodeState::Data(n); - n - } - None => return Ok(None), - }, - DecodeState::Data(n) => n, - }; - - match self.decode_data(n, src) { - Some(data) => { - // Update the decode state - self.state = DecodeState::Head; - - // Make sure the buffer has enough space to read the next head - src.reserve(self.builder.num_head_bytes().saturating_sub(src.len())); - - Ok(Some(data)) - } - None => Ok(None), - } - } -} - -impl Encoder for LengthDelimitedCodec { - type Error = io::Error; - - fn encode(&mut self, data: Bytes, dst: &mut BytesMut) -> Result<(), io::Error> { - let n = data.len(); - - if n > self.builder.max_frame_len { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - LengthDelimitedCodecError { _priv: () }, - )); - } - - // Adjust `n` with bounds checking - let n = if self.builder.length_adjustment < 0 { - n.checked_add(-self.builder.length_adjustment as usize) - } else { - n.checked_sub(self.builder.length_adjustment as usize) - }; - - let n = n.ok_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - "provided length would overflow after adjustment", - ) - })?; - - // Reserve capacity in the destination buffer to fit the frame and - // length field (plus adjustment). - dst.reserve(self.builder.length_field_len + n); - - if self.builder.length_field_is_big_endian { - dst.put_uint(n as u64, self.builder.length_field_len); - } else { - dst.put_uint_le(n as u64, self.builder.length_field_len); - } - - // Write the frame to the buffer - dst.extend_from_slice(&data[..]); - - Ok(()) - } -} - -impl Default for LengthDelimitedCodec { - fn default() -> Self { - Self::new() - } -} - -// ===== impl Builder ===== - -mod builder { - /// Types that can be used with `Builder::length_field_type`. 
- pub trait LengthFieldType {} - - impl LengthFieldType for u8 {} - impl LengthFieldType for u16 {} - impl LengthFieldType for u32 {} - impl LengthFieldType for u64 {} - - #[cfg(any( - target_pointer_width = "8", - target_pointer_width = "16", - target_pointer_width = "32", - target_pointer_width = "64", - ))] - impl LengthFieldType for usize {} -} - -impl Builder { - /// Creates a new length delimited codec builder with default configuration - /// values. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .length_field_offset(0) - /// .length_field_type::() - /// .length_adjustment(0) - /// .num_skip(0) - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn new() -> Builder { - Builder { - // Default max frame length of 8MB - max_frame_len: 8 * 1_024 * 1_024, - - // Default byte length of 4 - length_field_len: 4, - - // Default to the header field being at the start of the header. - length_field_offset: 0, - - length_adjustment: 0, - - // Total number of bytes to skip before reading the payload, if not set, - // `length_field_len + length_field_offset` - num_skip: None, - - // Default to reading the length field in network (big) endian. - length_field_is_big_endian: true, - } - } - - /// Read the length field as a big endian integer - /// - /// This is the default setting. - /// - /// This configuration option applies to both encoding and decoding. 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .big_endian() - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn big_endian(&mut self) -> &mut Self { - self.length_field_is_big_endian = true; - self - } - - /// Read the length field as a little endian integer - /// - /// The default setting is big endian. - /// - /// This configuration option applies to both encoding and decoding. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .little_endian() - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn little_endian(&mut self) -> &mut Self { - self.length_field_is_big_endian = false; - self - } - - /// Read the length field as a native endian integer - /// - /// The default setting is big endian. - /// - /// This configuration option applies to both encoding and decoding. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .native_endian() - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn native_endian(&mut self) -> &mut Self { - if cfg!(target_endian = "big") { - self.big_endian() - } else { - self.little_endian() - } - } - - /// Sets the max frame length in bytes - /// - /// This configuration option applies to both encoding and decoding. The - /// default value is 8MB. - /// - /// When decoding, the length field read from the byte stream is checked - /// against this setting **before** any adjustments are applied. When - /// encoding, the length of the submitted payload is checked against this - /// setting. 
- /// - /// When frames exceed the max length, an `io::Error` with the custom value - /// of the `LengthDelimitedCodecError` type will be returned. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .max_frame_length(8 * 1024 * 1024) - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn max_frame_length(&mut self, val: usize) -> &mut Self { - self.max_frame_len = val; - self - } - - /// Sets the unsigned integer type used to represent the length field. - /// - /// The default type is [`u32`]. The max type is [`u64`] (or [`usize`] on - /// 64-bit targets). - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .length_field_type::() - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - /// - /// Unlike [`Builder::length_field_length`], this does not fail at runtime - /// and instead produces a compile error: - /// - /// ```compile_fail - /// # use tokio::io::AsyncRead; - /// # use tokio_util::codec::LengthDelimitedCodec; - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .length_field_type::() - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn length_field_type(&mut self) -> &mut Self { - self.length_field_length(mem::size_of::()) - } - - /// Sets the number of bytes used to represent the length field - /// - /// The default value is `4`. The max value is `8`. - /// - /// This configuration option applies to both encoding and decoding. 
- /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .length_field_length(4) - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn length_field_length(&mut self, val: usize) -> &mut Self { - assert!(val > 0 && val <= 8, "invalid length field length"); - self.length_field_len = val; - self - } - - /// Sets the number of bytes in the header before the length field - /// - /// This configuration option only applies to decoding. - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .length_field_offset(1) - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn length_field_offset(&mut self, val: usize) -> &mut Self { - self.length_field_offset = val; - self - } - - /// Delta between the payload length specified in the header and the real - /// payload length - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .length_adjustment(-2) - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn length_adjustment(&mut self, val: isize) -> &mut Self { - self.length_adjustment = val; - self - } - - /// Sets the number of bytes to skip before reading the payload - /// - /// Default value is `length_field_len + length_field_offset` - /// - /// This configuration option only applies to decoding - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .num_skip(4) - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn 
num_skip(&mut self, val: usize) -> &mut Self { - self.num_skip = Some(val); - self - } - - /// Create a configured length delimited `LengthDelimitedCodec` - /// - /// # Examples - /// - /// ``` - /// use tokio_util::codec::LengthDelimitedCodec; - /// # pub fn main() { - /// LengthDelimitedCodec::builder() - /// .length_field_offset(0) - /// .length_field_type::() - /// .length_adjustment(0) - /// .num_skip(0) - /// .new_codec(); - /// # } - /// ``` - pub fn new_codec(&self) -> LengthDelimitedCodec { - LengthDelimitedCodec { - builder: *self, - state: DecodeState::Head, - } - } - - /// Create a configured length delimited `FramedRead` - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncRead; - /// use tokio_util::codec::LengthDelimitedCodec; - /// - /// # fn bind_read(io: T) { - /// LengthDelimitedCodec::builder() - /// .length_field_offset(0) - /// .length_field_type::() - /// .length_adjustment(0) - /// .num_skip(0) - /// .new_read(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn new_read(&self, upstream: T) -> FramedRead - where - T: AsyncRead, - { - FramedRead::new(upstream, self.new_codec()) - } - - /// Create a configured length delimited `FramedWrite` - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::AsyncWrite; - /// # use tokio_util::codec::LengthDelimitedCodec; - /// # fn write_frame(io: T) { - /// LengthDelimitedCodec::builder() - /// .length_field_type::() - /// .new_write(io); - /// # } - /// # pub fn main() {} - /// ``` - pub fn new_write(&self, inner: T) -> FramedWrite - where - T: AsyncWrite, - { - FramedWrite::new(inner, self.new_codec()) - } - - /// Create a configured length delimited `Framed` - /// - /// # Examples - /// - /// ``` - /// # use tokio::io::{AsyncRead, AsyncWrite}; - /// # use tokio_util::codec::LengthDelimitedCodec; - /// # fn write_frame(io: T) { - /// # let _ = - /// LengthDelimitedCodec::builder() - /// .length_field_type::() - /// .new_framed(io); - /// # } - /// # pub fn main() {} - /// ``` 
- pub fn new_framed(&self, inner: T) -> Framed - where - T: AsyncRead + AsyncWrite, - { - Framed::new(inner, self.new_codec()) - } - - fn num_head_bytes(&self) -> usize { - let num = self.length_field_offset + self.length_field_len; - cmp::max(num, self.num_skip.unwrap_or(0)) - } - - fn get_num_skip(&self) -> usize { - self.num_skip - .unwrap_or(self.length_field_offset + self.length_field_len) - } -} - -impl Default for Builder { - fn default() -> Self { - Self::new() - } -} - -// ===== impl LengthDelimitedCodecError ===== - -impl fmt::Debug for LengthDelimitedCodecError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LengthDelimitedCodecError").finish() - } -} - -impl fmt::Display for LengthDelimitedCodecError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("frame size too big") - } -} - -impl StdError for LengthDelimitedCodecError {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/lines_codec.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/lines_codec.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/lines_codec.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/lines_codec.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,232 +0,0 @@ -use crate::codec::decoder::Decoder; -use crate::codec::encoder::Encoder; - -use bytes::{Buf, BufMut, BytesMut}; -use std::{cmp, fmt, io, str, usize}; - -/// A simple [`Decoder`] and [`Encoder`] implementation that splits up data into lines. -/// -/// This uses the `\n` character as the line ending on all platforms. -/// -/// [`Decoder`]: crate::codec::Decoder -/// [`Encoder`]: crate::codec::Encoder -#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct LinesCodec { - // Stored index of the next index to examine for a `\n` character. - // This is used to optimize searching. 
- // For example, if `decode` was called with `abc`, it would hold `3`, - // because that is the next index to examine. - // The next time `decode` is called with `abcde\n`, the method will - // only look at `de\n` before returning. - next_index: usize, - - /// The maximum length for a given line. If `usize::MAX`, lines will be - /// read until a `\n` character is reached. - max_length: usize, - - /// Are we currently discarding the remainder of a line which was over - /// the length limit? - is_discarding: bool, -} - -impl LinesCodec { - /// Returns a `LinesCodec` for splitting up data into lines. - /// - /// # Note - /// - /// The returned `LinesCodec` will not have an upper bound on the length - /// of a buffered line. See the documentation for [`new_with_max_length`] - /// for information on why this could be a potential security risk. - /// - /// [`new_with_max_length`]: crate::codec::LinesCodec::new_with_max_length() - pub fn new() -> LinesCodec { - LinesCodec { - next_index: 0, - max_length: usize::MAX, - is_discarding: false, - } - } - - /// Returns a `LinesCodec` with a maximum line length limit. - /// - /// If this is set, calls to `LinesCodec::decode` will return a - /// [`LinesCodecError`] when a line exceeds the length limit. Subsequent calls - /// will discard up to `limit` bytes from that line until a newline - /// character is reached, returning `None` until the line over the limit - /// has been fully discarded. After that point, calls to `decode` will - /// function as normal. - /// - /// # Note - /// - /// Setting a length limit is highly recommended for any `LinesCodec` which - /// will be exposed to untrusted input. Otherwise, the size of the buffer - /// that holds the line currently being read is unbounded. An attacker could - /// exploit this unbounded buffer by sending an unbounded amount of input - /// without any `\n` characters, causing unbounded memory consumption. 
- /// - /// [`LinesCodecError`]: crate::codec::LinesCodecError - pub fn new_with_max_length(max_length: usize) -> Self { - LinesCodec { - max_length, - ..LinesCodec::new() - } - } - - /// Returns the maximum line length when decoding. - /// - /// ``` - /// use std::usize; - /// use tokio_util::codec::LinesCodec; - /// - /// let codec = LinesCodec::new(); - /// assert_eq!(codec.max_length(), usize::MAX); - /// ``` - /// ``` - /// use tokio_util::codec::LinesCodec; - /// - /// let codec = LinesCodec::new_with_max_length(256); - /// assert_eq!(codec.max_length(), 256); - /// ``` - pub fn max_length(&self) -> usize { - self.max_length - } -} - -fn utf8(buf: &[u8]) -> Result<&str, io::Error> { - str::from_utf8(buf) - .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "Unable to decode input as UTF8")) -} - -fn without_carriage_return(s: &[u8]) -> &[u8] { - if let Some(&b'\r') = s.last() { - &s[..s.len() - 1] - } else { - s - } -} - -impl Decoder for LinesCodec { - type Item = String; - type Error = LinesCodecError; - - fn decode(&mut self, buf: &mut BytesMut) -> Result, LinesCodecError> { - loop { - // Determine how far into the buffer we'll search for a newline. If - // there's no max_length set, we'll read to the end of the buffer. - let read_to = cmp::min(self.max_length.saturating_add(1), buf.len()); - - let newline_offset = buf[self.next_index..read_to] - .iter() - .position(|b| *b == b'\n'); - - match (self.is_discarding, newline_offset) { - (true, Some(offset)) => { - // If we found a newline, discard up to that offset and - // then stop discarding. On the next iteration, we'll try - // to read a line normally. - buf.advance(offset + self.next_index + 1); - self.is_discarding = false; - self.next_index = 0; - } - (true, None) => { - // Otherwise, we didn't find a newline, so we'll discard - // everything we read. On the next iteration, we'll continue - // discarding up to max_len bytes unless we find a newline. 
- buf.advance(read_to); - self.next_index = 0; - if buf.is_empty() { - return Ok(None); - } - } - (false, Some(offset)) => { - // Found a line! - let newline_index = offset + self.next_index; - self.next_index = 0; - let line = buf.split_to(newline_index + 1); - let line = &line[..line.len() - 1]; - let line = without_carriage_return(line); - let line = utf8(line)?; - return Ok(Some(line.to_string())); - } - (false, None) if buf.len() > self.max_length => { - // Reached the maximum length without finding a - // newline, return an error and start discarding on the - // next call. - self.is_discarding = true; - return Err(LinesCodecError::MaxLineLengthExceeded); - } - (false, None) => { - // We didn't find a line or reach the length limit, so the next - // call will resume searching at the current offset. - self.next_index = read_to; - return Ok(None); - } - } - } - } - - fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, LinesCodecError> { - Ok(match self.decode(buf)? { - Some(frame) => Some(frame), - None => { - // No terminating newline - return remaining data, if any - if buf.is_empty() || buf == &b"\r"[..] { - None - } else { - let line = buf.split_to(buf.len()); - let line = without_carriage_return(&line); - let line = utf8(line)?; - self.next_index = 0; - Some(line.to_string()) - } - } - }) - } -} - -impl Encoder for LinesCodec -where - T: AsRef, -{ - type Error = LinesCodecError; - - fn encode(&mut self, line: T, buf: &mut BytesMut) -> Result<(), LinesCodecError> { - let line = line.as_ref(); - buf.reserve(line.len() + 1); - buf.put(line.as_bytes()); - buf.put_u8(b'\n'); - Ok(()) - } -} - -impl Default for LinesCodec { - fn default() -> Self { - Self::new() - } -} - -/// An error occurred while encoding or decoding a line. -#[derive(Debug)] -pub enum LinesCodecError { - /// The maximum line length was exceeded. - MaxLineLengthExceeded, - /// An IO error occurred. 
- Io(io::Error), -} - -impl fmt::Display for LinesCodecError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - LinesCodecError::MaxLineLengthExceeded => write!(f, "max line length exceeded"), - LinesCodecError::Io(e) => write!(f, "{}", e), - } - } -} - -impl From for LinesCodecError { - fn from(e: io::Error) -> LinesCodecError { - LinesCodecError::Io(e) - } -} - -impl std::error::Error for LinesCodecError {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/codec/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/codec/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,290 +0,0 @@ -//! Adaptors from AsyncRead/AsyncWrite to Stream/Sink -//! -//! Raw I/O objects work with byte sequences, but higher-level code usually -//! wants to batch these into meaningful chunks, called "frames". -//! -//! This module contains adapters to go from streams of bytes, [`AsyncRead`] and -//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`]. -//! Framed streams are also known as transports. -//! -//! # The Decoder trait -//! -//! A [`Decoder`] is used together with [`FramedRead`] or [`Framed`] to turn an -//! [`AsyncRead`] into a [`Stream`]. The job of the decoder trait is to specify -//! how sequences of bytes are turned into a sequence of frames, and to -//! determine where the boundaries between frames are. The job of the -//! `FramedRead` is to repeatedly switch between reading more data from the IO -//! resource, and asking the decoder whether we have received enough data to -//! decode another frame of data. -//! -//! The main method on the `Decoder` trait is the [`decode`] method. This method -//! takes as argument the data that has been read so far, and when it is called, -//! it will be in one of the following situations: -//! -//! 1. 
The buffer contains less than a full frame. -//! 2. The buffer contains exactly a full frame. -//! 3. The buffer contains more than a full frame. -//! -//! In the first situation, the decoder should return `Ok(None)`. -//! -//! In the second situation, the decoder should clear the provided buffer and -//! return `Ok(Some(the_decoded_frame))`. -//! -//! In the third situation, the decoder should use a method such as [`split_to`] -//! or [`advance`] to modify the buffer such that the frame is removed from the -//! buffer, but any data in the buffer after that frame should still remain in -//! the buffer. The decoder should also return `Ok(Some(the_decoded_frame))` in -//! this case. -//! -//! Finally the decoder may return an error if the data is invalid in some way. -//! The decoder should _not_ return an error just because it has yet to receive -//! a full frame. -//! -//! It is guaranteed that, from one call to `decode` to another, the provided -//! buffer will contain the exact same data as before, except that if more data -//! has arrived through the IO resource, that data will have been appended to -//! the buffer. This means that reading frames from a `FramedRead` is -//! essentially equivalent to the following loop: -//! -//! ```no_run -//! use tokio::io::AsyncReadExt; -//! # // This uses async_stream to create an example that compiles. -//! # fn foo() -> impl futures_core::Stream> { async_stream::try_stream! { -//! # use tokio_util::codec::Decoder; -//! # let mut decoder = tokio_util::codec::BytesCodec::new(); -//! # let io_resource = &mut &[0u8, 1, 2, 3][..]; -//! -//! let mut buf = bytes::BytesMut::new(); -//! loop { -//! // The read_buf call will append to buf rather than overwrite existing data. -//! let len = io_resource.read_buf(&mut buf).await?; -//! -//! if len == 0 { -//! while let Some(frame) = decoder.decode_eof(&mut buf)? { -//! yield frame; -//! } -//! break; -//! } -//! -//! while let Some(frame) = decoder.decode(&mut buf)? { -//! 
yield frame; -//! } -//! } -//! # }} -//! ``` -//! The example above uses `yield` whenever the `Stream` produces an item. -//! -//! ## Example decoder -//! -//! As an example, consider a protocol that can be used to send strings where -//! each frame is a four byte integer that contains the length of the frame, -//! followed by that many bytes of string data. The decoder fails with an error -//! if the string data is not valid utf-8 or too long. -//! -//! Such a decoder can be written like this: -//! ``` -//! use tokio_util::codec::Decoder; -//! use bytes::{BytesMut, Buf}; -//! -//! struct MyStringDecoder {} -//! -//! const MAX: usize = 8 * 1024 * 1024; -//! -//! impl Decoder for MyStringDecoder { -//! type Item = String; -//! type Error = std::io::Error; -//! -//! fn decode( -//! &mut self, -//! src: &mut BytesMut -//! ) -> Result, Self::Error> { -//! if src.len() < 4 { -//! // Not enough data to read length marker. -//! return Ok(None); -//! } -//! -//! // Read length marker. -//! let mut length_bytes = [0u8; 4]; -//! length_bytes.copy_from_slice(&src[..4]); -//! let length = u32::from_le_bytes(length_bytes) as usize; -//! -//! // Check that the length is not too large to avoid a denial of -//! // service attack where the server runs out of memory. -//! if length > MAX { -//! return Err(std::io::Error::new( -//! std::io::ErrorKind::InvalidData, -//! format!("Frame of length {} is too large.", length) -//! )); -//! } -//! -//! if src.len() < 4 + length { -//! // The full string has not yet arrived. -//! // -//! // We reserve more space in the buffer. This is not strictly -//! // necessary, but is a good idea performance-wise. -//! src.reserve(4 + length - src.len()); -//! -//! // We inform the Framed that we need more bytes to form the next -//! // frame. -//! return Ok(None); -//! } -//! -//! // Use advance to modify src such that it no longer contains -//! // this frame. -//! let data = src[4..4 + length].to_vec(); -//! src.advance(4 + length); -//! -//! 
// Convert the data to a string, or fail if it is not valid utf-8. -//! match String::from_utf8(data) { -//! Ok(string) => Ok(Some(string)), -//! Err(utf8_error) => { -//! Err(std::io::Error::new( -//! std::io::ErrorKind::InvalidData, -//! utf8_error.utf8_error(), -//! )) -//! }, -//! } -//! } -//! } -//! ``` -//! -//! # The Encoder trait -//! -//! An [`Encoder`] is used together with [`FramedWrite`] or [`Framed`] to turn -//! an [`AsyncWrite`] into a [`Sink`]. The job of the encoder trait is to -//! specify how frames are turned into a sequences of bytes. The job of the -//! `FramedWrite` is to take the resulting sequence of bytes and write it to the -//! IO resource. -//! -//! The main method on the `Encoder` trait is the [`encode`] method. This method -//! takes an item that is being written, and a buffer to write the item to. The -//! buffer may already contain data, and in this case, the encoder should append -//! the new frame the to buffer rather than overwrite the existing data. -//! -//! It is guaranteed that, from one call to `encode` to another, the provided -//! buffer will contain the exact same data as before, except that some of the -//! data may have been removed from the front of the buffer. Writing to a -//! `FramedWrite` is essentially equivalent to the following loop: -//! -//! ```no_run -//! use tokio::io::AsyncWriteExt; -//! use bytes::Buf; // for advance -//! # use tokio_util::codec::Encoder; -//! # async fn next_frame() -> bytes::Bytes { bytes::Bytes::new() } -//! # async fn no_more_frames() { } -//! # #[tokio::main] async fn main() -> std::io::Result<()> { -//! # let mut io_resource = tokio::io::sink(); -//! # let mut encoder = tokio_util::codec::BytesCodec::new(); -//! -//! const MAX: usize = 8192; -//! -//! let mut buf = bytes::BytesMut::new(); -//! loop { -//! tokio::select! { -//! num_written = io_resource.write(&buf), if !buf.is_empty() => { -//! buf.advance(num_written?); -//! }, -//! frame = next_frame(), if buf.len() < MAX => { -//! 
encoder.encode(frame, &mut buf)?; -//! }, -//! _ = no_more_frames() => { -//! io_resource.write_all(&buf).await?; -//! io_resource.shutdown().await?; -//! return Ok(()); -//! }, -//! } -//! } -//! # } -//! ``` -//! Here the `next_frame` method corresponds to any frames you write to the -//! `FramedWrite`. The `no_more_frames` method corresponds to closing the -//! `FramedWrite` with [`SinkExt::close`]. -//! -//! ## Example encoder -//! -//! As an example, consider a protocol that can be used to send strings where -//! each frame is a four byte integer that contains the length of the frame, -//! followed by that many bytes of string data. The encoder will fail if the -//! string is too long. -//! -//! Such an encoder can be written like this: -//! ``` -//! use tokio_util::codec::Encoder; -//! use bytes::BytesMut; -//! -//! struct MyStringEncoder {} -//! -//! const MAX: usize = 8 * 1024 * 1024; -//! -//! impl Encoder for MyStringEncoder { -//! type Error = std::io::Error; -//! -//! fn encode(&mut self, item: String, dst: &mut BytesMut) -> Result<(), Self::Error> { -//! // Don't send a string if it is longer than the other end will -//! // accept. -//! if item.len() > MAX { -//! return Err(std::io::Error::new( -//! std::io::ErrorKind::InvalidData, -//! format!("Frame of length {} is too large.", item.len()) -//! )); -//! } -//! -//! // Convert the length into a byte array. -//! // The cast to u32 cannot overflow due to the length check above. -//! let len_slice = u32::to_le_bytes(item.len() as u32); -//! -//! // Reserve space in the buffer. -//! dst.reserve(4 + item.len()); -//! -//! // Write the length and string to the buffer. -//! dst.extend_from_slice(&len_slice); -//! dst.extend_from_slice(item.as_bytes()); -//! Ok(()) -//! } -//! } -//! ``` -//! -//! [`AsyncRead`]: tokio::io::AsyncRead -//! [`AsyncWrite`]: tokio::io::AsyncWrite -//! [`Stream`]: futures_core::Stream -//! [`Sink`]: futures_sink::Sink -//! 
[`SinkExt::close`]: https://docs.rs/futures/0.3/futures/sink/trait.SinkExt.html#method.close -//! [`FramedRead`]: struct@crate::codec::FramedRead -//! [`FramedWrite`]: struct@crate::codec::FramedWrite -//! [`Framed`]: struct@crate::codec::Framed -//! [`Decoder`]: trait@crate::codec::Decoder -//! [`decode`]: fn@crate::codec::Decoder::decode -//! [`encode`]: fn@crate::codec::Encoder::encode -//! [`split_to`]: fn@bytes::BytesMut::split_to -//! [`advance`]: fn@bytes::Buf::advance - -mod bytes_codec; -pub use self::bytes_codec::BytesCodec; - -mod decoder; -pub use self::decoder::Decoder; - -mod encoder; -pub use self::encoder::Encoder; - -mod framed_impl; -#[allow(unused_imports)] -pub(crate) use self::framed_impl::{FramedImpl, RWFrames, ReadFrame, WriteFrame}; - -mod framed; -pub use self::framed::{Framed, FramedParts}; - -mod framed_read; -pub use self::framed_read::FramedRead; - -mod framed_write; -pub use self::framed_write::FramedWrite; - -pub mod length_delimited; -pub use self::length_delimited::{LengthDelimitedCodec, LengthDelimitedCodecError}; - -mod lines_codec; -pub use self::lines_codec::{LinesCodec, LinesCodecError}; - -mod any_delimiter_codec; -pub use self::any_delimiter_codec::{AnyDelimiterCodec, AnyDelimiterCodecError}; diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/compat.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/compat.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/compat.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/compat.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,276 +0,0 @@ -//! Compatibility between the `tokio::io` and `futures-io` versions of the -//! `AsyncRead` and `AsyncWrite` traits. -use futures_core::ready; -use pin_project_lite::pin_project; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A compatibility layer that allows conversion between the - /// `tokio::io` and `futures-io` `AsyncRead` and `AsyncWrite` traits. 
- #[derive(Copy, Clone, Debug)] - pub struct Compat { - #[pin] - inner: T, - seek_pos: Option, - } -} - -/// Extension trait that allows converting a type implementing -/// `futures_io::AsyncRead` to implement `tokio::io::AsyncRead`. -pub trait FuturesAsyncReadCompatExt: futures_io::AsyncRead { - /// Wraps `self` with a compatibility layer that implements - /// `tokio_io::AsyncRead`. - fn compat(self) -> Compat - where - Self: Sized, - { - Compat::new(self) - } -} - -impl FuturesAsyncReadCompatExt for T {} - -/// Extension trait that allows converting a type implementing -/// `futures_io::AsyncWrite` to implement `tokio::io::AsyncWrite`. -pub trait FuturesAsyncWriteCompatExt: futures_io::AsyncWrite { - /// Wraps `self` with a compatibility layer that implements - /// `tokio::io::AsyncWrite`. - fn compat_write(self) -> Compat - where - Self: Sized, - { - Compat::new(self) - } -} - -impl FuturesAsyncWriteCompatExt for T {} - -/// Extension trait that allows converting a type implementing -/// `tokio::io::AsyncRead` to implement `futures_io::AsyncRead`. -pub trait TokioAsyncReadCompatExt: tokio::io::AsyncRead { - /// Wraps `self` with a compatibility layer that implements - /// `futures_io::AsyncRead`. - fn compat(self) -> Compat - where - Self: Sized, - { - Compat::new(self) - } -} - -impl TokioAsyncReadCompatExt for T {} - -/// Extension trait that allows converting a type implementing -/// `tokio::io::AsyncWrite` to implement `futures_io::AsyncWrite`. -pub trait TokioAsyncWriteCompatExt: tokio::io::AsyncWrite { - /// Wraps `self` with a compatibility layer that implements - /// `futures_io::AsyncWrite`. - fn compat_write(self) -> Compat - where - Self: Sized, - { - Compat::new(self) - } -} - -impl TokioAsyncWriteCompatExt for T {} - -// === impl Compat === - -impl Compat { - fn new(inner: T) -> Self { - Self { - inner, - seek_pos: None, - } - } - - /// Get a reference to the `Future`, `Stream`, `AsyncRead`, or `AsyncWrite` object - /// contained within. 
- pub fn get_ref(&self) -> &T { - &self.inner - } - - /// Get a mutable reference to the `Future`, `Stream`, `AsyncRead`, or `AsyncWrite` object - /// contained within. - pub fn get_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Returns the wrapped item. - pub fn into_inner(self) -> T { - self.inner - } -} - -impl tokio::io::AsyncRead for Compat -where - T: futures_io::AsyncRead, -{ - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> Poll> { - // We can't trust the inner type to not peak at the bytes, - // so we must defensively initialize the buffer. - let slice = buf.initialize_unfilled(); - let n = ready!(futures_io::AsyncRead::poll_read( - self.project().inner, - cx, - slice - ))?; - buf.advance(n); - Poll::Ready(Ok(())) - } -} - -impl futures_io::AsyncRead for Compat -where - T: tokio::io::AsyncRead, -{ - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - slice: &mut [u8], - ) -> Poll> { - let mut buf = tokio::io::ReadBuf::new(slice); - ready!(tokio::io::AsyncRead::poll_read( - self.project().inner, - cx, - &mut buf - ))?; - Poll::Ready(Ok(buf.filled().len())) - } -} - -impl tokio::io::AsyncBufRead for Compat -where - T: futures_io::AsyncBufRead, -{ - fn poll_fill_buf<'a>( - self: Pin<&'a mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - futures_io::AsyncBufRead::poll_fill_buf(self.project().inner, cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - futures_io::AsyncBufRead::consume(self.project().inner, amt) - } -} - -impl futures_io::AsyncBufRead for Compat -where - T: tokio::io::AsyncBufRead, -{ - fn poll_fill_buf<'a>( - self: Pin<&'a mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - tokio::io::AsyncBufRead::poll_fill_buf(self.project().inner, cx) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - tokio::io::AsyncBufRead::consume(self.project().inner, amt) - } -} - -impl tokio::io::AsyncWrite for Compat -where - T: futures_io::AsyncWrite, -{ - fn poll_write( - 
self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - futures_io::AsyncWrite::poll_write(self.project().inner, cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - futures_io::AsyncWrite::poll_flush(self.project().inner, cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - futures_io::AsyncWrite::poll_close(self.project().inner, cx) - } -} - -impl futures_io::AsyncWrite for Compat -where - T: tokio::io::AsyncWrite, -{ - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - tokio::io::AsyncWrite::poll_flush(self.project().inner, cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx) - } -} - -impl futures_io::AsyncSeek for Compat { - fn poll_seek( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - pos: io::SeekFrom, - ) -> Poll> { - if self.seek_pos != Some(pos) { - // Ensure previous seeks have finished before starting a new one - ready!(self.as_mut().project().inner.poll_complete(cx))?; - self.as_mut().project().inner.start_seek(pos)?; - *self.as_mut().project().seek_pos = Some(pos); - } - let res = ready!(self.as_mut().project().inner.poll_complete(cx)); - *self.as_mut().project().seek_pos = None; - Poll::Ready(res) - } -} - -impl tokio::io::AsyncSeek for Compat { - fn start_seek(mut self: Pin<&mut Self>, pos: io::SeekFrom) -> io::Result<()> { - *self.as_mut().project().seek_pos = Some(pos); - Ok(()) - } - - fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let pos = match self.seek_pos { - None => { - // tokio 1.x AsyncSeek recommends calling poll_complete before start_seek. 
- // We don't have to guarantee that the value returned by - // poll_complete called without start_seek is correct, - // so we'll return 0. - return Poll::Ready(Ok(0)); - } - Some(pos) => pos, - }; - let res = ready!(self.as_mut().project().inner.poll_seek(cx, pos)); - *self.as_mut().project().seek_pos = None; - Poll::Ready(res) - } -} - -#[cfg(unix)] -impl std::os::unix::io::AsRawFd for Compat { - fn as_raw_fd(&self) -> std::os::unix::io::RawFd { - self.inner.as_raw_fd() - } -} - -#[cfg(windows)] -impl std::os::windows::io::AsRawHandle for Compat { - fn as_raw_handle(&self) -> std::os::windows::io::RawHandle { - self.inner.as_raw_handle() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/context.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/context.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/context.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/context.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,190 +0,0 @@ -//! Tokio context aware futures utilities. -//! -//! This module includes utilities around integrating tokio with other runtimes -//! by allowing the context to be attached to futures. This allows spawning -//! futures on other executors while still using tokio to drive them. This -//! can be useful if you need to use a tokio based library in an executor/runtime -//! that does not provide a tokio context. - -use pin_project_lite::pin_project; -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, -}; -use tokio::runtime::{Handle, Runtime}; - -pin_project! { - /// `TokioContext` allows running futures that must be inside Tokio's - /// context on a non-Tokio runtime. - /// - /// It contains a [`Handle`] to the runtime. A handle to the runtime can be - /// obtain by calling the [`Runtime::handle()`] method. - /// - /// Note that the `TokioContext` wrapper only works if the `Runtime` it is - /// connected to has not yet been destroyed. 
You must keep the `Runtime` - /// alive until the future has finished executing. - /// - /// **Warning:** If `TokioContext` is used together with a [current thread] - /// runtime, that runtime must be inside a call to `block_on` for the - /// wrapped future to work. For this reason, it is recommended to use a - /// [multi thread] runtime, even if you configure it to only spawn one - /// worker thread. - /// - /// # Examples - /// - /// This example creates two runtimes, but only [enables time] on one of - /// them. It then uses the context of the runtime with the timer enabled to - /// execute a [`sleep`] future on the runtime with timing disabled. - /// ``` - /// use tokio::time::{sleep, Duration}; - /// use tokio_util::context::RuntimeExt; - /// - /// // This runtime has timers enabled. - /// let rt = tokio::runtime::Builder::new_multi_thread() - /// .enable_all() - /// .build() - /// .unwrap(); - /// - /// // This runtime has timers disabled. - /// let rt2 = tokio::runtime::Builder::new_multi_thread() - /// .build() - /// .unwrap(); - /// - /// // Wrap the sleep future in the context of rt. - /// let fut = rt.wrap(async { sleep(Duration::from_millis(2)).await }); - /// - /// // Execute the future on rt2. - /// rt2.block_on(fut); - /// ``` - /// - /// [`Handle`]: struct@tokio::runtime::Handle - /// [`Runtime::handle()`]: fn@tokio::runtime::Runtime::handle - /// [`RuntimeExt`]: trait@crate::context::RuntimeExt - /// [`new_static`]: fn@Self::new_static - /// [`sleep`]: fn@tokio::time::sleep - /// [current thread]: fn@tokio::runtime::Builder::new_current_thread - /// [enables time]: fn@tokio::runtime::Builder::enable_time - /// [multi thread]: fn@tokio::runtime::Builder::new_multi_thread - pub struct TokioContext { - #[pin] - inner: F, - handle: Handle, - } -} - -impl TokioContext { - /// Associate the provided future with the context of the runtime behind - /// the provided `Handle`. 
- /// - /// This constructor uses a `'static` lifetime to opt-out of checking that - /// the runtime still exists. - /// - /// # Examples - /// - /// This is the same as the example above, but uses the `new` constructor - /// rather than [`RuntimeExt::wrap`]. - /// - /// [`RuntimeExt::wrap`]: fn@RuntimeExt::wrap - /// - /// ``` - /// use tokio::time::{sleep, Duration}; - /// use tokio_util::context::TokioContext; - /// - /// // This runtime has timers enabled. - /// let rt = tokio::runtime::Builder::new_multi_thread() - /// .enable_all() - /// .build() - /// .unwrap(); - /// - /// // This runtime has timers disabled. - /// let rt2 = tokio::runtime::Builder::new_multi_thread() - /// .build() - /// .unwrap(); - /// - /// let fut = TokioContext::new( - /// async { sleep(Duration::from_millis(2)).await }, - /// rt.handle().clone(), - /// ); - /// - /// // Execute the future on rt2. - /// rt2.block_on(fut); - /// ``` - pub fn new(future: F, handle: Handle) -> TokioContext { - TokioContext { - inner: future, - handle, - } - } - - /// Obtain a reference to the handle inside this `TokioContext`. - pub fn handle(&self) -> &Handle { - &self.handle - } - - /// Remove the association between the Tokio runtime and the wrapped future. - pub fn into_inner(self) -> F { - self.inner - } -} - -impl Future for TokioContext { - type Output = F::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let me = self.project(); - let handle = me.handle; - let fut = me.inner; - - let _enter = handle.enter(); - fut.poll(cx) - } -} - -/// Extension trait that simplifies bundling a `Handle` with a `Future`. -pub trait RuntimeExt { - /// Create a [`TokioContext`] that wraps the provided future and runs it in - /// this runtime's context. - /// - /// # Examples - /// - /// This example creates two runtimes, but only [enables time] on one of - /// them. 
It then uses the context of the runtime with the timer enabled to - /// execute a [`sleep`] future on the runtime with timing disabled. - /// - /// ``` - /// use tokio::time::{sleep, Duration}; - /// use tokio_util::context::RuntimeExt; - /// - /// // This runtime has timers enabled. - /// let rt = tokio::runtime::Builder::new_multi_thread() - /// .enable_all() - /// .build() - /// .unwrap(); - /// - /// // This runtime has timers disabled. - /// let rt2 = tokio::runtime::Builder::new_multi_thread() - /// .build() - /// .unwrap(); - /// - /// // Wrap the sleep future in the context of rt. - /// let fut = rt.wrap(async { sleep(Duration::from_millis(2)).await }); - /// - /// // Execute the future on rt2. - /// rt2.block_on(fut); - /// ``` - /// - /// [`TokioContext`]: struct@crate::context::TokioContext - /// [`sleep`]: fn@tokio::time::sleep - /// [enables time]: fn@tokio::runtime::Builder::enable_time - fn wrap(&self, fut: F) -> TokioContext; -} - -impl RuntimeExt for Runtime { - fn wrap(&self, fut: F) -> TokioContext { - TokioContext { - inner: fut, - handle: self.handle().clone(), - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/either.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/either.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/either.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/either.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,188 +0,0 @@ -//! Module defining an Either type. -use std::{ - future::Future, - io::SeekFrom, - pin::Pin, - task::{Context, Poll}, -}; -use tokio::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, ReadBuf, Result}; - -/// Combines two different futures, streams, or sinks having the same associated types into a single type. -/// -/// This type implements common asynchronous traits such as [`Future`] and those in Tokio. 
-/// -/// [`Future`]: std::future::Future -/// -/// # Example -/// -/// The following code will not work: -/// -/// ```compile_fail -/// # fn some_condition() -> bool { true } -/// # async fn some_async_function() -> u32 { 10 } -/// # async fn other_async_function() -> u32 { 20 } -/// #[tokio::main] -/// async fn main() { -/// let result = if some_condition() { -/// some_async_function() -/// } else { -/// other_async_function() // <- Will print: "`if` and `else` have incompatible types" -/// }; -/// -/// println!("Result is {}", result.await); -/// } -/// ``` -/// -// This is because although the output types for both futures is the same, the exact future -// types are different, but the compiler must be able to choose a single type for the -// `result` variable. -/// -/// When the output type is the same, we can wrap each future in `Either` to avoid the -/// issue: -/// -/// ``` -/// use tokio_util::either::Either; -/// # fn some_condition() -> bool { true } -/// # async fn some_async_function() -> u32 { 10 } -/// # async fn other_async_function() -> u32 { 20 } -/// -/// #[tokio::main] -/// async fn main() { -/// let result = if some_condition() { -/// Either::Left(some_async_function()) -/// } else { -/// Either::Right(other_async_function()) -/// }; -/// -/// let value = result.await; -/// println!("Result is {}", value); -/// # assert_eq!(value, 10); -/// } -/// ``` -#[allow(missing_docs)] // Doc-comments for variants in this particular case don't make much sense. -#[derive(Debug, Clone)] -pub enum Either { - Left(L), - Right(R), -} - -/// A small helper macro which reduces amount of boilerplate in the actual trait method implementation. -/// It takes an invocation of method as an argument (e.g. `self.poll(cx)`), and redirects it to either -/// enum variant held in `self`. -macro_rules! 
delegate_call { - ($self:ident.$method:ident($($args:ident),+)) => { - unsafe { - match $self.get_unchecked_mut() { - Self::Left(l) => Pin::new_unchecked(l).$method($($args),+), - Self::Right(r) => Pin::new_unchecked(r).$method($($args),+), - } - } - } -} - -impl Future for Either -where - L: Future, - R: Future, -{ - type Output = O; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - delegate_call!(self.poll(cx)) - } -} - -impl AsyncRead for Either -where - L: AsyncRead, - R: AsyncRead, -{ - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - delegate_call!(self.poll_read(cx, buf)) - } -} - -impl AsyncBufRead for Either -where - L: AsyncBufRead, - R: AsyncBufRead, -{ - fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - delegate_call!(self.poll_fill_buf(cx)) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - delegate_call!(self.consume(amt)); - } -} - -impl AsyncSeek for Either -where - L: AsyncSeek, - R: AsyncSeek, -{ - fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> Result<()> { - delegate_call!(self.start_seek(position)) - } - - fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - delegate_call!(self.poll_complete(cx)) - } -} - -impl AsyncWrite for Either -where - L: AsyncWrite, - R: AsyncWrite, -{ - fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - delegate_call!(self.poll_write(cx, buf)) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - delegate_call!(self.poll_flush(cx)) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - delegate_call!(self.poll_shutdown(cx)) - } -} - -impl futures_core::stream::Stream for Either -where - L: futures_core::stream::Stream, - R: futures_core::stream::Stream, -{ - type Item = L::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - delegate_call!(self.poll_next(cx)) - } -} - 
-#[cfg(test)] -mod tests { - use super::*; - use tokio::io::{repeat, AsyncReadExt, Repeat}; - use tokio_stream::{once, Once, StreamExt}; - - #[tokio::test] - async fn either_is_stream() { - let mut either: Either, Once> = Either::Left(once(1)); - - assert_eq!(Some(1u32), either.next().await); - } - - #[tokio::test] - async fn either_is_async_read() { - let mut buffer = [0; 3]; - let mut either: Either = Either::Right(repeat(0b101)); - - either.read_exact(&mut buffer).await.unwrap(); - assert_eq!(buffer, [0b101, 0b101, 0b101]); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/io/copy_to_bytes.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/io/copy_to_bytes.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/io/copy_to_bytes.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/io/copy_to_bytes.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -use bytes::Bytes; -use futures_core::stream::Stream; -use futures_sink::Sink; -use pin_project_lite::pin_project; -use std::pin::Pin; -use std::task::{Context, Poll}; - -pin_project! { - /// A helper that wraps a [`Sink`]`<`[`Bytes`]`>` and converts it into a - /// [`Sink`]`<&'a [u8]>` by copying each byte slice into an owned [`Bytes`]. - /// - /// See the documentation for [`SinkWriter`] for an example. - /// - /// [`Bytes`]: bytes::Bytes - /// [`SinkWriter`]: crate::io::SinkWriter - /// [`Sink`]: futures_sink::Sink - #[derive(Debug)] - pub struct CopyToBytes { - #[pin] - inner: S, - } -} - -impl CopyToBytes { - /// Creates a new [`CopyToBytes`]. - pub fn new(inner: S) -> Self { - Self { inner } - } - - /// Gets a reference to the underlying sink. - pub fn get_ref(&self) -> &S { - &self.inner - } - - /// Gets a mutable reference to the underlying sink. - pub fn get_mut(&mut self) -> &mut S { - &mut self.inner - } - - /// Consumes this [`CopyToBytes`], returning the underlying sink. 
- pub fn into_inner(self) -> S { - self.inner - } -} - -impl<'a, S> Sink<&'a [u8]> for CopyToBytes -where - S: Sink, -{ - type Error = S::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: &'a [u8]) -> Result<(), Self::Error> { - self.project() - .inner - .start_send(Bytes::copy_from_slice(item)) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_close(cx) - } -} - -impl Stream for CopyToBytes { - type Item = S::Item; - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_next(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/io/inspect.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/io/inspect.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/io/inspect.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/io/inspect.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,180 +0,0 @@ -use futures_core::ready; -use pin_project_lite::pin_project; -use std::io::{IoSlice, Result}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; - -pin_project! { - /// An adapter that lets you inspect the data that's being read. - /// - /// This is useful for things like hashing data as it's read in. - pub struct InspectReader { - #[pin] - reader: R, - f: F, - } -} - -impl InspectReader { - /// Create a new InspectReader, wrapping `reader` and calling `f` for the - /// new data supplied by each read call. - /// - /// The closure will only be called with an empty slice if the inner reader - /// returns without reading data into the buffer. This happens at EOF, or if - /// `poll_read` is called with a zero-size buffer. 
- pub fn new(reader: R, f: F) -> InspectReader - where - R: AsyncRead, - F: FnMut(&[u8]), - { - InspectReader { reader, f } - } - - /// Consumes the `InspectReader`, returning the wrapped reader - pub fn into_inner(self) -> R { - self.reader - } -} - -impl AsyncRead for InspectReader { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - let me = self.project(); - let filled_length = buf.filled().len(); - ready!(me.reader.poll_read(cx, buf))?; - (me.f)(&buf.filled()[filled_length..]); - Poll::Ready(Ok(())) - } -} - -impl AsyncWrite for InspectReader { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.project().reader.poll_write(cx, buf) - } - - fn poll_flush( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - self.project().reader.poll_flush(cx) - } - - fn poll_shutdown( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - self.project().reader.poll_shutdown(cx) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - self.project().reader.poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.reader.is_write_vectored() - } -} - -pin_project! { - /// An adapter that lets you inspect the data that's being written. - /// - /// This is useful for things like hashing data as it's written out. - pub struct InspectWriter { - #[pin] - writer: W, - f: F, - } -} - -impl InspectWriter { - /// Create a new InspectWriter, wrapping `write` and calling `f` for the - /// data successfully written by each write call. - /// - /// The closure `f` will never be called with an empty slice. A vectored - /// write can result in multiple calls to `f` - at most one call to `f` per - /// buffer supplied to `poll_write_vectored`. 
- pub fn new(writer: W, f: F) -> InspectWriter - where - W: AsyncWrite, - F: FnMut(&[u8]), - { - InspectWriter { writer, f } - } - - /// Consumes the `InspectWriter`, returning the wrapped writer - pub fn into_inner(self) -> W { - self.writer - } -} - -impl AsyncWrite for InspectWriter { - fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - let me = self.project(); - let res = me.writer.poll_write(cx, buf); - if let Poll::Ready(Ok(count)) = res { - if count != 0 { - (me.f)(&buf[..count]); - } - } - res - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - me.writer.poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - me.writer.poll_shutdown(cx) - } - - fn poll_write_vectored( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - let me = self.project(); - let res = me.writer.poll_write_vectored(cx, bufs); - if let Poll::Ready(Ok(mut count)) = res { - for buf in bufs { - if count == 0 { - break; - } - let size = count.min(buf.len()); - if size != 0 { - (me.f)(&buf[..size]); - count -= size; - } - } - } - res - } - - fn is_write_vectored(&self) -> bool { - self.writer.is_write_vectored() - } -} - -impl AsyncRead for InspectWriter { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - self.project().writer.poll_read(cx, buf) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/io/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/io/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/io/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/io/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -//! Helpers for IO related tasks. -//! -//! The stream types are often used in combination with hyper or reqwest, as they -//! 
allow converting between a hyper [`Body`] and [`AsyncRead`]. -//! -//! The [`SyncIoBridge`] type converts from the world of async I/O -//! to synchronous I/O; this may often come up when using synchronous APIs -//! inside [`tokio::task::spawn_blocking`]. -//! -//! [`Body`]: https://docs.rs/hyper/0.13/hyper/struct.Body.html -//! [`AsyncRead`]: tokio::io::AsyncRead - -mod copy_to_bytes; -mod inspect; -mod read_buf; -mod reader_stream; -mod sink_writer; -mod stream_reader; - -cfg_io_util! { - mod sync_bridge; - pub use self::sync_bridge::SyncIoBridge; -} - -pub use self::copy_to_bytes::CopyToBytes; -pub use self::inspect::{InspectReader, InspectWriter}; -pub use self::read_buf::read_buf; -pub use self::reader_stream::ReaderStream; -pub use self::sink_writer::SinkWriter; -pub use self::stream_reader::StreamReader; -pub use crate::util::{poll_read_buf, poll_write_buf}; diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/io/read_buf.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/io/read_buf.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/io/read_buf.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/io/read_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,65 +0,0 @@ -use bytes::BufMut; -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use tokio::io::AsyncRead; - -/// Read data from an `AsyncRead` into an implementer of the [`BufMut`] trait. -/// -/// [`BufMut`]: bytes::BufMut -/// -/// # Example -/// -/// ``` -/// use bytes::{Bytes, BytesMut}; -/// use tokio_stream as stream; -/// use tokio::io::Result; -/// use tokio_util::io::{StreamReader, read_buf}; -/// # #[tokio::main] -/// # async fn main() -> std::io::Result<()> { -/// -/// // Create a reader from an iterator. This particular reader will always be -/// // ready. 
-/// let mut read = StreamReader::new(stream::iter(vec![Result::Ok(Bytes::from_static(&[0, 1, 2, 3]))])); -/// -/// let mut buf = BytesMut::new(); -/// let mut reads = 0; -/// -/// loop { -/// reads += 1; -/// let n = read_buf(&mut read, &mut buf).await?; -/// -/// if n == 0 { -/// break; -/// } -/// } -/// -/// // one or more reads might be necessary. -/// assert!(reads >= 1); -/// assert_eq!(&buf[..], &[0, 1, 2, 3]); -/// # Ok(()) -/// # } -/// ``` -pub async fn read_buf(read: &mut R, buf: &mut B) -> io::Result -where - R: AsyncRead + Unpin, - B: BufMut, -{ - return ReadBufFn(read, buf).await; - - struct ReadBufFn<'a, R, B>(&'a mut R, &'a mut B); - - impl<'a, R, B> Future for ReadBufFn<'a, R, B> - where - R: AsyncRead + Unpin, - B: BufMut, - { - type Output = io::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = &mut *self; - crate::util::poll_read_buf(Pin::new(this.0), cx, this.1) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/io/reader_stream.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/io/reader_stream.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/io/reader_stream.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/io/reader_stream.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,118 +0,0 @@ -use bytes::{Bytes, BytesMut}; -use futures_core::stream::Stream; -use pin_project_lite::pin_project; -use std::pin::Pin; -use std::task::{Context, Poll}; -use tokio::io::AsyncRead; - -const DEFAULT_CAPACITY: usize = 4096; - -pin_project! { - /// Convert an [`AsyncRead`] into a [`Stream`] of byte chunks. - /// - /// This stream is fused. It performs the inverse operation of - /// [`StreamReader`]. - /// - /// # Example - /// - /// ``` - /// # #[tokio::main] - /// # async fn main() -> std::io::Result<()> { - /// use tokio_stream::StreamExt; - /// use tokio_util::io::ReaderStream; - /// - /// // Create a stream of data. 
- /// let data = b"hello, world!"; - /// let mut stream = ReaderStream::new(&data[..]); - /// - /// // Read all of the chunks into a vector. - /// let mut stream_contents = Vec::new(); - /// while let Some(chunk) = stream.next().await { - /// stream_contents.extend_from_slice(&chunk?); - /// } - /// - /// // Once the chunks are concatenated, we should have the - /// // original data. - /// assert_eq!(stream_contents, data); - /// # Ok(()) - /// # } - /// ``` - /// - /// [`AsyncRead`]: tokio::io::AsyncRead - /// [`StreamReader`]: crate::io::StreamReader - /// [`Stream`]: futures_core::Stream - #[derive(Debug)] - pub struct ReaderStream { - // Reader itself. - // - // This value is `None` if the stream has terminated. - #[pin] - reader: Option, - // Working buffer, used to optimize allocations. - buf: BytesMut, - capacity: usize, - } -} - -impl ReaderStream { - /// Convert an [`AsyncRead`] into a [`Stream`] with item type - /// `Result`. - /// - /// [`AsyncRead`]: tokio::io::AsyncRead - /// [`Stream`]: futures_core::Stream - pub fn new(reader: R) -> Self { - ReaderStream { - reader: Some(reader), - buf: BytesMut::new(), - capacity: DEFAULT_CAPACITY, - } - } - - /// Convert an [`AsyncRead`] into a [`Stream`] with item type - /// `Result`, - /// with a specific read buffer initial capacity. 
- /// - /// [`AsyncRead`]: tokio::io::AsyncRead - /// [`Stream`]: futures_core::Stream - pub fn with_capacity(reader: R, capacity: usize) -> Self { - ReaderStream { - reader: Some(reader), - buf: BytesMut::with_capacity(capacity), - capacity, - } - } -} - -impl Stream for ReaderStream { - type Item = std::io::Result; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - use crate::util::poll_read_buf; - - let mut this = self.as_mut().project(); - - let reader = match this.reader.as_pin_mut() { - Some(r) => r, - None => return Poll::Ready(None), - }; - - if this.buf.capacity() == 0 { - this.buf.reserve(*this.capacity); - } - - match poll_read_buf(reader, cx, &mut this.buf) { - Poll::Pending => Poll::Pending, - Poll::Ready(Err(err)) => { - self.project().reader.set(None); - Poll::Ready(Some(Err(err))) - } - Poll::Ready(Ok(0)) => { - self.project().reader.set(None); - Poll::Ready(None) - } - Poll::Ready(Ok(_)) => { - let chunk = this.buf.split(); - Poll::Ready(Some(Ok(chunk.freeze()))) - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/io/sink_writer.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/io/sink_writer.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/io/sink_writer.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/io/sink_writer.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,135 +0,0 @@ -use futures_core::ready; -use futures_sink::Sink; - -use futures_core::stream::Stream; -use pin_project_lite::pin_project; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; - -pin_project! { - /// Convert a [`Sink`] of byte chunks into an [`AsyncWrite`]. - /// - /// Whenever you write to this [`SinkWriter`], the supplied bytes are - /// forwarded to the inner [`Sink`]. When `shutdown` is called on this - /// [`SinkWriter`], the inner sink is closed. 
- /// - /// This adapter takes a `Sink<&[u8]>` and provides an [`AsyncWrite`] impl - /// for it. Because of the lifetime, this trait is relatively rarely - /// implemented. The main ways to get a `Sink<&[u8]>` that you can use with - /// this type are: - /// - /// * With the codec module by implementing the [`Encoder`]`<&[u8]>` trait. - /// * By wrapping a `Sink` in a [`CopyToBytes`]. - /// * Manually implementing `Sink<&[u8]>` directly. - /// - /// The opposite conversion of implementing `Sink<_>` for an [`AsyncWrite`] - /// is done using the [`codec`] module. - /// - /// # Example - /// - /// ``` - /// use bytes::Bytes; - /// use futures_util::SinkExt; - /// use std::io::{Error, ErrorKind}; - /// use tokio::io::AsyncWriteExt; - /// use tokio_util::io::{SinkWriter, CopyToBytes}; - /// use tokio_util::sync::PollSender; - /// - /// # #[tokio::main(flavor = "current_thread")] - /// # async fn main() -> Result<(), Error> { - /// // We use an mpsc channel as an example of a `Sink`. - /// let (tx, mut rx) = tokio::sync::mpsc::channel::(1); - /// let sink = PollSender::new(tx).sink_map_err(|_| Error::from(ErrorKind::BrokenPipe)); - /// - /// // Wrap it in `CopyToBytes` to get a `Sink<&[u8]>`. - /// let mut writer = SinkWriter::new(CopyToBytes::new(sink)); - /// - /// // Write data to our interface... - /// let data: [u8; 4] = [1, 2, 3, 4]; - /// let _ = writer.write(&data).await?; - /// - /// // ... and receive it. - /// assert_eq!(data.as_slice(), &*rx.recv().await.unwrap()); - /// # Ok(()) - /// # } - /// ``` - /// - /// [`AsyncWrite`]: tokio::io::AsyncWrite - /// [`CopyToBytes`]: crate::io::CopyToBytes - /// [`Encoder`]: crate::codec::Encoder - /// [`Sink`]: futures_sink::Sink - /// [`codec`]: crate::codec - #[derive(Debug)] - pub struct SinkWriter { - #[pin] - inner: S, - } -} - -impl SinkWriter { - /// Creates a new [`SinkWriter`]. - pub fn new(sink: S) -> Self { - Self { inner: sink } - } - - /// Gets a reference to the underlying sink. 
- pub fn get_ref(&self) -> &S { - &self.inner - } - - /// Gets a mutable reference to the underlying sink. - pub fn get_mut(&mut self) -> &mut S { - &mut self.inner - } - - /// Consumes this [`SinkWriter`], returning the underlying sink. - pub fn into_inner(self) -> S { - self.inner - } -} -impl AsyncWrite for SinkWriter -where - for<'a> S: Sink<&'a [u8], Error = E>, - E: Into, -{ - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - let mut this = self.project(); - - ready!(this.inner.as_mut().poll_ready(cx).map_err(Into::into))?; - match this.inner.as_mut().start_send(buf) { - Ok(()) => Poll::Ready(Ok(buf.len())), - Err(e) => Poll::Ready(Err(e.into())), - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_flush(cx).map_err(Into::into) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_close(cx).map_err(Into::into) - } -} - -impl Stream for SinkWriter { - type Item = S::Item; - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_next(cx) - } -} - -impl AsyncRead for SinkWriter { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> Poll> { - self.project().inner.poll_read(cx, buf) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/io/stream_reader.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/io/stream_reader.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/io/stream_reader.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/io/stream_reader.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,346 +0,0 @@ -use bytes::Buf; -use futures_core::stream::Stream; -use futures_sink::Sink; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use tokio::io::{AsyncBufRead, AsyncRead, ReadBuf}; - -/// Convert a [`Stream`] of byte chunks into an [`AsyncRead`]. 
-/// -/// This type performs the inverse operation of [`ReaderStream`]. -/// -/// This type also implements the [`AsyncBufRead`] trait, so you can use it -/// to read a `Stream` of byte chunks line-by-line. See the examples below. -/// -/// # Example -/// -/// ``` -/// use bytes::Bytes; -/// use tokio::io::{AsyncReadExt, Result}; -/// use tokio_util::io::StreamReader; -/// # #[tokio::main(flavor = "current_thread")] -/// # async fn main() -> std::io::Result<()> { -/// -/// // Create a stream from an iterator. -/// let stream = tokio_stream::iter(vec![ -/// Result::Ok(Bytes::from_static(&[0, 1, 2, 3])), -/// Result::Ok(Bytes::from_static(&[4, 5, 6, 7])), -/// Result::Ok(Bytes::from_static(&[8, 9, 10, 11])), -/// ]); -/// -/// // Convert it to an AsyncRead. -/// let mut read = StreamReader::new(stream); -/// -/// // Read five bytes from the stream. -/// let mut buf = [0; 5]; -/// read.read_exact(&mut buf).await?; -/// assert_eq!(buf, [0, 1, 2, 3, 4]); -/// -/// // Read the rest of the current chunk. -/// assert_eq!(read.read(&mut buf).await?, 3); -/// assert_eq!(&buf[..3], [5, 6, 7]); -/// -/// // Read the next chunk. -/// assert_eq!(read.read(&mut buf).await?, 4); -/// assert_eq!(&buf[..4], [8, 9, 10, 11]); -/// -/// // We have now reached the end. -/// assert_eq!(read.read(&mut buf).await?, 0); -/// -/// # Ok(()) -/// # } -/// ``` -/// -/// If the stream produces errors which are not [`std::io::Error`], -/// the errors can be converted using [`StreamExt`] to map each -/// element. -/// -/// ``` -/// use bytes::Bytes; -/// use tokio::io::AsyncReadExt; -/// use tokio_util::io::StreamReader; -/// use tokio_stream::StreamExt; -/// # #[tokio::main(flavor = "current_thread")] -/// # async fn main() -> std::io::Result<()> { -/// -/// // Create a stream from an iterator, including an error. 
-/// let stream = tokio_stream::iter(vec![ -/// Result::Ok(Bytes::from_static(&[0, 1, 2, 3])), -/// Result::Ok(Bytes::from_static(&[4, 5, 6, 7])), -/// Result::Err("Something bad happened!") -/// ]); -/// -/// // Use StreamExt to map the stream and error to a std::io::Error -/// let stream = stream.map(|result| result.map_err(|err| { -/// std::io::Error::new(std::io::ErrorKind::Other, err) -/// })); -/// -/// // Convert it to an AsyncRead. -/// let mut read = StreamReader::new(stream); -/// -/// // Read five bytes from the stream. -/// let mut buf = [0; 5]; -/// read.read_exact(&mut buf).await?; -/// assert_eq!(buf, [0, 1, 2, 3, 4]); -/// -/// // Read the rest of the current chunk. -/// assert_eq!(read.read(&mut buf).await?, 3); -/// assert_eq!(&buf[..3], [5, 6, 7]); -/// -/// // Reading the next chunk will produce an error -/// let error = read.read(&mut buf).await.unwrap_err(); -/// assert_eq!(error.kind(), std::io::ErrorKind::Other); -/// assert_eq!(error.into_inner().unwrap().to_string(), "Something bad happened!"); -/// -/// // We have now reached the end. -/// assert_eq!(read.read(&mut buf).await?, 0); -/// -/// # Ok(()) -/// # } -/// ``` -/// -/// Using the [`AsyncBufRead`] impl, you can read a `Stream` of byte chunks -/// line-by-line. Note that you will usually also need to convert the error -/// type when doing this. See the second example for an explanation of how -/// to do this. -/// -/// ``` -/// use tokio::io::{Result, AsyncBufReadExt}; -/// use tokio_util::io::StreamReader; -/// # #[tokio::main(flavor = "current_thread")] -/// # async fn main() -> std::io::Result<()> { -/// -/// // Create a stream of byte chunks. -/// let stream = tokio_stream::iter(vec![ -/// Result::Ok(b"The first line.\n".as_slice()), -/// Result::Ok(b"The second line.".as_slice()), -/// Result::Ok(b"\nThe third".as_slice()), -/// Result::Ok(b" line.\nThe fourth line.\nThe fifth line.\n".as_slice()), -/// ]); -/// -/// // Convert it to an AsyncRead. 
-/// let mut read = StreamReader::new(stream); -/// -/// // Loop through the lines from the `StreamReader`. -/// let mut line = String::new(); -/// let mut lines = Vec::new(); -/// loop { -/// line.clear(); -/// let len = read.read_line(&mut line).await?; -/// if len == 0 { break; } -/// lines.push(line.clone()); -/// } -/// -/// // Verify that we got the lines we expected. -/// assert_eq!( -/// lines, -/// vec![ -/// "The first line.\n", -/// "The second line.\n", -/// "The third line.\n", -/// "The fourth line.\n", -/// "The fifth line.\n", -/// ] -/// ); -/// # Ok(()) -/// # } -/// ``` -/// -/// [`AsyncRead`]: tokio::io::AsyncRead -/// [`AsyncBufRead`]: tokio::io::AsyncBufRead -/// [`Stream`]: futures_core::Stream -/// [`ReaderStream`]: crate::io::ReaderStream -/// [`StreamExt`]: https://docs.rs/tokio-stream/latest/tokio_stream/trait.StreamExt.html -#[derive(Debug)] -pub struct StreamReader { - // This field is pinned. - inner: S, - // This field is not pinned. - chunk: Option, -} - -impl StreamReader -where - S: Stream>, - B: Buf, - E: Into, -{ - /// Convert a stream of byte chunks into an [`AsyncRead`]. - /// - /// The item should be a [`Result`] with the ok variant being something that - /// implements the [`Buf`] trait (e.g. `Vec` or `Bytes`). The error - /// should be convertible into an [io error]. - /// - /// [`Result`]: std::result::Result - /// [`Buf`]: bytes::Buf - /// [io error]: std::io::Error - pub fn new(stream: S) -> Self { - Self { - inner: stream, - chunk: None, - } - } - - /// Do we have a chunk and is it non-empty? - fn has_chunk(&self) -> bool { - if let Some(ref chunk) = self.chunk { - chunk.remaining() > 0 - } else { - false - } - } - - /// Consumes this `StreamReader`, returning a Tuple consisting - /// of the underlying stream and an Option of the internal buffer, - /// which is Some in case the buffer contains elements. 
- pub fn into_inner_with_chunk(self) -> (S, Option) { - if self.has_chunk() { - (self.inner, self.chunk) - } else { - (self.inner, None) - } - } -} - -impl StreamReader { - /// Gets a reference to the underlying stream. - /// - /// It is inadvisable to directly read from the underlying stream. - pub fn get_ref(&self) -> &S { - &self.inner - } - - /// Gets a mutable reference to the underlying stream. - /// - /// It is inadvisable to directly read from the underlying stream. - pub fn get_mut(&mut self) -> &mut S { - &mut self.inner - } - - /// Gets a pinned mutable reference to the underlying stream. - /// - /// It is inadvisable to directly read from the underlying stream. - pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut S> { - self.project().inner - } - - /// Consumes this `BufWriter`, returning the underlying stream. - /// - /// Note that any leftover data in the internal buffer is lost. - /// If you additionally want access to the internal buffer use - /// [`into_inner_with_chunk`]. - /// - /// [`into_inner_with_chunk`]: crate::io::StreamReader::into_inner_with_chunk - pub fn into_inner(self) -> S { - self.inner - } -} - -impl AsyncRead for StreamReader -where - S: Stream>, - B: Buf, - E: Into, -{ - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - if buf.remaining() == 0 { - return Poll::Ready(Ok(())); - } - - let inner_buf = match self.as_mut().poll_fill_buf(cx) { - Poll::Ready(Ok(buf)) => buf, - Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), - Poll::Pending => return Poll::Pending, - }; - let len = std::cmp::min(inner_buf.len(), buf.remaining()); - buf.put_slice(&inner_buf[..len]); - - self.consume(len); - Poll::Ready(Ok(())) - } -} - -impl AsyncBufRead for StreamReader -where - S: Stream>, - B: Buf, - E: Into, -{ - fn poll_fill_buf(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - if self.as_mut().has_chunk() { - // This unwrap is very sad, but it can't be avoided. 
- let buf = self.project().chunk.as_ref().unwrap().chunk(); - return Poll::Ready(Ok(buf)); - } else { - match self.as_mut().project().inner.poll_next(cx) { - Poll::Ready(Some(Ok(chunk))) => { - // Go around the loop in case the chunk is empty. - *self.as_mut().project().chunk = Some(chunk); - } - Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())), - Poll::Ready(None) => return Poll::Ready(Ok(&[])), - Poll::Pending => return Poll::Pending, - } - } - } - } - fn consume(self: Pin<&mut Self>, amt: usize) { - if amt > 0 { - self.project() - .chunk - .as_mut() - .expect("No chunk present") - .advance(amt); - } - } -} - -// The code below is a manual expansion of the code that pin-project-lite would -// generate. This is done because pin-project-lite fails by hitting the recusion -// limit on this struct. (Every line of documentation is handled recursively by -// the macro.) - -impl Unpin for StreamReader {} - -struct StreamReaderProject<'a, S, B> { - inner: Pin<&'a mut S>, - chunk: &'a mut Option, -} - -impl StreamReader { - #[inline] - fn project(self: Pin<&mut Self>) -> StreamReaderProject<'_, S, B> { - // SAFETY: We define that only `inner` should be pinned when `Self` is - // and have an appropriate `impl Unpin` for this. 
- let me = unsafe { Pin::into_inner_unchecked(self) }; - StreamReaderProject { - inner: unsafe { Pin::new_unchecked(&mut me.inner) }, - chunk: &mut me.chunk, - } - } -} - -impl, E, T> Sink for StreamReader { - type Error = E; - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { - self.project().inner.start_send(item) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().inner.poll_close(cx) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/io/sync_bridge.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/io/sync_bridge.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/io/sync_bridge.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/io/sync_bridge.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,156 +0,0 @@ -use std::io::{BufRead, Read, Seek, Write}; -use tokio::io::{ - AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWrite, - AsyncWriteExt, -}; - -/// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or -/// a [`tokio::io::AsyncWrite`] as a [`std::io::Write`]. 
-#[derive(Debug)] -pub struct SyncIoBridge { - src: T, - rt: tokio::runtime::Handle, -} - -impl BufRead for SyncIoBridge { - fn fill_buf(&mut self) -> std::io::Result<&[u8]> { - let src = &mut self.src; - self.rt.block_on(AsyncBufReadExt::fill_buf(src)) - } - - fn consume(&mut self, amt: usize) { - let src = &mut self.src; - AsyncBufReadExt::consume(src, amt) - } - - fn read_until(&mut self, byte: u8, buf: &mut Vec) -> std::io::Result { - let src = &mut self.src; - self.rt - .block_on(AsyncBufReadExt::read_until(src, byte, buf)) - } - fn read_line(&mut self, buf: &mut String) -> std::io::Result { - let src = &mut self.src; - self.rt.block_on(AsyncBufReadExt::read_line(src, buf)) - } -} - -impl Read for SyncIoBridge { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - let src = &mut self.src; - self.rt.block_on(AsyncReadExt::read(src, buf)) - } - - fn read_to_end(&mut self, buf: &mut Vec) -> std::io::Result { - let src = &mut self.src; - self.rt.block_on(src.read_to_end(buf)) - } - - fn read_to_string(&mut self, buf: &mut String) -> std::io::Result { - let src = &mut self.src; - self.rt.block_on(src.read_to_string(buf)) - } - - fn read_exact(&mut self, buf: &mut [u8]) -> std::io::Result<()> { - let src = &mut self.src; - // The AsyncRead trait returns the count, synchronous doesn't. 
- let _n = self.rt.block_on(src.read_exact(buf))?; - Ok(()) - } -} - -impl Write for SyncIoBridge { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - let src = &mut self.src; - self.rt.block_on(src.write(buf)) - } - - fn flush(&mut self) -> std::io::Result<()> { - let src = &mut self.src; - self.rt.block_on(src.flush()) - } - - fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> { - let src = &mut self.src; - self.rt.block_on(src.write_all(buf)) - } - - fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result { - let src = &mut self.src; - self.rt.block_on(src.write_vectored(bufs)) - } -} - -impl Seek for SyncIoBridge { - fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { - let src = &mut self.src; - self.rt.block_on(AsyncSeekExt::seek(src, pos)) - } -} - -// Because https://doc.rust-lang.org/std/io/trait.Write.html#method.is_write_vectored is at the time -// of this writing still unstable, we expose this as part of a standalone method. -impl SyncIoBridge { - /// Determines if the underlying [`tokio::io::AsyncWrite`] target supports efficient vectored writes. - /// - /// See [`tokio::io::AsyncWrite::is_write_vectored`]. - pub fn is_write_vectored(&self) -> bool { - self.src.is_write_vectored() - } -} - -impl SyncIoBridge { - /// Shutdown this writer. This method provides a way to call the [`AsyncWriteExt::shutdown`] - /// function of the inner [`tokio::io::AsyncWrite`] instance. - /// - /// # Errors - /// - /// This method returns the same errors as [`AsyncWriteExt::shutdown`]. - /// - /// [`AsyncWriteExt::shutdown`]: tokio::io::AsyncWriteExt::shutdown - pub fn shutdown(&mut self) -> std::io::Result<()> { - let src = &mut self.src; - self.rt.block_on(src.shutdown()) - } -} - -impl SyncIoBridge { - /// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or - /// a [`tokio::io::AsyncWrite`] as a [`std::io::Write`]. 
- /// - /// When this struct is created, it captures a handle to the current thread's runtime with [`tokio::runtime::Handle::current`]. - /// It is hence OK to move this struct into a separate thread outside the runtime, as created - /// by e.g. [`tokio::task::spawn_blocking`]. - /// - /// Stated even more strongly: to make use of this bridge, you *must* move - /// it into a separate thread outside the runtime. The synchronous I/O will use the - /// underlying handle to block on the backing asynchronous source, via - /// [`tokio::runtime::Handle::block_on`]. As noted in the documentation for that - /// function, an attempt to `block_on` from an asynchronous execution context - /// will panic. - /// - /// # Wrapping `!Unpin` types - /// - /// Use e.g. `SyncIoBridge::new(Box::pin(src))`. - /// - /// # Panics - /// - /// This will panic if called outside the context of a Tokio runtime. - #[track_caller] - pub fn new(src: T) -> Self { - Self::new_with_handle(src, tokio::runtime::Handle::current()) - } - - /// Use a [`tokio::io::AsyncRead`] synchronously as a [`std::io::Read`] or - /// a [`tokio::io::AsyncWrite`] as a [`std::io::Write`]. - /// - /// This is the same as [`SyncIoBridge::new`], but allows passing an arbitrary handle and hence may - /// be initially invoked outside of an asynchronous context. - pub fn new_with_handle(src: T, rt: tokio::runtime::Handle) -> Self { - Self { src, rt } - } - - /// Consume this bridge, returning the underlying stream. 
- pub fn into_inner(self) -> T { - self.src - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/lib.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/lib.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,60 +0,0 @@ -#![allow(clippy::needless_doctest_main)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - unreachable_pub -)] -#![doc(test( - no_crate_inject, - attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) -))] -#![cfg_attr(docsrs, feature(doc_cfg))] - -//! Utilities for working with Tokio. -//! -//! This crate is not versioned in lockstep with the core -//! [`tokio`] crate. However, `tokio-util` _will_ respect Rust's -//! semantic versioning policy, especially with regard to breaking changes. -//! -//! [`tokio`]: https://docs.rs/tokio - -#[macro_use] -mod cfg; - -mod loom; - -cfg_codec! { - pub mod codec; -} - -cfg_net! { - #[cfg(not(target_arch = "wasm32"))] - pub mod udp; - pub mod net; -} - -cfg_compat! { - pub mod compat; -} - -cfg_io! { - pub mod io; -} - -cfg_rt! { - pub mod context; - pub mod task; -} - -cfg_time! 
{ - pub mod time; -} - -pub mod sync; - -pub mod either; - -pub use bytes; - -mod util; diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/loom.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/loom.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/loom.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/loom.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -pub(crate) use std::sync; diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/net/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/net/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/net/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/net/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,97 +0,0 @@ -//! TCP/UDP/Unix helpers for tokio. - -use crate::either::Either; -use std::future::Future; -use std::io::Result; -use std::pin::Pin; -use std::task::{Context, Poll}; - -#[cfg(unix)] -pub mod unix; - -/// A trait for a listener: `TcpListener` and `UnixListener`. -pub trait Listener { - /// The stream's type of this listener. - type Io: tokio::io::AsyncRead + tokio::io::AsyncWrite; - /// The socket address type of this listener. - type Addr; - - /// Polls to accept a new incoming connection to this listener. - fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll>; - - /// Accepts a new incoming connection from this listener. - fn accept(&mut self) -> ListenerAcceptFut<'_, Self> - where - Self: Sized, - { - ListenerAcceptFut { listener: self } - } - - /// Returns the local address that this listener is bound to. - fn local_addr(&self) -> Result; -} - -impl Listener for tokio::net::TcpListener { - type Io = tokio::net::TcpStream; - type Addr = std::net::SocketAddr; - - fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll> { - Self::poll_accept(self, cx) - } - - fn local_addr(&self) -> Result { - self.local_addr().map(Into::into) - } -} - -/// Future for accepting a new connection from a listener. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ListenerAcceptFut<'a, L> { - listener: &'a mut L, -} - -impl<'a, L> Future for ListenerAcceptFut<'a, L> -where - L: Listener, -{ - type Output = Result<(L::Io, L::Addr)>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.listener.poll_accept(cx) - } -} - -impl Either -where - L: Listener, - R: Listener, -{ - /// Accepts a new incoming connection from this listener. - pub async fn accept(&mut self) -> Result> { - match self { - Either::Left(listener) => { - let (stream, addr) = listener.accept().await?; - Ok(Either::Left((stream, addr))) - } - Either::Right(listener) => { - let (stream, addr) = listener.accept().await?; - Ok(Either::Right((stream, addr))) - } - } - } - - /// Returns the local address that this listener is bound to. - pub fn local_addr(&self) -> Result> { - match self { - Either::Left(listener) => { - let addr = listener.local_addr()?; - Ok(Either::Left(addr)) - } - Either::Right(listener) => { - let addr = listener.local_addr()?; - Ok(Either::Right(addr)) - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/net/unix/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/net/unix/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/net/unix/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/net/unix/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,18 +0,0 @@ -//! Unix domain socket helpers. 
- -use super::Listener; -use std::io::Result; -use std::task::{Context, Poll}; - -impl Listener for tokio::net::UnixListener { - type Io = tokio::net::UnixStream; - type Addr = tokio::net::unix::SocketAddr; - - fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll> { - Self::poll_accept(self, cx) - } - - fn local_addr(&self) -> Result { - self.local_addr().map(Into::into) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/cancellation_token/guard.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/cancellation_token/guard.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/cancellation_token/guard.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/cancellation_token/guard.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,27 +0,0 @@ -use crate::sync::CancellationToken; - -/// A wrapper for cancellation token which automatically cancels -/// it on drop. It is created using `drop_guard` method on the `CancellationToken`. -#[derive(Debug)] -pub struct DropGuard { - pub(super) inner: Option, -} - -impl DropGuard { - /// Returns stored cancellation token and removes this drop guard instance - /// (i.e. it will no longer cancel token). Other guards for this token - /// are not affected. - pub fn disarm(mut self) -> CancellationToken { - self.inner - .take() - .expect("`inner` can be only None in a destructor") - } -} - -impl Drop for DropGuard { - fn drop(&mut self) { - if let Some(inner) = &self.inner { - inner.cancel(); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/cancellation_token/tree_node.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/cancellation_token/tree_node.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/cancellation_token/tree_node.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/cancellation_token/tree_node.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,365 +0,0 @@ -//! 
This mod provides the logic for the inner tree structure of the CancellationToken. -//! -//! CancellationTokens are only light handles with references to [`TreeNode`]. -//! All the logic is actually implemented in the [`TreeNode`]. -//! -//! A [`TreeNode`] is part of the cancellation tree and may have one parent and an arbitrary number of -//! children. -//! -//! A [`TreeNode`] can receive the request to perform a cancellation through a CancellationToken. -//! This cancellation request will cancel the node and all of its descendants. -//! -//! As soon as a node cannot get cancelled any more (because it was already cancelled or it has no -//! more CancellationTokens pointing to it any more), it gets removed from the tree, to keep the -//! tree as small as possible. -//! -//! # Invariants -//! -//! Those invariants shall be true at any time. -//! -//! 1. A node that has no parents and no handles can no longer be cancelled. -//! This is important during both cancellation and refcounting. -//! -//! 2. If node B *is* or *was* a child of node A, then node B was created *after* node A. -//! This is important for deadlock safety, as it is used for lock order. -//! Node B can only become the child of node A in two ways: -//! - being created with `child_node()`, in which case it is trivially true that -//! node A already existed when node B was created -//! - being moved A->C->B to A->B because node C was removed in `decrease_handle_refcount()` -//! or `cancel()`. In this case the invariant still holds, as B was younger than C, and C -//! was younger than A, therefore B is also younger than A. -//! -//! 3. If two nodes are both unlocked and node A is the parent of node B, then node B is a child of -//! node A. It is important to always restore that invariant before dropping the lock of a node. -//! -//! # Deadlock safety -//! -//! We always lock in the order of creation time. We can prove this through invariant #2. -//! 
Specifically, through invariant #2, we know that we always have to lock a parent -//! before its child. -//! -use crate::loom::sync::{Arc, Mutex, MutexGuard}; - -/// A node of the cancellation tree structure -/// -/// The actual data it holds is wrapped inside a mutex for synchronization. -pub(crate) struct TreeNode { - inner: Mutex, - waker: tokio::sync::Notify, -} -impl TreeNode { - pub(crate) fn new() -> Self { - Self { - inner: Mutex::new(Inner { - parent: None, - parent_idx: 0, - children: vec![], - is_cancelled: false, - num_handles: 1, - }), - waker: tokio::sync::Notify::new(), - } - } - - pub(crate) fn notified(&self) -> tokio::sync::futures::Notified<'_> { - self.waker.notified() - } -} - -/// The data contained inside a TreeNode. -/// -/// This struct exists so that the data of the node can be wrapped -/// in a Mutex. -struct Inner { - parent: Option>, - parent_idx: usize, - children: Vec>, - is_cancelled: bool, - num_handles: usize, -} - -/// Returns whether or not the node is cancelled -pub(crate) fn is_cancelled(node: &Arc) -> bool { - node.inner.lock().unwrap().is_cancelled -} - -/// Creates a child node -pub(crate) fn child_node(parent: &Arc) -> Arc { - let mut locked_parent = parent.inner.lock().unwrap(); - - // Do not register as child if we are already cancelled. - // Cancelled trees can never be uncancelled and therefore - // need no connection to parents or children any more. 
- if locked_parent.is_cancelled { - return Arc::new(TreeNode { - inner: Mutex::new(Inner { - parent: None, - parent_idx: 0, - children: vec![], - is_cancelled: true, - num_handles: 1, - }), - waker: tokio::sync::Notify::new(), - }); - } - - let child = Arc::new(TreeNode { - inner: Mutex::new(Inner { - parent: Some(parent.clone()), - parent_idx: locked_parent.children.len(), - children: vec![], - is_cancelled: false, - num_handles: 1, - }), - waker: tokio::sync::Notify::new(), - }); - - locked_parent.children.push(child.clone()); - - child -} - -/// Disconnects the given parent from all of its children. -/// -/// Takes a reference to [Inner] to make sure the parent is already locked. -fn disconnect_children(node: &mut Inner) { - for child in std::mem::take(&mut node.children) { - let mut locked_child = child.inner.lock().unwrap(); - locked_child.parent_idx = 0; - locked_child.parent = None; - } -} - -/// Figures out the parent of the node and locks the node and its parent atomically. -/// -/// The basic principle of preventing deadlocks in the tree is -/// that we always lock the parent first, and then the child. -/// For more info look at *deadlock safety* and *invariant #2*. -/// -/// Sadly, it's impossible to figure out the parent of a node without -/// locking it. To then achieve locking order consistency, the node -/// has to be unlocked before the parent gets locked. -/// This leaves a small window where we already assume that we know the parent, -/// but neither the parent nor the node is locked. Therefore, the parent could change. -/// -/// To prevent that this problem leaks into the rest of the code, it is abstracted -/// in this function. -/// -/// The locked child and optionally its locked parent, if a parent exists, get passed -/// to the `func` argument via (node, None) or (node, Some(parent)). 
-fn with_locked_node_and_parent(node: &Arc, func: F) -> Ret -where - F: FnOnce(MutexGuard<'_, Inner>, Option>) -> Ret, -{ - use std::sync::TryLockError; - - let mut locked_node = node.inner.lock().unwrap(); - - // Every time this fails, the number of ancestors of the node decreases, - // so the loop must succeed after a finite number of iterations. - loop { - // Look up the parent of the currently locked node. - let potential_parent = match locked_node.parent.as_ref() { - Some(potential_parent) => potential_parent.clone(), - None => return func(locked_node, None), - }; - - // Lock the parent. This may require unlocking the child first. - let locked_parent = match potential_parent.inner.try_lock() { - Ok(locked_parent) => locked_parent, - Err(TryLockError::WouldBlock) => { - drop(locked_node); - // Deadlock safety: - // - // Due to invariant #2, the potential parent must come before - // the child in the creation order. Therefore, we can safely - // lock the child while holding the parent lock. - let locked_parent = potential_parent.inner.lock().unwrap(); - locked_node = node.inner.lock().unwrap(); - locked_parent - } - Err(TryLockError::Poisoned(err)) => Err(err).unwrap(), - }; - - // If we unlocked the child, then the parent may have changed. Check - // that we still have the right parent. - if let Some(actual_parent) = locked_node.parent.as_ref() { - if Arc::ptr_eq(actual_parent, &potential_parent) { - return func(locked_node, Some(locked_parent)); - } - } - } -} - -/// Moves all children from `node` to `parent`. -/// -/// `parent` MUST have been a parent of the node when they both got locked, -/// otherwise there is a potential for a deadlock as invariant #2 would be violated. -/// -/// To acquire the locks for node and parent, use [with_locked_node_and_parent]. 
-fn move_children_to_parent(node: &mut Inner, parent: &mut Inner) { - // Pre-allocate in the parent, for performance - parent.children.reserve(node.children.len()); - - for child in std::mem::take(&mut node.children) { - { - let mut child_locked = child.inner.lock().unwrap(); - child_locked.parent = node.parent.clone(); - child_locked.parent_idx = parent.children.len(); - } - parent.children.push(child); - } -} - -/// Removes a child from the parent. -/// -/// `parent` MUST be the parent of `node`. -/// To acquire the locks for node and parent, use [with_locked_node_and_parent]. -fn remove_child(parent: &mut Inner, mut node: MutexGuard<'_, Inner>) { - // Query the position from where to remove a node - let pos = node.parent_idx; - node.parent = None; - node.parent_idx = 0; - - // Unlock node, so that only one child at a time is locked. - // Otherwise we would violate the lock order (see 'deadlock safety') as we - // don't know the creation order of the child nodes - drop(node); - - // If `node` is the last element in the list, we don't need any swapping - if parent.children.len() == pos + 1 { - parent.children.pop().unwrap(); - } else { - // If `node` is not the last element in the list, we need to - // replace it with the last element - let replacement_child = parent.children.pop().unwrap(); - replacement_child.inner.lock().unwrap().parent_idx = pos; - parent.children[pos] = replacement_child; - } - - let len = parent.children.len(); - if 4 * len <= parent.children.capacity() { - parent.children.shrink_to(2 * len); - } -} - -/// Increases the reference count of handles. -pub(crate) fn increase_handle_refcount(node: &Arc) { - let mut locked_node = node.inner.lock().unwrap(); - - // Once no handles are left over, the node gets detached from the tree. - // There should never be a new handle once all handles are dropped. - assert!(locked_node.num_handles > 0); - - locked_node.num_handles += 1; -} - -/// Decreases the reference count of handles. 
-/// -/// Once no handle is left, we can remove the node from the -/// tree and connect its parent directly to its children. -pub(crate) fn decrease_handle_refcount(node: &Arc) { - let num_handles = { - let mut locked_node = node.inner.lock().unwrap(); - locked_node.num_handles -= 1; - locked_node.num_handles - }; - - if num_handles == 0 { - with_locked_node_and_parent(node, |mut node, parent| { - // Remove the node from the tree - match parent { - Some(mut parent) => { - // As we want to remove ourselves from the tree, - // we have to move the children to the parent, so that - // they still receive the cancellation event without us. - // Moving them does not violate invariant #1. - move_children_to_parent(&mut node, &mut parent); - - // Remove the node from the parent - remove_child(&mut parent, node); - } - None => { - // Due to invariant #1, we can assume that our - // children can no longer be cancelled through us. - // (as we now have neither a parent nor handles) - // Therefore we can disconnect them. - disconnect_children(&mut node); - } - } - }); - } -} - -/// Cancels a node and its children. -pub(crate) fn cancel(node: &Arc) { - let mut locked_node = node.inner.lock().unwrap(); - - if locked_node.is_cancelled { - return; - } - - // One by one, adopt grandchildren and then cancel and detach the child - while let Some(child) = locked_node.children.pop() { - // This can't deadlock because the mutex we are already - // holding is the parent of child. 
- let mut locked_child = child.inner.lock().unwrap(); - - // Detach the child from node - // No need to modify node.children, as the child already got removed with `.pop` - locked_child.parent = None; - locked_child.parent_idx = 0; - - // If child is already cancelled, detaching is enough - if locked_child.is_cancelled { - continue; - } - - // Cancel or adopt grandchildren - while let Some(grandchild) = locked_child.children.pop() { - // This can't deadlock because the two mutexes we are already - // holding is the parent and grandparent of grandchild. - let mut locked_grandchild = grandchild.inner.lock().unwrap(); - - // Detach the grandchild - locked_grandchild.parent = None; - locked_grandchild.parent_idx = 0; - - // If grandchild is already cancelled, detaching is enough - if locked_grandchild.is_cancelled { - continue; - } - - // For performance reasons, only adopt grandchildren that have children. - // Otherwise, just cancel them right away, no need for another iteration. - if locked_grandchild.children.is_empty() { - // Cancel the grandchild - locked_grandchild.is_cancelled = true; - locked_grandchild.children = Vec::new(); - drop(locked_grandchild); - grandchild.waker.notify_waiters(); - } else { - // Otherwise, adopt grandchild - locked_grandchild.parent = Some(node.clone()); - locked_grandchild.parent_idx = locked_node.children.len(); - drop(locked_grandchild); - locked_node.children.push(grandchild); - } - } - - // Cancel the child - locked_child.is_cancelled = true; - locked_child.children = Vec::new(); - drop(locked_child); - child.waker.notify_waiters(); - - // Now the child is cancelled and detached and all its children are adopted. - // Just continue until all (including adopted) children are cancelled and detached. - } - - // Cancel the node itself. 
- locked_node.is_cancelled = true; - locked_node.children = Vec::new(); - drop(locked_node); - node.waker.notify_waiters(); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/cancellation_token.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/cancellation_token.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/cancellation_token.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/cancellation_token.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,341 +0,0 @@ -//! An asynchronously awaitable `CancellationToken`. -//! The token allows to signal a cancellation request to one or more tasks. -pub(crate) mod guard; -mod tree_node; - -use crate::loom::sync::Arc; -use crate::util::MaybeDangling; -use core::future::Future; -use core::pin::Pin; -use core::task::{Context, Poll}; - -use guard::DropGuard; -use pin_project_lite::pin_project; - -/// A token which can be used to signal a cancellation request to one or more -/// tasks. -/// -/// Tasks can call [`CancellationToken::cancelled()`] in order to -/// obtain a Future which will be resolved when cancellation is requested. -/// -/// Cancellation can be requested through the [`CancellationToken::cancel`] method. -/// -/// # Examples -/// -/// ```no_run -/// use tokio::select; -/// use tokio_util::sync::CancellationToken; -/// -/// #[tokio::main] -/// async fn main() { -/// let token = CancellationToken::new(); -/// let cloned_token = token.clone(); -/// -/// let join_handle = tokio::spawn(async move { -/// // Wait for either cancellation or a very long time -/// select! 
{ -/// _ = cloned_token.cancelled() => { -/// // The token was cancelled -/// 5 -/// } -/// _ = tokio::time::sleep(std::time::Duration::from_secs(9999)) => { -/// 99 -/// } -/// } -/// }); -/// -/// tokio::spawn(async move { -/// tokio::time::sleep(std::time::Duration::from_millis(10)).await; -/// token.cancel(); -/// }); -/// -/// assert_eq!(5, join_handle.await.unwrap()); -/// } -/// ``` -pub struct CancellationToken { - inner: Arc, -} - -impl std::panic::UnwindSafe for CancellationToken {} -impl std::panic::RefUnwindSafe for CancellationToken {} - -pin_project! { - /// A Future that is resolved once the corresponding [`CancellationToken`] - /// is cancelled. - #[must_use = "futures do nothing unless polled"] - pub struct WaitForCancellationFuture<'a> { - cancellation_token: &'a CancellationToken, - #[pin] - future: tokio::sync::futures::Notified<'a>, - } -} - -pin_project! { - /// A Future that is resolved once the corresponding [`CancellationToken`] - /// is cancelled. - /// - /// This is the counterpart to [`WaitForCancellationFuture`] that takes - /// [`CancellationToken`] by value instead of using a reference. - #[must_use = "futures do nothing unless polled"] - pub struct WaitForCancellationFutureOwned { - // This field internally has a reference to the cancellation token, but camouflages - // the relationship with `'static`. To avoid Undefined Behavior, we must ensure - // that the reference is only used while the cancellation token is still alive. To - // do that, we ensure that the future is the first field, so that it is dropped - // before the cancellation token. - // - // We use `MaybeDanglingFuture` here because without it, the compiler could assert - // the reference inside `future` to be valid even after the destructor of that - // field runs. (Specifically, when the `WaitForCancellationFutureOwned` is passed - // as an argument to a function, the reference can be asserted to be valid for the - // rest of that function.) 
To avoid that, we use `MaybeDangling` which tells the - // compiler that the reference stored inside it might not be valid. - // - // See - // for more info. - #[pin] - future: MaybeDangling>, - cancellation_token: CancellationToken, - } -} - -// ===== impl CancellationToken ===== - -impl core::fmt::Debug for CancellationToken { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("CancellationToken") - .field("is_cancelled", &self.is_cancelled()) - .finish() - } -} - -impl Clone for CancellationToken { - /// Creates a clone of the `CancellationToken` which will get cancelled - /// whenever the current token gets cancelled, and vice versa. - fn clone(&self) -> Self { - tree_node::increase_handle_refcount(&self.inner); - CancellationToken { - inner: self.inner.clone(), - } - } -} - -impl Drop for CancellationToken { - fn drop(&mut self) { - tree_node::decrease_handle_refcount(&self.inner); - } -} - -impl Default for CancellationToken { - fn default() -> CancellationToken { - CancellationToken::new() - } -} - -impl CancellationToken { - /// Creates a new `CancellationToken` in the non-cancelled state. - pub fn new() -> CancellationToken { - CancellationToken { - inner: Arc::new(tree_node::TreeNode::new()), - } - } - - /// Creates a `CancellationToken` which will get cancelled whenever the - /// current token gets cancelled. Unlike a cloned `CancellationToken`, - /// cancelling a child token does not cancel the parent token. - /// - /// If the current token is already cancelled, the child token will get - /// returned in cancelled state. - /// - /// # Examples - /// - /// ```no_run - /// use tokio::select; - /// use tokio_util::sync::CancellationToken; - /// - /// #[tokio::main] - /// async fn main() { - /// let token = CancellationToken::new(); - /// let child_token = token.child_token(); - /// - /// let join_handle = tokio::spawn(async move { - /// // Wait for either cancellation or a very long time - /// select! 
{ - /// _ = child_token.cancelled() => { - /// // The token was cancelled - /// 5 - /// } - /// _ = tokio::time::sleep(std::time::Duration::from_secs(9999)) => { - /// 99 - /// } - /// } - /// }); - /// - /// tokio::spawn(async move { - /// tokio::time::sleep(std::time::Duration::from_millis(10)).await; - /// token.cancel(); - /// }); - /// - /// assert_eq!(5, join_handle.await.unwrap()); - /// } - /// ``` - pub fn child_token(&self) -> CancellationToken { - CancellationToken { - inner: tree_node::child_node(&self.inner), - } - } - - /// Cancel the [`CancellationToken`] and all child tokens which had been - /// derived from it. - /// - /// This will wake up all tasks which are waiting for cancellation. - /// - /// Be aware that cancellation is not an atomic operation. It is possible - /// for another thread running in parallel with a call to `cancel` to first - /// receive `true` from `is_cancelled` on one child node, and then receive - /// `false` from `is_cancelled` on another child node. However, once the - /// call to `cancel` returns, all child nodes have been fully cancelled. - pub fn cancel(&self) { - tree_node::cancel(&self.inner); - } - - /// Returns `true` if the `CancellationToken` is cancelled. - pub fn is_cancelled(&self) -> bool { - tree_node::is_cancelled(&self.inner) - } - - /// Returns a `Future` that gets fulfilled when cancellation is requested. - /// - /// The future will complete immediately if the token is already cancelled - /// when this method is called. - /// - /// # Cancel safety - /// - /// This method is cancel safe. - pub fn cancelled(&self) -> WaitForCancellationFuture<'_> { - WaitForCancellationFuture { - cancellation_token: self, - future: self.inner.notified(), - } - } - - /// Returns a `Future` that gets fulfilled when cancellation is requested. - /// - /// The future will complete immediately if the token is already cancelled - /// when this method is called. 
- /// - /// The function takes self by value and returns a future that owns the - /// token. - /// - /// # Cancel safety - /// - /// This method is cancel safe. - pub fn cancelled_owned(self) -> WaitForCancellationFutureOwned { - WaitForCancellationFutureOwned::new(self) - } - - /// Creates a `DropGuard` for this token. - /// - /// Returned guard will cancel this token (and all its children) on drop - /// unless disarmed. - pub fn drop_guard(self) -> DropGuard { - DropGuard { inner: Some(self) } - } -} - -// ===== impl WaitForCancellationFuture ===== - -impl<'a> core::fmt::Debug for WaitForCancellationFuture<'a> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("WaitForCancellationFuture").finish() - } -} - -impl<'a> Future for WaitForCancellationFuture<'a> { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - let mut this = self.project(); - loop { - if this.cancellation_token.is_cancelled() { - return Poll::Ready(()); - } - - // No wakeups can be lost here because there is always a call to - // `is_cancelled` between the creation of the future and the call to - // `poll`, and the code that sets the cancelled flag does so before - // waking the `Notified`. - if this.future.as_mut().poll(cx).is_pending() { - return Poll::Pending; - } - - this.future.set(this.cancellation_token.inner.notified()); - } - } -} - -// ===== impl WaitForCancellationFutureOwned ===== - -impl core::fmt::Debug for WaitForCancellationFutureOwned { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("WaitForCancellationFutureOwned").finish() - } -} - -impl WaitForCancellationFutureOwned { - fn new(cancellation_token: CancellationToken) -> Self { - WaitForCancellationFutureOwned { - // cancellation_token holds a heap allocation and is guaranteed to have a - // stable deref, thus it would be ok to move the cancellation_token while - // the future holds a reference to it. 
- // - // # Safety - // - // cancellation_token is dropped after future due to the field ordering. - future: MaybeDangling::new(unsafe { Self::new_future(&cancellation_token) }), - cancellation_token, - } - } - - /// # Safety - /// The returned future must be destroyed before the cancellation token is - /// destroyed. - unsafe fn new_future( - cancellation_token: &CancellationToken, - ) -> tokio::sync::futures::Notified<'static> { - let inner_ptr = Arc::as_ptr(&cancellation_token.inner); - // SAFETY: The `Arc::as_ptr` method guarantees that `inner_ptr` remains - // valid until the strong count of the Arc drops to zero, and the caller - // guarantees that they will drop the future before that happens. - (*inner_ptr).notified() - } -} - -impl Future for WaitForCancellationFutureOwned { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - let mut this = self.project(); - - loop { - if this.cancellation_token.is_cancelled() { - return Poll::Ready(()); - } - - // No wakeups can be lost here because there is always a call to - // `is_cancelled` between the creation of the future and the call to - // `poll`, and the code that sets the cancelled flag does so before - // waking the `Notified`. - if this.future.as_mut().poll(cx).is_pending() { - return Poll::Pending; - } - - // # Safety - // - // cancellation_token is dropped after future due to the field ordering. - this.future.set(MaybeDangling::new(unsafe { - Self::new_future(this.cancellation_token) - })); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -//! 
Synchronization primitives - -mod cancellation_token; -pub use cancellation_token::{ - guard::DropGuard, CancellationToken, WaitForCancellationFuture, WaitForCancellationFutureOwned, -}; - -mod mpsc; -pub use mpsc::{PollSendError, PollSender}; - -mod poll_semaphore; -pub use poll_semaphore::PollSemaphore; - -mod reusable_box; -pub use reusable_box::ReusableBoxFuture; diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/mpsc.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/mpsc.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/mpsc.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/mpsc.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,325 +0,0 @@ -use futures_sink::Sink; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{fmt, mem}; -use tokio::sync::mpsc::OwnedPermit; -use tokio::sync::mpsc::Sender; - -use super::ReusableBoxFuture; - -/// Error returned by the `PollSender` when the channel is closed. -#[derive(Debug)] -pub struct PollSendError(Option); - -impl PollSendError { - /// Consumes the stored value, if any. - /// - /// If this error was encountered when calling `start_send`/`send_item`, this will be the item - /// that the caller attempted to send. Otherwise, it will be `None`. - pub fn into_inner(self) -> Option { - self.0 - } -} - -impl fmt::Display for PollSendError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "channel closed") - } -} - -impl std::error::Error for PollSendError {} - -#[derive(Debug)] -enum State { - Idle(Sender), - Acquiring, - ReadyToSend(OwnedPermit), - Closed, -} - -/// A wrapper around [`mpsc::Sender`] that can be polled. -/// -/// [`mpsc::Sender`]: tokio::sync::mpsc::Sender -#[derive(Debug)] -pub struct PollSender { - sender: Option>, - state: State, - acquire: PollSenderFuture, -} - -// Creates a future for acquiring a permit from the underlying channel. This is used to ensure -// there's capacity for a send to complete. 
-// -// By reusing the same async fn for both `Some` and `None`, we make sure every future passed to -// ReusableBoxFuture has the same underlying type, and hence the same size and alignment. -async fn make_acquire_future( - data: Option>, -) -> Result, PollSendError> { - match data { - Some(sender) => sender - .reserve_owned() - .await - .map_err(|_| PollSendError(None)), - None => unreachable!("this future should not be pollable in this state"), - } -} - -type InnerFuture<'a, T> = ReusableBoxFuture<'a, Result, PollSendError>>; - -#[derive(Debug)] -// TODO: This should be replace with a type_alias_impl_trait to eliminate `'static` and all the transmutes -struct PollSenderFuture(InnerFuture<'static, T>); - -impl PollSenderFuture { - /// Create with an empty inner future with no `Send` bound. - fn empty() -> Self { - // We don't use `make_acquire_future` here because our relaxed bounds on `T` are not - // compatible with the transitive bounds required by `Sender`. - Self(ReusableBoxFuture::new(async { unreachable!() })) - } -} - -impl PollSenderFuture { - /// Create with an empty inner future. - fn new() -> Self { - let v = InnerFuture::new(make_acquire_future(None)); - // This is safe because `make_acquire_future(None)` is actually `'static` - Self(unsafe { mem::transmute::, InnerFuture<'static, T>>(v) }) - } - - /// Poll the inner future. - fn poll(&mut self, cx: &mut Context<'_>) -> Poll, PollSendError>> { - self.0.poll(cx) - } - - /// Replace the inner future. - fn set(&mut self, sender: Option>) { - let inner: *mut InnerFuture<'static, T> = &mut self.0; - let inner: *mut InnerFuture<'_, T> = inner.cast(); - // SAFETY: The `make_acquire_future(sender)` future must not exist after the type `T` - // becomes invalid, and this casts away the type-level lifetime check for that. However, the - // inner future is never moved out of this `PollSenderFuture`, so the future will not - // live longer than the `PollSenderFuture` lives. 
A `PollSenderFuture` is guaranteed - // to not exist after the type `T` becomes invalid, because it is annotated with a `T`, so - // this is ok. - let inner = unsafe { &mut *inner }; - inner.set(make_acquire_future(sender)); - } -} - -impl PollSender { - /// Creates a new `PollSender`. - pub fn new(sender: Sender) -> Self { - Self { - sender: Some(sender.clone()), - state: State::Idle(sender), - acquire: PollSenderFuture::new(), - } - } - - fn take_state(&mut self) -> State { - mem::replace(&mut self.state, State::Closed) - } - - /// Attempts to prepare the sender to receive a value. - /// - /// This method must be called and return `Poll::Ready(Ok(()))` prior to each call to - /// `send_item`. - /// - /// This method returns `Poll::Ready` once the underlying channel is ready to receive a value, - /// by reserving a slot in the channel for the item to be sent. If this method returns - /// `Poll::Pending`, the current task is registered to be notified (via - /// `cx.waker().wake_by_ref()`) when `poll_reserve` should be called again. - /// - /// # Errors - /// - /// If the channel is closed, an error will be returned. This is a permanent state. - pub fn poll_reserve(&mut self, cx: &mut Context<'_>) -> Poll>> { - loop { - let (result, next_state) = match self.take_state() { - State::Idle(sender) => { - // Start trying to acquire a permit to reserve a slot for our send, and - // immediately loop back around to poll it the first time. - self.acquire.set(Some(sender)); - (None, State::Acquiring) - } - State::Acquiring => match self.acquire.poll(cx) { - // Channel has capacity. - Poll::Ready(Ok(permit)) => { - (Some(Poll::Ready(Ok(()))), State::ReadyToSend(permit)) - } - // Channel is closed. - Poll::Ready(Err(e)) => (Some(Poll::Ready(Err(e))), State::Closed), - // Channel doesn't have capacity yet, so we need to wait. - Poll::Pending => (Some(Poll::Pending), State::Acquiring), - }, - // We're closed, either by choice or because the underlying sender was closed. 
- s @ State::Closed => (Some(Poll::Ready(Err(PollSendError(None)))), s), - // We're already ready to send an item. - s @ State::ReadyToSend(_) => (Some(Poll::Ready(Ok(()))), s), - }; - - self.state = next_state; - if let Some(result) = result { - return result; - } - } - } - - /// Sends an item to the channel. - /// - /// Before calling `send_item`, `poll_reserve` must be called with a successful return - /// value of `Poll::Ready(Ok(()))`. - /// - /// # Errors - /// - /// If the channel is closed, an error will be returned. This is a permanent state. - /// - /// # Panics - /// - /// If `poll_reserve` was not successfully called prior to calling `send_item`, then this method - /// will panic. - #[track_caller] - pub fn send_item(&mut self, value: T) -> Result<(), PollSendError> { - let (result, next_state) = match self.take_state() { - State::Idle(_) | State::Acquiring => { - panic!("`send_item` called without first calling `poll_reserve`") - } - // We have a permit to send our item, so go ahead, which gets us our sender back. - State::ReadyToSend(permit) => (Ok(()), State::Idle(permit.send(value))), - // We're closed, either by choice or because the underlying sender was closed. - State::Closed => (Err(PollSendError(Some(value))), State::Closed), - }; - - // Handle deferred closing if `close` was called between `poll_reserve` and `send_item`. - self.state = if self.sender.is_some() { - next_state - } else { - State::Closed - }; - result - } - - /// Checks whether this sender is been closed. - /// - /// The underlying channel that this sender was wrapping may still be open. - pub fn is_closed(&self) -> bool { - matches!(self.state, State::Closed) || self.sender.is_none() - } - - /// Gets a reference to the `Sender` of the underlying channel. - /// - /// If `PollSender` has been closed, `None` is returned. The underlying channel that this sender - /// was wrapping may still be open. 
- pub fn get_ref(&self) -> Option<&Sender> { - self.sender.as_ref() - } - - /// Closes this sender. - /// - /// No more messages will be able to be sent from this sender, but the underlying channel will - /// remain open until all senders have dropped, or until the [`Receiver`] closes the channel. - /// - /// If a slot was previously reserved by calling `poll_reserve`, then a final call can be made - /// to `send_item` in order to consume the reserved slot. After that, no further sends will be - /// possible. If you do not intend to send another item, you can release the reserved slot back - /// to the underlying sender by calling [`abort_send`]. - /// - /// [`abort_send`]: crate::sync::PollSender::abort_send - /// [`Receiver`]: tokio::sync::mpsc::Receiver - pub fn close(&mut self) { - // Mark ourselves officially closed by dropping our main sender. - self.sender = None; - - // If we're already idle, closed, or we haven't yet reserved a slot, we can quickly - // transition to the closed state. Otherwise, leave the existing permit in place for the - // caller if they want to complete the send. - match self.state { - State::Idle(_) => self.state = State::Closed, - State::Acquiring => { - self.acquire.set(None); - self.state = State::Closed; - } - _ => {} - } - } - - /// Aborts the current in-progress send, if any. - /// - /// Returns `true` if a send was aborted. If the sender was closed prior to calling - /// `abort_send`, then the sender will remain in the closed state, otherwise the sender will be - /// ready to attempt another send. - pub fn abort_send(&mut self) -> bool { - // We may have been closed in the meantime, after a call to `poll_reserve` already - // succeeded. We'll check if `self.sender` is `None` to see if we should transition to the - // closed state when we actually abort a send, rather than resetting ourselves back to idle. - - let (result, next_state) = match self.take_state() { - // We're currently trying to reserve a slot to send into. 
- State::Acquiring => { - // Replacing the future drops the in-flight one. - self.acquire.set(None); - - // If we haven't closed yet, we have to clone our stored sender since we have no way - // to get it back from the acquire future we just dropped. - let state = match self.sender.clone() { - Some(sender) => State::Idle(sender), - None => State::Closed, - }; - (true, state) - } - // We got the permit. If we haven't closed yet, get the sender back. - State::ReadyToSend(permit) => { - let state = if self.sender.is_some() { - State::Idle(permit.release()) - } else { - State::Closed - }; - (true, state) - } - s => (false, s), - }; - - self.state = next_state; - result - } -} - -impl Clone for PollSender { - /// Clones this `PollSender`. - /// - /// The resulting `PollSender` will have an initial state identical to calling `PollSender::new`. - fn clone(&self) -> PollSender { - let (sender, state) = match self.sender.clone() { - Some(sender) => (Some(sender.clone()), State::Idle(sender)), - None => (None, State::Closed), - }; - - Self { - sender, - state, - acquire: PollSenderFuture::empty(), - } - } -} - -impl Sink for PollSender { - type Error = PollSendError; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::into_inner(self).poll_reserve(cx) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { - Pin::into_inner(self).send_item(item) - } - - fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Pin::into_inner(self).close(); - Poll::Ready(Ok(())) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/poll_semaphore.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/poll_semaphore.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/poll_semaphore.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/poll_semaphore.rs 1970-01-01 
01:00:00.000000000 +0100 @@ -1,171 +0,0 @@ -use futures_core::{ready, Stream}; -use std::fmt; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore, TryAcquireError}; - -use super::ReusableBoxFuture; - -/// A wrapper around [`Semaphore`] that provides a `poll_acquire` method. -/// -/// [`Semaphore`]: tokio::sync::Semaphore -pub struct PollSemaphore { - semaphore: Arc, - permit_fut: Option<( - u32, // The number of permits requested. - ReusableBoxFuture<'static, Result>, - )>, -} - -impl PollSemaphore { - /// Create a new `PollSemaphore`. - pub fn new(semaphore: Arc) -> Self { - Self { - semaphore, - permit_fut: None, - } - } - - /// Closes the semaphore. - pub fn close(&self) { - self.semaphore.close(); - } - - /// Obtain a clone of the inner semaphore. - pub fn clone_inner(&self) -> Arc { - self.semaphore.clone() - } - - /// Get back the inner semaphore. - pub fn into_inner(self) -> Arc { - self.semaphore - } - - /// Poll to acquire a permit from the semaphore. - /// - /// This can return the following values: - /// - /// - `Poll::Pending` if a permit is not currently available. - /// - `Poll::Ready(Some(permit))` if a permit was acquired. - /// - `Poll::Ready(None)` if the semaphore has been closed. - /// - /// When this method returns `Poll::Pending`, the current task is scheduled - /// to receive a wakeup when a permit becomes available, or when the - /// semaphore is closed. Note that on multiple calls to `poll_acquire`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. - pub fn poll_acquire(&mut self, cx: &mut Context<'_>) -> Poll> { - self.poll_acquire_many(cx, 1) - } - - /// Poll to acquire many permits from the semaphore. - /// - /// This can return the following values: - /// - /// - `Poll::Pending` if a permit is not currently available. - /// - `Poll::Ready(Some(permit))` if a permit was acquired. 
- /// - `Poll::Ready(None)` if the semaphore has been closed. - /// - /// When this method returns `Poll::Pending`, the current task is scheduled - /// to receive a wakeup when the permits become available, or when the - /// semaphore is closed. Note that on multiple calls to `poll_acquire`, only - /// the `Waker` from the `Context` passed to the most recent call is - /// scheduled to receive a wakeup. - pub fn poll_acquire_many( - &mut self, - cx: &mut Context<'_>, - permits: u32, - ) -> Poll> { - let permit_future = match self.permit_fut.as_mut() { - Some((prev_permits, fut)) if *prev_permits == permits => fut, - Some((old_permits, fut_box)) => { - // We're requesting a different number of permits, so replace the future - // and record the new amount. - let fut = Arc::clone(&self.semaphore).acquire_many_owned(permits); - fut_box.set(fut); - *old_permits = permits; - fut_box - } - None => { - // avoid allocations completely if we can grab a permit immediately - match Arc::clone(&self.semaphore).try_acquire_many_owned(permits) { - Ok(permit) => return Poll::Ready(Some(permit)), - Err(TryAcquireError::Closed) => return Poll::Ready(None), - Err(TryAcquireError::NoPermits) => {} - } - - let next_fut = Arc::clone(&self.semaphore).acquire_many_owned(permits); - &mut self - .permit_fut - .get_or_insert((permits, ReusableBoxFuture::new(next_fut))) - .1 - } - }; - - let result = ready!(permit_future.poll(cx)); - - // Assume we'll request the same amount of permits in a subsequent call. - let next_fut = Arc::clone(&self.semaphore).acquire_many_owned(permits); - permit_future.set(next_fut); - - match result { - Ok(permit) => Poll::Ready(Some(permit)), - Err(_closed) => { - self.permit_fut = None; - Poll::Ready(None) - } - } - } - - /// Returns the current number of available permits. - /// - /// This is equivalent to the [`Semaphore::available_permits`] method on the - /// `tokio::sync::Semaphore` type. 
- /// - /// [`Semaphore::available_permits`]: tokio::sync::Semaphore::available_permits - pub fn available_permits(&self) -> usize { - self.semaphore.available_permits() - } - - /// Adds `n` new permits to the semaphore. - /// - /// The maximum number of permits is [`Semaphore::MAX_PERMITS`], and this function - /// will panic if the limit is exceeded. - /// - /// This is equivalent to the [`Semaphore::add_permits`] method on the - /// `tokio::sync::Semaphore` type. - /// - /// [`Semaphore::add_permits`]: tokio::sync::Semaphore::add_permits - pub fn add_permits(&self, n: usize) { - self.semaphore.add_permits(n); - } -} - -impl Stream for PollSemaphore { - type Item = OwnedSemaphorePermit; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::into_inner(self).poll_acquire(cx) - } -} - -impl Clone for PollSemaphore { - fn clone(&self) -> PollSemaphore { - PollSemaphore::new(self.clone_inner()) - } -} - -impl fmt::Debug for PollSemaphore { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PollSemaphore") - .field("semaphore", &self.semaphore) - .finish() - } -} - -impl AsRef for PollSemaphore { - fn as_ref(&self) -> &Semaphore { - &self.semaphore - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/reusable_box.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/reusable_box.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/reusable_box.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/reusable_box.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,157 +0,0 @@ -use std::alloc::Layout; -use std::fmt; -use std::future::{self, Future}; -use std::mem::{self, ManuallyDrop}; -use std::pin::Pin; -use std::ptr; -use std::task::{Context, Poll}; - -/// A reusable `Pin + Send + 'a>>`. -/// -/// This type lets you replace the future stored in the box without -/// reallocating when the size and alignment permits this. 
-pub struct ReusableBoxFuture<'a, T> { - boxed: Pin + Send + 'a>>, -} - -impl<'a, T> ReusableBoxFuture<'a, T> { - /// Create a new `ReusableBoxFuture` containing the provided future. - pub fn new(future: F) -> Self - where - F: Future + Send + 'a, - { - Self { - boxed: Box::pin(future), - } - } - - /// Replace the future currently stored in this box. - /// - /// This reallocates if and only if the layout of the provided future is - /// different from the layout of the currently stored future. - pub fn set(&mut self, future: F) - where - F: Future + Send + 'a, - { - if let Err(future) = self.try_set(future) { - *self = Self::new(future); - } - } - - /// Replace the future currently stored in this box. - /// - /// This function never reallocates, but returns an error if the provided - /// future has a different size or alignment from the currently stored - /// future. - pub fn try_set(&mut self, future: F) -> Result<(), F> - where - F: Future + Send + 'a, - { - // If we try to inline the contents of this function, the type checker complains because - // the bound `T: 'a` is not satisfied in the call to `pending()`. But by putting it in an - // inner function that doesn't have `T` as a generic parameter, we implicitly get the bound - // `F::Output: 'a` transitively through `F: 'a`, allowing us to call `pending()`. - #[inline(always)] - fn real_try_set<'a, F>( - this: &mut ReusableBoxFuture<'a, F::Output>, - future: F, - ) -> Result<(), F> - where - F: Future + Send + 'a, - { - // future::Pending is a ZST so this never allocates. - let boxed = mem::replace(&mut this.boxed, Box::pin(future::pending())); - reuse_pin_box(boxed, future, |boxed| this.boxed = Pin::from(boxed)) - } - - real_try_set(self, future) - } - - /// Get a pinned reference to the underlying future. - pub fn get_pin(&mut self) -> Pin<&mut (dyn Future + Send)> { - self.boxed.as_mut() - } - - /// Poll the future stored inside this box. 
- pub fn poll(&mut self, cx: &mut Context<'_>) -> Poll { - self.get_pin().poll(cx) - } -} - -impl Future for ReusableBoxFuture<'_, T> { - type Output = T; - - /// Poll the future stored inside this box. - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::into_inner(self).get_pin().poll(cx) - } -} - -// The only method called on self.boxed is poll, which takes &mut self, so this -// struct being Sync does not permit any invalid access to the Future, even if -// the future is not Sync. -unsafe impl Sync for ReusableBoxFuture<'_, T> {} - -impl fmt::Debug for ReusableBoxFuture<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ReusableBoxFuture").finish() - } -} - -fn reuse_pin_box(boxed: Pin>, new_value: U, callback: F) -> Result -where - F: FnOnce(Box) -> O, -{ - let layout = Layout::for_value::(&*boxed); - if layout != Layout::new::() { - return Err(new_value); - } - - // SAFETY: We don't ever construct a non-pinned reference to the old `T` from now on, and we - // always drop the `T`. - let raw: *mut T = Box::into_raw(unsafe { Pin::into_inner_unchecked(boxed) }); - - // When dropping the old value panics, we still want to call `callback` — so move the rest of - // the code into a guard type. - let guard = CallOnDrop::new(|| { - let raw: *mut U = raw.cast::(); - unsafe { raw.write(new_value) }; - - // SAFETY: - // - `T` and `U` have the same layout. - // - `raw` comes from a `Box` that uses the same allocator as this one. - // - `raw` points to a valid instance of `U` (we just wrote it in). - let boxed = unsafe { Box::from_raw(raw) }; - - callback(boxed) - }); - - // Drop the old value. - unsafe { ptr::drop_in_place(raw) }; - - // Run the rest of the code. 
- Ok(guard.call()) -} - -struct CallOnDrop O> { - f: ManuallyDrop, -} - -impl O> CallOnDrop { - fn new(f: F) -> Self { - let f = ManuallyDrop::new(f); - Self { f } - } - fn call(self) -> O { - let mut this = ManuallyDrop::new(self); - let f = unsafe { ManuallyDrop::take(&mut this.f) }; - f() - } -} - -impl O> Drop for CallOnDrop { - fn drop(&mut self) { - let f = unsafe { ManuallyDrop::take(&mut self.f) }; - f(); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/tests/loom_cancellation_token.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/tests/loom_cancellation_token.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/tests/loom_cancellation_token.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/tests/loom_cancellation_token.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,176 +0,0 @@ -use crate::sync::CancellationToken; - -use loom::{future::block_on, thread}; -use tokio_test::assert_ok; - -#[test] -fn cancel_token() { - loom::model(|| { - let token = CancellationToken::new(); - let token1 = token.clone(); - - let th1 = thread::spawn(move || { - block_on(async { - token1.cancelled().await; - }); - }); - - let th2 = thread::spawn(move || { - token.cancel(); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} - -#[test] -fn cancel_token_owned() { - loom::model(|| { - let token = CancellationToken::new(); - let token1 = token.clone(); - - let th1 = thread::spawn(move || { - block_on(async { - token1.cancelled_owned().await; - }); - }); - - let th2 = thread::spawn(move || { - token.cancel(); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - }); -} - -#[test] -fn cancel_with_child() { - loom::model(|| { - let token = CancellationToken::new(); - let token1 = token.clone(); - let token2 = token.clone(); - let child_token = token.child_token(); - - let th1 = thread::spawn(move || { - block_on(async { - token1.cancelled().await; - }); - }); - - let th2 = thread::spawn(move || { 
- token2.cancel(); - }); - - let th3 = thread::spawn(move || { - block_on(async { - child_token.cancelled().await; - }); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - assert_ok!(th3.join()); - }); -} - -#[test] -fn drop_token_no_child() { - loom::model(|| { - let token = CancellationToken::new(); - let token1 = token.clone(); - let token2 = token.clone(); - - let th1 = thread::spawn(move || { - drop(token1); - }); - - let th2 = thread::spawn(move || { - drop(token2); - }); - - let th3 = thread::spawn(move || { - drop(token); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - assert_ok!(th3.join()); - }); -} - -#[test] -fn drop_token_with_children() { - loom::model(|| { - let token1 = CancellationToken::new(); - let child_token1 = token1.child_token(); - let child_token2 = token1.child_token(); - - let th1 = thread::spawn(move || { - drop(token1); - }); - - let th2 = thread::spawn(move || { - drop(child_token1); - }); - - let th3 = thread::spawn(move || { - drop(child_token2); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - assert_ok!(th3.join()); - }); -} - -#[test] -fn drop_and_cancel_token() { - loom::model(|| { - let token1 = CancellationToken::new(); - let token2 = token1.clone(); - let child_token = token1.child_token(); - - let th1 = thread::spawn(move || { - drop(token1); - }); - - let th2 = thread::spawn(move || { - token2.cancel(); - }); - - let th3 = thread::spawn(move || { - drop(child_token); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - assert_ok!(th3.join()); - }); -} - -#[test] -fn cancel_parent_and_child() { - loom::model(|| { - let token1 = CancellationToken::new(); - let token2 = token1.clone(); - let child_token = token1.child_token(); - - let th1 = thread::spawn(move || { - drop(token1); - }); - - let th2 = thread::spawn(move || { - token2.cancel(); - }); - - let th3 = thread::spawn(move || { - child_token.cancel(); - }); - - assert_ok!(th1.join()); - assert_ok!(th2.join()); - 
assert_ok!(th3.join()); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/tests/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/tests/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/sync/tests/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/sync/tests/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ - diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/task/join_map.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/task/join_map.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/task/join_map.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/task/join_map.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,904 +0,0 @@ -use hashbrown::hash_map::RawEntryMut; -use hashbrown::HashMap; -use std::borrow::Borrow; -use std::collections::hash_map::RandomState; -use std::fmt; -use std::future::Future; -use std::hash::{BuildHasher, Hash, Hasher}; -use std::marker::PhantomData; -use tokio::runtime::Handle; -use tokio::task::{AbortHandle, Id, JoinError, JoinSet, LocalSet}; - -/// A collection of tasks spawned on a Tokio runtime, associated with hash map -/// keys. -/// -/// This type is very similar to the [`JoinSet`] type in `tokio::task`, with the -/// addition of a set of keys associated with each task. These keys allow -/// [cancelling a task][abort] or [multiple tasks][abort_matching] in the -/// `JoinMap` based on their keys, or [test whether a task corresponding to a -/// given key exists][contains] in the `JoinMap`. -/// -/// In addition, when tasks in the `JoinMap` complete, they will return the -/// associated key along with the value returned by the task, if any. -/// -/// A `JoinMap` can be used to await the completion of some or all of the tasks -/// in the map. The map is not ordered, and the tasks will be returned in the -/// order they complete. -/// -/// All of the tasks must have the same return type `V`. 
-/// -/// When the `JoinMap` is dropped, all tasks in the `JoinMap` are immediately aborted. -/// -/// **Note**: This type depends on Tokio's [unstable API][unstable]. See [the -/// documentation on unstable features][unstable] for details on how to enable -/// Tokio's unstable features. -/// -/// # Examples -/// -/// Spawn multiple tasks and wait for them: -/// -/// ``` -/// use tokio_util::task::JoinMap; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut map = JoinMap::new(); -/// -/// for i in 0..10 { -/// // Spawn a task on the `JoinMap` with `i` as its key. -/// map.spawn(i, async move { /* ... */ }); -/// } -/// -/// let mut seen = [false; 10]; -/// -/// // When a task completes, `join_next` returns the task's key along -/// // with its output. -/// while let Some((key, res)) = map.join_next().await { -/// seen[key] = true; -/// assert!(res.is_ok(), "task {} completed successfully!", key); -/// } -/// -/// for i in 0..10 { -/// assert!(seen[i]); -/// } -/// } -/// ``` -/// -/// Cancel tasks based on their keys: -/// -/// ``` -/// use tokio_util::task::JoinMap; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut map = JoinMap::new(); -/// -/// map.spawn("hello world", async move { /* ... */ }); -/// map.spawn("goodbye world", async move { /* ... */}); -/// -/// // Look up the "goodbye world" task in the map and abort it. -/// let aborted = map.abort("goodbye world"); -/// -/// // `JoinMap::abort` returns `true` if a task existed for the -/// // provided key. -/// assert!(aborted); -/// -/// while let Some((key, res)) = map.join_next().await { -/// if key == "goodbye world" { -/// // The aborted task should complete with a cancelled `JoinError`. -/// assert!(res.unwrap_err().is_cancelled()); -/// } else { -/// // Other tasks should complete normally. 
-/// assert!(res.is_ok()); -/// } -/// } -/// } -/// ``` -/// -/// [`JoinSet`]: tokio::task::JoinSet -/// [unstable]: tokio#unstable-features -/// [abort]: fn@Self::abort -/// [abort_matching]: fn@Self::abort_matching -/// [contains]: fn@Self::contains_key -#[cfg_attr(docsrs, doc(cfg(all(feature = "rt", tokio_unstable))))] -pub struct JoinMap { - /// A map of the [`AbortHandle`]s of the tasks spawned on this `JoinMap`, - /// indexed by their keys and task IDs. - /// - /// The [`Key`] type contains both the task's `K`-typed key provided when - /// spawning tasks, and the task's IDs. The IDs are stored here to resolve - /// hash collisions when looking up tasks based on their pre-computed hash - /// (as stored in the `hashes_by_task` map). - tasks_by_key: HashMap, AbortHandle, S>, - - /// A map from task IDs to the hash of the key associated with that task. - /// - /// This map is used to perform reverse lookups of tasks in the - /// `tasks_by_key` map based on their task IDs. When a task terminates, the - /// ID is provided to us by the `JoinSet`, so we can look up the hash value - /// of that task's key, and then remove it from the `tasks_by_key` map using - /// the raw hash code, resolving collisions by comparing task IDs. - hashes_by_task: HashMap, - - /// The [`JoinSet`] that awaits the completion of tasks spawned on this - /// `JoinMap`. - tasks: JoinSet, -} - -/// A [`JoinMap`] key. -/// -/// This holds both a `K`-typed key (the actual key as seen by the user), _and_ -/// a task ID, so that hash collisions between `K`-typed keys can be resolved -/// using either `K`'s `Eq` impl *or* by checking the task IDs. -/// -/// This allows looking up a task using either an actual key (such as when the -/// user queries the map with a key), *or* using a task ID and a hash (such as -/// when removing completed tasks from the map). -#[derive(Debug)] -struct Key { - key: K, - id: Id, -} - -impl JoinMap { - /// Creates a new empty `JoinMap`. 
- /// - /// The `JoinMap` is initially created with a capacity of 0, so it will not - /// allocate until a task is first spawned on it. - /// - /// # Examples - /// - /// ``` - /// use tokio_util::task::JoinMap; - /// let map: JoinMap<&str, i32> = JoinMap::new(); - /// ``` - #[inline] - #[must_use] - pub fn new() -> Self { - Self::with_hasher(RandomState::new()) - } - - /// Creates an empty `JoinMap` with the specified capacity. - /// - /// The `JoinMap` will be able to hold at least `capacity` tasks without - /// reallocating. - /// - /// # Examples - /// - /// ``` - /// use tokio_util::task::JoinMap; - /// let map: JoinMap<&str, i32> = JoinMap::with_capacity(10); - /// ``` - #[inline] - #[must_use] - pub fn with_capacity(capacity: usize) -> Self { - JoinMap::with_capacity_and_hasher(capacity, Default::default()) - } -} - -impl JoinMap { - /// Creates an empty `JoinMap` which will use the given hash builder to hash - /// keys. - /// - /// The created map has the default initial capacity. - /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow `JoinMap` to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. - /// - /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the `JoinMap` to be useful, see its documentation for details. - #[inline] - #[must_use] - pub fn with_hasher(hash_builder: S) -> Self { - Self::with_capacity_and_hasher(0, hash_builder) - } - - /// Creates an empty `JoinMap` with the specified capacity, using `hash_builder` - /// to hash the keys. - /// - /// The `JoinMap` will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the `JoinMap` will not allocate. 
- /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow HashMaps to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. - /// - /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the `JoinMap`to be useful, see its documentation for details. - /// - /// # Examples - /// - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use tokio_util::task::JoinMap; - /// use std::collections::hash_map::RandomState; - /// - /// let s = RandomState::new(); - /// let mut map = JoinMap::with_capacity_and_hasher(10, s); - /// map.spawn(1, async move { "hello world!" }); - /// # } - /// ``` - #[inline] - #[must_use] - pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { - Self { - tasks_by_key: HashMap::with_capacity_and_hasher(capacity, hash_builder.clone()), - hashes_by_task: HashMap::with_capacity_and_hasher(capacity, hash_builder), - tasks: JoinSet::new(), - } - } - - /// Returns the number of tasks currently in the `JoinMap`. - pub fn len(&self) -> usize { - let len = self.tasks_by_key.len(); - debug_assert_eq!(len, self.hashes_by_task.len()); - len - } - - /// Returns whether the `JoinMap` is empty. - pub fn is_empty(&self) -> bool { - let empty = self.tasks_by_key.is_empty(); - debug_assert_eq!(empty, self.hashes_by_task.is_empty()); - empty - } - - /// Returns the number of tasks the map can hold without reallocating. - /// - /// This number is a lower bound; the `JoinMap` might be able to hold - /// more, but is guaranteed to be able to hold at least this many. 
- /// - /// # Examples - /// - /// ``` - /// use tokio_util::task::JoinMap; - /// - /// let map: JoinMap = JoinMap::with_capacity(100); - /// assert!(map.capacity() >= 100); - /// ``` - #[inline] - pub fn capacity(&self) -> usize { - let capacity = self.tasks_by_key.capacity(); - debug_assert_eq!(capacity, self.hashes_by_task.capacity()); - capacity - } -} - -impl JoinMap -where - K: Hash + Eq, - V: 'static, - S: BuildHasher, -{ - /// Spawn the provided task and store it in this `JoinMap` with the provided - /// key. - /// - /// If a task previously existed in the `JoinMap` for this key, that task - /// will be cancelled and replaced with the new one. The previous task will - /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will - /// *not* return a cancelled [`JoinError`] for that task. - /// - /// # Panics - /// - /// This method panics if called outside of a Tokio runtime. - /// - /// [`join_next`]: Self::join_next - #[track_caller] - pub fn spawn(&mut self, key: K, task: F) - where - F: Future, - F: Send + 'static, - V: Send, - { - let task = self.tasks.spawn(task); - self.insert(key, task) - } - - /// Spawn the provided task on the provided runtime and store it in this - /// `JoinMap` with the provided key. - /// - /// If a task previously existed in the `JoinMap` for this key, that task - /// will be cancelled and replaced with the new one. The previous task will - /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will - /// *not* return a cancelled [`JoinError`] for that task. - /// - /// [`join_next`]: Self::join_next - #[track_caller] - pub fn spawn_on(&mut self, key: K, task: F, handle: &Handle) - where - F: Future, - F: Send + 'static, - V: Send, - { - let task = self.tasks.spawn_on(task, handle); - self.insert(key, task); - } - - /// Spawn the blocking code on the blocking threadpool and store it in this `JoinMap` with the provided - /// key. 
- /// - /// If a task previously existed in the `JoinMap` for this key, that task - /// will be cancelled and replaced with the new one. The previous task will - /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will - /// *not* return a cancelled [`JoinError`] for that task. - /// - /// Note that blocking tasks cannot be cancelled after execution starts. - /// Replaced blocking tasks will still run to completion if the task has begun - /// to execute when it is replaced. A blocking task which is replaced before - /// it has been scheduled on a blocking worker thread will be cancelled. - /// - /// # Panics - /// - /// This method panics if called outside of a Tokio runtime. - /// - /// [`join_next`]: Self::join_next - #[track_caller] - pub fn spawn_blocking(&mut self, key: K, f: F) - where - F: FnOnce() -> V, - F: Send + 'static, - V: Send, - { - let task = self.tasks.spawn_blocking(f); - self.insert(key, task) - } - - /// Spawn the blocking code on the blocking threadpool of the provided runtime and store it in this - /// `JoinMap` with the provided key. - /// - /// If a task previously existed in the `JoinMap` for this key, that task - /// will be cancelled and replaced with the new one. The previous task will - /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will - /// *not* return a cancelled [`JoinError`] for that task. - /// - /// Note that blocking tasks cannot be cancelled after execution starts. - /// Replaced blocking tasks will still run to completion if the task has begun - /// to execute when it is replaced. A blocking task which is replaced before - /// it has been scheduled on a blocking worker thread will be cancelled. 
- /// - /// [`join_next`]: Self::join_next - #[track_caller] - pub fn spawn_blocking_on(&mut self, key: K, f: F, handle: &Handle) - where - F: FnOnce() -> V, - F: Send + 'static, - V: Send, - { - let task = self.tasks.spawn_blocking_on(f, handle); - self.insert(key, task); - } - - /// Spawn the provided task on the current [`LocalSet`] and store it in this - /// `JoinMap` with the provided key. - /// - /// If a task previously existed in the `JoinMap` for this key, that task - /// will be cancelled and replaced with the new one. The previous task will - /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will - /// *not* return a cancelled [`JoinError`] for that task. - /// - /// # Panics - /// - /// This method panics if it is called outside of a `LocalSet`. - /// - /// [`LocalSet`]: tokio::task::LocalSet - /// [`join_next`]: Self::join_next - #[track_caller] - pub fn spawn_local(&mut self, key: K, task: F) - where - F: Future, - F: 'static, - { - let task = self.tasks.spawn_local(task); - self.insert(key, task); - } - - /// Spawn the provided task on the provided [`LocalSet`] and store it in - /// this `JoinMap` with the provided key. - /// - /// If a task previously existed in the `JoinMap` for this key, that task - /// will be cancelled and replaced with the new one. The previous task will - /// be removed from the `JoinMap`; a subsequent call to [`join_next`] will - /// *not* return a cancelled [`JoinError`] for that task. - /// - /// [`LocalSet`]: tokio::task::LocalSet - /// [`join_next`]: Self::join_next - #[track_caller] - pub fn spawn_local_on(&mut self, key: K, task: F, local_set: &LocalSet) - where - F: Future, - F: 'static, - { - let task = self.tasks.spawn_local_on(task, local_set); - self.insert(key, task) - } - - fn insert(&mut self, key: K, abort: AbortHandle) { - let hash = self.hash(&key); - let id = abort.id(); - let map_key = Key { id, key }; - - // Insert the new key into the map of tasks by keys. 
- let entry = self - .tasks_by_key - .raw_entry_mut() - .from_hash(hash, |k| k.key == map_key.key); - match entry { - RawEntryMut::Occupied(mut occ) => { - // There was a previous task spawned with the same key! Cancel - // that task, and remove its ID from the map of hashes by task IDs. - let Key { id: prev_id, .. } = occ.insert_key(map_key); - occ.insert(abort).abort(); - let _prev_hash = self.hashes_by_task.remove(&prev_id); - debug_assert_eq!(Some(hash), _prev_hash); - } - RawEntryMut::Vacant(vac) => { - vac.insert(map_key, abort); - } - }; - - // Associate the key's hash with this task's ID, for looking up tasks by ID. - let _prev = self.hashes_by_task.insert(id, hash); - debug_assert!(_prev.is_none(), "no prior task should have had the same ID"); - } - - /// Waits until one of the tasks in the map completes and returns its - /// output, along with the key corresponding to that task. - /// - /// Returns `None` if the map is empty. - /// - /// # Cancel Safety - /// - /// This method is cancel safe. If `join_next` is used as the event in a [`tokio::select!`] - /// statement and some other branch completes first, it is guaranteed that no tasks were - /// removed from this `JoinMap`. - /// - /// # Returns - /// - /// This function returns: - /// - /// * `Some((key, Ok(value)))` if one of the tasks in this `JoinMap` has - /// completed. The `value` is the return value of that ask, and `key` is - /// the key associated with the task. - /// * `Some((key, Err(err))` if one of the tasks in this JoinMap` has - /// panicked or been aborted. `key` is the key associated with the task - /// that panicked or was aborted. - /// * `None` if the `JoinMap` is empty. 
- /// - /// [`tokio::select!`]: tokio::select - pub async fn join_next(&mut self) -> Option<(K, Result)> { - let (res, id) = match self.tasks.join_next_with_id().await { - Some(Ok((id, output))) => (Ok(output), id), - Some(Err(e)) => { - let id = e.id(); - (Err(e), id) - } - None => return None, - }; - let key = self.remove_by_id(id)?; - Some((key, res)) - } - - /// Aborts all tasks and waits for them to finish shutting down. - /// - /// Calling this method is equivalent to calling [`abort_all`] and then calling [`join_next`] in - /// a loop until it returns `None`. - /// - /// This method ignores any panics in the tasks shutting down. When this call returns, the - /// `JoinMap` will be empty. - /// - /// [`abort_all`]: fn@Self::abort_all - /// [`join_next`]: fn@Self::join_next - pub async fn shutdown(&mut self) { - self.abort_all(); - while self.join_next().await.is_some() {} - } - - /// Abort the task corresponding to the provided `key`. - /// - /// If this `JoinMap` contains a task corresponding to `key`, this method - /// will abort that task and return `true`. Otherwise, if no task exists for - /// `key`, this method returns `false`. - /// - /// # Examples - /// - /// Aborting a task by key: - /// - /// ``` - /// use tokio_util::task::JoinMap; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut map = JoinMap::new(); - /// - /// map.spawn("hello world", async move { /* ... */ }); - /// map.spawn("goodbye world", async move { /* ... */}); - /// - /// // Look up the "goodbye world" task in the map and abort it. - /// map.abort("goodbye world"); - /// - /// while let Some((key, res)) = map.join_next().await { - /// if key == "goodbye world" { - /// // The aborted task should complete with a cancelled `JoinError`. - /// assert!(res.unwrap_err().is_cancelled()); - /// } else { - /// // Other tasks should complete normally. 
- /// assert!(res.is_ok()); - /// } - /// } - /// # } - /// ``` - /// - /// `abort` returns `true` if a task was aborted: - /// ``` - /// use tokio_util::task::JoinMap; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut map = JoinMap::new(); - /// - /// map.spawn("hello world", async move { /* ... */ }); - /// map.spawn("goodbye world", async move { /* ... */}); - /// - /// // A task for the key "goodbye world" should exist in the map: - /// assert!(map.abort("goodbye world")); - /// - /// // Aborting a key that does not exist will return `false`: - /// assert!(!map.abort("goodbye universe")); - /// # } - /// ``` - pub fn abort(&mut self, key: &Q) -> bool - where - Q: Hash + Eq, - K: Borrow, - { - match self.get_by_key(key) { - Some((_, handle)) => { - handle.abort(); - true - } - None => false, - } - } - - /// Aborts all tasks with keys matching `predicate`. - /// - /// `predicate` is a function called with a reference to each key in the - /// map. If it returns `true` for a given key, the corresponding task will - /// be cancelled. - /// - /// # Examples - /// ``` - /// use tokio_util::task::JoinMap; - /// - /// # // use the current thread rt so that spawned tasks don't - /// # // complete in the background before they can be aborted. - /// # #[tokio::main(flavor = "current_thread")] - /// # async fn main() { - /// let mut map = JoinMap::new(); - /// - /// map.spawn("hello world", async move { - /// // ... - /// # tokio::task::yield_now().await; // don't complete immediately, get aborted! - /// }); - /// map.spawn("goodbye world", async move { - /// // ... - /// # tokio::task::yield_now().await; // don't complete immediately, get aborted! - /// }); - /// map.spawn("hello san francisco", async move { - /// // ... - /// # tokio::task::yield_now().await; // don't complete immediately, get aborted! - /// }); - /// map.spawn("goodbye universe", async move { - /// // ... 
- /// # tokio::task::yield_now().await; // don't complete immediately, get aborted! - /// }); - /// - /// // Abort all tasks whose keys begin with "goodbye" - /// map.abort_matching(|key| key.starts_with("goodbye")); - /// - /// let mut seen = 0; - /// while let Some((key, res)) = map.join_next().await { - /// seen += 1; - /// if key.starts_with("goodbye") { - /// // The aborted task should complete with a cancelled `JoinError`. - /// assert!(res.unwrap_err().is_cancelled()); - /// } else { - /// // Other tasks should complete normally. - /// assert!(key.starts_with("hello")); - /// assert!(res.is_ok()); - /// } - /// } - /// - /// // All spawned tasks should have completed. - /// assert_eq!(seen, 4); - /// # } - /// ``` - pub fn abort_matching(&mut self, mut predicate: impl FnMut(&K) -> bool) { - // Note: this method iterates over the tasks and keys *without* removing - // any entries, so that the keys from aborted tasks can still be - // returned when calling `join_next` in the future. - for (Key { ref key, .. }, task) in &self.tasks_by_key { - if predicate(key) { - task.abort(); - } - } - } - - /// Returns an iterator visiting all keys in this `JoinMap` in arbitrary order. - /// - /// If a task has completed, but its output hasn't yet been consumed by a - /// call to [`join_next`], this method will still return its key. - /// - /// [`join_next`]: fn@Self::join_next - pub fn keys(&self) -> JoinMapKeys<'_, K, V> { - JoinMapKeys { - iter: self.tasks_by_key.keys(), - _value: PhantomData, - } - } - - /// Returns `true` if this `JoinMap` contains a task for the provided key. - /// - /// If the task has completed, but its output hasn't yet been consumed by a - /// call to [`join_next`], this method will still return `true`. 
- /// - /// [`join_next`]: fn@Self::join_next - pub fn contains_key(&self, key: &Q) -> bool - where - Q: Hash + Eq, - K: Borrow, - { - self.get_by_key(key).is_some() - } - - /// Returns `true` if this `JoinMap` contains a task with the provided - /// [task ID]. - /// - /// If the task has completed, but its output hasn't yet been consumed by a - /// call to [`join_next`], this method will still return `true`. - /// - /// [`join_next`]: fn@Self::join_next - /// [task ID]: tokio::task::Id - pub fn contains_task(&self, task: &Id) -> bool { - self.get_by_id(task).is_some() - } - - /// Reserves capacity for at least `additional` more tasks to be spawned - /// on this `JoinMap` without reallocating for the map of task keys. The - /// collection may reserve more space to avoid frequent reallocations. - /// - /// Note that spawning a task will still cause an allocation for the task - /// itself. - /// - /// # Panics - /// - /// Panics if the new allocation size overflows [`usize`]. - /// - /// # Examples - /// - /// ``` - /// use tokio_util::task::JoinMap; - /// - /// let mut map: JoinMap<&str, i32> = JoinMap::new(); - /// map.reserve(10); - /// ``` - #[inline] - pub fn reserve(&mut self, additional: usize) { - self.tasks_by_key.reserve(additional); - self.hashes_by_task.reserve(additional); - } - - /// Shrinks the capacity of the `JoinMap` as much as possible. It will drop - /// down as much as possible while maintaining the internal rules - /// and possibly leaving some space in accordance with the resize policy. 
- /// - /// # Examples - /// - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use tokio_util::task::JoinMap; - /// - /// let mut map: JoinMap = JoinMap::with_capacity(100); - /// map.spawn(1, async move { 2 }); - /// map.spawn(3, async move { 4 }); - /// assert!(map.capacity() >= 100); - /// map.shrink_to_fit(); - /// assert!(map.capacity() >= 2); - /// # } - /// ``` - #[inline] - pub fn shrink_to_fit(&mut self) { - self.hashes_by_task.shrink_to_fit(); - self.tasks_by_key.shrink_to_fit(); - } - - /// Shrinks the capacity of the map with a lower limit. It will drop - /// down no lower than the supplied limit while maintaining the internal rules - /// and possibly leaving some space in accordance with the resize policy. - /// - /// If the current capacity is less than the lower limit, this is a no-op. - /// - /// # Examples - /// - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use tokio_util::task::JoinMap; - /// - /// let mut map: JoinMap = JoinMap::with_capacity(100); - /// map.spawn(1, async move { 2 }); - /// map.spawn(3, async move { 4 }); - /// assert!(map.capacity() >= 100); - /// map.shrink_to(10); - /// assert!(map.capacity() >= 10); - /// map.shrink_to(0); - /// assert!(map.capacity() >= 2); - /// # } - /// ``` - #[inline] - pub fn shrink_to(&mut self, min_capacity: usize) { - self.hashes_by_task.shrink_to(min_capacity); - self.tasks_by_key.shrink_to(min_capacity) - } - - /// Look up a task in the map by its key, returning the key and abort handle. - fn get_by_key<'map, Q: ?Sized>(&'map self, key: &Q) -> Option<(&'map Key, &'map AbortHandle)> - where - Q: Hash + Eq, - K: Borrow, - { - let hash = self.hash(key); - self.tasks_by_key - .raw_entry() - .from_hash(hash, |k| k.key.borrow() == key) - } - - /// Look up a task in the map by its task ID, returning the key and abort handle. 
- fn get_by_id<'map>(&'map self, id: &Id) -> Option<(&'map Key, &'map AbortHandle)> { - let hash = self.hashes_by_task.get(id)?; - self.tasks_by_key - .raw_entry() - .from_hash(*hash, |k| &k.id == id) - } - - /// Remove a task from the map by ID, returning the key for that task. - fn remove_by_id(&mut self, id: Id) -> Option { - // Get the hash for the given ID. - let hash = self.hashes_by_task.remove(&id)?; - - // Remove the entry for that hash. - let entry = self - .tasks_by_key - .raw_entry_mut() - .from_hash(hash, |k| k.id == id); - let (Key { id: _key_id, key }, handle) = match entry { - RawEntryMut::Occupied(entry) => entry.remove_entry(), - _ => return None, - }; - debug_assert_eq!(_key_id, id); - debug_assert_eq!(id, handle.id()); - self.hashes_by_task.remove(&id); - Some(key) - } - - /// Returns the hash for a given key. - #[inline] - fn hash(&self, key: &Q) -> u64 - where - Q: Hash, - { - let mut hasher = self.tasks_by_key.hasher().build_hasher(); - key.hash(&mut hasher); - hasher.finish() - } -} - -impl JoinMap -where - V: 'static, -{ - /// Aborts all tasks on this `JoinMap`. - /// - /// This does not remove the tasks from the `JoinMap`. To wait for the tasks to complete - /// cancellation, you should call `join_next` in a loop until the `JoinMap` is empty. - pub fn abort_all(&mut self) { - self.tasks.abort_all() - } - - /// Removes all tasks from this `JoinMap` without aborting them. - /// - /// The tasks removed by this call will continue to run in the background even if the `JoinMap` - /// is dropped. They may still be aborted by key. - pub fn detach_all(&mut self) { - self.tasks.detach_all(); - self.tasks_by_key.clear(); - self.hashes_by_task.clear(); - } -} - -// Hand-written `fmt::Debug` implementation in order to avoid requiring `V: -// Debug`, since no value is ever actually stored in the map. 
-impl fmt::Debug for JoinMap { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // format the task keys and abort handles a little nicer by just - // printing the key and task ID pairs, without format the `Key` struct - // itself or the `AbortHandle`, which would just format the task's ID - // again. - struct KeySet<'a, K: fmt::Debug, S>(&'a HashMap, AbortHandle, S>); - impl fmt::Debug for KeySet<'_, K, S> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_map() - .entries(self.0.keys().map(|Key { key, id }| (key, id))) - .finish() - } - } - - f.debug_struct("JoinMap") - // The `tasks_by_key` map is the only one that contains information - // that's really worth formatting for the user, since it contains - // the tasks' keys and IDs. The other fields are basically - // implementation details. - .field("tasks", &KeySet(&self.tasks_by_key)) - .finish() - } -} - -impl Default for JoinMap { - fn default() -> Self { - Self::new() - } -} - -// === impl Key === - -impl Hash for Key { - // Don't include the task ID in the hash. - #[inline] - fn hash(&self, hasher: &mut H) { - self.key.hash(hasher); - } -} - -// Because we override `Hash` for this type, we must also override the -// `PartialEq` impl, so that all instances with the same hash are equal. -impl PartialEq for Key { - #[inline] - fn eq(&self, other: &Self) -> bool { - self.key == other.key - } -} - -impl Eq for Key {} - -/// An iterator over the keys of a [`JoinMap`]. -#[derive(Debug, Clone)] -pub struct JoinMapKeys<'a, K, V> { - iter: hashbrown::hash_map::Keys<'a, Key, AbortHandle>, - /// To make it easier to change JoinMap in the future, keep V as a generic - /// parameter. 
- _value: PhantomData<&'a V>, -} - -impl<'a, K, V> Iterator for JoinMapKeys<'a, K, V> { - type Item = &'a K; - - fn next(&mut self) -> Option<&'a K> { - self.iter.next().map(|key| &key.key) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a, K, V> ExactSizeIterator for JoinMapKeys<'a, K, V> { - fn len(&self) -> usize { - self.iter.len() - } -} - -impl<'a, K, V> std::iter::FusedIterator for JoinMapKeys<'a, K, V> {} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/task/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/task/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/task/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/task/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -//! Extra utilities for spawning tasks - -#[cfg(tokio_unstable)] -mod join_map; -#[cfg(not(target_os = "wasi"))] -mod spawn_pinned; -#[cfg(not(target_os = "wasi"))] -pub use spawn_pinned::LocalPoolHandle; - -#[cfg(tokio_unstable)] -#[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = "rt"))))] -pub use join_map::{JoinMap, JoinMapKeys}; - -pub mod task_tracker; -pub use task_tracker::TaskTracker; diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/task/spawn_pinned.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/task/spawn_pinned.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/task/spawn_pinned.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/task/spawn_pinned.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,436 +0,0 @@ -use futures_util::future::{AbortHandle, Abortable}; -use std::fmt; -use std::fmt::{Debug, Formatter}; -use std::future::Future; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use tokio::runtime::Builder; -use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; -use tokio::sync::oneshot; -use tokio::task::{spawn_local, JoinHandle, LocalSet}; - -/// A cloneable handle to a local 
pool, used for spawning `!Send` tasks. -/// -/// Internally the local pool uses a [`tokio::task::LocalSet`] for each worker thread -/// in the pool. Consequently you can also use [`tokio::task::spawn_local`] (which will -/// execute on the same thread) inside the Future you supply to the various spawn methods -/// of `LocalPoolHandle`, -/// -/// [`tokio::task::LocalSet`]: tokio::task::LocalSet -/// [`tokio::task::spawn_local`]: tokio::task::spawn_local -/// -/// # Examples -/// -/// ``` -/// use std::rc::Rc; -/// use tokio::{self, task }; -/// use tokio_util::task::LocalPoolHandle; -/// -/// #[tokio::main(flavor = "current_thread")] -/// async fn main() { -/// let pool = LocalPoolHandle::new(5); -/// -/// let output = pool.spawn_pinned(|| { -/// // `data` is !Send + !Sync -/// let data = Rc::new("local data"); -/// let data_clone = data.clone(); -/// -/// async move { -/// task::spawn_local(async move { -/// println!("{}", data_clone); -/// }); -/// -/// data.to_string() -/// } -/// }).await.unwrap(); -/// println!("output: {}", output); -/// } -/// ``` -/// -#[derive(Clone)] -pub struct LocalPoolHandle { - pool: Arc, -} - -impl LocalPoolHandle { - /// Create a new pool of threads to handle `!Send` tasks. Spawn tasks onto this - /// pool via [`LocalPoolHandle::spawn_pinned`]. - /// - /// # Panics - /// - /// Panics if the pool size is less than one. - #[track_caller] - pub fn new(pool_size: usize) -> LocalPoolHandle { - assert!(pool_size > 0); - - let workers = (0..pool_size) - .map(|_| LocalWorkerHandle::new_worker()) - .collect(); - - let pool = Arc::new(LocalPool { workers }); - - LocalPoolHandle { pool } - } - - /// Returns the number of threads of the Pool. - #[inline] - pub fn num_threads(&self) -> usize { - self.pool.workers.len() - } - - /// Returns the number of tasks scheduled on each worker. The indices of the - /// worker threads correspond to the indices of the returned `Vec`. 
- pub fn get_task_loads_for_each_worker(&self) -> Vec { - self.pool - .workers - .iter() - .map(|worker| worker.task_count.load(Ordering::SeqCst)) - .collect::>() - } - - /// Spawn a task onto a worker thread and pin it there so it can't be moved - /// off of the thread. Note that the future is not [`Send`], but the - /// [`FnOnce`] which creates it is. - /// - /// # Examples - /// ``` - /// use std::rc::Rc; - /// use tokio_util::task::LocalPoolHandle; - /// - /// #[tokio::main] - /// async fn main() { - /// // Create the local pool - /// let pool = LocalPoolHandle::new(1); - /// - /// // Spawn a !Send future onto the pool and await it - /// let output = pool - /// .spawn_pinned(|| { - /// // Rc is !Send + !Sync - /// let local_data = Rc::new("test"); - /// - /// // This future holds an Rc, so it is !Send - /// async move { local_data.to_string() } - /// }) - /// .await - /// .unwrap(); - /// - /// assert_eq!(output, "test"); - /// } - /// ``` - pub fn spawn_pinned(&self, create_task: F) -> JoinHandle - where - F: FnOnce() -> Fut, - F: Send + 'static, - Fut: Future + 'static, - Fut::Output: Send + 'static, - { - self.pool - .spawn_pinned(create_task, WorkerChoice::LeastBurdened) - } - - /// Differs from `spawn_pinned` only in that you can choose a specific worker thread - /// of the pool, whereas `spawn_pinned` chooses the worker with the smallest - /// number of tasks scheduled. - /// - /// A worker thread is chosen by index. Indices are 0 based and the largest index - /// is given by `num_threads() - 1` - /// - /// # Panics - /// - /// This method panics if the index is out of bounds. 
- /// - /// # Examples - /// - /// This method can be used to spawn a task on all worker threads of the pool: - /// - /// ``` - /// use tokio_util::task::LocalPoolHandle; - /// - /// #[tokio::main] - /// async fn main() { - /// const NUM_WORKERS: usize = 3; - /// let pool = LocalPoolHandle::new(NUM_WORKERS); - /// let handles = (0..pool.num_threads()) - /// .map(|worker_idx| { - /// pool.spawn_pinned_by_idx( - /// || { - /// async { - /// "test" - /// } - /// }, - /// worker_idx, - /// ) - /// }) - /// .collect::>(); - /// - /// for handle in handles { - /// handle.await.unwrap(); - /// } - /// } - /// ``` - /// - #[track_caller] - pub fn spawn_pinned_by_idx(&self, create_task: F, idx: usize) -> JoinHandle - where - F: FnOnce() -> Fut, - F: Send + 'static, - Fut: Future + 'static, - Fut::Output: Send + 'static, - { - self.pool - .spawn_pinned(create_task, WorkerChoice::ByIdx(idx)) - } -} - -impl Debug for LocalPoolHandle { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.write_str("LocalPoolHandle") - } -} - -enum WorkerChoice { - LeastBurdened, - ByIdx(usize), -} - -struct LocalPool { - workers: Vec, -} - -impl LocalPool { - /// Spawn a `?Send` future onto a worker - #[track_caller] - fn spawn_pinned( - &self, - create_task: F, - worker_choice: WorkerChoice, - ) -> JoinHandle - where - F: FnOnce() -> Fut, - F: Send + 'static, - Fut: Future + 'static, - Fut::Output: Send + 'static, - { - let (sender, receiver) = oneshot::channel(); - let (worker, job_guard) = match worker_choice { - WorkerChoice::LeastBurdened => self.find_and_incr_least_burdened_worker(), - WorkerChoice::ByIdx(idx) => self.find_worker_by_idx(idx), - }; - let worker_spawner = worker.spawner.clone(); - - // Spawn a future onto the worker's runtime so we can immediately return - // a join handle. 
- worker.runtime_handle.spawn(async move { - // Move the job guard into the task - let _job_guard = job_guard; - - // Propagate aborts via Abortable/AbortHandle - let (abort_handle, abort_registration) = AbortHandle::new_pair(); - let _abort_guard = AbortGuard(abort_handle); - - // Inside the future we can't run spawn_local yet because we're not - // in the context of a LocalSet. We need to send create_task to the - // LocalSet task for spawning. - let spawn_task = Box::new(move || { - // Once we're in the LocalSet context we can call spawn_local - let join_handle = - spawn_local( - async move { Abortable::new(create_task(), abort_registration).await }, - ); - - // Send the join handle back to the spawner. If sending fails, - // we assume the parent task was canceled, so cancel this task - // as well. - if let Err(join_handle) = sender.send(join_handle) { - join_handle.abort() - } - }); - - // Send the callback to the LocalSet task - if let Err(e) = worker_spawner.send(spawn_task) { - // Propagate the error as a panic in the join handle. - panic!("Failed to send job to worker: {}", e); - } - - // Wait for the task's join handle - let join_handle = match receiver.await { - Ok(handle) => handle, - Err(e) => { - // We sent the task successfully, but failed to get its - // join handle... We assume something happened to the worker - // and the task was not spawned. Propagate the error as a - // panic in the join handle. - panic!("Worker failed to send join handle: {}", e); - } - }; - - // Wait for the task to complete - let join_result = join_handle.await; - - match join_result { - Ok(Ok(output)) => output, - Ok(Err(_)) => { - // Pinned task was aborted. But that only happens if this - // task is aborted. So this is an impossible branch. 
- unreachable!( - "Reaching this branch means this task was previously \ - aborted but it continued running anyways" - ) - } - Err(e) => { - if e.is_panic() { - std::panic::resume_unwind(e.into_panic()); - } else if e.is_cancelled() { - // No one else should have the join handle, so this is - // unexpected. Forward this error as a panic in the join - // handle. - panic!("spawn_pinned task was canceled: {}", e); - } else { - // Something unknown happened (not a panic or - // cancellation). Forward this error as a panic in the - // join handle. - panic!("spawn_pinned task failed: {}", e); - } - } - } - }) - } - - /// Find the worker with the least number of tasks, increment its task - /// count, and return its handle. Make sure to actually spawn a task on - /// the worker so the task count is kept consistent with load. - /// - /// A job count guard is also returned to ensure the task count gets - /// decremented when the job is done. - fn find_and_incr_least_burdened_worker(&self) -> (&LocalWorkerHandle, JobCountGuard) { - loop { - let (worker, task_count) = self - .workers - .iter() - .map(|worker| (worker, worker.task_count.load(Ordering::SeqCst))) - .min_by_key(|&(_, count)| count) - .expect("There must be more than one worker"); - - // Make sure the task count hasn't changed since when we choose this - // worker. Otherwise, restart the search. - if worker - .task_count - .compare_exchange( - task_count, - task_count + 1, - Ordering::SeqCst, - Ordering::Relaxed, - ) - .is_ok() - { - return (worker, JobCountGuard(Arc::clone(&worker.task_count))); - } - } - } - - #[track_caller] - fn find_worker_by_idx(&self, idx: usize) -> (&LocalWorkerHandle, JobCountGuard) { - let worker = &self.workers[idx]; - worker.task_count.fetch_add(1, Ordering::SeqCst); - - (worker, JobCountGuard(Arc::clone(&worker.task_count))) - } -} - -/// Automatically decrements a worker's job count when a job finishes (when -/// this gets dropped). 
-struct JobCountGuard(Arc); - -impl Drop for JobCountGuard { - fn drop(&mut self) { - // Decrement the job count - let previous_value = self.0.fetch_sub(1, Ordering::SeqCst); - debug_assert!(previous_value >= 1); - } -} - -/// Calls abort on the handle when dropped. -struct AbortGuard(AbortHandle); - -impl Drop for AbortGuard { - fn drop(&mut self) { - self.0.abort(); - } -} - -type PinnedFutureSpawner = Box; - -struct LocalWorkerHandle { - runtime_handle: tokio::runtime::Handle, - spawner: UnboundedSender, - task_count: Arc, -} - -impl LocalWorkerHandle { - /// Create a new worker for executing pinned tasks - fn new_worker() -> LocalWorkerHandle { - let (sender, receiver) = unbounded_channel(); - let runtime = Builder::new_current_thread() - .enable_all() - .build() - .expect("Failed to start a pinned worker thread runtime"); - let runtime_handle = runtime.handle().clone(); - let task_count = Arc::new(AtomicUsize::new(0)); - let task_count_clone = Arc::clone(&task_count); - - std::thread::spawn(|| Self::run(runtime, receiver, task_count_clone)); - - LocalWorkerHandle { - runtime_handle, - spawner: sender, - task_count, - } - } - - fn run( - runtime: tokio::runtime::Runtime, - mut task_receiver: UnboundedReceiver, - task_count: Arc, - ) { - let local_set = LocalSet::new(); - local_set.block_on(&runtime, async { - while let Some(spawn_task) = task_receiver.recv().await { - // Calls spawn_local(future) - (spawn_task)(); - } - }); - - // If there are any tasks on the runtime associated with a LocalSet task - // that has already completed, but whose output has not yet been - // reported, let that task complete. - // - // Since the task_count is decremented when the runtime task exits, - // reading that counter lets us know if any such tasks completed during - // the call to `block_on`. - // - // Tasks on the LocalSet can't complete during this loop since they're - // stored on the LocalSet and we aren't accessing it. 
- let mut previous_task_count = task_count.load(Ordering::SeqCst); - loop { - // This call will also run tasks spawned on the runtime. - runtime.block_on(tokio::task::yield_now()); - let new_task_count = task_count.load(Ordering::SeqCst); - if new_task_count == previous_task_count { - break; - } else { - previous_task_count = new_task_count; - } - } - - // It's now no longer possible for a task on the runtime to be - // associated with a LocalSet task that has completed. Drop both the - // LocalSet and runtime to let tasks on the runtime be cancelled if and - // only if they are still on the LocalSet. - // - // Drop the LocalSet task first so that anyone awaiting the runtime - // JoinHandle will see the cancelled error after the LocalSet task - // destructor has completed. - drop(local_set); - drop(runtime); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/task/task_tracker.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/task/task_tracker.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/task/task_tracker.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/task/task_tracker.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,719 +0,0 @@ -//! Types related to the [`TaskTracker`] collection. -//! -//! See the documentation of [`TaskTracker`] for more information. - -use pin_project_lite::pin_project; -use std::fmt; -use std::future::Future; -use std::pin::Pin; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::task::{Context, Poll}; -use tokio::sync::{futures::Notified, Notify}; - -#[cfg(feature = "rt")] -use tokio::{ - runtime::Handle, - task::{JoinHandle, LocalSet}, -}; - -/// A task tracker used for waiting until tasks exit. -/// -/// This is usually used together with [`CancellationToken`] to implement [graceful shutdown]. The -/// `CancellationToken` is used to signal to tasks that they should shut down, and the -/// `TaskTracker` is used to wait for them to finish shutting down. 
-/// -/// The `TaskTracker` will also keep track of a `closed` boolean. This is used to handle the case -/// where the `TaskTracker` is empty, but we don't want to shut down yet. This means that the -/// [`wait`] method will wait until *both* of the following happen at the same time: -/// -/// * The `TaskTracker` must be closed using the [`close`] method. -/// * The `TaskTracker` must be empty, that is, all tasks that it is tracking must have exited. -/// -/// When a call to [`wait`] returns, it is guaranteed that all tracked tasks have exited and that -/// the destructor of the future has finished running. However, there might be a short amount of -/// time where [`JoinHandle::is_finished`] returns false. -/// -/// # Comparison to `JoinSet` -/// -/// The main Tokio crate has a similar collection known as [`JoinSet`]. The `JoinSet` type has a -/// lot more features than `TaskTracker`, so `TaskTracker` should only be used when one of its -/// unique features is required: -/// -/// 1. When tasks exit, a `TaskTracker` will allow the task to immediately free its memory. -/// 2. By not closing the `TaskTracker`, [`wait`] will be prevented from from returning even if -/// the `TaskTracker` is empty. -/// 3. A `TaskTracker` does not require mutable access to insert tasks. -/// 4. A `TaskTracker` can be cloned to share it with many tasks. -/// -/// The first point is the most important one. A [`JoinSet`] keeps track of the return value of -/// every inserted task. This means that if the caller keeps inserting tasks and never calls -/// [`join_next`], then their return values will keep building up and consuming memory, _even if_ -/// most of the tasks have already exited. This can cause the process to run out of memory. With a -/// `TaskTracker`, this does not happen. Once tasks exit, they are immediately removed from the -/// `TaskTracker`. -/// -/// # Examples -/// -/// For more examples, please see the topic page on [graceful shutdown]. 
-/// -/// ## Spawn tasks and wait for them to exit -/// -/// This is a simple example. For this case, [`JoinSet`] should probably be used instead. -/// -/// ``` -/// use tokio_util::task::TaskTracker; -/// -/// #[tokio::main] -/// async fn main() { -/// let tracker = TaskTracker::new(); -/// -/// for i in 0..10 { -/// tracker.spawn(async move { -/// println!("Task {} is running!", i); -/// }); -/// } -/// // Once we spawned everything, we close the tracker. -/// tracker.close(); -/// -/// // Wait for everything to finish. -/// tracker.wait().await; -/// -/// println!("This is printed after all of the tasks."); -/// } -/// ``` -/// -/// ## Wait for tasks to exit -/// -/// This example shows the intended use-case of `TaskTracker`. It is used together with -/// [`CancellationToken`] to implement graceful shutdown. -/// ``` -/// use tokio_util::sync::CancellationToken; -/// use tokio_util::task::TaskTracker; -/// use tokio::time::{self, Duration}; -/// -/// async fn background_task(num: u64) { -/// for i in 0..10 { -/// time::sleep(Duration::from_millis(100*num)).await; -/// println!("Background task {} in iteration {}.", num, i); -/// } -/// } -/// -/// #[tokio::main] -/// # async fn _hidden() {} -/// # #[tokio::main(flavor = "current_thread", start_paused = true)] -/// async fn main() { -/// let tracker = TaskTracker::new(); -/// let token = CancellationToken::new(); -/// -/// for i in 0..10 { -/// let token = token.clone(); -/// tracker.spawn(async move { -/// // Use a `tokio::select!` to kill the background task if the token is -/// // cancelled. -/// tokio::select! { -/// () = background_task(i) => { -/// println!("Task {} exiting normally.", i); -/// }, -/// () = token.cancelled() => { -/// // Do some cleanup before we really exit. -/// time::sleep(Duration::from_millis(50)).await; -/// println!("Task {} finished cleanup.", i); -/// }, -/// } -/// }); -/// } -/// -/// // Spawn a background task that will send the shutdown signal. 
-/// { -/// let tracker = tracker.clone(); -/// tokio::spawn(async move { -/// // Normally you would use something like ctrl-c instead of -/// // sleeping. -/// time::sleep(Duration::from_secs(2)).await; -/// tracker.close(); -/// token.cancel(); -/// }); -/// } -/// -/// // Wait for all tasks to exit. -/// tracker.wait().await; -/// -/// println!("All tasks have exited now."); -/// } -/// ``` -/// -/// [`CancellationToken`]: crate::sync::CancellationToken -/// [`JoinHandle::is_finished`]: tokio::task::JoinHandle::is_finished -/// [`JoinSet`]: tokio::task::JoinSet -/// [`close`]: Self::close -/// [`join_next`]: tokio::task::JoinSet::join_next -/// [`wait`]: Self::wait -/// [graceful shutdown]: https://tokio.rs/tokio/topics/shutdown -pub struct TaskTracker { - inner: Arc, -} - -/// Represents a task tracked by a [`TaskTracker`]. -#[must_use] -#[derive(Debug)] -pub struct TaskTrackerToken { - task_tracker: TaskTracker, -} - -struct TaskTrackerInner { - /// Keeps track of the state. - /// - /// The lowest bit is whether the task tracker is closed. - /// - /// The rest of the bits count the number of tracked tasks. - state: AtomicUsize, - /// Used to notify when the last task exits. - on_last_exit: Notify, -} - -pin_project! { - /// A future that is tracked as a task by a [`TaskTracker`]. - /// - /// The associated [`TaskTracker`] cannot complete until this future is dropped. - /// - /// This future is returned by [`TaskTracker::track_future`]. - #[must_use = "futures do nothing unless polled"] - pub struct TrackedFuture { - #[pin] - future: F, - token: TaskTrackerToken, - } -} - -pin_project! { - /// A future that completes when the [`TaskTracker`] is empty and closed. - /// - /// This future is returned by [`TaskTracker::wait`]. 
- #[must_use = "futures do nothing unless polled"] - pub struct TaskTrackerWaitFuture<'a> { - #[pin] - future: Notified<'a>, - inner: Option<&'a TaskTrackerInner>, - } -} - -impl TaskTrackerInner { - #[inline] - fn new() -> Self { - Self { - state: AtomicUsize::new(0), - on_last_exit: Notify::new(), - } - } - - #[inline] - fn is_closed_and_empty(&self) -> bool { - // If empty and closed bit set, then we are done. - // - // The acquire load will synchronize with the release store of any previous call to - // `set_closed` and `drop_task`. - self.state.load(Ordering::Acquire) == 1 - } - - #[inline] - fn set_closed(&self) -> bool { - // The AcqRel ordering makes the closed bit behave like a `Mutex` for synchronization - // purposes. We do this because it makes the return value of `TaskTracker::{close,reopen}` - // more meaningful for the user. Without these orderings, this assert could fail: - // ``` - // // thread 1 - // some_other_atomic.store(true, Relaxed); - // tracker.close(); - // - // // thread 2 - // if tracker.reopen() { - // assert!(some_other_atomic.load(Relaxed)); - // } - // ``` - // However, with the AcqRel ordering, we establish a happens-before relationship from the - // call to `close` and the later call to `reopen` that returned true. - let state = self.state.fetch_or(1, Ordering::AcqRel); - - // If there are no tasks, and if it was not already closed: - if state == 0 { - self.notify_now(); - } - - (state & 1) == 0 - } - - #[inline] - fn set_open(&self) -> bool { - // See `set_closed` regarding the AcqRel ordering. - let state = self.state.fetch_and(!1, Ordering::AcqRel); - (state & 1) == 1 - } - - #[inline] - fn add_task(&self) { - self.state.fetch_add(2, Ordering::Relaxed); - } - - #[inline] - fn drop_task(&self) { - let state = self.state.fetch_sub(2, Ordering::Release); - - // If this was the last task and we are closed: - if state == 3 { - self.notify_now(); - } - } - - #[cold] - fn notify_now(&self) { - // Insert an acquire fence. 
This matters for `drop_task` but doesn't matter for - // `set_closed` since it already uses AcqRel. - // - // This synchronizes with the release store of any other call to `drop_task`, and with the - // release store in the call to `set_closed`. That ensures that everything that happened - // before those other calls to `drop_task` or `set_closed` will be visible after this load, - // and those things will also be visible to anything woken by the call to `notify_waiters`. - self.state.load(Ordering::Acquire); - - self.on_last_exit.notify_waiters(); - } -} - -impl TaskTracker { - /// Creates a new `TaskTracker`. - /// - /// The `TaskTracker` will start out as open. - #[must_use] - pub fn new() -> Self { - Self { - inner: Arc::new(TaskTrackerInner::new()), - } - } - - /// Waits until this `TaskTracker` is both closed and empty. - /// - /// If the `TaskTracker` is already closed and empty when this method is called, then it - /// returns immediately. - /// - /// The `wait` future is resistant against [ABA problems][aba]. That is, if the `TaskTracker` - /// becomes both closed and empty for a short amount of time, then it is guarantee that all - /// `wait` futures that were created before the short time interval will trigger, even if they - /// are not polled during that short time interval. - /// - /// # Cancel safety - /// - /// This method is cancel safe. - /// - /// However, the resistance against [ABA problems][aba] is lost when using `wait` as the - /// condition in a `tokio::select!` loop. - /// - /// [aba]: https://en.wikipedia.org/wiki/ABA_problem - #[inline] - pub fn wait(&self) -> TaskTrackerWaitFuture<'_> { - TaskTrackerWaitFuture { - future: self.inner.on_last_exit.notified(), - inner: if self.inner.is_closed_and_empty() { - None - } else { - Some(&self.inner) - }, - } - } - - /// Close this `TaskTracker`. - /// - /// This allows [`wait`] futures to complete. It does not prevent you from spawning new tasks. 
- /// - /// Returns `true` if this closed the `TaskTracker`, or `false` if it was already closed. - /// - /// [`wait`]: Self::wait - #[inline] - pub fn close(&self) -> bool { - self.inner.set_closed() - } - - /// Reopen this `TaskTracker`. - /// - /// This prevents [`wait`] futures from completing even if the `TaskTracker` is empty. - /// - /// Returns `true` if this reopened the `TaskTracker`, or `false` if it was already open. - /// - /// [`wait`]: Self::wait - #[inline] - pub fn reopen(&self) -> bool { - self.inner.set_open() - } - - /// Returns `true` if this `TaskTracker` is [closed](Self::close). - #[inline] - #[must_use] - pub fn is_closed(&self) -> bool { - (self.inner.state.load(Ordering::Acquire) & 1) != 0 - } - - /// Returns the number of tasks tracked by this `TaskTracker`. - #[inline] - #[must_use] - pub fn len(&self) -> usize { - self.inner.state.load(Ordering::Acquire) >> 1 - } - - /// Returns `true` if there are no tasks in this `TaskTracker`. - #[inline] - #[must_use] - pub fn is_empty(&self) -> bool { - self.inner.state.load(Ordering::Acquire) <= 1 - } - - /// Spawn the provided future on the current Tokio runtime, and track it in this `TaskTracker`. - /// - /// This is equivalent to `tokio::spawn(tracker.track_future(task))`. - #[inline] - #[track_caller] - #[cfg(feature = "rt")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - pub fn spawn(&self, task: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - tokio::task::spawn(self.track_future(task)) - } - - /// Spawn the provided future on the provided Tokio runtime, and track it in this `TaskTracker`. - /// - /// This is equivalent to `handle.spawn(tracker.track_future(task))`. 
- #[inline] - #[track_caller] - #[cfg(feature = "rt")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - pub fn spawn_on(&self, task: F, handle: &Handle) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - handle.spawn(self.track_future(task)) - } - - /// Spawn the provided future on the current [`LocalSet`], and track it in this `TaskTracker`. - /// - /// This is equivalent to `tokio::task::spawn_local(tracker.track_future(task))`. - /// - /// [`LocalSet`]: tokio::task::LocalSet - #[inline] - #[track_caller] - #[cfg(feature = "rt")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - pub fn spawn_local(&self, task: F) -> JoinHandle - where - F: Future + 'static, - F::Output: 'static, - { - tokio::task::spawn_local(self.track_future(task)) - } - - /// Spawn the provided future on the provided [`LocalSet`], and track it in this `TaskTracker`. - /// - /// This is equivalent to `local_set.spawn_local(tracker.track_future(task))`. - /// - /// [`LocalSet`]: tokio::task::LocalSet - #[inline] - #[track_caller] - #[cfg(feature = "rt")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - pub fn spawn_local_on(&self, task: F, local_set: &LocalSet) -> JoinHandle - where - F: Future + 'static, - F::Output: 'static, - { - local_set.spawn_local(self.track_future(task)) - } - - /// Spawn the provided blocking task on the current Tokio runtime, and track it in this `TaskTracker`. - /// - /// This is equivalent to `tokio::task::spawn_blocking(tracker.track_future(task))`. 
- #[inline] - #[track_caller] - #[cfg(feature = "rt")] - #[cfg(not(target_family = "wasm"))] - #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - pub fn spawn_blocking(&self, task: F) -> JoinHandle - where - F: FnOnce() -> T, - F: Send + 'static, - T: Send + 'static, - { - let token = self.token(); - tokio::task::spawn_blocking(move || { - let res = task(); - drop(token); - res - }) - } - - /// Spawn the provided blocking task on the provided Tokio runtime, and track it in this `TaskTracker`. - /// - /// This is equivalent to `handle.spawn_blocking(tracker.track_future(task))`. - #[inline] - #[track_caller] - #[cfg(feature = "rt")] - #[cfg(not(target_family = "wasm"))] - #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] - pub fn spawn_blocking_on(&self, task: F, handle: &Handle) -> JoinHandle - where - F: FnOnce() -> T, - F: Send + 'static, - T: Send + 'static, - { - let token = self.token(); - handle.spawn_blocking(move || { - let res = task(); - drop(token); - res - }) - } - - /// Track the provided future. - /// - /// The returned [`TrackedFuture`] will count as a task tracked by this collection, and will - /// prevent calls to [`wait`] from returning until the task is dropped. - /// - /// The task is removed from the collection when it is dropped, not when [`poll`] returns - /// [`Poll::Ready`]. - /// - /// # Examples - /// - /// Track a future spawned with [`tokio::spawn`]. - /// - /// ``` - /// # async fn my_async_fn() {} - /// use tokio_util::task::TaskTracker; - /// - /// # #[tokio::main(flavor = "current_thread")] - /// # async fn main() { - /// let tracker = TaskTracker::new(); - /// - /// tokio::spawn(tracker.track_future(my_async_fn())); - /// # } - /// ``` - /// - /// Track a future spawned on a [`JoinSet`]. 
- /// ``` - /// # async fn my_async_fn() {} - /// use tokio::task::JoinSet; - /// use tokio_util::task::TaskTracker; - /// - /// # #[tokio::main(flavor = "current_thread")] - /// # async fn main() { - /// let tracker = TaskTracker::new(); - /// let mut join_set = JoinSet::new(); - /// - /// join_set.spawn(tracker.track_future(my_async_fn())); - /// # } - /// ``` - /// - /// [`JoinSet`]: tokio::task::JoinSet - /// [`Poll::Pending`]: std::task::Poll::Pending - /// [`poll`]: std::future::Future::poll - /// [`wait`]: Self::wait - #[inline] - pub fn track_future(&self, future: F) -> TrackedFuture { - TrackedFuture { - future, - token: self.token(), - } - } - - /// Creates a [`TaskTrackerToken`] representing a task tracked by this `TaskTracker`. - /// - /// This token is a lower-level utility than the spawn methods. Each token is considered to - /// correspond to a task. As long as the token exists, the `TaskTracker` cannot complete. - /// Furthermore, the count returned by the [`len`] method will include the tokens in the count. - /// - /// Dropping the token indicates to the `TaskTracker` that the task has exited. - /// - /// [`len`]: TaskTracker::len - #[inline] - pub fn token(&self) -> TaskTrackerToken { - self.inner.add_task(); - TaskTrackerToken { - task_tracker: self.clone(), - } - } - - /// Returns `true` if both task trackers correspond to the same set of tasks. - /// - /// # Examples - /// - /// ``` - /// use tokio_util::task::TaskTracker; - /// - /// let tracker_1 = TaskTracker::new(); - /// let tracker_2 = TaskTracker::new(); - /// let tracker_1_clone = tracker_1.clone(); - /// - /// assert!(TaskTracker::ptr_eq(&tracker_1, &tracker_1_clone)); - /// assert!(!TaskTracker::ptr_eq(&tracker_1, &tracker_2)); - /// ``` - #[inline] - #[must_use] - pub fn ptr_eq(left: &TaskTracker, right: &TaskTracker) -> bool { - Arc::ptr_eq(&left.inner, &right.inner) - } -} - -impl Default for TaskTracker { - /// Creates a new `TaskTracker`. 
- /// - /// The `TaskTracker` will start out as open. - #[inline] - fn default() -> TaskTracker { - TaskTracker::new() - } -} - -impl Clone for TaskTracker { - /// Returns a new `TaskTracker` that tracks the same set of tasks. - /// - /// Since the new `TaskTracker` shares the same set of tasks, changes to one set are visible in - /// all other clones. - /// - /// # Examples - /// - /// ``` - /// use tokio_util::task::TaskTracker; - /// - /// #[tokio::main] - /// # async fn _hidden() {} - /// # #[tokio::main(flavor = "current_thread")] - /// async fn main() { - /// let tracker = TaskTracker::new(); - /// let cloned = tracker.clone(); - /// - /// // Spawns on `tracker` are visible in `cloned`. - /// tracker.spawn(std::future::pending::<()>()); - /// assert_eq!(cloned.len(), 1); - /// - /// // Spawns on `cloned` are visible in `tracker`. - /// cloned.spawn(std::future::pending::<()>()); - /// assert_eq!(tracker.len(), 2); - /// - /// // Calling `close` is visible to `cloned`. - /// tracker.close(); - /// assert!(cloned.is_closed()); - /// - /// // Calling `reopen` is visible to `tracker`. - /// cloned.reopen(); - /// assert!(!tracker.is_closed()); - /// } - /// ``` - #[inline] - fn clone(&self) -> TaskTracker { - Self { - inner: self.inner.clone(), - } - } -} - -fn debug_inner(inner: &TaskTrackerInner, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let state = inner.state.load(Ordering::Acquire); - let is_closed = (state & 1) != 0; - let len = state >> 1; - - f.debug_struct("TaskTracker") - .field("len", &len) - .field("is_closed", &is_closed) - .field("inner", &(inner as *const TaskTrackerInner)) - .finish() -} - -impl fmt::Debug for TaskTracker { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - debug_inner(&self.inner, f) - } -} - -impl TaskTrackerToken { - /// Returns the [`TaskTracker`] that this token is associated with. 
- #[inline] - #[must_use] - pub fn task_tracker(&self) -> &TaskTracker { - &self.task_tracker - } -} - -impl Clone for TaskTrackerToken { - /// Returns a new `TaskTrackerToken` associated with the same [`TaskTracker`]. - /// - /// This is equivalent to `token.task_tracker().token()`. - #[inline] - fn clone(&self) -> TaskTrackerToken { - self.task_tracker.token() - } -} - -impl Drop for TaskTrackerToken { - /// Dropping the token indicates to the [`TaskTracker`] that the task has exited. - #[inline] - fn drop(&mut self) { - self.task_tracker.inner.drop_task(); - } -} - -impl Future for TrackedFuture { - type Output = F::Output; - - #[inline] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.project().future.poll(cx) - } -} - -impl fmt::Debug for TrackedFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TrackedFuture") - .field("future", &self.future) - .field("task_tracker", self.token.task_tracker()) - .finish() - } -} - -impl<'a> Future for TaskTrackerWaitFuture<'a> { - type Output = (); - - #[inline] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { - let me = self.project(); - - let inner = match me.inner.as_ref() { - None => return Poll::Ready(()), - Some(inner) => inner, - }; - - let ready = inner.is_closed_and_empty() || me.future.poll(cx).is_ready(); - if ready { - *me.inner = None; - Poll::Ready(()) - } else { - Poll::Pending - } - } -} - -impl<'a> fmt::Debug for TaskTrackerWaitFuture<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - struct Helper<'a>(&'a TaskTrackerInner); - - impl fmt::Debug for Helper<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - debug_inner(self.0, f) - } - } - - f.debug_struct("TaskTrackerWaitFuture") - .field("future", &self.future) - .field("task_tracker", &self.inner.map(Helper)) - .finish() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/time/delay_queue.rs 
s390-tools-2.33.1/rust-vendor/tokio-util/src/time/delay_queue.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/time/delay_queue.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/time/delay_queue.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1310 +0,0 @@ -//! A queue of delayed elements. -//! -//! See [`DelayQueue`] for more details. -//! -//! [`DelayQueue`]: struct@DelayQueue - -use crate::time::wheel::{self, Wheel}; - -use futures_core::ready; -use tokio::time::{sleep_until, Duration, Instant, Sleep}; - -use core::ops::{Index, IndexMut}; -use slab::Slab; -use std::cmp; -use std::collections::HashMap; -use std::convert::From; -use std::fmt; -use std::fmt::Debug; -use std::future::Future; -use std::marker::PhantomData; -use std::pin::Pin; -use std::task::{self, Poll, Waker}; - -/// A queue of delayed elements. -/// -/// Once an element is inserted into the `DelayQueue`, it is yielded once the -/// specified deadline has been reached. -/// -/// # Usage -/// -/// Elements are inserted into `DelayQueue` using the [`insert`] or -/// [`insert_at`] methods. A deadline is provided with the item and a [`Key`] is -/// returned. The key is used to remove the entry or to change the deadline at -/// which it should be yielded back. -/// -/// Once delays have been configured, the `DelayQueue` is used via its -/// [`Stream`] implementation. [`poll_expired`] is called. If an entry has reached its -/// deadline, it is returned. If not, `Poll::Pending` is returned indicating that the -/// current task will be notified once the deadline has been reached. -/// -/// # `Stream` implementation -/// -/// Items are retrieved from the queue via [`DelayQueue::poll_expired`]. If no delays have -/// expired, no items are returned. In this case, `Poll::Pending` is returned and the -/// current task is registered to be notified once the next item's delay has -/// expired. -/// -/// If no items are in the queue, i.e. 
`is_empty()` returns `true`, then `poll` -/// returns `Poll::Ready(None)`. This indicates that the stream has reached an end. -/// However, if a new item is inserted *after*, `poll` will once again start -/// returning items or `Poll::Pending`. -/// -/// Items are returned ordered by their expirations. Items that are configured -/// to expire first will be returned first. There are no ordering guarantees -/// for items configured to expire at the same instant. Also note that delays are -/// rounded to the closest millisecond. -/// -/// # Implementation -/// -/// The [`DelayQueue`] is backed by a separate instance of a timer wheel similar to that used internally -/// by Tokio's standalone timer utilities such as [`sleep`]. Because of this, it offers the same -/// performance and scalability benefits. -/// -/// State associated with each entry is stored in a [`slab`]. This amortizes the cost of allocation, -/// and allows reuse of the memory allocated for expired entries. -/// -/// Capacity can be checked using [`capacity`] and allocated preemptively by using -/// the [`reserve`] method. -/// -/// # Usage -/// -/// Using `DelayQueue` to manage cache entries. 
-/// -/// ```rust,no_run -/// use tokio_util::time::{DelayQueue, delay_queue}; -/// -/// use futures::ready; -/// use std::collections::HashMap; -/// use std::task::{Context, Poll}; -/// use std::time::Duration; -/// # type CacheKey = String; -/// # type Value = String; -/// -/// struct Cache { -/// entries: HashMap, -/// expirations: DelayQueue, -/// } -/// -/// const TTL_SECS: u64 = 30; -/// -/// impl Cache { -/// fn insert(&mut self, key: CacheKey, value: Value) { -/// let delay = self.expirations -/// .insert(key.clone(), Duration::from_secs(TTL_SECS)); -/// -/// self.entries.insert(key, (value, delay)); -/// } -/// -/// fn get(&self, key: &CacheKey) -> Option<&Value> { -/// self.entries.get(key) -/// .map(|&(ref v, _)| v) -/// } -/// -/// fn remove(&mut self, key: &CacheKey) { -/// if let Some((_, cache_key)) = self.entries.remove(key) { -/// self.expirations.remove(&cache_key); -/// } -/// } -/// -/// fn poll_purge(&mut self, cx: &mut Context<'_>) -> Poll<()> { -/// while let Some(entry) = ready!(self.expirations.poll_expired(cx)) { -/// self.entries.remove(entry.get_ref()); -/// } -/// -/// Poll::Ready(()) -/// } -/// } -/// ``` -/// -/// [`insert`]: method@Self::insert -/// [`insert_at`]: method@Self::insert_at -/// [`Key`]: struct@Key -/// [`Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html -/// [`poll_expired`]: method@Self::poll_expired -/// [`Stream::poll_expired`]: method@Self::poll_expired -/// [`DelayQueue`]: struct@DelayQueue -/// [`sleep`]: fn@tokio::time::sleep -/// [`slab`]: slab -/// [`capacity`]: method@Self::capacity -/// [`reserve`]: method@Self::reserve -#[derive(Debug)] -pub struct DelayQueue { - /// Stores data associated with entries - slab: SlabStorage, - - /// Lookup structure tracking all delays in the queue - wheel: Wheel>, - - /// Delays that were inserted when already expired. 
These cannot be stored - /// in the wheel - expired: Stack, - - /// Delay expiring when the *first* item in the queue expires - delay: Option>>, - - /// Wheel polling state - wheel_now: u64, - - /// Instant at which the timer starts - start: Instant, - - /// Waker that is invoked when we potentially need to reset the timer. - /// Because we lazily create the timer when the first entry is created, we - /// need to awaken any poller that polled us before that point. - waker: Option, -} - -#[derive(Default)] -struct SlabStorage { - inner: Slab>, - - // A `compact` call requires a re-mapping of the `Key`s that were changed - // during the `compact` call of the `slab`. Since the keys that were given out - // cannot be changed retroactively we need to keep track of these re-mappings. - // The keys of `key_map` correspond to the old keys that were given out and - // the values to the `Key`s that were re-mapped by the `compact` call. - key_map: HashMap, - - // Index used to create new keys to hand out. - next_key_index: usize, - - // Whether `compact` has been called, necessary in order to decide whether - // to include keys in `key_map`. - compact_called: bool, -} - -impl SlabStorage { - pub(crate) fn with_capacity(capacity: usize) -> SlabStorage { - SlabStorage { - inner: Slab::with_capacity(capacity), - key_map: HashMap::new(), - next_key_index: 0, - compact_called: false, - } - } - - // Inserts data into the inner slab and re-maps keys if necessary - pub(crate) fn insert(&mut self, val: Data) -> Key { - let mut key = KeyInternal::new(self.inner.insert(val)); - let key_contained = self.key_map.contains_key(&key.into()); - - if key_contained { - // It's possible that a `compact` call creates capacity in `self.inner` in - // such a way that a `self.inner.insert` call creates a `key` which was - // previously given out during an `insert` call prior to the `compact` call. 
- // If `key` is contained in `self.key_map`, we have encountered this exact situation, - // We need to create a new key `key_to_give_out` and include the relation - // `key_to_give_out` -> `key` in `self.key_map`. - let key_to_give_out = self.create_new_key(); - assert!(!self.key_map.contains_key(&key_to_give_out.into())); - self.key_map.insert(key_to_give_out.into(), key); - key = key_to_give_out; - } else if self.compact_called { - // Include an identity mapping in `self.key_map` in order to allow us to - // panic if a key that was handed out is removed more than once. - self.key_map.insert(key.into(), key); - } - - key.into() - } - - // Re-map the key in case compact was previously called. - // Note: Since we include identity mappings in key_map after compact was called, - // we have information about all keys that were handed out. In the case in which - // compact was called and we try to remove a Key that was previously removed - // we can detect invalid keys if no key is found in `key_map`. This is necessary - // in order to prevent situations in which a previously removed key - // corresponds to a re-mapped key internally and which would then be incorrectly - // removed from the slab. - // - // Example to illuminate this problem: - // - // Let's assume our `key_map` is {1 -> 2, 2 -> 1} and we call remove(1). If we - // were to remove 1 again, we would not find it inside `key_map` anymore. - // If we were to imply from this that no re-mapping was necessary, we would - // incorrectly remove 1 from `self.slab.inner`, which corresponds to the - // handed-out key 2. 
- pub(crate) fn remove(&mut self, key: &Key) -> Data { - let remapped_key = if self.compact_called { - match self.key_map.remove(key) { - Some(key_internal) => key_internal, - None => panic!("invalid key"), - } - } else { - (*key).into() - }; - - self.inner.remove(remapped_key.index) - } - - pub(crate) fn shrink_to_fit(&mut self) { - self.inner.shrink_to_fit(); - self.key_map.shrink_to_fit(); - } - - pub(crate) fn compact(&mut self) { - if !self.compact_called { - for (key, _) in self.inner.iter() { - self.key_map.insert(Key::new(key), KeyInternal::new(key)); - } - } - - let mut remapping = HashMap::new(); - self.inner.compact(|_, from, to| { - remapping.insert(from, to); - true - }); - - // At this point `key_map` contains a mapping for every element. - for internal_key in self.key_map.values_mut() { - if let Some(new_internal_key) = remapping.get(&internal_key.index) { - *internal_key = KeyInternal::new(*new_internal_key); - } - } - - if self.key_map.capacity() > 2 * self.key_map.len() { - self.key_map.shrink_to_fit(); - } - - self.compact_called = true; - } - - // Tries to re-map a `Key` that was given out to the user to its - // corresponding internal key. 
- fn remap_key(&self, key: &Key) -> Option { - let key_map = &self.key_map; - if self.compact_called { - key_map.get(key).copied() - } else { - Some((*key).into()) - } - } - - fn create_new_key(&mut self) -> KeyInternal { - while self.key_map.contains_key(&Key::new(self.next_key_index)) { - self.next_key_index = self.next_key_index.wrapping_add(1); - } - - KeyInternal::new(self.next_key_index) - } - - pub(crate) fn len(&self) -> usize { - self.inner.len() - } - - pub(crate) fn capacity(&self) -> usize { - self.inner.capacity() - } - - pub(crate) fn clear(&mut self) { - self.inner.clear(); - self.key_map.clear(); - self.compact_called = false; - } - - pub(crate) fn reserve(&mut self, additional: usize) { - self.inner.reserve(additional); - - if self.compact_called { - self.key_map.reserve(additional); - } - } - - pub(crate) fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - pub(crate) fn contains(&self, key: &Key) -> bool { - let remapped_key = self.remap_key(key); - - match remapped_key { - Some(internal_key) => self.inner.contains(internal_key.index), - None => false, - } - } -} - -impl fmt::Debug for SlabStorage -where - T: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - if fmt.alternate() { - fmt.debug_map().entries(self.inner.iter()).finish() - } else { - fmt.debug_struct("Slab") - .field("len", &self.len()) - .field("cap", &self.capacity()) - .finish() - } - } -} - -impl Index for SlabStorage { - type Output = Data; - - fn index(&self, key: Key) -> &Self::Output { - let remapped_key = self.remap_key(&key); - - match remapped_key { - Some(internal_key) => &self.inner[internal_key.index], - None => panic!("Invalid index {}", key.index), - } - } -} - -impl IndexMut for SlabStorage { - fn index_mut(&mut self, key: Key) -> &mut Data { - let remapped_key = self.remap_key(&key); - - match remapped_key { - Some(internal_key) => &mut self.inner[internal_key.index], - None => panic!("Invalid index {}", key.index), - } - } -} - 
-/// An entry in `DelayQueue` that has expired and been removed. -/// -/// Values are returned by [`DelayQueue::poll_expired`]. -/// -/// [`DelayQueue::poll_expired`]: method@DelayQueue::poll_expired -#[derive(Debug)] -pub struct Expired { - /// The data stored in the queue - data: T, - - /// The expiration time - deadline: Instant, - - /// The key associated with the entry - key: Key, -} - -/// Token to a value stored in a `DelayQueue`. -/// -/// Instances of `Key` are returned by [`DelayQueue::insert`]. See [`DelayQueue`] -/// documentation for more details. -/// -/// [`DelayQueue`]: struct@DelayQueue -/// [`DelayQueue::insert`]: method@DelayQueue::insert -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct Key { - index: usize, -} - -// Whereas `Key` is given out to users that use `DelayQueue`, internally we use -// `KeyInternal` as the key type in order to make the logic of mapping between keys -// as a result of `compact` calls clearer. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -struct KeyInternal { - index: usize, -} - -#[derive(Debug)] -struct Stack { - /// Head of the stack - head: Option, - _p: PhantomData T>, -} - -#[derive(Debug)] -struct Data { - /// The data being stored in the queue and will be returned at the requested - /// instant. - inner: T, - - /// The instant at which the item is returned. - when: u64, - - /// Set to true when stored in the `expired` queue - expired: bool, - - /// Next entry in the stack - next: Option, - - /// Previous entry in the stack - prev: Option, -} - -/// Maximum number of entries the queue can handle -const MAX_ENTRIES: usize = (1 << 30) - 1; - -impl DelayQueue { - /// Creates a new, empty, `DelayQueue`. - /// - /// The queue will not allocate storage until items are inserted into it. 
- /// - /// # Examples - /// - /// ```rust - /// # use tokio_util::time::DelayQueue; - /// let delay_queue: DelayQueue = DelayQueue::new(); - /// ``` - pub fn new() -> DelayQueue { - DelayQueue::with_capacity(0) - } - - /// Creates a new, empty, `DelayQueue` with the specified capacity. - /// - /// The queue will be able to hold at least `capacity` elements without - /// reallocating. If `capacity` is 0, the queue will not allocate for - /// storage. - /// - /// # Examples - /// - /// ```rust - /// # use tokio_util::time::DelayQueue; - /// # use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::with_capacity(10); - /// - /// // These insertions are done without further allocation - /// for i in 0..10 { - /// delay_queue.insert(i, Duration::from_secs(i)); - /// } - /// - /// // This will make the queue allocate additional storage - /// delay_queue.insert(11, Duration::from_secs(11)); - /// # } - /// ``` - pub fn with_capacity(capacity: usize) -> DelayQueue { - DelayQueue { - wheel: Wheel::new(), - slab: SlabStorage::with_capacity(capacity), - expired: Stack::default(), - delay: None, - wheel_now: 0, - start: Instant::now(), - waker: None, - } - } - - /// Inserts `value` into the queue set to expire at a specific instant in - /// time. - /// - /// This function is identical to `insert`, but takes an `Instant` instead - /// of a `Duration`. - /// - /// `value` is stored in the queue until `when` is reached. At which point, - /// `value` will be returned from [`poll_expired`]. If `when` has already been - /// reached, then `value` is immediately made available to poll. - /// - /// The return value represents the insertion and is used as an argument to - /// [`remove`] and [`reset`]. Note that [`Key`] is a token and is reused once - /// `value` is removed from the queue either by calling [`poll_expired`] after - /// `when` is reached or by calling [`remove`]. 
At this point, the caller - /// must take care to not use the returned [`Key`] again as it may reference - /// a different item in the queue. - /// - /// See [type] level documentation for more details. - /// - /// # Panics - /// - /// This function panics if `when` is too far in the future. - /// - /// # Examples - /// - /// Basic usage - /// - /// ```rust - /// use tokio::time::{Duration, Instant}; - /// use tokio_util::time::DelayQueue; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert_at( - /// "foo", Instant::now() + Duration::from_secs(5)); - /// - /// // Remove the entry - /// let item = delay_queue.remove(&key); - /// assert_eq!(*item.get_ref(), "foo"); - /// # } - /// ``` - /// - /// [`poll_expired`]: method@Self::poll_expired - /// [`remove`]: method@Self::remove - /// [`reset`]: method@Self::reset - /// [`Key`]: struct@Key - /// [type]: # - #[track_caller] - pub fn insert_at(&mut self, value: T, when: Instant) -> Key { - assert!(self.slab.len() < MAX_ENTRIES, "max entries exceeded"); - - // Normalize the deadline. Values cannot be set to expire in the past. 
- let when = self.normalize_deadline(when); - - // Insert the value in the store - let key = self.slab.insert(Data { - inner: value, - when, - expired: false, - next: None, - prev: None, - }); - - self.insert_idx(when, key); - - // Set a new delay if the current's deadline is later than the one of the new item - let should_set_delay = if let Some(ref delay) = self.delay { - let current_exp = self.normalize_deadline(delay.deadline()); - current_exp > when - } else { - true - }; - - if should_set_delay { - if let Some(waker) = self.waker.take() { - waker.wake(); - } - - let delay_time = self.start + Duration::from_millis(when); - if let Some(ref mut delay) = &mut self.delay { - delay.as_mut().reset(delay_time); - } else { - self.delay = Some(Box::pin(sleep_until(delay_time))); - } - } - - key - } - - /// Attempts to pull out the next value of the delay queue, registering the - /// current task for wakeup if the value is not yet available, and returning - /// `None` if the queue is exhausted. - pub fn poll_expired(&mut self, cx: &mut task::Context<'_>) -> Poll>> { - if !self - .waker - .as_ref() - .map(|w| w.will_wake(cx.waker())) - .unwrap_or(false) - { - self.waker = Some(cx.waker().clone()); - } - - let item = ready!(self.poll_idx(cx)); - Poll::Ready(item.map(|key| { - let data = self.slab.remove(&key); - debug_assert!(data.next.is_none()); - debug_assert!(data.prev.is_none()); - - Expired { - key, - data: data.inner, - deadline: self.start + Duration::from_millis(data.when), - } - })) - } - - /// Inserts `value` into the queue set to expire after the requested duration - /// elapses. - /// - /// This function is identical to `insert_at`, but takes a `Duration` - /// instead of an `Instant`. - /// - /// `value` is stored in the queue until `timeout` duration has - /// elapsed after `insert` was called. At that point, `value` will - /// be returned from [`poll_expired`]. If `timeout` is a `Duration` of - /// zero, then `value` is immediately made available to poll. 
- /// - /// The return value represents the insertion and is used as an - /// argument to [`remove`] and [`reset`]. Note that [`Key`] is a - /// token and is reused once `value` is removed from the queue - /// either by calling [`poll_expired`] after `timeout` has elapsed - /// or by calling [`remove`]. At this point, the caller must not - /// use the returned [`Key`] again as it may reference a different - /// item in the queue. - /// - /// See [type] level documentation for more details. - /// - /// # Panics - /// - /// This function panics if `timeout` is greater than the maximum - /// duration supported by the timer in the current `Runtime`. - /// - /// # Examples - /// - /// Basic usage - /// - /// ```rust - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert("foo", Duration::from_secs(5)); - /// - /// // Remove the entry - /// let item = delay_queue.remove(&key); - /// assert_eq!(*item.get_ref(), "foo"); - /// # } - /// ``` - /// - /// [`poll_expired`]: method@Self::poll_expired - /// [`remove`]: method@Self::remove - /// [`reset`]: method@Self::reset - /// [`Key`]: struct@Key - /// [type]: # - #[track_caller] - pub fn insert(&mut self, value: T, timeout: Duration) -> Key { - self.insert_at(value, Instant::now() + timeout) - } - - #[track_caller] - fn insert_idx(&mut self, when: u64, key: Key) { - use self::wheel::{InsertError, Stack}; - - // Register the deadline with the timer wheel - match self.wheel.insert(when, key, &mut self.slab) { - Ok(_) => {} - Err((_, InsertError::Elapsed)) => { - self.slab[key].expired = true; - // The delay is already expired, store it in the expired queue - self.expired.push(key, &mut self.slab); - } - Err((_, err)) => panic!("invalid deadline; err={:?}", err), - } - } - - /// Removes the key from the expired queue or the timer wheel - /// depending on its expiration status. 
- /// - /// # Panics - /// - /// Panics if the key is not contained in the expired queue or the wheel. - #[track_caller] - fn remove_key(&mut self, key: &Key) { - use crate::time::wheel::Stack; - - // Special case the `expired` queue - if self.slab[*key].expired { - self.expired.remove(key, &mut self.slab); - } else { - self.wheel.remove(key, &mut self.slab); - } - } - - /// Removes the item associated with `key` from the queue. - /// - /// There must be an item associated with `key`. The function returns the - /// removed item as well as the `Instant` at which it will the delay will - /// have expired. - /// - /// # Panics - /// - /// The function panics if `key` is not contained by the queue. - /// - /// # Examples - /// - /// Basic usage - /// - /// ```rust - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert("foo", Duration::from_secs(5)); - /// - /// // Remove the entry - /// let item = delay_queue.remove(&key); - /// assert_eq!(*item.get_ref(), "foo"); - /// # } - /// ``` - #[track_caller] - pub fn remove(&mut self, key: &Key) -> Expired { - let prev_deadline = self.next_deadline(); - - self.remove_key(key); - let data = self.slab.remove(key); - - let next_deadline = self.next_deadline(); - if prev_deadline != next_deadline { - match (next_deadline, &mut self.delay) { - (None, _) => self.delay = None, - (Some(deadline), Some(delay)) => delay.as_mut().reset(deadline), - (Some(deadline), None) => self.delay = Some(Box::pin(sleep_until(deadline))), - } - } - - Expired { - key: Key::new(key.index), - data: data.inner, - deadline: self.start + Duration::from_millis(data.when), - } - } - - /// Attempts to remove the item associated with `key` from the queue. - /// - /// Removes the item associated with `key`, and returns it along with the - /// `Instant` at which it would have expired, if it exists. 
- /// - /// Returns `None` if `key` is not in the queue. - /// - /// # Examples - /// - /// Basic usage - /// - /// ```rust - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main(flavor = "current_thread")] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert("foo", Duration::from_secs(5)); - /// - /// // The item is in the queue, `try_remove` returns `Some(Expired("foo"))`. - /// let item = delay_queue.try_remove(&key); - /// assert_eq!(item.unwrap().into_inner(), "foo"); - /// - /// // The item is not in the queue anymore, `try_remove` returns `None`. - /// let item = delay_queue.try_remove(&key); - /// assert!(item.is_none()); - /// # } - /// ``` - pub fn try_remove(&mut self, key: &Key) -> Option> { - if self.slab.contains(key) { - Some(self.remove(key)) - } else { - None - } - } - - /// Sets the delay of the item associated with `key` to expire at `when`. - /// - /// This function is identical to `reset` but takes an `Instant` instead of - /// a `Duration`. - /// - /// The item remains in the queue but the delay is set to expire at `when`. - /// If `when` is in the past, then the item is immediately made available to - /// the caller. - /// - /// # Panics - /// - /// This function panics if `when` is too far in the future or if `key` is - /// not contained by the queue. 
- /// - /// # Examples - /// - /// Basic usage - /// - /// ```rust - /// use tokio::time::{Duration, Instant}; - /// use tokio_util::time::DelayQueue; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert("foo", Duration::from_secs(5)); - /// - /// // "foo" is scheduled to be returned in 5 seconds - /// - /// delay_queue.reset_at(&key, Instant::now() + Duration::from_secs(10)); - /// - /// // "foo" is now scheduled to be returned in 10 seconds - /// # } - /// ``` - #[track_caller] - pub fn reset_at(&mut self, key: &Key, when: Instant) { - self.remove_key(key); - - // Normalize the deadline. Values cannot be set to expire in the past. - let when = self.normalize_deadline(when); - - self.slab[*key].when = when; - self.slab[*key].expired = false; - - self.insert_idx(when, *key); - - let next_deadline = self.next_deadline(); - if let (Some(ref mut delay), Some(deadline)) = (&mut self.delay, next_deadline) { - // This should awaken us if necessary (ie, if already expired) - delay.as_mut().reset(deadline); - } - } - - /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation. - /// This function is not guaranteed to, and in most cases, won't decrease the capacity of the slab - /// to the number of elements still contained in it, because elements cannot be moved to a different - /// index. To decrease the capacity to the size of the slab use [`compact`]. - /// - /// This function can take O(n) time even when the capacity cannot be reduced or the allocation is - /// shrunk in place. Repeated calls run in O(1) though. - /// - /// [`compact`]: method@Self::compact - pub fn shrink_to_fit(&mut self) { - self.slab.shrink_to_fit(); - } - - /// Shrink the capacity of the slab, which `DelayQueue` uses internally for storage allocation, - /// to the number of elements that are contained in it. - /// - /// This methods runs in O(n). 
- /// - /// # Examples - /// - /// Basic usage - /// - /// ```rust - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::with_capacity(10); - /// - /// let key1 = delay_queue.insert(5, Duration::from_secs(5)); - /// let key2 = delay_queue.insert(10, Duration::from_secs(10)); - /// let key3 = delay_queue.insert(15, Duration::from_secs(15)); - /// - /// delay_queue.remove(&key2); - /// - /// delay_queue.compact(); - /// assert_eq!(delay_queue.capacity(), 2); - /// # } - /// ``` - pub fn compact(&mut self) { - self.slab.compact(); - } - - /// Gets the [`Key`] that [`poll_expired`] will pull out of the queue next, without - /// pulling it out or waiting for the deadline to expire. - /// - /// Entries that have already expired may be returned in any order, but it is - /// guaranteed that this method returns them in the same order as when items - /// are popped from the `DelayQueue`. 
- /// - /// # Examples - /// - /// Basic usage - /// - /// ```rust - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// - /// let key1 = delay_queue.insert("foo", Duration::from_secs(10)); - /// let key2 = delay_queue.insert("bar", Duration::from_secs(5)); - /// let key3 = delay_queue.insert("baz", Duration::from_secs(15)); - /// - /// assert_eq!(delay_queue.peek().unwrap(), key2); - /// # } - /// ``` - /// - /// [`Key`]: struct@Key - /// [`poll_expired`]: method@Self::poll_expired - pub fn peek(&self) -> Option { - use self::wheel::Stack; - - self.expired.peek().or_else(|| self.wheel.peek()) - } - - /// Returns the next time to poll as determined by the wheel - fn next_deadline(&mut self) -> Option { - self.wheel - .poll_at() - .map(|poll_at| self.start + Duration::from_millis(poll_at)) - } - - /// Sets the delay of the item associated with `key` to expire after - /// `timeout`. - /// - /// This function is identical to `reset_at` but takes a `Duration` instead - /// of an `Instant`. - /// - /// The item remains in the queue but the delay is set to expire after - /// `timeout`. If `timeout` is zero, then the item is immediately made - /// available to the caller. - /// - /// # Panics - /// - /// This function panics if `timeout` is greater than the maximum supported - /// duration or if `key` is not contained by the queue. 
- /// - /// # Examples - /// - /// Basic usage - /// - /// ```rust - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// let key = delay_queue.insert("foo", Duration::from_secs(5)); - /// - /// // "foo" is scheduled to be returned in 5 seconds - /// - /// delay_queue.reset(&key, Duration::from_secs(10)); - /// - /// // "foo"is now scheduled to be returned in 10 seconds - /// # } - /// ``` - #[track_caller] - pub fn reset(&mut self, key: &Key, timeout: Duration) { - self.reset_at(key, Instant::now() + timeout); - } - - /// Clears the queue, removing all items. - /// - /// After calling `clear`, [`poll_expired`] will return `Ok(Ready(None))`. - /// - /// Note that this method has no effect on the allocated capacity. - /// - /// [`poll_expired`]: method@Self::poll_expired - /// - /// # Examples - /// - /// ```rust - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// - /// delay_queue.insert("foo", Duration::from_secs(5)); - /// - /// assert!(!delay_queue.is_empty()); - /// - /// delay_queue.clear(); - /// - /// assert!(delay_queue.is_empty()); - /// # } - /// ``` - pub fn clear(&mut self) { - self.slab.clear(); - self.expired = Stack::default(); - self.wheel = Wheel::new(); - self.delay = None; - } - - /// Returns the number of elements the queue can hold without reallocating. - /// - /// # Examples - /// - /// ```rust - /// use tokio_util::time::DelayQueue; - /// - /// let delay_queue: DelayQueue = DelayQueue::with_capacity(10); - /// assert_eq!(delay_queue.capacity(), 10); - /// ``` - pub fn capacity(&self) -> usize { - self.slab.capacity() - } - - /// Returns the number of elements currently in the queue. 
- /// - /// # Examples - /// - /// ```rust - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue: DelayQueue = DelayQueue::with_capacity(10); - /// assert_eq!(delay_queue.len(), 0); - /// delay_queue.insert(3, Duration::from_secs(5)); - /// assert_eq!(delay_queue.len(), 1); - /// # } - /// ``` - pub fn len(&self) -> usize { - self.slab.len() - } - - /// Reserves capacity for at least `additional` more items to be queued - /// without allocating. - /// - /// `reserve` does nothing if the queue already has sufficient capacity for - /// `additional` more values. If more capacity is required, a new segment of - /// memory will be allocated and all existing values will be copied into it. - /// As such, if the queue is already very large, a call to `reserve` can end - /// up being expensive. - /// - /// The queue may reserve more than `additional` extra space in order to - /// avoid frequent reallocations. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds the maximum number of entries the - /// queue can contain. - /// - /// # Examples - /// - /// ``` - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// - /// delay_queue.insert("hello", Duration::from_secs(10)); - /// delay_queue.reserve(10); - /// - /// assert!(delay_queue.capacity() >= 11); - /// # } - /// ``` - #[track_caller] - pub fn reserve(&mut self, additional: usize) { - assert!( - self.slab.capacity() + additional <= MAX_ENTRIES, - "max queue capacity exceeded" - ); - self.slab.reserve(additional); - } - - /// Returns `true` if there are no items in the queue. - /// - /// Note that this function returns `false` even if all items have not yet - /// expired and a call to `poll` will return `Poll::Pending`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio_util::time::DelayQueue; - /// use std::time::Duration; - /// - /// # #[tokio::main] - /// # async fn main() { - /// let mut delay_queue = DelayQueue::new(); - /// assert!(delay_queue.is_empty()); - /// - /// delay_queue.insert("hello", Duration::from_secs(5)); - /// assert!(!delay_queue.is_empty()); - /// # } - /// ``` - pub fn is_empty(&self) -> bool { - self.slab.is_empty() - } - - /// Polls the queue, returning the index of the next slot in the slab that - /// should be returned. - /// - /// A slot should be returned when the associated deadline has been reached. - fn poll_idx(&mut self, cx: &mut task::Context<'_>) -> Poll> { - use self::wheel::Stack; - - let expired = self.expired.pop(&mut self.slab); - - if expired.is_some() { - return Poll::Ready(expired); - } - - loop { - if let Some(ref mut delay) = self.delay { - if !delay.is_elapsed() { - ready!(Pin::new(&mut *delay).poll(cx)); - } - - let now = crate::time::ms(delay.deadline() - self.start, crate::time::Round::Down); - - self.wheel_now = now; - } - - // We poll the wheel to get the next value out before finding the next deadline. - let wheel_idx = self.wheel.poll(self.wheel_now, &mut self.slab); - - self.delay = self.next_deadline().map(|when| Box::pin(sleep_until(when))); - - if let Some(idx) = wheel_idx { - return Poll::Ready(Some(idx)); - } - - if self.delay.is_none() { - return Poll::Ready(None); - } - } - } - - fn normalize_deadline(&self, when: Instant) -> u64 { - let when = if when < self.start { - 0 - } else { - crate::time::ms(when - self.start, crate::time::Round::Up) - }; - - cmp::max(when, self.wheel.elapsed()) - } -} - -// We never put `T` in a `Pin`... 
-impl Unpin for DelayQueue {} - -impl Default for DelayQueue { - fn default() -> DelayQueue { - DelayQueue::new() - } -} - -impl futures_core::Stream for DelayQueue { - // DelayQueue seems much more specific, where a user may care that it - // has reached capacity, so return those errors instead of panicking. - type Item = Expired; - - fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - DelayQueue::poll_expired(self.get_mut(), cx) - } -} - -impl wheel::Stack for Stack { - type Owned = Key; - type Borrowed = Key; - type Store = SlabStorage; - - fn is_empty(&self) -> bool { - self.head.is_none() - } - - fn push(&mut self, item: Self::Owned, store: &mut Self::Store) { - // Ensure the entry is not already in a stack. - debug_assert!(store[item].next.is_none()); - debug_assert!(store[item].prev.is_none()); - - // Remove the old head entry - let old = self.head.take(); - - if let Some(idx) = old { - store[idx].prev = Some(item); - } - - store[item].next = old; - self.head = Some(item); - } - - fn pop(&mut self, store: &mut Self::Store) -> Option { - if let Some(key) = self.head { - self.head = store[key].next; - - if let Some(idx) = self.head { - store[idx].prev = None; - } - - store[key].next = None; - debug_assert!(store[key].prev.is_none()); - - Some(key) - } else { - None - } - } - - fn peek(&self) -> Option { - self.head - } - - #[track_caller] - fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store) { - let key = *item; - assert!(store.contains(item)); - - // Ensure that the entry is in fact contained by the stack - debug_assert!({ - // This walks the full linked list even if an entry is found. 
- let mut next = self.head; - let mut contains = false; - - while let Some(idx) = next { - let data = &store[idx]; - - if idx == *item { - debug_assert!(!contains); - contains = true; - } - - next = data.next; - } - - contains - }); - - if let Some(next) = store[key].next { - store[next].prev = store[key].prev; - } - - if let Some(prev) = store[key].prev { - store[prev].next = store[key].next; - } else { - self.head = store[key].next; - } - - store[key].next = None; - store[key].prev = None; - } - - fn when(item: &Self::Borrowed, store: &Self::Store) -> u64 { - store[*item].when - } -} - -impl Default for Stack { - fn default() -> Stack { - Stack { - head: None, - _p: PhantomData, - } - } -} - -impl Key { - pub(crate) fn new(index: usize) -> Key { - Key { index } - } -} - -impl KeyInternal { - pub(crate) fn new(index: usize) -> KeyInternal { - KeyInternal { index } - } -} - -impl From for KeyInternal { - fn from(item: Key) -> Self { - KeyInternal::new(item.index) - } -} - -impl From for Key { - fn from(item: KeyInternal) -> Self { - Key::new(item.index) - } -} - -impl Expired { - /// Returns a reference to the inner value. - pub fn get_ref(&self) -> &T { - &self.data - } - - /// Returns a mutable reference to the inner value. - pub fn get_mut(&mut self) -> &mut T { - &mut self.data - } - - /// Consumes `self` and returns the inner value. - pub fn into_inner(self) -> T { - self.data - } - - /// Returns the deadline that the expiration was set to. - pub fn deadline(&self) -> Instant { - self.deadline - } - - /// Returns the key that the expiration is indexed by. - pub fn key(&self) -> Key { - self.key - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/time/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/time/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/time/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/time/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,47 +0,0 @@ -//! 
Additional utilities for tracking time. -//! -//! This module provides additional utilities for executing code after a set period -//! of time. Currently there is only one: -//! -//! * `DelayQueue`: A queue where items are returned once the requested delay -//! has expired. -//! -//! This type must be used from within the context of the `Runtime`. - -use std::time::Duration; - -mod wheel; - -pub mod delay_queue; - -#[doc(inline)] -pub use delay_queue::DelayQueue; - -// ===== Internal utils ===== - -enum Round { - Up, - Down, -} - -/// Convert a `Duration` to milliseconds, rounding up and saturating at -/// `u64::MAX`. -/// -/// The saturating is fine because `u64::MAX` milliseconds are still many -/// million years. -#[inline] -fn ms(duration: Duration, round: Round) -> u64 { - const NANOS_PER_MILLI: u32 = 1_000_000; - const MILLIS_PER_SEC: u64 = 1_000; - - // Round up. - let millis = match round { - Round::Up => (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI, - Round::Down => duration.subsec_millis(), - }; - - duration - .as_secs() - .saturating_mul(MILLIS_PER_SEC) - .saturating_add(u64::from(millis)) -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/time/wheel/level.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/time/wheel/level.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/time/wheel/level.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/time/wheel/level.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,277 +0,0 @@ -use crate::time::wheel::Stack; - -use std::fmt; - -/// Wheel for a single level in the timer. This wheel contains 64 slots. -pub(crate) struct Level { - level: usize, - - /// Bit field tracking which slots currently contain entries. - /// - /// Using a bit field to track slots that contain entries allows avoiding a - /// scan to find entries. This field is updated when entries are added or - /// removed from a slot. - /// - /// The least-significant bit represents slot zero. 
- occupied: u64, - - /// Slots - slot: [T; LEVEL_MULT], -} - -/// Indicates when a slot must be processed next. -#[derive(Debug)] -pub(crate) struct Expiration { - /// The level containing the slot. - pub(crate) level: usize, - - /// The slot index. - pub(crate) slot: usize, - - /// The instant at which the slot needs to be processed. - pub(crate) deadline: u64, -} - -/// Level multiplier. -/// -/// Being a power of 2 is very important. -const LEVEL_MULT: usize = 64; - -impl Level { - pub(crate) fn new(level: usize) -> Level { - // Rust's derived implementations for arrays require that the value - // contained by the array be `Copy`. So, here we have to manually - // initialize every single slot. - macro_rules! s { - () => { - T::default() - }; - } - - Level { - level, - occupied: 0, - slot: [ - // It does not look like the necessary traits are - // derived for [T; 64]. - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - s!(), - ], - } - } - - /// Finds the slot that needs to be processed next and returns the slot and - /// `Instant` at which this slot must be processed. - pub(crate) fn next_expiration(&self, now: u64) -> Option { - // Use the `occupied` bit field to get the index of the next slot that - // needs to be processed. - let slot = match self.next_occupied_slot(now) { - Some(slot) => slot, - None => return None, - }; - - // From the slot index, calculate the `Instant` at which it needs to be - // processed. This value *must* be in the future with respect to `now`. 
- - let level_range = level_range(self.level); - let slot_range = slot_range(self.level); - - // TODO: This can probably be simplified w/ power of 2 math - let level_start = now - (now % level_range); - let mut deadline = level_start + slot as u64 * slot_range; - if deadline < now { - // A timer is in a slot "prior" to the current time. This can occur - // because we do not have an infinite hierarchy of timer levels, and - // eventually a timer scheduled for a very distant time might end up - // being placed in a slot that is beyond the end of all of the - // arrays. - // - // To deal with this, we first limit timers to being scheduled no - // more than MAX_DURATION ticks in the future; that is, they're at - // most one rotation of the top level away. Then, we force timers - // that logically would go into the top+1 level, to instead go into - // the top level's slots. - // - // What this means is that the top level's slots act as a - // pseudo-ring buffer, and we rotate around them indefinitely. If we - // compute a deadline before now, and it's the top level, it - // therefore means we're actually looking at a slot in the future. 
- debug_assert_eq!(self.level, super::NUM_LEVELS - 1); - - deadline += level_range; - } - debug_assert!( - deadline >= now, - "deadline={:016X}; now={:016X}; level={}; slot={}; occupied={:b}", - deadline, - now, - self.level, - slot, - self.occupied - ); - - Some(Expiration { - level: self.level, - slot, - deadline, - }) - } - - fn next_occupied_slot(&self, now: u64) -> Option { - if self.occupied == 0 { - return None; - } - - // Get the slot for now using Maths - let now_slot = (now / slot_range(self.level)) as usize; - let occupied = self.occupied.rotate_right(now_slot as u32); - let zeros = occupied.trailing_zeros() as usize; - let slot = (zeros + now_slot) % 64; - - Some(slot) - } - - pub(crate) fn add_entry(&mut self, when: u64, item: T::Owned, store: &mut T::Store) { - let slot = slot_for(when, self.level); - - self.slot[slot].push(item, store); - self.occupied |= occupied_bit(slot); - } - - pub(crate) fn remove_entry(&mut self, when: u64, item: &T::Borrowed, store: &mut T::Store) { - let slot = slot_for(when, self.level); - - self.slot[slot].remove(item, store); - - if self.slot[slot].is_empty() { - // The bit is currently set - debug_assert!(self.occupied & occupied_bit(slot) != 0); - - // Unset the bit - self.occupied ^= occupied_bit(slot); - } - } - - pub(crate) fn pop_entry_slot(&mut self, slot: usize, store: &mut T::Store) -> Option { - let ret = self.slot[slot].pop(store); - - if ret.is_some() && self.slot[slot].is_empty() { - // The bit is currently set - debug_assert!(self.occupied & occupied_bit(slot) != 0); - - self.occupied ^= occupied_bit(slot); - } - - ret - } - - pub(crate) fn peek_entry_slot(&self, slot: usize) -> Option { - self.slot[slot].peek() - } -} - -impl fmt::Debug for Level { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Level") - .field("occupied", &self.occupied) - .finish() - } -} - -fn occupied_bit(slot: usize) -> u64 { - 1 << slot -} - -fn slot_range(level: usize) -> u64 { - 
LEVEL_MULT.pow(level as u32) as u64 -} - -fn level_range(level: usize) -> u64 { - LEVEL_MULT as u64 * slot_range(level) -} - -/// Convert a duration (milliseconds) and a level to a slot position -fn slot_for(duration: u64, level: usize) -> usize { - ((duration >> (level * 6)) % LEVEL_MULT as u64) as usize -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::*; - - #[test] - fn test_slot_for() { - for pos in 0..64 { - assert_eq!(pos as usize, slot_for(pos, 0)); - } - - for level in 1..5 { - for pos in level..64 { - let a = pos * 64_usize.pow(level as u32); - assert_eq!(pos as usize, slot_for(a as u64, level)); - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/time/wheel/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/time/wheel/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/time/wheel/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/time/wheel/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,328 +0,0 @@ -mod level; -pub(crate) use self::level::Expiration; -use self::level::Level; - -mod stack; -pub(crate) use self::stack::Stack; - -use std::borrow::Borrow; -use std::fmt::Debug; -use std::usize; - -/// Timing wheel implementation. -/// -/// This type provides the hashed timing wheel implementation that backs `Timer` -/// and `DelayQueue`. -/// -/// The structure is generic over `T: Stack`. This allows handling timeout data -/// being stored on the heap or in a slab. In order to support the latter case, -/// the slab must be passed into each function allowing the implementation to -/// lookup timer entries. -/// -/// See `Timer` documentation for some implementation notes. -#[derive(Debug)] -pub(crate) struct Wheel { - /// The number of milliseconds elapsed since the wheel started. - elapsed: u64, - - /// Timer wheel. 
- /// - /// Levels: - /// - /// * 1 ms slots / 64 ms range - /// * 64 ms slots / ~ 4 sec range - /// * ~ 4 sec slots / ~ 4 min range - /// * ~ 4 min slots / ~ 4 hr range - /// * ~ 4 hr slots / ~ 12 day range - /// * ~ 12 day slots / ~ 2 yr range - levels: Vec>, -} - -/// Number of levels. Each level has 64 slots. By using 6 levels with 64 slots -/// each, the timer is able to track time up to 2 years into the future with a -/// precision of 1 millisecond. -const NUM_LEVELS: usize = 6; - -/// The maximum duration of a delay -const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1; - -#[derive(Debug)] -pub(crate) enum InsertError { - Elapsed, - Invalid, -} - -impl Wheel -where - T: Stack, -{ - /// Create a new timing wheel - pub(crate) fn new() -> Wheel { - let levels = (0..NUM_LEVELS).map(Level::new).collect(); - - Wheel { elapsed: 0, levels } - } - - /// Return the number of milliseconds that have elapsed since the timing - /// wheel's creation. - pub(crate) fn elapsed(&self) -> u64 { - self.elapsed - } - - /// Insert an entry into the timing wheel. - /// - /// # Arguments - /// - /// * `when`: is the instant at which the entry should be fired. It is - /// represented as the number of milliseconds since the creation - /// of the timing wheel. - /// - /// * `item`: The item to insert into the wheel. - /// - /// * `store`: The slab or `()` when using heap storage. - /// - /// # Return - /// - /// Returns `Ok` when the item is successfully inserted, `Err` otherwise. - /// - /// `Err(Elapsed)` indicates that `when` represents an instant that has - /// already passed. In this case, the caller should fire the timeout - /// immediately. - /// - /// `Err(Invalid)` indicates an invalid `when` argument as been supplied. 
- pub(crate) fn insert( - &mut self, - when: u64, - item: T::Owned, - store: &mut T::Store, - ) -> Result<(), (T::Owned, InsertError)> { - if when <= self.elapsed { - return Err((item, InsertError::Elapsed)); - } else if when - self.elapsed > MAX_DURATION { - return Err((item, InsertError::Invalid)); - } - - // Get the level at which the entry should be stored - let level = self.level_for(when); - - self.levels[level].add_entry(when, item, store); - - debug_assert!({ - self.levels[level] - .next_expiration(self.elapsed) - .map(|e| e.deadline >= self.elapsed) - .unwrap_or(true) - }); - - Ok(()) - } - - /// Remove `item` from the timing wheel. - #[track_caller] - pub(crate) fn remove(&mut self, item: &T::Borrowed, store: &mut T::Store) { - let when = T::when(item, store); - - assert!( - self.elapsed <= when, - "elapsed={}; when={}", - self.elapsed, - when - ); - - let level = self.level_for(when); - - self.levels[level].remove_entry(when, item, store); - } - - /// Instant at which to poll - pub(crate) fn poll_at(&self) -> Option { - self.next_expiration().map(|expiration| expiration.deadline) - } - - /// Next key that will expire - pub(crate) fn peek(&self) -> Option { - self.next_expiration() - .and_then(|expiration| self.peek_entry(&expiration)) - } - - /// Advances the timer up to the instant represented by `now`. - pub(crate) fn poll(&mut self, now: u64, store: &mut T::Store) -> Option { - loop { - let expiration = self.next_expiration().and_then(|expiration| { - if expiration.deadline > now { - None - } else { - Some(expiration) - } - }); - - match expiration { - Some(ref expiration) => { - if let Some(item) = self.poll_expiration(expiration, store) { - return Some(item); - } - - self.set_elapsed(expiration.deadline); - } - None => { - // in this case the poll did not indicate an expiration - // _and_ we were not able to find a next expiration in - // the current list of timers. advance to the poll's - // current time and do nothing else. 
- self.set_elapsed(now); - return None; - } - } - } - } - - /// Returns the instant at which the next timeout expires. - fn next_expiration(&self) -> Option { - // Check all levels - for level in 0..NUM_LEVELS { - if let Some(expiration) = self.levels[level].next_expiration(self.elapsed) { - // There cannot be any expirations at a higher level that happen - // before this one. - debug_assert!(self.no_expirations_before(level + 1, expiration.deadline)); - - return Some(expiration); - } - } - - None - } - - /// Used for debug assertions - fn no_expirations_before(&self, start_level: usize, before: u64) -> bool { - let mut res = true; - - for l2 in start_level..NUM_LEVELS { - if let Some(e2) = self.levels[l2].next_expiration(self.elapsed) { - if e2.deadline < before { - res = false; - } - } - } - - res - } - - /// iteratively find entries that are between the wheel's current - /// time and the expiration time. for each in that population either - /// return it for notification (in the case of the last level) or tier - /// it down to the next level (in all other cases). 
- pub(crate) fn poll_expiration( - &mut self, - expiration: &Expiration, - store: &mut T::Store, - ) -> Option { - while let Some(item) = self.pop_entry(expiration, store) { - if expiration.level == 0 { - debug_assert_eq!(T::when(item.borrow(), store), expiration.deadline); - - return Some(item); - } else { - let when = T::when(item.borrow(), store); - - let next_level = expiration.level - 1; - - self.levels[next_level].add_entry(when, item, store); - } - } - - None - } - - fn set_elapsed(&mut self, when: u64) { - assert!( - self.elapsed <= when, - "elapsed={:?}; when={:?}", - self.elapsed, - when - ); - - if when > self.elapsed { - self.elapsed = when; - } - } - - fn pop_entry(&mut self, expiration: &Expiration, store: &mut T::Store) -> Option { - self.levels[expiration.level].pop_entry_slot(expiration.slot, store) - } - - fn peek_entry(&self, expiration: &Expiration) -> Option { - self.levels[expiration.level].peek_entry_slot(expiration.slot) - } - - fn level_for(&self, when: u64) -> usize { - level_for(self.elapsed, when) - } -} - -fn level_for(elapsed: u64, when: u64) -> usize { - const SLOT_MASK: u64 = (1 << 6) - 1; - - // Mask in the trailing bits ignored by the level calculation in order to cap - // the possible leading zeros - let mut masked = elapsed ^ when | SLOT_MASK; - if masked >= MAX_DURATION { - // Fudge the timer into the top level - masked = MAX_DURATION - 1; - } - let leading_zeros = masked.leading_zeros() as usize; - let significant = 63 - leading_zeros; - significant / 6 -} - -#[cfg(all(test, not(loom)))] -mod test { - use super::*; - - #[test] - fn test_level_for() { - for pos in 0..64 { - assert_eq!( - 0, - level_for(0, pos), - "level_for({}) -- binary = {:b}", - pos, - pos - ); - } - - for level in 1..5 { - for pos in level..64 { - let a = pos * 64_usize.pow(level as u32); - assert_eq!( - level, - level_for(0, a as u64), - "level_for({}) -- binary = {:b}", - a, - a - ); - - if pos > level { - let a = a - 1; - assert_eq!( - level, - 
level_for(0, a as u64), - "level_for({}) -- binary = {:b}", - a, - a - ); - } - - if pos < 64 { - let a = a + 1; - assert_eq!( - level, - level_for(0, a as u64), - "level_for({}) -- binary = {:b}", - a, - a - ); - } - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/time/wheel/stack.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/time/wheel/stack.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/time/wheel/stack.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/time/wheel/stack.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -use std::borrow::Borrow; -use std::cmp::Eq; -use std::hash::Hash; - -/// Abstracts the stack operations needed to track timeouts. -pub(crate) trait Stack: Default { - /// Type of the item stored in the stack - type Owned: Borrow; - - /// Borrowed item - type Borrowed: Eq + Hash; - - /// Item storage, this allows a slab to be used instead of just the heap - type Store; - - /// Returns `true` if the stack is empty - fn is_empty(&self) -> bool; - - /// Push an item onto the stack - fn push(&mut self, item: Self::Owned, store: &mut Self::Store); - - /// Pop an item from the stack - fn pop(&mut self, store: &mut Self::Store) -> Option; - - /// Peek into the stack. 
- fn peek(&self) -> Option; - - fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store); - - fn when(item: &Self::Borrowed, store: &Self::Store) -> u64; -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/udp/frame.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/udp/frame.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/udp/frame.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/udp/frame.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,249 +0,0 @@ -use crate::codec::{Decoder, Encoder}; - -use futures_core::Stream; -use tokio::{io::ReadBuf, net::UdpSocket}; - -use bytes::{BufMut, BytesMut}; -use futures_core::ready; -use futures_sink::Sink; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{ - borrow::Borrow, - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, -}; -use std::{io, mem::MaybeUninit}; - -/// A unified [`Stream`] and [`Sink`] interface to an underlying `UdpSocket`, using -/// the `Encoder` and `Decoder` traits to encode and decode frames. -/// -/// Raw UDP sockets work with datagrams, but higher-level code usually wants to -/// batch these into meaningful chunks, called "frames". This method layers -/// framing on top of this socket by using the `Encoder` and `Decoder` traits to -/// handle encoding and decoding of messages frames. Note that the incoming and -/// outgoing frame types may be distinct. -/// -/// This function returns a *single* object that is both [`Stream`] and [`Sink`]; -/// grouping this into a single object is often useful for layering things which -/// require both read and write access to the underlying object. -/// -/// If you want to work more directly with the streams and sink, consider -/// calling [`split`] on the `UdpFramed` returned by this method, which will break -/// them into separate objects, allowing them to interact more easily. 
-/// -/// [`Stream`]: futures_core::Stream -/// [`Sink`]: futures_sink::Sink -/// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split -#[must_use = "sinks do nothing unless polled"] -#[derive(Debug)] -pub struct UdpFramed { - socket: T, - codec: C, - rd: BytesMut, - wr: BytesMut, - out_addr: SocketAddr, - flushed: bool, - is_readable: bool, - current_addr: Option, -} - -const INITIAL_RD_CAPACITY: usize = 64 * 1024; -const INITIAL_WR_CAPACITY: usize = 8 * 1024; - -impl Unpin for UdpFramed {} - -impl Stream for UdpFramed -where - T: Borrow, - C: Decoder, -{ - type Item = Result<(C::Item, SocketAddr), C::Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let pin = self.get_mut(); - - pin.rd.reserve(INITIAL_RD_CAPACITY); - - loop { - // Are there still bytes left in the read buffer to decode? - if pin.is_readable { - if let Some(frame) = pin.codec.decode_eof(&mut pin.rd)? { - let current_addr = pin - .current_addr - .expect("will always be set before this line is called"); - - return Poll::Ready(Some(Ok((frame, current_addr)))); - } - - // if this line has been reached then decode has returned `None`. - pin.is_readable = false; - pin.rd.clear(); - } - - // We're out of data. Try and fetch more data to decode - let addr = { - // Safety: `chunk_mut()` returns a `&mut UninitSlice`, and `UninitSlice` is a - // transparent wrapper around `[MaybeUninit]`. - let buf = unsafe { &mut *(pin.rd.chunk_mut() as *mut _ as *mut [MaybeUninit]) }; - let mut read = ReadBuf::uninit(buf); - let ptr = read.filled().as_ptr(); - let res = ready!(pin.socket.borrow().poll_recv_from(cx, &mut read)); - - assert_eq!(ptr, read.filled().as_ptr()); - let addr = res?; - - // Safety: This is guaranteed to be the number of initialized (and read) bytes due - // to the invariants provided by `ReadBuf::filled`. 
- unsafe { pin.rd.advance_mut(read.filled().len()) }; - - addr - }; - - pin.current_addr = Some(addr); - pin.is_readable = true; - } - } -} - -impl Sink<(I, SocketAddr)> for UdpFramed -where - T: Borrow, - C: Encoder, -{ - type Error = C::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if !self.flushed { - match self.poll_flush(cx)? { - Poll::Ready(()) => {} - Poll::Pending => return Poll::Pending, - } - } - - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: (I, SocketAddr)) -> Result<(), Self::Error> { - let (frame, out_addr) = item; - - let pin = self.get_mut(); - - pin.codec.encode(frame, &mut pin.wr)?; - pin.out_addr = out_addr; - pin.flushed = false; - - Ok(()) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if self.flushed { - return Poll::Ready(Ok(())); - } - - let Self { - ref socket, - ref mut out_addr, - ref mut wr, - .. - } = *self; - - let n = ready!(socket.borrow().poll_send_to(cx, wr, *out_addr))?; - - let wrote_all = n == self.wr.len(); - self.wr.clear(); - self.flushed = true; - - let res = if wrote_all { - Ok(()) - } else { - Err(io::Error::new( - io::ErrorKind::Other, - "failed to write entire datagram to socket", - ) - .into()) - }; - - Poll::Ready(res) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.poll_flush(cx))?; - Poll::Ready(Ok(())) - } -} - -impl UdpFramed -where - T: Borrow, -{ - /// Create a new `UdpFramed` backed by the given socket and codec. - /// - /// See struct level documentation for more details. - pub fn new(socket: T, codec: C) -> UdpFramed { - Self { - socket, - codec, - out_addr: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0)), - rd: BytesMut::with_capacity(INITIAL_RD_CAPACITY), - wr: BytesMut::with_capacity(INITIAL_WR_CAPACITY), - flushed: true, - is_readable: false, - current_addr: None, - } - } - - /// Returns a reference to the underlying I/O stream wrapped by `Framed`. 
- /// - /// # Note - /// - /// Care should be taken to not tamper with the underlying stream of data - /// coming in as it may corrupt the stream of frames otherwise being worked - /// with. - pub fn get_ref(&self) -> &T { - &self.socket - } - - /// Returns a mutable reference to the underlying I/O stream wrapped by `Framed`. - /// - /// # Note - /// - /// Care should be taken to not tamper with the underlying stream of data - /// coming in as it may corrupt the stream of frames otherwise being worked - /// with. - pub fn get_mut(&mut self) -> &mut T { - &mut self.socket - } - - /// Returns a reference to the underlying codec wrapped by - /// `Framed`. - /// - /// Note that care should be taken to not tamper with the underlying codec - /// as it may corrupt the stream of frames otherwise being worked with. - pub fn codec(&self) -> &C { - &self.codec - } - - /// Returns a mutable reference to the underlying codec wrapped by - /// `UdpFramed`. - /// - /// Note that care should be taken to not tamper with the underlying codec - /// as it may corrupt the stream of frames otherwise being worked with. - pub fn codec_mut(&mut self) -> &mut C { - &mut self.codec - } - - /// Returns a reference to the read buffer. - pub fn read_buffer(&self) -> &BytesMut { - &self.rd - } - - /// Returns a mutable reference to the read buffer. - pub fn read_buffer_mut(&mut self) -> &mut BytesMut { - &mut self.rd - } - - /// Consumes the `Framed`, returning its underlying I/O stream. - pub fn into_inner(self) -> T { - self.socket - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/udp/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/udp/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/udp/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/udp/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,4 +0,0 @@ -//! 
UDP framing - -mod frame; -pub use frame::UdpFramed; diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/util/maybe_dangling.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/util/maybe_dangling.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/util/maybe_dangling.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/util/maybe_dangling.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,67 +0,0 @@ -use core::future::Future; -use core::mem::MaybeUninit; -use core::pin::Pin; -use core::task::{Context, Poll}; - -/// A wrapper type that tells the compiler that the contents might not be valid. -/// -/// This is necessary mainly when `T` contains a reference. In that case, the -/// compiler will sometimes assume that the reference is always valid; in some -/// cases it will assume this even after the destructor of `T` runs. For -/// example, when a reference is used as a function argument, then the compiler -/// will assume that the reference is valid until the function returns, even if -/// the reference is destroyed during the function. When the reference is used -/// as part of a self-referential struct, that assumption can be false. Wrapping -/// the reference in this type prevents the compiler from making that -/// assumption. -/// -/// # Invariants -/// -/// The `MaybeUninit` will always contain a valid value until the destructor runs. -// -// Reference -// See -// -// TODO: replace this with an official solution once RFC #3336 or similar is available. -// -#[repr(transparent)] -pub(crate) struct MaybeDangling(MaybeUninit); - -impl Drop for MaybeDangling { - fn drop(&mut self) { - // Safety: `0` is always initialized. 
- unsafe { core::ptr::drop_in_place(self.0.as_mut_ptr()) }; - } -} - -impl MaybeDangling { - pub(crate) fn new(inner: T) -> Self { - Self(MaybeUninit::new(inner)) - } -} - -impl Future for MaybeDangling { - type Output = F::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // Safety: `0` is always initialized. - let fut = unsafe { self.map_unchecked_mut(|this| this.0.assume_init_mut()) }; - fut.poll(cx) - } -} - -#[test] -fn maybedangling_runs_drop() { - struct SetOnDrop<'a>(&'a mut bool); - - impl Drop for SetOnDrop<'_> { - fn drop(&mut self) { - *self.0 = true; - } - } - - let mut success = false; - - drop(MaybeDangling::new(SetOnDrop(&mut success))); - assert!(success); -} diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/util/mod.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/util/mod.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/util/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/util/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,8 +0,0 @@ -mod maybe_dangling; -#[cfg(any(feature = "io", feature = "codec"))] -mod poll_buf; - -pub(crate) use maybe_dangling::MaybeDangling; -#[cfg(any(feature = "io", feature = "codec"))] -#[cfg_attr(not(feature = "io"), allow(unreachable_pub))] -pub use poll_buf::{poll_read_buf, poll_write_buf}; diff -Nru s390-tools-2.31.0/rust-vendor/tokio-util/src/util/poll_buf.rs s390-tools-2.33.1/rust-vendor/tokio-util/src/util/poll_buf.rs --- s390-tools-2.31.0/rust-vendor/tokio-util/src/util/poll_buf.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tokio-util/src/util/poll_buf.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,145 +0,0 @@ -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; - -use bytes::{Buf, BufMut}; -use futures_core::ready; -use std::io::{self, IoSlice}; -use std::mem::MaybeUninit; -use std::pin::Pin; -use std::task::{Context, Poll}; - -/// Try to read data from an `AsyncRead` into an implementer of the [`BufMut`] trait. 
-/// -/// [`BufMut`]: bytes::Buf -/// -/// # Example -/// -/// ``` -/// use bytes::{Bytes, BytesMut}; -/// use tokio_stream as stream; -/// use tokio::io::Result; -/// use tokio_util::io::{StreamReader, poll_read_buf}; -/// use futures::future::poll_fn; -/// use std::pin::Pin; -/// # #[tokio::main] -/// # async fn main() -> std::io::Result<()> { -/// -/// // Create a reader from an iterator. This particular reader will always be -/// // ready. -/// let mut read = StreamReader::new(stream::iter(vec![Result::Ok(Bytes::from_static(&[0, 1, 2, 3]))])); -/// -/// let mut buf = BytesMut::new(); -/// let mut reads = 0; -/// -/// loop { -/// reads += 1; -/// let n = poll_fn(|cx| poll_read_buf(Pin::new(&mut read), cx, &mut buf)).await?; -/// -/// if n == 0 { -/// break; -/// } -/// } -/// -/// // one or more reads might be necessary. -/// assert!(reads >= 1); -/// assert_eq!(&buf[..], &[0, 1, 2, 3]); -/// # Ok(()) -/// # } -/// ``` -#[cfg_attr(not(feature = "io"), allow(unreachable_pub))] -pub fn poll_read_buf( - io: Pin<&mut T>, - cx: &mut Context<'_>, - buf: &mut B, -) -> Poll> { - if !buf.has_remaining_mut() { - return Poll::Ready(Ok(0)); - } - - let n = { - let dst = buf.chunk_mut(); - - // Safety: `chunk_mut()` returns a `&mut UninitSlice`, and `UninitSlice` is a - // transparent wrapper around `[MaybeUninit]`. - let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit]) }; - let mut buf = ReadBuf::uninit(dst); - let ptr = buf.filled().as_ptr(); - ready!(io.poll_read(cx, &mut buf)?); - - // Ensure the pointer does not change from under us - assert_eq!(ptr, buf.filled().as_ptr()); - buf.filled().len() - }; - - // Safety: This is guaranteed to be the number of initialized (and read) - // bytes due to the invariants provided by `ReadBuf::filled`. - unsafe { - buf.advance_mut(n); - } - - Poll::Ready(Ok(n)) -} - -/// Try to write data from an implementer of the [`Buf`] trait to an -/// [`AsyncWrite`], advancing the buffer's internal cursor. 
-/// -/// This function will use [vectored writes] when the [`AsyncWrite`] supports -/// vectored writes. -/// -/// # Examples -/// -/// [`File`] implements [`AsyncWrite`] and [`Cursor<&[u8]>`] implements -/// [`Buf`]: -/// -/// ```no_run -/// use tokio_util::io::poll_write_buf; -/// use tokio::io; -/// use tokio::fs::File; -/// -/// use bytes::Buf; -/// use std::io::Cursor; -/// use std::pin::Pin; -/// use futures::future::poll_fn; -/// -/// #[tokio::main] -/// async fn main() -> io::Result<()> { -/// let mut file = File::create("foo.txt").await?; -/// let mut buf = Cursor::new(b"data to write"); -/// -/// // Loop until the entire contents of the buffer are written to -/// // the file. -/// while buf.has_remaining() { -/// poll_fn(|cx| poll_write_buf(Pin::new(&mut file), cx, &mut buf)).await?; -/// } -/// -/// Ok(()) -/// } -/// ``` -/// -/// [`Buf`]: bytes::Buf -/// [`AsyncWrite`]: tokio::io::AsyncWrite -/// [`File`]: tokio::fs::File -/// [vectored writes]: tokio::io::AsyncWrite::poll_write_vectored -#[cfg_attr(not(feature = "io"), allow(unreachable_pub))] -pub fn poll_write_buf( - io: Pin<&mut T>, - cx: &mut Context<'_>, - buf: &mut B, -) -> Poll> { - const MAX_BUFS: usize = 64; - - if !buf.has_remaining() { - return Poll::Ready(Ok(0)); - } - - let n = if io.is_write_vectored() { - let mut slices = [IoSlice::new(&[]); MAX_BUFS]; - let cnt = buf.chunks_vectored(&mut slices); - ready!(io.poll_write_vectored(cx, &slices[..cnt]))? - } else { - ready!(io.poll_write(cx, buf.chunk()))? 
- }; - - buf.advance(n); - - Poll::Ready(Ok(n)) -} diff -Nru s390-tools-2.31.0/rust-vendor/tower-service/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/tower-service/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/tower-service/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tower-service/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/tower-service/Cargo.toml s390-tools-2.33.1/rust-vendor/tower-service/Cargo.toml --- s390-tools-2.31.0/rust-vendor/tower-service/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tower-service/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,46 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "tower-service" -version = "0.3.2" -authors = ["Tower Maintainers "] -description = """ -Trait representing an asynchronous, request / response based, client or server. 
-""" -homepage = "https://github.com/tower-rs/tower" -documentation = "https://docs.rs/tower-service/0.3.2" -readme = "README.md" -categories = [ - "asynchronous", - "network-programming", -] -license = "MIT" -repository = "https://github.com/tower-rs/tower" - -[dependencies] - -[dev-dependencies.futures] -version = "0.3" - -[dev-dependencies.http] -version = "0.2" - -[dev-dependencies.tokio] -version = "1" -features = [ - "macros", - "time", -] - -[dev-dependencies.tower-layer] -version = "0.3" diff -Nru s390-tools-2.31.0/rust-vendor/tower-service/CHANGELOG.md s390-tools-2.33.1/rust-vendor/tower-service/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/tower-service/CHANGELOG.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tower-service/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,63 +0,0 @@ -# Unreleased - -- None - -# 0.3.2 (June 17, 2022) - -## Added - -- **docs**: Clarify subtlety around cloning and readiness in the `Service` docs - ([#548]) -- **docs**: Clarify details around shared resource consumption in `poll_ready()` - ([#662]) - - -[#548]: https://github.com/tower-rs/tower/pull/548 -[#662]: https://github.com/tower-rs/tower/pull/662 - - -# 0.3.1 (November 29, 2019) - -- Improve example in `Service` docs. ([#510]) - -[#510]: https://github.com/tower-rs/tower/pull/510 - -# 0.3.0 (November 29, 2019) - -- Update to `futures 0.3`. -- Update documentation for `std::future::Future`. - -# 0.3.0-alpha.2 (September 30, 2019) - -- Documentation fixes. - -# 0.3.0-alpha.1 (Aug 20, 2019) - -* Switch to `std::future::Future` - -# 0.2.0 (Dec 12, 2018) - -* Change `Service`'s `Request` associated type to be a generic instead. - * Before: - - ```rust - impl Service for Client { - type Request = HttpRequest; - type Response = HttpResponse; - // ... - } - ``` - * After: - - ```rust - impl Service for Client { - type Response = HttpResponse; - // ... - } - ``` -* Remove `NewService`, use `tower_util::MakeService` instead. 
-* Remove `Service::ready` and `Ready`, use `tower_util::ServiceExt` instead. - -# 0.1.0 (Aug 9, 2018) - -* Initial release diff -Nru s390-tools-2.31.0/rust-vendor/tower-service/LICENSE s390-tools-2.33.1/rust-vendor/tower-service/LICENSE --- s390-tools-2.31.0/rust-vendor/tower-service/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tower-service/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2019 Tower Contributors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/tower-service/README.md s390-tools-2.33.1/rust-vendor/tower-service/README.md --- s390-tools-2.31.0/rust-vendor/tower-service/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tower-service/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,56 +0,0 @@ -# Tower Service - -The foundational `Service` trait that [Tower] is based on. 
- -[![Crates.io][crates-badge]][crates-url] -[![Documentation][docs-badge]][docs-url] -[![Documentation (master)][docs-master-badge]][docs-master-url] -[![MIT licensed][mit-badge]][mit-url] -[![Build Status][actions-badge]][actions-url] -[![Discord chat][discord-badge]][discord-url] - -[crates-badge]: https://img.shields.io/crates/v/tower-service.svg -[crates-url]: https://crates.io/crates/tower-service -[docs-badge]: https://docs.rs/tower-service/badge.svg -[docs-url]: https://docs.rs/tower-service -[docs-master-badge]: https://img.shields.io/badge/docs-master-blue -[docs-master-url]: https://tower-rs.github.io/tower/tower_service -[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg -[mit-url]: LICENSE -[actions-badge]: https://github.com/tower-rs/tower/workflows/CI/badge.svg -[actions-url]:https://github.com/tower-rs/tower/actions?query=workflow%3ACI -[discord-badge]: https://img.shields.io/discord/500028886025895936?logo=discord&label=discord&logoColor=white -[discord-url]: https://discord.gg/EeF3cQw - -## Overview - -The [`Service`] trait provides the foundation upon which [Tower] is built. It is a -simple, but powerful trait. At its heart, `Service` is just an asynchronous -function of request to response. - -``` -async fn(Request) -> Result -``` - -Implementations of `Service` take a request, the type of which varies per -protocol, and returns a future representing the eventual completion or failure -of the response. - -Services are used to represent both clients and servers. An *instance* of -`Service` is used through a client; a server *implements* `Service`. - -By using standardizing the interface, middleware can be created. Middleware -*implement* `Service` by passing the request to another `Service`. The -middleware may take actions such as modify the request. 
- -[`Service`]: https://docs.rs/tower-service/latest/tower_service/trait.Service.html -[Tower]: https://crates.io/crates/tower -## License - -This project is licensed under the [MIT license](LICENSE). - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in Tower by you, shall be licensed as MIT, without any additional -terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/tower-service/src/lib.rs s390-tools-2.33.1/rust-vendor/tower-service/src/lib.rs --- s390-tools-2.31.0/rust-vendor/tower-service/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tower-service/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,389 +0,0 @@ -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - unreachable_pub -)] -#![forbid(unsafe_code)] -// `rustdoc::broken_intra_doc_links` is checked on CI - -//! Definition of the core `Service` trait to Tower -//! -//! The [`Service`] trait provides the necessary abstractions for defining -//! request / response clients and servers. It is simple but powerful and is -//! used as the foundation for the rest of Tower. - -use std::future::Future; -use std::task::{Context, Poll}; - -/// An asynchronous function from a `Request` to a `Response`. -/// -/// The `Service` trait is a simplified interface making it easy to write -/// network applications in a modular and reusable way, decoupled from the -/// underlying protocol. It is one of Tower's fundamental abstractions. -/// -/// # Functional -/// -/// A `Service` is a function of a `Request`. It immediately returns a -/// `Future` representing the eventual completion of processing the -/// request. The actual request processing may happen at any time in the -/// future, on any thread or executor. The processing may depend on calling -/// other services. At some point in the future, the processing will complete, -/// and the `Future` will resolve to a response or error. 
-/// -/// At a high level, the `Service::call` function represents an RPC request. The -/// `Service` value can be a server or a client. -/// -/// # Server -/// -/// An RPC server *implements* the `Service` trait. Requests received by the -/// server over the network are deserialized and then passed as an argument to the -/// server value. The returned response is sent back over the network. -/// -/// As an example, here is how an HTTP request is processed by a server: -/// -/// ```rust -/// # use std::pin::Pin; -/// # use std::task::{Poll, Context}; -/// # use std::future::Future; -/// # use tower_service::Service; -/// use http::{Request, Response, StatusCode}; -/// -/// struct HelloWorld; -/// -/// impl Service>> for HelloWorld { -/// type Response = Response>; -/// type Error = http::Error; -/// type Future = Pin>>>; -/// -/// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { -/// Poll::Ready(Ok(())) -/// } -/// -/// fn call(&mut self, req: Request>) -> Self::Future { -/// // create the body -/// let body: Vec = "hello, world!\n" -/// .as_bytes() -/// .to_owned(); -/// // Create the HTTP response -/// let resp = Response::builder() -/// .status(StatusCode::OK) -/// .body(body) -/// .expect("Unable to create `http::Response`"); -/// -/// // create a response in a future. -/// let fut = async { -/// Ok(resp) -/// }; -/// -/// // Return the response as an immediate future -/// Box::pin(fut) -/// } -/// } -/// ``` -/// -/// # Client -/// -/// A client consumes a service by using a `Service` value. The client may -/// issue requests by invoking `call` and passing the request as an argument. -/// It then receives the response by waiting for the returned future. 
-/// -/// As an example, here is how a Redis request would be issued: -/// -/// ```rust,ignore -/// let client = redis::Client::new() -/// .connect("127.0.0.1:6379".parse().unwrap()) -/// .unwrap(); -/// -/// let resp = client.call(Cmd::set("foo", "this is the value of foo")).await?; -/// -/// // Wait for the future to resolve -/// println!("Redis response: {:?}", resp); -/// ``` -/// -/// # Middleware / Layer -/// -/// More often than not, all the pieces needed for writing robust, scalable -/// network applications are the same no matter the underlying protocol. By -/// unifying the API for both clients and servers in a protocol agnostic way, -/// it is possible to write middleware that provide these pieces in a -/// reusable way. -/// -/// Take timeouts as an example: -/// -/// ```rust -/// use tower_service::Service; -/// use tower_layer::Layer; -/// use futures::FutureExt; -/// use std::future::Future; -/// use std::task::{Context, Poll}; -/// use std::time::Duration; -/// use std::pin::Pin; -/// use std::fmt; -/// use std::error::Error; -/// -/// // Our timeout service, which wraps another service and -/// // adds a timeout to its response future. 
-/// pub struct Timeout { -/// inner: T, -/// timeout: Duration, -/// } -/// -/// impl Timeout { -/// pub fn new(inner: T, timeout: Duration) -> Timeout { -/// Timeout { -/// inner, -/// timeout -/// } -/// } -/// } -/// -/// // The error returned if processing a request timed out -/// #[derive(Debug)] -/// pub struct Expired; -/// -/// impl fmt::Display for Expired { -/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { -/// write!(f, "expired") -/// } -/// } -/// -/// impl Error for Expired {} -/// -/// // We can implement `Service` for `Timeout` if `T` is a `Service` -/// impl Service for Timeout -/// where -/// T: Service, -/// T::Future: 'static, -/// T::Error: Into> + 'static, -/// T::Response: 'static, -/// { -/// // `Timeout` doesn't modify the response type, so we use `T`'s response type -/// type Response = T::Response; -/// // Errors may be either `Expired` if the timeout expired, or the inner service's -/// // `Error` type. Therefore, we return a boxed `dyn Error + Send + Sync` trait object to erase -/// // the error's type. -/// type Error = Box; -/// type Future = Pin>>>; -/// -/// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { -/// // Our timeout service is ready if the inner service is ready. -/// // This is how backpressure can be propagated through a tree of nested services. 
-/// self.inner.poll_ready(cx).map_err(Into::into) -/// } -/// -/// fn call(&mut self, req: Request) -> Self::Future { -/// // Create a future that completes after `self.timeout` -/// let timeout = tokio::time::sleep(self.timeout); -/// -/// // Call the inner service and get a future that resolves to the response -/// let fut = self.inner.call(req); -/// -/// // Wrap those two futures in another future that completes when either one completes -/// // -/// // If the inner service is too slow the `sleep` future will complete first -/// // And an error will be returned and `fut` will be dropped and not polled again -/// // -/// // We have to box the errors so the types match -/// let f = async move { -/// tokio::select! { -/// res = fut => { -/// res.map_err(|err| err.into()) -/// }, -/// _ = timeout => { -/// Err(Box::new(Expired) as Box) -/// }, -/// } -/// }; -/// -/// Box::pin(f) -/// } -/// } -/// -/// // A layer for wrapping services in `Timeout` -/// pub struct TimeoutLayer(Duration); -/// -/// impl TimeoutLayer { -/// pub fn new(delay: Duration) -> Self { -/// TimeoutLayer(delay) -/// } -/// } -/// -/// impl Layer for TimeoutLayer { -/// type Service = Timeout; -/// -/// fn layer(&self, service: S) -> Timeout { -/// Timeout::new(service, self.0) -/// } -/// } -/// ``` -/// -/// The above timeout implementation is decoupled from the underlying protocol -/// and is also decoupled from client or server concerns. In other words, the -/// same timeout middleware could be used in either a client or a server. -/// -/// # Backpressure -/// -/// Calling a `Service` which is at capacity (i.e., it is temporarily unable to process a -/// request) should result in an error. The caller is responsible for ensuring -/// that the service is ready to receive the request before calling it. -/// -/// `Service` provides a mechanism by which the caller is able to coordinate -/// readiness. 
`Service::poll_ready` returns `Ready` if the service expects that -/// it is able to process a request. -/// -/// # Be careful when cloning inner services -/// -/// Services are permitted to panic if `call` is invoked without obtaining `Poll::Ready(Ok(()))` -/// from `poll_ready`. You should therefore be careful when cloning services for example to move -/// them into boxed futures. Even though the original service is ready, the clone might not be. -/// -/// Therefore this kind of code is wrong and might panic: -/// -/// ```rust -/// # use std::pin::Pin; -/// # use std::task::{Poll, Context}; -/// # use std::future::Future; -/// # use tower_service::Service; -/// # -/// struct Wrapper { -/// inner: S, -/// } -/// -/// impl Service for Wrapper -/// where -/// S: Service + Clone + 'static, -/// R: 'static, -/// { -/// type Response = S::Response; -/// type Error = S::Error; -/// type Future = Pin>>>; -/// -/// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { -/// Poll::Ready(Ok(())) -/// } -/// -/// fn call(&mut self, req: R) -> Self::Future { -/// let mut inner = self.inner.clone(); -/// Box::pin(async move { -/// // `inner` might not be ready since its a clone -/// inner.call(req).await -/// }) -/// } -/// } -/// ``` -/// -/// You should instead use [`std::mem::replace`] to take the service that was ready: -/// -/// ```rust -/// # use std::pin::Pin; -/// # use std::task::{Poll, Context}; -/// # use std::future::Future; -/// # use tower_service::Service; -/// # -/// struct Wrapper { -/// inner: S, -/// } -/// -/// impl Service for Wrapper -/// where -/// S: Service + Clone + 'static, -/// R: 'static, -/// { -/// type Response = S::Response; -/// type Error = S::Error; -/// type Future = Pin>>>; -/// -/// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { -/// Poll::Ready(Ok(())) -/// } -/// -/// fn call(&mut self, req: R) -> Self::Future { -/// let clone = self.inner.clone(); -/// // take the service that was ready -/// let mut inner = 
std::mem::replace(&mut self.inner, clone); -/// Box::pin(async move { -/// inner.call(req).await -/// }) -/// } -/// } -/// ``` -pub trait Service { - /// Responses given by the service. - type Response; - - /// Errors produced by the service. - type Error; - - /// The future response value. - type Future: Future>; - - /// Returns `Poll::Ready(Ok(()))` when the service is able to process requests. - /// - /// If the service is at capacity, then `Poll::Pending` is returned and the task - /// is notified when the service becomes ready again. This function is - /// expected to be called while on a task. Generally, this can be done with - /// a simple `futures::future::poll_fn` call. - /// - /// If `Poll::Ready(Err(_))` is returned, the service is no longer able to service requests - /// and the caller should discard the service instance. - /// - /// Once `poll_ready` returns `Poll::Ready(Ok(()))`, a request may be dispatched to the - /// service using `call`. Until a request is dispatched, repeated calls to - /// `poll_ready` must return either `Poll::Ready(Ok(()))` or `Poll::Ready(Err(_))`. - /// - /// Note that `poll_ready` may reserve shared resources that are consumed in a subsequent - /// invocation of `call`. Thus, it is critical for implementations to not assume that `call` - /// will always be invoked and to ensure that such resources are released if the service is - /// dropped before `call` is invoked or the future returned by `call` is dropped before it - /// is polled. - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; - - /// Process the request and return the response asynchronously. - /// - /// This function is expected to be callable off task. As such, - /// implementations should take care to not call `poll_ready`. - /// - /// Before dispatching a request, `poll_ready` must be called and return - /// `Poll::Ready(Ok(()))`. 
- /// - /// # Panics - /// - /// Implementations are permitted to panic if `call` is invoked without - /// obtaining `Poll::Ready(Ok(()))` from `poll_ready`. - fn call(&mut self, req: Request) -> Self::Future; -} - -impl<'a, S, Request> Service for &'a mut S -where - S: Service + 'a, -{ - type Response = S::Response; - type Error = S::Error; - type Future = S::Future; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - (**self).poll_ready(cx) - } - - fn call(&mut self, request: Request) -> S::Future { - (**self).call(request) - } -} - -impl Service for Box -where - S: Service + ?Sized, -{ - type Response = S::Response; - type Error = S::Error; - type Future = S::Future; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - (**self).poll_ready(cx) - } - - fn call(&mut self, request: Request) -> S::Future { - (**self).call(request) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/baseline.rs s390-tools-2.33.1/rust-vendor/tracing/benches/baseline.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/baseline.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/baseline.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; - -fn bench(c: &mut Criterion) { - use std::sync::atomic::{AtomicUsize, Ordering}; - - let mut group = c.benchmark_group("comparison"); - group.bench_function("relaxed_load", |b| { - let foo = AtomicUsize::new(1); - b.iter(|| black_box(foo.load(Ordering::Relaxed))); - }); - group.bench_function("acquire_load", |b| { - let foo = AtomicUsize::new(1); - b.iter(|| black_box(foo.load(Ordering::Acquire))) - }); - group.bench_function("log", |b| { - b.iter(|| { - log::log!(log::Level::Info, "log"); - }) - }); - group.finish(); -} - -criterion_group!(benches, bench); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/dispatch_get_clone.rs 
s390-tools-2.33.1/rust-vendor/tracing/benches/dispatch_get_clone.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/dispatch_get_clone.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/dispatch_get_clone.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; - -mod shared; - -fn bench(c: &mut Criterion) { - shared::for_all_dispatches(&mut c.benchmark_group("Dispatch::get_clone"), |b| { - b.iter(|| { - let current = tracing::dispatcher::get_default(|current| current.clone()); - black_box(current); - }) - }); -} - -criterion_group!(benches, bench); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/dispatch_get_ref.rs s390-tools-2.33.1/rust-vendor/tracing/benches/dispatch_get_ref.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/dispatch_get_ref.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/dispatch_get_ref.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,16 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; - -mod shared; - -fn bench(c: &mut Criterion) { - shared::for_all_dispatches(&mut c.benchmark_group("Dispatch::get_ref"), |b| { - b.iter(|| { - tracing::dispatcher::get_default(|current| { - black_box(¤t); - }) - }) - }); -} - -criterion_group!(benches, bench); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/empty_span.rs s390-tools-2.33.1/rust-vendor/tracing/benches/empty_span.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/empty_span.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/empty_span.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,43 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; - -mod shared; - -fn bench(c: &mut Criterion) { - let mut group = c.benchmark_group("empty_span"); - shared::for_all_dispatches(&mut group, |b| { - 
b.iter(|| { - let span = tracing::span::Span::none(); - black_box(&span); - }) - }); - group.bench_function("baseline_struct", |b| { - b.iter(|| { - let span = FakeEmptySpan::new(); - black_box(&span); - }) - }); -} - -struct FakeEmptySpan { - inner: Option<(usize, std::sync::Arc<()>)>, - meta: Option<&'static ()>, -} - -impl FakeEmptySpan { - fn new() -> Self { - Self { - inner: None, - meta: None, - } - } -} - -impl Drop for FakeEmptySpan { - fn drop(&mut self) { - black_box(&self.inner); - black_box(&self.meta); - } -} - -criterion_group!(benches, bench); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/enter_span.rs s390-tools-2.33.1/rust-vendor/tracing/benches/enter_span.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/enter_span.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/enter_span.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,16 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; -use tracing::{span, Level}; - -mod shared; - -fn bench(c: &mut Criterion) { - shared::for_all_dispatches(&mut c.benchmark_group("enter_span"), |b| { - let span = span!(Level::TRACE, "span"); - b.iter(|| { - let _span = span.enter(); - }) - }); -} - -criterion_group!(benches, bench); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/event.rs s390-tools-2.33.1/rust-vendor/tracing/benches/event.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/event.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/event.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,12 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; - -mod shared; - -fn bench(c: &mut Criterion) { - shared::for_all_recording(&mut c.benchmark_group("event"), |b| { - b.iter(|| tracing::info!("hello world!")) - }); -} - -criterion_group!(benches, bench); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/shared.rs 
s390-tools-2.33.1/rust-vendor/tracing/benches/shared.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/shared.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/shared.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,160 +0,0 @@ -#![allow(dead_code)] -use criterion::{black_box, measurement::WallTime, Bencher}; -use tracing::{field, span, Event, Id, Metadata}; - -use std::{ - fmt::{self, Write}, - sync::{Mutex, MutexGuard}, -}; - -pub fn for_all_recording( - group: &mut criterion::BenchmarkGroup<'_, WallTime>, - mut iter: impl FnMut(&mut Bencher<'_, WallTime>), -) { - // first, run benchmarks with no subscriber - group.bench_function("none", &mut iter); - - // then, run benchmarks with a scoped default subscriber - tracing::subscriber::with_default(EnabledSubscriber, || { - group.bench_function("scoped", &mut iter) - }); - - let subscriber = VisitingSubscriber(Mutex::new(String::from(""))); - tracing::subscriber::with_default(subscriber, || { - group.bench_function("scoped_recording", &mut iter); - }); - - // finally, set a global default subscriber, and run the benchmarks again. - tracing::subscriber::set_global_default(EnabledSubscriber) - .expect("global default should not have already been set!"); - let _ = log::set_logger(&NOP_LOGGER); - log::set_max_level(log::LevelFilter::Trace); - group.bench_function("global", &mut iter); -} - -pub fn for_all_dispatches( - group: &mut criterion::BenchmarkGroup<'_, WallTime>, - mut iter: impl FnMut(&mut Bencher<'_, WallTime>), -) { - // first, run benchmarks with no subscriber - group.bench_function("none", &mut iter); - - // then, run benchmarks with a scoped default subscriber - tracing::subscriber::with_default(EnabledSubscriber, || { - group.bench_function("scoped", &mut iter) - }); - - // finally, set a global default subscriber, and run the benchmarks again. 
- tracing::subscriber::set_global_default(EnabledSubscriber) - .expect("global default should not have already been set!"); - let _ = log::set_logger(&NOP_LOGGER); - log::set_max_level(log::LevelFilter::Trace); - group.bench_function("global", &mut iter); -} - -const NOP_LOGGER: NopLogger = NopLogger; - -struct NopLogger; - -impl log::Log for NopLogger { - fn enabled(&self, _metadata: &log::Metadata) -> bool { - true - } - - fn log(&self, record: &log::Record) { - if self.enabled(record.metadata()) { - let mut this = self; - let _ = write!(this, "{}", record.args()); - } - } - - fn flush(&self) {} -} - -impl Write for &NopLogger { - fn write_str(&mut self, s: &str) -> std::fmt::Result { - black_box(s); - Ok(()) - } -} - -/// Simulates a subscriber that records span data. -struct VisitingSubscriber(Mutex); - -struct Visitor<'a>(MutexGuard<'a, String>); - -impl<'a> field::Visit for Visitor<'a> { - fn record_debug(&mut self, _field: &field::Field, value: &dyn fmt::Debug) { - let _ = write!(&mut *self.0, "{:?}", value); - } -} - -impl tracing::Subscriber for VisitingSubscriber { - fn new_span(&self, span: &span::Attributes<'_>) -> Id { - let mut visitor = Visitor(self.0.lock().unwrap()); - span.record(&mut visitor); - Id::from_u64(0xDEAD_FACE) - } - - fn record(&self, _span: &Id, values: &span::Record<'_>) { - let mut visitor = Visitor(self.0.lock().unwrap()); - values.record(&mut visitor); - } - - fn event(&self, event: &Event<'_>) { - let mut visitor = Visitor(self.0.lock().unwrap()); - event.record(&mut visitor); - } - - fn record_follows_from(&self, span: &Id, follows: &Id) { - let _ = (span, follows); - } - - fn enabled(&self, metadata: &Metadata<'_>) -> bool { - let _ = metadata; - true - } - - fn enter(&self, span: &Id) { - let _ = span; - } - - fn exit(&self, span: &Id) { - let _ = span; - } -} - -/// A subscriber that is enabled but otherwise does nothing. 
-struct EnabledSubscriber; - -impl tracing::Subscriber for EnabledSubscriber { - fn new_span(&self, span: &span::Attributes<'_>) -> Id { - let _ = span; - Id::from_u64(0xDEAD_FACE) - } - - fn event(&self, event: &Event<'_>) { - let _ = event; - } - - fn record(&self, span: &Id, values: &span::Record<'_>) { - let _ = (span, values); - } - - fn record_follows_from(&self, span: &Id, follows: &Id) { - let _ = (span, follows); - } - - fn enabled(&self, metadata: &Metadata<'_>) -> bool { - let _ = metadata; - true - } - - fn enter(&self, span: &Id) { - let _ = span; - } - - fn exit(&self, span: &Id) { - let _ = span; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/span_fields.rs s390-tools-2.33.1/rust-vendor/tracing/benches/span_fields.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/span_fields.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/span_fields.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,23 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; -use tracing::{span, Level}; - -mod shared; - -fn bench(c: &mut Criterion) { - shared::for_all_recording(&mut c.benchmark_group("span_fields"), |b| { - b.iter(|| { - let span = span!( - Level::TRACE, - "span", - foo = "foo", - bar = "bar", - baz = 3, - quuux = tracing::field::debug(0.99) - ); - criterion::black_box(span) - }) - }); -} - -criterion_group!(benches, bench); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/span_no_fields.rs s390-tools-2.33.1/rust-vendor/tracing/benches/span_no_fields.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/span_no_fields.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/span_no_fields.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,13 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; -use tracing::{span, Level}; - -mod shared; - -fn bench(c: &mut Criterion) { - shared::for_all_recording(&mut 
c.benchmark_group("span_no_fields"), |b| { - b.iter(|| span!(Level::TRACE, "span")) - }); -} - -criterion_group!(benches, bench); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/tracing/benches/span_repeated.rs s390-tools-2.33.1/rust-vendor/tracing/benches/span_repeated.rs --- s390-tools-2.31.0/rust-vendor/tracing/benches/span_repeated.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/benches/span_repeated.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,20 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use tracing::{span, Level}; - -mod shared; - -fn bench(c: &mut Criterion) { - shared::for_all_recording(&mut c.benchmark_group("span_repeated"), |b| { - let n = black_box(N_SPANS); - b.iter(|| (0..n).fold(mk_span(0), |_, i| mk_span(i as u64))) - }); -} - -#[inline] -fn mk_span(i: u64) -> tracing::Span { - span!(Level::TRACE, "span", i = i) -} - -const N_SPANS: usize = 100; -criterion_group!(benches, bench); -criterion_main!(benches); diff -Nru s390-tools-2.31.0/rust-vendor/tracing/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/tracing/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/tracing/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/tracing/Cargo.toml s390-tools-2.33.1/rust-vendor/tracing/Cargo.toml --- s390-tools-2.31.0/rust-vendor/tracing/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,143 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo 
and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.56.0" -name = "tracing" -version = "0.1.40" -authors = [ - "Eliza Weisman ", - "Tokio Contributors ", -] -description = """ -Application-level tracing for Rust. -""" -homepage = "https://tokio.rs" -readme = "README.md" -keywords = [ - "logging", - "tracing", - "metrics", - "async", -] -categories = [ - "development-tools::debugging", - "development-tools::profiling", - "asynchronous", - "no-std", -] -license = "MIT" -repository = "https://github.com/tokio-rs/tracing" - -[package.metadata.docs.rs] -all-features = true -rustc-args = [ - "--cfg", - "tracing_unstable", -] -rustdoc-args = [ - "--cfg", - "docsrs", - "--cfg", - "tracing_unstable", -] - -[[bench]] -name = "baseline" -harness = false - -[[bench]] -name = "dispatch_get_clone" -harness = false - -[[bench]] -name = "dispatch_get_ref" -harness = false - -[[bench]] -name = "empty_span" -harness = false - -[[bench]] -name = "enter_span" -harness = false - -[[bench]] -name = "event" -harness = false - -[[bench]] -name = "span_fields" -harness = false - -[[bench]] -name = "span_no_fields" -harness = false - -[[bench]] -name = "span_repeated" -harness = false - -[dependencies.log] -version = "0.4.17" -optional = true - -[dependencies.pin-project-lite] -version = "0.2.9" - -[dependencies.tracing-attributes] -version = "0.1.27" -optional = true - -[dependencies.tracing-core] -version = "0.1.32" -default-features = false - -[dev-dependencies.criterion] -version = "0.3.6" -default_features = false - -[dev-dependencies.futures] -version = "0.3.21" -default_features = false - -[dev-dependencies.log] -version = "0.4.17" - -[features] -async-await = [] -attributes = ["tracing-attributes"] -default = [ - "std", 
- "attributes", -] -log-always = ["log"] -max_level_debug = [] -max_level_error = [] -max_level_info = [] -max_level_off = [] -max_level_trace = [] -max_level_warn = [] -release_max_level_debug = [] -release_max_level_error = [] -release_max_level_info = [] -release_max_level_off = [] -release_max_level_trace = [] -release_max_level_warn = [] -std = ["tracing-core/std"] -valuable = ["tracing-core/valuable"] - -[target."cfg(target_arch = \"wasm32\")".dev-dependencies.wasm-bindgen-test] -version = "^0.3" - -[badges.maintenance] -status = "actively-developed" diff -Nru s390-tools-2.31.0/rust-vendor/tracing/CHANGELOG.md s390-tools-2.33.1/rust-vendor/tracing/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/tracing/CHANGELOG.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,941 +0,0 @@ -# 0.1.40 - -This release fixes a potential stack use-after-free in the -`Instrument::into_inner` method. Only uses of this method are affected by this -bug. - -### Fixed - -- Use `mem::ManuallyDrop` instead of `mem::forget` in `Instrument::into_inner` - ([#2765]) - -[#2765]: https://github.com/tokio-rs/tracing/pull/2765 - -Thanks to @cramertj and @manishearth for finding and fixing this issue! - -# 0.1.39 (October 12, 2023) - -This release adds several additional features to the `tracing` macros. In -addition, it updates the `tracing-core` dependency to [v0.1.32][core-0.1.32] and -the `tracing-attributes` dependency to [v0.1.27][attrs-0.1.27]. 
- -### Added - -- Allow constant field names in macros ([#2617]) -- Allow setting event names in macros ([#2699]) -- **core**: Allow `ValueSet`s of any length ([#2508]) - -### Changed - -- `tracing-attributes`: updated to [0.1.27][attrs-0.1.27] -- `tracing-core`: updated to [0.1.32][core-0.1.32] -- **attributes**: Bump minimum version of proc-macro2 to 1.0.60 ([#2732]) -- **attributes**: Generate less dead code for async block return type hint ([#2709]) - -### Fixed - -- Use fully qualified names in macros for items exported from std prelude - ([#2621], [#2757]) -- **attributes**: Allow [`clippy::let_with_type_underscore`] in macro-generated - code ([#2609]) -- **attributes**: Allow `unknown_lints` in macro-generated code ([#2626]) -- **attributes**: Fix a compilation error in `#[instrument]` when the `"log"` - feature is enabled ([#2599]) - -### Documented - -- Add `axum-insights` to relevant crates. ([#2713]) -- Fix link to RAI pattern crate documentation ([#2612]) -- Fix docs typos and warnings ([#2581]) -- Add `clippy-tracing` to related crates ([#2628]) -- Add `tracing-cloudwatch` to related crates ([#2667]) -- Fix deadlink to `tracing-etw` repo ([#2602]) - -[#2617]: https://github.com/tokio-rs/tracing/pull/2617 -[#2699]: https://github.com/tokio-rs/tracing/pull/2699 -[#2508]: https://github.com/tokio-rs/tracing/pull/2508 -[#2621]: https://github.com/tokio-rs/tracing/pull/2621 -[#2713]: https://github.com/tokio-rs/tracing/pull/2713 -[#2581]: https://github.com/tokio-rs/tracing/pull/2581 -[#2628]: https://github.com/tokio-rs/tracing/pull/2628 -[#2667]: https://github.com/tokio-rs/tracing/pull/2667 -[#2602]: https://github.com/tokio-rs/tracing/pull/2602 -[#2626]: https://github.com/tokio-rs/tracing/pull/2626 -[#2757]: https://github.com/tokio-rs/tracing/pull/2757 -[#2732]: https://github.com/tokio-rs/tracing/pull/2732 -[#2709]: https://github.com/tokio-rs/tracing/pull/2709 -[#2599]: https://github.com/tokio-rs/tracing/pull/2599 -[`let_with_type_underscore`]: 
http://rust-lang.github.io/rust-clippy/rust-1.70.0/index.html#let_with_type_underscore -[attrs-0.1.27]: - https://github.com/tokio-rs/tracing/releases/tag/tracing-attributes-0.1.27 -[core-0.1.32]: - https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.32 - -# 0.1.38 (April 25th, 2023) - -This `tracing` release changes the `Drop` implementation for `Instrumented` -`Future`s so that the attached `Span` is entered when dropping the `Future`. This -means that events emitted by the `Future`'s `Drop` implementation will now be -recorded within its `Span`. It also adds `#[inline]` hints to methods called in -the `event!` macro's expansion, for an improvement in both binary size and -performance. - -Additionally, this release updates the `tracing-attributes` dependency to -[v0.1.24][attrs-0.1.24], which updates the [`syn`] dependency to v2.x.x. -`tracing-attributes` v0.1.24 also includes improvements to the `#[instrument]` -macro; see [the `tracing-attributes` 0.1.24 release notes][attrs-0.1.24] for -details. 
- -### Added - -- `Instrumented` futures will now enter the attached `Span` in their `Drop` - implementation, allowing events emitted when dropping the future to occur - within the span ([#2562]) -- `#[inline]` attributes for methods called by the `event!` macros, making - generated code smaller ([#2555]) -- **attributes**: `level` argument to `#[instrument(err)]` and - `#[instrument(ret)]` to override the level of - the generated return value event ([#2335]) -- **attributes**: Improved compiler error message when `#[instrument]` is added to a `const fn` - ([#2418]) - -### Changed - -- `tracing-attributes`: updated to [0.1.24][attrs-0.1.24] -- Removed unneeded `cfg-if` dependency ([#2553]) -- **attributes**: Updated [`syn`] dependency to 2.0 ([#2516]) - -### Fixed - -- **attributes**: Fix `clippy::unreachable` warnings in `#[instrument]`-generated code ([#2356]) -- **attributes**: Removed unused "visit" feature flag from `syn` dependency ([#2530]) - -### Documented - -- **attributes**: Documented default level for `#[instrument(err)]` ([#2433]) -- **attributes**: Improved documentation for levels in `#[instrument]` ([#2350]) - -Thanks to @nitnelave, @jsgf, @Abhicodes-crypto, @LukeMathWalker, @andrewpollack, -@quad, @klensy, @davidpdrsn, @dbidwell94, @ldm0, @NobodyXu, @ilsv, and @daxpedda -for contributing to this release! 
- -[`syn`]: https://crates.io/crates/syn -[attrs-0.1.24]: - https://github.com/tokio-rs/tracing/releases/tag/tracing-attributes-0.1.24 -[#2565]: https://github.com/tokio-rs/tracing/pull/2565 -[#2555]: https://github.com/tokio-rs/tracing/pull/2555 -[#2553]: https://github.com/tokio-rs/tracing/pull/2553 -[#2335]: https://github.com/tokio-rs/tracing/pull/2335 -[#2418]: https://github.com/tokio-rs/tracing/pull/2418 -[#2516]: https://github.com/tokio-rs/tracing/pull/2516 -[#2356]: https://github.com/tokio-rs/tracing/pull/2356 -[#2530]: https://github.com/tokio-rs/tracing/pull/2530 -[#2433]: https://github.com/tokio-rs/tracing/pull/2433 -[#2350]: https://github.com/tokio-rs/tracing/pull/2350 - -# 0.1.37 (October 6, 2022) - -This release of `tracing` incorporates changes from `tracing-core` -[v0.1.30][core-0.1.30] and `tracing-attributes` [v0.1.23][attrs-0.1.23], -including the new `Subscriber::on_register_dispatch` method for performing late -initialization after a `Subscriber` is registered as a `Dispatch`, and bugfixes -for the `#[instrument]` attribute. Additionally, it fixes instances of the -`bare_trait_objects` lint, which is now a warning on `tracing`'s MSRV and will -become an error in the next edition. 
- -### Fixed - -- **attributes**: Incorrect handling of inner attributes in `#[instrument]`ed - functions ([#2307]) -- **attributes**: Incorrect location of compiler diagnostic spans generated for - type errors in `#[instrument]`ed `async fn`s ([#2270]) -- **attributes**: Updated `syn` dependency to fix compilation with `-Z - minimal-versions` ([#2246]) -- `bare_trait_objects` warning in `valueset!` macro expansion ([#2308]) - -### Added - -- **core**: `Subscriber::on_register_dispatch` method ([#2269]) -- **core**: `WeakDispatch` type and `Dispatch::downgrade()` function ([#2293]) - -### Changed - -- `tracing-core`: updated to [0.1.30][core-0.1.30] -- `tracing-attributes`: updated to [0.1.23][attrs-0.1.23] - -### Documented - -- Added [`tracing-web`] and [`reqwest-tracing`] to related crates ([#2283], - [#2331]) - -Thanks to new contributors @compiler-errors, @e-nomem, @WorldSEnder, @Xiami2012, -and @tl-rodrigo-gryzinski, as well as @jswrenn and @CAD97, for contributing to -this release! - -[core-0.1.30]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.30 -[attrs-0.1.23]: https://github.com/tokio-rs/tracing/releases/tag/tracing-attributes-0.1.23 -[`tracing-web`]: https://crates.io/crates/tracing-web/ -[`reqwest-tracing`]: https://crates.io/crates/reqwest-tracing/ -[#2246]: https://github.com/tokio-rs/tracing/pull/2246 -[#2269]: https://github.com/tokio-rs/tracing/pull/2269 -[#2283]: https://github.com/tokio-rs/tracing/pull/2283 -[#2270]: https://github.com/tokio-rs/tracing/pull/2270 -[#2293]: https://github.com/tokio-rs/tracing/pull/2293 -[#2307]: https://github.com/tokio-rs/tracing/pull/2307 -[#2308]: https://github.com/tokio-rs/tracing/pull/2308 -[#2331]: https://github.com/tokio-rs/tracing/pull/2331 - -# 0.1.36 (July 29, 2022) - -This release adds support for owned values and fat pointers as arguments to the -`Span::record` method, as well as updating the minimum `tracing-core` version -and several documentation improvements. 
- -### Fixed - -- Incorrect docs in `dispatcher::set_default` ([#2220]) -- Compilation with `-Z minimal-versions` ([#2246]) - -### Added - -- Support for owned values and fat pointers in `Span::record` ([#2212]) -- Documentation improvements ([#2208], [#2163]) - -### Changed - -- `tracing-core`: updated to [0.1.29][core-0.1.29] - -Thanks to @fredr, @cgbur, @jyn514, @matklad, and @CAD97 for contributing to this -release! - -[core-0.1.29]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.29 -[#2220]: https://github.com/tokio-rs/tracing/pull/2220 -[#2246]: https://github.com/tokio-rs/tracing/pull/2246 -[#2212]: https://github.com/tokio-rs/tracing/pull/2212 -[#2208]: https://github.com/tokio-rs/tracing/pull/2208 -[#2163]: https://github.com/tokio-rs/tracing/pull/2163 - -# 0.1.35 (June 8, 2022) - -This release reduces the overhead of callsite registration by using new -`tracing-core` APIs. - -### Added - -- Use `DefaultCallsite` to reduce callsite registration overhead ([#2083]) - -### Changed - -- `tracing-core`: updated to [0.1.27][core-0.1.27] - -[core-0.1.27]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.27 -[#2088]: https://github.com/tokio-rs/tracing/pull/2083 - -# 0.1.34 (April 14, 2022) - -This release includes bug fixes for the "log" support feature and for the use of -both scoped and global default dispatchers in the same program. - -### Fixed - -- Failure to use the global default dispatcher when a thread sets a local - default dispatcher before the global default is set ([#2065]) -- **log**: Compilation errors due to `async` block/fn futures becoming `!Send` - when the "log" feature flag is enabled ([#2073]) -- Broken links in documentation ([#2068]) - -Thanks to @ben0x539 for contributing to this release! 
- -[#2065]: https://github.com/tokio-rs/tracing/pull/2065 -[#2073]: https://github.com/tokio-rs/tracing/pull/2073 -[#2068]: https://github.com/tokio-rs/tracing/pull/2068 - -# 0.1.33 (April 9, 2022) - -This release adds new `span_enabled!` and `event_enabled!` variants of the -`enabled!` macro, for testing whether a subscriber would specifically enable a -span or an event. - -### Added - -- `span_enabled!` and `event_enabled!` macros ([#1900]) -- Several documentation improvements ([#2010], [#2012]) - -### Fixed - -- Compilation warning when compiling for <=32-bit targets (including `wasm32`) - ([#2060]) - -Thanks to @guswynn, @arifd, @hrxi, @CAD97, and @name1e5s for contributing to -this release! - -[#1900]: https://github.com/tokio-rs/tracing/pull/1900 -[#2010]: https://github.com/tokio-rs/tracing/pull/2010 -[#2012]: https://github.com/tokio-rs/tracing/pull/2012 -[#2060]: https://github.com/tokio-rs/tracing/pull/2060 - -# 0.1.32 (March 8th, 2022) - -This release reduces the overhead of creating and dropping disabled -spans significantly, which should improve performance when no `tracing` -subscriber is in use or when spans are disabled by a filter. - -### Fixed - -- **attributes**: Compilation failure with `--minimal-versions` due to a - too-permissive `syn` dependency ([#1960]) - -### Changed - -- Reduced `Drop` overhead for disabled spans ([#1974]) -- `tracing-attributes`: updated to [0.1.20][attributes-0.1.20] - -[#1974]: https://github.com/tokio-rs/tracing/pull/1974 -[#1960]: https://github.com/tokio-rs/tracing/pull/1960 -[attributes-0.1.20]: https://github.com/tokio-rs/tracing/releases/tag/tracing-attributes-0.1.20 - -# 0.1.31 (February 17th, 2022) - -This release increases the minimum supported Rust version (MSRV) to 1.49.0. In -addition, it fixes some relatively rare macro bugs. 
- -### Added - -- Added `tracing-forest` to the list of related crates ([#1935]) - -### Changed - -- Updated minimum supported Rust version (MSRV) to 1.49.0 ([#1913]) - -### Fixed - -- Fixed the `warn!` macro incorrectly generating an event with the `TRACE` level - ([#1930]) -- Fixed macro hygiene issues when used in a crate that defines its own `concat!` - macro, for real this time ([#1918]) - -Thanks to @QnnOkabayashi, @nicolaasg, and @teohhanhui for contributing to this -release! - -[#1935]: https://github.com/tokio-rs/tracing/pull/1935 -[#1913]: https://github.com/tokio-rs/tracing/pull/1913 -[#1930]: https://github.com/tokio-rs/tracing/pull/1930 -[#1918]: https://github.com/tokio-rs/tracing/pull/1918 - -# 0.1.30 (February 3rd, 2022) - -This release adds *experimental* support for recording structured field -values using the [`valuable`] crate. See [this blog post][post] for -details on `valuable`. - -Note that `valuable` support currently requires `--cfg tracing_unstable`. See -the documentation for details. - -This release also adds a new `enabled!` macro for testing if a span or event -would be enabled. - -### Added - -- **field**: Experimental support for recording field values using the - [`valuable`] crate ([#1608], [#1888], [#1887]) -- `enabled!` macro for testing if a span or event is enabled ([#1882]) - -### Changed - -- `tracing-core`: updated to [0.1.22][core-0.1.22] -- `tracing-attributes`: updated to [0.1.19][attributes-0.1.19] - -### Fixed - -- **log**: Fixed "use of moved value" compiler error when the "log" feature is - enabled ([#1823]) -- Fixed macro hygiene issues when used in a crate that defines its own `concat!` - macro ([#1842]) -- A very large number of documentation fixes and improvements. - -Thanks to @@Vlad-Scherbina, @Skepfyr, @Swatinem, @guswynn, @teohhanhui, -@xd009642, @tobz, @d-e-s-o@0b01, and @nickelc for contributing to this release! 
- -[`valuable`]: https://crates.io/crates/valuable -[post]: https://tokio.rs/blog/2021-05-valuable -[core-0.1.22]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.22 -[attributes-0.1.19]: https://github.com/tokio-rs/tracing/releases/tag/tracing-attributes-0.1.19 -[#1608]: https://github.com/tokio-rs/tracing/pull/1608 -[#1888]: https://github.com/tokio-rs/tracing/pull/1888 -[#1887]: https://github.com/tokio-rs/tracing/pull/1887 -[#1882]: https://github.com/tokio-rs/tracing/pull/1882 -[#1823]: https://github.com/tokio-rs/tracing/pull/1823 -[#1842]: https://github.com/tokio-rs/tracing/pull/1842 - -# 0.1.29 (October 5th, 2021) - -This release adds support for recording `Option where T: Value` as typed -`tracing` field values. It also includes significant performance improvements -for functions annotated with the `#[instrument]` attribute when the generated -span is disabled. - -### Changed - -- `tracing-core`: updated to v0.1.21 -- `tracing-attributes`: updated to v0.1.18 - -### Added - -- **field**: `Value` impl for `Option where T: Value` ([#1585]) -- **attributes**: - improved performance when skipping `#[instrument]`-generated - spans below the max level ([#1600], [#1605], [#1614], [#1616], [#1617]) - -### Fixed - -- **instrument**: added missing `Future` implementation for `WithSubscriber`, - making the `WithDispatch` extension trait actually useable ([#1602]) -- Documentation fixes and improvements ([#1595], [#1601], [#1597]) - -Thanks to @brianburgers, @mattiast, @DCjanus, @oli-obk, and @matklad for -contributing to this release! 
- -[#1585]: https://github.com/tokio-rs/tracing/pull/1585 -[#1595]: https://github.com/tokio-rs/tracing/pull/1596 -[#1597]: https://github.com/tokio-rs/tracing/pull/1597 -[#1600]: https://github.com/tokio-rs/tracing/pull/1600 -[#1601]: https://github.com/tokio-rs/tracing/pull/1601 -[#1602]: https://github.com/tokio-rs/tracing/pull/1602 -[#1605]: https://github.com/tokio-rs/tracing/pull/1605 -[#1614]: https://github.com/tokio-rs/tracing/pull/1614 -[#1616]: https://github.com/tokio-rs/tracing/pull/1616 -[#1617]: https://github.com/tokio-rs/tracing/pull/1617 - -# 0.1.28 (September 17th, 2021) - -This release fixes an issue where the RustDoc documentation was rendered -incorrectly. It doesn't include any actual code changes, and is very boring and -can be ignored. - -### Fixed - -- **docs**: Incorrect documentation rendering due to unclosed `
` tag - ([#1572]) - -[#1572]: https://github.com/tokio-rs/tracing/pull/1572 - -# 0.1.27 (September 13, 2021) - -This release adds a new [`Span::or_current`] method to aid in efficiently -propagating span contexts to spawned threads or tasks. Additionally, it updates -the [`tracing-core`] version to [0.1.20] and the [`tracing-attributes`] version to -[0.1.16], ensuring that a number of new features in those crates are present. - -### Fixed - -- **instrument**: Added missing `WithSubscriber` implementations for futures and - other types ([#1424]) - -### Added - -- `Span::or_current` method, to help with efficient span context propagation - ([#1538]) -- **attributes**: add `skip_all` option to `#[instrument]` ([#1548]) -- **attributes**: record primitive types as primitive values rather than as - `fmt::Debug` ([#1378]) -- **core**: `NoSubscriber`, a no-op `Subscriber` implementation - ([#1549]) -- **core**: Added `Visit::record_f64` and support for recording floating-point - values ([#1507], [#1522]) -- A large number of documentation improvements and fixes ([#1369], [#1398], - [#1435], [#1442], [#1524], [#1556]) - -Thanks to new contributors @dzvon and @mbergkvist, as well as @teozkr, -@maxburke, @LukeMathWalker, and @jsgf, for contributing to this -release! 
- -[`Span::or_current`]: https://docs.rs/tracing/0.1.27/tracing/struct.Span.html#method.or_current -[`tracing-core`]: https://crates.io/crates/tracing-core -[`tracing-attributes`]: https://crates.io/crates/tracing-attributes -[`tracing-core`]: https://crates.io/crates/tracing-core -[0.1.20]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.20 -[0.1.16]: https://github.com/tokio-rs/tracing/releases/tag/tracing-attributes-0.1.16 -[#1424]: https://github.com/tokio-rs/tracing/pull/1424 -[#1538]: https://github.com/tokio-rs/tracing/pull/1538 -[#1548]: https://github.com/tokio-rs/tracing/pull/1548 -[#1378]: https://github.com/tokio-rs/tracing/pull/1378 -[#1507]: https://github.com/tokio-rs/tracing/pull/1507 -[#1522]: https://github.com/tokio-rs/tracing/pull/1522 -[#1369]: https://github.com/tokio-rs/tracing/pull/1369 -[#1398]: https://github.com/tokio-rs/tracing/pull/1398 -[#1435]: https://github.com/tokio-rs/tracing/pull/1435 -[#1442]: https://github.com/tokio-rs/tracing/pull/1442 -[#1524]: https://github.com/tokio-rs/tracing/pull/1524 -[#1556]: https://github.com/tokio-rs/tracing/pull/1556 - -# 0.1.26 (April 30, 2021) - -### Fixed - -- **attributes**: Compatibility between `#[instrument]` and `async-trait` - v0.1.43 and newer ([#1228]) -- Several documentation fixes ([#1305], [#1344]) -### Added - -- `Subscriber` impl for `Box` ([#1358]) -- `Subscriber` impl for `Arc` ([#1374]) -- Symmetric `From` impls for existing `Into` impls on `span::Current`, `Span`, - and `Option` ([#1335], [#1338]) -- `From` implementation for `Option`, allowing `EnteredSpan` to - be used in a `span!` macro's `parent:` field ([#1325]) -- `Attributes::fields` accessor that returns the set of fields defined on a - span's `Attributes` ([#1331]) - - -Thanks to @Folyd, @nightmared, and new contributors @rmsc and @Fishrock123 for -contributing to this release! 
- -[#1227]: https://github.com/tokio-rs/tracing/pull/1228 -[#1305]: https://github.com/tokio-rs/tracing/pull/1305 -[#1325]: https://github.com/tokio-rs/tracing/pull/1325 -[#1338]: https://github.com/tokio-rs/tracing/pull/1338 -[#1344]: https://github.com/tokio-rs/tracing/pull/1344 -[#1358]: https://github.com/tokio-rs/tracing/pull/1358 -[#1374]: https://github.com/tokio-rs/tracing/pull/1374 -[#1335]: https://github.com/tokio-rs/tracing/pull/1335 -[#1331]: https://github.com/tokio-rs/tracing/pull/1331 - -# 0.1.25 (February 23, 2021) - -### Added - -- `Span::entered` method for entering a span and moving it into a guard by value - rather than borrowing it ([#1252]) - -Thanks to @matklad for contributing to this release! - -[#1252]: https://github.com/tokio-rs/tracing/pull/1252 - -# 0.1.24 (February 17, 2021) - -### Fixed - -- **attributes**: Compiler error when using `#[instrument(err)]` on functions - which return `impl Trait` ([#1236]) -- Fixed broken match arms in event macros ([#1239]) -- Documentation improvements ([#1232]) - -Thanks to @bkchr and @lfranke for contributing to this release! 
- -[#1236]: https://github.com/tokio-rs/tracing/pull/1236 -[#1239]: https://github.com/tokio-rs/tracing/pull/1239 -[#1232]: https://github.com/tokio-rs/tracing/pull/1232 - -# 0.1.23 (February 4, 2021) - -### Fixed - -- **attributes**: Compiler error when using `#[instrument(err)]` on functions - with mutable parameters ([#1167]) -- **attributes**: Missing function visibility modifier when using - `#[instrument]` with `async-trait` ([#977]) -- **attributes** Removed unused `syn` features ([#928]) -- **log**: Fixed an issue where the `tracing` macros would generate code for - events whose levels are disabled statically by the `log` crate's - `static_max_level_XXX` features ([#1175]) -- Fixed deprecations and clippy lints ([#1195]) -- Several documentation fixes and improvements ([#941], [#965], [#981], [#1146], - [#1215]) - -### Changed - -- **attributes**: `tracing-futures` dependency is no longer required when using - `#[instrument]` on async functions ([#808]) -- **attributes**: Updated `tracing-attributes` minimum dependency to v0.1.12 - ([#1222]) - -Thanks to @nagisa, @Txuritan, @TaKO8Ki, @okready, and @krojew for contributing -to this release! 
- -[#1167]: https://github.com/tokio-rs/tracing/pull/1167 -[#977]: https://github.com/tokio-rs/tracing/pull/977 -[#965]: https://github.com/tokio-rs/tracing/pull/965 -[#981]: https://github.com/tokio-rs/tracing/pull/981 -[#1215]: https://github.com/tokio-rs/tracing/pull/1215 -[#808]: https://github.com/tokio-rs/tracing/pull/808 -[#941]: https://github.com/tokio-rs/tracing/pull/941 -[#1146]: https://github.com/tokio-rs/tracing/pull/1146 -[#1175]: https://github.com/tokio-rs/tracing/pull/1175 -[#1195]: https://github.com/tokio-rs/tracing/pull/1195 -[#1222]: https://github.com/tokio-rs/tracing/pull/1222 - -# 0.1.22 (November 23, 2020) - -### Changed - -- Updated `pin-project-lite` dependency to 0.2 ([#1108]) - -[#1108]: https://github.com/tokio-rs/tracing/pull/1108 - -# 0.1.21 (September 28, 2020) - -### Fixed - -- Incorrect inlining of `Span::new`, `Span::new_root`, and `Span::new_child_of`, - which could result in `dispatcher::get_default` being inlined at the callsite - ([#994]) -- Regression where using a struct field as a span or event field when other - fields on that struct are borrowed mutably would fail to compile ([#987]) - -### Changed - -- Updated `tracing-core` to 0.1.17 ([#992]) - -### Added - -- `Instrument` trait and `Instrumented` type for attaching a `Span` to a - `Future` ([#808]) -- `Copy` implementations for `Level` and `LevelFilter` ([#992]) -- Multiple documentation fixes and improvements ([#964], [#980], [#981]) - -Thanks to @nagisa, and new contributors @SecurityInsanity, @froydnj, @jyn514 and -@TaKO8Ki for contributing to this release! 
- -[#994]: https://github.com/tokio-rs/tracing/pull/994 -[#992]: https://github.com/tokio-rs/tracing/pull/992 -[#987]: https://github.com/tokio-rs/tracing/pull/987 -[#980]: https://github.com/tokio-rs/tracing/pull/980 -[#981]: https://github.com/tokio-rs/tracing/pull/981 -[#964]: https://github.com/tokio-rs/tracing/pull/964 -[#808]: https://github.com/tokio-rs/tracing/pull/808 - -# 0.1.20 (August 24, 2020) - -### Changed - -- Significantly reduced assembly generated by macro invocations (#943) -- Updated `tracing-core` to 0.1.15 (#943) - -### Added - -- Documented minimum supported Rust version policy (#941) - -# 0.1.19 (August 10, 2020) - -### Fixed - -- Updated `tracing-core` to fix incorrect calculation of the global max level - filter (#908) - -### Added - -- **attributes**: Support for using `self` in field expressions when - instrumenting `async-trait` functions (#875) -- Several documentation improvements (#832, #881, #896, #897, #911, #913) - -Thanks to @anton-dutov, @nightmared, @mystor, and @toshokan for contributing to -this release! - -# 0.1.18 (July 31, 2020) - -### Fixed - -- Fixed a bug where `LevelFilter::OFF` (and thus also the `static_max_level_off` - feature flag) would enable *all* traces, rather than *none* (#853) -- **log**: Fixed `tracing` macros and `Span`s not checking `log::max_level` - before emitting `log` records (#870) - -### Changed - -- **macros**: Macros now check the global max level (`LevelFilter::current`) - before the per-callsite cache when determining if a span or event is enabled. 
- This significantly improves performance in some use cases (#853) -- **macros**: Simplified the code generated by macro expansion significantly, - which may improve compile times and/or `rustc` optimizatation of surrounding - code (#869, #869) -- **macros**: Macros now check the static max level before checking any runtime - filtering, improving performance when a span or event is disabled by a - `static_max_level_XXX` feature flag (#868) -- `LevelFilter` is now a re-export of the `tracing_core::LevelFilter` type, it - can now be used interchangably with the versions in `tracing-core` and - `tracing-subscriber` (#853) -- Significant performance improvements when comparing `LevelFilter`s and - `Level`s (#853) -- Updated the minimum `tracing-core` dependency to 0.1.12 (#853) - -### Added - -- **macros**: Quoted string literals may now be used as field names, to allow - fields whose names are not valid Rust identifiers (#790) -- **docs**: Several documentation improvements (#850, #857, #841) -- `LevelFilter::current()` function, which returns the highest level that any - subscriber will enable (#853) -- `Subscriber::max_level_hint` optional trait method, for setting the value - returned by `LevelFilter::current()` (#853) - -Thanks to new contributors @cuviper, @ethanboxx, @ben0x539, @dignati, -@colelawrence, and @rbtcollins for helping out with this release! 
- -# 0.1.17 (July 22, 2020) - -### Changed - -- **log**: Moved verbose span enter/exit log records to "tracing::span::active" - target, allowing them to be filtered separately (#833) -- **log**: All span lifecycle log records without fields now have the `Trace` - log filter, to guard against `log` users enabling them by default with blanket - level filtering (#833) - -### Fixed - -- **log**/**macros**: Fixed missing implicit imports of the - `tracing::field::debug` and `tracing::field::display` functions inside the - macros when the "log" feature is enabled (#835) - -# 0.1.16 (July 8, 2020) - -### Added - -- **attributes**: Support for arbitrary expressions as fields in `#[instrument]` (#672) -- **attributes**: `#[instrument]` now emits a compiler warning when ignoring unrecognized - input (#672, #786) -- Improved documentation on using `tracing` in async code (#769) - -### Changed - -- Updated `tracing-core` dependency to 0.1.11 - -### Fixed - -- **macros**: Excessive monomorphization in macros, which could lead to - longer compilation times (#787) -- **log**: Compiler warnings in macros when `log` or `log-always` features - are enabled (#753) -- Compiler error when `tracing-core/std` feature is enabled but `tracing/std` is - not (#760) - -Thanks to @nagisa for contributing to this release! - -# 0.1.15 (June 2, 2020) - -### Changed - -- **macros**: Replaced use of legacy `local_inner_macros` with `$crate::` (#740) - -### Added - -- Docs fixes and improvements (#742, #731, #730) - -Thanks to @bnjjj, @blaenk, and @LukeMathWalker for contributing to this release! 
- -# 0.1.14 (May 14, 2020) - -### Added - -- **log**: When using the [`log`] compatibility feature alongside a `tracing` - `Subscriber`, log records for spans now include span IDs (#613) -- **attributes**: Support for using `#[instrument]` on methods that are part of - [`async-trait`] trait implementations (#711) -- **attributes**: Optional `#[instrument(err)]` argument to automatically emit - an event if an instrumented function returns `Err` (#637) -- Added `#[must_use]` attribute to the guard returned by - `subscriber::set_default` (#685) - -### Changed - -- **log**: Made [`log`] records emitted by spans much less noisy when span IDs are - not available (#613) - -### Fixed - -- Several typos in the documentation (#656, #710, #715) - -Thanks to @FintanH, @shepmaster, @inanna-malick, @zekisharif, @bkchr, @majecty, -@ilana and @nightmared for contributing to this release! - -[`async-trait`]: https://crates.io/crates/async-trait -[`log`]: https://crates.io/crates/log - -# 0.1.13 (February 26, 2019) - -### Added - -- **field**: `field::Empty` type for declaring empty fields whose values will be - recorded later (#548) -- **field**: `field::Value` implementations for `Wrapping` and `NonZero*` - numbers (#538) -- **attributes**: Support for adding arbitrary literal fields to spans generated - by `#[instrument]` (#569) -- **attributes**: `#[instrument]` now emits a helpful compiler error when - attempting to skip a function parameter (#600) - -### Changed - -- **attributes**: The `#[instrument]` attribute was placed under an on-by-default - feature flag "attributes" (#603) - -### Fixed - -- Broken and unresolvable links in RustDoc (#595) - -Thanks to @oli-cosmian and @Kobzol for contributing to this release! 
- -# 0.1.12 (January 11, 2019) - -### Added - -- `Span::with_subscriber` method to access the subscriber that tracks a `Span` - (#503) -- API documentation now shows which features are required by feature-flagged - items (#523) -- Improved README examples (#496) -- Documentation links to related crates (#507) - -# 0.1.11 (December 20, 2019) - -### Added - -- `Span::is_none` method (#475) -- `LevelFilter::into_level` method (#470) -- `LevelFilter::from_level` function and `From` impl (#471) -- Documented minimum supported Rust version (#482) - -### Fixed - -- Incorrect parameter type to `Span::follows_from` that made it impossible to - call (#467) -- Missing whitespace in `log` records generated when enabling the `log` feature - flag (#484) -- Typos and missing links in documentation (#405, #423, #439) - -# 0.1.10 (October 23, 2019) - -### Added - -- Support for destructuring in arguments to `#[instrument]`ed functions (#397) -- Generated field for `self` parameters when `#[instrument]`ing methods (#397) -- Optional `skip` argument to `#[instrument]` for excluding function parameters - from generated spans (#359) -- Added `dispatcher::set_default` and `subscriber::set_default` APIs, which - return a drop guard (#388) - -### Fixed - -- Some minor documentation errors (#356, #370) - -# 0.1.9 (September 13, 2019) - -### Fixed - -- Fixed `#[instrument]`ed async functions not compiling on `nightly-2019-09-11` - or newer (#342) - -### Changed - -- Significantly reduced performance impact of skipped spans and events when a - `Subscriber` is not in use (#326) -- The `log` feature will now only cause `tracing` spans and events to emit log - records when a `Subscriber` is not in use (#346) - -### Added - -- Added support for overriding the name of the span generated by `#[instrument]` - (#330) -- `log-always` feature flag to emit log records even when a `Subscriber` is set - (#346) - -# 0.1.8 (September 3, 2019) - -### Changed - -- Reorganized and improved API documentation 
(#317) - -### Removed - -- Dev-dependencies on `ansi_term` and `humantime` crates, which were used only - for examples (#316) - -# 0.1.7 (August 30, 2019) - -### Changed - -- New (curly-brace free) event message syntax to place the message in the first - field rather than the last (#309) - -### Fixed - -- Fixed a regression causing macro stack exhaustion when the `log` feature flag - is enabled (#304) - -# 0.1.6 (August 20, 2019) - -### Added - -- `std::error::Error` as a new primitive type (#277) -- Support for mixing key-value fields and `format_args` messages without curly - braces as delimiters (#288) - -### Changed - -- `tracing-core` dependency to 0.1.5 (#294) -- `tracing-attributes` dependency to 0.1.2 (#297) - -# 0.1.5 (August 9, 2019) - -### Added - -- Support for `no-std` + `liballoc` (#263) - -### Changed - -- Using the `#[instrument]` attribute on `async fn`s no longer requires a - feature flag (#258) - -### Fixed - -- The `#[instrument]` macro now works on generic functions (#262) - -# 0.1.4 (August 8, 2019) - -### Added - -- `#[instrument]` attribute for automatically adding spans to functions (#253) - -# 0.1.3 (July 11, 2019) - -### Added - -- Log messages when a subscriber indicates that a span has closed, when the - `log` feature flag is enabled (#180). - -### Changed - -- `tracing-core` minimum dependency version to 0.1.2 (#174). - -### Fixed - -- Fixed an issue where event macro invocations with a single field, using local - variable shorthand, would recur infinitely (#166). -- Fixed uses of deprecated `tracing-core` APIs (#174). - -# 0.1.2 (July 6, 2019) - -### Added - -- `Span::none()` constructor, which does not require metadata and - returns a completely empty span (#147). -- `Span::current()` function, returning the current span if it is - known to the subscriber (#148). - -### Fixed - -- Broken macro imports when used prefixed with `tracing::` (#152). - -# 0.1.1 (July 3, 2019) - -### Changed - -- `cfg_if` dependency to 0.1.9. 
- -### Fixed - -- Compilation errors when the `log` feature is enabled (#131). -- Unclear wording and typos in documentation (#124, #128, #142). - -# 0.1.0 (June 27, 2019) - -- Initial release diff -Nru s390-tools-2.31.0/rust-vendor/tracing/LICENSE s390-tools-2.33.1/rust-vendor/tracing/LICENSE --- s390-tools-2.31.0/rust-vendor/tracing/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2019 Tokio Contributors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/tracing/README.md s390-tools-2.33.1/rust-vendor/tracing/README.md --- s390-tools-2.31.0/rust-vendor/tracing/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,467 +0,0 @@ -![Tracing — Structured, application-level diagnostics][splash] - -[splash]: https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/splash.svg - -# tracing - -Application-level tracing for Rust. - -[![Crates.io][crates-badge]][crates-url] -[![Documentation][docs-badge]][docs-url] -[![Documentation (master)][docs-master-badge]][docs-master-url] -[![MIT licensed][mit-badge]][mit-url] -[![Build Status][actions-badge]][actions-url] -[![Discord chat][discord-badge]][discord-url] - -[Documentation][docs-url] | [Chat][discord-url] - -[crates-badge]: https://img.shields.io/crates/v/tracing.svg -[crates-url]: https://crates.io/crates/tracing -[docs-badge]: https://docs.rs/tracing/badge.svg -[docs-url]: https://docs.rs/tracing -[docs-master-badge]: https://img.shields.io/badge/docs-master-blue -[docs-master-url]: https://tracing-rs.netlify.com/tracing -[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg -[mit-url]: LICENSE -[actions-badge]: https://github.com/tokio-rs/tracing/workflows/CI/badge.svg -[actions-url]:https://github.com/tokio-rs/tracing/actions?query=workflow%3ACI -[discord-badge]: https://img.shields.io/discord/500028886025895936?logo=discord&label=discord&logoColor=white -[discord-url]: https://discord.gg/EeF3cQw - -## Overview - -`tracing` is a framework for instrumenting Rust programs to collect -structured, event-based diagnostic information. - -In asynchronous systems like Tokio, interpreting traditional log messages can -often be quite challenging. Since individual tasks are multiplexed on the same -thread, associated events and log lines are intermixed making it difficult to -trace the logic flow. 
`tracing` expands upon logging-style diagnostics by -allowing libraries and applications to record structured events with additional -information about *temporality* and *causality* — unlike a log message, a span -in `tracing` has a beginning and end time, may be entered and exited by the -flow of execution, and may exist within a nested tree of similar spans. In -addition, `tracing` spans are *structured*, with the ability to record typed -data as well as textual messages. - -The `tracing` crate provides the APIs necessary for instrumenting libraries -and applications to emit trace data. - -*Compiler support: [requires `rustc` 1.56+][msrv]* - -[msrv]: #supported-rust-versions - -## Usage - -(The examples below are borrowed from the `log` crate's yak-shaving -[example](https://docs.rs/log/0.4.10/log/index.html#examples), modified to -idiomatic `tracing`.) - -### In Applications - -In order to record trace events, executables have to use a `Subscriber` -implementation compatible with `tracing`. A `Subscriber` implements a way of -collecting trace data, such as by logging it to standard output. [`tracing_subscriber`](https://docs.rs/tracing-subscriber/)'s -[`fmt` module](https://docs.rs/tracing-subscriber/0.3/tracing_subscriber/fmt/index.html) provides reasonable defaults. -Additionally, `tracing-subscriber` is able to consume messages emitted by `log`-instrumented libraries and modules. - -The simplest way to use a subscriber is to call the `set_global_default` function. - -```rust -use tracing::{info, Level}; -use tracing_subscriber::FmtSubscriber; - -fn main() { - // a builder for `FmtSubscriber`. - let subscriber = FmtSubscriber::builder() - // all spans/events with a level higher than TRACE (e.g, debug, info, warn, etc.) - // will be written to stdout. - .with_max_level(Level::TRACE) - // completes the builder. 
- .finish(); - - tracing::subscriber::set_global_default(subscriber) - .expect("setting default subscriber failed"); - - let number_of_yaks = 3; - // this creates a new event, outside of any spans. - info!(number_of_yaks, "preparing to shave yaks"); - - let number_shaved = yak_shave::shave_all(number_of_yaks); - info!( - all_yaks_shaved = number_shaved == number_of_yaks, - "yak shaving completed." - ); -} -``` - -```toml -[dependencies] -tracing = "0.1" -tracing-subscriber = "0.3.0" -``` - -This subscriber will be used as the default in all threads for the remainder of the duration -of the program, similar to how loggers work in the `log` crate. - -In addition, you can locally override the default subscriber. For example: - -```rust -use tracing::{info, Level}; -use tracing_subscriber::FmtSubscriber; - -fn main() { - let subscriber = tracing_subscriber::FmtSubscriber::builder() - // all spans/events with a level higher than TRACE (e.g, debug, info, warn, etc.) - // will be written to stdout. - .with_max_level(Level::TRACE) - // builds the subscriber. - .finish(); - - tracing::subscriber::with_default(subscriber, || { - info!("This will be logged to stdout"); - }); - info!("This will _not_ be logged to stdout"); -} -``` - -This approach allows trace data to be collected by multiple subscribers -within different contexts in the program. Note that the override only applies to the -currently executing thread; other threads will not see the change from with_default. - -Any trace events generated outside the context of a subscriber will not be collected. - -Once a subscriber has been set, instrumentation points may be added to the -executable using the `tracing` crate's macros. - -### In Libraries - -Libraries should only rely on the `tracing` crate and use the provided macros -and types to collect whatever information might be useful to downstream consumers. 
- -```rust -use std::{error::Error, io}; -use tracing::{debug, error, info, span, warn, Level}; - -// the `#[tracing::instrument]` attribute creates and enters a span -// every time the instrumented function is called. The span is named after the -// the function or method. Paramaters passed to the function are recorded as fields. -#[tracing::instrument] -pub fn shave(yak: usize) -> Result<(), Box> { - // this creates an event at the DEBUG level with two fields: - // - `excitement`, with the key "excitement" and the value "yay!" - // - `message`, with the key "message" and the value "hello! I'm gonna shave a yak." - // - // unlike other fields, `message`'s shorthand initialization is just the string itself. - debug!(excitement = "yay!", "hello! I'm gonna shave a yak."); - if yak == 3 { - warn!("could not locate yak!"); - // note that this is intended to demonstrate `tracing`'s features, not idiomatic - // error handling! in a library or application, you should consider returning - // a dedicated `YakError`. libraries like snafu or thiserror make this easy. - return Err(io::Error::new(io::ErrorKind::Other, "shaving yak failed!").into()); - } else { - debug!("yak shaved successfully"); - } - Ok(()) -} - -pub fn shave_all(yaks: usize) -> usize { - // Constructs a new span named "shaving_yaks" at the TRACE level, - // and a field whose key is "yaks". This is equivalent to writing: - // - // let span = span!(Level::TRACE, "shaving_yaks", yaks = yaks); - // - // local variables (`yaks`) can be used as field values - // without an assignment, similar to struct initializers. - let _span_ = span!(Level::TRACE, "shaving_yaks", yaks).entered(); - - info!("shaving yaks"); - - let mut yaks_shaved = 0; - for yak in 1..=yaks { - let res = shave(yak); - debug!(yak, shaved = res.is_ok()); - - if let Err(ref error) = res { - // Like spans, events can also use the field initialization shorthand. - // In this instance, `yak` is the field being initalized. 
- error!(yak, error = error.as_ref(), "failed to shave yak!"); - } else { - yaks_shaved += 1; - } - debug!(yaks_shaved); - } - - yaks_shaved -} -``` - -```toml -[dependencies] -tracing = "0.1" -``` - -Note: Libraries should *NOT* call `set_global_default()`, as this will cause -conflicts when executables try to set the default later. - -### In Asynchronous Code - -If you are instrumenting code that make use of -[`std::future::Future`](https://doc.rust-lang.org/stable/std/future/trait.Future.html) -or async/await, avoid using the `Span::enter` method. The following example -_will not_ work: - -```rust -async { - let _s = span.enter(); - // ... -} -``` -```rust -async { - let _s = tracing::span!(...).entered(); - // ... -} -``` - -The span guard `_s` will not exit until the future generated by the `async` block is complete. -Since futures and spans can be entered and exited _multiple_ times without them completing, -the span remains entered for as long as the future exists, rather than being entered only when -it is polled, leading to very confusing and incorrect output. -For more details, see [the documentation on closing spans](https://tracing.rs/tracing/span/index.html#closing-spans). - -There are two ways to instrument asynchronous code. The first is through the -[`Future::instrument`](https://docs.rs/tracing/latest/tracing/trait.Instrument.html#method.instrument) combinator: - -```rust -use tracing::Instrument; - -let my_future = async { - // ... -}; - -my_future - .instrument(tracing::info_span!("my_future")) - .await -``` - -`Future::instrument` attaches a span to the future, ensuring that the span's lifetime -is as long as the future's. 
- -The second, and preferred, option is through the -[`#[instrument]`](https://docs.rs/tracing/0.1.38/tracing/attr.instrument.html) -attribute: - -```rust -use tracing::{info, instrument}; -use tokio::{io::AsyncWriteExt, net::TcpStream}; -use std::io; - -#[instrument] -async fn write(stream: &mut TcpStream) -> io::Result { - let result = stream.write(b"hello world\n").await; - info!("wrote to stream; success={:?}", result.is_ok()); - result -} -``` - -Under the hood, the `#[instrument]` macro performs the same explicit span -attachment that `Future::instrument` does. - -### Concepts - -This crate provides macros for creating `Span`s and `Event`s, which represent -periods of time and momentary events within the execution of a program, -respectively. - -As a rule of thumb, _spans_ should be used to represent discrete units of work -(e.g., a given request's lifetime in a server) or periods of time spent in a -given context (e.g., time spent interacting with an instance of an external -system, such as a database). In contrast, _events_ should be used to represent -points in time within a span — a request returned with a given status code, -_n_ new items were taken from a queue, and so on. - -`Span`s are constructed using the `span!` macro, and then _entered_ -to indicate that some code takes place within the context of that `Span`: - -```rust -use tracing::{span, Level}; - -// Construct a new span named "my span". -let mut span = span!(Level::INFO, "my span"); -span.in_scope(|| { - // Any trace events in this closure or code called by it will occur within - // the span. -}); -// Dropping the span will close it, indicating that it has ended. -``` - -The [`#[instrument]`](https://docs.rs/tracing/0.1.38/tracing/attr.instrument.html) attribute macro -can reduce some of this boilerplate: - -```rust -use tracing::{instrument}; - -#[instrument] -pub fn my_function(my_arg: usize) { - // This event will be recorded inside a span named `my_function` with the - // field `my_arg`. 
- tracing::info!("inside my_function!"); - // ... -} -``` - -The `Event` type represent an event that occurs instantaneously, and is -essentially a `Span` that cannot be entered. They are created using the `event!` -macro: - -```rust -use tracing::{event, Level}; - -event!(Level::INFO, "something has happened!"); -``` - -Users of the [`log`] crate should note that `tracing` exposes a set of macros for -creating `Event`s (`trace!`, `debug!`, `info!`, `warn!`, and `error!`) which may -be invoked with the same syntax as the similarly-named macros from the `log` -crate. Often, the process of converting a project to use `tracing` can begin -with a simple drop-in replacement. - -## Supported Rust Versions - -Tracing is built against the latest stable release. The minimum supported -version is 1.42. The current Tracing version is not guaranteed to build on Rust -versions earlier than the minimum supported version. - -Tracing follows the same compiler support policies as the rest of the Tokio -project. The current stable Rust compiler and the three most recent minor -versions before it will always be supported. For example, if the current stable -compiler version is 1.45, the minimum supported version will not be increased -past 1.42, three minor versions prior. Increasing the minimum supported compiler -version is not considered a semver breaking change as long as doing so complies -with this policy. - -## Ecosystem - -### Related Crates - -In addition to `tracing` and `tracing-core`, the [`tokio-rs/tracing`] repository -contains several additional crates designed to be used with the `tracing` ecosystem. -This includes a collection of `Subscriber` implementations, as well as utility -and adapter crates to assist in writing `Subscriber`s and instrumenting -applications. 
- -In particular, the following crates are likely to be of interest: - -- [`tracing-futures`] provides a compatibility layer with the `futures` - crate, allowing spans to be attached to `Future`s, `Stream`s, and `Executor`s. -- [`tracing-subscriber`] provides `Subscriber` implementations and - utilities for working with `Subscriber`s. This includes a [`FmtSubscriber`] - `FmtSubscriber` for logging formatted trace data to stdout, with similar - filtering and formatting to the [`env_logger`] crate. -- [`tracing-log`] provides a compatibility layer with the [`log`] crate, - allowing log messages to be recorded as `tracing` `Event`s within the - trace tree. This is useful when a project using `tracing` have - dependencies which use `log`. Note that if you're using - `tracing-subscriber`'s `FmtSubscriber`, you don't need to depend on - `tracing-log` directly. - -Additionally, there are also several third-party crates which are not -maintained by the `tokio` project. These include: - -- [`tracing-timing`] implements inter-event timing metrics on top of `tracing`. - It provides a subscriber that records the time elapsed between pairs of - `tracing` events and generates histograms. -- [`tracing-opentelemetry`] provides a subscriber for emitting traces to - [OpenTelemetry]-compatible distributed tracing systems. -- [`tracing-honeycomb`] Provides a layer that reports traces spanning multiple machines to [honeycomb.io]. Backed by [`tracing-distributed`]. -- [`tracing-distributed`] Provides a generic implementation of a layer that reports traces spanning multiple machines to some backend. -- [`tracing-actix`] provides `tracing` integration for the `actix` actor - framework. -- [`axum-insights`] provides `tracing` integration and Application insights export for the `axum` web framework. -- [`tracing-gelf`] implements a subscriber for exporting traces in Greylog - GELF format. -- [`tracing-coz`] provides integration with the [coz] causal profiler - (Linux-only). 
-- [`test-log`] takes care of initializing `tracing` for tests, based on - environment variables with an `env_logger` compatible syntax. -- [`tracing-unwrap`] provides convenience methods to report failed unwraps on `Result` or `Option` types to a `Subscriber`. -- [`diesel-tracing`] provides integration with [`diesel`] database connections. -- [`tracing-tracy`] provides a way to collect [Tracy] profiles in instrumented - applications. -- [`tracing-elastic-apm`] provides a layer for reporting traces to [Elastic APM]. -- [`tracing-etw`] provides a layer for emitting Windows [ETW] events. -- [`tracing-fluent-assertions`] provides a fluent assertions-style testing - framework for validating the behavior of `tracing` spans. -- [`sentry-tracing`] provides a layer for reporting events and traces to [Sentry]. -- [`tracing-loki`] provides a layer for shipping logs to [Grafana Loki]. -- [`tracing-logfmt`] provides a layer that formats events and spans into the logfmt format. - -If you're the maintainer of a `tracing` ecosystem crate not listed above, -please let us know! We'd love to add your project to the list! 
- -[`tracing-timing`]: https://crates.io/crates/tracing-timing -[`tracing-opentelemetry`]: https://crates.io/crates/tracing-opentelemetry -[OpenTelemetry]: https://opentelemetry.io/ -[`tracing-honeycomb`]: https://crates.io/crates/tracing-honeycomb -[`tracing-distributed`]: https://crates.io/crates/tracing-distributed -[honeycomb.io]: https://www.honeycomb.io/ -[`tracing-actix`]: https://crates.io/crates/tracing-actix -[`axum-insights`]: https://crates.io/crates/axum-insights -[`tracing-gelf`]: https://crates.io/crates/tracing-gelf -[`tracing-coz`]: https://crates.io/crates/tracing-coz -[coz]: https://github.com/plasma-umass/coz -[`test-log`]: https://crates.io/crates/test-log -[`tracing-unwrap`]: https://docs.rs/tracing-unwrap -[`diesel`]: https://crates.io/crates/diesel -[`diesel-tracing`]: https://crates.io/crates/diesel-tracing -[`tracing-tracy`]: https://crates.io/crates/tracing-tracy -[Tracy]: https://github.com/wolfpld/tracy -[`tracing-elastic-apm`]: https://crates.io/crates/tracing-elastic-apm -[Elastic APM]: https://www.elastic.co/apm -[`tracing-etw`]: https://github.com/microsoft/tracing-etw -[ETW]: https://docs.microsoft.com/en-us/windows/win32/etw/about-event-tracing -[`tracing-fluent-assertions`]: https://crates.io/crates/tracing-fluent-assertions -[`sentry-tracing`]: https://crates.io/crates/sentry-tracing -[Sentry]: https://sentry.io/welcome/ -[`tracing-loki`]: https://crates.io/crates/tracing-loki -[Grafana Loki]: https://grafana.com/oss/loki/ -[`tracing-logfmt`]: https://crates.io/crates/tracing-logfmt - -**Note:** that some of the ecosystem crates are currently unreleased and -undergoing active development. They may be less stable than `tracing` and -`tracing-core`. 
- -[`log`]: https://docs.rs/log/0.4.6/log/ -[`tokio-rs/tracing`]: https://github.com/tokio-rs/tracing -[`tracing-futures`]: https://github.com/tokio-rs/tracing/tree/master/tracing-futures -[`tracing-subscriber`]: https://github.com/tokio-rs/tracing/tree/master/tracing-subscriber -[`tracing-log`]: https://github.com/tokio-rs/tracing/tree/master/tracing-log -[`env_logger`]: https://crates.io/crates/env_logger -[`FmtSubscriber`]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/struct.Subscriber.html -[`examples`]: https://github.com/tokio-rs/tracing/tree/master/examples - -## Supported Rust Versions - -Tracing is built against the latest stable release. The minimum supported -version is 1.56. The current Tracing version is not guaranteed to build on Rust -versions earlier than the minimum supported version. - -Tracing follows the same compiler support policies as the rest of the Tokio -project. The current stable Rust compiler and the three most recent minor -versions before it will always be supported. For example, if the current stable -compiler version is 1.69, the minimum supported version will not be increased -past 1.66, three minor versions prior. Increasing the minimum supported compiler -version is not considered a semver breaking change as long as doing so complies -with this policy. - -## License - -This project is licensed under the [MIT license](LICENSE). - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in Tokio by you, shall be licensed as MIT, without any additional -terms or conditions. diff -Nru s390-tools-2.31.0/rust-vendor/tracing/src/dispatcher.rs s390-tools-2.33.1/rust-vendor/tracing/src/dispatcher.rs --- s390-tools-2.31.0/rust-vendor/tracing/src/dispatcher.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/src/dispatcher.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,145 +0,0 @@ -//! Dispatches trace events to [`Subscriber`]s. -//! 
-//! The _dispatcher_ is the component of the tracing system which is responsible -//! for forwarding trace data from the instrumentation points that generate it -//! to the subscriber that collects it. -//! -//! # Using the Trace Dispatcher -//! -//! Every thread in a program using `tracing` has a _default subscriber_. When -//! events occur, or spans are created, they are dispatched to the thread's -//! current subscriber. -//! -//! ## Setting the Default Subscriber -//! -//! By default, the current subscriber is an empty implementation that does -//! nothing. To use a subscriber implementation, it must be set as the default. -//! There are two methods for doing so: [`with_default`] and -//! [`set_global_default`]. `with_default` sets the default subscriber for the -//! duration of a scope, while `set_global_default` sets a default subscriber -//! for the entire process. -//! -//! To use either of these functions, we must first wrap our subscriber in a -//! [`Dispatch`], a cloneable, type-erased reference to a subscriber. For -//! example: -//! ```rust -//! # pub struct FooSubscriber; -//! # use tracing_core::{ -//! # dispatcher, Event, Metadata, -//! # span::{Attributes, Id, Record} -//! # }; -//! # impl tracing_core::Subscriber for FooSubscriber { -//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } -//! # fn record(&self, _: &Id, _: &Record) {} -//! # fn event(&self, _: &Event) {} -//! # fn record_follows_from(&self, _: &Id, _: &Id) {} -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn enter(&self, _: &Id) {} -//! # fn exit(&self, _: &Id) {} -//! # } -//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } -//! use dispatcher::Dispatch; -//! -//! let my_subscriber = FooSubscriber::new(); -//! let my_dispatch = Dispatch::new(my_subscriber); -//! ``` -//! Then, we can use [`with_default`] to set our `Dispatch` as the default for -//! the duration of a block: -//! ```rust -//! # pub struct FooSubscriber; -//! 
# use tracing_core::{ -//! # dispatcher, Event, Metadata, -//! # span::{Attributes, Id, Record} -//! # }; -//! # impl tracing_core::Subscriber for FooSubscriber { -//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } -//! # fn record(&self, _: &Id, _: &Record) {} -//! # fn event(&self, _: &Event) {} -//! # fn record_follows_from(&self, _: &Id, _: &Id) {} -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn enter(&self, _: &Id) {} -//! # fn exit(&self, _: &Id) {} -//! # } -//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } -//! # let my_subscriber = FooSubscriber::new(); -//! # let my_dispatch = dispatcher::Dispatch::new(my_subscriber); -//! // no default subscriber -//! -//! # #[cfg(feature = "std")] -//! dispatcher::with_default(&my_dispatch, || { -//! // my_subscriber is the default -//! }); -//! -//! // no default subscriber again -//! ``` -//! It's important to note that `with_default` will not propagate the current -//! thread's default subscriber to any threads spawned within the `with_default` -//! block. To propagate the default subscriber to new threads, either use -//! `with_default` from the new thread, or use `set_global_default`. -//! -//! As an alternative to `with_default`, we can use [`set_global_default`] to -//! set a `Dispatch` as the default for all threads, for the lifetime of the -//! program. For example: -//! ```rust -//! # pub struct FooSubscriber; -//! # use tracing_core::{ -//! # dispatcher, Event, Metadata, -//! # span::{Attributes, Id, Record} -//! # }; -//! # impl tracing_core::Subscriber for FooSubscriber { -//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } -//! # fn record(&self, _: &Id, _: &Record) {} -//! # fn event(&self, _: &Event) {} -//! # fn record_follows_from(&self, _: &Id, _: &Id) {} -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn enter(&self, _: &Id) {} -//! # fn exit(&self, _: &Id) {} -//! # } -//! 
# impl FooSubscriber { fn new() -> Self { FooSubscriber } } -//! # let my_subscriber = FooSubscriber::new(); -//! # let my_dispatch = dispatcher::Dispatch::new(my_subscriber); -//! // no default subscriber -//! -//! dispatcher::set_global_default(my_dispatch) -//! // `set_global_default` will return an error if the global default -//! // subscriber has already been set. -//! .expect("global default was already set!"); -//! -//! // `my_subscriber` is now the default -//! ``` -//! -//!
-//! Note: The thread-local scoped dispatcher (with_default)
-//! requires the Rust standard library. no_std users should
-//! use set_global_default
-//! instead.
-//! 
-//! -//! ## Accessing the Default Subscriber -//! -//! A thread's current default subscriber can be accessed using the -//! [`get_default`] function, which executes a closure with a reference to the -//! currently default `Dispatch`. This is used primarily by `tracing` -//! instrumentation. -//! -//! [`Subscriber`]: crate::Subscriber -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub use tracing_core::dispatcher::set_default; -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub use tracing_core::dispatcher::with_default; -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub use tracing_core::dispatcher::DefaultGuard; -pub use tracing_core::dispatcher::{ - get_default, set_global_default, Dispatch, SetGlobalDefaultError, WeakDispatch, -}; - -/// Private API for internal use by tracing's macros. -/// -/// This function is *not* considered part of `tracing`'s public API, and has no -/// stability guarantees. If you use it, and it breaks or disappears entirely, -/// don't say we didn;'t warn you. -#[doc(hidden)] -pub use tracing_core::dispatcher::has_been_set; diff -Nru s390-tools-2.31.0/rust-vendor/tracing/src/field.rs s390-tools-2.33.1/rust-vendor/tracing/src/field.rs --- s390-tools-2.31.0/rust-vendor/tracing/src/field.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/src/field.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,170 +0,0 @@ -//! `Span` and `Event` key-value data. -//! -//! Spans and events may be annotated with key-value data, referred to as _fields_. -//! These fields consist of a mapping from a key (corresponding to -//! a `&str` but represented internally as an array index) to a [`Value`]. -//! -//! # `Value`s and `Subscriber`s -//! -//! `Subscriber`s consume `Value`s as fields attached to [span]s or [`Event`]s. -//! The set of field keys on a given span or event is defined on its [`Metadata`]. -//! 
When a span is created, it provides [`Attributes`] to the `Subscriber`'s -//! [`new_span`] method, containing any fields whose values were provided when -//! the span was created; and may call the `Subscriber`'s [`record`] method -//! with additional [`Record`]s if values are added for more of its fields. -//! Similarly, the [`Event`] type passed to the subscriber's [`event`] method -//! will contain any fields attached to each event. -//! -//! `tracing` represents values as either one of a set of Rust primitives -//! (`i64`, `u64`, `f64`, `bool`, and `&str`) or using a `fmt::Display` or -//! `fmt::Debug` implementation. `Subscriber`s are provided these primitive -//! value types as `dyn Value` trait objects. -//! -//! These trait objects can be formatted using `fmt::Debug`, but may also be -//! recorded as typed data by calling the [`Value::record`] method on these -//! trait objects with a _visitor_ implementing the [`Visit`] trait. This trait -//! represents the behavior used to record values of various types. For example, -//! an implementation of `Visit` might record integers by incrementing counters -//! for their field names rather than printing them. -//! -//! -//! # Using `valuable` -//! -//! `tracing`'s [`Value`] trait is intentionally minimalist: it supports only a small -//! number of Rust primitives as typed values, and only permits recording -//! user-defined types with their [`fmt::Debug`] or [`fmt::Display`] -//! implementations. However, there are some cases where it may be useful to record -//! nested values (such as arrays, `Vec`s, or `HashMap`s containing values), or -//! user-defined `struct` and `enum` types without having to format them as -//! unstructured text. -//! -//! To address `Value`'s limitations, `tracing` offers experimental support for -//! the [`valuable`] crate, which provides object-safe inspection of structured -//! values. User-defined types can implement the [`valuable::Valuable`] trait, -//! 
and be recorded as a `tracing` field by calling their [`as_value`] method. -//! If the [`Subscriber`] also supports the `valuable` crate, it can -//! then visit those types fields as structured values using `valuable`. -//! -//!
-//!     Note: valuable support is an
-//!     unstable feature. See
-//!     the documentation on unstable features for details on how to enable it.
-//! 
-//! -//! For example: -//! ```ignore -//! // Derive `Valuable` for our types: -//! use valuable::Valuable; -//! -//! #[derive(Clone, Debug, Valuable)] -//! struct User { -//! name: String, -//! age: u32, -//! address: Address, -//! } -//! -//! #[derive(Clone, Debug, Valuable)] -//! struct Address { -//! country: String, -//! city: String, -//! street: String, -//! } -//! -//! let user = User { -//! name: "Arwen Undomiel".to_string(), -//! age: 3000, -//! address: Address { -//! country: "Middle Earth".to_string(), -//! city: "Rivendell".to_string(), -//! street: "leafy lane".to_string(), -//! }, -//! }; -//! -//! // Recording `user` as a `valuable::Value` will allow the `tracing` subscriber -//! // to traverse its fields as a nested, typed structure: -//! tracing::info!(current_user = user.as_value()); -//! ``` -//! -//! Alternatively, the [`valuable()`] function may be used to convert a type -//! implementing [`Valuable`] into a `tracing` field value. -//! -//! When the `valuable` feature is enabled, the [`Visit`] trait will include an -//! optional [`record_value`] method. `Visit` implementations that wish to -//! record `valuable` values can implement this method with custom behavior. -//! If a visitor does not implement `record_value`, the [`valuable::Value`] will -//! be forwarded to the visitor's [`record_debug`] method. -//! -//! [`fmt::Debug`]: std::fmt::Debug -//! [`fmt::Display`]: std::fmt::Debug -//! [`valuable`]: https://crates.io/crates/valuable -//! [`valuable::Valuable`]: https://docs.rs/valuable/latest/valuable/trait.Valuable.html -//! [`as_value`]: https://docs.rs/valuable/latest/valuable/trait.Valuable.html#tymethod.as_value -//! [`valuable::Value`]: https://docs.rs/valuable/latest/valuable/enum.Value.html -//! [`Subscriber`]: crate::Subscriber -//! [`record_value`]: Visit::record_value -//! [`record_debug`]: Visit::record_debug -//! [span]: mod@crate::span -//! [`Event`]: crate::event::Event -//! [`Metadata`]: crate::Metadata -//! 
[`Attributes`]: crate::span::Attributes -//! [`Record`]: crate::span::Record -//! [`new_span`]: crate::Subscriber::new_span -//! [`record`]: crate::Subscriber::record -//! [`event`]: crate::Subscriber::event -pub use tracing_core::field::*; - -use crate::Metadata; - -/// Trait implemented to allow a type to be used as a field key. -/// -///
-/// Note: Although this is implemented for both the
-/// Field type and any
-/// type that can be borrowed as an &str, only Field
-/// allows O(1) access.
-/// Indexing a field with a string results in an iterative search that performs
-/// string comparisons. Thus, if possible, once the key for a field is known, it
-/// should be used whenever possible.
-/// 
-pub trait AsField: crate::sealed::Sealed { - /// Attempts to convert `&self` into a `Field` with the specified `metadata`. - /// - /// If `metadata` defines this field, then the field is returned. Otherwise, - /// this returns `None`. - fn as_field(&self, metadata: &Metadata<'_>) -> Option; -} - -// ===== impl AsField ===== - -impl AsField for Field { - #[inline] - fn as_field(&self, metadata: &Metadata<'_>) -> Option { - if self.callsite() == metadata.callsite() { - Some(self.clone()) - } else { - None - } - } -} - -impl<'a> AsField for &'a Field { - #[inline] - fn as_field(&self, metadata: &Metadata<'_>) -> Option { - if self.callsite() == metadata.callsite() { - Some((*self).clone()) - } else { - None - } - } -} - -impl AsField for str { - #[inline] - fn as_field(&self, metadata: &Metadata<'_>) -> Option { - metadata.fields().field(&self) - } -} - -impl crate::sealed::Sealed for Field {} -impl<'a> crate::sealed::Sealed for &'a Field {} -impl crate::sealed::Sealed for str {} diff -Nru s390-tools-2.31.0/rust-vendor/tracing/src/instrument.rs s390-tools-2.33.1/rust-vendor/tracing/src/instrument.rs --- s390-tools-2.31.0/rust-vendor/tracing/src/instrument.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/src/instrument.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,429 +0,0 @@ -use crate::{ - dispatcher::{self, Dispatch}, - span::Span, -}; -use core::{ - future::Future, - marker::Sized, - mem::ManuallyDrop, - pin::Pin, - task::{Context, Poll}, -}; -use pin_project_lite::pin_project; - -/// Attaches spans to a [`std::future::Future`]. -/// -/// Extension trait allowing futures to be -/// instrumented with a `tracing` [span]. -/// -/// [span]: super::Span -pub trait Instrument: Sized { - /// Instruments this type with the provided [`Span`], returning an - /// `Instrumented` wrapper. - /// - /// The attached [`Span`] will be [entered] every time the instrumented - /// [`Future`] is polled or [`Drop`]ped. 
- /// - /// # Examples - /// - /// Instrumenting a future: - /// - /// ```rust - /// use tracing::Instrument; - /// - /// # async fn doc() { - /// let my_future = async { - /// // ... - /// }; - /// - /// my_future - /// .instrument(tracing::info_span!("my_future")) - /// .await - /// # } - /// ``` - /// - /// The [`Span::or_current`] combinator can be used in combination with - /// `instrument` to ensure that the [current span] is attached to the - /// future if the span passed to `instrument` is [disabled]: - /// - /// ``` - /// use tracing::Instrument; - /// # mod tokio { - /// # pub(super) fn spawn(_: impl std::future::Future) {} - /// # } - /// - /// let my_future = async { - /// // ... - /// }; - /// - /// let outer_span = tracing::info_span!("outer").entered(); - /// - /// // If the "my_future" span is enabled, then the spawned task will - /// // be within both "my_future" *and* "outer", since "outer" is - /// // "my_future"'s parent. However, if "my_future" is disabled, - /// // the spawned task will *not* be in any span. - /// tokio::spawn( - /// my_future - /// .instrument(tracing::debug_span!("my_future")) - /// ); - /// - /// // Using `Span::or_current` ensures the spawned task is instrumented - /// // with the current span, if the new span passed to `instrument` is - /// // not enabled. 
This means that if the "my_future" span is disabled, - /// // the spawned task will still be instrumented with the "outer" span: - /// # let my_future = async {}; - /// tokio::spawn( - /// my_future - /// .instrument(tracing::debug_span!("my_future").or_current()) - /// ); - /// ``` - /// - /// [entered]: super::Span::enter() - /// [`Span::or_current`]: super::Span::or_current() - /// [current span]: super::Span::current() - /// [disabled]: super::Span::is_disabled() - /// [`Future`]: std::future::Future - fn instrument(self, span: Span) -> Instrumented { - Instrumented { - inner: ManuallyDrop::new(self), - span, - } - } - - /// Instruments this type with the [current] [`Span`], returning an - /// `Instrumented` wrapper. - /// - /// The attached [`Span`] will be [entered] every time the instrumented - /// [`Future`] is polled or [`Drop`]ped. - /// - /// This can be used to propagate the current span when spawning a new future. - /// - /// # Examples - /// - /// ```rust - /// use tracing::Instrument; - /// - /// # mod tokio { - /// # pub(super) fn spawn(_: impl std::future::Future) {} - /// # } - /// # async fn doc() { - /// let span = tracing::info_span!("my_span"); - /// let _enter = span.enter(); - /// - /// // ... - /// - /// let future = async { - /// tracing::debug!("this event will occur inside `my_span`"); - /// // ... - /// }; - /// tokio::spawn(future.in_current_span()); - /// # } - /// ``` - /// - /// [current]: super::Span::current() - /// [entered]: super::Span::enter() - /// [`Span`]: crate::Span - /// [`Future`]: std::future::Future - #[inline] - fn in_current_span(self) -> Instrumented { - self.instrument(Span::current()) - } -} - -/// Extension trait allowing futures to be instrumented with -/// a `tracing` [`Subscriber`](crate::Subscriber). -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub trait WithSubscriber: Sized { - /// Attaches the provided [`Subscriber`] to this type, returning a - /// [`WithDispatch`] wrapper. 
- /// - /// The attached [`Subscriber`] will be set as the [default] when the returned - /// [`Future`] is polled. - /// - /// # Examples - /// - /// ``` - /// # use tracing::subscriber::NoSubscriber as MySubscriber; - /// # use tracing::subscriber::NoSubscriber as MyOtherSubscriber; - /// # async fn docs() { - /// use tracing::instrument::WithSubscriber; - /// - /// // Set the default `Subscriber` - /// let _default = tracing::subscriber::set_default(MySubscriber::default()); - /// - /// tracing::info!("this event will be recorded by the default `Subscriber`"); - /// - /// // Create a different `Subscriber` and attach it to a future. - /// let other_subscriber = MyOtherSubscriber::default(); - /// let future = async { - /// tracing::info!("this event will be recorded by the other `Subscriber`"); - /// // ... - /// }; - /// - /// future - /// // Attach the other `Subscriber` to the future before awaiting it - /// .with_subscriber(other_subscriber) - /// .await; - /// - /// // Once the future has completed, we return to the default `Subscriber`. - /// tracing::info!("this event will be recorded by the default `Subscriber`"); - /// # } - /// ``` - /// - /// [`Subscriber`]: super::Subscriber - /// [default]: crate::dispatcher#setting-the-default-subscriber - /// [`Future`]: std::future::Future - fn with_subscriber(self, subscriber: S) -> WithDispatch - where - S: Into, - { - WithDispatch { - inner: self, - dispatcher: subscriber.into(), - } - } - - /// Attaches the current [default] [`Subscriber`] to this type, returning a - /// [`WithDispatch`] wrapper. - /// - /// The attached `Subscriber` will be set as the [default] when the returned - /// [`Future`] is polled. - /// - /// This can be used to propagate the current dispatcher context when - /// spawning a new future that may run on a different thread. 
- /// - /// # Examples - /// - /// ``` - /// # mod tokio { - /// # pub(super) fn spawn(_: impl std::future::Future) {} - /// # } - /// # use tracing::subscriber::NoSubscriber as MySubscriber; - /// # async fn docs() { - /// use tracing::instrument::WithSubscriber; - /// - /// // Using `set_default` (rather than `set_global_default`) sets the - /// // default `Subscriber` for *this* thread only. - /// let _default = tracing::subscriber::set_default(MySubscriber::default()); - /// - /// let future = async { - /// // ... - /// }; - /// - /// // If a multi-threaded async runtime is in use, this spawned task may - /// // run on a different thread, in a different default `Subscriber`'s context. - /// tokio::spawn(future); - /// - /// // However, calling `with_current_subscriber` on the future before - /// // spawning it, ensures that the current thread's default `Subscriber` is - /// // propagated to the spawned task, regardless of where it executes: - /// # let future = async { }; - /// tokio::spawn(future.with_current_subscriber()); - /// # } - /// ``` - /// [`Subscriber`]: super::Subscriber - /// [default]: crate::dispatcher#setting-the-default-subscriber - /// [`Future`]: std::future::Future - #[inline] - fn with_current_subscriber(self) -> WithDispatch { - WithDispatch { - inner: self, - dispatcher: crate::dispatcher::get_default(|default| default.clone()), - } - } -} - -pin_project! { - /// A [`Future`] that has been instrumented with a `tracing` [`Subscriber`]. - /// - /// This type is returned by the [`WithSubscriber`] extension trait. See that - /// trait's documentation for details. - /// - /// [`Future`]: std::future::Future - /// [`Subscriber`]: crate::Subscriber - #[derive(Clone, Debug)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - #[cfg_attr(docsrs, doc(cfg(feature = "std")))] - pub struct WithDispatch { - #[pin] - inner: T, - dispatcher: Dispatch, - } -} - -pin_project! 
{ - /// A [`Future`] that has been instrumented with a `tracing` [`Span`]. - /// - /// This type is returned by the [`Instrument`] extension trait. See that - /// trait's documentation for details. - /// - /// [`Future`]: std::future::Future - /// [`Span`]: crate::Span - #[project = InstrumentedProj] - #[project_ref = InstrumentedProjRef] - #[derive(Debug, Clone)] - #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Instrumented { - // `ManuallyDrop` is used here to to enter instrument `Drop` by entering - // `Span` and executing `ManuallyDrop::drop`. - #[pin] - inner: ManuallyDrop, - span: Span, - } - - impl PinnedDrop for Instrumented { - fn drop(this: Pin<&mut Self>) { - let this = this.project(); - let _enter = this.span.enter(); - // SAFETY: 1. `Pin::get_unchecked_mut()` is safe, because this isn't - // different from wrapping `T` in `Option` and calling - // `Pin::set(&mut this.inner, None)`, except avoiding - // additional memory overhead. - // 2. `ManuallyDrop::drop()` is safe, because - // `PinnedDrop::drop()` is guaranteed to be called only - // once. - unsafe { ManuallyDrop::drop(this.inner.get_unchecked_mut()) } - } - } -} - -impl<'a, T> InstrumentedProj<'a, T> { - /// Get a mutable reference to the [`Span`] a pinned mutable reference to - /// the wrapped type. - fn span_and_inner_pin_mut(self) -> (&'a mut Span, Pin<&'a mut T>) { - // SAFETY: As long as `ManuallyDrop` does not move, `T` won't move - // and `inner` is valid, because `ManuallyDrop::drop` is called - // only inside `Drop` of the `Instrumented`. - let inner = unsafe { self.inner.map_unchecked_mut(|v| &mut **v) }; - (self.span, inner) - } -} - -impl<'a, T> InstrumentedProjRef<'a, T> { - /// Get a reference to the [`Span`] a pinned reference to the wrapped type. 
- fn span_and_inner_pin_ref(self) -> (&'a Span, Pin<&'a T>) { - // SAFETY: As long as `ManuallyDrop` does not move, `T` won't move - // and `inner` is valid, because `ManuallyDrop::drop` is called - // only inside `Drop` of the `Instrumented`. - let inner = unsafe { self.inner.map_unchecked(|v| &**v) }; - (self.span, inner) - } -} - -// === impl Instrumented === - -impl Future for Instrumented { - type Output = T::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let (span, inner) = self.project().span_and_inner_pin_mut(); - let _enter = span.enter(); - inner.poll(cx) - } -} - -impl Instrument for T {} - -impl Instrumented { - /// Borrows the `Span` that this type is instrumented by. - pub fn span(&self) -> &Span { - &self.span - } - - /// Mutably borrows the `Span` that this type is instrumented by. - pub fn span_mut(&mut self) -> &mut Span { - &mut self.span - } - - /// Borrows the wrapped type. - pub fn inner(&self) -> &T { - &self.inner - } - - /// Mutably borrows the wrapped type. - pub fn inner_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Get a pinned reference to the wrapped type. - pub fn inner_pin_ref(self: Pin<&Self>) -> Pin<&T> { - self.project_ref().span_and_inner_pin_ref().1 - } - - /// Get a pinned mutable reference to the wrapped type. - pub fn inner_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { - self.project().span_and_inner_pin_mut().1 - } - - /// Consumes the `Instrumented`, returning the wrapped type. - /// - /// Note that this drops the span. - pub fn into_inner(self) -> T { - // To manually destructure `Instrumented` without `Drop`, we - // move it into a ManuallyDrop and use pointers to its fields - let this = ManuallyDrop::new(self); - let span: *const Span = &this.span; - let inner: *const ManuallyDrop = &this.inner; - // SAFETY: Those pointers are valid for reads, because `Drop` didn't - // run, and properly aligned, because `Instrumented` isn't - // `#[repr(packed)]`. 
- let _span = unsafe { span.read() }; - let inner = unsafe { inner.read() }; - ManuallyDrop::into_inner(inner) - } -} - -// === impl WithDispatch === - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl Future for WithDispatch { - type Output = T::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); - let dispatcher = this.dispatcher; - let future = this.inner; - let _default = dispatcher::set_default(dispatcher); - future.poll(cx) - } -} - -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl WithSubscriber for T {} - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl WithDispatch { - /// Borrows the [`Dispatch`] that is entered when this type is polled. - pub fn dispatcher(&self) -> &Dispatch { - &self.dispatcher - } - - /// Borrows the wrapped type. - pub fn inner(&self) -> &T { - &self.inner - } - - /// Mutably borrows the wrapped type. - pub fn inner_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Get a pinned reference to the wrapped type. - pub fn inner_pin_ref(self: Pin<&Self>) -> Pin<&T> { - self.project_ref().inner - } - - /// Get a pinned mutable reference to the wrapped type. - pub fn inner_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> { - self.project().inner - } - - /// Consumes the `Instrumented`, returning the wrapped type. - /// - /// Note that this drops the span. - pub fn into_inner(self) -> T { - self.inner - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing/src/level_filters.rs s390-tools-2.33.1/rust-vendor/tracing/src/level_filters.rs --- s390-tools-2.31.0/rust-vendor/tracing/src/level_filters.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/src/level_filters.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,98 +0,0 @@ -//! Trace verbosity level filtering. -//! -//! # Compile time filters -//! -//! Trace verbosity levels can be statically disabled at compile time via Cargo -//! 
features, similar to the [`log` crate]. Trace instrumentation at disabled -//! levels will be skipped and will not even be present in the resulting binary -//! unless the verbosity level is specified dynamically. This level is -//! configured separately for release and debug builds. The features are: -//! -//! * `max_level_off` -//! * `max_level_error` -//! * `max_level_warn` -//! * `max_level_info` -//! * `max_level_debug` -//! * `max_level_trace` -//! * `release_max_level_off` -//! * `release_max_level_error` -//! * `release_max_level_warn` -//! * `release_max_level_info` -//! * `release_max_level_debug` -//! * `release_max_level_trace` -//! -//! These features control the value of the `STATIC_MAX_LEVEL` constant. The -//! instrumentation macros macros check this value before recording an event or -//! constructing a span. By default, no levels are disabled. -//! -//! For example, a crate can disable trace level instrumentation in debug builds -//! and trace, debug, and info level instrumentation in release builds with the -//! following configuration: -//! -//! ```toml -//! [dependencies] -//! tracing = { version = "0.1", features = ["max_level_debug", "release_max_level_warn"] } -//! ``` -//! ## Notes -//! -//! Please note that `tracing`'s static max level features do *not* control the -//! [`log`] records that may be emitted when [`tracing`'s "log" feature flag][f] is -//! enabled. This is to allow `tracing` to be disabled entirely at compile time -//! while still emitting `log` records --- such as when a library using -//! `tracing` is used by an application using `log` that doesn't want to -//! generate any `tracing`-related code, but does want to collect `log` records. -//! -//! This means that if the "log" feature is in use, some code may be generated -//! for `log` records emitted by disabled `tracing` events. If this is not -//! desirable, `log` records may be disabled separately using [`log`'s static -//! max level features][`log` crate]. -//! -//! 
[`log`]: https://docs.rs/log/ -//! [`log` crate]: https://docs.rs/log/latest/log/#compile-time-filters -//! [f]: https://docs.rs/tracing/latest/tracing/#emitting-log-records -pub use tracing_core::{metadata::ParseLevelFilterError, LevelFilter}; - -/// The statically configured maximum trace level. -/// -/// See the [module-level documentation] for information on how to configure -/// this. -/// -/// This value is checked by the `event!` and `span!` macros. Code that -/// manually constructs events or spans via the `Event::record` function or -/// `Span` constructors should compare the level against this value to -/// determine if those spans or events are enabled. -/// -/// [module-level documentation]: self#compile-time-filters -pub const STATIC_MAX_LEVEL: LevelFilter = get_max_level_inner(); - -const fn get_max_level_inner() -> LevelFilter { - if cfg!(not(debug_assertions)) { - if cfg!(feature = "release_max_level_off") { - LevelFilter::OFF - } else if cfg!(feature = "release_max_level_error") { - LevelFilter::ERROR - } else if cfg!(feature = "release_max_level_warn") { - LevelFilter::WARN - } else if cfg!(feature = "release_max_level_info") { - LevelFilter::INFO - } else if cfg!(feature = "release_max_level_debug") { - LevelFilter::DEBUG - } else { - // Same as branch cfg!(feature = "release_max_level_trace") - LevelFilter::TRACE - } - } else if cfg!(feature = "max_level_off") { - LevelFilter::OFF - } else if cfg!(feature = "max_level_error") { - LevelFilter::ERROR - } else if cfg!(feature = "max_level_warn") { - LevelFilter::WARN - } else if cfg!(feature = "max_level_info") { - LevelFilter::INFO - } else if cfg!(feature = "max_level_debug") { - LevelFilter::DEBUG - } else { - // Same as branch cfg!(feature = "max_level_trace") - LevelFilter::TRACE - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing/src/lib.rs s390-tools-2.33.1/rust-vendor/tracing/src/lib.rs --- s390-tools-2.31.0/rust-vendor/tracing/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ 
s390-tools-2.33.1/rust-vendor/tracing/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1125 +0,0 @@ -//! A scoped, structured logging and diagnostics system. -//! -//! # Overview -//! -//! `tracing` is a framework for instrumenting Rust programs to collect -//! structured, event-based diagnostic information. -//! -//! In asynchronous systems like Tokio, interpreting traditional log messages can -//! often be quite challenging. Since individual tasks are multiplexed on the same -//! thread, associated events and log lines are intermixed making it difficult to -//! trace the logic flow. `tracing` expands upon logging-style diagnostics by -//! allowing libraries and applications to record structured events with additional -//! information about *temporality* and *causality* — unlike a log message, a span -//! in `tracing` has a beginning and end time, may be entered and exited by the -//! flow of execution, and may exist within a nested tree of similar spans. In -//! addition, `tracing` spans are *structured*, with the ability to record typed -//! data as well as textual messages. -//! -//! The `tracing` crate provides the APIs necessary for instrumenting libraries -//! and applications to emit trace data. -//! -//! *Compiler support: [requires `rustc` 1.56+][msrv]* -//! -//! [msrv]: #supported-rust-versions -//! # Core Concepts -//! -//! The core of `tracing`'s API is composed of _spans_, _events_ and -//! _subscribers_. We'll cover these in turn. -//! -//! ## Spans -//! -//! To record the flow of execution through a program, `tracing` introduces the -//! concept of [spans]. Unlike a log line that represents a _moment in -//! time_, a span represents a _period of time_ with a beginning and an end. When a -//! program begins executing in a context or performing a unit of work, it -//! _enters_ that context's span, and when it stops executing in that context, -//! it _exits_ the span. The span in which a thread is currently executing is -//! 
referred to as that thread's _current_ span. -//! -//! For example: -//! ``` -//! use tracing::{span, Level}; -//! # fn main() { -//! let span = span!(Level::TRACE, "my_span"); -//! // `enter` returns a RAII guard which, when dropped, exits the span. this -//! // indicates that we are in the span for the current lexical scope. -//! let _enter = span.enter(); -//! // perform some work in the context of `my_span`... -//! # } -//!``` -//! -//! The [`span` module][span]'s documentation provides further details on how to -//! use spans. -//! -//!
-//!
-//!  **Warning**: In asynchronous code that uses async/await syntax,
-//!  `Span::enter` may produce incorrect traces if the returned drop
-//!  guard is held across an await point. See
-//!  [the method documentation][Span#in-asynchronous-code] for details.
-//!
-//! 
-//! -//! ## Events -//! -//! An [`Event`] represents a _moment_ in time. It signifies something that -//! happened while a trace was being recorded. `Event`s are comparable to the log -//! records emitted by unstructured logging code, but unlike a typical log line, -//! an `Event` may occur within the context of a span. -//! -//! For example: -//! ``` -//! use tracing::{event, span, Level}; -//! -//! # fn main() { -//! // records an event outside of any span context: -//! event!(Level::INFO, "something happened"); -//! -//! let span = span!(Level::INFO, "my_span"); -//! let _guard = span.enter(); -//! -//! // records an event within "my_span". -//! event!(Level::DEBUG, "something happened inside my_span"); -//! # } -//!``` -//! -//! In general, events should be used to represent points in time _within_ a -//! span — a request returned with a given status code, _n_ new items were -//! taken from a queue, and so on. -//! -//! The [`Event` struct][`Event`] documentation provides further details on using -//! events. -//! -//! ## Subscribers -//! -//! As `Span`s and `Event`s occur, they are recorded or aggregated by -//! implementations of the [`Subscriber`] trait. `Subscriber`s are notified -//! when an `Event` takes place and when a `Span` is entered or exited. These -//! notifications are represented by the following `Subscriber` trait methods: -//! -//! + [`event`][Subscriber::event], called when an `Event` takes place, -//! + [`enter`], called when execution enters a `Span`, -//! + [`exit`], called when execution exits a `Span` -//! -//! In addition, subscribers may implement the [`enabled`] function to _filter_ -//! the notifications they receive based on [metadata] describing each `Span` -//! or `Event`. If a call to `Subscriber::enabled` returns `false` for a given -//! set of metadata, that `Subscriber` will *not* be notified about the -//! corresponding `Span` or `Event`. For performance reasons, if no currently -//! 
active subscribers express interest in a given set of metadata by returning -//! `true`, then the corresponding `Span` or `Event` will never be constructed. -//! -//! # Usage -//! -//! First, add this to your `Cargo.toml`: -//! -//! ```toml -//! [dependencies] -//! tracing = "0.1" -//! ``` -//! -//! ## Recording Spans and Events -//! -//! Spans and events are recorded using macros. -//! -//! ### Spans -//! -//! The [`span!`] macro expands to a [`Span` struct][`Span`] which is used to -//! record a span. The [`Span::enter`] method on that struct records that the -//! span has been entered, and returns a [RAII] guard object, which will exit -//! the span when dropped. -//! -//! For example: -//! -//! ```rust -//! use tracing::{span, Level}; -//! # fn main() { -//! // Construct a new span named "my span" with trace log level. -//! let span = span!(Level::TRACE, "my span"); -//! -//! // Enter the span, returning a guard object. -//! let _enter = span.enter(); -//! -//! // Any trace events that occur before the guard is dropped will occur -//! // within the span. -//! -//! // Dropping the guard will exit the span. -//! # } -//! ``` -//! -//! The [`#[instrument]`][instrument] attribute provides an easy way to -//! add `tracing` spans to functions. A function annotated with `#[instrument]` -//! will create and enter a span with that function's name every time the -//! function is called, with arguments to that function will be recorded as -//! fields using `fmt::Debug`. -//! -//! For example: -//! ```ignore -//! # // this doctest is ignored because we don't have a way to say -//! # // that it should only be run with cfg(feature = "attributes") -//! use tracing::{Level, event, instrument}; -//! -//! #[instrument] -//! pub fn my_function(my_arg: usize) { -//! // This event will be recorded inside a span named `my_function` with the -//! // field `my_arg`. -//! event!(Level::INFO, "inside my_function!"); -//! // ... -//! } -//! # fn main() {} -//! ``` -//! -//! 
For functions which don't have built-in tracing support and can't have -//! the `#[instrument]` attribute applied (such as from an external crate), -//! the [`Span` struct][`Span`] has a [`in_scope()` method][`in_scope`] -//! which can be used to easily wrap synchonous code in a span. -//! -//! For example: -//! ```rust -//! use tracing::info_span; -//! -//! # fn doc() -> Result<(), ()> { -//! # mod serde_json { -//! # pub(crate) fn from_slice(buf: &[u8]) -> Result<(), ()> { Ok(()) } -//! # } -//! # let buf: [u8; 0] = []; -//! let json = info_span!("json.parse").in_scope(|| serde_json::from_slice(&buf))?; -//! # let _ = json; // suppress unused variable warning -//! # Ok(()) -//! # } -//! ``` -//! -//! You can find more examples showing how to use this crate [here][examples]. -//! -//! [RAII]: https://github.com/rust-unofficial/patterns/blob/main/src/patterns/behavioural/RAII.md -//! [examples]: https://github.com/tokio-rs/tracing/tree/master/examples -//! -//! ### Events -//! -//! [`Event`]s are recorded using the [`event!`] macro: -//! -//! ```rust -//! # fn main() { -//! use tracing::{event, Level}; -//! event!(Level::INFO, "something has happened!"); -//! # } -//! ``` -//! -//! ## Using the Macros -//! -//! The [`span!`] and [`event!`] macros as well as the `#[instrument]` attribute -//! use fairly similar syntax, with some exceptions. -//! -//! ### Configuring Attributes -//! -//! Both macros require a [`Level`] specifying the verbosity of the span or -//! event. Optionally, the, [target] and [parent span] may be overridden. If the -//! target and parent span are not overridden, they will default to the -//! module path where the macro was invoked and the current span (as determined -//! by the subscriber), respectively. -//! -//! For example: -//! -//! ``` -//! # use tracing::{span, event, Level}; -//! # fn main() { -//! span!(target: "app_spans", Level::TRACE, "my span"); -//! event!(target: "app_events", Level::INFO, "something has happened!"); -//! 
# } -//! ``` -//! ``` -//! # use tracing::{span, event, Level}; -//! # fn main() { -//! let span = span!(Level::TRACE, "my span"); -//! event!(parent: &span, Level::INFO, "something has happened!"); -//! # } -//! ``` -//! -//! The span macros also take a string literal after the level, to set the name -//! of the span (as above). In the case of the event macros, the name of the event can -//! be overridden (the default is `event file:line`) using the `name:` specifier. -//! -//! ``` -//! # use tracing::{span, event, Level}; -//! # fn main() { -//! span!(Level::TRACE, "my span"); -//! event!(name: "some_info", Level::INFO, "something has happened!"); -//! # } -//! ``` -//! -//! ### Recording Fields -//! -//! Structured fields on spans and events are specified using the syntax -//! `field_name = field_value`. Fields are separated by commas. -//! -//! ``` -//! # use tracing::{event, Level}; -//! # fn main() { -//! // records an event with two fields: -//! // - "answer", with the value 42 -//! // - "question", with the value "life, the universe and everything" -//! event!(Level::INFO, answer = 42, question = "life, the universe, and everything"); -//! # } -//! ``` -//! -//! As shorthand, local variables may be used as field values without an -//! assignment, similar to [struct initializers]. For example: -//! -//! ``` -//! # use tracing::{span, Level}; -//! # fn main() { -//! let user = "ferris"; -//! -//! span!(Level::TRACE, "login", user); -//! // is equivalent to: -//! span!(Level::TRACE, "login", user = user); -//! # } -//!``` -//! -//! Field names can include dots, but should not be terminated by them: -//! ``` -//! # use tracing::{span, Level}; -//! # fn main() { -//! let user = "ferris"; -//! let email = "ferris@rust-lang.org"; -//! span!(Level::TRACE, "login", user, user.email = email); -//! # } -//!``` -//! -//! Since field names can include dots, fields on local structs can be used -//! using the local variable shorthand: -//! ``` -//! 
# use tracing::{span, Level}; -//! # fn main() { -//! # struct User { -//! # name: &'static str, -//! # email: &'static str, -//! # } -//! let user = User { -//! name: "ferris", -//! email: "ferris@rust-lang.org", -//! }; -//! // the span will have the fields `user.name = "ferris"` and -//! // `user.email = "ferris@rust-lang.org"`. -//! span!(Level::TRACE, "login", user.name, user.email); -//! # } -//!``` -//! -//! Fields with names that are not Rust identifiers, or with names that are Rust reserved words, -//! may be created using quoted string literals. However, this may not be used with the local -//! variable shorthand. -//! ``` -//! # use tracing::{span, Level}; -//! # fn main() { -//! // records an event with fields whose names are not Rust identifiers -//! // - "guid:x-request-id", containing a `:`, with the value "abcdef" -//! // - "type", which is a reserved word, with the value "request" -//! span!(Level::TRACE, "api", "guid:x-request-id" = "abcdef", "type" = "request"); -//! # } -//!``` -//! -//! Constant expressions can also be used as field names. Constants -//! must be enclosed in curly braces (`{}`) to indicate that the *value* -//! of the constant is to be used as the field name, rather than the -//! constant's name. For example: -//! ``` -//! # use tracing::{span, Level}; -//! # fn main() { -//! const RESOURCE_NAME: &str = "foo"; -//! // this span will have the field `foo = "some_id"` -//! span!(Level::TRACE, "get", { RESOURCE_NAME } = "some_id"); -//! # } -//!``` -//! -//! The `?` sigil is shorthand that specifies a field should be recorded using -//! its [`fmt::Debug`] implementation: -//! ``` -//! # use tracing::{event, Level}; -//! # fn main() { -//! #[derive(Debug)] -//! struct MyStruct { -//! field: &'static str, -//! } -//! -//! let my_struct = MyStruct { -//! field: "Hello world!" -//! }; -//! -//! // `my_struct` will be recorded using its `fmt::Debug` implementation. -//! event!(Level::TRACE, greeting = ?my_struct); -//! 
// is equivalent to: -//! event!(Level::TRACE, greeting = tracing::field::debug(&my_struct)); -//! # } -//! ``` -//! -//! The `%` sigil operates similarly, but indicates that the value should be -//! recorded using its [`fmt::Display`] implementation: -//! ``` -//! # use tracing::{event, Level}; -//! # fn main() { -//! # #[derive(Debug)] -//! # struct MyStruct { -//! # field: &'static str, -//! # } -//! # -//! # let my_struct = MyStruct { -//! # field: "Hello world!" -//! # }; -//! // `my_struct.field` will be recorded using its `fmt::Display` implementation. -//! event!(Level::TRACE, greeting = %my_struct.field); -//! // is equivalent to: -//! event!(Level::TRACE, greeting = tracing::field::display(&my_struct.field)); -//! # } -//! ``` -//! -//! The `%` and `?` sigils may also be used with local variable shorthand: -//! -//! ``` -//! # use tracing::{event, Level}; -//! # fn main() { -//! # #[derive(Debug)] -//! # struct MyStruct { -//! # field: &'static str, -//! # } -//! # -//! # let my_struct = MyStruct { -//! # field: "Hello world!" -//! # }; -//! // `my_struct.field` will be recorded using its `fmt::Display` implementation. -//! event!(Level::TRACE, %my_struct.field); -//! # } -//! ``` -//! -//! Additionally, a span may declare fields with the special value [`Empty`], -//! which indicates that that the value for that field does not currently exist -//! but may be recorded later. For example: -//! -//! ``` -//! use tracing::{trace_span, field}; -//! -//! // Create a span with two fields: `greeting`, with the value "hello world", and -//! // `parting`, without a value. -//! let span = trace_span!("my_span", greeting = "hello world", parting = field::Empty); -//! -//! // ... -//! -//! // Now, record a value for parting as well. -//! span.record("parting", &"goodbye world!"); -//! ``` -//! -//! Finally, events may also include human-readable messages, in the form of a -//! [format string][fmt] and (optional) arguments, **after** the event's -//! key-value fields. 
If a format string and arguments are provided, -//! they will implicitly create a new field named `message` whose value is the -//! provided set of format arguments. -//! -//! For example: -//! -//! ``` -//! # use tracing::{event, Level}; -//! # fn main() { -//! let question = "the ultimate question of life, the universe, and everything"; -//! let answer = 42; -//! // records an event with the following fields: -//! // - `question.answer` with the value 42, -//! // - `question.tricky` with the value `true`, -//! // - "message", with the value "the answer to the ultimate question of life, the -//! // universe, and everything is 42." -//! event!( -//! Level::DEBUG, -//! question.answer = answer, -//! question.tricky = true, -//! "the answer to {} is {}.", question, answer -//! ); -//! # } -//! ``` -//! -//! Specifying a formatted message in this manner does not allocate by default. -//! -//! [struct initializers]: https://doc.rust-lang.org/book/ch05-01-defining-structs.html#using-the-field-init-shorthand-when-variables-and-fields-have-the-same-name -//! [target]: Metadata::target -//! [parent span]: span::Attributes::parent -//! [determined contextually]: span::Attributes::is_contextual -//! [`fmt::Debug`]: std::fmt::Debug -//! [`fmt::Display`]: std::fmt::Display -//! [fmt]: std::fmt#usage -//! [`Empty`]: field::Empty -//! -//! ### Shorthand Macros -//! -//! `tracing` also offers a number of macros with preset verbosity levels. -//! The [`trace!`], [`debug!`], [`info!`], [`warn!`], and [`error!`] behave -//! similarly to the [`event!`] macro, but with the [`Level`] argument already -//! specified, while the corresponding [`trace_span!`], [`debug_span!`], -//! [`info_span!`], [`warn_span!`], and [`error_span!`] macros are the same, -//! but for the [`span!`] macro. -//! -//! These are intended both as a shorthand, and for compatibility with the [`log`] -//! crate (see the next section). -//! -//! [`span!`]: span! -//! [`event!`]: event! -//! [`trace!`]: trace! -//! 
[`debug!`]: debug! -//! [`info!`]: info! -//! [`warn!`]: warn! -//! [`error!`]: error! -//! [`trace_span!`]: trace_span! -//! [`debug_span!`]: debug_span! -//! [`info_span!`]: info_span! -//! [`warn_span!`]: warn_span! -//! [`error_span!`]: error_span! -//! -//! ### For `log` Users -//! -//! Users of the [`log`] crate should note that `tracing` exposes a set of -//! macros for creating `Event`s (`trace!`, `debug!`, `info!`, `warn!`, and -//! `error!`) which may be invoked with the same syntax as the similarly-named -//! macros from the `log` crate. Often, the process of converting a project to -//! use `tracing` can begin with a simple drop-in replacement. -//! -//! Let's consider the `log` crate's yak-shaving example: -//! -//! ```rust,ignore -//! use std::{error::Error, io}; -//! use tracing::{debug, error, info, span, warn, Level}; -//! -//! // the `#[tracing::instrument]` attribute creates and enters a span -//! // every time the instrumented function is called. The span is named after the -//! // the function or method. Parameters passed to the function are recorded as fields. -//! #[tracing::instrument] -//! pub fn shave(yak: usize) -> Result<(), Box> { -//! // this creates an event at the DEBUG level with two fields: -//! // - `excitement`, with the key "excitement" and the value "yay!" -//! // - `message`, with the key "message" and the value "hello! I'm gonna shave a yak." -//! // -//! // unlike other fields, `message`'s shorthand initialization is just the string itself. -//! debug!(excitement = "yay!", "hello! I'm gonna shave a yak."); -//! if yak == 3 { -//! warn!("could not locate yak!"); -//! // note that this is intended to demonstrate `tracing`'s features, not idiomatic -//! // error handling! in a library or application, you should consider returning -//! // a dedicated `YakError`. libraries like snafu or thiserror make this easy. -//! return Err(io::Error::new(io::ErrorKind::Other, "shaving yak failed!").into()); -//! } else { -//! 
debug!("yak shaved successfully"); -//! } -//! Ok(()) -//! } -//! -//! pub fn shave_all(yaks: usize) -> usize { -//! // Constructs a new span named "shaving_yaks" at the TRACE level, -//! // and a field whose key is "yaks". This is equivalent to writing: -//! // -//! // let span = span!(Level::TRACE, "shaving_yaks", yaks = yaks); -//! // -//! // local variables (`yaks`) can be used as field values -//! // without an assignment, similar to struct initializers. -//! let _span = span!(Level::TRACE, "shaving_yaks", yaks).entered(); -//! -//! info!("shaving yaks"); -//! -//! let mut yaks_shaved = 0; -//! for yak in 1..=yaks { -//! let res = shave(yak); -//! debug!(yak, shaved = res.is_ok()); -//! -//! if let Err(ref error) = res { -//! // Like spans, events can also use the field initialization shorthand. -//! // In this instance, `yak` is the field being initalized. -//! error!(yak, error = error.as_ref(), "failed to shave yak!"); -//! } else { -//! yaks_shaved += 1; -//! } -//! debug!(yaks_shaved); -//! } -//! -//! yaks_shaved -//! } -//! ``` -//! -//! ## In libraries -//! -//! Libraries should link only to the `tracing` crate, and use the provided -//! macros to record whatever information will be useful to downstream -//! consumers. -//! -//! ## In executables -//! -//! In order to record trace events, executables have to use a `Subscriber` -//! implementation compatible with `tracing`. A `Subscriber` implements a -//! way of collecting trace data, such as by logging it to standard output. -//! -//! This library does not contain any `Subscriber` implementations; these are -//! provided by [other crates](#related-crates). -//! -//! The simplest way to use a subscriber is to call the [`set_global_default`] -//! function: -//! -//! ``` -//! extern crate tracing; -//! # pub struct FooSubscriber; -//! # use tracing::{span::{Id, Attributes, Record}, Metadata}; -//! # impl tracing::Subscriber for FooSubscriber { -//! 
# fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } -//! # fn record(&self, _: &Id, _: &Record) {} -//! # fn event(&self, _: &tracing::Event) {} -//! # fn record_follows_from(&self, _: &Id, _: &Id) {} -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn enter(&self, _: &Id) {} -//! # fn exit(&self, _: &Id) {} -//! # } -//! # impl FooSubscriber { -//! # fn new() -> Self { FooSubscriber } -//! # } -//! # fn main() { -//! -//! let my_subscriber = FooSubscriber::new(); -//! tracing::subscriber::set_global_default(my_subscriber) -//! .expect("setting tracing default failed"); -//! # } -//! ``` -//! -//!
-//!     Warning: In general, libraries should not call
-//!     set_global_default()! Doing so will cause conflicts when
-//!     executables that depend on the library try to set the default later.
-//! 
-//! -//! This subscriber will be used as the default in all threads for the -//! remainder of the duration of the program, similar to setting the logger -//! in the `log` crate. -//! -//! In addition, the default subscriber can be set through using the -//! [`with_default`] function. This follows the `tokio` pattern of using -//! closures to represent executing code in a context that is exited at the end -//! of the closure. For example: -//! -//! ```rust -//! # pub struct FooSubscriber; -//! # use tracing::{span::{Id, Attributes, Record}, Metadata}; -//! # impl tracing::Subscriber for FooSubscriber { -//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } -//! # fn record(&self, _: &Id, _: &Record) {} -//! # fn event(&self, _: &tracing::Event) {} -//! # fn record_follows_from(&self, _: &Id, _: &Id) {} -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn enter(&self, _: &Id) {} -//! # fn exit(&self, _: &Id) {} -//! # } -//! # impl FooSubscriber { -//! # fn new() -> Self { FooSubscriber } -//! # } -//! # fn main() { -//! -//! let my_subscriber = FooSubscriber::new(); -//! # #[cfg(feature = "std")] -//! tracing::subscriber::with_default(my_subscriber, || { -//! // Any trace events generated in this closure or by functions it calls -//! // will be collected by `my_subscriber`. -//! }) -//! # } -//! ``` -//! -//! This approach allows trace data to be collected by multiple subscribers -//! within different contexts in the program. Note that the override only applies to the -//! currently executing thread; other threads will not see the change from with_default. -//! -//! Any trace events generated outside the context of a subscriber will not be collected. -//! -//! Once a subscriber has been set, instrumentation points may be added to the -//! executable using the `tracing` crate's macros. -//! -//! ## `log` Compatibility -//! -//! The [`log`] crate provides a simple, lightweight logging facade for Rust. -//! 
While `tracing` builds upon `log`'s foundation with richer structured -//! diagnostic data, `log`'s simplicity and ubiquity make it the "lowest common -//! denominator" for text-based logging in Rust — a vast majority of Rust -//! libraries and applications either emit or consume `log` records. Therefore, -//! `tracing` provides multiple forms of interoperability with `log`: `tracing` -//! instrumentation can emit `log` records, and a compatibility layer enables -//! `tracing` [`Subscriber`]s to consume `log` records as `tracing` [`Event`]s. -//! -//! ### Emitting `log` Records -//! -//! This crate provides two feature flags, "log" and "log-always", which will -//! cause [spans] and [events] to emit `log` records. When the "log" feature is -//! enabled, if no `tracing` `Subscriber` is active, invoking an event macro or -//! creating a span with fields will emit a `log` record. This is intended -//! primarily for use in libraries which wish to emit diagnostics that can be -//! consumed by applications using `tracing` *or* `log`, without paying the -//! additional overhead of emitting both forms of diagnostics when `tracing` is -//! in use. -//! -//! Enabling the "log-always" feature will cause `log` records to be emitted -//! even if a `tracing` `Subscriber` _is_ set. This is intended to be used in -//! applications where a `log` `Logger` is being used to record a textual log, -//! and `tracing` is used only to record other forms of diagnostics (such as -//! metrics, profiling, or distributed tracing data). Unlike the "log" feature, -//! libraries generally should **not** enable the "log-always" feature, as doing -//! so will prevent applications from being able to opt out of the `log` records. -//! -//! See [here][flags] for more details on this crate's feature flags. -//! -//! The generated `log` records' messages will be a string representation of the -//! span or event's fields, and all additional information recorded by `log` -//! 
(target, verbosity level, module path, file, and line number) will also be -//! populated. Additionally, `log` records are also generated when spans are -//! entered, exited, and closed. Since these additional span lifecycle logs have -//! the potential to be very verbose, and don't include additional fields, they -//! will always be emitted at the `Trace` level, rather than inheriting the -//! level of the span that generated them. Furthermore, they are are categorized -//! under a separate `log` target, "tracing::span" (and its sub-target, -//! "tracing::span::active", for the logs on entering and exiting a span), which -//! may be enabled or disabled separately from other `log` records emitted by -//! `tracing`. -//! -//! ### Consuming `log` Records -//! -//! The [`tracing-log`] crate provides a compatibility layer which -//! allows a `tracing` [`Subscriber`] to consume `log` records as though they -//! were `tracing` [events]. This allows applications using `tracing` to record -//! the logs emitted by dependencies using `log` as events within the context of -//! the application's trace tree. See [that crate's documentation][log-tracer] -//! for details. -//! -//! [log-tracer]: https://docs.rs/tracing-log/latest/tracing_log/#convert-log-records-to-tracing-events -//! -//! ## Related Crates -//! -//! In addition to `tracing` and `tracing-core`, the [`tokio-rs/tracing`] repository -//! contains several additional crates designed to be used with the `tracing` ecosystem. -//! This includes a collection of `Subscriber` implementations, as well as utility -//! and adapter crates to assist in writing `Subscriber`s and instrumenting -//! applications. -//! -//! In particular, the following crates are likely to be of interest: -//! -//! - [`tracing-futures`] provides a compatibility layer with the `futures` -//! crate, allowing spans to be attached to `Future`s, `Stream`s, and `Executor`s. -//! - [`tracing-subscriber`] provides `Subscriber` implementations and -//! 
utilities for working with `Subscriber`s. This includes a [`FmtSubscriber`] -//! `FmtSubscriber` for logging formatted trace data to stdout, with similar -//! filtering and formatting to the [`env_logger`] crate. -//! - [`tracing-log`] provides a compatibility layer with the [`log`] crate, -//! allowing log messages to be recorded as `tracing` `Event`s within the -//! trace tree. This is useful when a project using `tracing` have -//! dependencies which use `log`. Note that if you're using -//! `tracing-subscriber`'s `FmtSubscriber`, you don't need to depend on -//! `tracing-log` directly. -//! - [`tracing-appender`] provides utilities for outputting tracing data, -//! including a file appender and non blocking writer. -//! -//! Additionally, there are also several third-party crates which are not -//! maintained by the `tokio` project. These include: -//! -//! - [`tracing-timing`] implements inter-event timing metrics on top of `tracing`. -//! It provides a subscriber that records the time elapsed between pairs of -//! `tracing` events and generates histograms. -//! - [`tracing-opentelemetry`] provides a subscriber for emitting traces to -//! [OpenTelemetry]-compatible distributed tracing systems. -//! - [`tracing-honeycomb`] Provides a layer that reports traces spanning multiple machines to [honeycomb.io]. Backed by [`tracing-distributed`]. -//! - [`tracing-distributed`] Provides a generic implementation of a layer that reports traces spanning multiple machines to some backend. -//! - [`tracing-actix-web`] provides `tracing` integration for the `actix-web` web framework. -//! - [`tracing-actix`] provides `tracing` integration for the `actix` actor -//! framework. -//! - [`axum-insights`] provides `tracing` integration and Application insights export for the `axum` web framework. -//! - [`tracing-gelf`] implements a subscriber for exporting traces in Greylog -//! GELF format. -//! - [`tracing-coz`] provides integration with the [coz] causal profiler -//! 
(Linux-only). -//! - [`tracing-bunyan-formatter`] provides a layer implementation that reports events and spans -//! in [bunyan] format, enriched with timing information. -//! - [`tracing-wasm`] provides a `Subscriber`/`Layer` implementation that reports -//! events and spans via browser `console.log` and [User Timing API (`window.performance`)]. -//! - [`tracing-web`] provides a layer implementation of level-aware logging of events -//! to web browsers' `console.*` and span events to the [User Timing API (`window.performance`)]. -//! - [`tide-tracing`] provides a [tide] middleware to trace all incoming requests and responses. -//! - [`test-log`] takes care of initializing `tracing` for tests, based on -//! environment variables with an `env_logger` compatible syntax. -//! - [`tracing-unwrap`] provides convenience methods to report failed unwraps -//! on `Result` or `Option` types to a `Subscriber`. -//! - [`diesel-tracing`] provides integration with [`diesel`] database connections. -//! - [`tracing-tracy`] provides a way to collect [Tracy] profiles in instrumented -//! applications. -//! - [`tracing-elastic-apm`] provides a layer for reporting traces to [Elastic APM]. -//! - [`tracing-etw`] provides a layer for emitting Windows [ETW] events. -//! - [`tracing-fluent-assertions`] provides a fluent assertions-style testing -//! framework for validating the behavior of `tracing` spans. -//! - [`sentry-tracing`] provides a layer for reporting events and traces to [Sentry]. -//! - [`tracing-forest`] provides a subscriber that preserves contextual coherence by -//! grouping together logs from the same spans during writing. -//! - [`tracing-loki`] provides a layer for shipping logs to [Grafana Loki]. -//! - [`tracing-logfmt`] provides a layer that formats events and spans into the logfmt format. -//! - [`reqwest-tracing`] provides a middleware to trace [`reqwest`] HTTP requests. -//! - [`tracing-cloudwatch`] provides a layer that sends events to AWS CloudWatch Logs. -//! 
- [`clippy-tracing`] provides a tool to add, remove and check for `tracing::instrument`. -//! -//! If you're the maintainer of a `tracing` ecosystem crate not listed above, -//! please let us know! We'd love to add your project to the list! -//! -//! [`tracing-opentelemetry`]: https://crates.io/crates/tracing-opentelemetry -//! [OpenTelemetry]: https://opentelemetry.io/ -//! [`tracing-honeycomb`]: https://crates.io/crates/tracing-honeycomb -//! [`tracing-distributed`]: https://crates.io/crates/tracing-distributed -//! [honeycomb.io]: https://www.honeycomb.io/ -//! [`tracing-actix-web`]: https://crates.io/crates/tracing-actix-web -//! [`tracing-actix`]: https://crates.io/crates/tracing-actix -//! [`axum-insights`]: https://crates.io/crates/axum-insights -//! [`tracing-gelf`]: https://crates.io/crates/tracing-gelf -//! [`tracing-coz`]: https://crates.io/crates/tracing-coz -//! [coz]: https://github.com/plasma-umass/coz -//! [`tracing-bunyan-formatter`]: https://crates.io/crates/tracing-bunyan-formatter -//! [bunyan]: https://github.com/trentm/node-bunyan -//! [`tracing-wasm`]: https://docs.rs/tracing-wasm -//! [`tracing-web`]: https://docs.rs/tracing-web -//! [User Timing API (`window.performance`)]: https://developer.mozilla.org/en-US/docs/Web/API/User_Timing_API -//! [`tide-tracing`]: https://crates.io/crates/tide-tracing -//! [tide]: https://crates.io/crates/tide -//! [`test-log`]: https://crates.io/crates/test-log -//! [`tracing-unwrap`]: https://docs.rs/tracing-unwrap -//! [`diesel`]: https://crates.io/crates/diesel -//! [`diesel-tracing`]: https://crates.io/crates/diesel-tracing -//! [`tracing-tracy`]: https://crates.io/crates/tracing-tracy -//! [Tracy]: https://github.com/wolfpld/tracy -//! [`tracing-elastic-apm`]: https://crates.io/crates/tracing-elastic-apm -//! [Elastic APM]: https://www.elastic.co/apm -//! [`tracing-etw`]: https://github.com/microsoft/rust_win_etw/tree/main/win_etw_tracing -//! 
[ETW]: https://docs.microsoft.com/en-us/windows/win32/etw/about-event-tracing -//! [`tracing-fluent-assertions`]: https://crates.io/crates/tracing-fluent-assertions -//! [`sentry-tracing`]: https://crates.io/crates/sentry-tracing -//! [Sentry]: https://sentry.io/welcome/ -//! [`tracing-forest`]: https://crates.io/crates/tracing-forest -//! [`tracing-loki`]: https://crates.io/crates/tracing-loki -//! [Grafana Loki]: https://grafana.com/oss/loki/ -//! [`tracing-logfmt`]: https://crates.io/crates/tracing-logfmt -//! [`reqwest-tracing`]: https://crates.io/crates/reqwest-tracing -//! [`reqwest`]: https://crates.io/crates/reqwest -//! [`tracing-cloudwatch`]: https://crates.io/crates/tracing-cloudwatch -//! [`clippy-tracing`]: https://crates.io/crates/clippy-tracing -//! -//!
-//!     Note: Some of these ecosystem crates are currently
-//!     unreleased and/or in earlier stages of development. They may be less stable
-//!     than tracing and tracing-core.
-//! 
-//! -//! ## Crate Feature Flags -//! -//! The following crate [feature flags] are available: -//! -//! * A set of features controlling the [static verbosity level]. -//! * `log`: causes trace instrumentation points to emit [`log`] records as well -//! as trace events, if a default `tracing` subscriber has not been set. This -//! is intended for use in libraries whose users may be using either `tracing` -//! or `log`. -//! * `log-always`: Emit `log` records from all `tracing` spans and events, even -//! if a `tracing` subscriber has been set. This should be set only by -//! applications which intend to collect traces and logs separately; if an -//! adapter is used to convert `log` records into `tracing` events, this will -//! cause duplicate events to occur. -//! * `attributes`: Includes support for the `#[instrument]` attribute. -//! This is on by default, but does bring in the `syn` crate as a dependency, -//! which may add to the compile time of crates that do not already use it. -//! * `std`: Depend on the Rust standard library (enabled by default). -//! -//! `no_std` users may disable this feature with `default-features = false`: -//! -//! ```toml -//! [dependencies] -//! tracing = { version = "0.1.38", default-features = false } -//! ``` -//! -//!
-//!     Note: tracing's no_std support
-//!     requires liballoc.
-//! 
-//! -//! ### Unstable Features -//! -//! These feature flags enable **unstable** features. The public API may break in 0.1.x -//! releases. To enable these features, the `--cfg tracing_unstable` must be passed to -//! `rustc` when compiling. -//! -//! The following unstable feature flags are currently available: -//! -//! * `valuable`: Enables support for recording [field values] using the -//! [`valuable`] crate. -//! -//! #### Enabling Unstable Features -//! -//! The easiest way to set the `tracing_unstable` cfg is to use the `RUSTFLAGS` -//! env variable when running `cargo` commands: -//! -//! ```shell -//! RUSTFLAGS="--cfg tracing_unstable" cargo build -//! ``` -//! Alternatively, the following can be added to the `.cargo/config` file in a -//! project to automatically enable the cfg flag for that project: -//! -//! ```toml -//! [build] -//! rustflags = ["--cfg", "tracing_unstable"] -//! ``` -//! -//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section -//! [field values]: crate::field -//! [`valuable`]: https://crates.io/crates/valuable -//! -//! ## Supported Rust Versions -//! -//! Tracing is built against the latest stable release. The minimum supported -//! version is 1.56. The current Tracing version is not guaranteed to build on -//! Rust versions earlier than the minimum supported version. -//! -//! Tracing follows the same compiler support policies as the rest of the Tokio -//! project. The current stable Rust compiler and the three most recent minor -//! versions before it will always be supported. For example, if the current -//! stable compiler version is 1.69, the minimum supported version will not be -//! increased past 1.66, three minor versions prior. Increasing the minimum -//! supported compiler version is not considered a semver breaking change as -//! long as doing so complies with this policy. -//! -//! [`log`]: https://docs.rs/log/0.4.6/log/ -//! [span]: mod@span -//! [spans]: mod@span -//! 
[`Span`]: span::Span -//! [`in_scope`]: span::Span::in_scope -//! [event]: Event -//! [events]: Event -//! [`Subscriber`]: subscriber::Subscriber -//! [Subscriber::event]: subscriber::Subscriber::event -//! [`enter`]: subscriber::Subscriber::enter -//! [`exit`]: subscriber::Subscriber::exit -//! [`enabled`]: subscriber::Subscriber::enabled -//! [metadata]: Metadata -//! [`field::display`]: field::display -//! [`field::debug`]: field::debug -//! [`set_global_default`]: subscriber::set_global_default -//! [`with_default`]: subscriber::with_default -//! [`tokio-rs/tracing`]: https://github.com/tokio-rs/tracing -//! [`tracing-futures`]: https://crates.io/crates/tracing-futures -//! [`tracing-subscriber`]: https://crates.io/crates/tracing-subscriber -//! [`tracing-log`]: https://crates.io/crates/tracing-log -//! [`tracing-timing`]: https://crates.io/crates/tracing-timing -//! [`tracing-appender`]: https://crates.io/crates/tracing-appender -//! [`env_logger`]: https://crates.io/crates/env_logger -//! [`FmtSubscriber`]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/struct.Subscriber.html -//! [static verbosity level]: level_filters#compile-time-filters -//! [instrument]: https://docs.rs/tracing-attributes/latest/tracing_attributes/attr.instrument.html -//! 
[flags]: #crate-feature-flags -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(docsrs, feature(doc_cfg), deny(rustdoc::broken_intra_doc_links))] -#![doc( - html_logo_url = "https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/logo-type.png", - issue_tracker_base_url = "https://github.com/tokio-rs/tracing/issues/" -)] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - unreachable_pub, - bad_style, - dead_code, - improper_ctypes, - non_shorthand_field_patterns, - no_mangle_generic_items, - overflowing_literals, - path_statements, - patterns_in_fns_without_body, - private_in_public, - unconditional_recursion, - unused, - unused_allocation, - unused_comparisons, - unused_parens, - while_true -)] - -#[cfg(not(feature = "std"))] -extern crate alloc; - -// Somehow this `use` statement is necessary for us to re-export the `core` -// macros on Rust 1.26.0. I'm not sure how this makes it work, but it does. -#[allow(unused_imports)] -#[doc(hidden)] -use tracing_core::*; - -#[doc(inline)] -pub use self::instrument::Instrument; -pub use self::{dispatcher::Dispatch, event::Event, field::Value, subscriber::Subscriber}; - -#[doc(hidden)] -pub use self::span::Id; - -#[doc(hidden)] -pub use tracing_core::{ - callsite::{self, Callsite}, - metadata, -}; -pub use tracing_core::{event, Level, Metadata}; - -#[doc(inline)] -pub use self::span::Span; -#[cfg(feature = "attributes")] -#[cfg_attr(docsrs, doc(cfg(feature = "attributes")))] -#[doc(inline)] -pub use tracing_attributes::instrument; - -#[macro_use] -mod macros; - -pub mod dispatcher; -pub mod field; -/// Attach a span to a `std::future::Future`. -pub mod instrument; -pub mod level_filters; -pub mod span; -pub(crate) mod stdlib; -pub mod subscriber; - -#[doc(hidden)] -pub mod __macro_support { - pub use crate::callsite::Callsite; - use crate::{subscriber::Interest, Metadata}; - pub use core::concat; - - /// Callsite implementation used by macro-generated code. 
- /// - /// /!\ WARNING: This is *not* a stable API! /!\ - /// This type, and all code contained in the `__macro_support` module, is - /// a *private* API of `tracing`. It is exposed publicly because it is used - /// by the `tracing` macros, but it is not part of the stable versioned API. - /// Breaking changes to this module may occur in small-numbered versions - /// without warning. - pub use tracing_core::callsite::DefaultCallsite as MacroCallsite; - - /// /!\ WARNING: This is *not* a stable API! /!\ - /// This function, and all code contained in the `__macro_support` module, is - /// a *private* API of `tracing`. It is exposed publicly because it is used - /// by the `tracing` macros, but it is not part of the stable versioned API. - /// Breaking changes to this module may occur in small-numbered versions - /// without warning. - pub fn __is_enabled(meta: &Metadata<'static>, interest: Interest) -> bool { - interest.is_always() || crate::dispatcher::get_default(|default| default.enabled(meta)) - } - - /// /!\ WARNING: This is *not* a stable API! /!\ - /// This function, and all code contained in the `__macro_support` module, is - /// a *private* API of `tracing`. It is exposed publicly because it is used - /// by the `tracing` macros, but it is not part of the stable versioned API. - /// Breaking changes to this module may occur in small-numbered versions - /// without warning. - #[inline] - #[cfg(feature = "log")] - pub fn __disabled_span(meta: &'static Metadata<'static>) -> crate::Span { - crate::Span::new_disabled(meta) - } - - /// /!\ WARNING: This is *not* a stable API! /!\ - /// This function, and all code contained in the `__macro_support` module, is - /// a *private* API of `tracing`. It is exposed publicly because it is used - /// by the `tracing` macros, but it is not part of the stable versioned API. - /// Breaking changes to this module may occur in small-numbered versions - /// without warning. 
- #[inline] - #[cfg(not(feature = "log"))] - pub fn __disabled_span(_: &'static Metadata<'static>) -> crate::Span { - crate::Span::none() - } - - /// /!\ WARNING: This is *not* a stable API! /!\ - /// This function, and all code contained in the `__macro_support` module, is - /// a *private* API of `tracing`. It is exposed publicly because it is used - /// by the `tracing` macros, but it is not part of the stable versioned API. - /// Breaking changes to this module may occur in small-numbered versions - /// without warning. - #[cfg(feature = "log")] - pub fn __tracing_log( - meta: &Metadata<'static>, - logger: &'static dyn log::Log, - log_meta: log::Metadata<'_>, - values: &tracing_core::field::ValueSet<'_>, - ) { - logger.log( - &crate::log::Record::builder() - .file(meta.file()) - .module_path(meta.module_path()) - .line(meta.line()) - .metadata(log_meta) - .args(format_args!( - "{}", - crate::log::LogValueSet { - values, - is_first: true - } - )) - .build(), - ); - } -} - -#[cfg(feature = "log")] -#[doc(hidden)] -pub mod log { - use core::fmt; - pub use log::*; - use tracing_core::field::{Field, ValueSet, Visit}; - - /// Utility to format [`ValueSet`]s for logging. 
- pub(crate) struct LogValueSet<'a> { - pub(crate) values: &'a ValueSet<'a>, - pub(crate) is_first: bool, - } - - impl<'a> fmt::Display for LogValueSet<'a> { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - struct LogVisitor<'a, 'b> { - f: &'a mut fmt::Formatter<'b>, - is_first: bool, - result: fmt::Result, - } - - impl Visit for LogVisitor<'_, '_> { - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - let res = if self.is_first { - self.is_first = false; - if field.name() == "message" { - write!(self.f, "{:?}", value) - } else { - write!(self.f, "{}={:?}", field.name(), value) - } - } else { - write!(self.f, " {}={:?}", field.name(), value) - }; - if let Err(err) = res { - self.result = self.result.and(Err(err)); - } - } - - fn record_str(&mut self, field: &Field, value: &str) { - if field.name() == "message" { - self.record_debug(field, &format_args!("{}", value)) - } else { - self.record_debug(field, &value) - } - } - } - - let mut visit = LogVisitor { - f, - is_first: self.is_first, - result: Ok(()), - }; - self.values.record(&mut visit); - visit.result - } - } -} - -mod sealed { - pub trait Sealed {} -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing/src/macros.rs s390-tools-2.33.1/rust-vendor/tracing/src/macros.rs --- s390-tools-2.31.0/rust-vendor/tracing/src/macros.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/src/macros.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,3165 +0,0 @@ -/// Constructs a new span. -/// -/// See [the top-level documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [lib]: crate#using-the-macros -/// -/// # Examples -/// -/// Creating a new span: -/// ``` -/// # use tracing::{span, Level}; -/// # fn main() { -/// let span = span!(Level::TRACE, "my span"); -/// let _enter = span.enter(); -/// // do work inside the span... -/// # } -/// ``` -#[macro_export] -macro_rules! 
span { - (target: $target:expr, parent: $parent:expr, $lvl:expr, $name:expr) => { - $crate::span!(target: $target, parent: $parent, $lvl, $name,) - }; - (target: $target:expr, parent: $parent:expr, $lvl:expr, $name:expr, $($fields:tt)*) => { - { - use $crate::__macro_support::Callsite as _; - static __CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! { - name: $name, - kind: $crate::metadata::Kind::SPAN, - target: $target, - level: $lvl, - fields: $($fields)* - }; - let mut interest = $crate::subscriber::Interest::never(); - if $crate::level_enabled!($lvl) - && { interest = __CALLSITE.interest(); !interest.is_never() } - && $crate::__macro_support::__is_enabled(__CALLSITE.metadata(), interest) - { - let meta = __CALLSITE.metadata(); - // span with explicit parent - $crate::Span::child_of( - $parent, - meta, - &$crate::valueset!(meta.fields(), $($fields)*), - ) - } else { - let span = $crate::__macro_support::__disabled_span(__CALLSITE.metadata()); - $crate::if_log_enabled! { $lvl, { - span.record_all(&$crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*)); - }}; - span - } - } - }; - (target: $target:expr, $lvl:expr, $name:expr, $($fields:tt)*) => { - { - use $crate::__macro_support::Callsite as _; - static __CALLSITE: $crate::callsite::DefaultCallsite = $crate::callsite2! { - name: $name, - kind: $crate::metadata::Kind::SPAN, - target: $target, - level: $lvl, - fields: $($fields)* - }; - let mut interest = $crate::subscriber::Interest::never(); - if $crate::level_enabled!($lvl) - && { interest = __CALLSITE.interest(); !interest.is_never() } - && $crate::__macro_support::__is_enabled(__CALLSITE.metadata(), interest) - { - let meta = __CALLSITE.metadata(); - // span with contextual parent - $crate::Span::new( - meta, - &$crate::valueset!(meta.fields(), $($fields)*), - ) - } else { - let span = $crate::__macro_support::__disabled_span(__CALLSITE.metadata()); - $crate::if_log_enabled! 
{ $lvl, { - span.record_all(&$crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*)); - }}; - span - } - } - }; - (target: $target:expr, parent: $parent:expr, $lvl:expr, $name:expr) => { - $crate::span!(target: $target, parent: $parent, $lvl, $name,) - }; - (parent: $parent:expr, $lvl:expr, $name:expr, $($fields:tt)*) => { - $crate::span!( - target: module_path!(), - parent: $parent, - $lvl, - $name, - $($fields)* - ) - }; - (parent: $parent:expr, $lvl:expr, $name:expr) => { - $crate::span!( - target: module_path!(), - parent: $parent, - $lvl, - $name, - ) - }; - (target: $target:expr, $lvl:expr, $name:expr, $($fields:tt)*) => { - $crate::span!( - target: $target, - $lvl, - $name, - $($fields)* - ) - }; - (target: $target:expr, $lvl:expr, $name:expr) => { - $crate::span!(target: $target, $lvl, $name,) - }; - ($lvl:expr, $name:expr, $($fields:tt)*) => { - $crate::span!( - target: module_path!(), - $lvl, - $name, - $($fields)* - ) - }; - ($lvl:expr, $name:expr) => { - $crate::span!( - target: module_path!(), - $lvl, - $name, - ) - }; -} - -/// Constructs a span at the trace level. -/// -/// [Fields] and [attributes] are set using the same syntax as the [`span!`] -/// macro. -/// -/// See [the top-level documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [lib]: crate#using-the-macros -/// [attributes]: crate#configuring-attributes -/// [Fields]: crate#recording-fields -/// [`span!`]: crate::span! -/// -/// # Examples -/// -/// ```rust -/// # use tracing::{trace_span, span, Level}; -/// # fn main() { -/// trace_span!("my_span"); -/// // is equivalent to: -/// span!(Level::TRACE, "my_span"); -/// # } -/// ``` -/// -/// ```rust -/// # use tracing::{trace_span, span, Level}; -/// # fn main() { -/// let span = trace_span!("my span"); -/// span.in_scope(|| { -/// // do work inside the span... -/// }); -/// # } -/// ``` -#[macro_export] -macro_rules! 
trace_span { - (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - parent: $parent, - $crate::Level::TRACE, - $name, - $($field)* - ) - }; - (target: $target:expr, parent: $parent:expr, $name:expr) => { - $crate::trace_span!(target: $target, parent: $parent, $name,) - }; - (parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - parent: $parent, - $crate::Level::TRACE, - $name, - $($field)* - ) - }; - (parent: $parent:expr, $name:expr) => { - $crate::trace_span!(parent: $parent, $name,) - }; - (target: $target:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - $crate::Level::TRACE, - $name, - $($field)* - ) - }; - (target: $target:expr, $name:expr) => { - $crate::trace_span!(target: $target, $name,) - }; - ($name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - $crate::Level::TRACE, - $name, - $($field)* - ) - }; - ($name:expr) => { $crate::trace_span!($name,) }; -} - -/// Constructs a span at the debug level. -/// -/// [Fields] and [attributes] are set using the same syntax as the [`span!`] -/// macro. -/// -/// See [the top-level documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [lib]: crate#using-the-macros -/// [attributes]: crate#configuring-attributes -/// [Fields]: crate#recording-fields -/// [`span!`]: crate::span! -/// -/// # Examples -/// -/// ```rust -/// # use tracing::{debug_span, span, Level}; -/// # fn main() { -/// debug_span!("my_span"); -/// // is equivalent to: -/// span!(Level::DEBUG, "my_span"); -/// # } -/// ``` -/// -/// ```rust -/// # use tracing::debug_span; -/// # fn main() { -/// let span = debug_span!("my span"); -/// span.in_scope(|| { -/// // do work inside the span... -/// }); -/// # } -/// ``` -#[macro_export] -macro_rules! 
debug_span { - (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - parent: $parent, - $crate::Level::DEBUG, - $name, - $($field)* - ) - }; - (target: $target:expr, parent: $parent:expr, $name:expr) => { - $crate::debug_span!(target: $target, parent: $parent, $name,) - }; - (parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - parent: $parent, - $crate::Level::DEBUG, - $name, - $($field)* - ) - }; - (parent: $parent:expr, $name:expr) => { - $crate::debug_span!(parent: $parent, $name,) - }; - (target: $target:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - $crate::Level::DEBUG, - $name, - $($field)* - ) - }; - (target: $target:expr, $name:expr) => { - $crate::debug_span!(target: $target, $name,) - }; - ($name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - $crate::Level::DEBUG, - $name, - $($field)* - ) - }; - ($name:expr) => {$crate::debug_span!($name,)}; -} - -/// Constructs a span at the info level. -/// -/// [Fields] and [attributes] are set using the same syntax as the [`span!`] -/// macro. -/// -/// See [the top-level documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [lib]: crate#using-the-macros -/// [attributes]: crate#configuring-attributes -/// [Fields]: crate#recording-fields -/// [`span!`]: crate::span! -/// -/// # Examples -/// -/// ```rust -/// # use tracing::{span, info_span, Level}; -/// # fn main() { -/// info_span!("my_span"); -/// // is equivalent to: -/// span!(Level::INFO, "my_span"); -/// # } -/// ``` -/// -/// ```rust -/// # use tracing::info_span; -/// # fn main() { -/// let span = info_span!("my span"); -/// span.in_scope(|| { -/// // do work inside the span... -/// }); -/// # } -/// ``` -#[macro_export] -macro_rules! 
info_span { - (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - parent: $parent, - $crate::Level::INFO, - $name, - $($field)* - ) - }; - (target: $target:expr, parent: $parent:expr, $name:expr) => { - $crate::info_span!(target: $target, parent: $parent, $name,) - }; - (parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - parent: $parent, - $crate::Level::INFO, - $name, - $($field)* - ) - }; - (parent: $parent:expr, $name:expr) => { - $crate::info_span!(parent: $parent, $name,) - }; - (target: $target:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - $crate::Level::INFO, - $name, - $($field)* - ) - }; - (target: $target:expr, $name:expr) => { - $crate::info_span!(target: $target, $name,) - }; - ($name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - $crate::Level::INFO, - $name, - $($field)* - ) - }; - ($name:expr) => {$crate::info_span!($name,)}; -} - -/// Constructs a span at the warn level. -/// -/// [Fields] and [attributes] are set using the same syntax as the [`span!`] -/// macro. -/// -/// See [the top-level documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [lib]: crate#using-the-macros -/// [attributes]: crate#configuring-attributes -/// [Fields]: crate#recording-fields -/// [`span!`]: crate::span! -/// -/// # Examples -/// -/// ```rust -/// # use tracing::{warn_span, span, Level}; -/// # fn main() { -/// warn_span!("my_span"); -/// // is equivalent to: -/// span!(Level::WARN, "my_span"); -/// # } -/// ``` -/// -/// ```rust -/// use tracing::warn_span; -/// # fn main() { -/// let span = warn_span!("my span"); -/// span.in_scope(|| { -/// // do work inside the span... -/// }); -/// # } -/// ``` -#[macro_export] -macro_rules! 
warn_span { - (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - parent: $parent, - $crate::Level::WARN, - $name, - $($field)* - ) - }; - (target: $target:expr, parent: $parent:expr, $name:expr) => { - $crate::warn_span!(target: $target, parent: $parent, $name,) - }; - (parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - parent: $parent, - $crate::Level::WARN, - $name, - $($field)* - ) - }; - (parent: $parent:expr, $name:expr) => { - $crate::warn_span!(parent: $parent, $name,) - }; - (target: $target:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - $crate::Level::WARN, - $name, - $($field)* - ) - }; - (target: $target:expr, $name:expr) => { - $crate::warn_span!(target: $target, $name,) - }; - ($name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - $crate::Level::WARN, - $name, - $($field)* - ) - }; - ($name:expr) => {$crate::warn_span!($name,)}; -} -/// Constructs a span at the error level. -/// -/// [Fields] and [attributes] are set using the same syntax as the [`span!`] -/// macro. -/// -/// See [the top-level documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [lib]: crate#using-the-macros -/// [attributes]: crate#configuring-attributes -/// [Fields]: crate#recording-fields -/// [`span!`]: crate::span! -/// -/// # Examples -/// -/// ```rust -/// # use tracing::{span, error_span, Level}; -/// # fn main() { -/// error_span!("my_span"); -/// // is equivalent to: -/// span!(Level::ERROR, "my_span"); -/// # } -/// ``` -/// -/// ```rust -/// # use tracing::error_span; -/// # fn main() { -/// let span = error_span!("my span"); -/// span.in_scope(|| { -/// // do work inside the span... -/// }); -/// # } -/// ``` -#[macro_export] -macro_rules! 
error_span { - (target: $target:expr, parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - parent: $parent, - $crate::Level::ERROR, - $name, - $($field)* - ) - }; - (target: $target:expr, parent: $parent:expr, $name:expr) => { - $crate::error_span!(target: $target, parent: $parent, $name,) - }; - (parent: $parent:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - parent: $parent, - $crate::Level::ERROR, - $name, - $($field)* - ) - }; - (parent: $parent:expr, $name:expr) => { - $crate::error_span!(parent: $parent, $name,) - }; - (target: $target:expr, $name:expr, $($field:tt)*) => { - $crate::span!( - target: $target, - $crate::Level::ERROR, - $name, - $($field)* - ) - }; - (target: $target:expr, $name:expr) => { - $crate::error_span!(target: $target, $name,) - }; - ($name:expr, $($field:tt)*) => { - $crate::span!( - target: module_path!(), - $crate::Level::ERROR, - $name, - $($field)* - ) - }; - ($name:expr) => {$crate::error_span!($name,)}; -} - -/// Constructs a new `Event`. -/// -/// The event macro is invoked with a `Level` and up to 32 key-value fields. -/// Optionally, a format string and arguments may follow the fields; this will -/// be used to construct an implicit field named "message". -/// -/// See [the top-level documentation][lib] for details on the syntax accepted by -/// this macro. 
-/// -/// [lib]: crate#using-the-macros -/// -/// # Examples -/// -/// ```rust -/// use tracing::{event, Level}; -/// -/// # fn main() { -/// let data = (42, "forty-two"); -/// let private_data = "private"; -/// let error = "a bad error"; -/// -/// event!(Level::ERROR, %error, "Received error"); -/// event!( -/// target: "app_events", -/// Level::WARN, -/// private_data, -/// ?data, -/// "App warning: {}", -/// error -/// ); -/// event!(name: "answer", Level::INFO, the_answer = data.0); -/// event!(Level::INFO, the_answer = data.0); -/// # } -/// ``` -/// -// /// Note that *unlike `span!`*, `event!` requires a value for all fields. As -// /// events are recorded immediately when the macro is invoked, there is no -// /// opportunity for fields to be recorded later. A trailing comma on the final -// /// field is valid. -// /// -// /// For example, the following does not compile: -// /// ```rust,compile_fail -// /// # use tracing::{Level, event}; -// /// # fn main() { -// /// event!(Level::INFO, foo = 5, bad_field, bar = "hello") -// /// #} -// /// ``` -#[macro_export] -macro_rules! event { - // Name / target / parent. - (name: $name:expr, target: $target:expr, parent: $parent:expr, $lvl:expr, { $($fields:tt)* } )=> ({ - use $crate::__macro_support::Callsite as _; - static __CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! 
{ - name: $name, - kind: $crate::metadata::Kind::EVENT, - target: $target, - level: $lvl, - fields: $($fields)* - }; - - let enabled = $crate::level_enabled!($lvl) && { - let interest = __CALLSITE.interest(); - !interest.is_never() && $crate::__macro_support::__is_enabled(__CALLSITE.metadata(), interest) - }; - if enabled { - (|value_set: $crate::field::ValueSet| { - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &value_set - ); - let meta = __CALLSITE.metadata(); - // event with explicit parent - $crate::Event::child_of( - $parent, - meta, - &value_set - ); - })($crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*)); - } else { - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &$crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*) - ); - } - }); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( - $crate::event!( - name: $name, - target: $target, - parent: $parent, - $lvl, - { message = ::core::format_args!($($arg)+), $($fields)* } - ) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $lvl:expr, $($k:ident).+ = $($fields:tt)* ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $lvl, { $($k).+ = $($fields)* }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $lvl:expr, $($arg:tt)+) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $lvl, { $($arg)+ }) - ); - - // Name / target. - (name: $name:expr, target: $target:expr, $lvl:expr, { $($fields:tt)* } )=> ({ - use $crate::__macro_support::Callsite as _; - static __CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! 
{ - name: $name, - kind: $crate::metadata::Kind::EVENT, - target: $target, - level: $lvl, - fields: $($fields)* - }; - let enabled = $crate::level_enabled!($lvl) && { - let interest = __CALLSITE.interest(); - !interest.is_never() && $crate::__macro_support::__is_enabled(__CALLSITE.metadata(), interest) - }; - if enabled { - (|value_set: $crate::field::ValueSet| { - let meta = __CALLSITE.metadata(); - // event with contextual parent - $crate::Event::dispatch( - meta, - &value_set - ); - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &value_set - ); - })($crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*)); - } else { - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &$crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*) - ); - } - }); - (name: $name:expr, target: $target:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( - $crate::event!( - name: $name, - target: $target, - $lvl, - { message = ::core::format_args!($($arg)+), $($fields)* } - ) - ); - (name: $name:expr, target: $target:expr, $lvl:expr, $($k:ident).+ = $($fields:tt)* ) => ( - $crate::event!(name: $name, target: $target, $lvl, { $($k).+ = $($fields)* }) - ); - (name: $name:expr, target: $target:expr, $lvl:expr, $($arg:tt)+) => ( - $crate::event!(name: $name, target: $target, $lvl, { $($arg)+ }) - ); - - // Target / parent. - (target: $target:expr, parent: $parent:expr, $lvl:expr, { $($fields:tt)* } )=> ({ - use $crate::__macro_support::Callsite as _; - static __CALLSITE: $crate::callsite::DefaultCallsite = $crate::callsite2! 
{ - name: $crate::__macro_support::concat!( - "event ", - file!(), - ":", - line!() - ), - kind: $crate::metadata::Kind::EVENT, - target: $target, - level: $lvl, - fields: $($fields)* - }; - - let enabled = $crate::level_enabled!($lvl) && { - let interest = __CALLSITE.interest(); - !interest.is_never() && $crate::__macro_support::__is_enabled(__CALLSITE.metadata(), interest) - }; - if enabled { - (|value_set: $crate::field::ValueSet| { - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &value_set - ); - let meta = __CALLSITE.metadata(); - // event with explicit parent - $crate::Event::child_of( - $parent, - meta, - &value_set - ); - })($crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*)); - } else { - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &$crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*) - ); - } - }); - (target: $target:expr, parent: $parent:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( - $crate::event!( - target: $target, - parent: $parent, - $lvl, - { message = ::core::format_args!($($arg)+), $($fields)* } - ) - ); - (target: $target:expr, parent: $parent:expr, $lvl:expr, $($k:ident).+ = $($fields:tt)* ) => ( - $crate::event!(target: $target, parent: $parent, $lvl, { $($k).+ = $($fields)* }) - ); - (target: $target:expr, parent: $parent:expr, $lvl:expr, $($arg:tt)+) => ( - $crate::event!(target: $target, parent: $parent, $lvl, { $($arg)+ }) - ); - - // Name / parent. - (name: $name:expr, parent: $parent:expr, $lvl:expr, { $($fields:tt)* } )=> ({ - use $crate::__macro_support::Callsite as _; - static __CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! 
{ - name: $name, - kind: $crate::metadata::Kind::EVENT, - target: module_path!(), - level: $lvl, - fields: $($fields)* - }; - - let enabled = $crate::level_enabled!($lvl) && { - let interest = __CALLSITE.interest(); - !interest.is_never() && __CALLSITE.is_enabled(interest) - }; - if enabled { - (|value_set: $crate::field::ValueSet| { - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &value_set - ); - let meta = __CALLSITE.metadata(); - // event with explicit parent - $crate::Event::child_of( - $parent, - meta, - &value_set - ); - })($crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*)); - } else { - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &$crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*) - ); - } - }); - (name: $name:expr, parent: $parent:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( - $crate::event!( - name: $name, - parent: $parent, - $lvl, - { message = ::core::format_args!($($arg)+), $($fields)* } - ) - ); - (name: $name:expr, parent: $parent:expr, $lvl:expr, $($k:ident).+ = $($fields:tt)* ) => ( - $crate::event!(name: $name, parent: $parent, $lvl, { $($k).+ = $($fields)* }) - ); - (name: $name:expr, parent: $parent:expr, $lvl:expr, $($arg:tt)+) => ( - $crate::event!(name: $name, parent: $parent, $lvl, { $($arg)+ }) - ); - - // Name. - (name: $name:expr, $lvl:expr, { $($fields:tt)* } )=> ({ - use $crate::__macro_support::Callsite as _; - static __CALLSITE: $crate::__macro_support::MacroCallsite = $crate::callsite2! 
{ - name: $name, - kind: $crate::metadata::Kind::EVENT, - target: module_path!(), - level: $lvl, - fields: $($fields)* - }; - let enabled = $crate::level_enabled!($lvl) && { - let interest = __CALLSITE.interest(); - !interest.is_never() && $crate::__macro_support::__is_enabled(__CALLSITE.metadata(), interest) - }; - if enabled { - (|value_set: $crate::field::ValueSet| { - let meta = __CALLSITE.metadata(); - // event with contextual parent - $crate::Event::dispatch( - meta, - &value_set - ); - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &value_set - ); - })($crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*)); - } else { - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &$crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*) - ); - } - }); - (name: $name:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( - $crate::event!( - name: $name, - $lvl, - { message = ::core::format_args!($($arg)+), $($fields)* } - ) - ); - (name: $name:expr, $lvl:expr, $($k:ident).+ = $($fields:tt)* ) => ( - $crate::event!(name: $name, $lvl, { $($k).+ = $($fields)* }) - ); - (name: $name:expr, $lvl:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, $lvl, { $($arg)+ }) - ); - - // Target. - (target: $target:expr, $lvl:expr, { $($fields:tt)* } )=> ({ - use $crate::__macro_support::Callsite as _; - static __CALLSITE: $crate::callsite::DefaultCallsite = $crate::callsite2! 
{ - name: $crate::__macro_support::concat!( - "event ", - file!(), - ":", - line!() - ), - kind: $crate::metadata::Kind::EVENT, - target: $target, - level: $lvl, - fields: $($fields)* - }; - let enabled = $crate::level_enabled!($lvl) && { - let interest = __CALLSITE.interest(); - !interest.is_never() && $crate::__macro_support::__is_enabled(__CALLSITE.metadata(), interest) - }; - if enabled { - (|value_set: $crate::field::ValueSet| { - let meta = __CALLSITE.metadata(); - // event with contextual parent - $crate::Event::dispatch( - meta, - &value_set - ); - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &value_set - ); - })($crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*)); - } else { - $crate::__tracing_log!( - $lvl, - __CALLSITE, - &$crate::valueset!(__CALLSITE.metadata().fields(), $($fields)*) - ); - } - }); - (target: $target:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( - $crate::event!( - target: $target, - $lvl, - { message = ::core::format_args!($($arg)+), $($fields)* } - ) - ); - (target: $target:expr, $lvl:expr, $($k:ident).+ = $($fields:tt)* ) => ( - $crate::event!(target: $target, $lvl, { $($k).+ = $($fields)* }) - ); - (target: $target:expr, $lvl:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, $lvl, { $($arg)+ }) - ); - - // Parent. 
- (parent: $parent:expr, $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $lvl, - { message = ::core::format_args!($($arg)+), $($fields)* } - ) - ); - (parent: $parent:expr, $lvl:expr, $($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $lvl, - { $($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, $lvl:expr, ?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $lvl, - { ?$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, $lvl:expr, %$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $lvl, - { %$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, $lvl:expr, $($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $lvl, - { $($k).+, $($field)*} - ) - ); - (parent: $parent:expr, $lvl:expr, %$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $lvl, - { %$($k).+, $($field)*} - ) - ); - (parent: $parent:expr, $lvl:expr, ?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $lvl, - { ?$($k).+, $($field)*} - ) - ); - (parent: $parent:expr, $lvl:expr, $($arg:tt)+ ) => ( - $crate::event!(target: module_path!(), parent: $parent, $lvl, { $($arg)+ }) - ); - - // ... 
- ( $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - $lvl, - { message = ::core::format_args!($($arg)+), $($fields)* } - ) - ); - ( $lvl:expr, { $($fields:tt)* }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - $lvl, - { message = format_args!($($arg)+), $($fields)* } - ) - ); - ($lvl:expr, $($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $lvl, - { $($k).+ = $($field)*} - ) - ); - ($lvl:expr, $($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $lvl, - { $($k).+, $($field)*} - ) - ); - ($lvl:expr, ?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $lvl, - { ?$($k).+, $($field)*} - ) - ); - ($lvl:expr, %$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $lvl, - { %$($k).+, $($field)*} - ) - ); - ($lvl:expr, ?$($k:ident).+) => ( - $crate::event!($lvl, ?$($k).+,) - ); - ($lvl:expr, %$($k:ident).+) => ( - $crate::event!($lvl, %$($k).+,) - ); - ($lvl:expr, $($k:ident).+) => ( - $crate::event!($lvl, $($k).+,) - ); - ( $lvl:expr, $($arg:tt)+ ) => ( - $crate::event!(target: module_path!(), $lvl, { $($arg)+ }) - ); -} - -/// Tests whether an event with the specified level and target would be enabled. -/// -/// This is similar to [`enabled!`], but queries the current subscriber specifically for -/// an event, whereas [`enabled!`] queries for an event _or_ span. -/// -/// See the documentation for [`enabled!]` for more details on using this macro. -/// See also [`span_enabled!`]. -/// -/// # Examples -/// -/// ```rust -/// # use tracing::{event_enabled, Level}; -/// if event_enabled!(target: "my_crate", Level::DEBUG) { -/// // some expensive work... -/// } -/// // simpler -/// if event_enabled!(Level::DEBUG) { -/// // some expensive work... -/// } -/// // with fields -/// if event_enabled!(Level::DEBUG, foo_field) { -/// // some expensive work... 
-/// } -/// ``` -/// -/// [`enabled!`]: crate::enabled -/// [`span_enabled!`]: crate::span_enabled -#[macro_export] -macro_rules! event_enabled { - ($($rest:tt)*)=> ( - $crate::enabled!(kind: $crate::metadata::Kind::EVENT, $($rest)*) - ) -} - -/// Tests whether a span with the specified level and target would be enabled. -/// -/// This is similar to [`enabled!`], but queries the current subscriber specifically for -/// an event, whereas [`enabled!`] queries for an event _or_ span. -/// -/// See the documentation for [`enabled!]` for more details on using this macro. -/// See also [`span_enabled!`]. -/// -/// # Examples -/// -/// ```rust -/// # use tracing::{span_enabled, Level}; -/// if span_enabled!(target: "my_crate", Level::DEBUG) { -/// // some expensive work... -/// } -/// // simpler -/// if span_enabled!(Level::DEBUG) { -/// // some expensive work... -/// } -/// // with fields -/// if span_enabled!(Level::DEBUG, foo_field) { -/// // some expensive work... -/// } -/// ``` -/// -/// [`enabled!`]: crate::enabled -/// [`span_enabled!`]: crate::span_enabled -#[macro_export] -macro_rules! span_enabled { - ($($rest:tt)*)=> ( - $crate::enabled!(kind: $crate::metadata::Kind::SPAN, $($rest)*) - ) -} - -/// Checks whether a span or event is [enabled] based on the provided [metadata]. -/// -/// [enabled]: crate::Subscriber::enabled -/// [metadata]: crate::Metadata -/// -/// This macro is a specialized tool: it is intended to be used prior -/// to an expensive computation required *just* for that event, but -/// *cannot* be done as part of an argument to that event, such as -/// when multiple events are emitted (e.g., iterating over a collection -/// and emitting an event for each item). -/// -/// # Usage -/// -/// [Subscribers] can make filtering decisions based all the data included in a -/// span or event's [`Metadata`]. 
This means that it is possible for `enabled!` -/// to return a _false positive_ (indicating that something would be enabled -/// when it actually would not be) or a _false negative_ (indicating that -/// something would be disabled when it would actually be enabled). -/// -/// [Subscribers]: crate::subscriber::Subscriber -/// [`Metadata`]: crate::metadata::Metadata -/// -/// This occurs when a subscriber is using a _more specific_ filter than the -/// metadata provided to the `enabled!` macro. Some situations that can result -/// in false positives or false negatives include: -/// -/// - If a subscriber is using a filter which may enable a span or event based -/// on field names, but `enabled!` is invoked without listing field names, -/// `enabled!` may return a false negative if a specific field name would -/// cause the subscriber to enable something that would otherwise be disabled. -/// - If a subscriber is using a filter which enables or disables specific events by -/// file path and line number, a particular event may be enabled/disabled -/// even if an `enabled!` invocation with the same level, target, and fields -/// indicated otherwise. -/// - The subscriber can choose to enable _only_ spans or _only_ events, which `enabled` -/// will not reflect. -/// -/// `enabled!()` requires a [level](crate::Level) argument, an optional `target:` -/// argument, and an optional set of field names. If the fields are not provided, -/// they are considered to be unknown. `enabled!` attempts to match the -/// syntax of `event!()` as closely as possible, which can be seen in the -/// examples below. -/// -/// # Examples -/// -/// If the current subscriber is interested in recording `DEBUG`-level spans and -/// events in the current file and module path, this will evaluate to true: -/// ```rust -/// use tracing::{enabled, Level}; -/// -/// if enabled!(Level::DEBUG) { -/// // some expensive work... 
-/// } -/// ``` -/// -/// If the current subscriber is interested in recording spans and events -/// in the current file and module path, with the target "my_crate", and at the -/// level `DEBUG`, this will evaluate to true: -/// ```rust -/// # use tracing::{enabled, Level}; -/// if enabled!(target: "my_crate", Level::DEBUG) { -/// // some expensive work... -/// } -/// ``` -/// -/// If the current subscriber is interested in recording spans and events -/// in the current file and module path, with the target "my_crate", at -/// the level `DEBUG`, and with a field named "hello", this will evaluate -/// to true: -/// -/// ```rust -/// # use tracing::{enabled, Level}; -/// if enabled!(target: "my_crate", Level::DEBUG, hello) { -/// // some expensive work... -/// } -/// ``` -/// -/// # Alternatives -/// -/// `enabled!` queries subscribers with [`Metadata`] where -/// [`is_event`] and [`is_span`] both return `false`. Alternatively, -/// use [`event_enabled!`] or [`span_enabled!`] to ensure one of these -/// returns true. -/// -/// -/// [`Metadata`]: crate::Metadata -/// [`is_event`]: crate::Metadata::is_event -/// [`is_span`]: crate::Metadata::is_span -/// [`enabled!`]: crate::enabled -/// [`span_enabled!`]: crate::span_enabled -#[macro_export] -macro_rules! enabled { - (kind: $kind:expr, target: $target:expr, $lvl:expr, { $($fields:tt)* } )=> ({ - if $crate::level_enabled!($lvl) { - use $crate::__macro_support::Callsite as _; - static __CALLSITE: $crate::callsite::DefaultCallsite = $crate::callsite2! 
{ - name: $crate::__macro_support::concat!( - "enabled ", - file!(), - ":", - line!() - ), - kind: $kind.hint(), - target: $target, - level: $lvl, - fields: $($fields)* - }; - let interest = __CALLSITE.interest(); - if !interest.is_never() && $crate::__macro_support::__is_enabled(__CALLSITE.metadata(), interest) { - let meta = __CALLSITE.metadata(); - $crate::dispatcher::get_default(|current| current.enabled(meta)) - } else { - false - } - } else { - false - } - }); - // Just target and level - (kind: $kind:expr, target: $target:expr, $lvl:expr ) => ( - $crate::enabled!(kind: $kind, target: $target, $lvl, { }) - ); - (target: $target:expr, $lvl:expr ) => ( - $crate::enabled!(kind: $crate::metadata::Kind::HINT, target: $target, $lvl, { }) - ); - - // These four cases handle fields with no values - (kind: $kind:expr, target: $target:expr, $lvl:expr, $($field:tt)*) => ( - $crate::enabled!( - kind: $kind, - target: $target, - $lvl, - { $($field)*} - ) - ); - (target: $target:expr, $lvl:expr, $($field:tt)*) => ( - $crate::enabled!( - kind: $crate::metadata::Kind::HINT, - target: $target, - $lvl, - { $($field)*} - ) - ); - - // Level and field case - (kind: $kind:expr, $lvl:expr, $($field:tt)*) => ( - $crate::enabled!( - kind: $kind, - target: module_path!(), - $lvl, - { $($field)*} - ) - ); - - // Simplest `enabled!` case - (kind: $kind:expr, $lvl:expr) => ( - $crate::enabled!(kind: $kind, target: module_path!(), $lvl, { }) - ); - ($lvl:expr) => ( - $crate::enabled!(kind: $crate::metadata::Kind::HINT, target: module_path!(), $lvl, { }) - ); - - // Fallthrough from above - ($lvl:expr, $($field:tt)*) => ( - $crate::enabled!( - kind: $crate::metadata::Kind::HINT, - target: module_path!(), - $lvl, - { $($field)*} - ) - ); -} - -/// Constructs an event at the trace level. -/// -/// This functions similarly to the [`event!`] macro. See [the top-level -/// documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [`event!`]: crate::event! 
-/// [lib]: crate#using-the-macros -/// -/// # Examples -/// -/// ```rust -/// use tracing::trace; -/// # #[derive(Debug, Copy, Clone)] struct Position { x: f32, y: f32 } -/// # impl Position { -/// # const ORIGIN: Self = Self { x: 0.0, y: 0.0 }; -/// # fn dist(&self, other: Position) -> f32 { -/// # let x = (other.x - self.x).exp2(); let y = (self.y - other.y).exp2(); -/// # (x + y).sqrt() -/// # } -/// # } -/// # fn main() { -/// let pos = Position { x: 3.234, y: -1.223 }; -/// let origin_dist = pos.dist(Position::ORIGIN); -/// -/// trace!(position = ?pos, ?origin_dist); -/// trace!( -/// target: "app_events", -/// position = ?pos, -/// "x is {} and y is {}", -/// if pos.x >= 0.0 { "positive" } else { "negative" }, -/// if pos.y >= 0.0 { "positive" } else { "negative" } -/// ); -/// trace!(name: "completed", position = ?pos); -/// # } -/// ``` -#[macro_export] -macro_rules! trace { - // Name / target / parent. - (name: $name:expr, target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::TRACE, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::TRACE, {}, $($arg)+) - ); - - // Name / 
target. - (name: $name:expr, target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::TRACE, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::TRACE, {}, $($arg)+) - ); - - // Target / parent. - (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, { $($field)* }, $($arg)*) - ); - (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::TRACE, {}, $($arg)+) - ); - - // Name / parent. 
- (name: $name:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::TRACE, { $($field)* }, $($arg)*) - ); - (name: $name:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::TRACE, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::TRACE, {}, $($arg)+) - ); - - // Name. - (name: $name:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::TRACE, { $($field)* }, $($arg)*) - ); - (name: $name:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::TRACE, { $($k).+ $($field)* }) - ); - (name: $name:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::TRACE, { ?$($k).+ $($field)* }) - ); - (name: $name:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::TRACE, { %$($k).+ $($field)* }) - ); - (name: $name:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, $crate::Level::TRACE, {}, $($arg)+) - ); - - // Target. 
- (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::TRACE, { $($field)* }, $($arg)*) - ); - (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::TRACE, { $($k).+ $($field)* }) - ); - (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::TRACE, { ?$($k).+ $($field)* }) - ); - (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::TRACE, { %$($k).+ $($field)* }) - ); - (target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, $crate::Level::TRACE, {}, $($arg)+) - ); - - // Parent. - (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::TRACE, - { $($field)+ }, - $($arg)+ - ) - ); - (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::TRACE, - { $($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::TRACE, - { ?$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::TRACE, - { %$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::TRACE, - { $($k).+, $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::TRACE, - { ?$($k).+, $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::TRACE, - { %$($k).+, 
$($field)*} - ) - ); - (parent: $parent:expr, $($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::TRACE, - {}, - $($arg)+ - ) - ); - - // ... - ({ $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { $($field)+ }, - $($arg)+ - ) - ); - ($($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { $($k).+ = $($field)*} - ) - ); - (?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { ?$($k).+ = $($field)*} - ) - ); - (%$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { %$($k).+ = $($field)*} - ) - ); - ($($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { $($k).+, $($field)*} - ) - ); - (?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { ?$($k).+, $($field)*} - ) - ); - (%$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { %$($k).+, $($field)*} - ) - ); - (?$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { ?$($k).+ } - ) - ); - (%$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { %$($k).+ } - ) - ); - ($($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - { $($k).+ } - ) - ); - ($($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::TRACE, - {}, - $($arg)+ - ) - ); -} - -/// Constructs an event at the debug level. -/// -/// This functions similarly to the [`event!`] macro. See [the top-level -/// documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [`event!`]: crate::event! 
-/// [lib]: crate#using-the-macros -/// -/// # Examples -/// -/// ```rust -/// use tracing::debug; -/// # fn main() { -/// # #[derive(Debug)] struct Position { x: f32, y: f32 } -/// -/// let pos = Position { x: 3.234, y: -1.223 }; -/// -/// debug!(?pos.x, ?pos.y); -/// debug!(target: "app_events", position = ?pos, "New position"); -/// debug!(name: "completed", position = ?pos); -/// # } -/// ``` -#[macro_export] -macro_rules! debug { - // Name / target / parent. - (name: $name:expr, target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::DEBUG, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::DEBUG, {}, $($arg)+) - ); - - // Name / target. 
- (name: $name:expr, target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::DEBUG, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::DEBUG, {}, $($arg)+) - ); - - // Target / parent. - (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, { $($field)* }, $($arg)*) - ); - (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::DEBUG, {}, $($arg)+) - ); - - // Name / parent. 
- (name: $name:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::DEBUG, { $($field)* }, $($arg)*) - ); - (name: $name:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::DEBUG, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::DEBUG, {}, $($arg)+) - ); - - // Name. - (name: $name:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::DEBUG, { $($field)* }, $($arg)*) - ); - (name: $name:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::DEBUG, { $($k).+ $($field)* }) - ); - (name: $name:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::DEBUG, { ?$($k).+ $($field)* }) - ); - (name: $name:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::DEBUG, { %$($k).+ $($field)* }) - ); - (name: $name:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, $crate::Level::DEBUG, {}, $($arg)+) - ); - - // Target. 
- (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::DEBUG, { $($field)* }, $($arg)*) - ); - (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::DEBUG, { $($k).+ $($field)* }) - ); - (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::DEBUG, { ?$($k).+ $($field)* }) - ); - (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::DEBUG, { %$($k).+ $($field)* }) - ); - (target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, $crate::Level::DEBUG, {}, $($arg)+) - ); - - // Parent. - (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::DEBUG, - { $($field)+ }, - $($arg)+ - ) - ); - (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::DEBUG, - { $($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::DEBUG, - { ?$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::DEBUG, - { %$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::DEBUG, - { $($k).+, $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::DEBUG, - { ?$($k).+, $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::DEBUG, - { %$($k).+, 
$($field)*} - ) - ); - (parent: $parent:expr, $($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::DEBUG, - {}, - $($arg)+ - ) - ); - - // ... - ({ $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { $($field)+ }, - $($arg)+ - ) - ); - ($($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { $($k).+ = $($field)*} - ) - ); - (?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { ?$($k).+ = $($field)*} - ) - ); - (%$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { %$($k).+ = $($field)*} - ) - ); - ($($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { $($k).+, $($field)*} - ) - ); - (?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { ?$($k).+, $($field)*} - ) - ); - (%$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { %$($k).+, $($field)*} - ) - ); - (?$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { ?$($k).+ } - ) - ); - (%$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { %$($k).+ } - ) - ); - ($($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - { $($k).+ } - ) - ); - ($($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::DEBUG, - {}, - $($arg)+ - ) - ); -} - -/// Constructs an event at the info level. -/// -/// This functions similarly to the [`event!`] macro. See [the top-level -/// documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [`event!`]: crate::event! 
-/// [lib]: crate#using-the-macros -/// -/// # Examples -/// -/// ```rust -/// use tracing::info; -/// # // this is so the test will still work in no-std mode -/// # #[derive(Debug)] -/// # pub struct Ipv4Addr; -/// # impl Ipv4Addr { fn new(o1: u8, o2: u8, o3: u8, o4: u8) -> Self { Self } } -/// # fn main() { -/// # struct Connection { port: u32, speed: f32 } -/// use tracing::field; -/// -/// let addr = Ipv4Addr::new(127, 0, 0, 1); -/// let conn = Connection { port: 40, speed: 3.20 }; -/// -/// info!(conn.port, "connected to {:?}", addr); -/// info!( -/// target: "connection_events", -/// ip = ?addr, -/// conn.port, -/// ?conn.speed, -/// ); -/// info!(name: "completed", "completed connection to {:?}", addr); -/// # } -/// ``` -#[macro_export] -macro_rules! info { - // Name / target / parent. - (name: $name:expr, target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::INFO, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::INFO, {}, $($arg)+) - ); - - // Name / target. 
- (name: $name:expr, target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::INFO, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::INFO, {}, $($arg)+) - ); - - // Target / parent. - (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, { $($field)* }, $($arg)*) - ); - (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::INFO, {}, $($arg)+) - ); - - // Name / parent. 
- (name: $name:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::INFO, { $($field)* }, $($arg)*) - ); - (name: $name:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::INFO, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::INFO, {}, $($arg)+) - ); - - // Name. - (name: $name:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::INFO, { $($field)* }, $($arg)*) - ); - (name: $name:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::INFO, { $($k).+ $($field)* }) - ); - (name: $name:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::INFO, { ?$($k).+ $($field)* }) - ); - (name: $name:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::INFO, { %$($k).+ $($field)* }) - ); - (name: $name:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, $crate::Level::INFO, {}, $($arg)+) - ); - - // Target. 
- (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::INFO, { $($field)* }, $($arg)*) - ); - (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::INFO, { $($k).+ $($field)* }) - ); - (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::INFO, { ?$($k).+ $($field)* }) - ); - (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::INFO, { %$($k).+ $($field)* }) - ); - (target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, $crate::Level::INFO, {}, $($arg)+) - ); - - // Parent. - (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::INFO, - { $($field)+ }, - $($arg)+ - ) - ); - (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::INFO, - { $($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::INFO, - { ?$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::INFO, - { %$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::INFO, - { $($k).+, $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::INFO, - { ?$($k).+, $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::INFO, - { %$($k).+, $($field)*} - ) - ); 
- (parent: $parent:expr, $($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::INFO, - {}, - $($arg)+ - ) - ); - - // ... - ({ $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { $($field)+ }, - $($arg)+ - ) - ); - ($($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { $($k).+ = $($field)*} - ) - ); - (?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { ?$($k).+ = $($field)*} - ) - ); - (%$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { %$($k).+ = $($field)*} - ) - ); - ($($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { $($k).+, $($field)*} - ) - ); - (?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { ?$($k).+, $($field)*} - ) - ); - (%$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { %$($k).+, $($field)*} - ) - ); - (?$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { ?$($k).+ } - ) - ); - (%$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { %$($k).+ } - ) - ); - ($($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - { $($k).+ } - ) - ); - ($($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::INFO, - {}, - $($arg)+ - ) - ); -} - -/// Constructs an event at the warn level. -/// -/// This functions similarly to the [`event!`] macro. See [the top-level -/// documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [`event!`]: crate::event! 
-/// [lib]: crate#using-the-macros -/// -/// # Examples -/// -/// ```rust -/// use tracing::warn; -/// # fn main() { -/// -/// let warn_description = "Invalid Input"; -/// let input = &[0x27, 0x45]; -/// -/// warn!(?input, warning = warn_description); -/// warn!( -/// target: "input_events", -/// warning = warn_description, -/// "Received warning for input: {:?}", input, -/// ); -/// warn!(name: "invalid", ?input); -/// # } -/// ``` -#[macro_export] -macro_rules! warn { - // Name / target / parent. - (name: $name:expr, target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::WARN, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::WARN, {}, $($arg)+) - ); - - // Name / target. 
- (name: $name:expr, target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::WARN, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::WARN, {}, $($arg)+) - ); - - // Target / parent. - (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, { $($field)* }, $($arg)*) - ); - (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::WARN, {}, $($arg)+) - ); - - // Name / parent. 
- (name: $name:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::WARN, { $($field)* }, $($arg)*) - ); - (name: $name:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::WARN, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::WARN, {}, $($arg)+) - ); - - // Name. - (name: $name:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::WARN, { $($field)* }, $($arg)*) - ); - (name: $name:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::WARN, { $($k).+ $($field)* }) - ); - (name: $name:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::WARN, { ?$($k).+ $($field)* }) - ); - (name: $name:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::WARN, { %$($k).+ $($field)* }) - ); - (name: $name:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, $crate::Level::WARN, {}, $($arg)+) - ); - - // Target. 
- (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::WARN, { $($field)* }, $($arg)*) - ); - (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::WARN, { $($k).+ $($field)* }) - ); - (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::WARN, { ?$($k).+ $($field)* }) - ); - (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::WARN, { %$($k).+ $($field)* }) - ); - (target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, $crate::Level::WARN, {}, $($arg)+) - ); - - // Parent. - (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::WARN, - { $($field)+ }, - $($arg)+ - ) - ); - (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::WARN, - { $($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::WARN, - { ?$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::WARN, - { %$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::WARN, - { $($k).+, $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::WARN, - { ?$($k).+, $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::WARN, - { %$($k).+, $($field)*} - ) - ); 
- (parent: $parent:expr, $($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::WARN, - {}, - $($arg)+ - ) - ); - - // ... - ({ $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { $($field)+ }, - $($arg)+ - ) - ); - ($($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { $($k).+ = $($field)*} - ) - ); - (?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { ?$($k).+ = $($field)*} - ) - ); - (%$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { %$($k).+ = $($field)*} - ) - ); - ($($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { $($k).+, $($field)*} - ) - ); - (?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { ?$($k).+, $($field)*} - ) - ); - (%$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { %$($k).+, $($field)*} - ) - ); - (?$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { ?$($k).+ } - ) - ); - (%$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { %$($k).+ } - ) - ); - ($($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - { $($k).+ } - ) - ); - ($($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::WARN, - {}, - $($arg)+ - ) - ); -} - -/// Constructs an event at the error level. -/// -/// This functions similarly to the [`event!`] macro. See [the top-level -/// documentation][lib] for details on the syntax accepted by -/// this macro. -/// -/// [`event!`]: crate::event! 
-/// [lib]: crate#using-the-macros -/// -/// # Examples -/// -/// ```rust -/// use tracing::error; -/// # fn main() { -/// -/// let (err_info, port) = ("No connection", 22); -/// -/// error!(port, error = %err_info); -/// error!(target: "app_events", "App Error: {}", err_info); -/// error!({ info = err_info }, "error on port: {}", port); -/// error!(name: "invalid_input", "Invalid input: {}", err_info); -/// # } -/// ``` -#[macro_export] -macro_rules! error { - // Name / target / parent. - (name: $name:expr, target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::ERROR, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, parent: $parent, $crate::Level::ERROR, {}, $($arg)+) - ); - - // Name / target. 
- (name: $name:expr, target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::ERROR, { $($field)* }, $($arg)*) - ); - (name: $name:expr, target: $target:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (name: $name:expr, target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, target: $target, $crate::Level::ERROR, {}, $($arg)+) - ); - - // Target / parent. - (target: $target:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, { $($field)* }, $($arg)*) - ); - (target: $target:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (target: $target:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, parent: $parent, $crate::Level::ERROR, {}, $($arg)+) - ); - - // Name / parent. 
- (name: $name:expr, parent: $parent:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::ERROR, { $($field)* }, $($arg)*) - ); - (name: $name:expr, parent: $parent:expr, $($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, ?$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, %$($k:ident).+ $($field:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::ERROR, { $($k).+ $($field)+ }) - ); - (name: $name:expr, parent: $parent:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, parent: $parent, $crate::Level::ERROR, {}, $($arg)+) - ); - - // Name. - (name: $name:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::ERROR, { $($field)* }, $($arg)*) - ); - (name: $name:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::ERROR, { $($k).+ $($field)* }) - ); - (name: $name:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::ERROR, { ?$($k).+ $($field)* }) - ); - (name: $name:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(name: $name, $crate::Level::ERROR, { %$($k).+ $($field)* }) - ); - (name: $name:expr, $($arg:tt)+ ) => ( - $crate::event!(name: $name, $crate::Level::ERROR, {}, $($arg)+) - ); - - // Target. 
- (target: $target:expr, { $($field:tt)* }, $($arg:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::ERROR, { $($field)* }, $($arg)*) - ); - (target: $target:expr, $($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::ERROR, { $($k).+ $($field)* }) - ); - (target: $target:expr, ?$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::ERROR, { ?$($k).+ $($field)* }) - ); - (target: $target:expr, %$($k:ident).+ $($field:tt)* ) => ( - $crate::event!(target: $target, $crate::Level::ERROR, { %$($k).+ $($field)* }) - ); - (target: $target:expr, $($arg:tt)+ ) => ( - $crate::event!(target: $target, $crate::Level::ERROR, {}, $($arg)+) - ); - - // Parent. - (parent: $parent:expr, { $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::ERROR, - { $($field)+ }, - $($arg)+ - ) - ); - (parent: $parent:expr, $($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::ERROR, - { $($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::ERROR, - { ?$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::ERROR, - { %$($k).+ = $($field)*} - ) - ); - (parent: $parent:expr, $($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::ERROR, - { $($k).+, $($field)*} - ) - ); - (parent: $parent:expr, ?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::ERROR, - { ?$($k).+, $($field)*} - ) - ); - (parent: $parent:expr, %$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::ERROR, - { %$($k).+, 
$($field)*} - ) - ); - (parent: $parent:expr, $($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - parent: $parent, - $crate::Level::ERROR, - {}, - $($arg)+ - ) - ); - - // ... - ({ $($field:tt)+ }, $($arg:tt)+ ) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { $($field)+ }, - $($arg)+ - ) - ); - ($($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { $($k).+ = $($field)*} - ) - ); - (?$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { ?$($k).+ = $($field)*} - ) - ); - (%$($k:ident).+ = $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { %$($k).+ = $($field)*} - ) - ); - ($($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { $($k).+, $($field)*} - ) - ); - (?$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { ?$($k).+, $($field)*} - ) - ); - (%$($k:ident).+, $($field:tt)*) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { %$($k).+, $($field)*} - ) - ); - (?$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { ?$($k).+ } - ) - ); - (%$($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { %$($k).+ } - ) - ); - ($($k:ident).+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - { $($k).+ } - ) - ); - ($($arg:tt)+) => ( - $crate::event!( - target: module_path!(), - $crate::Level::ERROR, - {}, - $($arg)+ - ) - ); -} - -/// Constructs a new static callsite for a span or event. -#[doc(hidden)] -#[macro_export] -macro_rules! callsite { - (name: $name:expr, kind: $kind:expr, fields: $($fields:tt)*) => {{ - $crate::callsite! 
{ - name: $name, - kind: $kind, - target: module_path!(), - level: $crate::Level::TRACE, - fields: $($fields)* - } - }}; - ( - name: $name:expr, - kind: $kind:expr, - level: $lvl:expr, - fields: $($fields:tt)* - ) => {{ - $crate::callsite! { - name: $name, - kind: $kind, - target: module_path!(), - level: $lvl, - fields: $($fields)* - } - }}; - ( - name: $name:expr, - kind: $kind:expr, - target: $target:expr, - level: $lvl:expr, - fields: $($fields:tt)* - ) => {{ - static META: $crate::Metadata<'static> = { - $crate::metadata! { - name: $name, - target: $target, - level: $lvl, - fields: $crate::fieldset!( $($fields)* ), - callsite: &__CALLSITE, - kind: $kind, - } - }; - static __CALLSITE: $crate::callsite::DefaultCallsite = $crate::callsite::DefaultCallsite::new(&META); - __CALLSITE.register(); - &__CALLSITE - }}; -} - -/// Constructs a new static callsite for a span or event. -#[doc(hidden)] -#[macro_export] -macro_rules! callsite2 { - (name: $name:expr, kind: $kind:expr, fields: $($fields:tt)*) => {{ - $crate::callsite2! { - name: $name, - kind: $kind, - target: module_path!(), - level: $crate::Level::TRACE, - fields: $($fields)* - } - }}; - ( - name: $name:expr, - kind: $kind:expr, - level: $lvl:expr, - fields: $($fields:tt)* - ) => {{ - $crate::callsite2! { - name: $name, - kind: $kind, - target: module_path!(), - level: $lvl, - fields: $($fields)* - } - }}; - ( - name: $name:expr, - kind: $kind:expr, - target: $target:expr, - level: $lvl:expr, - fields: $($fields:tt)* - ) => {{ - static META: $crate::Metadata<'static> = { - $crate::metadata! { - name: $name, - target: $target, - level: $lvl, - fields: $crate::fieldset!( $($fields)* ), - callsite: &__CALLSITE, - kind: $kind, - } - }; - $crate::callsite::DefaultCallsite::new(&META) - }}; -} - -#[macro_export] -// TODO: determine if this ought to be public API?` -#[doc(hidden)] -macro_rules! 
level_enabled { - ($lvl:expr) => { - $lvl <= $crate::level_filters::STATIC_MAX_LEVEL - && $lvl <= $crate::level_filters::LevelFilter::current() - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! valueset { - - // === base case === - (@ { $(,)* $($val:expr),* $(,)* }, $next:expr $(,)*) => { - &[ $($val),* ] - }; - - // === recursive case (more tts) === - - // TODO(#1138): determine a new syntax for uninitialized span fields, and - // re-enable this. - // (@{ $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = _, $($rest:tt)*) => { - // $crate::valueset!(@ { $($out),*, (&$next, None) }, $next, $($rest)*) - // }; - (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = ?$val:expr, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&debug(&$val) as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = %$val:expr, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&display(&$val) as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = $val:expr, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&$val as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&$($k).+ as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, ?$($k:ident).+, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&debug(&$($k).+) as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, %$($k:ident).+, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&display(&$($k).+) as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, 
$($k:ident).+ = ?$val:expr) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&debug(&$val) as &dyn Value)) }, - $next, - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = %$val:expr) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&display(&$val) as &dyn Value)) }, - $next, - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+ = $val:expr) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&$val as &dyn Value)) }, - $next, - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $($k:ident).+) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&$($k).+ as &dyn Value)) }, - $next, - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, ?$($k:ident).+) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&debug(&$($k).+) as &dyn Value)) }, - $next, - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, %$($k:ident).+) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&display(&$($k).+) as &dyn Value)) }, - $next, - ) - }; - - // Handle literal names - (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = ?$val:expr, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&debug(&$val) as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = %$val:expr, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&display(&$val) as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = $val:expr, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&$val as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = ?$val:expr) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&debug(&$val) as 
&dyn Value)) }, - $next, - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = %$val:expr) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&display(&$val) as &dyn Value)) }, - $next, - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, $k:literal = $val:expr) => { - $crate::valueset!( - @ { $($out),*, (&$next, ::core::option::Option::Some(&$val as &dyn Value)) }, - $next, - ) - }; - - // Handle constant names - (@ { $(,)* $($out:expr),* }, $next:expr, { $k:expr } = ?$val:expr, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, Some(&debug(&$val) as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, { $k:expr } = %$val:expr, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, Some(&display(&$val) as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, { $k:expr } = $val:expr, $($rest:tt)*) => { - $crate::valueset!( - @ { $($out),*, (&$next, Some(&$val as &dyn Value)) }, - $next, - $($rest)* - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, { $k:expr } = ?$val:expr) => { - $crate::valueset!( - @ { $($out),*, (&$next, Some(&debug(&$val) as &dyn Value)) }, - $next, - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, { $k:expr } = %$val:expr) => { - $crate::valueset!( - @ { $($out),*, (&$next, Some(&display(&$val) as &dyn Value)) }, - $next, - ) - }; - (@ { $(,)* $($out:expr),* }, $next:expr, { $k:expr } = $val:expr) => { - $crate::valueset!( - @ { $($out),*, (&$next, Some(&$val as &dyn Value)) }, - $next, - ) - }; - - // Remainder is unparsable, but exists --- must be format args! 
- (@ { $(,)* $($out:expr),* }, $next:expr, $($rest:tt)+) => { - $crate::valueset!(@ { (&$next, ::core::option::Option::Some(&::core::format_args!($($rest)+) as &dyn Value)), $($out),* }, $next, ) - }; - - // === entry === - ($fields:expr, $($kvs:tt)+) => { - { - #[allow(unused_imports)] - use $crate::field::{debug, display, Value}; - let mut iter = $fields.iter(); - $fields.value_set($crate::valueset!( - @ { }, - ::core::iter::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"), - $($kvs)+ - )) - } - }; - ($fields:expr,) => { - { - $fields.value_set(&[]) - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! fieldset { - // == base case == - (@ { $(,)* $($out:expr),* $(,)* } $(,)*) => { - &[ $($out),* ] - }; - - // == recursive cases (more tts) == - (@ { $(,)* $($out:expr),* } $($k:ident).+ = ?$val:expr, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) - }; - (@ { $(,)* $($out:expr),* } $($k:ident).+ = %$val:expr, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) - }; - (@ { $(,)* $($out:expr),* } $($k:ident).+ = $val:expr, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) - }; - // TODO(#1138): determine a new syntax for uninitialized span fields, and - // re-enable this. 
- // (@ { $($out:expr),* } $($k:ident).+ = _, $($rest:tt)*) => { - // $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) - // }; - (@ { $(,)* $($out:expr),* } ?$($k:ident).+, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) - }; - (@ { $(,)* $($out:expr),* } %$($k:ident).+, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) - }; - (@ { $(,)* $($out:expr),* } $($k:ident).+, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $crate::__tracing_stringify!($($k).+) } $($rest)*) - }; - - // Handle literal names - (@ { $(,)* $($out:expr),* } $k:literal = ?$val:expr, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $k } $($rest)*) - }; - (@ { $(,)* $($out:expr),* } $k:literal = %$val:expr, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $k } $($rest)*) - }; - (@ { $(,)* $($out:expr),* } $k:literal = $val:expr, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $k } $($rest)*) - }; - - // Handle constant names - (@ { $(,)* $($out:expr),* } { $k:expr } = ?$val:expr, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $k } $($rest)*) - }; - (@ { $(,)* $($out:expr),* } { $k:expr } = %$val:expr, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $k } $($rest)*) - }; - (@ { $(,)* $($out:expr),* } { $k:expr } = $val:expr, $($rest:tt)*) => { - $crate::fieldset!(@ { $($out),*, $k } $($rest)*) - }; - - // Remainder is unparseable, but exists --- must be format args! - (@ { $(,)* $($out:expr),* } $($rest:tt)+) => { - $crate::fieldset!(@ { "message", $($out),*, }) - }; - - // == entry == - ($($args:tt)*) => { - $crate::fieldset!(@ { } $($args)*,) - }; - -} - -#[cfg(feature = "log")] -#[doc(hidden)] -#[macro_export] -macro_rules! 
level_to_log { - ($level:expr) => { - match $level { - $crate::Level::ERROR => $crate::log::Level::Error, - $crate::Level::WARN => $crate::log::Level::Warn, - $crate::Level::INFO => $crate::log::Level::Info, - $crate::Level::DEBUG => $crate::log::Level::Debug, - _ => $crate::log::Level::Trace, - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __tracing_stringify { - ($s:expr) => { - stringify!($s) - }; -} - -#[cfg(not(feature = "log"))] -#[doc(hidden)] -#[macro_export] -macro_rules! __tracing_log { - ($level:expr, $callsite:expr, $value_set:expr) => {}; -} - -#[cfg(feature = "log")] -#[doc(hidden)] -#[macro_export] -macro_rules! __tracing_log { - ($level:expr, $callsite:expr, $value_set:expr) => { - $crate::if_log_enabled! { $level, { - use $crate::log; - let level = $crate::level_to_log!($level); - if level <= log::max_level() { - let meta = $callsite.metadata(); - let log_meta = log::Metadata::builder() - .level(level) - .target(meta.target()) - .build(); - let logger = log::logger(); - if logger.enabled(&log_meta) { - $crate::__macro_support::__tracing_log(meta, logger, log_meta, $value_set) - } - } - }} - }; -} - -#[cfg(not(feature = "log"))] -#[doc(hidden)] -#[macro_export] -macro_rules! if_log_enabled { - ($lvl:expr, $e:expr;) => { - $crate::if_log_enabled! { $lvl, $e } - }; - ($lvl:expr, $if_log:block) => { - $crate::if_log_enabled! { $lvl, $if_log else {} } - }; - ($lvl:expr, $if_log:block else $else_block:block) => { - $else_block - }; -} - -#[cfg(all(feature = "log", not(feature = "log-always")))] -#[doc(hidden)] -#[macro_export] -macro_rules! if_log_enabled { - ($lvl:expr, $e:expr;) => { - $crate::if_log_enabled! { $lvl, $e } - }; - ($lvl:expr, $if_log:block) => { - $crate::if_log_enabled! 
{ $lvl, $if_log else {} } - }; - ($lvl:expr, $if_log:block else $else_block:block) => { - if $crate::level_to_log!($lvl) <= $crate::log::STATIC_MAX_LEVEL { - if !$crate::dispatcher::has_been_set() { - $if_log - } else { - $else_block - } - } else { - $else_block - } - }; -} - -#[cfg(all(feature = "log", feature = "log-always"))] -#[doc(hidden)] -#[macro_export] -macro_rules! if_log_enabled { - ($lvl:expr, $e:expr;) => { - $crate::if_log_enabled! { $lvl, $e } - }; - ($lvl:expr, $if_log:block) => { - $crate::if_log_enabled! { $lvl, $if_log else {} } - }; - ($lvl:expr, $if_log:block else $else_block:block) => { - if $crate::level_to_log!($lvl) <= $crate::log::STATIC_MAX_LEVEL { - #[allow(unused_braces)] - $if_log - } else { - $else_block - } - }; -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing/src/span.rs s390-tools-2.33.1/rust-vendor/tracing/src/span.rs --- s390-tools-2.31.0/rust-vendor/tracing/src/span.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/src/span.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1623 +0,0 @@ -//! Spans represent periods of time in which a program was executing in a -//! particular context. -//! -//! A span consists of [fields], user-defined key-value pairs of arbitrary data -//! that describe the context the span represents, and a set of fixed attributes -//! that describe all `tracing` spans and events. Attributes describing spans -//! include: -//! -//! - An [`Id`] assigned by the subscriber that uniquely identifies it in relation -//! to other spans. -//! - The span's [parent] in the trace tree. -//! - [Metadata] that describes static characteristics of all spans -//! originating from that callsite, such as its name, source code location, -//! [verbosity level], and the names of its fields. -//! -//! # Creating Spans -//! -//! Spans are created using the [`span!`] macro. This macro is invoked with the -//! following arguments, in order: -//! -//! 
- The [`target`] and/or [`parent`][parent] attributes, if the user wishes to -//! override their default values. -//! - The span's [verbosity level] -//! - A string literal providing the span's name. -//! - Finally, zero or more arbitrary key/value fields. -//! -//! [`target`]: super::Metadata::target -//! -//! For example: -//! ```rust -//! use tracing::{span, Level}; -//! -//! /// Construct a new span at the `INFO` level named "my_span", with a single -//! /// field named answer , with the value `42`. -//! let my_span = span!(Level::INFO, "my_span", answer = 42); -//! ``` -//! -//! The documentation for the [`span!`] macro provides additional examples of -//! the various options that exist when creating spans. -//! -//! The [`trace_span!`], [`debug_span!`], [`info_span!`], [`warn_span!`], and -//! [`error_span!`] exist as shorthand for constructing spans at various -//! verbosity levels. -//! -//! ## Recording Span Creation -//! -//! The [`Attributes`] type contains data associated with a span, and is -//! provided to the [`Subscriber`] when a new span is created. It contains -//! the span's metadata, the ID of [the span's parent][parent] if one was -//! explicitly set, and any fields whose values were recorded when the span was -//! constructed. The subscriber, which is responsible for recording `tracing` -//! data, can then store or record these values. -//! -//! # The Span Lifecycle -//! -//! ## Entering a Span -//! -//! A thread of execution is said to _enter_ a span when it begins executing, -//! and _exit_ the span when it switches to another context. Spans may be -//! entered through the [`enter`], [`entered`], and [`in_scope`] methods. -//! -//! The [`enter`] method enters a span, returning a [guard] that exits the span -//! when dropped -//! ``` -//! # use tracing::{span, Level}; -//! let my_var: u64 = 5; -//! let my_span = span!(Level::TRACE, "my_span", my_var); -//! -//! // `my_span` exists but has not been entered. -//! -//! // Enter `my_span`... -//! 
let _enter = my_span.enter(); -//! -//! // Perform some work inside of the context of `my_span`... -//! // Dropping the `_enter` guard will exit the span. -//!``` -//! -//!
-//!     Warning: In asynchronous code that uses async/await syntax,
-//!     Span::enter may produce incorrect traces if the returned drop
-//!     guard is held across an await point. See
-//!     the method documentation
-//!     for details.
-//! 
-//! -//! The [`entered`] method is analogous to [`enter`], but moves the span into -//! the returned guard, rather than borrowing it. This allows creating and -//! entering a span in a single expression: -//! -//! ``` -//! # use tracing::{span, Level}; -//! // Create a span and enter it, returning a guard: -//! let span = span!(Level::INFO, "my_span").entered(); -//! -//! // We are now inside the span! Like `enter()`, the guard returned by -//! // `entered()` will exit the span when it is dropped... -//! -//! // ...but, it can also be exited explicitly, returning the `Span` -//! // struct: -//! let span = span.exit(); -//! ``` -//! -//! Finally, [`in_scope`] takes a closure or function pointer and executes it -//! inside the span: -//! -//! ``` -//! # use tracing::{span, Level}; -//! let my_var: u64 = 5; -//! let my_span = span!(Level::TRACE, "my_span", my_var = &my_var); -//! -//! my_span.in_scope(|| { -//! // perform some work in the context of `my_span`... -//! }); -//! -//! // Perform some work outside of the context of `my_span`... -//! -//! my_span.in_scope(|| { -//! // Perform some more work in the context of `my_span`. -//! }); -//! ``` -//! -//!
-//!     Note: Since entering a span takes &self, and
-//!     Spans are Clone, Send, and
-//!     Sync, it is entirely valid for multiple threads to enter the
-//!     same span concurrently.
-//! 
-//! -//! ## Span Relationships -//! -//! Spans form a tree structure — unless it is a root span, all spans have a -//! _parent_, and may have one or more _children_. When a new span is created, -//! the current span becomes the new span's parent. The total execution time of -//! a span consists of the time spent in that span and in the entire subtree -//! represented by its children. Thus, a parent span always lasts for at least -//! as long as the longest-executing span in its subtree. -//! -//! ``` -//! # use tracing::{Level, span}; -//! // this span is considered the "root" of a new trace tree: -//! span!(Level::INFO, "root").in_scope(|| { -//! // since we are now inside "root", this span is considered a child -//! // of "root": -//! span!(Level::DEBUG, "outer_child").in_scope(|| { -//! // this span is a child of "outer_child", which is in turn a -//! // child of "root": -//! span!(Level::TRACE, "inner_child").in_scope(|| { -//! // and so on... -//! }); -//! }); -//! // another span created here would also be a child of "root". -//! }); -//!``` -//! -//! In addition, the parent of a span may be explicitly specified in -//! the `span!` macro. For example: -//! -//! ```rust -//! # use tracing::{Level, span}; -//! // Create, but do not enter, a span called "foo". -//! let foo = span!(Level::INFO, "foo"); -//! -//! // Create and enter a span called "bar". -//! let bar = span!(Level::INFO, "bar"); -//! let _enter = bar.enter(); -//! -//! // Although we have currently entered "bar", "baz"'s parent span -//! // will be "foo". -//! let baz = span!(parent: &foo, Level::INFO, "baz"); -//! ``` -//! -//! A child span should typically be considered _part_ of its parent. For -//! example, if a subscriber is recording the length of time spent in various -//! spans, it should generally include the time spent in a span's children as -//! part of that span's duration. -//! -//! In addition to having zero or one parent, a span may also _follow from_ any -//! 
number of other spans. This indicates a causal relationship between the span -//! and the spans that it follows from, but a follower is *not* typically -//! considered part of the duration of the span it follows. Unlike the parent, a -//! span may record that it follows from another span after it is created, using -//! the [`follows_from`] method. -//! -//! As an example, consider a listener task in a server. As the listener accepts -//! incoming connections, it spawns new tasks that handle those connections. We -//! might want to have a span representing the listener, and instrument each -//! spawned handler task with its own span. We would want our instrumentation to -//! record that the handler tasks were spawned as a result of the listener task. -//! However, we might not consider the handler tasks to be _part_ of the time -//! spent in the listener task, so we would not consider those spans children of -//! the listener span. Instead, we would record that the handler tasks follow -//! from the listener, recording the causal relationship but treating the spans -//! as separate durations. -//! -//! ## Closing Spans -//! -//! Execution may enter and exit a span multiple times before that span is -//! _closed_. Consider, for example, a future which has an associated -//! span and enters that span every time it is polled: -//! ```rust -//! # use std::future::Future; -//! # use std::task::{Context, Poll}; -//! # use std::pin::Pin; -//! struct MyFuture { -//! // data -//! span: tracing::Span, -//! } -//! -//! impl Future for MyFuture { -//! type Output = (); -//! -//! fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { -//! let _enter = self.span.enter(); -//! // Do actual future work... -//! # Poll::Ready(()) -//! } -//! } -//! ``` -//! -//! If this future was spawned on an executor, it might yield one or more times -//! before `poll` returns [`Poll::Ready`]. If the future were to yield, then -//! 
the executor would move on to poll the next future, which may _also_ enter -//! an associated span or series of spans. Therefore, it is valid for a span to -//! be entered repeatedly before it completes. Only the time when that span or -//! one of its children was the current span is considered to be time spent in -//! that span. A span which is not executing and has not yet been closed is said -//! to be _idle_. -//! -//! Because spans may be entered and exited multiple times before they close, -//! [`Subscriber`]s have separate trait methods which are called to notify them -//! of span exits and when span handles are dropped. When execution exits a -//! span, [`exit`] will always be called with that span's ID to notify the -//! subscriber that the span has been exited. When span handles are dropped, the -//! [`drop_span`] method is called with that span's ID. The subscriber may use -//! this to determine whether or not the span will be entered again. -//! -//! If there is only a single handle with the capacity to exit a span, dropping -//! that handle "closes" the span, since the capacity to enter it no longer -//! exists. For example: -//! ``` -//! # use tracing::{Level, span}; -//! { -//! span!(Level::TRACE, "my_span").in_scope(|| { -//! // perform some work in the context of `my_span`... -//! }); // --> Subscriber::exit(my_span) -//! -//! // The handle to `my_span` only lives inside of this block; when it is -//! // dropped, the subscriber will be informed via `drop_span`. -//! -//! } // --> Subscriber::drop_span(my_span) -//! ``` -//! -//! However, if multiple handles exist, the span can still be re-entered even if -//! one or more is dropped. For determining when _all_ handles to a span have -//! been dropped, `Subscriber`s have a [`clone_span`] method, which is called -//! every time a span handle is cloned. Combined with `drop_span`, this may be -//! used to track the number of handles to a given span — if `drop_span` has -//! 
been called one more time than the number of calls to `clone_span` for a -//! given ID, then no more handles to the span with that ID exist. The -//! subscriber may then treat it as closed. -//! -//! # When to use spans -//! -//! As a rule of thumb, spans should be used to represent discrete units of work -//! (e.g., a given request's lifetime in a server) or periods of time spent in a -//! given context (e.g., time spent interacting with an instance of an external -//! system, such as a database). -//! -//! Which scopes in a program correspond to new spans depend somewhat on user -//! intent. For example, consider the case of a loop in a program. Should we -//! construct one span and perform the entire loop inside of that span, like: -//! -//! ```rust -//! # use tracing::{Level, span}; -//! # let n = 1; -//! let span = span!(Level::TRACE, "my_loop"); -//! let _enter = span.enter(); -//! for i in 0..n { -//! # let _ = i; -//! // ... -//! } -//! ``` -//! Or, should we create a new span for each iteration of the loop, as in: -//! ```rust -//! # use tracing::{Level, span}; -//! # let n = 1u64; -//! for i in 0..n { -//! let span = span!(Level::TRACE, "my_loop", iteration = i); -//! let _enter = span.enter(); -//! // ... -//! } -//! ``` -//! -//! Depending on the circumstances, we might want to do either, or both. For -//! example, if we want to know how long was spent in the loop overall, we would -//! create a single span around the entire loop; whereas if we wanted to know how -//! much time was spent in each individual iteration, we would enter a new span -//! on every iteration. -//! -//! [fields]: super::field -//! [Metadata]: super::Metadata -//! [verbosity level]: super::Level -//! [`Poll::Ready`]: std::task::Poll::Ready -//! [`span!`]: super::span! -//! [`trace_span!`]: super::trace_span! -//! [`debug_span!`]: super::debug_span! -//! [`info_span!`]: super::info_span! -//! [`warn_span!`]: super::warn_span! -//! [`error_span!`]: super::error_span! -//! 
[`clone_span`]: super::subscriber::Subscriber::clone_span() -//! [`drop_span`]: super::subscriber::Subscriber::drop_span() -//! [`exit`]: super::subscriber::Subscriber::exit -//! [`Subscriber`]: super::subscriber::Subscriber -//! [`enter`]: Span::enter() -//! [`entered`]: Span::entered() -//! [`in_scope`]: Span::in_scope() -//! [`follows_from`]: Span::follows_from() -//! [guard]: Entered -//! [parent]: #span-relationships -pub use tracing_core::span::{Attributes, Id, Record}; - -use crate::stdlib::{ - cmp, fmt, - hash::{Hash, Hasher}, - marker::PhantomData, - mem, - ops::Deref, -}; -use crate::{ - dispatcher::{self, Dispatch}, - field, Metadata, -}; - -/// Trait implemented by types which have a span `Id`. -pub trait AsId: crate::sealed::Sealed { - /// Returns the `Id` of the span that `self` corresponds to, or `None` if - /// this corresponds to a disabled span. - fn as_id(&self) -> Option<&Id>; -} - -/// A handle representing a span, with the capability to enter the span if it -/// exists. -/// -/// If the span was rejected by the current `Subscriber`'s filter, entering the -/// span will silently do nothing. Thus, the handle can be used in the same -/// manner regardless of whether or not the trace is currently being collected. -#[derive(Clone)] -pub struct Span { - /// A handle used to enter the span when it is not executing. - /// - /// If this is `None`, then the span has either closed or was never enabled. - inner: Option, - /// Metadata describing the span. - /// - /// This might be `Some` even if `inner` is `None`, in the case that the - /// span is disabled but the metadata is needed for `log` support. - meta: Option<&'static Metadata<'static>>, -} - -/// A handle representing the capacity to enter a span which is known to exist. -/// -/// Unlike `Span`, this type is only constructed for spans which _have_ been -/// enabled by the current filter. 
This type is primarily used for implementing -/// span handles; users should typically not need to interact with it directly. -#[derive(Debug)] -pub(crate) struct Inner { - /// The span's ID, as provided by `subscriber`. - id: Id, - - /// The subscriber that will receive events relating to this span. - /// - /// This should be the same subscriber that provided this span with its - /// `id`. - subscriber: Dispatch, -} - -/// A guard representing a span which has been entered and is currently -/// executing. -/// -/// When the guard is dropped, the span will be exited. -/// -/// This is returned by the [`Span::enter`] function. -/// -/// [`Span::enter`]: super::Span::enter -#[derive(Debug)] -#[must_use = "once a span has been entered, it should be exited"] -pub struct Entered<'a> { - span: &'a Span, -} - -/// An owned version of [`Entered`], a guard representing a span which has been -/// entered and is currently executing. -/// -/// When the guard is dropped, the span will be exited. -/// -/// This is returned by the [`Span::entered`] function. -/// -/// [`Span::entered`]: super::Span::entered() -#[derive(Debug)] -#[must_use = "once a span has been entered, it should be exited"] -pub struct EnteredSpan { - span: Span, - - /// ```compile_fail - /// use tracing::span::*; - /// trait AssertSend: Send {} - /// - /// impl AssertSend for EnteredSpan {} - /// ``` - _not_send: PhantomNotSend, -} - -/// `log` target for all span lifecycle (creation/enter/exit/close) records. -#[cfg(feature = "log")] -const LIFECYCLE_LOG_TARGET: &str = "tracing::span"; -/// `log` target for span activity (enter/exit) records. -#[cfg(feature = "log")] -const ACTIVITY_LOG_TARGET: &str = "tracing::span::active"; - -// ===== impl Span ===== - -impl Span { - /// Constructs a new `Span` with the given [metadata] and set of - /// [field values]. - /// - /// The new span will be constructed by the currently-active [`Subscriber`], - /// with the current span as its parent (if one exists). 
- /// - /// After the span is constructed, [field values] and/or [`follows_from`] - /// annotations may be added to it. - /// - /// [metadata]: super::Metadata - /// [`Subscriber`]: super::subscriber::Subscriber - /// [field values]: super::field::ValueSet - /// [`follows_from`]: super::Span::follows_from - pub fn new(meta: &'static Metadata<'static>, values: &field::ValueSet<'_>) -> Span { - dispatcher::get_default(|dispatch| Self::new_with(meta, values, dispatch)) - } - - #[inline] - #[doc(hidden)] - pub fn new_with( - meta: &'static Metadata<'static>, - values: &field::ValueSet<'_>, - dispatch: &Dispatch, - ) -> Span { - let new_span = Attributes::new(meta, values); - Self::make_with(meta, new_span, dispatch) - } - - /// Constructs a new `Span` as the root of its own trace tree, with the - /// given [metadata] and set of [field values]. - /// - /// After the span is constructed, [field values] and/or [`follows_from`] - /// annotations may be added to it. - /// - /// [metadata]: super::Metadata - /// [field values]: super::field::ValueSet - /// [`follows_from`]: super::Span::follows_from - pub fn new_root(meta: &'static Metadata<'static>, values: &field::ValueSet<'_>) -> Span { - dispatcher::get_default(|dispatch| Self::new_root_with(meta, values, dispatch)) - } - - #[inline] - #[doc(hidden)] - pub fn new_root_with( - meta: &'static Metadata<'static>, - values: &field::ValueSet<'_>, - dispatch: &Dispatch, - ) -> Span { - let new_span = Attributes::new_root(meta, values); - Self::make_with(meta, new_span, dispatch) - } - - /// Constructs a new `Span` as child of the given parent span, with the - /// given [metadata] and set of [field values]. - /// - /// After the span is constructed, [field values] and/or [`follows_from`] - /// annotations may be added to it. 
- /// - /// [metadata]: super::Metadata - /// [field values]: super::field::ValueSet - /// [`follows_from`]: super::Span::follows_from - pub fn child_of( - parent: impl Into>, - meta: &'static Metadata<'static>, - values: &field::ValueSet<'_>, - ) -> Span { - let mut parent = parent.into(); - dispatcher::get_default(move |dispatch| { - Self::child_of_with(Option::take(&mut parent), meta, values, dispatch) - }) - } - - #[inline] - #[doc(hidden)] - pub fn child_of_with( - parent: impl Into>, - meta: &'static Metadata<'static>, - values: &field::ValueSet<'_>, - dispatch: &Dispatch, - ) -> Span { - let new_span = match parent.into() { - Some(parent) => Attributes::child_of(parent, meta, values), - None => Attributes::new_root(meta, values), - }; - Self::make_with(meta, new_span, dispatch) - } - - /// Constructs a new disabled span with the given `Metadata`. - /// - /// This should be used when a span is constructed from a known callsite, - /// but the subscriber indicates that it is disabled. - /// - /// Entering, exiting, and recording values on this span will not notify the - /// `Subscriber` but _may_ record log messages if the `log` feature flag is - /// enabled. - #[inline(always)] - pub fn new_disabled(meta: &'static Metadata<'static>) -> Span { - Self { - inner: None, - meta: Some(meta), - } - } - - /// Constructs a new span that is *completely disabled*. - /// - /// This can be used rather than `Option` to represent cases where a - /// span is not present. - /// - /// Entering, exiting, and recording values on this span will do nothing. - #[inline(always)] - pub const fn none() -> Span { - Self { - inner: None, - meta: None, - } - } - - /// Returns a handle to the span [considered by the `Subscriber`] to be the - /// current span. - /// - /// If the subscriber indicates that it does not track the current span, or - /// that the thread from which this function is called is not currently - /// inside a span, the returned span will be disabled. 
- /// - /// [considered by the `Subscriber`]: - /// super::subscriber::Subscriber::current_span - pub fn current() -> Span { - dispatcher::get_default(|dispatch| { - if let Some((id, meta)) = dispatch.current_span().into_inner() { - let id = dispatch.clone_span(&id); - Self { - inner: Some(Inner::new(id, dispatch)), - meta: Some(meta), - } - } else { - Self::none() - } - }) - } - - fn make_with( - meta: &'static Metadata<'static>, - new_span: Attributes<'_>, - dispatch: &Dispatch, - ) -> Span { - let attrs = &new_span; - let id = dispatch.new_span(attrs); - let inner = Some(Inner::new(id, dispatch)); - - let span = Self { - inner, - meta: Some(meta), - }; - - if_log_enabled! { *meta.level(), { - let target = if attrs.is_empty() { - LIFECYCLE_LOG_TARGET - } else { - meta.target() - }; - let values = attrs.values(); - span.log( - target, - level_to_log!(*meta.level()), - format_args!("++ {};{}", meta.name(), crate::log::LogValueSet { values, is_first: false }), - ); - }} - - span - } - - /// Enters this span, returning a guard that will exit the span when dropped. - /// - /// If this span is enabled by the current subscriber, then this function will - /// call [`Subscriber::enter`] with the span's [`Id`], and dropping the guard - /// will call [`Subscriber::exit`]. If the span is disabled, this does - /// nothing. - /// - /// # In Asynchronous Code - /// - /// **Warning**: in asynchronous code that uses [async/await syntax][syntax], - /// `Span::enter` should be used very carefully or avoided entirely. Holding - /// the drop guard returned by `Span::enter` across `.await` points will - /// result in incorrect traces. For example, - /// - /// ``` - /// # use tracing::info_span; - /// # async fn some_other_async_function() {} - /// async fn my_async_function() { - /// let span = info_span!("my_async_function"); - /// - /// // WARNING: This span will remain entered until this - /// // guard is dropped... 
- /// let _enter = span.enter(); - /// // ...but the `await` keyword may yield, causing the - /// // runtime to switch to another task, while remaining in - /// // this span! - /// some_other_async_function().await - /// - /// // ... - /// } - /// ``` - /// - /// The drop guard returned by `Span::enter` exits the span when it is - /// dropped. When an async function or async block yields at an `.await` - /// point, the current scope is _exited_, but values in that scope are - /// **not** dropped (because the async block will eventually resume - /// execution from that await point). This means that _another_ task will - /// begin executing while _remaining_ in the entered span. This results in - /// an incorrect trace. - /// - /// Instead of using `Span::enter` in asynchronous code, prefer the - /// following: - /// - /// * To enter a span for a synchronous section of code within an async - /// block or function, prefer [`Span::in_scope`]. Since `in_scope` takes a - /// synchronous closure and exits the span when the closure returns, the - /// span will always be exited before the next await point. For example: - /// ``` - /// # use tracing::info_span; - /// # async fn some_other_async_function(_: ()) {} - /// async fn my_async_function() { - /// let span = info_span!("my_async_function"); - /// - /// let some_value = span.in_scope(|| { - /// // run some synchronous code inside the span... - /// }); - /// - /// // This is okay! The span has already been exited before we reach - /// // the await point. - /// some_other_async_function(some_value).await; - /// - /// // ... - /// } - /// ``` - /// * For instrumenting asynchronous code, `tracing` provides the - /// [`Future::instrument` combinator][instrument] for - /// attaching a span to a future (async function or block). This will - /// enter the span _every_ time the future is polled, and exit it whenever - /// the future yields. 
- /// - /// `Instrument` can be used with an async block inside an async function: - /// ```ignore - /// # use tracing::info_span; - /// use tracing::Instrument; - /// - /// # async fn some_other_async_function() {} - /// async fn my_async_function() { - /// let span = info_span!("my_async_function"); - /// async move { - /// // This is correct! If we yield here, the span will be exited, - /// // and re-entered when we resume. - /// some_other_async_function().await; - /// - /// //more asynchronous code inside the span... - /// - /// } - /// // instrument the async block with the span... - /// .instrument(span) - /// // ...and await it. - /// .await - /// } - /// ``` - /// - /// It can also be used to instrument calls to async functions at the - /// callsite: - /// ```ignore - /// # use tracing::debug_span; - /// use tracing::Instrument; - /// - /// # async fn some_other_async_function() {} - /// async fn my_async_function() { - /// let some_value = some_other_async_function() - /// .instrument(debug_span!("some_other_async_function")) - /// .await; - /// - /// // ... - /// } - /// ``` - /// - /// * The [`#[instrument]` attribute macro][attr] can automatically generate - /// correct code when used on an async function: - /// - /// ```ignore - /// # async fn some_other_async_function() {} - /// #[tracing::instrument(level = "info")] - /// async fn my_async_function() { - /// - /// // This is correct! If we yield here, the span will be exited, - /// // and re-entered when we resume. - /// some_other_async_function().await; - /// - /// // ... 
- /// - /// } - /// ``` - /// - /// [syntax]: https://rust-lang.github.io/async-book/01_getting_started/04_async_await_primer.html - /// [`Span::in_scope`]: Span::in_scope() - /// [instrument]: crate::Instrument - /// [attr]: macro@crate::instrument - /// - /// # Examples - /// - /// ``` - /// # use tracing::{span, Level}; - /// let span = span!(Level::INFO, "my_span"); - /// let guard = span.enter(); - /// - /// // code here is within the span - /// - /// drop(guard); - /// - /// // code here is no longer within the span - /// - /// ``` - /// - /// Guards need not be explicitly dropped: - /// - /// ``` - /// # use tracing::trace_span; - /// fn my_function() -> String { - /// // enter a span for the duration of this function. - /// let span = trace_span!("my_function"); - /// let _enter = span.enter(); - /// - /// // anything happening in functions we call is still inside the span... - /// my_other_function(); - /// - /// // returning from the function drops the guard, exiting the span. - /// return "Hello world".to_owned(); - /// } - /// - /// fn my_other_function() { - /// // ... - /// } - /// ``` - /// - /// Sub-scopes may be created to limit the duration for which the span is - /// entered: - /// - /// ``` - /// # use tracing::{info, info_span}; - /// let span = info_span!("my_great_span"); - /// - /// { - /// let _enter = span.enter(); - /// - /// // this event occurs inside the span. - /// info!("i'm in the span!"); - /// - /// // exiting the scope drops the guard, exiting the span. - /// } - /// - /// // this event is not inside the span. 
- /// info!("i'm outside the span!") - /// ``` - /// - /// [`Subscriber::enter`]: super::subscriber::Subscriber::enter() - /// [`Subscriber::exit`]: super::subscriber::Subscriber::exit() - /// [`Id`]: super::Id - #[inline(always)] - pub fn enter(&self) -> Entered<'_> { - self.do_enter(); - Entered { span: self } - } - - /// Enters this span, consuming it and returning a [guard][`EnteredSpan`] - /// that will exit the span when dropped. - /// - ///
- /// - /// - /// If this span is enabled by the current subscriber, then this function will - /// call [`Subscriber::enter`] with the span's [`Id`], and dropping the guard - /// will call [`Subscriber::exit`]. If the span is disabled, this does - /// nothing. - /// - /// This is similar to the [`Span::enter`] method, except that it moves the - /// span by value into the returned guard, rather than borrowing it. - /// Therefore, this method can be used to create and enter a span in a - /// single expression, without requiring a `let`-binding. For example: - /// - /// ``` - /// # use tracing::info_span; - /// let _span = info_span!("something_interesting").entered(); - /// ``` - /// rather than: - /// ``` - /// # use tracing::info_span; - /// let span = info_span!("something_interesting"); - /// let _e = span.enter(); - /// ``` - /// - /// Furthermore, `entered` may be used when the span must be stored in some - /// other struct or be passed to a function while remaining entered. - /// - ///
-    ///     Note: The returned 
-    ///     EnteredSpan guard does not implement Send.
-    ///     Dropping the guard will exit this span, and if the guard is sent
-    ///     to another thread and dropped there, that thread may never have entered
-    ///     this span. Thus, EnteredSpans should not be sent between threads.
-    /// 
- /// - /// [syntax]: https://rust-lang.github.io/async-book/01_getting_started/04_async_await_primer.html - /// - /// # Examples - /// - /// The returned guard can be [explicitly exited][EnteredSpan::exit], - /// returning the un-entered span: - /// - /// ``` - /// # use tracing::{Level, span}; - /// let span = span!(Level::INFO, "doing_something").entered(); - /// - /// // code here is within the span - /// - /// // explicitly exit the span, returning it - /// let span = span.exit(); - /// - /// // code here is no longer within the span - /// - /// // enter the span again - /// let span = span.entered(); - /// - /// // now we are inside the span once again - /// ``` - /// - /// Guards need not be explicitly dropped: - /// - /// ``` - /// # use tracing::trace_span; - /// fn my_function() -> String { - /// // enter a span for the duration of this function. - /// let span = trace_span!("my_function").entered(); - /// - /// // anything happening in functions we call is still inside the span... - /// my_other_function(); - /// - /// // returning from the function drops the guard, exiting the span. - /// return "Hello world".to_owned(); - /// } - /// - /// fn my_other_function() { - /// // ... - /// } - /// ``` - /// - /// Since the [`EnteredSpan`] guard can dereference to the [`Span`] itself, - /// the span may still be accessed while entered. For example: - /// - /// ```rust - /// # use tracing::info_span; - /// use tracing::field; - /// - /// // create the span with an empty field, and enter it. - /// let span = info_span!("my_span", some_field = field::Empty).entered(); - /// - /// // we can still record a value for the field while the span is entered. 
- /// span.record("some_field", &"hello world!"); - /// ``` - /// - - /// [`Subscriber::enter`]: super::subscriber::Subscriber::enter() - /// [`Subscriber::exit`]: super::subscriber::Subscriber::exit() - /// [`Id`]: super::Id - #[inline(always)] - pub fn entered(self) -> EnteredSpan { - self.do_enter(); - EnteredSpan { - span: self, - _not_send: PhantomNotSend, - } - } - - /// Returns this span, if it was [enabled] by the current [`Subscriber`], or - /// the [current span] (whose lexical distance may be further than expected), - /// if this span [is disabled]. - /// - /// This method can be useful when propagating spans to spawned threads or - /// [async tasks]. Consider the following: - /// - /// ``` - /// let _parent_span = tracing::info_span!("parent").entered(); - /// - /// // ... - /// - /// let child_span = tracing::debug_span!("child"); - /// - /// std::thread::spawn(move || { - /// let _entered = child_span.entered(); - /// - /// tracing::info!("spawned a thread!"); - /// - /// // ... - /// }); - /// ``` - /// - /// If the current [`Subscriber`] enables the [`DEBUG`] level, then both - /// the "parent" and "child" spans will be enabled. Thus, when the "spawaned - /// a thread!" event occurs, it will be inside of the "child" span. Because - /// "parent" is the parent of "child", the event will _also_ be inside of - /// "parent". - /// - /// However, if the [`Subscriber`] only enables the [`INFO`] level, the "child" - /// span will be disabled. When the thread is spawned, the - /// `child_span.entered()` call will do nothing, since "child" is not - /// enabled. In this case, the "spawned a thread!" event occurs outside of - /// *any* span, since the "child" span was responsible for propagating its - /// parent to the spawned thread. - /// - /// If this is not the desired behavior, `Span::or_current` can be used to - /// ensure that the "parent" span is propagated in both cases, either as a - /// parent of "child" _or_ directly. 
For example: - /// - /// ``` - /// let _parent_span = tracing::info_span!("parent").entered(); - /// - /// // ... - /// - /// // If DEBUG is enabled, then "child" will be enabled, and `or_current` - /// // returns "child". Otherwise, if DEBUG is not enabled, "child" will be - /// // disabled, and `or_current` returns "parent". - /// let child_span = tracing::debug_span!("child").or_current(); - /// - /// std::thread::spawn(move || { - /// let _entered = child_span.entered(); - /// - /// tracing::info!("spawned a thread!"); - /// - /// // ... - /// }); - /// ``` - /// - /// When spawning [asynchronous tasks][async tasks], `Span::or_current` can - /// be used similarly, in combination with [`instrument`]: - /// - /// ``` - /// use tracing::Instrument; - /// # // lol - /// # mod tokio { - /// # pub(super) fn spawn(_: impl std::future::Future) {} - /// # } - /// - /// let _parent_span = tracing::info_span!("parent").entered(); - /// - /// // ... - /// - /// let child_span = tracing::debug_span!("child"); - /// - /// tokio::spawn( - /// async { - /// tracing::info!("spawned a task!"); - /// - /// // ... - /// - /// }.instrument(child_span.or_current()) - /// ); - /// ``` - /// - /// In general, `or_current` should be preferred over nesting an - /// [`instrument`] call inside of an [`in_current_span`] call, as using - /// `or_current` will be more efficient. - /// - /// ``` - /// use tracing::Instrument; - /// # // lol - /// # mod tokio { - /// # pub(super) fn spawn(_: impl std::future::Future) {} - /// # } - /// async fn my_async_fn() { - /// // ... 
- /// } - /// - /// let _parent_span = tracing::info_span!("parent").entered(); - /// - /// // Do this: - /// tokio::spawn( - /// my_async_fn().instrument(tracing::debug_span!("child").or_current()) - /// ); - /// - /// // ...rather than this: - /// tokio::spawn( - /// my_async_fn() - /// .instrument(tracing::debug_span!("child")) - /// .in_current_span() - /// ); - /// ``` - /// - /// [enabled]: crate::Subscriber::enabled - /// [`Subscriber`]: crate::Subscriber - /// [current span]: Span::current - /// [is disabled]: Span::is_disabled - /// [`INFO`]: crate::Level::INFO - /// [`DEBUG`]: crate::Level::DEBUG - /// [async tasks]: std::task - /// [`instrument`]: crate::instrument::Instrument::instrument - /// [`in_current_span`]: crate::instrument::Instrument::in_current_span - pub fn or_current(self) -> Self { - if self.is_disabled() { - return Self::current(); - } - self - } - - #[inline(always)] - fn do_enter(&self) { - if let Some(inner) = self.inner.as_ref() { - inner.subscriber.enter(&inner.id); - } - - if_log_enabled! { crate::Level::TRACE, { - if let Some(_meta) = self.meta { - self.log(ACTIVITY_LOG_TARGET, log::Level::Trace, format_args!("-> {};", _meta.name())); - } - }} - } - - // Called from [`Entered`] and [`EnteredSpan`] drops. - // - // Running this behaviour on drop rather than with an explicit function - // call means that spans may still be exited when unwinding. - #[inline(always)] - fn do_exit(&self) { - if let Some(inner) = self.inner.as_ref() { - inner.subscriber.exit(&inner.id); - } - - if_log_enabled! { crate::Level::TRACE, { - if let Some(_meta) = self.meta { - self.log(ACTIVITY_LOG_TARGET, log::Level::Trace, format_args!("<- {};", _meta.name())); - } - }} - } - - /// Executes the given function in the context of this span. - /// - /// If this span is enabled, then this function enters the span, invokes `f` - /// and then exits the span. 
If the span is disabled, `f` will still be - /// invoked, but in the context of the currently-executing span (if there is - /// one). - /// - /// Returns the result of evaluating `f`. - /// - /// # Examples - /// - /// ``` - /// # use tracing::{trace, span, Level}; - /// let my_span = span!(Level::TRACE, "my_span"); - /// - /// my_span.in_scope(|| { - /// // this event occurs within the span. - /// trace!("i'm in the span!"); - /// }); - /// - /// // this event occurs outside the span. - /// trace!("i'm not in the span!"); - /// ``` - /// - /// Calling a function and returning the result: - /// ``` - /// # use tracing::{info_span, Level}; - /// fn hello_world() -> String { - /// "Hello world!".to_owned() - /// } - /// - /// let span = info_span!("hello_world"); - /// // the span will be entered for the duration of the call to - /// // `hello_world`. - /// let a_string = span.in_scope(hello_world); - /// - pub fn in_scope T, T>(&self, f: F) -> T { - let _enter = self.enter(); - f() - } - - /// Returns a [`Field`][super::field::Field] for the field with the - /// given `name`, if one exists, - pub fn field(&self, field: &Q) -> Option - where - Q: field::AsField, - { - self.metadata().and_then(|meta| field.as_field(meta)) - } - - /// Returns true if this `Span` has a field for the given - /// [`Field`][super::field::Field] or field name. - #[inline] - pub fn has_field(&self, field: &Q) -> bool - where - Q: field::AsField, - { - self.field(field).is_some() - } - - /// Records that the field described by `field` has the value `value`. - /// - /// This may be used with [`field::Empty`] to declare fields whose values - /// are not known when the span is created, and record them later: - /// ``` - /// use tracing::{trace_span, field}; - /// - /// // Create a span with two fields: `greeting`, with the value "hello world", and - /// // `parting`, without a value. - /// let span = trace_span!("my_span", greeting = "hello world", parting = field::Empty); - /// - /// // ... 
- /// - /// // Now, record a value for parting as well. - /// // (note that the field name is passed as a string slice) - /// span.record("parting", "goodbye world!"); - /// ``` - /// However, it may also be used to record a _new_ value for a field whose - /// value was already recorded: - /// ``` - /// use tracing::info_span; - /// # fn do_something() -> Result<(), ()> { Err(()) } - /// - /// // Initially, let's assume that our attempt to do something is going okay... - /// let span = info_span!("doing_something", is_okay = true); - /// let _e = span.enter(); - /// - /// match do_something() { - /// Ok(something) => { - /// // ... - /// } - /// Err(_) => { - /// // Things are no longer okay! - /// span.record("is_okay", false); - /// } - /// } - /// ``` - /// - ///
-    ///     Note: The fields associated with a span are part
-    ///     of its Metadata.
-    ///     The Metadata
-    ///     describing a particular span is constructed statically when the span
-    ///     is created and cannot be extended later to add new fields. Therefore,
-    ///     you cannot record a value for a field that was not specified when the
-    ///     span was created:
-    /// 
- /// - /// ``` - /// use tracing::{trace_span, field}; - /// - /// // Create a span with two fields: `greeting`, with the value "hello world", and - /// // `parting`, without a value. - /// let span = trace_span!("my_span", greeting = "hello world", parting = field::Empty); - /// - /// // ... - /// - /// // Now, you try to record a value for a new field, `new_field`, which was not - /// // declared as `Empty` or populated when you created `span`. - /// // You won't get any error, but the assignment will have no effect! - /// span.record("new_field", "interesting_value_you_really_need"); - /// - /// // Instead, all fields that may be recorded after span creation should be declared up front, - /// // using field::Empty when a value is not known, as we did for `parting`. - /// // This `record` call will indeed replace field::Empty with "you will be remembered". - /// span.record("parting", "you will be remembered"); - /// ``` - /// - /// [`field::Empty`]: super::field::Empty - /// [`Metadata`]: super::Metadata - pub fn record(&self, field: &Q, value: V) -> &Self - where - Q: field::AsField, - V: field::Value, - { - if let Some(meta) = self.meta { - if let Some(field) = field.as_field(meta) { - self.record_all( - &meta - .fields() - .value_set(&[(&field, Some(&value as &dyn field::Value))]), - ); - } - } - - self - } - - /// Records all the fields in the provided `ValueSet`. - pub fn record_all(&self, values: &field::ValueSet<'_>) -> &Self { - let record = Record::new(values); - if let Some(ref inner) = self.inner { - inner.record(&record); - } - - if let Some(_meta) = self.meta { - if_log_enabled! 
{ *_meta.level(), { - let target = if record.is_empty() { - LIFECYCLE_LOG_TARGET - } else { - _meta.target() - }; - self.log( - target, - level_to_log!(*_meta.level()), - format_args!("{};{}", _meta.name(), crate::log::LogValueSet { values, is_first: false }), - ); - }} - } - - self - } - - /// Returns `true` if this span was disabled by the subscriber and does not - /// exist. - /// - /// See also [`is_none`]. - /// - /// [`is_none`]: Span::is_none() - #[inline] - pub fn is_disabled(&self) -> bool { - self.inner.is_none() - } - - /// Returns `true` if this span was constructed by [`Span::none`] and is - /// empty. - /// - /// If `is_none` returns `true` for a given span, then [`is_disabled`] will - /// also return `true`. However, when a span is disabled by the subscriber - /// rather than constructed by `Span::none`, this method will return - /// `false`, while `is_disabled` will return `true`. - /// - /// [`Span::none`]: Span::none() - /// [`is_disabled`]: Span::is_disabled() - #[inline] - pub fn is_none(&self) -> bool { - self.is_disabled() && self.meta.is_none() - } - - /// Indicates that the span with the given ID has an indirect causal - /// relationship with this span. - /// - /// This relationship differs somewhat from the parent-child relationship: a - /// span may have any number of prior spans, rather than a single one; and - /// spans are not considered to be executing _inside_ of the spans they - /// follow from. This means that a span may close even if subsequent spans - /// that follow from it are still open, and time spent inside of a - /// subsequent span should not be included in the time its precedents were - /// executing. This is used to model causal relationships such as when a - /// single future spawns several related background tasks, et cetera. - /// - /// If this span is disabled, or the resulting follows-from relationship - /// would be invalid, this function will do nothing. 
- /// - /// # Examples - /// - /// Setting a `follows_from` relationship with a `Span`: - /// ``` - /// # use tracing::{span, Id, Level, Span}; - /// let span1 = span!(Level::INFO, "span_1"); - /// let span2 = span!(Level::DEBUG, "span_2"); - /// span2.follows_from(span1); - /// ``` - /// - /// Setting a `follows_from` relationship with the current span: - /// ``` - /// # use tracing::{span, Id, Level, Span}; - /// let span = span!(Level::INFO, "hello!"); - /// span.follows_from(Span::current()); - /// ``` - /// - /// Setting a `follows_from` relationship with a `Span` reference: - /// ``` - /// # use tracing::{span, Id, Level, Span}; - /// let span = span!(Level::INFO, "hello!"); - /// let curr = Span::current(); - /// span.follows_from(&curr); - /// ``` - /// - /// Setting a `follows_from` relationship with an `Id`: - /// ``` - /// # use tracing::{span, Id, Level, Span}; - /// let span = span!(Level::INFO, "hello!"); - /// let id = span.id(); - /// span.follows_from(id); - /// ``` - pub fn follows_from(&self, from: impl Into>) -> &Self { - if let Some(ref inner) = self.inner { - if let Some(from) = from.into() { - inner.follows_from(&from); - } - } - self - } - - /// Returns this span's `Id`, if it is enabled. - pub fn id(&self) -> Option { - self.inner.as_ref().map(Inner::id) - } - - /// Returns this span's `Metadata`, if it is enabled. 
- pub fn metadata(&self) -> Option<&'static Metadata<'static>> { - self.meta - } - - #[cfg(feature = "log")] - #[inline] - fn log(&self, target: &str, level: log::Level, message: fmt::Arguments<'_>) { - if let Some(meta) = self.meta { - if level_to_log!(*meta.level()) <= log::max_level() { - let logger = log::logger(); - let log_meta = log::Metadata::builder().level(level).target(target).build(); - if logger.enabled(&log_meta) { - if let Some(ref inner) = self.inner { - logger.log( - &log::Record::builder() - .metadata(log_meta) - .module_path(meta.module_path()) - .file(meta.file()) - .line(meta.line()) - .args(format_args!("{} span={}", message, inner.id.into_u64())) - .build(), - ); - } else { - logger.log( - &log::Record::builder() - .metadata(log_meta) - .module_path(meta.module_path()) - .file(meta.file()) - .line(meta.line()) - .args(message) - .build(), - ); - } - } - } - } - } - - /// Invokes a function with a reference to this span's ID and subscriber. - /// - /// if this span is enabled, the provided function is called, and the result is returned. - /// If the span is disabled, the function is not called, and this method returns `None` - /// instead. 
- pub fn with_subscriber(&self, f: impl FnOnce((&Id, &Dispatch)) -> T) -> Option { - self.inner - .as_ref() - .map(|inner| f((&inner.id, &inner.subscriber))) - } -} - -impl cmp::PartialEq for Span { - fn eq(&self, other: &Self) -> bool { - match (&self.meta, &other.meta) { - (Some(this), Some(that)) => { - this.callsite() == that.callsite() && self.inner == other.inner - } - _ => false, - } - } -} - -impl Hash for Span { - fn hash(&self, hasher: &mut H) { - self.inner.hash(hasher); - } -} - -impl fmt::Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut span = f.debug_struct("Span"); - if let Some(meta) = self.meta { - span.field("name", &meta.name()) - .field("level", &meta.level()) - .field("target", &meta.target()); - - if let Some(ref inner) = self.inner { - span.field("id", &inner.id()); - } else { - span.field("disabled", &true); - } - - if let Some(ref path) = meta.module_path() { - span.field("module_path", &path); - } - - if let Some(ref line) = meta.line() { - span.field("line", &line); - } - - if let Some(ref file) = meta.file() { - span.field("file", &file); - } - } else { - span.field("none", &true); - } - - span.finish() - } -} - -impl<'a> From<&'a Span> for Option<&'a Id> { - fn from(span: &'a Span) -> Self { - span.inner.as_ref().map(|inner| &inner.id) - } -} - -impl<'a> From<&'a Span> for Option { - fn from(span: &'a Span) -> Self { - span.inner.as_ref().map(Inner::id) - } -} - -impl From for Option { - fn from(span: Span) -> Self { - span.inner.as_ref().map(Inner::id) - } -} - -impl<'a> From<&'a EnteredSpan> for Option<&'a Id> { - fn from(span: &'a EnteredSpan) -> Self { - span.inner.as_ref().map(|inner| &inner.id) - } -} - -impl<'a> From<&'a EnteredSpan> for Option { - fn from(span: &'a EnteredSpan) -> Self { - span.inner.as_ref().map(Inner::id) - } -} - -impl Drop for Span { - #[inline(always)] - fn drop(&mut self) { - if let Some(Inner { - ref id, - ref subscriber, - }) = self.inner - { - 
subscriber.try_close(id.clone()); - } - - if_log_enabled! { crate::Level::TRACE, { - if let Some(meta) = self.meta { - self.log( - LIFECYCLE_LOG_TARGET, - log::Level::Trace, - format_args!("-- {};", meta.name()), - ); - } - }} - } -} - -// ===== impl Inner ===== - -impl Inner { - /// Indicates that the span with the given ID has an indirect causal - /// relationship with this span. - /// - /// This relationship differs somewhat from the parent-child relationship: a - /// span may have any number of prior spans, rather than a single one; and - /// spans are not considered to be executing _inside_ of the spans they - /// follow from. This means that a span may close even if subsequent spans - /// that follow from it are still open, and time spent inside of a - /// subsequent span should not be included in the time its precedents were - /// executing. This is used to model causal relationships such as when a - /// single future spawns several related background tasks, et cetera. - /// - /// If this span is disabled, this function will do nothing. Otherwise, it - /// returns `Ok(())` if the other span was added as a precedent of this - /// span, or an error if this was not possible. - fn follows_from(&self, from: &Id) { - self.subscriber.record_follows_from(&self.id, from) - } - - /// Returns the span's ID. 
- fn id(&self) -> Id { - self.id.clone() - } - - fn record(&self, values: &Record<'_>) { - self.subscriber.record(&self.id, values) - } - - fn new(id: Id, subscriber: &Dispatch) -> Self { - Inner { - id, - subscriber: subscriber.clone(), - } - } -} - -impl cmp::PartialEq for Inner { - fn eq(&self, other: &Self) -> bool { - self.id == other.id - } -} - -impl Hash for Inner { - fn hash(&self, state: &mut H) { - self.id.hash(state); - } -} - -impl Clone for Inner { - fn clone(&self) -> Self { - Inner { - id: self.subscriber.clone_span(&self.id), - subscriber: self.subscriber.clone(), - } - } -} - -// ===== impl Entered ===== - -impl EnteredSpan { - /// Returns this span's `Id`, if it is enabled. - pub fn id(&self) -> Option { - self.inner.as_ref().map(Inner::id) - } - - /// Exits this span, returning the underlying [`Span`]. - #[inline] - pub fn exit(mut self) -> Span { - // One does not simply move out of a struct with `Drop`. - let span = mem::replace(&mut self.span, Span::none()); - span.do_exit(); - span - } -} - -impl Deref for EnteredSpan { - type Target = Span; - - #[inline] - fn deref(&self) -> &Span { - &self.span - } -} - -impl<'a> Drop for Entered<'a> { - #[inline(always)] - fn drop(&mut self) { - self.span.do_exit() - } -} - -impl Drop for EnteredSpan { - #[inline(always)] - fn drop(&mut self) { - self.span.do_exit() - } -} - -/// Technically, `EnteredSpan` _can_ implement both `Send` *and* -/// `Sync` safely. It doesn't, because it has a `PhantomNotSend` field, -/// specifically added in order to make it `!Send`. -/// -/// Sending an `EnteredSpan` guard between threads cannot cause memory unsafety. -/// However, it *would* result in incorrect behavior, so we add a -/// `PhantomNotSend` to prevent it from being sent between threads. 
This is -/// because it must be *dropped* on the same thread that it was created; -/// otherwise, the span will never be exited on the thread where it was entered, -/// and it will attempt to exit the span on a thread that may never have entered -/// it. However, we still want them to be `Sync` so that a struct holding an -/// `Entered` guard can be `Sync`. -/// -/// Thus, this is totally safe. -#[derive(Debug)] -struct PhantomNotSend { - ghost: PhantomData<*mut ()>, -} - -#[allow(non_upper_case_globals)] -const PhantomNotSend: PhantomNotSend = PhantomNotSend { ghost: PhantomData }; - -/// # Safety -/// -/// Trivially safe, as `PhantomNotSend` doesn't have any API. -unsafe impl Sync for PhantomNotSend {} - -#[cfg(test)] -mod test { - use super::*; - - trait AssertSend: Send {} - impl AssertSend for Span {} - - trait AssertSync: Sync {} - impl AssertSync for Span {} - impl AssertSync for Entered<'_> {} - impl AssertSync for EnteredSpan {} - - #[test] - fn test_record_backwards_compat() { - Span::current().record("some-key", "some text"); - Span::current().record("some-key", false); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing/src/stdlib.rs s390-tools-2.33.1/rust-vendor/tracing/src/stdlib.rs --- s390-tools-2.31.0/rust-vendor/tracing/src/stdlib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/src/stdlib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,55 +0,0 @@ -//! Re-exports either the Rust `std` library or `core` and `alloc` when `std` is -//! disabled. -//! -//! `crate::stdlib::...` should be used rather than `std::` when adding code that -//! will be available with the standard library disabled. -//! -//! Note that this module is called `stdlib` rather than `std`, as Rust 1.34.0 -//! does not permit redefining the name `stdlib` (although this works on the -//! latest stable Rust). 
-#[cfg(feature = "std")] -pub(crate) use std::*; - -#[cfg(not(feature = "std"))] -pub(crate) use self::no_std::*; - -#[cfg(not(feature = "std"))] -mod no_std { - // We pre-emptively export everything from libcore/liballoc, (even modules - // we aren't using currently) to make adding new code easier. Therefore, - // some of these imports will be unused. - #![allow(unused_imports)] - - pub(crate) use core::{ - any, array, ascii, cell, char, clone, cmp, convert, default, f32, f64, ffi, future, hash, - hint, i128, i16, i8, isize, iter, marker, mem, num, ops, option, pin, ptr, result, task, - time, u128, u16, u32, u8, usize, - }; - - pub(crate) use alloc::{boxed, collections, rc, string, vec}; - - pub(crate) mod borrow { - pub(crate) use alloc::borrow::*; - pub(crate) use core::borrow::*; - } - - pub(crate) mod fmt { - pub(crate) use alloc::fmt::*; - pub(crate) use core::fmt::*; - } - - pub(crate) mod slice { - pub(crate) use alloc::slice::*; - pub(crate) use core::slice::*; - } - - pub(crate) mod str { - pub(crate) use alloc::str::*; - pub(crate) use core::str::*; - } - - pub(crate) mod sync { - pub(crate) use alloc::sync::*; - pub(crate) use core::sync::*; - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing/src/subscriber.rs s390-tools-2.33.1/rust-vendor/tracing/src/subscriber.rs --- s390-tools-2.31.0/rust-vendor/tracing/src/subscriber.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing/src/subscriber.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,65 +0,0 @@ -//! Collects and records trace data. -pub use tracing_core::subscriber::*; - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub use tracing_core::dispatcher::DefaultGuard; - -/// Sets this [`Subscriber`] as the default for the current thread for the -/// duration of a closure. -/// -/// The default subscriber is used when creating a new [`Span`] or -/// [`Event`]. 
-/// -/// -/// [`Span`]: super::span::Span -/// [`Subscriber`]: super::subscriber::Subscriber -/// [`Event`]: super::event::Event -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub fn with_default(subscriber: S, f: impl FnOnce() -> T) -> T -where - S: Subscriber + Send + Sync + 'static, -{ - crate::dispatcher::with_default(&crate::Dispatch::new(subscriber), f) -} - -/// Sets this subscriber as the global default for the duration of the entire program. -/// Will be used as a fallback if no thread-local subscriber has been set in a thread (using `with_default`.) -/// -/// Can only be set once; subsequent attempts to set the global default will fail. -/// Returns whether the initialization was successful. -/// -/// Note: Libraries should *NOT* call `set_global_default()`! That will cause conflicts when -/// executables try to set them later. -/// -/// [span]: super::span -/// [`Subscriber`]: super::subscriber::Subscriber -/// [`Event`]: super::event::Event -pub fn set_global_default(subscriber: S) -> Result<(), SetGlobalDefaultError> -where - S: Subscriber + Send + Sync + 'static, -{ - crate::dispatcher::set_global_default(crate::Dispatch::new(subscriber)) -} - -/// Sets the [`Subscriber`] as the default for the current thread for the -/// duration of the lifetime of the returned [`DefaultGuard`]. -/// -/// The default subscriber is used when creating a new [`Span`] or [`Event`]. 
-/// -/// [`Span`]: super::span::Span -/// [`Subscriber`]: super::subscriber::Subscriber -/// [`Event`]: super::event::Event -/// [`DefaultGuard`]: super::dispatcher::DefaultGuard -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -#[must_use = "Dropping the guard unregisters the subscriber."] -pub fn set_default(subscriber: S) -> DefaultGuard -where - S: Subscriber + Send + Sync + 'static, -{ - crate::dispatcher::set_default(&crate::Dispatch::new(subscriber)) -} - -pub use tracing_core::dispatcher::SetGlobalDefaultError; diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/tracing-core/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/tracing-core/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"} \ No newline at end of file diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/Cargo.toml s390-tools-2.33.1/rust-vendor/tracing-core/Cargo.toml --- s390-tools-2.31.0/rust-vendor/tracing-core/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,66 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -rust-version = "1.56.0" -name = "tracing-core" -version = "0.1.32" -authors = ["Tokio Contributors "] -description = """ -Core primitives for application-level tracing. -""" -homepage = "https://tokio.rs" -readme = "README.md" -keywords = [ - "logging", - "tracing", - "profiling", -] -categories = [ - "development-tools::debugging", - "development-tools::profiling", - "asynchronous", -] -license = "MIT" -repository = "https://github.com/tokio-rs/tracing" - -[package.metadata.docs.rs] -all-features = true -rustc-args = [ - "--cfg", - "tracing_unstable", -] -rustdoc-args = [ - "--cfg", - "docsrs", - "--cfg", - "tracing_unstable", -] - -[dependencies.once_cell] -version = "1.13.0" -optional = true - -[features] -default = [ - "std", - "valuable/std", -] -std = ["once_cell"] - -[target."cfg(tracing_unstable)".dependencies.valuable] -version = "0.1.0" -optional = true -default-features = false - -[badges.maintenance] -status = "actively-developed" diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/CHANGELOG.md s390-tools-2.33.1/rust-vendor/tracing-core/CHANGELOG.md --- s390-tools-2.31.0/rust-vendor/tracing-core/CHANGELOG.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/CHANGELOG.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,541 +0,0 @@ -# 0.1.32 (October 13, 2023) - -### Documented - -- Fix typo in `field` docs ([#2611]) -- Remove duplicate wording ([#2674]) - -### Changed - -- Allow `ValueSet`s of any length ([#2508]) - -[#2611]: https://github.com/tokio-rs/tracing/pull/2611 -[#2674]: https://github.com/tokio-rs/tracing/pull/2674 -[#2508]: https://github.com/tokio-rs/tracing/pull/2508 - -# 0.1.31 (May 11, 2023) - -This release of `tracing-core` fixes a bug that caused threads which call -`dispatcher::get_default` _before_ a global default subscriber is set to never -see the global default once it is set. 
In addition, it includes improvements for -instrumentation performance in some cases, especially when using a global -default dispatcher. - -### Fixed - -- Fixed incorrect thread-local caching of `Dispatch::none` if - `dispatcher::get_default` is called before `dispatcher::set_global_default` - ([#2593]) - -### Changed - -- Cloning a `Dispatch` that points at a global default subscriber no longer - requires an `Arc` reference count increment, improving performance - substantially ([#2593]) -- `dispatcher::get_default` no longer attempts to access a thread local if the - scoped dispatcher is not in use, improving performance when the default - dispatcher is global ([#2593]) -- Added `#[inline]` annotations called by the `event!` and `span!` macros to - reduce the size of macro-generated code and improve recording performance - ([#2555]) - -Thanks to new contributor @ldm0 for contributing to this release! - -[#2593]: https://github.com/tokio-rs/tracing/pull/2593 -[#2555]: https://github.com/tokio-rs/tracing/pull/2555 - -# 0.1.30 (October 6, 2022) - -This release of `tracing-core` adds a new `on_register_dispatch` method to the -`Subscriber` trait to allow the `Subscriber` to perform initialization after -being registered as a `Dispatch`, and a `WeakDispatch` type to allow a -`Subscriber` to store its own `Dispatch` without creating reference count -cycles. - -### Added - -- `Subscriber::on_register_dispatch` method ([#2269]) -- `WeakDispatch` type and `Dispatch::downgrade()` function ([#2293]) - -Thanks to @jswrenn for contributing to this release! - -[#2269]: https://github.com/tokio-rs/tracing/pull/2269 -[#2293]: https://github.com/tokio-rs/tracing/pull/2293 - -# 0.1.29 (July 29, 2022) - -This release of `tracing-core` adds `PartialEq` and `Eq` implementations for -metadata types, and improves error messages when setting the global default -subscriber fails. 
- -### Added - -- `PartialEq` and `Eq` implementations for `Metadata` ([#2229]) -- `PartialEq` and `Eq` implementations for `FieldSet` ([#2229]) - -### Fixed - -- Fixed unhelpful `fmt::Debug` output for `dispatcher::SetGlobalDefaultError` - ([#2250]) -- Fixed compilation with `-Z minimal-versions` ([#2246]) - -Thanks to @jswrenn and @CAD97 for contributing to this release! - -[#2229]: https://github.com/tokio-rs/tracing/pull/2229 -[#2246]: https://github.com/tokio-rs/tracing/pull/2246 -[#2250]: https://github.com/tokio-rs/tracing/pull/2250 - -# 0.1.28 (June 23, 2022) - -This release of `tracing-core` adds new `Value` implementations, including one -for `String`, to allow recording `&String` as a value without having to call -`as_str()` or similar, and for 128-bit integers (`i128` and `u128`). In -addition, it adds new methods and trait implementations for `Subscriber`s. - -### Added - -- `Value` implementation for `String` ([#2164]) -- `Value` implementation for `u128` and `i28` ([#2166]) -- `downcast_ref` and `is` methods for `dyn Subscriber + Sync`, - `dyn Subscriber + Send`, and `dyn Subscriber + Send + Sync` ([#2160]) -- `Subscriber::event_enabled` method to enable filtering based on `Event` field - values ([#2008]) -- `Subscriber` implementation for `Box` and - `Arc` ([#2161]) - -Thanks to @jswrenn and @CAD97 for contributing to this release! - -[#2164]: https://github.com/tokio-rs/tracing/pull/2164 -[#2166]: https://github.com/tokio-rs/tracing/pull/2166 -[#2160]: https://github.com/tokio-rs/tracing/pull/2160 -[#2008]: https://github.com/tokio-rs/tracing/pull/2008 -[#2161]: https://github.com/tokio-rs/tracing/pull/2161 - -# 0.1.27 (June 7, 2022) - -This release of `tracing-core` introduces a new `DefaultCallsite` type, which -can be used by instrumentation crates rather than implementing their own -callsite types. Using `DefaultCallsite` may offer reduced overhead from callsite -registration. 
- -### Added - -- `DefaultCallsite`, a pre-written `Callsite` implementation for use in - instrumentation crates ([#2083]) -- `ValueSet::len` and `Record::len` methods returning the number of fields in a - `ValueSet` or `Record` ([#2152]) - -### Changed - -- Replaced `lazy_static` dependency with `once_cell` ([#2147]) - -### Documented - -- Added documentation to the `callsite` module ([#2088], [#2149]) - -Thanks to new contributors @jamesmunns and @james7132 for contributing to this -release! - -[#2083]: https://github.com/tokio-rs/tracing/pull/2083 -[#2152]: https://github.com/tokio-rs/tracing/pull/2152 -[#2147]: https://github.com/tokio-rs/tracing/pull/2147 -[#2088]: https://github.com/tokio-rs/tracing/pull/2088 -[#2149]: https://github.com/tokio-rs/tracing/pull/2149 - -# 0.1.26 (April 14, 2022) - -This release adds a `Value` implementation for `Box` to allow -recording boxed values more conveniently. In particular, this should improve -the ergonomics of the implementations for `dyn std::error::Error` trait objects, -including those added in [v0.1.25]. - -### Added - -- `Value` implementation for `Box where T: Value` ([#2071]) - -### Fixed - -- Broken documentation links ([#2068]) - -Thanks to new contributor @ben0x539 for contributing to this release! - - -[v0.1.25]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.25 -[#2071]: https://github.com/tokio-rs/tracing/pull/2071 -[#2068]: https://github.com/tokio-rs/tracing/pull/2068 - -# 0.1.25 (April 12, 2022) - -This release adds additional `Value` implementations for `std::error::Error` -trait objects with auto trait bounds (`Send` and `Sync`), as Rust will not -auto-coerce trait objects. Additionally, it fixes a bug when setting scoped -dispatchers that was introduced in the previous release ([v0.1.24]). 
- -### Added - -- `Value` implementations for `dyn Error + Send + 'static`, `dyn Error + Send + - Sync + 'static`, `dyn Error + Sync + 'static` ([#2066]) - -### Fixed - -- Failure to use the global default dispatcher if a thread has set a scoped - default prior to setting the global default, and unset the scoped default - after setting the global default ([#2065]) - -Thanks to @lilyball for contributing to this release! - -[v0.1.24]: https://github.com/tokio-rs/tracing/releases/tag/tracing-core-0.1.24 -[#2066]: https://github.com/tokio-rs/tracing/pull/2066 -[#2065]: https://github.com/tokio-rs/tracing/pull/2065 - -# 0.1.24 (April 1, 2022) - -This release fixes a bug where setting `NoSubscriber` as the local default would -not disable the global default subscriber locally. - -### Fixed - -- Setting `NoSubscriber` as the local default now correctly disables the global - default subscriber ([#2001]) -- Fixed compilation warnings with the "std" feature disabled ([#2022]) - -### Changed - -- Removed unnecessary use of `write!` and `format_args!` macros ([#1988]) - -[#1988]: https://github.com/tokio-rs/tracing/pull/1988 -[#2001]: https://github.com/tokio-rs/tracing/pull/2001 -[#2022]: https://github.com/tokio-rs/tracing/pull/2022 - -# 0.1.23 (March 8, 2022) - -### Changed - -- Removed `#[inline]` attributes from some `Dispatch` methods whose - callers are now inlined ([#1974]) -- Bumped minimum supported Rust version (MSRV) to Rust 1.49.0 ([#1913]) - -[#1913]: https://github.com/tokio-rs/tracing/pull/1913 -[#1974]: https://github.com/tokio-rs/tracing/pull/1974 - -# 0.1.22 (February 3, 2022) - -This release adds *experimental* support for recording structured field values -using the [`valuable`] crate. See [this blog post][post] for details on -`valuable`. - -Note that `valuable` support currently requires `--cfg tracing_unstable`. See -the documentation for details. 
- -### Added - -- **field**: Experimental support for recording field values using the - [`valuable`] crate ([#1608], [#1888], [#1887]) -- **field**: Added `ValueSet::record` method ([#1823]) -- **subscriber**: `Default` impl for `NoSubscriber` ([#1785]) -- **metadata**: New `Kind::HINT` to support the `enabled!` macro in `tracing` - ([#1883], [#1891]) -### Fixed - -- Fixed a number of documentation issues ([#1665], [#1692], [#1737]) - -Thanks to @xd009642, @Skepfyr, @guswynn, @Folyd, and @mbergkvist for -contributing to this release! - -[`valuable`]: https://crates.io/crates/valuable -[post]: https://tokio.rs/blog/2021-05-valuable -[#1608]: https://github.com/tokio-rs/tracing/pull/1608 -[#1888]: https://github.com/tokio-rs/tracing/pull/1888 -[#1887]: https://github.com/tokio-rs/tracing/pull/1887 -[#1823]: https://github.com/tokio-rs/tracing/pull/1823 -[#1785]: https://github.com/tokio-rs/tracing/pull/1785 -[#1883]: https://github.com/tokio-rs/tracing/pull/1883 -[#1891]: https://github.com/tokio-rs/tracing/pull/1891 -[#1665]: https://github.com/tokio-rs/tracing/pull/1665 -[#1692]: https://github.com/tokio-rs/tracing/pull/1692 -[#1737]: https://github.com/tokio-rs/tracing/pull/1737 - -# 0.1.21 (October 1, 2021) - -This release adds support for recording `Option where T: Value` as typed -`tracing` field values. - -### Added - -- **field**: `Value` impl for `Option where T: Value` ([#1585]) - -### Fixed - -- Fixed deprecation warnings when building with `default-features` disabled - ([#1603], [#1606]) -- Documentation fixes and improvements ([#1595], [#1601]) - -Thanks to @brianburgers, @DCjanus, and @matklad for contributing to this -release! 
- -[#1585]: https://github.com/tokio-rs/tracing/pull/1585 -[#1595]: https://github.com/tokio-rs/tracing/pull/1595 -[#1601]: https://github.com/tokio-rs/tracing/pull/1601 -[#1603]: https://github.com/tokio-rs/tracing/pull/1603 -[#1606]: https://github.com/tokio-rs/tracing/pull/1606 - -# 0.1.20 (September 12, 2021) - -This release adds support for `f64` as one of the `tracing-core` -primitive field values, allowing floating-point values to be recorded as -typed values rather than with `fmt::Debug`. Additionally, it adds -`NoSubscriber`, a `Subscriber` implementation that does nothing. - -### Added - -- **subscriber**: `NoSubscriber`, a no-op `Subscriber` implementation - ([#1549]) -- **field**: Added `Visit::record_f64` and support for recording - floating-point values ([#1507]) - -Thanks to new contributors @jsgf and @maxburke for contributing to this -release! - -[#1549]: https://github.com/tokio-rs/tracing/pull/1549 -[#1507]: https://github.com/tokio-rs/tracing/pull/1507 - -# 0.1.19 (August 17, 2021) -### Added - -- `Level::as_str` ([#1413]) -- `Hash` implementation for `Level` and `LevelFilter` ([#1456]) -- `Value` implementation for `&mut T where T: Value` ([#1385]) -- Multiple documentation fixes and improvements ([#1435], [#1446]) - -Thanks to @Folyd, @teozkr, and @dvdplm for contributing to this release! - -[#1413]: https://github.com/tokio-rs/tracing/pull/1413 -[#1456]: https://github.com/tokio-rs/tracing/pull/1456 -[#1385]: https://github.com/tokio-rs/tracing/pull/1385 -[#1435]: https://github.com/tokio-rs/tracing/pull/1435 -[#1446]: https://github.com/tokio-rs/tracing/pull/1446 - -# 0.1.18 (April 30, 2021) - -### Added - -- `Subscriber` impl for `Box` ([#1358]) -- `Subscriber` impl for `Arc` ([#1374]) -- Symmetric `From` impls for existing `Into` impls on `Current` and `Option` - ([#1335]) -- `Attributes::fields` accessor that returns the set of fields defined on a - span's `Attributes` ([#1331]) - - -Thanks to @Folyd for contributing to this release! 
- -[#1358]: https://github.com/tokio-rs/tracing/pull/1358 -[#1374]: https://github.com/tokio-rs/tracing/pull/1374 -[#1335]: https://github.com/tokio-rs/tracing/pull/1335 -[#1331]: https://github.com/tokio-rs/tracing/pull/1331 - -# 0.1.17 (September 28, 2020) - -### Fixed - -- Incorrect inlining of `Event::dispatch` and `Event::child_of`, which could - result in `dispatcher::get_default` being inlined at the callsite ([#994]) - -### Added - -- `Copy` implementations for `Level` and `LevelFilter` ([#992]) - -Thanks to new contributors @jyn514 and @TaKO8Ki for contributing to this -release! - -[#994]: https://github.com/tokio-rs/tracing/pull/994 -[#992]: https://github.com/tokio-rs/tracing/pull/992 - -# 0.1.16 (September 8, 2020) - -### Fixed - -- Added a conversion from `Option` to `LevelFilter`. This resolves a - previously unreported regression where `Option` was no longer - a valid LevelFilter. ([#966](https://github.com/tokio-rs/tracing/pull/966)) - -# 0.1.15 (August 22, 2020) - -### Fixed - -- When combining `Interest` from multiple subscribers, if the interests differ, - the current subscriber is now always asked if a callsite should be enabled - (#927) - -## Added - -- Internal API changes to support optimizations in the `tracing` crate (#943) -- **docs**: Multiple fixes and improvements (#913, #941) - -# 0.1.14 (August 10, 2020) - -### Fixed - -- Incorrect calculation of global max level filter which could result in fast - filtering paths not being taken (#908) - -# 0.1.13 (August 4, 2020) - -### Fixed - -- Missing `fmt::Display` impl for `field::DisplayValue` causing a compilation - failure when the "log" feature is enabled (#887) - -Thanks to @d-e-s-o for contributing to this release! 
- -# 0.1.12 (July 31, 2020) - -### Added - -- `LevelFilter` type and `LevelFilter::current()` for returning the highest level - that any subscriber will enable (#853) -- `Subscriber::max_level_hint` optional trait method, for setting the value - returned by `LevelFilter::current()` (#853) - -### Fixed - -- **docs**: Removed outdated reference to a Tokio API that no longer exists - (#857) - -Thanks to new contributor @dignati for contributing to this release! - -# 0.1.11 (June 8, 2020) - -### Changed - -- Replaced use of `inner_local_macros` with `$crate::` (#729) - -### Added - -- `must_use` warning to guards returned by `dispatcher::set_default` (#686) -- `fmt::Debug` impl to `dyn Value`s (#696) -- Functions to convert between `span::Id` and `NonZeroU64` (#770) -- More obvious warnings in documentation (#769) - -### Fixed - -- Compiler error when `tracing-core/std` feature is enabled but `tracing/std` is - not (#760) -- Clippy warning on vtable address comparison in `callsite::Identifier` (#749) -- Documentation formatting issues (#715, #771) - -Thanks to @bkchr, @majecty, @taiki-e, @nagisa, and @nvzqz for contributing to -this release! - -# 0.1.10 (January 24, 2020) - -### Added - -- `field::Empty` type for declaring empty fields whose values will be recorded - later (#548) -- `field::Value` implementations for `Wrapping` and `NonZero*` numbers (#538) - -### Fixed - -- Broken and unresolvable links in RustDoc (#595) - -Thanks to @oli-cosmian for contributing to this release! 
- -# 0.1.9 (January 10, 2020) - -### Added - -- API docs now show what feature flags are required to enable each item (#523) - -### Fixed - -- A panic when the current default subscriber subscriber calls - `dispatcher::with_default` as it is being dropped (#522) -- Incorrect documentation for `Subscriber::drop_span` (#524) - -# 0.1.8 (December 20, 2019) - -### Added - -- `Default` impl for `Dispatch` (#411) - -### Fixed - -- Removed duplicate `lazy_static` dependencies (#424) -- Fixed no-std dependencies being enabled even when `std` feature flag is set - (#424) -- Broken link to `Metadata` in `Event` docs (#461) - -# 0.1.7 (October 18, 2019) - -### Added - -- Added `dispatcher::set_default` API which returns a drop guard (#388) - -### Fixed - -- Added missing `Value` impl for `u8` (#392) -- Broken links in docs. - -# 0.1.6 (September 12, 2019) - -### Added - -- Internal APIs to support performance optimizations (#326) - -### Fixed - -- Clarified wording in `field::display` documentation (#340) - -# 0.1.5 (August 16, 2019) - -### Added - -- `std::error::Error` as a new primitive `Value` type (#277) -- `Event::new` and `Event::new_child_of` to manually construct `Event`s (#281) - -# 0.1.4 (August 9, 2019) - -### Added - -- Support for `no-std` + `liballoc` (#256) - -### Fixed - -- Broken links in RustDoc (#259) - -# 0.1.3 (August 8, 2019) - -### Added - -- `std::fmt::Display` implementation for `Level` (#194) -- `std::str::FromStr` implementation for `Level` (#195) - -# 0.1.2 (July 10, 2019) - -### Deprecated - -- `Subscriber::drop_span` in favor of new `Subscriber::try_close` (#168) - -### Added - -- `Into>`, `Into>`, and - `Into>>` impls for `span::Current` (#170) -- `Subscriber::try_close` method (#153) -- Improved documentation for `dispatcher` (#171) - -# 0.1.1 (July 6, 2019) - -### Added - -- `Subscriber::current_span` API to return the current span (#148). -- `span::Current` type, representing the `Subscriber`'s view of the current - span (#148). 
- -### Fixed - -- Typos and broken links in documentation (#123, #124, #128, #154) - -# 0.1.0 (June 27, 2019) - -- Initial release diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/LICENSE s390-tools-2.33.1/rust-vendor/tracing-core/LICENSE --- s390-tools-2.31.0/rust-vendor/tracing-core/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,25 +0,0 @@ -Copyright (c) 2019 Tokio Contributors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/README.md s390-tools-2.33.1/rust-vendor/tracing-core/README.md --- s390-tools-2.31.0/rust-vendor/tracing-core/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,121 +0,0 @@ -![Tracing — Structured, application-level diagnostics][splash] - -[splash]: https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/splash.svg - -# tracing-core - -Core primitives for application-level tracing. - -[![Crates.io][crates-badge]][crates-url] -[![Documentation][docs-badge]][docs-url] -[![Documentation (master)][docs-master-badge]][docs-master-url] -[![MIT licensed][mit-badge]][mit-url] -[![Build Status][actions-badge]][actions-url] -[![Discord chat][discord-badge]][discord-url] - -[Documentation][docs-url] | [Chat][discord-url] - -[crates-badge]: https://img.shields.io/crates/v/tracing-core.svg -[crates-url]: https://crates.io/crates/tracing-core/0.1.31 -[docs-badge]: https://docs.rs/tracing-core/badge.svg -[docs-url]: https://docs.rs/tracing-core/0.1.31 -[docs-master-badge]: https://img.shields.io/badge/docs-master-blue -[docs-master-url]: https://tracing-rs.netlify.com/tracing_core -[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg -[mit-url]: LICENSE -[actions-badge]: https://github.com/tokio-rs/tracing/workflows/CI/badge.svg -[actions-url]:https://github.com/tokio-rs/tracing/actions?query=workflow%3ACI -[discord-badge]: https://img.shields.io/discord/500028886025895936?logo=discord&label=discord&logoColor=white -[discord-url]: https://discord.gg/EeF3cQw - -## Overview - -[`tracing`] is a framework for instrumenting Rust programs to collect -structured, event-based diagnostic information. This crate defines the core -primitives of `tracing`. - -The crate provides: - -* [`span::Id`] identifies a span within the execution of a program. - -* [`Event`] represents a single event within a trace. 
- -* [`Subscriber`], the trait implemented to collect trace data. - -* [`Metadata`] and [`Callsite`] provide information describing spans and - events. - -* [`Field`], [`FieldSet`], [`Value`], and [`ValueSet`] represent the - structured data attached to spans and events. - -* [`Dispatch`] allows spans and events to be dispatched to `Subscriber`s. - -In addition, it defines the global callsite registry and per-thread current -dispatcher which other components of the tracing system rely on. - -*Compiler support: [requires `rustc` 1.56+][msrv]* - -[msrv]: #supported-rust-versions - -## Usage - -Application authors will typically not use this crate directly. Instead, they -will use the [`tracing`] crate, which provides a much more fully-featured -API. However, this crate's API will change very infrequently, so it may be used -when dependencies must be very stable. - -`Subscriber` implementations may depend on `tracing-core` rather than `tracing`, -as the additional APIs provided by `tracing` are primarily useful for -instrumenting libraries and applications, and are generally not necessary for -`Subscriber` implementations. - -### Crate Feature Flags - -The following crate feature flags are available: - -* `std`: Depend on the Rust standard library (enabled by default). - - `no_std` users may disable this feature with `default-features = false`: - - ```toml - [dependencies] - tracing-core = { version = "0.1.31", default-features = false } - ``` - - **Note**:`tracing-core`'s `no_std` support requires `liballoc`. 
- -[`tracing`]: ../tracing -[`span::Id`]: https://docs.rs/tracing-core/0.1.31/tracing_core/span/struct.Id.html -[`Event`]: https://docs.rs/tracing-core/0.1.31/tracing_core/event/struct.Event.html -[`Subscriber`]: https://docs.rs/tracing-core/0.1.31/tracing_core/subscriber/trait.Subscriber.html -[`Metadata`]: https://docs.rs/tracing-core/0.1.31/tracing_core/metadata/struct.Metadata.html -[`Callsite`]: https://docs.rs/tracing-core/0.1.31/tracing_core/callsite/trait.Callsite.html -[`Field`]: https://docs.rs/tracing-core/0.1.31/tracing_core/field/struct.Field.html -[`FieldSet`]: https://docs.rs/tracing-core/0.1.31/tracing_core/field/struct.FieldSet.html -[`Value`]: https://docs.rs/tracing-core/0.1.31/tracing_core/field/trait.Value.html -[`ValueSet`]: https://docs.rs/tracing-core/0.1.31/tracing_core/field/struct.ValueSet.html -[`Dispatch`]: https://docs.rs/tracing-core/0.1.31/tracing_core/dispatcher/struct.Dispatch.html - -## Supported Rust Versions - -Tracing is built against the latest stable release. The minimum supported -version is 1.56. The current Tracing version is not guaranteed to build on Rust -versions earlier than the minimum supported version. - -Tracing follows the same compiler support policies as the rest of the Tokio -project. The current stable Rust compiler and the three most recent minor -versions before it will always be supported. For example, if the current stable -compiler version is 1.69, the minimum supported version will not be increased -past 1.69, three minor versions prior. Increasing the minimum supported compiler -version is not considered a semver breaking change as long as doing so complies -with this policy. - -## License - -This project is licensed under the [MIT license](LICENSE). - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in Tokio by you, shall be licensed as MIT, without any additional -terms or conditions. 
diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/callsite.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/callsite.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/callsite.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/callsite.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,619 +0,0 @@ -//! Callsites represent the source locations from which spans or events -//! originate. -//! -//! # What Are Callsites? -//! -//! Every span or event in `tracing` is associated with a [`Callsite`]. A -//! callsite is a small `static` value that is responsible for the following: -//! -//! * Storing the span or event's [`Metadata`], -//! * Uniquely [identifying](Identifier) the span or event definition, -//! * Caching the subscriber's [`Interest`][^1] in that span or event, to avoid -//! re-evaluating filters. -//! -//! # Registering Callsites -//! -//! When a span or event is recorded for the first time, its callsite -//! [`register`]s itself with the global callsite registry. Registering a -//! callsite calls the [`Subscriber::register_callsite`][`register_callsite`] -//! method with that callsite's [`Metadata`] on every currently active -//! subscriber. This serves two primary purposes: informing subscribers of the -//! callsite's existence, and performing static filtering. -//! -//! ## Callsite Existence -//! -//! If a [`Subscriber`] implementation wishes to allocate storage for each -//! unique span/event location in the program, or pre-compute some value -//! that will be used to record that span or event in the future, it can -//! do so in its [`register_callsite`] method. -//! -//! ## Performing Static Filtering -//! -//! The [`register_callsite`] method returns an [`Interest`] value, -//! which indicates that the subscriber either [always] wishes to record -//! that span or event, [sometimes] wishes to record it based on a -//! dynamic filter evaluation, or [never] wishes to record it. -//! -//! 
When registering a new callsite, the [`Interest`]s returned by every -//! currently active subscriber are combined, and the result is stored at -//! each callsite. This way, when the span or event occurs in the -//! future, the cached [`Interest`] value can be checked efficiently -//! to determine if the span or event should be recorded, without -//! needing to perform expensive filtering (i.e. calling the -//! [`Subscriber::enabled`] method every time a span or event occurs). -//! -//! ### Rebuilding Cached Interest -//! -//! When a new [`Dispatch`] is created (i.e. a new subscriber becomes -//! active), any previously cached [`Interest`] values are re-evaluated -//! for all callsites in the program. This way, if the new subscriber -//! will enable a callsite that was not previously enabled, the -//! [`Interest`] in that callsite is updated. Similarly, when a -//! subscriber is dropped, the interest cache is also re-evaluated, so -//! that any callsites enabled only by that subscriber are disabled. -//! -//! In addition, the [`rebuild_interest_cache`] function in this module can be -//! used to manually invalidate all cached interest and re-register those -//! callsites. This function is useful in situations where a subscriber's -//! interest can change, but it does so relatively infrequently. The subscriber -//! may wish for its interest to be cached most of the time, and return -//! [`Interest::always`][always] or [`Interest::never`][never] in its -//! [`register_callsite`] method, so that its [`Subscriber::enabled`] method -//! doesn't need to be evaluated every time a span or event is recorded. -//! However, when the configuration changes, the subscriber can call -//! [`rebuild_interest_cache`] to re-evaluate the entire interest cache with its -//! new configuration. This is a relatively costly operation, but if the -//! configuration changes infrequently, it may be more efficient than calling -//! [`Subscriber::enabled`] frequently. -//! -//! 
# Implementing Callsites -//! -//! In most cases, instrumenting code using `tracing` should *not* require -//! implementing the [`Callsite`] trait directly. When using the [`tracing` -//! crate's macros][macros] or the [`#[instrument]` attribute][instrument], a -//! `Callsite` is automatically generated. -//! -//! However, code which provides alternative forms of `tracing` instrumentation -//! may need to interact with the callsite system directly. If -//! instrumentation-side code needs to produce a `Callsite` to emit spans or -//! events, the [`DefaultCallsite`] struct provided in this module is a -//! ready-made `Callsite` implementation that is suitable for most uses. When -//! possible, the use of `DefaultCallsite` should be preferred over implementing -//! [`Callsite`] for user types, as `DefaultCallsite` may benefit from -//! additional performance optimizations. -//! -//! [^1]: Returned by the [`Subscriber::register_callsite`][`register_callsite`] -//! method. -//! -//! [`Metadata`]: crate::metadata::Metadata -//! [`Interest`]: crate::subscriber::Interest -//! [`Subscriber`]: crate::subscriber::Subscriber -//! [`register_callsite`]: crate::subscriber::Subscriber::register_callsite -//! [`Subscriber::enabled`]: crate::subscriber::Subscriber::enabled -//! [always]: crate::subscriber::Interest::always -//! [sometimes]: crate::subscriber::Interest::sometimes -//! [never]: crate::subscriber::Interest::never -//! [`Dispatch`]: crate::dispatch::Dispatch -//! [macros]: https://docs.rs/tracing/latest/tracing/#macros -//! [instrument]: https://docs.rs/tracing/latest/tracing/attr.instrument.html -use crate::stdlib::{ - any::TypeId, - fmt, - hash::{Hash, Hasher}, - ptr, - sync::{ - atomic::{AtomicBool, AtomicPtr, AtomicU8, Ordering}, - Mutex, - }, - vec::Vec, -}; -use crate::{ - dispatcher::Dispatch, - lazy::Lazy, - metadata::{LevelFilter, Metadata}, - subscriber::Interest, -}; - -use self::dispatchers::Dispatchers; - -/// Trait implemented by callsites. 
-/// -/// These functions are only intended to be called by the callsite registry, which -/// correctly handles determining the common interest between all subscribers. -/// -/// See the [module-level documentation](crate::callsite) for details on -/// callsites. -pub trait Callsite: Sync { - /// Sets the [`Interest`] for this callsite. - /// - /// See the [documentation on callsite interest caching][cache-docs] for - /// details. - /// - /// [`Interest`]: super::subscriber::Interest - /// [cache-docs]: crate::callsite#performing-static-filtering - fn set_interest(&self, interest: Interest); - - /// Returns the [metadata] associated with the callsite. - /// - ///
- ///
-    ///
-    /// **Note:** Implementations of this method should not produce [`Metadata`]
-    /// that share the same callsite [`Identifier`] but otherwise differ in any
-    /// way (e.g., have different `name`s).
-    ///
-    /// 
- /// - /// [metadata]: super::metadata::Metadata - fn metadata(&self) -> &Metadata<'_>; - - /// This method is an *internal implementation detail* of `tracing-core`. It - /// is *not* intended to be called or overridden from downstream code. - /// - /// The `Private` type can only be constructed from within `tracing-core`. - /// Because this method takes a `Private` as an argument, it cannot be - /// called from (safe) code external to `tracing-core`. Because it must - /// *return* a `Private`, the only valid implementation possible outside of - /// `tracing-core` would have to always unconditionally panic. - /// - /// THIS IS BY DESIGN. There is currently no valid reason for code outside - /// of `tracing-core` to override this method. - // TODO(eliza): this could be used to implement a public downcasting API - // for `&dyn Callsite`s in the future. - #[doc(hidden)] - #[inline] - fn private_type_id(&self, _: private::Private<()>) -> private::Private - where - Self: 'static, - { - private::Private(TypeId::of::()) - } -} - -/// Uniquely identifies a [`Callsite`] -/// -/// Two `Identifier`s are equal if they both refer to the same callsite. -/// -/// [`Callsite`]: super::callsite::Callsite -#[derive(Clone)] -pub struct Identifier( - /// **Warning**: The fields on this type are currently `pub` because it must - /// be able to be constructed statically by macros. However, when `const - /// fn`s are available on stable Rust, this will no longer be necessary. - /// Thus, these fields are *not* considered stable public API, and they may - /// change warning. Do not rely on any fields on `Identifier`. When - /// constructing new `Identifier`s, use the `identify_callsite!` macro - /// instead. - #[doc(hidden)] - pub &'static dyn Callsite, -); - -/// A default [`Callsite`] implementation. 
-#[derive(Debug)] -pub struct DefaultCallsite { - interest: AtomicU8, - registration: AtomicU8, - meta: &'static Metadata<'static>, - next: AtomicPtr, -} - -/// Clear and reregister interest on every [`Callsite`] -/// -/// This function is intended for runtime reconfiguration of filters on traces -/// when the filter recalculation is much less frequent than trace events are. -/// The alternative is to have the [`Subscriber`] that supports runtime -/// reconfiguration of filters always return [`Interest::sometimes()`] so that -/// [`enabled`] is evaluated for every event. -/// -/// This function will also re-compute the global maximum level as determined by -/// the [`max_level_hint`] method. If a [`Subscriber`] -/// implementation changes the value returned by its `max_level_hint` -/// implementation at runtime, then it **must** call this function after that -/// value changes, in order for the change to be reflected. -/// -/// See the [documentation on callsite interest caching][cache-docs] for -/// additional information on this function's usage. -/// -/// [`max_level_hint`]: super::subscriber::Subscriber::max_level_hint -/// [`Callsite`]: super::callsite::Callsite -/// [`enabled`]: super::subscriber::Subscriber#tymethod.enabled -/// [`Interest::sometimes()`]: super::subscriber::Interest::sometimes -/// [`Subscriber`]: super::subscriber::Subscriber -/// [cache-docs]: crate::callsite#rebuilding-cached-interest -pub fn rebuild_interest_cache() { - CALLSITES.rebuild_interest(DISPATCHERS.rebuilder()); -} - -/// Register a new [`Callsite`] with the global registry. -/// -/// This should be called once per callsite after the callsite has been -/// constructed. -/// -/// See the [documentation on callsite registration][reg-docs] for details -/// on the global callsite registry. 
-/// -/// [`Callsite`]: crate::callsite::Callsite -/// [reg-docs]: crate::callsite#registering-callsites -pub fn register(callsite: &'static dyn Callsite) { - rebuild_callsite_interest(callsite, &DISPATCHERS.rebuilder()); - - // Is this a `DefaultCallsite`? If so, use the fancy linked list! - if callsite.private_type_id(private::Private(())).0 == TypeId::of::() { - let callsite = unsafe { - // Safety: the pointer cast is safe because the type id of the - // provided callsite matches that of the target type for the cast - // (`DefaultCallsite`). Because user implementations of `Callsite` - // cannot override `private_type_id`, we can trust that the callsite - // is not lying about its type ID. - &*(callsite as *const dyn Callsite as *const DefaultCallsite) - }; - CALLSITES.push_default(callsite); - return; - } - - CALLSITES.push_dyn(callsite); -} - -static CALLSITES: Callsites = Callsites { - list_head: AtomicPtr::new(ptr::null_mut()), - has_locked_callsites: AtomicBool::new(false), -}; - -static DISPATCHERS: Dispatchers = Dispatchers::new(); - -static LOCKED_CALLSITES: Lazy>> = Lazy::new(Default::default); - -struct Callsites { - list_head: AtomicPtr, - has_locked_callsites: AtomicBool, -} - -// === impl DefaultCallsite === - -impl DefaultCallsite { - const UNREGISTERED: u8 = 0; - const REGISTERING: u8 = 1; - const REGISTERED: u8 = 2; - - const INTEREST_NEVER: u8 = 0; - const INTEREST_SOMETIMES: u8 = 1; - const INTEREST_ALWAYS: u8 = 2; - - /// Returns a new `DefaultCallsite` with the specified `Metadata`. - pub const fn new(meta: &'static Metadata<'static>) -> Self { - Self { - interest: AtomicU8::new(0xFF), - meta, - next: AtomicPtr::new(ptr::null_mut()), - registration: AtomicU8::new(Self::UNREGISTERED), - } - } - - /// Registers this callsite with the global callsite registry. - /// - /// If the callsite is already registered, this does nothing. 
When using - /// [`DefaultCallsite`], this method should be preferred over - /// [`tracing_core::callsite::register`], as it ensures that the callsite is - /// only registered a single time. - /// - /// Other callsite implementations will generally ensure that - /// callsites are not re-registered through another mechanism. - /// - /// See the [documentation on callsite registration][reg-docs] for details - /// on the global callsite registry. - /// - /// [`Callsite`]: crate::callsite::Callsite - /// [reg-docs]: crate::callsite#registering-callsites - #[inline(never)] - // This only happens once (or if the cached interest value was corrupted). - #[cold] - pub fn register(&'static self) -> Interest { - // Attempt to advance the registration state to `REGISTERING`... - match self.registration.compare_exchange( - Self::UNREGISTERED, - Self::REGISTERING, - Ordering::AcqRel, - Ordering::Acquire, - ) { - Ok(_) => { - // Okay, we advanced the state, try to register the callsite. - rebuild_callsite_interest(self, &DISPATCHERS.rebuilder()); - CALLSITES.push_default(self); - self.registration.store(Self::REGISTERED, Ordering::Release); - } - // Great, the callsite is already registered! Just load its - // previous cached interest. - Err(Self::REGISTERED) => {} - // Someone else is registering... - Err(_state) => { - debug_assert_eq!( - _state, - Self::REGISTERING, - "weird callsite registration state" - ); - // Just hit `enabled` this time. - return Interest::sometimes(); - } - } - - match self.interest.load(Ordering::Relaxed) { - Self::INTEREST_NEVER => Interest::never(), - Self::INTEREST_ALWAYS => Interest::always(), - _ => Interest::sometimes(), - } - } - - /// Returns the callsite's cached `Interest`, or registers it for the - /// first time if it has not yet been registered. 
- #[inline] - pub fn interest(&'static self) -> Interest { - match self.interest.load(Ordering::Relaxed) { - Self::INTEREST_NEVER => Interest::never(), - Self::INTEREST_SOMETIMES => Interest::sometimes(), - Self::INTEREST_ALWAYS => Interest::always(), - _ => self.register(), - } - } -} - -impl Callsite for DefaultCallsite { - fn set_interest(&self, interest: Interest) { - let interest = match () { - _ if interest.is_never() => Self::INTEREST_NEVER, - _ if interest.is_always() => Self::INTEREST_ALWAYS, - _ => Self::INTEREST_SOMETIMES, - }; - self.interest.store(interest, Ordering::SeqCst); - } - - #[inline(always)] - fn metadata(&self) -> &Metadata<'static> { - self.meta - } -} - -// ===== impl Identifier ===== - -impl PartialEq for Identifier { - fn eq(&self, other: &Identifier) -> bool { - core::ptr::eq( - self.0 as *const _ as *const (), - other.0 as *const _ as *const (), - ) - } -} - -impl Eq for Identifier {} - -impl fmt::Debug for Identifier { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Identifier({:p})", self.0) - } -} - -impl Hash for Identifier { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - (self.0 as *const dyn Callsite).hash(state) - } -} - -// === impl Callsites === - -impl Callsites { - /// Rebuild `Interest`s for all callsites in the registry. - /// - /// This also re-computes the max level hint. - fn rebuild_interest(&self, dispatchers: dispatchers::Rebuilder<'_>) { - let mut max_level = LevelFilter::OFF; - dispatchers.for_each(|dispatch| { - // If the subscriber did not provide a max level hint, assume - // that it may enable every level. - let level_hint = dispatch.max_level_hint().unwrap_or(LevelFilter::TRACE); - if level_hint > max_level { - max_level = level_hint; - } - }); - - self.for_each(|callsite| { - rebuild_callsite_interest(callsite, &dispatchers); - }); - LevelFilter::set_max(max_level); - } - - /// Push a `dyn Callsite` trait object to the callsite registry. 
- /// - /// This will attempt to lock the callsites vector. - fn push_dyn(&self, callsite: &'static dyn Callsite) { - let mut lock = LOCKED_CALLSITES.lock().unwrap(); - self.has_locked_callsites.store(true, Ordering::Release); - lock.push(callsite); - } - - /// Push a `DefaultCallsite` to the callsite registry. - /// - /// If we know the callsite being pushed is a `DefaultCallsite`, we can push - /// it to the linked list without having to acquire a lock. - fn push_default(&self, callsite: &'static DefaultCallsite) { - let mut head = self.list_head.load(Ordering::Acquire); - - loop { - callsite.next.store(head, Ordering::Release); - - assert_ne!( - callsite as *const _, head, - "Attempted to register a `DefaultCallsite` that already exists! \ - This will cause an infinite loop when attempting to read from the \ - callsite cache. This is likely a bug! You should only need to call \ - `DefaultCallsite::register` once per `DefaultCallsite`." - ); - - match self.list_head.compare_exchange( - head, - callsite as *const _ as *mut _, - Ordering::AcqRel, - Ordering::Acquire, - ) { - Ok(_) => { - break; - } - Err(current) => head = current, - } - } - } - - /// Invokes the provided closure `f` with each callsite in the registry. 
- fn for_each(&self, mut f: impl FnMut(&'static dyn Callsite)) { - let mut head = self.list_head.load(Ordering::Acquire); - - while let Some(cs) = unsafe { head.as_ref() } { - f(cs); - - head = cs.next.load(Ordering::Acquire); - } - - if self.has_locked_callsites.load(Ordering::Acquire) { - let locked = LOCKED_CALLSITES.lock().unwrap(); - for &cs in locked.iter() { - f(cs); - } - } - } -} - -pub(crate) fn register_dispatch(dispatch: &Dispatch) { - let dispatchers = DISPATCHERS.register_dispatch(dispatch); - dispatch.subscriber().on_register_dispatch(dispatch); - CALLSITES.rebuild_interest(dispatchers); -} - -fn rebuild_callsite_interest( - callsite: &'static dyn Callsite, - dispatchers: &dispatchers::Rebuilder<'_>, -) { - let meta = callsite.metadata(); - - let mut interest = None; - dispatchers.for_each(|dispatch| { - let this_interest = dispatch.register_callsite(meta); - interest = match interest.take() { - None => Some(this_interest), - Some(that_interest) => Some(that_interest.and(this_interest)), - } - }); - - let interest = interest.unwrap_or_else(Interest::never); - callsite.set_interest(interest) -} - -mod private { - /// Don't call this function, it's private. 
- #[allow(missing_debug_implementations)] - pub struct Private(pub(crate) T); -} - -#[cfg(feature = "std")] -mod dispatchers { - use crate::{dispatcher, lazy::Lazy}; - use std::sync::{ - atomic::{AtomicBool, Ordering}, - RwLock, RwLockReadGuard, RwLockWriteGuard, - }; - - pub(super) struct Dispatchers { - has_just_one: AtomicBool, - } - - static LOCKED_DISPATCHERS: Lazy>> = - Lazy::new(Default::default); - - pub(super) enum Rebuilder<'a> { - JustOne, - Read(RwLockReadGuard<'a, Vec>), - Write(RwLockWriteGuard<'a, Vec>), - } - - impl Dispatchers { - pub(super) const fn new() -> Self { - Self { - has_just_one: AtomicBool::new(true), - } - } - - pub(super) fn rebuilder(&self) -> Rebuilder<'_> { - if self.has_just_one.load(Ordering::SeqCst) { - return Rebuilder::JustOne; - } - Rebuilder::Read(LOCKED_DISPATCHERS.read().unwrap()) - } - - pub(super) fn register_dispatch(&self, dispatch: &dispatcher::Dispatch) -> Rebuilder<'_> { - let mut dispatchers = LOCKED_DISPATCHERS.write().unwrap(); - dispatchers.retain(|d| d.upgrade().is_some()); - dispatchers.push(dispatch.registrar()); - self.has_just_one - .store(dispatchers.len() <= 1, Ordering::SeqCst); - Rebuilder::Write(dispatchers) - } - } - - impl Rebuilder<'_> { - pub(super) fn for_each(&self, mut f: impl FnMut(&dispatcher::Dispatch)) { - let iter = match self { - Rebuilder::JustOne => { - dispatcher::get_default(f); - return; - } - Rebuilder::Read(vec) => vec.iter(), - Rebuilder::Write(vec) => vec.iter(), - }; - iter.filter_map(dispatcher::Registrar::upgrade) - .for_each(|dispatch| f(&dispatch)) - } - } -} - -#[cfg(not(feature = "std"))] -mod dispatchers { - use crate::dispatcher; - - pub(super) struct Dispatchers(()); - pub(super) struct Rebuilder<'a>(Option<&'a dispatcher::Dispatch>); - - impl Dispatchers { - pub(super) const fn new() -> Self { - Self(()) - } - - pub(super) fn rebuilder(&self) -> Rebuilder<'_> { - Rebuilder(None) - } - - pub(super) fn register_dispatch<'dispatch>( - &self, - dispatch: &'dispatch 
dispatcher::Dispatch, - ) -> Rebuilder<'dispatch> { - // nop; on no_std, there can only ever be one dispatcher - Rebuilder(Some(dispatch)) - } - } - - impl Rebuilder<'_> { - #[inline] - pub(super) fn for_each(&self, mut f: impl FnMut(&dispatcher::Dispatch)) { - if let Some(dispatch) = self.0 { - // we are rebuilding the interest cache because a new dispatcher - // is about to be set. on `no_std`, this should only happen - // once, because the new dispatcher will be the global default. - f(dispatch) - } else { - // otherwise, we are rebuilding the cache because the subscriber - // configuration changed, so use the global default. - // on no_std, there can only ever be one dispatcher - dispatcher::get_default(f) - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/dispatcher.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/dispatcher.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/dispatcher.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/dispatcher.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1071 +0,0 @@ -//! Dispatches trace events to [`Subscriber`]s. -//! -//! The _dispatcher_ is the component of the tracing system which is responsible -//! for forwarding trace data from the instrumentation points that generate it -//! to the subscriber that collects it. -//! -//! # Using the Trace Dispatcher -//! -//! Every thread in a program using `tracing` has a _default subscriber_. When -//! events occur, or spans are created, they are dispatched to the thread's -//! current subscriber. -//! -//! ## Setting the Default Subscriber -//! -//! By default, the current subscriber is an empty implementation that does -//! nothing. To use a subscriber implementation, it must be set as the default. -//! There are two methods for doing so: [`with_default`] and -//! [`set_global_default`]. `with_default` sets the default subscriber for the -//! 
duration of a scope, while `set_global_default` sets a default subscriber -//! for the entire process. -//! -//! To use either of these functions, we must first wrap our subscriber in a -//! [`Dispatch`], a cloneable, type-erased reference to a subscriber. For -//! example: -//! ```rust -//! # pub struct FooSubscriber; -//! # use tracing_core::{ -//! # dispatcher, Event, Metadata, -//! # span::{Attributes, Id, Record} -//! # }; -//! # impl tracing_core::Subscriber for FooSubscriber { -//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } -//! # fn record(&self, _: &Id, _: &Record) {} -//! # fn event(&self, _: &Event) {} -//! # fn record_follows_from(&self, _: &Id, _: &Id) {} -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn enter(&self, _: &Id) {} -//! # fn exit(&self, _: &Id) {} -//! # } -//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } -//! use dispatcher::Dispatch; -//! -//! let my_subscriber = FooSubscriber::new(); -//! let my_dispatch = Dispatch::new(my_subscriber); -//! ``` -//! Then, we can use [`with_default`] to set our `Dispatch` as the default for -//! the duration of a block: -//! ```rust -//! # pub struct FooSubscriber; -//! # use tracing_core::{ -//! # dispatcher, Event, Metadata, -//! # span::{Attributes, Id, Record} -//! # }; -//! # impl tracing_core::Subscriber for FooSubscriber { -//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } -//! # fn record(&self, _: &Id, _: &Record) {} -//! # fn event(&self, _: &Event) {} -//! # fn record_follows_from(&self, _: &Id, _: &Id) {} -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn enter(&self, _: &Id) {} -//! # fn exit(&self, _: &Id) {} -//! # } -//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } -//! # let my_subscriber = FooSubscriber::new(); -//! # let my_dispatch = dispatcher::Dispatch::new(my_subscriber); -//! // no default subscriber -//! -//! # #[cfg(feature = "std")] -//! 
dispatcher::with_default(&my_dispatch, || { -//! // my_subscriber is the default -//! }); -//! -//! // no default subscriber again -//! ``` -//! It's important to note that `with_default` will not propagate the current -//! thread's default subscriber to any threads spawned within the `with_default` -//! block. To propagate the default subscriber to new threads, either use -//! `with_default` from the new thread, or use `set_global_default`. -//! -//! As an alternative to `with_default`, we can use [`set_global_default`] to -//! set a `Dispatch` as the default for all threads, for the lifetime of the -//! program. For example: -//! ```rust -//! # pub struct FooSubscriber; -//! # use tracing_core::{ -//! # dispatcher, Event, Metadata, -//! # span::{Attributes, Id, Record} -//! # }; -//! # impl tracing_core::Subscriber for FooSubscriber { -//! # fn new_span(&self, _: &Attributes) -> Id { Id::from_u64(0) } -//! # fn record(&self, _: &Id, _: &Record) {} -//! # fn event(&self, _: &Event) {} -//! # fn record_follows_from(&self, _: &Id, _: &Id) {} -//! # fn enabled(&self, _: &Metadata) -> bool { false } -//! # fn enter(&self, _: &Id) {} -//! # fn exit(&self, _: &Id) {} -//! # } -//! # impl FooSubscriber { fn new() -> Self { FooSubscriber } } -//! # let my_subscriber = FooSubscriber::new(); -//! # let my_dispatch = dispatcher::Dispatch::new(my_subscriber); -//! // no default subscriber -//! -//! dispatcher::set_global_default(my_dispatch) -//! // `set_global_default` will return an error if the global default -//! // subscriber has already been set. -//! .expect("global default was already set!"); -//! -//! // `my_subscriber` is now the default -//! ``` -//! -//!
-//!     Note:the thread-local scoped dispatcher
-//!     (with_default) requires the
-//!     Rust standard library. no_std users should use
-//!     set_global_default
-//!     instead.
-//! 
-//! -//! ## Accessing the Default Subscriber -//! -//! A thread's current default subscriber can be accessed using the -//! [`get_default`] function, which executes a closure with a reference to the -//! currently default `Dispatch`. This is used primarily by `tracing` -//! instrumentation. -//! -use crate::{ - callsite, span, - subscriber::{self, NoSubscriber, Subscriber}, - Event, LevelFilter, Metadata, -}; - -use crate::stdlib::{ - any::Any, - fmt, - sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, - Arc, Weak, - }, -}; - -#[cfg(feature = "std")] -use crate::stdlib::{ - cell::{Cell, Ref, RefCell}, - error, -}; - -#[cfg(feature = "alloc")] -use alloc::sync::{Arc, Weak}; - -#[cfg(feature = "alloc")] -use core::ops::Deref; - -/// `Dispatch` trace data to a [`Subscriber`]. -#[derive(Clone)] -pub struct Dispatch { - subscriber: Kind>, -} - -/// `WeakDispatch` is a version of [`Dispatch`] that holds a non-owning reference -/// to a [`Subscriber`]. -/// -/// The Subscriber` may be accessed by calling [`WeakDispatch::upgrade`], -/// which returns an `Option`. If all [`Dispatch`] clones that point -/// at the `Subscriber` have been dropped, [`WeakDispatch::upgrade`] will return -/// `None`. Otherwise, it will return `Some(Dispatch)`. -/// -/// A `WeakDispatch` may be created from a [`Dispatch`] by calling the -/// [`Dispatch::downgrade`] method. The primary use for creating a -/// [`WeakDispatch`] is to allow a Subscriber` to hold a cyclical reference to -/// itself without creating a memory leak. See [here] for details. -/// -/// This type is analogous to the [`std::sync::Weak`] type, but for a -/// [`Dispatch`] rather than an [`Arc`]. -/// -/// [`Arc`]: std::sync::Arc -/// [here]: Subscriber#avoiding-memory-leaks -#[derive(Clone)] -pub struct WeakDispatch { - subscriber: Kind>, -} - -#[derive(Clone)] -enum Kind { - Global(&'static (dyn Subscriber + Send + Sync)), - Scoped(T), -} - -#[cfg(feature = "std")] -thread_local! 
{ - static CURRENT_STATE: State = State { - default: RefCell::new(None), - can_enter: Cell::new(true), - }; -} - -static EXISTS: AtomicBool = AtomicBool::new(false); -static GLOBAL_INIT: AtomicUsize = AtomicUsize::new(UNINITIALIZED); - -#[cfg(feature = "std")] -static SCOPED_COUNT: AtomicUsize = AtomicUsize::new(0); - -const UNINITIALIZED: usize = 0; -const INITIALIZING: usize = 1; -const INITIALIZED: usize = 2; - -static mut GLOBAL_DISPATCH: Dispatch = Dispatch { - subscriber: Kind::Global(&NO_SUBSCRIBER), -}; -static NONE: Dispatch = Dispatch { - subscriber: Kind::Global(&NO_SUBSCRIBER), -}; -static NO_SUBSCRIBER: NoSubscriber = NoSubscriber::new(); - -/// The dispatch state of a thread. -#[cfg(feature = "std")] -struct State { - /// This thread's current default dispatcher. - default: RefCell>, - /// Whether or not we can currently begin dispatching a trace event. - /// - /// This is set to `false` when functions such as `enter`, `exit`, `event`, - /// and `new_span` are called on this thread's default dispatcher, to - /// prevent further trace events triggered inside those functions from - /// creating an infinite recursion. When we finish handling a dispatch, this - /// is set back to `true`. - can_enter: Cell, -} - -/// While this guard is active, additional calls to subscriber functions on -/// the default dispatcher will not be able to access the dispatch context. -/// Dropping the guard will allow the dispatch context to be re-entered. -#[cfg(feature = "std")] -struct Entered<'a>(&'a State); - -/// A guard that resets the current default dispatcher to the prior -/// default dispatcher when dropped. -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -#[derive(Debug)] -pub struct DefaultGuard(Option); - -/// Sets this dispatch as the default for the duration of a closure. -/// -/// The default dispatcher is used when creating a new [span] or -/// [`Event`]. -/// -///
-///     Note: This function required the Rust standard library.
-///     no_std users should use 
-///     set_global_default instead.
-/// 
-/// -/// [span]: super::span -/// [`Subscriber`]: super::subscriber::Subscriber -/// [`Event`]: super::event::Event -/// [`set_global_default`]: super::set_global_default -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -pub fn with_default(dispatcher: &Dispatch, f: impl FnOnce() -> T) -> T { - // When this guard is dropped, the default dispatcher will be reset to the - // prior default. Using this (rather than simply resetting after calling - // `f`) ensures that we always reset to the prior dispatcher even if `f` - // panics. - let _guard = set_default(dispatcher); - f() -} - -/// Sets the dispatch as the default dispatch for the duration of the lifetime -/// of the returned DefaultGuard -/// -///
-///     Note: This function required the Rust standard library.
-///     no_std users should use 
-///     set_global_default instead.
-/// 
-/// -/// [`set_global_default`]: super::set_global_default -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -#[must_use = "Dropping the guard unregisters the dispatcher."] -pub fn set_default(dispatcher: &Dispatch) -> DefaultGuard { - // When this guard is dropped, the default dispatcher will be reset to the - // prior default. Using this ensures that we always reset to the prior - // dispatcher even if the thread calling this function panics. - State::set_default(dispatcher.clone()) -} - -/// Sets this dispatch as the global default for the duration of the entire program. -/// Will be used as a fallback if no thread-local dispatch has been set in a thread -/// (using `with_default`.) -/// -/// Can only be set once; subsequent attempts to set the global default will fail. -/// Returns `Err` if the global default has already been set. -/// -///
-///     Warning: In general, libraries should not call
-///     set_global_default()! Doing so will cause conflicts when
-///     executables that depend on the library try to set the default later.
-/// 
-/// -/// [span]: super::span -/// [`Subscriber`]: super::subscriber::Subscriber -/// [`Event`]: super::event::Event -pub fn set_global_default(dispatcher: Dispatch) -> Result<(), SetGlobalDefaultError> { - // if `compare_exchange` returns Result::Ok(_), then `new` has been set and - // `current`—now the prior value—has been returned in the `Ok()` branch. - if GLOBAL_INIT - .compare_exchange( - UNINITIALIZED, - INITIALIZING, - Ordering::SeqCst, - Ordering::SeqCst, - ) - .is_ok() - { - let subscriber = { - let subscriber = match dispatcher.subscriber { - Kind::Global(s) => s, - Kind::Scoped(s) => unsafe { - // safety: this leaks the subscriber onto the heap. the - // reference count will always be at least 1, because the - // global default will never be dropped. - &*Arc::into_raw(s) - }, - }; - Kind::Global(subscriber) - }; - unsafe { - GLOBAL_DISPATCH = Dispatch { subscriber }; - } - GLOBAL_INIT.store(INITIALIZED, Ordering::SeqCst); - EXISTS.store(true, Ordering::Release); - Ok(()) - } else { - Err(SetGlobalDefaultError { _no_construct: () }) - } -} - -/// Returns true if a `tracing` dispatcher has ever been set. -/// -/// This may be used to completely elide trace points if tracing is not in use -/// at all or has yet to be initialized. -#[doc(hidden)] -#[inline(always)] -pub fn has_been_set() -> bool { - EXISTS.load(Ordering::Relaxed) -} - -/// Returned if setting the global dispatcher fails. 
-pub struct SetGlobalDefaultError { - _no_construct: (), -} - -impl fmt::Debug for SetGlobalDefaultError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("SetGlobalDefaultError") - .field(&Self::MESSAGE) - .finish() - } -} - -impl fmt::Display for SetGlobalDefaultError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad(Self::MESSAGE) - } -} - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl error::Error for SetGlobalDefaultError {} - -impl SetGlobalDefaultError { - const MESSAGE: &'static str = "a global default trace dispatcher has already been set"; -} - -/// Executes a closure with a reference to this thread's current [dispatcher]. -/// -/// Note that calls to `get_default` should not be nested; if this function is -/// called while inside of another `get_default`, that closure will be provided -/// with `Dispatch::none` rather than the previously set dispatcher. -/// -/// [dispatcher]: super::dispatcher::Dispatch -#[cfg(feature = "std")] -pub fn get_default(mut f: F) -> T -where - F: FnMut(&Dispatch) -> T, -{ - if SCOPED_COUNT.load(Ordering::Acquire) == 0 { - // fast path if no scoped dispatcher has been set; just use the global - // default. - return f(get_global()); - } - - CURRENT_STATE - .try_with(|state| { - if let Some(entered) = state.enter() { - return f(&entered.current()); - } - - f(&NONE) - }) - .unwrap_or_else(|_| f(&NONE)) -} - -/// Executes a closure with a reference to this thread's current [dispatcher]. -/// -/// Note that calls to `get_default` should not be nested; if this function is -/// called while inside of another `get_default`, that closure will be provided -/// with `Dispatch::none` rather than the previously set dispatcher. 
-/// -/// [dispatcher]: super::dispatcher::Dispatch -#[cfg(feature = "std")] -#[doc(hidden)] -#[inline(never)] -pub fn get_current(f: impl FnOnce(&Dispatch) -> T) -> Option { - if SCOPED_COUNT.load(Ordering::Acquire) == 0 { - // fast path if no scoped dispatcher has been set; just use the global - // default. - return Some(f(get_global())); - } - - CURRENT_STATE - .try_with(|state| { - let entered = state.enter()?; - Some(f(&entered.current())) - }) - .ok()? -} - -/// Executes a closure with a reference to the current [dispatcher]. -/// -/// [dispatcher]: super::dispatcher::Dispatch -#[cfg(not(feature = "std"))] -#[doc(hidden)] -pub fn get_current(f: impl FnOnce(&Dispatch) -> T) -> Option { - Some(f(get_global())) -} - -/// Executes a closure with a reference to the current [dispatcher]. -/// -/// [dispatcher]: super::dispatcher::Dispatch -#[cfg(not(feature = "std"))] -pub fn get_default(mut f: F) -> T -where - F: FnMut(&Dispatch) -> T, -{ - f(&get_global()) -} - -#[inline] -fn get_global() -> &'static Dispatch { - if GLOBAL_INIT.load(Ordering::SeqCst) != INITIALIZED { - return &NONE; - } - unsafe { - // This is safe given the invariant that setting the global dispatcher - // also sets `GLOBAL_INIT` to `INITIALIZED`. - &GLOBAL_DISPATCH - } -} - -#[cfg(feature = "std")] -pub(crate) struct Registrar(Kind>); - -impl Dispatch { - /// Returns a new `Dispatch` that discards events and spans. - #[inline] - pub fn none() -> Self { - Dispatch { - subscriber: Kind::Global(&NO_SUBSCRIBER), - } - } - - /// Returns a `Dispatch` that forwards to the given [`Subscriber`]. 
- /// - /// [`Subscriber`]: super::subscriber::Subscriber - pub fn new(subscriber: S) -> Self - where - S: Subscriber + Send + Sync + 'static, - { - let me = Dispatch { - subscriber: Kind::Scoped(Arc::new(subscriber)), - }; - callsite::register_dispatch(&me); - me - } - - #[cfg(feature = "std")] - pub(crate) fn registrar(&self) -> Registrar { - Registrar(self.subscriber.downgrade()) - } - - /// Creates a [`WeakDispatch`] from this `Dispatch`. - /// - /// A [`WeakDispatch`] is similar to a [`Dispatch`], but it does not prevent - /// the underlying [`Subscriber`] from being dropped. Instead, it only permits - /// access while other references to the `Subscriber` exist. This is equivalent - /// to the standard library's [`Arc::downgrade`] method, but for `Dispatch` - /// rather than `Arc`. - /// - /// The primary use for creating a [`WeakDispatch`] is to allow a `Subscriber` - /// to hold a cyclical reference to itself without creating a memory leak. - /// See [here] for details. - /// - /// [`Arc::downgrade`]: std::sync::Arc::downgrade - /// [here]: Subscriber#avoiding-memory-leaks - pub fn downgrade(&self) -> WeakDispatch { - WeakDispatch { - subscriber: self.subscriber.downgrade(), - } - } - - #[inline(always)] - pub(crate) fn subscriber(&self) -> &(dyn Subscriber + Send + Sync) { - match self.subscriber { - Kind::Global(s) => s, - Kind::Scoped(ref s) => s.as_ref(), - } - } - - /// Registers a new callsite with this subscriber, returning whether or not - /// the subscriber is interested in being notified about the callsite. - /// - /// This calls the [`register_callsite`] function on the [`Subscriber`] - /// that this `Dispatch` forwards to. 
- /// - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`register_callsite`]: super::subscriber::Subscriber::register_callsite - #[inline] - pub fn register_callsite(&self, metadata: &'static Metadata<'static>) -> subscriber::Interest { - self.subscriber().register_callsite(metadata) - } - - /// Returns the highest [verbosity level][level] that this [`Subscriber`] will - /// enable, or `None`, if the subscriber does not implement level-based - /// filtering or chooses not to implement this method. - /// - /// This calls the [`max_level_hint`] function on the [`Subscriber`] - /// that this `Dispatch` forwards to. - /// - /// [level]: super::Level - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`register_callsite`]: super::subscriber::Subscriber::max_level_hint - // TODO(eliza): consider making this a public API? - #[inline] - pub(crate) fn max_level_hint(&self) -> Option { - self.subscriber().max_level_hint() - } - - /// Record the construction of a new span, returning a new [ID] for the - /// span being constructed. - /// - /// This calls the [`new_span`] function on the [`Subscriber`] that this - /// `Dispatch` forwards to. - /// - /// [ID]: super::span::Id - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`new_span`]: super::subscriber::Subscriber::new_span - #[inline] - pub fn new_span(&self, span: &span::Attributes<'_>) -> span::Id { - self.subscriber().new_span(span) - } - - /// Record a set of values on a span. - /// - /// This calls the [`record`] function on the [`Subscriber`] that this - /// `Dispatch` forwards to. - /// - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`record`]: super::subscriber::Subscriber::record - #[inline] - pub fn record(&self, span: &span::Id, values: &span::Record<'_>) { - self.subscriber().record(span, values) - } - - /// Adds an indication that `span` follows from the span with the id - /// `follows`. 
- /// - /// This calls the [`record_follows_from`] function on the [`Subscriber`] - /// that this `Dispatch` forwards to. - /// - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`record_follows_from`]: super::subscriber::Subscriber::record_follows_from - #[inline] - pub fn record_follows_from(&self, span: &span::Id, follows: &span::Id) { - self.subscriber().record_follows_from(span, follows) - } - - /// Returns true if a span with the specified [metadata] would be - /// recorded. - /// - /// This calls the [`enabled`] function on the [`Subscriber`] that this - /// `Dispatch` forwards to. - /// - /// [metadata]: super::metadata::Metadata - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`enabled`]: super::subscriber::Subscriber::enabled - #[inline] - pub fn enabled(&self, metadata: &Metadata<'_>) -> bool { - self.subscriber().enabled(metadata) - } - - /// Records that an [`Event`] has occurred. - /// - /// This calls the [`event`] function on the [`Subscriber`] that this - /// `Dispatch` forwards to. - /// - /// [`Event`]: super::event::Event - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`event`]: super::subscriber::Subscriber::event - #[inline] - pub fn event(&self, event: &Event<'_>) { - let subscriber = self.subscriber(); - if subscriber.event_enabled(event) { - subscriber.event(event); - } - } - - /// Records that a span has been can_enter. - /// - /// This calls the [`enter`] function on the [`Subscriber`] that this - /// `Dispatch` forwards to. - /// - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`enter`]: super::subscriber::Subscriber::enter - pub fn enter(&self, span: &span::Id) { - self.subscriber().enter(span); - } - - /// Records that a span has been exited. - /// - /// This calls the [`exit`] function on the [`Subscriber`] that this - /// `Dispatch` forwards to. 
- /// - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`exit`]: super::subscriber::Subscriber::exit - pub fn exit(&self, span: &span::Id) { - self.subscriber().exit(span); - } - - /// Notifies the subscriber that a [span ID] has been cloned. - /// - /// This function must only be called with span IDs that were returned by - /// this `Dispatch`'s [`new_span`] function. The `tracing` crate upholds - /// this guarantee and any other libraries implementing instrumentation APIs - /// must as well. - /// - /// This calls the [`clone_span`] function on the `Subscriber` that this - /// `Dispatch` forwards to. - /// - /// [span ID]: super::span::Id - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`clone_span`]: super::subscriber::Subscriber::clone_span - /// [`new_span`]: super::subscriber::Subscriber::new_span - #[inline] - pub fn clone_span(&self, id: &span::Id) -> span::Id { - self.subscriber().clone_span(id) - } - - /// Notifies the subscriber that a [span ID] has been dropped. - /// - /// This function must only be called with span IDs that were returned by - /// this `Dispatch`'s [`new_span`] function. The `tracing` crate upholds - /// this guarantee and any other libraries implementing instrumentation APIs - /// must as well. - /// - /// This calls the [`drop_span`] function on the [`Subscriber`] that this - /// `Dispatch` forwards to. - /// - ///
-    ///     Deprecated: The 
-    ///     try_close method is functionally identical, but returns
-    ///     true if the span is now closed. It should be used
-    ///     instead of this method.
-    /// 
- /// - /// [span ID]: super::span::Id - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`drop_span`]: super::subscriber::Subscriber::drop_span - /// [`new_span`]: super::subscriber::Subscriber::new_span - /// [`try_close`]: Entered::try_close() - #[inline] - #[deprecated(since = "0.1.2", note = "use `Dispatch::try_close` instead")] - pub fn drop_span(&self, id: span::Id) { - #[allow(deprecated)] - self.subscriber().drop_span(id); - } - - /// Notifies the subscriber that a [span ID] has been dropped, and returns - /// `true` if there are now 0 IDs referring to that span. - /// - /// This function must only be called with span IDs that were returned by - /// this `Dispatch`'s [`new_span`] function. The `tracing` crate upholds - /// this guarantee and any other libraries implementing instrumentation APIs - /// must as well. - /// - /// This calls the [`try_close`] function on the [`Subscriber`] that this - /// `Dispatch` forwards to. - /// - /// [span ID]: super::span::Id - /// [`Subscriber`]: super::subscriber::Subscriber - /// [`try_close`]: super::subscriber::Subscriber::try_close - /// [`new_span`]: super::subscriber::Subscriber::new_span - pub fn try_close(&self, id: span::Id) -> bool { - self.subscriber().try_close(id) - } - - /// Returns a type representing this subscriber's view of the current span. - /// - /// This calls the [`current`] function on the `Subscriber` that this - /// `Dispatch` forwards to. - /// - /// [`current`]: super::subscriber::Subscriber::current_span - #[inline] - pub fn current_span(&self) -> span::Current { - self.subscriber().current_span() - } - - /// Returns `true` if this `Dispatch` forwards to a `Subscriber` of type - /// `T`. - #[inline] - pub fn is(&self) -> bool { - ::is::(self.subscriber()) - } - - /// Returns some reference to the `Subscriber` this `Dispatch` forwards to - /// if it is of type `T`, or `None` if it isn't. 
- #[inline] - pub fn downcast_ref(&self) -> Option<&T> { - ::downcast_ref(self.subscriber()) - } -} - -impl Default for Dispatch { - /// Returns the current default dispatcher - fn default() -> Self { - get_default(|default| default.clone()) - } -} - -impl fmt::Debug for Dispatch { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.subscriber { - Kind::Scoped(ref s) => f - .debug_tuple("Dispatch::Scoped") - .field(&format_args!("{:p}", s)) - .finish(), - Kind::Global(s) => f - .debug_tuple("Dispatch::Global") - .field(&format_args!("{:p}", s)) - .finish(), - } - } -} - -impl From for Dispatch -where - S: Subscriber + Send + Sync + 'static, -{ - #[inline] - fn from(subscriber: S) -> Self { - Dispatch::new(subscriber) - } -} - -// === impl WeakDispatch === - -impl WeakDispatch { - /// Attempts to upgrade this `WeakDispatch` to a [`Dispatch`]. - /// - /// Returns `None` if the referenced `Dispatch` has already been dropped. - /// - /// ## Examples - /// - /// ``` - /// # use tracing_core::subscriber::NoSubscriber; - /// # use tracing_core::dispatcher::Dispatch; - /// let strong = Dispatch::new(NoSubscriber::default()); - /// let weak = strong.downgrade(); - /// - /// // The strong here keeps it alive, so we can still access the object. - /// assert!(weak.upgrade().is_some()); - /// - /// drop(strong); // But not any more. 
- /// assert!(weak.upgrade().is_none()); - /// ``` - pub fn upgrade(&self) -> Option { - self.subscriber - .upgrade() - .map(|subscriber| Dispatch { subscriber }) - } -} - -impl fmt::Debug for WeakDispatch { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.subscriber { - Kind::Scoped(ref s) => f - .debug_tuple("WeakDispatch::Scoped") - .field(&format_args!("{:p}", s)) - .finish(), - Kind::Global(s) => f - .debug_tuple("WeakDispatch::Global") - .field(&format_args!("{:p}", s)) - .finish(), - } - } -} - -#[cfg(feature = "std")] -impl Registrar { - pub(crate) fn upgrade(&self) -> Option { - self.0.upgrade().map(|subscriber| Dispatch { subscriber }) - } -} - -// ===== impl State ===== - -impl Kind> { - fn downgrade(&self) -> Kind> { - match self { - Kind::Global(s) => Kind::Global(*s), - Kind::Scoped(ref s) => Kind::Scoped(Arc::downgrade(s)), - } - } -} - -impl Kind> { - fn upgrade(&self) -> Option>> { - match self { - Kind::Global(s) => Some(Kind::Global(*s)), - Kind::Scoped(ref s) => Some(Kind::Scoped(s.upgrade()?)), - } - } -} - -// ===== impl State ===== - -#[cfg(feature = "std")] -impl State { - /// Replaces the current default dispatcher on this thread with the provided - /// dispatcher.Any - /// - /// Dropping the returned `ResetGuard` will reset the default dispatcher to - /// the previous value. 
- #[inline] - fn set_default(new_dispatch: Dispatch) -> DefaultGuard { - let prior = CURRENT_STATE - .try_with(|state| { - state.can_enter.set(true); - state.default.replace(Some(new_dispatch)) - }) - .ok() - .flatten(); - EXISTS.store(true, Ordering::Release); - SCOPED_COUNT.fetch_add(1, Ordering::Release); - DefaultGuard(prior) - } - - #[inline] - fn enter(&self) -> Option> { - if self.can_enter.replace(false) { - Some(Entered(self)) - } else { - None - } - } -} - -// ===== impl Entered ===== - -#[cfg(feature = "std")] -impl<'a> Entered<'a> { - #[inline] - fn current(&self) -> Ref<'a, Dispatch> { - let default = self.0.default.borrow(); - Ref::map(default, |default| match default { - Some(default) => default, - None => get_global(), - }) - } -} - -#[cfg(feature = "std")] -impl<'a> Drop for Entered<'a> { - #[inline] - fn drop(&mut self) { - self.0.can_enter.set(true); - } -} - -// ===== impl DefaultGuard ===== - -#[cfg(feature = "std")] -impl Drop for DefaultGuard { - #[inline] - fn drop(&mut self) { - // Replace the dispatcher and then drop the old one outside - // of the thread-local context. Dropping the dispatch may - // lead to the drop of a subscriber which, in the process, - // could then also attempt to access the same thread local - // state -- causing a clash. 
- let prev = CURRENT_STATE.try_with(|state| state.default.replace(self.0.take())); - SCOPED_COUNT.fetch_sub(1, Ordering::Release); - drop(prev) - } -} - -#[cfg(test)] -mod test { - use super::*; - #[cfg(feature = "std")] - use crate::stdlib::sync::atomic::{AtomicUsize, Ordering}; - use crate::{ - callsite::Callsite, - metadata::{Kind, Level, Metadata}, - subscriber::Interest, - }; - - #[test] - fn dispatch_is() { - let dispatcher = Dispatch::new(NoSubscriber::default()); - assert!(dispatcher.is::()); - } - - #[test] - fn dispatch_downcasts() { - let dispatcher = Dispatch::new(NoSubscriber::default()); - assert!(dispatcher.downcast_ref::().is_some()); - } - - struct TestCallsite; - static TEST_CALLSITE: TestCallsite = TestCallsite; - static TEST_META: Metadata<'static> = metadata! { - name: "test", - target: module_path!(), - level: Level::DEBUG, - fields: &[], - callsite: &TEST_CALLSITE, - kind: Kind::EVENT - }; - - impl Callsite for TestCallsite { - fn set_interest(&self, _: Interest) {} - fn metadata(&self) -> &Metadata<'_> { - &TEST_META - } - } - - #[test] - #[cfg(feature = "std")] - fn events_dont_infinite_loop() { - // This test ensures that an event triggered within a subscriber - // won't cause an infinite loop of events. - struct TestSubscriber; - impl Subscriber for TestSubscriber { - fn enabled(&self, _: &Metadata<'_>) -> bool { - true - } - - fn new_span(&self, _: &span::Attributes<'_>) -> span::Id { - span::Id::from_u64(0xAAAA) - } - - fn record(&self, _: &span::Id, _: &span::Record<'_>) {} - - fn record_follows_from(&self, _: &span::Id, _: &span::Id) {} - - fn event(&self, _: &Event<'_>) { - static EVENTS: AtomicUsize = AtomicUsize::new(0); - assert_eq!( - EVENTS.fetch_add(1, Ordering::Relaxed), - 0, - "event method called twice!" 
- ); - Event::dispatch(&TEST_META, &TEST_META.fields().value_set(&[])) - } - - fn enter(&self, _: &span::Id) {} - - fn exit(&self, _: &span::Id) {} - } - - with_default(&Dispatch::new(TestSubscriber), || { - Event::dispatch(&TEST_META, &TEST_META.fields().value_set(&[])) - }) - } - - #[test] - #[cfg(feature = "std")] - fn spans_dont_infinite_loop() { - // This test ensures that a span created within a subscriber - // won't cause an infinite loop of new spans. - - fn mk_span() { - get_default(|current| { - current.new_span(&span::Attributes::new( - &TEST_META, - &TEST_META.fields().value_set(&[]), - )) - }); - } - - struct TestSubscriber; - impl Subscriber for TestSubscriber { - fn enabled(&self, _: &Metadata<'_>) -> bool { - true - } - - fn new_span(&self, _: &span::Attributes<'_>) -> span::Id { - static NEW_SPANS: AtomicUsize = AtomicUsize::new(0); - assert_eq!( - NEW_SPANS.fetch_add(1, Ordering::Relaxed), - 0, - "new_span method called twice!" - ); - mk_span(); - span::Id::from_u64(0xAAAA) - } - - fn record(&self, _: &span::Id, _: &span::Record<'_>) {} - - fn record_follows_from(&self, _: &span::Id, _: &span::Id) {} - - fn event(&self, _: &Event<'_>) {} - - fn enter(&self, _: &span::Id) {} - - fn exit(&self, _: &span::Id) {} - } - - with_default(&Dispatch::new(TestSubscriber), mk_span) - } - - #[test] - fn default_no_subscriber() { - let default_dispatcher = Dispatch::default(); - assert!(default_dispatcher.is::()); - } - - #[cfg(feature = "std")] - #[test] - fn default_dispatch() { - struct TestSubscriber; - impl Subscriber for TestSubscriber { - fn enabled(&self, _: &Metadata<'_>) -> bool { - true - } - - fn new_span(&self, _: &span::Attributes<'_>) -> span::Id { - span::Id::from_u64(0xAAAA) - } - - fn record(&self, _: &span::Id, _: &span::Record<'_>) {} - - fn record_follows_from(&self, _: &span::Id, _: &span::Id) {} - - fn event(&self, _: &Event<'_>) {} - - fn enter(&self, _: &span::Id) {} - - fn exit(&self, _: &span::Id) {} - } - let guard = 
set_default(&Dispatch::new(TestSubscriber)); - let default_dispatcher = Dispatch::default(); - assert!(default_dispatcher.is::()); - - drop(guard); - let default_dispatcher = Dispatch::default(); - assert!(default_dispatcher.is::()); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/event.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/event.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/event.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/event.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,128 +0,0 @@ -//! Events represent single points in time during the execution of a program. -use crate::parent::Parent; -use crate::span::Id; -use crate::{field, Metadata}; - -/// `Event`s represent single points in time where something occurred during the -/// execution of a program. -/// -/// An `Event` can be compared to a log record in unstructured logging, but with -/// two key differences: -/// - `Event`s exist _within the context of a [span]_. Unlike log lines, they -/// may be located within the trace tree, allowing visibility into the -/// _temporal_ context in which the event occurred, as well as the source -/// code location. -/// - Like spans, `Event`s have structured key-value data known as _[fields]_, -/// which may include textual message. In general, a majority of the data -/// associated with an event should be in the event's fields rather than in -/// the textual message, as the fields are more structured. -/// -/// [span]: super::span -/// [fields]: super::field -#[derive(Debug)] -pub struct Event<'a> { - fields: &'a field::ValueSet<'a>, - metadata: &'static Metadata<'static>, - parent: Parent, -} - -impl<'a> Event<'a> { - /// Constructs a new `Event` with the specified metadata and set of values, - /// and observes it with the current subscriber. 
- pub fn dispatch(metadata: &'static Metadata<'static>, fields: &'a field::ValueSet<'_>) { - let event = Event::new(metadata, fields); - crate::dispatcher::get_default(|current| { - current.event(&event); - }); - } - - /// Returns a new `Event` in the current span, with the specified metadata - /// and set of values. - #[inline] - pub fn new(metadata: &'static Metadata<'static>, fields: &'a field::ValueSet<'a>) -> Self { - Event { - fields, - metadata, - parent: Parent::Current, - } - } - - /// Returns a new `Event` as a child of the specified span, with the - /// provided metadata and set of values. - #[inline] - pub fn new_child_of( - parent: impl Into>, - metadata: &'static Metadata<'static>, - fields: &'a field::ValueSet<'a>, - ) -> Self { - let parent = match parent.into() { - Some(p) => Parent::Explicit(p), - None => Parent::Root, - }; - Event { - fields, - metadata, - parent, - } - } - - /// Constructs a new `Event` with the specified metadata and set of values, - /// and observes it with the current subscriber and an explicit parent. - pub fn child_of( - parent: impl Into>, - metadata: &'static Metadata<'static>, - fields: &'a field::ValueSet<'_>, - ) { - let event = Self::new_child_of(parent, metadata, fields); - crate::dispatcher::get_default(|current| { - current.event(&event); - }); - } - - /// Visits all the fields on this `Event` with the specified [visitor]. - /// - /// [visitor]: super::field::Visit - #[inline] - pub fn record(&self, visitor: &mut dyn field::Visit) { - self.fields.record(visitor); - } - - /// Returns an iterator over the set of values on this `Event`. - pub fn fields(&self) -> field::Iter { - self.fields.field_set().iter() - } - - /// Returns [metadata] describing this `Event`. - /// - /// [metadata]: super::Metadata - pub fn metadata(&self) -> &'static Metadata<'static> { - self.metadata - } - - /// Returns true if the new event should be a root. 
- pub fn is_root(&self) -> bool { - matches!(self.parent, Parent::Root) - } - - /// Returns true if the new event's parent should be determined based on the - /// current context. - /// - /// If this is true and the current thread is currently inside a span, then - /// that span should be the new event's parent. Otherwise, if the current - /// thread is _not_ inside a span, then the new event will be the root of its - /// own trace tree. - pub fn is_contextual(&self) -> bool { - matches!(self.parent, Parent::Current) - } - - /// Returns the new event's explicitly-specified parent, if there is one. - /// - /// Otherwise (if the new event is a root or is a child of the current span), - /// returns `None`. - pub fn parent(&self) -> Option<&Id> { - match self.parent { - Parent::Explicit(ref p) => Some(p), - _ => None, - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/field.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/field.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/field.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/field.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1255 +0,0 @@ -//! `Span` and `Event` key-value data. -//! -//! Spans and events may be annotated with key-value data, known as _fields_. -//! These fields consist of a mapping from a key (corresponding to a `&str` but -//! represented internally as an array index) to a [`Value`]. -//! -//! # `Value`s and `Subscriber`s -//! -//! `Subscriber`s consume `Value`s as fields attached to [span]s or [`Event`]s. -//! The set of field keys on a given span or event is defined on its [`Metadata`]. -//! When a span is created, it provides [`Attributes`] to the `Subscriber`'s -//! [`new_span`] method, containing any fields whose values were provided when -//! the span was created; and may call the `Subscriber`'s [`record`] method -//! with additional [`Record`]s if values are added for more of its fields. -//! 
Similarly, the [`Event`] type passed to the subscriber's [`event`] method -//! will contain any fields attached to each event. -//! -//! `tracing` represents values as either one of a set of Rust primitives -//! (`i64`, `u64`, `f64`, `i128`, `u128`, `bool`, and `&str`) or using a -//! `fmt::Display` or `fmt::Debug` implementation. `Subscriber`s are provided -//! these primitive value types as `dyn Value` trait objects. -//! -//! These trait objects can be formatted using `fmt::Debug`, but may also be -//! recorded as typed data by calling the [`Value::record`] method on these -//! trait objects with a _visitor_ implementing the [`Visit`] trait. This trait -//! represents the behavior used to record values of various types. For example, -//! an implementation of `Visit` might record integers by incrementing counters -//! for their field names rather than printing them. -//! -//! -//! # Using `valuable` -//! -//! `tracing`'s [`Value`] trait is intentionally minimalist: it supports only a small -//! number of Rust primitives as typed values, and only permits recording -//! user-defined types with their [`fmt::Debug`] or [`fmt::Display`] -//! implementations. However, there are some cases where it may be useful to record -//! nested values (such as arrays, `Vec`s, or `HashMap`s containing values), or -//! user-defined `struct` and `enum` types without having to format them as -//! unstructured text. -//! -//! To address `Value`'s limitations, `tracing` offers experimental support for -//! the [`valuable`] crate, which provides object-safe inspection of structured -//! values. User-defined types can implement the [`valuable::Valuable`] trait, -//! and be recorded as a `tracing` field by calling their [`as_value`] method. -//! If the [`Subscriber`] also supports the `valuable` crate, it can -//! then visit those types fields as structured values using `valuable`. -//! -//!
-//!     Note: valuable support is an
-//!     unstable feature. See
-//!     the documentation on unstable features for details on how to enable it.
-//! 
-//! -//! For example: -//! ```ignore -//! // Derive `Valuable` for our types: -//! use valuable::Valuable; -//! -//! #[derive(Clone, Debug, Valuable)] -//! struct User { -//! name: String, -//! age: u32, -//! address: Address, -//! } -//! -//! #[derive(Clone, Debug, Valuable)] -//! struct Address { -//! country: String, -//! city: String, -//! street: String, -//! } -//! -//! let user = User { -//! name: "Arwen Undomiel".to_string(), -//! age: 3000, -//! address: Address { -//! country: "Middle Earth".to_string(), -//! city: "Rivendell".to_string(), -//! street: "leafy lane".to_string(), -//! }, -//! }; -//! -//! // Recording `user` as a `valuable::Value` will allow the `tracing` subscriber -//! // to traverse its fields as a nested, typed structure: -//! tracing::info!(current_user = user.as_value()); -//! ``` -//! -//! Alternatively, the [`valuable()`] function may be used to convert a type -//! implementing [`Valuable`] into a `tracing` field value. -//! -//! When the `valuable` feature is enabled, the [`Visit`] trait will include an -//! optional [`record_value`] method. `Visit` implementations that wish to -//! record `valuable` values can implement this method with custom behavior. -//! If a visitor does not implement `record_value`, the [`valuable::Value`] will -//! be forwarded to the visitor's [`record_debug`] method. -//! -//! [`valuable`]: https://crates.io/crates/valuable -//! [`as_value`]: valuable::Valuable::as_value -//! [`Subscriber`]: crate::Subscriber -//! [`record_value`]: Visit::record_value -//! [`record_debug`]: Visit::record_debug -//! -//! [span]: super::span -//! [`Event`]: super::event::Event -//! [`Metadata`]: super::metadata::Metadata -//! [`Attributes`]: super::span::Attributes -//! [`Record`]: super::span::Record -//! [`new_span`]: super::subscriber::Subscriber::new_span -//! [`record`]: super::subscriber::Subscriber::record -//! [`event`]: super::subscriber::Subscriber::event -//! 
[`Value::record`]: Value::record -use crate::callsite; -use crate::stdlib::{ - borrow::Borrow, - fmt, - hash::{Hash, Hasher}, - num, - ops::Range, - string::String, -}; - -use self::private::ValidLen; - -/// An opaque key allowing _O_(1) access to a field in a `Span`'s key-value -/// data. -/// -/// As keys are defined by the _metadata_ of a span, rather than by an -/// individual instance of a span, a key may be used to access the same field -/// across all instances of a given span with the same metadata. Thus, when a -/// subscriber observes a new span, it need only access a field by name _once_, -/// and use the key for that name for all other accesses. -#[derive(Debug)] -pub struct Field { - i: usize, - fields: FieldSet, -} - -/// An empty field. -/// -/// This can be used to indicate that the value of a field is not currently -/// present but will be recorded later. -/// -/// When a field's value is `Empty`. it will not be recorded. -#[derive(Debug, Eq, PartialEq)] -pub struct Empty; - -/// Describes the fields present on a span. -/// -/// ## Equality -/// -/// In well-behaved applications, two `FieldSet`s [initialized] with equal -/// [callsite identifiers] will have identical fields. Consequently, in release -/// builds, [`FieldSet::eq`] *only* checks that its arguments have equal -/// callsites. However, the equality of field names is checked in debug builds. -/// -/// [initialized]: Self::new -/// [callsite identifiers]: callsite::Identifier -pub struct FieldSet { - /// The names of each field on the described span. - names: &'static [&'static str], - /// The callsite where the described span originates. - callsite: callsite::Identifier, -} - -/// A set of fields and values for a span. -pub struct ValueSet<'a> { - values: &'a [(&'a Field, Option<&'a (dyn Value + 'a)>)], - fields: &'a FieldSet, -} - -/// An iterator over a set of fields. -#[derive(Debug)] -pub struct Iter { - idxs: Range, - fields: FieldSet, -} - -/// Visits typed values. 
-/// -/// An instance of `Visit` ("a visitor") represents the logic necessary to -/// record field values of various types. When an implementor of [`Value`] is -/// [recorded], it calls the appropriate method on the provided visitor to -/// indicate the type that value should be recorded as. -/// -/// When a [`Subscriber`] implementation [records an `Event`] or a -/// [set of `Value`s added to a `Span`], it can pass an `&mut Visit` to the -/// `record` method on the provided [`ValueSet`] or [`Event`]. This visitor -/// will then be used to record all the field-value pairs present on that -/// `Event` or `ValueSet`. -/// -/// # Examples -/// -/// A simple visitor that writes to a string might be implemented like so: -/// ``` -/// # extern crate tracing_core as tracing; -/// use std::fmt::{self, Write}; -/// use tracing::field::{Value, Visit, Field}; -/// pub struct StringVisitor<'a> { -/// string: &'a mut String, -/// } -/// -/// impl<'a> Visit for StringVisitor<'a> { -/// fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { -/// write!(self.string, "{} = {:?}; ", field.name(), value).unwrap(); -/// } -/// } -/// ``` -/// This visitor will format each recorded value using `fmt::Debug`, and -/// append the field name and formatted value to the provided string, -/// regardless of the type of the recorded value. When all the values have -/// been recorded, the `StringVisitor` may be dropped, allowing the string -/// to be printed or stored in some other data structure. -/// -/// The `Visit` trait provides default implementations for `record_i64`, -/// `record_u64`, `record_bool`, `record_str`, and `record_error`, which simply -/// forward the recorded value to `record_debug`. Thus, `record_debug` is the -/// only method which a `Visit` implementation *must* implement. However, -/// visitors may override the default implementations of these functions in -/// order to implement type-specific behavior. 
-/// -/// Additionally, when a visitor receives a value of a type it does not care -/// about, it is free to ignore those values completely. For example, a -/// visitor which only records numeric data might look like this: -/// -/// ``` -/// # extern crate tracing_core as tracing; -/// # use std::fmt::{self, Write}; -/// # use tracing::field::{Value, Visit, Field}; -/// pub struct SumVisitor { -/// sum: i64, -/// } -/// -/// impl Visit for SumVisitor { -/// fn record_i64(&mut self, _field: &Field, value: i64) { -/// self.sum += value; -/// } -/// -/// fn record_u64(&mut self, _field: &Field, value: u64) { -/// self.sum += value as i64; -/// } -/// -/// fn record_debug(&mut self, _field: &Field, _value: &fmt::Debug) { -/// // Do nothing -/// } -/// } -/// ``` -/// -/// This visitor (which is probably not particularly useful) keeps a running -/// sum of all the numeric values it records, and ignores all other values. A -/// more practical example of recording typed values is presented in -/// `examples/counters.rs`, which demonstrates a very simple metrics system -/// implemented using `tracing`. -/// -///
-///
-/// Note: The record_error trait method is only
-/// available when the Rust standard library is present, as it requires the
-/// std::error::Error trait.
-/// 
-/// -/// [recorded]: Value::record -/// [`Subscriber`]: super::subscriber::Subscriber -/// [records an `Event`]: super::subscriber::Subscriber::event -/// [set of `Value`s added to a `Span`]: super::subscriber::Subscriber::record -/// [`Event`]: super::event::Event -pub trait Visit { - /// Visits an arbitrary type implementing the [`valuable`] crate's `Valuable` trait. - /// - /// [`valuable`]: https://docs.rs/valuable - #[cfg(all(tracing_unstable, feature = "valuable"))] - #[cfg_attr(docsrs, doc(cfg(all(tracing_unstable, feature = "valuable"))))] - fn record_value(&mut self, field: &Field, value: valuable::Value<'_>) { - self.record_debug(field, &value) - } - - /// Visit a double-precision floating point value. - fn record_f64(&mut self, field: &Field, value: f64) { - self.record_debug(field, &value) - } - - /// Visit a signed 64-bit integer value. - fn record_i64(&mut self, field: &Field, value: i64) { - self.record_debug(field, &value) - } - - /// Visit an unsigned 64-bit integer value. - fn record_u64(&mut self, field: &Field, value: u64) { - self.record_debug(field, &value) - } - - /// Visit a signed 128-bit integer value. - fn record_i128(&mut self, field: &Field, value: i128) { - self.record_debug(field, &value) - } - - /// Visit an unsigned 128-bit integer value. - fn record_u128(&mut self, field: &Field, value: u128) { - self.record_debug(field, &value) - } - - /// Visit a boolean value. - fn record_bool(&mut self, field: &Field, value: bool) { - self.record_debug(field, &value) - } - - /// Visit a string value. - fn record_str(&mut self, field: &Field, value: &str) { - self.record_debug(field, &value) - } - - /// Records a type implementing `Error`. - /// - ///
- ///
-    /// Note: This is only enabled when the Rust standard library is
-    /// present.
-    /// 
- ///
- #[cfg(feature = "std")] - #[cfg_attr(docsrs, doc(cfg(feature = "std")))] - fn record_error(&mut self, field: &Field, value: &(dyn std::error::Error + 'static)) { - self.record_debug(field, &DisplayValue(value)) - } - - /// Visit a value implementing `fmt::Debug`. - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug); -} - -/// A field value of an erased type. -/// -/// Implementors of `Value` may call the appropriate typed recording methods on -/// the [visitor] passed to their `record` method in order to indicate how -/// their data should be recorded. -/// -/// [visitor]: Visit -pub trait Value: crate::sealed::Sealed { - /// Visits this value with the given `Visitor`. - fn record(&self, key: &Field, visitor: &mut dyn Visit); -} - -/// A `Value` which serializes using `fmt::Display`. -/// -/// Uses `record_debug` in the `Value` implementation to -/// avoid an unnecessary evaluation. -#[derive(Clone)] -pub struct DisplayValue(T); - -/// A `Value` which serializes as a string using `fmt::Debug`. -#[derive(Clone)] -pub struct DebugValue(T); - -/// Wraps a type implementing `fmt::Display` as a `Value` that can be -/// recorded using its `Display` implementation. -pub fn display(t: T) -> DisplayValue -where - T: fmt::Display, -{ - DisplayValue(t) -} - -/// Wraps a type implementing `fmt::Debug` as a `Value` that can be -/// recorded using its `Debug` implementation. -pub fn debug(t: T) -> DebugValue -where - T: fmt::Debug, -{ - DebugValue(t) -} - -/// Wraps a type implementing [`Valuable`] as a `Value` that -/// can be recorded using its `Valuable` implementation. 
-/// -/// [`Valuable`]: https://docs.rs/valuable/latest/valuable/trait.Valuable.html -#[cfg(all(tracing_unstable, feature = "valuable"))] -#[cfg_attr(docsrs, doc(cfg(all(tracing_unstable, feature = "valuable"))))] -pub fn valuable(t: &T) -> valuable::Value<'_> -where - T: valuable::Valuable, -{ - t.as_value() -} - -// ===== impl Visit ===== - -impl<'a, 'b> Visit for fmt::DebugStruct<'a, 'b> { - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - self.field(field.name(), value); - } -} - -impl<'a, 'b> Visit for fmt::DebugMap<'a, 'b> { - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - self.entry(&format_args!("{}", field), value); - } -} - -impl Visit for F -where - F: FnMut(&Field, &dyn fmt::Debug), -{ - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - (self)(field, value) - } -} - -// ===== impl Value ===== - -macro_rules! impl_values { - ( $( $record:ident( $( $whatever:tt)+ ) ),+ ) => { - $( - impl_value!{ $record( $( $whatever )+ ) } - )+ - } -} - -macro_rules! ty_to_nonzero { - (u8) => { - NonZeroU8 - }; - (u16) => { - NonZeroU16 - }; - (u32) => { - NonZeroU32 - }; - (u64) => { - NonZeroU64 - }; - (u128) => { - NonZeroU128 - }; - (usize) => { - NonZeroUsize - }; - (i8) => { - NonZeroI8 - }; - (i16) => { - NonZeroI16 - }; - (i32) => { - NonZeroI32 - }; - (i64) => { - NonZeroI64 - }; - (i128) => { - NonZeroI128 - }; - (isize) => { - NonZeroIsize - }; -} - -macro_rules! 
impl_one_value { - (f32, $op:expr, $record:ident) => { - impl_one_value!(normal, f32, $op, $record); - }; - (f64, $op:expr, $record:ident) => { - impl_one_value!(normal, f64, $op, $record); - }; - (bool, $op:expr, $record:ident) => { - impl_one_value!(normal, bool, $op, $record); - }; - ($value_ty:tt, $op:expr, $record:ident) => { - impl_one_value!(normal, $value_ty, $op, $record); - impl_one_value!(nonzero, $value_ty, $op, $record); - }; - (normal, $value_ty:tt, $op:expr, $record:ident) => { - impl $crate::sealed::Sealed for $value_ty {} - impl $crate::field::Value for $value_ty { - fn record(&self, key: &$crate::field::Field, visitor: &mut dyn $crate::field::Visit) { - // `op` is always a function; the closure is used because - // sometimes there isn't a real function corresponding to that - // operation. the clippy warning is not that useful here. - #[allow(clippy::redundant_closure_call)] - visitor.$record(key, $op(*self)) - } - } - }; - (nonzero, $value_ty:tt, $op:expr, $record:ident) => { - // This `use num::*;` is reported as unused because it gets emitted - // for every single invocation of this macro, so there are multiple `use`s. - // All but the first are useless indeed. - // We need this import because we can't write a path where one part is - // the `ty_to_nonzero!($value_ty)` invocation. - #[allow(clippy::useless_attribute, unused)] - use num::*; - impl $crate::sealed::Sealed for ty_to_nonzero!($value_ty) {} - impl $crate::field::Value for ty_to_nonzero!($value_ty) { - fn record(&self, key: &$crate::field::Field, visitor: &mut dyn $crate::field::Visit) { - // `op` is always a function; the closure is used because - // sometimes there isn't a real function corresponding to that - // operation. the clippy warning is not that useful here. - #[allow(clippy::redundant_closure_call)] - visitor.$record(key, $op(self.get())) - } - } - }; -} - -macro_rules! 
impl_value { - ( $record:ident( $( $value_ty:tt ),+ ) ) => { - $( - impl_one_value!($value_ty, |this: $value_ty| this, $record); - )+ - }; - ( $record:ident( $( $value_ty:tt ),+ as $as_ty:ty) ) => { - $( - impl_one_value!($value_ty, |this: $value_ty| this as $as_ty, $record); - )+ - }; -} - -// ===== impl Value ===== - -impl_values! { - record_u64(u64), - record_u64(usize, u32, u16, u8 as u64), - record_i64(i64), - record_i64(isize, i32, i16, i8 as i64), - record_u128(u128), - record_i128(i128), - record_bool(bool), - record_f64(f64, f32 as f64) -} - -impl crate::sealed::Sealed for Wrapping {} -impl crate::field::Value for Wrapping { - fn record(&self, key: &crate::field::Field, visitor: &mut dyn crate::field::Visit) { - self.0.record(key, visitor) - } -} - -impl crate::sealed::Sealed for str {} - -impl Value for str { - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - visitor.record_str(key, self) - } -} - -#[cfg(feature = "std")] -impl crate::sealed::Sealed for dyn std::error::Error + 'static {} - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl Value for dyn std::error::Error + 'static { - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - visitor.record_error(key, self) - } -} - -#[cfg(feature = "std")] -impl crate::sealed::Sealed for dyn std::error::Error + Send + 'static {} - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl Value for dyn std::error::Error + Send + 'static { - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - (self as &dyn std::error::Error).record(key, visitor) - } -} - -#[cfg(feature = "std")] -impl crate::sealed::Sealed for dyn std::error::Error + Sync + 'static {} - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl Value for dyn std::error::Error + Sync + 'static { - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - (self as &dyn std::error::Error).record(key, visitor) - } -} - -#[cfg(feature = "std")] -impl 
crate::sealed::Sealed for dyn std::error::Error + Send + Sync + 'static {} - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl Value for dyn std::error::Error + Send + Sync + 'static { - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - (self as &dyn std::error::Error).record(key, visitor) - } -} - -impl<'a, T: ?Sized> crate::sealed::Sealed for &'a T where T: Value + crate::sealed::Sealed + 'a {} - -impl<'a, T: ?Sized> Value for &'a T -where - T: Value + 'a, -{ - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - (*self).record(key, visitor) - } -} - -impl<'a, T: ?Sized> crate::sealed::Sealed for &'a mut T where T: Value + crate::sealed::Sealed + 'a {} - -impl<'a, T: ?Sized> Value for &'a mut T -where - T: Value + 'a, -{ - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - // Don't use `(*self).record(key, visitor)`, otherwise would - // cause stack overflow due to `unconditional_recursion`. - T::record(self, key, visitor) - } -} - -impl<'a> crate::sealed::Sealed for fmt::Arguments<'a> {} - -impl<'a> Value for fmt::Arguments<'a> { - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - visitor.record_debug(key, self) - } -} - -impl crate::sealed::Sealed for crate::stdlib::boxed::Box where T: Value {} - -impl Value for crate::stdlib::boxed::Box -where - T: Value, -{ - #[inline] - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - self.as_ref().record(key, visitor) - } -} - -impl crate::sealed::Sealed for String {} -impl Value for String { - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - visitor.record_str(key, self.as_str()) - } -} - -impl fmt::Debug for dyn Value { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // We are only going to be recording the field value, so we don't - // actually care about the field name here. 
- struct NullCallsite; - static NULL_CALLSITE: NullCallsite = NullCallsite; - impl crate::callsite::Callsite for NullCallsite { - fn set_interest(&self, _: crate::subscriber::Interest) { - unreachable!("you somehow managed to register the null callsite?") - } - - fn metadata(&self) -> &crate::Metadata<'_> { - unreachable!("you somehow managed to access the null callsite?") - } - } - - static FIELD: Field = Field { - i: 0, - fields: FieldSet::new(&[], crate::identify_callsite!(&NULL_CALLSITE)), - }; - - let mut res = Ok(()); - self.record(&FIELD, &mut |_: &Field, val: &dyn fmt::Debug| { - res = write!(f, "{:?}", val); - }); - res - } -} - -impl fmt::Display for dyn Value { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -// ===== impl DisplayValue ===== - -impl crate::sealed::Sealed for DisplayValue {} - -impl Value for DisplayValue -where - T: fmt::Display, -{ - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - visitor.record_debug(key, self) - } -} - -impl fmt::Debug for DisplayValue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -impl fmt::Display for DisplayValue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -// ===== impl DebugValue ===== - -impl crate::sealed::Sealed for DebugValue {} - -impl Value for DebugValue -where - T: fmt::Debug, -{ - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - visitor.record_debug(key, &self.0) - } -} - -impl fmt::Debug for DebugValue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -// ===== impl ValuableValue ===== - -#[cfg(all(tracing_unstable, feature = "valuable"))] -impl crate::sealed::Sealed for valuable::Value<'_> {} - -#[cfg(all(tracing_unstable, feature = "valuable"))] -#[cfg_attr(docsrs, doc(cfg(all(tracing_unstable, feature = "valuable"))))] -impl Value for valuable::Value<'_> { - fn record(&self, key: &Field, visitor: 
&mut dyn Visit) { - visitor.record_value(key, *self) - } -} - -#[cfg(all(tracing_unstable, feature = "valuable"))] -impl crate::sealed::Sealed for &'_ dyn valuable::Valuable {} - -#[cfg(all(tracing_unstable, feature = "valuable"))] -#[cfg_attr(docsrs, doc(cfg(all(tracing_unstable, feature = "valuable"))))] -impl Value for &'_ dyn valuable::Valuable { - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - visitor.record_value(key, self.as_value()) - } -} - -impl crate::sealed::Sealed for Empty {} -impl Value for Empty { - #[inline] - fn record(&self, _: &Field, _: &mut dyn Visit) {} -} - -impl crate::sealed::Sealed for Option {} - -impl Value for Option { - fn record(&self, key: &Field, visitor: &mut dyn Visit) { - if let Some(v) = &self { - v.record(key, visitor) - } - } -} - -// ===== impl Field ===== - -impl Field { - /// Returns an [`Identifier`] that uniquely identifies the [`Callsite`] - /// which defines this field. - /// - /// [`Identifier`]: super::callsite::Identifier - /// [`Callsite`]: super::callsite::Callsite - #[inline] - pub fn callsite(&self) -> callsite::Identifier { - self.fields.callsite() - } - - /// Returns a string representing the name of the field. 
- pub fn name(&self) -> &'static str { - self.fields.names[self.i] - } -} - -impl fmt::Display for Field { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad(self.name()) - } -} - -impl AsRef for Field { - fn as_ref(&self) -> &str { - self.name() - } -} - -impl PartialEq for Field { - fn eq(&self, other: &Self) -> bool { - self.callsite() == other.callsite() && self.i == other.i - } -} - -impl Eq for Field {} - -impl Hash for Field { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - self.callsite().hash(state); - self.i.hash(state); - } -} - -impl Clone for Field { - fn clone(&self) -> Self { - Field { - i: self.i, - fields: FieldSet { - names: self.fields.names, - callsite: self.fields.callsite(), - }, - } - } -} - -// ===== impl FieldSet ===== - -impl FieldSet { - /// Constructs a new `FieldSet` with the given array of field names and callsite. - pub const fn new(names: &'static [&'static str], callsite: callsite::Identifier) -> Self { - Self { names, callsite } - } - - /// Returns an [`Identifier`] that uniquely identifies the [`Callsite`] - /// which defines this set of fields.. - /// - /// [`Identifier`]: super::callsite::Identifier - /// [`Callsite`]: super::callsite::Callsite - #[inline] - pub(crate) fn callsite(&self) -> callsite::Identifier { - callsite::Identifier(self.callsite.0) - } - - /// Returns the [`Field`] named `name`, or `None` if no such field exists. - /// - /// [`Field`]: super::Field - pub fn field(&self, name: &Q) -> Option - where - Q: Borrow, - { - let name = &name.borrow(); - self.names.iter().position(|f| f == name).map(|i| Field { - i, - fields: FieldSet { - names: self.names, - callsite: self.callsite(), - }, - }) - } - - /// Returns `true` if `self` contains the given `field`. - /// - ///
- ///
-    /// Note: If field shares a name with a field
-    /// in this FieldSet, but was created by a FieldSet
-    /// with a different callsite, this FieldSet does not
-    /// contain it. This is so that if two separate span callsites define a field
-    /// named "foo", the Field corresponding to "foo" for each
-    /// of those callsites are not equivalent.
-    /// 
- pub fn contains(&self, field: &Field) -> bool { - field.callsite() == self.callsite() && field.i <= self.len() - } - - /// Returns an iterator over the `Field`s in this `FieldSet`. - #[inline] - pub fn iter(&self) -> Iter { - let idxs = 0..self.len(); - Iter { - idxs, - fields: FieldSet { - names: self.names, - callsite: self.callsite(), - }, - } - } - - /// Returns a new `ValueSet` with entries for this `FieldSet`'s values. - #[doc(hidden)] - pub fn value_set<'v, V>(&'v self, values: &'v V) -> ValueSet<'v> - where - V: ValidLen<'v>, - { - ValueSet { - fields: self, - values: values.borrow(), - } - } - - /// Returns the number of fields in this `FieldSet`. - #[inline] - pub fn len(&self) -> usize { - self.names.len() - } - - /// Returns whether or not this `FieldSet` has fields. - #[inline] - pub fn is_empty(&self) -> bool { - self.names.is_empty() - } -} - -impl<'a> IntoIterator for &'a FieldSet { - type IntoIter = Iter; - type Item = Field; - #[inline] - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl fmt::Debug for FieldSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FieldSet") - .field("names", &self.names) - .field("callsite", &self.callsite) - .finish() - } -} - -impl fmt::Display for FieldSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_set() - .entries(self.names.iter().map(display)) - .finish() - } -} - -impl Eq for FieldSet {} - -impl PartialEq for FieldSet { - fn eq(&self, other: &Self) -> bool { - if core::ptr::eq(&self, &other) { - true - } else if cfg!(not(debug_assertions)) { - // In a well-behaving application, two `FieldSet`s can be assumed to - // be totally equal so long as they share the same callsite. - self.callsite == other.callsite - } else { - // However, when debug-assertions are enabled, do NOT assume that - // the application is well-behaving; check every the field names of - // each `FieldSet` for equality. 
- - // `FieldSet` is destructured here to ensure a compile-error if the - // fields of `FieldSet` change. - let Self { - names: lhs_names, - callsite: lhs_callsite, - } = self; - - let Self { - names: rhs_names, - callsite: rhs_callsite, - } = &other; - - // Check callsite equality first, as it is probably cheaper to do - // than str equality. - lhs_callsite == rhs_callsite && lhs_names == rhs_names - } - } -} - -// ===== impl Iter ===== - -impl Iterator for Iter { - type Item = Field; - #[inline] - fn next(&mut self) -> Option { - let i = self.idxs.next()?; - Some(Field { - i, - fields: FieldSet { - names: self.fields.names, - callsite: self.fields.callsite(), - }, - }) - } -} - -// ===== impl ValueSet ===== - -impl<'a> ValueSet<'a> { - /// Returns an [`Identifier`] that uniquely identifies the [`Callsite`] - /// defining the fields this `ValueSet` refers to. - /// - /// [`Identifier`]: super::callsite::Identifier - /// [`Callsite`]: super::callsite::Callsite - #[inline] - pub fn callsite(&self) -> callsite::Identifier { - self.fields.callsite() - } - - /// Visits all the fields in this `ValueSet` with the provided [visitor]. - /// - /// [visitor]: Visit - pub fn record(&self, visitor: &mut dyn Visit) { - let my_callsite = self.callsite(); - for (field, value) in self.values { - if field.callsite() != my_callsite { - continue; - } - if let Some(value) = value { - value.record(field, visitor); - } - } - } - - /// Returns the number of fields in this `ValueSet` that would be visited - /// by a given [visitor] to the [`ValueSet::record()`] method. - /// - /// [visitor]: Visit - /// [`ValueSet::record()`]: ValueSet::record() - pub fn len(&self) -> usize { - let my_callsite = self.callsite(); - self.values - .iter() - .filter(|(field, _)| field.callsite() == my_callsite) - .count() - } - - /// Returns `true` if this `ValueSet` contains a value for the given `Field`. 
- pub(crate) fn contains(&self, field: &Field) -> bool { - field.callsite() == self.callsite() - && self - .values - .iter() - .any(|(key, val)| *key == field && val.is_some()) - } - - /// Returns true if this `ValueSet` contains _no_ values. - pub fn is_empty(&self) -> bool { - let my_callsite = self.callsite(); - self.values - .iter() - .all(|(key, val)| val.is_none() || key.callsite() != my_callsite) - } - - pub(crate) fn field_set(&self) -> &FieldSet { - self.fields - } -} - -impl<'a> fmt::Debug for ValueSet<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.values - .iter() - .fold(&mut f.debug_struct("ValueSet"), |dbg, (key, v)| { - if let Some(val) = v { - val.record(key, dbg); - } - dbg - }) - .field("callsite", &self.callsite()) - .finish() - } -} - -impl<'a> fmt::Display for ValueSet<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.values - .iter() - .fold(&mut f.debug_map(), |dbg, (key, v)| { - if let Some(val) = v { - val.record(key, dbg); - } - dbg - }) - .finish() - } -} - -// ===== impl ValidLen ===== - -mod private { - use super::*; - - /// Restrictions on `ValueSet` lengths were removed in #2508 but this type remains for backwards compatibility. - pub trait ValidLen<'a>: Borrow<[(&'a Field, Option<&'a (dyn Value + 'a)>)]> {} - - impl<'a, const N: usize> ValidLen<'a> for [(&'a Field, Option<&'a (dyn Value + 'a)>); N] {} -} - -#[cfg(test)] -mod test { - use super::*; - use crate::metadata::{Kind, Level, Metadata}; - use crate::stdlib::{borrow::ToOwned, string::String}; - - // Make sure TEST_CALLSITE_* have non-zero size, so they can't be located at the same address. - struct TestCallsite1(u8); - static TEST_CALLSITE_1: TestCallsite1 = TestCallsite1(0); - static TEST_META_1: Metadata<'static> = metadata! 
{ - name: "field_test1", - target: module_path!(), - level: Level::INFO, - fields: &["foo", "bar", "baz"], - callsite: &TEST_CALLSITE_1, - kind: Kind::SPAN, - }; - - impl crate::callsite::Callsite for TestCallsite1 { - fn set_interest(&self, _: crate::subscriber::Interest) { - unimplemented!() - } - - fn metadata(&self) -> &Metadata<'_> { - &TEST_META_1 - } - } - - struct TestCallsite2(u8); - static TEST_CALLSITE_2: TestCallsite2 = TestCallsite2(0); - static TEST_META_2: Metadata<'static> = metadata! { - name: "field_test2", - target: module_path!(), - level: Level::INFO, - fields: &["foo", "bar", "baz"], - callsite: &TEST_CALLSITE_2, - kind: Kind::SPAN, - }; - - impl crate::callsite::Callsite for TestCallsite2 { - fn set_interest(&self, _: crate::subscriber::Interest) { - unimplemented!() - } - - fn metadata(&self) -> &Metadata<'_> { - &TEST_META_2 - } - } - - #[test] - fn value_set_with_no_values_is_empty() { - let fields = TEST_META_1.fields(); - let values = &[ - (&fields.field("foo").unwrap(), None), - (&fields.field("bar").unwrap(), None), - (&fields.field("baz").unwrap(), None), - ]; - let valueset = fields.value_set(values); - assert!(valueset.is_empty()); - } - - #[test] - fn empty_value_set_is_empty() { - let fields = TEST_META_1.fields(); - let valueset = fields.value_set(&[]); - assert!(valueset.is_empty()); - } - - #[test] - fn value_sets_with_fields_from_other_callsites_are_empty() { - let fields = TEST_META_1.fields(); - let values = &[ - (&fields.field("foo").unwrap(), Some(&1 as &dyn Value)), - (&fields.field("bar").unwrap(), Some(&2 as &dyn Value)), - (&fields.field("baz").unwrap(), Some(&3 as &dyn Value)), - ]; - let valueset = TEST_META_2.fields().value_set(values); - assert!(valueset.is_empty()) - } - - #[test] - fn sparse_value_sets_are_not_empty() { - let fields = TEST_META_1.fields(); - let values = &[ - (&fields.field("foo").unwrap(), None), - (&fields.field("bar").unwrap(), Some(&57 as &dyn Value)), - (&fields.field("baz").unwrap(), None), 
- ]; - let valueset = fields.value_set(values); - assert!(!valueset.is_empty()); - } - - #[test] - fn fields_from_other_callsets_are_skipped() { - let fields = TEST_META_1.fields(); - let values = &[ - (&fields.field("foo").unwrap(), None), - ( - &TEST_META_2.fields().field("bar").unwrap(), - Some(&57 as &dyn Value), - ), - (&fields.field("baz").unwrap(), None), - ]; - - struct MyVisitor; - impl Visit for MyVisitor { - fn record_debug(&mut self, field: &Field, _: &dyn (crate::stdlib::fmt::Debug)) { - assert_eq!(field.callsite(), TEST_META_1.callsite()) - } - } - let valueset = fields.value_set(values); - valueset.record(&mut MyVisitor); - } - - #[test] - fn empty_fields_are_skipped() { - let fields = TEST_META_1.fields(); - let values = &[ - (&fields.field("foo").unwrap(), Some(&Empty as &dyn Value)), - (&fields.field("bar").unwrap(), Some(&57 as &dyn Value)), - (&fields.field("baz").unwrap(), Some(&Empty as &dyn Value)), - ]; - - struct MyVisitor; - impl Visit for MyVisitor { - fn record_debug(&mut self, field: &Field, _: &dyn (crate::stdlib::fmt::Debug)) { - assert_eq!(field.name(), "bar") - } - } - let valueset = fields.value_set(values); - valueset.record(&mut MyVisitor); - } - - #[test] - fn record_debug_fn() { - let fields = TEST_META_1.fields(); - let values = &[ - (&fields.field("foo").unwrap(), Some(&1 as &dyn Value)), - (&fields.field("bar").unwrap(), Some(&2 as &dyn Value)), - (&fields.field("baz").unwrap(), Some(&3 as &dyn Value)), - ]; - let valueset = fields.value_set(values); - let mut result = String::new(); - valueset.record(&mut |_: &Field, value: &dyn fmt::Debug| { - use crate::stdlib::fmt::Write; - write!(&mut result, "{:?}", value).unwrap(); - }); - assert_eq!(result, "123".to_owned()); - } - - #[test] - #[cfg(feature = "std")] - fn record_error() { - let fields = TEST_META_1.fields(); - let err: Box = - std::io::Error::new(std::io::ErrorKind::Other, "lol").into(); - let values = &[ - (&fields.field("foo").unwrap(), Some(&err as &dyn Value)), - 
(&fields.field("bar").unwrap(), Some(&Empty as &dyn Value)), - (&fields.field("baz").unwrap(), Some(&Empty as &dyn Value)), - ]; - let valueset = fields.value_set(values); - let mut result = String::new(); - valueset.record(&mut |_: &Field, value: &dyn fmt::Debug| { - use core::fmt::Write; - write!(&mut result, "{:?}", value).unwrap(); - }); - assert_eq!(result, format!("{}", err)); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/lazy.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/lazy.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/lazy.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/lazy.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,76 +0,0 @@ -#[cfg(feature = "std")] -pub(crate) use once_cell::sync::Lazy; - -#[cfg(not(feature = "std"))] -pub(crate) use self::spin::Lazy; - -#[cfg(not(feature = "std"))] -mod spin { - //! This is the `once_cell::sync::Lazy` type, but modified to use our - //! `spin::Once` type rather than `OnceCell`. This is used to replace - //! `once_cell::sync::Lazy` on `no-std` builds. - use crate::spin::Once; - use core::{cell::Cell, fmt, ops::Deref}; - - /// Re-implementation of `once_cell::sync::Lazy` on top of `spin::Once` - /// rather than `OnceCell`. - /// - /// This is used when the standard library is disabled. - pub(crate) struct Lazy T> { - cell: Once, - init: Cell>, - } - - impl fmt::Debug for Lazy { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Lazy") - .field("cell", &self.cell) - .field("init", &"..") - .finish() - } - } - - // We never create a `&F` from a `&Lazy` so it is fine to not impl - // `Sync` for `F`. We do create a `&mut Option` in `force`, but this is - // properly synchronized, so it only happens once so it also does not - // contribute to this impl. - unsafe impl Sync for Lazy where Once: Sync {} - // auto-derived `Send` impl is OK. - - impl Lazy { - /// Creates a new lazy value with the given initializing function. 
- pub(crate) const fn new(init: F) -> Lazy { - Lazy { - cell: Once::new(), - init: Cell::new(Some(init)), - } - } - } - - impl T> Lazy { - /// Forces the evaluation of this lazy value and returns a reference to - /// the result. - /// - /// This is equivalent to the `Deref` impl, but is explicit. - pub(crate) fn force(this: &Lazy) -> &T { - this.cell.call_once(|| match this.init.take() { - Some(f) => f(), - None => panic!("Lazy instance has previously been poisoned"), - }) - } - } - - impl T> Deref for Lazy { - type Target = T; - fn deref(&self) -> &T { - Lazy::force(self) - } - } - - impl Default for Lazy { - /// Creates a new lazy value using `Default` as the initializing function. - fn default() -> Lazy { - Lazy::new(T::default) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/lib.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/lib.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,293 +0,0 @@ -//! Core primitives for `tracing`. -//! -//! [`tracing`] is a framework for instrumenting Rust programs to collect -//! structured, event-based diagnostic information. This crate defines the core -//! primitives of `tracing`. -//! -//! This crate provides: -//! -//! * [`span::Id`] identifies a span within the execution of a program. -//! -//! * [`Event`] represents a single event within a trace. -//! -//! * [`Subscriber`], the trait implemented to collect trace data. -//! -//! * [`Metadata`] and [`Callsite`] provide information describing spans and -//! `Event`s. -//! -//! * [`Field`], [`FieldSet`], [`Value`], and [`ValueSet`] represent the -//! structured data attached to a span. -//! -//! * [`Dispatch`] allows spans and events to be dispatched to `Subscriber`s. -//! -//! In addition, it defines the global callsite registry and per-thread current -//! 
dispatcher which other components of the tracing system rely on. -//! -//! *Compiler support: [requires `rustc` 1.56+][msrv]* -//! -//! [msrv]: #supported-rust-versions -//! -//! ## Usage -//! -//! Application authors will typically not use this crate directly. Instead, -//! they will use the [`tracing`] crate, which provides a much more -//! fully-featured API. However, this crate's API will change very infrequently, -//! so it may be used when dependencies must be very stable. -//! -//! `Subscriber` implementations may depend on `tracing-core` rather than -//! `tracing`, as the additional APIs provided by `tracing` are primarily useful -//! for instrumenting libraries and applications, and are generally not -//! necessary for `Subscriber` implementations. -//! -//! The [`tokio-rs/tracing`] repository contains less stable crates designed to -//! be used with the `tracing` ecosystem. It includes a collection of -//! `Subscriber` implementations, as well as utility and adapter crates. -//! -//! ## Crate Feature Flags -//! -//! The following crate [feature flags] are available: -//! -//! * `std`: Depend on the Rust standard library (enabled by default). -//! -//! `no_std` users may disable this feature with `default-features = false`: -//! -//! ```toml -//! [dependencies] -//! tracing-core = { version = "0.1.22", default-features = false } -//! ``` -//! -//! **Note**:`tracing-core`'s `no_std` support requires `liballoc`. -//! -//! ### Unstable Features -//! -//! These feature flags enable **unstable** features. The public API may break in 0.1.x -//! releases. To enable these features, the `--cfg tracing_unstable` must be passed to -//! `rustc` when compiling. -//! -//! The following unstable feature flags are currently available: -//! -//! * `valuable`: Enables support for recording [field values] using the -//! [`valuable`] crate. -//! -//! #### Enabling Unstable Features -//! -//! The easiest way to set the `tracing_unstable` cfg is to use the `RUSTFLAGS` -//! 
env variable when running `cargo` commands: -//! -//! ```shell -//! RUSTFLAGS="--cfg tracing_unstable" cargo build -//! ``` -//! Alternatively, the following can be added to the `.cargo/config` file in a -//! project to automatically enable the cfg flag for that project: -//! -//! ```toml -//! [build] -//! rustflags = ["--cfg", "tracing_unstable"] -//! ``` -//! -//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section -//! [field values]: crate::field -//! [`valuable`]: https://crates.io/crates/valuable -//! -//! ## Supported Rust Versions -//! -//! Tracing is built against the latest stable release. The minimum supported -//! version is 1.56. The current Tracing version is not guaranteed to build on -//! Rust versions earlier than the minimum supported version. -//! -//! Tracing follows the same compiler support policies as the rest of the Tokio -//! project. The current stable Rust compiler and the three most recent minor -//! versions before it will always be supported. For example, if the current -//! stable compiler version is 1.69, the minimum supported version will not be -//! increased past 1.66, three minor versions prior. Increasing the minimum -//! supported compiler version is not considered a semver breaking change as -//! long as doing so complies with this policy. -//! -//! -//! [`span::Id`]: span::Id -//! [`Event`]: event::Event -//! [`Subscriber`]: subscriber::Subscriber -//! [`Metadata`]: metadata::Metadata -//! [`Callsite`]: callsite::Callsite -//! [`Field`]: field::Field -//! [`FieldSet`]: field::FieldSet -//! [`Value`]: field::Value -//! [`ValueSet`]: field::ValueSet -//! [`Dispatch`]: dispatcher::Dispatch -//! [`tokio-rs/tracing`]: https://github.com/tokio-rs/tracing -//! 
[`tracing`]: https://crates.io/crates/tracing -#![doc( - html_logo_url = "https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/logo-type.png", - issue_tracker_base_url = "https://github.com/tokio-rs/tracing/issues/" -)] -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(docsrs, feature(doc_cfg), deny(rustdoc::broken_intra_doc_links))] -#![warn( - missing_debug_implementations, - missing_docs, - rust_2018_idioms, - unreachable_pub, - bad_style, - dead_code, - improper_ctypes, - non_shorthand_field_patterns, - no_mangle_generic_items, - overflowing_literals, - path_statements, - patterns_in_fns_without_body, - private_in_public, - unconditional_recursion, - unused, - unused_allocation, - unused_comparisons, - unused_parens, - while_true -)] -#[cfg(not(feature = "std"))] -extern crate alloc; - -/// Statically constructs an [`Identifier`] for the provided [`Callsite`]. -/// -/// This may be used in contexts such as static initializers. -/// -/// For example: -/// ```rust -/// use tracing_core::{callsite, identify_callsite}; -/// # use tracing_core::{Metadata, subscriber::Interest}; -/// # fn main() { -/// pub struct MyCallsite { -/// // ... -/// } -/// impl callsite::Callsite for MyCallsite { -/// # fn set_interest(&self, _: Interest) { unimplemented!() } -/// # fn metadata(&self) -> &Metadata { unimplemented!() } -/// // ... -/// } -/// -/// static CALLSITE: MyCallsite = MyCallsite { -/// // ... -/// }; -/// -/// static CALLSITE_ID: callsite::Identifier = identify_callsite!(&CALLSITE); -/// # } -/// ``` -/// -/// [`Identifier`]: callsite::Identifier -/// [`Callsite`]: callsite::Callsite -#[macro_export] -macro_rules! identify_callsite { - ($callsite:expr) => { - $crate::callsite::Identifier($callsite) - }; -} - -/// Statically constructs new span [metadata]. 
-/// -/// /// For example: -/// ```rust -/// # use tracing_core::{callsite::Callsite, subscriber::Interest}; -/// use tracing_core::metadata; -/// use tracing_core::metadata::{Kind, Level, Metadata}; -/// # fn main() { -/// # pub struct MyCallsite { } -/// # impl Callsite for MyCallsite { -/// # fn set_interest(&self, _: Interest) { unimplemented!() } -/// # fn metadata(&self) -> &Metadata { unimplemented!() } -/// # } -/// # -/// static FOO_CALLSITE: MyCallsite = MyCallsite { -/// // ... -/// }; -/// -/// static FOO_METADATA: Metadata = metadata!{ -/// name: "foo", -/// target: module_path!(), -/// level: Level::DEBUG, -/// fields: &["bar", "baz"], -/// callsite: &FOO_CALLSITE, -/// kind: Kind::SPAN, -/// }; -/// # } -/// ``` -/// -/// [metadata]: metadata::Metadata -/// [`Metadata::new`]: metadata::Metadata::new -#[macro_export] -macro_rules! metadata { - ( - name: $name:expr, - target: $target:expr, - level: $level:expr, - fields: $fields:expr, - callsite: $callsite:expr, - kind: $kind:expr - ) => { - $crate::metadata! 
{ - name: $name, - target: $target, - level: $level, - fields: $fields, - callsite: $callsite, - kind: $kind, - } - }; - ( - name: $name:expr, - target: $target:expr, - level: $level:expr, - fields: $fields:expr, - callsite: $callsite:expr, - kind: $kind:expr, - ) => { - $crate::metadata::Metadata::new( - $name, - $target, - $level, - ::core::option::Option::Some(file!()), - ::core::option::Option::Some(line!()), - ::core::option::Option::Some(module_path!()), - $crate::field::FieldSet::new($fields, $crate::identify_callsite!($callsite)), - $kind, - ) - }; -} - -pub(crate) mod lazy; - -// Trimmed-down vendored version of spin 0.5.2 (0387621) -// Dependency of no_std lazy_static, not required in a std build -#[cfg(not(feature = "std"))] -pub(crate) mod spin; - -#[cfg(not(feature = "std"))] -#[doc(hidden)] -pub type Once = self::spin::Once<()>; - -#[cfg(feature = "std")] -pub use stdlib::sync::Once; - -pub mod callsite; -pub mod dispatcher; -pub mod event; -pub mod field; -pub mod metadata; -mod parent; -pub mod span; -pub(crate) mod stdlib; -pub mod subscriber; - -#[doc(inline)] -pub use self::{ - callsite::Callsite, - dispatcher::Dispatch, - event::Event, - field::Field, - metadata::{Level, LevelFilter, Metadata}, - subscriber::Subscriber, -}; - -pub use self::{metadata::Kind, subscriber::Interest}; - -mod sealed { - pub trait Sealed {} -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/metadata.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/metadata.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/metadata.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/metadata.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,1115 +0,0 @@ -//! Metadata describing trace data. -use super::{callsite, field}; -use crate::stdlib::{ - cmp, fmt, - str::FromStr, - sync::atomic::{AtomicUsize, Ordering}, -}; - -/// Metadata describing a [span] or [event]. 
-/// -/// All spans and events have the following metadata: -/// - A [name], represented as a static string. -/// - A [target], a string that categorizes part of the system where the span -/// or event occurred. The `tracing` macros default to using the module -/// path where the span or event originated as the target, but it may be -/// overridden. -/// - A [verbosity level]. This determines how verbose a given span or event -/// is, and allows enabling or disabling more verbose diagnostics -/// situationally. See the documentation for the [`Level`] type for details. -/// - The names of the [fields] defined by the span or event. -/// - Whether the metadata corresponds to a span or event. -/// -/// In addition, the following optional metadata describing the source code -/// location where the span or event originated _may_ be provided: -/// - The [file name] -/// - The [line number] -/// - The [module path] -/// -/// Metadata is used by [`Subscriber`]s when filtering spans and events, and it -/// may also be used as part of their data payload. -/// -/// When created by the `event!` or `span!` macro, the metadata describing a -/// particular event or span is constructed statically and exists as a single -/// static instance. Thus, the overhead of creating the metadata is -/// _significantly_ lower than that of creating the actual span. Therefore, -/// filtering is based on metadata, rather than on the constructed span. -/// -/// ## Equality -/// -/// In well-behaved applications, two `Metadata` with equal -/// [callsite identifiers] will be equal in all other ways (i.e., have the same -/// `name`, `target`, etc.). Consequently, in release builds, [`Metadata::eq`] -/// *only* checks that its arguments have equal callsites. However, the equality -/// of `Metadata`'s other fields is checked in debug builds. 
-/// -/// [span]: super::span -/// [event]: super::event -/// [name]: Self::name -/// [target]: Self::target -/// [fields]: Self::fields -/// [verbosity level]: Self::level -/// [file name]: Self::file -/// [line number]: Self::line -/// [module path]: Self::module_path -/// [`Subscriber`]: super::subscriber::Subscriber -/// [callsite identifiers]: Self::callsite -pub struct Metadata<'a> { - /// The name of the span described by this metadata. - name: &'static str, - - /// The part of the system that the span that this metadata describes - /// occurred in. - target: &'a str, - - /// The level of verbosity of the described span. - level: Level, - - /// The name of the Rust module where the span occurred, or `None` if this - /// could not be determined. - module_path: Option<&'a str>, - - /// The name of the source code file where the span occurred, or `None` if - /// this could not be determined. - file: Option<&'a str>, - - /// The line number in the source code file where the span occurred, or - /// `None` if this could not be determined. - line: Option, - - /// The names of the key-value fields attached to the described span or - /// event. - fields: field::FieldSet, - - /// The kind of the callsite. - kind: Kind, -} - -/// Indicates whether the callsite is a span or event. -#[derive(Clone, Eq, PartialEq)] -pub struct Kind(u8); - -/// Describes the level of verbosity of a span or event. -/// -/// # Comparing Levels -/// -/// `Level` implements the [`PartialOrd`] and [`Ord`] traits, allowing two -/// `Level`s to be compared to determine which is considered more or less -/// verbose. Levels which are more verbose are considered "greater than" levels -/// which are less verbose, with [`Level::ERROR`] considered the lowest, and -/// [`Level::TRACE`] considered the highest. 
-/// -/// For example: -/// ``` -/// use tracing_core::Level; -/// -/// assert!(Level::TRACE > Level::DEBUG); -/// assert!(Level::ERROR < Level::WARN); -/// assert!(Level::INFO <= Level::DEBUG); -/// assert_eq!(Level::TRACE, Level::TRACE); -/// ``` -/// -/// # Filtering -/// -/// `Level`s are typically used to implement filtering that determines which -/// spans and events are enabled. Depending on the use case, more or less -/// verbose diagnostics may be desired. For example, when running in -/// development, [`DEBUG`]-level traces may be enabled by default. When running in -/// production, only [`INFO`]-level and lower traces might be enabled. Libraries -/// may include very verbose diagnostics at the [`DEBUG`] and/or [`TRACE`] levels. -/// Applications using those libraries typically chose to ignore those traces. However, when -/// debugging an issue involving said libraries, it may be useful to temporarily -/// enable the more verbose traces. -/// -/// The [`LevelFilter`] type is provided to enable filtering traces by -/// verbosity. `Level`s can be compared against [`LevelFilter`]s, and -/// [`LevelFilter`] has a variant for each `Level`, which compares analogously -/// to that level. In addition, [`LevelFilter`] adds a [`LevelFilter::OFF`] -/// variant, which is considered "less verbose" than every other `Level`. This is -/// intended to allow filters to completely disable tracing in a particular context. -/// -/// For example: -/// ``` -/// use tracing_core::{Level, LevelFilter}; -/// -/// assert!(LevelFilter::OFF < Level::TRACE); -/// assert!(LevelFilter::TRACE > Level::DEBUG); -/// assert!(LevelFilter::ERROR < Level::WARN); -/// assert!(LevelFilter::INFO <= Level::DEBUG); -/// assert!(LevelFilter::INFO >= Level::INFO); -/// ``` -/// -/// ## Examples -/// -/// Below is a simple example of how a [`Subscriber`] could implement filtering through -/// a [`LevelFilter`]. 
When a span or event is recorded, the [`Subscriber::enabled`] method -/// compares the span or event's `Level` against the configured [`LevelFilter`]. -/// The optional [`Subscriber::max_level_hint`] method can also be implemented to allow spans -/// and events above a maximum verbosity level to be skipped more efficiently, -/// often improving performance in short-lived programs. -/// -/// ``` -/// use tracing_core::{span, Event, Level, LevelFilter, Subscriber, Metadata}; -/// # use tracing_core::span::{Id, Record, Current}; -/// -/// #[derive(Debug)] -/// pub struct MySubscriber { -/// /// The most verbose level that this subscriber will enable. -/// max_level: LevelFilter, -/// -/// // ... -/// } -/// -/// impl MySubscriber { -/// /// Returns a new `MySubscriber` which will record spans and events up to -/// /// `max_level`. -/// pub fn with_max_level(max_level: LevelFilter) -> Self { -/// Self { -/// max_level, -/// // ... -/// } -/// } -/// } -/// impl Subscriber for MySubscriber { -/// fn enabled(&self, meta: &Metadata<'_>) -> bool { -/// // A span or event is enabled if it is at or below the configured -/// // maximum level. -/// meta.level() <= &self.max_level -/// } -/// -/// // This optional method returns the most verbose level that this -/// // subscriber will enable. Although implementing this method is not -/// // *required*, it permits additional optimizations when it is provided, -/// // allowing spans and events above the max level to be skipped -/// // more efficiently. -/// fn max_level_hint(&self) -> Option { -/// Some(self.max_level) -/// } -/// -/// // Implement the rest of the subscriber... -/// fn new_span(&self, span: &span::Attributes<'_>) -> span::Id { -/// // ... -/// # drop(span); Id::from_u64(1) -/// } - -/// fn event(&self, event: &Event<'_>) { -/// // ... -/// # drop(event); -/// } -/// -/// // ... 
-/// # fn enter(&self, _: &Id) {} -/// # fn exit(&self, _: &Id) {} -/// # fn record(&self, _: &Id, _: &Record<'_>) {} -/// # fn record_follows_from(&self, _: &Id, _: &Id) {} -/// } -/// ``` -/// -/// It is worth noting that the `tracing-subscriber` crate provides [additional -/// APIs][envfilter] for performing more sophisticated filtering, such as -/// enabling different levels based on which module or crate a span or event is -/// recorded in. -/// -/// [`DEBUG`]: Level::DEBUG -/// [`INFO`]: Level::INFO -/// [`TRACE`]: Level::TRACE -/// [`Subscriber::enabled`]: crate::subscriber::Subscriber::enabled -/// [`Subscriber::max_level_hint`]: crate::subscriber::Subscriber::max_level_hint -/// [`Subscriber`]: crate::subscriber::Subscriber -/// [envfilter]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct Level(LevelInner); - -/// A filter comparable to a verbosity [`Level`]. -/// -/// If a [`Level`] is considered less than a `LevelFilter`, it should be -/// considered enabled; if greater than or equal to the `LevelFilter`, -/// that level is disabled. See [`LevelFilter::current`] for more -/// details. -/// -/// Note that this is essentially identical to the `Level` type, but with the -/// addition of an [`OFF`] level that completely disables all trace -/// instrumentation. -/// -/// See the documentation for the [`Level`] type to see how `Level`s -/// and `LevelFilter`s interact. -/// -/// [`OFF`]: LevelFilter::OFF -#[repr(transparent)] -#[derive(Copy, Clone, Eq, PartialEq, Hash)] -pub struct LevelFilter(Option); - -/// Indicates that a string could not be parsed to a valid level. 
-#[derive(Clone, Debug)] -pub struct ParseLevelFilterError(()); - -static MAX_LEVEL: AtomicUsize = AtomicUsize::new(LevelFilter::OFF_USIZE); - -// ===== impl Metadata ===== - -impl<'a> Metadata<'a> { - /// Construct new metadata for a span or event, with a name, target, level, field - /// names, and optional source code location. - pub const fn new( - name: &'static str, - target: &'a str, - level: Level, - file: Option<&'a str>, - line: Option, - module_path: Option<&'a str>, - fields: field::FieldSet, - kind: Kind, - ) -> Self { - Metadata { - name, - target, - level, - module_path, - file, - line, - fields, - kind, - } - } - - /// Returns the names of the fields on the described span or event. - #[inline] - pub fn fields(&self) -> &field::FieldSet { - &self.fields - } - - /// Returns the level of verbosity of the described span or event. - pub fn level(&self) -> &Level { - &self.level - } - - /// Returns the name of the span. - pub fn name(&self) -> &'static str { - self.name - } - - /// Returns a string describing the part of the system where the span or - /// event that this metadata describes occurred. - /// - /// Typically, this is the module path, but alternate targets may be set - /// when spans or events are constructed. - pub fn target(&self) -> &'a str { - self.target - } - - /// Returns the path to the Rust module where the span occurred, or - /// `None` if the module path is unknown. - pub fn module_path(&self) -> Option<&'a str> { - self.module_path - } - - /// Returns the name of the source code file where the span - /// occurred, or `None` if the file is unknown - pub fn file(&self) -> Option<&'a str> { - self.file - } - - /// Returns the line number in the source code file where the span - /// occurred, or `None` if the line number is unknown. - pub fn line(&self) -> Option { - self.line - } - - /// Returns an opaque `Identifier` that uniquely identifies the callsite - /// this `Metadata` originated from. 
- #[inline] - pub fn callsite(&self) -> callsite::Identifier { - self.fields.callsite() - } - - /// Returns true if the callsite kind is `Event`. - pub fn is_event(&self) -> bool { - self.kind.is_event() - } - - /// Return true if the callsite kind is `Span`. - pub fn is_span(&self) -> bool { - self.kind.is_span() - } -} - -impl<'a> fmt::Debug for Metadata<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut meta = f.debug_struct("Metadata"); - meta.field("name", &self.name) - .field("target", &self.target) - .field("level", &self.level); - - if let Some(path) = self.module_path() { - meta.field("module_path", &path); - } - - match (self.file(), self.line()) { - (Some(file), Some(line)) => { - meta.field("location", &format_args!("{}:{}", file, line)); - } - (Some(file), None) => { - meta.field("file", &format_args!("{}", file)); - } - - // Note: a line num with no file is a kind of weird case that _probably_ never occurs... - (None, Some(line)) => { - meta.field("line", &line); - } - (None, None) => {} - }; - - meta.field("fields", &format_args!("{}", self.fields)) - .field("callsite", &self.callsite()) - .field("kind", &self.kind) - .finish() - } -} - -impl Kind { - const EVENT_BIT: u8 = 1 << 0; - const SPAN_BIT: u8 = 1 << 1; - const HINT_BIT: u8 = 1 << 2; - - /// `Event` callsite - pub const EVENT: Kind = Kind(Self::EVENT_BIT); - - /// `Span` callsite - pub const SPAN: Kind = Kind(Self::SPAN_BIT); - - /// `enabled!` callsite. [`Subscriber`][`crate::subscriber::Subscriber`]s can assume - /// this `Kind` means they will never recieve a - /// full event with this [`Metadata`]. 
- pub const HINT: Kind = Kind(Self::HINT_BIT); - - /// Return true if the callsite kind is `Span` - pub fn is_span(&self) -> bool { - self.0 & Self::SPAN_BIT == Self::SPAN_BIT - } - - /// Return true if the callsite kind is `Event` - pub fn is_event(&self) -> bool { - self.0 & Self::EVENT_BIT == Self::EVENT_BIT - } - - /// Return true if the callsite kind is `Hint` - pub fn is_hint(&self) -> bool { - self.0 & Self::HINT_BIT == Self::HINT_BIT - } - - /// Sets that this `Kind` is a [hint](Self::HINT). - /// - /// This can be called on [`SPAN`](Self::SPAN) and [`EVENT`](Self::EVENT) - /// kinds to construct a hint callsite that also counts as a span or event. - pub const fn hint(self) -> Self { - Self(self.0 | Self::HINT_BIT) - } -} - -impl fmt::Debug for Kind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("Kind(")?; - let mut has_bits = false; - let mut write_bit = |name: &str| { - if has_bits { - f.write_str(" | ")?; - } - f.write_str(name)?; - has_bits = true; - Ok(()) - }; - - if self.is_event() { - write_bit("EVENT")?; - } - - if self.is_span() { - write_bit("SPAN")?; - } - - if self.is_hint() { - write_bit("HINT")?; - } - - // if none of the expected bits were set, something is messed up, so - // just print the bits for debugging purposes - if !has_bits { - write!(f, "{:#b}", self.0)?; - } - - f.write_str(")") - } -} - -impl<'a> Eq for Metadata<'a> {} - -impl<'a> PartialEq for Metadata<'a> { - #[inline] - fn eq(&self, other: &Self) -> bool { - if core::ptr::eq(&self, &other) { - true - } else if cfg!(not(debug_assertions)) { - // In a well-behaving application, two `Metadata` can be assumed to - // be totally equal so long as they share the same callsite. - self.callsite() == other.callsite() - } else { - // However, when debug-assertions are enabled, do not assume that - // the application is well-behaving; check every field of `Metadata` - // for equality. 
- - // `Metadata` is destructured here to ensure a compile-error if the - // fields of `Metadata` change. - let Metadata { - name: lhs_name, - target: lhs_target, - level: lhs_level, - module_path: lhs_module_path, - file: lhs_file, - line: lhs_line, - fields: lhs_fields, - kind: lhs_kind, - } = self; - - let Metadata { - name: rhs_name, - target: rhs_target, - level: rhs_level, - module_path: rhs_module_path, - file: rhs_file, - line: rhs_line, - fields: rhs_fields, - kind: rhs_kind, - } = &other; - - // The initial comparison of callsites is purely an optimization; - // it can be removed without affecting the overall semantics of the - // expression. - self.callsite() == other.callsite() - && lhs_name == rhs_name - && lhs_target == rhs_target - && lhs_level == rhs_level - && lhs_module_path == rhs_module_path - && lhs_file == rhs_file - && lhs_line == rhs_line - && lhs_fields == rhs_fields - && lhs_kind == rhs_kind - } - } -} - -// ===== impl Level ===== - -impl Level { - /// The "error" level. - /// - /// Designates very serious errors. - pub const ERROR: Level = Level(LevelInner::Error); - /// The "warn" level. - /// - /// Designates hazardous situations. - pub const WARN: Level = Level(LevelInner::Warn); - /// The "info" level. - /// - /// Designates useful information. - pub const INFO: Level = Level(LevelInner::Info); - /// The "debug" level. - /// - /// Designates lower priority information. - pub const DEBUG: Level = Level(LevelInner::Debug); - /// The "trace" level. - /// - /// Designates very low priority, often extremely verbose, information. - pub const TRACE: Level = Level(LevelInner::Trace); - - /// Returns the string representation of the `Level`. - /// - /// This returns the same string as the `fmt::Display` implementation. 
- pub fn as_str(&self) -> &'static str { - match *self { - Level::TRACE => "TRACE", - Level::DEBUG => "DEBUG", - Level::INFO => "INFO", - Level::WARN => "WARN", - Level::ERROR => "ERROR", - } - } -} - -impl fmt::Display for Level { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Level::TRACE => f.pad("TRACE"), - Level::DEBUG => f.pad("DEBUG"), - Level::INFO => f.pad("INFO"), - Level::WARN => f.pad("WARN"), - Level::ERROR => f.pad("ERROR"), - } - } -} - -#[cfg(feature = "std")] -#[cfg_attr(docsrs, doc(cfg(feature = "std")))] -impl crate::stdlib::error::Error for ParseLevelError {} - -impl FromStr for Level { - type Err = ParseLevelError; - fn from_str(s: &str) -> Result { - s.parse::() - .map_err(|_| ParseLevelError { _p: () }) - .and_then(|num| match num { - 1 => Ok(Level::ERROR), - 2 => Ok(Level::WARN), - 3 => Ok(Level::INFO), - 4 => Ok(Level::DEBUG), - 5 => Ok(Level::TRACE), - _ => Err(ParseLevelError { _p: () }), - }) - .or_else(|_| match s { - s if s.eq_ignore_ascii_case("error") => Ok(Level::ERROR), - s if s.eq_ignore_ascii_case("warn") => Ok(Level::WARN), - s if s.eq_ignore_ascii_case("info") => Ok(Level::INFO), - s if s.eq_ignore_ascii_case("debug") => Ok(Level::DEBUG), - s if s.eq_ignore_ascii_case("trace") => Ok(Level::TRACE), - _ => Err(ParseLevelError { _p: () }), - }) - } -} - -#[repr(usize)] -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -enum LevelInner { - /// The "trace" level. - /// - /// Designates very low priority, often extremely verbose, information. - Trace = 0, - /// The "debug" level. - /// - /// Designates lower priority information. - Debug = 1, - /// The "info" level. - /// - /// Designates useful information. - Info = 2, - /// The "warn" level. - /// - /// Designates hazardous situations. - Warn = 3, - /// The "error" level. - /// - /// Designates very serious errors. 
- Error = 4, -} - -// === impl LevelFilter === - -impl From for LevelFilter { - #[inline] - fn from(level: Level) -> Self { - Self::from_level(level) - } -} - -impl From> for LevelFilter { - #[inline] - fn from(level: Option) -> Self { - Self(level) - } -} - -impl From for Option { - #[inline] - fn from(filter: LevelFilter) -> Self { - filter.into_level() - } -} - -impl LevelFilter { - /// The "off" level. - /// - /// Designates that trace instrumentation should be completely disabled. - pub const OFF: LevelFilter = LevelFilter(None); - /// The "error" level. - /// - /// Designates very serious errors. - pub const ERROR: LevelFilter = LevelFilter::from_level(Level::ERROR); - /// The "warn" level. - /// - /// Designates hazardous situations. - pub const WARN: LevelFilter = LevelFilter::from_level(Level::WARN); - /// The "info" level. - /// - /// Designates useful information. - pub const INFO: LevelFilter = LevelFilter::from_level(Level::INFO); - /// The "debug" level. - /// - /// Designates lower priority information. - pub const DEBUG: LevelFilter = LevelFilter::from_level(Level::DEBUG); - /// The "trace" level. - /// - /// Designates very low priority, often extremely verbose, information. - pub const TRACE: LevelFilter = LevelFilter(Some(Level::TRACE)); - - /// Returns a `LevelFilter` that enables spans and events with verbosity up - /// to and including `level`. - pub const fn from_level(level: Level) -> Self { - Self(Some(level)) - } - - /// Returns the most verbose [`Level`] that this filter accepts, or `None` - /// if it is [`OFF`]. - /// - /// [`OFF`]: LevelFilter::OFF - pub const fn into_level(self) -> Option { - self.0 - } - - // These consts are necessary because `as` casts are not allowed as - // match patterns. 
- const ERROR_USIZE: usize = LevelInner::Error as usize; - const WARN_USIZE: usize = LevelInner::Warn as usize; - const INFO_USIZE: usize = LevelInner::Info as usize; - const DEBUG_USIZE: usize = LevelInner::Debug as usize; - const TRACE_USIZE: usize = LevelInner::Trace as usize; - // Using the value of the last variant + 1 ensures that we match the value - // for `Option::None` as selected by the niche optimization for - // `LevelFilter`. If this is the case, converting a `usize` value into a - // `LevelFilter` (in `LevelFilter::current`) will be an identity conversion, - // rather than generating a lookup table. - const OFF_USIZE: usize = LevelInner::Error as usize + 1; - - /// Returns a `LevelFilter` that matches the most verbose [`Level`] that any - /// currently active [`Subscriber`] will enable. - /// - /// User code should treat this as a *hint*. If a given span or event has a - /// level *higher* than the returned `LevelFilter`, it will not be enabled. - /// However, if the level is less than or equal to this value, the span or - /// event is *not* guaranteed to be enabled; the subscriber will still - /// filter each callsite individually. - /// - /// Therefore, comparing a given span or event's level to the returned - /// `LevelFilter` **can** be used for determining if something is - /// *disabled*, but **should not** be used for determining if something is - /// *enabled*. - /// - /// [`Level`]: super::Level - /// [`Subscriber`]: super::Subscriber - #[inline(always)] - pub fn current() -> Self { - match MAX_LEVEL.load(Ordering::Relaxed) { - Self::ERROR_USIZE => Self::ERROR, - Self::WARN_USIZE => Self::WARN, - Self::INFO_USIZE => Self::INFO, - Self::DEBUG_USIZE => Self::DEBUG, - Self::TRACE_USIZE => Self::TRACE, - Self::OFF_USIZE => Self::OFF, - #[cfg(debug_assertions)] - unknown => unreachable!( - "/!\\ `LevelFilter` representation seems to have changed! /!\\ \n\ - This is a bug (and it's pretty bad). Please contact the `tracing` \ - maintainers. 
Thank you and I'm sorry.\n \ - The offending repr was: {:?}", - unknown, - ), - #[cfg(not(debug_assertions))] - _ => unsafe { - // Using `unreachable_unchecked` here (rather than - // `unreachable!()`) is necessary to ensure that rustc generates - // an identity conversion from integer -> discriminant, rather - // than generating a lookup table. We want to ensure this - // function is a single `mov` instruction (on x86) if at all - // possible, because it is called *every* time a span/event - // callsite is hit; and it is (potentially) the only code in the - // hottest path for skipping a majority of callsites when level - // filtering is in use. - // - // safety: This branch is only truly unreachable if we guarantee - // that no values other than the possible enum discriminants - // will *ever* be present. The `AtomicUsize` is initialized to - // the `OFF` value. It is only set by the `set_max` function, - // which takes a `LevelFilter` as a parameter. This restricts - // the inputs to `set_max` to the set of valid discriminants. - // Therefore, **as long as `MAX_VALUE` is only ever set by - // `set_max`**, this is safe. - crate::stdlib::hint::unreachable_unchecked() - }, - } - } - - pub(crate) fn set_max(LevelFilter(level): LevelFilter) { - let val = match level { - Some(Level(level)) => level as usize, - None => Self::OFF_USIZE, - }; - - // using an AcqRel swap ensures an ordered relationship of writes to the - // max level. 
- MAX_LEVEL.swap(val, Ordering::AcqRel); - } -} - -impl fmt::Display for LevelFilter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - LevelFilter::OFF => f.pad("off"), - LevelFilter::ERROR => f.pad("error"), - LevelFilter::WARN => f.pad("warn"), - LevelFilter::INFO => f.pad("info"), - LevelFilter::DEBUG => f.pad("debug"), - LevelFilter::TRACE => f.pad("trace"), - } - } -} - -impl fmt::Debug for LevelFilter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - LevelFilter::OFF => f.pad("LevelFilter::OFF"), - LevelFilter::ERROR => f.pad("LevelFilter::ERROR"), - LevelFilter::WARN => f.pad("LevelFilter::WARN"), - LevelFilter::INFO => f.pad("LevelFilter::INFO"), - LevelFilter::DEBUG => f.pad("LevelFilter::DEBUG"), - LevelFilter::TRACE => f.pad("LevelFilter::TRACE"), - } - } -} - -impl FromStr for LevelFilter { - type Err = ParseLevelFilterError; - fn from_str(from: &str) -> Result { - from.parse::() - .ok() - .and_then(|num| match num { - 0 => Some(LevelFilter::OFF), - 1 => Some(LevelFilter::ERROR), - 2 => Some(LevelFilter::WARN), - 3 => Some(LevelFilter::INFO), - 4 => Some(LevelFilter::DEBUG), - 5 => Some(LevelFilter::TRACE), - _ => None, - }) - .or_else(|| match from { - "" => Some(LevelFilter::ERROR), - s if s.eq_ignore_ascii_case("error") => Some(LevelFilter::ERROR), - s if s.eq_ignore_ascii_case("warn") => Some(LevelFilter::WARN), - s if s.eq_ignore_ascii_case("info") => Some(LevelFilter::INFO), - s if s.eq_ignore_ascii_case("debug") => Some(LevelFilter::DEBUG), - s if s.eq_ignore_ascii_case("trace") => Some(LevelFilter::TRACE), - s if s.eq_ignore_ascii_case("off") => Some(LevelFilter::OFF), - _ => None, - }) - .ok_or(ParseLevelFilterError(())) - } -} - -/// Returned if parsing a `Level` fails. 
-#[derive(Debug)] -pub struct ParseLevelError { - _p: (), -} - -impl fmt::Display for ParseLevelError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad( - "error parsing level: expected one of \"error\", \"warn\", \ - \"info\", \"debug\", \"trace\", or a number 1-5", - ) - } -} - -impl fmt::Display for ParseLevelFilterError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad( - "error parsing level filter: expected one of \"off\", \"error\", \ - \"warn\", \"info\", \"debug\", \"trace\", or a number 0-5", - ) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for ParseLevelFilterError {} - -// ==== Level and LevelFilter comparisons ==== - -// /!\ BIG, IMPORTANT WARNING /!\ -// Do NOT mess with these implementations! They are hand-written for a reason! -// -// Since comparing `Level`s and `LevelFilter`s happens in a *very* hot path -// (potentially, every time a span or event macro is hit, regardless of whether -// or not is enabled), we *need* to ensure that these comparisons are as fast as -// possible. Therefore, we have some requirements: -// -// 1. We want to do our best to ensure that rustc will generate integer-integer -// comparisons wherever possible. -// -// The derived `Ord`/`PartialOrd` impls for `LevelFilter` will not do this, -// because `LevelFilter`s are represented by `Option`, rather than as -// a separate `#[repr(usize)]` enum. This was (unfortunately) necessary for -// backwards-compatibility reasons, as the `tracing` crate's original -// version of `LevelFilter` defined `const fn` conversions between `Level`s -// and `LevelFilter`, so we're stuck with the `Option` repr. -// Therefore, we need hand-written `PartialOrd` impls that cast both sides of -// the comparison to `usize`s, to force the compiler to generate integer -// compares. -// -// 2. The hottest `Level`/`LevelFilter` comparison, the one that happens every -// time a callsite is hit, occurs *within the `tracing` crate's macros*. 
-// This means that the comparison is happening *inside* a crate that -// *depends* on `tracing-core`, not in `tracing-core` itself. The compiler -// will only inline function calls across crate boundaries if the called -// function is annotated with an `#[inline]` attribute, and we *definitely* -// want the comparison functions to be inlined: as previously mentioned, they -// should compile down to a single integer comparison on release builds, and -// it seems really sad to push an entire stack frame to call a function -// consisting of one `cmp` instruction! -// -// Therefore, we need to ensure that all the comparison methods have -// `#[inline]` or `#[inline(always)]` attributes. It's not sufficient to just -// add the attribute to `partial_cmp` in a manual implementation of the -// trait, since it's the comparison operators (`lt`, `le`, `gt`, and `ge`) -// that will actually be *used*, and the default implementation of *those* -// methods, which calls `partial_cmp`, does not have an inline annotation. -// -// 3. We need the comparisons to be inverted. The discriminants for the -// `LevelInner` enum are assigned in "backwards" order, with `TRACE` having -// the *lowest* value. However, we want `TRACE` to compare greater-than all -// other levels. -// -// Why are the numeric values inverted? In order to ensure that `LevelFilter` -// (which, as previously mentioned, *has* to be internally represented by an -// `Option`) compiles down to a single integer value. This is -// necessary for storing the global max in an `AtomicUsize`, and for ensuring -// that we use fast integer-integer comparisons, as mentioned previously. In -// order to ensure this, we exploit the niche optimization. The niche -// optimization for `Option<{enum with a numeric repr}>` will choose -// `(HIGHEST_DISCRIMINANT_VALUE + 1)` as the representation for `None`. -// Therefore, the integer representation of `LevelFilter::OFF` (which is -// `None`) will be the number 5. 
`OFF` must compare higher than every other -// level in order for it to filter as expected. Since we want to use a single -// `cmp` instruction, we can't special-case the integer value of `OFF` to -// compare higher, as that will generate more code. Instead, we need it to be -// on one end of the enum, with `ERROR` on the opposite end, so we assign the -// value 0 to `ERROR`. -// -// This *does* mean that when parsing `LevelFilter`s or `Level`s from -// `String`s, the integer values are inverted, but that doesn't happen in a -// hot path. -// -// Note that we manually invert the comparisons by swapping the left-hand and -// right-hand side. Using `Ordering::reverse` generates significantly worse -// code (per Matt Godbolt's Compiler Explorer). -// -// Anyway, that's a brief history of why this code is the way it is. Don't -// change it unless you know what you're doing. - -impl PartialEq for Level { - #[inline(always)] - fn eq(&self, other: &LevelFilter) -> bool { - self.0 as usize == filter_as_usize(&other.0) - } -} - -impl PartialOrd for Level { - #[inline(always)] - fn partial_cmp(&self, other: &Level) -> Option { - Some(self.cmp(other)) - } - - #[inline(always)] - fn lt(&self, other: &Level) -> bool { - (other.0 as usize) < (self.0 as usize) - } - - #[inline(always)] - fn le(&self, other: &Level) -> bool { - (other.0 as usize) <= (self.0 as usize) - } - - #[inline(always)] - fn gt(&self, other: &Level) -> bool { - (other.0 as usize) > (self.0 as usize) - } - - #[inline(always)] - fn ge(&self, other: &Level) -> bool { - (other.0 as usize) >= (self.0 as usize) - } -} - -impl Ord for Level { - #[inline(always)] - fn cmp(&self, other: &Self) -> cmp::Ordering { - (other.0 as usize).cmp(&(self.0 as usize)) - } -} - -impl PartialOrd for Level { - #[inline(always)] - fn partial_cmp(&self, other: &LevelFilter) -> Option { - Some(filter_as_usize(&other.0).cmp(&(self.0 as usize))) - } - - #[inline(always)] - fn lt(&self, other: &LevelFilter) -> bool { - 
filter_as_usize(&other.0) < (self.0 as usize) - } - - #[inline(always)] - fn le(&self, other: &LevelFilter) -> bool { - filter_as_usize(&other.0) <= (self.0 as usize) - } - - #[inline(always)] - fn gt(&self, other: &LevelFilter) -> bool { - filter_as_usize(&other.0) > (self.0 as usize) - } - - #[inline(always)] - fn ge(&self, other: &LevelFilter) -> bool { - filter_as_usize(&other.0) >= (self.0 as usize) - } -} - -#[inline(always)] -fn filter_as_usize(x: &Option) -> usize { - match x { - Some(Level(f)) => *f as usize, - None => LevelFilter::OFF_USIZE, - } -} - -impl PartialEq for LevelFilter { - #[inline(always)] - fn eq(&self, other: &Level) -> bool { - filter_as_usize(&self.0) == other.0 as usize - } -} - -impl PartialOrd for LevelFilter { - #[inline(always)] - fn partial_cmp(&self, other: &LevelFilter) -> Option { - Some(self.cmp(other)) - } - - #[inline(always)] - fn lt(&self, other: &LevelFilter) -> bool { - filter_as_usize(&other.0) < filter_as_usize(&self.0) - } - - #[inline(always)] - fn le(&self, other: &LevelFilter) -> bool { - filter_as_usize(&other.0) <= filter_as_usize(&self.0) - } - - #[inline(always)] - fn gt(&self, other: &LevelFilter) -> bool { - filter_as_usize(&other.0) > filter_as_usize(&self.0) - } - - #[inline(always)] - fn ge(&self, other: &LevelFilter) -> bool { - filter_as_usize(&other.0) >= filter_as_usize(&self.0) - } -} - -impl Ord for LevelFilter { - #[inline(always)] - fn cmp(&self, other: &Self) -> cmp::Ordering { - filter_as_usize(&other.0).cmp(&filter_as_usize(&self.0)) - } -} - -impl PartialOrd for LevelFilter { - #[inline(always)] - fn partial_cmp(&self, other: &Level) -> Option { - Some((other.0 as usize).cmp(&filter_as_usize(&self.0))) - } - - #[inline(always)] - fn lt(&self, other: &Level) -> bool { - (other.0 as usize) < filter_as_usize(&self.0) - } - - #[inline(always)] - fn le(&self, other: &Level) -> bool { - (other.0 as usize) <= filter_as_usize(&self.0) - } - - #[inline(always)] - fn gt(&self, other: &Level) -> bool { - 
(other.0 as usize) > filter_as_usize(&self.0) - } - - #[inline(always)] - fn ge(&self, other: &Level) -> bool { - (other.0 as usize) >= filter_as_usize(&self.0) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::stdlib::mem; - - #[test] - fn level_from_str() { - assert_eq!("error".parse::().unwrap(), Level::ERROR); - assert_eq!("4".parse::().unwrap(), Level::DEBUG); - assert!("0".parse::().is_err()) - } - - #[test] - fn filter_level_conversion() { - let mapping = [ - (LevelFilter::OFF, None), - (LevelFilter::ERROR, Some(Level::ERROR)), - (LevelFilter::WARN, Some(Level::WARN)), - (LevelFilter::INFO, Some(Level::INFO)), - (LevelFilter::DEBUG, Some(Level::DEBUG)), - (LevelFilter::TRACE, Some(Level::TRACE)), - ]; - for (filter, level) in mapping.iter() { - assert_eq!(filter.into_level(), *level); - match level { - Some(level) => { - let actual: LevelFilter = (*level).into(); - assert_eq!(actual, *filter); - } - None => { - let actual: LevelFilter = None.into(); - assert_eq!(actual, *filter); - } - } - } - } - - #[test] - fn level_filter_is_usize_sized() { - assert_eq!( - mem::size_of::(), - mem::size_of::(), - "`LevelFilter` is no longer `usize`-sized! global MAX_LEVEL may now be invalid!" - ) - } - - #[test] - fn level_filter_reprs() { - let mapping = [ - (LevelFilter::OFF, LevelInner::Error as usize + 1), - (LevelFilter::ERROR, LevelInner::Error as usize), - (LevelFilter::WARN, LevelInner::Warn as usize), - (LevelFilter::INFO, LevelInner::Info as usize), - (LevelFilter::DEBUG, LevelInner::Debug as usize), - (LevelFilter::TRACE, LevelInner::Trace as usize), - ]; - for &(filter, expected) in &mapping { - let repr = unsafe { - // safety: The entire purpose of this test is to assert that the - // actual repr matches what we expect it to be --- we're testing - // that *other* unsafe code is sound using the transmuted value. - // We're not going to do anything with it that might be unsound. 
- mem::transmute::(filter) - }; - assert_eq!(expected, repr, "repr changed for {:?}", filter) - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/parent.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/parent.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/parent.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/parent.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,11 +0,0 @@ -use crate::span::Id; - -#[derive(Debug)] -pub(crate) enum Parent { - /// The new span will be a root span. - Root, - /// The new span will be rooted in the current span. - Current, - /// The new span has an explicitly-specified parent. - Explicit(Id), -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/span.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/span.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/span.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/span.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,341 +0,0 @@ -//! Spans represent periods of time in the execution of a program. -use crate::field::FieldSet; -use crate::parent::Parent; -use crate::stdlib::num::NonZeroU64; -use crate::{field, Metadata}; - -/// Identifies a span within the context of a subscriber. -/// -/// They are generated by [`Subscriber`]s for each span as it is created, by -/// the [`new_span`] trait method. See the documentation for that method for -/// more information on span ID generation. -/// -/// [`Subscriber`]: super::subscriber::Subscriber -/// [`new_span`]: super::subscriber::Subscriber::new_span -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct Id(NonZeroU64); - -/// Attributes provided to a `Subscriber` describing a new span when it is -/// created. -#[derive(Debug)] -pub struct Attributes<'a> { - metadata: &'static Metadata<'static>, - values: &'a field::ValueSet<'a>, - parent: Parent, -} - -/// A set of fields recorded by a span. 
-#[derive(Debug)] -pub struct Record<'a> { - values: &'a field::ValueSet<'a>, -} - -/// Indicates what [the `Subscriber` considers] the "current" span. -/// -/// As subscribers may not track a notion of a current span, this has three -/// possible states: -/// - "unknown", indicating that the subscriber does not track a current span, -/// - "none", indicating that the current context is known to not be in a span, -/// - "some", with the current span's [`Id`] and [`Metadata`]. -/// -/// [the `Subscriber` considers]: super::subscriber::Subscriber::current_span -/// [`Metadata`]: super::metadata::Metadata -#[derive(Debug)] -pub struct Current { - inner: CurrentInner, -} - -#[derive(Debug)] -enum CurrentInner { - Current { - id: Id, - metadata: &'static Metadata<'static>, - }, - None, - Unknown, -} - -// ===== impl Span ===== - -impl Id { - /// Constructs a new span ID from the given `u64`. - /// - ///
-    ///     Note: Span IDs must be greater than zero.
-    /// 
- /// - /// # Panics - /// - If the provided `u64` is 0. - pub fn from_u64(u: u64) -> Self { - Id(NonZeroU64::new(u).expect("span IDs must be > 0")) - } - - /// Constructs a new span ID from the given `NonZeroU64`. - /// - /// Unlike [`Id::from_u64`](Id::from_u64()), this will never panic. - #[inline] - pub const fn from_non_zero_u64(id: NonZeroU64) -> Self { - Id(id) - } - - // Allow `into` by-ref since we don't want to impl Copy for Id - #[allow(clippy::wrong_self_convention)] - /// Returns the span's ID as a `u64`. - pub fn into_u64(&self) -> u64 { - self.0.get() - } - - // Allow `into` by-ref since we don't want to impl Copy for Id - #[allow(clippy::wrong_self_convention)] - /// Returns the span's ID as a `NonZeroU64`. - #[inline] - pub const fn into_non_zero_u64(&self) -> NonZeroU64 { - self.0 - } -} - -impl<'a> From<&'a Id> for Option { - fn from(id: &'a Id) -> Self { - Some(id.clone()) - } -} - -// ===== impl Attributes ===== - -impl<'a> Attributes<'a> { - /// Returns `Attributes` describing a new child span of the current span, - /// with the provided metadata and values. - pub fn new(metadata: &'static Metadata<'static>, values: &'a field::ValueSet<'a>) -> Self { - Attributes { - metadata, - values, - parent: Parent::Current, - } - } - - /// Returns `Attributes` describing a new span at the root of its own trace - /// tree, with the provided metadata and values. - pub fn new_root(metadata: &'static Metadata<'static>, values: &'a field::ValueSet<'a>) -> Self { - Attributes { - metadata, - values, - parent: Parent::Root, - } - } - - /// Returns `Attributes` describing a new child span of the specified - /// parent span, with the provided metadata and values. - pub fn child_of( - parent: Id, - metadata: &'static Metadata<'static>, - values: &'a field::ValueSet<'a>, - ) -> Self { - Attributes { - metadata, - values, - parent: Parent::Explicit(parent), - } - } - - /// Returns a reference to the new span's metadata. 
- pub fn metadata(&self) -> &'static Metadata<'static> { - self.metadata - } - - /// Returns a reference to a `ValueSet` containing any values the new span - /// was created with. - pub fn values(&self) -> &field::ValueSet<'a> { - self.values - } - - /// Returns true if the new span should be a root. - pub fn is_root(&self) -> bool { - matches!(self.parent, Parent::Root) - } - - /// Returns true if the new span's parent should be determined based on the - /// current context. - /// - /// If this is true and the current thread is currently inside a span, then - /// that span should be the new span's parent. Otherwise, if the current - /// thread is _not_ inside a span, then the new span will be the root of its - /// own trace tree. - pub fn is_contextual(&self) -> bool { - matches!(self.parent, Parent::Current) - } - - /// Returns the new span's explicitly-specified parent, if there is one. - /// - /// Otherwise (if the new span is a root or is a child of the current span), - /// returns `None`. - pub fn parent(&self) -> Option<&Id> { - match self.parent { - Parent::Explicit(ref p) => Some(p), - _ => None, - } - } - - /// Records all the fields in this set of `Attributes` with the provided - /// [Visitor]. - /// - /// [visitor]: super::field::Visit - pub fn record(&self, visitor: &mut dyn field::Visit) { - self.values.record(visitor) - } - - /// Returns `true` if this set of `Attributes` contains a value for the - /// given `Field`. - pub fn contains(&self, field: &field::Field) -> bool { - self.values.contains(field) - } - - /// Returns true if this set of `Attributes` contains _no_ values. - pub fn is_empty(&self) -> bool { - self.values.is_empty() - } - - /// Returns the set of all [fields] defined by this span's [`Metadata`]. - /// - /// Note that the [`FieldSet`] returned by this method includes *all* the - /// fields declared by this span, not just those with values that are recorded - /// as part of this set of `Attributes`. 
Other fields with values not present in - /// this `Attributes`' value set may [record] values later. - /// - /// [fields]: crate::field - /// [record]: Attributes::record() - /// [`Metadata`]: crate::metadata::Metadata - /// [`FieldSet`]: crate::field::FieldSet - pub fn fields(&self) -> &FieldSet { - self.values.field_set() - } -} - -// ===== impl Record ===== - -impl<'a> Record<'a> { - /// Constructs a new `Record` from a `ValueSet`. - pub fn new(values: &'a field::ValueSet<'a>) -> Self { - Self { values } - } - - /// Records all the fields in this `Record` with the provided [Visitor]. - /// - /// [visitor]: super::field::Visit - pub fn record(&self, visitor: &mut dyn field::Visit) { - self.values.record(visitor) - } - - /// Returns the number of fields that would be visited from this `Record` - /// when [`Record::record()`] is called - /// - /// [`Record::record()`]: Record::record() - pub fn len(&self) -> usize { - self.values.len() - } - - /// Returns `true` if this `Record` contains a value for the given `Field`. - pub fn contains(&self, field: &field::Field) -> bool { - self.values.contains(field) - } - - /// Returns true if this `Record` contains _no_ values. - pub fn is_empty(&self) -> bool { - self.values.is_empty() - } -} - -// ===== impl Current ===== - -impl Current { - /// Constructs a new `Current` that indicates the current context is a span - /// with the given `metadata` and `metadata`. - pub fn new(id: Id, metadata: &'static Metadata<'static>) -> Self { - Self { - inner: CurrentInner::Current { id, metadata }, - } - } - - /// Constructs a new `Current` that indicates the current context is *not* - /// in a span. - pub fn none() -> Self { - Self { - inner: CurrentInner::None, - } - } - - /// Constructs a new `Current` that indicates the `Subscriber` does not - /// track a current span. 
- pub(crate) fn unknown() -> Self { - Self { - inner: CurrentInner::Unknown, - } - } - - /// Returns `true` if the `Subscriber` that constructed this `Current` tracks a - /// current span. - /// - /// If this returns `true` and [`id`], [`metadata`], or [`into_inner`] - /// return `None`, that indicates that we are currently known to *not* be - /// inside a span. If this returns `false`, those methods will also return - /// `None`, but in this case, that is because the subscriber does not keep - /// track of the currently-entered span. - /// - /// [`id`]: Current::id() - /// [`metadata`]: Current::metadata() - /// [`into_inner`]: Current::into_inner() - pub fn is_known(&self) -> bool { - !matches!(self.inner, CurrentInner::Unknown) - } - - /// Consumes `self` and returns the span `Id` and `Metadata` of the current - /// span, if one exists and is known. - pub fn into_inner(self) -> Option<(Id, &'static Metadata<'static>)> { - match self.inner { - CurrentInner::Current { id, metadata } => Some((id, metadata)), - _ => None, - } - } - - /// Borrows the `Id` of the current span, if one exists and is known. - pub fn id(&self) -> Option<&Id> { - match self.inner { - CurrentInner::Current { ref id, .. } => Some(id), - _ => None, - } - } - - /// Borrows the `Metadata` of the current span, if one exists and is known. - pub fn metadata(&self) -> Option<&'static Metadata<'static>> { - match self.inner { - CurrentInner::Current { metadata, .. } => Some(metadata), - _ => None, - } - } -} - -impl<'a> From<&'a Current> for Option<&'a Id> { - fn from(cur: &'a Current) -> Self { - cur.id() - } -} - -impl<'a> From<&'a Current> for Option { - fn from(cur: &'a Current) -> Self { - cur.id().cloned() - } -} - -impl From for Option { - fn from(cur: Current) -> Self { - match cur.inner { - CurrentInner::Current { id, .. 
} => Some(id), - _ => None, - } - } -} - -impl<'a> From<&'a Current> for Option<&'static Metadata<'static>> { - fn from(cur: &'a Current) -> Self { - cur.metadata() - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/spin/LICENSE s390-tools-2.33.1/rust-vendor/tracing-core/src/spin/LICENSE --- s390-tools-2.31.0/rust-vendor/tracing-core/src/spin/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/spin/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Mathijs van de Nes - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/spin/mod.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/spin/mod.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/spin/mod.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/spin/mod.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,7 +0,0 @@ -//! 
Synchronization primitives based on spinning - -pub(crate) use mutex::*; -pub(crate) use once::Once; - -mod mutex; -mod once; diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/spin/mutex.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/spin/mutex.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/spin/mutex.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/spin/mutex.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,118 +0,0 @@ -use core::cell::UnsafeCell; -use core::default::Default; -use core::fmt; -use core::hint; -use core::marker::Sync; -use core::ops::{Deref, DerefMut, Drop}; -use core::option::Option::{self, None, Some}; -use core::sync::atomic::{AtomicBool, Ordering}; - -/// This type provides MUTual EXclusion based on spinning. -pub(crate) struct Mutex { - lock: AtomicBool, - data: UnsafeCell, -} - -/// A guard to which the protected data can be accessed -/// -/// When the guard falls out of scope it will release the lock. -#[derive(Debug)] -pub(crate) struct MutexGuard<'a, T: ?Sized> { - lock: &'a AtomicBool, - data: &'a mut T, -} - -// Same unsafe impls as `std::sync::Mutex` -unsafe impl Sync for Mutex {} -unsafe impl Send for Mutex {} - -impl Mutex { - /// Creates a new spinlock wrapping the supplied data. - pub(crate) const fn new(user_data: T) -> Mutex { - Mutex { - lock: AtomicBool::new(false), - data: UnsafeCell::new(user_data), - } - } -} - -impl Mutex { - fn obtain_lock(&self) { - while self - .lock - .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) - .is_err() - { - // Wait until the lock looks unlocked before retrying - while self.lock.load(Ordering::Relaxed) { - hint::spin_loop(); - } - } - } - - /// Locks the spinlock and returns a guard. - /// - /// The returned value may be dereferenced for data access - /// and the lock will be dropped when the guard falls out of scope. 
- pub(crate) fn lock(&self) -> MutexGuard<'_, T> { - self.obtain_lock(); - MutexGuard { - lock: &self.lock, - data: unsafe { &mut *self.data.get() }, - } - } - - /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns - /// a guard within Some. - pub(crate) fn try_lock(&self) -> Option> { - if self - .lock - .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - Some(MutexGuard { - lock: &self.lock, - data: unsafe { &mut *self.data.get() }, - }) - } else { - None - } - } -} - -impl fmt::Debug for Mutex { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.try_lock() { - Some(guard) => write!(f, "Mutex {{ data: ") - .and_then(|()| (&*guard).fmt(f)) - .and_then(|()| write!(f, "}}")), - None => write!(f, "Mutex {{ }}"), - } - } -} - -impl Default for Mutex { - fn default() -> Mutex { - Mutex::new(Default::default()) - } -} - -impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> { - type Target = T; - fn deref<'b>(&'b self) -> &'b T { - &*self.data - } -} - -impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { - fn deref_mut<'b>(&'b mut self) -> &'b mut T { - &mut *self.data - } -} - -impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> { - /// The dropping of the MutexGuard will release the lock it was created from. - fn drop(&mut self) { - self.lock.store(false, Ordering::Release); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/spin/once.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/spin/once.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/spin/once.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/spin/once.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,158 +0,0 @@ -use core::cell::UnsafeCell; -use core::fmt; -use core::hint::spin_loop; -use core::sync::atomic::{AtomicUsize, Ordering}; - -/// A synchronization primitive which can be used to run a one-time global -/// initialization. 
Unlike its std equivalent, this is generalized so that the -/// closure returns a value and it is stored. Once therefore acts something like -/// a future, too. -pub struct Once { - state: AtomicUsize, - data: UnsafeCell>, // TODO remove option and use mem::uninitialized -} - -impl fmt::Debug for Once { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.r#try() { - Some(s) => write!(f, "Once {{ data: ") - .and_then(|()| s.fmt(f)) - .and_then(|()| write!(f, "}}")), - None => write!(f, "Once {{ }}"), - } - } -} - -// Same unsafe impls as `std::sync::RwLock`, because this also allows for -// concurrent reads. -unsafe impl Sync for Once {} -unsafe impl Send for Once {} - -// Four states that a Once can be in, encoded into the lower bits of `state` in -// the Once structure. -const INCOMPLETE: usize = 0x0; -const RUNNING: usize = 0x1; -const COMPLETE: usize = 0x2; -const PANICKED: usize = 0x3; - -use core::hint::unreachable_unchecked as unreachable; - -impl Once { - /// Initialization constant of `Once`. - pub const INIT: Self = Once { - state: AtomicUsize::new(INCOMPLETE), - data: UnsafeCell::new(None), - }; - - /// Creates a new `Once` value. - pub const fn new() -> Once { - Self::INIT - } - - fn force_get<'a>(&'a self) -> &'a T { - match unsafe { &*self.data.get() }.as_ref() { - None => unsafe { unreachable() }, - Some(p) => p, - } - } - - /// Performs an initialization routine once and only once. The given closure - /// will be executed if this is the first time `call_once` has been called, - /// and otherwise the routine will *not* be invoked. - /// - /// This method will block the calling thread if another initialization - /// routine is currently running. - /// - /// When this function returns, it is guaranteed that some initialization - /// has run and completed (it may not be the closure specified). The - /// returned pointer will point to the result from the closure that was - /// run. 
- pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T - where - F: FnOnce() -> T, - { - let mut status = self.state.load(Ordering::SeqCst); - - if status == INCOMPLETE { - status = match self.state.compare_exchange( - INCOMPLETE, - RUNNING, - Ordering::SeqCst, - Ordering::SeqCst, - ) { - Ok(status) => { - debug_assert_eq!( - status, INCOMPLETE, - "if compare_exchange succeeded, previous status must be incomplete", - ); - // We init - // We use a guard (Finish) to catch panics caused by builder - let mut finish = Finish { - state: &self.state, - panicked: true, - }; - unsafe { *self.data.get() = Some(builder()) }; - finish.panicked = false; - - self.state.store(COMPLETE, Ordering::SeqCst); - - // This next line is strictly an optimization - return self.force_get(); - } - Err(status) => status, - } - } - - loop { - match status { - INCOMPLETE => unreachable!(), - RUNNING => { - // We spin - spin_loop(); - status = self.state.load(Ordering::SeqCst) - } - PANICKED => panic!("Once has panicked"), - COMPLETE => return self.force_get(), - _ => unsafe { unreachable() }, - } - } - } - - /// Returns a pointer iff the `Once` was previously initialized - pub fn r#try<'a>(&'a self) -> Option<&'a T> { - match self.state.load(Ordering::SeqCst) { - COMPLETE => Some(self.force_get()), - _ => None, - } - } - - /// Like try, but will spin if the `Once` is in the process of being - /// initialized - pub fn wait<'a>(&'a self) -> Option<&'a T> { - loop { - match self.state.load(Ordering::SeqCst) { - INCOMPLETE => return None, - - RUNNING => { - spin_loop() // We spin - } - COMPLETE => return Some(self.force_get()), - PANICKED => panic!("Once has panicked"), - _ => unsafe { unreachable() }, - } - } - } -} - -struct Finish<'a> { - state: &'a AtomicUsize, - panicked: bool, -} - -impl<'a> Drop for Finish<'a> { - fn drop(&mut self) { - if self.panicked { - self.state.store(PANICKED, Ordering::SeqCst); - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/stdlib.rs 
s390-tools-2.33.1/rust-vendor/tracing-core/src/stdlib.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/stdlib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/stdlib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,78 +0,0 @@ -//! Re-exports either the Rust `std` library or `core` and `alloc` when `std` is -//! disabled. -//! -//! `crate::stdlib::...` should be used rather than `std::` when adding code that -//! will be available with the standard library disabled. -//! -//! Note that this module is called `stdlib` rather than `std`, as Rust 1.34.0 -//! does not permit redefining the name `stdlib` (although this works on the -//! latest stable Rust). -#[cfg(feature = "std")] -pub(crate) use std::*; - -#[cfg(not(feature = "std"))] -pub(crate) use self::no_std::*; - -#[cfg(not(feature = "std"))] -mod no_std { - // We pre-emptively export everything from libcore/liballoc, (even modules - // we aren't using currently) to make adding new code easier. Therefore, - // some of these imports will be unused. 
- #![allow(unused_imports)] - - pub(crate) use core::{ - any, array, ascii, cell, char, clone, cmp, convert, default, f32, f64, ffi, future, hash, - hint, i128, i16, i8, isize, iter, marker, mem, num, ops, option, pin, ptr, result, task, - time, u128, u16, u32, u8, usize, - }; - - pub(crate) use alloc::{boxed, collections, rc, string, vec}; - - pub(crate) mod borrow { - pub(crate) use alloc::borrow::*; - pub(crate) use core::borrow::*; - } - - pub(crate) mod fmt { - pub(crate) use alloc::fmt::*; - pub(crate) use core::fmt::*; - } - - pub(crate) mod slice { - pub(crate) use alloc::slice::*; - pub(crate) use core::slice::*; - } - - pub(crate) mod str { - pub(crate) use alloc::str::*; - pub(crate) use core::str::*; - } - - pub(crate) mod sync { - pub(crate) use crate::spin::MutexGuard; - pub(crate) use alloc::sync::*; - pub(crate) use core::sync::*; - - /// This wraps `spin::Mutex` to return a `Result`, so that it can be - /// used with code written against `std::sync::Mutex`. - /// - /// Since `spin::Mutex` doesn't support poisoning, the `Result` returned - /// by `lock` will always be `Ok`. - #[derive(Debug, Default)] - pub(crate) struct Mutex { - inner: crate::spin::Mutex, - } - - impl Mutex { - // pub(crate) fn new(data: T) -> Self { - // Self { - // inner: crate::spin::Mutex::new(data), - // } - // } - - pub(crate) fn lock(&self) -> Result, ()> { - Ok(self.inner.lock()) - } - } - } -} diff -Nru s390-tools-2.31.0/rust-vendor/tracing-core/src/subscriber.rs s390-tools-2.33.1/rust-vendor/tracing-core/src/subscriber.rs --- s390-tools-2.31.0/rust-vendor/tracing-core/src/subscriber.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/tracing-core/src/subscriber.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,878 +0,0 @@ -//! Collectors collect and record trace data. 
-use crate::{span, Dispatch, Event, LevelFilter, Metadata}; - -use crate::stdlib::{ - any::{Any, TypeId}, - boxed::Box, - sync::Arc, -}; - -/// Trait representing the functions required to collect trace data. -/// -/// Crates that provide implementations of methods for collecting or recording -/// trace data should implement the `Subscriber` interface. This trait is -/// intended to represent fundamental primitives for collecting trace events and -/// spans — other libraries may offer utility functions and types to make -/// subscriber implementations more modular or improve the ergonomics of writing -/// subscribers. -/// -/// A subscriber is responsible for the following: -/// - Registering new spans as they are created, and providing them with span -/// IDs. Implicitly, this means the subscriber may determine the strategy for -/// determining span equality. -/// - Recording the attachment of field values and follows-from annotations to -/// spans. -/// - Filtering spans and events, and determining when those filters must be -/// invalidated. -/// - Observing spans as they are entered, exited, and closed, and events as -/// they occur. -/// -/// When a span is entered or exited, the subscriber is provided only with the -/// [ID] with which it tagged that span when it was created. This means -/// that it is up to the subscriber to determine whether and how span _data_ — -/// the fields and metadata describing the span — should be stored. The -/// [`new_span`] function is called when a new span is created, and at that -/// point, the subscriber _may_ choose to store the associated data if it will -/// be referenced again. However, if the data has already been recorded and will -/// not be needed by the implementations of `enter` and `exit`, the subscriber -/// may freely discard that data without allocating space to store it. 
-/// -/// ## Overriding default impls -/// -/// Some trait methods on `Subscriber` have default implementations, either in -/// order to reduce the surface area of implementing `Subscriber`, or for -/// backward-compatibility reasons. However, many subscribers will likely want -/// to override these default implementations. -/// -/// The following methods are likely of interest: -/// -/// - [`register_callsite`] is called once for each callsite from which a span -/// event may originate, and returns an [`Interest`] value describing whether or -/// not the subscriber wishes to see events or spans from that callsite. By -/// default, it calls [`enabled`], and returns `Interest::always()` if -/// `enabled` returns true, or `Interest::never()` if enabled returns false. -/// However, if the subscriber's interest can change dynamically at runtime, -/// it may want to override this function to return `Interest::sometimes()`. -/// Additionally, subscribers which wish to perform a behaviour once for each -/// callsite, such as allocating storage for data related to that callsite, -/// can perform it in `register_callsite`. -/// -/// See also the [documentation on the callsite registry][cs-reg] for details -/// on [`register_callsite`]. -/// -/// - [`event_enabled`] is called once before every call to the [`event`] -/// method. This can be used to implement filtering on events once their field -/// values are known, but before any processing is done in the `event` method. -/// - [`clone_span`] is called every time a span ID is cloned, and [`try_close`] -/// is called when a span ID is dropped. By default, these functions do -/// nothing. However, they can be used to implement reference counting for -/// spans, allowing subscribers to free storage for span data and to determine -/// when a span has _closed_ permanently (rather than being exited). -/// Subscribers which store per-span data or which need to track span closures -/// should override these functions together. 
-/// -/// [ID]: super::span::Id -/// [`new_span`]: Subscriber::new_span -/// [`register_callsite`]: Subscriber::register_callsite -/// [`enabled`]: Subscriber::enabled -/// [`clone_span`]: Subscriber::clone_span -/// [`try_close`]: Subscriber::try_close -/// [cs-reg]: crate::callsite#registering-callsites -/// [`event`]: Subscriber::event -/// [`event_enabled`]: Subscriber::event_enabled -pub trait Subscriber: 'static { - /// Invoked when this subscriber becomes a [`Dispatch`]. - /// - /// ## Avoiding Memory Leaks - /// - /// `Subscriber`s should not store their own [`Dispatch`]. Because the - /// `Dispatch` owns the `Subscriber`, storing the `Dispatch` within the - /// `Subscriber` will create a reference count cycle, preventing the `Dispatch` - /// from ever being dropped. - /// - /// Instead, when it is necessary to store a cyclical reference to the - /// `Dispatch` within a `Subscriber`, use [`Dispatch::downgrade`] to convert a - /// `Dispatch` into a [`WeakDispatch`]. This type is analogous to - /// [`std::sync::Weak`], and does not create a reference count cycle. A - /// [`WeakDispatch`] can be stored within a `Subscriber` without causing a - /// memory leak, and can be [upgraded] into a `Dispatch` temporarily when - /// the `Dispatch` must be accessed by the `Subscriber`. - /// - /// [`WeakDispatch`]: crate::dispatcher::WeakDispatch - /// [upgraded]: crate::dispatcher::WeakDispatch::upgrade - fn on_register_dispatch(&self, subscriber: &Dispatch) { - let _ = subscriber; - } - - /// Registers a new [callsite] with this subscriber, returning whether or not - /// the subscriber is interested in being notified about the callsite. - /// - /// By default, this function assumes that the subscriber's [filter] - /// represents an unchanging view of its interest in the callsite. However, - /// if this is not the case, subscribers may override this function to - /// indicate different interests, or to implement behaviour that should run - /// once for every callsite. 
- /// - /// This function is guaranteed to be called at least once per callsite on - /// every active subscriber. The subscriber may store the keys to fields it - /// cares about in order to reduce the cost of accessing fields by name, - /// preallocate storage for that callsite, or perform any other actions it - /// wishes to perform once for each callsite. - /// - /// The subscriber should then return an [`Interest`], indicating - /// whether it is interested in being notified about that callsite in the - /// future. This may be `Always` indicating that the subscriber always - /// wishes to be notified about the callsite, and its filter need not be - /// re-evaluated; `Sometimes`, indicating that the subscriber may sometimes - /// care about the callsite but not always (such as when sampling), or - /// `Never`, indicating that the subscriber never wishes to be notified about - /// that callsite. If all active subscribers return `Never`, a callsite will - /// never be enabled unless a new subscriber expresses interest in it. - /// - /// `Subscriber`s which require their filters to be run every time an event - /// occurs or a span is entered/exited should return `Interest::sometimes`. - /// If a subscriber returns `Interest::sometimes`, then its [`enabled`] method - /// will be called every time an event or span is created from that callsite. - /// - /// For example, suppose a sampling subscriber is implemented by - /// incrementing a counter every time `enabled` is called and only returning - /// `true` when the counter is divisible by a specified sampling rate. If - /// that subscriber returns `Interest::always` from `register_callsite`, then - /// the filter will not be re-evaluated once it has been applied to a given - /// set of metadata. Thus, the counter will not be incremented, and the span - /// or event that corresponds to the metadata will never be `enabled`. 
- /// - /// `Subscriber`s that need to change their filters occasionally should call - /// [`rebuild_interest_cache`] to re-evaluate `register_callsite` for all - /// callsites. - /// - /// Similarly, if a `Subscriber` has a filtering strategy that can be - /// changed dynamically at runtime, it would need to re-evaluate that filter - /// if the cached results have changed. - /// - /// A subscriber which manages fanout to multiple other subscribers - /// should proxy this decision to all of its child subscribers, - /// returning `Interest::never` only if _all_ such children return - /// `Interest::never`. If the set of subscribers to which spans are - /// broadcast may change dynamically, the subscriber should also never - /// return `Interest::Never`, as a new subscriber may be added that _is_ - /// interested. - /// - /// See the [documentation on the callsite registry][cs-reg] for more - /// details on how and when the `register_callsite` method is called. - /// - /// # Notes - /// This function may be called again when a new subscriber is created or - /// when the registry is invalidated. - /// - /// If a subscriber returns `Interest::never` for a particular callsite, it - /// _may_ still see spans and events originating from that callsite, if - /// another subscriber expressed interest in it. - /// - /// [callsite]: crate::callsite - /// [filter]: Self::enabled - /// [metadata]: super::metadata::Metadata - /// [`enabled`]: Subscriber::enabled() - /// [`rebuild_interest_cache`]: super::callsite::rebuild_interest_cache - /// [cs-reg]: crate::callsite#registering-callsites - fn register_callsite(&self, metadata: &'static Metadata<'static>) -> Interest { - if self.enabled(metadata) { - Interest::always() - } else { - Interest::never() - } - } - - /// Returns true if a span or event with the specified [metadata] would be - /// recorded. 
- /// - /// By default, it is assumed that this filter needs only be evaluated once - /// for each callsite, so it is called by [`register_callsite`] when each - /// callsite is registered. The result is used to determine if the subscriber - /// is always [interested] or never interested in that callsite. This is intended - /// primarily as an optimization, so that expensive filters (such as those - /// involving string search, et cetera) need not be re-evaluated. - /// - /// However, if the subscriber's interest in a particular span or event may - /// change, or depends on contexts only determined dynamically at runtime, - /// then the `register_callsite` method should be overridden to return - /// [`Interest::sometimes`]. In that case, this function will be called every - /// time that span or event occurs. - /// - /// [metadata]: super::metadata::Metadata - /// [interested]: Interest - /// [`Interest::sometimes`]: Interest::sometimes - /// [`register_callsite`]: Subscriber::register_callsite() - fn enabled(&self, metadata: &Metadata<'_>) -> bool; - - /// Returns the highest [verbosity level][level] that this `Subscriber` will - /// enable, or `None`, if the subscriber does not implement level-based - /// filtering or chooses not to implement this method. - /// - /// If this method returns a [`Level`][level], it will be used as a hint to - /// determine the most verbose level that will be enabled. This will allow - /// spans and events which are more verbose than that level to be skipped - /// more efficiently. Subscribers which perform filtering are strongly - /// encouraged to provide an implementation of this method. - /// - /// If the maximum level the subscriber will enable can change over the - /// course of its lifetime, it is free to return a different value from - /// multiple invocations of this method. 
However, note that changes in the - /// maximum level will **only** be reflected after the callsite [`Interest`] - /// cache is rebuilt, by calling the [`callsite::rebuild_interest_cache`][rebuild] - /// function. Therefore, if the subscriber will change the value returned by - /// this method, it is responsible for ensuring that - /// [`rebuild_interest_cache`][rebuild] is called after the value of the max - /// level changes. - /// - /// [level]: super::Level - /// [rebuild]: super::callsite::rebuild_interest_cache - fn max_level_hint(&self) -> Option { - None - } - - /// Visit the construction of a new span, returning a new [span ID] for the - /// span being constructed. - /// - /// The provided [`Attributes`] contains any field values that were provided - /// when the span was created. The subscriber may pass a [visitor] to the - /// `Attributes`' [`record` method] to record these values. - /// - /// IDs are used to uniquely identify spans and events within the context of a - /// subscriber, so span equality will be based on the returned ID. Thus, if - /// the subscriber wishes for all spans with the same metadata to be - /// considered equal, it should return the same ID every time it is given a - /// particular set of metadata. Similarly, if it wishes for two separate - /// instances of a span with the same metadata to *not* be equal, it should - /// return a distinct ID every time this function is called, regardless of - /// the metadata. - /// - /// Note that the subscriber is free to assign span IDs based on whatever - /// scheme it sees fit. Any guarantees about uniqueness, ordering, or ID - /// reuse are left up to the subscriber implementation to determine. 
- /// - /// [span ID]: super::span::Id - /// [`Attributes`]: super::span::Attributes - /// [visitor]: super::field::Visit - /// [`record` method]: super::span::Attributes::record - fn new_span(&self, span: &span::Attributes<'_>) -> span::Id; - - // === Notification methods =============================================== - - /// Record a set of values on a span. - /// - /// This method will be invoked when value is recorded on a span. - /// Recording multiple values for the same field is possible, - /// but the actual behaviour is defined by the subscriber implementation. - /// - /// Keep in mind that a span might not provide a value - /// for each field it declares. - /// - /// The subscriber is expected to provide a [visitor] to the `Record`'s - /// [`record` method] in order to record the added values. - /// - /// # Example - /// "foo = 3" will be recorded when [`record`] is called on the - /// `Attributes` passed to `new_span`. - /// Since values are not provided for the `bar` and `baz` fields, - /// the span's `Metadata` will indicate that it _has_ those fields, - /// but values for them won't be recorded at this time. - /// - /// ```rust,ignore - /// # use tracing::span; - /// - /// let mut span = span!("my_span", foo = 3, bar, baz); - /// - /// // `Subscriber::record` will be called with a `Record` - /// // containing "bar = false" - /// span.record("bar", &false); - /// - /// // `Subscriber::record` will be called with a `Record` - /// // containing "baz = "a string"" - /// span.record("baz", &"a string"); - /// ``` - /// - /// [visitor]: super::field::Visit - /// [`record`]: super::span::Attributes::record - /// [`record` method]: super::span::Record::record - fn record(&self, span: &span::Id, values: &span::Record<'_>); - - /// Adds an indication that `span` follows from the span with the id - /// `follows`. 
- /// - /// This relationship differs somewhat from the parent-child relationship: a - /// span may have any number of prior spans, rather than a single one; and - /// spans are not considered to be executing _inside_ of the spans they - /// follow from. This means that a span may close even if subsequent spans - /// that follow from it are still open, and time spent inside of a - /// subsequent span should not be included in the time its precedents were - /// executing. This is used to model causal relationships such as when a - /// single future spawns several related background tasks, et cetera. - /// - /// If the subscriber has spans corresponding to the given IDs, it should - /// record this relationship in whatever way it deems necessary. Otherwise, - /// if one or both of the given span IDs do not correspond to spans that the - /// subscriber knows about, or if a cyclical relationship would be created - /// (i.e., some span _a_ which proceeds some other span _b_ may not also - /// follow from _b_), it may silently do nothing. - fn record_follows_from(&self, span: &span::Id, follows: &span::Id); - - /// Determine if an [`Event`] should be recorded. - /// - /// By default, this returns `true` and `Subscriber`s can filter events in - /// [`event`][Self::event] without any penalty. However, when `event` is - /// more complicated, this can be used to determine if `event` should be - /// called at all, separating out the decision from the processing. - fn event_enabled(&self, event: &Event<'_>) -> bool { - let _ = event; - true - } - - /// Records that an [`Event`] has occurred. - /// - /// This method will be invoked when an Event is constructed by - /// the `Event`'s [`dispatch` method]. For example, this happens internally - /// when an event macro from `tracing` is called. 
- /// - /// The key difference between this method and `record` is that `record` is - /// called when a value is recorded for a field defined by a span, - /// while `event` is called when a new event occurs. - /// - /// The provided `Event` struct contains any field values attached to the - /// event. The subscriber may pass a [visitor] to the `Event`'s - /// [`record` method] to record these values. - /// - /// [`Event`]: super::event::Event - /// [visitor]: super::field::Visit - /// [`record` method]: super::event::Event::record - /// [`dispatch` method]: super::event::Event::dispatch - fn event(&self, event: &Event<'_>); - - /// Records that a span has been entered. - /// - /// When entering a span, this method is called to notify the subscriber - /// that the span has been entered. The subscriber is provided with the - /// [span ID] of the entered span, and should update any internal state - /// tracking the current span accordingly. - /// - /// [span ID]: super::span::Id - fn enter(&self, span: &span::Id); - - /// Records that a span has been exited. - /// - /// When exiting a span, this method is called to notify the subscriber - /// that the span has been exited. The subscriber is provided with the - /// [span ID] of the exited span, and should update any internal state - /// tracking the current span accordingly. - /// - /// Exiting a span does not imply that the span will not be re-entered. - /// - /// [span ID]: super::span::Id - fn exit(&self, span: &span::Id); - - /// Notifies the subscriber that a [span ID] has been cloned. - /// - /// This function is guaranteed to only be called with span IDs that were - /// returned by this subscriber's `new_span` function. - /// - /// Note that the default implementation of this function this is just the - /// identity function, passing through the identifier. However, it can be - /// used in conjunction with [`try_close`] to track the number of handles - /// capable of `enter`ing a span. 
When all the handles have been dropped - /// (i.e., `try_close` has been called one more time than `clone_span` for a - /// given ID), the subscriber may assume that the span will not be entered - /// again. It is then free to deallocate storage for data associated with - /// that span, write data from that span to IO, and so on. - /// - /// For more unsafe situations, however, if `id` is itself a pointer of some - /// kind this can be used as a hook to "clone" the pointer, depending on - /// what that means for the specified pointer. - /// - /// [span ID]: super::span::Id - /// [`try_close`]: Subscriber::try_close - fn clone_span(&self, id: &span::Id) -> span::Id { - id.clone() - } - - /// **This method is deprecated.** - /// - /// Using `drop_span` may result in subscribers composed using - /// `tracing-subscriber` crate's `Layer` trait from observing close events. - /// Use [`try_close`] instead. - /// - /// The default implementation of this function does nothing. - /// - /// [`try_close`]: Subscriber::try_close - #[deprecated(since = "0.1.2", note = "use `Subscriber::try_close` instead")] - fn drop_span(&self, _id: span::Id) {} - - /// Notifies the subscriber that a [span ID] has been dropped, and returns - /// `true` if there are now 0 IDs that refer to that span. - /// - /// Higher-level libraries providing functionality for composing multiple - /// subscriber implementations may use this return value to notify any - /// "layered" subscribers that this subscriber considers the span closed. - /// - /// The default implementation of this method calls the subscriber's - /// [`drop_span`] method and returns `false`. This means that, unless the - /// subscriber overrides the default implementation, close notifications - /// will never be sent to any layered subscribers. In general, if the - /// subscriber tracks reference counts, this method should be implemented, - /// rather than `drop_span`. 
- /// - /// This function is guaranteed to only be called with span IDs that were - /// returned by this subscriber's `new_span` function. - /// - /// It's guaranteed that if this function has been called once more than the - /// number of times `clone_span` was called with the same `id`, then no more - /// handles that can enter the span with that `id` exist. This means that it - /// can be used in conjunction with [`clone_span`] to track the number of - /// handles capable of `enter`ing a span. When all the handles have been - /// dropped (i.e., `try_close` has been called one more time than - /// `clone_span` for a given ID), the subscriber may assume that the span - /// will not be entered again, and should return `true`. It is then free to - /// deallocate storage for data associated with that span, write data from - /// that span to IO, and so on. - /// - /// **Note**: since this function is called when spans are dropped, - /// implementations should ensure that they are unwind-safe. Panicking from - /// inside of a `try_close` function may cause a double panic, if the span - /// was dropped due to a thread unwinding. - /// - /// [span ID]: super::span::Id - /// [`clone_span`]: Subscriber::clone_span - /// [`drop_span`]: Subscriber::drop_span - fn try_close(&self, id: span::Id) -> bool { - #[allow(deprecated)] - self.drop_span(id); - false - } - - /// Returns a type representing this subscriber's view of the current span. - /// - /// If subscribers track a current span, they should override this function - /// to return [`Current::new`] if the thread from which this method is - /// called is inside a span, or [`Current::none`] if the thread is not - /// inside a span. - /// - /// By default, this returns a value indicating that the subscriber - /// does **not** track what span is current. If the subscriber does not - /// implement a current span, it should not override this method. 
- /// - /// [`Current::new`]: super::span::Current#tymethod.new - /// [`Current::none`]: super::span::Current#tymethod.none - fn current_span(&self) -> span::Current { - span::Current::unknown() - } - - // === Downcasting methods ================================================ - - /// If `self` is the same type as the provided `TypeId`, returns an untyped - /// `*const` pointer to that type. Otherwise, returns `None`. - /// - /// If you wish to downcast a `Subscriber`, it is strongly advised to use - /// the safe API provided by [`downcast_ref`] instead. - /// - /// This API is required for `downcast_raw` to be a trait method; a method - /// signature like [`downcast_ref`] (with a generic type parameter) is not - /// object-safe, and thus cannot be a trait method for `Subscriber`. This - /// means that if we only exposed `downcast_ref`, `Subscriber` - /// implementations could not override the downcasting behavior - /// - /// This method may be overridden by "fan out" or "chained" subscriber - /// implementations which consist of multiple composed types. Such - /// subscribers might allow `downcast_raw` by returning references to those - /// component if they contain components with the given `TypeId`. - /// - /// # Safety - /// - /// The [`downcast_ref`] method expects that the pointer returned by - /// `downcast_raw` is non-null and points to a valid instance of the type - /// with the provided `TypeId`. Failure to ensure this will result in - /// undefined behaviour, so implementing `downcast_raw` is unsafe. - /// - /// [`downcast_ref`]: #method.downcast_ref - unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> { - if id == TypeId::of::() { - Some(self as *const Self as *const ()) - } else { - None - } - } -} - -impl dyn Subscriber { - /// Returns `true` if this `Subscriber` is the same type as `T`. 
- pub fn is(&self) -> bool { - self.downcast_ref::().is_some() - } - - /// Returns some reference to this `Subscriber` value if it is of type `T`, - /// or `None` if it isn't. - pub fn downcast_ref(&self) -> Option<&T> { - unsafe { - let raw = self.downcast_raw(TypeId::of::())?; - if raw.is_null() { - None - } else { - Some(&*(raw as *const _)) - } - } - } -} - -impl dyn Subscriber + Send { - /// Returns `true` if this [`Subscriber`] is the same type as `T`. - pub fn is(&self) -> bool { - self.downcast_ref::().is_some() - } - - /// Returns some reference to this [`Subscriber`] value if it is of type `T`, - /// or `None` if it isn't. - pub fn downcast_ref(&self) -> Option<&T> { - unsafe { - let raw = self.downcast_raw(TypeId::of::())?; - if raw.is_null() { - None - } else { - Some(&*(raw as *const _)) - } - } - } -} - -impl dyn Subscriber + Sync { - /// Returns `true` if this [`Subscriber`] is the same type as `T`. - pub fn is(&self) -> bool { - self.downcast_ref::().is_some() - } - - /// Returns some reference to this `[`Subscriber`] value if it is of type `T`, - /// or `None` if it isn't. - pub fn downcast_ref(&self) -> Option<&T> { - unsafe { - let raw = self.downcast_raw(TypeId::of::())?; - if raw.is_null() { - None - } else { - Some(&*(raw as *const _)) - } - } - } -} - -impl dyn Subscriber + Send + Sync { - /// Returns `true` if this [`Subscriber`] is the same type as `T`. - pub fn is(&self) -> bool { - self.downcast_ref::().is_some() - } - - /// Returns some reference to this [`Subscriber`] value if it is of type `T`, - /// or `None` if it isn't. - pub fn downcast_ref(&self) -> Option<&T> { - unsafe { - let raw = self.downcast_raw(TypeId::of::())?; - if raw.is_null() { - None - } else { - Some(&*(raw as *const _)) - } - } - } -} - -/// Indicates a [`Subscriber`]'s interest in a particular callsite. -/// -/// `Subscriber`s return an `Interest` from their [`register_callsite`] methods -/// in order to determine whether that span should be enabled or disabled. 
-/// -/// [`Subscriber`]: super::Subscriber -/// [`register_callsite`]: super::Subscriber::register_callsite -#[derive(Clone, Debug)] -pub struct Interest(InterestKind); - -#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] -enum InterestKind { - Never = 0, - Sometimes = 1, - Always = 2, -} - -impl Interest { - /// Returns an `Interest` indicating that the subscriber is never interested - /// in being notified about a callsite. - /// - /// If all active subscribers are `never()` interested in a callsite, it will - /// be completely disabled unless a new subscriber becomes active. - #[inline] - pub fn never() -> Self { - Interest(InterestKind::Never) - } - - /// Returns an `Interest` indicating the subscriber is sometimes interested - /// in being notified about a callsite. - /// - /// If all active subscribers are `sometimes` or `never` interested in a - /// callsite, the currently active subscriber will be asked to filter that - /// callsite every time it creates a span. This will be the case until a new - /// subscriber expresses that it is `always` interested in the callsite. - #[inline] - pub fn sometimes() -> Self { - Interest(InterestKind::Sometimes) - } - - /// Returns an `Interest` indicating the subscriber is always interested in - /// being notified about a callsite. - /// - /// If any subscriber expresses that it is `always()` interested in a given - /// callsite, then the callsite will always be enabled. - #[inline] - pub fn always() -> Self { - Interest(InterestKind::Always) - } - - /// Returns `true` if the subscriber is never interested in being notified - /// about this callsite. - #[inline] - pub fn is_never(&self) -> bool { - matches!(self.0, InterestKind::Never) - } - - /// Returns `true` if the subscriber is sometimes interested in being notified - /// about this callsite. 
- #[inline] - pub fn is_sometimes(&self) -> bool { - matches!(self.0, InterestKind::Sometimes) - } - - /// Returns `true` if the subscriber is always interested in being notified - /// about this callsite. - #[inline] - pub fn is_always(&self) -> bool { - matches!(self.0, InterestKind::Always) - } - - /// Returns the common interest between these two Interests. - /// - /// If both interests are the same, this propagates that interest. - /// Otherwise, if they differ, the result must always be - /// `Interest::sometimes` --- if the two subscribers differ in opinion, we - /// will have to ask the current subscriber what it thinks, no matter what. - pub(crate) fn and(self, rhs: Interest) -> Self { - if self.0 == rhs.0 { - self - } else { - Interest::sometimes() - } - } -} - -/// A no-op [`Subscriber`]. -/// -/// [`NoSubscriber`] implements the [`Subscriber`] trait by never being enabled, -/// never being interested in any callsite, and dropping all spans and events. -#[derive(Copy, Clone, Debug, Default)] -pub struct NoSubscriber(()); - -impl Subscriber for NoSubscriber { - #[inline] - fn register_callsite(&self, _: &'static Metadata<'static>) -> Interest { - Interest::never() - } - - fn new_span(&self, _: &span::Attributes<'_>) -> span::Id { - span::Id::from_u64(0xDEAD) - } - - fn event(&self, _event: &Event<'_>) {} - - fn record(&self, _span: &span::Id, _values: &span::Record<'_>) {} - - fn record_follows_from(&self, _span: &span::Id, _follows: &span::Id) {} - - #[inline] - fn enabled(&self, _metadata: &Metadata<'_>) -> bool { - false - } - - fn enter(&self, _span: &span::Id) {} - fn exit(&self, _span: &span::Id) {} -} - -impl NoSubscriber { - /// Returns a new `NoSubscriber`. 
- #[must_use] - pub const fn new() -> Self { - Self(()) - } -} - -impl Subscriber for Box -where - S: Subscriber + ?Sized, -{ - #[inline] - fn register_callsite(&self, metadata: &'static Metadata<'static>) -> Interest { - self.as_ref().register_callsite(metadata) - } - - #[inline] - fn enabled(&self, metadata: &Metadata<'_>) -> bool { - self.as_ref().enabled(metadata) - } - - #[inline] - fn max_level_hint(&self) -> Option { - self.as_ref().max_level_hint() - } - - #[inline] - fn new_span(&self, span: &span::Attributes<'_>) -> span::Id { - self.as_ref().new_span(span) - } - - #[inline] - fn record(&self, span: &span::Id, values: &span::Record<'_>) { - self.as_ref().record(span, values) - } - - #[inline] - fn record_follows_from(&self, span: &span::Id, follows: &span::Id) { - self.as_ref().record_follows_from(span, follows) - } - - #[inline] - fn event_enabled(&self, event: &Event<'_>) -> bool { - self.as_ref().event_enabled(event) - } - - #[inline] - fn event(&self, event: &Event<'_>) { - self.as_ref().event(event) - } - - #[inline] - fn enter(&self, span: &span::Id) { - self.as_ref().enter(span) - } - - #[inline] - fn exit(&self, span: &span::Id) { - self.as_ref().exit(span) - } - - #[inline] - fn clone_span(&self, id: &span::Id) -> span::Id { - self.as_ref().clone_span(id) - } - - #[inline] - fn try_close(&self, id: span::Id) -> bool { - self.as_ref().try_close(id) - } - - #[inline] - #[allow(deprecated)] - fn drop_span(&self, id: span::Id) { - self.as_ref().try_close(id); - } - - #[inline] - fn current_span(&self) -> span::Current { - self.as_ref().current_span() - } - - #[inline] - unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> { - if id == TypeId::of::() { - return Some(self as *const Self as *const _); - } - - self.as_ref().downcast_raw(id) - } -} - -impl Subscriber for Arc -where - S: Subscriber + ?Sized, -{ - #[inline] - fn register_callsite(&self, metadata: &'static Metadata<'static>) -> Interest { - self.as_ref().register_callsite(metadata) 
- } - - #[inline] - fn enabled(&self, metadata: &Metadata<'_>) -> bool { - self.as_ref().enabled(metadata) - } - - #[inline] - fn max_level_hint(&self) -> Option { - self.as_ref().max_level_hint() - } - - #[inline] - fn new_span(&self, span: &span::Attributes<'_>) -> span::Id { - self.as_ref().new_span(span) - } - - #[inline] - fn record(&self, span: &span::Id, values: &span::Record<'_>) { - self.as_ref().record(span, values) - } - - #[inline] - fn record_follows_from(&self, span: &span::Id, follows: &span::Id) { - self.as_ref().record_follows_from(span, follows) - } - - #[inline] - fn event_enabled(&self, event: &Event<'_>) -> bool { - self.as_ref().event_enabled(event) - } - - #[inline] - fn event(&self, event: &Event<'_>) { - self.as_ref().event(event) - } - - #[inline] - fn enter(&self, span: &span::Id) { - self.as_ref().enter(span) - } - - #[inline] - fn exit(&self, span: &span::Id) { - self.as_ref().exit(span) - } - - #[inline] - fn clone_span(&self, id: &span::Id) -> span::Id { - self.as_ref().clone_span(id) - } - - #[inline] - fn try_close(&self, id: span::Id) -> bool { - self.as_ref().try_close(id) - } - - #[inline] - #[allow(deprecated)] - fn drop_span(&self, id: span::Id) { - self.as_ref().try_close(id); - } - - #[inline] - fn current_span(&self) -> span::Current { - self.as_ref().current_span() - } - - #[inline] - unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> { - if id == TypeId::of::() { - return Some(self as *const Self as *const _); - } - - self.as_ref().downcast_raw(id) - } -} diff -Nru s390-tools-2.31.0/rust-vendor/try-lock/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/try-lock/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/try-lock/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/try-lock/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed"} \ No newline at end of file 
diff -Nru s390-tools-2.31.0/rust-vendor/try-lock/Cargo.toml s390-tools-2.33.1/rust-vendor/try-lock/Cargo.toml --- s390-tools-2.31.0/rust-vendor/try-lock/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/try-lock/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -name = "try-lock" -version = "0.2.4" -authors = ["Sean McArthur "] -description = "A lightweight atomic lock." -homepage = "https://github.com/seanmonstar/try-lock" -documentation = "https://docs.rs/try-lock" -readme = "README.md" -keywords = [ - "lock", - "atomic", -] -categories = [ - "concurrency", - "no-std", -] -license = "MIT" -repository = "https://github.com/seanmonstar/try-lock" - -[dependencies] diff -Nru s390-tools-2.31.0/rust-vendor/try-lock/LICENSE s390-tools-2.33.1/rust-vendor/try-lock/LICENSE --- s390-tools-2.31.0/rust-vendor/try-lock/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/try-lock/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,21 +0,0 @@ -Copyright (c) 2018 Sean McArthur -Copyright (c) 2016 Alex Crichton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following 
conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff -Nru s390-tools-2.31.0/rust-vendor/try-lock/README.md s390-tools-2.33.1/rust-vendor/try-lock/README.md --- s390-tools-2.31.0/rust-vendor/try-lock/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/try-lock/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,44 +0,0 @@ -# TryLock - -- [Crates.io](https://crates.io/crates/try-lock) -- [Docs](https://docs.rs/try-lock) - -A light-weight lock guarded by an atomic boolean. - -Most efficient when contention is low, acquiring the lock is a single atomic swap, and releasing it just 1 more atomic swap. - -## Example - -```rust -use std::sync::Arc; -use try_lock::TryLock; - -// a thing we want to share -struct Widget { - name: String, -} - -// lock it up! 
-let widget1 = Arc::new(TryLock::new(Widget { - name: "Spanner".into(), -})); - -let widget2 = widget1.clone(); - - -// mutate the widget -let mut locked = widget1.try_lock().expect("example isn't locked yet"); -locked.name.push_str(" Bundle"); - -// hands off, buddy -let not_locked = widget2.try_lock(); -assert!(not_locked.is_none(), "widget1 has the lock"); - -// ok, you can have it -drop(locked); - -let locked2 = widget2.try_lock().expect("widget1 lock is released"); - -assert_eq!(locked2.name, "Spanner Bundle"); -``` - diff -Nru s390-tools-2.31.0/rust-vendor/try-lock/src/lib.rs s390-tools-2.33.1/rust-vendor/try-lock/src/lib.rs --- s390-tools-2.31.0/rust-vendor/try-lock/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/try-lock/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,275 +0,0 @@ -#![doc(html_root_url = "https://docs.rs/try-lock/0.2.3")] -#![deny(missing_docs)] -#![deny(missing_debug_implementations)] -#![deny(warnings)] -#![cfg_attr(not(test), no_std)] - -//! A light-weight lock guarded by an atomic boolean. -//! -//! Most efficient when contention is low, acquiring the lock is a single -//! atomic swap, and releasing it just 1 more atomic swap. -//! -//! # Example -//! -//! ``` -//! use std::sync::Arc; -//! use try_lock::TryLock; -//! -//! // a thing we want to share -//! struct Widget { -//! name: String, -//! } -//! -//! // lock it up! -//! let widget1 = Arc::new(TryLock::new(Widget { -//! name: "Spanner".into(), -//! })); -//! -//! let widget2 = widget1.clone(); -//! -//! -//! // mutate the widget -//! let mut locked = widget1.try_lock().expect("example isn't locked yet"); -//! locked.name.push_str(" Bundle"); -//! -//! // hands off, buddy -//! let not_locked = widget2.try_lock(); -//! assert!(not_locked.is_none(), "widget1 has the lock"); -//! -//! // ok, you can have it -//! drop(locked); -//! -//! let locked2 = widget2.try_lock().expect("widget1 lock is released"); -//! -//! 
assert_eq!(locked2.name, "Spanner Bundle"); -//! ``` - -#[cfg(test)] -extern crate core; - -use core::cell::UnsafeCell; -use core::fmt; -use core::ops::{Deref, DerefMut}; -use core::sync::atomic::{AtomicBool, Ordering}; -use core::marker::PhantomData; - -/// A light-weight lock guarded by an atomic boolean. -/// -/// Most efficient when contention is low, acquiring the lock is a single -/// atomic swap, and releasing it just 1 more atomic swap. -/// -/// It is only possible to try to acquire the lock, it is not possible to -/// wait for the lock to become ready, like with a `Mutex`. -#[derive(Default)] -pub struct TryLock { - is_locked: AtomicBool, - value: UnsafeCell, -} - -impl TryLock { - /// Create a `TryLock` around the value. - #[inline] - pub fn new(val: T) -> TryLock { - TryLock { - is_locked: AtomicBool::new(false), - value: UnsafeCell::new(val), - } - } - - /// Try to acquire the lock of this value. - /// - /// If the lock is already acquired by someone else, this returns - /// `None`. You can try to acquire again whenever you want, perhaps - /// by spinning a few times, or by using some other means of - /// notification. - /// - /// # Note - /// - /// The default memory ordering is to use `Acquire` to lock, and `Release` - /// to unlock. If different ordering is required, use - /// [`try_lock_explicit`](TryLock::try_lock_explicit) or - /// [`try_lock_explicit_unchecked`](TryLock::try_lock_explicit_unchecked). - #[inline] - pub fn try_lock(&self) -> Option> { - unsafe { - self.try_lock_explicit_unchecked(Ordering::Acquire, Ordering::Release) - } - } - - /// Try to acquire the lock of this value using the lock and unlock orderings. - /// - /// If the lock is already acquired by someone else, this returns - /// `None`. You can try to acquire again whenever you want, perhaps - /// by spinning a few times, or by using some other means of - /// notification. 
- #[inline] - #[deprecated( - since = "0.2.3", - note = "This method is actually unsafe because it unsafely allows \ - the use of weaker memory ordering. Please use try_lock_explicit instead" - )] - pub fn try_lock_order(&self, lock_order: Ordering, unlock_order: Ordering) -> Option> { - unsafe { - self.try_lock_explicit_unchecked(lock_order, unlock_order) - } - } - - /// Try to acquire the lock of this value using the specified lock and - /// unlock orderings. - /// - /// If the lock is already acquired by someone else, this returns - /// `None`. You can try to acquire again whenever you want, perhaps - /// by spinning a few times, or by using some other means of - /// notification. - /// - /// # Panic - /// - /// This method panics if `lock_order` is not any of `Acquire`, `AcqRel`, - /// and `SeqCst`, or `unlock_order` is not any of `Release` and `SeqCst`. - #[inline] - pub fn try_lock_explicit(&self, lock_order: Ordering, unlock_order: Ordering) -> Option> { - match lock_order { - Ordering::Acquire | - Ordering::AcqRel | - Ordering::SeqCst => {} - _ => panic!("lock ordering must be `Acquire`, `AcqRel`, or `SeqCst`"), - } - - match unlock_order { - Ordering::Release | - Ordering::SeqCst => {} - _ => panic!("unlock ordering must be `Release` or `SeqCst`"), - } - - unsafe { - self.try_lock_explicit_unchecked(lock_order, unlock_order) - } - } - - /// Try to acquire the lock of this value using the specified lock and - /// unlock orderings without checking that the specified orderings are - /// strong enough to be safe. - /// - /// If the lock is already acquired by someone else, this returns - /// `None`. You can try to acquire again whenever you want, perhaps - /// by spinning a few times, or by using some other means of - /// notification. - /// - /// # Safety - /// - /// Unlike [`try_lock_explicit`], this method is unsafe because it does not - /// check that the given memory orderings are strong enough to prevent data - /// race. 
- /// - /// [`try_lock_explicit`]: Self::try_lock_explicit - #[inline] - pub unsafe fn try_lock_explicit_unchecked(&self, lock_order: Ordering, unlock_order: Ordering) -> Option> { - if !self.is_locked.swap(true, lock_order) { - Some(Locked { - lock: self, - order: unlock_order, - _p: PhantomData, - }) - } else { - None - } - } - - /// Take the value back out of the lock when this is the sole owner. - #[inline] - pub fn into_inner(self) -> T { - debug_assert!(!self.is_locked.load(Ordering::Relaxed), "TryLock was mem::forgotten"); - // Since the compiler can statically determine this is the only owner, - // it's safe to take the value out. In fact, in newer versions of Rust, - // `UnsafeCell::into_inner` has been marked safe. - // - // To support older version (1.21), the unsafe block is still here. - #[allow(unused_unsafe)] - unsafe { - self.value.into_inner() - } - } -} - -unsafe impl Send for TryLock {} -unsafe impl Sync for TryLock {} - -impl fmt::Debug for TryLock { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - - // Used if the TryLock cannot acquire the lock. - struct LockedPlaceholder; - - impl fmt::Debug for LockedPlaceholder { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("") - } - } - - let mut builder = f.debug_struct("TryLock"); - if let Some(locked) = self.try_lock() { - builder.field("value", &*locked); - } else { - builder.field("value", &LockedPlaceholder); - } - builder.finish() - } -} - -/// A locked value acquired from a `TryLock`. -/// -/// The type represents an exclusive view at the underlying value. The lock is -/// released when this type is dropped. -/// -/// This type derefs to the underlying value. -#[must_use = "TryLock will immediately unlock if not used"] -pub struct Locked<'a, T: 'a> { - lock: &'a TryLock, - order: Ordering, - /// Suppresses Send and Sync autotraits for `struct Locked`. 
- _p: PhantomData<*mut T>, -} - -impl<'a, T> Deref for Locked<'a, T> { - type Target = T; - #[inline] - fn deref(&self) -> &T { - unsafe { &*self.lock.value.get() } - } -} - -impl<'a, T> DerefMut for Locked<'a, T> { - #[inline] - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.lock.value.get() } - } -} - -impl<'a, T> Drop for Locked<'a, T> { - #[inline] - fn drop(&mut self) { - self.lock.is_locked.store(false, self.order); - } -} - -impl<'a, T: fmt::Debug> fmt::Debug for Locked<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -#[cfg(test)] -mod tests { - use super::TryLock; - - #[test] - fn fmt_debug() { - let lock = TryLock::new(5); - assert_eq!(format!("{:?}", lock), "TryLock { value: 5 }"); - - let locked = lock.try_lock().unwrap(); - assert_eq!(format!("{:?}", locked), "5"); - - assert_eq!(format!("{:?}", lock), "TryLock { value: }"); - } -} diff -Nru s390-tools-2.31.0/rust-vendor/want/benches/throughput.rs s390-tools-2.33.1/rust-vendor/want/benches/throughput.rs --- s390-tools-2.31.0/rust-vendor/want/benches/throughput.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/want/benches/throughput.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,14 +0,0 @@ -#![feature(test)] - -extern crate test; -extern crate want; - -#[bench] -fn throughput(b: &mut test::Bencher) { - let (mut gv, mut tk) = want::new(); - - b.iter(move || { - tk.want(); - assert!(gv.poll_want().unwrap().is_ready()); - }); -} diff -Nru s390-tools-2.31.0/rust-vendor/want/.cargo-checksum.json s390-tools-2.33.1/rust-vendor/want/.cargo-checksum.json --- s390-tools-2.31.0/rust-vendor/want/.cargo-checksum.json 2024-02-06 12:28:11.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/want/.cargo-checksum.json 1970-01-01 01:00:00.000000000 +0100 @@ -1 +0,0 @@ -{"files":{},"package":"bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e"} \ No newline at end of file diff -Nru 
s390-tools-2.31.0/rust-vendor/want/Cargo.toml s390-tools-2.33.1/rust-vendor/want/Cargo.toml --- s390-tools-2.31.0/rust-vendor/want/Cargo.toml 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/want/Cargo.toml 1970-01-01 01:00:00.000000000 +0100 @@ -1,35 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "want" -version = "0.3.1" -authors = ["Sean McArthur "] -description = "Detect when another Future wants a result." -documentation = "https://docs.rs/want" -readme = "README.md" -keywords = [ - "futures", - "channel", - "async", -] -license = "MIT" -repository = "https://github.com/seanmonstar/want" - -[dependencies.try-lock] -version = "0.2.4" - -[dev-dependencies.tokio-executor] -version = "0.2.0-alpha.2" - -[dev-dependencies.tokio-sync] -version = "0.2.0-alpha.2" diff -Nru s390-tools-2.31.0/rust-vendor/want/LICENSE s390-tools-2.33.1/rust-vendor/want/LICENSE --- s390-tools-2.31.0/rust-vendor/want/LICENSE 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/want/LICENSE 1970-01-01 01:00:00.000000000 +0100 @@ -1,20 +0,0 @@ -Copyright (c) 2018-2019 Sean McArthur - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to 
the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff -Nru s390-tools-2.31.0/rust-vendor/want/README.md s390-tools-2.33.1/rust-vendor/want/README.md --- s390-tools-2.31.0/rust-vendor/want/README.md 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/want/README.md 1970-01-01 01:00:00.000000000 +0100 @@ -1,24 +0,0 @@ -# Want - -- [Crates.io](https://crates.io/crates/want) -- [Docs](https://docs.rs/want) - -A `Future`s channel-like utility to signal when a value is wanted. - -Futures are supposed to be lazy, and only starting work if `Future::poll` -is called. The same is true of `Stream`s, but when using a channel as -a `Stream`, it can be hard to know if the receiver is ready for the next -value. - -Put another way, given a `(tx, rx)` from `futures::sync::mpsc::channel()`, -how can the sender (`tx`) know when the receiver (`rx`) actually wants more -work to be produced? Just because there is room in the channel buffer -doesn't mean the work would be used by the receiver. - -This is where something like `want` comes in. Added to a channel, you can -make sure that the `tx` only creates the message and sends it when the `rx` -has `poll()` for it, and the buffer was empty. - -## License - -`want` is provided under the MIT license. See [LICENSE](LICENSE). 
diff -Nru s390-tools-2.31.0/rust-vendor/want/src/lib.rs s390-tools-2.33.1/rust-vendor/want/src/lib.rs --- s390-tools-2.31.0/rust-vendor/want/src/lib.rs 2024-02-06 12:28:09.000000000 +0100 +++ s390-tools-2.33.1/rust-vendor/want/src/lib.rs 1970-01-01 01:00:00.000000000 +0100 @@ -1,579 +0,0 @@ -#![doc(html_root_url = "https://docs.rs/want/0.3.1")] -#![deny(warnings)] -#![deny(missing_docs)] -#![deny(missing_debug_implementations)] - -//! A Futures channel-like utility to signal when a value is wanted. -//! -//! Futures are supposed to be lazy, and only starting work if `Future::poll` -//! is called. The same is true of `Stream`s, but when using a channel as -//! a `Stream`, it can be hard to know if the receiver is ready for the next -//! value. -//! -//! Put another way, given a `(tx, rx)` from `futures::sync::mpsc::channel()`, -//! how can the sender (`tx`) know when the receiver (`rx`) actually wants more -//! work to be produced? Just because there is room in the channel buffer -//! doesn't mean the work would be used by the receiver. -//! -//! This is where something like `want` comes in. Added to a channel, you can -//! make sure that the `tx` only creates the message and sends it when the `rx` -//! has `poll()` for it, and the buffer was empty. -//! -//! # Example -//! -//! ```nightly -//! # //#![feature(async_await)] -//! extern crate want; -//! -//! # fn spawn(_t: T) {} -//! # fn we_still_want_message() -> bool { true } -//! # fn mpsc_channel() -> (Tx, Rx) { (Tx, Rx) } -//! # struct Tx; -//! # impl Tx { fn send(&mut self, _: T) {} } -//! # struct Rx; -//! # impl Rx { async fn recv(&mut self) -> Option { Some(Expensive) } } -//! -//! // Some message that is expensive to produce. -//! struct Expensive; -//! -//! // Some futures-aware MPSC channel... -//! let (mut tx, mut rx) = mpsc_channel(); -//! -//! // And our `want` channel! -//! let (mut gv, mut tk) = want::new(); -//! -//! -//! // Our receiving task... -//! spawn(async move { -//! 
// Maybe something comes up that prevents us from ever -//! // using the expensive message. -//! // -//! // Without `want`, the "send" task may have started to -//! // produce the expensive message even though we wouldn't -//! // be able to use it. -//! if !we_still_want_message() { -//! return; -//! } -//! -//! // But we can use it! So tell the `want` channel. -//! tk.want(); -//! -//! match rx.recv().await { -//! Some(_msg) => println!("got a message"), -//! None => println!("DONE"), -//! } -//! }); -//! -//! // Our sending task -//! spawn(async move { -//! // It's expensive to create a new message, so we wait until the -//! // receiving end truly *wants* the message. -//! if let Err(_closed) = gv.want().await { -//! // Looks like they will never want it... -//! return; -//! } -//! -//! // They want it, let's go! -//! tx.send(Expensive); -//! }); -//! -//! # fn main() {} -//! ``` - -use std::fmt; -use std::future::Future; -use std::mem; -use std::pin::Pin; -use std::sync::Arc; -use std::sync::atomic::AtomicUsize; -// SeqCst is the only ordering used to ensure accessing the state and -// TryLock are never re-ordered. -use std::sync::atomic::Ordering::SeqCst; -use std::task::{self, Poll, Waker}; - - -use try_lock::TryLock; - -/// Create a new `want` channel. -pub fn new() -> (Giver, Taker) { - let inner = Arc::new(Inner { - state: AtomicUsize::new(State::Idle.into()), - task: TryLock::new(None), - }); - let inner2 = inner.clone(); - ( - Giver { - inner, - }, - Taker { - inner: inner2, - }, - ) -} - -/// An entity that gives a value when wanted. -pub struct Giver { - inner: Arc, -} - -/// An entity that wants a value. -pub struct Taker { - inner: Arc, -} - -/// A cloneable `Giver`. -/// -/// It differs from `Giver` in that you cannot poll for `want`. It's only -/// usable as a cancellation watcher. -#[derive(Clone)] -pub struct SharedGiver { - inner: Arc, -} - -/// The `Taker` has canceled its interest in a value. 
-pub struct Closed { - _inner: (), -} - -#[derive(Clone, Copy, Debug)] -enum State { - Idle, - Want, - Give, - Closed, -} - -impl From for usize { - fn from(s: State) -> usize { - match s { - State::Idle => 0, - State::Want => 1, - State::Give => 2, - State::Closed => 3, - } - } -} - -impl From for State { - fn from(num: usize) -> State { - match num { - 0 => State::Idle, - 1 => State::Want, - 2 => State::Give, - 3 => State::Closed, - _ => unreachable!("unknown state: {}", num), - } - } -} - -struct Inner { - state: AtomicUsize, - task: TryLock>, -} - -// ===== impl Giver ====== - -impl Giver { - /// Returns a `Future` that fulfills when the `Taker` has done some action. - pub fn want(&mut self) -> impl Future> + '_ { - Want(self) - } - - /// Poll whether the `Taker` has registered interest in another value. - /// - /// - If the `Taker` has called `want()`, this returns `Async::Ready(())`. - /// - If the `Taker` has not called `want()` since last poll, this - /// returns `Async::NotReady`, and parks the current task to be notified - /// when the `Taker` does call `want()`. - /// - If the `Taker` has canceled (or dropped), this returns `Closed`. - /// - /// After knowing that the Taker is wanting, the state can be reset by - /// calling [`give`](Giver::give). - pub fn poll_want(&mut self, cx: &mut task::Context<'_>) -> Poll> { - loop { - let state = self.inner.state.load(SeqCst).into(); - match state { - State::Want => { - return Poll::Ready(Ok(())); - }, - State::Closed => { - return Poll::Ready(Err(Closed { _inner: () })); - }, - State::Idle | State::Give => { - // Taker doesn't want anything yet, so park. - if let Some(mut locked) = self.inner.task.try_lock_explicit(SeqCst, SeqCst) { - - // While we have the lock, try to set to GIVE. - let old = self.inner.state.compare_exchange( - state.into(), - State::Give.into(), - SeqCst, - SeqCst, - ); - // If it's still the first state (Idle or Give), park current task. 
- if old == Ok(state.into()) { - let park = locked.as_ref() - .map(|w| !w.will_wake(cx.waker())) - .unwrap_or(true); - if park { - let old = mem::replace(&mut *locked, Some(cx.waker().clone())); - drop(locked); - if let Some(prev_task) = old { - // there was an old task parked here. - // it might be waiting to be notified, - // so poke it before dropping. - prev_task.wake(); - }; - } - return Poll::Pending; - } - // Otherwise, something happened! Go around the loop again. - } else { - // if we couldn't take the lock, then a Taker has it. - // The *ONLY* reason is because it is in the process of notifying us - // of its want. - // - // We need to loop again to see what state it was changed to. - } - }, - } - } - } - - /// Mark the state as idle, if the Taker currently is wanting. - /// - /// Returns true if Taker was wanting, false otherwise. - #[inline] - pub fn give(&self) -> bool { - // only set to IDLE if it is still Want - let old = self.inner.state.compare_exchange( - State::Want.into(), - State::Idle.into(), - SeqCst, - SeqCst); - old == Ok(State::Want.into()) - } - - /// Check if the `Taker` has called `want()` without parking a task. - /// - /// This is safe to call outside of a futures task context, but other - /// means of being notified is left to the user. - #[inline] - pub fn is_wanting(&self) -> bool { - self.inner.state.load(SeqCst) == State::Want.into() - } - - - /// Check if the `Taker` has canceled interest without parking a task. - #[inline] - pub fn is_canceled(&self) -> bool { - self.inner.state.load(SeqCst) == State::Closed.into() - } - - /// Converts this into a `SharedGiver`. 
- #[inline] - pub fn shared(self) -> SharedGiver { - SharedGiver { - inner: self.inner, - } - } -} - -impl fmt::Debug for Giver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Giver") - .field("state", &self.inner.state()) - .finish() - } -} - -// ===== impl SharedGiver ====== - -impl SharedGiver { - /// Check if the `Taker` has called `want()` without parking a task. - /// - /// This is safe to call outside of a futures task context, but other - /// means of being notified is left to the user. - #[inline] - pub fn is_wanting(&self) -> bool { - self.inner.state.load(SeqCst) == State::Want.into() - } - - - /// Check if the `Taker` has canceled interest without parking a task. - #[inline] - pub fn is_canceled(&self) -> bool { - self.inner.state.load(SeqCst) == State::Closed.into() - } -} - -impl fmt::Debug for SharedGiver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SharedGiver") - .field("state", &self.inner.state()) - .finish() - } -} - -// ===== impl Taker ====== - -impl Taker { - /// Signal to the `Giver` that the want is canceled. - /// - /// This is useful to tell that the channel is closed if you cannot - /// drop the value yet. - #[inline] - pub fn cancel(&mut self) { - self.signal(State::Closed) - } - - /// Signal to the `Giver` that a value is wanted. - #[inline] - pub fn want(&mut self) { - debug_assert!( - self.inner.state.load(SeqCst) != State::Closed.into(), - "want called after cancel" - ); - self.signal(State::Want) - } - - #[inline] - fn signal(&mut self, state: State) { - let old_state = self.inner.state.swap(state.into(), SeqCst).into(); - match old_state { - State::Idle | State::Want | State::Closed => (), - State::Give => { - loop { - if let Some(mut locked) = self.inner.task.try_lock_explicit(SeqCst, SeqCst) { - if let Some(task) = locked.take() { - drop(locked); - task.wake(); - } - return; - } else { - // if we couldn't take the lock, then a Giver has it. 
- // The *ONLY* reason is because it is in the process of parking. - // - // We need to loop and take the lock so we can notify this task. - } - } - }, - } - } -} - -impl Drop for Taker { - #[inline] - fn drop(&mut self) { - self.signal(State::Closed); - } -} - -impl fmt::Debug for Taker { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Taker") - .field("state", &self.inner.state()) - .finish() - } -} - -// ===== impl Closed ====== - -impl fmt::Debug for Closed { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Closed") - .finish() - } -} - -// ===== impl Inner ====== - -impl Inner { - #[inline] - fn state(&self) -> State { - self.state.load(SeqCst).into() - } -} - -// ===== impl PollFn ====== - -struct Want<'a>(&'a mut Giver); - - -impl Future for Want<'_> { - type Output = Result<(), Closed>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.0.poll_want(cx) - } -} - -#[cfg(test)] -mod tests { - use std::thread; - use tokio_sync::oneshot; - use super::*; - - fn block_on(f: F) -> F::Output { - tokio_executor::enter() - .expect("block_on enter") - .block_on(f) - } - - #[test] - fn want_ready() { - let (mut gv, mut tk) = new(); - - tk.want(); - - block_on(gv.want()).unwrap(); - } - - #[test] - fn want_notify_0() { - let (mut gv, mut tk) = new(); - let (tx, rx) = oneshot::channel(); - - thread::spawn(move || { - tk.want(); - // use a oneshot to keep this thread alive - // until other thread was notified of want - block_on(rx).expect("rx"); - }); - - block_on(gv.want()).expect("want"); - - assert!(gv.is_wanting(), "still wanting after poll_want success"); - assert!(gv.give(), "give is true when wanting"); - - assert!(!gv.is_wanting(), "no longer wanting after give"); - assert!(!gv.is_canceled(), "give doesn't cancel"); - - assert!(!gv.give(), "give is false if not wanting"); - - tx.send(()).expect("tx"); - } - - /* - /// This tests that if the Giver moves tasks after parking, 
- /// it will still wake up the correct task. - #[test] - fn want_notify_moving_tasks() { - use std::sync::Arc; - use futures::executor::{spawn, Notify, NotifyHandle}; - - struct WantNotify; - - impl Notify for WantNotify { - fn notify(&self, _id: usize) { - } - } - - fn n() -> NotifyHandle { - Arc::new(WantNotify).into() - } - - let (mut gv, mut tk) = new(); - - let mut s = spawn(poll_fn(move || { - gv.poll_want() - })); - - // Register with t1 as the task::current() - let t1 = n(); - assert!(s.poll_future_notify(&t1, 1).unwrap().is_not_ready()); - - thread::spawn(move || { - thread::sleep(::std::time::Duration::from_millis(100)); - tk.want(); - }); - - // And now, move to a ThreadNotify task. - s.into_inner().wait().expect("poll_want"); - } - */ - - #[test] - fn cancel() { - // explicit - let (mut gv, mut tk) = new(); - - assert!(!gv.is_canceled()); - - tk.cancel(); - - assert!(gv.is_canceled()); - block_on(gv.want()).unwrap_err(); - - // implicit - let (mut gv, tk) = new(); - - assert!(!gv.is_canceled()); - - drop(tk); - - assert!(gv.is_canceled()); - block_on(gv.want()).unwrap_err(); - - // notifies - let (mut gv, tk) = new(); - - thread::spawn(move || { - let _tk = tk; - // and dropped - }); - - block_on(gv.want()).unwrap_err(); - } - - /* - #[test] - fn stress() { - let nthreads = 5; - let nwants = 100; - - for _ in 0..nthreads { - let (mut gv, mut tk) = new(); - let (mut tx, mut rx) = mpsc::channel(0); - - // rx thread - thread::spawn(move || { - let mut cnt = 0; - poll_fn(move || { - while cnt < nwants { - let n = match rx.poll().expect("rx poll") { - Async::Ready(n) => n.expect("rx opt"), - Async::NotReady => { - tk.want(); - return Ok(Async::NotReady); - }, - }; - assert_eq!(cnt, n); - cnt += 1; - } - Ok::<_, ()>(Async::Ready(())) - }).wait().expect("rx wait"); - }); - - // tx thread - thread::spawn(move || { - let mut cnt = 0; - let nsent = poll_fn(move || { - loop { - while let Ok(()) = tx.try_send(cnt) { - cnt += 1; - } - match gv.poll_want() { - 
Ok(Async::Ready(_)) => (), - Ok(Async::NotReady) => return Ok::<_, ()>(Async::NotReady), - Err(_) => return Ok(Async::Ready(cnt)), - } - } - }).wait().expect("tx wait"); - - assert_eq!(nsent, nwants); - }).join().expect("thread join"); - } - } - */ -} diff -Nru s390-tools-2.31.0/scripts/dbginfo.sh s390-tools-2.33.1/scripts/dbginfo.sh --- s390-tools-2.31.0/scripts/dbginfo.sh 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/scripts/dbginfo.sh 2024-05-28 08:26:36.000000000 +0200 @@ -15,12 +15,12 @@ ######################################## # Global used variables readonly SCRIPTNAME="${0##*/}" # general name of this script -readonly STARTDIR="$(pwd)" # save calling directory +readonly FULLPATHSCRIPT="$(readlink -f "${0}")" # readonly DATETIME="$(date +%Y-%m-%d-%H-%M-%S 2>/dev/null)" readonly DOCKER=$(if type docker >/dev/null 2>&1; then echo "YES"; else echo "NO"; fi) readonly DUMP2TAR_OK=$(if type dump2tar >/dev/null 2>&1; then echo "YES"; else echo "NO"; fi) -readonly HW="$(uname -i 2>/dev/null)" +readonly HW="$(uname -m 2>/dev/null)" readonly IFS_ORI="${IFS}" # retrieve and split kernel version readonly KERNEL_BASE="$(uname -r 2>/dev/null)" @@ -1420,9 +1420,8 @@ create_package() { local rc_tar pr_syslog_stdout ${step_num} "Finalizing: Creating archive with collected data" - # get a copy of the script used - enabled for relative path calls - cd "${STARTDIR}" - cp -p "${BASH_SOURCE[0]}" "${WORKPATH}" + # get a copy of the script used + cp -p "${FULLPATHSCRIPT}" "${WORKPATH}" # create the archive cd "${WORKDIR_BASE}" touch "${WORKARCHIVE}" diff -Nru s390-tools-2.31.0/scripts/dumpconf s390-tools-2.33.1/scripts/dumpconf --- s390-tools-2.31.0/scripts/dumpconf 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/scripts/dumpconf 2024-05-28 08:26:36.000000000 +0200 @@ -224,7 +224,7 @@ { DEV="$(CheckDeviceString $DEVICE)" if [ "$DEV" != "" ]; then - echo $DEV > $1/$2/device + echo $DEV > $1/ccw/device else RETVAL=1 pr_error "ERROR: Invalid DEVICE '$DEVICE'." 
$ERRMSG @@ -232,11 +232,33 @@ fi } +setup_eckd_device() +{ + DEV="$(CheckDeviceString $DEVICE)" + if [ "$DEV" != "" ]; then + echo $DEV > $1/eckd/device + else + RETVAL=1 + pr_error "ERROR: Invalid DEVICE '$DEVICE'." $ERRMSG + return + fi + echo $BOOTPROG > $1/eckd/bootprog 2>/dev/null || RETVAL=1 + if [ $RETVAL -eq 1 ]; then + pr_error "ERROR: Invalid BOOTPROG '$BOOTPROG'." $ERRMSG + return + fi + echo $BR_CHR > $1/eckd/br_chr 2>/dev/null || RETVAL=1 + if [ $RETVAL -eq 1 ]; then + pr_error "ERROR: Invalid BR_CHR '$BR_CHR'." $ERRMSG + return + fi +} + setup_fcp_device() { DEV="$(CheckDeviceString $DEVICE)" if [ "$DEV" != "" ]; then - echo $DEV > $1/$2/device + echo $DEV > $1/fcp/device else RETVAL=1 pr_error "ERROR: Invalid DEVICE '$DEVICE'." $ERRMSG @@ -302,13 +324,13 @@ case "$REIPL_TYPE" in eckd) - setup_ccw_device $REIPL_CONFIG_DIR $REIPL_TYPE + setup_eckd_device $REIPL_CONFIG_DIR ;; ccw) - setup_ccw_device $REIPL_CONFIG_DIR $REIPL_TYPE + setup_ccw_device $REIPL_CONFIG_DIR ;; fcp) - setup_fcp_device $REIPL_CONFIG_DIR $REIPL_TYPE + setup_fcp_device $REIPL_CONFIG_DIR ;; nvme) setup_nvme_device $REIPL_CONFIG_DIR @@ -336,13 +358,13 @@ { case "$DUMP_TYPE" in eckd) - setup_ccw_device $DUMP_CONFIG_DIR $DUMP_TYPE + setup_eckd_device $DUMP_CONFIG_DIR ;; ccw) - setup_ccw_device $DUMP_CONFIG_DIR $DUMP_TYPE + setup_ccw_device $DUMP_CONFIG_DIR ;; fcp) - setup_fcp_device $DUMP_CONFIG_DIR $DUMP_TYPE + setup_fcp_device $DUMP_CONFIG_DIR ;; nvme) setup_nvme_device $DUMP_CONFIG_DIR diff -Nru s390-tools-2.31.0/tape390/tape390_crypt.8 s390-tools-2.33.1/tape390/tape390_crypt.8 --- s390-tools-2.31.0/tape390/tape390_crypt.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/tape390/tape390_crypt.8 2024-05-28 08:26:36.000000000 +0200 @@ -12,10 +12,10 @@ [OPTION] [DEVICE] .SH DESCRIPTION -.B tape390_crypt +.B tape390_crypt exploits encryption features available in zSeries tape devices. 
It can be used to enable or disable tape encryption and to set -KEK (Key Encrypting Key) labels. +KEK (Key Encrypting Key) labels. .SH OPTIONS .TP @@ -28,51 +28,51 @@ .TP .BR "\-q" " or " "\-\-query" -Print current encryption status of the specified tape device and of the +Print current encryption status of the specified tape device and of the loaded medium. -If encryption is on and the medium is encrypted , +If encryption is on and the medium is encrypted , additional information on the encryption keys is displayed. .TP -.BR "\-e " { "on" | "off" } " " or " \-\-encryption=" { "on" | "off" } +.BR "\-e " { "on" | "off" } " " or " \-\-encryption=" { "on" | "off" } sets tape encryption on or off. .TP .BR "\-k " [ "label" | "hash" ] " " or " \-\-key=" [ "label" | "hash" ] sets tape encryption keys. -.br +.br specifies the KEK (Key Encrypting Key), which can be maximal 64 characters long. .br The store type (either label or hash) specifies how the KEK in is to be stored on the tape medium. Since labels are -human readable strings and hence more user friendly than hashes, +human readable strings and hence more user friendly than hashes, the default store type is label. .br -The -k option can only be specified, if the tape medium is at load point. +The \-k option can only be specified, if the tape medium is at load point. .br -While processing the -k option, the tape medium is initialized and all -data eventually contained on the tape medium is lost. -To avoid inadvertent data loss a prompt message is issued asking the user -whether he or she really wants to proceed. +While processing the \-k option, the tape medium is initialized and all +data eventually contained on the tape medium is lost. +To avoid inadvertent data loss a prompt message is issued asking the user +whether he or she really wants to proceed. .br -The -k option can be specified maximal twice, because on the tape medium -maximal two EEDKs (External Encrypted Data Keys) can be stored. 
+The \-k option can be specified maximal twice, because on the tape medium +maximal two EEDKs (External Encrypted Data Keys) can be stored. If specified once, two identical EEDKs are stored. .br - is a character separating the KEK in from the store type + is a character separating the KEK in from the store type (either label or hash). This -delimiter can be specified with the -d option as explained below. +delimiter can be specified with the \-d option as explained below. .TP .BR "\-d " " or " "\-\-delimiter= specifies the character which separates the KEK in from the store type (either label or hash). The default delimiter is : (colon). -The \-d option can only be specified together with the \-k option. +The \-d option can only be specified together with the \-k option. .TP .BR "\-f " " or " "\-\-force -specifies that no prompt message is to be issued before writing the KEK +specifies that no prompt message is to be issued before writing the KEK information and initializing the tape medium. The \-f option can only be specified together with the \-k option. @@ -81,28 +81,28 @@ specifies the device node of the tape device. .SH EXAMPLES -1. Scenario: +1. Scenario: .br -mount non-encrypted tape and write data with the -.B "default" +mount non-encrypted tape and write data with the +.B "default" KEKs: .br -tape390_crypt -e on /dev/ntibm0 +tape390_crypt \-e on /dev/ntibm0 .br tar cfz /dev/ntibm0 /data -2. Scenario: +2. Scenario: .br -mount non-encrypted tape and write data with +mount non-encrypted tape and write data with .B "specific" KEKs: .br -tape390_crypt -k my_first_key -k my_second_key:hash /dev/ntibm0 +tape390_crypt \-k my_first_key \-k my_second_key:hash /dev/ntibm0 .br tar cfz /dev/ntibm0 /data -3. Scenario: +3. 
Scenario: .br mount tape and display current encryption status: .br -tape390_crypt -q /dev/ntibm0 +tape390_crypt \-q /dev/ntibm0 diff -Nru s390-tools-2.31.0/tape390/tape390_display.8 s390-tools-2.33.1/tape390/tape390_display.8 --- s390-tools-2.31.0/tape390/tape390_display.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/tape390/tape390_display.8 2024-05-28 08:26:36.000000000 +0200 @@ -11,17 +11,17 @@ .SH SYNOPSIS .TP 16 .B tape390_display -.RB [ -b | --blink ] +.RB [ \-b | \-\-blink ] .br -.RB [ -h | --help ] +.RB [ \-h | \-\-help ] .br -.RB [ -l | --load ] +.RB [ \-l | \-\-load ] .br -.RB [ -t | --type ] +.RB [ \-t | \-\-type ] .br -.RB [ -q | --quiet ] +.RB [ \-q | \-\-quiet ] .br -.RB [ -v | --version ] +.RB [ \-v | \-\-version ] .br .IR message1 .RB [ @@ -42,28 +42,28 @@ .SH OPTIONS .TP 8 -\fB-h\fR or \fB--help\fR +\fB\-h\fR or \fB\-\-help\fR Print help text. .TP -\fB-b\fR or \fB--blink\fR +\fB\-b\fR or \fB\-\-blink\fR Display \fImessage1\fR repeatedly for 2 seconds at half-second intervals. This option is ignored if two messages are specified on command line. .TP -\fB-l\fR or \fB--load\fR +\fB\-l\fR or \fB\-\-load\fR If this option is given the display request will cause a load request to be sent to the automatic tape loader (if installed). The next indexed tape will then be loaded if the loader is in 'system' mode. Otherwise the request is ignored. .TP -\fB-q\fR or \fB--quiet\fR +\fB\-q\fR or \fB\-\-quiet\fR This option causes the command to run in quiet mode which will suppress all warning messages. .TP -\fB-t\fR \fItype\fR or \fB--type\fR \fItype\fR +\fB\-t\fR \fItype\fR or \fB\-\-type\fR \fItype\fR Set the type of the message to be displayed. This influences how long the message(s) stay on the display. Available types are: @@ -94,7 +94,7 @@ .RE .TP -\fB-v\fR or \fB--version\fR +\fB\-v\fR or \fB\-\-version\fR Print the S/390 tools package version, the utility version and exit. 
.TP 8 @@ -105,7 +105,7 @@ .br If two messages are specified, they are alternated on the display. Each message is then displayed repeatedly for 2 seconds -at half-second intervals. +at half-second intervals. .TP \fBdevice\fR @@ -115,7 +115,7 @@ .br .SH EXAMPLES -\fBtape390_display --blink --type unload "TESTING" /dev/ntibm\fR\fI0\fR +\fBtape390_display \-\-blink \-\-type unload "TESTING" /dev/ntibm\fR\fI0\fR .RS Let the message TESTING blink on the display of the tape unit until the tape is removed (or another message is given). If there is no tape in the @@ -127,13 +127,13 @@ until the tape is moved, ejected or (if there isn't a tape in the device) inserted. .RE -\fBtape390_display --type reload "UNLOAD" "LOAD" /dev/ntibm\fR\fI0\fR +\fBtape390_display \-\-type reload "UNLOAD" "LOAD" /dev/ntibm\fR\fI0\fR .RS The UNLOAD message will be displayed if there is a tape inserted and as long as the tape isn't removed. After that the LOAD message is displayed until a tape is inserted. If there is no tape inserted when the command .RE -\fBtape390_display --load --type load "LOADING" /dev/ntibm\fR\fI0\fR +\fBtape390_display \-\-load \-\-type load "LOADING" /dev/ntibm\fR\fI0\fR .RS Causes the device to display the message LOADING until a tape is inserted and to instruct the automatic stacker to load the next tape (if the mode diff -Nru s390-tools-2.31.0/tunedasd/man/tunedasd.8 s390-tools-2.33.1/tunedasd/man/tunedasd.8 --- s390-tools-2.31.0/tunedasd/man/tunedasd.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/tunedasd/man/tunedasd.8 2024-05-28 08:26:36.000000000 +0200 @@ -15,28 +15,28 @@ is a tool to adjust tunable parameters on DASD devices. It can be used to modify the status of the device and to adjust some performance relevant settings. -The DEVICE is the node of the device (e.g. '/dev/dasda') or a list of +The DEVICE is the node of the device (e.g. '/dev/dasda') or a list of devices separated by a space character. 
Following functions are supported: .IP " -" -Get and reset the performance statistics profile per device. +Get and reset the performance statistics profile per device. .IP " -" Get and set the caching mode of the storage server. -This is only available for ECKD devices, because it is implemented using +This is only available for ECKD devices, because it is implemented using ECKD Channel Program commands. .IP " -" Reserve/Release/Unconditional Reserve a device. -This is only available for ECKD devices, because it is based on +This is only available for ECKD devices, because it is based on the Control Unit functions. .P -If +If .B tunedasd is called without parameter, it prints a short usage information and exits. .SH OPTIONS .TP .BR "\-v" " or " "\-\-version" Print version information, then exit. -.TP +.TP .SH COMMANDS .TP .BR "\-h" " or " "\-\-help" @@ -44,48 +44,48 @@ .TP .BR "\-g" " or " "\-\-get_cache" Get current storage server caching behavior. -.TP +.TP .BR "\-c" " or " "\-\-cache" " " Define caching behavior on storage server. -The following caching behaviors are currently supported by modern +The following caching behaviors are currently supported by modern Enterprise Storage Servers (ESS): .br -.IP " \(bu" 12 -.I normal +.IP " \(bu" 12 +.I normal ('Normal Cache replacement'), -.IP " \(bu" 12 -.I bypass +.IP " \(bu" 12 +.I bypass (Bypass Cache), -.IP " \(bu" 12 +.IP " \(bu" 12 .I inhibit (Inhibit Cache Loading), -.IP " \(bu" 12 +.IP " \(bu" 12 .I sequential (Sequential Access), -.IP " \(bu" 12 -.I prestage +.IP " \(bu" 12 +.I prestage (Sequential Prestage), -.IP " \(bu" 12 +.IP " \(bu" 12 .I record (Record Access) .IP "" 7 More details about caching can be found in the 'Storage Control Reference' of the attached storage server. -.TP +.TP .BR "\-n" " or " "\-\-no_cyl" " " Number of cylinders to be cached (only valid together with --cache). -.TP +.TP .BR "\-S" " or " "\-\-reserve" Reserve device. -.TP +.TP .BR "\-L" " or " "\-\-release" Release device. 
-.TP +.TP .BR "\-O" " or " "\-\-slock" -Unconditional reserve device. +Unconditional reserve device. .br -.B Note: +.B Note: Use with care, this breaks an existing reserve. -.TP +.TP .BR "\-\-enable-stats" Enable performance statistics globally. .br @@ -99,10 +99,10 @@ .BR "\-P" " or " "\-\-profile" Print profile info of device. .br -.B Note: +.B Note: Profile info must be available and enabled in the kernel to get valid results -out of the profile commands. Enable statistics via \fB--enable-stats\fR. -.TP +out of the profile commands. Enable statistics via \fB\-\-enable\-stats\fR. +.TP .BR "\-Q" " or " "\-\-query_reserve" Query the current reserve status of the device. The following states are defined: @@ -129,37 +129,37 @@ .br Following rows are supported: .br -.IP " \(bu" 12 -.I reqs +.IP " \(bu" 12 +.I reqs (# of dasd I/O requests), -.IP " \(bu" 12 -.I sects +.IP " \(bu" 12 +.I sects (# of 512Byte sectors), -.IP " \(bu" 12 -.I sizes +.IP " \(bu" 12 +.I sizes (Histogram of sizes), -.IP " \(bu" 12 -.I total +.IP " \(bu" 12 +.I total (Histogram of I/O times), -.IP " \(bu" 12 -.I totsect +.IP " \(bu" 12 +.I totsect (Histogram of I/O times per sector), -.IP " \(bu" 12 -.I start +.IP " \(bu" 12 +.I start (Histogram of I/O time till ssch), -.IP " \(bu" 12 -.I irq +.IP " \(bu" 12 +.I irq (istogram of I/O time between ssch and irq), -.IP " \(bu" 12 -.I irqsect +.IP " \(bu" 12 +.I irqsect (istogram of I/O time between ssch and irq per sector), -.IP " \(bu" 12 -.I end +.IP " \(bu" 12 +.I end (Histogram of I/O time between irq and end), -.IP " \(bu" 12 -.I queue +.IP " \(bu" 12 +.I queue (# of req in chanq at enqueuing) -.TP +.TP .BR "\-R" " or " "\-\-reset_prof" Reset profile info of device. .TP @@ -185,34 +185,34 @@ .\" .\".TP .\".BR "\-o" " or " "\-\-online" -.\"Set the device online using the sysfs interface. +.\"Set the device online using the sysfs interface. .SH EXAMPLE -1. Scenario: Get profile info for a device. +1. Scenario: Get profile info for a device. 
.br - tunedasd -P /dev/dasdc - tunedasd -PI irq /dev/dasdc + tunedasd \-P /dev/dasdc + tunedasd \-PI irq /dev/dasdc -.br +.br 2. Scenario: Set device caching mode to 1 cylinder 'prestage'. .br - tunedasd -c prestage -n 1 /dev/dasdc + tunedasd \-c prestage \-n 1 /dev/dasdc .br 3. Scenario: Reset failed channel path with CHPID 45 .br - tunedasd -p 45 /dev/dasdc + tunedasd \-p 45 /dev/dasdc .br 4. Scenario: Swap copy pair 0.0.9700 and 0.0.9740 .br - tunedasd -s 0.0.9700,0.0.9740 /dev/dasdc + tunedasd \-s 0.0.9700,0.0.9740 /dev/dasdc .br .SH "SEE ALSO" -.BR dasdview (8), -.BR dasdfmt (8), +.BR dasdview (8), +.BR dasdfmt (8), .BR fdasd (8) diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/chpstat.8 s390-tools-2.33.1/zconf/chp/chpstat/chpstat.8 --- s390-tools-2.31.0/zconf/chp/chpstat/chpstat.8 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/chpstat.8 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,739 @@ +.\" Copyright 2024 IBM Corp. +.\" s390-tools is free software; you can redistribute it and/or modify +.\" it under the terms of the MIT license. See LICENSE for details. +.TH chpstat 8 "" s390-tools chpstat + + +.SH NAME +chpstat - Display channel-path statistics + + +.SH SYNOPSIS +.B chpstat +.RI [ OPTIONS ] +.RI [ ACTIONS ] +.RI [ CHPIDS ] + + +.SH DESCRIPTION +Use +.B chpstat +to view channel-path statistics such as utilization and throughput, +and to query and control the status of the channel-path statistics function. +.br + +When run without further options, data for all channel-paths is displayed +repeatedly with a 5 second delay in table format. You can limit output to +specific channel-paths by listing the associated CHPIDs on the command line. +.br + +Note: Channel-path statistics are only available on systems running in an +LPAR or DPM partition. +.br + +.SS "Output options" + +Without options, +.B chpstat +displays performance statistics data in table format. 
Use options +.BR \-\-columns ", " \-\-all " and " \-\-cmg +to change the list of columns to display. +.br + +You can use option +.B \-\-format +to select a machine-readable list output format (JSON, key-value pairs, or +comma-separated values), and option +.B \-\-keys +to restrict output to only a specific set of data keys. +.br + +Additional options can be used to display the raw source data used for +calculating channel-path statistics: +.br + +.IP \(bu 3 +.B Characteristics: +Static data describing base characteristics of a channel-path. +.br + +Use option +.B \-\-chars +to view characteristics data. +.PP +.IP \(bu 3 +.B Utilization: +Raw data about a channel-paths current utilization. This data is updated +regularly at model-dependent intervals that typically last a few seconds +(see key "interval"). + +Use option +.B \-\-util +to view raw utilization data. +.PP +.IP \(bu 3 +.B Metrics: +Performance statistics derived from utilization and characteristics data. +.br + +Note: Metrics data is calculated as averages over all utilization update +intervals that fall within the selected chpstat update interval. +.br + +Use option +.B \-\-metrics +to view metrics data. +.PP + +.SS Authorization + +A special authorization setting needs to be enabled for a system to be able +to access channel-path statistics data. + +.B Classic mode LPAR + +.IP 1. 3 +Logon on to the Hardware Management Console (HMC) +.PP +.IP 2. 3 +Select the target LPAR +.PP +.IP 3. 3 +Start "Customize Activation Profiles" HMC Task +.PP +.IP 4. 3 +Enable "Security/Global performance data control" setting +.PP + +Note: The LPAR needs to be deactivated/re-activated for the change to +become effective. + +.B DPM Partition +.IP 1. 3 +Logon on to the Hardware Management Console (HMC) +.PP +.IP 2. 3 +Select the target partition +.PP +.IP 3. 3 +Start "Partition Details" HMC Task +.PP +.IP 4. 3 +Enable "Controls/Access global performance data" setting +.PP +.IP 5. 
3 +Press "Apply" - the change will be active immediately +.PP + +.SS "Channel-Measurement Groups" + +A Channel-Measurement Group (CMG) is a number associated with each channel-path +that determines the type of available statistics data for that channel-path. + + +.SH ACTIONS +.BR \-s ", " \-\-status +.br +.RS +Show channel-path statistics status + +Possible status values are: +.IP \(bu 3 +.B enabled: +Statistics facility is active +.PP +.IP \(bu 3 +.B disabled: +Statistics facility is inactive +.PP +.IP \(bu 3 +.B unsupported: +Statistics facility is not supported. +.br + +Note that channel-path statistics are only available when running in LPAR or +DPM partition. +.PP +.RE + +.BR \-e ", " \-\-enable +.br +.RS +Enable channel-path statistics +.br + +After booting Linux, the channel-path statistics facility starts in disabled +state. Use option +.B \-\-enable +to enable it. +.RE + +.BR \-d ", " \-\-disable +.br +.RS +Disable channel-path statistics +.RE + +.BR \-l ", " \-\-list\-columns +.br +.RS +List available table columns +.br + +Use this option to get a list of available table column names and associated +short description. A comma-separated list of column names can be used with +option +.B \-\-columns +to select the columns to display in table output format. +.RE + +.BR \-L ", " \-\-list\-keys +.br +.RS +List available data keys +.br + +Use this option to get a list of available keys that can be used with option +.B \-\-keys +to select data pairs to display in machine-readable output format. +.RE + +.BR \-h ", " \-\-help +.br +.RS +Print usage information, then exit +.RE + +.BR \-v ", " \-\-version +.br +.RS +Print version information, then exit +.RE + + +.SH OPTIONS +.BR \-n ", " \-\-iterations +.I NUM +.br +.RS +Display NUM reports before ending +.br + +By default, +.B chpstat +shows output repeatedly until interrupted. Use option +.B \-\-iterations +to specify how many times output should be updated before exiting. 
A value of +0 indicates an unlimited number of iterations. +.RE + +.BR \-i ", " \-\-interval +.I NUM +.br +.RS +Pause NUM seconds between display +.br + +Use this option to specify the number of seconds to wait between output updates. +Valid values are between 1 and 2140. + +Note: It is recommended to use interval values of at least the model-dependent +statistics update interval (see key "interval"). +.br +.RE + +.BR \-c ", " \-\-columns +.IR COL ,.. +.br +.RS +Select table columns to show in table output format +.br + +To get a list of available columns, use option +.BR \-\-list\-columns . +If a channel-path does not provide data for a selected column, the +corresponding table field is set to '\-'. +.RE + +.BR \-k ", " \-\-keys +.IR KEY ,.. +.br +.RS +Select keys to show in machine-readable output format +.br + +Use this option to select the data to show in machine-readable output +format. To get a list of available keys, use option +.BR \-\-list\-keys . +If a channel-path does not provide data for a selected key, the +corresponding value is set to "". +.RE + +.BR \-a ", " \-\-all +.br +.RS +Show all table columns and key data +.br + +Use this option to select all supported columns and keys for output. +.RE + +.BR \-\-scale +.I UNIT +.br +.RS +Scale BPS values by UNIT + +Use this option to specify a value by which bytes-per-seconds (BPS) values - +such as read and write throughput - are scaled in table output format. Accepted +values are: + +.IP \(bu 3 +.B auto: +Scale automatically to fit value into each column. This is the default. +.PP +.IP \(bu 3 +.IR number : +Scale by +.I number +.PP +.IP \(bu 3 +.B K: +Scale by 1024 (KiB) +.PP +.IP \(bu 3 +.B M: +Scale by 1,048,576 (MiB) +.PP +.IP \(bu 3 +.B G: +Scale by 1,073,741,824 (GiB) +.PP +.IP \(bu 3 +.B T: +Scale by 1,099,511,627,776 (TiB) +.PP +.RE + +.BR \-\-cmg +.IR CMG ,.. 
+.br +.RS +Show data for specified CMGs only +.br + +Use this option to limit output to CHPIDs with the specified +Channel-Measurement-Groups (CMG). This option also selects table columns +suitable for the specified CMGs. +.RE + +.BR \-\-format +.I FORMAT +.br +.RS +Show data in specified FORMAT + +Use this option to show output in a machine-readable format. +.I FORMAT +can be either of: + +.IP \(bu 3 +.B json: +Single JavaScript Object Notation (JSON) data structure + +Data for all iterations is formatted as one JSON data structure formatted +in multiple lines to make them more readable by humans. +.br + +See section "OUTPUT FORMAT" for more details. +.br +.PP +.IP \(bu 3 +.B json\-seq: +Sequence of JSON data structures + +Data for each iteration is formatted as separate JSON data structure prefixed +with an ASCII Record Separator character (0x1e) and suffixed with an ASCII Line +Feed character (0x0a) in accordance with RFC7464. +.br + +See section "OUTPUT FORMAT" for more details. +.br +.PP +.IP \(bu 3 +.B pairs: +Textual key=value pairs + +By default, keys have a prefix that makes them unique across one tool +invocation. This prefix can be removed by specifying option +.BR \-\-no\-prefix . +.PP +.IP \(bu 3 +.B csv: +Comma-Separated-Value (CSV) list +.br + +All values are quoted with double-quotes and separated by commas. The first +line of output contains a list of headings. Subsequent lines each represent +data for one CHPID in one iteration. +.PP +.RE + +.BR \-\-chars +.br +.RS +List channel-path measurement characteristics + +Use this option to display static data describing base characteristics of a +channel-path. This option implies a machine-readable format. +.RE + +.BR \-\-util +.br +.RS +List unprocessed utilization data + +Use this option to display raw channel-path utilization data that is updated +regularly by firmware at model-dependent intervals that typically last a few +seconds (see key "interval"). 
This option implies machine-readable output +format. +.RE + +.BR \-\-metrics +.br +.RS +List performance metrics +.br + +Use this option to display performance statistics data derived from utilization +and characteristics data. This option implies machine-readable output format. +.br + +Note: Metrics data is calculated as averages over all utilization update +intervals that fall within the selected chpstat update interval. +.RE + +.BR \-\-no\-ansi +.br +.RS +Do not use ANSI terminal codes in output +.br + +When specified, this option suppresses the use of ANSI terminal control +characters in table output format. Such characters are used to clear the +screen, and to invert the colors for table heading display. Use this option +when an output terminal does not support these control characters. +.RE + +.BR \-\-no\-prefix +.br +.RS +Hide key prefix in pairs output format + +By default, keys that are shown in the "pairs" machine-readable output format +have a prefix that makes them unique across a tool invocation. Use option +.B \-\-no\-prefix +to remove this prefix. +.RE + + +.SH "OUTPUT FORMAT" +This section contains additional information for some of the supported +output formats. + +.SS json + +JSON output consists of a top-level object with the following properties +(key-value pairs): + +.IP \(bu 3 +.BR meta : +Tool meta-data including API level, version, host name, and time of invocation +.PP +.IP \(bu 3 +.BR chpstat : +Channel-path statistics data +.PP + +Note: For a given API level, the output format is guaranteed to remain +compatible, that is: + +.IP \(bu 3 +required child-objects are not removed +.PP +.IP \(bu 3 +format and contents of existing objects and properties are retained +.PP +.IP \(bu 3 +new child-objects and properties may be added +.PP + +Channel-path statistics data is stored as an array of iteration objects under +the "chpstat" property in the top-level object. 
+ +Each iteration object contains a property named "channel_paths" the value of +which consists of an array of objects representing data for one channel-path +during one iteration. Objects for a single channel-path contain further +child-objects that group related properties together. +.br + +The following object properties are required and will always be part of +JSON output: + +.IP \(bu 3 +For iteration objects: "iteration", "time", "time_epoch", and "channel_paths" +.br +.PP + +.IP \(bu 3 +For channel-path objects: "chpid", "type", "cmg", "shared" +.br +.PP + +All other properties are optional and will be omitted from JSON output if the +associated value is unavailable. If option +.BR --all +is specified, unavailable properties are also listed as either empty strings or +negative values, depending on the value type. + +Example JSON output for single iteration and channel-path with all properties: +.br + +{ +.br + "meta": { +.br + "api_level": 1, +.br + "version": "2.32.0", +.br + "host": "localhost", +.br + "time_epoch": 1714663282, +.br + "time": "2024-05-02 17:21:22+0200" +.br + }, +.br + "chpstat": [ +.br + { +.br + "iteration": 0, +.br + "time_epoch": 1714663282, +.br + "time": "2024-05-02 17:21:22+0200", +.br + "channel_paths": [ +.br + { +.br + "chpid": "0.00", +.br + "type": 0, +.br + "cmg": 0, +.br + "shared": 0, +.br + "speed": "", +.br + "characteristics": { +.br + "max_bus_cycles": 0, +.br + "max_channel_work_units": 0, +.br + "max_write_data_units": 0, +.br + "max_read_data_units": 0, +.br + "data_unit_size": 0, +.br + "data_unit_size_cpc": 0, +.br + "msg_unit_size": 0, +.br + "msg_unit_size_cpc": 0, +.br + }, +.br + "utilization": { +.br + "timestamp": 0, +.br + "bus_cycles_cpc": 0, +.br + "channel_work_units_cpc": 0, +.br + "channel_work_units": 0, +.br + "data_units_written_cpc": 0, +.br + "data_units_written": 0, +.br + "data_units_read_cpc": 0, +.br + "data_units_read": 0, +.br + "total_ficon_ops_cpc": 0, +.br + "total_deferred_ficon_ops_cpc": 
0, +.br + "sum_ficon_ops_cpc": 0, +.br + "total_hpf_ops_cpc": 0, +.br + "total_deferred_hpf_ops_cpc": 0, +.br + "sum_hpf_ops_cpc": 0, +.br + "channel_path_busy_time_cpc": 0, +.br + "channel_path_busy_time": 0, +.br + "msg_units_sent": 0, +.br + "msg_units_sent_cpc": 0, +.br + "unsuccessful_attempts_to_send": 0, +.br + "unavailable_receive_buffers": 0, +.br + "unavailable_receive_buffers_cpc": 0, +.br + "data_units_sent": 0, +.br + "data_units_sent_cpc": 0, +.br + }, +.br + "metrics": { +.br + "interval": 0.0, +.br + "util_total": 0.0, +.br + "util_part": 0.0, +.br + "util_bus": 0.0, +.br + "read_total": 0.0, +.br + "read_part": 0.0, +.br + "write_total": 0.0, +.br + "write_part": 0.0, +.br + "ficon_rate": 0.0, +.br + "ficon_active": 0.0, +.br + "ficon_defer": 0.0, +.br + "hpf_rate": 0.0, +.br + "hpf_active": 0.0, +.br + "hpf_defer": 0.0, +.br + "msg_rate_part": 0.0, +.br + "msg_rate_total": 0.0, +.br + "msg_size_part": 0.0, +.br + "msg_size_total": 0.0, +.br + "send_fail_part": 0.0, +.br + "rcv_fail_part": 0.0, +.br + "rcv_fail_total": 0.0, +.br + } +.br + } +.br + ] +.br + } +.br + ] +.br +} +.br + + +.SS json\-seq + +The json\-seq output format is a variation of the JSON output format described +above with the following differences: + +.IP \(bu 3 +Output consists of a sequence of top-level JSON objects, each contained in +single line with no indentation +.br + +.IP \(bu 3 +Each top-level object is prefixed by an ASCII Record Separator character (0x1e) +and suffixed with an ASCII Line Feed character (0x0a) in accordance with +RFC7464 +.br +.PP + +.IP \(bu 3 +The first object contains tool meta-data properties defined in the previous +section +.br +.PP + +.IP \(bu 3 +Subsequent objects each represent channel-path statistics data for one iteration +.br +.PP + + +.SH "EXIT CODES" +.TP +.B 0 +Program finished successfully +.PP +.TP +.B 1 +Usage error +.PP +.TP +.B 2 +A run\-time error occurred +.PP + + +.SH EXAMPLES +Display current channel-path statistics status in 
JSON format: +.RS 4 +$ +.B chpstat \-\-status \-\-format json +.br +.RE +.PP +. +Determine the model-dependent update interval for CHPID 0.f0: +.RS 4 +$ +.B chpstat \-\-keys interval \-n 1 \-\-no\-prefix 0.f0 \-\-format pairs +.br +.RE +.PP +. +Collect partition write throughput statistics for 1 hour in CSV format: +.RS 4 +$ +.B chpstat \-n 60 \-i 60 \-\-format csv \-\-key time,chpid,write_part +.br + + +.SH "SEE ALSO" +.BR lschp "(8), " chchp (8) diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/chpstat.c s390-tools-2.33.1/zconf/chp/chpstat/chpstat.c --- s390-tools-2.31.0/zconf/chp/chpstat/chpstat.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/chpstat.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,2153 @@ +/* + * chpstat - Tool to display channel-path statistics + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cmg.h" +#include "column.h" +#include "key.h" +#include "misc.h" + +#include "lib/util_base.h" +#include "lib/util_file.h" +#include "lib/util_fmt.h" +#include "lib/util_libc.h" +#include "lib/util_opt.h" +#include "lib/util_path.h" +#include "lib/util_prg.h" +#include "lib/util_rec.h" +#include "lib/zt_common.h" + +/* Output format API level. */ +#define API_LEVEL 1 + +/* Default interval in seconds between output. */ +#define DEFAULT_INTERVAL 5 + +/* Maximum interval in seconds after which timestamps wrap twice (approx.). */ +#define MAX_INTERVAL 2140 + +/* Maximum number of CHPIDS. */ +#define NUM_CHPIDS 256 + +/* Highest valid CHPID number. */ +#define MAX_CHPID 255 + +/* Sysfs paths. */ +#define SYS_CSS0 "devices/css0" +#define SYS_CM_ENABLE SYS_CSS0 "/cm_enable" + +/* ANSI X3.64 terminal control codes. 
*/ +#define ANSI_CLS "\e[2J" +#define ANSI_LOCATE(x, y) "\e[" STRINGIFY(x) ";" STRINGIFY(y) "H" +#define ANSI_BOLD "\e[1m" +#define ANSI_REVERSE "\e[7m" +#define ANSI_RESET "\e[0m" + +#define UNIT_DEC 1000UL +#define UNIT_BIN 1024UL +#define UNIT_AUTO 0 + +#define strlen_i(a) ((int)strlen(a)) +#define strlen_u(a) ((unsigned int)strlen(a)) + +/* Channel-path measurement facility status values. */ +enum cm_status_t { + CM_UNSUPPORTED, + CM_DISABLED, + CM_ENABLED, +}; + +/* Program information. */ +static const struct util_prg prg = { + .desc = "Use chpstat to view channel-path statistics such as " + "utilization and throughput, and to query and control the " + "status of the channel-path statistics function.\n" + "\n" + "When run without further options, data for all channel-paths " + "is displayed repeatedly with a " STRINGIFY(DEFAULT_INTERVAL) + " second delay in table format. You can limit output to " + "specific channel-paths by listing the associated CHPIDs on " + "the command line.", + .copyright_vec = { + { + .owner = "IBM Corp.", + .pub_first = 2024, + .pub_last = 2024, + }, + UTIL_PRG_COPYRIGHT_END + }, + .args = "[CHPIDS] [ACTIONS]", +}; + +enum { + OPT_STATUS = 's', + OPT_ENABLE = 'e', + OPT_DISABLE = 'd', + OPT_LIST_COLUMNS = 'l', + OPT_LIST_KEYS = 'L', + OPT_ITERATIONS = 'n', + OPT_INTERVAL = 'i', + OPT_COLUMNS = 'c', + OPT_KEYS = 'k', + OPT_ALL = 'a', + /* Options without short version below. */ + OPT_FORMAT = 0x80, /* First non-printable character. 
*/ + OPT_CHARS, + OPT_UTIL, + OPT_METRICS, + OPT_CMG, + OPT_SCALE, + OPT_NO_ANSI, + OPT_NO_PREFIX, + OPT_DEBUG, +}; + +enum command_t { + CMD_ENABLE = OPT_ENABLE, + CMD_DISABLE = OPT_DISABLE, + CMD_STATUS = OPT_STATUS, + CMD_LIST_COLUMNS = OPT_LIST_COLUMNS, + CMD_LIST_KEYS = OPT_LIST_KEYS, + CMD_TABLE, + CMD_LIST, +}; + +/* + * Configuration of command line options + */ +static struct util_opt opt_vec[] = { + { + .desc = "ACTIONS", + .flags = UTIL_OPT_FLAG_SECTION, + }, + { + .option = { "status", no_argument, NULL, OPT_STATUS }, + .desc = "Show channel-path statistics status", + }, + { + .option = { "enable", no_argument, NULL, OPT_ENABLE }, + .desc = "Enable channel-path statistics", + }, + { + .option = { "disable", no_argument, NULL, OPT_DISABLE }, + .desc = "Disable channel-path statistics", + }, + { + .option = { "list-columns", no_argument, NULL, + OPT_LIST_COLUMNS}, + .desc = "List available table columns", + }, + { + .option = { "list-keys", no_argument, NULL, OPT_LIST_KEYS}, + .desc = "List available data keys", + }, + UTIL_OPT_HELP, + UTIL_OPT_VERSION, + { + .desc = "OPTIONS", + .flags = UTIL_OPT_FLAG_SECTION, + }, + { + .option = { "iterations", required_argument, NULL, + OPT_ITERATIONS }, + .argument = "NUM", + .desc = "Display NUM reports before ending (0 for no end)", + }, + { + .option = { "interval", required_argument, NULL, + OPT_INTERVAL }, + .argument = "NUM", + .desc = "Pause NUM seconds between display", + }, + { + .option = { "columns", required_argument, NULL, OPT_COLUMNS }, + .argument = "COL,..", + .desc = "Show only specified columns in table output", + }, + { + .option = { "keys", required_argument, NULL, OPT_KEYS }, + .argument = "KEY,..", + .desc = "Show only data for specified keys in list output", + }, + { + .option = { "all", no_argument, NULL, OPT_ALL }, + .desc = "Show all table columns and key data", + }, + { + .option = { "scale", required_argument, NULL, OPT_SCALE }, + .argument = "UNIT", + .flags = UTIL_OPT_FLAG_NOSHORT, + 
.desc = "Scale BPS values by UNIT (number, suffix or auto)", + }, + { + .option = { "cmg", required_argument, NULL, OPT_CMG }, + .argument = "CMG,..", + .flags = UTIL_OPT_FLAG_NOSHORT, + .desc = "Show data for specified CMGs only", + }, + { + .option = { "format", required_argument, NULL, OPT_FORMAT }, + .argument = "FORMAT", + .flags = UTIL_OPT_FLAG_NOSHORT, + .desc = "List data in specified FORMAT (" FMT_TYPE_NAMES ")", + }, + { + .option = { "chars", no_argument, NULL, OPT_CHARS }, + .flags = UTIL_OPT_FLAG_NOSHORT, + .desc = "List channel-path measurement characteristics", + }, + { + .option = { "util", no_argument, NULL, OPT_UTIL }, + .flags = UTIL_OPT_FLAG_NOSHORT, + .desc = "List unprocessed utilization data", + }, + { + .option = { "metrics", no_argument, NULL, OPT_METRICS }, + .flags = UTIL_OPT_FLAG_NOSHORT, + .desc = "List performance metrics", + }, + { + .option = { "no-ansi", no_argument, NULL, OPT_NO_ANSI }, + .flags = UTIL_OPT_FLAG_NOSHORT, + .desc = "Do not use ANSI terminal codes in output", + }, + { + .option = { "no-prefix", no_argument, NULL, OPT_NO_PREFIX }, + .flags = UTIL_OPT_FLAG_NOSHORT, + .desc = "Hide key prefix in pairs output format", + }, + { + .option = { "debug", no_argument, NULL, OPT_DEBUG }, + .flags = UTIL_OPT_FLAG_NOSHORT, + .desc = "Print debugging information", + }, + UTIL_OPT_END +}; + +/* Settings from command line options. */ +static struct { + bool cmd_specified; + enum command_t cmd; + int iterations; + bool forever; + int interval; + int groups; + bool groups_specified; + bool all; + bool use_ansi; + bool use_prefix; + bool debug; + enum util_fmt_t fmt; + bool fmt_specified; + bool cmgs_specified; + bool columns_specified; + bool keys_specified; + unsigned long unit; + char unit_suffix; + bool unit_specified; +} opts; + +/* Per CHPID run-time data. 
*/ +static struct chpid_data_t { + int id; + bool selected; + struct cmg_t *cmg; + int type; + int shared; + char *speed; + struct cmg_data_t data; +} chpid_data[NUM_CHPIDS]; + +#define chpid_for_each(c) \ + for (int __i = 0; __i < NUM_CHPIDS && ((c) = &chpid_data[__i]); __i++) + +/* + * Buffer should be large enough to contain a full table of output columns for + * up to 256 CHPIDs. + */ +#define OUTPUT_BUFFER_SIZE 131072 + +/* + * Make stdout fully buffered to enable writing a full table to terminal in a + * single flush write. This helps reduce visual flicker on output terminals. + */ +static void buffer_stdout(bool on) +{ + static void *buffer; + + if (on) { + free(buffer); + buffer = util_zalloc(OUTPUT_BUFFER_SIZE); + setvbuf(stdout, buffer, _IOFBF, OUTPUT_BUFFER_SIZE); + } else { + setlinebuf(stdout); + free(buffer); + buffer = NULL; + } +} + +/* + * Emit ANSI terminal control @codes depending on the global use_ansi option. + */ +static void ansi(const char *codes) +{ + if (opts.use_ansi) + printf("%s", codes); +} + +/* + * Initialize default settings. + */ +static void init_opts(void) +{ + opts.cmd = CMD_TABLE; + opts.cmd_specified = false; + opts.iterations = -1; + opts.forever = true; + opts.interval = DEFAULT_INTERVAL; + opts.groups = 0; + opts.groups_specified = false; + opts.all = false; + opts.use_ansi = isatty(STDOUT_FILENO); + opts.use_prefix = true; + opts.fmt = FMT_JSON; + opts.fmt_specified = false; + opts.cmgs_specified = false; + opts.unit = UNIT_AUTO; + opts.unit_suffix = 0; + opts.unit_specified = false; +} + +/* + * Initialize chpid data array. + */ +static void init_chpid_data(void) +{ + int chpid; + + for (chpid = 0; chpid < NUM_CHPIDS; chpid++) { + memset(&chpid_data[chpid], 0, sizeof(chpid_data[chpid])); + chpid_data[chpid].id = chpid; + } +} + +/* + * Release memory used by chpid data array. 
+ */ +static void free_chpid_data(void) +{ + int chpid; + + for (chpid = 0; chpid < NUM_CHPIDS; chpid++) + free(chpid_data[chpid].speed); +} + +/* + * Parse a command line argument @arg that was specified for command line + * option @name as an integer and check if the result is between @min and @max. + * + * Return the resulting integer on success. On failure, exit with an + * appropriate error message. + */ +static int parse_int(const char *name, const char *arg, long min, long max) +{ + char *endptr; + long v; + + v = strtol(arg, &endptr, 10); + if (*endptr || !*arg) { + errx(EXIT_USAGE, "Value for option --%s is invalid: %s", + name, arg); + } + if (v < min) { + errx(EXIT_USAGE, "Value %s for option --%s is too small " + "(min %ld)", arg, name, min); + } + if (v > max) { + errx(EXIT_USAGE, "Value %s for option --%s is too large " + "(max %ld)", arg, name, max); + } + return (int)v; +} + +/* + * Parse a command line argument @arg as CHPID. Return the CHPID on success. + * On failure, exit with an error message. + */ +static int parse_chpid(const char *arg) +{ + int id; + char c; + + if ((sscanf(arg, "%x %c", &id, &c) == 1 || + sscanf(arg, "0.%x %c", &id, &c) == 1) && id <= MAX_CHPID) + return id; + + errx(EXIT_USAGE, "Invalid CHPID '%s'", arg); +} + +/* + * Parse the comma-separated list of column names in @arg and select the + * associated columns. + */ +static void parse_columns(char *arg) +{ + struct column_t *col; + char *name; + + while ((name = strsep(&arg, ","))) { + col = column_get_by_name(name); + if (!col) + errx(EXIT_USAGE, "Unknown column name '%s'", name); + column_select(col); + } +} + +/* + * Parse the comma-separated list of key names in @arg and add it to the + * list of specified keys. 
+ */ +static void parse_keys(char *arg) +{ + struct key_t *key; + char *name; + + while ((name = strsep(&arg, ","))) { + key = key_get_by_name(name); + if (!key) + errx(EXIT_USAGE, "Unknown key '%s'", name); + key_select(key); + } +} + +/* + * Parse the comma-separated list of CMG values in @arg and select the + * associated CMG. + */ +static void parse_cmgs(char *arg) +{ + struct cmg_t *cmg_t; + char *name; + + while ((name = strsep(&arg, ","))) { + cmg_t = cmg_get(atoi(name)); + if (!cmg_t) + errx(EXIT_USAGE, "Unsupported CMG '%s'", name); + cmg_t->selected = true; + } +} + +/* + * Parse a scale unit value in @arg and return the resulting scaling factor. + */ +static unsigned long parse_unit(char *arg) +{ + unsigned long unit; + char *endptr; + + if (strlen(arg) == 1) { + opts.unit_suffix = (char)toupper(*arg); + switch (opts.unit_suffix) { + case 'K': + return UNIT_BIN; + case 'M': + return UNIT_BIN * UNIT_BIN; + case 'G': + return UNIT_BIN * UNIT_BIN * UNIT_BIN; + case 'T': + return UNIT_BIN * UNIT_BIN * UNIT_BIN * UNIT_BIN; + default: + break; + } + } + opts.unit_suffix = 0; + if (strcmp(arg, "auto") == 0) + return UNIT_AUTO; + /* Parse as number. */ + unit = strtoul(arg, &endptr, 10); + if (!*endptr && *arg && unit > 0 && + !(unit == ULONG_MAX && errno == ERANGE)) { + return unit; + } + + errx(EXIT_USAGE, "Unsupported scaling unit '%s'", optarg); +} + +/* + * Determine the current status of the channel-path measurement facility. + */ +static enum cm_status_t get_cm_status(void) +{ + enum cm_status_t result; + char *path; + int i; + + path = util_path_sysfs(SYS_CM_ENABLE); + if (!util_path_exists(path)) { + result = CM_UNSUPPORTED; + goto out; + } + if (util_file_read_i(&i, 10, path)) + errx(EXIT_RUNTIME, "Unable to read file '%s'", path); + if (i == 0) + result = CM_DISABLED; + else + result = CM_ENABLED; +out: + free(path); + + return result; +} + +/* + * Change the channel-path facility status to the value represented by @on. 
+ */ +static void set_cm_enable(long on) +{ + char *path; + + if (on) + printf("Enabling channel-path statistics\n"); + else + printf("Disabling channel-path statistics\n"); + + path = util_path_sysfs(SYS_CM_ENABLE); + if (util_file_write_l(on, 10, "%s", path) == 0) { + free(path); + return; + } + + switch (errno) { + case EIO: + errx(EXIT_RUNTIME, "Unable to enable channel-path statistics\n" + "Check if your system is authorized (see section " + "AUTHORIZATION in 'man %s')", + program_invocation_short_name); + break; + default: + err(EXIT_RUNTIME, "Unable to write to file '%s'", path); + } +} + +/* + * Get sysfs path for attribute @filename of CHPID @chpid. If @filename is + * not specified, return path to CHPID sysfs directory. + */ +static char *get_chpid_path(int chpid, const char *filename) +{ + return util_path_sysfs(SYS_CSS0 "/chp0.%02x%s%s", chpid, + filename ? "/" : "", filename ? filename : ""); +} + +/* + * Check if CHPID @chpid exists. + */ +static bool chpid_exists(int chpid) +{ + char *path; + bool rc; + + path = get_chpid_path(chpid, NULL); + rc = util_path_exists("%s", path); + free(path); + + return rc; +} + +/* + * Print debugging information related to reading file @path. + */ +static void debug_text(const char *path, const char *txt) +{ + if (!opts.debug) + return; + if (txt) + printf("DEBUG: read(%s)='%s'\n", path, txt); + else + printf("DEBUG: read(%s)=- errno=%d\n", path, errno); +} + +/* + * Print debugging information related to reading file @path. 
+ */ +static void debug_bin(const char *path, void *buffer, size_t expect, ssize_t rc) +{ + if (!opts.debug) + return; + printf("DEBUG: read(%s)=%d/%d\n", path, (int)rc, (int)expect); + util_hexdump_grp(stdout, "DEBUG", buffer, 4, (int)rc, 0); +} + +static char *read_chpid_attr_as_text(int chpid, const char *name, bool try) +{ + char *path, *value = NULL; + + path = get_chpid_path(chpid, name); + if (try && !util_path_exists(path)) + goto out; + value = util_file_read_text_file(path, true); + debug_text(path, value); + if (!value && !try) + err(EXIT_RUNTIME, "Unable to read file '%s'", path); +out: + free(path); + + return value; +} + +/* + * Read and return the value of the cmg sysfs attribute for CHPID @chpid. + * Return %-1 if the CMG value is "unknown". + */ +static int read_cmg(int chpid) +{ + char *value; + int cmg; + + value = read_chpid_attr_as_text(chpid, "cmg", false); + if (strcmp(value, "unknown") == 0) + cmg = -1; + else + cmg = atoi(value); + free(value); + + return cmg; +} + +/* + * Read and return the integer value of sysfs attribute @name for CHPID @chpid. + * The integer is interpreted using base @base. + */ +static int read_chpid_attr_as_int(int chpid, const char *name, int base) +{ + char *value; + long i; + + value = read_chpid_attr_as_text(chpid, name, false); + i = strtol(value, NULL, base); + free(value); + + return (int)i; +} + +/* + * Read and return the value of sysfs attribute speed_bps for CHPID @chpid. + * Return "-" if the attribute cannot be read, or if the value is unavailable. + */ +static char *read_speed(int chpid) +{ + char *value; + + value = read_chpid_attr_as_text(chpid, "speed_bps", true); + /* Unavailable speed value is reported as "0". */ + if (value && strcmp(value, "0") == 0) { + free(value); + value = NULL; + } + + return value; +} + +/* Read @count bytes of binary data from file @path to @buffer. On success, + * return %true. If @allow_eof is %true, return %false if not all data could + * be read. 
Otherwise exit with an error message. + */ +static bool read_bin(const char *path, void *buffer, size_t count, + bool allow_eof) +{ + int fd; + ssize_t rc = -1; + + fd = open(path, O_RDONLY); + if (fd == -1) + goto err; + rc = read(fd, buffer, count); + debug_bin(path, buffer, count, rc); + if (rc < 0) + goto err; + if ((size_t)rc < count && !allow_eof) + goto err_eof; + close(fd); + + return (size_t)rc == count; + +err_eof: + errx(EXIT_RUNTIME, "Unable to read file '%s': Unexpected end of file", + path); +err: + err(EXIT_RUNTIME, "Unable to read file '%s'", path); +} + +/* + * Read the channel-measurements characteristics block for CHPID @chpid to + * @cmcb. + */ +static void read_cmcb(int chpid, cmcb_t *cmcb) +{ + char *path; + + memset(cmcb, 0, sizeof(*cmcb)); + path = get_chpid_path(chpid, "measurement_chars"); + read_bin(path, cmcb, sizeof(*cmcb), false); + free(path); +} + +/* + * Read the channel-utilization entry for CHPID @chpid to @cue. + */ +static void read_cue(int chpid, int cmg, cue_t *cue) +{ + char *path; + + memset(cue, 0, sizeof(*cue)); + path = get_chpid_path(chpid, "measurement"); + read_bin(path, cue, sizeof(*cue), false); + free(path); + + /* Some firmware levels incorrectly report a CUIV of 0xfe for CMG 3 - + * fix this here to enable access to the full CUE data. */ + if (cmg == 3 && cue->common.cuiv == 0xfe) { + if (opts.debug) + printf("DEBUG: Fixing CMG 3 CUIV (0xfe => 0xff)\n"); + cue->common.cuiv = 0xff; + } +} + +/* + * Read the extended channel-utilization entry for CHPID @chpid to @ext_cue. + */ +static bool read_ext_cue(int chpid, ext_cue_t *ext_cue) +{ + static bool once; + bool e = false; + char *path; + + memset(ext_cue, 0, sizeof(*ext_cue)); + path = get_chpid_path(chpid, "ext_measurement"); + /* Older kernels might not have extended measurement support. 
*/ + if (util_path_exists(path)) { + e = read_bin(path, ext_cue, sizeof(*ext_cue), true); + } else if (!once) { + warnx("Missing kernel support for extended channel-path " + "measurement data"); + once = true; + } + free(path); + + return e; +} + +static bool cue_modified(cue_t *a, cue_t *b) +{ + return a->common.timestamp != b->common.timestamp; +} + +/* + * Read utilization data @util for @chpid with CMG @cmg. + */ +static void read_util(int chpid, int cmg, struct util_t *util) +{ + bool repeat = false, *e = &util->extended; + ext_cue_t *ext_cue = &util->ext_cue; + cue_t cue2, *cue = &util->cue; + + /* Loop to ensure that basic and extended CUEs are in sync. */ + do { + read_cue(chpid, cmg, cue); + *e = read_ext_cue(chpid, ext_cue); + if (*e) { + /* Re-read CUE to make sure it hasn't changed. */ + read_cue(chpid, cmg, &cue2); + repeat = cue_modified(cue, &cue2); + } + } while (repeat); +} + +/* + * Select @chpid and read initial CHPID data from sysfs. If @try is %true, + * return %true if CHPID is available, %false otherwise. If @try is %false, + * exit with an error message if CHPID is unavailable. 
+ */ +static bool select_chpid(int chpid, bool try) +{ + struct chpid_data_t *c = &chpid_data[chpid]; + struct cmg_t *cmg_t; + int cmg; + + if (!chpid_exists(chpid)) { + if (try) + return false; + errx(EXIT_RUNTIME, "CHPID 0.%02x does not exist", chpid); + } + cmg = read_cmg(chpid); + if (cmg == -1) { + if (try) + return false; + errx(EXIT_RUNTIME, "No statistics available for CHPID 0.%02x", + chpid); + } + cmg_t = cmg_get(cmg); + if (!cmg_t) { + if (try) + return false; + errx(EXIT_RUNTIME, "CHPID 0.%02x uses unsupported CMG %d", + chpid, cmg); + } + if (opts.cmgs_specified && !cmg_t->selected) { + if (try) + return false; + errx(EXIT_RUNTIME, "CHPID 0.%02x excluded by --cmg option", + chpid); + } + + cmg_t->found++; + c->cmg = cmg_t; + c->type = read_chpid_attr_as_int(chpid, "type", 16); + c->shared = read_chpid_attr_as_int(chpid, "shared", 10); + c->speed = read_speed(chpid); + if (cmg_t->has_cmcb) + read_cmcb(chpid, &c->data.cmcb); + read_util(chpid, cmg, &c->data.util_a); + c->data.util_b = c->data.util_a; + c->selected = true; + + return true; +} + +/* + * Select all available CHPIDs. + */ +static int select_all_chpids(void) +{ + int chpid, num_selected = 0; + + for (chpid = 0; chpid < NUM_CHPIDS; chpid++) { + if (select_chpid(chpid, true)) + num_selected++; + } + + return num_selected; +} + +/* + * Parse all positional parameters in @argv starting with @first up to @argc + * and select all specified CHPIDs. + */ +static int parse_chpids(int first, int argc, char *argv[]) +{ + int i, chpid, num_selected = 0; + + /* Parse optional CHPID selection. */ + for (i = first; i < argc; i++) { + chpid = parse_chpid(argv[i]); + if (chpid_data[chpid].selected) + continue; + select_chpid(chpid, false); + num_selected++; + } + + return num_selected; +} + +/* + * Enable channel-path statistics. 
+ */ +static void cmd_enable(enum cm_status_t status) +{ + if (status == CM_ENABLED) + printf("Channel-path statistics already enabled\n"); + else + set_cm_enable(1); +} + +/* + * Disable channel-path statistics. + */ +static void cmd_disable(enum cm_status_t status) +{ + if (status == CM_DISABLED) + printf("Channel-path statistics already disabled\n"); + else + set_cm_enable(0); +} + +/* pr_pair(key, fmt) */ +#define pr_pair(k, fmt, ...) \ + util_fmt_pair(FMT_PERSIST, (k), (fmt), ##__VA_ARGS__) +/* pr_pair_quoted(key, fmt) */ +#define pr_pair_quoted(k, fmt, ...) \ + util_fmt_pair(FMT_PERSIST | FMT_QUOTE, (k), (fmt), ##__VA_ARGS__) + +/* + * Show status of the channel-path statistics function in human-readable form. + */ +static void cmd_status_default(enum cm_status_t status) +{ + printf("Channel-path statistics are "); + switch (status) { + case CM_UNSUPPORTED: + printf("not supported on this system\n"); + break; + case CM_DISABLED: + printf("disabled\n"); + break; + case CM_ENABLED: + printf("enabled\n"); + break; + } +} + +/* + * Show status of the channel-path statistics function in machine-readable + * format. + */ +static void cmd_status_fmt(enum cm_status_t status) +{ + const char *str = ""; + + switch (status) { + case CM_UNSUPPORTED: + str = "unsupported"; + break; + case CM_DISABLED: + str = "disabled"; + break; + case CM_ENABLED: + str = "enabled"; + break; + } + + util_fmt_add_key("status"); + util_fmt_obj_start(FMT_ROW, "chpstat_status"); + pr_pair_quoted("status", "%s", str); + util_fmt_obj_end(); +} + +/* + * Show status of the channel-path statistics function. + */ +static void cmd_status(enum cm_status_t status) +{ + if (opts.fmt_specified) + cmd_status_fmt(status); + else + cmd_status_default(status); +} + +/* + * Update channel-utilization data for CHPID @c. Return %true if new data was + * found, %false otherwise. 
+ */ +static bool _update_util(struct chpid_data_t *c) +{ + struct util_t util; + + read_util(c->id, c->cmg->cmg, &util); + if (!cue_modified(&util.cue, &c->data.util_b.cue)) + return false; + + c->data.util_a = c->data.util_b; + c->data.util_b = util; + + return true; +} + +/* + * Update channel-utilization data for CHPID @c. If @wait is %true, repeat + * update attempts every second until new data is available. + */ +static void update_util(struct chpid_data_t *c, bool wait) +{ + while (!_update_util(c) && wait) + sleep(1); +} + +/* + * Update channel-path utilization data for all CHPIDs. If @wait is %true, + * repeat the process until all CHPIDs have new data. + */ +static void update_util_all(bool wait) +{ + struct chpid_data_t *c; + + chpid_for_each(c) { + if (c->selected) + update_util(c, wait); + } +} + +/* + * Return hostname of local host. + */ +static char *get_host_name(void) +{ + char host[HOST_NAME_MAX + 1] = { 0 }, *d; + + gethostname(host, sizeof(host) - 1); + d = strchr(host, '.'); + if (d) + *d = 0; + if (!host[0]) + strncpy(host, "-", sizeof(host) - 1); + + return util_strdup(host); +} + +/* + * Print a header with iteration-specific information for iteration @iteration. 
+ */ +static void pr_iteration_header(int iteration) +{ + unsigned int quoted = FMT_PERSIST | FMT_QUOTE, unquoted = FMT_PERSIST; + char str[30], *host, *b = "", *r = ""; + struct timeval tv; + struct tm *tm; + + if (opts.use_ansi) { + b = ANSI_BOLD; + r = ANSI_RESET; + } + + gettimeofday(&tv, NULL); + tm = localtime(&tv.tv_sec); + host = get_host_name(); + + if (opts.cmd == CMD_TABLE) { + printf("%sIteration:%s %d ", b, r, iteration); + + if (tm) { + strftime(str, sizeof(str), "%F", tm); + printf("%sDate:%s %s ", b, r, str); + strftime(str, sizeof(str), "%T%z", tm); + printf("%sTime:%s %s ", b, r, str); + } + printf("%sHost:%s %s\n", b, r, host); + } else { + util_fmt_pair(unquoted, KEY_ITERATION, "%d", iteration); + util_fmt_pair(unquoted, KEY_TIME_EPOCH, "%llu", tv.tv_sec); + if (tm) { + strftime(str, sizeof(str), "%F %T%z", tm); + util_fmt_pair(quoted, KEY_TIME, "%s", str); + } else { + util_fmt_pair(quoted | FMT_INVAL, KEY_TIME, "", str); + } + } + + free(host); +} + +/* + * Print generic, non-cmg specific data for CHPID @c. 
+ */ +static void pr_chpid(struct chpid_data_t *c) +{ + pr_pair_quoted(KEY_CHPID, "0.%02x", c->id); + pr_pair(KEY_TYPE, "%d", c->type); + pr_pair(KEY_CMG, "%d", c->cmg->cmg); + pr_pair(KEY_SHARED, "%d", c->shared); + if (c->speed) { + pr_pair_quoted(KEY_SPEED, "%s", c->speed); + } else { + util_fmt_pair(FMT_PERSIST | FMT_QUOTE | FMT_INVAL, KEY_SPEED, + ""); + } +} + +static void pr_cmg_inval_pair(struct cmg_pair_t *p) +{ + if (p->type == CMG_FLOAT) + util_fmt_pair(FMT_INVAL, p->key, "-1.0"); + else + util_fmt_pair(FMT_INVAL, p->key, "-1"); +} + +static void pr_cmg_pair(struct cmg_pair_t *p) +{ + if (!p->valid) { + pr_cmg_inval_pair(p); + return; + } + + switch (p->type) { + case CMG_U32: + util_fmt_pair(FMT_DEFAULT, p->key, "%u", p->value_u32); + break; + case CMG_U64: + util_fmt_pair(FMT_DEFAULT, p->key, "%llu", p->value_u64); + break; + case CMG_FLOAT: + util_fmt_pair(FMT_DEFAULT, p->key, "%.1f", p->value_double); + break; + } +} + +static void pr_cmg_pairs(struct cmg_pair_t *pairs) +{ + int i; + + for (i = 0; pairs[i].key; i++) + pr_cmg_pair(&pairs[i]); +} + +static void pr_group(struct chpid_data_t *c, struct cmg_t *cmg, + enum key_group_t group) +{ + struct cmg_pair_t *pairs = NULL; + + pairs = cmg->get_values(&c->data, group); + util_fmt_obj_start(FMT_DEFAULT, key_group_to_str(group)); + pr_cmg_pairs(pairs); + util_fmt_obj_end(); + cmg_free_pairs(pairs); +} + +/* + * Show list of channel-path statistics once. 
+ */ +static void cmd_list_once(void) +{ + struct chpid_data_t *c; + struct cmg_t *cmg; + + util_fmt_obj_start(FMT_LIST, "channel_paths"); + chpid_for_each(c) { + if (!c->selected) + continue; + cmg = c->cmg; + if (opts.groups & (KEY_GRP_UTIL | KEY_GRP_METRICS)) + update_util(c, false); + if (opts.groups & KEY_GRP_METRICS) + cmg->update_metrics(&c->data); + + util_fmt_obj_start(FMT_ROW, NULL); + if (opts.groups & KEY_GRP_CHP) + pr_chpid(c); + util_fmt_obj_start(FMT_DEFAULT, "cmg%d", cmg->cmg); + if (opts.groups & KEY_GRP_CHARS) + pr_group(c, cmg, KEY_GRP_CHARS); + if (opts.groups & KEY_GRP_UTIL) + pr_group(c, cmg, KEY_GRP_UTIL); + if (opts.groups & KEY_GRP_METRICS) + pr_group(c, cmg, KEY_GRP_METRICS); + util_fmt_obj_end(); + util_fmt_obj_end(); + } + util_fmt_obj_end(); +} + +/* Register the keys of all key-value pairs that will be printed. */ +static void register_selected_keys(void) +{ + struct key_t *key; + + key_for_each_selected(key) + util_fmt_add_key("%s", key->name); +} + +/* Report an error if a key specified on the command line is not reported by + * any available CHPID. */ +static void check_key_availability(void) +{ + struct key_t *key; + + key_for_each_selected(key) { + if (key->found) + continue; + errx(EXIT_RUNTIME, "No available CHPID provides key '%s' " + "(needs CMG %s)", key->name, key->cmg_str); + } +} + +static void apply_key_selection(void) +{ + bool do_filter = (opts.cmd == CMD_LIST); + struct cmg_t *cmg_t; + + if (opts.all) { + key_select_all(); + } else if (opts.groups_specified || opts.cmgs_specified || + opts.keys_specified) { + /* Apply selection options. */ + if (opts.groups_specified) + key_select_by_groups(opts.groups, do_filter); + if (opts.cmgs_specified) { + cmg_for_each(cmg_t) { + if (!cmg_t->selected) + continue; + key_select_by_cmg(cmg_t->cmg); + } + } + } else { + /* Select default keys suitable for command. 
*/ + if (opts.cmd == CMD_LIST_KEYS) + key_select_all(); + else + key_select_by_groups(opts.groups, do_filter); + } + + if (opts.cmd == CMD_LIST_KEYS) + key_sort_selected(); +} + +/* + * Show channel-path statistics in list format. + */ +static void cmd_list(void) +{ + int i; + + if (!opts.groups_specified && !opts.keys_specified) + opts.groups = KEY_GRP_METRICS; + opts.groups |= KEY_GRP_META | KEY_GRP_ITERATION | KEY_GRP_CHP; + + /* Re-initialize keys to get current key->found values. */ + key_init(opts.all); + if (opts.keys_specified) + check_key_availability(); + apply_key_selection(); + register_selected_keys(); + /* Select CMG key groups needed for selected keys. */ + opts.groups |= key_get_selected_groups(); + + if (opts.groups & KEY_GRP_METRICS) + update_util_all(true); + + if (opts.fmt != FMT_JSONSEQ) + util_fmt_obj_start(FMT_LIST, NULL); + for (i = 0; opts.forever || i < opts.iterations; i++) { + if (i > 0) + sleep((unsigned int)opts.interval); + + util_fmt_obj_start(0, NULL); + if (opts.groups & KEY_GRP_ITERATION) + pr_iteration_header(i); + cmd_list_once(); + util_fmt_obj_end(); + + /* Flush here to allow immediate consumption of full reports + * via pipes (e.g. for use with grep). */ + fflush(stdout); + } + if (opts.fmt != FMT_JSONSEQ) + util_fmt_obj_end(); +} + +/* + * Allocate a table using the names and widths of all selected columns. + */ +static struct util_rec *define_table(void) +{ + struct util_rec *rec; + struct column_t *col; + + rec = util_rec_new_wide(NULL); + column_for_each_selected(col) { + util_rec_def(rec, col->name, UTIL_REC_ALIGN_RIGHT, + (int)col->width, ""); + } + + return rec; +} + +/* + * Scale value @v by @unit and return the suffix of the applied + * unit-multiplier. 
+ */ +static char scale_auto(struct cmg_pair_t *p, int unit) +{ + char suffixes[] = "\0KMGTPE"; + int i; + + for (i = 0; suffixes[i + 1]; i++) { + switch (p->type) { + case CMG_U32: + if (p->value_u32 < (u32)unit) + goto out; + p->value_u32 /= (u32)unit; + break; + case CMG_U64: + if (p->value_u64 < (u64)unit) + goto out; + p->value_u64 /= (u64)unit; + break; + case CMG_FLOAT: + if (p->value_double < (double)unit) + goto out; + p->value_double /= (double)unit; + break; + default: + break; + } + } + +out: + return suffixes[i]; +} + +static void scale_fixed(struct cmg_pair_t *p, unsigned long unit) +{ + switch (p->type) { + case CMG_U32: + p->value_u32 /= (u32)unit; + break; + case CMG_U64: + p->value_u64 /= (u64)unit; + break; + case CMG_FLOAT: + p->value_double /= (double)unit; + break; + default: + break; + } +} + +/* + * Add @value in formatted form to @column of @table. Scale value if defined + * for @column. + */ +static void add_pair_value(struct util_rec *table, struct column_t *col, + struct cmg_pair_t *pair) +{ + char suffix = 0, str[16]; + int p; + + if (!pair->valid) { + util_rec_set(table, col->name, "-"); + return; + } + + if (pair->unit == CMG_NUMBER) { + suffix = scale_auto(pair, UNIT_DEC); + } else if (pair->unit == CMG_BPS) { + if (opts.unit == UNIT_AUTO) + suffix = scale_auto(pair, UNIT_BIN); + else + scale_fixed(pair, opts.unit); + } + + switch (pair->type) { + case CMG_U32: + snprintf(str, sizeof(str), "%u%c", pair->value_u32, suffix); + break; + case CMG_U64: + snprintf(str, sizeof(str), "%llu%c", pair->value_u64, suffix); + break; + case CMG_FLOAT: + /* Find highest precision that fits into @width characters. */ + for (p = 2; p >= 0; p--) { + snprintf(str, sizeof(str), "%.*f%c", p, + pair->value_double, suffix); + if (strlen(str) <= col->width) + break; + } + break; + } + util_rec_set(table, col->name, "%s", str); +} + +/* + * Add generic, non-cmg specific data for column @col and CHPID @c to @table. 
+ * Return %true if requested column was handled, %false otherwise. + */ +static bool add_column_generic(struct util_rec *table, struct column_t *col, + struct chpid_data_t *c) +{ + const char *name = col->name; + + switch (col->id) { + case COL_CHPID: + util_rec_set(table, name, "%02x", c->id); + break; + case COL_TYPE: + util_rec_set(table, name, "%02x", c->type); + break; + case COL_CMG: + util_rec_set(table, name, "%d", c->cmg->cmg); + break; + case COL_SHARED: + util_rec_set(table, name, "%d", c->shared); + break; + case COL_SPEED: + if (!c->speed) + return false; + util_rec_set(table, name, "%s", c->speed); + break; + default: + return false; + } + + return true; +} + +static bool add_column_cmg(struct util_rec *table, struct column_t *col, + struct cmg_pair_t *pairs) +{ + int i; + + for (i = 0; pairs[i].key; i++) { + if (pairs[i].col != col->id) + continue; + add_pair_value(table, col, &pairs[i]); + return true; + } + + return false; +} + +/* + * Add row data for CHPID @c to @table. + */ +static void add_table_row(struct util_rec *table, struct chpid_data_t *c, + struct cmg_pair_t *pairs) +{ + struct column_t *col; + + column_for_each_selected(col) { + if (add_column_generic(table, col, c)) + continue; + if (add_column_cmg(table, col, pairs)) + continue; + /* Default text for unavailable column values. */ + util_rec_set(table, col->name, "-"); + } +} + +/* + * Find the longest number of consecutive columns that use the same non-empty + * group header. Start with selected column @start. Update @num_ptr to contain + * the number of consecutive columns, and @width_ptr to contain the total + * width of these columns, including separating spaces. 
+ */ +static void get_hdr_group_size(unsigned int start, unsigned int *num_ptr, + unsigned int *width_ptr) +{ + struct column_t *col; + const char *last_hdr = ""; + unsigned int i, num, width; + + num = 0; + width = 0; + for (i = start; (col = column_get_by_index(i, true)); i++) { + if (i > start) { + if (!*last_hdr || + strcmp(col->hdr1_group, last_hdr) != 0) + break; + /* Account for space between columns. */ + width++; + } + num++; + width += col->width; + last_hdr = col->hdr1_group; + } + *num_ptr = num; + *width_ptr = width; +} + +/* + * Print @str centered in a space of @width characters. + */ +static void pr_centered(const char *str, unsigned int width) +{ + unsigned int a, b, l; + + l = strlen_u(str); + width = MAX(width, l); + a = (width - l) / 2; + b = width - l - a; + printf("%*s%s%*s", b, "", str, a, ""); +} + +/* + * Distribute @extra characters evenly to column widths for @num selected + * columns starting with @start. + */ +static void enlarge_columns(unsigned int start, unsigned int num, + unsigned int extra) +{ + struct column_t *col; + unsigned int i, end, delta; + + end = start + num; + for (i = start; i < end; i++) { + col = column_get_by_index(i, true); + delta = extra / (end - i); + col->width += delta; + extra -= delta; + } +} + +/* + * Make sure that group header is centered over the non-spacing portions of + * line 2 headers: | HDR1 | => | HDR1 | + * | HDR2 HDR2| | HDR2 HDR2| + */ +static void pr_hdr1_spacing(struct column_t *col, unsigned int *width_ptr) +{ + unsigned int w1, w2, spacing; + + w1 = strlen_u(col->hdr1_group); + w2 = strlen_u(col->hdr2); + if (col->width <= w2) + return; + spacing = col->width - w2; + if (*width_ptr - spacing >= w1) { + printf("%*s", spacing, ""); + *width_ptr -= spacing; + } +} + +/* + * Print multi-line table header and update column widths based on header + * lengths. 
+ */ +static void print_table_header(void) +{ + struct column_t *col; + unsigned int i, next_i, num, width, hdr_width; + + /* Print first header line and update column width based on heading. */ + for (i = 0; (col = column_get_by_index(i, true)); i = next_i) { + if (i > 0) + printf(" "); + get_hdr_group_size(i, &num, &width); + if (num == 1) { + /* Update column width in case heading is wider. */ + width = MAX(width, strlen_u(col->hdr1_single)); + width = MAX(width, strlen_u(col->hdr2)); + col->width = width; + printf("%*s", col->width, col->hdr1_single); + next_i = i + 1; + continue; + } + hdr_width = strlen_u(col->hdr1_group); + if (hdr_width > width) { + /* Header text is longer than sum of column widths, + * increase column widths accordingly.*/ + enlarge_columns(i, num, hdr_width - width); + width = hdr_width; + } + pr_hdr1_spacing(col, &width); + pr_centered(col->hdr1_group, width); + next_i = i + num; + } + printf("\n"); + + /* Print second header line. */ + i = 0; + column_for_each_selected(col) { + if (i++ > 0) + printf(" "); + printf("%*s", col->width, col->hdr2); + } + printf("\n"); +} + +/* + * Show table of channel-path statistics once. + */ +static void cmd_table_once(void) +{ + struct cmg_pair_t *pairs; + struct util_rec *table; + struct chpid_data_t *c; + struct cmg_t *cmg; + + ansi(ANSI_BOLD ANSI_REVERSE); + print_table_header(); + ansi(ANSI_RESET); + table = define_table(); + + chpid_for_each(c) { + if (!c->selected) + continue; + cmg = c->cmg; + + /* Update data. */ + update_util(c, false); + cmg->update_metrics(&c->data); + + /* Add data to table. 
*/ + pairs = cmg->get_values(&c->data, KEY_GRP_ALL); + add_table_row(table, c, pairs); + cmg_free_pairs(pairs); + util_rec_print(table); + } + + util_rec_free(table); + + if (opts.unit != UNIT_AUTO && !opts.unit_suffix) + printf("\n* = %lu B/s\n", opts.unit); +} + +static void apply_column_selection(void) +{ + struct cmg_t *cmg_t; + + if (opts.all) { + column_select_all(); + } else if (opts.cmgs_specified) { + /* Choose columns appropriate for CMGs specified via --cmg. */ + cmg_for_each(cmg_t) { + if (!cmg_t->selected) + continue; + column_select_id_list(cmg_t->default_column_ids); + } + } else if (!opts.columns_specified) { + /* Select default columns suitable for command. */ + if (opts.cmd == CMD_LIST_COLUMNS) + column_select_all(); + else + column_select_default(); + } +} + +/* Artificial upper limit for BPS needed to determine maximum column widths. */ +#define MAX_BPS (1024.0 * /* MiB/s per GFC */ 100 * (/* MiB */ 1048576)) + +/* + * Make sure each column fits expected values. + */ +static void calc_column_widths(void) +{ + struct column_t *col; + const char *last_hdr1 = NULL; + char str[32]; + unsigned int width; + double v; + + column_for_each_selected(col) { + /* Make sure column fits at least a single header. */ + col->width = MAX(col->width, strlen_u(col->hdr2)); + /* Make sure column fits maximum value. */ + width = col->width; + switch (col->unit) { + case COL_PERCENT: + width = 4; /* "99.9" */ + break; + case COL_NUMBER: + width = 4; /* "999k" */ + break; + case COL_BPS: + if (!opts.unit_specified || opts.unit == UNIT_AUTO) { + width = 4; /* "999K" */ + } else { + v = MAX_BPS / (double)opts.unit; + snprintf(str, sizeof(str) - 1, "%.0f%c", v, + opts.unit_suffix); + width = strlen_u(str); + } + break; + default: + break; + } + col->width = MAX(col->width, width); + /* Double space between groups. 
*/ + if (last_hdr1 && strcmp(last_hdr1, col->hdr1_group) != 0) + col->width++; + last_hdr1 = col->hdr1_group; + } +} + +/* + * Show table of channel-path statistics continuously as specified by + * command line options. + */ +static void cmd_table(void) +{ + int i; + + apply_column_selection(); + calc_column_widths(); + column_update_bps_suffix(opts.unit == UNIT_AUTO, opts.unit_suffix); + + printf("Collecting initial utilization data\n"); + update_util_all(true); + + buffer_stdout(true); + for (i = 0; opts.forever || i < opts.iterations; i++) { + if (i > 0) { + sleep((unsigned int)opts.interval); + printf("\n"); + } + + /* Clear screen + move cursor to top-left of terminal (1,1). */ + ansi(ANSI_CLS ANSI_LOCATE(1, 1)); + + pr_iteration_header(i); + printf("\n"); + cmd_table_once(); + + /* Make table visible in one go to avoid screen flicker. */ + fflush(stdout); + } + buffer_stdout(false); +} + +#define HDR_COLUMN "column" +#define HDR_HEADING "heading" +#define HDR_DESC "description" + +/* + * List available table columns in table format. + */ +static void cmd_list_columns_table(void) +{ + struct util_rec *table; + struct column_t *col; + int w, w1, w2, w3; + + /* Determine column widths. */ + w1 = strlen_i(HDR_COLUMN); + w2 = strlen_i(HDR_HEADING); + w3 = strlen_i(HDR_DESC); + column_for_each_selected(col) { + w1 = MAX(w1, strlen_i(col->name)); + w = strlen_i(col->hdr1_single) + strlen_i(col->hdr2) + 1; + w2 = MAX(w2, w); + w3 = MAX(w3, strlen_i(col->desc)); + } + w1++; + w2++; + + /* Print table heading. */ + table = util_rec_new_wide(NULL); + util_rec_def(table, HDR_COLUMN, UTIL_REC_ALIGN_LEFT, w1, "COLUMN"); + util_rec_def(table, HDR_HEADING, UTIL_REC_ALIGN_LEFT, w2, "HEADING"); + util_rec_def(table, HDR_DESC, UTIL_REC_ALIGN_LEFT, w3, "DESCRIPTION"); + util_rec_print_hdr(table); + + /* Print rows of column data. 
*/ + column_for_each_selected(col) { + util_rec_set(table, HDR_COLUMN, col->name); + if (*col->hdr1_single) { + util_rec_set(table, HDR_HEADING, "%s %s", + col->hdr1_single, col->hdr2); + } else { + util_rec_set(table, HDR_HEADING, "%s", col->hdr2); + } + util_rec_set(table, HDR_DESC, "%s", col->desc); + util_rec_print(table); + } + + util_rec_free(table); +} + +#define HDR_NAME "name" +#define HDR_HEAD "heading" + +/* + * List available table columns in machine-readable format. + */ +static void cmd_list_columns_fmt(void) +{ + struct column_t *col; + + util_fmt_add_key(HDR_NAME); + util_fmt_add_key(HDR_HEAD); + util_fmt_add_key(HDR_DESC); + + util_fmt_obj_start(FMT_LIST, "chpstat_list_columns"); + column_for_each_selected(col) { + util_fmt_obj_start(FMT_ROW, NULL); + pr_pair_quoted(HDR_NAME, "%s", col->name); + if (*col->hdr1_single) { + pr_pair_quoted(HDR_HEAD, "%s %s", + col->hdr1_single, col->hdr2); + } else { + pr_pair_quoted(HDR_HEAD, "%s", col->hdr2); + } + pr_pair_quoted(HDR_DESC, "%s", col->desc); + util_fmt_obj_end(); + } + util_fmt_obj_end(); +} + +/* + * List available table columns. + */ +static void cmd_list_columns(void) +{ + apply_column_selection(); + if (opts.fmt_specified) + cmd_list_columns_fmt(); + else + cmd_list_columns_table(); +} + +#define HDR_KEY "key" +#define HDR_GROUP "group" +#define HDR_CMGS "cmgs" + +/* + * List available table columns in table format. + */ +static void cmd_list_key_table(void) +{ + struct util_rec *table; + struct key_t *key; + int w1, w2, w3; + + /* Determine column width. */ + w1 = strlen_i(HDR_KEY); + w2 = strlen_i(HDR_GROUP); + w3 = strlen_i(HDR_CMGS); + key_for_each_selected(key) { + w1 = MAX(w1, strlen_i(key->name)); + w2 = MAX(w2, strlen_i(key_group_to_str(key->group))); + w3 = MAX(w3, strlen_i(key->cmg_str)); + } + /* Increase spacing between columns to improve readability. */ + w1++; + w2++; + /* Print table heading. 
*/ + table = util_rec_new_wide(NULL); + util_rec_def(table, HDR_KEY, UTIL_REC_ALIGN_LEFT, w1, "KEY"); + util_rec_def(table, HDR_GROUP, UTIL_REC_ALIGN_LEFT, w2, "GROUP"); + util_rec_def(table, HDR_CMGS, UTIL_REC_ALIGN_LEFT, w3, "CMGS"); + util_rec_print_hdr(table); + /* Print rows of column data. */ + key_for_each_selected(key) { + util_rec_set(table, HDR_KEY, "%s", key->name); + util_rec_set(table, HDR_GROUP, "%s", + key_group_to_str(key->group)); + util_rec_set(table, HDR_CMGS, "%s", key->cmg_str); + util_rec_print(table); + } + + util_rec_free(table); +} + +/* + * List available keys in machine-readable format. + */ +static void cmd_list_key_fmt(void) +{ + struct key_t *key; + + util_fmt_add_key(HDR_KEY); + util_fmt_add_key(HDR_GROUP); + util_fmt_add_key(HDR_CMGS); + + util_fmt_obj_start(FMT_LIST, "chpstat_list_keys"); + key_for_each_selected(key) { + util_fmt_obj_start(FMT_ROW, NULL); + util_fmt_pair(FMT_QUOTE, HDR_KEY, "%s", key->name); + util_fmt_pair(FMT_QUOTE, HDR_GROUP, "%s", + key_group_to_str(key->group)); + util_fmt_pair(FMT_QUOTE, HDR_CMGS, "%s", key->cmg_str); + util_fmt_obj_end(); + } + util_fmt_obj_end(); +} + +/* + * List available keys. + */ +static void cmd_list_keys(void) +{ + apply_key_selection(); + if (opts.fmt_specified) + cmd_list_key_fmt(); + else + cmd_list_key_table(); +} + +/* + * Return long command line name for specified @cmd option. + */ +static const char *cmd_to_optstr(enum command_t cmd) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(opt_vec); i++) { + if (opt_vec[i].option.val == (int)cmd) + return opt_vec[i].option.name; + } + return ""; +} + +/* + * Set tool command @cmd and check for conflicting options. 
+ */ +static void set_cmd(enum command_t cmd) +{ + if (!opts.cmd_specified || opts.cmd == cmd) { + opts.cmd = cmd; + opts.cmd_specified = true; + return; + } + errx(EXIT_USAGE, "Options --%s and --%s cannot be specified together", + cmd_to_optstr(opts.cmd), cmd_to_optstr(cmd)); +} + +/* + * Set output format to the format specified by @name. + */ +static void set_fmt(const char *name) +{ + enum util_fmt_t fmt; + + if (!util_fmt_name_to_type(name, &fmt)) { + errx(EXIT_USAGE, "Unknown format '%s' - supported formats: " + FMT_TYPE_NAMES, name); + } + if (opts.fmt_specified && fmt != opts.fmt) { + errx(EXIT_USAGE, "Option --format cannot be specified multiple " + "times"); + } + opts.fmt = fmt; + opts.fmt_specified = true; +} + +static void init_fmt(void) +{ + unsigned int flags = 0; + + if (opts.keys_specified) + flags |= FMT_FILTER; + if (!opts.use_prefix) + flags |= FMT_NOPREFIX; + if (opts.fmt == FMT_JSON || opts.fmt == FMT_JSONSEQ) { + /* Ensure correct JSON even if interrupted. */ + flags |= FMT_HANDLEINT; + } + if (opts.fmt == FMT_CSV) { + /* Quote all values to ensure compatibility with a multitude + * of CSV consumers. */ + flags |= FMT_QUOTEALL; + } + if (opts.keys_specified || opts.all) { + /* Always show data for keys with no valid value if the key + * was specifically requested by the user. */ + flags |= FMT_KEEPINVAL; + } + if (opts.debug) + flags |= FMT_WARN; + util_fmt_init(stdout, opts.fmt, flags, API_LEVEL); + util_fmt_set_indent(0, 2, ' '); +} + +/* + * Parse options and execute the command + */ +int main(int argc, char *argv[]) +{ + enum cm_status_t status; + int c, num_selected; + + util_prg_init(&prg); + util_opt_init(opt_vec, NULL); + init_opts(); + init_chpid_data(); + key_init(opts.all); + + /* Parse command-line parameters. 
*/ + while (1) { + c = util_opt_getopt_long(argc, argv); + if (c == -1) + break; + switch (c) { + case 'h': + /* --help */ + util_prg_print_help(); + util_opt_print_help(); + goto out; + case 'v': + /* --version */ + util_prg_print_version(); + goto out; + case OPT_ENABLE: + /* --enable */ + set_cmd(CMD_ENABLE); + break; + case OPT_DISABLE: + /* --disable */ + set_cmd(CMD_DISABLE); + break; + case OPT_STATUS: + /* --status */ + set_cmd(CMD_STATUS); + break; + case OPT_LIST_COLUMNS: + /* --list-columns */ + set_cmd(CMD_LIST_COLUMNS); + break; + case OPT_COLUMNS: + /* --columns */ + opts.columns_specified = true; + parse_columns(optarg); + break; + case OPT_LIST_KEYS: + /* --list-keys */ + set_cmd(CMD_LIST_KEYS); + break; + case OPT_KEYS: + /* --keys KEY1,.. */ + opts.keys_specified = true; + parse_keys(optarg); + break; + case OPT_CMG: + /* --cmg NUM,.. */ + opts.cmgs_specified = true; + parse_cmgs(optarg); + break; + case OPT_SCALE: + /* --scale UNIT */ + opts.unit_specified = true; + opts.unit = parse_unit(optarg); + break; + case OPT_ITERATIONS: + /* --iterations NUM */ + opts.iterations = parse_int("iterations", optarg, 0, + INT_MAX); + opts.forever = (opts.iterations < 1); + break; + case OPT_INTERVAL: + /* --interval NUM */ + opts.interval = parse_int("interval", optarg, 1, + MAX_INTERVAL); + break; + case OPT_CHARS: + /* --chars */ + opts.groups_specified = true; + opts.groups |= KEY_GRP_CHARS; + break; + case OPT_UTIL: + /* --util */ + opts.groups_specified = true; + opts.groups |= KEY_GRP_UTIL; + break; + case OPT_METRICS: + /* --metrics */ + opts.groups_specified = true; + opts.groups |= KEY_GRP_METRICS; + break; + case OPT_ALL: + /* --all */ + opts.all = true; + break; + case OPT_NO_ANSI: + /* --no-ansi */ + opts.use_ansi = false; + break; + case OPT_NO_PREFIX: + /* --no-prefix */ + opts.use_prefix = false; + break; + case OPT_DEBUG: + /* --debug */ + opts.debug = true; + break; + case OPT_FORMAT: + /* --format FORMAT */ + set_fmt(optarg); + break; + 
default: + util_opt_print_parse_error((char)c, argv); + return EXIT_USAGE; + } + } + + if (!opts.cmd_specified) { + /* List output is implied by --chars, --util, --metrics, --keys + * and --format. */ + if (opts.groups_specified || opts.keys_specified || + opts.fmt_specified) + opts.cmd = CMD_LIST; + } + + if (optind != argc && opts.cmd != CMD_LIST && opts.cmd != CMD_TABLE) { + errx(EXIT_USAGE, "Unexpected parameter specified: %s", + argv[optind]); + } + + /* Initialize output formatter. */ + init_fmt(); + + /* Handle functions that work without CM support. */ + status = get_cm_status(); + switch (opts.cmd) { + case CMD_STATUS: + cmd_status(status); + goto out; + case CMD_LIST_COLUMNS: + cmd_list_columns(); + goto out; + case CMD_LIST_KEYS: + cmd_list_keys(); + goto out; + default: + break; + } + + /* Ensure measurements are available beyond this point. */ + if (status == CM_UNSUPPORTED) { + errx(EXIT_RUNTIME, "This system does not support channel-path " + "statistics"); + } + + /* Handle supplemental functions. */ + switch (opts.cmd) { + case CMD_ENABLE: + cmd_enable(status); + goto out; + case CMD_DISABLE: + cmd_disable(status); + goto out; + default: + break; + } + + /* Ensure measurements are enabled beyond this point. */ + if (status == CM_DISABLED) { + errx(EXIT_RUNTIME, "Channel-path statistics are disabled\n" + "Use '%s --enable' to enable statistics", + program_invocation_short_name); + } + + /* Determine CHPID list. 
*/ + if (optind != argc) + num_selected = parse_chpids(optind, argc, argv); + else + num_selected = select_all_chpids(); + if (num_selected == 0) + errx(EXIT_RUNTIME, "No available CHPIDs found"); + + switch (opts.cmd) { + case CMD_LIST: + cmd_list(); + break; + default: + cmd_table(); + break; + } + +out: + util_fmt_exit(); + free_chpid_data(); + key_exit(); + cmg_exit(); + column_exit(); + + return EXIT_OK; +} diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/cmg1.c s390-tools-2.33.1/zconf/chp/chpstat/cmg1.c --- s390-tools-2.31.0/zconf/chp/chpstat/cmg1.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/cmg1.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,143 @@ +/* + * Support for CMG 1 channel-path statistics + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include "cmg.h" +#include "cmg_helper.h" +#include "column.h" +#include "key.h" + +#include "lib/util_base.h" +#include "lib/zt_common.h" + +/* Macros to convert generic cmg_data_t into CMG-specific ones. */ +#define get_cue(d, x) ((struct cue1_t *)&((d)->x.cue)) +#define get_metrics(d) ((struct metrics1_t *)&((d)->metrics)) + +/* CMG 1 format Channel-Utilization-Entry. */ +struct cue1_t { + u8 cuiv; + u32 timestamp:24; + u32 channel_path_busy_time_cpc; + u32 channel_path_busy_time; +} __packed; + +STATIC_ASSERT(sizeof(struct cue1_t) <= sizeof(cue_t)); + +/* Metrics based on CMG 1 format CUEs. */ +struct metrics1_t { + double interval; + double util_total; + double util_part; +}; + +STATIC_ASSERT(sizeof(struct metrics1_t) <= sizeof(metrics_t)); + +/* IDs of columns that should be shown by default in table output. */ +static const enum column_id_t default_column_ids[] = { + COL_CHPID, + COL_TYPE, + COL_CMG, + COL_SHARED, + COL_SPEED, + COL_UTIL_PART, + COL_UTIL_TOTAL, + /* End of list. 
*/ + COL_END +}; + +static void pr_cue(struct cmg_pair_t **a, unsigned int *n, + struct cmg_data_t *data) +{ + struct cue1_t *cue = get_cue(data, util_b); + + pr_u32(a, n, cue, timestamp); + pr_cue_u32(a, n, cue, channel_path_busy_time_cpc); + pr_cue_u32(a, n, cue, channel_path_busy_time); +} + +static void pr_metrics(struct cmg_pair_t **a, unsigned int *n, + struct cmg_data_t *data) +{ + struct metrics1_t *metrics = get_metrics(data); + + pr_metric(a, n, metrics, interval, CMG_NUMBER, COL_INTERVAL); + pr_metric(a, n, metrics, util_total, CMG_PERCENT, COL_UTIL_TOTAL); + pr_metric(a, n, metrics, util_part, CMG_PERCENT, COL_UTIL_PART); +} + +static struct cmg_pair_t *get_values(struct cmg_data_t *data, int groups) +{ + struct cmg_pair_t *array = NULL; + unsigned int num = 0; + + if (groups & KEY_GRP_UTIL) + pr_cue(&array, &num, data); + if (groups & KEY_GRP_METRICS) + pr_metrics(&array, &num, data); + + /* Add terminating null-element. */ + util_expand_array(&array, &num); + array[num - 1].key = NULL; + + return array; +} + +/* Initialize metrics in @m. */ +static void init_metrics(struct metrics1_t *m) +{ + m->interval = METRICS_INIT; + m->util_total = METRICS_INIT; + m->util_part = METRICS_INIT; +} + +/* Recalculate metrics in @data. 
*/ +static void update_metrics(struct cmg_data_t *data) +{ + struct metrics1_t *m = get_metrics(data); + struct cue1_t *a = get_cue(data, util_a); + struct cue1_t *b = get_cue(data, util_b); + u32 ticks, delta; + + init_metrics(m); + + ticks = get_delta(a->timestamp, b->timestamp, CUE_TS_WIDTH); + if (ticks == 0) + return; + + /* interval = t2 - t1 */ + m->interval = tick_to_s(ticks); + + /* util_total = 100.0 * ticks_busy_cpc / ticks_total */ + if (cue_valid2(a, b, channel_path_busy_time_cpc)) { + delta = field_delta(channel_path_busy_time_cpc, a, b); + m->util_total = 100.0 * delta / ticks; + } + /* util_part = 100.0 * ticks_busy / ticks_total */ + if (cue_valid2(a, b, channel_path_busy_time)) { + delta = field_delta(channel_path_busy_time, a, b); + m->util_part = 100.0 * delta / ticks; + } +} + +/* Object defining this CMG. */ +static struct cmg_t cmg1 = { + .cmg = 1, + .selected = false, + .found = 0, + .has_cmcb = false, + .default_column_ids = default_column_ids, + .get_values = &get_values, + .update_metrics = &update_metrics, +}; + +/* Add to CMG registry. */ +static void __attribute__((constructor)) cmg1_ctr(void) +{ + cmg_add(&cmg1); +} diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/cmg2.c s390-tools-2.33.1/zconf/chp/chpstat/cmg2.c --- s390-tools-2.31.0/zconf/chp/chpstat/cmg2.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/cmg2.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,343 @@ +/* + * Support for CMG 2 channel-path statistics + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include "cmg.h" +#include "cmg_helper.h" +#include "column.h" +#include "key.h" + +#include "lib/util_base.h" + +/* Macros to convert generic cmg_data_t into CMG-specific ones. 
*/ +#define get_cmcb(d) ((struct cmcb2_t *)&((d)->cmcb)) +#define get_cue(d, x) ((struct cue2_t *)&((d)->x.cue)) +#define get_ext_cue(d, x) ((struct ext_cue2_t *)&((d)->x.ext_cue)) +#define get_metrics(d) ((struct metrics2_t *)&((d)->metrics)) + +/* CMG 2 format Channel-Measurement-Characteristics Block (CMCB). */ +struct cmcb2_t { + u32 max_bus_cycles; + u32 max_channel_work_units; + u32 max_write_data_units; + u32 max_read_data_units; + u32 data_unit_size; +} __packed; + +STATIC_ASSERT(sizeof(struct cmcb2_t) <= sizeof(cmcb_t)); + +/* CMG 2 format Channel-Utilization-Entry (CUE). */ +struct cue2_t { + u8 cuiv; + u32 timestamp:24; + u32 bus_cycles_cpc; + u32 channel_work_units_cpc; + u32 channel_work_units; + u32 data_units_written_cpc; + u32 data_units_written; + u32 data_units_read_cpc; + u32 data_units_read; +} __packed; + +STATIC_ASSERT(sizeof(struct cue2_t) <= sizeof(cue_t)); + +/* CMG 2 format Extended Channel-Utilization-Entry. */ +struct ext_cue2_t { + u32 total_ficon_ops_cpc; + u32 total_deferred_ficon_ops_cpc; + u64 sum_ficon_ops_cpc; + u32 total_hpf_ops_cpc; + u32 total_deferred_hpf_ops_cpc; + u64 sum_hpf_ops_cpc; +} __packed; + +STATIC_ASSERT(sizeof(struct ext_cue2_t) <= sizeof(ext_cue_t)); + +/* Metrics based on CMG 2 format CUEs. */ +struct metrics2_t { + double interval; + double util_total; + double util_part; + double util_bus; + double read_total; + double read_part; + double write_total; + double write_part; + /* Extended CUE metrics. */ + double ficon_rate; + double ficon_active; + double ficon_defer; + double hpf_rate; + double hpf_active; + double hpf_defer; +}; + +STATIC_ASSERT(sizeof(struct metrics2_t) <= sizeof(metrics_t)); + +/* IDs of columns that should be shown by default in table output. 
*/ +static const enum column_id_t default_column_ids[] = { + COL_CHPID, + COL_TYPE, + COL_CMG, + COL_SHARED, + COL_SPEED, + COL_UTIL_PART, + COL_UTIL_TOTAL, + COL_UTIL_BUS, + COL_READ_PART, + COL_READ_TOTAL, + COL_WRITE_PART, + COL_WRITE_TOTAL, + COL_FICON_RATE, + COL_FICON_ACTIVE, + COL_FICON_DEFER, + COL_HPF_RATE, + COL_HPF_ACTIVE, + COL_HPF_DEFER, + /* End of list. */ + COL_END +}; + +static void pr_chars(struct cmg_pair_t **a, unsigned int *n, + struct cmg_data_t *data) +{ + struct cmcb2_t *cmcb = get_cmcb(data); + + pr_u32(a, n, cmcb, max_bus_cycles); + pr_u32(a, n, cmcb, max_channel_work_units); + pr_u32(a, n, cmcb, max_write_data_units); + pr_u32(a, n, cmcb, max_read_data_units); + pr_u32(a, n, cmcb, data_unit_size); +} + +static void pr_cue(struct cmg_pair_t **a, unsigned int *n, + struct cmg_data_t *data) +{ + struct cue2_t *cue = get_cue(data, util_b); + + pr_u32(a, n, cue, timestamp); + pr_cue_u32(a, n, cue, bus_cycles_cpc); + pr_cue_u32(a, n, cue, channel_work_units_cpc); + pr_cue_u32(a, n, cue, channel_work_units); + pr_cue_u32(a, n, cue, data_units_written_cpc); + pr_cue_u32(a, n, cue, data_units_written); + pr_cue_u32(a, n, cue, data_units_read_cpc); + pr_cue_u32(a, n, cue, data_units_read); +} + +static void pr_ext_cue(struct cmg_pair_t **a, unsigned int *n, + struct cmg_data_t *data) +{ + struct ext_cue2_t *ext_cue = get_ext_cue(data, util_b); + bool v = data->util_b.extended; + + pr_cond_u32(a, n, v, ext_cue, total_ficon_ops_cpc); + pr_cond_u32(a, n, v, ext_cue, total_deferred_ficon_ops_cpc); + pr_cond_u64(a, n, v, ext_cue, sum_ficon_ops_cpc); + pr_cond_u32(a, n, v, ext_cue, total_hpf_ops_cpc); + pr_cond_u32(a, n, v, ext_cue, total_deferred_hpf_ops_cpc); + pr_cond_u64(a, n, v, ext_cue, sum_hpf_ops_cpc); +} + +static void pr_metrics(struct cmg_pair_t **a, unsigned int *n, + struct cmg_data_t *data) +{ + struct metrics2_t *metrics = get_metrics(data); + + pr_metric(a, n, metrics, interval, CMG_NUMBER, COL_INTERVAL); + pr_metric(a, n, metrics, 
util_total, CMG_PERCENT, COL_UTIL_TOTAL); + pr_metric(a, n, metrics, util_part, CMG_PERCENT, COL_UTIL_PART); + pr_metric(a, n, metrics, util_bus, CMG_PERCENT, COL_UTIL_BUS); + pr_metric(a, n, metrics, read_total, CMG_BPS, COL_READ_TOTAL); + pr_metric(a, n, metrics, read_part, CMG_BPS, COL_READ_PART); + pr_metric(a, n, metrics, write_total, CMG_BPS, COL_WRITE_TOTAL); + pr_metric(a, n, metrics, write_part, CMG_BPS, COL_WRITE_PART); + pr_metric(a, n, metrics, ficon_rate, CMG_NUMBER, COL_FICON_RATE); + pr_metric(a, n, metrics, ficon_active, CMG_NUMBER, COL_FICON_ACTIVE); + pr_metric(a, n, metrics, ficon_defer, CMG_NUMBER, COL_FICON_DEFER); + pr_metric(a, n, metrics, hpf_rate, CMG_NUMBER, COL_HPF_RATE); + pr_metric(a, n, metrics, hpf_active, CMG_NUMBER, COL_HPF_ACTIVE); + pr_metric(a, n, metrics, hpf_defer, CMG_NUMBER, COL_HPF_DEFER); +} + +static struct cmg_pair_t *get_values(struct cmg_data_t *data, int groups) +{ + struct cmg_pair_t *array = NULL; + unsigned int num = 0; + + if (groups & KEY_GRP_CHARS) + pr_chars(&array, &num, data); + if (groups & KEY_GRP_UTIL) { + pr_cue(&array, &num, data); + pr_ext_cue(&array, &num, data); + } + if (groups & KEY_GRP_METRICS) + pr_metrics(&array, &num, data); + + /* Add terminating null-element. */ + util_expand_array(&array, &num); + array[num - 1].key = NULL; + + return array; +} + +/* Initialize metrics in @m. */ +static void init_metrics(struct metrics2_t *m) +{ + m->interval = METRICS_INIT; + m->util_total = METRICS_INIT; + m->util_part = METRICS_INIT; + m->util_bus = METRICS_INIT; + m->read_total = METRICS_INIT; + m->read_part = METRICS_INIT; + m->write_total = METRICS_INIT; + m->write_part = METRICS_INIT; + /* Extended CUE metrics. */ + m->ficon_rate = METRICS_INIT; + m->ficon_active = METRICS_INIT; + m->ficon_defer = METRICS_INIT; + m->hpf_rate = METRICS_INIT; + m->hpf_active = METRICS_INIT; + m->hpf_defer = METRICS_INIT; +} + +/* Calculate metrics base on CMG 2 CUEs. 
*/ +static void calc_metrics(struct cmg_data_t *data, double seconds) +{ + struct metrics2_t *m = get_metrics(data); + struct cmcb2_t *cmcb = get_cmcb(data); + struct cue2_t *a = get_cue(data, util_a); + struct cue2_t *b = get_cue(data, util_b); + double delta, max; + + /* util_total = 100.0 * work_units_cpc / max_work_units */ + if (cue_valid2(a, b, channel_work_units_cpc)) { + delta = field_delta(channel_work_units_cpc, a, b); + max = cmcb->max_channel_work_units * seconds; + if (max != 0.0) + m->util_total = 100.0 * delta / max; + } + /* util_part = 100.0 * work_units / max_work_units */ + if (cue_valid2(a, b, channel_work_units)) { + delta = field_delta(channel_work_units, a, b); + max = cmcb->max_channel_work_units * seconds; + if (max != 0.0) + m->util_part = 100.0 * delta / max; + } + /* util_bus = 100.0 * bus_cycles_cpc / max_bus_cycles */ + if (cue_valid2(a, b, bus_cycles_cpc)) { + delta = field_delta(bus_cycles_cpc, a, b); + max = cmcb->max_bus_cycles * seconds; + if (max != 0.0) + m->util_bus = 100.0 * delta / max; + } + /* read_total = data_units_read_cpc * unit_size / seconds */ + if (cue_valid2(a, b, data_units_read_cpc)) { + delta = field_delta(data_units_read_cpc, a, b); + m->read_total = (double)delta * cmcb->data_unit_size / seconds; + } + /* read_part = data_units_read * unit_size / seconds */ + if (cue_valid2(a, b, data_units_read)) { + delta = field_delta(data_units_read, a, b); + m->read_part = (double)delta * cmcb->data_unit_size / seconds; + } + /* write_total = data_units_written_cpc * unit_size / seconds */ + if (cue_valid2(a, b, data_units_written_cpc)) { + delta = field_delta(data_units_written_cpc, a, b); + m->write_total = (double)delta * cmcb->data_unit_size / + seconds; + } + /* write_part = data_units_written * unit_size / seconds */ + if (cue_valid2(a, b, data_units_written)) { + delta = field_delta(data_units_written, a, b); + m->write_part = (double)delta * cmcb->data_unit_size / seconds; + } +} + +/* Calculate metrics base on 
CMG 2 extended CUEs. */ +static void calc_ext_metrics(struct cmg_data_t *data, double seconds) +{ + struct metrics2_t *m = get_metrics(data); + struct ext_cue2_t *a = get_ext_cue(data, util_a); + struct ext_cue2_t *b = get_ext_cue(data, util_b); + double delta, delta2; + + /* These metrics require extended CUEs. */ + if (!data->util_a.extended || !data->util_b.extended) + return; + + /* ficon_rate = total_ficon_ops_cpc / seconds */ + delta = field_delta(total_ficon_ops_cpc, a, b); + m->ficon_rate = delta / seconds; + + /* ficon_active = sum_ficon_ops_cpc / total_ficon_ops_cpc */ + if (delta != 0.0) { + delta2 = (double)field_delta64(sum_ficon_ops_cpc, a, b); + m->ficon_active = delta2 / delta; + } else { + m->ficon_active = 0.0; + } + + /* ficon_defer = total_deferred_ficon_ops_cpc / seconds */ + delta = field_delta(total_deferred_ficon_ops_cpc, a, b); + m->ficon_defer = delta / seconds; + + /* hpf_rate = total_hpf_ops_cpc / seconds */ + delta = field_delta(total_hpf_ops_cpc, a, b); + m->hpf_rate = delta / seconds; + + /* hpf_active = sum_hpf_ops_cpc / total_hpf_ops_cpc */ + if (delta != 0.0) { + delta2 = (double)field_delta64(sum_hpf_ops_cpc, a, b); + m->hpf_active = delta2 / delta; + } else { + m->hpf_active = 0.0; + } + + /* hpf_defer = total_deferred_hpf_ops_cpc / seconds */ + delta = field_delta(total_deferred_hpf_ops_cpc, a, b); + m->hpf_defer = delta / seconds; +} + +/* Recalculate metrics in @data. */ +static void update_metrics(struct cmg_data_t *data) +{ + struct metrics2_t *m = get_metrics(data); + struct cue2_t *a = get_cue(data, util_a); + struct cue2_t *b = get_cue(data, util_b); + u32 ticks; + + init_metrics(m); + + ticks = get_delta(a->timestamp, b->timestamp, CUE_TS_WIDTH); + if (ticks == 0) + return; + + /* interval = t2 - t1 */ + m->interval = tick_to_s(ticks); + + calc_metrics(data, m->interval); + calc_ext_metrics(data, m->interval); +} + +/* Object defining this CMG. 
*/ +static struct cmg_t cmg2 = { + .cmg = 2, + .selected = false, + .found = 0, + .has_cmcb = true, + .default_column_ids = default_column_ids, + .get_values = &get_values, + .update_metrics = &update_metrics, +}; + +/* Add to CMG registry. */ +static void __attribute__((constructor)) cmg2_ctr(void) +{ + cmg_add(&cmg2); +} diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/cmg3.c s390-tools-2.33.1/zconf/chp/chpstat/cmg3.c --- s390-tools-2.31.0/zconf/chp/chpstat/cmg3.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/cmg3.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,316 @@ +/* + * Support for CMG 3 channel-path statistics + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include "cmg.h" +#include "cmg_helper.h" +#include "column.h" +#include "key.h" + +#include "lib/util_base.h" + +/* Macros to convert generic cmg_data_t into CMG-specific ones. */ +#define get_cmcb(d) ((struct cmcb3_t *)&((d)->cmcb)) +#define get_cue(d, x) ((struct cue3_t *)&((d)->x.cue)) +#define get_metrics(d) ((struct metrics3_t *)&((d)->metrics)) + +/* CMG 3 format Channel-Measurement-Characteristics Block (CMCB). */ +struct cmcb3_t { + u32 data_unit_size; + u32 data_unit_size_cpc; + u32 msg_unit_size; + u32 msg_unit_size_cpc; +} __packed; + +STATIC_ASSERT(sizeof(struct cmcb3_t) <= sizeof(cmcb_t)); + +/* CMG 3 format Channel-Utilization-Entry (CUE). */ +struct cue3_t { + u8 cuiv; + u32 timestamp:24; + u32 msg_units_sent; + u32 msg_units_sent_cpc; + u32 unsuccessful_attempts_to_send; + u32 unavailable_receive_buffers; + u32 unavailable_receive_buffers_cpc; + u32 data_units_sent; + u32 data_units_sent_cpc; +} __packed; + +STATIC_ASSERT(sizeof(struct cue3_t) <= sizeof(cue_t)); + +/* Metrics based on CMG 3 format CUEs. 
*/ +struct metrics3_t { + double interval; + double write_total; + double write_part; + double msg_rate_total; + double msg_rate_part; + double msg_size_total; + double msg_size_part; + double send_fail_part; + double rcv_fail_total; + double rcv_fail_part; +}; + +STATIC_ASSERT(sizeof(struct metrics3_t) <= sizeof(metrics_t)); + +/* IDs of columns that should be shown by default in table output. */ +static const enum column_id_t default_column_ids[] = { + COL_CHPID, + COL_TYPE, + COL_CMG, + COL_SHARED, + COL_SPEED, + COL_WRITE_PART, + COL_WRITE_TOTAL, + COL_MSG_RATE_PART, + COL_MSG_RATE_TOTAL, + COL_MSG_SIZE_PART, + COL_MSG_SIZE_TOTAL, + COL_SEND_FAIL_PART, + COL_RCV_FAIL_PART, + COL_RCV_FAIL_TOTAL, + /* End of list. */ + COL_END +}; + +static void pr_chars(struct cmg_pair_t **a, unsigned int *n, + struct cmg_data_t *data) +{ + struct cmcb3_t *cmcb = get_cmcb(data); + + pr_u32(a, n, cmcb, data_unit_size); + pr_u32(a, n, cmcb, data_unit_size_cpc); + pr_u32(a, n, cmcb, msg_unit_size); + pr_u32(a, n, cmcb, msg_unit_size_cpc); +} + +static void pr_cue(struct cmg_pair_t **a, unsigned int *n, + struct cmg_data_t *data) +{ + struct cue3_t *cue = get_cue(data, util_b); + + pr_u32(a, n, cue, timestamp); + pr_cue_u32(a, n, cue, msg_units_sent); + pr_cue_u32(a, n, cue, msg_units_sent_cpc); + pr_cue_u32(a, n, cue, unsuccessful_attempts_to_send); + pr_cue_u32(a, n, cue, unavailable_receive_buffers); + pr_cue_u32(a, n, cue, unavailable_receive_buffers_cpc); + pr_cue_u32(a, n, cue, data_units_sent); + pr_cue_u32(a, n, cue, data_units_sent_cpc); +} + +static void pr_metrics(struct cmg_pair_t **a, unsigned int *n, + struct cmg_data_t *data) +{ + struct metrics3_t *m = get_metrics(data); + + pr_metric(a, n, m, write_part, CMG_BPS, COL_WRITE_PART); + pr_metric(a, n, m, write_total, CMG_BPS, COL_WRITE_TOTAL); + pr_metric(a, n, m, msg_rate_part, CMG_NUMBER, COL_MSG_RATE_PART); + pr_metric(a, n, m, msg_rate_total, CMG_NUMBER, COL_MSG_RATE_TOTAL); + pr_metric(a, n, m, msg_size_part, 
CMG_NUMBER, COL_MSG_SIZE_PART); + pr_metric(a, n, m, msg_size_total, CMG_NUMBER, COL_MSG_SIZE_TOTAL); + pr_metric(a, n, m, send_fail_part, CMG_NUMBER, COL_SEND_FAIL_PART); + pr_metric(a, n, m, rcv_fail_part, CMG_NUMBER, COL_RCV_FAIL_PART); + pr_metric(a, n, m, rcv_fail_total, CMG_NUMBER, COL_RCV_FAIL_TOTAL); +} + +static struct cmg_pair_t *get_values(struct cmg_data_t *data, int groups) +{ + struct cmg_pair_t *array = NULL; + unsigned int num = 0; + + if (groups & KEY_GRP_CHARS) + pr_chars(&array, &num, data); + if (groups & KEY_GRP_UTIL) + pr_cue(&array, &num, data); + if (groups & KEY_GRP_METRICS) + pr_metrics(&array, &num, data); + + /* Add terminating null-element. */ + util_expand_array(&array, &num); + array[num - 1].key = NULL; + + return array; +} + +/* Initialize metrics in @m. */ +static void init_metrics(struct metrics3_t *m) +{ + m->interval = METRICS_INIT; + m->write_total = METRICS_INIT; + m->write_part = METRICS_INIT; + m->msg_rate_total = METRICS_INIT; + m->msg_rate_part = METRICS_INIT; + m->msg_size_total = METRICS_INIT; + m->msg_size_part = METRICS_INIT; + m->send_fail_part = METRICS_INIT; + m->rcv_fail_total = METRICS_INIT; + m->rcv_fail_part = METRICS_INIT; +} + +/* Calculate metrics base on CMG 3 CUEs. */ +static void calc_metrics(struct cmg_data_t *data, double seconds) +{ + u32 data_size, data_size_cpc, msg_size, msg_size_cpc; + struct metrics3_t *m = get_metrics(data); + struct cmcb3_t *cmcb = get_cmcb(data); + struct cue3_t *a = get_cue(data, util_a); + struct cue3_t *b = get_cue(data, util_b); + double delta, delta2; + + /* When not valid (reported as 0) some CMCB fields have default values + * or can be derived from related fields. */ + data_size = cmcb->data_unit_size ? : 1; + data_size_cpc = cmcb->data_unit_size_cpc ? : data_size; + msg_size = cmcb->msg_unit_size ? : 1; + msg_size_cpc = cmcb->msg_unit_size_cpc ? 
: msg_size; + + /* + * Amount of bytes per second sent in messages by all partitions + * + * write_total = data_units_sent_cpc * data_unit_size_cpc / seconds + */ + if (cue_valid2(a, b, data_units_sent_cpc)) { + delta = field_delta(data_units_sent_cpc, a, b); + m->write_total = delta * data_size_cpc / seconds; + } + + /* + * Amount of bytes per second sent in messages by this partition + * + * write_part = data_units_sent * data_unit_size / seconds + */ + if (cue_valid2(a, b, data_units_sent)) { + delta = field_delta(data_units_sent, a, b); + m->write_part = delta * data_size / seconds; + } + + /* + * Rate of messages sent per second by all partitions + * + * msg_rate_total = msg_units_sent_cpc * msg_unit_size_cpc / seconds + */ + if (cue_valid2(a, b, msg_units_sent_cpc)) { + delta = field_delta(msg_units_sent_cpc, a, b); + m->msg_rate_total = delta * msg_size_cpc / seconds; + } + + /* + * Rate of messages sent per second by this partition + * + * msg_rate_part = msg_units_sent * msg_unit_size / seconds + */ + if (cue_valid2(a, b, msg_units_sent)) { + delta = field_delta(msg_units_sent, a, b); + m->msg_rate_part = delta * msg_size / seconds; + } + + /* + * Average size of messages sent by all partitions + * + * msg_size_total = data_units_sent_cpc * data_unit_size_cpc / + * (msg_units_sent_cpc * msg_unit_size_cpc) + */ + if (cue_valid2(a, b, data_units_sent_cpc) && + cue_valid2(a, b, msg_units_sent_cpc)) { + delta = field_delta(data_units_sent_cpc, a, b); + delta2 = field_delta(msg_units_sent_cpc, a, b); + if (delta2 != 0.0) { + m->msg_size_total = delta * data_size_cpc / + (delta2 * msg_size_cpc); + } + } + + /* + * Average size of messages sent by this partition + * + * msg_size_part = data_units_sent * data_unit_size / + * (msg_units_sent * msg_unit_size) + */ + if (cue_valid2(a, b, data_units_sent) && + cue_valid2(a, b, msg_units_sent)) { + delta = field_delta(data_units_sent, a, b); + delta2 = field_delta(msg_units_sent, a, b); + if (delta2 != 0.0) { + 
m->msg_size_part = delta * data_size / + (delta2 * msg_size); + } + } + + /* + * Number of failed message send attempts per second by this partition + * + * send_fail_part = unsuccessful_attempts_to_send / seconds + */ + if (cue_valid2(a, b, unsuccessful_attempts_to_send)) { + delta = field_delta(unsuccessful_attempts_to_send, a, b); + m->send_fail_part = delta / seconds; + } + + /* + * Rate of messages per second that could not be received by all + * partitions due to unavailable receive buffers + * + * rcv_fail_total = unavailable_receive_buffers_cpc / seconds + */ + if (cue_valid2(a, b, unavailable_receive_buffers_cpc)) { + delta = field_delta(unavailable_receive_buffers_cpc, a, b); + m->rcv_fail_total = delta / seconds; + } + + /* + * Rate of messages per second that could not be received by this + * partition due to unavailable receive buffers + * + * rcv_fail_part = unavailable_receive_buffers / seconds + */ + if (cue_valid2(a, b, unavailable_receive_buffers)) { + delta = field_delta(unavailable_receive_buffers, a, b); + m->rcv_fail_part = delta / seconds; + } +} + +/* Recalculate metrics in @data. */ +static void update_metrics(struct cmg_data_t *data) +{ + struct metrics3_t *m = get_metrics(data); + struct cue3_t *a = get_cue(data, util_a); + struct cue3_t *b = get_cue(data, util_b); + u32 ticks; + + init_metrics(m); + + ticks = get_delta(a->timestamp, b->timestamp, CUE_TS_WIDTH); + if (ticks == 0) + return; + + /* interval = t2 - t1 */ + m->interval = tick_to_s(ticks); + + calc_metrics(data, m->interval); +} + +/* Object defining this CMG. */ +static struct cmg_t cmg3 = { + .cmg = 3, + .selected = false, + .found = 0, + .has_cmcb = true, + .default_column_ids = default_column_ids, + .get_values = &get_values, + .update_metrics = &update_metrics, +}; + +/* Add to CMG registry. 
*/ +static void __attribute__((constructor)) cmg3_ctr(void) +{ + cmg_add(&cmg3); +} diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/cmg.c s390-tools-2.33.1/zconf/chp/chpstat/cmg.c --- s390-tools-2.31.0/zconf/chp/chpstat/cmg.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/cmg.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,101 @@ +/* + * Registry for CMG types + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include +#include +#include +#include + +#include "cmg.h" +#include "misc.h" + +#include "lib/util_libc.h" + +static struct cmg_t **cmgs; +static unsigned int num_cmgs; + +/* Register new CMG-object @cmg. */ +void cmg_add(struct cmg_t *cmg) +{ + unsigned int i; + + for (i = 0; i < num_cmgs; i++) { + if (cmgs[i]->cmg == cmg->cmg) + errx(EXIT_RUNTIME, "Internal error: CMG %d registered " + "multiple times", cmg->cmg); + } + + util_expand_array(&cmgs, &num_cmgs); + cmgs[num_cmgs - 1] = cmg; +} + +void cmg_exit(void) +{ + free(cmgs); +} + +/* Return CMG-object for @cmg. */ +struct cmg_t *cmg_get(int cmg) +{ + unsigned int i; + + for (i = 0; i < num_cmgs; i++) { + if (cmg == cmgs[i]->cmg) + return cmgs[i]; + } + return NULL; +} + +struct cmg_t *_cmg_get_by_index(unsigned int i) +{ + return (i < num_cmgs) ? cmgs[i] : NULL; +} + +void cmg_free_keys(char **keys) +{ + int i; + + if (!keys) + return; + for (i = 0; keys[i]; i++) + free(keys[i]); + free(keys); +} + +void cmg_free_pairs(struct cmg_pair_t *pairs) +{ + int i; + + if (!pairs) + return; + for (i = 0; pairs[i].key; i++) + free(pairs[i].key); + free(pairs); +} + +char **cmg_get_keys(struct cmg_t *cmg, int groups) +{ + struct cmg_pair_t *pairs; + struct cmg_data_t data; + char **keys = NULL; + unsigned int i, num = 0; + + /* Get key-value pairs for dummy data. 
*/ + memset(&data, 0, sizeof(data)); + pairs = cmg->get_values(&data, groups); + + /* Convert pair array to key array. */ + for (i = 0; pairs[i].key; i++) + util_add_array(&keys, &num, util_strdup(pairs[i].key)); + util_add_array(&keys, &num, NULL); + + cmg_free_pairs(pairs); + + return keys; +} diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/cmg.h s390-tools-2.33.1/zconf/chp/chpstat/cmg.h --- s390-tools-2.31.0/zconf/chp/chpstat/cmg.h 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/cmg.h 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,115 @@ +/* + * Registry for CMG types + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef CMG_H +#define CMG_H + +#include + +#include "lib/util_base.h" +#include "lib/util_list.h" +#include "column.h" + +#define CMCB_SIZE (5 * sizeof(u32)) +#define CUE_SIZE (8 * sizeof(u32)) +#define EXT_CUE_SIZE (16 * sizeof(u32)) +#define METRICS_SIZE (17 * sizeof(double)) + +/* Bit-width of CUE timestamp field. */ +#define CUE_TS_WIDTH 24 + +typedef union { + struct { + u8 cuiv; + u32 timestamp:24; + } common __packed; + u8 data[CUE_SIZE]; +} cue_t; +typedef u8 ext_cue_t[EXT_CUE_SIZE]; +typedef u8 cmcb_t[CMCB_SIZE]; +typedef u8 metrics_t[METRICS_SIZE]; + +struct util_t { + cue_t cue; + ext_cue_t ext_cue; + bool extended; +}; + +/* CMG-specific CHPID data. */ +struct cmg_data_t { + cmcb_t cmcb; + struct util_t util_a; + struct util_t util_b; + metrics_t metrics; +}; + +enum cmg_unit_t { + CMG_NUMBER, + CMG_PERCENT, + CMG_BPS, +}; + +enum cmg_value_type_t { + CMG_U32, + CMG_U64, + CMG_FLOAT, +}; + +struct cmg_pair_t { + /* Key. */ + char *key; + enum column_id_t col; + /* Value. 
*/ + bool valid; + enum cmg_unit_t unit; + enum cmg_value_type_t type; + union { + u32 value_u32; + u64 value_u64; + double value_double; + }; +}; + +struct cmg_t { + int cmg; + + /* Flag indicating whether this CMG was selected on the command line. */ + bool selected; + + /* Counter indicating number of CHPIDs found with this CMG. */ + int found; + + /* Indicator whether this CMG requires a CMCB. */ + const bool has_cmcb; + + /* Array of default column IDs to be displayed for this CMG. */ + const enum column_id_t *default_column_ids; + + /* Return array of key-value pairs. */ + struct cmg_pair_t *(*get_values)(struct cmg_data_t *data, int groups); + + /* Update the metrics found in @data. */ + void (*update_metrics)(struct cmg_data_t *data); + +}; + +/* Iterate over all supported CMG data types. */ +#define cmg_for_each(c) \ + for (unsigned int __i = 0; ((c) = _cmg_get_by_index(__i)); __i++) + +void cmg_add(struct cmg_t *cmg); +struct cmg_t *cmg_get(int cmg); +struct cmg_t *_cmg_get_by_index(unsigned int i); + +void cmg_exit(void); +void cmg_free_keys(char **keys); +void cmg_free_pairs(struct cmg_pair_t *pairs); +char **cmg_get_keys(struct cmg_t *cmg, int groups); + +#endif /* CMG_H */ diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/cmg_helper.c s390-tools-2.33.1/zconf/chp/chpstat/cmg_helper.c --- s390-tools-2.31.0/zconf/chp/chpstat/cmg_helper.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/cmg_helper.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,109 @@ +/* + * Helper functions for implementing CMG types + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include +#include +#include + +#include "cmg_helper.h" +#include "cmg.h" + +#include "lib/util_base.h" +#include "lib/util_libc.h" + +/* Convert time ticks to seconds. 
*/ +double tick_to_s(u32 t) +{ + return CM_TICK * t; +} + +/* Get 32 bit delta of counters @curr and @last. Take into account that both + * counters might have wrapped. Counter width is @width bits. */ +u32 get_delta(u32 last, u32 curr, int width) +{ + if (curr >= last) + return curr - last; + + /* Counter wrapped - add [last..max] + [0..curr] */ + return (u32)((u64)(1ULL << width) - last + curr); +} + +/* Get 64 bit delta of counters @curr and @last. Take into account that both + * counters might have wrapped. Counter width is @width bits. */ +u64 get_delta64(u64 last, u64 curr, int width) +{ + u64 max; + + if (curr >= last) + return curr - last; + + /* Counter wrapped - add [last..max] + [0..curr] */ + max = (width == 64) ? ULLONG_MAX : (1ULL << width) - 1; + return (max - last + curr) + 1; +} + +/* Check whether word at @offset is valid according to @cuiv. */ +bool _cue_valid(u8 cuiv, int offset) +{ + int num; + u8 mask; + + num = offset / (int)sizeof(u32); + mask = 0x80 >> num; + + return (cuiv & mask) != 0; +} + +static struct cmg_pair_t *add_pair(struct cmg_pair_t **array, unsigned int *num, + bool valid, const char *key, + enum column_id_t col, enum cmg_unit_t unit, + enum cmg_value_type_t type) +{ + struct cmg_pair_t pair; + + memset(&pair, 0, sizeof(pair)); + pair.valid = valid; + pair.key = util_strdup(key); + pair.col = col; + pair.unit = unit; + pair.type = type; + util_add_array(array, num, pair); + + return &((*array)[*num - 1]); +} + +void _pr_u32(struct cmg_pair_t **array, unsigned int *num, bool valid, + const char *key, enum column_id_t col, enum cmg_unit_t unit, + u32 value) +{ + struct cmg_pair_t *pair; + + pair = add_pair(array, num, valid, key, col, unit, CMG_U32); + pair->value_u32 = value; +} + +void _pr_u64(struct cmg_pair_t **array, unsigned int *num, bool valid, + const char *key, enum column_id_t col, enum cmg_unit_t unit, + u64 value) +{ + struct cmg_pair_t *pair; + + pair = add_pair(array, num, valid, key, col, unit, CMG_U64); + 
pair->value_u64 = value; +} + +void _pr_double(struct cmg_pair_t **array, unsigned int *num, bool valid, + const char *key, enum column_id_t col, enum cmg_unit_t unit, + double value) +{ + struct cmg_pair_t *pair; + + pair = add_pair(array, num, valid, key, col, unit, CMG_FLOAT); + pair->value_double = value; +} diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/cmg_helper.h s390-tools-2.33.1/zconf/chp/chpstat/cmg_helper.h --- s390-tools-2.31.0/zconf/chp/chpstat/cmg_helper.h 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/cmg_helper.h 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,81 @@ +/* + * Helper functions for implementing CMG types + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef CMG_HELPER_H +#define CMG_HELPER_H + +#include + +#include "cmg.h" +#include "column.h" + +#include "lib/util_base.h" + +/* Duration of channel-path measurement timestamp tick. */ +#define CM_TICK ((double)0.000128) + +/* Initialization value for metrics fields. */ +#define METRICS_INIT -1.0 + +/* Check whether member @field of struct @s is valid according to s->cuiv. */ +#define cue_valid(s, field) \ + (_cue_valid((s)->cuiv, offsetof(__typeof__(*s), field))) + +/* Check whether member @field of struct @a and @b is valid according to + * a->cuiv and b->cuiv. 
*/ +#define cue_valid2(a, b, field) \ + ((_cue_valid((a)->cuiv, offsetof(__typeof__(*a), field))) && \ + (_cue_valid((b)->cuiv, offsetof(__typeof__(*b), field)))) + +#define field_delta(field, a, b) \ + get_delta((a)->field, (b)->field, 32) + +#define field_delta64(field, a, b) \ + get_delta64((a)->field, (b)->field, 64) + +#define pr_u32(a, n, s, field) \ + _pr_u32((a), (n), true, STRINGIFY(field), COL_NONE, CMG_NUMBER, \ + (s)->field) + +#define pr_cond_u32(a, n, v, s, field) \ + _pr_u32((a), (n), (v), STRINGIFY(field), COL_NONE, CMG_NUMBER, \ + (s)->field) + +#define pr_cue_u32(a, n, s, field) \ + pr_cond_u32(a, n, cue_valid(s, field), s, field) + +#define pr_u64(a, n, s, field) \ + _pr_u64((a), (n), true, STRINGIFY(field), COL_NONE, CMG_NUMBER, \ + (s)->field) + +#define pr_cond_u64(a, n, v, s, field) \ + _pr_u64((a), (n), (v), STRINGIFY(field), COL_NONE, CMG_NUMBER, \ + (s)->field) + +/* pr_metric(array_ptr, num_ptr, struct, field, unit, column_id) */ +#define pr_metric(a, n, s, field, u, c) \ + _pr_double((a), (n), ((s)->field != METRICS_INIT), STRINGIFY(field), \ + c, u, (s)->field) + +double tick_to_s(u32 t); +u32 get_delta(u32 last, u32 curr, int width); +u64 get_delta64(u64 last, u64 curr, int width); +bool _cue_valid(u8 cuiv, int offset); + +void _pr_u32(struct cmg_pair_t **a, unsigned int *n, bool valid, + const char *key, enum column_id_t col, enum cmg_unit_t unit, + u32 value); +void _pr_u64(struct cmg_pair_t **a, unsigned int *n, bool valid, + const char *key, enum column_id_t col, enum cmg_unit_t unit, + u64 value); +void _pr_double(struct cmg_pair_t **a, unsigned int *n, bool valid, + const char *key, enum column_id_t col, enum cmg_unit_t unit, + double value); + +#endif /* CMG_HELPER_H */ diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/column.c s390-tools-2.33.1/zconf/chp/chpstat/column.c --- s390-tools-2.31.0/zconf/chp/chpstat/column.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/column.c 2024-05-28 08:26:36.000000000 
+0200 @@ -0,0 +1,430 @@ +/* + * Registry for supported table columns + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include +#include + +#include "column.h" + +#include "lib/util_base.h" +#include "lib/util_libc.h" + +/* + * Use group header for adjacent columns of the same type to improve + * readability. + * + * Single header: UTIL Group header: UTILIZATION + * PART PART TOTAL + */ +#define HDR1_CHP_SINGLE "" +#define HDR1_CHP_GROUP "CHANNEL-PATH" +#define HDR1_UTIL_SINGLE "UTIL" +#define HDR1_UTIL_GROUP "UTILIZATION(%)" +#define HDR1_READ_SINGLE "READ" +#define HDR1_READ_GROUP "READ" +#define HDR1_WRITE_SINGLE "WRITE" +#define HDR1_WRITE_GROUP "WRITE" +#define HDR1_FICON_SINGLE "FICON" +#define HDR1_FICON_GROUP "FICON-OPS" +#define HDR1_HPF_SINGLE "HPF" +#define HDR1_HPF_GROUP "HPF-OPS" +#define HDR1_MSGR_SINGLE "RATE" +#define HDR1_MSGR_GROUP "MSG-RATE" +#define HDR1_MSGSZ_SINGLE "SIZE" +#define HDR1_MSGSZ_GROUP "MSG-SIZE" +#define HDR1_MSG_RCVF_SINGLE "RFAIL" +#define HDR1_MSG_RCVF_GROUP "RECEIVE-FAIL" + +static struct column_t columns[] = { + { + COL_CHPID, + "chpid", + COL_OTHER, + "Channel-path ID", + "ID", + HDR1_CHP_SINGLE, + HDR1_CHP_GROUP, + 0, + }, + { + COL_TYPE, + "type", + COL_OTHER, + "Channel-path type", + "TYP", + HDR1_CHP_SINGLE, + HDR1_CHP_GROUP, + 0, + }, + { + COL_CMG, + "cmg", + COL_OTHER, + "Channel-measurement group", + "CMG", + HDR1_CHP_SINGLE, + HDR1_CHP_GROUP, + 0, + }, + { + COL_SPEED, + "speed", + COL_OTHER, + "Operational speed", + "SPEED", + HDR1_CHP_SINGLE, + HDR1_CHP_GROUP, + 0, + }, + { + COL_SHARED, + "shared", + COL_OTHER, + "Shared channel-path indicator", + "SHR", + HDR1_CHP_SINGLE, + HDR1_CHP_GROUP, + 0, + }, + { + COL_INTERVAL, + "interval", + COL_OTHER, + "Cumulated statistics update interval", + "INT", + HDR1_CHP_SINGLE, + HDR1_CHP_GROUP, + 0, + }, + { + COL_UTIL_PART, + "util_part", + 
COL_PERCENT, + "Partition channel-path utilization in %", + "PART", + HDR1_UTIL_SINGLE, + HDR1_UTIL_GROUP, + 0, + }, + { + COL_UTIL_TOTAL, + "util_total", + COL_PERCENT, + "Total channel-path utilization in %", + "TOTAL", + HDR1_UTIL_SINGLE, + HDR1_UTIL_GROUP, + 0, + }, + { + COL_UTIL_BUS, + "util_bus", + COL_PERCENT, + "Bus utilization in %", + "BUS", + HDR1_UTIL_SINGLE, + HDR1_UTIL_GROUP, + 0, + }, + { + COL_READ_PART, + "read_part", + COL_BPS, + "Partition read throughput in B/s", + "PART", + HDR1_READ_SINGLE, + HDR1_READ_GROUP, + 0, + }, + { + COL_READ_TOTAL, + "read_total", + COL_BPS, + "Total read throughput in B/s", + "TOTAL", + HDR1_READ_SINGLE, + HDR1_READ_GROUP, + 0, + }, + { + COL_WRITE_PART, + "write_part", + COL_BPS, + "Partition write throughput in B/s", + "PART", + HDR1_WRITE_SINGLE, + HDR1_WRITE_GROUP, + 0, + }, + { + COL_WRITE_TOTAL, + "write_total", + COL_BPS, + "Total write throughput in B/s", + "TOTAL", + HDR1_WRITE_SINGLE, + HDR1_WRITE_GROUP, + 0, + }, + { + COL_FICON_RATE, + "ficon_rate", + COL_NUMBER, + "FICON operations per second", + "RATE", + HDR1_FICON_SINGLE, + HDR1_FICON_GROUP, + 0, + }, + { + COL_FICON_ACTIVE, + "ficon_active", + COL_NUMBER, + "Avg. concurrently active FICON operations", + "ACTV", + HDR1_FICON_SINGLE, + HDR1_FICON_GROUP, + 0, + }, + { + COL_FICON_DEFER, + "ficon_defer", + COL_NUMBER, + "Deferred FICON operations per second", + "DEFER", + HDR1_FICON_SINGLE, + HDR1_FICON_GROUP, + 0, + }, + { + COL_HPF_RATE, + "hpf_rate", + COL_NUMBER, + "HPF operations per second", + "RATE", + HDR1_HPF_SINGLE, + HDR1_HPF_GROUP, + 0, + }, + { + COL_HPF_ACTIVE, + "hpf_active", + COL_NUMBER, + "Avg. 
concurrently active HPF operations", + "ACTV", + HDR1_HPF_SINGLE, + HDR1_HPF_GROUP, + 0, + }, + { + COL_HPF_DEFER, + "hpf_defer", + COL_NUMBER, + "Deferred HPF operations per second", + "DEFER", + HDR1_HPF_SINGLE, + HDR1_HPF_GROUP, + 0, + }, + { + COL_MSG_RATE_PART, + "msg_rate_part", + COL_NUMBER, + "Partition message send rate per second", + "PART", + HDR1_MSGR_SINGLE, + HDR1_MSGR_GROUP, + 0, + }, + { + COL_MSG_RATE_TOTAL, + "msg_rate_total", + COL_NUMBER, + "Total message send rate per second", + "TOTAL", + HDR1_MSGR_SINGLE, + HDR1_MSGR_GROUP, + 0, + }, + { + COL_MSG_SIZE_PART, + "msg_size_part", + COL_NUMBER, + "Partition avg. send message size", + "PART", + HDR1_MSGSZ_SINGLE, + HDR1_MSGSZ_GROUP, + 0, + }, + { + COL_MSG_SIZE_TOTAL, + "msg_size_total", + COL_NUMBER, + "Total avg. send message size", + "TOTAL", + HDR1_MSGSZ_SINGLE, + HDR1_MSGSZ_GROUP, + 0, + }, + { + COL_SEND_FAIL_PART, + "send_fail_part", + COL_NUMBER, + "Partition message send fail rate per second", + "PART", + "SNDFAIL", + "SNDFAIL", + 0, + }, + { + COL_RCV_FAIL_PART, + "rcv_fail_part", + COL_NUMBER, + "Partition message receive fail rate per second", + "PART", + HDR1_MSG_RCVF_SINGLE, + HDR1_MSG_RCVF_GROUP, + 0, + }, + { + COL_RCV_FAIL_TOTAL, + "rcv_fail_total", + COL_NUMBER, + "Total message receive fail rate per second", + "TOTAL", + HDR1_MSG_RCVF_SINGLE, + HDR1_MSG_RCVF_GROUP, + 0, + }, +}; + +/* Columns selected by default. */ +static const int default_columns[] = { + COL_CHPID, + COL_TYPE, + COL_CMG, + COL_SHARED, + COL_SPEED, + COL_UTIL_PART, + COL_UTIL_TOTAL, + COL_UTIL_BUS, + COL_READ_PART, + COL_READ_TOTAL, + COL_WRITE_PART, + COL_WRITE_TOTAL, + /* End of list. */ + COL_END +}; + +static struct column_t **selected_cols; +static unsigned int num_selected_cols; +static bool hdr_updated; + +struct column_t *column_get_by_index(unsigned int i, bool selected) +{ + if (selected) + return i < num_selected_cols ? selected_cols[i] : NULL; + return i < ARRAY_SIZE(columns) ? 
&columns[i] : NULL; +} + +/* + * Retrieve column object by @name or %NULL if column does not exist. + */ +struct column_t *column_get_by_name(const char *name) +{ + struct column_t *col; + + column_for_each(col) { + if (strcasecmp(col->name, name) == 0) + return col; + } + + return NULL; +} + +/* + * Add the specified column to the list of selected columns. Discard duplicate + * selections of the same column. + */ +void column_select(struct column_t *col) +{ + struct column_t *c; + + /* Silently filter out double selection of columns to prevent problems + * with non-unique util_rec name fields. */ + column_for_each_selected(c) { + if (c == col) + return; + } + + util_add_array(&selected_cols, &num_selected_cols, col); +} + +void column_select_id_list(const int *ids) +{ + struct column_t *col; + int i; + + for (i = 0; ids[i] != COL_END; i++) { + column_for_each(col) { + if (col->id == ids[i]) { + column_select(col); + break; + } + } + } +} + +void column_select_default(void) +{ + column_select_id_list(default_columns); +} + +void column_select_all(void) +{ + struct column_t *col; + + column_for_each(col) + column_select(col); +} + +void column_update_bps_suffix(bool auto_scale, char suffix_char) +{ + struct column_t *col; + char *str; + + if (auto_scale) + util_asprintf(&str, "(B/s)"); + else if (suffix_char) + util_asprintf(&str, "(%ciB/s)", suffix_char); + else + str = util_strdup("(*)"); + + column_for_each(col) { + if (col->unit != COL_BPS) { + col->hdr1_group = util_strdup(col->hdr1_group); + continue; + } + util_asprintf(&col->hdr1_group, "%s%s", col->hdr1_group, str); + } + free(str); + + hdr_updated = true; +} + +void column_exit(void) +{ + struct column_t *col; + + if (hdr_updated) { + column_for_each(col) + free(col->hdr1_group); + } + free(selected_cols); +} diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/column.h s390-tools-2.33.1/zconf/chp/chpstat/column.h --- s390-tools-2.31.0/zconf/chp/chpstat/column.h 1970-01-01 01:00:00.000000000 +0100 +++ 
s390-tools-2.33.1/zconf/chp/chpstat/column.h 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,84 @@ +/* + * Registry for supported table columns + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef COLUMN_H +#define COLUMN_H + +#include + +enum column_id_t { + COL_NONE = -1, + COL_CHPID, + COL_TYPE, + COL_CMG, + COL_SHARED, + COL_SPEED, + COL_INTERVAL, + COL_TIMESTAMP, + COL_UTIL_PART, + COL_UTIL_TOTAL, + COL_UTIL_BUS, + COL_READ_PART, + COL_READ_TOTAL, + COL_WRITE_PART, + COL_WRITE_TOTAL, + COL_FICON_RATE, + COL_FICON_ACTIVE, + COL_FICON_DEFER, + COL_HPF_RATE, + COL_HPF_ACTIVE, + COL_HPF_DEFER, + COL_MSG_RATE_PART, + COL_MSG_RATE_TOTAL, + COL_MSG_SIZE_PART, + COL_MSG_SIZE_TOTAL, + COL_SEND_FAIL_PART, + COL_RCV_FAIL_PART, + COL_RCV_FAIL_TOTAL, + /* Special value indicating no column. */ + COL_END +}; + +enum col_unit_t { + COL_OTHER, + COL_NUMBER, + COL_PERCENT, + COL_BPS, +}; + +struct column_t { + enum column_id_t id; + const char *name; + enum col_unit_t unit; + const char *desc; + const char *hdr2; + const char *hdr1_single; + char *hdr1_group; + unsigned int width; +}; + +#define column_for_each(c) \ + for (unsigned int __i = 0; ((c) = column_get_by_index(__i, false)); \ + __i++) +#define column_for_each_selected(c) \ + for (unsigned int __i = 0; ((c) = column_get_by_index(__i, true)); \ + __i++) + +struct column_t *column_get_by_index(unsigned int i, bool selected); +struct column_t *column_get_by_name(const char *name); + +void column_select(struct column_t *col); +void column_select_id_list(const int *ids); +void column_select_default(void); +void column_select_all(void); + +void column_update_bps_suffix(bool auto_scale, char suffix_char); +void column_exit(void); + +#endif /* COLUMN_H */ diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/key.c s390-tools-2.33.1/zconf/chp/chpstat/key.c --- 
s390-tools-2.31.0/zconf/chp/chpstat/key.c 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/key.c 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,227 @@ +/* + * Registry for supported data keys + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include "key.h" + +#include +#include +#include + +#include "cmg.h" +#include "lib/util_libc.h" +#include "lib/zt_common.h" + +static struct key_t *keys; +static unsigned int num_keys; + +static struct key_t **selected_keys; +static unsigned int num_selected_keys; + +#define GET_CMG_MASK(c) ((u32)(1 << (c))) +#define CMG_ALL_MASK ((u32)0xffffffff) +#define IS_CMG_MASK(c, n) ((c) & GET_CMG_MASK(n)) + +static char *mask_to_cmg_str(u32 mask) +{ + char *str; + int i; + + if (mask == CMG_ALL_MASK) + return util_strdup("all"); + str = util_strdup(""); + for (i = 1; i <= 32; i++) { + if (!IS_CMG_MASK(mask, i)) + continue; + if (*str) + util_concatf(&str, ","); + util_concatf(&str, "%d", i); + } + + return str; +} + +const char *key_group_to_str(enum key_group_t group) +{ + switch (group) { + case KEY_GRP_META: + return "meta"; + case KEY_GRP_ITERATION: + return "iteration"; + case KEY_GRP_CHP: + return "channel_path"; + case KEY_GRP_CHARS: + return "characteristics"; + case KEY_GRP_UTIL: + return "utilization"; + case KEY_GRP_METRICS: + return "metrics"; + default: + return ""; + } +} + +static void add_key(const char *name, enum key_group_t group, bool found, + u32 cmg_mask) +{ + struct key_t *key; + + key = key_get_by_name(name); + if (!key) { + util_expand_array(&keys, &num_keys); + key = &keys[num_keys - 1]; + key->name = util_strdup(name); + key->group = group; + } + key->found |= found; + key->cmg_mask |= cmg_mask; + free(key->cmg_str); + key->cmg_str = mask_to_cmg_str(key->cmg_mask); +} + +static void add_generic_keys(void) +{ + add_key(KEY_META_API_LEVEL, 
KEY_GRP_META, true, CMG_ALL_MASK); + add_key(KEY_META_VERSION, KEY_GRP_META, true, CMG_ALL_MASK); + add_key(KEY_META_HOST, KEY_GRP_META, true, CMG_ALL_MASK); + add_key(KEY_META_TIME, KEY_GRP_META, true, CMG_ALL_MASK); + add_key(KEY_META_TIME_EPOCH, KEY_GRP_META, true, CMG_ALL_MASK); + add_key(KEY_ITERATION, KEY_GRP_ITERATION, true, CMG_ALL_MASK); + add_key(KEY_TIME, KEY_GRP_ITERATION, true, CMG_ALL_MASK); + add_key(KEY_TIME_EPOCH, KEY_GRP_ITERATION, true, CMG_ALL_MASK); + add_key(KEY_CHPID, KEY_GRP_CHP, true, CMG_ALL_MASK); + add_key(KEY_TYPE, KEY_GRP_CHP, true, CMG_ALL_MASK); + add_key(KEY_CMG, KEY_GRP_CHP, true, CMG_ALL_MASK); + add_key(KEY_SHARED, KEY_GRP_CHP, true, CMG_ALL_MASK); + add_key(KEY_SPEED, KEY_GRP_CHP, true, CMG_ALL_MASK); +} + +static void add_cmg_keys(bool all) +{ + enum key_group_t groups[] = { KEY_GRP_UTIL, KEY_GRP_CHARS, + KEY_GRP_METRICS }; + unsigned int i, j; + struct cmg_t *cmg; + char **cmg_keys; + + cmg_for_each(cmg) { + for (i = 0; i < ARRAY_SIZE(groups); i++) { + cmg_keys = cmg_get_keys(cmg, groups[i]); + for (j = 0; cmg_keys[j]; j++) { + add_key(cmg_keys[j], groups[i], + cmg->found || all, + GET_CMG_MASK(cmg->cmg)); + } + cmg_free_keys(cmg_keys); + } + } +} + +void key_init(bool all) +{ + add_generic_keys(); + add_cmg_keys(all); +} + +void key_exit(void) +{ + unsigned int i; + + for (i = 0; i < num_keys; i++) { + free(keys[i].name); + free(keys[i].cmg_str); + } + free(keys); + free(selected_keys); +} + +static int cmp_keys(const void *a, const void *b) +{ + const struct key_t * const *a_key = a; + const struct key_t * const *b_key = b; + + return strcmp((*a_key)->name, (*b_key)->name); +} + +void key_sort_selected(void) +{ + qsort(selected_keys, num_selected_keys, sizeof(struct key_t *), + cmp_keys); +} + +struct key_t *key_get_by_index(unsigned int i, bool selected) +{ + if (selected) + return i < num_selected_keys ? selected_keys[i] : NULL; + return i < num_keys ? 
&keys[i] : NULL; +} + +struct key_t *key_get_by_name(const char *name) +{ + struct key_t *key; + + key_for_each(key) { + if (strcmp(key->name, name) == 0) + return key; + } + return NULL; +} + +void key_select(struct key_t *key) +{ + struct key_t *k; + + /* Prevent duplicates. */ + key_for_each_selected(k) { + if (k == key) + return; + } + util_add_array(&selected_keys, &num_selected_keys, key); +} + +void key_select_by_groups(int groups, bool found) +{ + struct key_t *key; + + key_for_each(key) { + if (found && !key->found) + continue; + if (!(groups & (int)key->group)) + continue; + key_select(key); + } +} + +void key_select_by_cmg(int cmg) +{ + struct key_t *key; + + key_for_each(key) { + if (IS_CMG_MASK(key->cmg_mask, cmg)) + key_select(key); + } +} + +void key_select_all(void) +{ + struct key_t *key; + + key_for_each(key) + key_select(key); +} + +int key_get_selected_groups(void) +{ + struct key_t *key; + int groups = 0; + + key_for_each_selected(key) + groups |= (int)key->group; + + return groups; +} diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/key.h s390-tools-2.33.1/zconf/chp/chpstat/key.h --- s390-tools-2.31.0/zconf/chp/chpstat/key.h 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/key.h 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,89 @@ +/* + * Registry for supported data keys + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef KEY_H +#define KEY_H + +#include + +#include "lib/zt_common.h" + +#define KEY_META_API_LEVEL "meta.api_level" +#define KEY_META_VERSION "meta.version" +#define KEY_META_HOST "meta.host" +#define KEY_META_TIME "meta.time" +#define KEY_META_TIME_EPOCH "meta.time_epoch" +#define KEY_ITERATION "iteration" +#define KEY_TIME "time" +#define KEY_TIME_EPOCH "time_epoch" +#define KEY_CHPID "chpid" +#define KEY_TYPE "type" +#define KEY_CMG "cmg" +#define KEY_SHARED "shared" +#define KEY_SPEED "speed" + +/** + * enum util_fmt_t - Key group identifiers. + * @KEY_GRP_META - Tool-related meta data + * @KEY_GRP_ITERATION - Iteration-related data + * @KEY_GRP_CHP - Channel-path related data + * @KEY_GRP_CHARS - Group for channel-patch measurement characteristics data + * @KEY_GRP_UTIL - Group for unprocessed utilization data + * @KEY_GRP_METRICS - Group for performance metrics + */ +enum key_group_t { + KEY_GRP_META = (1 << 0), + KEY_GRP_ITERATION = (1 << 1), + KEY_GRP_CHP = (1 << 2), + KEY_GRP_CHARS = (1 << 3), + KEY_GRP_UTIL = (1 << 4), + KEY_GRP_METRICS = (1 << 5), +}; + +#define KEY_GRP_ALL (KEY_GRP_META | KEY_GRP_ITERATION | KEY_GRP_CHP | \ + KEY_GRP_CHARS | KEY_GRP_UTIL | KEY_GRP_METRICS) + +/** + * struct key_t - A single data key + * @name: Key name + * @cmg_mask: List (bitmask) of CMGs for which this key is defined + * @cmg_str: List (text) of CMGs for which this key is defined + * @group: Key group this key belongs to + * @found: Flag indicating whether key is provided by CMGS of selected CHPIDs + */ +struct key_t { + char *name; + u32 cmg_mask; + char *cmg_str; + enum key_group_t group; + bool found; +}; + +#define key_for_each(c) \ + for (unsigned int __i = 0; ((c) = key_get_by_index(__i, false)); \ + __i++) +#define key_for_each_selected(c) \ + for (unsigned int __i = 0; ((c) = key_get_by_index(__i, true)); \ + __i++) + +const char *key_group_to_str(enum key_group_t group); +struct key_t *key_get_by_index(unsigned int i, bool selected); +struct key_t 
*key_get_by_name(const char *name); +void key_select(struct key_t *key); +void key_select_by_groups(int groups, bool found); +void key_select_by_cmg(int cmg); +void key_select_all(void); +void key_sort_selected(void); +int key_get_selected_groups(void); + +void key_init(bool all); +void key_exit(void); +struct key_t *_get_key(int i); + +#endif /* KEY_H */ diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/Makefile s390-tools-2.33.1/zconf/chp/chpstat/Makefile --- s390-tools-2.31.0/zconf/chp/chpstat/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,19 @@ +include ../../../common.mak + +all: chpstat + +libs = $(rootdir)/libutil/libutil.a + +chpstat: chpstat.o $(libs) column.o key.o cmg.o cmg_helper.o \ + cmg1.o cmg2.o cmg3.o + +install: all + $(INSTALL) -d -m 755 $(DESTDIR)$(BINDIR) + $(INSTALL) -g $(GROUP) -o $(OWNER) -m 755 chpstat $(DESTDIR)$(BINDIR) + $(INSTALL) -d -m 755 $(DESTDIR)$(MANDIR)/man8 + $(INSTALL) -m 644 -c chpstat.8 $(DESTDIR)$(MANDIR)/man8 + +clean: + rm -f *.o chpstat + +.PHONY: all install clean diff -Nru s390-tools-2.31.0/zconf/chp/chpstat/misc.h s390-tools-2.33.1/zconf/chp/chpstat/misc.h --- s390-tools-2.31.0/zconf/chp/chpstat/misc.h 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/chpstat/misc.h 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,20 @@ +/* + * Miscellaneous definitions + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef MISC_H +#define MISC_H + +/* Program exit codes. 
*/ +enum exit_code_t { + EXIT_OK = 0, /* Program finished successfully */ + EXIT_USAGE = 1, /* Usage error */ + EXIT_RUNTIME = 2, /* Run-time error */ +}; + +#endif /* MISC_H */ diff -Nru s390-tools-2.31.0/zconf/chp/lschp.8 s390-tools-2.33.1/zconf/chp/lschp.8 --- s390-tools-2.31.0/zconf/chp/lschp.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/lschp.8 2024-05-28 08:26:36.000000000 +0200 @@ -108,5 +108,10 @@ Print version number, then exit. .RE + .SH SEE ALSO .BR chchp (8) + +More information on the \fBChannel\-path type identifier\fR can be found at +.br +\fBhttps://www.ibm.com/docs/en/zos/3.1.0?topic=iee196i\-iee174i\-form\-2\-37\fR diff -Nru s390-tools-2.31.0/zconf/chp/Makefile s390-tools-2.33.1/zconf/chp/Makefile --- s390-tools-2.31.0/zconf/chp/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zconf/chp/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -1,6 +1,7 @@ include ../../common.mak -all: chchp lschp +all: chchp lschp chpstat + $(MAKE) -C chpstat all libs = $(rootdir)/libutil/libutil.a @@ -14,8 +15,10 @@ $(INSTALL) -d -m 755 $(DESTDIR)$(MANDIR)/man8 $(INSTALL) -m 644 -c chchp.8 $(DESTDIR)$(MANDIR)/man8 $(INSTALL) -m 644 -c lschp.8 $(DESTDIR)$(MANDIR)/man8 + $(MAKE) -C chpstat install clean: rm -f *.o chchp lschp + $(MAKE) -C chpstat clean .PHONY: all install clean diff -Nru s390-tools-2.31.0/zconf/dasdstat.8 s390-tools-2.33.1/zconf/dasdstat.8 --- s390-tools-2.31.0/zconf/dasdstat.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zconf/dasdstat.8 2024-05-28 08:26:36.000000000 +0200 @@ -2,7 +2,7 @@ .\" s390-tools is free software; you can redistribute it and/or modify .\" it under the terms of the MIT license. See LICENSE for details. .\" -.TH LSDASD 8 "Feb 2011" "s390-tools" +.TH DASDSTAT 8 "Feb 2011" "s390-tools" .SH NAME dasdstat \- read or modify the statistics of the DASD device driver. 
@@ -10,18 +10,18 @@ .SH SYNOPSIS .TP 8 .B dasdstat -.RB [ -h ] +.RB [ \-h ] .TP 8 .B dasdstat -.RB [ -e ] -.RB [ -d ] -.RB [ -r ] -.RB [ -i ] -.RB [ -l ] -.RB [ -c ] -.RB [ -w ] -.RB [ -V ] -.RB [ -v ] +.RB [ \-e ] +.RB [ \-d ] +.RB [ \-r ] +.RB [ \-i ] +.RB [ \-l ] +.RB [ \-c ] +.RB [ \-w ] +.RB [ \-V ] +.RB [ \-v ] .RI [ " [" "] ...]]" .SH DESCRIPTION @@ -41,35 +41,35 @@ .SH OPTIONS .TP 8 -.BR -h | --help +.BR \-h | \-\-help Print help text. .TP -.BR -e | --enable +.BR \-e | \-\-enable Enable the statistics. .TP -.BR -d | --disable +.BR \-d | \-\-disable Disable the statistics. .TP -.BR -r | --reset +.BR \-r | \-\-reset Reset the statistics. .TP -.BR -i | --directory +.BR \-i | \-\-directory Specify the directory in which the statistics can be found. .TP -.BR -l | --long +.BR \-l | \-\-long Print more detailed information, e.g differentiate between read and write requests. .TP -.BR -c | --columns " \fI\fR" +.BR \-c | \-\-columns " \fI\fR" Format the output in a table with the given number of columns. .TP -.BR -w | --column-width " \fI\fR" +.BR \-w | \-\-column\-width " \fI\fR" Set the minimum width of the columns in the output table. .TP -.BR -V | --verbose +.BR \-V | \-\-verbose Print more verbose information. .TP -.BR -v | --version +.BR \-v | \-\-version Print the version of the s390-tools package and the command. .TP \fB\fR = @@ -81,12 +81,12 @@ Print a statistics table for each enabled statistic. .RE -\fBdasdstat -e\fR +\fBdasdstat \-e\fR .RS Enable all DASD statistics. .RE -\fBdasdstat -l dasda 0.0.1800 0.0.18fe 0.0.18ff\fR +\fBdasdstat \-l dasda 0.0.1800 0.0.18fe 0.0.18ff\fR .RS Print a detailed statistics table for DASD block device dasda and CCW devices 0.0.1800, 0.0.18fe and 0.0.18ff. 
A typical scenario for this diff -Nru s390-tools-2.31.0/zconf/lsdasd.8 s390-tools-2.33.1/zconf/lsdasd.8 --- s390-tools-2.31.0/zconf/lsdasd.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zconf/lsdasd.8 2024-05-28 08:26:36.000000000 +0200 @@ -10,56 +10,56 @@ .SH SYNOPSIS .TP 8 .B lsdasd -.RB [ -h ] +.RB [ \-h ] .TP 8 .B lsdasd -.RB [ -a ] -.RB [ -b ] -.RB [ -s ] -.RB [ -v ] -.RB [ -l ] -.RB [ -c ] -.RB [ -u ] -.RB [ --version ] -.RI [ " [" "] ...]]" +.RB [ \-a ] +.RB [ \-b ] +.RB [ \-s ] +.RB [ \-v ] +.RB [ \-l ] +.RB [ \-c ] +.RB [ \-u ] +.RB [ \-\-version ] +.RI [ " [" "] ...]]" .SH DESCRIPTION The lsdasd command provides an overview of available DASD devices. .SH OPTIONS .TP 8 -.BR -h | --help +.BR \-h | \-\-help Print help text. .TP -.BR -s | --short +.BR \-s | \-\-short Suppresses leading "0.0." for bus IDs. .TP -.BR -a | --offline +.BR \-a | \-\-offline Include all (offline) devices. .TP -.BR -b | --base +.BR \-b | \-\-base Include only base devices. .TP -.BR -c | --compat +.BR \-c | \-\-compat Old output of lsdasd for compatibility. .TP -.BR -l | --long +.BR \-l | \-\-long Extended output of lsdasd including UID and attributes. .TP -.BR -u | --uid +.BR \-u | \-\-uid Output includes and is sorted by UID. .TP -.BR -H | --host-acces +.BR \-H | \-\-host\-acces Show information about all hosts using this device. .TP -.BR -v | --verbose +.BR \-v | \-\-verbose Only for compatibility (and maybe future) use. This option currently does nothing. .TP -\fB--version\fR +\fB\-\-version\fR Print the version of the s390-tools package and the command. .TP -\fB\fR = +\fB\fR = Bus ID of the device(s) that should be displayed. 
.SH EXAMPLES diff -Nru s390-tools-2.31.0/zconf/zcrypt/chzcrypt.8 s390-tools-2.33.1/zconf/zcrypt/chzcrypt.8 --- s390-tools-2.31.0/zconf/zcrypt/chzcrypt.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zconf/zcrypt/chzcrypt.8 2024-05-28 08:26:36.000000000 +0200 @@ -16,39 +16,39 @@ .SH SYNOPSIS .TP 9 .B chzcrypt -.B -e +.B \-e .RB "|" -.B -d -.RB "( " -a " | " +.B \-d +.RB "( " \-a " | " .I [...] ) .TP .B chzcrypt -.B --config-on +.B \-\-config\-on .RB "|" -.B --config-off -.RB "( " -a " | " +.B \-\-config\-off +.RB "( " \-a " | " .I [...] ) .TP .B chzcrypt -.RB "[ " -p " | " -n " ] [ " -t +.RB "[ " \-p " | " \-n " ] [ " \-t .I ] .TP .B chzcrypt -.RB "[ " -c +.RB "[ " \-c .I ] .TP .B chzcrypt -.RB "[ " -q +.RB "[ " \-q .I ] .TP -.B chzcrypt -h +.B chzcrypt \-h .TP -.B chzcrypt -v +.B chzcrypt \-v .SH DESCRIPTION The .B chzcrypt @@ -61,62 +61,62 @@ requires that the sysfs filesystem is mounted. .SH OPTIONS .TP 8 -.B -e, --enable +.B \-e, \-\-enable Set the given cryptographic device(s) online. .TP 8 -.B -d, --disable +.B \-d, \-\-disable Set the given cryptographic device(s) offline. .TP 8 -.B -a, --all +.B \-a, \-\-all Set all available cryptographic device(s) online or offline. .TP 8 -.B --config-on +.B \-\-config\-on Set the given cryptographic card device(s) config on ('configured'). .TP 8 -.B --config-off +.B \-\-config\-off Set the given cryptographic card device(s) config off ('deconfigured'). .TP 8 -.B -p, --poll-thread-enable +.B \-p, \-\-poll\-thread\-enable Enable zcrypt's poll thread. .TP 8 -.B -n, --poll-thread-disable +.B \-n, \-\-poll\-thread\-disable Disable zcrypt's poll thread. .TP 8 -.BI "-c, --config-time" " " +.BI "\-c, \-\-config\-time" " " Set configuration timer for re-scanning the AP bus to .I seconds. .TP 8 -.B --se-associate +.B \-\-se\-associate Associate the given queue device with the given association index. This command is only valid within an Secure Execution guest with AP pass-through support enabled. 
.TP 8 -.B --se-bind +.B \-\-se\-bind Bind the given queue device. This command is only valid within an Secure Execution guest with AP pass-through support enabled. .TP 8 -.B --se-unbind +.B \-\-se\-unbind Unbind the given queue device. This command is only valid within an Secure Execution guest with AP pass-through support enabled. .TP 8 -.BI "-t, --poll-timeout" " " +.BI "\-t, \-\-poll\-timeout" " " Set poll timer to run poll tasklet all .I nanoseconds. .TP 8 -.BI "-q, --default-domain" " " +.BI "\-q, \-\-default\-domain" " " Set the new default domain of the AP bus to . The number of -available domains can be retrieved with the lszcrypt command ('-d' +available domains can be retrieved with the lszcrypt command ('\-d' option). .TP 8 -.B -V, --verbose +.B \-V, \-\-verbose Print verbose messages. .TP 8 -.B -h, --help +.B \-h, \-\-help Print help text and exit. .TP 8 -.B -v, --version +.B \-v, \-\-version Print version information and exit. .TP 8 .B @@ -133,30 +133,30 @@ both in hexadecimal notation. .TP 8 .B -An APQN queue device given as xy.abcd as it is listed by lszcrypt -V. +An APQN queue device given as xy.abcd as it is listed by lszcrypt \-V. .SH EXAMPLES .TP -.B chzcrypt -e 0 1 12 +.B chzcrypt \-e 0 1 12 Will set the cryptographic card devices 0, 1 and 12 online. .TP -.B chzcrypt -e 10.0038 +.B chzcrypt \-e 10.0038 Will set the cryptographic device '10.0038' respectively card id 16 (0x10) with domain 56 (0x38) online. .TP -.B chzcrypt -d -a +.B chzcrypt \-d \-a Will set all available cryptographic devices offline. .TP -.B chzcrypt --config-on -a -V +.B chzcrypt \-\-config\-on \-a \-V Set all available crypto cards to config on, be verbose. .TP -.B chzcrypt -V --config-off card01 card03 +.B chzcrypt \-V \-\-config\-off card01 card03 Switch the two crypto cards 1 and 3 to deconfigured, be verbose. .TP -.B chzcrypt -c 60 -n +.B chzcrypt \-c 60 \-n Will set configuration timer for re-scanning the AP bus to 60 seconds and disable zcrypt's poll thread. 
.TP -.B chzcrypt -q 67 +.B chzcrypt \-q 67 Will set the default domain to 67. .SH NOTES Support for crypto cards to get switched config on or off requires a diff -Nru s390-tools-2.31.0/zconf/zcrypt/lszcrypt.8 s390-tools-2.33.1/zconf/zcrypt/lszcrypt.8 --- s390-tools-2.31.0/zconf/zcrypt/lszcrypt.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zconf/zcrypt/lszcrypt.8 2024-05-28 08:26:36.000000000 +0200 @@ -17,27 +17,27 @@ .TP 9 .B lszcrypt .RB "[" "]" -.RB "[" -V "]" +.RB "[" \-V "]" [ .I [...]] .TP .B lszcrypt -.B -c +.B \-c .TP -.B lszcrypt -b +.B lszcrypt \-b .TP -.B lszcrypt -d +.B lszcrypt \-d .TP -.B lszcrypt -h +.B lszcrypt \-h .TP -.B lszcrypt -s +.B lszcrypt \-s .TP -.B lszcrypt -v +.B lszcrypt \-v . TP .B -[--accelonly|--ccaonly|--ep11only] [--cardonly|--queueonly] +[\-\-accelonly|\-\-ccaonly|\-\-ep11only] [\-\-cardonly|\-\-queueonly] .SH DESCRIPTION The .B lszcrypt @@ -57,7 +57,7 @@ interrupt status. .SH OPTIONS .TP 8 -.B -V, --verbose +.B \-V, \-\-verbose The verbose level for cryptographic device information. With this verbose level additional information like hardware card type, hardware queue depth, pending requests count, installed function facilities and @@ -70,7 +70,7 @@ Please note that the card device representation and the queue device are both in hexadecimal notation. .TP 8 -.B -b, --bus +.B \-b, \-\-bus Displays the AP bus attributes and exits. There is also a list of AP bus features shown here: @@ -87,7 +87,7 @@ APSB - AP bus has Secure Execution AP pass-through support. .RE .TP 8 -.B -c, --capability +.B \-c, \-\-capability Shows the capabilities of a cryptographic card or queue device of hardware type 6 or higher. A card device id value may be given as decimal or hex value (with a leading 0x), a queue device needs to be @@ -127,7 +127,7 @@ mode) are shown here. .RE .TP 8 -.B -d, --domains +.B \-d, \-\-domains Shows the usage and control domains of the cryptographic devices. 
The displayed domains of the cryptographic device depends on the initial cryptographic configuration. @@ -140,28 +140,28 @@ B - indicate both (control and usage domain) .RE .TP 8 -.B -h, --help +.B \-h, \-\-help Displays help text and exits. .TP 8 -.B -s, --serial +.B \-s, \-\-serial Shows the serial numbers for CCA and EP11 crypto cards. .TP 8 -.B -v, --version +.B \-v, \-\-version Displays version information and exits. .TP 8 -.B --accelonly +.B \-\-accelonly Show only information for cards/queues in Accelerator mode. .TP 8 -.B --ccaonly +.B \-\-ccaonly Show only information for cards/queues in CCA-Coprocessor mode. .TP 8 -.B --ep11only +.B \-\-ep11only Show only information for cards/queues in EP11-Coprocessor mode. .TP 8 -.B --cardonly +.B \-\-cardonly Show only information for cards but no queue info. .TP 8 -.B --queueonly +.B \-\-queueonly Show only information for queues but no card info. .SH LISTING DETAILS Here is an explanation of the columns displayed. Please note that some @@ -201,7 +201,7 @@ .br A crypto card can also be 'configured' or 'deconfigured'. This state may be adjusted on the HMC. The chzcrypt application can also trigger -this state with the --config-on and --config-off options. +this state with the \-\-config\-on and \-\-config\-off options. .br lszcrypt shows 'online' when a card or queue is available for cryptographic operations. 'offline' is displayed when a card or queue @@ -285,9 +285,9 @@ .br illicit - AP queue is not available for this Secure Execution guest. .SH NOTES -Use only one of the mode filtering options --accelonly, --ccaonly, ---ep11only. Same with card/queue filtering: Use only one of ---cardonly, --queueonly. However, one of the mode filtering options +Use only one of the mode filtering options \-\-accelonly, \-\-ccaonly, +\-\-ep11only. Same with card/queue filtering: Use only one of +\-\-cardonly, \-\-queueonly. However, one of the mode filtering options and one of the card/queue filtering can be combined. 
.SH EXAMPLES .TP @@ -300,7 +300,7 @@ Displays the card/domain ID, card type, mode, online status and request count for cryptographic devices 1, 3, and 5. .TP -.B lszcrypt -V 3 7 11 +.B lszcrypt \-V 3 7 11 Displays the card/domain ID, card type, mode, online status, request count, number of requests in the hardware queue, number of outstanding requests and installed function facilities for cryptographic devices @@ -314,10 +314,10 @@ Displays information of all available queue devices (potentially multiple adapters) with domain 56 (0x38). .TP -.B lszcrypt -b +.B lszcrypt \-b Displays AP bus information. .TP -.B lszcrypt -c 7 +.B lszcrypt \-c 7 .RS .br Coprocessor card07 provides capability for: diff -Nru s390-tools-2.31.0/zconf/zcrypt/zcryptstats.8 s390-tools-2.33.1/zconf/zcrypt/zcryptstats.8 --- s390-tools-2.31.0/zconf/zcrypt/zcryptstats.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zconf/zcrypt/zcryptstats.8 2024-05-28 08:26:36.000000000 +0200 @@ -40,7 +40,7 @@ The card device representation and the queue device are both in hexadecimal notation. .PP -Use the \fB\-\-no-apqn\fP option to omit the performance measurement data of +Use the \fB\-\-no\-apqn\fP option to omit the performance measurement data of the queues. If the system does not support obtaining cryptographic performance measurement data on the queue devices, only the card devices are monitored. @@ -60,7 +60,7 @@ .RE .PP The sum of all operations is displayed in a separate \fBtotals\fP line. -Use the \fB\-\-only-totals\fP option to omit the individual counters and +Use the \fB\-\-only\-totals\fP option to omit the individual counters and display the totals only. Use the \fB\-\-no\-totals\fP option to omit the totals. .PP @@ -86,8 +86,8 @@ .BR DEVICE_ID Specifies a cryptographic device for which statistics are displayed. A device ID can either be a card device ID -(\fI\fP) or a queue device (APQN) ID (\fI.\fP). -To filter all devices by domain, provide \fI.\fP. 
+(\fI\fP) or a queue device (APQN) ID (\fI.\fP). +To filter all devices by domain, provide \fI.\fP. If no IDs are given, statistics are displayed for all available devices. . .TP @@ -113,12 +113,12 @@ .IP "\(bu" 2 \fBTABLE:\fP Displays the statistics in a human readable simple table format. The individual counters are omitted, and only the totals are displayed. -This output format implies option \fB\-\-only-totals\fP. +This output format implies option \fB\-\-only\-totals\fP. .IP "\(bu" 2 \fBCSV:\fP Displays the statistics in comma-separated values format. The values are separated with a semicolon. The individual counters are omitted, and only the totals are displayed. This output format implies option -\fB\-\-only-totals\fP. +\fB\-\-only\-totals\fP. .RE . .TP @@ -156,7 +156,7 @@ available to the Linux instance. Using this option additional cryptographic devices that are available in the CEC, but not available to the Linux system are also monitored. -This option cannot be specified together with option \fB\-\-only-online\fP. +This option cannot be specified together with option \fB\-\-only\-online\fP. . .TP .BR \-O ", " \-\-only\-online @@ -184,7 +184,7 @@ .B All All operations on the adapter .TP -.B RSA Key-gen +.B RSA Key\-gen RSA-key-generation operations (also included in \fBAll\fP). .RE .PP @@ -225,7 +225,7 @@ .B Symm. Complete Symmetric-key functions that return a complete or final result. .TP -.B Asym. Key-gen +.B Asym. Key\-gen asymmetric-key generation function. .RE .PP @@ -237,15 +237,15 @@ .B zcryptstats 02 Display statistics for all cryptographic devices with card ID \fB02\fP. .TP -.B zcryptstats 02.0005 --interval 5 +.B zcryptstats 02.0005 \-\-interval 5 Display statistics for cryptographic devices with card ID \fB02\fP and domain ID \fB0005\fP in a 5 second interval. 
.TP -.B zcryptstats .0005 --count 10 +.B zcryptstats .0005 \-\-count 10 Display statistics for cryptographic devices with domain ID \fB0005\fP with the default interval time of 10 seconds, for 10 intervals. .TP -.B zcryptstats 02 --output JSON +.B zcryptstats 02 \-\-output JSON Display statistics for all cryptographic devices with card ID \fB02\fP in \fBJSON\fP output format. .TP diff -Nru s390-tools-2.31.0/zdev/dracut/Makefile s390-tools-2.33.1/zdev/dracut/Makefile --- s390-tools-2.31.0/zdev/dracut/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdev/dracut/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -20,11 +20,12 @@ $(INSTALL) -m 755 -d $(DESTDIR)$(DRACUTMODDIR)/$(ZDEVDIR) $(INSTALL) -m 755 $(ZDEVDIR)/module-setup.sh \ $(ZDEVDIR)/parse-zdev.sh \ - $(ZDEVDIR)/zdev-lib.sh \ $(ZDEVDIR)/parse-zfcp.sh \ $(ZDEVDIR)/parse-dasd.sh \ $(ZDEVDIR)/retain-zdev.sh \ $(DESTDIR)$(DRACUTMODDIR)/$(ZDEVDIR)/ + $(INSTALL) -m 644 $(ZDEVDIR)/zdev-lib.sh \ + $(DESTDIR)$(DRACUTMODDIR)/$(ZDEVDIR)/ $(INSTALL) -m 755 -d $(DESTDIR)$(DRACUTMODDIR)/$(ZDEVKDUMPDIR) $(INSTALL) -m 755 $(ZDEVKDUMPDIR)/module-setup.sh \ $(DESTDIR)$(DRACUTMODDIR)/$(ZDEVKDUMPDIR)/ diff -Nru s390-tools-2.31.0/zdev/include/device.h s390-tools-2.33.1/zdev/include/device.h --- s390-tools-2.31.0/zdev/include/device.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdev/include/device.h 2024-05-28 08:26:36.000000000 +0200 @@ -15,7 +15,7 @@ #include "exit_code.h" #include "hash.h" #include "misc.h" -#include "site.h" +#include "zdev.h" struct subtype; struct setting_list; diff -Nru s390-tools-2.31.0/zdev/include/exit_code.h s390-tools-2.33.1/zdev/include/exit_code.h --- s390-tools-2.31.0/zdev/include/exit_code.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdev/include/exit_code.h 2024-05-28 08:26:36.000000000 +0200 @@ -54,6 +54,9 @@ EXIT_GROUP_FAILED = 31, /* CCW group device grouping failed */ EXIT_UNGROUP_FAILED = 32, /* CCW group device ungrouping failed */ + /* is-owner 
related */ + EXIT_UNKNOWN_FILE = 33, /* The file is not generated by chzdev */ + EXIT_INTERNAL_ERROR = 99, /* An internal error occurred */ } exit_code_t; diff -Nru s390-tools-2.31.0/zdev/include/site.h s390-tools-2.33.1/zdev/include/site.h --- s390-tools-2.31.0/zdev/include/site.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdev/include/site.h 1970-01-01 01:00:00.000000000 +0100 @@ -1,31 +0,0 @@ -/* - * zdev - Information regarding site-specific configurations used for - * zdev tools. - * - * Copyright IBM Corp. 2023 - * - * s390-tools is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ - -#ifndef SITE_H -#define SITE_H - -/** - * Currently zdev supports 10 sites. Which means, zdev support 10 different - * set of attributes which are specific to each site. When the user does - * not provide any site information, the common set will be used which is - * not specific to any site. So, total we have 11 persistent attribute sets - * Where, - * 0- 9: Site specific attributes - * 10: Common attributes which do not belong to any sites - */ - -#define NUM_SITES 11 -#define NUM_USER_SITES (NUM_SITES - 1) -#define SITE_FALLBACK NUM_USER_SITES - -/* Helper to find the availability of site-configuration */ -#define dev_site_configured(dev, x) (dev->site_specific[(x)].exists && \ - !dev->site_specific[(x)].deconfigured) -#endif /* SITE_H */ diff -Nru s390-tools-2.31.0/zdev/include/zdev.h s390-tools-2.33.1/zdev/include/zdev.h --- s390-tools-2.31.0/zdev/include/zdev.h 1970-01-01 01:00:00.000000000 +0100 +++ s390-tools-2.33.1/zdev/include/zdev.h 2024-05-28 08:26:36.000000000 +0200 @@ -0,0 +1,32 @@ +/* + * zdev - Minimal header file containing generic definitions utilized by + * zdev-tools. + * + * Copyright IBM Corp. 2024 + * + * s390-tools is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef ZDEV_H +#define ZDEV_H + +/** + * Currently zdev supports 10 sites. Which means, zdev support 10 different + * set of attributes which are specific to each site. When the user does + * not provide any site information, the common set will be used which is + * not specific to any site. So, total we have 11 persistent attribute sets + * Where, + * 0- 9: Site specific attributes + * 10: Common attributes which do not belong to any sites + */ + +#define NUM_SITES 11 +#define NUM_USER_SITES (NUM_SITES - 1) +#define SITE_FALLBACK NUM_USER_SITES +#define CHZDEV_HEADER "# Generated by chzdev" + +/* Helper to find the availability of site-configuration */ +#define dev_site_configured(dev, x) (dev->site_specific[(x)].exists && \ + !dev->site_specific[(x)].deconfigured) +#endif /* ZDEV_H */ diff -Nru s390-tools-2.31.0/zdev/man/chzdev.8 s390-tools-2.33.1/zdev/man/chzdev.8 --- s390-tools-2.31.0/zdev/man/chzdev.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdev/man/chzdev.8 2024-05-28 08:26:36.000000000 +0200 @@ -601,6 +601,18 @@ .PP .RE . +.OD is-owner "" "FILENAME" +Detect if the file mentioned is generated by zdev tools. + +Return 0 if the +.IR FILENAME +is generated by zdev tools and 33 otherwise. + +.B Example: +.CL chzdev --is-owner /etc/udev/rules/41-dasd-0.0.1234.rules + +.PP +. .OD no-root-update "" "" Skip root device update. @@ -625,34 +637,6 @@ when a device becomes available, or when a device driver is loaded. .PP . -.OD site "s" "SITE-ID" -Apply the persistent configuration to the specified site only. - -This option can be used to configure the device in such a way that, the -same configuration can be used on multiple different sites with different -device hardwares, without any modification. With the site information, we -can add different configurations which are specific to different sites. Where -each site represents different systems or different hardware devices which -are even located in different locations. 
-For example, having -the configuration of main-site or failover-site in the same place, but these -configurations used only on the respective sites. Currently a user can define -10 different configurations for 10 different sites with -.I SITE-ID -0 to 9. - -The configuration settings is put into effect only on the system with the -site number which is mentioned in -.I SITE-ID. - -This parameter is only effective only for persistent configurations and for -dasd device-types. - -.B Example: -.CL chzdev -ep 0.0.f001 --site 3 - -.PP -. .OD quiet "q" "" Print only minimal run-time information. .PP @@ -688,6 +672,34 @@ Some attributes are mandatory and cannot be removed. .PP . +.OD site "s" "SITE-ID" +Apply the persistent configuration to the specified site only. + +This option can be used to configure the device in such a way that, the +same configuration can be used on multiple different sites with different +device hardwares, without any modification. With the site information, we +can add different configurations which are specific to different sites. Where +each site represents different systems or different hardware devices which +are even located in different locations. +For example, having +the configuration of main-site or failover-site in the same place, but these +configurations used only on the respective sites. Currently a user can define +10 different configurations for 10 different sites with +.I SITE-ID +0 to 9. + +The configuration settings is put into effect only on the system with the +site number which is mentioned in +.I SITE-ID. + +This parameter is only effective only for persistent configurations and for +dasd device-types. + +.B Example: +.CL chzdev -ep 0.0.f001 --site 3 + +.PP +. .OD type "t" "" Select device type as target for actions. 
@@ -916,6 +928,14 @@ CCW group device: Ungrouping failed .PP .TP +.B 33 +The file specified by +.nh +\-\-is-owner +.hy +is not generated by zdev +.PP +.TP .B 99 An internal error occurred .PP diff -Nru s390-tools-2.31.0/zdev/man/lszdev.8 s390-tools-2.33.1/zdev/man/lszdev.8 --- s390-tools-2.31.0/zdev/man/lszdev.8 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdev/man/lszdev.8 2024-05-28 08:26:36.000000000 +0200 @@ -439,6 +439,8 @@ List information from the persistent configuration only. Restricts output to information obtained from configuration files. +.OD quiet "q" "" +Print only minimal run-time information. .PP . .OD site "s" "SITE-ID" @@ -460,8 +462,6 @@ .PP . -.OD quiet "q" "" -Print only minimal run-time information. .PP . .OD type "t" "" diff -Nru s390-tools-2.31.0/zdev/src/chzdev.c s390-tools-2.33.1/zdev/src/chzdev.c --- s390-tools-2.31.0/zdev/src/chzdev.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdev/src/chzdev.c 2024-05-28 08:26:36.000000000 +0200 @@ -50,6 +50,7 @@ * diff of simple function parameter modifications to pass this value. */ int global_site_id = SITE_FALLBACK; +#define MAX_HEADER_LENGTH sizeof(CHZDEV_HEADER) /* Main program action. 
*/ typedef enum { @@ -60,6 +61,7 @@ ACT_HELP_ATTRIBS, ACT_EXPORT, ACT_IMPORT, + ACT_IS_OWNER, ACT_APPLY, ACT_HELP, ACT_VERSION, @@ -86,6 +88,7 @@ unsigned int help_attribs:1; char *export; char *import; + char *is_owner; unsigned int apply:1; unsigned int help:1; unsigned int version:1; @@ -134,6 +137,7 @@ OPT_LIST_TYPES = 'L', OPT_EXPORT = (OPT_ANONYMOUS_BASE+__COUNTER__), OPT_IMPORT = (OPT_ANONYMOUS_BASE+__COUNTER__), + OPT_IS_OWNER = (OPT_ANONYMOUS_BASE+__COUNTER__), OPT_APPLY = (OPT_ANONYMOUS_BASE+__COUNTER__), OPT_ACTIVE = 'a', OPT_PERSISTENT = 'p', @@ -187,6 +191,13 @@ OPT_REMOVE_ALL, OPT_CONFIGURED, OPT_EXISTING, OPT_BY_PATH, OPT_BY_ATTRIB, OPT_BY_NODE, OPT_BY_INTERFACE, OPT_DECONFIGURE_ALL, 0), + OPTS_CONFLICT(OPT_IS_OWNER, + OPT_DECONFIGURE, OPT_HELP_ATTRIBS, OPT_LIST_TYPES, + OPT_EXPORT, OPT_IMPORT, OPT_APPLY, OPT_REMOVE, + OPT_REMOVE_ALL, OPT_CONFIGURED, OPT_EXISTING, OPT_ONLINE, + OPT_OFFLINE, OPT_BY_PATH, OPT_BY_NODE, OPT_BY_INTERFACE, + OPT_BY_ATTRIB, OPT_ACTIVE, OPT_PERSISTENT, OPT_FAILED, + OPT_DECONFIGURE_ALL, 0), OPTS_CONFLICT(OPT_APPLY, OPT_DECONFIGURE, OPT_LIST_ATTRIBS, OPT_HELP_ATTRIBS, OPT_LIST_TYPES, OPT_EXPORT, OPT_IMPORT, OPT_REMOVE, @@ -227,6 +238,7 @@ { "list-types", no_argument, NULL, OPT_LIST_TYPES }, { "export", required_argument, NULL, OPT_EXPORT }, { "import", required_argument, NULL, OPT_IMPORT }, + { "is-owner", required_argument, NULL, OPT_IS_OWNER }, { "apply", no_argument, NULL, OPT_APPLY }, { "help", no_argument, NULL, OPT_HELP }, { "version", no_argument, NULL, OPT_VERSION }, @@ -278,6 +290,7 @@ return; free(opts->export); free(opts->import); + free(opts->is_owner); select_opts_free(opts->select); strlist_free(opts->positional); strlist_free(opts->settings); @@ -318,6 +331,8 @@ return ACT_EXPORT; if (opts->import) return ACT_IMPORT; + if (opts->is_owner) + return ACT_IS_OWNER; if (opts->apply) return ACT_APPLY; return ACT_CONFIGURE; @@ -339,6 +354,8 @@ return "--export"; case ACT_IMPORT: return "--import"; + case ACT_IS_OWNER: 
+ return "--is-owner"; case ACT_APPLY: return "--apply"; case ACT_HELP: @@ -902,6 +919,16 @@ opts->import = misc_strdup(optarg); break; + case OPT_IS_OWNER: + /* --is-owner */ + if (opts->is_owner) { + error("Cannot specify '--is-owner' multiple " + "times\n"); + return EXIT_USAGE_ERROR; + } + opts->is_owner = misc_strdup(optarg); + break; + case OPT_APPLY: /* --apply */ opts->apply = 1; @@ -2929,6 +2956,39 @@ } } +static exit_code_t zdev_file_detect(FILE *fd) +{ + char buffer[MAX_HEADER_LENGTH]; + + if (fgets(buffer, MAX_HEADER_LENGTH, fd)) { + if (starts_with(buffer, CHZDEV_HEADER)) + return EXIT_OK; + } else { + return EXIT_RUNTIME_ERROR; + } + + return EXIT_UNKNOWN_FILE; +} + +/* Check if the file mentioned is generated by zdev */ +static exit_code_t do_is_owner(struct options *opts) +{ + FILE *fd; + exit_code_t rc = EXIT_OK; + + fd = fopen(opts->is_owner, "r"); + if (!fd) { + error("Could not open file %s: %s\n", opts->is_owner, + strerror(errno)); + return EXIT_RUNTIME_ERROR; + } + + rc = zdev_file_detect(fd); + + fclose(fd); + return rc; +} + /* Import configuration data. 
*/ static exit_code_t do_import(struct options *opts) { @@ -3137,6 +3197,9 @@ case ACT_IMPORT: rc = do_import(&opts); break; + case ACT_IS_OWNER: + rc = do_is_owner(&opts); + break; case ACT_APPLY: rc = do_apply(&opts); break; diff -Nru s390-tools-2.31.0/zdev/src/chzdev_usage.txt s390-tools-2.33.1/zdev/src/chzdev_usage.txt --- s390-tools-2.31.0/zdev/src/chzdev_usage.txt 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdev/src/chzdev_usage.txt 2024-05-28 08:26:36.000000000 +0200 @@ -58,5 +58,6 @@ --no-settle Do not wait for udev to settle --auto-conf Apply changes to auto-configuration only -s, --site ID Apply changes to the specified site only + --is-owner FILE Examine whether the file is generated by zdev -V, --verbose Print additional run-time information -q, --quiet Print only minimal run-time information diff -Nru s390-tools-2.31.0/zdev/src/zdev_id.c s390-tools-2.33.1/zdev/src/zdev_id.c --- s390-tools-2.31.0/zdev/src/zdev_id.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdev/src/zdev_id.c 2024-05-28 08:26:36.000000000 +0200 @@ -13,7 +13,7 @@ #include #include -#include "site.h" +#include "zdev.h" #include "zdev_id.h" #define SYSINFO "/proc/sysinfo" @@ -226,6 +226,11 @@ if (!fd) goto err; + if (fprintf(fd, CHZDEV_HEADER "\n") < 0) { + fclose(fd); + goto err; + } + if (site_id == SITE_FALLBACK) rc = fprintf(fd, "ZDEV_SITE_ID=\"\"\n"); else diff -Nru s390-tools-2.31.0/zdsfs/zdsfs.1 s390-tools-2.33.1/zdsfs/zdsfs.1 --- s390-tools-2.31.0/zdsfs/zdsfs.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdsfs/zdsfs.1 2024-05-28 08:26:36.000000000 +0200 @@ -13,7 +13,7 @@ \fBzdsfs\fP \fI\fR \fI\fR [\fI\fR] .SS unmounting: .TP -\fBfusermount\fP -u \fI\fR +\fBfusermount\fP \-u \fI\fR .SH DESCRIPTION @@ -58,7 +58,7 @@ always known, zdsfs does not support mmap. Seek operations read the whole data set to the given offset. 
The performance of seek operations to previous offsets can be improved by buffering seek -offsets, see option `-o seekbuffers'. +offsets, see option `\-o seekbuffers'. A further consequence of the unknown exact data size is that zdsfs cannot provide exact file sizes. As a heuristic, the given file sizes @@ -66,7 +66,7 @@ the extents that belong to each data set. When the actual end of the data is reached during read, the usual end of file (EOF) is returned. To ensure that the EOF is passed to the user correctly, the option -`-o direct_io' is set by zdsfs implicitly. +`\-o direct_io' is set by zdsfs implicitly. Incomplete multi-volume data sets are not detected if only the first volume (device) of the data set is present. @@ -77,7 +77,7 @@ .TP \fB\fR One or more DASD device nodes, where node specifications are separated by blanks. The device nodes can be specified explicitly with -the command or with the -l option and a file. +the command or with the \-l option and a file. .TP \fB\fR The mount point for the specified DASD. .TP @@ -200,19 +200,19 @@ \fB\-o\fR codepage_from=\fI\fR Override EBCDIC CP1047 as the default code-page for the source. Must be combined with \fB\-o\fR codepage_to=\fI\fR. Overrides settings -in a data set config file. Issue iconv -l for a list of valid +in a data set config file. Issue iconv \-l for a list of valid specifications for . .TP \fB\-o\fR codepage_to=\fI\fR Override UTF-8 as the default code-page for the target. Must be combined with \fB\-o\fR codepage_from=\fI\fR. Overrides settings -in a data set configu file. Issue iconv -l for a list of valid +in a data set configu file. Issue iconv \-l for a list of valid specifications for . .SS "Applicable FUSE options (version 2.8):" This is a selected subset of all FUSE options. Use the zdsfs -\fB\--help\fR option to print a full list. +\fB\-\-help\fR option to print a full list. 
.TP \fB\-d\fR or \fB\-o\fR debug @@ -355,7 +355,7 @@ .br An explicit specification of source and target code-page overrides the default conversion tables The code-page specifications must be separated by a comma. -Issue iconv -l for a list of valid code-page specifications. +Issue iconv \-l for a list of valid code-page specifications. .PP @@ -382,31 +382,31 @@ descriptor words in the byte stream, enter: .br - # zdsfs -o rdw -o tracks=4 /dev/dasde /mnt + # zdsfs \-o rdw \-o tracks=4 /dev/dasde /mnt .br To unmount the z/OS disk mounted on /mnt enter: .br - # fusermount -u /mnt + # fusermount \-u /mnt To list all extended attributes of file FOOBAR.TEST.TXT assuming the z/OS disk was mounted on /mnt: - # getfattr -d /mnt/FOOBAR.TEST.TXT + # getfattr \-d /mnt/FOOBAR.TEST.TXT .br To mount the z/OS disk using the z/OSMF REST services for coordinated read access: - # ./zdsfs -o restapi -o restserver=zos1.server.tld/zosmf /dev/dasde /mnt/ + # ./zdsfs \-o restapi \-o restserver=zos1.server.tld/zosmf /dev/dasde /mnt/ .br To mount the z/OS disk and enable code-page conversion for all data sets using a custom source and target code page: - # ./zdsfs -o codepage_from=CP037 -o codepage_to=ISO-8859-1 /dev/dasde /mnt/ + # ./zdsfs \-o codepage_from=CP037 \-o codepage_to=ISO\-8859\-1 /dev/dasde /mnt/ .SH SEE ALSO diff -Nru s390-tools-2.31.0/zdump/dfi_s390mv.c s390-tools-2.33.1/zdump/dfi_s390mv.c --- s390-tools-2.31.0/zdump/dfi_s390mv.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdump/dfi_s390mv.c 2024-05-28 08:26:36.000000000 +0200 @@ -73,8 +73,8 @@ struct df_s390_dumper dumper; int dump_incomplete; bool extended; - u64 magic_number; /* Reference value to compare with */ - char dumper_magic[7]; /* Reference value to compare with */ + u64 magic_number; /* Reference value to compare with */ + char dumper_magic[DF_S390_DUMPER_MAGIC_SIZE]; /* Reference value to compare with */ } l; /* @@ -273,7 +273,7 @@ l.dump_incomplete = 1; } - if (strncmp(vol->dumper.magic, 
l.dumper_magic, 7) != 0) { + if (strncmp(vol->dumper.magic, l.dumper_magic, DF_S390_DUMPER_MAGIC_SIZE) != 0) { vol->sign = SIGN_INVALID; l.dump_incomplete = 1; } @@ -507,7 +507,7 @@ ZG_CHECK_NONE) == -1) return -ENODEV; df_s390_dumper_read(g.fh, l.blk_size, &l.dumper); - if (strncmp(l.dumper.magic, l.dumper_magic, 7) != 0) + if (strncmp(l.dumper.magic, l.dumper_magic, DF_S390_DUMPER_MAGIC_SIZE) != 0) return -ENODEV; table_read(g.fh, l.blk_size, &l.table); return 0; @@ -567,10 +567,12 @@ { if (l.extended) { l.magic_number = DF_S390_MAGIC_EXT; - memcpy(l.dumper_magic, DF_S390_DUMPER_MAGIC_MV_EXT, 7); + memcpy(l.dumper_magic, DF_S390_DUMPER_MAGIC_MV_EXT, + DF_S390_DUMPER_MAGIC_SIZE); } else { l.magic_number = DF_S390_MAGIC; - memcpy(l.dumper_magic, DF_S390_DUMPER_MAGIC_MV, 7); + memcpy(l.dumper_magic, DF_S390_DUMPER_MAGIC_MV, + DF_S390_DUMPER_MAGIC_SIZE); } } diff -Nru s390-tools-2.31.0/zdump/df_s390.c s390-tools-2.33.1/zdump/df_s390.c --- s390-tools-2.31.0/zdump/df_s390.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdump/df_s390.c 2024-05-28 08:26:36.000000000 +0200 @@ -153,14 +153,17 @@ dumper->version = 0; switch (dumper->version) { case 1: - if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC_EXT, 7) == 0 || - strncmp(dumper->magic, DF_S390_DUMPER_MAGIC_MV_EXT, 7) == 0) + if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC_EXT, + DF_S390_DUMPER_MAGIC_SIZE) == 0 || + strncmp(dumper->magic, DF_S390_DUMPER_MAGIC_MV_EXT, + DF_S390_DUMPER_MAGIC_SIZE) == 0) dumper->size = STAGE2_DUMPER_SIZE_V3; else dumper->size = STAGE2_DUMPER_SIZE_V1; break; case 2: - if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC_EXT, 7) == 0) + if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC_EXT, + DF_S390_DUMPER_MAGIC_SIZE) == 0) dumper->size = STAGE2_DUMPER_SIZE_ZLIB; else dumper->size = STAGE2_DUMPER_SIZE_V2; diff -Nru s390-tools-2.31.0/zdump/df_s390.h s390-tools-2.33.1/zdump/df_s390.h --- s390-tools-2.31.0/zdump/df_s390.h 2024-02-02 17:49:44.000000000 +0100 +++ 
s390-tools-2.33.1/zdump/df_s390.h 2024-05-28 08:26:36.000000000 +0200 @@ -35,17 +35,6 @@ return df_s390_arch == DF_S390_ARCH_64 ? DFI_ARCH_64 : DFI_ARCH_32; } -#define DF_S390_DUMPER_MAGIC32 "ZECKD31" -#define DF_S390_DUMPER_MAGIC64 "ZECKD64" -#define DF_S390_DUMPER_MAGIC_EXT "XECKD64" -#define DF_S390_DUMPER_MAGIC32_FBA "ZDFBA31" -#define DF_S390_DUMPER_MAGIC64_FBA "ZDFBA64" -#define DF_S390_DUMPER_MAGIC_FBA_EXT "XDFBA64" -#define DF_S390_DUMPER_MAGIC_MV "ZMULT64" -#define DF_S390_DUMPER_MAGIC_MV_EXT "XMULT64" -#define OLD_DUMPER_HEX_INSTR1 "\x0d\x10\x47\xf0" /* BASR + 1st halfword of BC */ -#define OLD_DUMPER_HEX_INSTR2 "\x0d\xd0" /* BASR 13,0 */ - /* * Dump tool structure */ diff -Nru s390-tools-2.31.0/zdump/dt_ngdump.c s390-tools-2.33.1/zdump/dt_ngdump.c --- s390-tools-2.31.0/zdump/dt_ngdump.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdump/dt_ngdump.c 2024-05-28 08:26:36.000000000 +0200 @@ -27,11 +27,9 @@ char *part_path = NULL; int rc; - l.part_num = ngdump_get_dump_part(g.fh); + l.part_num = ngdump_get_dump_part(g.fh, &part_path); if (l.part_num <= 0) return -1; - if (ngdump_get_disk_part_path(g.fh->path, l.part_num, &part_path) < 0) - return -1; rc = ngdump_read_meta_from_device(part_path, &l.meta); free(part_path); if (rc) diff -Nru s390-tools-2.31.0/zdump/dt_s390sv.c s390-tools-2.33.1/zdump/dt_s390sv.c --- s390-tools-2.31.0/zdump/dt_s390sv.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdump/dt_s390sv.c 2024-05-28 08:26:36.000000000 +0200 @@ -34,14 +34,17 @@ df_s390_dumper_read(g.fh, blk_size, &l.dumper); if (l.extended) { - if (strncmp(l.dumper.magic, DF_S390_DUMPER_MAGIC_EXT, 7) != 0) + if (strncmp(l.dumper.magic, DF_S390_DUMPER_MAGIC_EXT, + DF_S390_DUMPER_MAGIC_SIZE) != 0) return -ENODEV; l.dumper_arch = DFI_ARCH_64; return 0; } - if (strncmp(l.dumper.magic, DF_S390_DUMPER_MAGIC64, 7) == 0) { + if (strncmp(l.dumper.magic, DF_S390_DUMPER_MAGIC64, + DF_S390_DUMPER_MAGIC_SIZE) == 0) { l.dumper_arch = DFI_ARCH_64; - } else if 
(strncmp(l.dumper.magic, DF_S390_DUMPER_MAGIC32, 7) == 0) { + } else if (strncmp(l.dumper.magic, DF_S390_DUMPER_MAGIC32, + DF_S390_DUMPER_MAGIC_SIZE) == 0) { l.dumper_arch = DFI_ARCH_32; } else if (memcmp(l.dumper.magic, OLD_DUMPER_HEX_INSTR1, 4) == 0 && l.dumper.version == 0) { @@ -59,15 +62,17 @@ static int dumper_check_fba(struct df_s390_dumper *dumper) { if (l.extended) { - if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC_FBA_EXT, 7) - != 0) + if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC_FBA_EXT, + DF_S390_DUMPER_MAGIC_SIZE) != 0) return -ENODEV; l.dumper_arch = DFI_ARCH_64; return 0; } - if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC64_FBA, 7) == 0) { + if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC64_FBA, + DF_S390_DUMPER_MAGIC_SIZE) == 0) { l.dumper_arch = DFI_ARCH_64; - } else if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC32_FBA, 7) == 0) { + } else if (strncmp(dumper->magic, DF_S390_DUMPER_MAGIC32_FBA, + DF_S390_DUMPER_MAGIC_SIZE) == 0) { l.dumper_arch = DFI_ARCH_32; } else if (memcmp(dumper->magic, OLD_DUMPER_HEX_INSTR1, 4) == 0 && memcmp(&dumper->size, OLD_DUMPER_HEX_INSTR2, 2) == 0) { diff -Nru s390-tools-2.31.0/zdump/Makefile s390-tools-2.33.1/zdump/Makefile --- s390-tools-2.31.0/zdump/Makefile 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdump/Makefile 2024-05-28 08:26:36.000000000 +0200 @@ -105,7 +105,10 @@ OBJECTS += zfuse.o endif -libs = $(rootdir)/libutil/libutil.a $(LIBPV) +libs = $(rootdir)/libutil/libutil.a \ + $(rootdir)/libvtoc/libvtoc.a \ + $(rootdir)/libdasd/libdasd.a \ + $(LIBPV) all: $(BUILD_TARGETS) diff -Nru s390-tools-2.31.0/zdump/ngdump.c s390-tools-2.33.1/zdump/ngdump.c --- s390-tools-2.31.0/zdump/ngdump.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdump/ngdump.c 2024-05-28 08:26:36.000000000 +0200 @@ -18,9 +18,11 @@ #include #include +#include "lib/dasd_base.h" #include "lib/util_libc.h" #include "lib/util_part.h" #include "lib/util_log.h" +#include "lib/vtoc.h" #include "boot/boot_defs.h" #include 
"boot/linux_layout.h" @@ -30,6 +32,11 @@ #define NGDUMP_META_VERSION 1 #define NGDUMP_META_FILENAME "ngdump.meta" +static const char *const ngtype2str[] = { + [NG_TYPE_DASD] = "DASD", + [NG_TYPE_NVME] = "NVME" +}; + static int read_meta_from_file(const char *filename, struct ngdump_meta *meta) { FILE *fp = NULL; @@ -212,56 +219,71 @@ } /* - * This function parses the bootloader program table stored on the given device - * and returns the partition index where the dumper's kernel image is stored. + * Convert disk blockpointer to the offset in blocks. + * Use eckd blockpointer format if hd_geometry is provided, otherwise linear blockpointer. + * Return u64(-1) in case the blockpointer contains zeroes. */ -int ngdump_get_dump_part(struct zg_fh *zg_fh) +static uint64_t blockptr2blk(union disk_blockptr *ptr, const struct hd_geometry *geo) { - int i, blk_size, max_entries, part_ext; - struct component_entry comp_entry; - struct component_header comp_hdr; - struct linear_blockptr blockptr; - struct scsi_mbr mbr; - uint64_t off; + uint64_t blk; - if (zg_ioctl(zg_fh, BLKSSZGET, &blk_size, "BLKSSZGET", ZG_CHECK_NONE)) - return -1; - - util_log_print(UTIL_LOG_TRACE, "%s: Block size %d\n", - __func__, blk_size); + /* For NVMe or SCSI use linear blockpointer format. */ + /* For DASD use eckd blockpointer format. */ + if (!geo) { + blk = ptr->linear.blockno; + if (blk == 0) + return U64_MAX; + } else { + if (ptr->eckd.sec == 0) + return U64_MAX; + blk = ptr->eckd.cyl * geo->heads + ptr->eckd.head; /* Track number */ + blk *= geo->sectors; /* Track offset in records */ + blk += ptr->eckd.sec - 1; /* Record offset (skipping R0) */ + } + return blk; +} - /* Read Master Boot Record (MBR) and check its magic */ - zg_read(zg_fh, &mbr, sizeof(mbr), ZG_CHECK); - if (memcmp(mbr.magic, ZIPL_MAGIC, ZIPL_MAGIC_SIZE)) - return -1; +/* + * Based on the provided program table blockpointer find kernel image Boot Map Section + * for dump Boot Map Script. 
Read the first data blockpointer from the section into + * blockptr area. + */ +static int get_bootmap_dump_image_blkptr(struct zg_fh *zg_fh, union disk_blockptr *program_table, + struct hd_geometry *geo, int blk_size, + union disk_blockptr *blockptr) +{ + struct component_entry comp_entry; + struct component_header comp_hdr; + int i, max_entries; + uint64_t blk; /* Read Boot Map Table and check its magic */ - off = mbr.program_table_pointer.blockno * blk_size; + blk = blockptr2blk(program_table, geo); + if (blk == U64_MAX) + return -1; util_log_print(UTIL_LOG_TRACE, "%s: Reading program table at offset 0x%016lx\n", - __func__, off); - zg_seek(zg_fh, off, ZG_CHECK); - zg_read(zg_fh, &blockptr, sizeof(blockptr), ZG_CHECK); - if (memcmp((const char*)&blockptr, ZIPL_MAGIC, ZIPL_MAGIC_SIZE)) + __func__, blk * blk_size); + zg_seek(zg_fh, blk * blk_size, ZG_CHECK); + zg_read(zg_fh, blockptr, sizeof(*blockptr), ZG_CHECK); + if (memcmp((const char *)blockptr, ZIPL_MAGIC, ZIPL_MAGIC_SIZE)) return -1; - /* Read Boot Map Script Pointer 0 */ - zg_read(zg_fh, &blockptr, sizeof(blockptr), ZG_CHECK); - if (blockptr.blockno == 0) + zg_read(zg_fh, blockptr, sizeof(*blockptr), ZG_CHECK); + blk = blockptr2blk(blockptr, geo); + if (blk == U64_MAX) return -1; /* Read 1st Boot Map Script, check its magic and type */ - off = blockptr.blockno * blk_size; util_log_print(UTIL_LOG_TRACE, "%s: Reading component header at offset 0x%016lx\n", - __func__, off); - zg_seek(zg_fh, off, ZG_CHECK_ERR); + __func__, blk * blk_size); + zg_seek(zg_fh, blk * blk_size, ZG_CHECK_ERR); zg_read(zg_fh, &comp_hdr, sizeof(comp_hdr), ZG_CHECK_ERR); if (memcmp(comp_hdr.magic, ZIPL_MAGIC, ZIPL_MAGIC_SIZE)) return -1; /* We want only dump script */ if (comp_hdr.type != COMPONENT_HEADER_DUMP) return -1; - /* Find kernel's Boot Map Section Pointer */ max_entries = (blk_size - sizeof(comp_hdr)) / sizeof(comp_entry); for (i = 0; i < max_entries; i++) { @@ -282,27 +304,178 @@ } /* Read 1st Boot Map Data Pointer in 
kernel's Boot Map Section */ - off = ((struct linear_blockptr *)comp_entry.data)->blockno * blk_size; + blk = blockptr2blk((union disk_blockptr *)comp_entry.data, geo); + if (blk == U64_MAX) + return -1; util_log_print(UTIL_LOG_TRACE, "%s: Reading component block pointer at offset 0x%016lx\n", + __func__, blk * blk_size); + zg_seek(zg_fh, blk * blk_size, ZG_CHECK_ERR); + zg_read(zg_fh, blockptr, sizeof(*blockptr), ZG_CHECK); + + return 0; +} + +/* + * This function parses the bootloader program table stored on the nvme device + * and returns the partition index where the dumper's kernel image is stored. + */ +static int ngdump_get_nvme_part_num(struct zg_fh *zg_fh) +{ + union disk_blockptr dump_image_blkptr; + int blk_size, part_num, part_ext; + struct linear_blockptr *blockptr; + struct scsi_mbr mbr; + + if (zg_ioctl(zg_fh, BLKSSZGET, &blk_size, "BLKSSZGET", ZG_CHECK_NONE)) + return -1; + util_log_print(UTIL_LOG_TRACE, "%s: Block size %d\n", + __func__, blk_size); + /* Read Master Boot Record (MBR) and check its magic */ + zg_read(zg_fh, &mbr, sizeof(mbr), ZG_CHECK); + if (memcmp(mbr.magic, ZIPL_MAGIC, ZIPL_MAGIC_SIZE)) + return -1; + blockptr = &mbr.program_table_pointer; + /* + * Cast linear_blockptr to disk_blockptr before passing it to the function. + * Since no hd_geometry provided, it will be treated as linear_blockptr later on. + */ + if (get_bootmap_dump_image_blkptr(zg_fh, (union disk_blockptr *)blockptr, + NULL, blk_size, &dump_image_blkptr)) + return -1; + blockptr = &dump_image_blkptr.linear; + util_log_print(UTIL_LOG_TRACE, + "%s: Component block address 0x%016lx block count %d\n", + __func__, blockptr->blockno, + blockptr->blockct); + if (blockptr->blockno == 0) + return -1; + part_num = util_part_search_fh(zg_fh->fh, blockptr->blockno, blockptr->blockct, + blk_size, &part_ext); + return part_num; +} + +/* + * This function scans a VTOC record of cdl formatted DASD to identify a partition + * the specified blockno (0-indexed) belongs to. 
+ */ +static int find_vol1_cdl_part_fh(struct zg_fh *zg_fh, uint64_t blockno, int blk_size, + cchhb_t *vtoc, struct hd_geometry *geo) +{ + unsigned int part_count, i; + struct format1_label f1; + uint64_t blk; + + blk = cchhb2blk(vtoc, geo); + if (!blk) + return -1; + /* Get actual offset in blocks (special record zero accounted) */ + blk--; + part_count = 0; + for (i = 0; i < MAX_VTOC_ENTRIES; i++) { + zg_seek(zg_fh, blk * blk_size, ZG_CHECK); + zg_read(zg_fh, &f1, sizeof(f1), ZG_CHECK_ERR); + /* Skip FMT4 / FMT5 / FMT7 / FMT9 labels */ + if (f1.DS1FMTID == 0xf4 || + f1.DS1FMTID == 0xf5 || + f1.DS1FMTID == 0xf7 || + f1.DS1FMTID == 0xf9) { + blk++; + continue; + } + /* only FMT1 and FMT8 labels valid at this point */ + if (f1.DS1FMTID != 0xf1 && + f1.DS1FMTID != 0xf8) + break; + /* OK, we got valid partition data. Check for partition boundaries */ + if (blockno >= cchh2blk(&f1.DS1EXT1.llimit, geo) && + blockno < cchh2blk(&f1.DS1EXT1.ulimit, geo) + geo->sectors) { + return part_count + 1; + } + part_count++; + blk++; + } + /* No matching partition found */ + return -1; +} + +/* + * This function parses the bootloader program table stored on the eckd device + * and returns the partition index where the dumper's kernel image is stored. 
+ */ +static int ngdump_get_eckd_part_num(struct zg_fh *zg_fh) +{ + union disk_blockptr dump_image_blkptr; + struct eckd_blockptr *blockptr; + struct eckd_boot_record br; + struct vol_label_cdl vl; + struct hd_geometry geo; + uint64_t blk, off; + cchhb_t *vtoc; + int blk_size; + + if (zg_ioctl(zg_fh, BLKSSZGET, &blk_size, "BLKSSZGET", ZG_CHECK_NONE)) + return -1; + util_log_print(UTIL_LOG_TRACE, "%s: Block size %d\n", + __func__, blk_size); + /* Obtain DASD geometry */ + if (dasd_get_geo(zg_fh->path, &geo) != 0) + return -1; + util_log_print(UTIL_LOG_TRACE, + "%s: DASD geometry: cyl=%d, heads=%d, sect=%d\n", + __func__, geo.cylinders, geo.heads, geo.sectors); + /* Read a volume label from CDL-formatted DASD */ + off = 2 * blk_size; + util_log_print(UTIL_LOG_TRACE, + "%s: Reading a volume label at offset 0x%016lx\n", __func__, off); zg_seek(zg_fh, off, ZG_CHECK_ERR); - zg_read(zg_fh, &blockptr, sizeof(blockptr), ZG_CHECK); + zg_read(zg_fh, &vl, sizeof(vl), ZG_CHECK); + /* Verify that we have a VOL1 label */ + if (!is_vol1(vl.vollbl)) + return -1; + /* Read Master Boot Record and check its magic */ + blk = cchhb2blk(&vl.br, &geo); + if (blk == 0) + return -1; + off = (blk - 1) * blk_size; util_log_print(UTIL_LOG_TRACE, - "%s: Component block address 0x%016lx block count %d\n", - __func__, blockptr.blockno, blockptr.blockct); + "%s: Reading Master Boot Record at offset 0x%016lx\n", + __func__, off); + zg_seek(zg_fh, off, ZG_CHECK_ERR); + zg_read(zg_fh, &br, sizeof(br), ZG_CHECK); + if (memcmp(br.magic, ZIPL_MAGIC, ZIPL_MAGIC_SIZE)) + return -1; - return util_part_search_fh(zg_fh->fh, blockptr.blockno, - blockptr.blockct, blk_size, &part_ext); + blockptr = (struct eckd_blockptr *)&br.program_table_pointer; + /* + * Cast eckd_blockptr to disk_blockptr before passing it to the function. + * With hd_geometry provided, it will be treated as eckd_blockptr later on. 
+ */ + if (get_bootmap_dump_image_blkptr(zg_fh, (union disk_blockptr *)blockptr, + &geo, blk_size, &dump_image_blkptr)) + return -1; + blk = blockptr2blk(&dump_image_blkptr, &geo); + if (blk == U64_MAX) + return -1; + util_log_print(UTIL_LOG_TRACE, + "%s: Segment0 block number 0x%016lx\n", + __func__, blk); + vtoc = &((volume_label_t *)&vl)->vtoc; + return find_vol1_cdl_part_fh(zg_fh, blk, blk_size, vtoc, &geo); } -int ngdump_get_disk_part_path(const char *disk_path, int part_num, - char **part_path) +/* + * This function composes the absolute partition device node name + * based on the device name, device type and the partition number. + */ +int ngdump_get_part_path(const char *disk_path, int part_num, + enum ngdump_disk_type ng_type, char **part_path) { char *real_path; - util_log_print(UTIL_LOG_TRACE, "%s: Disk path %s\n", - __func__, disk_path); + util_log_print(UTIL_LOG_TRACE, "%s: Disk path %s; disk type: %s\n", + __func__, disk_path, ngtype2str[ng_type]); real_path = util_malloc(PATH_MAX); @@ -315,7 +488,18 @@ __func__, real_path); *part_path = NULL; - util_asprintf(part_path, "%sp%d", real_path, part_num); + switch (ng_type) { + case NG_TYPE_DASD: + util_asprintf(part_path, "%s%d", real_path, part_num); + break; + case NG_TYPE_NVME: + util_asprintf(part_path, "%sp%d", real_path, part_num); + break; + default: /* Unknown type, bail out */ + free(real_path); + return -1; + } + free(real_path); util_log_print(UTIL_LOG_TRACE, "%s: Disk partition path %s\n", @@ -323,3 +507,30 @@ return 0; } + +/* + * This function checks for the ngdump device type in order to parse the + * bootloader program table and identify the partition where the dumper's + * kernel image is stored. + * part_path is set to the absolute partition device node name and + * the partition number is returned (or -1 when no partition found). 
+ */ +int ngdump_get_dump_part(struct zg_fh *zg_fh, char **part_path) +{ + dasd_information2_t dasd_info; + enum ngdump_disk_type ng_type; + int part_num; + + if (dasd_get_info(zg_fh->path, &dasd_info) == 0) { + ng_type = NG_TYPE_DASD; + part_num = ngdump_get_eckd_part_num(zg_fh); + } else { + ng_type = NG_TYPE_NVME; + part_num = ngdump_get_nvme_part_num(zg_fh); + } + if (part_num <= 0 || + ngdump_get_part_path(zg_fh->path, part_num, ng_type, part_path) < 0) + return -1; + + return part_num; +} diff -Nru s390-tools-2.31.0/zdump/ngdump.h s390-tools-2.33.1/zdump/ngdump.h --- s390-tools-2.31.0/zdump/ngdump.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdump/ngdump.h 2024-05-28 08:26:36.000000000 +0200 @@ -20,11 +20,14 @@ const char *sha256sum; }; -int ngdump_read_meta_from_device(const char *device, struct ngdump_meta *meta); - -int ngdump_get_dump_part(struct zg_fh *zg_fh); +enum ngdump_disk_type { + NG_TYPE_DASD, + NG_TYPE_NVME, +}; -int ngdump_get_disk_part_path(const char *disk_path, int part_num, - char **part_path); +int ngdump_read_meta_from_device(const char *device, struct ngdump_meta *meta); +int ngdump_get_dump_part(struct zg_fh *zg_fh, char **part_path); +int ngdump_get_part_path(const char *disk_path, int part_num, + enum ngdump_disk_type ng_type, char **part_path); #endif /* ZGETDUMP_NGDUMP_H */ diff -Nru s390-tools-2.31.0/zdump/pv_utils.c s390-tools-2.33.1/zdump/pv_utils.c --- s390-tools-2.31.0/zdump/pv_utils.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zdump/pv_utils.c 2024-05-28 08:26:36.000000000 +0200 @@ -24,8 +24,8 @@ #include "lib/zt_common.h" #include "lib/util_log.h" +#include "libpv/common.h" #include "libpv/crypto.h" -#include "libpv/se-hdr.h" #include "pv_defs.h" WRAPPED_G_DEFINE_AUTOPTR_CLEANUP_FUNC(Elf64_Phdr, free) @@ -36,6 +36,7 @@ #define PV_DUMP_V1_HKDF_LEN 32 #define PV_DUMP_V1_HKDF_FUN EVP_sha512() #define PV_DUMP_V1_CIPHER EVP_aes_256_gcm() +#define PV_CCK_V1_SIZE 32 static gboolean u64_checked_add(u64 
*res, u64 lhs, u64 rhs) { @@ -108,7 +109,7 @@ { g_autoptr(GBytes) salt = NULL, info = NULL; size_t cck_size; - size_t exp_cck_size = sizeof_field(struct pv_hdr_encrypted, cust_comm_key); + size_t exp_cck_size = PV_CCK_V1_SIZE; assert(cpl_data->aad.version == PV_COMPL_DATA_VERSION_1); diff -Nru s390-tools-2.31.0/zfcpdump/zfcpdump_part.c s390-tools-2.33.1/zfcpdump/zfcpdump_part.c --- s390-tools-2.31.0/zfcpdump/zfcpdump_part.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zfcpdump/zfcpdump_part.c 2024-05-28 08:26:36.000000000 +0200 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -40,7 +41,6 @@ #include "zfcpdump.h" #define COPY_BUF_SIZE 0x10000UL -#define COPY_TABLE_ENTRY_COUNT 4 /* * Copy table entry @@ -48,6 +48,13 @@ struct copy_table_entry { unsigned long size; unsigned long off; + bool hsa; +}; + +struct copy_table { + int cnt; + int max; + struct copy_table_entry *entry; }; /* @@ -137,14 +144,82 @@ return 0; } -/* - * Initialize copy table to copy HSA first - */ -static int copy_table_init(int fd, struct copy_table_entry *table) +static int cmp_ct_entries(const void *_entry1, const void *_entry2) +{ + const struct copy_table_entry *entry1 = (const struct copy_table_entry *)_entry1; + const struct copy_table_entry *entry2 = (const struct copy_table_entry *)_entry2; + + /* Sort copy table entries by their file offset in ascending order */ + + if (entry1->off <= entry2->off) + return -1; + else if (entry1->off > entry2->off) + return 1; + else + return 0; +} + +static inline void copy_table_add_entry(struct copy_table *table, unsigned long off, + unsigned long size, bool hsa) +{ + const int i = table->cnt; + table->entry[i].off = off; + table->entry[i].size = size; + table->entry[i].hsa = hsa; + table->cnt++; +} + +static void copy_table_add_non_hsa_file_regions(struct copy_table *table) { + const int hsa_entry_count = table->cnt; + unsigned long off, size; + int i; + /* + * We write the front page of /proc/vmcore at 
the end of the dump processing. + * This ensures that the dump stays invalid until all data + * is written. It is guaranteed that a copy table entry for file offset 0 + * never covers HSA memory and at least of size of a single page because + * HSA memory is always page aligned. + */ + off = PAGE_SIZE; + size = table->entry[0].off - off; + if (size > 0) + copy_table_add_entry(table, off, size, false); + /* + * Add copy table entries covering non-HSA file regions located before + * each copy table entry covering a HSA file region. Start with the second + * HSA copy table entry. + */ + for (i = 1; i < hsa_entry_count; i++) { + off = table->entry[i - 1].off + table->entry[i - 1].size; + size = table->entry[i].off - off; + if (size > 0) + copy_table_add_entry(table, off, size, false); + } + /* + * Add a copy table entry that covers the end of /proc/vmcore which is + * not covered by a copy table entry for HSA. + */ + i = hsa_entry_count - 1; + off = table->entry[i].off + table->entry[i].size; + if (off < g.vmcore_size) { + size = g.vmcore_size - off; + copy_table_add_entry(table, off, size, false); + } + /* + * Add a copy table entry covering the front page of /proc/vmcore which + * is not covered by HSA as the last entry. + */ + copy_table_add_entry(table, 0, PAGE_SIZE, false); +} + +static int copy_table_init(int fd, struct copy_table *table) +{ + const unsigned long hsa_size = get_hsa_size(); + unsigned long off, size; + int i, max_table_size; Elf64_Ehdr ehdr; Elf64_Phdr phdr; - int i; g.vmcore_size = lseek(fd, (off_t) 0, SEEK_END); lseek(fd, 0L, SEEK_SET); @@ -160,44 +235,65 @@ PRINT_ERR("Only 64 bit core dump files are supported\n"); return -1; } - /* Find first memory chunk to determine HSA size */ + /* + * Each ELF LOAD segment may contain at most one HSA file region which will + * result in exactly one HSA copy table entry. 
Furthermore, this will result + * in at most 1 extra non-HSA copy table entry preceding the HSA copy + * table entry apart from the first and the last HSA copy table entries + * which will result in 2 non-HSA copy table entries. + * + * /proc/vmcore + * ------------------------------------------------------------------- + * | page sized | non-HSA | HSA | non-HSA | HSA | non-HSA | + * | non-HSA | region 2 | region 1 | region 3 | region 2 | region 4 | + * | region 1 | | | | | | + * ------------------------------------------------------------------- + */ + table->cnt = 0; + table->max = ehdr.e_phnum * 2 + 2; + max_table_size = table->max * sizeof(struct copy_table_entry); + table->entry = malloc(max_table_size); + if (!table->entry) { + PRINT_ERR("Memory allocation of %d byte(s) failed\n", max_table_size); + return -1; + } + /* + * First add all HSA file regions to copy table. + * Each ELF LOAD segment may contain at most one HSA segment. + */ for (i = 0; i < ehdr.e_phnum; i++) { if (read(fd, &phdr, sizeof(phdr)) < 0) return -1; if (phdr.p_type != PT_LOAD) continue; - if (phdr.p_vaddr != 0) + PRINT_TRACE("ELF LOAD segment: p_offset=0x%016lx p_filesz=0x%016lx p_paddr=0x%016lx p_vaddr=0x%016lx\n", + phdr.p_offset, phdr.p_filesz, phdr.p_paddr, phdr.p_vaddr); + if (phdr.p_paddr >= hsa_size) continue; - /* 1st entry is the HSA (vaddr = 0) */ - table[0].off = phdr.p_offset; - table[0].size = get_hsa_size(); - /* 2nd entry defines area from 2nd page to HSA start */ - table[1].off = PAGE_SIZE; - table[1].size = table[0].off - PAGE_SIZE; - /* - * 3rd entry defines area from HSA end to end of file. - */ - table[2].off = table[0].off + table[0].size; - table[2].size = g.vmcore_size - table[2].off; - /* - * We write the last page at the end of the dump processing. - * This ensures that the dump stays invalid until all data - * is written. We can use one page because it is ensured that - * the HSA starts page aligned. 
- */ - table[3].off = 0; - table[3].size = PAGE_SIZE; - return 0; - } - PRINT_ERR("Could not find HSA ELF load section\n"); - return -1; + off = phdr.p_offset; + size = MIN(phdr.p_filesz, hsa_size - phdr.p_paddr); + if (size > 0) + copy_table_add_entry(table, off, size, true); + } + if (table->cnt == 0) { + PRINT_ERR("Could not find ELF LOAD segments containing HSA\n"); + return -1; + } + /* Sort all HSA copy table entries by their file offset */ + qsort(table->entry, table->cnt, sizeof(struct copy_table_entry), cmp_ct_entries); + /* + * Add copy table entries which cover all of /proc/vmcore not covered + * by HSA copy table entries added above. + */ + copy_table_add_non_hsa_file_regions(table); + return 0; } /* * Copy one copy table entry form /proc/vmcore to dump partition */ static int copy_table_entry_write(int fdin, int fdout, - struct copy_table_entry *entry, + const struct copy_table_entry *entry, unsigned long offset) { unsigned long buf_size, bytes_left, off; @@ -228,14 +324,11 @@ return 0; } -/* - * Copy dump using mmap (copy HSA first) - */ static int copy_dump(const char *in, const char *out, unsigned long offset) { - struct copy_table_entry table[COPY_TABLE_ENTRY_COUNT]; + struct copy_table table = { 0 }; char busy_str[] = "zfcpdump busy"; - int fdout, fdin, i, rc = -1; + int fdout, fdin, i, rc = -1, hsa_released = 0; fdin = open(in, O_RDONLY); if (fdin < 0) { @@ -261,14 +354,24 @@ goto out_close_fdin; if (csum_update(fdout)) goto out_close_fdin; - if (copy_table_init(fdin, table)) + if (copy_table_init(fdin, &table)) goto out_close_fdin; show_progress(0); - for (i = 0; i < COPY_TABLE_ENTRY_COUNT; i++) { - if (copy_table_entry_write(fdin, fdout, &table[i], offset)) - goto out_close_fdout; - if (i == 0) /* 0 is the HSA */ + for (i = 0; i < table.cnt; i++) { + PRINT_TRACE("Write copy table entry %d: off=0x%016lx size=0x%016lx hsa=%d\n", + i, table.entry[i].off, table.entry[i].size, table.entry[i].hsa ? 
1 : 0); + if (!hsa_released && !table.entry[i].hsa) { + /* + * First encountered non-HSA copy table entry guarantees + * that no more HSA memory copy table entries will appear + * and, therefore, HSA memory can be finally released. + */ + PRINT_TRACE("Release HSA memory\n"); release_hsa(); + hsa_released = 1; + } + if (copy_table_entry_write(fdin, fdout, &table.entry[i], offset)) + goto out_close_fdout; } rc = 0; out_close_fdout: @@ -276,7 +379,10 @@ rc = -1; fsync(fdout); close(fdout); + free(table.entry); out_close_fdin: + if (!hsa_released) + release_hsa(); close(fdin); return rc; } diff -Nru s390-tools-2.31.0/zipl/include/disk.h s390-tools-2.33.1/zipl/include/disk.h --- s390-tools-2.31.0/zipl/include/disk.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zipl/include/disk.h 2024-05-28 08:26:36.000000000 +0200 @@ -53,13 +53,6 @@ disk_type_eckd_cdl, } disk_type_t; -/* Disk information source */ -typedef enum { - source_auto, - source_user, - source_script -} source_t; - /* targetbase definition */ typedef enum { defined_as_device, @@ -79,7 +72,6 @@ struct hd_geometry geo; char* name; char* drv_name; - source_t source; definition_t targetbase; int is_nvme; }; @@ -121,7 +113,7 @@ struct disk_info *info, int align, off_t *offset); void disk_print_devt(dev_t d); -void disk_print_info(struct disk_info* info); +void disk_print_info(struct disk_info *info, int source); int disk_is_zero_block(disk_blockptr_t* block, struct disk_info* info); blocknum_t disk_compact_blocklist(disk_blockptr_t* list, blocknum_t count, struct disk_info* info); diff -Nru s390-tools-2.31.0/zipl/include/install.h s390-tools-2.33.1/zipl/include/install.h --- s390-tools-2.31.0/zipl/include/install.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zipl/include/install.h 2024-05-28 08:26:36.000000000 +0200 @@ -73,9 +73,7 @@ int fd; char *device; char *filename; - char *dump_mount_point; - unsigned int dump_tmp_dir_created:1; - unsigned int dump_mounted:1; + unsigned int 
tmp_filename_created:1; unsigned int skip_prepare:1; unsigned int print_details:1; struct disk_info *info; @@ -115,6 +113,7 @@ int prepare_bootloader(struct job_data *job, struct install_set *bis); int install_bootloader(struct job_data *job, struct install_set *bis); +int post_install_bootloader(struct job_data *job, struct install_set *bis); void free_bootloader(struct install_set *bis); int install_tapeloader(const char* device, const char* image, const char* parmline, const char* ramdisk, diff -Nru s390-tools-2.31.0/zipl/include/job.h s390-tools-2.33.1/zipl/include/job.h --- s390-tools-2.31.0/zipl/include/job.h 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zipl/include/job.h 2024-05-28 08:26:36.000000000 +0200 @@ -30,6 +30,14 @@ job_mvdump = 8, }; +/* target information source */ +typedef enum { + source_unknown = 0, + source_auto = 1, + source_user = 2, + source_script = 3 +} source_t; + struct job_target_data { char* bootmap_dir; char* targetbase; @@ -39,6 +47,7 @@ int targetsectors; int targetblocksize; blocknum_t targetoffset; + source_t source; }; struct job_common_ipl_data { @@ -122,6 +131,8 @@ struct job_ipl_tape_data ipl_tape; struct job_mvdump_data mvdump; } data; + int dump_mounted; + int bootmap_dir_created; int noninteractive; int verbose; int add_files; @@ -131,9 +142,17 @@ int is_ldipl_dump; }; +/** + * Return true, if target parameters for the base disk are set + */ +static inline int target_parameters_are_set(struct job_target_data *td) +{ + return td->targetbase != NULL; +} int job_get(int argc, char* argv[], struct job_data** data); void job_free(struct job_data* job); +void free_target_data(struct job_target_data *td); int type_from_target(char *target, disk_type_t *type); int check_job_dump_images(struct job_dump_data* dump, char* name); int check_job_images_ngdump(struct job_dump_data* dump, char* name); diff -Nru s390-tools-2.31.0/zipl/src/bootmap.c s390-tools-2.33.1/zipl/src/bootmap.c --- s390-tools-2.31.0/zipl/src/bootmap.c 
2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zipl/src/bootmap.c 2024-05-28 08:26:36.000000000 +0200 @@ -304,7 +304,6 @@ address_t load_address, size_t trailer, void *component, int add_files, - struct job_target_data *target, int comp_id, int menu_idx, int program_table_id) { @@ -339,8 +338,15 @@ return -1; } } else { - /* Make sure file is on correct device */ - rc = disk_get_info_from_file(filename, target, &file_info); + /* + * Make sure that file is on target device. + * For this, retrieve info of the underlying disk without + * any user hints + */ + struct job_target_data tmp = {.source = source_unknown}; + + rc = disk_get_info_from_file(filename, &tmp, &file_info); + free_target_data(&tmp); if (rc) return -1; if (file_info->device != bis->info->device) { @@ -377,12 +383,11 @@ static int add_component_file(struct install_set *bis, const char *filename, address_t load_address, size_t trailer, void *component, int add_files, - struct job_target_data *target, int comp_id, - int menu_idx, int program_table_id) + int comp_id, int menu_idx, int program_table_id) { return add_component_file_range(bis, filename, NULL, load_address, trailer, component, add_files, - target, comp_id, menu_idx, + comp_id, menu_idx, program_table_id); } @@ -544,8 +549,7 @@ bool add_envblk, struct job_envblk_data *envblk, struct job_ipl_data *ipl, disk_blockptr_t *program, int verbose, int add_files, component_header_type type, - struct job_target_data *target, int is_secure, - int menu_idx, int program_table_id) + int is_secure, int menu_idx, int program_table_id) { struct signature_header sig_head; size_t ramdisk_size, image_size; @@ -667,7 +671,7 @@ /* Add stage 3 loader to bootmap */ rc = add_component_file(bis, ZIPL_STAGE3_PATH, STAGE3_LOAD_ADDRESS, signature_size, VOID_ADD(table, offset), 1, - target, COMPONENT_ID_LOADER, menu_idx, + COMPONENT_ID_LOADER, menu_idx, program_table_id); if (rc) { error_text("Could not add internal loader file '%s'", @@ -745,7 +749,7 @@ rc = 
add_component_file(bis, ipl->common.image, ipl->common.image_addr, signature_size, VOID_ADD(table, offset), - add_files, target, COMPONENT_ID_KERNEL_IMAGE, + add_files, COMPONENT_ID_KERNEL_IMAGE, menu_idx, program_table_id); if (rc) { error_text("Could not add image file '%s'", ipl->common.image); @@ -805,7 +809,7 @@ ipl->common.ramdisk_addr, signature_size, VOID_ADD(table, offset), - add_files, target, COMPONENT_ID_RAMDISK, + add_files, COMPONENT_ID_RAMDISK, menu_idx, program_table_id); if (rc) { error_text("Could not add ramdisk '%s'", @@ -861,7 +865,7 @@ rc = add_component_file_range(bis, filename, ®, ipl->envblk_addr, 0, VOID_ADD(table, offset), - 0, target, + 0, COMPONENT_ID_ENVBLK, menu_idx, program_table_id); @@ -893,7 +897,6 @@ struct job_segment_data *segment, disk_blockptr_t *program, int verbose, int add_files, component_header_type type, - struct job_target_data *target, int program_table_id) { void *table; @@ -913,7 +916,7 @@ printf(" segment file......: %s\n", segment->segment); rc = add_component_file(bis, segment->segment, segment->segment_addr, 0, - VOID_ADD(table, offset), add_files, target, + VOID_ADD(table, offset), add_files, COMPONENT_ID_SEGMENT_FILE, 0 /* menu_idx */, program_table_id); if (rc) { @@ -996,7 +999,7 @@ ipl.common.parmline = dump->common.parmline; ipl.common.parm_addr = dump->common.parm_addr; return add_ipl_program(bis, NULL, false, NULL, &ipl, program, - verbose, 1, type, target, SECURE_BOOT_DISABLED, + verbose, 1, type, SECURE_BOOT_DISABLED, 0 /* menu_idx */, program_table_id); } @@ -1041,8 +1044,7 @@ true, &job->envblk, &job->data.ipl, &table[0], verbose || job->command_line, job->add_files, component_header, - &job->target, job->is_secure, 0, - program_table_id); + job->is_secure, 0, program_table_id); break; case job_segment: if (bis->print_details) { @@ -1055,7 +1057,7 @@ rc = add_segment_program(bis, &job->data.segment, &table[0], verbose || job->command_line, job->add_files, COMPONENT_HEADER_IPL, - &job->target, 
program_table_id); + program_table_id); break; case job_dump_partition: /* Only useful for a partition dump that uses a dump kernel*/ @@ -1114,7 +1116,7 @@ &table[job->data.menu.entry[i].pos], verbose || job->command_line, job->add_files, component_header, - &job->target, is_secure, i, + is_secure, i, program_table_id); break; case job_print_usage: @@ -1406,7 +1408,8 @@ int rc, part_ext; /* Check for supported disk and driver types */ - if ((info->source == source_auto) && (info->type == disk_type_diag)) { + if (job->target.source == source_auto && + info->type == disk_type_diag) { error_reason("Unsupported disk type (%s)", disk_get_type_name(info->type)); return -1; @@ -1466,6 +1469,7 @@ bis->filename); return -1; } + bis->tmp_filename_created = 1; } /* Retrieve target device information */ if (disk_get_info(bis->filename, &job->target, &bis->info)) @@ -1473,7 +1477,7 @@ if (verbose) { printf("Target device information\n"); - disk_print_info(bis->info); + disk_print_info(bis->info, job->target.source); } if (misc_temp_dev(bis->info->device, 1, &bis->device)) return -1; @@ -1539,14 +1543,14 @@ * Prepare resources to build a program table */ static int prepare_build_program_table_file(struct job_data *job, - char *bootmap_dir, struct install_set *bis) { if (bis->skip_prepare) /* skip the preparation work */ return 0; /* Create temporary bootmap file */ - bis->filename = misc_make_path(bootmap_dir, BOOTMAP_TEMPLATE_FILENAME); + bis->filename = misc_make_path(job->target.bootmap_dir, + BOOTMAP_TEMPLATE_FILENAME); if (!bis->filename) return -1; bis->fd = mkstemp(bis->filename); @@ -1555,13 +1559,14 @@ error_text("Could not create file '%s':", bis->filename); return -1; } + bis->tmp_filename_created = 1; /* Retrieve target device information. Note that we have to * call disk_get_info_from_file() to also get the file system * block size. 
*/ if (disk_get_info_from_file(bis->filename, &job->target, &bis->info)) return -1; /* Check for supported disk and driver types */ - if (bis->info->source == source_auto && + if (job->target.source == source_auto && bis->info->type == disk_type_diag) { error_reason("Unsupported disk type (%s)", disk_get_type_name(bis->info->type)); @@ -1571,7 +1576,7 @@ return -1; if (verbose) { printf("Target device information\n"); - disk_print_info(bis->info); + disk_print_info(bis->info, job->target.source); } if (misc_temp_dev(bis->info->device, 1, &bis->device)) return -1; @@ -1581,7 +1586,7 @@ bis->info)) return -1; } - printf("Building bootmap in '%s'%s\n", bootmap_dir, + printf("Building bootmap in '%s'%s\n", job->target.bootmap_dir, job->add_files ? " (files will be added to bootmap file)" : ""); /* Initialize bootmap header */ @@ -1601,11 +1606,11 @@ /** * Rename to final bootmap name */ -static int finalize_create_file(char *bootmap_dir, struct install_set *bis) +static int finalize_create_file(struct job_data *job, struct install_set *bis) { char *final_name; - final_name = misc_make_path(bootmap_dir, BOOTMAP_FILENAME); + final_name = misc_make_path(job->target.bootmap_dir, BOOTMAP_FILENAME); if (!final_name) return -1; if (rename(bis->filename, final_name)) { @@ -1615,6 +1620,11 @@ free(final_name); return -1; } + /* + * The temporary object with @bis->filename has been removed + * from the semantic volume + */ + bis->tmp_filename_created = 0; free(final_name); return 0; } @@ -1623,9 +1633,9 @@ * PROGRAM_TABLE_ID: offset of the program table in the array (@bis->tables) */ static int bootmap_create_file(struct job_data *job, struct install_set *bis, - char *bootmap_dir, int program_table_id) + int program_table_id) { - if (prepare_build_program_table_file(job, bootmap_dir, bis)) + if (prepare_build_program_table_file(job, bis)) return -1; if (build_program_table(job, bis, program_table_id)) return -1; @@ -1638,8 +1648,12 @@ return 0; } -static int 
-ngdump_create_meta(const char *path) +/** + * Create a file with the short name "ngdump.meta" in the directory PATH. + * This file is required for NGDump stand-alone dumper, it's read/written + * by the dumper when it starts. + */ +static int ngdump_create_meta(const char *path) { char *filename = NULL; FILE *fp; @@ -1676,7 +1690,6 @@ struct install_set *bis) { struct disk_info *info; - char *bootmap_dir; /* Retrieve target device information */ if (disk_get_info(job->data.dump.device, &job->target, &info)) @@ -1686,45 +1699,43 @@ if (check_dump_device(job, info, bis->device)) return -1; - bis->dump_mount_point = misc_make_path("/tmp", - DUMP_TEMP_MOUNT_POINT_NAME); - if (!bis->dump_mount_point) { + assert(!job->target.bootmap_dir); + job->target.bootmap_dir = misc_make_path("/tmp", + DUMP_TEMP_MOUNT_POINT_NAME); + if (!job->target.bootmap_dir) { error_reason(strerror(errno)); error_text("Could not make path for '%s'", DUMP_TEMP_MOUNT_POINT_NAME); return -1; } /* Create a mount point directory */ - if (mkdtemp(bis->dump_mount_point) == NULL) { + if (!mkdtemp(job->target.bootmap_dir)) { error_reason(strerror(errno)); error_text("Could not create mount point '%s'", - bis->dump_mount_point); + job->target.bootmap_dir); return -1; } - bis->dump_tmp_dir_created = 1; + job->bootmap_dir_created = 1; /* * Mount partition where bootmap file and also a dump file will * be stored. 
*/ - if (mount(job->data.dump.device, bis->dump_mount_point, + if (mount(job->data.dump.device, job->target.bootmap_dir, NGDUMP_FSTYPE, 0, NULL)) { error_reason(strerror(errno)); error_text("Could not mount partition '%s':", job->data.dump.device); return -1; } - bis->dump_mounted = 1; - bootmap_dir = bis->dump_mount_point; + job->dump_mounted = 1; /* * Build a single program table for List-Directed IPL * See comments before install_bootloader() for details */ bis->print_details = 1; - if (bootmap_create_file(job, bis, bootmap_dir, BLKPTR_FORMAT_ID)) + if (bootmap_create_file(job, bis, BLKPTR_FORMAT_ID)) return -1; - if (!dry_run && finalize_create_file(bootmap_dir, bis)) - return -1; - return ngdump_create_meta(bootmap_dir); + return ngdump_create_meta(job->target.bootmap_dir); } /** @@ -1734,27 +1745,25 @@ */ static int prepare_bootloader_ipl(struct job_data *job, struct install_set *bis) { - char *bootmap_dir = job->target.bootmap_dir; - /* * Build a program table for List-Directed IPL from * SCSI or ECKD DASD */ bis->print_details = 1; - if (bootmap_create_file(job, bis, bootmap_dir, BLKPTR_FORMAT_ID)) + if (bootmap_create_file(job, bis, BLKPTR_FORMAT_ID)) return -1; if (bis->info->type == disk_type_scsi) /* only one table to be installed per device */ - return dry_run ? 0 : finalize_create_file(bootmap_dir, bis); + return 0; /* * Build one more program table for CCW-type IPL from * ECKD DASD */ bis->skip_prepare = 1; bis->print_details = 0; - if (bootmap_create_file(job, bis, bootmap_dir, LEGACY_BLKPTR_FORMAT_ID)) + if (bootmap_create_file(job, bis, LEGACY_BLKPTR_FORMAT_ID)) return -1; - return dry_run ? 
0 : finalize_create_file(bootmap_dir, bis); + return 0; } /** @@ -1800,7 +1809,22 @@ } else { return prepare_bootloader_ipl(job, bis); } - return -1; +} + +/** + * Do whatever needed after successful boot records installation + * but before releasing all the captured resources + */ +int post_install_bootloader(struct job_data *job, struct install_set *bis) +{ + if (job->id == job_dump_partition) { + if (is_ngdump_enabled(job)) + return dry_run ? 0 : finalize_create_file(job, bis); + else + return 0; + } else { + return dry_run ? 0 : finalize_create_file(job, bis); + } } /** @@ -1819,18 +1843,9 @@ } if (bis->fd > 0) close(bis->fd); - if (dry_run) + if (bis->tmp_filename_created) misc_free_temp_file(bis->filename); free(bis->filename); misc_free_temp_dev(bis->device); disk_free_info(bis->info); - if (bis->dump_mount_point) { - if (bis->dump_mounted && umount(bis->dump_mount_point)) - warn("Could not umount dump device at %s", - bis->dump_mount_point); - if (bis->dump_tmp_dir_created && rmdir(bis->dump_mount_point)) - warn("Could not remove directory %s", - bis->dump_mount_point); - free(bis->dump_mount_point); - } } diff -Nru s390-tools-2.31.0/zipl/src/disk.c s390-tools-2.33.1/zipl/src/disk.c --- s390-tools-2.31.0/zipl/src/disk.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zipl/src/disk.c 2024-05-28 08:26:36.000000000 +0200 @@ -116,8 +116,8 @@ return 0; } -static int -determine_virtblk_type(struct disk_info *data, struct stat *stats) +static int determine_virtblk_type(struct disk_info *data, + const struct stat *stats) { char *device; char *buffer; @@ -187,177 +187,97 @@ return rc; } -int -disk_get_info(const char* device, struct job_target_data* target, - struct disk_info** info) +static int set_target_parameters(FILE *fh, struct job_target_data *td) { - struct stat stats; - struct stat script_stats; - struct util_proc_part_entry part_entry; - struct util_proc_dev_entry dev_entry; - struct dasd_information dasd_info; - struct disk_info *data; - int fd; 
- long devsize; - FILE *fh; - const char *script_pre = util_libdir_path("/zipl_helper."); - char *script_file = NULL; - char *ppn_cmd = NULL; + int checkparm = 0; char buffer[80]; char value[40]; - int majnum, minnum; - int checkparm; - /* Get file information */ - if (stat(device, &stats)) { - error_reason(strerror(errno)); - return -1; - } - /* Open device file */ - fd = open(device, O_RDONLY); - if (fd == -1) { - error_reason(strerror(errno)); - return -1; - } - /* Get memory for result */ - data = (struct disk_info *) misc_malloc(sizeof(struct disk_info)); - if (data == NULL) { - close(fd); + while (fgets(buffer, 80, fh)) { + if (sscanf(buffer, "targetbase=%s", value) == 1) { + td->targetbase = misc_strdup(value); + checkparm++; + } + if (sscanf(buffer, "targettype=%s", value) == 1) { + type_from_target(value, &td->targettype); + checkparm++; + } + if (sscanf(buffer, "targetgeometry=%s", value) == 1) { + td->targetcylinders = + atoi(strtok(value, ",")); + td->targetheads = atoi(strtok(NULL, ",")); + td->targetsectors = atoi(strtok(NULL, ",")); + checkparm++; + } + if (sscanf(buffer, "targetblocksize=%s", value) == 1) { + td->targetblocksize = atoi(value); + checkparm++; + } + if (sscanf(buffer, "targetoffset=%s", value) == 1) { + td->targetoffset = atol(value); + checkparm++; + } + } + if ((!disk_is_eckd(td->targettype) && checkparm < 4) || + (disk_is_eckd(td->targettype) && checkparm != 5)) { + error_reason("Target parameters missing from script"); return -1; } - memset((void *) data, 0, sizeof(struct disk_info)); - /* Try to get device driver name */ - if (util_proc_dev_get_entry(stats.st_rdev, 1, &dev_entry) == 0) { - data->drv_name = misc_strdup(dev_entry.name); - util_proc_dev_free_entry(&dev_entry); - } else { - fprintf(stderr, "Warning: Could not determine driver name for " - "major %d from /proc/devices\n", major(stats.st_rdev)); - fprintf(stderr, "Warning: Preparing a logical device for boot " - "might fail\n"); - } - data->source = source_user; - /* 
Check if targetbase script is available */ - if (data->drv_name) - misc_asprintf(&script_file, "%s%s", script_pre, data->drv_name); - else - misc_asprintf(&script_file, "%s", script_pre); - if ((target->targetbase == NULL) && - (!stat(script_file, &script_stats))) { - data->source = source_script; - /* Run targetbase script */ - if (target->bootmap_dir == NULL) { - /* happens in case of partition dump */ - misc_asprintf(&ppn_cmd, "%s %d:%d", - script_file, major(stats.st_rdev), - minor(stats.st_rdev)); - } else { - misc_asprintf(&ppn_cmd, "%s %s", - script_file, target->bootmap_dir); - } - printf("Run %s\n", ppn_cmd); - fh = popen(ppn_cmd, "r"); - if (fh == NULL) { - error_reason("Failed to run popen(%s,\"r\",)"); - goto out_close; - } - checkparm = 0; - while (fgets(buffer, 80, fh) != NULL) { - if (sscanf(buffer, "targetbase=%s", value) == 1) { - target->targetbase = misc_strdup(value); - checkparm++; - } - if (sscanf(buffer, "targettype=%s", value) == 1) { - type_from_target(value, &target->targettype); - checkparm++; - } - if (sscanf(buffer, "targetgeometry=%s", value) == 1) { - target->targetcylinders = - atoi(strtok(value, ",")); - target->targetheads = atoi(strtok(NULL, ",")); - target->targetsectors = atoi(strtok(NULL, ",")); - checkparm++; - } - if (sscanf(buffer, "targetblocksize=%s", value) == 1) { - target->targetblocksize = atoi(value); - checkparm++; - } - if (sscanf(buffer, "targetoffset=%s", value) == 1) { - target->targetoffset = atol(value); - checkparm++; - } - } - switch (pclose(fh)) { - case 0 : - /* success */ - break; - case -1 : - error_reason("Failed to run pclose"); - goto out_close; - default : - error_reason("Script could not determine target " - "parameters"); - goto out_close; - } - if ((!disk_is_eckd(target->targettype) && checkparm < 4) || - (disk_is_eckd(target->targettype) && checkparm != 5)) { - error_reason("Target parameters missing from script"); - goto out_close; - } - } + return 0; +} - /* Get disk geometry. 
Note: geo.start contains a sector number - * offset measured in physical blocks, not sectors (512 bytes) */ - if (target->targetbase != NULL) { - data->geo.heads = target->targetheads; - data->geo.sectors = target->targetsectors; - data->geo.cylinders = target->targetcylinders; - data->geo.start = target->targetoffset; +/** + * Set disk info using ready target parameters provided either by + * user, or by script + */ +static int disk_set_info_by_hint(struct job_target_data *td, + struct disk_info *data, int fd) +{ + int majnum, minnum; + struct stat stats; + + data->devno = -1; + data->phy_block_size = td->targetblocksize; + data->type = td->targettype; + data->partnum = 0; + + if (sscanf(td->targetbase, "%d:%d", &majnum, &minnum) == 2) { + data->device = makedev(majnum, minnum); + data->targetbase = defined_as_device; + data->partnum = minor(stats.st_rdev) - minnum; } else { - data->source = source_auto; - if (ioctl(fd, HDIO_GETGEO, &data->geo)) { - error_reason("Could not get disk geometry"); - goto out_close; + if (stat(td->targetbase, &stats)) { + error_reason(strerror(errno)); + error_text("Could not get information for " + "file '%s'", td->targetbase); + return -1; } - } - if ((data->source == source_user) || (data->source == source_script)) { - data->devno = -1; - data->phy_block_size = target->targetblocksize; - data->type = target->targettype; - data->partnum = 0; - /* Get file information */ - if (sscanf(target->targetbase, "%d:%d", &majnum, &minnum) - == 2) { - data->device = makedev(majnum, minnum); - data->targetbase = defined_as_device; - data->partnum = minor(stats.st_rdev) - minnum; - } - else { - if (stat(target->targetbase, &stats)) { - error_reason(strerror(errno)); - error_text("Could not get information for " - "file '%s'", target->targetbase); - goto out_close; - } - if (!S_ISBLK(stats.st_mode)) { - error_reason("Target base device '%s' is not " - "a block device", - target->targetbase); - goto out_close; - } - data->device = stats.st_rdev; - 
data->targetbase = defined_as_name; + if (!S_ISBLK(stats.st_mode)) { + error_reason("Target base device '%s' is not " + "a block device", + td->targetbase); + return -1; } - if (data->type == disk_type_scsi && - ioctl(fd, NVME_IOCTL_ID) >= 0) - data->is_nvme = 1; - goto type_determined; + data->device = stats.st_rdev; + data->targetbase = defined_as_name; } + if (data->type == disk_type_scsi && ioctl(fd, NVME_IOCTL_ID) >= 0) + data->is_nvme = 1; + return 0; +} + +/** + * Calculate target parameters in the case when no hints were provided + */ +static int disk_set_info_auto(struct disk_info *data, + const struct stat *stats, int fd) +{ + struct dasd_information dasd_info; + if (ioctl(fd, BLKSSZGET, &data->phy_block_size)) { error_reason("Could not get blocksize"); - goto out_close; + return -1; } - /* Determine disk type */ if (!data->drv_name) { /* Driver name cannot be read */ if (ioctl(fd, BIODASDINFO, &dasd_info)) { @@ -365,39 +285,39 @@ if (data->geo.start) { /* SCSI partition */ data->type = disk_type_scsi; - data->partnum = stats.st_rdev & SCSI_PARTN_MASK; - data->device = stats.st_rdev & ~SCSI_PARTN_MASK; + data->partnum = stats->st_rdev & SCSI_PARTN_MASK; + data->device = stats->st_rdev & ~SCSI_PARTN_MASK; } else { /* SCSI disk */ data->type = disk_type_scsi; data->partnum = 0; - data->device = stats.st_rdev; + data->device = stats->st_rdev; } } else { /* DASD */ data->devno = dasd_info.devno; if (disk_determine_dasd_type(data, dasd_info)) - goto out_close; - data->partnum = stats.st_rdev & DASD_PARTN_MASK; - data->device = stats.st_rdev & ~DASD_PARTN_MASK; + return -1; + data->partnum = stats->st_rdev & DASD_PARTN_MASK; + data->device = stats->st_rdev & ~DASD_PARTN_MASK; } } else if (strcmp(data->drv_name, "dasd") == 0) { /* Driver name is 'dasd' */ if (ioctl(fd, BIODASDINFO, &dasd_info)) { error_reason("Could not determine DASD type"); - goto out_close; + return -1; } data->devno = dasd_info.devno; if (disk_determine_dasd_type(data, dasd_info)) - goto 
out_close; - data->partnum = stats.st_rdev & DASD_PARTN_MASK; - data->device = stats.st_rdev & ~DASD_PARTN_MASK; + return -1; + data->partnum = stats->st_rdev & DASD_PARTN_MASK; + data->device = stats->st_rdev & ~DASD_PARTN_MASK; } else if (strcmp(data->drv_name, "sd") == 0) { /* Driver name is 'sd' */ data->devno = -1; data->type = disk_type_scsi; - data->partnum = stats.st_rdev & SCSI_PARTN_MASK; - data->device = stats.st_rdev & ~SCSI_PARTN_MASK; + data->partnum = stats->st_rdev & SCSI_PARTN_MASK; + data->device = stats->st_rdev & ~SCSI_PARTN_MASK; } else if (strcmp(data->drv_name, "virtblk") == 0) { @@ -406,10 +326,10 @@ if (ioctl(fd, BLKSSZGET, &data->phy_block_size) != 0) perror("Could not retrieve blocksize information."); - if (determine_virtblk_type(data, &stats)) { + if (determine_virtblk_type(data, stats)) { error_reason("Virtblk device type not clearly " "determined."); - goto out_close; + return -1; } /* NVMe path, driver name is 'blkext' */ } else if (strcmp(data->drv_name, "blkext") == 0 && @@ -418,66 +338,271 @@ data->type = disk_type_scsi; data->is_nvme = 1; - if (util_sys_dev_is_partition(stats.st_rdev)) { - if (util_sys_get_base_dev(stats.st_rdev, &data->device)) - goto out_close; - data->partnum = util_sys_get_partnum(stats.st_rdev); + if (util_sys_dev_is_partition(stats->st_rdev)) { + if (util_sys_get_base_dev(stats->st_rdev, &data->device)) + return -1; + data->partnum = util_sys_get_partnum(stats->st_rdev); if (data->partnum == -1) - goto out_close; + return -1; } else { - data->device = stats.st_rdev; + data->device = stats->st_rdev; data->partnum = 0; } } else { /* Driver name is unknown */ error_reason("Unsupported device driver '%s'", data->drv_name); - goto out_close; + return -1; + } + return 0; +} + +/** + * Evaluate and set source type + */ +static void set_source_type(struct job_target_data *td, + const char *drv_name, char **script_file) +{ + const char *script_prefix = util_libdir_path("zipl_helper."); + struct stat script_stats; + 
+ if (td->source == source_user) { + /* do not reset user-specified target parameters */ + return; + } + /* Check if targetbase script is available */ + if (drv_name) + misc_asprintf(script_file, "%s%s", script_prefix, + drv_name); + else + misc_asprintf(script_file, "%s", script_prefix); + if (!stat(*script_file, &script_stats)) { + /* target parameters to be evaluated by script */ + td->source = source_script; + return; + } + td->source = source_auto; +} + +static void set_driver_name(struct disk_info *info, dev_t device) +{ + struct util_proc_dev_entry dev_entry; + + if (info->drv_name) + /* already set */ + return; + if (util_proc_dev_get_entry(device, 1, &dev_entry) == 0) { + info->drv_name = misc_strdup(dev_entry.name); + util_proc_dev_free_entry(&dev_entry); + } else { + fprintf(stderr, "Warning: Could not determine driver name for " + "major %d from /proc/devices\n", major(device)); + fprintf(stderr, "Warning: Preparing a logical device for boot " + "might fail\n"); + } +} + +static int run_targetbase_script(struct job_target_data *td, + char *script_file, struct stat *stats) +{ + char *ppn_cmd = NULL; + FILE *fh; + + misc_asprintf(&ppn_cmd, "%s %d:%d", script_file, + major(stats->st_rdev), minor(stats->st_rdev)); + printf("Run %s\n", ppn_cmd); + fh = popen(ppn_cmd, "r"); + free(ppn_cmd); + + if (!fh) { + error_reason("Failed to run popen(%s,\"r\",)"); + return -1; + } + /* translate the script output to target parameters */ + if (set_target_parameters(fh, td)) { + pclose(fh); + return -1; } + switch (pclose(fh)) { + case 0: + /* success */ + return 0; + case -1: + error_reason("Failed to run pclose"); + return -1; + default: + error_reason("Script could not determine target " + "parameters"); + return -1; + } +} + +/** + * Set disk geometry using target parameters provided either by + * user, or by script. 
+ * + * Note: geo.start contains a sector number offset measured in + * physical blocks, not sectors (512 bytes) + */ +static int disk_set_geometry_by_hint(struct job_target_data *td, + struct disk_info *data) +{ + data->geo.heads = td->targetheads; + data->geo.sectors = td->targetsectors; + data->geo.cylinders = td->targetcylinders; + data->geo.start = td->targetoffset; + + return 0; +} + +static int disk_set_geometry_auto(int fd, struct disk_info *info) +{ + if (ioctl(fd, HDIO_GETGEO, &info->geo)) { + error_reason("Could not get disk geometry"); + return -1; + } + return 0; +} + +/** + * The final step of setting disk info. + * Common for all source types + * + * DATA: disk info to be completed + * Pre-condition: disk type is already known and set at DATA->type + */ +static int disk_set_info_complete(struct job_target_data *td, + struct disk_info *data, + struct stat *stats, int fd) +{ + struct util_proc_part_entry part_entry; + long devsize; -type_determined: /* Get size of device in sectors (512 byte) */ if (ioctl(fd, BLKGETSIZE, &devsize)) { error_reason("Could not get device size"); - goto out_close; + return -1; } - /* Check for valid CHS geometry data. */ if (disk_is_eckd(data->type) && (data->geo.cylinders == 0 || data->geo.heads == 0 || data->geo.sectors == 0)) { error_reason("Invalid disk geometry (CHS=%d/%d/%d)", data->geo.cylinders, data->geo.heads, data->geo.sectors); - goto out_close; + return -1; } /* Convert device size to size in physical blocks */ data->phy_blocks = devsize / (data->phy_block_size / 512); - /* Adjust start on SCSI according to block_size. device-mapper devices are skipped */ - if (data->type == disk_type_scsi && target->targetbase == NULL) - data->geo.start = data->geo.start / (data->phy_block_size / 512); + /* + * Adjust start on SCSI according to block_size. 
+ * device-mapper devices, which are evaluated only + * in "source_script" mode, are skipped + */ + if (data->type == disk_type_scsi && td->source == source_auto) + data->geo.start = + data->geo.start / (data->phy_block_size / 512); if (data->partnum != 0) - data->partition = stats.st_rdev; + data->partition = stats->st_rdev; /* Try to get device name */ if (util_proc_part_get_entry(data->device, &part_entry) == 0) { data->name = misc_strdup(part_entry.name); util_proc_part_free_entry(&part_entry); if (data->name == NULL) - goto out_close; + return -1; } - /* There is no easy way to find out whether there is a file system - * on this device, so we set the respective block size to an invalid - * value. */ + /* Initialize file system block size with invalid value */ data->fs_block_size = -1; + return 0; +} + +/** + * Prepare INFO required to perform IPL installation on the physical + * disk where the logical DEVICE is located. + * Preparation is performed in 2 steps: + * + * 1. Find out a physical "base" disk where the logical DEVICE is + * located. Calculate "target" parameters (type, geometry, physical + * block size, data offset, etc); + * 2. Complete INFO by the found base disk and target parameters. + * + * TD: optionally contains target parameters specified by user via + * config file, or special "target options" of zipl tool. + * If target parameters were specified by user, then the step 1 above + * is skipped. + + * To exclude any user assumptions about the DEVICE, this function + * should be called with TD pointing to a zeroed structure. + * + * DEVICE: logical, or physical device, optionally formated with a + * file system. 
+ */ +int disk_get_info(const char *device, struct job_target_data *td, + struct disk_info **info) +{ + char *script_file = NULL; + struct disk_info *data; + struct stat stats; + int fd; + + if (stat(device, &stats)) { + error_reason(strerror(errno)); + return -1; + } + fd = open(device, O_RDONLY); + if (fd == -1) { + error_reason(strerror(errno)); + return -1; + } + data = (struct disk_info *)misc_malloc(sizeof(struct disk_info)); + if (!data) + goto error; + memset((void *)data, 0, sizeof(struct disk_info)); + set_driver_name(data, stats.st_rdev); + set_source_type(td, data->drv_name, &script_file); + switch (td->source) { + case source_script: + if (run_targetbase_script(td, script_file, &stats)) + goto error; + /* target parameters were set by the script output */ + assert(target_parameters_are_set(td)); + + if (disk_set_geometry_by_hint(td, data)) + goto error; + if (disk_set_info_by_hint(td, data, fd)) + goto error; + break; + case source_user: + /* + * target parameters were specified by user via + * "target" options + */ + assert(target_parameters_are_set(td)); + + if (disk_set_geometry_by_hint(td, data)) + goto error; + if (disk_set_info_by_hint(td, data, fd)) + goto error; + break; + case source_auto: + /* no ready target parameters are available */ + if (disk_set_geometry_auto(fd, data)) + goto error; + if (disk_set_info_auto(data, &stats, fd)) + goto error; + break; + default: + assert(0); + } + if (disk_set_info_complete(td, data, &stats, fd)) + goto error; + free(script_file); close(fd); *info = data; - free(script_file); return 0; -out_close: - close(fd); - free(ppn_cmd); +error: free(script_file); + close(fd); free(data); return -1; - } int @@ -576,10 +701,10 @@ return rc; } - -void -disk_free_info(struct disk_info* info) +void disk_free_info(struct disk_info *info) { + if (!info) + return; if (info->name) free(info->name); if (info->drv_name) @@ -587,7 +712,6 @@ free(info); } - /* Retrieve the physical blocknumber (block on disk) of the specified 
logical * block (block in file). FD provides the file descriptor, LOGICAL is the * logical block number. Upon success, return 0 and store the physical @@ -867,11 +991,10 @@ /* Print textual representation of INFO contents. */ -void -disk_print_info(struct disk_info* info) +void disk_print_info(struct disk_info *info, int source) { char footnote[4] = ""; - if ((info->source == source_user) || (info->source == source_script)) + if (source == source_user || source == source_script) strcpy(footnote, " *)"); printf(" Device..........................: "); @@ -899,7 +1022,7 @@ (info->type == disk_type_diag) || (info->type == disk_type_eckd_ldl) || (info->type == disk_type_eckd_cdl)) && - (info->source == source_auto)) { + (source == source_auto)) { printf(" DASD device number..............: %04x\n", info->devno); } @@ -930,9 +1053,9 @@ info->phy_block_size, footnote); printf(" Device size in physical blocks..: %ld\n", (long) info->phy_blocks); - if (info->source == source_user) + if (source == source_user) printf(" *) Data provided by user.\n"); - if (info->source == source_script) + if (source == source_script) printf(" *) Data provided by script.\n"); } diff -Nru s390-tools-2.31.0/zipl/src/install.c s390-tools-2.33.1/zipl/src/install.c --- s390-tools-2.31.0/zipl/src/install.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zipl/src/install.c 2024-05-28 08:26:36.000000000 +0200 @@ -346,6 +346,45 @@ return rc; } +/* + * Check if CCW dump tool is installed on the disk and destroy it by + * clearing the first block. 
+ */ +static int clear_ccw_dumper(const struct disk_info *info, int fd) +{ + char dumper_magic[DF_S390_DUMPER_MAGIC_SIZE]; + void *buffer; + int rc; + + /* Read the CCW dumper magic at the start of block 3 */ + if (misc_seek(fd, ECKD_CDL_DUMP_REC * info->phy_block_size)) + return -1; + rc = misc_read(fd, dumper_magic, sizeof(dumper_magic)); + if (rc) { + error_text("Could not read CCW dump record"); + return rc; + } + /* + * Check if the dump tool is present and clear its first block with zeroes. + */ + if (strncmp(dumper_magic, DF_S390_DUMPER_MAGIC32, sizeof(dumper_magic)) == 0 || + strncmp(dumper_magic, DF_S390_DUMPER_MAGIC64, sizeof(dumper_magic)) == 0 || + strncmp(dumper_magic, DF_S390_DUMPER_MAGIC_EXT, sizeof(dumper_magic)) == 0 || + strncmp(dumper_magic, DF_S390_DUMPER_MAGIC_MV, sizeof(dumper_magic)) == 0 || + strncmp(dumper_magic, DF_S390_DUMPER_MAGIC_MV_EXT, sizeof(dumper_magic)) == 0) { + if (misc_seek(fd, ECKD_CDL_DUMP_REC * info->phy_block_size)) + return -1; + buffer = misc_calloc(1, info->phy_block_size); + if (buffer == NULL) + return -1; + rc = DRY_RUN_FUNC(misc_write(fd, buffer, info->phy_block_size)); + free(buffer); + if (rc) + error_text("Could not clear CCW dumper"); + } + return rc; +} + /** * Install a program table for List-Directed dump * See the comment before install_bootloader() for details @@ -367,6 +406,9 @@ break; case disk_type_eckd_cdl: rc = install_eckd_cdl_ld(fd, pt->stage1b_list, info); + /* Clear CCW dumper upon successful List-Directed ECKD dump tool installation */ + if (rc == 0) + rc = clear_ccw_dumper(info, fd); break; default: error_reason("Inappropriarte device type (%d) for List-Directed dump", @@ -1098,7 +1140,7 @@ } if (verbose) { printf("Target device information\n"); - disk_print_info(info); + disk_print_info(info, target->source); } rc = misc_temp_dev(info->device, 1, &tempdev); if (rc) { @@ -1257,7 +1299,7 @@ if (verbose) { for (i = 0; i < count; i++) { printf("Multi-volume dump target %d:\n", i + 1); - 
disk_print_info(info[i]); + disk_print_info(info[i], target->source); printf("-------------------------------------------\n"); } } diff -Nru s390-tools-2.31.0/zipl/src/job.c s390-tools-2.33.1/zipl/src/job.c --- s390-tools-2.31.0/zipl/src/job.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zipl/src/job.c 2024-05-28 08:26:36.000000000 +0200 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -369,9 +370,9 @@ } -static void -free_target_data(struct job_target_data* data) +void free_target_data(struct job_target_data *data) { + free(data->bootmap_dir); free(data->targetbase); } @@ -459,7 +460,12 @@ job_free(struct job_data* job) { - free(job->target.bootmap_dir); + if (job->dump_mounted && umount(job->target.bootmap_dir)) + fprintf(stderr, "Could not umount dump device at %s", + job->target.bootmap_dir); + if (job->bootmap_dir_created && rmdir(job->target.bootmap_dir)) + fprintf(stderr, "Could not remove directory %s", + job->target.bootmap_dir); free_target_data(&job->target); free_envblk_data(&job->envblk); free(job->name); @@ -1360,6 +1366,7 @@ scan_keyword_targetbase]); if (job->target.targetbase == NULL) return -1; + job->target.source = source_user; } if (data[(int) scan_keyword_targettype] != NULL) { if (type_from_target( @@ -1611,6 +1618,7 @@ scan[i].content.keyword.value); if (job->target.targetbase == NULL) return -1; + job->target.source = source_user; break; case scan_keyword_targettype: if (type_from_target( diff -Nru s390-tools-2.31.0/zipl/src/zipl.c s390-tools-2.33.1/zipl/src/zipl.c --- s390-tools-2.31.0/zipl/src/zipl.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zipl/src/zipl.c 2024-05-28 08:26:36.000000000 +0200 @@ -204,9 +204,16 @@ case job_segment: case job_menu: rc = prepare_bootloader(job, &bis); - if (rc) + if (rc) { + free_bootloader(&bis); break; + } rc = install_bootloader(job, &bis); + if (rc) { + free_bootloader(&bis); + break; + } + rc = post_install_bootloader(job, &bis); 
free_bootloader(&bis); break; case job_ipl_tape: diff -Nru s390-tools-2.31.0/zkey/ekmfweb/zkey-ekmfweb.1 s390-tools-2.33.1/zkey/ekmfweb/zkey-ekmfweb.1 --- s390-tools-2.31.0/zkey/ekmfweb/zkey-ekmfweb.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zkey/ekmfweb/zkey-ekmfweb.1 2024-05-28 08:26:36.000000000 +0200 @@ -176,7 +176,7 @@ You cannot change the associated cryptographic adapters (APQNs) of a key that is bound to EKMF Web, because it inherits the APQN association from the EKMF Web plugin. To change the APQNs associated with the EKMF Web plugin, use the -\fBzkey kms configure\fP command with the \fB--apqns\fP option. This also +\fBzkey kms configure\fP command with the \fB\-\-apqns\fP option. This also changes the APQN associations of all secure keys in the secure key repository, that are bound to the EKMF Web plugin. . @@ -367,8 +367,8 @@ Specifies the label tags for generating a secure key in EKMF Web, in the form \fB=(,=)*[,]\fP with tags as defined by the key template. Use the \fBzkey kms info\fP command to display the key templates used -by zkey. For XTS type keys the two templates for \fBXTS-Key1\fP and -\fBXTS-Key2\fP are used. For non-XTS type keys, the template for \fBNon-XTS\fP +by zkey. For XTS type keys the two templates for \fBXTS\-Key1\fP and +\fBXTS\-Key2\fP are used. For non-XTS type keys, the template for \fBNon\-XTS\fP keys is used. . .SS "Options for the zkey remove command" @@ -446,7 +446,7 @@ .B zkey kms info Displays information about the EKMF Web plugin and its configuration. .TP -.B zkey kms reencipher --staged +.B zkey kms reencipher \-\-staged Re-enciphers the EKMF Plugin's identity key with a new CCA master key in staged mode .TP @@ -456,7 +456,7 @@ and associates it with block device '/dev/dasdc1' and device-mapper name \&'encvol'. 
.TP -.B zkey generate \-\-name seckey \-\-xts \-\-volumes /dev/dasdc1:encvol \-\-volume-type luks2 \-\-label\-tags \(dqENV=TEST,APP=LINUX\(dq +.B zkey generate \-\-name seckey \-\-xts \-\-volumes /dev/dasdc1:encvol \-\-volume\-type luks2 \-\-label\-tags \(dqENV=TEST,APP=LINUX\(dq Generates a secure AES key for the XTS cipher mode in EKMF Web using the label tags 'ENV=TEST' and 'APP=LINUX' and stores it in the secure key repository using the name 'seckey' and associates it with block device '/dev/dasdc1' and diff -Nru s390-tools-2.31.0/zkey/keystore.c s390-tools-2.33.1/zkey/keystore.c --- s390-tools-2.31.0/zkey/keystore.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zkey/keystore.c 2024-05-28 08:26:36.000000000 +0200 @@ -25,6 +25,7 @@ #include #include "lib/util_base.h" +#include "lib/util_file.h" #include "lib/util_libc.h" #include "lib/util_panic.h" #include "lib/util_path.h" @@ -4202,6 +4203,7 @@ size_t tries; bool open; bool format; + int fips; char **volume_filter; int (*process_func)(struct keystore *keystore, const char *volume, @@ -4319,16 +4321,22 @@ * Using the default Argon2i options might cause out-of-memory * errors when multiple LUKS2 volumes are opened automatically * via /etc/crypttab + * In case the system runs in FIPS mode, use PBKDF2 + * instead, because Argon2i might be disabled by a + * policy when FIPS mode is active. */ util_asprintf(&cmd, "cryptsetup luksFormat %s%s--type luks2 " "--master-key-file '%s' --key-size %lu " - "--cipher %s --pbkdf argon2i --pbkdf-memory 32 " - "--pbkdf-force-iterations 4 %s%s%s", + "--cipher %s --pbkdf %s %s%s%s", info->batch_mode ? "-q " : "", keystore->verbose ? "-v " : "", key_file_name, key_file_size * 8, - cipher_spec, common_len > 0 ? + cipher_spec, + info->fips ? "pbkdf2" : + "argon2i --pbkdf-memory 32 " + "--pbkdf-force-iterations 4", + common_len > 0 ? common_passphrase_options : "", sector_size > 0 ? 
temp : "", volume); @@ -4605,6 +4613,8 @@ return -EINVAL; } + util_file_read_i(&info.fips, 10, "/proc/sys/crypto/fips_enabled"); + info.execute = execute; info.open = open; info.format = format; diff -Nru s390-tools-2.31.0/zkey/kmip/zkey-kmip.1 s390-tools-2.33.1/zkey/kmip/zkey-kmip.1 --- s390-tools-2.31.0/zkey/kmip/zkey-kmip.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zkey/kmip/zkey-kmip.1 2024-05-28 08:26:36.000000000 +0200 @@ -629,7 +629,7 @@ it in the secure key repository using the name 'seckey' and associates it with block device '/dev/dasdc1' and device\-mapper name 'encvol'. .TP -.B zkey generate \-\-name seckey \-\-xts \-\-volumes /dev/dasdc1:encvol \-\-volume-type luks2 \-\-label TEST1:TEST2 +.B zkey generate \-\-name seckey \-\-xts \-\-volumes /dev/dasdc1:encvol \-\-volume\-type luks2 \-\-label TEST1:TEST2 Generates a secure AES key for the XTS cipher mode in the KMIP server using the labels 'TEST1' and 'TEST2' and stores it in the secure key repository using the name 'seckey' and associates it with block device '/dev/dasdc1' and diff -Nru s390-tools-2.31.0/zkey/pkey.c s390-tools-2.33.1/zkey/pkey.c --- s390-tools-2.31.0/zkey/pkey.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zkey/pkey.c 2024-05-28 08:26:36.000000000 +0200 @@ -922,7 +922,7 @@ genseck2.type = key_type_to_pkey_type(key_type); if (genseck2.type == 0) { - warnx("Key-type not supported; %s", key_type); + warnx("Key-type not supported: %s", key_type); return -ENOTSUP; } @@ -1084,7 +1084,7 @@ clr2seck2.type = key_type_to_pkey_type(key_type); if (clr2seck2.type == 0) { - warnx("Key-type not supported; %s", key_type); + warnx("Key-type not supported: %s", key_type); return -ENOTSUP; } diff -Nru s390-tools-2.31.0/zkey/zkey.1 s390-tools-2.33.1/zkey/zkey.1 --- s390-tools-2.31.0/zkey/zkey.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zkey/zkey.1 2024-05-28 08:26:36.000000000 +0200 @@ -51,10 +51,10 @@ You can associate a secure key with one or multiple 
cryptographic adapters (APQNs) that are set up with the same CCA or EP11 master key. You can also associate a secure key with one or multiple volumes -(block devices), which are encrypted using dm-crypt with the secure key. The -volume association also contains the device-mapper name, separated by a colon, -used with dm-crypt. A specific volume can only be associated with one secure -key. +(block devices), which are encrypted using \fBdm\-crypt\fP with the secure key. +The volume association also contains the device-mapper name, separated by a +colon, used with \fBdm\-crypt\fP. A specific volume can only be associated with +one secure key. .PP The generated secure key is saved in a file, and contains an AES secure key with a length of 128, 192, or 256 bits, or two concatenated AES secure keys with a @@ -98,7 +98,7 @@ .RB [ \-\-xts | \-x ] .RB [ \-\-clearkey | \-c .IR clear\-key\-file ] -.RB [ \-\-key-type | \-K +.RB [ \-\-key\-type | \-K .IR type ] .RB [ \-\-verbose | \-V ] . @@ -106,7 +106,7 @@ .B zkey .BR generate | gen .B \-\-name | \-N -.IR key-name +.IR key\-name .RB [ \-\-description | \-d .IR description ] .RB [ \-\-volumes | \-l @@ -114,22 +114,22 @@ .RB [ \-\-apqns | \-a .IR card1.domain1[,card2.domain2[,...]] ] .RB [ \-\-no\-apqn\-check ] -.RB [ \-\-sector-size | \-S +.RB [ \-\-sector\-size | \-S .IR bytes ] -.RB [ \-\-volume-type | \-t +.RB [ \-\-volume\-type | \-t .IR type ] .RB [ \-\-keybits | \-k .IR size ] .RB [ \-\-xts | \-x ] .RB [ \-\-clearkey | \-c .IR clear\-key\-file ] -.RB [ \-\-key-type | \-K +.RB [ \-\-key\-type | \-K .IR type ] .RB [ \-\-local | \-L ] .RB [ \-\-gen\-dummy\-passphrase ] .RB [ \-\-set\-dummy\-passphrase .IR passphrase\-file ] -.RB [ KMS\-plugin\ specific\ options ] +.RB [ KMS-plugin\ specific\ options ] .RB [ \-\-verbose | \-V ] .PP Use the @@ -164,24 +164,24 @@ , .B \-\-apqns , or the -.B \-\-sector-size +.B \-\-sector\-size options. 
When the secure key repository is bound to a key management system plugin, then you can not associate specific APQNs with such keys, but the keys inherit the APQNs that are associated with the key management system plugin. .PP -You can generate different types of secure keys: \fBCCA-AESDATA\fP keys, -\fBCCA-AESCIPHER\fP, and \fBEP11-AES\fP keys. +You can generate different types of secure keys: \fBCCA\-AESDATA\fP keys, +\fBCCA\-AESCIPHER\fP, and \fBEP11\-AES\fP keys. Specify the type of the secure key using the .B \-\-key\-type -option. Normally, the default key type is CCA-AESDATA. If the secure key +option. Normally, the default key type is CCA\-AESDATA. If the secure key repository is bound to a key management system plugin, and the plugin does not -support keys of type CCA-AESDATA, then the default key type is CCA-AESCIPHER, or -EP11-AES, whichever the plugin supports. +support keys of type CCA\-AESDATA, then the default key type is CCA\-AESCIPHER, +or EP11\-AES, whichever the plugin supports. .PP .B Note: -Secure keys of type \fBCCA-AESCIPHER\fP require an IBM cryptographic +Secure keys of type \fBCCA\-AESCIPHER\fP require an IBM cryptographic adapter in CCA coprocessor mode of version 6 or later, e.g. a CEX6C. -Secure keys of type \fBEP11-AES\fP require an IBM cryptographic +Secure keys of type \fBEP11\-AES\fP require an IBM cryptographic adapter in EP11 coprocessor mode of version 7 or later, e.g. a CEX7P. . 
.SS "Validating secure AES keys" @@ -195,7 +195,7 @@ .B zkey .BR validate | val .RB [ \-\-name | \-N -.IR key-name ] +.IR key\-name ] .RB [ \-\-apqns | \-a .IR card1.domain1[,card2.domain2[,...]] ] .RB [ \-\-no\-apqn\-check ] @@ -250,12 +250,12 @@ .B zkey .BR reencipher | re .RB [ \-\-name | \-N -.IR key-name ] +.IR key\-name ] .RB [ \-\-apqns | \-a .IR card1.domain1[,card2.domain2[,...]] ] .RB [ \-\-to\-new | \-n ] .RB [ \-\-from\-old | \-o ] -.RB [ \-\-in-place | \-i ] +.RB [ \-\-in\-place | \-i ] .RB [ \-\-staged | \-s ] .RB [ \-\-complete | \-c ] .RB [ \-\-verbose | \-V ] @@ -282,7 +282,7 @@ The master key in the \fBNEW\fP register cannot be used until it is made the current master key. You can pro-actively re-encipher a secure key with the \fBNEW\fP master key before this key is made the \fBCURRENT\fP key. Use the -.B \-\-to-new +.B \-\-to\-new option to do this. .RE .PP @@ -294,13 +294,13 @@ option to re-encipher a secure key that is currently enciphered with the master key in the \fBOLD\fP register with the master key in the \fBCURRENT\fP register. This option is only available for secure keys of type -\fBCCA-AESDATA\fP or \fBCCA-AESCIPHER\fP. +\fBCCA\-AESDATA\fP or \fBCCA\-AESCIPHER\fP. .PP .PP If both the -.B \-\-from-old +.B \-\-from\-old and -.B \-\-to-new +.B \-\-to\-new options are specified, a secure key that is currently enciphered with the master key in the \fBOLD\fP register is re-enciphered with the master key in the \fBNEW\fP register. @@ -342,11 +342,11 @@ .PP \fB"In-place"\fP immediately replaces the secure key in the repository with the re-enciphered secure key. Re-enciphering from \fBOLD\fP to \fBCURRENT\fP is -performed in-place per default. You can use option \fB\-\-in-place\fP to force an -in-place re-enciphering for the \fBCURRENT\fP to \fBNEW\fP case. Be aware that -a secure key that was re-enciphered in-place from \fBCURRENT\fP to \fBNEW\fP -is no longer valid, until the new CCA or EP11 master key has been made the -current one. 
+performed in-place per default. You can use option \fB\-\-in\-place\fP to force +an in-place re-enciphering for the \fBCURRENT\fP to \fBNEW\fP case. Be aware +that a secure key that was re-enciphered in-place from \fBCURRENT\fP to +\fBNEW\fP is no longer valid, until the new CCA or EP11 master key has been made +the current one. .PP \fBStaged\fP mode means that the re-enciphered secure key is stored in a separate file in the secure key repository. Thus the current secure key is still @@ -359,9 +359,9 @@ .PP .B Note: The \fBreencipher\fP command requires the CCA host library (libcsulcca.so, for) -for secure keys of type CCA-AESDATA or CCA-AESCIPHER, or the IBM Z Enterprise +for secure keys of type CCA\-AESDATA or CCA\-AESCIPHER, or the IBM Z Enterprise PKCS #11 (EP11) Support Program (EP11 host library) for secure keys of type -EP11-AES to be installed. For the supported environments and downloads, see: +EP11\-AES to be installed. For the supported environments and downloads, see: \fIhttp://www.ibm.com/security/cryptocards\fP . .SS "Import existing AES secure keys into the secure key repository" @@ -370,7 +370,7 @@ .BR import | im .I secure\-key\-file .B \-\-name | \-N -.IR key-name +.IR key\-name .RB [ \-\-description | \-d .IR description ] .RB [ \-\-volumes | \-l @@ -378,9 +378,9 @@ .RB [ \-\-apqns | \-a .IR card1.domain1[,card2.domain2[,...]] ] .RB [ \-\-no\-apqn\-check ] -.RB [ \-\-sector-size | \-S +.RB [ \-\-sector\-size | \-S .IR bytes ] -.RB [ \-\-volume-type | \-t +.RB [ \-\-volume\-type | \-t .IR type ] .RB [ \-\-gen\-dummy\-passphrase ] .RB [ \-\-set\-dummy\-passphrase @@ -399,12 +399,12 @@ , .B \-\-apqns , or the -.B \-\-sector-size +.B \-\-sector\-size options. .PP .B Note: The \fBimport\fP command requires the CCA host library (libcsulcca.so) -to be installed when secure keys of type \fBCCA-AESCIPHER\fP are imported. +to be installed when secure keys of type \fBCCA\-AESCIPHER\fP are imported. 
For the supported environments and downloads, see: \fIhttp://www.ibm.com/security/cryptocards\fP . @@ -414,7 +414,7 @@ .BR export | ex .I secure\-key\-file .B \-\-name | \-N -.IR key-name +.IR key\-name .RB [ \-\-verbose | \-V ] . .PP @@ -432,14 +432,14 @@ .B zkey .BR list | li .RB [ \-\-name | \-N -.IR key-name ] +.IR key\-name ] .RB [ \-\-volumes | \-l .IR volume1[:dmname1][,volume2[:dmname2][,...]] ] .RB [ \-\-apqns | \-a .IR card1.domain1[,card2.domain2[,...]] ] -.RB [ \-\-volume-type | \-t +.RB [ \-\-volume\-type | \-t .IR type ] -.RB [ \-\-key-type | \-K +.RB [ \-\-key\-type | \-K .IR type ] .RB [ \-\-local | \-L ] .RB [ \-\-kms\-bound | \-M ] @@ -470,9 +470,9 @@ .B zkey .BR remove | rem .B \-\-name | \-N -.IR key-name +.IR key\-name .RB [ \-\-force | \-F ] -.RB [ KMS\-plugin\ specific\ options ] +.RB [ KMS-plugin\ specific\ options ] .RB [ \-\-verbose | \-V ] . .PP @@ -509,17 +509,17 @@ .B zkey .BR change | ch .B \-\-name | \-N -.IR key-name +.IR key\-name .RB [ \-\-description | \-d .IR description ] .RB [ \-\-volumes | \-l -.IR [+|-]volume1:dmname1[,volume2:dmname2[,...]] ] +.IR [+|\-]volume1:dmname1[,volume2:dmname2[,...]] ] .RB [ \-\-apqns | \-a -.IR [+|-]card1.domain1[,card2.domain2[,...]] ] +.IR [+|\-]card1.domain1[,card2.domain2[,...]] ] .RB [ \-\-no\-apqn\-check ] -.RB [ \-\-sector-size | \-S +.RB [ \-\-sector\-size | \-S .IR bytes ] -.RB [ \-\-volume-type | \-t +.RB [ \-\-volume\-type | \-t .IR type ] .RB [ \-\-gen\-dummy\-passphrase ] .RB [ \-\-set\-dummy\-passphrase @@ -550,12 +550,12 @@ or the .B \-\-apqns options. To remove an association, -specify the association to remove prefixed with a \fI-\fP with the +specify the association to remove prefixed with a \fI\-\fP with the .B \-\-volumes or the .B \-\-apqns options. You cannot mix \fI+\fP and -\fI-\fP in one specification. You can either add or remove (or set) the +\fI\-\fP in one specification. You can either add or remove (or set) the associations with one command. 
.PP For secure AES keys that are bound to a key management system (KMS) you can not @@ -575,9 +575,9 @@ .B zkey .BR rename | ren .B \-\-name | \-N -.IR key-name -.B \-\-new-name | \-w -.IR new-key-name +.IR key\-name +.B \-\-new\-name | \-w +.IR new\-key\-name .RB [ \-\-verbose | \-V ] . .PP @@ -587,7 +587,7 @@ Specify the name of the key that is to be renamed using the .B \-\-name option and the new name using the -.B \-\-new-name +.B \-\-new\-name option. You cannot use wildcards. . .B Note: @@ -604,9 +604,9 @@ .B zkey .B copy | co .RB \-\-name | \-N -.IR key-name +.IR key\-name .B \-\-new\-name | \-w -.IR new-key-name +.IR new\-key\-name .RB [ \-\-volumes | \-l .IR volume1:dmname1[,volume2:dmname2[,...]] ] .RB [ \-\-local | \-L ] @@ -619,7 +619,7 @@ Specify the name of the key that is to be copied using the .B \-\-name option and the name of the copied key using the -.B \-\-new-name +.B \-\-new\-name option. You cannot use wildcards. .PP .B Note: @@ -640,10 +640,10 @@ .BR crypttab | cryptt .RB [ \-\-volumes | \-l .IR volume1[:dmname1][,volume2[:dmname2][,...]] ] -.RB [ \-\-volume-type | \-t +.RB [ \-\-volume\-type | \-t .IR type ] .RB [ \-\-key\-file -.IR file-name ] +.IR file\-name ] .RB [ \-\-keyfile\-offset .IR bytes ] .RB [ \-\-keyfile\-size @@ -656,8 +656,8 @@ Use the .B crypttab command to generate crypttab entries using the \fBplain\fP or \fBLUKS2\fP -dm-crypt mode for volumes that are associated with secure keys contained in the -secure key repository. Specify the +\fBdm\-crypt\fP mode for volumes that are associated with secure keys contained +in the secure key repository. Specify the .B \-\-volumes option to limit the list of volumes where crypttab entries are generated for. You can use wildcards. @@ -666,7 +666,7 @@ specified then only those volumes with the specified volume and device-mapper name are selected. Specify the -.B \-\-volume-type +.B \-\-volume\-type option to generate crypttab entries for the specified volume type only. 
.P For LUKS2 volumes, a passphrase is required. You are prompted for the @@ -692,19 +692,20 @@ .BR cryptsetup | crypts .RB [ \-\-volumes | \-l .IR volume1[:dmname1][,volume2[:dmname2][,...]] ] -.RB [ \-\-volume-type | \-t +.RB [ \-\-volume\-type | \-t .IR type ] .RB [ \-\-run | \-r ] .RB [ \-\-open ] .RB [ \-\-format ] .RB [ \-\-key\-file -.IR file-name ] +.IR file\-name ] .RB [ \-\-keyfile\-offset .IR bytes ] .RB [ \-\-keyfile\-size .IR bytes ] .RB [ \-\-tries .IR number ] +.RB [ \-\-batch\-mode | \-q ] .RB [ \-\-verbose | \-V ] . .PP @@ -720,7 +721,7 @@ The device-mapper name of an associated volume can be omitted; if it is specified then only those volumes with the specified volume and device-mapper name are selected. Specify the -.B \-\-volume-type +.B \-\-volume\-type option to generate cryptsetup commands for the specified volume type only. Specify the .B \-\-run @@ -734,12 +735,14 @@ command is generated. .P For LUKS2 volumes, the generated \fBcryptsetup luksFormat\fP contains -options \fB-\-pbkdf argon2i \-\-pbkdf\-memory 32 \-\-pbkdf\-force\-iterations 4 -\fP for low memory and time requirements. Using the default \fBArgon2i\fP +options \fB\-\-pbkdf argon2i \-\-pbkdf\-memory 32 \-\-pbkdf\-force\-iterations +4\fP for low memory and time requirements. Using the default \fBArgon2i\fP options might cause out-of-memory errors when multiple encrypted volumes are -unlocked automatically at boot through /etc/crypttab. Because PAES uses secure -AES keys as volume keys, the security of the key derivation function used to encrypt -the volume key in the LUKS key slots is of less relevance. +unlocked automatically at boot through \fB/etc/crypttab\fP. In case the system +runs in FIPS mode, \fB\-\-pbkdf pbkdf2\fP is used instead, because \fBArgon2i\fP +might be disabled by a policy when FIPS mode is active. 
Because PAES uses secure +AES keys as volume keys, the security of the key derivation function used to +encrypt the volume key in the LUKS key slots is of less relevance. .P For LUKS2 volumes, a passphrase is required. You are prompted for the passphrase when running the generated commands, unless option @@ -767,7 +770,7 @@ .B zkey .BR convert | con .I secure\-key\-file -.RB \-\-key-type | \-K +.RB \-\-key\-type | \-K .IR type .RB [ \-\-no\-apqn\-check ] .RB [ \-\-force | \-F ] @@ -777,8 +780,8 @@ .B zkey .BR convert | con .B \-\-name | \-N -.IR key-name -.RB \-\-key-type | \-K +.IR key\-name +.RB \-\-key\-type | \-K .IR type .RB [ \-\-no\-apqn\-check ] .RB [ \-\-force | \-F ] @@ -788,11 +791,11 @@ Use the .B convert command to convert an existing secure key from one key type to another type. -You can convert secure keys of type CCA-AESDATA to type CCA-AESCIPHER only. +You can convert secure keys of type CCA\-AESDATA to type CCA\-AESCIPHER only. You can not convert keys that are bound to a key management system (KMS). .B Note: -Secure keys converted to type \fBCCA-AESCIPHER\fP require an IBM cryptographic +Secure keys converted to type \fBCCA\-AESCIPHER\fP require an IBM cryptographic adapter in CCA coprocessor mode of version 6 or later, e.g. a CEX6C. The secure key can either be contained in a file in the file system, or in a @@ -813,14 +816,14 @@ converted, this might have an effect on these volumes. .P For volumes with volume type \fBplain\fP, you must adapt the crypttab entries -and change the key size parameter to \fBsize=\fP or run -command \fBzkey crypttab --volumes \fP for each associated volume to -re-generate the crypttab entries. +and change the key size parameter to \fBsize=\fP or +run command \fBzkey crypttab \-\-volumes \fP for each associated volume +to re-generate the crypttab entries. .P -Associated volumes of type \fLUKS2\fP still contain the secure AES volume key of -the original type. 
To change the secure AES volume key in the LUKS2 header, -run command \fBzkey-cryptsetup setkey --master-key-file -\fP for each associated volume. +Associated volumes of type \fBLUKS2\fP still contain the secure AES volume key +of the original type. To change the secure AES volume key in the LUKS2 header, +run command \fBzkey\-cryptsetup setkey \-\-master\-key\-file +\fP for each associated volume. . .P .B Note: @@ -852,7 +855,7 @@ . .P Key management system plugins are configured in configuration file -\fB/etc/zkey/kms-plugins.conf\fP. This file contains the KMS plugin name and +\fB/etc/zkey/kms\-plugins.conf\fP. This file contains the KMS plugin name and its shared library. Set environment variable \fBZKEY_KMS_PLUGINS\fP to point to a different file to use a different KMS plugin configuration file. . @@ -909,19 +912,19 @@ command to display information about the currently bound key management system plugin (KMS plugin) and its configuration. . -.SS "Configure or re\-configure a key management system plugin" +.SS "Configure or re-configure a key management system plugin" . .B zkey kms .BR configure | con .RB [ \-\-apqns | \-a -.IR [+|-]card1.domain1[,card2.domain2[,...]] ] -.RB [ KMS\-plugin\ specific\ options ] +.IR [+|\-]card1.domain1[,card2.domain2[,...]] ] +.RB [ KMS-plugin\ specific\ options ] .RB [ \-\-verbose | \-V ] . .PP Use the .B kms configure -command to configure or re\-configure the currently bound key management system +command to configure or re-configure the currently bound key management system plugin (KMS plugin). As a minimum, you must associate APQNs with the key management system plugin. The plugin may require an initial configuration before it is fully functioning. 
Once configured, a plugin may allow you to @@ -947,10 +950,10 @@ .BR reencipher | re .RB [ \-\-to\-new | \-n ] .RB [ \-\-from\-old | \-o ] -.RB [ \-\-in-place | \-i ] +.RB [ \-\-in\-place | \-i ] .RB [ \-\-staged | \-s ] .RB [ \-\-complete | \-c ] -.RB [ KMS\-plugin\ specific\ options ] +.RB [ KMS-plugin\ specific\ options ] .RB [ \-\-verbose | \-V ] . .PP @@ -989,7 +992,7 @@ The master key in the \fBNEW\fP register cannot be used until it is made the current master key. You can pro-actively re-encipher a secure key with the \fBNEW\fP master key before this key is made the \fBCURRENT\fP key. Use the -.B \-\-to-new +.B \-\-to\-new option to do this. .RE .PP @@ -1004,9 +1007,9 @@ .PP .PP If both the -.B \-\-from-old +.B \-\-from\-old and -.B \-\-to-new +.B \-\-to\-new options are specified, a secure key that is currently enciphered with the master key in the \fBOLD\fP register is re-enciphered with the master key in the \fBNEW\fP register. @@ -1026,7 +1029,7 @@ .PP \fB"In-place"\fP immediately replaces the secure key with the re-enciphered secure key. Re-enciphering from \fBOLD\fP to \fBCURRENT\fP is performed -in-place per default. You can use option \fB\-\-in-place\fP to force an +in-place per default. You can use option \fB\-\-in\-place\fP to force an in-place re-enciphering for the \fBCURRENT\fP to \fBNEW\fP case. A secure key that was re-enciphered in-place from \fBCURRENT\fP to \fBNEW\fP is no longer valid, until the new CCA or EP11 master key has been made the @@ -1045,14 +1048,14 @@ .B zkey kms .BR list | li .RB [ \-\-label | \-B -.IR key-label ] +.IR key\-label ] .RB [ \-\-name | \-N -.IR key-name ] +.IR key\-name ] .RB [ \-\-volumes | \-l .IR volume1[:dmname1][,volume2[:dmname2][,...]] ] -.RB [ \-\-volume-type | \-t +.RB [ \-\-volume\-type | \-t .IR type ] -.RB [ KMS\-plugin\ specific\ options ] +.RB [ KMS-plugin\ specific\ options ] .RB [ \-\-verbose | \-V ] . 
.PP @@ -1080,16 +1083,16 @@ .B zkey kms .BR import | im .RB [ \-\-label | \-B -.IR key-label ] +.IR key\-label ] .RB [ \-\-name | \-N -.IR key-name ] +.IR key\-name ] .RB [ \-\-volumes | \-l .IR volume1[:dmname1][,volume2[:dmname2][,...]] ] -.RB [ \-\-volume-type | \-t +.RB [ \-\-volume\-type | \-t .IR type ] .RB [ \-\-batch\-mode | \-q ] .RB [ \-\-no\-volume\-check ] -.RB [ KMS\-plugin\ specific\ options ] +.RB [ KMS-plugin\ specific\ options ] .RB [ \-\-verbose | \-V ] . .PP @@ -1124,12 +1127,12 @@ .B zkey kms .BR refresh | ref .RB [ \-\-name | \-N -.IR key-name ] +.IR key\-name ] .RB [ \-\-volumes | \-l .IR volume1[:dmname1][,volume2[:dmname2][,...]] ] -.RB [ \-\-volume-type | \-t +.RB [ \-\-volume\-type | \-t .IR type ] -.RB [ \-\-key-type | \-K +.RB [ \-\-key\-type | \-K .IR type ] .RB [ \-\-refresh\-properties | \-P ] .RB [ \-\-no\-volume\-check ] @@ -1176,14 +1179,14 @@ .BR \-c ", " \-\-clearkey\~\fIclear\-key\-file\fP Specifies a file path that contains the clear AES key in binary form. If option \fB\-\-keybits\fP is omitted, the size of the specified file -determines the size of the AES key. If option \fB\-\-keybits\fP +determines the size of the AES key. If option \fB\-\-keybits\fP is specified, the size of the specified file must match the specified -key size. Valid file sizes are of 16, 24, or 32 bytes, and of 32 or 64 +key size. Valid file sizes are of 16, 24, or 32 bytes, and of 32 or 64 bytes for keys to be used with the XTS cipher mode. When the secure key is generated using a key management system, then this option can not be specified. .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. This option is only used for secure keys contained in the secure key repository. 
.TP @@ -1194,8 +1197,9 @@ .BR \-l ", " \-\-volumes\~\fIvolume1:dmname1[,volume2:dmname2[,...]]\fP Specifies a comma-separated list of volumes (block devices) that are associated with the secure AES key in the repository. These volumes are to be -encrypted using dm-crypt with the secure AES key. The volume association also -contains the device-mapper name, separated by a colon, used with dm-crypt. +encrypted using \fBdm\-crypt\fP with the secure AES key. The volume association +also contains the device-mapper name, separated by a colon, used with +\fBdm\-crypt\fP. A specific volume can only be associated with a single secure key. This option is only used for secure keys contained in the secure key repository. .TP @@ -1216,28 +1220,29 @@ associate APQNs with a secure AES key that are currently not available. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-S ", " \-\-sector-size\~\fIbytes\fP -Specifies the sector size in bytes used with dm-crypt. It must be a power of two -and in the range of 512 to 4096 bytes. If omitted, the system default sector -size is used. +.BR \-S ", " \-\-sector\-size\~\fIbytes\fP +Specifies the sector size in bytes used with \fBdm\-crypt\fP. It must be a power +of two and in the range of 512 to 4096 bytes. If omitted, the system default +sector size is used. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-t ", " \-\-volume-type\~\fItype\fP -Specifies the volume type of the associated volumes used with dm-crypt. Possible -values are \fBplain\fP and \fBluks2\fP. If omitted, \fBluks2\fP is used. +.BR \-t ", " \-\-volume\-type\~\fItype\fP +Specifies the volume type of the associated volumes used with \fBdm\-crypt\fP. +Possible values are \fBplain\fP and \fBluks2\fP. If omitted, \fBluks2\fP is +used. This option is only available if .B zkey has been compiled with LUKS2 support enabled. If LUKS2 support is not enabled, the default volume type is \fBplain\fP. 
This option is only used for secure keys contained in the secure key repository. .TP -.BR \-K ", " \-\-key-type\~\fItype\fP -Specifies the key type of the secure key. Possible values are \fBCCA-AESDATA\fP, -\fBCCA-AESCIPHER\fP, and \fBEP11-AES\fP. If this option is omitted, then a -secure key of type CCA-AESDATA is generated. -Secure keys of type \fBCCA-AESCIPHER\fP require an IBM cryptographic adapter +.BR \-K ", " \-\-key\-type\~\fItype\fP +Specifies the key type of the secure key. Possible values are +\fBCCA\-AESDATA\fP, \fBCCA\-AESCIPHER\fP, and \fBEP11\-AES\fP. If this option +is omitted, then a secure key of type CCA\-AESDATA is generated. +Secure keys of type \fBCCA\-AESCIPHER\fP require an IBM cryptographic adapter in CCA coprocessor mode of version 6 or later, e.g. a CEX6C. -Secure keys of type \fBEP11-AES\fP require an IBM cryptographic adapter +Secure keys of type \fBEP11\-AES\fP require an IBM cryptographic adapter in EP11 coprocessor mode of version 7 or later, e.g. a CEX7P. .TP .BR \-L ", "\-\-local @@ -1276,7 +1281,7 @@ . .SS "Options for the validate command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. You can use wildcards to select multiple secure keys in the secure key repository. When wildcards are used you must quote the value. @@ -1306,8 +1311,8 @@ .BR \-o ", " \-\-from\-old Re-enciphers a secure AES key that is currently enciphered with the master key in the OLD register with the master key in the CURRENT register. -This option is only available for secure keys of type CCA-AESDATA and -CCA-AESCIPHER. +This option is only available for secure keys of type CCA\-AESDATA and +CCA\-AESCIPHER. .TP .BR \-f ", " \-\-output\~\fIoutput\-file\fP Specifies the name of the output file to which the re-enciphered secure key @@ -1316,7 +1321,7 @@ only used for secure keys stored in a file in the file system. 
It is not valid for keys contained in the secure key repository. .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. You can use wildcards to select multiple secure keys in the secure key repository. When wildcards are used you must quote the value. @@ -1331,7 +1336,7 @@ lszcrypt displays it). This option is only used for secure keys contained in the secure key repository. .TP -.BR \-i ", " \-\-in-place +.BR \-i ", " \-\-in\-place Forces an in-place re-enciphering of a secure AES key contained in the secure key repository. "In-place" immediately replaces the secure key in the repository with the re-enciphered secure key. @@ -1359,7 +1364,7 @@ . .SS "Options for the import command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. This option is only used for secure keys contained in the secure key repository. .TP @@ -1370,8 +1375,9 @@ .BR \-l ", " \-\-volumes\~\fIvolume1:dmname1[,volume2:dmname2[,...]]\fP Specifies a comma-separated list of volumes (block devices) which are associated with the secure AES key in the repository. These volumes are to be -encrypted using dm-crypt with the secure AES key. The volume association also -contains the device-mapper name, separated by a colon, used with dm-crypt. +encrypted using \fBdm\-crypt\fP with the secure AES key. The volume association +also contains the device-mapper name, separated by a colon, used with +\fBdm\-crypt\fP. A specific volume can only be associated with a single secure key. This option is only used for secure keys contained in the secure key repository. .TP @@ -1388,15 +1394,16 @@ associate APQNs with a secure AES key that are currently not available. This option is only used for secure keys contained in the secure key repository. 
.TP -.BR \-S ", " \-\-sector-size\~\fIbytes\fP -Specifies the sector size in bytes used with dm-crypt. It must be a power of two -and in the range of 512 to 4096 bytes. If omitted, the system default sector -size is used. +.BR \-S ", " \-\-sector\-size\~\fIbytes\fP +Specifies the sector size in bytes used with \fBdm\-crypt\fP. It must be a +power of two and in the range of 512 to 4096 bytes. If omitted, the system +default sector size is used. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-t ", " \-\-volume-type\~\fItype\fP -Specifies the volume type of the associated volumes used with dm-crypt. Possible -values are \fBplain\fP and \fBluks2\fP. If omitted, \fBluks2\fP is used. +.BR \-t ", " \-\-volume\-type\~\fItype\fP +Specifies the volume type of the associated volumes used with \fBdm\-crypt\fP. +Possible values are \fBplain\fP and \fBluks2\fP. If omitted, \fBluks2\fP is +used. This option is only available if .B zkey has been compiled with LUKS2 support enabled. If LUKS2 support is not enabled, @@ -1427,7 +1434,7 @@ . .SS "Options for the export command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. You cannot use wildcards. This option is only used for secure keys contained in the secure key repository. @@ -1436,7 +1443,7 @@ . .SS "Options for the list command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. You can use wildcards to select multiple secure keys in the secure key repository. When wildcards are used you must quote the value. @@ -1448,7 +1455,7 @@ associated with the secure AES key in the repository. Only those keys are listed, which are associated with the specified volumes. The volume association also contains the device-mapper name, separated by a -colon, used with dm-crypt. 
You can omit the device-mapper name; if it is +colon, used with \fBdm\-crypt\fP. You can omit the device-mapper name; if it is specified then only those keys are listed that are associated with the specified volume and device-mapper name. You can use wildcards to specify the volumes and device-mapper names. @@ -1464,19 +1471,19 @@ specification. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-t ", " \-\-volume-type\~\fItype\fP -Specifies the volume type of the associated volumes used with dm-crypt. Possible -values are \fBplain\fP and \fBluks2\fP. Only keys with the specified volume -type are listed. +.BR \-t ", " \-\-volume\-type\~\fItype\fP +Specifies the volume type of the associated volumes used with \fBdm\-crypt\fP. +Possible values are \fBplain\fP and \fBluks2\fP. Only keys with the specified +volume type are listed. This option is only available if .B zkey has been compiled with LUKS2 support enabled. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-K ", " \-\-key-type\~\fItype\fP -Specifies the key type of the secure key. Possible values are \fBCCA-AESDATA\fP, -\fBCCA-AESCIPHER\fP, and \fBEP11-AES\fP. Only keys with the specified key type -are listed. +.BR \-K ", " \-\-key\-type\~\fItype\fP +Specifies the key type of the secure key. Possible values are +\fBCCA\-AESDATA\fP, \fBCCA\-AESCIPHER\fP, and \fBEP11\-AES\fP. Only keys with +the specified key type are listed. This option is only used for secure keys contained in the secure key repository. .TP .BR \-L ", " \-\-local\fP @@ -1498,7 +1505,7 @@ . .SS "Options for the remove command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. You cannot use wildcards. This option is only used for secure keys contained in the secure key repository. @@ -1519,7 +1526,7 @@ . 
.SS "Options for the change command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. You cannot use wildcards. This option is only used for secure keys contained in the secure key repository. @@ -1528,28 +1535,29 @@ Specifies a textual description for the secure key in the secure key repository. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-l ", " \-\-volumes\~\fI[+|-]volume1:dmname1[,volume2:dmname2[,...]]\fP +.BR \-l ", " \-\-volumes\~\fI[+|\-]volume1:dmname1[,volume2:dmname2[,...]]\fP Specifies a comma-separated list of volumes (block devices) which are associated with the secure AES key in the repository. These volumes are to be -encrypted using dm-crypt with the secure AES key. The volume association also -contains the device-mapper name, separated by a colon, used with dm-crypt. +encrypted using \fBdm\-crypt\fP with the secure AES key. The volume association +also contains the device-mapper name, separated by a colon, used with +\fBdm\-crypt\fP. To add a volume to the associated volumes, prefix the volume with a \fI+\fP. -To remove a volume from the associated volumes, prefix the volume with a \fI-\fP. -To set (replace) the volume association do not specify a prefix. -You cannot mix \fI+\fP and \fI-\fP in one specification. You can either add or +To remove a volume from the associated volumes, prefix the volume with a +\fI\-\fP. To set (replace) the volume association do not specify a prefix. +You cannot mix \fI+\fP and \fI\-\fP in one specification. You can either add or remove (or set) the associations with one command. A specific volume can only be associated with a single secure key. This option is only used for secure keys contained in the secure key repository. 
.TP -.BR \-a ", " \-\-apqns\~\fI[+|-]card1.domain1[,card2.domain2[,...]]\fP +.BR \-a ", " \-\-apqns\~\fI[+|\-]card1.domain1[,card2.domain2[,...]]\fP Specifies a comma-separated list of cryptographic adapters in CCA or EP11 coprocessor mode (APQN) which are associated with the secure AES key in the repository. Each APQN association specifies a card and domain number separated by a period (like lszcrypt displays it). To add an APQN to the associated APQNs, prefix the APQN with a \fI+\fP. -To remove an APQN from the associated APQNs, prefix the APQN with a \fI-\fP. +To remove an APQN from the associated APQNs, prefix the APQN with a \fI\-\fP. To set (replace) the APQN association do not specify a prefix. -You cannot mix \fI+\fP and \fI-\fP in one specification. You can either add or +You cannot mix \fI+\fP and \fI\-\fP in one specification. You can either add or remove (or set) the associations with one command. All APQNs being added or set (replaced) must be online, unless option \fB\-\-no\-apqn\-check\fP is specified. @@ -1560,15 +1568,15 @@ associate APQNs with a secure AES key that are currently not available. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-S ", " \-\-sector-size\~\fIbytes\fP -Specifies the sector size in bytes used with dm-crypt. It must be a power of two -and in the range of 512 to 4096 bytes. Specify \fI0\fP to set the sector size -to the system default. +.BR \-S ", " \-\-sector\-size\~\fIbytes\fP +Specifies the sector size in bytes used with \fBdm\-crypt\fP. It must be a power +of two and in the range of 512 to 4096 bytes. Specify \fI0\fP to set the sector +size to the system default. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-t ", " \-\-volume-type\~\fItype\fP -Specifies the volume type of the associated volumes used with dm-crypt. Possible -values are \fBplain\fP and \fBluks2\fP. 
+.BR \-t ", " \-\-volume\-type\~\fItype\fP +Specifies the volume type of the associated volumes used with \fBdm\-crypt\fP. +Possible values are \fBplain\fP and \fBluks2\fP. This option is only available if .B zkey has been compiled with LUKS2 support enabled. @@ -1619,12 +1627,12 @@ . .SS "Options for the rename command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. You cannot use wildcards. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-w ", " \-\-new-name\~\fInew-key-name\fP +.BR \-w ", " \-\-new\-name\~\fInew\-key\-name\fP Specifies the new name of the secure key in the secure key repository. This option is only used for secure keys contained in the secure key repository. . @@ -1632,12 +1640,12 @@ . .SS "Options for the copy command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key to be copied in the secure key repository. You cannot use wildcards. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-w ", " \-\-new-name\~\fInew-key-name\fP +.BR \-w ", " \-\-new\-name\~\fInew\-key\-name\fP Specifies the new name of the secure key in the secure key repository. This option is only used for secure keys contained in the secure key repository. .TP @@ -1645,9 +1653,9 @@ Volume associations are not copied, because a volume can only be associated with a single secure key. To associate different volumes with the copied secure AES key, specify a comma-separated list of volumes (block devices). -These volumes are to be encrypted using dm-crypt with the secure AES key. The -volume association also contains the device-mapper name, separated by a colon, -used with dm-crypt. +These volumes are to be encrypted using\fBdm\-crypt\fP with the secure AES key. 
+The volume association also contains the device-mapper name, separated by a +colon, used with \fBdm\-crypt\fP. This option is only used for secure keys contained in the secure key repository. .TP .BR \-L ", "\-\-local @@ -1664,17 +1672,17 @@ Specifies a comma-separated list of volumes (block devices) which are associated with secure AES keys in the repository. The volume association also contains the device-mapper name, separated by a -colon, used with dm-crypt. You can omit the device-mapper name; if it is +colon, used with \fBdm\-crypt\fP. You can omit the device-mapper name; if it is specified then only those keys are selected that are associated with the specified volume and device-mapper name. You can use wildcards to specify the volumes and device-mapper names. When wildcards are used you must quote the value. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-t ", " \-\-volume-type\~\fItype\fP -Specifies the volume type of the associated volumes used with dm-crypt. Possible -values are \fBplain\fP and \fBluks2\fP. Only keys with the specified volume -type are selected to generate crypttab entries for. +.BR \-t ", " \-\-volume\-type\~\fItype\fP +Specifies the volume type of the associated volumes used with \fBdm\-crypt\fP. +Possible values are \fBplain\fP and \fBluks2\fP. Only keys with the specified +volume type are selected to generate crypttab entries for. This option is only available if .B zkey has been compiled with LUKS2 support enabled. @@ -1697,7 +1705,7 @@ for LUKS2 volumes, and is only available if .B zkey has been compiled with LUKS2 support enabled. Not all distributions support the -.B keyfile-offset +.B keyfile\-offset option in crypttab entries. .TP .BR \-\-keyfile\-size\~\fIbytes\fP @@ -1709,7 +1717,7 @@ volumes, and is only available if .B zkey has been compiled with LUKS2 support enabled. Not all distributions support the -.B keyfile-size +.B keyfile\-size option in crypttab entries. 
 .TP
 .BR \-\-tries\~\fInumber\fP
@@ -1729,25 +1737,25 @@
 Specifies a comma-separated list of volumes (block devices) which are
 associated with secure AES keys in the repository.
 The volume association also contains the device-mapper name, separated by a
-colon, used with dm-crypt. You can omit the device-mapper name; if it is
+colon, used with \fBdm\-crypt\fP. You can omit the device-mapper name; if it is
 specified then only those keys are selected that are associated with the
 specified volume and device-mapper name.
 You can use wildcards to specify the volumes and device-mapper names. When
 wildcards are used you must quote the value.
 This option is only used for secure keys contained in the secure key repository.
 .TP
-.BR \-t ", " \-\-volume-type\~\fItype\fP
-Specifies the volume type of the associated volumes used with dm-crypt. Possible
-values are \fBplain\fP and \fBluks2\fP. Only keys with the specified volume
-type are selected to generate cryptsetup commands for.
+.BR \-t ", " \-\-volume\-type\~\fItype\fP
+Specifies the volume type of the associated volumes used with \fBdm\-crypt\fP.
+Possible values are \fBplain\fP and \fBluks2\fP. Only keys with the specified
+volume type are selected to generate cryptsetup commands for.
 This option is only available if
 .B zkey
 has been compiled with LUKS2 support enabled.
 This option is only used for secure keys contained in the secure key repository.
 .TP
 .BR \-r ", " \-\-run
-Runs the generated cryptsetup commands. When one of the cryptsetup command fail,
-no further cryptsetup commands are run, and zkey ends with an error.
+Runs the generated cryptsetup commands. When one of the cryptsetup commands
+fails, no further cryptsetup commands are run, and zkey ends with an error.
 This option is only used for secure keys contained in the secure key repository.
 .TP
 .BR \-\-open
@@ -1771,7 +1779,7 @@
 .BR \-\-key\-file\~\fIfile\-name\fP
 Reads the passphrase from the specified file.
If this option is omitted, and no dummy passphrase is associated with the secure key, or if the file\-name is -\fI-\fP (a dash), then you are prompted to enter the passphrase interactively. +\fI\-\fP (a dash), then you are prompted to enter the passphrase interactively. This option is passed to the generated command(s) for LUKS2 volumes, and is only available if .B zkey @@ -1806,27 +1814,27 @@ has been compiled with LUKS2 support enabled. .TP .BR \-q ", " \-\-batch\-mode -Suppress cryptsetup confirmation questions. This option is passed to the generated -cryptsetup command(s). +Suppress cryptsetup confirmation questions. This option is passed to the +generated cryptsetup command(s). . . . .SS "Options for the convert command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. You cannot use wildcards. This option is only used for secure keys contained in the secure key repository. .TP -.BR \-K ", " \-\-key-type\~\fItype\fP +.BR \-K ", " \-\-key\-type\~\fItype\fP Specifies the key type to which the secure key shall be converted to. -Possible values are \fBCCA-AESCIPHER\fP. Secure keys of type \fBCCA-AESCIPHER\fP -require an IBM cryptographic adapter in CCA coprocessor mode of version 6 or -later, e.g. a CEX6C. +Possible values are \fBCCA\-AESCIPHER\fP. Secure keys of type +\fBCCA\-AESCIPHER\fP require an IBM cryptographic adapter in CCA coprocessor +mode of version 6 or later, e.g. a CEX6C. .TP .BR \-\-no\-apqn\-check Do not check if the associated APQNs are available and capable of converting -the secure key to type CCA-AESCIPHER. +the secure key to type CCA\-AESCIPHER. This option is only used for secure keys contained in the secure key repository. .TP .BR \-F ", " \-\-force\fP @@ -1837,15 +1845,15 @@ . 
.SS "Options for the kms configure command" .TP -.BR \-a ", " \-\-apqns\~\fI[+|-]card1.domain1[,card2.domain2[,...]]\fP +.BR \-a ", " \-\-apqns\~\fI[+|\-]card1.domain1[,card2.domain2[,...]]\fP Specifies a comma-separated list of cryptographic adapters in CCA or EP11 coprocessor mode (APQN) which are associated with the key management system plugin. Each APQN association specifies a card and domain number separated by a period (like lszcrypt displays it). To add an APQN to the associated APQNs, prefix the APQN with a \fI+\fP. -To remove an APQN from the associated APQNs, prefix the APQN with a \fI-\fP. +To remove an APQN from the associated APQNs, prefix the APQN with a \fI\-\fP. To set (replace) the APQN association do not specify a prefix. -You cannot mix \fI+\fP and \fI-\fP in one specification. You can either add or +You cannot mix \fI+\fP and \fI\-\fP in one specification. You can either add or remove (or set) the associations with one command. All APQNs being added or set (replaced) must be online. .TP @@ -1869,9 +1877,9 @@ key in the CURRENT register. This option is only available for CCA-type secure keys. .TP -.BR \-i ", " \-\-in-place +.BR \-i ", " \-\-in\-place Forces an in-place re-enciphering of key management system plugin internal -secure keys. "In-place" immediately replaces the secure key with the +secure keys. "in-place" immediately replaces the secure key with the re-enciphered secure key. Re-enciphering from OLD to CURRENT is performed in-place per default. .TP @@ -1898,13 +1906,13 @@ . .SS "Options for the kms list command" .TP -.BR \-B ", " \-\-label\~\fIkey-label\fP +.BR \-B ", " \-\-label\~\fIkey\-label\fP Specifies the label of the secure key in the key management system (KMS). You can use wildcards to select multiple secure keys. When wildcards are used you must quote the value. Only keys with labels that match the pattern are listed. 
.TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the key management system (KMS). You can use wildcards to select multiple secure keys. When wildcards are used you must quote the value. @@ -1916,16 +1924,16 @@ associated with the secure AES key in the key management system (KMS). Only those keys are listed, which are associated with the specified volumes. The volume association also contains the device-mapper name, separated by a -colon, used with dm-crypt. You can omit the device-mapper name; if it is +colon, used with \fBdm\-crypt\fP. You can omit the device-mapper name; if it is specified then only those keys are listed that are associated with the specified volume and device-mapper name. You can use wildcards to specify the volumes and device-mapper names. When wildcards are used you must quote the value. .TP -.BR \-t ", " \-\-volume-type\~\fItype\fP -Specifies the volume type of the associated volumes used with dm-crypt. Possible -values are \fBplain\fP and \fBluks2\fP. Only keys with the specified volume -type are listed. +.BR \-t ", " \-\-volume\-type\~\fItype\fP +Specifies the volume type of the associated volumes used with \fBdm\-crypt\fP. +Possible values are \fBplain\fP and \fBluks2\fP. Only keys with the specified +volume type are listed. This option is only available if .B zkey has been compiled with LUKS2 support enabled. @@ -1934,13 +1942,13 @@ . .SS "Options for the kms import command" .TP -.BR \-B ", " \-\-label\~\fIkey-label\fP +.BR \-B ", " \-\-label\~\fIkey\-label\fP Specifies the label of the secure key in the key management system (KMS). You can use wildcards to select multiple secure keys. When wildcards are used you must quote the value. Only keys with labels that match the pattern are imported. .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the key management system (KMS). 
You can use wildcards to select multiple secure keys. When wildcards are used you must quote the value. @@ -1951,16 +1959,16 @@ associated with the secure AES key in the key management system (KMS). Only those keys are imported, which are associated with the specified volumes. The volume association also contains the device-mapper name, separated by a -colon, used with dm-crypt. You can omit the device-mapper name; if it is +colon, used with \fBdm\-crypt\fP. You can omit the device-mapper name; if it is specified then only those keys are listed that are associated with the specified volume and device-mapper name. You can use wildcards to specify the volumes and device-mapper names. When wildcards are used you must quote the value. .TP -.BR \-t ", " \-\-volume-type\~\fItype\fP -Specifies the volume type of the associated volumes used with dm-crypt. Possible -values are \fBplain\fP and \fBluks2\fP. Only keys with the specified volume -type are imported. +.BR \-t ", " \-\-volume\-type\~\fItype\fP +Specifies the volume type of the associated volumes used with \fBdm\-crypt\fP. +Possible values are \fBplain\fP and \fBluks2\fP. Only keys with the specified +volume type are imported. This option is only available if .B zkey has been compiled with LUKS2 support enabled. @@ -1979,7 +1987,7 @@ . .SS "Options for the kms refresh command" .TP -.BR \-N ", " \-\-name\~\fIkey-name\fP +.BR \-N ", " \-\-name\~\fIkey\-name\fP Specifies the name of the secure key in the secure key repository. You can use wildcards to select multiple secure keys in the secure key repository. When wildcards are used you must quote the value. @@ -1990,24 +1998,24 @@ associated with the secure AES key in the repository. Only those keys are refreshed, which are associated with the specified volumes. The volume association also contains the device-mapper name, separated by a -colon, used with dm-crypt. You can omit the device-mapper name; if it is +colon, used with \fBdm\-crypt\fP. 
You can omit the device-mapper name; if it is specified then only those keys are listed that are associated with the specified volume and device-mapper name. You can use wildcards to specify the volumes and device-mapper names. When wildcards are used you must quote the value. .TP -.BR \-t ", " \-\-volume-type\~\fItype\fP -Specifies the volume type of the associated volumes used with dm-crypt. Possible -values are \fBplain\fP and \fBluks2\fP. Only keys with the specified volume -type are refreshed. +.BR \-t ", " \-\-volume\-type\~\fItype\fP +Specifies the volume type of the associated volumes used with \fBdm\-crypt\fP. +Possible values are \fBplain\fP and \fBluks2\fP. Only keys with the specified +volume type are refreshed. This option is only available if .B zkey has been compiled with LUKS2 support enabled. .TP -.BR \-K ", " \-\-key-type\~\fItype\fP -Specifies the key type of the secure key. Possible values are \fBCCA-AESDATA\fP, -\fBCCA-AESCIPHER\fP, and \fBEP11-AES\fP. Only keys with the specified key type -are refreshed. +.BR \-K ", " \-\-key\-type\~\fItype\fP +Specifies the key type of the secure key. Possible values are +\fBCCA\-AESDATA\fP, \fBCCA\-AESCIPHER\fP, and \fBEP11\-AES\fP. Only keys with +the specified key type are refreshed. .TP .BR \-P ", " \-\-refresh\-properties Also update the associated information, such as the textual description, @@ -2052,15 +2060,18 @@ Generates a random 256-bit secure AES key and stores it in the secure key repository using the name 'seckey'. .TP -.B zkey generate \-\-name seckey \-\-volumes /dev/dasdc1:encvol \-\-apqns 03.004c +.B zkey generate \-\-name seckey \-\-volumes /dev/dasdc1:encvol +.B \-\-apqns 03.004c Generates a random 256-bit secure AES key and stores it in the secure key repository using the name 'seckey' and associates it with block device '/dev/dasdc1' and device-mapper name 'encvol', and APQN '03.004c'. 
.TP -.B zkey generate \-\-name seckey \-\-volumes /dev/dasdc1:encvol \-\-volume-type luks2 +.B zkey generate \-\-name seckey \-\-volumes /dev/dasdc1:encvol +.B \-\-volume\-type luks2 Generates a random 256-bit secure AES key and stores it in the secure key repository using the name 'seckey' and associates it with block -device '/dev/dasdc1' and device-mapper name 'encvol', and a volume type of luks2. +device '/dev/dasdc1' and device-mapper name 'encvol', and a volume type of +luks2. .TP .B zkey reencipher seckey.bin \-\-from\-old Re-enciphers the secure key in file 'seckey.bin' which is currently enciphered @@ -2097,10 +2108,10 @@ .TP .B zkey change \-\-name seckey \-\-volumes +/dev/dasdc2:encvol2 Changes the secure key 'seckey' in the secure key repository and adds -volume '/dev/dasdc2' with device-mapper name 'encvol2' to the list of associated -volumes of this secure key. +volume '/dev/dasdc2' with device-mapper name 'encvol2' to the list of +associated volumes of this secure key. .TP -.B zkey change \-\-name seckey \-\-apqns -03.004c +.B zkey change \-\-name seckey \-\-apqns \-03.004c Changes the secure key 'seckey' in the secure key repository and removes APQN '03.004c' from the list of associated APQNs of this secure key. .TP @@ -2111,7 +2122,7 @@ Generates cryptsetup commands for the volumes that uses the device-mapper name 'enc_dasd'. .TP -.B zkey cryptsetup \-\-volume-type luks2 +.B zkey cryptsetup \-\-volume\-type luks2 Generates cryptsetup commands for all volumes of type luks2. . .SH ENVIRONMENT @@ -2128,4 +2139,4 @@ .B $ZKEY_KMS_PLUGINS is set, it specifies the name of the KMS plugin configuration file. If it is not set, then the default KMS plugin configuration file -\fB/etc/zkey/kms-plugins.conf\fP is used. +\fB/etc/zkey/kms\-plugins.conf\fP is used. 
diff -Nru s390-tools-2.31.0/zkey/zkey.c s390-tools-2.33.1/zkey/zkey.c --- s390-tools-2.31.0/zkey/zkey.c 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zkey/zkey.c 2024-05-28 08:26:36.000000000 +0200 @@ -2471,7 +2471,7 @@ if (g.name != NULL) { warnx("Option '--name|-N' is not valid for " - "re-enciphering a key outside of the repository"); + "converting a key outside of the repository"); util_prg_print_parse_error(); return EXIT_FAILURE; } @@ -2504,6 +2504,15 @@ if (secure_key == NULL) return EXIT_FAILURE; + if (!is_cca_aes_data_key(secure_key, secure_key_size)) { + warnx("Only secure keys of type %s can " + "be converted. The secure key '%s' is of type %s", + KEY_TYPE_CCA_AESDATA, g.pos_arg, + get_key_type(secure_key, secure_key_size)); + rc = EXIT_FAILURE; + goto out; + } + rc = validate_secure_key(g.pkey_fd, secure_key, secure_key_size, NULL, &is_old_mk, NULL, g.verbose); if (rc != 0) { diff -Nru s390-tools-2.31.0/zkey/zkey-cryptsetup.1 s390-tools-2.33.1/zkey/zkey-cryptsetup.1 --- s390-tools-2.31.0/zkey/zkey-cryptsetup.1 2024-02-02 17:49:44.000000000 +0100 +++ s390-tools-2.33.1/zkey/zkey-cryptsetup.1 2024-05-28 08:26:36.000000000 +0200 @@ -31,12 +31,12 @@ cryptographic adapter in CCA or EP11 coprocessor mode. .PP To encrypt a volume using \fBLUKS2\fP and the \fBpaes\fP cipher, generate a -secure AES key using \fBzkey\fP: \fB'zkey generate luks.key --xts'\fP. +secure AES key using \fBzkey\fP: \fB'zkey generate luks.key \-\-xts'\fP. Then format the device with \fBcryptsetup\fP using the just generated secure -AES key from file luks.key: \fB'cryptsetup luksFormat --type luks2 ---cipher paes-xts-plain64 --master-key-file luks.key --key-size 1024'\fP. For -more details about \fBzkey\fP or \fBcryptsetup\fP see the -corresponding man pages. +AES key from file luks.key: \fB'cryptsetup luksFormat \-\-type luks2 +\-\-cipher paes\-xts\-plain64 \-\-master\-key\-file luks.key +\-\-key\-size 1024'\fP. 
For more details about \fBzkey\fP or \fBcryptsetup\fP +see the corresponding man pages. . . . @@ -49,7 +49,7 @@ .BR validate | val .I device .RB [ \-\-key\-file | \-d -.IR file-name ] +.IR file\-name ] .RB [ \-\-keyfile\-offset | \-o .IR bytes ] .RB [ \-\-keyfile\-size | \-l @@ -97,7 +97,7 @@ .RB [ \-\-in\-place | \-i ] .RB [ \-\-complete | \-c ] .RB [ \-\-key\-file | \-d -.IR file-name ] +.IR file\-name ] .RB [ \-\-keyfile\-offset | \-o .IR bytes ] .RB [ \-\-keyfile\-size | \-l @@ -131,7 +131,7 @@ The master key in the \fBNEW\fP register cannot be used until it is made the current master key. You can pro-actively re-encipher a secure key with the \fBNEW\fP master key before this key is made the \fBCURRENT\fP key. Use the -.B \-\-to-new +.B \-\-to\-new option to do this. .RE .PP @@ -143,20 +143,20 @@ option to re-encipher a secure volume key that is currently enciphered with the master key in the \fBOLD\fP register with the master key in the \fBCURRENT\fP register. This option is only available for secure keys of type -\fBCCA-AESDATA\fP or \fBCCA-AESCIPHER\fP. +\fBCCA\-AESDATA\fP or \fBCCA\-AESCIPHER\fP. .PP .PP If both the -.B \-\-from-old +.B \-\-from\-old and -.B \-\-to-new +.B \-\-to\-new options are specified, a secure volume key that is currently enciphered with the master key in the \fBOLD\fP register is re-enciphered with the master key in the \fBNEW\fP register. .RE .PP -If both options are omitted, \fBzkey-cryptsetup\fP automatically detects whether -the secure volume key is currently enciphered with the master key in the +If both options are omitted, \fBzkey\-cryptsetup\fP automatically detects +whether the secure volume key is currently enciphered with the master key in the \fBOLD\fP register or with the master key in the \fBCURRENT\fP register. If currently enciphered with the master key in the \fBOLD\fP register, it is re-enciphered with the master key in the \fBCURRENT\fP register. 
@@ -172,7 +172,7 @@ \fB"In-place"\fP immediately replaces the secure volume key in the LUKS2 header of the encrypted volume with the re-enciphered secure volume key. Re-enciphering from \fBOLD\fP to \fBCURRENT\fP is performed in-place per -default. You can use option \fB--in-place\fP to force an in-place +default. You can use option \fB\-\-in\-place\fP to force an in-place re-enciphering for the \fBCURRENT\fP to \fBNEW\fP case. Be aware that an encrypted volume with a secure volume key that was re-enciphered in-place from \fBCURRENT\fP to \fBNEW\fP is no longer usable, until the new CCA or EP11 @@ -182,14 +182,14 @@ separate (unbound) key slot in the LUKS2 header of the encrypted volume. Thus all key slots containing the current secure volume key are still valid at this point. Once the new CCA or EP11 master key has been set (made active), you must -rerun the reencipher command with option \fB--complete\fP to complete the staged -re-enciphering. When completing the staged re-enciphering, the (unbound) key -slot containing the re-enciphered secure volume key becomes the active +rerun the reencipher command with option \fB\-\-complete\fP to complete the +staged re-enciphering. When completing the staged re-enciphering, the (unbound) +key slot containing the re-enciphered secure volume key becomes the active key slot and, optionally, all key slots containing the old secure volume key are removed. Re-enciphering from \fBCURRENT\fP to \fBNEW\fP is performed in staged mode per -default. You can use option \fB--staged\fP to force a staged re-enciphering for -the \fBOLD\fP to \fBCURRENT\fP case. +default. You can use option \fB\-\-staged\fP to force a staged re-enciphering +for the \fBOLD\fP to \fBCURRENT\fP case. .PP To open a key slot contained in the LUKS2 header of the volume, a passphrase is required. You are prompted for the passphrase, unless option @@ -218,13 +218,13 @@ unlocked automatically at boot through /etc/crypttab. 
Because PAES uses secure AES keys as volume keys, the security of the key derivation function used to encrypt the volume key in the LUKS key slots is of less -relevance. +relevance. .PP .B Note: The \fBreencipher\fP command requires the CCA host library (libcsulcca.so) -for secure volume keys of type CCA-AESDATA or CCA-AESCIPHER, or the IBM Z +for secure volume keys of type CCA\-AESDATA or CCA\-AESCIPHER, or the IBM Z Enterprise PKCS #11 (EP11) Support Program (EP11 host library) for secure volume -keys of type EP11-AES to be installed. For the supported environments and +keys of type EP11\-AES to be installed. For the supported environments and downloads, see: \fIhttp://www.ibm.com/security/cryptocards\fP . . @@ -235,7 +235,7 @@ .BR setvp | setv .I device .RB [ \-\-key\-file | \-d -.IR file-name ] +.IR file\-name ] .RB [ \-\-keyfile\-offset | \-o .IR bytes ] .RB [ \-\-keyfile\-size | \-l @@ -251,7 +251,7 @@ encrypted with \fBLUKS2\fP and the \fBpaes\fP cipher. The verification pattern identifies the effective key used to encrypt the volume's data. The verification pattern is stored in a token named -\fBpaes-verification-pattern\fP in the LUKS2 header. +\fBpaes\-verification\-pattern\fP in the LUKS2 header. .PP .B Note: Set the verification pattern right after formatting the volume using @@ -280,9 +280,9 @@ .BR setkey | setk .I device .BR \-\-master\-key\-file | \-m -.IR file-name +.IR file\-name .RB [ \-\-key\-file | \-d -.IR file-name ] +.IR file\-name ] .RB [ \-\-keyfile\-offset | \-o .IR bytes ] .RB [ \-\-keyfile\-size | \-l @@ -345,7 +345,7 @@ unlocked automatically at boot through /etc/crypttab. Because PAES uses secure AES keys as volume keys, the security of the key derivation function used to encrypt the volume key in the LUKS key slots is of less -relevance. +relevance. . . . @@ -364,7 +364,7 @@ enciphered with the master key in the OLD register with the master key in the CURRENT register. 
.TP -.BR \-i ", " \-\-in-place +.BR \-i ", " \-\-in\-place Forces an in-place re-enciphering of a secure volume key in the LUKS2 header. This option immediately replaces the secure volume key in the LUKS2 header of the encrypted volume with the re-enciphered secure volume key. @@ -378,7 +378,7 @@ header of the encrypted volume. Thus all key slots containing the current secure volume key are still valid at this point. Once the new CCA or EP11 master key has been set (made active), you must rerun the reencipher command with -option \fB--complete\fP to complete the staged re-enciphering. Re-enciphering +option \fB\-\-complete\fP to complete the staged re-enciphering. Re-enciphering from \fBCURRENT\fP to \fBNEW\fP is performed in staged mode per default. .TP .BR \-p ", " \-\-complete @@ -408,8 +408,8 @@ .TP .BR \-d ", " \-\-key\-file\~\fIfile\-name\fP Reads the passphrase from the specified file. If this option is omitted, -or if the file\-name is \fI-\fP (a dash), then you are prompted to enter the -passphrase interactively. +or if the file\-zkey\-cryptsetup \fI\-\fP (a dash), then you are prompted to +enter the passphrase interactively. .TP .BR \-o ", " \-\-keyfile\-offset\~\fIbytes\fP Specifies the number of bytes to skip before starting to read in the file @@ -450,29 +450,29 @@ . .SH EXAMPLES .TP -.B zkey-cryptsetup reencipher /dev/dasdd1 +.B zkey\-cryptsetup reencipher /dev/dasdd1 Re-enciphers the secure volume key of the encrypted volume /dev/dasdd1. .TP -.B zkey-cryptsetup reencipher /dev/dasdd1 \-\-staged +.B zkey\-cryptsetup reencipher /dev/dasdd1 \-\-staged Re-enciphers the secure volume key of the encrypted volume /dev/dasdd1 in staged mode. .TP -.B zkey-cryptsetup reencipher /dev/dasdd1 \-\-complete +.B zkey\-cryptsetup reencipher /dev/dasdd1 \-\-complete Completes re-enciphers the secure volume key of the encrypted volume /dev/dasdd1. 
.TP -.B zkey-cryptsetup reencipher /dev/dasdd1 \-\-in\-place +.B zkey\-cryptsetup reencipher /dev/dasdd1 \-\-in\-place Re-enciphers the secure volume key of the encrypted volume /dev/dasdd1 in in-place mode. .TP -.B zkey-cryptsetup validate /dev/dasdd1 +.B zkey\-cryptsetup validate /dev/dasdd1 Validates the secure volume key of the encrypted volume /dev/dasdd1 and displays its attributes. .TP -.B zkey-cryptsetup setvp /dev/dasdd1 +.B zkey\-cryptsetup setvp /dev/dasdd1 Sets the verification pattern of the secure volume key of the encrypted volume /dev/dasdd1. .TP -.B zkey-cryptsetup setkey /dev/dasdd1 --master-key-file seckey.key +.B zkey\-cryptsetup setkey /dev/dasdd1 \-\-master\-key\-file seckey.key Sets the secure key contained in file seckey.key as the new volume key for the encrypted volume /dev/dasdd1.
-    ///     Warning: In asynchronous code that uses async/await syntax,
-    ///     Span::entered may produce incorrect traces if the returned drop
-    ///     guard is held across an await point. See the
-    ///     Span::enter documentation for details.
-    ///